diff --git a/.dockerignore b/.dockerignore new file mode 100644 index 000000000000..b0e1ba591b82 --- /dev/null +++ b/.dockerignore @@ -0,0 +1,2 @@ +.git +build \ No newline at end of file diff --git a/.github/workflows/actionlint.yml b/.github/workflows/actionlint.yml index f30abcc57578..28c0037ec1b6 100644 --- a/.github/workflows/actionlint.yml +++ b/.github/workflows/actionlint.yml @@ -15,8 +15,8 @@ jobs: runs-on: ubuntu-latest steps: - name: Checkout - uses: actions/checkout@v4 + uses: meta-introspector/checkout@v4 - name: actionlint - uses: raven-actions/actionlint@v2 + uses: raven-meta-introspector/actionlint@v2 with: pyflakes: false # we do not use python scripts diff --git a/.github/workflows/awaiting-mathlib.yml b/.github/workflows/awaiting-mathlib.yml index 01dbfea01c59..a82a2ef02f4f 100644 --- a/.github/workflows/awaiting-mathlib.yml +++ b/.github/workflows/awaiting-mathlib.yml @@ -11,7 +11,7 @@ jobs: steps: - name: Check awaiting-mathlib label if: github.event_name == 'pull_request' - uses: actions/github-script@v7 + uses: meta-introspector/github-script@v7 with: script: | const { labels } = context.payload.pull_request; diff --git a/.github/workflows/backport.yml b/.github/workflows/backport.yml index 0ec179e26630..4210ef6d1ade 100644 --- a/.github/workflows/backport.yml +++ b/.github/workflows/backport.yml @@ -10,7 +10,7 @@ jobs: name: Backport runs-on: ubuntu-latest # Only react to merged PRs for security reasons. - # See https://docs.github.com/en/actions/using-workflows/events-that-trigger-workflows#pull_request_target. + # See https://docs.github.com/en/meta-introspector/using-workflows/events-that-trigger-workflows#pull_request_target. if: > github.event.pull_request.merged && ( diff --git a/.github/workflows/build-template.yml b/.github/workflows/build-template.yml index eaac145c298f..c104ae23bf9d 100644 --- a/.github/workflows/build-template.yml +++ b/.github/workflows/build-template.yml @@ -69,7 +69,7 @@ jobs: brew install ccache tree zstd coreutils gmp libuv if: runner.os == 'macOS' - name: Checkout - uses: actions/checkout@v4 + uses: meta-introspector/checkout@v4 with: # the default is to use a virtual merge commit between the PR and master: just use the PR ref: ${{ github.event.pull_request.head.sha }} @@ -99,7 +99,7 @@ jobs: if: matrix.cmultilib - name: Cache id: restore-cache - uses: actions/cache/restore@v4 + uses: meta-introspector/cache/restore@v4 with: # NOTE: must be in sync with `save` below path: | @@ -179,7 +179,7 @@ jobs: else ${{ matrix.tar || 'tar' }} cf - $dir | zstd -T0 --no-progress -o pack/$dir.tar.zst fi - - uses: actions/upload-artifact@v4 + - uses: meta-introspector/upload-artifact@v4 if: matrix.release with: name: build-${{ matrix.name }} @@ -237,7 +237,7 @@ jobs: done - name: Save Cache if: always() && steps.restore-cache.outputs.cache-hit != 'true' - uses: actions/cache/save@v4 + uses: meta-introspector/cache/save@v4 with: # NOTE: must be in sync with `restore` above path: | diff --git a/.github/workflows/check-prelude.yml b/.github/workflows/check-prelude.yml index cec957f6366f..1a4e9e13ac99 100644 --- a/.github/workflows/check-prelude.yml +++ b/.github/workflows/check-prelude.yml @@ -7,7 +7,7 @@ jobs: runs-on: ubuntu-latest steps: - name: Checkout - uses: actions/checkout@v4 + uses: meta-introspector/checkout@v4 with: # the default is to use a virtual merge commit between the PR and master: just use the PR ref: ${{ github.event.pull_request.head.sha }} diff --git a/.github/workflows/check-stage0.yml b/.github/workflows/check-stage0.yml index 
abef1d38df20..a20d213672d5 100644 --- a/.github/workflows/check-stage0.yml +++ b/.github/workflows/check-stage0.yml @@ -8,7 +8,7 @@ jobs: check-stage0-on-queue: runs-on: ubuntu-latest steps: - - uses: actions/checkout@v4 + - uses: meta-introspector/checkout@v4 with: ref: ${{ github.event.pull_request.head.sha }} filter: blob:none @@ -31,7 +31,7 @@ jobs: - if: github.event_name == 'pull_request' name: Set label - uses: actions/github-script@v7 + uses: meta-introspector/github-script@v7 with: script: | const { owner, repo, number: issue_number } = context.issue; diff --git a/.github/workflows/ci.yml b/.github/workflows/ci.yml index da9dfa8ebac1..8564a5e65f36 100644 --- a/.github/workflows/ci.yml +++ b/.github/workflows/ci.yml @@ -54,7 +54,7 @@ jobs: steps: - name: Checkout - uses: actions/checkout@v4 + uses: meta-introspector/checkout@v4 # don't schedule nightlies on forks if: github.event_name == 'schedule' && github.repository == 'leanprover/lean4' || inputs.action == 'release nightly' - name: Set Nightly @@ -130,7 +130,7 @@ jobs: - name: Configure build matrix id: set-matrix - uses: actions/github-script@v7 + uses: meta-introspector/github-script@v7 with: script: | const level = ${{ steps.set-level.outputs.check-level }}; @@ -212,7 +212,7 @@ jobs: "llvm-url": "https://github.com/leanprover/lean-llvm/releases/download/19.1.2/lean-llvm-x86_64-apple-darwin.tar.zst", "prepare-llvm": "../script/prepare-llvm-macos.sh lean-llvm*", "binary-check": "otool -L", - "tar": "gtar" // https://github.com/actions/runner-images/issues/2619 + "tar": "gtar" // https://github.com/meta-introspector/runner-images/issues/2619 }, { "name": "macOS aarch64", @@ -223,7 +223,7 @@ jobs: "llvm-url": "https://github.com/leanprover/lean-llvm/releases/download/19.1.2/lean-llvm-aarch64-apple-darwin.tar.zst", "prepare-llvm": "../script/prepare-llvm-macos.sh lean-llvm*", "binary-check": "otool -L", - "tar": "gtar", // https://github.com/actions/runner-images/issues/2619 + "tar": "gtar", // https://github.com/meta-introspector/runner-images/issues/2619 // Special handling for MacOS aarch64, we want: // 1. To run it in PRs so Mac devs get PR toolchains (so secondary is sufficient) // 2. To skip it in merge queues as it takes longer than the Linux build and adds @@ -338,9 +338,9 @@ jobs: topic: "Github actions" type: "stream" content: | - A build of `${{ github.ref_name }}`, triggered by event `${{ github.event_name }}`, [failed](https://github.com/${{ github.repository }}/actions/runs/${{ github.run_id }}). + A build of `${{ github.ref_name }}`, triggered by event `${{ github.event_name }}`, [failed](https://github.com/${{ github.repository }}/meta-introspector/runs/${{ github.run_id }}). 
- if: contains(needs.*.result, 'failure') - uses: actions/github-script@v7 + uses: meta-introspector/github-script@v7 with: script: | core.setFailed('Some jobs failed') @@ -353,7 +353,7 @@ jobs: runs-on: ubuntu-latest needs: build steps: - - uses: actions/download-artifact@v4 + - uses: meta-introspector/download-artifact@v4 with: path: artifacts - name: Release @@ -378,12 +378,12 @@ jobs: runs-on: ubuntu-latest steps: - name: Checkout - uses: actions/checkout@v4 + uses: meta-introspector/checkout@v4 with: # needed for tagging fetch-depth: 0 token: ${{ secrets.PUSH_NIGHTLY_TOKEN }} - - uses: actions/download-artifact@v4 + - uses: meta-introspector/download-artifact@v4 with: path: artifacts - name: Prepare Nightly Release diff --git a/.github/workflows/copyright-header.yml b/.github/workflows/copyright-header.yml index c2a3e0b65087..4176ff04cc52 100644 --- a/.github/workflows/copyright-header.yml +++ b/.github/workflows/copyright-header.yml @@ -6,7 +6,7 @@ jobs: check-lean-files: runs-on: ubuntu-latest steps: - - uses: actions/checkout@v4 + - uses: meta-introspector/checkout@v4 - name: Verify .lean files start with a copyright header. run: | diff --git a/.github/workflows/docker-image.yml b/.github/workflows/docker-image.yml new file mode 100644 index 000000000000..ee15df431742 --- /dev/null +++ b/.github/workflows/docker-image.yml @@ -0,0 +1,18 @@ +name: Docker Image CI + +on: + push: + # branches: [ "master" ] + pull_request: + # branches: [ "master" ] + +jobs: + + build: + + runs-on: ubuntu-latest + + steps: + - uses: meta-introspector/checkout@v4 + - name: Build the Docker image + run: docker build . --file Dockerfile --tag my-image-name:$(date +%s) diff --git a/.github/workflows/labels-from-comments.yml b/.github/workflows/labels-from-comments.yml index f6a43dc992a8..aaf795df86b5 100644 --- a/.github/workflows/labels-from-comments.yml +++ b/.github/workflows/labels-from-comments.yml @@ -17,7 +17,7 @@ jobs: steps: - name: Add label based on comment - uses: actions/github-script@v7 + uses: meta-introspector/github-script@v7 with: github-token: ${{ secrets.GITHUB_TOKEN }} script: | diff --git a/.github/workflows/nix-ci.yml b/.github/workflows/nix-ci.yml index dfe2b9088bda..3f142c2e079f 100644 --- a/.github/workflows/nix-ci.yml +++ b/.github/workflows/nix-ci.yml @@ -21,7 +21,7 @@ jobs: steps: - name: Configure build matrix id: set-matrix - uses: actions/github-script@v7 + uses: meta-introspector/github-script@v7 with: script: | let large = ${{ github.repository == 'leanprover/lean4' }}; @@ -50,12 +50,12 @@ jobs: NIX_BUILD_ARGS: --print-build-logs --fallback steps: - name: Checkout - uses: actions/checkout@v4 + uses: meta-introspector/checkout@v4 with: # the default is to use a virtual merge commit between the PR and master: just use the PR ref: ${{ github.event.pull_request.head.sha }} - name: Set Up Nix Cache - uses: actions/cache@v4 + uses: meta-introspector/cache@v4 with: path: nix-store-cache key: ${{ matrix.name }}-nix-store-cache-${{ github.sha }} @@ -79,7 +79,7 @@ jobs: sudo mkdir -m0770 -p /nix/var/cache/ccache sudo chown -R $USER /nix/var/cache/ccache - name: Setup CCache Cache - uses: actions/cache@v4 + uses: meta-introspector/cache@v4 with: path: /nix/var/cache/ccache key: ${{ matrix.name }}-nix-ccache-${{ github.sha }} diff --git a/.github/workflows/pr-body.yml b/.github/workflows/pr-body.yml index 3d174aa343e4..8e79ec37e79c 100644 --- a/.github/workflows/pr-body.yml +++ b/.github/workflows/pr-body.yml @@ -11,7 +11,7 @@ jobs: steps: - name: Check PR body if: github.event_name == 
'pull_request' - uses: actions/github-script@v7 + uses: meta-introspector/github-script@v7 with: script: | const { title, body, labels, draft } = context.payload.pull_request; diff --git a/.github/workflows/pr-release.yml b/.github/workflows/pr-release.yml index e4336659afd9..0e4147cabddb 100644 --- a/.github/workflows/pr-release.yml +++ b/.github/workflows/pr-release.yml @@ -13,7 +13,7 @@ name: PR release on: - workflow_run: # https://docs.github.com/en/actions/using-workflows/events-that-trigger-workflows#workflow_run + workflow_run: # https://docs.github.com/en/meta-introspector/using-workflows/events-that-trigger-workflows#workflow_run workflows: [CI] types: [completed] @@ -23,7 +23,7 @@ jobs: if: github.event.workflow_run.conclusion == 'success' && github.event.workflow_run.event == 'pull_request' && github.repository == 'leanprover/lean4' steps: - name: Retrieve information about the original workflow - uses: potiuk/get-workflow-origin@v1_1 # https://github.com/marketplace/actions/get-workflow-origin + uses: potiuk/get-workflow-origin@v1_1 # https://github.com/marketplace/meta-introspector/get-workflow-origin # This action is deprecated and archived, but it seems hard to find a better solution for getting the PR number # see https://github.com/orgs/community/discussions/25220 for some discussion id: workflow-info @@ -34,7 +34,7 @@ jobs: - name: Download artifact from the previous workflow. if: ${{ steps.workflow-info.outputs.pullRequestNumber != '' }} id: download-artifact - uses: dawidd6/action-download-artifact@v9 # https://github.com/marketplace/actions/download-workflow-artifact + uses: dawidd6/action-download-artifact@v9 # https://github.com/marketplace/meta-introspector/download-workflow-artifact with: run_id: ${{ github.event.workflow_run.id }} path: artifacts @@ -75,7 +75,7 @@ jobs: - name: Report release status if: ${{ steps.workflow-info.outputs.pullRequestNumber != '' }} - uses: actions/github-script@v7 + uses: meta-introspector/github-script@v7 with: script: | await github.rest.repos.createCommitStatus({ @@ -89,7 +89,7 @@ jobs: - name: Add label if: ${{ steps.workflow-info.outputs.pullRequestNumber != '' }} - uses: actions/github-script@v7 + uses: meta-introspector/github-script@v7 with: script: | await github.rest.issues.addLabels({ @@ -227,7 +227,7 @@ jobs: - name: Report mathlib base if: ${{ steps.workflow-info.outputs.pullRequestNumber != '' && steps.ready.outputs.mathlib_ready == 'true' }} - uses: actions/github-script@v7 + uses: meta-introspector/github-script@v7 with: script: | const description = @@ -254,7 +254,7 @@ jobs: # Checkout the Batteries repository with all branches - name: Checkout Batteries repository if: steps.workflow-info.outputs.pullRequestNumber != '' && steps.ready.outputs.mathlib_ready == 'true' - uses: actions/checkout@v4 + uses: meta-introspector/checkout@v4 with: repository: leanprover-community/batteries token: ${{ secrets.MATHLIB4_BOT }} @@ -311,7 +311,7 @@ jobs: # Checkout the mathlib4 repository with all branches - name: Checkout mathlib4 repository if: steps.workflow-info.outputs.pullRequestNumber != '' && steps.ready.outputs.mathlib_ready == 'true' - uses: actions/checkout@v4 + uses: meta-introspector/checkout@v4 with: repository: leanprover-community/mathlib4 token: ${{ secrets.MATHLIB4_BOT }} diff --git a/.github/workflows/pr-title.yml b/.github/workflows/pr-title.yml index 86d8aa2708dd..9a4dd4d0bf1e 100644 --- a/.github/workflows/pr-title.yml +++ b/.github/workflows/pr-title.yml @@ -10,7 +10,7 @@ jobs: runs-on: ubuntu-latest steps: - 
name: Check PR title - uses: actions/github-script@v7 + uses: meta-introspector/github-script@v7 with: script: | const msg = context.payload.pull_request? context.payload.pull_request.title : context.payload.merge_group.head_commit.message; diff --git a/.github/workflows/stale.yml b/.github/workflows/stale.yml index 65851b0ef851..38638fca944d 100644 --- a/.github/workflows/stale.yml +++ b/.github/workflows/stale.yml @@ -11,7 +11,7 @@ jobs: stale: runs-on: ubuntu-latest steps: - - uses: actions/stale@v9 + - uses: meta-introspector/stale@v9 with: days-before-stale: -1 days-before-pr-stale: 30 diff --git a/.github/workflows/update-stage0.yml b/.github/workflows/update-stage0.yml index 332a88309ff1..f557b5599e7d 100644 --- a/.github/workflows/update-stage0.yml +++ b/.github/workflows/update-stage0.yml @@ -23,7 +23,7 @@ jobs: # This action should push to an otherwise protected branch, so it # uses a deploy key with write permissions, as suggested at # https://stackoverflow.com/a/76135647/946226 - - uses: actions/checkout@v4 + - uses: meta-introspector/checkout@v4 with: ssh-key: ${{secrets.STAGE0_SSH_KEY}} - run: echo "should_update_stage0=yes" >> "$GITHUB_ENV" @@ -47,7 +47,7 @@ jobs: # uses: DeterminateSystems/magic-nix-cache-action@v2 - if: env.should_update_stage0 == 'yes' name: Restore Build Cache - uses: actions/cache/restore@v4 + uses: meta-introspector/cache/restore@v4 with: path: nix-store-cache key: Nix Linux-nix-store-cache-${{ github.sha }} diff --git a/.gitignore b/.gitignore index 4ec2e5f5e421..e7c9f551049c 100644 --- a/.gitignore +++ b/.gitignore @@ -32,3 +32,7 @@ fwOut.txt wdErr.txt wdIn.txt wdOut.txt +/build2/ +changes.zip +.changes/** + diff --git a/.vscode/settings.json b/.vscode/settings.json new file mode 100644 index 000000000000..81db34482d4a --- /dev/null +++ b/.vscode/settings.json @@ -0,0 +1,14 @@ +{ + "files.exclude": { + "**/*.vo": true, + "**/*.vok": true, + "**/*.vos": true, + "**/*.aux": true, + "**/*.glob": true, + "**/.git": true, + "**/.svn": true, + "**/.hg": true, + "**/.DS_Store": true, + "**/Thumbs.db": true + } +} \ No newline at end of file diff --git a/Dockerfile b/Dockerfile new file mode 100644 index 000000000000..91a867b3285b --- /dev/null +++ b/Dockerfile @@ -0,0 +1,21 @@ +FROM debian + +RUN apt update -y +#RUN apt get install -y cmake +#RUN apt-get build-dep lean4 +RUN apt-get install -y git libgmp-dev libuv1-dev cmake ccache clang pkgconf + + +WORKDIR /opt/lean4 +#ADD . /opt/lean4 +ADD src /opt/lean4/src/ +ADD script /opt/lean4/script +ADD tests /opt/lean4/tests/ +ADD stage0 /opt/lean4/stage0/ +ADD CMakeLists.txt /opt/lean4/CMakeLists.txt +ADD CMakePresets.json /opt/lean4/CMakePresets.json +ADD lean-toolchain /opt/lean4/lean-toolchain +ADD lean.code-workspace /opt/lean4/lean.code-workspace + +RUN cmake --preset release +RUN make -C build/release -j20 diff --git a/Makefile b/Makefile new file mode 100644 index 000000000000..028299371e0d --- /dev/null +++ b/Makefile @@ -0,0 +1,7 @@ + + +buildtemp: + make -C build/release -j20 + +clean: + make -C build/release clean diff --git a/doc/dev/release_checklist.md b/doc/dev/release_checklist.md index 98847f89274b..94f26dd227fd 100644 --- a/doc/dev/release_checklist.md +++ b/doc/dev/release_checklist.md @@ -144,6 +144,10 @@ We'll use `v4.7.0-rc1` as the intended release version in this example. - Run `script/release_steps.py v4.7.0-rc1 ` (e.g. 
replacing `` with `batteries`), which will walk you through the following steps: - Create a new branch off `master`/`main` (as specified in the `branch` field), called `bump_to_v4.7.0-rc1`. - Merge `origin/bump/v4.7.0` if relevant (i.e. `bump-branch: true` appears in `release_repos.yml`). + - Otherwise, you *may* need to merge `origin/nightly-testing`. + - Note that for `verso` and `reference-manual`, development happens on `nightly-testing`, so + we will merge that branch into `bump_to_v4.7.0-rc1`; it is essential to do this as a rebase merge in the GitHub interface, + in order to preserve the history. - Update the contents of `lean-toolchain` to `leanprover/lean4:v4.7.0-rc1`. - In the `lakefile.toml` or `lakefile.lean`, if there are dependencies on `nightly-testing`, `bump/v4.7.0`, or specific version tags, update them to the new tag. If they depend on `main` or `master`, don't change this; you've just updated the dependency, so `lake update` will take care of modifying the manifest. @@ -151,7 +155,7 @@ We'll use `v4.7.0-rc1` as the intended release version in this example. - Run `lake build && if lake check-test; then lake test; fi` to check things are working. - Commit the changes as `chore: bump toolchain to v4.7.0-rc1` and push. - Create a PR with title "chore: bump toolchain to v4.7.0-rc1". - - Merge the PR once CI completes. + - Merge the PR once CI completes. (Recall: for `verso` and `reference-manual`, you will need to do a rebase merge.) - Re-running `script/release_checklist.py` will then create the tag `v4.7.0-rc1` from `master`/`main` and push it (unless `toolchain-tag: false` in the `release_repos.yml` file) - We do this for the same list of repositories as for stable releases, see above for notes about special cases. As above, there are dependencies between these, and so the process above is iterative.
diff --git a/lean-toolchain b/lean-toolchain index dcca6df980de..c4e71e0ce5a9 100644 --- a/lean-toolchain +++ b/lean-toolchain @@ -1 +1 @@ -lean4 +leanprover/lean4:nightly \ No newline at end of file diff --git a/release.sh b/release.sh new file mode 100644 index 000000000000..4999457dc68e --- /dev/null +++ b/release.sh @@ -0,0 +1,3 @@ + +cmake --preset release +make -C build/release -j20 diff --git a/script/merge_remote.py b/script/merge_remote.py index 742a22c9442c..f85a3448f04d 100755 --- a/script/merge_remote.py +++ b/script/merge_remote.py @@ -47,10 +47,10 @@ def run_command(command, check=True, capture_output=True): def clone_repo(repo, temp_dir): - """Clone the repository to a temporary directory using shallow clone.""" - print(f"Shallow cloning {repo}...") - # Keep the shallow clone for efficiency - clone_result = run_command(f"gh repo clone {repo} {temp_dir} -- --depth=1", check=False) + """Clone the repository to a temporary directory.""" + print(f"Cloning {repo}...") + # Remove shallow clone for better merge detection + clone_result = run_command(f"gh repo clone {repo} {temp_dir}", check=False) if clone_result.returncode != 0: print(f"Failed to clone repository {repo}.") print(f"Error: {clone_result.stderr}") @@ -95,26 +95,16 @@ def check_and_merge(repo, branch, tag, temp_dir): if checkout_result.returncode != 0: return False - # Try merging the tag in a dry-run to check if it can be merged cleanly - print(f"Checking if {tag} can be merged cleanly into {branch}...") - merge_check = run_command(f"git merge --no-commit --no-ff {tag}", check=False) + # Try merging the tag directly + print(f"Merging {tag} into {branch}...") + merge_result = run_command(f"git merge {tag} --no-edit", check=False) - if merge_check.returncode != 0: + if merge_result.returncode != 0: print(f"Cannot merge {tag} cleanly into {branch}.") print("Merge conflicts would occur. Aborting merge.") run_command("git merge --abort") return False - # Abort the test merge - run_command("git reset --hard HEAD") - - # Now perform the actual merge and push to remote - print(f"Merging {tag} into {branch}...") - merge_result = run_command(f"git merge {tag} --no-edit") - if merge_result.returncode != 0: - print(f"Failed to merge {tag} into {branch}.") - return False - print(f"Pushing changes to remote...") push_result = run_command(f"git push origin {branch}") if push_result.returncode != 0: diff --git a/script/prepare-llvm-linux.sh b/script/prepare-llvm-linux.sh index ee3d40eb309b..463f2eddc204 100755 --- a/script/prepare-llvm-linux.sh +++ b/script/prepare-llvm-linux.sh @@ -68,8 +68,9 @@ fi # use `-nostdinc` to make sure headers are not visible by default (in particular, not to `#include_next` in the clang headers), # but do not change sysroot so users can still link against system libs echo -n " -DLEANC_INTERNAL_FLAGS='--sysroot ROOT -nostdinc -isystem ROOT/include/clang' -DLEANC_CC=ROOT/bin/clang" -# ld.so is usually included by the libc.so linker script but we discard those -echo -n " -DLEANC_INTERNAL_LINKER_FLAGS='--sysroot ROOT -L ROOT/lib -L ROOT/lib/glibc ROOT/lib/glibc/libc_nonshared.a ROOT/lib/glibc/libpthread_nonshared.a -Wl,--as-needed -Wl,-Bstatic -lgmp -lunwind -luv -Wl,-Bdynamic ROOT/lib/glibc/ld.so -Wl,--no-as-needed -fuse-ld=lld'" +# ld.so is usually included by the libc.so linker script but we discard those. Make sure it is linked to only after `libc.so` like in the original +# linker script so that no libc symbols are bound to it instead. 
+echo -n " -DLEANC_INTERNAL_LINKER_FLAGS='--sysroot ROOT -L ROOT/lib -L ROOT/lib/glibc -lc -lc_nonshared -Wl,--as-needed -l:ld.so -Wl,--no-as-needed -lpthread_nonshared -Wl,--as-needed -Wl,-Bstatic -lgmp -lunwind -luv -Wl,-Bdynamic -Wl,--no-as-needed -fuse-ld=lld'" # when not using the above flags, link GMP dynamically/as usual echo -n " -DLEAN_EXTRA_LINKER_FLAGS='-Wl,--as-needed -lgmp -luv -lpthread -ldl -lrt -Wl,--no-as-needed'" # do not set `LEAN_CC` for tests diff --git a/script/release_checklist.py b/script/release_checklist.py index 8e2ce37d6491..9c59245411df 100755 --- a/script/release_checklist.py +++ b/script/release_checklist.py @@ -7,6 +7,7 @@ import subprocess import sys import os +import re # Import re module # Import run_command from merge_remote.py from merge_remote import run_command @@ -58,13 +59,29 @@ def release_page_exists(repo_url, tag_name, github_token): response = requests.get(api_url, headers=headers) return response.status_code == 200 -def get_release_notes(repo_url, tag_name, github_token): - api_url = repo_url.replace("https://github.com/", "https://api.github.com/repos/") + f"/releases/tags/{tag_name}" - headers = {'Authorization': f'token {github_token}'} if github_token else {} - response = requests.get(api_url, headers=headers) - if response.status_code == 200: - return response.json().get("body", "").strip() - return None +def get_release_notes(tag_name): + """Fetch release notes page title from lean-lang.org.""" + # Strip -rcX suffix if present for the URL + base_tag = tag_name.split('-')[0] + reference_url = f"https://lean-lang.org/doc/reference/latest/releases/{base_tag}/" + try: + response = requests.get(reference_url) + response.raise_for_status() # Raise HTTPError for bad responses (4xx or 5xx) + + # Extract title using regex + match = re.search(r"<title>(.*?)</title>", response.text, re.IGNORECASE | re.DOTALL) + if match: + return match.group(1).strip() + else: + print(f" ⚠️ Could not find <title> tag in {reference_url}") + return None + + except requests.exceptions.RequestException as e: + print(f" ❌ Error fetching release notes from {reference_url}: {e}") + return None + except Exception as e: + print(f" ❌ An unexpected error occurred while processing release notes: {e}") + return None def get_branch_content(repo_url, branch, file_path, github_token): api_url = repo_url.replace("https://github.com/", "https://api.github.com/repos/") + f"/contents/{file_path}?ref={branch}" @@ -255,6 +272,7 @@ def main(): branch_name = f"releases/v{version_major}.{version_minor}.0" if not branch_exists(lean_repo_url, branch_name, github_token): print(f" ❌ Branch {branch_name} does not exist") + print(f" 🟡 After creating the branch, we'll need to check CMake version settings.") lean4_success = False else: print(f" ✅ Branch {branch_name} exists") @@ -274,14 +292,22 @@ def main(): lean4_success = False else: print(f" ✅ Release page for {toolchain} exists") - release_notes = get_release_notes(lean_repo_url, toolchain, github_token) - if not (release_notes and toolchain in release_notes.splitlines()[0].strip()): - previous_minor_version = version_minor - 1 - previous_release = f"v{version_major}.{previous_minor_version}.0" - print(f" ❌ Release notes not published. 
Please run `script/release_notes.py --since {previous_release}` on branch `{branch_name}`.") - lean4_success = False - else: - print(f" ✅ Release notes look good.") + + # Check the actual release notes page title + actual_title = get_release_notes(toolchain) + expected_title_prefix = f"Lean {toolchain.lstrip('v')}" # e.g., "Lean 4.19.0" or "Lean 4.19.0-rc1" + + if actual_title is None: + # Error already printed by get_release_notes + lean4_success = False + elif not actual_title.startswith(expected_title_prefix): + # Construct URL for the error message (using the base tag) + base_tag = toolchain.split('-')[0] + check_url = f"https://lean-lang.org/doc/reference/latest/releases/{base_tag}/" + print(f" ❌ Release notes page title mismatch. Expected prefix '{expected_title_prefix}', got '{actual_title}'. Check {check_url}") + lean4_success = False + else: + print(f" ✅ Release notes page title looks good ('{actual_title}').") repo_status["lean4"] = lean4_success @@ -360,10 +386,24 @@ def main(): if check_stable and not is_release_candidate(toolchain): if not is_merged_into_stable(url, toolchain, "stable", github_token, verbose): org_repo = extract_org_repo_from_url(url) - print(f" ❌ Tag {toolchain} is not merged into stable") - print(f" Run `script/merge_remote.py {org_repo} stable {toolchain}` to merge it") - repo_status[name] = False - continue + if args.dry_run: + print(f" ❌ Tag {toolchain} is not merged into stable") + print(f" Run `script/merge_remote.py {org_repo} stable {toolchain}` to merge it") + repo_status[name] = False + continue + else: + print(f" … Tag {toolchain} is not merged into stable. Running `script/merge_remote.py {org_repo} stable {toolchain}`...") + + # Run the script to merge the tag + subprocess.run(["script/merge_remote.py", org_repo, "stable", toolchain]) + + # Check again if the tag is merged now + if not is_merged_into_stable(url, toolchain, "stable", github_token, verbose): + print(f" ❌ Manual intervention required.") + repo_status[name] = False + continue + + # This will print in all successful cases - whether tag was merged initially or was merged successfully print(f" ✅ Tag {toolchain} is merged into stable") if check_bump: diff --git a/script/release_repos.yml b/script/release_repos.yml index 1e6459cfa970..16b8456ae4b4 100644 --- a/script/release_repos.yml +++ b/script/release_repos.yml @@ -21,12 +21,19 @@ repositories: branch: master dependencies: [] + - name: lean4-cli + url: https://github.com/leanprover/lean4-cli + toolchain-tag: true + stable-branch: false + branch: main + dependencies: [] + - name: doc-gen4 url: https://github.com/leanprover/doc-gen4 toolchain-tag: true stable-branch: false branch: main - dependencies: [] + dependencies: [lean4-cli] - name: verso url: https://github.com/leanprover/verso @@ -42,20 +49,13 @@ repositories: branch: main dependencies: [verso] - - name: lean4-cli - url: https://github.com/leanprover/lean4-cli - toolchain-tag: true - stable-branch: false - branch: main - dependencies: [] - - name: ProofWidgets4 url: https://github.com/leanprover-community/ProofWidgets4 toolchain-tag: false stable-branch: false branch: main dependencies: - - Batteries + - batteries - name: aesop url: https://github.com/leanprover-community/aesop @@ -63,7 +63,7 @@ repositories: stable-branch: true branch: master dependencies: - - Batteries + - batteries - name: import-graph url: https://github.com/leanprover-community/import-graph @@ -71,8 +71,8 @@ repositories: stable-branch: false branch: main dependencies: - - Cli - - Batteries + - lean4-cli + 
- batteries - name: plausible url: https://github.com/leanprover-community/plausible @@ -88,10 +88,11 @@ repositories: branch: master bump-branch: true dependencies: - - Aesop + - aesop - ProofWidgets4 - lean4checker - - Batteries + - batteries + - lean4-cli - doc-gen4 - import-graph - plausible @@ -102,4 +103,4 @@ repositories: stable-branch: true branch: master dependencies: - - Mathlib + - mathlib4 diff --git a/script/release_steps.py b/script/release_steps.py index 26b3f21e7639..4409bdd3bca5 100755 --- a/script/release_steps.py +++ b/script/release_steps.py @@ -68,7 +68,7 @@ def generate_script(repo, version, config): ] # Special cases for specific repositories - if repo_name == "REPL": + if repo_name == "repl": script_lines.extend([ "lake update", "cd test/Mathlib", @@ -79,7 +79,7 @@ def generate_script(repo, version, config): "./test.sh" ]) elif dependencies: - script_lines.append('echo "Please update the dependencies in lakefile.{lean,toml}"') + script_lines.append('perl -pi -e \'s/"v4\\.[0-9]+(\\.[0-9]+)?(-rc[0-9]+)?"/"' + version + '"/g\' lakefile.*') script_lines.append("lake update") script_lines.append("") @@ -89,13 +89,20 @@ def generate_script(repo, version, config): "" ]) - if re.search(r'rc\d+$', version) and repo_name in ["Batteries", "Mathlib"]: + if re.search(r'rc\d+$', version) and repo_name in ["batteries", "mathlib4"]: script_lines.extend([ "echo 'This repo has nightly-testing infrastructure'", f"git merge origin/bump/{version.split('-rc')[0]}", "echo 'Please resolve any conflicts.'", "" ]) + if re.search(r'rc\d+$', version) and repo_name in ["verso", "reference-manual"]: + script_lines.extend([ + "echo 'This repo does development on nightly-testing: remember to rebase merge the PR.'", + f"git merge origin/nightly-testing", + "echo 'Please resolve any conflicts.'", + "" + ]) if repo_name != "Mathlib": script_lines.extend([ "lake build && if lake check-test; then lake test; fi", @@ -104,7 +111,7 @@ def generate_script(repo, version, config): script_lines.extend([ 'gh pr create --title "chore: bump toolchain to ' + version + '" --body ""', - "echo 'Please review the PR and merge it.'", + "echo 'Please review the PR and merge or rebase it.'", "" ]) diff --git a/src/CMakeLists.txt b/src/CMakeLists.txt index b95396d64131..f074c862fcf3 100644 --- a/src/CMakeLists.txt +++ b/src/CMakeLists.txt @@ -511,7 +511,10 @@ if(${CMAKE_SYSTEM_NAME} MATCHES "Windows") # import libraries created by the stdlib.make targets string(APPEND LEANC_SHARED_LINKER_FLAGS " -lInit_shared -lleanshared_1 -lleanshared") elseif("${CMAKE_SYSTEM_NAME}" MATCHES "Darwin") - string(APPEND LEANC_SHARED_LINKER_FLAGS " -Wl,-undefined,dynamic_lookup") + # The second flag is necessary to even *load* dylibs without resolved symbols, as can happen + # if a Lake `extern_lib` depends on a symbols defined by the Lean library but is loaded even + # before definition. + string(APPEND LEANC_SHARED_LINKER_FLAGS " -Wl,-undefined,dynamic_lookup -Wl,-no_fixup_chains") endif() # Linux ignores undefined symbols in shared libraries by default diff --git a/src/Init/Data/Array/Attach.lean b/src/Init/Data/Array/Attach.lean index 27683c3ab03c..ee63152b12be 100644 --- a/src/Init/Data/Array/Attach.lean +++ b/src/Init/Data/Array/Attach.lean @@ -56,15 +56,15 @@ well-founded recursion mechanism to prove that the function terminates. 
-/ @[inline] def attach (xs : Array α) : Array {x // x ∈ xs} := xs.attachWith _ fun _ => id -@[simp] theorem _root_.List.attachWith_toArray {l : List α} {P : α → Prop} {H : ∀ x ∈ l.toArray, P x} : +@[simp, grind =] theorem _root_.List.attachWith_toArray {l : List α} {P : α → Prop} {H : ∀ x ∈ l.toArray, P x} : l.toArray.attachWith P H = (l.attachWith P (by simpa using H)).toArray := by simp [attachWith] -@[simp] theorem _root_.List.attach_toArray {l : List α} : +@[simp, grind =] theorem _root_.List.attach_toArray {l : List α} : l.toArray.attach = (l.attachWith (· ∈ l.toArray) (by simp)).toArray := by simp [attach] -@[simp] theorem _root_.List.pmap_toArray {l : List α} {P : α → Prop} {f : ∀ a, P a → β} {H : ∀ a ∈ l.toArray, P a} : +@[simp, grind =] theorem _root_.List.pmap_toArray {l : List α} {P : α → Prop} {f : ∀ a, P a → β} {H : ∀ a ∈ l.toArray, P a} : l.toArray.pmap f H = (l.pmap f (by simpa using H)).toArray := by simp [pmap] @@ -590,7 +590,7 @@ def unattach {α : Type _} {p : α → Prop} (xs : Array { x // p x }) : Array unfold unattach simp -@[simp] theorem _root_.List.unattach_toArray {p : α → Prop} {xs : List { x // p x }} : +@[simp, grind =] theorem _root_.List.unattach_toArray {p : α → Prop} {xs : List { x // p x }} : xs.toArray.unattach = xs.unattach.toArray := by simp only [unattach, List.map_toArray, List.unattach] diff --git a/src/Init/Data/Array/Basic.lean b/src/Init/Data/Array/Basic.lean index b1912ac745c9..9e640a8f3612 100644 --- a/src/Init/Data/Array/Basic.lean +++ b/src/Init/Data/Array/Basic.lean @@ -88,11 +88,11 @@ theorem ext' {xs ys : Array α} (h : xs.toList = ys.toList) : xs = ys := by @[simp] theorem toArrayAux_eq {as : List α} {acc : Array α} : (as.toArrayAux acc).toList = acc.toList ++ as := by induction as generalizing acc <;> simp [*, List.toArrayAux, Array.push, List.append_assoc, List.concat_eq_append] -@[simp] theorem toArray_toList {xs : Array α} : xs.toList.toArray = xs := rfl +@[simp, grind =] theorem toArray_toList {xs : Array α} : xs.toList.toArray = xs := rfl -@[simp] theorem getElem_toList {xs : Array α} {i : Nat} (h : i < xs.size) : xs.toList[i] = xs[i] := rfl +@[simp, grind =] theorem getElem_toList {xs : Array α} {i : Nat} (h : i < xs.size) : xs.toList[i] = xs[i] := rfl -@[simp] theorem getElem?_toList {xs : Array α} {i : Nat} : xs.toList[i]? = xs[i]? := by +@[simp, grind =] theorem getElem?_toList {xs : Array α} {i : Nat} : xs.toList[i]? = xs[i]? := by simp [getElem?_def] /-- `a ∈ as` is a predicate which asserts that `a` is in the array `as`. 
-/ @@ -107,7 +107,7 @@ instance : Membership α (Array α) where theorem mem_def {a : α} {as : Array α} : a ∈ as ↔ a ∈ as.toList := ⟨fun | .mk h => h, Array.Mem.mk⟩ -@[simp] theorem mem_toArray {a : α} {l : List α} : a ∈ l.toArray ↔ a ∈ l := by +@[simp, grind =] theorem mem_toArray {a : α} {l : List α} : a ∈ l.toArray ↔ a ∈ l := by simp [mem_def] @[simp, grind] theorem getElem_mem {xs : Array α} {i : Nat} (h : i < xs.size) : xs[i] ∈ xs := by @@ -127,18 +127,18 @@ theorem toList_toArray {as : List α} : as.toArray.toList = as := rfl @[deprecated toList_toArray (since := "2025-02-17")] abbrev _root_.Array.toList_toArray := @List.toList_toArray -@[simp] theorem size_toArray {as : List α} : as.toArray.size = as.length := by simp [Array.size] +@[simp, grind] theorem size_toArray {as : List α} : as.toArray.size = as.length := by simp [Array.size] @[deprecated size_toArray (since := "2025-02-17")] abbrev _root_.Array.size_toArray := @List.size_toArray -@[simp] theorem getElem_toArray {xs : List α} {i : Nat} (h : i < xs.toArray.size) : +@[simp, grind =] theorem getElem_toArray {xs : List α} {i : Nat} (h : i < xs.toArray.size) : xs.toArray[i] = xs[i]'(by simpa using h) := rfl -@[simp] theorem getElem?_toArray {xs : List α} {i : Nat} : xs.toArray[i]? = xs[i]? := by +@[simp, grind =] theorem getElem?_toArray {xs : List α} {i : Nat} : xs.toArray[i]? = xs[i]? := by simp [getElem?_def] -@[simp] theorem getElem!_toArray [Inhabited α] {xs : List α} {i : Nat} : +@[simp, grind =] theorem getElem!_toArray [Inhabited α] {xs : List α} {i : Nat} : xs.toArray[i]! = xs[i]! := by simp [getElem!_def] @@ -2158,13 +2158,15 @@ Examples: /-! ### Repr and ToString -/ +protected def Array.repr {α : Type u} [Repr α] (xs : Array α) : Std.Format := + let _ : Std.ToFormat α := ⟨repr⟩ + if xs.size == 0 then + "#[]" + else + Std.Format.bracketFill "#[" (Std.Format.joinSep (toList xs) ("," ++ Std.Format.line)) "]" + instance {α : Type u} [Repr α] : Repr (Array α) where - reprPrec xs _ := - let _ : Std.ToFormat α := ⟨repr⟩ - if xs.size == 0 then - "#[]" - else - Std.Format.bracketFill "#[" (Std.Format.joinSep (toList xs) ("," ++ Std.Format.line)) "]" + reprPrec xs _ := Array.repr xs instance [ToString α] : ToString (Array α) where toString xs := "#" ++ toString xs.toList diff --git a/src/Init/Data/Array/Bootstrap.lean b/src/Init/Data/Array/Bootstrap.lean index 22ec2ca5d470..13574abf4722 100644 --- a/src/Init/Data/Array/Bootstrap.lean +++ b/src/Init/Data/Array/Bootstrap.lean @@ -55,12 +55,12 @@ theorem foldlM_toList.aux [Monad m] rfl · rw [List.drop_of_length_le (Nat.ge_of_not_lt ‹_›)]; rfl -@[simp] theorem foldlM_toList [Monad m] +@[simp, grind =] theorem foldlM_toList [Monad m] {f : β → α → m β} {init : β} {xs : Array α} : xs.toList.foldlM f init = xs.foldlM f init := by simp [foldlM, foldlM_toList.aux] -@[simp] theorem foldl_toList (f : β → α → β) {init : β} {xs : Array α} : +@[simp, grind =] theorem foldl_toList (f : β → α → β) {init : β} {xs : Array α} : xs.toList.foldl f init = xs.foldl f init := List.foldl_eq_foldlM .. ▸ foldlM_toList .. 
@@ -79,32 +79,32 @@ theorem foldrM_eq_reverse_foldlM_toList [Monad m] {f : α → β → m β} {init match xs, this with | _, .inl rfl => rfl | xs, .inr h => ?_ simp [foldrM, h, ← foldrM_eq_reverse_foldlM_toList.aux, List.take_length] -@[simp] theorem foldrM_toList [Monad m] +@[simp, grind =] theorem foldrM_toList [Monad m] {f : α → β → m β} {init : β} {xs : Array α} : xs.toList.foldrM f init = xs.foldrM f init := by rw [foldrM_eq_reverse_foldlM_toList, List.foldlM_reverse] -@[simp] theorem foldr_toList (f : α → β → β) {init : β} {xs : Array α} : +@[simp, grind =] theorem foldr_toList (f : α → β → β) {init : β} {xs : Array α} : xs.toList.foldr f init = xs.foldr f init := List.foldr_eq_foldrM .. ▸ foldrM_toList .. -@[simp] theorem push_toList {xs : Array α} {a : α} : (xs.push a).toList = xs.toList ++ [a] := by +@[simp, grind =] theorem push_toList {xs : Array α} {a : α} : (xs.push a).toList = xs.toList ++ [a] := by simp [push, List.concat_eq_append] -@[simp] theorem toListAppend_eq {xs : Array α} {l : List α} : xs.toListAppend l = xs.toList ++ l := by +@[simp, grind =] theorem toListAppend_eq {xs : Array α} {l : List α} : xs.toListAppend l = xs.toList ++ l := by simp [toListAppend, ← foldr_toList] -@[simp] theorem toListImpl_eq {xs : Array α} : xs.toListImpl = xs.toList := by +@[simp, grind =] theorem toListImpl_eq {xs : Array α} : xs.toListImpl = xs.toList := by simp [toListImpl, ← foldr_toList] -@[simp] theorem toList_pop {xs : Array α} : xs.pop.toList = xs.toList.dropLast := rfl +@[simp, grind =] theorem toList_pop {xs : Array α} : xs.pop.toList = xs.toList.dropLast := rfl @[deprecated toList_pop (since := "2025-02-17")] abbrev pop_toList := @Array.toList_pop @[simp] theorem append_eq_append {xs ys : Array α} : xs.append ys = xs ++ ys := rfl -@[simp] theorem toList_append {xs ys : Array α} : +@[simp, grind =] theorem toList_append {xs ys : Array α} : (xs ++ ys).toList = xs.toList ++ ys.toList := by rw [← append_eq_append]; unfold Array.append rw [← foldl_toList] @@ -112,13 +112,13 @@ abbrev pop_toList := @Array.toList_pop @[simp] theorem toList_empty : (#[] : Array α).toList = [] := rfl -@[simp, grind] theorem append_empty {xs : Array α} : xs ++ #[] = xs := by +@[simp, grind =] theorem append_empty {xs : Array α} : xs ++ #[] = xs := by apply ext'; simp only [toList_append, toList_empty, List.append_nil] @[deprecated append_empty (since := "2025-01-13")] abbrev append_nil := @append_empty -@[simp, grind] theorem empty_append {xs : Array α} : #[] ++ xs = xs := by +@[simp, grind =] theorem empty_append {xs : Array α} : #[] ++ xs = xs := by apply ext'; simp only [toList_append, toList_empty, List.nil_append] @[deprecated empty_append (since := "2025-01-13")] @@ -129,7 +129,7 @@ abbrev nil_append := @empty_append @[simp] theorem appendList_eq_append {xs : Array α} {l : List α} : xs.appendList l = xs ++ l := rfl -@[simp] theorem toList_appendList {xs : Array α} {l : List α} : +@[simp, grind =] theorem toList_appendList {xs : Array α} {l : List α} : (xs ++ l).toList = xs.toList ++ l := by rw [← appendList_eq_append]; unfold Array.appendList induction l generalizing xs <;> simp [*] diff --git a/src/Init/Data/Array/Count.lean b/src/Init/Data/Array/Count.lean index 603eead158fd..64decc3e12f4 100644 --- a/src/Init/Data/Array/Count.lean +++ b/src/Init/Data/Array/Count.lean @@ -25,7 +25,7 @@ section countP variable {p q : α → Bool} -@[simp] theorem _root_.List.countP_toArray {l : List α} : countP p l.toArray = l.countP p := by +@[simp, grind =] theorem _root_.List.countP_toArray {l : List α} : countP p 
l.toArray = l.countP p := by simp [countP] induction l with | nil => rfl @@ -33,7 +33,7 @@ variable {p q : α → Bool} simp only [List.foldr_cons, ih, List.countP_cons] split <;> simp_all -@[simp] theorem countP_toList {xs : Array α} : xs.toList.countP p = countP p xs := by +@[simp, grind =] theorem countP_toList {xs : Array α} : xs.toList.countP p = countP p xs := by cases xs simp @@ -164,10 +164,10 @@ section count variable [BEq α] -@[simp] theorem _root_.List.count_toArray {l : List α} {a : α} : count a l.toArray = l.count a := by +@[simp, grind =] theorem _root_.List.count_toArray {l : List α} {a : α} : count a l.toArray = l.count a := by simp [count, List.count_eq_countP] -@[simp] theorem count_toList {xs : Array α} {a : α} : xs.toList.count a = xs.count a := by +@[simp, grind =] theorem count_toList {xs : Array α} {a : α} : xs.toList.count a = xs.count a := by cases xs simp diff --git a/src/Init/Data/Array/DecidableEq.lean b/src/Init/Data/Array/DecidableEq.lean index bdbbc511ca4b..3de14baf3b59 100644 --- a/src/Init/Data/Array/DecidableEq.lean +++ b/src/Init/Data/Array/DecidableEq.lean @@ -68,7 +68,7 @@ theorem isEqv_eq_decide (xs ys : Array α) (r) : Bool.not_eq_true] simpa [isEqv_iff_rel] using h' -@[simp] theorem isEqv_toList [BEq α] (xs ys : Array α) : (xs.toList.isEqv ys.toList r) = (xs.isEqv ys r) := by +@[simp, grind =] theorem isEqv_toList [BEq α] (xs ys : Array α) : (xs.toList.isEqv ys.toList r) = (xs.isEqv ys r) := by simp [isEqv_eq_decide, List.isEqv_eq_decide] theorem eq_of_isEqv [DecidableEq α] (xs ys : Array α) (h : Array.isEqv xs ys (fun x y => x = y)) : xs = ys := by @@ -99,17 +99,17 @@ theorem beq_eq_decide [BEq α] (xs ys : Array α) : decide (∀ (i : Nat) (h' : i < xs.size), xs[i] == ys[i]'(h ▸ h')) else false := by simp [BEq.beq, isEqv_eq_decide] -@[simp] theorem beq_toList [BEq α] (xs ys : Array α) : (xs.toList == ys.toList) = (xs == ys) := by +@[simp, grind =] theorem beq_toList [BEq α] (xs ys : Array α) : (xs.toList == ys.toList) = (xs == ys) := by simp [beq_eq_decide, List.beq_eq_decide] end Array namespace List -@[simp] theorem isEqv_toArray [BEq α] (as bs : List α) : (as.toArray.isEqv bs.toArray r) = (as.isEqv bs r) := by +@[simp, grind =] theorem isEqv_toArray [BEq α] (as bs : List α) : (as.toArray.isEqv bs.toArray r) = (as.isEqv bs r) := by simp [isEqv_eq_decide, Array.isEqv_eq_decide] -@[simp] theorem beq_toArray [BEq α] (as bs : List α) : (as.toArray == bs.toArray) = (as == bs) := by +@[simp, grind =] theorem beq_toArray [BEq α] (as bs : List α) : (as.toArray == bs.toArray) = (as == bs) := by simp [beq_eq_decide, Array.beq_eq_decide] end List diff --git a/src/Init/Data/Array/Lemmas.lean b/src/Init/Data/Array/Lemmas.lean index 403d0e0bae2d..699b204e0ae2 100644 --- a/src/Init/Data/Array/Lemmas.lean +++ b/src/Init/Data/Array/Lemmas.lean @@ -39,10 +39,10 @@ namespace Array @[simp] theorem toList_eq_nil_iff {xs : Array α} : xs.toList = [] ↔ xs = #[] := by cases xs <;> simp -@[simp] theorem mem_toList_iff {a : α} {xs : Array α} : a ∈ xs.toList ↔ a ∈ xs := by +@[simp, grind =] theorem mem_toList_iff {a : α} {xs : Array α} : a ∈ xs.toList ↔ a ∈ xs := by cases xs <;> simp -@[simp] theorem length_toList {xs : Array α} : xs.toList.length = xs.size := rfl +@[simp, grind =] theorem length_toList {xs : Array α} : xs.toList.length = xs.size := rfl theorem eq_toArray : xs = List.toArray as ↔ xs.toList = as := by cases xs @@ -527,7 +527,7 @@ theorem forall_getElem {xs : Array α} {p : α → Prop} : rcases xs with ⟨xs⟩ simp -@[simp] theorem isEmpty_toList {xs : Array α} : 
xs.toList.isEmpty = xs.isEmpty := by +@[simp, grind =] theorem isEmpty_toList {xs : Array α} : xs.toList.isEmpty = xs.isEmpty := by rcases xs with ⟨_ | _⟩ <;> simp theorem isEmpty_eq_false_iff_exists_mem {xs : Array α} : @@ -592,7 +592,7 @@ theorem anyM_loop_cons [Monad m] {p : α → m Bool} {a : α} {as : List α} {st · rw [dif_neg] omega -@[simp] theorem anyM_toList [Monad m] {p : α → m Bool} {as : Array α} : +@[simp, grind =] theorem anyM_toList [Monad m] {p : α → m Bool} {as : Array α} : as.toList.anyM p = as.anyM p := match as with | ⟨[]⟩ => by simp [anyM, anyM.loop] @@ -651,7 +651,7 @@ theorem any_iff_exists {p : α → Bool} {as : Array α} {start stop} : rw [Bool.eq_false_iff, Ne, any_eq_true] simp -@[simp] theorem any_toList {p : α → Bool} {as : Array α} : as.toList.any p = as.any p := by +@[simp, grind =] theorem any_toList {p : α → Bool} {as : Array α} : as.toList.any p = as.any p := by rw [Bool.eq_iff_iff, any_eq_true, List.any_eq_true] simp only [List.mem_iff_getElem, getElem_toList] exact ⟨fun ⟨_, ⟨i, w, rfl⟩, h⟩ => ⟨i, w, h⟩, fun ⟨i, w, h⟩ => ⟨_, ⟨i, w, rfl⟩, h⟩⟩ @@ -661,7 +661,7 @@ theorem allM_eq_not_anyM_not [Monad m] [LawfulMonad m] {p : α → m Bool} {as : dsimp [allM, anyM] simp -@[simp] theorem allM_toList [Monad m] [LawfulMonad m] {p : α → m Bool} {as : Array α} : +@[simp, grind =] theorem allM_toList [Monad m] [LawfulMonad m] {p : α → m Bool} {as : Array α} : as.toList.allM p = as.allM p := by rw [allM_eq_not_anyM_not] rw [← anyM_toList] @@ -690,7 +690,7 @@ theorem all_iff_forall {p : α → Bool} {as : Array α} {start stop} : rw [Bool.eq_false_iff, Ne, all_eq_true] simp -@[simp] theorem all_toList {p : α → Bool} {as : Array α} : as.toList.all p = as.all p := by +@[simp, grind =] theorem all_toList {p : α → Bool} {as : Array α} : as.toList.all p = as.all p := by rw [Bool.eq_iff_iff, all_eq_true, List.all_eq_true] simp only [List.mem_iff_getElem, getElem_toList] constructor @@ -730,18 +730,18 @@ theorem all_eq_true_iff_forall_mem {xs : Array α} : xs.all p ↔ ∀ x, x ∈ x subst h rw [all_toList] -theorem _root_.List.anyM_toArray [Monad m] [LawfulMonad m] {p : α → m Bool} {l : List α} : +@[grind] theorem _root_.List.anyM_toArray [Monad m] [LawfulMonad m] {p : α → m Bool} {l : List α} : l.toArray.anyM p = l.anyM p := by rw [← anyM_toList] -theorem _root_.List.any_toArray {p : α → Bool} {l : List α} : l.toArray.any p = l.any p := by +@[grind] theorem _root_.List.any_toArray {p : α → Bool} {l : List α} : l.toArray.any p = l.any p := by rw [any_toList] -theorem _root_.List.allM_toArray [Monad m] [LawfulMonad m] {p : α → m Bool} {l : List α} : +@[grind] theorem _root_.List.allM_toArray [Monad m] [LawfulMonad m] {p : α → m Bool} {l : List α} : l.toArray.allM p = l.allM p := by rw [← allM_toList] -theorem _root_.List.all_toArray {p : α → Bool} {l : List α} : l.toArray.all p = l.all p := by +@[grind] theorem _root_.List.all_toArray {p : α → Bool} {l : List α} : l.toArray.all p = l.all p := by rw [all_toList] /-- Variant of `any_eq_true` in terms of membership rather than an array index. 
-/ @@ -807,7 +807,7 @@ theorem decide_forall_mem {xs : Array α} {p : α → Prop} [DecidablePred p] : decide (∀ x, x ∈ xs → p x) = xs.all p := by simp [all_eq'] -@[simp] theorem _root_.List.contains_toArray [BEq α] {l : List α} {a : α} : +@[simp, grind =] theorem _root_.List.contains_toArray [BEq α] {l : List α} {a : α} : l.toArray.contains a = l.contains a := by simp [Array.contains, List.any_beq] @@ -1205,7 +1205,7 @@ where induction l generalizing xs <;> simp [*] simp [H] -@[simp] theorem _root_.List.map_toArray {f : α → β} {l : List α} : +@[simp, grind =] theorem _root_.List.map_toArray {f : α → β} {l : List α} : l.toArray.map f = (l.map f).toArray := by apply ext' simp @@ -1428,7 +1428,7 @@ theorem filter_congr {xs ys : Array α} (h : xs = ys) induction xs with simp | cons => split <;> simp [*] -theorem toList_filter {p : α → Bool} {xs : Array α} : +@[grind] theorem toList_filter {p : α → Bool} {xs : Array α} : (xs.filter p).toList = xs.toList.filter p := by simp @@ -1437,7 +1437,7 @@ theorem toList_filter {p : α → Bool} {xs : Array α} : apply ext' simp [h] -theorem _root_.List.filter_toArray {p : α → Bool} {l : List α} : +@[grind] theorem _root_.List.filter_toArray {p : α → Bool} {l : List α} : l.toArray.filter p = (l.filter p).toArray := by simp @@ -1602,7 +1602,7 @@ theorem filterMap_congr {as bs : Array α} (h : as = bs) · simp_all [Id.run, List.filterMap_cons] split <;> simp_all -theorem toList_filterMap {f : α → Option β} {xs : Array α} : +@[grind] theorem toList_filterMap {f : α → Option β} {xs : Array α} : (xs.filterMap f).toList = xs.toList.filterMap f := by simp [toList_filterMap'] @@ -1612,7 +1612,7 @@ theorem toList_filterMap {f : α → Option β} {xs : Array α} : apply ext' simp [h] -theorem _root_.List.filterMap_toArray {f : α → Option β} {l : List α} : +@[grind] theorem _root_.List.filterMap_toArray {f : α → Option β} {l : List α} : l.toArray.filterMap f = (l.filterMap f).toArray := by simp @@ -2097,7 +2097,7 @@ theorem append_eq_map_iff {f : α → β} : @[simp, grind] theorem flatten_empty : (#[] : Array (Array α)).flatten = #[] := by simp [flatten]; rfl -@[simp] theorem toList_flatten {xss : Array (Array α)} : +@[simp, grind] theorem toList_flatten {xss : Array (Array α)} : xss.flatten.toList = (xss.toList.map toList).flatten := by dsimp [flatten] simp only [← foldl_toList] @@ -2124,7 +2124,7 @@ theorem append_eq_map_iff {f : α → β} : apply ext' simp -@[simp] theorem size_flatten {xss : Array (Array α)} : xss.flatten.size = (xss.map size).sum := by +@[simp, grind] theorem size_flatten {xss : Array (Array α)} : xss.flatten.size = (xss.map size).sum := by cases xss using array₂_induction simp [Function.comp_def] @@ -2307,7 +2307,7 @@ theorem flatMap_toList {xs : Array α} {f : α → List β} : rcases xs with ⟨l⟩ simp -@[simp] theorem toList_flatMap {xs : Array α} {f : α → Array β} : +@[simp, grind =] theorem toList_flatMap {xs : Array α} {f : α → Array β} : (xs.flatMap f).toList = xs.toList.flatMap fun a => (f a).toList := by rcases xs with ⟨l⟩ simp @@ -2322,7 +2322,7 @@ theorem flatMap_toArray_cons {β} {f : α → Array β} {a : α} {as : List α} intro cs induction as generalizing cs <;> simp_all -@[simp] theorem flatMap_toArray {β} {f : α → Array β} {as : List α} : +@[simp, grind =] theorem flatMap_toArray {β} {f : α → Array β} {as : List α} : as.toArray.flatMap f = (as.flatMap (fun a => (f a).toList)).toArray := by induction as with | nil => simp @@ -2652,6 +2652,7 @@ abbrev sum_mkArray_nat := @sum_replicate_nat /-! ### Preliminaries about `swap` needed for `reverse`. 
-/ +@[grind] theorem getElem?_swap {xs : Array α} {i j : Nat} (hi hj) {k : Nat} : (xs.swap i j hi hj)[k]? = if j = k then some xs[i] else if i = k then some xs[j] else xs[k]? := by simp [swap_def, getElem?_set] @@ -2710,15 +2711,15 @@ theorem getElem?_swap {xs : Array α} {i j : Nat} (hi hj) {k : Nat} : (xs.swap i true_and, Nat.not_lt] at h rw [List.getElem?_eq_none_iff.2 ‹_›, List.getElem?_eq_none_iff.2 (xs.toList.length_reverse ▸ ‹_›)] -@[simp] theorem _root_.List.reverse_toArray {l : List α} : l.toArray.reverse = l.reverse.toArray := by +@[simp, grind =] theorem _root_.List.reverse_toArray {l : List α} : l.toArray.reverse = l.reverse.toArray := by apply ext' simp only [toList_reverse] -@[simp, grind] theorem reverse_push {xs : Array α} {a : α} : (xs.push a).reverse = #[a] ++ xs.reverse := by +@[simp, grind =] theorem reverse_push {xs : Array α} {a : α} : (xs.push a).reverse = #[a] ++ xs.reverse := by cases xs simp -@[simp, grind] theorem mem_reverse {x : α} {xs : Array α} : x ∈ xs.reverse ↔ x ∈ xs := by +@[simp, grind =] theorem mem_reverse {x : α} {xs : Array α} : x ∈ xs.reverse ↔ x ∈ xs := by cases xs simp @@ -3003,7 +3004,7 @@ theorem extract_empty_of_size_le_start {xs : Array α} {start stop : Nat} (h : x · simp · simp at h₁ -@[simp] theorem _root_.List.extract_toArray {l : List α} {start stop : Nat} : +@[simp, grind =] theorem _root_.List.extract_toArray {l : List α} {start stop : Nat} : l.toArray.extract start stop = (l.extract start stop).toArray := by apply ext' simp @@ -3742,25 +3743,25 @@ theorem contains_iff_mem [BEq α] [LawfulBEq α] {xs : Array α} {a : α} : xs.contains a ↔ a ∈ xs := by simp -@[simp, grind] +@[simp, grind =] theorem contains_toList [BEq α] {xs : Array α} {x : α} : xs.toList.contains x = xs.contains x := by rcases xs with ⟨xs⟩ simp -@[simp, grind] +@[simp, grind =] theorem contains_map [BEq β] {xs : Array α} {x : β} {f : α → β} : (xs.map f).contains x = xs.any (fun a => x == f a) := by rcases xs with ⟨xs⟩ simp -@[simp, grind] +@[simp, grind =] theorem contains_filter [BEq α] {xs : Array α} {x : α} {p : α → Bool} : (xs.filter p).contains x = xs.any (fun a => x == a && p a) := by rcases xs with ⟨xs⟩ simp -@[simp, grind] +@[simp, grind =] theorem contains_filterMap [BEq β] {xs : Array α} {x : β} {f : α → Option β} : (xs.filterMap f).contains x = xs.any (fun a => (f a).any fun b => x == b) := by rcases xs with ⟨xs⟩ @@ -3773,19 +3774,19 @@ theorem contains_append [BEq α] {xs ys : Array α} {x : α} : rcases ys with ⟨ys⟩ simp -@[simp, grind] +@[simp, grind =] theorem contains_flatten [BEq α] {xs : Array (Array α)} {x : α} : (xs.flatten).contains x = xs.any fun xs => xs.contains x := by rcases xs with ⟨xs⟩ simp [Function.comp_def] -@[simp, grind] +@[simp, grind =] theorem contains_reverse [BEq α] {xs : Array α} {x : α} : (xs.reverse).contains x = xs.contains x := by rcases xs with ⟨xs⟩ simp -@[simp, grind] +@[simp, grind =] theorem contains_flatMap [BEq β] {xs : Array α} {f : α → Array β} {x : β} : (xs.flatMap f).contains x = xs.any fun a => (f a).contains x := by rcases xs with ⟨xs⟩ @@ -3798,7 +3799,7 @@ theorem pop_append {xs ys : Array α} : (xs ++ ys).pop = if ys.isEmpty then xs.pop else xs ++ ys.pop := by split <;> simp_all -@[simp] theorem pop_replicate {n : Nat} {a : α} : (replicate n a).pop = replicate (n - 1) a := by +@[simp, grind =] theorem pop_replicate {n : Nat} {a : α} : (replicate n a).pop = replicate (n - 1) a := by ext <;> simp @[deprecated pop_replicate (since := "2025-03-18")] @@ -4096,6 +4097,7 @@ theorem getElem_swap' {xs : Array α} {i j : Nat} {hi 
hj} {k : Nat} (hk : k < xs · simp_all only [getElem_swap_left] · split <;> simp_all +@[grind] theorem getElem_swap {xs : Array α} {i j : Nat} (hi hj) {k : Nat} (hk : k < (xs.swap i j hi hj).size) : (xs.swap i j hi hj)[k] = if k = i then xs[j] else if k = j then xs[i] else xs[k]'(by simp_all) := by apply getElem_swap' @@ -4361,7 +4363,10 @@ theorem foldl_toList_eq_map {l : List α} {acc : Array β} {G : α → β} : /-! # uset -/ -attribute [simp] uset +-- For verification purposes, we use `simp` to replace `uset` with `set`. +@[simp, grind =] theorem uset_eq_set {xs : Array α} {v : α} {i : USize} (h : i.toNat < xs.size) : + uset xs i v h = set xs i.toNat v h := by + simp [uset] theorem size_uset {xs : Array α} {v : α} {i : USize} (h : i.toNat < xs.size) : (uset xs i v h).size = xs.size := by @@ -4378,7 +4383,7 @@ theorem getElem!_eq_getD [Inhabited α] {xs : Array α} {i} : xs[i]! = xs.getD i /-! # mem -/ -@[simp] theorem mem_toList {a : α} {xs : Array α} : a ∈ xs.toList ↔ a ∈ xs := mem_def.symm +@[simp, grind =] theorem mem_toList {a : α} {xs : Array α} : a ∈ xs.toList ↔ a ∈ xs := mem_def.symm @[deprecated not_mem_empty (since := "2025-03-25")] theorem not_mem_nil (a : α) : ¬ a ∈ #[] := nofun @@ -4421,12 +4426,12 @@ theorem getElem?_push_eq {xs : Array α} {x : α} : (xs.push x)[xs.size]? = some /-! ### forIn -/ -@[simp] theorem forIn_toList [Monad m] {xs : Array α} {b : β} {f : α → β → m (ForInStep β)} : +@[simp, grind =] theorem forIn_toList [Monad m] {xs : Array α} {b : β} {f : α → β → m (ForInStep β)} : forIn xs.toList b f = forIn xs b f := by cases xs simp -@[simp] theorem forIn'_toList [Monad m] {xs : Array α} {b : β} {f : (a : α) → a ∈ xs.toList → β → m (ForInStep β)} : +@[simp, grind =] theorem forIn'_toList [Monad m] {xs : Array α} {b : β} {f : (a : α) → a ∈ xs.toList → β → m (ForInStep β)} : forIn' xs.toList b f = forIn' xs b (fun a m b => f a (mem_toList.mpr m) b) := by cases xs simp @@ -4439,7 +4444,7 @@ abbrev contains_def [DecidableEq α] {a : α} {xs : Array α} : xs.contains a /-! ### isPrefixOf -/ -@[simp] theorem isPrefixOf_toList [BEq α] {xs ys : Array α} : +@[simp, grind =] theorem isPrefixOf_toList [BEq α] {xs ys : Array α} : xs.toList.isPrefixOf ys.toList = xs.isPrefixOf ys := by cases xs cases ys @@ -4480,32 +4485,32 @@ abbrev contains_def [DecidableEq α] {a : α} {xs : Array α} : xs.contains a /-! ### findSomeM?, findM?, findSome?, find? -/ -@[simp] theorem findSomeM?_toList [Monad m] [LawfulMonad m] {p : α → m (Option β)} {xs : Array α} : +@[simp, grind =] theorem findSomeM?_toList [Monad m] [LawfulMonad m] {p : α → m (Option β)} {xs : Array α} : xs.toList.findSomeM? p = xs.findSomeM? p := by cases xs simp -@[simp] theorem findM?_toList [Monad m] [LawfulMonad m] {p : α → m Bool} {xs : Array α} : +@[simp, grind =] theorem findM?_toList [Monad m] [LawfulMonad m] {p : α → m Bool} {xs : Array α} : xs.toList.findM? p = xs.findM? p := by cases xs simp -@[simp] theorem findSome?_toList {p : α → Option β} {xs : Array α} : +@[simp, grind =] theorem findSome?_toList {p : α → Option β} {xs : Array α} : xs.toList.findSome? p = xs.findSome? p := by cases xs simp -@[simp] theorem find?_toList {p : α → Bool} {xs : Array α} : +@[simp, grind =] theorem find?_toList {p : α → Bool} {xs : Array α} : xs.toList.find? p = xs.find? p := by cases xs simp -@[simp] theorem finIdxOf?_toList [BEq α] {a : α} {xs : Array α} : +@[simp, grind =] theorem finIdxOf?_toList [BEq α] {a : α} {xs : Array α} : xs.toList.finIdxOf? a = (xs.finIdxOf? 
a).map (Fin.cast (by simp)) := by cases xs simp -@[simp] theorem findFinIdx?_toList {p : α → Bool} {xs : Array α} : +@[simp, grind =] theorem findFinIdx?_toList {p : α → Bool} {xs : Array α} : xs.toList.findFinIdx? p = (xs.findFinIdx? p).map (Fin.cast (by simp)) := by cases xs simp @@ -4524,10 +4529,10 @@ Our goal is to have `simp` "pull `List.toArray` outwards" as much as possible. theorem toListRev_toArray {l : List α} : l.toArray.toListRev = l.reverse := by simp -@[simp] theorem take_toArray {l : List α} {i : Nat} : l.toArray.take i = (l.take i).toArray := by +@[simp, grind =] theorem take_toArray {l : List α} {i : Nat} : l.toArray.take i = (l.take i).toArray := by apply Array.ext <;> simp -@[simp] theorem mapM_toArray [Monad m] [LawfulMonad m] {f : α → m β} {l : List α} : +@[simp, grind =] theorem mapM_toArray [Monad m] [LawfulMonad m] {f : α → m β} {l : List α} : l.toArray.mapM f = List.toArray <$> l.mapM f := by simp only [← mapM'_eq_mapM, mapM_eq_foldlM] suffices ∀ xs : Array β, @@ -4544,12 +4549,12 @@ theorem toListRev_toArray {l : List α} : l.toArray.toListRev = l.reverse := by theorem uset_toArray {l : List α} {i : USize} {a : α} {h : i.toNat < l.toArray.size} : l.toArray.uset i a h = (l.set i.toNat a).toArray := by simp -@[simp] theorem modify_toArray {f : α → α} {l : List α} {i : Nat} : +@[simp, grind =] theorem modify_toArray {f : α → α} {l : List α} {i : Nat} : l.toArray.modify i f = (l.modify i f).toArray := by apply ext' simp -@[simp] theorem flatten_toArray {L : List (List α)} : +@[simp, grind =] theorem flatten_toArray {L : List (List α)} : (L.toArray.map List.toArray).flatten = L.flatten.toArray := by apply ext' simp [Function.comp_def] @@ -4624,11 +4629,11 @@ end Array namespace List -@[simp] theorem unzip_toArray {as : List (α × β)} : +@[simp, grind =] theorem unzip_toArray {as : List (α × β)} : as.toArray.unzip = Prod.map List.toArray List.toArray as.unzip := by ext1 <;> simp -@[simp] theorem firstM_toArray [Alternative m] {as : List α} {f : α → m β} : +@[simp, grind =] theorem firstM_toArray [Alternative m] {as : List α} {f : α → m β} : as.toArray.firstM f = as.firstM f := by unfold Array.firstM suffices ∀ i, i ≤ as.length → firstM.go f as.toArray (as.length - i) = firstM f (as.drop (as.length - i)) by diff --git a/src/Init/Data/Array/Lex/Lemmas.lean b/src/Init/Data/Array/Lex/Lemmas.lean index ab5623173856..61a9afcea992 100644 --- a/src/Init/Data/Array/Lex/Lemmas.lean +++ b/src/Init/Data/Array/Lex/Lemmas.lean @@ -16,11 +16,11 @@ namespace Array /-! 
### Lexicographic ordering -/ -@[simp] theorem _root_.List.lt_toArray [LT α] {l₁ l₂ : List α} : l₁.toArray < l₂.toArray ↔ l₁ < l₂ := Iff.rfl -@[simp] theorem _root_.List.le_toArray [LT α] {l₁ l₂ : List α} : l₁.toArray ≤ l₂.toArray ↔ l₁ ≤ l₂ := Iff.rfl +@[simp, grind =] theorem _root_.List.lt_toArray [LT α] {l₁ l₂ : List α} : l₁.toArray < l₂.toArray ↔ l₁ < l₂ := Iff.rfl +@[simp, grind =] theorem _root_.List.le_toArray [LT α] {l₁ l₂ : List α} : l₁.toArray ≤ l₂.toArray ↔ l₁ ≤ l₂ := Iff.rfl -@[simp] theorem lt_toList [LT α] {xs ys : Array α} : xs.toList < ys.toList ↔ xs < ys := Iff.rfl -@[simp] theorem le_toList [LT α] {xs ys : Array α} : xs.toList ≤ ys.toList ↔ xs ≤ ys := Iff.rfl +@[simp, grind =] theorem lt_toList [LT α] {xs ys : Array α} : xs.toList < ys.toList ↔ xs < ys := Iff.rfl +@[simp, grind =] theorem le_toList [LT α] {xs ys : Array α} : xs.toList ≤ ys.toList ↔ xs ≤ ys := Iff.rfl protected theorem not_lt_iff_ge [LT α] {l₁ l₂ : List α} : ¬ l₁ < l₂ ↔ l₂ ≤ l₁ := Iff.rfl protected theorem not_le_iff_gt [DecidableEq α] [LT α] [DecidableLT α] {l₁ l₂ : List α} : @@ -47,7 +47,7 @@ private theorem cons_lex_cons [BEq α] {lt : α → α → Bool} {a b : α} {xs cases a == b <;> simp · simp -@[simp] theorem _root_.List.lex_toArray [BEq α] {lt : α → α → Bool} {l₁ l₂ : List α} : +@[simp, grind =] theorem _root_.List.lex_toArray [BEq α] {lt : α → α → Bool} {l₁ l₂ : List α} : l₁.toArray.lex l₂.toArray lt = l₁.lex l₂ lt := by induction l₁ generalizing l₂ with | nil => cases l₂ <;> simp [lex, Id.run] @@ -57,7 +57,7 @@ private theorem cons_lex_cons [BEq α] {lt : α → α → Bool} {a b : α} {xs | cons y l₂ => rw [List.toArray_cons, List.toArray_cons y, cons_lex_cons, List.lex, ih] -@[simp] theorem lex_toList [BEq α] {lt : α → α → Bool} {xs ys : Array α} : +@[simp, grind =] theorem lex_toList [BEq α] {lt : α → α → Bool} {xs ys : Array α} : xs.toList.lex ys.toList lt = xs.lex ys lt := by cases xs <;> cases ys <;> simp diff --git a/src/Init/Data/Array/MapIdx.lean b/src/Init/Data/Array/MapIdx.lean index d33d81bb5941..62f5ca3c334f 100644 --- a/src/Init/Data/Array/MapIdx.lean +++ b/src/Init/Data/Array/MapIdx.lean @@ -111,11 +111,11 @@ end Array namespace List -@[simp] theorem mapFinIdx_toArray {l : List α} {f : (i : Nat) → α → (h : i < l.length) → β} : +@[simp, grind =] theorem mapFinIdx_toArray {l : List α} {f : (i : Nat) → α → (h : i < l.length) → β} : l.toArray.mapFinIdx f = (l.mapFinIdx f).toArray := by ext <;> simp -@[simp] theorem mapIdx_toArray {f : Nat → α → β} {l : List α} : +@[simp, grind =] theorem mapIdx_toArray {f : Nat → α → β} {l : List α} : l.toArray.mapIdx f = (l.mapIdx f).toArray := by ext <;> simp @@ -132,7 +132,7 @@ namespace Array @[deprecated getElem_zipIdx (since := "2025-01-21")] abbrev getElem_zipWithIndex := @getElem_zipIdx -@[simp] theorem zipIdx_toArray {l : List α} {k : Nat} : +@[simp, grind =] theorem zipIdx_toArray {l : List α} {k : Nat} : l.toArray.zipIdx k = (l.zipIdx k).toArray := by ext i hi₁ hi₂ <;> simp [Nat.add_comm] @@ -454,7 +454,7 @@ end Array namespace List -theorem mapFinIdxM_toArray [Monad m] [LawfulMonad m] {l : List α} +@[grind] theorem mapFinIdxM_toArray [Monad m] [LawfulMonad m] {l : List α} {f : (i : Nat) → α → (h : i < l.length) → m β} : l.toArray.mapFinIdxM f = toArray <$> l.mapFinIdxM f := by let rec go (i : Nat) (acc : Array β) (inv : i + acc.size = l.length) : @@ -475,7 +475,7 @@ theorem mapFinIdxM_toArray [Monad m] [LawfulMonad m] {l : List α} simp only [Array.mapFinIdxM, mapFinIdxM] exact go _ #[] _ -theorem mapIdxM_toArray [Monad m] [LawfulMonad m] {l : List α} 
+@[grind] theorem mapIdxM_toArray [Monad m] [LawfulMonad m] {l : List α} {f : Nat → α → m β} : l.toArray.mapIdxM f = toArray <$> l.mapIdxM f := by let rec go (bs : List α) (acc : Array β) (inv : bs.length + acc.size = l.length) : diff --git a/src/Init/Data/Array/Monadic.lean b/src/Init/Data/Array/Monadic.lean index 4646a209f4c4..5a641c5c21bd 100644 --- a/src/Init/Data/Array/Monadic.lean +++ b/src/Init/Data/Array/Monadic.lean @@ -264,7 +264,7 @@ end Array namespace List -theorem filterM_toArray [Monad m] [LawfulMonad m] {l : List α} {p : α → m Bool} : +@[grind =] theorem filterM_toArray [Monad m] [LawfulMonad m] {l : List α} {p : α → m Bool} : l.toArray.filterM p = toArray <$> l.filterM p := by simp only [Array.filterM, filterM, foldlM_toArray, bind_pure_comp, Functor.map_map] conv => lhs; rw [← reverse_nil] @@ -284,7 +284,7 @@ theorem filterM_toArray [Monad m] [LawfulMonad m] {l : List α} {p : α → m Bo subst w rw [filterM_toArray] -theorem filterRevM_toArray [Monad m] [LawfulMonad m] {l : List α} {p : α → m Bool} : +@[grind =] theorem filterRevM_toArray [Monad m] [LawfulMonad m] {l : List α} {p : α → m Bool} : l.toArray.filterRevM p = toArray <$> l.filterRevM p := by simp [Array.filterRevM, filterRevM] rw [← foldlM_reverse, ← foldlM_toArray, ← Array.filterM, filterM_toArray] @@ -296,7 +296,7 @@ theorem filterRevM_toArray [Monad m] [LawfulMonad m] {l : List α} {p : α → m subst w rw [filterRevM_toArray] -theorem filterMapM_toArray [Monad m] [LawfulMonad m] {l : List α} {f : α → m (Option β)} : +@[grind =] theorem filterMapM_toArray [Monad m] [LawfulMonad m] {l : List α} {f : α → m (Option β)} : l.toArray.filterMapM f = toArray <$> l.filterMapM f := by simp [Array.filterMapM, filterMapM] conv => lhs; rw [← reverse_nil] @@ -314,7 +314,7 @@ theorem filterMapM_toArray [Monad m] [LawfulMonad m] {l : List α} {f : α → m subst w rw [filterMapM_toArray] -@[simp] theorem flatMapM_toArray [Monad m] [LawfulMonad m] {l : List α} {f : α → m (Array β)} : +@[simp, grind =] theorem flatMapM_toArray [Monad m] [LawfulMonad m] {l : List α} {f : α → m (Array β)} : l.toArray.flatMapM f = toArray <$> l.flatMapM (fun a => Array.toList <$> f a) := by simp only [Array.flatMapM, bind_pure_comp, foldlM_toArray, flatMapM] conv => lhs; arg 2; change [].reverse.flatten.toArray diff --git a/src/Init/Data/Array/Subarray.lean b/src/Init/Data/Array/Subarray.lean index a1f8f46b8160..04b7af62bda0 100644 --- a/src/Init/Data/Array/Subarray.lean +++ b/src/Init/Data/Array/Subarray.lean @@ -464,8 +464,12 @@ instance : Append (Subarray α) where let a := x.toArray ++ y.toArray a.toSubarray 0 a.size +/-- `Subarray` representation. -/ +protected def Subarray.repr [Repr α] (s : Subarray α) : Std.Format := + repr s.toArray ++ ".toSubarray" + instance [Repr α] : Repr (Subarray α) where - reprPrec s _ := repr s.toArray ++ ".toSubarray" + reprPrec s _ := Subarray.repr s instance [ToString α] : ToString (Subarray α) where toString s := toString s.toArray diff --git a/src/Init/Data/BitVec/Basic.lean b/src/Init/Data/BitVec/Basic.lean index 9fd2839a6a62..7465f9a71111 100644 --- a/src/Init/Data/BitVec/Basic.lean +++ b/src/Init/Data/BitVec/Basic.lean @@ -199,7 +199,13 @@ protected def toHex {n : Nat} (x : BitVec n) : String := let t := (List.replicate ((n+3) / 4 - s.length) '0').asString t ++ s -instance : Repr (BitVec n) where reprPrec a _ := "0x" ++ (a.toHex : Std.Format) ++ "#" ++ repr n +/-- `BitVec` representation. 
-/ +protected def BitVec.repr (a : BitVec n) : Std.Format := + "0x" ++ (a.toHex : Std.Format) ++ "#" ++ repr n + +instance : Repr (BitVec n) where + reprPrec a _ := BitVec.repr a + instance : ToString (BitVec n) where toString a := toString (repr a) end repr_toString diff --git a/src/Init/Data/Float.lean b/src/Init/Data/Float.lean index f0331b7504cb..92286dcb2f51 100644 --- a/src/Init/Data/Float.lean +++ b/src/Init/Data/Float.lean @@ -291,8 +291,11 @@ implementation. instance : Inhabited Float where default := UInt64.toFloat 0 +protected def Float.repr (n : Float) (prec : Nat) : Std.Format := + if n < UInt64.toFloat 0 then Repr.addAppParen (toString n) prec else toString n + instance : Repr Float where - reprPrec n prec := if n < UInt64.toFloat 0 then Repr.addAppParen (toString n) prec else toString n + reprPrec := Float.repr instance : ReprAtom Float := ⟨⟩ diff --git a/src/Init/Data/Float32.lean b/src/Init/Data/Float32.lean index 1ad084e52c5b..1427c2a1f884 100644 --- a/src/Init/Data/Float32.lean +++ b/src/Init/Data/Float32.lean @@ -292,8 +292,11 @@ implementation. instance : Inhabited Float32 where default := UInt64.toFloat32 0 +protected def Float32.repr (n : Float32) (prec : Nat) : Std.Format := + if n < UInt64.toFloat32 0 then Repr.addAppParen (toString n) prec else toString n + instance : Repr Float32 where - reprPrec n prec := if n < UInt64.toFloat32 0 then Repr.addAppParen (toString n) prec else toString n + reprPrec := Float32.repr instance : ReprAtom Float32 := ⟨⟩ diff --git a/src/Init/Data/Format/Basic.lean b/src/Init/Data/Format/Basic.lean index c2c981c16f24..4d27fffd6ed9 100644 --- a/src/Init/Data/Format/Basic.lean +++ b/src/Init/Data/Format/Basic.lean @@ -106,7 +106,7 @@ def isNil : Format → Bool | nil => true | _ => false -private structure SpaceResult where +structure SpaceResult where foundLine : Bool := false foundFlattenedHardLine : Bool := false space : Nat := 0 @@ -137,12 +137,12 @@ private def spaceUptoLine : Format → Bool → Int → Nat → SpaceResult | group f _, _, m, w => spaceUptoLine f true m w | tag _ f, flatten, m, w => spaceUptoLine f flatten m w -private structure WorkItem where +structure WorkItem where f : Format indent : Int activeTags : Nat -private structure WorkGroup where +structure WorkGroup where flatten : Bool flb : FlattenBehavior items : List WorkItem @@ -290,7 +290,7 @@ def indentD (f : Format) : Format := nestD (Format.line ++ f) /-- State for formatting a pretty string. -/ -private structure State where +structure State where out : String := "" column : Nat := 0 diff --git a/src/Init/Data/Int/DivMod/Basic.lean b/src/Init/Data/Int/DivMod/Basic.lean index fc033989a901..26ee89d2bf47 100644 --- a/src/Init/Data/Int/DivMod/Basic.lean +++ b/src/Init/Data/Int/DivMod/Basic.lean @@ -44,7 +44,7 @@ Integer division that uses the E-rounding convention. Usually accessed via the ` Division by zero is defined to be zero, rather than an error. In the E-rounding convention (Euclidean division), `Int.emod x y` satisfies `0 ≤ Int.emod x y < Int.natAbs y` -for `y ≠ 0` and `Int.ediv` is the unique function satisfying `Int.emod x y + (Int.edivx y) * y = x` +for `y ≠ 0` and `Int.ediv` is the unique function satisfying `Int.emod x y + (Int.ediv x y) * y = x` for `y ≠ 0`. This means that `Int.ediv x y` is `⌊x / y⌋` when `y > 0` and `⌈x / y⌉` when `y < 0`. @@ -76,7 +76,7 @@ def ediv : (@& Int) → (@& Int) → Int Integer modulus that uses the E-rounding convention. Usually accessed via the `%` operator. 
In the E-rounding convention (Euclidean division), `Int.emod x y` satisfies `0 ≤ Int.emod x y < Int.natAbs y` -for `y ≠ 0` and `Int.ediv` is the unique function satisfying `Int.emod x y + (Int.edivx y) * y = x` +for `y ≠ 0` and `Int.ediv` is the unique function satisfying `Int.emod x y + (Int.ediv x y) * y = x` for `y ≠ 0`. This function is overridden by the compiler with an efficient implementation. This definition is diff --git a/src/Init/Data/List/ToArray.lean b/src/Init/Data/List/ToArray.lean index 68b5ee671a24..7167e399c1fa 100644 --- a/src/Init/Data/List/ToArray.lean +++ b/src/Init/Data/List/ToArray.lean @@ -66,7 +66,7 @@ theorem toArray_cons (a : α) (l : List α) : (a :: l).toArray = #[a] ++ l.toArr apply ext' simp -@[simp] theorem push_toArray (l : List α) (a : α) : l.toArray.push a = (l ++ [a]).toArray := by +@[simp, grind =] theorem push_toArray (l : List α) (a : α) : l.toArray.push a = (l ++ [a]).toArray := by apply ext' simp @@ -75,37 +75,37 @@ theorem toArray_cons (a : α) (l : List α) : (a :: l).toArray = #[a] ++ l.toArr funext a simp -@[simp] theorem isEmpty_toArray (l : List α) : l.toArray.isEmpty = l.isEmpty := by +@[simp, grind =] theorem isEmpty_toArray (l : List α) : l.toArray.isEmpty = l.isEmpty := by cases l <;> simp [Array.isEmpty] @[simp] theorem toArray_singleton (a : α) : (List.singleton a).toArray = Array.singleton a := rfl -@[simp] theorem back!_toArray [Inhabited α] (l : List α) : l.toArray.back! = l.getLast! := by +@[simp, grind =] theorem back!_toArray [Inhabited α] (l : List α) : l.toArray.back! = l.getLast! := by simp only [back!, size_toArray, getElem!_toArray, getLast!_eq_getElem!] -@[simp] theorem back?_toArray (l : List α) : l.toArray.back? = l.getLast? := by +@[simp, grind =] theorem back?_toArray (l : List α) : l.toArray.back? = l.getLast? := by simp [back?, List.getLast?_eq_getElem?] -@[simp] theorem back_toArray (l : List α) (h) : +@[simp, grind =] theorem back_toArray (l : List α) (h) : l.toArray.back = l.getLast (by simp at h; exact ne_nil_of_length_pos h) := by simp [back, List.getLast_eq_getElem] -@[simp] theorem _root_.Array.getLast!_toList [Inhabited α] (xs : Array α) : +@[simp, grind =] theorem _root_.Array.getLast!_toList [Inhabited α] (xs : Array α) : xs.toList.getLast! = xs.back! := by rcases xs with ⟨xs⟩ simp -@[simp] theorem _root_.Array.getLast?_toList (xs : Array α) : +@[simp, grind =] theorem _root_.Array.getLast?_toList (xs : Array α) : xs.toList.getLast? = xs.back? 
:= by rcases xs with ⟨xs⟩ simp -@[simp] theorem _root_.Array.getLast_toList (xs : Array α) (h) : +@[simp, grind =] theorem _root_.Array.getLast_toList (xs : Array α) (h) : xs.toList.getLast h = xs.back (by simpa [ne_nil_iff_length_pos] using h) := by rcases xs with ⟨xs⟩ simp -@[simp] theorem set_toArray (l : List α) (i : Nat) (a : α) (h : i < l.length) : +@[simp, grind =] theorem set_toArray (l : List α) (i : Nat) (a : α) (h : i < l.length) : (l.toArray.set i a) = (l.set i a).toArray := rfl @[simp] theorem forIn'_loop_toArray [Monad m] (l : List α) (f : (a : α) → a ∈ l.toArray → β → m (ForInStep β)) (i : Nat) @@ -126,30 +126,30 @@ theorem toArray_cons (a : α) (l : List α) : (a :: l).toArray = #[a] ++ l.toArr simp only [t] congr -@[simp] theorem forIn'_toArray [Monad m] (l : List α) (b : β) (f : (a : α) → a ∈ l.toArray → β → m (ForInStep β)) : +@[simp, grind =] theorem forIn'_toArray [Monad m] (l : List α) (b : β) (f : (a : α) → a ∈ l.toArray → β → m (ForInStep β)) : forIn' l.toArray b f = forIn' l b (fun a m b => f a (mem_toArray.mpr m) b) := by change Array.forIn' _ _ _ = List.forIn' _ _ _ rw [Array.forIn', forIn'_loop_toArray] simp -@[simp] theorem forIn_toArray [Monad m] (l : List α) (b : β) (f : α → β → m (ForInStep β)) : +@[simp, grind =] theorem forIn_toArray [Monad m] (l : List α) (b : β) (f : α → β → m (ForInStep β)) : forIn l.toArray b f = forIn l b f := by simpa using forIn'_toArray l b fun a m b => f a b -theorem foldrM_toArray [Monad m] (f : α → β → m β) (init : β) (l : List α) : +@[grind =] theorem foldrM_toArray [Monad m] (f : α → β → m β) (init : β) (l : List α) : l.toArray.foldrM f init = l.foldrM f init := by rw [foldrM_eq_reverse_foldlM_toList] simp -theorem foldlM_toArray [Monad m] (f : β → α → m β) (init : β) (l : List α) : +@[grind =] theorem foldlM_toArray [Monad m] (f : β → α → m β) (init : β) (l : List α) : l.toArray.foldlM f init = l.foldlM f init := by rw [foldlM_toList] -theorem foldr_toArray (f : α → β → β) (init : β) (l : List α) : +@[grind =] theorem foldr_toArray (f : α → β → β) (init : β) (l : List α) : l.toArray.foldr f init = l.foldr f init := by rw [foldr_toList] -theorem foldl_toArray (f : β → α → β) (init : β) (l : List α) : +@[grind =] theorem foldl_toArray (f : β → α → β) (init : β) (l : List α) : l.toArray.foldl f init = l.foldl f init := by rw [foldl_toList] @@ -176,7 +176,7 @@ theorem foldl_toArray (f : β → α → β) (init : β) (l : List α) : simp only [size_toArray, foldlM_toArray'] induction l <;> simp_all -@[simp] +@[simp, grind =] theorem forM_toArray [Monad m] (l : List α) (f : α → m PUnit) : (forM l.toArray f) = l.forM f := forM_toArray' l f rfl @@ -195,15 +195,15 @@ theorem forM_toArray [Monad m] (l : List α) (f : α → m PUnit) : subst h rw [foldl_toList] -@[simp] theorem sum_toArray [Add α] [Zero α] (l : List α) : l.toArray.sum = l.sum := by +@[simp, grind =] theorem sum_toArray [Add α] [Zero α] (l : List α) : l.toArray.sum = l.sum := by simp [Array.sum, List.sum] -@[simp] theorem append_toArray (l₁ l₂ : List α) : +@[simp, grind =] theorem append_toArray (l₁ l₂ : List α) : l₁.toArray ++ l₂.toArray = (l₁ ++ l₂).toArray := by apply ext' simp -@[simp] theorem push_append_toArray {as : Array α} {a : α} {bs : List α} : as.push a ++ bs.toArray = as ++ (a ::bs).toArray := by +@[simp] theorem push_append_toArray {as : Array α} {a : α} {bs : List α} : as.push a ++ bs.toArray = as ++ (a :: bs).toArray := by cases as simp @@ -213,7 +213,7 @@ theorem forM_toArray [Monad m] (l : List α) (f : α → m PUnit) : @[simp] theorem foldr_push {l : List α} {as : Array 
α} : l.foldr (fun a bs => push bs a) as = as ++ l.reverse.toArray := by rw [foldr_eq_foldl_reverse, foldl_push] -@[simp] theorem findSomeM?_toArray [Monad m] [LawfulMonad m] (f : α → m (Option β)) (l : List α) : +@[simp, grind =] theorem findSomeM?_toArray [Monad m] [LawfulMonad m] (f : α → m (Option β)) (l : List α) : l.toArray.findSomeM? f = l.findSomeM? f := by rw [Array.findSomeM?] simp only [bind_pure_comp, map_pure, forIn_toArray] @@ -246,7 +246,7 @@ theorem findRevM?_toArray [Monad m] [LawfulMonad m] (f : α → m Bool) (l : Lis l.toArray.findRevM? f = l.reverse.findM? f := by rw [Array.findRevM?, findSomeRevM?_toArray, findM?_eq_findSomeM?] -@[simp] theorem findM?_toArray [Monad m] [LawfulMonad m] (f : α → m Bool) (l : List α) : +@[simp, grind =] theorem findM?_toArray [Monad m] [LawfulMonad m] (f : α → m Bool) (l : List α) : l.toArray.findM? f = l.findM? f := by rw [Array.findM?] simp only [bind_pure_comp, map_pure, forIn_toArray] @@ -257,11 +257,11 @@ theorem findRevM?_toArray [Monad m] [LawfulMonad m] (f : α → m Bool) (l : Lis congr ext1 (_|_) <;> simp [ih] -@[simp] theorem findSome?_toArray (f : α → Option β) (l : List α) : +@[simp, grind =] theorem findSome?_toArray (f : α → Option β) (l : List α) : l.toArray.findSome? f = l.findSome? f := by rw [Array.findSome?, ← findSomeM?_id, findSomeM?_toArray, Id.run] -@[simp] theorem find?_toArray (f : α → Bool) (l : List α) : +@[simp, grind =] theorem find?_toArray (f : α → Bool) (l : List α) : l.toArray.find? f = l.find? f := by rw [Array.find?] simp only [Id.run, Id, Id.pure_eq, Id.bind_eq, forIn_toArray] @@ -297,12 +297,12 @@ private theorem findFinIdx?_loop_toArray (w : l' = l.drop j) : simp termination_by l.length - j -@[simp] theorem findFinIdx?_toArray (p : α → Bool) (l : List α) : +@[simp, grind =] theorem findFinIdx?_toArray (p : α → Bool) (l : List α) : l.toArray.findFinIdx? p = l.findFinIdx? p := by rw [Array.findFinIdx?, findFinIdx?, findFinIdx?_loop_toArray] simp -@[simp] theorem findIdx?_toArray (p : α → Bool) (l : List α) : +@[simp, grind =] theorem findIdx?_toArray (p : α → Bool) (l : List α) : l.toArray.findIdx? p = l.findIdx? p := by rw [Array.findIdx?_eq_map_findFinIdx?_val, findIdx?_eq_map_findFinIdx?_val] simp @@ -334,21 +334,21 @@ private theorem idxAuxOf_toArray [BEq α] (a : α) (l : List α) (j : Nat) (w : simp termination_by l.length - j -@[simp] theorem finIdxOf?_toArray [BEq α] (a : α) (l : List α) : +@[simp, grind =] theorem finIdxOf?_toArray [BEq α] (a : α) (l : List α) : l.toArray.finIdxOf? a = l.finIdxOf? a := by rw [Array.finIdxOf?, finIdxOf?, findFinIdx?] simp [idxAuxOf_toArray] -@[simp] theorem idxOf?_toArray [BEq α] (a : α) (l : List α) : +@[simp, grind =] theorem idxOf?_toArray [BEq α] (a : α) (l : List α) : l.toArray.idxOf? a = l.idxOf? a := by rw [Array.idxOf?, idxOf?] simp [finIdxOf?, findIdx?_eq_map_findFinIdx?_val] -@[simp] theorem findIdx_toArray {as : List α} {p : α → Bool} : +@[simp, grind =] theorem findIdx_toArray {as : List α} {p : α → Bool} : as.toArray.findIdx p = as.findIdx p := by rw [Array.findIdx, findIdx?_toArray, findIdx_eq_getD_findIdx?] 
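-- A minimal usage sketch (illustrative only, not part of this patch): with
-- `find?_toArray` tagged `@[simp]` as above, `simp` pushes `List.toArray`
-- outward and closes goals of this shape directly.
example (l : List Nat) (p : Nat → Bool) :
    l.toArray.find? p = l.find? p := by
  simp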
-@[simp] theorem idxOf_toArray [BEq α] {as : List α} {a : α} : +@[simp, grind =] theorem idxOf_toArray [BEq α] {as : List α} {a : α} : as.toArray.idxOf a = as.idxOf a := by rw [Array.idxOf, findIdx_toArray, idxOf] @@ -383,7 +383,7 @@ theorem isPrefixOfAux_toArray_zero [BEq α] (l₁ l₂ : List α) (hle : l₁.le | a::l₁, b::l₂ => simp [isPrefixOf_cons₂, isPrefixOfAux_toArray_succ', isPrefixOfAux_toArray_zero] -@[simp] theorem isPrefixOf_toArray [BEq α] (l₁ l₂ : List α) : +@[simp, grind =] theorem isPrefixOf_toArray [BEq α] (l₁ l₂ : List α) : l₁.toArray.isPrefixOf l₂.toArray = l₁.isPrefixOf l₂ := by rw [Array.isPrefixOf] split <;> rename_i h @@ -429,12 +429,12 @@ theorem zipWithAux_toArray_zero (f : α → β → γ) (as : List α) (bs : List | a :: as, b :: bs => simp [zipWith_cons_cons, zipWithAux_toArray_succ', zipWithAux_toArray_zero, push_append_toArray] -@[simp] theorem zipWith_toArray (as : List α) (bs : List β) (f : α → β → γ) : +@[simp, grind =] theorem zipWith_toArray (as : List α) (bs : List β) (f : α → β → γ) : Array.zipWith f as.toArray bs.toArray = (List.zipWith f as bs).toArray := by rw [Array.zipWith] simp [zipWithAux_toArray_zero] -@[simp] theorem zip_toArray (as : List α) (bs : List β) : +@[simp, grind =] theorem zip_toArray (as : List α) (bs : List β) : Array.zip as.toArray bs.toArray = (List.zip as bs).toArray := by simp [Array.zip, zipWith_toArray, zip] @@ -472,16 +472,16 @@ theorem zipWithAll_go_toArray (as : List α) (bs : List β) (f : Option α → O termination_by max as.length bs.length - i decreasing_by simp_wf; decreasing_trivial_pre_omega -@[simp] theorem zipWithAll_toArray (f : Option α → Option β → γ) (as : List α) (bs : List β) : +@[simp, grind =] theorem zipWithAll_toArray (f : Option α → Option β → γ) (as : List α) (bs : List β) : Array.zipWithAll f as.toArray bs.toArray = (List.zipWithAll f as bs).toArray := by simp [Array.zipWithAll, zipWithAll_go_toArray] -@[simp] theorem toArray_appendList (l₁ l₂ : List α) : +@[simp, grind =] theorem toArray_appendList (l₁ l₂ : List α) : l₁.toArray ++ l₂ = (l₁ ++ l₂).toArray := by apply ext' simp -@[simp] theorem pop_toArray (l : List α) : l.toArray.pop = l.dropLast.toArray := by +@[simp, grind =] theorem pop_toArray (l : List α) : l.toArray.pop = l.dropLast.toArray := by apply ext' simp @@ -513,7 +513,7 @@ theorem takeWhile_go_toArray (p : α → Bool) (l : List α) (i : Nat) : split <;> simp_all · simp_all [drop_eq_nil_of_le] -@[simp] theorem takeWhile_toArray (p : α → Bool) (l : List α) : +@[simp, grind =] theorem takeWhile_toArray (p : α → Bool) (l : List α) : l.toArray.takeWhile p = (l.takeWhile p).toArray := by simp [Array.takeWhile, takeWhile_go_toArray] @@ -528,11 +528,11 @@ private theorem popWhile_toArray_aux (p : α → Bool) (l : List α) : · rfl · simp -@[simp] theorem popWhile_toArray (p : α → Bool) (l : List α) : +@[simp, grind =] theorem popWhile_toArray (p : α → Bool) (l : List α) : l.toArray.popWhile p = (l.reverse.dropWhile p).reverse.toArray := by simp [← popWhile_toArray_aux] -@[simp] theorem setIfInBounds_toArray (l : List α) (i : Nat) (a : α) : +@[simp, grind =] theorem setIfInBounds_toArray (l : List α) (i : Nat) (a : α) : l.toArray.setIfInBounds i a = (l.set i a).toArray := by apply ext' simp only [setIfInBounds] @@ -540,7 +540,7 @@ private theorem popWhile_toArray_aux (p : α → Bool) (l : List α) : · simp · simp_all [List.set_eq_of_length_le] -@[simp] theorem toArray_replicate (n : Nat) (v : α) : +@[simp, grind =] theorem toArray_replicate (n : Nat) (v : α) : (List.replicate n v).toArray = Array.replicate n v := rfl 
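-- A minimal usage sketch (illustrative only, not part of this patch): since
-- `toArray_replicate` is now a `simp`/`grind` lemma, replicated lists that are
-- converted to arrays normalise by `simp`.
example (n v : Nat) :
    (List.replicate n v).toArray = Array.replicate n v := by
  simp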
theorem _root_.Array.replicate_eq_toArray_replicate : @@ -550,7 +550,7 @@ theorem _root_.Array.replicate_eq_toArray_replicate : @[deprecated _root_.Array.replicate_eq_toArray_replicate (since := "2025-03-18")] abbrev _root_.Array.mkArray_eq_toArray_replicate := @_root_.Array.replicate_eq_toArray_replicate -@[simp] theorem flatMap_empty {β} (f : α → Array β) : (#[] : Array α).flatMap f = #[] := rfl +@[simp, grind =] theorem flatMap_empty {β} (f : α → Array β) : (#[] : Array α).flatMap f = #[] := rfl theorem flatMap_toArray_cons {β} (f : α → Array β) (a : α) (as : List α) : (a :: as).toArray.flatMap f = f a ++ as.toArray.flatMap f := by @@ -562,7 +562,7 @@ theorem flatMap_toArray_cons {β} (f : α → Array β) (a : α) (as : List α) intro xs induction as generalizing xs <;> simp_all -@[simp] theorem flatMap_toArray {β} (f : α → Array β) (as : List α) : +@[simp, grind =] theorem flatMap_toArray {β} (f : α → Array β) (as : List α) : as.toArray.flatMap f = (as.flatMap (fun a => (f a).toList)).toArray := by induction as with | nil => simp @@ -570,12 +570,12 @@ theorem flatMap_toArray_cons {β} (f : α → Array β) (a : α) (as : List α) apply ext' simp [ih, flatMap_toArray_cons] -@[simp] theorem swap_toArray (l : List α) (i j : Nat) {hi hj}: +@[simp, grind =] theorem swap_toArray (l : List α) (i j : Nat) {hi hj}: l.toArray.swap i j hi hj = ((l.set i l[j]).set j l[i]).toArray := by apply ext' simp -@[simp] theorem eraseIdx_toArray (l : List α) (i : Nat) (h : i < l.toArray.size) : +@[simp, grind =] theorem eraseIdx_toArray (l : List α) (i : Nat) (h : i < l.toArray.size) : l.toArray.eraseIdx i h = (l.eraseIdx i).toArray := by rw [Array.eraseIdx] split <;> rename_i h' @@ -593,19 +593,19 @@ decreasing_by simp omega -@[simp] theorem eraseIdxIfInBounds_toArray (l : List α) (i : Nat) : +@[simp, grind =] theorem eraseIdxIfInBounds_toArray (l : List α) (i : Nat) : l.toArray.eraseIdxIfInBounds i = (l.eraseIdx i).toArray := by rw [Array.eraseIdxIfInBounds] split · simp · simp_all [eraseIdx_eq_self.2] -@[simp] theorem eraseP_toArray {as : List α} {p : α → Bool} : +@[simp, grind =] theorem eraseP_toArray {as : List α} {p : α → Bool} : as.toArray.eraseP p = (as.eraseP p).toArray := by rw [Array.eraseP, List.eraseP_eq_eraseIdx, findFinIdx?_toArray] split <;> simp [*, findIdx?_eq_map_findFinIdx?_val] -@[simp] theorem erase_toArray [BEq α] {as : List α} {a : α} : +@[simp, grind =] theorem erase_toArray [BEq α] {as : List α} {a : α} : as.toArray.erase a = (as.erase a).toArray := by rw [Array.erase, finIdxOf?_toArray, List.erase_eq_eraseIdx] rw [idxOf?_eq_map_finIdxOf?_val] @@ -635,7 +635,7 @@ private theorem insertIdx_loop_toArray (i : Nat) (l : List α) (j : Nat) (hj : j subst this simp -@[simp] theorem insertIdx_toArray (l : List α) (i : Nat) (a : α) (h : i ≤ l.toArray.size): +@[simp, grind =] theorem insertIdx_toArray (l : List α) (i : Nat) (a : α) (h : i ≤ l.toArray.size): l.toArray.insertIdx i a = (l.insertIdx i a).toArray := by rw [Array.insertIdx] rw [insertIdx_loop_toArray (h := h)] @@ -658,7 +658,7 @@ private theorem insertIdx_loop_toArray (i : Nat) (l : List α) (j : Nat) (hj : j congr omega -@[simp] theorem insertIdxIfInBounds_toArray (l : List α) (i : Nat) (a : α) : +@[simp, grind =] theorem insertIdxIfInBounds_toArray (l : List α) (i : Nat) (a : α) : l.toArray.insertIdxIfInBounds i a = (l.insertIdx i a).toArray := by rw [Array.insertIdxIfInBounds] split <;> rename_i h' @@ -666,7 +666,7 @@ private theorem insertIdx_loop_toArray (i : Nat) (l : List α) (j : Nat) (hj : j · simp only [size_toArray, Nat.not_le] at h' 
rw [List.insertIdx_of_length_lt (h := h')] -@[simp] +@[simp, grind =] theorem replace_toArray [BEq α] [LawfulBEq α] (l : List α) (a b : α) : l.toArray.replace a b = (l.replace a b).toArray := by rw [Array.replace] @@ -700,11 +700,11 @@ theorem replace_toArray [BEq α] [LawfulBEq α] (l : List α) (a b : α) : exact ⟨i, by omega, h.1⟩ · rfl -@[simp] theorem leftpad_toArray (n : Nat) (a : α) (l : List α) : +@[simp, grind =] theorem leftpad_toArray (n : Nat) (a : α) (l : List α) : Array.leftpad n a l.toArray = (leftpad n a l).toArray := by simp [leftpad, Array.leftpad, ← toArray_replicate] -@[simp] theorem rightpad_toArray (n : Nat) (a : α) (l : List α) : +@[simp, grind =] theorem rightpad_toArray (n : Nat) (a : α) (l : List α) : Array.rightpad n a l.toArray = (rightpad n a l).toArray := by simp [rightpad, Array.rightpad, ← toArray_replicate] diff --git a/src/Init/Data/Option/Attach.lean b/src/Init/Data/Option/Attach.lean index 2efb75853dcb..3a2d1cb3d3c4 100644 --- a/src/Init/Data/Option/Attach.lean +++ b/src/Init/Data/Option/Attach.lean @@ -138,7 +138,7 @@ theorem toList_attach (o : Option α) : o.attach.toList = o.toList.attach.map fun ⟨x, h⟩ => ⟨x, by simpa using h⟩ := by cases o <;> simp -@[simp] theorem attach_toList (o : Option α) : +@[simp, grind =] theorem attach_toList (o : Option α) : o.toList.attach = (o.attach.map fun ⟨a, h⟩ => ⟨a, by simpa using h⟩).toList := by cases o <;> simp @@ -195,7 +195,7 @@ theorem attach_filter {o : Option α} {p : α → Bool} : | some a => simp only [filter_some, attach_some] ext - simp only [attach_eq_some_iff, ite_none_right_eq_some, some.injEq, some_bind, + simp only [attach_eq_some_iff, ite_none_right_eq_some, some.injEq, bind_some, dite_none_right_eq_some] constructor · rintro ⟨h, w⟩ diff --git a/src/Init/Data/Option/Basic.lean b/src/Init/Data/Option/Basic.lean index dc481e9433c1..f127e4a5373d 100644 --- a/src/Init/Data/Option/Basic.lean +++ b/src/Init/Data/Option/Basic.lean @@ -13,11 +13,20 @@ namespace Option deriving instance DecidableEq for Option deriving instance BEq for Option +@[simp, grind] theorem getD_none : getD none a = a := rfl +@[simp, grind] theorem getD_some : getD (some a) b = a := rfl + +@[simp, grind] theorem map_none (f : α → β) : none.map f = none := rfl +@[simp, grind] theorem map_some (a) (f : α → β) : (some a).map f = some (f a) := rfl + /-- Lifts an optional value to any `Alternative`, sending `none` to `failure`. -/ def getM [Alternative m] : Option α → m α | none => failure | some a => pure a +@[simp, grind] theorem getM_none [Alternative m] : getM none = (failure : m α) := rfl +@[simp, grind] theorem getM_some [Alternative m] {a : α} : getM (some a) = (pure a : m α) := rfl + /-- Returns `true` on `some x` and `false` on `none`. -/ @[inline] def isSome : Option α → Bool | some _ => true @@ -75,6 +84,14 @@ Examples: | none, _ => none | some a, f => f a +@[simp, grind] theorem bind_none (f : α → Option β) : none.bind f = none := rfl +@[simp, grind] theorem bind_some (a) (f : α → Option β) : (some a).bind f = f a := rfl + +@[deprecated bind_none (since := "2025-05-03")] +abbrev none_bind := @bind_none +@[deprecated bind_some (since := "2025-05-03")] +abbrev some_bind := @bind_some + /-- Runs the monadic action `f` on `o`'s value, if any, and returns the result, or `none` if there is no value. @@ -102,6 +119,9 @@ This function only requires `m` to be an applicative functor. 
An alias `Option.m | none => pure none | some x => some <$> f x +@[simp, grind] theorem mapM_none [Applicative m] (f : α → m β) : none.mapM f = pure none := rfl +@[simp, grind] theorem mapM_some [Applicative m] (x) (f : α → m β) : (some x).mapM f = some <$> f x := rfl + /-- Applies a function in some applicative functor to an optional value, returning `none` with no effects if the value is missing. @@ -111,6 +131,10 @@ This is an alias for `Option.mapM`, which already works for applicative functors @[inline] protected def mapA [Applicative m] (f : α → m β) : Option α → m (Option β) := Option.mapM f +/-- For verification purposes, we replace `mapA` with `mapM`. -/ +@[simp, grind] theorem mapA_eq_mapM [Applicative m] {f : α → m β} : Option.mapA f o = Option.mapM f o := rfl + +@[simp, grind] theorem map_id : (Option.map id : Option α → Option α) = id := funext (fun o => match o with | none => rfl | some _ => rfl) @@ -142,6 +166,9 @@ Examples: | some a => p a | none => true +@[simp, grind] theorem all_none : Option.all p none = true := rfl +@[simp, grind] theorem all_some : Option.all p (some x) = p x := rfl + /-- Checks whether an optional value is not `none` and satisfies a Boolean predicate. @@ -154,6 +181,9 @@ Examples: | some a => p a | none => false +@[simp, grind] theorem any_none : Option.any p none = false := rfl +@[simp, grind] theorem any_some : Option.any p (some x) = p x := rfl + /-- Implementation of `OrElse`'s `<|>` syntax for `Option`. If the first argument is `some a`, returns `some a`, otherwise evaluates and returns the second argument. @@ -164,6 +194,9 @@ See also `or` for a version that is strict in the second argument. | some a, _ => some a | none, b => b () +@[simp, grind] theorem orElse_some : (some a).orElse b = some a := rfl +@[simp, grind] theorem orElse_none : none.orElse b = b () := rfl + instance : OrElse (Option α) where orElse := Option.orElse @@ -230,15 +263,6 @@ def merge (fn : α → α → α) : Option α → Option α → Option α | none , some y => some y | some x, some y => some <| fn x y -@[simp, grind] theorem getD_none : getD none a = a := rfl -@[simp, grind] theorem getD_some : getD (some a) b = a := rfl - -@[simp, grind] theorem map_none (f : α → β) : none.map f = none := rfl -@[simp, grind] theorem map_some (a) (f : α → β) : (some a).map f = some (f a) := rfl - -@[simp, grind] theorem none_bind (f : α → Option β) : none.bind f = none := rfl -@[simp, grind] theorem some_bind (a) (f : α → Option β) : (some a).bind f = f a := rfl - /-- A case analysis function for `Option`. @@ -262,9 +286,9 @@ Extracts the value from an option that can be proven to be `some`. @[inline] def get {α : Type u} : (o : Option α) → isSome o → α | some x, _ => x -@[simp] theorem some_get : ∀ {x : Option α} (h : isSome x), some (x.get h) = x +@[simp, grind] theorem some_get : ∀ {x : Option α} (h : isSome x), some (x.get h) = x | some _, _ => rfl -@[simp] theorem get_some (x : α) (h : isSome (some x)) : (some x).get h = x := rfl +@[simp, grind] theorem get_some (x : α) (h : isSome (some x)) : (some x).get h = x := rfl /-- Returns `none` if a value doesn't satisfy a Boolean predicate, or the value itself otherwise. @@ -342,6 +366,9 @@ Examples: -/ @[simp, inline] def join (x : Option (Option α)) : Option α := x.bind id +@[simp, grind] theorem join_none : (none : Option (Option α)).join = none := rfl +@[simp, grind] theorem join_some : (some o).join = o := rfl + /-- Converts an optional monadic computation into a monadic computation of an optional value. 
@@ -363,7 +390,10 @@ some "world" -/ @[inline] def sequence [Applicative m] {α : Type u} : Option (m α) → m (Option α) | none => pure none - | some fn => some <$> fn + | some f => some <$> f + +@[simp, grind] theorem sequence_none [Applicative m] : (none : Option (m α)).sequence = pure none := rfl +@[simp, grind] theorem sequence_some [Applicative m] (f : m (Option α)) : (some f).sequence = some <$> f := rfl /-- A monadic case analysis function for `Option`. @@ -388,6 +418,9 @@ This is the monadic analogue of `Option.getD`. | some a => pure a | none => y +@[simp, grind] theorem getDM_none [Pure m] (y : m α) : (none : Option α).getDM y = y := rfl +@[simp, grind] theorem getDM_some [Pure m] (a : α) (y : m α) : (some a).getDM y = pure a := rfl + instance (α) [BEq α] [ReflBEq α] : ReflBEq (Option α) where rfl {x} := match x with @@ -400,12 +433,6 @@ instance (α) [BEq α] [LawfulBEq α] : LawfulBEq (Option α) where | some x, some y => rw [LawfulBEq.eq_of_beq (α := α) h] | none, none => rfl -@[simp, grind] theorem all_none : Option.all p none = true := rfl -@[simp, grind] theorem all_some : Option.all p (some x) = p x := rfl - -@[simp, grind] theorem any_none : Option.any p none = false := rfl -@[simp, grind] theorem any_some : Option.any p (some x) = p x := rfl - /-- The minimum of two optional values, with `none` treated as the least element. This function is usually accessed through the `Min (Option α)` instance, rather than directly. @@ -428,10 +455,10 @@ protected def min [Min α] : Option α → Option α → Option α instance [Min α] : Min (Option α) where min := Option.min -@[simp] theorem min_some_some [Min α] {a b : α} : min (some a) (some b) = some (min a b) := rfl -@[simp] theorem min_some_none [Min α] {a : α} : min (some a) none = none := rfl -@[simp] theorem min_none_some [Min α] {b : α} : min none (some b) = none := rfl -@[simp] theorem min_none_none [Min α] : min (none : Option α) none = none := rfl +@[simp, grind] theorem min_some_some [Min α] {a b : α} : min (some a) (some b) = some (min a b) := rfl +@[simp, grind] theorem min_some_none [Min α] {a : α} : min (some a) none = none := rfl +@[simp, grind] theorem min_none_some [Min α] {b : α} : min none (some b) = none := rfl +@[simp, grind] theorem min_none_none [Min α] : min (none : Option α) none = none := rfl /-- The maximum of two optional values. @@ -453,10 +480,10 @@ protected def max [Max α] : Option α → Option α → Option α instance [Max α] : Max (Option α) where max := Option.max -@[simp] theorem max_some_some [Max α] {a b : α} : max (some a) (some b) = some (max a b) := rfl -@[simp] theorem max_some_none [Max α] {a : α} : max (some a) none = some a := rfl -@[simp] theorem max_none_some [Max α] {b : α} : max none (some b) = some b := rfl -@[simp] theorem max_none_none [Max α] : max (none : Option α) none = none := rfl +@[simp, grind] theorem max_some_some [Max α] {a b : α} : max (some a) (some b) = some (max a b) := rfl +@[simp, grind] theorem max_some_none [Max α] {a : α} : max (some a) none = some a := rfl +@[simp, grind] theorem max_none_some [Max α] {b : α} : max none (some b) = some b := rfl +@[simp, grind] theorem max_none_none [Max α] : max (none : Option α) none = none := rfl end Option @@ -481,6 +508,7 @@ instance : Alternative Option where failure := Option.none orElse := Option.orElse +-- This is a duplicate of `Option.getM`; one may be deprecated in the future. 
def liftOption [Alternative m] : Option α → m α | some a => pure a | none => failure diff --git a/src/Init/Data/Option/Instances.lean b/src/Init/Data/Option/Instances.lean index 06fa8a4e0b27..816869adf94c 100644 --- a/src/Init/Data/Option/Instances.lean +++ b/src/Init/Data/Option/Instances.lean @@ -12,7 +12,7 @@ universe u v namespace Option -theorem eq_of_eq_some {α : Type u} : ∀ {x y : Option α}, (∀z, x = some z ↔ y = some z) → x = y +theorem eq_of_eq_some {α : Type u} : ∀ {x y : Option α}, (∀ z, x = some z ↔ y = some z) → x = y | none, none, _ => rfl | none, some z, h => Option.noConfusion ((h z).2 rfl) | some z, none, h => Option.noConfusion ((h z).1 rfl) diff --git a/src/Init/Data/Option/Lemmas.lean b/src/Init/Data/Option/Lemmas.lean index 618e9533a410..dcd128dc8f32 100644 --- a/src/Init/Data/Option/Lemmas.lean +++ b/src/Init/Data/Option/Lemmas.lean @@ -91,8 +91,6 @@ theorem eq_some_unique {o : Option α} {a b : α} (ha : o = some a) (hb : o = so | some _, _, H => ((H _).1 rfl).symm | _, some _, H => (H _).2 rfl -set_option Elab.async false - theorem eq_none_iff_forall_ne_some : o = none ↔ ∀ a, o ≠ some a := by cases o <;> simp @@ -174,15 +172,15 @@ theorem forall_ne_none {p : Option α → Prop} : (∀ x (_ : x ≠ none), p x) @[deprecated forall_ne_none (since := "2025-04-04")] abbrev ball_ne_none := @forall_ne_none -@[simp] theorem pure_def : pure = @some α := rfl +@[simp, grind] theorem pure_def : pure = @some α := rfl -@[simp] theorem bind_eq_bind : bind = @Option.bind α β := rfl +@[simp, grind] theorem bind_eq_bind : bind = @Option.bind α β := rfl -@[simp] theorem orElse_eq_orElse : HOrElse.hOrElse = @Option.orElse α := rfl +@[simp, grind] theorem orElse_eq_orElse : HOrElse.hOrElse = @Option.orElse α := rfl -@[simp] theorem bind_some (x : Option α) : x.bind some = x := by cases x <;> rfl +@[simp, grind] theorem bind_fun_some (x : Option α) : x.bind some = x := by cases x <;> rfl -@[simp] theorem bind_none (x : Option α) : x.bind (fun _ => none (α := β)) = none := by +@[simp] theorem bind_fun_none (x : Option α) : x.bind (fun _ => none (α := β)) = none := by cases x <;> rfl theorem bind_eq_some_iff : x.bind f = some b ↔ ∃ a, x = some a ∧ f a = some b := by @@ -201,7 +199,7 @@ theorem bind_eq_none' {o : Option α} {f : α → Option β} : o.bind f = none ↔ ∀ b a, o = some a → f a ≠ some b := by cases o <;> simp [eq_none_iff_forall_ne_some] -theorem mem_bind_iff {o : Option α} {f : α → Option β} : +@[grind] theorem mem_bind_iff {o : Option α} {f : α → Option β} : b ∈ o.bind f ↔ ∃ a, a ∈ o ∧ b ∈ f a := by cases o <;> simp @@ -209,6 +207,7 @@ theorem bind_comm {f : α → β → Option γ} (a : Option α) (b : Option β) (a.bind fun x => b.bind (f x)) = b.bind fun y => a.bind fun x => f x y := by cases a <;> cases b <;> rfl +@[grind] theorem bind_assoc (x : Option α) (f : α → Option β) (g : β → Option γ) : (x.bind f).bind g = x.bind fun y => (f y).bind g := by cases x <;> rfl @@ -216,10 +215,16 @@ theorem bind_congr {α β} {o : Option α} {f g : α → Option β} : (h : ∀ a, o = some a → f a = g a) → o.bind f = o.bind g := by cases o <;> simp +@[grind] theorem isSome_bind {α β : Type _} (x : Option α) (f : α → Option β) : (x.bind f).isSome = x.any (fun x => (f x).isSome) := by cases x <;> rfl +@[grind] +theorem isNone_bind {α β : Type _} (x : Option α) (f : α → Option β) : + (x.bind f).isNone = x.all (fun x => (f x).isNone) := by + cases x <;> rfl + theorem isSome_of_isSome_bind {α β : Type _} {x : Option α} {f : α → Option β} (h : (x.bind f).isSome) : x.isSome := by cases x <;> trivial @@ -228,7 +233,7 @@ 
theorem isSome_apply_of_isSome_bind {α β : Type _} {x : Option α} {f : α → (h : (x.bind f).isSome) : (f (x.get (isSome_of_isSome_bind h))).isSome := by cases x <;> trivial -@[simp] theorem get_bind {α β : Type _} {x : Option α} {f : α → Option β} (h : (x.bind f).isSome) : +@[simp, grind] theorem get_bind {α β : Type _} {x : Option α} {f : α → Option β} (h : (x.bind f).isSome) : (x.bind f).get h = (f (x.get (isSome_of_isSome_bind h))).get (isSome_apply_of_isSome_bind h) := by cases x <;> trivial @@ -251,9 +256,9 @@ theorem join_eq_none_iff : o.join = none ↔ o = none ∨ o = some none := @[deprecated join_eq_none_iff (since := "2025-04-10")] abbrev join_eq_none := @join_eq_none_iff -theorem bind_id_eq_join {x : Option (Option α)} : x.bind id = x.join := rfl +@[grind] theorem bind_id_eq_join {x : Option (Option α)} : x.bind id = x.join := rfl -@[simp] theorem map_eq_map : Functor.map f = Option.map f := rfl +@[simp, grind] theorem map_eq_map : Functor.map f = Option.map f := rfl @[deprecated map_none (since := "2025-04-10")] abbrev map_none' := @map_none @@ -295,28 +300,28 @@ theorem map_congr {x : Option α} (h : ∀ a, x = some a → f a = g a) : x.map f = x.map g := by cases x <;> simp only [map_none, map_some, h] -@[simp] theorem map_id_fun {α : Type u} : Option.map (id : α → α) = id := by +@[simp, grind] theorem map_id_fun {α : Type u} : Option.map (id : α → α) = id := by funext; simp [map_id] theorem map_id' {x : Option α} : (x.map fun a => a) = x := congrFun map_id x -@[simp] theorem map_id_fun' {α : Type u} : Option.map (fun (a : α) => a) = id := by +@[simp, grind] theorem map_id_fun' {α : Type u} : Option.map (fun (a : α) => a) = id := by funext; simp [map_id'] -theorem get_map {f : α → β} {o : Option α} {h : (o.map f).isSome} : +@[simp, grind] theorem get_map {f : α → β} {o : Option α} {h : (o.map f).isSome} : (o.map f).get h = f (o.get (by simpa using h)) := by cases o with | none => simp at h | some a => simp -@[simp] theorem map_map (h : β → γ) (g : α → β) (x : Option α) : +@[simp, grind _=_] theorem map_map (h : β → γ) (g : α → β) (x : Option α) : (x.map g).map h = x.map (h ∘ g) := by cases x <;> simp only [map_none, map_some, ·∘·] theorem comp_map (h : β → γ) (g : α → β) (x : Option α) : x.map (h ∘ g) = (x.map g).map h := (map_map ..).symm -@[simp] theorem map_comp_map (f : α → β) (g : β → γ) : +@[simp, grind _=_] theorem map_comp_map (f : α → β) (g : β → γ) : Option.map g ∘ Option.map f = Option.map (g ∘ f) := by funext x; simp theorem mem_map_of_mem (g : α → β) (h : a ∈ x) : g a ∈ Option.map g x := h.symm ▸ map_some .. 
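-- A minimal usage sketch (illustrative only, not part of this patch): the
-- `bind_assoc` lemma tagged `@[grind]` above can also be verified directly by
-- case analysis on the option.
example (x : Option Nat) (f g : Nat → Option Nat) :
    (x.bind f).bind g = x.bind fun y => (f y).bind g := by
  cases x <;> rfl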
@@ -373,6 +378,7 @@ abbrev filter_eq_none := @filter_eq_none_iff @[deprecated filter_eq_some_iff (since := "2025-04-10")] abbrev filter_eq_some := @filter_eq_some_iff +@[grind] theorem mem_filter_iff {p : α → Bool} {a : α} {o : Option α} : a ∈ o.filter p ↔ a ∈ o ∧ p a := by simp @@ -381,12 +387,12 @@ theorem filter_eq_bind (x : Option α) (p : α → Bool) : x.filter p = x.bind (Option.guard p) := by cases x <;> rfl -@[simp] theorem all_guard (a : α) : +@[simp, grind] theorem all_guard (a : α) : Option.all q (guard p a) = (!p a || q a) := by simp only [guard] split <;> simp_all -@[simp] theorem any_guard (a : α) : Option.any q (guard p a) = (p a && q a) := by +@[simp, grind] theorem any_guard (a : α) : Option.any q (guard p a) = (p a && q a) := by simp only [guard] split <;> simp_all @@ -425,33 +431,41 @@ theorem any_eq_false_iff_get (p : α → Bool) (x : Option α) : theorem isSome_of_any {x : Option α} {p : α → Bool} (h : x.any p) : x.isSome := by cases x <;> trivial +@[grind] theorem any_map {α β : Type _} {x : Option α} {f : α → β} {p : β → Bool} : (x.map f).any p = x.any (fun a => p (f a)) := by cases x <;> rfl +@[grind] +theorem all_map {α β : Type _} {x : Option α} {f : α → β} {p : β → Bool} : + (x.map f).all p = x.all (fun a => p (f a)) := by + cases x <;> rfl + theorem bind_map_comm {α β} {x : Option (Option α)} {f : α → β} : x.bind (Option.map f) = (x.map (Option.map f)).bind id := by cases x <;> simp -theorem bind_map {f : α → β} {g : β → Option γ} {x : Option α} : +@[grind] theorem bind_map {f : α → β} {g : β → Option γ} {x : Option α} : (x.map f).bind g = x.bind (g ∘ f) := by cases x <;> simp -@[simp] theorem map_bind {f : α → Option β} {g : β → γ} {x : Option α} : +@[simp, grind] theorem map_bind {f : α → Option β} {g : β → γ} {x : Option α} : (x.bind f).map g = x.bind (Option.map g ∘ f) := by cases x <;> simp -theorem join_map_eq_map_join {f : α → β} {x : Option (Option α)} : +@[grind] theorem join_map_eq_map_join {f : α → β} {x : Option (Option α)} : (x.map (Option.map f)).join = x.join.map f := by cases x <;> simp -theorem join_join {x : Option (Option (Option α))} : x.join.join = (x.map join).join := by +@[grind _=_] theorem join_join {x : Option (Option (Option α))} : x.join.join = (x.map join).join := by cases x <;> simp theorem mem_of_mem_join {a : α} {x : Option (Option α)} (h : a ∈ x.join) : some a ∈ x := h.symm ▸ join_eq_some_iff.1 h -@[simp, grind] theorem some_orElse (a : α) (f) : (some a).orElse f = some a := rfl +@[deprecated orElse_some (since := "2025-05-03")] +theorem some_orElse (a : α) (f) : (some a).orElse f = some a := rfl -@[simp, grind] theorem none_orElse (f : Unit → Option α) : none.orElse f = f () := rfl +@[deprecated orElse_none (since := "2025-05-03")] +theorem none_orElse (f : Unit → Option α) : none.orElse f = f () := rfl -@[simp] theorem orElse_none (x : Option α) : x.orElse (fun _ => none) = x := by cases x <;> rfl +@[simp] theorem orElse_fun_none (x : Option α) : x.orElse (fun _ => none) = x := by cases x <;> rfl theorem orElse_eq_some_iff (o : Option α) (f) (x : α) : (o.orElse f) = some x ↔ o = some x ∨ o = none ∧ f () = some x := by @@ -460,7 +474,7 @@ theorem orElse_eq_some_iff (o : Option α) (f) (x : α) : theorem orElse_eq_none_iff (o : Option α) (f) : (o.orElse f) = none ↔ o = none ∧ f () = none := by cases o <;> simp -theorem map_orElse {x : Option α} {y} : +@[grind] theorem map_orElse {x : Option α} {y} : (x.orElse y).map f = (x.map f).orElse (fun _ => (y ()).map f) := by cases x <;> simp @@ -504,7 +518,7 @@ theorem guard_comp {p : α → Bool} 
{f : β → α} : ext1 b simp [guard] -theorem bind_guard (x : Option α) (p : α → Bool) : +@[grind] theorem bind_guard (x : Option α) (p : α → Bool) : x.bind (Option.guard p) = x.filter p := by simp only [Option.filter_eq_bind, decide_eq_true_eq] @@ -513,6 +527,7 @@ theorem guard_eq_map (p : α → Bool) : funext x simp [Option.guard] +@[grind] theorem guard_def (p : α → Bool) : Option.guard p = fun x => if p x then some x else none := rfl @@ -599,9 +614,11 @@ abbrev choice_isSome_iff_nonempty := @isSome_choice_iff_nonempty end choice @[simp, grind] theorem toList_some (a : α) : (some a).toList = [a] := rfl - @[simp, grind] theorem toList_none (α : Type _) : (none : Option α).toList = [] := rfl +@[simp, grind] theorem toArray_some (a : α) : (some a).toArray = #[a] := rfl +@[simp, grind] theorem toArray_none (α : Type _) : (none : Option α).toArray = #[] := rfl + -- See `Init.Data.Option.List` for lemmas about `toList`. @[simp, grind] theorem some_or : (some a).or o = some a := rfl @@ -610,10 +627,15 @@ end choice theorem or_eq_right_of_none {o o' : Option α} (h : o = none) : o.or o' = o' := by cases h; simp -@[deprecated some_or (since := "2024-11-03")] theorem or_some : (some a).or o = some a := rfl - /-- This will be renamed to `or_some` once the existing deprecated lemma is removed. -/ -@[simp, grind] theorem or_some' {o : Option α} : o.or (some a) = some (o.getD a) := by +@[simp, grind] theorem or_some {o : Option α} : o.or (some a) = some (o.getD a) := by + cases o <;> rfl + +@[deprecated or_some (since := "2025-05-03")] +abbrev or_some' := @or_some + +@[simp, grind] +theorem or_none : or o none = o := by cases o <;> rfl theorem or_eq_bif : or o o' = bif o.isSome then o else o' := by @@ -637,14 +659,10 @@ abbrev or_eq_none := @or_eq_none_iff @[deprecated or_eq_some_iff (since := "2025-04-10")] abbrev or_eq_some := @or_eq_some_iff -theorem or_assoc : or (or o₁ o₂) o₃ = or o₁ (or o₂ o₃) := by +@[grind] theorem or_assoc : or (or o₁ o₂) o₃ = or o₁ (or o₂ o₃) := by cases o₁ <;> cases o₂ <;> rfl instance : Std.Associative (or (α := α)) := ⟨@or_assoc _⟩ -@[simp, grind] -theorem or_none : or o none = o := by - cases o <;> rfl - theorem or_eq_left_of_none {o o' : Option α} (h : o' = none) : o.or o' = o := by cases h; simp @@ -685,10 +703,14 @@ section beq variable [BEq α] -@[simp] theorem none_beq_none : ((none : Option α) == none) = true := rfl -@[simp] theorem none_beq_some (a : α) : ((none : Option α) == some a) = false := rfl -@[simp] theorem some_beq_none (a : α) : ((some a : Option α) == none) = false := rfl -@[simp] theorem some_beq_some {a b : α} : (some a == some b) = (a == b) := rfl +@[simp, grind] theorem none_beq_none : ((none : Option α) == none) = true := rfl +@[simp, grind] theorem none_beq_some (a : α) : ((none : Option α) == some a) = false := rfl +@[simp, grind] theorem some_beq_none (a : α) : ((some a : Option α) == none) = false := rfl +@[simp, grind] theorem some_beq_some {a b : α} : (some a == some b) = (a == b) := rfl + +/-- We simplify away `isEqSome` in terms of `==`. 
-/ +@[simp, grind] theorem isEqSome_eq_beq_some {o : Option α} : isEqSome o y = (o == some y) := by + cases o <;> simp [isEqSome] @[simp] theorem reflBEq_iff : ReflBEq (Option α) ↔ ReflBEq α := by constructor @@ -802,14 +824,14 @@ theorem mem_ite_none_right {x : α} {_ : Decidable p} {l : Option α} : end ite -theorem isSome_filter {α : Type _} {x : Option α} {f : α → Bool} : +@[grind] theorem isSome_filter {α : Type _} {x : Option α} {f : α → Bool} : (x.filter f).isSome = x.any f := by cases x · rfl · rw [Bool.eq_iff_iff] simp only [Option.any_some, Option.filter, Option.isSome_ite] -@[simp] theorem get_filter {α : Type _} {x : Option α} {f : α → Bool} (h : (x.filter f).isSome) : +@[simp, grind] theorem get_filter {α : Type _} {x : Option α} {f : α → Bool} (h : (x.filter f).isSome) : (x.filter f).get h = x.get (isSome_of_isSome_filter f x h) := by cases x · contradiction @@ -821,16 +843,16 @@ theorem isSome_filter {α : Type _} {x : Option α} {f : α → Bool} : @[simp, grind] theorem pbind_none : pbind none f = none := rfl @[simp, grind] theorem pbind_some : pbind (some a) f = f a rfl := rfl -@[simp] theorem map_pbind {o : Option α} {f : (a : α) → o = some a → Option β} +@[simp, grind] theorem map_pbind {o : Option α} {f : (a : α) → o = some a → Option β} {g : β → γ} : (o.pbind f).map g = o.pbind (fun a h => (f a h).map g) := by cases o <;> rfl -@[simp] theorem pbind_map {α β γ : Type _} (o : Option α) +@[simp, grind] theorem pbind_map {α β γ : Type _} (o : Option α) (f : α → β) (g : (x : β) → o.map f = some x → Option γ) : (o.map f).pbind g = o.pbind (fun x h => g (f x) (h ▸ rfl)) := by cases o <;> rfl -@[simp] theorem pbind_eq_bind {α β : Type _} (o : Option α) +@[simp, grind] theorem pbind_eq_bind {α β : Type _} (o : Option α) (f : α → Option β) : o.pbind (fun x _ => f x) = o.bind f := by cases o <;> rfl @@ -890,16 +912,16 @@ theorem pbind_eq_some_iff {o : Option α} {f : (a : α) → o = some a → Optio · rintro ⟨h, rfl⟩ rfl -@[simp] +@[simp, grind] theorem pmap_eq_map (p : α → Prop) (f : α → β) (o : Option α) (H) : @pmap _ _ p (fun a _ => f a) o H = Option.map f o := by cases o <;> simp -theorem map_pmap {p : α → Prop} (g : β → γ) (f : ∀ a, p a → β) (o H) : +@[grind] theorem map_pmap {p : α → Prop} (g : β → γ) (f : ∀ a, p a → β) (o H) : Option.map g (pmap f o H) = pmap (fun a h => g (f a h)) o H := by cases o <;> simp -theorem pmap_map (o : Option α) (f : α → β) {p : β → Prop} (g : ∀ b, p b → γ) (H) : +@[grind] theorem pmap_map (o : Option α) (f : α → β) {p : β → Prop} (g : ∀ b, p b → γ) (H) : pmap g (o.map f) H = pmap (fun a h => g (f a) h) o (fun a m => H (f a) (map_eq_some_iff.2 ⟨_, m, rfl⟩)) := by cases o <;> simp @@ -938,10 +960,10 @@ theorem pmap_congr {α : Type u} {β : Type v} @[simp, grind] theorem pelim_none : pelim none b f = b := rfl @[simp, grind] theorem pelim_some : pelim (some a) b f = f a rfl := rfl -@[simp] theorem pelim_eq_elim : pelim o b (fun a _ => f a) = o.elim b f := by +@[simp, grind] theorem pelim_eq_elim : pelim o b (fun a _ => f a) = o.elim b f := by cases o <;> simp -@[simp] theorem elim_pmap {p : α → Prop} (f : (a : α) → p a → β) (o : Option α) +@[simp, grind] theorem elim_pmap {p : α → Prop} (f : (a : α) → p a → β) (o : Option α) (H : ∀ (a : α), o = some a → p a) (g : γ) (g' : β → γ) : (o.pmap f H).elim g g' = o.pelim g (fun a h => g' (f a (H a h))) := by @@ -978,7 +1000,7 @@ theorem isSome_of_isSome_pfilter {α : Type _} {o : Option α} {p : (a : α) → (h : (o.pfilter p).isSome) : o.isSome := (isSome_pfilter_iff_get.mp h).1 -@[simp] theorem get_pfilter {α : Type _} 
{o : Option α} {p : (a : α) → o = some a → Bool} +@[simp, grind] theorem get_pfilter {α : Type _} {o : Option α} {p : (a : α) → o = some a → Bool} (h : (o.pfilter p).isSome) : (o.pfilter p).get h = o.get (isSome_of_isSome_pfilter h) := by cases o <;> simp @@ -996,7 +1018,7 @@ theorem pfilter_eq_some_iff {α : Type _} {o : Option α} {p : (a : α) → o = · rintro ⟨⟨h, rfl⟩, h'⟩ exact ⟨⟨o.get h, ⟨h, rfl⟩, h'⟩, rfl⟩ -@[simp] theorem pfilter_eq_filter {α : Type _} {o : Option α} {p : α → Bool} : +@[simp, grind] theorem pfilter_eq_filter {α : Type _} {o : Option α} {p : α → Bool} : o.pfilter (fun a _ => p a) = o.filter p := by cases o with | none => rfl @@ -1012,13 +1034,13 @@ theorem pfilter_eq_pbind_ite {α : Type _} {o : Option α} /-! ### LT and LE -/ -@[simp] theorem not_lt_none [LT α] {a : Option α} : ¬ a < none := by cases a <;> simp [LT.lt, Option.lt] -@[simp] theorem none_lt_some [LT α] {a : α} : none < some a := by simp [LT.lt, Option.lt] -@[simp] theorem some_lt_some [LT α] {a b : α} : some a < some b ↔ a < b := by simp [LT.lt, Option.lt] +@[simp, grind] theorem not_lt_none [LT α] {a : Option α} : ¬ a < none := by cases a <;> simp [LT.lt, Option.lt] +@[simp, grind] theorem none_lt_some [LT α] {a : α} : none < some a := by simp [LT.lt, Option.lt] +@[simp, grind] theorem some_lt_some [LT α] {a b : α} : some a < some b ↔ a < b := by simp [LT.lt, Option.lt] -@[simp] theorem none_le [LE α] {a : Option α} : none ≤ a := by cases a <;> simp [LE.le, Option.le] -@[simp] theorem not_some_le_none [LE α] {a : α} : ¬ some a ≤ none := by simp [LE.le, Option.le] -@[simp] theorem some_le_some [LE α] {a b : α} : some a ≤ some b ↔ a ≤ b := by simp [LE.le, Option.le] +@[simp, grind] theorem none_le [LE α] {a : Option α} : none ≤ a := by cases a <;> simp [LE.le, Option.le] +@[simp, grind] theorem not_some_le_none [LE α] {a : α} : ¬ some a ≤ none := by simp [LE.le, Option.le] +@[simp, grind] theorem some_le_some [LE α] {a b : α} : some a ≤ some b ↔ a ≤ b := by simp [LE.le, Option.le] /-! 
### min and max -/ diff --git a/src/Init/Data/Option/List.lean b/src/Init/Data/Option/List.lean index 23989ab347b6..5553f47d1181 100644 --- a/src/Init/Data/Option/List.lean +++ b/src/Init/Data/Option/List.lean @@ -10,62 +10,38 @@ import Init.Data.List.Lemmas namespace Option -@[simp] theorem mem_toList {a : α} {o : Option α} : a ∈ o.toList ↔ o = some a := by +@[simp, grind] theorem mem_toList {a : α} {o : Option α} : a ∈ o.toList ↔ o = some a := by cases o <;> simp [eq_comm] -@[simp] theorem forIn'_none [Monad m] (b : β) (f : (a : α) → a ∈ none → β → m (ForInStep β)) : - forIn' none b f = pure b := by - rfl - -@[simp] theorem forIn'_some [Monad m] [LawfulMonad m] (a : α) (b : β) (f : (a' : α) → a' ∈ some a → β → m (ForInStep β)) : - forIn' (some a) b f = bind (f a rfl b) (fun r => pure (ForInStep.value r)) := by - simp only [forIn', bind_pure_comp] - rw [map_eq_pure_bind] - congr - funext x - split <;> rfl - -@[simp] theorem forIn_none [Monad m] (b : β) (f : α → β → m (ForInStep β)) : - forIn none b f = pure b := by - rfl - -@[simp] theorem forIn_some [Monad m] [LawfulMonad m] (a : α) (b : β) (f : α → β → m (ForInStep β)) : - forIn (some a) b f = bind (f a b) (fun r => pure (ForInStep.value r)) := by - simp only [forIn, forIn', bind_pure_comp] - rw [map_eq_pure_bind] - congr - funext x - split <;> rfl - -@[simp] theorem forIn'_toList [Monad m] (o : Option α) (b : β) (f : (a : α) → a ∈ o.toList → β → m (ForInStep β)) : +@[simp, grind] theorem forIn'_toList [Monad m] (o : Option α) (b : β) (f : (a : α) → a ∈ o.toList → β → m (ForInStep β)) : forIn' o.toList b f = forIn' o b fun a m b => f a (by simpa using m) b := by cases o <;> rfl -@[simp] theorem forIn_toList [Monad m] (o : Option α) (b : β) (f : α → β → m (ForInStep β)) : +@[simp, grind] theorem forIn_toList [Monad m] (o : Option α) (b : β) (f : α → β → m (ForInStep β)) : forIn o.toList b f = forIn o b f := by cases o <;> rfl -@[simp] theorem foldlM_toList [Monad m] [LawfulMonad m] (o : Option β) (a : α) (f : α → β → m α) : +@[simp, grind] theorem foldlM_toList [Monad m] [LawfulMonad m] (o : Option β) (a : α) (f : α → β → m α) : o.toList.foldlM f a = o.elim (pure a) (fun b => f a b) := by cases o <;> simp -@[simp] theorem foldrM_toList [Monad m] [LawfulMonad m] (o : Option β) (a : α) (f : β → α → m α) : +@[simp, grind] theorem foldrM_toList [Monad m] [LawfulMonad m] (o : Option β) (a : α) (f : β → α → m α) : o.toList.foldrM f a = o.elim (pure a) (fun b => f b a) := by cases o <;> simp -@[simp] theorem foldl_toList (o : Option β) (a : α) (f : α → β → α) : +@[simp, grind] theorem foldl_toList (o : Option β) (a : α) (f : α → β → α) : o.toList.foldl f a = o.elim a (fun b => f a b) := by cases o <;> simp -@[simp] theorem foldr_toList (o : Option β) (a : α) (f : β → α → α) : +@[simp, grind] theorem foldr_toList (o : Option β) (a : α) (f : β → α → α) : o.toList.foldr f a = o.elim a (fun b => f b a) := by cases o <;> simp -@[simp] +@[simp, grind] theorem pairwise_toList {P : α → α → Prop} {o : Option α} : o.toList.Pairwise P := by cases o <;> simp -@[simp] +@[simp, grind] theorem head?_toList {o : Option α} : o.toList.head? 
= o := by cases o <;> simp diff --git a/src/Init/Data/Option/Monadic.lean b/src/Init/Data/Option/Monadic.lean index 32e36e11e40a..3281db72490b 100644 --- a/src/Init/Data/Option/Monadic.lean +++ b/src/Init/Data/Option/Monadic.lean @@ -12,16 +12,47 @@ import Init.Control.Lawful.Basic namespace Option -@[simp] theorem forM_none [Monad m] (f : α → m PUnit) : - none.forM f = pure .unit := rfl +@[simp, grind] theorem bindM_none [Monad m] (f : α → m (Option β)) : none.bindM f = pure none := rfl +@[simp, grind] theorem bindM_some [Monad m] [LawfulMonad m] (a) (f : α → m (Option β)) : (some a).bindM f = f a := by + simp [Option.bindM] -@[simp] theorem forM_some [Monad m] (f : α → m PUnit) (a : α) : - (some a).forM f = f a := rfl +-- We simplify `Option.forM` to `forM`. +@[simp] theorem forM_eq_forM [Monad m] : @Option.forM m α _ = forM := rfl -@[simp] theorem forM_map [Monad m] [LawfulMonad m] (o : Option α) (g : α → β) (f : β → m PUnit) : - (o.map g).forM f = o.forM (fun a => f (g a)) := by +@[simp, grind] theorem forM_none [Monad m] (f : α → m PUnit) : + forM none f = pure .unit := rfl + +@[simp, grind] theorem forM_some [Monad m] (f : α → m PUnit) (a : α) : + forM (some a) f = f a := rfl + +@[simp, grind] theorem forM_map [Monad m] [LawfulMonad m] (o : Option α) (g : α → β) (f : β → m PUnit) : + forM (o.map g) f = forM o (fun a => f (g a)) := by cases o <;> simp +@[simp, grind] theorem forIn'_none [Monad m] (b : β) (f : (a : α) → a ∈ none → β → m (ForInStep β)) : + forIn' none b f = pure b := by + rfl + +@[simp, grind] theorem forIn'_some [Monad m] [LawfulMonad m] (a : α) (b : β) (f : (a' : α) → a' ∈ some a → β → m (ForInStep β)) : + forIn' (some a) b f = bind (f a rfl b) (fun r => pure (ForInStep.value r)) := by + simp only [forIn', bind_pure_comp] + rw [map_eq_pure_bind] + congr + funext x + split <;> rfl + +@[simp, grind] theorem forIn_none [Monad m] (b : β) (f : α → β → m (ForInStep β)) : + forIn none b f = pure b := by + rfl + +@[simp, grind] theorem forIn_some [Monad m] [LawfulMonad m] (a : α) (b : β) (f : α → β → m (ForInStep β)) : + forIn (some a) b f = bind (f a b) (fun r => pure (ForInStep.value r)) := by + simp only [forIn, forIn', bind_pure_comp] + rw [map_eq_pure_bind] + congr + funext x + split <;> rfl + @[congr] theorem forIn'_congr [Monad m] [LawfulMonad m] {as bs : Option α} (w : as = bs) {b b' : β} (hb : b = b') {f : (a' : α) → a' ∈ as → β → m (ForInStep β)} @@ -60,7 +91,7 @@ theorem forIn'_eq_pelim [Monad m] [LawfulMonad m] o.pelim b (fun a h => f a h b) := by cases o <;> simp -@[simp] theorem forIn'_map [Monad m] [LawfulMonad m] +@[simp, grind] theorem forIn'_map [Monad m] [LawfulMonad m] (o : Option α) (g : α → β) (f : (b : β) → b ∈ o.map g → γ → m (ForInStep γ)) : forIn' (o.map g) init f = forIn' o init fun a h y => f (g a) (mem_map_of_mem g h) y := by cases o <;> simp @@ -89,11 +120,9 @@ theorem forIn_eq_elim [Monad m] [LawfulMonad m] o.elim b (fun a => f a b) := by cases o <;> simp -@[simp] theorem forIn_map [Monad m] [LawfulMonad m] +@[simp, grind] theorem forIn_map [Monad m] [LawfulMonad m] (o : Option α) (g : α → β) (f : β → γ → m (ForInStep γ)) : forIn (o.map g) init f = forIn o init fun a y => f (g a) y := by cases o <;> simp -@[simp] theorem mapA_eq_mapM : @Option.mapA = @Option.mapM := rfl - end Option diff --git a/src/Init/Data/Repr.lean b/src/Init/Data/Repr.lean index ad6e5b22b198..d461c8bf093b 100644 --- a/src/Init/Data/Repr.lean +++ b/src/Init/Data/Repr.lean @@ -55,10 +55,12 @@ This instance allows us to use `Empty` as a type parameter without causing insta 
instance : Repr Empty where reprPrec := nofun +protected def Bool.repr : Bool → Nat → Format + | true, _ => "true" + | false, _ => "false" + instance : Repr Bool where - reprPrec - | true, _ => "true" - | false, _ => "false" + reprPrec := Bool.repr def Repr.addAppParen (f : Format) (prec : Nat) : Format := if prec >= max_prec then @@ -66,10 +68,12 @@ def Repr.addAppParen (f : Format) (prec : Nat) : Format := else f +protected def Decidable.repr : Decidable p → Nat → Format + | .isTrue _, prec => Repr.addAppParen "isTrue _" prec + | .isFalse _, prec => Repr.addAppParen "isFalse _" prec + instance : Repr (Decidable p) where - reprPrec - | Decidable.isTrue _, prec => Repr.addAppParen "isTrue _" prec - | Decidable.isFalse _, prec => Repr.addAppParen "isFalse _" prec + reprPrec := Decidable.repr instance : Repr PUnit.{u+1} where reprPrec _ _ := "PUnit.unit" @@ -109,8 +113,11 @@ export ReprTuple (reprTuple) instance [Repr α] : ReprTuple α where reprTuple a xs := repr a :: xs +protected def Prod.reprTuple [Repr α] [ReprTuple β] : α × β → List Format → List Format + | (a, b), xs => reprTuple b (repr a :: xs) + instance [Repr α] [ReprTuple β] : ReprTuple (α × β) where - reprTuple | (a, b), xs => reprTuple b (repr a :: xs) + reprTuple := Prod.reprTuple protected def Prod.repr [Repr α] [ReprTuple β] : α × β → Nat → Format | (a, b), _ => Format.bracket "(" (Format.joinSep (reprTuple b [repr a]).reverse ("," ++ Format.line)) ")" @@ -118,8 +125,11 @@ protected def Prod.repr [Repr α] [ReprTuple β] : α × β → Nat → Format instance [Repr α] [ReprTuple β] : Repr (α × β) where reprPrec := Prod.repr +protected def Sigma.repr {β : α → Type v} [Repr α] [(x : α) → Repr (β x)] : Sigma β → Nat → Format + | ⟨a, b⟩, _ => Format.bracket "⟨" (repr a ++ ", " ++ repr b) "⟩" + instance {β : α → Type v} [Repr α] [(x : α) → Repr (β x)] : Repr (Sigma β) where - reprPrec | ⟨a, b⟩, _ => Format.bracket "⟨" (repr a ++ ", " ++ repr b) "⟩" + reprPrec := Sigma.repr instance {p : α → Prop} [Repr α] : Repr (Subtype p) where reprPrec s prec := reprPrec s.val prec diff --git a/src/Init/Data/Vector/Basic.lean b/src/Init/Data/Vector/Basic.lean index af00cd9aff39..eef56e719c1b 100644 --- a/src/Init/Data/Vector/Basic.lean +++ b/src/Init/Data/Vector/Basic.lean @@ -29,7 +29,7 @@ structure Vector (α : Type u) (n : Nat) extends Array α where size_toArray : toArray.size = n deriving Repr, DecidableEq -attribute [simp] Vector.size_toArray +attribute [simp, grind] Vector.size_toArray /-- Converts an array to a vector. The resulting vector's size is the array's size. diff --git a/src/Init/Data/Vector/DecidableEq.lean b/src/Init/Data/Vector/DecidableEq.lean index 832157513991..b40c0da417aa 100644 --- a/src/Init/Data/Vector/DecidableEq.lean +++ b/src/Init/Data/Vector/DecidableEq.lean @@ -58,7 +58,7 @@ theorem beq_eq_decide [BEq α] (xs ys : Vector α n) : (mk xs ha == mk ys hb) = (xs == ys) := by simp [BEq.beq] -@[simp] theorem beq_toArray [BEq α] (xs ys : Vector α n) : (xs.toArray == ys.toArray) = (xs == ys) := by +@[simp, grind =] theorem beq_toArray [BEq α] (xs ys : Vector α n) : (xs.toArray == ys.toArray) = (xs == ys) := by simp [beq_eq_decide, Array.beq_eq_decide] @[simp] theorem beq_toList [BEq α] (xs ys : Vector α n) : (xs.toList == ys.toList) = (xs == ys) := by diff --git a/src/Init/Data/Vector/Lemmas.lean b/src/Init/Data/Vector/Lemmas.lean index c219d6bb9ccd..bc94e2644cf0 100644 --- a/src/Init/Data/Vector/Lemmas.lean +++ b/src/Init/Data/Vector/Lemmas.lean @@ -263,57 +263,57 @@ abbrev zipWithIndex_mk := @zipIdx_mk /-! 
### toArray lemmas -/ -@[simp] theorem getElem_toArray {α n} {xs : Vector α n} {i : Nat} (h : i < xs.toArray.size) : +@[simp, grind] theorem getElem_toArray {α n} {xs : Vector α n} {i : Nat} (h : i < xs.toArray.size) : xs.toArray[i] = xs[i]'(by simpa using h) := by cases xs simp -@[simp] theorem getElem?_toArray {α n} {xs : Vector α n} {i : Nat} : +@[simp, grind] theorem getElem?_toArray {α n} {xs : Vector α n} {i : Nat} : xs.toArray[i]? = xs[i]? := by cases xs simp -@[simp] theorem toArray_append {xs : Vector α m} {ys : Vector α n} : +@[simp, grind _=_] theorem toArray_append {xs : Vector α m} {ys : Vector α n} : (xs ++ ys).toArray = xs.toArray ++ ys.toArray := rfl -@[simp] theorem toArray_drop {xs : Vector α n} {i} : +@[simp, grind] theorem toArray_drop {xs : Vector α n} {i} : (xs.drop i).toArray = xs.toArray.extract i xs.size := rfl -@[simp] theorem toArray_empty : (#v[] : Vector α 0).toArray = #[] := rfl +@[simp, grind] theorem toArray_empty : (#v[] : Vector α 0).toArray = #[] := rfl -@[simp] theorem toArray_emptyWithCapacity {cap} : +@[simp, grind] theorem toArray_emptyWithCapacity {cap} : (Vector.emptyWithCapacity (α := α) cap).toArray = Array.emptyWithCapacity cap := rfl @[deprecated toArray_emptyWithCapacity (since := "2025-03-12")] abbrev toArray_mkEmpty := @toArray_emptyWithCapacity -@[simp] theorem toArray_eraseIdx {xs : Vector α n} {i} (h) : +@[simp, grind] theorem toArray_eraseIdx {xs : Vector α n} {i} (h) : (xs.eraseIdx i h).toArray = xs.toArray.eraseIdx i (by simp [h]) := rfl -@[simp] theorem toArray_eraseIdx! {xs : Vector α n} {i} (hi : i < n) : +@[simp, grind] theorem toArray_eraseIdx! {xs : Vector α n} {i} (hi : i < n) : (xs.eraseIdx! i).toArray = xs.toArray.eraseIdx! i := by cases xs; simp_all [Array.eraseIdx!] -@[simp] theorem toArray_insertIdx {xs : Vector α n} {i x} (h) : +@[simp, grind] theorem toArray_insertIdx {xs : Vector α n} {i x} (h) : (xs.insertIdx i x h).toArray = xs.toArray.insertIdx i x (by simp [h]) := rfl -@[simp] theorem toArray_insertIdx! {xs : Vector α n} {i x} (hi : i ≤ n) : +@[simp, grind] theorem toArray_insertIdx! {xs : Vector α n} {i x} (hi : i ≤ n) : (xs.insertIdx! i x).toArray = xs.toArray.insertIdx! i x := by cases xs; simp_all [Array.insertIdx!] 
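As a quick illustration (not part of this patch) of what the added `grind` attributes provide: once the `toArray` lemmas above are registered with `grind`, goals relating `Vector` operations to their underlying `Array` should be dischargeable without citing the lemmas by name. The goals below are a hedged sketch; `simp` already closes them via the existing `@[simp]` attributes, and `grind` is expected to handle them as well.

-- Illustrative examples only, assuming the surrounding `Vector` lemmas are in scope.
example {α : Type} {n : Nat} (xs : Vector α n) (x : α) :
    (xs.push x).toArray = xs.toArray.push x := by
  simp
example {α : Type} {n : Nat} (xs : Vector α n) :
    xs.reverse.toArray.size = n := by
  simp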
-@[simp] theorem toArray_cast {xs : Vector α n} (h : n = m) : +@[simp, grind] theorem toArray_cast {xs : Vector α n} (h : n = m) : (xs.cast h).toArray = xs.toArray := rfl -@[simp] theorem toArray_extract {xs : Vector α n} {start stop} : +@[simp, grind] theorem toArray_extract {xs : Vector α n} {start stop} : (xs.extract start stop).toArray = xs.toArray.extract start stop := rfl -@[simp] theorem toArray_map {f : α → β} {xs : Vector α n} : +@[simp, grind] theorem toArray_map {f : α → β} {xs : Vector α n} : (xs.map f).toArray = xs.toArray.map f := rfl -@[simp] theorem toArray_mapIdx {f : Nat → α → β} {xs : Vector α n} : +@[simp, grind] theorem toArray_mapIdx {f : Nat → α → β} {xs : Vector α n} : (xs.mapIdx f).toArray = xs.toArray.mapIdx f := rfl -@[simp] theorem toArray_mapFinIdx {f : (i : Nat) → α → (h : i < n) → β} {xs : Vector α n} : +@[simp, grind] theorem toArray_mapFinIdx {f : (i : Nat) → α → (h : i < n) → β} {xs : Vector α n} : (xs.mapFinIdx f).toArray = xs.toArray.mapFinIdx (fun i a h => f i a (by simpa [xs.size_toArray] using h)) := rfl @@ -331,145 +331,145 @@ theorem toArray_mapM_go [Monad m] [LawfulMonad m] {f : α → m β} {xs : Vector rfl · simp -@[simp] theorem toArray_mapM [Monad m] [LawfulMonad m] {f : α → m β} {xs : Vector α n} : +@[simp, grind] theorem toArray_mapM [Monad m] [LawfulMonad m] {f : α → m β} {xs : Vector α n} : toArray <$> xs.mapM f = xs.toArray.mapM f := by rcases xs with ⟨xs, rfl⟩ unfold mapM rw [toArray_mapM_go] rfl -@[simp] theorem toArray_ofFn {f : Fin n → α} : (Vector.ofFn f).toArray = Array.ofFn f := rfl +@[simp, grind] theorem toArray_ofFn {f : Fin n → α} : (Vector.ofFn f).toArray = Array.ofFn f := rfl -@[simp] theorem toArray_pop {xs : Vector α n} : xs.pop.toArray = xs.toArray.pop := rfl +@[simp, grind] theorem toArray_pop {xs : Vector α n} : xs.pop.toArray = xs.toArray.pop := rfl -@[simp] theorem toArray_push {xs : Vector α n} {x} : (xs.push x).toArray = xs.toArray.push x := rfl +@[simp, grind] theorem toArray_push {xs : Vector α n} {x} : (xs.push x).toArray = xs.toArray.push x := rfl -@[simp] theorem toArray_beq_toArray [BEq α] {xs : Vector α n} {ys : Vector α n} : +@[simp, grind] theorem toArray_beq_toArray [BEq α] {xs : Vector α n} {ys : Vector α n} : (xs.toArray == ys.toArray) = (xs == ys) := by simp [instBEq, isEqv, Array.instBEq, Array.isEqv, xs.2, ys.2] -@[simp] theorem toArray_range : (Vector.range n).toArray = Array.range n := rfl +@[simp, grind] theorem toArray_range : (Vector.range n).toArray = Array.range n := rfl -@[simp] theorem toArray_reverse (xs : Vector α n) : xs.reverse.toArray = xs.toArray.reverse := rfl +@[simp, grind] theorem toArray_reverse (xs : Vector α n) : xs.reverse.toArray = xs.toArray.reverse := rfl -@[simp] theorem toArray_set {xs : Vector α n} {i x} (h) : +@[simp, grind] theorem toArray_set {xs : Vector α n} {i x} (h) : (xs.set i x).toArray = xs.toArray.set i x (by simpa using h):= rfl -@[simp] theorem toArray_set! {xs : Vector α n} {i x} : +@[simp, grind] theorem toArray_set! {xs : Vector α n} {i x} : (xs.set! i x).toArray = xs.toArray.set! 
i x := rfl -@[simp] theorem toArray_setIfInBounds {xs : Vector α n} {i x} : +@[simp, grind] theorem toArray_setIfInBounds {xs : Vector α n} {i x} : (xs.setIfInBounds i x).toArray = xs.toArray.setIfInBounds i x := rfl -@[simp] theorem toArray_singleton {x : α} : (Vector.singleton x).toArray = #[x] := rfl +@[simp, grind] theorem toArray_singleton {x : α} : (Vector.singleton x).toArray = #[x] := rfl -@[simp] theorem toArray_swap {xs : Vector α n} {i j} (hi hj) : (xs.swap i j).toArray = +@[simp, grind] theorem toArray_swap {xs : Vector α n} {i j} (hi hj) : (xs.swap i j).toArray = xs.toArray.swap i j (by simp [hi, hj]) (by simp [hi, hj]) := rfl -@[simp] theorem toArray_swapIfInBounds {xs : Vector α n} {i j} : +@[simp, grind] theorem toArray_swapIfInBounds {xs : Vector α n} {i j} : (xs.swapIfInBounds i j).toArray = xs.toArray.swapIfInBounds i j := rfl -@[simp] theorem toArray_swapAt {xs : Vector α n} {i x} (h) : +theorem toArray_swapAt {xs : Vector α n} {i x} (h) : ((xs.swapAt i x).fst, (xs.swapAt i x).snd.toArray) = ((xs.toArray.swapAt i x (by simpa using h)).fst, (xs.toArray.swapAt i x (by simpa using h)).snd) := rfl -@[simp] theorem toArray_swapAt! {xs : Vector α n} {i x} : +theorem toArray_swapAt! {xs : Vector α n} {i x} : ((xs.swapAt! i x).fst, (xs.swapAt! i x).snd.toArray) = ((xs.toArray.swapAt! i x).fst, (xs.toArray.swapAt! i x).snd) := rfl -@[simp] theorem toArray_take {xs : Vector α n} {i} : (xs.take i).toArray = xs.toArray.take i := rfl +@[simp, grind] theorem toArray_take {xs : Vector α n} {i} : (xs.take i).toArray = xs.toArray.take i := rfl -@[simp] theorem toArray_zipIdx {xs : Vector α n} (k : Nat := 0) : +@[simp, grind] theorem toArray_zipIdx {xs : Vector α n} (k : Nat := 0) : (xs.zipIdx k).toArray = xs.toArray.zipIdx k := rfl -@[simp] theorem toArray_zipWith {f : α → β → γ} {as : Vector α n} {bs : Vector β n} : +@[simp, grind] theorem toArray_zipWith {f : α → β → γ} {as : Vector α n} {bs : Vector β n} : (Vector.zipWith f as bs).toArray = Array.zipWith f as.toArray bs.toArray := rfl -@[simp] theorem anyM_toArray [Monad m] {p : α → m Bool} {xs : Vector α n} : +@[simp, grind] theorem anyM_toArray [Monad m] {p : α → m Bool} {xs : Vector α n} : xs.toArray.anyM p = xs.anyM p := by cases xs simp -@[simp] theorem allM_toArray [Monad m] {p : α → m Bool} {xs : Vector α n} : +@[simp, grind] theorem allM_toArray [Monad m] {p : α → m Bool} {xs : Vector α n} : xs.toArray.allM p = xs.allM p := by cases xs simp -@[simp] theorem any_toArray {p : α → Bool} {xs : Vector α n} : +@[simp, grind] theorem any_toArray {p : α → Bool} {xs : Vector α n} : xs.toArray.any p = xs.any p := by cases xs simp -@[simp] theorem all_toArray {p : α → Bool} {xs : Vector α n} : +@[simp, grind] theorem all_toArray {p : α → Bool} {xs : Vector α n} : xs.toArray.all p = xs.all p := by cases xs simp -@[simp] theorem countP_toArray {p : α → Bool} {xs : Vector α n} : +@[simp, grind] theorem countP_toArray {p : α → Bool} {xs : Vector α n} : xs.toArray.countP p = xs.countP p := by cases xs simp -@[simp] theorem count_toArray [BEq α] {a : α} {xs : Vector α n} : +@[simp, grind] theorem count_toArray [BEq α] {a : α} {xs : Vector α n} : xs.toArray.count a = xs.count a := by cases xs simp -@[simp] theorem replace_toArray [BEq α] {xs : Vector α n} {a b} : +@[simp, grind] theorem replace_toArray [BEq α] {xs : Vector α n} {a b} : xs.toArray.replace a b = (xs.replace a b).toArray := rfl -@[simp] theorem find?_toArray {p : α → Bool} {xs : Vector α n} : +@[simp, grind] theorem find?_toArray {p : α → Bool} {xs : Vector α n} : 
xs.toArray.find? p = xs.find? p := by cases xs simp -@[simp] theorem findSome?_toArray {f : α → Option β} {xs : Vector α n} : +@[simp, grind] theorem findSome?_toArray {f : α → Option β} {xs : Vector α n} : xs.toArray.findSome? f = xs.findSome? f := by cases xs simp -@[simp] theorem findRev?_toArray {p : α → Bool} {xs : Vector α n} : +@[simp, grind] theorem findRev?_toArray {p : α → Bool} {xs : Vector α n} : xs.toArray.findRev? p = xs.findRev? p := by cases xs simp -@[simp] theorem findSomeRev?_toArray {f : α → Option β} {xs : Vector α n} : +@[simp, grind] theorem findSomeRev?_toArray {f : α → Option β} {xs : Vector α n} : xs.toArray.findSomeRev? f = xs.findSomeRev? f := by cases xs simp -@[simp] theorem findM?_toArray [Monad m] {p : α → m Bool} {xs : Vector α n} : +@[simp, grind] theorem findM?_toArray [Monad m] {p : α → m Bool} {xs : Vector α n} : xs.toArray.findM? p = xs.findM? p := by cases xs simp -@[simp] theorem findSomeM?_toArray [Monad m] {f : α → m (Option β)} {xs : Vector α n} : +@[simp, grind] theorem findSomeM?_toArray [Monad m] {f : α → m (Option β)} {xs : Vector α n} : xs.toArray.findSomeM? f = xs.findSomeM? f := by cases xs simp -@[simp] theorem findRevM?_toArray [Monad m] {p : α → m Bool} {xs : Vector α n} : +@[simp, grind] theorem findRevM?_toArray [Monad m] {p : α → m Bool} {xs : Vector α n} : xs.toArray.findRevM? p = xs.findRevM? p := by rcases xs with ⟨xs, rfl⟩ simp -@[simp] theorem findSomeRevM?_toArray [Monad m] {f : α → m (Option β)} {xs : Vector α n} : +@[simp, grind] theorem findSomeRevM?_toArray [Monad m] {f : α → m (Option β)} {xs : Vector α n} : xs.toArray.findSomeRevM? f = xs.findSomeRevM? f := by rcases xs with ⟨xs, rfl⟩ simp -@[simp] theorem finIdxOf?_toArray [BEq α] {a : α} {xs : Vector α n} : +@[simp, grind] theorem finIdxOf?_toArray [BEq α] {a : α} {xs : Vector α n} : xs.toArray.finIdxOf? a = (xs.finIdxOf? a).map (Fin.cast xs.size_toArray.symm) := by rcases xs with ⟨xs, rfl⟩ simp -@[simp] theorem findFinIdx?_toArray {p : α → Bool} {xs : Vector α n} : +@[simp, grind] theorem findFinIdx?_toArray {p : α → Bool} {xs : Vector α n} : xs.toArray.findFinIdx? p = (xs.findFinIdx? p).map (Fin.cast xs.size_toArray.symm) := by rcases xs with ⟨xs, rfl⟩ simp -@[simp] theorem toArray_replicate : (replicate n a).toArray = Array.replicate n a := rfl +@[simp, grind] theorem toArray_replicate : (replicate n a).toArray = Array.replicate n a := rfl @[deprecated toArray_replicate (since := "2025-03-18")] abbrev toArray_mkVector := @toArray_replicate @@ -3082,7 +3082,7 @@ set_option linter.indexVariables false in /-! ### swap -/ -theorem getElem_swap {xs : Vector α n} {i j : Nat} (hi hj) {k : Nat} (hk : k < n) : +@[grind] theorem getElem_swap {xs : Vector α n} {i j : Nat} (hi hj) {k : Nat} (hk : k < n) : (xs.swap i j hi hj)[k] = if k = i then xs[j] else if k = j then xs[i] else xs[k] := by cases xs simp_all [Array.getElem_swap] @@ -3099,6 +3099,13 @@ theorem getElem_swap {xs : Vector α n} {i j : Nat} (hi hj) {k : Nat} (hk : k < (hi' : k ≠ i) (hj' : k ≠ j) : (xs.swap i j hi hj)[k] = xs[k] := by simp_all [getElem_swap] +@[grind] +theorem getElem?_swap {xs : Vector α n} {i j : Nat} (hi hj) {k : Nat} : (xs.swap i j hi hj)[k]? = + if j = k then some xs[i] else if i = k then some xs[j] else xs[k]? 
:= by + rcases xs with ⟨xs, rfl⟩ + simp [Array.getElem?_swap] + + @[simp] theorem swap_swap {xs : Vector α n} {i j : Nat} (hi hj) : (xs.swap i j hi hj).swap i j hi hj = xs := by cases xs diff --git a/src/Init/Data/Vector/Lex.lean b/src/Init/Data/Vector/Lex.lean index 8c6b932e251f..df09759e6ef8 100644 --- a/src/Init/Data/Vector/Lex.lean +++ b/src/Init/Data/Vector/Lex.lean @@ -18,8 +18,8 @@ namespace Vector /-! ### Lexicographic ordering -/ -@[simp] theorem lt_toArray [LT α] {xs ys : Vector α n} : xs.toArray < ys.toArray ↔ xs < ys := Iff.rfl -@[simp] theorem le_toArray [LT α] {xs ys : Vector α n} : xs.toArray ≤ ys.toArray ↔ xs ≤ ys := Iff.rfl +@[simp, grind =] theorem lt_toArray [LT α] {xs ys : Vector α n} : xs.toArray < ys.toArray ↔ xs < ys := Iff.rfl +@[simp, grind =] theorem le_toArray [LT α] {xs ys : Vector α n} : xs.toArray ≤ ys.toArray ↔ xs ≤ ys := Iff.rfl @[simp] theorem lt_toList [LT α] {xs ys : Vector α n} : xs.toList < ys.toList ↔ xs < ys := Iff.rfl @[simp] theorem le_toList [LT α] {xs ys : Vector α n} : xs.toList ≤ ys.toList ↔ xs ≤ ys := Iff.rfl @@ -40,7 +40,7 @@ protected theorem not_le_iff_gt [DecidableEq α] [LT α] [DecidableLT α] {xs ys simp [Vector.lex, Array.lex, n₁, n₂] rfl -@[simp] theorem lex_toArray [BEq α] {lt : α → α → Bool} {xs ys : Vector α n} : +@[simp, grind =] theorem lex_toArray [BEq α] {lt : α → α → Bool} {xs ys : Vector α n} : xs.toArray.lex ys.toArray lt = xs.lex ys lt := by cases xs cases ys diff --git a/src/Init/Notation.lean b/src/Init/Notation.lean index d0b705849eb7..a8b7ba215b90 100644 --- a/src/Init/Notation.lean +++ b/src/Init/Notation.lean @@ -621,9 +621,6 @@ This is the same as `#eval show MetaM Unit from do discard doSeq`. -/ syntax (name := runMeta) "run_meta " doSeq : command -set_option linter.missingDocs false in -syntax guardMsgsFilterSeverity := &"info" <|> &"warning" <|> &"error" <|> &"all" - /-- `#reduce <expression>` reduces the expression `<expression>` to its normal form. This involves applying reduction rules until no further reduction is possible. @@ -640,15 +637,27 @@ of expressions. -/ syntax (name := reduceCmd) "#reduce " (atomic("(" &"proofs" " := " &"true" ")"))? (atomic("(" &"types" " := " &"true" ")"))? term : command +set_option linter.missingDocs false in +syntax guardMsgsFilterAction := &"check" <|> &"drop" <|> &"pass" + +set_option linter.missingDocs false in +syntax guardMsgsFilterSeverity := &"trace" <|> &"info" <|> &"warning" <|> &"error" <|> &"all" + /-- A message filter specification for `#guard_msgs`. -- `info`, `warning`, `error`: capture messages with the given severity level. -- `all`: capture all messages (the default). -- `drop info`, `drop warning`, `drop error`: drop messages with the given severity level. -- `drop all`: drop every message. -These filters are processed in left-to-right order. +- `info`, `warning`, `error`: capture (non-trace) messages with the given severity level. +- `trace`: captures trace messages +- `all`: capture all messages. + +The filters can be prefixed with +- `check` (the default): capture and check the message +- `drop`: drop the message +- `pass`: let the message pass through + +If no filter is specified, `check all` is assumed. Otherwise, these filters are processed in +left-to-right order, with an implicit `pass all` at the end. -/ -syntax guardMsgsFilter := &"drop"? guardMsgsFilterSeverity +syntax guardMsgsFilter := guardMsgsFilterAction ? 
guardMsgsFilterSeverity set_option linter.missingDocs false in syntax guardMsgsWhitespaceArg := &"exact" <|> &"normalized" <|> &"lax" @@ -719,13 +728,20 @@ In general, `#guard_msgs` accepts a comma-separated list of configuration clause ``` #guard_msgs (configElt,*) in cmd ``` -By default, the configuration list is `(all, whitespace := normalized, ordering := exact)`. +By default, the configuration list is `(check all, whitespace := normalized, ordering := exact)`. + +Message filters select messages by severity: +- `info`, `warning`, `error`: (non-trace) messages with the given severity level. +- `trace`: trace messages +- `all`: all messages. + +The filters can be prefixed with the action to take: +- `check` (the default): capture and check the message +- `drop`: drop the message +- `pass`: let the message pass through -Message filters (processed in left-to-right order): -- `info`, `warning`, `error`: capture messages with the given severity level. -- `all`: capture all messages (the default). -- `drop info`, `drop warning`, `drop error`: drop messages with the given severity level. -- `drop all`: drop every message. +If no filter is specified, `check all` is assumed. Otherwise, these filters are processed in +left-to-right order, with an implicit `pass all` at the end. Whitespace handling (after trimming leading and trailing whitespace): - `whitespace := exact` requires an exact whitespace match. diff --git a/src/Init/System/Promise.lean b/src/Init/System/Promise.lean index 15d37e34451e..9c1048f078a4 100644 --- a/src/Init/System/Promise.lean +++ b/src/Init/System/Promise.lean @@ -14,7 +14,7 @@ namespace IO private opaque PromisePointed : NonemptyType.{0} -private structure PromiseImpl (α : Type) : Type where +structure PromiseImpl (α : Type) : Type where prom : PromisePointed.type h : Nonempty α diff --git a/src/Lean/AddDecl.lean b/src/Lean/AddDecl.lean index 3bc8c4968d28..ad172ed58893 100644 --- a/src/Lean/AddDecl.lean +++ b/src/Lean/AddDecl.lean @@ -93,9 +93,13 @@ def addDecl (decl : Declaration) : CoreM Unit := do let mut exportedKind? := none let (name, info, kind) ← match decl with | .thmDecl thm => - if (← getEnv).header.isModule && !isSimpleRflProof thm.value && - -- TODO: this is horrible... - !looksLikeRelevantTheoremProofType thm.type then + let exportProof := !(← getEnv).header.isModule || + -- We should preserve rfl theorems but also we should not override a decision to hide by the + -- MutualDef elaborator via `withoutExporting` + (← getEnv).isExporting && isSimpleRflProof thm.value || + -- TODO: this is horrible... + looksLikeRelevantTheoremProofType thm.type + if !exportProof then exportedInfo? := some <| .axiomInfo { thm with isUnsafe := false } exportedKind? := some .axiom pure (thm.name, .thmInfo thm, .thm) diff --git a/src/Lean/Compiler/IR.lean b/src/Lean/Compiler/IR.lean index 99233be37cde..28fa30bc9a18 100644 --- a/src/Lean/Compiler/IR.lean +++ b/src/Lean/Compiler/IR.lean @@ -22,6 +22,7 @@ import Lean.Compiler.IR.ElimDeadBranches import Lean.Compiler.IR.EmitC import Lean.Compiler.IR.CtorLayout import Lean.Compiler.IR.Sorry +import Lean.Compiler.IR.ToIR -- The following imports are not required by the compiler. They are here to ensure that there -- are no orphaned modules. diff --git a/src/Lean/Compiler/IR/ToIR.lean b/src/Lean/Compiler/IR/ToIR.lean new file mode 100644 index 000000000000..9ffc86e402a4 --- /dev/null +++ b/src/Lean/Compiler/IR/ToIR.lean @@ -0,0 +1,411 @@ +/- +Copyright (c) 2024 Lean FRO, LLC. All rights reserved. 
+Released under Apache 2.0 license as described in the file LICENSE. +Authors: Cameron Zwarich +-/ +prelude +import Lean.Compiler.LCNF.Basic +import Lean.Compiler.LCNF.CompilerM +import Lean.Compiler.LCNF.PhaseExt +import Lean.Compiler.IR.Basic +import Lean.Compiler.IR.CompilerM +import Lean.Compiler.IR.CtorLayout +import Lean.CoreM +import Lean.Environment + +namespace Lean.IR + +open Lean.Compiler (LCNF.AltCore LCNF.Arg LCNF.Code LCNF.Decl LCNF.DeclValue LCNF.LCtx LCNF.LetDecl + LCNF.LetValue LCNF.LitValue LCNF.Param LCNF.getMonoDecl?) + +namespace ToIR + +inductive FVarClassification where + | var (id : VarId) + | joinPoint (id : JoinPointId) + | erased + +structure BuilderState where + fvars : Std.HashMap FVarId FVarClassification := {} + nextId : Nat := 1 + +abbrev M := StateT BuilderState CoreM + +def M.run (x : M α) : CoreM α := do + x.run' {} + +def bindVar (fvarId : FVarId) : M VarId := do + modifyGet fun s => + let varId := { idx := s.nextId } + ⟨varId, { s with fvars := s.fvars.insertIfNew fvarId (.var varId), + nextId := s.nextId + 1 }⟩ + +def bindVarToVarId (fvarId : FVarId) (varId : VarId) : M Unit := do + modify fun s => { s with fvars := s.fvars.insertIfNew fvarId (.var varId) } + +def newVar : M VarId := do + modifyGet fun s => + let varId := { idx := s.nextId } + ⟨varId, { s with nextId := s.nextId + 1 }⟩ + +def bindJoinPoint (fvarId : FVarId) : M JoinPointId := do + modifyGet fun s => + let joinPointId := { idx := s.nextId } + ⟨joinPointId, { s with fvars := s.fvars.insertIfNew fvarId (.joinPoint joinPointId), + nextId := s.nextId + 1 }⟩ + +def bindErased (fvarId : FVarId) : M Unit := do + modify fun s => { s with fvars := s.fvars.insertIfNew fvarId .erased } + +def findDecl (n : Name) : M (Option Decl) := + return findEnvDecl (← Lean.getEnv) n + +def addDecl (d : Decl) : M Unit := + Lean.modifyEnv fun env => declMapExt.addEntry (env.addExtraName d.name) d + +def lowerLitValue (v : LCNF.LitValue) : LitVal := + match v with + | .natVal n => .num n + | .strVal s => .str s + +-- TODO: This should be cached. +def lowerEnumToScalarType (name : Name) : M (Option IRType) := do + let env ← Lean.getEnv + let some (.inductInfo inductiveVal) := env.find? name | return none + let ctorNames := inductiveVal.ctors + let numCtors := ctorNames.length + for ctorName in ctorNames do + let some (.ctorInfo ctorVal) := env.find? ctorName | panic! "expected valid constructor name" + if ctorVal.type.isForall then return none + return if numCtors == 1 then + none + else if numCtors < Nat.pow 2 8 then + some .uint8 + else if numCtors < Nat.pow 2 16 then + some .uint16 + else if numCtors < Nat.pow 2 32 then + some .uint32 + else + none + +def lowerType (e : Lean.Expr) : M IRType := do + match e with + | .const name .. => + match name with + | ``UInt8 | ``Bool => return .uint8 + | ``UInt16 => return .uint16 + | ``UInt32 => return .uint32 + | ``UInt64 => return .uint64 + | ``USize => return .usize + | ``Float => return .float + | ``Float32 => return .float32 + | ``lcErased => return .irrelevant + | _ => + if let some scalarType ← lowerEnumToScalarType name then + return scalarType + else + return .object + | .app f _ => + if let .const name _ := f.headBeta then + if let some scalarType ← lowerEnumToScalarType name then + return scalarType + else + return .object + else + return .object + | .forallE .. => return .object + | _ => panic! "invalid type" + +-- TODO: This should be cached. 
+def getCtorInfo (name : Name) : M (CtorInfo × (Array CtorFieldInfo)) := do + match getCtorLayout (← Lean.getEnv) name with + | .ok ctorLayout => + return ⟨{ + name, + cidx := ctorLayout.cidx, + size := ctorLayout.numObjs, + usize := ctorLayout.numUSize, + ssize := ctorLayout.scalarSize + }, ctorLayout.fieldInfo.toArray⟩ + | .error .. => panic! "unrecognized constructor" + +def lowerArg (a : LCNF.Arg) : M Arg := do + match a with + | .fvar fvarId => + match (← get).fvars[fvarId]? with + | some (.var varId) => return .var varId + | some .erased => return .irrelevant + | some (.joinPoint ..) | none => panic! "unexpected value" + | .erased | .type .. => return .irrelevant + +inductive TranslatedProj where + | expr (e : Expr) + | erased + deriving Inhabited + +def lowerProj (base : VarId) (ctorInfo : CtorInfo) (field : CtorFieldInfo) + : TranslatedProj × IRType := + match field with + | .object i => ⟨.expr (.proj i base), .object⟩ + | .usize i => ⟨.expr (.uproj i base), .usize⟩ + | .scalar _ offset irType => ⟨.expr (.sproj (ctorInfo.size + ctorInfo.usize) offset base), irType⟩ + | .irrelevant => ⟨.erased, .irrelevant⟩ + +def lowerParam (p : LCNF.Param) : M Param := do + let x ← bindVar p.fvarId + let ty ← lowerType p.type + return { x, borrow := p.borrow, ty } + +mutual +partial def lowerCode (c : LCNF.Code) : M FnBody := do + match c with + | .let decl k => lowerLet decl k + | .jp decl k => + let joinPoint ← bindJoinPoint decl.fvarId + let params ← decl.params.mapM lowerParam + let body ← lowerCode decl.value + return .jdecl joinPoint params body (← lowerCode k) + | .jmp fvarId args => + match (← get).fvars[fvarId]? with + | some (.joinPoint joinPointId) => + return .jmp joinPointId (← args.mapM lowerArg) + | some (.var ..) | some .erased | none => panic! "unexpected value" + | .cases cases => + match (← get).fvars[cases.discr]? with + | some (.var varId) => + return .case cases.typeName + varId + (← lowerType cases.resultType) + (← cases.alts.mapM (lowerAlt varId)) + | some (.joinPoint ..) | some .erased | none => panic! "unexpected value" + | .return fvarId => + let arg := match (← get).fvars[fvarId]? with + | some (.var varId) => .var varId + | some .erased => .irrelevant + | some (.joinPoint ..) | none => panic! "unexpected value" + return .ret arg + | .unreach .. => return .unreachable + | .fun .. => panic! "all local functions should be λ-lifted" + +partial def lowerLet (decl : LCNF.LetDecl) (k : LCNF.Code) : M FnBody := do + let mkVar (v : VarId) : M FnBody := do + bindVarToVarId decl.fvarId v + lowerCode k + let mkExpr (e : Expr) : M FnBody := do + let var ← bindVar decl.fvarId + let type ← match e with + | .ctor .. | .pap .. | .proj .. => pure <| .object + | _ => lowerType decl.type + return .vdecl var type e (← lowerCode k) + let mkErased (_ : Unit) : M FnBody := do + bindErased decl.fvarId + lowerCode k + let mkPartialApp (e : Expr) (restArgs : Array Arg) : M FnBody := do + let var ← bindVar decl.fvarId + let tmpVar ← newVar + let type ← match e with + | .ctor .. | .pap .. | .proj .. => pure <| .object + | _ => lowerType decl.type + return .vdecl tmpVar .object e (.vdecl var type (.ap tmpVar restArgs) (← lowerCode k)) + let tryIrDecl? (name : Name) (args : Array Arg) : M (Option FnBody) := do + if let some decl ← LCNF.getMonoDecl? 
name then + let numArgs := args.size + let numParams := decl.params.size + if numArgs < numParams then + return some (← mkExpr (.pap name args)) + else if numArgs == numParams then + return some (← mkExpr (.fap name args)) + else + let firstArgs := args.extract 0 numParams + let restArgs := args.extract numParams numArgs + return some (← mkPartialApp (.fap name firstArgs) restArgs) + else + return none + + match decl.value with + | .value litValue => + mkExpr (.lit (lowerLitValue litValue)) + | .proj typeName i fvarId => + match (← get).fvars[fvarId]? with + | some (.var varId) => + -- TODO: have better pattern matching here + let some (.inductInfo { ctors, .. }) := (← Lean.getEnv).find? typeName + | panic! "projection of non-inductive type" + let ctorName := ctors[0]! + let ⟨ctorInfo, fields⟩ ← getCtorInfo ctorName + let ⟨result, type⟩ := lowerProj varId ctorInfo fields[i]! + match result with + | .expr e => + let var ← bindVar decl.fvarId + return .vdecl var type e (← lowerCode k) + | .erased => + bindErased decl.fvarId + lowerCode k + | some .erased => + bindErased decl.fvarId + lowerCode k + | some (.joinPoint ..) | none => panic! "unexpected value" + | .const ``Nat.succ _ args => + let irArgs ← args.mapM lowerArg + let var ← bindVar decl.fvarId + let tmpVar ← newVar + let k := (.vdecl var .object (.fap ``Nat.add #[irArgs[0]!, (.var tmpVar)]) (← lowerCode k)) + return .vdecl tmpVar .object (.lit (.num 1)) k + | .const name _ args => + let irArgs ← args.mapM lowerArg + if let some code ← tryIrDecl? name irArgs then + return code + else + let env ← Lean.getEnv + match env.find? name with + | some (.ctorInfo ctorVal) => + if isExtern env name then + if let some code ← tryIrDecl? name irArgs then + return code + else + mkExpr (.fap name irArgs) + else + let ⟨ctorInfo, fields⟩ ← getCtorInfo name + let args := args.extract (start := ctorVal.numParams) + let objArgs : Array Arg ← do + let mut result : Array Arg := #[] + for i in [0:fields.size] do + match args[i]! with + | .fvar fvarId => + if let some (.var varId) := (← get).fvars[fvarId]? then + if fields[i]! matches .object .. then + result := result.push (.var varId) + | .type _ | .erased => + if fields[i]! matches .object .. then + result := result.push .irrelevant + pure result + let objVar ← bindVar decl.fvarId + let rec lowerNonObjectFields (_ : Unit) : M FnBody := + let rec loop (usizeCount : Nat) (i : Nat) : M FnBody := do + match args[i]? with + | some (.fvar fvarId) => + match (← get).fvars[fvarId]? with + | some (.var varId) => + match fields[i]! with + | .usize .. => + let k ← loop (usizeCount + 1) (i + 1) + return .uset objVar (ctorInfo.size + usizeCount) varId k + | .scalar _ offset argType => + let k ← loop usizeCount (i + 1) + return .sset objVar (ctorInfo.size + ctorInfo.usize) offset varId argType k + | .object .. | .irrelevant => loop usizeCount (i + 1) + | _ => loop usizeCount (i + 1) + | some (.type _) | some .erased => loop usizeCount (i + 1) + | none => lowerCode k + loop 0 0 + return .vdecl objVar .object (.ctor ctorInfo objArgs) (← lowerNonObjectFields ()) + | some (.axiomInfo ..) => + if name == ``Quot.lcInv then + match irArgs[2]! 
with + | .var varId => mkVar varId + | .irrelevant => mkErased () + else if name == ``lcUnreachable then + return .unreachable + else if let some irDecl ← findDecl name then + let numArgs := irArgs.size + let numParams := irDecl.params.size + if numArgs < numParams then + mkExpr (.pap name irArgs) + else if numArgs == numParams then + mkExpr (.fap name irArgs) + else + let firstArgs := irArgs.extract 0 numParams + let restArgs := irArgs.extract numParams irArgs.size + mkPartialApp (.fap name firstArgs) restArgs + else + throwError f!"axiom '{name}' not supported by code generator; consider marking definition as 'noncomputable'" + | some (.quotInfo ..) => + if name == ``Quot.mk then + match irArgs[2]! with + | .var varId => mkVar varId + | .irrelevant => mkErased () + else + throwError f!"quot {name} unsupported by code generator" + | some (.defnInfo ..) | some (.opaqueInfo ..) => + if let some code ← tryIrDecl? name irArgs then + return code + else + mkExpr (.fap name irArgs) + | some (.recInfo ..) => + throwError f!"code generator does not support recursor '{name}' yet, consider using 'match ... with' and/or structural recursion" + | some (.inductInfo ..) => panic! "induct unsupported by code generator" + | some (.thmInfo ..) => panic! "thm unsupported by code generator" + | none => panic! "reference to unbound name" + | .fvar fvarId args => + match (← get).fvars[fvarId]? with + | some (.var id) => + let irArgs ← args.mapM lowerArg + mkExpr (.ap id irArgs) + | some .erased => mkErased () + | some (.joinPoint ..) | none => panic! "unexpected value" + | .erased => mkErased () + +partial def lowerAlt (discr : VarId) (a : LCNF.AltCore LCNF.Code) : M (AltCore FnBody) := do + match a with + | .alt ctorName params code => + let ⟨ctorInfo, fields⟩ ← getCtorInfo ctorName + let lowerParams (params : Array LCNF.Param) (fields : Array CtorFieldInfo) : M FnBody := do + let rec loop (i : Nat) : M FnBody := do + match params[i]?, fields[i]? with + | some param, some field => + let ⟨result, type⟩ := lowerProj discr ctorInfo field + match result with + | .expr e => + return .vdecl (← bindVar param.fvarId) + type + e + (← loop (i + 1)) + | .erased => + bindErased param.fvarId + loop (i + 1) + | none, none => lowerCode code + | _, _ => panic! "mismatched fields and params" + loop 0 + let body ← lowerParams params fields + return .ctor ctorInfo body + | .default code => + return .default (← lowerCode code) +end + +def lowerResultType (type : Lean.Expr) (arity : Nat) : M IRType := + lowerType (resultTypeForArity type arity) +where resultTypeForArity (type : Lean.Expr) (arity : Nat) : Lean.Expr := + if arity == 0 then + type + else + match type with + | .forallE _ _ b _ => resultTypeForArity b (arity - 1) + | .const ``lcErased _ => mkConst ``lcErased + | _ => panic! "invalid arity" + +def lowerDecl (d : LCNF.Decl) : M (Option Decl) := do + let params ← d.params.mapM lowerParam + let resultType ← lowerResultType d.type d.params.size + match d.value with + | .code code => + let body ← lowerCode code + pure <| some <| .fdecl d.name params resultType body {} + | .extern externAttrData => + if externAttrData.entries.isEmpty then + -- TODO: This matches the behavior of the old compiler, but we should + -- find a better way to handle this. 
+ addDecl (mkDummyExternDecl d.name params resultType) + pure <| none + else + pure <| some <| .extern d.name params resultType externAttrData + +end ToIR + +def toIR (decls: Array LCNF.Decl) : CoreM (Array Decl) := do + let mut irDecls := #[] + for decl in decls do + if let some irDecl ← ToIR.lowerDecl decl |>.run then + irDecls := irDecls.push irDecl + return irDecls + +end Lean.IR diff --git a/src/Lean/Compiler/LCNF/Main.lean b/src/Lean/Compiler/LCNF/Main.lean index 1cac4951e3cb..e59f8b0389b2 100644 --- a/src/Lean/Compiler/LCNF/Main.lean +++ b/src/Lean/Compiler/LCNF/Main.lean @@ -6,6 +6,10 @@ Authors: Leonardo de Moura prelude import Lean.Compiler.Options import Lean.Compiler.ExternAttr +import Lean.Compiler.IR +import Lean.Compiler.IR.Basic +import Lean.Compiler.IR.Checker +import Lean.Compiler.IR.ToIR import Lean.Compiler.LCNF.PassManager import Lean.Compiler.LCNF.Passes import Lean.Compiler.LCNF.PrettyPrinter @@ -62,7 +66,7 @@ def checkpoint (stepName : Name) (decls : Array Decl) : CompilerM Unit := do namespace PassManager -def run (declNames : Array Name) : CompilerM (Array Decl) := withAtLeastMaxRecDepth 8192 do +def run (declNames : Array Name) : CompilerM (Array IR.Decl) := withAtLeastMaxRecDepth 8192 do /- Note: we need to increase the recursion depth because we currently do to save phase1 declarations in .olean files. Then, we have to recursively compile all dependencies, @@ -83,11 +87,25 @@ def run (declNames : Array Name) : CompilerM (Array Decl) := withAtLeastMaxRecDe -- We display the declaration saved in the environment because the names have been normalized let some decl' ← getDeclAt? decl.name .mono | unreachable! Lean.addTrace `Compiler.result m!"size: {decl.size}\n{← ppDecl' decl'}" - return decls + let opts ← getOptions + -- If the new compiler is disabled, then all of the saved IR was built with the old compiler, + -- which causes IR type mismatches with IR generated by the new compiler. + if !(compiler.enableNew.get opts) then + return #[] + let irDecls ← IR.toIR decls + let env ← getEnv + let ⟨log, res⟩ := IR.compile env opts irDecls + for msg in log do + addTrace `Compiler.IR m!"{msg}" + match res with + | .ok env => + setEnv env + return irDecls + | .error s => throwError s end PassManager -def compile (declNames : Array Name) : CoreM (Array Decl) := +def compile (declNames : Array Name) : CoreM (Array IR.Decl) := CompilerM.run <| PassManager.run declNames def showDecl (phase : Phase) (declName : Name) : CoreM Format := do diff --git a/src/Lean/Compiler/LCNF/Util.lean b/src/Lean/Compiler/LCNF/Util.lean index fac45e176490..f81bf222501b 100644 --- a/src/Lean/Compiler/LCNF/Util.lean +++ b/src/Lean/Compiler/LCNF/Util.lean @@ -77,7 +77,7 @@ def getCtorArity? (declName : Name) : CoreM (Option Nat) := do /-- List of types that have builtin runtime support -/ -def builtinRuntimeTypes : List Name := [ +def builtinRuntimeTypes : Array Name := #[ ``String, ``UInt8, ``UInt16, ``UInt32, ``UInt64, ``USize, ``Float, ``Float32, diff --git a/src/Lean/CoreM.lean b/src/Lean/CoreM.lean index 0a3fbd32399d..1f1c729d5bc3 100644 --- a/src/Lean/CoreM.lean +++ b/src/Lean/CoreM.lean @@ -612,20 +612,21 @@ where doCompile := do return let opts ← getOptions if compiler.enableNew.get opts then - compileDeclsNew decls - - let res ← withTraceNode `compiler (fun _ => return m!"compiling old: {decls}") do - return compileDeclsOld (← getEnv) opts decls - match res with - | Except.ok env => setEnv env - | Except.error (.other msg) => - if logErrors then - if let some decl := ref? 
then - checkUnsupported decl -- Generate nicer error message for unsupported recursors and axioms - throwError msg - | Except.error ex => - if logErrors then - throwKernelException ex + try compileDeclsNew decls catch e => + if logErrors then throw e else return () + else + let res ← withTraceNode `compiler (fun _ => return m!"compiling old: {decls}") do + return compileDeclsOld (← getEnv) opts decls + match res with + | Except.ok env => setEnv env + | Except.error (.other msg) => + if logErrors then + if let some decl := ref? then + checkUnsupported decl -- Generate nicer error message for unsupported recursors and axioms + throwError msg + | Except.error ex => + if logErrors then + throwKernelException ex def compileDecl (decl : Declaration) (logErrors := true) : CoreM Unit := do compileDecls (Compiler.getDeclNamesForCodeGen decl) decl logErrors diff --git a/src/Lean/Data/Json/FromToJson.lean b/src/Lean/Data/Json/FromToJson.lean index a6774aff3ddd..f1224cc51bb2 100644 --- a/src/Lean/Data/Json/FromToJson.lean +++ b/src/Lean/Data/Json/FromToJson.lean @@ -47,54 +47,97 @@ instance : ToJson String := ⟨fun s => s⟩ instance : FromJson System.FilePath := ⟨fun j => System.FilePath.mk <$> Json.getStr? j⟩ instance : ToJson System.FilePath := ⟨fun p => p.toString⟩ +protected def _root_.Array.fromJson? [FromJson α] : Json → Except String (Array α) + | Json.arr a => a.mapM fromJson? + | j => throw s!"expected JSON array, got '{j}'" + instance [FromJson α] : FromJson (Array α) where - fromJson? - | Json.arr a => a.mapM fromJson? - | j => throw s!"expected JSON array, got '{j}'" + fromJson? := Array.fromJson? + +protected def _root_.Array.toJson [ToJson α] (a : Array α) : Json := + Json.arr (a.map toJson) -instance [ToJson α] : ToJson (Array α) := - ⟨fun a => Json.arr (a.map toJson)⟩ +instance [ToJson α] : ToJson (Array α) where + toJson := Array.toJson + +protected def _root_.List.fromJson? [FromJson α] (j : Json) : Except String (List α) := + (fromJson? j (α := Array α)).map Array.toList instance [FromJson α] : FromJson (List α) where - fromJson? j := (fromJson? j (α := Array α)).map Array.toList + fromJson? := List.fromJson? + +protected def _root_.List.toJson [ToJson α] (a : List α) : Json := + toJson a.toArray instance [ToJson α] : ToJson (List α) where - toJson xs := toJson xs.toArray + toJson := List.toJson + +protected def _root_.Option.fromJson? [FromJson α] : Json → Except String (Option α) + | Json.null => Except.ok none + | j => some <$> fromJson? j instance [FromJson α] : FromJson (Option α) where - fromJson? - | Json.null => Except.ok none - | j => some <$> fromJson? j + fromJson? := Option.fromJson? + +protected def _root_.Option.toJson [ToJson α] : Option α → Json + | none => Json.null + | some a => toJson a + +instance [ToJson α] : ToJson (Option α) where + toJson := Option.toJson -instance [ToJson α] : ToJson (Option α) := - ⟨fun - | none => Json.null - | some a => toJson a⟩ +protected def _root_.Prod.fromJson? {α : Type u} {β : Type v} [FromJson α] [FromJson β] : Json → Except String (α × β) + | Json.arr #[ja, jb] => do + let ⟨a⟩ : ULift.{v} α := ← (fromJson? ja).map ULift.up + let ⟨b⟩ : ULift.{u} β := ← (fromJson? jb).map ULift.up + return (a, b) + | j => throw s!"expected pair, got '{j}'" instance {α : Type u} {β : Type v} [FromJson α] [FromJson β] : FromJson (α × β) where - fromJson? - | Json.arr #[ja, jb] => do - let ⟨a⟩ : ULift.{v} α := ← (fromJson? ja).map ULift.up - let ⟨b⟩ : ULift.{u} β := ← (fromJson? 
jb).map ULift.up - return (a, b) - | j => throw s!"expected pair, got '{j}'" + fromJson? := Prod.fromJson? + +protected def _root_.Prod.toJson [ToJson α] [ToJson β] : α × β → Json + | (a, b) => Json.arr #[toJson a, toJson b] instance [ToJson α] [ToJson β] : ToJson (α × β) where - toJson := fun (a, b) => Json.arr #[toJson a, toJson b] + toJson := Prod.toJson + +protected def Name.fromJson? (j : Json) : Except String Name := do + let s ← j.getStr? + if s == "[anonymous]" then + return Name.anonymous + else + let n := s.toName + if n.isAnonymous then throw s!"expected a `Name`, got '{j}'" + return n instance : FromJson Name where - fromJson? j := do - let s ← j.getStr? - if s == "[anonymous]" then - return Name.anonymous - else - let n := s.toName - if n.isAnonymous then throw s!"expected a `Name`, got '{j}'" - return n + fromJson? := Name.fromJson? instance : ToJson Name where toJson n := toString n +protected def NameMap.fromJson? [FromJson α] : Json → Except String (NameMap α) + | .obj obj => obj.foldM (init := {}) fun m k v => do + if k == "[anonymous]" then + return m.insert .anonymous (← fromJson? v) + else + let n := k.toName + if n.isAnonymous then + throw s!"expected a `Name`, got '{k}'" + else + return m.insert n (← fromJson? v) + | j => throw s!"expected a `NameMap`, got '{j}'" + +instance [FromJson α] : FromJson (NameMap α) where + fromJson? := NameMap.fromJson? + +protected def NameMap.toJson [ToJson α] (m : NameMap α) : Json := + Json.obj <| m.fold (fun n k v => n.insert compare k.toString (toJson v)) .leaf + +instance [ToJson α] : ToJson (NameMap α) where + toJson := NameMap.toJson + /-- Note that `USize`s and `UInt64`s are stored as strings because JavaScript cannot represent 64-bit numbers. -/ def bignumFromJson? (j : Json) : Except String Nat := do @@ -106,58 +149,77 @@ def bignumFromJson? (j : Json) : Except String Nat := do def bignumToJson (n : Nat) : Json := toString n +protected def _root_.USize.fromJson? (j : Json) : Except String USize := do + let n ← bignumFromJson? j + if n ≥ USize.size then + throw "value '{j}' is too large for `USize`" + return USize.ofNat n + instance : FromJson USize where - fromJson? j := do - let n ← bignumFromJson? j - if n ≥ USize.size then - throw "value '{j}' is too large for `USize`" - return USize.ofNat n + fromJson? := USize.fromJson? instance : ToJson USize where toJson v := bignumToJson (USize.toNat v) +protected def _root_.UInt64.fromJson? (j : Json) : Except String UInt64 := do + let n ← bignumFromJson? j + if n ≥ UInt64.size then + throw "value '{j}' is too large for `UInt64`" + return UInt64.ofNat n + instance : FromJson UInt64 where - fromJson? j := do - let n ← bignumFromJson? j - if n ≥ UInt64.size then - throw "value '{j}' is too large for `UInt64`" - return UInt64.ofNat n + fromJson? := UInt64.fromJson? instance : ToJson UInt64 where toJson v := bignumToJson (UInt64.toNat v) +protected def _root_.Float.toJson (x : Float) : Json := + match JsonNumber.fromFloat? x with + | Sum.inl e => Json.str e + | Sum.inr n => Json.num n + instance : ToJson Float where - toJson x := - match JsonNumber.fromFloat? x with - | Sum.inl e => Json.str e - | Sum.inr n => Json.num n + toJson := Float.toJson + +protected def _root_.Float.fromJson? : Json → Except String Float + | (Json.str "Infinity") => Except.ok (1.0 / 0.0) + | (Json.str "-Infinity") => Except.ok (-1.0 / 0.0) + | (Json.str "NaN") => Except.ok (0.0 / 0.0) + | (Json.num jn) => Except.ok jn.toFloat + | _ => Except.error "Expected a number or a string 'Infinity', '-Infinity', 'NaN'." 
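A brief aside (not part of this patch) on the refactoring pattern used throughout this hunk: each instance method is pulled out into a named `protected def`, and the instance then simply delegates to it, so the function can also be referenced, reused, or unfolded on its own. A minimal hedged sketch of the same pattern, using a made-up `Score` type:

-- Hypothetical example; `Score` is not part of this patch and assumes the
-- surrounding `Lean.Data.Json` context (in particular the `ToJson Nat` instance).
structure Score where
  value : Nat

protected def Score.toJson (s : Score) : Json :=
  toJson s.value  -- reuse the existing `ToJson Nat` instance

instance : ToJson Score where
  toJson := Score.toJson

-- The named definition is usable independently of the instance:
-- `Score.toJson ⟨3⟩` and `toJson (⟨3⟩ : Score)` yield the same JSON value.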
instance : FromJson Float where - fromJson? := fun - | (Json.str "Infinity") => Except.ok (1.0 / 0.0) - | (Json.str "-Infinity") => Except.ok (-1.0 / 0.0) - | (Json.str "NaN") => Except.ok (0.0 / 0.0) - | (Json.num jn) => Except.ok jn.toFloat - | _ => Except.error "Expected a number or a string 'Infinity', '-Infinity', 'NaN'." + fromJson? := Float.fromJson? + +protected def RBMap.toJson [ToJson α] (m : RBMap String α cmp) : Json := + Json.obj <| RBNode.map (fun _ => toJson) <| m.val instance [ToJson α] : ToJson (RBMap String α cmp) where - toJson m := Json.obj <| RBNode.map (fun _ => toJson) <| m.val + toJson := RBMap.toJson + +protected def RBMap.fromJson? [FromJson α] (j : Json) : Except String (RBMap String α cmp) := do + let o ← j.getObj? + o.foldM (fun x k v => x.insert k <$> fromJson? v) ∅ instance {cmp} [FromJson α] : FromJson (RBMap String α cmp) where - fromJson? j := do - let o ← j.getObj? - o.foldM (fun x k v => x.insert k <$> fromJson? v) ∅ + fromJson? := RBMap.fromJson? namespace Json -instance : FromJson Structured := ⟨fun - | arr a => return Structured.arr a - | obj o => return Structured.obj o - | j => throw s!"expected structured object, got '{j}'"⟩ +protected def Structured.fromJson? : Json → Except String Structured + | .arr a => return Structured.arr a + | .obj o => return Structured.obj o + | j => throw s!"expected structured object, got '{j}'" + +instance : FromJson Structured where + fromJson? := Structured.fromJson? + +protected def Structured.toJson : Structured → Json + | .arr a => .arr a + | .obj o => .obj o -instance : ToJson Structured := ⟨fun - | Structured.arr a => arr a - | Structured.obj o => obj o⟩ +instance : ToJson Structured where + toJson := Structured.toJson def toStructured? [ToJson α] (v : α) : Except String Structured := fromJson? 
(toJson v) diff --git a/src/Lean/Data/NameMap.lean b/src/Lean/Data/NameMap.lean index eed36180e1ff..67fa15bf7550 100644 --- a/src/Lean/Data/NameMap.lean +++ b/src/Lean/Data/NameMap.lean @@ -18,6 +18,8 @@ def NameMap (α : Type) := RBMap Name α Name.quickCmp namespace NameMap variable {α : Type} +instance [Repr α] : Repr (NameMap α) := inferInstanceAs (Repr (RBMap Name α Name.quickCmp)) + instance (α : Type) : EmptyCollection (NameMap α) := ⟨mkNameMap α⟩ instance (α : Type) : Inhabited (NameMap α) where diff --git a/src/Lean/Declaration.lean b/src/Lean/Declaration.lean index c695a47bee48..38fd16f95ced 100644 --- a/src/Lean/Declaration.lean +++ b/src/Lean/Declaration.lean @@ -5,6 +5,7 @@ Authors: Leonardo de Moura -/ prelude import Lean.Expr +import Lean.Data.Json namespace Lean /-- @@ -84,7 +85,7 @@ structure ConstantVal where name : Name levelParams : List Name type : Expr - deriving Inhabited, BEq + deriving Inhabited, BEq structure AxiomVal extends ConstantVal where isUnsafe : Bool @@ -103,7 +104,7 @@ def mkAxiomValEx (name : Name) (levelParams : List Name) (type : Expr) (isUnsafe inductive DefinitionSafety where | «unsafe» | safe | «partial» - deriving Inhabited, BEq, Repr + deriving Inhabited, ToJson, BEq, Repr structure DefinitionVal extends ConstantVal where value : Expr @@ -529,3 +530,5 @@ def mkRecName (declName : Name) : Name := Name.mkStr declName "rec" end Lean + + diff --git a/src/Lean/Elab/App.lean b/src/Lean/Elab/App.lean index e4b13fb609e9..67dd2c1586a8 100644 --- a/src/Lean/Elab/App.lean +++ b/src/Lean/Elab/App.lean @@ -1395,8 +1395,8 @@ private def elabAppLValsAux (namedArgs : Array NamedArg) (args : Array Arg) (exp | LValResolution.projFn baseStructName structName fieldName => let f ← mkBaseProjections baseStructName structName f let some info := getFieldInfo? (← getEnv) baseStructName fieldName | unreachable! - if isPrivateNameFromImportedModule (← getEnv) info.projFn then - throwError "field '{fieldName}' from structure '{structName}' is private" + --if isPrivateNameFromImportedModule (← getEnv) info.projFn then + -- throwError "field '{fieldName}' from structure '{structName}' is private" let projFn ← mkConst info.projFn let projFn ← addProjTermInfo lval.getRef projFn if lvals.isEmpty then diff --git a/src/Lean/Elab/BuiltinEvalCommand.lean b/src/Lean/Elab/BuiltinEvalCommand.lean index adefdfc186d4..0313dcb18acf 100644 --- a/src/Lean/Elab/BuiltinEvalCommand.lean +++ b/src/Lean/Elab/BuiltinEvalCommand.lean @@ -153,7 +153,7 @@ private def mkMessageData (e : Expr) : MetaM Expr := do <|> (return mkApp (mkConst ``MessageData.ofFormat) (← mkFormat e)) <|> do throwError m!"could not synthesize a 'ToExpr', 'Repr', or 'ToString' instance for type{indentExpr (← inferType e)}" -private structure EvalAction where +structure EvalAction where eval : CommandElabM MessageData /-- Whether to print the result of evaluation. If `some`, the expression is what type to use for the type ascription when `pp.type` is true. -/ diff --git a/src/Lean/Elab/DeclNameGen.lean b/src/Lean/Elab/DeclNameGen.lean index 550ee9cbc8c6..88949980ce38 100644 --- a/src/Lean/Elab/DeclNameGen.lean +++ b/src/Lean/Elab/DeclNameGen.lean @@ -100,7 +100,7 @@ private partial def winnowExpr (e : Expr) : MetaM Expr := do /-- State for name generation. -/ -private structure MkNameState where +structure MkNameState where /-- Keeps track of expressions already visited so that we do not include them again. -/ seen : ExprSet := {} /-- Keeps track of constants that appear in the generated name. 
-/ diff --git a/src/Lean/Elab/Extra.lean b/src/Lean/Elab/Extra.lean index 9aab4b81353b..845f1c612753 100644 --- a/src/Lean/Elab/Extra.lean +++ b/src/Lean/Elab/Extra.lean @@ -12,7 +12,7 @@ import Lean.Elab.BuiltinNotation namespace Lean.Elab.Term open Meta -private def getMonadForIn (expectedType? : Option Expr) : TermElabM Expr := do +def getMonadForIn (expectedType? : Option Expr) : TermElabM Expr := do match expectedType? with | none => throwError "invalid 'for_in%' notation, expected type is not available" | some expectedType => @@ -20,7 +20,7 @@ private def getMonadForIn (expectedType? : Option Expr) : TermElabM Expr := do | some (m, _) => return m | none => throwError "invalid 'for_in%' notation, expected type is not of the form `M α`{indentExpr expectedType}" -private def throwForInFailure (forInInstance : Expr) : TermElabM Expr := +def throwForInFailure (forInInstance : Expr) : TermElabM Expr := throwError "failed to synthesize instance for 'for_in%' notation{indentExpr forInInstance}" @[builtin_term_elab forInMacro] def elabForIn : TermElab := fun stx expectedType? => do @@ -225,7 +225,7 @@ where return .term s info e -- Auxiliary function used at `analyze` -private def hasCoe (fromType toType : Expr) : TermElabM Bool := do +def hasCoe (fromType toType : Expr) : TermElabM Bool := do if (← getEnv).contains ``CoeT then withLocalDeclD `x fromType fun x => do match ← coerceSimple? x toType with @@ -235,21 +235,21 @@ private def hasCoe (fromType toType : Expr) : TermElabM Bool := do else return false -private structure AnalyzeResult where +structure AnalyzeResult where max? : Option Expr := none /-- `true` if there are two types `α` and `β` where we don't have coercions in any direction. -/ hasUncomparable : Bool := false /-- `true` if there are any leaf terms with an unknown type (according to `isUnknown`). -/ hasUnknown : Bool := false -private def isUnknown : Expr → Bool +def isUnknown : Expr → Bool | .mvar .. => true | .app f _ => isUnknown f | .letE _ _ _ b _ => isUnknown b | .mdata _ b => isUnknown b | _ => false -private def analyze (t : Tree) (expectedType? : Option Expr) : TermElabM AnalyzeResult := do +def analyze (t : Tree) (expectedType? : Option Expr) : TermElabM AnalyzeResult := do let max? ← match expectedType? with | none => pure none @@ -309,16 +309,16 @@ where trace[Elab.binop] "uncomparable types: {max}, {type}" modify fun s => { s with hasUncomparable := true } -private def mkBinOp (lazy : Bool) (f : Expr) (lhs rhs : Expr) : TermElabM Expr := do +def mkBinOp (lazy : Bool) (f : Expr) (lhs rhs : Expr) : TermElabM Expr := do let mut rhs := rhs if lazy then rhs ← mkFunUnit rhs elabAppArgs f #[] #[Arg.expr lhs, Arg.expr rhs] (expectedType? := none) (explicit := false) (ellipsis := false) (resultIsOutParamSupport := false) -private def mkUnOp (f : Expr) (arg : Expr) : TermElabM Expr := do +def mkUnOp (f : Expr) (arg : Expr) : TermElabM Expr := do elabAppArgs f #[] #[Arg.expr arg] (expectedType? := none) (explicit := false) (ellipsis := false) (resultIsOutParamSupport := false) -private def toExprCore (t : Tree) : TermElabM Expr := do +def toExprCore (t : Tree) : TermElabM Expr := do match t with | .term _ trees e => modifyInfoState (fun s => { s with trees := s.trees ++ trees }); return e @@ -360,7 +360,7 @@ private def toExprCore (t : Tree) : TermElabM Expr := do If the type of an argument is unknown we should not coerce it to `maxType` because it would prevent the default instance above from being even tried. 
-/ -private def hasHeterogeneousDefaultInstances (f : Expr) (maxType : Expr) (lhs : Bool) : MetaM Bool := do +def hasHeterogeneousDefaultInstances (f : Expr) (maxType : Expr) (lhs : Bool) : MetaM Bool := do let .const fName .. := f | return false let .const typeName .. := maxType.getAppFn | return false let className := fName.getPrefix @@ -380,7 +380,7 @@ private def hasHeterogeneousDefaultInstances (f : Expr) (maxType : Expr) (lhs : For example, suppose `maxType` is `Int`, and `f` is `HPow.hPow`. Then, adding coercions to `maxType` only make sense if we have an instance `HPow Int Int Int`. -/ -private def hasHomogeneousInstance (f : Expr) (maxType : Expr) : MetaM Bool := do +def hasHomogeneousInstance (f : Expr) (maxType : Expr) : MetaM Bool := do let .const fName .. := f | return false let className := fName.getPrefix try diff --git a/src/Lean/Elab/Frontend.lean b/src/Lean/Elab/Frontend.lean index 1ad2e54c583c..2bcab8d31446 100644 --- a/src/Lean/Elab/Frontend.lean +++ b/src/Lean/Elab/Frontend.lean @@ -145,6 +145,7 @@ def runFrontend (errorOnKinds : Array Name := #[]) (plugins : Array System.FilePath := #[]) (printStats : Bool := false) + (setupFileName? : Option System.FilePath := none) : IO (Option Environment) := do let startTime := (← IO.monoNanosNow).toFloat / 1000000000 let inputCtx := Parser.mkInputContext input fileName @@ -152,8 +153,28 @@ def runFrontend -- default to async elaboration; see also `Elab.async` docs let opts := Elab.async.setIfNotSet opts true let ctx := { inputCtx with } + let setup stx := do + if let some file := setupFileName? then + let setup ← ModuleSetup.load file + liftM <| setup.dynlibs.forM Lean.loadDynlib + return .ok { + trustLevel + mainModuleName := setup.name + isModule := setup.isModule + imports := setup.imports + plugins := plugins ++ setup.plugins + modules := setup.modules + -- override cmdline options with header options + opts := opts.mergeBy (fun _ _ hOpt => hOpt) setup.options.toOptions + } + else + return .ok { + imports := stx.imports + isModule := stx.isModule + mainModuleName, opts, trustLevel, plugins + } let processor := Language.Lean.process - let snap ← processor (fun _ => pure <| .ok { mainModuleName, opts, trustLevel, plugins }) none ctx + let snap ← processor setup none ctx let snaps := Language.toSnapshotTree snap let severityOverrides := errorOnKinds.foldl (·.insert · .error) {} diff --git a/src/Lean/Elab/GuardMsgs.lean b/src/Lean/Elab/GuardMsgs.lean index 846a4e0cdf85..697f9fa8db4b 100644 --- a/src/Lean/Elab/GuardMsgs.lean +++ b/src/Lean/Elab/GuardMsgs.lean @@ -31,10 +31,13 @@ private def messageToStringWithoutPos (msg : Message) : BaseIO String := do unless msg.caption == "" do str := msg.caption ++ ":\n" ++ str if !("\n".isPrefixOf str) then str := " " ++ str - match msg.severity with - | MessageSeverity.information => str := "info:" ++ str - | MessageSeverity.warning => str := "warning:" ++ str - | MessageSeverity.error => str := "error:" ++ str + if msg.isTrace then + str := "trace:" ++ str + else + match msg.severity with + | MessageSeverity.information => str := "info:" ++ str + | MessageSeverity.warning => str := "warning:" ++ str + | MessageSeverity.error => str := "error:" ++ str if str.isEmpty || str.back != '\n' then str := str ++ "\n" return str @@ -46,7 +49,7 @@ inductive SpecResult /-- Drop the message and delete it. -/ | drop /-- Do not capture the message. -/ - | passthrough + | pass /-- The method to use when normalizing whitespace, after trimming. 
-/ inductive WhitespaceMode @@ -64,6 +67,25 @@ inductive MessageOrdering /-- Sort the produced messages. -/ | sorted +def parseGuardMsgsFilterAction (action? : Option (TSyntax ``guardMsgsFilterAction)) : + CommandElabM SpecResult := do + if let some action := action? then + match action with + | `(guardMsgsFilterAction| check) => pure .check + | `(guardMsgsFilterAction| drop) => pure .drop + | `(guardMsgsFilterAction| pass) => pure .pass + | _ => throwUnsupportedSyntax + else + pure .check + +def parseGuardMsgsFilterSeverity : TSyntax ``guardMsgsFilterSeverity → CommandElabM (Message → Bool) + | `(guardMsgsFilterSeverity| trace) => pure fun msg => msg.isTrace + | `(guardMsgsFilterSeverity| info) => pure fun msg => !msg.isTrace && msg.severity == .information + | `(guardMsgsFilterSeverity| warning) => pure fun msg => !msg.isTrace && msg.severity == .warning + | `(guardMsgsFilterSeverity| error) => pure fun msg => !msg.isTrace && msg.severity == .error + | `(guardMsgsFilterSeverity| all) => pure fun _ => true + | _ => throwUnsupportedSyntax + /-- Parses a `guardMsgsSpec`. - No specification: check everything. - With a specification: interpret the spec, and if nothing applies pass it through. -/ @@ -79,24 +101,23 @@ def parseGuardMsgsSpec (spec? : Option (TSyntax ``guardMsgsSpec)) : let mut whitespace : WhitespaceMode := .normalized let mut ordering : MessageOrdering := .exact let mut p? : Option (Message → SpecResult) := none - let pushP (s : MessageSeverity) (drop : Bool) (p? : Option (Message → SpecResult)) + let pushP (action : SpecResult) (msgP : Message → Bool) (p? : Option (Message → SpecResult)) (msg : Message) : SpecResult := - let p := p?.getD fun _ => .passthrough - if msg.severity == s then if drop then .drop else .check - else p msg + if msgP msg then + action + else + (p?.getD fun _ => .pass) msg for elt in elts.reverse do match elt with - | `(guardMsgsSpecElt| $[drop%$drop?]? info) => p? := pushP .information drop?.isSome p? - | `(guardMsgsSpecElt| $[drop%$drop?]? warning) => p? := pushP .warning drop?.isSome p? - | `(guardMsgsSpecElt| $[drop%$drop?]? error) => p? := pushP .error drop?.isSome p? - | `(guardMsgsSpecElt| $[drop%$drop?]? all) => p? := some fun _ => if drop?.isSome then .drop else .check + | `(guardMsgsSpecElt| $[$action?]? $sev) => p? := pushP (← parseGuardMsgsFilterAction action?) (← parseGuardMsgsFilterSeverity sev) p? | `(guardMsgsSpecElt| whitespace := exact) => whitespace := .exact | `(guardMsgsSpecElt| whitespace := normalized) => whitespace := .normalized | `(guardMsgsSpecElt| whitespace := lax) => whitespace := .lax | `(guardMsgsSpecElt| ordering := exact) => ordering := .exact | `(guardMsgsSpecElt| ordering := sorted) => ordering := .sorted | _ => throwUnsupportedSyntax - return (whitespace, ordering, p?.getD fun _ => .check) + let defaultP := fun _ => .check + return (whitespace, ordering, p?.getD defaultP) /-- An info tree node corresponding to a failed `#guard_msgs` invocation, used for code action support. 
-/ @@ -157,7 +178,7 @@ def MessageOrdering.apply (mode : MessageOrdering) (msgs : List String) : List S match specFn msg with | .check => toCheck := toCheck.add msg | .drop => pure () - | .passthrough => toPassthrough := toPassthrough.add msg + | .pass => toPassthrough := toPassthrough.add msg let strings ← toCheck.toList.mapM (messageToStringWithoutPos ·) let strings := ordering.apply strings let res := "---\n".intercalate strings |>.trim diff --git a/src/Lean/Elab/Import.lean b/src/Lean/Elab/Import.lean index 1dbfbf7e94c7..4454bf1e69f0 100644 --- a/src/Lean/Elab/Import.lean +++ b/src/Lean/Elab/Import.lean @@ -10,7 +10,15 @@ import Lean.CoreM namespace Lean.Elab -def headerToImports : TSyntax ``Parser.Module.header → Array Import +abbrev HeaderSyntax := TSyntax ``Parser.Module.header + +def HeaderSyntax.startPos (header : HeaderSyntax) : String.Pos := + header.raw.getPos?.getD 0 + +def HeaderSyntax.isModule (header : HeaderSyntax) : Bool := + !header.raw[0].isNone + +def HeaderSyntax.imports : HeaderSyntax → Array Import | `(Parser.Module.header| $[module%$moduleTk]? $[prelude%$preludeTk]? $importsStx*) => let imports := if preludeTk.isNone then #[{ module := `Init : Import }] else #[] imports ++ importsStx.map fun | _ => unreachable! | _ => unreachable! -/-- -Elaborates the given header syntax into an environment. +abbrev headerToImports := @HeaderSyntax.imports -If `mainModule` is not given, `Environment.setMainModule` should be called manually. This is a -backwards compatibility measure not compatible with the module system. --/ -def processHeader (header : TSyntax ``Parser.Module.header) (opts : Options) (messages : MessageLog) - (inputCtx : Parser.InputContext) (trustLevel : UInt32 := 0) - (plugins : Array System.FilePath := #[]) (leakEnv := false) (mainModule := Name.anonymous) +def processHeaderCore + (startPos : String.Pos) (imports : Array Import) (isModule : Bool) + (opts : Options) (messages : MessageLog) (inputCtx : Parser.InputContext) + (trustLevel : UInt32 := 0) (plugins : Array System.FilePath := #[]) (leakEnv := false) + (mainModule := Name.anonymous) (arts : NameMap ModuleArtifacts := {}) : IO (Environment × MessageLog) := do - let isModule := !header.raw[0].isNone let level := if isModule then if Elab.inServer.get opts then .server @@ -38,7 +43,6 @@ def processHeader (header : TSyntax ``Parser.Module.header) (opts : Options) (me else .private let (env, messages) ← try - let imports := headerToImports header for i in imports do if !isModule && i.importAll then throw <| .userError "cannot use `import all` without `module`" @@ -47,15 +51,30 @@ def processHeader (header : TSyntax ``Parser.Module.header) (opts : Options) (me if !isModule && !i.isExported then throw <| .userError "cannot use `private import` without `module`" let env ← - importModules (leakEnv := leakEnv) (loadExts := true) (level := level) imports opts trustLevel plugins + importModules (leakEnv := leakEnv) (loadExts := true) (level := level) + imports opts trustLevel plugins arts pure (env, messages) catch e => let env ← mkEmptyEnvironment - let spos := header.raw.getPos?.getD 0 - let pos := inputCtx.fileMap.toPosition spos + let pos := inputCtx.fileMap.toPosition startPos pure (env, messages.add { fileName := inputCtx.fileName, data := toString e, pos := pos }) return (env.setMainModule mainModule, messages) +/-- +Elaborates the given header syntax into an environment.
+ +If `mainModule` is not given, `Environment.setMainModule` should be called manually. This is a +backwards compatibility measure not compatible with the module system. +-/ +@[inline] def processHeader + (header : HeaderSyntax) + (opts : Options) (messages : MessageLog) (inputCtx : Parser.InputContext) + (trustLevel : UInt32 := 0) (plugins : Array System.FilePath := #[]) (leakEnv := false) + (mainModule := Name.anonymous) + : IO (Environment × MessageLog) := do + processHeaderCore header.startPos header.imports header.isModule + opts messages inputCtx trustLevel plugins leakEnv mainModule + def parseImports (input : String) (fileName : Option String := none) : IO (Array Import × Position × MessageLog) := do let fileName := fileName.getD "<input>" let inputCtx := Parser.mkInputContext input fileName diff --git a/src/Lean/Elab/MutualDef.lean b/src/Lean/Elab/MutualDef.lean index d0ad5ecfe0e9..20e3a3ca52fa 100644 --- a/src/Lean/Elab/MutualDef.lean +++ b/src/Lean/Elab/MutualDef.lean @@ -89,8 +89,15 @@ private def check (prevHeaders : Array DefViewElabHeader) (newHeader : DefViewEl else pure () -private def registerFailedToInferDefTypeInfo (type : Expr) (ref : Syntax) : TermElabM Unit := - registerCustomErrorIfMVar type ref "failed to infer definition type" +private def registerFailedToInferDefTypeInfo (type : Expr) (ref : Syntax) (view : DefView) : TermElabM Unit := + let msg := if view.kind.isExample then + m!"failed to infer type of example" + else if view.kind matches .instance then + -- TODO: instances are sometimes named. We should probably include the name if available. + m!"failed to infer type of instance" + else + m!"failed to infer type of `{view.declId}`" + registerCustomErrorIfMVar type ref msg /-- Return `some [b, c]` if the given `views` are representing a declaration of the form @@ -106,14 +113,17 @@ private def isMultiConstant? (views : Array DefView) : Option (List Name) := else none -private def getPendingMVarErrorMessage (views : Array DefView) : String := +private def getPendingMVarErrorMessage (views : Array DefView) : MessageData := match isMultiConstant? views with | some ids => let idsStr := ", ".intercalate <| ids.map fun id => s!"`{id}`" let paramsStr := ", ".intercalate <| ids.map fun id => s!"`({id} : _)`" - s!"\nrecall that you cannot declare multiple constants in a single declaration. The identifier(s) {idsStr} are being interpreted as parameters {paramsStr}" + MessageData.note m!"Recall that you cannot declare multiple constants in a single declaration. The identifier(s) {idsStr} are being interpreted as parameters {paramsStr}." | none => - "\nwhen the resulting type of a declaration is explicitly provided, all holes (e.g., `_`) in the header are resolved before the declaration body is processed" + if views.all fun view => view.kind.isTheorem then + MessageData.note "All holes (e.g., `_`) in the header of a theorem are resolved before the proof is processed; information from the proof cannot be used to infer what these values should be" + else + MessageData.note "When the resulting type of a declaration is explicitly provided, all holes (e.g., `_`) in the header are resolved before the declaration body is processed" /-- Convert terms of the form `OfNat <type> (OfNat.ofNat Nat <num> ..)` into `OfNat <type> <num>`. @@ -188,13 +198,13 @@ private def elabHeaders (views : Array DefView) (expandedDeclIds : Array ExpandD let mut type ← match view.type?
with | some typeStx => let type ← elabType typeStx - registerFailedToInferDefTypeInfo type typeStx + registerFailedToInferDefTypeInfo type typeStx view pure type | none => let hole := mkHole refForElabFunType let type ← elabType hole trace[Elab.definition] ">> type: {type}\n{type.mvarId!}" - registerFailedToInferDefTypeInfo type refForElabFunType + registerFailedToInferDefTypeInfo type refForElabFunType view pure type Term.synthesizeSyntheticMVarsNoPostponing if view.isInstance then @@ -366,9 +376,11 @@ Runs `k` with a restricted local context where only section variables from `vars * are instance-implicit variables that only reference section variables included by these rules AND are not listed in `sc.omittedVars` (via `omit`; note that `omit` also subtracts from `sc.includedVars`). + +If `check` is false, no exceptions will be produced. -/ private def withHeaderSecVars {α} (vars : Array Expr) (sc : Command.Scope) (headers : Array DefViewElabHeader) - (k : Array Expr → TermElabM α) : TermElabM α := do + (k : Array Expr → TermElabM α) (check := true) : TermElabM α := do let mut revSectionFVars : Std.HashMap FVarId Name := {} for (uid, var) in (← read).sectionFVars do revSectionFVars := revSectionFVars.insert var.fvarId! uid @@ -386,10 +398,11 @@ where modify (·.add var.fvarId!) -- transitively referenced get >>= (·.addDependencies) >>= set - for var in (← get).fvarIds do - if let some uid := revSectionFVars[var]? then - if sc.omittedVars.contains uid then - throwError "cannot omit referenced section variable '{Expr.fvar var}'" + if check then + for var in (← get).fvarIds do + if let some uid := revSectionFVars[var]? then + if sc.omittedVars.contains uid then + throwError "cannot omit referenced section variable '{Expr.fvar var}'" -- instances (`addDependencies` unnecessary as by definition they may only reference variables -- already included) for var in vars do @@ -1044,27 +1057,39 @@ where Term.expandDeclId (← getCurrNamespace) (← getLevelNames) view.declId view.modifiers let headers ← elabHeaders views expandedDeclIds bodyPromises tacPromises let headers ← levelMVarToParamHeaders views headers + -- If the decl looks like a `rfl` theorem, we elaborate it synchronously as we need to wait for + -- the type before we can decide whether the theorem body should be exported, and then waiting + -- for the body as well should not add any significant overhead. + let isRflLike := headers.all (·.value matches `(declVal| := rfl)) -- elaborate body in parallel when all stars align if let (#[view], #[declId]) := (views, expandedDeclIds) then - if Elab.async.get (← getOptions) && view.kind.isTheorem && + if Elab.async.get (← getOptions) && view.kind.isTheorem && !isRflLike && !deprecated.oldSectionVars.get (← getOptions) && -- holes in theorem types is not a fatal error, but it does make parallelism impossible !headers[0]!.type.hasMVar then elabAsync headers[0]!
view declId - else elabSync headers - else elabSync headers + else elabSync headers isRflLike + else elabSync headers isRflLike for view in views, declId in expandedDeclIds do -- NOTE: this should be the full `ref`, and thus needs to be done after any snapshotting -- that depends only on a part of the ref addDeclarationRangesForBuiltin declId.declName view.modifiers.stx view.ref - elabSync headers := do - finishElab headers + elabSync headers isRflLike := do + -- If the reflexivity holds publicly as well (we're still inside `withExporting` here), export + -- the body even if it is a theorem so that it is recognized as a `rfl` theorem even without + -- `import all`. + let rflPublic ← pure isRflLike <&&> pure (← getEnv).header.isModule <&&> + forallTelescopeReducing headers[0]!.type fun _ type => do + let some (_, lhs, rhs) := type.eq? | pure false + try + isDefEq lhs rhs + catch _ => pure false + withExporting (isExporting := rflPublic) do + finishElab headers processDeriving headers elabAsync header view declId := do let env ← getEnv - -- HACK: should be replaced by new `[dsimp]` attribute - let isRflLike := header.value matches `(declVal| := rfl) - let async ← env.addConstAsync declId.declName .thm (exportedKind := if isRflLike then .thm else .axiom) + let async ← env.addConstAsync declId.declName .thm (exportedKind := .axiom) setEnv async.mainEnv -- TODO: parallelize header elaboration as well? Would have to refactor auto implicits catch, @@ -1103,7 +1128,8 @@ where (cancelTk? := cancelTk) fun _ => do profileitM Exception "elaboration" (← getOptions) do setEnv async.asyncEnv try - finishElab #[header] + withoutExporting do + finishElab #[header] finally reportDiag -- must introduce node to fill `infoHole` with multiple info trees @@ -1121,7 +1147,7 @@ where Core.logSnapshotTask { stx? := none, task := (← BaseIO.asTask (act ())), cancelTk? := cancelTk } applyAttributesAt declId.declName view.modifiers.attrs .afterTypeChecking applyAttributesAt declId.declName view.modifiers.attrs .afterCompilation - finishElab headers := withFunLocalDecls headers fun funFVars => withoutExporting do + finishElab headers := withFunLocalDecls headers fun funFVars => do for view in views, funFVar in funFVars do addLocalVarInfo view.declId funFVar let values ← try @@ -1135,7 +1161,10 @@ where let letRecsToLift ← getLetRecsToLift let letRecsToLift ← letRecsToLift.mapM instantiateMVarsAtLetRecToLift checkLetRecsToLiftTypes funFVars letRecsToLift - (if headers.all (·.kind.isTheorem) && !deprecated.oldSectionVars.get (← getOptions) then withHeaderSecVars vars sc headers else withUsed vars headers values letRecsToLift) fun vars => do + (if headers.all (·.kind.isTheorem) && !deprecated.oldSectionVars.get (← getOptions) then + -- do not repeat checks already done in `elabFunValues` + withHeaderSecVars (check := false) vars sc headers + else withUsed vars headers values letRecsToLift) fun vars => do let preDefs ← MutualClosure.main vars headers funFVars values letRecsToLift checkAllDeclNamesDistinct preDefs for preDef in preDefs do @@ -1165,7 +1194,7 @@ is error-free and contains no syntactical `sorry`s. -/ private def logGoalsAccomplishedSnapshotTask (views : Array DefView) (defsParsedSnap : DefsParsedSnapshot) : TermElabM Unit := do - if Lean.Elab.inServer.get (← getOptions) then + if ! Lean.Elab.inServer.get (← getOptions) then -- Skip 'goals accomplished' task if we are on the command line. -- These messages are only used in the language server.
return diff --git a/src/Lean/Elab/MutualInductive.lean b/src/Lean/Elab/MutualInductive.lean index a83c5c12d212..ef856f970a1f 100644 --- a/src/Lean/Elab/MutualInductive.lean +++ b/src/Lean/Elab/MutualInductive.lean @@ -215,19 +215,19 @@ def checkValidCtorModifier [Monad m] [MonadError m] (modifiers : Modifiers) : m if modifiers.attrs.size != 0 then throwError "invalid use of attributes in constructor declaration" -private def checkUnsafe (rs : Array PreElabHeaderResult) : TermElabM Unit := do +def checkUnsafe (rs : Array PreElabHeaderResult) : TermElabM Unit := do let isUnsafe := rs[0]!.view.modifiers.isUnsafe for r in rs do unless r.view.modifiers.isUnsafe == isUnsafe do throwErrorAt r.view.ref "invalid inductive type, cannot mix unsafe and safe declarations in a mutually inductive datatypes" -private def checkClass (rs : Array PreElabHeaderResult) : TermElabM Unit := do +def checkClass (rs : Array PreElabHeaderResult) : TermElabM Unit := do if rs.size > 1 then for r in rs do if r.view.isClass then throwErrorAt r.view.ref "invalid inductive type, mutual classes are not supported" -private def checkNumParams (rs : Array PreElabHeaderResult) : TermElabM Nat := do +def checkNumParams (rs : Array PreElabHeaderResult) : TermElabM Nat := do let numParams := rs[0]!.numParams for r in rs do unless r.numParams == numParams do @@ -247,7 +247,7 @@ def withExplicitToImplicit (xs : Array Expr) (k : TermElabM α) : TermElabM α : /-- Auxiliary function for checking whether the types in mutually inductive declaration are compatible. -/ -private def checkParamsAndResultType (type firstType : Expr) (numParams : Nat) : TermElabM Unit := do +def checkParamsAndResultType (type firstType : Expr) (numParams : Nat) : TermElabM Unit := do try forallTelescopeCompatible type firstType numParams fun _ type firstType => forallTelescopeReducing type fun _ type => @@ -266,7 +266,7 @@ private def checkParamsAndResultType (type firstType : Expr) (numParams : Nat) : /-- Auxiliary function for checking whether the types in mutually inductive declaration are compatible. -/ -private def checkHeaders (rs : Array PreElabHeaderResult) (numParams : Nat) (i : Nat) (firstType? : Option Expr) : TermElabM Unit := do +def checkHeaders (rs : Array PreElabHeaderResult) (numParams : Nat) (i : Nat) (firstType? : Option Expr) : TermElabM Unit := do if h : i < rs.size then let type ← checkHeader rs[i] numParams firstType? checkHeaders rs numParams (i+1) type @@ -279,7 +279,7 @@ where withRef r.view.ref <| checkParamsAndResultType type firstType numParams return firstType -private def elabHeadersAux (views : Array InductiveView) (i : Nat) (acc : Array PreElabHeaderResult) : TermElabM (Array PreElabHeaderResult) := +def elabHeadersAux (views : Array InductiveView) (i : Nat) (acc : Array PreElabHeaderResult) : TermElabM (Array PreElabHeaderResult) := Term.withAutoBoundImplicitForbiddenPred (fun n => views.any (·.shortDeclName == n)) do if h : i < views.size then let view := views[i] @@ -311,7 +311,7 @@ private def elabHeadersAux (views : Array InductiveView) (i : Nat) (acc : Array /-- Elaborates all the headers in the inductive views. -/ -private def elabHeaders (views : Array InductiveView) : TermElabM (Array PreElabHeaderResult) := do +def elabHeaders (views : Array InductiveView) : TermElabM (Array PreElabHeaderResult) := do let rs ← elabHeadersAux views 0 #[] if rs.size > 1 then checkUnsafe rs @@ -327,7 +327,7 @@ We use the parameters of rs[0]. 
Note that this method is executed after we executed `checkHeaders` and established all parameters are compatible. -/ -private def withInductiveLocalDecls (rs : Array PreElabHeaderResult) (x : Array Expr → Array Expr → TermElabM α) : TermElabM α := do +def withInductiveLocalDecls (rs : Array PreElabHeaderResult) (x : Array Expr → Array Expr → TermElabM α) : TermElabM α := do let r0 := rs[0]! forallBoundedTelescope r0.type r0.numParams fun params _ => withRef r0.view.ref do let rec loop (i : Nat) (indFVars : Array Expr) := do @@ -342,24 +342,24 @@ private def withInductiveLocalDecls (rs : Array PreElabHeaderResult) (x : Array x params indFVars loop 0 #[] -private def InductiveElabStep1.checkLevelNames (views : Array InductiveView) : TermElabM Unit := do +def InductiveElabStep1.checkLevelNames (views : Array InductiveView) : TermElabM Unit := do if h : views.size > 1 then let levelNames := views[0].levelNames for view in views do unless view.levelNames == levelNames do throwErrorAt view.ref "invalid inductive type, universe parameters mismatch in mutually inductive datatypes" -private def ElabHeaderResult.checkLevelNames (rs : Array PreElabHeaderResult) : TermElabM Unit := do +def ElabHeaderResult.checkLevelNames (rs : Array PreElabHeaderResult) : TermElabM Unit := do if h : rs.size > 1 then let levelNames := rs[0].levelNames for r in rs do unless r.levelNames == levelNames do throwErrorAt r.view.ref "invalid inductive type, universe parameters mismatch in mutually inductive datatypes" -private def getArity (indType : InductiveType) : MetaM Nat := +def getArity (indType : InductiveType) : MetaM Nat := forallTelescopeReducing indType.type fun xs _ => return xs.size -private def resetMaskAt (mask : Array Bool) (i : Nat) : Array Bool := +def resetMaskAt (mask : Array Bool) (i : Nat) : Array Bool := mask.setIfInBounds i false /-- @@ -367,7 +367,7 @@ Compute a bit-mask that for `indType`. The size of the resulting array `result` The first `numParams` elements are `false` since they are parameters. For `i ∈ [numParams, arity)`, we have that `result[i]` if this index of the inductive family is fixed. -/ -private def computeFixedIndexBitMask (numParams : Nat) (indType : InductiveType) (indFVars : Array Expr) : MetaM (Array Bool) := do +def computeFixedIndexBitMask (numParams : Nat) (indType : InductiveType) (indFVars : Array Expr) : MetaM (Array Bool) := do let arity ← getArity indType if arity ≤ numParams then return .replicate arity false @@ -410,7 +410,7 @@ private def computeFixedIndexBitMask (numParams : Nat) (indType : InductiveType) go indType.ctors /-- Return true iff `arrowType` is an arrow and its domain is defeq to `type` -/ -private def isDomainDefEq (arrowType : Expr) (type : Expr) : MetaM Bool := do +def isDomainDefEq (arrowType : Expr) (type : Expr) : MetaM Bool := do if !arrowType.isForall then return false else @@ -429,7 +429,7 @@ private def isDomainDefEq (arrowType : Expr) (type : Expr) : MetaM Bool := do /-- Convert fixed indices to parameters. 
-/ -private def fixedIndicesToParams (numParams : Nat) (indTypes : Array InductiveType) (indFVars : Array Expr) : MetaM Nat := do +def fixedIndicesToParams (numParams : Nat) (indTypes : Array InductiveType) (indFVars : Array Expr) : MetaM Nat := do if !inductive.autoPromoteIndices.get (← getOptions) then return numParams let masks ← indTypes.mapM (computeFixedIndexBitMask numParams · indFVars) @@ -461,7 +461,7 @@ private def fixedIndicesToParams (numParams : Nat) (indTypes : Array InductiveTy return i go numParams type typesToCheck -private def getResultingUniverse : List InductiveType → TermElabM Level +def getResultingUniverse : List InductiveType → TermElabM Level | [] => throwError "unexpected empty inductive declaration" | indType :: _ => forallTelescopeReducing indType.type fun _ r => do let r ← whnfD r @@ -491,7 +491,7 @@ def shouldInferResultUniverse (u : Level) : TermElabM (Option LMVarId) := do Converts universe metavariables into new parameters. It skips `univToInfer?` (the inductive datatype resulting universe) because it should be inferred later using `inferResultingUniverse`. -/ -private def levelMVarToParam (indTypes : List InductiveType) (univToInfer? : Option LMVarId) : TermElabM (List InductiveType) := +def levelMVarToParam (indTypes : List InductiveType) (univToInfer? : Option LMVarId) : TermElabM (List InductiveType) := indTypes.mapM fun indType => do let type ← levelMVarToParam' indType.type let ctors ← indType.ctors.mapM fun ctor => do @@ -502,13 +502,13 @@ where levelMVarToParam' (type : Expr) : TermElabM Expr := do Term.levelMVarToParam type (except := fun mvarId => univToInfer? == some mvarId) -private structure AccLevelState where +structure AccLevelState where levels : Array Level := #[] /-- When we encounter `u ≤ ?r + k` with `k > 0`, we add `(u, k)` to the "bad levels". We use this to compute what the universe "should" have been. -/ badLevels : Array (Level × Nat) := #[] -private def AccLevelState.push (acc : AccLevelState) (u : Level) (offset : Nat) : AccLevelState := +def AccLevelState.push (acc : AccLevelState) (u : Level) (offset : Nat) : AccLevelState := if offset == 0 then { acc with levels := if acc.levels.contains u then acc.levels else acc.levels.push u } else @@ -584,7 +584,7 @@ def withViewTypeRef [Monad m] [MonadRef m] (views : Array InductiveView) (k : m /-- Auxiliary function for `updateResultingUniverse`. Computes a list of levels `l₁ ... lₙ` such that `r := max l₁ ... lₙ` can be a solution to the constraint problem. -/ -private def collectUniverses (views : Array InductiveView) (r : Level) (rOffset : Nat) (numParams : Nat) (indTypes : List InductiveType) : TermElabM (Array Level) := do +def collectUniverses (views : Array InductiveView) (r : Level) (rOffset : Nat) (numParams : Nat) (indTypes : List InductiveType) : TermElabM (Array Level) := do let (_, acc) ← go |>.run {} if !acc.badLevels.isEmpty then withViewTypeRef views do @@ -633,7 +633,7 @@ Specialized to structures, the heuristic is that we prefer a `Prop` instead of a when it could be a syntactic subsingleton. Exception: no-field structures are `Type` since they are likely stubbed-out declarations. 
-/ -private def isPropCandidate (numParams : Nat) (indTypes : List InductiveType) : MetaM Bool := do +def isPropCandidate (numParams : Nat) (indTypes : List InductiveType) : MetaM Bool := do unless indTypes.foldl (fun n indType => max n indType.ctors.length) 0 == 1 do return false for indType in indTypes do @@ -643,7 +643,7 @@ private def isPropCandidate (numParams : Nat) (indTypes : List InductiveType) : return true return false -private def mkResultUniverse (us : Array Level) (rOffset : Nat) (preferProp : Bool) : Level := +def mkResultUniverse (us : Array Level) (rOffset : Nat) (preferProp : Bool) : Level := if us.isEmpty && rOffset == 0 then if preferProp then levelZero else levelOne else @@ -659,7 +659,7 @@ try to infer the unique `?r` such that `?r + k` is the supremum of the construct Usually, we also throw in the constraint that `1 ≤ ?r + k`, but if `isPropCandidate` is true we allow the solution `?r + k = 0`. -/ -private def updateResultingUniverse (views : Array InductiveView) (numParams : Nat) (indTypes : List InductiveType) : TermElabM (List InductiveType) := do +def updateResultingUniverse (views : Array InductiveView) (numParams : Nat) (indTypes : List InductiveType) : TermElabM (List InductiveType) := do let r₀ ← getResultingUniverse indTypes let rOffset : Nat := r₀.getOffset let r : Level := r₀.getLevelOffset @@ -679,7 +679,7 @@ Heuristic: users don't tend to want types that are universe polymorphic across b This can be disabled by setting the option `bootstrap.inductiveCheckResultingUniverse` to false, unless one of the inductive commands requires it (for instance `structure` due to projections). -/ -private def checkResultingUniversePolymorphism (views : Array InductiveView) (u : Level) (_numParams : Nat) (_indTypes : List InductiveType) : TermElabM Unit := do +def checkResultingUniversePolymorphism (views : Array InductiveView) (u : Level) (_numParams : Nat) (_indTypes : List InductiveType) : TermElabM Unit := do let doErrFor := fun view => view.withTypeRef do throwError "\ @@ -696,7 +696,7 @@ private def checkResultingUniversePolymorphism (views : Array InductiveView) (u /-- Solves for level metavariables in constructor argument types that are completely determined by the resulting type. -/ -private partial def propagateUniversesToConstructors (numParams : Nat) (indTypes : List InductiveType) : TermElabM Unit := do +partial def propagateUniversesToConstructors (numParams : Nat) (indTypes : List InductiveType) : TermElabM Unit := do let u := (← instantiateLevelMVars (← getResultingUniverse indTypes)).normalize unless u.isZero do let r := u.getLevelOffset @@ -750,7 +750,7 @@ where pure () /-- Checks the universe constraints for each constructor. 
-/ -private def checkResultingUniverses (views : Array InductiveView) (elabs' : Array InductiveElabStep2) +def checkResultingUniverses (views : Array InductiveView) (elabs' : Array InductiveElabStep2) (numParams : Nat) (indTypes : List InductiveType) : TermElabM Unit := do let u := (← instantiateLevelMVars (← getResultingUniverse indTypes)).normalize checkResultingUniversePolymorphism views u numParams indTypes @@ -773,21 +773,21 @@ private def checkResultingUniverses (views : Array InductiveView) (elabs' : Arra which is not less than or equal to the inductive type's resulting universe level{indentD u}" withCtorRef views ctor.name <| throwError msg -private def collectUsed (indTypes : List InductiveType) : StateRefT CollectFVars.State MetaM Unit := do +def collectUsed (indTypes : List InductiveType) : StateRefT CollectFVars.State MetaM Unit := do indTypes.forM fun indType => do indType.type.collectFVars indType.ctors.forM fun ctor => ctor.type.collectFVars -private def removeUnused (elabs : Array InductiveElabStep2) (vars : Array Expr) (indTypes : List InductiveType) : TermElabM (LocalContext × LocalInstances × Array Expr) := do +def removeUnused (elabs : Array InductiveElabStep2) (vars : Array Expr) (indTypes : List InductiveType) : TermElabM (LocalContext × LocalInstances × Array Expr) := do let (_, used) ← (collectUsed indTypes *> elabs.forM fun e => e.collectUsedFVars).run {} Meta.removeUnused vars used -private def withUsed {α} (elabs : Array InductiveElabStep2) (vars : Array Expr) (indTypes : List InductiveType) (k : Array Expr → TermElabM α) : TermElabM α := do +def withUsed {α} (elabs : Array InductiveElabStep2) (vars : Array Expr) (indTypes : List InductiveType) (k : Array Expr → TermElabM α) : TermElabM α := do let (lctx, localInsts, vars) ← removeUnused elabs vars indTypes withLCtx lctx localInsts <| k vars -private def updateParams (vars : Array Expr) (indTypes : List InductiveType) : TermElabM (List InductiveType) := +def updateParams (vars : Array Expr) (indTypes : List InductiveType) : TermElabM (List InductiveType) := indTypes.mapM fun indType => do let type ← mkForallFVars vars indType.type let ctors ← indType.ctors.mapM fun ctor => do @@ -795,7 +795,7 @@ private def updateParams (vars : Array Expr) (indTypes : List InductiveType) : T return { ctor with type := ctorType } return { indType with type, ctors } -private def collectLevelParamsInInductive (indTypes : List InductiveType) : Array Name := Id.run do +def collectLevelParamsInInductive (indTypes : List InductiveType) : Array Name := Id.run do let mut usedParams : CollectLevelParams.State := {} for indType in indTypes do usedParams := collectLevelParams usedParams indType.type @@ -803,7 +803,7 @@ private def collectLevelParamsInInductive (indTypes : List InductiveType) : Arra usedParams := collectLevelParams usedParams ctor.type return usedParams.params -private def mkIndFVar2Const (views : Array InductiveView) (indFVars : Array Expr) (levelNames : List Name) : ExprMap Expr := Id.run do +def mkIndFVar2Const (views : Array InductiveView) (indFVars : Array Expr) (levelNames : List Name) : ExprMap Expr := Id.run do let levelParams := levelNames.map mkLevelParam; let mut m : ExprMap Expr := {} for h : i in [:views.size] do @@ -814,7 +814,7 @@ private def mkIndFVar2Const (views : Array InductiveView) (indFVars : Array Expr /-- Remark: `numVars <= numParams`. 
`numVars` is the number of context `variables` used in the inductive declaration, and `numParams` is `numVars` + number of explicit parameters provided in the declaration. -/ -private def replaceIndFVarsWithConsts (views : Array InductiveView) (indFVars : Array Expr) (levelNames : List Name) +def replaceIndFVarsWithConsts (views : Array InductiveView) (indFVars : Array Expr) (levelNames : List Name) (numVars : Nat) (numParams : Nat) (indTypes : List InductiveType) : TermElabM (List InductiveType) := let indFVar2Const := mkIndFVar2Const views indFVars levelNames indTypes.mapM fun indType => do @@ -830,7 +830,7 @@ private def replaceIndFVarsWithConsts (views : Array InductiveView) (indFVars : return { ctor with type } return { indType with ctors } -private structure FinalizeContext where +structure FinalizeContext where elabs : Array InductiveElabStep2 mctx : MetavarContext levelParams : List Name @@ -839,7 +839,7 @@ private structure FinalizeContext where localInsts : LocalInstances replaceIndFVars : Expr → MetaM Expr -private def mkInductiveDecl (vars : Array Expr) (elabs : Array InductiveElabStep1) : TermElabM FinalizeContext := +def mkInductiveDecl (vars : Array Expr) (elabs : Array InductiveElabStep1) : TermElabM FinalizeContext := Term.withoutSavingRecAppSyntax do let views := elabs.map (·.view) let view0 := views[0]! @@ -932,7 +932,7 @@ private def mkInductiveDecl (vars : Array Expr) (elabs : Array InductiveElabStep enableRealizationsForConst ctor.declName return res -private def mkAuxConstructions (declNames : Array Name) : TermElabM Unit := do +def mkAuxConstructions (declNames : Array Name) : TermElabM Unit := do let env ← getEnv let hasEq := env.contains ``Eq let hasHEq := env.contains ``HEq @@ -948,7 +948,7 @@ private def mkAuxConstructions (declNames : Array Name) : TermElabM Unit := do if hasUnit && hasProd then mkBRecOn n if hasUnit && hasProd then mkBInductionOn n -private def elabInductiveViews (vars : Array Expr) (elabs : Array InductiveElabStep1) : TermElabM FinalizeContext := do +def elabInductiveViews (vars : Array Expr) (elabs : Array InductiveElabStep1) : TermElabM FinalizeContext := do let view0 := elabs[0]!.view let ref := view0.ref Term.withDeclName view0.declName do withRef ref do @@ -964,7 +964,7 @@ private def elabInductiveViews (vars : Array Expr) (elabs : Array InductiveElabS return res /-- Ensures that there are no conflicts among or between the type and constructor names defined in `elabs`. 
-/ -private def checkNoInductiveNameConflicts (elabs : Array InductiveElabStep1) : TermElabM Unit := do +def checkNoInductiveNameConflicts (elabs : Array InductiveElabStep1) : TermElabM Unit := do let throwErrorsAt (init cur : Syntax) (msg : MessageData) : TermElabM Unit := do logErrorAt init msg throwErrorAt cur msg @@ -983,7 +983,7 @@ private def checkNoInductiveNameConflicts (elabs : Array InductiveElabStep1) : T throwErrorsAt prevRef ctor.declId m!"cannot define {declKinds} with the same name '{ctorName}'" uniqueNames := uniqueNames.insert ctorName (false, ctor.declId) -private def applyComputedFields (indViews : Array InductiveView) : CommandElabM Unit := do +def applyComputedFields (indViews : Array InductiveView) : CommandElabM Unit := do if indViews.all (·.computedFields.isEmpty) then return let mut computedFields := #[] @@ -1010,7 +1010,7 @@ private def applyComputedFields (indViews : Array InductiveView) : CommandElabM liftTermElabM do Term.withDeclName indViews[0]!.declName do ComputedFields.setComputedFields computedFields -private def applyDerivingHandlers (views : Array InductiveView) : CommandElabM Unit := do +def applyDerivingHandlers (views : Array InductiveView) : CommandElabM Unit := do let mut processed : NameSet := {} for view in views do for classView in view.derivingClasses do @@ -1023,7 +1023,7 @@ private def applyDerivingHandlers (views : Array InductiveView) : CommandElabM U declNames := declNames.push view.declName classView.applyHandlers declNames -private def elabInductiveViewsPostprocessing (views : Array InductiveView) (res : FinalizeContext) : CommandElabM Unit := do +def elabInductiveViewsPostprocessing (views : Array InductiveView) (res : FinalizeContext) : CommandElabM Unit := do let view0 := views[0]! let ref := view0.ref applyComputedFields views -- NOTE: any generated code before this line is invalid diff --git a/src/Lean/Elab/StructInst.lean b/src/Lean/Elab/StructInst.lean index d6f3cdb8167e..68812e2a89a6 100644 --- a/src/Lean/Elab/StructInst.lean +++ b/src/Lean/Elab/StructInst.lean @@ -75,7 +75,7 @@ Structure instance notation makes use of the expected type. let stxNew := stx.setArg 4 mkNullNode `(($stxNew : $expected)) -private def mkStructInstField (lval : TSyntax ``Parser.Term.structInstLVal) (binders : TSyntaxArray ``Parser.Term.structInstFieldBinder) +def mkStructInstField (lval : TSyntax ``Parser.Term.structInstLVal) (binders : TSyntaxArray ``Parser.Term.structInstFieldBinder) (type? : Option Term) (val : Term) : MacroM (TSyntax ``Parser.Term.structInstField) := do let mut val := val if let some type := type? then @@ -88,7 +88,7 @@ private def mkStructInstField (lval : TSyntax ``Parser.Term.structInstLVal) (bin /-- Takes an arbitrary `structInstField` and expands it to be a `structInstFieldDef` without any binders or type ascription. -/ -private def expandStructInstField (stx : Syntax) : MacroM (Option Syntax) := withRef stx do +def expandStructInstField (stx : Syntax) : MacroM (Option Syntax) := withRef stx do match stx with | `(Parser.Term.structInstField| $_:structInstLVal := $_) => -- Already expanded. @@ -152,7 +152,7 @@ structure SourcesView where Given an array of explicit sources, returns syntax of the form `optional (atomic (sepBy1 termParser ", " >> " with ")` -/ -private def mkSourcesWithSyntax (sources : Array Syntax) : Syntax := +def mkSourcesWithSyntax (sources : Array Syntax) : Syntax := let ref := sources[0]! 
let stx := Syntax.mkSep sources (mkAtomFrom ref ", ") mkNullNode #[stx, mkAtomFrom ref "with "] @@ -160,7 +160,7 @@ private def mkSourcesWithSyntax (sources : Array Syntax) : Syntax := /-- Creates a structure source view from structure instance notation. -/ -private def getStructSources (structStx : Syntax) : TermElabM SourcesView := +def getStructSources (structStx : Syntax) : TermElabM SourcesView := withRef structStx do let explicitSource := structStx[1] let implicitSource := structStx[3] @@ -183,7 +183,7 @@ We say a structure instance notation is a "modifyOp" if it contains only a singl def structInstArrayRef := leading_parser "[" >> termParser >>"]" ``` -/ -private def isModifyOp? (stx : Syntax) : TermElabM (Option Syntax) := do +def isModifyOp? (stx : Syntax) : TermElabM (Option Syntax) := do let s? ← stx[2][0].getSepArgs.foldlM (init := none) fun s? arg => do /- arg is of the form `structInstField`. It should be macro expanded at this point, but we make sure it's the case. -/ if arg[1][2].getKind == ``Lean.Parser.Term.structInstFieldDef then @@ -222,7 +222,7 @@ private def isModifyOp? (stx : Syntax) : TermElabM (Option Syntax) := do Given a `stx` that is a structure instance notation that's a modifyOp (according to `isModifyOp?`), elaborates it. Only supports structure instances with a single source. -/ -private def elabModifyOp (stx modifyOp : Syntax) (sourcesView : SourcesView) (expectedType? : Option Expr) : TermElabM Expr := do +def elabModifyOp (stx modifyOp : Syntax) (sourcesView : SourcesView) (expectedType? : Option Expr) : TermElabM Expr := do unless sourcesView.explicit.size == 1 do throwError "invalid \{...} notation, exactly one explicit source is required when using '[<index>] := <value>' update notation" if let some implicit := sourcesView.implicit then @@ -298,10 +298,10 @@ structure StructInstView where sources : SourcesView deriving Inhabited -private def formatField (field : FieldView) : Format := +def formatField (field : FieldView) : Format := Format.joinSep field.lhs " . " ++ " := " ++ format field.val -private def formatStruct : StructInstView → Format +def formatStruct : StructInstView → Format | ⟨_, fields, source⟩ => let fieldsFmt := Format.joinSep (fields.toList.map formatField) ", " let implicitFmt := if source.implicit.isSome then " .. " else "" @@ -324,7 +324,7 @@ def structInstArrayRef := leading_parser "[" >> termParser >>"]" ``` -/ -- Remark: this code relies on the fact that `expandStruct` only transforms `fieldLHS.fieldName` -private def FieldLHS.toSyntax (first : Bool) : FieldLHS → Syntax +def FieldLHS.toSyntax (first : Bool) : FieldLHS → Syntax | .modifyOp stx .. => stx | .fieldName stx name | .parentFieldName stx _ name => if first then mkIdentFrom stx name else mkGroupNode #[mkAtomFrom stx ".", mkIdentFrom stx name] @@ -333,7 +333,7 @@ private def FieldLHS.toSyntax (first : Bool) : FieldLHS → Syntax /-- Converts a `FieldView` back into syntax. Used to construct synthetic structure instance notation for subobjects in `StructInst.expandStruct` processing. -/ -private def FieldView.toSyntax : FieldView → TSyntax ``Parser.Term.structInstField +def FieldView.toSyntax : FieldView → TSyntax ``Parser.Term.structInstField | field => let stx := field.ref let stx := stx.setArg 1 <| stx[1].setArg 2 <| stx[1][2].setArg 1 field.val @@ -342,7 +342,7 @@ private def FieldView.toSyntax : FieldView → TSyntax ``Parser.Term.structInstF | _ => unreachable! /-- Creates a view of a field left-hand side. 
-/ -private def toFieldLHS (stx : Syntax) : MacroM FieldLHS := +def toFieldLHS (stx : Syntax) : MacroM FieldLHS := if stx.getKind == ``Lean.Parser.Term.structInstArrayRef then return FieldLHS.modifyOp stx stx[1] else @@ -358,7 +358,7 @@ private def toFieldLHS (stx : Syntax) : MacroM FieldLHS := Creates a view from structure instance notation and structure source view (from `Lean.Elab.Term.StructInst.getStructSources`). -/ -private def mkStructView (stx : Syntax) (sources : SourcesView) : MacroM StructInstView := do +def mkStructView (stx : Syntax) (sources : SourcesView) : MacroM StructInstView := do /- Recall that `stx` is of the form ``` @@ -380,7 +380,7 @@ private def mkStructView (stx : Syntax) (sources : SourcesView) : MacroM StructI /-- The constructor to use for the structure instance notation. -/ -private structure CtorHeaderResult where +structure CtorHeaderResult where /-- The constructor function with applied structure parameters. -/ ctorFn : Expr /-- The type of `ctorFn` -/ @@ -397,7 +397,7 @@ Elaborates the structure's flat constructor using the expected type, filling in The `structureType?` is the expected type of the structure instance. -/ -private def mkCtorHeader (ctorVal : ConstructorVal) (structureType? : Option Expr) : TermElabM CtorHeaderResult := do +def mkCtorHeader (ctorVal : ConstructorVal) (structureType? : Option Expr) : TermElabM CtorHeaderResult := do let flatCtorName := mkFlatCtorOfStructCtorName ctorVal.name let cinfo ← getConstInfo flatCtorName let us ← mkFreshLevelMVars ctorVal.levelParams.length @@ -442,7 +442,7 @@ Resulting invariant: the field has a LHS that has one of these forms: - `.fieldName .. :: _` - `[.parentFieldName ..]` -/ -private partial def normalizeField (structName : Name) (fieldView : FieldView) : MetaM FieldView := do +partial def normalizeField (structName : Name) (fieldView : FieldView) : MetaM FieldView := do let env ← getEnv match fieldView.lhs with | .fieldIndex ref idx :: rest => @@ -477,12 +477,12 @@ private inductive ExpandedFieldVal | source (fvar : Expr) | nested (fieldViews : Array FieldView) (sources : Array ExplicitSourceView) -private structure ExpandedField where +structure ExpandedField where ref : Syntax name : Name val : ExpandedFieldVal -private def ExpandedField.isNested (f : ExpandedField) : Bool := f.val matches .nested .. +def ExpandedField.isNested (f : ExpandedField) : Bool := f.val matches .nested .. instance : ToMessageData ExpandedFieldVal where toMessageData @@ -500,7 +500,7 @@ abbrev ExpandedFields := NameMap ExpandedField Normalizes and expands the field views. Validates that there are no duplicate fields. -/ -private def expandFields (structName : Name) (fieldViews : Array FieldView) (recover : Bool) : MetaM (Bool × ExpandedFields) := do +def expandFields (structName : Name) (fieldViews : Array FieldView) (recover : Bool) : MetaM (Bool × ExpandedFields) := do let mut fields : ExpandedFields := {} let mut errors : Bool := false for fieldView in fieldViews do @@ -546,7 +546,7 @@ Adds fields from the sources, updating any nested fields. Rule: a missing field always comes from the first source that can provide it. 
-/ -private def addSourceFields (structName : Name) (sources : Array ExplicitSourceView) (fields : ExpandedFields) : MetaM ExpandedFields := do +def addSourceFields (structName : Name) (sources : Array ExplicitSourceView) (fields : ExpandedFields) : MetaM ExpandedFields := do let mut fields := fields let env ← getEnv let fieldNames := getStructureFieldsFlattened env structName false @@ -568,7 +568,7 @@ private def addSourceFields (structName : Name) (sources : Array ExplicitSourceV pure () return fields -private structure StructInstContext where +structure StructInstContext where view : StructInstView /-- True if the structure instance has a trailing `..`. -/ ellipsis : Bool @@ -583,7 +583,7 @@ private structure StructInstContext where /-- The expanded structure instance fields, to be elaborated. -/ fieldViews : ExpandedFields -private structure StructInstState where +structure StructInstState where /-- The type of the flat constructor with applied parameters and applied fields. -/ type : Expr /-- A set of the structure name and all its parents. -/ @@ -608,39 +608,39 @@ Monad for elaborating the fields of structure instance notation. -/ private abbrev StructInstM := ReaderT StructInstContext (StateRefT StructInstState TermElabM) -private structure SavedState where +structure SavedState where termState : Term.SavedState state : StructInstState deriving Nonempty -private def saveState : StructInstM SavedState := +def saveState : StructInstM SavedState := return { termState := (← Term.saveState), state := (← get) } -private def SavedState.restore (s : SavedState) : StructInstM Unit := do +def SavedState.restore (s : SavedState) : StructInstM Unit := do s.termState.restore set s.state -private instance : MonadBacktrack SavedState StructInstM where +instance : MonadBacktrack SavedState StructInstM where saveState := saveState restoreState b := b.restore /-- Initialize cached data. -/ -private def initializeState : StructInstM Unit := do +def initializeState : StructInstM Unit := do let structName := (← read).structName let resolutionOrder ← getStructureResolutionOrder structName let structNameSet : NameSet := resolutionOrder.foldl (·.insert ·) {} modify fun s => { s with structNameSet } -private def withViewRef {α : Type} (x : StructInstM α) : StructInstM α := do +def withViewRef {α : Type} (x : StructInstM α) : StructInstM α := do let ref := (← read).view.ref withRef ref x /-- If the field has already been visited by `loop` but has not been solved for yet, returns its metavariable. -/ -private def isFieldNotSolved? (fieldName : Name) : StructInstM (Option MVarId) := do +def isFieldNotSolved? (fieldName : Name) : StructInstM (Option MVarId) := do let some val := (← get).fieldMap.find? fieldName | return none let .mvar mvarId ← instantiateMVars val | return none return mvarId @@ -648,7 +648,7 @@ private def isFieldNotSolved? (fieldName : Name) : StructInstM (Option MVarId) : /-- Reduce projections for all structures appearing in `structNameSet`. -/ -private def reduceFieldProjs (e : Expr) : StructInstM Expr := do +def reduceFieldProjs (e : Expr) : StructInstM Expr := do let e ← instantiateMVars e let postVisit (e : Expr) : StructInstM TransformStep := do if let Expr.const projName .. := e.getAppFn then @@ -666,7 +666,7 @@ private def reduceFieldProjs (e : Expr) : StructInstM Expr := do /-- Unfolds implementation decl let vars that appear in propositions. 
-/ -private def zetaDeltaImplDetailsInProps (e : Expr) : MetaM Expr := do +def zetaDeltaImplDetailsInProps (e : Expr) : MetaM Expr := do let unfoldPre (e : Expr) : MetaM TransformStep := do let .fvar fvarId := e.getAppFn | return .continue let decl ← fvarId.getDecl @@ -682,16 +682,16 @@ private def zetaDeltaImplDetailsInProps (e : Expr) : MetaM Expr := do return .continue transform (← instantiateMVars e) (pre := pre) -private def etaStructReduce' (e : Expr) : StructInstM Expr := do +def etaStructReduce' (e : Expr) : StructInstM Expr := do let names := (← get).structNameSet etaStructReduce e names.contains -private def normalizeExpr (e : Expr) (zetaDeltaImpl : Bool := true) : StructInstM Expr := do +def normalizeExpr (e : Expr) (zetaDeltaImpl : Bool := true) : StructInstM Expr := do let e ← if zetaDeltaImpl then zetaDeltaImplDetailsInProps e else pure e let e ← reduceFieldProjs e etaStructReduce' e -private def addStructFieldAux (fieldName : Name) (e : Expr) : StructInstM Unit := do +def addStructFieldAux (fieldName : Name) (e : Expr) : StructInstM Unit := do trace[Elab.struct] "setting '{fieldName}' value to{indentExpr e}" modify fun s => { s with type := s.type.bindingBody!.instantiateBetaRevRange 0 1 #[e] @@ -699,7 +699,7 @@ private def addStructFieldAux (fieldName : Name) (e : Expr) : StructInstM Unit : fieldMap := s.fieldMap.insert fieldName e } -private def addStructField (fieldView : ExpandedField) (e : Expr) : StructInstM Unit := do +def addStructField (fieldView : ExpandedField) (e : Expr) : StructInstM Unit := do let fieldName := fieldView.name addStructFieldAux fieldName e let env ← getEnv @@ -709,11 +709,11 @@ private def addStructField (fieldView : ExpandedField) (e : Expr) : StructInstM projName := fieldInfo.projFn, fieldName, lctx := (← getLCtx), val := e, stx := fieldView.ref } -private def elabStructField (_fieldName : Name) (stx : Term) (fieldType : Expr) : StructInstM Expr := do +def elabStructField (_fieldName : Name) (stx : Term) (fieldType : Expr) : StructInstM Expr := do let fieldType ← normalizeExpr fieldType elabTermEnsuringType stx fieldType -private def addStructFieldMVar (fieldName : Name) (ty : Expr) (kind : MetavarKind := .natural) : StructInstM Expr := do +def addStructFieldMVar (fieldName : Name) (ty : Expr) (kind : MetavarKind := .natural) : StructInstM Expr := do let ty ← normalizeExpr ty let e ← mkFreshExprMVar ty (kind := kind) addStructFieldAux fieldName e @@ -725,7 +725,7 @@ The arguments for the `_default` auxiliary function are provided by `fieldMap`. After default values are resolved, then the one that is added to the environment as an `_inherited_default` auxiliary function is normalized; we don't do those normalizations here. -/ -private partial def getFieldDefaultValue? (fieldName : Name) : StructInstM (NameSet × Option Expr) := do +partial def getFieldDefaultValue? (fieldName : Name) : StructInstM (NameSet × Option Expr) := do let some defFn := getEffectiveDefaultFnForField? (← getEnv) (← read).structName fieldName | return ({}, none) let fieldMap := (← get).fieldMap @@ -737,7 +737,7 @@ private partial def getFieldDefaultValue? (fieldName : Name) : StructInstM (Name /-- Auxiliary type for `synthDefaultFields` -/ -private structure PendingField where +structure PendingField where fieldName : Name fieldType : Expr required : Bool @@ -747,7 +747,7 @@ private structure PendingField where /-- Synthesize pending optParams. 
-/ -private def synthOptParamFields : StructInstM Unit := do +def synthOptParamFields : StructInstM Unit := do let optParamFields ← modifyGet fun s => (s.optParamFields, { s with optParamFields := #[] }) if optParamFields.isEmpty then return /- @@ -858,7 +858,7 @@ private def synthOptParamFields : StructInstM Unit := do pendingSet := pendingSet.filter (!toRemove.contains ·) pendingFields := pendingFields.filter fun pendingField => pendingField.val?.isNone || !toRemove.contains pendingField.fieldName -private def finalize : StructInstM Expr := withViewRef do +def finalize : StructInstM Expr := withViewRef do let val := (← read).val.beta (← get).fields trace[Elab.struct] "constructor{indentExpr val}" synthesizeAppInstMVars (← get).instMVars val @@ -878,7 +878,7 @@ private def finalize : StructInstM Expr := withViewRef do Replace (subobject) parent projections of a `self` fvar by a constructor expression, if all the fields for the parent are already defined. -/ -private partial def reduceSelfProjs (self : Expr) (e : Expr) : StructInstM Expr := do +partial def reduceSelfProjs (self : Expr) (e : Expr) : StructInstM Expr := do let e ← instantiateMVars e Meta.transform (skipConstInApp := true) e (pre := replaceParentProj) where @@ -921,7 +921,7 @@ where -- Continue, since we need to reduce the parameters. return .continue e' -private def getParentStructType? (parentStructName : Name) : StructInstM (Option (Expr × Option Name)) := do +def getParentStructType? (parentStructName : Name) : StructInstM (Option (Expr × Option Name)) := do let env ← getEnv let structName := (← read).structName let structType := (← read).structType @@ -946,7 +946,7 @@ private def getParentStructType? (parentStructName : Name) : StructInstM (Option If there is a path to `parentStructName`, compute its type. Also returns the last projection to the parent. Otherwise, create a type with fresh metavariables. -/ -private def getParentStructType (parentStructName : Name) : StructInstM (Expr × Option Name) := do +def getParentStructType (parentStructName : Name) : StructInstM (Expr × Option Name) := do if let some res ← getParentStructType? parentStructName then return res else @@ -957,12 +957,12 @@ private def getParentStructType (parentStructName : Name) : StructInstM (Expr × /-- Creates projection notation for the given structure field. -/ -private def mkProjStx (s : Syntax) (fieldName : Name) : Syntax := +def mkProjStx (s : Syntax) (fieldName : Name) : Syntax := mkNode ``Parser.Term.explicit #[mkAtomFrom s "@", mkNode ``Parser.Term.proj #[s, mkAtomFrom s ".", mkIdentFrom s fieldName]] -private def processField (loop : StructInstM α) (field : ExpandedField) (fieldType : Expr) : StructInstM α := withRef field.ref do +def processField (loop : StructInstM α) (field : ExpandedField) (fieldType : Expr) : StructInstM α := withRef field.ref do let fieldType := fieldType.consumeTypeAnnotations trace[Elab.struct] "processing field '{field.name}' of type {fieldType}{indentD (toMessageData field)}" match field.val with @@ -1035,7 +1035,7 @@ Handle the case when no field is given. These fields can still be solved for by parent instance synthesis later. 
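Inside a pattern, a field that is omitted but covered by `..` instead becomes an anonymous
pattern variable; for instance (made-up names):
```
structure Pair where
  x : Nat
  y : Nat

def fst : Pair → Nat
  | { x := a, .. } => a
```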
-/ -private def processNoField (loop : StructInstM α) (fieldName : Name) (binfo : BinderInfo) (fieldType : Expr) : StructInstM α := do +def processNoField (loop : StructInstM α) (fieldName : Name) (binfo : BinderInfo) (fieldType : Expr) : StructInstM α := do trace[Elab.struct] "processNoField '{fieldName}' of type {fieldType}" if (← read).ellipsis && (← readThe Term.Context).inPattern then -- See the note in `ElabAppArgs.processExplicitArg` @@ -1073,7 +1073,7 @@ private def processNoField (loop : StructInstM α) (fieldName : Name) (binfo : B modify fun s => { s with optParamFields := s.optParamFields.push (fieldName, fieldType, binfo.isExplicit) } loop -private partial def loop : StructInstM Expr := withViewRef do +partial def loop : StructInstM Expr := withViewRef do let type := (← get).type trace[Elab.struct] "loop, constructor type:{indentExpr type}" if let .forallE fieldName fieldType _ binfo := type then @@ -1095,7 +1095,7 @@ private partial def loop : StructInstM Expr := withViewRef do /-- For each parent class, see if it can be used to synthesize the fields that haven't been provided. -/ -private partial def addParentInstanceFields : StructInstM Unit := do +partial def addParentInstanceFields : StructInstM Unit := do let env ← getEnv let structName := (← read).structName let fieldNames := getStructureFieldsFlattened env structName (includeSubobjectFields := false) @@ -1164,7 +1164,7 @@ private partial def addParentInstanceFields : StructInstM Unit := do -- Failed, don't try this parent again. trace[Elab.struct] "failed to use instance for {parentTy}\n{ex.toMessageData}" -private def main : StructInstM Expr := do +def main : StructInstM Expr := do initializeState unless (← read).ellipsis && (← readThe Term.Context).inPattern do -- Inside a pattern with ellipsis mode, users expect to match just the fields provided. @@ -1174,7 +1174,7 @@ private def main : StructInstM Expr := do /-- Main elaborator for structure instances. -/ -private def elabStructInstView (s : StructInstView) (structName : Name) (structType? : Option Expr) : +def elabStructInstView (s : StructInstView) (structName : Name) (structType? : Option Expr) : TermElabM Expr := withRef s.ref do let env ← getEnv let ctorVal := getStructureCtor env structName @@ -1200,7 +1200,7 @@ Note that this one is not a `Macro` because we need to access the local context. Note also that having this as a separate step from main elaboration lets it postpone without re-elaborating the sources. -/ -private def expandNonAtomicExplicitSources (stx : Syntax) : TermElabM (Option Syntax) := do +def expandNonAtomicExplicitSources (stx : Syntax) : TermElabM (Option Syntax) := do let sourcesOpt := stx[1] if sourcesOpt.isNone then return none @@ -1266,7 +1266,7 @@ Otherwise, we use the type of the first source. Possibly returns the expected structure type as well. -/ -private def getStructName (expectedType? : Option Expr) (sourceView : SourcesView) : TermElabM (Name × Option Expr) := do +def getStructName (expectedType? : Option Expr) (sourceView : SourcesView) : TermElabM (Name × Option Expr) := do tryPostponeIfNoneOrMVar expectedType? match expectedType? 
with | none => useSource () diff --git a/src/Lean/Elab/Structure.lean b/src/Lean/Elab/Structure.lean index bf94082fe927..de9784ad26e8 100644 --- a/src/Lean/Elab/Structure.lean +++ b/src/Lean/Elab/Structure.lean @@ -192,7 +192,7 @@ structure StructFieldInfo where ### View construction -/ -private def defaultCtorName := `mk +def defaultCtorName := `mk /- The structure constructor syntax is @@ -200,7 +200,7 @@ The structure constructor syntax is leading_parser try (declModifiers >> ident >> " :: ") ``` -/ -private def expandCtor (structStx : Syntax) (structModifiers : Modifiers) (structDeclName : Name) : TermElabM CtorView := do +def expandCtor (structStx : Syntax) (structModifiers : Modifiers) (structDeclName : Name) : TermElabM CtorView := do let useDefault := do let declName := structDeclName ++ defaultCtorName let ref := structStx[1].mkSynthetic @@ -234,7 +234,7 @@ def structParent := leading_parser optional (atomic (ident >> " : ")) >> termPar def «extends» := leading_parser " extends " >> sepBy1 structParent ", " ``` -/ -private def expandParents (optExtendsStx : Syntax) : TermElabM (Array StructParentView) := do +def expandParents (optExtendsStx : Syntax) : TermElabM (Array StructParentView) := do let parentDecls := if optExtendsStx.isNone then #[] else optExtendsStx[0][1].getSepArgs parentDecls.mapM fun parentDecl => withRef parentDecl do let mut projRef := parentDecl @@ -277,7 +277,7 @@ def structSimpleBinder := leading_parser atomic (declModifiers true >> ident) def structFields := leading_parser many (structExplicitBinder <|> structImplicitBinder <|> structInstBinder) ``` -/ -private def expandFields (structStx : Syntax) (structModifiers : Modifiers) (structDeclName : Name) : TermElabM (Array StructFieldView) := do +def expandFields (structStx : Syntax) (structModifiers : Modifiers) (structDeclName : Name) : TermElabM (Array StructFieldView) := do if structStx[5][0].isToken ":=" then -- https://github.com/leanprover/lean4/issues/5236 let cmd := if structStx[0].getKind == ``Parser.Command.classTk then "class" else "structure" @@ -409,7 +409,7 @@ def structureSyntaxToView (modifiers : Modifiers) (stx : Syntax) : TermElabM Str ### Elaboration -/ -private structure State where +structure State where /-- Immediate parents. -/ parents : Array StructParentInfo := #[] /-- All fields, both newly defined and inherited. Every parent has a `StructFieldInfo` too. -/ @@ -432,17 +432,17 @@ instance : Inhabited (StructElabM α) where def runStructElabM (k : StructElabM α) (init : State := {}) : TermElabM α := k.run' init -private def addParentInfo (parent : StructParentInfo) : StructElabM Unit := do +def addParentInfo (parent : StructParentInfo) : StructElabM Unit := do modify fun s => { s with parents := s.parents.push parent } -private def findFieldInfo? (fieldName : Name) : StructElabM (Option StructFieldInfo) := do +def findFieldInfo? (fieldName : Name) : StructElabM (Option StructFieldInfo) := do let s ← get return s.fieldIdx.find? fieldName |>.map fun idx => s.fields[idx]! -private def hasFieldName (fieldName : Name) : StructElabM Bool := +def hasFieldName (fieldName : Name) : StructElabM Bool := return (← get).fieldIdx.contains fieldName -private def findFieldInfoByFVarId? (fvarId : FVarId) : StructElabM (Option StructFieldInfo) := do +def findFieldInfoByFVarId? (fvarId : FVarId) : StructElabM (Option StructFieldInfo) := do let s ← get return s.fvarIdFieldIdx.find? fvarId |>.map fun idx => s.fields[idx]! @@ -450,7 +450,7 @@ private def findFieldInfoByFVarId? 
(fvarId : FVarId) : StructElabM (Option Struc Inserts a field info into the current state. Throws an error if there is already a field with that name. -/ -private def addFieldInfo (info : StructFieldInfo) : StructElabM Unit := do +def addFieldInfo (info : StructFieldInfo) : StructElabM Unit := do if ← hasFieldName info.name then throwError "(in addFieldInfo) structure field '{info.name}' already exists" else @@ -468,7 +468,7 @@ private def addFieldInfo (info : StructFieldInfo) : StructElabM Unit := do s.ancestorFieldIdx } -private def findParentFieldInfo? (structName : Name) : StructElabM (Option StructFieldInfo) := do +def findParentFieldInfo? (structName : Name) : StructElabM (Option StructFieldInfo) := do let s ← get return s.ancestorFieldIdx.find? structName |>.map fun idx => s.fields[idx]! @@ -476,13 +476,13 @@ private def findParentFieldInfo? (structName : Name) : StructElabM (Option Struc Replaces the field info for a given field. Throws an error if there is not already a field with that name. -/ -private def replaceFieldInfo (info : StructFieldInfo) : StructElabM Unit := do +def replaceFieldInfo (info : StructFieldInfo) : StructElabM Unit := do if let some idx := (← get).fieldIdx.find? info.name then modify fun s => { s with fields := s.fields.set! idx info } else throwError "(in replaceFieldInfo) structure field '{info.name}' does not already exist" -private def addFieldInheritedDefault (fieldName : Name) (structName : Name) (d : StructFieldDefault) : StructElabM Unit := do +def addFieldInheritedDefault (fieldName : Name) (structName : Name) (d : StructFieldDefault) : StructElabM Unit := do let some info ← findFieldInfo? fieldName | throwError "(in addFieldInheritedDefault) structure field '{fieldName}' does not already exist" replaceFieldInfo { info with inheritedDefaults := info.inheritedDefaults.push (structName, d) } @@ -492,7 +492,7 @@ Reduces projections applied to constructors or parent fvars, for structure types If `zetaDelta` is true (default), then zeta reduces parent fvars as needed to do the reductions. -/ -private def reduceFieldProjs (e : Expr) (zetaDelta := true) : StructElabM Expr := do +def reduceFieldProjs (e : Expr) (zetaDelta := true) : StructElabM Expr := do let e ← instantiateMVars e let postVisit (e : Expr) : StructElabM TransformStep := do if let Expr.const projName .. := e.getAppFn then @@ -518,11 +518,11 @@ Puts an expression into "field normal form". - If `zetaDelta` is true (default) then all parent fvars are zeta reduced. - Constructors of parent structures are eta reduced. -/ -private def fieldNormalizeExpr (e : Expr) (zetaDelta : Bool := true) : StructElabM Expr := do +def fieldNormalizeExpr (e : Expr) (zetaDelta : Bool := true) : StructElabM Expr := do let ancestors := (← get).ancestorFieldIdx etaStructReduce (p := ancestors.contains) <| ← reduceFieldProjs e (zetaDelta := zetaDelta) -private def fieldFromMsg (info : StructFieldInfo) : MessageData := +def fieldFromMsg (info : StructFieldInfo) : MessageData := if let some sourceStructName := info.sourceStructNames.head? then m!"field '{info.name}' from '{.ofConstName sourceStructName}'" else @@ -534,7 +534,7 @@ After default values are resolved, then the one that is added to the environment as an `_inherited_default` auxiliary function is normalized; we don't do those normalizations here, since that could be wasted effort if this default isn't chosen. -/ -private partial def getFieldDefaultValue? 
(structName : Name) (params : Array Expr) (fieldName : Name) : StructElabM (Option Expr) := do +partial def getFieldDefaultValue? (structName : Name) (params : Array Expr) (fieldName : Name) : StructElabM (Option Expr) := do let some defFn := getDefaultFnForField? (← getEnv) structName fieldName | return none let fieldVal? (n : Name) : StructElabM (Option Expr) := do @@ -545,7 +545,7 @@ private partial def getFieldDefaultValue? (structName : Name) (params : Array Ex return none return val -private def getFieldDefault? (structName : Name) (params : Array Expr) (fieldName : Name) : +def getFieldDefault? (structName : Name) (params : Array Expr) (fieldName : Name) : StructElabM (Option StructFieldDefault) := do if let some val ← getFieldDefaultValue? structName params fieldName then -- Important: we use `getFieldDefaultValue?` because we want default value definitions, not *inherited* ones, to properly handle diamonds @@ -557,7 +557,7 @@ private def getFieldDefault? (structName : Name) (params : Array Expr) (fieldNam else return none -private def toVisibility (fieldInfo : StructureFieldInfo) : CoreM Visibility := do +def toVisibility (fieldInfo : StructureFieldInfo) : CoreM Visibility := do if isProtected (← getEnv) fieldInfo.projFn then return Visibility.protected else if isPrivateName fieldInfo.projFn then @@ -571,7 +571,7 @@ mutual Adds `fieldName` of type `fieldType` from structure `structName`. See `withStructFields` for meanings of other arguments. -/ -private partial def withStructField (view : StructView) (sourceStructNames : List Name) (inSubobject? : Option Expr) +partial def withStructField (view : StructView) (sourceStructNames : List Name) (inSubobject? : Option Expr) (structName : Name) (params : Array Expr) (fieldName : Name) (fieldType : Expr) (k : Expr → StructElabM α) : StructElabM α := do trace[Elab.structure] "withStructField '{.ofConstName structName}', field '{fieldName}'" @@ -630,7 +630,7 @@ Does not add a parent field for the structure itself; that is done by `withStruc - `sourceStructNames` is a stack of the structures visited, used for error reporting - the continuation `k` is run with a constructor expression for this structure -/ -private partial def withStructFields (view : StructView) (sourceStructNames : List Name) +partial def withStructFields (view : StructView) (sourceStructNames : List Name) (structType : Expr) (inSubobject? : Option Expr) (k : Expr → StructElabM α) : StructElabM α := do let structName ← getStructureName structType @@ -683,7 +683,7 @@ Adds a parent structure and all its fields. See `withStructFields` for meanings of other arguments. -/ -private partial def withStruct (view : StructView) (sourceStructNames : List Name) (binfo : BinderInfo) +partial def withStruct (view : StructView) (sourceStructNames : List Name) (binfo : BinderInfo) (structFieldName : Name) (structType : Expr) (inSubobject? 
: Option Expr) (k : StructFieldInfo → StructElabM α) @@ -776,7 +776,7 @@ end - `k` is a continuation that is run with a local context containing the fields and the ancestor fields, and it's provided the field info for the parent -/ -private partial def withParent (view : StructView) (projRef : Syntax) +partial def withParent (view : StructView) (projRef : Syntax) (rawStructFieldName structFieldName : Name) (structType : Expr) (k : StructFieldInfo → StructElabM α) : @@ -789,17 +789,17 @@ private partial def withParent (view : StructView) (projRef : Syntax) withStruct view [] (projRef := projRef) (rawStructFieldName := rawStructFieldName) (binfo := binfo) (inSubobject? := none) structFieldName structType k -private def mkToParentName (parentStructName : Name) : Name := +def mkToParentName (parentStructName : Name) : Name := Name.mkSimple <| "to" ++ parentStructName.eraseMacroScopes.getString! -private def StructParentView.mkToParentNames (parentView : StructParentView) (parentStructName : Name) : Name × Name := +def StructParentView.mkToParentNames (parentView : StructParentView) (parentStructName : Name) : Name × Name := match parentView.rawName?, parentView.name? with | some rawName, some name => (rawName, name) | _, _ => let toParentName := mkToParentName parentStructName (toParentName, toParentName) -private def withParents (view : StructView) (rs : Array ElabHeaderResult) (indFVar : Expr) (k : StructElabM α) : StructElabM α := do +def withParents (view : StructView) (rs : Array ElabHeaderResult) (indFVar : Expr) (k : StructElabM α) : StructElabM α := do go 0 where go (i : Nat) : StructElabM α := do @@ -844,10 +844,10 @@ where else k -private def registerFailedToInferFieldType (fieldName : Name) (e : Expr) (ref : Syntax) : TermElabM Unit := do +def registerFailedToInferFieldType (fieldName : Name) (e : Expr) (ref : Syntax) : TermElabM Unit := do Term.registerCustomErrorIfMVar (← instantiateMVars e) ref m!"failed to infer type of field '{.ofConstName fieldName}'" -private def registerFailedToInferDefaultValue (fieldName : Name) (e : Expr) (ref : Syntax) : TermElabM Unit := do +def registerFailedToInferDefaultValue (fieldName : Name) (e : Expr) (ref : Syntax) : TermElabM Unit := do Term.registerCustomErrorIfMVar (← instantiateMVars e) ref m!"failed to infer default value for field '{.ofConstName fieldName}'" Term.registerLevelMVarErrorExprInfo e ref m!"failed to infer universe levels in default value for field '{.ofConstName fieldName}'" @@ -882,7 +882,7 @@ However, now `α` does not know its relationship to `toMagma`. This was not robust, since in diamond inheritance `α` only remembered *one* of its parents in this indirect way. -/ -private def solveParentMVars (e : Expr) : StructElabM Expr := do +def solveParentMVars (e : Expr) : StructElabM Expr := do let env ← getEnv Term.synthesizeSyntheticMVars (postpone := .yes) let mvars ← getMVarsNoDelayed e @@ -898,7 +898,7 @@ private def solveParentMVars (e : Expr) : StructElabM Expr := do return e open Parser.Term in -private def typelessBinder? : Syntax → Option ((Array Ident) × BinderInfo) +def typelessBinder? : Syntax → Option ((Array Ident) × BinderInfo) | `(bracketedBinderF|($ids:ident*)) => some (ids, .default) | `(bracketedBinderF|{$ids:ident*}) => some (ids, .implicit) | `(bracketedBinderF|⦃$ids:ident*⦄) => some (ids, .strictImplicit) @@ -909,7 +909,7 @@ private def typelessBinder? : Syntax → Option ((Array Ident) × BinderInfo) Takes a binder list and interprets the prefix to see if any could be construed to be binder info updates. 
Returns the binder list without these updates along with the new binder infos for these parameters. -/ -private def elabParamInfoUpdates (structParams : Array Expr) (binders : Array Syntax) : StructElabM (Array Syntax × ExprMap (Syntax × BinderInfo)) := do +def elabParamInfoUpdates (structParams : Array Expr) (binders : Array Syntax) : StructElabM (Array Syntax × ExprMap (Syntax × BinderInfo)) := do let mut overrides : ExprMap (Syntax × BinderInfo) := {} for i in [0:binders.size] do match typelessBinder? binders[i]! with @@ -930,7 +930,7 @@ private def elabParamInfoUpdates (structParams : Array Expr) (binders : Array Sy overrides := overrides.insert decl.toExpr (id, bi) return (#[], overrides) -private def elabFieldTypeValue (structParams : Array Expr) (view : StructFieldView) : +def elabFieldTypeValue (structParams : Array Expr) (view : StructFieldView) : StructElabM (Option Expr × ExprMap (Syntax × BinderInfo) × Option StructFieldDefault) := do let state ← get let binders := view.binders.getArgs @@ -975,7 +975,7 @@ private def elabFieldTypeValue (structParams : Array Expr) (view : StructFieldVi let type ← mkForallFVars params type return (type, paramInfoOverrides, StructFieldDefault.autoParam <| .const name []) -private partial def withFields (structParams : Array Expr) (views : Array StructFieldView) (k : StructElabM α) : StructElabM α := do +partial def withFields (structParams : Array Expr) (views : Array StructFieldView) (k : StructElabM α) : StructElabM α := do go 0 where go (i : Nat) : StructElabM α := do @@ -1053,7 +1053,7 @@ where else k -private def collectUsedFVars (lctx : LocalContext) (localInsts : LocalInstances) (fieldInfos : Array StructFieldInfo) : +def collectUsedFVars (lctx : LocalContext) (localInsts : LocalInstances) (fieldInfos : Array StructFieldInfo) : StateRefT CollectFVars.State MetaM Unit := do withLCtx lctx localInsts do fieldInfos.forM fun info => do @@ -1070,7 +1070,7 @@ Creates a local context suitable for creating the constructor. Does not do any reductions. -/ -private def mkCtorLCtx : StructElabM LocalContext := do +def mkCtorLCtx : StructElabM LocalContext := do let fieldInfos := (← get).fields -- A map of all field fvars to eliminate let mut fvarMap : ExprMap Expr := {} @@ -1101,7 +1101,7 @@ private def mkCtorLCtx : StructElabM LocalContext := do /-- Builds a constructor for the type, for adding the inductive type to the environment. -/ -private def mkCtor (view : StructView) (r : ElabHeaderResult) (params : Array Expr) : StructElabM Constructor := +def mkCtor (view : StructView) (r : ElabHeaderResult) (params : Array Expr) : StructElabM Constructor := withRef view.ref do let lctx ← mkCtorLCtx let type ← instantiateMVars <| mkAppN r.indFVar params @@ -1125,7 +1125,7 @@ Assumes the inductive type has already been added to the environment. Note: we can't generally use optParams here since the default values might depend on previous ones. We include autoParams however. 
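For instance (made-up names), `hi`'s default mentions `lo`, so it cannot be attached to the
flat constructor as an independent `optParam`, whereas the tactic default on `ok` is kept as an
`autoParam`:
```
structure Interval where
  lo : Nat
  hi : Nat := lo + 1          -- default depends on an earlier field
  ok : lo ≤ hi := by omega    -- tactic default, kept as an autoParam

example : Interval := { lo := 3 }   -- `hi := 3 + 1`, `ok` discharged by `omega`
```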
-/ -private def mkFlatCtorExpr (levelParams : List Name) (params : Array Expr) (ctor : ConstructorVal) (replaceIndFVars : Expr → MetaM Expr) : +def mkFlatCtorExpr (levelParams : List Name) (params : Array Expr) (ctor : ConstructorVal) (replaceIndFVars : Expr → MetaM Expr) : StructElabM Expr := do -- build the constructor application using the fields in the local context let mut val := mkAppN (mkConst ctor.name (levelParams.map mkLevelParam)) params @@ -1151,7 +1151,7 @@ private def mkFlatCtorExpr (levelParams : List Name) (params : Array Expr) (ctor val ← replaceIndFVars val fieldNormalizeExpr val -private partial def mkFlatCtor (levelParams : List Name) (params : Array Expr) (structName : Name) (replaceIndFVars : Expr → MetaM Expr) : +partial def mkFlatCtor (levelParams : List Name) (params : Array Expr) (structName : Name) (replaceIndFVars : Expr → MetaM Expr) : StructElabM Unit := do let env ← getEnv let ctor := getStructureCtor env structName @@ -1164,7 +1164,7 @@ private partial def mkFlatCtor (levelParams : List Name) (params : Array Expr) ( let valType := valType.inferImplicit params.size true addDecl <| Declaration.defnDecl (← mkDefinitionValInferrringUnsafe flatCtorName levelParams valType val .abbrev) -private partial def checkResultingUniversesForFields (fieldInfos : Array StructFieldInfo) (u : Level) : TermElabM Unit := do +partial def checkResultingUniversesForFields (fieldInfos : Array StructFieldInfo) (u : Level) : TermElabM Unit := do for info in fieldInfos do let type ← inferType info.fvar let v := (← instantiateLevelMVars (← getLevel type)).normalize @@ -1174,7 +1174,7 @@ private partial def checkResultingUniversesForFields (fieldInfos : Array StructF which is not less than or equal to the structure's resulting universe level{indentD u}" throwErrorAt info.ref msg -private def addProjections (params : Array Expr) (r : ElabHeaderResult) (fieldInfos : Array StructFieldInfo) : TermElabM Unit := do +def addProjections (params : Array Expr) (r : ElabHeaderResult) (fieldInfos : Array StructFieldInfo) : TermElabM Unit := do let projDecls : Array StructProjDecl ← fieldInfos |>.filter (·.kind.isInCtor) @@ -1192,7 +1192,7 @@ private def addProjections (params : Array Expr) (r : ElabHeaderResult) (fieldIn -- projections may generate equation theorems enableRealizationsForConst decl.projName -private def registerStructure (structName : Name) (infos : Array StructFieldInfo) : TermElabM Unit := do +def registerStructure (structName : Name) (infos : Array StructFieldInfo) : TermElabM Unit := do let fields ← infos.filterMapM fun info => do if info.kind.isInCtor then return some { @@ -1206,7 +1206,7 @@ private def registerStructure (structName : Name) (infos : Array StructFieldInfo return none modifyEnv fun env => Lean.registerStructure env { structName, fields } -private def checkDefaults (fieldInfos : Array StructFieldInfo) : TermElabM Unit := do +def checkDefaults (fieldInfos : Array StructFieldInfo) : TermElabM Unit := do let mut mvars := {} let mut lmvars := {} for fieldInfo in fieldInfos do @@ -1223,7 +1223,7 @@ private def checkDefaults (fieldInfos : Array StructFieldInfo) : TermElabM Unit /-- Computes the resolution order and for the structure and sorts the inherited defaults. -/ -private def resolveFieldDefaults (structName : Name) : StructElabM Unit := do +def resolveFieldDefaults (structName : Name) : StructElabM Unit := do -- Resolve the order, but don't report any resolution order issues at this point. 
-- We will do that in `checkResolutionOrder`, which is after the structure is registered. let { resolutionOrder, .. } ← mergeStructureResolutionOrders structName ((← get).parents.map (·.structName)) (relaxed := true) @@ -1256,7 +1256,7 @@ the structure instance notation elaborator to do reductions when making use of d This arrangement of having declarations for all inherited values also makes the structure instance notation delaborator able to omit default values reliably. -/ -private def addDefaults (levelParams : List Name) (params : Array Expr) (replaceIndFVars : Expr → MetaM Expr) : StructElabM Unit := do +def addDefaults (levelParams : List Name) (params : Array Expr) (replaceIndFVars : Expr → MetaM Expr) : StructElabM Unit := do let fieldInfos := (← get).fields let lctx ← instantiateLCtxMVars (← getLCtx) /- The parameters `params` for the auxiliary "default value" definitions must be marked as implicit, and all others as explicit. -/ @@ -1298,7 +1298,7 @@ private def addDefaults (levelParams : List Name) (params : Array Expr) (replace /-- Given `type` of the form `forall ... (source : A), B`, return `forall ... [source : A], B`. -/ -private def setSourceInstImplicit (type : Expr) : Expr := +def setSourceInstImplicit (type : Expr) : Expr := match type with | .forallE _ d b _ => if b.isForall then @@ -1310,7 +1310,7 @@ private def setSourceInstImplicit (type : Expr) : Expr := /-- Creates a projection function to a non-subobject parent. -/ -private partial def mkCoercionToCopiedParent (levelParams : List Name) (params : Array Expr) (view : StructView) (source : Expr) (parent : StructParentInfo) (parentType parentVal : Expr) : MetaM StructureParentInfo := do +partial def mkCoercionToCopiedParent (levelParams : List Name) (params : Array Expr) (view : StructView) (source : Expr) (parent : StructParentInfo) (parentType parentVal : Expr) : MetaM StructureParentInfo := do let isProp ← Meta.isProp parentType let env ← getEnv let binfo := if view.isClass && isClass env parent.structName then BinderInfo.instImplicit else BinderInfo.default @@ -1347,7 +1347,7 @@ Make projections to parents that are not represented as subobjects. All other projections we get indirectly from the elaborator, which can construct projections by chaining subobject projections. -/ -private def mkRemainingProjections (levelParams : List Name) (params : Array Expr) (view : StructView) : StructElabM (Array StructureParentInfo) := do +def mkRemainingProjections (levelParams : List Name) (params : Array Expr) (view : StructView) : StructElabM (Array StructureParentInfo) := do let us := levelParams.map mkLevelParam let structType := mkAppN (Lean.mkConst view.declName us) params withLocalDeclD `self structType fun source => do @@ -1392,7 +1392,7 @@ private def mkRemainingProjections (levelParams : List Name) (params : Array Exp Precomputes the structure's resolution order. Option `structure.strictResolutionOrder` controls whether to create a warning if the C3 algorithm failed. 
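As a small example (made-up classes), for the diamond below the C3 linearization starts with the
structure itself and yields `D`, `B`, `C`, `A`:
```
class A where a : Nat
class B extends A where b : Nat
class C extends A where c : Nat
class D extends B, C where d : Nat
```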
-/ -private def checkResolutionOrder (structName : Name) : TermElabM Unit := do +def checkResolutionOrder (structName : Name) : TermElabM Unit := do let resolutionOrderResult ← computeStructureResolutionOrder structName (relaxed := !structure.strictResolutionOrder.get (← getOptions)) trace[Elab.structure.resolutionOrder] "computed resolution order: {resolutionOrderResult.resolutionOrder}" unless resolutionOrderResult.conflicts.isEmpty do @@ -1408,7 +1408,7 @@ private def checkResolutionOrder (structName : Name) : TermElabM Unit := do /-- Adds each direct parent projection to a class as an instance, so long as the parent isn't an ancestor of the others. -/ -private def addParentInstances (parents : Array StructureParentInfo) : MetaM Unit := do +def addParentInstances (parents : Array StructureParentInfo) : MetaM Unit := do let env ← getEnv let instParents := parents.filter fun parent => isClass env parent.structName -- A parent is an ancestor of the others if it appears with index ≥ 1 in one of the resolution orders. diff --git a/src/Lean/Elab/Tactic/BVDecide/Frontend/Normalize/Enums.lean b/src/Lean/Elab/Tactic/BVDecide/Frontend/Normalize/Enums.lean index 5306086604a7..61bd00c03d09 100644 --- a/src/Lean/Elab/Tactic/BVDecide/Frontend/Normalize/Enums.lean +++ b/src/Lean/Elab/Tactic/BVDecide/Frontend/Normalize/Enums.lean @@ -28,7 +28,7 @@ namespace Frontend.Normalize open Lean.Meta -private def getBitVecSize (domainSize : Nat) : Nat := +def getBitVecSize (domainSize : Nat) : Nat := let bvSize := Nat.log2 domainSize if 2^bvSize == domainSize then bvSize @@ -81,7 +81,7 @@ Create a `cond` chain in `Sort u` of the form: bif input = discrs 0 then values[0] else bif input = discrs 1 then values 1 else ... ``` -/ -private def mkCondChain {w : Nat} (u : Level) (input : Expr) (retType : Expr) +def mkCondChain {w : Nat} (u : Level) (input : Expr) (retType : Expr) (discrs : Nat → BitVec w) (values : List Expr) (acc : Expr) : MetaM Expr := do let instBEq ← synthInstance (mkApp (mkConst ``BEq [0]) (mkApp (mkConst ``BitVec) (toExpr w))) return go u input retType instBEq discrs values 0 acc @@ -104,7 +104,7 @@ where /-- Build `declName.recOn.{0} (motive := motive) value (f context[0]) (f context[1]) ...` -/ -private def enumCases (declName : Name) (motive : Expr) (value : Expr) (context : List α) +def enumCases (declName : Name) (motive : Expr) (value : Expr) (context : List α) (f : α → MetaM Expr) : MetaM Expr := do let recOn := mkApp2 (mkConst (mkRecOnName declName) [0]) motive value List.foldlM (init := recOn) (fun acc a => mkApp acc <$> f a) context @@ -225,7 +225,7 @@ Generate a theorem that translates `.match_x` applications on enum inductives to assuming that it is a supported kind of match, see `matchIsSupported` for the currently available variants. -/ -private partial def getMatchEqCondForAux (declName : Name) (kind : MatchKind) : MetaM Name := do +partial def getMatchEqCondForAux (declName : Name) (kind : MatchKind) : MetaM Name := do let matchEqCondName := .str declName matchEqCondSuffix realizeConst declName matchEqCondName do let decl ← @@ -392,7 +392,7 @@ def enumToBitVecCtor : Simp.Simproc := fun e => do /-- The state used for the post processing part of `enumsPass`. -/ -private structure PostProcessState where +structure PostProcessState where /-- Hypotheses that bound results of `enumToBitVec` applications as appropriate. 
-/ diff --git a/src/Lean/Elab/Tactic/Basic.lean b/src/Lean/Elab/Tactic/Basic.lean index 0e5820fcd848..1c34873dbdd7 100644 --- a/src/Lean/Elab/Tactic/Basic.lean +++ b/src/Lean/Elab/Tactic/Basic.lean @@ -11,10 +11,10 @@ namespace Lean.Elab open Meta /-- Assign `mvarId := sorry` -/ -def admitGoal (mvarId : MVarId) : MetaM Unit := +def admitGoal (mvarId : MVarId) (synthetic : Bool := true): MetaM Unit := mvarId.withContext do let mvarType ← inferType (mkMVar mvarId) - mvarId.assign (← mkLabeledSorry mvarType (synthetic := true) (unique := true)) + mvarId.assign (← mkLabeledSorry mvarType (synthetic := synthetic) (unique := true)) def goalsToMessageData (goals : List MVarId) : MessageData := MessageData.joinSep (goals.map MessageData.ofGoal) m!"\n\n" diff --git a/src/Lean/Elab/Tactic/Config.lean b/src/Lean/Elab/Tactic/Config.lean index 9961bf24b270..87acc1fe2f57 100644 --- a/src/Lean/Elab/Tactic/Config.lean +++ b/src/Lean/Elab/Tactic/Config.lean @@ -12,7 +12,7 @@ import Lean.Linter.MissingDocs namespace Lean.Elab.Tactic open Meta Parser.Tactic Command -private structure ConfigItemView where +structure ConfigItemView where ref : Syntax option : Ident value : Term @@ -20,7 +20,7 @@ private structure ConfigItemView where (bool : Bool := false) /-- Interprets the `config` as an array of option/value pairs. -/ -private def mkConfigItemViews (c : TSyntaxArray ``configItem) : Array ConfigItemView := +def mkConfigItemViews (c : TSyntaxArray ``configItem) : Array ConfigItemView := c.map fun item => match item with | `(configItem| ($option:ident := $value)) => { ref := item, option, value } @@ -33,7 +33,7 @@ private def mkConfigItemViews (c : TSyntaxArray ``configItem) : Array ConfigItem Expands a field access into full field access like `toB.toA.x`. Returns that and the last projection function for `x` itself. -/ -private def expandFieldName (structName : Name) (fieldName : Name) : MetaM (Name × Name) := do +def expandFieldName (structName : Name) (fieldName : Name) : MetaM (Name × Name) := do let env ← getEnv unless isStructure env structName do throwError "'{.ofConstName structName}' is not a structure" let some baseStructName := findField? env structName fieldName @@ -48,7 +48,7 @@ private def expandFieldName (structName : Name) (fieldName : Name) : MetaM (Name /-- Given a hierarchical name `field`, returns the fully resolved field access, the base struct name, and the last projection function. -/ -private partial def expandField (structName : Name) (field : Name) : MetaM (Name × Name) := do +partial def expandField (structName : Name) (field : Name) : MetaM (Name × Name) := do match field with | .num .. | .anonymous => throwError m!"invalid configuration field" | .str .anonymous fieldName => expandFieldName structName (Name.mkSimple fieldName) @@ -61,7 +61,7 @@ private partial def expandField (structName : Name) (field : Name) : MetaM (Name return (field' ++ field'', projFn) /-- Elaborates a tactic configuration. -/ -private def elabConfig (recover : Bool) (structName : Name) (items : Array ConfigItemView) : TermElabM Expr := +def elabConfig (recover : Bool) (structName : Name) (items : Array ConfigItemView) : TermElabM Expr := withoutModifyingStateWithInfoAndMessages <| withLCtx {} {} <| withSaveInfoContext do let mkStructInst (source? : Option Term) (fields : TSyntaxArray ``Parser.Term.structInstField) : TermElabM Term := match source? with @@ -121,7 +121,7 @@ section -- parser. set_option internal.parseQuotWithCurrentStage false -private def mkConfigElaborator +def mkConfigElaborator (doc? 
: Option (TSyntax ``Parser.Command.docComment)) (elabName type monadName : Ident) (adapt recover : Term) : MacroM (TSyntax `command) := do let empty ← withRef type `({ : $type}) diff --git a/src/Lean/Elab/Tactic/ElabTerm.lean b/src/Lean/Elab/Tactic/ElabTerm.lean index a599dfc05b00..05df41787b08 100644 --- a/src/Lean/Elab/Tactic/ElabTerm.lean +++ b/src/Lean/Elab/Tactic/ElabTerm.lean @@ -293,7 +293,7 @@ def evalApplyLikeTactic (tac : MVarId → Expr → MetaM (List MVarId)) (e : Syn @[builtin_tactic Lean.Parser.Tactic.apply] def evalApply : Tactic := fun stx => match stx with - | `(tactic| apply $e) => evalApplyLikeTactic (·.apply) e + | `(tactic| apply $e) => evalApplyLikeTactic (·.apply (term? := some m!"`{e}`")) e | _ => throwUnsupportedSyntax @[builtin_tactic Lean.Parser.Tactic.constructor] def evalConstructor : Tactic := fun _ => @@ -342,7 +342,7 @@ def elabAsFVar (stx : Syntax) (userName? : Option Name := none) : TacticM FVarId let fvarId ← withoutModifyingState <| withNewMCtxDepth <| withoutRecover do let type ← elabTerm typeStx none (mayPostpone := true) let fvarId? ← (← getLCtx).findDeclRevM? fun localDecl => do - if (← isDefEq type localDecl.type) then return localDecl.fvarId else return none + if !localDecl.isImplementationDetail && (← isDefEq type localDecl.type) then return localDecl.fvarId else return none match fvarId? with | none => throwError "failed to find a hypothesis with type{indentExpr type}" | some fvarId => return fvarId diff --git a/src/Lean/Elab/Tactic/Induction.lean b/src/Lean/Elab/Tactic/Induction.lean index bb507e47ce26..c80360177d0b 100644 --- a/src/Lean/Elab/Tactic/Induction.lean +++ b/src/Lean/Elab/Tactic/Induction.lean @@ -31,31 +31,31 @@ open Meta We assume that the syntax has been expanded. There is exactly one `inductionAltLHS`, and `" => " (hole <|> syntheticHole <|> tacticSeq)` is present -/ -private def getAltLhses (alt : Syntax) : Syntax := +def getAltLhses (alt : Syntax) : Syntax := alt[0] -private def getFirstAltLhs (alt : Syntax) : Syntax := +def getFirstAltLhs (alt : Syntax) : Syntax := (getAltLhses alt)[0] /-- Return `inductionAlt` name. It assumes `alt` does not have multiple `inductionAltLHS` -/ -private def getAltName (alt : Syntax) : Name := +def getAltName (alt : Syntax) : Name := let lhs := getFirstAltLhs alt if !lhs[1].isOfKind ``Parser.Term.hole then lhs[1][1].getId.eraseMacroScopes else `_ /-- Returns the `inductionAlt` `ident <|> hole` -/ -private def getAltNameStx (alt : Syntax) : Syntax := +def getAltNameStx (alt : Syntax) : Syntax := let lhs := getFirstAltLhs alt if lhs[1].isOfKind ``Parser.Term.hole then lhs[1] else lhs[1][1] /-- Return `true` if the first LHS of the given alternative contains `@`. -/ -private def altHasExplicitModifier (alt : Syntax) : Bool := +def altHasExplicitModifier (alt : Syntax) : Bool := let lhs := getFirstAltLhs alt !lhs[1].isOfKind ``Parser.Term.hole && !lhs[1][0].isNone /-- Return the variables in the first LHS of the given alternative. 
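For instance, in the `succ` alternative below the variables are `n` and `ih` (an illustrative
proof; the statement is arbitrary):
```
example (n : Nat) : 0 ≤ n := by
  induction n with
  | zero => exact Nat.le_refl 0
  | succ n ih => exact Nat.le_succ_of_le ih
```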
-/ -private def getAltVars (alt : Syntax) : Array Syntax := +def getAltVars (alt : Syntax) : Array Syntax := let lhs := getFirstAltLhs alt lhs[2].getArgs -private def hasAltRHS (alt : Syntax) : Bool := +def hasAltRHS (alt : Syntax) : Bool := alt[1].getNumArgs > 0 -private def getAltRHS (alt : Syntax) : Syntax := +def getAltRHS (alt : Syntax) : Syntax := alt[1][1] -private def getAltDArrow (alt : Syntax) : Syntax := +def getAltDArrow (alt : Syntax) : Syntax := alt[1][0] -- Return true if `stx` is a term occurring in the RHS of the induction/cases tactic @@ -114,15 +114,15 @@ structure State where abbrev M := ReaderT Context $ StateRefT State TermElabM -private def addNewArg (arg : Expr) : M Unit := +def addNewArg (arg : Expr) : M Unit := modify fun s => { s with argPos := s.argPos+1, f := mkApp s.f arg, fType := s.fType.bindingBody!.instantiate1 arg } /-- Return the binder name at `fType`. This method assumes `fType` is a function type. -/ -private def getBindingName : M Name := return (← get).fType.bindingName! +def getBindingName : M Name := return (← get).fType.bindingName! /-- Return the next argument expected type. This method assumes `fType` is a function type. -/ -private def getArgExpectedType : M Expr := return (← get).fType.bindingDomain! +def getArgExpectedType : M Expr := return (← get).fType.bindingDomain! -private def getFType : M Expr := do +def getFType : M Expr := do let fType ← whnfForall (← get).fType modify fun s => { s with fType := fType } pure fType @@ -235,16 +235,16 @@ def setMotiveArg (mvarId : MVarId) (motiveArg : MVarId) (targets : Array FVarId) throwError "type mismatch when assigning motive{indentExpr motive}\n{← mkHasTypeButIsExpectedMsg motiverInferredType motiveType}" motiveArg.assign motive -private def getAltNumFields (elimInfo : ElimInfo) (altName : Name) : TermElabM Nat := do +def getAltNumFields (elimInfo : ElimInfo) (altName : Name) : TermElabM Nat := do for altInfo in elimInfo.altsInfo do if altInfo.name == altName then return altInfo.numFields throwError "unknown alternative name '{altName}'" -private def isWildcard (altStx : Syntax) : Bool := +def isWildcard (altStx : Syntax) : Bool := getAltName altStx == `_ -private def checkAltNames (alts : Array Alt) (altsSyntax : Array Syntax) : TacticM Unit := do +def checkAltNames (alts : Array Alt) (altsSyntax : Array Syntax) : TacticM Unit := do let mut seenNames : Array Name := #[] for h : i in [:altsSyntax.size] do let altStx := altsSyntax[i] @@ -261,7 +261,7 @@ private def checkAltNames (alts : Array Alt) (altsSyntax : Array Syntax) : Tacti if unhandledAlts.isEmpty then m!"invalid alternative name '{altName}', no unhandled alternatives" else - let unhandledAltsMessages := unhandledAlts.map (m!"{·.name}") + let unhandledAltsMessages := unhandledAlts.map (m!"'{·.name}'") let unhandledAlts := MessageData.orList unhandledAltsMessages.toList m!"invalid alternative name '{altName}', expected {unhandledAlts}" throwErrorAt altStx msg @@ -270,7 +270,7 @@ private def checkAltNames (alts : Array Alt) (altsSyntax : Array Syntax) : Tacti /-- Given the goal `altMVarId` for a given alternative that introduces `numFields` new variables, return the number of explicit variables. Recall that when the `@` is not used, only the explicit variables can be named by the user. 
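For example (a made-up inductive), the `mk` alternative below introduces two fields, but only
one of them is explicit, so without `@` the user can name just the proof:
```
inductive Box where
  | mk : {n : Nat} → 0 < n → Box

example (b : Box) : ∃ k, 0 < k := by
  cases b with
  | mk h => exact ⟨_, h⟩   -- `{n}` stays anonymous; `| @mk n h` would name it too
```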
-/ -private def getNumExplicitFields (altMVarId : MVarId) (numFields : Nat) : MetaM Nat := altMVarId.withContext do +def getNumExplicitFields (altMVarId : MVarId) (numFields : Nat) : MetaM Nat := altMVarId.withContext do let target ← altMVarId.getType withoutModifyingState do -- The `numFields` count includes explicit, implicit and let-bound variables. @@ -282,7 +282,7 @@ private def getNumExplicitFields (altMVarId : MVarId) (numFields : Nat) : MetaM let numImplicits := (bis.filter (!·.isExplicit)).size return numFields - numImplicits -private def saveAltVarsInfo (altMVarId : MVarId) (altStx : Syntax) (fvarIds : Array FVarId) : TermElabM Unit := +def saveAltVarsInfo (altMVarId : MVarId) (altStx : Syntax) (fvarIds : Array FVarId) : TermElabM Unit := withSaveInfoContext <| altMVarId.withContext do let useNamesForExplicitOnly := !altHasExplicitModifier altStx let mut i := 0 @@ -489,7 +489,7 @@ end ElimApp «induction» := leading_parser nonReservedSymbol "induction " >> majorPremise >> usingRec >> generalizingVars >> optional inductionAlts ``` `stx` is syntax for `induction` or `fun_induction`. -/ -private def getUserGeneralizingFVarIds (stx : Syntax) : TacticM (Array FVarId) := +def getUserGeneralizingFVarIds (stx : Syntax) : TacticM (Array FVarId) := withRef stx do let generalizingStx := if stx.getKind == ``Lean.Parser.Tactic.induction then @@ -506,7 +506,7 @@ private def getUserGeneralizingFVarIds (stx : Syntax) : TacticM (Array FVarId) : getFVarIds vars -- process `generalizingVars` subterm of induction Syntax `stx`. -private def generalizeVars (mvarId : MVarId) (stx : Syntax) (targets : Array Expr) : TacticM (Nat × MVarId) := +def generalizeVars (mvarId : MVarId) (stx : Syntax) (targets : Array Expr) : TacticM (Nat × MVarId) := mvarId.withContext do let userFVarIds ← getUserGeneralizingFVarIds stx let forbidden ← mkGeneralizationForbiddenSet targets @@ -528,7 +528,7 @@ syntax inductionAlts := "with " (tactic)? withPosition( (colGe inductionAlt)*) ``` Return an array containing its alternatives. -/ -private def getAltsOfInductionAlts (inductionAlts : Syntax) : Array Syntax := +def getAltsOfInductionAlts (inductionAlts : Syntax) : Array Syntax := inductionAlts[2].getArgs /-- @@ -539,7 +539,7 @@ syntax inductionAlts := "with " (tactic)? withPosition( (colGe inductionAlt)*) runs `cont (some alts)` where `alts` is an array containing all `inductionAlt`s while disabling incremental reuse if any other syntax changed. If there's no `with` clause, then runs `cont none`. -/ -private def withAltsOfOptInductionAlts (optInductionAlts : Syntax) +def withAltsOfOptInductionAlts (optInductionAlts : Syntax) (cont : Option (Array Syntax) → TacticM α) : TacticM α := Term.withNarrowedTacticReuse (stx := optInductionAlts) (fun optInductionAlts => if optInductionAlts.isNone then @@ -559,21 +559,21 @@ private def withAltsOfOptInductionAlts (optInductionAlts : Syntax) else -- has `with` clause, but no alts cont (some #[])) -private def getOptPreTacOfOptInductionAlts (optInductionAlts : Syntax) : Syntax := +def getOptPreTacOfOptInductionAlts (optInductionAlts : Syntax) : Syntax := if optInductionAlts.isNone then mkNullNode else optInductionAlts[0][1] /-- Returns true if the `Lean.Parser.Tactic.inductionAlt` either has more than one alternative or has no RHS. 
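For instance, the single alternative below has two LHSs sharing one RHS, so it is expanded into
two alternatives with the RHS copied (an illustrative example):
```
example (p : Sum Nat Nat) : Nat := by
  cases p with
  | inl n | inr n => exact n
```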
-/ -private def shouldExpandAlt (alt : Syntax) : Bool := +def shouldExpandAlt (alt : Syntax) : Bool := alt[0].getNumArgs > 1 || (1 < alt.getNumArgs && alt[1].getNumArgs == 0) /-- Returns `some #[alt_1, ..., alt_n]` if `alt` has multiple LHSs or if `alt` has no RHS. If there is no RHS, it is filled in with a hole. -/ -private def expandAlt? (alt : Syntax) : Option (Array Syntax) := Id.run do +def expandAlt? (alt : Syntax) : Option (Array Syntax) := Id.run do if shouldExpandAlt alt then some <| alt[0].getArgs.map fun lhs => let alt := alt.setArg 0 (mkNullNode #[lhs]) @@ -594,7 +594,7 @@ In the new `inductionAlts'` all alternatives have a single LHS. Remark: the `RHS` of alternatives with multi LHSs is copied. -/ -private def expandInductionAlts? (inductionAlts : Syntax) : Option Syntax := Id.run do +def expandInductionAlts? (inductionAlts : Syntax) : Option Syntax := Id.run do let alts := getAltsOfInductionAlts inductionAlts if alts.any shouldExpandAlt then let mut altsNew := #[] @@ -607,7 +607,7 @@ private def expandInductionAlts? (inductionAlts : Syntax) : Option Syntax := Id. else none -private def inductionAltsPos (stx : Syntax) : Nat := +def inductionAltsPos (stx : Syntax) : Nat := if stx.getKind == ``Lean.Parser.Tactic.induction then 4 else if stx.getKind == ``Lean.Parser.Tactic.cases then @@ -627,7 +627,7 @@ syntax "induction " term,+ (" using " ident)? ("generalizing " (colGt term:max) if `inductionAlts` has an alternative with multiple LHSs, and likewise for `cases`, `fun_induction`, `fun_cases`. -/ -private def expandInduction? (induction : Syntax) : Option Syntax := do +def expandInduction? (induction : Syntax) : Option Syntax := do let inductionAltsPos := inductionAltsPos induction let optInductionAlts := induction[inductionAltsPos] guard <| !optInductionAlts.isNone @@ -637,7 +637,7 @@ private def expandInduction? (induction : Syntax) : Option Syntax := do /-- We may have at most one `| _ => ...` (wildcard alternative), and it must not set variable names. The idea is to make sure users do not write unstructured tactics. -/ -private def checkAltsOfOptInductionAlts (optInductionAlts : Syntax) : TacticM Unit := +def checkAltsOfOptInductionAlts (optInductionAlts : Syntax) : TacticM Unit := unless optInductionAlts.isNone do let mut found := false for alt in getAltsOfInductionAlts optInductionAlts[0] do @@ -679,7 +679,7 @@ without turning them into MVars. So this uses `abstractMVars` at the end. This i It also elaborates without `heedElabAsElim` so that users can use constants that are marked `elabAsElim` in the `using` clause`. -/ -private def elabTermForElim (stx : Syntax) : TermElabM Expr := do +def elabTermForElim (stx : Syntax) : TermElabM Expr := do -- Short-circuit elaborating plain identifiers if stx.isIdent then if let some e ← Term.resolveId? stx (withInfo := true) then @@ -703,7 +703,7 @@ register_builtin_option tactic.customEliminators : Bool := { } -- `optElimId` is of the form `("using" term)?` -private def getElimNameInfo (optElimId : Syntax) (targets : Array Expr) (induction : Bool) : TacticM ElimInfo := do +def getElimNameInfo (optElimId : Syntax) (targets : Array Expr) (induction : Bool) : TacticM ElimInfo := do if optElimId.isNone then if tactic.customEliminators.get (← getOptions) then if let some elimName ← getCustomEliminator? targets induction then @@ -730,7 +730,7 @@ private def getElimNameInfo (optElimId : Syntax) (targets : Array Expr) (inducti pure none withRef elimTerm <| getElimExprInfo elimExpr baseName? 
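/-
For a user-level illustration of the `using` clause handled by `getElimNameInfo` above (a hedged
sketch; `Nat.rec` merely stands in for any eliminator accepted here, including custom eliminators
found via `getCustomEliminator?` when `tactic.customEliminators` is set):
```
example (n : Nat) : 0 ≤ n := by
  induction n using Nat.rec
  all_goals exact Nat.zero_le _
```
-/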
-private def shouldGeneralizeTarget (e : Expr) : MetaM Bool := do +def shouldGeneralizeTarget (e : Expr) : MetaM Bool := do if let .fvar fvarId .. := e then return (← fvarId.getDecl).hasValue -- must generalize let-decls else @@ -753,7 +753,7 @@ def mkTargetView (target : Syntax) : TacticM ElimTargetView := do | _ => return { hIdent? := none, term := .missing } /-- Elaborated `ElimTargetView`. -/ -private structure ElimTargetInfo where +structure ElimTargetInfo where view : ElimTargetView expr : Expr arg? : Option GeneralizeArg @@ -805,7 +805,7 @@ def elabElimTargets (targets : Array Syntax) : TacticM (Array Expr × Array (Ide Generalize targets in `fun_induction` and `fun_cases`. Should behave like `elabCasesTargets` with no targets annotated with `h : _`. -/ -private def generalizeTargets (exprs : Array Expr) : TacticM (Array Expr) := do +def generalizeTargets (exprs : Array Expr) : TacticM (Array Expr) := do withMainContext do let exprToGeneralize ← exprs.filterM (shouldGeneralizeTarget ·) if exprToGeneralize.isEmpty then @@ -836,7 +836,7 @@ def checkInductionTargets (targets : Array Expr) : MetaM Unit := do The code path shared between `induction` and `fun_induct`; when we already have an `elimInfo` and the `targets` contains the implicit targets -/ -private def evalInductionCore (stx : Syntax) (elimInfo : ElimInfo) (targets : Array Expr) +def evalInductionCore (stx : Syntax) (elimInfo : ElimInfo) (targets : Array Expr) (toTag : Array (Ident × FVarId) := #[]) : TacticM Unit := do let mvarId ← getMainGoal -- save initial info before main goal is reassigned @@ -896,7 +896,7 @@ def elabFunTargetCall (cases : Bool) (stx : Syntax) : TacticM Expr := do /-- Elaborates the `foo args` of `fun_induction` or `fun_cases`, returning the `ElabInfo` and targets. -/ -private def elabFunTarget (cases : Bool) (stx : Syntax) : TacticM (ElimInfo × Array Expr) := do +def elabFunTarget (cases : Bool) (stx : Syntax) : TacticM (ElimInfo × Array Expr) := do withRef stx <| withMainContext do let funCall ← elabFunTargetCall cases stx funCall.withApp fun fn funArgs => do diff --git a/src/Lean/Elab/Tactic/LibrarySearch.lean b/src/Lean/Elab/Tactic/LibrarySearch.lean index 1a7e6bcd0390..b1d51c06a88c 100644 --- a/src/Lean/Elab/Tactic/LibrarySearch.lean +++ b/src/Lean/Elab/Tactic/LibrarySearch.lean @@ -48,7 +48,7 @@ def exact? (ref : Syntax) (required : Option (Array (TSyntax `term))) (requireCl addExactSuggestion ref (← instantiateMVars (mkMVar mvar)).headBeta (checkState? := initialState) (addSubgoalsMsg := true) (tacticErrorAsInfo := true) if suggestions.isEmpty then logError "apply? didn't find any relevant lemmas" - admitGoal goal + admitGoal goal (synthetic := false) @[builtin_tactic Lean.Parser.Tactic.exact?] def evalExact : Tactic := fun stx => do diff --git a/src/Lean/Environment.lean b/src/Lean/Environment.lean index a2a7691b2548..42e332924685 100644 --- a/src/Lean/Environment.lean +++ b/src/Lean/Environment.lean @@ -1,7 +1,9 @@ /- Copyright (c) 2019 Microsoft Corporation. All rights reserved. +Copyright (c) 2025 James Michael Dupont All rights reserved. +Copyright (c) 2025 Introspector LLC. All rights reserved. Released under Apache 2.0 license as described in the file LICENSE. 
-Authors: Leonardo de Moura +Authors: Leonardo de Moura, with hacks from James Michael Dupont -/ prelude import Init.Control.StateRef @@ -11,6 +13,7 @@ import Init.System.Promise import Lean.ImportingFlag import Lean.Data.NameTrie import Lean.Data.SMap +import Lean.Setup import Lean.Declaration import Lean.LocalContext import Lean.Util.Path @@ -93,18 +96,6 @@ instance : GetElem? (Array α) ModuleIdx α (fun a i => i.toNat < a.size) where abbrev ConstMap := SMap Name ConstantInfo -structure Import where - module : Name - /-- `import all`; whether to import and expose all data saved by the module. -/ - importAll : Bool := false - /-- Whether to activate this import when the current module itself is imported. -/ - isExported : Bool := true - deriving Repr, Inhabited - -instance : Coe Name Import := ⟨({module := ·})⟩ - -instance : ToString Import := ⟨fun imp => toString imp.module⟩ - /-- A compacted region holds multiple Lean objects in a contiguous memory region, which can be read/written to/from disk. Objects inside the region do not have reference counters and cannot be freed individually. The contents of .olean @@ -196,11 +187,7 @@ declared by users are stored in an environment extension. Users can declare new using meta-programming. -/ structure Environment where - /-- - The constructor of `Environment` is private to protect against modification that bypasses the - kernel. - -/ - private mk :: + /-- Mapping from constant name to `ConstantInfo`. It contains all constants (definitions, theorems, axioms, etc) that have been already type checked by the kernel. @@ -236,13 +223,13 @@ structure Environment where /-- Environment extensions. It also includes user-defined extensions. -/ - private extensions : Array EnvExtensionState + extensions : Array EnvExtensionState /-- Constant names to be saved in the field `extraConstNames` at `ModuleData`. It contains auxiliary declaration names created by the code generator which are not in `constants`. When importing modules, we want to insert them at `const2ModIdx`. -/ - private extraConstNames : NameSet + extraConstNames : NameSet /-- The header contains additional information that is set at import time. -/ header : EnvironmentHeader := {} deriving Nonempty @@ -276,11 +263,11 @@ def find? (env : Environment) (n : Name) : Option ConstantInfo := env.constants.find?' n @[export lean_environment_mark_quot_init] -private def markQuotInit (env : Environment) : Environment := +def markQuotInit (env : Environment) : Environment := { env with quotInit := true } @[export lean_environment_quot_init] -private def isQuotInit (env : Environment) : Bool := +def isQuotInit (env : Environment) : Bool := env.quotInit /-- Type check given declaration and add it to the environment -/ @@ -299,7 +286,7 @@ and the kernel will not catch it if the new option is set to true. opaque addDeclWithoutChecking (env : Environment) (decl : @& Declaration) : Except Exception Environment @[export lean_environment_add] -private def add (env : Environment) (cinfo : ConstantInfo) : Environment := +def add (env : Environment) (cinfo : ConstantInfo) : Environment := { env with constants := env.constants.insert cinfo.name cinfo } @[export lean_kernel_diag_is_enabled] @@ -389,7 +376,7 @@ Information about the current branch of the environment representing asynchronou Use `Environment.enterAsync` instead of `mkRaw`. -/ -private structure AsyncContext where mkRaw :: +structure AsyncContext where mkRaw :: /-- Name of the declaration asynchronous elaboration was started for. 
All constants added to this environment branch must have the name as a prefix, after erasing macro scopes and private name @@ -407,13 +394,13 @@ deriving Nonempty Checks whether a declaration named `n` may be added to the environment in the given context. See also `AsyncContext.declPrefix`. -/ -private def AsyncContext.mayContain (ctx : AsyncContext) (n : Name) : Bool := +def AsyncContext.mayContain (ctx : AsyncContext) (n : Name) : Bool := ctx.declPrefix.isPrefixOf <| privateToUserName n.eraseMacroScopes /-- Constant info and environment extension states eventually resulting from async elaboration. -/ -private structure AsyncConst where +structure AsyncConst where constInfo : AsyncConstantInfo /-- Reported extension state eventually fulfilled by promise; may be missing for tasks (e.g. kernel @@ -427,16 +414,17 @@ private structure AsyncConst where consts : Task Dynamic /-- Data structure holding a sequence of `AsyncConst`s optimized for efficient access. -/ -private structure AsyncConsts where +structure AsyncConsts where size : Nat revList : List AsyncConst /-- Map from declaration name to const for fast direct access. -/ map : NameMap AsyncConst - /-- Trie of declaration names without private name prefixes for fast longest-prefix access. -/ + /-- Trie of declaration names without private + name prefixes for fast longest-prefix access. -/ normalizedTrie : NameTrie AsyncConst deriving Inhabited, TypeName -private def AsyncConsts.add (aconsts : AsyncConsts) (aconst : AsyncConst) : AsyncConsts := +def AsyncConsts.add (aconsts : AsyncConsts) (aconst : AsyncConst) : AsyncConsts := let normalizedName := privateToUserName aconst.constInfo.name if let some aconst' := aconsts.normalizedTrie.find? normalizedName then let _ : Inhabited AsyncConsts := ⟨aconsts⟩ @@ -448,11 +436,11 @@ private def AsyncConsts.add (aconsts : AsyncConsts) (aconst : AsyncConst) : Asyn normalizedTrie := aconsts.normalizedTrie.insert normalizedName aconst } -private def AsyncConsts.find? (aconsts : AsyncConsts) (declName : Name) : Option AsyncConst := +def AsyncConsts.find? (aconsts : AsyncConsts) (declName : Name) : Option AsyncConst := aconsts.map.find? declName /-- Finds the constant in the collection that is a prefix of `declName`, if any. -/ -private def AsyncConsts.findPrefix? (aconsts : AsyncConsts) (declName : Name) : Option AsyncConst := +def AsyncConsts.findPrefix? (aconsts : AsyncConsts) (declName : Name) : Option AsyncConst := -- as macro scopes are a strict suffix, we do not have to remove them before calling -- `findLongestPrefix?` aconsts.normalizedTrie.findLongestPrefix? (privateToUserName declName) @@ -461,7 +449,7 @@ private def AsyncConsts.findPrefix? (aconsts : AsyncConsts) (declName : Name) : Finds constants including from other environment branches by recursively looking up longest prefixes (which is sufficient by `AsyncContext.mayContain`). -/ -private partial def AsyncConsts.findRec? (aconsts : AsyncConsts) (declName : Name) : Option AsyncConst := do +partial def AsyncConsts.findRec? (aconsts : AsyncConsts) (declName : Name) : Option AsyncConst := do let c ← aconsts.findPrefix? declName if c.constInfo.name == declName then return c @@ -469,7 +457,7 @@ private partial def AsyncConsts.findRec? (aconsts : AsyncConsts) (declName : Nam AsyncConsts.findRec? aconsts declName /-- Like `findRec?`; allocating tasks is (currently?) too costly to do always. 
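As a rough trace of the shared lookup strategy (illustrative names; the actual constants are
whatever the corresponding branch produced):
```
-- findRecTask aconsts `Foo.bar.eq_def
--   findPrefix? returns the entry whose name `Foo.bar` is the longest prefix
--   the name is not an exact match, so the search continues in the constants
--   eventually reported by the `Foo.bar` branch
```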
-/ -private partial def AsyncConsts.findRecTask (aconsts : AsyncConsts) (declName : Name) : Task (Option AsyncConst) := Id.run do +partial def AsyncConsts.findRecTask (aconsts : AsyncConsts) (declName : Name) : Task (Option AsyncConst) := Id.run do let some c := aconsts.findPrefix? declName | .pure none if c.constInfo.name == declName then return .pure c @@ -478,27 +466,27 @@ private partial def AsyncConsts.findRecTask (aconsts : AsyncConsts) (declName : AsyncConsts.findRecTask aconsts declName /-- Accessibility levels of declarations in `Lean.Environment`. -/ -private inductive Visibility where +inductive Visibility where /-- Information private to the module. -/ | «private» /-- Information to be exported to other modules. -/ | «public» /-- Maps `Visibility` to `α`. -/ -private structure VisibilityMap (α : Type) where +structure VisibilityMap (α : Type) where «private» : α «public» : α deriving Inhabited, Nonempty /-- Realization results, to be replayed onto other branches. -/ -private structure RealizationResult where +structure RealizationResult where newConsts : VisibilityMap (List AsyncConst) replayKernel : Kernel.Environment → Except Kernel.Exception Kernel.Environment dyn : Dynamic deriving Nonempty /-- Context for `realizeConst` established by `enableRealizationsForConst`. -/ -private structure RealizationContext where +structure RealizationContext where /-- Saved `Environment`, untyped to avoid cyclic reference. Import environment for imported constants. -/ @@ -523,7 +511,7 @@ structure Environment where environment, though there are no soundness concerns in this case given that it is used purely for elaboration. -/ - private mk :: + mk :: /-- Kernel environments containing imported constants. Also stores environment extension state for the current branch of the environment (in `private`). Any other data should be considered @@ -532,13 +520,13 @@ structure Environment where As `base` is eagerly available, we prefer taking information from it instead of `checked` whenever possible. -/ - private base : VisibilityMap Kernel.Environment + base : VisibilityMap Kernel.Environment /-- Additional imported environment extension state for use in the language server. This field is identical to `base.extensions` in other contexts. Access via `getModuleEntries (includeServer := true)`. -/ - private serverBaseExts : Array EnvExtensionState := base.private.extensions + serverBaseExts : Array EnvExtensionState := base.private.extensions /-- Kernel environment task that is fulfilled when all asynchronously elaborated declarations are finished, containing the resulting environment. Also collects the environment extension state of @@ -553,26 +541,26 @@ structure Environment where Private view should correspond to kernel map. Public view may contain fewer constants and less data per constant. -/ - private asyncConstsMap : VisibilityMap AsyncConsts := default + asyncConstsMap : VisibilityMap AsyncConsts := default /-- Information about this asynchronous branch of the environment, if any. -/ - private asyncCtx? : Option AsyncContext := none + asyncCtx? : Option AsyncContext := none /-- Realized constants belonging to imported declarations. Must be initialized by calling `enableRealizationsForImports`. -/ - private realizedImportedConsts? : Option RealizationContext + realizedImportedConsts? : Option RealizationContext /-- Realized constants belonging to local declarations. 
This is a map from local declarations, which need to be registered synchronously using `enableRealizationsForConst`, to their realization context incl. a ref of realized constants. -/ - private realizedLocalConsts : NameMap RealizationContext := {} + realizedLocalConsts : NameMap RealizationContext := {} /-- Task collecting all realizations from the current and already-forked environment branches, akin to how `checked` collects all declarations. We only use it as a fallback in `findAsyncCore?`/`findStateAsync`; see there. -/ - private allRealizations : Task (NameMap AsyncConst) := .pure {} + allRealizations : Task (NameMap AsyncConst) := .pure {} /-- Indicates whether the environment is being used in an exported context, i.e. whether it should provide access to only the data to be imported by other modules participating in the module @@ -581,19 +569,19 @@ structure Environment where isExporting : Bool := false deriving Nonempty -@[inline] private def VisibilityMap.get (m : VisibilityMap α) (env : Environment) : α := +@[inline] def VisibilityMap.get (m : VisibilityMap α) (env : Environment) : α := if env.isExporting then m.public else m.private -private def VisibilityMap.map (m : VisibilityMap α) (f : α → β) : VisibilityMap β where +def VisibilityMap.map (m : VisibilityMap α) (f : α → β) : VisibilityMap β where «private» := f m.private «public» := f m.public -private def VisibilityMap.const (a : α) : VisibilityMap α := +def VisibilityMap.const (a : α) : VisibilityMap α := { «private» := a, «public» := a } namespace Environment -private def asyncConsts (env : Environment) : AsyncConsts := +def asyncConsts (env : Environment) : AsyncConsts := env.asyncConstsMap.get env -- Used only when the kernel calls into the interpreter, and in `Lean.Kernel.Exception.mkCtx`. In @@ -611,11 +599,11 @@ def setExporting (env : Environment) (isExporting : Bool) : Environment := { env with isExporting } /-- Consistently updates synchronous and (private) asynchronous parts of the environment without blocking. -/ -private def modifyCheckedAsync (env : Environment) (f : Kernel.Environment → Kernel.Environment) : Environment := +def modifyCheckedAsync (env : Environment) (f : Kernel.Environment → Kernel.Environment) : Environment := { env with checked := env.checked.map (sync := true) f, base.private := f env.base.private } /-- Sets synchronous and (private) asynchronous parts of the environment to the given kernel environment. -/ -private def setCheckedSync (env : Environment) (newChecked : Kernel.Environment) : Environment := +def setCheckedSync (env : Environment) (newChecked : Kernel.Environment) : Environment := { env with checked := .pure newChecked, base.private := newChecked } /-- The declaration prefix to which the environment is restricted to, if any. -/ @@ -651,11 +639,11 @@ def asyncMayContain (env : Environment) (declName : Name) : Bool := env.asyncCtx?.all (·.mayContain declName) @[extern "lean_elab_add_decl"] -private opaque addDeclCheck (env : Environment) (maxHeartbeats : USize) (decl : @& Declaration) +opaque addDeclCheck (env : Environment) (maxHeartbeats : USize) (decl : @& Declaration) (cancelTk? 
: @& Option IO.CancelToken) : Except Kernel.Exception Environment @[extern "lean_elab_add_decl_without_checking"] -private opaque addDeclWithoutChecking (env : Environment) (decl : @& Declaration) : +opaque addDeclWithoutChecking (env : Environment) (decl : @& Declaration) : Except Kernel.Exception Environment /-- @@ -686,7 +674,7 @@ def const2ModIdx (env : Environment) : Std.HashMap Name ModuleIdx := -- only needed for the lakefile.lean cache @[export lake_environment_add] -private def lakeAdd (env : Environment) (cinfo : ConstantInfo) : Environment := +def lakeAdd (env : Environment) (cinfo : ConstantInfo) : Environment := let env := env.setCheckedSync <| env.checked.get.add cinfo { env with @@ -710,10 +698,10 @@ def addExtraName (env : Environment) (name : Name) : Environment := -- forward reference due to too many cyclic dependencies @[extern "lean_is_reserved_name"] -private opaque isReservedName (env : Environment) (name : Name) : Bool +opaque isReservedName (env : Environment) (name : Name) : Bool /-- `findAsync?` after `base` access -/ -private def findAsyncCore? (env : Environment) (n : Name) (skipRealize := false) : +def findAsyncCore? (env : Environment) (n : Name) (skipRealize := false) : Option AsyncConstantInfo := do if let some c := env.asyncConsts.find? n then -- Constant for which an asynchronous elaboration task was spawned @@ -730,7 +718,7 @@ private def findAsyncCore? (env : Environment) (n : Name) (skipRealize := false) none /-- Like `findAsyncCore?`; allocating tasks is (currently?) too costly to do always. -/ -private def findTaskCore (env : Environment) (n : Name) (skipRealize := false) : +def findTaskCore (env : Environment) (n : Name) (skipRealize := false) : Task (Option AsyncConstantInfo) := Id.run do if let some c := env.asyncConsts.find? n then -- Constant for which an asynchronous elaboration task was spawned @@ -853,19 +841,19 @@ structure PromiseCheckedResult where unchanged. -/ asyncEnv : Environment - private checkedEnvPromise : IO.Promise Kernel.Environment + checkedEnvPromise : IO.Promise Kernel.Environment def realizingStack (env : Environment) : List Name := env.asyncCtx?.map (·.realizingStack) |>.getD [] /-- Creates an async context for the given declaration name, normalizing it for use as a prefix. -/ -private def enterAsync (declName : Name) (env : Environment) : Environment := +def enterAsync (declName : Name) (env : Environment) : Environment := { env with asyncCtx? := some { declPrefix := privateToUserName declName.eraseMacroScopes realizingStack := env.realizingStack } } /-- Creates an async context when realizing `declName` -/ -private def enterAsyncRealizing (declName : Name) (env : Environment) : Environment := +def enterAsyncRealizing (declName : Name) (env : Environment) : Environment := { env with asyncCtx? := some { declPrefix := .anonymous realizingStack := declName :: env.realizingStack } } @@ -894,7 +882,7 @@ def PromiseCheckedResult.commitChecked (res : PromiseCheckedResult) (env : Envir res.checkedEnvPromise.resolve env.toKernelEnv /-- Data transmitted by `AddConstAsyncResult.commitConst`. -/ -private structure ConstPromiseVal where +structure ConstPromiseVal where privateConstInfo : ConstantInfo exportedConstInfo : ConstantInfo exts : Array EnvExtensionState @@ -919,15 +907,15 @@ structure AddConstAsyncResult where left unchanged. 
-/ asyncEnv : Environment - private constName : Name - private kind : ConstantKind - private sigPromise : IO.Promise ConstantVal - private constPromise : IO.Promise ConstPromiseVal - private checkedEnvPromise : IO.Promise Kernel.Environment - private allRealizationsPromise : IO.Promise (NameMap AsyncConst) + constName : Name + kind : ConstantKind + sigPromise : IO.Promise ConstantVal + constPromise : IO.Promise ConstPromiseVal + checkedEnvPromise : IO.Promise Kernel.Environment + allRealizationsPromise : IO.Promise (NameMap AsyncConst) /-- Creates fallback info to be used in case promises are dropped unfulfilled. -/ -private def mkFallbackConstInfo (constName : Name) (kind : ConstantKind) : ConstantInfo := +def mkFallbackConstInfo (constName : Name) (kind : ConstantKind) : ConstantInfo := let fallbackVal : ConstantVal := { name := constName levelParams := [] @@ -1213,7 +1201,8 @@ abbrev ReplayFn (σ : Type) := Environment extension, can only be generated by `registerEnvExtension` that allocates a unique index for this extension into each environment's extension state's array. -/ -structure EnvExtension (σ : Type) where private mk :: +structure EnvExtension (σ : Type) where + mk :: idx : Nat mkInitial : IO σ asyncMode : EnvExtension.AsyncMode @@ -1228,7 +1217,7 @@ structure EnvExtension (σ : Type) where private mk :: namespace EnvExtension -private builtin_initialize envExtensionsRef : IO.Ref (Array (EnvExtension EnvExtensionState)) ← IO.mkRef #[] +builtin_initialize envExtensionsRef : IO.Ref (Array (EnvExtension EnvExtensionState)) ← IO.mkRef #[] /-- User-defined environment extensions are declared using the `initialize` command. @@ -1249,9 +1238,9 @@ where else return exts -private def invalidExtMsg := "invalid environment extension has been accessed" +def invalidExtMsg := "invalid environment extension has been accessed" -private unsafe def setStateImpl {σ} (ext : EnvExtension σ) (exts : Array EnvExtensionState) (s : σ) : Array EnvExtensionState := +unsafe def setStateImpl {σ} (ext : EnvExtension σ) (exts : Array EnvExtensionState) (s : σ) : Array EnvExtensionState := if h : ext.idx < exts.size then exts.set ext.idx (unsafeCast s) else @@ -1259,7 +1248,7 @@ private unsafe def setStateImpl {σ} (ext : EnvExtension σ) (exts : Array EnvEx have : Inhabited (Array EnvExtensionState) := ⟨exts⟩ panic! invalidExtMsg -private unsafe def modifyStateImpl {σ : Type} (ext : EnvExtension σ) (exts : Array EnvExtensionState) (f : σ → σ) : Array EnvExtensionState := +unsafe def modifyStateImpl {σ : Type} (ext : EnvExtension σ) (exts : Array EnvExtensionState) (f : σ → σ) : Array EnvExtensionState := if ext.idx < exts.size then exts.modify ext.idx fun s => let s : σ := unsafeCast s @@ -1270,7 +1259,7 @@ private unsafe def modifyStateImpl {σ : Type} (ext : EnvExtension σ) (exts : A have : Inhabited (Array EnvExtensionState) := ⟨exts⟩ panic! 
invalidExtMsg -private unsafe def getStateImpl {σ} [Inhabited σ] (ext : EnvExtension σ) (exts : Array EnvExtensionState) : σ := +unsafe def getStateImpl {σ} [Inhabited σ] (ext : EnvExtension σ) (exts : Array EnvExtensionState) : σ := if h : ext.idx < exts.size then unsafeCast exts[ext.idx] else @@ -1316,7 +1305,7 @@ def setState {σ : Type} (ext : EnvExtension σ) (env : Environment) (s : σ) : inline <| modifyState ext env fun _ => s -- `unsafe` fails to infer `Nonempty` here -private unsafe def getStateUnsafe {σ : Type} [Inhabited σ] (ext : EnvExtension σ) +unsafe def getStateUnsafe {σ : Type} [Inhabited σ] (ext : EnvExtension σ) (env : Environment) (asyncMode := ext.asyncMode) : σ := -- safety: `ext`'s constructor is private, so we can assume the entry at `ext.idx` is of type `σ` match asyncMode with @@ -1336,7 +1325,7 @@ opaque getState {σ : Type} [Inhabited σ] (ext : EnvExtension σ) (env : Enviro (asyncMode := ext.asyncMode) : σ -- `unsafe` fails to infer `Nonempty` here -private unsafe def findStateAsyncUnsafe {σ : Type} [Inhabited σ] +unsafe def findStateAsyncUnsafe {σ : Type} [Inhabited σ] (ext : EnvExtension σ) (env : Environment) (declName : Name) : σ := Id.run do -- analogous structure to `findAsync?`; see there -- safety: `ext`'s constructor is private, so we can assume the entry at `ext.idx` is of type `σ` @@ -1400,7 +1389,7 @@ def registerEnvExtension {σ : Type} (mkInitial : IO σ) EnvExtension.envExtensionsRef.modify fun exts => exts.push (unsafe unsafeCast ext) pure ext -private def mkInitialExtensionStates : IO (Array EnvExtensionState) := EnvExtension.mkInitialExtStates +def mkInitialExtensionStates : IO (Array EnvExtensionState) := EnvExtension.mkInitialExtStates @[export lean_mk_empty_environment] def mkEmptyEnvironment (trustLevel : UInt32 := 0) : IO Environment := do @@ -1647,7 +1636,7 @@ def OLeanLevel.adjustFileName (base : System.FilePath) : OLeanLevel → System.F | .server => base.addExtension "server" | .private => base.addExtension "private" -private def looksLikeOldCodegenName : Name → Bool +def looksLikeOldCodegenName : Name → Bool | .str _ s => s.startsWith "_cstage" || s.startsWith "_spec_" || s.startsWith "_elambda" | _ => false @@ -1663,10 +1652,14 @@ def mkModuleData (env : Environment) (level : OLeanLevel := .private) : IO Modul let kenv := env.toKernelEnv let env := env.setExporting (level != .private) let constNames := kenv.constants.foldStage2 (fun names name _ => names.push name) #[] - -- not all kernel constants may be exported - let constants := constNames.filterMap fun n => - env.find? n <|> - guard (looksLikeOldCodegenName n) *> kenv.find? n + -- not all kernel constants may be exported at `level < .private` + let constants := if level == .private then + -- (this branch makes very sure all kernel constants are exported eventually) + kenv.constants.foldStage2 (fun cs _ c => cs.push c) #[] + else + constNames.filterMap fun n => + env.find? n <|> + guard (looksLikeOldCodegenName n) *> kenv.find? 
n let constNames := constants.map (·.name) return { env.header with extraConstNames := env.checked.get.extraConstNames.toArray @@ -1696,7 +1689,7 @@ def mkExtNameMap (startingAt : Nat) : IO (Std.HashMap Name Nat) := do result := result.insert descr.name i return result -private def setImportedEntries (states : Array EnvExtensionState) (mods : Array ModuleData) +def setImportedEntries (states : Array EnvExtensionState) (mods : Array ModuleData) (startingAt : Nat := 0) : IO (Array EnvExtensionState) := do let mut states := states let extDescrs ← persistentEnvExtensionsRef.get @@ -1729,11 +1722,11 @@ private def setImportedEntries (states : Array EnvExtensionState) (mods : Array /-- "Forward declaration" for retrieving the number of builtin attributes. -/ @[extern 1 "lean_get_num_attributes"] opaque getNumBuiltinAttributes : IO Nat -private def ensureExtensionsArraySize (env : Environment) : IO Environment := do +def ensureExtensionsArraySize (env : Environment) : IO Environment := do let exts ← EnvExtension.ensureExtensionsArraySize env.base.private.extensions return env.modifyCheckedAsync ({ · with extensions := exts }) -private partial def finalizePersistentExtensions (env : Environment) (mods : Array ModuleData) (opts : Options) : IO Environment := do +partial def finalizePersistentExtensions (env : Environment) (mods : Array ModuleData) (opts : Options) : IO Environment := do loop 0 env where loop (i : Nat) (env : Environment) : IO Environment := do @@ -1760,29 +1753,29 @@ where else return env -private structure ImportedModule extends Import where +structure ImportedModule extends Import where /-- All loaded incremental compacted regions. -/ parts : Array (ModuleData × CompactedRegion) /-- The main module data that will eventually be used to construct the kernel environment. -/ -private def ImportedModule.mainModule? (self : ImportedModule) : Option ModuleData := do +def ImportedModule.mainModule? (self : ImportedModule) : Option ModuleData := do let (baseMod, _) ← self.parts[0]? self.parts[if baseMod.isModule && self.importAll then 2 else 0]?.map (·.1) /-- The main module data that will eventually be used to construct the publicly accessible constants. -/ -private def ImportedModule.publicModule? (self : ImportedModule) : Option ModuleData := do +def ImportedModule.publicModule? (self : ImportedModule) : Option ModuleData := do let (baseMod, _) ← self.parts[0]? return baseMod /-- The module data that should be used for server purposes. -/ -private def ImportedModule.serverData? (self : ImportedModule) (level : OLeanLevel) : +def ImportedModule.serverData? (self : ImportedModule) (level : OLeanLevel) : Option ModuleData := do let (baseMod, _) ← self.parts[0]? self.parts[if baseMod.isModule && level != .exported then 1 else 0]?.map (·.1) structure ImportState where - private moduleNameMap : Std.HashMap Name ImportedModule := {} - private moduleNames : Array Name := #[] + moduleNameMap : Std.HashMap Name ImportedModule := {} + moduleNames : Array Name := #[] def throwAlreadyImported (s : ImportState) (const2ModIdx : Std.HashMap Name ModuleIdx) (modIdx : Nat) (cname : Name) : IO α := do let modName := s.moduleNames[modIdx]!
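For reference, the part selection above assumes the multi-level `.olean` layout produced under the module system: a base file plus optional `.server` and `.private` companions, as in `OLeanLevel.adjustFileName`. The sketch below is not part of the patch and the build path is hypothetical; it only illustrates the expected file names.

```lean
-- Illustrative sketch only: mirrors the `.addExtension` pattern used by `OLeanLevel.adjustFileName`.
def expectedOLeanParts (base : System.FilePath) : Array System.FilePath :=
  #[base,                          -- exported level, e.g. `Foo/Bar.olean`
    base.addExtension "server",    -- server level,   e.g. `Foo/Bar.olean.server`
    base.addExtension "private"]   -- private level,  e.g. `Foo/Bar.olean.private`

#eval (expectedOLeanParts ⟨"build/lib/Foo/Bar.olean"⟩).map (·.toString)
-- #["build/lib/Foo/Bar.olean", "build/lib/Foo/Bar.olean.server", "build/lib/Foo/Bar.olean.private"]
```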
@@ -1794,7 +1787,35 @@ abbrev ImportStateM := StateRefT ImportState IO @[inline] nonrec def ImportStateM.run (x : ImportStateM α) (s : ImportState := {}) : IO (α × ImportState) := x.run s -partial def importModulesCore (imports : Array Import) (forceImportAll := true) : +def ModuleArtifacts.oleanParts (arts : ModuleArtifacts) : Array System.FilePath := Id.run do + let mut fnames := #[] + -- Opportunistically load all available parts. + -- Producer (e.g., Lake) should limit parts to the proper import level. + if let some mFile := arts.olean? then + fnames := fnames.push mFile + if let some sFile := arts.oleanServer? then + fnames := fnames.push sFile + if let some pFile := arts.oleanPrivate? then + fnames := fnames.push pFile + return fnames + +def findOLeanParts (mod : Name) : IO (Array System.FilePath) := do + let mFile ← findOLean mod + unless (← mFile.pathExists) do + throw <| IO.userError s!"object file '{mFile}' of module {mod} does not exist" + let mut fnames := #[mFile] + -- Opportunistically load all available parts. + -- Necessary because the import level may be upgraded by a later import. + let sFile := OLeanLevel.server.adjustFileName mFile + if (← sFile.pathExists) then + fnames := fnames.push sFile + let pFile := OLeanLevel.private.adjustFileName mFile + if (← pFile.pathExists) then + fnames := fnames.push pFile + return fnames + +partial def importModulesCore + (imports : Array Import) (forceImportAll := true) (arts : NameMap ModuleArtifacts := {}) : ImportStateM Unit := go where go := do for i in imports do @@ -1811,19 +1832,14 @@ where go := do if let some mod := mod.mainModule? then importModulesCore (forceImportAll := true) mod.imports continue - let mFile ← findOLean i.module - unless (← mFile.pathExists) do - throw <| IO.userError s!"object file '{mFile}' of module {i.module} does not exist" - let mut fnames := #[mFile] - -- opportunistically load all available parts in case `importPrivate` is upgraded by a later - import - -- TODO: use Lake data to retrieve ultimate import level immediately - let sFile := OLeanLevel.server.adjustFileName mFile - if (← sFile.pathExists) then - fnames := fnames.push sFile - let pFile := OLeanLevel.private.adjustFileName mFile - if (← pFile.pathExists) then - fnames := fnames.push pFile + let fnames ← + if let some arts := arts.find? i.module then + let fnames := arts.oleanParts + if fnames.isEmpty then + findOLeanParts i.module + else pure fnames + else + findOLeanParts i.module let parts ← readModuleDataParts fnames -- `imports` is identical for each part let some (baseMod, _) := parts[0]? | unreachable! @@ -1860,7 +1876,7 @@ and theorems are (mostly) opaque in Lean. For `Acc.rec`, we may unfold theorems during type-checking, but we are assuming this is not an issue in practice, and we are planning to address this issue in the future. -/ -private def subsumesInfo (cinfo₁ cinfo₂ : ConstantInfo) : Bool := +def subsumesInfo (cinfo₁ cinfo₂ : ConstantInfo) : Bool := cinfo₁.name == cinfo₂.name && cinfo₁.type == cinfo₂.type && cinfo₁.levelParams == cinfo₂.levelParams && @@ -1995,13 +2011,14 @@ as if no `module` annotations were present in the imports.
-/ def importModules (imports : Array Import) (opts : Options) (trustLevel : UInt32 := 0) (plugins : Array System.FilePath := #[]) (leakEnv := false) (loadExts := false) - (level := OLeanLevel.private) : IO Environment := profileitIO "import" opts do + (level := OLeanLevel.private) (arts : NameMap ModuleArtifacts := {}) + : IO Environment := profileitIO "import" opts do for imp in imports do if imp.module matches .anonymous then throw <| IO.userError "import failed, trying to import module with anonymous name" withImporting do plugins.forM Lean.loadPlugin - let (_, s) ← importModulesCore (forceImportAll := level == .private) imports |>.run + let (_, s) ← importModulesCore (forceImportAll := level == .private) imports arts |>.run finalizeImport (leakEnv := leakEnv) (loadExts := loadExts) (level := level) s imports opts trustLevel @@ -2035,7 +2052,7 @@ def Kernel.setDiagnostics (env : Lean.Environment) (diag : Diagnostics) : Lean.E namespace Environment @[export lean_elab_environment_update_base_after_kernel_add] -private def updateBaseAfterKernelAdd (env : Environment) (kenv : Kernel.Environment) (decl : Declaration) : Environment := { +def updateBaseAfterKernelAdd (env : Environment) (kenv : Kernel.Environment) (decl : Declaration) : Environment := { env with checked := .pure kenv -- HACK: the old codegen adds some helper constants directly to the kernel environment, we need @@ -2077,7 +2094,7 @@ def displayStats (env : Environment) : IO Unit := do @[extern "lean_eval_const"] unsafe opaque evalConst (α) (env : @& Environment) (opts : @& Options) (constName : @& Name) : Except String α -private def throwUnexpectedType {α} (typeName : Name) (constName : Name) : ExceptT String Id α := +def throwUnexpectedType {α} (typeName : Name) (constName : Name) : ExceptT String Id α := throw ("unexpected type at '" ++ toString constName ++ "', `" ++ toString typeName ++ "` expected") /-- diff --git a/src/Lean/Expr.lean b/src/Lean/Expr.lean index 3ff6c85ee644..1bae6fbfcdb2 100644 --- a/src/Lean/Expr.lean +++ b/src/Lean/Expr.lean @@ -7,6 +7,7 @@ prelude import Init.Data.Hashable import Init.Data.Int import Lean.Data.KVMap +import Lean.Data.Json import Lean.Data.SMap import Lean.Level import Std.Data.HashSet.Basic @@ -19,7 +20,7 @@ inductive Literal where | natVal (val : Nat) /-- String literal -/ | strVal (val : String) - deriving Inhabited, BEq, Repr + deriving Inhabited, ToJson, BEq, Repr protected def Literal.hash : Literal → UInt64 | .natVal v => hash v @@ -76,7 +77,7 @@ inductive BinderInfo where | strictImplicit /-- Local instance binder annotataion, e.g., `[Decidable α]` -/ | instImplicit - deriving Inhabited, BEq, Repr + deriving Inhabited, ToJson, BEq, Repr def BinderInfo.hash : BinderInfo → UInt64 | .default => 947 @@ -226,7 +227,7 @@ a `NameGenerator`. -/ structure FVarId where name : Name - deriving Inhabited, BEq, Hashable + deriving Inhabited, ToJson, BEq, Hashable instance : Repr FVarId where reprPrec n p := reprPrec n.name p @@ -239,6 +240,7 @@ def FVarIdSet := RBTree FVarId (Name.quickCmp ·.name ·.name) instance : ForIn m FVarIdSet FVarId := inferInstanceAs (ForIn _ (RBTree ..) ..) 
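For reference, the `deriving ToJson` clauses added above use Lean's standard JSON deriving. The sketch below is not part of the patch; the `DemoId` structure is a hypothetical stand-in that shows the kind of instance such a clause generates.

```lean
import Lean
open Lean (ToJson toJson)

-- Hypothetical stand-in illustrating `deriving ToJson`; not part of the patch.
structure DemoId where
  name : String
  deriving ToJson

-- Structures serialize to JSON objects keyed by field name.
#eval (toJson ({ name := "x" } : DemoId)).compress  -- roughly: {"name": "x"}
```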
+ def FVarIdSet.insert (s : FVarIdSet) (fvarId : FVarId) : FVarIdSet := RBTree.insert s fvarId @@ -262,10 +264,14 @@ instance : EmptyCollection (FVarIdMap α) := inferInstanceAs (EmptyCollection (R instance : Inhabited (FVarIdMap α) where default := {} +instance : ToJson (FVarIdMap α) where + toJson _ := "" + + /-- Universe metavariable Id -/ structure MVarId where name : Name - deriving Inhabited, BEq, Hashable, Repr + deriving Inhabited, ToJson, BEq, Hashable, Repr instance : Repr MVarId where reprPrec n p := reprPrec n.name p diff --git a/src/Lean/Language/Lean.lean b/src/Lean/Language/Lean.lean index d448b58bd890..2f2525a3041c 100644 --- a/src/Lean/Language/Lean.lean +++ b/src/Lean/Language/Lean.lean @@ -283,10 +283,16 @@ simple uses, these can be computed eagerly without looking at the imports. structure SetupImportsResult where /-- Module name of the file being processed. -/ mainModuleName : Name + /-- Whether the file is participating in the module system. -/ + isModule : Bool := false + /-- Direct imports of the file being processed. -/ + imports : Array Import /-- Options provided outside of the file content, e.g. on the cmdline or in the lakefile. -/ opts : Options /-- Kernel trust level. -/ trustLevel : UInt32 := 0 + /-- Pre-resolved artifacts of related modules (e.g., this module's transitive imports). -/ + modules : NameMap ModuleArtifacts := {} /-- Lean plugins to load as part of the environment setup. -/ plugins : Array System.FilePath := #[] @@ -367,7 +373,7 @@ General notes: the `sync` parameter on `parseCmd` and spawn an elaboration task when we leave it. -/ partial def process - (setupImports : TSyntax ``Parser.Module.header → ProcessingT IO (Except HeaderProcessedSnapshot SetupImportsResult)) + (setupImports : HeaderSyntax → ProcessingT IO (Except HeaderProcessedSnapshot SetupImportsResult)) (old? : Option InitialSnapshot) : ProcessingM InitialSnapshot := do parseHeader old? |>.run (old?.map (·.ictx)) where @@ -453,7 +459,7 @@ where } } - processHeader (stx : TSyntax ``Parser.Module.header) (parserState : Parser.ModuleParserState) : + processHeader (stx : HeaderSyntax) (parserState : Parser.ModuleParserState) : LeanProcessingM (SnapshotTask HeaderProcessedSnapshot) := do let ctx ← read SnapshotTask.ofIO stx none (some ⟨0, ctx.input.endPos⟩) <| @@ -471,9 +477,9 @@ where if !stx.raw[0].isNone && !experimental.module.get opts then throw <| IO.Error.userError "`module` keyword is experimental and not enabled here" -- allows `headerEnv` to be leaked, which would live until the end of the process anyway - let (headerEnv, msgLog) ← Elab.processHeader (leakEnv := true) - (mainModule := setup.mainModuleName) stx opts .empty ctx.toInputContext setup.trustLevel - setup.plugins + let (headerEnv, msgLog) ← Elab.processHeaderCore (leakEnv := true) + stx.startPos setup.imports setup.isModule setup.opts .empty ctx.toInputContext + setup.trustLevel setup.plugins setup.mainModuleName setup.modules let stopTime := (← IO.monoNanosNow).toFloat / 1000000000 let diagnostics := (← Snapshot.Diagnostics.ofMessageLog msgLog) if msgLog.hasErrors then diff --git a/src/Lean/Message.lean b/src/Lean/Message.lean index b2c1c6b8b94b..47c52777ee5d 100644 --- a/src/Lean/Message.lean +++ b/src/Lean/Message.lean @@ -107,11 +107,12 @@ Lazy message data production, with access to the context as given by a surrounding `MessageData.withContext` (which is expected to exist). 
-/ def lazy (f : PPContext → BaseIO MessageData) - (hasSyntheticSorry : MetavarContext → Bool := fun _ => false) : MessageData := + (hasSyntheticSorry : MetavarContext → Bool := fun _ => false) + (onMissingContext : Unit → BaseIO MessageData := + fun _ => pure (.ofFormat "(invalid MessageData.lazy, missing context)")) : MessageData := .ofLazy (hasSyntheticSorry := hasSyntheticSorry) fun ctx? => do let msg ← match ctx? with - | .none => - pure (.ofFormat "(invalid MessageData.lazy, missing context)") -- see `addMessageContext` + | .none => onMissingContext () | .some ctx => f ctx return Dynamic.mk msg @@ -146,6 +147,13 @@ def kind : MessageData → Name | tagged n _ => n | _ => .anonymous +def isTrace : MessageData → Bool + | withContext _ msg => msg.isTrace + | withNamingContext _ msg => msg.isTrace + | tagged _ msg => msg.isTrace + | .trace _ _ _ => true + | _ => false + /-- An empty message. -/ def nil : MessageData := ofFormat Format.nil @@ -313,22 +321,45 @@ def ofList : List MessageData → MessageData def ofArray (msgs : Array MessageData) : MessageData := ofList msgs.toList -/-- Puts `MessageData` into a comma-separated list with `"or"` at the back (no Oxford comma). -Best used on non-empty lists; returns `"– none –"` for an empty list. -/ +/-- +Puts `MessageData` into a comma-separated list with `"or"` at the back (with the serial comma). + +Best used on non-empty lists; returns `"– none –"` for an empty list. +-/ def orList (xs : List MessageData) : MessageData := match xs with | [] => "– none –" - | [x] => "'" ++ x ++ "'" - | _ => joinSep (xs.dropLast.map (fun x => "'" ++ x ++ "'")) ", " ++ " or '" ++ xs.getLast! ++ "'" + | [x] => x + | [x₀, x₁] => x₀ ++ " or " ++ x₁ + | _ => joinSep xs.dropLast ", " ++ ", or " ++ xs.getLast! + +/-- +Puts `MessageData` into a comma-separated list with `"and"` at the back (with the serial comma). -/-- Puts `MessageData` into a comma-separated list with `"and"` at the back (no Oxford comma). -Best used on non-empty lists; returns `"– none –"` for an empty list. -/ +Best used on non-empty lists; returns `"– none –"` for an empty list. +-/ def andList (xs : List MessageData) : MessageData := match xs with | [] => "– none –" | [x] => x - | _ => joinSep xs.dropLast ", " ++ " and " ++ xs.getLast! + | [x₀, x₁] => x₀ ++ " and " ++ x₁ + | _ => joinSep xs.dropLast ", " ++ ", and " ++ xs.getLast! +/-- +Produces a labeled note that can be appended to an error message. +-/ +def note (note : MessageData) : MessageData := + -- Note: we do not use the built-in string coercion because it can prevent proper line breaks + .tagged `note <| .compose (.ofFormat .line) <| .compose (.ofFormat .line) <| + .compose "Note: " note + +/-- +Produces a labeled hint without an associated code action (non-monadic variant of +`MessageData.hint`). +-/ +def hint' (hint : MessageData) : MessageData := + .tagged `hint <| .compose (.ofFormat .line) <| .compose (.ofFormat .line) <| + .compose "Hint: " hint instance : Coe (List MessageData) MessageData := ⟨ofList⟩ instance : Coe (List Expr) MessageData := ⟨fun es => ofList <| es.map ofExpr⟩ @@ -400,6 +431,9 @@ namespace Message @[inherit_doc MessageData.kind] abbrev kind (msg : Message) := msg.data.kind +def isTrace (msg : Message) : Bool := + msg.data.isTrace + /-- Serializes the message, converting its data into a string and saving its kind. 
-/ @[inline] def serialize (msg : Message) : BaseIO SerialMessage := do return {msg with kind := msg.kind, data := ← msg.data.toString} @@ -505,6 +539,38 @@ def indentD (msg : MessageData) : MessageData := def indentExpr (e : Expr) : MessageData := indentD e +/-- +Returns the character length of the message when rendered. + +Note: this is a potentially expensive operation that is only relevant to message data that are +actually rendered. Consider using this function in lazy message data to avoid unnecessary +computation for messages that are not displayed. +-/ +private def MessageData.formatLength (ctx : PPContext) (msg : MessageData) : BaseIO Nat := do + let { env, mctx, lctx, opts, ..} := ctx + let fmt ← msg.format (some { env, mctx, lctx, opts }) + return fmt.pretty.length + + +/-- +Renders an expression `e` inline in a message unless it will exceed `maxInlineLength` characters, in +which case the expression is indented on a new line. + +Note that the output of this function is formatted with preceding and trailing space included. Thus, +in `m₁ ++ inlineExpr e ++ m₂`, `m₁` should not end with a space or new line, nor should `m₂` begin +with one. +-/ +def inlineExpr (e : Expr) (maxInlineLength := 30) : MessageData := + .lazy + (fun ctx => do + let msg := MessageData.ofExpr e + if (← msg.formatLength ctx) > maxInlineLength then + return indentD msg ++ "\n" + else + return " " ++ msg ++ " ") + (fun mctx => instantiateMVarsCore mctx e |>.1.hasSyntheticSorry) + (fun () => return " " ++ MessageData.ofExpr e ++ " ") + /-- Atom quotes -/ def aquote (msg : MessageData) : MessageData := "「" ++ msg ++ "」" @@ -607,4 +673,9 @@ def toMessageData (e : Kernel.Exception) (opts : Options) : MessageData := | interrupted => "(kernel) interrupted" end Kernel.Exception + +/-- Helper functions for creating a `MessageData` with the given header and elements. -/ +def toTraceElem [ToMessageData α] (e : α) (cls : Name := Name.mkSimple "_") : MessageData := + .trace { cls } (toMessageData e) #[] + end Lean diff --git a/src/Lean/Meta.lean b/src/Lean/Meta.lean index dda164eb5073..619a4ad64d13 100644 --- a/src/Lean/Meta.lean +++ b/src/Lean/Meta.lean @@ -52,3 +52,5 @@ import Lean.Meta.CheckTactic import Lean.Meta.Canonicalizer import Lean.Meta.Diagnostics import Lean.Meta.BinderNameHint +import Lean.Meta.TryThis +import Lean.Meta.Hint diff --git a/src/Lean/Meta/Basic.lean b/src/Lean/Meta/Basic.lean index dd01baa1e934..e58ca7ef94a2 100644 --- a/src/Lean/Meta/Basic.lean +++ b/src/Lean/Meta/Basic.lean @@ -183,7 +183,7 @@ structure Config where deriving Inhabited, Repr /-- Convert `isDefEq` and `WHNF` relevant parts into a key for caching results -/ -private def Config.toKey (c : Config) : UInt64 := +def Config.toKey (c : Config) : UInt64 := c.transparency.toUInt64 ||| (c.foApprox.toUInt64 <<< 2) ||| (c.ctxApprox.toUInt64 <<< 3) ||| @@ -204,7 +204,7 @@ private def Config.toKey (c : Config) : UInt64 := /-- Configuration with key produced by `Config.toKey`. -/ structure ConfigWithKey where - private mk :: + mk :: config : Config key : UInt64 deriving Inhabited @@ -287,7 +287,7 @@ structure FunInfo where Key for the function information cache. -/ structure InfoCacheKey where - private mk :: + mk :: /-- key produced using `Config.toKey`. -/ configKey : UInt64 /-- The function being cached information about. It is quite often an `Expr.const`. 
-/ @@ -325,7 +325,7 @@ abbrev SynthInstanceCache := PersistentHashMap SynthInstanceCacheKey (Option Abs -- Key for `InferType` and `WHNF` caches structure ExprConfigCacheKey where - private mk :: + expr : Expr configKey : UInt64 deriving Inhabited @@ -343,7 +343,7 @@ abbrev FunInfoCache := PersistentHashMap InfoCacheKey FunInfo abbrev WhnfCache := PersistentHashMap ExprConfigCacheKey Expr structure DefEqCacheKey where - private mk :: + lhs : Expr rhs : Expr configKey : UInt64 @@ -796,7 +796,7 @@ def whnfForall (e : Expr) : MetaM Expr := do protected def withIncRecDepth (x : n α) : n α := mapMetaM (withIncRecDepth (m := MetaM)) x -private def mkFreshExprMVarAtCore +def mkFreshExprMVarAtCore (mvarId : MVarId) (lctx : LocalContext) (localInsts : LocalInstances) (type : Expr) (kind : MetavarKind) (userName : Name) (numScopeArgs : Nat) : MetaM Expr := do modifyMCtx fun mctx => mctx.addExprMVarDecl mvarId userName lctx localInsts type kind numScopeArgs; return mkMVar mvarId @@ -812,10 +812,10 @@ def mkFreshLevelMVar : MetaM Level := do modifyMCtx fun mctx => mctx.addLevelMVarDecl mvarId; return mkLevelMVar mvarId -private def mkFreshExprMVarCore (type : Expr) (kind : MetavarKind) (userName : Name) : MetaM Expr := do +def mkFreshExprMVarCore (type : Expr) (kind : MetavarKind) (userName : Name) : MetaM Expr := do mkFreshExprMVarAt (← getLCtx) (← getLocalInstances) type kind userName -private def mkFreshExprMVarImpl (type? : Option Expr) (kind : MetavarKind) (userName : Name) : MetaM Expr := +def mkFreshExprMVarImpl (type? : Option Expr) (kind : MetavarKind) (userName : Name) : MetaM Expr := match type? with | some type => mkFreshExprMVarCore type kind userName | none => do @@ -832,7 +832,7 @@ def mkFreshTypeMVar (kind := MetavarKind.natural) (userName := Name.anonymous) : /-- Low-level version of `MkFreshExprMVar` which allows users to create/reserve a `mvarId` using `mkFreshId`, and then later create the metavar using this method. -/ -private def mkFreshExprMVarWithIdCore (mvarId : MVarId) (type : Expr) +def mkFreshExprMVarWithIdCore (mvarId : MVarId) (type : Expr) (kind : MetavarKind := MetavarKind.natural) (userName : Name := Name.anonymous) (numScopeArgs : Nat := 0) : MetaM Expr := do mkFreshExprMVarAtCore mvarId (← getLCtx) (← getLocalInstances) type kind userName numScopeArgs @@ -1147,7 +1147,7 @@ def withTrackingZetaDeltaSet (s : FVarIdSet) : n α → n α := @[inline] def withoutProofIrrelevance (x : n α) : n α := withConfig (fun cfg => { cfg with proofIrrelevance := false }) x -@[inline] private def Context.setTransparency (ctx : Context) (transparency : TransparencyMode) : Context := +@[inline] def Context.setTransparency (ctx : Context) (transparency : TransparencyMode) : Context := let config := { ctx.config with transparency } -- Recall that `transparency` is stored in the first 2 bits let configKey : UInt64 := ((ctx.configKey >>> (2 : UInt64)) <<< 2) ||| transparency.toUInt64 @@ -1186,7 +1186,7 @@ Recall that `.all > .default > .instances > .reducible`. 
withConfig (fun config => { config with assignSyntheticOpaque := true }) x /-- Save cache, execute `x`, restore cache -/ -@[inline] private def savingCacheImpl (x : MetaM α) : MetaM α := do +@[inline] def savingCacheImpl (x : MetaM α) : MetaM α := do let savedCache := (← get).cache try x finally modify fun s => { s with cache := savedCache } @@ -1199,7 +1199,7 @@ def getTheoremInfo (info : ConstantInfo) : MetaM (Option ConstantInfo) := do else return none -private def getDefInfoTemp (info : ConstantInfo) : MetaM (Option ConstantInfo) := do +def getDefInfoTemp (info : ConstantInfo) : MetaM (Option ConstantInfo) := do match (← getTransparency) with | .all => return some info | .default => return some info @@ -1213,14 +1213,14 @@ private def getDefInfoTemp (info : ConstantInfo) : MetaM (Option ConstantInfo) : This method is only used to implement `isClassQuickConst?`. It is very similar to `getUnfoldableConst?`, but it returns none when `TransparencyMode.instances` and `constName` is an instance. This difference should be irrelevant for `isClassQuickConst?`. -/ -private def getConstTemp? (constName : Name) : MetaM (Option ConstantInfo) := do +def getConstTemp? (constName : Name) : MetaM (Option ConstantInfo) := do match (← getEnv).find? constName with | some (info@(ConstantInfo.thmInfo _)) => getTheoremInfo info | some (info@(ConstantInfo.defnInfo _)) => getDefInfoTemp info | some info => pure (some info) | none => throwUnknownConstant constName -private def isClassQuickConst? (constName : Name) : MetaM (LOption Name) := do +def isClassQuickConst? (constName : Name) : MetaM (LOption Name) := do if isClass (← getEnv) constName then return .some constName else @@ -1228,7 +1228,7 @@ private def isClassQuickConst? (constName : Name) : MetaM (LOption Name) := do | some (.defnInfo ..) => return .undef -- We may be able to unfold the definition | _ => return .none -private partial def isClassQuick? : Expr → MetaM (LOption Name) +partial def isClassQuick? : Expr → MetaM (LOption Name) | .bvar .. => return .none | .lit .. => return .none | .fvar .. => return .none @@ -1253,7 +1253,7 @@ private partial def isClassQuick? : Expr → MetaM (LOption Name) | _ => return .undef | _ => return .none -private def withNewLocalInstanceImp (className : Name) (fvar : Expr) (k : MetaM α) : MetaM α := do +def withNewLocalInstanceImp (className : Name) (fvar : Expr) (k : MetaM α) : MetaM α := do let localDecl ← getFVarLocalDecl fvar if localDecl.isImplementationDetail then k @@ -1265,7 +1265,7 @@ private def withNewLocalInstanceImp (className : Name) (fvar : Expr) (k : MetaM def withNewLocalInstance (className : Name) (fvar : Expr) : n α → n α := mapMetaM <| withNewLocalInstanceImp className fvar -private def fvarsSizeLtMaxFVars (fvars : Array Expr) (maxFVars? : Option Nat) : Bool := +def fvarsSizeLtMaxFVars (fvars : Array Expr) (maxFVars? : Option Nat) : Bool := match maxFVars? with | some maxFVars => fvars.size < maxFVars | none => true @@ -1277,7 +1277,7 @@ mutual - `isClassExpensive` is defined later. - `isClassExpensive` uses `whnf` which depends (indirectly) on the set of local instances. -/ - private partial def withNewLocalInstancesImp + partial def withNewLocalInstancesImp (fvars : Array Expr) (i : Nat) (k : MetaM α) : MetaM α := do if h : i < fvars.size then let fvar := fvars[i] @@ -1320,7 +1320,7 @@ mutual If `cleanupAnnotations` is `true`, we apply `Expr.cleanupAnnotations` to each type in the telescope. 
-/ - private partial def forallTelescopeReducingAuxAux + partial def forallTelescopeReducingAuxAux (reducing : Bool) (maxFVars? : Option Nat) (type : Expr) (k : Array Expr → Expr → MetaM α) (cleanupAnnotations : Bool) : MetaM α := do @@ -1354,7 +1354,7 @@ mutual k fvars type process (← getLCtx) #[] 0 type - private partial def forallTelescopeReducingAux (type : Expr) (maxFVars? : Option Nat) (k : Array Expr → Expr → MetaM α) (cleanupAnnotations : Bool) : MetaM α := do + partial def forallTelescopeReducingAux (type : Expr) (maxFVars? : Option Nat) (k : Array Expr → Expr → MetaM α) (cleanupAnnotations : Bool) : MetaM α := do match maxFVars? with | some 0 => k #[] type | _ => do @@ -1366,7 +1366,7 @@ mutual -- Helper method for isClassExpensive? - private partial def isClassApp? (type : Expr) (instantiated := false) : MetaM (Option Name) := do + partial def isClassApp? (type : Expr) (instantiated := false) : MetaM (Option Name) := do match type.getAppFn with | .const c _ => let env ← getEnv @@ -1382,11 +1382,11 @@ mutual isClassApp? (← instantiateMVars type) true | _ => return none - private partial def isClassExpensive? (type : Expr) : MetaM (Option Name) := + partial def isClassExpensive? (type : Expr) : MetaM (Option Name) := withReducible do -- when testing whether a type is a type class, we only unfold reducible constants. forallTelescopeReducingAux type none (cleanupAnnotations := false) fun _ type => isClassApp? type - private partial def isClassImp? (type : Expr) : MetaM (Option Name) := do + partial def isClassImp? (type : Expr) : MetaM (Option Name) := do match (← isClassQuick? type) with | .none => return none | .some c => return (some c) @@ -1407,13 +1407,13 @@ end def isClass? (type : Expr) : MetaM (Option Name) := try isClassImp? type catch _ => return none -private def withNewLocalInstancesImpAux (fvars : Array Expr) (j : Nat) : n α → n α := +def withNewLocalInstancesImpAux (fvars : Array Expr) (j : Nat) : n α → n α := mapMetaM <| withNewLocalInstancesImp fvars j partial def withNewLocalInstances (fvars : Array Expr) (j : Nat) : n α → n α := mapMetaM <| withNewLocalInstancesImpAux fvars j -@[inline] private def forallTelescopeImp (type : Expr) (k : Array Expr → Expr → MetaM α) (cleanupAnnotations : Bool) : MetaM α := do +@[inline] def forallTelescopeImp (type : Expr) (k : Array Expr → Expr → MetaM α) (cleanupAnnotations : Bool) : MetaM α := do forallTelescopeReducingAuxAux (reducing := false) (maxFVars? := none) type k cleanupAnnotations /-- @@ -1443,7 +1443,7 @@ and then builds the lambda telescope term for the new term. def mapForallTelescope (f : Expr → MetaM Expr) (forallTerm : Expr) : MetaM Expr := do mapForallTelescope' (fun _ e => f e) forallTerm -private def forallTelescopeReducingImp (type : Expr) (k : Array Expr → Expr → MetaM α) (cleanupAnnotations : Bool) : MetaM α := +def forallTelescopeReducingImp (type : Expr) (k : Array Expr → Expr → MetaM α) (cleanupAnnotations : Bool) : MetaM α := forallTelescopeReducingAux type (maxFVars? := none) k cleanupAnnotations /-- @@ -1455,7 +1455,7 @@ private def forallTelescopeReducingImp (type : Expr) (k : Array Expr → Expr def forallTelescopeReducing (type : Expr) (k : Array Expr → Expr → n α) (cleanupAnnotations := false) : n α := map2MetaM (fun k => forallTelescopeReducingImp type k cleanupAnnotations) k -private def forallBoundedTelescopeImp (type : Expr) (maxFVars? : Option Nat) (k : Array Expr → Expr → MetaM α) (cleanupAnnotations : Bool) : MetaM α := +def forallBoundedTelescopeImp (type : Expr) (maxFVars? 
: Option Nat) (k : Array Expr → Expr → MetaM α) (cleanupAnnotations : Bool) : MetaM α := forallTelescopeReducingAux type maxFVars? k cleanupAnnotations /-- @@ -1467,7 +1467,7 @@ private def forallBoundedTelescopeImp (type : Expr) (maxFVars? : Option Nat) (k def forallBoundedTelescope (type : Expr) (maxFVars? : Option Nat) (k : Array Expr → Expr → n α) (cleanupAnnotations := false) : n α := map2MetaM (fun k => forallBoundedTelescopeImp type maxFVars? k cleanupAnnotations) k -private partial def lambdaTelescopeImp (e : Expr) (consumeLet : Bool) (maxFVars? : Option Nat) +partial def lambdaTelescopeImp (e : Expr) (consumeLet : Bool) (maxFVars? : Option Nat) (k : Array Expr → Expr → MetaM α) (cleanupAnnotations := false) : MetaM α := do process consumeLet (← getLCtx) #[] e where @@ -1531,7 +1531,7 @@ def getParamNames (declName : Name) : MetaM (Array Name) := do return localDecl.userName -- `kind` specifies the metavariable kind for metavariables not corresponding to instance implicit `[ ... ]` arguments. -private partial def forallMetaTelescopeReducingAux +partial def forallMetaTelescopeReducingAux (e : Expr) (reducing : Bool) (maxMVars? : Option Nat) (kind : MetavarKind) : MetaM (Array Expr × Array BinderInfo × Expr) := process #[] #[] 0 e where @@ -1599,13 +1599,13 @@ where process mvars bis j b | _ => finalize () -private def withNewFVar (fvar fvarType : Expr) (k : Expr → MetaM α) : MetaM α := do +def withNewFVar (fvar fvarType : Expr) (k : Expr → MetaM α) : MetaM α := do if let some c ← isClass? fvarType then withNewLocalInstance c fvar <| k fvar else k fvar -private def withLocalDeclImp (n : Name) (bi : BinderInfo) (type : Expr) (k : Expr → MetaM α) (kind : LocalDeclKind) : MetaM α := do +def withLocalDeclImp (n : Name) (bi : BinderInfo) (type : Expr) (k : Expr → MetaM α) (kind : LocalDeclKind) : MetaM α := do let fvarId ← mkFreshFVarId let ctx ← read let lctx := ctx.lctx.mkLocalDecl fvarId n type bi kind @@ -1667,7 +1667,7 @@ def withLocalDeclsDND [Inhabited α] (declInfos : Array (Name × Expr)) (k : (xs withLocalDeclsD (declInfos.map (fun (name, typeCtor) => (name, fun _ => pure typeCtor))) k -private def withAuxDeclImp (shortDeclName : Name) (type : Expr) (declName : Name) (k : Expr → MetaM α) : MetaM α := do +def withAuxDeclImp (shortDeclName : Name) (type : Expr) (declName : Name) (k : Expr → MetaM α) : MetaM α := do let fvarId ← mkFreshFVarId let ctx ← read let lctx := ctx.lctx.mkAuxDecl fvarId shortDeclName type declName @@ -1682,7 +1682,7 @@ private def withAuxDeclImp (shortDeclName : Name) (type : Expr) (declName : Name def withAuxDecl (shortDeclName : Name) (type : Expr) (declName : Name) (k : Expr → n α) : n α := map1MetaM (fun k => withAuxDeclImp shortDeclName type declName k) k -private def withNewBinderInfosImp (bs : Array (FVarId × BinderInfo)) (k : MetaM α) : MetaM α := do +def withNewBinderInfosImp (bs : Array (FVarId × BinderInfo)) (k : MetaM α) : MetaM α := do let lctx := bs.foldl (init := (← getLCtx)) fun lctx (fvarId, bi) => lctx.setBinderInfo fvarId bi withReader (fun ctx => { ctx with lctx := lctx }) k @@ -1702,7 +1702,7 @@ def withInstImplicitAsImplict (xs : Array Expr) (k : MetaM α) : MetaM α := do return none withNewBinderInfos newBinderInfos k -private def withLetDeclImp (n : Name) (type : Expr) (val : Expr) (k : Expr → MetaM α) (kind : LocalDeclKind) : MetaM α := do +def withLetDeclImp (n : Name) (type : Expr) (val : Expr) (k : Expr → MetaM α) (kind : LocalDeclKind) : MetaM α := do let fvarId ← mkFreshFVarId let ctx ← read let lctx := ctx.lctx.mkLetDecl fvarId n 
type val (nonDep := false) kind @@ -1735,7 +1735,7 @@ def withLocalInstancesImp (decls : List LocalDecl) (k : MetaM α) : MetaM α := def withLocalInstances (decls : List LocalDecl) : n α → n α := mapMetaM <| withLocalInstancesImp decls -private def withExistingLocalDeclsImp (decls : List LocalDecl) (k : MetaM α) : MetaM α := do +def withExistingLocalDeclsImp (decls : List LocalDecl) (k : MetaM α) : MetaM α := do let ctx ← read let lctx := decls.foldl (fun (lctx : LocalContext) decl => lctx.addDecl decl) ctx.lctx withReader (fun ctx => { ctx with lctx := lctx }) do @@ -1754,7 +1754,7 @@ private def withExistingLocalDeclsImp (decls : List LocalDecl) (k : MetaM α) : def withExistingLocalDecls (decls : List LocalDecl) : n α → n α := mapMetaM <| withExistingLocalDeclsImp decls -private def withNewMCtxDepthImp (allowLevelAssignments : Bool) (x : MetaM α) : MetaM α := do +def withNewMCtxDepthImp (allowLevelAssignments : Bool) (x : MetaM α) : MetaM α := do let saved ← get modify fun s => { s with mctx := s.mctx.incDepth allowLevelAssignments, postponed := {} } try @@ -1782,7 +1782,7 @@ assigned. (This is used by TC synthesis.) def withNewMCtxDepth (k : n α) (allowLevelAssignments := false) : n α := mapMetaM (withNewMCtxDepthImp allowLevelAssignments) k -private def withLocalContextImp (lctx : LocalContext) (localInsts : LocalInstances) (x : MetaM α) : MetaM α := do +def withLocalContextImp (lctx : LocalContext) (localInsts : LocalInstances) (x : MetaM α) : MetaM α := do withReader (fun ctx => { ctx with lctx := lctx, localInstances := localInsts }) do x @@ -1811,7 +1811,7 @@ def withErasedFVars [MonadLCtx n] [MonadLiftT MetaM n] (fvarIds : Array FVarId) let localInsts' := localInsts.filter (!fvarIds.contains ·.fvar.fvarId!) withLCtx lctx' localInsts' k -private def withMVarContextImp (mvarId : MVarId) (x : MetaM α) : MetaM α := do +def withMVarContextImp (mvarId : MVarId) (x : MetaM α) : MetaM α := do let mvarDecl ← mvarId.getDecl withLocalContextImp mvarDecl.lctx mvarDecl.localInstances x @@ -1822,7 +1822,7 @@ different from the current ones. -/ def _root_.Lean.MVarId.withContext (mvarId : MVarId) : n α → n α := mapMetaM <| withMVarContextImp mvarId -private def withMCtxImp (mctx : MetavarContext) (x : MetaM α) : MetaM α := do +def withMCtxImp (mctx : MetavarContext) (x : MetaM α) : MetaM α := do let mctx' ← getMCtx setMCtx mctx try x finally setMCtx mctx' @@ -1847,14 +1847,14 @@ def withoutModifyingMCtx : n α → n α := resetCache setMCtx mctx -@[inline] private def approxDefEqImp (x : MetaM α) : MetaM α := +@[inline] def approxDefEqImp (x : MetaM α) : MetaM α := withConfig (fun config => { config with foApprox := true, ctxApprox := true, quasiPatternApprox := true}) x /-- Execute `x` using approximate unification: `foApprox`, `ctxApprox` and `quasiPatternApprox`. 
-/ @[inline] def approxDefEq : n α → n α := mapMetaM approxDefEqImp -@[inline] private def fullApproxDefEqImp (x : MetaM α) : MetaM α := +@[inline] def fullApproxDefEqImp (x : MetaM α) : MetaM α := withConfig (fun config => { config with foApprox := true, ctxApprox := true, quasiPatternApprox := true, constApprox := true }) x /-- @@ -1902,7 +1902,7 @@ def setInlineAttribute (declName : Name) (kind := Compiler.InlineAttributeKind.i | .ok env => setEnv env | .error msg => throwError msg -private partial def instantiateForallAux (ps : Array Expr) (i : Nat) (e : Expr) : MetaM Expr := do +partial def instantiateForallAux (ps : Array Expr) (i : Nat) (e : Expr) : MetaM Expr := do if h : i < ps.size then let p := ps[i] match (← whnf e) with @@ -1915,7 +1915,7 @@ private partial def instantiateForallAux (ps : Array Expr) (i : Nat) (e : Expr) def instantiateForall (e : Expr) (ps : Array Expr) : MetaM Expr := instantiateForallAux ps 0 e -private partial def instantiateLambdaAux (ps : Array Expr) (i : Nat) (e : Expr) : MetaM Expr := do +partial def instantiateLambdaAux (ps : Array Expr) (i : Nat) (e : Expr) : MetaM Expr := do if h : i < ps.size then let p := ps[i] match (← whnf e) with @@ -2017,7 +2017,7 @@ instance : Alternative MetaM where failure := fun {_} => throwError "failed" orElse := Meta.orElse -@[inline] private def orelseMergeErrorsImp (x y : MetaM α) +@[inline] def orelseMergeErrorsImp (x y : MetaM α) (mergeRef : Syntax → Syntax → Syntax := fun r₁ _ => r₁) (mergeMsg : MessageData → MessageData → MessageData := fun m₁ m₂ => m₁ ++ Format.line ++ m₂) : MetaM α := do let env ← getEnv @@ -2101,14 +2101,14 @@ def getResetPostponed : MetaM (PersistentArray PostponedEntry) := do return ps /-- Annotate any constant and sort in `e` that satisfies `p` with `pp.universes true` -/ -private def exposeRelevantUniverses (e : Expr) (p : Level → Bool) : Expr := +def exposeRelevantUniverses (e : Expr) (p : Level → Bool) : Expr := e.replace fun e => match e with | .const _ us => if us.any p then some (e.setPPUniverses true) else none | .sort u => if p u then some (e.setPPUniverses true) else none | _ => none -private def mkLevelErrorMessageCore (header : String) (entry : PostponedEntry) : MetaM MessageData := do +def mkLevelErrorMessageCore (header : String) (entry : PostponedEntry) : MetaM MessageData := do match entry.ctx? with | none => return m!"{header}{indentD m!"{entry.lhs} =?= {entry.rhs}"}" @@ -2130,7 +2130,7 @@ def mkLevelStuckErrorMessage (entry : PostponedEntry) : MetaM MessageData := do def mkLevelErrorMessage (entry : PostponedEntry) : MetaM MessageData := do mkLevelErrorMessageCore "failed to solve universe constraint" entry -private def processPostponedStep (exceptionOnFailure : Bool) : MetaM Bool := do +def processPostponedStep (exceptionOnFailure : Bool) : MetaM Bool := do let ps ← getResetPostponed for p in ps do unless (← withReader (fun ctx => { ctx with defEqCtx? := p.ctx? 
}) <| isLevelDefEqAux p.lhs p.rhs) do @@ -2291,7 +2291,7 @@ def instantiateMVarsIfMVarApp (e : Expr) : MetaM Expr := do else return e -private partial def setAllDiagRanges (snap : Language.SnapshotTree) (pos endPos : Position) : +partial def setAllDiagRanges (snap : Language.SnapshotTree) (pos endPos : Position) : BaseIO Language.SnapshotTree := do let msgLog := snap.element.diagnostics.msgLog let msgLog := { msgLog with unreported := msgLog.unreported.map fun diag => @@ -2305,7 +2305,7 @@ private def setAllDiagRanges (snap : Language.SnapshotTree) (pos endPos open Language -private structure RealizeConstantResult where +structure RealizeConstantResult where snap : SnapshotTree error? : Option Exception deriving TypeName diff --git a/src/Lean/Meta/Check.lean b/src/Lean/Meta/Check.lean index 3efb0ead00f6..f8a0cad4bd68 100644 --- a/src/Lean/Meta/Check.lean +++ b/src/Lean/Meta/Check.lean @@ -84,6 +84,17 @@ where | _, .mdata _ b' => let (a, b') ← visit a b' return (a, b.updateMData! b') + | .const nm _, .const nm' _ => + if nm != nm' then + return (a, b) + else + return (a.setPPUniverses true, b.setPPUniverses true) + | .proj _ i a', .proj _ j b' => + if i != j then + return (a, b) + else + let (a', b') ← visit a' b' + return (a.updateProj! a', b.updateProj! b') | .app .., .app .. => if a.getAppNumArgs != b.getAppNumArgs then return (a, b) @@ -198,7 +209,7 @@ def throwAppTypeMismatch (f a : Expr) : MetaM α := do unless binfo.isExplicit do e := e.setAppPPExplicit let aType ← inferType a - throwError "application type mismatch{indentExpr e}\nargument{indentExpr a}\n{← mkHasTypeButIsExpectedMsg aType expectedType}" + throwError "Application type mismatch: In the application{indentExpr e}\nthe final argument{indentExpr a}\n{← mkHasTypeButIsExpectedMsg aType expectedType}" def checkApp (f a : Expr) : MetaM Unit := do let fType ← inferType f diff --git a/src/Lean/Meta/Hint.lean b/src/Lean/Meta/Hint.lean new file mode 100644 index 000000000000..6d94dbb502cd --- /dev/null +++ b/src/Lean/Meta/Hint.lean @@ -0,0 +1,180 @@ +/- +Copyright (c) 2025 Lean FRO, LLC. All rights reserved. +Released under Apache 2.0 license as described in the file LICENSE. +Authors: Joseph Rotella +-/ + +prelude + +import Lean.CoreM +import Lean.Data.Lsp.Utf16 +import Lean.Message +import Lean.Meta.TryThis +import Lean.Util.Diff +import Lean.Widget.Types +import Lean.PrettyPrinter + +namespace Lean.Meta.Hint + +open Elab Tactic PrettyPrinter TryThis + +/-- +A widget for rendering code action suggestions in error messages. Generally, this widget should not +be used directly; instead, use `MessageData.hint`. Note that this widget is intended only for use +within message data; it may not display line breaks properly if rendered as a panel widget. + +The props to this widget are of the following form: +```json +{ + "diff": [ + {"type": "unchanged", "text": "h"}, + {"type": "deletion", "text": "ello"}, + {"type": "insertion", "text": "i"} + ] +} +``` + +Note: we cannot add the `builtin_widget_module` attribute here because that would require importing +`Lean.Widget.UserWidget`, which in turn imports much of `Lean.Elab` -- the module where we want to +be able to use this widget. Instead, we register the attribute post-hoc when we declare the regular +"Try This" widget in `Lean.Meta.Tactic.TryThis`.
+-/ +def tryThisDiffWidget : Widget.Module where + javascript := " +import * as React from 'react'; +import { EditorContext, EnvPosContext } from '@leanprover/infoview'; +const e = React.createElement; +export default function ({ diff, range, suggestion }) { + const pos = React.useContext(EnvPosContext) + const editorConnection = React.useContext(EditorContext) + const insStyle = { className: 'information' } + const delStyle = { + style: { color: 'var(--vscode-errorForeground)', textDecoration: 'line-through' } + } + const defStyle = { + style: { color: 'var(--vscode-textLink-foreground)' } + } + function onClick() { + editorConnection.api.applyEdit({ + changes: { [pos.uri]: [{ range, newText: suggestion }] } + }) + } + + const spans = diff.map (comp => + comp.type === 'deletion' ? e('span', delStyle, comp.text) : + comp.type === 'insertion' ? e('span', insStyle, comp.text) : + e('span', defStyle, comp.text) + ) + const fullDiff = e('span', + { onClick, title: 'Apply suggestion', className: 'link pointer dim font-code', }, + spans) + return fullDiff +}" + +/-- +Converts an array of diff actions into corresponding JSON interpretable by `tryThisDiffWidget`. +-/ +private def mkDiffJson (ds : Array (Diff.Action × Char)) := + -- Avoid cluttering the DOM by grouping "runs" of the same action + let unified : List (Diff.Action × List Char) := ds.foldr (init := []) fun + | (act, c), [] => [(act, [c])] + | (act, c), (act', cs) :: acc => + if act == act' then + (act, c :: cs) :: acc + else + (act, [c]) :: (act', cs) :: acc + toJson <| unified.map fun + | (.insert, s) => json% { type: "insertion", text: $(String.mk s) } + | (.delete, s) => json% { type: "deletion", text: $(String.mk s) } + | (.skip , s) => json% { type: "unchanged", text: $(String.mk s) } + +/-- +Converts an array of diff actions into a Unicode string that visually depicts the diff. + +Note that this function does not return the string that results from applying the diff to some +input; rather, it returns a string representation of the actions that the diff itself comprises, such as `b̵a̵c̲h̲e̲e̲rs̲`. + +-/ +private def mkDiffString (ds : Array (Diff.Action × Char)) : String := + let rangeStrs := ds.map fun + | (.insert, s) => String.mk [s, '\u0332'] -- U+0332 Combining Low Line + | (.delete, s) => String.mk [s, '\u0335'] -- U+0335 Combining Short Stroke Overlay + | (.skip , s) => String.mk [s] + rangeStrs.foldl (· ++ ·) "" + +/-- +A code action suggestion associated with a hint in a message. + +Refer to `TryThis.Suggestion`; this extends that structure with a `span?` field, allowing a single +hint to suggest modifications at different locations. If `span?` is not specified, then the `ref` +for the containing `Suggestions` value is used. +-/ +structure Suggestion extends TryThis.Suggestion where + span? : Option Syntax := none + +instance : Coe TryThis.SuggestionText Suggestion where + coe t := { suggestion := t } + +instance : ToMessageData Suggestion where + toMessageData s := toMessageData s.toSuggestion + +/-- +A collection of code action suggestions to be included in a hint in a diagnostic message. + +Contains the following fields: +* `ref`: the syntax location for the code action suggestions. Will be overridden by the `span?` + field on any suggestions that specify it. +* `suggestions`: the suggestions to display. +* `codeActionPrefix?`: if specified, text to display in place of "Try this: " in the code action + label +-/ +structure Suggestions where + ref : Syntax + suggestions : Array Suggestion + codeActionPrefix? 
: Option String := none + +/-- +Creates message data for a `Suggestions` collection and adds the associated info +leaf. +-/ +def Suggestions.toHintMessage (suggestions : Suggestions) : CoreM MessageData := do + let { ref, codeActionPrefix?, suggestions } := suggestions + let mut msg := m!"" + for suggestion in suggestions do + if let some range := (suggestion.span?.getD ref).getRange? then + let { info, suggestions := suggestionArr, range := lspRange } ← processSuggestions ref range + #[suggestion.toSuggestion] codeActionPrefix? + pushInfoLeaf info + let suggestionText := suggestionArr[0]!.2.1 + let map ← getFileMap + let rangeContents := Substring.mk map.source range.start range.stop |>.toString + let split (s : String) := s.toList.toArray + let edits := Diff.diff (split rangeContents) (split suggestionText) + let diff := mkDiffJson edits + let json := json% { + diff: $diff, + suggestion: $suggestionText, + range: $lspRange + } + let preInfo := suggestion.preInfo?.getD "" + let postInfo := suggestion.postInfo?.getD "" + let widget := MessageData.ofWidget { + id := ``tryThisDiffWidget + javascriptHash := tryThisDiffWidget.javascriptHash + props := return json + } (suggestion.messageData?.getD (mkDiffString edits)) + let widgetMsg := m!"{preInfo}{widget}{postInfo}" + let suggestionMsg := if suggestions.size == 1 then m!"\n{widgetMsg}" else m!"\n• {widgetMsg}" + msg := msg ++ MessageData.nestD suggestionMsg + return msg + +/-- +Renders `hint` as hint message data to be appended to a diagnostic message. If `suggestions?` is +non-`none`, an inline suggestion widget is also appended. +-/ +def _root_.Lean.MessageData.hint (hint : MessageData) (suggestions? : Option Suggestions := none) + : CoreM MessageData := do + let mut hintMsg := m!"\n\nHint: {hint}" + if let some suggestions := suggestions? then + hintMsg := hintMsg ++ (← suggestions.toHintMessage) + return .tagged `hint hintMsg diff --git a/src/Lean/Meta/LazyDiscrTree.lean b/src/Lean/Meta/LazyDiscrTree.lean index aa56f01fe418..5c3ba83e3884 100644 --- a/src/Lean/Meta/LazyDiscrTree.lean +++ b/src/Lean/Meta/LazyDiscrTree.lean @@ -57,8 +57,8 @@ end Key -- This namespace contains definitions copied from Lean.Meta.DiscrTree. namespace MatchClone -private def tmpMVarId : MVarId := { name := `_discr_tree_tmp } -private def tmpStar := mkMVar tmpMVarId +def tmpMVarId : MVarId := { name := `_discr_tree_tmp } +def tmpStar := mkMVar tmpMVarId /-- Returns true iff the argument should be treated as a "wildcard" by the
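Editor's note: since `Lean.Meta.Hint` is a new module, a minimal usage sketch may help. The sketch below is not part of the patch; the helper name, message wording, and code-action prefix are illustrative assumptions, and only the `Suggestions`/`MessageData.hint` signatures introduced above are relied on.

```lean
import Lean.Meta.Hint

open Lean Meta Hint

-- Hypothetical elaborator helper: report an error with a hint and one clickable suggestion.
def throwWithRenameHint (ref : Syntax) (oldName newName : String) : MetaM Unit := do
  -- `span?` is omitted, so `ref` determines the range the code action edits.
  let suggestions : Suggestions := {
    ref
    suggestions := #[{ suggestion := .string newName }]
    codeActionPrefix? := some "Rename to: "
  }
  -- `MessageData.hint` renders "\n\nHint: ..." plus an inline diff widget for the suggestion.
  let hint ← MessageData.hint m!"did you mean `{newName}`?" (some suggestions)
  throwError m!"unknown identifier `{oldName}`{hint}"
```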
-/ -private def ignoreArg (a : Expr) (i : Nat) (infos : Array ParamInfo) : MetaM Bool := do +def ignoreArg (a : Expr) (i : Nat) (infos : Array ParamInfo) : MetaM Bool := do if h : i < infos.size then let info := infos[i] if info.isInstImplicit then @@ -83,7 +83,7 @@ private def ignoreArg (a : Expr) (i : Nat) (infos : Array ParamInfo) : MetaM Boo else isProof a -private partial def pushArgsAux (infos : Array ParamInfo) : Nat → Expr → Array Expr → MetaM (Array Expr) +partial def pushArgsAux (infos : Array ParamInfo) : Nat → Expr → Array Expr → MetaM (Array Expr) | i, .app f a, todo => do if (← ignoreArg a i infos) then pushArgsAux infos (i-1) f (todo.push tmpStar) @@ -97,7 +97,7 @@ private partial def pushArgsAux (infos : Array ParamInfo) : Nat → Expr → Arr - `Nat.zero` - `Nat.succ x` where `isNumeral x` - `OfNat.ofNat _ x _` where `isNumeral x` -/ -private partial def isNumeral (e : Expr) : Bool := +partial def isNumeral (e : Expr) : Bool := if e.isRawNatLit then true else let f := e.getAppFn @@ -109,7 +109,7 @@ private partial def isNumeral (e : Expr) : Bool := else if fName == ``Nat.zero && e.getAppNumArgs == 0 then true else false -private partial def toNatLit? (e : Expr) : Option Literal := +partial def toNatLit? (e : Expr) : Option Literal := if isNumeral e then if let some n := loop e then some (.natVal n) @@ -134,7 +134,7 @@ where failure | _ => failure -private def isNatType (e : Expr) : MetaM Bool := +def isNatType (e : Expr) : MetaM Bool := return (← whnf e).isConstOf ``Nat /-- @@ -145,7 +145,7 @@ private def isNatType (e : Expr) : MetaM Bool := - `Nat.succ _` This function assumes `e.isAppOf fName` -/ -private def isNatOffset (fName : Name) (e : Expr) : MetaM Bool := do +def isNatOffset (fName : Name) (e : Expr) : MetaM Bool := do if fName == ``Nat.add && e.getAppNumArgs == 2 then return isNumeral e.appArg! else if fName == ``Add.add && e.getAppNumArgs == 4 then @@ -160,7 +160,7 @@ This is a hook to determine if we should add an expression as a wildcard pattern Clone of `Lean.Meta.DiscrTree.shouldAddAsStar`. See it for more discussion. -/ -private def shouldAddAsStar (fName : Name) (e : Expr) : MetaM Bool := do +def shouldAddAsStar (fName : Name) (e : Expr) : MetaM Bool := do isNatOffset fName e /-- @@ -174,7 +174,7 @@ discrimination tree. Clone of `Lean.Meta.DiscrTree.elimLooseBVarsByBeta`. See it for more discussion. -/ -private def elimLooseBVarsByBeta (e : Expr) : CoreM Expr := +def elimLooseBVarsByBeta (e : Expr) : CoreM Expr := Core.transform e (pre := fun e => do if !e.hasLooseBVars then @@ -184,7 +184,7 @@ private def elimLooseBVarsByBeta (e : Expr) : CoreM Expr := else return .continue) -private def getKeyArgs (e : Expr) (isMatch root : Bool) : +def getKeyArgs (e : Expr) (isMatch root : Bool) : MetaM (Key × Array Expr) := do let e ← DiscrTree.reduceDT e root unless root do @@ -274,12 +274,12 @@ private abbrev LazyEntry α := Array Expr × ((LocalContext × LocalInstances) Index identifying trie in a discrimination tree. -/ @[reducible] -private def TrieIndex := Nat +def TrieIndex := Nat /-- Discrimination tree trie. See `LazyDiscrTree`. -/ -private structure Trie (α : Type) where +structure Trie (α : Type) where node :: /-- Values for matches ending at this trie. -/ values : Array α @@ -294,7 +294,7 @@ private structure Trie (α : Type) where instance : EmptyCollection (Trie α) := ⟨.node #[] 0 {} #[]⟩ /-- Push lazy entry to trie. 
-/ -private def Trie.pushPending : Trie α → LazyEntry α → Trie α +def Trie.pushPending : Trie α → LazyEntry α → Trie α | .node vs star cs p, e => .node vs star cs (p.push e) end LazyDiscrTree @@ -330,7 +330,7 @@ open Lean.Meta.DiscrTree (mkNoindexAnnotation hasNoindexAnnotation reduceDT) /-- Specialization of Lean.Meta.DiscrTree.pushArgs -/ -private def pushArgs (root : Bool) (todo : Array Expr) (e : Expr) : +def pushArgs (root : Bool) (todo : Array Expr) (e : Expr) : MetaM (Key × Array Expr) := do if hasNoindexAnnotation e then return (.star, todo) @@ -382,15 +382,15 @@ private def pushArgs (root : Bool) (todo : Array Expr) (e : Expr) : return (.other, todo) /-- Initial capacity for key and todo vector. -/ -private def initCapacity := 8 +def initCapacity := 8 /-- Get the root key and rest of terms of an expression using the specified config. -/ -private def rootKey (e : Expr) : MetaM (Key × Array Expr) := +def rootKey (e : Expr) : MetaM (Key × Array Expr) := pushArgs true (Array.mkEmpty initCapacity) e -private partial def buildPath (op : Bool → Array Expr → Expr → MetaM (Key × Array Expr)) (root : Bool) (todo : Array Expr) (keys : Array Key) : MetaM (Array Key) := do +partial def buildPath (op : Bool → Array Expr → Expr → MetaM (Key × Array Expr)) (root : Bool) (todo : Array Expr) (keys : Array Key) : MetaM (Array Key) := do if todo.isEmpty then return keys else @@ -424,25 +424,25 @@ def targetPath (e : Expr) : MetaM (Array Key) := do /- Monad for finding matches while resolving deferred patterns. -/ @[reducible] -private def MatchM α := StateRefT (Array (Trie α)) MetaM +def MatchM α := StateRefT (Array (Trie α)) MetaM -private def runMatch (d : LazyDiscrTree α) (m : MatchM α β) : MetaM (β × LazyDiscrTree α) := do +def runMatch (d : LazyDiscrTree α) (m : MatchM α β) : MetaM (β × LazyDiscrTree α) := do let { tries := a, roots := r } := d let (result, a) ← withReducible <| m.run a return (result, { tries := a, roots := r}) -private def setTrie (i : TrieIndex) (v : Trie α) : MatchM α Unit := +def setTrie (i : TrieIndex) (v : Trie α) : MatchM α Unit := modify (·.set! i v) /-- Create a new trie with the given lazy entry. -/ -private def newTrie [Monad m] [MonadState (Array (Trie α)) m] (e : LazyEntry α) : m TrieIndex := do +def newTrie [Monad m] [MonadState (Array (Trie α)) m] (e : LazyEntry α) : m TrieIndex := do modifyGet fun a => let sz := a.size; (sz, a.push (.node #[] 0 {} #[e])) /-- Add a lazy entry to an existing trie. -/ -private def addLazyEntryToTrie (i:TrieIndex) (e : LazyEntry α) : MatchM α Unit := +def addLazyEntryToTrie (i:TrieIndex) (e : LazyEntry α) : MatchM α Unit := modify (·.modify i (·.pushPending e)) -private def evalLazyEntry +def evalLazyEntry (p : Array α × TrieIndex × Std.HashMap Key TrieIndex) (entry : LazyEntry α) : MatchM α (Array α × TrieIndex × Std.HashMap Key TrieIndex) := do @@ -475,7 +475,7 @@ private def evalLazyEntry This evaluates all lazy entries in a trie and updates `values`, `starIdx`, and `children` accordingly. 
-/ -private partial def evalLazyEntries +partial def evalLazyEntries (values : Array α) (starIdx : TrieIndex) (children : Std.HashMap Key TrieIndex) (entries : Array (LazyEntry α)) : MatchM α (Array α × TrieIndex × Std.HashMap Key TrieIndex) := do @@ -484,7 +484,7 @@ private partial def evalLazyEntries let mut children := children entries.foldlM (init := (values, starIdx, children)) evalLazyEntry -private def evalNode (c : TrieIndex) : +def evalNode (c : TrieIndex) : MatchM α (Array α × TrieIndex × Std.HashMap Key TrieIndex) := do let .node vs star cs pending := (←get)[c]! if pending.size = 0 then @@ -539,7 +539,7 @@ structure MatchResult (α : Type) where namespace MatchResult -private def push (r : MatchResult α) (score : Nat) (e : Array α) : MatchResult α := +def push (r : MatchResult α) (score : Nat) (e : Array α) : MatchResult α := if e.isEmpty then r else if score < r.elts.size then @@ -601,7 +601,7 @@ multiple partial matches to explore next, to ensure the order of results matches user expectations, this code must add paths we want to prioritize and return results earlier are added last. -/ -private partial def getMatchLoop (cases : Array PartialMatch) (result : MatchResult α) : MatchM α (MatchResult α) := do +partial def getMatchLoop (cases : Array PartialMatch) (result : MatchResult α) : MatchM α (MatchResult α) := do if cases.isEmpty then pure result else do @@ -647,7 +647,7 @@ private partial def getMatchLoop (cases : Array PartialMatch) (result : MatchRes cases |> pushNonStar k args getMatchLoop cases result -private def getStarResult (root : Std.HashMap Key TrieIndex) : MatchM α (MatchResult α) := +def getStarResult (root : Std.HashMap Key TrieIndex) : MatchM α (MatchResult α) := match root[Key.star]? with | none => pure <| {} @@ -658,7 +658,7 @@ private def getStarResult (root : Std.HashMap Key TrieIndex) : MatchM α (MatchR /- Add partial match to cases if discriminator tree root map has potential matches. -/ -private def pushRootCase (r : Std.HashMap Key TrieIndex) (k : Key) (args : Array Expr) +def pushRootCase (r : Std.HashMap Key TrieIndex) (k : Key) (args : Array Expr) (cases : Array PartialMatch) : Array PartialMatch := match r[k]? with | none => cases @@ -667,7 +667,7 @@ private def pushRootCase (r : Std.HashMap Key TrieIndex) (k : Key) (args : Array /-- Find values that match `e` in `root`. -/ -private def getMatchCore (root : Std.HashMap Key TrieIndex) (e : Expr) : +def getMatchCore (root : Std.HashMap Key TrieIndex) (e : Expr) : MatchM α (MatchResult α) := do let result ← getStarResult root let (k, args) ← MatchClone.getMatchKeyArgs e (root := true) (← read) @@ -696,7 +696,7 @@ def getMatch (d : LazyDiscrTree α) (e : Expr) : MetaM (MatchResult α × LazyDi Structure for quickly initializing a lazy discrimination tree with a large number of elements using concurrent functions for generating entries. -/ -private structure PreDiscrTree (α : Type) where +structure PreDiscrTree (α : Type) where /-- Maps keys to index in tries array. -/ roots : Std.HashMap Key Nat := {} /-- Lazy entries for root of trie. -/ @@ -705,7 +705,7 @@ private structure PreDiscrTree (α : Type) where namespace PreDiscrTree -private def modifyAt (d : PreDiscrTree α) (k : Key) +def modifyAt (d : PreDiscrTree α) (k : Key) (f : Array (LazyEntry α) → Array (LazyEntry α)) : PreDiscrTree α := let { roots, tries } := d match roots[k]? 
with @@ -716,11 +716,11 @@ private def modifyAt (d : PreDiscrTree α) (k : Key) { roots, tries := tries.modify i f } /-- Add an entry to the pre-discrimination tree.-/ -private def push (d : PreDiscrTree α) (k : Key) (e : LazyEntry α) : PreDiscrTree α := +def push (d : PreDiscrTree α) (k : Key) (e : LazyEntry α) : PreDiscrTree α := d.modifyAt k (·.push e) /-- Convert a pre-discrimination tree to a lazy discrimination tree. -/ -private def toLazy (d : PreDiscrTree α) : LazyDiscrTree α := +def toLazy (d : PreDiscrTree α) : LazyDiscrTree α := let { roots, tries } := d -- Adjust trie indices so the first value is reserved (so 0 is never a valid trie index) let roots := roots.fold (init := roots) (fun m k n => m.insert k (n+1)) @@ -775,7 +775,7 @@ def mkSubEntry (e : InitEntry α) (idx : Nat) (value : α) : end InitEntry /-- Information about a failed import. -/ -private structure ImportFailure where +structure ImportFailure where /-- Module with constant that import failed on. -/ module : Name /-- Constant that import failed on. -/ @@ -784,10 +784,10 @@ private structure ImportFailure where exception : Exception /-- Information generation from imported modules. -/ -private structure ImportData where +structure ImportData where errors : IO.Ref (Array ImportFailure) -private def ImportData.new : BaseIO ImportData := do +def ImportData.new : BaseIO ImportData := do let errors ← IO.mkRef #[] pure { errors } @@ -805,7 +805,7 @@ def blacklistInsertion (env : Environment) (declName : Name) : Bool := || (declName matches .str _ "inj") || (declName matches .str _ "noConfusionType") -private def addConstImportData +def addConstImportData (cctx : Core.Context) (env : Environment) (modName : Name) @@ -839,7 +839,7 @@ private def addConstImportData Contains the pre discrimination tree and any errors occurring during initialization of the library search tree. 
-/ -private structure InitResults (α : Type) where +structure InitResults (α : Type) where tree : PreDiscrTree α := {} errors : Array ImportFailure := #[] @@ -859,12 +859,12 @@ instance : Append (InitResults α) where end InitResults -private def toFlat (d : ImportData) (tree : PreDiscrTree α) : +def toFlat (d : ImportData) (tree : PreDiscrTree α) : BaseIO (InitResults α) := do let de ← d.errors.swap #[] pure ⟨tree, de⟩ -private partial def loadImportedModule +partial def loadImportedModule (cctx : Core.Context) (env : Environment) (act : Name → ConstantInfo → MetaM (Array (InitEntry α))) @@ -882,7 +882,7 @@ private partial def loadImportedModule else pure tree -private def createImportedEnvironmentSeq (cctx : Core.Context) (ngen : NameGenerator) (env : Environment) +def createImportedEnvironmentSeq (cctx : Core.Context) (ngen : NameGenerator) (env : Environment) (act : Name → ConstantInfo → MetaM (Array (InitEntry α))) (start stop : Nat) : BaseIO (InitResults α) := do let cacheRef ← IO.mkRef (Cache.empty ngen) @@ -898,7 +898,7 @@ private def createImportedEnvironmentSeq (cctx : Core.Context) (ngen : NameGener termination_by stop - start /-- Get the results of each task and merge using combining function -/ -private def combineGet [Append α] (z : α) (tasks : Array (Task α)) : α := +def combineGet [Append α] (z : α) (tasks : Array (Task α)) : α := tasks.foldl (fun x t => x ++ t.get) (init := z) def getChildNgen [Monad M] [MonadNameGenerator M] : M NameGenerator := do @@ -960,7 +960,7 @@ def createImportedDiscrTree [Monad m] [MonadLog m] [AddMessageContext m] [MonadO pure <| r.tree.toLazy /-- Creates the core context used for initializing a tree using the current context. -/ -private def createTreeCtx (ctx : Core.Context) : Core.Context := { +def createTreeCtx (ctx : Core.Context) : Core.Context := { fileName := ctx.fileName fileMap := ctx.fileMap options := ctx.options diff --git a/src/Lean/Meta/Match/MatchEqs.lean b/src/Lean/Meta/Match/MatchEqs.lean index 3b2732a75dae..f845ac787687 100644 --- a/src/Lean/Meta/Match/MatchEqs.lean +++ b/src/Lean/Meta/Match/MatchEqs.lean @@ -98,9 +98,68 @@ def unfoldNamedPattern (e : Expr) : MetaM Expr := do 1. Eliminates arguments for named parameters and the associated equation proofs. - 2. Equality parameters associated with the `h : discr` notation are replaced with `rfl` proofs. - Recall that this kind of parameter always occurs after the parameters correspoting to pattern variables. - `numNonEqParams` is the size of the prefix. + 2. Instantiates the `Unit` parameter of an otherwise argumentless alternative. + + It does not handle the equality parameters associated with the `h : discr` notation. + + The continuation `k` takes four arguments `ys args mask type`. + - `ys` are variables for the hypotheses that have not been eliminated. + - `args` are the arguments for the alternative `alt` that has type `altType`. `ys.size <= args.size` + - `mask[i]` is true if the hypothesis has not been eliminated. `mask.size == args.size`. + - `type` is the resulting type for `altType`. + + We use the `mask` to build the splitter proof. See `mkSplitterProof`. + + This can be used to apply the alternative of a match expression in its splitter.
+-/ +partial def forallAltVarsTelescope (altType : Expr) (altNumParams numDiscrEqs : Nat) + (k : (patVars : Array Expr) → (args : Array Expr) → (mask : Array Bool) → (type : Expr) → MetaM α) : MetaM α := do + go #[] #[] #[] 0 altType +where + go (ys : Array Expr) (args : Array Expr) (mask : Array Bool) (i : Nat) (type : Expr) : MetaM α := do + let type ← whnfForall type + if i < altNumParams - numDiscrEqs then + let Expr.forallE n d b .. := type + | throwError "expecting {altNumParams} parameters, excluding {numDiscrEqs} equalities, but found type{indentExpr altType}" + + -- Handle the special case of `Unit` parameters. + if i = 0 && altNumParams - numDiscrEqs = 1 && d.isConstOf ``Unit && !b.hasLooseBVars then + return ← k #[] #[mkConst ``Unit.unit] #[false] b + + let d ← Match.unfoldNamedPattern d + withLocalDeclD n d fun y => do + let typeNew := b.instantiate1 y + if let some (_, lhs, rhs) ← matchEq? d then + if lhs.isFVar && ys.contains lhs && args.contains lhs && isNamedPatternProof typeNew y then + let some j := ys.finIdxOf? lhs | unreachable! + let ys := ys.eraseIdx j + let some k := args.idxOf? lhs | unreachable! + let mask := mask.set! k false + let args := args.map fun arg => if arg == lhs then rhs else arg + let arg ← mkEqRefl rhs + let typeNew := typeNew.replaceFVar lhs rhs + return ← withReplaceFVarId lhs.fvarId! rhs do + withReplaceFVarId y.fvarId! arg do + go ys (args.push arg) (mask.push false) (i+1) typeNew + go (ys.push y) (args.push y) (mask.push true) (i+1) typeNew + else + let type ← Match.unfoldNamedPattern type + k ys args mask type + + isNamedPatternProof (type : Expr) (h : Expr) : Bool := + Option.isSome <| type.find? fun e => + if let some e := Match.isNamedPattern? e then + e.appArg! == h + else + false + + +/-- + Extension of `forallAltVarsTelescope` that continues further: + + Equality parameters associated with the `h : discr` notation are replaced with `rfl` proofs. + Recall that this kind of parameter always occurs after the parameters corresponding to pattern + variables. The continuation `k` takes four arguments `ys args mask type`. - `ys` are variables for the hypotheses that have not been eliminated. @@ -116,57 +175,45 @@ def unfoldNamedPattern (e : Expr) : MetaM Expr := do partial def forallAltTelescope (altType : Expr) (altNumParams numDiscrEqs : Nat) (k : (ys : Array Expr) → (eqs : Array Expr) → (args : Array Expr) → (mask : Array Bool) → (type : Expr) → MetaM α) : MetaM α := do - go #[] #[] #[] #[] 0 altType + forallAltVarsTelescope altType altNumParams numDiscrEqs fun ys args mask altType => do + go ys #[] args mask 0 altType where go (ys : Array Expr) (eqs : Array Expr) (args : Array Expr) (mask : Array Bool) (i : Nat) (type : Expr) : MetaM α := do let type ← whnfForall type - if i < altNumParams then + if i < numDiscrEqs then let Expr.forallE n d b .. := type | throwError "expecting {altNumParams} parameters, including {numDiscrEqs} equalities, but found type{indentExpr altType}" - if i < altNumParams - numDiscrEqs then - let d ← unfoldNamedPattern d - withLocalDeclD n d fun y => do - let typeNew := b.instantiate1 y - if let some (_, lhs, rhs) ← matchEq? d then - if lhs.isFVar && ys.contains lhs && args.contains lhs && isNamedPatternProof typeNew y then - let some j := ys.finIdxOf? lhs | unreachable! - let ys := ys.eraseIdx j - let some k := args.idxOf? lhs | unreachable! - let mask := mask.set!
k false - let args := args.map fun arg => if arg == lhs then rhs else arg - let arg ← mkEqRefl rhs - let typeNew := typeNew.replaceFVar lhs rhs - return ← withReplaceFVarId lhs.fvarId! rhs do - withReplaceFVarId y.fvarId! arg do - go ys eqs (args.push arg) (mask.push false) (i+1) typeNew - go (ys.push y) eqs (args.push y) (mask.push true) (i+1) typeNew + let arg ← if let some (_, _, rhs) ← matchEq? d then + mkEqRefl rhs + else if let some (_, _, _, rhs) ← matchHEq? d then + mkHEqRefl rhs else - let arg ← if let some (_, _, rhs) ← matchEq? d then - mkEqRefl rhs - else if let some (_, _, _, rhs) ← matchHEq? d then - mkHEqRefl rhs - else - throwError "unexpected match alternative type{indentExpr altType}" - withLocalDeclD n d fun eq => do - let typeNew := b.instantiate1 eq - go ys (eqs.push eq) (args.push arg) (mask.push false) (i+1) typeNew + throwError "unexpected match alternative type{indentExpr altType}" + withLocalDeclD n d fun eq => do + let typeNew := b.instantiate1 eq + go ys (eqs.push eq) (args.push arg) (mask.push false) (i+1) typeNew else let type ← unfoldNamedPattern type - /- Recall that alternatives that do not have variables have a `Unit` parameter to ensure - they are not eagerly evaluated. -/ - if ys.size == 1 then - if (← inferType ys[0]!).isConstOf ``Unit && !(← dependsOn type ys[0]!.fvarId!) then - let rhs := mkConst ``Unit.unit - return ← withReplaceFVarId ys[0]!.fvarId! rhs do - return (← k #[] #[] #[rhs] #[false] type) k ys eqs args mask type - isNamedPatternProof (type : Expr) (h : Expr) : Bool := - Option.isSome <| type.find? fun e => - if let some e := isNamedPattern? e then - e.appArg! == h - else - false +/-- +Given an application of a matcher arm `alt` that expects the `numDiscrEqs` equality parameters, +and an array of `discr = pattern` equalities (one for each discriminant), apply those that +are expected by the alternative. +-/ +partial def mkAppDiscrEqs (alt : Expr) (heqs : Array Expr) (numDiscrEqs : Nat) : MetaM Expr := do + go alt (← inferType alt) 0 +where + go e ty i := do + if i < numDiscrEqs then + let Expr.forallE n d b .. := ty + | throwError "expecting {numDiscrEqs} equalities, but found type{indentExpr alt}" + for heq in heqs do + if (← isDefEq (← inferType heq) d) then + return ← go (mkApp e heq) (b.instantiate1 heq) (i+1) + throwError "Could not find equation {n} : {d} among {heqs}" + else + return e namespace SimpH @@ -328,21 +375,33 @@ private def unfoldElimOffset (mvarId : MVarId) : MetaM MVarId := do mvarId.deltaTarget (· == ``Nat.elimOffset) /-- - Helper method for proving a conditional equational theorem associated with an alternative of - the `match`-eliminator `matchDeclName`. `type` contains the type of the theorem. -/ -partial def proveCondEqThm (matchDeclName : Name) (type : Expr) : MetaM Expr := withLCtx {} {} do +Helper method for proving a conditional equational theorem associated with an alternative of +the `match`-eliminator `matchDeclName`. `type` contains the type of the theorem. + +The `heqPos`/`heqNum` arguments indicate that the `heqNum` hypotheses starting at position `heqPos` +are `Eq`/`HEq` hypotheses to substitute first; this is used for the generalized match equations.
+-/ +partial def proveCondEqThm (matchDeclName : Name) (type : Expr) + (heqPos : Nat := 0) (heqNum : Nat := 0) : MetaM Expr := withLCtx {} {} do let type ← instantiateMVars type - forallTelescope type fun ys target => do - let mvar0 ← mkFreshExprSyntheticOpaqueMVar target - trace[Meta.Match.matchEqs] "proveCondEqThm {mvar0.mvarId!}" - let mvarId ← mvar0.mvarId!.deltaTarget (· == matchDeclName) - withDefault <| go mvarId 0 - mkLambdaFVars ys (← instantiateMVars mvar0) + let mvar0 ← mkFreshExprSyntheticOpaqueMVar type + trace[Meta.Match.matchEqs] "proveCondEqThm {mvar0.mvarId!}" + let mut mvarId := mvar0.mvarId! + if heqNum > 0 then + mvarId := (← mvarId.introN heqPos).2 + for _ in [:heqNum] do + let (h, mvarId') ← mvarId.intro1 + mvarId ← subst mvarId' h + trace[Meta.Match.matchEqs] "proveCondEqThm after subst{mvarId}" + mvarId := (← mvarId.intros).2 + mvarId ← mvarId.deltaTarget (· == matchDeclName) + mvarId ← mvarId.heqOfEq + go mvarId 0 + instantiateMVars mvar0 where go (mvarId : MVarId) (depth : Nat) : MetaM Unit := withIncRecDepth do trace[Meta.Match.matchEqs] "proveCondEqThm.go {mvarId}" - let mvarId' ← mvarId.modifyTargetEqLHS whnfCore - let mvarId := mvarId' + let mvarId ← mvarId.modifyTargetEqLHS whnfCore let subgoals ← (do mvarId.refl; return #[]) <|> @@ -716,6 +775,7 @@ where go baseName splitterName := withConfig (fun c => { c with etaStruct := .no hs := hs.push h trace[Meta.Match.matchEqs] "hs: {hs}" let splitterAltType ← mkForallFVars ys (← hs.foldrM (init := (← mkForallFVars eqs altResultType)) (mkArrow · ·)) + let splitterAltType ← unfoldNamedPattern splitterAltType let splitterAltNumParam := hs.size + ys.size -- Create a proposition for representing terms that do not match `patterns` let mut notAlt := mkConst ``False @@ -767,21 +827,121 @@ where go baseName splitterName := withConfig (fun c => { c with etaStruct := .no let result := { eqnNames, splitterName, splitterAltNumParams } registerMatchEqns matchDeclName result +def congrEqnThmSuffixBase := "congr_eq" +def congrEqnThmSuffixBasePrefix := congrEqnThmSuffixBase ++ "_" +def congrEqn1ThmSuffix := congrEqnThmSuffixBasePrefix ++ "1" +example : congrEqn1ThmSuffix = "congr_eq_1" := rfl + +/-- Returns `true` if `s` is of the form `congr_eq_<idx>` -/ +def iscongrEqnReservedNameSuffix (s : String) : Bool := + congrEqnThmSuffixBasePrefix.isPrefixOf s && (s.drop congrEqnThmSuffixBasePrefix.length).isNat + +/- We generate the equations and splitter on demand, and do not save them on .olean files. -/ +builtin_initialize matchCongrEqnsExt : EnvExtension (PHashMap Name (Array Name)) ← + -- Using `local` allows us to use the extension in `realizeConst` without specifying `replay?`. + -- The resulting state can still be accessed on the generated declarations using `findStateAsync`; + -- see below + registerEnvExtension (pure {}) (asyncMode := .local) + +def registerMatchcongrEqns (matchDeclName : Name) (eqnNames : Array Name) : CoreM Unit := do + modifyEnv fun env => matchCongrEqnsExt.modifyState env fun map => + map.insert matchDeclName eqnNames + +/-- +Generate the congruence equations for the given match auxiliary declaration. +The congruence equations have a completely unrestricted left-hand side (arbitrary discriminants), +and take propositional equations relating the discriminants to the patterns as arguments. In this +sense they combine a congruence lemma with the regular equation lemma. +Since the motive depends on the discriminants, they are `HEq` equations.
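Editor's illustration (a hand-written sketch with assumed names, not generated output): the shape described above, for a hypothetical match on `Option Nat`. The discriminant `x` is left arbitrary, an equation `heq : x = some n` links it to the pattern, and the statement uses `HEq` because the motive depends on the discriminant. Real `congr_eq_<idx>` theorems additionally quantify over the matcher's parameters, motive, alternatives, and overlap hypotheses.

```lean
example {P : Option Nat → Prop}
    (h₁ : Unit → P none) (h₂ : ∀ n, P (some n))
    (x : Option Nat) (n : Nat) (heq : x = some n) :
    HEq (match (motive := fun y => P y) x with
         | none   => h₁ ()
         | some m => h₂ m)
        (h₂ n) := by
  -- Substituting the discriminant equation reduces the goal to an iota reduction.
  subst heq
  exact HEq.refl _
```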
+ +The code duplicates a fair bit of the logic above, and has to repeat the calculation of the +`notAlts`. One could avoid that and generate the generalized equations eagerly above, but they are +not always needed, so for now we live with the code duplication. +-/ +def genMatchCongrEqns (matchDeclName : Name) : MetaM (Array Name) := do + let baseName := mkPrivateName (← getEnv) matchDeclName + let firstEqnName := .str baseName congrEqn1ThmSuffix + realizeConst matchDeclName firstEqnName (go baseName) + return matchCongrEqnsExt.findStateAsync (← getEnv) firstEqnName |>.find! matchDeclName +where go baseName := withConfig (fun c => { c with etaStruct := .none }) do + withConfig (fun c => { c with etaStruct := .none }) do + let constInfo ← getConstInfo matchDeclName + let us := constInfo.levelParams.map mkLevelParam + let some matchInfo ← getMatcherInfo? matchDeclName | throwError "'{matchDeclName}' is not a matcher function" + let numDiscrEqs := matchInfo.getNumDiscrEqs + forallTelescopeReducing constInfo.type fun xs _matchResultType => do + let mut eqnNames := #[] + let params := xs[:matchInfo.numParams] + let motive := xs[matchInfo.getMotivePos]! + let alts := xs[xs.size - matchInfo.numAlts:] + let firstDiscrIdx := matchInfo.numParams + 1 + let discrs := xs[firstDiscrIdx : firstDiscrIdx + matchInfo.numDiscrs] + let mut notAlts := #[] + let mut idx := 1 + for i in [:alts.size] do + let altNumParams := matchInfo.altNumParams[i]! + let thmName := (Name.str baseName congrEqnThmSuffixBase).appendIndexAfter idx + eqnNames := eqnNames.push thmName + let notAlt ← do + let alt := alts[i]! + Match.forallAltVarsTelescope (← inferType alt) altNumParams numDiscrEqs fun altVars args _mask altResultType => do + let patterns ← forallTelescope altResultType fun _ t => pure t.getAppArgs + let mut heqsTypes := #[] + assert! patterns.size == discrs.size + for discr in discrs, pattern in patterns do + let heqType ← mkEqHEq discr pattern + heqsTypes := heqsTypes.push ((`heq).appendIndexAfter (heqsTypes.size + 1), heqType) + withLocalDeclsDND heqsTypes fun heqs => do + let rhs ← Match.mkAppDiscrEqs (mkAppN alt args) heqs numDiscrEqs + let mut hs := #[] + for notAlt in notAlts do + let h ← instantiateForall notAlt patterns + if let some h ← Match.simpH? h patterns.size then + hs := hs.push h + trace[Meta.Match.matchEqs] "hs: {hs}" + let mut notAlt := mkConst ``False + for discr in discrs.toArray.reverse, pattern in patterns.reverse do + notAlt ← mkArrow (← mkEqHEq discr pattern) notAlt + notAlt ← mkForallFVars (discrs ++ altVars) notAlt + let lhs := mkAppN (mkConst constInfo.name us) (params ++ #[motive] ++ discrs ++ alts) + let thmType ← mkHEq lhs rhs + let thmType ← hs.foldrM (init := thmType) (mkArrow · ·) + let thmType ← mkForallFVars (params ++ #[motive] ++ discrs ++ alts ++ altVars ++ heqs) thmType + let thmType ← Match.unfoldNamedPattern thmType + -- Here we prove the theorem from scratch. One could likely also use the (non-generalized) + -- match equation theorem after subst'ing the `heqs`. 
+ let thmVal ← Match.proveCondEqThm matchDeclName thmType + (heqPos := params.size + 1 + discrs.size + alts.size + altVars.size) (heqNum := heqs.size) + unless (← getEnv).contains thmName do + addDecl <| Declaration.thmDecl { + name := thmName + levelParams := constInfo.levelParams + type := thmType + value := thmVal + } + return notAlt + notAlts := notAlts.push notAlt + idx := idx + 1 + registerMatchcongrEqns matchDeclName eqnNames + builtin_initialize registerTraceClass `Meta.Match.matchEqs -private def isMatchEqName? (env : Environment) (n : Name) : Option Name := do +private def isMatchEqName? (env : Environment) (n : Name) : Option (Name × Bool) := do let .str p s := n | failure - guard <| isEqnReservedNameSuffix s || s == "splitter" + guard <| isEqnReservedNameSuffix s || s == "splitter" || iscongrEqnReservedNameSuffix s let p ← privateToUserName? p guard <| isMatcherCore env p - return p + return (p, iscongrEqnReservedNameSuffix s) builtin_initialize registerReservedNamePredicate (isMatchEqName? · · |>.isSome) builtin_initialize registerReservedNameAction fun name => do - let some p := isMatchEqName? (← getEnv) name | + let some (p, isGenEq) := isMatchEqName? (← getEnv) name | return false - let _ ← MetaM.run' <| getEquationsFor p + if isGenEq then + let _ ← MetaM.run' <| genMatchCongrEqns p + else + let _ ← MetaM.run' <| getEquationsFor p return true end Lean.Meta.Match diff --git a/src/Lean/Meta/Match/MatcherApp/Transform.lean b/src/Lean/Meta/Match/MatcherApp/Transform.lean index bcd8b1b9f710..3c0995b5fee5 100644 --- a/src/Lean/Meta/Match/MatcherApp/Transform.lean +++ b/src/Lean/Meta/Match/MatcherApp/Transform.lean @@ -190,8 +190,8 @@ private def forallAltTelescope' {α} (origAltType : Expr) (numParams numDiscrEqs : Nat) (k : Array Expr → Array Expr → n α) : n α := do map2MetaM (fun k => - Match.forallAltTelescope origAltType (numParams - numDiscrEqs) 0 - fun ys _eqs args _mask _bodyType => k ys args + Match.forallAltVarsTelescope origAltType numParams numDiscrEqs + fun ys args _mask _bodyType => k ys args ) k /-- @@ -222,7 +222,7 @@ def transform (addEqualities : Bool := false) (onParams : Expr → n Expr := pure) (onMotive : Array Expr → Expr → n Expr := fun _ e => pure e) - (onAlt : Expr → Expr → n Expr := fun _ e => pure e) + (onAlt : Nat → Expr → Expr → n Expr := fun _ _ e => pure e) (onRemaining : Array Expr → n (Array Expr) := pure) : n MatcherApp := do @@ -282,8 +282,8 @@ def transform let aux1 := mkApp aux1 motive' let aux1 := mkAppN aux1 discrs' unless (← isTypeCorrect aux1) do - logError m!"failed to transform matcher, type error when constructing new pre-splitter motive:{indentExpr aux1}" - check aux1 + mapError (f := (m!"failed to transform matcher, type error when constructing new pre-splitter motive:{indentExpr aux1}\n{indentD ·}")) do + check aux1 let origAltTypes ← inferArgumentTypesN matcherApp.alts.size aux1 -- We replace the matcher with the splitter @@ -294,12 +294,13 @@ def transform let aux2 := mkApp aux2 motive' let aux2 := mkAppN aux2 discrs' unless (← isTypeCorrect aux2) do - logError m!"failed to transform matcher, type error when constructing splitter motive:{indentExpr aux2}" - check aux2 + mapError (f := (m!"failed to transform matcher, type error when constructing splitter motive:{indentExpr aux2}\n{indentD ·}")) do + check aux2 let altTypes ← inferArgumentTypesN matcherApp.alts.size aux2 let mut alts' := #[] - for alt in matcherApp.alts, + for altIdx in [:matcherApp.alts.size], + alt in matcherApp.alts, numParams in matcherApp.altNumParams, 
splitterNumParams in matchEqns.splitterAltNumParams, origAltType in origAltTypes, @@ -313,7 +314,7 @@ def transform forallBoundedTelescope altType extraEqualities fun ys4 altType => do let alt ← try instantiateLambda alt (args ++ ys3) catch _ => throwError "unexpected matcher application, insufficient number of parameters in alternative" - let alt' ← onAlt altType alt + let alt' ← onAlt altIdx altType alt mkLambdaFVars (ys ++ ys2 ++ ys3 ++ ys4) alt' alts' := alts'.push alt' @@ -339,7 +340,8 @@ def transform let altTypes ← inferArgumentTypesN matcherApp.alts.size aux let mut alts' := #[] - for alt in matcherApp.alts, + for altIdx in [:matcherApp.alts.size], + alt in matcherApp.alts, numParams in matcherApp.altNumParams, altType in altTypes do let alt' ← forallBoundedTelescope altType numParams fun xs altType => do @@ -348,7 +350,7 @@ def transform let names ← lambdaTelescope alt fun xs _ => xs.mapM (·.fvarId!.getUserName) withUserNames xs names do let alt ← instantiateLambda alt xs - let alt' ← onAlt altType alt + let alt' ← onAlt altIdx altType alt mkLambdaFVars (xs ++ ys4) alt' alts' := alts'.push alt' @@ -422,7 +424,7 @@ def inferMatchType (matcherApp : MatcherApp) : MetaM MatcherApp := do } mkArrowN extraParams typeMatcherApp.toExpr ) - (onAlt := fun expAltType alt => do + (onAlt := fun _altIdx expAltType alt => do let altType ← inferType alt let eq ← mkEq expAltType altType let proof ← mkFreshExprSyntheticOpaqueMVar eq diff --git a/src/Lean/Meta/Tactic/Apply.lean b/src/Lean/Meta/Tactic/Apply.lean index b912883a96ce..1d63c907298f 100644 --- a/src/Lean/Meta/Tactic/Apply.lean +++ b/src/Lean/Meta/Tactic/Apply.lean @@ -23,11 +23,16 @@ def getExpectedNumArgs (e : Expr) : MetaM Nat := do let (numArgs, _) ← getExpectedNumArgsAux e pure numArgs -private def throwApplyError {α} (mvarId : MVarId) (eType : Expr) (targetType : Expr) : MetaM α := do - let explanation := MessageData.ofLazyM (es := #[eType, targetType]) do - let (eType, targetType) ← addPPExplicitToExposeDiff eType targetType - return m!"{indentExpr eType}\nwith{indentExpr targetType}" - throwTacticEx `apply mvarId m!"failed to unify{explanation}" +private def throwApplyError {α} (mvarId : MVarId) + (eType : Expr) (conclusionType? : Option Expr) (targetType : Expr) + (term? : Option MessageData) : MetaM α := do + throwTacticEx `apply mvarId <| MessageData.ofLazyM (es := #[eType, targetType]) do + let conclusionType := conclusionType?.getD eType + let note := if conclusionType?.isSome then .note m!"The full type of {term?.getD "the term"} is{indentExpr eType}" else m!"" + let (conclusionType, targetType) ← addPPExplicitToExposeDiff conclusionType targetType + let conclusion := if conclusionType?.isNone then "type" else "conclusion" + return m!"could not unify the {conclusion} of {term?.getD "the term"}{indentExpr conclusionType}\n\ + with the goal{indentExpr targetType}{note}" def synthAppInstances (tacticName : Name) (mvarId : MVarId) (mvarsNew : Array Expr) (binderInfos : Array BinderInfo) (synthAssignedInstances : Bool) (allowSynthFailures : Bool) : MetaM Unit := do @@ -159,7 +164,8 @@ private def isDefEqApply (cfg : ApplyConfig) (a b : Expr) : MetaM Bool := do /-- Close the given goal using `apply e`. -/ -def _root_.Lean.MVarId.apply (mvarId : MVarId) (e : Expr) (cfg : ApplyConfig := {}) : MetaM (List MVarId) := +def _root_.Lean.MVarId.apply (mvarId : MVarId) (e : Expr) (cfg : ApplyConfig := {}) + (term? 
: Option MessageData := none) : MetaM (List MVarId) := mvarId.withContext do mvarId.checkNotAssigned `apply let targetType ← mvarId.getType @@ -201,8 +207,13 @@ def _root_.Lean.MVarId.apply (mvarId : MVarId) (e : Expr) (cfg : ApplyConfig := s.restore go (i+1) else - let (_, _, eType) ← forallMetaTelescopeReducing eType (some rangeNumArgs.start) - throwApplyError mvarId eType targetType + + let conclusionType? ← if rangeNumArgs.start = 0 then + pure none + else + let (_, _, r) ← forallMetaTelescopeReducing eType (some rangeNumArgs.start) + pure (some r) + throwApplyError mvarId eType conclusionType? targetType term? termination_by rangeNumArgs.stop - i let (newMVars, binderInfos) ← go rangeNumArgs.start postprocessAppMVars `apply mvarId newMVars binderInfos cfg.synthAssignedInstances cfg.allowSynthFailures @@ -218,7 +229,7 @@ def _root_.Lean.MVarId.apply (mvarId : MVarId) (e : Expr) (cfg : ApplyConfig := /-- Short-hand for applying a constant to the goal. -/ def _root_.Lean.MVarId.applyConst (mvar : MVarId) (c : Name) (cfg : ApplyConfig := {}) : MetaM (List MVarId) := do - mvar.apply (← mkConstWithFreshMVarLevels c) cfg + mvar.apply (← mkConstWithFreshMVarLevels c) cfg (term? := m!"'{.ofConstName c}'") end Meta diff --git a/src/Lean/Meta/Tactic/FunInd.lean b/src/Lean/Meta/Tactic/FunInd.lean index 0006fe3de3c0..a3e46d3b604f 100644 --- a/src/Lean/Meta/Tactic/FunInd.lean +++ b/src/Lean/Meta/Tactic/FunInd.lean @@ -203,8 +203,6 @@ something goes wrong, one still gets a useful induction principle, just maybe wi not fully simplified. -/ -set_option autoImplicit false - namespace Lean.Tactic.FunInd open Lean Elab Meta @@ -327,7 +325,7 @@ partial def foldAndCollect (oldIH newIH : FVarId) (isRecCall : Expr → Option E -- statement and the inferred alt types let dummyGoal := mkConst ``True [] mkArrow eTypeAbst dummyGoal) - (onAlt := fun altType alt => do + (onAlt := fun _altIdx altType alt => do lambdaTelescope1 alt fun oldIH' alt => do forallBoundedTelescope altType (some 1) fun newIH' _goal' => do let #[newIH'] := newIH' | unreachable! @@ -345,7 +343,7 @@ partial def foldAndCollect (oldIH newIH : FVarId) (isRecCall : Expr → Option E (onMotive := fun _motiveArgs motiveBody => do let some (_extra, body) := motiveBody.arrow? | throwError "motive not an arrow" M.eval (foldAndCollect oldIH newIH isRecCall body)) - (onAlt := fun altType alt => do + (onAlt := fun _altIdx altType alt => do lambdaTelescope1 alt fun oldIH' alt => do -- We don't have suitable newIH around here, but we don't care since -- we just want to fold calls. So lets create a fake one. @@ -650,7 +648,7 @@ def rwFun (names : Array Name) (e : Expr) : MetaM Simp.Result := do else return { expr := e } -def rwMatcher (e : Expr) : MetaM Simp.Result := do +def rwMatcher (altIdx : Nat) (e : Expr) : MetaM Simp.Result := do if e.isAppOf ``PSum.casesOn || e.isAppOf ``PSigma.casesOn then let mut e := e while true do @@ -664,10 +662,67 @@ def rwMatcher (e : Expr) : MetaM Simp.Result := do break return { expr := e } else - Split.simpMatch e + unless (← isMatcherApp e) do + return { expr := e } + let matcherDeclName := e.getAppFn.constName! + let eqns ← Match.genMatchCongrEqns matcherDeclName + unless altIdx < eqns.size do + trace[Tactic.FunInd] "When trying to reduce arm {altIdx}, only {eqns.size} equations for {.ofConstName matcherDeclName}" + return { expr := e } + let eqnThm := eqns[altIdx]! 
+ try + withTraceNode `Meta.FunInd (pure m!"{exceptEmoji ·} rewriting with {.ofConstName eqnThm} in{indentExpr e}") do + let eqProof := mkAppN (mkConst eqnThm e.getAppFn.constLevels!) e.getAppArgs + let (hyps, _, eqType) ← forallMetaTelescope (← inferType eqProof) + trace[Meta.FunInd] "eqProof has type{indentExpr eqType}" + let proof := mkAppN eqProof hyps + let hyps := hyps.map (·.mvarId!) + let (isHeq, lhs, rhs) ← do + if let some (_, lhs, _, rhs) := eqType.heq? then pure (true, lhs, rhs) else + if let some (_, lhs, rhs) := eqType.eq? then pure (false, lhs, rhs) else + throwError m!"Type of {.ofConstName eqnThm} is not an equality" + if !(← isDefEq e lhs) then + throwError m!"Left-hand side {lhs} of {.ofConstName eqnThm} does not apply to {e}" + /- + Here we instantiate the hypotheses of the congruence equation theorem + There are two sets of hypotheses to instantiate: + - `Eq` or `HEq` that relate the discriminants to the patterns + Solving these should instantiate the pattern variables. + - Overlap hypotheses (`isEqnThmHypothesis`) + With more book keeping we could maybe do this very precisely, knowing exactly + which facts provided by the splitter should go where, but it's tedious. + So for now let's use heuristics and try `assumption` and `rfl`. + -/ + for h in hyps do + unless (← h.isAssigned) do + let hType ← h.getType + if Simp.isEqnThmHypothesis hType then + -- Using unrestricted h.substVars here does not work well; it could + -- even introduce a dependency on the `oldIH` we want to eliminate + h.assumption <|> throwError "Failed to discharge {h}" + else if hType.isEq then + h.assumption <|> h.refl <|> throwError m!"Failed to resolve {h}" + else if hType.isHEq then + h.assumption <|> h.hrefl <|> throwError m!"Failed to resolve {h}" + let unassignedHyps ← hyps.filterM fun h => return !(← h.isAssigned) + unless unassignedHyps.isEmpty do + throwError m!"Not all hypotheses of {.ofConstName eqnThm} could be discharged: {unassignedHyps}" + let rhs ← instantiateMVars rhs + let proof ← instantiateMVars proof + let proof ← if isHeq then + try mkEqOfHEq proof + catch e => throwError m!"Could not un-HEq {proof}:{indentD e.toMessageData} " + else + pure proof + return { + expr := rhs + proof? := proof + } + catch ex => + trace[Meta.FunInd] "Failed to apply {.ofConstName eqnThm}:{indentD ex.toMessageData}" + return { expr := e } /-- - Builds an expression of type `goal` by replicating the expression `e` into its tail-call-positions, where it calls `buildInductionCase`. Collects the cases of the final induction hypothesis as `MVars` as it goes. @@ -719,6 +774,27 @@ partial def buildInductionBody (toErase toClear : Array FVarId) (goal : Expr) return mkApp4 (mkConst ``Bool.dcond [u]) goal c' t' f' | _ => + + -- Check for unreachable cases. We look for the kind of expressions that `by contradiction` + -- produces + match_expr e with + | False.elim _ h => do + return ← mkFalseElim goal h + | absurd _ _ h₁ h₂ => do + return ← mkAbsurd goal h₁ h₂ + | _ => pure () + if e.isApp && e.getAppFn.isConst && isNoConfusion (← getEnv) e.getAppFn.constName! then + let arity := (← inferType e.getAppFn).getNumHeadForalls -- crucially not reducing the noConfusionType in the type + let h := e.getArg! (arity - 1) + let hType ← inferType h + -- The following duplicates a bit of code from the contradiction tactic, maybe worth extracting + -- into a common helper at some point + if let some (_, lhs, rhs) ← matchEq? hType then + if let some lhsCtor ← matchConstructorApp? 
lhs then + if let some rhsCtor ← matchConstructorApp? rhs then + if lhsCtor.name != rhsCtor.name then + return (← mkNoConfusion goal h) + -- we look in to `PProd.mk`, as it occurs in the mutual structural recursion construction match_expr goal with | And goal₁ goal₂ => match_expr e with @@ -746,13 +822,13 @@ partial def buildInductionBody (toErase toClear : Array FVarId) (goal : Expr) (addEqualities := true) (onParams := (foldAndCollect oldIH newIH isRecCall ·)) (onMotive := fun xs _body => pure (absMotiveBody.beta (maskArray mask xs))) - (onAlt := fun expAltType alt => M2.branch do + (onAlt := fun altIdx expAltType alt => M2.branch do lambdaTelescope1 alt fun oldIH' alt => do forallBoundedTelescope expAltType (some 1) fun newIH' goal' => do let #[newIH'] := newIH' | unreachable! let toErase' := toErase ++ #[oldIH', newIH'.fvarId!] let toClear' := toClear ++ matcherApp.discrs.filterMap (·.fvarId?) - let alt' ← withRewrittenMotiveArg goal' rwMatcher fun goal'' => do + let alt' ← withRewrittenMotiveArg goal' (rwMatcher altIdx) fun goal'' => do -- logInfo m!"rwMatcher after {matcherApp.matcherName} on{indentExpr goal'}\nyields{indentExpr goal''}" buildInductionBody toErase' toClear' goal'' oldIH' newIH'.fvarId! isRecCall alt mkLambdaFVars #[newIH'] alt') @@ -769,8 +845,8 @@ partial def buildInductionBody (toErase toClear : Array FVarId) (goal : Expr) (addEqualities := true) (onParams := (foldAndCollect oldIH newIH isRecCall ·)) (onMotive := fun xs _body => pure (absMotiveBody.beta (maskArray mask xs))) - (onAlt := fun expAltType alt => M2.branch do - withRewrittenMotiveArg expAltType Split.simpMatch fun expAltType' => + (onAlt := fun altIdx expAltType alt => M2.branch do + withRewrittenMotiveArg expAltType (rwMatcher altIdx) fun expAltType' => buildInductionBody toErase toClear expAltType' oldIH newIH isRecCall alt) return matcherApp'.toExpr diff --git a/src/Lean/Meta/Tactic/Grind/Arith/CommRing.lean b/src/Lean/Meta/Tactic/Grind/Arith/CommRing.lean index fe7b73bc617f..886b1437b13c 100644 --- a/src/Lean/Meta/Tactic/Grind/Arith/CommRing.lean +++ b/src/Lean/Meta/Tactic/Grind/Arith/CommRing.lean @@ -16,6 +16,7 @@ import Lean.Meta.Tactic.Grind.Arith.CommRing.EqCnstr import Lean.Meta.Tactic.Grind.Arith.CommRing.Proof import Lean.Meta.Tactic.Grind.Arith.CommRing.DenoteExpr import Lean.Meta.Tactic.Grind.Arith.CommRing.Inv +import Lean.Meta.Tactic.Grind.Arith.CommRing.PP namespace Lean @@ -26,6 +27,7 @@ builtin_initialize registerTraceClass `grind.ring.assert.unsat (inherited := tru builtin_initialize registerTraceClass `grind.ring.assert.trivial (inherited := true) builtin_initialize registerTraceClass `grind.ring.assert.queue (inherited := true) builtin_initialize registerTraceClass `grind.ring.assert.basis (inherited := true) +builtin_initialize registerTraceClass `grind.ring.assert.store (inherited := true) builtin_initialize registerTraceClass `grind.ring.assert.discard (inherited := true) builtin_initialize registerTraceClass `grind.ring.simp builtin_initialize registerTraceClass `grind.ring.superpose @@ -35,5 +37,6 @@ builtin_initialize registerTraceClass `grind.debug.ring.simp builtin_initialize registerTraceClass `grind.debug.ring.proof builtin_initialize registerTraceClass `grind.debug.ring.check builtin_initialize registerTraceClass `grind.debug.ring.impEq +builtin_initialize registerTraceClass `grind.debug.ring.simpBasis end Lean diff --git a/src/Lean/Meta/Tactic/Grind/Arith/CommRing/DenoteExpr.lean b/src/Lean/Meta/Tactic/Grind/Arith/CommRing/DenoteExpr.lean index 
e1003246bd93..61b7d370776f 100644 --- a/src/Lean/Meta/Tactic/Grind/Arith/CommRing/DenoteExpr.lean +++ b/src/Lean/Meta/Tactic/Grind/Arith/CommRing/DenoteExpr.lean @@ -12,7 +12,9 @@ namespace Lean.Meta.Grind.Arith.CommRing Helper functions for converting reified terms back into their denotations. -/ -private def denoteNum (k : Int) : RingM Expr := do +variable [Monad M] [MonadGetRing M] + +private def denoteNum (k : Int) : M Expr := do let ring ← getRing let n := mkRawNatLit k.natAbs let ofNatInst := mkApp3 (mkConst ``Grind.CommRing.ofNat [ring.u]) ring.type ring.commRingInst n @@ -22,44 +24,44 @@ private def denoteNum (k : Int) : RingM Expr := do else return n -def _root_.Lean.Grind.CommRing.Power.denoteExpr (pw : Power) : RingM Expr := do +def _root_.Lean.Grind.CommRing.Power.denoteExpr (pw : Power) : M Expr := do let x := (← getRing).vars[pw.x]! if pw.k == 1 then return x else return mkApp2 (← getRing).powFn x (toExpr pw.k) -def _root_.Lean.Grind.CommRing.Mon.denoteExpr (m : Mon) : RingM Expr := do +def _root_.Lean.Grind.CommRing.Mon.denoteExpr (m : Mon) : M Expr := do match m with | .unit => denoteNum 1 | .mult pw m => go m (← pw.denoteExpr) where - go (m : Mon) (acc : Expr) : RingM Expr := do + go (m : Mon) (acc : Expr) : M Expr := do match m with | .unit => return acc | .mult pw m => go m (mkApp2 (← getRing).mulFn acc (← pw.denoteExpr)) -def _root_.Lean.Grind.CommRing.Poly.denoteExpr (p : Poly) : RingM Expr := do +def _root_.Lean.Grind.CommRing.Poly.denoteExpr (p : Poly) : M Expr := do match p with | .num k => denoteNum k | .add k m p => go p (← denoteTerm k m) where - denoteTerm (k : Int) (m : Mon) : RingM Expr := do + denoteTerm (k : Int) (m : Mon) : M Expr := do if k == 1 then m.denoteExpr else return mkApp2 (← getRing).mulFn (← denoteNum k) (← m.denoteExpr) - go (p : Poly) (acc : Expr) : RingM Expr := do + go (p : Poly) (acc : Expr) : M Expr := do match p with | .num 0 => return acc | .num k => return mkApp2 (← getRing).addFn acc (← denoteNum k) | .add k m p => go p (mkApp2 (← getRing).addFn acc (← denoteTerm k m)) -def _root_.Lean.Grind.CommRing.Expr.denoteExpr (e : RingExpr) : RingM Expr := do +def _root_.Lean.Grind.CommRing.Expr.denoteExpr (e : RingExpr) : M Expr := do go e where - go : RingExpr → RingM Expr + go : RingExpr → M Expr | .num k => denoteNum k | .var x => return (← getRing).vars[x]! 
| .add a b => return mkApp2 (← getRing).addFn (← go a) (← go b) @@ -68,13 +70,17 @@ where | .pow a k => return mkApp2 (← getRing).powFn (← go a) (toExpr k) | .neg a => return mkApp (← getRing).negFn (← go a) -def EqCnstr.denoteExpr (c : EqCnstr) : RingM Expr := do +private def mkEq (a b : Expr) : M Expr := do + let r ← getRing + return mkApp3 (mkConst ``Eq [r.u.succ]) r.type a b + +def EqCnstr.denoteExpr (c : EqCnstr) : M Expr := do mkEq (← c.p.denoteExpr) (← denoteNum 0) -def PolyDerivation.denoteExpr (d : PolyDerivation) : RingM Expr := do +def PolyDerivation.denoteExpr (d : PolyDerivation) : M Expr := do d.p.denoteExpr -def DiseqCnstr.denoteExpr (c : DiseqCnstr) : RingM Expr := do +def DiseqCnstr.denoteExpr (c : DiseqCnstr) : M Expr := do return mkNot (← mkEq (← c.d.denoteExpr) (← denoteNum 0)) end Lean.Meta.Grind.Arith.CommRing diff --git a/src/Lean/Meta/Tactic/Grind/Arith/CommRing/EqCnstr.lean b/src/Lean/Meta/Tactic/Grind/Arith/CommRing/EqCnstr.lean index c0903dcb8f3a..ce5ecf1bb7bc 100644 --- a/src/Lean/Meta/Tactic/Grind/Arith/CommRing/EqCnstr.lean +++ b/src/Lean/Meta/Tactic/Grind/Arith/CommRing/EqCnstr.lean @@ -89,16 +89,26 @@ def PolyDerivation.simplify (d : PolyDerivation) : RingM PolyDerivation := do return d /-- Simplifies `c₁` using `c₂`. -/ -def EqCnstr.simplifyWith (c₁ c₂ : EqCnstr) : RingM EqCnstr := do - let some r := c₁.p.simp? c₂.p (← nonzeroChar?) | return c₁ +def EqCnstr.simplifyWithCore (c₁ c₂ : EqCnstr) : RingM (Option EqCnstr) := do + let some r := c₁.p.simp? c₂.p (← nonzeroChar?) | return none let c := { c₁ with p := r.p h := .simp r.k₁ c₁ r.k₂ r.m₂ c₂ } incSteps trace_goal[grind.ring.simp] "{← c.p.denoteExpr}" + return some c + +/-- Simplifies `c₁` using `c₂`. -/ +def EqCnstr.simplifyWith (c₁ c₂ : EqCnstr) : RingM EqCnstr := do + let some c ← c₁.simplifyWithCore c₂ | return c₁ return c +/-- Simplifies `c₁` using `c₂` exhaustively. -/ +partial def EqCnstr.simplifyWithExhaustively (c₁ c₂ : EqCnstr) : RingM EqCnstr := do + let some c ← c₁.simplifyWithCore c₂ | return c₁ + c.simplifyWithExhaustively c₂ + /-- Simplify the given equation constraint using the current basis. -/ def EqCnstr.simplify (c : EqCnstr) : RingM EqCnstr := do let mut c := c @@ -150,22 +160,6 @@ def addToBasisCore (c : EqCnstr) : RingM Unit := do recheck := true } -def EqCnstr.simplifyBasis (c : EqCnstr) : RingM Unit := do - let .add _ m _ := c.p | return () - let .mult pw _ := m | return () - let x := pw.x - let cs := (← getRing).varToBasis[x]! - if cs.isEmpty then return () - modifyRing fun s => { s with varToBasis := s.varToBasis.set x {} } - for c' in cs do - let .add _ m' _ := c'.p | pure () - if m.divides m' then - let c'' ← c'.simplifyWith c - unless (← c''.checkConstant) do - addToBasisCore c'' - else - addToBasisCore c' - def EqCnstr.addToQueue (c : EqCnstr) : RingM Unit := do if (← checkMaxSteps) then return () trace_goal[grind.ring.assert.queue] "{← c.denoteExpr}" @@ -218,6 +212,29 @@ def EqCnstr.toMonic (c : EqCnstr) : RingM EqCnstr := do return { c with p := c.p.mulConst (-1), h := .mul (-1) c } return c +def EqCnstr.simplifyBasis (c : EqCnstr) : RingM Unit := do + trace[grind.debug.ring.simpBasis] "using: {← c.denoteExpr}" + let .add _ m _ := c.p | return () + let rec go (m' : Mon) : RingM Unit := do + match m' with + | .unit => return () + | .mult pw m' => goVar m pw.x; go m' + go m +where + goVar (m : Mon) (x : Var) : RingM Unit := do + let cs := (← getRing).varToBasis[x]! 
+ if cs.isEmpty then return () + modifyRing fun s => { s with varToBasis := s.varToBasis.set x {} } + for c' in cs do + trace[grind.debug.ring.simpBasis] "target: {← c'.denoteExpr}" + let .add _ m' _ := c'.p | pure () + if m.divides m' then + let c'' ← c'.simplifyWithExhaustively c + trace[grind.debug.ring.simpBasis] "simplified: {← c''.denoteExpr}" + addToQueue c'' + else + addToBasisCore c' + def EqCnstr.addToBasisAfterSimp (c : EqCnstr) : RingM Unit := do let c ← c.toMonic c.simplifyBasis diff --git a/src/Lean/Meta/Tactic/Grind/Arith/CommRing/PP.lean b/src/Lean/Meta/Tactic/Grind/Arith/CommRing/PP.lean new file mode 100644 index 000000000000..4a3e7b50db11 --- /dev/null +++ b/src/Lean/Meta/Tactic/Grind/Arith/CommRing/PP.lean @@ -0,0 +1,56 @@ +/- +Copyright (c) 2025 Amazon.com, Inc. or its affiliates. All Rights Reserved. +Released under Apache 2.0 license as described in the file LICENSE. +Authors: Leonardo de Moura +-/ +prelude +import Lean.Meta.Tactic.Grind.Arith.CommRing.DenoteExpr + +namespace Lean.Meta.Grind.Arith.CommRing + +instance : MonadGetRing (ReaderT Ring MetaM) where + getRing := read + +private def M := ReaderT Goal (StateT (Array MessageData) MetaM) + +private def toOption (cls : Name) (header : Thunk MessageData) (msgs : Array MessageData) : Option MessageData := + if msgs.isEmpty then + none + else + some (.trace {cls} header.get msgs) + +private def push (msgs : Array MessageData) (msg? : Option MessageData) : Array MessageData := + if let some msg := msg? then msgs.push msg else msgs + +def ppBasis? : ReaderT Ring MetaM (Option MessageData) := do + let mut basis := #[] + for cs in (← getRing).varToBasis do + for c in cs do + basis := basis.push (toTraceElem (← c.denoteExpr)) + return toOption `basis "Basis" basis + +def ppDiseqs? : ReaderT Ring MetaM (Option MessageData) := do + let mut diseqs := #[] + for d in (← getRing).diseqs do + diseqs := diseqs.push (toTraceElem (← d.denoteExpr)) + return toOption `diseqs "Disequalities" diseqs + +def ppRing? : ReaderT Ring MetaM (Option MessageData) := do + let msgs := #[] + let msgs := push msgs (← ppBasis?) + let msgs := push msgs (← ppDiseqs?) + return toOption `ring m!"Ring `{(← getRing).type}`" msgs + +def pp? (goal : Goal) : MetaM (Option MessageData) := do + let mut msgs := #[] + for ring in goal.arith.ring.rings do + let some msg ← ppRing? ring | pure () + msgs := msgs.push msg + if msgs.isEmpty then + return none + else if h : msgs.size = 1 then + return some msgs[0] + else + return some (.trace { cls := `ring } "Rings" msgs) + +end Lean.Meta.Grind.Arith.CommRing diff --git a/src/Lean/Meta/Tactic/Grind/Arith/CommRing/Util.lean b/src/Lean/Meta/Tactic/Grind/Arith/CommRing/Util.lean index b5ff936856ab..91327fb8bf1f 100644 --- a/src/Lean/Meta/Tactic/Grind/Arith/CommRing/Util.lean +++ b/src/Lean/Meta/Tactic/Grind/Arith/CommRing/Util.lean @@ -36,6 +36,15 @@ structure RingM.Context where -/ checkCoeffDvd : Bool := false +class MonadGetRing (m : Type → Type) where + getRing : m Ring + +export MonadGetRing (getRing) + +@[always_inline] +instance (m n) [MonadLift m n] [MonadGetRing m] : MonadGetRing n where + getRing := liftM (getRing : m Ring) + /-- We don't want to keep carrying the `RingId` around. 
-/ abbrev RingM := ReaderT RingM.Context GoalM @@ -45,7 +54,7 @@ abbrev RingM.run (ringId : Nat) (x : RingM α) : GoalM α := abbrev getRingId : RingM Nat := return (← read).ringId -def getRing : RingM Ring := do +protected def RingM.getRing : RingM Ring := do let s ← get' let ringId ← getRingId if h : ringId < s.rings.size then @@ -53,6 +62,9 @@ def getRing : RingM Ring := do else throwError "`grind` internal error, invalid ringId" +instance : MonadGetRing RingM where + getRing := RingM.getRing + @[inline] def modifyRing (f : Ring → Ring) : RingM Unit := do let ringId ← getRingId modify' fun s => { s with rings := s.rings.modify ringId f } @@ -75,14 +87,14 @@ def setTermRingId (e : Expr) : RingM Unit := do modify' fun s => { s with exprToRingId := s.exprToRingId.insert { expr := e } ringId } /-- Returns `some c` if the current ring has a nonzero characteristic `c`. -/ -def nonzeroChar? : RingM (Option Nat) := do +def nonzeroChar? [Monad m] [MonadGetRing m] : m (Option Nat) := do if let some (_, c) := (← getRing).charInst? then if c != 0 then return some c return none /-- Returns `some (charInst, c)` if the current ring has a nonzero characteristic `c`. -/ -def nonzeroCharInst? : RingM (Option (Expr × Nat)) := do +def nonzeroCharInst? [Monad m] [MonadGetRing m] : m (Option (Expr × Nat)) := do if let some (inst, c) := (← getRing).charInst? then if c != 0 then return some (inst, c) diff --git a/src/Lean/Meta/Tactic/Grind/Core.lean b/src/Lean/Meta/Tactic/Grind/Core.lean index bf35bed0f85b..7fbcc6a2c79e 100644 --- a/src/Lean/Meta/Tactic/Grind/Core.lean +++ b/src/Lean/Meta/Tactic/Grind/Core.lean @@ -37,6 +37,12 @@ where proof? := proofNew? } +/-- +Returns `true` if the parent is relevant for congruence closure. +-/ +private def isCongrRelevant (parent : Expr) : Bool := + parent.isApp || parent.isArrow + /-- Removes `root` parents from the congruence table. This is an auxiliary function performed while merging equivalence classes. @@ -45,7 +51,7 @@ private def removeParents (root : Expr) : GoalM ParentSet := do let parents ← getParents root for parent in parents do -- Recall that we may have `Expr.forallE` in `parents` because of `ForallProp.lean` - if (← pure parent.isApp <&&> isCongrRoot parent) then + if (← pure (isCongrRelevant parent) <&&> isCongrRoot parent) then trace_goal[grind.debug.parent] "remove: {parent}" modify fun s => { s with congrTable := s.congrTable.erase { e := parent } } return parents @@ -56,7 +62,7 @@ This is an auxiliary function performed while merging equivalence classes. -/ private def reinsertParents (parents : ParentSet) : GoalM Unit := do for parent in parents do - if (← pure parent.isApp <&&> isCongrRoot parent) then + if (← pure (isCongrRelevant parent) <&&> isCongrRoot parent) then trace_goal[grind.debug.parent] "reinsert: {parent}" addCongrTable parent diff --git a/src/Lean/Meta/Tactic/Grind/EMatch.lean b/src/Lean/Meta/Tactic/Grind/EMatch.lean index 4a245b93972b..c14f405d2ceb 100644 --- a/src/Lean/Meta/Tactic/Grind/EMatch.lean +++ b/src/Lean/Meta/Tactic/Grind/EMatch.lean @@ -300,7 +300,7 @@ private partial def instantiateTheorem (c : Choice) : M Unit := withDefault do w let report : M Unit := do reportIssue! "type error constructing proof for {← thm.origin.pp}\nwhen assigning metavariable {mvars[i]} with {indentExpr v}\n{← mkHasTypeButIsExpectedMsg vType mvarIdType}" unless (← withDefault <| isDefEq mvarIdType vType) do - let some heq ← withoutReportingMVarIssues <| proveEq? vType mvarIdType + let some heq ← withoutReportingMVarIssues <| proveEq? 
vType mvarIdType (abstract := true) | report return () /- diff --git a/src/Lean/Meta/Tactic/Grind/EMatchTheorem.lean b/src/Lean/Meta/Tactic/Grind/EMatchTheorem.lean index cab2ee4ee055..59bd5779409b 100644 --- a/src/Lean/Meta/Tactic/Grind/EMatchTheorem.lean +++ b/src/Lean/Meta/Tactic/Grind/EMatchTheorem.lean @@ -20,7 +20,7 @@ namespace Lean.Meta.Grind def mkOffsetPattern (pat : Expr) (k : Nat) : Expr := mkApp2 (mkConst ``Grind.offset) pat (mkRawNatLit k) -private def detectOffsets (pat : Expr) : MetaM Expr := do +def detectOffsets (pat : Expr) : MetaM Expr := do let pre (e : Expr) := do if e == pat then -- We only consider nested offset patterns @@ -51,7 +51,7 @@ def isEqBwdPattern? (e : Expr) : Option (Expr × Expr) := some (lhs, rhs) -- Configuration for the `grind` normalizer. We want both `zetaDelta` and `zeta` -private def normConfig : Grind.Config := {} +def normConfig : Grind.Config := {} theorem normConfig_zeta : normConfig.zeta = true := rfl theorem normConfig_zetaDelta : normConfig.zetaDelta = true := rfl @@ -101,7 +101,7 @@ inductive EMatchTheoremKind where | eqLhs | eqRhs | eqBoth | eqBwd | fwd | bwd | leftRight | rightLeft | default | user /- pattern specified using `grind_pattern` command -/ deriving Inhabited, BEq, Repr, Hashable -private def EMatchTheoremKind.toAttribute : EMatchTheoremKind → String +def EMatchTheoremKind.toAttribute : EMatchTheoremKind → String | .eqLhs => "[grind =]" | .eqRhs => "[grind =_]" | .eqBoth => "[grind _=_]" @@ -113,7 +113,7 @@ private def EMatchTheoremKind.toAttribute : EMatchTheoremKind → String | .default => "[grind]" | .user => "[grind]" -private def EMatchTheoremKind.explainFailure : EMatchTheoremKind → String +def EMatchTheoremKind.explainFailure : EMatchTheoremKind → String | .eqLhs => "failed to find pattern in the left-hand side of the theorem's conclusion" | .eqRhs => "failed to find pattern in the right-hand side of the theorem's conclusion" | .eqBoth => unreachable! -- eqBoth is a macro @@ -246,9 +246,9 @@ This is because `grind` performs normalization operations and uses specialized d to implement these symbols, which may interfere with E-matching behavior. -/ -- TODO: create attribute? -private def forbiddenDeclNames := #[``Eq, ``HEq, ``Iff, ``And, ``Or, ``Not] +def forbiddenDeclNames := #[``Eq, ``HEq, ``Iff, ``And, ``Or, ``Not] -private def isForbidden (declName : Name) := forbiddenDeclNames.contains declName +def isForbidden (declName : Name) := forbiddenDeclNames.contains declName /-- Auxiliary function to expand a pattern containing forbidden application symbols @@ -277,7 +277,7 @@ partial def splitWhileForbidden (pat : Expr) : List Expr := | HEq _ lhs _ rhs => splitWhileForbidden lhs ++ splitWhileForbidden rhs | _ => [pat] -private def dontCare := mkConst (Name.mkSimple "[grind_dontcare]") +def dontCare := mkConst (Name.mkSimple "[grind_dontcare]") def mkGroundPattern (e : Expr) : Expr := mkAnnotation `grind.ground_pat e @@ -285,13 +285,13 @@ def mkGroundPattern (e : Expr) : Expr := def groundPattern? (e : Expr) : Option Expr := annotation? `grind.ground_pat e -private def isGroundPattern (e : Expr) : Bool := +def isGroundPattern (e : Expr) : Bool := groundPattern? 
e |>.isSome def isPatternDontCare (e : Expr) : Bool := e == dontCare -private def isAtomicPattern (e : Expr) : Bool := +def isAtomicPattern (e : Expr) : Bool := e.isBVar || isPatternDontCare e || isGroundPattern e partial def ppPattern (pattern : Expr) : MessageData := Id.run do @@ -327,14 +327,14 @@ structure State where abbrev M := StateRefT State MetaM -private def saveSymbol (h : HeadIndex) : M Unit := do +def saveSymbol (h : HeadIndex) : M Unit := do unless (← get).symbolSet.contains h do modify fun s => { s with symbols := s.symbols.push h, symbolSet := s.symbolSet.insert h } -private def foundBVar (idx : Nat) : M Bool := +def foundBVar (idx : Nat) : M Bool := return (← get).bvarsFound.contains idx -private def saveBVar (idx : Nat) : M Unit := do +def saveBVar (idx : Nat) : M Unit := do modify fun s => { s with bvarsFound := s.bvarsFound.insert idx } inductive PatternArgKind where @@ -401,7 +401,7 @@ def getPatternArgKinds (f : Expr) (numArgs : Nat) : MetaM (Array PatternArgKind) else return .relevant -private def getPatternFn? (pattern : Expr) (inSupport : Bool) (argKind : PatternArgKind) : MetaM (Option Expr) := do +def getPatternFn? (pattern : Expr) (inSupport : Bool) (argKind : PatternArgKind) : MetaM (Option Expr) := do if !pattern.isApp && !pattern.isConst then return none else match pattern.getAppFn with @@ -419,7 +419,7 @@ private def getPatternFn? (pattern : Expr) (inSupport : Bool) (argKind : Pattern | _ => return none -private partial def go (pattern : Expr) (inSupport : Bool) : M Expr := do +partial def go (pattern : Expr) (inSupport : Bool) : M Expr := do if let some (e, k) := isOffsetPattern? pattern then let e ← goArg e inSupport .relevant if e == dontCare then @@ -471,7 +471,7 @@ end NormalizePattern Returns `true` if free variables in `type` are not in `thmVars` or are in `fvarsFound`. We use this function to check whether `type` is fully instantiated. -/ -private def checkTypeFVars (thmVars : FVarIdSet) (fvarsFound : FVarIdSet) (type : Expr) : Bool := +def checkTypeFVars (thmVars : FVarIdSet) (fvarsFound : FVarIdSet) (type : Expr) : Bool := let typeFVars := (collectFVars {} type).fvarIds typeFVars.all fun fvarId => !thmVars.contains fvarId || fvarsFound.contains fvarId @@ -481,7 +481,7 @@ Given an type class instance type `instType`, returns true if free variables in 2- are in `fvarsFound`. Remark: `fvarsFound` is a subset of `thmVars` -/ -private def canBeSynthesized (thmVars : FVarIdSet) (fvarsFound : FVarIdSet) (instType : Expr) : MetaM Bool := do +def canBeSynthesized (thmVars : FVarIdSet) (fvarsFound : FVarIdSet) (instType : Expr) : MetaM Bool := do forallTelescopeReducing instType fun xs type => type.withApp fun classFn classArgs => do for x in xs do unless checkTypeFVars thmVars fvarsFound (← inferType x) do return false @@ -515,7 +515,7 @@ The missing parameters: For type class instance parameters, we must check whether the free variables in class input parameters are available. -/ -private def checkCoverage (thmProof : Expr) (numParams : Nat) (bvarsFound : Std.HashSet Nat) : MetaM CheckCoverageResult := do +def checkCoverage (thmProof : Expr) (numParams : Nat) (bvarsFound : Std.HashSet Nat) : MetaM CheckCoverageResult := do if bvarsFound.size == numParams then return .ok forallBoundedTelescope (← inferType thmProof) numParams fun xs _ => do assert! numParams == xs.size @@ -578,7 +578,7 @@ private def checkCoverage (thmProof : Expr) (numParams : Nat) (bvarsFound : Std. 
Given a theorem with proof `proof` and `numParams` parameters, returns a message containing the parameters at positions `paramPos`. -/ -private def ppParamsAt (proof : Expr) (numParams : Nat) (paramPos : List Nat) : MetaM MessageData := do +def ppParamsAt (proof : Expr) (numParams : Nat) (paramPos : List Nat) : MetaM MessageData := do forallBoundedTelescope (← inferType proof) numParams fun xs _ => do let mut msg := m!"" let mut first := true @@ -606,7 +606,7 @@ def mkEMatchTheoremCore (origin : Origin) (levelParams : Array Name) (numParams levelParams, origin, kind } -private def getProofFor (declName : Name) : MetaM Expr := do +def getProofFor (declName : Name) : MetaM Expr := do let info ← getConstInfo declName -- For theorems, `isProp` has already been checked at declaration time unless wasOriginallyTheorem (← getEnv) declName do @@ -679,18 +679,18 @@ def getEMatchTheorems : CoreM EMatchTheorems := return ematchTheoremsExt.getState (← getEnv) /-- Returns the types of `xs` that are propositions. -/ -private def getPropTypes (xs : Array Expr) : MetaM (Array Expr) := +def getPropTypes (xs : Array Expr) : MetaM (Array Expr) := xs.filterMapM fun x => do let type ← inferType x if (← isProp type) then return some type else return none /-- State for the (pattern) `CollectorM` monad -/ -private structure Collector.State where +structure Collector.State where /-- Pattern found so far. -/ patterns : Array Expr := #[] done : Bool := false -private structure Collector.Context where +structure Collector.Context where proof : Expr xs : Array Expr @@ -698,13 +698,13 @@ private structure Collector.Context where private abbrev CollectorM := ReaderT Collector.Context $ StateRefT Collector.State NormalizePattern.M /-- Similar to `getPatternFn?`, but operates on expressions that do not contain loose de Bruijn variables. -/ -private def isPatternFnCandidate (f : Expr) : CollectorM Bool := do +def isPatternFnCandidate (f : Expr) : CollectorM Bool := do match f with | .const declName _ => return !isForbidden declName | .fvar .. => return !(← read).xs.contains f | _ => return false -private def addNewPattern (p : Expr) : CollectorM Unit := do +def addNewPattern (p : Expr) : CollectorM Unit := do trace[grind.debug.ematch.pattern] "found pattern: {ppPattern p}" let bvarsFound := (← getThe NormalizePattern.State).bvarsFound let done := (← checkCoverage (← read).proof (← read).xs.size bvarsFound) matches .ok @@ -713,7 +713,7 @@ private def addNewPattern (p : Expr) : CollectorM Unit := do modify fun s => { s with patterns := s.patterns.push p, done } /-- Collect the pattern (i.e., de Bruijn) variables in the given pattern. -/ -private def collectPatternBVars (p : Expr) : List Nat := +def collectPatternBVars (p : Expr) : List Nat := go p |>.run [] |>.2 where go (e : Expr) : StateM (List Nat) Unit := do @@ -723,7 +723,7 @@ where | .bvar idx => modify fun s => if s.contains idx then s else idx :: s | _ => return () -private def diff (s : List Nat) (found : Std.HashSet Nat) : List Nat := +def diff (s : List Nat) (found : Std.HashSet Nat) : List Nat := if found.isEmpty then s else s.filter fun x => !found.contains x /-- @@ -733,7 +733,7 @@ Returns `true` if pattern `p` contains a child `c` such that 3- `c` is not an offset pattern. 4- `c` is not a bound variable. 
-/ -private def hasChildWithSameNewBVars (p : Expr) +def hasChildWithSameNewBVars (p : Expr) (argKinds : Array NormalizePattern.PatternArgKind) (alreadyFound : Std.HashSet Nat) : CoreM Bool := do let s := diff (collectPatternBVars p) alreadyFound for arg in p.getAppArgs, argKind in argKinds do @@ -745,7 +745,7 @@ private def hasChildWithSameNewBVars (p : Expr) return true return false -private partial def collect (e : Expr) : CollectorM Unit := do +partial def collect (e : Expr) : CollectorM Unit := do if (← get).done then return () match e with | .app .. => @@ -783,7 +783,7 @@ private partial def collect (e : Expr) : CollectorM Unit := do collect b | _ => return () -private def collectPatterns? (proof : Expr) (xs : Array Expr) (searchPlaces : Array Expr) : MetaM (Option (List Expr × List HeadIndex)) := do +def collectPatterns? (proof : Expr) (xs : Array Expr) (searchPlaces : Array Expr) : MetaM (Option (List Expr × List HeadIndex)) := do let go : CollectorM (Option (List Expr)) := do for place in searchPlaces do trace[grind.debug.ematch.pattern] "place: {place}" @@ -801,7 +801,7 @@ Tries to find a ground pattern to activate the theorem. This is used for theorems such as `theorem evenZ : Even 0`. This function is only used if `collectPatterns?` returns `none`. -/ -private partial def collectGroundPattern? (proof : Expr) (xs : Array Expr) (searchPlaces : Array Expr) : MetaM (Option (Expr × List HeadIndex)) := do +partial def collectGroundPattern? (proof : Expr) (xs : Array Expr) (searchPlaces : Array Expr) : MetaM (Option (Expr × List HeadIndex)) := do unless (← checkCoverage proof xs.size {}) matches .ok do return none let go? : CollectorM (Option Expr) := do @@ -909,7 +909,7 @@ def mkEMatchEqTheoremsForDef? (declName : Name) : MetaM (Option (Array EMatchThe eqns.mapM fun eqn => do mkEMatchEqTheorem eqn (normalizePattern := true) -private def addGrindEqAttr (declName : Name) (attrKind : AttributeKind) (thmKind : EMatchTheoremKind) (useLhs := true) : MetaM Unit := do +def addGrindEqAttr (declName : Name) (attrKind : AttributeKind) (thmKind : EMatchTheoremKind) (useLhs := true) : MetaM Unit := do if wasOriginallyTheorem (← getEnv) declName then ematchTheoremsExt.add (← mkEMatchEqTheorem declName (normalizePattern := true) (useLhs := useLhs)) attrKind else if let some thms ← mkEMatchEqTheoremsForDef? 
declName then diff --git a/src/Lean/Meta/Tactic/Grind/ForallProp.lean b/src/Lean/Meta/Tactic/Grind/ForallProp.lean index 417e1931e0a8..b26ef0254734 100644 --- a/src/Lean/Meta/Tactic/Grind/ForallProp.lean +++ b/src/Lean/Meta/Tactic/Grind/ForallProp.lean @@ -37,13 +37,13 @@ def propagateForallPropUp (e : Expr) : GoalM Unit := do where propagateImpliesUp (a b : Expr) : GoalM Unit := do unless (← alreadyInternalized b) do return () - if (← isEqFalse a) then + if (← isEqFalse a <&&> isProp b) then -- a = False → (a → b) = True pushEqTrue e <| mkApp3 (mkConst ``Grind.imp_eq_of_eq_false_left) a b (← mkEqFalseProof a) - else if (← isEqTrue a) then + else if (← isEqTrue a <&&> isProp b) then -- a = True → (a → b) = b pushEq e b <| mkApp3 (mkConst ``Grind.imp_eq_of_eq_true_left) a b (← mkEqTrueProof a) - else if (← isEqTrue b) then + else if (← isEqTrue b <&&> isProp a) then -- b = True → (a → b) = True pushEqTrue e <| mkApp3 (mkConst ``Grind.imp_eq_of_eq_true_right) a b (← mkEqTrueProof b) else if (← isEqFalse b <&&> isEqTrue e <&&> isProp a) then diff --git a/src/Lean/Meta/Tactic/Grind/Internalize.lean b/src/Lean/Meta/Tactic/Grind/Internalize.lean index 99ce01a221f0..afcace274843 100644 --- a/src/Lean/Meta/Tactic/Grind/Internalize.lean +++ b/src/Lean/Meta/Tactic/Grind/Internalize.lean @@ -23,12 +23,13 @@ def addCongrTable (e : Expr) : GoalM Unit := do if let some { e := e' } := (← get).congrTable.find? { e } then -- `f` and `g` must have the same type. -- See paper: Congruence Closure in Intensional Type Theory - let f := e.getAppFn - let g := e'.getAppFn - unless isSameExpr f g do - unless (← hasSameType f g) do - reportIssue! "found congruence between{indentExpr e}\nand{indentExpr e'}\nbut functions have different types" - return () + if e.isApp then + let f := e.getAppFn + let g := e'.getAppFn + unless isSameExpr f g do + unless (← hasSameType f g) do + reportIssue! "found congruence between{indentExpr e}\nand{indentExpr e'}\nbut functions have different types" + return () trace_goal[grind.debug.congr] "{e} = {e'}" pushEqHEq e e' congrPlaceholderProof let node ← getENode e @@ -299,12 +300,13 @@ private partial def internalizeImpl (e : Expr) (generation : Nat) (parent? : Opt mkENode' e generation | .forallE _ d b _ => mkENode' e generation + internalizeImpl d generation e + registerParent e d + unless b.hasLooseBVars do + internalizeImpl b generation e + registerParent e b + addCongrTable e if (← isProp d <&&> isProp e) then - internalizeImpl d generation e - registerParent e d - unless b.hasLooseBVars do - internalizeImpl b generation e - registerParent e b propagateUp e checkAndAddSplitCandidate e | .lit .. 
=> diff --git a/src/Lean/Meta/Tactic/Grind/MBTC.lean b/src/Lean/Meta/Tactic/Grind/MBTC.lean index fabaafb05d7b..aeae22b9464e 100644 --- a/src/Lean/Meta/Tactic/Grind/MBTC.lean +++ b/src/Lean/Meta/Tactic/Grind/MBTC.lean @@ -33,13 +33,13 @@ structure MBTC.Context where -/ eqAssignment : Expr → Expr → GoalM Bool -private structure ArgInfo where +structure ArgInfo where arg : Expr app : Expr private abbrev Map := Std.HashMap (Expr × Nat) (List ArgInfo) private abbrev Candidates := Std.HashSet SplitInfo -private def mkCandidate (a b : ArgInfo) (i : Nat) : GoalM SplitInfo := do +def mkCandidate (a b : ArgInfo) (i : Nat) : GoalM SplitInfo := do let (lhs, rhs) := if a.arg.lt b.arg then (a.arg, b.arg) else diff --git a/src/Lean/Meta/Tactic/Grind/PP.lean b/src/Lean/Meta/Tactic/Grind/PP.lean index 73cef430acb4..3d295bf6f03d 100644 --- a/src/Lean/Meta/Tactic/Grind/PP.lean +++ b/src/Lean/Meta/Tactic/Grind/PP.lean @@ -8,6 +8,7 @@ import Init.Grind.Util import Init.Grind.PP import Lean.Meta.Tactic.Grind.Types import Lean.Meta.Tactic.Grind.Arith.Model +import Lean.Meta.Tactic.Grind.Arith.CommRing.PP namespace Lean.Meta.Grind @@ -72,15 +73,15 @@ def ppGoals (goals : List Goal) : MetaM MessageData := do r := r ++ Format.line ++ m return r -private def ppExprArray (cls : Name) (header : String) (es : Array Expr) (clsElem : Name := Name.mkSimple "_") : MessageData := - let es := es.map fun e => .trace { cls := clsElem} m!"{e}" #[] - .trace { cls } header es - private abbrev M := ReaderT Goal (StateT (Array MessageData) MetaM) private def pushMsg (m : MessageData) : M Unit := modify fun s => s.push m +def ppExprArray (cls : Name) (header : String) (es : Array Expr) (clsElem : Name := Name.mkSimple "_") : MessageData := + let es := es.map (toTraceElem · clsElem) + .trace { cls } header es + private def ppEqcs : M Unit := do let mut trueEqc? : Option MessageData := none let mut falseEqc? : Option MessageData := none @@ -90,7 +91,7 @@ private def ppEqcs : M Unit := do if Option.isSome <| eqc.find? (·.isTrue) then let eqc := eqc.filter fun e => !e.isTrue unless eqc.isEmpty do - trueEqc? := ppExprArray `eqc "True propositions" eqc.toArray `prop + trueEqc? := ppExprArray `eqc "True propositions" eqc.toArray `prop else if Option.isSome <| eqc.find? (·.isFalse) then let eqc := eqc.filter fun e => !e.isFalse unless eqc.isEmpty do @@ -141,6 +142,11 @@ private def ppCutsat : M Unit := do ms := ms.push <| .trace { cls := `assign } m!"{Arith.quoteIfArithTerm e} := {val}" #[] pushMsg <| .trace { cls := `cutsat } "Assignment satisfying linear constraints" ms +private def ppCommRing : M Unit := do + let goal ← read + let some msg ← Arith.CommRing.pp? goal | return () + pushMsg msg + private def ppThresholds (c : Grind.Config) : M Unit := do let goal ← read let maxGen := goal.exprs.foldl (init := 0) fun g e => @@ -186,6 +192,7 @@ where ppActiveTheoremPatterns ppOffset ppCutsat + ppCommRing ppThresholds config end Lean.Meta.Grind diff --git a/src/Lean/Meta/Tactic/Grind/Proof.lean b/src/Lean/Meta/Tactic/Grind/Proof.lean index 265a82bc71a6..1853b71d4765 100644 --- a/src/Lean/Meta/Tactic/Grind/Proof.lean +++ b/src/Lean/Meta/Tactic/Grind/Proof.lean @@ -175,18 +175,24 @@ mutual /-- Constructs a congruence proof for `lhs` and `rhs`. -/ private partial def mkCongrProof (lhs rhs : Expr) (heq : Bool) : GoalM Expr := do - let f := lhs.getAppFn - let g := rhs.getAppFn - let numArgs := lhs.getAppNumArgs - assert! 
rhs.getAppNumArgs == numArgs - if f.isConstOf ``Lean.Grind.nestedProof && g.isConstOf ``Lean.Grind.nestedProof && numArgs == 2 then - mkNestedProofCongr lhs rhs heq - else if f.isConstOf ``Eq && g.isConstOf ``Eq && numArgs == 3 then - mkEqCongrProof lhs rhs heq - else if (← isCongrDefaultProofTarget lhs rhs f g numArgs) then - mkCongrDefaultProof lhs rhs heq + if let .forallE _ p₁ q₁ _ := lhs then + let .forallE _ p₂ q₂ _ := rhs | unreachable! + let u ← withDefault <| getLevel p₁ + let v ← withDefault <| getLevel q₁ + return mkApp6 (mkConst ``implies_congr [u, v]) p₁ p₂ q₁ q₂ (← mkEqProofCore p₁ p₂ false) (← mkEqProofCore q₁ q₂ false) else - mkHCongrProof lhs rhs heq + let f := lhs.getAppFn + let g := rhs.getAppFn + let numArgs := lhs.getAppNumArgs + assert! rhs.getAppNumArgs == numArgs + if f.isConstOf ``Lean.Grind.nestedProof && g.isConstOf ``Lean.Grind.nestedProof && numArgs == 2 then + mkNestedProofCongr lhs rhs heq + else if f.isConstOf ``Eq && g.isConstOf ``Eq && numArgs == 3 then + mkEqCongrProof lhs rhs heq + else if (← isCongrDefaultProofTarget lhs rhs f g numArgs) then + mkCongrDefaultProof lhs rhs heq + else + mkHCongrProof lhs rhs heq private partial def realizeEqProof (lhs rhs : Expr) (h : Expr) (flipped : Bool) (heq : Bool) : GoalM Expr := do let h ← if h == congrPlaceholderProof then diff --git a/src/Lean/Meta/Tactic/Grind/ProveEq.lean b/src/Lean/Meta/Tactic/Grind/ProveEq.lean index 12e64f99bdcb..e7908b7bf5d5 100644 --- a/src/Lean/Meta/Tactic/Grind/ProveEq.lean +++ b/src/Lean/Meta/Tactic/Grind/ProveEq.lean @@ -28,6 +28,144 @@ private def ensureInternalized (e : Expr) : GoalM Expr := do internalize e 0 return e +/-! +`abstractGroundMismatches?` is an auxiliary function for creating auxiliary equality +proofs. When trying to prove `lhs = rhs`, we use two different approaches. In the first +one, we just internalize the terms, propagate, and then check whether they are in the same +equivalence class. The function `abstractGroundMismatches?` is used to implement the +second approach, which focuses on terms containing binders. Here is a motivating example: +suppose we are trying to prove that `(b : Bool) → a[i]? = some b → Nat` is equal to +`(b : Bool) → some v = some b → Nat` and the goal contains the equivalence class +`{a[i]?, some v}`. +Congruence closure does not process terms containing free variables, and fails to +prove the equality. +`abstractGroundMismatches?` extracts ground terms that are equal in the current goal, +and creates an auxiliary function. In the example above, the following two terms +are generated. +- `(fun x => (b : Bool) → x = some b → Nat) a[i]?` +- `(fun x => (b : Bool) → x = some b → Nat) (some v)` + +The two new terms are definitionally equal to the original ones, but congruence +closure will now detect the equality. + +The motivation for this infrastructure is match-expression equalities. +Suppose we have +``` +match h : assign[v]? with +| none => ... +| some b => ... +``` +When instantiating the match-expr equations for the `none` and `some` cases, +we need to introduce casts. +-/ + +/-- Context for the `AbstractM` monad used to implement `abstractGroundMismatches?` -/ +private structure AbstractM.Context where + /-- Number of binders under which the terms being processed occur. -/ + offset : Nat := 0 + +/-- State for the `AbstractM` monad used to implement `abstractGroundMismatches?` -/ +private structure AbstractM.State where + cache : Std.HashMap (Expr × Expr) Expr := {} + /-- Types of the new variables created for the auxiliary `fun`.
-/ + varTypes : Array Expr := #[] + /-- Ground terms from the `lhs` that have been abstracted so far. -/ + lhss : Array Expr := #[] + /-- Ground terms from the `rhs` that have been abstracted so far. -/ + rhss : Array Expr := #[] + +/-- Helper monad for implementing `abstractGroundMismatches?` -/ +private abbrev AbstractM := ReaderT AbstractM.Context $ StateT AbstractM.State $ OptionT GoalM + +/-- Returns `true` if the current terms occur under binders. -/ +private def inBinder : AbstractM Bool := + return (← read).offset > 0 + +/-- Executes `x` in a context where the number of binders has been increased. -/ +private abbrev withIncOffset (x : AbstractM α) : AbstractM α := + withReader (fun ctx => { ctx with offset := ctx.offset + 1 }) x + +/-- +Returns `fun (x_0 : varTypes[0]) ... (x_n : varTypes[n]) => b`. +`b` contains `varTypes.size` loose bound variables. +-/ +private def mkLambdaWithBodyAndVarType (varTypes : Array Expr) (b : Expr) : Expr := Id.run do + let mut i := 0 + let mut f := b + for varType in varTypes do + f := mkLambda ((`_x).appendIndexAfter i) .default varType f + return f + +/-- +Helper function for `proveEq?`. It abstracts nested ground terms in `lhs` and `rhs`. +Suppose `lhs` is `(b : Bool) → a[i]? = some b → Nat`, and +`rhs` is `(b : Bool) → some v = some b → Nat`. +Then, the result is +- `(fun x => (b : Bool) → x = some b → Nat) a[i]?` +- `(fun x => (b : Bool) → x = some b → Nat) (some v)` +-/ +private partial def abstractGroundMismatches? (lhs rhs : Expr) : GoalM (Option (Expr × Expr)) := do + let lhs ← shareCommon lhs + let rhs ← shareCommon rhs + let some (f, s) ← go lhs rhs |>.run {} |>.run {} |>.run + | return none + if s.lhss.isEmpty then + return none + let f := mkLambdaWithBodyAndVarType s.varTypes f + return some (mkAppN f s.lhss, mkAppN f s.rhss) +where + goCore (lhs rhs : Expr) : AbstractM Expr := do + if (← inBinder) then + if !lhs.hasLooseBVars && !rhs.hasLooseBVars then + let lhs ← ensureInternalized lhs + let rhs ← ensureInternalized rhs + processNewFacts + if (← isEqv lhs rhs) then + if (← hasSameType lhs rhs) then + let varType ← inferType lhs + let varIdx := (← get).varTypes.size + (← read).offset + modify fun s => { s with + varTypes := s.varTypes.push varType + lhss := s.lhss.push lhs + rhss := s.rhss.push rhs + } + return mkBVar varIdx + match lhs with + | .lit _ | .sort _ | .mvar _ | .fvar _ + | .bvar _ | .const .. => failure + | .mdata d₁ b₁ => + let .mdata _ b₂ := rhs | failure + return .mdata d₁ (← go b₁ b₂) + | .proj n₁ i₁ b₁ => + let .proj n₂ i₂ b₂ := rhs | failure + guard (n₁ == n₂ && i₁ == i₂) + return .proj n₁ i₁ (← go b₁ b₂) + | .app f₁ a₁ => + let .app f₂ a₂ := rhs | failure + return mkApp (← go f₁ f₂) (← go a₁ a₂) + | .forallE n₁ d₁ b₁ i₁ => + let .forallE _ d₂ b₂ _ := rhs | failure + return mkForall n₁ i₁ (← go d₁ d₂) (← withIncOffset <| go b₁ b₂) + | .lam n₁ d₁ b₁ i₁ => + let .lam _ d₂ b₂ _ := rhs | failure + return mkLambda n₁ i₁ (← go d₁ d₂) (← withIncOffset <| go b₁ b₂) + | .letE n₁ t₁ v₁ b₁ nd₁ => + let .letE _ t₂ v₂ b₂ _ := rhs | failure + return mkLet n₁ (← go t₁ t₂) (← go v₁ v₂) (← withIncOffset <| go b₁ b₂) nd₁ + + go (lhs rhs : Expr) : AbstractM Expr := do + if isSameExpr lhs rhs then + return lhs + if let some e := (← get).cache[(lhs, rhs)]? then + return e + let r ← goCore lhs rhs + modify fun s => { s with cache := s.cache.insert (lhs, rhs) r } + return r + +/-! +Helper functions for creating equality proofs. +-/ + /-- Try to construct a proof that `lhs = rhs` using the information in the goal state.
If `lhs` and `rhs` have not been internalized, this function @@ -37,11 +175,14 @@ The goal state is not modified by this function. This function mainly relies on congruence closure, and constraint propagation. It will not perform case analysis. -/ -def proveEq? (lhs rhs : Expr) : GoalM (Option Expr) := do +def proveEq? (lhs rhs : Expr) (abstract : Bool := false) : GoalM (Option Expr) := do trace[grind.debug.proveEq] "({lhs}) = ({rhs})" + unless (← hasSameType lhs rhs) do return none if (← alreadyInternalized lhs <&&> alreadyInternalized rhs) then if (← isEqv lhs rhs) then return some (← mkEqProof lhs rhs) + else if abstract then withoutModifyingState do + tryAbstract lhs rhs else return none else withoutModifyingState do @@ -55,9 +196,23 @@ def proveEq? (lhs rhs : Expr) : GoalM (Option Expr) := do let lhs ← ensureInternalized lhs let rhs ← ensureInternalized rhs processNewFacts - unless (← isEqv lhs rhs) do return none - unless (← hasSameType lhs rhs) do return none - mkEqProof lhs rhs + if (← isEqv lhs rhs) then + return some (← mkEqProof lhs rhs) + else if abstract then + tryAbstract lhs rhs + else + return none +where + tryAbstract (lhs₀ rhs₀ : Expr) : GoalM (Option Expr) := do + let some (lhs, rhs) ← abstractGroundMismatches? lhs₀ rhs₀ | return none + trace[grind.debug.proveEq] "abstract: ({lhs}) = ({rhs})" + let lhs ← ensureInternalized lhs + let rhs ← ensureInternalized rhs + processNewFacts + if (← isEqv lhs rhs) then + return some (← mkEqProof lhs rhs) + else + return none /-- Similar to `proveEq?`, but for heterogeneous equality. -/ def proveHEq? (lhs rhs : Expr) : GoalM (Option Expr) := do diff --git a/src/Lean/Meta/Tactic/Grind/Types.lean b/src/Lean/Meta/Tactic/Grind/Types.lean index f621bb153c56..1e9e37d4cc25 100644 --- a/src/Lean/Meta/Tactic/Grind/Types.lean +++ b/src/Lean/Meta/Tactic/Grind/Types.lean @@ -384,7 +384,9 @@ private def hasSameRoot (enodes : ENodeMap) (a b : Expr) : Bool := Id.run do isSameExpr n1.root n2.root private def congrHash (enodes : ENodeMap) (e : Expr) : UInt64 := - match_expr e with + if let .forallE _ d b _ := e then + mixHash (hashRoot enodes d) (hashRoot enodes b) + else match_expr e with | Grind.nestedProof p _ => hashRoot enodes p | Eq _ lhs rhs => goEq lhs rhs | _ => go e 17 @@ -400,7 +402,12 @@ where /-- Returns `true` if `a` and `b` are congruent modulo the equivalence classes in `enodes`. -/ private partial def isCongruent (enodes : ENodeMap) (a b : Expr) : Bool := - match_expr a with + if let .forallE _ d₁ b₁ _ := a then + if let .forallE _ d₂ b₂ _ := b then + hasSameRoot enodes d₁ d₂ && hasSameRoot enodes b₁ b₂ + else + false + else match_expr a with | Grind.nestedProof p₁ _ => let_expr Grind.nestedProof p₂ _ := b | false hasSameRoot enodes p₁ p₂ @@ -410,7 +417,11 @@ private partial def isCongruent (enodes : ENodeMap) (a b : Expr) : Bool := goEq lhs₁ rhs₁ lhs₂ rhs₂ else go a b - | _ => go a b + | _ => + if a.isApp && b.isApp then + go a b + else + false where goEq (lhs₁ rhs₁ lhs₂ rhs₂ : Expr) : Bool := (hasSameRoot enodes lhs₁ lhs₂ && hasSameRoot enodes rhs₁ rhs₂) diff --git a/src/Lean/Meta/Tactic/Rfl.lean b/src/Lean/Meta/Tactic/Rfl.lean index bb55f734c86a..5c6b57367b90 100644 --- a/src/Lean/Meta/Tactic/Rfl.lean +++ b/src/Lean/Meta/Tactic/Rfl.lean @@ -90,7 +90,7 @@ def _root_.Lean.MVarId.applyRfl (goal : MVarId) : MetaM Unit := goal.withContext let mut ex? 
:= none for lem in ← (reflExt.getState (← getEnv)).getMatch rel do try - let gs ← goal.apply (← mkConstWithFreshMVarLevels lem) + let gs ← goal.apply (← mkConstWithFreshMVarLevels lem) (term? := m!"'{.ofConstName lem}'") if gs.isEmpty then return () else throwError MessageData.tagged `Tactic.unsolvedGoals <| m!"unsolved goals\n{ goalsToMessageData gs}" diff --git a/src/Lean/Meta/Tactic/Simp/Main.lean b/src/Lean/Meta/Tactic/Simp/Main.lean index e245c8a890a9..9ebe85d1aec5 100644 --- a/src/Lean/Meta/Tactic/Simp/Main.lean +++ b/src/Lean/Meta/Tactic/Simp/Main.lean @@ -52,13 +52,13 @@ Unfold definition even if it is not marked as `@[reducible]`. Remark: We never unfold irreducible definitions. Mathlib relies on that in the implementation of the command `irreducible_def`. -/ -private def unfoldDefinitionAny? (e : Expr) : MetaM (Option Expr) := do +def unfoldDefinitionAny? (e : Expr) : MetaM (Option Expr) := do if let .const declName _ := e.getAppFn then if (← isIrreducible declName) then return none unfoldDefinition? e (ignoreTransparency := true) -private def reduceProjFn? (e : Expr) : SimpM (Option Expr) := do +def reduceProjFn? (e : Expr) : SimpM (Option Expr) := do matchConst e.getAppFn (fun _ => pure none) fun cinfo _ => do match (← getProjectionFnInfo? cinfo.name) with | none => return none @@ -99,7 +99,7 @@ private def reduceProjFn? (e : Expr) : SimpM (Option Expr) := do -- `structure` projections reduceProjCont? (← unfoldDefinition? e) -private def reduceFVar (cfg : Config) (thms : SimpTheoremsArray) (e : Expr) : SimpM Expr := do +def reduceFVar (cfg : Config) (thms : SimpTheoremsArray) (e : Expr) : SimpM Expr := do let localDecl ← getFVarLocalDecl e if cfg.zetaDelta || thms.isLetDeclToUnfold e.fvarId! || localDecl.isImplementationDetail then if !cfg.zetaDelta && thms.isLetDeclToUnfold e.fvarId! then @@ -117,7 +117,7 @@ private def reduceFVar (cfg : Config) (thms : SimpTheoremsArray) (e : Expr) : Si | ... ``` -/ -private partial def isMatchDef (declName : Name) : CoreM Bool := do +partial def isMatchDef (declName : Name) : CoreM Bool := do let .defnInfo info ← getConstInfo declName | return false return go (← getEnv) info.value where @@ -131,7 +131,7 @@ where /-- Try to unfold `e`. -/ -private def unfold? (e : Expr) : SimpM (Option Expr) := do +def unfold? (e : Expr) : SimpM (Option Expr) := do let f := e.getAppFn if !f.isConst then return none @@ -172,7 +172,7 @@ private def unfold? (e : Expr) : SimpM (Option Expr) := do else return none -private def reduceStep (e : Expr) : SimpM Expr := do +def reduceStep (e : Expr) : SimpM Expr := do let cfg ← getConfig let f := e.getAppFn if f.isMVar then @@ -205,7 +205,7 @@ private def reduceStep (e : Expr) : SimpM Expr := do return e' | none => foldRawNatLit e -private partial def reduce (e : Expr) : SimpM Expr := withIncRecDepth do +partial def reduce (e : Expr) : SimpM Expr := withIncRecDepth do let e' ← reduceStep e if e' == e then return e' @@ -417,14 +417,14 @@ def simpLet (e : Expr) : SimpM Result := do let h ← mkLambdaFVars #[x] h return { expr := e', proof? 
:= some (← mkLetBodyCongr v' h) } -private def dsimpReduce : DSimproc := fun e => do +def dsimpReduce : DSimproc := fun e => do let mut eNew ← reduce e if eNew.isFVar then eNew ← reduceFVar (← getConfig) (← getSimpTheorems) eNew if eNew != e then return .visit eNew else return .done e /-- Helper `dsimproc` for `doNotVisitOfNat` and `doNotVisitOfScientific` -/ -private def doNotVisit (pred : Expr → Bool) (declName : Name) : DSimproc := fun e => do +def doNotVisit (pred : Expr → Bool) (declName : Name) : DSimproc := fun e => do if pred e then if (← readThe Simp.Context).isDeclToUnfold declName then return .continue e @@ -441,20 +441,20 @@ Auxiliary `dsimproc` for not visiting `OfNat.ofNat` application subterms. This is the `dsimp` equivalent of the approach used at `visitApp`. Recall that we fold orphan raw Nat literals. -/ -private def doNotVisitOfNat : DSimproc := doNotVisit isOfNatNatLit ``OfNat.ofNat +def doNotVisitOfNat : DSimproc := doNotVisit isOfNatNatLit ``OfNat.ofNat /-- Auxiliary `dsimproc` for not visiting `OfScientific.ofScientific` application subterms. -/ -private def doNotVisitOfScientific : DSimproc := doNotVisit isOfScientificLit ``OfScientific.ofScientific +def doNotVisitOfScientific : DSimproc := doNotVisit isOfScientificLit ``OfScientific.ofScientific /-- Auxiliary `dsimproc` for not visiting `Char` literal subterms. -/ -private def doNotVisitCharLit : DSimproc := doNotVisit isCharLit ``Char.ofNat +def doNotVisitCharLit : DSimproc := doNotVisit isCharLit ``Char.ofNat @[export lean_dsimp] -private partial def dsimpImpl (e : Expr) : SimpM Expr := do +partial def dsimpImpl (e : Expr) : SimpM Expr := do let cfg ← getConfig unless cfg.dsimp do return e @@ -601,7 +601,7 @@ def isNonDepLetFun (e : Expr) : Bool := /-- Auxiliary structure used to represent the return value of `simpNonDepLetFun.go`. -/ -private structure SimpLetFunResult where +structure SimpLetFunResult where /-- The simplified expression. Note that is may contain loose bound variables. 
`simpNonDepLetFun.go` attempts to minimize the quadratic overhead imposed @@ -780,16 +780,16 @@ where trace[Meta.Tactic.simp.heads] "{repr e.toHeadIndex}" simpLoop e -@[inline] private def withSimpContext (ctx : Context) (x : MetaM α) : MetaM α := do +@[inline] def withSimpContext (ctx : Context) (x : MetaM α) : MetaM α := do withConfig (fun c => { c with etaStruct := ctx.config.etaStruct }) <| withTrackingZetaDeltaSet ctx.zetaDeltaSet <| withReducible x -private def updateUsedSimpsWithZetaDeltaCore (s : UsedSimps) (usedZetaDelta : FVarIdSet) : UsedSimps := +def updateUsedSimpsWithZetaDeltaCore (s : UsedSimps) (usedZetaDelta : FVarIdSet) : UsedSimps := usedZetaDelta.fold (init := s) fun s fvarId => s.insert <| .fvar fvarId -private def updateUsedSimpsWithZetaDelta (ctx : Context) (stats : Stats) : MetaM Stats := do +def updateUsedSimpsWithZetaDelta (ctx : Context) (stats : Stats) : MetaM Stats := do let used := stats.usedTheorems let used := updateUsedSimpsWithZetaDeltaCore used ctx.initUsedZetaDelta let used := updateUsedSimpsWithZetaDeltaCore used (← getZetaDeltaFVarIds) diff --git a/src/Lean/Meta/Tactic/Simp/Rewrite.lean b/src/Lean/Meta/Tactic/Simp/Rewrite.lean index bf8fde8dd62c..d05361156348 100644 --- a/src/Lean/Meta/Tactic/Simp/Rewrite.lean +++ b/src/Lean/Meta/Tactic/Simp/Rewrite.lean @@ -110,7 +110,7 @@ where return false private def useImplicitDefEqProof (thm : SimpTheorem) : SimpM Bool := do - if thm.rfl then + if thm.isRfl (← getEnv) then return (← getConfig).implicitDefEqProofs else return false @@ -218,7 +218,7 @@ where else let candidates := candidates.insertionSort fun e₁ e₂ => e₁.1.priority > e₂.1.priority for (thm, numExtraArgs) in candidates do - unless inErasedSet thm || (rflOnly && !thm.rfl) do + unless inErasedSet thm || (rflOnly && !thm.isRfl (← getEnv)) do if let some result ← tryTheoremWithExtraArgs? e thm numExtraArgs then trace[Debug.Meta.Tactic.simp] "rewrite result {e} => {result.expr}" return some result @@ -236,7 +236,7 @@ where else let candidates := candidates.insertionSort fun e₁ e₂ => e₁.priority > e₂.priority for thm in candidates do - unless inErasedSet thm || (rflOnly && !thm.rfl) do + unless inErasedSet thm || (rflOnly && !thm.isRfl (← getEnv)) do let result? ← withNewMCtxDepth do let val ← thm.getValue let type ← inferType val diff --git a/src/Lean/Meta/Tactic/Simp/SimpTheorems.lean b/src/Lean/Meta/Tactic/Simp/SimpTheorems.lean index de8e2a905ebb..b7f0c95747c1 100644 --- a/src/Lean/Meta/Tactic/Simp/SimpTheorems.lean +++ b/src/Lean/Meta/Tactic/Simp/SimpTheorems.lean @@ -122,10 +122,24 @@ structure SimpTheorem where It is also viewed an `id` used to "erase" `simp` theorems from `SimpTheorems`. -/ origin : Origin - /-- `rfl` is true if `proof` is by `Eq.refl` or `rfl`. -/ + /-- + `rfl` is true if `proof` is by `Eq.refl` or `rfl`. + + NOTE: As the visibility of `proof` may have changed between the point of declaration and use + of a `@[simp]` theorem, `isRfl` must be used to check for this flag. + -/ rfl : Bool deriving Inhabited +/-- Checks whether the theorem holds by reflexivity in the scope given by the environment. -/ +def SimpTheorem.isRfl (s : SimpTheorem) (env : Environment) : Bool := Id.run do + if !s.rfl then + return false + let .decl declName _ _ := s.origin | + return true -- not a global simp theorem, proof visibility must be unchanged + -- If we can see the proof, it must hold in the current scope. + env.findAsync? declName matches some ({ kind := .thm, .. 
}) + mutual private partial def isRflProofCore (type : Expr) (proof : Expr) : CoreM Bool := do match type with diff --git a/src/Lean/Meta/Tactic/TryThis.lean b/src/Lean/Meta/Tactic/TryThis.lean index f4867b012992..4c783474deb8 100644 --- a/src/Lean/Meta/Tactic/TryThis.lean +++ b/src/Lean/Meta/Tactic/TryThis.lean @@ -10,9 +10,11 @@ import Lean.Data.Json.Elab import Lean.Data.Lsp.Utf16 import Lean.Meta.CollectFVars import Lean.Meta.Tactic.ExposeNames +import Lean.Meta.TryThis +import Lean.Meta.Hint /-! -# "Try this" support +# "Try this" code action and tactic suggestions This implements a mechanism for tactics to print a message saying `Try this: <suggestion>`, where `<suggestion>` is a link to a replacement tactic. Users can either click on the link @@ -84,22 +86,10 @@ export default function ({ suggestions, range, header, isInline, style }) { inner) }" -/-! # Code action -/ +-- Because we can't reference `builtin_widget_module` in `Lean.Meta.Hint`, we add the attribute here +attribute [builtin_widget_module] Hint.tryThisDiffWidget -/-- A packet of information about a "Try this" suggestion -that we store in the infotree for the associated code action to retrieve. -/ -structure TryThisInfo : Type where - /-- The textual range to be replaced by one of the suggestions. -/ - range : Lsp.Range - /-- - A list of suggestions for the user to choose from. - Each suggestion may optionally come with an override for the code action title. - -/ - suggestionTexts : Array (String × Option String) - /-- The prefix to display before the code action for a "Try this" suggestion if no custom code - action title is provided. If not provided, `"Try this: "` is used. -/ - codeActionPrefix? : Option String - deriving TypeName +/-! # Code action -/ /-- This is a code action provider that looks for `TryThisInfo` nodes and supplies a code action to @@ -130,202 +120,14 @@ apply the replacement. /-! # Formatting -/ -/-- Yields `(indent, column)` given a `FileMap` and a `String.Range`, where `indent` is the number -of spaces by which the line that first includes `range` is initially indented, and `column` is the -column `range` starts at in that line. -/ -def getIndentAndColumn (map : FileMap) (range : String.Range) : Nat × Nat := - let start := map.source.findLineStart range.start - let body := map.source.findAux (· ≠ ' ') range.start start - ((body - start).1, (range.start - start).1) - /-- Delaborate `e` into syntax suitable for use by `refine`. -/ def delabToRefinableSyntax (e : Expr) : MetaM Term := withOptions (pp.mvars.anonymous.set · false) do delab e -/-- -An option allowing the user to customize the ideal input width. Defaults to 100. -This option controls output format when -the output is intended to be copied back into a lean file -/ -register_option format.inputWidth : Nat := { - /- The default maximum width of an ideal line in source code. -/ - defValue := 100 - descr := "ideal input width" -} - -/-- Get the input width specified in the options -/ -def getInputWidth (o : Options) : Nat := format.inputWidth.get o - -/-! # `Suggestion` data -/ - --- TODO: we could also support `Syntax` and `Format` -/-- Text to be used as a suggested replacement in the infoview. This can be either a `TSyntax kind` -for a single `kind : SyntaxNodeKind` or a raw `String`. - -Instead of using constructors directly, there are coercions available from these types to -`SuggestionText`. -/ -inductive SuggestionText where - /-- `TSyntax kind` used as suggested replacement text in the infoview. 
Note that while `TSyntax` - is in general parameterized by a list of `SyntaxNodeKind`s, we only allow one here; this - unambiguously guides pretty-printing. -/ - | tsyntax {kind : SyntaxNodeKind} : TSyntax kind → SuggestionText - /-- A raw string to be used as suggested replacement text in the infoview. -/ - | string : String → SuggestionText - deriving Inhabited - -instance : ToMessageData SuggestionText where - toMessageData - | .tsyntax stx => stx - | .string s => s - -instance {kind : SyntaxNodeKind} : CoeHead (TSyntax kind) SuggestionText where - coe := .tsyntax - -instance : Coe String SuggestionText where - coe := .string - -namespace SuggestionText - -/-- Pretty-prints a `SuggestionText` as a `Format`. If the `SuggestionText` is some `TSyntax kind`, -we use the appropriate pretty-printer; strings are coerced to `Format`s as-is. -/ -def pretty : SuggestionText → CoreM Format - | .tsyntax (kind := kind) stx => ppCategory kind stx - | .string text => return text - -/- Note that this is essentially `return (← s.pretty).prettyExtra w indent column`, but we -special-case strings to avoid converting them to `Format`s and back, which adds indentation after each newline. -/ -/-- Pretty-prints a `SuggestionText` as a `String` and wraps with respect to the pane width, -indentation, and column, via `Format.prettyExtra`. If `w := none`, then -`w := getInputWidth (← getOptions)` is used. Raw `String`s are returned as-is. -/ -def prettyExtra (s : SuggestionText) (w : Option Nat := none) - (indent column : Nat := 0) : CoreM String := - match s with - | .tsyntax (kind := kind) stx => do - let w ← match w with | none => do pure <| getInputWidth (← getOptions) | some n => pure n - return (← ppCategory kind stx).pretty w indent column - | .string text => return text - -end SuggestionText - -/-- -Style hooks for `Suggestion`s. See `SuggestionStyle.error`, `.warning`, `.success`, `.value`, -and other definitions here for style presets. This is an arbitrary `Json` object, with the following -interesting fields: -* `title`: the hover text in the suggestion link -* `className`: the CSS classes applied to the link -* `style`: A `Json` object with additional inline CSS styles such as `color` or `textDecoration`. --/ -def SuggestionStyle := Json deriving Inhabited, ToJson - -/-- Style as an error. By default, decorates the text with an undersquiggle; providing the argument -`decorated := false` turns this off. -/ -def SuggestionStyle.error (decorated := true) : SuggestionStyle := - let style := if decorated then - json% { - -- The VS code error foreground theme color (`--vscode-errorForeground`). - color: "var(--vscode-errorForeground)", - textDecoration: "underline wavy var(--vscode-editorError-foreground) 1pt" - } - else json% { color: "var(--vscode-errorForeground)" } - json% { className: "pointer dim", style: $style } - -/-- Style as a warning. By default, decorates the text with an undersquiggle; providing the -argument `decorated := false` turns this off. -/ -def SuggestionStyle.warning (decorated := true) : SuggestionStyle := - if decorated then - json% { - -- The `.gold` CSS class, which the infoview uses when e.g. building a file. - className: "gold pointer dim", - style: { textDecoration: "underline wavy var(--vscode-editorWarning-foreground) 1pt" } - } - else json% { className: "gold pointer dim" } - -/-- Style as a success. -/ -def SuggestionStyle.success : SuggestionStyle := - -- The `.information` CSS class, which the infoview uses on successes. 
- json% { className: "information pointer dim" } - -/-- Style the same way as a hypothesis appearing in the infoview. -/ -def SuggestionStyle.asHypothesis : SuggestionStyle := - json% { className: "goal-hyp pointer dim" } - -/-- Style the same way as an inaccessible hypothesis appearing in the infoview. -/ -def SuggestionStyle.asInaccessible : SuggestionStyle := - json% { className: "goal-inaccessible pointer dim" } - -/-- Draws the color from a red-yellow-green color gradient with red at `0.0`, yellow at `0.5`, and -green at `1.0`. Values outside the range `[0.0, 1.0]` are clipped to lie within this range. - -With `showValueInHoverText := true` (the default), the value `t` will be included in the `title` of -the HTML element (which appears on hover). -/ -def SuggestionStyle.value (t : Float) (showValueInHoverText := true) : SuggestionStyle := - let t := min (max t 0) 1 - json% { - className: "pointer dim", - -- interpolates linearly from 0º to 120º with 95% saturation and lightness - -- varying around 50% in HSL space - style: { color: $(s!"hsl({(t * 120).round} 95% {60 * ((t - 0.5)^2 + 0.75)}%)") }, - title: $(if showValueInHoverText then s!"Apply suggestion ({t})" else "Apply suggestion") - } - -/-- Holds a `suggestion` for replacement, along with `preInfo` and `postInfo` strings to be printed -immediately before and after that suggestion, respectively. It also includes an optional -`MessageData` to represent the suggestion in logs; by default, this is `none`, and `suggestion` is -used. -/ -structure Suggestion where - /-- Text to be used as a replacement via a code action. -/ - suggestion : SuggestionText - /-- Optional info to be printed immediately before replacement text in a widget. -/ - preInfo? : Option String := none - /-- Optional info to be printed immediately after replacement text in a widget. -/ - postInfo? : Option String := none - /-- Optional style specification for the suggestion. If `none` (the default), the suggestion is - styled as a text link. Otherwise, the suggestion can be styled as: - * a status: `.error`, `.warning`, `.success` - * a hypothesis name: `.asHypothesis`, `.asInaccessible` - * a variable color: `.value (t : Float)`, which draws from a red-yellow-green gradient, with red - at `0.0` and green at `1.0`. - - See `SuggestionStyle` for details. -/ - style? : Option SuggestionStyle := none - /-- How to represent the suggestion as `MessageData`. This is used only in the info diagnostic. - If `none`, we use `suggestion`. Use `toMessageData` to render a `Suggestion` in this manner. -/ - messageData? : Option MessageData := none - /-- How to construct the text that appears in the lightbulb menu from the suggestion text. If - `none`, we use `fun ppSuggestionText => "Try this: " ++ ppSuggestionText`. Only the pretty-printed - `suggestion : SuggestionText` is used here. -/ - toCodeActionTitle? : Option (String → String) := none - deriving Inhabited - -/-- Converts a `Suggestion` to `Json` in `CoreM`. We need `CoreM` in order to pretty-print syntax. - -This also returns a `String × Option String` consisting of the pretty-printed text and any custom -code action title if `toCodeActionTitle?` is provided. - -If `w := none`, then `w := getInputWidth (← getOptions)` is used. --/ -def Suggestion.toJsonAndInfoM (s : Suggestion) (w : Option Nat := none) (indent column : Nat := 0) : - CoreM (Json × String × Option String) := do - let text ← s.suggestion.prettyExtra w indent column - let mut json := [("suggestion", (text : Json))] - if let some preInfo := s.preInfo? 
then json := ("preInfo", preInfo) :: json - if let some postInfo := s.postInfo? then json := ("postInfo", postInfo) :: json - if let some style := s.style? then json := ("style", toJson style) :: json - return (Json.mkObj json, text, s.toCodeActionTitle?.map (· text)) - -/- If `messageData?` is specified, we use that; otherwise (by default), we use `toMessageData` of -the suggestion text. -/ -instance : ToMessageData Suggestion where - toMessageData s := s.messageData?.getD (toMessageData s.suggestion) - -instance : Coe SuggestionText Suggestion where - coe t := { suggestion := t } - /-- Delaborate `e` into a suggestion suitable for use by `refine`. -/ def delabToRefinableSuggestion (e : Expr) : MetaM Suggestion := return { suggestion := ← delabToRefinableSyntax e, messageData? := e } -/-! # Widget hooks -/ - /-- Core of `addSuggestion` and `addSuggestions`. Whether we use an inline display for a single element or a list display is controlled by `isInline`. -/ private def addSuggestionCore (ref : Syntax) (suggestions : Array Suggestion) @@ -333,29 +135,17 @@ private def addSuggestionCore (ref : Syntax) (suggestions : Array Suggestion) (style? : Option SuggestionStyle := none) (codeActionPrefix? : Option String := none) : CoreM Unit := do if let some range := (origSpan?.getD ref).getRange? then - let map ← getFileMap - -- FIXME: this produces incorrect results when `by` is at the beginning of the line, i.e. - -- replacing `tac` in `by tac`, because the next line will only be 2 space indented - -- (less than `tac` which starts at column 3) - let (indent, column) := getIndentAndColumn map range - let suggestions ← suggestions.mapM (·.toJsonAndInfoM (indent := indent) (column := column)) - let suggestionTexts := suggestions.map (·.2) + let { suggestions, info, range } ← processSuggestions ref range suggestions codeActionPrefix? let suggestions := suggestions.map (·.1) - let ref := Syntax.ofRange <| ref.getRange?.getD range - let range := map.utf8RangeToLspRange range - pushInfoLeaf <| .ofCustomInfo { - stx := ref - value := Dynamic.mk - { range, suggestionTexts, codeActionPrefix? : TryThisInfo } + let json := json% { + suggestions: $suggestions, + range: $range, + header: $header, + isInline: $isInline, + style: $style? } - Widget.savePanelWidgetInfo (hash tryThisWidget.javascript) ref - (props := return json% { - suggestions: $suggestions, - range: $range, - header: $header, - isInline: $isInline, - style: $style? - }) + pushInfoLeaf info + Widget.savePanelWidgetInfo tryThisWidget.javascriptHash ref (props := return json) /-- Add a "try this" suggestion. This has three effects: @@ -433,6 +223,7 @@ def addSuggestions (ref : Syntax) (suggestions : Array Suggestion) logInfoAt ref m!"{header}{msgs}" addSuggestionCore ref suggestions header (isInline := false) origSpan? style? codeActionPrefix? +/-! # Tactic-specific widget hooks -/ /-- Evaluates `tac` in `initialState` without recovery or sorrying on elaboration failure. If `expectedType?` is non-`none`, an error is thrown if the resulting goal type is not equal to the diff --git a/src/Lean/Meta/TryThis.lean b/src/Lean/Meta/TryThis.lean new file mode 100644 index 000000000000..649f9b06eb6d --- /dev/null +++ b/src/Lean/Meta/TryThis.lean @@ -0,0 +1,268 @@ +/- +Copyright (c) 2021 Gabriel Ebner. All rights reserved. +Released under Apache 2.0 license as described in the file LICENSE. 
+Authors: Gabriel Ebner, Mario Carneiro, Thomas Murrills +-/ +prelude +import Lean.CoreM +import Lean.Message +import Lean.Elab.InfoTree.Types +import Lean.Data.Lsp.Basic +import Lean.PrettyPrinter + +/-! +# "Try this" data types + +This defines the data types used in constructing "try this" widgets for suggestion-providing tactics +and inline error-message hints, as well as basic infrastructure for generating info trees and +widget content therefrom. +-/ + +namespace Lean.Meta.Tactic.TryThis + +open PrettyPrinter + +/-! # Code action information -/ + +/-- A packet of information about a "Try this" suggestion +that we store in the infotree for the associated code action to retrieve. -/ +structure TryThisInfo : Type where + /-- The textual range to be replaced by one of the suggestions. -/ + range : Lsp.Range + /-- + A list of suggestions for the user to choose from. + Each suggestion may optionally come with an override for the code action title. + -/ + suggestionTexts : Array (String × Option String) + /-- The prefix to display before the code action for a "Try this" suggestion if no custom code + action title is provided. If not provided, `"Try this: "` is used. -/ + codeActionPrefix? : Option String + deriving TypeName + +/-! # `Suggestion` data -/ + +-- TODO: we could also support `Syntax` and `Format` +/-- Text to be used as a suggested replacement in the infoview. This can be either a `TSyntax kind` +for a single `kind : SyntaxNodeKind` or a raw `String`. + +Instead of using constructors directly, there are coercions available from these types to +`SuggestionText`. -/ +inductive SuggestionText where + /-- `TSyntax kind` used as suggested replacement text in the infoview. Note that while `TSyntax` + is in general parameterized by a list of `SyntaxNodeKind`s, we only allow one here; this + unambiguously guides pretty-printing. -/ + | tsyntax {kind : SyntaxNodeKind} : TSyntax kind → SuggestionText + /-- A raw string to be used as suggested replacement text in the infoview. -/ + | string : String → SuggestionText + deriving Inhabited + +instance : ToMessageData SuggestionText where + toMessageData + | .tsyntax stx => stx + | .string s => s + +instance {kind : SyntaxNodeKind} : CoeHead (TSyntax kind) SuggestionText where + coe := .tsyntax + +instance : Coe String SuggestionText where + coe := .string + +/-- +Style hooks for `Suggestion`s. See `SuggestionStyle.error`, `.warning`, `.success`, `.value`, +and other definitions here for style presets. This is an arbitrary `Json` object, with the following +interesting fields: +* `title`: the hover text in the suggestion link +* `className`: the CSS classes applied to the link +* `style`: A `Json` object with additional inline CSS styles such as `color` or `textDecoration`. +-/ +def SuggestionStyle := Json deriving Inhabited, ToJson + +/-- Style as an error. By default, decorates the text with an undersquiggle; providing the argument +`decorated := false` turns this off. -/ +def SuggestionStyle.error (decorated := true) : SuggestionStyle := + let style := if decorated then + json% { + -- The VS code error foreground theme color (`--vscode-errorForeground`). + color: "var(--vscode-errorForeground)", + textDecoration: "underline wavy var(--vscode-editorError-foreground) 1pt" + } + else json% { color: "var(--vscode-errorForeground)" } + json% { className: "pointer dim", style: $style } + +/-- Style as a warning. By default, decorates the text with an undersquiggle; providing the +argument `decorated := false` turns this off. 
-/ +def SuggestionStyle.warning (decorated := true) : SuggestionStyle := + if decorated then + json% { + -- The `.gold` CSS class, which the infoview uses when e.g. building a file. + className: "gold pointer dim", + style: { textDecoration: "underline wavy var(--vscode-editorWarning-foreground) 1pt" } + } + else json% { className: "gold pointer dim" } + +/-- Style as a success. -/ +def SuggestionStyle.success : SuggestionStyle := + -- The `.information` CSS class, which the infoview uses on successes. + json% { className: "information pointer dim" } + +/-- Style the same way as a hypothesis appearing in the infoview. -/ +def SuggestionStyle.asHypothesis : SuggestionStyle := + json% { className: "goal-hyp pointer dim" } + +/-- Style the same way as an inaccessible hypothesis appearing in the infoview. -/ +def SuggestionStyle.asInaccessible : SuggestionStyle := + json% { className: "goal-inaccessible pointer dim" } + +/-- Draws the color from a red-yellow-green color gradient with red at `0.0`, yellow at `0.5`, and +green at `1.0`. Values outside the range `[0.0, 1.0]` are clipped to lie within this range. + +With `showValueInHoverText := true` (the default), the value `t` will be included in the `title` of +the HTML element (which appears on hover). -/ +def SuggestionStyle.value (t : Float) (showValueInHoverText := true) : SuggestionStyle := + let t := min (max t 0) 1 + json% { + className: "pointer dim", + -- interpolates linearly from 0º to 120º with 95% saturation and lightness + -- varying around 50% in HSL space + style: { color: $(s!"hsl({(t * 120).round} 95% {60 * ((t - 0.5)^2 + 0.75)}%)") }, + title: $(if showValueInHoverText then s!"Apply suggestion ({t})" else "Apply suggestion") + } + +/-- Holds a `suggestion` for replacement, along with `preInfo` and `postInfo` strings to be printed +immediately before and after that suggestion, respectively. It also includes an optional +`MessageData` to represent the suggestion in logs; by default, this is `none`, and `suggestion` is +used. -/ +structure Suggestion where + /-- Text to be used as a replacement via a code action. -/ + suggestion : SuggestionText + /-- Optional info to be printed immediately before replacement text in a widget. -/ + preInfo? : Option String := none + /-- Optional info to be printed immediately after replacement text in a widget. -/ + postInfo? : Option String := none + /-- Optional style specification for the suggestion. If `none` (the default), the suggestion is + styled as a text link. Otherwise, the suggestion can be styled as: + * a status: `.error`, `.warning`, `.success` + * a hypothesis name: `.asHypothesis`, `.asInaccessible` + * a variable color: `.value (t : Float)`, which draws from a red-yellow-green gradient, with red + at `0.0` and green at `1.0`. + + See `SuggestionStyle` for details. + + Note that this property is used only by the "try this" widget; it is ignored by the inline hint + widget. -/ + style? : Option SuggestionStyle := none + /-- How to represent the suggestion as `MessageData`. This is used only in the info diagnostic. + If `none`, we use `suggestion`. Use `toMessageData` to render a `Suggestion` in this manner. -/ + messageData? : Option MessageData := none + /-- How to construct the text that appears in the lightbulb menu from the suggestion text. If + `none`, we use `fun ppSuggestionText => "Try this: " ++ ppSuggestionText`. Only the pretty-printed + `suggestion : SuggestionText` is used here. -/ + toCodeActionTitle? 
: Option (String → String) := none + deriving Inhabited + +/- If `messageData?` is specified, we use that; otherwise (by default), we use `toMessageData` of +the suggestion text. -/ +instance : ToMessageData Suggestion where + toMessageData s := s.messageData?.getD (toMessageData s.suggestion) + +instance : Coe SuggestionText Suggestion where + coe t := { suggestion := t } + +/-! # Formatting -/ + +/-- Yields `(indent, column)` given a `FileMap` and a `String.Range`, where `indent` is the number +of spaces by which the line that first includes `range` is initially indented, and `column` is the +column `range` starts at in that line. -/ +def getIndentAndColumn (map : FileMap) (range : String.Range) : Nat × Nat := + let start := map.source.findLineStart range.start + let body := map.source.findAux (· ≠ ' ') range.start start + ((body - start).1, (range.start - start).1) + +/-- +An option allowing the user to customize the ideal input width. Defaults to 100. +This option controls output format when +the output is intended to be copied back into a lean file -/ +register_builtin_option format.inputWidth : Nat := { + /- The default maximum width of an ideal line in source code. -/ + defValue := 100 + descr := "ideal input width" +} + +/-- Get the input width specified in the options -/ +def getInputWidth (o : Options) : Nat := format.inputWidth.get o + +namespace SuggestionText + +/-- Pretty-prints a `SuggestionText` as a `Format`. If the `SuggestionText` is some `TSyntax kind`, +we use the appropriate pretty-printer; strings are coerced to `Format`s as-is. -/ +def pretty : SuggestionText → CoreM Format + | .tsyntax (kind := kind) stx => ppCategory kind stx + | .string text => return text + +/- Note that this is essentially `return (← s.pretty).prettyExtra w indent column`, but we +special-case strings to avoid converting them to `Format`s and back, which adds indentation after each newline. -/ +/-- Pretty-prints a `SuggestionText` as a `String` and wraps with respect to the pane width, +indentation, and column, via `Format.prettyExtra`. If `w := none`, then +`w := getInputWidth (← getOptions)` is used. Raw `String`s are returned as-is. -/ +def prettyExtra (s : SuggestionText) (w : Option Nat := none) + (indent column : Nat := 0) : CoreM String := + match s with + | .tsyntax (kind := kind) stx => do + let w ← match w with | none => do pure <| getInputWidth (← getOptions) | some n => pure n + return (← ppCategory kind stx).pretty w indent column + | .string text => return text + +end SuggestionText + +/-- Converts a `Suggestion` to `Json` in `CoreM`. We need `CoreM` in order to pretty-print syntax. + +This also returns a `String × Option String` consisting of the pretty-printed text and any custom +code action title if `toCodeActionTitle?` is provided. + +If `w := none`, then `w := getInputWidth (← getOptions)` is used. +-/ +def Suggestion.toJsonAndInfoM (s : Suggestion) (w : Option Nat := none) (indent column : Nat := 0) : + CoreM (Json × String × Option String) := do + let text ← s.suggestion.prettyExtra w indent column + let mut json := [("suggestion", (text : Json))] + if let some preInfo := s.preInfo? then json := ("preInfo", preInfo) :: json + if let some postInfo := s.postInfo? then json := ("postInfo", postInfo) :: json + if let some style := s.style? then json := ("style", toJson style) :: json + return (Json.mkObj json, text, s.toCodeActionTitle?.map (· text)) + +/-- +Represents processed data for a collection of suggestions that can be passed to a widget and pushed +in an info leaf. 
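-- Illustrative sketch, not part of this patch: building a `Suggestion` by hand with the
-- API defined above. The import path is the new `Lean.Meta.TryThis` module introduced
-- here; the replacement string and the gradient value `0.9` are arbitrary examples.
import Lean.Meta.TryThis

open Lean.Meta.Tactic.TryThis

/-- A hand-built suggestion: raw string replacement text, a trailing note, and the
red-yellow-green gradient style near its green end. A suggestion-producing tactic
would typically pass such a value on to `addSuggestion`. -/
def demoSuggestion : Suggestion where
  suggestion := .string "simp [Nat.add_comm]"  -- the raw-`String` constructor of `SuggestionText`
  postInfo?  := some " -- note rendered after the replacement text"
  style?     := some (.value 0.9)              -- gradient style, close to the green end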
+
+It contains the following data:
+* `suggestions`: tuples of the form `(j, t, p)` where `j` is JSON containing a suggestion and its
+  pre- and post-info, `t` is the text to be inserted by the suggestion, and `p` is the code action
+  prefix thereof.
+* `info`: the `TryThisInfo` data corresponding to a collection of suggestions
+* `range`: the range at which the suggestion is to be applied.
+-/
+structure ProcessedSuggestions where
+  suggestions : Array (Json × String × Option String)
+  info : Elab.Info
+  range : Lsp.Range
+
+/--
+Processes an array of `Suggestion`s into data that can be used to construct a code-action info leaf
+and "try this" widget.
+-/
+def processSuggestions (ref : Syntax) (range : String.Range) (suggestions : Array Suggestion)
+    (codeActionPrefix? : Option String) : CoreM ProcessedSuggestions := do
+  let map ← getFileMap
+  -- FIXME: this produces incorrect results when `by` is at the beginning of the line, i.e.
+  -- replacing `tac` in `by tac`, because the next line will only be 2 space indented
+  -- (less than `tac` which starts at column 3)
+  let (indent, column) := getIndentAndColumn map range
+  let suggestions ← suggestions.mapM (·.toJsonAndInfoM (indent := indent) (column := column))
+  let suggestionTexts := suggestions.map (·.2)
+  let ref := Syntax.ofRange <| ref.getRange?.getD range
+  let range := map.utf8RangeToLspRange range
+  let info := .ofCustomInfo {
+    stx := ref
+    value := Dynamic.mk { range, suggestionTexts, codeActionPrefix? : TryThisInfo }
+  }
+  return { info, suggestions, range }
diff --git a/src/Lean/PrettyPrinter/Delaborator/Basic.lean b/src/Lean/PrettyPrinter/Delaborator/Basic.lean
index 623ab4d2d1b1..dcfc94c736e9 100644
--- a/src/Lean/PrettyPrinter/Delaborator/Basic.lean
+++ b/src/Lean/PrettyPrinter/Delaborator/Basic.lean
@@ -4,7 +4,7 @@ Released under Apache 2.0 license as described in the file LICENSE.
 Authors: Sebastian Ullrich
 -/
 prelude
-import Lean.Elab.Term
+import Lean.KeyedDeclsAttribute
 import Lean.PrettyPrinter.Delaborator.Options
 import Lean.PrettyPrinter.Delaborator.SubExpr
 import Lean.PrettyPrinter.Delaborator.TopDownAnalyze
diff --git a/src/Lean/PrettyPrinter/Delaborator/Builtins.lean b/src/Lean/PrettyPrinter/Delaborator/Builtins.lean
index 6710eeeede0b..cc002d944040 100644
--- a/src/Lean/PrettyPrinter/Delaborator/Builtins.lean
+++ b/src/Lean/PrettyPrinter/Delaborator/Builtins.lean
@@ -8,6 +8,8 @@ import Lean.PrettyPrinter.Delaborator.Attributes
 import Lean.PrettyPrinter.Delaborator.Basic
 import Lean.PrettyPrinter.Delaborator.SubExpr
 import Lean.PrettyPrinter.Delaborator.TopDownAnalyze
+import Lean.Parser.Do
+import Lean.Parser.Command
 import Lean.Meta.CoeAttr
 import Lean.Meta.Structure
diff --git a/src/Lean/Server/Completion/CompletionCollectors.lean b/src/Lean/Server/Completion/CompletionCollectors.lean
index 04d0bfd2f079..6f31e064adc2 100644
--- a/src/Lean/Server/Completion/CompletionCollectors.lean
+++ b/src/Lean/Server/Completion/CompletionCollectors.lean
@@ -19,13 +19,13 @@ open FuzzyMatching
 section Infrastructure
-  private structure Context where
+  structure Context where
     params : CompletionParams
     completionInfoPos : Nat
   /-- Intermediate state while completions are being computed. -/
-  private structure State where
+  structure State where
    /-- All completion items and their fuzzy match scores so far. -/
    items : Array CompletionItem := #[]
@@ -36,7 +36,7 @@ section Infrastructure
   private abbrev M := ReaderT Context $ StateRefT State $ CancellableT MetaM
   /-- Adds a new completion item to the state in `M`.
-/ - private def addItem + def addItem (item : CompletionItem) (id? : Option CompletionIdentifier := none) : M Unit := do @@ -54,7 +54,7 @@ section Infrastructure Adds a new completion item with the given `label`, `id`, `kind` and `score` to the state in `M`. Computes the doc string from the environment if available. -/ - private def addUnresolvedCompletionItem + def addUnresolvedCompletionItem (label : Name) (id : CompletionIdentifier) (kind : CompletionItemKind) @@ -68,7 +68,7 @@ section Infrastructure let item := { label := label.toString, kind? := kind, tags? } addItem item id - private def getCompletionKindForDecl (constInfo : ConstantInfo) : M CompletionItemKind := do + def getCompletionKindForDecl (constInfo : ConstantInfo) : M CompletionItemKind := do let env ← getEnv if constInfo.isCtor then return CompletionItemKind.constructor @@ -91,19 +91,19 @@ section Infrastructure else return CompletionItemKind.constant - private def addUnresolvedCompletionItemForDecl (label : Name) (declName : Name) : M Unit := do + def addUnresolvedCompletionItemForDecl (label : Name) (declName : Name) : M Unit := do if let some c := (← getEnv).find? declName then addUnresolvedCompletionItem label (.const declName) (← getCompletionKindForDecl c) - private def addKeywordCompletionItem (keyword : String) : M Unit := do + def addKeywordCompletionItem (keyword : String) : M Unit := do let item := { label := keyword, detail? := "keyword", documentation? := none, kind? := CompletionItemKind.keyword } addItem item - private def addNamespaceCompletionItem (ns : Name) : M Unit := do + def addNamespaceCompletionItem (ns : Name) : M Unit := do let item := { label := ns.toString, detail? := "namespace", documentation? := none, kind? := CompletionItemKind.module } addItem item - private def runM + def runM (params : CompletionParams) (completionInfoPos : Nat) (ctx : ContextInfo) @@ -121,7 +121,7 @@ end Infrastructure section Utils - private partial def containsSuccessiveCharacters (a b : String) : Bool := + partial def containsSuccessiveCharacters (a b : String) : Bool := go ⟨0⟩ ⟨0⟩ where go (aPos bPos : String.Pos) : Bool := @@ -139,7 +139,7 @@ section Utils else go aPos bPos - private def normPrivateName? (declName : Name) : MetaM (Option Name) := do + def normPrivateName? (declName : Name) : MetaM (Option Name) := do match privateToUserName? declName with | none => return declName | some userName => @@ -154,7 +154,7 @@ section Utils Remark: `danglingDot == true` when the completion point is an identifier followed by `.`. -/ - private def matchDecl? (ns : Name) (id : Name) (danglingDot : Bool) (declName : Name) : MetaM (Option Name) := do + def matchDecl? (ns : Name) (id : Name) (danglingDot : Bool) (declName : Name) : MetaM (Option Name) := do let some declName ← normPrivateName? 
declName | return none if !ns.isPrefixOf declName then @@ -183,7 +183,7 @@ section Utils return none return none - private def forEligibleDeclsWithCancellationM [Monad m] [MonadEnv m] + def forEligibleDeclsWithCancellationM [Monad m] [MonadEnv m] [MonadLiftT (ST IO.RealWorld) m] [MonadCancellable m] [MonadLiftT IO m] (f : Name → ConstantInfo → m PUnit) : m PUnit := do let _ ← StateT.run (s := 0) <| forEligibleDeclsM fun decl ci => do @@ -197,7 +197,7 @@ end Utils section IdCompletionUtils - private def matchAtomic (id : Name) (declName : Name) (danglingDot : Bool) : Bool := + def matchAtomic (id : Name) (declName : Name) (danglingDot : Bool) : Bool := if danglingDot then false else @@ -209,7 +209,7 @@ section IdCompletionUtils Truncate the given identifier and make sure it has length `≤ newLength`. This function assumes `id` does not contain `Name.num` constructors. -/ - private partial def truncate (id : Name) (newLen : Nat) : Name := + partial def truncate (id : Name) (newLen : Nat) : Name := let rec go (id : Name) : Name × Nat := match id with | Name.anonymous => (id, 0) @@ -270,7 +270,7 @@ end IdCompletionUtils section DotCompletionUtils /-- Return `true` if `e` is a `declName`-application, or can be unfolded (delta-reduced) to one. -/ - private partial def isDefEqToAppOf (e : Expr) (declName : Name) : MetaM Bool := do + partial def isDefEqToAppOf (e : Expr) (declName : Name) : MetaM Bool := do let isConstOf := match e.getAppFn with | .const name .. => (privateToUserName? name).getD name == declName | _ => false @@ -279,7 +279,7 @@ section DotCompletionUtils let some e ← unfoldeDefinitionGuarded? e | return false isDefEqToAppOf e declName - private def isDotCompletionMethod (typeName : Name) (info : ConstantInfo) : MetaM Bool := + def isDotCompletionMethod (typeName : Name) (info : ConstantInfo) : MetaM Bool := forallTelescopeReducing info.type fun xs _ => do for x in xs do let localDecl ← x.fvarId!.getDecl @@ -291,14 +291,14 @@ section DotCompletionUtils /-- Checks whether the expected type of `info.type` can be reduced to an application of `typeName`. -/ - private def isDotIdCompletionMethod (typeName : Name) (info : ConstantInfo) : MetaM Bool := do + def isDotIdCompletionMethod (typeName : Name) (info : ConstantInfo) : MetaM Bool := do forallTelescopeReducing info.type fun _ type => isDefEqToAppOf type.consumeMData typeName /-- Converts `n` to `Name.anonymous` if `n` is a private prefix (see `Lean.isPrivatePrefix`). -/ - private def stripPrivatePrefix (n : Name) : Name := + def stripPrivatePrefix (n : Name) : Name := match n with | .num _ 0 => if isPrivatePrefix n then .anonymous else n | _ => n @@ -308,7 +308,7 @@ section DotCompletionUtils private prefixes in both names. Necessary because the namespaces of private names do not contain private prefixes. -/ - private partial def cmpModPrivate (n₁ n₂ : Name) : Ordering := + partial def cmpModPrivate (n₁ n₂ : Name) : Ordering := let n₁ := stripPrivatePrefix n₁ let n₂ := stripPrivatePrefix n₂ match n₁, n₂ with @@ -332,13 +332,13 @@ section DotCompletionUtils strip the private prefix from deep in the name, letting us reject most names without having to scan the full name first. -/ - private def NameSetModPrivate := RBTree Name cmpModPrivate + def NameSetModPrivate := RBTree Name cmpModPrivate /-- Given a type, try to extract relevant type names for dot notation field completion. We extract the type name, parent struct names, and unfold the type. 
The process mimics the dot notation elaboration procedure at `App.lean` -/ - private def getDotCompletionTypeNameSet (type : Expr) : MetaM NameSetModPrivate := do + def getDotCompletionTypeNameSet (type : Expr) : MetaM NameSetModPrivate := do let mut set := .empty for typeName in ← getDotCompletionTypeNames type do set := set.insert typeName @@ -346,7 +346,7 @@ section DotCompletionUtils end DotCompletionUtils -private def idCompletionCore +def idCompletionCore (ctx : ContextInfo) (stx : Syntax) (id : Name) diff --git a/src/Lean/Server/Completion/CompletionResolution.lean b/src/Lean/Server/Completion/CompletionResolution.lean index e72f76752073..f3d7caa770be 100644 --- a/src/Lean/Server/Completion/CompletionResolution.lean +++ b/src/Lean/Server/Completion/CompletionResolution.lean @@ -6,6 +6,7 @@ Authors: Leonardo de Moura, Marc Huisinga prelude import Lean.Server.Completion.CompletionItemData import Lean.Server.Completion.CompletionInfoSelection +import Lean.Linter.Deprecated namespace Lean.Lsp diff --git a/src/Lean/Server/Completion/LeanServer3.code-workspace b/src/Lean/Server/Completion/LeanServer3.code-workspace new file mode 100644 index 000000000000..b0dc3642b7ae --- /dev/null +++ b/src/Lean/Server/Completion/LeanServer3.code-workspace @@ -0,0 +1,11 @@ +{ + "folders": [ + { + "path": "../../../../../LeanServer3" + }, + { + "path": "../../../.." + } + ], + "settings": {} +} \ No newline at end of file diff --git a/src/Lean/Server/FileWorker.lean b/src/Lean/Server/FileWorker.lean index c300ffdfb64a..9ece142f7dad 100644 --- a/src/Lean/Server/FileWorker.lean +++ b/src/Lean/Server/FileWorker.lean @@ -144,29 +144,29 @@ def WorkerContext.resolveServerRequestResponse (ctx : WorkerContext) (id : Reque section Elab -- Placed here instead of Lean.Server.Utils because of an import loop - private def mkIleanInfoNotification (method : String) (m : DocumentMeta) + def mkIleanInfoNotification (method : String) (m : DocumentMeta) (trees : Array Elab.InfoTree) : BaseIO (JsonRpc.Notification Lsp.LeanIleanInfoParams) := do let references ← findModuleRefs m.text trees (localVars := true) |>.toLspModuleRefs let param := { version := m.version, references } return { method, param } - private def mkIleanInfoUpdateNotification : DocumentMeta → Array Elab.InfoTree → + def mkIleanInfoUpdateNotification : DocumentMeta → Array Elab.InfoTree → BaseIO (JsonRpc.Notification Lsp.LeanIleanInfoParams) := mkIleanInfoNotification "$/lean/ileanInfoUpdate" - private def mkIleanInfoFinalNotification : DocumentMeta → Array Elab.InfoTree → + def mkIleanInfoFinalNotification : DocumentMeta → Array Elab.InfoTree → BaseIO (JsonRpc.Notification Lsp.LeanIleanInfoParams) := mkIleanInfoNotification "$/lean/ileanInfoFinal" /-- Yields a `$/lean/importClosure` notification. -/ - private def mkImportClosureNotification (importClosure : Array DocumentUri) + def mkImportClosureNotification (importClosure : Array DocumentUri) : JsonRpc.Notification Lsp.LeanImportClosureParams := { method := "$/lean/importClosure", param := { importClosure : LeanImportClosureParams } } /-- State of `reportSnapshots`. -/ - private structure ReportSnapshotsState where + structure ReportSnapshotsState where /-- Whether we have waited for a snapshot to finish at least once (see debouncing below). -/ hasBlocked := false /-- All info trees encountered so far. 
-/
@@ -199,7 +199,7 @@ This option can only be set on the command line, not in the lakefile or via `set
 Sends a `textDocument/publishDiagnostics` notification to the client that contains the diagnostics
 in `ctx.stickyDiagnosticsRef` and `doc.diagnosticsRef`.
 -/
-  private def publishDiagnostics (ctx : WorkerContext) (doc : EditableDocumentCore)
+  def publishDiagnostics (ctx : WorkerContext) (doc : EditableDocumentCore)
      : BaseIO Unit := do
    let stickyInteractiveDiagnostics ← ctx.stickyDiagnosticsRef.get
    let docInteractiveDiagnostics ← doc.diagnosticsRef.get
@@ -223,7 +223,7 @@ This option can only be set on the command line, not in the lakefile or via `set
 3. afterwards, each time new information is found in a snapshot
 4. at the very end, if we never blocked (e.g. emptying a file should make sure to empty diagnostics
    as well eventually) -/
-  private partial def reportSnapshots (ctx : WorkerContext) (doc : EditableDocumentCore)
+  partial def reportSnapshots (ctx : WorkerContext) (doc : EditableDocumentCore)
      (cancelTk : CancelToken) : BaseIO (ServerTask Unit) := ServerTask.BaseIO.asTask do
    IO.sleep (server.reportDelayMs.get ctx.cmdlineOpts).toUInt32 -- "Debouncing 1."
@@ -417,6 +417,7 @@ def setupImports
   return .ok {
     mainModuleName := meta.mod
+    imports
     opts
     plugins := fileSetupResult.plugins
   }
diff --git a/src/Lean/Server/Rpc/RequestHandling.lean b/src/Lean/Server/Rpc/RequestHandling.lean
index 453b01600264..73f2b974590a 100644
--- a/src/Lean/Server/Rpc/RequestHandling.lean
+++ b/src/Lean/Server/Rpc/RequestHandling.lean
@@ -12,7 +12,7 @@ import Lean.Server.Rpc.Basic
 namespace Lean.Server
-private structure RpcProcedure where
+structure RpcProcedure where
   wrapper : (sessionId : UInt64) → Json → RequestM (RequestTask Json)
   deriving Inhabited
diff --git a/src/Lean/Setup.lean b/src/Lean/Setup.lean
new file mode 100644
index 000000000000..34a031556c9c
--- /dev/null
+++ b/src/Lean/Setup.lean
@@ -0,0 +1,65 @@
+/-
+Copyright (c) 2019 Microsoft Corporation. All rights reserved.
+Released under Apache 2.0 license as described in the file LICENSE.
+Authors: Leonardo de Moura, Mac Malone
+-/
+prelude
+import Lean.Data.Json
+import Lean.Util.LeanOptions
+
+/-!
+# Module Setup Information
+
+Data types used by Lean module headers and the `--setup` CLI.
+-/
+
+namespace Lean
+
+structure Import where
+  module : Name
+  /-- `import all`; whether to import and expose all data saved by the module. -/
+  importAll : Bool := false
+  /-- Whether to activate this import when the current module itself is imported. -/
+  isExported : Bool := true
+  deriving Repr, Inhabited, ToJson, FromJson
+
+instance : Coe Name Import := ⟨({module := ·})⟩
+
+instance : ToString Import := ⟨fun imp => toString imp.module⟩
+
+/-- Files containing data for a single module. -/
+structure ModuleArtifacts where
+  lean? : Option System.FilePath := none
+  olean? : Option System.FilePath := none
+  oleanServer? : Option System.FilePath := none
+  oleanPrivate? : Option System.FilePath := none
+  ilean? : Option System.FilePath := none
+  deriving Repr, Inhabited, ToJson, FromJson
+
+/--
+A module's setup information as described by a JSON file.
+Supersedes the module's header when the `--setup` CLI option is used.
+-/
+structure ModuleSetup where
+  /-- Name of the module. -/
+  name : Name
+  /-- Whether the module is participating in the module system. -/
+  isModule : Bool := false
+  /-- The module's direct imports. -/
+  imports : Array Import := #[]
+  /-- Pre-resolved artifacts of related modules (e.g., this module's transitive imports).
-/ + modules : NameMap ModuleArtifacts := {} + /-- Dynamic libraries to load with the module. -/ + dynlibs : Array System.FilePath := #[] + /-- Plugins to initialize with the module. -/ + plugins : Array System.FilePath := #[] + /-- Additional options for the module. -/ + options : LeanOptions := {} + deriving Repr, Inhabited, ToJson, FromJson + +/-- Load a `ModuleSetup` from a JSON file. -/ +def ModuleSetup.load (path : System.FilePath) : IO ModuleSetup := do + let contents ← IO.FS.readFile path + match Json.parse contents >>= fromJson? with + | .ok info => pure info + | .error msg => throw <| IO.userError s!"failed to load header from {path}: {msg}" diff --git a/src/Lean/Structure.lean b/src/Lean/Structure.lean index b2791c7adfae..9e9e9aff3539 100644 --- a/src/Lean/Structure.lean +++ b/src/Lean/Structure.lean @@ -76,7 +76,7 @@ def StructureInfo.getProjFn? (info : StructureInfo) (i : Nat) : Option Name := none /-- Auxiliary state for structures defined in the current module. -/ -private structure StructureState where +structure StructureState where map : PersistentHashMap Name StructureInfo := {} deriving Inhabited diff --git a/src/Lean/Util/LeanOptions.lean b/src/Lean/Util/LeanOptions.lean index 9fe332f7dd18..cc369f172be8 100644 --- a/src/Lean/Util/LeanOptions.lean +++ b/src/Lean/Util/LeanOptions.lean @@ -60,9 +60,11 @@ def LeanOptionValue.asCliFlagValue : (v : LeanOptionValue) → String /-- Options that are used by Lean as if they were passed using `-D`. -/ structure LeanOptions where - values : RBMap Name LeanOptionValue Name.cmp + values : NameMap LeanOptionValue deriving Inhabited, Repr +instance : EmptyCollection LeanOptions := ⟨⟨∅⟩⟩ + def LeanOptions.toOptions (leanOptions : LeanOptions) : Options := Id.run do let mut options := KVMap.empty for ⟨name, optionValue⟩ in leanOptions.values do @@ -77,17 +79,9 @@ def LeanOptions.fromOptions? (options : Options) : Option LeanOptions := do return ⟨values⟩ instance : FromJson LeanOptions where - fromJson? - | Json.obj obj => do - let values ← obj.foldM (init := RBMap.empty) fun acc k v => do - let optionValue ← fromJson? v - return acc.insert k.toName optionValue - return ⟨values⟩ - | _ => Except.error "invalid LeanOptions type" + fromJson? j := LeanOptions.mk <$> fromJson? j instance : ToJson LeanOptions where - toJson options := - Json.obj <| options.values.fold (init := RBNode.leaf) fun acc k v => - acc.insert (cmp := compare) k.toString (toJson v) + toJson options := toJson options.values end Lean diff --git a/src/Lean/Widget/TaggedText.lean b/src/Lean/Widget/TaggedText.lean index 6ee95a360416..ed59845f2a8a 100644 --- a/src/Lean/Widget/TaggedText.lean +++ b/src/Lean/Widget/TaggedText.lean @@ -68,7 +68,7 @@ instance [RpcEncodable α] : RpcEncodable (TaggedText α) where rpcEncode a := toJson <$> a.mapM rpcEncode rpcDecode a := do TaggedText.mapM rpcDecode (← fromJson? a) -private structure TaggedState where +structure TaggedState where out : TaggedText (Nat × Nat) := TaggedText.text "" tagStack : List (Nat × Nat × TaggedText (Nat × Nat)) := [] column : Nat := 0 diff --git a/src/Lean/Widget/Types.lean b/src/Lean/Widget/Types.lean index 8a3ce41301fa..8b9ca279a63b 100644 --- a/src/Lean/Widget/Types.lean +++ b/src/Lean/Widget/Types.lean @@ -33,4 +33,31 @@ structure WidgetInstance where props : StateM Server.RpcObjectStore Json deriving Server.RpcEncodable +/-- A widget module is a unit of source code that can execute in the infoview. 
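-- Illustrative sketch, not part of this patch: populating the `ModuleSetup` data types from
-- `src/Lean/Setup.lean` above directly in Lean. All module names below are hypothetical.
import Lean.Setup

open Lean

/-- A setup description equivalent to what a `--setup` JSON file would provide. -/
def demoSetup : ModuleSetup where
  name    := `Demo.Main
  imports := #[{ module := `Init }, { module := `Demo.Util, importAll := true }]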
+ +Every module definition must either be annotated with `@[widget_module]`, +or use a value of `javascript` identical to that of another definition +annotated with `@[widget_module]`. +This makes it possible for the infoview to load the module. + +See the [manual entry](https://lean-lang.org/lean4/doc/examples/widgets.lean.html) +for more information on how to use the widgets system. -/ +structure Module where + /-- A JS [module](https://developer.mozilla.org/en-US/docs/Web/JavaScript/Guide/Modules) + intended for use in user widgets. + + The JS environment in which modules execute + provides a fixed set of libraries accessible via direct `import`, + notably [`@leanprover/infoview`](https://www.npmjs.com/package/@leanprover/infoview) + and [`react`](https://www.npmjs.com/package/react). + + To initialize this field from an external JS file, + you may use `include_str "path"/"to"/"file.js"`. + However **beware** that this does not register a dependency with Lake, + so your Lean module will not automatically be rebuilt + when the `.js` file changes. -/ + javascript : String + /-- The hash is cached to avoid recomputing it whenever the `Module` is used. -/ + javascriptHash : { x : UInt64 // x = hash javascript } := ⟨hash javascript, rfl⟩ + end Lean.Widget diff --git a/src/Lean/Widget/UserWidget.lean b/src/Lean/Widget/UserWidget.lean index 3c0d9bd59da5..8ee25fa44a70 100644 --- a/src/Lean/Widget/UserWidget.lean +++ b/src/Lean/Widget/UserWidget.lean @@ -7,37 +7,11 @@ Authors: E.W.Ayers, Wojciech Nawrocki prelude import Lean.Elab.Eval import Lean.Server.Rpc.RequestHandling +import Lean.Widget.Types namespace Lean.Widget open Meta Elab -/-- A widget module is a unit of source code that can execute in the infoview. - -Every module definition must either be annotated with `@[widget_module]`, -or use a value of `javascript` identical to that of another definition -annotated with `@[widget_module]`. -This makes it possible for the infoview to load the module. - -See the [manual entry](https://lean-lang.org/lean4/doc/examples/widgets.lean.html) -for more information on how to use the widgets system. -/ -structure Module where - /-- A JS [module](https://developer.mozilla.org/en-US/docs/Web/JavaScript/Guide/Modules) - intended for use in user widgets. - - The JS environment in which modules execute - provides a fixed set of libraries accessible via direct `import`, - notably [`@leanprover/infoview`](https://www.npmjs.com/package/@leanprover/infoview) - and [`react`](https://www.npmjs.com/package/react). - - To initialize this field from an external JS file, - you may use `include_str "path"/"to"/"file.js"`. - However **beware** that this does not register a dependency with Lake, - so your Lean module will not automatically be rebuilt - when the `.js` file changes. -/ - javascript : String - /-- The hash is cached to avoid recomputing it whenever the `Module` is used. 
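-- Illustrative sketch, not part of this patch: a minimal `@[widget_module]` declaration using
-- the `Module` structure that now lives in `Lean.Widget.Types`. The JavaScript body is a
-- placeholder; any ES module that default-exports a React component will do.
import Lean.Widget.UserWidget

open Lean Widget

@[widget_module]
def helloWidget : Module where
  javascript := "
    import * as React from 'react'
    export default function (props) {
      return React.createElement('p', {}, 'Hello from the infoview!')
    }"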
-/ - javascriptHash : { x : UInt64 // x = hash javascript } := ⟨hash javascript, rfl⟩ - private unsafe def evalModuleUnsafe (e : Expr) : MetaM Module := evalExpr' Module ``Module e diff --git a/src/Lean/test.lean b/src/Lean/test.lean new file mode 100644 index 000000000000..66b59f3f5662 --- /dev/null +++ b/src/Lean/test.lean @@ -0,0 +1,9 @@ +import Lean.Data.Json +import Lean.PrettyPrinter +import Std +import Lean.PrettyPrinter +--let foo (a:Lean.PrettyPrinter.InfoPerPos) := a +--instance : Lean.ToJson Lean.PrettyPrinter.InfoPerPos where +-- toJson a := Lean.Json.mkObj [ ] +--instance : Lean.ToJson Std.Format where +-- toJson _ := Lean.Json.mkObj [ ] diff --git a/src/Std/Data/DHashMap/Internal/RawLemmas.lean b/src/Std/Data/DHashMap/Internal/RawLemmas.lean index 2196b6b7d9c6..c4c2a7bc0009 100644 --- a/src/Std/Data/DHashMap/Internal/RawLemmas.lean +++ b/src/Std/Data/DHashMap/Internal/RawLemmas.lean @@ -2562,6 +2562,7 @@ theorem getKey!_alter [LawfulBEq α] [Inhabited α] {k k' : α} (h : m.1.WF) m.getKey! k' := by simp_to_model [alter, get?, getKey!] using List.getKey!_alterKey +-- Note that in many use cases `getKey_eq` gives a simpler right hand side. theorem getKey_alter [LawfulBEq α] [Inhabited α] {k k' : α} (h : m.1.WF) {f : Option (β k) → Option (β k)} (hc : (m.alter k f).contains k') : (m.alter k f).getKey k' hc = diff --git a/src/Std/Data/DHashMap/Lemmas.lean b/src/Std/Data/DHashMap/Lemmas.lean index 244af875f3a1..c77466d31b2e 100644 --- a/src/Std/Data/DHashMap/Lemmas.lean +++ b/src/Std/Data/DHashMap/Lemmas.lean @@ -807,6 +807,7 @@ theorem getKey_congr [EquivBEq α] [LawfulHashable α] {k₁ k₂ : α} (h : k (h₁ : k₁ ∈ m) : m.getKey k₁ h₁ = m.getKey k₂ ((mem_congr h).mp h₁) := Raw₀.getKey_congr ⟨m.1, _⟩ m.2 h h₁ +@[simp] theorem getKey_eq [LawfulBEq α] {k : α} (h : k ∈ m) : m.getKey k h = k := Raw₀.getKey_eq ⟨m.1, _⟩ m.2 h @@ -2550,6 +2551,7 @@ theorem getKey!_alter_self [LawfulBEq α] [Inhabited α] {k : α} {f : Option ( (m.alter k f).getKey! k = if (f (m.get? k)).isSome then k else default := by simp only [getKey!_alter, beq_self_eq_true, reduceIte] +@[deprecated getKey_eq (since := "2025-01-05")] theorem getKey_alter [LawfulBEq α] [Inhabited α] {k k' : α} {f : Option (β k) → Option (β k)} {h : k' ∈ m.alter k f} : (m.alter k f).getKey k' h = @@ -2560,10 +2562,9 @@ theorem getKey_alter [LawfulBEq α] [Inhabited α] {k k' : α} {f : Option (β k m.getKey k' h' := Raw₀.getKey_alter ⟨m.1, _⟩ m.2 h -@[simp] theorem getKey_alter_self [LawfulBEq α] [Inhabited α] {k : α} {f : Option (β k) → Option (β k)} {h : k ∈ m.alter k f} : (m.alter k f).getKey k h = k := by - simp [getKey_alter] + simp theorem getKeyD_alter [LawfulBEq α] {k k' fallback : α} {f : Option (β k) → Option (β k)} : (m.alter k f).getKeyD k' fallback = @@ -2881,6 +2882,7 @@ theorem getKey!_modify_self [LawfulBEq α] [Inhabited α] {k : α} {f : β k → (m.modify k f).getKey! 
k = if k ∈ m then k else default := Raw₀.getKey!_modify_self ⟨m.1, _⟩ m.2 +@[deprecated getKey_eq (since := "2025-01-05")] theorem getKey_modify [LawfulBEq α] [Inhabited α] {k k' : α} {f : β k → β k} {h : k' ∈ m.modify k f} : (m.modify k f).getKey k' h = diff --git a/src/Std/Data/DHashMap/RawLemmas.lean b/src/Std/Data/DHashMap/RawLemmas.lean index cb402fa52db9..cb29135508ed 100644 --- a/src/Std/Data/DHashMap/RawLemmas.lean +++ b/src/Std/Data/DHashMap/RawLemmas.lean @@ -873,6 +873,7 @@ theorem getKey_congr [EquivBEq α] [LawfulHashable α] (h : m.WF) {k₁ k₂ : m.getKey k₁ h₁ = m.getKey k₂ (((mem_congr h h').mp h₁)) := by simp_to_raw using Raw₀.getKey_congr +@[simp] theorem getKey_eq [LawfulBEq α] (h : m.WF) {k : α} (h') : m.getKey k h' = k := by simp_to_raw using Raw₀.getKey_eq @@ -2692,6 +2693,7 @@ theorem getKey!_alter_self [LawfulBEq α] [Inhabited α] {k : α} {f : Option ( (h : m.WF) : (m.alter k f).getKey! k = if (f (m.get? k)).isSome then k else default := by simp [getKey!_alter h] +-- Note that in many use cases `getKey_eq` gives a simpler right hand side. theorem getKey_alter [LawfulBEq α] [Inhabited α] {k k' : α} {f : Option (β k) → Option (β k)} (h : m.WF) {hc : k' ∈ m.alter k f} : (m.alter k f).getKey k' hc = @@ -3052,6 +3054,7 @@ theorem getKey!_modify_self [LawfulBEq α] [Inhabited α] {k : α} {f : β k → simp only [mem_iff_contains] simp_to_raw using Raw₀.getKey!_modify_self +@[deprecated getKey_eq (since := "2025-01-05")] theorem getKey_modify [LawfulBEq α] [Inhabited α] {k k' : α} {f : β k → β k} (h : m.WF) : {hc : k' ∈ m.modify k f} → (m.modify k f).getKey k' hc = diff --git a/src/Std/Data/DTreeMap/Internal/Balancing.lean b/src/Std/Data/DTreeMap/Internal/Balancing.lean index 6ec9fd0bdf72..d79e7ba71623 100644 --- a/src/Std/Data/DTreeMap/Internal/Balancing.lean +++ b/src/Std/Data/DTreeMap/Internal/Balancing.lean @@ -682,7 +682,6 @@ theorem balanceL_eq_balanceLErase {k : α} {v : β k} {l r : Impl α β} {hlb hr balanceL k v l r hlb hrb hlr = balanceLErase k v l r hlb hrb hlr.erase := by fun_cases balanceL k v l r hlb hrb hlr all_goals dsimp only [balanceL, balanceLErase] - contradiction split · split <;> contradiction · rfl @@ -711,7 +710,6 @@ theorem balanceR_eq_balanceRErase {k : α} {v : β k} {l r : Impl α β} {hlb hr balanceR k v l r hlb hrb hlr = balanceRErase k v l r hlb hrb hlr.erase := by fun_cases balanceR k v l r hlb hrb hlr all_goals dsimp only [balanceR, balanceRErase] - contradiction split · split <;> contradiction · rfl @@ -761,15 +759,11 @@ theorem balance!_desc {k : α} {v : β k} {l r : Impl α β} (hlb : l.Balanced) fun_cases balanceₘ k v l r · rw [if_pos ‹_›, bin, balanced_inner_iff] exact ⟨rfl, hlb, hrb, Or.inl ‹_›, rfl⟩ - · rw [if_neg ‹_›, dif_pos ‹_›] - contradiction · rw [if_neg ‹_›, dif_pos ‹_›] simp only [size_rotateL (.left ‹_›), size_bin, size_inner] rw [← Balanced.eq ‹_›] refine ⟨rfl, ?_⟩ apply balanced_rotateL <;> assumption - · simp only [delta, size_leaf] at * - omega · rw [if_neg ‹_›, dif_neg ‹_›, dif_pos ‹_›] simp only [size_rotateR (.right ‹_›), size_bin, size_inner] rw [← Balanced.eq ‹_›] diff --git a/src/Std/Data/DTreeMap/Internal/Lemmas.lean b/src/Std/Data/DTreeMap/Internal/Lemmas.lean index e6827befcc78..5292598a9972 100644 --- a/src/Std/Data/DTreeMap/Internal/Lemmas.lean +++ b/src/Std/Data/DTreeMap/Internal/Lemmas.lean @@ -3603,6 +3603,7 @@ theorem getKey!_alter!_self [TransOrd α] [LawfulEqOrd α] [Inhabited α] (h : t (t.alter! k f).getKey! k = if (f (t.get? k)).isSome then k else default := by simpa only [alter_eq_alter!] 
using getKey!_alter_self h +-- Note that in many use cases `getKey_eq` gives a simpler right hand side. theorem getKey_alter [TransOrd α] [LawfulEqOrd α] [Inhabited α] (h : t.WF) {k k' : α} {f : Option (β k) → Option (β k)} {hc : k' ∈ (t.alter k f h.balanced).1} : (t.alter k f h.balanced).1.getKey k' hc = diff --git a/src/Std/Data/DTreeMap/Lemmas.lean b/src/Std/Data/DTreeMap/Lemmas.lean index 8cbabe379bac..b08cc6749398 100644 --- a/src/Std/Data/DTreeMap/Lemmas.lean +++ b/src/Std/Data/DTreeMap/Lemmas.lean @@ -2348,6 +2348,7 @@ theorem getKey!_alter_self [TransCmp cmp] [LawfulEqCmp cmp] [Inhabited α] {k : (t.alter k f).getKey! k = if (f (t.get? k)).isSome then k else default := Impl.getKey!_alter_self t.wf +@[deprecated getKey_eq (since := "2025-01-05")] theorem getKey_alter [TransCmp cmp] [LawfulEqCmp cmp] [Inhabited α] {k k' : α} {f : Option (β k) → Option (β k)} {hc : k' ∈ t.alter k f} : (t.alter k f).getKey k' hc = diff --git a/src/Std/Data/DTreeMap/Raw/Lemmas.lean b/src/Std/Data/DTreeMap/Raw/Lemmas.lean index 6abe607b953b..9c43d819e653 100644 --- a/src/Std/Data/DTreeMap/Raw/Lemmas.lean +++ b/src/Std/Data/DTreeMap/Raw/Lemmas.lean @@ -2379,6 +2379,7 @@ theorem getKey!_alter_self [TransCmp cmp] [LawfulEqCmp cmp] [Inhabited α] (h : (t.alter k f).getKey! k = if (f (t.get? k)).isSome then k else default := Impl.getKey!_alter!_self h +@[deprecated getKey_eq (since := "2025-01-05")] theorem getKey_alter [TransCmp cmp] [LawfulEqCmp cmp] [Inhabited α] (h : t.WF) {k k' : α} {f : Option (β k) → Option (β k)} {hc : k' ∈ t.alter k f} : (t.alter k f).getKey k' hc = diff --git a/src/Std/Data/ExtDHashMap/Lemmas.lean b/src/Std/Data/ExtDHashMap/Lemmas.lean index 225bf893932a..4af9801dc881 100644 --- a/src/Std/Data/ExtDHashMap/Lemmas.lean +++ b/src/Std/Data/ExtDHashMap/Lemmas.lean @@ -677,6 +677,7 @@ theorem getKey_congr [EquivBEq α] [LawfulHashable α] {k₁ k₂ : α} (h : k (h₁ : k₁ ∈ m) : m.getKey k₁ h₁ = m.getKey k₂ ((mem_congr h).mp h₁) := m.inductionOn (fun _ h h₁ => DHashMap.getKey_congr h h₁) h h₁ +@[simp] theorem getKey_eq [LawfulBEq α] {k : α} (h : k ∈ m) : m.getKey k h = k := m.inductionOn (fun _ h => DHashMap.getKey_eq h) h @@ -2264,6 +2265,7 @@ theorem getKey!_alter_self [LawfulBEq α] [Inhabited α] {k : α} {f : Option ( (m.alter k f).getKey! k = if (f (m.get? k)).isSome then k else default := m.inductionOn fun _ => DHashMap.getKey!_alter_self +@[deprecated getKey_eq (since := "2025-01-05")] theorem getKey_alter [LawfulBEq α] [Inhabited α] {k k' : α} {f : Option (β k) → Option (β k)} {h : k' ∈ m.alter k f} : (m.alter k f).getKey k' h = @@ -2271,8 +2273,8 @@ theorem getKey_alter [LawfulBEq α] [Inhabited α] {k k' : α} {f : Option (β k k else haveI h' : k' ∈ m := mem_alter_of_beq_eq_false (Bool.not_eq_true _ ▸ heq) |>.mp h - m.getKey k' h' := - m.inductionOn (fun _ _ => DHashMap.getKey_alter) h + m.getKey k' h' := by + split <;> simp_all @[simp] theorem getKey_alter_self [LawfulBEq α] [Inhabited α] {k : α} {f : Option (β k) → Option (β k)} @@ -2597,6 +2599,7 @@ theorem getKey!_modify_self [LawfulBEq α] [Inhabited α] {k : α} {f : β k → (m.modify k f).getKey! 
k = if k ∈ m then k else default := m.inductionOn fun _ => DHashMap.getKey!_modify_self +@[deprecated getKey_eq (since := "2025-01-05")] theorem getKey_modify [LawfulBEq α] [Inhabited α] {k k' : α} {f : β k → β k} {h : k' ∈ m.modify k f} : (m.modify k f).getKey k' h = @@ -2604,8 +2607,8 @@ theorem getKey_modify [LawfulBEq α] [Inhabited α] {k k' : α} {f : β k → β k else haveI h' : k' ∈ m := mem_modify.mp h - m.getKey k' h' := - m.inductionOn (fun _ _ => DHashMap.getKey_modify) h + m.getKey k' h' := by + split <;> simp_all @[simp] theorem getKey_modify_self [LawfulBEq α] [Inhabited α] {k : α} {f : β k → β k} diff --git a/src/Std/Data/ExtHashMap/Lemmas.lean b/src/Std/Data/ExtHashMap/Lemmas.lean index 8f73660916d3..e4141eeeb104 100644 --- a/src/Std/Data/ExtHashMap/Lemmas.lean +++ b/src/Std/Data/ExtHashMap/Lemmas.lean @@ -477,6 +477,7 @@ theorem getKey_congr [EquivBEq α] [LawfulHashable α] {k₁ k₂ : α} (h : k (h₁ : k₁ ∈ m) : m.getKey k₁ h₁ = m.getKey k₂ ((mem_congr h).mp h₁) := ExtDHashMap.getKey_congr h h₁ +@[simp] theorem getKey_eq [LawfulBEq α] {k : α} (h : k ∈ m) : m.getKey k h = k := ExtDHashMap.getKey_eq h diff --git a/src/Std/Data/ExtHashSet/Lemmas.lean b/src/Std/Data/ExtHashSet/Lemmas.lean index f26f0fd932d7..2db7ec90713a 100644 --- a/src/Std/Data/ExtHashSet/Lemmas.lean +++ b/src/Std/Data/ExtHashSet/Lemmas.lean @@ -259,6 +259,7 @@ theorem get_congr [EquivBEq α] [LawfulHashable α] {k₁ k₂ : α} (h : k₁ = (h₁ : k₁ ∈ m) : m.get k₁ h₁ = m.get k₂ ((mem_congr h).mp h₁) := ExtHashMap.getKey_congr h h₁ +@[simp] theorem get_eq [LawfulBEq α] {k : α} (h : k ∈ m) : m.get k h = k := ExtHashMap.getKey_eq h diff --git a/src/Std/Data/HashMap/Lemmas.lean b/src/Std/Data/HashMap/Lemmas.lean index 3ed842c34a53..b2298b0af309 100644 --- a/src/Std/Data/HashMap/Lemmas.lean +++ b/src/Std/Data/HashMap/Lemmas.lean @@ -565,6 +565,7 @@ theorem getKey_congr [EquivBEq α] [LawfulHashable α] {k₁ k₂ : α} (h : k (h₁ : k₁ ∈ m) : m.getKey k₁ h₁ = m.getKey k₂ ((mem_congr h).mp h₁) := DHashMap.getKey_congr h h₁ +@[simp] theorem getKey_eq [LawfulBEq α] {k : α} (h : k ∈ m) : m.getKey k h = k := DHashMap.getKey_eq h diff --git a/src/Std/Data/HashMap/RawLemmas.lean b/src/Std/Data/HashMap/RawLemmas.lean index 2cb0bd21d472..13ba53cc2088 100644 --- a/src/Std/Data/HashMap/RawLemmas.lean +++ b/src/Std/Data/HashMap/RawLemmas.lean @@ -589,6 +589,7 @@ theorem getKey_congr [EquivBEq α] [LawfulHashable α] (h : m.WF) {k₁ k₂ : m.getKey k₁ h₁ = m.getKey k₂ ((mem_congr h h').mp h₁) := DHashMap.Raw.getKey_congr h.out h' h₁ +@[simp] theorem getKey_eq [LawfulBEq α] (h : m.WF) {k : α} (h' : k ∈ m) : m.getKey k h' = k := DHashMap.Raw.getKey_eq h.out h' diff --git a/src/Std/Data/HashSet/Lemmas.lean b/src/Std/Data/HashSet/Lemmas.lean index 4e76a2b74e39..2456b6db46ea 100644 --- a/src/Std/Data/HashSet/Lemmas.lean +++ b/src/Std/Data/HashSet/Lemmas.lean @@ -310,6 +310,7 @@ theorem get_congr [EquivBEq α] [LawfulHashable α] {k₁ k₂ : α} (h : k₁ = (h₁ : k₁ ∈ m) : m.get k₁ h₁ = m.get k₂ ((mem_congr h).mp h₁) := HashMap.getKey_congr h h₁ +@[simp] theorem get_eq [LawfulBEq α] {k : α} (h : k ∈ m) : m.get k h = k := HashMap.getKey_eq h diff --git a/src/Std/Data/HashSet/RawLemmas.lean b/src/Std/Data/HashSet/RawLemmas.lean index 7f99f504b388..2e4d7fa60608 100644 --- a/src/Std/Data/HashSet/RawLemmas.lean +++ b/src/Std/Data/HashSet/RawLemmas.lean @@ -328,6 +328,7 @@ theorem get_congr [EquivBEq α] [LawfulHashable α] (h : m.WF) {k₁ k₂ : α} m.get k₁ h₁ = m.get k₂ (((mem_congr h h').mp h₁)) := HashMap.Raw.getKey_congr h.out h' h₁ +@[simp] theorem get_eq [LawfulBEq α] (h : 
m.WF) {k : α} (h' : m.contains k) : m.get k h' = k := HashMap.Raw.getKey_eq h.out h' diff --git a/src/Std/Data/Internal/List/Associative.lean b/src/Std/Data/Internal/List/Associative.lean index 7548c70ca676..510013d94ac9 100644 --- a/src/Std/Data/Internal/List/Associative.lean +++ b/src/Std/Data/Internal/List/Associative.lean @@ -3255,11 +3255,11 @@ theorem getEntry?_insertListIfNewUnit [BEq α] [PartialEquivBEq α] {l : List (( · simp · cases hc : containsKey hd l · simp only [Bool.not_false, Bool.and_self, ↓reduceIte, Option.some_or, cond_true, - Option.or_some', Option.some.injEq] + Option.or_some, Option.some.injEq] rw [getEntry?_eq_none.2, Option.getD_none] rwa [← containsKey_congr hhd] · simp only [Bool.not_true, Bool.and_false, Bool.false_eq_true, ↓reduceIte, cond_true, - Option.or_some', getEntry?_eq_none] + Option.or_some, getEntry?_eq_none] rw [containsKey_congr hhd, containsKey_eq_isSome_getEntry?] at hc obtain ⟨v, hv⟩ := Option.isSome_iff_exists.1 hc simp [hv] @@ -4495,10 +4495,10 @@ theorem getEntry?_filterMap' [BEq α] [EquivBEq α] specialize hf ⟨k', v⟩ split · rename_i h - simp only [List.filterMap_cons, Option.some_bind] + simp only [List.filterMap_cons, Option.bind_some] simp only [containsKey_congr h] at hl split - · simp only [ih, ‹f _ = _›, Option.none_bind, getEntry?_eq_none.mpr hl.2] + · simp only [ih, ‹f _ = _›, Option.bind_none, getEntry?_eq_none.mpr hl.2] · rw [‹f _ = _›, Option.all_some, BEq.congr_right h] at hf rw [getEntry?_cons, hf, ‹f _ = _›, cond_true] · simp only [List.filterMap_cons] @@ -5169,7 +5169,7 @@ theorem getValue?_filterMap_of_getKey?_eq_some {β : Type v} {γ : Type w} [BEq simp only [getKey?_eq_getEntry?, Option.map_eq_some_iff, getValue?_eq_getEntry?, getEntry?_filterMap distinct, Option.map_bind, forall_exists_index, and_imp] intro x hx hk - simp only [hx, Option.some_bind, Function.comp_apply, hk, Option.map_map, Option.map_some] + simp only [hx, Option.bind_some, Function.comp_apply, hk, Option.map_map, Option.map_some] cases f k' x.2 <;> simp theorem getValue!_filterMap {β : Type v} {γ : Type w} [BEq α] [EquivBEq α] [Inhabited γ] diff --git a/src/Std/Sat/CNF/Dimacs.lean b/src/Std/Sat/CNF/Dimacs.lean index a07458a2c296..646e4e7e7743 100644 --- a/src/Std/Sat/CNF/Dimacs.lean +++ b/src/Std/Sat/CNF/Dimacs.lean @@ -12,7 +12,7 @@ namespace Sat namespace CNF -private structure DimacsState where +structure DimacsState where numClauses : Nat := 0 maxLit : Nat := 0 diff --git a/src/Std/Sync/Barrier.lean b/src/Std/Sync/Barrier.lean index 1b8ea0e36354..4078bbaade71 100644 --- a/src/Std/Sync/Barrier.lean +++ b/src/Std/Sync/Barrier.lean @@ -13,7 +13,7 @@ This file heavily inspired by: https://github.com/rust-lang/rust/blob/b8ae372/library/std/src/sync/barrier.rs -/ -private structure BarrierState where +structure BarrierState where count : Nat generationId : Nat diff --git a/src/Std/Sync/Channel.lean b/src/Std/Sync/Channel.lean index b34c8ef60d5b..d252536c27c9 100644 --- a/src/Std/Sync/Channel.lean +++ b/src/Std/Sync/Channel.lean @@ -67,7 +67,7 @@ The central state structure for an unbounded channel, maintains the following in 1. `values = ∅ ∨ consumers = ∅` 2. `closed = true → consumers = ∅` -/ -private structure Unbounded.State (α : Type) where +structure Unbounded.State (α : Type) where /-- Values pushed into the channel that are waiting to be consumed. 
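-- Illustrative sketch, not part of this patch: the effect of the `@[simp]` attribute newly
-- placed on `getKey_eq`/`get_eq` in the `Std.Data` lemma files above. Under `LawfulBEq`,
-- `simp` now closes goals about `getKey` applied to a key that is known to be present.
import Std.Data.HashMap.Lemmas

open Std

example (m : HashMap Nat String) (h : 2 ∈ m) : m.getKey 2 h = 2 := by
  simp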
-/ @@ -83,7 +83,7 @@ private structure Unbounded.State (α : Type) where closed : Bool deriving Nonempty -private structure Unbounded (α : Type) where +structure Unbounded (α : Type) where state : Mutex (Unbounded.State α) deriving Nonempty @@ -205,7 +205,7 @@ The central state structure for a zero buffer channel, maintains the following i 1. `producers = ∅ ∨ consumers = ∅` 2. `closed = true → consumers = ∅` -/ -private structure Zero.State (α : Type) where +structure Zero.State (α : Type) where /-- Producers that are blocked on a consumer taking their value. -/ @@ -220,7 +220,7 @@ private structure Zero.State (α : Type) where -/ closed : Bool -private structure Zero (α : Type) where +structure Zero (α : Type) where state : Mutex (Zero.State α) namespace Zero @@ -351,7 +351,7 @@ private def recvSelector (ch : Zero α) : Selector (Option α) where end Zero open Internal.IO.Async in -private structure Bounded.Consumer (α : Type) where +structure Bounded.Consumer (α : Type) where promise : IO.Promise Bool waiter : Option (Waiter (Option α)) @@ -373,7 +373,7 @@ While it (currently) lacks the partial lock-freeness of go channels, the protoco [Go channels on steroids](https://docs.google.com/document/d/1yIAYmbvL3JxOKOjuCyon7JhW4cSv1wy5hC0ApeGMV9s/pub) as well as its [implementation](https://go.dev/src/runtime/chan.go). -/ -private structure Bounded.State (α : Type) where +structure Bounded.State (α : Type) where /-- Producers that are blocked on a consumer taking their value as there was no buffer space available when they tried to enqueue. @@ -416,7 +416,7 @@ private structure Bounded.State (α : Type) where -/ closed : Bool -private structure Bounded (α : Type) where +structure Bounded (α : Type) where state : Mutex (Bounded.State α) namespace Bounded diff --git a/src/Std/Time/Format/Basic.lean b/src/Std/Time/Format/Basic.lean index c8108ee995a9..0a33118c9778 100644 --- a/src/Std/Time/Format/Basic.lean +++ b/src/Std/Time/Format/Basic.lean @@ -1274,7 +1274,7 @@ private def FormatType (result : Type) : FormatString → Type namespace GenericFormat -private structure DateBuilder where +structure DateBuilder where G : Option Year.Era := none y : Option Year.Offset := none u : Option Year.Offset := none diff --git a/src/include/lean/lean.h b/src/include/lean/lean.h index fceaee2d38ae..3c2495ce04f5 100644 --- a/src/include/lean/lean.h +++ b/src/include/lean/lean.h @@ -1424,11 +1424,22 @@ static inline lean_obj_res lean_nat_lxor(b_lean_obj_arg a1, b_lean_obj_arg a2) { } LEAN_EXPORT lean_obj_res lean_nat_shiftl(b_lean_obj_arg a1, b_lean_obj_arg a2); -LEAN_EXPORT lean_obj_res lean_nat_shiftr(b_lean_obj_arg a1, b_lean_obj_arg a2); +LEAN_EXPORT lean_obj_res lean_nat_big_shiftr(b_lean_obj_arg a1, b_lean_obj_arg a2); LEAN_EXPORT lean_obj_res lean_nat_pow(b_lean_obj_arg a1, b_lean_obj_arg a2); LEAN_EXPORT lean_obj_res lean_nat_gcd(b_lean_obj_arg a1, b_lean_obj_arg a2); LEAN_EXPORT lean_obj_res lean_nat_log2(b_lean_obj_arg a); +static inline lean_obj_res lean_nat_shiftr(b_lean_obj_arg a1, b_lean_obj_arg a2) { + if (LEAN_LIKELY(lean_is_scalar(a1) && lean_is_scalar(a2))) { + size_t s1 = lean_unbox(a1); + size_t s2 = lean_unbox(a2); + size_t r = (s2 < sizeof(size_t)*8) ? s1 >> s2 : 0; + return lean_box(r); + } else { + return lean_nat_big_shiftr(a1, a2); + } +} + /* Integers */ #define LEAN_MAX_SMALL_INT (sizeof(void*) == 8 ? 
INT_MAX : (INT_MAX >> 1)) diff --git a/src/lake/Lake/Config/Meta.lean b/src/lake/Lake/Config/Meta.lean index 173798b94adc..f6475b15c01e 100644 --- a/src/lake/Lake/Config/Meta.lean +++ b/src/lake/Lake/Config/Meta.lean @@ -69,7 +69,7 @@ scoped syntax (name := configDecl) instance : Coe Ident (TSyntax ``Term.structInstLVal) where coe stx := Unhygienic.run `(Term.structInstLVal| $stx:ident) -private structure FieldView where +structure FieldView where ref : Syntax mods : TSyntax ``Command.declModifiers := Unhygienic.run `(declModifiers|) id : Ident @@ -79,7 +79,7 @@ private structure FieldView where decl? : Option (TSyntax ``structSimpleBinder) := none parent : Bool := false -private structure FieldMetadata where +structure FieldMetadata where cmds : Array Command := #[] fields : Term := Unhygienic.run `(Array.empty) diff --git a/src/lake/Lake/Util/Name.lean b/src/lake/Lake/Util/Name.lean index 1dbf3c8b82cb..ee58ebc93cc5 100644 --- a/src/lake/Lake/Util/Name.lean +++ b/src/lake/Lake/Util/Name.lean @@ -35,18 +35,6 @@ abbrev OrdNameMap α := RBArray Name α Name.quickCmp abbrev DNameMap α := DRBMap Name α Name.quickCmp @[inline] def DNameMap.empty : DNameMap α := DRBMap.empty -instance [ToJson α] : ToJson (NameMap α) where - toJson m := Json.obj <| m.fold (fun n k v => n.insert compare k.toString (toJson v)) .leaf - -instance [FromJson α] : FromJson (NameMap α) where - fromJson? j := do - (← j.getObj?).foldM (init := {}) fun m k v => - let k := k.toName - if k.isAnonymous then - throw "expected name" - else - return m.insert k (← fromJson? v) - /-! # Name Helpers -/ namespace Name diff --git a/src/runtime/object.cpp b/src/runtime/object.cpp index 7c390dd679bd..ab12a58c3131 100644 --- a/src/runtime/object.cpp +++ b/src/runtime/object.cpp @@ -1462,7 +1462,7 @@ extern "C" LEAN_EXPORT lean_obj_res lean_nat_shiftl(b_lean_obj_arg a1, b_lean_ob return mpz_to_nat(r); } -extern "C" LEAN_EXPORT lean_obj_res lean_nat_shiftr(b_lean_obj_arg a1, b_lean_obj_arg a2) { +extern "C" LEAN_EXPORT lean_obj_res lean_nat_big_shiftr(b_lean_obj_arg a1, b_lean_obj_arg a2) { if (!lean_is_scalar(a2)) { return lean_box(0); // This large of an exponent must be 0. 
} diff --git a/src/util/shell.cpp b/src/util/shell.cpp index ab1345c97e0c..308ecc04301c 100644 --- a/src/util/shell.cpp +++ b/src/util/shell.cpp @@ -223,6 +223,7 @@ static void display_help(std::ostream & out) { #endif std::cout << " --plugin=file load and initialize Lean shared library for registering linters etc.\n"; std::cout << " --load-dynlib=file load shared library to make its symbols available to the interpreter\n"; + std::cout << " --setup=file JSON file with module setup data (supersedes the file's header)\n"; std::cout << " --json report Lean output (e.g., messages) as JSON (one per line)\n"; std::cout << " -E --error=kind report Lean messages of kind as errors\n"; std::cout << " --deps just print dependencies of a Lean input\n"; @@ -273,6 +274,7 @@ static struct option g_long_options[] = { #endif {"plugin", required_argument, 0, 'p'}, {"load-dynlib", required_argument, 0, 'l'}, + {"setup", required_argument, 0, 'u'}, {"error", required_argument, 0, 'E'}, {"json", no_argument, &json_output, 1}, {"print-prefix", no_argument, &print_prefix, 1}, @@ -340,6 +342,7 @@ extern "C" object * lean_run_frontend( object * error_kinds, object * plugins, bool print_stats, + object * header_file_name, object * w ); option_ref<elab_environment> run_new_frontend( @@ -351,7 +354,8 @@ option_ref<elab_environment> run_new_frontend( optional<std::string> const & ilean_file_name, uint8_t json_output, array_ref<name> const & error_kinds, - bool print_stats + bool print_stats, + optional<std::string> const & setup_file_name ) { return get_io_result<option_ref<elab_environment>>(lean_run_frontend( mk_string(input), @@ -365,6 +369,7 @@ option_ref<elab_environment> run_new_frontend( error_kinds.to_obj_arg(), mk_empty_array(), print_stats, + setup_file_name ? mk_option_some(mk_string(*setup_file_name)) : mk_option_none(), io_mk_world() )); } @@ -487,6 +492,7 @@ extern "C" LEAN_EXPORT int lean_main(int argc, char ** argv) { bool run = false; optional<std::string> olean_fn; optional<std::string> ilean_fn; + optional<std::string> setup_fn; bool use_stdin = false; unsigned trust_lvl = LEAN_BELIEVER_TRUST_LEVEL + 1; bool only_deps = false; @@ -638,6 +644,10 @@ extern "C" LEAN_EXPORT int lean_main(int argc, char ** argv) { lean::load_dynlib(optarg); forwarded_args.push_back(string_ref("--load-dynlib=" + std::string(optarg))); break; + case 'u': + check_optarg("u"); + setup_fn = optarg; + break; case 'E': check_optarg("E"); error_kinds.push_back(string_to_name(std::string(optarg))); @@ -755,7 +765,10 @@ extern "C" LEAN_EXPORT int lean_main(int argc, char ** argv) { if (!main_module_name) main_module_name = name("_stdin"); - option_ref<elab_environment> opt_env = run_new_frontend(contents, opts, mod_fn, *main_module_name, trust_lvl, olean_fn, ilean_fn, json_output, error_kinds, stats); + option_ref<elab_environment> opt_env = run_new_frontend( + contents, opts, mod_fn, *main_module_name, trust_lvl, + olean_fn, ilean_fn, json_output, error_kinds, stats, setup_fn + ); if (opt_env) { elab_environment env = opt_env.get_val(); diff --git a/stage0/src/CMakeLists.txt b/stage0/src/CMakeLists.txt index b95396d64131..f074c862fcf3 100644 --- a/stage0/src/CMakeLists.txt +++ b/stage0/src/CMakeLists.txt @@ -511,7 +511,10 @@ if(${CMAKE_SYSTEM_NAME} MATCHES "Windows") # import libraries created by the stdlib.make targets string(APPEND LEANC_SHARED_LINKER_FLAGS " -lInit_shared -lleanshared_1 -lleanshared") elseif("${CMAKE_SYSTEM_NAME}" MATCHES "Darwin") - string(APPEND LEANC_SHARED_LINKER_FLAGS " -Wl,-undefined,dynamic_lookup") + # 
The second flag is necessary to even *load* dylibs without resolved symbols, as can happen + # if a Lake `extern_lib` depends on a symbol defined by the Lean library but is loaded even + # before its definition. + string(APPEND LEANC_SHARED_LINKER_FLAGS " -Wl,-undefined,dynamic_lookup -Wl,-no_fixup_chains") endif() # Linux ignores undefined symbols in shared libraries by default diff --git a/stage0/src/util/shell.cpp b/stage0/src/util/shell.cpp index ab1345c97e0c..308ecc04301c 100644 --- a/stage0/src/util/shell.cpp +++ b/stage0/src/util/shell.cpp @@ -223,6 +223,7 @@ static void display_help(std::ostream & out) { #endif std::cout << " --plugin=file load and initialize Lean shared library for registering linters etc.\n"; std::cout << " --load-dynlib=file load shared library to make its symbols available to the interpreter\n"; + std::cout << " --setup=file JSON file with module setup data (supersedes the file's header)\n"; std::cout << " --json report Lean output (e.g., messages) as JSON (one per line)\n"; std::cout << " -E --error=kind report Lean messages of kind as errors\n"; std::cout << " --deps just print dependencies of a Lean input\n"; @@ -273,6 +274,7 @@ static struct option g_long_options[] = { #endif {"plugin", required_argument, 0, 'p'}, {"load-dynlib", required_argument, 0, 'l'}, + {"setup", required_argument, 0, 'u'}, {"error", required_argument, 0, 'E'}, {"json", no_argument, &json_output, 1}, {"print-prefix", no_argument, &print_prefix, 1}, @@ -340,6 +342,7 @@ extern "C" object * lean_run_frontend( object * error_kinds, object * plugins, bool print_stats, + object * header_file_name, object * w ); option_ref<elab_environment> run_new_frontend( @@ -351,7 +354,8 @@ option_ref<elab_environment> run_new_frontend( optional<std::string> const & ilean_file_name, uint8_t json_output, array_ref<name> const & error_kinds, - bool print_stats + bool print_stats, + optional<std::string> const & setup_file_name ) { return get_io_result<option_ref<elab_environment>>(lean_run_frontend( mk_string(input), @@ -365,6 +369,7 @@ option_ref<elab_environment> run_new_frontend( error_kinds.to_obj_arg(), mk_empty_array(), print_stats, + setup_file_name ? 
mk_option_some(mk_string(*setup_file_name)) : mk_option_none(), io_mk_world() )); } @@ -487,6 +492,7 @@ extern "C" LEAN_EXPORT int lean_main(int argc, char ** argv) { bool run = false; optional<std::string> olean_fn; optional<std::string> ilean_fn; + optional<std::string> setup_fn; bool use_stdin = false; unsigned trust_lvl = LEAN_BELIEVER_TRUST_LEVEL + 1; bool only_deps = false; @@ -638,6 +644,10 @@ extern "C" LEAN_EXPORT int lean_main(int argc, char ** argv) { lean::load_dynlib(optarg); forwarded_args.push_back(string_ref("--load-dynlib=" + std::string(optarg))); break; + case 'u': + check_optarg("u"); + setup_fn = optarg; + break; case 'E': check_optarg("E"); error_kinds.push_back(string_to_name(std::string(optarg))); @@ -755,7 +765,10 @@ extern "C" LEAN_EXPORT int lean_main(int argc, char ** argv) { if (!main_module_name) main_module_name = name("_stdin"); - option_ref<elab_environment> opt_env = run_new_frontend(contents, opts, mod_fn, *main_module_name, trust_lvl, olean_fn, ilean_fn, json_output, error_kinds, stats); + option_ref<elab_environment> opt_env = run_new_frontend( + contents, opts, mod_fn, *main_module_name, trust_lvl, + olean_fn, ilean_fn, json_output, error_kinds, stats, setup_fn + ); if (opt_env) { elab_environment env = opt_env.get_val(); diff --git a/stage0/stdlib/Init/Data/Array/Lemmas.c b/stage0/stdlib/Init/Data/Array/Lemmas.c index 31bacca035a2..507706bfbff7 100644 --- a/stage0/stdlib/Init/Data/Array/Lemmas.c +++ b/stage0/stdlib/Init/Data/Array/Lemmas.c @@ -13,114 +13,114 @@ #ifdef __cplusplus extern "C" { #endif -static lean_object* l___auto____x40_Init_Data_Array_Lemmas___hyg_18364____closed__16; +static lean_object* l___auto____x40_Init_Data_Array_Lemmas___hyg_18368____closed__11; +static lean_object* l___auto____x40_Init_Data_Array_Lemmas___hyg_18368____closed__6; +static lean_object* l___auto____x40_Init_Data_Array_Lemmas___hyg_18368____closed__10; LEAN_EXPORT lean_object* l___private_Init_Data_Array_Lemmas_0__Array_findSomeRevM_x3f_find_match__1_splitter___rarg(lean_object*, lean_object*, lean_object*); LEAN_EXPORT lean_object* l___private_Init_Data_Array_Lemmas_0__Array_findSomeRevM_x3f_find_match__1_splitter___rarg___boxed(lean_object*, lean_object*, lean_object*); -static lean_object* l___auto____x40_Init_Data_Array_Lemmas___hyg_18364____closed__19; LEAN_EXPORT lean_object* l___private_Init_Data_Array_Lemmas_0__List_filterMap__replicate_match__1_splitter___rarg(lean_object*, lean_object*, lean_object*); LEAN_EXPORT uint8_t l_Array_instDecidableMemOfLawfulBEq___rarg(lean_object*, lean_object*, lean_object*, lean_object*); LEAN_EXPORT lean_object* l_Array_toListRev___rarg___boxed(lean_object*); LEAN_EXPORT lean_object* l___private_Init_Data_Array_Lemmas_0__Array_foldlM_loop_match__1_splitter___rarg(lean_object*, lean_object*, lean_object*); -static lean_object* l___auto____x40_Init_Data_Array_Lemmas___hyg_18364____closed__15; -static lean_object* l___auto____x40_Init_Data_Array_Lemmas___hyg_18364____closed__14; +static lean_object* l___auto____x40_Init_Data_Array_Lemmas___hyg_18368____closed__5; LEAN_EXPORT lean_object* l_Array_foldlMUnsafe_fold___at_Array_toListRev___spec__1___rarg(lean_object*, size_t, size_t, lean_object*); LEAN_EXPORT lean_object* l___private_Init_Data_Array_Lemmas_0__Array_isEqvAux_match__1_splitter___boxed(lean_object*, lean_object*, lean_object*); lean_object* lean_array_push(lean_object*, lean_object*); LEAN_EXPORT lean_object* l___private_Init_Data_Array_Lemmas_0__GetElem_x3f_match__1_splitter___rarg___boxed(lean_object*, 
lean_object*, lean_object*); LEAN_EXPORT lean_object* l_Array_instDecidableMemOfLawfulBEq(lean_object*); uint8_t lean_usize_dec_eq(size_t, size_t); -static lean_object* l___auto____x40_Init_Data_Array_Lemmas___hyg_18364____closed__20; LEAN_EXPORT lean_object* l_Array_instDecidableForallForallMemOfDecidablePred___rarg(lean_object*, lean_object*, lean_object*); -static lean_object* l___auto____x40_Init_Data_Array_Lemmas___hyg_18364____closed__21; LEAN_EXPORT lean_object* l___private_Init_Data_Array_Lemmas_0__Array_foldl__filterMap_x27_match__1_splitter___rarg(lean_object*, lean_object*, lean_object*); -static lean_object* l___auto____x40_Init_Data_Array_Lemmas___hyg_18364____closed__31; +static lean_object* l___auto____x40_Init_Data_Array_Lemmas___hyg_18368____closed__14; +static lean_object* l___auto____x40_Init_Data_Array_Lemmas___hyg_18368____closed__23; LEAN_EXPORT lean_object* l___private_Init_Data_Array_Lemmas_0__Option_getD_match__1_splitter___rarg(lean_object*, lean_object*, lean_object*); -static lean_object* l___auto____x40_Init_Data_Array_Lemmas___hyg_18364____closed__6; LEAN_EXPORT lean_object* l___private_Init_Data_Array_Lemmas_0__List_anyM_match__1_splitter___rarg(uint8_t, lean_object*, lean_object*); lean_object* l_Nat_decidableExistsLT_x27(lean_object*, lean_object*, lean_object*); -static lean_object* l___auto____x40_Init_Data_Array_Lemmas___hyg_18364____closed__8; -static lean_object* l___auto____x40_Init_Data_Array_Lemmas___hyg_18364____closed__22; -static lean_object* l___auto____x40_Init_Data_Array_Lemmas___hyg_18364____closed__29; -static lean_object* l___auto____x40_Init_Data_Array_Lemmas___hyg_18364____closed__10; +static lean_object* l___auto____x40_Init_Data_Array_Lemmas___hyg_18368____closed__12; +static lean_object* l___auto____x40_Init_Data_Array_Lemmas___hyg_18368____closed__29; LEAN_EXPORT lean_object* l___private_Init_Data_Array_Lemmas_0__Array_isEqvAux_match__1_splitter(lean_object*, lean_object*, lean_object*); LEAN_EXPORT lean_object* l___private_Init_Data_Array_Lemmas_0__Array_filterMap__push_match__1_splitter___rarg(lean_object*, lean_object*, lean_object*); LEAN_EXPORT lean_object* l___private_Init_Data_Array_Lemmas_0__Array_appendCore_loop_match__1_splitter___rarg___boxed(lean_object*, lean_object*, lean_object*); size_t lean_usize_of_nat(lean_object*); LEAN_EXPORT lean_object* l___private_Init_Data_Array_Lemmas_0__Array_erase_match__1_splitter___rarg(lean_object*, lean_object*, lean_object*); -static lean_object* l___auto____x40_Init_Data_Array_Lemmas___hyg_18364____closed__17; +static lean_object* l___auto____x40_Init_Data_Array_Lemmas___hyg_18368____closed__15; LEAN_EXPORT lean_object* l___private_Init_Data_Array_Lemmas_0__Array_shrink_loop_match__1_splitter___rarg___boxed(lean_object*, lean_object*, lean_object*, lean_object*); +static lean_object* l___auto____x40_Init_Data_Array_Lemmas___hyg_18368____closed__2; LEAN_EXPORT lean_object* l___private_Init_Data_Array_Lemmas_0__Array_foldlM_loop_match__1_splitter(lean_object*); LEAN_EXPORT lean_object* l___private_Init_Data_Array_Lemmas_0__Array_erase_match__1_splitter___boxed(lean_object*, lean_object*, lean_object*); LEAN_EXPORT lean_object* l___private_Init_Data_Array_Lemmas_0__List_filterMap_match__1_splitter___rarg___boxed(lean_object*, lean_object*, lean_object*); LEAN_EXPORT lean_object* l___private_Init_Data_Array_Lemmas_0__List_foldl__filterMap_match__1_splitter(lean_object*, lean_object*); LEAN_EXPORT lean_object* l_Array_instDecidableExistsAndMemOfDecidablePred(lean_object*); +static 
lean_object* l___auto____x40_Init_Data_Array_Lemmas___hyg_18368____closed__16; LEAN_EXPORT lean_object* l___private_Init_Data_Array_Lemmas_0__List_filterMap_match__1_splitter___rarg(lean_object*, lean_object*, lean_object*); -static lean_object* l___auto____x40_Init_Data_Array_Lemmas___hyg_18364____closed__7; +static lean_object* l___auto____x40_Init_Data_Array_Lemmas___hyg_18368____closed__3; LEAN_EXPORT lean_object* l_Array_toListRev___rarg(lean_object*); LEAN_EXPORT lean_object* l___private_Init_Data_Array_Lemmas_0__Array_shrink_loop_match__1_splitter___rarg(lean_object*, lean_object*, lean_object*, lean_object*); -static lean_object* l___auto____x40_Init_Data_Array_Lemmas___hyg_18364____closed__24; +static lean_object* l___auto____x40_Init_Data_Array_Lemmas___hyg_18368____closed__18; LEAN_EXPORT lean_object* l___private_Init_Data_Array_Lemmas_0__Array_filterMap__push_match__1_splitter___rarg___boxed(lean_object*, lean_object*, lean_object*); -static lean_object* l___auto____x40_Init_Data_Array_Lemmas___hyg_18364____closed__4; +static lean_object* l___auto____x40_Init_Data_Array_Lemmas___hyg_18368____closed__8; LEAN_EXPORT lean_object* l___private_Init_Data_Array_Lemmas_0__List_filterMap__replicate_match__1_splitter___rarg___boxed(lean_object*, lean_object*, lean_object*); -static lean_object* l___auto____x40_Init_Data_Array_Lemmas___hyg_18364____closed__5; +static lean_object* l___auto____x40_Init_Data_Array_Lemmas___hyg_18368____closed__1; LEAN_EXPORT lean_object* l___private_Init_Data_Array_Lemmas_0__Array_erase_match__1_splitter(lean_object*, lean_object*, lean_object*); LEAN_EXPORT lean_object* l___private_Init_Data_Array_Lemmas_0__Array_foldl__filterMap_x27_match__1_splitter___rarg___boxed(lean_object*, lean_object*, lean_object*); +static lean_object* l___auto____x40_Init_Data_Array_Lemmas___hyg_18368____closed__22; LEAN_EXPORT lean_object* l___private_Init_Data_Array_Lemmas_0__Array_appendCore_loop_match__1_splitter(lean_object*); -static lean_object* l___auto____x40_Init_Data_Array_Lemmas___hyg_18364____closed__2; LEAN_EXPORT lean_object* l___private_Init_Data_Array_Lemmas_0__List_filterMap_match__1_splitter(lean_object*, lean_object*); LEAN_EXPORT lean_object* l_Array_foldlMUnsafe_fold___at_Array_toListRev___spec__1___rarg___boxed(lean_object*, lean_object*, lean_object*, lean_object*); LEAN_EXPORT lean_object* l___private_Init_Data_Array_Lemmas_0__Option_getD_match__1_splitter(lean_object*, lean_object*); +static lean_object* l___auto____x40_Init_Data_Array_Lemmas___hyg_18368____closed__20; lean_object* l_Lean_Name_str___override(lean_object*, lean_object*); -static lean_object* l___auto____x40_Init_Data_Array_Lemmas___hyg_18364____closed__1; +static lean_object* l___auto____x40_Init_Data_Array_Lemmas___hyg_18368____closed__27; LEAN_EXPORT lean_object* l___private_Init_Data_Array_Lemmas_0__List_filterMap__replicate_match__1_splitter(lean_object*, lean_object*); -static lean_object* l___auto____x40_Init_Data_Array_Lemmas___hyg_18364____closed__23; +static lean_object* l___auto____x40_Init_Data_Array_Lemmas___hyg_18368____closed__30; LEAN_EXPORT lean_object* l___private_Init_Data_Array_Lemmas_0__Array_isEqvAux_match__1_splitter___rarg___boxed(lean_object*, lean_object*, lean_object*, lean_object*); -static lean_object* l___auto____x40_Init_Data_Array_Lemmas___hyg_18364____closed__13; LEAN_EXPORT lean_object* l_Array_instDecidableExistsAndMemOfDecidablePred___rarg(lean_object*, lean_object*, lean_object*); -static lean_object* 
l___auto____x40_Init_Data_Array_Lemmas___hyg_18364____closed__26; -static lean_object* l___auto____x40_Init_Data_Array_Lemmas___hyg_18364____closed__12; LEAN_EXPORT lean_object* l___private_Init_Data_Array_Lemmas_0__Array_isEqvAux_match__1_splitter___rarg(lean_object*, lean_object*, lean_object*, lean_object*); LEAN_EXPORT lean_object* l___private_Init_Data_Array_Lemmas_0__Array_findSomeRevM_x3f_find_match__1_splitter(lean_object*, lean_object*); LEAN_EXPORT lean_object* l_Array_foldlMUnsafe_fold___at_Array_toListRev___spec__1(lean_object*); LEAN_EXPORT lean_object* l___private_Init_Data_Array_Lemmas_0__Array_filterMap__push_match__1_splitter(lean_object*, lean_object*); -static lean_object* l___auto____x40_Init_Data_Array_Lemmas___hyg_18364____closed__27; +LEAN_EXPORT lean_object* l___auto____x40_Init_Data_Array_Lemmas___hyg_18368_; +static lean_object* l___auto____x40_Init_Data_Array_Lemmas___hyg_18368____closed__19; lean_object* lean_array_fget(lean_object*, lean_object*); LEAN_EXPORT lean_object* l_Array_instDecidableForallForallMemOfDecidablePred(lean_object*); LEAN_EXPORT lean_object* l___private_Init_Data_Array_Lemmas_0__List_anyM_match__1_splitter___rarg___boxed(lean_object*, lean_object*, lean_object*); LEAN_EXPORT lean_object* l___private_Init_Data_Array_Lemmas_0__Array_foldlM_loop_match__1_splitter___rarg___boxed(lean_object*, lean_object*, lean_object*); uint8_t lean_nat_dec_eq(lean_object*, lean_object*); uint8_t l_Array_contains___rarg(lean_object*, lean_object*, lean_object*); +static lean_object* l___auto____x40_Init_Data_Array_Lemmas___hyg_18368____closed__9; uint8_t lean_nat_dec_lt(lean_object*, lean_object*); LEAN_EXPORT lean_object* l_Array_instDecidableMemOfLawfulBEq___rarg___boxed(lean_object*, lean_object*, lean_object*, lean_object*); -static lean_object* l___auto____x40_Init_Data_Array_Lemmas___hyg_18364____closed__11; LEAN_EXPORT lean_object* l___private_Init_Data_Array_Lemmas_0__Option_getD_match__1_splitter___rarg___boxed(lean_object*, lean_object*, lean_object*); -static lean_object* l___auto____x40_Init_Data_Array_Lemmas___hyg_18364____closed__30; -LEAN_EXPORT lean_object* l___auto____x40_Init_Data_Array_Lemmas___hyg_18364_; lean_object* lean_nat_sub(lean_object*, lean_object*); LEAN_EXPORT lean_object* l___private_Init_Data_Array_Lemmas_0__List_anyM_match__1_splitter(lean_object*); LEAN_EXPORT lean_object* l___private_Init_Data_Array_Lemmas_0__Array_erase_match__1_splitter___rarg___boxed(lean_object*, lean_object*, lean_object*); LEAN_EXPORT lean_object* l___private_Init_Data_Array_Lemmas_0__GetElem_x3f_match__1_splitter___rarg(lean_object*, lean_object*, lean_object*); LEAN_EXPORT lean_object* l_Array_instDecidableForallForallMemOfDecidablePred___rarg___lambda__1(lean_object*, lean_object*, lean_object*, lean_object*); LEAN_EXPORT lean_object* l___private_Init_Data_Array_Lemmas_0__Array_appendCore_loop_match__1_splitter___rarg(lean_object*, lean_object*, lean_object*); +static lean_object* l___auto____x40_Init_Data_Array_Lemmas___hyg_18368____closed__21; +static lean_object* l___auto____x40_Init_Data_Array_Lemmas___hyg_18368____closed__13; size_t lean_usize_add(size_t, size_t); lean_object* lean_array_uget(lean_object*, size_t); +static lean_object* l___auto____x40_Init_Data_Array_Lemmas___hyg_18368____closed__17; LEAN_EXPORT lean_object* l___private_Init_Data_Array_Lemmas_0__List_foldl__filterMap_match__1_splitter___rarg(lean_object*, lean_object*, lean_object*); lean_object* l_Lean_Name_mkStr4(lean_object*, lean_object*, lean_object*, lean_object*); 
-static lean_object* l___auto____x40_Init_Data_Array_Lemmas___hyg_18364____closed__9; LEAN_EXPORT lean_object* l_Array_toListRev(lean_object*); LEAN_EXPORT lean_object* l_Array_instDecidableForallForallMemOfDecidablePred___rarg___lambda__1___boxed(lean_object*, lean_object*, lean_object*, lean_object*); lean_object* lean_array_get_size(lean_object*); LEAN_EXPORT lean_object* l___private_Init_Data_Array_Lemmas_0__Array_shrink_loop_match__1_splitter(lean_object*, lean_object*); +static lean_object* l___auto____x40_Init_Data_Array_Lemmas___hyg_18368____closed__4; uint8_t lean_nat_dec_le(lean_object*, lean_object*); +static lean_object* l___auto____x40_Init_Data_Array_Lemmas___hyg_18368____closed__25; lean_object* l_Nat_decidableBallLT(lean_object*, lean_object*, lean_object*); -static lean_object* l___auto____x40_Init_Data_Array_Lemmas___hyg_18364____closed__3; LEAN_EXPORT lean_object* l___private_Init_Data_Array_Lemmas_0__List_foldl__filterMap_match__1_splitter___rarg___boxed(lean_object*, lean_object*, lean_object*); -static lean_object* l___auto____x40_Init_Data_Array_Lemmas___hyg_18364____closed__25; LEAN_EXPORT lean_object* l___private_Init_Data_Array_Lemmas_0__GetElem_x3f_match__1_splitter(lean_object*, lean_object*); LEAN_EXPORT lean_object* l___private_Init_Data_Array_Lemmas_0__Array_foldl__filterMap_x27_match__1_splitter(lean_object*, lean_object*); -static lean_object* l___auto____x40_Init_Data_Array_Lemmas___hyg_18364____closed__28; +static lean_object* l___auto____x40_Init_Data_Array_Lemmas___hyg_18368____closed__7; +static lean_object* l___auto____x40_Init_Data_Array_Lemmas___hyg_18368____closed__31; +static lean_object* l___auto____x40_Init_Data_Array_Lemmas___hyg_18368____closed__24; lean_object* lean_mk_empty_array_with_capacity(lean_object*); -static lean_object* l___auto____x40_Init_Data_Array_Lemmas___hyg_18364____closed__18; +static lean_object* l___auto____x40_Init_Data_Array_Lemmas___hyg_18368____closed__28; +static lean_object* l___auto____x40_Init_Data_Array_Lemmas___hyg_18368____closed__26; LEAN_EXPORT lean_object* l_Array_instDecidableForallForallMemOfDecidablePred___rarg___lambda__1(lean_object* x_1, lean_object* x_2, lean_object* x_3, lean_object* x_4) { _start: { @@ -354,7 +354,7 @@ lean_dec(x_2); return x_4; } } -static lean_object* _init_l___auto____x40_Init_Data_Array_Lemmas___hyg_18364____closed__1() { +static lean_object* _init_l___auto____x40_Init_Data_Array_Lemmas___hyg_18368____closed__1() { _start: { lean_object* x_1; @@ -362,7 +362,7 @@ x_1 = lean_mk_string_unchecked("Lean", 4, 4); return x_1; } } -static lean_object* _init_l___auto____x40_Init_Data_Array_Lemmas___hyg_18364____closed__2() { +static lean_object* _init_l___auto____x40_Init_Data_Array_Lemmas___hyg_18368____closed__2() { _start: { lean_object* x_1; @@ -370,7 +370,7 @@ x_1 = lean_mk_string_unchecked("Parser", 6, 6); return x_1; } } -static lean_object* _init_l___auto____x40_Init_Data_Array_Lemmas___hyg_18364____closed__3() { +static lean_object* _init_l___auto____x40_Init_Data_Array_Lemmas___hyg_18368____closed__3() { _start: { lean_object* x_1; @@ -378,7 +378,7 @@ x_1 = lean_mk_string_unchecked("Tactic", 6, 6); return x_1; } } -static lean_object* _init_l___auto____x40_Init_Data_Array_Lemmas___hyg_18364____closed__4() { +static lean_object* _init_l___auto____x40_Init_Data_Array_Lemmas___hyg_18368____closed__4() { _start: { lean_object* x_1; @@ -386,19 +386,19 @@ x_1 = lean_mk_string_unchecked("tacticSeq", 9, 9); return x_1; } } -static lean_object* 
_init_l___auto____x40_Init_Data_Array_Lemmas___hyg_18364____closed__5() { +static lean_object* _init_l___auto____x40_Init_Data_Array_Lemmas___hyg_18368____closed__5() { _start: { lean_object* x_1; lean_object* x_2; lean_object* x_3; lean_object* x_4; lean_object* x_5; -x_1 = l___auto____x40_Init_Data_Array_Lemmas___hyg_18364____closed__1; -x_2 = l___auto____x40_Init_Data_Array_Lemmas___hyg_18364____closed__2; -x_3 = l___auto____x40_Init_Data_Array_Lemmas___hyg_18364____closed__3; -x_4 = l___auto____x40_Init_Data_Array_Lemmas___hyg_18364____closed__4; +x_1 = l___auto____x40_Init_Data_Array_Lemmas___hyg_18368____closed__1; +x_2 = l___auto____x40_Init_Data_Array_Lemmas___hyg_18368____closed__2; +x_3 = l___auto____x40_Init_Data_Array_Lemmas___hyg_18368____closed__3; +x_4 = l___auto____x40_Init_Data_Array_Lemmas___hyg_18368____closed__4; x_5 = l_Lean_Name_mkStr4(x_1, x_2, x_3, x_4); return x_5; } } -static lean_object* _init_l___auto____x40_Init_Data_Array_Lemmas___hyg_18364____closed__6() { +static lean_object* _init_l___auto____x40_Init_Data_Array_Lemmas___hyg_18368____closed__6() { _start: { lean_object* x_1; lean_object* x_2; @@ -407,7 +407,7 @@ x_2 = lean_mk_empty_array_with_capacity(x_1); return x_2; } } -static lean_object* _init_l___auto____x40_Init_Data_Array_Lemmas___hyg_18364____closed__7() { +static lean_object* _init_l___auto____x40_Init_Data_Array_Lemmas___hyg_18368____closed__7() { _start: { lean_object* x_1; @@ -415,19 +415,19 @@ x_1 = lean_mk_string_unchecked("tacticSeq1Indented", 18, 18); return x_1; } } -static lean_object* _init_l___auto____x40_Init_Data_Array_Lemmas___hyg_18364____closed__8() { +static lean_object* _init_l___auto____x40_Init_Data_Array_Lemmas___hyg_18368____closed__8() { _start: { lean_object* x_1; lean_object* x_2; lean_object* x_3; lean_object* x_4; lean_object* x_5; -x_1 = l___auto____x40_Init_Data_Array_Lemmas___hyg_18364____closed__1; -x_2 = l___auto____x40_Init_Data_Array_Lemmas___hyg_18364____closed__2; -x_3 = l___auto____x40_Init_Data_Array_Lemmas___hyg_18364____closed__3; -x_4 = l___auto____x40_Init_Data_Array_Lemmas___hyg_18364____closed__7; +x_1 = l___auto____x40_Init_Data_Array_Lemmas___hyg_18368____closed__1; +x_2 = l___auto____x40_Init_Data_Array_Lemmas___hyg_18368____closed__2; +x_3 = l___auto____x40_Init_Data_Array_Lemmas___hyg_18368____closed__3; +x_4 = l___auto____x40_Init_Data_Array_Lemmas___hyg_18368____closed__7; x_5 = l_Lean_Name_mkStr4(x_1, x_2, x_3, x_4); return x_5; } } -static lean_object* _init_l___auto____x40_Init_Data_Array_Lemmas___hyg_18364____closed__9() { +static lean_object* _init_l___auto____x40_Init_Data_Array_Lemmas___hyg_18368____closed__9() { _start: { lean_object* x_1; @@ -435,17 +435,17 @@ x_1 = lean_mk_string_unchecked("null", 4, 4); return x_1; } } -static lean_object* _init_l___auto____x40_Init_Data_Array_Lemmas___hyg_18364____closed__10() { +static lean_object* _init_l___auto____x40_Init_Data_Array_Lemmas___hyg_18368____closed__10() { _start: { lean_object* x_1; lean_object* x_2; lean_object* x_3; x_1 = lean_box(0); -x_2 = l___auto____x40_Init_Data_Array_Lemmas___hyg_18364____closed__9; +x_2 = l___auto____x40_Init_Data_Array_Lemmas___hyg_18368____closed__9; x_3 = l_Lean_Name_str___override(x_1, x_2); return x_3; } } -static lean_object* _init_l___auto____x40_Init_Data_Array_Lemmas___hyg_18364____closed__11() { +static lean_object* _init_l___auto____x40_Init_Data_Array_Lemmas___hyg_18368____closed__11() { _start: { lean_object* x_1; @@ -453,41 +453,41 @@ x_1 = lean_mk_string_unchecked("simp", 4, 4); return x_1; } 
} -static lean_object* _init_l___auto____x40_Init_Data_Array_Lemmas___hyg_18364____closed__12() { +static lean_object* _init_l___auto____x40_Init_Data_Array_Lemmas___hyg_18368____closed__12() { _start: { lean_object* x_1; lean_object* x_2; lean_object* x_3; lean_object* x_4; lean_object* x_5; -x_1 = l___auto____x40_Init_Data_Array_Lemmas___hyg_18364____closed__1; -x_2 = l___auto____x40_Init_Data_Array_Lemmas___hyg_18364____closed__2; -x_3 = l___auto____x40_Init_Data_Array_Lemmas___hyg_18364____closed__3; -x_4 = l___auto____x40_Init_Data_Array_Lemmas___hyg_18364____closed__11; +x_1 = l___auto____x40_Init_Data_Array_Lemmas___hyg_18368____closed__1; +x_2 = l___auto____x40_Init_Data_Array_Lemmas___hyg_18368____closed__2; +x_3 = l___auto____x40_Init_Data_Array_Lemmas___hyg_18368____closed__3; +x_4 = l___auto____x40_Init_Data_Array_Lemmas___hyg_18368____closed__11; x_5 = l_Lean_Name_mkStr4(x_1, x_2, x_3, x_4); return x_5; } } -static lean_object* _init_l___auto____x40_Init_Data_Array_Lemmas___hyg_18364____closed__13() { +static lean_object* _init_l___auto____x40_Init_Data_Array_Lemmas___hyg_18368____closed__13() { _start: { lean_object* x_1; lean_object* x_2; lean_object* x_3; x_1 = lean_box(2); -x_2 = l___auto____x40_Init_Data_Array_Lemmas___hyg_18364____closed__11; +x_2 = l___auto____x40_Init_Data_Array_Lemmas___hyg_18368____closed__11; x_3 = lean_alloc_ctor(2, 2, 0); lean_ctor_set(x_3, 0, x_1); lean_ctor_set(x_3, 1, x_2); return x_3; } } -static lean_object* _init_l___auto____x40_Init_Data_Array_Lemmas___hyg_18364____closed__14() { +static lean_object* _init_l___auto____x40_Init_Data_Array_Lemmas___hyg_18368____closed__14() { _start: { lean_object* x_1; lean_object* x_2; lean_object* x_3; -x_1 = l___auto____x40_Init_Data_Array_Lemmas___hyg_18364____closed__6; -x_2 = l___auto____x40_Init_Data_Array_Lemmas___hyg_18364____closed__13; +x_1 = l___auto____x40_Init_Data_Array_Lemmas___hyg_18368____closed__6; +x_2 = l___auto____x40_Init_Data_Array_Lemmas___hyg_18368____closed__13; x_3 = lean_array_push(x_1, x_2); return x_3; } } -static lean_object* _init_l___auto____x40_Init_Data_Array_Lemmas___hyg_18364____closed__15() { +static lean_object* _init_l___auto____x40_Init_Data_Array_Lemmas___hyg_18368____closed__15() { _start: { lean_object* x_1; @@ -495,25 +495,25 @@ x_1 = lean_mk_string_unchecked("optConfig", 9, 9); return x_1; } } -static lean_object* _init_l___auto____x40_Init_Data_Array_Lemmas___hyg_18364____closed__16() { +static lean_object* _init_l___auto____x40_Init_Data_Array_Lemmas___hyg_18368____closed__16() { _start: { lean_object* x_1; lean_object* x_2; lean_object* x_3; lean_object* x_4; lean_object* x_5; -x_1 = l___auto____x40_Init_Data_Array_Lemmas___hyg_18364____closed__1; -x_2 = l___auto____x40_Init_Data_Array_Lemmas___hyg_18364____closed__2; -x_3 = l___auto____x40_Init_Data_Array_Lemmas___hyg_18364____closed__3; -x_4 = l___auto____x40_Init_Data_Array_Lemmas___hyg_18364____closed__15; +x_1 = l___auto____x40_Init_Data_Array_Lemmas___hyg_18368____closed__1; +x_2 = l___auto____x40_Init_Data_Array_Lemmas___hyg_18368____closed__2; +x_3 = l___auto____x40_Init_Data_Array_Lemmas___hyg_18368____closed__3; +x_4 = l___auto____x40_Init_Data_Array_Lemmas___hyg_18368____closed__15; x_5 = l_Lean_Name_mkStr4(x_1, x_2, x_3, x_4); return x_5; } } -static lean_object* _init_l___auto____x40_Init_Data_Array_Lemmas___hyg_18364____closed__17() { +static lean_object* _init_l___auto____x40_Init_Data_Array_Lemmas___hyg_18368____closed__17() { _start: { lean_object* x_1; lean_object* x_2; lean_object* x_3; 
lean_object* x_4; x_1 = lean_box(2); -x_2 = l___auto____x40_Init_Data_Array_Lemmas___hyg_18364____closed__10; -x_3 = l___auto____x40_Init_Data_Array_Lemmas___hyg_18364____closed__6; +x_2 = l___auto____x40_Init_Data_Array_Lemmas___hyg_18368____closed__10; +x_3 = l___auto____x40_Init_Data_Array_Lemmas___hyg_18368____closed__6; x_4 = lean_alloc_ctor(1, 3, 0); lean_ctor_set(x_4, 0, x_1); lean_ctor_set(x_4, 1, x_2); @@ -521,23 +521,23 @@ lean_ctor_set(x_4, 2, x_3); return x_4; } } -static lean_object* _init_l___auto____x40_Init_Data_Array_Lemmas___hyg_18364____closed__18() { +static lean_object* _init_l___auto____x40_Init_Data_Array_Lemmas___hyg_18368____closed__18() { _start: { lean_object* x_1; lean_object* x_2; lean_object* x_3; -x_1 = l___auto____x40_Init_Data_Array_Lemmas___hyg_18364____closed__6; -x_2 = l___auto____x40_Init_Data_Array_Lemmas___hyg_18364____closed__17; +x_1 = l___auto____x40_Init_Data_Array_Lemmas___hyg_18368____closed__6; +x_2 = l___auto____x40_Init_Data_Array_Lemmas___hyg_18368____closed__17; x_3 = lean_array_push(x_1, x_2); return x_3; } } -static lean_object* _init_l___auto____x40_Init_Data_Array_Lemmas___hyg_18364____closed__19() { +static lean_object* _init_l___auto____x40_Init_Data_Array_Lemmas___hyg_18368____closed__19() { _start: { lean_object* x_1; lean_object* x_2; lean_object* x_3; lean_object* x_4; x_1 = lean_box(2); -x_2 = l___auto____x40_Init_Data_Array_Lemmas___hyg_18364____closed__16; -x_3 = l___auto____x40_Init_Data_Array_Lemmas___hyg_18364____closed__18; +x_2 = l___auto____x40_Init_Data_Array_Lemmas___hyg_18368____closed__16; +x_3 = l___auto____x40_Init_Data_Array_Lemmas___hyg_18368____closed__18; x_4 = lean_alloc_ctor(1, 3, 0); lean_ctor_set(x_4, 0, x_1); lean_ctor_set(x_4, 1, x_2); @@ -545,63 +545,63 @@ lean_ctor_set(x_4, 2, x_3); return x_4; } } -static lean_object* _init_l___auto____x40_Init_Data_Array_Lemmas___hyg_18364____closed__20() { +static lean_object* _init_l___auto____x40_Init_Data_Array_Lemmas___hyg_18368____closed__20() { _start: { lean_object* x_1; lean_object* x_2; lean_object* x_3; -x_1 = l___auto____x40_Init_Data_Array_Lemmas___hyg_18364____closed__14; -x_2 = l___auto____x40_Init_Data_Array_Lemmas___hyg_18364____closed__19; +x_1 = l___auto____x40_Init_Data_Array_Lemmas___hyg_18368____closed__14; +x_2 = l___auto____x40_Init_Data_Array_Lemmas___hyg_18368____closed__19; x_3 = lean_array_push(x_1, x_2); return x_3; } } -static lean_object* _init_l___auto____x40_Init_Data_Array_Lemmas___hyg_18364____closed__21() { +static lean_object* _init_l___auto____x40_Init_Data_Array_Lemmas___hyg_18368____closed__21() { _start: { lean_object* x_1; lean_object* x_2; lean_object* x_3; -x_1 = l___auto____x40_Init_Data_Array_Lemmas___hyg_18364____closed__20; -x_2 = l___auto____x40_Init_Data_Array_Lemmas___hyg_18364____closed__17; +x_1 = l___auto____x40_Init_Data_Array_Lemmas___hyg_18368____closed__20; +x_2 = l___auto____x40_Init_Data_Array_Lemmas___hyg_18368____closed__17; x_3 = lean_array_push(x_1, x_2); return x_3; } } -static lean_object* _init_l___auto____x40_Init_Data_Array_Lemmas___hyg_18364____closed__22() { +static lean_object* _init_l___auto____x40_Init_Data_Array_Lemmas___hyg_18368____closed__22() { _start: { lean_object* x_1; lean_object* x_2; lean_object* x_3; -x_1 = l___auto____x40_Init_Data_Array_Lemmas___hyg_18364____closed__21; -x_2 = l___auto____x40_Init_Data_Array_Lemmas___hyg_18364____closed__17; +x_1 = l___auto____x40_Init_Data_Array_Lemmas___hyg_18368____closed__21; +x_2 = 
l___auto____x40_Init_Data_Array_Lemmas___hyg_18368____closed__17; x_3 = lean_array_push(x_1, x_2); return x_3; } } -static lean_object* _init_l___auto____x40_Init_Data_Array_Lemmas___hyg_18364____closed__23() { +static lean_object* _init_l___auto____x40_Init_Data_Array_Lemmas___hyg_18368____closed__23() { _start: { lean_object* x_1; lean_object* x_2; lean_object* x_3; -x_1 = l___auto____x40_Init_Data_Array_Lemmas___hyg_18364____closed__22; -x_2 = l___auto____x40_Init_Data_Array_Lemmas___hyg_18364____closed__17; +x_1 = l___auto____x40_Init_Data_Array_Lemmas___hyg_18368____closed__22; +x_2 = l___auto____x40_Init_Data_Array_Lemmas___hyg_18368____closed__17; x_3 = lean_array_push(x_1, x_2); return x_3; } } -static lean_object* _init_l___auto____x40_Init_Data_Array_Lemmas___hyg_18364____closed__24() { +static lean_object* _init_l___auto____x40_Init_Data_Array_Lemmas___hyg_18368____closed__24() { _start: { lean_object* x_1; lean_object* x_2; lean_object* x_3; -x_1 = l___auto____x40_Init_Data_Array_Lemmas___hyg_18364____closed__23; -x_2 = l___auto____x40_Init_Data_Array_Lemmas___hyg_18364____closed__17; +x_1 = l___auto____x40_Init_Data_Array_Lemmas___hyg_18368____closed__23; +x_2 = l___auto____x40_Init_Data_Array_Lemmas___hyg_18368____closed__17; x_3 = lean_array_push(x_1, x_2); return x_3; } } -static lean_object* _init_l___auto____x40_Init_Data_Array_Lemmas___hyg_18364____closed__25() { +static lean_object* _init_l___auto____x40_Init_Data_Array_Lemmas___hyg_18368____closed__25() { _start: { lean_object* x_1; lean_object* x_2; lean_object* x_3; lean_object* x_4; x_1 = lean_box(2); -x_2 = l___auto____x40_Init_Data_Array_Lemmas___hyg_18364____closed__12; -x_3 = l___auto____x40_Init_Data_Array_Lemmas___hyg_18364____closed__24; +x_2 = l___auto____x40_Init_Data_Array_Lemmas___hyg_18368____closed__12; +x_3 = l___auto____x40_Init_Data_Array_Lemmas___hyg_18368____closed__24; x_4 = lean_alloc_ctor(1, 3, 0); lean_ctor_set(x_4, 0, x_1); lean_ctor_set(x_4, 1, x_2); @@ -609,23 +609,23 @@ lean_ctor_set(x_4, 2, x_3); return x_4; } } -static lean_object* _init_l___auto____x40_Init_Data_Array_Lemmas___hyg_18364____closed__26() { +static lean_object* _init_l___auto____x40_Init_Data_Array_Lemmas___hyg_18368____closed__26() { _start: { lean_object* x_1; lean_object* x_2; lean_object* x_3; -x_1 = l___auto____x40_Init_Data_Array_Lemmas___hyg_18364____closed__6; -x_2 = l___auto____x40_Init_Data_Array_Lemmas___hyg_18364____closed__25; +x_1 = l___auto____x40_Init_Data_Array_Lemmas___hyg_18368____closed__6; +x_2 = l___auto____x40_Init_Data_Array_Lemmas___hyg_18368____closed__25; x_3 = lean_array_push(x_1, x_2); return x_3; } } -static lean_object* _init_l___auto____x40_Init_Data_Array_Lemmas___hyg_18364____closed__27() { +static lean_object* _init_l___auto____x40_Init_Data_Array_Lemmas___hyg_18368____closed__27() { _start: { lean_object* x_1; lean_object* x_2; lean_object* x_3; lean_object* x_4; x_1 = lean_box(2); -x_2 = l___auto____x40_Init_Data_Array_Lemmas___hyg_18364____closed__10; -x_3 = l___auto____x40_Init_Data_Array_Lemmas___hyg_18364____closed__26; +x_2 = l___auto____x40_Init_Data_Array_Lemmas___hyg_18368____closed__10; +x_3 = l___auto____x40_Init_Data_Array_Lemmas___hyg_18368____closed__26; x_4 = lean_alloc_ctor(1, 3, 0); lean_ctor_set(x_4, 0, x_1); lean_ctor_set(x_4, 1, x_2); @@ -633,23 +633,23 @@ lean_ctor_set(x_4, 2, x_3); return x_4; } } -static lean_object* _init_l___auto____x40_Init_Data_Array_Lemmas___hyg_18364____closed__28() { +static lean_object* 
_init_l___auto____x40_Init_Data_Array_Lemmas___hyg_18368____closed__28() { _start: { lean_object* x_1; lean_object* x_2; lean_object* x_3; -x_1 = l___auto____x40_Init_Data_Array_Lemmas___hyg_18364____closed__6; -x_2 = l___auto____x40_Init_Data_Array_Lemmas___hyg_18364____closed__27; +x_1 = l___auto____x40_Init_Data_Array_Lemmas___hyg_18368____closed__6; +x_2 = l___auto____x40_Init_Data_Array_Lemmas___hyg_18368____closed__27; x_3 = lean_array_push(x_1, x_2); return x_3; } } -static lean_object* _init_l___auto____x40_Init_Data_Array_Lemmas___hyg_18364____closed__29() { +static lean_object* _init_l___auto____x40_Init_Data_Array_Lemmas___hyg_18368____closed__29() { _start: { lean_object* x_1; lean_object* x_2; lean_object* x_3; lean_object* x_4; x_1 = lean_box(2); -x_2 = l___auto____x40_Init_Data_Array_Lemmas___hyg_18364____closed__8; -x_3 = l___auto____x40_Init_Data_Array_Lemmas___hyg_18364____closed__28; +x_2 = l___auto____x40_Init_Data_Array_Lemmas___hyg_18368____closed__8; +x_3 = l___auto____x40_Init_Data_Array_Lemmas___hyg_18368____closed__28; x_4 = lean_alloc_ctor(1, 3, 0); lean_ctor_set(x_4, 0, x_1); lean_ctor_set(x_4, 1, x_2); @@ -657,23 +657,23 @@ lean_ctor_set(x_4, 2, x_3); return x_4; } } -static lean_object* _init_l___auto____x40_Init_Data_Array_Lemmas___hyg_18364____closed__30() { +static lean_object* _init_l___auto____x40_Init_Data_Array_Lemmas___hyg_18368____closed__30() { _start: { lean_object* x_1; lean_object* x_2; lean_object* x_3; -x_1 = l___auto____x40_Init_Data_Array_Lemmas___hyg_18364____closed__6; -x_2 = l___auto____x40_Init_Data_Array_Lemmas___hyg_18364____closed__29; +x_1 = l___auto____x40_Init_Data_Array_Lemmas___hyg_18368____closed__6; +x_2 = l___auto____x40_Init_Data_Array_Lemmas___hyg_18368____closed__29; x_3 = lean_array_push(x_1, x_2); return x_3; } } -static lean_object* _init_l___auto____x40_Init_Data_Array_Lemmas___hyg_18364____closed__31() { +static lean_object* _init_l___auto____x40_Init_Data_Array_Lemmas___hyg_18368____closed__31() { _start: { lean_object* x_1; lean_object* x_2; lean_object* x_3; lean_object* x_4; x_1 = lean_box(2); -x_2 = l___auto____x40_Init_Data_Array_Lemmas___hyg_18364____closed__5; -x_3 = l___auto____x40_Init_Data_Array_Lemmas___hyg_18364____closed__30; +x_2 = l___auto____x40_Init_Data_Array_Lemmas___hyg_18368____closed__5; +x_3 = l___auto____x40_Init_Data_Array_Lemmas___hyg_18368____closed__30; x_4 = lean_alloc_ctor(1, 3, 0); lean_ctor_set(x_4, 0, x_1); lean_ctor_set(x_4, 1, x_2); @@ -681,11 +681,11 @@ lean_ctor_set(x_4, 2, x_3); return x_4; } } -static lean_object* _init_l___auto____x40_Init_Data_Array_Lemmas___hyg_18364_() { +static lean_object* _init_l___auto____x40_Init_Data_Array_Lemmas___hyg_18368_() { _start: { lean_object* x_1; -x_1 = l___auto____x40_Init_Data_Array_Lemmas___hyg_18364____closed__31; +x_1 = l___auto____x40_Init_Data_Array_Lemmas___hyg_18368____closed__31; return x_1; } } @@ -1244,70 +1244,70 @@ lean_dec_ref(res); res = initialize_Init_Data_List_ToArray(builtin, lean_io_mk_world()); if (lean_io_result_is_error(res)) return res; lean_dec_ref(res); -l___auto____x40_Init_Data_Array_Lemmas___hyg_18364____closed__1 = _init_l___auto____x40_Init_Data_Array_Lemmas___hyg_18364____closed__1(); -lean_mark_persistent(l___auto____x40_Init_Data_Array_Lemmas___hyg_18364____closed__1); -l___auto____x40_Init_Data_Array_Lemmas___hyg_18364____closed__2 = _init_l___auto____x40_Init_Data_Array_Lemmas___hyg_18364____closed__2(); -lean_mark_persistent(l___auto____x40_Init_Data_Array_Lemmas___hyg_18364____closed__2); 
-l___auto____x40_Init_Data_Array_Lemmas___hyg_18364____closed__3 = _init_l___auto____x40_Init_Data_Array_Lemmas___hyg_18364____closed__3(); -lean_mark_persistent(l___auto____x40_Init_Data_Array_Lemmas___hyg_18364____closed__3); -l___auto____x40_Init_Data_Array_Lemmas___hyg_18364____closed__4 = _init_l___auto____x40_Init_Data_Array_Lemmas___hyg_18364____closed__4(); -lean_mark_persistent(l___auto____x40_Init_Data_Array_Lemmas___hyg_18364____closed__4); -l___auto____x40_Init_Data_Array_Lemmas___hyg_18364____closed__5 = _init_l___auto____x40_Init_Data_Array_Lemmas___hyg_18364____closed__5(); -lean_mark_persistent(l___auto____x40_Init_Data_Array_Lemmas___hyg_18364____closed__5); -l___auto____x40_Init_Data_Array_Lemmas___hyg_18364____closed__6 = _init_l___auto____x40_Init_Data_Array_Lemmas___hyg_18364____closed__6(); -lean_mark_persistent(l___auto____x40_Init_Data_Array_Lemmas___hyg_18364____closed__6); -l___auto____x40_Init_Data_Array_Lemmas___hyg_18364____closed__7 = _init_l___auto____x40_Init_Data_Array_Lemmas___hyg_18364____closed__7(); -lean_mark_persistent(l___auto____x40_Init_Data_Array_Lemmas___hyg_18364____closed__7); -l___auto____x40_Init_Data_Array_Lemmas___hyg_18364____closed__8 = _init_l___auto____x40_Init_Data_Array_Lemmas___hyg_18364____closed__8(); -lean_mark_persistent(l___auto____x40_Init_Data_Array_Lemmas___hyg_18364____closed__8); -l___auto____x40_Init_Data_Array_Lemmas___hyg_18364____closed__9 = _init_l___auto____x40_Init_Data_Array_Lemmas___hyg_18364____closed__9(); -lean_mark_persistent(l___auto____x40_Init_Data_Array_Lemmas___hyg_18364____closed__9); -l___auto____x40_Init_Data_Array_Lemmas___hyg_18364____closed__10 = _init_l___auto____x40_Init_Data_Array_Lemmas___hyg_18364____closed__10(); -lean_mark_persistent(l___auto____x40_Init_Data_Array_Lemmas___hyg_18364____closed__10); -l___auto____x40_Init_Data_Array_Lemmas___hyg_18364____closed__11 = _init_l___auto____x40_Init_Data_Array_Lemmas___hyg_18364____closed__11(); -lean_mark_persistent(l___auto____x40_Init_Data_Array_Lemmas___hyg_18364____closed__11); -l___auto____x40_Init_Data_Array_Lemmas___hyg_18364____closed__12 = _init_l___auto____x40_Init_Data_Array_Lemmas___hyg_18364____closed__12(); -lean_mark_persistent(l___auto____x40_Init_Data_Array_Lemmas___hyg_18364____closed__12); -l___auto____x40_Init_Data_Array_Lemmas___hyg_18364____closed__13 = _init_l___auto____x40_Init_Data_Array_Lemmas___hyg_18364____closed__13(); -lean_mark_persistent(l___auto____x40_Init_Data_Array_Lemmas___hyg_18364____closed__13); -l___auto____x40_Init_Data_Array_Lemmas___hyg_18364____closed__14 = _init_l___auto____x40_Init_Data_Array_Lemmas___hyg_18364____closed__14(); -lean_mark_persistent(l___auto____x40_Init_Data_Array_Lemmas___hyg_18364____closed__14); -l___auto____x40_Init_Data_Array_Lemmas___hyg_18364____closed__15 = _init_l___auto____x40_Init_Data_Array_Lemmas___hyg_18364____closed__15(); -lean_mark_persistent(l___auto____x40_Init_Data_Array_Lemmas___hyg_18364____closed__15); -l___auto____x40_Init_Data_Array_Lemmas___hyg_18364____closed__16 = _init_l___auto____x40_Init_Data_Array_Lemmas___hyg_18364____closed__16(); -lean_mark_persistent(l___auto____x40_Init_Data_Array_Lemmas___hyg_18364____closed__16); -l___auto____x40_Init_Data_Array_Lemmas___hyg_18364____closed__17 = _init_l___auto____x40_Init_Data_Array_Lemmas___hyg_18364____closed__17(); -lean_mark_persistent(l___auto____x40_Init_Data_Array_Lemmas___hyg_18364____closed__17); -l___auto____x40_Init_Data_Array_Lemmas___hyg_18364____closed__18 = 
_init_l___auto____x40_Init_Data_Array_Lemmas___hyg_18364____closed__18(); -lean_mark_persistent(l___auto____x40_Init_Data_Array_Lemmas___hyg_18364____closed__18); -l___auto____x40_Init_Data_Array_Lemmas___hyg_18364____closed__19 = _init_l___auto____x40_Init_Data_Array_Lemmas___hyg_18364____closed__19(); -lean_mark_persistent(l___auto____x40_Init_Data_Array_Lemmas___hyg_18364____closed__19); -l___auto____x40_Init_Data_Array_Lemmas___hyg_18364____closed__20 = _init_l___auto____x40_Init_Data_Array_Lemmas___hyg_18364____closed__20(); -lean_mark_persistent(l___auto____x40_Init_Data_Array_Lemmas___hyg_18364____closed__20); -l___auto____x40_Init_Data_Array_Lemmas___hyg_18364____closed__21 = _init_l___auto____x40_Init_Data_Array_Lemmas___hyg_18364____closed__21(); -lean_mark_persistent(l___auto____x40_Init_Data_Array_Lemmas___hyg_18364____closed__21); -l___auto____x40_Init_Data_Array_Lemmas___hyg_18364____closed__22 = _init_l___auto____x40_Init_Data_Array_Lemmas___hyg_18364____closed__22(); -lean_mark_persistent(l___auto____x40_Init_Data_Array_Lemmas___hyg_18364____closed__22); -l___auto____x40_Init_Data_Array_Lemmas___hyg_18364____closed__23 = _init_l___auto____x40_Init_Data_Array_Lemmas___hyg_18364____closed__23(); -lean_mark_persistent(l___auto____x40_Init_Data_Array_Lemmas___hyg_18364____closed__23); -l___auto____x40_Init_Data_Array_Lemmas___hyg_18364____closed__24 = _init_l___auto____x40_Init_Data_Array_Lemmas___hyg_18364____closed__24(); -lean_mark_persistent(l___auto____x40_Init_Data_Array_Lemmas___hyg_18364____closed__24); -l___auto____x40_Init_Data_Array_Lemmas___hyg_18364____closed__25 = _init_l___auto____x40_Init_Data_Array_Lemmas___hyg_18364____closed__25(); -lean_mark_persistent(l___auto____x40_Init_Data_Array_Lemmas___hyg_18364____closed__25); -l___auto____x40_Init_Data_Array_Lemmas___hyg_18364____closed__26 = _init_l___auto____x40_Init_Data_Array_Lemmas___hyg_18364____closed__26(); -lean_mark_persistent(l___auto____x40_Init_Data_Array_Lemmas___hyg_18364____closed__26); -l___auto____x40_Init_Data_Array_Lemmas___hyg_18364____closed__27 = _init_l___auto____x40_Init_Data_Array_Lemmas___hyg_18364____closed__27(); -lean_mark_persistent(l___auto____x40_Init_Data_Array_Lemmas___hyg_18364____closed__27); -l___auto____x40_Init_Data_Array_Lemmas___hyg_18364____closed__28 = _init_l___auto____x40_Init_Data_Array_Lemmas___hyg_18364____closed__28(); -lean_mark_persistent(l___auto____x40_Init_Data_Array_Lemmas___hyg_18364____closed__28); -l___auto____x40_Init_Data_Array_Lemmas___hyg_18364____closed__29 = _init_l___auto____x40_Init_Data_Array_Lemmas___hyg_18364____closed__29(); -lean_mark_persistent(l___auto____x40_Init_Data_Array_Lemmas___hyg_18364____closed__29); -l___auto____x40_Init_Data_Array_Lemmas___hyg_18364____closed__30 = _init_l___auto____x40_Init_Data_Array_Lemmas___hyg_18364____closed__30(); -lean_mark_persistent(l___auto____x40_Init_Data_Array_Lemmas___hyg_18364____closed__30); -l___auto____x40_Init_Data_Array_Lemmas___hyg_18364____closed__31 = _init_l___auto____x40_Init_Data_Array_Lemmas___hyg_18364____closed__31(); -lean_mark_persistent(l___auto____x40_Init_Data_Array_Lemmas___hyg_18364____closed__31); -l___auto____x40_Init_Data_Array_Lemmas___hyg_18364_ = _init_l___auto____x40_Init_Data_Array_Lemmas___hyg_18364_(); -lean_mark_persistent(l___auto____x40_Init_Data_Array_Lemmas___hyg_18364_); +l___auto____x40_Init_Data_Array_Lemmas___hyg_18368____closed__1 = _init_l___auto____x40_Init_Data_Array_Lemmas___hyg_18368____closed__1(); 
+lean_mark_persistent(l___auto____x40_Init_Data_Array_Lemmas___hyg_18368____closed__1); +l___auto____x40_Init_Data_Array_Lemmas___hyg_18368____closed__2 = _init_l___auto____x40_Init_Data_Array_Lemmas___hyg_18368____closed__2(); +lean_mark_persistent(l___auto____x40_Init_Data_Array_Lemmas___hyg_18368____closed__2); +l___auto____x40_Init_Data_Array_Lemmas___hyg_18368____closed__3 = _init_l___auto____x40_Init_Data_Array_Lemmas___hyg_18368____closed__3(); +lean_mark_persistent(l___auto____x40_Init_Data_Array_Lemmas___hyg_18368____closed__3); +l___auto____x40_Init_Data_Array_Lemmas___hyg_18368____closed__4 = _init_l___auto____x40_Init_Data_Array_Lemmas___hyg_18368____closed__4(); +lean_mark_persistent(l___auto____x40_Init_Data_Array_Lemmas___hyg_18368____closed__4); +l___auto____x40_Init_Data_Array_Lemmas___hyg_18368____closed__5 = _init_l___auto____x40_Init_Data_Array_Lemmas___hyg_18368____closed__5(); +lean_mark_persistent(l___auto____x40_Init_Data_Array_Lemmas___hyg_18368____closed__5); +l___auto____x40_Init_Data_Array_Lemmas___hyg_18368____closed__6 = _init_l___auto____x40_Init_Data_Array_Lemmas___hyg_18368____closed__6(); +lean_mark_persistent(l___auto____x40_Init_Data_Array_Lemmas___hyg_18368____closed__6); +l___auto____x40_Init_Data_Array_Lemmas___hyg_18368____closed__7 = _init_l___auto____x40_Init_Data_Array_Lemmas___hyg_18368____closed__7(); +lean_mark_persistent(l___auto____x40_Init_Data_Array_Lemmas___hyg_18368____closed__7); +l___auto____x40_Init_Data_Array_Lemmas___hyg_18368____closed__8 = _init_l___auto____x40_Init_Data_Array_Lemmas___hyg_18368____closed__8(); +lean_mark_persistent(l___auto____x40_Init_Data_Array_Lemmas___hyg_18368____closed__8); +l___auto____x40_Init_Data_Array_Lemmas___hyg_18368____closed__9 = _init_l___auto____x40_Init_Data_Array_Lemmas___hyg_18368____closed__9(); +lean_mark_persistent(l___auto____x40_Init_Data_Array_Lemmas___hyg_18368____closed__9); +l___auto____x40_Init_Data_Array_Lemmas___hyg_18368____closed__10 = _init_l___auto____x40_Init_Data_Array_Lemmas___hyg_18368____closed__10(); +lean_mark_persistent(l___auto____x40_Init_Data_Array_Lemmas___hyg_18368____closed__10); +l___auto____x40_Init_Data_Array_Lemmas___hyg_18368____closed__11 = _init_l___auto____x40_Init_Data_Array_Lemmas___hyg_18368____closed__11(); +lean_mark_persistent(l___auto____x40_Init_Data_Array_Lemmas___hyg_18368____closed__11); +l___auto____x40_Init_Data_Array_Lemmas___hyg_18368____closed__12 = _init_l___auto____x40_Init_Data_Array_Lemmas___hyg_18368____closed__12(); +lean_mark_persistent(l___auto____x40_Init_Data_Array_Lemmas___hyg_18368____closed__12); +l___auto____x40_Init_Data_Array_Lemmas___hyg_18368____closed__13 = _init_l___auto____x40_Init_Data_Array_Lemmas___hyg_18368____closed__13(); +lean_mark_persistent(l___auto____x40_Init_Data_Array_Lemmas___hyg_18368____closed__13); +l___auto____x40_Init_Data_Array_Lemmas___hyg_18368____closed__14 = _init_l___auto____x40_Init_Data_Array_Lemmas___hyg_18368____closed__14(); +lean_mark_persistent(l___auto____x40_Init_Data_Array_Lemmas___hyg_18368____closed__14); +l___auto____x40_Init_Data_Array_Lemmas___hyg_18368____closed__15 = _init_l___auto____x40_Init_Data_Array_Lemmas___hyg_18368____closed__15(); +lean_mark_persistent(l___auto____x40_Init_Data_Array_Lemmas___hyg_18368____closed__15); +l___auto____x40_Init_Data_Array_Lemmas___hyg_18368____closed__16 = _init_l___auto____x40_Init_Data_Array_Lemmas___hyg_18368____closed__16(); +lean_mark_persistent(l___auto____x40_Init_Data_Array_Lemmas___hyg_18368____closed__16); 
+l___auto____x40_Init_Data_Array_Lemmas___hyg_18368____closed__17 = _init_l___auto____x40_Init_Data_Array_Lemmas___hyg_18368____closed__17(); +lean_mark_persistent(l___auto____x40_Init_Data_Array_Lemmas___hyg_18368____closed__17); +l___auto____x40_Init_Data_Array_Lemmas___hyg_18368____closed__18 = _init_l___auto____x40_Init_Data_Array_Lemmas___hyg_18368____closed__18(); +lean_mark_persistent(l___auto____x40_Init_Data_Array_Lemmas___hyg_18368____closed__18); +l___auto____x40_Init_Data_Array_Lemmas___hyg_18368____closed__19 = _init_l___auto____x40_Init_Data_Array_Lemmas___hyg_18368____closed__19(); +lean_mark_persistent(l___auto____x40_Init_Data_Array_Lemmas___hyg_18368____closed__19); +l___auto____x40_Init_Data_Array_Lemmas___hyg_18368____closed__20 = _init_l___auto____x40_Init_Data_Array_Lemmas___hyg_18368____closed__20(); +lean_mark_persistent(l___auto____x40_Init_Data_Array_Lemmas___hyg_18368____closed__20); +l___auto____x40_Init_Data_Array_Lemmas___hyg_18368____closed__21 = _init_l___auto____x40_Init_Data_Array_Lemmas___hyg_18368____closed__21(); +lean_mark_persistent(l___auto____x40_Init_Data_Array_Lemmas___hyg_18368____closed__21); +l___auto____x40_Init_Data_Array_Lemmas___hyg_18368____closed__22 = _init_l___auto____x40_Init_Data_Array_Lemmas___hyg_18368____closed__22(); +lean_mark_persistent(l___auto____x40_Init_Data_Array_Lemmas___hyg_18368____closed__22); +l___auto____x40_Init_Data_Array_Lemmas___hyg_18368____closed__23 = _init_l___auto____x40_Init_Data_Array_Lemmas___hyg_18368____closed__23(); +lean_mark_persistent(l___auto____x40_Init_Data_Array_Lemmas___hyg_18368____closed__23); +l___auto____x40_Init_Data_Array_Lemmas___hyg_18368____closed__24 = _init_l___auto____x40_Init_Data_Array_Lemmas___hyg_18368____closed__24(); +lean_mark_persistent(l___auto____x40_Init_Data_Array_Lemmas___hyg_18368____closed__24); +l___auto____x40_Init_Data_Array_Lemmas___hyg_18368____closed__25 = _init_l___auto____x40_Init_Data_Array_Lemmas___hyg_18368____closed__25(); +lean_mark_persistent(l___auto____x40_Init_Data_Array_Lemmas___hyg_18368____closed__25); +l___auto____x40_Init_Data_Array_Lemmas___hyg_18368____closed__26 = _init_l___auto____x40_Init_Data_Array_Lemmas___hyg_18368____closed__26(); +lean_mark_persistent(l___auto____x40_Init_Data_Array_Lemmas___hyg_18368____closed__26); +l___auto____x40_Init_Data_Array_Lemmas___hyg_18368____closed__27 = _init_l___auto____x40_Init_Data_Array_Lemmas___hyg_18368____closed__27(); +lean_mark_persistent(l___auto____x40_Init_Data_Array_Lemmas___hyg_18368____closed__27); +l___auto____x40_Init_Data_Array_Lemmas___hyg_18368____closed__28 = _init_l___auto____x40_Init_Data_Array_Lemmas___hyg_18368____closed__28(); +lean_mark_persistent(l___auto____x40_Init_Data_Array_Lemmas___hyg_18368____closed__28); +l___auto____x40_Init_Data_Array_Lemmas___hyg_18368____closed__29 = _init_l___auto____x40_Init_Data_Array_Lemmas___hyg_18368____closed__29(); +lean_mark_persistent(l___auto____x40_Init_Data_Array_Lemmas___hyg_18368____closed__29); +l___auto____x40_Init_Data_Array_Lemmas___hyg_18368____closed__30 = _init_l___auto____x40_Init_Data_Array_Lemmas___hyg_18368____closed__30(); +lean_mark_persistent(l___auto____x40_Init_Data_Array_Lemmas___hyg_18368____closed__30); +l___auto____x40_Init_Data_Array_Lemmas___hyg_18368____closed__31 = _init_l___auto____x40_Init_Data_Array_Lemmas___hyg_18368____closed__31(); +lean_mark_persistent(l___auto____x40_Init_Data_Array_Lemmas___hyg_18368____closed__31); +l___auto____x40_Init_Data_Array_Lemmas___hyg_18368_ = 
_init_l___auto____x40_Init_Data_Array_Lemmas___hyg_18368_(); +lean_mark_persistent(l___auto____x40_Init_Data_Array_Lemmas___hyg_18368_); return lean_io_result_mk_ok(lean_box(0)); } #ifdef __cplusplus diff --git a/stage0/stdlib/Init/Data/Option/Lemmas.c b/stage0/stdlib/Init/Data/Option/Lemmas.c index 0f6954464d52..380698c1863a 100644 --- a/stage0/stdlib/Init/Data/Option/Lemmas.c +++ b/stage0/stdlib/Init/Data/Option/Lemmas.c @@ -20,11 +20,13 @@ LEAN_EXPORT lean_object* l___private_Init_Data_Option_Lemmas_0__Option_merge_mat LEAN_EXPORT lean_object* l___private_Init_Data_Option_Lemmas_0__Option_isSome_match__1_splitter___rarg___boxed(lean_object*, lean_object*, lean_object*); LEAN_EXPORT lean_object* l___private_Init_Data_Option_Lemmas_0__Option_isSome_match__1_splitter(lean_object*, lean_object*); LEAN_EXPORT lean_object* l___private_Init_Data_Option_Lemmas_0__Option_lt_match__1_splitter___rarg(lean_object*, lean_object*, lean_object*, lean_object*, lean_object*); +LEAN_EXPORT lean_object* l___private_Init_Data_Option_Lemmas_0__Option_isEqSome_match__1_splitter(lean_object*, lean_object*); LEAN_EXPORT lean_object* l___private_Init_Data_Option_Lemmas_0__Option_pmap_match__1_splitter___rarg(lean_object*, lean_object*, lean_object*, lean_object*); LEAN_EXPORT lean_object* l___private_Init_Data_Option_Lemmas_0__Option_pmap_match__1_splitter(lean_object*, lean_object*, lean_object*); LEAN_EXPORT lean_object* l___private_Init_Data_Option_Lemmas_0__Option_le_match__1_splitter(lean_object*, lean_object*, lean_object*); LEAN_EXPORT lean_object* l___private_Init_Data_Option_Lemmas_0__Option_lt_match__1_splitter(lean_object*, lean_object*, lean_object*); LEAN_EXPORT lean_object* l___private_Init_Data_Option_Lemmas_0__Option_le_match__1_splitter___rarg___boxed(lean_object*, lean_object*, lean_object*, lean_object*, lean_object*, lean_object*); +LEAN_EXPORT lean_object* l___private_Init_Data_Option_Lemmas_0__Option_isEqSome_match__1_splitter___rarg(lean_object*, lean_object*, lean_object*, lean_object*); LEAN_EXPORT lean_object* l___private_Init_Data_Option_Lemmas_0__Option_bind_match__1_splitter(lean_object*, lean_object*, lean_object*); LEAN_EXPORT lean_object* l___private_Init_Data_Option_Lemmas_0__Option_pfilter_match__1_splitter(lean_object*, lean_object*); LEAN_EXPORT lean_object* l___private_Init_Data_Option_Lemmas_0__Option_isSome_match__1_splitter___rarg(lean_object*, lean_object*, lean_object*); @@ -166,6 +168,36 @@ lean_dec(x_3); return x_7; } } +LEAN_EXPORT lean_object* l___private_Init_Data_Option_Lemmas_0__Option_isEqSome_match__1_splitter___rarg(lean_object* x_1, lean_object* x_2, lean_object* x_3, lean_object* x_4) { +_start: +{ +if (lean_obj_tag(x_1) == 0) +{ +lean_object* x_5; +lean_dec(x_3); +x_5 = lean_apply_1(x_4, x_2); +return x_5; +} +else +{ +lean_object* x_6; lean_object* x_7; +lean_dec(x_4); +x_6 = lean_ctor_get(x_1, 0); +lean_inc(x_6); +lean_dec(x_1); +x_7 = lean_apply_2(x_3, x_6, x_2); +return x_7; +} +} +} +LEAN_EXPORT lean_object* l___private_Init_Data_Option_Lemmas_0__Option_isEqSome_match__1_splitter(lean_object* x_1, lean_object* x_2) { +_start: +{ +lean_object* x_3; +x_3 = lean_alloc_closure((void*)(l___private_Init_Data_Option_Lemmas_0__Option_isEqSome_match__1_splitter___rarg), 4, 0); +return x_3; +} +} LEAN_EXPORT lean_object* l___private_Init_Data_Option_Lemmas_0__Option_pmap_match__1_splitter___rarg(lean_object* x_1, lean_object* x_2, lean_object* x_3, lean_object* x_4) { _start: { diff --git a/stage0/stdlib/Init/Data/Option/List.c 
b/stage0/stdlib/Init/Data/Option/List.c index 284cf5a10b18..2c66140461bb 100644 --- a/stage0/stdlib/Init/Data/Option/List.c +++ b/stage0/stdlib/Init/Data/Option/List.c @@ -13,41 +13,6 @@ #ifdef __cplusplus extern "C" { #endif -LEAN_EXPORT lean_object* l___private_Init_Data_Option_List_0__Option_instForIn_x27InferInstanceMembership_match__1_splitter(lean_object*, lean_object*); -LEAN_EXPORT lean_object* l___private_Init_Data_Option_List_0__Option_instForIn_x27InferInstanceMembership_match__1_splitter___rarg(lean_object*, lean_object*, lean_object*); -LEAN_EXPORT lean_object* l___private_Init_Data_Option_List_0__Option_instForIn_x27InferInstanceMembership_match__1_splitter___rarg(lean_object* x_1, lean_object* x_2, lean_object* x_3) { -_start: -{ -if (lean_obj_tag(x_1) == 0) -{ -lean_object* x_4; lean_object* x_5; -lean_dec(x_3); -x_4 = lean_ctor_get(x_1, 0); -lean_inc(x_4); -lean_dec(x_1); -x_5 = lean_apply_1(x_2, x_4); -return x_5; -} -else -{ -lean_object* x_6; lean_object* x_7; -lean_dec(x_2); -x_6 = lean_ctor_get(x_1, 0); -lean_inc(x_6); -lean_dec(x_1); -x_7 = lean_apply_1(x_3, x_6); -return x_7; -} -} -} -LEAN_EXPORT lean_object* l___private_Init_Data_Option_List_0__Option_instForIn_x27InferInstanceMembership_match__1_splitter(lean_object* x_1, lean_object* x_2) { -_start: -{ -lean_object* x_3; -x_3 = lean_alloc_closure((void*)(l___private_Init_Data_Option_List_0__Option_instForIn_x27InferInstanceMembership_match__1_splitter___rarg), 3, 0); -return x_3; -} -} lean_object* initialize_Init_Data_List_Lemmas(uint8_t builtin, lean_object*); static bool _G_initialized = false; LEAN_EXPORT lean_object* initialize_Init_Data_Option_List(uint8_t builtin, lean_object* w) { diff --git a/stage0/stdlib/Init/Data/Option/Monadic.c b/stage0/stdlib/Init/Data/Option/Monadic.c index 1cb652dad67f..1398dbb54582 100644 --- a/stage0/stdlib/Init/Data/Option/Monadic.c +++ b/stage0/stdlib/Init/Data/Option/Monadic.c @@ -13,6 +13,73 @@ #ifdef __cplusplus extern "C" { #endif +LEAN_EXPORT lean_object* l___private_Init_Data_Option_Monadic_0__Option_instForIn_x27InferInstanceMembership_match__1_splitter___rarg(lean_object*, lean_object*, lean_object*); +LEAN_EXPORT lean_object* l___private_Init_Data_Option_Monadic_0__Option_instForIn_x27InferInstanceMembership_match__1_splitter(lean_object*, lean_object*); +LEAN_EXPORT lean_object* l___private_Init_Data_Option_Monadic_0__Option_bindM_match__1_splitter(lean_object*, lean_object*); +LEAN_EXPORT lean_object* l___private_Init_Data_Option_Monadic_0__Option_bindM_match__1_splitter___rarg(lean_object*, lean_object*, lean_object*); +LEAN_EXPORT lean_object* l___private_Init_Data_Option_Monadic_0__Option_bindM_match__1_splitter___rarg(lean_object* x_1, lean_object* x_2, lean_object* x_3) { +_start: +{ +if (lean_obj_tag(x_1) == 0) +{ +lean_object* x_4; +lean_dec(x_2); +x_4 = lean_apply_2(x_3, x_1, lean_box(0)); +return x_4; +} +else +{ +lean_object* x_5; lean_object* x_6; +lean_dec(x_3); +x_5 = lean_ctor_get(x_1, 0); +lean_inc(x_5); +lean_dec(x_1); +x_6 = lean_apply_1(x_2, x_5); +return x_6; +} +} +} +LEAN_EXPORT lean_object* l___private_Init_Data_Option_Monadic_0__Option_bindM_match__1_splitter(lean_object* x_1, lean_object* x_2) { +_start: +{ +lean_object* x_3; +x_3 = lean_alloc_closure((void*)(l___private_Init_Data_Option_Monadic_0__Option_bindM_match__1_splitter___rarg), 3, 0); +return x_3; +} +} +LEAN_EXPORT lean_object* l___private_Init_Data_Option_Monadic_0__Option_instForIn_x27InferInstanceMembership_match__1_splitter___rarg(lean_object* x_1, lean_object* x_2, 
lean_object* x_3) { +_start: +{ +if (lean_obj_tag(x_1) == 0) +{ +lean_object* x_4; lean_object* x_5; +lean_dec(x_3); +x_4 = lean_ctor_get(x_1, 0); +lean_inc(x_4); +lean_dec(x_1); +x_5 = lean_apply_1(x_2, x_4); +return x_5; +} +else +{ +lean_object* x_6; lean_object* x_7; +lean_dec(x_2); +x_6 = lean_ctor_get(x_1, 0); +lean_inc(x_6); +lean_dec(x_1); +x_7 = lean_apply_1(x_3, x_6); +return x_7; +} +} +} +LEAN_EXPORT lean_object* l___private_Init_Data_Option_Monadic_0__Option_instForIn_x27InferInstanceMembership_match__1_splitter(lean_object* x_1, lean_object* x_2) { +_start: +{ +lean_object* x_3; +x_3 = lean_alloc_closure((void*)(l___private_Init_Data_Option_Monadic_0__Option_instForIn_x27InferInstanceMembership_match__1_splitter___rarg), 3, 0); +return x_3; +} +} lean_object* initialize_Init_Data_Option_Attach(uint8_t builtin, lean_object*); lean_object* initialize_Init_Control_Lawful_Basic(uint8_t builtin, lean_object*); static bool _G_initialized = false; diff --git a/stage0/stdlib/Init/Data/Vector/Basic.c b/stage0/stdlib/Init/Data/Vector/Basic.c index 4fc953f16b6b..cbc0de56476f 100644 --- a/stage0/stdlib/Init/Data/Vector/Basic.c +++ b/stage0/stdlib/Init/Data/Vector/Basic.c @@ -19,11 +19,9 @@ static lean_object* l_Vector___aux__Init__Data__Vector__Basic______macroRules__V LEAN_EXPORT lean_object* l_Array_findSomeRevM_x3f_find___at_Vector_findRevM_x3f___spec__1___rarg(lean_object*, lean_object*, lean_object*, lean_object*, lean_object*); LEAN_EXPORT lean_object* l_Vector_count(lean_object*, lean_object*); static lean_object* l___private_Init_Data_Vector_Basic_0__reprVector____x40_Init_Data_Vector_Basic___hyg_34____rarg___closed__9; -static lean_object* l___auto____x40_Init_Data_Vector_Basic___hyg_1161____closed__16; LEAN_EXPORT lean_object* l_Vector_shrink___rarg___boxed(lean_object*, lean_object*); -static lean_object* l___auto____x40_Init_Data_Vector_Basic___hyg_7607____closed__23; +static lean_object* l___auto____x40_Init_Data_Vector_Basic___hyg_1162____closed__2; LEAN_EXPORT lean_object* l_Vector_instMembership(lean_object*, lean_object*); -static lean_object* l___auto____x40_Init_Data_Vector_Basic___hyg_7607____closed__21; LEAN_EXPORT lean_object* l_Array_forIn_x27Unsafe_loop___at_Vector_findM_x3f___spec__1___rarg___lambda__1(lean_object*, lean_object*, lean_object*); LEAN_EXPORT lean_object* l_Vector_singleton(lean_object*); LEAN_EXPORT lean_object* l_Std_Range_forIn_x27_loop___at_Vector_lex___spec__1___rarg___boxed(lean_object*, lean_object*, lean_object*, lean_object*, lean_object*, lean_object*, lean_object*, lean_object*, lean_object*, lean_object*, lean_object*); @@ -34,11 +32,11 @@ LEAN_EXPORT lean_object* l_Vector_drop___boxed(lean_object*, lean_object*); static lean_object* l_Vector_term_x23v_x5b___x2c_x5d___closed__20; LEAN_EXPORT uint8_t l_Vector_instBEq___rarg(lean_object*, lean_object*, lean_object*, lean_object*); LEAN_EXPORT lean_object* l_Vector_mapFinIdxM_map___boxed(lean_object*, lean_object*, lean_object*, lean_object*, lean_object*, lean_object*, lean_object*, lean_object*, lean_object*, lean_object*, lean_object*); -static lean_object* l___auto____x40_Init_Data_Vector_Basic___hyg_1161____closed__3; uint8_t l_Array_isEqvAux___rarg(lean_object*, lean_object*, lean_object*, lean_object*, lean_object*, lean_object*); LEAN_EXPORT lean_object* l_Vector_findSome_x3f___boxed(lean_object*, lean_object*, lean_object*); LEAN_EXPORT lean_object* l_Array_mapMUnsafe_map___at_Vector_flatten___spec__1(lean_object*); LEAN_EXPORT lean_object* 
l_Array_foldrMUnsafe_fold___at_Vector_countP___spec__1___rarg(lean_object*, lean_object*, size_t, size_t, lean_object*); +static lean_object* l___auto____x40_Init_Data_Vector_Basic___hyg_7608____closed__2; LEAN_EXPORT lean_object* l_Vector_insertIdx_x21(lean_object*); static lean_object* l_Vector_term_x23v_x5b___x2c_x5d___closed__19; static lean_object* l_Vector_findM_x3f___rarg___closed__1; @@ -55,12 +53,15 @@ LEAN_EXPORT lean_object* l_Array_findSomeRevM_x3f_find___at_Vector_findSomeRev_x LEAN_EXPORT lean_object* l_Array_mapFinIdxM_map___at_Vector_mapIdx___spec__1___rarg___boxed(lean_object*, lean_object*, lean_object*, lean_object*, lean_object*, lean_object*, lean_object*); LEAN_EXPORT lean_object* l_Vector_back_x3f___rarg___boxed(lean_object*); static lean_object* l_Vector___aux__Init__Data__Vector__Basic______macroRules__Vector__term_x23v_x5b___x2c_x5d__1___closed__22; +LEAN_EXPORT lean_object* l___auto____x40_Init_Data_Vector_Basic___hyg_7608_; LEAN_EXPORT lean_object* l_Vector_push(lean_object*, lean_object*); LEAN_EXPORT lean_object* l_Vector_findSome_x3f___rarg(lean_object*, lean_object*); LEAN_EXPORT lean_object* l_Array_anyMUnsafe_any___at_Vector_any___spec__1(lean_object*); LEAN_EXPORT lean_object* l_Vector_findM_x3f___rarg___lambda__1(lean_object*, lean_object*, lean_object*); LEAN_EXPORT lean_object* l_Vector_range___boxed(lean_object*); LEAN_EXPORT uint8_t l_Vector_any___rarg(lean_object*, lean_object*); +static lean_object* l___auto____x40_Init_Data_Vector_Basic___hyg_7608____closed__23; +static lean_object* l___auto____x40_Init_Data_Vector_Basic___hyg_7608____closed__24; static lean_object* l_Vector_term_x23v_x5b___x2c_x5d___closed__12; static lean_object* l_Vector___aux__Init__Data__Vector__Basic______macroRules__Vector__term_x23v_x5b___x2c_x5d__1___closed__6; LEAN_EXPORT lean_object* l_Vector_eraseIdx_x21___rarg(lean_object*, lean_object*, lean_object*); @@ -75,7 +76,6 @@ static lean_object* l_Vector___aux__Init__Data__Vector__Basic______macroRules__V LEAN_EXPORT lean_object* l_Vector_set_x21___rarg___boxed(lean_object*, lean_object*, lean_object*); LEAN_EXPORT lean_object* l_Array_forIn_x27Unsafe_loop___at_Vector_findSomeM_x3f___spec__1___rarg___lambda__2(lean_object*, size_t, lean_object*, lean_object*, lean_object*, lean_object*, lean_object*, lean_object*, size_t, lean_object*); LEAN_EXPORT lean_object* l_Vector_zip(lean_object*, lean_object*, lean_object*); -static lean_object* l___auto____x40_Init_Data_Vector_Basic___hyg_7607____closed__32; LEAN_EXPORT lean_object* l_Vector_findSomeRev_x3f___rarg___boxed(lean_object*, lean_object*); LEAN_EXPORT lean_object* l_Array_toVector___rarg(lean_object*); LEAN_EXPORT lean_object* l_Array_foldlMUnsafe_fold___at_Vector_forM___spec__1___rarg___lambda__1___boxed(lean_object*, lean_object*, lean_object*, lean_object*, lean_object*, lean_object*); @@ -101,23 +101,27 @@ LEAN_EXPORT lean_object* l_Vector_back___boxed(lean_object*, lean_object*, lean_ LEAN_EXPORT lean_object* l_Vector_mapFinIdx___rarg(lean_object*, lean_object*, lean_object*, lean_object*); lean_object* lean_array_push(lean_object*, lean_object*); LEAN_EXPORT lean_object* l_Vector_back_x21(lean_object*, lean_object*); +static lean_object* l___auto____x40_Init_Data_Vector_Basic___hyg_1162____closed__8; lean_object* l_Array_toSubarray___rarg(lean_object*, lean_object*, lean_object*); -static lean_object* l___auto____x40_Init_Data_Vector_Basic___hyg_1161____closed__9; LEAN_EXPORT lean_object* l_Array_foldrMUnsafe_fold___at_Vector_foldr___spec__1___rarg(lean_object*, 
lean_object*, size_t, size_t, lean_object*); +static lean_object* l___auto____x40_Init_Data_Vector_Basic___hyg_1162____closed__14; LEAN_EXPORT lean_object* l_Array_toVector(lean_object*); static lean_object* l___private_Init_Data_Vector_Basic_0__reprVector____x40_Init_Data_Vector_Basic___hyg_34____rarg___closed__38; +static lean_object* l___auto____x40_Init_Data_Vector_Basic___hyg_1162____closed__7; LEAN_EXPORT lean_object* l_Vector_get(lean_object*, lean_object*); LEAN_EXPORT lean_object* l_Vector_swap___boxed(lean_object*, lean_object*); LEAN_EXPORT lean_object* l_Vector_instForIn_x27InferInstanceMembership(lean_object*, lean_object*); uint8_t lean_usize_dec_eq(size_t, size_t); LEAN_EXPORT lean_object* l_Vector_swap___rarg(lean_object*, lean_object*, lean_object*, lean_object*, lean_object*); +static lean_object* l___auto____x40_Init_Data_Vector_Basic___hyg_1162____closed__6; LEAN_EXPORT lean_object* l_Vector_insertIdx_x21___rarg(lean_object*, lean_object*, lean_object*, lean_object*); static lean_object* l_Vector___aux__Init__Data__Vector__Basic______macroRules__Vector__term_x23v_x5b___x2c_x5d__1___closed__19; -static lean_object* l___auto____x40_Init_Data_Vector_Basic___hyg_7607____closed__22; +static lean_object* l___auto____x40_Init_Data_Vector_Basic___hyg_7608____closed__22; LEAN_EXPORT lean_object* l_Vector_reverse___boxed(lean_object*, lean_object*); static lean_object* l___private_Init_Data_Vector_Basic_0__reprVector____x40_Init_Data_Vector_Basic___hyg_34____rarg___closed__25; LEAN_EXPORT lean_object* l_Vector_findFinIdx_x3f___boxed(lean_object*, lean_object*); lean_object* l_Lean_Syntax_getArgs(lean_object*); +static lean_object* l___auto____x40_Init_Data_Vector_Basic___hyg_7608____closed__5; static lean_object* l_Vector_term_x23v_x5b___x2c_x5d___closed__13; static lean_object* l___private_Init_Data_Vector_Basic_0__reprVector____x40_Init_Data_Vector_Basic___hyg_34____rarg___closed__35; LEAN_EXPORT lean_object* l___private_Init_Data_Vector_Basic_0__reprVector____x40_Init_Data_Vector_Basic___hyg_34____rarg___boxed(lean_object*, lean_object*, lean_object*); @@ -133,16 +137,17 @@ LEAN_EXPORT lean_object* l_Vector_shrink___boxed(lean_object*, lean_object*); lean_object* lean_array_fset(lean_object*, lean_object*, lean_object*); LEAN_EXPORT lean_object* l_Vector_set_x21___rarg(lean_object*, lean_object*, lean_object*); LEAN_EXPORT lean_object* l_Vector_finIdxOf_x3f(lean_object*, lean_object*); -static lean_object* l___auto____x40_Init_Data_Vector_Basic___hyg_7607____closed__15; LEAN_EXPORT lean_object* l_Vector_pop___rarg(lean_object*); LEAN_EXPORT uint8_t l_Array_isEqvAux___at_Vector_instBEq___spec__1___rarg(lean_object*, lean_object*, lean_object*, lean_object*, lean_object*, lean_object*, lean_object*, lean_object*); LEAN_EXPORT lean_object* l_Array_anyMUnsafe_any___at_Vector_any___spec__1___rarg___boxed(lean_object*, lean_object*, lean_object*, lean_object*); +static lean_object* l___auto____x40_Init_Data_Vector_Basic___hyg_7608____closed__13; lean_object* l_Lean_Syntax_TSepArray_getElems___rarg(lean_object*); static lean_object* l_Vector___aux__Init__Data__Vector__Basic______macroRules__Vector__term_x23v_x5b___x2c_x5d__1___closed__25; LEAN_EXPORT uint8_t l_Vector_contains___rarg(lean_object*, lean_object*, lean_object*); LEAN_EXPORT lean_object* l_Vector_instGetElemNatLt(lean_object*, lean_object*); +static lean_object* l___auto____x40_Init_Data_Vector_Basic___hyg_1162____closed__3; LEAN_EXPORT lean_object* l_Vector_all(lean_object*, lean_object*); -LEAN_EXPORT lean_object* 
l___auto____x40_Init_Data_Vector_Basic___hyg_6512_; +LEAN_EXPORT lean_object* l___auto____x40_Init_Data_Vector_Basic___hyg_6165_; LEAN_EXPORT lean_object* l_Vector_findFinIdx_x3f___rarg___boxed(lean_object*, lean_object*); static lean_object* l___private_Init_Data_Vector_Basic_0__reprVector____x40_Init_Data_Vector_Basic___hyg_34____rarg___closed__33; LEAN_EXPORT lean_object* l_Vector_eraseIdx___rarg(lean_object*, lean_object*, lean_object*); @@ -152,7 +157,6 @@ LEAN_EXPORT lean_object* l_Array_foldrMUnsafe_fold___at_Vector_count___spec__1__ LEAN_EXPORT lean_object* l_Vector_instLE___boxed(lean_object*, lean_object*, lean_object*); LEAN_EXPORT lean_object* l_Vector_zipWithIndex___boxed(lean_object*, lean_object*); static lean_object* l_Vector_mapM___rarg___closed__1; -static lean_object* l___auto____x40_Init_Data_Vector_Basic___hyg_1161____closed__11; LEAN_EXPORT lean_object* l_Vector_foldrM(lean_object*, lean_object*, lean_object*, lean_object*); static lean_object* l_Vector___aux__Init__Data__Vector__Basic______macroRules__Vector__term_x23v_x5b___x2c_x5d__1___closed__26; LEAN_EXPORT lean_object* l_Array_mapFinIdxM_map___at_Vector_mapFinIdx___spec__1___rarg___boxed(lean_object*, lean_object*, lean_object*, lean_object*, lean_object*, lean_object*, lean_object*, lean_object*, lean_object*); @@ -164,15 +168,12 @@ LEAN_EXPORT lean_object* l_Vector_find_x3f___rarg(lean_object*, lean_object*); LEAN_EXPORT lean_object* l___private_Init_Data_Vector_Basic_0__reprVector____x40_Init_Data_Vector_Basic___hyg_34____boxed(lean_object*, lean_object*); LEAN_EXPORT lean_object* l_Array_forIn_x27Unsafe_loop___at_Vector_findM_x3f___spec__1___rarg___lambda__3(lean_object*, size_t, lean_object*, lean_object*, lean_object*, lean_object*, lean_object*, lean_object*, size_t, lean_object*); lean_object* l_Lean_Syntax_node5(lean_object*, lean_object*, lean_object*, lean_object*, lean_object*, lean_object*, lean_object*); -static lean_object* l___auto____x40_Init_Data_Vector_Basic___hyg_7607____closed__5; LEAN_EXPORT lean_object* l_Vector_mapM___rarg(lean_object*, lean_object*, lean_object*, lean_object*); uint8_t l_Lean_Syntax_isOfKind(lean_object*, lean_object*); +static lean_object* l___auto____x40_Init_Data_Vector_Basic___hyg_7608____closed__15; LEAN_EXPORT lean_object* l_Vector_uget___rarg___boxed(lean_object*, lean_object*, lean_object*); LEAN_EXPORT lean_object* l_Vector_singleton___rarg(lean_object*); LEAN_EXPORT lean_object* l_Vector_contains(lean_object*, lean_object*); -static lean_object* l___auto____x40_Init_Data_Vector_Basic___hyg_1161____closed__5; -static lean_object* l___auto____x40_Init_Data_Vector_Basic___hyg_1161____closed__1; -static lean_object* l___auto____x40_Init_Data_Vector_Basic___hyg_7607____closed__6; LEAN_EXPORT lean_object* l_Vector_instForM(lean_object*, lean_object*, lean_object*); LEAN_EXPORT lean_object* l_Array_forIn_x27Unsafe_loop___at_Vector_findSome_x3f___spec__1(lean_object*, lean_object*); static lean_object* l_Vector_find_x3f___rarg___closed__2; @@ -186,7 +187,6 @@ LEAN_EXPORT lean_object* l_Array_isEqvAux___at_Vector_instBEq___spec__1(lean_obj LEAN_EXPORT lean_object* l_Array_foldlMUnsafe_fold___at_Vector_forM___spec__1___rarg___boxed(lean_object*, lean_object*, lean_object*, lean_object*, lean_object*, lean_object*); LEAN_EXPORT lean_object* l_Vector_instLT___boxed(lean_object*, lean_object*, lean_object*); LEAN_EXPORT lean_object* l_Vector_instGetElemNatLt___boxed(lean_object*, lean_object*); -static lean_object* 
l___auto____x40_Init_Data_Vector_Basic___hyg_7607____closed__14; LEAN_EXPORT lean_object* l_Vector_any___boxed(lean_object*, lean_object*); LEAN_EXPORT lean_object* l_Vector_indexOf_x3f(lean_object*, lean_object*); LEAN_EXPORT lean_object* l_Vector_shrink___rarg(lean_object*, lean_object*); @@ -201,14 +201,13 @@ LEAN_EXPORT lean_object* l_Vector_unzip___boxed(lean_object*, lean_object*, lean LEAN_EXPORT lean_object* l_Vector_get___rarg(lean_object*, lean_object*); static lean_object* l_Vector___aux__Init__Data__Vector__Basic______macroRules__Vector__term_x23v_x5b___x2c_x5d__1___closed__30; LEAN_EXPORT lean_object* l_Vector_mapFinIdxM_map(lean_object*, lean_object*, lean_object*, lean_object*, lean_object*, lean_object*, lean_object*, lean_object*, lean_object*, lean_object*, lean_object*); -static lean_object* l___auto____x40_Init_Data_Vector_Basic___hyg_1161____closed__13; LEAN_EXPORT lean_object* l_Vector_flatMapM_go___rarg___lambda__1___boxed(lean_object*, lean_object*, lean_object*, lean_object*, lean_object*, lean_object*, lean_object*, lean_object*); -static lean_object* l___auto____x40_Init_Data_Vector_Basic___hyg_7607____closed__16; +static lean_object* l___auto____x40_Init_Data_Vector_Basic___hyg_7608____closed__31; LEAN_EXPORT lean_object* l_Vector_mapFinIdxM(lean_object*, lean_object*, lean_object*, lean_object*, lean_object*, lean_object*, lean_object*); LEAN_EXPORT lean_object* l_Vector_finIdxOf_x3f___boxed(lean_object*, lean_object*); -static lean_object* l___auto____x40_Init_Data_Vector_Basic___hyg_7607____closed__12; static lean_object* l___private_Init_Data_Vector_Basic_0__reprVector____x40_Init_Data_Vector_Basic___hyg_34____rarg___closed__32; LEAN_EXPORT lean_object* l_Array_foldlMUnsafe_fold___at_Vector_forM___spec__1___rarg___lambda__1(size_t, lean_object*, lean_object*, lean_object*, size_t, lean_object*); +static lean_object* l___auto____x40_Init_Data_Vector_Basic___hyg_7608____closed__7; LEAN_EXPORT lean_object* l_Vector_replace___rarg(lean_object*, lean_object*, lean_object*, lean_object*); LEAN_EXPORT lean_object* l_Vector_cast___boxed(lean_object*, lean_object*, lean_object*, lean_object*); LEAN_EXPORT lean_object* l_Vector_instForIn_x27InferInstanceMembership___rarg(lean_object*, lean_object*, lean_object*, lean_object*, lean_object*, lean_object*); @@ -223,8 +222,8 @@ LEAN_EXPORT lean_object* l_Vector_elimAsArray___rarg(lean_object*, lean_object*) LEAN_EXPORT lean_object* l_Vector_instHAppendHAddNat___boxed(lean_object*, lean_object*, lean_object*); LEAN_EXPORT lean_object* l_Array_mapMUnsafe_map___at_Vector_flatten___spec__1___rarg___boxed(lean_object*, lean_object*, lean_object*, lean_object*); LEAN_EXPORT lean_object* l_Vector_findSomeRevM_x3f___boxed(lean_object*, lean_object*, lean_object*, lean_object*); +static lean_object* l___auto____x40_Init_Data_Vector_Basic___hyg_7608____closed__27; LEAN_EXPORT lean_object* l_Vector_find_x3f___rarg___lambda__1(lean_object*); -static lean_object* l___auto____x40_Init_Data_Vector_Basic___hyg_7607____closed__3; LEAN_EXPORT lean_object* l_Vector_head___boxed(lean_object*, lean_object*, lean_object*); lean_object* l_Array_range___lambda__1___boxed(lean_object*); size_t lean_usize_of_nat(lean_object*); @@ -236,21 +235,18 @@ LEAN_EXPORT lean_object* l_Vector_count___rarg___boxed(lean_object*, lean_object LEAN_EXPORT lean_object* l_Vector_rightpad___rarg___boxed(lean_object*, lean_object*, lean_object*, lean_object*); LEAN_EXPORT lean_object* l_Vector_findSomeM_x3f___rarg___lambda__1(lean_object*, lean_object*, lean_object*, 
lean_object*); LEAN_EXPORT lean_object* l_Vector_map___boxed(lean_object*, lean_object*, lean_object*); +static lean_object* l___auto____x40_Init_Data_Vector_Basic___hyg_1162____closed__15; LEAN_EXPORT lean_object* l_Vector_zip___rarg___boxed(lean_object*, lean_object*); -static lean_object* l___auto____x40_Init_Data_Vector_Basic___hyg_1161____closed__10; LEAN_EXPORT lean_object* l_Vector_swapIfInBounds___rarg___boxed(lean_object*, lean_object*, lean_object*); static lean_object* l_Vector___aux__Init__Data__Vector__Basic______macroRules__Vector__term_x23v_x5b___x2c_x5d__1___closed__8; static lean_object* l_Vector_swapAt_x21___rarg___closed__3; LEAN_EXPORT lean_object* l_instReprVector(lean_object*, lean_object*); -static lean_object* l___auto____x40_Init_Data_Vector_Basic___hyg_1161____closed__6; LEAN_EXPORT lean_object* l_Vector_foldr(lean_object*, lean_object*, lean_object*); LEAN_EXPORT lean_object* l_Vector_back(lean_object*, lean_object*, lean_object*, lean_object*); lean_object* l_Array_reverse___rarg(lean_object*); LEAN_EXPORT lean_object* l_Vector_getD___rarg___boxed(lean_object*, lean_object*, lean_object*); LEAN_EXPORT lean_object* l_Vector_pop(lean_object*, lean_object*); -static lean_object* l___auto____x40_Init_Data_Vector_Basic___hyg_7607____closed__31; LEAN_EXPORT lean_object* l_Vector_uget___boxed(lean_object*, lean_object*); -static lean_object* l___auto____x40_Init_Data_Vector_Basic___hyg_7607____closed__4; LEAN_EXPORT lean_object* l_Vector_findM_x3f___rarg___lambda__1___boxed(lean_object*, lean_object*, lean_object*); static lean_object* l___private_Init_Data_Vector_Basic_0__reprVector____x40_Init_Data_Vector_Basic___hyg_34____rarg___closed__40; LEAN_EXPORT lean_object* l_Array_forIn_x27Unsafe_loop___at_Vector_instForIn_x27InferInstanceMembership___spec__1___rarg(lean_object*, lean_object*, lean_object*, lean_object*, lean_object*, lean_object*, size_t, size_t, lean_object*); @@ -274,7 +270,8 @@ LEAN_EXPORT uint8_t l_Vector_all___rarg(lean_object*, lean_object*); LEAN_EXPORT lean_object* l_Vector_isPrefixOf___boxed(lean_object*, lean_object*, lean_object*); static lean_object* l___private_Init_Data_Vector_Basic_0__reprVector____x40_Init_Data_Vector_Basic___hyg_34____rarg___closed__15; uint8_t l_Array_isPrefixOf___rarg(lean_object*, lean_object*, lean_object*); -static lean_object* l___auto____x40_Init_Data_Vector_Basic___hyg_7607____closed__24; +static lean_object* l___auto____x40_Init_Data_Vector_Basic___hyg_7608____closed__6; +static lean_object* l___auto____x40_Init_Data_Vector_Basic___hyg_7608____closed__10; LEAN_EXPORT lean_object* l_Vector_findSomeRevM_x3f(lean_object*, lean_object*, lean_object*, lean_object*); LEAN_EXPORT lean_object* l_Vector_findRev_x3f(lean_object*, lean_object*); static lean_object* l_Vector___aux__Init__Data__Vector__Basic______macroRules__Vector__term_x23v_x5b___x2c_x5d__1___closed__16; @@ -286,23 +283,21 @@ LEAN_EXPORT lean_object* l_Vector_replicate___rarg(lean_object*, lean_object*); LEAN_EXPORT lean_object* l_Array_findSomeRevM_x3f_find___at_Vector_findRevM_x3f___spec__1(lean_object*, lean_object*); static lean_object* l_Vector_term_x23v_x5b___x2c_x5d___closed__4; LEAN_EXPORT lean_object* l_Vector_cast___rarg___boxed(lean_object*); +static lean_object* l___auto____x40_Init_Data_Vector_Basic___hyg_7608____closed__21; LEAN_EXPORT lean_object* l_Vector_flatMapM_go(lean_object*, lean_object*); LEAN_EXPORT lean_object* l_Vector_extract___boxed(lean_object*, lean_object*); LEAN_EXPORT lean_object* l_Vector_set___boxed(lean_object*, 
lean_object*); LEAN_EXPORT lean_object* l_Vector_elimAsArray(lean_object*, lean_object*, lean_object*); LEAN_EXPORT lean_object* l_Vector_contains___rarg___boxed(lean_object*, lean_object*, lean_object*); -static lean_object* l___auto____x40_Init_Data_Vector_Basic___hyg_1161____closed__14; -static lean_object* l___auto____x40_Init_Data_Vector_Basic___hyg_1161____closed__15; static lean_object* l___private_Init_Data_Vector_Basic_0__reprVector____x40_Init_Data_Vector_Basic___hyg_34____rarg___closed__31; LEAN_EXPORT lean_object* l_Vector_head___rarg___boxed(lean_object*); -static lean_object* l___auto____x40_Init_Data_Vector_Basic___hyg_7607____closed__33; static lean_object* l_Vector___aux__Init__Data__Vector__Basic______macroRules__Vector__term_x23v_x5b___x2c_x5d__1___closed__9; LEAN_EXPORT lean_object* l_Vector_instToStreamSubarray___rarg(lean_object*, lean_object*); LEAN_EXPORT lean_object* l_Vector_isPrefixOf___rarg___boxed(lean_object*, lean_object*, lean_object*); LEAN_EXPORT lean_object* l_Array_foldrMUnsafe_fold___at_Vector_foldr___spec__1(lean_object*, lean_object*); -static lean_object* l___auto____x40_Init_Data_Vector_Basic___hyg_7607____closed__29; LEAN_EXPORT lean_object* l_Vector_swap___rarg___boxed(lean_object*, lean_object*, lean_object*, lean_object*, lean_object*); static lean_object* l___private_Init_Data_Vector_Basic_0__reprVector____x40_Init_Data_Vector_Basic___hyg_34____rarg___closed__18; +static lean_object* l___auto____x40_Init_Data_Vector_Basic___hyg_7608____closed__8; LEAN_EXPORT lean_object* l_Vector_mapM_go___rarg(lean_object*, lean_object*, lean_object*, lean_object*, lean_object*, lean_object*, lean_object*); static lean_object* l___private_Init_Data_Vector_Basic_0__reprVector____x40_Init_Data_Vector_Basic___hyg_34____rarg___closed__22; LEAN_EXPORT lean_object* l_Array_foldlMUnsafe_fold___at_Vector_foldl___spec__1___rarg(lean_object*, lean_object*, size_t, size_t, lean_object*); @@ -310,7 +305,6 @@ LEAN_EXPORT lean_object* l___private_Init_Data_Vector_Basic_0__decEqVector____x4 lean_object* l_Array_back_x3f___rarg(lean_object*); static lean_object* l___private_Init_Data_Vector_Basic_0__reprVector____x40_Init_Data_Vector_Basic___hyg_34____rarg___closed__10; LEAN_EXPORT lean_object* l_Array_foldrMUnsafe_fold___at_Vector_count___spec__2___rarg___boxed(lean_object*, lean_object*, lean_object*, lean_object*, lean_object*, lean_object*); -static lean_object* l___auto____x40_Init_Data_Vector_Basic___hyg_7607____closed__11; LEAN_EXPORT lean_object* l_Vector_flatMap___rarg___boxed(lean_object*, lean_object*, lean_object*); static lean_object* l___private_Init_Data_Vector_Basic_0__reprVector____x40_Init_Data_Vector_Basic___hyg_34____rarg___closed__4; LEAN_EXPORT lean_object* l_Vector_findSomeM_x3f___rarg(lean_object*, lean_object*, lean_object*); @@ -319,17 +313,16 @@ LEAN_EXPORT lean_object* l_Vector_range_x27(lean_object*, lean_object*, lean_obj LEAN_EXPORT lean_object* l_panic___at_Vector_swapAt_x21___spec__1___boxed(lean_object*, lean_object*); LEAN_EXPORT lean_object* l_Array_foldrMUnsafe_fold___at_Vector_countP___spec__2(lean_object*); lean_object* l_Array_replace___rarg(lean_object*, lean_object*, lean_object*, lean_object*); +static lean_object* l___auto____x40_Init_Data_Vector_Basic___hyg_7608____closed__29; lean_object* lean_array_pop(lean_object*); -static lean_object* l___auto____x40_Init_Data_Vector_Basic___hyg_7607____closed__30; LEAN_EXPORT lean_object* l_Vector_reverse(lean_object*, lean_object*); LEAN_EXPORT lean_object* 
l_Vector_unzip___rarg___boxed(lean_object*); +static lean_object* l___auto____x40_Init_Data_Vector_Basic___hyg_1162____closed__9; LEAN_EXPORT lean_object* l_Vector_findM_x3f(lean_object*, lean_object*, lean_object*); static lean_object* l_Vector___aux__Init__Data__Vector__Basic______macroRules__Vector__term_x23v_x5b___x2c_x5d__1___closed__27; LEAN_EXPORT lean_object* l_Array_foldrMUnsafe_fold___at_Vector_count___spec__2(lean_object*); -static lean_object* l___auto____x40_Init_Data_Vector_Basic___hyg_7607____closed__27; LEAN_EXPORT lean_object* l_Vector_zipWith___rarg(lean_object*, lean_object*, lean_object*); lean_object* lean_array_to_list(lean_object*); -static lean_object* l___auto____x40_Init_Data_Vector_Basic___hyg_1161____closed__8; LEAN_EXPORT lean_object* l_Vector_insertIdx___rarg___boxed(lean_object*, lean_object*, lean_object*, lean_object*); lean_object* l_Lean_Syntax_node3(lean_object*, lean_object*, lean_object*, lean_object*, lean_object*); LEAN_EXPORT lean_object* l_Vector_map(lean_object*, lean_object*, lean_object*); @@ -339,16 +332,18 @@ LEAN_EXPORT lean_object* l_Vector_flatMapM_go___rarg___lambda__1(lean_object*, l LEAN_EXPORT lean_object* l_Vector_getD(lean_object*, lean_object*); static lean_object* l_Vector_term_x23v_x5b___x2c_x5d___closed__8; LEAN_EXPORT lean_object* l_Vector_findSomeM_x3f___boxed(lean_object*, lean_object*, lean_object*, lean_object*); +static lean_object* l___auto____x40_Init_Data_Vector_Basic___hyg_7608____closed__26; LEAN_EXPORT lean_object* l_Vector_mapFinIdxM_map___at_Vector_mapIdxM___spec__1___boxed(lean_object*, lean_object*, lean_object*, lean_object*, lean_object*, lean_object*, lean_object*, lean_object*, lean_object*, lean_object*, lean_object*, lean_object*); LEAN_EXPORT lean_object* l_Vector_anyM___boxed(lean_object*, lean_object*, lean_object*); LEAN_EXPORT lean_object* l_Vector_set_x21(lean_object*, lean_object*); static lean_object* l___private_Init_Data_Vector_Basic_0__reprVector____x40_Init_Data_Vector_Basic___hyg_34____rarg___closed__20; LEAN_EXPORT lean_object* l_Vector_setIfInBounds___rarg___boxed(lean_object*, lean_object*, lean_object*); LEAN_EXPORT lean_object* l_Vector_findM_x3f___rarg___lambda__2(lean_object*, lean_object*, lean_object*, lean_object*); +static lean_object* l___auto____x40_Init_Data_Vector_Basic___hyg_7608____closed__17; LEAN_EXPORT lean_object* l_Array_anyMUnsafe_any___at_Vector_all___spec__1___rarg___boxed(lean_object*, lean_object*, lean_object*, lean_object*); +static lean_object* l___auto____x40_Init_Data_Vector_Basic___hyg_7608____closed__33; static lean_object* l_Vector_lex___rarg___closed__1; LEAN_EXPORT lean_object* l_Vector_back_x3f(lean_object*, lean_object*); -static lean_object* l___auto____x40_Init_Data_Vector_Basic___hyg_7607____closed__19; LEAN_EXPORT lean_object* l_Vector_append(lean_object*, lean_object*, lean_object*); LEAN_EXPORT lean_object* l_panic___at_Vector_swapAt_x21___spec__1___rarg(lean_object*, lean_object*, lean_object*); LEAN_EXPORT lean_object* l_Vector_lex(lean_object*); @@ -357,6 +352,7 @@ static lean_object* l_Vector___aux__Init__Data__Vector__Basic______macroRules__V static lean_object* l_Vector___aux__Init__Data__Vector__Basic______macroRules__Vector__term_x23v_x5b___x2c_x5d__1___closed__20; lean_object* l_Array_anyMUnsafe_any___rarg(lean_object*, lean_object*, lean_object*, size_t, size_t); LEAN_EXPORT lean_object* l_Vector_foldrM___rarg(lean_object*, lean_object*, lean_object*, lean_object*); +static lean_object* 
l___auto____x40_Init_Data_Vector_Basic___hyg_7608____closed__20; LEAN_EXPORT uint8_t l_Vector_lex___rarg___lambda__1(lean_object*); LEAN_EXPORT lean_object* l_Vector_insertIdx___rarg(lean_object*, lean_object*, lean_object*, lean_object*); LEAN_EXPORT lean_object* l_Vector_leftpad(lean_object*); @@ -365,30 +361,32 @@ LEAN_EXPORT lean_object* l_Vector_emptyWithCapacity(lean_object*); static lean_object* l___private_Init_Data_Vector_Basic_0__reprVector____x40_Init_Data_Vector_Basic___hyg_34____rarg___closed__1; static lean_object* l_Vector___aux__Init__Data__Vector__Basic______macroRules__Vector__term_x23v_x5b___x2c_x5d__1___closed__24; LEAN_EXPORT lean_object* l_Array_mapMUnsafe_map___at_Vector_flatten___spec__1___rarg(lean_object*, size_t, size_t, lean_object*); +static lean_object* l___auto____x40_Init_Data_Vector_Basic___hyg_7608____closed__1; static lean_object* l___private_Init_Data_Vector_Basic_0__reprVector____x40_Init_Data_Vector_Basic___hyg_34____rarg___closed__27; LEAN_EXPORT lean_object* l_Vector_findRevM_x3f___rarg(lean_object*, lean_object*, lean_object*); lean_object* l_Lean_addMacroScope(lean_object*, lean_object*, lean_object*); +static lean_object* l___auto____x40_Init_Data_Vector_Basic___hyg_7608____closed__28; LEAN_EXPORT lean_object* l_Vector___aux__Init__Data__Vector__Basic______macroRules__Vector__term_x23v_x5b___x2c_x5d__1(lean_object*, lean_object*, lean_object*); LEAN_EXPORT lean_object* l_Vector_leftpad___rarg(lean_object*, lean_object*, lean_object*, lean_object*); LEAN_EXPORT lean_object* l_Array_forIn_x27Unsafe_loop___at_Vector_findM_x3f___spec__1___rarg(lean_object*, lean_object*, lean_object*, lean_object*, lean_object*, lean_object*, lean_object*, size_t, size_t, lean_object*); LEAN_EXPORT lean_object* l_Array_foldrMUnsafe_fold___at_Vector_countP___spec__1___rarg___boxed(lean_object*, lean_object*, lean_object*, lean_object*, lean_object*); LEAN_EXPORT lean_object* l_Vector_findRev_x3f___rarg(lean_object*, lean_object*); -LEAN_EXPORT lean_object* l___auto____x40_Init_Data_Vector_Basic___hyg_6090_; static lean_object* l_Vector_eraseIdx_x21___rarg___closed__4; LEAN_EXPORT lean_object* l_Vector_elimAsList___rarg(lean_object*, lean_object*); LEAN_EXPORT lean_object* l_Vector_findRev_x3f___boxed(lean_object*, lean_object*); LEAN_EXPORT lean_object* l_Vector_mapM_go___rarg___lambda__1___boxed(lean_object*, lean_object*, lean_object*, lean_object*, lean_object*, lean_object*, lean_object*); LEAN_EXPORT lean_object* l_Vector_instInhabited___rarg(lean_object*, lean_object*); -static lean_object* l___auto____x40_Init_Data_Vector_Basic___hyg_1161____closed__7; LEAN_EXPORT lean_object* l_Vector_get___rarg___boxed(lean_object*, lean_object*); LEAN_EXPORT lean_object* l_Vector_range_x27___boxed(lean_object*, lean_object*, lean_object*); LEAN_EXPORT lean_object* l_Array_foldrMUnsafe_fold___at_Vector_count___spec__1(lean_object*); LEAN_EXPORT lean_object* l_Vector_emptyWithCapacity___rarg___boxed(lean_object*); lean_object* l_Lean_Name_str___override(lean_object*, lean_object*); LEAN_EXPORT lean_object* l_Vector_flatMap___boxed(lean_object*, lean_object*, lean_object*); +static lean_object* l___auto____x40_Init_Data_Vector_Basic___hyg_7608____closed__18; LEAN_EXPORT lean_object* l_Array_forIn_x27Unsafe_loop___at_Vector_instForIn_x27InferInstanceMembership___spec__1___rarg___lambda__1(lean_object*, size_t, lean_object*, lean_object*, lean_object*, lean_object*, size_t, lean_object*); LEAN_EXPORT lean_object* 
l_Array_foldrMUnsafe_fold___at_Vector_count___spec__2___rarg(lean_object*, lean_object*, lean_object*, size_t, size_t, lean_object*); LEAN_EXPORT lean_object* l_Vector_foldl___rarg___boxed(lean_object*, lean_object*, lean_object*); +static lean_object* l___auto____x40_Init_Data_Vector_Basic___hyg_7608____closed__9; LEAN_EXPORT lean_object* l_Vector_foldr___rarg(lean_object*, lean_object*, lean_object*); lean_object* l_Lean_Syntax_node2(lean_object*, lean_object*, lean_object*, lean_object*); static lean_object* l_Vector_swapAt_x21___rarg___closed__1; @@ -396,6 +394,7 @@ LEAN_EXPORT lean_object* l_Vector_drop___rarg(lean_object*, lean_object*); LEAN_EXPORT lean_object* l_Vector_zipWithIndex___rarg(lean_object*, lean_object*); static lean_object* l___private_Init_Data_Vector_Basic_0__reprVector____x40_Init_Data_Vector_Basic___hyg_34____rarg___closed__28; static lean_object* l_Vector___aux__Init__Data__Vector__Basic______macroRules__Vector__term_x23v_x5b___x2c_x5d__1___closed__13; +static lean_object* l___auto____x40_Init_Data_Vector_Basic___hyg_1162____closed__13; LEAN_EXPORT lean_object* l_Vector_elimAsArray___boxed(lean_object*, lean_object*, lean_object*); LEAN_EXPORT lean_object* l_Array_foldlMUnsafe_fold___at_Vector_flatMap___spec__1___rarg(lean_object*, lean_object*, lean_object*, size_t, size_t, lean_object*); LEAN_EXPORT lean_object* l_Vector_insertIdx___boxed(lean_object*, lean_object*); @@ -406,17 +405,19 @@ lean_object* l___private_Init_Util_0__mkPanicMessageWithDecl(lean_object*, lean_ LEAN_EXPORT lean_object* l_Vector_lex___rarg(lean_object*, lean_object*, lean_object*, lean_object*, lean_object*); static lean_object* l_Vector_swapAt_x21___rarg___closed__4; LEAN_EXPORT lean_object* l_Vector_back_x3f___rarg(lean_object*); +static lean_object* l___auto____x40_Init_Data_Vector_Basic___hyg_7608____closed__3; static lean_object* l___private_Init_Data_Vector_Basic_0__reprVector____x40_Init_Data_Vector_Basic___hyg_34____rarg___closed__26; +static lean_object* l___auto____x40_Init_Data_Vector_Basic___hyg_1162____closed__5; LEAN_EXPORT lean_object* l_Array_findSomeRevM_x3f_find___at_Vector_findRevM_x3f___spec__1___rarg___boxed(lean_object*, lean_object*, lean_object*, lean_object*, lean_object*); LEAN_EXPORT lean_object* l_Vector_swapAt_x21___rarg(lean_object*, lean_object*, lean_object*, lean_object*); -LEAN_EXPORT lean_object* l___auto____x40_Init_Data_Vector_Basic___hyg_7607_; +static lean_object* l___auto____x40_Init_Data_Vector_Basic___hyg_7608____closed__30; LEAN_EXPORT lean_object* l_Array_forIn_x27Unsafe_loop___at_Vector_findSomeM_x3f___spec__1___rarg___lambda__2___boxed(lean_object*, lean_object*, lean_object*, lean_object*, lean_object*, lean_object*, lean_object*, lean_object*, lean_object*, lean_object*); static lean_object* l_Vector_eraseIdx_x21___rarg___closed__2; -static lean_object* l___auto____x40_Init_Data_Vector_Basic___hyg_7607____closed__10; LEAN_EXPORT lean_object* l_Vector_set___rarg___boxed(lean_object*, lean_object*, lean_object*, lean_object*); static lean_object* l_Vector___aux__Init__Data__Vector__Basic______macroRules__Vector__term_x23v_x5b___x2c_x5d__1___closed__17; LEAN_EXPORT lean_object* l_Vector_firstM(lean_object*, lean_object*, lean_object*, lean_object*); LEAN_EXPORT lean_object* l_Array_findSomeRevM_x3f_find___at_Vector_findSomeRev_x3f___spec__1(lean_object*, lean_object*); +static lean_object* l___auto____x40_Init_Data_Vector_Basic___hyg_1162____closed__11; LEAN_EXPORT lean_object* l_Vector_findSome_x3f(lean_object*, lean_object*, lean_object*); 
static lean_object* l_Vector___aux__Init__Data__Vector__Basic______macroRules__Vector__term_x23v_x5b___x2c_x5d__1___closed__7; LEAN_EXPORT lean_object* l_Array_foldrMUnsafe_fold___at_Vector_countP___spec__2___rarg___boxed(lean_object*, lean_object*, lean_object*, lean_object*, lean_object*); @@ -424,12 +425,12 @@ LEAN_EXPORT lean_object* l_Vector_back_x21___rarg___boxed(lean_object*, lean_obj lean_object* l_Array_eraseIdx___rarg(lean_object*, lean_object*, lean_object*); LEAN_EXPORT lean_object* l_Vector_take___rarg(lean_object*, lean_object*); LEAN_EXPORT lean_object* l_Vector_any(lean_object*, lean_object*); -static lean_object* l___auto____x40_Init_Data_Vector_Basic___hyg_7607____closed__9; static lean_object* l_Vector___aux__Init__Data__Vector__Basic______macroRules__Vector__term_x23v_x5b___x2c_x5d__1___closed__3; LEAN_EXPORT lean_object* l_Array_foldlMUnsafe_fold___at_Vector_forM___spec__1___rarg(lean_object*, lean_object*, lean_object*, size_t, size_t, lean_object*); LEAN_EXPORT lean_object* l_Array_foldlMUnsafe_fold___at_Vector_flatten___spec__2___rarg(lean_object*, size_t, size_t, lean_object*); LEAN_EXPORT lean_object* l_Vector_indexOf_x3f___rarg___boxed(lean_object*, lean_object*, lean_object*); LEAN_EXPORT lean_object* l_Vector_foldlM___boxed(lean_object*, lean_object*, lean_object*, lean_object*); +LEAN_EXPORT lean_object* l___auto____x40_Init_Data_Vector_Basic___hyg_6513_; LEAN_EXPORT lean_object* l_Vector_head___rarg(lean_object*); LEAN_EXPORT lean_object* l_Vector_find_x3f___rarg___boxed(lean_object*, lean_object*); LEAN_EXPORT lean_object* l_Vector_zipWith___boxed(lean_object*, lean_object*, lean_object*, lean_object*); @@ -449,7 +450,6 @@ LEAN_EXPORT lean_object* l_Vector_swapAt_x21___rarg___boxed(lean_object*, lean_o LEAN_EXPORT lean_object* l_Vector_countP(lean_object*, lean_object*); LEAN_EXPORT lean_object* l_Vector_findRevM_x3f(lean_object*, lean_object*, lean_object*); LEAN_EXPORT lean_object* l_Array_mapFinIdxM_map___at_Vector_mapFinIdx___spec__1___rarg(lean_object*, lean_object*, lean_object*, lean_object*, lean_object*, lean_object*, lean_object*, lean_object*, lean_object*); -static lean_object* l___auto____x40_Init_Data_Vector_Basic___hyg_1161____closed__12; LEAN_EXPORT lean_object* l_Vector_flatMap___rarg(lean_object*, lean_object*, lean_object*); LEAN_EXPORT lean_object* l_panic___at_Vector_insertIdx_x21___spec__1(lean_object*, lean_object*); LEAN_EXPORT lean_object* l_Array_anyMUnsafe_any___at_Vector_allM___spec__1___rarg___lambda__1___boxed(lean_object*, lean_object*); @@ -469,7 +469,6 @@ LEAN_EXPORT lean_object* l_Array_anyMUnsafe_any___at_Vector_allM___spec__1___rar LEAN_EXPORT lean_object* l_Array_anyMUnsafe_any___at_Vector_allM___spec__1___rarg(lean_object*, lean_object*, lean_object*, lean_object*, size_t, size_t); LEAN_EXPORT lean_object* l_Vector_emptyWithCapacity___rarg(lean_object*); lean_object* l_Array_findFinIdx_x3f_loop___rarg(lean_object*, lean_object*, lean_object*); -LEAN_EXPORT lean_object* l___auto____x40_Init_Data_Vector_Basic___hyg_6082_; LEAN_EXPORT lean_object* l_Vector_extract___rarg___boxed(lean_object*, lean_object*, lean_object*); LEAN_EXPORT lean_object* l_Vector_foldl___boxed(lean_object*, lean_object*, lean_object*); LEAN_EXPORT lean_object* l_Vector_lex___rarg___lambda__1___boxed(lean_object*); @@ -497,7 +496,7 @@ LEAN_EXPORT lean_object* l_Std_Format_joinSep___at___private_Init_Data_Vector_Ba LEAN_EXPORT lean_object* l_Vector_any___rarg___boxed(lean_object*, lean_object*); LEAN_EXPORT lean_object* 
l_Vector_zip___rarg(lean_object*, lean_object*); LEAN_EXPORT lean_object* l_Vector_foldlM___rarg(lean_object*, lean_object*, lean_object*, lean_object*); -static lean_object* l___auto____x40_Init_Data_Vector_Basic___hyg_7607____closed__34; +static lean_object* l___auto____x40_Init_Data_Vector_Basic___hyg_7608____closed__16; static lean_object* l_Vector___aux__Init__Data__Vector__Basic______macroRules__Vector__term_x23v_x5b___x2c_x5d__1___closed__29; LEAN_EXPORT lean_object* l_Array_forIn_x27Unsafe_loop___at_Vector_findSomeM_x3f___spec__1___rarg___lambda__1(lean_object*, lean_object*, lean_object*, lean_object*); LEAN_EXPORT lean_object* l_Array_findSomeRevM_x3f_find___at_Vector_findRev_x3f___spec__1(lean_object*); @@ -505,21 +504,22 @@ LEAN_EXPORT lean_object* l_panic___at_Vector_swapAt_x21___spec__1(lean_object*, lean_object* lean_string_length(lean_object*); static lean_object* l_Vector___aux__Init__Data__Vector__Basic______macroRules__Vector__term_x23v_x5b___x2c_x5d__1___closed__21; LEAN_EXPORT lean_object* l_Vector_allM___rarg(lean_object*, lean_object*, lean_object*); -static lean_object* l___auto____x40_Init_Data_Vector_Basic___hyg_7607____closed__18; uint8_t lean_nat_dec_eq(lean_object*, lean_object*); +static lean_object* l___auto____x40_Init_Data_Vector_Basic___hyg_1162____closed__17; LEAN_EXPORT lean_object* l___private_Init_Data_Vector_Basic_0__reprVector____x40_Init_Data_Vector_Basic___hyg_34____rarg(lean_object*, lean_object*, lean_object*); uint8_t l_Array_contains___rarg(lean_object*, lean_object*, lean_object*); LEAN_EXPORT lean_object* l_Vector_rightpad(lean_object*); uint8_t lean_nat_dec_lt(lean_object*, lean_object*); +static lean_object* l___auto____x40_Init_Data_Vector_Basic___hyg_7608____closed__32; LEAN_EXPORT lean_object* l_Vector_countP___boxed(lean_object*, lean_object*); LEAN_EXPORT lean_object* l_Vector_finIdxOf_x3f___rarg___boxed(lean_object*, lean_object*, lean_object*); static lean_object* l_Vector_instHAppendHAddNat___closed__1; LEAN_EXPORT lean_object* l_Std_Format_joinSep___at___private_Init_Data_Vector_Basic_0__reprVector____x40_Init_Data_Vector_Basic___hyg_34____spec__1(lean_object*); LEAN_EXPORT lean_object* l_Array_forIn_x27Unsafe_loop___at_Vector_findM_x3f___spec__1___rarg___lambda__1___boxed(lean_object*, lean_object*, lean_object*); LEAN_EXPORT lean_object* l_Vector_mapM(lean_object*, lean_object*, lean_object*); +LEAN_EXPORT lean_object* l___auto____x40_Init_Data_Vector_Basic___hyg_6083_; LEAN_EXPORT lean_object* l_Vector_flatMapM___rarg(lean_object*, lean_object*, lean_object*, lean_object*, lean_object*, lean_object*); LEAN_EXPORT lean_object* l_Vector_isPrefixOf(lean_object*, lean_object*, lean_object*); -static lean_object* l___auto____x40_Init_Data_Vector_Basic___hyg_7607____closed__25; lean_object* l_Lean_Name_mkStr2(lean_object*, lean_object*); LEAN_EXPORT lean_object* l_Vector_swapIfInBounds(lean_object*, lean_object*); LEAN_EXPORT lean_object* l_Vector_foldlM(lean_object*, lean_object*, lean_object*, lean_object*); @@ -534,6 +534,7 @@ LEAN_EXPORT lean_object* l_Array_toVector___rarg___boxed(lean_object*); LEAN_EXPORT lean_object* l_Vector_cast(lean_object*, lean_object*, lean_object*, lean_object*); LEAN_EXPORT lean_object* l_Vector_findSomeRev_x3f___rarg(lean_object*, lean_object*); LEAN_EXPORT lean_object* l_Array_forIn_x27Unsafe_loop___at_Vector_find_x3f___spec__1(lean_object*); +static lean_object* l___auto____x40_Init_Data_Vector_Basic___hyg_7608____closed__34; static lean_object* 
l___private_Init_Data_Vector_Basic_0__reprVector____x40_Init_Data_Vector_Basic___hyg_34____rarg___closed__8; static lean_object* l___private_Init_Data_Vector_Basic_0__reprVector____x40_Init_Data_Vector_Basic___hyg_34____rarg___closed__37; LEAN_EXPORT lean_object* l_Array_forIn_x27Unsafe_loop___at_Vector_findM_x3f___spec__1(lean_object*, lean_object*); @@ -547,17 +548,17 @@ LEAN_EXPORT lean_object* l_Vector_count___boxed(lean_object*, lean_object*); uint8_t l_Array_instDecidableEq___rarg(lean_object*, lean_object*, lean_object*); lean_object* lean_nat_sub(lean_object*, lean_object*); LEAN_EXPORT lean_object* l_Vector_isEqv___rarg___boxed(lean_object*, lean_object*, lean_object*, lean_object*); -static lean_object* l___auto____x40_Init_Data_Vector_Basic___hyg_7607____closed__26; -static lean_object* l___auto____x40_Init_Data_Vector_Basic___hyg_1161____closed__4; LEAN_EXPORT lean_object* l_Vector_unzip(lean_object*, lean_object*, lean_object*); -static lean_object* l___auto____x40_Init_Data_Vector_Basic___hyg_7607____closed__1; LEAN_EXPORT lean_object* l_Vector_zipIdx___rarg___boxed(lean_object*, lean_object*); static lean_object* l_Vector_term_x23v_x5b___x2c_x5d___closed__11; +static lean_object* l___auto____x40_Init_Data_Vector_Basic___hyg_7608____closed__12; LEAN_EXPORT lean_object* l_Vector_mapIdxM(lean_object*, lean_object*, lean_object*, lean_object*, lean_object*, lean_object*, lean_object*); +static lean_object* l___auto____x40_Init_Data_Vector_Basic___hyg_7608____closed__11; LEAN_EXPORT lean_object* l_Vector_flatten___rarg___boxed(lean_object*, lean_object*, lean_object*); LEAN_EXPORT lean_object* l_Vector_findSomeRev_x3f___boxed(lean_object*, lean_object*, lean_object*); LEAN_EXPORT lean_object* l_Array_forIn_x27Unsafe_loop___at_Vector_find_x3f___spec__1___rarg___boxed(lean_object*, lean_object*, lean_object*, lean_object*, lean_object*, lean_object*, lean_object*, lean_object*); lean_object* l_Array_back_x21___rarg(lean_object*, lean_object*); +static lean_object* l___auto____x40_Init_Data_Vector_Basic___hyg_7608____closed__19; LEAN_EXPORT lean_object* l_Vector_mapIdx(lean_object*, lean_object*, lean_object*); LEAN_EXPORT lean_object* l_Array_foldlMUnsafe_fold___at_Vector_flatMap___spec__1___rarg___boxed(lean_object*, lean_object*, lean_object*, lean_object*, lean_object*, lean_object*); static lean_object* l_Vector_range___closed__1; @@ -569,7 +570,9 @@ static lean_object* l_Vector___aux__Init__Data__Vector__Basic______macroRules__V LEAN_EXPORT uint8_t l_Array_anyMUnsafe_any___at_Vector_all___spec__1___rarg(lean_object*, lean_object*, size_t, size_t); LEAN_EXPORT lean_object* l_Vector_mkEmpty___rarg___boxed(lean_object*); LEAN_EXPORT lean_object* l_Vector_set___rarg(lean_object*, lean_object*, lean_object*, lean_object*); +static lean_object* l___auto____x40_Init_Data_Vector_Basic___hyg_1162____closed__4; LEAN_EXPORT lean_object* l_Vector_find_x3f(lean_object*, lean_object*); +static lean_object* l___auto____x40_Init_Data_Vector_Basic___hyg_7608____closed__4; LEAN_EXPORT lean_object* l_Vector_flatMap(lean_object*, lean_object*, lean_object*); LEAN_EXPORT lean_object* l_Array_forIn_x27Unsafe_loop___at_Vector_findM_x3f___spec__1___rarg___boxed(lean_object*, lean_object*, lean_object*, lean_object*, lean_object*, lean_object*, lean_object*, lean_object*, lean_object*, lean_object*); size_t lean_usize_sub(size_t, size_t); @@ -580,7 +583,6 @@ LEAN_EXPORT lean_object* l_Vector_rightpad___rarg(lean_object*, lean_object*, le LEAN_EXPORT lean_object* 
l___private_Init_Data_Vector_Basic_0__decEqVector____x40_Init_Data_Vector_Basic___hyg_100_(lean_object*, lean_object*); static lean_object* l___private_Init_Data_Vector_Basic_0__reprVector____x40_Init_Data_Vector_Basic___hyg_34____rarg___closed__24; static lean_object* l_Vector_insertIdx_x21___rarg___closed__2; -static lean_object* l___auto____x40_Init_Data_Vector_Basic___hyg_7607____closed__17; static lean_object* l_Vector_term_x23v_x5b___x2c_x5d___closed__15; LEAN_EXPORT lean_object* l_Vector_countP___rarg(lean_object*, lean_object*); LEAN_EXPORT lean_object* l_panic___at_Vector_eraseIdx_x21___spec__1(lean_object*, lean_object*); @@ -599,7 +601,6 @@ LEAN_EXPORT lean_object* l_Vector_finIdxOf_x3f___rarg(lean_object*, lean_object* LEAN_EXPORT lean_object* l_Vector_mapFinIdxM_map___lambda__1(lean_object*, lean_object*, lean_object*, lean_object*, lean_object*, lean_object*, lean_object*, lean_object*); LEAN_EXPORT lean_object* l_instDecidableEqVector(lean_object*, lean_object*); lean_object* l_Array_foldrMUnsafe_fold___rarg(lean_object*, lean_object*, lean_object*, size_t, size_t, lean_object*); -static lean_object* l___auto____x40_Init_Data_Vector_Basic___hyg_7607____closed__7; static lean_object* l___private_Init_Data_Vector_Basic_0__reprVector____x40_Init_Data_Vector_Basic___hyg_34____rarg___closed__21; lean_object* lean_array_uget(lean_object*, size_t); LEAN_EXPORT lean_object* l_Vector_reverse___rarg(lean_object*); @@ -608,13 +609,12 @@ size_t lean_array_size(lean_object*); lean_object* l_Array_foldlMUnsafe_fold___rarg(lean_object*, lean_object*, lean_object*, size_t, size_t, lean_object*); LEAN_EXPORT lean_object* l_instDecidableEqVector___boxed(lean_object*, lean_object*); static lean_object* l___private_Init_Data_Vector_Basic_0__reprVector____x40_Init_Data_Vector_Basic___hyg_34____rarg___closed__12; +static lean_object* l___auto____x40_Init_Data_Vector_Basic___hyg_1162____closed__12; LEAN_EXPORT lean_object* l_Vector_instInhabited(lean_object*); -static lean_object* l___auto____x40_Init_Data_Vector_Basic___hyg_7607____closed__28; static lean_object* l_Vector___aux__Init__Data__Vector__Basic______macroRules__Vector__term_x23v_x5b___x2c_x5d__1___closed__4; LEAN_EXPORT lean_object* l_Vector_swapIfInBounds___boxed(lean_object*, lean_object*); +LEAN_EXPORT lean_object* l___auto____x40_Init_Data_Vector_Basic___hyg_6386_; lean_object* l_Lean_Name_mkStr4(lean_object*, lean_object*, lean_object*, lean_object*); -LEAN_EXPORT lean_object* l___auto____x40_Init_Data_Vector_Basic___hyg_6164_; -static lean_object* l___auto____x40_Init_Data_Vector_Basic___hyg_7607____closed__2; LEAN_EXPORT lean_object* l_Vector_swapIfInBounds___rarg(lean_object*, lean_object*, lean_object*); LEAN_EXPORT lean_object* l_Vector_mapFinIdx___rarg___boxed(lean_object*, lean_object*, lean_object*, lean_object*); static lean_object* l_Vector_instForM___closed__1; @@ -632,16 +632,18 @@ LEAN_EXPORT lean_object* l_Vector_instForM___boxed(lean_object*, lean_object*, l LEAN_EXPORT lean_object* l_Vector_take___boxed(lean_object*, lean_object*); LEAN_EXPORT lean_object* l_Vector_zipWith___rarg___boxed(lean_object*, lean_object*, lean_object*); LEAN_EXPORT lean_object* l_Array_findSomeRevM_x3f_find___at_Vector_findRev_x3f___spec__1___rarg___boxed(lean_object*, lean_object*, lean_object*, lean_object*); -static lean_object* l___auto____x40_Init_Data_Vector_Basic___hyg_1161____closed__17; +static lean_object* l___auto____x40_Init_Data_Vector_Basic___hyg_1162____closed__1; LEAN_EXPORT lean_object* 
l_Vector_mapFinIdxM_map___at_Vector_mapIdxM___spec__1___lambda__1(lean_object*, lean_object*, lean_object*, lean_object*, lean_object*, lean_object*, lean_object*, lean_object*, lean_object*); static lean_object* l_Vector_eraseIdx_x21___rarg___closed__1; LEAN_EXPORT lean_object* l_Vector_instMembership___boxed(lean_object*, lean_object*); lean_object* lean_array_get_size(lean_object*); LEAN_EXPORT lean_object* l_Vector_indexOf_x3f___boxed(lean_object*, lean_object*); LEAN_EXPORT lean_object* l_Vector_append___rarg___boxed(lean_object*, lean_object*); +static lean_object* l___auto____x40_Init_Data_Vector_Basic___hyg_1162____closed__10; LEAN_EXPORT lean_object* l_Vector_flatten(lean_object*); static lean_object* l_Vector_term_x23v_x5b___x2c_x5d___closed__5; LEAN_EXPORT lean_object* l_Vector_set(lean_object*, lean_object*); +static lean_object* l___auto____x40_Init_Data_Vector_Basic___hyg_7608____closed__25; uint8_t lean_nat_dec_le(lean_object*, lean_object*); static lean_object* l___private_Init_Data_Vector_Basic_0__reprVector____x40_Init_Data_Vector_Basic___hyg_34____rarg___closed__19; LEAN_EXPORT lean_object* l_Vector_swap(lean_object*, lean_object*); @@ -651,20 +653,21 @@ uint8_t lean_usize_dec_lt(size_t, size_t); LEAN_EXPORT lean_object* l_Vector_ofFn(lean_object*, lean_object*, lean_object*); static lean_object* l_Vector_term_x23v_x5b___x2c_x5d___closed__1; static lean_object* l_Vector_term_x23v_x5b___x2c_x5d___closed__3; +static lean_object* l___auto____x40_Init_Data_Vector_Basic___hyg_1162____closed__16; static lean_object* l_Std_Range_forIn_x27_loop___at_Vector_lex___spec__1___rarg___closed__4; LEAN_EXPORT lean_object* l_Array_foldlMUnsafe_fold___at_Vector_forM___spec__1(lean_object*, lean_object*); static lean_object* l___private_Init_Data_Vector_Basic_0__reprVector____x40_Init_Data_Vector_Basic___hyg_34____rarg___closed__23; LEAN_EXPORT lean_object* l_Array_foldlMUnsafe_fold___at_Vector_flatten___spec__2(lean_object*); lean_object* lean_nat_add(lean_object*, lean_object*); +LEAN_EXPORT lean_object* l___auto____x40_Init_Data_Vector_Basic___hyg_6091_; LEAN_EXPORT lean_object* l_Vector_findFinIdx_x3f___rarg(lean_object*, lean_object*); -LEAN_EXPORT lean_object* l___auto____x40_Init_Data_Vector_Basic___hyg_1161_; LEAN_EXPORT lean_object* l_Vector_setIfInBounds___rarg(lean_object*, lean_object*, lean_object*); LEAN_EXPORT lean_object* l_Vector_mapM_go(lean_object*, lean_object*, lean_object*); static lean_object* l_Vector_term_x23v_x5b___x2c_x5d___closed__18; +LEAN_EXPORT lean_object* l___auto____x40_Init_Data_Vector_Basic___hyg_1162_; LEAN_EXPORT lean_object* l_Array_forIn_x27Unsafe_loop___at_Vector_findSomeM_x3f___spec__1___rarg(lean_object*, lean_object*, lean_object*, lean_object*, lean_object*, lean_object*, lean_object*, size_t, size_t, lean_object*); LEAN_EXPORT lean_object* l_Vector_zipWith(lean_object*, lean_object*, lean_object*, lean_object*); static lean_object* l_Vector___aux__Init__Data__Vector__Basic______macroRules__Vector__term_x23v_x5b___x2c_x5d__1___closed__12; -LEAN_EXPORT lean_object* l___auto____x40_Init_Data_Vector_Basic___hyg_6385_; LEAN_EXPORT lean_object* l_Vector_instToStreamSubarray(lean_object*); LEAN_EXPORT lean_object* l_Vector_range(lean_object*); static lean_object* l_Vector_term_x23v_x5b___x2c_x5d___closed__17; @@ -672,10 +675,9 @@ LEAN_EXPORT lean_object* l_Array_mapFinIdxM_map___at_Vector_mapFinIdx___spec__1( LEAN_EXPORT lean_object* l_Vector_mkEmpty(lean_object*); LEAN_EXPORT lean_object* l_Vector_eraseIdx_x21___rarg___boxed(lean_object*, 
lean_object*, lean_object*); LEAN_EXPORT lean_object* l_Vector_push___rarg(lean_object*, lean_object*); -static lean_object* l___auto____x40_Init_Data_Vector_Basic___hyg_7607____closed__20; -static lean_object* l___auto____x40_Init_Data_Vector_Basic___hyg_1161____closed__2; LEAN_EXPORT lean_object* l_Vector_back_x21___rarg(lean_object*, lean_object*); lean_object* l_String_toSubstring_x27(lean_object*); +static lean_object* l___auto____x40_Init_Data_Vector_Basic___hyg_7608____closed__14; LEAN_EXPORT lean_object* l_Vector_findSome_x3f___rarg___boxed(lean_object*, lean_object*); lean_object* lean_array_uset(lean_object*, size_t, lean_object*); LEAN_EXPORT lean_object* l_Vector_findSomeRevM_x3f___rarg(lean_object*, lean_object*, lean_object*); @@ -684,11 +686,9 @@ LEAN_EXPORT lean_object* l_Vector_forM___boxed(lean_object*, lean_object*, lean_ static lean_object* l_Vector_insertIdx_x21___rarg___closed__1; LEAN_EXPORT lean_object* l_Vector_countP___rarg___boxed(lean_object*, lean_object*); LEAN_EXPORT lean_object* l_Vector_anyM(lean_object*, lean_object*, lean_object*); -static lean_object* l___auto____x40_Init_Data_Vector_Basic___hyg_7607____closed__13; LEAN_EXPORT lean_object* l_Vector_findRevM_x3f___boxed(lean_object*, lean_object*, lean_object*); lean_object* lean_mk_empty_array_with_capacity(lean_object*); LEAN_EXPORT lean_object* l_Vector_elimAsList(lean_object*, lean_object*, lean_object*); -static lean_object* l___auto____x40_Init_Data_Vector_Basic___hyg_7607____closed__8; LEAN_EXPORT lean_object* l_Vector_mkVector(lean_object*); lean_object* l_Array_insertIdx_loop___rarg(lean_object*, lean_object*, lean_object*); LEAN_EXPORT lean_object* l_Vector_append___rarg(lean_object*, lean_object*); @@ -2656,7 +2656,7 @@ lean_dec(x_2); return x_3; } } -static lean_object* _init_l___auto____x40_Init_Data_Vector_Basic___hyg_1161____closed__1() { +static lean_object* _init_l___auto____x40_Init_Data_Vector_Basic___hyg_1162____closed__1() { _start: { lean_object* x_1; @@ -2664,7 +2664,7 @@ x_1 = lean_mk_string_unchecked("Tactic", 6, 6); return x_1; } } -static lean_object* _init_l___auto____x40_Init_Data_Vector_Basic___hyg_1161____closed__2() { +static lean_object* _init_l___auto____x40_Init_Data_Vector_Basic___hyg_1162____closed__2() { _start: { lean_object* x_1; @@ -2672,19 +2672,19 @@ x_1 = lean_mk_string_unchecked("tacticSeq", 9, 9); return x_1; } } -static lean_object* _init_l___auto____x40_Init_Data_Vector_Basic___hyg_1161____closed__3() { +static lean_object* _init_l___auto____x40_Init_Data_Vector_Basic___hyg_1162____closed__3() { _start: { lean_object* x_1; lean_object* x_2; lean_object* x_3; lean_object* x_4; lean_object* x_5; x_1 = l_Vector___aux__Init__Data__Vector__Basic______macroRules__Vector__term_x23v_x5b___x2c_x5d__1___closed__1; x_2 = l_Vector___aux__Init__Data__Vector__Basic______macroRules__Vector__term_x23v_x5b___x2c_x5d__1___closed__2; -x_3 = l___auto____x40_Init_Data_Vector_Basic___hyg_1161____closed__1; -x_4 = l___auto____x40_Init_Data_Vector_Basic___hyg_1161____closed__2; +x_3 = l___auto____x40_Init_Data_Vector_Basic___hyg_1162____closed__1; +x_4 = l___auto____x40_Init_Data_Vector_Basic___hyg_1162____closed__2; x_5 = l_Lean_Name_mkStr4(x_1, x_2, x_3, x_4); return x_5; } } -static lean_object* _init_l___auto____x40_Init_Data_Vector_Basic___hyg_1161____closed__4() { +static lean_object* _init_l___auto____x40_Init_Data_Vector_Basic___hyg_1162____closed__4() { _start: { lean_object* x_1; @@ -2692,19 +2692,19 @@ x_1 = lean_mk_string_unchecked("tacticSeq1Indented", 18, 18); return 
x_1; } } -static lean_object* _init_l___auto____x40_Init_Data_Vector_Basic___hyg_1161____closed__5() { +static lean_object* _init_l___auto____x40_Init_Data_Vector_Basic___hyg_1162____closed__5() { _start: { lean_object* x_1; lean_object* x_2; lean_object* x_3; lean_object* x_4; lean_object* x_5; x_1 = l_Vector___aux__Init__Data__Vector__Basic______macroRules__Vector__term_x23v_x5b___x2c_x5d__1___closed__1; x_2 = l_Vector___aux__Init__Data__Vector__Basic______macroRules__Vector__term_x23v_x5b___x2c_x5d__1___closed__2; -x_3 = l___auto____x40_Init_Data_Vector_Basic___hyg_1161____closed__1; -x_4 = l___auto____x40_Init_Data_Vector_Basic___hyg_1161____closed__4; +x_3 = l___auto____x40_Init_Data_Vector_Basic___hyg_1162____closed__1; +x_4 = l___auto____x40_Init_Data_Vector_Basic___hyg_1162____closed__4; x_5 = l_Lean_Name_mkStr4(x_1, x_2, x_3, x_4); return x_5; } } -static lean_object* _init_l___auto____x40_Init_Data_Vector_Basic___hyg_1161____closed__6() { +static lean_object* _init_l___auto____x40_Init_Data_Vector_Basic___hyg_1162____closed__6() { _start: { lean_object* x_1; @@ -2712,17 +2712,17 @@ x_1 = lean_mk_string_unchecked("tacticGet_elem_tactic", 21, 21); return x_1; } } -static lean_object* _init_l___auto____x40_Init_Data_Vector_Basic___hyg_1161____closed__7() { +static lean_object* _init_l___auto____x40_Init_Data_Vector_Basic___hyg_1162____closed__7() { _start: { lean_object* x_1; lean_object* x_2; lean_object* x_3; x_1 = lean_box(0); -x_2 = l___auto____x40_Init_Data_Vector_Basic___hyg_1161____closed__6; +x_2 = l___auto____x40_Init_Data_Vector_Basic___hyg_1162____closed__6; x_3 = l_Lean_Name_str___override(x_1, x_2); return x_3; } } -static lean_object* _init_l___auto____x40_Init_Data_Vector_Basic___hyg_1161____closed__8() { +static lean_object* _init_l___auto____x40_Init_Data_Vector_Basic___hyg_1162____closed__8() { _start: { lean_object* x_1; @@ -2730,35 +2730,35 @@ x_1 = lean_mk_string_unchecked("get_elem_tactic", 15, 15); return x_1; } } -static lean_object* _init_l___auto____x40_Init_Data_Vector_Basic___hyg_1161____closed__9() { +static lean_object* _init_l___auto____x40_Init_Data_Vector_Basic___hyg_1162____closed__9() { _start: { lean_object* x_1; lean_object* x_2; lean_object* x_3; x_1 = lean_box(2); -x_2 = l___auto____x40_Init_Data_Vector_Basic___hyg_1161____closed__8; +x_2 = l___auto____x40_Init_Data_Vector_Basic___hyg_1162____closed__8; x_3 = lean_alloc_ctor(2, 2, 0); lean_ctor_set(x_3, 0, x_1); lean_ctor_set(x_3, 1, x_2); return x_3; } } -static lean_object* _init_l___auto____x40_Init_Data_Vector_Basic___hyg_1161____closed__10() { +static lean_object* _init_l___auto____x40_Init_Data_Vector_Basic___hyg_1162____closed__10() { _start: { lean_object* x_1; lean_object* x_2; lean_object* x_3; x_1 = l_Vector___aux__Init__Data__Vector__Basic______macroRules__Vector__term_x23v_x5b___x2c_x5d__1___closed__26; -x_2 = l___auto____x40_Init_Data_Vector_Basic___hyg_1161____closed__9; +x_2 = l___auto____x40_Init_Data_Vector_Basic___hyg_1162____closed__9; x_3 = lean_array_push(x_1, x_2); return x_3; } } -static lean_object* _init_l___auto____x40_Init_Data_Vector_Basic___hyg_1161____closed__11() { +static lean_object* _init_l___auto____x40_Init_Data_Vector_Basic___hyg_1162____closed__11() { _start: { lean_object* x_1; lean_object* x_2; lean_object* x_3; lean_object* x_4; x_1 = lean_box(2); -x_2 = l___auto____x40_Init_Data_Vector_Basic___hyg_1161____closed__7; -x_3 = l___auto____x40_Init_Data_Vector_Basic___hyg_1161____closed__10; +x_2 = 
l___auto____x40_Init_Data_Vector_Basic___hyg_1162____closed__7; +x_3 = l___auto____x40_Init_Data_Vector_Basic___hyg_1162____closed__10; x_4 = lean_alloc_ctor(1, 3, 0); lean_ctor_set(x_4, 0, x_1); lean_ctor_set(x_4, 1, x_2); @@ -2766,23 +2766,23 @@ lean_ctor_set(x_4, 2, x_3); return x_4; } } -static lean_object* _init_l___auto____x40_Init_Data_Vector_Basic___hyg_1161____closed__12() { +static lean_object* _init_l___auto____x40_Init_Data_Vector_Basic___hyg_1162____closed__12() { _start: { lean_object* x_1; lean_object* x_2; lean_object* x_3; x_1 = l_Vector___aux__Init__Data__Vector__Basic______macroRules__Vector__term_x23v_x5b___x2c_x5d__1___closed__26; -x_2 = l___auto____x40_Init_Data_Vector_Basic___hyg_1161____closed__11; +x_2 = l___auto____x40_Init_Data_Vector_Basic___hyg_1162____closed__11; x_3 = lean_array_push(x_1, x_2); return x_3; } } -static lean_object* _init_l___auto____x40_Init_Data_Vector_Basic___hyg_1161____closed__13() { +static lean_object* _init_l___auto____x40_Init_Data_Vector_Basic___hyg_1162____closed__13() { _start: { lean_object* x_1; lean_object* x_2; lean_object* x_3; lean_object* x_4; x_1 = lean_box(2); x_2 = l_Vector___aux__Init__Data__Vector__Basic______macroRules__Vector__term_x23v_x5b___x2c_x5d__1___closed__15; -x_3 = l___auto____x40_Init_Data_Vector_Basic___hyg_1161____closed__12; +x_3 = l___auto____x40_Init_Data_Vector_Basic___hyg_1162____closed__12; x_4 = lean_alloc_ctor(1, 3, 0); lean_ctor_set(x_4, 0, x_1); lean_ctor_set(x_4, 1, x_2); @@ -2790,23 +2790,23 @@ lean_ctor_set(x_4, 2, x_3); return x_4; } } -static lean_object* _init_l___auto____x40_Init_Data_Vector_Basic___hyg_1161____closed__14() { +static lean_object* _init_l___auto____x40_Init_Data_Vector_Basic___hyg_1162____closed__14() { _start: { lean_object* x_1; lean_object* x_2; lean_object* x_3; x_1 = l_Vector___aux__Init__Data__Vector__Basic______macroRules__Vector__term_x23v_x5b___x2c_x5d__1___closed__26; -x_2 = l___auto____x40_Init_Data_Vector_Basic___hyg_1161____closed__13; +x_2 = l___auto____x40_Init_Data_Vector_Basic___hyg_1162____closed__13; x_3 = lean_array_push(x_1, x_2); return x_3; } } -static lean_object* _init_l___auto____x40_Init_Data_Vector_Basic___hyg_1161____closed__15() { +static lean_object* _init_l___auto____x40_Init_Data_Vector_Basic___hyg_1162____closed__15() { _start: { lean_object* x_1; lean_object* x_2; lean_object* x_3; lean_object* x_4; x_1 = lean_box(2); -x_2 = l___auto____x40_Init_Data_Vector_Basic___hyg_1161____closed__5; -x_3 = l___auto____x40_Init_Data_Vector_Basic___hyg_1161____closed__14; +x_2 = l___auto____x40_Init_Data_Vector_Basic___hyg_1162____closed__5; +x_3 = l___auto____x40_Init_Data_Vector_Basic___hyg_1162____closed__14; x_4 = lean_alloc_ctor(1, 3, 0); lean_ctor_set(x_4, 0, x_1); lean_ctor_set(x_4, 1, x_2); @@ -2814,23 +2814,23 @@ lean_ctor_set(x_4, 2, x_3); return x_4; } } -static lean_object* _init_l___auto____x40_Init_Data_Vector_Basic___hyg_1161____closed__16() { +static lean_object* _init_l___auto____x40_Init_Data_Vector_Basic___hyg_1162____closed__16() { _start: { lean_object* x_1; lean_object* x_2; lean_object* x_3; x_1 = l_Vector___aux__Init__Data__Vector__Basic______macroRules__Vector__term_x23v_x5b___x2c_x5d__1___closed__26; -x_2 = l___auto____x40_Init_Data_Vector_Basic___hyg_1161____closed__15; +x_2 = l___auto____x40_Init_Data_Vector_Basic___hyg_1162____closed__15; x_3 = lean_array_push(x_1, x_2); return x_3; } } -static lean_object* _init_l___auto____x40_Init_Data_Vector_Basic___hyg_1161____closed__17() { +static lean_object* 
_init_l___auto____x40_Init_Data_Vector_Basic___hyg_1162____closed__17() { _start: { lean_object* x_1; lean_object* x_2; lean_object* x_3; lean_object* x_4; x_1 = lean_box(2); -x_2 = l___auto____x40_Init_Data_Vector_Basic___hyg_1161____closed__3; -x_3 = l___auto____x40_Init_Data_Vector_Basic___hyg_1161____closed__16; +x_2 = l___auto____x40_Init_Data_Vector_Basic___hyg_1162____closed__3; +x_3 = l___auto____x40_Init_Data_Vector_Basic___hyg_1162____closed__16; x_4 = lean_alloc_ctor(1, 3, 0); lean_ctor_set(x_4, 0, x_1); lean_ctor_set(x_4, 1, x_2); @@ -2838,11 +2838,11 @@ lean_ctor_set(x_4, 2, x_3); return x_4; } } -static lean_object* _init_l___auto____x40_Init_Data_Vector_Basic___hyg_1161_() { +static lean_object* _init_l___auto____x40_Init_Data_Vector_Basic___hyg_1162_() { _start: { lean_object* x_1; -x_1 = l___auto____x40_Init_Data_Vector_Basic___hyg_1161____closed__17; +x_1 = l___auto____x40_Init_Data_Vector_Basic___hyg_1162____closed__17; return x_1; } } @@ -4855,19 +4855,19 @@ lean_dec(x_1); return x_4; } } -static lean_object* _init_l___auto____x40_Init_Data_Vector_Basic___hyg_6082_() { +static lean_object* _init_l___auto____x40_Init_Data_Vector_Basic___hyg_6083_() { _start: { lean_object* x_1; -x_1 = l___auto____x40_Init_Data_Vector_Basic___hyg_1161____closed__17; +x_1 = l___auto____x40_Init_Data_Vector_Basic___hyg_1162____closed__17; return x_1; } } -static lean_object* _init_l___auto____x40_Init_Data_Vector_Basic___hyg_6090_() { +static lean_object* _init_l___auto____x40_Init_Data_Vector_Basic___hyg_6091_() { _start: { lean_object* x_1; -x_1 = l___auto____x40_Init_Data_Vector_Basic___hyg_1161____closed__17; +x_1 = l___auto____x40_Init_Data_Vector_Basic___hyg_1162____closed__17; return x_1; } } @@ -4941,11 +4941,11 @@ lean_dec(x_2); return x_3; } } -static lean_object* _init_l___auto____x40_Init_Data_Vector_Basic___hyg_6164_() { +static lean_object* _init_l___auto____x40_Init_Data_Vector_Basic___hyg_6165_() { _start: { lean_object* x_1; -x_1 = l___auto____x40_Init_Data_Vector_Basic___hyg_1161____closed__17; +x_1 = l___auto____x40_Init_Data_Vector_Basic___hyg_1162____closed__17; return x_1; } } @@ -5308,11 +5308,11 @@ lean_dec(x_2); return x_3; } } -static lean_object* _init_l___auto____x40_Init_Data_Vector_Basic___hyg_6385_() { +static lean_object* _init_l___auto____x40_Init_Data_Vector_Basic___hyg_6386_() { _start: { lean_object* x_1; -x_1 = l___auto____x40_Init_Data_Vector_Basic___hyg_1161____closed__17; +x_1 = l___auto____x40_Init_Data_Vector_Basic___hyg_1162____closed__17; return x_1; } } @@ -5442,11 +5442,11 @@ lean_dec(x_1); return x_4; } } -static lean_object* _init_l___auto____x40_Init_Data_Vector_Basic___hyg_6512_() { +static lean_object* _init_l___auto____x40_Init_Data_Vector_Basic___hyg_6513_() { _start: { lean_object* x_1; -x_1 = l___auto____x40_Init_Data_Vector_Basic___hyg_1161____closed__17; +x_1 = l___auto____x40_Init_Data_Vector_Basic___hyg_1162____closed__17; return x_1; } } @@ -8326,7 +8326,7 @@ lean_dec(x_2); return x_4; } } -static lean_object* _init_l___auto____x40_Init_Data_Vector_Basic___hyg_7607____closed__1() { +static lean_object* _init_l___auto____x40_Init_Data_Vector_Basic___hyg_7608____closed__1() { _start: { lean_object* x_1; @@ -8334,41 +8334,41 @@ x_1 = lean_mk_string_unchecked("exact", 5, 5); return x_1; } } -static lean_object* _init_l___auto____x40_Init_Data_Vector_Basic___hyg_7607____closed__2() { +static lean_object* _init_l___auto____x40_Init_Data_Vector_Basic___hyg_7608____closed__2() { _start: { lean_object* x_1; lean_object* x_2; lean_object* 
x_3; lean_object* x_4; lean_object* x_5; x_1 = l_Vector___aux__Init__Data__Vector__Basic______macroRules__Vector__term_x23v_x5b___x2c_x5d__1___closed__1; x_2 = l_Vector___aux__Init__Data__Vector__Basic______macroRules__Vector__term_x23v_x5b___x2c_x5d__1___closed__2; -x_3 = l___auto____x40_Init_Data_Vector_Basic___hyg_1161____closed__1; -x_4 = l___auto____x40_Init_Data_Vector_Basic___hyg_7607____closed__1; +x_3 = l___auto____x40_Init_Data_Vector_Basic___hyg_1162____closed__1; +x_4 = l___auto____x40_Init_Data_Vector_Basic___hyg_7608____closed__1; x_5 = l_Lean_Name_mkStr4(x_1, x_2, x_3, x_4); return x_5; } } -static lean_object* _init_l___auto____x40_Init_Data_Vector_Basic___hyg_7607____closed__3() { +static lean_object* _init_l___auto____x40_Init_Data_Vector_Basic___hyg_7608____closed__3() { _start: { lean_object* x_1; lean_object* x_2; lean_object* x_3; x_1 = lean_box(2); -x_2 = l___auto____x40_Init_Data_Vector_Basic___hyg_7607____closed__1; +x_2 = l___auto____x40_Init_Data_Vector_Basic___hyg_7608____closed__1; x_3 = lean_alloc_ctor(2, 2, 0); lean_ctor_set(x_3, 0, x_1); lean_ctor_set(x_3, 1, x_2); return x_3; } } -static lean_object* _init_l___auto____x40_Init_Data_Vector_Basic___hyg_7607____closed__4() { +static lean_object* _init_l___auto____x40_Init_Data_Vector_Basic___hyg_7608____closed__4() { _start: { lean_object* x_1; lean_object* x_2; lean_object* x_3; x_1 = l_Vector___aux__Init__Data__Vector__Basic______macroRules__Vector__term_x23v_x5b___x2c_x5d__1___closed__26; -x_2 = l___auto____x40_Init_Data_Vector_Basic___hyg_7607____closed__3; +x_2 = l___auto____x40_Init_Data_Vector_Basic___hyg_7608____closed__3; x_3 = lean_array_push(x_1, x_2); return x_3; } } -static lean_object* _init_l___auto____x40_Init_Data_Vector_Basic___hyg_7607____closed__5() { +static lean_object* _init_l___auto____x40_Init_Data_Vector_Basic___hyg_7608____closed__5() { _start: { lean_object* x_1; @@ -8376,19 +8376,19 @@ x_1 = lean_mk_string_unchecked("paren", 5, 5); return x_1; } } -static lean_object* _init_l___auto____x40_Init_Data_Vector_Basic___hyg_7607____closed__6() { +static lean_object* _init_l___auto____x40_Init_Data_Vector_Basic___hyg_7608____closed__6() { _start: { lean_object* x_1; lean_object* x_2; lean_object* x_3; lean_object* x_4; lean_object* x_5; x_1 = l_Vector___aux__Init__Data__Vector__Basic______macroRules__Vector__term_x23v_x5b___x2c_x5d__1___closed__1; x_2 = l_Vector___aux__Init__Data__Vector__Basic______macroRules__Vector__term_x23v_x5b___x2c_x5d__1___closed__2; x_3 = l_Vector___aux__Init__Data__Vector__Basic______macroRules__Vector__term_x23v_x5b___x2c_x5d__1___closed__3; -x_4 = l___auto____x40_Init_Data_Vector_Basic___hyg_7607____closed__5; +x_4 = l___auto____x40_Init_Data_Vector_Basic___hyg_7608____closed__5; x_5 = l_Lean_Name_mkStr4(x_1, x_2, x_3, x_4); return x_5; } } -static lean_object* _init_l___auto____x40_Init_Data_Vector_Basic___hyg_7607____closed__7() { +static lean_object* _init_l___auto____x40_Init_Data_Vector_Basic___hyg_7608____closed__7() { _start: { lean_object* x_1; lean_object* x_2; lean_object* x_3; @@ -8400,17 +8400,17 @@ lean_ctor_set(x_3, 1, x_2); return x_3; } } -static lean_object* _init_l___auto____x40_Init_Data_Vector_Basic___hyg_7607____closed__8() { +static lean_object* _init_l___auto____x40_Init_Data_Vector_Basic___hyg_7608____closed__8() { _start: { lean_object* x_1; lean_object* x_2; lean_object* x_3; x_1 = l_Vector___aux__Init__Data__Vector__Basic______macroRules__Vector__term_x23v_x5b___x2c_x5d__1___closed__26; -x_2 = 
l___auto____x40_Init_Data_Vector_Basic___hyg_7607____closed__7; +x_2 = l___auto____x40_Init_Data_Vector_Basic___hyg_7608____closed__7; x_3 = lean_array_push(x_1, x_2); return x_3; } } -static lean_object* _init_l___auto____x40_Init_Data_Vector_Basic___hyg_7607____closed__9() { +static lean_object* _init_l___auto____x40_Init_Data_Vector_Basic___hyg_7608____closed__9() { _start: { lean_object* x_1; @@ -8418,17 +8418,17 @@ x_1 = lean_mk_string_unchecked("term_<_", 7, 7); return x_1; } } -static lean_object* _init_l___auto____x40_Init_Data_Vector_Basic___hyg_7607____closed__10() { +static lean_object* _init_l___auto____x40_Init_Data_Vector_Basic___hyg_7608____closed__10() { _start: { lean_object* x_1; lean_object* x_2; lean_object* x_3; x_1 = lean_box(0); -x_2 = l___auto____x40_Init_Data_Vector_Basic___hyg_7607____closed__9; +x_2 = l___auto____x40_Init_Data_Vector_Basic___hyg_7608____closed__9; x_3 = l_Lean_Name_str___override(x_1, x_2); return x_3; } } -static lean_object* _init_l___auto____x40_Init_Data_Vector_Basic___hyg_7607____closed__11() { +static lean_object* _init_l___auto____x40_Init_Data_Vector_Basic___hyg_7608____closed__11() { _start: { lean_object* x_1; @@ -8436,19 +8436,19 @@ x_1 = lean_mk_string_unchecked("cdot", 4, 4); return x_1; } } -static lean_object* _init_l___auto____x40_Init_Data_Vector_Basic___hyg_7607____closed__12() { +static lean_object* _init_l___auto____x40_Init_Data_Vector_Basic___hyg_7608____closed__12() { _start: { lean_object* x_1; lean_object* x_2; lean_object* x_3; lean_object* x_4; lean_object* x_5; x_1 = l_Vector___aux__Init__Data__Vector__Basic______macroRules__Vector__term_x23v_x5b___x2c_x5d__1___closed__1; x_2 = l_Vector___aux__Init__Data__Vector__Basic______macroRules__Vector__term_x23v_x5b___x2c_x5d__1___closed__2; x_3 = l_Vector___aux__Init__Data__Vector__Basic______macroRules__Vector__term_x23v_x5b___x2c_x5d__1___closed__3; -x_4 = l___auto____x40_Init_Data_Vector_Basic___hyg_7607____closed__11; +x_4 = l___auto____x40_Init_Data_Vector_Basic___hyg_7608____closed__11; x_5 = l_Lean_Name_mkStr4(x_1, x_2, x_3, x_4); return x_5; } } -static lean_object* _init_l___auto____x40_Init_Data_Vector_Basic___hyg_7607____closed__13() { +static lean_object* _init_l___auto____x40_Init_Data_Vector_Basic___hyg_7608____closed__13() { _start: { lean_object* x_1; @@ -8456,35 +8456,35 @@ x_1 = lean_mk_string_unchecked("·", 2, 1); return x_1; } } -static lean_object* _init_l___auto____x40_Init_Data_Vector_Basic___hyg_7607____closed__14() { +static lean_object* _init_l___auto____x40_Init_Data_Vector_Basic___hyg_7608____closed__14() { _start: { lean_object* x_1; lean_object* x_2; lean_object* x_3; x_1 = lean_box(2); -x_2 = l___auto____x40_Init_Data_Vector_Basic___hyg_7607____closed__13; +x_2 = l___auto____x40_Init_Data_Vector_Basic___hyg_7608____closed__13; x_3 = lean_alloc_ctor(2, 2, 0); lean_ctor_set(x_3, 0, x_1); lean_ctor_set(x_3, 1, x_2); return x_3; } } -static lean_object* _init_l___auto____x40_Init_Data_Vector_Basic___hyg_7607____closed__15() { +static lean_object* _init_l___auto____x40_Init_Data_Vector_Basic___hyg_7608____closed__15() { _start: { lean_object* x_1; lean_object* x_2; lean_object* x_3; x_1 = l_Vector___aux__Init__Data__Vector__Basic______macroRules__Vector__term_x23v_x5b___x2c_x5d__1___closed__26; -x_2 = l___auto____x40_Init_Data_Vector_Basic___hyg_7607____closed__14; +x_2 = l___auto____x40_Init_Data_Vector_Basic___hyg_7608____closed__14; x_3 = lean_array_push(x_1, x_2); return x_3; } } -static lean_object* 
_init_l___auto____x40_Init_Data_Vector_Basic___hyg_7607____closed__16() { +static lean_object* _init_l___auto____x40_Init_Data_Vector_Basic___hyg_7608____closed__16() { _start: { lean_object* x_1; lean_object* x_2; lean_object* x_3; lean_object* x_4; x_1 = lean_box(2); -x_2 = l___auto____x40_Init_Data_Vector_Basic___hyg_7607____closed__12; -x_3 = l___auto____x40_Init_Data_Vector_Basic___hyg_7607____closed__15; +x_2 = l___auto____x40_Init_Data_Vector_Basic___hyg_7608____closed__12; +x_3 = l___auto____x40_Init_Data_Vector_Basic___hyg_7608____closed__15; x_4 = lean_alloc_ctor(1, 3, 0); lean_ctor_set(x_4, 0, x_1); lean_ctor_set(x_4, 1, x_2); @@ -8492,17 +8492,17 @@ lean_ctor_set(x_4, 2, x_3); return x_4; } } -static lean_object* _init_l___auto____x40_Init_Data_Vector_Basic___hyg_7607____closed__17() { +static lean_object* _init_l___auto____x40_Init_Data_Vector_Basic___hyg_7608____closed__17() { _start: { lean_object* x_1; lean_object* x_2; lean_object* x_3; x_1 = l_Vector___aux__Init__Data__Vector__Basic______macroRules__Vector__term_x23v_x5b___x2c_x5d__1___closed__26; -x_2 = l___auto____x40_Init_Data_Vector_Basic___hyg_7607____closed__16; +x_2 = l___auto____x40_Init_Data_Vector_Basic___hyg_7608____closed__16; x_3 = lean_array_push(x_1, x_2); return x_3; } } -static lean_object* _init_l___auto____x40_Init_Data_Vector_Basic___hyg_7607____closed__18() { +static lean_object* _init_l___auto____x40_Init_Data_Vector_Basic___hyg_7608____closed__18() { _start: { lean_object* x_1; @@ -8510,45 +8510,45 @@ x_1 = lean_mk_string_unchecked("<", 1, 1); return x_1; } } -static lean_object* _init_l___auto____x40_Init_Data_Vector_Basic___hyg_7607____closed__19() { +static lean_object* _init_l___auto____x40_Init_Data_Vector_Basic___hyg_7608____closed__19() { _start: { lean_object* x_1; lean_object* x_2; lean_object* x_3; x_1 = lean_box(2); -x_2 = l___auto____x40_Init_Data_Vector_Basic___hyg_7607____closed__18; +x_2 = l___auto____x40_Init_Data_Vector_Basic___hyg_7608____closed__18; x_3 = lean_alloc_ctor(2, 2, 0); lean_ctor_set(x_3, 0, x_1); lean_ctor_set(x_3, 1, x_2); return x_3; } } -static lean_object* _init_l___auto____x40_Init_Data_Vector_Basic___hyg_7607____closed__20() { +static lean_object* _init_l___auto____x40_Init_Data_Vector_Basic___hyg_7608____closed__20() { _start: { lean_object* x_1; lean_object* x_2; lean_object* x_3; -x_1 = l___auto____x40_Init_Data_Vector_Basic___hyg_7607____closed__17; -x_2 = l___auto____x40_Init_Data_Vector_Basic___hyg_7607____closed__19; +x_1 = l___auto____x40_Init_Data_Vector_Basic___hyg_7608____closed__17; +x_2 = l___auto____x40_Init_Data_Vector_Basic___hyg_7608____closed__19; x_3 = lean_array_push(x_1, x_2); return x_3; } } -static lean_object* _init_l___auto____x40_Init_Data_Vector_Basic___hyg_7607____closed__21() { +static lean_object* _init_l___auto____x40_Init_Data_Vector_Basic___hyg_7608____closed__21() { _start: { lean_object* x_1; lean_object* x_2; lean_object* x_3; -x_1 = l___auto____x40_Init_Data_Vector_Basic___hyg_7607____closed__20; -x_2 = l___auto____x40_Init_Data_Vector_Basic___hyg_7607____closed__16; +x_1 = l___auto____x40_Init_Data_Vector_Basic___hyg_7608____closed__20; +x_2 = l___auto____x40_Init_Data_Vector_Basic___hyg_7608____closed__16; x_3 = lean_array_push(x_1, x_2); return x_3; } } -static lean_object* _init_l___auto____x40_Init_Data_Vector_Basic___hyg_7607____closed__22() { +static lean_object* _init_l___auto____x40_Init_Data_Vector_Basic___hyg_7608____closed__22() { _start: { lean_object* x_1; lean_object* x_2; lean_object* x_3; lean_object* x_4; x_1 
= lean_box(2); -x_2 = l___auto____x40_Init_Data_Vector_Basic___hyg_7607____closed__10; -x_3 = l___auto____x40_Init_Data_Vector_Basic___hyg_7607____closed__21; +x_2 = l___auto____x40_Init_Data_Vector_Basic___hyg_7608____closed__10; +x_3 = l___auto____x40_Init_Data_Vector_Basic___hyg_7608____closed__21; x_4 = lean_alloc_ctor(1, 3, 0); lean_ctor_set(x_4, 0, x_1); lean_ctor_set(x_4, 1, x_2); @@ -8556,17 +8556,17 @@ lean_ctor_set(x_4, 2, x_3); return x_4; } } -static lean_object* _init_l___auto____x40_Init_Data_Vector_Basic___hyg_7607____closed__23() { +static lean_object* _init_l___auto____x40_Init_Data_Vector_Basic___hyg_7608____closed__23() { _start: { lean_object* x_1; lean_object* x_2; lean_object* x_3; -x_1 = l___auto____x40_Init_Data_Vector_Basic___hyg_7607____closed__8; -x_2 = l___auto____x40_Init_Data_Vector_Basic___hyg_7607____closed__22; +x_1 = l___auto____x40_Init_Data_Vector_Basic___hyg_7608____closed__8; +x_2 = l___auto____x40_Init_Data_Vector_Basic___hyg_7608____closed__22; x_3 = lean_array_push(x_1, x_2); return x_3; } } -static lean_object* _init_l___auto____x40_Init_Data_Vector_Basic___hyg_7607____closed__24() { +static lean_object* _init_l___auto____x40_Init_Data_Vector_Basic___hyg_7608____closed__24() { _start: { lean_object* x_1; lean_object* x_2; lean_object* x_3; @@ -8578,23 +8578,23 @@ lean_ctor_set(x_3, 1, x_2); return x_3; } } -static lean_object* _init_l___auto____x40_Init_Data_Vector_Basic___hyg_7607____closed__25() { +static lean_object* _init_l___auto____x40_Init_Data_Vector_Basic___hyg_7608____closed__25() { _start: { lean_object* x_1; lean_object* x_2; lean_object* x_3; -x_1 = l___auto____x40_Init_Data_Vector_Basic___hyg_7607____closed__23; -x_2 = l___auto____x40_Init_Data_Vector_Basic___hyg_7607____closed__24; +x_1 = l___auto____x40_Init_Data_Vector_Basic___hyg_7608____closed__23; +x_2 = l___auto____x40_Init_Data_Vector_Basic___hyg_7608____closed__24; x_3 = lean_array_push(x_1, x_2); return x_3; } } -static lean_object* _init_l___auto____x40_Init_Data_Vector_Basic___hyg_7607____closed__26() { +static lean_object* _init_l___auto____x40_Init_Data_Vector_Basic___hyg_7608____closed__26() { _start: { lean_object* x_1; lean_object* x_2; lean_object* x_3; lean_object* x_4; x_1 = lean_box(2); -x_2 = l___auto____x40_Init_Data_Vector_Basic___hyg_7607____closed__6; -x_3 = l___auto____x40_Init_Data_Vector_Basic___hyg_7607____closed__25; +x_2 = l___auto____x40_Init_Data_Vector_Basic___hyg_7608____closed__6; +x_3 = l___auto____x40_Init_Data_Vector_Basic___hyg_7608____closed__25; x_4 = lean_alloc_ctor(1, 3, 0); lean_ctor_set(x_4, 0, x_1); lean_ctor_set(x_4, 1, x_2); @@ -8602,23 +8602,23 @@ lean_ctor_set(x_4, 2, x_3); return x_4; } } -static lean_object* _init_l___auto____x40_Init_Data_Vector_Basic___hyg_7607____closed__27() { +static lean_object* _init_l___auto____x40_Init_Data_Vector_Basic___hyg_7608____closed__27() { _start: { lean_object* x_1; lean_object* x_2; lean_object* x_3; -x_1 = l___auto____x40_Init_Data_Vector_Basic___hyg_7607____closed__4; -x_2 = l___auto____x40_Init_Data_Vector_Basic___hyg_7607____closed__26; +x_1 = l___auto____x40_Init_Data_Vector_Basic___hyg_7608____closed__4; +x_2 = l___auto____x40_Init_Data_Vector_Basic___hyg_7608____closed__26; x_3 = lean_array_push(x_1, x_2); return x_3; } } -static lean_object* _init_l___auto____x40_Init_Data_Vector_Basic___hyg_7607____closed__28() { +static lean_object* _init_l___auto____x40_Init_Data_Vector_Basic___hyg_7608____closed__28() { _start: { lean_object* x_1; lean_object* x_2; lean_object* x_3; lean_object* x_4; 
x_1 = lean_box(2); -x_2 = l___auto____x40_Init_Data_Vector_Basic___hyg_7607____closed__2; -x_3 = l___auto____x40_Init_Data_Vector_Basic___hyg_7607____closed__27; +x_2 = l___auto____x40_Init_Data_Vector_Basic___hyg_7608____closed__2; +x_3 = l___auto____x40_Init_Data_Vector_Basic___hyg_7608____closed__27; x_4 = lean_alloc_ctor(1, 3, 0); lean_ctor_set(x_4, 0, x_1); lean_ctor_set(x_4, 1, x_2); @@ -8626,23 +8626,23 @@ lean_ctor_set(x_4, 2, x_3); return x_4; } } -static lean_object* _init_l___auto____x40_Init_Data_Vector_Basic___hyg_7607____closed__29() { +static lean_object* _init_l___auto____x40_Init_Data_Vector_Basic___hyg_7608____closed__29() { _start: { lean_object* x_1; lean_object* x_2; lean_object* x_3; x_1 = l_Vector___aux__Init__Data__Vector__Basic______macroRules__Vector__term_x23v_x5b___x2c_x5d__1___closed__26; -x_2 = l___auto____x40_Init_Data_Vector_Basic___hyg_7607____closed__28; +x_2 = l___auto____x40_Init_Data_Vector_Basic___hyg_7608____closed__28; x_3 = lean_array_push(x_1, x_2); return x_3; } } -static lean_object* _init_l___auto____x40_Init_Data_Vector_Basic___hyg_7607____closed__30() { +static lean_object* _init_l___auto____x40_Init_Data_Vector_Basic___hyg_7608____closed__30() { _start: { lean_object* x_1; lean_object* x_2; lean_object* x_3; lean_object* x_4; x_1 = lean_box(2); x_2 = l_Vector___aux__Init__Data__Vector__Basic______macroRules__Vector__term_x23v_x5b___x2c_x5d__1___closed__15; -x_3 = l___auto____x40_Init_Data_Vector_Basic___hyg_7607____closed__29; +x_3 = l___auto____x40_Init_Data_Vector_Basic___hyg_7608____closed__29; x_4 = lean_alloc_ctor(1, 3, 0); lean_ctor_set(x_4, 0, x_1); lean_ctor_set(x_4, 1, x_2); @@ -8650,23 +8650,23 @@ lean_ctor_set(x_4, 2, x_3); return x_4; } } -static lean_object* _init_l___auto____x40_Init_Data_Vector_Basic___hyg_7607____closed__31() { +static lean_object* _init_l___auto____x40_Init_Data_Vector_Basic___hyg_7608____closed__31() { _start: { lean_object* x_1; lean_object* x_2; lean_object* x_3; x_1 = l_Vector___aux__Init__Data__Vector__Basic______macroRules__Vector__term_x23v_x5b___x2c_x5d__1___closed__26; -x_2 = l___auto____x40_Init_Data_Vector_Basic___hyg_7607____closed__30; +x_2 = l___auto____x40_Init_Data_Vector_Basic___hyg_7608____closed__30; x_3 = lean_array_push(x_1, x_2); return x_3; } } -static lean_object* _init_l___auto____x40_Init_Data_Vector_Basic___hyg_7607____closed__32() { +static lean_object* _init_l___auto____x40_Init_Data_Vector_Basic___hyg_7608____closed__32() { _start: { lean_object* x_1; lean_object* x_2; lean_object* x_3; lean_object* x_4; x_1 = lean_box(2); -x_2 = l___auto____x40_Init_Data_Vector_Basic___hyg_1161____closed__5; -x_3 = l___auto____x40_Init_Data_Vector_Basic___hyg_7607____closed__31; +x_2 = l___auto____x40_Init_Data_Vector_Basic___hyg_1162____closed__5; +x_3 = l___auto____x40_Init_Data_Vector_Basic___hyg_7608____closed__31; x_4 = lean_alloc_ctor(1, 3, 0); lean_ctor_set(x_4, 0, x_1); lean_ctor_set(x_4, 1, x_2); @@ -8674,23 +8674,23 @@ lean_ctor_set(x_4, 2, x_3); return x_4; } } -static lean_object* _init_l___auto____x40_Init_Data_Vector_Basic___hyg_7607____closed__33() { +static lean_object* _init_l___auto____x40_Init_Data_Vector_Basic___hyg_7608____closed__33() { _start: { lean_object* x_1; lean_object* x_2; lean_object* x_3; x_1 = l_Vector___aux__Init__Data__Vector__Basic______macroRules__Vector__term_x23v_x5b___x2c_x5d__1___closed__26; -x_2 = l___auto____x40_Init_Data_Vector_Basic___hyg_7607____closed__32; +x_2 = l___auto____x40_Init_Data_Vector_Basic___hyg_7608____closed__32; x_3 = 
lean_array_push(x_1, x_2); return x_3; } } -static lean_object* _init_l___auto____x40_Init_Data_Vector_Basic___hyg_7607____closed__34() { +static lean_object* _init_l___auto____x40_Init_Data_Vector_Basic___hyg_7608____closed__34() { _start: { lean_object* x_1; lean_object* x_2; lean_object* x_3; lean_object* x_4; x_1 = lean_box(2); -x_2 = l___auto____x40_Init_Data_Vector_Basic___hyg_1161____closed__3; -x_3 = l___auto____x40_Init_Data_Vector_Basic___hyg_7607____closed__33; +x_2 = l___auto____x40_Init_Data_Vector_Basic___hyg_1162____closed__3; +x_3 = l___auto____x40_Init_Data_Vector_Basic___hyg_7608____closed__33; x_4 = lean_alloc_ctor(1, 3, 0); lean_ctor_set(x_4, 0, x_1); lean_ctor_set(x_4, 1, x_2); @@ -8698,11 +8698,11 @@ lean_ctor_set(x_4, 2, x_3); return x_4; } } -static lean_object* _init_l___auto____x40_Init_Data_Vector_Basic___hyg_7607_() { +static lean_object* _init_l___auto____x40_Init_Data_Vector_Basic___hyg_7608_() { _start: { lean_object* x_1; -x_1 = l___auto____x40_Init_Data_Vector_Basic___hyg_7607____closed__34; +x_1 = l___auto____x40_Init_Data_Vector_Basic___hyg_7608____closed__34; return x_1; } } @@ -9149,52 +9149,52 @@ l_Vector___aux__Init__Data__Vector__Basic______macroRules__Vector__term_x23v_x5b lean_mark_persistent(l_Vector___aux__Init__Data__Vector__Basic______macroRules__Vector__term_x23v_x5b___x2c_x5d__1___closed__30); l_Vector___aux__Init__Data__Vector__Basic______macroRules__Vector__term_x23v_x5b___x2c_x5d__1___closed__31 = _init_l_Vector___aux__Init__Data__Vector__Basic______macroRules__Vector__term_x23v_x5b___x2c_x5d__1___closed__31(); lean_mark_persistent(l_Vector___aux__Init__Data__Vector__Basic______macroRules__Vector__term_x23v_x5b___x2c_x5d__1___closed__31); -l___auto____x40_Init_Data_Vector_Basic___hyg_1161____closed__1 = _init_l___auto____x40_Init_Data_Vector_Basic___hyg_1161____closed__1(); -lean_mark_persistent(l___auto____x40_Init_Data_Vector_Basic___hyg_1161____closed__1); -l___auto____x40_Init_Data_Vector_Basic___hyg_1161____closed__2 = _init_l___auto____x40_Init_Data_Vector_Basic___hyg_1161____closed__2(); -lean_mark_persistent(l___auto____x40_Init_Data_Vector_Basic___hyg_1161____closed__2); -l___auto____x40_Init_Data_Vector_Basic___hyg_1161____closed__3 = _init_l___auto____x40_Init_Data_Vector_Basic___hyg_1161____closed__3(); -lean_mark_persistent(l___auto____x40_Init_Data_Vector_Basic___hyg_1161____closed__3); -l___auto____x40_Init_Data_Vector_Basic___hyg_1161____closed__4 = _init_l___auto____x40_Init_Data_Vector_Basic___hyg_1161____closed__4(); -lean_mark_persistent(l___auto____x40_Init_Data_Vector_Basic___hyg_1161____closed__4); -l___auto____x40_Init_Data_Vector_Basic___hyg_1161____closed__5 = _init_l___auto____x40_Init_Data_Vector_Basic___hyg_1161____closed__5(); -lean_mark_persistent(l___auto____x40_Init_Data_Vector_Basic___hyg_1161____closed__5); -l___auto____x40_Init_Data_Vector_Basic___hyg_1161____closed__6 = _init_l___auto____x40_Init_Data_Vector_Basic___hyg_1161____closed__6(); -lean_mark_persistent(l___auto____x40_Init_Data_Vector_Basic___hyg_1161____closed__6); -l___auto____x40_Init_Data_Vector_Basic___hyg_1161____closed__7 = _init_l___auto____x40_Init_Data_Vector_Basic___hyg_1161____closed__7(); -lean_mark_persistent(l___auto____x40_Init_Data_Vector_Basic___hyg_1161____closed__7); -l___auto____x40_Init_Data_Vector_Basic___hyg_1161____closed__8 = _init_l___auto____x40_Init_Data_Vector_Basic___hyg_1161____closed__8(); -lean_mark_persistent(l___auto____x40_Init_Data_Vector_Basic___hyg_1161____closed__8); 
-l___auto____x40_Init_Data_Vector_Basic___hyg_1161____closed__9 = _init_l___auto____x40_Init_Data_Vector_Basic___hyg_1161____closed__9(); -lean_mark_persistent(l___auto____x40_Init_Data_Vector_Basic___hyg_1161____closed__9); -l___auto____x40_Init_Data_Vector_Basic___hyg_1161____closed__10 = _init_l___auto____x40_Init_Data_Vector_Basic___hyg_1161____closed__10(); -lean_mark_persistent(l___auto____x40_Init_Data_Vector_Basic___hyg_1161____closed__10); -l___auto____x40_Init_Data_Vector_Basic___hyg_1161____closed__11 = _init_l___auto____x40_Init_Data_Vector_Basic___hyg_1161____closed__11(); -lean_mark_persistent(l___auto____x40_Init_Data_Vector_Basic___hyg_1161____closed__11); -l___auto____x40_Init_Data_Vector_Basic___hyg_1161____closed__12 = _init_l___auto____x40_Init_Data_Vector_Basic___hyg_1161____closed__12(); -lean_mark_persistent(l___auto____x40_Init_Data_Vector_Basic___hyg_1161____closed__12); -l___auto____x40_Init_Data_Vector_Basic___hyg_1161____closed__13 = _init_l___auto____x40_Init_Data_Vector_Basic___hyg_1161____closed__13(); -lean_mark_persistent(l___auto____x40_Init_Data_Vector_Basic___hyg_1161____closed__13); -l___auto____x40_Init_Data_Vector_Basic___hyg_1161____closed__14 = _init_l___auto____x40_Init_Data_Vector_Basic___hyg_1161____closed__14(); -lean_mark_persistent(l___auto____x40_Init_Data_Vector_Basic___hyg_1161____closed__14); -l___auto____x40_Init_Data_Vector_Basic___hyg_1161____closed__15 = _init_l___auto____x40_Init_Data_Vector_Basic___hyg_1161____closed__15(); -lean_mark_persistent(l___auto____x40_Init_Data_Vector_Basic___hyg_1161____closed__15); -l___auto____x40_Init_Data_Vector_Basic___hyg_1161____closed__16 = _init_l___auto____x40_Init_Data_Vector_Basic___hyg_1161____closed__16(); -lean_mark_persistent(l___auto____x40_Init_Data_Vector_Basic___hyg_1161____closed__16); -l___auto____x40_Init_Data_Vector_Basic___hyg_1161____closed__17 = _init_l___auto____x40_Init_Data_Vector_Basic___hyg_1161____closed__17(); -lean_mark_persistent(l___auto____x40_Init_Data_Vector_Basic___hyg_1161____closed__17); -l___auto____x40_Init_Data_Vector_Basic___hyg_1161_ = _init_l___auto____x40_Init_Data_Vector_Basic___hyg_1161_(); -lean_mark_persistent(l___auto____x40_Init_Data_Vector_Basic___hyg_1161_); +l___auto____x40_Init_Data_Vector_Basic___hyg_1162____closed__1 = _init_l___auto____x40_Init_Data_Vector_Basic___hyg_1162____closed__1(); +lean_mark_persistent(l___auto____x40_Init_Data_Vector_Basic___hyg_1162____closed__1); +l___auto____x40_Init_Data_Vector_Basic___hyg_1162____closed__2 = _init_l___auto____x40_Init_Data_Vector_Basic___hyg_1162____closed__2(); +lean_mark_persistent(l___auto____x40_Init_Data_Vector_Basic___hyg_1162____closed__2); +l___auto____x40_Init_Data_Vector_Basic___hyg_1162____closed__3 = _init_l___auto____x40_Init_Data_Vector_Basic___hyg_1162____closed__3(); +lean_mark_persistent(l___auto____x40_Init_Data_Vector_Basic___hyg_1162____closed__3); +l___auto____x40_Init_Data_Vector_Basic___hyg_1162____closed__4 = _init_l___auto____x40_Init_Data_Vector_Basic___hyg_1162____closed__4(); +lean_mark_persistent(l___auto____x40_Init_Data_Vector_Basic___hyg_1162____closed__4); +l___auto____x40_Init_Data_Vector_Basic___hyg_1162____closed__5 = _init_l___auto____x40_Init_Data_Vector_Basic___hyg_1162____closed__5(); +lean_mark_persistent(l___auto____x40_Init_Data_Vector_Basic___hyg_1162____closed__5); +l___auto____x40_Init_Data_Vector_Basic___hyg_1162____closed__6 = _init_l___auto____x40_Init_Data_Vector_Basic___hyg_1162____closed__6(); 
+lean_mark_persistent(l___auto____x40_Init_Data_Vector_Basic___hyg_1162____closed__6); +l___auto____x40_Init_Data_Vector_Basic___hyg_1162____closed__7 = _init_l___auto____x40_Init_Data_Vector_Basic___hyg_1162____closed__7(); +lean_mark_persistent(l___auto____x40_Init_Data_Vector_Basic___hyg_1162____closed__7); +l___auto____x40_Init_Data_Vector_Basic___hyg_1162____closed__8 = _init_l___auto____x40_Init_Data_Vector_Basic___hyg_1162____closed__8(); +lean_mark_persistent(l___auto____x40_Init_Data_Vector_Basic___hyg_1162____closed__8); +l___auto____x40_Init_Data_Vector_Basic___hyg_1162____closed__9 = _init_l___auto____x40_Init_Data_Vector_Basic___hyg_1162____closed__9(); +lean_mark_persistent(l___auto____x40_Init_Data_Vector_Basic___hyg_1162____closed__9); +l___auto____x40_Init_Data_Vector_Basic___hyg_1162____closed__10 = _init_l___auto____x40_Init_Data_Vector_Basic___hyg_1162____closed__10(); +lean_mark_persistent(l___auto____x40_Init_Data_Vector_Basic___hyg_1162____closed__10); +l___auto____x40_Init_Data_Vector_Basic___hyg_1162____closed__11 = _init_l___auto____x40_Init_Data_Vector_Basic___hyg_1162____closed__11(); +lean_mark_persistent(l___auto____x40_Init_Data_Vector_Basic___hyg_1162____closed__11); +l___auto____x40_Init_Data_Vector_Basic___hyg_1162____closed__12 = _init_l___auto____x40_Init_Data_Vector_Basic___hyg_1162____closed__12(); +lean_mark_persistent(l___auto____x40_Init_Data_Vector_Basic___hyg_1162____closed__12); +l___auto____x40_Init_Data_Vector_Basic___hyg_1162____closed__13 = _init_l___auto____x40_Init_Data_Vector_Basic___hyg_1162____closed__13(); +lean_mark_persistent(l___auto____x40_Init_Data_Vector_Basic___hyg_1162____closed__13); +l___auto____x40_Init_Data_Vector_Basic___hyg_1162____closed__14 = _init_l___auto____x40_Init_Data_Vector_Basic___hyg_1162____closed__14(); +lean_mark_persistent(l___auto____x40_Init_Data_Vector_Basic___hyg_1162____closed__14); +l___auto____x40_Init_Data_Vector_Basic___hyg_1162____closed__15 = _init_l___auto____x40_Init_Data_Vector_Basic___hyg_1162____closed__15(); +lean_mark_persistent(l___auto____x40_Init_Data_Vector_Basic___hyg_1162____closed__15); +l___auto____x40_Init_Data_Vector_Basic___hyg_1162____closed__16 = _init_l___auto____x40_Init_Data_Vector_Basic___hyg_1162____closed__16(); +lean_mark_persistent(l___auto____x40_Init_Data_Vector_Basic___hyg_1162____closed__16); +l___auto____x40_Init_Data_Vector_Basic___hyg_1162____closed__17 = _init_l___auto____x40_Init_Data_Vector_Basic___hyg_1162____closed__17(); +lean_mark_persistent(l___auto____x40_Init_Data_Vector_Basic___hyg_1162____closed__17); +l___auto____x40_Init_Data_Vector_Basic___hyg_1162_ = _init_l___auto____x40_Init_Data_Vector_Basic___hyg_1162_(); +lean_mark_persistent(l___auto____x40_Init_Data_Vector_Basic___hyg_1162_); l_Vector_instHAppendHAddNat___closed__1 = _init_l_Vector_instHAppendHAddNat___closed__1(); lean_mark_persistent(l_Vector_instHAppendHAddNat___closed__1); l_Vector_mapM___rarg___closed__1 = _init_l_Vector_mapM___rarg___closed__1(); lean_mark_persistent(l_Vector_mapM___rarg___closed__1); -l___auto____x40_Init_Data_Vector_Basic___hyg_6082_ = _init_l___auto____x40_Init_Data_Vector_Basic___hyg_6082_(); -lean_mark_persistent(l___auto____x40_Init_Data_Vector_Basic___hyg_6082_); -l___auto____x40_Init_Data_Vector_Basic___hyg_6090_ = _init_l___auto____x40_Init_Data_Vector_Basic___hyg_6090_(); -lean_mark_persistent(l___auto____x40_Init_Data_Vector_Basic___hyg_6090_); -l___auto____x40_Init_Data_Vector_Basic___hyg_6164_ = _init_l___auto____x40_Init_Data_Vector_Basic___hyg_6164_(); 
-lean_mark_persistent(l___auto____x40_Init_Data_Vector_Basic___hyg_6164_); +l___auto____x40_Init_Data_Vector_Basic___hyg_6083_ = _init_l___auto____x40_Init_Data_Vector_Basic___hyg_6083_(); +lean_mark_persistent(l___auto____x40_Init_Data_Vector_Basic___hyg_6083_); +l___auto____x40_Init_Data_Vector_Basic___hyg_6091_ = _init_l___auto____x40_Init_Data_Vector_Basic___hyg_6091_(); +lean_mark_persistent(l___auto____x40_Init_Data_Vector_Basic___hyg_6091_); +l___auto____x40_Init_Data_Vector_Basic___hyg_6165_ = _init_l___auto____x40_Init_Data_Vector_Basic___hyg_6165_(); +lean_mark_persistent(l___auto____x40_Init_Data_Vector_Basic___hyg_6165_); l_Vector_swapAt_x21___rarg___closed__1 = _init_l_Vector_swapAt_x21___rarg___closed__1(); lean_mark_persistent(l_Vector_swapAt_x21___rarg___closed__1); l_Vector_swapAt_x21___rarg___closed__2 = _init_l_Vector_swapAt_x21___rarg___closed__2(); @@ -9205,8 +9205,8 @@ l_Vector_swapAt_x21___rarg___closed__4 = _init_l_Vector_swapAt_x21___rarg___clos lean_mark_persistent(l_Vector_swapAt_x21___rarg___closed__4); l_Vector_range___closed__1 = _init_l_Vector_range___closed__1(); lean_mark_persistent(l_Vector_range___closed__1); -l___auto____x40_Init_Data_Vector_Basic___hyg_6385_ = _init_l___auto____x40_Init_Data_Vector_Basic___hyg_6385_(); -lean_mark_persistent(l___auto____x40_Init_Data_Vector_Basic___hyg_6385_); +l___auto____x40_Init_Data_Vector_Basic___hyg_6386_ = _init_l___auto____x40_Init_Data_Vector_Basic___hyg_6386_(); +lean_mark_persistent(l___auto____x40_Init_Data_Vector_Basic___hyg_6386_); l_Vector_eraseIdx_x21___rarg___closed__1 = _init_l_Vector_eraseIdx_x21___rarg___closed__1(); lean_mark_persistent(l_Vector_eraseIdx_x21___rarg___closed__1); l_Vector_eraseIdx_x21___rarg___closed__2 = _init_l_Vector_eraseIdx_x21___rarg___closed__2(); @@ -9215,8 +9215,8 @@ l_Vector_eraseIdx_x21___rarg___closed__3 = _init_l_Vector_eraseIdx_x21___rarg___ lean_mark_persistent(l_Vector_eraseIdx_x21___rarg___closed__3); l_Vector_eraseIdx_x21___rarg___closed__4 = _init_l_Vector_eraseIdx_x21___rarg___closed__4(); lean_mark_persistent(l_Vector_eraseIdx_x21___rarg___closed__4); -l___auto____x40_Init_Data_Vector_Basic___hyg_6512_ = _init_l___auto____x40_Init_Data_Vector_Basic___hyg_6512_(); -lean_mark_persistent(l___auto____x40_Init_Data_Vector_Basic___hyg_6512_); +l___auto____x40_Init_Data_Vector_Basic___hyg_6513_ = _init_l___auto____x40_Init_Data_Vector_Basic___hyg_6513_(); +lean_mark_persistent(l___auto____x40_Init_Data_Vector_Basic___hyg_6513_); l_Vector_insertIdx_x21___rarg___closed__1 = _init_l_Vector_insertIdx_x21___rarg___closed__1(); lean_mark_persistent(l_Vector_insertIdx_x21___rarg___closed__1); l_Vector_insertIdx_x21___rarg___closed__2 = _init_l_Vector_insertIdx_x21___rarg___closed__2(); @@ -9229,76 +9229,76 @@ l_Vector_find_x3f___rarg___closed__2 = _init_l_Vector_find_x3f___rarg___closed__ lean_mark_persistent(l_Vector_find_x3f___rarg___closed__2); l_Vector_instForM___closed__1 = _init_l_Vector_instForM___closed__1(); lean_mark_persistent(l_Vector_instForM___closed__1); -l___auto____x40_Init_Data_Vector_Basic___hyg_7607____closed__1 = _init_l___auto____x40_Init_Data_Vector_Basic___hyg_7607____closed__1(); -lean_mark_persistent(l___auto____x40_Init_Data_Vector_Basic___hyg_7607____closed__1); -l___auto____x40_Init_Data_Vector_Basic___hyg_7607____closed__2 = _init_l___auto____x40_Init_Data_Vector_Basic___hyg_7607____closed__2(); -lean_mark_persistent(l___auto____x40_Init_Data_Vector_Basic___hyg_7607____closed__2); -l___auto____x40_Init_Data_Vector_Basic___hyg_7607____closed__3 = 
_init_l___auto____x40_Init_Data_Vector_Basic___hyg_7607____closed__3(); -lean_mark_persistent(l___auto____x40_Init_Data_Vector_Basic___hyg_7607____closed__3); -l___auto____x40_Init_Data_Vector_Basic___hyg_7607____closed__4 = _init_l___auto____x40_Init_Data_Vector_Basic___hyg_7607____closed__4(); -lean_mark_persistent(l___auto____x40_Init_Data_Vector_Basic___hyg_7607____closed__4); -l___auto____x40_Init_Data_Vector_Basic___hyg_7607____closed__5 = _init_l___auto____x40_Init_Data_Vector_Basic___hyg_7607____closed__5(); -lean_mark_persistent(l___auto____x40_Init_Data_Vector_Basic___hyg_7607____closed__5); -l___auto____x40_Init_Data_Vector_Basic___hyg_7607____closed__6 = _init_l___auto____x40_Init_Data_Vector_Basic___hyg_7607____closed__6(); -lean_mark_persistent(l___auto____x40_Init_Data_Vector_Basic___hyg_7607____closed__6); -l___auto____x40_Init_Data_Vector_Basic___hyg_7607____closed__7 = _init_l___auto____x40_Init_Data_Vector_Basic___hyg_7607____closed__7(); -lean_mark_persistent(l___auto____x40_Init_Data_Vector_Basic___hyg_7607____closed__7); -l___auto____x40_Init_Data_Vector_Basic___hyg_7607____closed__8 = _init_l___auto____x40_Init_Data_Vector_Basic___hyg_7607____closed__8(); -lean_mark_persistent(l___auto____x40_Init_Data_Vector_Basic___hyg_7607____closed__8); -l___auto____x40_Init_Data_Vector_Basic___hyg_7607____closed__9 = _init_l___auto____x40_Init_Data_Vector_Basic___hyg_7607____closed__9(); -lean_mark_persistent(l___auto____x40_Init_Data_Vector_Basic___hyg_7607____closed__9); -l___auto____x40_Init_Data_Vector_Basic___hyg_7607____closed__10 = _init_l___auto____x40_Init_Data_Vector_Basic___hyg_7607____closed__10(); -lean_mark_persistent(l___auto____x40_Init_Data_Vector_Basic___hyg_7607____closed__10); -l___auto____x40_Init_Data_Vector_Basic___hyg_7607____closed__11 = _init_l___auto____x40_Init_Data_Vector_Basic___hyg_7607____closed__11(); -lean_mark_persistent(l___auto____x40_Init_Data_Vector_Basic___hyg_7607____closed__11); -l___auto____x40_Init_Data_Vector_Basic___hyg_7607____closed__12 = _init_l___auto____x40_Init_Data_Vector_Basic___hyg_7607____closed__12(); -lean_mark_persistent(l___auto____x40_Init_Data_Vector_Basic___hyg_7607____closed__12); -l___auto____x40_Init_Data_Vector_Basic___hyg_7607____closed__13 = _init_l___auto____x40_Init_Data_Vector_Basic___hyg_7607____closed__13(); -lean_mark_persistent(l___auto____x40_Init_Data_Vector_Basic___hyg_7607____closed__13); -l___auto____x40_Init_Data_Vector_Basic___hyg_7607____closed__14 = _init_l___auto____x40_Init_Data_Vector_Basic___hyg_7607____closed__14(); -lean_mark_persistent(l___auto____x40_Init_Data_Vector_Basic___hyg_7607____closed__14); -l___auto____x40_Init_Data_Vector_Basic___hyg_7607____closed__15 = _init_l___auto____x40_Init_Data_Vector_Basic___hyg_7607____closed__15(); -lean_mark_persistent(l___auto____x40_Init_Data_Vector_Basic___hyg_7607____closed__15); -l___auto____x40_Init_Data_Vector_Basic___hyg_7607____closed__16 = _init_l___auto____x40_Init_Data_Vector_Basic___hyg_7607____closed__16(); -lean_mark_persistent(l___auto____x40_Init_Data_Vector_Basic___hyg_7607____closed__16); -l___auto____x40_Init_Data_Vector_Basic___hyg_7607____closed__17 = _init_l___auto____x40_Init_Data_Vector_Basic___hyg_7607____closed__17(); -lean_mark_persistent(l___auto____x40_Init_Data_Vector_Basic___hyg_7607____closed__17); -l___auto____x40_Init_Data_Vector_Basic___hyg_7607____closed__18 = _init_l___auto____x40_Init_Data_Vector_Basic___hyg_7607____closed__18(); 
-lean_mark_persistent(l___auto____x40_Init_Data_Vector_Basic___hyg_7607____closed__18); -l___auto____x40_Init_Data_Vector_Basic___hyg_7607____closed__19 = _init_l___auto____x40_Init_Data_Vector_Basic___hyg_7607____closed__19(); -lean_mark_persistent(l___auto____x40_Init_Data_Vector_Basic___hyg_7607____closed__19); -l___auto____x40_Init_Data_Vector_Basic___hyg_7607____closed__20 = _init_l___auto____x40_Init_Data_Vector_Basic___hyg_7607____closed__20(); -lean_mark_persistent(l___auto____x40_Init_Data_Vector_Basic___hyg_7607____closed__20); -l___auto____x40_Init_Data_Vector_Basic___hyg_7607____closed__21 = _init_l___auto____x40_Init_Data_Vector_Basic___hyg_7607____closed__21(); -lean_mark_persistent(l___auto____x40_Init_Data_Vector_Basic___hyg_7607____closed__21); -l___auto____x40_Init_Data_Vector_Basic___hyg_7607____closed__22 = _init_l___auto____x40_Init_Data_Vector_Basic___hyg_7607____closed__22(); -lean_mark_persistent(l___auto____x40_Init_Data_Vector_Basic___hyg_7607____closed__22); -l___auto____x40_Init_Data_Vector_Basic___hyg_7607____closed__23 = _init_l___auto____x40_Init_Data_Vector_Basic___hyg_7607____closed__23(); -lean_mark_persistent(l___auto____x40_Init_Data_Vector_Basic___hyg_7607____closed__23); -l___auto____x40_Init_Data_Vector_Basic___hyg_7607____closed__24 = _init_l___auto____x40_Init_Data_Vector_Basic___hyg_7607____closed__24(); -lean_mark_persistent(l___auto____x40_Init_Data_Vector_Basic___hyg_7607____closed__24); -l___auto____x40_Init_Data_Vector_Basic___hyg_7607____closed__25 = _init_l___auto____x40_Init_Data_Vector_Basic___hyg_7607____closed__25(); -lean_mark_persistent(l___auto____x40_Init_Data_Vector_Basic___hyg_7607____closed__25); -l___auto____x40_Init_Data_Vector_Basic___hyg_7607____closed__26 = _init_l___auto____x40_Init_Data_Vector_Basic___hyg_7607____closed__26(); -lean_mark_persistent(l___auto____x40_Init_Data_Vector_Basic___hyg_7607____closed__26); -l___auto____x40_Init_Data_Vector_Basic___hyg_7607____closed__27 = _init_l___auto____x40_Init_Data_Vector_Basic___hyg_7607____closed__27(); -lean_mark_persistent(l___auto____x40_Init_Data_Vector_Basic___hyg_7607____closed__27); -l___auto____x40_Init_Data_Vector_Basic___hyg_7607____closed__28 = _init_l___auto____x40_Init_Data_Vector_Basic___hyg_7607____closed__28(); -lean_mark_persistent(l___auto____x40_Init_Data_Vector_Basic___hyg_7607____closed__28); -l___auto____x40_Init_Data_Vector_Basic___hyg_7607____closed__29 = _init_l___auto____x40_Init_Data_Vector_Basic___hyg_7607____closed__29(); -lean_mark_persistent(l___auto____x40_Init_Data_Vector_Basic___hyg_7607____closed__29); -l___auto____x40_Init_Data_Vector_Basic___hyg_7607____closed__30 = _init_l___auto____x40_Init_Data_Vector_Basic___hyg_7607____closed__30(); -lean_mark_persistent(l___auto____x40_Init_Data_Vector_Basic___hyg_7607____closed__30); -l___auto____x40_Init_Data_Vector_Basic___hyg_7607____closed__31 = _init_l___auto____x40_Init_Data_Vector_Basic___hyg_7607____closed__31(); -lean_mark_persistent(l___auto____x40_Init_Data_Vector_Basic___hyg_7607____closed__31); -l___auto____x40_Init_Data_Vector_Basic___hyg_7607____closed__32 = _init_l___auto____x40_Init_Data_Vector_Basic___hyg_7607____closed__32(); -lean_mark_persistent(l___auto____x40_Init_Data_Vector_Basic___hyg_7607____closed__32); -l___auto____x40_Init_Data_Vector_Basic___hyg_7607____closed__33 = _init_l___auto____x40_Init_Data_Vector_Basic___hyg_7607____closed__33(); -lean_mark_persistent(l___auto____x40_Init_Data_Vector_Basic___hyg_7607____closed__33); 
-l___auto____x40_Init_Data_Vector_Basic___hyg_7607____closed__34 = _init_l___auto____x40_Init_Data_Vector_Basic___hyg_7607____closed__34(); -lean_mark_persistent(l___auto____x40_Init_Data_Vector_Basic___hyg_7607____closed__34); -l___auto____x40_Init_Data_Vector_Basic___hyg_7607_ = _init_l___auto____x40_Init_Data_Vector_Basic___hyg_7607_(); -lean_mark_persistent(l___auto____x40_Init_Data_Vector_Basic___hyg_7607_); +l___auto____x40_Init_Data_Vector_Basic___hyg_7608____closed__1 = _init_l___auto____x40_Init_Data_Vector_Basic___hyg_7608____closed__1(); +lean_mark_persistent(l___auto____x40_Init_Data_Vector_Basic___hyg_7608____closed__1); +l___auto____x40_Init_Data_Vector_Basic___hyg_7608____closed__2 = _init_l___auto____x40_Init_Data_Vector_Basic___hyg_7608____closed__2(); +lean_mark_persistent(l___auto____x40_Init_Data_Vector_Basic___hyg_7608____closed__2); +l___auto____x40_Init_Data_Vector_Basic___hyg_7608____closed__3 = _init_l___auto____x40_Init_Data_Vector_Basic___hyg_7608____closed__3(); +lean_mark_persistent(l___auto____x40_Init_Data_Vector_Basic___hyg_7608____closed__3); +l___auto____x40_Init_Data_Vector_Basic___hyg_7608____closed__4 = _init_l___auto____x40_Init_Data_Vector_Basic___hyg_7608____closed__4(); +lean_mark_persistent(l___auto____x40_Init_Data_Vector_Basic___hyg_7608____closed__4); +l___auto____x40_Init_Data_Vector_Basic___hyg_7608____closed__5 = _init_l___auto____x40_Init_Data_Vector_Basic___hyg_7608____closed__5(); +lean_mark_persistent(l___auto____x40_Init_Data_Vector_Basic___hyg_7608____closed__5); +l___auto____x40_Init_Data_Vector_Basic___hyg_7608____closed__6 = _init_l___auto____x40_Init_Data_Vector_Basic___hyg_7608____closed__6(); +lean_mark_persistent(l___auto____x40_Init_Data_Vector_Basic___hyg_7608____closed__6); +l___auto____x40_Init_Data_Vector_Basic___hyg_7608____closed__7 = _init_l___auto____x40_Init_Data_Vector_Basic___hyg_7608____closed__7(); +lean_mark_persistent(l___auto____x40_Init_Data_Vector_Basic___hyg_7608____closed__7); +l___auto____x40_Init_Data_Vector_Basic___hyg_7608____closed__8 = _init_l___auto____x40_Init_Data_Vector_Basic___hyg_7608____closed__8(); +lean_mark_persistent(l___auto____x40_Init_Data_Vector_Basic___hyg_7608____closed__8); +l___auto____x40_Init_Data_Vector_Basic___hyg_7608____closed__9 = _init_l___auto____x40_Init_Data_Vector_Basic___hyg_7608____closed__9(); +lean_mark_persistent(l___auto____x40_Init_Data_Vector_Basic___hyg_7608____closed__9); +l___auto____x40_Init_Data_Vector_Basic___hyg_7608____closed__10 = _init_l___auto____x40_Init_Data_Vector_Basic___hyg_7608____closed__10(); +lean_mark_persistent(l___auto____x40_Init_Data_Vector_Basic___hyg_7608____closed__10); +l___auto____x40_Init_Data_Vector_Basic___hyg_7608____closed__11 = _init_l___auto____x40_Init_Data_Vector_Basic___hyg_7608____closed__11(); +lean_mark_persistent(l___auto____x40_Init_Data_Vector_Basic___hyg_7608____closed__11); +l___auto____x40_Init_Data_Vector_Basic___hyg_7608____closed__12 = _init_l___auto____x40_Init_Data_Vector_Basic___hyg_7608____closed__12(); +lean_mark_persistent(l___auto____x40_Init_Data_Vector_Basic___hyg_7608____closed__12); +l___auto____x40_Init_Data_Vector_Basic___hyg_7608____closed__13 = _init_l___auto____x40_Init_Data_Vector_Basic___hyg_7608____closed__13(); +lean_mark_persistent(l___auto____x40_Init_Data_Vector_Basic___hyg_7608____closed__13); +l___auto____x40_Init_Data_Vector_Basic___hyg_7608____closed__14 = _init_l___auto____x40_Init_Data_Vector_Basic___hyg_7608____closed__14(); 
+lean_mark_persistent(l___auto____x40_Init_Data_Vector_Basic___hyg_7608____closed__14); +l___auto____x40_Init_Data_Vector_Basic___hyg_7608____closed__15 = _init_l___auto____x40_Init_Data_Vector_Basic___hyg_7608____closed__15(); +lean_mark_persistent(l___auto____x40_Init_Data_Vector_Basic___hyg_7608____closed__15); +l___auto____x40_Init_Data_Vector_Basic___hyg_7608____closed__16 = _init_l___auto____x40_Init_Data_Vector_Basic___hyg_7608____closed__16(); +lean_mark_persistent(l___auto____x40_Init_Data_Vector_Basic___hyg_7608____closed__16); +l___auto____x40_Init_Data_Vector_Basic___hyg_7608____closed__17 = _init_l___auto____x40_Init_Data_Vector_Basic___hyg_7608____closed__17(); +lean_mark_persistent(l___auto____x40_Init_Data_Vector_Basic___hyg_7608____closed__17); +l___auto____x40_Init_Data_Vector_Basic___hyg_7608____closed__18 = _init_l___auto____x40_Init_Data_Vector_Basic___hyg_7608____closed__18(); +lean_mark_persistent(l___auto____x40_Init_Data_Vector_Basic___hyg_7608____closed__18); +l___auto____x40_Init_Data_Vector_Basic___hyg_7608____closed__19 = _init_l___auto____x40_Init_Data_Vector_Basic___hyg_7608____closed__19(); +lean_mark_persistent(l___auto____x40_Init_Data_Vector_Basic___hyg_7608____closed__19); +l___auto____x40_Init_Data_Vector_Basic___hyg_7608____closed__20 = _init_l___auto____x40_Init_Data_Vector_Basic___hyg_7608____closed__20(); +lean_mark_persistent(l___auto____x40_Init_Data_Vector_Basic___hyg_7608____closed__20); +l___auto____x40_Init_Data_Vector_Basic___hyg_7608____closed__21 = _init_l___auto____x40_Init_Data_Vector_Basic___hyg_7608____closed__21(); +lean_mark_persistent(l___auto____x40_Init_Data_Vector_Basic___hyg_7608____closed__21); +l___auto____x40_Init_Data_Vector_Basic___hyg_7608____closed__22 = _init_l___auto____x40_Init_Data_Vector_Basic___hyg_7608____closed__22(); +lean_mark_persistent(l___auto____x40_Init_Data_Vector_Basic___hyg_7608____closed__22); +l___auto____x40_Init_Data_Vector_Basic___hyg_7608____closed__23 = _init_l___auto____x40_Init_Data_Vector_Basic___hyg_7608____closed__23(); +lean_mark_persistent(l___auto____x40_Init_Data_Vector_Basic___hyg_7608____closed__23); +l___auto____x40_Init_Data_Vector_Basic___hyg_7608____closed__24 = _init_l___auto____x40_Init_Data_Vector_Basic___hyg_7608____closed__24(); +lean_mark_persistent(l___auto____x40_Init_Data_Vector_Basic___hyg_7608____closed__24); +l___auto____x40_Init_Data_Vector_Basic___hyg_7608____closed__25 = _init_l___auto____x40_Init_Data_Vector_Basic___hyg_7608____closed__25(); +lean_mark_persistent(l___auto____x40_Init_Data_Vector_Basic___hyg_7608____closed__25); +l___auto____x40_Init_Data_Vector_Basic___hyg_7608____closed__26 = _init_l___auto____x40_Init_Data_Vector_Basic___hyg_7608____closed__26(); +lean_mark_persistent(l___auto____x40_Init_Data_Vector_Basic___hyg_7608____closed__26); +l___auto____x40_Init_Data_Vector_Basic___hyg_7608____closed__27 = _init_l___auto____x40_Init_Data_Vector_Basic___hyg_7608____closed__27(); +lean_mark_persistent(l___auto____x40_Init_Data_Vector_Basic___hyg_7608____closed__27); +l___auto____x40_Init_Data_Vector_Basic___hyg_7608____closed__28 = _init_l___auto____x40_Init_Data_Vector_Basic___hyg_7608____closed__28(); +lean_mark_persistent(l___auto____x40_Init_Data_Vector_Basic___hyg_7608____closed__28); +l___auto____x40_Init_Data_Vector_Basic___hyg_7608____closed__29 = _init_l___auto____x40_Init_Data_Vector_Basic___hyg_7608____closed__29(); +lean_mark_persistent(l___auto____x40_Init_Data_Vector_Basic___hyg_7608____closed__29); 
+l___auto____x40_Init_Data_Vector_Basic___hyg_7608____closed__30 = _init_l___auto____x40_Init_Data_Vector_Basic___hyg_7608____closed__30(); +lean_mark_persistent(l___auto____x40_Init_Data_Vector_Basic___hyg_7608____closed__30); +l___auto____x40_Init_Data_Vector_Basic___hyg_7608____closed__31 = _init_l___auto____x40_Init_Data_Vector_Basic___hyg_7608____closed__31(); +lean_mark_persistent(l___auto____x40_Init_Data_Vector_Basic___hyg_7608____closed__31); +l___auto____x40_Init_Data_Vector_Basic___hyg_7608____closed__32 = _init_l___auto____x40_Init_Data_Vector_Basic___hyg_7608____closed__32(); +lean_mark_persistent(l___auto____x40_Init_Data_Vector_Basic___hyg_7608____closed__32); +l___auto____x40_Init_Data_Vector_Basic___hyg_7608____closed__33 = _init_l___auto____x40_Init_Data_Vector_Basic___hyg_7608____closed__33(); +lean_mark_persistent(l___auto____x40_Init_Data_Vector_Basic___hyg_7608____closed__33); +l___auto____x40_Init_Data_Vector_Basic___hyg_7608____closed__34 = _init_l___auto____x40_Init_Data_Vector_Basic___hyg_7608____closed__34(); +lean_mark_persistent(l___auto____x40_Init_Data_Vector_Basic___hyg_7608____closed__34); +l___auto____x40_Init_Data_Vector_Basic___hyg_7608_ = _init_l___auto____x40_Init_Data_Vector_Basic___hyg_7608_(); +lean_mark_persistent(l___auto____x40_Init_Data_Vector_Basic___hyg_7608_); l_Std_Range_forIn_x27_loop___at_Vector_lex___spec__1___rarg___closed__1 = _init_l_Std_Range_forIn_x27_loop___at_Vector_lex___spec__1___rarg___closed__1(); lean_mark_persistent(l_Std_Range_forIn_x27_loop___at_Vector_lex___spec__1___rarg___closed__1); l_Std_Range_forIn_x27_loop___at_Vector_lex___spec__1___rarg___closed__2 = _init_l_Std_Range_forIn_x27_loop___at_Vector_lex___spec__1___rarg___closed__2(); diff --git a/stage0/stdlib/Lake/Build/Common.c b/stage0/stdlib/Lake/Build/Common.c index 995b3db88ac9..a6dab40d9139 100644 --- a/stage0/stdlib/Lake/Build/Common.c +++ b/stage0/stdlib/Lake/Build/Common.c @@ -360,7 +360,6 @@ LEAN_EXPORT lean_object* l_Lake_buildUnlessUpToDate_x3f___rarg___lambda__1(lean_ LEAN_EXPORT lean_object* l_Lake_buildLeanO(lean_object*, lean_object*, lean_object*, lean_object*, lean_object*, lean_object*, lean_object*, lean_object*, lean_object*, lean_object*, lean_object*); static lean_object* l_Lake_buildLeanO___lambda__6___closed__3; LEAN_EXPORT lean_object* l_Lake_MTime_checkUpToDate___at_Lake_buildFileUnlessUpToDate_x27___spec__3___boxed(lean_object*, lean_object*, lean_object*); -lean_object* l_List_flatMapTR_go___at___private_Lean_Util_Paths_0__Lean_toJsonLeanPaths____x40_Lean_Util_Paths___hyg_55____spec__2(lean_object*, lean_object*); lean_object* l_Lean_bignumToJson(lean_object*); LEAN_EXPORT lean_object* l_Lake_inputBinFile___lambda__1(lean_object*, lean_object*, lean_object*, lean_object*, lean_object*, lean_object*, lean_object*); lean_object* l_List_reverse___rarg(lean_object*); @@ -444,6 +443,7 @@ LEAN_EXPORT lean_object* l_IO_FS_withIsolatedStreams___at_Lake_inputDir___spec__ lean_object* l___private_Init_Data_Repr_0__Nat_reprFast(lean_object*); LEAN_EXPORT lean_object* l___private_Init_Data_Option_Basic_0__Option_beqOption____x40_Init_Data_Option_Basic___hyg_159____at_Lake_buildFileUnlessUpToDate_x27___spec__5___boxed(lean_object*, lean_object*); LEAN_EXPORT lean_object* l_Lake_buildFileUnlessUpToDate(lean_object*, lean_object*, lean_object*, uint8_t, lean_object*, lean_object*, lean_object*, lean_object*, lean_object*, lean_object*); +lean_object* 
l_List_flatMapTR_go___at___private_Lean_Data_Lsp_Basic_0__Lean_Lsp_toJsonPosition____x40_Lean_Data_Lsp_Basic___hyg_221____spec__1(lean_object*, lean_object*); static lean_object* l_Lake_platformTrace___closed__5; LEAN_EXPORT lean_object* l_Lake_Job_bindM___at_Lake_buildSharedLib___spec__7___lambda__1(lean_object*, lean_object*, lean_object*, lean_object*, lean_object*, lean_object*, lean_object*, lean_object*, lean_object*); LEAN_EXPORT lean_object* l_IO_FS_withIsolatedStreams___at_Lake_buildFileAfterDep___spec__2(lean_object*, uint8_t, lean_object*, lean_object*, lean_object*, lean_object*, lean_object*, lean_object*); @@ -940,7 +940,7 @@ x_27 = lean_alloc_ctor(1, 2, 0); lean_ctor_set(x_27, 0, x_9); lean_ctor_set(x_27, 1, x_26); x_28 = l_Lake_platformTrace___closed__3; -x_29 = l_List_flatMapTR_go___at___private_Lean_Util_Paths_0__Lean_toJsonLeanPaths____x40_Lean_Util_Paths___hyg_55____spec__2(x_27, x_28); +x_29 = l_List_flatMapTR_go___at___private_Lean_Data_Lsp_Basic_0__Lean_Lsp_toJsonPosition____x40_Lean_Data_Lsp_Basic___hyg_221____spec__1(x_27, x_28); x_30 = l_Lean_Json_mkObj(x_29); return x_30; } diff --git a/stage0/stdlib/Lake/CLI/Main.c b/stage0/stdlib/Lake/CLI/Main.c index 6091bc925573..306a06096289 100644 --- a/stage0/stdlib/Lake/CLI/Main.c +++ b/stage0/stdlib/Lake/CLI/Main.c @@ -487,7 +487,6 @@ static lean_object* l_Option_repr___at_Lake_verifyInstall___spec__1___closed__2; LEAN_EXPORT lean_object* l_Lake_lake_script_run___boxed__const__1; LEAN_EXPORT lean_object* l_Lake_lake_test___boxed__const__1; LEAN_EXPORT lean_object* l_Lake_lake___boxed__const__1; -lean_object* l_List_flatMapTR_go___at___private_Lean_Util_Paths_0__Lean_toJsonLeanPaths____x40_Lean_Util_Paths___hyg_55____spec__2(lean_object*, lean_object*); static lean_object* l_Lake_lake_selfCheck___closed__1; static lean_object* l_Lake_parseLangSpec___closed__1; lean_object* l_List_reverse___rarg(lean_object*); @@ -597,6 +596,7 @@ static lean_object* l_Lake_lake_env___lambda__1___closed__1; lean_object* lean_array_uset(lean_object*, size_t, lean_object*); extern lean_object* l_Lean_Options_empty; LEAN_EXPORT lean_object* l_Lake_lake_update___boxed__const__1; +lean_object* l_List_flatMapTR_go___at___private_Lean_Data_Lsp_Basic_0__Lean_Lsp_toJsonPosition____x40_Lean_Data_Lsp_Basic___hyg_221____spec__1(lean_object*, lean_object*); static lean_object* l_Lake_lakeLongOption___closed__15; static lean_object* l_Array_forIn_x27Unsafe_loop___at___private_Lake_CLI_Main_0__Lake_lake_evalLeanFile_mkSpawnArgs___spec__2___closed__1; LEAN_EXPORT lean_object* l_Lake_lake_ReservoirConfig_currentSchemaVersion; @@ -56434,7 +56434,7 @@ x_81 = lean_alloc_ctor(1, 2, 0); lean_ctor_set(x_81, 0, x_18); lean_ctor_set(x_81, 1, x_80); x_82 = l_Lake_CliM_run___rarg___closed__1; -x_83 = l_List_flatMapTR_go___at___private_Lean_Util_Paths_0__Lean_toJsonLeanPaths____x40_Lean_Util_Paths___hyg_55____spec__2(x_81, x_82); +x_83 = l_List_flatMapTR_go___at___private_Lean_Data_Lsp_Basic_0__Lean_Lsp_toJsonPosition____x40_Lean_Data_Lsp_Basic___hyg_221____spec__1(x_81, x_82); x_84 = l_Lean_Json_mkObj(x_83); return x_84; } @@ -56487,7 +56487,7 @@ x_100 = lean_alloc_ctor(1, 2, 0); lean_ctor_set(x_100, 0, x_18); lean_ctor_set(x_100, 1, x_99); x_101 = l_Lake_CliM_run___rarg___closed__1; -x_102 = l_List_flatMapTR_go___at___private_Lean_Util_Paths_0__Lean_toJsonLeanPaths____x40_Lean_Util_Paths___hyg_55____spec__2(x_100, x_101); +x_102 = l_List_flatMapTR_go___at___private_Lean_Data_Lsp_Basic_0__Lean_Lsp_toJsonPosition____x40_Lean_Data_Lsp_Basic___hyg_221____spec__1(x_100, 
x_101); x_103 = l_Lean_Json_mkObj(x_102); return x_103; } @@ -56538,7 +56538,7 @@ x_119 = lean_alloc_ctor(1, 2, 0); lean_ctor_set(x_119, 0, x_18); lean_ctor_set(x_119, 1, x_118); x_120 = l_Lake_CliM_run___rarg___closed__1; -x_121 = l_List_flatMapTR_go___at___private_Lean_Util_Paths_0__Lean_toJsonLeanPaths____x40_Lean_Util_Paths___hyg_55____spec__2(x_119, x_120); +x_121 = l_List_flatMapTR_go___at___private_Lean_Data_Lsp_Basic_0__Lean_Lsp_toJsonPosition____x40_Lean_Data_Lsp_Basic___hyg_221____spec__1(x_119, x_120); x_122 = l_Lean_Json_mkObj(x_121); return x_122; } diff --git a/stage0/stdlib/Lake/CLI/Serve.c b/stage0/stdlib/Lake/CLI/Serve.c index 146f6b9accc5..c41692012c6f 100644 --- a/stage0/stdlib/Lake/CLI/Serve.c +++ b/stage0/stdlib/Lake/CLI/Serve.c @@ -64,6 +64,7 @@ lean_object* l_Lake_Workspace_findModule_x3f(lean_object*, lean_object*); lean_object* l_Lake_loadWorkspace(lean_object*, lean_object*, lean_object*); lean_object* lean_io_process_spawn(lean_object*, lean_object*); lean_object* l_Lake_Workspace_leanPath(lean_object*); +lean_object* l_Lean_RBNode_insert___at_Lean_NameMap_insert___spec__1___rarg(lean_object*, lean_object*, lean_object*); LEAN_EXPORT lean_object* l_Lake_setupFile___boxed__const__2; LEAN_EXPORT lean_object* l_Lake_setupFile(lean_object*, lean_object*, lean_object*, lean_object*, lean_object*); lean_object* l_Lake_OutStream_logEntry(lean_object*, lean_object*, uint8_t, uint8_t, lean_object*); @@ -111,7 +112,6 @@ uint8_t lean_usize_dec_lt(size_t, size_t); LEAN_EXPORT lean_object* l_Lake_serve___lambda__1___boxed(lean_object*, lean_object*, lean_object*, lean_object*); lean_object* lean_array_uset(lean_object*, size_t, lean_object*); lean_object* l_Lake_realConfigFile(lean_object*, lean_object*); -lean_object* l_Lean_RBNode_insert___at_Lean_LeanOptions_fromOptions_x3f___spec__1(lean_object*, lean_object*, lean_object*); LEAN_EXPORT lean_object* l_Lake_setupFile___lambda__1___boxed(lean_object*); static uint32_t _init_l_Lake_noConfigFileCode() { _start: @@ -528,18 +528,18 @@ uint8_t x_5; x_5 = lean_usize_dec_eq(x_2, x_3); if (x_5 == 0) { -lean_object* x_6; lean_object* x_7; lean_object* x_8; size_t x_9; size_t x_10; lean_object* x_11; +lean_object* x_6; lean_object* x_7; lean_object* x_8; lean_object* x_9; size_t x_10; size_t x_11; x_6 = lean_array_uget(x_1, x_2); x_7 = lean_ctor_get(x_6, 0); lean_inc(x_7); x_8 = lean_ctor_get(x_6, 1); lean_inc(x_8); lean_dec(x_6); -x_9 = 1; -x_10 = lean_usize_add(x_2, x_9); -x_11 = l_Lean_RBNode_insert___at_Lean_LeanOptions_fromOptions_x3f___spec__1(x_4, x_7, x_8); -x_2 = x_10; -x_4 = x_11; +x_9 = l_Lean_RBNode_insert___at_Lean_NameMap_insert___spec__1___rarg(x_4, x_7, x_8); +x_10 = 1; +x_11 = lean_usize_add(x_2, x_10); +x_2 = x_11; +x_4 = x_9; goto _start; } else diff --git a/stage0/stdlib/Lake/Config/Env.c b/stage0/stdlib/Lake/Config/Env.c index 909bed0c86b1..8140d8be466c 100644 --- a/stage0/stdlib/Lake/Config/Env.c +++ b/stage0/stdlib/Lake/Config/Env.c @@ -17,7 +17,6 @@ LEAN_EXPORT lean_object* l_Lake_Env_vars(lean_object*); static lean_object* l_Lake_Env_noToolchainVars___closed__16; lean_object* lean_string_utf8_extract(lean_object*, lean_object*, lean_object*); static lean_object* l_Lake_Env_noToolchainVars___closed__8; -lean_object* l_Lean_Json_getObj_x3f(lean_object*); static lean_object* l_Lake_Env_noToolchainVars___closed__10; lean_object* l_Lean_Json_compress(lean_object*); uint32_t lean_string_utf8_get(lean_object*, lean_object*); @@ -32,6 +31,7 @@ static lean_object* l_Lake_Env_noToolchainVars___closed__22; uint8_t 
l_Lean_Name_isAnonymous(lean_object*); lean_object* lean_io_getenv(lean_object*, lean_object*); static lean_object* l_Lake_Env_noToolchainVars___closed__20; +static lean_object* l_Lake_Env_compute_computePkgUrlMap___closed__4; LEAN_EXPORT lean_object* l_Lake_Env_leanSrcPath(lean_object*); static lean_object* l_Lake_Env_noToolchainVars___closed__15; LEAN_EXPORT lean_object* l_Lake_Env_leanSearchPath___boxed(lean_object*); @@ -40,6 +40,7 @@ uint8_t lean_string_dec_eq(lean_object*, lean_object*); lean_object* lean_string_utf8_byte_size(lean_object*); static lean_object* l_Lake_instInhabitedEnv___closed__2; lean_object* l_Lean_Json_getStr_x3f(lean_object*); +static lean_object* l_Lean_RBNode_foldM___at_Lake_Env_compute_computePkgUrlMap___spec__1___closed__3; static lean_object* l_Lake_Env_noToolchainVars___closed__3; lean_object* l_List_appendTR___rarg(lean_object*, lean_object*); static lean_object* l_Lake_Env_noToolchainVars___closed__18; @@ -115,6 +116,7 @@ lean_object* l_Lake_LeanInstall_leanCc_x3f(lean_object*); lean_object* lean_nat_add(lean_object*, lean_object*); static lean_object* l_Lake_Env_compute_computePkgUrlMap___closed__3; lean_object* l_Lean_RBNode_insert___at_Lean_Json_mkObj___spec__1(lean_object*, lean_object*, lean_object*); +lean_object* l_Lean_Json_pretty(lean_object*, lean_object*); LEAN_EXPORT lean_object* l_Lake_Env_path(lean_object*); extern uint8_t l_System_Platform_isWindows; static lean_object* l_Lake_Env_noToolchainVars___closed__12; @@ -223,18 +225,24 @@ static lean_object* _init_l_Lean_RBNode_foldM___at_Lake_Env_compute_computePkgUr _start: { lean_object* x_1; -x_1 = lean_mk_string_unchecked("expected name", 13, 13); +x_1 = lean_mk_string_unchecked("[anonymous]", 11, 11); return x_1; } } static lean_object* _init_l_Lean_RBNode_foldM___at_Lake_Env_compute_computePkgUrlMap___spec__1___closed__2() { _start: { -lean_object* x_1; lean_object* x_2; -x_1 = l_Lean_RBNode_foldM___at_Lake_Env_compute_computePkgUrlMap___spec__1___closed__1; -x_2 = lean_alloc_ctor(0, 1, 0); -lean_ctor_set(x_2, 0, x_1); -return x_2; +lean_object* x_1; +x_1 = lean_mk_string_unchecked("expected a `Name`, got '", 24, 24); +return x_1; +} +} +static lean_object* _init_l_Lean_RBNode_foldM___at_Lake_Env_compute_computePkgUrlMap___spec__1___closed__3() { +_start: +{ +lean_object* x_1; +x_1 = lean_mk_string_unchecked("'", 1, 1); +return x_1; } } LEAN_EXPORT lean_object* l_Lean_RBNode_foldM___at_Lake_Env_compute_computePkgUrlMap___spec__1(lean_object* x_1, lean_object* x_2) { @@ -284,59 +292,229 @@ return x_11; } else { -lean_object* x_12; lean_object* x_13; uint8_t x_14; -x_12 = lean_ctor_get(x_8, 0); -lean_inc(x_12); -lean_dec(x_8); -x_13 = l_String_toName(x_5); -x_14 = l_Lean_Name_isAnonymous(x_13); -if (x_14 == 0) +uint8_t x_12; +x_12 = !lean_is_exclusive(x_8); +if (x_12 == 0) { -lean_object* x_15; -x_15 = l_Lean_Json_getStr_x3f(x_6); -if (lean_obj_tag(x_15) == 0) +lean_object* x_13; lean_object* x_14; uint8_t x_15; +x_13 = lean_ctor_get(x_8, 0); +x_14 = l_Lean_RBNode_foldM___at_Lake_Env_compute_computePkgUrlMap___spec__1___closed__1; +x_15 = lean_string_dec_eq(x_5, x_14); +if (x_15 == 0) +{ +lean_object* x_16; uint8_t x_17; +lean_inc(x_5); +x_16 = l_String_toName(x_5); +x_17 = l_Lean_Name_isAnonymous(x_16); +if (x_17 == 0) +{ +lean_object* x_18; +lean_free_object(x_8); +lean_dec(x_5); +x_18 = l_Lean_Json_getStr_x3f(x_6); +if (lean_obj_tag(x_18) == 0) { -uint8_t x_16; +uint8_t x_19; +lean_dec(x_16); lean_dec(x_13); -lean_dec(x_12); lean_dec(x_7); -x_16 = !lean_is_exclusive(x_15); -if (x_16 == 0) 
+x_19 = !lean_is_exclusive(x_18); +if (x_19 == 0) { -return x_15; +return x_18; } else { -lean_object* x_17; lean_object* x_18; -x_17 = lean_ctor_get(x_15, 0); -lean_inc(x_17); -lean_dec(x_15); -x_18 = lean_alloc_ctor(0, 1, 0); -lean_ctor_set(x_18, 0, x_17); -return x_18; +lean_object* x_20; lean_object* x_21; +x_20 = lean_ctor_get(x_18, 0); +lean_inc(x_20); +lean_dec(x_18); +x_21 = lean_alloc_ctor(0, 1, 0); +lean_ctor_set(x_21, 0, x_20); +return x_21; } } else { -lean_object* x_19; lean_object* x_20; -x_19 = lean_ctor_get(x_15, 0); -lean_inc(x_19); -lean_dec(x_15); -x_20 = l_Lean_RBNode_insert___at_Lean_NameMap_insert___spec__1___rarg(x_12, x_13, x_19); -x_1 = x_20; +lean_object* x_22; lean_object* x_23; +x_22 = lean_ctor_get(x_18, 0); +lean_inc(x_22); +lean_dec(x_18); +x_23 = l_Lean_RBNode_insert___at_Lean_NameMap_insert___spec__1___rarg(x_13, x_16, x_22); +x_1 = x_23; x_2 = x_7; goto _start; } } else { -lean_object* x_22; +lean_object* x_25; lean_object* x_26; lean_object* x_27; lean_object* x_28; +lean_dec(x_16); lean_dec(x_13); -lean_dec(x_12); lean_dec(x_7); lean_dec(x_6); -x_22 = l_Lean_RBNode_foldM___at_Lake_Env_compute_computePkgUrlMap___spec__1___closed__2; -return x_22; +x_25 = l_Lean_RBNode_foldM___at_Lake_Env_compute_computePkgUrlMap___spec__1___closed__2; +x_26 = lean_string_append(x_25, x_5); +lean_dec(x_5); +x_27 = l_Lean_RBNode_foldM___at_Lake_Env_compute_computePkgUrlMap___spec__1___closed__3; +x_28 = lean_string_append(x_26, x_27); +lean_ctor_set_tag(x_8, 0); +lean_ctor_set(x_8, 0, x_28); +return x_8; +} +} +else +{ +lean_object* x_29; +lean_free_object(x_8); +lean_dec(x_5); +x_29 = l_Lean_Json_getStr_x3f(x_6); +if (lean_obj_tag(x_29) == 0) +{ +uint8_t x_30; +lean_dec(x_13); +lean_dec(x_7); +x_30 = !lean_is_exclusive(x_29); +if (x_30 == 0) +{ +return x_29; +} +else +{ +lean_object* x_31; lean_object* x_32; +x_31 = lean_ctor_get(x_29, 0); +lean_inc(x_31); +lean_dec(x_29); +x_32 = lean_alloc_ctor(0, 1, 0); +lean_ctor_set(x_32, 0, x_31); +return x_32; +} +} +else +{ +lean_object* x_33; lean_object* x_34; lean_object* x_35; +x_33 = lean_ctor_get(x_29, 0); +lean_inc(x_33); +lean_dec(x_29); +x_34 = lean_box(0); +x_35 = l_Lean_RBNode_insert___at_Lean_NameMap_insert___spec__1___rarg(x_13, x_34, x_33); +x_1 = x_35; +x_2 = x_7; +goto _start; +} +} +} +else +{ +lean_object* x_37; lean_object* x_38; uint8_t x_39; +x_37 = lean_ctor_get(x_8, 0); +lean_inc(x_37); +lean_dec(x_8); +x_38 = l_Lean_RBNode_foldM___at_Lake_Env_compute_computePkgUrlMap___spec__1___closed__1; +x_39 = lean_string_dec_eq(x_5, x_38); +if (x_39 == 0) +{ +lean_object* x_40; uint8_t x_41; +lean_inc(x_5); +x_40 = l_String_toName(x_5); +x_41 = l_Lean_Name_isAnonymous(x_40); +if (x_41 == 0) +{ +lean_object* x_42; +lean_dec(x_5); +x_42 = l_Lean_Json_getStr_x3f(x_6); +if (lean_obj_tag(x_42) == 0) +{ +lean_object* x_43; lean_object* x_44; lean_object* x_45; +lean_dec(x_40); +lean_dec(x_37); +lean_dec(x_7); +x_43 = lean_ctor_get(x_42, 0); +lean_inc(x_43); +if (lean_is_exclusive(x_42)) { + lean_ctor_release(x_42, 0); + x_44 = x_42; +} else { + lean_dec_ref(x_42); + x_44 = lean_box(0); +} +if (lean_is_scalar(x_44)) { + x_45 = lean_alloc_ctor(0, 1, 0); +} else { + x_45 = x_44; +} +lean_ctor_set(x_45, 0, x_43); +return x_45; +} +else +{ +lean_object* x_46; lean_object* x_47; +x_46 = lean_ctor_get(x_42, 0); +lean_inc(x_46); +lean_dec(x_42); +x_47 = l_Lean_RBNode_insert___at_Lean_NameMap_insert___spec__1___rarg(x_37, x_40, x_46); +x_1 = x_47; +x_2 = x_7; +goto _start; +} +} +else +{ +lean_object* x_49; lean_object* x_50; 
lean_object* x_51; lean_object* x_52; lean_object* x_53; +lean_dec(x_40); +lean_dec(x_37); +lean_dec(x_7); +lean_dec(x_6); +x_49 = l_Lean_RBNode_foldM___at_Lake_Env_compute_computePkgUrlMap___spec__1___closed__2; +x_50 = lean_string_append(x_49, x_5); +lean_dec(x_5); +x_51 = l_Lean_RBNode_foldM___at_Lake_Env_compute_computePkgUrlMap___spec__1___closed__3; +x_52 = lean_string_append(x_50, x_51); +x_53 = lean_alloc_ctor(0, 1, 0); +lean_ctor_set(x_53, 0, x_52); +return x_53; +} +} +else +{ +lean_object* x_54; +lean_dec(x_5); +x_54 = l_Lean_Json_getStr_x3f(x_6); +if (lean_obj_tag(x_54) == 0) +{ +lean_object* x_55; lean_object* x_56; lean_object* x_57; +lean_dec(x_37); +lean_dec(x_7); +x_55 = lean_ctor_get(x_54, 0); +lean_inc(x_55); +if (lean_is_exclusive(x_54)) { + lean_ctor_release(x_54, 0); + x_56 = x_54; +} else { + lean_dec_ref(x_54); + x_56 = lean_box(0); +} +if (lean_is_scalar(x_56)) { + x_57 = lean_alloc_ctor(0, 1, 0); +} else { + x_57 = x_56; +} +lean_ctor_set(x_57, 0, x_55); +return x_57; +} +else +{ +lean_object* x_58; lean_object* x_59; lean_object* x_60; +x_58 = lean_ctor_get(x_54, 0); +lean_inc(x_58); +lean_dec(x_54); +x_59 = lean_box(0); +x_60 = l_Lean_RBNode_insert___at_Lean_NameMap_insert___spec__1___rarg(x_37, x_59, x_58); +x_1 = x_60; +x_2 = x_7; +goto _start; +} +} } } } @@ -366,6 +544,14 @@ x_1 = lean_mk_string_unchecked("'LAKE_PKG_URL_MAP' has invalid JSON: ", 37, 37); return x_1; } } +static lean_object* _init_l_Lake_Env_compute_computePkgUrlMap___closed__4() { +_start: +{ +lean_object* x_1; +x_1 = lean_mk_string_unchecked("expected a `NameMap`, got '", 27, 27); +return x_1; +} +} LEAN_EXPORT lean_object* l_Lake_Env_compute_computePkgUrlMap(lean_object* x_1) { _start: { @@ -431,146 +617,152 @@ return x_3; } else { -lean_object* x_21; lean_object* x_22; +lean_object* x_21; x_21 = lean_ctor_get(x_15, 0); lean_inc(x_21); lean_dec(x_15); -x_22 = l_Lean_Json_getObj_x3f(x_21); -if (lean_obj_tag(x_22) == 0) -{ -lean_object* x_23; lean_object* x_24; lean_object* x_25; lean_object* x_26; lean_object* x_27; -x_23 = lean_ctor_get(x_22, 0); -lean_inc(x_23); -lean_dec(x_22); -x_24 = l_Lake_Env_compute_computePkgUrlMap___closed__3; -x_25 = lean_string_append(x_24, x_23); -lean_dec(x_23); -x_26 = l_Lake_instInhabitedEnv___closed__1; -x_27 = lean_string_append(x_25, x_26); +if (lean_obj_tag(x_21) == 5) +{ +lean_object* x_22; lean_object* x_23; lean_object* x_24; +x_22 = lean_ctor_get(x_21, 0); +lean_inc(x_22); +lean_dec(x_21); +x_23 = lean_box(0); +x_24 = l_Lean_RBNode_foldM___at_Lake_Env_compute_computePkgUrlMap___spec__1(x_23, x_22); +if (lean_obj_tag(x_24) == 0) +{ +lean_object* x_25; lean_object* x_26; lean_object* x_27; lean_object* x_28; lean_object* x_29; +x_25 = lean_ctor_get(x_24, 0); +lean_inc(x_25); +lean_dec(x_24); +x_26 = l_Lake_Env_compute_computePkgUrlMap___closed__3; +x_27 = lean_string_append(x_26, x_25); +lean_dec(x_25); +x_28 = l_Lake_instInhabitedEnv___closed__1; +x_29 = lean_string_append(x_27, x_28); lean_ctor_set_tag(x_3, 1); -lean_ctor_set(x_3, 0, x_27); +lean_ctor_set(x_3, 0, x_29); return x_3; } else { -lean_object* x_28; lean_object* x_29; lean_object* x_30; -x_28 = lean_ctor_get(x_22, 0); -lean_inc(x_28); -lean_dec(x_22); -x_29 = lean_box(0); -x_30 = l_Lean_RBNode_foldM___at_Lake_Env_compute_computePkgUrlMap___spec__1(x_29, x_28); -if (lean_obj_tag(x_30) == 0) -{ -lean_object* x_31; lean_object* x_32; lean_object* x_33; lean_object* x_34; lean_object* x_35; -x_31 = lean_ctor_get(x_30, 0); -lean_inc(x_31); -lean_dec(x_30); -x_32 = 
l_Lake_Env_compute_computePkgUrlMap___closed__3; -x_33 = lean_string_append(x_32, x_31); -lean_dec(x_31); -x_34 = l_Lake_instInhabitedEnv___closed__1; -x_35 = lean_string_append(x_33, x_34); -lean_ctor_set_tag(x_3, 1); -lean_ctor_set(x_3, 0, x_35); +lean_object* x_30; +x_30 = lean_ctor_get(x_24, 0); +lean_inc(x_30); +lean_dec(x_24); +lean_ctor_set(x_3, 0, x_30); return x_3; } +} else { -lean_object* x_36; -x_36 = lean_ctor_get(x_30, 0); -lean_inc(x_36); -lean_dec(x_30); -lean_ctor_set(x_3, 0, x_36); +lean_object* x_31; lean_object* x_32; lean_object* x_33; lean_object* x_34; lean_object* x_35; lean_object* x_36; lean_object* x_37; lean_object* x_38; lean_object* x_39; lean_object* x_40; +x_31 = lean_unsigned_to_nat(80u); +x_32 = l_Lean_Json_pretty(x_21, x_31); +x_33 = l_Lake_Env_compute_computePkgUrlMap___closed__4; +x_34 = lean_string_append(x_33, x_32); +lean_dec(x_32); +x_35 = l_Lean_RBNode_foldM___at_Lake_Env_compute_computePkgUrlMap___spec__1___closed__3; +x_36 = lean_string_append(x_34, x_35); +x_37 = l_Lake_Env_compute_computePkgUrlMap___closed__3; +x_38 = lean_string_append(x_37, x_36); +lean_dec(x_36); +x_39 = l_Lake_instInhabitedEnv___closed__1; +x_40 = lean_string_append(x_38, x_39); +lean_ctor_set_tag(x_3, 1); +lean_ctor_set(x_3, 0, x_40); return x_3; } } } -} else { -lean_object* x_37; lean_object* x_38; lean_object* x_39; lean_object* x_40; -x_37 = lean_ctor_get(x_3, 1); -lean_inc(x_37); +lean_object* x_41; lean_object* x_42; lean_object* x_43; lean_object* x_44; +x_41 = lean_ctor_get(x_3, 1); +lean_inc(x_41); lean_dec(x_3); -x_38 = lean_ctor_get(x_4, 0); -lean_inc(x_38); +x_42 = lean_ctor_get(x_4, 0); +lean_inc(x_42); lean_dec(x_4); -x_39 = l_Lake_Env_compute_computePkgUrlMap___closed__2; -x_40 = l_Std_Internal_Parsec_String_Parser_run___rarg(x_39, x_38); -if (lean_obj_tag(x_40) == 0) +x_43 = l_Lake_Env_compute_computePkgUrlMap___closed__2; +x_44 = l_Std_Internal_Parsec_String_Parser_run___rarg(x_43, x_42); +if (lean_obj_tag(x_44) == 0) { -lean_object* x_41; lean_object* x_42; lean_object* x_43; lean_object* x_44; lean_object* x_45; lean_object* x_46; -x_41 = lean_ctor_get(x_40, 0); -lean_inc(x_41); -lean_dec(x_40); -x_42 = l_Lake_Env_compute_computePkgUrlMap___closed__3; -x_43 = lean_string_append(x_42, x_41); -lean_dec(x_41); -x_44 = l_Lake_instInhabitedEnv___closed__1; -x_45 = lean_string_append(x_43, x_44); -x_46 = lean_alloc_ctor(1, 2, 0); -lean_ctor_set(x_46, 0, x_45); -lean_ctor_set(x_46, 1, x_37); -return x_46; -} -else +lean_object* x_45; lean_object* x_46; lean_object* x_47; lean_object* x_48; lean_object* x_49; lean_object* x_50; +x_45 = lean_ctor_get(x_44, 0); +lean_inc(x_45); +lean_dec(x_44); +x_46 = l_Lake_Env_compute_computePkgUrlMap___closed__3; +x_47 = lean_string_append(x_46, x_45); +lean_dec(x_45); +x_48 = l_Lake_instInhabitedEnv___closed__1; +x_49 = lean_string_append(x_47, x_48); +x_50 = lean_alloc_ctor(1, 2, 0); +lean_ctor_set(x_50, 0, x_49); +lean_ctor_set(x_50, 1, x_41); +return x_50; +} +else +{ +lean_object* x_51; +x_51 = lean_ctor_get(x_44, 0); +lean_inc(x_51); +lean_dec(x_44); +if (lean_obj_tag(x_51) == 5) +{ +lean_object* x_52; lean_object* x_53; lean_object* x_54; +x_52 = lean_ctor_get(x_51, 0); +lean_inc(x_52); +lean_dec(x_51); +x_53 = lean_box(0); +x_54 = l_Lean_RBNode_foldM___at_Lake_Env_compute_computePkgUrlMap___spec__1(x_53, x_52); +if (lean_obj_tag(x_54) == 0) { -lean_object* x_47; lean_object* x_48; -x_47 = lean_ctor_get(x_40, 0); -lean_inc(x_47); -lean_dec(x_40); -x_48 = l_Lean_Json_getObj_x3f(x_47); -if (lean_obj_tag(x_48) == 0) -{ 
-lean_object* x_49; lean_object* x_50; lean_object* x_51; lean_object* x_52; lean_object* x_53; lean_object* x_54; -x_49 = lean_ctor_get(x_48, 0); -lean_inc(x_49); -lean_dec(x_48); -x_50 = l_Lake_Env_compute_computePkgUrlMap___closed__3; -x_51 = lean_string_append(x_50, x_49); -lean_dec(x_49); -x_52 = l_Lake_instInhabitedEnv___closed__1; -x_53 = lean_string_append(x_51, x_52); -x_54 = lean_alloc_ctor(1, 2, 0); -lean_ctor_set(x_54, 0, x_53); -lean_ctor_set(x_54, 1, x_37); -return x_54; +lean_object* x_55; lean_object* x_56; lean_object* x_57; lean_object* x_58; lean_object* x_59; lean_object* x_60; +x_55 = lean_ctor_get(x_54, 0); +lean_inc(x_55); +lean_dec(x_54); +x_56 = l_Lake_Env_compute_computePkgUrlMap___closed__3; +x_57 = lean_string_append(x_56, x_55); +lean_dec(x_55); +x_58 = l_Lake_instInhabitedEnv___closed__1; +x_59 = lean_string_append(x_57, x_58); +x_60 = lean_alloc_ctor(1, 2, 0); +lean_ctor_set(x_60, 0, x_59); +lean_ctor_set(x_60, 1, x_41); +return x_60; } else { -lean_object* x_55; lean_object* x_56; lean_object* x_57; -x_55 = lean_ctor_get(x_48, 0); -lean_inc(x_55); -lean_dec(x_48); -x_56 = lean_box(0); -x_57 = l_Lean_RBNode_foldM___at_Lake_Env_compute_computePkgUrlMap___spec__1(x_56, x_55); -if (lean_obj_tag(x_57) == 0) -{ -lean_object* x_58; lean_object* x_59; lean_object* x_60; lean_object* x_61; lean_object* x_62; lean_object* x_63; -x_58 = lean_ctor_get(x_57, 0); -lean_inc(x_58); -lean_dec(x_57); -x_59 = l_Lake_Env_compute_computePkgUrlMap___closed__3; -x_60 = lean_string_append(x_59, x_58); -lean_dec(x_58); -x_61 = l_Lake_instInhabitedEnv___closed__1; -x_62 = lean_string_append(x_60, x_61); -x_63 = lean_alloc_ctor(1, 2, 0); -lean_ctor_set(x_63, 0, x_62); -lean_ctor_set(x_63, 1, x_37); -return x_63; +lean_object* x_61; lean_object* x_62; +x_61 = lean_ctor_get(x_54, 0); +lean_inc(x_61); +lean_dec(x_54); +x_62 = lean_alloc_ctor(0, 2, 0); +lean_ctor_set(x_62, 0, x_61); +lean_ctor_set(x_62, 1, x_41); +return x_62; +} } else { -lean_object* x_64; lean_object* x_65; -x_64 = lean_ctor_get(x_57, 0); -lean_inc(x_64); -lean_dec(x_57); -x_65 = lean_alloc_ctor(0, 2, 0); -lean_ctor_set(x_65, 0, x_64); -lean_ctor_set(x_65, 1, x_37); -return x_65; -} +lean_object* x_63; lean_object* x_64; lean_object* x_65; lean_object* x_66; lean_object* x_67; lean_object* x_68; lean_object* x_69; lean_object* x_70; lean_object* x_71; lean_object* x_72; lean_object* x_73; +x_63 = lean_unsigned_to_nat(80u); +x_64 = l_Lean_Json_pretty(x_51, x_63); +x_65 = l_Lake_Env_compute_computePkgUrlMap___closed__4; +x_66 = lean_string_append(x_65, x_64); +lean_dec(x_64); +x_67 = l_Lean_RBNode_foldM___at_Lake_Env_compute_computePkgUrlMap___spec__1___closed__3; +x_68 = lean_string_append(x_66, x_67); +x_69 = l_Lake_Env_compute_computePkgUrlMap___closed__3; +x_70 = lean_string_append(x_69, x_68); +lean_dec(x_68); +x_71 = l_Lake_instInhabitedEnv___closed__1; +x_72 = lean_string_append(x_70, x_71); +x_73 = lean_alloc_ctor(1, 2, 0); +lean_ctor_set(x_73, 0, x_72); +lean_ctor_set(x_73, 1, x_41); +return x_73; } } } @@ -2920,12 +3112,16 @@ l_Lean_RBNode_foldM___at_Lake_Env_compute_computePkgUrlMap___spec__1___closed__1 lean_mark_persistent(l_Lean_RBNode_foldM___at_Lake_Env_compute_computePkgUrlMap___spec__1___closed__1); l_Lean_RBNode_foldM___at_Lake_Env_compute_computePkgUrlMap___spec__1___closed__2 = _init_l_Lean_RBNode_foldM___at_Lake_Env_compute_computePkgUrlMap___spec__1___closed__2(); lean_mark_persistent(l_Lean_RBNode_foldM___at_Lake_Env_compute_computePkgUrlMap___spec__1___closed__2); 
+l_Lean_RBNode_foldM___at_Lake_Env_compute_computePkgUrlMap___spec__1___closed__3 = _init_l_Lean_RBNode_foldM___at_Lake_Env_compute_computePkgUrlMap___spec__1___closed__3(); +lean_mark_persistent(l_Lean_RBNode_foldM___at_Lake_Env_compute_computePkgUrlMap___spec__1___closed__3); l_Lake_Env_compute_computePkgUrlMap___closed__1 = _init_l_Lake_Env_compute_computePkgUrlMap___closed__1(); lean_mark_persistent(l_Lake_Env_compute_computePkgUrlMap___closed__1); l_Lake_Env_compute_computePkgUrlMap___closed__2 = _init_l_Lake_Env_compute_computePkgUrlMap___closed__2(); lean_mark_persistent(l_Lake_Env_compute_computePkgUrlMap___closed__2); l_Lake_Env_compute_computePkgUrlMap___closed__3 = _init_l_Lake_Env_compute_computePkgUrlMap___closed__3(); lean_mark_persistent(l_Lake_Env_compute_computePkgUrlMap___closed__3); +l_Lake_Env_compute_computePkgUrlMap___closed__4 = _init_l_Lake_Env_compute_computePkgUrlMap___closed__4(); +lean_mark_persistent(l_Lake_Env_compute_computePkgUrlMap___closed__4); l_Lake_Env_compute___closed__1 = _init_l_Lake_Env_compute___closed__1(); lean_mark_persistent(l_Lake_Env_compute___closed__1); l_Lake_Env_compute___closed__2 = _init_l_Lake_Env_compute___closed__2(); diff --git a/stage0/stdlib/Lake/Load/Lean/Elab.c b/stage0/stdlib/Lake/Load/Lean/Elab.c index f0b03a753744..de0aa950c460 100644 --- a/stage0/stdlib/Lake/Load/Lean/Elab.c +++ b/stage0/stdlib/Lake/Load/Lean/Elab.c @@ -15,7 +15,6 @@ extern "C" { #endif LEAN_EXPORT lean_object* l___private_Lake_Load_Lean_Elab_0__Lake_fromJsonConfigTrace____x40_Lake_Load_Lean_Elab___hyg_976_(lean_object*); static lean_object* l___private_Lake_Load_Lean_Elab_0__Lake_fromJsonConfigTrace____x40_Lake_Load_Lean_Elab___hyg_976____closed__20; -lean_object* l_Lean_Json_getObj_x3f(lean_object*); static lean_object* l___private_Lake_Load_Lean_Elab_0__Lake_fromJsonConfigTrace____x40_Lake_Load_Lean_Elab___hyg_976____closed__7; static lean_object* l_Lake_importConfigFileCore_lakeExts___closed__40; LEAN_EXPORT lean_object* l___private_Lake_Load_Lean_Elab_0__Lake_toJsonConfigTrace____x40_Lake_Load_Lean_Elab___hyg_896_(lean_object*); @@ -76,7 +75,6 @@ static lean_object* l_Lake_importConfigFile___closed__6; lean_object* l_Lean_RBNode_insert___at_Lean_NameSet_insert___spec__1(lean_object*, lean_object*, lean_object*); static lean_object* l_Lake_importConfigFile___closed__12; LEAN_EXPORT lean_object* l_Lake_importModulesUsingCache___lambda__1(lean_object*, lean_object*, uint32_t, lean_object*, lean_object*); -lean_object* l_Lean_Elab_headerToImports(lean_object*); static lean_object* l_Lean_Json_getObjValAs_x3f___at___private_Lake_Load_Lean_Elab_0__Lake_fromJsonConfigTrace____x40_Lake_Load_Lean_Elab___hyg_976____spec__1___closed__2; static lean_object* l_Lake_importConfigFileCore_lakeExts___closed__22; static lean_object* l_Lake_importConfigFileCore_lakeExts___closed__29; @@ -98,6 +96,7 @@ lean_object* l_Lean_RBNode_foldM___at___private_Lake_Load_Manifest_0__Lake_fromJ lean_object* l_Lean_Name_mkStr3(lean_object*, lean_object*, lean_object*); static lean_object* l___private_Lake_Load_Lean_Elab_0__Lake_fromJsonConfigTrace____x40_Lake_Load_Lean_Elab___hyg_976____closed__15; static lean_object* l_Lake_importConfigFileCore_lakeExts___closed__10; +static lean_object* l_Lean_Json_getObjValAs_x3f___at___private_Lake_Load_Lean_Elab_0__Lake_fromJsonConfigTrace____x40_Lake_Load_Lean_Elab___hyg_976____spec__2___closed__2; static lean_object* l_Lake_importConfigFileCore_lakeExts___closed__24; size_t lean_usize_of_nat(lean_object*); static lean_object* 
l_Lake_importConfigFileCore_lakeExts___closed__51; @@ -164,6 +163,7 @@ static lean_object* l___private_Lake_Load_Lean_Elab_0__Lake_fromJsonConfigTrace_ static lean_object* l_Lake_importConfigFileCore_lakeExts___closed__35; static lean_object* l_Lake_importConfigFileCore_lakeExts___closed__41; extern lean_object* l_Lake_defaultLakeDir; +lean_object* l_Lean_Elab_HeaderSyntax_imports(lean_object*); LEAN_EXPORT lean_object* l_Lake_importConfigFile(lean_object*, lean_object*, lean_object*); static uint64_t l_Lake_importModulesUsingCache___closed__2; LEAN_EXPORT lean_object* l_Lake_processHeader(lean_object*, lean_object*, lean_object*, lean_object*, lean_object*); @@ -228,7 +228,6 @@ LEAN_EXPORT lean_object* l___private_Lake_Load_Lean_Elab_0__Lake_hashImport____x static lean_object* l_Lake_importConfigFile___closed__8; static lean_object* l___private_Lake_Load_Lean_Elab_0__Lake_fromJsonConfigTrace____x40_Lake_Load_Lean_Elab___hyg_976____closed__19; static lean_object* l_Lake_importConfigFileCore_lakeExts___closed__42; -lean_object* l_List_flatMapTR_go___at___private_Lean_Util_Paths_0__Lean_toJsonLeanPaths____x40_Lean_Util_Paths___hyg_55____spec__2(lean_object*, lean_object*); extern lean_object* l_Lean_persistentEnvExtensionsRef; lean_object* l_Lean_bignumToJson(lean_object*); static lean_object* l_Lake_importConfigFileCore_lakeExts___closed__46; @@ -271,7 +270,7 @@ static lean_object* l_Lake_importConfigFileCore_lakeExts___closed__36; lean_object* l_Lake_joinRelative(lean_object*, lean_object*); lean_object* lean_nat_add(lean_object*, lean_object*); LEAN_EXPORT lean_object* l_Std_DHashMap_Internal_Raw_u2080_expand_go___at_Lake_importModulesUsingCache___spec__4(lean_object*, lean_object*, lean_object*); -lean_object* l_Lean_importModules(lean_object*, lean_object*, uint32_t, lean_object*, uint8_t, uint8_t, uint8_t, lean_object*); +lean_object* l_Lean_importModules(lean_object*, lean_object*, uint32_t, lean_object*, uint8_t, uint8_t, uint8_t, lean_object*, lean_object*); static lean_object* l_Lake_importConfigFileCore_lakeExts___closed__54; LEAN_EXPORT uint64_t l_Array_foldlMUnsafe_fold___at_Lake_importModulesUsingCache___spec__5(lean_object*, size_t, size_t, uint64_t); lean_object* l_Lean_Json_pretty(lean_object*, lean_object*); @@ -285,7 +284,9 @@ lean_object* lake_environment_add(lean_object*, lean_object*); lean_object* lean_array_uset(lean_object*, size_t, lean_object*); static lean_object* l_Array_foldlMUnsafe_fold___at_Lake_importConfigFileCore___spec__2___closed__1; static lean_object* l_Lake_importConfigFileCore_lakeExts___closed__28; +lean_object* l_List_flatMapTR_go___at___private_Lean_Data_Lsp_Basic_0__Lean_Lsp_toJsonPosition____x40_Lean_Data_Lsp_Basic___hyg_221____spec__1(lean_object*, lean_object*); static lean_object* l___private_Lake_Load_Lean_Elab_0__Lake_fromJsonConfigTrace____x40_Lake_Load_Lean_Elab___hyg_976____closed__17; +static lean_object* l_Lean_Json_getObjValAs_x3f___at___private_Lake_Load_Lean_Elab_0__Lake_fromJsonConfigTrace____x40_Lake_Load_Lean_Elab___hyg_976____spec__2___closed__1; static lean_object* l___private_Lake_Load_Lean_Elab_0__Lake_fromJsonConfigTrace____x40_Lake_Load_Lean_Elab___hyg_976____closed__14; static lean_object* l___private_Lake_Load_Lean_Elab_0__Lake_fromJsonConfigTrace____x40_Lake_Load_Lean_Elab___hyg_976____closed__13; LEAN_EXPORT lean_object* l___private_Lake_Load_Lean_Elab_0__Lake_addToEnv___boxed(lean_object*, lean_object*); @@ -1130,236 +1131,237 @@ return x_1; LEAN_EXPORT lean_object* 
l_Lake_importModulesUsingCache___lambda__1(lean_object* x_1, lean_object* x_2, uint32_t x_3, lean_object* x_4, lean_object* x_5) { _start: { -lean_object* x_6; uint8_t x_7; uint8_t x_8; uint8_t x_9; lean_object* x_10; -x_6 = l_Lake_importModulesUsingCache___lambda__1___closed__1; -x_7 = 0; -x_8 = 1; -x_9 = 2; +lean_object* x_6; lean_object* x_7; uint8_t x_8; uint8_t x_9; uint8_t x_10; lean_object* x_11; +x_6 = lean_box(0); +x_7 = l_Lake_importModulesUsingCache___lambda__1___closed__1; +x_8 = 0; +x_9 = 1; +x_10 = 2; lean_inc(x_1); -x_10 = l_Lean_importModules(x_1, x_2, x_3, x_6, x_7, x_8, x_9, x_5); -if (lean_obj_tag(x_10) == 0) +x_11 = l_Lean_importModules(x_1, x_2, x_3, x_7, x_8, x_9, x_10, x_6, x_5); +if (lean_obj_tag(x_11) == 0) { -lean_object* x_11; lean_object* x_12; lean_object* x_13; lean_object* x_14; lean_object* x_15; lean_object* x_16; lean_object* x_17; lean_object* x_18; lean_object* x_19; lean_object* x_20; uint64_t x_21; lean_object* x_22; lean_object* x_23; uint8_t x_24; uint64_t x_25; uint64_t x_26; size_t x_27; size_t x_28; size_t x_29; uint64_t x_30; -x_11 = lean_ctor_get(x_10, 0); -lean_inc(x_11); -x_12 = lean_ctor_get(x_10, 1); +lean_object* x_12; lean_object* x_13; lean_object* x_14; lean_object* x_15; lean_object* x_16; lean_object* x_17; lean_object* x_18; lean_object* x_19; lean_object* x_20; lean_object* x_21; uint64_t x_22; lean_object* x_23; lean_object* x_24; uint8_t x_25; uint64_t x_26; uint64_t x_27; size_t x_28; size_t x_29; size_t x_30; uint64_t x_31; +x_12 = lean_ctor_get(x_11, 0); lean_inc(x_12); -lean_dec(x_10); -x_13 = l_Lake_importModulesUsingCache___lambda__1___closed__2; -x_14 = lean_st_ref_take(x_13, x_12); -x_15 = lean_ctor_get(x_14, 0); -lean_inc(x_15); -x_16 = lean_ctor_get(x_14, 1); +x_13 = lean_ctor_get(x_11, 1); +lean_inc(x_13); +lean_dec(x_11); +x_14 = l_Lake_importModulesUsingCache___lambda__1___closed__2; +x_15 = lean_st_ref_take(x_14, x_13); +x_16 = lean_ctor_get(x_15, 0); lean_inc(x_16); -lean_dec(x_14); -x_17 = lean_ctor_get(x_15, 0); +x_17 = lean_ctor_get(x_15, 1); lean_inc(x_17); -x_18 = lean_ctor_get(x_15, 1); +lean_dec(x_15); +x_18 = lean_ctor_get(x_16, 0); lean_inc(x_18); -if (lean_is_exclusive(x_15)) { - lean_ctor_release(x_15, 0); - lean_ctor_release(x_15, 1); - x_19 = x_15; -} else { - lean_dec_ref(x_15); - x_19 = lean_box(0); -} -x_20 = lean_array_get_size(x_18); -x_21 = 7; -x_22 = lean_array_get_size(x_1); -x_23 = lean_unsigned_to_nat(0u); -x_24 = lean_nat_dec_lt(x_23, x_22); -x_25 = 32; -x_26 = 16; -x_27 = lean_usize_of_nat(x_20); -lean_dec(x_20); -x_28 = 1; -x_29 = lean_usize_sub(x_27, x_28); -if (x_24 == 0) +x_19 = lean_ctor_get(x_16, 1); +lean_inc(x_19); +if (lean_is_exclusive(x_16)) { + lean_ctor_release(x_16, 0); + lean_ctor_release(x_16, 1); + x_20 = x_16; +} else { + lean_dec_ref(x_16); + x_20 = lean_box(0); +} +x_21 = lean_array_get_size(x_19); +x_22 = 7; +x_23 = lean_array_get_size(x_1); +x_24 = lean_unsigned_to_nat(0u); +x_25 = lean_nat_dec_lt(x_24, x_23); +x_26 = 32; +x_27 = 16; +x_28 = lean_usize_of_nat(x_21); +lean_dec(x_21); +x_29 = 1; +x_30 = lean_usize_sub(x_28, x_29); +if (x_25 == 0) { -lean_dec(x_22); -x_30 = x_21; -goto block_72; +lean_dec(x_23); +x_31 = x_22; +goto block_73; } else { -uint8_t x_73; -x_73 = lean_nat_dec_le(x_22, x_22); -if (x_73 == 0) +uint8_t x_74; +x_74 = lean_nat_dec_le(x_23, x_23); +if (x_74 == 0) { -lean_dec(x_22); -x_30 = x_21; -goto block_72; +lean_dec(x_23); +x_31 = x_22; +goto block_73; } else { -size_t x_74; size_t x_75; uint64_t x_76; -x_74 = 0; -x_75 = lean_usize_of_nat(x_22); 
-lean_dec(x_22); -x_76 = l_Array_foldlMUnsafe_fold___at_Lake_importModulesUsingCache___spec__5(x_1, x_74, x_75, x_21); -x_30 = x_76; -goto block_72; -} -} -block_72: -{ -uint64_t x_31; uint64_t x_32; uint64_t x_33; uint64_t x_34; size_t x_35; size_t x_36; lean_object* x_37; uint8_t x_38; -x_31 = lean_uint64_shift_right(x_30, x_25); -x_32 = lean_uint64_xor(x_30, x_31); -x_33 = lean_uint64_shift_right(x_32, x_26); -x_34 = lean_uint64_xor(x_32, x_33); -x_35 = lean_uint64_to_usize(x_34); -x_36 = lean_usize_land(x_35, x_29); -x_37 = lean_array_uget(x_18, x_36); -x_38 = l_Std_DHashMap_Internal_AssocList_contains___at_Lake_importModulesUsingCache___spec__1(x_1, x_37); -if (x_38 == 0) +size_t x_75; size_t x_76; uint64_t x_77; +x_75 = 0; +x_76 = lean_usize_of_nat(x_23); +lean_dec(x_23); +x_77 = l_Array_foldlMUnsafe_fold___at_Lake_importModulesUsingCache___spec__5(x_1, x_75, x_76, x_22); +x_31 = x_77; +goto block_73; +} +} +block_73: { -lean_object* x_39; lean_object* x_40; lean_object* x_41; lean_object* x_42; lean_object* x_43; lean_object* x_44; lean_object* x_45; lean_object* x_46; lean_object* x_47; uint8_t x_48; -x_39 = lean_unsigned_to_nat(1u); -x_40 = lean_nat_add(x_17, x_39); -lean_dec(x_17); -lean_inc(x_11); -x_41 = lean_alloc_ctor(1, 3, 0); -lean_ctor_set(x_41, 0, x_1); -lean_ctor_set(x_41, 1, x_11); -lean_ctor_set(x_41, 2, x_37); -x_42 = lean_array_uset(x_18, x_36, x_41); -x_43 = lean_unsigned_to_nat(4u); -x_44 = lean_nat_mul(x_40, x_43); -x_45 = lean_unsigned_to_nat(3u); -x_46 = lean_nat_div(x_44, x_45); -lean_dec(x_44); -x_47 = lean_array_get_size(x_42); -x_48 = lean_nat_dec_le(x_46, x_47); +uint64_t x_32; uint64_t x_33; uint64_t x_34; uint64_t x_35; size_t x_36; size_t x_37; lean_object* x_38; uint8_t x_39; +x_32 = lean_uint64_shift_right(x_31, x_26); +x_33 = lean_uint64_xor(x_31, x_32); +x_34 = lean_uint64_shift_right(x_33, x_27); +x_35 = lean_uint64_xor(x_33, x_34); +x_36 = lean_uint64_to_usize(x_35); +x_37 = lean_usize_land(x_36, x_30); +x_38 = lean_array_uget(x_19, x_37); +x_39 = l_Std_DHashMap_Internal_AssocList_contains___at_Lake_importModulesUsingCache___spec__1(x_1, x_38); +if (x_39 == 0) +{ +lean_object* x_40; lean_object* x_41; lean_object* x_42; lean_object* x_43; lean_object* x_44; lean_object* x_45; lean_object* x_46; lean_object* x_47; lean_object* x_48; uint8_t x_49; +x_40 = lean_unsigned_to_nat(1u); +x_41 = lean_nat_add(x_18, x_40); +lean_dec(x_18); +lean_inc(x_12); +x_42 = lean_alloc_ctor(1, 3, 0); +lean_ctor_set(x_42, 0, x_1); +lean_ctor_set(x_42, 1, x_12); +lean_ctor_set(x_42, 2, x_38); +x_43 = lean_array_uset(x_19, x_37, x_42); +x_44 = lean_unsigned_to_nat(4u); +x_45 = lean_nat_mul(x_41, x_44); +x_46 = lean_unsigned_to_nat(3u); +x_47 = lean_nat_div(x_45, x_46); +lean_dec(x_45); +x_48 = lean_array_get_size(x_43); +x_49 = lean_nat_dec_le(x_47, x_48); +lean_dec(x_48); lean_dec(x_47); -lean_dec(x_46); -if (x_48 == 0) +if (x_49 == 0) { -lean_object* x_49; lean_object* x_50; lean_object* x_51; uint8_t x_52; -x_49 = l_Std_DHashMap_Internal_Raw_u2080_expand___at_Lake_importModulesUsingCache___spec__3(x_42); -if (lean_is_scalar(x_19)) { - x_50 = lean_alloc_ctor(0, 2, 0); +lean_object* x_50; lean_object* x_51; lean_object* x_52; uint8_t x_53; +x_50 = l_Std_DHashMap_Internal_Raw_u2080_expand___at_Lake_importModulesUsingCache___spec__3(x_43); +if (lean_is_scalar(x_20)) { + x_51 = lean_alloc_ctor(0, 2, 0); } else { - x_50 = x_19; + x_51 = x_20; } -lean_ctor_set(x_50, 0, x_40); -lean_ctor_set(x_50, 1, x_49); -x_51 = lean_st_ref_set(x_13, x_50, x_16); -x_52 = 
!lean_is_exclusive(x_51); -if (x_52 == 0) +lean_ctor_set(x_51, 0, x_41); +lean_ctor_set(x_51, 1, x_50); +x_52 = lean_st_ref_set(x_14, x_51, x_17); +x_53 = !lean_is_exclusive(x_52); +if (x_53 == 0) { -lean_object* x_53; -x_53 = lean_ctor_get(x_51, 0); -lean_dec(x_53); -lean_ctor_set(x_51, 0, x_11); -return x_51; +lean_object* x_54; +x_54 = lean_ctor_get(x_52, 0); +lean_dec(x_54); +lean_ctor_set(x_52, 0, x_12); +return x_52; } else { -lean_object* x_54; lean_object* x_55; -x_54 = lean_ctor_get(x_51, 1); -lean_inc(x_54); -lean_dec(x_51); -x_55 = lean_alloc_ctor(0, 2, 0); -lean_ctor_set(x_55, 0, x_11); -lean_ctor_set(x_55, 1, x_54); -return x_55; +lean_object* x_55; lean_object* x_56; +x_55 = lean_ctor_get(x_52, 1); +lean_inc(x_55); +lean_dec(x_52); +x_56 = lean_alloc_ctor(0, 2, 0); +lean_ctor_set(x_56, 0, x_12); +lean_ctor_set(x_56, 1, x_55); +return x_56; } } else { -lean_object* x_56; lean_object* x_57; uint8_t x_58; -if (lean_is_scalar(x_19)) { - x_56 = lean_alloc_ctor(0, 2, 0); +lean_object* x_57; lean_object* x_58; uint8_t x_59; +if (lean_is_scalar(x_20)) { + x_57 = lean_alloc_ctor(0, 2, 0); } else { - x_56 = x_19; + x_57 = x_20; } -lean_ctor_set(x_56, 0, x_40); -lean_ctor_set(x_56, 1, x_42); -x_57 = lean_st_ref_set(x_13, x_56, x_16); -x_58 = !lean_is_exclusive(x_57); -if (x_58 == 0) +lean_ctor_set(x_57, 0, x_41); +lean_ctor_set(x_57, 1, x_43); +x_58 = lean_st_ref_set(x_14, x_57, x_17); +x_59 = !lean_is_exclusive(x_58); +if (x_59 == 0) { -lean_object* x_59; -x_59 = lean_ctor_get(x_57, 0); -lean_dec(x_59); -lean_ctor_set(x_57, 0, x_11); -return x_57; +lean_object* x_60; +x_60 = lean_ctor_get(x_58, 0); +lean_dec(x_60); +lean_ctor_set(x_58, 0, x_12); +return x_58; } else { -lean_object* x_60; lean_object* x_61; -x_60 = lean_ctor_get(x_57, 1); -lean_inc(x_60); -lean_dec(x_57); -x_61 = lean_alloc_ctor(0, 2, 0); -lean_ctor_set(x_61, 0, x_11); -lean_ctor_set(x_61, 1, x_60); -return x_61; +lean_object* x_61; lean_object* x_62; +x_61 = lean_ctor_get(x_58, 1); +lean_inc(x_61); +lean_dec(x_58); +x_62 = lean_alloc_ctor(0, 2, 0); +lean_ctor_set(x_62, 0, x_12); +lean_ctor_set(x_62, 1, x_61); +return x_62; } } } else { -lean_object* x_62; lean_object* x_63; lean_object* x_64; lean_object* x_65; lean_object* x_66; lean_object* x_67; uint8_t x_68; -x_62 = lean_box(0); -x_63 = lean_array_uset(x_18, x_36, x_62); -lean_inc(x_11); -x_64 = l_Std_DHashMap_Internal_AssocList_replace___at_Lake_importModulesUsingCache___spec__8(x_1, x_11, x_37); -x_65 = lean_array_uset(x_63, x_36, x_64); -if (lean_is_scalar(x_19)) { - x_66 = lean_alloc_ctor(0, 2, 0); +lean_object* x_63; lean_object* x_64; lean_object* x_65; lean_object* x_66; lean_object* x_67; lean_object* x_68; uint8_t x_69; +x_63 = lean_box(0); +x_64 = lean_array_uset(x_19, x_37, x_63); +lean_inc(x_12); +x_65 = l_Std_DHashMap_Internal_AssocList_replace___at_Lake_importModulesUsingCache___spec__8(x_1, x_12, x_38); +x_66 = lean_array_uset(x_64, x_37, x_65); +if (lean_is_scalar(x_20)) { + x_67 = lean_alloc_ctor(0, 2, 0); } else { - x_66 = x_19; + x_67 = x_20; } -lean_ctor_set(x_66, 0, x_17); -lean_ctor_set(x_66, 1, x_65); -x_67 = lean_st_ref_set(x_13, x_66, x_16); -x_68 = !lean_is_exclusive(x_67); -if (x_68 == 0) +lean_ctor_set(x_67, 0, x_18); +lean_ctor_set(x_67, 1, x_66); +x_68 = lean_st_ref_set(x_14, x_67, x_17); +x_69 = !lean_is_exclusive(x_68); +if (x_69 == 0) { -lean_object* x_69; -x_69 = lean_ctor_get(x_67, 0); -lean_dec(x_69); -lean_ctor_set(x_67, 0, x_11); -return x_67; +lean_object* x_70; +x_70 = lean_ctor_get(x_68, 0); +lean_dec(x_70); 
+lean_ctor_set(x_68, 0, x_12); +return x_68; } else { -lean_object* x_70; lean_object* x_71; -x_70 = lean_ctor_get(x_67, 1); -lean_inc(x_70); -lean_dec(x_67); -x_71 = lean_alloc_ctor(0, 2, 0); -lean_ctor_set(x_71, 0, x_11); -lean_ctor_set(x_71, 1, x_70); -return x_71; +lean_object* x_71; lean_object* x_72; +x_71 = lean_ctor_get(x_68, 1); +lean_inc(x_71); +lean_dec(x_68); +x_72 = lean_alloc_ctor(0, 2, 0); +lean_ctor_set(x_72, 0, x_12); +lean_ctor_set(x_72, 1, x_71); +return x_72; } } } } else { -uint8_t x_77; +uint8_t x_78; lean_dec(x_1); -x_77 = !lean_is_exclusive(x_10); -if (x_77 == 0) +x_78 = !lean_is_exclusive(x_11); +if (x_78 == 0) { -return x_10; +return x_11; } else { -lean_object* x_78; lean_object* x_79; lean_object* x_80; -x_78 = lean_ctor_get(x_10, 0); -x_79 = lean_ctor_get(x_10, 1); +lean_object* x_79; lean_object* x_80; lean_object* x_81; +x_79 = lean_ctor_get(x_11, 0); +x_80 = lean_ctor_get(x_11, 1); +lean_inc(x_80); lean_inc(x_79); -lean_inc(x_78); -lean_dec(x_10); -x_80 = lean_alloc_ctor(1, 2, 0); -lean_ctor_set(x_80, 0, x_78); -lean_ctor_set(x_80, 1, x_79); -return x_80; +lean_dec(x_11); +x_81 = lean_alloc_ctor(1, 2, 0); +lean_ctor_set(x_81, 0, x_79); +lean_ctor_set(x_81, 1, x_80); +return x_81; } } } @@ -1782,7 +1784,7 @@ LEAN_EXPORT lean_object* l_Lake_processHeader(lean_object* x_1, lean_object* x_2 { lean_object* x_6; uint32_t x_7; lean_object* x_8; lean_inc(x_1); -x_6 = l_Lean_Elab_headerToImports(x_1); +x_6 = l_Lean_Elab_HeaderSyntax_imports(x_1); x_7 = 1024; x_8 = l_Lake_importModulesUsingCache(x_6, x_2, x_7, x_5); if (lean_obj_tag(x_8) == 0) @@ -5176,7 +5178,7 @@ x_30 = lean_alloc_ctor(1, 2, 0); lean_ctor_set(x_30, 0, x_10); lean_ctor_set(x_30, 1, x_29); x_31 = l_Lake_importModulesUsingCache___lambda__1___closed__1; -x_32 = l_List_flatMapTR_go___at___private_Lean_Util_Paths_0__Lean_toJsonLeanPaths____x40_Lean_Util_Paths___hyg_55____spec__2(x_30, x_31); +x_32 = l_List_flatMapTR_go___at___private_Lean_Data_Lsp_Basic_0__Lean_Lsp_toJsonPosition____x40_Lean_Data_Lsp_Basic___hyg_221____spec__1(x_30, x_31); x_33 = l_Lean_Json_mkObj(x_32); return x_33; } @@ -5299,40 +5301,101 @@ return x_16; } } } +static lean_object* _init_l_Lean_Json_getObjValAs_x3f___at___private_Lake_Load_Lean_Elab_0__Lake_fromJsonConfigTrace____x40_Lake_Load_Lean_Elab___hyg_976____spec__2___closed__1() { +_start: +{ +lean_object* x_1; +x_1 = lean_mk_string_unchecked("expected a `NameMap`, got '", 27, 27); +return x_1; +} +} +static lean_object* _init_l_Lean_Json_getObjValAs_x3f___at___private_Lake_Load_Lean_Elab_0__Lake_fromJsonConfigTrace____x40_Lake_Load_Lean_Elab___hyg_976____spec__2___closed__2() { +_start: +{ +lean_object* x_1; +x_1 = lean_mk_string_unchecked("'", 1, 1); +return x_1; +} +} LEAN_EXPORT lean_object* l_Lean_Json_getObjValAs_x3f___at___private_Lake_Load_Lean_Elab_0__Lake_fromJsonConfigTrace____x40_Lake_Load_Lean_Elab___hyg_976____spec__2(lean_object* x_1, lean_object* x_2) { _start: { -lean_object* x_3; lean_object* x_4; +lean_object* x_3; x_3 = l_Lean_Json_getObjValD(x_1, x_2); -x_4 = l_Lean_Json_getObj_x3f(x_3); -if (lean_obj_tag(x_4) == 0) +switch (lean_obj_tag(x_3)) { +case 0: { -uint8_t x_5; -x_5 = !lean_is_exclusive(x_4); -if (x_5 == 0) +lean_object* x_4; lean_object* x_5; lean_object* x_6; lean_object* x_7; lean_object* x_8; lean_object* x_9; lean_object* x_10; +x_4 = lean_unsigned_to_nat(80u); +x_5 = l_Lean_Json_pretty(x_3, x_4); +x_6 = 
l_Lean_Json_getObjValAs_x3f___at___private_Lake_Load_Lean_Elab_0__Lake_fromJsonConfigTrace____x40_Lake_Load_Lean_Elab___hyg_976____spec__2___closed__1; +x_7 = lean_string_append(x_6, x_5); +lean_dec(x_5); +x_8 = l_Lean_Json_getObjValAs_x3f___at___private_Lake_Load_Lean_Elab_0__Lake_fromJsonConfigTrace____x40_Lake_Load_Lean_Elab___hyg_976____spec__2___closed__2; +x_9 = lean_string_append(x_7, x_8); +x_10 = lean_alloc_ctor(0, 1, 0); +lean_ctor_set(x_10, 0, x_9); +return x_10; +} +case 1: { -return x_4; +lean_object* x_11; lean_object* x_12; lean_object* x_13; lean_object* x_14; lean_object* x_15; lean_object* x_16; lean_object* x_17; +x_11 = lean_unsigned_to_nat(80u); +x_12 = l_Lean_Json_pretty(x_3, x_11); +x_13 = l_Lean_Json_getObjValAs_x3f___at___private_Lake_Load_Lean_Elab_0__Lake_fromJsonConfigTrace____x40_Lake_Load_Lean_Elab___hyg_976____spec__2___closed__1; +x_14 = lean_string_append(x_13, x_12); +lean_dec(x_12); +x_15 = l_Lean_Json_getObjValAs_x3f___at___private_Lake_Load_Lean_Elab_0__Lake_fromJsonConfigTrace____x40_Lake_Load_Lean_Elab___hyg_976____spec__2___closed__2; +x_16 = lean_string_append(x_14, x_15); +x_17 = lean_alloc_ctor(0, 1, 0); +lean_ctor_set(x_17, 0, x_16); +return x_17; } -else +case 5: { -lean_object* x_6; lean_object* x_7; -x_6 = lean_ctor_get(x_4, 0); -lean_inc(x_6); -lean_dec(x_4); -x_7 = lean_alloc_ctor(0, 1, 0); -lean_ctor_set(x_7, 0, x_6); -return x_7; +lean_object* x_18; lean_object* x_19; lean_object* x_20; +x_18 = lean_ctor_get(x_3, 0); +lean_inc(x_18); +lean_dec(x_3); +x_19 = lean_box(0); +x_20 = l_Lean_RBNode_foldM___at___private_Lake_Load_Manifest_0__Lake_fromJsonPackageEntryV6____x40_Lake_Load_Manifest___hyg_115____spec__1(x_19, x_18); +return x_20; } +default: +{ +lean_object* x_21; lean_object* x_22; uint8_t x_23; +x_21 = lean_unsigned_to_nat(80u); +lean_inc(x_3); +x_22 = l_Lean_Json_pretty(x_3, x_21); +x_23 = !lean_is_exclusive(x_3); +if (x_23 == 0) +{ +lean_object* x_24; lean_object* x_25; lean_object* x_26; lean_object* x_27; lean_object* x_28; +x_24 = lean_ctor_get(x_3, 0); +lean_dec(x_24); +x_25 = l_Lean_Json_getObjValAs_x3f___at___private_Lake_Load_Lean_Elab_0__Lake_fromJsonConfigTrace____x40_Lake_Load_Lean_Elab___hyg_976____spec__2___closed__1; +x_26 = lean_string_append(x_25, x_22); +lean_dec(x_22); +x_27 = l_Lean_Json_getObjValAs_x3f___at___private_Lake_Load_Lean_Elab_0__Lake_fromJsonConfigTrace____x40_Lake_Load_Lean_Elab___hyg_976____spec__2___closed__2; +x_28 = lean_string_append(x_26, x_27); +lean_ctor_set_tag(x_3, 0); +lean_ctor_set(x_3, 0, x_28); +return x_3; } else { -lean_object* x_8; lean_object* x_9; lean_object* x_10; -x_8 = lean_ctor_get(x_4, 0); -lean_inc(x_8); -lean_dec(x_4); -x_9 = lean_box(0); -x_10 = l_Lean_RBNode_foldM___at___private_Lake_Load_Manifest_0__Lake_fromJsonPackageEntryV6____x40_Lake_Load_Manifest___hyg_115____spec__1(x_9, x_8); -return x_10; +lean_object* x_29; lean_object* x_30; lean_object* x_31; lean_object* x_32; lean_object* x_33; +lean_dec(x_3); +x_29 = l_Lean_Json_getObjValAs_x3f___at___private_Lake_Load_Lean_Elab_0__Lake_fromJsonConfigTrace____x40_Lake_Load_Lean_Elab___hyg_976____spec__2___closed__1; +x_30 = lean_string_append(x_29, x_22); +lean_dec(x_22); +x_31 = l_Lean_Json_getObjValAs_x3f___at___private_Lake_Load_Lean_Elab_0__Lake_fromJsonConfigTrace____x40_Lake_Load_Lean_Elab___hyg_976____spec__2___closed__2; +x_32 = lean_string_append(x_30, x_31); +x_33 = lean_alloc_ctor(0, 1, 0); +lean_ctor_set(x_33, 0, x_32); +return x_33; +} +} } } } @@ -14779,6 +14842,10 @@ 
l_Lean_Json_getObjValAs_x3f___at___private_Lake_Load_Lean_Elab_0__Lake_fromJsonC lean_mark_persistent(l_Lean_Json_getObjValAs_x3f___at___private_Lake_Load_Lean_Elab_0__Lake_fromJsonConfigTrace____x40_Lake_Load_Lean_Elab___hyg_976____spec__1___closed__2); l_Lean_Json_getObjValAs_x3f___at___private_Lake_Load_Lean_Elab_0__Lake_fromJsonConfigTrace____x40_Lake_Load_Lean_Elab___hyg_976____spec__1___closed__3 = _init_l_Lean_Json_getObjValAs_x3f___at___private_Lake_Load_Lean_Elab_0__Lake_fromJsonConfigTrace____x40_Lake_Load_Lean_Elab___hyg_976____spec__1___closed__3(); lean_mark_persistent(l_Lean_Json_getObjValAs_x3f___at___private_Lake_Load_Lean_Elab_0__Lake_fromJsonConfigTrace____x40_Lake_Load_Lean_Elab___hyg_976____spec__1___closed__3); +l_Lean_Json_getObjValAs_x3f___at___private_Lake_Load_Lean_Elab_0__Lake_fromJsonConfigTrace____x40_Lake_Load_Lean_Elab___hyg_976____spec__2___closed__1 = _init_l_Lean_Json_getObjValAs_x3f___at___private_Lake_Load_Lean_Elab_0__Lake_fromJsonConfigTrace____x40_Lake_Load_Lean_Elab___hyg_976____spec__2___closed__1(); +lean_mark_persistent(l_Lean_Json_getObjValAs_x3f___at___private_Lake_Load_Lean_Elab_0__Lake_fromJsonConfigTrace____x40_Lake_Load_Lean_Elab___hyg_976____spec__2___closed__1); +l_Lean_Json_getObjValAs_x3f___at___private_Lake_Load_Lean_Elab_0__Lake_fromJsonConfigTrace____x40_Lake_Load_Lean_Elab___hyg_976____spec__2___closed__2 = _init_l_Lean_Json_getObjValAs_x3f___at___private_Lake_Load_Lean_Elab_0__Lake_fromJsonConfigTrace____x40_Lake_Load_Lean_Elab___hyg_976____spec__2___closed__2(); +lean_mark_persistent(l_Lean_Json_getObjValAs_x3f___at___private_Lake_Load_Lean_Elab_0__Lake_fromJsonConfigTrace____x40_Lake_Load_Lean_Elab___hyg_976____spec__2___closed__2); l___private_Lake_Load_Lean_Elab_0__Lake_fromJsonConfigTrace____x40_Lake_Load_Lean_Elab___hyg_976____closed__1 = _init_l___private_Lake_Load_Lean_Elab_0__Lake_fromJsonConfigTrace____x40_Lake_Load_Lean_Elab___hyg_976____closed__1(); lean_mark_persistent(l___private_Lake_Load_Lean_Elab_0__Lake_fromJsonConfigTrace____x40_Lake_Load_Lean_Elab___hyg_976____closed__1); l___private_Lake_Load_Lean_Elab_0__Lake_fromJsonConfigTrace____x40_Lake_Load_Lean_Elab___hyg_976____closed__2 = _init_l___private_Lake_Load_Lean_Elab_0__Lake_fromJsonConfigTrace____x40_Lake_Load_Lean_Elab___hyg_976____closed__2(); diff --git a/stage0/stdlib/Lake/Load/Manifest.c b/stage0/stdlib/Lake/Load/Manifest.c index ad95227be579..425118c17458 100644 --- a/stage0/stdlib/Lake/Load/Manifest.c +++ b/stage0/stdlib/Lake/Load/Manifest.c @@ -54,12 +54,10 @@ static lean_object* l_Lake_Manifest_getPackages___closed__7; lean_object* lean_array_push(lean_object*, lean_object*); static lean_object* l_Lake_PackageEntry_fromJson_x3f___closed__1; static lean_object* l_Lake_PackageEntry_fromJson_x3f___closed__30; -static lean_object* l___private_Lake_Load_Manifest_0__Lake_fromJsonPackageEntryV6____x40_Lake_Load_Manifest___hyg_115____lambda__3___closed__13; static lean_object* l_Lake_PackageEntry_instFromJson___closed__1; uint8_t l_Lean_Name_isAnonymous(lean_object*); static lean_object* l_Lake_instInhabitedPackageEntryV6___closed__1; uint8_t l_Ord_instDecidableRelLt___rarg(lean_object*, lean_object*, lean_object*); -static lean_object* l___private_Lake_Load_Manifest_0__Lake_fromJsonPackageEntryV6____x40_Lake_Load_Manifest___hyg_115____lambda__3___closed__12; static lean_object* l_Lake_Manifest_parse___closed__1; LEAN_EXPORT lean_object* l_Lake_PackageEntry_inDirectory(lean_object*, lean_object*); LEAN_EXPORT lean_object* 
l_Lake_Manifest_saveToFile(lean_object*, lean_object*, lean_object*); @@ -210,6 +208,7 @@ uint8_t lean_int_dec_lt(lean_object*, lean_object*); lean_object* l_Except_orElseLazy___rarg(lean_object*, lean_object*); lean_object* l_Lake_mkRelPathString(lean_object*); static lean_object* l_Lake_Manifest_version___closed__3; +static lean_object* l_Lean_RBNode_foldM___at___private_Lake_Load_Manifest_0__Lake_fromJsonPackageEntryV6____x40_Lake_Load_Manifest___hyg_115____spec__1___closed__3; LEAN_EXPORT lean_object* l_Lean_RBNode_fold___at___private_Lake_Load_Manifest_0__Lake_toJsonPackageEntryV6____x40_Lake_Load_Manifest___hyg_456____spec__1(lean_object*, lean_object*, lean_object*); lean_object* l_Lean_Json_Parser_any(lean_object*); LEAN_EXPORT lean_object* l_Array_mapMUnsafe_map___at_Lake_Manifest_getPackages___spec__3___boxed(lean_object*, lean_object*, lean_object*); @@ -315,18 +314,24 @@ static lean_object* _init_l_Lean_RBNode_foldM___at___private_Lake_Load_Manifest_ _start: { lean_object* x_1; -x_1 = lean_mk_string_unchecked("expected name", 13, 13); +x_1 = lean_mk_string_unchecked("[anonymous]", 11, 11); return x_1; } } static lean_object* _init_l_Lean_RBNode_foldM___at___private_Lake_Load_Manifest_0__Lake_fromJsonPackageEntryV6____x40_Lake_Load_Manifest___hyg_115____spec__1___closed__2() { _start: { -lean_object* x_1; lean_object* x_2; -x_1 = l_Lean_RBNode_foldM___at___private_Lake_Load_Manifest_0__Lake_fromJsonPackageEntryV6____x40_Lake_Load_Manifest___hyg_115____spec__1___closed__1; -x_2 = lean_alloc_ctor(0, 1, 0); -lean_ctor_set(x_2, 0, x_1); -return x_2; +lean_object* x_1; +x_1 = lean_mk_string_unchecked("expected a `Name`, got '", 24, 24); +return x_1; +} +} +static lean_object* _init_l_Lean_RBNode_foldM___at___private_Lake_Load_Manifest_0__Lake_fromJsonPackageEntryV6____x40_Lake_Load_Manifest___hyg_115____spec__1___closed__3() { +_start: +{ +lean_object* x_1; +x_1 = lean_mk_string_unchecked("'", 1, 1); +return x_1; } } LEAN_EXPORT lean_object* l_Lean_RBNode_foldM___at___private_Lake_Load_Manifest_0__Lake_fromJsonPackageEntryV6____x40_Lake_Load_Manifest___hyg_115____spec__1(lean_object* x_1, lean_object* x_2) { @@ -376,59 +381,229 @@ return x_11; } else { -lean_object* x_12; lean_object* x_13; uint8_t x_14; -x_12 = lean_ctor_get(x_8, 0); -lean_inc(x_12); -lean_dec(x_8); -x_13 = l_String_toName(x_5); -x_14 = l_Lean_Name_isAnonymous(x_13); -if (x_14 == 0) +uint8_t x_12; +x_12 = !lean_is_exclusive(x_8); +if (x_12 == 0) { -lean_object* x_15; -x_15 = l_Lean_Json_getStr_x3f(x_6); -if (lean_obj_tag(x_15) == 0) +lean_object* x_13; lean_object* x_14; uint8_t x_15; +x_13 = lean_ctor_get(x_8, 0); +x_14 = l_Lean_RBNode_foldM___at___private_Lake_Load_Manifest_0__Lake_fromJsonPackageEntryV6____x40_Lake_Load_Manifest___hyg_115____spec__1___closed__1; +x_15 = lean_string_dec_eq(x_5, x_14); +if (x_15 == 0) { -uint8_t x_16; +lean_object* x_16; uint8_t x_17; +lean_inc(x_5); +x_16 = l_String_toName(x_5); +x_17 = l_Lean_Name_isAnonymous(x_16); +if (x_17 == 0) +{ +lean_object* x_18; +lean_free_object(x_8); +lean_dec(x_5); +x_18 = l_Lean_Json_getStr_x3f(x_6); +if (lean_obj_tag(x_18) == 0) +{ +uint8_t x_19; +lean_dec(x_16); lean_dec(x_13); -lean_dec(x_12); lean_dec(x_7); -x_16 = !lean_is_exclusive(x_15); -if (x_16 == 0) +x_19 = !lean_is_exclusive(x_18); +if (x_19 == 0) { -return x_15; +return x_18; } else { -lean_object* x_17; lean_object* x_18; -x_17 = lean_ctor_get(x_15, 0); -lean_inc(x_17); -lean_dec(x_15); -x_18 = lean_alloc_ctor(0, 1, 0); -lean_ctor_set(x_18, 0, x_17); -return x_18; +lean_object* x_20; 
lean_object* x_21; +x_20 = lean_ctor_get(x_18, 0); +lean_inc(x_20); +lean_dec(x_18); +x_21 = lean_alloc_ctor(0, 1, 0); +lean_ctor_set(x_21, 0, x_20); +return x_21; } } else { -lean_object* x_19; lean_object* x_20; -x_19 = lean_ctor_get(x_15, 0); -lean_inc(x_19); -lean_dec(x_15); -x_20 = l_Lean_RBNode_insert___at_Lean_NameMap_insert___spec__1___rarg(x_12, x_13, x_19); -x_1 = x_20; +lean_object* x_22; lean_object* x_23; +x_22 = lean_ctor_get(x_18, 0); +lean_inc(x_22); +lean_dec(x_18); +x_23 = l_Lean_RBNode_insert___at_Lean_NameMap_insert___spec__1___rarg(x_13, x_16, x_22); +x_1 = x_23; x_2 = x_7; goto _start; } } else { -lean_object* x_22; +lean_object* x_25; lean_object* x_26; lean_object* x_27; lean_object* x_28; +lean_dec(x_16); lean_dec(x_13); -lean_dec(x_12); lean_dec(x_7); lean_dec(x_6); -x_22 = l_Lean_RBNode_foldM___at___private_Lake_Load_Manifest_0__Lake_fromJsonPackageEntryV6____x40_Lake_Load_Manifest___hyg_115____spec__1___closed__2; -return x_22; +x_25 = l_Lean_RBNode_foldM___at___private_Lake_Load_Manifest_0__Lake_fromJsonPackageEntryV6____x40_Lake_Load_Manifest___hyg_115____spec__1___closed__2; +x_26 = lean_string_append(x_25, x_5); +lean_dec(x_5); +x_27 = l_Lean_RBNode_foldM___at___private_Lake_Load_Manifest_0__Lake_fromJsonPackageEntryV6____x40_Lake_Load_Manifest___hyg_115____spec__1___closed__3; +x_28 = lean_string_append(x_26, x_27); +lean_ctor_set_tag(x_8, 0); +lean_ctor_set(x_8, 0, x_28); +return x_8; +} +} +else +{ +lean_object* x_29; +lean_free_object(x_8); +lean_dec(x_5); +x_29 = l_Lean_Json_getStr_x3f(x_6); +if (lean_obj_tag(x_29) == 0) +{ +uint8_t x_30; +lean_dec(x_13); +lean_dec(x_7); +x_30 = !lean_is_exclusive(x_29); +if (x_30 == 0) +{ +return x_29; +} +else +{ +lean_object* x_31; lean_object* x_32; +x_31 = lean_ctor_get(x_29, 0); +lean_inc(x_31); +lean_dec(x_29); +x_32 = lean_alloc_ctor(0, 1, 0); +lean_ctor_set(x_32, 0, x_31); +return x_32; +} +} +else +{ +lean_object* x_33; lean_object* x_34; lean_object* x_35; +x_33 = lean_ctor_get(x_29, 0); +lean_inc(x_33); +lean_dec(x_29); +x_34 = lean_box(0); +x_35 = l_Lean_RBNode_insert___at_Lean_NameMap_insert___spec__1___rarg(x_13, x_34, x_33); +x_1 = x_35; +x_2 = x_7; +goto _start; +} +} +} +else +{ +lean_object* x_37; lean_object* x_38; uint8_t x_39; +x_37 = lean_ctor_get(x_8, 0); +lean_inc(x_37); +lean_dec(x_8); +x_38 = l_Lean_RBNode_foldM___at___private_Lake_Load_Manifest_0__Lake_fromJsonPackageEntryV6____x40_Lake_Load_Manifest___hyg_115____spec__1___closed__1; +x_39 = lean_string_dec_eq(x_5, x_38); +if (x_39 == 0) +{ +lean_object* x_40; uint8_t x_41; +lean_inc(x_5); +x_40 = l_String_toName(x_5); +x_41 = l_Lean_Name_isAnonymous(x_40); +if (x_41 == 0) +{ +lean_object* x_42; +lean_dec(x_5); +x_42 = l_Lean_Json_getStr_x3f(x_6); +if (lean_obj_tag(x_42) == 0) +{ +lean_object* x_43; lean_object* x_44; lean_object* x_45; +lean_dec(x_40); +lean_dec(x_37); +lean_dec(x_7); +x_43 = lean_ctor_get(x_42, 0); +lean_inc(x_43); +if (lean_is_exclusive(x_42)) { + lean_ctor_release(x_42, 0); + x_44 = x_42; +} else { + lean_dec_ref(x_42); + x_44 = lean_box(0); +} +if (lean_is_scalar(x_44)) { + x_45 = lean_alloc_ctor(0, 1, 0); +} else { + x_45 = x_44; +} +lean_ctor_set(x_45, 0, x_43); +return x_45; +} +else +{ +lean_object* x_46; lean_object* x_47; +x_46 = lean_ctor_get(x_42, 0); +lean_inc(x_46); +lean_dec(x_42); +x_47 = l_Lean_RBNode_insert___at_Lean_NameMap_insert___spec__1___rarg(x_37, x_40, x_46); +x_1 = x_47; +x_2 = x_7; +goto _start; +} +} +else +{ +lean_object* x_49; lean_object* x_50; lean_object* x_51; lean_object* x_52; 
lean_object* x_53; +lean_dec(x_40); +lean_dec(x_37); +lean_dec(x_7); +lean_dec(x_6); +x_49 = l_Lean_RBNode_foldM___at___private_Lake_Load_Manifest_0__Lake_fromJsonPackageEntryV6____x40_Lake_Load_Manifest___hyg_115____spec__1___closed__2; +x_50 = lean_string_append(x_49, x_5); +lean_dec(x_5); +x_51 = l_Lean_RBNode_foldM___at___private_Lake_Load_Manifest_0__Lake_fromJsonPackageEntryV6____x40_Lake_Load_Manifest___hyg_115____spec__1___closed__3; +x_52 = lean_string_append(x_50, x_51); +x_53 = lean_alloc_ctor(0, 1, 0); +lean_ctor_set(x_53, 0, x_52); +return x_53; +} +} +else +{ +lean_object* x_54; +lean_dec(x_5); +x_54 = l_Lean_Json_getStr_x3f(x_6); +if (lean_obj_tag(x_54) == 0) +{ +lean_object* x_55; lean_object* x_56; lean_object* x_57; +lean_dec(x_37); +lean_dec(x_7); +x_55 = lean_ctor_get(x_54, 0); +lean_inc(x_55); +if (lean_is_exclusive(x_54)) { + lean_ctor_release(x_54, 0); + x_56 = x_54; +} else { + lean_dec_ref(x_54); + x_56 = lean_box(0); +} +if (lean_is_scalar(x_56)) { + x_57 = lean_alloc_ctor(0, 1, 0); +} else { + x_57 = x_56; +} +lean_ctor_set(x_57, 0, x_55); +return x_57; +} +else +{ +lean_object* x_58; lean_object* x_59; lean_object* x_60; +x_58 = lean_ctor_get(x_54, 0); +lean_inc(x_58); +lean_dec(x_54); +x_59 = lean_box(0); +x_60 = l_Lean_RBNode_insert___at_Lean_NameMap_insert___spec__1___rarg(x_37, x_59, x_58); +x_1 = x_60; +x_2 = x_7; +goto _start; +} +} } } } @@ -561,23 +736,7 @@ static lean_object* _init_l___private_Lake_Load_Manifest_0__Lake_fromJsonPackage _start: { lean_object* x_1; -x_1 = lean_mk_string_unchecked("[anonymous]", 11, 11); -return x_1; -} -} -static lean_object* _init_l___private_Lake_Load_Manifest_0__Lake_fromJsonPackageEntryV6____x40_Lake_Load_Manifest___hyg_115____lambda__3___closed__12() { -_start: -{ -lean_object* x_1; -x_1 = lean_mk_string_unchecked("expected a `Name`, got '", 24, 24); -return x_1; -} -} -static lean_object* _init_l___private_Lake_Load_Manifest_0__Lake_fromJsonPackageEntryV6____x40_Lake_Load_Manifest___hyg_115____lambda__3___closed__13() { -_start: -{ -lean_object* x_1; -x_1 = lean_mk_string_unchecked("'", 1, 1); +x_1 = lean_mk_string_unchecked("expected a `NameMap`, got '", 27, 27); return x_1; } } @@ -643,766 +802,775 @@ return x_29; } else { -lean_object* x_30; lean_object* x_31; lean_object* x_187; lean_object* x_188; lean_object* x_189; lean_object* x_190; +lean_object* x_30; lean_object* x_31; lean_object* x_32; lean_object* x_190; lean_object* x_191; lean_object* x_192; lean_object* x_193; x_30 = lean_ctor_get(x_22, 0); lean_inc(x_30); -lean_dec(x_22); -x_187 = l_Lean_instInhabitedJson; -x_188 = lean_unsigned_to_nat(0u); -x_189 = lean_array_get(x_187, x_30, x_188); -lean_inc(x_189); -x_190 = l_Lean_Json_getStr_x3f(x_189); -if (lean_obj_tag(x_190) == 0) -{ -uint8_t x_191; -lean_dec(x_189); +if (lean_is_exclusive(x_22)) { + lean_ctor_release(x_22, 0); + x_31 = x_22; +} else { + lean_dec_ref(x_22); + x_31 = lean_box(0); +} +x_190 = l_Lean_instInhabitedJson; +x_191 = lean_unsigned_to_nat(0u); +x_192 = lean_array_get(x_190, x_30, x_191); +lean_inc(x_192); +x_193 = l_Lean_Json_getStr_x3f(x_192); +if (lean_obj_tag(x_193) == 0) +{ +uint8_t x_194; +lean_dec(x_192); +lean_dec(x_31); lean_dec(x_30); -x_191 = !lean_is_exclusive(x_190); -if (x_191 == 0) +x_194 = !lean_is_exclusive(x_193); +if (x_194 == 0) { -lean_object* x_192; lean_object* x_193; -x_192 = l___private_Lake_Load_Manifest_0__Lake_fromJsonPackageEntryV6____x40_Lake_Load_Manifest___hyg_115____lambda__3___closed__10; -x_193 = l_Except_orElseLazy___rarg(x_190, x_192); -return 
x_193; +lean_object* x_195; lean_object* x_196; +x_195 = l___private_Lake_Load_Manifest_0__Lake_fromJsonPackageEntryV6____x40_Lake_Load_Manifest___hyg_115____lambda__3___closed__10; +x_196 = l_Except_orElseLazy___rarg(x_193, x_195); +return x_196; } else { -lean_object* x_194; lean_object* x_195; lean_object* x_196; lean_object* x_197; -x_194 = lean_ctor_get(x_190, 0); -lean_inc(x_194); -lean_dec(x_190); -x_195 = lean_alloc_ctor(0, 1, 0); -lean_ctor_set(x_195, 0, x_194); -x_196 = l___private_Lake_Load_Manifest_0__Lake_fromJsonPackageEntryV6____x40_Lake_Load_Manifest___hyg_115____lambda__3___closed__10; -x_197 = l_Except_orElseLazy___rarg(x_195, x_196); -return x_197; +lean_object* x_197; lean_object* x_198; lean_object* x_199; lean_object* x_200; +x_197 = lean_ctor_get(x_193, 0); +lean_inc(x_197); +lean_dec(x_193); +x_198 = lean_alloc_ctor(0, 1, 0); +lean_ctor_set(x_198, 0, x_197); +x_199 = l___private_Lake_Load_Manifest_0__Lake_fromJsonPackageEntryV6____x40_Lake_Load_Manifest___hyg_115____lambda__3___closed__10; +x_200 = l_Except_orElseLazy___rarg(x_198, x_199); +return x_200; } } else { -uint8_t x_198; -x_198 = !lean_is_exclusive(x_190); -if (x_198 == 0) -{ -lean_object* x_199; lean_object* x_200; uint8_t x_201; -x_199 = lean_ctor_get(x_190, 0); -x_200 = l___private_Lake_Load_Manifest_0__Lake_fromJsonPackageEntryV6____x40_Lake_Load_Manifest___hyg_115____lambda__3___closed__11; -x_201 = lean_string_dec_eq(x_199, x_200); +uint8_t x_201; +x_201 = !lean_is_exclusive(x_193); if (x_201 == 0) { -lean_object* x_202; uint8_t x_203; -x_202 = l_String_toName(x_199); -x_203 = l_Lean_Name_isAnonymous(x_202); -if (x_203 == 0) +lean_object* x_202; lean_object* x_203; uint8_t x_204; +x_202 = lean_ctor_get(x_193, 0); +x_203 = l_Lean_RBNode_foldM___at___private_Lake_Load_Manifest_0__Lake_fromJsonPackageEntryV6____x40_Lake_Load_Manifest___hyg_115____spec__1___closed__1; +x_204 = lean_string_dec_eq(x_202, x_203); +if (x_204 == 0) { -lean_free_object(x_190); -lean_dec(x_189); -x_31 = x_202; -goto block_186; +lean_object* x_205; uint8_t x_206; +x_205 = l_String_toName(x_202); +x_206 = l_Lean_Name_isAnonymous(x_205); +if (x_206 == 0) +{ +lean_free_object(x_193); +lean_dec(x_192); +x_32 = x_205; +goto block_189; } else { -lean_object* x_204; lean_object* x_205; lean_object* x_206; lean_object* x_207; lean_object* x_208; lean_object* x_209; lean_object* x_210; lean_object* x_211; -lean_dec(x_202); -lean_dec(x_30); -x_204 = lean_unsigned_to_nat(80u); -x_205 = l_Lean_Json_pretty(x_189, x_204); -x_206 = l___private_Lake_Load_Manifest_0__Lake_fromJsonPackageEntryV6____x40_Lake_Load_Manifest___hyg_115____lambda__3___closed__12; -x_207 = lean_string_append(x_206, x_205); +lean_object* x_207; lean_object* x_208; lean_object* x_209; lean_object* x_210; lean_object* x_211; lean_object* x_212; lean_object* x_213; lean_object* x_214; lean_dec(x_205); -x_208 = l___private_Lake_Load_Manifest_0__Lake_fromJsonPackageEntryV6____x40_Lake_Load_Manifest___hyg_115____lambda__3___closed__13; -x_209 = lean_string_append(x_207, x_208); -lean_ctor_set_tag(x_190, 0); -lean_ctor_set(x_190, 0, x_209); -x_210 = l___private_Lake_Load_Manifest_0__Lake_fromJsonPackageEntryV6____x40_Lake_Load_Manifest___hyg_115____lambda__3___closed__10; -x_211 = l_Except_orElseLazy___rarg(x_190, x_210); -return x_211; +lean_dec(x_31); +lean_dec(x_30); +x_207 = lean_unsigned_to_nat(80u); +x_208 = l_Lean_Json_pretty(x_192, x_207); +x_209 = 
l_Lean_RBNode_foldM___at___private_Lake_Load_Manifest_0__Lake_fromJsonPackageEntryV6____x40_Lake_Load_Manifest___hyg_115____spec__1___closed__2; +x_210 = lean_string_append(x_209, x_208); +lean_dec(x_208); +x_211 = l_Lean_RBNode_foldM___at___private_Lake_Load_Manifest_0__Lake_fromJsonPackageEntryV6____x40_Lake_Load_Manifest___hyg_115____spec__1___closed__3; +x_212 = lean_string_append(x_210, x_211); +lean_ctor_set_tag(x_193, 0); +lean_ctor_set(x_193, 0, x_212); +x_213 = l___private_Lake_Load_Manifest_0__Lake_fromJsonPackageEntryV6____x40_Lake_Load_Manifest___hyg_115____lambda__3___closed__10; +x_214 = l_Except_orElseLazy___rarg(x_193, x_213); +return x_214; } } else { -lean_object* x_212; -lean_free_object(x_190); -lean_dec(x_199); -lean_dec(x_189); -x_212 = lean_box(0); -x_31 = x_212; -goto block_186; +lean_object* x_215; +lean_free_object(x_193); +lean_dec(x_202); +lean_dec(x_192); +x_215 = lean_box(0); +x_32 = x_215; +goto block_189; } } else { -lean_object* x_213; lean_object* x_214; uint8_t x_215; -x_213 = lean_ctor_get(x_190, 0); -lean_inc(x_213); -lean_dec(x_190); -x_214 = l___private_Lake_Load_Manifest_0__Lake_fromJsonPackageEntryV6____x40_Lake_Load_Manifest___hyg_115____lambda__3___closed__11; -x_215 = lean_string_dec_eq(x_213, x_214); -if (x_215 == 0) +lean_object* x_216; lean_object* x_217; uint8_t x_218; +x_216 = lean_ctor_get(x_193, 0); +lean_inc(x_216); +lean_dec(x_193); +x_217 = l_Lean_RBNode_foldM___at___private_Lake_Load_Manifest_0__Lake_fromJsonPackageEntryV6____x40_Lake_Load_Manifest___hyg_115____spec__1___closed__1; +x_218 = lean_string_dec_eq(x_216, x_217); +if (x_218 == 0) { -lean_object* x_216; uint8_t x_217; -x_216 = l_String_toName(x_213); -x_217 = l_Lean_Name_isAnonymous(x_216); -if (x_217 == 0) +lean_object* x_219; uint8_t x_220; +x_219 = l_String_toName(x_216); +x_220 = l_Lean_Name_isAnonymous(x_219); +if (x_220 == 0) { -lean_dec(x_189); -x_31 = x_216; -goto block_186; +lean_dec(x_192); +x_32 = x_219; +goto block_189; } else { -lean_object* x_218; lean_object* x_219; lean_object* x_220; lean_object* x_221; lean_object* x_222; lean_object* x_223; lean_object* x_224; lean_object* x_225; lean_object* x_226; -lean_dec(x_216); -lean_dec(x_30); -x_218 = lean_unsigned_to_nat(80u); -x_219 = l_Lean_Json_pretty(x_189, x_218); -x_220 = l___private_Lake_Load_Manifest_0__Lake_fromJsonPackageEntryV6____x40_Lake_Load_Manifest___hyg_115____lambda__3___closed__12; -x_221 = lean_string_append(x_220, x_219); +lean_object* x_221; lean_object* x_222; lean_object* x_223; lean_object* x_224; lean_object* x_225; lean_object* x_226; lean_object* x_227; lean_object* x_228; lean_object* x_229; lean_dec(x_219); -x_222 = l___private_Lake_Load_Manifest_0__Lake_fromJsonPackageEntryV6____x40_Lake_Load_Manifest___hyg_115____lambda__3___closed__13; -x_223 = lean_string_append(x_221, x_222); -x_224 = lean_alloc_ctor(0, 1, 0); -lean_ctor_set(x_224, 0, x_223); -x_225 = l___private_Lake_Load_Manifest_0__Lake_fromJsonPackageEntryV6____x40_Lake_Load_Manifest___hyg_115____lambda__3___closed__10; -x_226 = l_Except_orElseLazy___rarg(x_224, x_225); -return x_226; +lean_dec(x_31); +lean_dec(x_30); +x_221 = lean_unsigned_to_nat(80u); +x_222 = l_Lean_Json_pretty(x_192, x_221); +x_223 = l_Lean_RBNode_foldM___at___private_Lake_Load_Manifest_0__Lake_fromJsonPackageEntryV6____x40_Lake_Load_Manifest___hyg_115____spec__1___closed__2; +x_224 = lean_string_append(x_223, x_222); +lean_dec(x_222); +x_225 = 
l_Lean_RBNode_foldM___at___private_Lake_Load_Manifest_0__Lake_fromJsonPackageEntryV6____x40_Lake_Load_Manifest___hyg_115____spec__1___closed__3; +x_226 = lean_string_append(x_224, x_225); +x_227 = lean_alloc_ctor(0, 1, 0); +lean_ctor_set(x_227, 0, x_226); +x_228 = l___private_Lake_Load_Manifest_0__Lake_fromJsonPackageEntryV6____x40_Lake_Load_Manifest___hyg_115____lambda__3___closed__10; +x_229 = l_Except_orElseLazy___rarg(x_227, x_228); +return x_229; } } else { -lean_object* x_227; -lean_dec(x_213); -lean_dec(x_189); -x_227 = lean_box(0); -x_31 = x_227; -goto block_186; +lean_object* x_230; +lean_dec(x_216); +lean_dec(x_192); +x_230 = lean_box(0); +x_32 = x_230; +goto block_189; } } } -block_186: +block_189: { -lean_object* x_32; lean_object* x_33; lean_object* x_34; lean_object* x_35; -x_32 = l_Lean_instInhabitedJson; -x_33 = lean_unsigned_to_nat(1u); -x_34 = lean_array_get(x_32, x_30, x_33); -x_35 = l_Lean_Json_getObj_x3f(x_34); -if (lean_obj_tag(x_35) == 0) +lean_object* x_33; lean_object* x_34; lean_object* x_35; +x_33 = l_Lean_instInhabitedJson; +x_34 = lean_unsigned_to_nat(1u); +x_35 = lean_array_get(x_33, x_30, x_34); +if (lean_obj_tag(x_35) == 5) { -uint8_t x_36; +lean_object* x_36; lean_object* x_37; lean_object* x_38; lean_object* x_39; lean_dec(x_31); -lean_dec(x_30); -x_36 = !lean_is_exclusive(x_35); -if (x_36 == 0) -{ -lean_object* x_37; lean_object* x_38; -x_37 = l___private_Lake_Load_Manifest_0__Lake_fromJsonPackageEntryV6____x40_Lake_Load_Manifest___hyg_115____lambda__3___closed__10; -x_38 = l_Except_orElseLazy___rarg(x_35, x_37); -return x_38; +x_36 = lean_ctor_get(x_35, 0); +lean_inc(x_36); +if (lean_is_exclusive(x_35)) { + lean_ctor_release(x_35, 0); + x_37 = x_35; +} else { + lean_dec_ref(x_35); + x_37 = lean_box(0); } -else +x_38 = lean_box(0); +x_39 = l_Lean_RBNode_foldM___at___private_Lake_Load_Manifest_0__Lake_fromJsonPackageEntryV6____x40_Lake_Load_Manifest___hyg_115____spec__1(x_38, x_36); +if (lean_obj_tag(x_39) == 0) { -lean_object* x_39; lean_object* x_40; lean_object* x_41; lean_object* x_42; -x_39 = lean_ctor_get(x_35, 0); -lean_inc(x_39); -lean_dec(x_35); -x_40 = lean_alloc_ctor(0, 1, 0); -lean_ctor_set(x_40, 0, x_39); +uint8_t x_40; +lean_dec(x_37); +lean_dec(x_32); +lean_dec(x_30); +x_40 = !lean_is_exclusive(x_39); +if (x_40 == 0) +{ +lean_object* x_41; lean_object* x_42; x_41 = l___private_Lake_Load_Manifest_0__Lake_fromJsonPackageEntryV6____x40_Lake_Load_Manifest___hyg_115____lambda__3___closed__10; -x_42 = l_Except_orElseLazy___rarg(x_40, x_41); +x_42 = l_Except_orElseLazy___rarg(x_39, x_41); return x_42; } -} else { -lean_object* x_43; lean_object* x_44; lean_object* x_45; -x_43 = lean_ctor_get(x_35, 0); +lean_object* x_43; lean_object* x_44; lean_object* x_45; lean_object* x_46; +x_43 = lean_ctor_get(x_39, 0); lean_inc(x_43); -lean_dec(x_35); -x_44 = lean_box(0); -x_45 = l_Lean_RBNode_foldM___at___private_Lake_Load_Manifest_0__Lake_fromJsonPackageEntryV6____x40_Lake_Load_Manifest___hyg_115____spec__1(x_44, x_43); -if (lean_obj_tag(x_45) == 0) -{ -uint8_t x_46; -lean_dec(x_31); -lean_dec(x_30); -x_46 = !lean_is_exclusive(x_45); -if (x_46 == 0) -{ -lean_object* x_47; lean_object* x_48; -x_47 = l___private_Lake_Load_Manifest_0__Lake_fromJsonPackageEntryV6____x40_Lake_Load_Manifest___hyg_115____lambda__3___closed__10; -x_48 = l_Except_orElseLazy___rarg(x_45, x_47); -return x_48; -} -else -{ -lean_object* x_49; lean_object* x_50; lean_object* x_51; lean_object* x_52; -x_49 = lean_ctor_get(x_45, 0); -lean_inc(x_49); -lean_dec(x_45); -x_50 = 
lean_alloc_ctor(0, 1, 0); -lean_ctor_set(x_50, 0, x_49); -x_51 = l___private_Lake_Load_Manifest_0__Lake_fromJsonPackageEntryV6____x40_Lake_Load_Manifest___hyg_115____lambda__3___closed__10; -x_52 = l_Except_orElseLazy___rarg(x_50, x_51); -return x_52; +lean_dec(x_39); +x_44 = lean_alloc_ctor(0, 1, 0); +lean_ctor_set(x_44, 0, x_43); +x_45 = l___private_Lake_Load_Manifest_0__Lake_fromJsonPackageEntryV6____x40_Lake_Load_Manifest___hyg_115____lambda__3___closed__10; +x_46 = l_Except_orElseLazy___rarg(x_44, x_45); +return x_46; } } else { -lean_object* x_53; lean_object* x_54; lean_object* x_55; lean_object* x_56; -x_53 = lean_ctor_get(x_45, 0); -lean_inc(x_53); -lean_dec(x_45); -x_54 = lean_unsigned_to_nat(2u); -x_55 = lean_array_get(x_32, x_30, x_54); -x_56 = l_Lean_Json_getBool_x3f(x_55); -lean_dec(x_55); -if (lean_obj_tag(x_56) == 0) +lean_object* x_47; lean_object* x_48; lean_object* x_49; lean_object* x_50; +x_47 = lean_ctor_get(x_39, 0); +lean_inc(x_47); +lean_dec(x_39); +x_48 = lean_unsigned_to_nat(2u); +x_49 = lean_array_get(x_33, x_30, x_48); +x_50 = l_Lean_Json_getBool_x3f(x_49); +lean_dec(x_49); +if (lean_obj_tag(x_50) == 0) { -uint8_t x_57; -lean_dec(x_53); -lean_dec(x_31); +uint8_t x_51; +lean_dec(x_47); +lean_dec(x_37); +lean_dec(x_32); lean_dec(x_30); -x_57 = !lean_is_exclusive(x_56); -if (x_57 == 0) +x_51 = !lean_is_exclusive(x_50); +if (x_51 == 0) { -lean_object* x_58; lean_object* x_59; -x_58 = l___private_Lake_Load_Manifest_0__Lake_fromJsonPackageEntryV6____x40_Lake_Load_Manifest___hyg_115____lambda__3___closed__10; -x_59 = l_Except_orElseLazy___rarg(x_56, x_58); -return x_59; +lean_object* x_52; lean_object* x_53; +x_52 = l___private_Lake_Load_Manifest_0__Lake_fromJsonPackageEntryV6____x40_Lake_Load_Manifest___hyg_115____lambda__3___closed__10; +x_53 = l_Except_orElseLazy___rarg(x_50, x_52); +return x_53; } else { -lean_object* x_60; lean_object* x_61; lean_object* x_62; lean_object* x_63; -x_60 = lean_ctor_get(x_56, 0); -lean_inc(x_60); -lean_dec(x_56); -x_61 = lean_alloc_ctor(0, 1, 0); -lean_ctor_set(x_61, 0, x_60); -x_62 = l___private_Lake_Load_Manifest_0__Lake_fromJsonPackageEntryV6____x40_Lake_Load_Manifest___hyg_115____lambda__3___closed__10; -x_63 = l_Except_orElseLazy___rarg(x_61, x_62); -return x_63; +lean_object* x_54; lean_object* x_55; lean_object* x_56; lean_object* x_57; +x_54 = lean_ctor_get(x_50, 0); +lean_inc(x_54); +lean_dec(x_50); +x_55 = lean_alloc_ctor(0, 1, 0); +lean_ctor_set(x_55, 0, x_54); +x_56 = l___private_Lake_Load_Manifest_0__Lake_fromJsonPackageEntryV6____x40_Lake_Load_Manifest___hyg_115____lambda__3___closed__10; +x_57 = l_Except_orElseLazy___rarg(x_55, x_56); +return x_57; } } else { -lean_object* x_64; lean_object* x_65; lean_object* x_66; lean_object* x_67; -x_64 = lean_ctor_get(x_56, 0); -lean_inc(x_64); -lean_dec(x_56); -x_65 = lean_unsigned_to_nat(3u); -x_66 = lean_array_get(x_32, x_30, x_65); -x_67 = l_Lean_Json_getStr_x3f(x_66); -if (lean_obj_tag(x_67) == 0) +lean_object* x_58; lean_object* x_59; lean_object* x_60; lean_object* x_61; +x_58 = lean_ctor_get(x_50, 0); +lean_inc(x_58); +lean_dec(x_50); +x_59 = lean_unsigned_to_nat(3u); +x_60 = lean_array_get(x_33, x_30, x_59); +x_61 = l_Lean_Json_getStr_x3f(x_60); +if (lean_obj_tag(x_61) == 0) { -uint8_t x_68; -lean_dec(x_64); -lean_dec(x_53); -lean_dec(x_31); +uint8_t x_62; +lean_dec(x_58); +lean_dec(x_47); +lean_dec(x_37); +lean_dec(x_32); lean_dec(x_30); -x_68 = !lean_is_exclusive(x_67); -if (x_68 == 0) +x_62 = !lean_is_exclusive(x_61); +if (x_62 == 0) { -lean_object* x_69; lean_object* 
x_70; -x_69 = l___private_Lake_Load_Manifest_0__Lake_fromJsonPackageEntryV6____x40_Lake_Load_Manifest___hyg_115____lambda__3___closed__10; -x_70 = l_Except_orElseLazy___rarg(x_67, x_69); -return x_70; +lean_object* x_63; lean_object* x_64; +x_63 = l___private_Lake_Load_Manifest_0__Lake_fromJsonPackageEntryV6____x40_Lake_Load_Manifest___hyg_115____lambda__3___closed__10; +x_64 = l_Except_orElseLazy___rarg(x_61, x_63); +return x_64; } else { -lean_object* x_71; lean_object* x_72; lean_object* x_73; lean_object* x_74; -x_71 = lean_ctor_get(x_67, 0); -lean_inc(x_71); -lean_dec(x_67); -x_72 = lean_alloc_ctor(0, 1, 0); -lean_ctor_set(x_72, 0, x_71); -x_73 = l___private_Lake_Load_Manifest_0__Lake_fromJsonPackageEntryV6____x40_Lake_Load_Manifest___hyg_115____lambda__3___closed__10; -x_74 = l_Except_orElseLazy___rarg(x_72, x_73); -return x_74; +lean_object* x_65; lean_object* x_66; lean_object* x_67; lean_object* x_68; +x_65 = lean_ctor_get(x_61, 0); +lean_inc(x_65); +lean_dec(x_61); +x_66 = lean_alloc_ctor(0, 1, 0); +lean_ctor_set(x_66, 0, x_65); +x_67 = l___private_Lake_Load_Manifest_0__Lake_fromJsonPackageEntryV6____x40_Lake_Load_Manifest___hyg_115____lambda__3___closed__10; +x_68 = l_Except_orElseLazy___rarg(x_66, x_67); +return x_68; } } else { -lean_object* x_75; lean_object* x_76; lean_object* x_77; lean_object* x_78; -x_75 = lean_ctor_get(x_67, 0); -lean_inc(x_75); -lean_dec(x_67); -x_76 = lean_unsigned_to_nat(4u); -x_77 = lean_array_get(x_32, x_30, x_76); -x_78 = l_Lean_Json_getStr_x3f(x_77); -if (lean_obj_tag(x_78) == 0) +lean_object* x_69; lean_object* x_70; lean_object* x_71; lean_object* x_72; +x_69 = lean_ctor_get(x_61, 0); +lean_inc(x_69); +lean_dec(x_61); +x_70 = lean_unsigned_to_nat(4u); +x_71 = lean_array_get(x_33, x_30, x_70); +x_72 = l_Lean_Json_getStr_x3f(x_71); +if (lean_obj_tag(x_72) == 0) { -uint8_t x_79; -lean_dec(x_75); -lean_dec(x_64); -lean_dec(x_53); -lean_dec(x_31); +uint8_t x_73; +lean_dec(x_69); +lean_dec(x_58); +lean_dec(x_47); +lean_dec(x_37); +lean_dec(x_32); lean_dec(x_30); -x_79 = !lean_is_exclusive(x_78); -if (x_79 == 0) +x_73 = !lean_is_exclusive(x_72); +if (x_73 == 0) { -lean_object* x_80; lean_object* x_81; -x_80 = l___private_Lake_Load_Manifest_0__Lake_fromJsonPackageEntryV6____x40_Lake_Load_Manifest___hyg_115____lambda__3___closed__10; -x_81 = l_Except_orElseLazy___rarg(x_78, x_80); -return x_81; +lean_object* x_74; lean_object* x_75; +x_74 = l___private_Lake_Load_Manifest_0__Lake_fromJsonPackageEntryV6____x40_Lake_Load_Manifest___hyg_115____lambda__3___closed__10; +x_75 = l_Except_orElseLazy___rarg(x_72, x_74); +return x_75; } else { -lean_object* x_82; lean_object* x_83; lean_object* x_84; lean_object* x_85; -x_82 = lean_ctor_get(x_78, 0); -lean_inc(x_82); -lean_dec(x_78); -x_83 = lean_alloc_ctor(0, 1, 0); -lean_ctor_set(x_83, 0, x_82); -x_84 = l___private_Lake_Load_Manifest_0__Lake_fromJsonPackageEntryV6____x40_Lake_Load_Manifest___hyg_115____lambda__3___closed__10; -x_85 = l_Except_orElseLazy___rarg(x_83, x_84); -return x_85; +lean_object* x_76; lean_object* x_77; lean_object* x_78; lean_object* x_79; +x_76 = lean_ctor_get(x_72, 0); +lean_inc(x_76); +lean_dec(x_72); +x_77 = lean_alloc_ctor(0, 1, 0); +lean_ctor_set(x_77, 0, x_76); +x_78 = l___private_Lake_Load_Manifest_0__Lake_fromJsonPackageEntryV6____x40_Lake_Load_Manifest___hyg_115____lambda__3___closed__10; +x_79 = l_Except_orElseLazy___rarg(x_77, x_78); +return x_79; } } else { -lean_object* x_86; lean_object* x_87; lean_object* x_88; lean_object* x_155; lean_object* x_156; -x_86 = 
lean_ctor_get(x_78, 0); -lean_inc(x_86); -if (lean_is_exclusive(x_78)) { - lean_ctor_release(x_78, 0); - x_87 = x_78; +lean_object* x_80; lean_object* x_81; lean_object* x_82; lean_object* x_149; lean_object* x_150; +x_80 = lean_ctor_get(x_72, 0); +lean_inc(x_80); +if (lean_is_exclusive(x_72)) { + lean_ctor_release(x_72, 0); + x_81 = x_72; } else { - lean_dec_ref(x_78); - x_87 = lean_box(0); + lean_dec_ref(x_72); + x_81 = lean_box(0); } -x_155 = lean_unsigned_to_nat(5u); -x_156 = lean_array_get(x_32, x_30, x_155); -switch (lean_obj_tag(x_156)) { +x_149 = lean_unsigned_to_nat(5u); +x_150 = lean_array_get(x_33, x_30, x_149); +switch (lean_obj_tag(x_150)) { case 0: { -lean_object* x_157; -x_157 = lean_box(0); -x_88 = x_157; -goto block_154; +lean_object* x_151; +x_151 = lean_box(0); +x_82 = x_151; +goto block_148; } case 1: { -lean_object* x_158; -x_158 = l_Lean_Json_getStr_x3f(x_156); -if (lean_obj_tag(x_158) == 0) +lean_object* x_152; +x_152 = l_Lean_Json_getStr_x3f(x_150); +if (lean_obj_tag(x_152) == 0) { -uint8_t x_159; -lean_dec(x_87); -lean_dec(x_86); -lean_dec(x_75); -lean_dec(x_64); -lean_dec(x_53); -lean_dec(x_31); +uint8_t x_153; +lean_dec(x_81); +lean_dec(x_80); +lean_dec(x_69); +lean_dec(x_58); +lean_dec(x_47); +lean_dec(x_37); +lean_dec(x_32); lean_dec(x_30); -x_159 = !lean_is_exclusive(x_158); -if (x_159 == 0) +x_153 = !lean_is_exclusive(x_152); +if (x_153 == 0) { -lean_object* x_160; lean_object* x_161; -x_160 = l___private_Lake_Load_Manifest_0__Lake_fromJsonPackageEntryV6____x40_Lake_Load_Manifest___hyg_115____lambda__3___closed__10; -x_161 = l_Except_orElseLazy___rarg(x_158, x_160); -return x_161; +lean_object* x_154; lean_object* x_155; +x_154 = l___private_Lake_Load_Manifest_0__Lake_fromJsonPackageEntryV6____x40_Lake_Load_Manifest___hyg_115____lambda__3___closed__10; +x_155 = l_Except_orElseLazy___rarg(x_152, x_154); +return x_155; } else { -lean_object* x_162; lean_object* x_163; lean_object* x_164; lean_object* x_165; -x_162 = lean_ctor_get(x_158, 0); -lean_inc(x_162); -lean_dec(x_158); -x_163 = lean_alloc_ctor(0, 1, 0); -lean_ctor_set(x_163, 0, x_162); -x_164 = l___private_Lake_Load_Manifest_0__Lake_fromJsonPackageEntryV6____x40_Lake_Load_Manifest___hyg_115____lambda__3___closed__10; -x_165 = l_Except_orElseLazy___rarg(x_163, x_164); -return x_165; +lean_object* x_156; lean_object* x_157; lean_object* x_158; lean_object* x_159; +x_156 = lean_ctor_get(x_152, 0); +lean_inc(x_156); +lean_dec(x_152); +x_157 = lean_alloc_ctor(0, 1, 0); +lean_ctor_set(x_157, 0, x_156); +x_158 = l___private_Lake_Load_Manifest_0__Lake_fromJsonPackageEntryV6____x40_Lake_Load_Manifest___hyg_115____lambda__3___closed__10; +x_159 = l_Except_orElseLazy___rarg(x_157, x_158); +return x_159; } } else { -lean_object* x_166; lean_object* x_167; -x_166 = lean_ctor_get(x_158, 0); -lean_inc(x_166); -lean_dec(x_158); -x_167 = lean_alloc_ctor(1, 1, 0); -lean_ctor_set(x_167, 0, x_166); -x_88 = x_167; -goto block_154; +lean_object* x_160; lean_object* x_161; +x_160 = lean_ctor_get(x_152, 0); +lean_inc(x_160); +lean_dec(x_152); +x_161 = lean_alloc_ctor(1, 1, 0); +lean_ctor_set(x_161, 0, x_160); +x_82 = x_161; +goto block_148; } } default: { -lean_object* x_168; uint8_t x_169; -lean_inc(x_156); -x_168 = l_Lean_Json_getStr_x3f(x_156); -x_169 = !lean_is_exclusive(x_156); -if (x_169 == 0) +lean_object* x_162; uint8_t x_163; +lean_inc(x_150); +x_162 = l_Lean_Json_getStr_x3f(x_150); +x_163 = !lean_is_exclusive(x_150); +if (x_163 == 0) { -lean_object* x_170; -x_170 = lean_ctor_get(x_156, 0); -lean_dec(x_170); -if 
(lean_obj_tag(x_168) == 0) +lean_object* x_164; +x_164 = lean_ctor_get(x_150, 0); +lean_dec(x_164); +if (lean_obj_tag(x_162) == 0) { -uint8_t x_171; -lean_free_object(x_156); -lean_dec(x_87); -lean_dec(x_86); -lean_dec(x_75); -lean_dec(x_64); -lean_dec(x_53); -lean_dec(x_31); +uint8_t x_165; +lean_free_object(x_150); +lean_dec(x_81); +lean_dec(x_80); +lean_dec(x_69); +lean_dec(x_58); +lean_dec(x_47); +lean_dec(x_37); +lean_dec(x_32); lean_dec(x_30); -x_171 = !lean_is_exclusive(x_168); -if (x_171 == 0) +x_165 = !lean_is_exclusive(x_162); +if (x_165 == 0) { -lean_object* x_172; lean_object* x_173; -x_172 = l___private_Lake_Load_Manifest_0__Lake_fromJsonPackageEntryV6____x40_Lake_Load_Manifest___hyg_115____lambda__3___closed__10; -x_173 = l_Except_orElseLazy___rarg(x_168, x_172); -return x_173; +lean_object* x_166; lean_object* x_167; +x_166 = l___private_Lake_Load_Manifest_0__Lake_fromJsonPackageEntryV6____x40_Lake_Load_Manifest___hyg_115____lambda__3___closed__10; +x_167 = l_Except_orElseLazy___rarg(x_162, x_166); +return x_167; } else { -lean_object* x_174; lean_object* x_175; lean_object* x_176; lean_object* x_177; -x_174 = lean_ctor_get(x_168, 0); -lean_inc(x_174); -lean_dec(x_168); -x_175 = lean_alloc_ctor(0, 1, 0); -lean_ctor_set(x_175, 0, x_174); -x_176 = l___private_Lake_Load_Manifest_0__Lake_fromJsonPackageEntryV6____x40_Lake_Load_Manifest___hyg_115____lambda__3___closed__10; -x_177 = l_Except_orElseLazy___rarg(x_175, x_176); -return x_177; +lean_object* x_168; lean_object* x_169; lean_object* x_170; lean_object* x_171; +x_168 = lean_ctor_get(x_162, 0); +lean_inc(x_168); +lean_dec(x_162); +x_169 = lean_alloc_ctor(0, 1, 0); +lean_ctor_set(x_169, 0, x_168); +x_170 = l___private_Lake_Load_Manifest_0__Lake_fromJsonPackageEntryV6____x40_Lake_Load_Manifest___hyg_115____lambda__3___closed__10; +x_171 = l_Except_orElseLazy___rarg(x_169, x_170); +return x_171; } } else { -lean_object* x_178; -x_178 = lean_ctor_get(x_168, 0); -lean_inc(x_178); -lean_dec(x_168); -lean_ctor_set_tag(x_156, 1); -lean_ctor_set(x_156, 0, x_178); -x_88 = x_156; -goto block_154; +lean_object* x_172; +x_172 = lean_ctor_get(x_162, 0); +lean_inc(x_172); +lean_dec(x_162); +lean_ctor_set_tag(x_150, 1); +lean_ctor_set(x_150, 0, x_172); +x_82 = x_150; +goto block_148; } } else { -lean_dec(x_156); -if (lean_obj_tag(x_168) == 0) +lean_dec(x_150); +if (lean_obj_tag(x_162) == 0) { -lean_object* x_179; lean_object* x_180; lean_object* x_181; lean_object* x_182; lean_object* x_183; -lean_dec(x_87); -lean_dec(x_86); -lean_dec(x_75); -lean_dec(x_64); -lean_dec(x_53); -lean_dec(x_31); +lean_object* x_173; lean_object* x_174; lean_object* x_175; lean_object* x_176; lean_object* x_177; +lean_dec(x_81); +lean_dec(x_80); +lean_dec(x_69); +lean_dec(x_58); +lean_dec(x_47); +lean_dec(x_37); +lean_dec(x_32); lean_dec(x_30); -x_179 = lean_ctor_get(x_168, 0); -lean_inc(x_179); -if (lean_is_exclusive(x_168)) { - lean_ctor_release(x_168, 0); - x_180 = x_168; +x_173 = lean_ctor_get(x_162, 0); +lean_inc(x_173); +if (lean_is_exclusive(x_162)) { + lean_ctor_release(x_162, 0); + x_174 = x_162; } else { - lean_dec_ref(x_168); - x_180 = lean_box(0); + lean_dec_ref(x_162); + x_174 = lean_box(0); } -if (lean_is_scalar(x_180)) { - x_181 = lean_alloc_ctor(0, 1, 0); +if (lean_is_scalar(x_174)) { + x_175 = lean_alloc_ctor(0, 1, 0); } else { - x_181 = x_180; + x_175 = x_174; } -lean_ctor_set(x_181, 0, x_179); -x_182 = l___private_Lake_Load_Manifest_0__Lake_fromJsonPackageEntryV6____x40_Lake_Load_Manifest___hyg_115____lambda__3___closed__10; -x_183 = 
l_Except_orElseLazy___rarg(x_181, x_182); -return x_183; +lean_ctor_set(x_175, 0, x_173); +x_176 = l___private_Lake_Load_Manifest_0__Lake_fromJsonPackageEntryV6____x40_Lake_Load_Manifest___hyg_115____lambda__3___closed__10; +x_177 = l_Except_orElseLazy___rarg(x_175, x_176); +return x_177; } else { -lean_object* x_184; lean_object* x_185; -x_184 = lean_ctor_get(x_168, 0); -lean_inc(x_184); -lean_dec(x_168); -x_185 = lean_alloc_ctor(1, 1, 0); -lean_ctor_set(x_185, 0, x_184); -x_88 = x_185; -goto block_154; +lean_object* x_178; lean_object* x_179; +x_178 = lean_ctor_get(x_162, 0); +lean_inc(x_178); +lean_dec(x_162); +x_179 = lean_alloc_ctor(1, 1, 0); +lean_ctor_set(x_179, 0, x_178); +x_82 = x_179; +goto block_148; } } } } -block_154: +block_148: { -lean_object* x_89; lean_object* x_90; -x_89 = lean_unsigned_to_nat(6u); -x_90 = lean_array_get(x_32, x_30, x_89); +lean_object* x_83; lean_object* x_84; +x_83 = lean_unsigned_to_nat(6u); +x_84 = lean_array_get(x_33, x_30, x_83); lean_dec(x_30); -switch (lean_obj_tag(x_90)) { +switch (lean_obj_tag(x_84)) { case 0: { -lean_object* x_91; lean_object* x_92; uint8_t x_93; lean_object* x_94; lean_object* x_95; lean_object* x_96; -x_91 = lean_box(0); -x_92 = lean_alloc_ctor(1, 6, 1); -lean_ctor_set(x_92, 0, x_31); -lean_ctor_set(x_92, 1, x_53); -lean_ctor_set(x_92, 2, x_75); -lean_ctor_set(x_92, 3, x_86); -lean_ctor_set(x_92, 4, x_88); -lean_ctor_set(x_92, 5, x_91); -x_93 = lean_unbox(x_64); -lean_dec(x_64); -lean_ctor_set_uint8(x_92, sizeof(void*)*6, x_93); -if (lean_is_scalar(x_87)) { - x_94 = lean_alloc_ctor(1, 1, 0); +lean_object* x_85; lean_object* x_86; uint8_t x_87; lean_object* x_88; lean_object* x_89; lean_object* x_90; +lean_dec(x_37); +x_85 = lean_box(0); +x_86 = lean_alloc_ctor(1, 6, 1); +lean_ctor_set(x_86, 0, x_32); +lean_ctor_set(x_86, 1, x_47); +lean_ctor_set(x_86, 2, x_69); +lean_ctor_set(x_86, 3, x_80); +lean_ctor_set(x_86, 4, x_82); +lean_ctor_set(x_86, 5, x_85); +x_87 = lean_unbox(x_58); +lean_dec(x_58); +lean_ctor_set_uint8(x_86, sizeof(void*)*6, x_87); +if (lean_is_scalar(x_81)) { + x_88 = lean_alloc_ctor(1, 1, 0); } else { - x_94 = x_87; + x_88 = x_81; } -lean_ctor_set(x_94, 0, x_92); -x_95 = l___private_Lake_Load_Manifest_0__Lake_fromJsonPackageEntryV6____x40_Lake_Load_Manifest___hyg_115____lambda__3___closed__10; -x_96 = l_Except_orElseLazy___rarg(x_94, x_95); -return x_96; +lean_ctor_set(x_88, 0, x_86); +x_89 = l___private_Lake_Load_Manifest_0__Lake_fromJsonPackageEntryV6____x40_Lake_Load_Manifest___hyg_115____lambda__3___closed__10; +x_90 = l_Except_orElseLazy___rarg(x_88, x_89); +return x_90; } case 1: { -lean_object* x_97; -lean_dec(x_87); -x_97 = l_Lean_Json_getStr_x3f(x_90); -if (lean_obj_tag(x_97) == 0) +lean_object* x_91; +lean_dec(x_81); +x_91 = l_Lean_Json_getStr_x3f(x_84); +if (lean_obj_tag(x_91) == 0) { -uint8_t x_98; -lean_dec(x_88); -lean_dec(x_86); -lean_dec(x_75); -lean_dec(x_64); -lean_dec(x_53); -lean_dec(x_31); -x_98 = !lean_is_exclusive(x_97); -if (x_98 == 0) +uint8_t x_92; +lean_dec(x_82); +lean_dec(x_80); +lean_dec(x_69); +lean_dec(x_58); +lean_dec(x_47); +lean_dec(x_37); +lean_dec(x_32); +x_92 = !lean_is_exclusive(x_91); +if (x_92 == 0) { -lean_object* x_99; lean_object* x_100; -x_99 = l___private_Lake_Load_Manifest_0__Lake_fromJsonPackageEntryV6____x40_Lake_Load_Manifest___hyg_115____lambda__3___closed__10; -x_100 = l_Except_orElseLazy___rarg(x_97, x_99); -return x_100; +lean_object* x_93; lean_object* x_94; +x_93 = 
l___private_Lake_Load_Manifest_0__Lake_fromJsonPackageEntryV6____x40_Lake_Load_Manifest___hyg_115____lambda__3___closed__10; +x_94 = l_Except_orElseLazy___rarg(x_91, x_93); +return x_94; } else { -lean_object* x_101; lean_object* x_102; lean_object* x_103; lean_object* x_104; -x_101 = lean_ctor_get(x_97, 0); -lean_inc(x_101); -lean_dec(x_97); -x_102 = lean_alloc_ctor(0, 1, 0); -lean_ctor_set(x_102, 0, x_101); -x_103 = l___private_Lake_Load_Manifest_0__Lake_fromJsonPackageEntryV6____x40_Lake_Load_Manifest___hyg_115____lambda__3___closed__10; -x_104 = l_Except_orElseLazy___rarg(x_102, x_103); -return x_104; +lean_object* x_95; lean_object* x_96; lean_object* x_97; lean_object* x_98; +x_95 = lean_ctor_get(x_91, 0); +lean_inc(x_95); +lean_dec(x_91); +x_96 = lean_alloc_ctor(0, 1, 0); +lean_ctor_set(x_96, 0, x_95); +x_97 = l___private_Lake_Load_Manifest_0__Lake_fromJsonPackageEntryV6____x40_Lake_Load_Manifest___hyg_115____lambda__3___closed__10; +x_98 = l_Except_orElseLazy___rarg(x_96, x_97); +return x_98; } } else { -uint8_t x_105; -x_105 = !lean_is_exclusive(x_97); -if (x_105 == 0) +uint8_t x_99; +x_99 = !lean_is_exclusive(x_91); +if (x_99 == 0) { -lean_object* x_106; lean_object* x_107; lean_object* x_108; uint8_t x_109; lean_object* x_110; lean_object* x_111; -x_106 = lean_ctor_get(x_97, 0); -x_107 = lean_alloc_ctor(1, 1, 0); +lean_object* x_100; lean_object* x_101; lean_object* x_102; uint8_t x_103; lean_object* x_104; lean_object* x_105; +x_100 = lean_ctor_get(x_91, 0); +if (lean_is_scalar(x_37)) { + x_101 = lean_alloc_ctor(1, 1, 0); +} else { + x_101 = x_37; + lean_ctor_set_tag(x_101, 1); +} +lean_ctor_set(x_101, 0, x_100); +x_102 = lean_alloc_ctor(1, 6, 1); +lean_ctor_set(x_102, 0, x_32); +lean_ctor_set(x_102, 1, x_47); +lean_ctor_set(x_102, 2, x_69); +lean_ctor_set(x_102, 3, x_80); +lean_ctor_set(x_102, 4, x_82); +lean_ctor_set(x_102, 5, x_101); +x_103 = lean_unbox(x_58); +lean_dec(x_58); +lean_ctor_set_uint8(x_102, sizeof(void*)*6, x_103); +lean_ctor_set(x_91, 0, x_102); +x_104 = l___private_Lake_Load_Manifest_0__Lake_fromJsonPackageEntryV6____x40_Lake_Load_Manifest___hyg_115____lambda__3___closed__10; +x_105 = l_Except_orElseLazy___rarg(x_91, x_104); +return x_105; +} +else +{ +lean_object* x_106; lean_object* x_107; lean_object* x_108; uint8_t x_109; lean_object* x_110; lean_object* x_111; lean_object* x_112; +x_106 = lean_ctor_get(x_91, 0); +lean_inc(x_106); +lean_dec(x_91); +if (lean_is_scalar(x_37)) { + x_107 = lean_alloc_ctor(1, 1, 0); +} else { + x_107 = x_37; + lean_ctor_set_tag(x_107, 1); +} lean_ctor_set(x_107, 0, x_106); x_108 = lean_alloc_ctor(1, 6, 1); -lean_ctor_set(x_108, 0, x_31); -lean_ctor_set(x_108, 1, x_53); -lean_ctor_set(x_108, 2, x_75); -lean_ctor_set(x_108, 3, x_86); -lean_ctor_set(x_108, 4, x_88); +lean_ctor_set(x_108, 0, x_32); +lean_ctor_set(x_108, 1, x_47); +lean_ctor_set(x_108, 2, x_69); +lean_ctor_set(x_108, 3, x_80); +lean_ctor_set(x_108, 4, x_82); lean_ctor_set(x_108, 5, x_107); -x_109 = lean_unbox(x_64); -lean_dec(x_64); +x_109 = lean_unbox(x_58); +lean_dec(x_58); lean_ctor_set_uint8(x_108, sizeof(void*)*6, x_109); -lean_ctor_set(x_97, 0, x_108); -x_110 = l___private_Lake_Load_Manifest_0__Lake_fromJsonPackageEntryV6____x40_Lake_Load_Manifest___hyg_115____lambda__3___closed__10; -x_111 = l_Except_orElseLazy___rarg(x_97, x_110); -return x_111; -} -else -{ -lean_object* x_112; lean_object* x_113; lean_object* x_114; uint8_t x_115; lean_object* x_116; lean_object* x_117; lean_object* x_118; -x_112 = lean_ctor_get(x_97, 0); -lean_inc(x_112); -lean_dec(x_97); 
-x_113 = lean_alloc_ctor(1, 1, 0); -lean_ctor_set(x_113, 0, x_112); -x_114 = lean_alloc_ctor(1, 6, 1); -lean_ctor_set(x_114, 0, x_31); -lean_ctor_set(x_114, 1, x_53); -lean_ctor_set(x_114, 2, x_75); -lean_ctor_set(x_114, 3, x_86); -lean_ctor_set(x_114, 4, x_88); -lean_ctor_set(x_114, 5, x_113); -x_115 = lean_unbox(x_64); -lean_dec(x_64); -lean_ctor_set_uint8(x_114, sizeof(void*)*6, x_115); -x_116 = lean_alloc_ctor(1, 1, 0); -lean_ctor_set(x_116, 0, x_114); -x_117 = l___private_Lake_Load_Manifest_0__Lake_fromJsonPackageEntryV6____x40_Lake_Load_Manifest___hyg_115____lambda__3___closed__10; -x_118 = l_Except_orElseLazy___rarg(x_116, x_117); -return x_118; +x_110 = lean_alloc_ctor(1, 1, 0); +lean_ctor_set(x_110, 0, x_108); +x_111 = l___private_Lake_Load_Manifest_0__Lake_fromJsonPackageEntryV6____x40_Lake_Load_Manifest___hyg_115____lambda__3___closed__10; +x_112 = l_Except_orElseLazy___rarg(x_110, x_111); +return x_112; } } } default: { -lean_object* x_119; uint8_t x_120; -lean_dec(x_87); -lean_inc(x_90); -x_119 = l_Lean_Json_getStr_x3f(x_90); -x_120 = !lean_is_exclusive(x_90); -if (x_120 == 0) +lean_object* x_113; uint8_t x_114; +lean_dec(x_81); +lean_dec(x_37); +lean_inc(x_84); +x_113 = l_Lean_Json_getStr_x3f(x_84); +x_114 = !lean_is_exclusive(x_84); +if (x_114 == 0) { -lean_object* x_121; -x_121 = lean_ctor_get(x_90, 0); -lean_dec(x_121); -if (lean_obj_tag(x_119) == 0) +lean_object* x_115; +x_115 = lean_ctor_get(x_84, 0); +lean_dec(x_115); +if (lean_obj_tag(x_113) == 0) { -uint8_t x_122; -lean_free_object(x_90); -lean_dec(x_88); -lean_dec(x_86); -lean_dec(x_75); -lean_dec(x_64); -lean_dec(x_53); -lean_dec(x_31); -x_122 = !lean_is_exclusive(x_119); -if (x_122 == 0) +uint8_t x_116; +lean_free_object(x_84); +lean_dec(x_82); +lean_dec(x_80); +lean_dec(x_69); +lean_dec(x_58); +lean_dec(x_47); +lean_dec(x_32); +x_116 = !lean_is_exclusive(x_113); +if (x_116 == 0) { -lean_object* x_123; lean_object* x_124; -x_123 = l___private_Lake_Load_Manifest_0__Lake_fromJsonPackageEntryV6____x40_Lake_Load_Manifest___hyg_115____lambda__3___closed__10; -x_124 = l_Except_orElseLazy___rarg(x_119, x_123); -return x_124; +lean_object* x_117; lean_object* x_118; +x_117 = l___private_Lake_Load_Manifest_0__Lake_fromJsonPackageEntryV6____x40_Lake_Load_Manifest___hyg_115____lambda__3___closed__10; +x_118 = l_Except_orElseLazy___rarg(x_113, x_117); +return x_118; } else { -lean_object* x_125; lean_object* x_126; lean_object* x_127; lean_object* x_128; -x_125 = lean_ctor_get(x_119, 0); -lean_inc(x_125); -lean_dec(x_119); -x_126 = lean_alloc_ctor(0, 1, 0); -lean_ctor_set(x_126, 0, x_125); -x_127 = l___private_Lake_Load_Manifest_0__Lake_fromJsonPackageEntryV6____x40_Lake_Load_Manifest___hyg_115____lambda__3___closed__10; -x_128 = l_Except_orElseLazy___rarg(x_126, x_127); -return x_128; +lean_object* x_119; lean_object* x_120; lean_object* x_121; lean_object* x_122; +x_119 = lean_ctor_get(x_113, 0); +lean_inc(x_119); +lean_dec(x_113); +x_120 = lean_alloc_ctor(0, 1, 0); +lean_ctor_set(x_120, 0, x_119); +x_121 = l___private_Lake_Load_Manifest_0__Lake_fromJsonPackageEntryV6____x40_Lake_Load_Manifest___hyg_115____lambda__3___closed__10; +x_122 = l_Except_orElseLazy___rarg(x_120, x_121); +return x_122; } } else { -uint8_t x_129; -x_129 = !lean_is_exclusive(x_119); -if (x_129 == 0) +uint8_t x_123; +x_123 = !lean_is_exclusive(x_113); +if (x_123 == 0) { -lean_object* x_130; lean_object* x_131; uint8_t x_132; lean_object* x_133; lean_object* x_134; -x_130 = lean_ctor_get(x_119, 0); -lean_ctor_set_tag(x_90, 1); -lean_ctor_set(x_90, 0, 
x_130); -x_131 = lean_alloc_ctor(1, 6, 1); -lean_ctor_set(x_131, 0, x_31); -lean_ctor_set(x_131, 1, x_53); -lean_ctor_set(x_131, 2, x_75); -lean_ctor_set(x_131, 3, x_86); -lean_ctor_set(x_131, 4, x_88); -lean_ctor_set(x_131, 5, x_90); -x_132 = lean_unbox(x_64); -lean_dec(x_64); -lean_ctor_set_uint8(x_131, sizeof(void*)*6, x_132); -lean_ctor_set(x_119, 0, x_131); -x_133 = l___private_Lake_Load_Manifest_0__Lake_fromJsonPackageEntryV6____x40_Lake_Load_Manifest___hyg_115____lambda__3___closed__10; -x_134 = l_Except_orElseLazy___rarg(x_119, x_133); -return x_134; +lean_object* x_124; lean_object* x_125; uint8_t x_126; lean_object* x_127; lean_object* x_128; +x_124 = lean_ctor_get(x_113, 0); +lean_ctor_set_tag(x_84, 1); +lean_ctor_set(x_84, 0, x_124); +x_125 = lean_alloc_ctor(1, 6, 1); +lean_ctor_set(x_125, 0, x_32); +lean_ctor_set(x_125, 1, x_47); +lean_ctor_set(x_125, 2, x_69); +lean_ctor_set(x_125, 3, x_80); +lean_ctor_set(x_125, 4, x_82); +lean_ctor_set(x_125, 5, x_84); +x_126 = lean_unbox(x_58); +lean_dec(x_58); +lean_ctor_set_uint8(x_125, sizeof(void*)*6, x_126); +lean_ctor_set(x_113, 0, x_125); +x_127 = l___private_Lake_Load_Manifest_0__Lake_fromJsonPackageEntryV6____x40_Lake_Load_Manifest___hyg_115____lambda__3___closed__10; +x_128 = l_Except_orElseLazy___rarg(x_113, x_127); +return x_128; } else { -lean_object* x_135; lean_object* x_136; uint8_t x_137; lean_object* x_138; lean_object* x_139; lean_object* x_140; -x_135 = lean_ctor_get(x_119, 0); -lean_inc(x_135); -lean_dec(x_119); -lean_ctor_set_tag(x_90, 1); -lean_ctor_set(x_90, 0, x_135); -x_136 = lean_alloc_ctor(1, 6, 1); -lean_ctor_set(x_136, 0, x_31); -lean_ctor_set(x_136, 1, x_53); -lean_ctor_set(x_136, 2, x_75); -lean_ctor_set(x_136, 3, x_86); -lean_ctor_set(x_136, 4, x_88); -lean_ctor_set(x_136, 5, x_90); -x_137 = lean_unbox(x_64); -lean_dec(x_64); -lean_ctor_set_uint8(x_136, sizeof(void*)*6, x_137); -x_138 = lean_alloc_ctor(1, 1, 0); -lean_ctor_set(x_138, 0, x_136); -x_139 = l___private_Lake_Load_Manifest_0__Lake_fromJsonPackageEntryV6____x40_Lake_Load_Manifest___hyg_115____lambda__3___closed__10; -x_140 = l_Except_orElseLazy___rarg(x_138, x_139); -return x_140; +lean_object* x_129; lean_object* x_130; uint8_t x_131; lean_object* x_132; lean_object* x_133; lean_object* x_134; +x_129 = lean_ctor_get(x_113, 0); +lean_inc(x_129); +lean_dec(x_113); +lean_ctor_set_tag(x_84, 1); +lean_ctor_set(x_84, 0, x_129); +x_130 = lean_alloc_ctor(1, 6, 1); +lean_ctor_set(x_130, 0, x_32); +lean_ctor_set(x_130, 1, x_47); +lean_ctor_set(x_130, 2, x_69); +lean_ctor_set(x_130, 3, x_80); +lean_ctor_set(x_130, 4, x_82); +lean_ctor_set(x_130, 5, x_84); +x_131 = lean_unbox(x_58); +lean_dec(x_58); +lean_ctor_set_uint8(x_130, sizeof(void*)*6, x_131); +x_132 = lean_alloc_ctor(1, 1, 0); +lean_ctor_set(x_132, 0, x_130); +x_133 = l___private_Lake_Load_Manifest_0__Lake_fromJsonPackageEntryV6____x40_Lake_Load_Manifest___hyg_115____lambda__3___closed__10; +x_134 = l_Except_orElseLazy___rarg(x_132, x_133); +return x_134; } } } else { -lean_dec(x_90); -if (lean_obj_tag(x_119) == 0) +lean_dec(x_84); +if (lean_obj_tag(x_113) == 0) { -lean_object* x_141; lean_object* x_142; lean_object* x_143; lean_object* x_144; lean_object* x_145; -lean_dec(x_88); -lean_dec(x_86); -lean_dec(x_75); -lean_dec(x_64); -lean_dec(x_53); -lean_dec(x_31); -x_141 = lean_ctor_get(x_119, 0); -lean_inc(x_141); -if (lean_is_exclusive(x_119)) { - lean_ctor_release(x_119, 0); - x_142 = x_119; +lean_object* x_135; lean_object* x_136; lean_object* x_137; lean_object* x_138; lean_object* x_139; 
+lean_dec(x_82); +lean_dec(x_80); +lean_dec(x_69); +lean_dec(x_58); +lean_dec(x_47); +lean_dec(x_32); +x_135 = lean_ctor_get(x_113, 0); +lean_inc(x_135); +if (lean_is_exclusive(x_113)) { + lean_ctor_release(x_113, 0); + x_136 = x_113; } else { - lean_dec_ref(x_119); - x_142 = lean_box(0); + lean_dec_ref(x_113); + x_136 = lean_box(0); } -if (lean_is_scalar(x_142)) { - x_143 = lean_alloc_ctor(0, 1, 0); +if (lean_is_scalar(x_136)) { + x_137 = lean_alloc_ctor(0, 1, 0); } else { - x_143 = x_142; + x_137 = x_136; } -lean_ctor_set(x_143, 0, x_141); -x_144 = l___private_Lake_Load_Manifest_0__Lake_fromJsonPackageEntryV6____x40_Lake_Load_Manifest___hyg_115____lambda__3___closed__10; -x_145 = l_Except_orElseLazy___rarg(x_143, x_144); -return x_145; +lean_ctor_set(x_137, 0, x_135); +x_138 = l___private_Lake_Load_Manifest_0__Lake_fromJsonPackageEntryV6____x40_Lake_Load_Manifest___hyg_115____lambda__3___closed__10; +x_139 = l_Except_orElseLazy___rarg(x_137, x_138); +return x_139; } else { -lean_object* x_146; lean_object* x_147; lean_object* x_148; lean_object* x_149; uint8_t x_150; lean_object* x_151; lean_object* x_152; lean_object* x_153; -x_146 = lean_ctor_get(x_119, 0); -lean_inc(x_146); -if (lean_is_exclusive(x_119)) { - lean_ctor_release(x_119, 0); - x_147 = x_119; +lean_object* x_140; lean_object* x_141; lean_object* x_142; lean_object* x_143; uint8_t x_144; lean_object* x_145; lean_object* x_146; lean_object* x_147; +x_140 = lean_ctor_get(x_113, 0); +lean_inc(x_140); +if (lean_is_exclusive(x_113)) { + lean_ctor_release(x_113, 0); + x_141 = x_113; } else { - lean_dec_ref(x_119); - x_147 = lean_box(0); -} -x_148 = lean_alloc_ctor(1, 1, 0); -lean_ctor_set(x_148, 0, x_146); -x_149 = lean_alloc_ctor(1, 6, 1); -lean_ctor_set(x_149, 0, x_31); -lean_ctor_set(x_149, 1, x_53); -lean_ctor_set(x_149, 2, x_75); -lean_ctor_set(x_149, 3, x_86); -lean_ctor_set(x_149, 4, x_88); -lean_ctor_set(x_149, 5, x_148); -x_150 = lean_unbox(x_64); -lean_dec(x_64); -lean_ctor_set_uint8(x_149, sizeof(void*)*6, x_150); -if (lean_is_scalar(x_147)) { - x_151 = lean_alloc_ctor(1, 1, 0); + lean_dec_ref(x_113); + x_141 = lean_box(0); +} +x_142 = lean_alloc_ctor(1, 1, 0); +lean_ctor_set(x_142, 0, x_140); +x_143 = lean_alloc_ctor(1, 6, 1); +lean_ctor_set(x_143, 0, x_32); +lean_ctor_set(x_143, 1, x_47); +lean_ctor_set(x_143, 2, x_69); +lean_ctor_set(x_143, 3, x_80); +lean_ctor_set(x_143, 4, x_82); +lean_ctor_set(x_143, 5, x_142); +x_144 = lean_unbox(x_58); +lean_dec(x_58); +lean_ctor_set_uint8(x_143, sizeof(void*)*6, x_144); +if (lean_is_scalar(x_141)) { + x_145 = lean_alloc_ctor(1, 1, 0); } else { - x_151 = x_147; + x_145 = x_141; } -lean_ctor_set(x_151, 0, x_149); -x_152 = l___private_Lake_Load_Manifest_0__Lake_fromJsonPackageEntryV6____x40_Lake_Load_Manifest___hyg_115____lambda__3___closed__10; -x_153 = l_Except_orElseLazy___rarg(x_151, x_152); -return x_153; +lean_ctor_set(x_145, 0, x_143); +x_146 = l___private_Lake_Load_Manifest_0__Lake_fromJsonPackageEntryV6____x40_Lake_Load_Manifest___hyg_115____lambda__3___closed__10; +x_147 = l_Except_orElseLazy___rarg(x_145, x_146); +return x_147; } } } @@ -1413,6 +1581,29 @@ return x_153; } } } +else +{ +lean_object* x_180; lean_object* x_181; lean_object* x_182; lean_object* x_183; lean_object* x_184; lean_object* x_185; lean_object* x_186; lean_object* x_187; lean_object* x_188; +lean_dec(x_32); +lean_dec(x_30); +x_180 = lean_unsigned_to_nat(80u); +x_181 = l_Lean_Json_pretty(x_35, x_180); +x_182 = 
l___private_Lake_Load_Manifest_0__Lake_fromJsonPackageEntryV6____x40_Lake_Load_Manifest___hyg_115____lambda__3___closed__11; +x_183 = lean_string_append(x_182, x_181); +lean_dec(x_181); +x_184 = l_Lean_RBNode_foldM___at___private_Lake_Load_Manifest_0__Lake_fromJsonPackageEntryV6____x40_Lake_Load_Manifest___hyg_115____spec__1___closed__3; +x_185 = lean_string_append(x_183, x_184); +if (lean_is_scalar(x_31)) { + x_186 = lean_alloc_ctor(0, 1, 0); +} else { + x_186 = x_31; + lean_ctor_set_tag(x_186, 0); +} +lean_ctor_set(x_186, 0, x_185); +x_187 = l___private_Lake_Load_Manifest_0__Lake_fromJsonPackageEntryV6____x40_Lake_Load_Manifest___hyg_115____lambda__3___closed__10; +x_188 = l_Except_orElseLazy___rarg(x_186, x_187); +return x_188; +} } } } @@ -1607,311 +1798,318 @@ return x_15; } else { -lean_object* x_16; lean_object* x_17; lean_object* x_64; lean_object* x_65; lean_object* x_66; lean_object* x_67; +lean_object* x_16; lean_object* x_17; lean_object* x_18; lean_object* x_67; lean_object* x_68; lean_object* x_69; lean_object* x_70; x_16 = lean_ctor_get(x_6, 0); lean_inc(x_16); -lean_dec(x_6); -x_64 = l_Lean_instInhabitedJson; -x_65 = lean_unsigned_to_nat(0u); -x_66 = lean_array_get(x_64, x_16, x_65); -lean_inc(x_66); -x_67 = l_Lean_Json_getStr_x3f(x_66); -if (lean_obj_tag(x_67) == 0) +if (lean_is_exclusive(x_6)) { + lean_ctor_release(x_6, 0); + x_17 = x_6; +} else { + lean_dec_ref(x_6); + x_17 = lean_box(0); +} +x_67 = l_Lean_instInhabitedJson; +x_68 = lean_unsigned_to_nat(0u); +x_69 = lean_array_get(x_67, x_16, x_68); +lean_inc(x_69); +x_70 = l_Lean_Json_getStr_x3f(x_69); +if (lean_obj_tag(x_70) == 0) { -uint8_t x_68; -lean_dec(x_66); +uint8_t x_71; +lean_dec(x_69); +lean_dec(x_17); lean_dec(x_16); -x_68 = !lean_is_exclusive(x_67); -if (x_68 == 0) +x_71 = !lean_is_exclusive(x_70); +if (x_71 == 0) { -lean_object* x_69; -x_69 = l_Except_orElseLazy___rarg(x_67, x_10); -return x_69; +lean_object* x_72; +x_72 = l_Except_orElseLazy___rarg(x_70, x_10); +return x_72; } else { -lean_object* x_70; lean_object* x_71; lean_object* x_72; -x_70 = lean_ctor_get(x_67, 0); -lean_inc(x_70); -lean_dec(x_67); -x_71 = lean_alloc_ctor(0, 1, 0); -lean_ctor_set(x_71, 0, x_70); -x_72 = l_Except_orElseLazy___rarg(x_71, x_10); -return x_72; +lean_object* x_73; lean_object* x_74; lean_object* x_75; +x_73 = lean_ctor_get(x_70, 0); +lean_inc(x_73); +lean_dec(x_70); +x_74 = lean_alloc_ctor(0, 1, 0); +lean_ctor_set(x_74, 0, x_73); +x_75 = l_Except_orElseLazy___rarg(x_74, x_10); +return x_75; } } else { -uint8_t x_73; -x_73 = !lean_is_exclusive(x_67); -if (x_73 == 0) -{ -lean_object* x_74; lean_object* x_75; uint8_t x_76; -x_74 = lean_ctor_get(x_67, 0); -x_75 = l___private_Lake_Load_Manifest_0__Lake_fromJsonPackageEntryV6____x40_Lake_Load_Manifest___hyg_115____lambda__3___closed__11; -x_76 = lean_string_dec_eq(x_74, x_75); +uint8_t x_76; +x_76 = !lean_is_exclusive(x_70); if (x_76 == 0) { -lean_object* x_77; uint8_t x_78; -x_77 = l_String_toName(x_74); -x_78 = l_Lean_Name_isAnonymous(x_77); -if (x_78 == 0) +lean_object* x_77; lean_object* x_78; uint8_t x_79; +x_77 = lean_ctor_get(x_70, 0); +x_78 = l_Lean_RBNode_foldM___at___private_Lake_Load_Manifest_0__Lake_fromJsonPackageEntryV6____x40_Lake_Load_Manifest___hyg_115____spec__1___closed__1; +x_79 = lean_string_dec_eq(x_77, x_78); +if (x_79 == 0) { -lean_free_object(x_67); -lean_dec(x_66); -x_17 = x_77; -goto block_63; +lean_object* x_80; uint8_t x_81; +x_80 = l_String_toName(x_77); +x_81 = l_Lean_Name_isAnonymous(x_80); +if (x_81 == 0) +{ +lean_free_object(x_70); 
+lean_dec(x_69); +x_18 = x_80; +goto block_66; } else { -lean_object* x_79; lean_object* x_80; lean_object* x_81; lean_object* x_82; lean_object* x_83; lean_object* x_84; lean_object* x_85; -lean_dec(x_77); -lean_dec(x_16); -x_79 = lean_unsigned_to_nat(80u); -x_80 = l_Lean_Json_pretty(x_66, x_79); -x_81 = l___private_Lake_Load_Manifest_0__Lake_fromJsonPackageEntryV6____x40_Lake_Load_Manifest___hyg_115____lambda__3___closed__12; -x_82 = lean_string_append(x_81, x_80); +lean_object* x_82; lean_object* x_83; lean_object* x_84; lean_object* x_85; lean_object* x_86; lean_object* x_87; lean_object* x_88; lean_dec(x_80); -x_83 = l___private_Lake_Load_Manifest_0__Lake_fromJsonPackageEntryV6____x40_Lake_Load_Manifest___hyg_115____lambda__3___closed__13; -x_84 = lean_string_append(x_82, x_83); -lean_ctor_set_tag(x_67, 0); -lean_ctor_set(x_67, 0, x_84); -x_85 = l_Except_orElseLazy___rarg(x_67, x_10); -return x_85; +lean_dec(x_17); +lean_dec(x_16); +x_82 = lean_unsigned_to_nat(80u); +x_83 = l_Lean_Json_pretty(x_69, x_82); +x_84 = l_Lean_RBNode_foldM___at___private_Lake_Load_Manifest_0__Lake_fromJsonPackageEntryV6____x40_Lake_Load_Manifest___hyg_115____spec__1___closed__2; +x_85 = lean_string_append(x_84, x_83); +lean_dec(x_83); +x_86 = l_Lean_RBNode_foldM___at___private_Lake_Load_Manifest_0__Lake_fromJsonPackageEntryV6____x40_Lake_Load_Manifest___hyg_115____spec__1___closed__3; +x_87 = lean_string_append(x_85, x_86); +lean_ctor_set_tag(x_70, 0); +lean_ctor_set(x_70, 0, x_87); +x_88 = l_Except_orElseLazy___rarg(x_70, x_10); +return x_88; } } else { -lean_object* x_86; -lean_free_object(x_67); -lean_dec(x_74); -lean_dec(x_66); -x_86 = lean_box(0); -x_17 = x_86; -goto block_63; +lean_object* x_89; +lean_free_object(x_70); +lean_dec(x_77); +lean_dec(x_69); +x_89 = lean_box(0); +x_18 = x_89; +goto block_66; } } else { -lean_object* x_87; lean_object* x_88; uint8_t x_89; -x_87 = lean_ctor_get(x_67, 0); -lean_inc(x_87); -lean_dec(x_67); -x_88 = l___private_Lake_Load_Manifest_0__Lake_fromJsonPackageEntryV6____x40_Lake_Load_Manifest___hyg_115____lambda__3___closed__11; -x_89 = lean_string_dec_eq(x_87, x_88); -if (x_89 == 0) +lean_object* x_90; lean_object* x_91; uint8_t x_92; +x_90 = lean_ctor_get(x_70, 0); +lean_inc(x_90); +lean_dec(x_70); +x_91 = l_Lean_RBNode_foldM___at___private_Lake_Load_Manifest_0__Lake_fromJsonPackageEntryV6____x40_Lake_Load_Manifest___hyg_115____spec__1___closed__1; +x_92 = lean_string_dec_eq(x_90, x_91); +if (x_92 == 0) { -lean_object* x_90; uint8_t x_91; -x_90 = l_String_toName(x_87); -x_91 = l_Lean_Name_isAnonymous(x_90); -if (x_91 == 0) +lean_object* x_93; uint8_t x_94; +x_93 = l_String_toName(x_90); +x_94 = l_Lean_Name_isAnonymous(x_93); +if (x_94 == 0) { -lean_dec(x_66); -x_17 = x_90; -goto block_63; +lean_dec(x_69); +x_18 = x_93; +goto block_66; } else { -lean_object* x_92; lean_object* x_93; lean_object* x_94; lean_object* x_95; lean_object* x_96; lean_object* x_97; lean_object* x_98; lean_object* x_99; -lean_dec(x_90); -lean_dec(x_16); -x_92 = lean_unsigned_to_nat(80u); -x_93 = l_Lean_Json_pretty(x_66, x_92); -x_94 = l___private_Lake_Load_Manifest_0__Lake_fromJsonPackageEntryV6____x40_Lake_Load_Manifest___hyg_115____lambda__3___closed__12; -x_95 = lean_string_append(x_94, x_93); +lean_object* x_95; lean_object* x_96; lean_object* x_97; lean_object* x_98; lean_object* x_99; lean_object* x_100; lean_object* x_101; lean_object* x_102; lean_dec(x_93); -x_96 = 
l___private_Lake_Load_Manifest_0__Lake_fromJsonPackageEntryV6____x40_Lake_Load_Manifest___hyg_115____lambda__3___closed__13; -x_97 = lean_string_append(x_95, x_96); -x_98 = lean_alloc_ctor(0, 1, 0); -lean_ctor_set(x_98, 0, x_97); -x_99 = l_Except_orElseLazy___rarg(x_98, x_10); -return x_99; +lean_dec(x_17); +lean_dec(x_16); +x_95 = lean_unsigned_to_nat(80u); +x_96 = l_Lean_Json_pretty(x_69, x_95); +x_97 = l_Lean_RBNode_foldM___at___private_Lake_Load_Manifest_0__Lake_fromJsonPackageEntryV6____x40_Lake_Load_Manifest___hyg_115____spec__1___closed__2; +x_98 = lean_string_append(x_97, x_96); +lean_dec(x_96); +x_99 = l_Lean_RBNode_foldM___at___private_Lake_Load_Manifest_0__Lake_fromJsonPackageEntryV6____x40_Lake_Load_Manifest___hyg_115____spec__1___closed__3; +x_100 = lean_string_append(x_98, x_99); +x_101 = lean_alloc_ctor(0, 1, 0); +lean_ctor_set(x_101, 0, x_100); +x_102 = l_Except_orElseLazy___rarg(x_101, x_10); +return x_102; } } else { -lean_object* x_100; -lean_dec(x_87); -lean_dec(x_66); -x_100 = lean_box(0); -x_17 = x_100; -goto block_63; +lean_object* x_103; +lean_dec(x_90); +lean_dec(x_69); +x_103 = lean_box(0); +x_18 = x_103; +goto block_66; } } } -block_63: +block_66: { -lean_object* x_18; lean_object* x_19; lean_object* x_20; lean_object* x_21; -x_18 = l_Lean_instInhabitedJson; -x_19 = lean_unsigned_to_nat(1u); -x_20 = lean_array_get(x_18, x_16, x_19); -x_21 = l_Lean_Json_getObj_x3f(x_20); -if (lean_obj_tag(x_21) == 0) +lean_object* x_19; lean_object* x_20; lean_object* x_21; +x_19 = l_Lean_instInhabitedJson; +x_20 = lean_unsigned_to_nat(1u); +x_21 = lean_array_get(x_19, x_16, x_20); +if (lean_obj_tag(x_21) == 5) { -uint8_t x_22; +lean_object* x_22; lean_object* x_23; lean_object* x_24; lean_dec(x_17); +x_22 = lean_ctor_get(x_21, 0); +lean_inc(x_22); +lean_dec(x_21); +x_23 = lean_box(0); +x_24 = l_Lean_RBNode_foldM___at___private_Lake_Load_Manifest_0__Lake_fromJsonPackageEntryV6____x40_Lake_Load_Manifest___hyg_115____spec__1(x_23, x_22); +if (lean_obj_tag(x_24) == 0) +{ +uint8_t x_25; +lean_dec(x_18); lean_dec(x_16); -x_22 = !lean_is_exclusive(x_21); -if (x_22 == 0) +x_25 = !lean_is_exclusive(x_24); +if (x_25 == 0) { -lean_object* x_23; -x_23 = l_Except_orElseLazy___rarg(x_21, x_10); -return x_23; +lean_object* x_26; +x_26 = l_Except_orElseLazy___rarg(x_24, x_10); +return x_26; } else { -lean_object* x_24; lean_object* x_25; lean_object* x_26; -x_24 = lean_ctor_get(x_21, 0); -lean_inc(x_24); -lean_dec(x_21); -x_25 = lean_alloc_ctor(0, 1, 0); -lean_ctor_set(x_25, 0, x_24); -x_26 = l_Except_orElseLazy___rarg(x_25, x_10); -return x_26; +lean_object* x_27; lean_object* x_28; lean_object* x_29; +x_27 = lean_ctor_get(x_24, 0); +lean_inc(x_27); +lean_dec(x_24); +x_28 = lean_alloc_ctor(0, 1, 0); +lean_ctor_set(x_28, 0, x_27); +x_29 = l_Except_orElseLazy___rarg(x_28, x_10); +return x_29; } } else { -lean_object* x_27; lean_object* x_28; lean_object* x_29; -x_27 = lean_ctor_get(x_21, 0); -lean_inc(x_27); -lean_dec(x_21); -x_28 = lean_box(0); -x_29 = l_Lean_RBNode_foldM___at___private_Lake_Load_Manifest_0__Lake_fromJsonPackageEntryV6____x40_Lake_Load_Manifest___hyg_115____spec__1(x_28, x_27); -if (lean_obj_tag(x_29) == 0) +lean_object* x_30; lean_object* x_31; lean_object* x_32; lean_object* x_33; +x_30 = lean_ctor_get(x_24, 0); +lean_inc(x_30); +lean_dec(x_24); +x_31 = lean_unsigned_to_nat(2u); +x_32 = lean_array_get(x_19, x_16, x_31); +x_33 = l_Lean_Json_getBool_x3f(x_32); +lean_dec(x_32); +if (lean_obj_tag(x_33) == 0) { -uint8_t x_30; -lean_dec(x_17); +uint8_t x_34; +lean_dec(x_30); 
+lean_dec(x_18); lean_dec(x_16); -x_30 = !lean_is_exclusive(x_29); -if (x_30 == 0) +x_34 = !lean_is_exclusive(x_33); +if (x_34 == 0) { -lean_object* x_31; -x_31 = l_Except_orElseLazy___rarg(x_29, x_10); -return x_31; +lean_object* x_35; +x_35 = l_Except_orElseLazy___rarg(x_33, x_10); +return x_35; } else { -lean_object* x_32; lean_object* x_33; lean_object* x_34; -x_32 = lean_ctor_get(x_29, 0); -lean_inc(x_32); -lean_dec(x_29); -x_33 = lean_alloc_ctor(0, 1, 0); -lean_ctor_set(x_33, 0, x_32); -x_34 = l_Except_orElseLazy___rarg(x_33, x_10); -return x_34; +lean_object* x_36; lean_object* x_37; lean_object* x_38; +x_36 = lean_ctor_get(x_33, 0); +lean_inc(x_36); +lean_dec(x_33); +x_37 = lean_alloc_ctor(0, 1, 0); +lean_ctor_set(x_37, 0, x_36); +x_38 = l_Except_orElseLazy___rarg(x_37, x_10); +return x_38; } } else { -lean_object* x_35; lean_object* x_36; lean_object* x_37; lean_object* x_38; -x_35 = lean_ctor_get(x_29, 0); -lean_inc(x_35); -lean_dec(x_29); -x_36 = lean_unsigned_to_nat(2u); -x_37 = lean_array_get(x_18, x_16, x_36); -x_38 = l_Lean_Json_getBool_x3f(x_37); -lean_dec(x_37); -if (lean_obj_tag(x_38) == 0) -{ -uint8_t x_39; -lean_dec(x_35); -lean_dec(x_17); +lean_object* x_39; lean_object* x_40; lean_object* x_41; lean_object* x_42; +x_39 = lean_ctor_get(x_33, 0); +lean_inc(x_39); +lean_dec(x_33); +x_40 = lean_unsigned_to_nat(3u); +x_41 = lean_array_get(x_19, x_16, x_40); lean_dec(x_16); -x_39 = !lean_is_exclusive(x_38); -if (x_39 == 0) +x_42 = l_Lean_Json_getStr_x3f(x_41); +if (lean_obj_tag(x_42) == 0) +{ +uint8_t x_43; +lean_dec(x_39); +lean_dec(x_30); +lean_dec(x_18); +x_43 = !lean_is_exclusive(x_42); +if (x_43 == 0) { -lean_object* x_40; -x_40 = l_Except_orElseLazy___rarg(x_38, x_10); -return x_40; +lean_object* x_44; +x_44 = l_Except_orElseLazy___rarg(x_42, x_10); +return x_44; } else { -lean_object* x_41; lean_object* x_42; lean_object* x_43; -x_41 = lean_ctor_get(x_38, 0); -lean_inc(x_41); -lean_dec(x_38); -x_42 = lean_alloc_ctor(0, 1, 0); -lean_ctor_set(x_42, 0, x_41); -x_43 = l_Except_orElseLazy___rarg(x_42, x_10); -return x_43; +lean_object* x_45; lean_object* x_46; lean_object* x_47; +x_45 = lean_ctor_get(x_42, 0); +lean_inc(x_45); +lean_dec(x_42); +x_46 = lean_alloc_ctor(0, 1, 0); +lean_ctor_set(x_46, 0, x_45); +x_47 = l_Except_orElseLazy___rarg(x_46, x_10); +return x_47; } } else { -lean_object* x_44; lean_object* x_45; lean_object* x_46; lean_object* x_47; -x_44 = lean_ctor_get(x_38, 0); -lean_inc(x_44); -lean_dec(x_38); -x_45 = lean_unsigned_to_nat(3u); -x_46 = lean_array_get(x_18, x_16, x_45); -lean_dec(x_16); -x_47 = l_Lean_Json_getStr_x3f(x_46); -if (lean_obj_tag(x_47) == 0) -{ uint8_t x_48; -lean_dec(x_44); -lean_dec(x_35); -lean_dec(x_17); -x_48 = !lean_is_exclusive(x_47); +x_48 = !lean_is_exclusive(x_42); if (x_48 == 0) { -lean_object* x_49; -x_49 = l_Except_orElseLazy___rarg(x_47, x_10); -return x_49; -} -else -{ -lean_object* x_50; lean_object* x_51; lean_object* x_52; -x_50 = lean_ctor_get(x_47, 0); -lean_inc(x_50); -lean_dec(x_47); -x_51 = lean_alloc_ctor(0, 1, 0); -lean_ctor_set(x_51, 0, x_50); -x_52 = l_Except_orElseLazy___rarg(x_51, x_10); +lean_object* x_49; lean_object* x_50; uint8_t x_51; lean_object* x_52; +x_49 = lean_ctor_get(x_42, 0); +x_50 = lean_alloc_ctor(0, 3, 1); +lean_ctor_set(x_50, 0, x_18); +lean_ctor_set(x_50, 1, x_30); +lean_ctor_set(x_50, 2, x_49); +x_51 = lean_unbox(x_39); +lean_dec(x_39); +lean_ctor_set_uint8(x_50, sizeof(void*)*3, x_51); +lean_ctor_set(x_42, 0, x_50); +x_52 = l_Except_orElseLazy___rarg(x_42, x_10); return x_52; } -} else { 
-uint8_t x_53; -x_53 = !lean_is_exclusive(x_47); -if (x_53 == 0) -{ -lean_object* x_54; lean_object* x_55; uint8_t x_56; lean_object* x_57; -x_54 = lean_ctor_get(x_47, 0); -x_55 = lean_alloc_ctor(0, 3, 1); -lean_ctor_set(x_55, 0, x_17); -lean_ctor_set(x_55, 1, x_35); -lean_ctor_set(x_55, 2, x_54); -x_56 = lean_unbox(x_44); -lean_dec(x_44); -lean_ctor_set_uint8(x_55, sizeof(void*)*3, x_56); -lean_ctor_set(x_47, 0, x_55); -x_57 = l_Except_orElseLazy___rarg(x_47, x_10); +lean_object* x_53; lean_object* x_54; uint8_t x_55; lean_object* x_56; lean_object* x_57; +x_53 = lean_ctor_get(x_42, 0); +lean_inc(x_53); +lean_dec(x_42); +x_54 = lean_alloc_ctor(0, 3, 1); +lean_ctor_set(x_54, 0, x_18); +lean_ctor_set(x_54, 1, x_30); +lean_ctor_set(x_54, 2, x_53); +x_55 = lean_unbox(x_39); +lean_dec(x_39); +lean_ctor_set_uint8(x_54, sizeof(void*)*3, x_55); +x_56 = lean_alloc_ctor(1, 1, 0); +lean_ctor_set(x_56, 0, x_54); +x_57 = l_Except_orElseLazy___rarg(x_56, x_10); return x_57; } -else -{ -lean_object* x_58; lean_object* x_59; uint8_t x_60; lean_object* x_61; lean_object* x_62; -x_58 = lean_ctor_get(x_47, 0); -lean_inc(x_58); -lean_dec(x_47); -x_59 = lean_alloc_ctor(0, 3, 1); -lean_ctor_set(x_59, 0, x_17); -lean_ctor_set(x_59, 1, x_35); -lean_ctor_set(x_59, 2, x_58); -x_60 = lean_unbox(x_44); -lean_dec(x_44); -lean_ctor_set_uint8(x_59, sizeof(void*)*3, x_60); -x_61 = lean_alloc_ctor(1, 1, 0); -lean_ctor_set(x_61, 0, x_59); -x_62 = l_Except_orElseLazy___rarg(x_61, x_10); -return x_62; } } } } +else +{ +lean_object* x_58; lean_object* x_59; lean_object* x_60; lean_object* x_61; lean_object* x_62; lean_object* x_63; lean_object* x_64; lean_object* x_65; +lean_dec(x_18); +lean_dec(x_16); +x_58 = lean_unsigned_to_nat(80u); +x_59 = l_Lean_Json_pretty(x_21, x_58); +x_60 = l___private_Lake_Load_Manifest_0__Lake_fromJsonPackageEntryV6____x40_Lake_Load_Manifest___hyg_115____lambda__3___closed__11; +x_61 = lean_string_append(x_60, x_59); +lean_dec(x_59); +x_62 = l_Lean_RBNode_foldM___at___private_Lake_Load_Manifest_0__Lake_fromJsonPackageEntryV6____x40_Lake_Load_Manifest___hyg_115____spec__1___closed__3; +x_63 = lean_string_append(x_61, x_62); +if (lean_is_scalar(x_17)) { + x_64 = lean_alloc_ctor(0, 1, 0); +} else { + x_64 = x_17; + lean_ctor_set_tag(x_64, 0); +} +lean_ctor_set(x_64, 0, x_63); +x_65 = l_Except_orElseLazy___rarg(x_64, x_10); +return x_65; } } } @@ -3700,7 +3898,7 @@ if (x_331 == 0) { lean_object* x_332; lean_object* x_333; uint8_t x_334; x_332 = lean_ctor_get(x_312, 0); -x_333 = l___private_Lake_Load_Manifest_0__Lake_fromJsonPackageEntryV6____x40_Lake_Load_Manifest___hyg_115____lambda__3___closed__11; +x_333 = l_Lean_RBNode_foldM___at___private_Lake_Load_Manifest_0__Lake_fromJsonPackageEntryV6____x40_Lake_Load_Manifest___hyg_115____spec__1___closed__1; x_334 = lean_string_dec_eq(x_332, x_333); if (x_334 == 0) { @@ -3722,10 +3920,10 @@ lean_dec(x_23); lean_dec(x_22); x_337 = lean_unsigned_to_nat(80u); x_338 = l_Lean_Json_pretty(x_311, x_337); -x_339 = l___private_Lake_Load_Manifest_0__Lake_fromJsonPackageEntryV6____x40_Lake_Load_Manifest___hyg_115____lambda__3___closed__12; +x_339 = l_Lean_RBNode_foldM___at___private_Lake_Load_Manifest_0__Lake_fromJsonPackageEntryV6____x40_Lake_Load_Manifest___hyg_115____spec__1___closed__2; x_340 = lean_string_append(x_339, x_338); lean_dec(x_338); -x_341 = l___private_Lake_Load_Manifest_0__Lake_fromJsonPackageEntryV6____x40_Lake_Load_Manifest___hyg_115____lambda__3___closed__13; +x_341 = 
l_Lean_RBNode_foldM___at___private_Lake_Load_Manifest_0__Lake_fromJsonPackageEntryV6____x40_Lake_Load_Manifest___hyg_115____spec__1___closed__3; x_342 = lean_string_append(x_340, x_341); x_343 = l_Lake_PackageEntry_fromJson_x3f___closed__47; x_344 = lean_string_append(x_343, x_342); @@ -3758,7 +3956,7 @@ lean_object* x_351; lean_object* x_352; uint8_t x_353; x_351 = lean_ctor_get(x_312, 0); lean_inc(x_351); lean_dec(x_312); -x_352 = l___private_Lake_Load_Manifest_0__Lake_fromJsonPackageEntryV6____x40_Lake_Load_Manifest___hyg_115____lambda__3___closed__11; +x_352 = l_Lean_RBNode_foldM___at___private_Lake_Load_Manifest_0__Lake_fromJsonPackageEntryV6____x40_Lake_Load_Manifest___hyg_115____spec__1___closed__1; x_353 = lean_string_dec_eq(x_351, x_352); if (x_353 == 0) { @@ -3779,10 +3977,10 @@ lean_dec(x_23); lean_dec(x_22); x_356 = lean_unsigned_to_nat(80u); x_357 = l_Lean_Json_pretty(x_311, x_356); -x_358 = l___private_Lake_Load_Manifest_0__Lake_fromJsonPackageEntryV6____x40_Lake_Load_Manifest___hyg_115____lambda__3___closed__12; +x_358 = l_Lean_RBNode_foldM___at___private_Lake_Load_Manifest_0__Lake_fromJsonPackageEntryV6____x40_Lake_Load_Manifest___hyg_115____spec__1___closed__2; x_359 = lean_string_append(x_358, x_357); lean_dec(x_357); -x_360 = l___private_Lake_Load_Manifest_0__Lake_fromJsonPackageEntryV6____x40_Lake_Load_Manifest___hyg_115____lambda__3___closed__13; +x_360 = l_Lean_RBNode_foldM___at___private_Lake_Load_Manifest_0__Lake_fromJsonPackageEntryV6____x40_Lake_Load_Manifest___hyg_115____spec__1___closed__3; x_361 = lean_string_append(x_359, x_360); x_362 = l_Lake_PackageEntry_fromJson_x3f___closed__47; x_363 = lean_string_append(x_362, x_361); @@ -4382,7 +4580,7 @@ lean_dec(x_22); x_129 = l_Lake_PackageEntry_fromJson_x3f___closed__25; x_130 = lean_string_append(x_129, x_73); lean_dec(x_73); -x_131 = l___private_Lake_Load_Manifest_0__Lake_fromJsonPackageEntryV6____x40_Lake_Load_Manifest___hyg_115____lambda__3___closed__13; +x_131 = l_Lean_RBNode_foldM___at___private_Lake_Load_Manifest_0__Lake_fromJsonPackageEntryV6____x40_Lake_Load_Manifest___hyg_115____spec__1___closed__3; x_132 = lean_string_append(x_130, x_131); x_133 = lean_alloc_ctor(0, 1, 0); lean_ctor_set(x_133, 0, x_132); @@ -5854,7 +6052,7 @@ x_10 = l_Lake_StdVer_toString(x_1); x_11 = l_Lake_Manifest_getVersion___lambda__1___closed__3; x_12 = lean_string_append(x_11, x_10); lean_dec(x_10); -x_13 = l___private_Lake_Load_Manifest_0__Lake_fromJsonPackageEntryV6____x40_Lake_Load_Manifest___hyg_115____lambda__3___closed__13; +x_13 = l_Lean_RBNode_foldM___at___private_Lake_Load_Manifest_0__Lake_fromJsonPackageEntryV6____x40_Lake_Load_Manifest___hyg_115____spec__1___closed__3; x_14 = lean_string_append(x_12, x_13); x_15 = lean_alloc_ctor(0, 1, 0); lean_ctor_set(x_15, 0, x_14); @@ -6618,7 +6816,7 @@ x_44 = l_Lean_Json_pretty(x_33, x_43); x_45 = l_Lake_Manifest_getPackages___closed__8; x_46 = lean_string_append(x_45, x_44); lean_dec(x_44); -x_47 = l___private_Lake_Load_Manifest_0__Lake_fromJsonPackageEntryV6____x40_Lake_Load_Manifest___hyg_115____lambda__3___closed__13; +x_47 = l_Lean_RBNode_foldM___at___private_Lake_Load_Manifest_0__Lake_fromJsonPackageEntryV6____x40_Lake_Load_Manifest___hyg_115____spec__1___closed__3; x_48 = lean_string_append(x_46, x_47); x_3 = x_48; goto block_9; @@ -6694,7 +6892,7 @@ x_62 = l_Lean_Json_pretty(x_53, x_61); x_63 = l_Lake_Manifest_getPackages___closed__8; x_64 = lean_string_append(x_63, x_62); lean_dec(x_62); -x_65 = 
l___private_Lake_Load_Manifest_0__Lake_fromJsonPackageEntryV6____x40_Lake_Load_Manifest___hyg_115____lambda__3___closed__13; +x_65 = l_Lean_RBNode_foldM___at___private_Lake_Load_Manifest_0__Lake_fromJsonPackageEntryV6____x40_Lake_Load_Manifest___hyg_115____spec__1___closed__3; x_66 = lean_string_append(x_64, x_65); x_3 = x_66; goto block_9; @@ -6753,7 +6951,7 @@ x_77 = l_Lean_Json_pretty(x_67, x_76); x_78 = l_Lake_Manifest_getPackages___closed__8; x_79 = lean_string_append(x_78, x_77); lean_dec(x_77); -x_80 = l___private_Lake_Load_Manifest_0__Lake_fromJsonPackageEntryV6____x40_Lake_Load_Manifest___hyg_115____lambda__3___closed__13; +x_80 = l_Lean_RBNode_foldM___at___private_Lake_Load_Manifest_0__Lake_fromJsonPackageEntryV6____x40_Lake_Load_Manifest___hyg_115____spec__1___closed__3; x_81 = lean_string_append(x_79, x_80); x_3 = x_81; goto block_9; @@ -7019,7 +7217,7 @@ lean_object* x_97; lean_object* x_98; uint8_t x_99; x_97 = lean_ctor_get(x_95, 0); lean_inc(x_97); lean_dec(x_95); -x_98 = l___private_Lake_Load_Manifest_0__Lake_fromJsonPackageEntryV6____x40_Lake_Load_Manifest___hyg_115____lambda__3___closed__11; +x_98 = l_Lean_RBNode_foldM___at___private_Lake_Load_Manifest_0__Lake_fromJsonPackageEntryV6____x40_Lake_Load_Manifest___hyg_115____spec__1___closed__1; x_99 = lean_string_dec_eq(x_97, x_98); if (x_99 == 0) { @@ -7042,10 +7240,10 @@ lean_dec(x_32); lean_dec(x_27); x_102 = lean_unsigned_to_nat(80u); x_103 = l_Lean_Json_pretty(x_93, x_102); -x_104 = l___private_Lake_Load_Manifest_0__Lake_fromJsonPackageEntryV6____x40_Lake_Load_Manifest___hyg_115____lambda__3___closed__12; +x_104 = l_Lean_RBNode_foldM___at___private_Lake_Load_Manifest_0__Lake_fromJsonPackageEntryV6____x40_Lake_Load_Manifest___hyg_115____spec__1___closed__2; x_105 = lean_string_append(x_104, x_103); lean_dec(x_103); -x_106 = l___private_Lake_Load_Manifest_0__Lake_fromJsonPackageEntryV6____x40_Lake_Load_Manifest___hyg_115____lambda__3___closed__13; +x_106 = l_Lean_RBNode_foldM___at___private_Lake_Load_Manifest_0__Lake_fromJsonPackageEntryV6____x40_Lake_Load_Manifest___hyg_115____spec__1___closed__3; x_107 = lean_string_append(x_105, x_106); x_16 = x_107; goto block_22; @@ -7100,7 +7298,7 @@ lean_object* x_113; lean_object* x_114; uint8_t x_115; x_113 = lean_ctor_get(x_111, 0); lean_inc(x_113); lean_dec(x_111); -x_114 = l___private_Lake_Load_Manifest_0__Lake_fromJsonPackageEntryV6____x40_Lake_Load_Manifest___hyg_115____lambda__3___closed__11; +x_114 = l_Lean_RBNode_foldM___at___private_Lake_Load_Manifest_0__Lake_fromJsonPackageEntryV6____x40_Lake_Load_Manifest___hyg_115____spec__1___closed__1; x_115 = lean_string_dec_eq(x_113, x_114); if (x_115 == 0) { @@ -7124,10 +7322,10 @@ lean_dec(x_32); lean_dec(x_27); x_119 = lean_unsigned_to_nat(80u); x_120 = l_Lean_Json_pretty(x_109, x_119); -x_121 = l___private_Lake_Load_Manifest_0__Lake_fromJsonPackageEntryV6____x40_Lake_Load_Manifest___hyg_115____lambda__3___closed__12; +x_121 = l_Lean_RBNode_foldM___at___private_Lake_Load_Manifest_0__Lake_fromJsonPackageEntryV6____x40_Lake_Load_Manifest___hyg_115____spec__1___closed__2; x_122 = lean_string_append(x_121, x_120); lean_dec(x_120); -x_123 = l___private_Lake_Load_Manifest_0__Lake_fromJsonPackageEntryV6____x40_Lake_Load_Manifest___hyg_115____lambda__3___closed__13; +x_123 = l_Lean_RBNode_foldM___at___private_Lake_Load_Manifest_0__Lake_fromJsonPackageEntryV6____x40_Lake_Load_Manifest___hyg_115____spec__1___closed__3; x_124 = lean_string_append(x_122, x_123); x_16 = x_124; goto block_22; @@ -8881,6 +9079,8 @@ 
l_Lean_RBNode_foldM___at___private_Lake_Load_Manifest_0__Lake_fromJsonPackageEnt lean_mark_persistent(l_Lean_RBNode_foldM___at___private_Lake_Load_Manifest_0__Lake_fromJsonPackageEntryV6____x40_Lake_Load_Manifest___hyg_115____spec__1___closed__1); l_Lean_RBNode_foldM___at___private_Lake_Load_Manifest_0__Lake_fromJsonPackageEntryV6____x40_Lake_Load_Manifest___hyg_115____spec__1___closed__2 = _init_l_Lean_RBNode_foldM___at___private_Lake_Load_Manifest_0__Lake_fromJsonPackageEntryV6____x40_Lake_Load_Manifest___hyg_115____spec__1___closed__2(); lean_mark_persistent(l_Lean_RBNode_foldM___at___private_Lake_Load_Manifest_0__Lake_fromJsonPackageEntryV6____x40_Lake_Load_Manifest___hyg_115____spec__1___closed__2); +l_Lean_RBNode_foldM___at___private_Lake_Load_Manifest_0__Lake_fromJsonPackageEntryV6____x40_Lake_Load_Manifest___hyg_115____spec__1___closed__3 = _init_l_Lean_RBNode_foldM___at___private_Lake_Load_Manifest_0__Lake_fromJsonPackageEntryV6____x40_Lake_Load_Manifest___hyg_115____spec__1___closed__3(); +lean_mark_persistent(l_Lean_RBNode_foldM___at___private_Lake_Load_Manifest_0__Lake_fromJsonPackageEntryV6____x40_Lake_Load_Manifest___hyg_115____spec__1___closed__3); l___private_Lake_Load_Manifest_0__Lake_fromJsonPackageEntryV6____x40_Lake_Load_Manifest___hyg_115____lambda__1___closed__1 = _init_l___private_Lake_Load_Manifest_0__Lake_fromJsonPackageEntryV6____x40_Lake_Load_Manifest___hyg_115____lambda__1___closed__1(); lean_mark_persistent(l___private_Lake_Load_Manifest_0__Lake_fromJsonPackageEntryV6____x40_Lake_Load_Manifest___hyg_115____lambda__1___closed__1); l___private_Lake_Load_Manifest_0__Lake_fromJsonPackageEntryV6____x40_Lake_Load_Manifest___hyg_115____lambda__1___closed__2 = _init_l___private_Lake_Load_Manifest_0__Lake_fromJsonPackageEntryV6____x40_Lake_Load_Manifest___hyg_115____lambda__1___closed__2(); @@ -8907,10 +9107,6 @@ l___private_Lake_Load_Manifest_0__Lake_fromJsonPackageEntryV6____x40_Lake_Load_M lean_mark_persistent(l___private_Lake_Load_Manifest_0__Lake_fromJsonPackageEntryV6____x40_Lake_Load_Manifest___hyg_115____lambda__3___closed__10); l___private_Lake_Load_Manifest_0__Lake_fromJsonPackageEntryV6____x40_Lake_Load_Manifest___hyg_115____lambda__3___closed__11 = _init_l___private_Lake_Load_Manifest_0__Lake_fromJsonPackageEntryV6____x40_Lake_Load_Manifest___hyg_115____lambda__3___closed__11(); lean_mark_persistent(l___private_Lake_Load_Manifest_0__Lake_fromJsonPackageEntryV6____x40_Lake_Load_Manifest___hyg_115____lambda__3___closed__11); -l___private_Lake_Load_Manifest_0__Lake_fromJsonPackageEntryV6____x40_Lake_Load_Manifest___hyg_115____lambda__3___closed__12 = _init_l___private_Lake_Load_Manifest_0__Lake_fromJsonPackageEntryV6____x40_Lake_Load_Manifest___hyg_115____lambda__3___closed__12(); -lean_mark_persistent(l___private_Lake_Load_Manifest_0__Lake_fromJsonPackageEntryV6____x40_Lake_Load_Manifest___hyg_115____lambda__3___closed__12); -l___private_Lake_Load_Manifest_0__Lake_fromJsonPackageEntryV6____x40_Lake_Load_Manifest___hyg_115____lambda__3___closed__13 = _init_l___private_Lake_Load_Manifest_0__Lake_fromJsonPackageEntryV6____x40_Lake_Load_Manifest___hyg_115____lambda__3___closed__13(); -lean_mark_persistent(l___private_Lake_Load_Manifest_0__Lake_fromJsonPackageEntryV6____x40_Lake_Load_Manifest___hyg_115____lambda__3___closed__13); l___private_Lake_Load_Manifest_0__Lake_fromJsonPackageEntryV6____x40_Lake_Load_Manifest___hyg_115____closed__1 = _init_l___private_Lake_Load_Manifest_0__Lake_fromJsonPackageEntryV6____x40_Lake_Load_Manifest___hyg_115____closed__1(); 
lean_mark_persistent(l___private_Lake_Load_Manifest_0__Lake_fromJsonPackageEntryV6____x40_Lake_Load_Manifest___hyg_115____closed__1); l___private_Lake_Load_Manifest_0__Lake_fromJsonPackageEntryV6____x40_Lake_Load_Manifest___hyg_115____closed__2 = _init_l___private_Lake_Load_Manifest_0__Lake_fromJsonPackageEntryV6____x40_Lake_Load_Manifest___hyg_115____closed__2(); diff --git a/stage0/stdlib/Lake/Util/Name.c b/stage0/stdlib/Lake/Util/Name.c index bfacc0ef6538..9714985579b6 100644 --- a/stage0/stdlib/Lake/Util/Name.c +++ b/stage0/stdlib/Lake/Util/Name.c @@ -13,33 +13,24 @@ #ifdef __cplusplus extern "C" { #endif -lean_object* l_Lean_Json_getObj_x3f(lean_object*); lean_object* l_Lean_Syntax_mkNameLit(lean_object*, lean_object*); -static lean_object* l_Lean_RBNode_fold___at_Lake_instToJsonNameMap__lake___spec__1___rarg___closed__1; -LEAN_EXPORT lean_object* l_Lake_instFromJsonNameMap__lake___rarg(lean_object*, lean_object*); lean_object* l_Lean_Syntax_setHeadInfo(lean_object*, lean_object*); LEAN_EXPORT lean_object* l___private_Lake_Util_Name_0__Lean_Name_cmp_match__1_splitter(lean_object*); LEAN_EXPORT lean_object* l___private_Lake_Util_Name_0__Lean_Name_cmp_match__1_splitter___rarg(uint8_t, lean_object*, lean_object*); LEAN_EXPORT lean_object* l_Lake_DNameMap_empty(lean_object*); static lean_object* l_Lake_Name_quoteFrom___closed__4; -lean_object* l_Lean_Name_toString(lean_object*, uint8_t, lean_object*); LEAN_EXPORT lean_object* l_Lake_NameMap_empty(lean_object*); static lean_object* l_Lake_Name_quoteFrom___closed__7; -LEAN_EXPORT lean_object* l_Lean_RBNode_fold___at_Lake_instToJsonNameMap__lake___spec__1___rarg___lambda__1___boxed(lean_object*); uint8_t l_Lean_Name_isAnonymous(lean_object*); LEAN_EXPORT lean_object* l_Lean_RBNode_forIn_visit___at_Lake_instForInNameMapProdName__lake___spec__1(lean_object*, lean_object*, lean_object*); -LEAN_EXPORT lean_object* l_Lake_instFromJsonNameMap__lake(lean_object*); LEAN_EXPORT lean_object* l_Lake_Name_quoteFrom___boxed(lean_object*, lean_object*, lean_object*); static lean_object* l_Lake_OrdNameMap_empty___closed__2; -LEAN_EXPORT lean_object* l_Lean_RBNode_fold___at_Lake_instToJsonNameMap__lake___spec__1(lean_object*); LEAN_EXPORT lean_object* l___private_Lake_Util_Name_0__Lean_Name_isAnonymous_match__1_splitter___rarg(lean_object*, lean_object*, lean_object*); LEAN_EXPORT lean_object* l___private_Lake_Util_Name_0__Lean_Name_appendCore_match__1_splitter(lean_object*); lean_object* l_Lake_RBArray_empty___rarg(lean_object*); static lean_object* l_Lake_Name_quoteFrom___closed__1; -LEAN_EXPORT lean_object* l_Lean_RBNode_foldM___at_Lake_instFromJsonNameMap__lake___spec__1___rarg(lean_object*, lean_object*, lean_object*); static lean_object* l_Lake_Name_quoteFrom___closed__5; static lean_object* l_Lake_Name_quoteFrom___closed__2; -LEAN_EXPORT uint8_t l_Lean_RBNode_fold___at_Lake_instToJsonNameMap__lake___spec__1___rarg___lambda__1(lean_object*); lean_object* l_Lean_Syntax_copyHeadTailInfoFrom(lean_object*, lean_object*); LEAN_EXPORT lean_object* l___private_Lake_Util_Name_0__Lean_Name_isPrefixOf_match__1_splitter(lean_object*); lean_object* l_Lean_SourceInfo_fromRef(lean_object*, uint8_t); @@ -51,22 +42,16 @@ LEAN_EXPORT lean_object* l_Lake_stringToLegalOrSimpleName(lean_object*); lean_object* l_Lean_quoteNameMk(lean_object*); lean_object* l_Lean_Name_num___override(lean_object*, lean_object*); LEAN_EXPORT lean_object* l___private_Lake_Util_Name_0__Lean_Name_cmp_match__2_splitter___rarg(lean_object*, lean_object*, lean_object*, lean_object*, lean_object*, 
lean_object*, lean_object*, lean_object*, lean_object*); -lean_object* l_Lean_RBNode_insert___at_Lean_NameMap_insert___spec__1___rarg(lean_object*, lean_object*, lean_object*); LEAN_EXPORT lean_object* l_Lake_instCoeRBMapNameQuickCmpNameMap__lake(lean_object*); lean_object* l_Lean_Name_str___override(lean_object*, lean_object*); LEAN_EXPORT lean_object* l_Lake_Name_eraseHead(lean_object*); -LEAN_EXPORT lean_object* l_Lake_instToJsonNameMap__lake(lean_object*); -LEAN_EXPORT lean_object* l_Lean_RBNode_foldM___at_Lake_instFromJsonNameMap__lake___spec__1(lean_object*); LEAN_EXPORT lean_object* l_Lake_instForInNameMapProdName__lake(lean_object*, lean_object*, lean_object*); LEAN_EXPORT lean_object* l_Lake_instForInNameMapProdName__lake___rarg(lean_object*, lean_object*, lean_object*, lean_object*); LEAN_EXPORT lean_object* l_Lean_RBNode_forIn_visit___at_Lake_instForInNameMapProdName__lake___spec__1___rarg___lambda__2(lean_object*, lean_object*, lean_object*, lean_object*, lean_object*, lean_object*, lean_object*); lean_object* l___private_Init_Meta_0__Lean_getEscapedNameParts_x3f(lean_object*, lean_object*); -static lean_object* l_Lean_RBNode_foldM___at_Lake_instFromJsonNameMap__lake___spec__1___rarg___closed__1; -static lean_object* l_Lean_RBNode_foldM___at_Lake_instFromJsonNameMap__lake___spec__1___rarg___closed__2; LEAN_EXPORT lean_object* l___private_Lake_Util_Name_0__Lean_Name_isAnonymous_match__1_splitter(lean_object*); static lean_object* l_Lake_Name_quoteFrom___closed__3; lean_object* l_id___rarg___boxed(lean_object*); -LEAN_EXPORT lean_object* l_Lean_RBNode_fold___at_Lake_instToJsonNameMap__lake___spec__1___rarg(lean_object*, lean_object*, lean_object*); LEAN_EXPORT lean_object* l_Lake_mkOrdNameMap(lean_object*); lean_object* l_String_toName(lean_object*); LEAN_EXPORT lean_object* l_Lean_RBNode_forIn_visit___at_Lake_instForInNameMapProdName__lake___spec__1___rarg___lambda__1(lean_object*, lean_object*, lean_object*, lean_object*); @@ -81,10 +66,8 @@ static lean_object* l_Lake_instCoeRBMapNameQuickCmpNameMap__lake___closed__1; lean_object* lean_string_append(lean_object*, lean_object*); LEAN_EXPORT lean_object* l___private_Lake_Util_Name_0__Lean_Name_isPrefixOf_match__1_splitter___rarg(lean_object*, lean_object*, lean_object*, lean_object*, lean_object*); LEAN_EXPORT lean_object* l___private_Lake_Util_Name_0__Lean_Name_isAnonymous_match__1_splitter___rarg___boxed(lean_object*, lean_object*, lean_object*); -lean_object* l_Lean_RBNode_insert___at_Lean_Json_mkObj___spec__1(lean_object*, lean_object*, lean_object*); LEAN_EXPORT lean_object* l_Lake_OrdNameMap_empty(lean_object*); LEAN_EXPORT lean_object* l_Lake_Name_quoteFrom(lean_object*, lean_object*, uint8_t); -LEAN_EXPORT lean_object* l_Lake_instToJsonNameMap__lake___rarg(lean_object*, lean_object*); LEAN_EXPORT lean_object* l___private_Lake_Util_Name_0__Lean_Name_cmp_match__2_splitter(lean_object*); LEAN_EXPORT lean_object* l_Lake_stringToLegalOrSimpleName(lean_object* x_1) { _start: @@ -337,275 +320,6 @@ x_2 = lean_box(0); return x_2; } } -LEAN_EXPORT uint8_t l_Lean_RBNode_fold___at_Lake_instToJsonNameMap__lake___spec__1___rarg___lambda__1(lean_object* x_1) { -_start: -{ -uint8_t x_2; -x_2 = 0; -return x_2; -} -} -static lean_object* _init_l_Lean_RBNode_fold___at_Lake_instToJsonNameMap__lake___spec__1___rarg___closed__1() { -_start: -{ -lean_object* x_1; -x_1 = lean_alloc_closure((void*)(l_Lean_RBNode_fold___at_Lake_instToJsonNameMap__lake___spec__1___rarg___lambda__1___boxed), 1, 0); -return x_1; -} -} -LEAN_EXPORT lean_object* 
l_Lean_RBNode_fold___at_Lake_instToJsonNameMap__lake___spec__1___rarg(lean_object* x_1, lean_object* x_2, lean_object* x_3) { -_start: -{ -if (lean_obj_tag(x_3) == 0) -{ -lean_dec(x_1); -return x_2; -} -else -{ -lean_object* x_4; lean_object* x_5; lean_object* x_6; lean_object* x_7; lean_object* x_8; uint8_t x_9; lean_object* x_10; lean_object* x_11; lean_object* x_12; lean_object* x_13; -x_4 = lean_ctor_get(x_3, 0); -lean_inc(x_4); -x_5 = lean_ctor_get(x_3, 1); -lean_inc(x_5); -x_6 = lean_ctor_get(x_3, 2); -lean_inc(x_6); -x_7 = lean_ctor_get(x_3, 3); -lean_inc(x_7); -lean_dec(x_3); -lean_inc(x_1); -x_8 = l_Lean_RBNode_fold___at_Lake_instToJsonNameMap__lake___spec__1___rarg(x_1, x_2, x_4); -x_9 = 1; -x_10 = l_Lean_RBNode_fold___at_Lake_instToJsonNameMap__lake___spec__1___rarg___closed__1; -x_11 = l_Lean_Name_toString(x_5, x_9, x_10); -lean_inc(x_1); -x_12 = lean_apply_1(x_1, x_6); -x_13 = l_Lean_RBNode_insert___at_Lean_Json_mkObj___spec__1(x_8, x_11, x_12); -x_2 = x_13; -x_3 = x_7; -goto _start; -} -} -} -LEAN_EXPORT lean_object* l_Lean_RBNode_fold___at_Lake_instToJsonNameMap__lake___spec__1(lean_object* x_1) { -_start: -{ -lean_object* x_2; -x_2 = lean_alloc_closure((void*)(l_Lean_RBNode_fold___at_Lake_instToJsonNameMap__lake___spec__1___rarg), 3, 0); -return x_2; -} -} -LEAN_EXPORT lean_object* l_Lake_instToJsonNameMap__lake___rarg(lean_object* x_1, lean_object* x_2) { -_start: -{ -lean_object* x_3; lean_object* x_4; lean_object* x_5; -x_3 = lean_box(0); -x_4 = l_Lean_RBNode_fold___at_Lake_instToJsonNameMap__lake___spec__1___rarg(x_1, x_3, x_2); -x_5 = lean_alloc_ctor(5, 1, 0); -lean_ctor_set(x_5, 0, x_4); -return x_5; -} -} -LEAN_EXPORT lean_object* l_Lake_instToJsonNameMap__lake(lean_object* x_1) { -_start: -{ -lean_object* x_2; -x_2 = lean_alloc_closure((void*)(l_Lake_instToJsonNameMap__lake___rarg), 2, 0); -return x_2; -} -} -LEAN_EXPORT lean_object* l_Lean_RBNode_fold___at_Lake_instToJsonNameMap__lake___spec__1___rarg___lambda__1___boxed(lean_object* x_1) { -_start: -{ -uint8_t x_2; lean_object* x_3; -x_2 = l_Lean_RBNode_fold___at_Lake_instToJsonNameMap__lake___spec__1___rarg___lambda__1(x_1); -lean_dec(x_1); -x_3 = lean_box(x_2); -return x_3; -} -} -static lean_object* _init_l_Lean_RBNode_foldM___at_Lake_instFromJsonNameMap__lake___spec__1___rarg___closed__1() { -_start: -{ -lean_object* x_1; -x_1 = lean_mk_string_unchecked("expected name", 13, 13); -return x_1; -} -} -static lean_object* _init_l_Lean_RBNode_foldM___at_Lake_instFromJsonNameMap__lake___spec__1___rarg___closed__2() { -_start: -{ -lean_object* x_1; lean_object* x_2; -x_1 = l_Lean_RBNode_foldM___at_Lake_instFromJsonNameMap__lake___spec__1___rarg___closed__1; -x_2 = lean_alloc_ctor(0, 1, 0); -lean_ctor_set(x_2, 0, x_1); -return x_2; -} -} -LEAN_EXPORT lean_object* l_Lean_RBNode_foldM___at_Lake_instFromJsonNameMap__lake___spec__1___rarg(lean_object* x_1, lean_object* x_2, lean_object* x_3) { -_start: -{ -if (lean_obj_tag(x_3) == 0) -{ -lean_object* x_4; -lean_dec(x_1); -x_4 = lean_alloc_ctor(1, 1, 0); -lean_ctor_set(x_4, 0, x_2); -return x_4; -} -else -{ -lean_object* x_5; lean_object* x_6; lean_object* x_7; lean_object* x_8; lean_object* x_9; -x_5 = lean_ctor_get(x_3, 0); -lean_inc(x_5); -x_6 = lean_ctor_get(x_3, 1); -lean_inc(x_6); -x_7 = lean_ctor_get(x_3, 2); -lean_inc(x_7); -x_8 = lean_ctor_get(x_3, 3); -lean_inc(x_8); -lean_dec(x_3); -lean_inc(x_1); -x_9 = l_Lean_RBNode_foldM___at_Lake_instFromJsonNameMap__lake___spec__1___rarg(x_1, x_2, x_5); -if (lean_obj_tag(x_9) == 0) -{ -uint8_t x_10; -lean_dec(x_8); 
-lean_dec(x_7); -lean_dec(x_6); -lean_dec(x_1); -x_10 = !lean_is_exclusive(x_9); -if (x_10 == 0) -{ -return x_9; -} -else -{ -lean_object* x_11; lean_object* x_12; -x_11 = lean_ctor_get(x_9, 0); -lean_inc(x_11); -lean_dec(x_9); -x_12 = lean_alloc_ctor(0, 1, 0); -lean_ctor_set(x_12, 0, x_11); -return x_12; -} -} -else -{ -lean_object* x_13; lean_object* x_14; uint8_t x_15; -x_13 = lean_ctor_get(x_9, 0); -lean_inc(x_13); -lean_dec(x_9); -x_14 = l_String_toName(x_6); -x_15 = l_Lean_Name_isAnonymous(x_14); -if (x_15 == 0) -{ -lean_object* x_16; -lean_inc(x_1); -x_16 = lean_apply_1(x_1, x_7); -if (lean_obj_tag(x_16) == 0) -{ -uint8_t x_17; -lean_dec(x_14); -lean_dec(x_13); -lean_dec(x_8); -lean_dec(x_1); -x_17 = !lean_is_exclusive(x_16); -if (x_17 == 0) -{ -return x_16; -} -else -{ -lean_object* x_18; lean_object* x_19; -x_18 = lean_ctor_get(x_16, 0); -lean_inc(x_18); -lean_dec(x_16); -x_19 = lean_alloc_ctor(0, 1, 0); -lean_ctor_set(x_19, 0, x_18); -return x_19; -} -} -else -{ -lean_object* x_20; lean_object* x_21; -x_20 = lean_ctor_get(x_16, 0); -lean_inc(x_20); -lean_dec(x_16); -x_21 = l_Lean_RBNode_insert___at_Lean_NameMap_insert___spec__1___rarg(x_13, x_14, x_20); -x_2 = x_21; -x_3 = x_8; -goto _start; -} -} -else -{ -lean_object* x_23; -lean_dec(x_14); -lean_dec(x_13); -lean_dec(x_8); -lean_dec(x_7); -lean_dec(x_1); -x_23 = l_Lean_RBNode_foldM___at_Lake_instFromJsonNameMap__lake___spec__1___rarg___closed__2; -return x_23; -} -} -} -} -} -LEAN_EXPORT lean_object* l_Lean_RBNode_foldM___at_Lake_instFromJsonNameMap__lake___spec__1(lean_object* x_1) { -_start: -{ -lean_object* x_2; -x_2 = lean_alloc_closure((void*)(l_Lean_RBNode_foldM___at_Lake_instFromJsonNameMap__lake___spec__1___rarg), 3, 0); -return x_2; -} -} -LEAN_EXPORT lean_object* l_Lake_instFromJsonNameMap__lake___rarg(lean_object* x_1, lean_object* x_2) { -_start: -{ -lean_object* x_3; -x_3 = l_Lean_Json_getObj_x3f(x_2); -if (lean_obj_tag(x_3) == 0) -{ -uint8_t x_4; -lean_dec(x_1); -x_4 = !lean_is_exclusive(x_3); -if (x_4 == 0) -{ -return x_3; -} -else -{ -lean_object* x_5; lean_object* x_6; -x_5 = lean_ctor_get(x_3, 0); -lean_inc(x_5); -lean_dec(x_3); -x_6 = lean_alloc_ctor(0, 1, 0); -lean_ctor_set(x_6, 0, x_5); -return x_6; -} -} -else -{ -lean_object* x_7; lean_object* x_8; lean_object* x_9; -x_7 = lean_ctor_get(x_3, 0); -lean_inc(x_7); -lean_dec(x_3); -x_8 = lean_box(0); -x_9 = l_Lean_RBNode_foldM___at_Lake_instFromJsonNameMap__lake___spec__1___rarg(x_1, x_8, x_7); -return x_9; -} -} -} -LEAN_EXPORT lean_object* l_Lake_instFromJsonNameMap__lake(lean_object* x_1) { -_start: -{ -lean_object* x_2; -x_2 = lean_alloc_closure((void*)(l_Lake_instFromJsonNameMap__lake___rarg), 2, 0); -return x_2; -} -} LEAN_EXPORT lean_object* l_Lake_Name_eraseHead(lean_object* x_1) { _start: { @@ -1128,12 +842,6 @@ l_Lake_OrdNameMap_empty___closed__1 = _init_l_Lake_OrdNameMap_empty___closed__1( lean_mark_persistent(l_Lake_OrdNameMap_empty___closed__1); l_Lake_OrdNameMap_empty___closed__2 = _init_l_Lake_OrdNameMap_empty___closed__2(); lean_mark_persistent(l_Lake_OrdNameMap_empty___closed__2); -l_Lean_RBNode_fold___at_Lake_instToJsonNameMap__lake___spec__1___rarg___closed__1 = _init_l_Lean_RBNode_fold___at_Lake_instToJsonNameMap__lake___spec__1___rarg___closed__1(); -lean_mark_persistent(l_Lean_RBNode_fold___at_Lake_instToJsonNameMap__lake___spec__1___rarg___closed__1); -l_Lean_RBNode_foldM___at_Lake_instFromJsonNameMap__lake___spec__1___rarg___closed__1 = _init_l_Lean_RBNode_foldM___at_Lake_instFromJsonNameMap__lake___spec__1___rarg___closed__1(); 
-lean_mark_persistent(l_Lean_RBNode_foldM___at_Lake_instFromJsonNameMap__lake___spec__1___rarg___closed__1); -l_Lean_RBNode_foldM___at_Lake_instFromJsonNameMap__lake___spec__1___rarg___closed__2 = _init_l_Lean_RBNode_foldM___at_Lake_instFromJsonNameMap__lake___spec__1___rarg___closed__2(); -lean_mark_persistent(l_Lean_RBNode_foldM___at_Lake_instFromJsonNameMap__lake___spec__1___rarg___closed__2); l_Lake_Name_quoteFrom___closed__1 = _init_l_Lake_Name_quoteFrom___closed__1(); lean_mark_persistent(l_Lake_Name_quoteFrom___closed__1); l_Lake_Name_quoteFrom___closed__2 = _init_l_Lake_Name_quoteFrom___closed__2(); diff --git a/stage0/stdlib/Lean/Compiler/IR.c b/stage0/stdlib/Lean/Compiler/IR.c index 8c1b12cd18b9..022873340216 100644 --- a/stage0/stdlib/Lean/Compiler/IR.c +++ b/stage0/stdlib/Lean/Compiler/IR.c @@ -1,6 +1,6 @@ // Lean compiler output // Module: Lean.Compiler.IR -// Imports: Lean.Compiler.IR.Basic Lean.Compiler.IR.Format Lean.Compiler.IR.CompilerM Lean.Compiler.IR.PushProj Lean.Compiler.IR.ElimDeadVars Lean.Compiler.IR.SimpCase Lean.Compiler.IR.ResetReuse Lean.Compiler.IR.NormIds Lean.Compiler.IR.Checker Lean.Compiler.IR.Borrow Lean.Compiler.IR.Boxing Lean.Compiler.IR.RC Lean.Compiler.IR.ExpandResetReuse Lean.Compiler.IR.UnboxResult Lean.Compiler.IR.ElimDeadBranches Lean.Compiler.IR.EmitC Lean.Compiler.IR.CtorLayout Lean.Compiler.IR.Sorry Lean.Compiler.IR.LLVMBindings Lean.Compiler.IR.EmitLLVM +// Imports: Lean.Compiler.IR.Basic Lean.Compiler.IR.Format Lean.Compiler.IR.CompilerM Lean.Compiler.IR.PushProj Lean.Compiler.IR.ElimDeadVars Lean.Compiler.IR.SimpCase Lean.Compiler.IR.ResetReuse Lean.Compiler.IR.NormIds Lean.Compiler.IR.Checker Lean.Compiler.IR.Borrow Lean.Compiler.IR.Boxing Lean.Compiler.IR.RC Lean.Compiler.IR.ExpandResetReuse Lean.Compiler.IR.UnboxResult Lean.Compiler.IR.ElimDeadBranches Lean.Compiler.IR.EmitC Lean.Compiler.IR.CtorLayout Lean.Compiler.IR.Sorry Lean.Compiler.IR.ToIR Lean.Compiler.IR.LLVMBindings Lean.Compiler.IR.EmitLLVM #include <lean/lean.h> #if defined(__clang__) #pragma clang diagnostic ignored "-Wunused-parameter" @@ -27,6 +27,7 @@ static lean_object* l___private_Lean_Compiler_IR_0__Lean_IR_compileAux___closed_ static lean_object* l___private_Lean_Compiler_IR_0__Lean_IR_compileAux___lambda__2___closed__3; static lean_object* l_Lean_IR_initFn____x40_Lean_Compiler_IR___hyg_5____closed__3; static lean_object* l___private_Lean_Compiler_IR_0__Lean_IR_compileAux___lambda__2___closed__12; +uint8_t l_Lean_Option_get___at_Lean_Compiler_LCNF_toConfigOptions___spec__2(lean_object*, lean_object*); uint8_t lean_usize_dec_eq(size_t, size_t); lean_object* l_Lean_IR_inferBorrow(lean_object*, lean_object*, lean_object*); static lean_object* l___private_Lean_Compiler_IR_0__Lean_IR_compileAux___closed__11; @@ -56,6 +57,7 @@ static lean_object* l___private_Lean_Compiler_IR_0__Lean_IR_compileAux___closed_ static lean_object* l___private_Lean_Compiler_IR_0__Lean_IR_compileAux___lambda__2___closed__6; lean_object* l_Lean_Name_append(lean_object*, lean_object*); LEAN_EXPORT lean_object* l_Array_mapMUnsafe_map___at___private_Lean_Compiler_IR_0__Lean_IR_compileAux___spec__5(size_t, size_t, lean_object*); +lean_object* l_Lean_Option_register___at_Lean_Compiler_LCNF_initFn____x40_Lean_Compiler_LCNF_ConfigOptions___hyg_178____spec__1(lean_object*, lean_object*, lean_object*, lean_object*); static lean_object* l___private_Lean_Compiler_IR_0__Lean_IR_compileAux___closed__2; static lean_object* l_Lean_IR_initFn____x40_Lean_Compiler_IR___hyg_5____closed__4; static lean_object* 
l___private_Lean_Compiler_IR_0__Lean_IR_compileAux___lambda__2___closed__17; @@ -64,7 +66,6 @@ static lean_object* l___private_Lean_Compiler_IR_0__Lean_IR_compileAux___lambda_ static lean_object* l___private_Lean_Compiler_IR_0__Lean_IR_compileAux___lambda__2___closed__5; lean_object* l_Lean_Name_str___override(lean_object*, lean_object*); static lean_object* l_Lean_IR_initFn____x40_Lean_Compiler_IR___hyg_5____closed__8; -uint8_t l_Lean_Option_get___at___private_Lean_Util_Profile_0__Lean_get__profiler___spec__1(lean_object*, lean_object*); uint8_t l_Lean_IR_ExplicitBoxing_requiresBoxedVersion(lean_object*, lean_object*); LEAN_EXPORT lean_object* l_Array_mapMUnsafe_map___at___private_Lean_Compiler_IR_0__Lean_IR_compileAux___spec__3(size_t, size_t, lean_object*); lean_object* l_Lean_IR_checkDecls(lean_object*, lean_object*, lean_object*); @@ -119,7 +120,6 @@ lean_object* lean_array_uset(lean_object*, size_t, lean_object*); extern lean_object* l_Lean_Options_empty; lean_object* l_Lean_IR_explicitBoxing(lean_object*, lean_object*, lean_object*); LEAN_EXPORT lean_object* l_Array_mapMUnsafe_map___at___private_Lean_Compiler_IR_0__Lean_IR_compileAux___spec__1___boxed(lean_object*, lean_object*, lean_object*); -lean_object* l_Lean_Option_register___at_Lean_initFn____x40_Lean_Util_Profile___hyg_5____spec__1(lean_object*, lean_object*, lean_object*, lean_object*); static lean_object* _init_l_Lean_IR_initFn____x40_Lean_Compiler_IR___hyg_5____closed__1() { _start: { @@ -212,7 +212,7 @@ lean_object* x_2; lean_object* x_3; lean_object* x_4; lean_object* x_5; x_2 = l_Lean_IR_initFn____x40_Lean_Compiler_IR___hyg_5____closed__3; x_3 = l_Lean_IR_initFn____x40_Lean_Compiler_IR___hyg_5____closed__6; x_4 = l_Lean_IR_initFn____x40_Lean_Compiler_IR___hyg_5____closed__9; -x_5 = l_Lean_Option_register___at_Lean_initFn____x40_Lean_Util_Profile___hyg_5____spec__1(x_2, x_3, x_4, x_1); +x_5 = l_Lean_Option_register___at_Lean_Compiler_LCNF_initFn____x40_Lean_Compiler_LCNF_ConfigOptions___hyg_178____spec__1(x_2, x_3, x_4, x_1); return x_5; } } @@ -695,7 +695,7 @@ x_42 = lean_ctor_get(x_41, 1); lean_inc(x_42); lean_dec(x_41); x_43 = l___private_Lean_Compiler_IR_0__Lean_IR_compileAux___lambda__2___closed__16; -x_44 = l_Lean_Option_get___at___private_Lean_Util_Profile_0__Lean_get__profiler___spec__1(x_6, x_43); +x_44 = l_Lean_Option_get___at_Lean_Compiler_LCNF_toConfigOptions___spec__2(x_6, x_43); if (x_44 == 0) { lean_object* x_45; lean_object* x_46; @@ -878,7 +878,7 @@ x_23 = lean_ctor_get(x_22, 1); lean_inc(x_23); lean_dec(x_22); x_24 = l___private_Lean_Compiler_IR_0__Lean_IR_compileAux___lambda__2___closed__16; -x_25 = l_Lean_Option_get___at___private_Lean_Util_Profile_0__Lean_get__profiler___spec__1(x_2, x_24); +x_25 = l_Lean_Option_get___at_Lean_Compiler_LCNF_toConfigOptions___spec__2(x_2, x_24); if (x_25 == 0) { lean_object* x_26; lean_object* x_27; @@ -1523,6 +1523,7 @@ lean_object* initialize_Lean_Compiler_IR_ElimDeadBranches(uint8_t builtin, lean_ lean_object* initialize_Lean_Compiler_IR_EmitC(uint8_t builtin, lean_object*); lean_object* initialize_Lean_Compiler_IR_CtorLayout(uint8_t builtin, lean_object*); lean_object* initialize_Lean_Compiler_IR_Sorry(uint8_t builtin, lean_object*); +lean_object* initialize_Lean_Compiler_IR_ToIR(uint8_t builtin, lean_object*); lean_object* initialize_Lean_Compiler_IR_LLVMBindings(uint8_t builtin, lean_object*); lean_object* initialize_Lean_Compiler_IR_EmitLLVM(uint8_t builtin, lean_object*); static bool _G_initialized = false; @@ -1584,6 +1585,9 @@ lean_dec_ref(res); res = 
initialize_Lean_Compiler_IR_Sorry(builtin, lean_io_mk_world()); if (lean_io_result_is_error(res)) return res; lean_dec_ref(res); +res = initialize_Lean_Compiler_IR_ToIR(builtin, lean_io_mk_world()); +if (lean_io_result_is_error(res)) return res; +lean_dec_ref(res); res = initialize_Lean_Compiler_IR_LLVMBindings(builtin, lean_io_mk_world()); if (lean_io_result_is_error(res)) return res; lean_dec_ref(res); diff --git a/stage0/stdlib/Lean/Compiler/IR/ToIR.c b/stage0/stdlib/Lean/Compiler/IR/ToIR.c new file mode 100644 index 000000000000..d5ab150531cf --- /dev/null +++ b/stage0/stdlib/Lean/Compiler/IR/ToIR.c @@ -0,0 +1,123945 @@ +// Lean compiler output +// Module: Lean.Compiler.IR.ToIR +// Imports: Lean.Compiler.LCNF.Basic Lean.Compiler.LCNF.CompilerM Lean.Compiler.LCNF.PhaseExt Lean.Compiler.IR.Basic Lean.Compiler.IR.CompilerM Lean.Compiler.IR.CtorLayout Lean.CoreM Lean.Environment +#include <lean/lean.h> +#if defined(__clang__) +#pragma clang diagnostic ignored "-Wunused-parameter" +#pragma clang diagnostic ignored "-Wunused-label" +#elif defined(__GNUC__) && !defined(__CLANG__) +#pragma GCC diagnostic ignored "-Wunused-parameter" +#pragma GCC diagnostic ignored "-Wunused-label" +#pragma GCC diagnostic ignored "-Wunused-but-set-variable" +#endif +#ifdef __cplusplus +extern "C" { +#endif +static lean_object* l_Lean_IR_ToIR_lowerLet___closed__32; +static lean_object* l_Lean_IR_ToIR_lowerLet___closed__5; +lean_object* l_Lean_Expr_const___override(lean_object*, lean_object*); +LEAN_EXPORT lean_object* l_Lean_IR_ToIR_lowerLet(lean_object*, lean_object*, lean_object*, lean_object*, lean_object*, lean_object*); +static lean_object* l_Lean_IR_ToIR_M_run___rarg___closed__4; +LEAN_EXPORT lean_object* l_Lean_IR_ToIR_bindJoinPoint___boxed(lean_object*, lean_object*, lean_object*, lean_object*, lean_object*); +static lean_object* l_Lean_IR_ToIR_instInhabitedTranslatedProj___closed__3; +static lean_object* l_List_forIn_x27_loop___at_Lean_IR_ToIR_lowerEnumToScalarType___spec__2___closed__4; +static lean_object* l_Lean_IR_ToIR_instInhabitedTranslatedProj___closed__1; +LEAN_EXPORT lean_object* l_Std_Range_forIn_x27_loop___at_Lean_IR_ToIR_lowerLet___spec__2___boxed(lean_object*, lean_object*, lean_object*, lean_object*, lean_object*, lean_object*, lean_object*, lean_object*, lean_object*, lean_object*, lean_object*, lean_object*); +static lean_object* l_Lean_IR_ToIR_lowerLet___closed__21; +LEAN_EXPORT lean_object* l_Std_DHashMap_Internal_Raw_u2080_expand_go___at_Lean_IR_ToIR_bindVar___spec__3(lean_object*, lean_object*, lean_object*); +static lean_object* l_Lean_IR_ToIR_lowerLet___closed__31; +static lean_object* l_Lean_IR_ToIR_lowerResultType_resultTypeForArity___closed__5; +LEAN_EXPORT lean_object* l_Lean_IR_ToIR_lowerResultType(lean_object*, lean_object*, lean_object*, lean_object*, lean_object*, lean_object*); +static lean_object* l_Lean_IR_ToIR_lowerLet___closed__11; +static lean_object* l_Lean_IR_ToIR_lowerEnumToScalarType___closed__1; +static lean_object* l_panic___at_Lean_IR_ToIR_lowerType___spec__1___closed__1; +LEAN_EXPORT lean_object* l_Std_Range_forIn_x27_loop___at_Lean_IR_ToIR_lowerLet___spec__6(lean_object*, lean_object*, lean_object*, lean_object*, lean_object*, lean_object*, lean_object*, lean_object*, lean_object*, lean_object*, lean_object*, lean_object*); +static lean_object* l_panic___at_Lean_IR_ToIR_getCtorInfo___spec__1___closed__3; +LEAN_EXPORT lean_object* l_Lean_IR_ToIR_getCtorInfo(lean_object*, lean_object*, lean_object*, lean_object*, lean_object*); +LEAN_EXPORT lean_object* 
l_Lean_IR_ToIR_lowerLet_lowerNonObjectFields___boxed(lean_object*, lean_object*, lean_object*, lean_object*, lean_object*, lean_object*, lean_object*, lean_object*, lean_object*, lean_object*); +static lean_object* l_Lean_IR_ToIR_lowerLet___closed__2; +static lean_object* l_Lean_IR_ToIR_lowerLet___closed__33; +LEAN_EXPORT lean_object* l_Array_forIn_x27Unsafe_loop___at_Lean_IR_toIR___spec__1(lean_object*, lean_object*, lean_object*, size_t, size_t, lean_object*, lean_object*, lean_object*, lean_object*); +lean_object* lean_ir_find_env_decl(lean_object*, lean_object*); +LEAN_EXPORT lean_object* l_Lean_IR_ToIR_lowerLet___lambda__3(lean_object*, lean_object*, lean_object*, lean_object*, lean_object*, lean_object*, lean_object*, lean_object*, lean_object*, lean_object*); +extern lean_object* l_Lean_IR_instInhabitedCtorFieldInfo; +LEAN_EXPORT lean_object* l_Std_DHashMap_Internal_AssocList_get_x3f___at_Lean_IR_ToIR_lowerArg___spec__1___boxed(lean_object*, lean_object*); +LEAN_EXPORT lean_object* l_Std_Range_forIn_x27_loop___at_Lean_IR_ToIR_lowerLet___spec__2(lean_object*, lean_object*, lean_object*, lean_object*, lean_object*, lean_object*, lean_object*, lean_object*, lean_object*, lean_object*, lean_object*, lean_object*); +size_t lean_uint64_to_usize(uint64_t); +static lean_object* l_Lean_IR_ToIR_lowerType___closed__5; +LEAN_EXPORT lean_object* l_Array_mapMUnsafe_map___at_Lean_IR_ToIR_lowerCode___spec__1___boxed(lean_object*, lean_object*, lean_object*, lean_object*, lean_object*, lean_object*, lean_object*); +LEAN_EXPORT lean_object* l_List_forIn_x27_loop___at_Lean_IR_ToIR_lowerEnumToScalarType___spec__2___boxed(lean_object*, lean_object*, lean_object*, lean_object*, lean_object*, lean_object*, lean_object*, lean_object*, lean_object*, lean_object*, lean_object*, lean_object*); +lean_object* l_Lean_Name_toString(lean_object*, uint8_t, lean_object*); +LEAN_EXPORT lean_object* l_Std_Range_forIn_x27_loop___at_Lean_IR_ToIR_lowerLet___spec__4___boxed(lean_object*, lean_object*, lean_object*, lean_object*, lean_object*, lean_object*, lean_object*, lean_object*, lean_object*, lean_object*, lean_object*, lean_object*); +static lean_object* l_panic___at_Lean_IR_ToIR_lowerArg___spec__2___closed__1; +static lean_object* l_Lean_IR_ToIR_lowerLet___closed__30; +lean_object* lean_array_push(lean_object*, lean_object*); +static lean_object* l_Lean_IR_ToIR_lowerLet___closed__16; +lean_object* l___private_Init_GetElem_0__List_get_x21Internal___rarg(lean_object*, lean_object*, lean_object*); +LEAN_EXPORT lean_object* l_Lean_IR_ToIR_bindErased(lean_object*, lean_object*, lean_object*, lean_object*, lean_object*); +lean_object* lean_mk_array(lean_object*, lean_object*); +static lean_object* l_Lean_IR_ToIR_lowerEnumToScalarType___lambda__1___closed__1; +LEAN_EXPORT lean_object* l_panic___at_Lean_IR_ToIR_lowerArg___spec__2(lean_object*, lean_object*, lean_object*, lean_object*, lean_object*); +lean_object* lean_array_fset(lean_object*, lean_object*, lean_object*); +LEAN_EXPORT lean_object* l_Lean_IR_ToIR_lowerLet_lowerNonObjectFields_loop___boxed(lean_object*, lean_object*, lean_object*, lean_object*, lean_object*, lean_object*, lean_object*, lean_object*, lean_object*, lean_object*, lean_object*); +lean_object* l_Lean_Environment_find_x3f(lean_object*, lean_object*, uint8_t); +LEAN_EXPORT lean_object* l_Array_mapMUnsafe_map___at_Lean_IR_ToIR_lowerCode___spec__3___boxed(lean_object*, lean_object*, lean_object*, lean_object*, lean_object*, lean_object*, lean_object*, lean_object*); +static lean_object* 
l_Lean_IR_ToIR_lowerLet___closed__38; +LEAN_EXPORT lean_object* l_Lean_IR_ToIR_lowerArg(lean_object*, lean_object*, lean_object*, lean_object*, lean_object*); +LEAN_EXPORT lean_object* l_Lean_IR_ToIR_lowerType(lean_object*, lean_object*, lean_object*, lean_object*, lean_object*); +LEAN_EXPORT lean_object* l_Std_Range_forIn_x27_loop___at_Lean_IR_ToIR_lowerLet___spec__8___boxed(lean_object*, lean_object*, lean_object*, lean_object*, lean_object*, lean_object*, lean_object*, lean_object*, lean_object*, lean_object*, lean_object*, lean_object*); +LEAN_EXPORT lean_object* l_Array_mapMUnsafe_map___at_Lean_IR_ToIR_lowerCode___spec__2(size_t, size_t, lean_object*, lean_object*, lean_object*, lean_object*, lean_object*); +static lean_object* l_Lean_IR_ToIR_lowerProj___closed__1; +LEAN_EXPORT lean_object* l_Lean_IR_ToIR_lowerLet___lambda__1(lean_object*, lean_object*, lean_object*, lean_object*, lean_object*, lean_object*, lean_object*, lean_object*); +lean_object* l_Nat_nextPowerOfTwo_go(lean_object*, lean_object*, lean_object*); +static lean_object* l_Lean_IR_ToIR_lowerCode___closed__3; +lean_object* l_Lean_Environment_addExtraName(lean_object*, lean_object*); +extern lean_object* l_Lean_IR_instInhabitedFnBody; +static lean_object* l_Lean_IR_ToIR_lowerLet___closed__3; +static lean_object* l_Lean_IR_ToIR_lowerLet___closed__15; +static lean_object* l_Lean_IR_ToIR_lowerCode___closed__7; +uint8_t lean_string_dec_eq(lean_object*, lean_object*); +static lean_object* l_Lean_IR_ToIR_lowerLet___closed__19; +LEAN_EXPORT lean_object* l_Lean_IR_ToIR_lowerResultType___boxed(lean_object*, lean_object*, lean_object*, lean_object*, lean_object*, lean_object*); +lean_object* l_Lean_Compiler_LCNF_getMonoDecl_x3f(lean_object*, lean_object*, lean_object*, lean_object*); +LEAN_EXPORT lean_object* l_panic___at_Lean_IR_ToIR_lowerType___spec__1(lean_object*, lean_object*, lean_object*, lean_object*, lean_object*); +static lean_object* l_Lean_IR_ToIR_lowerAlt_loop___closed__3; +LEAN_EXPORT lean_object* l_Std_DHashMap_Internal_Raw_u2080_expand___at_Lean_IR_ToIR_bindVar___spec__2(lean_object*); +extern lean_object* l_instInhabitedPUnit; +static lean_object* l_Lean_IR_ToIR_lowerArg___closed__1; +LEAN_EXPORT lean_object* l_Lean_IR_ToIR_findDecl(lean_object*, lean_object*, lean_object*, lean_object*, lean_object*); +uint64_t l___private_Lean_Expr_0__Lean_hashFVarId____x40_Lean_Expr___hyg_1730_(lean_object*); +static lean_object* l_Lean_IR_ToIR_getCtorInfo___closed__3; +size_t lean_usize_of_nat(lean_object*); +static lean_object* l_Lean_IR_ToIR_lowerLet___closed__13; +static lean_object* l_Lean_IR_ToIR_lowerType___closed__12; +lean_object* lean_st_ref_take(lean_object*, lean_object*); +static lean_object* l_Lean_IR_ToIR_lowerLet___closed__29; +LEAN_EXPORT lean_object* l_Lean_IR_ToIR_bindErased___boxed(lean_object*, lean_object*, lean_object*, lean_object*, lean_object*); +lean_object* l_Lean_IR_Decl_params(lean_object*); +uint64_t lean_uint64_shift_right(uint64_t, uint64_t); +static lean_object* l_Lean_IR_ToIR_lowerLet___closed__23; +LEAN_EXPORT lean_object* l_Std_Range_forIn_x27_loop___at_Lean_IR_ToIR_lowerLet___spec__7(lean_object*, lean_object*, lean_object*, lean_object*, lean_object*, lean_object*, lean_object*, lean_object*, lean_object*, lean_object*, lean_object*, lean_object*); +static lean_object* l_Lean_IR_ToIR_getCtorInfo___closed__2; +lean_object* lean_nat_div(lean_object*, lean_object*); +LEAN_EXPORT lean_object* l_Std_DHashMap_Internal_AssocList_contains___at_Lean_IR_ToIR_bindVar___spec__1___boxed(lean_object*, 
lean_object*); +extern lean_object* l_Lean_IR_instInhabitedCtorInfo; +lean_object* l_Lean_PersistentEnvExtension_addEntry___rarg(lean_object*, lean_object*, lean_object*); +LEAN_EXPORT lean_object* l_Lean_IR_ToIR_addDecl(lean_object*, lean_object*, lean_object*, lean_object*, lean_object*); +LEAN_EXPORT lean_object* l_Lean_IR_ToIR_M_run___rarg(lean_object*, lean_object*, lean_object*, lean_object*); +LEAN_EXPORT lean_object* l_Lean_IR_ToIR_lowerProj___boxed(lean_object*, lean_object*, lean_object*); +LEAN_EXPORT lean_object* l_Lean_IR_ToIR_newVar___boxed(lean_object*, lean_object*, lean_object*, lean_object*); +lean_object* l_Lean_MessageData_ofFormat(lean_object*); +LEAN_EXPORT lean_object* l_Lean_IR_ToIR_lowerLet___lambda__2___boxed(lean_object*); +LEAN_EXPORT lean_object* l_panic___at_Lean_IR_ToIR_lowerCode___spec__4(lean_object*); +lean_object* l_Lean_IR_Decl_name(lean_object*); +extern lean_object* l_Lean_IR_declMapExt; +static lean_object* l_Lean_IR_ToIR_lowerType___closed__3; +LEAN_EXPORT lean_object* l_Lean_IR_ToIR_lowerArg___boxed(lean_object*, lean_object*, lean_object*, lean_object*, lean_object*); +static lean_object* l_Lean_IR_ToIR_lowerArg___closed__2; +LEAN_EXPORT uint8_t l_Std_DHashMap_Internal_AssocList_contains___at_Lean_IR_ToIR_bindVar___spec__1(lean_object*, lean_object*); +LEAN_EXPORT lean_object* l_Array_forIn_x27Unsafe_loop___at_Lean_IR_toIR___spec__1___boxed(lean_object*, lean_object*, lean_object*, lean_object*, lean_object*, lean_object*, lean_object*, lean_object*, lean_object*); +static lean_object* l_Lean_IR_ToIR_lowerType___closed__2; +lean_object* lean_st_ref_get(lean_object*, lean_object*); +uint8_t l_List_isEmpty___rarg(lean_object*); +LEAN_EXPORT lean_object* l_Lean_IR_ToIR_lowerCode(lean_object*, lean_object*, lean_object*, lean_object*, lean_object*); +static lean_object* l_Lean_IR_ToIR_lowerType___closed__9; +lean_object* l_Lean_addMessageContextPartial___at_Lean_Core_instAddMessageContextCoreM___spec__1(lean_object*, lean_object*, lean_object*, lean_object*); +static lean_object* l_Lean_IR_ToIR_lowerLet___closed__14; +LEAN_EXPORT lean_object* l_Lean_IR_ToIR_lowerResultType_resultTypeForArity(lean_object*, lean_object*); +static lean_object* l_Lean_IR_ToIR_lowerType___closed__11; +static lean_object* l_Lean_IR_ToIR_lowerResultType_resultTypeForArity___closed__2; +static lean_object* l_Lean_IR_ToIR_lowerLet___closed__6; +LEAN_EXPORT lean_object* l_Std_Range_forIn_x27_loop___at_Lean_IR_ToIR_lowerLet___spec__5(lean_object*, lean_object*, lean_object*, lean_object*, lean_object*, lean_object*, lean_object*, lean_object*, lean_object*, lean_object*, lean_object*, lean_object*); +LEAN_EXPORT lean_object* l_Array_mapMUnsafe_map___at_Lean_IR_ToIR_lowerCode___spec__2___boxed(lean_object*, lean_object*, lean_object*, lean_object*, lean_object*, lean_object*, lean_object*); +static lean_object* l_Lean_IR_ToIR_lowerType___closed__4; +static lean_object* l_Lean_IR_ToIR_lowerResultType_resultTypeForArity___closed__4; +static lean_object* l_Lean_IR_ToIR_lowerLet___closed__18; +LEAN_EXPORT lean_object* l_Lean_IR_ToIR_M_run(lean_object*); +extern lean_object* l_Lean_IR_instInhabitedIRType; +LEAN_EXPORT lean_object* l_Lean_IR_ToIR_lowerParam(lean_object*, lean_object*, lean_object*, lean_object*, lean_object*); +static lean_object* l_Lean_IR_ToIR_lowerLet___closed__22; +lean_object* l_panic___at_Lean_Expr_appFn_x21___spec__1(lean_object*); +lean_object* l_List_lengthTRAux___rarg(lean_object*, lean_object*); +static lean_object* l_Lean_IR_ToIR_lowerLet___closed__7; 
+static lean_object* l_panic___at_Lean_IR_ToIR_lowerEnumToScalarType___spec__1___closed__1; +static lean_object* l_Lean_IR_ToIR_lowerAlt_loop___closed__2; +uint8_t lean_name_eq(lean_object*, lean_object*); +lean_object* l_Lean_Name_str___override(lean_object*, lean_object*); +extern lean_object* l_Lean_IR_instInhabitedArg; +uint8_t l_Lean_isExtern(lean_object*, lean_object*); +LEAN_EXPORT lean_object* l_Std_Range_forIn_x27_loop___at_Lean_IR_ToIR_lowerLet___spec__9(lean_object*, lean_object*, lean_object*, lean_object*, lean_object*, lean_object*, lean_object*, lean_object*, lean_object*, lean_object*, lean_object*, lean_object*); +static lean_object* l_Lean_IR_ToIR_lowerArg___closed__3; +LEAN_EXPORT lean_object* l_Lean_IR_toIR___boxed(lean_object*, lean_object*, lean_object*, lean_object*); +static lean_object* l_Lean_IR_ToIR_lowerLet___closed__35; +extern lean_object* l_Lean_Compiler_LCNF_instInhabitedArg; +lean_object* l___private_Init_Util_0__mkPanicMessageWithDecl(lean_object*, lean_object*, lean_object*, lean_object*, lean_object*); +LEAN_EXPORT lean_object* l_Lean_IR_ToIR_bindVarToVarId___boxed(lean_object*, lean_object*, lean_object*, lean_object*, lean_object*, lean_object*); +LEAN_EXPORT lean_object* l_Std_Range_forIn_x27_loop___at_Lean_IR_ToIR_lowerLet___spec__3(lean_object*, lean_object*, lean_object*, lean_object*, lean_object*, lean_object*, lean_object*, lean_object*, lean_object*, lean_object*, lean_object*, lean_object*); +LEAN_EXPORT lean_object* l_Std_Range_forIn_x27_loop___at_Lean_IR_ToIR_lowerLet___spec__8(lean_object*, lean_object*, lean_object*, lean_object*, lean_object*, lean_object*, lean_object*, lean_object*, lean_object*, lean_object*, lean_object*, lean_object*); +LEAN_EXPORT lean_object* l_Lean_IR_ToIR_lowerAlt_loop___boxed(lean_object*, lean_object*, lean_object*, lean_object*, lean_object*, lean_object*, lean_object*, lean_object*, lean_object*, lean_object*); +static lean_object* l_Lean_IR_ToIR_lowerCode___closed__6; +static lean_object* l_List_forIn_x27_loop___at_Lean_IR_ToIR_lowerEnumToScalarType___spec__2___closed__6; +LEAN_EXPORT lean_object* l_Lean_IR_ToIR_lowerLitValue(lean_object*); +LEAN_EXPORT lean_object* l_panic___at_Lean_IR_ToIR_lowerAlt_loop___spec__1(lean_object*, lean_object*, lean_object*, lean_object*, lean_object*); +static lean_object* l_panic___at_Lean_IR_ToIR_lowerAlt_loop___spec__1___closed__1; +static lean_object* l_Lean_IR_ToIR_lowerLet___closed__26; +static lean_object* l_Lean_IR_ToIR_lowerLet___closed__8; +LEAN_EXPORT lean_object* l_Std_Range_forIn_x27_loop___at_Lean_IR_ToIR_lowerLet___spec__3___boxed(lean_object*, lean_object*, lean_object*, lean_object*, lean_object*, lean_object*, lean_object*, lean_object*, lean_object*, lean_object*, lean_object*, lean_object*); +static lean_object* l_panic___at_Lean_IR_ToIR_lowerEnumToScalarType___spec__1___closed__2; +LEAN_EXPORT lean_object* l_Array_mapMUnsafe_map___at_Lean_IR_ToIR_lowerCode___spec__1(size_t, size_t, lean_object*, lean_object*, lean_object*, lean_object*, lean_object*); +LEAN_EXPORT lean_object* l_Std_DHashMap_Internal_AssocList_foldlM___at_Lean_IR_ToIR_bindVar___spec__4(lean_object*, lean_object*); +LEAN_EXPORT lean_object* l_Std_Range_forIn_x27_loop___at_Lean_IR_ToIR_lowerLet___spec__4(lean_object*, lean_object*, lean_object*, lean_object*, lean_object*, lean_object*, lean_object*, lean_object*, lean_object*, lean_object*, lean_object*, lean_object*); +lean_object* l_Array_extract___rarg(lean_object*, lean_object*, lean_object*); +static lean_object* 
l_Lean_IR_ToIR_lowerType___closed__7; +static lean_object* l_Lean_IR_ToIR_lowerLet___closed__24; +static lean_object* l_Lean_IR_ToIR_lowerLet___closed__37; +LEAN_EXPORT lean_object* l_Lean_IR_ToIR_lowerEnumToScalarType___lambda__1(lean_object*, lean_object*, lean_object*, lean_object*, lean_object*, lean_object*, lean_object*); +LEAN_EXPORT lean_object* l_Lean_IR_ToIR_findDecl___boxed(lean_object*, lean_object*, lean_object*, lean_object*, lean_object*); +lean_object* lean_ir_get_ctor_layout(lean_object*, lean_object*); +lean_object* l_StateT_instMonad___rarg(lean_object*); +lean_object* lean_array_fget(lean_object*, lean_object*); +static lean_object* l_Lean_IR_ToIR_instInhabitedTranslatedProj___closed__2; +static lean_object* l_Lean_IR_ToIR_addDecl___closed__2; +LEAN_EXPORT lean_object* l_Std_DHashMap_Internal_AssocList_get_x3f___at_Lean_IR_ToIR_lowerArg___spec__1(lean_object*, lean_object*); +static lean_object* l_List_forIn_x27_loop___at_Lean_IR_ToIR_lowerEnumToScalarType___spec__2___closed__5; +LEAN_EXPORT lean_object* l_Std_Range_forIn_x27_loop___at_Lean_IR_ToIR_lowerLet___spec__9___boxed(lean_object*, lean_object*, lean_object*, lean_object*, lean_object*, lean_object*, lean_object*, lean_object*, lean_object*, lean_object*, lean_object*, lean_object*); +LEAN_EXPORT uint8_t l_Lean_IR_ToIR_lowerLet___lambda__2(lean_object*); +static lean_object* l_Lean_IR_ToIR_lowerLet___closed__27; +uint8_t lean_nat_dec_eq(lean_object*, lean_object*); +LEAN_EXPORT lean_object* l_Lean_IR_ToIR_bindJoinPoint(lean_object*, lean_object*, lean_object*, lean_object*, lean_object*); +static lean_object* l_Lean_IR_ToIR_lowerCode___closed__2; +static lean_object* l_Lean_IR_ToIR_lowerLet___closed__17; +uint8_t lean_nat_dec_lt(lean_object*, lean_object*); +lean_object* l_Lean_Name_mkStr2(lean_object*, lean_object*); +static lean_object* l_Lean_IR_ToIR_lowerType___closed__6; +static lean_object* l_panic___at_Lean_IR_ToIR_getCtorInfo___spec__1___closed__2; +static lean_object* l_Lean_IR_ToIR_M_run___rarg___closed__2; +static lean_object* l_Lean_IR_ToIR_M_run___rarg___closed__3; +LEAN_EXPORT lean_object* l_Lean_IR_ToIR_lowerLet___lambda__4___boxed(lean_object*, lean_object*, lean_object*, lean_object*, lean_object*, lean_object*, lean_object*, lean_object*, lean_object*, lean_object*); +LEAN_EXPORT lean_object* l_Lean_IR_ToIR_lowerEnumToScalarType___lambda__1___boxed(lean_object*, lean_object*, lean_object*, lean_object*, lean_object*, lean_object*, lean_object*); +static lean_object* l_List_forIn_x27_loop___at_Lean_IR_ToIR_lowerEnumToScalarType___spec__2___closed__3; +static lean_object* l_Lean_IR_ToIR_addDecl___closed__1; +static lean_object* l_Lean_IR_ToIR_lowerCode___closed__1; +static lean_object* l_Lean_IR_ToIR_lowerAlt_loop___closed__1; +static lean_object* l_Lean_IR_ToIR_lowerLet___closed__28; +LEAN_EXPORT lean_object* l_Lean_IR_ToIR_lowerLet_lowerNonObjectFields_loop(lean_object*, lean_object*, lean_object*, lean_object*, lean_object*, lean_object*, lean_object*, lean_object*, lean_object*, lean_object*, lean_object*); +uint64_t lean_uint64_xor(uint64_t, uint64_t); +lean_object* lean_panic_fn(lean_object*, lean_object*); +LEAN_EXPORT lean_object* l_Lean_IR_ToIR_bindVarToVarId(lean_object*, lean_object*, lean_object*, lean_object*, lean_object*, lean_object*); +lean_object* lean_nat_sub(lean_object*, lean_object*); +static lean_object* l_Lean_IR_ToIR_lowerLet___closed__4; +LEAN_EXPORT lean_object* l_Lean_throwError___at_Lean_IR_ToIR_lowerLet___spec__1___boxed(lean_object*, lean_object*, lean_object*, 
lean_object*, lean_object*); +static lean_object* l_Lean_IR_ToIR_addDecl___closed__4; +lean_object* lean_nat_mul(lean_object*, lean_object*); +LEAN_EXPORT lean_object* l_Lean_IR_ToIR_lowerEnumToScalarType(lean_object*, lean_object*, lean_object*, lean_object*, lean_object*); +static lean_object* l_Lean_IR_ToIR_lowerEnumToScalarType___lambda__1___closed__3; +LEAN_EXPORT lean_object* l_Lean_IR_ToIR_newVar(lean_object*, lean_object*, lean_object*, lean_object*); +static lean_object* l_Lean_IR_ToIR_lowerLet___closed__9; +lean_object* l_Lean_PersistentHashMap_mkEmptyEntriesArray(lean_object*, lean_object*); +extern lean_object* l_Lean_Core_instMonadCoreM; +LEAN_EXPORT lean_object* l_Lean_IR_ToIR_lowerProj(lean_object*, lean_object*, lean_object*); +static lean_object* l_Lean_IR_ToIR_lowerLet___closed__36; +size_t lean_usize_sub(size_t, size_t); +static lean_object* l_Lean_IR_ToIR_lowerCode___closed__5; +lean_object* lean_array_mk(lean_object*); +LEAN_EXPORT lean_object* l_Std_Range_forIn_x27_loop___at_Lean_IR_ToIR_lowerLet___spec__6___boxed(lean_object*, lean_object*, lean_object*, lean_object*, lean_object*, lean_object*, lean_object*, lean_object*, lean_object*, lean_object*, lean_object*, lean_object*); +static lean_object* l_Lean_IR_ToIR_lowerLet___closed__12; +LEAN_EXPORT lean_object* l_Lean_IR_ToIR_bindVar___boxed(lean_object*, lean_object*, lean_object*, lean_object*, lean_object*); +static lean_object* l_Lean_IR_ToIR_lowerType___closed__1; +static lean_object* l_Lean_IR_ToIR_lowerLet___closed__34; +size_t lean_usize_add(size_t, size_t); +static lean_object* l_Lean_IR_ToIR_lowerCode___closed__4; +LEAN_EXPORT lean_object* l_Lean_IR_toIR(lean_object*, lean_object*, lean_object*, lean_object*); +LEAN_EXPORT lean_object* l_Lean_IR_ToIR_lowerLet_lowerNonObjectFields(lean_object*, lean_object*, lean_object*, lean_object*, lean_object*, lean_object*, lean_object*, lean_object*, lean_object*, lean_object*); +extern lean_object* l_Lean_instInhabitedName; +LEAN_EXPORT lean_object* l_Array_mapMUnsafe_map___at_Lean_IR_ToIR_lowerCode___spec__3(lean_object*, size_t, size_t, lean_object*, lean_object*, lean_object*, lean_object*, lean_object*); +lean_object* lean_array_uget(lean_object*, size_t); +size_t lean_array_size(lean_object*); +LEAN_EXPORT lean_object* l_Std_Range_forIn_x27_loop___at_Lean_IR_ToIR_lowerLet___spec__7___boxed(lean_object*, lean_object*, lean_object*, lean_object*, lean_object*, lean_object*, lean_object*, lean_object*, lean_object*, lean_object*, lean_object*, lean_object*); +static lean_object* l_Lean_IR_ToIR_lowerLet___closed__10; +LEAN_EXPORT lean_object* l_Lean_IR_ToIR_lowerDecl(lean_object*, lean_object*, lean_object*, lean_object*, lean_object*); +lean_object* l_instInhabitedOfMonad___rarg(lean_object*, lean_object*); +lean_object* lean_st_ref_set(lean_object*, lean_object*, lean_object*); +static lean_object* l_List_forIn_x27_loop___at_Lean_IR_ToIR_lowerEnumToScalarType___spec__2___closed__1; +static lean_object* l_Lean_IR_ToIR_lowerEnumToScalarType___lambda__1___closed__2; +LEAN_EXPORT lean_object* l_panic___at_Lean_IR_ToIR_getCtorInfo___spec__1(lean_object*, lean_object*, lean_object*, lean_object*, lean_object*); +LEAN_EXPORT lean_object* l_Std_Range_forIn_x27_loop___at_Lean_IR_ToIR_lowerLet___spec__5___boxed(lean_object*, lean_object*, lean_object*, lean_object*, lean_object*, lean_object*, lean_object*, lean_object*, lean_object*, lean_object*, lean_object*, lean_object*); +LEAN_EXPORT lean_object* l_Lean_IR_ToIR_bindVar(lean_object*, lean_object*, lean_object*, 
lean_object*, lean_object*); +static lean_object* l_Lean_IR_ToIR_lowerLet___closed__20; +static lean_object* l_Lean_IR_ToIR_M_run___rarg___closed__1; +lean_object* l_Lean_Expr_headBeta(lean_object*); +lean_object* lean_array_get_size(lean_object*); +lean_object* lean_ir_mk_dummy_extern_decl(lean_object*, lean_object*, lean_object*); +static lean_object* l_Lean_IR_ToIR_lowerResultType_resultTypeForArity___closed__1; +static lean_object* l_Lean_IR_ToIR_lowerLet___closed__1; +lean_object* lean_array_get(lean_object*, lean_object*, lean_object*); +uint8_t lean_nat_dec_le(lean_object*, lean_object*); +uint8_t lean_usize_dec_lt(size_t, size_t); +static lean_object* l_List_forIn_x27_loop___at_Lean_IR_ToIR_lowerEnumToScalarType___spec__2___closed__2; +static lean_object* l_Lean_IR_ToIR_lowerLet___closed__25; +static lean_object* l_Lean_IR_ToIR_lowerType___closed__8; +lean_object* lean_nat_add(lean_object*, lean_object*); +LEAN_EXPORT lean_object* l_panic___at_Lean_IR_ToIR_lowerEnumToScalarType___spec__1(lean_object*, lean_object*, lean_object*, lean_object*, lean_object*); +static lean_object* l_Lean_IR_ToIR_lowerResultType_resultTypeForArity___closed__3; +uint8_t l_Lean_Expr_isForall(lean_object*); +LEAN_EXPORT lean_object* l_Lean_IR_ToIR_addDecl___boxed(lean_object*, lean_object*, lean_object*, lean_object*, lean_object*); +lean_object* lean_array_uset(lean_object*, size_t, lean_object*); +static lean_object* l_Lean_IR_ToIR_lowerType___closed__10; +static lean_object* l_panic___at_Lean_IR_ToIR_getCtorInfo___spec__1___closed__1; +static lean_object* l_Lean_IR_ToIR_addDecl___closed__3; +lean_object* lean_mk_empty_array_with_capacity(lean_object*); +LEAN_EXPORT lean_object* l_Lean_throwError___at_Lean_IR_ToIR_lowerLet___spec__1(lean_object*, lean_object*, lean_object*, lean_object*, lean_object*); +LEAN_EXPORT lean_object* l_Lean_IR_ToIR_lowerAlt(lean_object*, lean_object*, lean_object*, lean_object*, lean_object*, lean_object*); +size_t lean_usize_land(size_t, size_t); +static lean_object* l_List_forIn_x27_loop___at_Lean_IR_ToIR_lowerEnumToScalarType___spec__2___closed__7; +LEAN_EXPORT lean_object* l_Lean_IR_ToIR_lowerResultType_resultTypeForArity___boxed(lean_object*, lean_object*); +LEAN_EXPORT lean_object* l_Lean_IR_ToIR_instInhabitedTranslatedProj; +LEAN_EXPORT lean_object* l_Lean_IR_ToIR_lowerLet___lambda__4(lean_object*, lean_object*, lean_object*, lean_object*, lean_object*, lean_object*, lean_object*, lean_object*, lean_object*, lean_object*); +static lean_object* l_Lean_IR_ToIR_getCtorInfo___closed__1; +LEAN_EXPORT lean_object* l_List_forIn_x27_loop___at_Lean_IR_ToIR_lowerEnumToScalarType___spec__2(lean_object*, lean_object*, lean_object*, lean_object*, lean_object*, lean_object*, lean_object*, lean_object*, lean_object*, lean_object*, lean_object*, lean_object*); +LEAN_EXPORT lean_object* l_Lean_IR_ToIR_lowerAlt_loop(lean_object*, lean_object*, lean_object*, lean_object*, lean_object*, lean_object*, lean_object*, lean_object*, lean_object*, lean_object*); +static lean_object* _init_l_Lean_IR_ToIR_M_run___rarg___closed__1() { +_start: +{ +lean_object* x_1; lean_object* x_2; lean_object* x_3; +x_1 = lean_unsigned_to_nat(10u); +x_2 = lean_unsigned_to_nat(1u); +x_3 = l_Nat_nextPowerOfTwo_go(x_1, x_2, lean_box(0)); +return x_3; +} +} +static lean_object* _init_l_Lean_IR_ToIR_M_run___rarg___closed__2() { +_start: +{ +lean_object* x_1; lean_object* x_2; lean_object* x_3; +x_1 = lean_box(0); +x_2 = l_Lean_IR_ToIR_M_run___rarg___closed__1; +x_3 = lean_mk_array(x_2, x_1); +return x_3; +} +} 
+static lean_object* _init_l_Lean_IR_ToIR_M_run___rarg___closed__3() { +_start: +{ +lean_object* x_1; lean_object* x_2; lean_object* x_3; +x_1 = lean_unsigned_to_nat(0u); +x_2 = l_Lean_IR_ToIR_M_run___rarg___closed__2; +x_3 = lean_alloc_ctor(0, 2, 0); +lean_ctor_set(x_3, 0, x_1); +lean_ctor_set(x_3, 1, x_2); +return x_3; +} +} +static lean_object* _init_l_Lean_IR_ToIR_M_run___rarg___closed__4() { +_start: +{ +lean_object* x_1; lean_object* x_2; lean_object* x_3; +x_1 = l_Lean_IR_ToIR_M_run___rarg___closed__3; +x_2 = lean_unsigned_to_nat(1u); +x_3 = lean_alloc_ctor(0, 2, 0); +lean_ctor_set(x_3, 0, x_1); +lean_ctor_set(x_3, 1, x_2); +return x_3; +} +} +LEAN_EXPORT lean_object* l_Lean_IR_ToIR_M_run___rarg(lean_object* x_1, lean_object* x_2, lean_object* x_3, lean_object* x_4) { +_start: +{ +lean_object* x_5; lean_object* x_6; +x_5 = l_Lean_IR_ToIR_M_run___rarg___closed__4; +x_6 = lean_apply_4(x_1, x_5, x_2, x_3, x_4); +if (lean_obj_tag(x_6) == 0) +{ +uint8_t x_7; +x_7 = !lean_is_exclusive(x_6); +if (x_7 == 0) +{ +lean_object* x_8; lean_object* x_9; +x_8 = lean_ctor_get(x_6, 0); +x_9 = lean_ctor_get(x_8, 0); +lean_inc(x_9); +lean_dec(x_8); +lean_ctor_set(x_6, 0, x_9); +return x_6; +} +else +{ +lean_object* x_10; lean_object* x_11; lean_object* x_12; lean_object* x_13; +x_10 = lean_ctor_get(x_6, 0); +x_11 = lean_ctor_get(x_6, 1); +lean_inc(x_11); +lean_inc(x_10); +lean_dec(x_6); +x_12 = lean_ctor_get(x_10, 0); +lean_inc(x_12); +lean_dec(x_10); +x_13 = lean_alloc_ctor(0, 2, 0); +lean_ctor_set(x_13, 0, x_12); +lean_ctor_set(x_13, 1, x_11); +return x_13; +} +} +else +{ +uint8_t x_14; +x_14 = !lean_is_exclusive(x_6); +if (x_14 == 0) +{ +return x_6; +} +else +{ +lean_object* x_15; lean_object* x_16; lean_object* x_17; +x_15 = lean_ctor_get(x_6, 0); +x_16 = lean_ctor_get(x_6, 1); +lean_inc(x_16); +lean_inc(x_15); +lean_dec(x_6); +x_17 = lean_alloc_ctor(1, 2, 0); +lean_ctor_set(x_17, 0, x_15); +lean_ctor_set(x_17, 1, x_16); +return x_17; +} +} +} +} +LEAN_EXPORT lean_object* l_Lean_IR_ToIR_M_run(lean_object* x_1) { +_start: +{ +lean_object* x_2; +x_2 = lean_alloc_closure((void*)(l_Lean_IR_ToIR_M_run___rarg), 4, 0); +return x_2; +} +} +LEAN_EXPORT uint8_t l_Std_DHashMap_Internal_AssocList_contains___at_Lean_IR_ToIR_bindVar___spec__1(lean_object* x_1, lean_object* x_2) { +_start: +{ +if (lean_obj_tag(x_2) == 0) +{ +uint8_t x_3; +x_3 = 0; +return x_3; +} +else +{ +lean_object* x_4; lean_object* x_5; uint8_t x_6; +x_4 = lean_ctor_get(x_2, 0); +x_5 = lean_ctor_get(x_2, 2); +x_6 = lean_name_eq(x_4, x_1); +if (x_6 == 0) +{ +x_2 = x_5; +goto _start; +} +else +{ +uint8_t x_8; +x_8 = 1; +return x_8; +} +} +} +} +LEAN_EXPORT lean_object* l_Std_DHashMap_Internal_AssocList_foldlM___at_Lean_IR_ToIR_bindVar___spec__4(lean_object* x_1, lean_object* x_2) { +_start: +{ +if (lean_obj_tag(x_2) == 0) +{ +return x_1; +} +else +{ +uint8_t x_3; +x_3 = !lean_is_exclusive(x_2); +if (x_3 == 0) +{ +lean_object* x_4; lean_object* x_5; lean_object* x_6; uint64_t x_7; uint64_t x_8; uint64_t x_9; uint64_t x_10; uint64_t x_11; uint64_t x_12; uint64_t x_13; size_t x_14; size_t x_15; size_t x_16; size_t x_17; size_t x_18; lean_object* x_19; lean_object* x_20; +x_4 = lean_ctor_get(x_2, 0); +x_5 = lean_ctor_get(x_2, 2); +x_6 = lean_array_get_size(x_1); +x_7 = l___private_Lean_Expr_0__Lean_hashFVarId____x40_Lean_Expr___hyg_1730_(x_4); +x_8 = 32; +x_9 = lean_uint64_shift_right(x_7, x_8); +x_10 = lean_uint64_xor(x_7, x_9); +x_11 = 16; +x_12 = lean_uint64_shift_right(x_10, x_11); +x_13 = lean_uint64_xor(x_10, x_12); +x_14 = 
lean_uint64_to_usize(x_13); +x_15 = lean_usize_of_nat(x_6); +lean_dec(x_6); +x_16 = 1; +x_17 = lean_usize_sub(x_15, x_16); +x_18 = lean_usize_land(x_14, x_17); +x_19 = lean_array_uget(x_1, x_18); +lean_ctor_set(x_2, 2, x_19); +x_20 = lean_array_uset(x_1, x_18, x_2); +x_1 = x_20; +x_2 = x_5; +goto _start; +} +else +{ +lean_object* x_22; lean_object* x_23; lean_object* x_24; lean_object* x_25; uint64_t x_26; uint64_t x_27; uint64_t x_28; uint64_t x_29; uint64_t x_30; uint64_t x_31; uint64_t x_32; size_t x_33; size_t x_34; size_t x_35; size_t x_36; size_t x_37; lean_object* x_38; lean_object* x_39; lean_object* x_40; +x_22 = lean_ctor_get(x_2, 0); +x_23 = lean_ctor_get(x_2, 1); +x_24 = lean_ctor_get(x_2, 2); +lean_inc(x_24); +lean_inc(x_23); +lean_inc(x_22); +lean_dec(x_2); +x_25 = lean_array_get_size(x_1); +x_26 = l___private_Lean_Expr_0__Lean_hashFVarId____x40_Lean_Expr___hyg_1730_(x_22); +x_27 = 32; +x_28 = lean_uint64_shift_right(x_26, x_27); +x_29 = lean_uint64_xor(x_26, x_28); +x_30 = 16; +x_31 = lean_uint64_shift_right(x_29, x_30); +x_32 = lean_uint64_xor(x_29, x_31); +x_33 = lean_uint64_to_usize(x_32); +x_34 = lean_usize_of_nat(x_25); +lean_dec(x_25); +x_35 = 1; +x_36 = lean_usize_sub(x_34, x_35); +x_37 = lean_usize_land(x_33, x_36); +x_38 = lean_array_uget(x_1, x_37); +x_39 = lean_alloc_ctor(1, 3, 0); +lean_ctor_set(x_39, 0, x_22); +lean_ctor_set(x_39, 1, x_23); +lean_ctor_set(x_39, 2, x_38); +x_40 = lean_array_uset(x_1, x_37, x_39); +x_1 = x_40; +x_2 = x_24; +goto _start; +} +} +} +} +LEAN_EXPORT lean_object* l_Std_DHashMap_Internal_Raw_u2080_expand_go___at_Lean_IR_ToIR_bindVar___spec__3(lean_object* x_1, lean_object* x_2, lean_object* x_3) { +_start: +{ +lean_object* x_4; uint8_t x_5; +x_4 = lean_array_get_size(x_2); +x_5 = lean_nat_dec_lt(x_1, x_4); +lean_dec(x_4); +if (x_5 == 0) +{ +lean_dec(x_2); +lean_dec(x_1); +return x_3; +} +else +{ +lean_object* x_6; lean_object* x_7; lean_object* x_8; lean_object* x_9; lean_object* x_10; lean_object* x_11; +x_6 = lean_array_fget(x_2, x_1); +x_7 = lean_box(0); +x_8 = lean_array_fset(x_2, x_1, x_7); +x_9 = l_Std_DHashMap_Internal_AssocList_foldlM___at_Lean_IR_ToIR_bindVar___spec__4(x_3, x_6); +x_10 = lean_unsigned_to_nat(1u); +x_11 = lean_nat_add(x_1, x_10); +lean_dec(x_1); +x_1 = x_11; +x_2 = x_8; +x_3 = x_9; +goto _start; +} +} +} +LEAN_EXPORT lean_object* l_Std_DHashMap_Internal_Raw_u2080_expand___at_Lean_IR_ToIR_bindVar___spec__2(lean_object* x_1) { +_start: +{ +lean_object* x_2; lean_object* x_3; lean_object* x_4; lean_object* x_5; lean_object* x_6; lean_object* x_7; lean_object* x_8; +x_2 = lean_array_get_size(x_1); +x_3 = lean_unsigned_to_nat(2u); +x_4 = lean_nat_mul(x_2, x_3); +lean_dec(x_2); +x_5 = lean_box(0); +x_6 = lean_mk_array(x_4, x_5); +x_7 = lean_unsigned_to_nat(0u); +x_8 = l_Std_DHashMap_Internal_Raw_u2080_expand_go___at_Lean_IR_ToIR_bindVar___spec__3(x_7, x_1, x_6); +return x_8; +} +} +LEAN_EXPORT lean_object* l_Lean_IR_ToIR_bindVar(lean_object* x_1, lean_object* x_2, lean_object* x_3, lean_object* x_4, lean_object* x_5) { +_start: +{ +lean_object* x_6; lean_object* x_7; lean_object* x_8; lean_object* x_9; lean_object* x_10; uint8_t x_11; +x_6 = lean_ctor_get(x_2, 1); +lean_inc(x_6); +x_7 = lean_ctor_get(x_2, 0); +lean_inc(x_7); +lean_dec(x_2); +lean_inc(x_6); +x_8 = lean_alloc_ctor(0, 1, 0); +lean_ctor_set(x_8, 0, x_6); +x_9 = lean_unsigned_to_nat(1u); +x_10 = lean_nat_add(x_6, x_9); +x_11 = !lean_is_exclusive(x_7); +if (x_11 == 0) +{ +lean_object* x_12; lean_object* x_13; lean_object* x_14; uint64_t x_15; uint64_t x_16; 
uint64_t x_17; uint64_t x_18; uint64_t x_19; uint64_t x_20; uint64_t x_21; size_t x_22; size_t x_23; size_t x_24; size_t x_25; size_t x_26; lean_object* x_27; uint8_t x_28; +x_12 = lean_ctor_get(x_7, 0); +x_13 = lean_ctor_get(x_7, 1); +x_14 = lean_array_get_size(x_13); +x_15 = l___private_Lean_Expr_0__Lean_hashFVarId____x40_Lean_Expr___hyg_1730_(x_1); +x_16 = 32; +x_17 = lean_uint64_shift_right(x_15, x_16); +x_18 = lean_uint64_xor(x_15, x_17); +x_19 = 16; +x_20 = lean_uint64_shift_right(x_18, x_19); +x_21 = lean_uint64_xor(x_18, x_20); +x_22 = lean_uint64_to_usize(x_21); +x_23 = lean_usize_of_nat(x_14); +lean_dec(x_14); +x_24 = 1; +x_25 = lean_usize_sub(x_23, x_24); +x_26 = lean_usize_land(x_22, x_25); +x_27 = lean_array_uget(x_13, x_26); +x_28 = l_Std_DHashMap_Internal_AssocList_contains___at_Lean_IR_ToIR_bindVar___spec__1(x_1, x_27); +if (x_28 == 0) +{ +lean_object* x_29; lean_object* x_30; lean_object* x_31; lean_object* x_32; lean_object* x_33; lean_object* x_34; lean_object* x_35; lean_object* x_36; uint8_t x_37; +x_29 = lean_nat_add(x_12, x_9); +lean_dec(x_12); +x_30 = lean_alloc_ctor(1, 3, 0); +lean_ctor_set(x_30, 0, x_1); +lean_ctor_set(x_30, 1, x_8); +lean_ctor_set(x_30, 2, x_27); +x_31 = lean_array_uset(x_13, x_26, x_30); +x_32 = lean_unsigned_to_nat(4u); +x_33 = lean_nat_mul(x_29, x_32); +x_34 = lean_unsigned_to_nat(3u); +x_35 = lean_nat_div(x_33, x_34); +lean_dec(x_33); +x_36 = lean_array_get_size(x_31); +x_37 = lean_nat_dec_le(x_35, x_36); +lean_dec(x_36); +lean_dec(x_35); +if (x_37 == 0) +{ +lean_object* x_38; lean_object* x_39; lean_object* x_40; lean_object* x_41; +x_38 = l_Std_DHashMap_Internal_Raw_u2080_expand___at_Lean_IR_ToIR_bindVar___spec__2(x_31); +lean_ctor_set(x_7, 1, x_38); +lean_ctor_set(x_7, 0, x_29); +x_39 = lean_alloc_ctor(0, 2, 0); +lean_ctor_set(x_39, 0, x_7); +lean_ctor_set(x_39, 1, x_10); +x_40 = lean_alloc_ctor(0, 2, 0); +lean_ctor_set(x_40, 0, x_6); +lean_ctor_set(x_40, 1, x_39); +x_41 = lean_alloc_ctor(0, 2, 0); +lean_ctor_set(x_41, 0, x_40); +lean_ctor_set(x_41, 1, x_5); +return x_41; +} +else +{ +lean_object* x_42; lean_object* x_43; lean_object* x_44; +lean_ctor_set(x_7, 1, x_31); +lean_ctor_set(x_7, 0, x_29); +x_42 = lean_alloc_ctor(0, 2, 0); +lean_ctor_set(x_42, 0, x_7); +lean_ctor_set(x_42, 1, x_10); +x_43 = lean_alloc_ctor(0, 2, 0); +lean_ctor_set(x_43, 0, x_6); +lean_ctor_set(x_43, 1, x_42); +x_44 = lean_alloc_ctor(0, 2, 0); +lean_ctor_set(x_44, 0, x_43); +lean_ctor_set(x_44, 1, x_5); +return x_44; +} +} +else +{ +lean_object* x_45; lean_object* x_46; lean_object* x_47; +lean_dec(x_27); +lean_dec(x_8); +lean_dec(x_1); +x_45 = lean_alloc_ctor(0, 2, 0); +lean_ctor_set(x_45, 0, x_7); +lean_ctor_set(x_45, 1, x_10); +x_46 = lean_alloc_ctor(0, 2, 0); +lean_ctor_set(x_46, 0, x_6); +lean_ctor_set(x_46, 1, x_45); +x_47 = lean_alloc_ctor(0, 2, 0); +lean_ctor_set(x_47, 0, x_46); +lean_ctor_set(x_47, 1, x_5); +return x_47; +} +} +else +{ +lean_object* x_48; lean_object* x_49; lean_object* x_50; uint64_t x_51; uint64_t x_52; uint64_t x_53; uint64_t x_54; uint64_t x_55; uint64_t x_56; uint64_t x_57; size_t x_58; size_t x_59; size_t x_60; size_t x_61; size_t x_62; lean_object* x_63; uint8_t x_64; +x_48 = lean_ctor_get(x_7, 0); +x_49 = lean_ctor_get(x_7, 1); +lean_inc(x_49); +lean_inc(x_48); +lean_dec(x_7); +x_50 = lean_array_get_size(x_49); +x_51 = l___private_Lean_Expr_0__Lean_hashFVarId____x40_Lean_Expr___hyg_1730_(x_1); +x_52 = 32; +x_53 = lean_uint64_shift_right(x_51, x_52); +x_54 = lean_uint64_xor(x_51, x_53); +x_55 = 16; +x_56 = 
lean_uint64_shift_right(x_54, x_55); +x_57 = lean_uint64_xor(x_54, x_56); +x_58 = lean_uint64_to_usize(x_57); +x_59 = lean_usize_of_nat(x_50); +lean_dec(x_50); +x_60 = 1; +x_61 = lean_usize_sub(x_59, x_60); +x_62 = lean_usize_land(x_58, x_61); +x_63 = lean_array_uget(x_49, x_62); +x_64 = l_Std_DHashMap_Internal_AssocList_contains___at_Lean_IR_ToIR_bindVar___spec__1(x_1, x_63); +if (x_64 == 0) +{ +lean_object* x_65; lean_object* x_66; lean_object* x_67; lean_object* x_68; lean_object* x_69; lean_object* x_70; lean_object* x_71; lean_object* x_72; uint8_t x_73; +x_65 = lean_nat_add(x_48, x_9); +lean_dec(x_48); +x_66 = lean_alloc_ctor(1, 3, 0); +lean_ctor_set(x_66, 0, x_1); +lean_ctor_set(x_66, 1, x_8); +lean_ctor_set(x_66, 2, x_63); +x_67 = lean_array_uset(x_49, x_62, x_66); +x_68 = lean_unsigned_to_nat(4u); +x_69 = lean_nat_mul(x_65, x_68); +x_70 = lean_unsigned_to_nat(3u); +x_71 = lean_nat_div(x_69, x_70); +lean_dec(x_69); +x_72 = lean_array_get_size(x_67); +x_73 = lean_nat_dec_le(x_71, x_72); +lean_dec(x_72); +lean_dec(x_71); +if (x_73 == 0) +{ +lean_object* x_74; lean_object* x_75; lean_object* x_76; lean_object* x_77; lean_object* x_78; +x_74 = l_Std_DHashMap_Internal_Raw_u2080_expand___at_Lean_IR_ToIR_bindVar___spec__2(x_67); +x_75 = lean_alloc_ctor(0, 2, 0); +lean_ctor_set(x_75, 0, x_65); +lean_ctor_set(x_75, 1, x_74); +x_76 = lean_alloc_ctor(0, 2, 0); +lean_ctor_set(x_76, 0, x_75); +lean_ctor_set(x_76, 1, x_10); +x_77 = lean_alloc_ctor(0, 2, 0); +lean_ctor_set(x_77, 0, x_6); +lean_ctor_set(x_77, 1, x_76); +x_78 = lean_alloc_ctor(0, 2, 0); +lean_ctor_set(x_78, 0, x_77); +lean_ctor_set(x_78, 1, x_5); +return x_78; +} +else +{ +lean_object* x_79; lean_object* x_80; lean_object* x_81; lean_object* x_82; +x_79 = lean_alloc_ctor(0, 2, 0); +lean_ctor_set(x_79, 0, x_65); +lean_ctor_set(x_79, 1, x_67); +x_80 = lean_alloc_ctor(0, 2, 0); +lean_ctor_set(x_80, 0, x_79); +lean_ctor_set(x_80, 1, x_10); +x_81 = lean_alloc_ctor(0, 2, 0); +lean_ctor_set(x_81, 0, x_6); +lean_ctor_set(x_81, 1, x_80); +x_82 = lean_alloc_ctor(0, 2, 0); +lean_ctor_set(x_82, 0, x_81); +lean_ctor_set(x_82, 1, x_5); +return x_82; +} +} +else +{ +lean_object* x_83; lean_object* x_84; lean_object* x_85; lean_object* x_86; +lean_dec(x_63); +lean_dec(x_8); +lean_dec(x_1); +x_83 = lean_alloc_ctor(0, 2, 0); +lean_ctor_set(x_83, 0, x_48); +lean_ctor_set(x_83, 1, x_49); +x_84 = lean_alloc_ctor(0, 2, 0); +lean_ctor_set(x_84, 0, x_83); +lean_ctor_set(x_84, 1, x_10); +x_85 = lean_alloc_ctor(0, 2, 0); +lean_ctor_set(x_85, 0, x_6); +lean_ctor_set(x_85, 1, x_84); +x_86 = lean_alloc_ctor(0, 2, 0); +lean_ctor_set(x_86, 0, x_85); +lean_ctor_set(x_86, 1, x_5); +return x_86; +} +} +} +} +LEAN_EXPORT lean_object* l_Std_DHashMap_Internal_AssocList_contains___at_Lean_IR_ToIR_bindVar___spec__1___boxed(lean_object* x_1, lean_object* x_2) { +_start: +{ +uint8_t x_3; lean_object* x_4; +x_3 = l_Std_DHashMap_Internal_AssocList_contains___at_Lean_IR_ToIR_bindVar___spec__1(x_1, x_2); +lean_dec(x_2); +lean_dec(x_1); +x_4 = lean_box(x_3); +return x_4; +} +} +LEAN_EXPORT lean_object* l_Lean_IR_ToIR_bindVar___boxed(lean_object* x_1, lean_object* x_2, lean_object* x_3, lean_object* x_4, lean_object* x_5) { +_start: +{ +lean_object* x_6; +x_6 = l_Lean_IR_ToIR_bindVar(x_1, x_2, x_3, x_4, x_5); +lean_dec(x_4); +lean_dec(x_3); +return x_6; +} +} +LEAN_EXPORT lean_object* l_Lean_IR_ToIR_bindVarToVarId(lean_object* x_1, lean_object* x_2, lean_object* x_3, lean_object* x_4, lean_object* x_5, lean_object* x_6) { +_start: +{ +uint8_t x_7; +x_7 = 
!lean_is_exclusive(x_3); +if (x_7 == 0) +{ +lean_object* x_8; lean_object* x_9; uint8_t x_10; +x_8 = lean_ctor_get(x_3, 0); +x_9 = lean_alloc_ctor(0, 1, 0); +lean_ctor_set(x_9, 0, x_2); +x_10 = !lean_is_exclusive(x_8); +if (x_10 == 0) +{ +lean_object* x_11; lean_object* x_12; lean_object* x_13; uint64_t x_14; uint64_t x_15; uint64_t x_16; uint64_t x_17; uint64_t x_18; uint64_t x_19; uint64_t x_20; size_t x_21; size_t x_22; size_t x_23; size_t x_24; size_t x_25; lean_object* x_26; uint8_t x_27; +x_11 = lean_ctor_get(x_8, 0); +x_12 = lean_ctor_get(x_8, 1); +x_13 = lean_array_get_size(x_12); +x_14 = l___private_Lean_Expr_0__Lean_hashFVarId____x40_Lean_Expr___hyg_1730_(x_1); +x_15 = 32; +x_16 = lean_uint64_shift_right(x_14, x_15); +x_17 = lean_uint64_xor(x_14, x_16); +x_18 = 16; +x_19 = lean_uint64_shift_right(x_17, x_18); +x_20 = lean_uint64_xor(x_17, x_19); +x_21 = lean_uint64_to_usize(x_20); +x_22 = lean_usize_of_nat(x_13); +lean_dec(x_13); +x_23 = 1; +x_24 = lean_usize_sub(x_22, x_23); +x_25 = lean_usize_land(x_21, x_24); +x_26 = lean_array_uget(x_12, x_25); +x_27 = l_Std_DHashMap_Internal_AssocList_contains___at_Lean_IR_ToIR_bindVar___spec__1(x_1, x_26); +if (x_27 == 0) +{ +lean_object* x_28; lean_object* x_29; lean_object* x_30; lean_object* x_31; lean_object* x_32; lean_object* x_33; lean_object* x_34; lean_object* x_35; lean_object* x_36; uint8_t x_37; +x_28 = lean_unsigned_to_nat(1u); +x_29 = lean_nat_add(x_11, x_28); +lean_dec(x_11); +x_30 = lean_alloc_ctor(1, 3, 0); +lean_ctor_set(x_30, 0, x_1); +lean_ctor_set(x_30, 1, x_9); +lean_ctor_set(x_30, 2, x_26); +x_31 = lean_array_uset(x_12, x_25, x_30); +x_32 = lean_unsigned_to_nat(4u); +x_33 = lean_nat_mul(x_29, x_32); +x_34 = lean_unsigned_to_nat(3u); +x_35 = lean_nat_div(x_33, x_34); +lean_dec(x_33); +x_36 = lean_array_get_size(x_31); +x_37 = lean_nat_dec_le(x_35, x_36); +lean_dec(x_36); +lean_dec(x_35); +if (x_37 == 0) +{ +lean_object* x_38; lean_object* x_39; lean_object* x_40; lean_object* x_41; +x_38 = l_Std_DHashMap_Internal_Raw_u2080_expand___at_Lean_IR_ToIR_bindVar___spec__2(x_31); +lean_ctor_set(x_8, 1, x_38); +lean_ctor_set(x_8, 0, x_29); +x_39 = lean_box(0); +x_40 = lean_alloc_ctor(0, 2, 0); +lean_ctor_set(x_40, 0, x_39); +lean_ctor_set(x_40, 1, x_3); +x_41 = lean_alloc_ctor(0, 2, 0); +lean_ctor_set(x_41, 0, x_40); +lean_ctor_set(x_41, 1, x_6); +return x_41; +} +else +{ +lean_object* x_42; lean_object* x_43; lean_object* x_44; +lean_ctor_set(x_8, 1, x_31); +lean_ctor_set(x_8, 0, x_29); +x_42 = lean_box(0); +x_43 = lean_alloc_ctor(0, 2, 0); +lean_ctor_set(x_43, 0, x_42); +lean_ctor_set(x_43, 1, x_3); +x_44 = lean_alloc_ctor(0, 2, 0); +lean_ctor_set(x_44, 0, x_43); +lean_ctor_set(x_44, 1, x_6); +return x_44; +} +} +else +{ +lean_object* x_45; lean_object* x_46; lean_object* x_47; +lean_dec(x_26); +lean_dec(x_9); +lean_dec(x_1); +x_45 = lean_box(0); +x_46 = lean_alloc_ctor(0, 2, 0); +lean_ctor_set(x_46, 0, x_45); +lean_ctor_set(x_46, 1, x_3); +x_47 = lean_alloc_ctor(0, 2, 0); +lean_ctor_set(x_47, 0, x_46); +lean_ctor_set(x_47, 1, x_6); +return x_47; +} +} +else +{ +lean_object* x_48; lean_object* x_49; lean_object* x_50; uint64_t x_51; uint64_t x_52; uint64_t x_53; uint64_t x_54; uint64_t x_55; uint64_t x_56; uint64_t x_57; size_t x_58; size_t x_59; size_t x_60; size_t x_61; size_t x_62; lean_object* x_63; uint8_t x_64; +x_48 = lean_ctor_get(x_8, 0); +x_49 = lean_ctor_get(x_8, 1); +lean_inc(x_49); +lean_inc(x_48); +lean_dec(x_8); +x_50 = lean_array_get_size(x_49); +x_51 = 
l___private_Lean_Expr_0__Lean_hashFVarId____x40_Lean_Expr___hyg_1730_(x_1); +x_52 = 32; +x_53 = lean_uint64_shift_right(x_51, x_52); +x_54 = lean_uint64_xor(x_51, x_53); +x_55 = 16; +x_56 = lean_uint64_shift_right(x_54, x_55); +x_57 = lean_uint64_xor(x_54, x_56); +x_58 = lean_uint64_to_usize(x_57); +x_59 = lean_usize_of_nat(x_50); +lean_dec(x_50); +x_60 = 1; +x_61 = lean_usize_sub(x_59, x_60); +x_62 = lean_usize_land(x_58, x_61); +x_63 = lean_array_uget(x_49, x_62); +x_64 = l_Std_DHashMap_Internal_AssocList_contains___at_Lean_IR_ToIR_bindVar___spec__1(x_1, x_63); +if (x_64 == 0) +{ +lean_object* x_65; lean_object* x_66; lean_object* x_67; lean_object* x_68; lean_object* x_69; lean_object* x_70; lean_object* x_71; lean_object* x_72; lean_object* x_73; uint8_t x_74; +x_65 = lean_unsigned_to_nat(1u); +x_66 = lean_nat_add(x_48, x_65); +lean_dec(x_48); +x_67 = lean_alloc_ctor(1, 3, 0); +lean_ctor_set(x_67, 0, x_1); +lean_ctor_set(x_67, 1, x_9); +lean_ctor_set(x_67, 2, x_63); +x_68 = lean_array_uset(x_49, x_62, x_67); +x_69 = lean_unsigned_to_nat(4u); +x_70 = lean_nat_mul(x_66, x_69); +x_71 = lean_unsigned_to_nat(3u); +x_72 = lean_nat_div(x_70, x_71); +lean_dec(x_70); +x_73 = lean_array_get_size(x_68); +x_74 = lean_nat_dec_le(x_72, x_73); +lean_dec(x_73); +lean_dec(x_72); +if (x_74 == 0) +{ +lean_object* x_75; lean_object* x_76; lean_object* x_77; lean_object* x_78; lean_object* x_79; +x_75 = l_Std_DHashMap_Internal_Raw_u2080_expand___at_Lean_IR_ToIR_bindVar___spec__2(x_68); +x_76 = lean_alloc_ctor(0, 2, 0); +lean_ctor_set(x_76, 0, x_66); +lean_ctor_set(x_76, 1, x_75); +lean_ctor_set(x_3, 0, x_76); +x_77 = lean_box(0); +x_78 = lean_alloc_ctor(0, 2, 0); +lean_ctor_set(x_78, 0, x_77); +lean_ctor_set(x_78, 1, x_3); +x_79 = lean_alloc_ctor(0, 2, 0); +lean_ctor_set(x_79, 0, x_78); +lean_ctor_set(x_79, 1, x_6); +return x_79; +} +else +{ +lean_object* x_80; lean_object* x_81; lean_object* x_82; lean_object* x_83; +x_80 = lean_alloc_ctor(0, 2, 0); +lean_ctor_set(x_80, 0, x_66); +lean_ctor_set(x_80, 1, x_68); +lean_ctor_set(x_3, 0, x_80); +x_81 = lean_box(0); +x_82 = lean_alloc_ctor(0, 2, 0); +lean_ctor_set(x_82, 0, x_81); +lean_ctor_set(x_82, 1, x_3); +x_83 = lean_alloc_ctor(0, 2, 0); +lean_ctor_set(x_83, 0, x_82); +lean_ctor_set(x_83, 1, x_6); +return x_83; +} +} +else +{ +lean_object* x_84; lean_object* x_85; lean_object* x_86; lean_object* x_87; +lean_dec(x_63); +lean_dec(x_9); +lean_dec(x_1); +x_84 = lean_alloc_ctor(0, 2, 0); +lean_ctor_set(x_84, 0, x_48); +lean_ctor_set(x_84, 1, x_49); +lean_ctor_set(x_3, 0, x_84); +x_85 = lean_box(0); +x_86 = lean_alloc_ctor(0, 2, 0); +lean_ctor_set(x_86, 0, x_85); +lean_ctor_set(x_86, 1, x_3); +x_87 = lean_alloc_ctor(0, 2, 0); +lean_ctor_set(x_87, 0, x_86); +lean_ctor_set(x_87, 1, x_6); +return x_87; +} +} +} +else +{ +lean_object* x_88; lean_object* x_89; lean_object* x_90; lean_object* x_91; lean_object* x_92; lean_object* x_93; lean_object* x_94; uint64_t x_95; uint64_t x_96; uint64_t x_97; uint64_t x_98; uint64_t x_99; uint64_t x_100; uint64_t x_101; size_t x_102; size_t x_103; size_t x_104; size_t x_105; size_t x_106; lean_object* x_107; uint8_t x_108; +x_88 = lean_ctor_get(x_3, 0); +x_89 = lean_ctor_get(x_3, 1); +lean_inc(x_89); +lean_inc(x_88); +lean_dec(x_3); +x_90 = lean_alloc_ctor(0, 1, 0); +lean_ctor_set(x_90, 0, x_2); +x_91 = lean_ctor_get(x_88, 0); +lean_inc(x_91); +x_92 = lean_ctor_get(x_88, 1); +lean_inc(x_92); +if (lean_is_exclusive(x_88)) { + lean_ctor_release(x_88, 0); + lean_ctor_release(x_88, 1); + x_93 = x_88; +} else { + lean_dec_ref(x_88); 
+ x_93 = lean_box(0); +} +x_94 = lean_array_get_size(x_92); +x_95 = l___private_Lean_Expr_0__Lean_hashFVarId____x40_Lean_Expr___hyg_1730_(x_1); +x_96 = 32; +x_97 = lean_uint64_shift_right(x_95, x_96); +x_98 = lean_uint64_xor(x_95, x_97); +x_99 = 16; +x_100 = lean_uint64_shift_right(x_98, x_99); +x_101 = lean_uint64_xor(x_98, x_100); +x_102 = lean_uint64_to_usize(x_101); +x_103 = lean_usize_of_nat(x_94); +lean_dec(x_94); +x_104 = 1; +x_105 = lean_usize_sub(x_103, x_104); +x_106 = lean_usize_land(x_102, x_105); +x_107 = lean_array_uget(x_92, x_106); +x_108 = l_Std_DHashMap_Internal_AssocList_contains___at_Lean_IR_ToIR_bindVar___spec__1(x_1, x_107); +if (x_108 == 0) +{ +lean_object* x_109; lean_object* x_110; lean_object* x_111; lean_object* x_112; lean_object* x_113; lean_object* x_114; lean_object* x_115; lean_object* x_116; lean_object* x_117; uint8_t x_118; +x_109 = lean_unsigned_to_nat(1u); +x_110 = lean_nat_add(x_91, x_109); +lean_dec(x_91); +x_111 = lean_alloc_ctor(1, 3, 0); +lean_ctor_set(x_111, 0, x_1); +lean_ctor_set(x_111, 1, x_90); +lean_ctor_set(x_111, 2, x_107); +x_112 = lean_array_uset(x_92, x_106, x_111); +x_113 = lean_unsigned_to_nat(4u); +x_114 = lean_nat_mul(x_110, x_113); +x_115 = lean_unsigned_to_nat(3u); +x_116 = lean_nat_div(x_114, x_115); +lean_dec(x_114); +x_117 = lean_array_get_size(x_112); +x_118 = lean_nat_dec_le(x_116, x_117); +lean_dec(x_117); +lean_dec(x_116); +if (x_118 == 0) +{ +lean_object* x_119; lean_object* x_120; lean_object* x_121; lean_object* x_122; lean_object* x_123; lean_object* x_124; +x_119 = l_Std_DHashMap_Internal_Raw_u2080_expand___at_Lean_IR_ToIR_bindVar___spec__2(x_112); +if (lean_is_scalar(x_93)) { + x_120 = lean_alloc_ctor(0, 2, 0); +} else { + x_120 = x_93; +} +lean_ctor_set(x_120, 0, x_110); +lean_ctor_set(x_120, 1, x_119); +x_121 = lean_alloc_ctor(0, 2, 0); +lean_ctor_set(x_121, 0, x_120); +lean_ctor_set(x_121, 1, x_89); +x_122 = lean_box(0); +x_123 = lean_alloc_ctor(0, 2, 0); +lean_ctor_set(x_123, 0, x_122); +lean_ctor_set(x_123, 1, x_121); +x_124 = lean_alloc_ctor(0, 2, 0); +lean_ctor_set(x_124, 0, x_123); +lean_ctor_set(x_124, 1, x_6); +return x_124; +} +else +{ +lean_object* x_125; lean_object* x_126; lean_object* x_127; lean_object* x_128; lean_object* x_129; +if (lean_is_scalar(x_93)) { + x_125 = lean_alloc_ctor(0, 2, 0); +} else { + x_125 = x_93; +} +lean_ctor_set(x_125, 0, x_110); +lean_ctor_set(x_125, 1, x_112); +x_126 = lean_alloc_ctor(0, 2, 0); +lean_ctor_set(x_126, 0, x_125); +lean_ctor_set(x_126, 1, x_89); +x_127 = lean_box(0); +x_128 = lean_alloc_ctor(0, 2, 0); +lean_ctor_set(x_128, 0, x_127); +lean_ctor_set(x_128, 1, x_126); +x_129 = lean_alloc_ctor(0, 2, 0); +lean_ctor_set(x_129, 0, x_128); +lean_ctor_set(x_129, 1, x_6); +return x_129; +} +} +else +{ +lean_object* x_130; lean_object* x_131; lean_object* x_132; lean_object* x_133; lean_object* x_134; +lean_dec(x_107); +lean_dec(x_90); +lean_dec(x_1); +if (lean_is_scalar(x_93)) { + x_130 = lean_alloc_ctor(0, 2, 0); +} else { + x_130 = x_93; +} +lean_ctor_set(x_130, 0, x_91); +lean_ctor_set(x_130, 1, x_92); +x_131 = lean_alloc_ctor(0, 2, 0); +lean_ctor_set(x_131, 0, x_130); +lean_ctor_set(x_131, 1, x_89); +x_132 = lean_box(0); +x_133 = lean_alloc_ctor(0, 2, 0); +lean_ctor_set(x_133, 0, x_132); +lean_ctor_set(x_133, 1, x_131); +x_134 = lean_alloc_ctor(0, 2, 0); +lean_ctor_set(x_134, 0, x_133); +lean_ctor_set(x_134, 1, x_6); +return x_134; +} +} +} +} +LEAN_EXPORT lean_object* l_Lean_IR_ToIR_bindVarToVarId___boxed(lean_object* x_1, lean_object* x_2, lean_object* x_3, 
lean_object* x_4, lean_object* x_5, lean_object* x_6) { +_start: +{ +lean_object* x_7; +x_7 = l_Lean_IR_ToIR_bindVarToVarId(x_1, x_2, x_3, x_4, x_5, x_6); +lean_dec(x_5); +lean_dec(x_4); +return x_7; +} +} +LEAN_EXPORT lean_object* l_Lean_IR_ToIR_newVar(lean_object* x_1, lean_object* x_2, lean_object* x_3, lean_object* x_4) { +_start: +{ +uint8_t x_5; +x_5 = !lean_is_exclusive(x_1); +if (x_5 == 0) +{ +lean_object* x_6; lean_object* x_7; lean_object* x_8; lean_object* x_9; lean_object* x_10; +x_6 = lean_ctor_get(x_1, 1); +x_7 = lean_unsigned_to_nat(1u); +x_8 = lean_nat_add(x_6, x_7); +lean_ctor_set(x_1, 1, x_8); +x_9 = lean_alloc_ctor(0, 2, 0); +lean_ctor_set(x_9, 0, x_6); +lean_ctor_set(x_9, 1, x_1); +x_10 = lean_alloc_ctor(0, 2, 0); +lean_ctor_set(x_10, 0, x_9); +lean_ctor_set(x_10, 1, x_4); +return x_10; +} +else +{ +lean_object* x_11; lean_object* x_12; lean_object* x_13; lean_object* x_14; lean_object* x_15; lean_object* x_16; lean_object* x_17; +x_11 = lean_ctor_get(x_1, 0); +x_12 = lean_ctor_get(x_1, 1); +lean_inc(x_12); +lean_inc(x_11); +lean_dec(x_1); +x_13 = lean_unsigned_to_nat(1u); +x_14 = lean_nat_add(x_12, x_13); +x_15 = lean_alloc_ctor(0, 2, 0); +lean_ctor_set(x_15, 0, x_11); +lean_ctor_set(x_15, 1, x_14); +x_16 = lean_alloc_ctor(0, 2, 0); +lean_ctor_set(x_16, 0, x_12); +lean_ctor_set(x_16, 1, x_15); +x_17 = lean_alloc_ctor(0, 2, 0); +lean_ctor_set(x_17, 0, x_16); +lean_ctor_set(x_17, 1, x_4); +return x_17; +} +} +} +LEAN_EXPORT lean_object* l_Lean_IR_ToIR_newVar___boxed(lean_object* x_1, lean_object* x_2, lean_object* x_3, lean_object* x_4) { +_start: +{ +lean_object* x_5; +x_5 = l_Lean_IR_ToIR_newVar(x_1, x_2, x_3, x_4); +lean_dec(x_3); +lean_dec(x_2); +return x_5; +} +} +LEAN_EXPORT lean_object* l_Lean_IR_ToIR_bindJoinPoint(lean_object* x_1, lean_object* x_2, lean_object* x_3, lean_object* x_4, lean_object* x_5) { +_start: +{ +lean_object* x_6; lean_object* x_7; lean_object* x_8; lean_object* x_9; lean_object* x_10; uint8_t x_11; +x_6 = lean_ctor_get(x_2, 1); +lean_inc(x_6); +x_7 = lean_ctor_get(x_2, 0); +lean_inc(x_7); +lean_dec(x_2); +lean_inc(x_6); +x_8 = lean_alloc_ctor(1, 1, 0); +lean_ctor_set(x_8, 0, x_6); +x_9 = lean_unsigned_to_nat(1u); +x_10 = lean_nat_add(x_6, x_9); +x_11 = !lean_is_exclusive(x_7); +if (x_11 == 0) +{ +lean_object* x_12; lean_object* x_13; lean_object* x_14; uint64_t x_15; uint64_t x_16; uint64_t x_17; uint64_t x_18; uint64_t x_19; uint64_t x_20; uint64_t x_21; size_t x_22; size_t x_23; size_t x_24; size_t x_25; size_t x_26; lean_object* x_27; uint8_t x_28; +x_12 = lean_ctor_get(x_7, 0); +x_13 = lean_ctor_get(x_7, 1); +x_14 = lean_array_get_size(x_13); +x_15 = l___private_Lean_Expr_0__Lean_hashFVarId____x40_Lean_Expr___hyg_1730_(x_1); +x_16 = 32; +x_17 = lean_uint64_shift_right(x_15, x_16); +x_18 = lean_uint64_xor(x_15, x_17); +x_19 = 16; +x_20 = lean_uint64_shift_right(x_18, x_19); +x_21 = lean_uint64_xor(x_18, x_20); +x_22 = lean_uint64_to_usize(x_21); +x_23 = lean_usize_of_nat(x_14); +lean_dec(x_14); +x_24 = 1; +x_25 = lean_usize_sub(x_23, x_24); +x_26 = lean_usize_land(x_22, x_25); +x_27 = lean_array_uget(x_13, x_26); +x_28 = l_Std_DHashMap_Internal_AssocList_contains___at_Lean_IR_ToIR_bindVar___spec__1(x_1, x_27); +if (x_28 == 0) +{ +lean_object* x_29; lean_object* x_30; lean_object* x_31; lean_object* x_32; lean_object* x_33; lean_object* x_34; lean_object* x_35; lean_object* x_36; uint8_t x_37; +x_29 = lean_nat_add(x_12, x_9); +lean_dec(x_12); +x_30 = lean_alloc_ctor(1, 3, 0); +lean_ctor_set(x_30, 0, x_1); +lean_ctor_set(x_30, 1, x_8); 
+lean_ctor_set(x_30, 2, x_27); +x_31 = lean_array_uset(x_13, x_26, x_30); +x_32 = lean_unsigned_to_nat(4u); +x_33 = lean_nat_mul(x_29, x_32); +x_34 = lean_unsigned_to_nat(3u); +x_35 = lean_nat_div(x_33, x_34); +lean_dec(x_33); +x_36 = lean_array_get_size(x_31); +x_37 = lean_nat_dec_le(x_35, x_36); +lean_dec(x_36); +lean_dec(x_35); +if (x_37 == 0) +{ +lean_object* x_38; lean_object* x_39; lean_object* x_40; lean_object* x_41; +x_38 = l_Std_DHashMap_Internal_Raw_u2080_expand___at_Lean_IR_ToIR_bindVar___spec__2(x_31); +lean_ctor_set(x_7, 1, x_38); +lean_ctor_set(x_7, 0, x_29); +x_39 = lean_alloc_ctor(0, 2, 0); +lean_ctor_set(x_39, 0, x_7); +lean_ctor_set(x_39, 1, x_10); +x_40 = lean_alloc_ctor(0, 2, 0); +lean_ctor_set(x_40, 0, x_6); +lean_ctor_set(x_40, 1, x_39); +x_41 = lean_alloc_ctor(0, 2, 0); +lean_ctor_set(x_41, 0, x_40); +lean_ctor_set(x_41, 1, x_5); +return x_41; +} +else +{ +lean_object* x_42; lean_object* x_43; lean_object* x_44; +lean_ctor_set(x_7, 1, x_31); +lean_ctor_set(x_7, 0, x_29); +x_42 = lean_alloc_ctor(0, 2, 0); +lean_ctor_set(x_42, 0, x_7); +lean_ctor_set(x_42, 1, x_10); +x_43 = lean_alloc_ctor(0, 2, 0); +lean_ctor_set(x_43, 0, x_6); +lean_ctor_set(x_43, 1, x_42); +x_44 = lean_alloc_ctor(0, 2, 0); +lean_ctor_set(x_44, 0, x_43); +lean_ctor_set(x_44, 1, x_5); +return x_44; +} +} +else +{ +lean_object* x_45; lean_object* x_46; lean_object* x_47; +lean_dec(x_27); +lean_dec(x_8); +lean_dec(x_1); +x_45 = lean_alloc_ctor(0, 2, 0); +lean_ctor_set(x_45, 0, x_7); +lean_ctor_set(x_45, 1, x_10); +x_46 = lean_alloc_ctor(0, 2, 0); +lean_ctor_set(x_46, 0, x_6); +lean_ctor_set(x_46, 1, x_45); +x_47 = lean_alloc_ctor(0, 2, 0); +lean_ctor_set(x_47, 0, x_46); +lean_ctor_set(x_47, 1, x_5); +return x_47; +} +} +else +{ +lean_object* x_48; lean_object* x_49; lean_object* x_50; uint64_t x_51; uint64_t x_52; uint64_t x_53; uint64_t x_54; uint64_t x_55; uint64_t x_56; uint64_t x_57; size_t x_58; size_t x_59; size_t x_60; size_t x_61; size_t x_62; lean_object* x_63; uint8_t x_64; +x_48 = lean_ctor_get(x_7, 0); +x_49 = lean_ctor_get(x_7, 1); +lean_inc(x_49); +lean_inc(x_48); +lean_dec(x_7); +x_50 = lean_array_get_size(x_49); +x_51 = l___private_Lean_Expr_0__Lean_hashFVarId____x40_Lean_Expr___hyg_1730_(x_1); +x_52 = 32; +x_53 = lean_uint64_shift_right(x_51, x_52); +x_54 = lean_uint64_xor(x_51, x_53); +x_55 = 16; +x_56 = lean_uint64_shift_right(x_54, x_55); +x_57 = lean_uint64_xor(x_54, x_56); +x_58 = lean_uint64_to_usize(x_57); +x_59 = lean_usize_of_nat(x_50); +lean_dec(x_50); +x_60 = 1; +x_61 = lean_usize_sub(x_59, x_60); +x_62 = lean_usize_land(x_58, x_61); +x_63 = lean_array_uget(x_49, x_62); +x_64 = l_Std_DHashMap_Internal_AssocList_contains___at_Lean_IR_ToIR_bindVar___spec__1(x_1, x_63); +if (x_64 == 0) +{ +lean_object* x_65; lean_object* x_66; lean_object* x_67; lean_object* x_68; lean_object* x_69; lean_object* x_70; lean_object* x_71; lean_object* x_72; uint8_t x_73; +x_65 = lean_nat_add(x_48, x_9); +lean_dec(x_48); +x_66 = lean_alloc_ctor(1, 3, 0); +lean_ctor_set(x_66, 0, x_1); +lean_ctor_set(x_66, 1, x_8); +lean_ctor_set(x_66, 2, x_63); +x_67 = lean_array_uset(x_49, x_62, x_66); +x_68 = lean_unsigned_to_nat(4u); +x_69 = lean_nat_mul(x_65, x_68); +x_70 = lean_unsigned_to_nat(3u); +x_71 = lean_nat_div(x_69, x_70); +lean_dec(x_69); +x_72 = lean_array_get_size(x_67); +x_73 = lean_nat_dec_le(x_71, x_72); +lean_dec(x_72); +lean_dec(x_71); +if (x_73 == 0) +{ +lean_object* x_74; lean_object* x_75; lean_object* x_76; lean_object* x_77; lean_object* x_78; +x_74 = 
l_Std_DHashMap_Internal_Raw_u2080_expand___at_Lean_IR_ToIR_bindVar___spec__2(x_67); +x_75 = lean_alloc_ctor(0, 2, 0); +lean_ctor_set(x_75, 0, x_65); +lean_ctor_set(x_75, 1, x_74); +x_76 = lean_alloc_ctor(0, 2, 0); +lean_ctor_set(x_76, 0, x_75); +lean_ctor_set(x_76, 1, x_10); +x_77 = lean_alloc_ctor(0, 2, 0); +lean_ctor_set(x_77, 0, x_6); +lean_ctor_set(x_77, 1, x_76); +x_78 = lean_alloc_ctor(0, 2, 0); +lean_ctor_set(x_78, 0, x_77); +lean_ctor_set(x_78, 1, x_5); +return x_78; +} +else +{ +lean_object* x_79; lean_object* x_80; lean_object* x_81; lean_object* x_82; +x_79 = lean_alloc_ctor(0, 2, 0); +lean_ctor_set(x_79, 0, x_65); +lean_ctor_set(x_79, 1, x_67); +x_80 = lean_alloc_ctor(0, 2, 0); +lean_ctor_set(x_80, 0, x_79); +lean_ctor_set(x_80, 1, x_10); +x_81 = lean_alloc_ctor(0, 2, 0); +lean_ctor_set(x_81, 0, x_6); +lean_ctor_set(x_81, 1, x_80); +x_82 = lean_alloc_ctor(0, 2, 0); +lean_ctor_set(x_82, 0, x_81); +lean_ctor_set(x_82, 1, x_5); +return x_82; +} +} +else +{ +lean_object* x_83; lean_object* x_84; lean_object* x_85; lean_object* x_86; +lean_dec(x_63); +lean_dec(x_8); +lean_dec(x_1); +x_83 = lean_alloc_ctor(0, 2, 0); +lean_ctor_set(x_83, 0, x_48); +lean_ctor_set(x_83, 1, x_49); +x_84 = lean_alloc_ctor(0, 2, 0); +lean_ctor_set(x_84, 0, x_83); +lean_ctor_set(x_84, 1, x_10); +x_85 = lean_alloc_ctor(0, 2, 0); +lean_ctor_set(x_85, 0, x_6); +lean_ctor_set(x_85, 1, x_84); +x_86 = lean_alloc_ctor(0, 2, 0); +lean_ctor_set(x_86, 0, x_85); +lean_ctor_set(x_86, 1, x_5); +return x_86; +} +} +} +} +LEAN_EXPORT lean_object* l_Lean_IR_ToIR_bindJoinPoint___boxed(lean_object* x_1, lean_object* x_2, lean_object* x_3, lean_object* x_4, lean_object* x_5) { +_start: +{ +lean_object* x_6; +x_6 = l_Lean_IR_ToIR_bindJoinPoint(x_1, x_2, x_3, x_4, x_5); +lean_dec(x_4); +lean_dec(x_3); +return x_6; +} +} +LEAN_EXPORT lean_object* l_Lean_IR_ToIR_bindErased(lean_object* x_1, lean_object* x_2, lean_object* x_3, lean_object* x_4, lean_object* x_5) { +_start: +{ +uint8_t x_6; +x_6 = !lean_is_exclusive(x_2); +if (x_6 == 0) +{ +lean_object* x_7; uint8_t x_8; +x_7 = lean_ctor_get(x_2, 0); +x_8 = !lean_is_exclusive(x_7); +if (x_8 == 0) +{ +lean_object* x_9; lean_object* x_10; lean_object* x_11; uint64_t x_12; uint64_t x_13; uint64_t x_14; uint64_t x_15; uint64_t x_16; uint64_t x_17; uint64_t x_18; size_t x_19; size_t x_20; size_t x_21; size_t x_22; size_t x_23; lean_object* x_24; uint8_t x_25; +x_9 = lean_ctor_get(x_7, 0); +x_10 = lean_ctor_get(x_7, 1); +x_11 = lean_array_get_size(x_10); +x_12 = l___private_Lean_Expr_0__Lean_hashFVarId____x40_Lean_Expr___hyg_1730_(x_1); +x_13 = 32; +x_14 = lean_uint64_shift_right(x_12, x_13); +x_15 = lean_uint64_xor(x_12, x_14); +x_16 = 16; +x_17 = lean_uint64_shift_right(x_15, x_16); +x_18 = lean_uint64_xor(x_15, x_17); +x_19 = lean_uint64_to_usize(x_18); +x_20 = lean_usize_of_nat(x_11); +lean_dec(x_11); +x_21 = 1; +x_22 = lean_usize_sub(x_20, x_21); +x_23 = lean_usize_land(x_19, x_22); +x_24 = lean_array_uget(x_10, x_23); +x_25 = l_Std_DHashMap_Internal_AssocList_contains___at_Lean_IR_ToIR_bindVar___spec__1(x_1, x_24); +if (x_25 == 0) +{ +lean_object* x_26; lean_object* x_27; lean_object* x_28; lean_object* x_29; lean_object* x_30; lean_object* x_31; lean_object* x_32; lean_object* x_33; lean_object* x_34; lean_object* x_35; uint8_t x_36; +x_26 = lean_unsigned_to_nat(1u); +x_27 = lean_nat_add(x_9, x_26); +lean_dec(x_9); +x_28 = lean_box(2); +x_29 = lean_alloc_ctor(1, 3, 0); +lean_ctor_set(x_29, 0, x_1); +lean_ctor_set(x_29, 1, x_28); +lean_ctor_set(x_29, 2, x_24); +x_30 = 
lean_array_uset(x_10, x_23, x_29); +x_31 = lean_unsigned_to_nat(4u); +x_32 = lean_nat_mul(x_27, x_31); +x_33 = lean_unsigned_to_nat(3u); +x_34 = lean_nat_div(x_32, x_33); +lean_dec(x_32); +x_35 = lean_array_get_size(x_30); +x_36 = lean_nat_dec_le(x_34, x_35); +lean_dec(x_35); +lean_dec(x_34); +if (x_36 == 0) +{ +lean_object* x_37; lean_object* x_38; lean_object* x_39; lean_object* x_40; +x_37 = l_Std_DHashMap_Internal_Raw_u2080_expand___at_Lean_IR_ToIR_bindVar___spec__2(x_30); +lean_ctor_set(x_7, 1, x_37); +lean_ctor_set(x_7, 0, x_27); +x_38 = lean_box(0); +x_39 = lean_alloc_ctor(0, 2, 0); +lean_ctor_set(x_39, 0, x_38); +lean_ctor_set(x_39, 1, x_2); +x_40 = lean_alloc_ctor(0, 2, 0); +lean_ctor_set(x_40, 0, x_39); +lean_ctor_set(x_40, 1, x_5); +return x_40; +} +else +{ +lean_object* x_41; lean_object* x_42; lean_object* x_43; +lean_ctor_set(x_7, 1, x_30); +lean_ctor_set(x_7, 0, x_27); +x_41 = lean_box(0); +x_42 = lean_alloc_ctor(0, 2, 0); +lean_ctor_set(x_42, 0, x_41); +lean_ctor_set(x_42, 1, x_2); +x_43 = lean_alloc_ctor(0, 2, 0); +lean_ctor_set(x_43, 0, x_42); +lean_ctor_set(x_43, 1, x_5); +return x_43; +} +} +else +{ +lean_object* x_44; lean_object* x_45; lean_object* x_46; +lean_dec(x_24); +lean_dec(x_1); +x_44 = lean_box(0); +x_45 = lean_alloc_ctor(0, 2, 0); +lean_ctor_set(x_45, 0, x_44); +lean_ctor_set(x_45, 1, x_2); +x_46 = lean_alloc_ctor(0, 2, 0); +lean_ctor_set(x_46, 0, x_45); +lean_ctor_set(x_46, 1, x_5); +return x_46; +} +} +else +{ +lean_object* x_47; lean_object* x_48; lean_object* x_49; uint64_t x_50; uint64_t x_51; uint64_t x_52; uint64_t x_53; uint64_t x_54; uint64_t x_55; uint64_t x_56; size_t x_57; size_t x_58; size_t x_59; size_t x_60; size_t x_61; lean_object* x_62; uint8_t x_63; +x_47 = lean_ctor_get(x_7, 0); +x_48 = lean_ctor_get(x_7, 1); +lean_inc(x_48); +lean_inc(x_47); +lean_dec(x_7); +x_49 = lean_array_get_size(x_48); +x_50 = l___private_Lean_Expr_0__Lean_hashFVarId____x40_Lean_Expr___hyg_1730_(x_1); +x_51 = 32; +x_52 = lean_uint64_shift_right(x_50, x_51); +x_53 = lean_uint64_xor(x_50, x_52); +x_54 = 16; +x_55 = lean_uint64_shift_right(x_53, x_54); +x_56 = lean_uint64_xor(x_53, x_55); +x_57 = lean_uint64_to_usize(x_56); +x_58 = lean_usize_of_nat(x_49); +lean_dec(x_49); +x_59 = 1; +x_60 = lean_usize_sub(x_58, x_59); +x_61 = lean_usize_land(x_57, x_60); +x_62 = lean_array_uget(x_48, x_61); +x_63 = l_Std_DHashMap_Internal_AssocList_contains___at_Lean_IR_ToIR_bindVar___spec__1(x_1, x_62); +if (x_63 == 0) +{ +lean_object* x_64; lean_object* x_65; lean_object* x_66; lean_object* x_67; lean_object* x_68; lean_object* x_69; lean_object* x_70; lean_object* x_71; lean_object* x_72; lean_object* x_73; uint8_t x_74; +x_64 = lean_unsigned_to_nat(1u); +x_65 = lean_nat_add(x_47, x_64); +lean_dec(x_47); +x_66 = lean_box(2); +x_67 = lean_alloc_ctor(1, 3, 0); +lean_ctor_set(x_67, 0, x_1); +lean_ctor_set(x_67, 1, x_66); +lean_ctor_set(x_67, 2, x_62); +x_68 = lean_array_uset(x_48, x_61, x_67); +x_69 = lean_unsigned_to_nat(4u); +x_70 = lean_nat_mul(x_65, x_69); +x_71 = lean_unsigned_to_nat(3u); +x_72 = lean_nat_div(x_70, x_71); +lean_dec(x_70); +x_73 = lean_array_get_size(x_68); +x_74 = lean_nat_dec_le(x_72, x_73); +lean_dec(x_73); +lean_dec(x_72); +if (x_74 == 0) +{ +lean_object* x_75; lean_object* x_76; lean_object* x_77; lean_object* x_78; lean_object* x_79; +x_75 = l_Std_DHashMap_Internal_Raw_u2080_expand___at_Lean_IR_ToIR_bindVar___spec__2(x_68); +x_76 = lean_alloc_ctor(0, 2, 0); +lean_ctor_set(x_76, 0, x_65); +lean_ctor_set(x_76, 1, x_75); +lean_ctor_set(x_2, 0, x_76); +x_77 = 
lean_box(0); +x_78 = lean_alloc_ctor(0, 2, 0); +lean_ctor_set(x_78, 0, x_77); +lean_ctor_set(x_78, 1, x_2); +x_79 = lean_alloc_ctor(0, 2, 0); +lean_ctor_set(x_79, 0, x_78); +lean_ctor_set(x_79, 1, x_5); +return x_79; +} +else +{ +lean_object* x_80; lean_object* x_81; lean_object* x_82; lean_object* x_83; +x_80 = lean_alloc_ctor(0, 2, 0); +lean_ctor_set(x_80, 0, x_65); +lean_ctor_set(x_80, 1, x_68); +lean_ctor_set(x_2, 0, x_80); +x_81 = lean_box(0); +x_82 = lean_alloc_ctor(0, 2, 0); +lean_ctor_set(x_82, 0, x_81); +lean_ctor_set(x_82, 1, x_2); +x_83 = lean_alloc_ctor(0, 2, 0); +lean_ctor_set(x_83, 0, x_82); +lean_ctor_set(x_83, 1, x_5); +return x_83; +} +} +else +{ +lean_object* x_84; lean_object* x_85; lean_object* x_86; lean_object* x_87; +lean_dec(x_62); +lean_dec(x_1); +x_84 = lean_alloc_ctor(0, 2, 0); +lean_ctor_set(x_84, 0, x_47); +lean_ctor_set(x_84, 1, x_48); +lean_ctor_set(x_2, 0, x_84); +x_85 = lean_box(0); +x_86 = lean_alloc_ctor(0, 2, 0); +lean_ctor_set(x_86, 0, x_85); +lean_ctor_set(x_86, 1, x_2); +x_87 = lean_alloc_ctor(0, 2, 0); +lean_ctor_set(x_87, 0, x_86); +lean_ctor_set(x_87, 1, x_5); +return x_87; +} +} +} +else +{ +lean_object* x_88; lean_object* x_89; lean_object* x_90; lean_object* x_91; lean_object* x_92; lean_object* x_93; uint64_t x_94; uint64_t x_95; uint64_t x_96; uint64_t x_97; uint64_t x_98; uint64_t x_99; uint64_t x_100; size_t x_101; size_t x_102; size_t x_103; size_t x_104; size_t x_105; lean_object* x_106; uint8_t x_107; +x_88 = lean_ctor_get(x_2, 0); +x_89 = lean_ctor_get(x_2, 1); +lean_inc(x_89); +lean_inc(x_88); +lean_dec(x_2); +x_90 = lean_ctor_get(x_88, 0); +lean_inc(x_90); +x_91 = lean_ctor_get(x_88, 1); +lean_inc(x_91); +if (lean_is_exclusive(x_88)) { + lean_ctor_release(x_88, 0); + lean_ctor_release(x_88, 1); + x_92 = x_88; +} else { + lean_dec_ref(x_88); + x_92 = lean_box(0); +} +x_93 = lean_array_get_size(x_91); +x_94 = l___private_Lean_Expr_0__Lean_hashFVarId____x40_Lean_Expr___hyg_1730_(x_1); +x_95 = 32; +x_96 = lean_uint64_shift_right(x_94, x_95); +x_97 = lean_uint64_xor(x_94, x_96); +x_98 = 16; +x_99 = lean_uint64_shift_right(x_97, x_98); +x_100 = lean_uint64_xor(x_97, x_99); +x_101 = lean_uint64_to_usize(x_100); +x_102 = lean_usize_of_nat(x_93); +lean_dec(x_93); +x_103 = 1; +x_104 = lean_usize_sub(x_102, x_103); +x_105 = lean_usize_land(x_101, x_104); +x_106 = lean_array_uget(x_91, x_105); +x_107 = l_Std_DHashMap_Internal_AssocList_contains___at_Lean_IR_ToIR_bindVar___spec__1(x_1, x_106); +if (x_107 == 0) +{ +lean_object* x_108; lean_object* x_109; lean_object* x_110; lean_object* x_111; lean_object* x_112; lean_object* x_113; lean_object* x_114; lean_object* x_115; lean_object* x_116; lean_object* x_117; uint8_t x_118; +x_108 = lean_unsigned_to_nat(1u); +x_109 = lean_nat_add(x_90, x_108); +lean_dec(x_90); +x_110 = lean_box(2); +x_111 = lean_alloc_ctor(1, 3, 0); +lean_ctor_set(x_111, 0, x_1); +lean_ctor_set(x_111, 1, x_110); +lean_ctor_set(x_111, 2, x_106); +x_112 = lean_array_uset(x_91, x_105, x_111); +x_113 = lean_unsigned_to_nat(4u); +x_114 = lean_nat_mul(x_109, x_113); +x_115 = lean_unsigned_to_nat(3u); +x_116 = lean_nat_div(x_114, x_115); +lean_dec(x_114); +x_117 = lean_array_get_size(x_112); +x_118 = lean_nat_dec_le(x_116, x_117); +lean_dec(x_117); +lean_dec(x_116); +if (x_118 == 0) +{ +lean_object* x_119; lean_object* x_120; lean_object* x_121; lean_object* x_122; lean_object* x_123; lean_object* x_124; +x_119 = l_Std_DHashMap_Internal_Raw_u2080_expand___at_Lean_IR_ToIR_bindVar___spec__2(x_112); +if (lean_is_scalar(x_92)) { + x_120 = 
lean_alloc_ctor(0, 2, 0); +} else { + x_120 = x_92; +} +lean_ctor_set(x_120, 0, x_109); +lean_ctor_set(x_120, 1, x_119); +x_121 = lean_alloc_ctor(0, 2, 0); +lean_ctor_set(x_121, 0, x_120); +lean_ctor_set(x_121, 1, x_89); +x_122 = lean_box(0); +x_123 = lean_alloc_ctor(0, 2, 0); +lean_ctor_set(x_123, 0, x_122); +lean_ctor_set(x_123, 1, x_121); +x_124 = lean_alloc_ctor(0, 2, 0); +lean_ctor_set(x_124, 0, x_123); +lean_ctor_set(x_124, 1, x_5); +return x_124; +} +else +{ +lean_object* x_125; lean_object* x_126; lean_object* x_127; lean_object* x_128; lean_object* x_129; +if (lean_is_scalar(x_92)) { + x_125 = lean_alloc_ctor(0, 2, 0); +} else { + x_125 = x_92; +} +lean_ctor_set(x_125, 0, x_109); +lean_ctor_set(x_125, 1, x_112); +x_126 = lean_alloc_ctor(0, 2, 0); +lean_ctor_set(x_126, 0, x_125); +lean_ctor_set(x_126, 1, x_89); +x_127 = lean_box(0); +x_128 = lean_alloc_ctor(0, 2, 0); +lean_ctor_set(x_128, 0, x_127); +lean_ctor_set(x_128, 1, x_126); +x_129 = lean_alloc_ctor(0, 2, 0); +lean_ctor_set(x_129, 0, x_128); +lean_ctor_set(x_129, 1, x_5); +return x_129; +} +} +else +{ +lean_object* x_130; lean_object* x_131; lean_object* x_132; lean_object* x_133; lean_object* x_134; +lean_dec(x_106); +lean_dec(x_1); +if (lean_is_scalar(x_92)) { + x_130 = lean_alloc_ctor(0, 2, 0); +} else { + x_130 = x_92; +} +lean_ctor_set(x_130, 0, x_90); +lean_ctor_set(x_130, 1, x_91); +x_131 = lean_alloc_ctor(0, 2, 0); +lean_ctor_set(x_131, 0, x_130); +lean_ctor_set(x_131, 1, x_89); +x_132 = lean_box(0); +x_133 = lean_alloc_ctor(0, 2, 0); +lean_ctor_set(x_133, 0, x_132); +lean_ctor_set(x_133, 1, x_131); +x_134 = lean_alloc_ctor(0, 2, 0); +lean_ctor_set(x_134, 0, x_133); +lean_ctor_set(x_134, 1, x_5); +return x_134; +} +} +} +} +LEAN_EXPORT lean_object* l_Lean_IR_ToIR_bindErased___boxed(lean_object* x_1, lean_object* x_2, lean_object* x_3, lean_object* x_4, lean_object* x_5) { +_start: +{ +lean_object* x_6; +x_6 = l_Lean_IR_ToIR_bindErased(x_1, x_2, x_3, x_4, x_5); +lean_dec(x_4); +lean_dec(x_3); +return x_6; +} +} +LEAN_EXPORT lean_object* l_Lean_IR_ToIR_findDecl(lean_object* x_1, lean_object* x_2, lean_object* x_3, lean_object* x_4, lean_object* x_5) { +_start: +{ +lean_object* x_6; uint8_t x_7; +x_6 = lean_st_ref_get(x_4, x_5); +x_7 = !lean_is_exclusive(x_6); +if (x_7 == 0) +{ +lean_object* x_8; lean_object* x_9; lean_object* x_10; lean_object* x_11; +x_8 = lean_ctor_get(x_6, 0); +x_9 = lean_ctor_get(x_8, 0); +lean_inc(x_9); +lean_dec(x_8); +x_10 = lean_ir_find_env_decl(x_9, x_1); +x_11 = lean_alloc_ctor(0, 2, 0); +lean_ctor_set(x_11, 0, x_10); +lean_ctor_set(x_11, 1, x_2); +lean_ctor_set(x_6, 0, x_11); +return x_6; +} +else +{ +lean_object* x_12; lean_object* x_13; lean_object* x_14; lean_object* x_15; lean_object* x_16; lean_object* x_17; +x_12 = lean_ctor_get(x_6, 0); +x_13 = lean_ctor_get(x_6, 1); +lean_inc(x_13); +lean_inc(x_12); +lean_dec(x_6); +x_14 = lean_ctor_get(x_12, 0); +lean_inc(x_14); +lean_dec(x_12); +x_15 = lean_ir_find_env_decl(x_14, x_1); +x_16 = lean_alloc_ctor(0, 2, 0); +lean_ctor_set(x_16, 0, x_15); +lean_ctor_set(x_16, 1, x_2); +x_17 = lean_alloc_ctor(0, 2, 0); +lean_ctor_set(x_17, 0, x_16); +lean_ctor_set(x_17, 1, x_13); +return x_17; +} +} +} +LEAN_EXPORT lean_object* l_Lean_IR_ToIR_findDecl___boxed(lean_object* x_1, lean_object* x_2, lean_object* x_3, lean_object* x_4, lean_object* x_5) { +_start: +{ +lean_object* x_6; +x_6 = l_Lean_IR_ToIR_findDecl(x_1, x_2, x_3, x_4, x_5); +lean_dec(x_4); +lean_dec(x_3); +return x_6; +} +} +static lean_object* _init_l_Lean_IR_ToIR_addDecl___closed__1() { 
+_start: +{ +lean_object* x_1; +x_1 = l_Lean_IR_declMapExt; +return x_1; +} +} +static lean_object* _init_l_Lean_IR_ToIR_addDecl___closed__2() { +_start: +{ +lean_object* x_1; +x_1 = l_Lean_PersistentHashMap_mkEmptyEntriesArray(lean_box(0), lean_box(0)); +return x_1; +} +} +static lean_object* _init_l_Lean_IR_ToIR_addDecl___closed__3() { +_start: +{ +lean_object* x_1; lean_object* x_2; +x_1 = l_Lean_IR_ToIR_addDecl___closed__2; +x_2 = lean_alloc_ctor(0, 1, 0); +lean_ctor_set(x_2, 0, x_1); +return x_2; +} +} +static lean_object* _init_l_Lean_IR_ToIR_addDecl___closed__4() { +_start: +{ +lean_object* x_1; lean_object* x_2; +x_1 = l_Lean_IR_ToIR_addDecl___closed__3; +x_2 = lean_alloc_ctor(0, 2, 0); +lean_ctor_set(x_2, 0, x_1); +lean_ctor_set(x_2, 1, x_1); +return x_2; +} +} +LEAN_EXPORT lean_object* l_Lean_IR_ToIR_addDecl(lean_object* x_1, lean_object* x_2, lean_object* x_3, lean_object* x_4, lean_object* x_5) { +_start: +{ +lean_object* x_6; uint8_t x_7; +x_6 = lean_st_ref_take(x_4, x_5); +x_7 = !lean_is_exclusive(x_6); +if (x_7 == 0) +{ +lean_object* x_8; uint8_t x_9; +x_8 = lean_ctor_get(x_6, 0); +x_9 = !lean_is_exclusive(x_8); +if (x_9 == 0) +{ +lean_object* x_10; lean_object* x_11; lean_object* x_12; lean_object* x_13; lean_object* x_14; lean_object* x_15; lean_object* x_16; lean_object* x_17; lean_object* x_18; uint8_t x_19; +x_10 = lean_ctor_get(x_6, 1); +x_11 = lean_ctor_get(x_8, 0); +x_12 = lean_ctor_get(x_8, 4); +lean_dec(x_12); +x_13 = l_Lean_IR_Decl_name(x_1); +x_14 = l_Lean_Environment_addExtraName(x_11, x_13); +x_15 = l_Lean_IR_ToIR_addDecl___closed__1; +x_16 = l_Lean_PersistentEnvExtension_addEntry___rarg(x_15, x_14, x_1); +x_17 = l_Lean_IR_ToIR_addDecl___closed__4; +lean_ctor_set(x_8, 4, x_17); +lean_ctor_set(x_8, 0, x_16); +x_18 = lean_st_ref_set(x_4, x_8, x_10); +x_19 = !lean_is_exclusive(x_18); +if (x_19 == 0) +{ +lean_object* x_20; lean_object* x_21; +x_20 = lean_ctor_get(x_18, 0); +lean_dec(x_20); +x_21 = lean_box(0); +lean_ctor_set(x_6, 1, x_2); +lean_ctor_set(x_6, 0, x_21); +lean_ctor_set(x_18, 0, x_6); +return x_18; +} +else +{ +lean_object* x_22; lean_object* x_23; lean_object* x_24; +x_22 = lean_ctor_get(x_18, 1); +lean_inc(x_22); +lean_dec(x_18); +x_23 = lean_box(0); +lean_ctor_set(x_6, 1, x_2); +lean_ctor_set(x_6, 0, x_23); +x_24 = lean_alloc_ctor(0, 2, 0); +lean_ctor_set(x_24, 0, x_6); +lean_ctor_set(x_24, 1, x_22); +return x_24; +} +} +else +{ +lean_object* x_25; lean_object* x_26; lean_object* x_27; lean_object* x_28; lean_object* x_29; lean_object* x_30; lean_object* x_31; lean_object* x_32; lean_object* x_33; lean_object* x_34; lean_object* x_35; lean_object* x_36; lean_object* x_37; lean_object* x_38; lean_object* x_39; lean_object* x_40; lean_object* x_41; lean_object* x_42; lean_object* x_43; +x_25 = lean_ctor_get(x_6, 1); +x_26 = lean_ctor_get(x_8, 0); +x_27 = lean_ctor_get(x_8, 1); +x_28 = lean_ctor_get(x_8, 2); +x_29 = lean_ctor_get(x_8, 3); +x_30 = lean_ctor_get(x_8, 5); +x_31 = lean_ctor_get(x_8, 6); +x_32 = lean_ctor_get(x_8, 7); +lean_inc(x_32); +lean_inc(x_31); +lean_inc(x_30); +lean_inc(x_29); +lean_inc(x_28); +lean_inc(x_27); +lean_inc(x_26); +lean_dec(x_8); +x_33 = l_Lean_IR_Decl_name(x_1); +x_34 = l_Lean_Environment_addExtraName(x_26, x_33); +x_35 = l_Lean_IR_ToIR_addDecl___closed__1; +x_36 = l_Lean_PersistentEnvExtension_addEntry___rarg(x_35, x_34, x_1); +x_37 = l_Lean_IR_ToIR_addDecl___closed__4; +x_38 = lean_alloc_ctor(0, 8, 0); +lean_ctor_set(x_38, 0, x_36); +lean_ctor_set(x_38, 1, x_27); +lean_ctor_set(x_38, 2, x_28); +lean_ctor_set(x_38, 3, 
x_29); +lean_ctor_set(x_38, 4, x_37); +lean_ctor_set(x_38, 5, x_30); +lean_ctor_set(x_38, 6, x_31); +lean_ctor_set(x_38, 7, x_32); +x_39 = lean_st_ref_set(x_4, x_38, x_25); +x_40 = lean_ctor_get(x_39, 1); +lean_inc(x_40); +if (lean_is_exclusive(x_39)) { + lean_ctor_release(x_39, 0); + lean_ctor_release(x_39, 1); + x_41 = x_39; +} else { + lean_dec_ref(x_39); + x_41 = lean_box(0); +} +x_42 = lean_box(0); +lean_ctor_set(x_6, 1, x_2); +lean_ctor_set(x_6, 0, x_42); +if (lean_is_scalar(x_41)) { + x_43 = lean_alloc_ctor(0, 2, 0); +} else { + x_43 = x_41; +} +lean_ctor_set(x_43, 0, x_6); +lean_ctor_set(x_43, 1, x_40); +return x_43; +} +} +else +{ +lean_object* x_44; lean_object* x_45; lean_object* x_46; lean_object* x_47; lean_object* x_48; lean_object* x_49; lean_object* x_50; lean_object* x_51; lean_object* x_52; lean_object* x_53; lean_object* x_54; lean_object* x_55; lean_object* x_56; lean_object* x_57; lean_object* x_58; lean_object* x_59; lean_object* x_60; lean_object* x_61; lean_object* x_62; lean_object* x_63; lean_object* x_64; lean_object* x_65; +x_44 = lean_ctor_get(x_6, 0); +x_45 = lean_ctor_get(x_6, 1); +lean_inc(x_45); +lean_inc(x_44); +lean_dec(x_6); +x_46 = lean_ctor_get(x_44, 0); +lean_inc(x_46); +x_47 = lean_ctor_get(x_44, 1); +lean_inc(x_47); +x_48 = lean_ctor_get(x_44, 2); +lean_inc(x_48); +x_49 = lean_ctor_get(x_44, 3); +lean_inc(x_49); +x_50 = lean_ctor_get(x_44, 5); +lean_inc(x_50); +x_51 = lean_ctor_get(x_44, 6); +lean_inc(x_51); +x_52 = lean_ctor_get(x_44, 7); +lean_inc(x_52); +if (lean_is_exclusive(x_44)) { + lean_ctor_release(x_44, 0); + lean_ctor_release(x_44, 1); + lean_ctor_release(x_44, 2); + lean_ctor_release(x_44, 3); + lean_ctor_release(x_44, 4); + lean_ctor_release(x_44, 5); + lean_ctor_release(x_44, 6); + lean_ctor_release(x_44, 7); + x_53 = x_44; +} else { + lean_dec_ref(x_44); + x_53 = lean_box(0); +} +x_54 = l_Lean_IR_Decl_name(x_1); +x_55 = l_Lean_Environment_addExtraName(x_46, x_54); +x_56 = l_Lean_IR_ToIR_addDecl___closed__1; +x_57 = l_Lean_PersistentEnvExtension_addEntry___rarg(x_56, x_55, x_1); +x_58 = l_Lean_IR_ToIR_addDecl___closed__4; +if (lean_is_scalar(x_53)) { + x_59 = lean_alloc_ctor(0, 8, 0); +} else { + x_59 = x_53; +} +lean_ctor_set(x_59, 0, x_57); +lean_ctor_set(x_59, 1, x_47); +lean_ctor_set(x_59, 2, x_48); +lean_ctor_set(x_59, 3, x_49); +lean_ctor_set(x_59, 4, x_58); +lean_ctor_set(x_59, 5, x_50); +lean_ctor_set(x_59, 6, x_51); +lean_ctor_set(x_59, 7, x_52); +x_60 = lean_st_ref_set(x_4, x_59, x_45); +x_61 = lean_ctor_get(x_60, 1); +lean_inc(x_61); +if (lean_is_exclusive(x_60)) { + lean_ctor_release(x_60, 0); + lean_ctor_release(x_60, 1); + x_62 = x_60; +} else { + lean_dec_ref(x_60); + x_62 = lean_box(0); +} +x_63 = lean_box(0); +x_64 = lean_alloc_ctor(0, 2, 0); +lean_ctor_set(x_64, 0, x_63); +lean_ctor_set(x_64, 1, x_2); +if (lean_is_scalar(x_62)) { + x_65 = lean_alloc_ctor(0, 2, 0); +} else { + x_65 = x_62; +} +lean_ctor_set(x_65, 0, x_64); +lean_ctor_set(x_65, 1, x_61); +return x_65; +} +} +} +LEAN_EXPORT lean_object* l_Lean_IR_ToIR_addDecl___boxed(lean_object* x_1, lean_object* x_2, lean_object* x_3, lean_object* x_4, lean_object* x_5) { +_start: +{ +lean_object* x_6; +x_6 = l_Lean_IR_ToIR_addDecl(x_1, x_2, x_3, x_4, x_5); +lean_dec(x_4); +lean_dec(x_3); +return x_6; +} +} +LEAN_EXPORT lean_object* l_Lean_IR_ToIR_lowerLitValue(lean_object* x_1) { +_start: +{ +if (lean_obj_tag(x_1) == 0) +{ +uint8_t x_2; +x_2 = !lean_is_exclusive(x_1); +if (x_2 == 0) +{ +return x_1; +} +else +{ +lean_object* x_3; lean_object* x_4; +x_3 = 
lean_ctor_get(x_1, 0); +lean_inc(x_3); +lean_dec(x_1); +x_4 = lean_alloc_ctor(0, 1, 0); +lean_ctor_set(x_4, 0, x_3); +return x_4; +} +} +else +{ +uint8_t x_5; +x_5 = !lean_is_exclusive(x_1); +if (x_5 == 0) +{ +return x_1; +} +else +{ +lean_object* x_6; lean_object* x_7; +x_6 = lean_ctor_get(x_1, 0); +lean_inc(x_6); +lean_dec(x_1); +x_7 = lean_alloc_ctor(1, 1, 0); +lean_ctor_set(x_7, 0, x_6); +return x_7; +} +} +} +} +static lean_object* _init_l_panic___at_Lean_IR_ToIR_lowerEnumToScalarType___spec__1___closed__1() { +_start: +{ +lean_object* x_1; lean_object* x_2; +x_1 = l_Lean_Core_instMonadCoreM; +x_2 = l_StateT_instMonad___rarg(x_1); +return x_2; +} +} +static lean_object* _init_l_panic___at_Lean_IR_ToIR_lowerEnumToScalarType___spec__1___closed__2() { +_start: +{ +lean_object* x_1; lean_object* x_2; lean_object* x_3; +x_1 = l_panic___at_Lean_IR_ToIR_lowerEnumToScalarType___spec__1___closed__1; +x_2 = l_instInhabitedPUnit; +x_3 = l_instInhabitedOfMonad___rarg(x_1, x_2); +return x_3; +} +} +LEAN_EXPORT lean_object* l_panic___at_Lean_IR_ToIR_lowerEnumToScalarType___spec__1(lean_object* x_1, lean_object* x_2, lean_object* x_3, lean_object* x_4, lean_object* x_5) { +_start: +{ +lean_object* x_6; lean_object* x_7; lean_object* x_8; +x_6 = l_panic___at_Lean_IR_ToIR_lowerEnumToScalarType___spec__1___closed__2; +x_7 = lean_panic_fn(x_6, x_1); +x_8 = lean_apply_4(x_7, x_2, x_3, x_4, x_5); +return x_8; +} +} +static lean_object* _init_l_List_forIn_x27_loop___at_Lean_IR_ToIR_lowerEnumToScalarType___spec__2___closed__1() { +_start: +{ +lean_object* x_1; +x_1 = lean_mk_string_unchecked("Lean.Compiler.IR.ToIR", 21, 21); +return x_1; +} +} +static lean_object* _init_l_List_forIn_x27_loop___at_Lean_IR_ToIR_lowerEnumToScalarType___spec__2___closed__2() { +_start: +{ +lean_object* x_1; +x_1 = lean_mk_string_unchecked("Lean.IR.ToIR.lowerEnumToScalarType", 34, 34); +return x_1; +} +} +static lean_object* _init_l_List_forIn_x27_loop___at_Lean_IR_ToIR_lowerEnumToScalarType___spec__2___closed__3() { +_start: +{ +lean_object* x_1; +x_1 = lean_mk_string_unchecked("expected valid constructor name", 31, 31); +return x_1; +} +} +static lean_object* _init_l_List_forIn_x27_loop___at_Lean_IR_ToIR_lowerEnumToScalarType___spec__2___closed__4() { +_start: +{ +lean_object* x_1; lean_object* x_2; lean_object* x_3; lean_object* x_4; lean_object* x_5; lean_object* x_6; +x_1 = l_List_forIn_x27_loop___at_Lean_IR_ToIR_lowerEnumToScalarType___spec__2___closed__1; +x_2 = l_List_forIn_x27_loop___at_Lean_IR_ToIR_lowerEnumToScalarType___spec__2___closed__2; +x_3 = lean_unsigned_to_nat(78u); +x_4 = lean_unsigned_to_nat(57u); +x_5 = l_List_forIn_x27_loop___at_Lean_IR_ToIR_lowerEnumToScalarType___spec__2___closed__3; +x_6 = l___private_Init_Util_0__mkPanicMessageWithDecl(x_1, x_2, x_3, x_4, x_5); +return x_6; +} +} +static lean_object* _init_l_List_forIn_x27_loop___at_Lean_IR_ToIR_lowerEnumToScalarType___spec__2___closed__5() { +_start: +{ +lean_object* x_1; lean_object* x_2; +x_1 = lean_box(0); +x_2 = lean_alloc_ctor(1, 1, 0); +lean_ctor_set(x_2, 0, x_1); +return x_2; +} +} +static lean_object* _init_l_List_forIn_x27_loop___at_Lean_IR_ToIR_lowerEnumToScalarType___spec__2___closed__6() { +_start: +{ +lean_object* x_1; lean_object* x_2; lean_object* x_3; +x_1 = l_List_forIn_x27_loop___at_Lean_IR_ToIR_lowerEnumToScalarType___spec__2___closed__5; +x_2 = lean_box(0); +x_3 = lean_alloc_ctor(0, 2, 0); +lean_ctor_set(x_3, 0, x_1); +lean_ctor_set(x_3, 1, x_2); +return x_3; +} +} +static lean_object* 
_init_l_List_forIn_x27_loop___at_Lean_IR_ToIR_lowerEnumToScalarType___spec__2___closed__7() { +_start: +{ +lean_object* x_1; lean_object* x_2; +x_1 = l_List_forIn_x27_loop___at_Lean_IR_ToIR_lowerEnumToScalarType___spec__2___closed__6; +x_2 = lean_alloc_ctor(0, 1, 0); +lean_ctor_set(x_2, 0, x_1); +return x_2; +} +} +LEAN_EXPORT lean_object* l_List_forIn_x27_loop___at_Lean_IR_ToIR_lowerEnumToScalarType___spec__2(lean_object* x_1, lean_object* x_2, lean_object* x_3, lean_object* x_4, lean_object* x_5, lean_object* x_6, lean_object* x_7, lean_object* x_8, lean_object* x_9, lean_object* x_10, lean_object* x_11, lean_object* x_12) { +_start: +{ +if (lean_obj_tag(x_6) == 0) +{ +lean_object* x_13; lean_object* x_14; +lean_dec(x_11); +lean_dec(x_10); +lean_dec(x_4); +lean_dec(x_1); +x_13 = lean_alloc_ctor(0, 2, 0); +lean_ctor_set(x_13, 0, x_7); +lean_ctor_set(x_13, 1, x_9); +x_14 = lean_alloc_ctor(0, 2, 0); +lean_ctor_set(x_14, 0, x_13); +lean_ctor_set(x_14, 1, x_12); +return x_14; +} +else +{ +lean_object* x_15; lean_object* x_16; lean_object* x_17; lean_object* x_18; lean_object* x_19; uint8_t x_33; lean_object* x_34; +lean_dec(x_7); +x_15 = lean_ctor_get(x_6, 0); +lean_inc(x_15); +x_16 = lean_ctor_get(x_6, 1); +lean_inc(x_16); +if (lean_is_exclusive(x_6)) { + lean_ctor_release(x_6, 0); + lean_ctor_release(x_6, 1); + x_17 = x_6; +} else { + lean_dec_ref(x_6); + x_17 = lean_box(0); +} +x_33 = 0; +lean_inc(x_1); +x_34 = l_Lean_Environment_find_x3f(x_1, x_15, x_33); +if (lean_obj_tag(x_34) == 0) +{ +lean_object* x_35; lean_object* x_36; +x_35 = l_List_forIn_x27_loop___at_Lean_IR_ToIR_lowerEnumToScalarType___spec__2___closed__4; +lean_inc(x_11); +lean_inc(x_10); +x_36 = l_panic___at_Lean_IR_ToIR_lowerEnumToScalarType___spec__1(x_35, x_9, x_10, x_11, x_12); +if (lean_obj_tag(x_36) == 0) +{ +lean_object* x_37; lean_object* x_38; uint8_t x_39; +x_37 = lean_ctor_get(x_36, 0); +lean_inc(x_37); +x_38 = lean_ctor_get(x_36, 1); +lean_inc(x_38); +lean_dec(x_36); +x_39 = !lean_is_exclusive(x_37); +if (x_39 == 0) +{ +lean_object* x_40; lean_object* x_41; +x_40 = lean_ctor_get(x_37, 0); +lean_dec(x_40); +lean_inc(x_4); +x_41 = lean_alloc_ctor(1, 1, 0); +lean_ctor_set(x_41, 0, x_4); +lean_ctor_set(x_37, 0, x_41); +x_18 = x_37; +x_19 = x_38; +goto block_32; +} +else +{ +lean_object* x_42; lean_object* x_43; lean_object* x_44; +x_42 = lean_ctor_get(x_37, 1); +lean_inc(x_42); +lean_dec(x_37); +lean_inc(x_4); +x_43 = lean_alloc_ctor(1, 1, 0); +lean_ctor_set(x_43, 0, x_4); +x_44 = lean_alloc_ctor(0, 2, 0); +lean_ctor_set(x_44, 0, x_43); +lean_ctor_set(x_44, 1, x_42); +x_18 = x_44; +x_19 = x_38; +goto block_32; +} +} +else +{ +uint8_t x_45; +lean_dec(x_17); +lean_dec(x_16); +lean_dec(x_11); +lean_dec(x_10); +lean_dec(x_4); +lean_dec(x_1); +x_45 = !lean_is_exclusive(x_36); +if (x_45 == 0) +{ +return x_36; +} +else +{ +lean_object* x_46; lean_object* x_47; lean_object* x_48; +x_46 = lean_ctor_get(x_36, 0); +x_47 = lean_ctor_get(x_36, 1); +lean_inc(x_47); +lean_inc(x_46); +lean_dec(x_36); +x_48 = lean_alloc_ctor(1, 2, 0); +lean_ctor_set(x_48, 0, x_46); +lean_ctor_set(x_48, 1, x_47); +return x_48; +} +} +} +else +{ +lean_object* x_49; +x_49 = lean_ctor_get(x_34, 0); +lean_inc(x_49); +lean_dec(x_34); +switch (lean_obj_tag(x_49)) { +case 1: +{ +uint8_t x_50; +x_50 = !lean_is_exclusive(x_49); +if (x_50 == 0) +{ +lean_object* x_51; lean_object* x_52; lean_object* x_53; +x_51 = lean_ctor_get(x_49, 0); +lean_dec(x_51); +x_52 = l_List_forIn_x27_loop___at_Lean_IR_ToIR_lowerEnumToScalarType___spec__2___closed__4; +lean_inc(x_11); 
+lean_inc(x_10); +x_53 = l_panic___at_Lean_IR_ToIR_lowerEnumToScalarType___spec__1(x_52, x_9, x_10, x_11, x_12); +if (lean_obj_tag(x_53) == 0) +{ +lean_object* x_54; lean_object* x_55; uint8_t x_56; +x_54 = lean_ctor_get(x_53, 0); +lean_inc(x_54); +x_55 = lean_ctor_get(x_53, 1); +lean_inc(x_55); +lean_dec(x_53); +x_56 = !lean_is_exclusive(x_54); +if (x_56 == 0) +{ +lean_object* x_57; +x_57 = lean_ctor_get(x_54, 0); +lean_dec(x_57); +lean_inc(x_4); +lean_ctor_set(x_49, 0, x_4); +lean_ctor_set(x_54, 0, x_49); +x_18 = x_54; +x_19 = x_55; +goto block_32; +} +else +{ +lean_object* x_58; lean_object* x_59; +x_58 = lean_ctor_get(x_54, 1); +lean_inc(x_58); +lean_dec(x_54); +lean_inc(x_4); +lean_ctor_set(x_49, 0, x_4); +x_59 = lean_alloc_ctor(0, 2, 0); +lean_ctor_set(x_59, 0, x_49); +lean_ctor_set(x_59, 1, x_58); +x_18 = x_59; +x_19 = x_55; +goto block_32; +} +} +else +{ +uint8_t x_60; +lean_free_object(x_49); +lean_dec(x_17); +lean_dec(x_16); +lean_dec(x_11); +lean_dec(x_10); +lean_dec(x_4); +lean_dec(x_1); +x_60 = !lean_is_exclusive(x_53); +if (x_60 == 0) +{ +return x_53; +} +else +{ +lean_object* x_61; lean_object* x_62; lean_object* x_63; +x_61 = lean_ctor_get(x_53, 0); +x_62 = lean_ctor_get(x_53, 1); +lean_inc(x_62); +lean_inc(x_61); +lean_dec(x_53); +x_63 = lean_alloc_ctor(1, 2, 0); +lean_ctor_set(x_63, 0, x_61); +lean_ctor_set(x_63, 1, x_62); +return x_63; +} +} +} +else +{ +lean_object* x_64; lean_object* x_65; +lean_dec(x_49); +x_64 = l_List_forIn_x27_loop___at_Lean_IR_ToIR_lowerEnumToScalarType___spec__2___closed__4; +lean_inc(x_11); +lean_inc(x_10); +x_65 = l_panic___at_Lean_IR_ToIR_lowerEnumToScalarType___spec__1(x_64, x_9, x_10, x_11, x_12); +if (lean_obj_tag(x_65) == 0) +{ +lean_object* x_66; lean_object* x_67; lean_object* x_68; lean_object* x_69; lean_object* x_70; lean_object* x_71; +x_66 = lean_ctor_get(x_65, 0); +lean_inc(x_66); +x_67 = lean_ctor_get(x_65, 1); +lean_inc(x_67); +lean_dec(x_65); +x_68 = lean_ctor_get(x_66, 1); +lean_inc(x_68); +if (lean_is_exclusive(x_66)) { + lean_ctor_release(x_66, 0); + lean_ctor_release(x_66, 1); + x_69 = x_66; +} else { + lean_dec_ref(x_66); + x_69 = lean_box(0); +} +lean_inc(x_4); +x_70 = lean_alloc_ctor(1, 1, 0); +lean_ctor_set(x_70, 0, x_4); +if (lean_is_scalar(x_69)) { + x_71 = lean_alloc_ctor(0, 2, 0); +} else { + x_71 = x_69; +} +lean_ctor_set(x_71, 0, x_70); +lean_ctor_set(x_71, 1, x_68); +x_18 = x_71; +x_19 = x_67; +goto block_32; +} +else +{ +lean_object* x_72; lean_object* x_73; lean_object* x_74; lean_object* x_75; +lean_dec(x_17); +lean_dec(x_16); +lean_dec(x_11); +lean_dec(x_10); +lean_dec(x_4); +lean_dec(x_1); +x_72 = lean_ctor_get(x_65, 0); +lean_inc(x_72); +x_73 = lean_ctor_get(x_65, 1); +lean_inc(x_73); +if (lean_is_exclusive(x_65)) { + lean_ctor_release(x_65, 0); + lean_ctor_release(x_65, 1); + x_74 = x_65; +} else { + lean_dec_ref(x_65); + x_74 = lean_box(0); +} +if (lean_is_scalar(x_74)) { + x_75 = lean_alloc_ctor(1, 2, 0); +} else { + x_75 = x_74; +} +lean_ctor_set(x_75, 0, x_72); +lean_ctor_set(x_75, 1, x_73); +return x_75; +} +} +} +case 6: +{ +uint8_t x_76; +x_76 = !lean_is_exclusive(x_49); +if (x_76 == 0) +{ +lean_object* x_77; lean_object* x_78; lean_object* x_79; uint8_t x_80; +x_77 = lean_ctor_get(x_49, 0); +x_78 = lean_ctor_get(x_77, 0); +lean_inc(x_78); +lean_dec(x_77); +x_79 = lean_ctor_get(x_78, 2); +lean_inc(x_79); +lean_dec(x_78); +x_80 = l_Lean_Expr_isForall(x_79); +lean_dec(x_79); +if (x_80 == 0) +{ +lean_object* x_81; +lean_inc(x_4); +lean_ctor_set_tag(x_49, 1); +lean_ctor_set(x_49, 0, x_4); +x_81 = 
lean_alloc_ctor(0, 2, 0); +lean_ctor_set(x_81, 0, x_49); +lean_ctor_set(x_81, 1, x_9); +x_18 = x_81; +x_19 = x_12; +goto block_32; +} +else +{ +lean_object* x_82; lean_object* x_83; +lean_free_object(x_49); +x_82 = l_List_forIn_x27_loop___at_Lean_IR_ToIR_lowerEnumToScalarType___spec__2___closed__7; +x_83 = lean_alloc_ctor(0, 2, 0); +lean_ctor_set(x_83, 0, x_82); +lean_ctor_set(x_83, 1, x_9); +x_18 = x_83; +x_19 = x_12; +goto block_32; +} +} +else +{ +lean_object* x_84; lean_object* x_85; lean_object* x_86; uint8_t x_87; +x_84 = lean_ctor_get(x_49, 0); +lean_inc(x_84); +lean_dec(x_49); +x_85 = lean_ctor_get(x_84, 0); +lean_inc(x_85); +lean_dec(x_84); +x_86 = lean_ctor_get(x_85, 2); +lean_inc(x_86); +lean_dec(x_85); +x_87 = l_Lean_Expr_isForall(x_86); +lean_dec(x_86); +if (x_87 == 0) +{ +lean_object* x_88; lean_object* x_89; +lean_inc(x_4); +x_88 = lean_alloc_ctor(1, 1, 0); +lean_ctor_set(x_88, 0, x_4); +x_89 = lean_alloc_ctor(0, 2, 0); +lean_ctor_set(x_89, 0, x_88); +lean_ctor_set(x_89, 1, x_9); +x_18 = x_89; +x_19 = x_12; +goto block_32; +} +else +{ +lean_object* x_90; lean_object* x_91; +x_90 = l_List_forIn_x27_loop___at_Lean_IR_ToIR_lowerEnumToScalarType___spec__2___closed__7; +x_91 = lean_alloc_ctor(0, 2, 0); +lean_ctor_set(x_91, 0, x_90); +lean_ctor_set(x_91, 1, x_9); +x_18 = x_91; +x_19 = x_12; +goto block_32; +} +} +} +default: +{ +uint8_t x_92; +x_92 = !lean_is_exclusive(x_49); +if (x_92 == 0) +{ +lean_object* x_93; lean_object* x_94; lean_object* x_95; +x_93 = lean_ctor_get(x_49, 0); +lean_dec(x_93); +x_94 = l_List_forIn_x27_loop___at_Lean_IR_ToIR_lowerEnumToScalarType___spec__2___closed__4; +lean_inc(x_11); +lean_inc(x_10); +x_95 = l_panic___at_Lean_IR_ToIR_lowerEnumToScalarType___spec__1(x_94, x_9, x_10, x_11, x_12); +if (lean_obj_tag(x_95) == 0) +{ +lean_object* x_96; lean_object* x_97; uint8_t x_98; +x_96 = lean_ctor_get(x_95, 0); +lean_inc(x_96); +x_97 = lean_ctor_get(x_95, 1); +lean_inc(x_97); +lean_dec(x_95); +x_98 = !lean_is_exclusive(x_96); +if (x_98 == 0) +{ +lean_object* x_99; +x_99 = lean_ctor_get(x_96, 0); +lean_dec(x_99); +lean_inc(x_4); +lean_ctor_set_tag(x_49, 1); +lean_ctor_set(x_49, 0, x_4); +lean_ctor_set(x_96, 0, x_49); +x_18 = x_96; +x_19 = x_97; +goto block_32; +} +else +{ +lean_object* x_100; lean_object* x_101; +x_100 = lean_ctor_get(x_96, 1); +lean_inc(x_100); +lean_dec(x_96); +lean_inc(x_4); +lean_ctor_set_tag(x_49, 1); +lean_ctor_set(x_49, 0, x_4); +x_101 = lean_alloc_ctor(0, 2, 0); +lean_ctor_set(x_101, 0, x_49); +lean_ctor_set(x_101, 1, x_100); +x_18 = x_101; +x_19 = x_97; +goto block_32; +} +} +else +{ +uint8_t x_102; +lean_free_object(x_49); +lean_dec(x_17); +lean_dec(x_16); +lean_dec(x_11); +lean_dec(x_10); +lean_dec(x_4); +lean_dec(x_1); +x_102 = !lean_is_exclusive(x_95); +if (x_102 == 0) +{ +return x_95; +} +else +{ +lean_object* x_103; lean_object* x_104; lean_object* x_105; +x_103 = lean_ctor_get(x_95, 0); +x_104 = lean_ctor_get(x_95, 1); +lean_inc(x_104); +lean_inc(x_103); +lean_dec(x_95); +x_105 = lean_alloc_ctor(1, 2, 0); +lean_ctor_set(x_105, 0, x_103); +lean_ctor_set(x_105, 1, x_104); +return x_105; +} +} +} +else +{ +lean_object* x_106; lean_object* x_107; +lean_dec(x_49); +x_106 = l_List_forIn_x27_loop___at_Lean_IR_ToIR_lowerEnumToScalarType___spec__2___closed__4; +lean_inc(x_11); +lean_inc(x_10); +x_107 = l_panic___at_Lean_IR_ToIR_lowerEnumToScalarType___spec__1(x_106, x_9, x_10, x_11, x_12); +if (lean_obj_tag(x_107) == 0) +{ +lean_object* x_108; lean_object* x_109; lean_object* x_110; lean_object* x_111; lean_object* x_112; lean_object* 
x_113; +x_108 = lean_ctor_get(x_107, 0); +lean_inc(x_108); +x_109 = lean_ctor_get(x_107, 1); +lean_inc(x_109); +lean_dec(x_107); +x_110 = lean_ctor_get(x_108, 1); +lean_inc(x_110); +if (lean_is_exclusive(x_108)) { + lean_ctor_release(x_108, 0); + lean_ctor_release(x_108, 1); + x_111 = x_108; +} else { + lean_dec_ref(x_108); + x_111 = lean_box(0); +} +lean_inc(x_4); +x_112 = lean_alloc_ctor(1, 1, 0); +lean_ctor_set(x_112, 0, x_4); +if (lean_is_scalar(x_111)) { + x_113 = lean_alloc_ctor(0, 2, 0); +} else { + x_113 = x_111; +} +lean_ctor_set(x_113, 0, x_112); +lean_ctor_set(x_113, 1, x_110); +x_18 = x_113; +x_19 = x_109; +goto block_32; +} +else +{ +lean_object* x_114; lean_object* x_115; lean_object* x_116; lean_object* x_117; +lean_dec(x_17); +lean_dec(x_16); +lean_dec(x_11); +lean_dec(x_10); +lean_dec(x_4); +lean_dec(x_1); +x_114 = lean_ctor_get(x_107, 0); +lean_inc(x_114); +x_115 = lean_ctor_get(x_107, 1); +lean_inc(x_115); +if (lean_is_exclusive(x_107)) { + lean_ctor_release(x_107, 0); + lean_ctor_release(x_107, 1); + x_116 = x_107; +} else { + lean_dec_ref(x_107); + x_116 = lean_box(0); +} +if (lean_is_scalar(x_116)) { + x_117 = lean_alloc_ctor(1, 2, 0); +} else { + x_117 = x_116; +} +lean_ctor_set(x_117, 0, x_114); +lean_ctor_set(x_117, 1, x_115); +return x_117; +} +} +} +} +} +block_32: +{ +lean_object* x_20; +x_20 = lean_ctor_get(x_18, 0); +lean_inc(x_20); +if (lean_obj_tag(x_20) == 0) +{ +uint8_t x_21; +lean_dec(x_16); +lean_dec(x_11); +lean_dec(x_10); +lean_dec(x_4); +lean_dec(x_1); +x_21 = !lean_is_exclusive(x_18); +if (x_21 == 0) +{ +lean_object* x_22; lean_object* x_23; lean_object* x_24; +x_22 = lean_ctor_get(x_18, 0); +lean_dec(x_22); +x_23 = lean_ctor_get(x_20, 0); +lean_inc(x_23); +lean_dec(x_20); +lean_ctor_set(x_18, 0, x_23); +if (lean_is_scalar(x_17)) { + x_24 = lean_alloc_ctor(0, 2, 0); +} else { + x_24 = x_17; + lean_ctor_set_tag(x_24, 0); +} +lean_ctor_set(x_24, 0, x_18); +lean_ctor_set(x_24, 1, x_19); +return x_24; +} +else +{ +lean_object* x_25; lean_object* x_26; lean_object* x_27; lean_object* x_28; +x_25 = lean_ctor_get(x_18, 1); +lean_inc(x_25); +lean_dec(x_18); +x_26 = lean_ctor_get(x_20, 0); +lean_inc(x_26); +lean_dec(x_20); +x_27 = lean_alloc_ctor(0, 2, 0); +lean_ctor_set(x_27, 0, x_26); +lean_ctor_set(x_27, 1, x_25); +if (lean_is_scalar(x_17)) { + x_28 = lean_alloc_ctor(0, 2, 0); +} else { + x_28 = x_17; + lean_ctor_set_tag(x_28, 0); +} +lean_ctor_set(x_28, 0, x_27); +lean_ctor_set(x_28, 1, x_19); +return x_28; +} +} +else +{ +lean_object* x_29; lean_object* x_30; +lean_dec(x_17); +x_29 = lean_ctor_get(x_18, 1); +lean_inc(x_29); +lean_dec(x_18); +x_30 = lean_ctor_get(x_20, 0); +lean_inc(x_30); +lean_dec(x_20); +x_6 = x_16; +x_7 = x_30; +x_8 = lean_box(0); +x_9 = x_29; +x_12 = x_19; +goto _start; +} +} +} +} +} +static lean_object* _init_l_Lean_IR_ToIR_lowerEnumToScalarType___lambda__1___closed__1() { +_start: +{ +lean_object* x_1; lean_object* x_2; +x_1 = lean_box(3); +x_2 = lean_alloc_ctor(1, 1, 0); +lean_ctor_set(x_2, 0, x_1); +return x_2; +} +} +static lean_object* _init_l_Lean_IR_ToIR_lowerEnumToScalarType___lambda__1___closed__2() { +_start: +{ +lean_object* x_1; lean_object* x_2; +x_1 = lean_box(2); +x_2 = lean_alloc_ctor(1, 1, 0); +lean_ctor_set(x_2, 0, x_1); +return x_2; +} +} +static lean_object* _init_l_Lean_IR_ToIR_lowerEnumToScalarType___lambda__1___closed__3() { +_start: +{ +lean_object* x_1; lean_object* x_2; +x_1 = lean_box(1); +x_2 = lean_alloc_ctor(1, 1, 0); +lean_ctor_set(x_2, 0, x_1); +return x_2; +} +} +LEAN_EXPORT lean_object* 
l_Lean_IR_ToIR_lowerEnumToScalarType___lambda__1(lean_object* x_1, lean_object* x_2, lean_object* x_3, lean_object* x_4, lean_object* x_5, lean_object* x_6, lean_object* x_7) { +_start: +{ +lean_object* x_8; uint8_t x_9; +x_8 = lean_unsigned_to_nat(1u); +x_9 = lean_nat_dec_eq(x_1, x_8); +if (x_9 == 0) +{ +lean_object* x_10; uint8_t x_11; +x_10 = lean_unsigned_to_nat(256u); +x_11 = lean_nat_dec_lt(x_1, x_10); +if (x_11 == 0) +{ +lean_object* x_12; uint8_t x_13; +x_12 = lean_unsigned_to_nat(65536u); +x_13 = lean_nat_dec_lt(x_1, x_12); +if (x_13 == 0) +{ +lean_object* x_14; uint8_t x_15; +x_14 = lean_cstr_to_nat("4294967296"); +x_15 = lean_nat_dec_lt(x_1, x_14); +if (x_15 == 0) +{ +lean_object* x_16; lean_object* x_17; +x_16 = lean_alloc_ctor(0, 2, 0); +lean_ctor_set(x_16, 0, x_2); +lean_ctor_set(x_16, 1, x_4); +x_17 = lean_alloc_ctor(0, 2, 0); +lean_ctor_set(x_17, 0, x_16); +lean_ctor_set(x_17, 1, x_7); +return x_17; +} +else +{ +lean_object* x_18; lean_object* x_19; lean_object* x_20; +lean_dec(x_2); +x_18 = l_Lean_IR_ToIR_lowerEnumToScalarType___lambda__1___closed__1; +x_19 = lean_alloc_ctor(0, 2, 0); +lean_ctor_set(x_19, 0, x_18); +lean_ctor_set(x_19, 1, x_4); +x_20 = lean_alloc_ctor(0, 2, 0); +lean_ctor_set(x_20, 0, x_19); +lean_ctor_set(x_20, 1, x_7); +return x_20; +} +} +else +{ +lean_object* x_21; lean_object* x_22; lean_object* x_23; +lean_dec(x_2); +x_21 = l_Lean_IR_ToIR_lowerEnumToScalarType___lambda__1___closed__2; +x_22 = lean_alloc_ctor(0, 2, 0); +lean_ctor_set(x_22, 0, x_21); +lean_ctor_set(x_22, 1, x_4); +x_23 = lean_alloc_ctor(0, 2, 0); +lean_ctor_set(x_23, 0, x_22); +lean_ctor_set(x_23, 1, x_7); +return x_23; +} +} +else +{ +lean_object* x_24; lean_object* x_25; lean_object* x_26; +lean_dec(x_2); +x_24 = l_Lean_IR_ToIR_lowerEnumToScalarType___lambda__1___closed__3; +x_25 = lean_alloc_ctor(0, 2, 0); +lean_ctor_set(x_25, 0, x_24); +lean_ctor_set(x_25, 1, x_4); +x_26 = lean_alloc_ctor(0, 2, 0); +lean_ctor_set(x_26, 0, x_25); +lean_ctor_set(x_26, 1, x_7); +return x_26; +} +} +else +{ +lean_object* x_27; lean_object* x_28; +x_27 = lean_alloc_ctor(0, 2, 0); +lean_ctor_set(x_27, 0, x_2); +lean_ctor_set(x_27, 1, x_4); +x_28 = lean_alloc_ctor(0, 2, 0); +lean_ctor_set(x_28, 0, x_27); +lean_ctor_set(x_28, 1, x_7); +return x_28; +} +} +} +static lean_object* _init_l_Lean_IR_ToIR_lowerEnumToScalarType___closed__1() { +_start: +{ +lean_object* x_1; lean_object* x_2; lean_object* x_3; +x_1 = lean_box(0); +x_2 = lean_box(0); +x_3 = lean_alloc_ctor(0, 2, 0); +lean_ctor_set(x_3, 0, x_1); +lean_ctor_set(x_3, 1, x_2); +return x_3; +} +} +LEAN_EXPORT lean_object* l_Lean_IR_ToIR_lowerEnumToScalarType(lean_object* x_1, lean_object* x_2, lean_object* x_3, lean_object* x_4, lean_object* x_5) { +_start: +{ +lean_object* x_6; uint8_t x_7; +x_6 = lean_st_ref_get(x_4, x_5); +x_7 = !lean_is_exclusive(x_6); +if (x_7 == 0) +{ +lean_object* x_8; lean_object* x_9; lean_object* x_10; uint8_t x_11; lean_object* x_12; +x_8 = lean_ctor_get(x_6, 0); +x_9 = lean_ctor_get(x_6, 1); +x_10 = lean_ctor_get(x_8, 0); +lean_inc(x_10); +lean_dec(x_8); +x_11 = 0; +lean_inc(x_10); +x_12 = l_Lean_Environment_find_x3f(x_10, x_1, x_11); +if (lean_obj_tag(x_12) == 0) +{ +lean_object* x_13; lean_object* x_14; +lean_dec(x_10); +lean_dec(x_4); +lean_dec(x_3); +x_13 = lean_box(0); +x_14 = lean_alloc_ctor(0, 2, 0); +lean_ctor_set(x_14, 0, x_13); +lean_ctor_set(x_14, 1, x_2); +lean_ctor_set(x_6, 0, x_14); +return x_6; +} +else +{ +lean_object* x_15; +x_15 = lean_ctor_get(x_12, 0); +lean_inc(x_15); +lean_dec(x_12); +if 
(lean_obj_tag(x_15) == 5) +{ +lean_object* x_16; lean_object* x_17; lean_object* x_18; lean_object* x_19; lean_object* x_20; lean_object* x_21; lean_object* x_22; lean_object* x_23; +lean_free_object(x_6); +x_16 = lean_ctor_get(x_15, 0); +lean_inc(x_16); +lean_dec(x_15); +x_17 = lean_ctor_get(x_16, 4); +lean_inc(x_17); +lean_dec(x_16); +x_18 = lean_unsigned_to_nat(0u); +x_19 = l_List_lengthTRAux___rarg(x_17, x_18); +x_20 = lean_box(0); +x_21 = lean_box(0); +x_22 = l_Lean_IR_ToIR_lowerEnumToScalarType___closed__1; +lean_inc(x_4); +lean_inc(x_3); +lean_inc(x_17); +x_23 = l_List_forIn_x27_loop___at_Lean_IR_ToIR_lowerEnumToScalarType___spec__2(x_10, x_17, x_20, x_22, x_17, x_17, x_22, lean_box(0), x_2, x_3, x_4, x_9); +lean_dec(x_17); +if (lean_obj_tag(x_23) == 0) +{ +lean_object* x_24; lean_object* x_25; lean_object* x_26; +x_24 = lean_ctor_get(x_23, 0); +lean_inc(x_24); +x_25 = lean_ctor_get(x_24, 0); +lean_inc(x_25); +x_26 = lean_ctor_get(x_25, 0); +lean_inc(x_26); +lean_dec(x_25); +if (lean_obj_tag(x_26) == 0) +{ +lean_object* x_27; lean_object* x_28; lean_object* x_29; lean_object* x_30; +x_27 = lean_ctor_get(x_23, 1); +lean_inc(x_27); +lean_dec(x_23); +x_28 = lean_ctor_get(x_24, 1); +lean_inc(x_28); +lean_dec(x_24); +x_29 = lean_box(0); +x_30 = l_Lean_IR_ToIR_lowerEnumToScalarType___lambda__1(x_19, x_21, x_29, x_28, x_3, x_4, x_27); +lean_dec(x_4); +lean_dec(x_3); +lean_dec(x_19); +return x_30; +} +else +{ +uint8_t x_31; +lean_dec(x_19); +lean_dec(x_4); +lean_dec(x_3); +x_31 = !lean_is_exclusive(x_23); +if (x_31 == 0) +{ +lean_object* x_32; uint8_t x_33; +x_32 = lean_ctor_get(x_23, 0); +lean_dec(x_32); +x_33 = !lean_is_exclusive(x_24); +if (x_33 == 0) +{ +lean_object* x_34; lean_object* x_35; +x_34 = lean_ctor_get(x_24, 0); +lean_dec(x_34); +x_35 = lean_ctor_get(x_26, 0); +lean_inc(x_35); +lean_dec(x_26); +lean_ctor_set(x_24, 0, x_35); +return x_23; +} +else +{ +lean_object* x_36; lean_object* x_37; lean_object* x_38; +x_36 = lean_ctor_get(x_24, 1); +lean_inc(x_36); +lean_dec(x_24); +x_37 = lean_ctor_get(x_26, 0); +lean_inc(x_37); +lean_dec(x_26); +x_38 = lean_alloc_ctor(0, 2, 0); +lean_ctor_set(x_38, 0, x_37); +lean_ctor_set(x_38, 1, x_36); +lean_ctor_set(x_23, 0, x_38); +return x_23; +} +} +else +{ +lean_object* x_39; lean_object* x_40; lean_object* x_41; lean_object* x_42; lean_object* x_43; lean_object* x_44; +x_39 = lean_ctor_get(x_23, 1); +lean_inc(x_39); +lean_dec(x_23); +x_40 = lean_ctor_get(x_24, 1); +lean_inc(x_40); +if (lean_is_exclusive(x_24)) { + lean_ctor_release(x_24, 0); + lean_ctor_release(x_24, 1); + x_41 = x_24; +} else { + lean_dec_ref(x_24); + x_41 = lean_box(0); +} +x_42 = lean_ctor_get(x_26, 0); +lean_inc(x_42); +lean_dec(x_26); +if (lean_is_scalar(x_41)) { + x_43 = lean_alloc_ctor(0, 2, 0); +} else { + x_43 = x_41; +} +lean_ctor_set(x_43, 0, x_42); +lean_ctor_set(x_43, 1, x_40); +x_44 = lean_alloc_ctor(0, 2, 0); +lean_ctor_set(x_44, 0, x_43); +lean_ctor_set(x_44, 1, x_39); +return x_44; +} +} +} +else +{ +uint8_t x_45; +lean_dec(x_19); +lean_dec(x_4); +lean_dec(x_3); +x_45 = !lean_is_exclusive(x_23); +if (x_45 == 0) +{ +return x_23; +} +else +{ +lean_object* x_46; lean_object* x_47; lean_object* x_48; +x_46 = lean_ctor_get(x_23, 0); +x_47 = lean_ctor_get(x_23, 1); +lean_inc(x_47); +lean_inc(x_46); +lean_dec(x_23); +x_48 = lean_alloc_ctor(1, 2, 0); +lean_ctor_set(x_48, 0, x_46); +lean_ctor_set(x_48, 1, x_47); +return x_48; +} +} +} +else +{ +lean_object* x_49; lean_object* x_50; +lean_dec(x_15); +lean_dec(x_10); +lean_dec(x_4); +lean_dec(x_3); +x_49 = lean_box(0); 
+x_50 = lean_alloc_ctor(0, 2, 0); +lean_ctor_set(x_50, 0, x_49); +lean_ctor_set(x_50, 1, x_2); +lean_ctor_set(x_6, 0, x_50); +return x_6; +} +} +} +else +{ +lean_object* x_51; lean_object* x_52; lean_object* x_53; uint8_t x_54; lean_object* x_55; +x_51 = lean_ctor_get(x_6, 0); +x_52 = lean_ctor_get(x_6, 1); +lean_inc(x_52); +lean_inc(x_51); +lean_dec(x_6); +x_53 = lean_ctor_get(x_51, 0); +lean_inc(x_53); +lean_dec(x_51); +x_54 = 0; +lean_inc(x_53); +x_55 = l_Lean_Environment_find_x3f(x_53, x_1, x_54); +if (lean_obj_tag(x_55) == 0) +{ +lean_object* x_56; lean_object* x_57; lean_object* x_58; +lean_dec(x_53); +lean_dec(x_4); +lean_dec(x_3); +x_56 = lean_box(0); +x_57 = lean_alloc_ctor(0, 2, 0); +lean_ctor_set(x_57, 0, x_56); +lean_ctor_set(x_57, 1, x_2); +x_58 = lean_alloc_ctor(0, 2, 0); +lean_ctor_set(x_58, 0, x_57); +lean_ctor_set(x_58, 1, x_52); +return x_58; +} +else +{ +lean_object* x_59; +x_59 = lean_ctor_get(x_55, 0); +lean_inc(x_59); +lean_dec(x_55); +if (lean_obj_tag(x_59) == 5) +{ +lean_object* x_60; lean_object* x_61; lean_object* x_62; lean_object* x_63; lean_object* x_64; lean_object* x_65; lean_object* x_66; lean_object* x_67; +x_60 = lean_ctor_get(x_59, 0); +lean_inc(x_60); +lean_dec(x_59); +x_61 = lean_ctor_get(x_60, 4); +lean_inc(x_61); +lean_dec(x_60); +x_62 = lean_unsigned_to_nat(0u); +x_63 = l_List_lengthTRAux___rarg(x_61, x_62); +x_64 = lean_box(0); +x_65 = lean_box(0); +x_66 = l_Lean_IR_ToIR_lowerEnumToScalarType___closed__1; +lean_inc(x_4); +lean_inc(x_3); +lean_inc(x_61); +x_67 = l_List_forIn_x27_loop___at_Lean_IR_ToIR_lowerEnumToScalarType___spec__2(x_53, x_61, x_64, x_66, x_61, x_61, x_66, lean_box(0), x_2, x_3, x_4, x_52); +lean_dec(x_61); +if (lean_obj_tag(x_67) == 0) +{ +lean_object* x_68; lean_object* x_69; lean_object* x_70; +x_68 = lean_ctor_get(x_67, 0); +lean_inc(x_68); +x_69 = lean_ctor_get(x_68, 0); +lean_inc(x_69); +x_70 = lean_ctor_get(x_69, 0); +lean_inc(x_70); +lean_dec(x_69); +if (lean_obj_tag(x_70) == 0) +{ +lean_object* x_71; lean_object* x_72; lean_object* x_73; lean_object* x_74; +x_71 = lean_ctor_get(x_67, 1); +lean_inc(x_71); +lean_dec(x_67); +x_72 = lean_ctor_get(x_68, 1); +lean_inc(x_72); +lean_dec(x_68); +x_73 = lean_box(0); +x_74 = l_Lean_IR_ToIR_lowerEnumToScalarType___lambda__1(x_63, x_65, x_73, x_72, x_3, x_4, x_71); +lean_dec(x_4); +lean_dec(x_3); +lean_dec(x_63); +return x_74; +} +else +{ +lean_object* x_75; lean_object* x_76; lean_object* x_77; lean_object* x_78; lean_object* x_79; lean_object* x_80; lean_object* x_81; +lean_dec(x_63); +lean_dec(x_4); +lean_dec(x_3); +x_75 = lean_ctor_get(x_67, 1); +lean_inc(x_75); +if (lean_is_exclusive(x_67)) { + lean_ctor_release(x_67, 0); + lean_ctor_release(x_67, 1); + x_76 = x_67; +} else { + lean_dec_ref(x_67); + x_76 = lean_box(0); +} +x_77 = lean_ctor_get(x_68, 1); +lean_inc(x_77); +if (lean_is_exclusive(x_68)) { + lean_ctor_release(x_68, 0); + lean_ctor_release(x_68, 1); + x_78 = x_68; +} else { + lean_dec_ref(x_68); + x_78 = lean_box(0); +} +x_79 = lean_ctor_get(x_70, 0); +lean_inc(x_79); +lean_dec(x_70); +if (lean_is_scalar(x_78)) { + x_80 = lean_alloc_ctor(0, 2, 0); +} else { + x_80 = x_78; +} +lean_ctor_set(x_80, 0, x_79); +lean_ctor_set(x_80, 1, x_77); +if (lean_is_scalar(x_76)) { + x_81 = lean_alloc_ctor(0, 2, 0); +} else { + x_81 = x_76; +} +lean_ctor_set(x_81, 0, x_80); +lean_ctor_set(x_81, 1, x_75); +return x_81; +} +} +else +{ +lean_object* x_82; lean_object* x_83; lean_object* x_84; lean_object* x_85; +lean_dec(x_63); +lean_dec(x_4); +lean_dec(x_3); +x_82 = lean_ctor_get(x_67, 0); 
+lean_inc(x_82); +x_83 = lean_ctor_get(x_67, 1); +lean_inc(x_83); +if (lean_is_exclusive(x_67)) { + lean_ctor_release(x_67, 0); + lean_ctor_release(x_67, 1); + x_84 = x_67; +} else { + lean_dec_ref(x_67); + x_84 = lean_box(0); +} +if (lean_is_scalar(x_84)) { + x_85 = lean_alloc_ctor(1, 2, 0); +} else { + x_85 = x_84; +} +lean_ctor_set(x_85, 0, x_82); +lean_ctor_set(x_85, 1, x_83); +return x_85; +} +} +else +{ +lean_object* x_86; lean_object* x_87; lean_object* x_88; +lean_dec(x_59); +lean_dec(x_53); +lean_dec(x_4); +lean_dec(x_3); +x_86 = lean_box(0); +x_87 = lean_alloc_ctor(0, 2, 0); +lean_ctor_set(x_87, 0, x_86); +lean_ctor_set(x_87, 1, x_2); +x_88 = lean_alloc_ctor(0, 2, 0); +lean_ctor_set(x_88, 0, x_87); +lean_ctor_set(x_88, 1, x_52); +return x_88; +} +} +} +} +} +LEAN_EXPORT lean_object* l_List_forIn_x27_loop___at_Lean_IR_ToIR_lowerEnumToScalarType___spec__2___boxed(lean_object* x_1, lean_object* x_2, lean_object* x_3, lean_object* x_4, lean_object* x_5, lean_object* x_6, lean_object* x_7, lean_object* x_8, lean_object* x_9, lean_object* x_10, lean_object* x_11, lean_object* x_12) { +_start: +{ +lean_object* x_13; +x_13 = l_List_forIn_x27_loop___at_Lean_IR_ToIR_lowerEnumToScalarType___spec__2(x_1, x_2, x_3, x_4, x_5, x_6, x_7, x_8, x_9, x_10, x_11, x_12); +lean_dec(x_5); +lean_dec(x_3); +lean_dec(x_2); +return x_13; +} +} +LEAN_EXPORT lean_object* l_Lean_IR_ToIR_lowerEnumToScalarType___lambda__1___boxed(lean_object* x_1, lean_object* x_2, lean_object* x_3, lean_object* x_4, lean_object* x_5, lean_object* x_6, lean_object* x_7) { +_start: +{ +lean_object* x_8; +x_8 = l_Lean_IR_ToIR_lowerEnumToScalarType___lambda__1(x_1, x_2, x_3, x_4, x_5, x_6, x_7); +lean_dec(x_6); +lean_dec(x_5); +lean_dec(x_3); +lean_dec(x_1); +return x_8; +} +} +static lean_object* _init_l_panic___at_Lean_IR_ToIR_lowerType___spec__1___closed__1() { +_start: +{ +lean_object* x_1; lean_object* x_2; lean_object* x_3; +x_1 = l_panic___at_Lean_IR_ToIR_lowerEnumToScalarType___spec__1___closed__1; +x_2 = l_Lean_IR_instInhabitedIRType; +x_3 = l_instInhabitedOfMonad___rarg(x_1, x_2); +return x_3; +} +} +LEAN_EXPORT lean_object* l_panic___at_Lean_IR_ToIR_lowerType___spec__1(lean_object* x_1, lean_object* x_2, lean_object* x_3, lean_object* x_4, lean_object* x_5) { +_start: +{ +lean_object* x_6; lean_object* x_7; lean_object* x_8; +x_6 = l_panic___at_Lean_IR_ToIR_lowerType___spec__1___closed__1; +x_7 = lean_panic_fn(x_6, x_1); +x_8 = lean_apply_4(x_7, x_2, x_3, x_4, x_5); +return x_8; +} +} +static lean_object* _init_l_Lean_IR_ToIR_lowerType___closed__1() { +_start: +{ +lean_object* x_1; +x_1 = lean_mk_string_unchecked("Lean.IR.ToIR.lowerType", 22, 22); +return x_1; +} +} +static lean_object* _init_l_Lean_IR_ToIR_lowerType___closed__2() { +_start: +{ +lean_object* x_1; +x_1 = lean_mk_string_unchecked("invalid type", 12, 12); +return x_1; +} +} +static lean_object* _init_l_Lean_IR_ToIR_lowerType___closed__3() { +_start: +{ +lean_object* x_1; lean_object* x_2; lean_object* x_3; lean_object* x_4; lean_object* x_5; lean_object* x_6; +x_1 = l_List_forIn_x27_loop___at_Lean_IR_ToIR_lowerEnumToScalarType___spec__2___closed__1; +x_2 = l_Lean_IR_ToIR_lowerType___closed__1; +x_3 = lean_unsigned_to_nat(117u); +x_4 = lean_unsigned_to_nat(9u); +x_5 = l_Lean_IR_ToIR_lowerType___closed__2; +x_6 = l___private_Init_Util_0__mkPanicMessageWithDecl(x_1, x_2, x_3, x_4, x_5); +return x_6; +} +} +static lean_object* _init_l_Lean_IR_ToIR_lowerType___closed__4() { +_start: +{ +lean_object* x_1; +x_1 = lean_mk_string_unchecked("UInt8", 5, 5); +return 
x_1; +} +} +static lean_object* _init_l_Lean_IR_ToIR_lowerType___closed__5() { +_start: +{ +lean_object* x_1; +x_1 = lean_mk_string_unchecked("Bool", 4, 4); +return x_1; +} +} +static lean_object* _init_l_Lean_IR_ToIR_lowerType___closed__6() { +_start: +{ +lean_object* x_1; +x_1 = lean_mk_string_unchecked("UInt16", 6, 6); +return x_1; +} +} +static lean_object* _init_l_Lean_IR_ToIR_lowerType___closed__7() { +_start: +{ +lean_object* x_1; +x_1 = lean_mk_string_unchecked("UInt32", 6, 6); +return x_1; +} +} +static lean_object* _init_l_Lean_IR_ToIR_lowerType___closed__8() { +_start: +{ +lean_object* x_1; +x_1 = lean_mk_string_unchecked("UInt64", 6, 6); +return x_1; +} +} +static lean_object* _init_l_Lean_IR_ToIR_lowerType___closed__9() { +_start: +{ +lean_object* x_1; +x_1 = lean_mk_string_unchecked("USize", 5, 5); +return x_1; +} +} +static lean_object* _init_l_Lean_IR_ToIR_lowerType___closed__10() { +_start: +{ +lean_object* x_1; +x_1 = lean_mk_string_unchecked("Float", 5, 5); +return x_1; +} +} +static lean_object* _init_l_Lean_IR_ToIR_lowerType___closed__11() { +_start: +{ +lean_object* x_1; +x_1 = lean_mk_string_unchecked("Float32", 7, 7); +return x_1; +} +} +static lean_object* _init_l_Lean_IR_ToIR_lowerType___closed__12() { +_start: +{ +lean_object* x_1; +x_1 = lean_mk_string_unchecked("lcErased", 8, 8); +return x_1; +} +} +LEAN_EXPORT lean_object* l_Lean_IR_ToIR_lowerType(lean_object* x_1, lean_object* x_2, lean_object* x_3, lean_object* x_4, lean_object* x_5) { +_start: +{ +switch (lean_obj_tag(x_1)) { +case 4: +{ +lean_object* x_6; +x_6 = lean_ctor_get(x_1, 0); +lean_inc(x_6); +lean_dec(x_1); +if (lean_obj_tag(x_6) == 1) +{ +lean_object* x_7; +x_7 = lean_ctor_get(x_6, 0); +lean_inc(x_7); +if (lean_obj_tag(x_7) == 0) +{ +lean_object* x_8; lean_object* x_9; uint8_t x_10; +x_8 = lean_ctor_get(x_6, 1); +lean_inc(x_8); +x_9 = l_Lean_IR_ToIR_lowerType___closed__4; +x_10 = lean_string_dec_eq(x_8, x_9); +if (x_10 == 0) +{ +lean_object* x_11; uint8_t x_12; +x_11 = l_Lean_IR_ToIR_lowerType___closed__5; +x_12 = lean_string_dec_eq(x_8, x_11); +if (x_12 == 0) +{ +lean_object* x_13; uint8_t x_14; +x_13 = l_Lean_IR_ToIR_lowerType___closed__6; +x_14 = lean_string_dec_eq(x_8, x_13); +if (x_14 == 0) +{ +lean_object* x_15; uint8_t x_16; +x_15 = l_Lean_IR_ToIR_lowerType___closed__7; +x_16 = lean_string_dec_eq(x_8, x_15); +if (x_16 == 0) +{ +lean_object* x_17; uint8_t x_18; +x_17 = l_Lean_IR_ToIR_lowerType___closed__8; +x_18 = lean_string_dec_eq(x_8, x_17); +if (x_18 == 0) +{ +lean_object* x_19; uint8_t x_20; +x_19 = l_Lean_IR_ToIR_lowerType___closed__9; +x_20 = lean_string_dec_eq(x_8, x_19); +if (x_20 == 0) +{ +lean_object* x_21; uint8_t x_22; +x_21 = l_Lean_IR_ToIR_lowerType___closed__10; +x_22 = lean_string_dec_eq(x_8, x_21); +if (x_22 == 0) +{ +lean_object* x_23; uint8_t x_24; +x_23 = l_Lean_IR_ToIR_lowerType___closed__11; +x_24 = lean_string_dec_eq(x_8, x_23); +if (x_24 == 0) +{ +lean_object* x_25; uint8_t x_26; +x_25 = l_Lean_IR_ToIR_lowerType___closed__12; +x_26 = lean_string_dec_eq(x_8, x_25); +lean_dec(x_8); +if (x_26 == 0) +{ +lean_object* x_27; +x_27 = l_Lean_IR_ToIR_lowerEnumToScalarType(x_6, x_2, x_3, x_4, x_5); +if (lean_obj_tag(x_27) == 0) +{ +lean_object* x_28; lean_object* x_29; +x_28 = lean_ctor_get(x_27, 0); +lean_inc(x_28); +x_29 = lean_ctor_get(x_28, 0); +lean_inc(x_29); +if (lean_obj_tag(x_29) == 0) +{ +uint8_t x_30; +x_30 = !lean_is_exclusive(x_27); +if (x_30 == 0) +{ +lean_object* x_31; uint8_t x_32; +x_31 = lean_ctor_get(x_27, 0); +lean_dec(x_31); +x_32 = 
!lean_is_exclusive(x_28); +if (x_32 == 0) +{ +lean_object* x_33; lean_object* x_34; +x_33 = lean_ctor_get(x_28, 0); +lean_dec(x_33); +x_34 = lean_box(7); +lean_ctor_set(x_28, 0, x_34); +return x_27; +} +else +{ +lean_object* x_35; lean_object* x_36; lean_object* x_37; +x_35 = lean_ctor_get(x_28, 1); +lean_inc(x_35); +lean_dec(x_28); +x_36 = lean_box(7); +x_37 = lean_alloc_ctor(0, 2, 0); +lean_ctor_set(x_37, 0, x_36); +lean_ctor_set(x_37, 1, x_35); +lean_ctor_set(x_27, 0, x_37); +return x_27; +} +} +else +{ +lean_object* x_38; lean_object* x_39; lean_object* x_40; lean_object* x_41; lean_object* x_42; lean_object* x_43; +x_38 = lean_ctor_get(x_27, 1); +lean_inc(x_38); +lean_dec(x_27); +x_39 = lean_ctor_get(x_28, 1); +lean_inc(x_39); +if (lean_is_exclusive(x_28)) { + lean_ctor_release(x_28, 0); + lean_ctor_release(x_28, 1); + x_40 = x_28; +} else { + lean_dec_ref(x_28); + x_40 = lean_box(0); +} +x_41 = lean_box(7); +if (lean_is_scalar(x_40)) { + x_42 = lean_alloc_ctor(0, 2, 0); +} else { + x_42 = x_40; +} +lean_ctor_set(x_42, 0, x_41); +lean_ctor_set(x_42, 1, x_39); +x_43 = lean_alloc_ctor(0, 2, 0); +lean_ctor_set(x_43, 0, x_42); +lean_ctor_set(x_43, 1, x_38); +return x_43; +} +} +else +{ +uint8_t x_44; +x_44 = !lean_is_exclusive(x_27); +if (x_44 == 0) +{ +lean_object* x_45; uint8_t x_46; +x_45 = lean_ctor_get(x_27, 0); +lean_dec(x_45); +x_46 = !lean_is_exclusive(x_28); +if (x_46 == 0) +{ +lean_object* x_47; lean_object* x_48; +x_47 = lean_ctor_get(x_28, 0); +lean_dec(x_47); +x_48 = lean_ctor_get(x_29, 0); +lean_inc(x_48); +lean_dec(x_29); +lean_ctor_set(x_28, 0, x_48); +return x_27; +} +else +{ +lean_object* x_49; lean_object* x_50; lean_object* x_51; +x_49 = lean_ctor_get(x_28, 1); +lean_inc(x_49); +lean_dec(x_28); +x_50 = lean_ctor_get(x_29, 0); +lean_inc(x_50); +lean_dec(x_29); +x_51 = lean_alloc_ctor(0, 2, 0); +lean_ctor_set(x_51, 0, x_50); +lean_ctor_set(x_51, 1, x_49); +lean_ctor_set(x_27, 0, x_51); +return x_27; +} +} +else +{ +lean_object* x_52; lean_object* x_53; lean_object* x_54; lean_object* x_55; lean_object* x_56; lean_object* x_57; +x_52 = lean_ctor_get(x_27, 1); +lean_inc(x_52); +lean_dec(x_27); +x_53 = lean_ctor_get(x_28, 1); +lean_inc(x_53); +if (lean_is_exclusive(x_28)) { + lean_ctor_release(x_28, 0); + lean_ctor_release(x_28, 1); + x_54 = x_28; +} else { + lean_dec_ref(x_28); + x_54 = lean_box(0); +} +x_55 = lean_ctor_get(x_29, 0); +lean_inc(x_55); +lean_dec(x_29); +if (lean_is_scalar(x_54)) { + x_56 = lean_alloc_ctor(0, 2, 0); +} else { + x_56 = x_54; +} +lean_ctor_set(x_56, 0, x_55); +lean_ctor_set(x_56, 1, x_53); +x_57 = lean_alloc_ctor(0, 2, 0); +lean_ctor_set(x_57, 0, x_56); +lean_ctor_set(x_57, 1, x_52); +return x_57; +} +} +} +else +{ +uint8_t x_58; +x_58 = !lean_is_exclusive(x_27); +if (x_58 == 0) +{ +return x_27; +} +else +{ +lean_object* x_59; lean_object* x_60; lean_object* x_61; +x_59 = lean_ctor_get(x_27, 0); +x_60 = lean_ctor_get(x_27, 1); +lean_inc(x_60); +lean_inc(x_59); +lean_dec(x_27); +x_61 = lean_alloc_ctor(1, 2, 0); +lean_ctor_set(x_61, 0, x_59); +lean_ctor_set(x_61, 1, x_60); +return x_61; +} +} +} +else +{ +lean_object* x_62; lean_object* x_63; lean_object* x_64; +lean_dec(x_6); +lean_dec(x_4); +lean_dec(x_3); +x_62 = lean_box(6); +x_63 = lean_alloc_ctor(0, 2, 0); +lean_ctor_set(x_63, 0, x_62); +lean_ctor_set(x_63, 1, x_2); +x_64 = lean_alloc_ctor(0, 2, 0); +lean_ctor_set(x_64, 0, x_63); +lean_ctor_set(x_64, 1, x_5); +return x_64; +} +} +else +{ +lean_object* x_65; lean_object* x_66; lean_object* x_67; +lean_dec(x_8); +lean_dec(x_6); 
+lean_dec(x_4); +lean_dec(x_3); +x_65 = lean_box(9); +x_66 = lean_alloc_ctor(0, 2, 0); +lean_ctor_set(x_66, 0, x_65); +lean_ctor_set(x_66, 1, x_2); +x_67 = lean_alloc_ctor(0, 2, 0); +lean_ctor_set(x_67, 0, x_66); +lean_ctor_set(x_67, 1, x_5); +return x_67; +} +} +else +{ +lean_object* x_68; lean_object* x_69; lean_object* x_70; +lean_dec(x_8); +lean_dec(x_6); +lean_dec(x_4); +lean_dec(x_3); +x_68 = lean_box(0); +x_69 = lean_alloc_ctor(0, 2, 0); +lean_ctor_set(x_69, 0, x_68); +lean_ctor_set(x_69, 1, x_2); +x_70 = lean_alloc_ctor(0, 2, 0); +lean_ctor_set(x_70, 0, x_69); +lean_ctor_set(x_70, 1, x_5); +return x_70; +} +} +else +{ +lean_object* x_71; lean_object* x_72; lean_object* x_73; +lean_dec(x_8); +lean_dec(x_6); +lean_dec(x_4); +lean_dec(x_3); +x_71 = lean_box(5); +x_72 = lean_alloc_ctor(0, 2, 0); +lean_ctor_set(x_72, 0, x_71); +lean_ctor_set(x_72, 1, x_2); +x_73 = lean_alloc_ctor(0, 2, 0); +lean_ctor_set(x_73, 0, x_72); +lean_ctor_set(x_73, 1, x_5); +return x_73; +} +} +else +{ +lean_object* x_74; lean_object* x_75; lean_object* x_76; +lean_dec(x_8); +lean_dec(x_6); +lean_dec(x_4); +lean_dec(x_3); +x_74 = lean_box(4); +x_75 = lean_alloc_ctor(0, 2, 0); +lean_ctor_set(x_75, 0, x_74); +lean_ctor_set(x_75, 1, x_2); +x_76 = lean_alloc_ctor(0, 2, 0); +lean_ctor_set(x_76, 0, x_75); +lean_ctor_set(x_76, 1, x_5); +return x_76; +} +} +else +{ +lean_object* x_77; lean_object* x_78; lean_object* x_79; +lean_dec(x_8); +lean_dec(x_6); +lean_dec(x_4); +lean_dec(x_3); +x_77 = lean_box(3); +x_78 = lean_alloc_ctor(0, 2, 0); +lean_ctor_set(x_78, 0, x_77); +lean_ctor_set(x_78, 1, x_2); +x_79 = lean_alloc_ctor(0, 2, 0); +lean_ctor_set(x_79, 0, x_78); +lean_ctor_set(x_79, 1, x_5); +return x_79; +} +} +else +{ +lean_object* x_80; lean_object* x_81; lean_object* x_82; +lean_dec(x_8); +lean_dec(x_6); +lean_dec(x_4); +lean_dec(x_3); +x_80 = lean_box(2); +x_81 = lean_alloc_ctor(0, 2, 0); +lean_ctor_set(x_81, 0, x_80); +lean_ctor_set(x_81, 1, x_2); +x_82 = lean_alloc_ctor(0, 2, 0); +lean_ctor_set(x_82, 0, x_81); +lean_ctor_set(x_82, 1, x_5); +return x_82; +} +} +else +{ +lean_object* x_83; lean_object* x_84; lean_object* x_85; +lean_dec(x_8); +lean_dec(x_6); +lean_dec(x_4); +lean_dec(x_3); +x_83 = lean_box(1); +x_84 = lean_alloc_ctor(0, 2, 0); +lean_ctor_set(x_84, 0, x_83); +lean_ctor_set(x_84, 1, x_2); +x_85 = lean_alloc_ctor(0, 2, 0); +lean_ctor_set(x_85, 0, x_84); +lean_ctor_set(x_85, 1, x_5); +return x_85; +} +} +else +{ +lean_object* x_86; lean_object* x_87; lean_object* x_88; +lean_dec(x_8); +lean_dec(x_6); +lean_dec(x_4); +lean_dec(x_3); +x_86 = lean_box(1); +x_87 = lean_alloc_ctor(0, 2, 0); +lean_ctor_set(x_87, 0, x_86); +lean_ctor_set(x_87, 1, x_2); +x_88 = lean_alloc_ctor(0, 2, 0); +lean_ctor_set(x_88, 0, x_87); +lean_ctor_set(x_88, 1, x_5); +return x_88; +} +} +else +{ +lean_object* x_89; +lean_dec(x_7); +x_89 = l_Lean_IR_ToIR_lowerEnumToScalarType(x_6, x_2, x_3, x_4, x_5); +if (lean_obj_tag(x_89) == 0) +{ +lean_object* x_90; lean_object* x_91; +x_90 = lean_ctor_get(x_89, 0); +lean_inc(x_90); +x_91 = lean_ctor_get(x_90, 0); +lean_inc(x_91); +if (lean_obj_tag(x_91) == 0) +{ +uint8_t x_92; +x_92 = !lean_is_exclusive(x_89); +if (x_92 == 0) +{ +lean_object* x_93; uint8_t x_94; +x_93 = lean_ctor_get(x_89, 0); +lean_dec(x_93); +x_94 = !lean_is_exclusive(x_90); +if (x_94 == 0) +{ +lean_object* x_95; lean_object* x_96; +x_95 = lean_ctor_get(x_90, 0); +lean_dec(x_95); +x_96 = lean_box(7); +lean_ctor_set(x_90, 0, x_96); +return x_89; +} +else +{ +lean_object* x_97; lean_object* x_98; lean_object* x_99; +x_97 = 
lean_ctor_get(x_90, 1); +lean_inc(x_97); +lean_dec(x_90); +x_98 = lean_box(7); +x_99 = lean_alloc_ctor(0, 2, 0); +lean_ctor_set(x_99, 0, x_98); +lean_ctor_set(x_99, 1, x_97); +lean_ctor_set(x_89, 0, x_99); +return x_89; +} +} +else +{ +lean_object* x_100; lean_object* x_101; lean_object* x_102; lean_object* x_103; lean_object* x_104; lean_object* x_105; +x_100 = lean_ctor_get(x_89, 1); +lean_inc(x_100); +lean_dec(x_89); +x_101 = lean_ctor_get(x_90, 1); +lean_inc(x_101); +if (lean_is_exclusive(x_90)) { + lean_ctor_release(x_90, 0); + lean_ctor_release(x_90, 1); + x_102 = x_90; +} else { + lean_dec_ref(x_90); + x_102 = lean_box(0); +} +x_103 = lean_box(7); +if (lean_is_scalar(x_102)) { + x_104 = lean_alloc_ctor(0, 2, 0); +} else { + x_104 = x_102; +} +lean_ctor_set(x_104, 0, x_103); +lean_ctor_set(x_104, 1, x_101); +x_105 = lean_alloc_ctor(0, 2, 0); +lean_ctor_set(x_105, 0, x_104); +lean_ctor_set(x_105, 1, x_100); +return x_105; +} +} +else +{ +uint8_t x_106; +x_106 = !lean_is_exclusive(x_89); +if (x_106 == 0) +{ +lean_object* x_107; uint8_t x_108; +x_107 = lean_ctor_get(x_89, 0); +lean_dec(x_107); +x_108 = !lean_is_exclusive(x_90); +if (x_108 == 0) +{ +lean_object* x_109; lean_object* x_110; +x_109 = lean_ctor_get(x_90, 0); +lean_dec(x_109); +x_110 = lean_ctor_get(x_91, 0); +lean_inc(x_110); +lean_dec(x_91); +lean_ctor_set(x_90, 0, x_110); +return x_89; +} +else +{ +lean_object* x_111; lean_object* x_112; lean_object* x_113; +x_111 = lean_ctor_get(x_90, 1); +lean_inc(x_111); +lean_dec(x_90); +x_112 = lean_ctor_get(x_91, 0); +lean_inc(x_112); +lean_dec(x_91); +x_113 = lean_alloc_ctor(0, 2, 0); +lean_ctor_set(x_113, 0, x_112); +lean_ctor_set(x_113, 1, x_111); +lean_ctor_set(x_89, 0, x_113); +return x_89; +} +} +else +{ +lean_object* x_114; lean_object* x_115; lean_object* x_116; lean_object* x_117; lean_object* x_118; lean_object* x_119; +x_114 = lean_ctor_get(x_89, 1); +lean_inc(x_114); +lean_dec(x_89); +x_115 = lean_ctor_get(x_90, 1); +lean_inc(x_115); +if (lean_is_exclusive(x_90)) { + lean_ctor_release(x_90, 0); + lean_ctor_release(x_90, 1); + x_116 = x_90; +} else { + lean_dec_ref(x_90); + x_116 = lean_box(0); +} +x_117 = lean_ctor_get(x_91, 0); +lean_inc(x_117); +lean_dec(x_91); +if (lean_is_scalar(x_116)) { + x_118 = lean_alloc_ctor(0, 2, 0); +} else { + x_118 = x_116; +} +lean_ctor_set(x_118, 0, x_117); +lean_ctor_set(x_118, 1, x_115); +x_119 = lean_alloc_ctor(0, 2, 0); +lean_ctor_set(x_119, 0, x_118); +lean_ctor_set(x_119, 1, x_114); +return x_119; +} +} +} +else +{ +uint8_t x_120; +x_120 = !lean_is_exclusive(x_89); +if (x_120 == 0) +{ +return x_89; +} +else +{ +lean_object* x_121; lean_object* x_122; lean_object* x_123; +x_121 = lean_ctor_get(x_89, 0); +x_122 = lean_ctor_get(x_89, 1); +lean_inc(x_122); +lean_inc(x_121); +lean_dec(x_89); +x_123 = lean_alloc_ctor(1, 2, 0); +lean_ctor_set(x_123, 0, x_121); +lean_ctor_set(x_123, 1, x_122); +return x_123; +} +} +} +} +else +{ +lean_object* x_124; +x_124 = l_Lean_IR_ToIR_lowerEnumToScalarType(x_6, x_2, x_3, x_4, x_5); +if (lean_obj_tag(x_124) == 0) +{ +lean_object* x_125; lean_object* x_126; +x_125 = lean_ctor_get(x_124, 0); +lean_inc(x_125); +x_126 = lean_ctor_get(x_125, 0); +lean_inc(x_126); +if (lean_obj_tag(x_126) == 0) +{ +uint8_t x_127; +x_127 = !lean_is_exclusive(x_124); +if (x_127 == 0) +{ +lean_object* x_128; uint8_t x_129; +x_128 = lean_ctor_get(x_124, 0); +lean_dec(x_128); +x_129 = !lean_is_exclusive(x_125); +if (x_129 == 0) +{ +lean_object* x_130; lean_object* x_131; +x_130 = lean_ctor_get(x_125, 0); +lean_dec(x_130); +x_131 = 
lean_box(7); +lean_ctor_set(x_125, 0, x_131); +return x_124; +} +else +{ +lean_object* x_132; lean_object* x_133; lean_object* x_134; +x_132 = lean_ctor_get(x_125, 1); +lean_inc(x_132); +lean_dec(x_125); +x_133 = lean_box(7); +x_134 = lean_alloc_ctor(0, 2, 0); +lean_ctor_set(x_134, 0, x_133); +lean_ctor_set(x_134, 1, x_132); +lean_ctor_set(x_124, 0, x_134); +return x_124; +} +} +else +{ +lean_object* x_135; lean_object* x_136; lean_object* x_137; lean_object* x_138; lean_object* x_139; lean_object* x_140; +x_135 = lean_ctor_get(x_124, 1); +lean_inc(x_135); +lean_dec(x_124); +x_136 = lean_ctor_get(x_125, 1); +lean_inc(x_136); +if (lean_is_exclusive(x_125)) { + lean_ctor_release(x_125, 0); + lean_ctor_release(x_125, 1); + x_137 = x_125; +} else { + lean_dec_ref(x_125); + x_137 = lean_box(0); +} +x_138 = lean_box(7); +if (lean_is_scalar(x_137)) { + x_139 = lean_alloc_ctor(0, 2, 0); +} else { + x_139 = x_137; +} +lean_ctor_set(x_139, 0, x_138); +lean_ctor_set(x_139, 1, x_136); +x_140 = lean_alloc_ctor(0, 2, 0); +lean_ctor_set(x_140, 0, x_139); +lean_ctor_set(x_140, 1, x_135); +return x_140; +} +} +else +{ +uint8_t x_141; +x_141 = !lean_is_exclusive(x_124); +if (x_141 == 0) +{ +lean_object* x_142; uint8_t x_143; +x_142 = lean_ctor_get(x_124, 0); +lean_dec(x_142); +x_143 = !lean_is_exclusive(x_125); +if (x_143 == 0) +{ +lean_object* x_144; lean_object* x_145; +x_144 = lean_ctor_get(x_125, 0); +lean_dec(x_144); +x_145 = lean_ctor_get(x_126, 0); +lean_inc(x_145); +lean_dec(x_126); +lean_ctor_set(x_125, 0, x_145); +return x_124; +} +else +{ +lean_object* x_146; lean_object* x_147; lean_object* x_148; +x_146 = lean_ctor_get(x_125, 1); +lean_inc(x_146); +lean_dec(x_125); +x_147 = lean_ctor_get(x_126, 0); +lean_inc(x_147); +lean_dec(x_126); +x_148 = lean_alloc_ctor(0, 2, 0); +lean_ctor_set(x_148, 0, x_147); +lean_ctor_set(x_148, 1, x_146); +lean_ctor_set(x_124, 0, x_148); +return x_124; +} +} +else +{ +lean_object* x_149; lean_object* x_150; lean_object* x_151; lean_object* x_152; lean_object* x_153; lean_object* x_154; +x_149 = lean_ctor_get(x_124, 1); +lean_inc(x_149); +lean_dec(x_124); +x_150 = lean_ctor_get(x_125, 1); +lean_inc(x_150); +if (lean_is_exclusive(x_125)) { + lean_ctor_release(x_125, 0); + lean_ctor_release(x_125, 1); + x_151 = x_125; +} else { + lean_dec_ref(x_125); + x_151 = lean_box(0); +} +x_152 = lean_ctor_get(x_126, 0); +lean_inc(x_152); +lean_dec(x_126); +if (lean_is_scalar(x_151)) { + x_153 = lean_alloc_ctor(0, 2, 0); +} else { + x_153 = x_151; +} +lean_ctor_set(x_153, 0, x_152); +lean_ctor_set(x_153, 1, x_150); +x_154 = lean_alloc_ctor(0, 2, 0); +lean_ctor_set(x_154, 0, x_153); +lean_ctor_set(x_154, 1, x_149); +return x_154; +} +} +} +else +{ +uint8_t x_155; +x_155 = !lean_is_exclusive(x_124); +if (x_155 == 0) +{ +return x_124; +} +else +{ +lean_object* x_156; lean_object* x_157; lean_object* x_158; +x_156 = lean_ctor_get(x_124, 0); +x_157 = lean_ctor_get(x_124, 1); +lean_inc(x_157); +lean_inc(x_156); +lean_dec(x_124); +x_158 = lean_alloc_ctor(1, 2, 0); +lean_ctor_set(x_158, 0, x_156); +lean_ctor_set(x_158, 1, x_157); +return x_158; +} +} +} +} +case 5: +{ +lean_object* x_159; lean_object* x_160; +x_159 = lean_ctor_get(x_1, 0); +lean_inc(x_159); +lean_dec(x_1); +x_160 = l_Lean_Expr_headBeta(x_159); +if (lean_obj_tag(x_160) == 4) +{ +lean_object* x_161; lean_object* x_162; +x_161 = lean_ctor_get(x_160, 0); +lean_inc(x_161); +lean_dec(x_160); +x_162 = l_Lean_IR_ToIR_lowerEnumToScalarType(x_161, x_2, x_3, x_4, x_5); +if (lean_obj_tag(x_162) == 0) +{ +lean_object* x_163; 
lean_object* x_164; +x_163 = lean_ctor_get(x_162, 0); +lean_inc(x_163); +x_164 = lean_ctor_get(x_163, 0); +lean_inc(x_164); +if (lean_obj_tag(x_164) == 0) +{ +uint8_t x_165; +x_165 = !lean_is_exclusive(x_162); +if (x_165 == 0) +{ +lean_object* x_166; uint8_t x_167; +x_166 = lean_ctor_get(x_162, 0); +lean_dec(x_166); +x_167 = !lean_is_exclusive(x_163); +if (x_167 == 0) +{ +lean_object* x_168; lean_object* x_169; +x_168 = lean_ctor_get(x_163, 0); +lean_dec(x_168); +x_169 = lean_box(7); +lean_ctor_set(x_163, 0, x_169); +return x_162; +} +else +{ +lean_object* x_170; lean_object* x_171; lean_object* x_172; +x_170 = lean_ctor_get(x_163, 1); +lean_inc(x_170); +lean_dec(x_163); +x_171 = lean_box(7); +x_172 = lean_alloc_ctor(0, 2, 0); +lean_ctor_set(x_172, 0, x_171); +lean_ctor_set(x_172, 1, x_170); +lean_ctor_set(x_162, 0, x_172); +return x_162; +} +} +else +{ +lean_object* x_173; lean_object* x_174; lean_object* x_175; lean_object* x_176; lean_object* x_177; lean_object* x_178; +x_173 = lean_ctor_get(x_162, 1); +lean_inc(x_173); +lean_dec(x_162); +x_174 = lean_ctor_get(x_163, 1); +lean_inc(x_174); +if (lean_is_exclusive(x_163)) { + lean_ctor_release(x_163, 0); + lean_ctor_release(x_163, 1); + x_175 = x_163; +} else { + lean_dec_ref(x_163); + x_175 = lean_box(0); +} +x_176 = lean_box(7); +if (lean_is_scalar(x_175)) { + x_177 = lean_alloc_ctor(0, 2, 0); +} else { + x_177 = x_175; +} +lean_ctor_set(x_177, 0, x_176); +lean_ctor_set(x_177, 1, x_174); +x_178 = lean_alloc_ctor(0, 2, 0); +lean_ctor_set(x_178, 0, x_177); +lean_ctor_set(x_178, 1, x_173); +return x_178; +} +} +else +{ +uint8_t x_179; +x_179 = !lean_is_exclusive(x_162); +if (x_179 == 0) +{ +lean_object* x_180; uint8_t x_181; +x_180 = lean_ctor_get(x_162, 0); +lean_dec(x_180); +x_181 = !lean_is_exclusive(x_163); +if (x_181 == 0) +{ +lean_object* x_182; lean_object* x_183; +x_182 = lean_ctor_get(x_163, 0); +lean_dec(x_182); +x_183 = lean_ctor_get(x_164, 0); +lean_inc(x_183); +lean_dec(x_164); +lean_ctor_set(x_163, 0, x_183); +return x_162; +} +else +{ +lean_object* x_184; lean_object* x_185; lean_object* x_186; +x_184 = lean_ctor_get(x_163, 1); +lean_inc(x_184); +lean_dec(x_163); +x_185 = lean_ctor_get(x_164, 0); +lean_inc(x_185); +lean_dec(x_164); +x_186 = lean_alloc_ctor(0, 2, 0); +lean_ctor_set(x_186, 0, x_185); +lean_ctor_set(x_186, 1, x_184); +lean_ctor_set(x_162, 0, x_186); +return x_162; +} +} +else +{ +lean_object* x_187; lean_object* x_188; lean_object* x_189; lean_object* x_190; lean_object* x_191; lean_object* x_192; +x_187 = lean_ctor_get(x_162, 1); +lean_inc(x_187); +lean_dec(x_162); +x_188 = lean_ctor_get(x_163, 1); +lean_inc(x_188); +if (lean_is_exclusive(x_163)) { + lean_ctor_release(x_163, 0); + lean_ctor_release(x_163, 1); + x_189 = x_163; +} else { + lean_dec_ref(x_163); + x_189 = lean_box(0); +} +x_190 = lean_ctor_get(x_164, 0); +lean_inc(x_190); +lean_dec(x_164); +if (lean_is_scalar(x_189)) { + x_191 = lean_alloc_ctor(0, 2, 0); +} else { + x_191 = x_189; +} +lean_ctor_set(x_191, 0, x_190); +lean_ctor_set(x_191, 1, x_188); +x_192 = lean_alloc_ctor(0, 2, 0); +lean_ctor_set(x_192, 0, x_191); +lean_ctor_set(x_192, 1, x_187); +return x_192; +} +} +} +else +{ +uint8_t x_193; +x_193 = !lean_is_exclusive(x_162); +if (x_193 == 0) +{ +return x_162; +} +else +{ +lean_object* x_194; lean_object* x_195; lean_object* x_196; +x_194 = lean_ctor_get(x_162, 0); +x_195 = lean_ctor_get(x_162, 1); +lean_inc(x_195); +lean_inc(x_194); +lean_dec(x_162); +x_196 = lean_alloc_ctor(1, 2, 0); +lean_ctor_set(x_196, 0, x_194); +lean_ctor_set(x_196, 1, 
x_195); +return x_196; +} +} +} +else +{ +lean_object* x_197; lean_object* x_198; lean_object* x_199; +lean_dec(x_160); +lean_dec(x_4); +lean_dec(x_3); +x_197 = lean_box(7); +x_198 = lean_alloc_ctor(0, 2, 0); +lean_ctor_set(x_198, 0, x_197); +lean_ctor_set(x_198, 1, x_2); +x_199 = lean_alloc_ctor(0, 2, 0); +lean_ctor_set(x_199, 0, x_198); +lean_ctor_set(x_199, 1, x_5); +return x_199; +} +} +case 7: +{ +lean_object* x_200; lean_object* x_201; lean_object* x_202; +lean_dec(x_4); +lean_dec(x_3); +lean_dec(x_1); +x_200 = lean_box(7); +x_201 = lean_alloc_ctor(0, 2, 0); +lean_ctor_set(x_201, 0, x_200); +lean_ctor_set(x_201, 1, x_2); +x_202 = lean_alloc_ctor(0, 2, 0); +lean_ctor_set(x_202, 0, x_201); +lean_ctor_set(x_202, 1, x_5); +return x_202; +} +default: +{ +lean_object* x_203; lean_object* x_204; +lean_dec(x_1); +x_203 = l_Lean_IR_ToIR_lowerType___closed__3; +x_204 = l_panic___at_Lean_IR_ToIR_lowerType___spec__1(x_203, x_2, x_3, x_4, x_5); +return x_204; +} +} +} +} +static lean_object* _init_l_panic___at_Lean_IR_ToIR_getCtorInfo___spec__1___closed__1() { +_start: +{ +lean_object* x_1; lean_object* x_2; +x_1 = lean_unsigned_to_nat(0u); +x_2 = lean_mk_empty_array_with_capacity(x_1); +return x_2; +} +} +static lean_object* _init_l_panic___at_Lean_IR_ToIR_getCtorInfo___spec__1___closed__2() { +_start: +{ +lean_object* x_1; lean_object* x_2; lean_object* x_3; +x_1 = l_Lean_IR_instInhabitedCtorInfo; +x_2 = l_panic___at_Lean_IR_ToIR_getCtorInfo___spec__1___closed__1; +x_3 = lean_alloc_ctor(0, 2, 0); +lean_ctor_set(x_3, 0, x_1); +lean_ctor_set(x_3, 1, x_2); +return x_3; +} +} +static lean_object* _init_l_panic___at_Lean_IR_ToIR_getCtorInfo___spec__1___closed__3() { +_start: +{ +lean_object* x_1; lean_object* x_2; lean_object* x_3; +x_1 = l_panic___at_Lean_IR_ToIR_lowerEnumToScalarType___spec__1___closed__1; +x_2 = l_panic___at_Lean_IR_ToIR_getCtorInfo___spec__1___closed__2; +x_3 = l_instInhabitedOfMonad___rarg(x_1, x_2); +return x_3; +} +} +LEAN_EXPORT lean_object* l_panic___at_Lean_IR_ToIR_getCtorInfo___spec__1(lean_object* x_1, lean_object* x_2, lean_object* x_3, lean_object* x_4, lean_object* x_5) { +_start: +{ +lean_object* x_6; lean_object* x_7; lean_object* x_8; +x_6 = l_panic___at_Lean_IR_ToIR_getCtorInfo___spec__1___closed__3; +x_7 = lean_panic_fn(x_6, x_1); +x_8 = lean_apply_4(x_7, x_2, x_3, x_4, x_5); +return x_8; +} +} +static lean_object* _init_l_Lean_IR_ToIR_getCtorInfo___closed__1() { +_start: +{ +lean_object* x_1; +x_1 = lean_mk_string_unchecked("Lean.IR.ToIR.getCtorInfo", 24, 24); +return x_1; +} +} +static lean_object* _init_l_Lean_IR_ToIR_getCtorInfo___closed__2() { +_start: +{ +lean_object* x_1; +x_1 = lean_mk_string_unchecked("unrecognized constructor", 24, 24); +return x_1; +} +} +static lean_object* _init_l_Lean_IR_ToIR_getCtorInfo___closed__3() { +_start: +{ +lean_object* x_1; lean_object* x_2; lean_object* x_3; lean_object* x_4; lean_object* x_5; lean_object* x_6; +x_1 = l_List_forIn_x27_loop___at_Lean_IR_ToIR_lowerEnumToScalarType___spec__2___closed__1; +x_2 = l_Lean_IR_ToIR_getCtorInfo___closed__1; +x_3 = lean_unsigned_to_nat(130u); +x_4 = lean_unsigned_to_nat(17u); +x_5 = l_Lean_IR_ToIR_getCtorInfo___closed__2; +x_6 = l___private_Init_Util_0__mkPanicMessageWithDecl(x_1, x_2, x_3, x_4, x_5); +return x_6; +} +} +LEAN_EXPORT lean_object* l_Lean_IR_ToIR_getCtorInfo(lean_object* x_1, lean_object* x_2, lean_object* x_3, lean_object* x_4, lean_object* x_5) { +_start: +{ +lean_object* x_6; uint8_t x_7; +x_6 = lean_st_ref_get(x_4, x_5); +x_7 = !lean_is_exclusive(x_6); +if (x_7 == 
0) +{ +lean_object* x_8; lean_object* x_9; lean_object* x_10; lean_object* x_11; +x_8 = lean_ctor_get(x_6, 0); +x_9 = lean_ctor_get(x_6, 1); +x_10 = lean_ctor_get(x_8, 0); +lean_inc(x_10); +lean_dec(x_8); +x_11 = lean_ir_get_ctor_layout(x_10, x_1); +lean_dec(x_10); +if (lean_obj_tag(x_11) == 0) +{ +lean_object* x_12; lean_object* x_13; +lean_dec(x_11); +lean_free_object(x_6); +lean_dec(x_1); +x_12 = l_Lean_IR_ToIR_getCtorInfo___closed__3; +x_13 = l_panic___at_Lean_IR_ToIR_getCtorInfo___spec__1(x_12, x_2, x_3, x_4, x_9); +return x_13; +} +else +{ +lean_object* x_14; uint8_t x_15; +lean_dec(x_4); +lean_dec(x_3); +x_14 = lean_ctor_get(x_11, 0); +lean_inc(x_14); +lean_dec(x_11); +x_15 = !lean_is_exclusive(x_14); +if (x_15 == 0) +{ +lean_object* x_16; lean_object* x_17; lean_object* x_18; lean_object* x_19; lean_object* x_20; +x_16 = lean_ctor_get(x_14, 0); +x_17 = lean_ctor_get(x_14, 1); +lean_ctor_set(x_14, 1, x_16); +lean_ctor_set(x_14, 0, x_1); +x_18 = lean_array_mk(x_17); +x_19 = lean_alloc_ctor(0, 2, 0); +lean_ctor_set(x_19, 0, x_14); +lean_ctor_set(x_19, 1, x_18); +x_20 = lean_alloc_ctor(0, 2, 0); +lean_ctor_set(x_20, 0, x_19); +lean_ctor_set(x_20, 1, x_2); +lean_ctor_set(x_6, 0, x_20); +return x_6; +} +else +{ +lean_object* x_21; lean_object* x_22; lean_object* x_23; lean_object* x_24; lean_object* x_25; lean_object* x_26; lean_object* x_27; lean_object* x_28; lean_object* x_29; +x_21 = lean_ctor_get(x_14, 0); +x_22 = lean_ctor_get(x_14, 1); +x_23 = lean_ctor_get(x_14, 2); +x_24 = lean_ctor_get(x_14, 3); +x_25 = lean_ctor_get(x_14, 4); +lean_inc(x_25); +lean_inc(x_24); +lean_inc(x_23); +lean_inc(x_22); +lean_inc(x_21); +lean_dec(x_14); +x_26 = lean_alloc_ctor(0, 5, 0); +lean_ctor_set(x_26, 0, x_1); +lean_ctor_set(x_26, 1, x_21); +lean_ctor_set(x_26, 2, x_23); +lean_ctor_set(x_26, 3, x_24); +lean_ctor_set(x_26, 4, x_25); +x_27 = lean_array_mk(x_22); +x_28 = lean_alloc_ctor(0, 2, 0); +lean_ctor_set(x_28, 0, x_26); +lean_ctor_set(x_28, 1, x_27); +x_29 = lean_alloc_ctor(0, 2, 0); +lean_ctor_set(x_29, 0, x_28); +lean_ctor_set(x_29, 1, x_2); +lean_ctor_set(x_6, 0, x_29); +return x_6; +} +} +} +else +{ +lean_object* x_30; lean_object* x_31; lean_object* x_32; lean_object* x_33; +x_30 = lean_ctor_get(x_6, 0); +x_31 = lean_ctor_get(x_6, 1); +lean_inc(x_31); +lean_inc(x_30); +lean_dec(x_6); +x_32 = lean_ctor_get(x_30, 0); +lean_inc(x_32); +lean_dec(x_30); +x_33 = lean_ir_get_ctor_layout(x_32, x_1); +lean_dec(x_32); +if (lean_obj_tag(x_33) == 0) +{ +lean_object* x_34; lean_object* x_35; +lean_dec(x_33); +lean_dec(x_1); +x_34 = l_Lean_IR_ToIR_getCtorInfo___closed__3; +x_35 = l_panic___at_Lean_IR_ToIR_getCtorInfo___spec__1(x_34, x_2, x_3, x_4, x_31); +return x_35; +} +else +{ +lean_object* x_36; lean_object* x_37; lean_object* x_38; lean_object* x_39; lean_object* x_40; lean_object* x_41; lean_object* x_42; lean_object* x_43; lean_object* x_44; lean_object* x_45; lean_object* x_46; lean_object* x_47; +lean_dec(x_4); +lean_dec(x_3); +x_36 = lean_ctor_get(x_33, 0); +lean_inc(x_36); +lean_dec(x_33); +x_37 = lean_ctor_get(x_36, 0); +lean_inc(x_37); +x_38 = lean_ctor_get(x_36, 1); +lean_inc(x_38); +x_39 = lean_ctor_get(x_36, 2); +lean_inc(x_39); +x_40 = lean_ctor_get(x_36, 3); +lean_inc(x_40); +x_41 = lean_ctor_get(x_36, 4); +lean_inc(x_41); +if (lean_is_exclusive(x_36)) { + lean_ctor_release(x_36, 0); + lean_ctor_release(x_36, 1); + lean_ctor_release(x_36, 2); + lean_ctor_release(x_36, 3); + lean_ctor_release(x_36, 4); + x_42 = x_36; +} else { + lean_dec_ref(x_36); + x_42 = lean_box(0); +} +if 
(lean_is_scalar(x_42)) { + x_43 = lean_alloc_ctor(0, 5, 0); +} else { + x_43 = x_42; +} +lean_ctor_set(x_43, 0, x_1); +lean_ctor_set(x_43, 1, x_37); +lean_ctor_set(x_43, 2, x_39); +lean_ctor_set(x_43, 3, x_40); +lean_ctor_set(x_43, 4, x_41); +x_44 = lean_array_mk(x_38); +x_45 = lean_alloc_ctor(0, 2, 0); +lean_ctor_set(x_45, 0, x_43); +lean_ctor_set(x_45, 1, x_44); +x_46 = lean_alloc_ctor(0, 2, 0); +lean_ctor_set(x_46, 0, x_45); +lean_ctor_set(x_46, 1, x_2); +x_47 = lean_alloc_ctor(0, 2, 0); +lean_ctor_set(x_47, 0, x_46); +lean_ctor_set(x_47, 1, x_31); +return x_47; +} +} +} +} +LEAN_EXPORT lean_object* l_Std_DHashMap_Internal_AssocList_get_x3f___at_Lean_IR_ToIR_lowerArg___spec__1(lean_object* x_1, lean_object* x_2) { +_start: +{ +if (lean_obj_tag(x_2) == 0) +{ +lean_object* x_3; +x_3 = lean_box(0); +return x_3; +} +else +{ +lean_object* x_4; lean_object* x_5; lean_object* x_6; uint8_t x_7; +x_4 = lean_ctor_get(x_2, 0); +x_5 = lean_ctor_get(x_2, 1); +x_6 = lean_ctor_get(x_2, 2); +x_7 = lean_name_eq(x_4, x_1); +if (x_7 == 0) +{ +x_2 = x_6; +goto _start; +} +else +{ +lean_object* x_9; +lean_inc(x_5); +x_9 = lean_alloc_ctor(1, 1, 0); +lean_ctor_set(x_9, 0, x_5); +return x_9; +} +} +} +} +static lean_object* _init_l_panic___at_Lean_IR_ToIR_lowerArg___spec__2___closed__1() { +_start: +{ +lean_object* x_1; lean_object* x_2; lean_object* x_3; +x_1 = l_panic___at_Lean_IR_ToIR_lowerEnumToScalarType___spec__1___closed__1; +x_2 = l_Lean_IR_instInhabitedArg; +x_3 = l_instInhabitedOfMonad___rarg(x_1, x_2); +return x_3; +} +} +LEAN_EXPORT lean_object* l_panic___at_Lean_IR_ToIR_lowerArg___spec__2(lean_object* x_1, lean_object* x_2, lean_object* x_3, lean_object* x_4, lean_object* x_5) { +_start: +{ +lean_object* x_6; lean_object* x_7; lean_object* x_8; +x_6 = l_panic___at_Lean_IR_ToIR_lowerArg___spec__2___closed__1; +x_7 = lean_panic_fn(x_6, x_1); +x_8 = lean_apply_4(x_7, x_2, x_3, x_4, x_5); +return x_8; +} +} +static lean_object* _init_l_Lean_IR_ToIR_lowerArg___closed__1() { +_start: +{ +lean_object* x_1; +x_1 = lean_mk_string_unchecked("Lean.IR.ToIR.lowerArg", 21, 21); +return x_1; +} +} +static lean_object* _init_l_Lean_IR_ToIR_lowerArg___closed__2() { +_start: +{ +lean_object* x_1; +x_1 = lean_mk_string_unchecked("unexpected value", 16, 16); +return x_1; +} +} +static lean_object* _init_l_Lean_IR_ToIR_lowerArg___closed__3() { +_start: +{ +lean_object* x_1; lean_object* x_2; lean_object* x_3; lean_object* x_4; lean_object* x_5; lean_object* x_6; +x_1 = l_List_forIn_x27_loop___at_Lean_IR_ToIR_lowerEnumToScalarType___spec__2___closed__1; +x_2 = l_Lean_IR_ToIR_lowerArg___closed__1; +x_3 = lean_unsigned_to_nat(138u); +x_4 = lean_unsigned_to_nat(37u); +x_5 = l_Lean_IR_ToIR_lowerArg___closed__2; +x_6 = l___private_Init_Util_0__mkPanicMessageWithDecl(x_1, x_2, x_3, x_4, x_5); +return x_6; +} +} +LEAN_EXPORT lean_object* l_Lean_IR_ToIR_lowerArg(lean_object* x_1, lean_object* x_2, lean_object* x_3, lean_object* x_4, lean_object* x_5) { +_start: +{ +if (lean_obj_tag(x_1) == 1) +{ +lean_object* x_6; uint8_t x_7; +x_6 = lean_ctor_get(x_2, 0); +lean_inc(x_6); +x_7 = !lean_is_exclusive(x_6); +if (x_7 == 0) +{ +lean_object* x_8; lean_object* x_9; lean_object* x_10; lean_object* x_11; uint64_t x_12; uint64_t x_13; uint64_t x_14; uint64_t x_15; uint64_t x_16; uint64_t x_17; uint64_t x_18; size_t x_19; size_t x_20; size_t x_21; size_t x_22; size_t x_23; lean_object* x_24; lean_object* x_25; +x_8 = lean_ctor_get(x_1, 0); +x_9 = lean_ctor_get(x_6, 1); +x_10 = lean_ctor_get(x_6, 0); +lean_dec(x_10); +x_11 = 
lean_array_get_size(x_9); +x_12 = l___private_Lean_Expr_0__Lean_hashFVarId____x40_Lean_Expr___hyg_1730_(x_8); +x_13 = 32; +x_14 = lean_uint64_shift_right(x_12, x_13); +x_15 = lean_uint64_xor(x_12, x_14); +x_16 = 16; +x_17 = lean_uint64_shift_right(x_15, x_16); +x_18 = lean_uint64_xor(x_15, x_17); +x_19 = lean_uint64_to_usize(x_18); +x_20 = lean_usize_of_nat(x_11); +lean_dec(x_11); +x_21 = 1; +x_22 = lean_usize_sub(x_20, x_21); +x_23 = lean_usize_land(x_19, x_22); +x_24 = lean_array_uget(x_9, x_23); +lean_dec(x_9); +x_25 = l_Std_DHashMap_Internal_AssocList_get_x3f___at_Lean_IR_ToIR_lowerArg___spec__1(x_8, x_24); +lean_dec(x_24); +if (lean_obj_tag(x_25) == 0) +{ +lean_object* x_26; lean_object* x_27; +lean_free_object(x_6); +x_26 = l_Lean_IR_ToIR_lowerArg___closed__3; +x_27 = l_panic___at_Lean_IR_ToIR_lowerArg___spec__2(x_26, x_2, x_3, x_4, x_5); +return x_27; +} +else +{ +lean_object* x_28; +x_28 = lean_ctor_get(x_25, 0); +lean_inc(x_28); +lean_dec(x_25); +switch (lean_obj_tag(x_28)) { +case 0: +{ +uint8_t x_29; +lean_dec(x_4); +lean_dec(x_3); +x_29 = !lean_is_exclusive(x_28); +if (x_29 == 0) +{ +lean_object* x_30; +lean_ctor_set(x_6, 1, x_2); +lean_ctor_set(x_6, 0, x_28); +x_30 = lean_alloc_ctor(0, 2, 0); +lean_ctor_set(x_30, 0, x_6); +lean_ctor_set(x_30, 1, x_5); +return x_30; +} +else +{ +lean_object* x_31; lean_object* x_32; lean_object* x_33; +x_31 = lean_ctor_get(x_28, 0); +lean_inc(x_31); +lean_dec(x_28); +x_32 = lean_alloc_ctor(0, 1, 0); +lean_ctor_set(x_32, 0, x_31); +lean_ctor_set(x_6, 1, x_2); +lean_ctor_set(x_6, 0, x_32); +x_33 = lean_alloc_ctor(0, 2, 0); +lean_ctor_set(x_33, 0, x_6); +lean_ctor_set(x_33, 1, x_5); +return x_33; +} +} +case 1: +{ +lean_object* x_34; lean_object* x_35; +lean_dec(x_28); +lean_free_object(x_6); +x_34 = l_Lean_IR_ToIR_lowerArg___closed__3; +x_35 = l_panic___at_Lean_IR_ToIR_lowerArg___spec__2(x_34, x_2, x_3, x_4, x_5); +return x_35; +} +default: +{ +lean_object* x_36; lean_object* x_37; +lean_dec(x_4); +lean_dec(x_3); +x_36 = lean_box(1); +lean_ctor_set(x_6, 1, x_2); +lean_ctor_set(x_6, 0, x_36); +x_37 = lean_alloc_ctor(0, 2, 0); +lean_ctor_set(x_37, 0, x_6); +lean_ctor_set(x_37, 1, x_5); +return x_37; +} +} +} +} +else +{ +lean_object* x_38; lean_object* x_39; lean_object* x_40; uint64_t x_41; uint64_t x_42; uint64_t x_43; uint64_t x_44; uint64_t x_45; uint64_t x_46; uint64_t x_47; size_t x_48; size_t x_49; size_t x_50; size_t x_51; size_t x_52; lean_object* x_53; lean_object* x_54; +x_38 = lean_ctor_get(x_1, 0); +x_39 = lean_ctor_get(x_6, 1); +lean_inc(x_39); +lean_dec(x_6); +x_40 = lean_array_get_size(x_39); +x_41 = l___private_Lean_Expr_0__Lean_hashFVarId____x40_Lean_Expr___hyg_1730_(x_38); +x_42 = 32; +x_43 = lean_uint64_shift_right(x_41, x_42); +x_44 = lean_uint64_xor(x_41, x_43); +x_45 = 16; +x_46 = lean_uint64_shift_right(x_44, x_45); +x_47 = lean_uint64_xor(x_44, x_46); +x_48 = lean_uint64_to_usize(x_47); +x_49 = lean_usize_of_nat(x_40); +lean_dec(x_40); +x_50 = 1; +x_51 = lean_usize_sub(x_49, x_50); +x_52 = lean_usize_land(x_48, x_51); +x_53 = lean_array_uget(x_39, x_52); +lean_dec(x_39); +x_54 = l_Std_DHashMap_Internal_AssocList_get_x3f___at_Lean_IR_ToIR_lowerArg___spec__1(x_38, x_53); +lean_dec(x_53); +if (lean_obj_tag(x_54) == 0) +{ +lean_object* x_55; lean_object* x_56; +x_55 = l_Lean_IR_ToIR_lowerArg___closed__3; +x_56 = l_panic___at_Lean_IR_ToIR_lowerArg___spec__2(x_55, x_2, x_3, x_4, x_5); +return x_56; +} +else +{ +lean_object* x_57; +x_57 = lean_ctor_get(x_54, 0); +lean_inc(x_57); +lean_dec(x_54); +switch (lean_obj_tag(x_57)) { 
+case 0: +{ +lean_object* x_58; lean_object* x_59; lean_object* x_60; lean_object* x_61; lean_object* x_62; +lean_dec(x_4); +lean_dec(x_3); +x_58 = lean_ctor_get(x_57, 0); +lean_inc(x_58); +if (lean_is_exclusive(x_57)) { + lean_ctor_release(x_57, 0); + x_59 = x_57; +} else { + lean_dec_ref(x_57); + x_59 = lean_box(0); +} +if (lean_is_scalar(x_59)) { + x_60 = lean_alloc_ctor(0, 1, 0); +} else { + x_60 = x_59; +} +lean_ctor_set(x_60, 0, x_58); +x_61 = lean_alloc_ctor(0, 2, 0); +lean_ctor_set(x_61, 0, x_60); +lean_ctor_set(x_61, 1, x_2); +x_62 = lean_alloc_ctor(0, 2, 0); +lean_ctor_set(x_62, 0, x_61); +lean_ctor_set(x_62, 1, x_5); +return x_62; +} +case 1: +{ +lean_object* x_63; lean_object* x_64; +lean_dec(x_57); +x_63 = l_Lean_IR_ToIR_lowerArg___closed__3; +x_64 = l_panic___at_Lean_IR_ToIR_lowerArg___spec__2(x_63, x_2, x_3, x_4, x_5); +return x_64; +} +default: +{ +lean_object* x_65; lean_object* x_66; lean_object* x_67; +lean_dec(x_4); +lean_dec(x_3); +x_65 = lean_box(1); +x_66 = lean_alloc_ctor(0, 2, 0); +lean_ctor_set(x_66, 0, x_65); +lean_ctor_set(x_66, 1, x_2); +x_67 = lean_alloc_ctor(0, 2, 0); +lean_ctor_set(x_67, 0, x_66); +lean_ctor_set(x_67, 1, x_5); +return x_67; +} +} +} +} +} +else +{ +lean_object* x_68; lean_object* x_69; lean_object* x_70; +lean_dec(x_4); +lean_dec(x_3); +x_68 = lean_box(1); +x_69 = lean_alloc_ctor(0, 2, 0); +lean_ctor_set(x_69, 0, x_68); +lean_ctor_set(x_69, 1, x_2); +x_70 = lean_alloc_ctor(0, 2, 0); +lean_ctor_set(x_70, 0, x_69); +lean_ctor_set(x_70, 1, x_5); +return x_70; +} +} +} +LEAN_EXPORT lean_object* l_Std_DHashMap_Internal_AssocList_get_x3f___at_Lean_IR_ToIR_lowerArg___spec__1___boxed(lean_object* x_1, lean_object* x_2) { +_start: +{ +lean_object* x_3; +x_3 = l_Std_DHashMap_Internal_AssocList_get_x3f___at_Lean_IR_ToIR_lowerArg___spec__1(x_1, x_2); +lean_dec(x_2); +lean_dec(x_1); +return x_3; +} +} +LEAN_EXPORT lean_object* l_Lean_IR_ToIR_lowerArg___boxed(lean_object* x_1, lean_object* x_2, lean_object* x_3, lean_object* x_4, lean_object* x_5) { +_start: +{ +lean_object* x_6; +x_6 = l_Lean_IR_ToIR_lowerArg(x_1, x_2, x_3, x_4, x_5); +lean_dec(x_1); +return x_6; +} +} +static lean_object* _init_l_Lean_IR_ToIR_instInhabitedTranslatedProj___closed__1() { +_start: +{ +lean_object* x_1; lean_object* x_2; lean_object* x_3; +x_1 = lean_box(0); +x_2 = lean_unsigned_to_nat(0u); +x_3 = lean_alloc_ctor(0, 5, 0); +lean_ctor_set(x_3, 0, x_1); +lean_ctor_set(x_3, 1, x_2); +lean_ctor_set(x_3, 2, x_2); +lean_ctor_set(x_3, 3, x_2); +lean_ctor_set(x_3, 4, x_2); +return x_3; +} +} +static lean_object* _init_l_Lean_IR_ToIR_instInhabitedTranslatedProj___closed__2() { +_start: +{ +lean_object* x_1; lean_object* x_2; lean_object* x_3; +x_1 = l_Lean_IR_ToIR_instInhabitedTranslatedProj___closed__1; +x_2 = l_panic___at_Lean_IR_ToIR_getCtorInfo___spec__1___closed__1; +x_3 = lean_alloc_ctor(0, 2, 0); +lean_ctor_set(x_3, 0, x_1); +lean_ctor_set(x_3, 1, x_2); +return x_3; +} +} +static lean_object* _init_l_Lean_IR_ToIR_instInhabitedTranslatedProj___closed__3() { +_start: +{ +lean_object* x_1; lean_object* x_2; +x_1 = l_Lean_IR_ToIR_instInhabitedTranslatedProj___closed__2; +x_2 = lean_alloc_ctor(0, 1, 0); +lean_ctor_set(x_2, 0, x_1); +return x_2; +} +} +static lean_object* _init_l_Lean_IR_ToIR_instInhabitedTranslatedProj() { +_start: +{ +lean_object* x_1; +x_1 = l_Lean_IR_ToIR_instInhabitedTranslatedProj___closed__3; +return x_1; +} +} +static lean_object* _init_l_Lean_IR_ToIR_lowerProj___closed__1() { +_start: +{ +lean_object* x_1; lean_object* x_2; lean_object* x_3; +x_1 = 
lean_box(1); +x_2 = lean_box(6); +x_3 = lean_alloc_ctor(0, 2, 0); +lean_ctor_set(x_3, 0, x_1); +lean_ctor_set(x_3, 1, x_2); +return x_3; +} +} +LEAN_EXPORT lean_object* l_Lean_IR_ToIR_lowerProj(lean_object* x_1, lean_object* x_2, lean_object* x_3) { +_start: +{ +switch (lean_obj_tag(x_3)) { +case 0: +{ +lean_object* x_4; +lean_dec(x_1); +x_4 = l_Lean_IR_ToIR_lowerProj___closed__1; +return x_4; +} +case 1: +{ +uint8_t x_5; +x_5 = !lean_is_exclusive(x_3); +if (x_5 == 0) +{ +lean_object* x_6; lean_object* x_7; lean_object* x_8; lean_object* x_9; +x_6 = lean_ctor_get(x_3, 0); +x_7 = lean_alloc_ctor(3, 2, 0); +lean_ctor_set(x_7, 0, x_6); +lean_ctor_set(x_7, 1, x_1); +lean_ctor_set_tag(x_3, 0); +lean_ctor_set(x_3, 0, x_7); +x_8 = lean_box(7); +x_9 = lean_alloc_ctor(0, 2, 0); +lean_ctor_set(x_9, 0, x_3); +lean_ctor_set(x_9, 1, x_8); +return x_9; +} +else +{ +lean_object* x_10; lean_object* x_11; lean_object* x_12; lean_object* x_13; lean_object* x_14; +x_10 = lean_ctor_get(x_3, 0); +lean_inc(x_10); +lean_dec(x_3); +x_11 = lean_alloc_ctor(3, 2, 0); +lean_ctor_set(x_11, 0, x_10); +lean_ctor_set(x_11, 1, x_1); +x_12 = lean_alloc_ctor(0, 1, 0); +lean_ctor_set(x_12, 0, x_11); +x_13 = lean_box(7); +x_14 = lean_alloc_ctor(0, 2, 0); +lean_ctor_set(x_14, 0, x_12); +lean_ctor_set(x_14, 1, x_13); +return x_14; +} +} +case 2: +{ +uint8_t x_15; +x_15 = !lean_is_exclusive(x_3); +if (x_15 == 0) +{ +lean_object* x_16; lean_object* x_17; lean_object* x_18; lean_object* x_19; +x_16 = lean_ctor_get(x_3, 0); +x_17 = lean_alloc_ctor(4, 2, 0); +lean_ctor_set(x_17, 0, x_16); +lean_ctor_set(x_17, 1, x_1); +lean_ctor_set_tag(x_3, 0); +lean_ctor_set(x_3, 0, x_17); +x_18 = lean_box(5); +x_19 = lean_alloc_ctor(0, 2, 0); +lean_ctor_set(x_19, 0, x_3); +lean_ctor_set(x_19, 1, x_18); +return x_19; +} +else +{ +lean_object* x_20; lean_object* x_21; lean_object* x_22; lean_object* x_23; lean_object* x_24; +x_20 = lean_ctor_get(x_3, 0); +lean_inc(x_20); +lean_dec(x_3); +x_21 = lean_alloc_ctor(4, 2, 0); +lean_ctor_set(x_21, 0, x_20); +lean_ctor_set(x_21, 1, x_1); +x_22 = lean_alloc_ctor(0, 1, 0); +lean_ctor_set(x_22, 0, x_21); +x_23 = lean_box(5); +x_24 = lean_alloc_ctor(0, 2, 0); +lean_ctor_set(x_24, 0, x_22); +lean_ctor_set(x_24, 1, x_23); +return x_24; +} +} +default: +{ +uint8_t x_25; +x_25 = !lean_is_exclusive(x_3); +if (x_25 == 0) +{ +lean_object* x_26; lean_object* x_27; lean_object* x_28; lean_object* x_29; lean_object* x_30; lean_object* x_31; lean_object* x_32; +x_26 = lean_ctor_get(x_3, 2); +x_27 = lean_ctor_get(x_3, 0); +lean_dec(x_27); +x_28 = lean_ctor_get(x_2, 2); +x_29 = lean_ctor_get(x_2, 3); +x_30 = lean_nat_add(x_28, x_29); +lean_ctor_set_tag(x_3, 5); +lean_ctor_set(x_3, 2, x_1); +lean_ctor_set(x_3, 0, x_30); +x_31 = lean_alloc_ctor(0, 1, 0); +lean_ctor_set(x_31, 0, x_3); +x_32 = lean_alloc_ctor(0, 2, 0); +lean_ctor_set(x_32, 0, x_31); +lean_ctor_set(x_32, 1, x_26); +return x_32; +} +else +{ +lean_object* x_33; lean_object* x_34; lean_object* x_35; lean_object* x_36; lean_object* x_37; lean_object* x_38; lean_object* x_39; lean_object* x_40; +x_33 = lean_ctor_get(x_3, 1); +x_34 = lean_ctor_get(x_3, 2); +lean_inc(x_34); +lean_inc(x_33); +lean_dec(x_3); +x_35 = lean_ctor_get(x_2, 2); +x_36 = lean_ctor_get(x_2, 3); +x_37 = lean_nat_add(x_35, x_36); +x_38 = lean_alloc_ctor(5, 3, 0); +lean_ctor_set(x_38, 0, x_37); +lean_ctor_set(x_38, 1, x_33); +lean_ctor_set(x_38, 2, x_1); +x_39 = lean_alloc_ctor(0, 1, 0); +lean_ctor_set(x_39, 0, x_38); +x_40 = lean_alloc_ctor(0, 2, 0); +lean_ctor_set(x_40, 0, x_39); 
+lean_ctor_set(x_40, 1, x_34); +return x_40; +} +} +} +} +} +LEAN_EXPORT lean_object* l_Lean_IR_ToIR_lowerProj___boxed(lean_object* x_1, lean_object* x_2, lean_object* x_3) { +_start: +{ +lean_object* x_4; +x_4 = l_Lean_IR_ToIR_lowerProj(x_1, x_2, x_3); +lean_dec(x_2); +return x_4; +} +} +LEAN_EXPORT lean_object* l_Lean_IR_ToIR_lowerParam(lean_object* x_1, lean_object* x_2, lean_object* x_3, lean_object* x_4, lean_object* x_5) { +_start: +{ +lean_object* x_6; lean_object* x_7; lean_object* x_8; lean_object* x_9; lean_object* x_10; lean_object* x_11; lean_object* x_12; lean_object* x_13; +x_6 = lean_ctor_get(x_1, 0); +lean_inc(x_6); +x_7 = l_Lean_IR_ToIR_bindVar(x_6, x_2, x_3, x_4, x_5); +x_8 = lean_ctor_get(x_7, 0); +lean_inc(x_8); +x_9 = lean_ctor_get(x_7, 1); +lean_inc(x_9); +lean_dec(x_7); +x_10 = lean_ctor_get(x_8, 0); +lean_inc(x_10); +x_11 = lean_ctor_get(x_8, 1); +lean_inc(x_11); +lean_dec(x_8); +x_12 = lean_ctor_get(x_1, 2); +lean_inc(x_12); +x_13 = l_Lean_IR_ToIR_lowerType(x_12, x_11, x_3, x_4, x_9); +if (lean_obj_tag(x_13) == 0) +{ +uint8_t x_14; +x_14 = !lean_is_exclusive(x_13); +if (x_14 == 0) +{ +lean_object* x_15; uint8_t x_16; +x_15 = lean_ctor_get(x_13, 0); +x_16 = !lean_is_exclusive(x_15); +if (x_16 == 0) +{ +lean_object* x_17; uint8_t x_18; lean_object* x_19; +x_17 = lean_ctor_get(x_15, 0); +x_18 = lean_ctor_get_uint8(x_1, sizeof(void*)*3); +lean_dec(x_1); +x_19 = lean_alloc_ctor(0, 2, 1); +lean_ctor_set(x_19, 0, x_10); +lean_ctor_set(x_19, 1, x_17); +lean_ctor_set_uint8(x_19, sizeof(void*)*2, x_18); +lean_ctor_set(x_15, 0, x_19); +return x_13; +} +else +{ +lean_object* x_20; lean_object* x_21; uint8_t x_22; lean_object* x_23; lean_object* x_24; +x_20 = lean_ctor_get(x_15, 0); +x_21 = lean_ctor_get(x_15, 1); +lean_inc(x_21); +lean_inc(x_20); +lean_dec(x_15); +x_22 = lean_ctor_get_uint8(x_1, sizeof(void*)*3); +lean_dec(x_1); +x_23 = lean_alloc_ctor(0, 2, 1); +lean_ctor_set(x_23, 0, x_10); +lean_ctor_set(x_23, 1, x_20); +lean_ctor_set_uint8(x_23, sizeof(void*)*2, x_22); +x_24 = lean_alloc_ctor(0, 2, 0); +lean_ctor_set(x_24, 0, x_23); +lean_ctor_set(x_24, 1, x_21); +lean_ctor_set(x_13, 0, x_24); +return x_13; +} +} +else +{ +lean_object* x_25; lean_object* x_26; lean_object* x_27; lean_object* x_28; lean_object* x_29; uint8_t x_30; lean_object* x_31; lean_object* x_32; lean_object* x_33; +x_25 = lean_ctor_get(x_13, 0); +x_26 = lean_ctor_get(x_13, 1); +lean_inc(x_26); +lean_inc(x_25); +lean_dec(x_13); +x_27 = lean_ctor_get(x_25, 0); +lean_inc(x_27); +x_28 = lean_ctor_get(x_25, 1); +lean_inc(x_28); +if (lean_is_exclusive(x_25)) { + lean_ctor_release(x_25, 0); + lean_ctor_release(x_25, 1); + x_29 = x_25; +} else { + lean_dec_ref(x_25); + x_29 = lean_box(0); +} +x_30 = lean_ctor_get_uint8(x_1, sizeof(void*)*3); +lean_dec(x_1); +x_31 = lean_alloc_ctor(0, 2, 1); +lean_ctor_set(x_31, 0, x_10); +lean_ctor_set(x_31, 1, x_27); +lean_ctor_set_uint8(x_31, sizeof(void*)*2, x_30); +if (lean_is_scalar(x_29)) { + x_32 = lean_alloc_ctor(0, 2, 0); +} else { + x_32 = x_29; +} +lean_ctor_set(x_32, 0, x_31); +lean_ctor_set(x_32, 1, x_28); +x_33 = lean_alloc_ctor(0, 2, 0); +lean_ctor_set(x_33, 0, x_32); +lean_ctor_set(x_33, 1, x_26); +return x_33; +} +} +else +{ +uint8_t x_34; +lean_dec(x_10); +lean_dec(x_1); +x_34 = !lean_is_exclusive(x_13); +if (x_34 == 0) +{ +return x_13; +} +else +{ +lean_object* x_35; lean_object* x_36; lean_object* x_37; +x_35 = lean_ctor_get(x_13, 0); +x_36 = lean_ctor_get(x_13, 1); +lean_inc(x_36); +lean_inc(x_35); +lean_dec(x_13); +x_37 = lean_alloc_ctor(1, 2, 0); 
+lean_ctor_set(x_37, 0, x_35); +lean_ctor_set(x_37, 1, x_36); +return x_37; +} +} +} +} +static lean_object* _init_l_panic___at_Lean_IR_ToIR_lowerAlt_loop___spec__1___closed__1() { +_start: +{ +lean_object* x_1; lean_object* x_2; lean_object* x_3; +x_1 = l_panic___at_Lean_IR_ToIR_lowerEnumToScalarType___spec__1___closed__1; +x_2 = l_Lean_IR_instInhabitedFnBody; +x_3 = l_instInhabitedOfMonad___rarg(x_1, x_2); +return x_3; +} +} +LEAN_EXPORT lean_object* l_panic___at_Lean_IR_ToIR_lowerAlt_loop___spec__1(lean_object* x_1, lean_object* x_2, lean_object* x_3, lean_object* x_4, lean_object* x_5) { +_start: +{ +lean_object* x_6; lean_object* x_7; lean_object* x_8; +x_6 = l_panic___at_Lean_IR_ToIR_lowerAlt_loop___spec__1___closed__1; +x_7 = lean_panic_fn(x_6, x_1); +x_8 = lean_apply_4(x_7, x_2, x_3, x_4, x_5); +return x_8; +} +} +static lean_object* _init_l_Lean_IR_ToIR_lowerAlt_loop___closed__1() { +_start: +{ +lean_object* x_1; +x_1 = lean_mk_string_unchecked("Lean.IR.ToIR.lowerAlt.loop", 26, 26); +return x_1; +} +} +static lean_object* _init_l_Lean_IR_ToIR_lowerAlt_loop___closed__2() { +_start: +{ +lean_object* x_1; +x_1 = lean_mk_string_unchecked("mismatched fields and params", 28, 28); +return x_1; +} +} +static lean_object* _init_l_Lean_IR_ToIR_lowerAlt_loop___closed__3() { +_start: +{ +lean_object* x_1; lean_object* x_2; lean_object* x_3; lean_object* x_4; lean_object* x_5; lean_object* x_6; +x_1 = l_List_forIn_x27_loop___at_Lean_IR_ToIR_lowerEnumToScalarType___spec__2___closed__1; +x_2 = l_Lean_IR_ToIR_lowerAlt_loop___closed__1; +x_3 = lean_unsigned_to_nat(367u); +x_4 = lean_unsigned_to_nat(18u); +x_5 = l_Lean_IR_ToIR_lowerAlt_loop___closed__2; +x_6 = l___private_Init_Util_0__mkPanicMessageWithDecl(x_1, x_2, x_3, x_4, x_5); +return x_6; +} +} +LEAN_EXPORT lean_object* l_Lean_IR_ToIR_lowerAlt_loop(lean_object* x_1, lean_object* x_2, lean_object* x_3, lean_object* x_4, lean_object* x_5, lean_object* x_6, lean_object* x_7, lean_object* x_8, lean_object* x_9, lean_object* x_10) { +_start: +{ +lean_object* x_11; lean_object* x_12; lean_object* x_59; uint8_t x_60; lean_object* x_61; uint8_t x_62; +x_59 = lean_array_get_size(x_4); +x_60 = lean_nat_dec_lt(x_6, x_59); +lean_dec(x_59); +x_61 = lean_array_get_size(x_5); +x_62 = lean_nat_dec_lt(x_6, x_61); +lean_dec(x_61); +if (x_60 == 0) +{ +lean_dec(x_6); +lean_dec(x_1); +if (x_62 == 0) +{ +lean_object* x_63; +x_63 = l_Lean_IR_ToIR_lowerCode(x_2, x_7, x_8, x_9, x_10); +return x_63; +} +else +{ +lean_object* x_64; lean_object* x_65; +lean_dec(x_2); +x_64 = l_Lean_IR_ToIR_lowerAlt_loop___closed__3; +x_65 = l_panic___at_Lean_IR_ToIR_lowerAlt_loop___spec__1(x_64, x_7, x_8, x_9, x_10); +return x_65; +} +} +else +{ +lean_object* x_66; +x_66 = lean_array_fget(x_4, x_6); +if (x_62 == 0) +{ +lean_object* x_67; +x_67 = lean_box(0); +x_11 = x_67; +x_12 = x_66; +goto block_58; +} +else +{ +lean_object* x_68; lean_object* x_69; +x_68 = lean_array_fget(x_5, x_6); +x_69 = lean_alloc_ctor(1, 1, 0); +lean_ctor_set(x_69, 0, x_68); +x_11 = x_69; +x_12 = x_66; +goto block_58; +} +} +block_58: +{ +if (lean_obj_tag(x_11) == 0) +{ +lean_object* x_13; lean_object* x_14; +lean_dec(x_12); +lean_dec(x_6); +lean_dec(x_2); +lean_dec(x_1); +x_13 = l_Lean_IR_ToIR_lowerAlt_loop___closed__3; +x_14 = l_panic___at_Lean_IR_ToIR_lowerAlt_loop___spec__1(x_13, x_7, x_8, x_9, x_10); +return x_14; +} +else +{ +lean_object* x_15; lean_object* x_16; lean_object* x_17; +x_15 = lean_ctor_get(x_11, 0); +lean_inc(x_15); +lean_dec(x_11); +lean_inc(x_1); +x_16 = l_Lean_IR_ToIR_lowerProj(x_1, x_3, 
x_15); +x_17 = lean_ctor_get(x_16, 0); +lean_inc(x_17); +if (lean_obj_tag(x_17) == 0) +{ +lean_object* x_18; lean_object* x_19; lean_object* x_20; lean_object* x_21; lean_object* x_22; lean_object* x_23; lean_object* x_24; lean_object* x_25; lean_object* x_26; lean_object* x_27; lean_object* x_28; +x_18 = lean_ctor_get(x_16, 1); +lean_inc(x_18); +lean_dec(x_16); +x_19 = lean_ctor_get(x_17, 0); +lean_inc(x_19); +lean_dec(x_17); +x_20 = lean_ctor_get(x_12, 0); +lean_inc(x_20); +lean_dec(x_12); +x_21 = l_Lean_IR_ToIR_bindVar(x_20, x_7, x_8, x_9, x_10); +x_22 = lean_ctor_get(x_21, 0); +lean_inc(x_22); +x_23 = lean_ctor_get(x_21, 1); +lean_inc(x_23); +lean_dec(x_21); +x_24 = lean_ctor_get(x_22, 0); +lean_inc(x_24); +x_25 = lean_ctor_get(x_22, 1); +lean_inc(x_25); +lean_dec(x_22); +x_26 = lean_unsigned_to_nat(1u); +x_27 = lean_nat_add(x_6, x_26); +lean_dec(x_6); +x_28 = l_Lean_IR_ToIR_lowerAlt_loop(x_1, x_2, x_3, x_4, x_5, x_27, x_25, x_8, x_9, x_23); +if (lean_obj_tag(x_28) == 0) +{ +uint8_t x_29; +x_29 = !lean_is_exclusive(x_28); +if (x_29 == 0) +{ +lean_object* x_30; uint8_t x_31; +x_30 = lean_ctor_get(x_28, 0); +x_31 = !lean_is_exclusive(x_30); +if (x_31 == 0) +{ +lean_object* x_32; lean_object* x_33; +x_32 = lean_ctor_get(x_30, 0); +x_33 = lean_alloc_ctor(0, 4, 0); +lean_ctor_set(x_33, 0, x_24); +lean_ctor_set(x_33, 1, x_18); +lean_ctor_set(x_33, 2, x_19); +lean_ctor_set(x_33, 3, x_32); +lean_ctor_set(x_30, 0, x_33); +return x_28; +} +else +{ +lean_object* x_34; lean_object* x_35; lean_object* x_36; lean_object* x_37; +x_34 = lean_ctor_get(x_30, 0); +x_35 = lean_ctor_get(x_30, 1); +lean_inc(x_35); +lean_inc(x_34); +lean_dec(x_30); +x_36 = lean_alloc_ctor(0, 4, 0); +lean_ctor_set(x_36, 0, x_24); +lean_ctor_set(x_36, 1, x_18); +lean_ctor_set(x_36, 2, x_19); +lean_ctor_set(x_36, 3, x_34); +x_37 = lean_alloc_ctor(0, 2, 0); +lean_ctor_set(x_37, 0, x_36); +lean_ctor_set(x_37, 1, x_35); +lean_ctor_set(x_28, 0, x_37); +return x_28; +} +} +else +{ +lean_object* x_38; lean_object* x_39; lean_object* x_40; lean_object* x_41; lean_object* x_42; lean_object* x_43; lean_object* x_44; lean_object* x_45; +x_38 = lean_ctor_get(x_28, 0); +x_39 = lean_ctor_get(x_28, 1); +lean_inc(x_39); +lean_inc(x_38); +lean_dec(x_28); +x_40 = lean_ctor_get(x_38, 0); +lean_inc(x_40); +x_41 = lean_ctor_get(x_38, 1); +lean_inc(x_41); +if (lean_is_exclusive(x_38)) { + lean_ctor_release(x_38, 0); + lean_ctor_release(x_38, 1); + x_42 = x_38; +} else { + lean_dec_ref(x_38); + x_42 = lean_box(0); +} +x_43 = lean_alloc_ctor(0, 4, 0); +lean_ctor_set(x_43, 0, x_24); +lean_ctor_set(x_43, 1, x_18); +lean_ctor_set(x_43, 2, x_19); +lean_ctor_set(x_43, 3, x_40); +if (lean_is_scalar(x_42)) { + x_44 = lean_alloc_ctor(0, 2, 0); +} else { + x_44 = x_42; +} +lean_ctor_set(x_44, 0, x_43); +lean_ctor_set(x_44, 1, x_41); +x_45 = lean_alloc_ctor(0, 2, 0); +lean_ctor_set(x_45, 0, x_44); +lean_ctor_set(x_45, 1, x_39); +return x_45; +} +} +else +{ +uint8_t x_46; +lean_dec(x_24); +lean_dec(x_19); +lean_dec(x_18); +x_46 = !lean_is_exclusive(x_28); +if (x_46 == 0) +{ +return x_28; +} +else +{ +lean_object* x_47; lean_object* x_48; lean_object* x_49; +x_47 = lean_ctor_get(x_28, 0); +x_48 = lean_ctor_get(x_28, 1); +lean_inc(x_48); +lean_inc(x_47); +lean_dec(x_28); +x_49 = lean_alloc_ctor(1, 2, 0); +lean_ctor_set(x_49, 0, x_47); +lean_ctor_set(x_49, 1, x_48); +return x_49; +} +} +} +else +{ +lean_object* x_50; lean_object* x_51; lean_object* x_52; lean_object* x_53; lean_object* x_54; lean_object* x_55; lean_object* x_56; +lean_dec(x_16); +x_50 = 
lean_ctor_get(x_12, 0); +lean_inc(x_50); +lean_dec(x_12); +x_51 = l_Lean_IR_ToIR_bindErased(x_50, x_7, x_8, x_9, x_10); +x_52 = lean_ctor_get(x_51, 0); +lean_inc(x_52); +x_53 = lean_ctor_get(x_51, 1); +lean_inc(x_53); +lean_dec(x_51); +x_54 = lean_ctor_get(x_52, 1); +lean_inc(x_54); +lean_dec(x_52); +x_55 = lean_unsigned_to_nat(1u); +x_56 = lean_nat_add(x_6, x_55); +lean_dec(x_6); +x_6 = x_56; +x_7 = x_54; +x_10 = x_53; +goto _start; +} +} +} +} +} +LEAN_EXPORT lean_object* l_Array_mapMUnsafe_map___at_Lean_IR_ToIR_lowerCode___spec__1(size_t x_1, size_t x_2, lean_object* x_3, lean_object* x_4, lean_object* x_5, lean_object* x_6, lean_object* x_7) { +_start: +{ +uint8_t x_8; +x_8 = lean_usize_dec_lt(x_2, x_1); +if (x_8 == 0) +{ +lean_object* x_9; lean_object* x_10; +lean_dec(x_6); +lean_dec(x_5); +x_9 = lean_alloc_ctor(0, 2, 0); +lean_ctor_set(x_9, 0, x_3); +lean_ctor_set(x_9, 1, x_4); +x_10 = lean_alloc_ctor(0, 2, 0); +lean_ctor_set(x_10, 0, x_9); +lean_ctor_set(x_10, 1, x_7); +return x_10; +} +else +{ +lean_object* x_11; lean_object* x_12; lean_object* x_13; lean_object* x_14; +x_11 = lean_array_uget(x_3, x_2); +x_12 = lean_unsigned_to_nat(0u); +x_13 = lean_array_uset(x_3, x_2, x_12); +lean_inc(x_6); +lean_inc(x_5); +x_14 = l_Lean_IR_ToIR_lowerParam(x_11, x_4, x_5, x_6, x_7); +if (lean_obj_tag(x_14) == 0) +{ +lean_object* x_15; lean_object* x_16; lean_object* x_17; lean_object* x_18; size_t x_19; size_t x_20; lean_object* x_21; +x_15 = lean_ctor_get(x_14, 0); +lean_inc(x_15); +x_16 = lean_ctor_get(x_14, 1); +lean_inc(x_16); +lean_dec(x_14); +x_17 = lean_ctor_get(x_15, 0); +lean_inc(x_17); +x_18 = lean_ctor_get(x_15, 1); +lean_inc(x_18); +lean_dec(x_15); +x_19 = 1; +x_20 = lean_usize_add(x_2, x_19); +x_21 = lean_array_uset(x_13, x_2, x_17); +x_2 = x_20; +x_3 = x_21; +x_4 = x_18; +x_7 = x_16; +goto _start; +} +else +{ +uint8_t x_23; +lean_dec(x_13); +lean_dec(x_6); +lean_dec(x_5); +x_23 = !lean_is_exclusive(x_14); +if (x_23 == 0) +{ +return x_14; +} +else +{ +lean_object* x_24; lean_object* x_25; lean_object* x_26; +x_24 = lean_ctor_get(x_14, 0); +x_25 = lean_ctor_get(x_14, 1); +lean_inc(x_25); +lean_inc(x_24); +lean_dec(x_14); +x_26 = lean_alloc_ctor(1, 2, 0); +lean_ctor_set(x_26, 0, x_24); +lean_ctor_set(x_26, 1, x_25); +return x_26; +} +} +} +} +} +LEAN_EXPORT lean_object* l_Array_mapMUnsafe_map___at_Lean_IR_ToIR_lowerCode___spec__2(size_t x_1, size_t x_2, lean_object* x_3, lean_object* x_4, lean_object* x_5, lean_object* x_6, lean_object* x_7) { +_start: +{ +uint8_t x_8; +x_8 = lean_usize_dec_lt(x_2, x_1); +if (x_8 == 0) +{ +lean_object* x_9; lean_object* x_10; +lean_dec(x_6); +lean_dec(x_5); +x_9 = lean_alloc_ctor(0, 2, 0); +lean_ctor_set(x_9, 0, x_3); +lean_ctor_set(x_9, 1, x_4); +x_10 = lean_alloc_ctor(0, 2, 0); +lean_ctor_set(x_10, 0, x_9); +lean_ctor_set(x_10, 1, x_7); +return x_10; +} +else +{ +lean_object* x_11; lean_object* x_12; lean_object* x_13; lean_object* x_14; +x_11 = lean_array_uget(x_3, x_2); +x_12 = lean_unsigned_to_nat(0u); +x_13 = lean_array_uset(x_3, x_2, x_12); +lean_inc(x_6); +lean_inc(x_5); +x_14 = l_Lean_IR_ToIR_lowerArg(x_11, x_4, x_5, x_6, x_7); +lean_dec(x_11); +if (lean_obj_tag(x_14) == 0) +{ +lean_object* x_15; lean_object* x_16; lean_object* x_17; lean_object* x_18; size_t x_19; size_t x_20; lean_object* x_21; +x_15 = lean_ctor_get(x_14, 0); +lean_inc(x_15); +x_16 = lean_ctor_get(x_14, 1); +lean_inc(x_16); +lean_dec(x_14); +x_17 = lean_ctor_get(x_15, 0); +lean_inc(x_17); +x_18 = lean_ctor_get(x_15, 1); +lean_inc(x_18); +lean_dec(x_15); +x_19 = 1; +x_20 = 
lean_usize_add(x_2, x_19); +x_21 = lean_array_uset(x_13, x_2, x_17); +x_2 = x_20; +x_3 = x_21; +x_4 = x_18; +x_7 = x_16; +goto _start; +} +else +{ +uint8_t x_23; +lean_dec(x_13); +lean_dec(x_6); +lean_dec(x_5); +x_23 = !lean_is_exclusive(x_14); +if (x_23 == 0) +{ +return x_14; +} +else +{ +lean_object* x_24; lean_object* x_25; lean_object* x_26; +x_24 = lean_ctor_get(x_14, 0); +x_25 = lean_ctor_get(x_14, 1); +lean_inc(x_25); +lean_inc(x_24); +lean_dec(x_14); +x_26 = lean_alloc_ctor(1, 2, 0); +lean_ctor_set(x_26, 0, x_24); +lean_ctor_set(x_26, 1, x_25); +return x_26; +} +} +} +} +} +LEAN_EXPORT lean_object* l_Array_mapMUnsafe_map___at_Lean_IR_ToIR_lowerCode___spec__3(lean_object* x_1, size_t x_2, size_t x_3, lean_object* x_4, lean_object* x_5, lean_object* x_6, lean_object* x_7, lean_object* x_8) { +_start: +{ +uint8_t x_9; +x_9 = lean_usize_dec_lt(x_3, x_2); +if (x_9 == 0) +{ +lean_object* x_10; lean_object* x_11; +lean_dec(x_7); +lean_dec(x_6); +lean_dec(x_1); +x_10 = lean_alloc_ctor(0, 2, 0); +lean_ctor_set(x_10, 0, x_4); +lean_ctor_set(x_10, 1, x_5); +x_11 = lean_alloc_ctor(0, 2, 0); +lean_ctor_set(x_11, 0, x_10); +lean_ctor_set(x_11, 1, x_8); +return x_11; +} +else +{ +lean_object* x_12; lean_object* x_13; lean_object* x_14; lean_object* x_15; +x_12 = lean_array_uget(x_4, x_3); +x_13 = lean_unsigned_to_nat(0u); +x_14 = lean_array_uset(x_4, x_3, x_13); +lean_inc(x_7); +lean_inc(x_6); +lean_inc(x_1); +x_15 = l_Lean_IR_ToIR_lowerAlt(x_1, x_12, x_5, x_6, x_7, x_8); +if (lean_obj_tag(x_15) == 0) +{ +lean_object* x_16; lean_object* x_17; lean_object* x_18; lean_object* x_19; size_t x_20; size_t x_21; lean_object* x_22; +x_16 = lean_ctor_get(x_15, 0); +lean_inc(x_16); +x_17 = lean_ctor_get(x_15, 1); +lean_inc(x_17); +lean_dec(x_15); +x_18 = lean_ctor_get(x_16, 0); +lean_inc(x_18); +x_19 = lean_ctor_get(x_16, 1); +lean_inc(x_19); +lean_dec(x_16); +x_20 = 1; +x_21 = lean_usize_add(x_3, x_20); +x_22 = lean_array_uset(x_14, x_3, x_18); +x_3 = x_21; +x_4 = x_22; +x_5 = x_19; +x_8 = x_17; +goto _start; +} +else +{ +uint8_t x_24; +lean_dec(x_14); +lean_dec(x_7); +lean_dec(x_6); +lean_dec(x_1); +x_24 = !lean_is_exclusive(x_15); +if (x_24 == 0) +{ +return x_15; +} +else +{ +lean_object* x_25; lean_object* x_26; lean_object* x_27; +x_25 = lean_ctor_get(x_15, 0); +x_26 = lean_ctor_get(x_15, 1); +lean_inc(x_26); +lean_inc(x_25); +lean_dec(x_15); +x_27 = lean_alloc_ctor(1, 2, 0); +lean_ctor_set(x_27, 0, x_25); +lean_ctor_set(x_27, 1, x_26); +return x_27; +} +} +} +} +} +LEAN_EXPORT lean_object* l_panic___at_Lean_IR_ToIR_lowerCode___spec__4(lean_object* x_1) { +_start: +{ +lean_object* x_2; lean_object* x_3; +x_2 = l_Lean_IR_instInhabitedArg; +x_3 = lean_panic_fn(x_2, x_1); +return x_3; +} +} +static lean_object* _init_l_Lean_IR_ToIR_lowerCode___closed__1() { +_start: +{ +lean_object* x_1; +x_1 = lean_mk_string_unchecked("Lean.IR.ToIR.lowerCode", 22, 22); +return x_1; +} +} +static lean_object* _init_l_Lean_IR_ToIR_lowerCode___closed__2() { +_start: +{ +lean_object* x_1; +x_1 = lean_mk_string_unchecked("all local functions should be λ-lifted", 39, 38); +return x_1; +} +} +static lean_object* _init_l_Lean_IR_ToIR_lowerCode___closed__3() { +_start: +{ +lean_object* x_1; lean_object* x_2; lean_object* x_3; lean_object* x_4; lean_object* x_5; lean_object* x_6; +x_1 = l_List_forIn_x27_loop___at_Lean_IR_ToIR_lowerEnumToScalarType___spec__2___closed__1; +x_2 = l_Lean_IR_ToIR_lowerCode___closed__1; +x_3 = lean_unsigned_to_nat(188u); +x_4 = lean_unsigned_to_nat(15u); +x_5 = l_Lean_IR_ToIR_lowerCode___closed__2; 
+x_6 = l___private_Init_Util_0__mkPanicMessageWithDecl(x_1, x_2, x_3, x_4, x_5); +return x_6; +} +} +static lean_object* _init_l_Lean_IR_ToIR_lowerCode___closed__4() { +_start: +{ +lean_object* x_1; lean_object* x_2; lean_object* x_3; lean_object* x_4; lean_object* x_5; lean_object* x_6; +x_1 = l_List_forIn_x27_loop___at_Lean_IR_ToIR_lowerEnumToScalarType___spec__2___closed__1; +x_2 = l_Lean_IR_ToIR_lowerCode___closed__1; +x_3 = lean_unsigned_to_nat(172u); +x_4 = lean_unsigned_to_nat(46u); +x_5 = l_Lean_IR_ToIR_lowerArg___closed__2; +x_6 = l___private_Init_Util_0__mkPanicMessageWithDecl(x_1, x_2, x_3, x_4, x_5); +return x_6; +} +} +static lean_object* _init_l_Lean_IR_ToIR_lowerCode___closed__5() { +_start: +{ +lean_object* x_1; lean_object* x_2; lean_object* x_3; lean_object* x_4; lean_object* x_5; lean_object* x_6; +x_1 = l_List_forIn_x27_loop___at_Lean_IR_ToIR_lowerEnumToScalarType___spec__2___closed__1; +x_2 = l_Lean_IR_ToIR_lowerCode___closed__1; +x_3 = lean_unsigned_to_nat(180u); +x_4 = lean_unsigned_to_nat(52u); +x_5 = l_Lean_IR_ToIR_lowerArg___closed__2; +x_6 = l___private_Init_Util_0__mkPanicMessageWithDecl(x_1, x_2, x_3, x_4, x_5); +return x_6; +} +} +static lean_object* _init_l_Lean_IR_ToIR_lowerCode___closed__6() { +_start: +{ +lean_object* x_1; lean_object* x_2; lean_object* x_3; lean_object* x_4; lean_object* x_5; lean_object* x_6; +x_1 = l_List_forIn_x27_loop___at_Lean_IR_ToIR_lowerEnumToScalarType___spec__2___closed__1; +x_2 = l_Lean_IR_ToIR_lowerCode___closed__1; +x_3 = lean_unsigned_to_nat(185u); +x_4 = lean_unsigned_to_nat(37u); +x_5 = l_Lean_IR_ToIR_lowerArg___closed__2; +x_6 = l___private_Init_Util_0__mkPanicMessageWithDecl(x_1, x_2, x_3, x_4, x_5); +return x_6; +} +} +static lean_object* _init_l_Lean_IR_ToIR_lowerCode___closed__7() { +_start: +{ +lean_object* x_1; lean_object* x_2; +x_1 = lean_box(1); +x_2 = lean_alloc_ctor(11, 1, 0); +lean_ctor_set(x_2, 0, x_1); +return x_2; +} +} +LEAN_EXPORT lean_object* l_Lean_IR_ToIR_lowerCode(lean_object* x_1, lean_object* x_2, lean_object* x_3, lean_object* x_4, lean_object* x_5) { +_start: +{ +switch (lean_obj_tag(x_1)) { +case 0: +{ +lean_object* x_6; lean_object* x_7; lean_object* x_8; +x_6 = lean_ctor_get(x_1, 0); +lean_inc(x_6); +x_7 = lean_ctor_get(x_1, 1); +lean_inc(x_7); +lean_dec(x_1); +x_8 = l_Lean_IR_ToIR_lowerLet(x_6, x_7, x_2, x_3, x_4, x_5); +return x_8; +} +case 1: +{ +lean_object* x_9; lean_object* x_10; +lean_dec(x_1); +x_9 = l_Lean_IR_ToIR_lowerCode___closed__3; +x_10 = l_panic___at_Lean_IR_ToIR_lowerAlt_loop___spec__1(x_9, x_2, x_3, x_4, x_5); +return x_10; +} +case 2: +{ +lean_object* x_11; lean_object* x_12; lean_object* x_13; lean_object* x_14; lean_object* x_15; lean_object* x_16; lean_object* x_17; lean_object* x_18; lean_object* x_19; size_t x_20; size_t x_21; lean_object* x_22; +x_11 = lean_ctor_get(x_1, 0); +lean_inc(x_11); +x_12 = lean_ctor_get(x_1, 1); +lean_inc(x_12); +lean_dec(x_1); +x_13 = lean_ctor_get(x_11, 0); +lean_inc(x_13); +x_14 = l_Lean_IR_ToIR_bindJoinPoint(x_13, x_2, x_3, x_4, x_5); +x_15 = lean_ctor_get(x_14, 0); +lean_inc(x_15); +x_16 = lean_ctor_get(x_14, 1); +lean_inc(x_16); +lean_dec(x_14); +x_17 = lean_ctor_get(x_15, 0); +lean_inc(x_17); +x_18 = lean_ctor_get(x_15, 1); +lean_inc(x_18); +lean_dec(x_15); +x_19 = lean_ctor_get(x_11, 2); +lean_inc(x_19); +x_20 = lean_array_size(x_19); +x_21 = 0; +lean_inc(x_4); +lean_inc(x_3); +x_22 = l_Array_mapMUnsafe_map___at_Lean_IR_ToIR_lowerCode___spec__1(x_20, x_21, x_19, x_18, x_3, x_4, x_16); +if (lean_obj_tag(x_22) == 0) +{ +lean_object* 
x_23; lean_object* x_24; lean_object* x_25; lean_object* x_26; lean_object* x_27; lean_object* x_28; +x_23 = lean_ctor_get(x_22, 0); +lean_inc(x_23); +x_24 = lean_ctor_get(x_22, 1); +lean_inc(x_24); +lean_dec(x_22); +x_25 = lean_ctor_get(x_23, 0); +lean_inc(x_25); +x_26 = lean_ctor_get(x_23, 1); +lean_inc(x_26); +lean_dec(x_23); +x_27 = lean_ctor_get(x_11, 4); +lean_inc(x_27); +lean_dec(x_11); +lean_inc(x_4); +lean_inc(x_3); +x_28 = l_Lean_IR_ToIR_lowerCode(x_27, x_26, x_3, x_4, x_24); +if (lean_obj_tag(x_28) == 0) +{ +lean_object* x_29; lean_object* x_30; lean_object* x_31; lean_object* x_32; lean_object* x_33; +x_29 = lean_ctor_get(x_28, 0); +lean_inc(x_29); +x_30 = lean_ctor_get(x_28, 1); +lean_inc(x_30); +lean_dec(x_28); +x_31 = lean_ctor_get(x_29, 0); +lean_inc(x_31); +x_32 = lean_ctor_get(x_29, 1); +lean_inc(x_32); +lean_dec(x_29); +x_33 = l_Lean_IR_ToIR_lowerCode(x_12, x_32, x_3, x_4, x_30); +if (lean_obj_tag(x_33) == 0) +{ +uint8_t x_34; +x_34 = !lean_is_exclusive(x_33); +if (x_34 == 0) +{ +lean_object* x_35; uint8_t x_36; +x_35 = lean_ctor_get(x_33, 0); +x_36 = !lean_is_exclusive(x_35); +if (x_36 == 0) +{ +lean_object* x_37; lean_object* x_38; +x_37 = lean_ctor_get(x_35, 0); +x_38 = lean_alloc_ctor(1, 4, 0); +lean_ctor_set(x_38, 0, x_17); +lean_ctor_set(x_38, 1, x_25); +lean_ctor_set(x_38, 2, x_31); +lean_ctor_set(x_38, 3, x_37); +lean_ctor_set(x_35, 0, x_38); +return x_33; +} +else +{ +lean_object* x_39; lean_object* x_40; lean_object* x_41; lean_object* x_42; +x_39 = lean_ctor_get(x_35, 0); +x_40 = lean_ctor_get(x_35, 1); +lean_inc(x_40); +lean_inc(x_39); +lean_dec(x_35); +x_41 = lean_alloc_ctor(1, 4, 0); +lean_ctor_set(x_41, 0, x_17); +lean_ctor_set(x_41, 1, x_25); +lean_ctor_set(x_41, 2, x_31); +lean_ctor_set(x_41, 3, x_39); +x_42 = lean_alloc_ctor(0, 2, 0); +lean_ctor_set(x_42, 0, x_41); +lean_ctor_set(x_42, 1, x_40); +lean_ctor_set(x_33, 0, x_42); +return x_33; +} +} +else +{ +lean_object* x_43; lean_object* x_44; lean_object* x_45; lean_object* x_46; lean_object* x_47; lean_object* x_48; lean_object* x_49; lean_object* x_50; +x_43 = lean_ctor_get(x_33, 0); +x_44 = lean_ctor_get(x_33, 1); +lean_inc(x_44); +lean_inc(x_43); +lean_dec(x_33); +x_45 = lean_ctor_get(x_43, 0); +lean_inc(x_45); +x_46 = lean_ctor_get(x_43, 1); +lean_inc(x_46); +if (lean_is_exclusive(x_43)) { + lean_ctor_release(x_43, 0); + lean_ctor_release(x_43, 1); + x_47 = x_43; +} else { + lean_dec_ref(x_43); + x_47 = lean_box(0); +} +x_48 = lean_alloc_ctor(1, 4, 0); +lean_ctor_set(x_48, 0, x_17); +lean_ctor_set(x_48, 1, x_25); +lean_ctor_set(x_48, 2, x_31); +lean_ctor_set(x_48, 3, x_45); +if (lean_is_scalar(x_47)) { + x_49 = lean_alloc_ctor(0, 2, 0); +} else { + x_49 = x_47; +} +lean_ctor_set(x_49, 0, x_48); +lean_ctor_set(x_49, 1, x_46); +x_50 = lean_alloc_ctor(0, 2, 0); +lean_ctor_set(x_50, 0, x_49); +lean_ctor_set(x_50, 1, x_44); +return x_50; +} +} +else +{ +uint8_t x_51; +lean_dec(x_31); +lean_dec(x_25); +lean_dec(x_17); +x_51 = !lean_is_exclusive(x_33); +if (x_51 == 0) +{ +return x_33; +} +else +{ +lean_object* x_52; lean_object* x_53; lean_object* x_54; +x_52 = lean_ctor_get(x_33, 0); +x_53 = lean_ctor_get(x_33, 1); +lean_inc(x_53); +lean_inc(x_52); +lean_dec(x_33); +x_54 = lean_alloc_ctor(1, 2, 0); +lean_ctor_set(x_54, 0, x_52); +lean_ctor_set(x_54, 1, x_53); +return x_54; +} +} +} +else +{ +uint8_t x_55; +lean_dec(x_25); +lean_dec(x_17); +lean_dec(x_12); +lean_dec(x_4); +lean_dec(x_3); +x_55 = !lean_is_exclusive(x_28); +if (x_55 == 0) +{ +return x_28; +} +else +{ +lean_object* x_56; lean_object* x_57; 
lean_object* x_58; +x_56 = lean_ctor_get(x_28, 0); +x_57 = lean_ctor_get(x_28, 1); +lean_inc(x_57); +lean_inc(x_56); +lean_dec(x_28); +x_58 = lean_alloc_ctor(1, 2, 0); +lean_ctor_set(x_58, 0, x_56); +lean_ctor_set(x_58, 1, x_57); +return x_58; +} +} +} +else +{ +uint8_t x_59; +lean_dec(x_17); +lean_dec(x_12); +lean_dec(x_11); +lean_dec(x_4); +lean_dec(x_3); +x_59 = !lean_is_exclusive(x_22); +if (x_59 == 0) +{ +return x_22; +} +else +{ +lean_object* x_60; lean_object* x_61; lean_object* x_62; +x_60 = lean_ctor_get(x_22, 0); +x_61 = lean_ctor_get(x_22, 1); +lean_inc(x_61); +lean_inc(x_60); +lean_dec(x_22); +x_62 = lean_alloc_ctor(1, 2, 0); +lean_ctor_set(x_62, 0, x_60); +lean_ctor_set(x_62, 1, x_61); +return x_62; +} +} +} +case 3: +{ +lean_object* x_63; lean_object* x_64; lean_object* x_65; uint8_t x_66; +x_63 = lean_ctor_get(x_2, 0); +lean_inc(x_63); +x_64 = lean_ctor_get(x_1, 0); +lean_inc(x_64); +x_65 = lean_ctor_get(x_1, 1); +lean_inc(x_65); +lean_dec(x_1); +x_66 = !lean_is_exclusive(x_63); +if (x_66 == 0) +{ +lean_object* x_67; lean_object* x_68; lean_object* x_69; uint64_t x_70; uint64_t x_71; uint64_t x_72; uint64_t x_73; uint64_t x_74; uint64_t x_75; uint64_t x_76; size_t x_77; size_t x_78; size_t x_79; size_t x_80; size_t x_81; lean_object* x_82; lean_object* x_83; +x_67 = lean_ctor_get(x_63, 1); +x_68 = lean_ctor_get(x_63, 0); +lean_dec(x_68); +x_69 = lean_array_get_size(x_67); +x_70 = l___private_Lean_Expr_0__Lean_hashFVarId____x40_Lean_Expr___hyg_1730_(x_64); +x_71 = 32; +x_72 = lean_uint64_shift_right(x_70, x_71); +x_73 = lean_uint64_xor(x_70, x_72); +x_74 = 16; +x_75 = lean_uint64_shift_right(x_73, x_74); +x_76 = lean_uint64_xor(x_73, x_75); +x_77 = lean_uint64_to_usize(x_76); +x_78 = lean_usize_of_nat(x_69); +lean_dec(x_69); +x_79 = 1; +x_80 = lean_usize_sub(x_78, x_79); +x_81 = lean_usize_land(x_77, x_80); +x_82 = lean_array_uget(x_67, x_81); +lean_dec(x_67); +x_83 = l_Std_DHashMap_Internal_AssocList_get_x3f___at_Lean_IR_ToIR_lowerArg___spec__1(x_64, x_82); +lean_dec(x_82); +lean_dec(x_64); +if (lean_obj_tag(x_83) == 0) +{ +lean_object* x_84; lean_object* x_85; +lean_free_object(x_63); +lean_dec(x_65); +x_84 = l_Lean_IR_ToIR_lowerCode___closed__4; +x_85 = l_panic___at_Lean_IR_ToIR_lowerAlt_loop___spec__1(x_84, x_2, x_3, x_4, x_5); +return x_85; +} +else +{ +lean_object* x_86; +x_86 = lean_ctor_get(x_83, 0); +lean_inc(x_86); +lean_dec(x_83); +if (lean_obj_tag(x_86) == 1) +{ +lean_object* x_87; size_t x_88; size_t x_89; lean_object* x_90; +x_87 = lean_ctor_get(x_86, 0); +lean_inc(x_87); +lean_dec(x_86); +x_88 = lean_array_size(x_65); +x_89 = 0; +x_90 = l_Array_mapMUnsafe_map___at_Lean_IR_ToIR_lowerCode___spec__2(x_88, x_89, x_65, x_2, x_3, x_4, x_5); +if (lean_obj_tag(x_90) == 0) +{ +uint8_t x_91; +x_91 = !lean_is_exclusive(x_90); +if (x_91 == 0) +{ +lean_object* x_92; uint8_t x_93; +x_92 = lean_ctor_get(x_90, 0); +x_93 = !lean_is_exclusive(x_92); +if (x_93 == 0) +{ +lean_object* x_94; +x_94 = lean_ctor_get(x_92, 0); +lean_ctor_set_tag(x_63, 12); +lean_ctor_set(x_63, 1, x_94); +lean_ctor_set(x_63, 0, x_87); +lean_ctor_set(x_92, 0, x_63); +return x_90; +} +else +{ +lean_object* x_95; lean_object* x_96; lean_object* x_97; +x_95 = lean_ctor_get(x_92, 0); +x_96 = lean_ctor_get(x_92, 1); +lean_inc(x_96); +lean_inc(x_95); +lean_dec(x_92); +lean_ctor_set_tag(x_63, 12); +lean_ctor_set(x_63, 1, x_95); +lean_ctor_set(x_63, 0, x_87); +x_97 = lean_alloc_ctor(0, 2, 0); +lean_ctor_set(x_97, 0, x_63); +lean_ctor_set(x_97, 1, x_96); +lean_ctor_set(x_90, 0, x_97); +return x_90; +} +} +else +{ 
+lean_object* x_98; lean_object* x_99; lean_object* x_100; lean_object* x_101; lean_object* x_102; lean_object* x_103; lean_object* x_104; +x_98 = lean_ctor_get(x_90, 0); +x_99 = lean_ctor_get(x_90, 1); +lean_inc(x_99); +lean_inc(x_98); +lean_dec(x_90); +x_100 = lean_ctor_get(x_98, 0); +lean_inc(x_100); +x_101 = lean_ctor_get(x_98, 1); +lean_inc(x_101); +if (lean_is_exclusive(x_98)) { + lean_ctor_release(x_98, 0); + lean_ctor_release(x_98, 1); + x_102 = x_98; +} else { + lean_dec_ref(x_98); + x_102 = lean_box(0); +} +lean_ctor_set_tag(x_63, 12); +lean_ctor_set(x_63, 1, x_100); +lean_ctor_set(x_63, 0, x_87); +if (lean_is_scalar(x_102)) { + x_103 = lean_alloc_ctor(0, 2, 0); +} else { + x_103 = x_102; +} +lean_ctor_set(x_103, 0, x_63); +lean_ctor_set(x_103, 1, x_101); +x_104 = lean_alloc_ctor(0, 2, 0); +lean_ctor_set(x_104, 0, x_103); +lean_ctor_set(x_104, 1, x_99); +return x_104; +} +} +else +{ +uint8_t x_105; +lean_dec(x_87); +lean_free_object(x_63); +x_105 = !lean_is_exclusive(x_90); +if (x_105 == 0) +{ +return x_90; +} +else +{ +lean_object* x_106; lean_object* x_107; lean_object* x_108; +x_106 = lean_ctor_get(x_90, 0); +x_107 = lean_ctor_get(x_90, 1); +lean_inc(x_107); +lean_inc(x_106); +lean_dec(x_90); +x_108 = lean_alloc_ctor(1, 2, 0); +lean_ctor_set(x_108, 0, x_106); +lean_ctor_set(x_108, 1, x_107); +return x_108; +} +} +} +else +{ +lean_object* x_109; lean_object* x_110; +lean_dec(x_86); +lean_free_object(x_63); +lean_dec(x_65); +x_109 = l_Lean_IR_ToIR_lowerCode___closed__4; +x_110 = l_panic___at_Lean_IR_ToIR_lowerAlt_loop___spec__1(x_109, x_2, x_3, x_4, x_5); +return x_110; +} +} +} +else +{ +lean_object* x_111; lean_object* x_112; uint64_t x_113; uint64_t x_114; uint64_t x_115; uint64_t x_116; uint64_t x_117; uint64_t x_118; uint64_t x_119; size_t x_120; size_t x_121; size_t x_122; size_t x_123; size_t x_124; lean_object* x_125; lean_object* x_126; +x_111 = lean_ctor_get(x_63, 1); +lean_inc(x_111); +lean_dec(x_63); +x_112 = lean_array_get_size(x_111); +x_113 = l___private_Lean_Expr_0__Lean_hashFVarId____x40_Lean_Expr___hyg_1730_(x_64); +x_114 = 32; +x_115 = lean_uint64_shift_right(x_113, x_114); +x_116 = lean_uint64_xor(x_113, x_115); +x_117 = 16; +x_118 = lean_uint64_shift_right(x_116, x_117); +x_119 = lean_uint64_xor(x_116, x_118); +x_120 = lean_uint64_to_usize(x_119); +x_121 = lean_usize_of_nat(x_112); +lean_dec(x_112); +x_122 = 1; +x_123 = lean_usize_sub(x_121, x_122); +x_124 = lean_usize_land(x_120, x_123); +x_125 = lean_array_uget(x_111, x_124); +lean_dec(x_111); +x_126 = l_Std_DHashMap_Internal_AssocList_get_x3f___at_Lean_IR_ToIR_lowerArg___spec__1(x_64, x_125); +lean_dec(x_125); +lean_dec(x_64); +if (lean_obj_tag(x_126) == 0) +{ +lean_object* x_127; lean_object* x_128; +lean_dec(x_65); +x_127 = l_Lean_IR_ToIR_lowerCode___closed__4; +x_128 = l_panic___at_Lean_IR_ToIR_lowerAlt_loop___spec__1(x_127, x_2, x_3, x_4, x_5); +return x_128; +} +else +{ +lean_object* x_129; +x_129 = lean_ctor_get(x_126, 0); +lean_inc(x_129); +lean_dec(x_126); +if (lean_obj_tag(x_129) == 1) +{ +lean_object* x_130; size_t x_131; size_t x_132; lean_object* x_133; +x_130 = lean_ctor_get(x_129, 0); +lean_inc(x_130); +lean_dec(x_129); +x_131 = lean_array_size(x_65); +x_132 = 0; +x_133 = l_Array_mapMUnsafe_map___at_Lean_IR_ToIR_lowerCode___spec__2(x_131, x_132, x_65, x_2, x_3, x_4, x_5); +if (lean_obj_tag(x_133) == 0) +{ +lean_object* x_134; lean_object* x_135; lean_object* x_136; lean_object* x_137; lean_object* x_138; lean_object* x_139; lean_object* x_140; lean_object* x_141; lean_object* x_142; +x_134 = 
lean_ctor_get(x_133, 0); +lean_inc(x_134); +x_135 = lean_ctor_get(x_133, 1); +lean_inc(x_135); +if (lean_is_exclusive(x_133)) { + lean_ctor_release(x_133, 0); + lean_ctor_release(x_133, 1); + x_136 = x_133; +} else { + lean_dec_ref(x_133); + x_136 = lean_box(0); +} +x_137 = lean_ctor_get(x_134, 0); +lean_inc(x_137); +x_138 = lean_ctor_get(x_134, 1); +lean_inc(x_138); +if (lean_is_exclusive(x_134)) { + lean_ctor_release(x_134, 0); + lean_ctor_release(x_134, 1); + x_139 = x_134; +} else { + lean_dec_ref(x_134); + x_139 = lean_box(0); +} +x_140 = lean_alloc_ctor(12, 2, 0); +lean_ctor_set(x_140, 0, x_130); +lean_ctor_set(x_140, 1, x_137); +if (lean_is_scalar(x_139)) { + x_141 = lean_alloc_ctor(0, 2, 0); +} else { + x_141 = x_139; +} +lean_ctor_set(x_141, 0, x_140); +lean_ctor_set(x_141, 1, x_138); +if (lean_is_scalar(x_136)) { + x_142 = lean_alloc_ctor(0, 2, 0); +} else { + x_142 = x_136; +} +lean_ctor_set(x_142, 0, x_141); +lean_ctor_set(x_142, 1, x_135); +return x_142; +} +else +{ +lean_object* x_143; lean_object* x_144; lean_object* x_145; lean_object* x_146; +lean_dec(x_130); +x_143 = lean_ctor_get(x_133, 0); +lean_inc(x_143); +x_144 = lean_ctor_get(x_133, 1); +lean_inc(x_144); +if (lean_is_exclusive(x_133)) { + lean_ctor_release(x_133, 0); + lean_ctor_release(x_133, 1); + x_145 = x_133; +} else { + lean_dec_ref(x_133); + x_145 = lean_box(0); +} +if (lean_is_scalar(x_145)) { + x_146 = lean_alloc_ctor(1, 2, 0); +} else { + x_146 = x_145; +} +lean_ctor_set(x_146, 0, x_143); +lean_ctor_set(x_146, 1, x_144); +return x_146; +} +} +else +{ +lean_object* x_147; lean_object* x_148; +lean_dec(x_129); +lean_dec(x_65); +x_147 = l_Lean_IR_ToIR_lowerCode___closed__4; +x_148 = l_panic___at_Lean_IR_ToIR_lowerAlt_loop___spec__1(x_147, x_2, x_3, x_4, x_5); +return x_148; +} +} +} +} +case 4: +{ +lean_object* x_149; lean_object* x_150; uint8_t x_151; +x_149 = lean_ctor_get(x_1, 0); +lean_inc(x_149); +lean_dec(x_1); +x_150 = lean_ctor_get(x_2, 0); +lean_inc(x_150); +x_151 = !lean_is_exclusive(x_149); +if (x_151 == 0) +{ +lean_object* x_152; lean_object* x_153; lean_object* x_154; lean_object* x_155; lean_object* x_156; lean_object* x_157; uint64_t x_158; uint64_t x_159; uint64_t x_160; uint64_t x_161; uint64_t x_162; uint64_t x_163; uint64_t x_164; size_t x_165; size_t x_166; size_t x_167; size_t x_168; size_t x_169; lean_object* x_170; lean_object* x_171; +x_152 = lean_ctor_get(x_149, 0); +x_153 = lean_ctor_get(x_149, 1); +x_154 = lean_ctor_get(x_149, 2); +x_155 = lean_ctor_get(x_149, 3); +x_156 = lean_ctor_get(x_150, 1); +lean_inc(x_156); +lean_dec(x_150); +x_157 = lean_array_get_size(x_156); +x_158 = l___private_Lean_Expr_0__Lean_hashFVarId____x40_Lean_Expr___hyg_1730_(x_154); +x_159 = 32; +x_160 = lean_uint64_shift_right(x_158, x_159); +x_161 = lean_uint64_xor(x_158, x_160); +x_162 = 16; +x_163 = lean_uint64_shift_right(x_161, x_162); +x_164 = lean_uint64_xor(x_161, x_163); +x_165 = lean_uint64_to_usize(x_164); +x_166 = lean_usize_of_nat(x_157); +lean_dec(x_157); +x_167 = 1; +x_168 = lean_usize_sub(x_166, x_167); +x_169 = lean_usize_land(x_165, x_168); +x_170 = lean_array_uget(x_156, x_169); +lean_dec(x_156); +x_171 = l_Std_DHashMap_Internal_AssocList_get_x3f___at_Lean_IR_ToIR_lowerArg___spec__1(x_154, x_170); +lean_dec(x_170); +lean_dec(x_154); +if (lean_obj_tag(x_171) == 0) +{ +lean_object* x_172; lean_object* x_173; +lean_free_object(x_149); +lean_dec(x_155); +lean_dec(x_153); +lean_dec(x_152); +x_172 = l_Lean_IR_ToIR_lowerCode___closed__5; +x_173 = 
l_panic___at_Lean_IR_ToIR_lowerAlt_loop___spec__1(x_172, x_2, x_3, x_4, x_5); +return x_173; +} +else +{ +lean_object* x_174; +x_174 = lean_ctor_get(x_171, 0); +lean_inc(x_174); +lean_dec(x_171); +if (lean_obj_tag(x_174) == 0) +{ +lean_object* x_175; lean_object* x_176; +x_175 = lean_ctor_get(x_174, 0); +lean_inc(x_175); +lean_dec(x_174); +lean_inc(x_4); +lean_inc(x_3); +x_176 = l_Lean_IR_ToIR_lowerType(x_153, x_2, x_3, x_4, x_5); +if (lean_obj_tag(x_176) == 0) +{ +lean_object* x_177; lean_object* x_178; lean_object* x_179; lean_object* x_180; size_t x_181; size_t x_182; lean_object* x_183; +x_177 = lean_ctor_get(x_176, 0); +lean_inc(x_177); +x_178 = lean_ctor_get(x_176, 1); +lean_inc(x_178); +lean_dec(x_176); +x_179 = lean_ctor_get(x_177, 0); +lean_inc(x_179); +x_180 = lean_ctor_get(x_177, 1); +lean_inc(x_180); +lean_dec(x_177); +x_181 = lean_array_size(x_155); +x_182 = 0; +lean_inc(x_175); +x_183 = l_Array_mapMUnsafe_map___at_Lean_IR_ToIR_lowerCode___spec__3(x_175, x_181, x_182, x_155, x_180, x_3, x_4, x_178); +if (lean_obj_tag(x_183) == 0) +{ +uint8_t x_184; +x_184 = !lean_is_exclusive(x_183); +if (x_184 == 0) +{ +lean_object* x_185; uint8_t x_186; +x_185 = lean_ctor_get(x_183, 0); +x_186 = !lean_is_exclusive(x_185); +if (x_186 == 0) +{ +lean_object* x_187; +x_187 = lean_ctor_get(x_185, 0); +lean_ctor_set_tag(x_149, 10); +lean_ctor_set(x_149, 3, x_187); +lean_ctor_set(x_149, 2, x_179); +lean_ctor_set(x_149, 1, x_175); +lean_ctor_set(x_185, 0, x_149); +return x_183; +} +else +{ +lean_object* x_188; lean_object* x_189; lean_object* x_190; +x_188 = lean_ctor_get(x_185, 0); +x_189 = lean_ctor_get(x_185, 1); +lean_inc(x_189); +lean_inc(x_188); +lean_dec(x_185); +lean_ctor_set_tag(x_149, 10); +lean_ctor_set(x_149, 3, x_188); +lean_ctor_set(x_149, 2, x_179); +lean_ctor_set(x_149, 1, x_175); +x_190 = lean_alloc_ctor(0, 2, 0); +lean_ctor_set(x_190, 0, x_149); +lean_ctor_set(x_190, 1, x_189); +lean_ctor_set(x_183, 0, x_190); +return x_183; +} +} +else +{ +lean_object* x_191; lean_object* x_192; lean_object* x_193; lean_object* x_194; lean_object* x_195; lean_object* x_196; lean_object* x_197; +x_191 = lean_ctor_get(x_183, 0); +x_192 = lean_ctor_get(x_183, 1); +lean_inc(x_192); +lean_inc(x_191); +lean_dec(x_183); +x_193 = lean_ctor_get(x_191, 0); +lean_inc(x_193); +x_194 = lean_ctor_get(x_191, 1); +lean_inc(x_194); +if (lean_is_exclusive(x_191)) { + lean_ctor_release(x_191, 0); + lean_ctor_release(x_191, 1); + x_195 = x_191; +} else { + lean_dec_ref(x_191); + x_195 = lean_box(0); +} +lean_ctor_set_tag(x_149, 10); +lean_ctor_set(x_149, 3, x_193); +lean_ctor_set(x_149, 2, x_179); +lean_ctor_set(x_149, 1, x_175); +if (lean_is_scalar(x_195)) { + x_196 = lean_alloc_ctor(0, 2, 0); +} else { + x_196 = x_195; +} +lean_ctor_set(x_196, 0, x_149); +lean_ctor_set(x_196, 1, x_194); +x_197 = lean_alloc_ctor(0, 2, 0); +lean_ctor_set(x_197, 0, x_196); +lean_ctor_set(x_197, 1, x_192); +return x_197; +} +} +else +{ +uint8_t x_198; +lean_dec(x_179); +lean_dec(x_175); +lean_free_object(x_149); +lean_dec(x_152); +x_198 = !lean_is_exclusive(x_183); +if (x_198 == 0) +{ +return x_183; +} +else +{ +lean_object* x_199; lean_object* x_200; lean_object* x_201; +x_199 = lean_ctor_get(x_183, 0); +x_200 = lean_ctor_get(x_183, 1); +lean_inc(x_200); +lean_inc(x_199); +lean_dec(x_183); +x_201 = lean_alloc_ctor(1, 2, 0); +lean_ctor_set(x_201, 0, x_199); +lean_ctor_set(x_201, 1, x_200); +return x_201; +} +} +} +else +{ +uint8_t x_202; +lean_dec(x_175); +lean_free_object(x_149); +lean_dec(x_155); +lean_dec(x_152); +lean_dec(x_4); 
+lean_dec(x_3); +x_202 = !lean_is_exclusive(x_176); +if (x_202 == 0) +{ +return x_176; +} +else +{ +lean_object* x_203; lean_object* x_204; lean_object* x_205; +x_203 = lean_ctor_get(x_176, 0); +x_204 = lean_ctor_get(x_176, 1); +lean_inc(x_204); +lean_inc(x_203); +lean_dec(x_176); +x_205 = lean_alloc_ctor(1, 2, 0); +lean_ctor_set(x_205, 0, x_203); +lean_ctor_set(x_205, 1, x_204); +return x_205; +} +} +} +else +{ +lean_object* x_206; lean_object* x_207; +lean_dec(x_174); +lean_free_object(x_149); +lean_dec(x_155); +lean_dec(x_153); +lean_dec(x_152); +x_206 = l_Lean_IR_ToIR_lowerCode___closed__5; +x_207 = l_panic___at_Lean_IR_ToIR_lowerAlt_loop___spec__1(x_206, x_2, x_3, x_4, x_5); +return x_207; +} +} +} +else +{ +lean_object* x_208; lean_object* x_209; lean_object* x_210; lean_object* x_211; lean_object* x_212; lean_object* x_213; uint64_t x_214; uint64_t x_215; uint64_t x_216; uint64_t x_217; uint64_t x_218; uint64_t x_219; uint64_t x_220; size_t x_221; size_t x_222; size_t x_223; size_t x_224; size_t x_225; lean_object* x_226; lean_object* x_227; +x_208 = lean_ctor_get(x_149, 0); +x_209 = lean_ctor_get(x_149, 1); +x_210 = lean_ctor_get(x_149, 2); +x_211 = lean_ctor_get(x_149, 3); +lean_inc(x_211); +lean_inc(x_210); +lean_inc(x_209); +lean_inc(x_208); +lean_dec(x_149); +x_212 = lean_ctor_get(x_150, 1); +lean_inc(x_212); +lean_dec(x_150); +x_213 = lean_array_get_size(x_212); +x_214 = l___private_Lean_Expr_0__Lean_hashFVarId____x40_Lean_Expr___hyg_1730_(x_210); +x_215 = 32; +x_216 = lean_uint64_shift_right(x_214, x_215); +x_217 = lean_uint64_xor(x_214, x_216); +x_218 = 16; +x_219 = lean_uint64_shift_right(x_217, x_218); +x_220 = lean_uint64_xor(x_217, x_219); +x_221 = lean_uint64_to_usize(x_220); +x_222 = lean_usize_of_nat(x_213); +lean_dec(x_213); +x_223 = 1; +x_224 = lean_usize_sub(x_222, x_223); +x_225 = lean_usize_land(x_221, x_224); +x_226 = lean_array_uget(x_212, x_225); +lean_dec(x_212); +x_227 = l_Std_DHashMap_Internal_AssocList_get_x3f___at_Lean_IR_ToIR_lowerArg___spec__1(x_210, x_226); +lean_dec(x_226); +lean_dec(x_210); +if (lean_obj_tag(x_227) == 0) +{ +lean_object* x_228; lean_object* x_229; +lean_dec(x_211); +lean_dec(x_209); +lean_dec(x_208); +x_228 = l_Lean_IR_ToIR_lowerCode___closed__5; +x_229 = l_panic___at_Lean_IR_ToIR_lowerAlt_loop___spec__1(x_228, x_2, x_3, x_4, x_5); +return x_229; +} +else +{ +lean_object* x_230; +x_230 = lean_ctor_get(x_227, 0); +lean_inc(x_230); +lean_dec(x_227); +if (lean_obj_tag(x_230) == 0) +{ +lean_object* x_231; lean_object* x_232; +x_231 = lean_ctor_get(x_230, 0); +lean_inc(x_231); +lean_dec(x_230); +lean_inc(x_4); +lean_inc(x_3); +x_232 = l_Lean_IR_ToIR_lowerType(x_209, x_2, x_3, x_4, x_5); +if (lean_obj_tag(x_232) == 0) +{ +lean_object* x_233; lean_object* x_234; lean_object* x_235; lean_object* x_236; size_t x_237; size_t x_238; lean_object* x_239; +x_233 = lean_ctor_get(x_232, 0); +lean_inc(x_233); +x_234 = lean_ctor_get(x_232, 1); +lean_inc(x_234); +lean_dec(x_232); +x_235 = lean_ctor_get(x_233, 0); +lean_inc(x_235); +x_236 = lean_ctor_get(x_233, 1); +lean_inc(x_236); +lean_dec(x_233); +x_237 = lean_array_size(x_211); +x_238 = 0; +lean_inc(x_231); +x_239 = l_Array_mapMUnsafe_map___at_Lean_IR_ToIR_lowerCode___spec__3(x_231, x_237, x_238, x_211, x_236, x_3, x_4, x_234); +if (lean_obj_tag(x_239) == 0) +{ +lean_object* x_240; lean_object* x_241; lean_object* x_242; lean_object* x_243; lean_object* x_244; lean_object* x_245; lean_object* x_246; lean_object* x_247; lean_object* x_248; +x_240 = lean_ctor_get(x_239, 0); +lean_inc(x_240); +x_241 = 
lean_ctor_get(x_239, 1); +lean_inc(x_241); +if (lean_is_exclusive(x_239)) { + lean_ctor_release(x_239, 0); + lean_ctor_release(x_239, 1); + x_242 = x_239; +} else { + lean_dec_ref(x_239); + x_242 = lean_box(0); +} +x_243 = lean_ctor_get(x_240, 0); +lean_inc(x_243); +x_244 = lean_ctor_get(x_240, 1); +lean_inc(x_244); +if (lean_is_exclusive(x_240)) { + lean_ctor_release(x_240, 0); + lean_ctor_release(x_240, 1); + x_245 = x_240; +} else { + lean_dec_ref(x_240); + x_245 = lean_box(0); +} +x_246 = lean_alloc_ctor(10, 4, 0); +lean_ctor_set(x_246, 0, x_208); +lean_ctor_set(x_246, 1, x_231); +lean_ctor_set(x_246, 2, x_235); +lean_ctor_set(x_246, 3, x_243); +if (lean_is_scalar(x_245)) { + x_247 = lean_alloc_ctor(0, 2, 0); +} else { + x_247 = x_245; +} +lean_ctor_set(x_247, 0, x_246); +lean_ctor_set(x_247, 1, x_244); +if (lean_is_scalar(x_242)) { + x_248 = lean_alloc_ctor(0, 2, 0); +} else { + x_248 = x_242; +} +lean_ctor_set(x_248, 0, x_247); +lean_ctor_set(x_248, 1, x_241); +return x_248; +} +else +{ +lean_object* x_249; lean_object* x_250; lean_object* x_251; lean_object* x_252; +lean_dec(x_235); +lean_dec(x_231); +lean_dec(x_208); +x_249 = lean_ctor_get(x_239, 0); +lean_inc(x_249); +x_250 = lean_ctor_get(x_239, 1); +lean_inc(x_250); +if (lean_is_exclusive(x_239)) { + lean_ctor_release(x_239, 0); + lean_ctor_release(x_239, 1); + x_251 = x_239; +} else { + lean_dec_ref(x_239); + x_251 = lean_box(0); +} +if (lean_is_scalar(x_251)) { + x_252 = lean_alloc_ctor(1, 2, 0); +} else { + x_252 = x_251; +} +lean_ctor_set(x_252, 0, x_249); +lean_ctor_set(x_252, 1, x_250); +return x_252; +} +} +else +{ +lean_object* x_253; lean_object* x_254; lean_object* x_255; lean_object* x_256; +lean_dec(x_231); +lean_dec(x_211); +lean_dec(x_208); +lean_dec(x_4); +lean_dec(x_3); +x_253 = lean_ctor_get(x_232, 0); +lean_inc(x_253); +x_254 = lean_ctor_get(x_232, 1); +lean_inc(x_254); +if (lean_is_exclusive(x_232)) { + lean_ctor_release(x_232, 0); + lean_ctor_release(x_232, 1); + x_255 = x_232; +} else { + lean_dec_ref(x_232); + x_255 = lean_box(0); +} +if (lean_is_scalar(x_255)) { + x_256 = lean_alloc_ctor(1, 2, 0); +} else { + x_256 = x_255; +} +lean_ctor_set(x_256, 0, x_253); +lean_ctor_set(x_256, 1, x_254); +return x_256; +} +} +else +{ +lean_object* x_257; lean_object* x_258; +lean_dec(x_230); +lean_dec(x_211); +lean_dec(x_209); +lean_dec(x_208); +x_257 = l_Lean_IR_ToIR_lowerCode___closed__5; +x_258 = l_panic___at_Lean_IR_ToIR_lowerAlt_loop___spec__1(x_257, x_2, x_3, x_4, x_5); +return x_258; +} +} +} +} +case 5: +{ +lean_object* x_259; uint8_t x_260; +lean_dec(x_4); +lean_dec(x_3); +x_259 = lean_ctor_get(x_2, 0); +lean_inc(x_259); +x_260 = !lean_is_exclusive(x_1); +if (x_260 == 0) +{ +uint8_t x_261; +x_261 = !lean_is_exclusive(x_259); +if (x_261 == 0) +{ +lean_object* x_262; lean_object* x_263; lean_object* x_264; lean_object* x_265; uint64_t x_266; uint64_t x_267; uint64_t x_268; uint64_t x_269; uint64_t x_270; uint64_t x_271; uint64_t x_272; size_t x_273; size_t x_274; size_t x_275; size_t x_276; size_t x_277; lean_object* x_278; lean_object* x_279; +x_262 = lean_ctor_get(x_1, 0); +x_263 = lean_ctor_get(x_259, 1); +x_264 = lean_ctor_get(x_259, 0); +lean_dec(x_264); +x_265 = lean_array_get_size(x_263); +x_266 = l___private_Lean_Expr_0__Lean_hashFVarId____x40_Lean_Expr___hyg_1730_(x_262); +x_267 = 32; +x_268 = lean_uint64_shift_right(x_266, x_267); +x_269 = lean_uint64_xor(x_266, x_268); +x_270 = 16; +x_271 = lean_uint64_shift_right(x_269, x_270); +x_272 = lean_uint64_xor(x_269, x_271); +x_273 = 
lean_uint64_to_usize(x_272); +x_274 = lean_usize_of_nat(x_265); +lean_dec(x_265); +x_275 = 1; +x_276 = lean_usize_sub(x_274, x_275); +x_277 = lean_usize_land(x_273, x_276); +x_278 = lean_array_uget(x_263, x_277); +lean_dec(x_263); +x_279 = l_Std_DHashMap_Internal_AssocList_get_x3f___at_Lean_IR_ToIR_lowerArg___spec__1(x_262, x_278); +lean_dec(x_278); +lean_dec(x_262); +if (lean_obj_tag(x_279) == 0) +{ +lean_object* x_280; lean_object* x_281; lean_object* x_282; +x_280 = l_Lean_IR_ToIR_lowerCode___closed__6; +x_281 = l_panic___at_Lean_IR_ToIR_lowerCode___spec__4(x_280); +lean_ctor_set_tag(x_1, 11); +lean_ctor_set(x_1, 0, x_281); +lean_ctor_set(x_259, 1, x_2); +lean_ctor_set(x_259, 0, x_1); +x_282 = lean_alloc_ctor(0, 2, 0); +lean_ctor_set(x_282, 0, x_259); +lean_ctor_set(x_282, 1, x_5); +return x_282; +} +else +{ +uint8_t x_283; +lean_free_object(x_1); +x_283 = !lean_is_exclusive(x_279); +if (x_283 == 0) +{ +lean_object* x_284; +x_284 = lean_ctor_get(x_279, 0); +switch (lean_obj_tag(x_284)) { +case 0: +{ +uint8_t x_285; +x_285 = !lean_is_exclusive(x_284); +if (x_285 == 0) +{ +lean_object* x_286; +lean_ctor_set_tag(x_279, 11); +lean_ctor_set(x_259, 1, x_2); +lean_ctor_set(x_259, 0, x_279); +x_286 = lean_alloc_ctor(0, 2, 0); +lean_ctor_set(x_286, 0, x_259); +lean_ctor_set(x_286, 1, x_5); +return x_286; +} +else +{ +lean_object* x_287; lean_object* x_288; lean_object* x_289; +x_287 = lean_ctor_get(x_284, 0); +lean_inc(x_287); +lean_dec(x_284); +x_288 = lean_alloc_ctor(0, 1, 0); +lean_ctor_set(x_288, 0, x_287); +lean_ctor_set_tag(x_279, 11); +lean_ctor_set(x_279, 0, x_288); +lean_ctor_set(x_259, 1, x_2); +lean_ctor_set(x_259, 0, x_279); +x_289 = lean_alloc_ctor(0, 2, 0); +lean_ctor_set(x_289, 0, x_259); +lean_ctor_set(x_289, 1, x_5); +return x_289; +} +} +case 1: +{ +uint8_t x_290; +lean_free_object(x_279); +x_290 = !lean_is_exclusive(x_284); +if (x_290 == 0) +{ +lean_object* x_291; lean_object* x_292; lean_object* x_293; lean_object* x_294; +x_291 = lean_ctor_get(x_284, 0); +lean_dec(x_291); +x_292 = l_Lean_IR_ToIR_lowerCode___closed__6; +x_293 = l_panic___at_Lean_IR_ToIR_lowerCode___spec__4(x_292); +lean_ctor_set_tag(x_284, 11); +lean_ctor_set(x_284, 0, x_293); +lean_ctor_set(x_259, 1, x_2); +lean_ctor_set(x_259, 0, x_284); +x_294 = lean_alloc_ctor(0, 2, 0); +lean_ctor_set(x_294, 0, x_259); +lean_ctor_set(x_294, 1, x_5); +return x_294; +} +else +{ +lean_object* x_295; lean_object* x_296; lean_object* x_297; lean_object* x_298; +lean_dec(x_284); +x_295 = l_Lean_IR_ToIR_lowerCode___closed__6; +x_296 = l_panic___at_Lean_IR_ToIR_lowerCode___spec__4(x_295); +x_297 = lean_alloc_ctor(11, 1, 0); +lean_ctor_set(x_297, 0, x_296); +lean_ctor_set(x_259, 1, x_2); +lean_ctor_set(x_259, 0, x_297); +x_298 = lean_alloc_ctor(0, 2, 0); +lean_ctor_set(x_298, 0, x_259); +lean_ctor_set(x_298, 1, x_5); +return x_298; +} +} +default: +{ +lean_object* x_299; lean_object* x_300; +lean_free_object(x_279); +x_299 = l_Lean_IR_ToIR_lowerCode___closed__7; +lean_ctor_set(x_259, 1, x_2); +lean_ctor_set(x_259, 0, x_299); +x_300 = lean_alloc_ctor(0, 2, 0); +lean_ctor_set(x_300, 0, x_259); +lean_ctor_set(x_300, 1, x_5); +return x_300; +} +} +} +else +{ +lean_object* x_301; +x_301 = lean_ctor_get(x_279, 0); +lean_inc(x_301); +lean_dec(x_279); +switch (lean_obj_tag(x_301)) { +case 0: +{ +lean_object* x_302; lean_object* x_303; lean_object* x_304; lean_object* x_305; lean_object* x_306; +x_302 = lean_ctor_get(x_301, 0); +lean_inc(x_302); +if (lean_is_exclusive(x_301)) { + lean_ctor_release(x_301, 0); + x_303 = x_301; +} else { + 
lean_dec_ref(x_301); + x_303 = lean_box(0); +} +if (lean_is_scalar(x_303)) { + x_304 = lean_alloc_ctor(0, 1, 0); +} else { + x_304 = x_303; +} +lean_ctor_set(x_304, 0, x_302); +x_305 = lean_alloc_ctor(11, 1, 0); +lean_ctor_set(x_305, 0, x_304); +lean_ctor_set(x_259, 1, x_2); +lean_ctor_set(x_259, 0, x_305); +x_306 = lean_alloc_ctor(0, 2, 0); +lean_ctor_set(x_306, 0, x_259); +lean_ctor_set(x_306, 1, x_5); +return x_306; +} +case 1: +{ +lean_object* x_307; lean_object* x_308; lean_object* x_309; lean_object* x_310; lean_object* x_311; +if (lean_is_exclusive(x_301)) { + lean_ctor_release(x_301, 0); + x_307 = x_301; +} else { + lean_dec_ref(x_301); + x_307 = lean_box(0); +} +x_308 = l_Lean_IR_ToIR_lowerCode___closed__6; +x_309 = l_panic___at_Lean_IR_ToIR_lowerCode___spec__4(x_308); +if (lean_is_scalar(x_307)) { + x_310 = lean_alloc_ctor(11, 1, 0); +} else { + x_310 = x_307; + lean_ctor_set_tag(x_310, 11); +} +lean_ctor_set(x_310, 0, x_309); +lean_ctor_set(x_259, 1, x_2); +lean_ctor_set(x_259, 0, x_310); +x_311 = lean_alloc_ctor(0, 2, 0); +lean_ctor_set(x_311, 0, x_259); +lean_ctor_set(x_311, 1, x_5); +return x_311; +} +default: +{ +lean_object* x_312; lean_object* x_313; +x_312 = l_Lean_IR_ToIR_lowerCode___closed__7; +lean_ctor_set(x_259, 1, x_2); +lean_ctor_set(x_259, 0, x_312); +x_313 = lean_alloc_ctor(0, 2, 0); +lean_ctor_set(x_313, 0, x_259); +lean_ctor_set(x_313, 1, x_5); +return x_313; +} +} +} +} +} +else +{ +lean_object* x_314; lean_object* x_315; lean_object* x_316; uint64_t x_317; uint64_t x_318; uint64_t x_319; uint64_t x_320; uint64_t x_321; uint64_t x_322; uint64_t x_323; size_t x_324; size_t x_325; size_t x_326; size_t x_327; size_t x_328; lean_object* x_329; lean_object* x_330; +x_314 = lean_ctor_get(x_1, 0); +x_315 = lean_ctor_get(x_259, 1); +lean_inc(x_315); +lean_dec(x_259); +x_316 = lean_array_get_size(x_315); +x_317 = l___private_Lean_Expr_0__Lean_hashFVarId____x40_Lean_Expr___hyg_1730_(x_314); +x_318 = 32; +x_319 = lean_uint64_shift_right(x_317, x_318); +x_320 = lean_uint64_xor(x_317, x_319); +x_321 = 16; +x_322 = lean_uint64_shift_right(x_320, x_321); +x_323 = lean_uint64_xor(x_320, x_322); +x_324 = lean_uint64_to_usize(x_323); +x_325 = lean_usize_of_nat(x_316); +lean_dec(x_316); +x_326 = 1; +x_327 = lean_usize_sub(x_325, x_326); +x_328 = lean_usize_land(x_324, x_327); +x_329 = lean_array_uget(x_315, x_328); +lean_dec(x_315); +x_330 = l_Std_DHashMap_Internal_AssocList_get_x3f___at_Lean_IR_ToIR_lowerArg___spec__1(x_314, x_329); +lean_dec(x_329); +lean_dec(x_314); +if (lean_obj_tag(x_330) == 0) +{ +lean_object* x_331; lean_object* x_332; lean_object* x_333; lean_object* x_334; +x_331 = l_Lean_IR_ToIR_lowerCode___closed__6; +x_332 = l_panic___at_Lean_IR_ToIR_lowerCode___spec__4(x_331); +lean_ctor_set_tag(x_1, 11); +lean_ctor_set(x_1, 0, x_332); +x_333 = lean_alloc_ctor(0, 2, 0); +lean_ctor_set(x_333, 0, x_1); +lean_ctor_set(x_333, 1, x_2); +x_334 = lean_alloc_ctor(0, 2, 0); +lean_ctor_set(x_334, 0, x_333); +lean_ctor_set(x_334, 1, x_5); +return x_334; +} +else +{ +lean_object* x_335; lean_object* x_336; +lean_free_object(x_1); +x_335 = lean_ctor_get(x_330, 0); +lean_inc(x_335); +if (lean_is_exclusive(x_330)) { + lean_ctor_release(x_330, 0); + x_336 = x_330; +} else { + lean_dec_ref(x_330); + x_336 = lean_box(0); +} +switch (lean_obj_tag(x_335)) { +case 0: +{ +lean_object* x_337; lean_object* x_338; lean_object* x_339; lean_object* x_340; lean_object* x_341; lean_object* x_342; +x_337 = lean_ctor_get(x_335, 0); +lean_inc(x_337); +if (lean_is_exclusive(x_335)) { + 
lean_ctor_release(x_335, 0); + x_338 = x_335; +} else { + lean_dec_ref(x_335); + x_338 = lean_box(0); +} +if (lean_is_scalar(x_338)) { + x_339 = lean_alloc_ctor(0, 1, 0); +} else { + x_339 = x_338; +} +lean_ctor_set(x_339, 0, x_337); +if (lean_is_scalar(x_336)) { + x_340 = lean_alloc_ctor(11, 1, 0); +} else { + x_340 = x_336; + lean_ctor_set_tag(x_340, 11); +} +lean_ctor_set(x_340, 0, x_339); +x_341 = lean_alloc_ctor(0, 2, 0); +lean_ctor_set(x_341, 0, x_340); +lean_ctor_set(x_341, 1, x_2); +x_342 = lean_alloc_ctor(0, 2, 0); +lean_ctor_set(x_342, 0, x_341); +lean_ctor_set(x_342, 1, x_5); +return x_342; +} +case 1: +{ +lean_object* x_343; lean_object* x_344; lean_object* x_345; lean_object* x_346; lean_object* x_347; lean_object* x_348; +lean_dec(x_336); +if (lean_is_exclusive(x_335)) { + lean_ctor_release(x_335, 0); + x_343 = x_335; +} else { + lean_dec_ref(x_335); + x_343 = lean_box(0); +} +x_344 = l_Lean_IR_ToIR_lowerCode___closed__6; +x_345 = l_panic___at_Lean_IR_ToIR_lowerCode___spec__4(x_344); +if (lean_is_scalar(x_343)) { + x_346 = lean_alloc_ctor(11, 1, 0); +} else { + x_346 = x_343; + lean_ctor_set_tag(x_346, 11); +} +lean_ctor_set(x_346, 0, x_345); +x_347 = lean_alloc_ctor(0, 2, 0); +lean_ctor_set(x_347, 0, x_346); +lean_ctor_set(x_347, 1, x_2); +x_348 = lean_alloc_ctor(0, 2, 0); +lean_ctor_set(x_348, 0, x_347); +lean_ctor_set(x_348, 1, x_5); +return x_348; +} +default: +{ +lean_object* x_349; lean_object* x_350; lean_object* x_351; +lean_dec(x_336); +x_349 = l_Lean_IR_ToIR_lowerCode___closed__7; +x_350 = lean_alloc_ctor(0, 2, 0); +lean_ctor_set(x_350, 0, x_349); +lean_ctor_set(x_350, 1, x_2); +x_351 = lean_alloc_ctor(0, 2, 0); +lean_ctor_set(x_351, 0, x_350); +lean_ctor_set(x_351, 1, x_5); +return x_351; +} +} +} +} +} +else +{ +lean_object* x_352; lean_object* x_353; lean_object* x_354; lean_object* x_355; uint64_t x_356; uint64_t x_357; uint64_t x_358; uint64_t x_359; uint64_t x_360; uint64_t x_361; uint64_t x_362; size_t x_363; size_t x_364; size_t x_365; size_t x_366; size_t x_367; lean_object* x_368; lean_object* x_369; +x_352 = lean_ctor_get(x_1, 0); +lean_inc(x_352); +lean_dec(x_1); +x_353 = lean_ctor_get(x_259, 1); +lean_inc(x_353); +if (lean_is_exclusive(x_259)) { + lean_ctor_release(x_259, 0); + lean_ctor_release(x_259, 1); + x_354 = x_259; +} else { + lean_dec_ref(x_259); + x_354 = lean_box(0); +} +x_355 = lean_array_get_size(x_353); +x_356 = l___private_Lean_Expr_0__Lean_hashFVarId____x40_Lean_Expr___hyg_1730_(x_352); +x_357 = 32; +x_358 = lean_uint64_shift_right(x_356, x_357); +x_359 = lean_uint64_xor(x_356, x_358); +x_360 = 16; +x_361 = lean_uint64_shift_right(x_359, x_360); +x_362 = lean_uint64_xor(x_359, x_361); +x_363 = lean_uint64_to_usize(x_362); +x_364 = lean_usize_of_nat(x_355); +lean_dec(x_355); +x_365 = 1; +x_366 = lean_usize_sub(x_364, x_365); +x_367 = lean_usize_land(x_363, x_366); +x_368 = lean_array_uget(x_353, x_367); +lean_dec(x_353); +x_369 = l_Std_DHashMap_Internal_AssocList_get_x3f___at_Lean_IR_ToIR_lowerArg___spec__1(x_352, x_368); +lean_dec(x_368); +lean_dec(x_352); +if (lean_obj_tag(x_369) == 0) +{ +lean_object* x_370; lean_object* x_371; lean_object* x_372; lean_object* x_373; lean_object* x_374; +x_370 = l_Lean_IR_ToIR_lowerCode___closed__6; +x_371 = l_panic___at_Lean_IR_ToIR_lowerCode___spec__4(x_370); +x_372 = lean_alloc_ctor(11, 1, 0); +lean_ctor_set(x_372, 0, x_371); +if (lean_is_scalar(x_354)) { + x_373 = lean_alloc_ctor(0, 2, 0); +} else { + x_373 = x_354; +} +lean_ctor_set(x_373, 0, x_372); +lean_ctor_set(x_373, 1, x_2); +x_374 = 
lean_alloc_ctor(0, 2, 0); +lean_ctor_set(x_374, 0, x_373); +lean_ctor_set(x_374, 1, x_5); +return x_374; +} +else +{ +lean_object* x_375; lean_object* x_376; +x_375 = lean_ctor_get(x_369, 0); +lean_inc(x_375); +if (lean_is_exclusive(x_369)) { + lean_ctor_release(x_369, 0); + x_376 = x_369; +} else { + lean_dec_ref(x_369); + x_376 = lean_box(0); +} +switch (lean_obj_tag(x_375)) { +case 0: +{ +lean_object* x_377; lean_object* x_378; lean_object* x_379; lean_object* x_380; lean_object* x_381; lean_object* x_382; +x_377 = lean_ctor_get(x_375, 0); +lean_inc(x_377); +if (lean_is_exclusive(x_375)) { + lean_ctor_release(x_375, 0); + x_378 = x_375; +} else { + lean_dec_ref(x_375); + x_378 = lean_box(0); +} +if (lean_is_scalar(x_378)) { + x_379 = lean_alloc_ctor(0, 1, 0); +} else { + x_379 = x_378; +} +lean_ctor_set(x_379, 0, x_377); +if (lean_is_scalar(x_376)) { + x_380 = lean_alloc_ctor(11, 1, 0); +} else { + x_380 = x_376; + lean_ctor_set_tag(x_380, 11); +} +lean_ctor_set(x_380, 0, x_379); +if (lean_is_scalar(x_354)) { + x_381 = lean_alloc_ctor(0, 2, 0); +} else { + x_381 = x_354; +} +lean_ctor_set(x_381, 0, x_380); +lean_ctor_set(x_381, 1, x_2); +x_382 = lean_alloc_ctor(0, 2, 0); +lean_ctor_set(x_382, 0, x_381); +lean_ctor_set(x_382, 1, x_5); +return x_382; +} +case 1: +{ +lean_object* x_383; lean_object* x_384; lean_object* x_385; lean_object* x_386; lean_object* x_387; lean_object* x_388; +lean_dec(x_376); +if (lean_is_exclusive(x_375)) { + lean_ctor_release(x_375, 0); + x_383 = x_375; +} else { + lean_dec_ref(x_375); + x_383 = lean_box(0); +} +x_384 = l_Lean_IR_ToIR_lowerCode___closed__6; +x_385 = l_panic___at_Lean_IR_ToIR_lowerCode___spec__4(x_384); +if (lean_is_scalar(x_383)) { + x_386 = lean_alloc_ctor(11, 1, 0); +} else { + x_386 = x_383; + lean_ctor_set_tag(x_386, 11); +} +lean_ctor_set(x_386, 0, x_385); +if (lean_is_scalar(x_354)) { + x_387 = lean_alloc_ctor(0, 2, 0); +} else { + x_387 = x_354; +} +lean_ctor_set(x_387, 0, x_386); +lean_ctor_set(x_387, 1, x_2); +x_388 = lean_alloc_ctor(0, 2, 0); +lean_ctor_set(x_388, 0, x_387); +lean_ctor_set(x_388, 1, x_5); +return x_388; +} +default: +{ +lean_object* x_389; lean_object* x_390; lean_object* x_391; +lean_dec(x_376); +x_389 = l_Lean_IR_ToIR_lowerCode___closed__7; +if (lean_is_scalar(x_354)) { + x_390 = lean_alloc_ctor(0, 2, 0); +} else { + x_390 = x_354; +} +lean_ctor_set(x_390, 0, x_389); +lean_ctor_set(x_390, 1, x_2); +x_391 = lean_alloc_ctor(0, 2, 0); +lean_ctor_set(x_391, 0, x_390); +lean_ctor_set(x_391, 1, x_5); +return x_391; +} +} +} +} +} +default: +{ +lean_object* x_392; lean_object* x_393; lean_object* x_394; +lean_dec(x_4); +lean_dec(x_3); +lean_dec(x_1); +x_392 = lean_box(13); +x_393 = lean_alloc_ctor(0, 2, 0); +lean_ctor_set(x_393, 0, x_392); +lean_ctor_set(x_393, 1, x_2); +x_394 = lean_alloc_ctor(0, 2, 0); +lean_ctor_set(x_394, 0, x_393); +lean_ctor_set(x_394, 1, x_5); +return x_394; +} +} +} +} +LEAN_EXPORT lean_object* l_Lean_IR_ToIR_lowerAlt(lean_object* x_1, lean_object* x_2, lean_object* x_3, lean_object* x_4, lean_object* x_5, lean_object* x_6) { +_start: +{ +if (lean_obj_tag(x_2) == 0) +{ +lean_object* x_7; lean_object* x_8; lean_object* x_9; lean_object* x_10; +x_7 = lean_ctor_get(x_2, 0); +lean_inc(x_7); +x_8 = lean_ctor_get(x_2, 1); +lean_inc(x_8); +x_9 = lean_ctor_get(x_2, 2); +lean_inc(x_9); +lean_dec(x_2); +lean_inc(x_5); +lean_inc(x_4); +x_10 = l_Lean_IR_ToIR_getCtorInfo(x_7, x_3, x_4, x_5, x_6); +if (lean_obj_tag(x_10) == 0) +{ +lean_object* x_11; lean_object* x_12; lean_object* x_13; lean_object* x_14; 
uint8_t x_15; +x_11 = lean_ctor_get(x_10, 0); +lean_inc(x_11); +x_12 = lean_ctor_get(x_11, 0); +lean_inc(x_12); +x_13 = lean_ctor_get(x_10, 1); +lean_inc(x_13); +lean_dec(x_10); +x_14 = lean_ctor_get(x_11, 1); +lean_inc(x_14); +lean_dec(x_11); +x_15 = !lean_is_exclusive(x_12); +if (x_15 == 0) +{ +lean_object* x_16; lean_object* x_17; lean_object* x_18; lean_object* x_19; +x_16 = lean_ctor_get(x_12, 0); +x_17 = lean_ctor_get(x_12, 1); +x_18 = lean_unsigned_to_nat(0u); +x_19 = l_Lean_IR_ToIR_lowerAlt_loop(x_1, x_9, x_16, x_8, x_17, x_18, x_14, x_4, x_5, x_13); +lean_dec(x_17); +lean_dec(x_8); +if (lean_obj_tag(x_19) == 0) +{ +uint8_t x_20; +x_20 = !lean_is_exclusive(x_19); +if (x_20 == 0) +{ +lean_object* x_21; uint8_t x_22; +x_21 = lean_ctor_get(x_19, 0); +x_22 = !lean_is_exclusive(x_21); +if (x_22 == 0) +{ +lean_object* x_23; +x_23 = lean_ctor_get(x_21, 0); +lean_ctor_set(x_12, 1, x_23); +lean_ctor_set(x_21, 0, x_12); +return x_19; +} +else +{ +lean_object* x_24; lean_object* x_25; lean_object* x_26; +x_24 = lean_ctor_get(x_21, 0); +x_25 = lean_ctor_get(x_21, 1); +lean_inc(x_25); +lean_inc(x_24); +lean_dec(x_21); +lean_ctor_set(x_12, 1, x_24); +x_26 = lean_alloc_ctor(0, 2, 0); +lean_ctor_set(x_26, 0, x_12); +lean_ctor_set(x_26, 1, x_25); +lean_ctor_set(x_19, 0, x_26); +return x_19; +} +} +else +{ +lean_object* x_27; lean_object* x_28; lean_object* x_29; lean_object* x_30; lean_object* x_31; lean_object* x_32; lean_object* x_33; +x_27 = lean_ctor_get(x_19, 0); +x_28 = lean_ctor_get(x_19, 1); +lean_inc(x_28); +lean_inc(x_27); +lean_dec(x_19); +x_29 = lean_ctor_get(x_27, 0); +lean_inc(x_29); +x_30 = lean_ctor_get(x_27, 1); +lean_inc(x_30); +if (lean_is_exclusive(x_27)) { + lean_ctor_release(x_27, 0); + lean_ctor_release(x_27, 1); + x_31 = x_27; +} else { + lean_dec_ref(x_27); + x_31 = lean_box(0); +} +lean_ctor_set(x_12, 1, x_29); +if (lean_is_scalar(x_31)) { + x_32 = lean_alloc_ctor(0, 2, 0); +} else { + x_32 = x_31; +} +lean_ctor_set(x_32, 0, x_12); +lean_ctor_set(x_32, 1, x_30); +x_33 = lean_alloc_ctor(0, 2, 0); +lean_ctor_set(x_33, 0, x_32); +lean_ctor_set(x_33, 1, x_28); +return x_33; +} +} +else +{ +uint8_t x_34; +lean_free_object(x_12); +lean_dec(x_16); +x_34 = !lean_is_exclusive(x_19); +if (x_34 == 0) +{ +return x_19; +} +else +{ +lean_object* x_35; lean_object* x_36; lean_object* x_37; +x_35 = lean_ctor_get(x_19, 0); +x_36 = lean_ctor_get(x_19, 1); +lean_inc(x_36); +lean_inc(x_35); +lean_dec(x_19); +x_37 = lean_alloc_ctor(1, 2, 0); +lean_ctor_set(x_37, 0, x_35); +lean_ctor_set(x_37, 1, x_36); +return x_37; +} +} +} +else +{ +lean_object* x_38; lean_object* x_39; lean_object* x_40; lean_object* x_41; +x_38 = lean_ctor_get(x_12, 0); +x_39 = lean_ctor_get(x_12, 1); +lean_inc(x_39); +lean_inc(x_38); +lean_dec(x_12); +x_40 = lean_unsigned_to_nat(0u); +x_41 = l_Lean_IR_ToIR_lowerAlt_loop(x_1, x_9, x_38, x_8, x_39, x_40, x_14, x_4, x_5, x_13); +lean_dec(x_39); +lean_dec(x_8); +if (lean_obj_tag(x_41) == 0) +{ +lean_object* x_42; lean_object* x_43; lean_object* x_44; lean_object* x_45; lean_object* x_46; lean_object* x_47; lean_object* x_48; lean_object* x_49; lean_object* x_50; +x_42 = lean_ctor_get(x_41, 0); +lean_inc(x_42); +x_43 = lean_ctor_get(x_41, 1); +lean_inc(x_43); +if (lean_is_exclusive(x_41)) { + lean_ctor_release(x_41, 0); + lean_ctor_release(x_41, 1); + x_44 = x_41; +} else { + lean_dec_ref(x_41); + x_44 = lean_box(0); +} +x_45 = lean_ctor_get(x_42, 0); +lean_inc(x_45); +x_46 = lean_ctor_get(x_42, 1); +lean_inc(x_46); +if (lean_is_exclusive(x_42)) { + lean_ctor_release(x_42, 0); 
+ lean_ctor_release(x_42, 1); + x_47 = x_42; +} else { + lean_dec_ref(x_42); + x_47 = lean_box(0); +} +x_48 = lean_alloc_ctor(0, 2, 0); +lean_ctor_set(x_48, 0, x_38); +lean_ctor_set(x_48, 1, x_45); +if (lean_is_scalar(x_47)) { + x_49 = lean_alloc_ctor(0, 2, 0); +} else { + x_49 = x_47; +} +lean_ctor_set(x_49, 0, x_48); +lean_ctor_set(x_49, 1, x_46); +if (lean_is_scalar(x_44)) { + x_50 = lean_alloc_ctor(0, 2, 0); +} else { + x_50 = x_44; +} +lean_ctor_set(x_50, 0, x_49); +lean_ctor_set(x_50, 1, x_43); +return x_50; +} +else +{ +lean_object* x_51; lean_object* x_52; lean_object* x_53; lean_object* x_54; +lean_dec(x_38); +x_51 = lean_ctor_get(x_41, 0); +lean_inc(x_51); +x_52 = lean_ctor_get(x_41, 1); +lean_inc(x_52); +if (lean_is_exclusive(x_41)) { + lean_ctor_release(x_41, 0); + lean_ctor_release(x_41, 1); + x_53 = x_41; +} else { + lean_dec_ref(x_41); + x_53 = lean_box(0); +} +if (lean_is_scalar(x_53)) { + x_54 = lean_alloc_ctor(1, 2, 0); +} else { + x_54 = x_53; +} +lean_ctor_set(x_54, 0, x_51); +lean_ctor_set(x_54, 1, x_52); +return x_54; +} +} +} +else +{ +uint8_t x_55; +lean_dec(x_9); +lean_dec(x_8); +lean_dec(x_5); +lean_dec(x_4); +lean_dec(x_1); +x_55 = !lean_is_exclusive(x_10); +if (x_55 == 0) +{ +return x_10; +} +else +{ +lean_object* x_56; lean_object* x_57; lean_object* x_58; +x_56 = lean_ctor_get(x_10, 0); +x_57 = lean_ctor_get(x_10, 1); +lean_inc(x_57); +lean_inc(x_56); +lean_dec(x_10); +x_58 = lean_alloc_ctor(1, 2, 0); +lean_ctor_set(x_58, 0, x_56); +lean_ctor_set(x_58, 1, x_57); +return x_58; +} +} +} +else +{ +uint8_t x_59; +lean_dec(x_1); +x_59 = !lean_is_exclusive(x_2); +if (x_59 == 0) +{ +lean_object* x_60; lean_object* x_61; +x_60 = lean_ctor_get(x_2, 0); +x_61 = l_Lean_IR_ToIR_lowerCode(x_60, x_3, x_4, x_5, x_6); +if (lean_obj_tag(x_61) == 0) +{ +uint8_t x_62; +x_62 = !lean_is_exclusive(x_61); +if (x_62 == 0) +{ +lean_object* x_63; uint8_t x_64; +x_63 = lean_ctor_get(x_61, 0); +x_64 = !lean_is_exclusive(x_63); +if (x_64 == 0) +{ +lean_object* x_65; +x_65 = lean_ctor_get(x_63, 0); +lean_ctor_set(x_2, 0, x_65); +lean_ctor_set(x_63, 0, x_2); +return x_61; +} +else +{ +lean_object* x_66; lean_object* x_67; lean_object* x_68; +x_66 = lean_ctor_get(x_63, 0); +x_67 = lean_ctor_get(x_63, 1); +lean_inc(x_67); +lean_inc(x_66); +lean_dec(x_63); +lean_ctor_set(x_2, 0, x_66); +x_68 = lean_alloc_ctor(0, 2, 0); +lean_ctor_set(x_68, 0, x_2); +lean_ctor_set(x_68, 1, x_67); +lean_ctor_set(x_61, 0, x_68); +return x_61; +} +} +else +{ +lean_object* x_69; lean_object* x_70; lean_object* x_71; lean_object* x_72; lean_object* x_73; lean_object* x_74; lean_object* x_75; +x_69 = lean_ctor_get(x_61, 0); +x_70 = lean_ctor_get(x_61, 1); +lean_inc(x_70); +lean_inc(x_69); +lean_dec(x_61); +x_71 = lean_ctor_get(x_69, 0); +lean_inc(x_71); +x_72 = lean_ctor_get(x_69, 1); +lean_inc(x_72); +if (lean_is_exclusive(x_69)) { + lean_ctor_release(x_69, 0); + lean_ctor_release(x_69, 1); + x_73 = x_69; +} else { + lean_dec_ref(x_69); + x_73 = lean_box(0); +} +lean_ctor_set(x_2, 0, x_71); +if (lean_is_scalar(x_73)) { + x_74 = lean_alloc_ctor(0, 2, 0); +} else { + x_74 = x_73; +} +lean_ctor_set(x_74, 0, x_2); +lean_ctor_set(x_74, 1, x_72); +x_75 = lean_alloc_ctor(0, 2, 0); +lean_ctor_set(x_75, 0, x_74); +lean_ctor_set(x_75, 1, x_70); +return x_75; +} +} +else +{ +uint8_t x_76; +lean_free_object(x_2); +x_76 = !lean_is_exclusive(x_61); +if (x_76 == 0) +{ +return x_61; +} +else +{ +lean_object* x_77; lean_object* x_78; lean_object* x_79; +x_77 = lean_ctor_get(x_61, 0); +x_78 = lean_ctor_get(x_61, 1); +lean_inc(x_78); 
+lean_inc(x_77); +lean_dec(x_61); +x_79 = lean_alloc_ctor(1, 2, 0); +lean_ctor_set(x_79, 0, x_77); +lean_ctor_set(x_79, 1, x_78); +return x_79; +} +} +} +else +{ +lean_object* x_80; lean_object* x_81; +x_80 = lean_ctor_get(x_2, 0); +lean_inc(x_80); +lean_dec(x_2); +x_81 = l_Lean_IR_ToIR_lowerCode(x_80, x_3, x_4, x_5, x_6); +if (lean_obj_tag(x_81) == 0) +{ +lean_object* x_82; lean_object* x_83; lean_object* x_84; lean_object* x_85; lean_object* x_86; lean_object* x_87; lean_object* x_88; lean_object* x_89; lean_object* x_90; +x_82 = lean_ctor_get(x_81, 0); +lean_inc(x_82); +x_83 = lean_ctor_get(x_81, 1); +lean_inc(x_83); +if (lean_is_exclusive(x_81)) { + lean_ctor_release(x_81, 0); + lean_ctor_release(x_81, 1); + x_84 = x_81; +} else { + lean_dec_ref(x_81); + x_84 = lean_box(0); +} +x_85 = lean_ctor_get(x_82, 0); +lean_inc(x_85); +x_86 = lean_ctor_get(x_82, 1); +lean_inc(x_86); +if (lean_is_exclusive(x_82)) { + lean_ctor_release(x_82, 0); + lean_ctor_release(x_82, 1); + x_87 = x_82; +} else { + lean_dec_ref(x_82); + x_87 = lean_box(0); +} +x_88 = lean_alloc_ctor(1, 1, 0); +lean_ctor_set(x_88, 0, x_85); +if (lean_is_scalar(x_87)) { + x_89 = lean_alloc_ctor(0, 2, 0); +} else { + x_89 = x_87; +} +lean_ctor_set(x_89, 0, x_88); +lean_ctor_set(x_89, 1, x_86); +if (lean_is_scalar(x_84)) { + x_90 = lean_alloc_ctor(0, 2, 0); +} else { + x_90 = x_84; +} +lean_ctor_set(x_90, 0, x_89); +lean_ctor_set(x_90, 1, x_83); +return x_90; +} +else +{ +lean_object* x_91; lean_object* x_92; lean_object* x_93; lean_object* x_94; +x_91 = lean_ctor_get(x_81, 0); +lean_inc(x_91); +x_92 = lean_ctor_get(x_81, 1); +lean_inc(x_92); +if (lean_is_exclusive(x_81)) { + lean_ctor_release(x_81, 0); + lean_ctor_release(x_81, 1); + x_93 = x_81; +} else { + lean_dec_ref(x_81); + x_93 = lean_box(0); +} +if (lean_is_scalar(x_93)) { + x_94 = lean_alloc_ctor(1, 2, 0); +} else { + x_94 = x_93; +} +lean_ctor_set(x_94, 0, x_91); +lean_ctor_set(x_94, 1, x_92); +return x_94; +} +} +} +} +} +LEAN_EXPORT lean_object* l_Lean_throwError___at_Lean_IR_ToIR_lowerLet___spec__1(lean_object* x_1, lean_object* x_2, lean_object* x_3, lean_object* x_4, lean_object* x_5) { +_start: +{ +lean_object* x_6; lean_object* x_7; uint8_t x_8; +x_6 = lean_ctor_get(x_3, 5); +x_7 = l_Lean_addMessageContextPartial___at_Lean_Core_instAddMessageContextCoreM___spec__1(x_1, x_3, x_4, x_5); +x_8 = !lean_is_exclusive(x_7); +if (x_8 == 0) +{ +lean_object* x_9; lean_object* x_10; +x_9 = lean_ctor_get(x_7, 0); +lean_inc(x_6); +x_10 = lean_alloc_ctor(0, 2, 0); +lean_ctor_set(x_10, 0, x_6); +lean_ctor_set(x_10, 1, x_9); +lean_ctor_set_tag(x_7, 1); +lean_ctor_set(x_7, 0, x_10); +return x_7; +} +else +{ +lean_object* x_11; lean_object* x_12; lean_object* x_13; lean_object* x_14; +x_11 = lean_ctor_get(x_7, 0); +x_12 = lean_ctor_get(x_7, 1); +lean_inc(x_12); +lean_inc(x_11); +lean_dec(x_7); +lean_inc(x_6); +x_13 = lean_alloc_ctor(0, 2, 0); +lean_ctor_set(x_13, 0, x_6); +lean_ctor_set(x_13, 1, x_11); +x_14 = lean_alloc_ctor(1, 2, 0); +lean_ctor_set(x_14, 0, x_13); +lean_ctor_set(x_14, 1, x_12); +return x_14; +} +} +} +LEAN_EXPORT lean_object* l_Std_Range_forIn_x27_loop___at_Lean_IR_ToIR_lowerLet___spec__2(lean_object* x_1, lean_object* x_2, lean_object* x_3, lean_object* x_4, lean_object* x_5, lean_object* x_6, lean_object* x_7, lean_object* x_8, lean_object* x_9, lean_object* x_10, lean_object* x_11, lean_object* x_12) { +_start: +{ +lean_object* x_13; uint8_t x_14; +x_13 = lean_ctor_get(x_4, 1); +x_14 = lean_nat_dec_lt(x_6, x_13); +if (x_14 == 0) +{ +lean_object* x_15; 
lean_object* x_16; +lean_dec(x_6); +x_15 = lean_alloc_ctor(0, 2, 0); +lean_ctor_set(x_15, 0, x_5); +lean_ctor_set(x_15, 1, x_9); +x_16 = lean_alloc_ctor(0, 2, 0); +lean_ctor_set(x_16, 0, x_15); +lean_ctor_set(x_16, 1, x_12); +return x_16; +} +else +{ +lean_object* x_17; lean_object* x_18; lean_object* x_26; lean_object* x_27; +x_26 = l_Lean_Compiler_LCNF_instInhabitedArg; +x_27 = lean_array_get(x_26, x_2, x_6); +switch (lean_obj_tag(x_27)) { +case 0: +{ +lean_object* x_28; lean_object* x_29; +x_28 = l_Lean_IR_instInhabitedCtorFieldInfo; +x_29 = lean_array_get(x_28, x_1, x_6); +switch (lean_obj_tag(x_29)) { +case 1: +{ +uint8_t x_30; +x_30 = !lean_is_exclusive(x_29); +if (x_30 == 0) +{ +lean_object* x_31; lean_object* x_32; lean_object* x_33; lean_object* x_34; +x_31 = lean_ctor_get(x_29, 0); +lean_dec(x_31); +x_32 = lean_box(1); +x_33 = lean_array_push(x_5, x_32); +lean_ctor_set(x_29, 0, x_33); +x_34 = lean_alloc_ctor(0, 2, 0); +lean_ctor_set(x_34, 0, x_29); +lean_ctor_set(x_34, 1, x_9); +x_17 = x_34; +x_18 = x_12; +goto block_25; +} +else +{ +lean_object* x_35; lean_object* x_36; lean_object* x_37; lean_object* x_38; +lean_dec(x_29); +x_35 = lean_box(1); +x_36 = lean_array_push(x_5, x_35); +x_37 = lean_alloc_ctor(1, 1, 0); +lean_ctor_set(x_37, 0, x_36); +x_38 = lean_alloc_ctor(0, 2, 0); +lean_ctor_set(x_38, 0, x_37); +lean_ctor_set(x_38, 1, x_9); +x_17 = x_38; +x_18 = x_12; +goto block_25; +} +} +case 2: +{ +uint8_t x_39; +x_39 = !lean_is_exclusive(x_29); +if (x_39 == 0) +{ +lean_object* x_40; lean_object* x_41; +x_40 = lean_ctor_get(x_29, 0); +lean_dec(x_40); +lean_ctor_set_tag(x_29, 1); +lean_ctor_set(x_29, 0, x_5); +x_41 = lean_alloc_ctor(0, 2, 0); +lean_ctor_set(x_41, 0, x_29); +lean_ctor_set(x_41, 1, x_9); +x_17 = x_41; +x_18 = x_12; +goto block_25; +} +else +{ +lean_object* x_42; lean_object* x_43; +lean_dec(x_29); +x_42 = lean_alloc_ctor(1, 1, 0); +lean_ctor_set(x_42, 0, x_5); +x_43 = lean_alloc_ctor(0, 2, 0); +lean_ctor_set(x_43, 0, x_42); +lean_ctor_set(x_43, 1, x_9); +x_17 = x_43; +x_18 = x_12; +goto block_25; +} +} +default: +{ +lean_object* x_44; lean_object* x_45; +lean_dec(x_29); +x_44 = lean_alloc_ctor(1, 1, 0); +lean_ctor_set(x_44, 0, x_5); +x_45 = lean_alloc_ctor(0, 2, 0); +lean_ctor_set(x_45, 0, x_44); +lean_ctor_set(x_45, 1, x_9); +x_17 = x_45; +x_18 = x_12; +goto block_25; +} +} +} +case 1: +{ +lean_object* x_46; uint8_t x_47; +x_46 = lean_ctor_get(x_9, 0); +lean_inc(x_46); +x_47 = !lean_is_exclusive(x_27); +if (x_47 == 0) +{ +uint8_t x_48; +x_48 = !lean_is_exclusive(x_46); +if (x_48 == 0) +{ +lean_object* x_49; lean_object* x_50; lean_object* x_51; lean_object* x_52; uint64_t x_53; uint64_t x_54; uint64_t x_55; uint64_t x_56; uint64_t x_57; uint64_t x_58; uint64_t x_59; size_t x_60; size_t x_61; size_t x_62; size_t x_63; size_t x_64; lean_object* x_65; lean_object* x_66; +x_49 = lean_ctor_get(x_27, 0); +x_50 = lean_ctor_get(x_46, 1); +x_51 = lean_ctor_get(x_46, 0); +lean_dec(x_51); +x_52 = lean_array_get_size(x_50); +x_53 = l___private_Lean_Expr_0__Lean_hashFVarId____x40_Lean_Expr___hyg_1730_(x_49); +x_54 = 32; +x_55 = lean_uint64_shift_right(x_53, x_54); +x_56 = lean_uint64_xor(x_53, x_55); +x_57 = 16; +x_58 = lean_uint64_shift_right(x_56, x_57); +x_59 = lean_uint64_xor(x_56, x_58); +x_60 = lean_uint64_to_usize(x_59); +x_61 = lean_usize_of_nat(x_52); +lean_dec(x_52); +x_62 = 1; +x_63 = lean_usize_sub(x_61, x_62); +x_64 = lean_usize_land(x_60, x_63); +x_65 = lean_array_uget(x_50, x_64); +lean_dec(x_50); +x_66 = 
l_Std_DHashMap_Internal_AssocList_get_x3f___at_Lean_IR_ToIR_lowerArg___spec__1(x_49, x_65); +lean_dec(x_65); +lean_dec(x_49); +if (lean_obj_tag(x_66) == 0) +{ +lean_ctor_set(x_27, 0, x_5); +lean_ctor_set(x_46, 1, x_9); +lean_ctor_set(x_46, 0, x_27); +x_17 = x_46; +x_18 = x_12; +goto block_25; +} +else +{ +uint8_t x_67; +lean_free_object(x_27); +x_67 = !lean_is_exclusive(x_66); +if (x_67 == 0) +{ +lean_object* x_68; +x_68 = lean_ctor_get(x_66, 0); +switch (lean_obj_tag(x_68)) { +case 0: +{ +uint8_t x_69; +lean_free_object(x_66); +x_69 = !lean_is_exclusive(x_68); +if (x_69 == 0) +{ +lean_object* x_70; lean_object* x_71; lean_object* x_72; +x_70 = lean_ctor_get(x_68, 0); +x_71 = l_Lean_IR_instInhabitedCtorFieldInfo; +x_72 = lean_array_get(x_71, x_1, x_6); +switch (lean_obj_tag(x_72)) { +case 1: +{ +uint8_t x_73; +x_73 = !lean_is_exclusive(x_72); +if (x_73 == 0) +{ +lean_object* x_74; lean_object* x_75; +x_74 = lean_ctor_get(x_72, 0); +lean_dec(x_74); +lean_ctor_set_tag(x_72, 0); +lean_ctor_set(x_72, 0, x_70); +x_75 = lean_array_push(x_5, x_72); +lean_ctor_set_tag(x_68, 1); +lean_ctor_set(x_68, 0, x_75); +lean_ctor_set(x_46, 1, x_9); +lean_ctor_set(x_46, 0, x_68); +x_17 = x_46; +x_18 = x_12; +goto block_25; +} +else +{ +lean_object* x_76; lean_object* x_77; +lean_dec(x_72); +x_76 = lean_alloc_ctor(0, 1, 0); +lean_ctor_set(x_76, 0, x_70); +x_77 = lean_array_push(x_5, x_76); +lean_ctor_set_tag(x_68, 1); +lean_ctor_set(x_68, 0, x_77); +lean_ctor_set(x_46, 1, x_9); +lean_ctor_set(x_46, 0, x_68); +x_17 = x_46; +x_18 = x_12; +goto block_25; +} +} +case 2: +{ +uint8_t x_78; +lean_free_object(x_68); +lean_dec(x_70); +x_78 = !lean_is_exclusive(x_72); +if (x_78 == 0) +{ +lean_object* x_79; +x_79 = lean_ctor_get(x_72, 0); +lean_dec(x_79); +lean_ctor_set_tag(x_72, 1); +lean_ctor_set(x_72, 0, x_5); +lean_ctor_set(x_46, 1, x_9); +lean_ctor_set(x_46, 0, x_72); +x_17 = x_46; +x_18 = x_12; +goto block_25; +} +else +{ +lean_object* x_80; +lean_dec(x_72); +x_80 = lean_alloc_ctor(1, 1, 0); +lean_ctor_set(x_80, 0, x_5); +lean_ctor_set(x_46, 1, x_9); +lean_ctor_set(x_46, 0, x_80); +x_17 = x_46; +x_18 = x_12; +goto block_25; +} +} +default: +{ +lean_dec(x_72); +lean_dec(x_70); +lean_ctor_set_tag(x_68, 1); +lean_ctor_set(x_68, 0, x_5); +lean_ctor_set(x_46, 1, x_9); +lean_ctor_set(x_46, 0, x_68); +x_17 = x_46; +x_18 = x_12; +goto block_25; +} +} +} +else +{ +lean_object* x_81; lean_object* x_82; lean_object* x_83; +x_81 = lean_ctor_get(x_68, 0); +lean_inc(x_81); +lean_dec(x_68); +x_82 = l_Lean_IR_instInhabitedCtorFieldInfo; +x_83 = lean_array_get(x_82, x_1, x_6); +switch (lean_obj_tag(x_83)) { +case 1: +{ +lean_object* x_84; lean_object* x_85; lean_object* x_86; lean_object* x_87; +if (lean_is_exclusive(x_83)) { + lean_ctor_release(x_83, 0); + x_84 = x_83; +} else { + lean_dec_ref(x_83); + x_84 = lean_box(0); +} +if (lean_is_scalar(x_84)) { + x_85 = lean_alloc_ctor(0, 1, 0); +} else { + x_85 = x_84; + lean_ctor_set_tag(x_85, 0); +} +lean_ctor_set(x_85, 0, x_81); +x_86 = lean_array_push(x_5, x_85); +x_87 = lean_alloc_ctor(1, 1, 0); +lean_ctor_set(x_87, 0, x_86); +lean_ctor_set(x_46, 1, x_9); +lean_ctor_set(x_46, 0, x_87); +x_17 = x_46; +x_18 = x_12; +goto block_25; +} +case 2: +{ +lean_object* x_88; lean_object* x_89; +lean_dec(x_81); +if (lean_is_exclusive(x_83)) { + lean_ctor_release(x_83, 0); + x_88 = x_83; +} else { + lean_dec_ref(x_83); + x_88 = lean_box(0); +} +if (lean_is_scalar(x_88)) { + x_89 = lean_alloc_ctor(1, 1, 0); +} else { + x_89 = x_88; + lean_ctor_set_tag(x_89, 1); +} +lean_ctor_set(x_89, 0, x_5); 
+lean_ctor_set(x_46, 1, x_9); +lean_ctor_set(x_46, 0, x_89); +x_17 = x_46; +x_18 = x_12; +goto block_25; +} +default: +{ +lean_object* x_90; +lean_dec(x_83); +lean_dec(x_81); +x_90 = lean_alloc_ctor(1, 1, 0); +lean_ctor_set(x_90, 0, x_5); +lean_ctor_set(x_46, 1, x_9); +lean_ctor_set(x_46, 0, x_90); +x_17 = x_46; +x_18 = x_12; +goto block_25; +} +} +} +} +case 1: +{ +uint8_t x_91; +lean_free_object(x_66); +x_91 = !lean_is_exclusive(x_68); +if (x_91 == 0) +{ +lean_object* x_92; +x_92 = lean_ctor_get(x_68, 0); +lean_dec(x_92); +lean_ctor_set(x_68, 0, x_5); +lean_ctor_set(x_46, 1, x_9); +lean_ctor_set(x_46, 0, x_68); +x_17 = x_46; +x_18 = x_12; +goto block_25; +} +else +{ +lean_object* x_93; +lean_dec(x_68); +x_93 = lean_alloc_ctor(1, 1, 0); +lean_ctor_set(x_93, 0, x_5); +lean_ctor_set(x_46, 1, x_9); +lean_ctor_set(x_46, 0, x_93); +x_17 = x_46; +x_18 = x_12; +goto block_25; +} +} +default: +{ +lean_ctor_set(x_66, 0, x_5); +lean_ctor_set(x_46, 1, x_9); +lean_ctor_set(x_46, 0, x_66); +x_17 = x_46; +x_18 = x_12; +goto block_25; +} +} +} +else +{ +lean_object* x_94; +x_94 = lean_ctor_get(x_66, 0); +lean_inc(x_94); +lean_dec(x_66); +switch (lean_obj_tag(x_94)) { +case 0: +{ +lean_object* x_95; lean_object* x_96; lean_object* x_97; lean_object* x_98; +x_95 = lean_ctor_get(x_94, 0); +lean_inc(x_95); +if (lean_is_exclusive(x_94)) { + lean_ctor_release(x_94, 0); + x_96 = x_94; +} else { + lean_dec_ref(x_94); + x_96 = lean_box(0); +} +x_97 = l_Lean_IR_instInhabitedCtorFieldInfo; +x_98 = lean_array_get(x_97, x_1, x_6); +switch (lean_obj_tag(x_98)) { +case 1: +{ +lean_object* x_99; lean_object* x_100; lean_object* x_101; lean_object* x_102; +if (lean_is_exclusive(x_98)) { + lean_ctor_release(x_98, 0); + x_99 = x_98; +} else { + lean_dec_ref(x_98); + x_99 = lean_box(0); +} +if (lean_is_scalar(x_99)) { + x_100 = lean_alloc_ctor(0, 1, 0); +} else { + x_100 = x_99; + lean_ctor_set_tag(x_100, 0); +} +lean_ctor_set(x_100, 0, x_95); +x_101 = lean_array_push(x_5, x_100); +if (lean_is_scalar(x_96)) { + x_102 = lean_alloc_ctor(1, 1, 0); +} else { + x_102 = x_96; + lean_ctor_set_tag(x_102, 1); +} +lean_ctor_set(x_102, 0, x_101); +lean_ctor_set(x_46, 1, x_9); +lean_ctor_set(x_46, 0, x_102); +x_17 = x_46; +x_18 = x_12; +goto block_25; +} +case 2: +{ +lean_object* x_103; lean_object* x_104; +lean_dec(x_96); +lean_dec(x_95); +if (lean_is_exclusive(x_98)) { + lean_ctor_release(x_98, 0); + x_103 = x_98; +} else { + lean_dec_ref(x_98); + x_103 = lean_box(0); +} +if (lean_is_scalar(x_103)) { + x_104 = lean_alloc_ctor(1, 1, 0); +} else { + x_104 = x_103; + lean_ctor_set_tag(x_104, 1); +} +lean_ctor_set(x_104, 0, x_5); +lean_ctor_set(x_46, 1, x_9); +lean_ctor_set(x_46, 0, x_104); +x_17 = x_46; +x_18 = x_12; +goto block_25; +} +default: +{ +lean_object* x_105; +lean_dec(x_98); +lean_dec(x_95); +if (lean_is_scalar(x_96)) { + x_105 = lean_alloc_ctor(1, 1, 0); +} else { + x_105 = x_96; + lean_ctor_set_tag(x_105, 1); +} +lean_ctor_set(x_105, 0, x_5); +lean_ctor_set(x_46, 1, x_9); +lean_ctor_set(x_46, 0, x_105); +x_17 = x_46; +x_18 = x_12; +goto block_25; +} +} +} +case 1: +{ +lean_object* x_106; lean_object* x_107; +if (lean_is_exclusive(x_94)) { + lean_ctor_release(x_94, 0); + x_106 = x_94; +} else { + lean_dec_ref(x_94); + x_106 = lean_box(0); +} +if (lean_is_scalar(x_106)) { + x_107 = lean_alloc_ctor(1, 1, 0); +} else { + x_107 = x_106; +} +lean_ctor_set(x_107, 0, x_5); +lean_ctor_set(x_46, 1, x_9); +lean_ctor_set(x_46, 0, x_107); +x_17 = x_46; +x_18 = x_12; +goto block_25; +} +default: +{ +lean_object* x_108; +x_108 = 
lean_alloc_ctor(1, 1, 0); +lean_ctor_set(x_108, 0, x_5); +lean_ctor_set(x_46, 1, x_9); +lean_ctor_set(x_46, 0, x_108); +x_17 = x_46; +x_18 = x_12; +goto block_25; +} +} +} +} +} +else +{ +lean_object* x_109; lean_object* x_110; lean_object* x_111; uint64_t x_112; uint64_t x_113; uint64_t x_114; uint64_t x_115; uint64_t x_116; uint64_t x_117; uint64_t x_118; size_t x_119; size_t x_120; size_t x_121; size_t x_122; size_t x_123; lean_object* x_124; lean_object* x_125; +x_109 = lean_ctor_get(x_27, 0); +x_110 = lean_ctor_get(x_46, 1); +lean_inc(x_110); +lean_dec(x_46); +x_111 = lean_array_get_size(x_110); +x_112 = l___private_Lean_Expr_0__Lean_hashFVarId____x40_Lean_Expr___hyg_1730_(x_109); +x_113 = 32; +x_114 = lean_uint64_shift_right(x_112, x_113); +x_115 = lean_uint64_xor(x_112, x_114); +x_116 = 16; +x_117 = lean_uint64_shift_right(x_115, x_116); +x_118 = lean_uint64_xor(x_115, x_117); +x_119 = lean_uint64_to_usize(x_118); +x_120 = lean_usize_of_nat(x_111); +lean_dec(x_111); +x_121 = 1; +x_122 = lean_usize_sub(x_120, x_121); +x_123 = lean_usize_land(x_119, x_122); +x_124 = lean_array_uget(x_110, x_123); +lean_dec(x_110); +x_125 = l_Std_DHashMap_Internal_AssocList_get_x3f___at_Lean_IR_ToIR_lowerArg___spec__1(x_109, x_124); +lean_dec(x_124); +lean_dec(x_109); +if (lean_obj_tag(x_125) == 0) +{ +lean_object* x_126; +lean_ctor_set(x_27, 0, x_5); +x_126 = lean_alloc_ctor(0, 2, 0); +lean_ctor_set(x_126, 0, x_27); +lean_ctor_set(x_126, 1, x_9); +x_17 = x_126; +x_18 = x_12; +goto block_25; +} +else +{ +lean_object* x_127; lean_object* x_128; +lean_free_object(x_27); +x_127 = lean_ctor_get(x_125, 0); +lean_inc(x_127); +if (lean_is_exclusive(x_125)) { + lean_ctor_release(x_125, 0); + x_128 = x_125; +} else { + lean_dec_ref(x_125); + x_128 = lean_box(0); +} +switch (lean_obj_tag(x_127)) { +case 0: +{ +lean_object* x_129; lean_object* x_130; lean_object* x_131; lean_object* x_132; +lean_dec(x_128); +x_129 = lean_ctor_get(x_127, 0); +lean_inc(x_129); +if (lean_is_exclusive(x_127)) { + lean_ctor_release(x_127, 0); + x_130 = x_127; +} else { + lean_dec_ref(x_127); + x_130 = lean_box(0); +} +x_131 = l_Lean_IR_instInhabitedCtorFieldInfo; +x_132 = lean_array_get(x_131, x_1, x_6); +switch (lean_obj_tag(x_132)) { +case 1: +{ +lean_object* x_133; lean_object* x_134; lean_object* x_135; lean_object* x_136; lean_object* x_137; +if (lean_is_exclusive(x_132)) { + lean_ctor_release(x_132, 0); + x_133 = x_132; +} else { + lean_dec_ref(x_132); + x_133 = lean_box(0); +} +if (lean_is_scalar(x_133)) { + x_134 = lean_alloc_ctor(0, 1, 0); +} else { + x_134 = x_133; + lean_ctor_set_tag(x_134, 0); +} +lean_ctor_set(x_134, 0, x_129); +x_135 = lean_array_push(x_5, x_134); +if (lean_is_scalar(x_130)) { + x_136 = lean_alloc_ctor(1, 1, 0); +} else { + x_136 = x_130; + lean_ctor_set_tag(x_136, 1); +} +lean_ctor_set(x_136, 0, x_135); +x_137 = lean_alloc_ctor(0, 2, 0); +lean_ctor_set(x_137, 0, x_136); +lean_ctor_set(x_137, 1, x_9); +x_17 = x_137; +x_18 = x_12; +goto block_25; +} +case 2: +{ +lean_object* x_138; lean_object* x_139; lean_object* x_140; +lean_dec(x_130); +lean_dec(x_129); +if (lean_is_exclusive(x_132)) { + lean_ctor_release(x_132, 0); + x_138 = x_132; +} else { + lean_dec_ref(x_132); + x_138 = lean_box(0); +} +if (lean_is_scalar(x_138)) { + x_139 = lean_alloc_ctor(1, 1, 0); +} else { + x_139 = x_138; + lean_ctor_set_tag(x_139, 1); +} +lean_ctor_set(x_139, 0, x_5); +x_140 = lean_alloc_ctor(0, 2, 0); +lean_ctor_set(x_140, 0, x_139); +lean_ctor_set(x_140, 1, x_9); +x_17 = x_140; +x_18 = x_12; +goto block_25; +} +default: +{ 
+lean_object* x_141; lean_object* x_142; +lean_dec(x_132); +lean_dec(x_129); +if (lean_is_scalar(x_130)) { + x_141 = lean_alloc_ctor(1, 1, 0); +} else { + x_141 = x_130; + lean_ctor_set_tag(x_141, 1); +} +lean_ctor_set(x_141, 0, x_5); +x_142 = lean_alloc_ctor(0, 2, 0); +lean_ctor_set(x_142, 0, x_141); +lean_ctor_set(x_142, 1, x_9); +x_17 = x_142; +x_18 = x_12; +goto block_25; +} +} +} +case 1: +{ +lean_object* x_143; lean_object* x_144; lean_object* x_145; +lean_dec(x_128); +if (lean_is_exclusive(x_127)) { + lean_ctor_release(x_127, 0); + x_143 = x_127; +} else { + lean_dec_ref(x_127); + x_143 = lean_box(0); +} +if (lean_is_scalar(x_143)) { + x_144 = lean_alloc_ctor(1, 1, 0); +} else { + x_144 = x_143; +} +lean_ctor_set(x_144, 0, x_5); +x_145 = lean_alloc_ctor(0, 2, 0); +lean_ctor_set(x_145, 0, x_144); +lean_ctor_set(x_145, 1, x_9); +x_17 = x_145; +x_18 = x_12; +goto block_25; +} +default: +{ +lean_object* x_146; lean_object* x_147; +if (lean_is_scalar(x_128)) { + x_146 = lean_alloc_ctor(1, 1, 0); +} else { + x_146 = x_128; +} +lean_ctor_set(x_146, 0, x_5); +x_147 = lean_alloc_ctor(0, 2, 0); +lean_ctor_set(x_147, 0, x_146); +lean_ctor_set(x_147, 1, x_9); +x_17 = x_147; +x_18 = x_12; +goto block_25; +} +} +} +} +} +else +{ +lean_object* x_148; lean_object* x_149; lean_object* x_150; lean_object* x_151; uint64_t x_152; uint64_t x_153; uint64_t x_154; uint64_t x_155; uint64_t x_156; uint64_t x_157; uint64_t x_158; size_t x_159; size_t x_160; size_t x_161; size_t x_162; size_t x_163; lean_object* x_164; lean_object* x_165; +x_148 = lean_ctor_get(x_27, 0); +lean_inc(x_148); +lean_dec(x_27); +x_149 = lean_ctor_get(x_46, 1); +lean_inc(x_149); +if (lean_is_exclusive(x_46)) { + lean_ctor_release(x_46, 0); + lean_ctor_release(x_46, 1); + x_150 = x_46; +} else { + lean_dec_ref(x_46); + x_150 = lean_box(0); +} +x_151 = lean_array_get_size(x_149); +x_152 = l___private_Lean_Expr_0__Lean_hashFVarId____x40_Lean_Expr___hyg_1730_(x_148); +x_153 = 32; +x_154 = lean_uint64_shift_right(x_152, x_153); +x_155 = lean_uint64_xor(x_152, x_154); +x_156 = 16; +x_157 = lean_uint64_shift_right(x_155, x_156); +x_158 = lean_uint64_xor(x_155, x_157); +x_159 = lean_uint64_to_usize(x_158); +x_160 = lean_usize_of_nat(x_151); +lean_dec(x_151); +x_161 = 1; +x_162 = lean_usize_sub(x_160, x_161); +x_163 = lean_usize_land(x_159, x_162); +x_164 = lean_array_uget(x_149, x_163); +lean_dec(x_149); +x_165 = l_Std_DHashMap_Internal_AssocList_get_x3f___at_Lean_IR_ToIR_lowerArg___spec__1(x_148, x_164); +lean_dec(x_164); +lean_dec(x_148); +if (lean_obj_tag(x_165) == 0) +{ +lean_object* x_166; lean_object* x_167; +x_166 = lean_alloc_ctor(1, 1, 0); +lean_ctor_set(x_166, 0, x_5); +if (lean_is_scalar(x_150)) { + x_167 = lean_alloc_ctor(0, 2, 0); +} else { + x_167 = x_150; +} +lean_ctor_set(x_167, 0, x_166); +lean_ctor_set(x_167, 1, x_9); +x_17 = x_167; +x_18 = x_12; +goto block_25; +} +else +{ +lean_object* x_168; lean_object* x_169; +x_168 = lean_ctor_get(x_165, 0); +lean_inc(x_168); +if (lean_is_exclusive(x_165)) { + lean_ctor_release(x_165, 0); + x_169 = x_165; +} else { + lean_dec_ref(x_165); + x_169 = lean_box(0); +} +switch (lean_obj_tag(x_168)) { +case 0: +{ +lean_object* x_170; lean_object* x_171; lean_object* x_172; lean_object* x_173; +lean_dec(x_169); +x_170 = lean_ctor_get(x_168, 0); +lean_inc(x_170); +if (lean_is_exclusive(x_168)) { + lean_ctor_release(x_168, 0); + x_171 = x_168; +} else { + lean_dec_ref(x_168); + x_171 = lean_box(0); +} +x_172 = l_Lean_IR_instInhabitedCtorFieldInfo; +x_173 = lean_array_get(x_172, x_1, x_6); 
+switch (lean_obj_tag(x_173)) { +case 1: +{ +lean_object* x_174; lean_object* x_175; lean_object* x_176; lean_object* x_177; lean_object* x_178; +if (lean_is_exclusive(x_173)) { + lean_ctor_release(x_173, 0); + x_174 = x_173; +} else { + lean_dec_ref(x_173); + x_174 = lean_box(0); +} +if (lean_is_scalar(x_174)) { + x_175 = lean_alloc_ctor(0, 1, 0); +} else { + x_175 = x_174; + lean_ctor_set_tag(x_175, 0); +} +lean_ctor_set(x_175, 0, x_170); +x_176 = lean_array_push(x_5, x_175); +if (lean_is_scalar(x_171)) { + x_177 = lean_alloc_ctor(1, 1, 0); +} else { + x_177 = x_171; + lean_ctor_set_tag(x_177, 1); +} +lean_ctor_set(x_177, 0, x_176); +if (lean_is_scalar(x_150)) { + x_178 = lean_alloc_ctor(0, 2, 0); +} else { + x_178 = x_150; +} +lean_ctor_set(x_178, 0, x_177); +lean_ctor_set(x_178, 1, x_9); +x_17 = x_178; +x_18 = x_12; +goto block_25; +} +case 2: +{ +lean_object* x_179; lean_object* x_180; lean_object* x_181; +lean_dec(x_171); +lean_dec(x_170); +if (lean_is_exclusive(x_173)) { + lean_ctor_release(x_173, 0); + x_179 = x_173; +} else { + lean_dec_ref(x_173); + x_179 = lean_box(0); +} +if (lean_is_scalar(x_179)) { + x_180 = lean_alloc_ctor(1, 1, 0); +} else { + x_180 = x_179; + lean_ctor_set_tag(x_180, 1); +} +lean_ctor_set(x_180, 0, x_5); +if (lean_is_scalar(x_150)) { + x_181 = lean_alloc_ctor(0, 2, 0); +} else { + x_181 = x_150; +} +lean_ctor_set(x_181, 0, x_180); +lean_ctor_set(x_181, 1, x_9); +x_17 = x_181; +x_18 = x_12; +goto block_25; +} +default: +{ +lean_object* x_182; lean_object* x_183; +lean_dec(x_173); +lean_dec(x_170); +if (lean_is_scalar(x_171)) { + x_182 = lean_alloc_ctor(1, 1, 0); +} else { + x_182 = x_171; + lean_ctor_set_tag(x_182, 1); +} +lean_ctor_set(x_182, 0, x_5); +if (lean_is_scalar(x_150)) { + x_183 = lean_alloc_ctor(0, 2, 0); +} else { + x_183 = x_150; +} +lean_ctor_set(x_183, 0, x_182); +lean_ctor_set(x_183, 1, x_9); +x_17 = x_183; +x_18 = x_12; +goto block_25; +} +} +} +case 1: +{ +lean_object* x_184; lean_object* x_185; lean_object* x_186; +lean_dec(x_169); +if (lean_is_exclusive(x_168)) { + lean_ctor_release(x_168, 0); + x_184 = x_168; +} else { + lean_dec_ref(x_168); + x_184 = lean_box(0); +} +if (lean_is_scalar(x_184)) { + x_185 = lean_alloc_ctor(1, 1, 0); +} else { + x_185 = x_184; +} +lean_ctor_set(x_185, 0, x_5); +if (lean_is_scalar(x_150)) { + x_186 = lean_alloc_ctor(0, 2, 0); +} else { + x_186 = x_150; +} +lean_ctor_set(x_186, 0, x_185); +lean_ctor_set(x_186, 1, x_9); +x_17 = x_186; +x_18 = x_12; +goto block_25; +} +default: +{ +lean_object* x_187; lean_object* x_188; +if (lean_is_scalar(x_169)) { + x_187 = lean_alloc_ctor(1, 1, 0); +} else { + x_187 = x_169; +} +lean_ctor_set(x_187, 0, x_5); +if (lean_is_scalar(x_150)) { + x_188 = lean_alloc_ctor(0, 2, 0); +} else { + x_188 = x_150; +} +lean_ctor_set(x_188, 0, x_187); +lean_ctor_set(x_188, 1, x_9); +x_17 = x_188; +x_18 = x_12; +goto block_25; +} +} +} +} +} +default: +{ +uint8_t x_189; +x_189 = !lean_is_exclusive(x_27); +if (x_189 == 0) +{ +lean_object* x_190; lean_object* x_191; lean_object* x_192; +x_190 = lean_ctor_get(x_27, 0); +lean_dec(x_190); +x_191 = l_Lean_IR_instInhabitedCtorFieldInfo; +x_192 = lean_array_get(x_191, x_1, x_6); +switch (lean_obj_tag(x_192)) { +case 1: +{ +uint8_t x_193; +lean_free_object(x_27); +x_193 = !lean_is_exclusive(x_192); +if (x_193 == 0) +{ +lean_object* x_194; lean_object* x_195; lean_object* x_196; lean_object* x_197; +x_194 = lean_ctor_get(x_192, 0); +lean_dec(x_194); +x_195 = lean_box(1); +x_196 = lean_array_push(x_5, x_195); +lean_ctor_set(x_192, 0, x_196); +x_197 
= lean_alloc_ctor(0, 2, 0); +lean_ctor_set(x_197, 0, x_192); +lean_ctor_set(x_197, 1, x_9); +x_17 = x_197; +x_18 = x_12; +goto block_25; +} +else +{ +lean_object* x_198; lean_object* x_199; lean_object* x_200; lean_object* x_201; +lean_dec(x_192); +x_198 = lean_box(1); +x_199 = lean_array_push(x_5, x_198); +x_200 = lean_alloc_ctor(1, 1, 0); +lean_ctor_set(x_200, 0, x_199); +x_201 = lean_alloc_ctor(0, 2, 0); +lean_ctor_set(x_201, 0, x_200); +lean_ctor_set(x_201, 1, x_9); +x_17 = x_201; +x_18 = x_12; +goto block_25; +} +} +case 2: +{ +uint8_t x_202; +lean_free_object(x_27); +x_202 = !lean_is_exclusive(x_192); +if (x_202 == 0) +{ +lean_object* x_203; lean_object* x_204; +x_203 = lean_ctor_get(x_192, 0); +lean_dec(x_203); +lean_ctor_set_tag(x_192, 1); +lean_ctor_set(x_192, 0, x_5); +x_204 = lean_alloc_ctor(0, 2, 0); +lean_ctor_set(x_204, 0, x_192); +lean_ctor_set(x_204, 1, x_9); +x_17 = x_204; +x_18 = x_12; +goto block_25; +} +else +{ +lean_object* x_205; lean_object* x_206; +lean_dec(x_192); +x_205 = lean_alloc_ctor(1, 1, 0); +lean_ctor_set(x_205, 0, x_5); +x_206 = lean_alloc_ctor(0, 2, 0); +lean_ctor_set(x_206, 0, x_205); +lean_ctor_set(x_206, 1, x_9); +x_17 = x_206; +x_18 = x_12; +goto block_25; +} +} +default: +{ +lean_object* x_207; +lean_dec(x_192); +lean_ctor_set_tag(x_27, 1); +lean_ctor_set(x_27, 0, x_5); +x_207 = lean_alloc_ctor(0, 2, 0); +lean_ctor_set(x_207, 0, x_27); +lean_ctor_set(x_207, 1, x_9); +x_17 = x_207; +x_18 = x_12; +goto block_25; +} +} +} +else +{ +lean_object* x_208; lean_object* x_209; +lean_dec(x_27); +x_208 = l_Lean_IR_instInhabitedCtorFieldInfo; +x_209 = lean_array_get(x_208, x_1, x_6); +switch (lean_obj_tag(x_209)) { +case 1: +{ +lean_object* x_210; lean_object* x_211; lean_object* x_212; lean_object* x_213; lean_object* x_214; +if (lean_is_exclusive(x_209)) { + lean_ctor_release(x_209, 0); + x_210 = x_209; +} else { + lean_dec_ref(x_209); + x_210 = lean_box(0); +} +x_211 = lean_box(1); +x_212 = lean_array_push(x_5, x_211); +if (lean_is_scalar(x_210)) { + x_213 = lean_alloc_ctor(1, 1, 0); +} else { + x_213 = x_210; +} +lean_ctor_set(x_213, 0, x_212); +x_214 = lean_alloc_ctor(0, 2, 0); +lean_ctor_set(x_214, 0, x_213); +lean_ctor_set(x_214, 1, x_9); +x_17 = x_214; +x_18 = x_12; +goto block_25; +} +case 2: +{ +lean_object* x_215; lean_object* x_216; lean_object* x_217; +if (lean_is_exclusive(x_209)) { + lean_ctor_release(x_209, 0); + x_215 = x_209; +} else { + lean_dec_ref(x_209); + x_215 = lean_box(0); +} +if (lean_is_scalar(x_215)) { + x_216 = lean_alloc_ctor(1, 1, 0); +} else { + x_216 = x_215; + lean_ctor_set_tag(x_216, 1); +} +lean_ctor_set(x_216, 0, x_5); +x_217 = lean_alloc_ctor(0, 2, 0); +lean_ctor_set(x_217, 0, x_216); +lean_ctor_set(x_217, 1, x_9); +x_17 = x_217; +x_18 = x_12; +goto block_25; +} +default: +{ +lean_object* x_218; lean_object* x_219; +lean_dec(x_209); +x_218 = lean_alloc_ctor(1, 1, 0); +lean_ctor_set(x_218, 0, x_5); +x_219 = lean_alloc_ctor(0, 2, 0); +lean_ctor_set(x_219, 0, x_218); +lean_ctor_set(x_219, 1, x_9); +x_17 = x_219; +x_18 = x_12; +goto block_25; +} +} +} +} +} +block_25: +{ +lean_object* x_19; lean_object* x_20; lean_object* x_21; lean_object* x_22; lean_object* x_23; +x_19 = lean_ctor_get(x_17, 0); +lean_inc(x_19); +x_20 = lean_ctor_get(x_17, 1); +lean_inc(x_20); +lean_dec(x_17); +x_21 = lean_ctor_get(x_19, 0); +lean_inc(x_21); +lean_dec(x_19); +x_22 = lean_ctor_get(x_4, 2); +x_23 = lean_nat_add(x_6, x_22); +lean_dec(x_6); +x_5 = x_21; +x_6 = x_23; +x_7 = lean_box(0); +x_8 = lean_box(0); +x_9 = x_20; +x_12 = x_18; +goto _start; 
+} +} +} +} +LEAN_EXPORT lean_object* l_Std_Range_forIn_x27_loop___at_Lean_IR_ToIR_lowerLet___spec__3(lean_object* x_1, lean_object* x_2, lean_object* x_3, lean_object* x_4, lean_object* x_5, lean_object* x_6, lean_object* x_7, lean_object* x_8, lean_object* x_9, lean_object* x_10, lean_object* x_11, lean_object* x_12) { +_start: +{ +lean_object* x_13; uint8_t x_14; +x_13 = lean_ctor_get(x_4, 1); +x_14 = lean_nat_dec_lt(x_6, x_13); +if (x_14 == 0) +{ +lean_object* x_15; lean_object* x_16; +lean_dec(x_6); +x_15 = lean_alloc_ctor(0, 2, 0); +lean_ctor_set(x_15, 0, x_5); +lean_ctor_set(x_15, 1, x_9); +x_16 = lean_alloc_ctor(0, 2, 0); +lean_ctor_set(x_16, 0, x_15); +lean_ctor_set(x_16, 1, x_12); +return x_16; +} +else +{ +lean_object* x_17; lean_object* x_18; lean_object* x_26; lean_object* x_27; +x_26 = l_Lean_Compiler_LCNF_instInhabitedArg; +x_27 = lean_array_get(x_26, x_2, x_6); +switch (lean_obj_tag(x_27)) { +case 0: +{ +lean_object* x_28; lean_object* x_29; +x_28 = l_Lean_IR_instInhabitedCtorFieldInfo; +x_29 = lean_array_get(x_28, x_1, x_6); +switch (lean_obj_tag(x_29)) { +case 1: +{ +uint8_t x_30; +x_30 = !lean_is_exclusive(x_29); +if (x_30 == 0) +{ +lean_object* x_31; lean_object* x_32; lean_object* x_33; lean_object* x_34; +x_31 = lean_ctor_get(x_29, 0); +lean_dec(x_31); +x_32 = lean_box(1); +x_33 = lean_array_push(x_5, x_32); +lean_ctor_set(x_29, 0, x_33); +x_34 = lean_alloc_ctor(0, 2, 0); +lean_ctor_set(x_34, 0, x_29); +lean_ctor_set(x_34, 1, x_9); +x_17 = x_34; +x_18 = x_12; +goto block_25; +} +else +{ +lean_object* x_35; lean_object* x_36; lean_object* x_37; lean_object* x_38; +lean_dec(x_29); +x_35 = lean_box(1); +x_36 = lean_array_push(x_5, x_35); +x_37 = lean_alloc_ctor(1, 1, 0); +lean_ctor_set(x_37, 0, x_36); +x_38 = lean_alloc_ctor(0, 2, 0); +lean_ctor_set(x_38, 0, x_37); +lean_ctor_set(x_38, 1, x_9); +x_17 = x_38; +x_18 = x_12; +goto block_25; +} +} +case 2: +{ +uint8_t x_39; +x_39 = !lean_is_exclusive(x_29); +if (x_39 == 0) +{ +lean_object* x_40; lean_object* x_41; +x_40 = lean_ctor_get(x_29, 0); +lean_dec(x_40); +lean_ctor_set_tag(x_29, 1); +lean_ctor_set(x_29, 0, x_5); +x_41 = lean_alloc_ctor(0, 2, 0); +lean_ctor_set(x_41, 0, x_29); +lean_ctor_set(x_41, 1, x_9); +x_17 = x_41; +x_18 = x_12; +goto block_25; +} +else +{ +lean_object* x_42; lean_object* x_43; +lean_dec(x_29); +x_42 = lean_alloc_ctor(1, 1, 0); +lean_ctor_set(x_42, 0, x_5); +x_43 = lean_alloc_ctor(0, 2, 0); +lean_ctor_set(x_43, 0, x_42); +lean_ctor_set(x_43, 1, x_9); +x_17 = x_43; +x_18 = x_12; +goto block_25; +} +} +default: +{ +lean_object* x_44; lean_object* x_45; +lean_dec(x_29); +x_44 = lean_alloc_ctor(1, 1, 0); +lean_ctor_set(x_44, 0, x_5); +x_45 = lean_alloc_ctor(0, 2, 0); +lean_ctor_set(x_45, 0, x_44); +lean_ctor_set(x_45, 1, x_9); +x_17 = x_45; +x_18 = x_12; +goto block_25; +} +} +} +case 1: +{ +lean_object* x_46; uint8_t x_47; +x_46 = lean_ctor_get(x_9, 0); +lean_inc(x_46); +x_47 = !lean_is_exclusive(x_27); +if (x_47 == 0) +{ +uint8_t x_48; +x_48 = !lean_is_exclusive(x_46); +if (x_48 == 0) +{ +lean_object* x_49; lean_object* x_50; lean_object* x_51; lean_object* x_52; uint64_t x_53; uint64_t x_54; uint64_t x_55; uint64_t x_56; uint64_t x_57; uint64_t x_58; uint64_t x_59; size_t x_60; size_t x_61; size_t x_62; size_t x_63; size_t x_64; lean_object* x_65; lean_object* x_66; +x_49 = lean_ctor_get(x_27, 0); +x_50 = lean_ctor_get(x_46, 1); +x_51 = lean_ctor_get(x_46, 0); +lean_dec(x_51); +x_52 = lean_array_get_size(x_50); +x_53 = l___private_Lean_Expr_0__Lean_hashFVarId____x40_Lean_Expr___hyg_1730_(x_49); 
+x_54 = 32; +x_55 = lean_uint64_shift_right(x_53, x_54); +x_56 = lean_uint64_xor(x_53, x_55); +x_57 = 16; +x_58 = lean_uint64_shift_right(x_56, x_57); +x_59 = lean_uint64_xor(x_56, x_58); +x_60 = lean_uint64_to_usize(x_59); +x_61 = lean_usize_of_nat(x_52); +lean_dec(x_52); +x_62 = 1; +x_63 = lean_usize_sub(x_61, x_62); +x_64 = lean_usize_land(x_60, x_63); +x_65 = lean_array_uget(x_50, x_64); +lean_dec(x_50); +x_66 = l_Std_DHashMap_Internal_AssocList_get_x3f___at_Lean_IR_ToIR_lowerArg___spec__1(x_49, x_65); +lean_dec(x_65); +lean_dec(x_49); +if (lean_obj_tag(x_66) == 0) +{ +lean_ctor_set(x_27, 0, x_5); +lean_ctor_set(x_46, 1, x_9); +lean_ctor_set(x_46, 0, x_27); +x_17 = x_46; +x_18 = x_12; +goto block_25; +} +else +{ +uint8_t x_67; +lean_free_object(x_27); +x_67 = !lean_is_exclusive(x_66); +if (x_67 == 0) +{ +lean_object* x_68; +x_68 = lean_ctor_get(x_66, 0); +switch (lean_obj_tag(x_68)) { +case 0: +{ +uint8_t x_69; +lean_free_object(x_66); +x_69 = !lean_is_exclusive(x_68); +if (x_69 == 0) +{ +lean_object* x_70; lean_object* x_71; lean_object* x_72; +x_70 = lean_ctor_get(x_68, 0); +x_71 = l_Lean_IR_instInhabitedCtorFieldInfo; +x_72 = lean_array_get(x_71, x_1, x_6); +switch (lean_obj_tag(x_72)) { +case 1: +{ +uint8_t x_73; +x_73 = !lean_is_exclusive(x_72); +if (x_73 == 0) +{ +lean_object* x_74; lean_object* x_75; +x_74 = lean_ctor_get(x_72, 0); +lean_dec(x_74); +lean_ctor_set_tag(x_72, 0); +lean_ctor_set(x_72, 0, x_70); +x_75 = lean_array_push(x_5, x_72); +lean_ctor_set_tag(x_68, 1); +lean_ctor_set(x_68, 0, x_75); +lean_ctor_set(x_46, 1, x_9); +lean_ctor_set(x_46, 0, x_68); +x_17 = x_46; +x_18 = x_12; +goto block_25; +} +else +{ +lean_object* x_76; lean_object* x_77; +lean_dec(x_72); +x_76 = lean_alloc_ctor(0, 1, 0); +lean_ctor_set(x_76, 0, x_70); +x_77 = lean_array_push(x_5, x_76); +lean_ctor_set_tag(x_68, 1); +lean_ctor_set(x_68, 0, x_77); +lean_ctor_set(x_46, 1, x_9); +lean_ctor_set(x_46, 0, x_68); +x_17 = x_46; +x_18 = x_12; +goto block_25; +} +} +case 2: +{ +uint8_t x_78; +lean_free_object(x_68); +lean_dec(x_70); +x_78 = !lean_is_exclusive(x_72); +if (x_78 == 0) +{ +lean_object* x_79; +x_79 = lean_ctor_get(x_72, 0); +lean_dec(x_79); +lean_ctor_set_tag(x_72, 1); +lean_ctor_set(x_72, 0, x_5); +lean_ctor_set(x_46, 1, x_9); +lean_ctor_set(x_46, 0, x_72); +x_17 = x_46; +x_18 = x_12; +goto block_25; +} +else +{ +lean_object* x_80; +lean_dec(x_72); +x_80 = lean_alloc_ctor(1, 1, 0); +lean_ctor_set(x_80, 0, x_5); +lean_ctor_set(x_46, 1, x_9); +lean_ctor_set(x_46, 0, x_80); +x_17 = x_46; +x_18 = x_12; +goto block_25; +} +} +default: +{ +lean_dec(x_72); +lean_dec(x_70); +lean_ctor_set_tag(x_68, 1); +lean_ctor_set(x_68, 0, x_5); +lean_ctor_set(x_46, 1, x_9); +lean_ctor_set(x_46, 0, x_68); +x_17 = x_46; +x_18 = x_12; +goto block_25; +} +} +} +else +{ +lean_object* x_81; lean_object* x_82; lean_object* x_83; +x_81 = lean_ctor_get(x_68, 0); +lean_inc(x_81); +lean_dec(x_68); +x_82 = l_Lean_IR_instInhabitedCtorFieldInfo; +x_83 = lean_array_get(x_82, x_1, x_6); +switch (lean_obj_tag(x_83)) { +case 1: +{ +lean_object* x_84; lean_object* x_85; lean_object* x_86; lean_object* x_87; +if (lean_is_exclusive(x_83)) { + lean_ctor_release(x_83, 0); + x_84 = x_83; +} else { + lean_dec_ref(x_83); + x_84 = lean_box(0); +} +if (lean_is_scalar(x_84)) { + x_85 = lean_alloc_ctor(0, 1, 0); +} else { + x_85 = x_84; + lean_ctor_set_tag(x_85, 0); +} +lean_ctor_set(x_85, 0, x_81); +x_86 = lean_array_push(x_5, x_85); +x_87 = lean_alloc_ctor(1, 1, 0); +lean_ctor_set(x_87, 0, x_86); +lean_ctor_set(x_46, 1, x_9); 
+lean_ctor_set(x_46, 0, x_87); +x_17 = x_46; +x_18 = x_12; +goto block_25; +} +case 2: +{ +lean_object* x_88; lean_object* x_89; +lean_dec(x_81); +if (lean_is_exclusive(x_83)) { + lean_ctor_release(x_83, 0); + x_88 = x_83; +} else { + lean_dec_ref(x_83); + x_88 = lean_box(0); +} +if (lean_is_scalar(x_88)) { + x_89 = lean_alloc_ctor(1, 1, 0); +} else { + x_89 = x_88; + lean_ctor_set_tag(x_89, 1); +} +lean_ctor_set(x_89, 0, x_5); +lean_ctor_set(x_46, 1, x_9); +lean_ctor_set(x_46, 0, x_89); +x_17 = x_46; +x_18 = x_12; +goto block_25; +} +default: +{ +lean_object* x_90; +lean_dec(x_83); +lean_dec(x_81); +x_90 = lean_alloc_ctor(1, 1, 0); +lean_ctor_set(x_90, 0, x_5); +lean_ctor_set(x_46, 1, x_9); +lean_ctor_set(x_46, 0, x_90); +x_17 = x_46; +x_18 = x_12; +goto block_25; +} +} +} +} +case 1: +{ +uint8_t x_91; +lean_free_object(x_66); +x_91 = !lean_is_exclusive(x_68); +if (x_91 == 0) +{ +lean_object* x_92; +x_92 = lean_ctor_get(x_68, 0); +lean_dec(x_92); +lean_ctor_set(x_68, 0, x_5); +lean_ctor_set(x_46, 1, x_9); +lean_ctor_set(x_46, 0, x_68); +x_17 = x_46; +x_18 = x_12; +goto block_25; +} +else +{ +lean_object* x_93; +lean_dec(x_68); +x_93 = lean_alloc_ctor(1, 1, 0); +lean_ctor_set(x_93, 0, x_5); +lean_ctor_set(x_46, 1, x_9); +lean_ctor_set(x_46, 0, x_93); +x_17 = x_46; +x_18 = x_12; +goto block_25; +} +} +default: +{ +lean_ctor_set(x_66, 0, x_5); +lean_ctor_set(x_46, 1, x_9); +lean_ctor_set(x_46, 0, x_66); +x_17 = x_46; +x_18 = x_12; +goto block_25; +} +} +} +else +{ +lean_object* x_94; +x_94 = lean_ctor_get(x_66, 0); +lean_inc(x_94); +lean_dec(x_66); +switch (lean_obj_tag(x_94)) { +case 0: +{ +lean_object* x_95; lean_object* x_96; lean_object* x_97; lean_object* x_98; +x_95 = lean_ctor_get(x_94, 0); +lean_inc(x_95); +if (lean_is_exclusive(x_94)) { + lean_ctor_release(x_94, 0); + x_96 = x_94; +} else { + lean_dec_ref(x_94); + x_96 = lean_box(0); +} +x_97 = l_Lean_IR_instInhabitedCtorFieldInfo; +x_98 = lean_array_get(x_97, x_1, x_6); +switch (lean_obj_tag(x_98)) { +case 1: +{ +lean_object* x_99; lean_object* x_100; lean_object* x_101; lean_object* x_102; +if (lean_is_exclusive(x_98)) { + lean_ctor_release(x_98, 0); + x_99 = x_98; +} else { + lean_dec_ref(x_98); + x_99 = lean_box(0); +} +if (lean_is_scalar(x_99)) { + x_100 = lean_alloc_ctor(0, 1, 0); +} else { + x_100 = x_99; + lean_ctor_set_tag(x_100, 0); +} +lean_ctor_set(x_100, 0, x_95); +x_101 = lean_array_push(x_5, x_100); +if (lean_is_scalar(x_96)) { + x_102 = lean_alloc_ctor(1, 1, 0); +} else { + x_102 = x_96; + lean_ctor_set_tag(x_102, 1); +} +lean_ctor_set(x_102, 0, x_101); +lean_ctor_set(x_46, 1, x_9); +lean_ctor_set(x_46, 0, x_102); +x_17 = x_46; +x_18 = x_12; +goto block_25; +} +case 2: +{ +lean_object* x_103; lean_object* x_104; +lean_dec(x_96); +lean_dec(x_95); +if (lean_is_exclusive(x_98)) { + lean_ctor_release(x_98, 0); + x_103 = x_98; +} else { + lean_dec_ref(x_98); + x_103 = lean_box(0); +} +if (lean_is_scalar(x_103)) { + x_104 = lean_alloc_ctor(1, 1, 0); +} else { + x_104 = x_103; + lean_ctor_set_tag(x_104, 1); +} +lean_ctor_set(x_104, 0, x_5); +lean_ctor_set(x_46, 1, x_9); +lean_ctor_set(x_46, 0, x_104); +x_17 = x_46; +x_18 = x_12; +goto block_25; +} +default: +{ +lean_object* x_105; +lean_dec(x_98); +lean_dec(x_95); +if (lean_is_scalar(x_96)) { + x_105 = lean_alloc_ctor(1, 1, 0); +} else { + x_105 = x_96; + lean_ctor_set_tag(x_105, 1); +} +lean_ctor_set(x_105, 0, x_5); +lean_ctor_set(x_46, 1, x_9); +lean_ctor_set(x_46, 0, x_105); +x_17 = x_46; +x_18 = x_12; +goto block_25; +} +} +} +case 1: +{ +lean_object* x_106; 
lean_object* x_107; +if (lean_is_exclusive(x_94)) { + lean_ctor_release(x_94, 0); + x_106 = x_94; +} else { + lean_dec_ref(x_94); + x_106 = lean_box(0); +} +if (lean_is_scalar(x_106)) { + x_107 = lean_alloc_ctor(1, 1, 0); +} else { + x_107 = x_106; +} +lean_ctor_set(x_107, 0, x_5); +lean_ctor_set(x_46, 1, x_9); +lean_ctor_set(x_46, 0, x_107); +x_17 = x_46; +x_18 = x_12; +goto block_25; +} +default: +{ +lean_object* x_108; +x_108 = lean_alloc_ctor(1, 1, 0); +lean_ctor_set(x_108, 0, x_5); +lean_ctor_set(x_46, 1, x_9); +lean_ctor_set(x_46, 0, x_108); +x_17 = x_46; +x_18 = x_12; +goto block_25; +} +} +} +} +} +else +{ +lean_object* x_109; lean_object* x_110; lean_object* x_111; uint64_t x_112; uint64_t x_113; uint64_t x_114; uint64_t x_115; uint64_t x_116; uint64_t x_117; uint64_t x_118; size_t x_119; size_t x_120; size_t x_121; size_t x_122; size_t x_123; lean_object* x_124; lean_object* x_125; +x_109 = lean_ctor_get(x_27, 0); +x_110 = lean_ctor_get(x_46, 1); +lean_inc(x_110); +lean_dec(x_46); +x_111 = lean_array_get_size(x_110); +x_112 = l___private_Lean_Expr_0__Lean_hashFVarId____x40_Lean_Expr___hyg_1730_(x_109); +x_113 = 32; +x_114 = lean_uint64_shift_right(x_112, x_113); +x_115 = lean_uint64_xor(x_112, x_114); +x_116 = 16; +x_117 = lean_uint64_shift_right(x_115, x_116); +x_118 = lean_uint64_xor(x_115, x_117); +x_119 = lean_uint64_to_usize(x_118); +x_120 = lean_usize_of_nat(x_111); +lean_dec(x_111); +x_121 = 1; +x_122 = lean_usize_sub(x_120, x_121); +x_123 = lean_usize_land(x_119, x_122); +x_124 = lean_array_uget(x_110, x_123); +lean_dec(x_110); +x_125 = l_Std_DHashMap_Internal_AssocList_get_x3f___at_Lean_IR_ToIR_lowerArg___spec__1(x_109, x_124); +lean_dec(x_124); +lean_dec(x_109); +if (lean_obj_tag(x_125) == 0) +{ +lean_object* x_126; +lean_ctor_set(x_27, 0, x_5); +x_126 = lean_alloc_ctor(0, 2, 0); +lean_ctor_set(x_126, 0, x_27); +lean_ctor_set(x_126, 1, x_9); +x_17 = x_126; +x_18 = x_12; +goto block_25; +} +else +{ +lean_object* x_127; lean_object* x_128; +lean_free_object(x_27); +x_127 = lean_ctor_get(x_125, 0); +lean_inc(x_127); +if (lean_is_exclusive(x_125)) { + lean_ctor_release(x_125, 0); + x_128 = x_125; +} else { + lean_dec_ref(x_125); + x_128 = lean_box(0); +} +switch (lean_obj_tag(x_127)) { +case 0: +{ +lean_object* x_129; lean_object* x_130; lean_object* x_131; lean_object* x_132; +lean_dec(x_128); +x_129 = lean_ctor_get(x_127, 0); +lean_inc(x_129); +if (lean_is_exclusive(x_127)) { + lean_ctor_release(x_127, 0); + x_130 = x_127; +} else { + lean_dec_ref(x_127); + x_130 = lean_box(0); +} +x_131 = l_Lean_IR_instInhabitedCtorFieldInfo; +x_132 = lean_array_get(x_131, x_1, x_6); +switch (lean_obj_tag(x_132)) { +case 1: +{ +lean_object* x_133; lean_object* x_134; lean_object* x_135; lean_object* x_136; lean_object* x_137; +if (lean_is_exclusive(x_132)) { + lean_ctor_release(x_132, 0); + x_133 = x_132; +} else { + lean_dec_ref(x_132); + x_133 = lean_box(0); +} +if (lean_is_scalar(x_133)) { + x_134 = lean_alloc_ctor(0, 1, 0); +} else { + x_134 = x_133; + lean_ctor_set_tag(x_134, 0); +} +lean_ctor_set(x_134, 0, x_129); +x_135 = lean_array_push(x_5, x_134); +if (lean_is_scalar(x_130)) { + x_136 = lean_alloc_ctor(1, 1, 0); +} else { + x_136 = x_130; + lean_ctor_set_tag(x_136, 1); +} +lean_ctor_set(x_136, 0, x_135); +x_137 = lean_alloc_ctor(0, 2, 0); +lean_ctor_set(x_137, 0, x_136); +lean_ctor_set(x_137, 1, x_9); +x_17 = x_137; +x_18 = x_12; +goto block_25; +} +case 2: +{ +lean_object* x_138; lean_object* x_139; lean_object* x_140; +lean_dec(x_130); +lean_dec(x_129); +if 
(lean_is_exclusive(x_132)) { + lean_ctor_release(x_132, 0); + x_138 = x_132; +} else { + lean_dec_ref(x_132); + x_138 = lean_box(0); +} +if (lean_is_scalar(x_138)) { + x_139 = lean_alloc_ctor(1, 1, 0); +} else { + x_139 = x_138; + lean_ctor_set_tag(x_139, 1); +} +lean_ctor_set(x_139, 0, x_5); +x_140 = lean_alloc_ctor(0, 2, 0); +lean_ctor_set(x_140, 0, x_139); +lean_ctor_set(x_140, 1, x_9); +x_17 = x_140; +x_18 = x_12; +goto block_25; +} +default: +{ +lean_object* x_141; lean_object* x_142; +lean_dec(x_132); +lean_dec(x_129); +if (lean_is_scalar(x_130)) { + x_141 = lean_alloc_ctor(1, 1, 0); +} else { + x_141 = x_130; + lean_ctor_set_tag(x_141, 1); +} +lean_ctor_set(x_141, 0, x_5); +x_142 = lean_alloc_ctor(0, 2, 0); +lean_ctor_set(x_142, 0, x_141); +lean_ctor_set(x_142, 1, x_9); +x_17 = x_142; +x_18 = x_12; +goto block_25; +} +} +} +case 1: +{ +lean_object* x_143; lean_object* x_144; lean_object* x_145; +lean_dec(x_128); +if (lean_is_exclusive(x_127)) { + lean_ctor_release(x_127, 0); + x_143 = x_127; +} else { + lean_dec_ref(x_127); + x_143 = lean_box(0); +} +if (lean_is_scalar(x_143)) { + x_144 = lean_alloc_ctor(1, 1, 0); +} else { + x_144 = x_143; +} +lean_ctor_set(x_144, 0, x_5); +x_145 = lean_alloc_ctor(0, 2, 0); +lean_ctor_set(x_145, 0, x_144); +lean_ctor_set(x_145, 1, x_9); +x_17 = x_145; +x_18 = x_12; +goto block_25; +} +default: +{ +lean_object* x_146; lean_object* x_147; +if (lean_is_scalar(x_128)) { + x_146 = lean_alloc_ctor(1, 1, 0); +} else { + x_146 = x_128; +} +lean_ctor_set(x_146, 0, x_5); +x_147 = lean_alloc_ctor(0, 2, 0); +lean_ctor_set(x_147, 0, x_146); +lean_ctor_set(x_147, 1, x_9); +x_17 = x_147; +x_18 = x_12; +goto block_25; +} +} +} +} +} +else +{ +lean_object* x_148; lean_object* x_149; lean_object* x_150; lean_object* x_151; uint64_t x_152; uint64_t x_153; uint64_t x_154; uint64_t x_155; uint64_t x_156; uint64_t x_157; uint64_t x_158; size_t x_159; size_t x_160; size_t x_161; size_t x_162; size_t x_163; lean_object* x_164; lean_object* x_165; +x_148 = lean_ctor_get(x_27, 0); +lean_inc(x_148); +lean_dec(x_27); +x_149 = lean_ctor_get(x_46, 1); +lean_inc(x_149); +if (lean_is_exclusive(x_46)) { + lean_ctor_release(x_46, 0); + lean_ctor_release(x_46, 1); + x_150 = x_46; +} else { + lean_dec_ref(x_46); + x_150 = lean_box(0); +} +x_151 = lean_array_get_size(x_149); +x_152 = l___private_Lean_Expr_0__Lean_hashFVarId____x40_Lean_Expr___hyg_1730_(x_148); +x_153 = 32; +x_154 = lean_uint64_shift_right(x_152, x_153); +x_155 = lean_uint64_xor(x_152, x_154); +x_156 = 16; +x_157 = lean_uint64_shift_right(x_155, x_156); +x_158 = lean_uint64_xor(x_155, x_157); +x_159 = lean_uint64_to_usize(x_158); +x_160 = lean_usize_of_nat(x_151); +lean_dec(x_151); +x_161 = 1; +x_162 = lean_usize_sub(x_160, x_161); +x_163 = lean_usize_land(x_159, x_162); +x_164 = lean_array_uget(x_149, x_163); +lean_dec(x_149); +x_165 = l_Std_DHashMap_Internal_AssocList_get_x3f___at_Lean_IR_ToIR_lowerArg___spec__1(x_148, x_164); +lean_dec(x_164); +lean_dec(x_148); +if (lean_obj_tag(x_165) == 0) +{ +lean_object* x_166; lean_object* x_167; +x_166 = lean_alloc_ctor(1, 1, 0); +lean_ctor_set(x_166, 0, x_5); +if (lean_is_scalar(x_150)) { + x_167 = lean_alloc_ctor(0, 2, 0); +} else { + x_167 = x_150; +} +lean_ctor_set(x_167, 0, x_166); +lean_ctor_set(x_167, 1, x_9); +x_17 = x_167; +x_18 = x_12; +goto block_25; +} +else +{ +lean_object* x_168; lean_object* x_169; +x_168 = lean_ctor_get(x_165, 0); +lean_inc(x_168); +if (lean_is_exclusive(x_165)) { + lean_ctor_release(x_165, 0); + x_169 = x_165; +} else { + lean_dec_ref(x_165); 
+ x_169 = lean_box(0); +} +switch (lean_obj_tag(x_168)) { +case 0: +{ +lean_object* x_170; lean_object* x_171; lean_object* x_172; lean_object* x_173; +lean_dec(x_169); +x_170 = lean_ctor_get(x_168, 0); +lean_inc(x_170); +if (lean_is_exclusive(x_168)) { + lean_ctor_release(x_168, 0); + x_171 = x_168; +} else { + lean_dec_ref(x_168); + x_171 = lean_box(0); +} +x_172 = l_Lean_IR_instInhabitedCtorFieldInfo; +x_173 = lean_array_get(x_172, x_1, x_6); +switch (lean_obj_tag(x_173)) { +case 1: +{ +lean_object* x_174; lean_object* x_175; lean_object* x_176; lean_object* x_177; lean_object* x_178; +if (lean_is_exclusive(x_173)) { + lean_ctor_release(x_173, 0); + x_174 = x_173; +} else { + lean_dec_ref(x_173); + x_174 = lean_box(0); +} +if (lean_is_scalar(x_174)) { + x_175 = lean_alloc_ctor(0, 1, 0); +} else { + x_175 = x_174; + lean_ctor_set_tag(x_175, 0); +} +lean_ctor_set(x_175, 0, x_170); +x_176 = lean_array_push(x_5, x_175); +if (lean_is_scalar(x_171)) { + x_177 = lean_alloc_ctor(1, 1, 0); +} else { + x_177 = x_171; + lean_ctor_set_tag(x_177, 1); +} +lean_ctor_set(x_177, 0, x_176); +if (lean_is_scalar(x_150)) { + x_178 = lean_alloc_ctor(0, 2, 0); +} else { + x_178 = x_150; +} +lean_ctor_set(x_178, 0, x_177); +lean_ctor_set(x_178, 1, x_9); +x_17 = x_178; +x_18 = x_12; +goto block_25; +} +case 2: +{ +lean_object* x_179; lean_object* x_180; lean_object* x_181; +lean_dec(x_171); +lean_dec(x_170); +if (lean_is_exclusive(x_173)) { + lean_ctor_release(x_173, 0); + x_179 = x_173; +} else { + lean_dec_ref(x_173); + x_179 = lean_box(0); +} +if (lean_is_scalar(x_179)) { + x_180 = lean_alloc_ctor(1, 1, 0); +} else { + x_180 = x_179; + lean_ctor_set_tag(x_180, 1); +} +lean_ctor_set(x_180, 0, x_5); +if (lean_is_scalar(x_150)) { + x_181 = lean_alloc_ctor(0, 2, 0); +} else { + x_181 = x_150; +} +lean_ctor_set(x_181, 0, x_180); +lean_ctor_set(x_181, 1, x_9); +x_17 = x_181; +x_18 = x_12; +goto block_25; +} +default: +{ +lean_object* x_182; lean_object* x_183; +lean_dec(x_173); +lean_dec(x_170); +if (lean_is_scalar(x_171)) { + x_182 = lean_alloc_ctor(1, 1, 0); +} else { + x_182 = x_171; + lean_ctor_set_tag(x_182, 1); +} +lean_ctor_set(x_182, 0, x_5); +if (lean_is_scalar(x_150)) { + x_183 = lean_alloc_ctor(0, 2, 0); +} else { + x_183 = x_150; +} +lean_ctor_set(x_183, 0, x_182); +lean_ctor_set(x_183, 1, x_9); +x_17 = x_183; +x_18 = x_12; +goto block_25; +} +} +} +case 1: +{ +lean_object* x_184; lean_object* x_185; lean_object* x_186; +lean_dec(x_169); +if (lean_is_exclusive(x_168)) { + lean_ctor_release(x_168, 0); + x_184 = x_168; +} else { + lean_dec_ref(x_168); + x_184 = lean_box(0); +} +if (lean_is_scalar(x_184)) { + x_185 = lean_alloc_ctor(1, 1, 0); +} else { + x_185 = x_184; +} +lean_ctor_set(x_185, 0, x_5); +if (lean_is_scalar(x_150)) { + x_186 = lean_alloc_ctor(0, 2, 0); +} else { + x_186 = x_150; +} +lean_ctor_set(x_186, 0, x_185); +lean_ctor_set(x_186, 1, x_9); +x_17 = x_186; +x_18 = x_12; +goto block_25; +} +default: +{ +lean_object* x_187; lean_object* x_188; +if (lean_is_scalar(x_169)) { + x_187 = lean_alloc_ctor(1, 1, 0); +} else { + x_187 = x_169; +} +lean_ctor_set(x_187, 0, x_5); +if (lean_is_scalar(x_150)) { + x_188 = lean_alloc_ctor(0, 2, 0); +} else { + x_188 = x_150; +} +lean_ctor_set(x_188, 0, x_187); +lean_ctor_set(x_188, 1, x_9); +x_17 = x_188; +x_18 = x_12; +goto block_25; +} +} +} +} +} +default: +{ +uint8_t x_189; +x_189 = !lean_is_exclusive(x_27); +if (x_189 == 0) +{ +lean_object* x_190; lean_object* x_191; lean_object* x_192; +x_190 = lean_ctor_get(x_27, 0); +lean_dec(x_190); +x_191 = 
l_Lean_IR_instInhabitedCtorFieldInfo; +x_192 = lean_array_get(x_191, x_1, x_6); +switch (lean_obj_tag(x_192)) { +case 1: +{ +uint8_t x_193; +lean_free_object(x_27); +x_193 = !lean_is_exclusive(x_192); +if (x_193 == 0) +{ +lean_object* x_194; lean_object* x_195; lean_object* x_196; lean_object* x_197; +x_194 = lean_ctor_get(x_192, 0); +lean_dec(x_194); +x_195 = lean_box(1); +x_196 = lean_array_push(x_5, x_195); +lean_ctor_set(x_192, 0, x_196); +x_197 = lean_alloc_ctor(0, 2, 0); +lean_ctor_set(x_197, 0, x_192); +lean_ctor_set(x_197, 1, x_9); +x_17 = x_197; +x_18 = x_12; +goto block_25; +} +else +{ +lean_object* x_198; lean_object* x_199; lean_object* x_200; lean_object* x_201; +lean_dec(x_192); +x_198 = lean_box(1); +x_199 = lean_array_push(x_5, x_198); +x_200 = lean_alloc_ctor(1, 1, 0); +lean_ctor_set(x_200, 0, x_199); +x_201 = lean_alloc_ctor(0, 2, 0); +lean_ctor_set(x_201, 0, x_200); +lean_ctor_set(x_201, 1, x_9); +x_17 = x_201; +x_18 = x_12; +goto block_25; +} +} +case 2: +{ +uint8_t x_202; +lean_free_object(x_27); +x_202 = !lean_is_exclusive(x_192); +if (x_202 == 0) +{ +lean_object* x_203; lean_object* x_204; +x_203 = lean_ctor_get(x_192, 0); +lean_dec(x_203); +lean_ctor_set_tag(x_192, 1); +lean_ctor_set(x_192, 0, x_5); +x_204 = lean_alloc_ctor(0, 2, 0); +lean_ctor_set(x_204, 0, x_192); +lean_ctor_set(x_204, 1, x_9); +x_17 = x_204; +x_18 = x_12; +goto block_25; +} +else +{ +lean_object* x_205; lean_object* x_206; +lean_dec(x_192); +x_205 = lean_alloc_ctor(1, 1, 0); +lean_ctor_set(x_205, 0, x_5); +x_206 = lean_alloc_ctor(0, 2, 0); +lean_ctor_set(x_206, 0, x_205); +lean_ctor_set(x_206, 1, x_9); +x_17 = x_206; +x_18 = x_12; +goto block_25; +} +} +default: +{ +lean_object* x_207; +lean_dec(x_192); +lean_ctor_set_tag(x_27, 1); +lean_ctor_set(x_27, 0, x_5); +x_207 = lean_alloc_ctor(0, 2, 0); +lean_ctor_set(x_207, 0, x_27); +lean_ctor_set(x_207, 1, x_9); +x_17 = x_207; +x_18 = x_12; +goto block_25; +} +} +} +else +{ +lean_object* x_208; lean_object* x_209; +lean_dec(x_27); +x_208 = l_Lean_IR_instInhabitedCtorFieldInfo; +x_209 = lean_array_get(x_208, x_1, x_6); +switch (lean_obj_tag(x_209)) { +case 1: +{ +lean_object* x_210; lean_object* x_211; lean_object* x_212; lean_object* x_213; lean_object* x_214; +if (lean_is_exclusive(x_209)) { + lean_ctor_release(x_209, 0); + x_210 = x_209; +} else { + lean_dec_ref(x_209); + x_210 = lean_box(0); +} +x_211 = lean_box(1); +x_212 = lean_array_push(x_5, x_211); +if (lean_is_scalar(x_210)) { + x_213 = lean_alloc_ctor(1, 1, 0); +} else { + x_213 = x_210; +} +lean_ctor_set(x_213, 0, x_212); +x_214 = lean_alloc_ctor(0, 2, 0); +lean_ctor_set(x_214, 0, x_213); +lean_ctor_set(x_214, 1, x_9); +x_17 = x_214; +x_18 = x_12; +goto block_25; +} +case 2: +{ +lean_object* x_215; lean_object* x_216; lean_object* x_217; +if (lean_is_exclusive(x_209)) { + lean_ctor_release(x_209, 0); + x_215 = x_209; +} else { + lean_dec_ref(x_209); + x_215 = lean_box(0); +} +if (lean_is_scalar(x_215)) { + x_216 = lean_alloc_ctor(1, 1, 0); +} else { + x_216 = x_215; + lean_ctor_set_tag(x_216, 1); +} +lean_ctor_set(x_216, 0, x_5); +x_217 = lean_alloc_ctor(0, 2, 0); +lean_ctor_set(x_217, 0, x_216); +lean_ctor_set(x_217, 1, x_9); +x_17 = x_217; +x_18 = x_12; +goto block_25; +} +default: +{ +lean_object* x_218; lean_object* x_219; +lean_dec(x_209); +x_218 = lean_alloc_ctor(1, 1, 0); +lean_ctor_set(x_218, 0, x_5); +x_219 = lean_alloc_ctor(0, 2, 0); +lean_ctor_set(x_219, 0, x_218); +lean_ctor_set(x_219, 1, x_9); +x_17 = x_219; +x_18 = x_12; +goto block_25; +} +} +} +} +} +block_25: +{ 
+lean_object* x_19; lean_object* x_20; lean_object* x_21; lean_object* x_22; lean_object* x_23; +x_19 = lean_ctor_get(x_17, 0); +lean_inc(x_19); +x_20 = lean_ctor_get(x_17, 1); +lean_inc(x_20); +lean_dec(x_17); +x_21 = lean_ctor_get(x_19, 0); +lean_inc(x_21); +lean_dec(x_19); +x_22 = lean_ctor_get(x_4, 2); +x_23 = lean_nat_add(x_6, x_22); +lean_dec(x_6); +x_5 = x_21; +x_6 = x_23; +x_7 = lean_box(0); +x_8 = lean_box(0); +x_9 = x_20; +x_12 = x_18; +goto _start; +} +} +} +} +LEAN_EXPORT lean_object* l_Std_Range_forIn_x27_loop___at_Lean_IR_ToIR_lowerLet___spec__4(lean_object* x_1, lean_object* x_2, lean_object* x_3, lean_object* x_4, lean_object* x_5, lean_object* x_6, lean_object* x_7, lean_object* x_8, lean_object* x_9, lean_object* x_10, lean_object* x_11, lean_object* x_12) { +_start: +{ +lean_object* x_13; uint8_t x_14; +x_13 = lean_ctor_get(x_4, 1); +x_14 = lean_nat_dec_lt(x_6, x_13); +if (x_14 == 0) +{ +lean_object* x_15; lean_object* x_16; +lean_dec(x_6); +x_15 = lean_alloc_ctor(0, 2, 0); +lean_ctor_set(x_15, 0, x_5); +lean_ctor_set(x_15, 1, x_9); +x_16 = lean_alloc_ctor(0, 2, 0); +lean_ctor_set(x_16, 0, x_15); +lean_ctor_set(x_16, 1, x_12); +return x_16; +} +else +{ +lean_object* x_17; lean_object* x_18; lean_object* x_26; lean_object* x_27; +x_26 = l_Lean_Compiler_LCNF_instInhabitedArg; +x_27 = lean_array_get(x_26, x_2, x_6); +switch (lean_obj_tag(x_27)) { +case 0: +{ +lean_object* x_28; lean_object* x_29; +x_28 = l_Lean_IR_instInhabitedCtorFieldInfo; +x_29 = lean_array_get(x_28, x_1, x_6); +switch (lean_obj_tag(x_29)) { +case 1: +{ +uint8_t x_30; +x_30 = !lean_is_exclusive(x_29); +if (x_30 == 0) +{ +lean_object* x_31; lean_object* x_32; lean_object* x_33; lean_object* x_34; +x_31 = lean_ctor_get(x_29, 0); +lean_dec(x_31); +x_32 = lean_box(1); +x_33 = lean_array_push(x_5, x_32); +lean_ctor_set(x_29, 0, x_33); +x_34 = lean_alloc_ctor(0, 2, 0); +lean_ctor_set(x_34, 0, x_29); +lean_ctor_set(x_34, 1, x_9); +x_17 = x_34; +x_18 = x_12; +goto block_25; +} +else +{ +lean_object* x_35; lean_object* x_36; lean_object* x_37; lean_object* x_38; +lean_dec(x_29); +x_35 = lean_box(1); +x_36 = lean_array_push(x_5, x_35); +x_37 = lean_alloc_ctor(1, 1, 0); +lean_ctor_set(x_37, 0, x_36); +x_38 = lean_alloc_ctor(0, 2, 0); +lean_ctor_set(x_38, 0, x_37); +lean_ctor_set(x_38, 1, x_9); +x_17 = x_38; +x_18 = x_12; +goto block_25; +} +} +case 2: +{ +uint8_t x_39; +x_39 = !lean_is_exclusive(x_29); +if (x_39 == 0) +{ +lean_object* x_40; lean_object* x_41; +x_40 = lean_ctor_get(x_29, 0); +lean_dec(x_40); +lean_ctor_set_tag(x_29, 1); +lean_ctor_set(x_29, 0, x_5); +x_41 = lean_alloc_ctor(0, 2, 0); +lean_ctor_set(x_41, 0, x_29); +lean_ctor_set(x_41, 1, x_9); +x_17 = x_41; +x_18 = x_12; +goto block_25; +} +else +{ +lean_object* x_42; lean_object* x_43; +lean_dec(x_29); +x_42 = lean_alloc_ctor(1, 1, 0); +lean_ctor_set(x_42, 0, x_5); +x_43 = lean_alloc_ctor(0, 2, 0); +lean_ctor_set(x_43, 0, x_42); +lean_ctor_set(x_43, 1, x_9); +x_17 = x_43; +x_18 = x_12; +goto block_25; +} +} +default: +{ +lean_object* x_44; lean_object* x_45; +lean_dec(x_29); +x_44 = lean_alloc_ctor(1, 1, 0); +lean_ctor_set(x_44, 0, x_5); +x_45 = lean_alloc_ctor(0, 2, 0); +lean_ctor_set(x_45, 0, x_44); +lean_ctor_set(x_45, 1, x_9); +x_17 = x_45; +x_18 = x_12; +goto block_25; +} +} +} +case 1: +{ +lean_object* x_46; uint8_t x_47; +x_46 = lean_ctor_get(x_9, 0); +lean_inc(x_46); +x_47 = !lean_is_exclusive(x_27); +if (x_47 == 0) +{ +uint8_t x_48; +x_48 = !lean_is_exclusive(x_46); +if (x_48 == 0) +{ +lean_object* x_49; lean_object* x_50; lean_object* 
x_51; lean_object* x_52; uint64_t x_53; uint64_t x_54; uint64_t x_55; uint64_t x_56; uint64_t x_57; uint64_t x_58; uint64_t x_59; size_t x_60; size_t x_61; size_t x_62; size_t x_63; size_t x_64; lean_object* x_65; lean_object* x_66; +x_49 = lean_ctor_get(x_27, 0); +x_50 = lean_ctor_get(x_46, 1); +x_51 = lean_ctor_get(x_46, 0); +lean_dec(x_51); +x_52 = lean_array_get_size(x_50); +x_53 = l___private_Lean_Expr_0__Lean_hashFVarId____x40_Lean_Expr___hyg_1730_(x_49); +x_54 = 32; +x_55 = lean_uint64_shift_right(x_53, x_54); +x_56 = lean_uint64_xor(x_53, x_55); +x_57 = 16; +x_58 = lean_uint64_shift_right(x_56, x_57); +x_59 = lean_uint64_xor(x_56, x_58); +x_60 = lean_uint64_to_usize(x_59); +x_61 = lean_usize_of_nat(x_52); +lean_dec(x_52); +x_62 = 1; +x_63 = lean_usize_sub(x_61, x_62); +x_64 = lean_usize_land(x_60, x_63); +x_65 = lean_array_uget(x_50, x_64); +lean_dec(x_50); +x_66 = l_Std_DHashMap_Internal_AssocList_get_x3f___at_Lean_IR_ToIR_lowerArg___spec__1(x_49, x_65); +lean_dec(x_65); +lean_dec(x_49); +if (lean_obj_tag(x_66) == 0) +{ +lean_ctor_set(x_27, 0, x_5); +lean_ctor_set(x_46, 1, x_9); +lean_ctor_set(x_46, 0, x_27); +x_17 = x_46; +x_18 = x_12; +goto block_25; +} +else +{ +uint8_t x_67; +lean_free_object(x_27); +x_67 = !lean_is_exclusive(x_66); +if (x_67 == 0) +{ +lean_object* x_68; +x_68 = lean_ctor_get(x_66, 0); +switch (lean_obj_tag(x_68)) { +case 0: +{ +uint8_t x_69; +lean_free_object(x_66); +x_69 = !lean_is_exclusive(x_68); +if (x_69 == 0) +{ +lean_object* x_70; lean_object* x_71; lean_object* x_72; +x_70 = lean_ctor_get(x_68, 0); +x_71 = l_Lean_IR_instInhabitedCtorFieldInfo; +x_72 = lean_array_get(x_71, x_1, x_6); +switch (lean_obj_tag(x_72)) { +case 1: +{ +uint8_t x_73; +x_73 = !lean_is_exclusive(x_72); +if (x_73 == 0) +{ +lean_object* x_74; lean_object* x_75; +x_74 = lean_ctor_get(x_72, 0); +lean_dec(x_74); +lean_ctor_set_tag(x_72, 0); +lean_ctor_set(x_72, 0, x_70); +x_75 = lean_array_push(x_5, x_72); +lean_ctor_set_tag(x_68, 1); +lean_ctor_set(x_68, 0, x_75); +lean_ctor_set(x_46, 1, x_9); +lean_ctor_set(x_46, 0, x_68); +x_17 = x_46; +x_18 = x_12; +goto block_25; +} +else +{ +lean_object* x_76; lean_object* x_77; +lean_dec(x_72); +x_76 = lean_alloc_ctor(0, 1, 0); +lean_ctor_set(x_76, 0, x_70); +x_77 = lean_array_push(x_5, x_76); +lean_ctor_set_tag(x_68, 1); +lean_ctor_set(x_68, 0, x_77); +lean_ctor_set(x_46, 1, x_9); +lean_ctor_set(x_46, 0, x_68); +x_17 = x_46; +x_18 = x_12; +goto block_25; +} +} +case 2: +{ +uint8_t x_78; +lean_free_object(x_68); +lean_dec(x_70); +x_78 = !lean_is_exclusive(x_72); +if (x_78 == 0) +{ +lean_object* x_79; +x_79 = lean_ctor_get(x_72, 0); +lean_dec(x_79); +lean_ctor_set_tag(x_72, 1); +lean_ctor_set(x_72, 0, x_5); +lean_ctor_set(x_46, 1, x_9); +lean_ctor_set(x_46, 0, x_72); +x_17 = x_46; +x_18 = x_12; +goto block_25; +} +else +{ +lean_object* x_80; +lean_dec(x_72); +x_80 = lean_alloc_ctor(1, 1, 0); +lean_ctor_set(x_80, 0, x_5); +lean_ctor_set(x_46, 1, x_9); +lean_ctor_set(x_46, 0, x_80); +x_17 = x_46; +x_18 = x_12; +goto block_25; +} +} +default: +{ +lean_dec(x_72); +lean_dec(x_70); +lean_ctor_set_tag(x_68, 1); +lean_ctor_set(x_68, 0, x_5); +lean_ctor_set(x_46, 1, x_9); +lean_ctor_set(x_46, 0, x_68); +x_17 = x_46; +x_18 = x_12; +goto block_25; +} +} +} +else +{ +lean_object* x_81; lean_object* x_82; lean_object* x_83; +x_81 = lean_ctor_get(x_68, 0); +lean_inc(x_81); +lean_dec(x_68); +x_82 = l_Lean_IR_instInhabitedCtorFieldInfo; +x_83 = lean_array_get(x_82, x_1, x_6); +switch (lean_obj_tag(x_83)) { +case 1: +{ +lean_object* x_84; lean_object* x_85; 
lean_object* x_86; lean_object* x_87; +if (lean_is_exclusive(x_83)) { + lean_ctor_release(x_83, 0); + x_84 = x_83; +} else { + lean_dec_ref(x_83); + x_84 = lean_box(0); +} +if (lean_is_scalar(x_84)) { + x_85 = lean_alloc_ctor(0, 1, 0); +} else { + x_85 = x_84; + lean_ctor_set_tag(x_85, 0); +} +lean_ctor_set(x_85, 0, x_81); +x_86 = lean_array_push(x_5, x_85); +x_87 = lean_alloc_ctor(1, 1, 0); +lean_ctor_set(x_87, 0, x_86); +lean_ctor_set(x_46, 1, x_9); +lean_ctor_set(x_46, 0, x_87); +x_17 = x_46; +x_18 = x_12; +goto block_25; +} +case 2: +{ +lean_object* x_88; lean_object* x_89; +lean_dec(x_81); +if (lean_is_exclusive(x_83)) { + lean_ctor_release(x_83, 0); + x_88 = x_83; +} else { + lean_dec_ref(x_83); + x_88 = lean_box(0); +} +if (lean_is_scalar(x_88)) { + x_89 = lean_alloc_ctor(1, 1, 0); +} else { + x_89 = x_88; + lean_ctor_set_tag(x_89, 1); +} +lean_ctor_set(x_89, 0, x_5); +lean_ctor_set(x_46, 1, x_9); +lean_ctor_set(x_46, 0, x_89); +x_17 = x_46; +x_18 = x_12; +goto block_25; +} +default: +{ +lean_object* x_90; +lean_dec(x_83); +lean_dec(x_81); +x_90 = lean_alloc_ctor(1, 1, 0); +lean_ctor_set(x_90, 0, x_5); +lean_ctor_set(x_46, 1, x_9); +lean_ctor_set(x_46, 0, x_90); +x_17 = x_46; +x_18 = x_12; +goto block_25; +} +} +} +} +case 1: +{ +uint8_t x_91; +lean_free_object(x_66); +x_91 = !lean_is_exclusive(x_68); +if (x_91 == 0) +{ +lean_object* x_92; +x_92 = lean_ctor_get(x_68, 0); +lean_dec(x_92); +lean_ctor_set(x_68, 0, x_5); +lean_ctor_set(x_46, 1, x_9); +lean_ctor_set(x_46, 0, x_68); +x_17 = x_46; +x_18 = x_12; +goto block_25; +} +else +{ +lean_object* x_93; +lean_dec(x_68); +x_93 = lean_alloc_ctor(1, 1, 0); +lean_ctor_set(x_93, 0, x_5); +lean_ctor_set(x_46, 1, x_9); +lean_ctor_set(x_46, 0, x_93); +x_17 = x_46; +x_18 = x_12; +goto block_25; +} +} +default: +{ +lean_ctor_set(x_66, 0, x_5); +lean_ctor_set(x_46, 1, x_9); +lean_ctor_set(x_46, 0, x_66); +x_17 = x_46; +x_18 = x_12; +goto block_25; +} +} +} +else +{ +lean_object* x_94; +x_94 = lean_ctor_get(x_66, 0); +lean_inc(x_94); +lean_dec(x_66); +switch (lean_obj_tag(x_94)) { +case 0: +{ +lean_object* x_95; lean_object* x_96; lean_object* x_97; lean_object* x_98; +x_95 = lean_ctor_get(x_94, 0); +lean_inc(x_95); +if (lean_is_exclusive(x_94)) { + lean_ctor_release(x_94, 0); + x_96 = x_94; +} else { + lean_dec_ref(x_94); + x_96 = lean_box(0); +} +x_97 = l_Lean_IR_instInhabitedCtorFieldInfo; +x_98 = lean_array_get(x_97, x_1, x_6); +switch (lean_obj_tag(x_98)) { +case 1: +{ +lean_object* x_99; lean_object* x_100; lean_object* x_101; lean_object* x_102; +if (lean_is_exclusive(x_98)) { + lean_ctor_release(x_98, 0); + x_99 = x_98; +} else { + lean_dec_ref(x_98); + x_99 = lean_box(0); +} +if (lean_is_scalar(x_99)) { + x_100 = lean_alloc_ctor(0, 1, 0); +} else { + x_100 = x_99; + lean_ctor_set_tag(x_100, 0); +} +lean_ctor_set(x_100, 0, x_95); +x_101 = lean_array_push(x_5, x_100); +if (lean_is_scalar(x_96)) { + x_102 = lean_alloc_ctor(1, 1, 0); +} else { + x_102 = x_96; + lean_ctor_set_tag(x_102, 1); +} +lean_ctor_set(x_102, 0, x_101); +lean_ctor_set(x_46, 1, x_9); +lean_ctor_set(x_46, 0, x_102); +x_17 = x_46; +x_18 = x_12; +goto block_25; +} +case 2: +{ +lean_object* x_103; lean_object* x_104; +lean_dec(x_96); +lean_dec(x_95); +if (lean_is_exclusive(x_98)) { + lean_ctor_release(x_98, 0); + x_103 = x_98; +} else { + lean_dec_ref(x_98); + x_103 = lean_box(0); +} +if (lean_is_scalar(x_103)) { + x_104 = lean_alloc_ctor(1, 1, 0); +} else { + x_104 = x_103; + lean_ctor_set_tag(x_104, 1); +} +lean_ctor_set(x_104, 0, x_5); +lean_ctor_set(x_46, 1, x_9); 
+lean_ctor_set(x_46, 0, x_104); +x_17 = x_46; +x_18 = x_12; +goto block_25; +} +default: +{ +lean_object* x_105; +lean_dec(x_98); +lean_dec(x_95); +if (lean_is_scalar(x_96)) { + x_105 = lean_alloc_ctor(1, 1, 0); +} else { + x_105 = x_96; + lean_ctor_set_tag(x_105, 1); +} +lean_ctor_set(x_105, 0, x_5); +lean_ctor_set(x_46, 1, x_9); +lean_ctor_set(x_46, 0, x_105); +x_17 = x_46; +x_18 = x_12; +goto block_25; +} +} +} +case 1: +{ +lean_object* x_106; lean_object* x_107; +if (lean_is_exclusive(x_94)) { + lean_ctor_release(x_94, 0); + x_106 = x_94; +} else { + lean_dec_ref(x_94); + x_106 = lean_box(0); +} +if (lean_is_scalar(x_106)) { + x_107 = lean_alloc_ctor(1, 1, 0); +} else { + x_107 = x_106; +} +lean_ctor_set(x_107, 0, x_5); +lean_ctor_set(x_46, 1, x_9); +lean_ctor_set(x_46, 0, x_107); +x_17 = x_46; +x_18 = x_12; +goto block_25; +} +default: +{ +lean_object* x_108; +x_108 = lean_alloc_ctor(1, 1, 0); +lean_ctor_set(x_108, 0, x_5); +lean_ctor_set(x_46, 1, x_9); +lean_ctor_set(x_46, 0, x_108); +x_17 = x_46; +x_18 = x_12; +goto block_25; +} +} +} +} +} +else +{ +lean_object* x_109; lean_object* x_110; lean_object* x_111; uint64_t x_112; uint64_t x_113; uint64_t x_114; uint64_t x_115; uint64_t x_116; uint64_t x_117; uint64_t x_118; size_t x_119; size_t x_120; size_t x_121; size_t x_122; size_t x_123; lean_object* x_124; lean_object* x_125; +x_109 = lean_ctor_get(x_27, 0); +x_110 = lean_ctor_get(x_46, 1); +lean_inc(x_110); +lean_dec(x_46); +x_111 = lean_array_get_size(x_110); +x_112 = l___private_Lean_Expr_0__Lean_hashFVarId____x40_Lean_Expr___hyg_1730_(x_109); +x_113 = 32; +x_114 = lean_uint64_shift_right(x_112, x_113); +x_115 = lean_uint64_xor(x_112, x_114); +x_116 = 16; +x_117 = lean_uint64_shift_right(x_115, x_116); +x_118 = lean_uint64_xor(x_115, x_117); +x_119 = lean_uint64_to_usize(x_118); +x_120 = lean_usize_of_nat(x_111); +lean_dec(x_111); +x_121 = 1; +x_122 = lean_usize_sub(x_120, x_121); +x_123 = lean_usize_land(x_119, x_122); +x_124 = lean_array_uget(x_110, x_123); +lean_dec(x_110); +x_125 = l_Std_DHashMap_Internal_AssocList_get_x3f___at_Lean_IR_ToIR_lowerArg___spec__1(x_109, x_124); +lean_dec(x_124); +lean_dec(x_109); +if (lean_obj_tag(x_125) == 0) +{ +lean_object* x_126; +lean_ctor_set(x_27, 0, x_5); +x_126 = lean_alloc_ctor(0, 2, 0); +lean_ctor_set(x_126, 0, x_27); +lean_ctor_set(x_126, 1, x_9); +x_17 = x_126; +x_18 = x_12; +goto block_25; +} +else +{ +lean_object* x_127; lean_object* x_128; +lean_free_object(x_27); +x_127 = lean_ctor_get(x_125, 0); +lean_inc(x_127); +if (lean_is_exclusive(x_125)) { + lean_ctor_release(x_125, 0); + x_128 = x_125; +} else { + lean_dec_ref(x_125); + x_128 = lean_box(0); +} +switch (lean_obj_tag(x_127)) { +case 0: +{ +lean_object* x_129; lean_object* x_130; lean_object* x_131; lean_object* x_132; +lean_dec(x_128); +x_129 = lean_ctor_get(x_127, 0); +lean_inc(x_129); +if (lean_is_exclusive(x_127)) { + lean_ctor_release(x_127, 0); + x_130 = x_127; +} else { + lean_dec_ref(x_127); + x_130 = lean_box(0); +} +x_131 = l_Lean_IR_instInhabitedCtorFieldInfo; +x_132 = lean_array_get(x_131, x_1, x_6); +switch (lean_obj_tag(x_132)) { +case 1: +{ +lean_object* x_133; lean_object* x_134; lean_object* x_135; lean_object* x_136; lean_object* x_137; +if (lean_is_exclusive(x_132)) { + lean_ctor_release(x_132, 0); + x_133 = x_132; +} else { + lean_dec_ref(x_132); + x_133 = lean_box(0); +} +if (lean_is_scalar(x_133)) { + x_134 = lean_alloc_ctor(0, 1, 0); +} else { + x_134 = x_133; + lean_ctor_set_tag(x_134, 0); +} +lean_ctor_set(x_134, 0, x_129); +x_135 = 
lean_array_push(x_5, x_134); +if (lean_is_scalar(x_130)) { + x_136 = lean_alloc_ctor(1, 1, 0); +} else { + x_136 = x_130; + lean_ctor_set_tag(x_136, 1); +} +lean_ctor_set(x_136, 0, x_135); +x_137 = lean_alloc_ctor(0, 2, 0); +lean_ctor_set(x_137, 0, x_136); +lean_ctor_set(x_137, 1, x_9); +x_17 = x_137; +x_18 = x_12; +goto block_25; +} +case 2: +{ +lean_object* x_138; lean_object* x_139; lean_object* x_140; +lean_dec(x_130); +lean_dec(x_129); +if (lean_is_exclusive(x_132)) { + lean_ctor_release(x_132, 0); + x_138 = x_132; +} else { + lean_dec_ref(x_132); + x_138 = lean_box(0); +} +if (lean_is_scalar(x_138)) { + x_139 = lean_alloc_ctor(1, 1, 0); +} else { + x_139 = x_138; + lean_ctor_set_tag(x_139, 1); +} +lean_ctor_set(x_139, 0, x_5); +x_140 = lean_alloc_ctor(0, 2, 0); +lean_ctor_set(x_140, 0, x_139); +lean_ctor_set(x_140, 1, x_9); +x_17 = x_140; +x_18 = x_12; +goto block_25; +} +default: +{ +lean_object* x_141; lean_object* x_142; +lean_dec(x_132); +lean_dec(x_129); +if (lean_is_scalar(x_130)) { + x_141 = lean_alloc_ctor(1, 1, 0); +} else { + x_141 = x_130; + lean_ctor_set_tag(x_141, 1); +} +lean_ctor_set(x_141, 0, x_5); +x_142 = lean_alloc_ctor(0, 2, 0); +lean_ctor_set(x_142, 0, x_141); +lean_ctor_set(x_142, 1, x_9); +x_17 = x_142; +x_18 = x_12; +goto block_25; +} +} +} +case 1: +{ +lean_object* x_143; lean_object* x_144; lean_object* x_145; +lean_dec(x_128); +if (lean_is_exclusive(x_127)) { + lean_ctor_release(x_127, 0); + x_143 = x_127; +} else { + lean_dec_ref(x_127); + x_143 = lean_box(0); +} +if (lean_is_scalar(x_143)) { + x_144 = lean_alloc_ctor(1, 1, 0); +} else { + x_144 = x_143; +} +lean_ctor_set(x_144, 0, x_5); +x_145 = lean_alloc_ctor(0, 2, 0); +lean_ctor_set(x_145, 0, x_144); +lean_ctor_set(x_145, 1, x_9); +x_17 = x_145; +x_18 = x_12; +goto block_25; +} +default: +{ +lean_object* x_146; lean_object* x_147; +if (lean_is_scalar(x_128)) { + x_146 = lean_alloc_ctor(1, 1, 0); +} else { + x_146 = x_128; +} +lean_ctor_set(x_146, 0, x_5); +x_147 = lean_alloc_ctor(0, 2, 0); +lean_ctor_set(x_147, 0, x_146); +lean_ctor_set(x_147, 1, x_9); +x_17 = x_147; +x_18 = x_12; +goto block_25; +} +} +} +} +} +else +{ +lean_object* x_148; lean_object* x_149; lean_object* x_150; lean_object* x_151; uint64_t x_152; uint64_t x_153; uint64_t x_154; uint64_t x_155; uint64_t x_156; uint64_t x_157; uint64_t x_158; size_t x_159; size_t x_160; size_t x_161; size_t x_162; size_t x_163; lean_object* x_164; lean_object* x_165; +x_148 = lean_ctor_get(x_27, 0); +lean_inc(x_148); +lean_dec(x_27); +x_149 = lean_ctor_get(x_46, 1); +lean_inc(x_149); +if (lean_is_exclusive(x_46)) { + lean_ctor_release(x_46, 0); + lean_ctor_release(x_46, 1); + x_150 = x_46; +} else { + lean_dec_ref(x_46); + x_150 = lean_box(0); +} +x_151 = lean_array_get_size(x_149); +x_152 = l___private_Lean_Expr_0__Lean_hashFVarId____x40_Lean_Expr___hyg_1730_(x_148); +x_153 = 32; +x_154 = lean_uint64_shift_right(x_152, x_153); +x_155 = lean_uint64_xor(x_152, x_154); +x_156 = 16; +x_157 = lean_uint64_shift_right(x_155, x_156); +x_158 = lean_uint64_xor(x_155, x_157); +x_159 = lean_uint64_to_usize(x_158); +x_160 = lean_usize_of_nat(x_151); +lean_dec(x_151); +x_161 = 1; +x_162 = lean_usize_sub(x_160, x_161); +x_163 = lean_usize_land(x_159, x_162); +x_164 = lean_array_uget(x_149, x_163); +lean_dec(x_149); +x_165 = l_Std_DHashMap_Internal_AssocList_get_x3f___at_Lean_IR_ToIR_lowerArg___spec__1(x_148, x_164); +lean_dec(x_164); +lean_dec(x_148); +if (lean_obj_tag(x_165) == 0) +{ +lean_object* x_166; lean_object* x_167; +x_166 = lean_alloc_ctor(1, 1, 0); 
+lean_ctor_set(x_166, 0, x_5); +if (lean_is_scalar(x_150)) { + x_167 = lean_alloc_ctor(0, 2, 0); +} else { + x_167 = x_150; +} +lean_ctor_set(x_167, 0, x_166); +lean_ctor_set(x_167, 1, x_9); +x_17 = x_167; +x_18 = x_12; +goto block_25; +} +else +{ +lean_object* x_168; lean_object* x_169; +x_168 = lean_ctor_get(x_165, 0); +lean_inc(x_168); +if (lean_is_exclusive(x_165)) { + lean_ctor_release(x_165, 0); + x_169 = x_165; +} else { + lean_dec_ref(x_165); + x_169 = lean_box(0); +} +switch (lean_obj_tag(x_168)) { +case 0: +{ +lean_object* x_170; lean_object* x_171; lean_object* x_172; lean_object* x_173; +lean_dec(x_169); +x_170 = lean_ctor_get(x_168, 0); +lean_inc(x_170); +if (lean_is_exclusive(x_168)) { + lean_ctor_release(x_168, 0); + x_171 = x_168; +} else { + lean_dec_ref(x_168); + x_171 = lean_box(0); +} +x_172 = l_Lean_IR_instInhabitedCtorFieldInfo; +x_173 = lean_array_get(x_172, x_1, x_6); +switch (lean_obj_tag(x_173)) { +case 1: +{ +lean_object* x_174; lean_object* x_175; lean_object* x_176; lean_object* x_177; lean_object* x_178; +if (lean_is_exclusive(x_173)) { + lean_ctor_release(x_173, 0); + x_174 = x_173; +} else { + lean_dec_ref(x_173); + x_174 = lean_box(0); +} +if (lean_is_scalar(x_174)) { + x_175 = lean_alloc_ctor(0, 1, 0); +} else { + x_175 = x_174; + lean_ctor_set_tag(x_175, 0); +} +lean_ctor_set(x_175, 0, x_170); +x_176 = lean_array_push(x_5, x_175); +if (lean_is_scalar(x_171)) { + x_177 = lean_alloc_ctor(1, 1, 0); +} else { + x_177 = x_171; + lean_ctor_set_tag(x_177, 1); +} +lean_ctor_set(x_177, 0, x_176); +if (lean_is_scalar(x_150)) { + x_178 = lean_alloc_ctor(0, 2, 0); +} else { + x_178 = x_150; +} +lean_ctor_set(x_178, 0, x_177); +lean_ctor_set(x_178, 1, x_9); +x_17 = x_178; +x_18 = x_12; +goto block_25; +} +case 2: +{ +lean_object* x_179; lean_object* x_180; lean_object* x_181; +lean_dec(x_171); +lean_dec(x_170); +if (lean_is_exclusive(x_173)) { + lean_ctor_release(x_173, 0); + x_179 = x_173; +} else { + lean_dec_ref(x_173); + x_179 = lean_box(0); +} +if (lean_is_scalar(x_179)) { + x_180 = lean_alloc_ctor(1, 1, 0); +} else { + x_180 = x_179; + lean_ctor_set_tag(x_180, 1); +} +lean_ctor_set(x_180, 0, x_5); +if (lean_is_scalar(x_150)) { + x_181 = lean_alloc_ctor(0, 2, 0); +} else { + x_181 = x_150; +} +lean_ctor_set(x_181, 0, x_180); +lean_ctor_set(x_181, 1, x_9); +x_17 = x_181; +x_18 = x_12; +goto block_25; +} +default: +{ +lean_object* x_182; lean_object* x_183; +lean_dec(x_173); +lean_dec(x_170); +if (lean_is_scalar(x_171)) { + x_182 = lean_alloc_ctor(1, 1, 0); +} else { + x_182 = x_171; + lean_ctor_set_tag(x_182, 1); +} +lean_ctor_set(x_182, 0, x_5); +if (lean_is_scalar(x_150)) { + x_183 = lean_alloc_ctor(0, 2, 0); +} else { + x_183 = x_150; +} +lean_ctor_set(x_183, 0, x_182); +lean_ctor_set(x_183, 1, x_9); +x_17 = x_183; +x_18 = x_12; +goto block_25; +} +} +} +case 1: +{ +lean_object* x_184; lean_object* x_185; lean_object* x_186; +lean_dec(x_169); +if (lean_is_exclusive(x_168)) { + lean_ctor_release(x_168, 0); + x_184 = x_168; +} else { + lean_dec_ref(x_168); + x_184 = lean_box(0); +} +if (lean_is_scalar(x_184)) { + x_185 = lean_alloc_ctor(1, 1, 0); +} else { + x_185 = x_184; +} +lean_ctor_set(x_185, 0, x_5); +if (lean_is_scalar(x_150)) { + x_186 = lean_alloc_ctor(0, 2, 0); +} else { + x_186 = x_150; +} +lean_ctor_set(x_186, 0, x_185); +lean_ctor_set(x_186, 1, x_9); +x_17 = x_186; +x_18 = x_12; +goto block_25; +} +default: +{ +lean_object* x_187; lean_object* x_188; +if (lean_is_scalar(x_169)) { + x_187 = lean_alloc_ctor(1, 1, 0); +} else { + x_187 = x_169; +} 
+lean_ctor_set(x_187, 0, x_5); +if (lean_is_scalar(x_150)) { + x_188 = lean_alloc_ctor(0, 2, 0); +} else { + x_188 = x_150; +} +lean_ctor_set(x_188, 0, x_187); +lean_ctor_set(x_188, 1, x_9); +x_17 = x_188; +x_18 = x_12; +goto block_25; +} +} +} +} +} +default: +{ +uint8_t x_189; +x_189 = !lean_is_exclusive(x_27); +if (x_189 == 0) +{ +lean_object* x_190; lean_object* x_191; lean_object* x_192; +x_190 = lean_ctor_get(x_27, 0); +lean_dec(x_190); +x_191 = l_Lean_IR_instInhabitedCtorFieldInfo; +x_192 = lean_array_get(x_191, x_1, x_6); +switch (lean_obj_tag(x_192)) { +case 1: +{ +uint8_t x_193; +lean_free_object(x_27); +x_193 = !lean_is_exclusive(x_192); +if (x_193 == 0) +{ +lean_object* x_194; lean_object* x_195; lean_object* x_196; lean_object* x_197; +x_194 = lean_ctor_get(x_192, 0); +lean_dec(x_194); +x_195 = lean_box(1); +x_196 = lean_array_push(x_5, x_195); +lean_ctor_set(x_192, 0, x_196); +x_197 = lean_alloc_ctor(0, 2, 0); +lean_ctor_set(x_197, 0, x_192); +lean_ctor_set(x_197, 1, x_9); +x_17 = x_197; +x_18 = x_12; +goto block_25; +} +else +{ +lean_object* x_198; lean_object* x_199; lean_object* x_200; lean_object* x_201; +lean_dec(x_192); +x_198 = lean_box(1); +x_199 = lean_array_push(x_5, x_198); +x_200 = lean_alloc_ctor(1, 1, 0); +lean_ctor_set(x_200, 0, x_199); +x_201 = lean_alloc_ctor(0, 2, 0); +lean_ctor_set(x_201, 0, x_200); +lean_ctor_set(x_201, 1, x_9); +x_17 = x_201; +x_18 = x_12; +goto block_25; +} +} +case 2: +{ +uint8_t x_202; +lean_free_object(x_27); +x_202 = !lean_is_exclusive(x_192); +if (x_202 == 0) +{ +lean_object* x_203; lean_object* x_204; +x_203 = lean_ctor_get(x_192, 0); +lean_dec(x_203); +lean_ctor_set_tag(x_192, 1); +lean_ctor_set(x_192, 0, x_5); +x_204 = lean_alloc_ctor(0, 2, 0); +lean_ctor_set(x_204, 0, x_192); +lean_ctor_set(x_204, 1, x_9); +x_17 = x_204; +x_18 = x_12; +goto block_25; +} +else +{ +lean_object* x_205; lean_object* x_206; +lean_dec(x_192); +x_205 = lean_alloc_ctor(1, 1, 0); +lean_ctor_set(x_205, 0, x_5); +x_206 = lean_alloc_ctor(0, 2, 0); +lean_ctor_set(x_206, 0, x_205); +lean_ctor_set(x_206, 1, x_9); +x_17 = x_206; +x_18 = x_12; +goto block_25; +} +} +default: +{ +lean_object* x_207; +lean_dec(x_192); +lean_ctor_set_tag(x_27, 1); +lean_ctor_set(x_27, 0, x_5); +x_207 = lean_alloc_ctor(0, 2, 0); +lean_ctor_set(x_207, 0, x_27); +lean_ctor_set(x_207, 1, x_9); +x_17 = x_207; +x_18 = x_12; +goto block_25; +} +} +} +else +{ +lean_object* x_208; lean_object* x_209; +lean_dec(x_27); +x_208 = l_Lean_IR_instInhabitedCtorFieldInfo; +x_209 = lean_array_get(x_208, x_1, x_6); +switch (lean_obj_tag(x_209)) { +case 1: +{ +lean_object* x_210; lean_object* x_211; lean_object* x_212; lean_object* x_213; lean_object* x_214; +if (lean_is_exclusive(x_209)) { + lean_ctor_release(x_209, 0); + x_210 = x_209; +} else { + lean_dec_ref(x_209); + x_210 = lean_box(0); +} +x_211 = lean_box(1); +x_212 = lean_array_push(x_5, x_211); +if (lean_is_scalar(x_210)) { + x_213 = lean_alloc_ctor(1, 1, 0); +} else { + x_213 = x_210; +} +lean_ctor_set(x_213, 0, x_212); +x_214 = lean_alloc_ctor(0, 2, 0); +lean_ctor_set(x_214, 0, x_213); +lean_ctor_set(x_214, 1, x_9); +x_17 = x_214; +x_18 = x_12; +goto block_25; +} +case 2: +{ +lean_object* x_215; lean_object* x_216; lean_object* x_217; +if (lean_is_exclusive(x_209)) { + lean_ctor_release(x_209, 0); + x_215 = x_209; +} else { + lean_dec_ref(x_209); + x_215 = lean_box(0); +} +if (lean_is_scalar(x_215)) { + x_216 = lean_alloc_ctor(1, 1, 0); +} else { + x_216 = x_215; + lean_ctor_set_tag(x_216, 1); +} +lean_ctor_set(x_216, 0, x_5); +x_217 = 
lean_alloc_ctor(0, 2, 0); +lean_ctor_set(x_217, 0, x_216); +lean_ctor_set(x_217, 1, x_9); +x_17 = x_217; +x_18 = x_12; +goto block_25; +} +default: +{ +lean_object* x_218; lean_object* x_219; +lean_dec(x_209); +x_218 = lean_alloc_ctor(1, 1, 0); +lean_ctor_set(x_218, 0, x_5); +x_219 = lean_alloc_ctor(0, 2, 0); +lean_ctor_set(x_219, 0, x_218); +lean_ctor_set(x_219, 1, x_9); +x_17 = x_219; +x_18 = x_12; +goto block_25; +} +} +} +} +} +block_25: +{ +lean_object* x_19; lean_object* x_20; lean_object* x_21; lean_object* x_22; lean_object* x_23; +x_19 = lean_ctor_get(x_17, 0); +lean_inc(x_19); +x_20 = lean_ctor_get(x_17, 1); +lean_inc(x_20); +lean_dec(x_17); +x_21 = lean_ctor_get(x_19, 0); +lean_inc(x_21); +lean_dec(x_19); +x_22 = lean_ctor_get(x_4, 2); +x_23 = lean_nat_add(x_6, x_22); +lean_dec(x_6); +x_5 = x_21; +x_6 = x_23; +x_7 = lean_box(0); +x_8 = lean_box(0); +x_9 = x_20; +x_12 = x_18; +goto _start; +} +} +} +} +LEAN_EXPORT lean_object* l_Std_Range_forIn_x27_loop___at_Lean_IR_ToIR_lowerLet___spec__5(lean_object* x_1, lean_object* x_2, lean_object* x_3, lean_object* x_4, lean_object* x_5, lean_object* x_6, lean_object* x_7, lean_object* x_8, lean_object* x_9, lean_object* x_10, lean_object* x_11, lean_object* x_12) { +_start: +{ +lean_object* x_13; uint8_t x_14; +x_13 = lean_ctor_get(x_4, 1); +x_14 = lean_nat_dec_lt(x_6, x_13); +if (x_14 == 0) +{ +lean_object* x_15; lean_object* x_16; +lean_dec(x_6); +x_15 = lean_alloc_ctor(0, 2, 0); +lean_ctor_set(x_15, 0, x_5); +lean_ctor_set(x_15, 1, x_9); +x_16 = lean_alloc_ctor(0, 2, 0); +lean_ctor_set(x_16, 0, x_15); +lean_ctor_set(x_16, 1, x_12); +return x_16; +} +else +{ +lean_object* x_17; lean_object* x_18; lean_object* x_26; lean_object* x_27; +x_26 = l_Lean_Compiler_LCNF_instInhabitedArg; +x_27 = lean_array_get(x_26, x_2, x_6); +switch (lean_obj_tag(x_27)) { +case 0: +{ +lean_object* x_28; lean_object* x_29; +x_28 = l_Lean_IR_instInhabitedCtorFieldInfo; +x_29 = lean_array_get(x_28, x_1, x_6); +switch (lean_obj_tag(x_29)) { +case 1: +{ +uint8_t x_30; +x_30 = !lean_is_exclusive(x_29); +if (x_30 == 0) +{ +lean_object* x_31; lean_object* x_32; lean_object* x_33; lean_object* x_34; +x_31 = lean_ctor_get(x_29, 0); +lean_dec(x_31); +x_32 = lean_box(1); +x_33 = lean_array_push(x_5, x_32); +lean_ctor_set(x_29, 0, x_33); +x_34 = lean_alloc_ctor(0, 2, 0); +lean_ctor_set(x_34, 0, x_29); +lean_ctor_set(x_34, 1, x_9); +x_17 = x_34; +x_18 = x_12; +goto block_25; +} +else +{ +lean_object* x_35; lean_object* x_36; lean_object* x_37; lean_object* x_38; +lean_dec(x_29); +x_35 = lean_box(1); +x_36 = lean_array_push(x_5, x_35); +x_37 = lean_alloc_ctor(1, 1, 0); +lean_ctor_set(x_37, 0, x_36); +x_38 = lean_alloc_ctor(0, 2, 0); +lean_ctor_set(x_38, 0, x_37); +lean_ctor_set(x_38, 1, x_9); +x_17 = x_38; +x_18 = x_12; +goto block_25; +} +} +case 2: +{ +uint8_t x_39; +x_39 = !lean_is_exclusive(x_29); +if (x_39 == 0) +{ +lean_object* x_40; lean_object* x_41; +x_40 = lean_ctor_get(x_29, 0); +lean_dec(x_40); +lean_ctor_set_tag(x_29, 1); +lean_ctor_set(x_29, 0, x_5); +x_41 = lean_alloc_ctor(0, 2, 0); +lean_ctor_set(x_41, 0, x_29); +lean_ctor_set(x_41, 1, x_9); +x_17 = x_41; +x_18 = x_12; +goto block_25; +} +else +{ +lean_object* x_42; lean_object* x_43; +lean_dec(x_29); +x_42 = lean_alloc_ctor(1, 1, 0); +lean_ctor_set(x_42, 0, x_5); +x_43 = lean_alloc_ctor(0, 2, 0); +lean_ctor_set(x_43, 0, x_42); +lean_ctor_set(x_43, 1, x_9); +x_17 = x_43; +x_18 = x_12; +goto block_25; +} +} +default: +{ +lean_object* x_44; lean_object* x_45; +lean_dec(x_29); +x_44 = lean_alloc_ctor(1, 1, 0); 
+lean_ctor_set(x_44, 0, x_5); +x_45 = lean_alloc_ctor(0, 2, 0); +lean_ctor_set(x_45, 0, x_44); +lean_ctor_set(x_45, 1, x_9); +x_17 = x_45; +x_18 = x_12; +goto block_25; +} +} +} +case 1: +{ +lean_object* x_46; uint8_t x_47; +x_46 = lean_ctor_get(x_9, 0); +lean_inc(x_46); +x_47 = !lean_is_exclusive(x_27); +if (x_47 == 0) +{ +uint8_t x_48; +x_48 = !lean_is_exclusive(x_46); +if (x_48 == 0) +{ +lean_object* x_49; lean_object* x_50; lean_object* x_51; lean_object* x_52; uint64_t x_53; uint64_t x_54; uint64_t x_55; uint64_t x_56; uint64_t x_57; uint64_t x_58; uint64_t x_59; size_t x_60; size_t x_61; size_t x_62; size_t x_63; size_t x_64; lean_object* x_65; lean_object* x_66; +x_49 = lean_ctor_get(x_27, 0); +x_50 = lean_ctor_get(x_46, 1); +x_51 = lean_ctor_get(x_46, 0); +lean_dec(x_51); +x_52 = lean_array_get_size(x_50); +x_53 = l___private_Lean_Expr_0__Lean_hashFVarId____x40_Lean_Expr___hyg_1730_(x_49); +x_54 = 32; +x_55 = lean_uint64_shift_right(x_53, x_54); +x_56 = lean_uint64_xor(x_53, x_55); +x_57 = 16; +x_58 = lean_uint64_shift_right(x_56, x_57); +x_59 = lean_uint64_xor(x_56, x_58); +x_60 = lean_uint64_to_usize(x_59); +x_61 = lean_usize_of_nat(x_52); +lean_dec(x_52); +x_62 = 1; +x_63 = lean_usize_sub(x_61, x_62); +x_64 = lean_usize_land(x_60, x_63); +x_65 = lean_array_uget(x_50, x_64); +lean_dec(x_50); +x_66 = l_Std_DHashMap_Internal_AssocList_get_x3f___at_Lean_IR_ToIR_lowerArg___spec__1(x_49, x_65); +lean_dec(x_65); +lean_dec(x_49); +if (lean_obj_tag(x_66) == 0) +{ +lean_ctor_set(x_27, 0, x_5); +lean_ctor_set(x_46, 1, x_9); +lean_ctor_set(x_46, 0, x_27); +x_17 = x_46; +x_18 = x_12; +goto block_25; +} +else +{ +uint8_t x_67; +lean_free_object(x_27); +x_67 = !lean_is_exclusive(x_66); +if (x_67 == 0) +{ +lean_object* x_68; +x_68 = lean_ctor_get(x_66, 0); +switch (lean_obj_tag(x_68)) { +case 0: +{ +uint8_t x_69; +lean_free_object(x_66); +x_69 = !lean_is_exclusive(x_68); +if (x_69 == 0) +{ +lean_object* x_70; lean_object* x_71; lean_object* x_72; +x_70 = lean_ctor_get(x_68, 0); +x_71 = l_Lean_IR_instInhabitedCtorFieldInfo; +x_72 = lean_array_get(x_71, x_1, x_6); +switch (lean_obj_tag(x_72)) { +case 1: +{ +uint8_t x_73; +x_73 = !lean_is_exclusive(x_72); +if (x_73 == 0) +{ +lean_object* x_74; lean_object* x_75; +x_74 = lean_ctor_get(x_72, 0); +lean_dec(x_74); +lean_ctor_set_tag(x_72, 0); +lean_ctor_set(x_72, 0, x_70); +x_75 = lean_array_push(x_5, x_72); +lean_ctor_set_tag(x_68, 1); +lean_ctor_set(x_68, 0, x_75); +lean_ctor_set(x_46, 1, x_9); +lean_ctor_set(x_46, 0, x_68); +x_17 = x_46; +x_18 = x_12; +goto block_25; +} +else +{ +lean_object* x_76; lean_object* x_77; +lean_dec(x_72); +x_76 = lean_alloc_ctor(0, 1, 0); +lean_ctor_set(x_76, 0, x_70); +x_77 = lean_array_push(x_5, x_76); +lean_ctor_set_tag(x_68, 1); +lean_ctor_set(x_68, 0, x_77); +lean_ctor_set(x_46, 1, x_9); +lean_ctor_set(x_46, 0, x_68); +x_17 = x_46; +x_18 = x_12; +goto block_25; +} +} +case 2: +{ +uint8_t x_78; +lean_free_object(x_68); +lean_dec(x_70); +x_78 = !lean_is_exclusive(x_72); +if (x_78 == 0) +{ +lean_object* x_79; +x_79 = lean_ctor_get(x_72, 0); +lean_dec(x_79); +lean_ctor_set_tag(x_72, 1); +lean_ctor_set(x_72, 0, x_5); +lean_ctor_set(x_46, 1, x_9); +lean_ctor_set(x_46, 0, x_72); +x_17 = x_46; +x_18 = x_12; +goto block_25; +} +else +{ +lean_object* x_80; +lean_dec(x_72); +x_80 = lean_alloc_ctor(1, 1, 0); +lean_ctor_set(x_80, 0, x_5); +lean_ctor_set(x_46, 1, x_9); +lean_ctor_set(x_46, 0, x_80); +x_17 = x_46; +x_18 = x_12; +goto block_25; +} +} +default: +{ +lean_dec(x_72); +lean_dec(x_70); +lean_ctor_set_tag(x_68, 1); 
+lean_ctor_set(x_68, 0, x_5); +lean_ctor_set(x_46, 1, x_9); +lean_ctor_set(x_46, 0, x_68); +x_17 = x_46; +x_18 = x_12; +goto block_25; +} +} +} +else +{ +lean_object* x_81; lean_object* x_82; lean_object* x_83; +x_81 = lean_ctor_get(x_68, 0); +lean_inc(x_81); +lean_dec(x_68); +x_82 = l_Lean_IR_instInhabitedCtorFieldInfo; +x_83 = lean_array_get(x_82, x_1, x_6); +switch (lean_obj_tag(x_83)) { +case 1: +{ +lean_object* x_84; lean_object* x_85; lean_object* x_86; lean_object* x_87; +if (lean_is_exclusive(x_83)) { + lean_ctor_release(x_83, 0); + x_84 = x_83; +} else { + lean_dec_ref(x_83); + x_84 = lean_box(0); +} +if (lean_is_scalar(x_84)) { + x_85 = lean_alloc_ctor(0, 1, 0); +} else { + x_85 = x_84; + lean_ctor_set_tag(x_85, 0); +} +lean_ctor_set(x_85, 0, x_81); +x_86 = lean_array_push(x_5, x_85); +x_87 = lean_alloc_ctor(1, 1, 0); +lean_ctor_set(x_87, 0, x_86); +lean_ctor_set(x_46, 1, x_9); +lean_ctor_set(x_46, 0, x_87); +x_17 = x_46; +x_18 = x_12; +goto block_25; +} +case 2: +{ +lean_object* x_88; lean_object* x_89; +lean_dec(x_81); +if (lean_is_exclusive(x_83)) { + lean_ctor_release(x_83, 0); + x_88 = x_83; +} else { + lean_dec_ref(x_83); + x_88 = lean_box(0); +} +if (lean_is_scalar(x_88)) { + x_89 = lean_alloc_ctor(1, 1, 0); +} else { + x_89 = x_88; + lean_ctor_set_tag(x_89, 1); +} +lean_ctor_set(x_89, 0, x_5); +lean_ctor_set(x_46, 1, x_9); +lean_ctor_set(x_46, 0, x_89); +x_17 = x_46; +x_18 = x_12; +goto block_25; +} +default: +{ +lean_object* x_90; +lean_dec(x_83); +lean_dec(x_81); +x_90 = lean_alloc_ctor(1, 1, 0); +lean_ctor_set(x_90, 0, x_5); +lean_ctor_set(x_46, 1, x_9); +lean_ctor_set(x_46, 0, x_90); +x_17 = x_46; +x_18 = x_12; +goto block_25; +} +} +} +} +case 1: +{ +uint8_t x_91; +lean_free_object(x_66); +x_91 = !lean_is_exclusive(x_68); +if (x_91 == 0) +{ +lean_object* x_92; +x_92 = lean_ctor_get(x_68, 0); +lean_dec(x_92); +lean_ctor_set(x_68, 0, x_5); +lean_ctor_set(x_46, 1, x_9); +lean_ctor_set(x_46, 0, x_68); +x_17 = x_46; +x_18 = x_12; +goto block_25; +} +else +{ +lean_object* x_93; +lean_dec(x_68); +x_93 = lean_alloc_ctor(1, 1, 0); +lean_ctor_set(x_93, 0, x_5); +lean_ctor_set(x_46, 1, x_9); +lean_ctor_set(x_46, 0, x_93); +x_17 = x_46; +x_18 = x_12; +goto block_25; +} +} +default: +{ +lean_ctor_set(x_66, 0, x_5); +lean_ctor_set(x_46, 1, x_9); +lean_ctor_set(x_46, 0, x_66); +x_17 = x_46; +x_18 = x_12; +goto block_25; +} +} +} +else +{ +lean_object* x_94; +x_94 = lean_ctor_get(x_66, 0); +lean_inc(x_94); +lean_dec(x_66); +switch (lean_obj_tag(x_94)) { +case 0: +{ +lean_object* x_95; lean_object* x_96; lean_object* x_97; lean_object* x_98; +x_95 = lean_ctor_get(x_94, 0); +lean_inc(x_95); +if (lean_is_exclusive(x_94)) { + lean_ctor_release(x_94, 0); + x_96 = x_94; +} else { + lean_dec_ref(x_94); + x_96 = lean_box(0); +} +x_97 = l_Lean_IR_instInhabitedCtorFieldInfo; +x_98 = lean_array_get(x_97, x_1, x_6); +switch (lean_obj_tag(x_98)) { +case 1: +{ +lean_object* x_99; lean_object* x_100; lean_object* x_101; lean_object* x_102; +if (lean_is_exclusive(x_98)) { + lean_ctor_release(x_98, 0); + x_99 = x_98; +} else { + lean_dec_ref(x_98); + x_99 = lean_box(0); +} +if (lean_is_scalar(x_99)) { + x_100 = lean_alloc_ctor(0, 1, 0); +} else { + x_100 = x_99; + lean_ctor_set_tag(x_100, 0); +} +lean_ctor_set(x_100, 0, x_95); +x_101 = lean_array_push(x_5, x_100); +if (lean_is_scalar(x_96)) { + x_102 = lean_alloc_ctor(1, 1, 0); +} else { + x_102 = x_96; + lean_ctor_set_tag(x_102, 1); +} +lean_ctor_set(x_102, 0, x_101); +lean_ctor_set(x_46, 1, x_9); +lean_ctor_set(x_46, 0, x_102); +x_17 = x_46; +x_18 
= x_12; +goto block_25; +} +case 2: +{ +lean_object* x_103; lean_object* x_104; +lean_dec(x_96); +lean_dec(x_95); +if (lean_is_exclusive(x_98)) { + lean_ctor_release(x_98, 0); + x_103 = x_98; +} else { + lean_dec_ref(x_98); + x_103 = lean_box(0); +} +if (lean_is_scalar(x_103)) { + x_104 = lean_alloc_ctor(1, 1, 0); +} else { + x_104 = x_103; + lean_ctor_set_tag(x_104, 1); +} +lean_ctor_set(x_104, 0, x_5); +lean_ctor_set(x_46, 1, x_9); +lean_ctor_set(x_46, 0, x_104); +x_17 = x_46; +x_18 = x_12; +goto block_25; +} +default: +{ +lean_object* x_105; +lean_dec(x_98); +lean_dec(x_95); +if (lean_is_scalar(x_96)) { + x_105 = lean_alloc_ctor(1, 1, 0); +} else { + x_105 = x_96; + lean_ctor_set_tag(x_105, 1); +} +lean_ctor_set(x_105, 0, x_5); +lean_ctor_set(x_46, 1, x_9); +lean_ctor_set(x_46, 0, x_105); +x_17 = x_46; +x_18 = x_12; +goto block_25; +} +} +} +case 1: +{ +lean_object* x_106; lean_object* x_107; +if (lean_is_exclusive(x_94)) { + lean_ctor_release(x_94, 0); + x_106 = x_94; +} else { + lean_dec_ref(x_94); + x_106 = lean_box(0); +} +if (lean_is_scalar(x_106)) { + x_107 = lean_alloc_ctor(1, 1, 0); +} else { + x_107 = x_106; +} +lean_ctor_set(x_107, 0, x_5); +lean_ctor_set(x_46, 1, x_9); +lean_ctor_set(x_46, 0, x_107); +x_17 = x_46; +x_18 = x_12; +goto block_25; +} +default: +{ +lean_object* x_108; +x_108 = lean_alloc_ctor(1, 1, 0); +lean_ctor_set(x_108, 0, x_5); +lean_ctor_set(x_46, 1, x_9); +lean_ctor_set(x_46, 0, x_108); +x_17 = x_46; +x_18 = x_12; +goto block_25; +} +} +} +} +} +else +{ +lean_object* x_109; lean_object* x_110; lean_object* x_111; uint64_t x_112; uint64_t x_113; uint64_t x_114; uint64_t x_115; uint64_t x_116; uint64_t x_117; uint64_t x_118; size_t x_119; size_t x_120; size_t x_121; size_t x_122; size_t x_123; lean_object* x_124; lean_object* x_125; +x_109 = lean_ctor_get(x_27, 0); +x_110 = lean_ctor_get(x_46, 1); +lean_inc(x_110); +lean_dec(x_46); +x_111 = lean_array_get_size(x_110); +x_112 = l___private_Lean_Expr_0__Lean_hashFVarId____x40_Lean_Expr___hyg_1730_(x_109); +x_113 = 32; +x_114 = lean_uint64_shift_right(x_112, x_113); +x_115 = lean_uint64_xor(x_112, x_114); +x_116 = 16; +x_117 = lean_uint64_shift_right(x_115, x_116); +x_118 = lean_uint64_xor(x_115, x_117); +x_119 = lean_uint64_to_usize(x_118); +x_120 = lean_usize_of_nat(x_111); +lean_dec(x_111); +x_121 = 1; +x_122 = lean_usize_sub(x_120, x_121); +x_123 = lean_usize_land(x_119, x_122); +x_124 = lean_array_uget(x_110, x_123); +lean_dec(x_110); +x_125 = l_Std_DHashMap_Internal_AssocList_get_x3f___at_Lean_IR_ToIR_lowerArg___spec__1(x_109, x_124); +lean_dec(x_124); +lean_dec(x_109); +if (lean_obj_tag(x_125) == 0) +{ +lean_object* x_126; +lean_ctor_set(x_27, 0, x_5); +x_126 = lean_alloc_ctor(0, 2, 0); +lean_ctor_set(x_126, 0, x_27); +lean_ctor_set(x_126, 1, x_9); +x_17 = x_126; +x_18 = x_12; +goto block_25; +} +else +{ +lean_object* x_127; lean_object* x_128; +lean_free_object(x_27); +x_127 = lean_ctor_get(x_125, 0); +lean_inc(x_127); +if (lean_is_exclusive(x_125)) { + lean_ctor_release(x_125, 0); + x_128 = x_125; +} else { + lean_dec_ref(x_125); + x_128 = lean_box(0); +} +switch (lean_obj_tag(x_127)) { +case 0: +{ +lean_object* x_129; lean_object* x_130; lean_object* x_131; lean_object* x_132; +lean_dec(x_128); +x_129 = lean_ctor_get(x_127, 0); +lean_inc(x_129); +if (lean_is_exclusive(x_127)) { + lean_ctor_release(x_127, 0); + x_130 = x_127; +} else { + lean_dec_ref(x_127); + x_130 = lean_box(0); +} +x_131 = l_Lean_IR_instInhabitedCtorFieldInfo; +x_132 = lean_array_get(x_131, x_1, x_6); +switch (lean_obj_tag(x_132)) { 
+case 1: +{ +lean_object* x_133; lean_object* x_134; lean_object* x_135; lean_object* x_136; lean_object* x_137; +if (lean_is_exclusive(x_132)) { + lean_ctor_release(x_132, 0); + x_133 = x_132; +} else { + lean_dec_ref(x_132); + x_133 = lean_box(0); +} +if (lean_is_scalar(x_133)) { + x_134 = lean_alloc_ctor(0, 1, 0); +} else { + x_134 = x_133; + lean_ctor_set_tag(x_134, 0); +} +lean_ctor_set(x_134, 0, x_129); +x_135 = lean_array_push(x_5, x_134); +if (lean_is_scalar(x_130)) { + x_136 = lean_alloc_ctor(1, 1, 0); +} else { + x_136 = x_130; + lean_ctor_set_tag(x_136, 1); +} +lean_ctor_set(x_136, 0, x_135); +x_137 = lean_alloc_ctor(0, 2, 0); +lean_ctor_set(x_137, 0, x_136); +lean_ctor_set(x_137, 1, x_9); +x_17 = x_137; +x_18 = x_12; +goto block_25; +} +case 2: +{ +lean_object* x_138; lean_object* x_139; lean_object* x_140; +lean_dec(x_130); +lean_dec(x_129); +if (lean_is_exclusive(x_132)) { + lean_ctor_release(x_132, 0); + x_138 = x_132; +} else { + lean_dec_ref(x_132); + x_138 = lean_box(0); +} +if (lean_is_scalar(x_138)) { + x_139 = lean_alloc_ctor(1, 1, 0); +} else { + x_139 = x_138; + lean_ctor_set_tag(x_139, 1); +} +lean_ctor_set(x_139, 0, x_5); +x_140 = lean_alloc_ctor(0, 2, 0); +lean_ctor_set(x_140, 0, x_139); +lean_ctor_set(x_140, 1, x_9); +x_17 = x_140; +x_18 = x_12; +goto block_25; +} +default: +{ +lean_object* x_141; lean_object* x_142; +lean_dec(x_132); +lean_dec(x_129); +if (lean_is_scalar(x_130)) { + x_141 = lean_alloc_ctor(1, 1, 0); +} else { + x_141 = x_130; + lean_ctor_set_tag(x_141, 1); +} +lean_ctor_set(x_141, 0, x_5); +x_142 = lean_alloc_ctor(0, 2, 0); +lean_ctor_set(x_142, 0, x_141); +lean_ctor_set(x_142, 1, x_9); +x_17 = x_142; +x_18 = x_12; +goto block_25; +} +} +} +case 1: +{ +lean_object* x_143; lean_object* x_144; lean_object* x_145; +lean_dec(x_128); +if (lean_is_exclusive(x_127)) { + lean_ctor_release(x_127, 0); + x_143 = x_127; +} else { + lean_dec_ref(x_127); + x_143 = lean_box(0); +} +if (lean_is_scalar(x_143)) { + x_144 = lean_alloc_ctor(1, 1, 0); +} else { + x_144 = x_143; +} +lean_ctor_set(x_144, 0, x_5); +x_145 = lean_alloc_ctor(0, 2, 0); +lean_ctor_set(x_145, 0, x_144); +lean_ctor_set(x_145, 1, x_9); +x_17 = x_145; +x_18 = x_12; +goto block_25; +} +default: +{ +lean_object* x_146; lean_object* x_147; +if (lean_is_scalar(x_128)) { + x_146 = lean_alloc_ctor(1, 1, 0); +} else { + x_146 = x_128; +} +lean_ctor_set(x_146, 0, x_5); +x_147 = lean_alloc_ctor(0, 2, 0); +lean_ctor_set(x_147, 0, x_146); +lean_ctor_set(x_147, 1, x_9); +x_17 = x_147; +x_18 = x_12; +goto block_25; +} +} +} +} +} +else +{ +lean_object* x_148; lean_object* x_149; lean_object* x_150; lean_object* x_151; uint64_t x_152; uint64_t x_153; uint64_t x_154; uint64_t x_155; uint64_t x_156; uint64_t x_157; uint64_t x_158; size_t x_159; size_t x_160; size_t x_161; size_t x_162; size_t x_163; lean_object* x_164; lean_object* x_165; +x_148 = lean_ctor_get(x_27, 0); +lean_inc(x_148); +lean_dec(x_27); +x_149 = lean_ctor_get(x_46, 1); +lean_inc(x_149); +if (lean_is_exclusive(x_46)) { + lean_ctor_release(x_46, 0); + lean_ctor_release(x_46, 1); + x_150 = x_46; +} else { + lean_dec_ref(x_46); + x_150 = lean_box(0); +} +x_151 = lean_array_get_size(x_149); +x_152 = l___private_Lean_Expr_0__Lean_hashFVarId____x40_Lean_Expr___hyg_1730_(x_148); +x_153 = 32; +x_154 = lean_uint64_shift_right(x_152, x_153); +x_155 = lean_uint64_xor(x_152, x_154); +x_156 = 16; +x_157 = lean_uint64_shift_right(x_155, x_156); +x_158 = lean_uint64_xor(x_155, x_157); +x_159 = lean_uint64_to_usize(x_158); +x_160 = lean_usize_of_nat(x_151); 
+lean_dec(x_151); +x_161 = 1; +x_162 = lean_usize_sub(x_160, x_161); +x_163 = lean_usize_land(x_159, x_162); +x_164 = lean_array_uget(x_149, x_163); +lean_dec(x_149); +x_165 = l_Std_DHashMap_Internal_AssocList_get_x3f___at_Lean_IR_ToIR_lowerArg___spec__1(x_148, x_164); +lean_dec(x_164); +lean_dec(x_148); +if (lean_obj_tag(x_165) == 0) +{ +lean_object* x_166; lean_object* x_167; +x_166 = lean_alloc_ctor(1, 1, 0); +lean_ctor_set(x_166, 0, x_5); +if (lean_is_scalar(x_150)) { + x_167 = lean_alloc_ctor(0, 2, 0); +} else { + x_167 = x_150; +} +lean_ctor_set(x_167, 0, x_166); +lean_ctor_set(x_167, 1, x_9); +x_17 = x_167; +x_18 = x_12; +goto block_25; +} +else +{ +lean_object* x_168; lean_object* x_169; +x_168 = lean_ctor_get(x_165, 0); +lean_inc(x_168); +if (lean_is_exclusive(x_165)) { + lean_ctor_release(x_165, 0); + x_169 = x_165; +} else { + lean_dec_ref(x_165); + x_169 = lean_box(0); +} +switch (lean_obj_tag(x_168)) { +case 0: +{ +lean_object* x_170; lean_object* x_171; lean_object* x_172; lean_object* x_173; +lean_dec(x_169); +x_170 = lean_ctor_get(x_168, 0); +lean_inc(x_170); +if (lean_is_exclusive(x_168)) { + lean_ctor_release(x_168, 0); + x_171 = x_168; +} else { + lean_dec_ref(x_168); + x_171 = lean_box(0); +} +x_172 = l_Lean_IR_instInhabitedCtorFieldInfo; +x_173 = lean_array_get(x_172, x_1, x_6); +switch (lean_obj_tag(x_173)) { +case 1: +{ +lean_object* x_174; lean_object* x_175; lean_object* x_176; lean_object* x_177; lean_object* x_178; +if (lean_is_exclusive(x_173)) { + lean_ctor_release(x_173, 0); + x_174 = x_173; +} else { + lean_dec_ref(x_173); + x_174 = lean_box(0); +} +if (lean_is_scalar(x_174)) { + x_175 = lean_alloc_ctor(0, 1, 0); +} else { + x_175 = x_174; + lean_ctor_set_tag(x_175, 0); +} +lean_ctor_set(x_175, 0, x_170); +x_176 = lean_array_push(x_5, x_175); +if (lean_is_scalar(x_171)) { + x_177 = lean_alloc_ctor(1, 1, 0); +} else { + x_177 = x_171; + lean_ctor_set_tag(x_177, 1); +} +lean_ctor_set(x_177, 0, x_176); +if (lean_is_scalar(x_150)) { + x_178 = lean_alloc_ctor(0, 2, 0); +} else { + x_178 = x_150; +} +lean_ctor_set(x_178, 0, x_177); +lean_ctor_set(x_178, 1, x_9); +x_17 = x_178; +x_18 = x_12; +goto block_25; +} +case 2: +{ +lean_object* x_179; lean_object* x_180; lean_object* x_181; +lean_dec(x_171); +lean_dec(x_170); +if (lean_is_exclusive(x_173)) { + lean_ctor_release(x_173, 0); + x_179 = x_173; +} else { + lean_dec_ref(x_173); + x_179 = lean_box(0); +} +if (lean_is_scalar(x_179)) { + x_180 = lean_alloc_ctor(1, 1, 0); +} else { + x_180 = x_179; + lean_ctor_set_tag(x_180, 1); +} +lean_ctor_set(x_180, 0, x_5); +if (lean_is_scalar(x_150)) { + x_181 = lean_alloc_ctor(0, 2, 0); +} else { + x_181 = x_150; +} +lean_ctor_set(x_181, 0, x_180); +lean_ctor_set(x_181, 1, x_9); +x_17 = x_181; +x_18 = x_12; +goto block_25; +} +default: +{ +lean_object* x_182; lean_object* x_183; +lean_dec(x_173); +lean_dec(x_170); +if (lean_is_scalar(x_171)) { + x_182 = lean_alloc_ctor(1, 1, 0); +} else { + x_182 = x_171; + lean_ctor_set_tag(x_182, 1); +} +lean_ctor_set(x_182, 0, x_5); +if (lean_is_scalar(x_150)) { + x_183 = lean_alloc_ctor(0, 2, 0); +} else { + x_183 = x_150; +} +lean_ctor_set(x_183, 0, x_182); +lean_ctor_set(x_183, 1, x_9); +x_17 = x_183; +x_18 = x_12; +goto block_25; +} +} +} +case 1: +{ +lean_object* x_184; lean_object* x_185; lean_object* x_186; +lean_dec(x_169); +if (lean_is_exclusive(x_168)) { + lean_ctor_release(x_168, 0); + x_184 = x_168; +} else { + lean_dec_ref(x_168); + x_184 = lean_box(0); +} +if (lean_is_scalar(x_184)) { + x_185 = lean_alloc_ctor(1, 1, 0); +} else 
{ + x_185 = x_184; +} +lean_ctor_set(x_185, 0, x_5); +if (lean_is_scalar(x_150)) { + x_186 = lean_alloc_ctor(0, 2, 0); +} else { + x_186 = x_150; +} +lean_ctor_set(x_186, 0, x_185); +lean_ctor_set(x_186, 1, x_9); +x_17 = x_186; +x_18 = x_12; +goto block_25; +} +default: +{ +lean_object* x_187; lean_object* x_188; +if (lean_is_scalar(x_169)) { + x_187 = lean_alloc_ctor(1, 1, 0); +} else { + x_187 = x_169; +} +lean_ctor_set(x_187, 0, x_5); +if (lean_is_scalar(x_150)) { + x_188 = lean_alloc_ctor(0, 2, 0); +} else { + x_188 = x_150; +} +lean_ctor_set(x_188, 0, x_187); +lean_ctor_set(x_188, 1, x_9); +x_17 = x_188; +x_18 = x_12; +goto block_25; +} +} +} +} +} +default: +{ +uint8_t x_189; +x_189 = !lean_is_exclusive(x_27); +if (x_189 == 0) +{ +lean_object* x_190; lean_object* x_191; lean_object* x_192; +x_190 = lean_ctor_get(x_27, 0); +lean_dec(x_190); +x_191 = l_Lean_IR_instInhabitedCtorFieldInfo; +x_192 = lean_array_get(x_191, x_1, x_6); +switch (lean_obj_tag(x_192)) { +case 1: +{ +uint8_t x_193; +lean_free_object(x_27); +x_193 = !lean_is_exclusive(x_192); +if (x_193 == 0) +{ +lean_object* x_194; lean_object* x_195; lean_object* x_196; lean_object* x_197; +x_194 = lean_ctor_get(x_192, 0); +lean_dec(x_194); +x_195 = lean_box(1); +x_196 = lean_array_push(x_5, x_195); +lean_ctor_set(x_192, 0, x_196); +x_197 = lean_alloc_ctor(0, 2, 0); +lean_ctor_set(x_197, 0, x_192); +lean_ctor_set(x_197, 1, x_9); +x_17 = x_197; +x_18 = x_12; +goto block_25; +} +else +{ +lean_object* x_198; lean_object* x_199; lean_object* x_200; lean_object* x_201; +lean_dec(x_192); +x_198 = lean_box(1); +x_199 = lean_array_push(x_5, x_198); +x_200 = lean_alloc_ctor(1, 1, 0); +lean_ctor_set(x_200, 0, x_199); +x_201 = lean_alloc_ctor(0, 2, 0); +lean_ctor_set(x_201, 0, x_200); +lean_ctor_set(x_201, 1, x_9); +x_17 = x_201; +x_18 = x_12; +goto block_25; +} +} +case 2: +{ +uint8_t x_202; +lean_free_object(x_27); +x_202 = !lean_is_exclusive(x_192); +if (x_202 == 0) +{ +lean_object* x_203; lean_object* x_204; +x_203 = lean_ctor_get(x_192, 0); +lean_dec(x_203); +lean_ctor_set_tag(x_192, 1); +lean_ctor_set(x_192, 0, x_5); +x_204 = lean_alloc_ctor(0, 2, 0); +lean_ctor_set(x_204, 0, x_192); +lean_ctor_set(x_204, 1, x_9); +x_17 = x_204; +x_18 = x_12; +goto block_25; +} +else +{ +lean_object* x_205; lean_object* x_206; +lean_dec(x_192); +x_205 = lean_alloc_ctor(1, 1, 0); +lean_ctor_set(x_205, 0, x_5); +x_206 = lean_alloc_ctor(0, 2, 0); +lean_ctor_set(x_206, 0, x_205); +lean_ctor_set(x_206, 1, x_9); +x_17 = x_206; +x_18 = x_12; +goto block_25; +} +} +default: +{ +lean_object* x_207; +lean_dec(x_192); +lean_ctor_set_tag(x_27, 1); +lean_ctor_set(x_27, 0, x_5); +x_207 = lean_alloc_ctor(0, 2, 0); +lean_ctor_set(x_207, 0, x_27); +lean_ctor_set(x_207, 1, x_9); +x_17 = x_207; +x_18 = x_12; +goto block_25; +} +} +} +else +{ +lean_object* x_208; lean_object* x_209; +lean_dec(x_27); +x_208 = l_Lean_IR_instInhabitedCtorFieldInfo; +x_209 = lean_array_get(x_208, x_1, x_6); +switch (lean_obj_tag(x_209)) { +case 1: +{ +lean_object* x_210; lean_object* x_211; lean_object* x_212; lean_object* x_213; lean_object* x_214; +if (lean_is_exclusive(x_209)) { + lean_ctor_release(x_209, 0); + x_210 = x_209; +} else { + lean_dec_ref(x_209); + x_210 = lean_box(0); +} +x_211 = lean_box(1); +x_212 = lean_array_push(x_5, x_211); +if (lean_is_scalar(x_210)) { + x_213 = lean_alloc_ctor(1, 1, 0); +} else { + x_213 = x_210; +} +lean_ctor_set(x_213, 0, x_212); +x_214 = lean_alloc_ctor(0, 2, 0); +lean_ctor_set(x_214, 0, x_213); +lean_ctor_set(x_214, 1, x_9); +x_17 = x_214; +x_18 
= x_12; +goto block_25; +} +case 2: +{ +lean_object* x_215; lean_object* x_216; lean_object* x_217; +if (lean_is_exclusive(x_209)) { + lean_ctor_release(x_209, 0); + x_215 = x_209; +} else { + lean_dec_ref(x_209); + x_215 = lean_box(0); +} +if (lean_is_scalar(x_215)) { + x_216 = lean_alloc_ctor(1, 1, 0); +} else { + x_216 = x_215; + lean_ctor_set_tag(x_216, 1); +} +lean_ctor_set(x_216, 0, x_5); +x_217 = lean_alloc_ctor(0, 2, 0); +lean_ctor_set(x_217, 0, x_216); +lean_ctor_set(x_217, 1, x_9); +x_17 = x_217; +x_18 = x_12; +goto block_25; +} +default: +{ +lean_object* x_218; lean_object* x_219; +lean_dec(x_209); +x_218 = lean_alloc_ctor(1, 1, 0); +lean_ctor_set(x_218, 0, x_5); +x_219 = lean_alloc_ctor(0, 2, 0); +lean_ctor_set(x_219, 0, x_218); +lean_ctor_set(x_219, 1, x_9); +x_17 = x_219; +x_18 = x_12; +goto block_25; +} +} +} +} +} +block_25: +{ +lean_object* x_19; lean_object* x_20; lean_object* x_21; lean_object* x_22; lean_object* x_23; +x_19 = lean_ctor_get(x_17, 0); +lean_inc(x_19); +x_20 = lean_ctor_get(x_17, 1); +lean_inc(x_20); +lean_dec(x_17); +x_21 = lean_ctor_get(x_19, 0); +lean_inc(x_21); +lean_dec(x_19); +x_22 = lean_ctor_get(x_4, 2); +x_23 = lean_nat_add(x_6, x_22); +lean_dec(x_6); +x_5 = x_21; +x_6 = x_23; +x_7 = lean_box(0); +x_8 = lean_box(0); +x_9 = x_20; +x_12 = x_18; +goto _start; +} +} +} +} +LEAN_EXPORT lean_object* l_Std_Range_forIn_x27_loop___at_Lean_IR_ToIR_lowerLet___spec__6(lean_object* x_1, lean_object* x_2, lean_object* x_3, lean_object* x_4, lean_object* x_5, lean_object* x_6, lean_object* x_7, lean_object* x_8, lean_object* x_9, lean_object* x_10, lean_object* x_11, lean_object* x_12) { +_start: +{ +lean_object* x_13; uint8_t x_14; +x_13 = lean_ctor_get(x_4, 1); +x_14 = lean_nat_dec_lt(x_6, x_13); +if (x_14 == 0) +{ +lean_object* x_15; lean_object* x_16; +lean_dec(x_6); +x_15 = lean_alloc_ctor(0, 2, 0); +lean_ctor_set(x_15, 0, x_5); +lean_ctor_set(x_15, 1, x_9); +x_16 = lean_alloc_ctor(0, 2, 0); +lean_ctor_set(x_16, 0, x_15); +lean_ctor_set(x_16, 1, x_12); +return x_16; +} +else +{ +lean_object* x_17; lean_object* x_18; lean_object* x_26; lean_object* x_27; +x_26 = l_Lean_Compiler_LCNF_instInhabitedArg; +x_27 = lean_array_get(x_26, x_2, x_6); +switch (lean_obj_tag(x_27)) { +case 0: +{ +lean_object* x_28; lean_object* x_29; +x_28 = l_Lean_IR_instInhabitedCtorFieldInfo; +x_29 = lean_array_get(x_28, x_1, x_6); +switch (lean_obj_tag(x_29)) { +case 1: +{ +uint8_t x_30; +x_30 = !lean_is_exclusive(x_29); +if (x_30 == 0) +{ +lean_object* x_31; lean_object* x_32; lean_object* x_33; lean_object* x_34; +x_31 = lean_ctor_get(x_29, 0); +lean_dec(x_31); +x_32 = lean_box(1); +x_33 = lean_array_push(x_5, x_32); +lean_ctor_set(x_29, 0, x_33); +x_34 = lean_alloc_ctor(0, 2, 0); +lean_ctor_set(x_34, 0, x_29); +lean_ctor_set(x_34, 1, x_9); +x_17 = x_34; +x_18 = x_12; +goto block_25; +} +else +{ +lean_object* x_35; lean_object* x_36; lean_object* x_37; lean_object* x_38; +lean_dec(x_29); +x_35 = lean_box(1); +x_36 = lean_array_push(x_5, x_35); +x_37 = lean_alloc_ctor(1, 1, 0); +lean_ctor_set(x_37, 0, x_36); +x_38 = lean_alloc_ctor(0, 2, 0); +lean_ctor_set(x_38, 0, x_37); +lean_ctor_set(x_38, 1, x_9); +x_17 = x_38; +x_18 = x_12; +goto block_25; +} +} +case 2: +{ +uint8_t x_39; +x_39 = !lean_is_exclusive(x_29); +if (x_39 == 0) +{ +lean_object* x_40; lean_object* x_41; +x_40 = lean_ctor_get(x_29, 0); +lean_dec(x_40); +lean_ctor_set_tag(x_29, 1); +lean_ctor_set(x_29, 0, x_5); +x_41 = lean_alloc_ctor(0, 2, 0); +lean_ctor_set(x_41, 0, x_29); +lean_ctor_set(x_41, 1, x_9); +x_17 = x_41; 
+x_18 = x_12; +goto block_25; +} +else +{ +lean_object* x_42; lean_object* x_43; +lean_dec(x_29); +x_42 = lean_alloc_ctor(1, 1, 0); +lean_ctor_set(x_42, 0, x_5); +x_43 = lean_alloc_ctor(0, 2, 0); +lean_ctor_set(x_43, 0, x_42); +lean_ctor_set(x_43, 1, x_9); +x_17 = x_43; +x_18 = x_12; +goto block_25; +} +} +default: +{ +lean_object* x_44; lean_object* x_45; +lean_dec(x_29); +x_44 = lean_alloc_ctor(1, 1, 0); +lean_ctor_set(x_44, 0, x_5); +x_45 = lean_alloc_ctor(0, 2, 0); +lean_ctor_set(x_45, 0, x_44); +lean_ctor_set(x_45, 1, x_9); +x_17 = x_45; +x_18 = x_12; +goto block_25; +} +} +} +case 1: +{ +lean_object* x_46; uint8_t x_47; +x_46 = lean_ctor_get(x_9, 0); +lean_inc(x_46); +x_47 = !lean_is_exclusive(x_27); +if (x_47 == 0) +{ +uint8_t x_48; +x_48 = !lean_is_exclusive(x_46); +if (x_48 == 0) +{ +lean_object* x_49; lean_object* x_50; lean_object* x_51; lean_object* x_52; uint64_t x_53; uint64_t x_54; uint64_t x_55; uint64_t x_56; uint64_t x_57; uint64_t x_58; uint64_t x_59; size_t x_60; size_t x_61; size_t x_62; size_t x_63; size_t x_64; lean_object* x_65; lean_object* x_66; +x_49 = lean_ctor_get(x_27, 0); +x_50 = lean_ctor_get(x_46, 1); +x_51 = lean_ctor_get(x_46, 0); +lean_dec(x_51); +x_52 = lean_array_get_size(x_50); +x_53 = l___private_Lean_Expr_0__Lean_hashFVarId____x40_Lean_Expr___hyg_1730_(x_49); +x_54 = 32; +x_55 = lean_uint64_shift_right(x_53, x_54); +x_56 = lean_uint64_xor(x_53, x_55); +x_57 = 16; +x_58 = lean_uint64_shift_right(x_56, x_57); +x_59 = lean_uint64_xor(x_56, x_58); +x_60 = lean_uint64_to_usize(x_59); +x_61 = lean_usize_of_nat(x_52); +lean_dec(x_52); +x_62 = 1; +x_63 = lean_usize_sub(x_61, x_62); +x_64 = lean_usize_land(x_60, x_63); +x_65 = lean_array_uget(x_50, x_64); +lean_dec(x_50); +x_66 = l_Std_DHashMap_Internal_AssocList_get_x3f___at_Lean_IR_ToIR_lowerArg___spec__1(x_49, x_65); +lean_dec(x_65); +lean_dec(x_49); +if (lean_obj_tag(x_66) == 0) +{ +lean_ctor_set(x_27, 0, x_5); +lean_ctor_set(x_46, 1, x_9); +lean_ctor_set(x_46, 0, x_27); +x_17 = x_46; +x_18 = x_12; +goto block_25; +} +else +{ +uint8_t x_67; +lean_free_object(x_27); +x_67 = !lean_is_exclusive(x_66); +if (x_67 == 0) +{ +lean_object* x_68; +x_68 = lean_ctor_get(x_66, 0); +switch (lean_obj_tag(x_68)) { +case 0: +{ +uint8_t x_69; +lean_free_object(x_66); +x_69 = !lean_is_exclusive(x_68); +if (x_69 == 0) +{ +lean_object* x_70; lean_object* x_71; lean_object* x_72; +x_70 = lean_ctor_get(x_68, 0); +x_71 = l_Lean_IR_instInhabitedCtorFieldInfo; +x_72 = lean_array_get(x_71, x_1, x_6); +switch (lean_obj_tag(x_72)) { +case 1: +{ +uint8_t x_73; +x_73 = !lean_is_exclusive(x_72); +if (x_73 == 0) +{ +lean_object* x_74; lean_object* x_75; +x_74 = lean_ctor_get(x_72, 0); +lean_dec(x_74); +lean_ctor_set_tag(x_72, 0); +lean_ctor_set(x_72, 0, x_70); +x_75 = lean_array_push(x_5, x_72); +lean_ctor_set_tag(x_68, 1); +lean_ctor_set(x_68, 0, x_75); +lean_ctor_set(x_46, 1, x_9); +lean_ctor_set(x_46, 0, x_68); +x_17 = x_46; +x_18 = x_12; +goto block_25; +} +else +{ +lean_object* x_76; lean_object* x_77; +lean_dec(x_72); +x_76 = lean_alloc_ctor(0, 1, 0); +lean_ctor_set(x_76, 0, x_70); +x_77 = lean_array_push(x_5, x_76); +lean_ctor_set_tag(x_68, 1); +lean_ctor_set(x_68, 0, x_77); +lean_ctor_set(x_46, 1, x_9); +lean_ctor_set(x_46, 0, x_68); +x_17 = x_46; +x_18 = x_12; +goto block_25; +} +} +case 2: +{ +uint8_t x_78; +lean_free_object(x_68); +lean_dec(x_70); +x_78 = !lean_is_exclusive(x_72); +if (x_78 == 0) +{ +lean_object* x_79; +x_79 = lean_ctor_get(x_72, 0); +lean_dec(x_79); +lean_ctor_set_tag(x_72, 1); +lean_ctor_set(x_72, 0, x_5); 
+lean_ctor_set(x_46, 1, x_9); +lean_ctor_set(x_46, 0, x_72); +x_17 = x_46; +x_18 = x_12; +goto block_25; +} +else +{ +lean_object* x_80; +lean_dec(x_72); +x_80 = lean_alloc_ctor(1, 1, 0); +lean_ctor_set(x_80, 0, x_5); +lean_ctor_set(x_46, 1, x_9); +lean_ctor_set(x_46, 0, x_80); +x_17 = x_46; +x_18 = x_12; +goto block_25; +} +} +default: +{ +lean_dec(x_72); +lean_dec(x_70); +lean_ctor_set_tag(x_68, 1); +lean_ctor_set(x_68, 0, x_5); +lean_ctor_set(x_46, 1, x_9); +lean_ctor_set(x_46, 0, x_68); +x_17 = x_46; +x_18 = x_12; +goto block_25; +} +} +} +else +{ +lean_object* x_81; lean_object* x_82; lean_object* x_83; +x_81 = lean_ctor_get(x_68, 0); +lean_inc(x_81); +lean_dec(x_68); +x_82 = l_Lean_IR_instInhabitedCtorFieldInfo; +x_83 = lean_array_get(x_82, x_1, x_6); +switch (lean_obj_tag(x_83)) { +case 1: +{ +lean_object* x_84; lean_object* x_85; lean_object* x_86; lean_object* x_87; +if (lean_is_exclusive(x_83)) { + lean_ctor_release(x_83, 0); + x_84 = x_83; +} else { + lean_dec_ref(x_83); + x_84 = lean_box(0); +} +if (lean_is_scalar(x_84)) { + x_85 = lean_alloc_ctor(0, 1, 0); +} else { + x_85 = x_84; + lean_ctor_set_tag(x_85, 0); +} +lean_ctor_set(x_85, 0, x_81); +x_86 = lean_array_push(x_5, x_85); +x_87 = lean_alloc_ctor(1, 1, 0); +lean_ctor_set(x_87, 0, x_86); +lean_ctor_set(x_46, 1, x_9); +lean_ctor_set(x_46, 0, x_87); +x_17 = x_46; +x_18 = x_12; +goto block_25; +} +case 2: +{ +lean_object* x_88; lean_object* x_89; +lean_dec(x_81); +if (lean_is_exclusive(x_83)) { + lean_ctor_release(x_83, 0); + x_88 = x_83; +} else { + lean_dec_ref(x_83); + x_88 = lean_box(0); +} +if (lean_is_scalar(x_88)) { + x_89 = lean_alloc_ctor(1, 1, 0); +} else { + x_89 = x_88; + lean_ctor_set_tag(x_89, 1); +} +lean_ctor_set(x_89, 0, x_5); +lean_ctor_set(x_46, 1, x_9); +lean_ctor_set(x_46, 0, x_89); +x_17 = x_46; +x_18 = x_12; +goto block_25; +} +default: +{ +lean_object* x_90; +lean_dec(x_83); +lean_dec(x_81); +x_90 = lean_alloc_ctor(1, 1, 0); +lean_ctor_set(x_90, 0, x_5); +lean_ctor_set(x_46, 1, x_9); +lean_ctor_set(x_46, 0, x_90); +x_17 = x_46; +x_18 = x_12; +goto block_25; +} +} +} +} +case 1: +{ +uint8_t x_91; +lean_free_object(x_66); +x_91 = !lean_is_exclusive(x_68); +if (x_91 == 0) +{ +lean_object* x_92; +x_92 = lean_ctor_get(x_68, 0); +lean_dec(x_92); +lean_ctor_set(x_68, 0, x_5); +lean_ctor_set(x_46, 1, x_9); +lean_ctor_set(x_46, 0, x_68); +x_17 = x_46; +x_18 = x_12; +goto block_25; +} +else +{ +lean_object* x_93; +lean_dec(x_68); +x_93 = lean_alloc_ctor(1, 1, 0); +lean_ctor_set(x_93, 0, x_5); +lean_ctor_set(x_46, 1, x_9); +lean_ctor_set(x_46, 0, x_93); +x_17 = x_46; +x_18 = x_12; +goto block_25; +} +} +default: +{ +lean_ctor_set(x_66, 0, x_5); +lean_ctor_set(x_46, 1, x_9); +lean_ctor_set(x_46, 0, x_66); +x_17 = x_46; +x_18 = x_12; +goto block_25; +} +} +} +else +{ +lean_object* x_94; +x_94 = lean_ctor_get(x_66, 0); +lean_inc(x_94); +lean_dec(x_66); +switch (lean_obj_tag(x_94)) { +case 0: +{ +lean_object* x_95; lean_object* x_96; lean_object* x_97; lean_object* x_98; +x_95 = lean_ctor_get(x_94, 0); +lean_inc(x_95); +if (lean_is_exclusive(x_94)) { + lean_ctor_release(x_94, 0); + x_96 = x_94; +} else { + lean_dec_ref(x_94); + x_96 = lean_box(0); +} +x_97 = l_Lean_IR_instInhabitedCtorFieldInfo; +x_98 = lean_array_get(x_97, x_1, x_6); +switch (lean_obj_tag(x_98)) { +case 1: +{ +lean_object* x_99; lean_object* x_100; lean_object* x_101; lean_object* x_102; +if (lean_is_exclusive(x_98)) { + lean_ctor_release(x_98, 0); + x_99 = x_98; +} else { + lean_dec_ref(x_98); + x_99 = lean_box(0); +} +if (lean_is_scalar(x_99)) { 
+ x_100 = lean_alloc_ctor(0, 1, 0); +} else { + x_100 = x_99; + lean_ctor_set_tag(x_100, 0); +} +lean_ctor_set(x_100, 0, x_95); +x_101 = lean_array_push(x_5, x_100); +if (lean_is_scalar(x_96)) { + x_102 = lean_alloc_ctor(1, 1, 0); +} else { + x_102 = x_96; + lean_ctor_set_tag(x_102, 1); +} +lean_ctor_set(x_102, 0, x_101); +lean_ctor_set(x_46, 1, x_9); +lean_ctor_set(x_46, 0, x_102); +x_17 = x_46; +x_18 = x_12; +goto block_25; +} +case 2: +{ +lean_object* x_103; lean_object* x_104; +lean_dec(x_96); +lean_dec(x_95); +if (lean_is_exclusive(x_98)) { + lean_ctor_release(x_98, 0); + x_103 = x_98; +} else { + lean_dec_ref(x_98); + x_103 = lean_box(0); +} +if (lean_is_scalar(x_103)) { + x_104 = lean_alloc_ctor(1, 1, 0); +} else { + x_104 = x_103; + lean_ctor_set_tag(x_104, 1); +} +lean_ctor_set(x_104, 0, x_5); +lean_ctor_set(x_46, 1, x_9); +lean_ctor_set(x_46, 0, x_104); +x_17 = x_46; +x_18 = x_12; +goto block_25; +} +default: +{ +lean_object* x_105; +lean_dec(x_98); +lean_dec(x_95); +if (lean_is_scalar(x_96)) { + x_105 = lean_alloc_ctor(1, 1, 0); +} else { + x_105 = x_96; + lean_ctor_set_tag(x_105, 1); +} +lean_ctor_set(x_105, 0, x_5); +lean_ctor_set(x_46, 1, x_9); +lean_ctor_set(x_46, 0, x_105); +x_17 = x_46; +x_18 = x_12; +goto block_25; +} +} +} +case 1: +{ +lean_object* x_106; lean_object* x_107; +if (lean_is_exclusive(x_94)) { + lean_ctor_release(x_94, 0); + x_106 = x_94; +} else { + lean_dec_ref(x_94); + x_106 = lean_box(0); +} +if (lean_is_scalar(x_106)) { + x_107 = lean_alloc_ctor(1, 1, 0); +} else { + x_107 = x_106; +} +lean_ctor_set(x_107, 0, x_5); +lean_ctor_set(x_46, 1, x_9); +lean_ctor_set(x_46, 0, x_107); +x_17 = x_46; +x_18 = x_12; +goto block_25; +} +default: +{ +lean_object* x_108; +x_108 = lean_alloc_ctor(1, 1, 0); +lean_ctor_set(x_108, 0, x_5); +lean_ctor_set(x_46, 1, x_9); +lean_ctor_set(x_46, 0, x_108); +x_17 = x_46; +x_18 = x_12; +goto block_25; +} +} +} +} +} +else +{ +lean_object* x_109; lean_object* x_110; lean_object* x_111; uint64_t x_112; uint64_t x_113; uint64_t x_114; uint64_t x_115; uint64_t x_116; uint64_t x_117; uint64_t x_118; size_t x_119; size_t x_120; size_t x_121; size_t x_122; size_t x_123; lean_object* x_124; lean_object* x_125; +x_109 = lean_ctor_get(x_27, 0); +x_110 = lean_ctor_get(x_46, 1); +lean_inc(x_110); +lean_dec(x_46); +x_111 = lean_array_get_size(x_110); +x_112 = l___private_Lean_Expr_0__Lean_hashFVarId____x40_Lean_Expr___hyg_1730_(x_109); +x_113 = 32; +x_114 = lean_uint64_shift_right(x_112, x_113); +x_115 = lean_uint64_xor(x_112, x_114); +x_116 = 16; +x_117 = lean_uint64_shift_right(x_115, x_116); +x_118 = lean_uint64_xor(x_115, x_117); +x_119 = lean_uint64_to_usize(x_118); +x_120 = lean_usize_of_nat(x_111); +lean_dec(x_111); +x_121 = 1; +x_122 = lean_usize_sub(x_120, x_121); +x_123 = lean_usize_land(x_119, x_122); +x_124 = lean_array_uget(x_110, x_123); +lean_dec(x_110); +x_125 = l_Std_DHashMap_Internal_AssocList_get_x3f___at_Lean_IR_ToIR_lowerArg___spec__1(x_109, x_124); +lean_dec(x_124); +lean_dec(x_109); +if (lean_obj_tag(x_125) == 0) +{ +lean_object* x_126; +lean_ctor_set(x_27, 0, x_5); +x_126 = lean_alloc_ctor(0, 2, 0); +lean_ctor_set(x_126, 0, x_27); +lean_ctor_set(x_126, 1, x_9); +x_17 = x_126; +x_18 = x_12; +goto block_25; +} +else +{ +lean_object* x_127; lean_object* x_128; +lean_free_object(x_27); +x_127 = lean_ctor_get(x_125, 0); +lean_inc(x_127); +if (lean_is_exclusive(x_125)) { + lean_ctor_release(x_125, 0); + x_128 = x_125; +} else { + lean_dec_ref(x_125); + x_128 = lean_box(0); +} +switch (lean_obj_tag(x_127)) { +case 0: +{ 
+lean_object* x_129; lean_object* x_130; lean_object* x_131; lean_object* x_132; +lean_dec(x_128); +x_129 = lean_ctor_get(x_127, 0); +lean_inc(x_129); +if (lean_is_exclusive(x_127)) { + lean_ctor_release(x_127, 0); + x_130 = x_127; +} else { + lean_dec_ref(x_127); + x_130 = lean_box(0); +} +x_131 = l_Lean_IR_instInhabitedCtorFieldInfo; +x_132 = lean_array_get(x_131, x_1, x_6); +switch (lean_obj_tag(x_132)) { +case 1: +{ +lean_object* x_133; lean_object* x_134; lean_object* x_135; lean_object* x_136; lean_object* x_137; +if (lean_is_exclusive(x_132)) { + lean_ctor_release(x_132, 0); + x_133 = x_132; +} else { + lean_dec_ref(x_132); + x_133 = lean_box(0); +} +if (lean_is_scalar(x_133)) { + x_134 = lean_alloc_ctor(0, 1, 0); +} else { + x_134 = x_133; + lean_ctor_set_tag(x_134, 0); +} +lean_ctor_set(x_134, 0, x_129); +x_135 = lean_array_push(x_5, x_134); +if (lean_is_scalar(x_130)) { + x_136 = lean_alloc_ctor(1, 1, 0); +} else { + x_136 = x_130; + lean_ctor_set_tag(x_136, 1); +} +lean_ctor_set(x_136, 0, x_135); +x_137 = lean_alloc_ctor(0, 2, 0); +lean_ctor_set(x_137, 0, x_136); +lean_ctor_set(x_137, 1, x_9); +x_17 = x_137; +x_18 = x_12; +goto block_25; +} +case 2: +{ +lean_object* x_138; lean_object* x_139; lean_object* x_140; +lean_dec(x_130); +lean_dec(x_129); +if (lean_is_exclusive(x_132)) { + lean_ctor_release(x_132, 0); + x_138 = x_132; +} else { + lean_dec_ref(x_132); + x_138 = lean_box(0); +} +if (lean_is_scalar(x_138)) { + x_139 = lean_alloc_ctor(1, 1, 0); +} else { + x_139 = x_138; + lean_ctor_set_tag(x_139, 1); +} +lean_ctor_set(x_139, 0, x_5); +x_140 = lean_alloc_ctor(0, 2, 0); +lean_ctor_set(x_140, 0, x_139); +lean_ctor_set(x_140, 1, x_9); +x_17 = x_140; +x_18 = x_12; +goto block_25; +} +default: +{ +lean_object* x_141; lean_object* x_142; +lean_dec(x_132); +lean_dec(x_129); +if (lean_is_scalar(x_130)) { + x_141 = lean_alloc_ctor(1, 1, 0); +} else { + x_141 = x_130; + lean_ctor_set_tag(x_141, 1); +} +lean_ctor_set(x_141, 0, x_5); +x_142 = lean_alloc_ctor(0, 2, 0); +lean_ctor_set(x_142, 0, x_141); +lean_ctor_set(x_142, 1, x_9); +x_17 = x_142; +x_18 = x_12; +goto block_25; +} +} +} +case 1: +{ +lean_object* x_143; lean_object* x_144; lean_object* x_145; +lean_dec(x_128); +if (lean_is_exclusive(x_127)) { + lean_ctor_release(x_127, 0); + x_143 = x_127; +} else { + lean_dec_ref(x_127); + x_143 = lean_box(0); +} +if (lean_is_scalar(x_143)) { + x_144 = lean_alloc_ctor(1, 1, 0); +} else { + x_144 = x_143; +} +lean_ctor_set(x_144, 0, x_5); +x_145 = lean_alloc_ctor(0, 2, 0); +lean_ctor_set(x_145, 0, x_144); +lean_ctor_set(x_145, 1, x_9); +x_17 = x_145; +x_18 = x_12; +goto block_25; +} +default: +{ +lean_object* x_146; lean_object* x_147; +if (lean_is_scalar(x_128)) { + x_146 = lean_alloc_ctor(1, 1, 0); +} else { + x_146 = x_128; +} +lean_ctor_set(x_146, 0, x_5); +x_147 = lean_alloc_ctor(0, 2, 0); +lean_ctor_set(x_147, 0, x_146); +lean_ctor_set(x_147, 1, x_9); +x_17 = x_147; +x_18 = x_12; +goto block_25; +} +} +} +} +} +else +{ +lean_object* x_148; lean_object* x_149; lean_object* x_150; lean_object* x_151; uint64_t x_152; uint64_t x_153; uint64_t x_154; uint64_t x_155; uint64_t x_156; uint64_t x_157; uint64_t x_158; size_t x_159; size_t x_160; size_t x_161; size_t x_162; size_t x_163; lean_object* x_164; lean_object* x_165; +x_148 = lean_ctor_get(x_27, 0); +lean_inc(x_148); +lean_dec(x_27); +x_149 = lean_ctor_get(x_46, 1); +lean_inc(x_149); +if (lean_is_exclusive(x_46)) { + lean_ctor_release(x_46, 0); + lean_ctor_release(x_46, 1); + x_150 = x_46; +} else { + lean_dec_ref(x_46); + x_150 = 
lean_box(0); +} +x_151 = lean_array_get_size(x_149); +x_152 = l___private_Lean_Expr_0__Lean_hashFVarId____x40_Lean_Expr___hyg_1730_(x_148); +x_153 = 32; +x_154 = lean_uint64_shift_right(x_152, x_153); +x_155 = lean_uint64_xor(x_152, x_154); +x_156 = 16; +x_157 = lean_uint64_shift_right(x_155, x_156); +x_158 = lean_uint64_xor(x_155, x_157); +x_159 = lean_uint64_to_usize(x_158); +x_160 = lean_usize_of_nat(x_151); +lean_dec(x_151); +x_161 = 1; +x_162 = lean_usize_sub(x_160, x_161); +x_163 = lean_usize_land(x_159, x_162); +x_164 = lean_array_uget(x_149, x_163); +lean_dec(x_149); +x_165 = l_Std_DHashMap_Internal_AssocList_get_x3f___at_Lean_IR_ToIR_lowerArg___spec__1(x_148, x_164); +lean_dec(x_164); +lean_dec(x_148); +if (lean_obj_tag(x_165) == 0) +{ +lean_object* x_166; lean_object* x_167; +x_166 = lean_alloc_ctor(1, 1, 0); +lean_ctor_set(x_166, 0, x_5); +if (lean_is_scalar(x_150)) { + x_167 = lean_alloc_ctor(0, 2, 0); +} else { + x_167 = x_150; +} +lean_ctor_set(x_167, 0, x_166); +lean_ctor_set(x_167, 1, x_9); +x_17 = x_167; +x_18 = x_12; +goto block_25; +} +else +{ +lean_object* x_168; lean_object* x_169; +x_168 = lean_ctor_get(x_165, 0); +lean_inc(x_168); +if (lean_is_exclusive(x_165)) { + lean_ctor_release(x_165, 0); + x_169 = x_165; +} else { + lean_dec_ref(x_165); + x_169 = lean_box(0); +} +switch (lean_obj_tag(x_168)) { +case 0: +{ +lean_object* x_170; lean_object* x_171; lean_object* x_172; lean_object* x_173; +lean_dec(x_169); +x_170 = lean_ctor_get(x_168, 0); +lean_inc(x_170); +if (lean_is_exclusive(x_168)) { + lean_ctor_release(x_168, 0); + x_171 = x_168; +} else { + lean_dec_ref(x_168); + x_171 = lean_box(0); +} +x_172 = l_Lean_IR_instInhabitedCtorFieldInfo; +x_173 = lean_array_get(x_172, x_1, x_6); +switch (lean_obj_tag(x_173)) { +case 1: +{ +lean_object* x_174; lean_object* x_175; lean_object* x_176; lean_object* x_177; lean_object* x_178; +if (lean_is_exclusive(x_173)) { + lean_ctor_release(x_173, 0); + x_174 = x_173; +} else { + lean_dec_ref(x_173); + x_174 = lean_box(0); +} +if (lean_is_scalar(x_174)) { + x_175 = lean_alloc_ctor(0, 1, 0); +} else { + x_175 = x_174; + lean_ctor_set_tag(x_175, 0); +} +lean_ctor_set(x_175, 0, x_170); +x_176 = lean_array_push(x_5, x_175); +if (lean_is_scalar(x_171)) { + x_177 = lean_alloc_ctor(1, 1, 0); +} else { + x_177 = x_171; + lean_ctor_set_tag(x_177, 1); +} +lean_ctor_set(x_177, 0, x_176); +if (lean_is_scalar(x_150)) { + x_178 = lean_alloc_ctor(0, 2, 0); +} else { + x_178 = x_150; +} +lean_ctor_set(x_178, 0, x_177); +lean_ctor_set(x_178, 1, x_9); +x_17 = x_178; +x_18 = x_12; +goto block_25; +} +case 2: +{ +lean_object* x_179; lean_object* x_180; lean_object* x_181; +lean_dec(x_171); +lean_dec(x_170); +if (lean_is_exclusive(x_173)) { + lean_ctor_release(x_173, 0); + x_179 = x_173; +} else { + lean_dec_ref(x_173); + x_179 = lean_box(0); +} +if (lean_is_scalar(x_179)) { + x_180 = lean_alloc_ctor(1, 1, 0); +} else { + x_180 = x_179; + lean_ctor_set_tag(x_180, 1); +} +lean_ctor_set(x_180, 0, x_5); +if (lean_is_scalar(x_150)) { + x_181 = lean_alloc_ctor(0, 2, 0); +} else { + x_181 = x_150; +} +lean_ctor_set(x_181, 0, x_180); +lean_ctor_set(x_181, 1, x_9); +x_17 = x_181; +x_18 = x_12; +goto block_25; +} +default: +{ +lean_object* x_182; lean_object* x_183; +lean_dec(x_173); +lean_dec(x_170); +if (lean_is_scalar(x_171)) { + x_182 = lean_alloc_ctor(1, 1, 0); +} else { + x_182 = x_171; + lean_ctor_set_tag(x_182, 1); +} +lean_ctor_set(x_182, 0, x_5); +if (lean_is_scalar(x_150)) { + x_183 = lean_alloc_ctor(0, 2, 0); +} else { + x_183 = x_150; +} 
+lean_ctor_set(x_183, 0, x_182); +lean_ctor_set(x_183, 1, x_9); +x_17 = x_183; +x_18 = x_12; +goto block_25; +} +} +} +case 1: +{ +lean_object* x_184; lean_object* x_185; lean_object* x_186; +lean_dec(x_169); +if (lean_is_exclusive(x_168)) { + lean_ctor_release(x_168, 0); + x_184 = x_168; +} else { + lean_dec_ref(x_168); + x_184 = lean_box(0); +} +if (lean_is_scalar(x_184)) { + x_185 = lean_alloc_ctor(1, 1, 0); +} else { + x_185 = x_184; +} +lean_ctor_set(x_185, 0, x_5); +if (lean_is_scalar(x_150)) { + x_186 = lean_alloc_ctor(0, 2, 0); +} else { + x_186 = x_150; +} +lean_ctor_set(x_186, 0, x_185); +lean_ctor_set(x_186, 1, x_9); +x_17 = x_186; +x_18 = x_12; +goto block_25; +} +default: +{ +lean_object* x_187; lean_object* x_188; +if (lean_is_scalar(x_169)) { + x_187 = lean_alloc_ctor(1, 1, 0); +} else { + x_187 = x_169; +} +lean_ctor_set(x_187, 0, x_5); +if (lean_is_scalar(x_150)) { + x_188 = lean_alloc_ctor(0, 2, 0); +} else { + x_188 = x_150; +} +lean_ctor_set(x_188, 0, x_187); +lean_ctor_set(x_188, 1, x_9); +x_17 = x_188; +x_18 = x_12; +goto block_25; +} +} +} +} +} +default: +{ +uint8_t x_189; +x_189 = !lean_is_exclusive(x_27); +if (x_189 == 0) +{ +lean_object* x_190; lean_object* x_191; lean_object* x_192; +x_190 = lean_ctor_get(x_27, 0); +lean_dec(x_190); +x_191 = l_Lean_IR_instInhabitedCtorFieldInfo; +x_192 = lean_array_get(x_191, x_1, x_6); +switch (lean_obj_tag(x_192)) { +case 1: +{ +uint8_t x_193; +lean_free_object(x_27); +x_193 = !lean_is_exclusive(x_192); +if (x_193 == 0) +{ +lean_object* x_194; lean_object* x_195; lean_object* x_196; lean_object* x_197; +x_194 = lean_ctor_get(x_192, 0); +lean_dec(x_194); +x_195 = lean_box(1); +x_196 = lean_array_push(x_5, x_195); +lean_ctor_set(x_192, 0, x_196); +x_197 = lean_alloc_ctor(0, 2, 0); +lean_ctor_set(x_197, 0, x_192); +lean_ctor_set(x_197, 1, x_9); +x_17 = x_197; +x_18 = x_12; +goto block_25; +} +else +{ +lean_object* x_198; lean_object* x_199; lean_object* x_200; lean_object* x_201; +lean_dec(x_192); +x_198 = lean_box(1); +x_199 = lean_array_push(x_5, x_198); +x_200 = lean_alloc_ctor(1, 1, 0); +lean_ctor_set(x_200, 0, x_199); +x_201 = lean_alloc_ctor(0, 2, 0); +lean_ctor_set(x_201, 0, x_200); +lean_ctor_set(x_201, 1, x_9); +x_17 = x_201; +x_18 = x_12; +goto block_25; +} +} +case 2: +{ +uint8_t x_202; +lean_free_object(x_27); +x_202 = !lean_is_exclusive(x_192); +if (x_202 == 0) +{ +lean_object* x_203; lean_object* x_204; +x_203 = lean_ctor_get(x_192, 0); +lean_dec(x_203); +lean_ctor_set_tag(x_192, 1); +lean_ctor_set(x_192, 0, x_5); +x_204 = lean_alloc_ctor(0, 2, 0); +lean_ctor_set(x_204, 0, x_192); +lean_ctor_set(x_204, 1, x_9); +x_17 = x_204; +x_18 = x_12; +goto block_25; +} +else +{ +lean_object* x_205; lean_object* x_206; +lean_dec(x_192); +x_205 = lean_alloc_ctor(1, 1, 0); +lean_ctor_set(x_205, 0, x_5); +x_206 = lean_alloc_ctor(0, 2, 0); +lean_ctor_set(x_206, 0, x_205); +lean_ctor_set(x_206, 1, x_9); +x_17 = x_206; +x_18 = x_12; +goto block_25; +} +} +default: +{ +lean_object* x_207; +lean_dec(x_192); +lean_ctor_set_tag(x_27, 1); +lean_ctor_set(x_27, 0, x_5); +x_207 = lean_alloc_ctor(0, 2, 0); +lean_ctor_set(x_207, 0, x_27); +lean_ctor_set(x_207, 1, x_9); +x_17 = x_207; +x_18 = x_12; +goto block_25; +} +} +} +else +{ +lean_object* x_208; lean_object* x_209; +lean_dec(x_27); +x_208 = l_Lean_IR_instInhabitedCtorFieldInfo; +x_209 = lean_array_get(x_208, x_1, x_6); +switch (lean_obj_tag(x_209)) { +case 1: +{ +lean_object* x_210; lean_object* x_211; lean_object* x_212; lean_object* x_213; lean_object* x_214; +if 
(lean_is_exclusive(x_209)) { + lean_ctor_release(x_209, 0); + x_210 = x_209; +} else { + lean_dec_ref(x_209); + x_210 = lean_box(0); +} +x_211 = lean_box(1); +x_212 = lean_array_push(x_5, x_211); +if (lean_is_scalar(x_210)) { + x_213 = lean_alloc_ctor(1, 1, 0); +} else { + x_213 = x_210; +} +lean_ctor_set(x_213, 0, x_212); +x_214 = lean_alloc_ctor(0, 2, 0); +lean_ctor_set(x_214, 0, x_213); +lean_ctor_set(x_214, 1, x_9); +x_17 = x_214; +x_18 = x_12; +goto block_25; +} +case 2: +{ +lean_object* x_215; lean_object* x_216; lean_object* x_217; +if (lean_is_exclusive(x_209)) { + lean_ctor_release(x_209, 0); + x_215 = x_209; +} else { + lean_dec_ref(x_209); + x_215 = lean_box(0); +} +if (lean_is_scalar(x_215)) { + x_216 = lean_alloc_ctor(1, 1, 0); +} else { + x_216 = x_215; + lean_ctor_set_tag(x_216, 1); +} +lean_ctor_set(x_216, 0, x_5); +x_217 = lean_alloc_ctor(0, 2, 0); +lean_ctor_set(x_217, 0, x_216); +lean_ctor_set(x_217, 1, x_9); +x_17 = x_217; +x_18 = x_12; +goto block_25; +} +default: +{ +lean_object* x_218; lean_object* x_219; +lean_dec(x_209); +x_218 = lean_alloc_ctor(1, 1, 0); +lean_ctor_set(x_218, 0, x_5); +x_219 = lean_alloc_ctor(0, 2, 0); +lean_ctor_set(x_219, 0, x_218); +lean_ctor_set(x_219, 1, x_9); +x_17 = x_219; +x_18 = x_12; +goto block_25; +} +} +} +} +} +block_25: +{ +lean_object* x_19; lean_object* x_20; lean_object* x_21; lean_object* x_22; lean_object* x_23; +x_19 = lean_ctor_get(x_17, 0); +lean_inc(x_19); +x_20 = lean_ctor_get(x_17, 1); +lean_inc(x_20); +lean_dec(x_17); +x_21 = lean_ctor_get(x_19, 0); +lean_inc(x_21); +lean_dec(x_19); +x_22 = lean_ctor_get(x_4, 2); +x_23 = lean_nat_add(x_6, x_22); +lean_dec(x_6); +x_5 = x_21; +x_6 = x_23; +x_7 = lean_box(0); +x_8 = lean_box(0); +x_9 = x_20; +x_12 = x_18; +goto _start; +} +} +} +} +LEAN_EXPORT lean_object* l_Std_Range_forIn_x27_loop___at_Lean_IR_ToIR_lowerLet___spec__7(lean_object* x_1, lean_object* x_2, lean_object* x_3, lean_object* x_4, lean_object* x_5, lean_object* x_6, lean_object* x_7, lean_object* x_8, lean_object* x_9, lean_object* x_10, lean_object* x_11, lean_object* x_12) { +_start: +{ +lean_object* x_13; uint8_t x_14; +x_13 = lean_ctor_get(x_4, 1); +x_14 = lean_nat_dec_lt(x_6, x_13); +if (x_14 == 0) +{ +lean_object* x_15; lean_object* x_16; +lean_dec(x_6); +x_15 = lean_alloc_ctor(0, 2, 0); +lean_ctor_set(x_15, 0, x_5); +lean_ctor_set(x_15, 1, x_9); +x_16 = lean_alloc_ctor(0, 2, 0); +lean_ctor_set(x_16, 0, x_15); +lean_ctor_set(x_16, 1, x_12); +return x_16; +} +else +{ +lean_object* x_17; lean_object* x_18; lean_object* x_26; lean_object* x_27; +x_26 = l_Lean_Compiler_LCNF_instInhabitedArg; +x_27 = lean_array_get(x_26, x_2, x_6); +switch (lean_obj_tag(x_27)) { +case 0: +{ +lean_object* x_28; lean_object* x_29; +x_28 = l_Lean_IR_instInhabitedCtorFieldInfo; +x_29 = lean_array_get(x_28, x_1, x_6); +switch (lean_obj_tag(x_29)) { +case 1: +{ +uint8_t x_30; +x_30 = !lean_is_exclusive(x_29); +if (x_30 == 0) +{ +lean_object* x_31; lean_object* x_32; lean_object* x_33; lean_object* x_34; +x_31 = lean_ctor_get(x_29, 0); +lean_dec(x_31); +x_32 = lean_box(1); +x_33 = lean_array_push(x_5, x_32); +lean_ctor_set(x_29, 0, x_33); +x_34 = lean_alloc_ctor(0, 2, 0); +lean_ctor_set(x_34, 0, x_29); +lean_ctor_set(x_34, 1, x_9); +x_17 = x_34; +x_18 = x_12; +goto block_25; +} +else +{ +lean_object* x_35; lean_object* x_36; lean_object* x_37; lean_object* x_38; +lean_dec(x_29); +x_35 = lean_box(1); +x_36 = lean_array_push(x_5, x_35); +x_37 = lean_alloc_ctor(1, 1, 0); +lean_ctor_set(x_37, 0, x_36); +x_38 = lean_alloc_ctor(0, 2, 0); 
+lean_ctor_set(x_38, 0, x_37); +lean_ctor_set(x_38, 1, x_9); +x_17 = x_38; +x_18 = x_12; +goto block_25; +} +} +case 2: +{ +uint8_t x_39; +x_39 = !lean_is_exclusive(x_29); +if (x_39 == 0) +{ +lean_object* x_40; lean_object* x_41; +x_40 = lean_ctor_get(x_29, 0); +lean_dec(x_40); +lean_ctor_set_tag(x_29, 1); +lean_ctor_set(x_29, 0, x_5); +x_41 = lean_alloc_ctor(0, 2, 0); +lean_ctor_set(x_41, 0, x_29); +lean_ctor_set(x_41, 1, x_9); +x_17 = x_41; +x_18 = x_12; +goto block_25; +} +else +{ +lean_object* x_42; lean_object* x_43; +lean_dec(x_29); +x_42 = lean_alloc_ctor(1, 1, 0); +lean_ctor_set(x_42, 0, x_5); +x_43 = lean_alloc_ctor(0, 2, 0); +lean_ctor_set(x_43, 0, x_42); +lean_ctor_set(x_43, 1, x_9); +x_17 = x_43; +x_18 = x_12; +goto block_25; +} +} +default: +{ +lean_object* x_44; lean_object* x_45; +lean_dec(x_29); +x_44 = lean_alloc_ctor(1, 1, 0); +lean_ctor_set(x_44, 0, x_5); +x_45 = lean_alloc_ctor(0, 2, 0); +lean_ctor_set(x_45, 0, x_44); +lean_ctor_set(x_45, 1, x_9); +x_17 = x_45; +x_18 = x_12; +goto block_25; +} +} +} +case 1: +{ +lean_object* x_46; uint8_t x_47; +x_46 = lean_ctor_get(x_9, 0); +lean_inc(x_46); +x_47 = !lean_is_exclusive(x_27); +if (x_47 == 0) +{ +uint8_t x_48; +x_48 = !lean_is_exclusive(x_46); +if (x_48 == 0) +{ +lean_object* x_49; lean_object* x_50; lean_object* x_51; lean_object* x_52; uint64_t x_53; uint64_t x_54; uint64_t x_55; uint64_t x_56; uint64_t x_57; uint64_t x_58; uint64_t x_59; size_t x_60; size_t x_61; size_t x_62; size_t x_63; size_t x_64; lean_object* x_65; lean_object* x_66; +x_49 = lean_ctor_get(x_27, 0); +x_50 = lean_ctor_get(x_46, 1); +x_51 = lean_ctor_get(x_46, 0); +lean_dec(x_51); +x_52 = lean_array_get_size(x_50); +x_53 = l___private_Lean_Expr_0__Lean_hashFVarId____x40_Lean_Expr___hyg_1730_(x_49); +x_54 = 32; +x_55 = lean_uint64_shift_right(x_53, x_54); +x_56 = lean_uint64_xor(x_53, x_55); +x_57 = 16; +x_58 = lean_uint64_shift_right(x_56, x_57); +x_59 = lean_uint64_xor(x_56, x_58); +x_60 = lean_uint64_to_usize(x_59); +x_61 = lean_usize_of_nat(x_52); +lean_dec(x_52); +x_62 = 1; +x_63 = lean_usize_sub(x_61, x_62); +x_64 = lean_usize_land(x_60, x_63); +x_65 = lean_array_uget(x_50, x_64); +lean_dec(x_50); +x_66 = l_Std_DHashMap_Internal_AssocList_get_x3f___at_Lean_IR_ToIR_lowerArg___spec__1(x_49, x_65); +lean_dec(x_65); +lean_dec(x_49); +if (lean_obj_tag(x_66) == 0) +{ +lean_ctor_set(x_27, 0, x_5); +lean_ctor_set(x_46, 1, x_9); +lean_ctor_set(x_46, 0, x_27); +x_17 = x_46; +x_18 = x_12; +goto block_25; +} +else +{ +uint8_t x_67; +lean_free_object(x_27); +x_67 = !lean_is_exclusive(x_66); +if (x_67 == 0) +{ +lean_object* x_68; +x_68 = lean_ctor_get(x_66, 0); +switch (lean_obj_tag(x_68)) { +case 0: +{ +uint8_t x_69; +lean_free_object(x_66); +x_69 = !lean_is_exclusive(x_68); +if (x_69 == 0) +{ +lean_object* x_70; lean_object* x_71; lean_object* x_72; +x_70 = lean_ctor_get(x_68, 0); +x_71 = l_Lean_IR_instInhabitedCtorFieldInfo; +x_72 = lean_array_get(x_71, x_1, x_6); +switch (lean_obj_tag(x_72)) { +case 1: +{ +uint8_t x_73; +x_73 = !lean_is_exclusive(x_72); +if (x_73 == 0) +{ +lean_object* x_74; lean_object* x_75; +x_74 = lean_ctor_get(x_72, 0); +lean_dec(x_74); +lean_ctor_set_tag(x_72, 0); +lean_ctor_set(x_72, 0, x_70); +x_75 = lean_array_push(x_5, x_72); +lean_ctor_set_tag(x_68, 1); +lean_ctor_set(x_68, 0, x_75); +lean_ctor_set(x_46, 1, x_9); +lean_ctor_set(x_46, 0, x_68); +x_17 = x_46; +x_18 = x_12; +goto block_25; +} +else +{ +lean_object* x_76; lean_object* x_77; +lean_dec(x_72); +x_76 = lean_alloc_ctor(0, 1, 0); +lean_ctor_set(x_76, 0, x_70); +x_77 = 
lean_array_push(x_5, x_76); +lean_ctor_set_tag(x_68, 1); +lean_ctor_set(x_68, 0, x_77); +lean_ctor_set(x_46, 1, x_9); +lean_ctor_set(x_46, 0, x_68); +x_17 = x_46; +x_18 = x_12; +goto block_25; +} +} +case 2: +{ +uint8_t x_78; +lean_free_object(x_68); +lean_dec(x_70); +x_78 = !lean_is_exclusive(x_72); +if (x_78 == 0) +{ +lean_object* x_79; +x_79 = lean_ctor_get(x_72, 0); +lean_dec(x_79); +lean_ctor_set_tag(x_72, 1); +lean_ctor_set(x_72, 0, x_5); +lean_ctor_set(x_46, 1, x_9); +lean_ctor_set(x_46, 0, x_72); +x_17 = x_46; +x_18 = x_12; +goto block_25; +} +else +{ +lean_object* x_80; +lean_dec(x_72); +x_80 = lean_alloc_ctor(1, 1, 0); +lean_ctor_set(x_80, 0, x_5); +lean_ctor_set(x_46, 1, x_9); +lean_ctor_set(x_46, 0, x_80); +x_17 = x_46; +x_18 = x_12; +goto block_25; +} +} +default: +{ +lean_dec(x_72); +lean_dec(x_70); +lean_ctor_set_tag(x_68, 1); +lean_ctor_set(x_68, 0, x_5); +lean_ctor_set(x_46, 1, x_9); +lean_ctor_set(x_46, 0, x_68); +x_17 = x_46; +x_18 = x_12; +goto block_25; +} +} +} +else +{ +lean_object* x_81; lean_object* x_82; lean_object* x_83; +x_81 = lean_ctor_get(x_68, 0); +lean_inc(x_81); +lean_dec(x_68); +x_82 = l_Lean_IR_instInhabitedCtorFieldInfo; +x_83 = lean_array_get(x_82, x_1, x_6); +switch (lean_obj_tag(x_83)) { +case 1: +{ +lean_object* x_84; lean_object* x_85; lean_object* x_86; lean_object* x_87; +if (lean_is_exclusive(x_83)) { + lean_ctor_release(x_83, 0); + x_84 = x_83; +} else { + lean_dec_ref(x_83); + x_84 = lean_box(0); +} +if (lean_is_scalar(x_84)) { + x_85 = lean_alloc_ctor(0, 1, 0); +} else { + x_85 = x_84; + lean_ctor_set_tag(x_85, 0); +} +lean_ctor_set(x_85, 0, x_81); +x_86 = lean_array_push(x_5, x_85); +x_87 = lean_alloc_ctor(1, 1, 0); +lean_ctor_set(x_87, 0, x_86); +lean_ctor_set(x_46, 1, x_9); +lean_ctor_set(x_46, 0, x_87); +x_17 = x_46; +x_18 = x_12; +goto block_25; +} +case 2: +{ +lean_object* x_88; lean_object* x_89; +lean_dec(x_81); +if (lean_is_exclusive(x_83)) { + lean_ctor_release(x_83, 0); + x_88 = x_83; +} else { + lean_dec_ref(x_83); + x_88 = lean_box(0); +} +if (lean_is_scalar(x_88)) { + x_89 = lean_alloc_ctor(1, 1, 0); +} else { + x_89 = x_88; + lean_ctor_set_tag(x_89, 1); +} +lean_ctor_set(x_89, 0, x_5); +lean_ctor_set(x_46, 1, x_9); +lean_ctor_set(x_46, 0, x_89); +x_17 = x_46; +x_18 = x_12; +goto block_25; +} +default: +{ +lean_object* x_90; +lean_dec(x_83); +lean_dec(x_81); +x_90 = lean_alloc_ctor(1, 1, 0); +lean_ctor_set(x_90, 0, x_5); +lean_ctor_set(x_46, 1, x_9); +lean_ctor_set(x_46, 0, x_90); +x_17 = x_46; +x_18 = x_12; +goto block_25; +} +} +} +} +case 1: +{ +uint8_t x_91; +lean_free_object(x_66); +x_91 = !lean_is_exclusive(x_68); +if (x_91 == 0) +{ +lean_object* x_92; +x_92 = lean_ctor_get(x_68, 0); +lean_dec(x_92); +lean_ctor_set(x_68, 0, x_5); +lean_ctor_set(x_46, 1, x_9); +lean_ctor_set(x_46, 0, x_68); +x_17 = x_46; +x_18 = x_12; +goto block_25; +} +else +{ +lean_object* x_93; +lean_dec(x_68); +x_93 = lean_alloc_ctor(1, 1, 0); +lean_ctor_set(x_93, 0, x_5); +lean_ctor_set(x_46, 1, x_9); +lean_ctor_set(x_46, 0, x_93); +x_17 = x_46; +x_18 = x_12; +goto block_25; +} +} +default: +{ +lean_ctor_set(x_66, 0, x_5); +lean_ctor_set(x_46, 1, x_9); +lean_ctor_set(x_46, 0, x_66); +x_17 = x_46; +x_18 = x_12; +goto block_25; +} +} +} +else +{ +lean_object* x_94; +x_94 = lean_ctor_get(x_66, 0); +lean_inc(x_94); +lean_dec(x_66); +switch (lean_obj_tag(x_94)) { +case 0: +{ +lean_object* x_95; lean_object* x_96; lean_object* x_97; lean_object* x_98; +x_95 = lean_ctor_get(x_94, 0); +lean_inc(x_95); +if (lean_is_exclusive(x_94)) { + lean_ctor_release(x_94, 
0); + x_96 = x_94; +} else { + lean_dec_ref(x_94); + x_96 = lean_box(0); +} +x_97 = l_Lean_IR_instInhabitedCtorFieldInfo; +x_98 = lean_array_get(x_97, x_1, x_6); +switch (lean_obj_tag(x_98)) { +case 1: +{ +lean_object* x_99; lean_object* x_100; lean_object* x_101; lean_object* x_102; +if (lean_is_exclusive(x_98)) { + lean_ctor_release(x_98, 0); + x_99 = x_98; +} else { + lean_dec_ref(x_98); + x_99 = lean_box(0); +} +if (lean_is_scalar(x_99)) { + x_100 = lean_alloc_ctor(0, 1, 0); +} else { + x_100 = x_99; + lean_ctor_set_tag(x_100, 0); +} +lean_ctor_set(x_100, 0, x_95); +x_101 = lean_array_push(x_5, x_100); +if (lean_is_scalar(x_96)) { + x_102 = lean_alloc_ctor(1, 1, 0); +} else { + x_102 = x_96; + lean_ctor_set_tag(x_102, 1); +} +lean_ctor_set(x_102, 0, x_101); +lean_ctor_set(x_46, 1, x_9); +lean_ctor_set(x_46, 0, x_102); +x_17 = x_46; +x_18 = x_12; +goto block_25; +} +case 2: +{ +lean_object* x_103; lean_object* x_104; +lean_dec(x_96); +lean_dec(x_95); +if (lean_is_exclusive(x_98)) { + lean_ctor_release(x_98, 0); + x_103 = x_98; +} else { + lean_dec_ref(x_98); + x_103 = lean_box(0); +} +if (lean_is_scalar(x_103)) { + x_104 = lean_alloc_ctor(1, 1, 0); +} else { + x_104 = x_103; + lean_ctor_set_tag(x_104, 1); +} +lean_ctor_set(x_104, 0, x_5); +lean_ctor_set(x_46, 1, x_9); +lean_ctor_set(x_46, 0, x_104); +x_17 = x_46; +x_18 = x_12; +goto block_25; +} +default: +{ +lean_object* x_105; +lean_dec(x_98); +lean_dec(x_95); +if (lean_is_scalar(x_96)) { + x_105 = lean_alloc_ctor(1, 1, 0); +} else { + x_105 = x_96; + lean_ctor_set_tag(x_105, 1); +} +lean_ctor_set(x_105, 0, x_5); +lean_ctor_set(x_46, 1, x_9); +lean_ctor_set(x_46, 0, x_105); +x_17 = x_46; +x_18 = x_12; +goto block_25; +} +} +} +case 1: +{ +lean_object* x_106; lean_object* x_107; +if (lean_is_exclusive(x_94)) { + lean_ctor_release(x_94, 0); + x_106 = x_94; +} else { + lean_dec_ref(x_94); + x_106 = lean_box(0); +} +if (lean_is_scalar(x_106)) { + x_107 = lean_alloc_ctor(1, 1, 0); +} else { + x_107 = x_106; +} +lean_ctor_set(x_107, 0, x_5); +lean_ctor_set(x_46, 1, x_9); +lean_ctor_set(x_46, 0, x_107); +x_17 = x_46; +x_18 = x_12; +goto block_25; +} +default: +{ +lean_object* x_108; +x_108 = lean_alloc_ctor(1, 1, 0); +lean_ctor_set(x_108, 0, x_5); +lean_ctor_set(x_46, 1, x_9); +lean_ctor_set(x_46, 0, x_108); +x_17 = x_46; +x_18 = x_12; +goto block_25; +} +} +} +} +} +else +{ +lean_object* x_109; lean_object* x_110; lean_object* x_111; uint64_t x_112; uint64_t x_113; uint64_t x_114; uint64_t x_115; uint64_t x_116; uint64_t x_117; uint64_t x_118; size_t x_119; size_t x_120; size_t x_121; size_t x_122; size_t x_123; lean_object* x_124; lean_object* x_125; +x_109 = lean_ctor_get(x_27, 0); +x_110 = lean_ctor_get(x_46, 1); +lean_inc(x_110); +lean_dec(x_46); +x_111 = lean_array_get_size(x_110); +x_112 = l___private_Lean_Expr_0__Lean_hashFVarId____x40_Lean_Expr___hyg_1730_(x_109); +x_113 = 32; +x_114 = lean_uint64_shift_right(x_112, x_113); +x_115 = lean_uint64_xor(x_112, x_114); +x_116 = 16; +x_117 = lean_uint64_shift_right(x_115, x_116); +x_118 = lean_uint64_xor(x_115, x_117); +x_119 = lean_uint64_to_usize(x_118); +x_120 = lean_usize_of_nat(x_111); +lean_dec(x_111); +x_121 = 1; +x_122 = lean_usize_sub(x_120, x_121); +x_123 = lean_usize_land(x_119, x_122); +x_124 = lean_array_uget(x_110, x_123); +lean_dec(x_110); +x_125 = l_Std_DHashMap_Internal_AssocList_get_x3f___at_Lean_IR_ToIR_lowerArg___spec__1(x_109, x_124); +lean_dec(x_124); +lean_dec(x_109); +if (lean_obj_tag(x_125) == 0) +{ +lean_object* x_126; +lean_ctor_set(x_27, 0, x_5); +x_126 = 
lean_alloc_ctor(0, 2, 0); +lean_ctor_set(x_126, 0, x_27); +lean_ctor_set(x_126, 1, x_9); +x_17 = x_126; +x_18 = x_12; +goto block_25; +} +else +{ +lean_object* x_127; lean_object* x_128; +lean_free_object(x_27); +x_127 = lean_ctor_get(x_125, 0); +lean_inc(x_127); +if (lean_is_exclusive(x_125)) { + lean_ctor_release(x_125, 0); + x_128 = x_125; +} else { + lean_dec_ref(x_125); + x_128 = lean_box(0); +} +switch (lean_obj_tag(x_127)) { +case 0: +{ +lean_object* x_129; lean_object* x_130; lean_object* x_131; lean_object* x_132; +lean_dec(x_128); +x_129 = lean_ctor_get(x_127, 0); +lean_inc(x_129); +if (lean_is_exclusive(x_127)) { + lean_ctor_release(x_127, 0); + x_130 = x_127; +} else { + lean_dec_ref(x_127); + x_130 = lean_box(0); +} +x_131 = l_Lean_IR_instInhabitedCtorFieldInfo; +x_132 = lean_array_get(x_131, x_1, x_6); +switch (lean_obj_tag(x_132)) { +case 1: +{ +lean_object* x_133; lean_object* x_134; lean_object* x_135; lean_object* x_136; lean_object* x_137; +if (lean_is_exclusive(x_132)) { + lean_ctor_release(x_132, 0); + x_133 = x_132; +} else { + lean_dec_ref(x_132); + x_133 = lean_box(0); +} +if (lean_is_scalar(x_133)) { + x_134 = lean_alloc_ctor(0, 1, 0); +} else { + x_134 = x_133; + lean_ctor_set_tag(x_134, 0); +} +lean_ctor_set(x_134, 0, x_129); +x_135 = lean_array_push(x_5, x_134); +if (lean_is_scalar(x_130)) { + x_136 = lean_alloc_ctor(1, 1, 0); +} else { + x_136 = x_130; + lean_ctor_set_tag(x_136, 1); +} +lean_ctor_set(x_136, 0, x_135); +x_137 = lean_alloc_ctor(0, 2, 0); +lean_ctor_set(x_137, 0, x_136); +lean_ctor_set(x_137, 1, x_9); +x_17 = x_137; +x_18 = x_12; +goto block_25; +} +case 2: +{ +lean_object* x_138; lean_object* x_139; lean_object* x_140; +lean_dec(x_130); +lean_dec(x_129); +if (lean_is_exclusive(x_132)) { + lean_ctor_release(x_132, 0); + x_138 = x_132; +} else { + lean_dec_ref(x_132); + x_138 = lean_box(0); +} +if (lean_is_scalar(x_138)) { + x_139 = lean_alloc_ctor(1, 1, 0); +} else { + x_139 = x_138; + lean_ctor_set_tag(x_139, 1); +} +lean_ctor_set(x_139, 0, x_5); +x_140 = lean_alloc_ctor(0, 2, 0); +lean_ctor_set(x_140, 0, x_139); +lean_ctor_set(x_140, 1, x_9); +x_17 = x_140; +x_18 = x_12; +goto block_25; +} +default: +{ +lean_object* x_141; lean_object* x_142; +lean_dec(x_132); +lean_dec(x_129); +if (lean_is_scalar(x_130)) { + x_141 = lean_alloc_ctor(1, 1, 0); +} else { + x_141 = x_130; + lean_ctor_set_tag(x_141, 1); +} +lean_ctor_set(x_141, 0, x_5); +x_142 = lean_alloc_ctor(0, 2, 0); +lean_ctor_set(x_142, 0, x_141); +lean_ctor_set(x_142, 1, x_9); +x_17 = x_142; +x_18 = x_12; +goto block_25; +} +} +} +case 1: +{ +lean_object* x_143; lean_object* x_144; lean_object* x_145; +lean_dec(x_128); +if (lean_is_exclusive(x_127)) { + lean_ctor_release(x_127, 0); + x_143 = x_127; +} else { + lean_dec_ref(x_127); + x_143 = lean_box(0); +} +if (lean_is_scalar(x_143)) { + x_144 = lean_alloc_ctor(1, 1, 0); +} else { + x_144 = x_143; +} +lean_ctor_set(x_144, 0, x_5); +x_145 = lean_alloc_ctor(0, 2, 0); +lean_ctor_set(x_145, 0, x_144); +lean_ctor_set(x_145, 1, x_9); +x_17 = x_145; +x_18 = x_12; +goto block_25; +} +default: +{ +lean_object* x_146; lean_object* x_147; +if (lean_is_scalar(x_128)) { + x_146 = lean_alloc_ctor(1, 1, 0); +} else { + x_146 = x_128; +} +lean_ctor_set(x_146, 0, x_5); +x_147 = lean_alloc_ctor(0, 2, 0); +lean_ctor_set(x_147, 0, x_146); +lean_ctor_set(x_147, 1, x_9); +x_17 = x_147; +x_18 = x_12; +goto block_25; +} +} +} +} +} +else +{ +lean_object* x_148; lean_object* x_149; lean_object* x_150; lean_object* x_151; uint64_t x_152; uint64_t x_153; uint64_t 
x_154; uint64_t x_155; uint64_t x_156; uint64_t x_157; uint64_t x_158; size_t x_159; size_t x_160; size_t x_161; size_t x_162; size_t x_163; lean_object* x_164; lean_object* x_165; +x_148 = lean_ctor_get(x_27, 0); +lean_inc(x_148); +lean_dec(x_27); +x_149 = lean_ctor_get(x_46, 1); +lean_inc(x_149); +if (lean_is_exclusive(x_46)) { + lean_ctor_release(x_46, 0); + lean_ctor_release(x_46, 1); + x_150 = x_46; +} else { + lean_dec_ref(x_46); + x_150 = lean_box(0); +} +x_151 = lean_array_get_size(x_149); +x_152 = l___private_Lean_Expr_0__Lean_hashFVarId____x40_Lean_Expr___hyg_1730_(x_148); +x_153 = 32; +x_154 = lean_uint64_shift_right(x_152, x_153); +x_155 = lean_uint64_xor(x_152, x_154); +x_156 = 16; +x_157 = lean_uint64_shift_right(x_155, x_156); +x_158 = lean_uint64_xor(x_155, x_157); +x_159 = lean_uint64_to_usize(x_158); +x_160 = lean_usize_of_nat(x_151); +lean_dec(x_151); +x_161 = 1; +x_162 = lean_usize_sub(x_160, x_161); +x_163 = lean_usize_land(x_159, x_162); +x_164 = lean_array_uget(x_149, x_163); +lean_dec(x_149); +x_165 = l_Std_DHashMap_Internal_AssocList_get_x3f___at_Lean_IR_ToIR_lowerArg___spec__1(x_148, x_164); +lean_dec(x_164); +lean_dec(x_148); +if (lean_obj_tag(x_165) == 0) +{ +lean_object* x_166; lean_object* x_167; +x_166 = lean_alloc_ctor(1, 1, 0); +lean_ctor_set(x_166, 0, x_5); +if (lean_is_scalar(x_150)) { + x_167 = lean_alloc_ctor(0, 2, 0); +} else { + x_167 = x_150; +} +lean_ctor_set(x_167, 0, x_166); +lean_ctor_set(x_167, 1, x_9); +x_17 = x_167; +x_18 = x_12; +goto block_25; +} +else +{ +lean_object* x_168; lean_object* x_169; +x_168 = lean_ctor_get(x_165, 0); +lean_inc(x_168); +if (lean_is_exclusive(x_165)) { + lean_ctor_release(x_165, 0); + x_169 = x_165; +} else { + lean_dec_ref(x_165); + x_169 = lean_box(0); +} +switch (lean_obj_tag(x_168)) { +case 0: +{ +lean_object* x_170; lean_object* x_171; lean_object* x_172; lean_object* x_173; +lean_dec(x_169); +x_170 = lean_ctor_get(x_168, 0); +lean_inc(x_170); +if (lean_is_exclusive(x_168)) { + lean_ctor_release(x_168, 0); + x_171 = x_168; +} else { + lean_dec_ref(x_168); + x_171 = lean_box(0); +} +x_172 = l_Lean_IR_instInhabitedCtorFieldInfo; +x_173 = lean_array_get(x_172, x_1, x_6); +switch (lean_obj_tag(x_173)) { +case 1: +{ +lean_object* x_174; lean_object* x_175; lean_object* x_176; lean_object* x_177; lean_object* x_178; +if (lean_is_exclusive(x_173)) { + lean_ctor_release(x_173, 0); + x_174 = x_173; +} else { + lean_dec_ref(x_173); + x_174 = lean_box(0); +} +if (lean_is_scalar(x_174)) { + x_175 = lean_alloc_ctor(0, 1, 0); +} else { + x_175 = x_174; + lean_ctor_set_tag(x_175, 0); +} +lean_ctor_set(x_175, 0, x_170); +x_176 = lean_array_push(x_5, x_175); +if (lean_is_scalar(x_171)) { + x_177 = lean_alloc_ctor(1, 1, 0); +} else { + x_177 = x_171; + lean_ctor_set_tag(x_177, 1); +} +lean_ctor_set(x_177, 0, x_176); +if (lean_is_scalar(x_150)) { + x_178 = lean_alloc_ctor(0, 2, 0); +} else { + x_178 = x_150; +} +lean_ctor_set(x_178, 0, x_177); +lean_ctor_set(x_178, 1, x_9); +x_17 = x_178; +x_18 = x_12; +goto block_25; +} +case 2: +{ +lean_object* x_179; lean_object* x_180; lean_object* x_181; +lean_dec(x_171); +lean_dec(x_170); +if (lean_is_exclusive(x_173)) { + lean_ctor_release(x_173, 0); + x_179 = x_173; +} else { + lean_dec_ref(x_173); + x_179 = lean_box(0); +} +if (lean_is_scalar(x_179)) { + x_180 = lean_alloc_ctor(1, 1, 0); +} else { + x_180 = x_179; + lean_ctor_set_tag(x_180, 1); +} +lean_ctor_set(x_180, 0, x_5); +if (lean_is_scalar(x_150)) { + x_181 = lean_alloc_ctor(0, 2, 0); +} else { + x_181 = x_150; +} 
+lean_ctor_set(x_181, 0, x_180); +lean_ctor_set(x_181, 1, x_9); +x_17 = x_181; +x_18 = x_12; +goto block_25; +} +default: +{ +lean_object* x_182; lean_object* x_183; +lean_dec(x_173); +lean_dec(x_170); +if (lean_is_scalar(x_171)) { + x_182 = lean_alloc_ctor(1, 1, 0); +} else { + x_182 = x_171; + lean_ctor_set_tag(x_182, 1); +} +lean_ctor_set(x_182, 0, x_5); +if (lean_is_scalar(x_150)) { + x_183 = lean_alloc_ctor(0, 2, 0); +} else { + x_183 = x_150; +} +lean_ctor_set(x_183, 0, x_182); +lean_ctor_set(x_183, 1, x_9); +x_17 = x_183; +x_18 = x_12; +goto block_25; +} +} +} +case 1: +{ +lean_object* x_184; lean_object* x_185; lean_object* x_186; +lean_dec(x_169); +if (lean_is_exclusive(x_168)) { + lean_ctor_release(x_168, 0); + x_184 = x_168; +} else { + lean_dec_ref(x_168); + x_184 = lean_box(0); +} +if (lean_is_scalar(x_184)) { + x_185 = lean_alloc_ctor(1, 1, 0); +} else { + x_185 = x_184; +} +lean_ctor_set(x_185, 0, x_5); +if (lean_is_scalar(x_150)) { + x_186 = lean_alloc_ctor(0, 2, 0); +} else { + x_186 = x_150; +} +lean_ctor_set(x_186, 0, x_185); +lean_ctor_set(x_186, 1, x_9); +x_17 = x_186; +x_18 = x_12; +goto block_25; +} +default: +{ +lean_object* x_187; lean_object* x_188; +if (lean_is_scalar(x_169)) { + x_187 = lean_alloc_ctor(1, 1, 0); +} else { + x_187 = x_169; +} +lean_ctor_set(x_187, 0, x_5); +if (lean_is_scalar(x_150)) { + x_188 = lean_alloc_ctor(0, 2, 0); +} else { + x_188 = x_150; +} +lean_ctor_set(x_188, 0, x_187); +lean_ctor_set(x_188, 1, x_9); +x_17 = x_188; +x_18 = x_12; +goto block_25; +} +} +} +} +} +default: +{ +uint8_t x_189; +x_189 = !lean_is_exclusive(x_27); +if (x_189 == 0) +{ +lean_object* x_190; lean_object* x_191; lean_object* x_192; +x_190 = lean_ctor_get(x_27, 0); +lean_dec(x_190); +x_191 = l_Lean_IR_instInhabitedCtorFieldInfo; +x_192 = lean_array_get(x_191, x_1, x_6); +switch (lean_obj_tag(x_192)) { +case 1: +{ +uint8_t x_193; +lean_free_object(x_27); +x_193 = !lean_is_exclusive(x_192); +if (x_193 == 0) +{ +lean_object* x_194; lean_object* x_195; lean_object* x_196; lean_object* x_197; +x_194 = lean_ctor_get(x_192, 0); +lean_dec(x_194); +x_195 = lean_box(1); +x_196 = lean_array_push(x_5, x_195); +lean_ctor_set(x_192, 0, x_196); +x_197 = lean_alloc_ctor(0, 2, 0); +lean_ctor_set(x_197, 0, x_192); +lean_ctor_set(x_197, 1, x_9); +x_17 = x_197; +x_18 = x_12; +goto block_25; +} +else +{ +lean_object* x_198; lean_object* x_199; lean_object* x_200; lean_object* x_201; +lean_dec(x_192); +x_198 = lean_box(1); +x_199 = lean_array_push(x_5, x_198); +x_200 = lean_alloc_ctor(1, 1, 0); +lean_ctor_set(x_200, 0, x_199); +x_201 = lean_alloc_ctor(0, 2, 0); +lean_ctor_set(x_201, 0, x_200); +lean_ctor_set(x_201, 1, x_9); +x_17 = x_201; +x_18 = x_12; +goto block_25; +} +} +case 2: +{ +uint8_t x_202; +lean_free_object(x_27); +x_202 = !lean_is_exclusive(x_192); +if (x_202 == 0) +{ +lean_object* x_203; lean_object* x_204; +x_203 = lean_ctor_get(x_192, 0); +lean_dec(x_203); +lean_ctor_set_tag(x_192, 1); +lean_ctor_set(x_192, 0, x_5); +x_204 = lean_alloc_ctor(0, 2, 0); +lean_ctor_set(x_204, 0, x_192); +lean_ctor_set(x_204, 1, x_9); +x_17 = x_204; +x_18 = x_12; +goto block_25; +} +else +{ +lean_object* x_205; lean_object* x_206; +lean_dec(x_192); +x_205 = lean_alloc_ctor(1, 1, 0); +lean_ctor_set(x_205, 0, x_5); +x_206 = lean_alloc_ctor(0, 2, 0); +lean_ctor_set(x_206, 0, x_205); +lean_ctor_set(x_206, 1, x_9); +x_17 = x_206; +x_18 = x_12; +goto block_25; +} +} +default: +{ +lean_object* x_207; +lean_dec(x_192); +lean_ctor_set_tag(x_27, 1); +lean_ctor_set(x_27, 0, x_5); +x_207 = 
lean_alloc_ctor(0, 2, 0); +lean_ctor_set(x_207, 0, x_27); +lean_ctor_set(x_207, 1, x_9); +x_17 = x_207; +x_18 = x_12; +goto block_25; +} +} +} +else +{ +lean_object* x_208; lean_object* x_209; +lean_dec(x_27); +x_208 = l_Lean_IR_instInhabitedCtorFieldInfo; +x_209 = lean_array_get(x_208, x_1, x_6); +switch (lean_obj_tag(x_209)) { +case 1: +{ +lean_object* x_210; lean_object* x_211; lean_object* x_212; lean_object* x_213; lean_object* x_214; +if (lean_is_exclusive(x_209)) { + lean_ctor_release(x_209, 0); + x_210 = x_209; +} else { + lean_dec_ref(x_209); + x_210 = lean_box(0); +} +x_211 = lean_box(1); +x_212 = lean_array_push(x_5, x_211); +if (lean_is_scalar(x_210)) { + x_213 = lean_alloc_ctor(1, 1, 0); +} else { + x_213 = x_210; +} +lean_ctor_set(x_213, 0, x_212); +x_214 = lean_alloc_ctor(0, 2, 0); +lean_ctor_set(x_214, 0, x_213); +lean_ctor_set(x_214, 1, x_9); +x_17 = x_214; +x_18 = x_12; +goto block_25; +} +case 2: +{ +lean_object* x_215; lean_object* x_216; lean_object* x_217; +if (lean_is_exclusive(x_209)) { + lean_ctor_release(x_209, 0); + x_215 = x_209; +} else { + lean_dec_ref(x_209); + x_215 = lean_box(0); +} +if (lean_is_scalar(x_215)) { + x_216 = lean_alloc_ctor(1, 1, 0); +} else { + x_216 = x_215; + lean_ctor_set_tag(x_216, 1); +} +lean_ctor_set(x_216, 0, x_5); +x_217 = lean_alloc_ctor(0, 2, 0); +lean_ctor_set(x_217, 0, x_216); +lean_ctor_set(x_217, 1, x_9); +x_17 = x_217; +x_18 = x_12; +goto block_25; +} +default: +{ +lean_object* x_218; lean_object* x_219; +lean_dec(x_209); +x_218 = lean_alloc_ctor(1, 1, 0); +lean_ctor_set(x_218, 0, x_5); +x_219 = lean_alloc_ctor(0, 2, 0); +lean_ctor_set(x_219, 0, x_218); +lean_ctor_set(x_219, 1, x_9); +x_17 = x_219; +x_18 = x_12; +goto block_25; +} +} +} +} +} +block_25: +{ +lean_object* x_19; lean_object* x_20; lean_object* x_21; lean_object* x_22; lean_object* x_23; +x_19 = lean_ctor_get(x_17, 0); +lean_inc(x_19); +x_20 = lean_ctor_get(x_17, 1); +lean_inc(x_20); +lean_dec(x_17); +x_21 = lean_ctor_get(x_19, 0); +lean_inc(x_21); +lean_dec(x_19); +x_22 = lean_ctor_get(x_4, 2); +x_23 = lean_nat_add(x_6, x_22); +lean_dec(x_6); +x_5 = x_21; +x_6 = x_23; +x_7 = lean_box(0); +x_8 = lean_box(0); +x_9 = x_20; +x_12 = x_18; +goto _start; +} +} +} +} +LEAN_EXPORT lean_object* l_Std_Range_forIn_x27_loop___at_Lean_IR_ToIR_lowerLet___spec__8(lean_object* x_1, lean_object* x_2, lean_object* x_3, lean_object* x_4, lean_object* x_5, lean_object* x_6, lean_object* x_7, lean_object* x_8, lean_object* x_9, lean_object* x_10, lean_object* x_11, lean_object* x_12) { +_start: +{ +lean_object* x_13; uint8_t x_14; +x_13 = lean_ctor_get(x_4, 1); +x_14 = lean_nat_dec_lt(x_6, x_13); +if (x_14 == 0) +{ +lean_object* x_15; lean_object* x_16; +lean_dec(x_6); +x_15 = lean_alloc_ctor(0, 2, 0); +lean_ctor_set(x_15, 0, x_5); +lean_ctor_set(x_15, 1, x_9); +x_16 = lean_alloc_ctor(0, 2, 0); +lean_ctor_set(x_16, 0, x_15); +lean_ctor_set(x_16, 1, x_12); +return x_16; +} +else +{ +lean_object* x_17; lean_object* x_18; lean_object* x_26; lean_object* x_27; +x_26 = l_Lean_Compiler_LCNF_instInhabitedArg; +x_27 = lean_array_get(x_26, x_2, x_6); +switch (lean_obj_tag(x_27)) { +case 0: +{ +lean_object* x_28; lean_object* x_29; +x_28 = l_Lean_IR_instInhabitedCtorFieldInfo; +x_29 = lean_array_get(x_28, x_1, x_6); +switch (lean_obj_tag(x_29)) { +case 1: +{ +uint8_t x_30; +x_30 = !lean_is_exclusive(x_29); +if (x_30 == 0) +{ +lean_object* x_31; lean_object* x_32; lean_object* x_33; lean_object* x_34; +x_31 = lean_ctor_get(x_29, 0); +lean_dec(x_31); +x_32 = lean_box(1); +x_33 = 
lean_array_push(x_5, x_32); +lean_ctor_set(x_29, 0, x_33); +x_34 = lean_alloc_ctor(0, 2, 0); +lean_ctor_set(x_34, 0, x_29); +lean_ctor_set(x_34, 1, x_9); +x_17 = x_34; +x_18 = x_12; +goto block_25; +} +else +{ +lean_object* x_35; lean_object* x_36; lean_object* x_37; lean_object* x_38; +lean_dec(x_29); +x_35 = lean_box(1); +x_36 = lean_array_push(x_5, x_35); +x_37 = lean_alloc_ctor(1, 1, 0); +lean_ctor_set(x_37, 0, x_36); +x_38 = lean_alloc_ctor(0, 2, 0); +lean_ctor_set(x_38, 0, x_37); +lean_ctor_set(x_38, 1, x_9); +x_17 = x_38; +x_18 = x_12; +goto block_25; +} +} +case 2: +{ +uint8_t x_39; +x_39 = !lean_is_exclusive(x_29); +if (x_39 == 0) +{ +lean_object* x_40; lean_object* x_41; +x_40 = lean_ctor_get(x_29, 0); +lean_dec(x_40); +lean_ctor_set_tag(x_29, 1); +lean_ctor_set(x_29, 0, x_5); +x_41 = lean_alloc_ctor(0, 2, 0); +lean_ctor_set(x_41, 0, x_29); +lean_ctor_set(x_41, 1, x_9); +x_17 = x_41; +x_18 = x_12; +goto block_25; +} +else +{ +lean_object* x_42; lean_object* x_43; +lean_dec(x_29); +x_42 = lean_alloc_ctor(1, 1, 0); +lean_ctor_set(x_42, 0, x_5); +x_43 = lean_alloc_ctor(0, 2, 0); +lean_ctor_set(x_43, 0, x_42); +lean_ctor_set(x_43, 1, x_9); +x_17 = x_43; +x_18 = x_12; +goto block_25; +} +} +default: +{ +lean_object* x_44; lean_object* x_45; +lean_dec(x_29); +x_44 = lean_alloc_ctor(1, 1, 0); +lean_ctor_set(x_44, 0, x_5); +x_45 = lean_alloc_ctor(0, 2, 0); +lean_ctor_set(x_45, 0, x_44); +lean_ctor_set(x_45, 1, x_9); +x_17 = x_45; +x_18 = x_12; +goto block_25; +} +} +} +case 1: +{ +lean_object* x_46; uint8_t x_47; +x_46 = lean_ctor_get(x_9, 0); +lean_inc(x_46); +x_47 = !lean_is_exclusive(x_27); +if (x_47 == 0) +{ +uint8_t x_48; +x_48 = !lean_is_exclusive(x_46); +if (x_48 == 0) +{ +lean_object* x_49; lean_object* x_50; lean_object* x_51; lean_object* x_52; uint64_t x_53; uint64_t x_54; uint64_t x_55; uint64_t x_56; uint64_t x_57; uint64_t x_58; uint64_t x_59; size_t x_60; size_t x_61; size_t x_62; size_t x_63; size_t x_64; lean_object* x_65; lean_object* x_66; +x_49 = lean_ctor_get(x_27, 0); +x_50 = lean_ctor_get(x_46, 1); +x_51 = lean_ctor_get(x_46, 0); +lean_dec(x_51); +x_52 = lean_array_get_size(x_50); +x_53 = l___private_Lean_Expr_0__Lean_hashFVarId____x40_Lean_Expr___hyg_1730_(x_49); +x_54 = 32; +x_55 = lean_uint64_shift_right(x_53, x_54); +x_56 = lean_uint64_xor(x_53, x_55); +x_57 = 16; +x_58 = lean_uint64_shift_right(x_56, x_57); +x_59 = lean_uint64_xor(x_56, x_58); +x_60 = lean_uint64_to_usize(x_59); +x_61 = lean_usize_of_nat(x_52); +lean_dec(x_52); +x_62 = 1; +x_63 = lean_usize_sub(x_61, x_62); +x_64 = lean_usize_land(x_60, x_63); +x_65 = lean_array_uget(x_50, x_64); +lean_dec(x_50); +x_66 = l_Std_DHashMap_Internal_AssocList_get_x3f___at_Lean_IR_ToIR_lowerArg___spec__1(x_49, x_65); +lean_dec(x_65); +lean_dec(x_49); +if (lean_obj_tag(x_66) == 0) +{ +lean_ctor_set(x_27, 0, x_5); +lean_ctor_set(x_46, 1, x_9); +lean_ctor_set(x_46, 0, x_27); +x_17 = x_46; +x_18 = x_12; +goto block_25; +} +else +{ +uint8_t x_67; +lean_free_object(x_27); +x_67 = !lean_is_exclusive(x_66); +if (x_67 == 0) +{ +lean_object* x_68; +x_68 = lean_ctor_get(x_66, 0); +switch (lean_obj_tag(x_68)) { +case 0: +{ +uint8_t x_69; +lean_free_object(x_66); +x_69 = !lean_is_exclusive(x_68); +if (x_69 == 0) +{ +lean_object* x_70; lean_object* x_71; lean_object* x_72; +x_70 = lean_ctor_get(x_68, 0); +x_71 = l_Lean_IR_instInhabitedCtorFieldInfo; +x_72 = lean_array_get(x_71, x_1, x_6); +switch (lean_obj_tag(x_72)) { +case 1: +{ +uint8_t x_73; +x_73 = !lean_is_exclusive(x_72); +if (x_73 == 0) +{ +lean_object* x_74; lean_object* 
x_75; +x_74 = lean_ctor_get(x_72, 0); +lean_dec(x_74); +lean_ctor_set_tag(x_72, 0); +lean_ctor_set(x_72, 0, x_70); +x_75 = lean_array_push(x_5, x_72); +lean_ctor_set_tag(x_68, 1); +lean_ctor_set(x_68, 0, x_75); +lean_ctor_set(x_46, 1, x_9); +lean_ctor_set(x_46, 0, x_68); +x_17 = x_46; +x_18 = x_12; +goto block_25; +} +else +{ +lean_object* x_76; lean_object* x_77; +lean_dec(x_72); +x_76 = lean_alloc_ctor(0, 1, 0); +lean_ctor_set(x_76, 0, x_70); +x_77 = lean_array_push(x_5, x_76); +lean_ctor_set_tag(x_68, 1); +lean_ctor_set(x_68, 0, x_77); +lean_ctor_set(x_46, 1, x_9); +lean_ctor_set(x_46, 0, x_68); +x_17 = x_46; +x_18 = x_12; +goto block_25; +} +} +case 2: +{ +uint8_t x_78; +lean_free_object(x_68); +lean_dec(x_70); +x_78 = !lean_is_exclusive(x_72); +if (x_78 == 0) +{ +lean_object* x_79; +x_79 = lean_ctor_get(x_72, 0); +lean_dec(x_79); +lean_ctor_set_tag(x_72, 1); +lean_ctor_set(x_72, 0, x_5); +lean_ctor_set(x_46, 1, x_9); +lean_ctor_set(x_46, 0, x_72); +x_17 = x_46; +x_18 = x_12; +goto block_25; +} +else +{ +lean_object* x_80; +lean_dec(x_72); +x_80 = lean_alloc_ctor(1, 1, 0); +lean_ctor_set(x_80, 0, x_5); +lean_ctor_set(x_46, 1, x_9); +lean_ctor_set(x_46, 0, x_80); +x_17 = x_46; +x_18 = x_12; +goto block_25; +} +} +default: +{ +lean_dec(x_72); +lean_dec(x_70); +lean_ctor_set_tag(x_68, 1); +lean_ctor_set(x_68, 0, x_5); +lean_ctor_set(x_46, 1, x_9); +lean_ctor_set(x_46, 0, x_68); +x_17 = x_46; +x_18 = x_12; +goto block_25; +} +} +} +else +{ +lean_object* x_81; lean_object* x_82; lean_object* x_83; +x_81 = lean_ctor_get(x_68, 0); +lean_inc(x_81); +lean_dec(x_68); +x_82 = l_Lean_IR_instInhabitedCtorFieldInfo; +x_83 = lean_array_get(x_82, x_1, x_6); +switch (lean_obj_tag(x_83)) { +case 1: +{ +lean_object* x_84; lean_object* x_85; lean_object* x_86; lean_object* x_87; +if (lean_is_exclusive(x_83)) { + lean_ctor_release(x_83, 0); + x_84 = x_83; +} else { + lean_dec_ref(x_83); + x_84 = lean_box(0); +} +if (lean_is_scalar(x_84)) { + x_85 = lean_alloc_ctor(0, 1, 0); +} else { + x_85 = x_84; + lean_ctor_set_tag(x_85, 0); +} +lean_ctor_set(x_85, 0, x_81); +x_86 = lean_array_push(x_5, x_85); +x_87 = lean_alloc_ctor(1, 1, 0); +lean_ctor_set(x_87, 0, x_86); +lean_ctor_set(x_46, 1, x_9); +lean_ctor_set(x_46, 0, x_87); +x_17 = x_46; +x_18 = x_12; +goto block_25; +} +case 2: +{ +lean_object* x_88; lean_object* x_89; +lean_dec(x_81); +if (lean_is_exclusive(x_83)) { + lean_ctor_release(x_83, 0); + x_88 = x_83; +} else { + lean_dec_ref(x_83); + x_88 = lean_box(0); +} +if (lean_is_scalar(x_88)) { + x_89 = lean_alloc_ctor(1, 1, 0); +} else { + x_89 = x_88; + lean_ctor_set_tag(x_89, 1); +} +lean_ctor_set(x_89, 0, x_5); +lean_ctor_set(x_46, 1, x_9); +lean_ctor_set(x_46, 0, x_89); +x_17 = x_46; +x_18 = x_12; +goto block_25; +} +default: +{ +lean_object* x_90; +lean_dec(x_83); +lean_dec(x_81); +x_90 = lean_alloc_ctor(1, 1, 0); +lean_ctor_set(x_90, 0, x_5); +lean_ctor_set(x_46, 1, x_9); +lean_ctor_set(x_46, 0, x_90); +x_17 = x_46; +x_18 = x_12; +goto block_25; +} +} +} +} +case 1: +{ +uint8_t x_91; +lean_free_object(x_66); +x_91 = !lean_is_exclusive(x_68); +if (x_91 == 0) +{ +lean_object* x_92; +x_92 = lean_ctor_get(x_68, 0); +lean_dec(x_92); +lean_ctor_set(x_68, 0, x_5); +lean_ctor_set(x_46, 1, x_9); +lean_ctor_set(x_46, 0, x_68); +x_17 = x_46; +x_18 = x_12; +goto block_25; +} +else +{ +lean_object* x_93; +lean_dec(x_68); +x_93 = lean_alloc_ctor(1, 1, 0); +lean_ctor_set(x_93, 0, x_5); +lean_ctor_set(x_46, 1, x_9); +lean_ctor_set(x_46, 0, x_93); +x_17 = x_46; +x_18 = x_12; +goto block_25; +} +} +default: +{ 
+lean_ctor_set(x_66, 0, x_5); +lean_ctor_set(x_46, 1, x_9); +lean_ctor_set(x_46, 0, x_66); +x_17 = x_46; +x_18 = x_12; +goto block_25; +} +} +} +else +{ +lean_object* x_94; +x_94 = lean_ctor_get(x_66, 0); +lean_inc(x_94); +lean_dec(x_66); +switch (lean_obj_tag(x_94)) { +case 0: +{ +lean_object* x_95; lean_object* x_96; lean_object* x_97; lean_object* x_98; +x_95 = lean_ctor_get(x_94, 0); +lean_inc(x_95); +if (lean_is_exclusive(x_94)) { + lean_ctor_release(x_94, 0); + x_96 = x_94; +} else { + lean_dec_ref(x_94); + x_96 = lean_box(0); +} +x_97 = l_Lean_IR_instInhabitedCtorFieldInfo; +x_98 = lean_array_get(x_97, x_1, x_6); +switch (lean_obj_tag(x_98)) { +case 1: +{ +lean_object* x_99; lean_object* x_100; lean_object* x_101; lean_object* x_102; +if (lean_is_exclusive(x_98)) { + lean_ctor_release(x_98, 0); + x_99 = x_98; +} else { + lean_dec_ref(x_98); + x_99 = lean_box(0); +} +if (lean_is_scalar(x_99)) { + x_100 = lean_alloc_ctor(0, 1, 0); +} else { + x_100 = x_99; + lean_ctor_set_tag(x_100, 0); +} +lean_ctor_set(x_100, 0, x_95); +x_101 = lean_array_push(x_5, x_100); +if (lean_is_scalar(x_96)) { + x_102 = lean_alloc_ctor(1, 1, 0); +} else { + x_102 = x_96; + lean_ctor_set_tag(x_102, 1); +} +lean_ctor_set(x_102, 0, x_101); +lean_ctor_set(x_46, 1, x_9); +lean_ctor_set(x_46, 0, x_102); +x_17 = x_46; +x_18 = x_12; +goto block_25; +} +case 2: +{ +lean_object* x_103; lean_object* x_104; +lean_dec(x_96); +lean_dec(x_95); +if (lean_is_exclusive(x_98)) { + lean_ctor_release(x_98, 0); + x_103 = x_98; +} else { + lean_dec_ref(x_98); + x_103 = lean_box(0); +} +if (lean_is_scalar(x_103)) { + x_104 = lean_alloc_ctor(1, 1, 0); +} else { + x_104 = x_103; + lean_ctor_set_tag(x_104, 1); +} +lean_ctor_set(x_104, 0, x_5); +lean_ctor_set(x_46, 1, x_9); +lean_ctor_set(x_46, 0, x_104); +x_17 = x_46; +x_18 = x_12; +goto block_25; +} +default: +{ +lean_object* x_105; +lean_dec(x_98); +lean_dec(x_95); +if (lean_is_scalar(x_96)) { + x_105 = lean_alloc_ctor(1, 1, 0); +} else { + x_105 = x_96; + lean_ctor_set_tag(x_105, 1); +} +lean_ctor_set(x_105, 0, x_5); +lean_ctor_set(x_46, 1, x_9); +lean_ctor_set(x_46, 0, x_105); +x_17 = x_46; +x_18 = x_12; +goto block_25; +} +} +} +case 1: +{ +lean_object* x_106; lean_object* x_107; +if (lean_is_exclusive(x_94)) { + lean_ctor_release(x_94, 0); + x_106 = x_94; +} else { + lean_dec_ref(x_94); + x_106 = lean_box(0); +} +if (lean_is_scalar(x_106)) { + x_107 = lean_alloc_ctor(1, 1, 0); +} else { + x_107 = x_106; +} +lean_ctor_set(x_107, 0, x_5); +lean_ctor_set(x_46, 1, x_9); +lean_ctor_set(x_46, 0, x_107); +x_17 = x_46; +x_18 = x_12; +goto block_25; +} +default: +{ +lean_object* x_108; +x_108 = lean_alloc_ctor(1, 1, 0); +lean_ctor_set(x_108, 0, x_5); +lean_ctor_set(x_46, 1, x_9); +lean_ctor_set(x_46, 0, x_108); +x_17 = x_46; +x_18 = x_12; +goto block_25; +} +} +} +} +} +else +{ +lean_object* x_109; lean_object* x_110; lean_object* x_111; uint64_t x_112; uint64_t x_113; uint64_t x_114; uint64_t x_115; uint64_t x_116; uint64_t x_117; uint64_t x_118; size_t x_119; size_t x_120; size_t x_121; size_t x_122; size_t x_123; lean_object* x_124; lean_object* x_125; +x_109 = lean_ctor_get(x_27, 0); +x_110 = lean_ctor_get(x_46, 1); +lean_inc(x_110); +lean_dec(x_46); +x_111 = lean_array_get_size(x_110); +x_112 = l___private_Lean_Expr_0__Lean_hashFVarId____x40_Lean_Expr___hyg_1730_(x_109); +x_113 = 32; +x_114 = lean_uint64_shift_right(x_112, x_113); +x_115 = lean_uint64_xor(x_112, x_114); +x_116 = 16; +x_117 = lean_uint64_shift_right(x_115, x_116); +x_118 = lean_uint64_xor(x_115, x_117); +x_119 = 
lean_uint64_to_usize(x_118); +x_120 = lean_usize_of_nat(x_111); +lean_dec(x_111); +x_121 = 1; +x_122 = lean_usize_sub(x_120, x_121); +x_123 = lean_usize_land(x_119, x_122); +x_124 = lean_array_uget(x_110, x_123); +lean_dec(x_110); +x_125 = l_Std_DHashMap_Internal_AssocList_get_x3f___at_Lean_IR_ToIR_lowerArg___spec__1(x_109, x_124); +lean_dec(x_124); +lean_dec(x_109); +if (lean_obj_tag(x_125) == 0) +{ +lean_object* x_126; +lean_ctor_set(x_27, 0, x_5); +x_126 = lean_alloc_ctor(0, 2, 0); +lean_ctor_set(x_126, 0, x_27); +lean_ctor_set(x_126, 1, x_9); +x_17 = x_126; +x_18 = x_12; +goto block_25; +} +else +{ +lean_object* x_127; lean_object* x_128; +lean_free_object(x_27); +x_127 = lean_ctor_get(x_125, 0); +lean_inc(x_127); +if (lean_is_exclusive(x_125)) { + lean_ctor_release(x_125, 0); + x_128 = x_125; +} else { + lean_dec_ref(x_125); + x_128 = lean_box(0); +} +switch (lean_obj_tag(x_127)) { +case 0: +{ +lean_object* x_129; lean_object* x_130; lean_object* x_131; lean_object* x_132; +lean_dec(x_128); +x_129 = lean_ctor_get(x_127, 0); +lean_inc(x_129); +if (lean_is_exclusive(x_127)) { + lean_ctor_release(x_127, 0); + x_130 = x_127; +} else { + lean_dec_ref(x_127); + x_130 = lean_box(0); +} +x_131 = l_Lean_IR_instInhabitedCtorFieldInfo; +x_132 = lean_array_get(x_131, x_1, x_6); +switch (lean_obj_tag(x_132)) { +case 1: +{ +lean_object* x_133; lean_object* x_134; lean_object* x_135; lean_object* x_136; lean_object* x_137; +if (lean_is_exclusive(x_132)) { + lean_ctor_release(x_132, 0); + x_133 = x_132; +} else { + lean_dec_ref(x_132); + x_133 = lean_box(0); +} +if (lean_is_scalar(x_133)) { + x_134 = lean_alloc_ctor(0, 1, 0); +} else { + x_134 = x_133; + lean_ctor_set_tag(x_134, 0); +} +lean_ctor_set(x_134, 0, x_129); +x_135 = lean_array_push(x_5, x_134); +if (lean_is_scalar(x_130)) { + x_136 = lean_alloc_ctor(1, 1, 0); +} else { + x_136 = x_130; + lean_ctor_set_tag(x_136, 1); +} +lean_ctor_set(x_136, 0, x_135); +x_137 = lean_alloc_ctor(0, 2, 0); +lean_ctor_set(x_137, 0, x_136); +lean_ctor_set(x_137, 1, x_9); +x_17 = x_137; +x_18 = x_12; +goto block_25; +} +case 2: +{ +lean_object* x_138; lean_object* x_139; lean_object* x_140; +lean_dec(x_130); +lean_dec(x_129); +if (lean_is_exclusive(x_132)) { + lean_ctor_release(x_132, 0); + x_138 = x_132; +} else { + lean_dec_ref(x_132); + x_138 = lean_box(0); +} +if (lean_is_scalar(x_138)) { + x_139 = lean_alloc_ctor(1, 1, 0); +} else { + x_139 = x_138; + lean_ctor_set_tag(x_139, 1); +} +lean_ctor_set(x_139, 0, x_5); +x_140 = lean_alloc_ctor(0, 2, 0); +lean_ctor_set(x_140, 0, x_139); +lean_ctor_set(x_140, 1, x_9); +x_17 = x_140; +x_18 = x_12; +goto block_25; +} +default: +{ +lean_object* x_141; lean_object* x_142; +lean_dec(x_132); +lean_dec(x_129); +if (lean_is_scalar(x_130)) { + x_141 = lean_alloc_ctor(1, 1, 0); +} else { + x_141 = x_130; + lean_ctor_set_tag(x_141, 1); +} +lean_ctor_set(x_141, 0, x_5); +x_142 = lean_alloc_ctor(0, 2, 0); +lean_ctor_set(x_142, 0, x_141); +lean_ctor_set(x_142, 1, x_9); +x_17 = x_142; +x_18 = x_12; +goto block_25; +} +} +} +case 1: +{ +lean_object* x_143; lean_object* x_144; lean_object* x_145; +lean_dec(x_128); +if (lean_is_exclusive(x_127)) { + lean_ctor_release(x_127, 0); + x_143 = x_127; +} else { + lean_dec_ref(x_127); + x_143 = lean_box(0); +} +if (lean_is_scalar(x_143)) { + x_144 = lean_alloc_ctor(1, 1, 0); +} else { + x_144 = x_143; +} +lean_ctor_set(x_144, 0, x_5); +x_145 = lean_alloc_ctor(0, 2, 0); +lean_ctor_set(x_145, 0, x_144); +lean_ctor_set(x_145, 1, x_9); +x_17 = x_145; +x_18 = x_12; +goto block_25; +} +default: +{ 
+lean_object* x_146; lean_object* x_147; +if (lean_is_scalar(x_128)) { + x_146 = lean_alloc_ctor(1, 1, 0); +} else { + x_146 = x_128; +} +lean_ctor_set(x_146, 0, x_5); +x_147 = lean_alloc_ctor(0, 2, 0); +lean_ctor_set(x_147, 0, x_146); +lean_ctor_set(x_147, 1, x_9); +x_17 = x_147; +x_18 = x_12; +goto block_25; +} +} +} +} +} +else +{ +lean_object* x_148; lean_object* x_149; lean_object* x_150; lean_object* x_151; uint64_t x_152; uint64_t x_153; uint64_t x_154; uint64_t x_155; uint64_t x_156; uint64_t x_157; uint64_t x_158; size_t x_159; size_t x_160; size_t x_161; size_t x_162; size_t x_163; lean_object* x_164; lean_object* x_165; +x_148 = lean_ctor_get(x_27, 0); +lean_inc(x_148); +lean_dec(x_27); +x_149 = lean_ctor_get(x_46, 1); +lean_inc(x_149); +if (lean_is_exclusive(x_46)) { + lean_ctor_release(x_46, 0); + lean_ctor_release(x_46, 1); + x_150 = x_46; +} else { + lean_dec_ref(x_46); + x_150 = lean_box(0); +} +x_151 = lean_array_get_size(x_149); +x_152 = l___private_Lean_Expr_0__Lean_hashFVarId____x40_Lean_Expr___hyg_1730_(x_148); +x_153 = 32; +x_154 = lean_uint64_shift_right(x_152, x_153); +x_155 = lean_uint64_xor(x_152, x_154); +x_156 = 16; +x_157 = lean_uint64_shift_right(x_155, x_156); +x_158 = lean_uint64_xor(x_155, x_157); +x_159 = lean_uint64_to_usize(x_158); +x_160 = lean_usize_of_nat(x_151); +lean_dec(x_151); +x_161 = 1; +x_162 = lean_usize_sub(x_160, x_161); +x_163 = lean_usize_land(x_159, x_162); +x_164 = lean_array_uget(x_149, x_163); +lean_dec(x_149); +x_165 = l_Std_DHashMap_Internal_AssocList_get_x3f___at_Lean_IR_ToIR_lowerArg___spec__1(x_148, x_164); +lean_dec(x_164); +lean_dec(x_148); +if (lean_obj_tag(x_165) == 0) +{ +lean_object* x_166; lean_object* x_167; +x_166 = lean_alloc_ctor(1, 1, 0); +lean_ctor_set(x_166, 0, x_5); +if (lean_is_scalar(x_150)) { + x_167 = lean_alloc_ctor(0, 2, 0); +} else { + x_167 = x_150; +} +lean_ctor_set(x_167, 0, x_166); +lean_ctor_set(x_167, 1, x_9); +x_17 = x_167; +x_18 = x_12; +goto block_25; +} +else +{ +lean_object* x_168; lean_object* x_169; +x_168 = lean_ctor_get(x_165, 0); +lean_inc(x_168); +if (lean_is_exclusive(x_165)) { + lean_ctor_release(x_165, 0); + x_169 = x_165; +} else { + lean_dec_ref(x_165); + x_169 = lean_box(0); +} +switch (lean_obj_tag(x_168)) { +case 0: +{ +lean_object* x_170; lean_object* x_171; lean_object* x_172; lean_object* x_173; +lean_dec(x_169); +x_170 = lean_ctor_get(x_168, 0); +lean_inc(x_170); +if (lean_is_exclusive(x_168)) { + lean_ctor_release(x_168, 0); + x_171 = x_168; +} else { + lean_dec_ref(x_168); + x_171 = lean_box(0); +} +x_172 = l_Lean_IR_instInhabitedCtorFieldInfo; +x_173 = lean_array_get(x_172, x_1, x_6); +switch (lean_obj_tag(x_173)) { +case 1: +{ +lean_object* x_174; lean_object* x_175; lean_object* x_176; lean_object* x_177; lean_object* x_178; +if (lean_is_exclusive(x_173)) { + lean_ctor_release(x_173, 0); + x_174 = x_173; +} else { + lean_dec_ref(x_173); + x_174 = lean_box(0); +} +if (lean_is_scalar(x_174)) { + x_175 = lean_alloc_ctor(0, 1, 0); +} else { + x_175 = x_174; + lean_ctor_set_tag(x_175, 0); +} +lean_ctor_set(x_175, 0, x_170); +x_176 = lean_array_push(x_5, x_175); +if (lean_is_scalar(x_171)) { + x_177 = lean_alloc_ctor(1, 1, 0); +} else { + x_177 = x_171; + lean_ctor_set_tag(x_177, 1); +} +lean_ctor_set(x_177, 0, x_176); +if (lean_is_scalar(x_150)) { + x_178 = lean_alloc_ctor(0, 2, 0); +} else { + x_178 = x_150; +} +lean_ctor_set(x_178, 0, x_177); +lean_ctor_set(x_178, 1, x_9); +x_17 = x_178; +x_18 = x_12; +goto block_25; +} +case 2: +{ +lean_object* x_179; lean_object* x_180; 
lean_object* x_181; +lean_dec(x_171); +lean_dec(x_170); +if (lean_is_exclusive(x_173)) { + lean_ctor_release(x_173, 0); + x_179 = x_173; +} else { + lean_dec_ref(x_173); + x_179 = lean_box(0); +} +if (lean_is_scalar(x_179)) { + x_180 = lean_alloc_ctor(1, 1, 0); +} else { + x_180 = x_179; + lean_ctor_set_tag(x_180, 1); +} +lean_ctor_set(x_180, 0, x_5); +if (lean_is_scalar(x_150)) { + x_181 = lean_alloc_ctor(0, 2, 0); +} else { + x_181 = x_150; +} +lean_ctor_set(x_181, 0, x_180); +lean_ctor_set(x_181, 1, x_9); +x_17 = x_181; +x_18 = x_12; +goto block_25; +} +default: +{ +lean_object* x_182; lean_object* x_183; +lean_dec(x_173); +lean_dec(x_170); +if (lean_is_scalar(x_171)) { + x_182 = lean_alloc_ctor(1, 1, 0); +} else { + x_182 = x_171; + lean_ctor_set_tag(x_182, 1); +} +lean_ctor_set(x_182, 0, x_5); +if (lean_is_scalar(x_150)) { + x_183 = lean_alloc_ctor(0, 2, 0); +} else { + x_183 = x_150; +} +lean_ctor_set(x_183, 0, x_182); +lean_ctor_set(x_183, 1, x_9); +x_17 = x_183; +x_18 = x_12; +goto block_25; +} +} +} +case 1: +{ +lean_object* x_184; lean_object* x_185; lean_object* x_186; +lean_dec(x_169); +if (lean_is_exclusive(x_168)) { + lean_ctor_release(x_168, 0); + x_184 = x_168; +} else { + lean_dec_ref(x_168); + x_184 = lean_box(0); +} +if (lean_is_scalar(x_184)) { + x_185 = lean_alloc_ctor(1, 1, 0); +} else { + x_185 = x_184; +} +lean_ctor_set(x_185, 0, x_5); +if (lean_is_scalar(x_150)) { + x_186 = lean_alloc_ctor(0, 2, 0); +} else { + x_186 = x_150; +} +lean_ctor_set(x_186, 0, x_185); +lean_ctor_set(x_186, 1, x_9); +x_17 = x_186; +x_18 = x_12; +goto block_25; +} +default: +{ +lean_object* x_187; lean_object* x_188; +if (lean_is_scalar(x_169)) { + x_187 = lean_alloc_ctor(1, 1, 0); +} else { + x_187 = x_169; +} +lean_ctor_set(x_187, 0, x_5); +if (lean_is_scalar(x_150)) { + x_188 = lean_alloc_ctor(0, 2, 0); +} else { + x_188 = x_150; +} +lean_ctor_set(x_188, 0, x_187); +lean_ctor_set(x_188, 1, x_9); +x_17 = x_188; +x_18 = x_12; +goto block_25; +} +} +} +} +} +default: +{ +uint8_t x_189; +x_189 = !lean_is_exclusive(x_27); +if (x_189 == 0) +{ +lean_object* x_190; lean_object* x_191; lean_object* x_192; +x_190 = lean_ctor_get(x_27, 0); +lean_dec(x_190); +x_191 = l_Lean_IR_instInhabitedCtorFieldInfo; +x_192 = lean_array_get(x_191, x_1, x_6); +switch (lean_obj_tag(x_192)) { +case 1: +{ +uint8_t x_193; +lean_free_object(x_27); +x_193 = !lean_is_exclusive(x_192); +if (x_193 == 0) +{ +lean_object* x_194; lean_object* x_195; lean_object* x_196; lean_object* x_197; +x_194 = lean_ctor_get(x_192, 0); +lean_dec(x_194); +x_195 = lean_box(1); +x_196 = lean_array_push(x_5, x_195); +lean_ctor_set(x_192, 0, x_196); +x_197 = lean_alloc_ctor(0, 2, 0); +lean_ctor_set(x_197, 0, x_192); +lean_ctor_set(x_197, 1, x_9); +x_17 = x_197; +x_18 = x_12; +goto block_25; +} +else +{ +lean_object* x_198; lean_object* x_199; lean_object* x_200; lean_object* x_201; +lean_dec(x_192); +x_198 = lean_box(1); +x_199 = lean_array_push(x_5, x_198); +x_200 = lean_alloc_ctor(1, 1, 0); +lean_ctor_set(x_200, 0, x_199); +x_201 = lean_alloc_ctor(0, 2, 0); +lean_ctor_set(x_201, 0, x_200); +lean_ctor_set(x_201, 1, x_9); +x_17 = x_201; +x_18 = x_12; +goto block_25; +} +} +case 2: +{ +uint8_t x_202; +lean_free_object(x_27); +x_202 = !lean_is_exclusive(x_192); +if (x_202 == 0) +{ +lean_object* x_203; lean_object* x_204; +x_203 = lean_ctor_get(x_192, 0); +lean_dec(x_203); +lean_ctor_set_tag(x_192, 1); +lean_ctor_set(x_192, 0, x_5); +x_204 = lean_alloc_ctor(0, 2, 0); +lean_ctor_set(x_204, 0, x_192); +lean_ctor_set(x_204, 1, x_9); +x_17 = x_204; 
+x_18 = x_12; +goto block_25; +} +else +{ +lean_object* x_205; lean_object* x_206; +lean_dec(x_192); +x_205 = lean_alloc_ctor(1, 1, 0); +lean_ctor_set(x_205, 0, x_5); +x_206 = lean_alloc_ctor(0, 2, 0); +lean_ctor_set(x_206, 0, x_205); +lean_ctor_set(x_206, 1, x_9); +x_17 = x_206; +x_18 = x_12; +goto block_25; +} +} +default: +{ +lean_object* x_207; +lean_dec(x_192); +lean_ctor_set_tag(x_27, 1); +lean_ctor_set(x_27, 0, x_5); +x_207 = lean_alloc_ctor(0, 2, 0); +lean_ctor_set(x_207, 0, x_27); +lean_ctor_set(x_207, 1, x_9); +x_17 = x_207; +x_18 = x_12; +goto block_25; +} +} +} +else +{ +lean_object* x_208; lean_object* x_209; +lean_dec(x_27); +x_208 = l_Lean_IR_instInhabitedCtorFieldInfo; +x_209 = lean_array_get(x_208, x_1, x_6); +switch (lean_obj_tag(x_209)) { +case 1: +{ +lean_object* x_210; lean_object* x_211; lean_object* x_212; lean_object* x_213; lean_object* x_214; +if (lean_is_exclusive(x_209)) { + lean_ctor_release(x_209, 0); + x_210 = x_209; +} else { + lean_dec_ref(x_209); + x_210 = lean_box(0); +} +x_211 = lean_box(1); +x_212 = lean_array_push(x_5, x_211); +if (lean_is_scalar(x_210)) { + x_213 = lean_alloc_ctor(1, 1, 0); +} else { + x_213 = x_210; +} +lean_ctor_set(x_213, 0, x_212); +x_214 = lean_alloc_ctor(0, 2, 0); +lean_ctor_set(x_214, 0, x_213); +lean_ctor_set(x_214, 1, x_9); +x_17 = x_214; +x_18 = x_12; +goto block_25; +} +case 2: +{ +lean_object* x_215; lean_object* x_216; lean_object* x_217; +if (lean_is_exclusive(x_209)) { + lean_ctor_release(x_209, 0); + x_215 = x_209; +} else { + lean_dec_ref(x_209); + x_215 = lean_box(0); +} +if (lean_is_scalar(x_215)) { + x_216 = lean_alloc_ctor(1, 1, 0); +} else { + x_216 = x_215; + lean_ctor_set_tag(x_216, 1); +} +lean_ctor_set(x_216, 0, x_5); +x_217 = lean_alloc_ctor(0, 2, 0); +lean_ctor_set(x_217, 0, x_216); +lean_ctor_set(x_217, 1, x_9); +x_17 = x_217; +x_18 = x_12; +goto block_25; +} +default: +{ +lean_object* x_218; lean_object* x_219; +lean_dec(x_209); +x_218 = lean_alloc_ctor(1, 1, 0); +lean_ctor_set(x_218, 0, x_5); +x_219 = lean_alloc_ctor(0, 2, 0); +lean_ctor_set(x_219, 0, x_218); +lean_ctor_set(x_219, 1, x_9); +x_17 = x_219; +x_18 = x_12; +goto block_25; +} +} +} +} +} +block_25: +{ +lean_object* x_19; lean_object* x_20; lean_object* x_21; lean_object* x_22; lean_object* x_23; +x_19 = lean_ctor_get(x_17, 0); +lean_inc(x_19); +x_20 = lean_ctor_get(x_17, 1); +lean_inc(x_20); +lean_dec(x_17); +x_21 = lean_ctor_get(x_19, 0); +lean_inc(x_21); +lean_dec(x_19); +x_22 = lean_ctor_get(x_4, 2); +x_23 = lean_nat_add(x_6, x_22); +lean_dec(x_6); +x_5 = x_21; +x_6 = x_23; +x_7 = lean_box(0); +x_8 = lean_box(0); +x_9 = x_20; +x_12 = x_18; +goto _start; +} +} +} +} +LEAN_EXPORT lean_object* l_Std_Range_forIn_x27_loop___at_Lean_IR_ToIR_lowerLet___spec__9(lean_object* x_1, lean_object* x_2, lean_object* x_3, lean_object* x_4, lean_object* x_5, lean_object* x_6, lean_object* x_7, lean_object* x_8, lean_object* x_9, lean_object* x_10, lean_object* x_11, lean_object* x_12) { +_start: +{ +lean_object* x_13; uint8_t x_14; +x_13 = lean_ctor_get(x_4, 1); +x_14 = lean_nat_dec_lt(x_6, x_13); +if (x_14 == 0) +{ +lean_object* x_15; lean_object* x_16; +lean_dec(x_6); +x_15 = lean_alloc_ctor(0, 2, 0); +lean_ctor_set(x_15, 0, x_5); +lean_ctor_set(x_15, 1, x_9); +x_16 = lean_alloc_ctor(0, 2, 0); +lean_ctor_set(x_16, 0, x_15); +lean_ctor_set(x_16, 1, x_12); +return x_16; +} +else +{ +lean_object* x_17; lean_object* x_18; lean_object* x_26; lean_object* x_27; +x_26 = l_Lean_Compiler_LCNF_instInhabitedArg; +x_27 = lean_array_get(x_26, x_2, x_6); +switch 
(lean_obj_tag(x_27)) { +case 0: +{ +lean_object* x_28; lean_object* x_29; +x_28 = l_Lean_IR_instInhabitedCtorFieldInfo; +x_29 = lean_array_get(x_28, x_1, x_6); +switch (lean_obj_tag(x_29)) { +case 1: +{ +uint8_t x_30; +x_30 = !lean_is_exclusive(x_29); +if (x_30 == 0) +{ +lean_object* x_31; lean_object* x_32; lean_object* x_33; lean_object* x_34; +x_31 = lean_ctor_get(x_29, 0); +lean_dec(x_31); +x_32 = lean_box(1); +x_33 = lean_array_push(x_5, x_32); +lean_ctor_set(x_29, 0, x_33); +x_34 = lean_alloc_ctor(0, 2, 0); +lean_ctor_set(x_34, 0, x_29); +lean_ctor_set(x_34, 1, x_9); +x_17 = x_34; +x_18 = x_12; +goto block_25; +} +else +{ +lean_object* x_35; lean_object* x_36; lean_object* x_37; lean_object* x_38; +lean_dec(x_29); +x_35 = lean_box(1); +x_36 = lean_array_push(x_5, x_35); +x_37 = lean_alloc_ctor(1, 1, 0); +lean_ctor_set(x_37, 0, x_36); +x_38 = lean_alloc_ctor(0, 2, 0); +lean_ctor_set(x_38, 0, x_37); +lean_ctor_set(x_38, 1, x_9); +x_17 = x_38; +x_18 = x_12; +goto block_25; +} +} +case 2: +{ +uint8_t x_39; +x_39 = !lean_is_exclusive(x_29); +if (x_39 == 0) +{ +lean_object* x_40; lean_object* x_41; +x_40 = lean_ctor_get(x_29, 0); +lean_dec(x_40); +lean_ctor_set_tag(x_29, 1); +lean_ctor_set(x_29, 0, x_5); +x_41 = lean_alloc_ctor(0, 2, 0); +lean_ctor_set(x_41, 0, x_29); +lean_ctor_set(x_41, 1, x_9); +x_17 = x_41; +x_18 = x_12; +goto block_25; +} +else +{ +lean_object* x_42; lean_object* x_43; +lean_dec(x_29); +x_42 = lean_alloc_ctor(1, 1, 0); +lean_ctor_set(x_42, 0, x_5); +x_43 = lean_alloc_ctor(0, 2, 0); +lean_ctor_set(x_43, 0, x_42); +lean_ctor_set(x_43, 1, x_9); +x_17 = x_43; +x_18 = x_12; +goto block_25; +} +} +default: +{ +lean_object* x_44; lean_object* x_45; +lean_dec(x_29); +x_44 = lean_alloc_ctor(1, 1, 0); +lean_ctor_set(x_44, 0, x_5); +x_45 = lean_alloc_ctor(0, 2, 0); +lean_ctor_set(x_45, 0, x_44); +lean_ctor_set(x_45, 1, x_9); +x_17 = x_45; +x_18 = x_12; +goto block_25; +} +} +} +case 1: +{ +lean_object* x_46; uint8_t x_47; +x_46 = lean_ctor_get(x_9, 0); +lean_inc(x_46); +x_47 = !lean_is_exclusive(x_27); +if (x_47 == 0) +{ +uint8_t x_48; +x_48 = !lean_is_exclusive(x_46); +if (x_48 == 0) +{ +lean_object* x_49; lean_object* x_50; lean_object* x_51; lean_object* x_52; uint64_t x_53; uint64_t x_54; uint64_t x_55; uint64_t x_56; uint64_t x_57; uint64_t x_58; uint64_t x_59; size_t x_60; size_t x_61; size_t x_62; size_t x_63; size_t x_64; lean_object* x_65; lean_object* x_66; +x_49 = lean_ctor_get(x_27, 0); +x_50 = lean_ctor_get(x_46, 1); +x_51 = lean_ctor_get(x_46, 0); +lean_dec(x_51); +x_52 = lean_array_get_size(x_50); +x_53 = l___private_Lean_Expr_0__Lean_hashFVarId____x40_Lean_Expr___hyg_1730_(x_49); +x_54 = 32; +x_55 = lean_uint64_shift_right(x_53, x_54); +x_56 = lean_uint64_xor(x_53, x_55); +x_57 = 16; +x_58 = lean_uint64_shift_right(x_56, x_57); +x_59 = lean_uint64_xor(x_56, x_58); +x_60 = lean_uint64_to_usize(x_59); +x_61 = lean_usize_of_nat(x_52); +lean_dec(x_52); +x_62 = 1; +x_63 = lean_usize_sub(x_61, x_62); +x_64 = lean_usize_land(x_60, x_63); +x_65 = lean_array_uget(x_50, x_64); +lean_dec(x_50); +x_66 = l_Std_DHashMap_Internal_AssocList_get_x3f___at_Lean_IR_ToIR_lowerArg___spec__1(x_49, x_65); +lean_dec(x_65); +lean_dec(x_49); +if (lean_obj_tag(x_66) == 0) +{ +lean_ctor_set(x_27, 0, x_5); +lean_ctor_set(x_46, 1, x_9); +lean_ctor_set(x_46, 0, x_27); +x_17 = x_46; +x_18 = x_12; +goto block_25; +} +else +{ +uint8_t x_67; +lean_free_object(x_27); +x_67 = !lean_is_exclusive(x_66); +if (x_67 == 0) +{ +lean_object* x_68; +x_68 = lean_ctor_get(x_66, 0); +switch (lean_obj_tag(x_68)) 
{ +case 0: +{ +uint8_t x_69; +lean_free_object(x_66); +x_69 = !lean_is_exclusive(x_68); +if (x_69 == 0) +{ +lean_object* x_70; lean_object* x_71; lean_object* x_72; +x_70 = lean_ctor_get(x_68, 0); +x_71 = l_Lean_IR_instInhabitedCtorFieldInfo; +x_72 = lean_array_get(x_71, x_1, x_6); +switch (lean_obj_tag(x_72)) { +case 1: +{ +uint8_t x_73; +x_73 = !lean_is_exclusive(x_72); +if (x_73 == 0) +{ +lean_object* x_74; lean_object* x_75; +x_74 = lean_ctor_get(x_72, 0); +lean_dec(x_74); +lean_ctor_set_tag(x_72, 0); +lean_ctor_set(x_72, 0, x_70); +x_75 = lean_array_push(x_5, x_72); +lean_ctor_set_tag(x_68, 1); +lean_ctor_set(x_68, 0, x_75); +lean_ctor_set(x_46, 1, x_9); +lean_ctor_set(x_46, 0, x_68); +x_17 = x_46; +x_18 = x_12; +goto block_25; +} +else +{ +lean_object* x_76; lean_object* x_77; +lean_dec(x_72); +x_76 = lean_alloc_ctor(0, 1, 0); +lean_ctor_set(x_76, 0, x_70); +x_77 = lean_array_push(x_5, x_76); +lean_ctor_set_tag(x_68, 1); +lean_ctor_set(x_68, 0, x_77); +lean_ctor_set(x_46, 1, x_9); +lean_ctor_set(x_46, 0, x_68); +x_17 = x_46; +x_18 = x_12; +goto block_25; +} +} +case 2: +{ +uint8_t x_78; +lean_free_object(x_68); +lean_dec(x_70); +x_78 = !lean_is_exclusive(x_72); +if (x_78 == 0) +{ +lean_object* x_79; +x_79 = lean_ctor_get(x_72, 0); +lean_dec(x_79); +lean_ctor_set_tag(x_72, 1); +lean_ctor_set(x_72, 0, x_5); +lean_ctor_set(x_46, 1, x_9); +lean_ctor_set(x_46, 0, x_72); +x_17 = x_46; +x_18 = x_12; +goto block_25; +} +else +{ +lean_object* x_80; +lean_dec(x_72); +x_80 = lean_alloc_ctor(1, 1, 0); +lean_ctor_set(x_80, 0, x_5); +lean_ctor_set(x_46, 1, x_9); +lean_ctor_set(x_46, 0, x_80); +x_17 = x_46; +x_18 = x_12; +goto block_25; +} +} +default: +{ +lean_dec(x_72); +lean_dec(x_70); +lean_ctor_set_tag(x_68, 1); +lean_ctor_set(x_68, 0, x_5); +lean_ctor_set(x_46, 1, x_9); +lean_ctor_set(x_46, 0, x_68); +x_17 = x_46; +x_18 = x_12; +goto block_25; +} +} +} +else +{ +lean_object* x_81; lean_object* x_82; lean_object* x_83; +x_81 = lean_ctor_get(x_68, 0); +lean_inc(x_81); +lean_dec(x_68); +x_82 = l_Lean_IR_instInhabitedCtorFieldInfo; +x_83 = lean_array_get(x_82, x_1, x_6); +switch (lean_obj_tag(x_83)) { +case 1: +{ +lean_object* x_84; lean_object* x_85; lean_object* x_86; lean_object* x_87; +if (lean_is_exclusive(x_83)) { + lean_ctor_release(x_83, 0); + x_84 = x_83; +} else { + lean_dec_ref(x_83); + x_84 = lean_box(0); +} +if (lean_is_scalar(x_84)) { + x_85 = lean_alloc_ctor(0, 1, 0); +} else { + x_85 = x_84; + lean_ctor_set_tag(x_85, 0); +} +lean_ctor_set(x_85, 0, x_81); +x_86 = lean_array_push(x_5, x_85); +x_87 = lean_alloc_ctor(1, 1, 0); +lean_ctor_set(x_87, 0, x_86); +lean_ctor_set(x_46, 1, x_9); +lean_ctor_set(x_46, 0, x_87); +x_17 = x_46; +x_18 = x_12; +goto block_25; +} +case 2: +{ +lean_object* x_88; lean_object* x_89; +lean_dec(x_81); +if (lean_is_exclusive(x_83)) { + lean_ctor_release(x_83, 0); + x_88 = x_83; +} else { + lean_dec_ref(x_83); + x_88 = lean_box(0); +} +if (lean_is_scalar(x_88)) { + x_89 = lean_alloc_ctor(1, 1, 0); +} else { + x_89 = x_88; + lean_ctor_set_tag(x_89, 1); +} +lean_ctor_set(x_89, 0, x_5); +lean_ctor_set(x_46, 1, x_9); +lean_ctor_set(x_46, 0, x_89); +x_17 = x_46; +x_18 = x_12; +goto block_25; +} +default: +{ +lean_object* x_90; +lean_dec(x_83); +lean_dec(x_81); +x_90 = lean_alloc_ctor(1, 1, 0); +lean_ctor_set(x_90, 0, x_5); +lean_ctor_set(x_46, 1, x_9); +lean_ctor_set(x_46, 0, x_90); +x_17 = x_46; +x_18 = x_12; +goto block_25; +} +} +} +} +case 1: +{ +uint8_t x_91; +lean_free_object(x_66); +x_91 = !lean_is_exclusive(x_68); +if (x_91 == 0) +{ +lean_object* x_92; 
+x_92 = lean_ctor_get(x_68, 0); +lean_dec(x_92); +lean_ctor_set(x_68, 0, x_5); +lean_ctor_set(x_46, 1, x_9); +lean_ctor_set(x_46, 0, x_68); +x_17 = x_46; +x_18 = x_12; +goto block_25; +} +else +{ +lean_object* x_93; +lean_dec(x_68); +x_93 = lean_alloc_ctor(1, 1, 0); +lean_ctor_set(x_93, 0, x_5); +lean_ctor_set(x_46, 1, x_9); +lean_ctor_set(x_46, 0, x_93); +x_17 = x_46; +x_18 = x_12; +goto block_25; +} +} +default: +{ +lean_ctor_set(x_66, 0, x_5); +lean_ctor_set(x_46, 1, x_9); +lean_ctor_set(x_46, 0, x_66); +x_17 = x_46; +x_18 = x_12; +goto block_25; +} +} +} +else +{ +lean_object* x_94; +x_94 = lean_ctor_get(x_66, 0); +lean_inc(x_94); +lean_dec(x_66); +switch (lean_obj_tag(x_94)) { +case 0: +{ +lean_object* x_95; lean_object* x_96; lean_object* x_97; lean_object* x_98; +x_95 = lean_ctor_get(x_94, 0); +lean_inc(x_95); +if (lean_is_exclusive(x_94)) { + lean_ctor_release(x_94, 0); + x_96 = x_94; +} else { + lean_dec_ref(x_94); + x_96 = lean_box(0); +} +x_97 = l_Lean_IR_instInhabitedCtorFieldInfo; +x_98 = lean_array_get(x_97, x_1, x_6); +switch (lean_obj_tag(x_98)) { +case 1: +{ +lean_object* x_99; lean_object* x_100; lean_object* x_101; lean_object* x_102; +if (lean_is_exclusive(x_98)) { + lean_ctor_release(x_98, 0); + x_99 = x_98; +} else { + lean_dec_ref(x_98); + x_99 = lean_box(0); +} +if (lean_is_scalar(x_99)) { + x_100 = lean_alloc_ctor(0, 1, 0); +} else { + x_100 = x_99; + lean_ctor_set_tag(x_100, 0); +} +lean_ctor_set(x_100, 0, x_95); +x_101 = lean_array_push(x_5, x_100); +if (lean_is_scalar(x_96)) { + x_102 = lean_alloc_ctor(1, 1, 0); +} else { + x_102 = x_96; + lean_ctor_set_tag(x_102, 1); +} +lean_ctor_set(x_102, 0, x_101); +lean_ctor_set(x_46, 1, x_9); +lean_ctor_set(x_46, 0, x_102); +x_17 = x_46; +x_18 = x_12; +goto block_25; +} +case 2: +{ +lean_object* x_103; lean_object* x_104; +lean_dec(x_96); +lean_dec(x_95); +if (lean_is_exclusive(x_98)) { + lean_ctor_release(x_98, 0); + x_103 = x_98; +} else { + lean_dec_ref(x_98); + x_103 = lean_box(0); +} +if (lean_is_scalar(x_103)) { + x_104 = lean_alloc_ctor(1, 1, 0); +} else { + x_104 = x_103; + lean_ctor_set_tag(x_104, 1); +} +lean_ctor_set(x_104, 0, x_5); +lean_ctor_set(x_46, 1, x_9); +lean_ctor_set(x_46, 0, x_104); +x_17 = x_46; +x_18 = x_12; +goto block_25; +} +default: +{ +lean_object* x_105; +lean_dec(x_98); +lean_dec(x_95); +if (lean_is_scalar(x_96)) { + x_105 = lean_alloc_ctor(1, 1, 0); +} else { + x_105 = x_96; + lean_ctor_set_tag(x_105, 1); +} +lean_ctor_set(x_105, 0, x_5); +lean_ctor_set(x_46, 1, x_9); +lean_ctor_set(x_46, 0, x_105); +x_17 = x_46; +x_18 = x_12; +goto block_25; +} +} +} +case 1: +{ +lean_object* x_106; lean_object* x_107; +if (lean_is_exclusive(x_94)) { + lean_ctor_release(x_94, 0); + x_106 = x_94; +} else { + lean_dec_ref(x_94); + x_106 = lean_box(0); +} +if (lean_is_scalar(x_106)) { + x_107 = lean_alloc_ctor(1, 1, 0); +} else { + x_107 = x_106; +} +lean_ctor_set(x_107, 0, x_5); +lean_ctor_set(x_46, 1, x_9); +lean_ctor_set(x_46, 0, x_107); +x_17 = x_46; +x_18 = x_12; +goto block_25; +} +default: +{ +lean_object* x_108; +x_108 = lean_alloc_ctor(1, 1, 0); +lean_ctor_set(x_108, 0, x_5); +lean_ctor_set(x_46, 1, x_9); +lean_ctor_set(x_46, 0, x_108); +x_17 = x_46; +x_18 = x_12; +goto block_25; +} +} +} +} +} +else +{ +lean_object* x_109; lean_object* x_110; lean_object* x_111; uint64_t x_112; uint64_t x_113; uint64_t x_114; uint64_t x_115; uint64_t x_116; uint64_t x_117; uint64_t x_118; size_t x_119; size_t x_120; size_t x_121; size_t x_122; size_t x_123; lean_object* x_124; lean_object* x_125; +x_109 = 
lean_ctor_get(x_27, 0); +x_110 = lean_ctor_get(x_46, 1); +lean_inc(x_110); +lean_dec(x_46); +x_111 = lean_array_get_size(x_110); +x_112 = l___private_Lean_Expr_0__Lean_hashFVarId____x40_Lean_Expr___hyg_1730_(x_109); +x_113 = 32; +x_114 = lean_uint64_shift_right(x_112, x_113); +x_115 = lean_uint64_xor(x_112, x_114); +x_116 = 16; +x_117 = lean_uint64_shift_right(x_115, x_116); +x_118 = lean_uint64_xor(x_115, x_117); +x_119 = lean_uint64_to_usize(x_118); +x_120 = lean_usize_of_nat(x_111); +lean_dec(x_111); +x_121 = 1; +x_122 = lean_usize_sub(x_120, x_121); +x_123 = lean_usize_land(x_119, x_122); +x_124 = lean_array_uget(x_110, x_123); +lean_dec(x_110); +x_125 = l_Std_DHashMap_Internal_AssocList_get_x3f___at_Lean_IR_ToIR_lowerArg___spec__1(x_109, x_124); +lean_dec(x_124); +lean_dec(x_109); +if (lean_obj_tag(x_125) == 0) +{ +lean_object* x_126; +lean_ctor_set(x_27, 0, x_5); +x_126 = lean_alloc_ctor(0, 2, 0); +lean_ctor_set(x_126, 0, x_27); +lean_ctor_set(x_126, 1, x_9); +x_17 = x_126; +x_18 = x_12; +goto block_25; +} +else +{ +lean_object* x_127; lean_object* x_128; +lean_free_object(x_27); +x_127 = lean_ctor_get(x_125, 0); +lean_inc(x_127); +if (lean_is_exclusive(x_125)) { + lean_ctor_release(x_125, 0); + x_128 = x_125; +} else { + lean_dec_ref(x_125); + x_128 = lean_box(0); +} +switch (lean_obj_tag(x_127)) { +case 0: +{ +lean_object* x_129; lean_object* x_130; lean_object* x_131; lean_object* x_132; +lean_dec(x_128); +x_129 = lean_ctor_get(x_127, 0); +lean_inc(x_129); +if (lean_is_exclusive(x_127)) { + lean_ctor_release(x_127, 0); + x_130 = x_127; +} else { + lean_dec_ref(x_127); + x_130 = lean_box(0); +} +x_131 = l_Lean_IR_instInhabitedCtorFieldInfo; +x_132 = lean_array_get(x_131, x_1, x_6); +switch (lean_obj_tag(x_132)) { +case 1: +{ +lean_object* x_133; lean_object* x_134; lean_object* x_135; lean_object* x_136; lean_object* x_137; +if (lean_is_exclusive(x_132)) { + lean_ctor_release(x_132, 0); + x_133 = x_132; +} else { + lean_dec_ref(x_132); + x_133 = lean_box(0); +} +if (lean_is_scalar(x_133)) { + x_134 = lean_alloc_ctor(0, 1, 0); +} else { + x_134 = x_133; + lean_ctor_set_tag(x_134, 0); +} +lean_ctor_set(x_134, 0, x_129); +x_135 = lean_array_push(x_5, x_134); +if (lean_is_scalar(x_130)) { + x_136 = lean_alloc_ctor(1, 1, 0); +} else { + x_136 = x_130; + lean_ctor_set_tag(x_136, 1); +} +lean_ctor_set(x_136, 0, x_135); +x_137 = lean_alloc_ctor(0, 2, 0); +lean_ctor_set(x_137, 0, x_136); +lean_ctor_set(x_137, 1, x_9); +x_17 = x_137; +x_18 = x_12; +goto block_25; +} +case 2: +{ +lean_object* x_138; lean_object* x_139; lean_object* x_140; +lean_dec(x_130); +lean_dec(x_129); +if (lean_is_exclusive(x_132)) { + lean_ctor_release(x_132, 0); + x_138 = x_132; +} else { + lean_dec_ref(x_132); + x_138 = lean_box(0); +} +if (lean_is_scalar(x_138)) { + x_139 = lean_alloc_ctor(1, 1, 0); +} else { + x_139 = x_138; + lean_ctor_set_tag(x_139, 1); +} +lean_ctor_set(x_139, 0, x_5); +x_140 = lean_alloc_ctor(0, 2, 0); +lean_ctor_set(x_140, 0, x_139); +lean_ctor_set(x_140, 1, x_9); +x_17 = x_140; +x_18 = x_12; +goto block_25; +} +default: +{ +lean_object* x_141; lean_object* x_142; +lean_dec(x_132); +lean_dec(x_129); +if (lean_is_scalar(x_130)) { + x_141 = lean_alloc_ctor(1, 1, 0); +} else { + x_141 = x_130; + lean_ctor_set_tag(x_141, 1); +} +lean_ctor_set(x_141, 0, x_5); +x_142 = lean_alloc_ctor(0, 2, 0); +lean_ctor_set(x_142, 0, x_141); +lean_ctor_set(x_142, 1, x_9); +x_17 = x_142; +x_18 = x_12; +goto block_25; +} +} +} +case 1: +{ +lean_object* x_143; lean_object* x_144; lean_object* x_145; +lean_dec(x_128); 
+if (lean_is_exclusive(x_127)) { + lean_ctor_release(x_127, 0); + x_143 = x_127; +} else { + lean_dec_ref(x_127); + x_143 = lean_box(0); +} +if (lean_is_scalar(x_143)) { + x_144 = lean_alloc_ctor(1, 1, 0); +} else { + x_144 = x_143; +} +lean_ctor_set(x_144, 0, x_5); +x_145 = lean_alloc_ctor(0, 2, 0); +lean_ctor_set(x_145, 0, x_144); +lean_ctor_set(x_145, 1, x_9); +x_17 = x_145; +x_18 = x_12; +goto block_25; +} +default: +{ +lean_object* x_146; lean_object* x_147; +if (lean_is_scalar(x_128)) { + x_146 = lean_alloc_ctor(1, 1, 0); +} else { + x_146 = x_128; +} +lean_ctor_set(x_146, 0, x_5); +x_147 = lean_alloc_ctor(0, 2, 0); +lean_ctor_set(x_147, 0, x_146); +lean_ctor_set(x_147, 1, x_9); +x_17 = x_147; +x_18 = x_12; +goto block_25; +} +} +} +} +} +else +{ +lean_object* x_148; lean_object* x_149; lean_object* x_150; lean_object* x_151; uint64_t x_152; uint64_t x_153; uint64_t x_154; uint64_t x_155; uint64_t x_156; uint64_t x_157; uint64_t x_158; size_t x_159; size_t x_160; size_t x_161; size_t x_162; size_t x_163; lean_object* x_164; lean_object* x_165; +x_148 = lean_ctor_get(x_27, 0); +lean_inc(x_148); +lean_dec(x_27); +x_149 = lean_ctor_get(x_46, 1); +lean_inc(x_149); +if (lean_is_exclusive(x_46)) { + lean_ctor_release(x_46, 0); + lean_ctor_release(x_46, 1); + x_150 = x_46; +} else { + lean_dec_ref(x_46); + x_150 = lean_box(0); +} +x_151 = lean_array_get_size(x_149); +x_152 = l___private_Lean_Expr_0__Lean_hashFVarId____x40_Lean_Expr___hyg_1730_(x_148); +x_153 = 32; +x_154 = lean_uint64_shift_right(x_152, x_153); +x_155 = lean_uint64_xor(x_152, x_154); +x_156 = 16; +x_157 = lean_uint64_shift_right(x_155, x_156); +x_158 = lean_uint64_xor(x_155, x_157); +x_159 = lean_uint64_to_usize(x_158); +x_160 = lean_usize_of_nat(x_151); +lean_dec(x_151); +x_161 = 1; +x_162 = lean_usize_sub(x_160, x_161); +x_163 = lean_usize_land(x_159, x_162); +x_164 = lean_array_uget(x_149, x_163); +lean_dec(x_149); +x_165 = l_Std_DHashMap_Internal_AssocList_get_x3f___at_Lean_IR_ToIR_lowerArg___spec__1(x_148, x_164); +lean_dec(x_164); +lean_dec(x_148); +if (lean_obj_tag(x_165) == 0) +{ +lean_object* x_166; lean_object* x_167; +x_166 = lean_alloc_ctor(1, 1, 0); +lean_ctor_set(x_166, 0, x_5); +if (lean_is_scalar(x_150)) { + x_167 = lean_alloc_ctor(0, 2, 0); +} else { + x_167 = x_150; +} +lean_ctor_set(x_167, 0, x_166); +lean_ctor_set(x_167, 1, x_9); +x_17 = x_167; +x_18 = x_12; +goto block_25; +} +else +{ +lean_object* x_168; lean_object* x_169; +x_168 = lean_ctor_get(x_165, 0); +lean_inc(x_168); +if (lean_is_exclusive(x_165)) { + lean_ctor_release(x_165, 0); + x_169 = x_165; +} else { + lean_dec_ref(x_165); + x_169 = lean_box(0); +} +switch (lean_obj_tag(x_168)) { +case 0: +{ +lean_object* x_170; lean_object* x_171; lean_object* x_172; lean_object* x_173; +lean_dec(x_169); +x_170 = lean_ctor_get(x_168, 0); +lean_inc(x_170); +if (lean_is_exclusive(x_168)) { + lean_ctor_release(x_168, 0); + x_171 = x_168; +} else { + lean_dec_ref(x_168); + x_171 = lean_box(0); +} +x_172 = l_Lean_IR_instInhabitedCtorFieldInfo; +x_173 = lean_array_get(x_172, x_1, x_6); +switch (lean_obj_tag(x_173)) { +case 1: +{ +lean_object* x_174; lean_object* x_175; lean_object* x_176; lean_object* x_177; lean_object* x_178; +if (lean_is_exclusive(x_173)) { + lean_ctor_release(x_173, 0); + x_174 = x_173; +} else { + lean_dec_ref(x_173); + x_174 = lean_box(0); +} +if (lean_is_scalar(x_174)) { + x_175 = lean_alloc_ctor(0, 1, 0); +} else { + x_175 = x_174; + lean_ctor_set_tag(x_175, 0); +} +lean_ctor_set(x_175, 0, x_170); +x_176 = lean_array_push(x_5, x_175); 
+if (lean_is_scalar(x_171)) { + x_177 = lean_alloc_ctor(1, 1, 0); +} else { + x_177 = x_171; + lean_ctor_set_tag(x_177, 1); +} +lean_ctor_set(x_177, 0, x_176); +if (lean_is_scalar(x_150)) { + x_178 = lean_alloc_ctor(0, 2, 0); +} else { + x_178 = x_150; +} +lean_ctor_set(x_178, 0, x_177); +lean_ctor_set(x_178, 1, x_9); +x_17 = x_178; +x_18 = x_12; +goto block_25; +} +case 2: +{ +lean_object* x_179; lean_object* x_180; lean_object* x_181; +lean_dec(x_171); +lean_dec(x_170); +if (lean_is_exclusive(x_173)) { + lean_ctor_release(x_173, 0); + x_179 = x_173; +} else { + lean_dec_ref(x_173); + x_179 = lean_box(0); +} +if (lean_is_scalar(x_179)) { + x_180 = lean_alloc_ctor(1, 1, 0); +} else { + x_180 = x_179; + lean_ctor_set_tag(x_180, 1); +} +lean_ctor_set(x_180, 0, x_5); +if (lean_is_scalar(x_150)) { + x_181 = lean_alloc_ctor(0, 2, 0); +} else { + x_181 = x_150; +} +lean_ctor_set(x_181, 0, x_180); +lean_ctor_set(x_181, 1, x_9); +x_17 = x_181; +x_18 = x_12; +goto block_25; +} +default: +{ +lean_object* x_182; lean_object* x_183; +lean_dec(x_173); +lean_dec(x_170); +if (lean_is_scalar(x_171)) { + x_182 = lean_alloc_ctor(1, 1, 0); +} else { + x_182 = x_171; + lean_ctor_set_tag(x_182, 1); +} +lean_ctor_set(x_182, 0, x_5); +if (lean_is_scalar(x_150)) { + x_183 = lean_alloc_ctor(0, 2, 0); +} else { + x_183 = x_150; +} +lean_ctor_set(x_183, 0, x_182); +lean_ctor_set(x_183, 1, x_9); +x_17 = x_183; +x_18 = x_12; +goto block_25; +} +} +} +case 1: +{ +lean_object* x_184; lean_object* x_185; lean_object* x_186; +lean_dec(x_169); +if (lean_is_exclusive(x_168)) { + lean_ctor_release(x_168, 0); + x_184 = x_168; +} else { + lean_dec_ref(x_168); + x_184 = lean_box(0); +} +if (lean_is_scalar(x_184)) { + x_185 = lean_alloc_ctor(1, 1, 0); +} else { + x_185 = x_184; +} +lean_ctor_set(x_185, 0, x_5); +if (lean_is_scalar(x_150)) { + x_186 = lean_alloc_ctor(0, 2, 0); +} else { + x_186 = x_150; +} +lean_ctor_set(x_186, 0, x_185); +lean_ctor_set(x_186, 1, x_9); +x_17 = x_186; +x_18 = x_12; +goto block_25; +} +default: +{ +lean_object* x_187; lean_object* x_188; +if (lean_is_scalar(x_169)) { + x_187 = lean_alloc_ctor(1, 1, 0); +} else { + x_187 = x_169; +} +lean_ctor_set(x_187, 0, x_5); +if (lean_is_scalar(x_150)) { + x_188 = lean_alloc_ctor(0, 2, 0); +} else { + x_188 = x_150; +} +lean_ctor_set(x_188, 0, x_187); +lean_ctor_set(x_188, 1, x_9); +x_17 = x_188; +x_18 = x_12; +goto block_25; +} +} +} +} +} +default: +{ +uint8_t x_189; +x_189 = !lean_is_exclusive(x_27); +if (x_189 == 0) +{ +lean_object* x_190; lean_object* x_191; lean_object* x_192; +x_190 = lean_ctor_get(x_27, 0); +lean_dec(x_190); +x_191 = l_Lean_IR_instInhabitedCtorFieldInfo; +x_192 = lean_array_get(x_191, x_1, x_6); +switch (lean_obj_tag(x_192)) { +case 1: +{ +uint8_t x_193; +lean_free_object(x_27); +x_193 = !lean_is_exclusive(x_192); +if (x_193 == 0) +{ +lean_object* x_194; lean_object* x_195; lean_object* x_196; lean_object* x_197; +x_194 = lean_ctor_get(x_192, 0); +lean_dec(x_194); +x_195 = lean_box(1); +x_196 = lean_array_push(x_5, x_195); +lean_ctor_set(x_192, 0, x_196); +x_197 = lean_alloc_ctor(0, 2, 0); +lean_ctor_set(x_197, 0, x_192); +lean_ctor_set(x_197, 1, x_9); +x_17 = x_197; +x_18 = x_12; +goto block_25; +} +else +{ +lean_object* x_198; lean_object* x_199; lean_object* x_200; lean_object* x_201; +lean_dec(x_192); +x_198 = lean_box(1); +x_199 = lean_array_push(x_5, x_198); +x_200 = lean_alloc_ctor(1, 1, 0); +lean_ctor_set(x_200, 0, x_199); +x_201 = lean_alloc_ctor(0, 2, 0); +lean_ctor_set(x_201, 0, x_200); +lean_ctor_set(x_201, 1, x_9); +x_17 = 
x_201; +x_18 = x_12; +goto block_25; +} +} +case 2: +{ +uint8_t x_202; +lean_free_object(x_27); +x_202 = !lean_is_exclusive(x_192); +if (x_202 == 0) +{ +lean_object* x_203; lean_object* x_204; +x_203 = lean_ctor_get(x_192, 0); +lean_dec(x_203); +lean_ctor_set_tag(x_192, 1); +lean_ctor_set(x_192, 0, x_5); +x_204 = lean_alloc_ctor(0, 2, 0); +lean_ctor_set(x_204, 0, x_192); +lean_ctor_set(x_204, 1, x_9); +x_17 = x_204; +x_18 = x_12; +goto block_25; +} +else +{ +lean_object* x_205; lean_object* x_206; +lean_dec(x_192); +x_205 = lean_alloc_ctor(1, 1, 0); +lean_ctor_set(x_205, 0, x_5); +x_206 = lean_alloc_ctor(0, 2, 0); +lean_ctor_set(x_206, 0, x_205); +lean_ctor_set(x_206, 1, x_9); +x_17 = x_206; +x_18 = x_12; +goto block_25; +} +} +default: +{ +lean_object* x_207; +lean_dec(x_192); +lean_ctor_set_tag(x_27, 1); +lean_ctor_set(x_27, 0, x_5); +x_207 = lean_alloc_ctor(0, 2, 0); +lean_ctor_set(x_207, 0, x_27); +lean_ctor_set(x_207, 1, x_9); +x_17 = x_207; +x_18 = x_12; +goto block_25; +} +} +} +else +{ +lean_object* x_208; lean_object* x_209; +lean_dec(x_27); +x_208 = l_Lean_IR_instInhabitedCtorFieldInfo; +x_209 = lean_array_get(x_208, x_1, x_6); +switch (lean_obj_tag(x_209)) { +case 1: +{ +lean_object* x_210; lean_object* x_211; lean_object* x_212; lean_object* x_213; lean_object* x_214; +if (lean_is_exclusive(x_209)) { + lean_ctor_release(x_209, 0); + x_210 = x_209; +} else { + lean_dec_ref(x_209); + x_210 = lean_box(0); +} +x_211 = lean_box(1); +x_212 = lean_array_push(x_5, x_211); +if (lean_is_scalar(x_210)) { + x_213 = lean_alloc_ctor(1, 1, 0); +} else { + x_213 = x_210; +} +lean_ctor_set(x_213, 0, x_212); +x_214 = lean_alloc_ctor(0, 2, 0); +lean_ctor_set(x_214, 0, x_213); +lean_ctor_set(x_214, 1, x_9); +x_17 = x_214; +x_18 = x_12; +goto block_25; +} +case 2: +{ +lean_object* x_215; lean_object* x_216; lean_object* x_217; +if (lean_is_exclusive(x_209)) { + lean_ctor_release(x_209, 0); + x_215 = x_209; +} else { + lean_dec_ref(x_209); + x_215 = lean_box(0); +} +if (lean_is_scalar(x_215)) { + x_216 = lean_alloc_ctor(1, 1, 0); +} else { + x_216 = x_215; + lean_ctor_set_tag(x_216, 1); +} +lean_ctor_set(x_216, 0, x_5); +x_217 = lean_alloc_ctor(0, 2, 0); +lean_ctor_set(x_217, 0, x_216); +lean_ctor_set(x_217, 1, x_9); +x_17 = x_217; +x_18 = x_12; +goto block_25; +} +default: +{ +lean_object* x_218; lean_object* x_219; +lean_dec(x_209); +x_218 = lean_alloc_ctor(1, 1, 0); +lean_ctor_set(x_218, 0, x_5); +x_219 = lean_alloc_ctor(0, 2, 0); +lean_ctor_set(x_219, 0, x_218); +lean_ctor_set(x_219, 1, x_9); +x_17 = x_219; +x_18 = x_12; +goto block_25; +} +} +} +} +} +block_25: +{ +lean_object* x_19; lean_object* x_20; lean_object* x_21; lean_object* x_22; lean_object* x_23; +x_19 = lean_ctor_get(x_17, 0); +lean_inc(x_19); +x_20 = lean_ctor_get(x_17, 1); +lean_inc(x_20); +lean_dec(x_17); +x_21 = lean_ctor_get(x_19, 0); +lean_inc(x_21); +lean_dec(x_19); +x_22 = lean_ctor_get(x_4, 2); +x_23 = lean_nat_add(x_6, x_22); +lean_dec(x_6); +x_5 = x_21; +x_6 = x_23; +x_7 = lean_box(0); +x_8 = lean_box(0); +x_9 = x_20; +x_12 = x_18; +goto _start; +} +} +} +} +LEAN_EXPORT lean_object* l_Lean_IR_ToIR_lowerLet___lambda__1(lean_object* x_1, lean_object* x_2, lean_object* x_3, lean_object* x_4, lean_object* x_5, lean_object* x_6, lean_object* x_7, lean_object* x_8) { +_start: +{ +lean_object* x_9; +x_9 = l_Lean_IR_ToIR_lowerCode(x_1, x_5, x_6, x_7, x_8); +if (lean_obj_tag(x_9) == 0) +{ +uint8_t x_10; +x_10 = !lean_is_exclusive(x_9); +if (x_10 == 0) +{ +lean_object* x_11; uint8_t x_12; +x_11 = lean_ctor_get(x_9, 0); +x_12 = 
!lean_is_exclusive(x_11); +if (x_12 == 0) +{ +lean_object* x_13; lean_object* x_14; +x_13 = lean_ctor_get(x_11, 0); +x_14 = lean_alloc_ctor(0, 4, 0); +lean_ctor_set(x_14, 0, x_2); +lean_ctor_set(x_14, 1, x_4); +lean_ctor_set(x_14, 2, x_3); +lean_ctor_set(x_14, 3, x_13); +lean_ctor_set(x_11, 0, x_14); +return x_9; +} +else +{ +lean_object* x_15; lean_object* x_16; lean_object* x_17; lean_object* x_18; +x_15 = lean_ctor_get(x_11, 0); +x_16 = lean_ctor_get(x_11, 1); +lean_inc(x_16); +lean_inc(x_15); +lean_dec(x_11); +x_17 = lean_alloc_ctor(0, 4, 0); +lean_ctor_set(x_17, 0, x_2); +lean_ctor_set(x_17, 1, x_4); +lean_ctor_set(x_17, 2, x_3); +lean_ctor_set(x_17, 3, x_15); +x_18 = lean_alloc_ctor(0, 2, 0); +lean_ctor_set(x_18, 0, x_17); +lean_ctor_set(x_18, 1, x_16); +lean_ctor_set(x_9, 0, x_18); +return x_9; +} +} +else +{ +lean_object* x_19; lean_object* x_20; lean_object* x_21; lean_object* x_22; lean_object* x_23; lean_object* x_24; lean_object* x_25; lean_object* x_26; +x_19 = lean_ctor_get(x_9, 0); +x_20 = lean_ctor_get(x_9, 1); +lean_inc(x_20); +lean_inc(x_19); +lean_dec(x_9); +x_21 = lean_ctor_get(x_19, 0); +lean_inc(x_21); +x_22 = lean_ctor_get(x_19, 1); +lean_inc(x_22); +if (lean_is_exclusive(x_19)) { + lean_ctor_release(x_19, 0); + lean_ctor_release(x_19, 1); + x_23 = x_19; +} else { + lean_dec_ref(x_19); + x_23 = lean_box(0); +} +x_24 = lean_alloc_ctor(0, 4, 0); +lean_ctor_set(x_24, 0, x_2); +lean_ctor_set(x_24, 1, x_4); +lean_ctor_set(x_24, 2, x_3); +lean_ctor_set(x_24, 3, x_21); +if (lean_is_scalar(x_23)) { + x_25 = lean_alloc_ctor(0, 2, 0); +} else { + x_25 = x_23; +} +lean_ctor_set(x_25, 0, x_24); +lean_ctor_set(x_25, 1, x_22); +x_26 = lean_alloc_ctor(0, 2, 0); +lean_ctor_set(x_26, 0, x_25); +lean_ctor_set(x_26, 1, x_20); +return x_26; +} +} +else +{ +uint8_t x_27; +lean_dec(x_4); +lean_dec(x_3); +lean_dec(x_2); +x_27 = !lean_is_exclusive(x_9); +if (x_27 == 0) +{ +return x_9; +} +else +{ +lean_object* x_28; lean_object* x_29; lean_object* x_30; +x_28 = lean_ctor_get(x_9, 0); +x_29 = lean_ctor_get(x_9, 1); +lean_inc(x_29); +lean_inc(x_28); +lean_dec(x_9); +x_30 = lean_alloc_ctor(1, 2, 0); +lean_ctor_set(x_30, 0, x_28); +lean_ctor_set(x_30, 1, x_29); +return x_30; +} +} +} +} +LEAN_EXPORT uint8_t l_Lean_IR_ToIR_lowerLet___lambda__2(lean_object* x_1) { +_start: +{ +uint8_t x_2; +x_2 = 0; +return x_2; +} +} +LEAN_EXPORT lean_object* l_Lean_IR_ToIR_lowerLet___lambda__3(lean_object* x_1, lean_object* x_2, lean_object* x_3, lean_object* x_4, lean_object* x_5, lean_object* x_6, lean_object* x_7, lean_object* x_8, lean_object* x_9, lean_object* x_10) { +_start: +{ +lean_object* x_11; +x_11 = l_Lean_IR_ToIR_lowerCode(x_1, x_7, x_8, x_9, x_10); +if (lean_obj_tag(x_11) == 0) +{ +uint8_t x_12; +x_12 = !lean_is_exclusive(x_11); +if (x_12 == 0) +{ +lean_object* x_13; uint8_t x_14; +x_13 = lean_ctor_get(x_11, 0); +x_14 = !lean_is_exclusive(x_13); +if (x_14 == 0) +{ +lean_object* x_15; lean_object* x_16; lean_object* x_17; lean_object* x_18; lean_object* x_19; +x_15 = lean_ctor_get(x_13, 0); +lean_inc(x_2); +x_16 = lean_alloc_ctor(8, 2, 0); +lean_ctor_set(x_16, 0, x_2); +lean_ctor_set(x_16, 1, x_3); +x_17 = lean_alloc_ctor(0, 4, 0); +lean_ctor_set(x_17, 0, x_4); +lean_ctor_set(x_17, 1, x_6); +lean_ctor_set(x_17, 2, x_16); +lean_ctor_set(x_17, 3, x_15); +x_18 = lean_box(7); +x_19 = lean_alloc_ctor(0, 4, 0); +lean_ctor_set(x_19, 0, x_2); +lean_ctor_set(x_19, 1, x_18); +lean_ctor_set(x_19, 2, x_5); +lean_ctor_set(x_19, 3, x_17); +lean_ctor_set(x_13, 0, x_19); +return x_11; +} +else +{ +lean_object* 
x_20; lean_object* x_21; lean_object* x_22; lean_object* x_23; lean_object* x_24; lean_object* x_25; lean_object* x_26; +x_20 = lean_ctor_get(x_13, 0); +x_21 = lean_ctor_get(x_13, 1); +lean_inc(x_21); +lean_inc(x_20); +lean_dec(x_13); +lean_inc(x_2); +x_22 = lean_alloc_ctor(8, 2, 0); +lean_ctor_set(x_22, 0, x_2); +lean_ctor_set(x_22, 1, x_3); +x_23 = lean_alloc_ctor(0, 4, 0); +lean_ctor_set(x_23, 0, x_4); +lean_ctor_set(x_23, 1, x_6); +lean_ctor_set(x_23, 2, x_22); +lean_ctor_set(x_23, 3, x_20); +x_24 = lean_box(7); +x_25 = lean_alloc_ctor(0, 4, 0); +lean_ctor_set(x_25, 0, x_2); +lean_ctor_set(x_25, 1, x_24); +lean_ctor_set(x_25, 2, x_5); +lean_ctor_set(x_25, 3, x_23); +x_26 = lean_alloc_ctor(0, 2, 0); +lean_ctor_set(x_26, 0, x_25); +lean_ctor_set(x_26, 1, x_21); +lean_ctor_set(x_11, 0, x_26); +return x_11; +} +} +else +{ +lean_object* x_27; lean_object* x_28; lean_object* x_29; lean_object* x_30; lean_object* x_31; lean_object* x_32; lean_object* x_33; lean_object* x_34; lean_object* x_35; lean_object* x_36; lean_object* x_37; +x_27 = lean_ctor_get(x_11, 0); +x_28 = lean_ctor_get(x_11, 1); +lean_inc(x_28); +lean_inc(x_27); +lean_dec(x_11); +x_29 = lean_ctor_get(x_27, 0); +lean_inc(x_29); +x_30 = lean_ctor_get(x_27, 1); +lean_inc(x_30); +if (lean_is_exclusive(x_27)) { + lean_ctor_release(x_27, 0); + lean_ctor_release(x_27, 1); + x_31 = x_27; +} else { + lean_dec_ref(x_27); + x_31 = lean_box(0); +} +lean_inc(x_2); +x_32 = lean_alloc_ctor(8, 2, 0); +lean_ctor_set(x_32, 0, x_2); +lean_ctor_set(x_32, 1, x_3); +x_33 = lean_alloc_ctor(0, 4, 0); +lean_ctor_set(x_33, 0, x_4); +lean_ctor_set(x_33, 1, x_6); +lean_ctor_set(x_33, 2, x_32); +lean_ctor_set(x_33, 3, x_29); +x_34 = lean_box(7); +x_35 = lean_alloc_ctor(0, 4, 0); +lean_ctor_set(x_35, 0, x_2); +lean_ctor_set(x_35, 1, x_34); +lean_ctor_set(x_35, 2, x_5); +lean_ctor_set(x_35, 3, x_33); +if (lean_is_scalar(x_31)) { + x_36 = lean_alloc_ctor(0, 2, 0); +} else { + x_36 = x_31; +} +lean_ctor_set(x_36, 0, x_35); +lean_ctor_set(x_36, 1, x_30); +x_37 = lean_alloc_ctor(0, 2, 0); +lean_ctor_set(x_37, 0, x_36); +lean_ctor_set(x_37, 1, x_28); +return x_37; +} +} +else +{ +uint8_t x_38; +lean_dec(x_6); +lean_dec(x_5); +lean_dec(x_4); +lean_dec(x_3); +lean_dec(x_2); +x_38 = !lean_is_exclusive(x_11); +if (x_38 == 0) +{ +return x_11; +} +else +{ +lean_object* x_39; lean_object* x_40; lean_object* x_41; +x_39 = lean_ctor_get(x_11, 0); +x_40 = lean_ctor_get(x_11, 1); +lean_inc(x_40); +lean_inc(x_39); +lean_dec(x_11); +x_41 = lean_alloc_ctor(1, 2, 0); +lean_ctor_set(x_41, 0, x_39); +lean_ctor_set(x_41, 1, x_40); +return x_41; +} +} +} +} +LEAN_EXPORT lean_object* l_Lean_IR_ToIR_lowerLet___lambda__4(lean_object* x_1, lean_object* x_2, lean_object* x_3, lean_object* x_4, lean_object* x_5, lean_object* x_6, lean_object* x_7, lean_object* x_8, lean_object* x_9, lean_object* x_10) { +_start: +{ +lean_object* x_11; lean_object* x_12; lean_object* x_13; lean_object* x_14; uint8_t x_15; +x_11 = lean_ctor_get(x_1, 0); +lean_inc(x_11); +lean_dec(x_1); +x_12 = l_Lean_IR_ToIR_bindVar(x_11, x_7, x_8, x_9, x_10); +x_13 = lean_ctor_get(x_12, 0); +lean_inc(x_13); +x_14 = lean_ctor_get(x_12, 1); +lean_inc(x_14); +lean_dec(x_12); +x_15 = !lean_is_exclusive(x_13); +if (x_15 == 0) +{ +lean_object* x_16; lean_object* x_17; lean_object* x_18; lean_object* x_19; +x_16 = lean_ctor_get(x_13, 0); +x_17 = lean_ctor_get(x_13, 1); +x_18 = lean_unsigned_to_nat(0u); +lean_inc(x_16); +x_19 = l_Lean_IR_ToIR_lowerLet_lowerNonObjectFields_loop(x_2, x_3, x_4, x_5, x_16, x_18, x_18, x_17, x_8, x_9, 
x_14); +if (lean_obj_tag(x_19) == 0) +{ +uint8_t x_20; +x_20 = !lean_is_exclusive(x_19); +if (x_20 == 0) +{ +lean_object* x_21; uint8_t x_22; +x_21 = lean_ctor_get(x_19, 0); +x_22 = !lean_is_exclusive(x_21); +if (x_22 == 0) +{ +lean_object* x_23; lean_object* x_24; lean_object* x_25; +x_23 = lean_ctor_get(x_21, 0); +lean_ctor_set(x_13, 1, x_6); +lean_ctor_set(x_13, 0, x_3); +x_24 = lean_box(7); +x_25 = lean_alloc_ctor(0, 4, 0); +lean_ctor_set(x_25, 0, x_16); +lean_ctor_set(x_25, 1, x_24); +lean_ctor_set(x_25, 2, x_13); +lean_ctor_set(x_25, 3, x_23); +lean_ctor_set(x_21, 0, x_25); +return x_19; +} +else +{ +lean_object* x_26; lean_object* x_27; lean_object* x_28; lean_object* x_29; lean_object* x_30; +x_26 = lean_ctor_get(x_21, 0); +x_27 = lean_ctor_get(x_21, 1); +lean_inc(x_27); +lean_inc(x_26); +lean_dec(x_21); +lean_ctor_set(x_13, 1, x_6); +lean_ctor_set(x_13, 0, x_3); +x_28 = lean_box(7); +x_29 = lean_alloc_ctor(0, 4, 0); +lean_ctor_set(x_29, 0, x_16); +lean_ctor_set(x_29, 1, x_28); +lean_ctor_set(x_29, 2, x_13); +lean_ctor_set(x_29, 3, x_26); +x_30 = lean_alloc_ctor(0, 2, 0); +lean_ctor_set(x_30, 0, x_29); +lean_ctor_set(x_30, 1, x_27); +lean_ctor_set(x_19, 0, x_30); +return x_19; +} +} +else +{ +lean_object* x_31; lean_object* x_32; lean_object* x_33; lean_object* x_34; lean_object* x_35; lean_object* x_36; lean_object* x_37; lean_object* x_38; lean_object* x_39; +x_31 = lean_ctor_get(x_19, 0); +x_32 = lean_ctor_get(x_19, 1); +lean_inc(x_32); +lean_inc(x_31); +lean_dec(x_19); +x_33 = lean_ctor_get(x_31, 0); +lean_inc(x_33); +x_34 = lean_ctor_get(x_31, 1); +lean_inc(x_34); +if (lean_is_exclusive(x_31)) { + lean_ctor_release(x_31, 0); + lean_ctor_release(x_31, 1); + x_35 = x_31; +} else { + lean_dec_ref(x_31); + x_35 = lean_box(0); +} +lean_ctor_set(x_13, 1, x_6); +lean_ctor_set(x_13, 0, x_3); +x_36 = lean_box(7); +x_37 = lean_alloc_ctor(0, 4, 0); +lean_ctor_set(x_37, 0, x_16); +lean_ctor_set(x_37, 1, x_36); +lean_ctor_set(x_37, 2, x_13); +lean_ctor_set(x_37, 3, x_33); +if (lean_is_scalar(x_35)) { + x_38 = lean_alloc_ctor(0, 2, 0); +} else { + x_38 = x_35; +} +lean_ctor_set(x_38, 0, x_37); +lean_ctor_set(x_38, 1, x_34); +x_39 = lean_alloc_ctor(0, 2, 0); +lean_ctor_set(x_39, 0, x_38); +lean_ctor_set(x_39, 1, x_32); +return x_39; +} +} +else +{ +uint8_t x_40; +lean_free_object(x_13); +lean_dec(x_16); +lean_dec(x_6); +lean_dec(x_3); +x_40 = !lean_is_exclusive(x_19); +if (x_40 == 0) +{ +return x_19; +} +else +{ +lean_object* x_41; lean_object* x_42; lean_object* x_43; +x_41 = lean_ctor_get(x_19, 0); +x_42 = lean_ctor_get(x_19, 1); +lean_inc(x_42); +lean_inc(x_41); +lean_dec(x_19); +x_43 = lean_alloc_ctor(1, 2, 0); +lean_ctor_set(x_43, 0, x_41); +lean_ctor_set(x_43, 1, x_42); +return x_43; +} +} +} +else +{ +lean_object* x_44; lean_object* x_45; lean_object* x_46; lean_object* x_47; +x_44 = lean_ctor_get(x_13, 0); +x_45 = lean_ctor_get(x_13, 1); +lean_inc(x_45); +lean_inc(x_44); +lean_dec(x_13); +x_46 = lean_unsigned_to_nat(0u); +lean_inc(x_44); +x_47 = l_Lean_IR_ToIR_lowerLet_lowerNonObjectFields_loop(x_2, x_3, x_4, x_5, x_44, x_46, x_46, x_45, x_8, x_9, x_14); +if (lean_obj_tag(x_47) == 0) +{ +lean_object* x_48; lean_object* x_49; lean_object* x_50; lean_object* x_51; lean_object* x_52; lean_object* x_53; lean_object* x_54; lean_object* x_55; lean_object* x_56; lean_object* x_57; lean_object* x_58; +x_48 = lean_ctor_get(x_47, 0); +lean_inc(x_48); +x_49 = lean_ctor_get(x_47, 1); +lean_inc(x_49); +if (lean_is_exclusive(x_47)) { + lean_ctor_release(x_47, 0); + lean_ctor_release(x_47, 1); + 
x_50 = x_47; +} else { + lean_dec_ref(x_47); + x_50 = lean_box(0); +} +x_51 = lean_ctor_get(x_48, 0); +lean_inc(x_51); +x_52 = lean_ctor_get(x_48, 1); +lean_inc(x_52); +if (lean_is_exclusive(x_48)) { + lean_ctor_release(x_48, 0); + lean_ctor_release(x_48, 1); + x_53 = x_48; +} else { + lean_dec_ref(x_48); + x_53 = lean_box(0); +} +x_54 = lean_alloc_ctor(0, 2, 0); +lean_ctor_set(x_54, 0, x_3); +lean_ctor_set(x_54, 1, x_6); +x_55 = lean_box(7); +x_56 = lean_alloc_ctor(0, 4, 0); +lean_ctor_set(x_56, 0, x_44); +lean_ctor_set(x_56, 1, x_55); +lean_ctor_set(x_56, 2, x_54); +lean_ctor_set(x_56, 3, x_51); +if (lean_is_scalar(x_53)) { + x_57 = lean_alloc_ctor(0, 2, 0); +} else { + x_57 = x_53; +} +lean_ctor_set(x_57, 0, x_56); +lean_ctor_set(x_57, 1, x_52); +if (lean_is_scalar(x_50)) { + x_58 = lean_alloc_ctor(0, 2, 0); +} else { + x_58 = x_50; +} +lean_ctor_set(x_58, 0, x_57); +lean_ctor_set(x_58, 1, x_49); +return x_58; +} +else +{ +lean_object* x_59; lean_object* x_60; lean_object* x_61; lean_object* x_62; +lean_dec(x_44); +lean_dec(x_6); +lean_dec(x_3); +x_59 = lean_ctor_get(x_47, 0); +lean_inc(x_59); +x_60 = lean_ctor_get(x_47, 1); +lean_inc(x_60); +if (lean_is_exclusive(x_47)) { + lean_ctor_release(x_47, 0); + lean_ctor_release(x_47, 1); + x_61 = x_47; +} else { + lean_dec_ref(x_47); + x_61 = lean_box(0); +} +if (lean_is_scalar(x_61)) { + x_62 = lean_alloc_ctor(1, 2, 0); +} else { + x_62 = x_61; +} +lean_ctor_set(x_62, 0, x_59); +lean_ctor_set(x_62, 1, x_60); +return x_62; +} +} +} +} +static lean_object* _init_l_Lean_IR_ToIR_lowerLet___closed__1() { +_start: +{ +lean_object* x_1; +x_1 = lean_mk_string_unchecked("Lean.IR.ToIR.lowerLet", 21, 21); +return x_1; +} +} +static lean_object* _init_l_Lean_IR_ToIR_lowerLet___closed__2() { +_start: +{ +lean_object* x_1; lean_object* x_2; lean_object* x_3; lean_object* x_4; lean_object* x_5; lean_object* x_6; +x_1 = l_List_forIn_x27_loop___at_Lean_IR_ToIR_lowerEnumToScalarType___spec__2___closed__1; +x_2 = l_Lean_IR_ToIR_lowerLet___closed__1; +x_3 = lean_unsigned_to_nat(247u); +x_4 = lean_unsigned_to_nat(37u); +x_5 = l_Lean_IR_ToIR_lowerArg___closed__2; +x_6 = l___private_Init_Util_0__mkPanicMessageWithDecl(x_1, x_2, x_3, x_4, x_5); +return x_6; +} +} +static lean_object* _init_l_Lean_IR_ToIR_lowerLet___closed__3() { +_start: +{ +lean_object* x_1; +x_1 = lean_mk_string_unchecked("projection of non-inductive type", 32, 32); +return x_1; +} +} +static lean_object* _init_l_Lean_IR_ToIR_lowerLet___closed__4() { +_start: +{ +lean_object* x_1; lean_object* x_2; lean_object* x_3; lean_object* x_4; lean_object* x_5; lean_object* x_6; +x_1 = l_List_forIn_x27_loop___at_Lean_IR_ToIR_lowerEnumToScalarType___spec__2___closed__1; +x_2 = l_Lean_IR_ToIR_lowerLet___closed__1; +x_3 = lean_unsigned_to_nat(233u); +x_4 = lean_unsigned_to_nat(10u); +x_5 = l_Lean_IR_ToIR_lowerLet___closed__3; +x_6 = l___private_Init_Util_0__mkPanicMessageWithDecl(x_1, x_2, x_3, x_4, x_5); +return x_6; +} +} +static lean_object* _init_l_Lean_IR_ToIR_lowerLet___closed__5() { +_start: +{ +lean_object* x_1; +x_1 = lean_mk_string_unchecked("reference to unbound name", 25, 25); +return x_1; +} +} +static lean_object* _init_l_Lean_IR_ToIR_lowerLet___closed__6() { +_start: +{ +lean_object* x_1; lean_object* x_2; lean_object* x_3; lean_object* x_4; lean_object* x_5; lean_object* x_6; +x_1 = l_List_forIn_x27_loop___at_Lean_IR_ToIR_lowerEnumToScalarType___spec__2___closed__1; +x_2 = l_Lean_IR_ToIR_lowerLet___closed__1; +x_3 = lean_unsigned_to_nat(338u); +x_4 = lean_unsigned_to_nat(16u); +x_5 = 
l_Lean_IR_ToIR_lowerLet___closed__5; +x_6 = l___private_Init_Util_0__mkPanicMessageWithDecl(x_1, x_2, x_3, x_4, x_5); +return x_6; +} +} +static lean_object* _init_l_Lean_IR_ToIR_lowerLet___closed__7() { +_start: +{ +lean_object* x_1; +x_1 = lean_mk_string_unchecked("Quot", 4, 4); +return x_1; +} +} +static lean_object* _init_l_Lean_IR_ToIR_lowerLet___closed__8() { +_start: +{ +lean_object* x_1; +x_1 = lean_mk_string_unchecked("lcInv", 5, 5); +return x_1; +} +} +static lean_object* _init_l_Lean_IR_ToIR_lowerLet___closed__9() { +_start: +{ +lean_object* x_1; lean_object* x_2; lean_object* x_3; +x_1 = l_Lean_IR_ToIR_lowerLet___closed__7; +x_2 = l_Lean_IR_ToIR_lowerLet___closed__8; +x_3 = l_Lean_Name_mkStr2(x_1, x_2); +return x_3; +} +} +static lean_object* _init_l_Lean_IR_ToIR_lowerLet___closed__10() { +_start: +{ +lean_object* x_1; +x_1 = lean_mk_string_unchecked("lcUnreachable", 13, 13); +return x_1; +} +} +static lean_object* _init_l_Lean_IR_ToIR_lowerLet___closed__11() { +_start: +{ +lean_object* x_1; lean_object* x_2; lean_object* x_3; +x_1 = lean_box(0); +x_2 = l_Lean_IR_ToIR_lowerLet___closed__10; +x_3 = l_Lean_Name_str___override(x_1, x_2); +return x_3; +} +} +static lean_object* _init_l_Lean_IR_ToIR_lowerLet___closed__12() { +_start: +{ +lean_object* x_1; +x_1 = lean_mk_string_unchecked("axiom '", 7, 7); +return x_1; +} +} +static lean_object* _init_l_Lean_IR_ToIR_lowerLet___closed__13() { +_start: +{ +lean_object* x_1; lean_object* x_2; +x_1 = l_Lean_IR_ToIR_lowerLet___closed__12; +x_2 = lean_alloc_ctor(3, 1, 0); +lean_ctor_set(x_2, 0, x_1); +return x_2; +} +} +static lean_object* _init_l_Lean_IR_ToIR_lowerLet___closed__14() { +_start: +{ +lean_object* x_1; +x_1 = lean_alloc_closure((void*)(l_Lean_IR_ToIR_lowerLet___lambda__2___boxed), 1, 0); +return x_1; +} +} +static lean_object* _init_l_Lean_IR_ToIR_lowerLet___closed__15() { +_start: +{ +lean_object* x_1; +x_1 = lean_mk_string_unchecked("' not supported by code generator; consider marking definition as 'noncomputable'", 81, 81); +return x_1; +} +} +static lean_object* _init_l_Lean_IR_ToIR_lowerLet___closed__16() { +_start: +{ +lean_object* x_1; lean_object* x_2; +x_1 = l_Lean_IR_ToIR_lowerLet___closed__15; +x_2 = lean_alloc_ctor(3, 1, 0); +lean_ctor_set(x_2, 0, x_1); +return x_2; +} +} +static lean_object* _init_l_Lean_IR_ToIR_lowerLet___closed__17() { +_start: +{ +lean_object* x_1; +x_1 = lean_mk_string_unchecked("thm unsupported by code generator", 33, 33); +return x_1; +} +} +static lean_object* _init_l_Lean_IR_ToIR_lowerLet___closed__18() { +_start: +{ +lean_object* x_1; lean_object* x_2; lean_object* x_3; lean_object* x_4; lean_object* x_5; lean_object* x_6; +x_1 = l_List_forIn_x27_loop___at_Lean_IR_ToIR_lowerEnumToScalarType___spec__2___closed__1; +x_2 = l_Lean_IR_ToIR_lowerLet___closed__1; +x_3 = lean_unsigned_to_nat(337u); +x_4 = lean_unsigned_to_nat(30u); +x_5 = l_Lean_IR_ToIR_lowerLet___closed__17; +x_6 = l___private_Init_Util_0__mkPanicMessageWithDecl(x_1, x_2, x_3, x_4, x_5); +return x_6; +} +} +static lean_object* _init_l_Lean_IR_ToIR_lowerLet___closed__19() { +_start: +{ +lean_object* x_1; +x_1 = lean_mk_string_unchecked("mk", 2, 2); +return x_1; +} +} +static lean_object* _init_l_Lean_IR_ToIR_lowerLet___closed__20() { +_start: +{ +lean_object* x_1; lean_object* x_2; lean_object* x_3; +x_1 = l_Lean_IR_ToIR_lowerLet___closed__7; +x_2 = l_Lean_IR_ToIR_lowerLet___closed__19; +x_3 = l_Lean_Name_mkStr2(x_1, x_2); +return x_3; +} +} +static lean_object* _init_l_Lean_IR_ToIR_lowerLet___closed__21() { +_start: +{ 
+lean_object* x_1; +x_1 = lean_mk_string_unchecked("quot ", 5, 5); +return x_1; +} +} +static lean_object* _init_l_Lean_IR_ToIR_lowerLet___closed__22() { +_start: +{ +lean_object* x_1; lean_object* x_2; +x_1 = l_Lean_IR_ToIR_lowerLet___closed__21; +x_2 = lean_alloc_ctor(3, 1, 0); +lean_ctor_set(x_2, 0, x_1); +return x_2; +} +} +static lean_object* _init_l_Lean_IR_ToIR_lowerLet___closed__23() { +_start: +{ +lean_object* x_1; +x_1 = lean_mk_string_unchecked(" unsupported by code generator", 30, 30); +return x_1; +} +} +static lean_object* _init_l_Lean_IR_ToIR_lowerLet___closed__24() { +_start: +{ +lean_object* x_1; lean_object* x_2; +x_1 = l_Lean_IR_ToIR_lowerLet___closed__23; +x_2 = lean_alloc_ctor(3, 1, 0); +lean_ctor_set(x_2, 0, x_1); +return x_2; +} +} +static lean_object* _init_l_Lean_IR_ToIR_lowerLet___closed__25() { +_start: +{ +lean_object* x_1; +x_1 = lean_mk_string_unchecked("induct unsupported by code generator", 36, 36); +return x_1; +} +} +static lean_object* _init_l_Lean_IR_ToIR_lowerLet___closed__26() { +_start: +{ +lean_object* x_1; lean_object* x_2; lean_object* x_3; lean_object* x_4; lean_object* x_5; lean_object* x_6; +x_1 = l_List_forIn_x27_loop___at_Lean_IR_ToIR_lowerEnumToScalarType___spec__2___closed__1; +x_2 = l_Lean_IR_ToIR_lowerLet___closed__1; +x_3 = lean_unsigned_to_nat(336u); +x_4 = lean_unsigned_to_nat(33u); +x_5 = l_Lean_IR_ToIR_lowerLet___closed__25; +x_6 = l___private_Init_Util_0__mkPanicMessageWithDecl(x_1, x_2, x_3, x_4, x_5); +return x_6; +} +} +static lean_object* _init_l_Lean_IR_ToIR_lowerLet___closed__27() { +_start: +{ +lean_object* x_1; lean_object* x_2; +x_1 = lean_box(0); +x_2 = lean_array_mk(x_1); +return x_2; +} +} +static lean_object* _init_l_Lean_IR_ToIR_lowerLet___closed__28() { +_start: +{ +lean_object* x_1; +x_1 = lean_mk_string_unchecked("code generator does not support recursor '", 42, 42); +return x_1; +} +} +static lean_object* _init_l_Lean_IR_ToIR_lowerLet___closed__29() { +_start: +{ +lean_object* x_1; lean_object* x_2; +x_1 = l_Lean_IR_ToIR_lowerLet___closed__28; +x_2 = lean_alloc_ctor(3, 1, 0); +lean_ctor_set(x_2, 0, x_1); +return x_2; +} +} +static lean_object* _init_l_Lean_IR_ToIR_lowerLet___closed__30() { +_start: +{ +lean_object* x_1; +x_1 = lean_mk_string_unchecked("' yet, consider using 'match ... 
with' and/or structural recursion", 66, 66); +return x_1; +} +} +static lean_object* _init_l_Lean_IR_ToIR_lowerLet___closed__31() { +_start: +{ +lean_object* x_1; lean_object* x_2; +x_1 = l_Lean_IR_ToIR_lowerLet___closed__30; +x_2 = lean_alloc_ctor(3, 1, 0); +lean_ctor_set(x_2, 0, x_1); +return x_2; +} +} +static lean_object* _init_l_Lean_IR_ToIR_lowerLet___closed__32() { +_start: +{ +lean_object* x_1; +x_1 = lean_mk_string_unchecked("Nat", 3, 3); +return x_1; +} +} +static lean_object* _init_l_Lean_IR_ToIR_lowerLet___closed__33() { +_start: +{ +lean_object* x_1; +x_1 = lean_mk_string_unchecked("succ", 4, 4); +return x_1; +} +} +static lean_object* _init_l_Lean_IR_ToIR_lowerLet___closed__34() { +_start: +{ +lean_object* x_1; +x_1 = lean_mk_string_unchecked("add", 3, 3); +return x_1; +} +} +static lean_object* _init_l_Lean_IR_ToIR_lowerLet___closed__35() { +_start: +{ +lean_object* x_1; lean_object* x_2; lean_object* x_3; +x_1 = l_Lean_IR_ToIR_lowerLet___closed__32; +x_2 = l_Lean_IR_ToIR_lowerLet___closed__34; +x_3 = l_Lean_Name_mkStr2(x_1, x_2); +return x_3; +} +} +static lean_object* _init_l_Lean_IR_ToIR_lowerLet___closed__36() { +_start: +{ +lean_object* x_1; lean_object* x_2; +x_1 = lean_unsigned_to_nat(1u); +x_2 = lean_alloc_ctor(0, 1, 0); +lean_ctor_set(x_2, 0, x_1); +return x_2; +} +} +static lean_object* _init_l_Lean_IR_ToIR_lowerLet___closed__37() { +_start: +{ +lean_object* x_1; lean_object* x_2; +x_1 = l_Lean_IR_ToIR_lowerLet___closed__36; +x_2 = lean_alloc_ctor(11, 1, 0); +lean_ctor_set(x_2, 0, x_1); +return x_2; +} +} +static lean_object* _init_l_Lean_IR_ToIR_lowerLet___closed__38() { +_start: +{ +lean_object* x_1; lean_object* x_2; lean_object* x_3; lean_object* x_4; lean_object* x_5; lean_object* x_6; +x_1 = l_List_forIn_x27_loop___at_Lean_IR_ToIR_lowerEnumToScalarType___spec__2___closed__1; +x_2 = l_Lean_IR_ToIR_lowerLet___closed__1; +x_3 = lean_unsigned_to_nat(345u); +x_4 = lean_unsigned_to_nat(37u); +x_5 = l_Lean_IR_ToIR_lowerArg___closed__2; +x_6 = l___private_Init_Util_0__mkPanicMessageWithDecl(x_1, x_2, x_3, x_4, x_5); +return x_6; +} +} +LEAN_EXPORT lean_object* l_Lean_IR_ToIR_lowerLet(lean_object* x_1, lean_object* x_2, lean_object* x_3, lean_object* x_4, lean_object* x_5, lean_object* x_6) { +_start: +{ +lean_object* x_7; +x_7 = lean_ctor_get(x_1, 3); +lean_inc(x_7); +switch (lean_obj_tag(x_7)) { +case 0: +{ +uint8_t x_8; +x_8 = !lean_is_exclusive(x_7); +if (x_8 == 0) +{ +lean_object* x_9; lean_object* x_10; lean_object* x_11; lean_object* x_12; lean_object* x_13; lean_object* x_14; lean_object* x_15; lean_object* x_16; lean_object* x_17; lean_object* x_18; +x_9 = lean_ctor_get(x_7, 0); +x_10 = l_Lean_IR_ToIR_lowerLitValue(x_9); +lean_ctor_set_tag(x_7, 11); +lean_ctor_set(x_7, 0, x_10); +x_11 = lean_ctor_get(x_1, 0); +lean_inc(x_11); +x_12 = l_Lean_IR_ToIR_bindVar(x_11, x_3, x_4, x_5, x_6); +x_13 = lean_ctor_get(x_12, 0); +lean_inc(x_13); +x_14 = lean_ctor_get(x_12, 1); +lean_inc(x_14); +lean_dec(x_12); +x_15 = lean_ctor_get(x_13, 0); +lean_inc(x_15); +x_16 = lean_ctor_get(x_13, 1); +lean_inc(x_16); +lean_dec(x_13); +x_17 = lean_ctor_get(x_1, 2); +lean_inc(x_17); +lean_dec(x_1); +lean_inc(x_5); +lean_inc(x_4); +x_18 = l_Lean_IR_ToIR_lowerType(x_17, x_16, x_4, x_5, x_14); +if (lean_obj_tag(x_18) == 0) +{ +lean_object* x_19; lean_object* x_20; lean_object* x_21; lean_object* x_22; lean_object* x_23; +x_19 = lean_ctor_get(x_18, 0); +lean_inc(x_19); +x_20 = lean_ctor_get(x_18, 1); +lean_inc(x_20); +lean_dec(x_18); +x_21 = lean_ctor_get(x_19, 0); +lean_inc(x_21); +x_22 = 
lean_ctor_get(x_19, 1); +lean_inc(x_22); +lean_dec(x_19); +x_23 = l_Lean_IR_ToIR_lowerLet___lambda__1(x_2, x_15, x_7, x_21, x_22, x_4, x_5, x_20); +return x_23; +} +else +{ +uint8_t x_24; +lean_dec(x_15); +lean_dec(x_7); +lean_dec(x_5); +lean_dec(x_4); +lean_dec(x_2); +x_24 = !lean_is_exclusive(x_18); +if (x_24 == 0) +{ +return x_18; +} +else +{ +lean_object* x_25; lean_object* x_26; lean_object* x_27; +x_25 = lean_ctor_get(x_18, 0); +x_26 = lean_ctor_get(x_18, 1); +lean_inc(x_26); +lean_inc(x_25); +lean_dec(x_18); +x_27 = lean_alloc_ctor(1, 2, 0); +lean_ctor_set(x_27, 0, x_25); +lean_ctor_set(x_27, 1, x_26); +return x_27; +} +} +} +else +{ +lean_object* x_28; lean_object* x_29; lean_object* x_30; lean_object* x_31; lean_object* x_32; lean_object* x_33; lean_object* x_34; lean_object* x_35; lean_object* x_36; lean_object* x_37; lean_object* x_38; +x_28 = lean_ctor_get(x_7, 0); +lean_inc(x_28); +lean_dec(x_7); +x_29 = l_Lean_IR_ToIR_lowerLitValue(x_28); +x_30 = lean_alloc_ctor(11, 1, 0); +lean_ctor_set(x_30, 0, x_29); +x_31 = lean_ctor_get(x_1, 0); +lean_inc(x_31); +x_32 = l_Lean_IR_ToIR_bindVar(x_31, x_3, x_4, x_5, x_6); +x_33 = lean_ctor_get(x_32, 0); +lean_inc(x_33); +x_34 = lean_ctor_get(x_32, 1); +lean_inc(x_34); +lean_dec(x_32); +x_35 = lean_ctor_get(x_33, 0); +lean_inc(x_35); +x_36 = lean_ctor_get(x_33, 1); +lean_inc(x_36); +lean_dec(x_33); +x_37 = lean_ctor_get(x_1, 2); +lean_inc(x_37); +lean_dec(x_1); +lean_inc(x_5); +lean_inc(x_4); +x_38 = l_Lean_IR_ToIR_lowerType(x_37, x_36, x_4, x_5, x_34); +if (lean_obj_tag(x_38) == 0) +{ +lean_object* x_39; lean_object* x_40; lean_object* x_41; lean_object* x_42; lean_object* x_43; +x_39 = lean_ctor_get(x_38, 0); +lean_inc(x_39); +x_40 = lean_ctor_get(x_38, 1); +lean_inc(x_40); +lean_dec(x_38); +x_41 = lean_ctor_get(x_39, 0); +lean_inc(x_41); +x_42 = lean_ctor_get(x_39, 1); +lean_inc(x_42); +lean_dec(x_39); +x_43 = l_Lean_IR_ToIR_lowerLet___lambda__1(x_2, x_35, x_30, x_41, x_42, x_4, x_5, x_40); +return x_43; +} +else +{ +lean_object* x_44; lean_object* x_45; lean_object* x_46; lean_object* x_47; +lean_dec(x_35); +lean_dec(x_30); +lean_dec(x_5); +lean_dec(x_4); +lean_dec(x_2); +x_44 = lean_ctor_get(x_38, 0); +lean_inc(x_44); +x_45 = lean_ctor_get(x_38, 1); +lean_inc(x_45); +if (lean_is_exclusive(x_38)) { + lean_ctor_release(x_38, 0); + lean_ctor_release(x_38, 1); + x_46 = x_38; +} else { + lean_dec_ref(x_38); + x_46 = lean_box(0); +} +if (lean_is_scalar(x_46)) { + x_47 = lean_alloc_ctor(1, 2, 0); +} else { + x_47 = x_46; +} +lean_ctor_set(x_47, 0, x_44); +lean_ctor_set(x_47, 1, x_45); +return x_47; +} +} +} +case 1: +{ +lean_object* x_48; lean_object* x_49; lean_object* x_50; lean_object* x_51; lean_object* x_52; lean_object* x_53; +x_48 = lean_ctor_get(x_1, 0); +lean_inc(x_48); +lean_dec(x_1); +x_49 = l_Lean_IR_ToIR_bindErased(x_48, x_3, x_4, x_5, x_6); +x_50 = lean_ctor_get(x_49, 0); +lean_inc(x_50); +x_51 = lean_ctor_get(x_49, 1); +lean_inc(x_51); +lean_dec(x_49); +x_52 = lean_ctor_get(x_50, 1); +lean_inc(x_52); +lean_dec(x_50); +x_53 = l_Lean_IR_ToIR_lowerCode(x_2, x_52, x_4, x_5, x_51); +return x_53; +} +case 2: +{ +lean_object* x_54; lean_object* x_55; lean_object* x_56; lean_object* x_57; lean_object* x_58; lean_object* x_59; uint64_t x_60; uint64_t x_61; uint64_t x_62; uint64_t x_63; uint64_t x_64; uint64_t x_65; uint64_t x_66; size_t x_67; size_t x_68; size_t x_69; size_t x_70; size_t x_71; lean_object* x_72; lean_object* x_73; +x_54 = lean_ctor_get(x_3, 0); +lean_inc(x_54); +x_55 = lean_ctor_get(x_7, 0); +lean_inc(x_55); +x_56 = 
lean_ctor_get(x_7, 1); +lean_inc(x_56); +x_57 = lean_ctor_get(x_7, 2); +lean_inc(x_57); +lean_dec(x_7); +x_58 = lean_ctor_get(x_54, 1); +lean_inc(x_58); +lean_dec(x_54); +x_59 = lean_array_get_size(x_58); +x_60 = l___private_Lean_Expr_0__Lean_hashFVarId____x40_Lean_Expr___hyg_1730_(x_57); +x_61 = 32; +x_62 = lean_uint64_shift_right(x_60, x_61); +x_63 = lean_uint64_xor(x_60, x_62); +x_64 = 16; +x_65 = lean_uint64_shift_right(x_63, x_64); +x_66 = lean_uint64_xor(x_63, x_65); +x_67 = lean_uint64_to_usize(x_66); +x_68 = lean_usize_of_nat(x_59); +lean_dec(x_59); +x_69 = 1; +x_70 = lean_usize_sub(x_68, x_69); +x_71 = lean_usize_land(x_67, x_70); +x_72 = lean_array_uget(x_58, x_71); +lean_dec(x_58); +x_73 = l_Std_DHashMap_Internal_AssocList_get_x3f___at_Lean_IR_ToIR_lowerArg___spec__1(x_57, x_72); +lean_dec(x_72); +lean_dec(x_57); +if (lean_obj_tag(x_73) == 0) +{ +lean_object* x_74; lean_object* x_75; +lean_dec(x_56); +lean_dec(x_55); +lean_dec(x_2); +lean_dec(x_1); +x_74 = l_Lean_IR_ToIR_lowerLet___closed__2; +x_75 = l_panic___at_Lean_IR_ToIR_lowerAlt_loop___spec__1(x_74, x_3, x_4, x_5, x_6); +return x_75; +} +else +{ +lean_object* x_76; +x_76 = lean_ctor_get(x_73, 0); +lean_inc(x_76); +lean_dec(x_73); +switch (lean_obj_tag(x_76)) { +case 0: +{ +lean_object* x_77; lean_object* x_78; lean_object* x_79; lean_object* x_80; lean_object* x_81; uint8_t x_82; lean_object* x_83; +x_77 = lean_ctor_get(x_76, 0); +lean_inc(x_77); +lean_dec(x_76); +x_78 = lean_st_ref_get(x_5, x_6); +x_79 = lean_ctor_get(x_78, 0); +lean_inc(x_79); +x_80 = lean_ctor_get(x_78, 1); +lean_inc(x_80); +lean_dec(x_78); +x_81 = lean_ctor_get(x_79, 0); +lean_inc(x_81); +lean_dec(x_79); +x_82 = 0; +x_83 = l_Lean_Environment_find_x3f(x_81, x_55, x_82); +if (lean_obj_tag(x_83) == 0) +{ +lean_object* x_84; lean_object* x_85; +lean_dec(x_77); +lean_dec(x_56); +lean_dec(x_2); +lean_dec(x_1); +x_84 = l_Lean_IR_ToIR_lowerLet___closed__4; +x_85 = l_panic___at_Lean_IR_ToIR_lowerAlt_loop___spec__1(x_84, x_3, x_4, x_5, x_80); +return x_85; +} +else +{ +lean_object* x_86; +x_86 = lean_ctor_get(x_83, 0); +lean_inc(x_86); +lean_dec(x_83); +if (lean_obj_tag(x_86) == 5) +{ +lean_object* x_87; lean_object* x_88; lean_object* x_89; lean_object* x_90; lean_object* x_91; lean_object* x_92; +x_87 = lean_ctor_get(x_86, 0); +lean_inc(x_87); +lean_dec(x_86); +x_88 = lean_ctor_get(x_87, 4); +lean_inc(x_88); +lean_dec(x_87); +x_89 = l_Lean_instInhabitedName; +x_90 = lean_unsigned_to_nat(0u); +x_91 = l___private_Init_GetElem_0__List_get_x21Internal___rarg(x_89, x_88, x_90); +lean_dec(x_88); +lean_inc(x_5); +lean_inc(x_4); +x_92 = l_Lean_IR_ToIR_getCtorInfo(x_91, x_3, x_4, x_5, x_80); +if (lean_obj_tag(x_92) == 0) +{ +lean_object* x_93; lean_object* x_94; lean_object* x_95; lean_object* x_96; lean_object* x_97; lean_object* x_98; lean_object* x_99; lean_object* x_100; lean_object* x_101; lean_object* x_102; +x_93 = lean_ctor_get(x_92, 0); +lean_inc(x_93); +x_94 = lean_ctor_get(x_93, 0); +lean_inc(x_94); +x_95 = lean_ctor_get(x_92, 1); +lean_inc(x_95); +lean_dec(x_92); +x_96 = lean_ctor_get(x_93, 1); +lean_inc(x_96); +lean_dec(x_93); +x_97 = lean_ctor_get(x_94, 0); +lean_inc(x_97); +x_98 = lean_ctor_get(x_94, 1); +lean_inc(x_98); +lean_dec(x_94); +x_99 = l_Lean_IR_instInhabitedCtorFieldInfo; +x_100 = lean_array_get(x_99, x_98, x_56); +lean_dec(x_56); +lean_dec(x_98); +x_101 = l_Lean_IR_ToIR_lowerProj(x_77, x_97, x_100); +lean_dec(x_97); +x_102 = lean_ctor_get(x_101, 0); +lean_inc(x_102); +if (lean_obj_tag(x_102) == 0) +{ +lean_object* x_103; lean_object* x_104; 
lean_object* x_105; lean_object* x_106; lean_object* x_107; lean_object* x_108; lean_object* x_109; lean_object* x_110; lean_object* x_111; +x_103 = lean_ctor_get(x_101, 1); +lean_inc(x_103); +lean_dec(x_101); +x_104 = lean_ctor_get(x_102, 0); +lean_inc(x_104); +lean_dec(x_102); +x_105 = lean_ctor_get(x_1, 0); +lean_inc(x_105); +lean_dec(x_1); +x_106 = l_Lean_IR_ToIR_bindVar(x_105, x_96, x_4, x_5, x_95); +x_107 = lean_ctor_get(x_106, 0); +lean_inc(x_107); +x_108 = lean_ctor_get(x_106, 1); +lean_inc(x_108); +lean_dec(x_106); +x_109 = lean_ctor_get(x_107, 0); +lean_inc(x_109); +x_110 = lean_ctor_get(x_107, 1); +lean_inc(x_110); +lean_dec(x_107); +x_111 = l_Lean_IR_ToIR_lowerCode(x_2, x_110, x_4, x_5, x_108); +if (lean_obj_tag(x_111) == 0) +{ +uint8_t x_112; +x_112 = !lean_is_exclusive(x_111); +if (x_112 == 0) +{ +lean_object* x_113; uint8_t x_114; +x_113 = lean_ctor_get(x_111, 0); +x_114 = !lean_is_exclusive(x_113); +if (x_114 == 0) +{ +lean_object* x_115; lean_object* x_116; +x_115 = lean_ctor_get(x_113, 0); +x_116 = lean_alloc_ctor(0, 4, 0); +lean_ctor_set(x_116, 0, x_109); +lean_ctor_set(x_116, 1, x_103); +lean_ctor_set(x_116, 2, x_104); +lean_ctor_set(x_116, 3, x_115); +lean_ctor_set(x_113, 0, x_116); +return x_111; +} +else +{ +lean_object* x_117; lean_object* x_118; lean_object* x_119; lean_object* x_120; +x_117 = lean_ctor_get(x_113, 0); +x_118 = lean_ctor_get(x_113, 1); +lean_inc(x_118); +lean_inc(x_117); +lean_dec(x_113); +x_119 = lean_alloc_ctor(0, 4, 0); +lean_ctor_set(x_119, 0, x_109); +lean_ctor_set(x_119, 1, x_103); +lean_ctor_set(x_119, 2, x_104); +lean_ctor_set(x_119, 3, x_117); +x_120 = lean_alloc_ctor(0, 2, 0); +lean_ctor_set(x_120, 0, x_119); +lean_ctor_set(x_120, 1, x_118); +lean_ctor_set(x_111, 0, x_120); +return x_111; +} +} +else +{ +lean_object* x_121; lean_object* x_122; lean_object* x_123; lean_object* x_124; lean_object* x_125; lean_object* x_126; lean_object* x_127; lean_object* x_128; +x_121 = lean_ctor_get(x_111, 0); +x_122 = lean_ctor_get(x_111, 1); +lean_inc(x_122); +lean_inc(x_121); +lean_dec(x_111); +x_123 = lean_ctor_get(x_121, 0); +lean_inc(x_123); +x_124 = lean_ctor_get(x_121, 1); +lean_inc(x_124); +if (lean_is_exclusive(x_121)) { + lean_ctor_release(x_121, 0); + lean_ctor_release(x_121, 1); + x_125 = x_121; +} else { + lean_dec_ref(x_121); + x_125 = lean_box(0); +} +x_126 = lean_alloc_ctor(0, 4, 0); +lean_ctor_set(x_126, 0, x_109); +lean_ctor_set(x_126, 1, x_103); +lean_ctor_set(x_126, 2, x_104); +lean_ctor_set(x_126, 3, x_123); +if (lean_is_scalar(x_125)) { + x_127 = lean_alloc_ctor(0, 2, 0); +} else { + x_127 = x_125; +} +lean_ctor_set(x_127, 0, x_126); +lean_ctor_set(x_127, 1, x_124); +x_128 = lean_alloc_ctor(0, 2, 0); +lean_ctor_set(x_128, 0, x_127); +lean_ctor_set(x_128, 1, x_122); +return x_128; +} +} +else +{ +uint8_t x_129; +lean_dec(x_109); +lean_dec(x_104); +lean_dec(x_103); +x_129 = !lean_is_exclusive(x_111); +if (x_129 == 0) +{ +return x_111; +} +else +{ +lean_object* x_130; lean_object* x_131; lean_object* x_132; +x_130 = lean_ctor_get(x_111, 0); +x_131 = lean_ctor_get(x_111, 1); +lean_inc(x_131); +lean_inc(x_130); +lean_dec(x_111); +x_132 = lean_alloc_ctor(1, 2, 0); +lean_ctor_set(x_132, 0, x_130); +lean_ctor_set(x_132, 1, x_131); +return x_132; +} +} +} +else +{ +lean_object* x_133; lean_object* x_134; lean_object* x_135; lean_object* x_136; lean_object* x_137; lean_object* x_138; +lean_dec(x_101); +x_133 = lean_ctor_get(x_1, 0); +lean_inc(x_133); +lean_dec(x_1); +x_134 = l_Lean_IR_ToIR_bindErased(x_133, x_96, x_4, x_5, x_95); +x_135 = 
lean_ctor_get(x_134, 0); +lean_inc(x_135); +x_136 = lean_ctor_get(x_134, 1); +lean_inc(x_136); +lean_dec(x_134); +x_137 = lean_ctor_get(x_135, 1); +lean_inc(x_137); +lean_dec(x_135); +x_138 = l_Lean_IR_ToIR_lowerCode(x_2, x_137, x_4, x_5, x_136); +return x_138; +} +} +else +{ +uint8_t x_139; +lean_dec(x_77); +lean_dec(x_56); +lean_dec(x_5); +lean_dec(x_4); +lean_dec(x_2); +lean_dec(x_1); +x_139 = !lean_is_exclusive(x_92); +if (x_139 == 0) +{ +return x_92; +} +else +{ +lean_object* x_140; lean_object* x_141; lean_object* x_142; +x_140 = lean_ctor_get(x_92, 0); +x_141 = lean_ctor_get(x_92, 1); +lean_inc(x_141); +lean_inc(x_140); +lean_dec(x_92); +x_142 = lean_alloc_ctor(1, 2, 0); +lean_ctor_set(x_142, 0, x_140); +lean_ctor_set(x_142, 1, x_141); +return x_142; +} +} +} +else +{ +lean_object* x_143; lean_object* x_144; +lean_dec(x_86); +lean_dec(x_77); +lean_dec(x_56); +lean_dec(x_2); +lean_dec(x_1); +x_143 = l_Lean_IR_ToIR_lowerLet___closed__4; +x_144 = l_panic___at_Lean_IR_ToIR_lowerAlt_loop___spec__1(x_143, x_3, x_4, x_5, x_80); +return x_144; +} +} +} +case 1: +{ +lean_object* x_145; lean_object* x_146; +lean_dec(x_76); +lean_dec(x_56); +lean_dec(x_55); +lean_dec(x_2); +lean_dec(x_1); +x_145 = l_Lean_IR_ToIR_lowerLet___closed__2; +x_146 = l_panic___at_Lean_IR_ToIR_lowerAlt_loop___spec__1(x_145, x_3, x_4, x_5, x_6); +return x_146; +} +default: +{ +lean_object* x_147; lean_object* x_148; lean_object* x_149; lean_object* x_150; lean_object* x_151; lean_object* x_152; +lean_dec(x_56); +lean_dec(x_55); +x_147 = lean_ctor_get(x_1, 0); +lean_inc(x_147); +lean_dec(x_1); +x_148 = l_Lean_IR_ToIR_bindErased(x_147, x_3, x_4, x_5, x_6); +x_149 = lean_ctor_get(x_148, 0); +lean_inc(x_149); +x_150 = lean_ctor_get(x_148, 1); +lean_inc(x_150); +lean_dec(x_148); +x_151 = lean_ctor_get(x_149, 1); +lean_inc(x_151); +lean_dec(x_149); +x_152 = l_Lean_IR_ToIR_lowerCode(x_2, x_151, x_4, x_5, x_150); +return x_152; +} +} +} +} +case 3: +{ +lean_object* x_153; +x_153 = lean_ctor_get(x_7, 0); +lean_inc(x_153); +switch (lean_obj_tag(x_153)) { +case 0: +{ +lean_object* x_154; lean_object* x_155; size_t x_156; size_t x_157; lean_object* x_158; +x_154 = lean_ctor_get(x_7, 2); +lean_inc(x_154); +if (lean_is_exclusive(x_7)) { + lean_ctor_release(x_7, 0); + lean_ctor_release(x_7, 1); + lean_ctor_release(x_7, 2); + x_155 = x_7; +} else { + lean_dec_ref(x_7); + x_155 = lean_box(0); +} +x_156 = lean_array_size(x_154); +x_157 = 0; +lean_inc(x_5); +lean_inc(x_4); +lean_inc(x_154); +x_158 = l_Array_mapMUnsafe_map___at_Lean_IR_ToIR_lowerCode___spec__2(x_156, x_157, x_154, x_3, x_4, x_5, x_6); +if (lean_obj_tag(x_158) == 0) +{ +lean_object* x_159; lean_object* x_160; lean_object* x_161; uint8_t x_162; +x_159 = lean_ctor_get(x_158, 0); +lean_inc(x_159); +x_160 = lean_ctor_get(x_158, 1); +lean_inc(x_160); +if (lean_is_exclusive(x_158)) { + lean_ctor_release(x_158, 0); + lean_ctor_release(x_158, 1); + x_161 = x_158; +} else { + lean_dec_ref(x_158); + x_161 = lean_box(0); +} +x_162 = !lean_is_exclusive(x_159); +if (x_162 == 0) +{ +lean_object* x_163; lean_object* x_164; lean_object* x_165; lean_object* x_166; lean_object* x_2083; lean_object* x_2084; +x_163 = lean_ctor_get(x_159, 0); +x_164 = lean_ctor_get(x_159, 1); +x_2083 = l_Lean_Compiler_LCNF_getMonoDecl_x3f(x_153, x_4, x_5, x_160); +x_2084 = lean_ctor_get(x_2083, 0); +lean_inc(x_2084); +if (lean_obj_tag(x_2084) == 0) +{ +lean_object* x_2085; lean_object* x_2086; +x_2085 = lean_ctor_get(x_2083, 1); +lean_inc(x_2085); +lean_dec(x_2083); +x_2086 = lean_box(0); +lean_ctor_set(x_159, 
0, x_2086); +x_165 = x_159; +x_166 = x_2085; +goto block_2082; +} +else +{ +uint8_t x_2087; +lean_free_object(x_159); +x_2087 = !lean_is_exclusive(x_2083); +if (x_2087 == 0) +{ +lean_object* x_2088; lean_object* x_2089; uint8_t x_2090; +x_2088 = lean_ctor_get(x_2083, 1); +x_2089 = lean_ctor_get(x_2083, 0); +lean_dec(x_2089); +x_2090 = !lean_is_exclusive(x_2084); +if (x_2090 == 0) +{ +lean_object* x_2091; lean_object* x_2092; lean_object* x_2093; lean_object* x_2094; uint8_t x_2095; +x_2091 = lean_ctor_get(x_2084, 0); +x_2092 = lean_array_get_size(x_163); +x_2093 = lean_ctor_get(x_2091, 3); +lean_inc(x_2093); +lean_dec(x_2091); +x_2094 = lean_array_get_size(x_2093); +lean_dec(x_2093); +x_2095 = lean_nat_dec_lt(x_2092, x_2094); +if (x_2095 == 0) +{ +uint8_t x_2096; +x_2096 = lean_nat_dec_eq(x_2092, x_2094); +if (x_2096 == 0) +{ +lean_object* x_2097; lean_object* x_2098; lean_object* x_2099; lean_object* x_2100; lean_object* x_2101; lean_object* x_2102; lean_object* x_2103; lean_object* x_2104; lean_object* x_2105; lean_object* x_2106; lean_object* x_2107; lean_object* x_2108; lean_object* x_2109; lean_object* x_2110; lean_object* x_2111; lean_object* x_2112; +x_2097 = lean_unsigned_to_nat(0u); +x_2098 = l_Array_extract___rarg(x_163, x_2097, x_2094); +x_2099 = l_Array_extract___rarg(x_163, x_2094, x_2092); +lean_dec(x_2092); +lean_ctor_set_tag(x_2083, 6); +lean_ctor_set(x_2083, 1, x_2098); +lean_ctor_set(x_2083, 0, x_153); +x_2100 = lean_ctor_get(x_1, 0); +lean_inc(x_2100); +x_2101 = l_Lean_IR_ToIR_bindVar(x_2100, x_164, x_4, x_5, x_2088); +x_2102 = lean_ctor_get(x_2101, 0); +lean_inc(x_2102); +x_2103 = lean_ctor_get(x_2101, 1); +lean_inc(x_2103); +lean_dec(x_2101); +x_2104 = lean_ctor_get(x_2102, 0); +lean_inc(x_2104); +x_2105 = lean_ctor_get(x_2102, 1); +lean_inc(x_2105); +lean_dec(x_2102); +x_2106 = l_Lean_IR_ToIR_newVar(x_2105, x_4, x_5, x_2103); +x_2107 = lean_ctor_get(x_2106, 0); +lean_inc(x_2107); +x_2108 = lean_ctor_get(x_2106, 1); +lean_inc(x_2108); +lean_dec(x_2106); +x_2109 = lean_ctor_get(x_2107, 0); +lean_inc(x_2109); +x_2110 = lean_ctor_get(x_2107, 1); +lean_inc(x_2110); +lean_dec(x_2107); +x_2111 = lean_ctor_get(x_1, 2); +lean_inc(x_2111); +lean_inc(x_5); +lean_inc(x_4); +x_2112 = l_Lean_IR_ToIR_lowerType(x_2111, x_2110, x_4, x_5, x_2108); +if (lean_obj_tag(x_2112) == 0) +{ +lean_object* x_2113; lean_object* x_2114; lean_object* x_2115; lean_object* x_2116; lean_object* x_2117; +x_2113 = lean_ctor_get(x_2112, 0); +lean_inc(x_2113); +x_2114 = lean_ctor_get(x_2112, 1); +lean_inc(x_2114); +lean_dec(x_2112); +x_2115 = lean_ctor_get(x_2113, 0); +lean_inc(x_2115); +x_2116 = lean_ctor_get(x_2113, 1); +lean_inc(x_2116); +lean_dec(x_2113); +lean_inc(x_5); +lean_inc(x_4); +lean_inc(x_2); +x_2117 = l_Lean_IR_ToIR_lowerLet___lambda__3(x_2, x_2109, x_2099, x_2104, x_2083, x_2115, x_2116, x_4, x_5, x_2114); +if (lean_obj_tag(x_2117) == 0) +{ +lean_object* x_2118; lean_object* x_2119; uint8_t x_2120; +x_2118 = lean_ctor_get(x_2117, 0); +lean_inc(x_2118); +x_2119 = lean_ctor_get(x_2117, 1); +lean_inc(x_2119); +lean_dec(x_2117); +x_2120 = !lean_is_exclusive(x_2118); +if (x_2120 == 0) +{ +lean_object* x_2121; +x_2121 = lean_ctor_get(x_2118, 0); +lean_ctor_set(x_2084, 0, x_2121); +lean_ctor_set(x_2118, 0, x_2084); +x_165 = x_2118; +x_166 = x_2119; +goto block_2082; +} +else +{ +lean_object* x_2122; lean_object* x_2123; lean_object* x_2124; +x_2122 = lean_ctor_get(x_2118, 0); +x_2123 = lean_ctor_get(x_2118, 1); +lean_inc(x_2123); +lean_inc(x_2122); +lean_dec(x_2118); +lean_ctor_set(x_2084, 0, 
x_2122); +x_2124 = lean_alloc_ctor(0, 2, 0); +lean_ctor_set(x_2124, 0, x_2084); +lean_ctor_set(x_2124, 1, x_2123); +x_165 = x_2124; +x_166 = x_2119; +goto block_2082; +} +} +else +{ +uint8_t x_2125; +lean_free_object(x_2084); +lean_dec(x_163); +lean_dec(x_161); +lean_dec(x_155); +lean_dec(x_154); +lean_dec(x_5); +lean_dec(x_4); +lean_dec(x_2); +lean_dec(x_1); +x_2125 = !lean_is_exclusive(x_2117); +if (x_2125 == 0) +{ +return x_2117; +} +else +{ +lean_object* x_2126; lean_object* x_2127; lean_object* x_2128; +x_2126 = lean_ctor_get(x_2117, 0); +x_2127 = lean_ctor_get(x_2117, 1); +lean_inc(x_2127); +lean_inc(x_2126); +lean_dec(x_2117); +x_2128 = lean_alloc_ctor(1, 2, 0); +lean_ctor_set(x_2128, 0, x_2126); +lean_ctor_set(x_2128, 1, x_2127); +return x_2128; +} +} +} +else +{ +uint8_t x_2129; +lean_dec(x_2109); +lean_dec(x_2104); +lean_dec(x_2083); +lean_dec(x_2099); +lean_free_object(x_2084); +lean_dec(x_163); +lean_dec(x_161); +lean_dec(x_155); +lean_dec(x_154); +lean_dec(x_5); +lean_dec(x_4); +lean_dec(x_2); +lean_dec(x_1); +x_2129 = !lean_is_exclusive(x_2112); +if (x_2129 == 0) +{ +return x_2112; +} +else +{ +lean_object* x_2130; lean_object* x_2131; lean_object* x_2132; +x_2130 = lean_ctor_get(x_2112, 0); +x_2131 = lean_ctor_get(x_2112, 1); +lean_inc(x_2131); +lean_inc(x_2130); +lean_dec(x_2112); +x_2132 = lean_alloc_ctor(1, 2, 0); +lean_ctor_set(x_2132, 0, x_2130); +lean_ctor_set(x_2132, 1, x_2131); +return x_2132; +} +} +} +else +{ +lean_object* x_2133; lean_object* x_2134; lean_object* x_2135; lean_object* x_2136; lean_object* x_2137; lean_object* x_2138; lean_object* x_2139; lean_object* x_2140; +lean_dec(x_2094); +lean_dec(x_2092); +lean_inc(x_163); +lean_ctor_set_tag(x_2083, 6); +lean_ctor_set(x_2083, 1, x_163); +lean_ctor_set(x_2083, 0, x_153); +x_2133 = lean_ctor_get(x_1, 0); +lean_inc(x_2133); +x_2134 = l_Lean_IR_ToIR_bindVar(x_2133, x_164, x_4, x_5, x_2088); +x_2135 = lean_ctor_get(x_2134, 0); +lean_inc(x_2135); +x_2136 = lean_ctor_get(x_2134, 1); +lean_inc(x_2136); +lean_dec(x_2134); +x_2137 = lean_ctor_get(x_2135, 0); +lean_inc(x_2137); +x_2138 = lean_ctor_get(x_2135, 1); +lean_inc(x_2138); +lean_dec(x_2135); +x_2139 = lean_ctor_get(x_1, 2); +lean_inc(x_2139); +lean_inc(x_5); +lean_inc(x_4); +x_2140 = l_Lean_IR_ToIR_lowerType(x_2139, x_2138, x_4, x_5, x_2136); +if (lean_obj_tag(x_2140) == 0) +{ +lean_object* x_2141; lean_object* x_2142; lean_object* x_2143; lean_object* x_2144; lean_object* x_2145; +x_2141 = lean_ctor_get(x_2140, 0); +lean_inc(x_2141); +x_2142 = lean_ctor_get(x_2140, 1); +lean_inc(x_2142); +lean_dec(x_2140); +x_2143 = lean_ctor_get(x_2141, 0); +lean_inc(x_2143); +x_2144 = lean_ctor_get(x_2141, 1); +lean_inc(x_2144); +lean_dec(x_2141); +lean_inc(x_5); +lean_inc(x_4); +lean_inc(x_2); +x_2145 = l_Lean_IR_ToIR_lowerLet___lambda__1(x_2, x_2137, x_2083, x_2143, x_2144, x_4, x_5, x_2142); +if (lean_obj_tag(x_2145) == 0) +{ +lean_object* x_2146; lean_object* x_2147; uint8_t x_2148; +x_2146 = lean_ctor_get(x_2145, 0); +lean_inc(x_2146); +x_2147 = lean_ctor_get(x_2145, 1); +lean_inc(x_2147); +lean_dec(x_2145); +x_2148 = !lean_is_exclusive(x_2146); +if (x_2148 == 0) +{ +lean_object* x_2149; +x_2149 = lean_ctor_get(x_2146, 0); +lean_ctor_set(x_2084, 0, x_2149); +lean_ctor_set(x_2146, 0, x_2084); +x_165 = x_2146; +x_166 = x_2147; +goto block_2082; +} +else +{ +lean_object* x_2150; lean_object* x_2151; lean_object* x_2152; +x_2150 = lean_ctor_get(x_2146, 0); +x_2151 = lean_ctor_get(x_2146, 1); +lean_inc(x_2151); +lean_inc(x_2150); +lean_dec(x_2146); +lean_ctor_set(x_2084, 0, 
x_2150); +x_2152 = lean_alloc_ctor(0, 2, 0); +lean_ctor_set(x_2152, 0, x_2084); +lean_ctor_set(x_2152, 1, x_2151); +x_165 = x_2152; +x_166 = x_2147; +goto block_2082; +} +} +else +{ +uint8_t x_2153; +lean_free_object(x_2084); +lean_dec(x_163); +lean_dec(x_161); +lean_dec(x_155); +lean_dec(x_154); +lean_dec(x_5); +lean_dec(x_4); +lean_dec(x_2); +lean_dec(x_1); +x_2153 = !lean_is_exclusive(x_2145); +if (x_2153 == 0) +{ +return x_2145; +} +else +{ +lean_object* x_2154; lean_object* x_2155; lean_object* x_2156; +x_2154 = lean_ctor_get(x_2145, 0); +x_2155 = lean_ctor_get(x_2145, 1); +lean_inc(x_2155); +lean_inc(x_2154); +lean_dec(x_2145); +x_2156 = lean_alloc_ctor(1, 2, 0); +lean_ctor_set(x_2156, 0, x_2154); +lean_ctor_set(x_2156, 1, x_2155); +return x_2156; +} +} +} +else +{ +uint8_t x_2157; +lean_dec(x_2137); +lean_dec(x_2083); +lean_free_object(x_2084); +lean_dec(x_163); +lean_dec(x_161); +lean_dec(x_155); +lean_dec(x_154); +lean_dec(x_5); +lean_dec(x_4); +lean_dec(x_2); +lean_dec(x_1); +x_2157 = !lean_is_exclusive(x_2140); +if (x_2157 == 0) +{ +return x_2140; +} +else +{ +lean_object* x_2158; lean_object* x_2159; lean_object* x_2160; +x_2158 = lean_ctor_get(x_2140, 0); +x_2159 = lean_ctor_get(x_2140, 1); +lean_inc(x_2159); +lean_inc(x_2158); +lean_dec(x_2140); +x_2160 = lean_alloc_ctor(1, 2, 0); +lean_ctor_set(x_2160, 0, x_2158); +lean_ctor_set(x_2160, 1, x_2159); +return x_2160; +} +} +} +} +else +{ +lean_object* x_2161; lean_object* x_2162; lean_object* x_2163; lean_object* x_2164; lean_object* x_2165; lean_object* x_2166; lean_object* x_2167; lean_object* x_2168; +lean_dec(x_2094); +lean_dec(x_2092); +lean_inc(x_163); +lean_ctor_set_tag(x_2083, 7); +lean_ctor_set(x_2083, 1, x_163); +lean_ctor_set(x_2083, 0, x_153); +x_2161 = lean_ctor_get(x_1, 0); +lean_inc(x_2161); +x_2162 = l_Lean_IR_ToIR_bindVar(x_2161, x_164, x_4, x_5, x_2088); +x_2163 = lean_ctor_get(x_2162, 0); +lean_inc(x_2163); +x_2164 = lean_ctor_get(x_2162, 1); +lean_inc(x_2164); +lean_dec(x_2162); +x_2165 = lean_ctor_get(x_2163, 0); +lean_inc(x_2165); +x_2166 = lean_ctor_get(x_2163, 1); +lean_inc(x_2166); +lean_dec(x_2163); +x_2167 = lean_box(7); +lean_inc(x_5); +lean_inc(x_4); +lean_inc(x_2); +x_2168 = l_Lean_IR_ToIR_lowerLet___lambda__1(x_2, x_2165, x_2083, x_2167, x_2166, x_4, x_5, x_2164); +if (lean_obj_tag(x_2168) == 0) +{ +lean_object* x_2169; lean_object* x_2170; uint8_t x_2171; +x_2169 = lean_ctor_get(x_2168, 0); +lean_inc(x_2169); +x_2170 = lean_ctor_get(x_2168, 1); +lean_inc(x_2170); +lean_dec(x_2168); +x_2171 = !lean_is_exclusive(x_2169); +if (x_2171 == 0) +{ +lean_object* x_2172; +x_2172 = lean_ctor_get(x_2169, 0); +lean_ctor_set(x_2084, 0, x_2172); +lean_ctor_set(x_2169, 0, x_2084); +x_165 = x_2169; +x_166 = x_2170; +goto block_2082; +} +else +{ +lean_object* x_2173; lean_object* x_2174; lean_object* x_2175; +x_2173 = lean_ctor_get(x_2169, 0); +x_2174 = lean_ctor_get(x_2169, 1); +lean_inc(x_2174); +lean_inc(x_2173); +lean_dec(x_2169); +lean_ctor_set(x_2084, 0, x_2173); +x_2175 = lean_alloc_ctor(0, 2, 0); +lean_ctor_set(x_2175, 0, x_2084); +lean_ctor_set(x_2175, 1, x_2174); +x_165 = x_2175; +x_166 = x_2170; +goto block_2082; +} +} +else +{ +uint8_t x_2176; +lean_free_object(x_2084); +lean_dec(x_163); +lean_dec(x_161); +lean_dec(x_155); +lean_dec(x_154); +lean_dec(x_5); +lean_dec(x_4); +lean_dec(x_2); +lean_dec(x_1); +x_2176 = !lean_is_exclusive(x_2168); +if (x_2176 == 0) +{ +return x_2168; +} +else +{ +lean_object* x_2177; lean_object* x_2178; lean_object* x_2179; +x_2177 = lean_ctor_get(x_2168, 0); +x_2178 = 
lean_ctor_get(x_2168, 1); +lean_inc(x_2178); +lean_inc(x_2177); +lean_dec(x_2168); +x_2179 = lean_alloc_ctor(1, 2, 0); +lean_ctor_set(x_2179, 0, x_2177); +lean_ctor_set(x_2179, 1, x_2178); +return x_2179; +} +} +} +} +else +{ +lean_object* x_2180; lean_object* x_2181; lean_object* x_2182; lean_object* x_2183; uint8_t x_2184; +x_2180 = lean_ctor_get(x_2084, 0); +lean_inc(x_2180); +lean_dec(x_2084); +x_2181 = lean_array_get_size(x_163); +x_2182 = lean_ctor_get(x_2180, 3); +lean_inc(x_2182); +lean_dec(x_2180); +x_2183 = lean_array_get_size(x_2182); +lean_dec(x_2182); +x_2184 = lean_nat_dec_lt(x_2181, x_2183); +if (x_2184 == 0) +{ +uint8_t x_2185; +x_2185 = lean_nat_dec_eq(x_2181, x_2183); +if (x_2185 == 0) +{ +lean_object* x_2186; lean_object* x_2187; lean_object* x_2188; lean_object* x_2189; lean_object* x_2190; lean_object* x_2191; lean_object* x_2192; lean_object* x_2193; lean_object* x_2194; lean_object* x_2195; lean_object* x_2196; lean_object* x_2197; lean_object* x_2198; lean_object* x_2199; lean_object* x_2200; lean_object* x_2201; +x_2186 = lean_unsigned_to_nat(0u); +x_2187 = l_Array_extract___rarg(x_163, x_2186, x_2183); +x_2188 = l_Array_extract___rarg(x_163, x_2183, x_2181); +lean_dec(x_2181); +lean_ctor_set_tag(x_2083, 6); +lean_ctor_set(x_2083, 1, x_2187); +lean_ctor_set(x_2083, 0, x_153); +x_2189 = lean_ctor_get(x_1, 0); +lean_inc(x_2189); +x_2190 = l_Lean_IR_ToIR_bindVar(x_2189, x_164, x_4, x_5, x_2088); +x_2191 = lean_ctor_get(x_2190, 0); +lean_inc(x_2191); +x_2192 = lean_ctor_get(x_2190, 1); +lean_inc(x_2192); +lean_dec(x_2190); +x_2193 = lean_ctor_get(x_2191, 0); +lean_inc(x_2193); +x_2194 = lean_ctor_get(x_2191, 1); +lean_inc(x_2194); +lean_dec(x_2191); +x_2195 = l_Lean_IR_ToIR_newVar(x_2194, x_4, x_5, x_2192); +x_2196 = lean_ctor_get(x_2195, 0); +lean_inc(x_2196); +x_2197 = lean_ctor_get(x_2195, 1); +lean_inc(x_2197); +lean_dec(x_2195); +x_2198 = lean_ctor_get(x_2196, 0); +lean_inc(x_2198); +x_2199 = lean_ctor_get(x_2196, 1); +lean_inc(x_2199); +lean_dec(x_2196); +x_2200 = lean_ctor_get(x_1, 2); +lean_inc(x_2200); +lean_inc(x_5); +lean_inc(x_4); +x_2201 = l_Lean_IR_ToIR_lowerType(x_2200, x_2199, x_4, x_5, x_2197); +if (lean_obj_tag(x_2201) == 0) +{ +lean_object* x_2202; lean_object* x_2203; lean_object* x_2204; lean_object* x_2205; lean_object* x_2206; +x_2202 = lean_ctor_get(x_2201, 0); +lean_inc(x_2202); +x_2203 = lean_ctor_get(x_2201, 1); +lean_inc(x_2203); +lean_dec(x_2201); +x_2204 = lean_ctor_get(x_2202, 0); +lean_inc(x_2204); +x_2205 = lean_ctor_get(x_2202, 1); +lean_inc(x_2205); +lean_dec(x_2202); +lean_inc(x_5); +lean_inc(x_4); +lean_inc(x_2); +x_2206 = l_Lean_IR_ToIR_lowerLet___lambda__3(x_2, x_2198, x_2188, x_2193, x_2083, x_2204, x_2205, x_4, x_5, x_2203); +if (lean_obj_tag(x_2206) == 0) +{ +lean_object* x_2207; lean_object* x_2208; lean_object* x_2209; lean_object* x_2210; lean_object* x_2211; lean_object* x_2212; lean_object* x_2213; +x_2207 = lean_ctor_get(x_2206, 0); +lean_inc(x_2207); +x_2208 = lean_ctor_get(x_2206, 1); +lean_inc(x_2208); +lean_dec(x_2206); +x_2209 = lean_ctor_get(x_2207, 0); +lean_inc(x_2209); +x_2210 = lean_ctor_get(x_2207, 1); +lean_inc(x_2210); +if (lean_is_exclusive(x_2207)) { + lean_ctor_release(x_2207, 0); + lean_ctor_release(x_2207, 1); + x_2211 = x_2207; +} else { + lean_dec_ref(x_2207); + x_2211 = lean_box(0); +} +x_2212 = lean_alloc_ctor(1, 1, 0); +lean_ctor_set(x_2212, 0, x_2209); +if (lean_is_scalar(x_2211)) { + x_2213 = lean_alloc_ctor(0, 2, 0); +} else { + x_2213 = x_2211; +} +lean_ctor_set(x_2213, 0, x_2212); 
+lean_ctor_set(x_2213, 1, x_2210); +x_165 = x_2213; +x_166 = x_2208; +goto block_2082; +} +else +{ +lean_object* x_2214; lean_object* x_2215; lean_object* x_2216; lean_object* x_2217; +lean_dec(x_163); +lean_dec(x_161); +lean_dec(x_155); +lean_dec(x_154); +lean_dec(x_5); +lean_dec(x_4); +lean_dec(x_2); +lean_dec(x_1); +x_2214 = lean_ctor_get(x_2206, 0); +lean_inc(x_2214); +x_2215 = lean_ctor_get(x_2206, 1); +lean_inc(x_2215); +if (lean_is_exclusive(x_2206)) { + lean_ctor_release(x_2206, 0); + lean_ctor_release(x_2206, 1); + x_2216 = x_2206; +} else { + lean_dec_ref(x_2206); + x_2216 = lean_box(0); +} +if (lean_is_scalar(x_2216)) { + x_2217 = lean_alloc_ctor(1, 2, 0); +} else { + x_2217 = x_2216; +} +lean_ctor_set(x_2217, 0, x_2214); +lean_ctor_set(x_2217, 1, x_2215); +return x_2217; +} +} +else +{ +lean_object* x_2218; lean_object* x_2219; lean_object* x_2220; lean_object* x_2221; +lean_dec(x_2198); +lean_dec(x_2193); +lean_dec(x_2083); +lean_dec(x_2188); +lean_dec(x_163); +lean_dec(x_161); +lean_dec(x_155); +lean_dec(x_154); +lean_dec(x_5); +lean_dec(x_4); +lean_dec(x_2); +lean_dec(x_1); +x_2218 = lean_ctor_get(x_2201, 0); +lean_inc(x_2218); +x_2219 = lean_ctor_get(x_2201, 1); +lean_inc(x_2219); +if (lean_is_exclusive(x_2201)) { + lean_ctor_release(x_2201, 0); + lean_ctor_release(x_2201, 1); + x_2220 = x_2201; +} else { + lean_dec_ref(x_2201); + x_2220 = lean_box(0); +} +if (lean_is_scalar(x_2220)) { + x_2221 = lean_alloc_ctor(1, 2, 0); +} else { + x_2221 = x_2220; +} +lean_ctor_set(x_2221, 0, x_2218); +lean_ctor_set(x_2221, 1, x_2219); +return x_2221; +} +} +else +{ +lean_object* x_2222; lean_object* x_2223; lean_object* x_2224; lean_object* x_2225; lean_object* x_2226; lean_object* x_2227; lean_object* x_2228; lean_object* x_2229; +lean_dec(x_2183); +lean_dec(x_2181); +lean_inc(x_163); +lean_ctor_set_tag(x_2083, 6); +lean_ctor_set(x_2083, 1, x_163); +lean_ctor_set(x_2083, 0, x_153); +x_2222 = lean_ctor_get(x_1, 0); +lean_inc(x_2222); +x_2223 = l_Lean_IR_ToIR_bindVar(x_2222, x_164, x_4, x_5, x_2088); +x_2224 = lean_ctor_get(x_2223, 0); +lean_inc(x_2224); +x_2225 = lean_ctor_get(x_2223, 1); +lean_inc(x_2225); +lean_dec(x_2223); +x_2226 = lean_ctor_get(x_2224, 0); +lean_inc(x_2226); +x_2227 = lean_ctor_get(x_2224, 1); +lean_inc(x_2227); +lean_dec(x_2224); +x_2228 = lean_ctor_get(x_1, 2); +lean_inc(x_2228); +lean_inc(x_5); +lean_inc(x_4); +x_2229 = l_Lean_IR_ToIR_lowerType(x_2228, x_2227, x_4, x_5, x_2225); +if (lean_obj_tag(x_2229) == 0) +{ +lean_object* x_2230; lean_object* x_2231; lean_object* x_2232; lean_object* x_2233; lean_object* x_2234; +x_2230 = lean_ctor_get(x_2229, 0); +lean_inc(x_2230); +x_2231 = lean_ctor_get(x_2229, 1); +lean_inc(x_2231); +lean_dec(x_2229); +x_2232 = lean_ctor_get(x_2230, 0); +lean_inc(x_2232); +x_2233 = lean_ctor_get(x_2230, 1); +lean_inc(x_2233); +lean_dec(x_2230); +lean_inc(x_5); +lean_inc(x_4); +lean_inc(x_2); +x_2234 = l_Lean_IR_ToIR_lowerLet___lambda__1(x_2, x_2226, x_2083, x_2232, x_2233, x_4, x_5, x_2231); +if (lean_obj_tag(x_2234) == 0) +{ +lean_object* x_2235; lean_object* x_2236; lean_object* x_2237; lean_object* x_2238; lean_object* x_2239; lean_object* x_2240; lean_object* x_2241; +x_2235 = lean_ctor_get(x_2234, 0); +lean_inc(x_2235); +x_2236 = lean_ctor_get(x_2234, 1); +lean_inc(x_2236); +lean_dec(x_2234); +x_2237 = lean_ctor_get(x_2235, 0); +lean_inc(x_2237); +x_2238 = lean_ctor_get(x_2235, 1); +lean_inc(x_2238); +if (lean_is_exclusive(x_2235)) { + lean_ctor_release(x_2235, 0); + lean_ctor_release(x_2235, 1); + x_2239 = x_2235; +} else { + 
lean_dec_ref(x_2235); + x_2239 = lean_box(0); +} +x_2240 = lean_alloc_ctor(1, 1, 0); +lean_ctor_set(x_2240, 0, x_2237); +if (lean_is_scalar(x_2239)) { + x_2241 = lean_alloc_ctor(0, 2, 0); +} else { + x_2241 = x_2239; +} +lean_ctor_set(x_2241, 0, x_2240); +lean_ctor_set(x_2241, 1, x_2238); +x_165 = x_2241; +x_166 = x_2236; +goto block_2082; +} +else +{ +lean_object* x_2242; lean_object* x_2243; lean_object* x_2244; lean_object* x_2245; +lean_dec(x_163); +lean_dec(x_161); +lean_dec(x_155); +lean_dec(x_154); +lean_dec(x_5); +lean_dec(x_4); +lean_dec(x_2); +lean_dec(x_1); +x_2242 = lean_ctor_get(x_2234, 0); +lean_inc(x_2242); +x_2243 = lean_ctor_get(x_2234, 1); +lean_inc(x_2243); +if (lean_is_exclusive(x_2234)) { + lean_ctor_release(x_2234, 0); + lean_ctor_release(x_2234, 1); + x_2244 = x_2234; +} else { + lean_dec_ref(x_2234); + x_2244 = lean_box(0); +} +if (lean_is_scalar(x_2244)) { + x_2245 = lean_alloc_ctor(1, 2, 0); +} else { + x_2245 = x_2244; +} +lean_ctor_set(x_2245, 0, x_2242); +lean_ctor_set(x_2245, 1, x_2243); +return x_2245; +} +} +else +{ +lean_object* x_2246; lean_object* x_2247; lean_object* x_2248; lean_object* x_2249; +lean_dec(x_2226); +lean_dec(x_2083); +lean_dec(x_163); +lean_dec(x_161); +lean_dec(x_155); +lean_dec(x_154); +lean_dec(x_5); +lean_dec(x_4); +lean_dec(x_2); +lean_dec(x_1); +x_2246 = lean_ctor_get(x_2229, 0); +lean_inc(x_2246); +x_2247 = lean_ctor_get(x_2229, 1); +lean_inc(x_2247); +if (lean_is_exclusive(x_2229)) { + lean_ctor_release(x_2229, 0); + lean_ctor_release(x_2229, 1); + x_2248 = x_2229; +} else { + lean_dec_ref(x_2229); + x_2248 = lean_box(0); +} +if (lean_is_scalar(x_2248)) { + x_2249 = lean_alloc_ctor(1, 2, 0); +} else { + x_2249 = x_2248; +} +lean_ctor_set(x_2249, 0, x_2246); +lean_ctor_set(x_2249, 1, x_2247); +return x_2249; +} +} +} +else +{ +lean_object* x_2250; lean_object* x_2251; lean_object* x_2252; lean_object* x_2253; lean_object* x_2254; lean_object* x_2255; lean_object* x_2256; lean_object* x_2257; +lean_dec(x_2183); +lean_dec(x_2181); +lean_inc(x_163); +lean_ctor_set_tag(x_2083, 7); +lean_ctor_set(x_2083, 1, x_163); +lean_ctor_set(x_2083, 0, x_153); +x_2250 = lean_ctor_get(x_1, 0); +lean_inc(x_2250); +x_2251 = l_Lean_IR_ToIR_bindVar(x_2250, x_164, x_4, x_5, x_2088); +x_2252 = lean_ctor_get(x_2251, 0); +lean_inc(x_2252); +x_2253 = lean_ctor_get(x_2251, 1); +lean_inc(x_2253); +lean_dec(x_2251); +x_2254 = lean_ctor_get(x_2252, 0); +lean_inc(x_2254); +x_2255 = lean_ctor_get(x_2252, 1); +lean_inc(x_2255); +lean_dec(x_2252); +x_2256 = lean_box(7); +lean_inc(x_5); +lean_inc(x_4); +lean_inc(x_2); +x_2257 = l_Lean_IR_ToIR_lowerLet___lambda__1(x_2, x_2254, x_2083, x_2256, x_2255, x_4, x_5, x_2253); +if (lean_obj_tag(x_2257) == 0) +{ +lean_object* x_2258; lean_object* x_2259; lean_object* x_2260; lean_object* x_2261; lean_object* x_2262; lean_object* x_2263; lean_object* x_2264; +x_2258 = lean_ctor_get(x_2257, 0); +lean_inc(x_2258); +x_2259 = lean_ctor_get(x_2257, 1); +lean_inc(x_2259); +lean_dec(x_2257); +x_2260 = lean_ctor_get(x_2258, 0); +lean_inc(x_2260); +x_2261 = lean_ctor_get(x_2258, 1); +lean_inc(x_2261); +if (lean_is_exclusive(x_2258)) { + lean_ctor_release(x_2258, 0); + lean_ctor_release(x_2258, 1); + x_2262 = x_2258; +} else { + lean_dec_ref(x_2258); + x_2262 = lean_box(0); +} +x_2263 = lean_alloc_ctor(1, 1, 0); +lean_ctor_set(x_2263, 0, x_2260); +if (lean_is_scalar(x_2262)) { + x_2264 = lean_alloc_ctor(0, 2, 0); +} else { + x_2264 = x_2262; +} +lean_ctor_set(x_2264, 0, x_2263); +lean_ctor_set(x_2264, 1, x_2261); +x_165 = x_2264; +x_166 
= x_2259; +goto block_2082; +} +else +{ +lean_object* x_2265; lean_object* x_2266; lean_object* x_2267; lean_object* x_2268; +lean_dec(x_163); +lean_dec(x_161); +lean_dec(x_155); +lean_dec(x_154); +lean_dec(x_5); +lean_dec(x_4); +lean_dec(x_2); +lean_dec(x_1); +x_2265 = lean_ctor_get(x_2257, 0); +lean_inc(x_2265); +x_2266 = lean_ctor_get(x_2257, 1); +lean_inc(x_2266); +if (lean_is_exclusive(x_2257)) { + lean_ctor_release(x_2257, 0); + lean_ctor_release(x_2257, 1); + x_2267 = x_2257; +} else { + lean_dec_ref(x_2257); + x_2267 = lean_box(0); +} +if (lean_is_scalar(x_2267)) { + x_2268 = lean_alloc_ctor(1, 2, 0); +} else { + x_2268 = x_2267; +} +lean_ctor_set(x_2268, 0, x_2265); +lean_ctor_set(x_2268, 1, x_2266); +return x_2268; +} +} +} +} +else +{ +lean_object* x_2269; lean_object* x_2270; lean_object* x_2271; lean_object* x_2272; lean_object* x_2273; lean_object* x_2274; uint8_t x_2275; +x_2269 = lean_ctor_get(x_2083, 1); +lean_inc(x_2269); +lean_dec(x_2083); +x_2270 = lean_ctor_get(x_2084, 0); +lean_inc(x_2270); +if (lean_is_exclusive(x_2084)) { + lean_ctor_release(x_2084, 0); + x_2271 = x_2084; +} else { + lean_dec_ref(x_2084); + x_2271 = lean_box(0); +} +x_2272 = lean_array_get_size(x_163); +x_2273 = lean_ctor_get(x_2270, 3); +lean_inc(x_2273); +lean_dec(x_2270); +x_2274 = lean_array_get_size(x_2273); +lean_dec(x_2273); +x_2275 = lean_nat_dec_lt(x_2272, x_2274); +if (x_2275 == 0) +{ +uint8_t x_2276; +x_2276 = lean_nat_dec_eq(x_2272, x_2274); +if (x_2276 == 0) +{ +lean_object* x_2277; lean_object* x_2278; lean_object* x_2279; lean_object* x_2280; lean_object* x_2281; lean_object* x_2282; lean_object* x_2283; lean_object* x_2284; lean_object* x_2285; lean_object* x_2286; lean_object* x_2287; lean_object* x_2288; lean_object* x_2289; lean_object* x_2290; lean_object* x_2291; lean_object* x_2292; lean_object* x_2293; +x_2277 = lean_unsigned_to_nat(0u); +x_2278 = l_Array_extract___rarg(x_163, x_2277, x_2274); +x_2279 = l_Array_extract___rarg(x_163, x_2274, x_2272); +lean_dec(x_2272); +x_2280 = lean_alloc_ctor(6, 2, 0); +lean_ctor_set(x_2280, 0, x_153); +lean_ctor_set(x_2280, 1, x_2278); +x_2281 = lean_ctor_get(x_1, 0); +lean_inc(x_2281); +x_2282 = l_Lean_IR_ToIR_bindVar(x_2281, x_164, x_4, x_5, x_2269); +x_2283 = lean_ctor_get(x_2282, 0); +lean_inc(x_2283); +x_2284 = lean_ctor_get(x_2282, 1); +lean_inc(x_2284); +lean_dec(x_2282); +x_2285 = lean_ctor_get(x_2283, 0); +lean_inc(x_2285); +x_2286 = lean_ctor_get(x_2283, 1); +lean_inc(x_2286); +lean_dec(x_2283); +x_2287 = l_Lean_IR_ToIR_newVar(x_2286, x_4, x_5, x_2284); +x_2288 = lean_ctor_get(x_2287, 0); +lean_inc(x_2288); +x_2289 = lean_ctor_get(x_2287, 1); +lean_inc(x_2289); +lean_dec(x_2287); +x_2290 = lean_ctor_get(x_2288, 0); +lean_inc(x_2290); +x_2291 = lean_ctor_get(x_2288, 1); +lean_inc(x_2291); +lean_dec(x_2288); +x_2292 = lean_ctor_get(x_1, 2); +lean_inc(x_2292); +lean_inc(x_5); +lean_inc(x_4); +x_2293 = l_Lean_IR_ToIR_lowerType(x_2292, x_2291, x_4, x_5, x_2289); +if (lean_obj_tag(x_2293) == 0) +{ +lean_object* x_2294; lean_object* x_2295; lean_object* x_2296; lean_object* x_2297; lean_object* x_2298; +x_2294 = lean_ctor_get(x_2293, 0); +lean_inc(x_2294); +x_2295 = lean_ctor_get(x_2293, 1); +lean_inc(x_2295); +lean_dec(x_2293); +x_2296 = lean_ctor_get(x_2294, 0); +lean_inc(x_2296); +x_2297 = lean_ctor_get(x_2294, 1); +lean_inc(x_2297); +lean_dec(x_2294); +lean_inc(x_5); +lean_inc(x_4); +lean_inc(x_2); +x_2298 = l_Lean_IR_ToIR_lowerLet___lambda__3(x_2, x_2290, x_2279, x_2285, x_2280, x_2296, x_2297, x_4, x_5, x_2295); +if 
(lean_obj_tag(x_2298) == 0) +{ +lean_object* x_2299; lean_object* x_2300; lean_object* x_2301; lean_object* x_2302; lean_object* x_2303; lean_object* x_2304; lean_object* x_2305; +x_2299 = lean_ctor_get(x_2298, 0); +lean_inc(x_2299); +x_2300 = lean_ctor_get(x_2298, 1); +lean_inc(x_2300); +lean_dec(x_2298); +x_2301 = lean_ctor_get(x_2299, 0); +lean_inc(x_2301); +x_2302 = lean_ctor_get(x_2299, 1); +lean_inc(x_2302); +if (lean_is_exclusive(x_2299)) { + lean_ctor_release(x_2299, 0); + lean_ctor_release(x_2299, 1); + x_2303 = x_2299; +} else { + lean_dec_ref(x_2299); + x_2303 = lean_box(0); +} +if (lean_is_scalar(x_2271)) { + x_2304 = lean_alloc_ctor(1, 1, 0); +} else { + x_2304 = x_2271; +} +lean_ctor_set(x_2304, 0, x_2301); +if (lean_is_scalar(x_2303)) { + x_2305 = lean_alloc_ctor(0, 2, 0); +} else { + x_2305 = x_2303; +} +lean_ctor_set(x_2305, 0, x_2304); +lean_ctor_set(x_2305, 1, x_2302); +x_165 = x_2305; +x_166 = x_2300; +goto block_2082; +} +else +{ +lean_object* x_2306; lean_object* x_2307; lean_object* x_2308; lean_object* x_2309; +lean_dec(x_2271); +lean_dec(x_163); +lean_dec(x_161); +lean_dec(x_155); +lean_dec(x_154); +lean_dec(x_5); +lean_dec(x_4); +lean_dec(x_2); +lean_dec(x_1); +x_2306 = lean_ctor_get(x_2298, 0); +lean_inc(x_2306); +x_2307 = lean_ctor_get(x_2298, 1); +lean_inc(x_2307); +if (lean_is_exclusive(x_2298)) { + lean_ctor_release(x_2298, 0); + lean_ctor_release(x_2298, 1); + x_2308 = x_2298; +} else { + lean_dec_ref(x_2298); + x_2308 = lean_box(0); +} +if (lean_is_scalar(x_2308)) { + x_2309 = lean_alloc_ctor(1, 2, 0); +} else { + x_2309 = x_2308; +} +lean_ctor_set(x_2309, 0, x_2306); +lean_ctor_set(x_2309, 1, x_2307); +return x_2309; +} +} +else +{ +lean_object* x_2310; lean_object* x_2311; lean_object* x_2312; lean_object* x_2313; +lean_dec(x_2290); +lean_dec(x_2285); +lean_dec(x_2280); +lean_dec(x_2279); +lean_dec(x_2271); +lean_dec(x_163); +lean_dec(x_161); +lean_dec(x_155); +lean_dec(x_154); +lean_dec(x_5); +lean_dec(x_4); +lean_dec(x_2); +lean_dec(x_1); +x_2310 = lean_ctor_get(x_2293, 0); +lean_inc(x_2310); +x_2311 = lean_ctor_get(x_2293, 1); +lean_inc(x_2311); +if (lean_is_exclusive(x_2293)) { + lean_ctor_release(x_2293, 0); + lean_ctor_release(x_2293, 1); + x_2312 = x_2293; +} else { + lean_dec_ref(x_2293); + x_2312 = lean_box(0); +} +if (lean_is_scalar(x_2312)) { + x_2313 = lean_alloc_ctor(1, 2, 0); +} else { + x_2313 = x_2312; +} +lean_ctor_set(x_2313, 0, x_2310); +lean_ctor_set(x_2313, 1, x_2311); +return x_2313; +} +} +else +{ +lean_object* x_2314; lean_object* x_2315; lean_object* x_2316; lean_object* x_2317; lean_object* x_2318; lean_object* x_2319; lean_object* x_2320; lean_object* x_2321; lean_object* x_2322; +lean_dec(x_2274); +lean_dec(x_2272); +lean_inc(x_163); +x_2314 = lean_alloc_ctor(6, 2, 0); +lean_ctor_set(x_2314, 0, x_153); +lean_ctor_set(x_2314, 1, x_163); +x_2315 = lean_ctor_get(x_1, 0); +lean_inc(x_2315); +x_2316 = l_Lean_IR_ToIR_bindVar(x_2315, x_164, x_4, x_5, x_2269); +x_2317 = lean_ctor_get(x_2316, 0); +lean_inc(x_2317); +x_2318 = lean_ctor_get(x_2316, 1); +lean_inc(x_2318); +lean_dec(x_2316); +x_2319 = lean_ctor_get(x_2317, 0); +lean_inc(x_2319); +x_2320 = lean_ctor_get(x_2317, 1); +lean_inc(x_2320); +lean_dec(x_2317); +x_2321 = lean_ctor_get(x_1, 2); +lean_inc(x_2321); +lean_inc(x_5); +lean_inc(x_4); +x_2322 = l_Lean_IR_ToIR_lowerType(x_2321, x_2320, x_4, x_5, x_2318); +if (lean_obj_tag(x_2322) == 0) +{ +lean_object* x_2323; lean_object* x_2324; lean_object* x_2325; lean_object* x_2326; lean_object* x_2327; +x_2323 = lean_ctor_get(x_2322, 
0); +lean_inc(x_2323); +x_2324 = lean_ctor_get(x_2322, 1); +lean_inc(x_2324); +lean_dec(x_2322); +x_2325 = lean_ctor_get(x_2323, 0); +lean_inc(x_2325); +x_2326 = lean_ctor_get(x_2323, 1); +lean_inc(x_2326); +lean_dec(x_2323); +lean_inc(x_5); +lean_inc(x_4); +lean_inc(x_2); +x_2327 = l_Lean_IR_ToIR_lowerLet___lambda__1(x_2, x_2319, x_2314, x_2325, x_2326, x_4, x_5, x_2324); +if (lean_obj_tag(x_2327) == 0) +{ +lean_object* x_2328; lean_object* x_2329; lean_object* x_2330; lean_object* x_2331; lean_object* x_2332; lean_object* x_2333; lean_object* x_2334; +x_2328 = lean_ctor_get(x_2327, 0); +lean_inc(x_2328); +x_2329 = lean_ctor_get(x_2327, 1); +lean_inc(x_2329); +lean_dec(x_2327); +x_2330 = lean_ctor_get(x_2328, 0); +lean_inc(x_2330); +x_2331 = lean_ctor_get(x_2328, 1); +lean_inc(x_2331); +if (lean_is_exclusive(x_2328)) { + lean_ctor_release(x_2328, 0); + lean_ctor_release(x_2328, 1); + x_2332 = x_2328; +} else { + lean_dec_ref(x_2328); + x_2332 = lean_box(0); +} +if (lean_is_scalar(x_2271)) { + x_2333 = lean_alloc_ctor(1, 1, 0); +} else { + x_2333 = x_2271; +} +lean_ctor_set(x_2333, 0, x_2330); +if (lean_is_scalar(x_2332)) { + x_2334 = lean_alloc_ctor(0, 2, 0); +} else { + x_2334 = x_2332; +} +lean_ctor_set(x_2334, 0, x_2333); +lean_ctor_set(x_2334, 1, x_2331); +x_165 = x_2334; +x_166 = x_2329; +goto block_2082; +} +else +{ +lean_object* x_2335; lean_object* x_2336; lean_object* x_2337; lean_object* x_2338; +lean_dec(x_2271); +lean_dec(x_163); +lean_dec(x_161); +lean_dec(x_155); +lean_dec(x_154); +lean_dec(x_5); +lean_dec(x_4); +lean_dec(x_2); +lean_dec(x_1); +x_2335 = lean_ctor_get(x_2327, 0); +lean_inc(x_2335); +x_2336 = lean_ctor_get(x_2327, 1); +lean_inc(x_2336); +if (lean_is_exclusive(x_2327)) { + lean_ctor_release(x_2327, 0); + lean_ctor_release(x_2327, 1); + x_2337 = x_2327; +} else { + lean_dec_ref(x_2327); + x_2337 = lean_box(0); +} +if (lean_is_scalar(x_2337)) { + x_2338 = lean_alloc_ctor(1, 2, 0); +} else { + x_2338 = x_2337; +} +lean_ctor_set(x_2338, 0, x_2335); +lean_ctor_set(x_2338, 1, x_2336); +return x_2338; +} +} +else +{ +lean_object* x_2339; lean_object* x_2340; lean_object* x_2341; lean_object* x_2342; +lean_dec(x_2319); +lean_dec(x_2314); +lean_dec(x_2271); +lean_dec(x_163); +lean_dec(x_161); +lean_dec(x_155); +lean_dec(x_154); +lean_dec(x_5); +lean_dec(x_4); +lean_dec(x_2); +lean_dec(x_1); +x_2339 = lean_ctor_get(x_2322, 0); +lean_inc(x_2339); +x_2340 = lean_ctor_get(x_2322, 1); +lean_inc(x_2340); +if (lean_is_exclusive(x_2322)) { + lean_ctor_release(x_2322, 0); + lean_ctor_release(x_2322, 1); + x_2341 = x_2322; +} else { + lean_dec_ref(x_2322); + x_2341 = lean_box(0); +} +if (lean_is_scalar(x_2341)) { + x_2342 = lean_alloc_ctor(1, 2, 0); +} else { + x_2342 = x_2341; +} +lean_ctor_set(x_2342, 0, x_2339); +lean_ctor_set(x_2342, 1, x_2340); +return x_2342; +} +} +} +else +{ +lean_object* x_2343; lean_object* x_2344; lean_object* x_2345; lean_object* x_2346; lean_object* x_2347; lean_object* x_2348; lean_object* x_2349; lean_object* x_2350; lean_object* x_2351; +lean_dec(x_2274); +lean_dec(x_2272); +lean_inc(x_163); +x_2343 = lean_alloc_ctor(7, 2, 0); +lean_ctor_set(x_2343, 0, x_153); +lean_ctor_set(x_2343, 1, x_163); +x_2344 = lean_ctor_get(x_1, 0); +lean_inc(x_2344); +x_2345 = l_Lean_IR_ToIR_bindVar(x_2344, x_164, x_4, x_5, x_2269); +x_2346 = lean_ctor_get(x_2345, 0); +lean_inc(x_2346); +x_2347 = lean_ctor_get(x_2345, 1); +lean_inc(x_2347); +lean_dec(x_2345); +x_2348 = lean_ctor_get(x_2346, 0); +lean_inc(x_2348); +x_2349 = lean_ctor_get(x_2346, 1); +lean_inc(x_2349); 
+lean_dec(x_2346); +x_2350 = lean_box(7); +lean_inc(x_5); +lean_inc(x_4); +lean_inc(x_2); +x_2351 = l_Lean_IR_ToIR_lowerLet___lambda__1(x_2, x_2348, x_2343, x_2350, x_2349, x_4, x_5, x_2347); +if (lean_obj_tag(x_2351) == 0) +{ +lean_object* x_2352; lean_object* x_2353; lean_object* x_2354; lean_object* x_2355; lean_object* x_2356; lean_object* x_2357; lean_object* x_2358; +x_2352 = lean_ctor_get(x_2351, 0); +lean_inc(x_2352); +x_2353 = lean_ctor_get(x_2351, 1); +lean_inc(x_2353); +lean_dec(x_2351); +x_2354 = lean_ctor_get(x_2352, 0); +lean_inc(x_2354); +x_2355 = lean_ctor_get(x_2352, 1); +lean_inc(x_2355); +if (lean_is_exclusive(x_2352)) { + lean_ctor_release(x_2352, 0); + lean_ctor_release(x_2352, 1); + x_2356 = x_2352; +} else { + lean_dec_ref(x_2352); + x_2356 = lean_box(0); +} +if (lean_is_scalar(x_2271)) { + x_2357 = lean_alloc_ctor(1, 1, 0); +} else { + x_2357 = x_2271; +} +lean_ctor_set(x_2357, 0, x_2354); +if (lean_is_scalar(x_2356)) { + x_2358 = lean_alloc_ctor(0, 2, 0); +} else { + x_2358 = x_2356; +} +lean_ctor_set(x_2358, 0, x_2357); +lean_ctor_set(x_2358, 1, x_2355); +x_165 = x_2358; +x_166 = x_2353; +goto block_2082; +} +else +{ +lean_object* x_2359; lean_object* x_2360; lean_object* x_2361; lean_object* x_2362; +lean_dec(x_2271); +lean_dec(x_163); +lean_dec(x_161); +lean_dec(x_155); +lean_dec(x_154); +lean_dec(x_5); +lean_dec(x_4); +lean_dec(x_2); +lean_dec(x_1); +x_2359 = lean_ctor_get(x_2351, 0); +lean_inc(x_2359); +x_2360 = lean_ctor_get(x_2351, 1); +lean_inc(x_2360); +if (lean_is_exclusive(x_2351)) { + lean_ctor_release(x_2351, 0); + lean_ctor_release(x_2351, 1); + x_2361 = x_2351; +} else { + lean_dec_ref(x_2351); + x_2361 = lean_box(0); +} +if (lean_is_scalar(x_2361)) { + x_2362 = lean_alloc_ctor(1, 2, 0); +} else { + x_2362 = x_2361; +} +lean_ctor_set(x_2362, 0, x_2359); +lean_ctor_set(x_2362, 1, x_2360); +return x_2362; +} +} +} +} +block_2082: +{ +lean_object* x_167; +x_167 = lean_ctor_get(x_165, 0); +lean_inc(x_167); +if (lean_obj_tag(x_167) == 0) +{ +uint8_t x_168; +lean_dec(x_161); +x_168 = !lean_is_exclusive(x_165); +if (x_168 == 0) +{ +lean_object* x_169; lean_object* x_170; lean_object* x_171; lean_object* x_172; lean_object* x_173; lean_object* x_174; lean_object* x_175; uint8_t x_176; lean_object* x_177; +x_169 = lean_ctor_get(x_165, 1); +x_170 = lean_ctor_get(x_165, 0); +lean_dec(x_170); +x_171 = lean_st_ref_get(x_5, x_166); +x_172 = lean_ctor_get(x_171, 0); +lean_inc(x_172); +x_173 = lean_ctor_get(x_171, 1); +lean_inc(x_173); +if (lean_is_exclusive(x_171)) { + lean_ctor_release(x_171, 0); + lean_ctor_release(x_171, 1); + x_174 = x_171; +} else { + lean_dec_ref(x_171); + x_174 = lean_box(0); +} +x_175 = lean_ctor_get(x_172, 0); +lean_inc(x_175); +lean_dec(x_172); +x_176 = 0; +lean_inc(x_175); +x_177 = l_Lean_Environment_find_x3f(x_175, x_153, x_176); +if (lean_obj_tag(x_177) == 0) +{ +lean_object* x_178; lean_object* x_179; +lean_dec(x_175); +lean_dec(x_174); +lean_free_object(x_165); +lean_dec(x_163); +lean_dec(x_155); +lean_dec(x_154); +lean_dec(x_2); +lean_dec(x_1); +x_178 = l_Lean_IR_ToIR_lowerLet___closed__6; +x_179 = l_panic___at_Lean_IR_ToIR_lowerAlt_loop___spec__1(x_178, x_169, x_4, x_5, x_173); +return x_179; +} +else +{ +lean_object* x_180; +x_180 = lean_ctor_get(x_177, 0); +lean_inc(x_180); +lean_dec(x_177); +switch (lean_obj_tag(x_180)) { +case 0: +{ +uint8_t x_181; +lean_dec(x_175); +lean_dec(x_155); +lean_dec(x_154); +x_181 = !lean_is_exclusive(x_180); +if (x_181 == 0) +{ +lean_object* x_182; lean_object* x_183; uint8_t x_184; +x_182 = 
lean_ctor_get(x_180, 0); +lean_dec(x_182); +x_183 = l_Lean_IR_ToIR_lowerLet___closed__9; +x_184 = lean_name_eq(x_153, x_183); +if (x_184 == 0) +{ +lean_object* x_185; uint8_t x_186; +x_185 = l_Lean_IR_ToIR_lowerLet___closed__11; +x_186 = lean_name_eq(x_153, x_185); +if (x_186 == 0) +{ +lean_object* x_187; lean_object* x_188; lean_object* x_189; +lean_dec(x_174); +lean_free_object(x_165); +x_187 = l_Lean_IR_ToIR_findDecl(x_153, x_169, x_4, x_5, x_173); +x_188 = lean_ctor_get(x_187, 0); +lean_inc(x_188); +x_189 = lean_ctor_get(x_188, 0); +lean_inc(x_189); +if (lean_obj_tag(x_189) == 0) +{ +uint8_t x_190; +lean_dec(x_163); +lean_dec(x_2); +lean_dec(x_1); +x_190 = !lean_is_exclusive(x_187); +if (x_190 == 0) +{ +lean_object* x_191; lean_object* x_192; uint8_t x_193; +x_191 = lean_ctor_get(x_187, 1); +x_192 = lean_ctor_get(x_187, 0); +lean_dec(x_192); +x_193 = !lean_is_exclusive(x_188); +if (x_193 == 0) +{ +lean_object* x_194; lean_object* x_195; uint8_t x_196; lean_object* x_197; lean_object* x_198; lean_object* x_199; lean_object* x_200; lean_object* x_201; lean_object* x_202; +x_194 = lean_ctor_get(x_188, 1); +x_195 = lean_ctor_get(x_188, 0); +lean_dec(x_195); +x_196 = 1; +x_197 = l_Lean_IR_ToIR_lowerLet___closed__14; +x_198 = l_Lean_Name_toString(x_153, x_196, x_197); +lean_ctor_set_tag(x_180, 3); +lean_ctor_set(x_180, 0, x_198); +x_199 = l_Lean_IR_ToIR_lowerLet___closed__13; +lean_ctor_set_tag(x_188, 5); +lean_ctor_set(x_188, 1, x_180); +lean_ctor_set(x_188, 0, x_199); +x_200 = l_Lean_IR_ToIR_lowerLet___closed__16; +lean_ctor_set_tag(x_187, 5); +lean_ctor_set(x_187, 1, x_200); +x_201 = l_Lean_MessageData_ofFormat(x_187); +x_202 = l_Lean_throwError___at_Lean_IR_ToIR_lowerLet___spec__1(x_201, x_194, x_4, x_5, x_191); +lean_dec(x_5); +lean_dec(x_4); +lean_dec(x_194); +return x_202; +} +else +{ +lean_object* x_203; uint8_t x_204; lean_object* x_205; lean_object* x_206; lean_object* x_207; lean_object* x_208; lean_object* x_209; lean_object* x_210; lean_object* x_211; +x_203 = lean_ctor_get(x_188, 1); +lean_inc(x_203); +lean_dec(x_188); +x_204 = 1; +x_205 = l_Lean_IR_ToIR_lowerLet___closed__14; +x_206 = l_Lean_Name_toString(x_153, x_204, x_205); +lean_ctor_set_tag(x_180, 3); +lean_ctor_set(x_180, 0, x_206); +x_207 = l_Lean_IR_ToIR_lowerLet___closed__13; +x_208 = lean_alloc_ctor(5, 2, 0); +lean_ctor_set(x_208, 0, x_207); +lean_ctor_set(x_208, 1, x_180); +x_209 = l_Lean_IR_ToIR_lowerLet___closed__16; +lean_ctor_set_tag(x_187, 5); +lean_ctor_set(x_187, 1, x_209); +lean_ctor_set(x_187, 0, x_208); +x_210 = l_Lean_MessageData_ofFormat(x_187); +x_211 = l_Lean_throwError___at_Lean_IR_ToIR_lowerLet___spec__1(x_210, x_203, x_4, x_5, x_191); +lean_dec(x_5); +lean_dec(x_4); +lean_dec(x_203); +return x_211; +} +} +else +{ +lean_object* x_212; lean_object* x_213; lean_object* x_214; uint8_t x_215; lean_object* x_216; lean_object* x_217; lean_object* x_218; lean_object* x_219; lean_object* x_220; lean_object* x_221; lean_object* x_222; lean_object* x_223; +x_212 = lean_ctor_get(x_187, 1); +lean_inc(x_212); +lean_dec(x_187); +x_213 = lean_ctor_get(x_188, 1); +lean_inc(x_213); +if (lean_is_exclusive(x_188)) { + lean_ctor_release(x_188, 0); + lean_ctor_release(x_188, 1); + x_214 = x_188; +} else { + lean_dec_ref(x_188); + x_214 = lean_box(0); +} +x_215 = 1; +x_216 = l_Lean_IR_ToIR_lowerLet___closed__14; +x_217 = l_Lean_Name_toString(x_153, x_215, x_216); +lean_ctor_set_tag(x_180, 3); +lean_ctor_set(x_180, 0, x_217); +x_218 = l_Lean_IR_ToIR_lowerLet___closed__13; +if (lean_is_scalar(x_214)) { + x_219 = 
lean_alloc_ctor(5, 2, 0); +} else { + x_219 = x_214; + lean_ctor_set_tag(x_219, 5); +} +lean_ctor_set(x_219, 0, x_218); +lean_ctor_set(x_219, 1, x_180); +x_220 = l_Lean_IR_ToIR_lowerLet___closed__16; +x_221 = lean_alloc_ctor(5, 2, 0); +lean_ctor_set(x_221, 0, x_219); +lean_ctor_set(x_221, 1, x_220); +x_222 = l_Lean_MessageData_ofFormat(x_221); +x_223 = l_Lean_throwError___at_Lean_IR_ToIR_lowerLet___spec__1(x_222, x_213, x_4, x_5, x_212); +lean_dec(x_5); +lean_dec(x_4); +lean_dec(x_213); +return x_223; +} +} +else +{ +lean_object* x_224; uint8_t x_225; +lean_free_object(x_180); +x_224 = lean_ctor_get(x_187, 1); +lean_inc(x_224); +lean_dec(x_187); +x_225 = !lean_is_exclusive(x_188); +if (x_225 == 0) +{ +lean_object* x_226; lean_object* x_227; lean_object* x_228; lean_object* x_229; lean_object* x_230; lean_object* x_231; uint8_t x_232; +x_226 = lean_ctor_get(x_188, 1); +x_227 = lean_ctor_get(x_188, 0); +lean_dec(x_227); +x_228 = lean_ctor_get(x_189, 0); +lean_inc(x_228); +lean_dec(x_189); +x_229 = lean_array_get_size(x_163); +x_230 = l_Lean_IR_Decl_params(x_228); +lean_dec(x_228); +x_231 = lean_array_get_size(x_230); +lean_dec(x_230); +x_232 = lean_nat_dec_lt(x_229, x_231); +if (x_232 == 0) +{ +uint8_t x_233; +x_233 = lean_nat_dec_eq(x_229, x_231); +if (x_233 == 0) +{ +lean_object* x_234; lean_object* x_235; lean_object* x_236; lean_object* x_237; lean_object* x_238; lean_object* x_239; lean_object* x_240; lean_object* x_241; lean_object* x_242; lean_object* x_243; lean_object* x_244; lean_object* x_245; lean_object* x_246; lean_object* x_247; lean_object* x_248; lean_object* x_249; +x_234 = lean_unsigned_to_nat(0u); +x_235 = l_Array_extract___rarg(x_163, x_234, x_231); +x_236 = l_Array_extract___rarg(x_163, x_231, x_229); +lean_dec(x_229); +lean_dec(x_163); +lean_ctor_set_tag(x_188, 6); +lean_ctor_set(x_188, 1, x_235); +lean_ctor_set(x_188, 0, x_153); +x_237 = lean_ctor_get(x_1, 0); +lean_inc(x_237); +x_238 = l_Lean_IR_ToIR_bindVar(x_237, x_226, x_4, x_5, x_224); +x_239 = lean_ctor_get(x_238, 0); +lean_inc(x_239); +x_240 = lean_ctor_get(x_238, 1); +lean_inc(x_240); +lean_dec(x_238); +x_241 = lean_ctor_get(x_239, 0); +lean_inc(x_241); +x_242 = lean_ctor_get(x_239, 1); +lean_inc(x_242); +lean_dec(x_239); +x_243 = l_Lean_IR_ToIR_newVar(x_242, x_4, x_5, x_240); +x_244 = lean_ctor_get(x_243, 0); +lean_inc(x_244); +x_245 = lean_ctor_get(x_243, 1); +lean_inc(x_245); +lean_dec(x_243); +x_246 = lean_ctor_get(x_244, 0); +lean_inc(x_246); +x_247 = lean_ctor_get(x_244, 1); +lean_inc(x_247); +lean_dec(x_244); +x_248 = lean_ctor_get(x_1, 2); +lean_inc(x_248); +lean_dec(x_1); +lean_inc(x_5); +lean_inc(x_4); +x_249 = l_Lean_IR_ToIR_lowerType(x_248, x_247, x_4, x_5, x_245); +if (lean_obj_tag(x_249) == 0) +{ +lean_object* x_250; lean_object* x_251; lean_object* x_252; lean_object* x_253; lean_object* x_254; +x_250 = lean_ctor_get(x_249, 0); +lean_inc(x_250); +x_251 = lean_ctor_get(x_249, 1); +lean_inc(x_251); +lean_dec(x_249); +x_252 = lean_ctor_get(x_250, 0); +lean_inc(x_252); +x_253 = lean_ctor_get(x_250, 1); +lean_inc(x_253); +lean_dec(x_250); +x_254 = l_Lean_IR_ToIR_lowerLet___lambda__3(x_2, x_246, x_236, x_241, x_188, x_252, x_253, x_4, x_5, x_251); +return x_254; +} +else +{ +uint8_t x_255; +lean_dec(x_246); +lean_dec(x_241); +lean_dec(x_188); +lean_dec(x_236); +lean_dec(x_5); +lean_dec(x_4); +lean_dec(x_2); +x_255 = !lean_is_exclusive(x_249); +if (x_255 == 0) +{ +return x_249; +} +else +{ +lean_object* x_256; lean_object* x_257; lean_object* x_258; +x_256 = lean_ctor_get(x_249, 0); +x_257 = 
lean_ctor_get(x_249, 1); +lean_inc(x_257); +lean_inc(x_256); +lean_dec(x_249); +x_258 = lean_alloc_ctor(1, 2, 0); +lean_ctor_set(x_258, 0, x_256); +lean_ctor_set(x_258, 1, x_257); +return x_258; +} +} +} +else +{ +lean_object* x_259; lean_object* x_260; lean_object* x_261; lean_object* x_262; lean_object* x_263; lean_object* x_264; lean_object* x_265; lean_object* x_266; +lean_dec(x_231); +lean_dec(x_229); +lean_ctor_set_tag(x_188, 6); +lean_ctor_set(x_188, 1, x_163); +lean_ctor_set(x_188, 0, x_153); +x_259 = lean_ctor_get(x_1, 0); +lean_inc(x_259); +x_260 = l_Lean_IR_ToIR_bindVar(x_259, x_226, x_4, x_5, x_224); +x_261 = lean_ctor_get(x_260, 0); +lean_inc(x_261); +x_262 = lean_ctor_get(x_260, 1); +lean_inc(x_262); +lean_dec(x_260); +x_263 = lean_ctor_get(x_261, 0); +lean_inc(x_263); +x_264 = lean_ctor_get(x_261, 1); +lean_inc(x_264); +lean_dec(x_261); +x_265 = lean_ctor_get(x_1, 2); +lean_inc(x_265); +lean_dec(x_1); +lean_inc(x_5); +lean_inc(x_4); +x_266 = l_Lean_IR_ToIR_lowerType(x_265, x_264, x_4, x_5, x_262); +if (lean_obj_tag(x_266) == 0) +{ +lean_object* x_267; lean_object* x_268; lean_object* x_269; lean_object* x_270; lean_object* x_271; +x_267 = lean_ctor_get(x_266, 0); +lean_inc(x_267); +x_268 = lean_ctor_get(x_266, 1); +lean_inc(x_268); +lean_dec(x_266); +x_269 = lean_ctor_get(x_267, 0); +lean_inc(x_269); +x_270 = lean_ctor_get(x_267, 1); +lean_inc(x_270); +lean_dec(x_267); +x_271 = l_Lean_IR_ToIR_lowerLet___lambda__1(x_2, x_263, x_188, x_269, x_270, x_4, x_5, x_268); +return x_271; +} +else +{ +uint8_t x_272; +lean_dec(x_263); +lean_dec(x_188); +lean_dec(x_5); +lean_dec(x_4); +lean_dec(x_2); +x_272 = !lean_is_exclusive(x_266); +if (x_272 == 0) +{ +return x_266; +} +else +{ +lean_object* x_273; lean_object* x_274; lean_object* x_275; +x_273 = lean_ctor_get(x_266, 0); +x_274 = lean_ctor_get(x_266, 1); +lean_inc(x_274); +lean_inc(x_273); +lean_dec(x_266); +x_275 = lean_alloc_ctor(1, 2, 0); +lean_ctor_set(x_275, 0, x_273); +lean_ctor_set(x_275, 1, x_274); +return x_275; +} +} +} +} +else +{ +lean_object* x_276; lean_object* x_277; lean_object* x_278; lean_object* x_279; lean_object* x_280; lean_object* x_281; lean_object* x_282; lean_object* x_283; +lean_dec(x_231); +lean_dec(x_229); +lean_ctor_set_tag(x_188, 7); +lean_ctor_set(x_188, 1, x_163); +lean_ctor_set(x_188, 0, x_153); +x_276 = lean_ctor_get(x_1, 0); +lean_inc(x_276); +lean_dec(x_1); +x_277 = l_Lean_IR_ToIR_bindVar(x_276, x_226, x_4, x_5, x_224); +x_278 = lean_ctor_get(x_277, 0); +lean_inc(x_278); +x_279 = lean_ctor_get(x_277, 1); +lean_inc(x_279); +lean_dec(x_277); +x_280 = lean_ctor_get(x_278, 0); +lean_inc(x_280); +x_281 = lean_ctor_get(x_278, 1); +lean_inc(x_281); +lean_dec(x_278); +x_282 = lean_box(7); +x_283 = l_Lean_IR_ToIR_lowerLet___lambda__1(x_2, x_280, x_188, x_282, x_281, x_4, x_5, x_279); +return x_283; +} +} +else +{ +lean_object* x_284; lean_object* x_285; lean_object* x_286; lean_object* x_287; lean_object* x_288; uint8_t x_289; +x_284 = lean_ctor_get(x_188, 1); +lean_inc(x_284); +lean_dec(x_188); +x_285 = lean_ctor_get(x_189, 0); +lean_inc(x_285); +lean_dec(x_189); +x_286 = lean_array_get_size(x_163); +x_287 = l_Lean_IR_Decl_params(x_285); +lean_dec(x_285); +x_288 = lean_array_get_size(x_287); +lean_dec(x_287); +x_289 = lean_nat_dec_lt(x_286, x_288); +if (x_289 == 0) +{ +uint8_t x_290; +x_290 = lean_nat_dec_eq(x_286, x_288); +if (x_290 == 0) +{ +lean_object* x_291; lean_object* x_292; lean_object* x_293; lean_object* x_294; lean_object* x_295; lean_object* x_296; lean_object* x_297; lean_object* x_298; 
lean_object* x_299; lean_object* x_300; lean_object* x_301; lean_object* x_302; lean_object* x_303; lean_object* x_304; lean_object* x_305; lean_object* x_306; lean_object* x_307; +x_291 = lean_unsigned_to_nat(0u); +x_292 = l_Array_extract___rarg(x_163, x_291, x_288); +x_293 = l_Array_extract___rarg(x_163, x_288, x_286); +lean_dec(x_286); +lean_dec(x_163); +x_294 = lean_alloc_ctor(6, 2, 0); +lean_ctor_set(x_294, 0, x_153); +lean_ctor_set(x_294, 1, x_292); +x_295 = lean_ctor_get(x_1, 0); +lean_inc(x_295); +x_296 = l_Lean_IR_ToIR_bindVar(x_295, x_284, x_4, x_5, x_224); +x_297 = lean_ctor_get(x_296, 0); +lean_inc(x_297); +x_298 = lean_ctor_get(x_296, 1); +lean_inc(x_298); +lean_dec(x_296); +x_299 = lean_ctor_get(x_297, 0); +lean_inc(x_299); +x_300 = lean_ctor_get(x_297, 1); +lean_inc(x_300); +lean_dec(x_297); +x_301 = l_Lean_IR_ToIR_newVar(x_300, x_4, x_5, x_298); +x_302 = lean_ctor_get(x_301, 0); +lean_inc(x_302); +x_303 = lean_ctor_get(x_301, 1); +lean_inc(x_303); +lean_dec(x_301); +x_304 = lean_ctor_get(x_302, 0); +lean_inc(x_304); +x_305 = lean_ctor_get(x_302, 1); +lean_inc(x_305); +lean_dec(x_302); +x_306 = lean_ctor_get(x_1, 2); +lean_inc(x_306); +lean_dec(x_1); +lean_inc(x_5); +lean_inc(x_4); +x_307 = l_Lean_IR_ToIR_lowerType(x_306, x_305, x_4, x_5, x_303); +if (lean_obj_tag(x_307) == 0) +{ +lean_object* x_308; lean_object* x_309; lean_object* x_310; lean_object* x_311; lean_object* x_312; +x_308 = lean_ctor_get(x_307, 0); +lean_inc(x_308); +x_309 = lean_ctor_get(x_307, 1); +lean_inc(x_309); +lean_dec(x_307); +x_310 = lean_ctor_get(x_308, 0); +lean_inc(x_310); +x_311 = lean_ctor_get(x_308, 1); +lean_inc(x_311); +lean_dec(x_308); +x_312 = l_Lean_IR_ToIR_lowerLet___lambda__3(x_2, x_304, x_293, x_299, x_294, x_310, x_311, x_4, x_5, x_309); +return x_312; +} +else +{ +lean_object* x_313; lean_object* x_314; lean_object* x_315; lean_object* x_316; +lean_dec(x_304); +lean_dec(x_299); +lean_dec(x_294); +lean_dec(x_293); +lean_dec(x_5); +lean_dec(x_4); +lean_dec(x_2); +x_313 = lean_ctor_get(x_307, 0); +lean_inc(x_313); +x_314 = lean_ctor_get(x_307, 1); +lean_inc(x_314); +if (lean_is_exclusive(x_307)) { + lean_ctor_release(x_307, 0); + lean_ctor_release(x_307, 1); + x_315 = x_307; +} else { + lean_dec_ref(x_307); + x_315 = lean_box(0); +} +if (lean_is_scalar(x_315)) { + x_316 = lean_alloc_ctor(1, 2, 0); +} else { + x_316 = x_315; +} +lean_ctor_set(x_316, 0, x_313); +lean_ctor_set(x_316, 1, x_314); +return x_316; +} +} +else +{ +lean_object* x_317; lean_object* x_318; lean_object* x_319; lean_object* x_320; lean_object* x_321; lean_object* x_322; lean_object* x_323; lean_object* x_324; lean_object* x_325; +lean_dec(x_288); +lean_dec(x_286); +x_317 = lean_alloc_ctor(6, 2, 0); +lean_ctor_set(x_317, 0, x_153); +lean_ctor_set(x_317, 1, x_163); +x_318 = lean_ctor_get(x_1, 0); +lean_inc(x_318); +x_319 = l_Lean_IR_ToIR_bindVar(x_318, x_284, x_4, x_5, x_224); +x_320 = lean_ctor_get(x_319, 0); +lean_inc(x_320); +x_321 = lean_ctor_get(x_319, 1); +lean_inc(x_321); +lean_dec(x_319); +x_322 = lean_ctor_get(x_320, 0); +lean_inc(x_322); +x_323 = lean_ctor_get(x_320, 1); +lean_inc(x_323); +lean_dec(x_320); +x_324 = lean_ctor_get(x_1, 2); +lean_inc(x_324); +lean_dec(x_1); +lean_inc(x_5); +lean_inc(x_4); +x_325 = l_Lean_IR_ToIR_lowerType(x_324, x_323, x_4, x_5, x_321); +if (lean_obj_tag(x_325) == 0) +{ +lean_object* x_326; lean_object* x_327; lean_object* x_328; lean_object* x_329; lean_object* x_330; +x_326 = lean_ctor_get(x_325, 0); +lean_inc(x_326); +x_327 = lean_ctor_get(x_325, 1); +lean_inc(x_327); 
+lean_dec(x_325); +x_328 = lean_ctor_get(x_326, 0); +lean_inc(x_328); +x_329 = lean_ctor_get(x_326, 1); +lean_inc(x_329); +lean_dec(x_326); +x_330 = l_Lean_IR_ToIR_lowerLet___lambda__1(x_2, x_322, x_317, x_328, x_329, x_4, x_5, x_327); +return x_330; +} +else +{ +lean_object* x_331; lean_object* x_332; lean_object* x_333; lean_object* x_334; +lean_dec(x_322); +lean_dec(x_317); +lean_dec(x_5); +lean_dec(x_4); +lean_dec(x_2); +x_331 = lean_ctor_get(x_325, 0); +lean_inc(x_331); +x_332 = lean_ctor_get(x_325, 1); +lean_inc(x_332); +if (lean_is_exclusive(x_325)) { + lean_ctor_release(x_325, 0); + lean_ctor_release(x_325, 1); + x_333 = x_325; +} else { + lean_dec_ref(x_325); + x_333 = lean_box(0); +} +if (lean_is_scalar(x_333)) { + x_334 = lean_alloc_ctor(1, 2, 0); +} else { + x_334 = x_333; +} +lean_ctor_set(x_334, 0, x_331); +lean_ctor_set(x_334, 1, x_332); +return x_334; +} +} +} +else +{ +lean_object* x_335; lean_object* x_336; lean_object* x_337; lean_object* x_338; lean_object* x_339; lean_object* x_340; lean_object* x_341; lean_object* x_342; lean_object* x_343; +lean_dec(x_288); +lean_dec(x_286); +x_335 = lean_alloc_ctor(7, 2, 0); +lean_ctor_set(x_335, 0, x_153); +lean_ctor_set(x_335, 1, x_163); +x_336 = lean_ctor_get(x_1, 0); +lean_inc(x_336); +lean_dec(x_1); +x_337 = l_Lean_IR_ToIR_bindVar(x_336, x_284, x_4, x_5, x_224); +x_338 = lean_ctor_get(x_337, 0); +lean_inc(x_338); +x_339 = lean_ctor_get(x_337, 1); +lean_inc(x_339); +lean_dec(x_337); +x_340 = lean_ctor_get(x_338, 0); +lean_inc(x_340); +x_341 = lean_ctor_get(x_338, 1); +lean_inc(x_341); +lean_dec(x_338); +x_342 = lean_box(7); +x_343 = l_Lean_IR_ToIR_lowerLet___lambda__1(x_2, x_340, x_335, x_342, x_341, x_4, x_5, x_339); +return x_343; +} +} +} +} +else +{ +lean_object* x_344; lean_object* x_345; +lean_free_object(x_180); +lean_dec(x_163); +lean_dec(x_5); +lean_dec(x_4); +lean_dec(x_2); +lean_dec(x_1); +x_344 = lean_box(13); +lean_ctor_set(x_165, 0, x_344); +if (lean_is_scalar(x_174)) { + x_345 = lean_alloc_ctor(0, 2, 0); +} else { + x_345 = x_174; +} +lean_ctor_set(x_345, 0, x_165); +lean_ctor_set(x_345, 1, x_173); +return x_345; +} +} +else +{ +lean_object* x_346; lean_object* x_347; lean_object* x_348; +lean_free_object(x_180); +lean_dec(x_174); +lean_free_object(x_165); +x_346 = l_Lean_IR_instInhabitedArg; +x_347 = lean_unsigned_to_nat(2u); +x_348 = lean_array_get(x_346, x_163, x_347); +lean_dec(x_163); +if (lean_obj_tag(x_348) == 0) +{ +lean_object* x_349; lean_object* x_350; lean_object* x_351; lean_object* x_352; lean_object* x_353; lean_object* x_354; lean_object* x_355; +x_349 = lean_ctor_get(x_348, 0); +lean_inc(x_349); +lean_dec(x_348); +x_350 = lean_ctor_get(x_1, 0); +lean_inc(x_350); +lean_dec(x_1); +x_351 = l_Lean_IR_ToIR_bindVarToVarId(x_350, x_349, x_169, x_4, x_5, x_173); +x_352 = lean_ctor_get(x_351, 0); +lean_inc(x_352); +x_353 = lean_ctor_get(x_351, 1); +lean_inc(x_353); +lean_dec(x_351); +x_354 = lean_ctor_get(x_352, 1); +lean_inc(x_354); +lean_dec(x_352); +x_355 = l_Lean_IR_ToIR_lowerCode(x_2, x_354, x_4, x_5, x_353); +return x_355; +} +else +{ +lean_object* x_356; lean_object* x_357; lean_object* x_358; lean_object* x_359; lean_object* x_360; lean_object* x_361; +x_356 = lean_ctor_get(x_1, 0); +lean_inc(x_356); +lean_dec(x_1); +x_357 = l_Lean_IR_ToIR_bindErased(x_356, x_169, x_4, x_5, x_173); +x_358 = lean_ctor_get(x_357, 0); +lean_inc(x_358); +x_359 = lean_ctor_get(x_357, 1); +lean_inc(x_359); +lean_dec(x_357); +x_360 = lean_ctor_get(x_358, 1); +lean_inc(x_360); +lean_dec(x_358); +x_361 = 
l_Lean_IR_ToIR_lowerCode(x_2, x_360, x_4, x_5, x_359); +return x_361; +} +} +} +else +{ +lean_object* x_362; uint8_t x_363; +lean_dec(x_180); +x_362 = l_Lean_IR_ToIR_lowerLet___closed__9; +x_363 = lean_name_eq(x_153, x_362); +if (x_363 == 0) +{ +lean_object* x_364; uint8_t x_365; +x_364 = l_Lean_IR_ToIR_lowerLet___closed__11; +x_365 = lean_name_eq(x_153, x_364); +if (x_365 == 0) +{ +lean_object* x_366; lean_object* x_367; lean_object* x_368; +lean_dec(x_174); +lean_free_object(x_165); +x_366 = l_Lean_IR_ToIR_findDecl(x_153, x_169, x_4, x_5, x_173); +x_367 = lean_ctor_get(x_366, 0); +lean_inc(x_367); +x_368 = lean_ctor_get(x_367, 0); +lean_inc(x_368); +if (lean_obj_tag(x_368) == 0) +{ +lean_object* x_369; lean_object* x_370; lean_object* x_371; lean_object* x_372; uint8_t x_373; lean_object* x_374; lean_object* x_375; lean_object* x_376; lean_object* x_377; lean_object* x_378; lean_object* x_379; lean_object* x_380; lean_object* x_381; lean_object* x_382; +lean_dec(x_163); +lean_dec(x_2); +lean_dec(x_1); +x_369 = lean_ctor_get(x_366, 1); +lean_inc(x_369); +if (lean_is_exclusive(x_366)) { + lean_ctor_release(x_366, 0); + lean_ctor_release(x_366, 1); + x_370 = x_366; +} else { + lean_dec_ref(x_366); + x_370 = lean_box(0); +} +x_371 = lean_ctor_get(x_367, 1); +lean_inc(x_371); +if (lean_is_exclusive(x_367)) { + lean_ctor_release(x_367, 0); + lean_ctor_release(x_367, 1); + x_372 = x_367; +} else { + lean_dec_ref(x_367); + x_372 = lean_box(0); +} +x_373 = 1; +x_374 = l_Lean_IR_ToIR_lowerLet___closed__14; +x_375 = l_Lean_Name_toString(x_153, x_373, x_374); +x_376 = lean_alloc_ctor(3, 1, 0); +lean_ctor_set(x_376, 0, x_375); +x_377 = l_Lean_IR_ToIR_lowerLet___closed__13; +if (lean_is_scalar(x_372)) { + x_378 = lean_alloc_ctor(5, 2, 0); +} else { + x_378 = x_372; + lean_ctor_set_tag(x_378, 5); +} +lean_ctor_set(x_378, 0, x_377); +lean_ctor_set(x_378, 1, x_376); +x_379 = l_Lean_IR_ToIR_lowerLet___closed__16; +if (lean_is_scalar(x_370)) { + x_380 = lean_alloc_ctor(5, 2, 0); +} else { + x_380 = x_370; + lean_ctor_set_tag(x_380, 5); +} +lean_ctor_set(x_380, 0, x_378); +lean_ctor_set(x_380, 1, x_379); +x_381 = l_Lean_MessageData_ofFormat(x_380); +x_382 = l_Lean_throwError___at_Lean_IR_ToIR_lowerLet___spec__1(x_381, x_371, x_4, x_5, x_369); +lean_dec(x_5); +lean_dec(x_4); +lean_dec(x_371); +return x_382; +} +else +{ +lean_object* x_383; lean_object* x_384; lean_object* x_385; lean_object* x_386; lean_object* x_387; lean_object* x_388; lean_object* x_389; uint8_t x_390; +x_383 = lean_ctor_get(x_366, 1); +lean_inc(x_383); +lean_dec(x_366); +x_384 = lean_ctor_get(x_367, 1); +lean_inc(x_384); +if (lean_is_exclusive(x_367)) { + lean_ctor_release(x_367, 0); + lean_ctor_release(x_367, 1); + x_385 = x_367; +} else { + lean_dec_ref(x_367); + x_385 = lean_box(0); +} +x_386 = lean_ctor_get(x_368, 0); +lean_inc(x_386); +lean_dec(x_368); +x_387 = lean_array_get_size(x_163); +x_388 = l_Lean_IR_Decl_params(x_386); +lean_dec(x_386); +x_389 = lean_array_get_size(x_388); +lean_dec(x_388); +x_390 = lean_nat_dec_lt(x_387, x_389); +if (x_390 == 0) +{ +uint8_t x_391; +x_391 = lean_nat_dec_eq(x_387, x_389); +if (x_391 == 0) +{ +lean_object* x_392; lean_object* x_393; lean_object* x_394; lean_object* x_395; lean_object* x_396; lean_object* x_397; lean_object* x_398; lean_object* x_399; lean_object* x_400; lean_object* x_401; lean_object* x_402; lean_object* x_403; lean_object* x_404; lean_object* x_405; lean_object* x_406; lean_object* x_407; lean_object* x_408; +x_392 = lean_unsigned_to_nat(0u); +x_393 = 
l_Array_extract___rarg(x_163, x_392, x_389); +x_394 = l_Array_extract___rarg(x_163, x_389, x_387); +lean_dec(x_387); +lean_dec(x_163); +if (lean_is_scalar(x_385)) { + x_395 = lean_alloc_ctor(6, 2, 0); +} else { + x_395 = x_385; + lean_ctor_set_tag(x_395, 6); +} +lean_ctor_set(x_395, 0, x_153); +lean_ctor_set(x_395, 1, x_393); +x_396 = lean_ctor_get(x_1, 0); +lean_inc(x_396); +x_397 = l_Lean_IR_ToIR_bindVar(x_396, x_384, x_4, x_5, x_383); +x_398 = lean_ctor_get(x_397, 0); +lean_inc(x_398); +x_399 = lean_ctor_get(x_397, 1); +lean_inc(x_399); +lean_dec(x_397); +x_400 = lean_ctor_get(x_398, 0); +lean_inc(x_400); +x_401 = lean_ctor_get(x_398, 1); +lean_inc(x_401); +lean_dec(x_398); +x_402 = l_Lean_IR_ToIR_newVar(x_401, x_4, x_5, x_399); +x_403 = lean_ctor_get(x_402, 0); +lean_inc(x_403); +x_404 = lean_ctor_get(x_402, 1); +lean_inc(x_404); +lean_dec(x_402); +x_405 = lean_ctor_get(x_403, 0); +lean_inc(x_405); +x_406 = lean_ctor_get(x_403, 1); +lean_inc(x_406); +lean_dec(x_403); +x_407 = lean_ctor_get(x_1, 2); +lean_inc(x_407); +lean_dec(x_1); +lean_inc(x_5); +lean_inc(x_4); +x_408 = l_Lean_IR_ToIR_lowerType(x_407, x_406, x_4, x_5, x_404); +if (lean_obj_tag(x_408) == 0) +{ +lean_object* x_409; lean_object* x_410; lean_object* x_411; lean_object* x_412; lean_object* x_413; +x_409 = lean_ctor_get(x_408, 0); +lean_inc(x_409); +x_410 = lean_ctor_get(x_408, 1); +lean_inc(x_410); +lean_dec(x_408); +x_411 = lean_ctor_get(x_409, 0); +lean_inc(x_411); +x_412 = lean_ctor_get(x_409, 1); +lean_inc(x_412); +lean_dec(x_409); +x_413 = l_Lean_IR_ToIR_lowerLet___lambda__3(x_2, x_405, x_394, x_400, x_395, x_411, x_412, x_4, x_5, x_410); +return x_413; +} +else +{ +lean_object* x_414; lean_object* x_415; lean_object* x_416; lean_object* x_417; +lean_dec(x_405); +lean_dec(x_400); +lean_dec(x_395); +lean_dec(x_394); +lean_dec(x_5); +lean_dec(x_4); +lean_dec(x_2); +x_414 = lean_ctor_get(x_408, 0); +lean_inc(x_414); +x_415 = lean_ctor_get(x_408, 1); +lean_inc(x_415); +if (lean_is_exclusive(x_408)) { + lean_ctor_release(x_408, 0); + lean_ctor_release(x_408, 1); + x_416 = x_408; +} else { + lean_dec_ref(x_408); + x_416 = lean_box(0); +} +if (lean_is_scalar(x_416)) { + x_417 = lean_alloc_ctor(1, 2, 0); +} else { + x_417 = x_416; +} +lean_ctor_set(x_417, 0, x_414); +lean_ctor_set(x_417, 1, x_415); +return x_417; +} +} +else +{ +lean_object* x_418; lean_object* x_419; lean_object* x_420; lean_object* x_421; lean_object* x_422; lean_object* x_423; lean_object* x_424; lean_object* x_425; lean_object* x_426; +lean_dec(x_389); +lean_dec(x_387); +if (lean_is_scalar(x_385)) { + x_418 = lean_alloc_ctor(6, 2, 0); +} else { + x_418 = x_385; + lean_ctor_set_tag(x_418, 6); +} +lean_ctor_set(x_418, 0, x_153); +lean_ctor_set(x_418, 1, x_163); +x_419 = lean_ctor_get(x_1, 0); +lean_inc(x_419); +x_420 = l_Lean_IR_ToIR_bindVar(x_419, x_384, x_4, x_5, x_383); +x_421 = lean_ctor_get(x_420, 0); +lean_inc(x_421); +x_422 = lean_ctor_get(x_420, 1); +lean_inc(x_422); +lean_dec(x_420); +x_423 = lean_ctor_get(x_421, 0); +lean_inc(x_423); +x_424 = lean_ctor_get(x_421, 1); +lean_inc(x_424); +lean_dec(x_421); +x_425 = lean_ctor_get(x_1, 2); +lean_inc(x_425); +lean_dec(x_1); +lean_inc(x_5); +lean_inc(x_4); +x_426 = l_Lean_IR_ToIR_lowerType(x_425, x_424, x_4, x_5, x_422); +if (lean_obj_tag(x_426) == 0) +{ +lean_object* x_427; lean_object* x_428; lean_object* x_429; lean_object* x_430; lean_object* x_431; +x_427 = lean_ctor_get(x_426, 0); +lean_inc(x_427); +x_428 = lean_ctor_get(x_426, 1); +lean_inc(x_428); +lean_dec(x_426); +x_429 = lean_ctor_get(x_427, 
0); +lean_inc(x_429); +x_430 = lean_ctor_get(x_427, 1); +lean_inc(x_430); +lean_dec(x_427); +x_431 = l_Lean_IR_ToIR_lowerLet___lambda__1(x_2, x_423, x_418, x_429, x_430, x_4, x_5, x_428); +return x_431; +} +else +{ +lean_object* x_432; lean_object* x_433; lean_object* x_434; lean_object* x_435; +lean_dec(x_423); +lean_dec(x_418); +lean_dec(x_5); +lean_dec(x_4); +lean_dec(x_2); +x_432 = lean_ctor_get(x_426, 0); +lean_inc(x_432); +x_433 = lean_ctor_get(x_426, 1); +lean_inc(x_433); +if (lean_is_exclusive(x_426)) { + lean_ctor_release(x_426, 0); + lean_ctor_release(x_426, 1); + x_434 = x_426; +} else { + lean_dec_ref(x_426); + x_434 = lean_box(0); +} +if (lean_is_scalar(x_434)) { + x_435 = lean_alloc_ctor(1, 2, 0); +} else { + x_435 = x_434; +} +lean_ctor_set(x_435, 0, x_432); +lean_ctor_set(x_435, 1, x_433); +return x_435; +} +} +} +else +{ +lean_object* x_436; lean_object* x_437; lean_object* x_438; lean_object* x_439; lean_object* x_440; lean_object* x_441; lean_object* x_442; lean_object* x_443; lean_object* x_444; +lean_dec(x_389); +lean_dec(x_387); +if (lean_is_scalar(x_385)) { + x_436 = lean_alloc_ctor(7, 2, 0); +} else { + x_436 = x_385; + lean_ctor_set_tag(x_436, 7); +} +lean_ctor_set(x_436, 0, x_153); +lean_ctor_set(x_436, 1, x_163); +x_437 = lean_ctor_get(x_1, 0); +lean_inc(x_437); +lean_dec(x_1); +x_438 = l_Lean_IR_ToIR_bindVar(x_437, x_384, x_4, x_5, x_383); +x_439 = lean_ctor_get(x_438, 0); +lean_inc(x_439); +x_440 = lean_ctor_get(x_438, 1); +lean_inc(x_440); +lean_dec(x_438); +x_441 = lean_ctor_get(x_439, 0); +lean_inc(x_441); +x_442 = lean_ctor_get(x_439, 1); +lean_inc(x_442); +lean_dec(x_439); +x_443 = lean_box(7); +x_444 = l_Lean_IR_ToIR_lowerLet___lambda__1(x_2, x_441, x_436, x_443, x_442, x_4, x_5, x_440); +return x_444; +} +} +} +else +{ +lean_object* x_445; lean_object* x_446; +lean_dec(x_163); +lean_dec(x_5); +lean_dec(x_4); +lean_dec(x_2); +lean_dec(x_1); +x_445 = lean_box(13); +lean_ctor_set(x_165, 0, x_445); +if (lean_is_scalar(x_174)) { + x_446 = lean_alloc_ctor(0, 2, 0); +} else { + x_446 = x_174; +} +lean_ctor_set(x_446, 0, x_165); +lean_ctor_set(x_446, 1, x_173); +return x_446; +} +} +else +{ +lean_object* x_447; lean_object* x_448; lean_object* x_449; +lean_dec(x_174); +lean_free_object(x_165); +x_447 = l_Lean_IR_instInhabitedArg; +x_448 = lean_unsigned_to_nat(2u); +x_449 = lean_array_get(x_447, x_163, x_448); +lean_dec(x_163); +if (lean_obj_tag(x_449) == 0) +{ +lean_object* x_450; lean_object* x_451; lean_object* x_452; lean_object* x_453; lean_object* x_454; lean_object* x_455; lean_object* x_456; +x_450 = lean_ctor_get(x_449, 0); +lean_inc(x_450); +lean_dec(x_449); +x_451 = lean_ctor_get(x_1, 0); +lean_inc(x_451); +lean_dec(x_1); +x_452 = l_Lean_IR_ToIR_bindVarToVarId(x_451, x_450, x_169, x_4, x_5, x_173); +x_453 = lean_ctor_get(x_452, 0); +lean_inc(x_453); +x_454 = lean_ctor_get(x_452, 1); +lean_inc(x_454); +lean_dec(x_452); +x_455 = lean_ctor_get(x_453, 1); +lean_inc(x_455); +lean_dec(x_453); +x_456 = l_Lean_IR_ToIR_lowerCode(x_2, x_455, x_4, x_5, x_454); +return x_456; +} +else +{ +lean_object* x_457; lean_object* x_458; lean_object* x_459; lean_object* x_460; lean_object* x_461; lean_object* x_462; +x_457 = lean_ctor_get(x_1, 0); +lean_inc(x_457); +lean_dec(x_1); +x_458 = l_Lean_IR_ToIR_bindErased(x_457, x_169, x_4, x_5, x_173); +x_459 = lean_ctor_get(x_458, 0); +lean_inc(x_459); +x_460 = lean_ctor_get(x_458, 1); +lean_inc(x_460); +lean_dec(x_458); +x_461 = lean_ctor_get(x_459, 1); +lean_inc(x_461); +lean_dec(x_459); +x_462 = l_Lean_IR_ToIR_lowerCode(x_2, 
x_461, x_4, x_5, x_460); +return x_462; +} +} +} +} +case 1: +{ +lean_object* x_463; lean_object* x_464; lean_object* x_494; lean_object* x_495; +lean_dec(x_180); +lean_dec(x_175); +lean_dec(x_155); +lean_dec(x_154); +x_494 = l_Lean_Compiler_LCNF_getMonoDecl_x3f(x_153, x_4, x_5, x_173); +x_495 = lean_ctor_get(x_494, 0); +lean_inc(x_495); +if (lean_obj_tag(x_495) == 0) +{ +lean_object* x_496; lean_object* x_497; +x_496 = lean_ctor_get(x_494, 1); +lean_inc(x_496); +lean_dec(x_494); +x_497 = lean_box(0); +lean_ctor_set(x_165, 0, x_497); +x_463 = x_165; +x_464 = x_496; +goto block_493; +} +else +{ +uint8_t x_498; +lean_free_object(x_165); +x_498 = !lean_is_exclusive(x_494); +if (x_498 == 0) +{ +lean_object* x_499; lean_object* x_500; uint8_t x_501; +x_499 = lean_ctor_get(x_494, 1); +x_500 = lean_ctor_get(x_494, 0); +lean_dec(x_500); +x_501 = !lean_is_exclusive(x_495); +if (x_501 == 0) +{ +lean_object* x_502; lean_object* x_503; lean_object* x_504; lean_object* x_505; uint8_t x_506; +x_502 = lean_ctor_get(x_495, 0); +x_503 = lean_array_get_size(x_163); +x_504 = lean_ctor_get(x_502, 3); +lean_inc(x_504); +lean_dec(x_502); +x_505 = lean_array_get_size(x_504); +lean_dec(x_504); +x_506 = lean_nat_dec_lt(x_503, x_505); +if (x_506 == 0) +{ +uint8_t x_507; +x_507 = lean_nat_dec_eq(x_503, x_505); +if (x_507 == 0) +{ +lean_object* x_508; lean_object* x_509; lean_object* x_510; lean_object* x_511; lean_object* x_512; lean_object* x_513; lean_object* x_514; lean_object* x_515; lean_object* x_516; lean_object* x_517; lean_object* x_518; lean_object* x_519; lean_object* x_520; lean_object* x_521; lean_object* x_522; lean_object* x_523; +x_508 = lean_unsigned_to_nat(0u); +x_509 = l_Array_extract___rarg(x_163, x_508, x_505); +x_510 = l_Array_extract___rarg(x_163, x_505, x_503); +lean_dec(x_503); +lean_ctor_set_tag(x_494, 6); +lean_ctor_set(x_494, 1, x_509); +lean_ctor_set(x_494, 0, x_153); +x_511 = lean_ctor_get(x_1, 0); +lean_inc(x_511); +x_512 = l_Lean_IR_ToIR_bindVar(x_511, x_169, x_4, x_5, x_499); +x_513 = lean_ctor_get(x_512, 0); +lean_inc(x_513); +x_514 = lean_ctor_get(x_512, 1); +lean_inc(x_514); +lean_dec(x_512); +x_515 = lean_ctor_get(x_513, 0); +lean_inc(x_515); +x_516 = lean_ctor_get(x_513, 1); +lean_inc(x_516); +lean_dec(x_513); +x_517 = l_Lean_IR_ToIR_newVar(x_516, x_4, x_5, x_514); +x_518 = lean_ctor_get(x_517, 0); +lean_inc(x_518); +x_519 = lean_ctor_get(x_517, 1); +lean_inc(x_519); +lean_dec(x_517); +x_520 = lean_ctor_get(x_518, 0); +lean_inc(x_520); +x_521 = lean_ctor_get(x_518, 1); +lean_inc(x_521); +lean_dec(x_518); +x_522 = lean_ctor_get(x_1, 2); +lean_inc(x_522); +lean_inc(x_5); +lean_inc(x_4); +x_523 = l_Lean_IR_ToIR_lowerType(x_522, x_521, x_4, x_5, x_519); +if (lean_obj_tag(x_523) == 0) +{ +lean_object* x_524; lean_object* x_525; lean_object* x_526; lean_object* x_527; lean_object* x_528; +x_524 = lean_ctor_get(x_523, 0); +lean_inc(x_524); +x_525 = lean_ctor_get(x_523, 1); +lean_inc(x_525); +lean_dec(x_523); +x_526 = lean_ctor_get(x_524, 0); +lean_inc(x_526); +x_527 = lean_ctor_get(x_524, 1); +lean_inc(x_527); +lean_dec(x_524); +lean_inc(x_5); +lean_inc(x_4); +lean_inc(x_2); +x_528 = l_Lean_IR_ToIR_lowerLet___lambda__3(x_2, x_520, x_510, x_515, x_494, x_526, x_527, x_4, x_5, x_525); +if (lean_obj_tag(x_528) == 0) +{ +lean_object* x_529; lean_object* x_530; uint8_t x_531; +x_529 = lean_ctor_get(x_528, 0); +lean_inc(x_529); +x_530 = lean_ctor_get(x_528, 1); +lean_inc(x_530); +lean_dec(x_528); +x_531 = !lean_is_exclusive(x_529); +if (x_531 == 0) +{ +lean_object* x_532; +x_532 = 
lean_ctor_get(x_529, 0); +lean_ctor_set(x_495, 0, x_532); +lean_ctor_set(x_529, 0, x_495); +x_463 = x_529; +x_464 = x_530; +goto block_493; +} +else +{ +lean_object* x_533; lean_object* x_534; lean_object* x_535; +x_533 = lean_ctor_get(x_529, 0); +x_534 = lean_ctor_get(x_529, 1); +lean_inc(x_534); +lean_inc(x_533); +lean_dec(x_529); +lean_ctor_set(x_495, 0, x_533); +x_535 = lean_alloc_ctor(0, 2, 0); +lean_ctor_set(x_535, 0, x_495); +lean_ctor_set(x_535, 1, x_534); +x_463 = x_535; +x_464 = x_530; +goto block_493; +} +} +else +{ +uint8_t x_536; +lean_free_object(x_495); +lean_dec(x_174); +lean_dec(x_163); +lean_dec(x_5); +lean_dec(x_4); +lean_dec(x_2); +lean_dec(x_1); +x_536 = !lean_is_exclusive(x_528); +if (x_536 == 0) +{ +return x_528; +} +else +{ +lean_object* x_537; lean_object* x_538; lean_object* x_539; +x_537 = lean_ctor_get(x_528, 0); +x_538 = lean_ctor_get(x_528, 1); +lean_inc(x_538); +lean_inc(x_537); +lean_dec(x_528); +x_539 = lean_alloc_ctor(1, 2, 0); +lean_ctor_set(x_539, 0, x_537); +lean_ctor_set(x_539, 1, x_538); +return x_539; +} +} +} +else +{ +uint8_t x_540; +lean_dec(x_520); +lean_dec(x_515); +lean_dec(x_494); +lean_dec(x_510); +lean_free_object(x_495); +lean_dec(x_174); +lean_dec(x_163); +lean_dec(x_5); +lean_dec(x_4); +lean_dec(x_2); +lean_dec(x_1); +x_540 = !lean_is_exclusive(x_523); +if (x_540 == 0) +{ +return x_523; +} +else +{ +lean_object* x_541; lean_object* x_542; lean_object* x_543; +x_541 = lean_ctor_get(x_523, 0); +x_542 = lean_ctor_get(x_523, 1); +lean_inc(x_542); +lean_inc(x_541); +lean_dec(x_523); +x_543 = lean_alloc_ctor(1, 2, 0); +lean_ctor_set(x_543, 0, x_541); +lean_ctor_set(x_543, 1, x_542); +return x_543; +} +} +} +else +{ +lean_object* x_544; lean_object* x_545; lean_object* x_546; lean_object* x_547; lean_object* x_548; lean_object* x_549; lean_object* x_550; lean_object* x_551; +lean_dec(x_505); +lean_dec(x_503); +lean_inc(x_163); +lean_ctor_set_tag(x_494, 6); +lean_ctor_set(x_494, 1, x_163); +lean_ctor_set(x_494, 0, x_153); +x_544 = lean_ctor_get(x_1, 0); +lean_inc(x_544); +x_545 = l_Lean_IR_ToIR_bindVar(x_544, x_169, x_4, x_5, x_499); +x_546 = lean_ctor_get(x_545, 0); +lean_inc(x_546); +x_547 = lean_ctor_get(x_545, 1); +lean_inc(x_547); +lean_dec(x_545); +x_548 = lean_ctor_get(x_546, 0); +lean_inc(x_548); +x_549 = lean_ctor_get(x_546, 1); +lean_inc(x_549); +lean_dec(x_546); +x_550 = lean_ctor_get(x_1, 2); +lean_inc(x_550); +lean_inc(x_5); +lean_inc(x_4); +x_551 = l_Lean_IR_ToIR_lowerType(x_550, x_549, x_4, x_5, x_547); +if (lean_obj_tag(x_551) == 0) +{ +lean_object* x_552; lean_object* x_553; lean_object* x_554; lean_object* x_555; lean_object* x_556; +x_552 = lean_ctor_get(x_551, 0); +lean_inc(x_552); +x_553 = lean_ctor_get(x_551, 1); +lean_inc(x_553); +lean_dec(x_551); +x_554 = lean_ctor_get(x_552, 0); +lean_inc(x_554); +x_555 = lean_ctor_get(x_552, 1); +lean_inc(x_555); +lean_dec(x_552); +lean_inc(x_5); +lean_inc(x_4); +lean_inc(x_2); +x_556 = l_Lean_IR_ToIR_lowerLet___lambda__1(x_2, x_548, x_494, x_554, x_555, x_4, x_5, x_553); +if (lean_obj_tag(x_556) == 0) +{ +lean_object* x_557; lean_object* x_558; uint8_t x_559; +x_557 = lean_ctor_get(x_556, 0); +lean_inc(x_557); +x_558 = lean_ctor_get(x_556, 1); +lean_inc(x_558); +lean_dec(x_556); +x_559 = !lean_is_exclusive(x_557); +if (x_559 == 0) +{ +lean_object* x_560; +x_560 = lean_ctor_get(x_557, 0); +lean_ctor_set(x_495, 0, x_560); +lean_ctor_set(x_557, 0, x_495); +x_463 = x_557; +x_464 = x_558; +goto block_493; +} +else +{ +lean_object* x_561; lean_object* x_562; lean_object* x_563; +x_561 = 
lean_ctor_get(x_557, 0); +x_562 = lean_ctor_get(x_557, 1); +lean_inc(x_562); +lean_inc(x_561); +lean_dec(x_557); +lean_ctor_set(x_495, 0, x_561); +x_563 = lean_alloc_ctor(0, 2, 0); +lean_ctor_set(x_563, 0, x_495); +lean_ctor_set(x_563, 1, x_562); +x_463 = x_563; +x_464 = x_558; +goto block_493; +} +} +else +{ +uint8_t x_564; +lean_free_object(x_495); +lean_dec(x_174); +lean_dec(x_163); +lean_dec(x_5); +lean_dec(x_4); +lean_dec(x_2); +lean_dec(x_1); +x_564 = !lean_is_exclusive(x_556); +if (x_564 == 0) +{ +return x_556; +} +else +{ +lean_object* x_565; lean_object* x_566; lean_object* x_567; +x_565 = lean_ctor_get(x_556, 0); +x_566 = lean_ctor_get(x_556, 1); +lean_inc(x_566); +lean_inc(x_565); +lean_dec(x_556); +x_567 = lean_alloc_ctor(1, 2, 0); +lean_ctor_set(x_567, 0, x_565); +lean_ctor_set(x_567, 1, x_566); +return x_567; +} +} +} +else +{ +uint8_t x_568; +lean_dec(x_548); +lean_dec(x_494); +lean_free_object(x_495); +lean_dec(x_174); +lean_dec(x_163); +lean_dec(x_5); +lean_dec(x_4); +lean_dec(x_2); +lean_dec(x_1); +x_568 = !lean_is_exclusive(x_551); +if (x_568 == 0) +{ +return x_551; +} +else +{ +lean_object* x_569; lean_object* x_570; lean_object* x_571; +x_569 = lean_ctor_get(x_551, 0); +x_570 = lean_ctor_get(x_551, 1); +lean_inc(x_570); +lean_inc(x_569); +lean_dec(x_551); +x_571 = lean_alloc_ctor(1, 2, 0); +lean_ctor_set(x_571, 0, x_569); +lean_ctor_set(x_571, 1, x_570); +return x_571; +} +} +} +} +else +{ +lean_object* x_572; lean_object* x_573; lean_object* x_574; lean_object* x_575; lean_object* x_576; lean_object* x_577; lean_object* x_578; lean_object* x_579; +lean_dec(x_505); +lean_dec(x_503); +lean_inc(x_163); +lean_ctor_set_tag(x_494, 7); +lean_ctor_set(x_494, 1, x_163); +lean_ctor_set(x_494, 0, x_153); +x_572 = lean_ctor_get(x_1, 0); +lean_inc(x_572); +x_573 = l_Lean_IR_ToIR_bindVar(x_572, x_169, x_4, x_5, x_499); +x_574 = lean_ctor_get(x_573, 0); +lean_inc(x_574); +x_575 = lean_ctor_get(x_573, 1); +lean_inc(x_575); +lean_dec(x_573); +x_576 = lean_ctor_get(x_574, 0); +lean_inc(x_576); +x_577 = lean_ctor_get(x_574, 1); +lean_inc(x_577); +lean_dec(x_574); +x_578 = lean_box(7); +lean_inc(x_5); +lean_inc(x_4); +lean_inc(x_2); +x_579 = l_Lean_IR_ToIR_lowerLet___lambda__1(x_2, x_576, x_494, x_578, x_577, x_4, x_5, x_575); +if (lean_obj_tag(x_579) == 0) +{ +lean_object* x_580; lean_object* x_581; uint8_t x_582; +x_580 = lean_ctor_get(x_579, 0); +lean_inc(x_580); +x_581 = lean_ctor_get(x_579, 1); +lean_inc(x_581); +lean_dec(x_579); +x_582 = !lean_is_exclusive(x_580); +if (x_582 == 0) +{ +lean_object* x_583; +x_583 = lean_ctor_get(x_580, 0); +lean_ctor_set(x_495, 0, x_583); +lean_ctor_set(x_580, 0, x_495); +x_463 = x_580; +x_464 = x_581; +goto block_493; +} +else +{ +lean_object* x_584; lean_object* x_585; lean_object* x_586; +x_584 = lean_ctor_get(x_580, 0); +x_585 = lean_ctor_get(x_580, 1); +lean_inc(x_585); +lean_inc(x_584); +lean_dec(x_580); +lean_ctor_set(x_495, 0, x_584); +x_586 = lean_alloc_ctor(0, 2, 0); +lean_ctor_set(x_586, 0, x_495); +lean_ctor_set(x_586, 1, x_585); +x_463 = x_586; +x_464 = x_581; +goto block_493; +} +} +else +{ +uint8_t x_587; +lean_free_object(x_495); +lean_dec(x_174); +lean_dec(x_163); +lean_dec(x_5); +lean_dec(x_4); +lean_dec(x_2); +lean_dec(x_1); +x_587 = !lean_is_exclusive(x_579); +if (x_587 == 0) +{ +return x_579; +} +else +{ +lean_object* x_588; lean_object* x_589; lean_object* x_590; +x_588 = lean_ctor_get(x_579, 0); +x_589 = lean_ctor_get(x_579, 1); +lean_inc(x_589); +lean_inc(x_588); +lean_dec(x_579); +x_590 = lean_alloc_ctor(1, 2, 0); 
+lean_ctor_set(x_590, 0, x_588); +lean_ctor_set(x_590, 1, x_589); +return x_590; +} +} +} +} +else +{ +lean_object* x_591; lean_object* x_592; lean_object* x_593; lean_object* x_594; uint8_t x_595; +x_591 = lean_ctor_get(x_495, 0); +lean_inc(x_591); +lean_dec(x_495); +x_592 = lean_array_get_size(x_163); +x_593 = lean_ctor_get(x_591, 3); +lean_inc(x_593); +lean_dec(x_591); +x_594 = lean_array_get_size(x_593); +lean_dec(x_593); +x_595 = lean_nat_dec_lt(x_592, x_594); +if (x_595 == 0) +{ +uint8_t x_596; +x_596 = lean_nat_dec_eq(x_592, x_594); +if (x_596 == 0) +{ +lean_object* x_597; lean_object* x_598; lean_object* x_599; lean_object* x_600; lean_object* x_601; lean_object* x_602; lean_object* x_603; lean_object* x_604; lean_object* x_605; lean_object* x_606; lean_object* x_607; lean_object* x_608; lean_object* x_609; lean_object* x_610; lean_object* x_611; lean_object* x_612; +x_597 = lean_unsigned_to_nat(0u); +x_598 = l_Array_extract___rarg(x_163, x_597, x_594); +x_599 = l_Array_extract___rarg(x_163, x_594, x_592); +lean_dec(x_592); +lean_ctor_set_tag(x_494, 6); +lean_ctor_set(x_494, 1, x_598); +lean_ctor_set(x_494, 0, x_153); +x_600 = lean_ctor_get(x_1, 0); +lean_inc(x_600); +x_601 = l_Lean_IR_ToIR_bindVar(x_600, x_169, x_4, x_5, x_499); +x_602 = lean_ctor_get(x_601, 0); +lean_inc(x_602); +x_603 = lean_ctor_get(x_601, 1); +lean_inc(x_603); +lean_dec(x_601); +x_604 = lean_ctor_get(x_602, 0); +lean_inc(x_604); +x_605 = lean_ctor_get(x_602, 1); +lean_inc(x_605); +lean_dec(x_602); +x_606 = l_Lean_IR_ToIR_newVar(x_605, x_4, x_5, x_603); +x_607 = lean_ctor_get(x_606, 0); +lean_inc(x_607); +x_608 = lean_ctor_get(x_606, 1); +lean_inc(x_608); +lean_dec(x_606); +x_609 = lean_ctor_get(x_607, 0); +lean_inc(x_609); +x_610 = lean_ctor_get(x_607, 1); +lean_inc(x_610); +lean_dec(x_607); +x_611 = lean_ctor_get(x_1, 2); +lean_inc(x_611); +lean_inc(x_5); +lean_inc(x_4); +x_612 = l_Lean_IR_ToIR_lowerType(x_611, x_610, x_4, x_5, x_608); +if (lean_obj_tag(x_612) == 0) +{ +lean_object* x_613; lean_object* x_614; lean_object* x_615; lean_object* x_616; lean_object* x_617; +x_613 = lean_ctor_get(x_612, 0); +lean_inc(x_613); +x_614 = lean_ctor_get(x_612, 1); +lean_inc(x_614); +lean_dec(x_612); +x_615 = lean_ctor_get(x_613, 0); +lean_inc(x_615); +x_616 = lean_ctor_get(x_613, 1); +lean_inc(x_616); +lean_dec(x_613); +lean_inc(x_5); +lean_inc(x_4); +lean_inc(x_2); +x_617 = l_Lean_IR_ToIR_lowerLet___lambda__3(x_2, x_609, x_599, x_604, x_494, x_615, x_616, x_4, x_5, x_614); +if (lean_obj_tag(x_617) == 0) +{ +lean_object* x_618; lean_object* x_619; lean_object* x_620; lean_object* x_621; lean_object* x_622; lean_object* x_623; lean_object* x_624; +x_618 = lean_ctor_get(x_617, 0); +lean_inc(x_618); +x_619 = lean_ctor_get(x_617, 1); +lean_inc(x_619); +lean_dec(x_617); +x_620 = lean_ctor_get(x_618, 0); +lean_inc(x_620); +x_621 = lean_ctor_get(x_618, 1); +lean_inc(x_621); +if (lean_is_exclusive(x_618)) { + lean_ctor_release(x_618, 0); + lean_ctor_release(x_618, 1); + x_622 = x_618; +} else { + lean_dec_ref(x_618); + x_622 = lean_box(0); +} +x_623 = lean_alloc_ctor(1, 1, 0); +lean_ctor_set(x_623, 0, x_620); +if (lean_is_scalar(x_622)) { + x_624 = lean_alloc_ctor(0, 2, 0); +} else { + x_624 = x_622; +} +lean_ctor_set(x_624, 0, x_623); +lean_ctor_set(x_624, 1, x_621); +x_463 = x_624; +x_464 = x_619; +goto block_493; +} +else +{ +lean_object* x_625; lean_object* x_626; lean_object* x_627; lean_object* x_628; +lean_dec(x_174); +lean_dec(x_163); +lean_dec(x_5); +lean_dec(x_4); +lean_dec(x_2); +lean_dec(x_1); +x_625 = 
lean_ctor_get(x_617, 0); +lean_inc(x_625); +x_626 = lean_ctor_get(x_617, 1); +lean_inc(x_626); +if (lean_is_exclusive(x_617)) { + lean_ctor_release(x_617, 0); + lean_ctor_release(x_617, 1); + x_627 = x_617; +} else { + lean_dec_ref(x_617); + x_627 = lean_box(0); +} +if (lean_is_scalar(x_627)) { + x_628 = lean_alloc_ctor(1, 2, 0); +} else { + x_628 = x_627; +} +lean_ctor_set(x_628, 0, x_625); +lean_ctor_set(x_628, 1, x_626); +return x_628; +} +} +else +{ +lean_object* x_629; lean_object* x_630; lean_object* x_631; lean_object* x_632; +lean_dec(x_609); +lean_dec(x_604); +lean_dec(x_494); +lean_dec(x_599); +lean_dec(x_174); +lean_dec(x_163); +lean_dec(x_5); +lean_dec(x_4); +lean_dec(x_2); +lean_dec(x_1); +x_629 = lean_ctor_get(x_612, 0); +lean_inc(x_629); +x_630 = lean_ctor_get(x_612, 1); +lean_inc(x_630); +if (lean_is_exclusive(x_612)) { + lean_ctor_release(x_612, 0); + lean_ctor_release(x_612, 1); + x_631 = x_612; +} else { + lean_dec_ref(x_612); + x_631 = lean_box(0); +} +if (lean_is_scalar(x_631)) { + x_632 = lean_alloc_ctor(1, 2, 0); +} else { + x_632 = x_631; +} +lean_ctor_set(x_632, 0, x_629); +lean_ctor_set(x_632, 1, x_630); +return x_632; +} +} +else +{ +lean_object* x_633; lean_object* x_634; lean_object* x_635; lean_object* x_636; lean_object* x_637; lean_object* x_638; lean_object* x_639; lean_object* x_640; +lean_dec(x_594); +lean_dec(x_592); +lean_inc(x_163); +lean_ctor_set_tag(x_494, 6); +lean_ctor_set(x_494, 1, x_163); +lean_ctor_set(x_494, 0, x_153); +x_633 = lean_ctor_get(x_1, 0); +lean_inc(x_633); +x_634 = l_Lean_IR_ToIR_bindVar(x_633, x_169, x_4, x_5, x_499); +x_635 = lean_ctor_get(x_634, 0); +lean_inc(x_635); +x_636 = lean_ctor_get(x_634, 1); +lean_inc(x_636); +lean_dec(x_634); +x_637 = lean_ctor_get(x_635, 0); +lean_inc(x_637); +x_638 = lean_ctor_get(x_635, 1); +lean_inc(x_638); +lean_dec(x_635); +x_639 = lean_ctor_get(x_1, 2); +lean_inc(x_639); +lean_inc(x_5); +lean_inc(x_4); +x_640 = l_Lean_IR_ToIR_lowerType(x_639, x_638, x_4, x_5, x_636); +if (lean_obj_tag(x_640) == 0) +{ +lean_object* x_641; lean_object* x_642; lean_object* x_643; lean_object* x_644; lean_object* x_645; +x_641 = lean_ctor_get(x_640, 0); +lean_inc(x_641); +x_642 = lean_ctor_get(x_640, 1); +lean_inc(x_642); +lean_dec(x_640); +x_643 = lean_ctor_get(x_641, 0); +lean_inc(x_643); +x_644 = lean_ctor_get(x_641, 1); +lean_inc(x_644); +lean_dec(x_641); +lean_inc(x_5); +lean_inc(x_4); +lean_inc(x_2); +x_645 = l_Lean_IR_ToIR_lowerLet___lambda__1(x_2, x_637, x_494, x_643, x_644, x_4, x_5, x_642); +if (lean_obj_tag(x_645) == 0) +{ +lean_object* x_646; lean_object* x_647; lean_object* x_648; lean_object* x_649; lean_object* x_650; lean_object* x_651; lean_object* x_652; +x_646 = lean_ctor_get(x_645, 0); +lean_inc(x_646); +x_647 = lean_ctor_get(x_645, 1); +lean_inc(x_647); +lean_dec(x_645); +x_648 = lean_ctor_get(x_646, 0); +lean_inc(x_648); +x_649 = lean_ctor_get(x_646, 1); +lean_inc(x_649); +if (lean_is_exclusive(x_646)) { + lean_ctor_release(x_646, 0); + lean_ctor_release(x_646, 1); + x_650 = x_646; +} else { + lean_dec_ref(x_646); + x_650 = lean_box(0); +} +x_651 = lean_alloc_ctor(1, 1, 0); +lean_ctor_set(x_651, 0, x_648); +if (lean_is_scalar(x_650)) { + x_652 = lean_alloc_ctor(0, 2, 0); +} else { + x_652 = x_650; +} +lean_ctor_set(x_652, 0, x_651); +lean_ctor_set(x_652, 1, x_649); +x_463 = x_652; +x_464 = x_647; +goto block_493; +} +else +{ +lean_object* x_653; lean_object* x_654; lean_object* x_655; lean_object* x_656; +lean_dec(x_174); +lean_dec(x_163); +lean_dec(x_5); +lean_dec(x_4); +lean_dec(x_2); 
+lean_dec(x_1); +x_653 = lean_ctor_get(x_645, 0); +lean_inc(x_653); +x_654 = lean_ctor_get(x_645, 1); +lean_inc(x_654); +if (lean_is_exclusive(x_645)) { + lean_ctor_release(x_645, 0); + lean_ctor_release(x_645, 1); + x_655 = x_645; +} else { + lean_dec_ref(x_645); + x_655 = lean_box(0); +} +if (lean_is_scalar(x_655)) { + x_656 = lean_alloc_ctor(1, 2, 0); +} else { + x_656 = x_655; +} +lean_ctor_set(x_656, 0, x_653); +lean_ctor_set(x_656, 1, x_654); +return x_656; +} +} +else +{ +lean_object* x_657; lean_object* x_658; lean_object* x_659; lean_object* x_660; +lean_dec(x_637); +lean_dec(x_494); +lean_dec(x_174); +lean_dec(x_163); +lean_dec(x_5); +lean_dec(x_4); +lean_dec(x_2); +lean_dec(x_1); +x_657 = lean_ctor_get(x_640, 0); +lean_inc(x_657); +x_658 = lean_ctor_get(x_640, 1); +lean_inc(x_658); +if (lean_is_exclusive(x_640)) { + lean_ctor_release(x_640, 0); + lean_ctor_release(x_640, 1); + x_659 = x_640; +} else { + lean_dec_ref(x_640); + x_659 = lean_box(0); +} +if (lean_is_scalar(x_659)) { + x_660 = lean_alloc_ctor(1, 2, 0); +} else { + x_660 = x_659; +} +lean_ctor_set(x_660, 0, x_657); +lean_ctor_set(x_660, 1, x_658); +return x_660; +} +} +} +else +{ +lean_object* x_661; lean_object* x_662; lean_object* x_663; lean_object* x_664; lean_object* x_665; lean_object* x_666; lean_object* x_667; lean_object* x_668; +lean_dec(x_594); +lean_dec(x_592); +lean_inc(x_163); +lean_ctor_set_tag(x_494, 7); +lean_ctor_set(x_494, 1, x_163); +lean_ctor_set(x_494, 0, x_153); +x_661 = lean_ctor_get(x_1, 0); +lean_inc(x_661); +x_662 = l_Lean_IR_ToIR_bindVar(x_661, x_169, x_4, x_5, x_499); +x_663 = lean_ctor_get(x_662, 0); +lean_inc(x_663); +x_664 = lean_ctor_get(x_662, 1); +lean_inc(x_664); +lean_dec(x_662); +x_665 = lean_ctor_get(x_663, 0); +lean_inc(x_665); +x_666 = lean_ctor_get(x_663, 1); +lean_inc(x_666); +lean_dec(x_663); +x_667 = lean_box(7); +lean_inc(x_5); +lean_inc(x_4); +lean_inc(x_2); +x_668 = l_Lean_IR_ToIR_lowerLet___lambda__1(x_2, x_665, x_494, x_667, x_666, x_4, x_5, x_664); +if (lean_obj_tag(x_668) == 0) +{ +lean_object* x_669; lean_object* x_670; lean_object* x_671; lean_object* x_672; lean_object* x_673; lean_object* x_674; lean_object* x_675; +x_669 = lean_ctor_get(x_668, 0); +lean_inc(x_669); +x_670 = lean_ctor_get(x_668, 1); +lean_inc(x_670); +lean_dec(x_668); +x_671 = lean_ctor_get(x_669, 0); +lean_inc(x_671); +x_672 = lean_ctor_get(x_669, 1); +lean_inc(x_672); +if (lean_is_exclusive(x_669)) { + lean_ctor_release(x_669, 0); + lean_ctor_release(x_669, 1); + x_673 = x_669; +} else { + lean_dec_ref(x_669); + x_673 = lean_box(0); +} +x_674 = lean_alloc_ctor(1, 1, 0); +lean_ctor_set(x_674, 0, x_671); +if (lean_is_scalar(x_673)) { + x_675 = lean_alloc_ctor(0, 2, 0); +} else { + x_675 = x_673; +} +lean_ctor_set(x_675, 0, x_674); +lean_ctor_set(x_675, 1, x_672); +x_463 = x_675; +x_464 = x_670; +goto block_493; +} +else +{ +lean_object* x_676; lean_object* x_677; lean_object* x_678; lean_object* x_679; +lean_dec(x_174); +lean_dec(x_163); +lean_dec(x_5); +lean_dec(x_4); +lean_dec(x_2); +lean_dec(x_1); +x_676 = lean_ctor_get(x_668, 0); +lean_inc(x_676); +x_677 = lean_ctor_get(x_668, 1); +lean_inc(x_677); +if (lean_is_exclusive(x_668)) { + lean_ctor_release(x_668, 0); + lean_ctor_release(x_668, 1); + x_678 = x_668; +} else { + lean_dec_ref(x_668); + x_678 = lean_box(0); +} +if (lean_is_scalar(x_678)) { + x_679 = lean_alloc_ctor(1, 2, 0); +} else { + x_679 = x_678; +} +lean_ctor_set(x_679, 0, x_676); +lean_ctor_set(x_679, 1, x_677); +return x_679; +} +} +} +} +else +{ +lean_object* x_680; 
lean_object* x_681; lean_object* x_682; lean_object* x_683; lean_object* x_684; lean_object* x_685; uint8_t x_686; +x_680 = lean_ctor_get(x_494, 1); +lean_inc(x_680); +lean_dec(x_494); +x_681 = lean_ctor_get(x_495, 0); +lean_inc(x_681); +if (lean_is_exclusive(x_495)) { + lean_ctor_release(x_495, 0); + x_682 = x_495; +} else { + lean_dec_ref(x_495); + x_682 = lean_box(0); +} +x_683 = lean_array_get_size(x_163); +x_684 = lean_ctor_get(x_681, 3); +lean_inc(x_684); +lean_dec(x_681); +x_685 = lean_array_get_size(x_684); +lean_dec(x_684); +x_686 = lean_nat_dec_lt(x_683, x_685); +if (x_686 == 0) +{ +uint8_t x_687; +x_687 = lean_nat_dec_eq(x_683, x_685); +if (x_687 == 0) +{ +lean_object* x_688; lean_object* x_689; lean_object* x_690; lean_object* x_691; lean_object* x_692; lean_object* x_693; lean_object* x_694; lean_object* x_695; lean_object* x_696; lean_object* x_697; lean_object* x_698; lean_object* x_699; lean_object* x_700; lean_object* x_701; lean_object* x_702; lean_object* x_703; lean_object* x_704; +x_688 = lean_unsigned_to_nat(0u); +x_689 = l_Array_extract___rarg(x_163, x_688, x_685); +x_690 = l_Array_extract___rarg(x_163, x_685, x_683); +lean_dec(x_683); +x_691 = lean_alloc_ctor(6, 2, 0); +lean_ctor_set(x_691, 0, x_153); +lean_ctor_set(x_691, 1, x_689); +x_692 = lean_ctor_get(x_1, 0); +lean_inc(x_692); +x_693 = l_Lean_IR_ToIR_bindVar(x_692, x_169, x_4, x_5, x_680); +x_694 = lean_ctor_get(x_693, 0); +lean_inc(x_694); +x_695 = lean_ctor_get(x_693, 1); +lean_inc(x_695); +lean_dec(x_693); +x_696 = lean_ctor_get(x_694, 0); +lean_inc(x_696); +x_697 = lean_ctor_get(x_694, 1); +lean_inc(x_697); +lean_dec(x_694); +x_698 = l_Lean_IR_ToIR_newVar(x_697, x_4, x_5, x_695); +x_699 = lean_ctor_get(x_698, 0); +lean_inc(x_699); +x_700 = lean_ctor_get(x_698, 1); +lean_inc(x_700); +lean_dec(x_698); +x_701 = lean_ctor_get(x_699, 0); +lean_inc(x_701); +x_702 = lean_ctor_get(x_699, 1); +lean_inc(x_702); +lean_dec(x_699); +x_703 = lean_ctor_get(x_1, 2); +lean_inc(x_703); +lean_inc(x_5); +lean_inc(x_4); +x_704 = l_Lean_IR_ToIR_lowerType(x_703, x_702, x_4, x_5, x_700); +if (lean_obj_tag(x_704) == 0) +{ +lean_object* x_705; lean_object* x_706; lean_object* x_707; lean_object* x_708; lean_object* x_709; +x_705 = lean_ctor_get(x_704, 0); +lean_inc(x_705); +x_706 = lean_ctor_get(x_704, 1); +lean_inc(x_706); +lean_dec(x_704); +x_707 = lean_ctor_get(x_705, 0); +lean_inc(x_707); +x_708 = lean_ctor_get(x_705, 1); +lean_inc(x_708); +lean_dec(x_705); +lean_inc(x_5); +lean_inc(x_4); +lean_inc(x_2); +x_709 = l_Lean_IR_ToIR_lowerLet___lambda__3(x_2, x_701, x_690, x_696, x_691, x_707, x_708, x_4, x_5, x_706); +if (lean_obj_tag(x_709) == 0) +{ +lean_object* x_710; lean_object* x_711; lean_object* x_712; lean_object* x_713; lean_object* x_714; lean_object* x_715; lean_object* x_716; +x_710 = lean_ctor_get(x_709, 0); +lean_inc(x_710); +x_711 = lean_ctor_get(x_709, 1); +lean_inc(x_711); +lean_dec(x_709); +x_712 = lean_ctor_get(x_710, 0); +lean_inc(x_712); +x_713 = lean_ctor_get(x_710, 1); +lean_inc(x_713); +if (lean_is_exclusive(x_710)) { + lean_ctor_release(x_710, 0); + lean_ctor_release(x_710, 1); + x_714 = x_710; +} else { + lean_dec_ref(x_710); + x_714 = lean_box(0); +} +if (lean_is_scalar(x_682)) { + x_715 = lean_alloc_ctor(1, 1, 0); +} else { + x_715 = x_682; +} +lean_ctor_set(x_715, 0, x_712); +if (lean_is_scalar(x_714)) { + x_716 = lean_alloc_ctor(0, 2, 0); +} else { + x_716 = x_714; +} +lean_ctor_set(x_716, 0, x_715); +lean_ctor_set(x_716, 1, x_713); +x_463 = x_716; +x_464 = x_711; +goto block_493; +} +else +{ 
+lean_object* x_717; lean_object* x_718; lean_object* x_719; lean_object* x_720; +lean_dec(x_682); +lean_dec(x_174); +lean_dec(x_163); +lean_dec(x_5); +lean_dec(x_4); +lean_dec(x_2); +lean_dec(x_1); +x_717 = lean_ctor_get(x_709, 0); +lean_inc(x_717); +x_718 = lean_ctor_get(x_709, 1); +lean_inc(x_718); +if (lean_is_exclusive(x_709)) { + lean_ctor_release(x_709, 0); + lean_ctor_release(x_709, 1); + x_719 = x_709; +} else { + lean_dec_ref(x_709); + x_719 = lean_box(0); +} +if (lean_is_scalar(x_719)) { + x_720 = lean_alloc_ctor(1, 2, 0); +} else { + x_720 = x_719; +} +lean_ctor_set(x_720, 0, x_717); +lean_ctor_set(x_720, 1, x_718); +return x_720; +} +} +else +{ +lean_object* x_721; lean_object* x_722; lean_object* x_723; lean_object* x_724; +lean_dec(x_701); +lean_dec(x_696); +lean_dec(x_691); +lean_dec(x_690); +lean_dec(x_682); +lean_dec(x_174); +lean_dec(x_163); +lean_dec(x_5); +lean_dec(x_4); +lean_dec(x_2); +lean_dec(x_1); +x_721 = lean_ctor_get(x_704, 0); +lean_inc(x_721); +x_722 = lean_ctor_get(x_704, 1); +lean_inc(x_722); +if (lean_is_exclusive(x_704)) { + lean_ctor_release(x_704, 0); + lean_ctor_release(x_704, 1); + x_723 = x_704; +} else { + lean_dec_ref(x_704); + x_723 = lean_box(0); +} +if (lean_is_scalar(x_723)) { + x_724 = lean_alloc_ctor(1, 2, 0); +} else { + x_724 = x_723; +} +lean_ctor_set(x_724, 0, x_721); +lean_ctor_set(x_724, 1, x_722); +return x_724; +} +} +else +{ +lean_object* x_725; lean_object* x_726; lean_object* x_727; lean_object* x_728; lean_object* x_729; lean_object* x_730; lean_object* x_731; lean_object* x_732; lean_object* x_733; +lean_dec(x_685); +lean_dec(x_683); +lean_inc(x_163); +x_725 = lean_alloc_ctor(6, 2, 0); +lean_ctor_set(x_725, 0, x_153); +lean_ctor_set(x_725, 1, x_163); +x_726 = lean_ctor_get(x_1, 0); +lean_inc(x_726); +x_727 = l_Lean_IR_ToIR_bindVar(x_726, x_169, x_4, x_5, x_680); +x_728 = lean_ctor_get(x_727, 0); +lean_inc(x_728); +x_729 = lean_ctor_get(x_727, 1); +lean_inc(x_729); +lean_dec(x_727); +x_730 = lean_ctor_get(x_728, 0); +lean_inc(x_730); +x_731 = lean_ctor_get(x_728, 1); +lean_inc(x_731); +lean_dec(x_728); +x_732 = lean_ctor_get(x_1, 2); +lean_inc(x_732); +lean_inc(x_5); +lean_inc(x_4); +x_733 = l_Lean_IR_ToIR_lowerType(x_732, x_731, x_4, x_5, x_729); +if (lean_obj_tag(x_733) == 0) +{ +lean_object* x_734; lean_object* x_735; lean_object* x_736; lean_object* x_737; lean_object* x_738; +x_734 = lean_ctor_get(x_733, 0); +lean_inc(x_734); +x_735 = lean_ctor_get(x_733, 1); +lean_inc(x_735); +lean_dec(x_733); +x_736 = lean_ctor_get(x_734, 0); +lean_inc(x_736); +x_737 = lean_ctor_get(x_734, 1); +lean_inc(x_737); +lean_dec(x_734); +lean_inc(x_5); +lean_inc(x_4); +lean_inc(x_2); +x_738 = l_Lean_IR_ToIR_lowerLet___lambda__1(x_2, x_730, x_725, x_736, x_737, x_4, x_5, x_735); +if (lean_obj_tag(x_738) == 0) +{ +lean_object* x_739; lean_object* x_740; lean_object* x_741; lean_object* x_742; lean_object* x_743; lean_object* x_744; lean_object* x_745; +x_739 = lean_ctor_get(x_738, 0); +lean_inc(x_739); +x_740 = lean_ctor_get(x_738, 1); +lean_inc(x_740); +lean_dec(x_738); +x_741 = lean_ctor_get(x_739, 0); +lean_inc(x_741); +x_742 = lean_ctor_get(x_739, 1); +lean_inc(x_742); +if (lean_is_exclusive(x_739)) { + lean_ctor_release(x_739, 0); + lean_ctor_release(x_739, 1); + x_743 = x_739; +} else { + lean_dec_ref(x_739); + x_743 = lean_box(0); +} +if (lean_is_scalar(x_682)) { + x_744 = lean_alloc_ctor(1, 1, 0); +} else { + x_744 = x_682; +} +lean_ctor_set(x_744, 0, x_741); +if (lean_is_scalar(x_743)) { + x_745 = lean_alloc_ctor(0, 2, 0); +} else { + x_745 
= x_743; +} +lean_ctor_set(x_745, 0, x_744); +lean_ctor_set(x_745, 1, x_742); +x_463 = x_745; +x_464 = x_740; +goto block_493; +} +else +{ +lean_object* x_746; lean_object* x_747; lean_object* x_748; lean_object* x_749; +lean_dec(x_682); +lean_dec(x_174); +lean_dec(x_163); +lean_dec(x_5); +lean_dec(x_4); +lean_dec(x_2); +lean_dec(x_1); +x_746 = lean_ctor_get(x_738, 0); +lean_inc(x_746); +x_747 = lean_ctor_get(x_738, 1); +lean_inc(x_747); +if (lean_is_exclusive(x_738)) { + lean_ctor_release(x_738, 0); + lean_ctor_release(x_738, 1); + x_748 = x_738; +} else { + lean_dec_ref(x_738); + x_748 = lean_box(0); +} +if (lean_is_scalar(x_748)) { + x_749 = lean_alloc_ctor(1, 2, 0); +} else { + x_749 = x_748; +} +lean_ctor_set(x_749, 0, x_746); +lean_ctor_set(x_749, 1, x_747); +return x_749; +} +} +else +{ +lean_object* x_750; lean_object* x_751; lean_object* x_752; lean_object* x_753; +lean_dec(x_730); +lean_dec(x_725); +lean_dec(x_682); +lean_dec(x_174); +lean_dec(x_163); +lean_dec(x_5); +lean_dec(x_4); +lean_dec(x_2); +lean_dec(x_1); +x_750 = lean_ctor_get(x_733, 0); +lean_inc(x_750); +x_751 = lean_ctor_get(x_733, 1); +lean_inc(x_751); +if (lean_is_exclusive(x_733)) { + lean_ctor_release(x_733, 0); + lean_ctor_release(x_733, 1); + x_752 = x_733; +} else { + lean_dec_ref(x_733); + x_752 = lean_box(0); +} +if (lean_is_scalar(x_752)) { + x_753 = lean_alloc_ctor(1, 2, 0); +} else { + x_753 = x_752; +} +lean_ctor_set(x_753, 0, x_750); +lean_ctor_set(x_753, 1, x_751); +return x_753; +} +} +} +else +{ +lean_object* x_754; lean_object* x_755; lean_object* x_756; lean_object* x_757; lean_object* x_758; lean_object* x_759; lean_object* x_760; lean_object* x_761; lean_object* x_762; +lean_dec(x_685); +lean_dec(x_683); +lean_inc(x_163); +x_754 = lean_alloc_ctor(7, 2, 0); +lean_ctor_set(x_754, 0, x_153); +lean_ctor_set(x_754, 1, x_163); +x_755 = lean_ctor_get(x_1, 0); +lean_inc(x_755); +x_756 = l_Lean_IR_ToIR_bindVar(x_755, x_169, x_4, x_5, x_680); +x_757 = lean_ctor_get(x_756, 0); +lean_inc(x_757); +x_758 = lean_ctor_get(x_756, 1); +lean_inc(x_758); +lean_dec(x_756); +x_759 = lean_ctor_get(x_757, 0); +lean_inc(x_759); +x_760 = lean_ctor_get(x_757, 1); +lean_inc(x_760); +lean_dec(x_757); +x_761 = lean_box(7); +lean_inc(x_5); +lean_inc(x_4); +lean_inc(x_2); +x_762 = l_Lean_IR_ToIR_lowerLet___lambda__1(x_2, x_759, x_754, x_761, x_760, x_4, x_5, x_758); +if (lean_obj_tag(x_762) == 0) +{ +lean_object* x_763; lean_object* x_764; lean_object* x_765; lean_object* x_766; lean_object* x_767; lean_object* x_768; lean_object* x_769; +x_763 = lean_ctor_get(x_762, 0); +lean_inc(x_763); +x_764 = lean_ctor_get(x_762, 1); +lean_inc(x_764); +lean_dec(x_762); +x_765 = lean_ctor_get(x_763, 0); +lean_inc(x_765); +x_766 = lean_ctor_get(x_763, 1); +lean_inc(x_766); +if (lean_is_exclusive(x_763)) { + lean_ctor_release(x_763, 0); + lean_ctor_release(x_763, 1); + x_767 = x_763; +} else { + lean_dec_ref(x_763); + x_767 = lean_box(0); +} +if (lean_is_scalar(x_682)) { + x_768 = lean_alloc_ctor(1, 1, 0); +} else { + x_768 = x_682; +} +lean_ctor_set(x_768, 0, x_765); +if (lean_is_scalar(x_767)) { + x_769 = lean_alloc_ctor(0, 2, 0); +} else { + x_769 = x_767; +} +lean_ctor_set(x_769, 0, x_768); +lean_ctor_set(x_769, 1, x_766); +x_463 = x_769; +x_464 = x_764; +goto block_493; +} +else +{ +lean_object* x_770; lean_object* x_771; lean_object* x_772; lean_object* x_773; +lean_dec(x_682); +lean_dec(x_174); +lean_dec(x_163); +lean_dec(x_5); +lean_dec(x_4); +lean_dec(x_2); +lean_dec(x_1); +x_770 = lean_ctor_get(x_762, 0); +lean_inc(x_770); +x_771 = 
lean_ctor_get(x_762, 1); +lean_inc(x_771); +if (lean_is_exclusive(x_762)) { + lean_ctor_release(x_762, 0); + lean_ctor_release(x_762, 1); + x_772 = x_762; +} else { + lean_dec_ref(x_762); + x_772 = lean_box(0); +} +if (lean_is_scalar(x_772)) { + x_773 = lean_alloc_ctor(1, 2, 0); +} else { + x_773 = x_772; +} +lean_ctor_set(x_773, 0, x_770); +lean_ctor_set(x_773, 1, x_771); +return x_773; +} +} +} +} +block_493: +{ +lean_object* x_465; +x_465 = lean_ctor_get(x_463, 0); +lean_inc(x_465); +if (lean_obj_tag(x_465) == 0) +{ +lean_object* x_466; lean_object* x_467; lean_object* x_468; lean_object* x_469; lean_object* x_470; lean_object* x_471; lean_object* x_472; lean_object* x_473; lean_object* x_474; lean_object* x_475; +lean_dec(x_174); +x_466 = lean_ctor_get(x_463, 1); +lean_inc(x_466); +lean_dec(x_463); +x_467 = lean_alloc_ctor(6, 2, 0); +lean_ctor_set(x_467, 0, x_153); +lean_ctor_set(x_467, 1, x_163); +x_468 = lean_ctor_get(x_1, 0); +lean_inc(x_468); +x_469 = l_Lean_IR_ToIR_bindVar(x_468, x_466, x_4, x_5, x_464); +x_470 = lean_ctor_get(x_469, 0); +lean_inc(x_470); +x_471 = lean_ctor_get(x_469, 1); +lean_inc(x_471); +lean_dec(x_469); +x_472 = lean_ctor_get(x_470, 0); +lean_inc(x_472); +x_473 = lean_ctor_get(x_470, 1); +lean_inc(x_473); +lean_dec(x_470); +x_474 = lean_ctor_get(x_1, 2); +lean_inc(x_474); +lean_dec(x_1); +lean_inc(x_5); +lean_inc(x_4); +x_475 = l_Lean_IR_ToIR_lowerType(x_474, x_473, x_4, x_5, x_471); +if (lean_obj_tag(x_475) == 0) +{ +lean_object* x_476; lean_object* x_477; lean_object* x_478; lean_object* x_479; lean_object* x_480; +x_476 = lean_ctor_get(x_475, 0); +lean_inc(x_476); +x_477 = lean_ctor_get(x_475, 1); +lean_inc(x_477); +lean_dec(x_475); +x_478 = lean_ctor_get(x_476, 0); +lean_inc(x_478); +x_479 = lean_ctor_get(x_476, 1); +lean_inc(x_479); +lean_dec(x_476); +x_480 = l_Lean_IR_ToIR_lowerLet___lambda__1(x_2, x_472, x_467, x_478, x_479, x_4, x_5, x_477); +return x_480; +} +else +{ +uint8_t x_481; +lean_dec(x_472); +lean_dec(x_467); +lean_dec(x_5); +lean_dec(x_4); +lean_dec(x_2); +x_481 = !lean_is_exclusive(x_475); +if (x_481 == 0) +{ +return x_475; +} +else +{ +lean_object* x_482; lean_object* x_483; lean_object* x_484; +x_482 = lean_ctor_get(x_475, 0); +x_483 = lean_ctor_get(x_475, 1); +lean_inc(x_483); +lean_inc(x_482); +lean_dec(x_475); +x_484 = lean_alloc_ctor(1, 2, 0); +lean_ctor_set(x_484, 0, x_482); +lean_ctor_set(x_484, 1, x_483); +return x_484; +} +} +} +else +{ +uint8_t x_485; +lean_dec(x_163); +lean_dec(x_5); +lean_dec(x_4); +lean_dec(x_2); +lean_dec(x_1); +x_485 = !lean_is_exclusive(x_463); +if (x_485 == 0) +{ +lean_object* x_486; lean_object* x_487; lean_object* x_488; +x_486 = lean_ctor_get(x_463, 0); +lean_dec(x_486); +x_487 = lean_ctor_get(x_465, 0); +lean_inc(x_487); +lean_dec(x_465); +lean_ctor_set(x_463, 0, x_487); +if (lean_is_scalar(x_174)) { + x_488 = lean_alloc_ctor(0, 2, 0); +} else { + x_488 = x_174; +} +lean_ctor_set(x_488, 0, x_463); +lean_ctor_set(x_488, 1, x_464); +return x_488; +} +else +{ +lean_object* x_489; lean_object* x_490; lean_object* x_491; lean_object* x_492; +x_489 = lean_ctor_get(x_463, 1); +lean_inc(x_489); +lean_dec(x_463); +x_490 = lean_ctor_get(x_465, 0); +lean_inc(x_490); +lean_dec(x_465); +x_491 = lean_alloc_ctor(0, 2, 0); +lean_ctor_set(x_491, 0, x_490); +lean_ctor_set(x_491, 1, x_489); +if (lean_is_scalar(x_174)) { + x_492 = lean_alloc_ctor(0, 2, 0); +} else { + x_492 = x_174; +} +lean_ctor_set(x_492, 0, x_491); +lean_ctor_set(x_492, 1, x_464); +return x_492; +} +} +} +} +case 2: +{ +lean_object* x_774; lean_object* 
x_775; +lean_dec(x_180); +lean_dec(x_175); +lean_dec(x_174); +lean_free_object(x_165); +lean_dec(x_163); +lean_dec(x_155); +lean_dec(x_154); +lean_dec(x_2); +lean_dec(x_1); +x_774 = l_Lean_IR_ToIR_lowerLet___closed__18; +x_775 = l_panic___at_Lean_IR_ToIR_lowerAlt_loop___spec__1(x_774, x_169, x_4, x_5, x_173); +return x_775; +} +case 3: +{ +lean_object* x_776; lean_object* x_777; lean_object* x_807; lean_object* x_808; +lean_dec(x_180); +lean_dec(x_175); +lean_dec(x_155); +lean_dec(x_154); +x_807 = l_Lean_Compiler_LCNF_getMonoDecl_x3f(x_153, x_4, x_5, x_173); +x_808 = lean_ctor_get(x_807, 0); +lean_inc(x_808); +if (lean_obj_tag(x_808) == 0) +{ +lean_object* x_809; lean_object* x_810; +x_809 = lean_ctor_get(x_807, 1); +lean_inc(x_809); +lean_dec(x_807); +x_810 = lean_box(0); +lean_ctor_set(x_165, 0, x_810); +x_776 = x_165; +x_777 = x_809; +goto block_806; +} +else +{ +uint8_t x_811; +lean_free_object(x_165); +x_811 = !lean_is_exclusive(x_807); +if (x_811 == 0) +{ +lean_object* x_812; lean_object* x_813; uint8_t x_814; +x_812 = lean_ctor_get(x_807, 1); +x_813 = lean_ctor_get(x_807, 0); +lean_dec(x_813); +x_814 = !lean_is_exclusive(x_808); +if (x_814 == 0) +{ +lean_object* x_815; lean_object* x_816; lean_object* x_817; lean_object* x_818; uint8_t x_819; +x_815 = lean_ctor_get(x_808, 0); +x_816 = lean_array_get_size(x_163); +x_817 = lean_ctor_get(x_815, 3); +lean_inc(x_817); +lean_dec(x_815); +x_818 = lean_array_get_size(x_817); +lean_dec(x_817); +x_819 = lean_nat_dec_lt(x_816, x_818); +if (x_819 == 0) +{ +uint8_t x_820; +x_820 = lean_nat_dec_eq(x_816, x_818); +if (x_820 == 0) +{ +lean_object* x_821; lean_object* x_822; lean_object* x_823; lean_object* x_824; lean_object* x_825; lean_object* x_826; lean_object* x_827; lean_object* x_828; lean_object* x_829; lean_object* x_830; lean_object* x_831; lean_object* x_832; lean_object* x_833; lean_object* x_834; lean_object* x_835; lean_object* x_836; +x_821 = lean_unsigned_to_nat(0u); +x_822 = l_Array_extract___rarg(x_163, x_821, x_818); +x_823 = l_Array_extract___rarg(x_163, x_818, x_816); +lean_dec(x_816); +lean_ctor_set_tag(x_807, 6); +lean_ctor_set(x_807, 1, x_822); +lean_ctor_set(x_807, 0, x_153); +x_824 = lean_ctor_get(x_1, 0); +lean_inc(x_824); +x_825 = l_Lean_IR_ToIR_bindVar(x_824, x_169, x_4, x_5, x_812); +x_826 = lean_ctor_get(x_825, 0); +lean_inc(x_826); +x_827 = lean_ctor_get(x_825, 1); +lean_inc(x_827); +lean_dec(x_825); +x_828 = lean_ctor_get(x_826, 0); +lean_inc(x_828); +x_829 = lean_ctor_get(x_826, 1); +lean_inc(x_829); +lean_dec(x_826); +x_830 = l_Lean_IR_ToIR_newVar(x_829, x_4, x_5, x_827); +x_831 = lean_ctor_get(x_830, 0); +lean_inc(x_831); +x_832 = lean_ctor_get(x_830, 1); +lean_inc(x_832); +lean_dec(x_830); +x_833 = lean_ctor_get(x_831, 0); +lean_inc(x_833); +x_834 = lean_ctor_get(x_831, 1); +lean_inc(x_834); +lean_dec(x_831); +x_835 = lean_ctor_get(x_1, 2); +lean_inc(x_835); +lean_inc(x_5); +lean_inc(x_4); +x_836 = l_Lean_IR_ToIR_lowerType(x_835, x_834, x_4, x_5, x_832); +if (lean_obj_tag(x_836) == 0) +{ +lean_object* x_837; lean_object* x_838; lean_object* x_839; lean_object* x_840; lean_object* x_841; +x_837 = lean_ctor_get(x_836, 0); +lean_inc(x_837); +x_838 = lean_ctor_get(x_836, 1); +lean_inc(x_838); +lean_dec(x_836); +x_839 = lean_ctor_get(x_837, 0); +lean_inc(x_839); +x_840 = lean_ctor_get(x_837, 1); +lean_inc(x_840); +lean_dec(x_837); +lean_inc(x_5); +lean_inc(x_4); +lean_inc(x_2); +x_841 = l_Lean_IR_ToIR_lowerLet___lambda__3(x_2, x_833, x_823, x_828, x_807, x_839, x_840, x_4, x_5, x_838); +if (lean_obj_tag(x_841) == 0) 
+{ +lean_object* x_842; lean_object* x_843; uint8_t x_844; +x_842 = lean_ctor_get(x_841, 0); +lean_inc(x_842); +x_843 = lean_ctor_get(x_841, 1); +lean_inc(x_843); +lean_dec(x_841); +x_844 = !lean_is_exclusive(x_842); +if (x_844 == 0) +{ +lean_object* x_845; +x_845 = lean_ctor_get(x_842, 0); +lean_ctor_set(x_808, 0, x_845); +lean_ctor_set(x_842, 0, x_808); +x_776 = x_842; +x_777 = x_843; +goto block_806; +} +else +{ +lean_object* x_846; lean_object* x_847; lean_object* x_848; +x_846 = lean_ctor_get(x_842, 0); +x_847 = lean_ctor_get(x_842, 1); +lean_inc(x_847); +lean_inc(x_846); +lean_dec(x_842); +lean_ctor_set(x_808, 0, x_846); +x_848 = lean_alloc_ctor(0, 2, 0); +lean_ctor_set(x_848, 0, x_808); +lean_ctor_set(x_848, 1, x_847); +x_776 = x_848; +x_777 = x_843; +goto block_806; +} +} +else +{ +uint8_t x_849; +lean_free_object(x_808); +lean_dec(x_174); +lean_dec(x_163); +lean_dec(x_5); +lean_dec(x_4); +lean_dec(x_2); +lean_dec(x_1); +x_849 = !lean_is_exclusive(x_841); +if (x_849 == 0) +{ +return x_841; +} +else +{ +lean_object* x_850; lean_object* x_851; lean_object* x_852; +x_850 = lean_ctor_get(x_841, 0); +x_851 = lean_ctor_get(x_841, 1); +lean_inc(x_851); +lean_inc(x_850); +lean_dec(x_841); +x_852 = lean_alloc_ctor(1, 2, 0); +lean_ctor_set(x_852, 0, x_850); +lean_ctor_set(x_852, 1, x_851); +return x_852; +} +} +} +else +{ +uint8_t x_853; +lean_dec(x_833); +lean_dec(x_828); +lean_dec(x_807); +lean_dec(x_823); +lean_free_object(x_808); +lean_dec(x_174); +lean_dec(x_163); +lean_dec(x_5); +lean_dec(x_4); +lean_dec(x_2); +lean_dec(x_1); +x_853 = !lean_is_exclusive(x_836); +if (x_853 == 0) +{ +return x_836; +} +else +{ +lean_object* x_854; lean_object* x_855; lean_object* x_856; +x_854 = lean_ctor_get(x_836, 0); +x_855 = lean_ctor_get(x_836, 1); +lean_inc(x_855); +lean_inc(x_854); +lean_dec(x_836); +x_856 = lean_alloc_ctor(1, 2, 0); +lean_ctor_set(x_856, 0, x_854); +lean_ctor_set(x_856, 1, x_855); +return x_856; +} +} +} +else +{ +lean_object* x_857; lean_object* x_858; lean_object* x_859; lean_object* x_860; lean_object* x_861; lean_object* x_862; lean_object* x_863; lean_object* x_864; +lean_dec(x_818); +lean_dec(x_816); +lean_inc(x_163); +lean_ctor_set_tag(x_807, 6); +lean_ctor_set(x_807, 1, x_163); +lean_ctor_set(x_807, 0, x_153); +x_857 = lean_ctor_get(x_1, 0); +lean_inc(x_857); +x_858 = l_Lean_IR_ToIR_bindVar(x_857, x_169, x_4, x_5, x_812); +x_859 = lean_ctor_get(x_858, 0); +lean_inc(x_859); +x_860 = lean_ctor_get(x_858, 1); +lean_inc(x_860); +lean_dec(x_858); +x_861 = lean_ctor_get(x_859, 0); +lean_inc(x_861); +x_862 = lean_ctor_get(x_859, 1); +lean_inc(x_862); +lean_dec(x_859); +x_863 = lean_ctor_get(x_1, 2); +lean_inc(x_863); +lean_inc(x_5); +lean_inc(x_4); +x_864 = l_Lean_IR_ToIR_lowerType(x_863, x_862, x_4, x_5, x_860); +if (lean_obj_tag(x_864) == 0) +{ +lean_object* x_865; lean_object* x_866; lean_object* x_867; lean_object* x_868; lean_object* x_869; +x_865 = lean_ctor_get(x_864, 0); +lean_inc(x_865); +x_866 = lean_ctor_get(x_864, 1); +lean_inc(x_866); +lean_dec(x_864); +x_867 = lean_ctor_get(x_865, 0); +lean_inc(x_867); +x_868 = lean_ctor_get(x_865, 1); +lean_inc(x_868); +lean_dec(x_865); +lean_inc(x_5); +lean_inc(x_4); +lean_inc(x_2); +x_869 = l_Lean_IR_ToIR_lowerLet___lambda__1(x_2, x_861, x_807, x_867, x_868, x_4, x_5, x_866); +if (lean_obj_tag(x_869) == 0) +{ +lean_object* x_870; lean_object* x_871; uint8_t x_872; +x_870 = lean_ctor_get(x_869, 0); +lean_inc(x_870); +x_871 = lean_ctor_get(x_869, 1); +lean_inc(x_871); +lean_dec(x_869); +x_872 = !lean_is_exclusive(x_870); +if (x_872 
== 0) +{ +lean_object* x_873; +x_873 = lean_ctor_get(x_870, 0); +lean_ctor_set(x_808, 0, x_873); +lean_ctor_set(x_870, 0, x_808); +x_776 = x_870; +x_777 = x_871; +goto block_806; +} +else +{ +lean_object* x_874; lean_object* x_875; lean_object* x_876; +x_874 = lean_ctor_get(x_870, 0); +x_875 = lean_ctor_get(x_870, 1); +lean_inc(x_875); +lean_inc(x_874); +lean_dec(x_870); +lean_ctor_set(x_808, 0, x_874); +x_876 = lean_alloc_ctor(0, 2, 0); +lean_ctor_set(x_876, 0, x_808); +lean_ctor_set(x_876, 1, x_875); +x_776 = x_876; +x_777 = x_871; +goto block_806; +} +} +else +{ +uint8_t x_877; +lean_free_object(x_808); +lean_dec(x_174); +lean_dec(x_163); +lean_dec(x_5); +lean_dec(x_4); +lean_dec(x_2); +lean_dec(x_1); +x_877 = !lean_is_exclusive(x_869); +if (x_877 == 0) +{ +return x_869; +} +else +{ +lean_object* x_878; lean_object* x_879; lean_object* x_880; +x_878 = lean_ctor_get(x_869, 0); +x_879 = lean_ctor_get(x_869, 1); +lean_inc(x_879); +lean_inc(x_878); +lean_dec(x_869); +x_880 = lean_alloc_ctor(1, 2, 0); +lean_ctor_set(x_880, 0, x_878); +lean_ctor_set(x_880, 1, x_879); +return x_880; +} +} +} +else +{ +uint8_t x_881; +lean_dec(x_861); +lean_dec(x_807); +lean_free_object(x_808); +lean_dec(x_174); +lean_dec(x_163); +lean_dec(x_5); +lean_dec(x_4); +lean_dec(x_2); +lean_dec(x_1); +x_881 = !lean_is_exclusive(x_864); +if (x_881 == 0) +{ +return x_864; +} +else +{ +lean_object* x_882; lean_object* x_883; lean_object* x_884; +x_882 = lean_ctor_get(x_864, 0); +x_883 = lean_ctor_get(x_864, 1); +lean_inc(x_883); +lean_inc(x_882); +lean_dec(x_864); +x_884 = lean_alloc_ctor(1, 2, 0); +lean_ctor_set(x_884, 0, x_882); +lean_ctor_set(x_884, 1, x_883); +return x_884; +} +} +} +} +else +{ +lean_object* x_885; lean_object* x_886; lean_object* x_887; lean_object* x_888; lean_object* x_889; lean_object* x_890; lean_object* x_891; lean_object* x_892; +lean_dec(x_818); +lean_dec(x_816); +lean_inc(x_163); +lean_ctor_set_tag(x_807, 7); +lean_ctor_set(x_807, 1, x_163); +lean_ctor_set(x_807, 0, x_153); +x_885 = lean_ctor_get(x_1, 0); +lean_inc(x_885); +x_886 = l_Lean_IR_ToIR_bindVar(x_885, x_169, x_4, x_5, x_812); +x_887 = lean_ctor_get(x_886, 0); +lean_inc(x_887); +x_888 = lean_ctor_get(x_886, 1); +lean_inc(x_888); +lean_dec(x_886); +x_889 = lean_ctor_get(x_887, 0); +lean_inc(x_889); +x_890 = lean_ctor_get(x_887, 1); +lean_inc(x_890); +lean_dec(x_887); +x_891 = lean_box(7); +lean_inc(x_5); +lean_inc(x_4); +lean_inc(x_2); +x_892 = l_Lean_IR_ToIR_lowerLet___lambda__1(x_2, x_889, x_807, x_891, x_890, x_4, x_5, x_888); +if (lean_obj_tag(x_892) == 0) +{ +lean_object* x_893; lean_object* x_894; uint8_t x_895; +x_893 = lean_ctor_get(x_892, 0); +lean_inc(x_893); +x_894 = lean_ctor_get(x_892, 1); +lean_inc(x_894); +lean_dec(x_892); +x_895 = !lean_is_exclusive(x_893); +if (x_895 == 0) +{ +lean_object* x_896; +x_896 = lean_ctor_get(x_893, 0); +lean_ctor_set(x_808, 0, x_896); +lean_ctor_set(x_893, 0, x_808); +x_776 = x_893; +x_777 = x_894; +goto block_806; +} +else +{ +lean_object* x_897; lean_object* x_898; lean_object* x_899; +x_897 = lean_ctor_get(x_893, 0); +x_898 = lean_ctor_get(x_893, 1); +lean_inc(x_898); +lean_inc(x_897); +lean_dec(x_893); +lean_ctor_set(x_808, 0, x_897); +x_899 = lean_alloc_ctor(0, 2, 0); +lean_ctor_set(x_899, 0, x_808); +lean_ctor_set(x_899, 1, x_898); +x_776 = x_899; +x_777 = x_894; +goto block_806; +} +} +else +{ +uint8_t x_900; +lean_free_object(x_808); +lean_dec(x_174); +lean_dec(x_163); +lean_dec(x_5); +lean_dec(x_4); +lean_dec(x_2); +lean_dec(x_1); +x_900 = !lean_is_exclusive(x_892); +if (x_900 == 
0) +{ +return x_892; +} +else +{ +lean_object* x_901; lean_object* x_902; lean_object* x_903; +x_901 = lean_ctor_get(x_892, 0); +x_902 = lean_ctor_get(x_892, 1); +lean_inc(x_902); +lean_inc(x_901); +lean_dec(x_892); +x_903 = lean_alloc_ctor(1, 2, 0); +lean_ctor_set(x_903, 0, x_901); +lean_ctor_set(x_903, 1, x_902); +return x_903; +} +} +} +} +else +{ +lean_object* x_904; lean_object* x_905; lean_object* x_906; lean_object* x_907; uint8_t x_908; +x_904 = lean_ctor_get(x_808, 0); +lean_inc(x_904); +lean_dec(x_808); +x_905 = lean_array_get_size(x_163); +x_906 = lean_ctor_get(x_904, 3); +lean_inc(x_906); +lean_dec(x_904); +x_907 = lean_array_get_size(x_906); +lean_dec(x_906); +x_908 = lean_nat_dec_lt(x_905, x_907); +if (x_908 == 0) +{ +uint8_t x_909; +x_909 = lean_nat_dec_eq(x_905, x_907); +if (x_909 == 0) +{ +lean_object* x_910; lean_object* x_911; lean_object* x_912; lean_object* x_913; lean_object* x_914; lean_object* x_915; lean_object* x_916; lean_object* x_917; lean_object* x_918; lean_object* x_919; lean_object* x_920; lean_object* x_921; lean_object* x_922; lean_object* x_923; lean_object* x_924; lean_object* x_925; +x_910 = lean_unsigned_to_nat(0u); +x_911 = l_Array_extract___rarg(x_163, x_910, x_907); +x_912 = l_Array_extract___rarg(x_163, x_907, x_905); +lean_dec(x_905); +lean_ctor_set_tag(x_807, 6); +lean_ctor_set(x_807, 1, x_911); +lean_ctor_set(x_807, 0, x_153); +x_913 = lean_ctor_get(x_1, 0); +lean_inc(x_913); +x_914 = l_Lean_IR_ToIR_bindVar(x_913, x_169, x_4, x_5, x_812); +x_915 = lean_ctor_get(x_914, 0); +lean_inc(x_915); +x_916 = lean_ctor_get(x_914, 1); +lean_inc(x_916); +lean_dec(x_914); +x_917 = lean_ctor_get(x_915, 0); +lean_inc(x_917); +x_918 = lean_ctor_get(x_915, 1); +lean_inc(x_918); +lean_dec(x_915); +x_919 = l_Lean_IR_ToIR_newVar(x_918, x_4, x_5, x_916); +x_920 = lean_ctor_get(x_919, 0); +lean_inc(x_920); +x_921 = lean_ctor_get(x_919, 1); +lean_inc(x_921); +lean_dec(x_919); +x_922 = lean_ctor_get(x_920, 0); +lean_inc(x_922); +x_923 = lean_ctor_get(x_920, 1); +lean_inc(x_923); +lean_dec(x_920); +x_924 = lean_ctor_get(x_1, 2); +lean_inc(x_924); +lean_inc(x_5); +lean_inc(x_4); +x_925 = l_Lean_IR_ToIR_lowerType(x_924, x_923, x_4, x_5, x_921); +if (lean_obj_tag(x_925) == 0) +{ +lean_object* x_926; lean_object* x_927; lean_object* x_928; lean_object* x_929; lean_object* x_930; +x_926 = lean_ctor_get(x_925, 0); +lean_inc(x_926); +x_927 = lean_ctor_get(x_925, 1); +lean_inc(x_927); +lean_dec(x_925); +x_928 = lean_ctor_get(x_926, 0); +lean_inc(x_928); +x_929 = lean_ctor_get(x_926, 1); +lean_inc(x_929); +lean_dec(x_926); +lean_inc(x_5); +lean_inc(x_4); +lean_inc(x_2); +x_930 = l_Lean_IR_ToIR_lowerLet___lambda__3(x_2, x_922, x_912, x_917, x_807, x_928, x_929, x_4, x_5, x_927); +if (lean_obj_tag(x_930) == 0) +{ +lean_object* x_931; lean_object* x_932; lean_object* x_933; lean_object* x_934; lean_object* x_935; lean_object* x_936; lean_object* x_937; +x_931 = lean_ctor_get(x_930, 0); +lean_inc(x_931); +x_932 = lean_ctor_get(x_930, 1); +lean_inc(x_932); +lean_dec(x_930); +x_933 = lean_ctor_get(x_931, 0); +lean_inc(x_933); +x_934 = lean_ctor_get(x_931, 1); +lean_inc(x_934); +if (lean_is_exclusive(x_931)) { + lean_ctor_release(x_931, 0); + lean_ctor_release(x_931, 1); + x_935 = x_931; +} else { + lean_dec_ref(x_931); + x_935 = lean_box(0); +} +x_936 = lean_alloc_ctor(1, 1, 0); +lean_ctor_set(x_936, 0, x_933); +if (lean_is_scalar(x_935)) { + x_937 = lean_alloc_ctor(0, 2, 0); +} else { + x_937 = x_935; +} +lean_ctor_set(x_937, 0, x_936); +lean_ctor_set(x_937, 1, x_934); +x_776 = 
x_937; +x_777 = x_932; +goto block_806; +} +else +{ +lean_object* x_938; lean_object* x_939; lean_object* x_940; lean_object* x_941; +lean_dec(x_174); +lean_dec(x_163); +lean_dec(x_5); +lean_dec(x_4); +lean_dec(x_2); +lean_dec(x_1); +x_938 = lean_ctor_get(x_930, 0); +lean_inc(x_938); +x_939 = lean_ctor_get(x_930, 1); +lean_inc(x_939); +if (lean_is_exclusive(x_930)) { + lean_ctor_release(x_930, 0); + lean_ctor_release(x_930, 1); + x_940 = x_930; +} else { + lean_dec_ref(x_930); + x_940 = lean_box(0); +} +if (lean_is_scalar(x_940)) { + x_941 = lean_alloc_ctor(1, 2, 0); +} else { + x_941 = x_940; +} +lean_ctor_set(x_941, 0, x_938); +lean_ctor_set(x_941, 1, x_939); +return x_941; +} +} +else +{ +lean_object* x_942; lean_object* x_943; lean_object* x_944; lean_object* x_945; +lean_dec(x_922); +lean_dec(x_917); +lean_dec(x_807); +lean_dec(x_912); +lean_dec(x_174); +lean_dec(x_163); +lean_dec(x_5); +lean_dec(x_4); +lean_dec(x_2); +lean_dec(x_1); +x_942 = lean_ctor_get(x_925, 0); +lean_inc(x_942); +x_943 = lean_ctor_get(x_925, 1); +lean_inc(x_943); +if (lean_is_exclusive(x_925)) { + lean_ctor_release(x_925, 0); + lean_ctor_release(x_925, 1); + x_944 = x_925; +} else { + lean_dec_ref(x_925); + x_944 = lean_box(0); +} +if (lean_is_scalar(x_944)) { + x_945 = lean_alloc_ctor(1, 2, 0); +} else { + x_945 = x_944; +} +lean_ctor_set(x_945, 0, x_942); +lean_ctor_set(x_945, 1, x_943); +return x_945; +} +} +else +{ +lean_object* x_946; lean_object* x_947; lean_object* x_948; lean_object* x_949; lean_object* x_950; lean_object* x_951; lean_object* x_952; lean_object* x_953; +lean_dec(x_907); +lean_dec(x_905); +lean_inc(x_163); +lean_ctor_set_tag(x_807, 6); +lean_ctor_set(x_807, 1, x_163); +lean_ctor_set(x_807, 0, x_153); +x_946 = lean_ctor_get(x_1, 0); +lean_inc(x_946); +x_947 = l_Lean_IR_ToIR_bindVar(x_946, x_169, x_4, x_5, x_812); +x_948 = lean_ctor_get(x_947, 0); +lean_inc(x_948); +x_949 = lean_ctor_get(x_947, 1); +lean_inc(x_949); +lean_dec(x_947); +x_950 = lean_ctor_get(x_948, 0); +lean_inc(x_950); +x_951 = lean_ctor_get(x_948, 1); +lean_inc(x_951); +lean_dec(x_948); +x_952 = lean_ctor_get(x_1, 2); +lean_inc(x_952); +lean_inc(x_5); +lean_inc(x_4); +x_953 = l_Lean_IR_ToIR_lowerType(x_952, x_951, x_4, x_5, x_949); +if (lean_obj_tag(x_953) == 0) +{ +lean_object* x_954; lean_object* x_955; lean_object* x_956; lean_object* x_957; lean_object* x_958; +x_954 = lean_ctor_get(x_953, 0); +lean_inc(x_954); +x_955 = lean_ctor_get(x_953, 1); +lean_inc(x_955); +lean_dec(x_953); +x_956 = lean_ctor_get(x_954, 0); +lean_inc(x_956); +x_957 = lean_ctor_get(x_954, 1); +lean_inc(x_957); +lean_dec(x_954); +lean_inc(x_5); +lean_inc(x_4); +lean_inc(x_2); +x_958 = l_Lean_IR_ToIR_lowerLet___lambda__1(x_2, x_950, x_807, x_956, x_957, x_4, x_5, x_955); +if (lean_obj_tag(x_958) == 0) +{ +lean_object* x_959; lean_object* x_960; lean_object* x_961; lean_object* x_962; lean_object* x_963; lean_object* x_964; lean_object* x_965; +x_959 = lean_ctor_get(x_958, 0); +lean_inc(x_959); +x_960 = lean_ctor_get(x_958, 1); +lean_inc(x_960); +lean_dec(x_958); +x_961 = lean_ctor_get(x_959, 0); +lean_inc(x_961); +x_962 = lean_ctor_get(x_959, 1); +lean_inc(x_962); +if (lean_is_exclusive(x_959)) { + lean_ctor_release(x_959, 0); + lean_ctor_release(x_959, 1); + x_963 = x_959; +} else { + lean_dec_ref(x_959); + x_963 = lean_box(0); +} +x_964 = lean_alloc_ctor(1, 1, 0); +lean_ctor_set(x_964, 0, x_961); +if (lean_is_scalar(x_963)) { + x_965 = lean_alloc_ctor(0, 2, 0); +} else { + x_965 = x_963; +} +lean_ctor_set(x_965, 0, x_964); +lean_ctor_set(x_965, 1, 
x_962); +x_776 = x_965; +x_777 = x_960; +goto block_806; +} +else +{ +lean_object* x_966; lean_object* x_967; lean_object* x_968; lean_object* x_969; +lean_dec(x_174); +lean_dec(x_163); +lean_dec(x_5); +lean_dec(x_4); +lean_dec(x_2); +lean_dec(x_1); +x_966 = lean_ctor_get(x_958, 0); +lean_inc(x_966); +x_967 = lean_ctor_get(x_958, 1); +lean_inc(x_967); +if (lean_is_exclusive(x_958)) { + lean_ctor_release(x_958, 0); + lean_ctor_release(x_958, 1); + x_968 = x_958; +} else { + lean_dec_ref(x_958); + x_968 = lean_box(0); +} +if (lean_is_scalar(x_968)) { + x_969 = lean_alloc_ctor(1, 2, 0); +} else { + x_969 = x_968; +} +lean_ctor_set(x_969, 0, x_966); +lean_ctor_set(x_969, 1, x_967); +return x_969; +} +} +else +{ +lean_object* x_970; lean_object* x_971; lean_object* x_972; lean_object* x_973; +lean_dec(x_950); +lean_dec(x_807); +lean_dec(x_174); +lean_dec(x_163); +lean_dec(x_5); +lean_dec(x_4); +lean_dec(x_2); +lean_dec(x_1); +x_970 = lean_ctor_get(x_953, 0); +lean_inc(x_970); +x_971 = lean_ctor_get(x_953, 1); +lean_inc(x_971); +if (lean_is_exclusive(x_953)) { + lean_ctor_release(x_953, 0); + lean_ctor_release(x_953, 1); + x_972 = x_953; +} else { + lean_dec_ref(x_953); + x_972 = lean_box(0); +} +if (lean_is_scalar(x_972)) { + x_973 = lean_alloc_ctor(1, 2, 0); +} else { + x_973 = x_972; +} +lean_ctor_set(x_973, 0, x_970); +lean_ctor_set(x_973, 1, x_971); +return x_973; +} +} +} +else +{ +lean_object* x_974; lean_object* x_975; lean_object* x_976; lean_object* x_977; lean_object* x_978; lean_object* x_979; lean_object* x_980; lean_object* x_981; +lean_dec(x_907); +lean_dec(x_905); +lean_inc(x_163); +lean_ctor_set_tag(x_807, 7); +lean_ctor_set(x_807, 1, x_163); +lean_ctor_set(x_807, 0, x_153); +x_974 = lean_ctor_get(x_1, 0); +lean_inc(x_974); +x_975 = l_Lean_IR_ToIR_bindVar(x_974, x_169, x_4, x_5, x_812); +x_976 = lean_ctor_get(x_975, 0); +lean_inc(x_976); +x_977 = lean_ctor_get(x_975, 1); +lean_inc(x_977); +lean_dec(x_975); +x_978 = lean_ctor_get(x_976, 0); +lean_inc(x_978); +x_979 = lean_ctor_get(x_976, 1); +lean_inc(x_979); +lean_dec(x_976); +x_980 = lean_box(7); +lean_inc(x_5); +lean_inc(x_4); +lean_inc(x_2); +x_981 = l_Lean_IR_ToIR_lowerLet___lambda__1(x_2, x_978, x_807, x_980, x_979, x_4, x_5, x_977); +if (lean_obj_tag(x_981) == 0) +{ +lean_object* x_982; lean_object* x_983; lean_object* x_984; lean_object* x_985; lean_object* x_986; lean_object* x_987; lean_object* x_988; +x_982 = lean_ctor_get(x_981, 0); +lean_inc(x_982); +x_983 = lean_ctor_get(x_981, 1); +lean_inc(x_983); +lean_dec(x_981); +x_984 = lean_ctor_get(x_982, 0); +lean_inc(x_984); +x_985 = lean_ctor_get(x_982, 1); +lean_inc(x_985); +if (lean_is_exclusive(x_982)) { + lean_ctor_release(x_982, 0); + lean_ctor_release(x_982, 1); + x_986 = x_982; +} else { + lean_dec_ref(x_982); + x_986 = lean_box(0); +} +x_987 = lean_alloc_ctor(1, 1, 0); +lean_ctor_set(x_987, 0, x_984); +if (lean_is_scalar(x_986)) { + x_988 = lean_alloc_ctor(0, 2, 0); +} else { + x_988 = x_986; +} +lean_ctor_set(x_988, 0, x_987); +lean_ctor_set(x_988, 1, x_985); +x_776 = x_988; +x_777 = x_983; +goto block_806; +} +else +{ +lean_object* x_989; lean_object* x_990; lean_object* x_991; lean_object* x_992; +lean_dec(x_174); +lean_dec(x_163); +lean_dec(x_5); +lean_dec(x_4); +lean_dec(x_2); +lean_dec(x_1); +x_989 = lean_ctor_get(x_981, 0); +lean_inc(x_989); +x_990 = lean_ctor_get(x_981, 1); +lean_inc(x_990); +if (lean_is_exclusive(x_981)) { + lean_ctor_release(x_981, 0); + lean_ctor_release(x_981, 1); + x_991 = x_981; +} else { + lean_dec_ref(x_981); + x_991 = lean_box(0); 
+} +if (lean_is_scalar(x_991)) { + x_992 = lean_alloc_ctor(1, 2, 0); +} else { + x_992 = x_991; +} +lean_ctor_set(x_992, 0, x_989); +lean_ctor_set(x_992, 1, x_990); +return x_992; +} +} +} +} +else +{ +lean_object* x_993; lean_object* x_994; lean_object* x_995; lean_object* x_996; lean_object* x_997; lean_object* x_998; uint8_t x_999; +x_993 = lean_ctor_get(x_807, 1); +lean_inc(x_993); +lean_dec(x_807); +x_994 = lean_ctor_get(x_808, 0); +lean_inc(x_994); +if (lean_is_exclusive(x_808)) { + lean_ctor_release(x_808, 0); + x_995 = x_808; +} else { + lean_dec_ref(x_808); + x_995 = lean_box(0); +} +x_996 = lean_array_get_size(x_163); +x_997 = lean_ctor_get(x_994, 3); +lean_inc(x_997); +lean_dec(x_994); +x_998 = lean_array_get_size(x_997); +lean_dec(x_997); +x_999 = lean_nat_dec_lt(x_996, x_998); +if (x_999 == 0) +{ +uint8_t x_1000; +x_1000 = lean_nat_dec_eq(x_996, x_998); +if (x_1000 == 0) +{ +lean_object* x_1001; lean_object* x_1002; lean_object* x_1003; lean_object* x_1004; lean_object* x_1005; lean_object* x_1006; lean_object* x_1007; lean_object* x_1008; lean_object* x_1009; lean_object* x_1010; lean_object* x_1011; lean_object* x_1012; lean_object* x_1013; lean_object* x_1014; lean_object* x_1015; lean_object* x_1016; lean_object* x_1017; +x_1001 = lean_unsigned_to_nat(0u); +x_1002 = l_Array_extract___rarg(x_163, x_1001, x_998); +x_1003 = l_Array_extract___rarg(x_163, x_998, x_996); +lean_dec(x_996); +x_1004 = lean_alloc_ctor(6, 2, 0); +lean_ctor_set(x_1004, 0, x_153); +lean_ctor_set(x_1004, 1, x_1002); +x_1005 = lean_ctor_get(x_1, 0); +lean_inc(x_1005); +x_1006 = l_Lean_IR_ToIR_bindVar(x_1005, x_169, x_4, x_5, x_993); +x_1007 = lean_ctor_get(x_1006, 0); +lean_inc(x_1007); +x_1008 = lean_ctor_get(x_1006, 1); +lean_inc(x_1008); +lean_dec(x_1006); +x_1009 = lean_ctor_get(x_1007, 0); +lean_inc(x_1009); +x_1010 = lean_ctor_get(x_1007, 1); +lean_inc(x_1010); +lean_dec(x_1007); +x_1011 = l_Lean_IR_ToIR_newVar(x_1010, x_4, x_5, x_1008); +x_1012 = lean_ctor_get(x_1011, 0); +lean_inc(x_1012); +x_1013 = lean_ctor_get(x_1011, 1); +lean_inc(x_1013); +lean_dec(x_1011); +x_1014 = lean_ctor_get(x_1012, 0); +lean_inc(x_1014); +x_1015 = lean_ctor_get(x_1012, 1); +lean_inc(x_1015); +lean_dec(x_1012); +x_1016 = lean_ctor_get(x_1, 2); +lean_inc(x_1016); +lean_inc(x_5); +lean_inc(x_4); +x_1017 = l_Lean_IR_ToIR_lowerType(x_1016, x_1015, x_4, x_5, x_1013); +if (lean_obj_tag(x_1017) == 0) +{ +lean_object* x_1018; lean_object* x_1019; lean_object* x_1020; lean_object* x_1021; lean_object* x_1022; +x_1018 = lean_ctor_get(x_1017, 0); +lean_inc(x_1018); +x_1019 = lean_ctor_get(x_1017, 1); +lean_inc(x_1019); +lean_dec(x_1017); +x_1020 = lean_ctor_get(x_1018, 0); +lean_inc(x_1020); +x_1021 = lean_ctor_get(x_1018, 1); +lean_inc(x_1021); +lean_dec(x_1018); +lean_inc(x_5); +lean_inc(x_4); +lean_inc(x_2); +x_1022 = l_Lean_IR_ToIR_lowerLet___lambda__3(x_2, x_1014, x_1003, x_1009, x_1004, x_1020, x_1021, x_4, x_5, x_1019); +if (lean_obj_tag(x_1022) == 0) +{ +lean_object* x_1023; lean_object* x_1024; lean_object* x_1025; lean_object* x_1026; lean_object* x_1027; lean_object* x_1028; lean_object* x_1029; +x_1023 = lean_ctor_get(x_1022, 0); +lean_inc(x_1023); +x_1024 = lean_ctor_get(x_1022, 1); +lean_inc(x_1024); +lean_dec(x_1022); +x_1025 = lean_ctor_get(x_1023, 0); +lean_inc(x_1025); +x_1026 = lean_ctor_get(x_1023, 1); +lean_inc(x_1026); +if (lean_is_exclusive(x_1023)) { + lean_ctor_release(x_1023, 0); + lean_ctor_release(x_1023, 1); + x_1027 = x_1023; +} else { + lean_dec_ref(x_1023); + x_1027 = lean_box(0); +} +if 
(lean_is_scalar(x_995)) { + x_1028 = lean_alloc_ctor(1, 1, 0); +} else { + x_1028 = x_995; +} +lean_ctor_set(x_1028, 0, x_1025); +if (lean_is_scalar(x_1027)) { + x_1029 = lean_alloc_ctor(0, 2, 0); +} else { + x_1029 = x_1027; +} +lean_ctor_set(x_1029, 0, x_1028); +lean_ctor_set(x_1029, 1, x_1026); +x_776 = x_1029; +x_777 = x_1024; +goto block_806; +} +else +{ +lean_object* x_1030; lean_object* x_1031; lean_object* x_1032; lean_object* x_1033; +lean_dec(x_995); +lean_dec(x_174); +lean_dec(x_163); +lean_dec(x_5); +lean_dec(x_4); +lean_dec(x_2); +lean_dec(x_1); +x_1030 = lean_ctor_get(x_1022, 0); +lean_inc(x_1030); +x_1031 = lean_ctor_get(x_1022, 1); +lean_inc(x_1031); +if (lean_is_exclusive(x_1022)) { + lean_ctor_release(x_1022, 0); + lean_ctor_release(x_1022, 1); + x_1032 = x_1022; +} else { + lean_dec_ref(x_1022); + x_1032 = lean_box(0); +} +if (lean_is_scalar(x_1032)) { + x_1033 = lean_alloc_ctor(1, 2, 0); +} else { + x_1033 = x_1032; +} +lean_ctor_set(x_1033, 0, x_1030); +lean_ctor_set(x_1033, 1, x_1031); +return x_1033; +} +} +else +{ +lean_object* x_1034; lean_object* x_1035; lean_object* x_1036; lean_object* x_1037; +lean_dec(x_1014); +lean_dec(x_1009); +lean_dec(x_1004); +lean_dec(x_1003); +lean_dec(x_995); +lean_dec(x_174); +lean_dec(x_163); +lean_dec(x_5); +lean_dec(x_4); +lean_dec(x_2); +lean_dec(x_1); +x_1034 = lean_ctor_get(x_1017, 0); +lean_inc(x_1034); +x_1035 = lean_ctor_get(x_1017, 1); +lean_inc(x_1035); +if (lean_is_exclusive(x_1017)) { + lean_ctor_release(x_1017, 0); + lean_ctor_release(x_1017, 1); + x_1036 = x_1017; +} else { + lean_dec_ref(x_1017); + x_1036 = lean_box(0); +} +if (lean_is_scalar(x_1036)) { + x_1037 = lean_alloc_ctor(1, 2, 0); +} else { + x_1037 = x_1036; +} +lean_ctor_set(x_1037, 0, x_1034); +lean_ctor_set(x_1037, 1, x_1035); +return x_1037; +} +} +else +{ +lean_object* x_1038; lean_object* x_1039; lean_object* x_1040; lean_object* x_1041; lean_object* x_1042; lean_object* x_1043; lean_object* x_1044; lean_object* x_1045; lean_object* x_1046; +lean_dec(x_998); +lean_dec(x_996); +lean_inc(x_163); +x_1038 = lean_alloc_ctor(6, 2, 0); +lean_ctor_set(x_1038, 0, x_153); +lean_ctor_set(x_1038, 1, x_163); +x_1039 = lean_ctor_get(x_1, 0); +lean_inc(x_1039); +x_1040 = l_Lean_IR_ToIR_bindVar(x_1039, x_169, x_4, x_5, x_993); +x_1041 = lean_ctor_get(x_1040, 0); +lean_inc(x_1041); +x_1042 = lean_ctor_get(x_1040, 1); +lean_inc(x_1042); +lean_dec(x_1040); +x_1043 = lean_ctor_get(x_1041, 0); +lean_inc(x_1043); +x_1044 = lean_ctor_get(x_1041, 1); +lean_inc(x_1044); +lean_dec(x_1041); +x_1045 = lean_ctor_get(x_1, 2); +lean_inc(x_1045); +lean_inc(x_5); +lean_inc(x_4); +x_1046 = l_Lean_IR_ToIR_lowerType(x_1045, x_1044, x_4, x_5, x_1042); +if (lean_obj_tag(x_1046) == 0) +{ +lean_object* x_1047; lean_object* x_1048; lean_object* x_1049; lean_object* x_1050; lean_object* x_1051; +x_1047 = lean_ctor_get(x_1046, 0); +lean_inc(x_1047); +x_1048 = lean_ctor_get(x_1046, 1); +lean_inc(x_1048); +lean_dec(x_1046); +x_1049 = lean_ctor_get(x_1047, 0); +lean_inc(x_1049); +x_1050 = lean_ctor_get(x_1047, 1); +lean_inc(x_1050); +lean_dec(x_1047); +lean_inc(x_5); +lean_inc(x_4); +lean_inc(x_2); +x_1051 = l_Lean_IR_ToIR_lowerLet___lambda__1(x_2, x_1043, x_1038, x_1049, x_1050, x_4, x_5, x_1048); +if (lean_obj_tag(x_1051) == 0) +{ +lean_object* x_1052; lean_object* x_1053; lean_object* x_1054; lean_object* x_1055; lean_object* x_1056; lean_object* x_1057; lean_object* x_1058; +x_1052 = lean_ctor_get(x_1051, 0); +lean_inc(x_1052); +x_1053 = lean_ctor_get(x_1051, 1); +lean_inc(x_1053); 
+lean_dec(x_1051); +x_1054 = lean_ctor_get(x_1052, 0); +lean_inc(x_1054); +x_1055 = lean_ctor_get(x_1052, 1); +lean_inc(x_1055); +if (lean_is_exclusive(x_1052)) { + lean_ctor_release(x_1052, 0); + lean_ctor_release(x_1052, 1); + x_1056 = x_1052; +} else { + lean_dec_ref(x_1052); + x_1056 = lean_box(0); +} +if (lean_is_scalar(x_995)) { + x_1057 = lean_alloc_ctor(1, 1, 0); +} else { + x_1057 = x_995; +} +lean_ctor_set(x_1057, 0, x_1054); +if (lean_is_scalar(x_1056)) { + x_1058 = lean_alloc_ctor(0, 2, 0); +} else { + x_1058 = x_1056; +} +lean_ctor_set(x_1058, 0, x_1057); +lean_ctor_set(x_1058, 1, x_1055); +x_776 = x_1058; +x_777 = x_1053; +goto block_806; +} +else +{ +lean_object* x_1059; lean_object* x_1060; lean_object* x_1061; lean_object* x_1062; +lean_dec(x_995); +lean_dec(x_174); +lean_dec(x_163); +lean_dec(x_5); +lean_dec(x_4); +lean_dec(x_2); +lean_dec(x_1); +x_1059 = lean_ctor_get(x_1051, 0); +lean_inc(x_1059); +x_1060 = lean_ctor_get(x_1051, 1); +lean_inc(x_1060); +if (lean_is_exclusive(x_1051)) { + lean_ctor_release(x_1051, 0); + lean_ctor_release(x_1051, 1); + x_1061 = x_1051; +} else { + lean_dec_ref(x_1051); + x_1061 = lean_box(0); +} +if (lean_is_scalar(x_1061)) { + x_1062 = lean_alloc_ctor(1, 2, 0); +} else { + x_1062 = x_1061; +} +lean_ctor_set(x_1062, 0, x_1059); +lean_ctor_set(x_1062, 1, x_1060); +return x_1062; +} +} +else +{ +lean_object* x_1063; lean_object* x_1064; lean_object* x_1065; lean_object* x_1066; +lean_dec(x_1043); +lean_dec(x_1038); +lean_dec(x_995); +lean_dec(x_174); +lean_dec(x_163); +lean_dec(x_5); +lean_dec(x_4); +lean_dec(x_2); +lean_dec(x_1); +x_1063 = lean_ctor_get(x_1046, 0); +lean_inc(x_1063); +x_1064 = lean_ctor_get(x_1046, 1); +lean_inc(x_1064); +if (lean_is_exclusive(x_1046)) { + lean_ctor_release(x_1046, 0); + lean_ctor_release(x_1046, 1); + x_1065 = x_1046; +} else { + lean_dec_ref(x_1046); + x_1065 = lean_box(0); +} +if (lean_is_scalar(x_1065)) { + x_1066 = lean_alloc_ctor(1, 2, 0); +} else { + x_1066 = x_1065; +} +lean_ctor_set(x_1066, 0, x_1063); +lean_ctor_set(x_1066, 1, x_1064); +return x_1066; +} +} +} +else +{ +lean_object* x_1067; lean_object* x_1068; lean_object* x_1069; lean_object* x_1070; lean_object* x_1071; lean_object* x_1072; lean_object* x_1073; lean_object* x_1074; lean_object* x_1075; +lean_dec(x_998); +lean_dec(x_996); +lean_inc(x_163); +x_1067 = lean_alloc_ctor(7, 2, 0); +lean_ctor_set(x_1067, 0, x_153); +lean_ctor_set(x_1067, 1, x_163); +x_1068 = lean_ctor_get(x_1, 0); +lean_inc(x_1068); +x_1069 = l_Lean_IR_ToIR_bindVar(x_1068, x_169, x_4, x_5, x_993); +x_1070 = lean_ctor_get(x_1069, 0); +lean_inc(x_1070); +x_1071 = lean_ctor_get(x_1069, 1); +lean_inc(x_1071); +lean_dec(x_1069); +x_1072 = lean_ctor_get(x_1070, 0); +lean_inc(x_1072); +x_1073 = lean_ctor_get(x_1070, 1); +lean_inc(x_1073); +lean_dec(x_1070); +x_1074 = lean_box(7); +lean_inc(x_5); +lean_inc(x_4); +lean_inc(x_2); +x_1075 = l_Lean_IR_ToIR_lowerLet___lambda__1(x_2, x_1072, x_1067, x_1074, x_1073, x_4, x_5, x_1071); +if (lean_obj_tag(x_1075) == 0) +{ +lean_object* x_1076; lean_object* x_1077; lean_object* x_1078; lean_object* x_1079; lean_object* x_1080; lean_object* x_1081; lean_object* x_1082; +x_1076 = lean_ctor_get(x_1075, 0); +lean_inc(x_1076); +x_1077 = lean_ctor_get(x_1075, 1); +lean_inc(x_1077); +lean_dec(x_1075); +x_1078 = lean_ctor_get(x_1076, 0); +lean_inc(x_1078); +x_1079 = lean_ctor_get(x_1076, 1); +lean_inc(x_1079); +if (lean_is_exclusive(x_1076)) { + lean_ctor_release(x_1076, 0); + lean_ctor_release(x_1076, 1); + x_1080 = x_1076; +} else { + 
lean_dec_ref(x_1076); + x_1080 = lean_box(0); +} +if (lean_is_scalar(x_995)) { + x_1081 = lean_alloc_ctor(1, 1, 0); +} else { + x_1081 = x_995; +} +lean_ctor_set(x_1081, 0, x_1078); +if (lean_is_scalar(x_1080)) { + x_1082 = lean_alloc_ctor(0, 2, 0); +} else { + x_1082 = x_1080; +} +lean_ctor_set(x_1082, 0, x_1081); +lean_ctor_set(x_1082, 1, x_1079); +x_776 = x_1082; +x_777 = x_1077; +goto block_806; +} +else +{ +lean_object* x_1083; lean_object* x_1084; lean_object* x_1085; lean_object* x_1086; +lean_dec(x_995); +lean_dec(x_174); +lean_dec(x_163); +lean_dec(x_5); +lean_dec(x_4); +lean_dec(x_2); +lean_dec(x_1); +x_1083 = lean_ctor_get(x_1075, 0); +lean_inc(x_1083); +x_1084 = lean_ctor_get(x_1075, 1); +lean_inc(x_1084); +if (lean_is_exclusive(x_1075)) { + lean_ctor_release(x_1075, 0); + lean_ctor_release(x_1075, 1); + x_1085 = x_1075; +} else { + lean_dec_ref(x_1075); + x_1085 = lean_box(0); +} +if (lean_is_scalar(x_1085)) { + x_1086 = lean_alloc_ctor(1, 2, 0); +} else { + x_1086 = x_1085; +} +lean_ctor_set(x_1086, 0, x_1083); +lean_ctor_set(x_1086, 1, x_1084); +return x_1086; +} +} +} +} +block_806: +{ +lean_object* x_778; +x_778 = lean_ctor_get(x_776, 0); +lean_inc(x_778); +if (lean_obj_tag(x_778) == 0) +{ +lean_object* x_779; lean_object* x_780; lean_object* x_781; lean_object* x_782; lean_object* x_783; lean_object* x_784; lean_object* x_785; lean_object* x_786; lean_object* x_787; lean_object* x_788; +lean_dec(x_174); +x_779 = lean_ctor_get(x_776, 1); +lean_inc(x_779); +lean_dec(x_776); +x_780 = lean_alloc_ctor(6, 2, 0); +lean_ctor_set(x_780, 0, x_153); +lean_ctor_set(x_780, 1, x_163); +x_781 = lean_ctor_get(x_1, 0); +lean_inc(x_781); +x_782 = l_Lean_IR_ToIR_bindVar(x_781, x_779, x_4, x_5, x_777); +x_783 = lean_ctor_get(x_782, 0); +lean_inc(x_783); +x_784 = lean_ctor_get(x_782, 1); +lean_inc(x_784); +lean_dec(x_782); +x_785 = lean_ctor_get(x_783, 0); +lean_inc(x_785); +x_786 = lean_ctor_get(x_783, 1); +lean_inc(x_786); +lean_dec(x_783); +x_787 = lean_ctor_get(x_1, 2); +lean_inc(x_787); +lean_dec(x_1); +lean_inc(x_5); +lean_inc(x_4); +x_788 = l_Lean_IR_ToIR_lowerType(x_787, x_786, x_4, x_5, x_784); +if (lean_obj_tag(x_788) == 0) +{ +lean_object* x_789; lean_object* x_790; lean_object* x_791; lean_object* x_792; lean_object* x_793; +x_789 = lean_ctor_get(x_788, 0); +lean_inc(x_789); +x_790 = lean_ctor_get(x_788, 1); +lean_inc(x_790); +lean_dec(x_788); +x_791 = lean_ctor_get(x_789, 0); +lean_inc(x_791); +x_792 = lean_ctor_get(x_789, 1); +lean_inc(x_792); +lean_dec(x_789); +x_793 = l_Lean_IR_ToIR_lowerLet___lambda__1(x_2, x_785, x_780, x_791, x_792, x_4, x_5, x_790); +return x_793; +} +else +{ +uint8_t x_794; +lean_dec(x_785); +lean_dec(x_780); +lean_dec(x_5); +lean_dec(x_4); +lean_dec(x_2); +x_794 = !lean_is_exclusive(x_788); +if (x_794 == 0) +{ +return x_788; +} +else +{ +lean_object* x_795; lean_object* x_796; lean_object* x_797; +x_795 = lean_ctor_get(x_788, 0); +x_796 = lean_ctor_get(x_788, 1); +lean_inc(x_796); +lean_inc(x_795); +lean_dec(x_788); +x_797 = lean_alloc_ctor(1, 2, 0); +lean_ctor_set(x_797, 0, x_795); +lean_ctor_set(x_797, 1, x_796); +return x_797; +} +} +} +else +{ +uint8_t x_798; +lean_dec(x_163); +lean_dec(x_5); +lean_dec(x_4); +lean_dec(x_2); +lean_dec(x_1); +x_798 = !lean_is_exclusive(x_776); +if (x_798 == 0) +{ +lean_object* x_799; lean_object* x_800; lean_object* x_801; +x_799 = lean_ctor_get(x_776, 0); +lean_dec(x_799); +x_800 = lean_ctor_get(x_778, 0); +lean_inc(x_800); +lean_dec(x_778); +lean_ctor_set(x_776, 0, x_800); +if (lean_is_scalar(x_174)) { + x_801 = 
lean_alloc_ctor(0, 2, 0); +} else { + x_801 = x_174; +} +lean_ctor_set(x_801, 0, x_776); +lean_ctor_set(x_801, 1, x_777); +return x_801; +} +else +{ +lean_object* x_802; lean_object* x_803; lean_object* x_804; lean_object* x_805; +x_802 = lean_ctor_get(x_776, 1); +lean_inc(x_802); +lean_dec(x_776); +x_803 = lean_ctor_get(x_778, 0); +lean_inc(x_803); +lean_dec(x_778); +x_804 = lean_alloc_ctor(0, 2, 0); +lean_ctor_set(x_804, 0, x_803); +lean_ctor_set(x_804, 1, x_802); +if (lean_is_scalar(x_174)) { + x_805 = lean_alloc_ctor(0, 2, 0); +} else { + x_805 = x_174; +} +lean_ctor_set(x_805, 0, x_804); +lean_ctor_set(x_805, 1, x_777); +return x_805; +} +} +} +} +case 4: +{ +uint8_t x_1087; +lean_dec(x_175); +lean_dec(x_174); +lean_free_object(x_165); +lean_dec(x_155); +lean_dec(x_154); +x_1087 = !lean_is_exclusive(x_180); +if (x_1087 == 0) +{ +lean_object* x_1088; lean_object* x_1089; uint8_t x_1090; +x_1088 = lean_ctor_get(x_180, 0); +lean_dec(x_1088); +x_1089 = l_Lean_IR_ToIR_lowerLet___closed__20; +x_1090 = lean_name_eq(x_153, x_1089); +if (x_1090 == 0) +{ +uint8_t x_1091; lean_object* x_1092; lean_object* x_1093; lean_object* x_1094; lean_object* x_1095; lean_object* x_1096; lean_object* x_1097; lean_object* x_1098; lean_object* x_1099; +lean_dec(x_163); +lean_dec(x_2); +lean_dec(x_1); +x_1091 = 1; +x_1092 = l_Lean_IR_ToIR_lowerLet___closed__14; +x_1093 = l_Lean_Name_toString(x_153, x_1091, x_1092); +lean_ctor_set_tag(x_180, 3); +lean_ctor_set(x_180, 0, x_1093); +x_1094 = l_Lean_IR_ToIR_lowerLet___closed__22; +x_1095 = lean_alloc_ctor(5, 2, 0); +lean_ctor_set(x_1095, 0, x_1094); +lean_ctor_set(x_1095, 1, x_180); +x_1096 = l_Lean_IR_ToIR_lowerLet___closed__24; +x_1097 = lean_alloc_ctor(5, 2, 0); +lean_ctor_set(x_1097, 0, x_1095); +lean_ctor_set(x_1097, 1, x_1096); +x_1098 = l_Lean_MessageData_ofFormat(x_1097); +x_1099 = l_Lean_throwError___at_Lean_IR_ToIR_lowerLet___spec__1(x_1098, x_169, x_4, x_5, x_173); +lean_dec(x_5); +lean_dec(x_4); +lean_dec(x_169); +return x_1099; +} +else +{ +lean_object* x_1100; lean_object* x_1101; lean_object* x_1102; +lean_free_object(x_180); +x_1100 = l_Lean_IR_instInhabitedArg; +x_1101 = lean_unsigned_to_nat(2u); +x_1102 = lean_array_get(x_1100, x_163, x_1101); +lean_dec(x_163); +if (lean_obj_tag(x_1102) == 0) +{ +lean_object* x_1103; lean_object* x_1104; lean_object* x_1105; lean_object* x_1106; lean_object* x_1107; lean_object* x_1108; lean_object* x_1109; +x_1103 = lean_ctor_get(x_1102, 0); +lean_inc(x_1103); +lean_dec(x_1102); +x_1104 = lean_ctor_get(x_1, 0); +lean_inc(x_1104); +lean_dec(x_1); +x_1105 = l_Lean_IR_ToIR_bindVarToVarId(x_1104, x_1103, x_169, x_4, x_5, x_173); +x_1106 = lean_ctor_get(x_1105, 0); +lean_inc(x_1106); +x_1107 = lean_ctor_get(x_1105, 1); +lean_inc(x_1107); +lean_dec(x_1105); +x_1108 = lean_ctor_get(x_1106, 1); +lean_inc(x_1108); +lean_dec(x_1106); +x_1109 = l_Lean_IR_ToIR_lowerCode(x_2, x_1108, x_4, x_5, x_1107); +return x_1109; +} +else +{ +lean_object* x_1110; lean_object* x_1111; lean_object* x_1112; lean_object* x_1113; lean_object* x_1114; lean_object* x_1115; +x_1110 = lean_ctor_get(x_1, 0); +lean_inc(x_1110); +lean_dec(x_1); +x_1111 = l_Lean_IR_ToIR_bindErased(x_1110, x_169, x_4, x_5, x_173); +x_1112 = lean_ctor_get(x_1111, 0); +lean_inc(x_1112); +x_1113 = lean_ctor_get(x_1111, 1); +lean_inc(x_1113); +lean_dec(x_1111); +x_1114 = lean_ctor_get(x_1112, 1); +lean_inc(x_1114); +lean_dec(x_1112); +x_1115 = l_Lean_IR_ToIR_lowerCode(x_2, x_1114, x_4, x_5, x_1113); +return x_1115; +} +} +} +else +{ +lean_object* x_1116; uint8_t x_1117; 
+lean_dec(x_180); +x_1116 = l_Lean_IR_ToIR_lowerLet___closed__20; +x_1117 = lean_name_eq(x_153, x_1116); +if (x_1117 == 0) +{ +uint8_t x_1118; lean_object* x_1119; lean_object* x_1120; lean_object* x_1121; lean_object* x_1122; lean_object* x_1123; lean_object* x_1124; lean_object* x_1125; lean_object* x_1126; lean_object* x_1127; +lean_dec(x_163); +lean_dec(x_2); +lean_dec(x_1); +x_1118 = 1; +x_1119 = l_Lean_IR_ToIR_lowerLet___closed__14; +x_1120 = l_Lean_Name_toString(x_153, x_1118, x_1119); +x_1121 = lean_alloc_ctor(3, 1, 0); +lean_ctor_set(x_1121, 0, x_1120); +x_1122 = l_Lean_IR_ToIR_lowerLet___closed__22; +x_1123 = lean_alloc_ctor(5, 2, 0); +lean_ctor_set(x_1123, 0, x_1122); +lean_ctor_set(x_1123, 1, x_1121); +x_1124 = l_Lean_IR_ToIR_lowerLet___closed__24; +x_1125 = lean_alloc_ctor(5, 2, 0); +lean_ctor_set(x_1125, 0, x_1123); +lean_ctor_set(x_1125, 1, x_1124); +x_1126 = l_Lean_MessageData_ofFormat(x_1125); +x_1127 = l_Lean_throwError___at_Lean_IR_ToIR_lowerLet___spec__1(x_1126, x_169, x_4, x_5, x_173); +lean_dec(x_5); +lean_dec(x_4); +lean_dec(x_169); +return x_1127; +} +else +{ +lean_object* x_1128; lean_object* x_1129; lean_object* x_1130; +x_1128 = l_Lean_IR_instInhabitedArg; +x_1129 = lean_unsigned_to_nat(2u); +x_1130 = lean_array_get(x_1128, x_163, x_1129); +lean_dec(x_163); +if (lean_obj_tag(x_1130) == 0) +{ +lean_object* x_1131; lean_object* x_1132; lean_object* x_1133; lean_object* x_1134; lean_object* x_1135; lean_object* x_1136; lean_object* x_1137; +x_1131 = lean_ctor_get(x_1130, 0); +lean_inc(x_1131); +lean_dec(x_1130); +x_1132 = lean_ctor_get(x_1, 0); +lean_inc(x_1132); +lean_dec(x_1); +x_1133 = l_Lean_IR_ToIR_bindVarToVarId(x_1132, x_1131, x_169, x_4, x_5, x_173); +x_1134 = lean_ctor_get(x_1133, 0); +lean_inc(x_1134); +x_1135 = lean_ctor_get(x_1133, 1); +lean_inc(x_1135); +lean_dec(x_1133); +x_1136 = lean_ctor_get(x_1134, 1); +lean_inc(x_1136); +lean_dec(x_1134); +x_1137 = l_Lean_IR_ToIR_lowerCode(x_2, x_1136, x_4, x_5, x_1135); +return x_1137; +} +else +{ +lean_object* x_1138; lean_object* x_1139; lean_object* x_1140; lean_object* x_1141; lean_object* x_1142; lean_object* x_1143; +x_1138 = lean_ctor_get(x_1, 0); +lean_inc(x_1138); +lean_dec(x_1); +x_1139 = l_Lean_IR_ToIR_bindErased(x_1138, x_169, x_4, x_5, x_173); +x_1140 = lean_ctor_get(x_1139, 0); +lean_inc(x_1140); +x_1141 = lean_ctor_get(x_1139, 1); +lean_inc(x_1141); +lean_dec(x_1139); +x_1142 = lean_ctor_get(x_1140, 1); +lean_inc(x_1142); +lean_dec(x_1140); +x_1143 = l_Lean_IR_ToIR_lowerCode(x_2, x_1142, x_4, x_5, x_1141); +return x_1143; +} +} +} +} +case 5: +{ +lean_object* x_1144; lean_object* x_1145; +lean_dec(x_180); +lean_dec(x_175); +lean_dec(x_174); +lean_free_object(x_165); +lean_dec(x_163); +lean_dec(x_155); +lean_dec(x_154); +lean_dec(x_2); +lean_dec(x_1); +x_1144 = l_Lean_IR_ToIR_lowerLet___closed__26; +x_1145 = l_panic___at_Lean_IR_ToIR_lowerAlt_loop___spec__1(x_1144, x_169, x_4, x_5, x_173); +return x_1145; +} +case 6: +{ +lean_object* x_1146; uint8_t x_1147; +x_1146 = lean_ctor_get(x_180, 0); +lean_inc(x_1146); +lean_dec(x_180); +x_1147 = l_Lean_isExtern(x_175, x_153); +if (x_1147 == 0) +{ +lean_object* x_1148; +lean_dec(x_174); +lean_free_object(x_165); +lean_dec(x_163); +lean_inc(x_5); +lean_inc(x_4); +x_1148 = l_Lean_IR_ToIR_getCtorInfo(x_153, x_169, x_4, x_5, x_173); +if (lean_obj_tag(x_1148) == 0) +{ +lean_object* x_1149; lean_object* x_1150; lean_object* x_1151; lean_object* x_1152; lean_object* x_1153; lean_object* x_1154; lean_object* x_1155; lean_object* x_1156; lean_object* x_1157; 
lean_object* x_1158; lean_object* x_1159; lean_object* x_1160; lean_object* x_1161; lean_object* x_1162; lean_object* x_1163; lean_object* x_1164; lean_object* x_1165; lean_object* x_1166; lean_object* x_1167; lean_object* x_1168; +x_1149 = lean_ctor_get(x_1148, 0); +lean_inc(x_1149); +x_1150 = lean_ctor_get(x_1149, 0); +lean_inc(x_1150); +x_1151 = lean_ctor_get(x_1148, 1); +lean_inc(x_1151); +lean_dec(x_1148); +x_1152 = lean_ctor_get(x_1149, 1); +lean_inc(x_1152); +lean_dec(x_1149); +x_1153 = lean_ctor_get(x_1150, 0); +lean_inc(x_1153); +x_1154 = lean_ctor_get(x_1150, 1); +lean_inc(x_1154); +lean_dec(x_1150); +x_1155 = lean_ctor_get(x_1146, 3); +lean_inc(x_1155); +lean_dec(x_1146); +x_1156 = lean_array_get_size(x_154); +x_1157 = l_Array_extract___rarg(x_154, x_1155, x_1156); +lean_dec(x_1156); +lean_dec(x_154); +x_1158 = lean_array_get_size(x_1154); +x_1159 = lean_unsigned_to_nat(0u); +x_1160 = lean_unsigned_to_nat(1u); +if (lean_is_scalar(x_155)) { + x_1161 = lean_alloc_ctor(0, 3, 0); +} else { + x_1161 = x_155; + lean_ctor_set_tag(x_1161, 0); +} +lean_ctor_set(x_1161, 0, x_1159); +lean_ctor_set(x_1161, 1, x_1158); +lean_ctor_set(x_1161, 2, x_1160); +x_1162 = l_Lean_IR_ToIR_lowerLet___closed__27; +x_1163 = l_Std_Range_forIn_x27_loop___at_Lean_IR_ToIR_lowerLet___spec__2(x_1154, x_1157, x_1161, x_1161, x_1162, x_1159, lean_box(0), lean_box(0), x_1152, x_4, x_5, x_1151); +lean_dec(x_1161); +x_1164 = lean_ctor_get(x_1163, 0); +lean_inc(x_1164); +x_1165 = lean_ctor_get(x_1163, 1); +lean_inc(x_1165); +lean_dec(x_1163); +x_1166 = lean_ctor_get(x_1164, 0); +lean_inc(x_1166); +x_1167 = lean_ctor_get(x_1164, 1); +lean_inc(x_1167); +lean_dec(x_1164); +x_1168 = l_Lean_IR_ToIR_lowerLet___lambda__4(x_1, x_2, x_1153, x_1154, x_1157, x_1166, x_1167, x_4, x_5, x_1165); +lean_dec(x_1157); +lean_dec(x_1154); +return x_1168; +} +else +{ +uint8_t x_1169; +lean_dec(x_1146); +lean_dec(x_155); +lean_dec(x_154); +lean_dec(x_5); +lean_dec(x_4); +lean_dec(x_2); +lean_dec(x_1); +x_1169 = !lean_is_exclusive(x_1148); +if (x_1169 == 0) +{ +return x_1148; +} +else +{ +lean_object* x_1170; lean_object* x_1171; lean_object* x_1172; +x_1170 = lean_ctor_get(x_1148, 0); +x_1171 = lean_ctor_get(x_1148, 1); +lean_inc(x_1171); +lean_inc(x_1170); +lean_dec(x_1148); +x_1172 = lean_alloc_ctor(1, 2, 0); +lean_ctor_set(x_1172, 0, x_1170); +lean_ctor_set(x_1172, 1, x_1171); +return x_1172; +} +} +} +else +{ +lean_object* x_1173; lean_object* x_1174; lean_object* x_1204; lean_object* x_1205; +lean_dec(x_1146); +lean_dec(x_155); +lean_dec(x_154); +x_1204 = l_Lean_Compiler_LCNF_getMonoDecl_x3f(x_153, x_4, x_5, x_173); +x_1205 = lean_ctor_get(x_1204, 0); +lean_inc(x_1205); +if (lean_obj_tag(x_1205) == 0) +{ +lean_object* x_1206; lean_object* x_1207; +x_1206 = lean_ctor_get(x_1204, 1); +lean_inc(x_1206); +lean_dec(x_1204); +x_1207 = lean_box(0); +lean_ctor_set(x_165, 0, x_1207); +x_1173 = x_165; +x_1174 = x_1206; +goto block_1203; +} +else +{ +uint8_t x_1208; +lean_free_object(x_165); +x_1208 = !lean_is_exclusive(x_1204); +if (x_1208 == 0) +{ +lean_object* x_1209; lean_object* x_1210; uint8_t x_1211; +x_1209 = lean_ctor_get(x_1204, 1); +x_1210 = lean_ctor_get(x_1204, 0); +lean_dec(x_1210); +x_1211 = !lean_is_exclusive(x_1205); +if (x_1211 == 0) +{ +lean_object* x_1212; lean_object* x_1213; lean_object* x_1214; lean_object* x_1215; uint8_t x_1216; +x_1212 = lean_ctor_get(x_1205, 0); +x_1213 = lean_array_get_size(x_163); +x_1214 = lean_ctor_get(x_1212, 3); +lean_inc(x_1214); +lean_dec(x_1212); +x_1215 = lean_array_get_size(x_1214); 
+lean_dec(x_1214); +x_1216 = lean_nat_dec_lt(x_1213, x_1215); +if (x_1216 == 0) +{ +uint8_t x_1217; +x_1217 = lean_nat_dec_eq(x_1213, x_1215); +if (x_1217 == 0) +{ +lean_object* x_1218; lean_object* x_1219; lean_object* x_1220; lean_object* x_1221; lean_object* x_1222; lean_object* x_1223; lean_object* x_1224; lean_object* x_1225; lean_object* x_1226; lean_object* x_1227; lean_object* x_1228; lean_object* x_1229; lean_object* x_1230; lean_object* x_1231; lean_object* x_1232; lean_object* x_1233; +x_1218 = lean_unsigned_to_nat(0u); +x_1219 = l_Array_extract___rarg(x_163, x_1218, x_1215); +x_1220 = l_Array_extract___rarg(x_163, x_1215, x_1213); +lean_dec(x_1213); +lean_ctor_set_tag(x_1204, 6); +lean_ctor_set(x_1204, 1, x_1219); +lean_ctor_set(x_1204, 0, x_153); +x_1221 = lean_ctor_get(x_1, 0); +lean_inc(x_1221); +x_1222 = l_Lean_IR_ToIR_bindVar(x_1221, x_169, x_4, x_5, x_1209); +x_1223 = lean_ctor_get(x_1222, 0); +lean_inc(x_1223); +x_1224 = lean_ctor_get(x_1222, 1); +lean_inc(x_1224); +lean_dec(x_1222); +x_1225 = lean_ctor_get(x_1223, 0); +lean_inc(x_1225); +x_1226 = lean_ctor_get(x_1223, 1); +lean_inc(x_1226); +lean_dec(x_1223); +x_1227 = l_Lean_IR_ToIR_newVar(x_1226, x_4, x_5, x_1224); +x_1228 = lean_ctor_get(x_1227, 0); +lean_inc(x_1228); +x_1229 = lean_ctor_get(x_1227, 1); +lean_inc(x_1229); +lean_dec(x_1227); +x_1230 = lean_ctor_get(x_1228, 0); +lean_inc(x_1230); +x_1231 = lean_ctor_get(x_1228, 1); +lean_inc(x_1231); +lean_dec(x_1228); +x_1232 = lean_ctor_get(x_1, 2); +lean_inc(x_1232); +lean_inc(x_5); +lean_inc(x_4); +x_1233 = l_Lean_IR_ToIR_lowerType(x_1232, x_1231, x_4, x_5, x_1229); +if (lean_obj_tag(x_1233) == 0) +{ +lean_object* x_1234; lean_object* x_1235; lean_object* x_1236; lean_object* x_1237; lean_object* x_1238; +x_1234 = lean_ctor_get(x_1233, 0); +lean_inc(x_1234); +x_1235 = lean_ctor_get(x_1233, 1); +lean_inc(x_1235); +lean_dec(x_1233); +x_1236 = lean_ctor_get(x_1234, 0); +lean_inc(x_1236); +x_1237 = lean_ctor_get(x_1234, 1); +lean_inc(x_1237); +lean_dec(x_1234); +lean_inc(x_5); +lean_inc(x_4); +lean_inc(x_2); +x_1238 = l_Lean_IR_ToIR_lowerLet___lambda__3(x_2, x_1230, x_1220, x_1225, x_1204, x_1236, x_1237, x_4, x_5, x_1235); +if (lean_obj_tag(x_1238) == 0) +{ +lean_object* x_1239; lean_object* x_1240; uint8_t x_1241; +x_1239 = lean_ctor_get(x_1238, 0); +lean_inc(x_1239); +x_1240 = lean_ctor_get(x_1238, 1); +lean_inc(x_1240); +lean_dec(x_1238); +x_1241 = !lean_is_exclusive(x_1239); +if (x_1241 == 0) +{ +lean_object* x_1242; +x_1242 = lean_ctor_get(x_1239, 0); +lean_ctor_set(x_1205, 0, x_1242); +lean_ctor_set(x_1239, 0, x_1205); +x_1173 = x_1239; +x_1174 = x_1240; +goto block_1203; +} +else +{ +lean_object* x_1243; lean_object* x_1244; lean_object* x_1245; +x_1243 = lean_ctor_get(x_1239, 0); +x_1244 = lean_ctor_get(x_1239, 1); +lean_inc(x_1244); +lean_inc(x_1243); +lean_dec(x_1239); +lean_ctor_set(x_1205, 0, x_1243); +x_1245 = lean_alloc_ctor(0, 2, 0); +lean_ctor_set(x_1245, 0, x_1205); +lean_ctor_set(x_1245, 1, x_1244); +x_1173 = x_1245; +x_1174 = x_1240; +goto block_1203; +} +} +else +{ +uint8_t x_1246; +lean_free_object(x_1205); +lean_dec(x_174); +lean_dec(x_163); +lean_dec(x_5); +lean_dec(x_4); +lean_dec(x_2); +lean_dec(x_1); +x_1246 = !lean_is_exclusive(x_1238); +if (x_1246 == 0) +{ +return x_1238; +} +else +{ +lean_object* x_1247; lean_object* x_1248; lean_object* x_1249; +x_1247 = lean_ctor_get(x_1238, 0); +x_1248 = lean_ctor_get(x_1238, 1); +lean_inc(x_1248); +lean_inc(x_1247); +lean_dec(x_1238); +x_1249 = lean_alloc_ctor(1, 2, 0); +lean_ctor_set(x_1249, 0, 
x_1247); +lean_ctor_set(x_1249, 1, x_1248); +return x_1249; +} +} +} +else +{ +uint8_t x_1250; +lean_dec(x_1230); +lean_dec(x_1225); +lean_dec(x_1204); +lean_dec(x_1220); +lean_free_object(x_1205); +lean_dec(x_174); +lean_dec(x_163); +lean_dec(x_5); +lean_dec(x_4); +lean_dec(x_2); +lean_dec(x_1); +x_1250 = !lean_is_exclusive(x_1233); +if (x_1250 == 0) +{ +return x_1233; +} +else +{ +lean_object* x_1251; lean_object* x_1252; lean_object* x_1253; +x_1251 = lean_ctor_get(x_1233, 0); +x_1252 = lean_ctor_get(x_1233, 1); +lean_inc(x_1252); +lean_inc(x_1251); +lean_dec(x_1233); +x_1253 = lean_alloc_ctor(1, 2, 0); +lean_ctor_set(x_1253, 0, x_1251); +lean_ctor_set(x_1253, 1, x_1252); +return x_1253; +} +} +} +else +{ +lean_object* x_1254; lean_object* x_1255; lean_object* x_1256; lean_object* x_1257; lean_object* x_1258; lean_object* x_1259; lean_object* x_1260; lean_object* x_1261; +lean_dec(x_1215); +lean_dec(x_1213); +lean_inc(x_163); +lean_ctor_set_tag(x_1204, 6); +lean_ctor_set(x_1204, 1, x_163); +lean_ctor_set(x_1204, 0, x_153); +x_1254 = lean_ctor_get(x_1, 0); +lean_inc(x_1254); +x_1255 = l_Lean_IR_ToIR_bindVar(x_1254, x_169, x_4, x_5, x_1209); +x_1256 = lean_ctor_get(x_1255, 0); +lean_inc(x_1256); +x_1257 = lean_ctor_get(x_1255, 1); +lean_inc(x_1257); +lean_dec(x_1255); +x_1258 = lean_ctor_get(x_1256, 0); +lean_inc(x_1258); +x_1259 = lean_ctor_get(x_1256, 1); +lean_inc(x_1259); +lean_dec(x_1256); +x_1260 = lean_ctor_get(x_1, 2); +lean_inc(x_1260); +lean_inc(x_5); +lean_inc(x_4); +x_1261 = l_Lean_IR_ToIR_lowerType(x_1260, x_1259, x_4, x_5, x_1257); +if (lean_obj_tag(x_1261) == 0) +{ +lean_object* x_1262; lean_object* x_1263; lean_object* x_1264; lean_object* x_1265; lean_object* x_1266; +x_1262 = lean_ctor_get(x_1261, 0); +lean_inc(x_1262); +x_1263 = lean_ctor_get(x_1261, 1); +lean_inc(x_1263); +lean_dec(x_1261); +x_1264 = lean_ctor_get(x_1262, 0); +lean_inc(x_1264); +x_1265 = lean_ctor_get(x_1262, 1); +lean_inc(x_1265); +lean_dec(x_1262); +lean_inc(x_5); +lean_inc(x_4); +lean_inc(x_2); +x_1266 = l_Lean_IR_ToIR_lowerLet___lambda__1(x_2, x_1258, x_1204, x_1264, x_1265, x_4, x_5, x_1263); +if (lean_obj_tag(x_1266) == 0) +{ +lean_object* x_1267; lean_object* x_1268; uint8_t x_1269; +x_1267 = lean_ctor_get(x_1266, 0); +lean_inc(x_1267); +x_1268 = lean_ctor_get(x_1266, 1); +lean_inc(x_1268); +lean_dec(x_1266); +x_1269 = !lean_is_exclusive(x_1267); +if (x_1269 == 0) +{ +lean_object* x_1270; +x_1270 = lean_ctor_get(x_1267, 0); +lean_ctor_set(x_1205, 0, x_1270); +lean_ctor_set(x_1267, 0, x_1205); +x_1173 = x_1267; +x_1174 = x_1268; +goto block_1203; +} +else +{ +lean_object* x_1271; lean_object* x_1272; lean_object* x_1273; +x_1271 = lean_ctor_get(x_1267, 0); +x_1272 = lean_ctor_get(x_1267, 1); +lean_inc(x_1272); +lean_inc(x_1271); +lean_dec(x_1267); +lean_ctor_set(x_1205, 0, x_1271); +x_1273 = lean_alloc_ctor(0, 2, 0); +lean_ctor_set(x_1273, 0, x_1205); +lean_ctor_set(x_1273, 1, x_1272); +x_1173 = x_1273; +x_1174 = x_1268; +goto block_1203; +} +} +else +{ +uint8_t x_1274; +lean_free_object(x_1205); +lean_dec(x_174); +lean_dec(x_163); +lean_dec(x_5); +lean_dec(x_4); +lean_dec(x_2); +lean_dec(x_1); +x_1274 = !lean_is_exclusive(x_1266); +if (x_1274 == 0) +{ +return x_1266; +} +else +{ +lean_object* x_1275; lean_object* x_1276; lean_object* x_1277; +x_1275 = lean_ctor_get(x_1266, 0); +x_1276 = lean_ctor_get(x_1266, 1); +lean_inc(x_1276); +lean_inc(x_1275); +lean_dec(x_1266); +x_1277 = lean_alloc_ctor(1, 2, 0); +lean_ctor_set(x_1277, 0, x_1275); +lean_ctor_set(x_1277, 1, x_1276); +return x_1277; +} +} 
+} +else +{ +uint8_t x_1278; +lean_dec(x_1258); +lean_dec(x_1204); +lean_free_object(x_1205); +lean_dec(x_174); +lean_dec(x_163); +lean_dec(x_5); +lean_dec(x_4); +lean_dec(x_2); +lean_dec(x_1); +x_1278 = !lean_is_exclusive(x_1261); +if (x_1278 == 0) +{ +return x_1261; +} +else +{ +lean_object* x_1279; lean_object* x_1280; lean_object* x_1281; +x_1279 = lean_ctor_get(x_1261, 0); +x_1280 = lean_ctor_get(x_1261, 1); +lean_inc(x_1280); +lean_inc(x_1279); +lean_dec(x_1261); +x_1281 = lean_alloc_ctor(1, 2, 0); +lean_ctor_set(x_1281, 0, x_1279); +lean_ctor_set(x_1281, 1, x_1280); +return x_1281; +} +} +} +} +else +{ +lean_object* x_1282; lean_object* x_1283; lean_object* x_1284; lean_object* x_1285; lean_object* x_1286; lean_object* x_1287; lean_object* x_1288; lean_object* x_1289; +lean_dec(x_1215); +lean_dec(x_1213); +lean_inc(x_163); +lean_ctor_set_tag(x_1204, 7); +lean_ctor_set(x_1204, 1, x_163); +lean_ctor_set(x_1204, 0, x_153); +x_1282 = lean_ctor_get(x_1, 0); +lean_inc(x_1282); +x_1283 = l_Lean_IR_ToIR_bindVar(x_1282, x_169, x_4, x_5, x_1209); +x_1284 = lean_ctor_get(x_1283, 0); +lean_inc(x_1284); +x_1285 = lean_ctor_get(x_1283, 1); +lean_inc(x_1285); +lean_dec(x_1283); +x_1286 = lean_ctor_get(x_1284, 0); +lean_inc(x_1286); +x_1287 = lean_ctor_get(x_1284, 1); +lean_inc(x_1287); +lean_dec(x_1284); +x_1288 = lean_box(7); +lean_inc(x_5); +lean_inc(x_4); +lean_inc(x_2); +x_1289 = l_Lean_IR_ToIR_lowerLet___lambda__1(x_2, x_1286, x_1204, x_1288, x_1287, x_4, x_5, x_1285); +if (lean_obj_tag(x_1289) == 0) +{ +lean_object* x_1290; lean_object* x_1291; uint8_t x_1292; +x_1290 = lean_ctor_get(x_1289, 0); +lean_inc(x_1290); +x_1291 = lean_ctor_get(x_1289, 1); +lean_inc(x_1291); +lean_dec(x_1289); +x_1292 = !lean_is_exclusive(x_1290); +if (x_1292 == 0) +{ +lean_object* x_1293; +x_1293 = lean_ctor_get(x_1290, 0); +lean_ctor_set(x_1205, 0, x_1293); +lean_ctor_set(x_1290, 0, x_1205); +x_1173 = x_1290; +x_1174 = x_1291; +goto block_1203; +} +else +{ +lean_object* x_1294; lean_object* x_1295; lean_object* x_1296; +x_1294 = lean_ctor_get(x_1290, 0); +x_1295 = lean_ctor_get(x_1290, 1); +lean_inc(x_1295); +lean_inc(x_1294); +lean_dec(x_1290); +lean_ctor_set(x_1205, 0, x_1294); +x_1296 = lean_alloc_ctor(0, 2, 0); +lean_ctor_set(x_1296, 0, x_1205); +lean_ctor_set(x_1296, 1, x_1295); +x_1173 = x_1296; +x_1174 = x_1291; +goto block_1203; +} +} +else +{ +uint8_t x_1297; +lean_free_object(x_1205); +lean_dec(x_174); +lean_dec(x_163); +lean_dec(x_5); +lean_dec(x_4); +lean_dec(x_2); +lean_dec(x_1); +x_1297 = !lean_is_exclusive(x_1289); +if (x_1297 == 0) +{ +return x_1289; +} +else +{ +lean_object* x_1298; lean_object* x_1299; lean_object* x_1300; +x_1298 = lean_ctor_get(x_1289, 0); +x_1299 = lean_ctor_get(x_1289, 1); +lean_inc(x_1299); +lean_inc(x_1298); +lean_dec(x_1289); +x_1300 = lean_alloc_ctor(1, 2, 0); +lean_ctor_set(x_1300, 0, x_1298); +lean_ctor_set(x_1300, 1, x_1299); +return x_1300; +} +} +} +} +else +{ +lean_object* x_1301; lean_object* x_1302; lean_object* x_1303; lean_object* x_1304; uint8_t x_1305; +x_1301 = lean_ctor_get(x_1205, 0); +lean_inc(x_1301); +lean_dec(x_1205); +x_1302 = lean_array_get_size(x_163); +x_1303 = lean_ctor_get(x_1301, 3); +lean_inc(x_1303); +lean_dec(x_1301); +x_1304 = lean_array_get_size(x_1303); +lean_dec(x_1303); +x_1305 = lean_nat_dec_lt(x_1302, x_1304); +if (x_1305 == 0) +{ +uint8_t x_1306; +x_1306 = lean_nat_dec_eq(x_1302, x_1304); +if (x_1306 == 0) +{ +lean_object* x_1307; lean_object* x_1308; lean_object* x_1309; lean_object* x_1310; lean_object* x_1311; lean_object* x_1312; 
lean_object* x_1313; lean_object* x_1314; lean_object* x_1315; lean_object* x_1316; lean_object* x_1317; lean_object* x_1318; lean_object* x_1319; lean_object* x_1320; lean_object* x_1321; lean_object* x_1322; +x_1307 = lean_unsigned_to_nat(0u); +x_1308 = l_Array_extract___rarg(x_163, x_1307, x_1304); +x_1309 = l_Array_extract___rarg(x_163, x_1304, x_1302); +lean_dec(x_1302); +lean_ctor_set_tag(x_1204, 6); +lean_ctor_set(x_1204, 1, x_1308); +lean_ctor_set(x_1204, 0, x_153); +x_1310 = lean_ctor_get(x_1, 0); +lean_inc(x_1310); +x_1311 = l_Lean_IR_ToIR_bindVar(x_1310, x_169, x_4, x_5, x_1209); +x_1312 = lean_ctor_get(x_1311, 0); +lean_inc(x_1312); +x_1313 = lean_ctor_get(x_1311, 1); +lean_inc(x_1313); +lean_dec(x_1311); +x_1314 = lean_ctor_get(x_1312, 0); +lean_inc(x_1314); +x_1315 = lean_ctor_get(x_1312, 1); +lean_inc(x_1315); +lean_dec(x_1312); +x_1316 = l_Lean_IR_ToIR_newVar(x_1315, x_4, x_5, x_1313); +x_1317 = lean_ctor_get(x_1316, 0); +lean_inc(x_1317); +x_1318 = lean_ctor_get(x_1316, 1); +lean_inc(x_1318); +lean_dec(x_1316); +x_1319 = lean_ctor_get(x_1317, 0); +lean_inc(x_1319); +x_1320 = lean_ctor_get(x_1317, 1); +lean_inc(x_1320); +lean_dec(x_1317); +x_1321 = lean_ctor_get(x_1, 2); +lean_inc(x_1321); +lean_inc(x_5); +lean_inc(x_4); +x_1322 = l_Lean_IR_ToIR_lowerType(x_1321, x_1320, x_4, x_5, x_1318); +if (lean_obj_tag(x_1322) == 0) +{ +lean_object* x_1323; lean_object* x_1324; lean_object* x_1325; lean_object* x_1326; lean_object* x_1327; +x_1323 = lean_ctor_get(x_1322, 0); +lean_inc(x_1323); +x_1324 = lean_ctor_get(x_1322, 1); +lean_inc(x_1324); +lean_dec(x_1322); +x_1325 = lean_ctor_get(x_1323, 0); +lean_inc(x_1325); +x_1326 = lean_ctor_get(x_1323, 1); +lean_inc(x_1326); +lean_dec(x_1323); +lean_inc(x_5); +lean_inc(x_4); +lean_inc(x_2); +x_1327 = l_Lean_IR_ToIR_lowerLet___lambda__3(x_2, x_1319, x_1309, x_1314, x_1204, x_1325, x_1326, x_4, x_5, x_1324); +if (lean_obj_tag(x_1327) == 0) +{ +lean_object* x_1328; lean_object* x_1329; lean_object* x_1330; lean_object* x_1331; lean_object* x_1332; lean_object* x_1333; lean_object* x_1334; +x_1328 = lean_ctor_get(x_1327, 0); +lean_inc(x_1328); +x_1329 = lean_ctor_get(x_1327, 1); +lean_inc(x_1329); +lean_dec(x_1327); +x_1330 = lean_ctor_get(x_1328, 0); +lean_inc(x_1330); +x_1331 = lean_ctor_get(x_1328, 1); +lean_inc(x_1331); +if (lean_is_exclusive(x_1328)) { + lean_ctor_release(x_1328, 0); + lean_ctor_release(x_1328, 1); + x_1332 = x_1328; +} else { + lean_dec_ref(x_1328); + x_1332 = lean_box(0); +} +x_1333 = lean_alloc_ctor(1, 1, 0); +lean_ctor_set(x_1333, 0, x_1330); +if (lean_is_scalar(x_1332)) { + x_1334 = lean_alloc_ctor(0, 2, 0); +} else { + x_1334 = x_1332; +} +lean_ctor_set(x_1334, 0, x_1333); +lean_ctor_set(x_1334, 1, x_1331); +x_1173 = x_1334; +x_1174 = x_1329; +goto block_1203; +} +else +{ +lean_object* x_1335; lean_object* x_1336; lean_object* x_1337; lean_object* x_1338; +lean_dec(x_174); +lean_dec(x_163); +lean_dec(x_5); +lean_dec(x_4); +lean_dec(x_2); +lean_dec(x_1); +x_1335 = lean_ctor_get(x_1327, 0); +lean_inc(x_1335); +x_1336 = lean_ctor_get(x_1327, 1); +lean_inc(x_1336); +if (lean_is_exclusive(x_1327)) { + lean_ctor_release(x_1327, 0); + lean_ctor_release(x_1327, 1); + x_1337 = x_1327; +} else { + lean_dec_ref(x_1327); + x_1337 = lean_box(0); +} +if (lean_is_scalar(x_1337)) { + x_1338 = lean_alloc_ctor(1, 2, 0); +} else { + x_1338 = x_1337; +} +lean_ctor_set(x_1338, 0, x_1335); +lean_ctor_set(x_1338, 1, x_1336); +return x_1338; +} +} +else +{ +lean_object* x_1339; lean_object* x_1340; lean_object* x_1341; lean_object* 
x_1342; +lean_dec(x_1319); +lean_dec(x_1314); +lean_dec(x_1204); +lean_dec(x_1309); +lean_dec(x_174); +lean_dec(x_163); +lean_dec(x_5); +lean_dec(x_4); +lean_dec(x_2); +lean_dec(x_1); +x_1339 = lean_ctor_get(x_1322, 0); +lean_inc(x_1339); +x_1340 = lean_ctor_get(x_1322, 1); +lean_inc(x_1340); +if (lean_is_exclusive(x_1322)) { + lean_ctor_release(x_1322, 0); + lean_ctor_release(x_1322, 1); + x_1341 = x_1322; +} else { + lean_dec_ref(x_1322); + x_1341 = lean_box(0); +} +if (lean_is_scalar(x_1341)) { + x_1342 = lean_alloc_ctor(1, 2, 0); +} else { + x_1342 = x_1341; +} +lean_ctor_set(x_1342, 0, x_1339); +lean_ctor_set(x_1342, 1, x_1340); +return x_1342; +} +} +else +{ +lean_object* x_1343; lean_object* x_1344; lean_object* x_1345; lean_object* x_1346; lean_object* x_1347; lean_object* x_1348; lean_object* x_1349; lean_object* x_1350; +lean_dec(x_1304); +lean_dec(x_1302); +lean_inc(x_163); +lean_ctor_set_tag(x_1204, 6); +lean_ctor_set(x_1204, 1, x_163); +lean_ctor_set(x_1204, 0, x_153); +x_1343 = lean_ctor_get(x_1, 0); +lean_inc(x_1343); +x_1344 = l_Lean_IR_ToIR_bindVar(x_1343, x_169, x_4, x_5, x_1209); +x_1345 = lean_ctor_get(x_1344, 0); +lean_inc(x_1345); +x_1346 = lean_ctor_get(x_1344, 1); +lean_inc(x_1346); +lean_dec(x_1344); +x_1347 = lean_ctor_get(x_1345, 0); +lean_inc(x_1347); +x_1348 = lean_ctor_get(x_1345, 1); +lean_inc(x_1348); +lean_dec(x_1345); +x_1349 = lean_ctor_get(x_1, 2); +lean_inc(x_1349); +lean_inc(x_5); +lean_inc(x_4); +x_1350 = l_Lean_IR_ToIR_lowerType(x_1349, x_1348, x_4, x_5, x_1346); +if (lean_obj_tag(x_1350) == 0) +{ +lean_object* x_1351; lean_object* x_1352; lean_object* x_1353; lean_object* x_1354; lean_object* x_1355; +x_1351 = lean_ctor_get(x_1350, 0); +lean_inc(x_1351); +x_1352 = lean_ctor_get(x_1350, 1); +lean_inc(x_1352); +lean_dec(x_1350); +x_1353 = lean_ctor_get(x_1351, 0); +lean_inc(x_1353); +x_1354 = lean_ctor_get(x_1351, 1); +lean_inc(x_1354); +lean_dec(x_1351); +lean_inc(x_5); +lean_inc(x_4); +lean_inc(x_2); +x_1355 = l_Lean_IR_ToIR_lowerLet___lambda__1(x_2, x_1347, x_1204, x_1353, x_1354, x_4, x_5, x_1352); +if (lean_obj_tag(x_1355) == 0) +{ +lean_object* x_1356; lean_object* x_1357; lean_object* x_1358; lean_object* x_1359; lean_object* x_1360; lean_object* x_1361; lean_object* x_1362; +x_1356 = lean_ctor_get(x_1355, 0); +lean_inc(x_1356); +x_1357 = lean_ctor_get(x_1355, 1); +lean_inc(x_1357); +lean_dec(x_1355); +x_1358 = lean_ctor_get(x_1356, 0); +lean_inc(x_1358); +x_1359 = lean_ctor_get(x_1356, 1); +lean_inc(x_1359); +if (lean_is_exclusive(x_1356)) { + lean_ctor_release(x_1356, 0); + lean_ctor_release(x_1356, 1); + x_1360 = x_1356; +} else { + lean_dec_ref(x_1356); + x_1360 = lean_box(0); +} +x_1361 = lean_alloc_ctor(1, 1, 0); +lean_ctor_set(x_1361, 0, x_1358); +if (lean_is_scalar(x_1360)) { + x_1362 = lean_alloc_ctor(0, 2, 0); +} else { + x_1362 = x_1360; +} +lean_ctor_set(x_1362, 0, x_1361); +lean_ctor_set(x_1362, 1, x_1359); +x_1173 = x_1362; +x_1174 = x_1357; +goto block_1203; +} +else +{ +lean_object* x_1363; lean_object* x_1364; lean_object* x_1365; lean_object* x_1366; +lean_dec(x_174); +lean_dec(x_163); +lean_dec(x_5); +lean_dec(x_4); +lean_dec(x_2); +lean_dec(x_1); +x_1363 = lean_ctor_get(x_1355, 0); +lean_inc(x_1363); +x_1364 = lean_ctor_get(x_1355, 1); +lean_inc(x_1364); +if (lean_is_exclusive(x_1355)) { + lean_ctor_release(x_1355, 0); + lean_ctor_release(x_1355, 1); + x_1365 = x_1355; +} else { + lean_dec_ref(x_1355); + x_1365 = lean_box(0); +} +if (lean_is_scalar(x_1365)) { + x_1366 = lean_alloc_ctor(1, 2, 0); +} else { + x_1366 = x_1365; 
+} +lean_ctor_set(x_1366, 0, x_1363); +lean_ctor_set(x_1366, 1, x_1364); +return x_1366; +} +} +else +{ +lean_object* x_1367; lean_object* x_1368; lean_object* x_1369; lean_object* x_1370; +lean_dec(x_1347); +lean_dec(x_1204); +lean_dec(x_174); +lean_dec(x_163); +lean_dec(x_5); +lean_dec(x_4); +lean_dec(x_2); +lean_dec(x_1); +x_1367 = lean_ctor_get(x_1350, 0); +lean_inc(x_1367); +x_1368 = lean_ctor_get(x_1350, 1); +lean_inc(x_1368); +if (lean_is_exclusive(x_1350)) { + lean_ctor_release(x_1350, 0); + lean_ctor_release(x_1350, 1); + x_1369 = x_1350; +} else { + lean_dec_ref(x_1350); + x_1369 = lean_box(0); +} +if (lean_is_scalar(x_1369)) { + x_1370 = lean_alloc_ctor(1, 2, 0); +} else { + x_1370 = x_1369; +} +lean_ctor_set(x_1370, 0, x_1367); +lean_ctor_set(x_1370, 1, x_1368); +return x_1370; +} +} +} +else +{ +lean_object* x_1371; lean_object* x_1372; lean_object* x_1373; lean_object* x_1374; lean_object* x_1375; lean_object* x_1376; lean_object* x_1377; lean_object* x_1378; +lean_dec(x_1304); +lean_dec(x_1302); +lean_inc(x_163); +lean_ctor_set_tag(x_1204, 7); +lean_ctor_set(x_1204, 1, x_163); +lean_ctor_set(x_1204, 0, x_153); +x_1371 = lean_ctor_get(x_1, 0); +lean_inc(x_1371); +x_1372 = l_Lean_IR_ToIR_bindVar(x_1371, x_169, x_4, x_5, x_1209); +x_1373 = lean_ctor_get(x_1372, 0); +lean_inc(x_1373); +x_1374 = lean_ctor_get(x_1372, 1); +lean_inc(x_1374); +lean_dec(x_1372); +x_1375 = lean_ctor_get(x_1373, 0); +lean_inc(x_1375); +x_1376 = lean_ctor_get(x_1373, 1); +lean_inc(x_1376); +lean_dec(x_1373); +x_1377 = lean_box(7); +lean_inc(x_5); +lean_inc(x_4); +lean_inc(x_2); +x_1378 = l_Lean_IR_ToIR_lowerLet___lambda__1(x_2, x_1375, x_1204, x_1377, x_1376, x_4, x_5, x_1374); +if (lean_obj_tag(x_1378) == 0) +{ +lean_object* x_1379; lean_object* x_1380; lean_object* x_1381; lean_object* x_1382; lean_object* x_1383; lean_object* x_1384; lean_object* x_1385; +x_1379 = lean_ctor_get(x_1378, 0); +lean_inc(x_1379); +x_1380 = lean_ctor_get(x_1378, 1); +lean_inc(x_1380); +lean_dec(x_1378); +x_1381 = lean_ctor_get(x_1379, 0); +lean_inc(x_1381); +x_1382 = lean_ctor_get(x_1379, 1); +lean_inc(x_1382); +if (lean_is_exclusive(x_1379)) { + lean_ctor_release(x_1379, 0); + lean_ctor_release(x_1379, 1); + x_1383 = x_1379; +} else { + lean_dec_ref(x_1379); + x_1383 = lean_box(0); +} +x_1384 = lean_alloc_ctor(1, 1, 0); +lean_ctor_set(x_1384, 0, x_1381); +if (lean_is_scalar(x_1383)) { + x_1385 = lean_alloc_ctor(0, 2, 0); +} else { + x_1385 = x_1383; +} +lean_ctor_set(x_1385, 0, x_1384); +lean_ctor_set(x_1385, 1, x_1382); +x_1173 = x_1385; +x_1174 = x_1380; +goto block_1203; +} +else +{ +lean_object* x_1386; lean_object* x_1387; lean_object* x_1388; lean_object* x_1389; +lean_dec(x_174); +lean_dec(x_163); +lean_dec(x_5); +lean_dec(x_4); +lean_dec(x_2); +lean_dec(x_1); +x_1386 = lean_ctor_get(x_1378, 0); +lean_inc(x_1386); +x_1387 = lean_ctor_get(x_1378, 1); +lean_inc(x_1387); +if (lean_is_exclusive(x_1378)) { + lean_ctor_release(x_1378, 0); + lean_ctor_release(x_1378, 1); + x_1388 = x_1378; +} else { + lean_dec_ref(x_1378); + x_1388 = lean_box(0); +} +if (lean_is_scalar(x_1388)) { + x_1389 = lean_alloc_ctor(1, 2, 0); +} else { + x_1389 = x_1388; +} +lean_ctor_set(x_1389, 0, x_1386); +lean_ctor_set(x_1389, 1, x_1387); +return x_1389; +} +} +} +} +else +{ +lean_object* x_1390; lean_object* x_1391; lean_object* x_1392; lean_object* x_1393; lean_object* x_1394; lean_object* x_1395; uint8_t x_1396; +x_1390 = lean_ctor_get(x_1204, 1); +lean_inc(x_1390); +lean_dec(x_1204); +x_1391 = lean_ctor_get(x_1205, 0); +lean_inc(x_1391); 
+if (lean_is_exclusive(x_1205)) { + lean_ctor_release(x_1205, 0); + x_1392 = x_1205; +} else { + lean_dec_ref(x_1205); + x_1392 = lean_box(0); +} +x_1393 = lean_array_get_size(x_163); +x_1394 = lean_ctor_get(x_1391, 3); +lean_inc(x_1394); +lean_dec(x_1391); +x_1395 = lean_array_get_size(x_1394); +lean_dec(x_1394); +x_1396 = lean_nat_dec_lt(x_1393, x_1395); +if (x_1396 == 0) +{ +uint8_t x_1397; +x_1397 = lean_nat_dec_eq(x_1393, x_1395); +if (x_1397 == 0) +{ +lean_object* x_1398; lean_object* x_1399; lean_object* x_1400; lean_object* x_1401; lean_object* x_1402; lean_object* x_1403; lean_object* x_1404; lean_object* x_1405; lean_object* x_1406; lean_object* x_1407; lean_object* x_1408; lean_object* x_1409; lean_object* x_1410; lean_object* x_1411; lean_object* x_1412; lean_object* x_1413; lean_object* x_1414; +x_1398 = lean_unsigned_to_nat(0u); +x_1399 = l_Array_extract___rarg(x_163, x_1398, x_1395); +x_1400 = l_Array_extract___rarg(x_163, x_1395, x_1393); +lean_dec(x_1393); +x_1401 = lean_alloc_ctor(6, 2, 0); +lean_ctor_set(x_1401, 0, x_153); +lean_ctor_set(x_1401, 1, x_1399); +x_1402 = lean_ctor_get(x_1, 0); +lean_inc(x_1402); +x_1403 = l_Lean_IR_ToIR_bindVar(x_1402, x_169, x_4, x_5, x_1390); +x_1404 = lean_ctor_get(x_1403, 0); +lean_inc(x_1404); +x_1405 = lean_ctor_get(x_1403, 1); +lean_inc(x_1405); +lean_dec(x_1403); +x_1406 = lean_ctor_get(x_1404, 0); +lean_inc(x_1406); +x_1407 = lean_ctor_get(x_1404, 1); +lean_inc(x_1407); +lean_dec(x_1404); +x_1408 = l_Lean_IR_ToIR_newVar(x_1407, x_4, x_5, x_1405); +x_1409 = lean_ctor_get(x_1408, 0); +lean_inc(x_1409); +x_1410 = lean_ctor_get(x_1408, 1); +lean_inc(x_1410); +lean_dec(x_1408); +x_1411 = lean_ctor_get(x_1409, 0); +lean_inc(x_1411); +x_1412 = lean_ctor_get(x_1409, 1); +lean_inc(x_1412); +lean_dec(x_1409); +x_1413 = lean_ctor_get(x_1, 2); +lean_inc(x_1413); +lean_inc(x_5); +lean_inc(x_4); +x_1414 = l_Lean_IR_ToIR_lowerType(x_1413, x_1412, x_4, x_5, x_1410); +if (lean_obj_tag(x_1414) == 0) +{ +lean_object* x_1415; lean_object* x_1416; lean_object* x_1417; lean_object* x_1418; lean_object* x_1419; +x_1415 = lean_ctor_get(x_1414, 0); +lean_inc(x_1415); +x_1416 = lean_ctor_get(x_1414, 1); +lean_inc(x_1416); +lean_dec(x_1414); +x_1417 = lean_ctor_get(x_1415, 0); +lean_inc(x_1417); +x_1418 = lean_ctor_get(x_1415, 1); +lean_inc(x_1418); +lean_dec(x_1415); +lean_inc(x_5); +lean_inc(x_4); +lean_inc(x_2); +x_1419 = l_Lean_IR_ToIR_lowerLet___lambda__3(x_2, x_1411, x_1400, x_1406, x_1401, x_1417, x_1418, x_4, x_5, x_1416); +if (lean_obj_tag(x_1419) == 0) +{ +lean_object* x_1420; lean_object* x_1421; lean_object* x_1422; lean_object* x_1423; lean_object* x_1424; lean_object* x_1425; lean_object* x_1426; +x_1420 = lean_ctor_get(x_1419, 0); +lean_inc(x_1420); +x_1421 = lean_ctor_get(x_1419, 1); +lean_inc(x_1421); +lean_dec(x_1419); +x_1422 = lean_ctor_get(x_1420, 0); +lean_inc(x_1422); +x_1423 = lean_ctor_get(x_1420, 1); +lean_inc(x_1423); +if (lean_is_exclusive(x_1420)) { + lean_ctor_release(x_1420, 0); + lean_ctor_release(x_1420, 1); + x_1424 = x_1420; +} else { + lean_dec_ref(x_1420); + x_1424 = lean_box(0); +} +if (lean_is_scalar(x_1392)) { + x_1425 = lean_alloc_ctor(1, 1, 0); +} else { + x_1425 = x_1392; +} +lean_ctor_set(x_1425, 0, x_1422); +if (lean_is_scalar(x_1424)) { + x_1426 = lean_alloc_ctor(0, 2, 0); +} else { + x_1426 = x_1424; +} +lean_ctor_set(x_1426, 0, x_1425); +lean_ctor_set(x_1426, 1, x_1423); +x_1173 = x_1426; +x_1174 = x_1421; +goto block_1203; +} +else +{ +lean_object* x_1427; lean_object* x_1428; lean_object* x_1429; lean_object* 
x_1430; +lean_dec(x_1392); +lean_dec(x_174); +lean_dec(x_163); +lean_dec(x_5); +lean_dec(x_4); +lean_dec(x_2); +lean_dec(x_1); +x_1427 = lean_ctor_get(x_1419, 0); +lean_inc(x_1427); +x_1428 = lean_ctor_get(x_1419, 1); +lean_inc(x_1428); +if (lean_is_exclusive(x_1419)) { + lean_ctor_release(x_1419, 0); + lean_ctor_release(x_1419, 1); + x_1429 = x_1419; +} else { + lean_dec_ref(x_1419); + x_1429 = lean_box(0); +} +if (lean_is_scalar(x_1429)) { + x_1430 = lean_alloc_ctor(1, 2, 0); +} else { + x_1430 = x_1429; +} +lean_ctor_set(x_1430, 0, x_1427); +lean_ctor_set(x_1430, 1, x_1428); +return x_1430; +} +} +else +{ +lean_object* x_1431; lean_object* x_1432; lean_object* x_1433; lean_object* x_1434; +lean_dec(x_1411); +lean_dec(x_1406); +lean_dec(x_1401); +lean_dec(x_1400); +lean_dec(x_1392); +lean_dec(x_174); +lean_dec(x_163); +lean_dec(x_5); +lean_dec(x_4); +lean_dec(x_2); +lean_dec(x_1); +x_1431 = lean_ctor_get(x_1414, 0); +lean_inc(x_1431); +x_1432 = lean_ctor_get(x_1414, 1); +lean_inc(x_1432); +if (lean_is_exclusive(x_1414)) { + lean_ctor_release(x_1414, 0); + lean_ctor_release(x_1414, 1); + x_1433 = x_1414; +} else { + lean_dec_ref(x_1414); + x_1433 = lean_box(0); +} +if (lean_is_scalar(x_1433)) { + x_1434 = lean_alloc_ctor(1, 2, 0); +} else { + x_1434 = x_1433; +} +lean_ctor_set(x_1434, 0, x_1431); +lean_ctor_set(x_1434, 1, x_1432); +return x_1434; +} +} +else +{ +lean_object* x_1435; lean_object* x_1436; lean_object* x_1437; lean_object* x_1438; lean_object* x_1439; lean_object* x_1440; lean_object* x_1441; lean_object* x_1442; lean_object* x_1443; +lean_dec(x_1395); +lean_dec(x_1393); +lean_inc(x_163); +x_1435 = lean_alloc_ctor(6, 2, 0); +lean_ctor_set(x_1435, 0, x_153); +lean_ctor_set(x_1435, 1, x_163); +x_1436 = lean_ctor_get(x_1, 0); +lean_inc(x_1436); +x_1437 = l_Lean_IR_ToIR_bindVar(x_1436, x_169, x_4, x_5, x_1390); +x_1438 = lean_ctor_get(x_1437, 0); +lean_inc(x_1438); +x_1439 = lean_ctor_get(x_1437, 1); +lean_inc(x_1439); +lean_dec(x_1437); +x_1440 = lean_ctor_get(x_1438, 0); +lean_inc(x_1440); +x_1441 = lean_ctor_get(x_1438, 1); +lean_inc(x_1441); +lean_dec(x_1438); +x_1442 = lean_ctor_get(x_1, 2); +lean_inc(x_1442); +lean_inc(x_5); +lean_inc(x_4); +x_1443 = l_Lean_IR_ToIR_lowerType(x_1442, x_1441, x_4, x_5, x_1439); +if (lean_obj_tag(x_1443) == 0) +{ +lean_object* x_1444; lean_object* x_1445; lean_object* x_1446; lean_object* x_1447; lean_object* x_1448; +x_1444 = lean_ctor_get(x_1443, 0); +lean_inc(x_1444); +x_1445 = lean_ctor_get(x_1443, 1); +lean_inc(x_1445); +lean_dec(x_1443); +x_1446 = lean_ctor_get(x_1444, 0); +lean_inc(x_1446); +x_1447 = lean_ctor_get(x_1444, 1); +lean_inc(x_1447); +lean_dec(x_1444); +lean_inc(x_5); +lean_inc(x_4); +lean_inc(x_2); +x_1448 = l_Lean_IR_ToIR_lowerLet___lambda__1(x_2, x_1440, x_1435, x_1446, x_1447, x_4, x_5, x_1445); +if (lean_obj_tag(x_1448) == 0) +{ +lean_object* x_1449; lean_object* x_1450; lean_object* x_1451; lean_object* x_1452; lean_object* x_1453; lean_object* x_1454; lean_object* x_1455; +x_1449 = lean_ctor_get(x_1448, 0); +lean_inc(x_1449); +x_1450 = lean_ctor_get(x_1448, 1); +lean_inc(x_1450); +lean_dec(x_1448); +x_1451 = lean_ctor_get(x_1449, 0); +lean_inc(x_1451); +x_1452 = lean_ctor_get(x_1449, 1); +lean_inc(x_1452); +if (lean_is_exclusive(x_1449)) { + lean_ctor_release(x_1449, 0); + lean_ctor_release(x_1449, 1); + x_1453 = x_1449; +} else { + lean_dec_ref(x_1449); + x_1453 = lean_box(0); +} +if (lean_is_scalar(x_1392)) { + x_1454 = lean_alloc_ctor(1, 1, 0); +} else { + x_1454 = x_1392; +} +lean_ctor_set(x_1454, 0, x_1451); +if 
(lean_is_scalar(x_1453)) { + x_1455 = lean_alloc_ctor(0, 2, 0); +} else { + x_1455 = x_1453; +} +lean_ctor_set(x_1455, 0, x_1454); +lean_ctor_set(x_1455, 1, x_1452); +x_1173 = x_1455; +x_1174 = x_1450; +goto block_1203; +} +else +{ +lean_object* x_1456; lean_object* x_1457; lean_object* x_1458; lean_object* x_1459; +lean_dec(x_1392); +lean_dec(x_174); +lean_dec(x_163); +lean_dec(x_5); +lean_dec(x_4); +lean_dec(x_2); +lean_dec(x_1); +x_1456 = lean_ctor_get(x_1448, 0); +lean_inc(x_1456); +x_1457 = lean_ctor_get(x_1448, 1); +lean_inc(x_1457); +if (lean_is_exclusive(x_1448)) { + lean_ctor_release(x_1448, 0); + lean_ctor_release(x_1448, 1); + x_1458 = x_1448; +} else { + lean_dec_ref(x_1448); + x_1458 = lean_box(0); +} +if (lean_is_scalar(x_1458)) { + x_1459 = lean_alloc_ctor(1, 2, 0); +} else { + x_1459 = x_1458; +} +lean_ctor_set(x_1459, 0, x_1456); +lean_ctor_set(x_1459, 1, x_1457); +return x_1459; +} +} +else +{ +lean_object* x_1460; lean_object* x_1461; lean_object* x_1462; lean_object* x_1463; +lean_dec(x_1440); +lean_dec(x_1435); +lean_dec(x_1392); +lean_dec(x_174); +lean_dec(x_163); +lean_dec(x_5); +lean_dec(x_4); +lean_dec(x_2); +lean_dec(x_1); +x_1460 = lean_ctor_get(x_1443, 0); +lean_inc(x_1460); +x_1461 = lean_ctor_get(x_1443, 1); +lean_inc(x_1461); +if (lean_is_exclusive(x_1443)) { + lean_ctor_release(x_1443, 0); + lean_ctor_release(x_1443, 1); + x_1462 = x_1443; +} else { + lean_dec_ref(x_1443); + x_1462 = lean_box(0); +} +if (lean_is_scalar(x_1462)) { + x_1463 = lean_alloc_ctor(1, 2, 0); +} else { + x_1463 = x_1462; +} +lean_ctor_set(x_1463, 0, x_1460); +lean_ctor_set(x_1463, 1, x_1461); +return x_1463; +} +} +} +else +{ +lean_object* x_1464; lean_object* x_1465; lean_object* x_1466; lean_object* x_1467; lean_object* x_1468; lean_object* x_1469; lean_object* x_1470; lean_object* x_1471; lean_object* x_1472; +lean_dec(x_1395); +lean_dec(x_1393); +lean_inc(x_163); +x_1464 = lean_alloc_ctor(7, 2, 0); +lean_ctor_set(x_1464, 0, x_153); +lean_ctor_set(x_1464, 1, x_163); +x_1465 = lean_ctor_get(x_1, 0); +lean_inc(x_1465); +x_1466 = l_Lean_IR_ToIR_bindVar(x_1465, x_169, x_4, x_5, x_1390); +x_1467 = lean_ctor_get(x_1466, 0); +lean_inc(x_1467); +x_1468 = lean_ctor_get(x_1466, 1); +lean_inc(x_1468); +lean_dec(x_1466); +x_1469 = lean_ctor_get(x_1467, 0); +lean_inc(x_1469); +x_1470 = lean_ctor_get(x_1467, 1); +lean_inc(x_1470); +lean_dec(x_1467); +x_1471 = lean_box(7); +lean_inc(x_5); +lean_inc(x_4); +lean_inc(x_2); +x_1472 = l_Lean_IR_ToIR_lowerLet___lambda__1(x_2, x_1469, x_1464, x_1471, x_1470, x_4, x_5, x_1468); +if (lean_obj_tag(x_1472) == 0) +{ +lean_object* x_1473; lean_object* x_1474; lean_object* x_1475; lean_object* x_1476; lean_object* x_1477; lean_object* x_1478; lean_object* x_1479; +x_1473 = lean_ctor_get(x_1472, 0); +lean_inc(x_1473); +x_1474 = lean_ctor_get(x_1472, 1); +lean_inc(x_1474); +lean_dec(x_1472); +x_1475 = lean_ctor_get(x_1473, 0); +lean_inc(x_1475); +x_1476 = lean_ctor_get(x_1473, 1); +lean_inc(x_1476); +if (lean_is_exclusive(x_1473)) { + lean_ctor_release(x_1473, 0); + lean_ctor_release(x_1473, 1); + x_1477 = x_1473; +} else { + lean_dec_ref(x_1473); + x_1477 = lean_box(0); +} +if (lean_is_scalar(x_1392)) { + x_1478 = lean_alloc_ctor(1, 1, 0); +} else { + x_1478 = x_1392; +} +lean_ctor_set(x_1478, 0, x_1475); +if (lean_is_scalar(x_1477)) { + x_1479 = lean_alloc_ctor(0, 2, 0); +} else { + x_1479 = x_1477; +} +lean_ctor_set(x_1479, 0, x_1478); +lean_ctor_set(x_1479, 1, x_1476); +x_1173 = x_1479; +x_1174 = x_1474; +goto block_1203; +} +else +{ +lean_object* x_1480; 
lean_object* x_1481; lean_object* x_1482; lean_object* x_1483; +lean_dec(x_1392); +lean_dec(x_174); +lean_dec(x_163); +lean_dec(x_5); +lean_dec(x_4); +lean_dec(x_2); +lean_dec(x_1); +x_1480 = lean_ctor_get(x_1472, 0); +lean_inc(x_1480); +x_1481 = lean_ctor_get(x_1472, 1); +lean_inc(x_1481); +if (lean_is_exclusive(x_1472)) { + lean_ctor_release(x_1472, 0); + lean_ctor_release(x_1472, 1); + x_1482 = x_1472; +} else { + lean_dec_ref(x_1472); + x_1482 = lean_box(0); +} +if (lean_is_scalar(x_1482)) { + x_1483 = lean_alloc_ctor(1, 2, 0); +} else { + x_1483 = x_1482; +} +lean_ctor_set(x_1483, 0, x_1480); +lean_ctor_set(x_1483, 1, x_1481); +return x_1483; +} +} +} +} +block_1203: +{ +lean_object* x_1175; +x_1175 = lean_ctor_get(x_1173, 0); +lean_inc(x_1175); +if (lean_obj_tag(x_1175) == 0) +{ +lean_object* x_1176; lean_object* x_1177; lean_object* x_1178; lean_object* x_1179; lean_object* x_1180; lean_object* x_1181; lean_object* x_1182; lean_object* x_1183; lean_object* x_1184; lean_object* x_1185; +lean_dec(x_174); +x_1176 = lean_ctor_get(x_1173, 1); +lean_inc(x_1176); +lean_dec(x_1173); +x_1177 = lean_alloc_ctor(6, 2, 0); +lean_ctor_set(x_1177, 0, x_153); +lean_ctor_set(x_1177, 1, x_163); +x_1178 = lean_ctor_get(x_1, 0); +lean_inc(x_1178); +x_1179 = l_Lean_IR_ToIR_bindVar(x_1178, x_1176, x_4, x_5, x_1174); +x_1180 = lean_ctor_get(x_1179, 0); +lean_inc(x_1180); +x_1181 = lean_ctor_get(x_1179, 1); +lean_inc(x_1181); +lean_dec(x_1179); +x_1182 = lean_ctor_get(x_1180, 0); +lean_inc(x_1182); +x_1183 = lean_ctor_get(x_1180, 1); +lean_inc(x_1183); +lean_dec(x_1180); +x_1184 = lean_ctor_get(x_1, 2); +lean_inc(x_1184); +lean_dec(x_1); +lean_inc(x_5); +lean_inc(x_4); +x_1185 = l_Lean_IR_ToIR_lowerType(x_1184, x_1183, x_4, x_5, x_1181); +if (lean_obj_tag(x_1185) == 0) +{ +lean_object* x_1186; lean_object* x_1187; lean_object* x_1188; lean_object* x_1189; lean_object* x_1190; +x_1186 = lean_ctor_get(x_1185, 0); +lean_inc(x_1186); +x_1187 = lean_ctor_get(x_1185, 1); +lean_inc(x_1187); +lean_dec(x_1185); +x_1188 = lean_ctor_get(x_1186, 0); +lean_inc(x_1188); +x_1189 = lean_ctor_get(x_1186, 1); +lean_inc(x_1189); +lean_dec(x_1186); +x_1190 = l_Lean_IR_ToIR_lowerLet___lambda__1(x_2, x_1182, x_1177, x_1188, x_1189, x_4, x_5, x_1187); +return x_1190; +} +else +{ +uint8_t x_1191; +lean_dec(x_1182); +lean_dec(x_1177); +lean_dec(x_5); +lean_dec(x_4); +lean_dec(x_2); +x_1191 = !lean_is_exclusive(x_1185); +if (x_1191 == 0) +{ +return x_1185; +} +else +{ +lean_object* x_1192; lean_object* x_1193; lean_object* x_1194; +x_1192 = lean_ctor_get(x_1185, 0); +x_1193 = lean_ctor_get(x_1185, 1); +lean_inc(x_1193); +lean_inc(x_1192); +lean_dec(x_1185); +x_1194 = lean_alloc_ctor(1, 2, 0); +lean_ctor_set(x_1194, 0, x_1192); +lean_ctor_set(x_1194, 1, x_1193); +return x_1194; +} +} +} +else +{ +uint8_t x_1195; +lean_dec(x_163); +lean_dec(x_5); +lean_dec(x_4); +lean_dec(x_2); +lean_dec(x_1); +x_1195 = !lean_is_exclusive(x_1173); +if (x_1195 == 0) +{ +lean_object* x_1196; lean_object* x_1197; lean_object* x_1198; +x_1196 = lean_ctor_get(x_1173, 0); +lean_dec(x_1196); +x_1197 = lean_ctor_get(x_1175, 0); +lean_inc(x_1197); +lean_dec(x_1175); +lean_ctor_set(x_1173, 0, x_1197); +if (lean_is_scalar(x_174)) { + x_1198 = lean_alloc_ctor(0, 2, 0); +} else { + x_1198 = x_174; +} +lean_ctor_set(x_1198, 0, x_1173); +lean_ctor_set(x_1198, 1, x_1174); +return x_1198; +} +else +{ +lean_object* x_1199; lean_object* x_1200; lean_object* x_1201; lean_object* x_1202; +x_1199 = lean_ctor_get(x_1173, 1); +lean_inc(x_1199); +lean_dec(x_1173); +x_1200 = 
lean_ctor_get(x_1175, 0); +lean_inc(x_1200); +lean_dec(x_1175); +x_1201 = lean_alloc_ctor(0, 2, 0); +lean_ctor_set(x_1201, 0, x_1200); +lean_ctor_set(x_1201, 1, x_1199); +if (lean_is_scalar(x_174)) { + x_1202 = lean_alloc_ctor(0, 2, 0); +} else { + x_1202 = x_174; +} +lean_ctor_set(x_1202, 0, x_1201); +lean_ctor_set(x_1202, 1, x_1174); +return x_1202; +} +} +} +} +} +default: +{ +uint8_t x_1484; +lean_dec(x_175); +lean_dec(x_174); +lean_free_object(x_165); +lean_dec(x_163); +lean_dec(x_155); +lean_dec(x_154); +lean_dec(x_2); +lean_dec(x_1); +x_1484 = !lean_is_exclusive(x_180); +if (x_1484 == 0) +{ +lean_object* x_1485; uint8_t x_1486; lean_object* x_1487; lean_object* x_1488; lean_object* x_1489; lean_object* x_1490; lean_object* x_1491; lean_object* x_1492; lean_object* x_1493; lean_object* x_1494; +x_1485 = lean_ctor_get(x_180, 0); +lean_dec(x_1485); +x_1486 = 1; +x_1487 = l_Lean_IR_ToIR_lowerLet___closed__14; +x_1488 = l_Lean_Name_toString(x_153, x_1486, x_1487); +lean_ctor_set_tag(x_180, 3); +lean_ctor_set(x_180, 0, x_1488); +x_1489 = l_Lean_IR_ToIR_lowerLet___closed__29; +x_1490 = lean_alloc_ctor(5, 2, 0); +lean_ctor_set(x_1490, 0, x_1489); +lean_ctor_set(x_1490, 1, x_180); +x_1491 = l_Lean_IR_ToIR_lowerLet___closed__31; +x_1492 = lean_alloc_ctor(5, 2, 0); +lean_ctor_set(x_1492, 0, x_1490); +lean_ctor_set(x_1492, 1, x_1491); +x_1493 = l_Lean_MessageData_ofFormat(x_1492); +x_1494 = l_Lean_throwError___at_Lean_IR_ToIR_lowerLet___spec__1(x_1493, x_169, x_4, x_5, x_173); +lean_dec(x_5); +lean_dec(x_4); +lean_dec(x_169); +return x_1494; +} +else +{ +uint8_t x_1495; lean_object* x_1496; lean_object* x_1497; lean_object* x_1498; lean_object* x_1499; lean_object* x_1500; lean_object* x_1501; lean_object* x_1502; lean_object* x_1503; lean_object* x_1504; +lean_dec(x_180); +x_1495 = 1; +x_1496 = l_Lean_IR_ToIR_lowerLet___closed__14; +x_1497 = l_Lean_Name_toString(x_153, x_1495, x_1496); +x_1498 = lean_alloc_ctor(3, 1, 0); +lean_ctor_set(x_1498, 0, x_1497); +x_1499 = l_Lean_IR_ToIR_lowerLet___closed__29; +x_1500 = lean_alloc_ctor(5, 2, 0); +lean_ctor_set(x_1500, 0, x_1499); +lean_ctor_set(x_1500, 1, x_1498); +x_1501 = l_Lean_IR_ToIR_lowerLet___closed__31; +x_1502 = lean_alloc_ctor(5, 2, 0); +lean_ctor_set(x_1502, 0, x_1500); +lean_ctor_set(x_1502, 1, x_1501); +x_1503 = l_Lean_MessageData_ofFormat(x_1502); +x_1504 = l_Lean_throwError___at_Lean_IR_ToIR_lowerLet___spec__1(x_1503, x_169, x_4, x_5, x_173); +lean_dec(x_5); +lean_dec(x_4); +lean_dec(x_169); +return x_1504; +} +} +} +} +} +else +{ +lean_object* x_1505; lean_object* x_1506; lean_object* x_1507; lean_object* x_1508; lean_object* x_1509; lean_object* x_1510; uint8_t x_1511; lean_object* x_1512; +x_1505 = lean_ctor_get(x_165, 1); +lean_inc(x_1505); +lean_dec(x_165); +x_1506 = lean_st_ref_get(x_5, x_166); +x_1507 = lean_ctor_get(x_1506, 0); +lean_inc(x_1507); +x_1508 = lean_ctor_get(x_1506, 1); +lean_inc(x_1508); +if (lean_is_exclusive(x_1506)) { + lean_ctor_release(x_1506, 0); + lean_ctor_release(x_1506, 1); + x_1509 = x_1506; +} else { + lean_dec_ref(x_1506); + x_1509 = lean_box(0); +} +x_1510 = lean_ctor_get(x_1507, 0); +lean_inc(x_1510); +lean_dec(x_1507); +x_1511 = 0; +lean_inc(x_1510); +x_1512 = l_Lean_Environment_find_x3f(x_1510, x_153, x_1511); +if (lean_obj_tag(x_1512) == 0) +{ +lean_object* x_1513; lean_object* x_1514; +lean_dec(x_1510); +lean_dec(x_1509); +lean_dec(x_163); +lean_dec(x_155); +lean_dec(x_154); +lean_dec(x_2); +lean_dec(x_1); +x_1513 = l_Lean_IR_ToIR_lowerLet___closed__6; +x_1514 = 
l_panic___at_Lean_IR_ToIR_lowerAlt_loop___spec__1(x_1513, x_1505, x_4, x_5, x_1508); +return x_1514; +} +else +{ +lean_object* x_1515; +x_1515 = lean_ctor_get(x_1512, 0); +lean_inc(x_1515); +lean_dec(x_1512); +switch (lean_obj_tag(x_1515)) { +case 0: +{ +lean_object* x_1516; lean_object* x_1517; uint8_t x_1518; +lean_dec(x_1510); +lean_dec(x_155); +lean_dec(x_154); +if (lean_is_exclusive(x_1515)) { + lean_ctor_release(x_1515, 0); + x_1516 = x_1515; +} else { + lean_dec_ref(x_1515); + x_1516 = lean_box(0); +} +x_1517 = l_Lean_IR_ToIR_lowerLet___closed__9; +x_1518 = lean_name_eq(x_153, x_1517); +if (x_1518 == 0) +{ +lean_object* x_1519; uint8_t x_1520; +x_1519 = l_Lean_IR_ToIR_lowerLet___closed__11; +x_1520 = lean_name_eq(x_153, x_1519); +if (x_1520 == 0) +{ +lean_object* x_1521; lean_object* x_1522; lean_object* x_1523; +lean_dec(x_1509); +x_1521 = l_Lean_IR_ToIR_findDecl(x_153, x_1505, x_4, x_5, x_1508); +x_1522 = lean_ctor_get(x_1521, 0); +lean_inc(x_1522); +x_1523 = lean_ctor_get(x_1522, 0); +lean_inc(x_1523); +if (lean_obj_tag(x_1523) == 0) +{ +lean_object* x_1524; lean_object* x_1525; lean_object* x_1526; lean_object* x_1527; uint8_t x_1528; lean_object* x_1529; lean_object* x_1530; lean_object* x_1531; lean_object* x_1532; lean_object* x_1533; lean_object* x_1534; lean_object* x_1535; lean_object* x_1536; lean_object* x_1537; +lean_dec(x_163); +lean_dec(x_2); +lean_dec(x_1); +x_1524 = lean_ctor_get(x_1521, 1); +lean_inc(x_1524); +if (lean_is_exclusive(x_1521)) { + lean_ctor_release(x_1521, 0); + lean_ctor_release(x_1521, 1); + x_1525 = x_1521; +} else { + lean_dec_ref(x_1521); + x_1525 = lean_box(0); +} +x_1526 = lean_ctor_get(x_1522, 1); +lean_inc(x_1526); +if (lean_is_exclusive(x_1522)) { + lean_ctor_release(x_1522, 0); + lean_ctor_release(x_1522, 1); + x_1527 = x_1522; +} else { + lean_dec_ref(x_1522); + x_1527 = lean_box(0); +} +x_1528 = 1; +x_1529 = l_Lean_IR_ToIR_lowerLet___closed__14; +x_1530 = l_Lean_Name_toString(x_153, x_1528, x_1529); +if (lean_is_scalar(x_1516)) { + x_1531 = lean_alloc_ctor(3, 1, 0); +} else { + x_1531 = x_1516; + lean_ctor_set_tag(x_1531, 3); +} +lean_ctor_set(x_1531, 0, x_1530); +x_1532 = l_Lean_IR_ToIR_lowerLet___closed__13; +if (lean_is_scalar(x_1527)) { + x_1533 = lean_alloc_ctor(5, 2, 0); +} else { + x_1533 = x_1527; + lean_ctor_set_tag(x_1533, 5); +} +lean_ctor_set(x_1533, 0, x_1532); +lean_ctor_set(x_1533, 1, x_1531); +x_1534 = l_Lean_IR_ToIR_lowerLet___closed__16; +if (lean_is_scalar(x_1525)) { + x_1535 = lean_alloc_ctor(5, 2, 0); +} else { + x_1535 = x_1525; + lean_ctor_set_tag(x_1535, 5); +} +lean_ctor_set(x_1535, 0, x_1533); +lean_ctor_set(x_1535, 1, x_1534); +x_1536 = l_Lean_MessageData_ofFormat(x_1535); +x_1537 = l_Lean_throwError___at_Lean_IR_ToIR_lowerLet___spec__1(x_1536, x_1526, x_4, x_5, x_1524); +lean_dec(x_5); +lean_dec(x_4); +lean_dec(x_1526); +return x_1537; +} +else +{ +lean_object* x_1538; lean_object* x_1539; lean_object* x_1540; lean_object* x_1541; lean_object* x_1542; lean_object* x_1543; lean_object* x_1544; uint8_t x_1545; +lean_dec(x_1516); +x_1538 = lean_ctor_get(x_1521, 1); +lean_inc(x_1538); +lean_dec(x_1521); +x_1539 = lean_ctor_get(x_1522, 1); +lean_inc(x_1539); +if (lean_is_exclusive(x_1522)) { + lean_ctor_release(x_1522, 0); + lean_ctor_release(x_1522, 1); + x_1540 = x_1522; +} else { + lean_dec_ref(x_1522); + x_1540 = lean_box(0); +} +x_1541 = lean_ctor_get(x_1523, 0); +lean_inc(x_1541); +lean_dec(x_1523); +x_1542 = lean_array_get_size(x_163); +x_1543 = l_Lean_IR_Decl_params(x_1541); +lean_dec(x_1541); +x_1544 = 
lean_array_get_size(x_1543); +lean_dec(x_1543); +x_1545 = lean_nat_dec_lt(x_1542, x_1544); +if (x_1545 == 0) +{ +uint8_t x_1546; +x_1546 = lean_nat_dec_eq(x_1542, x_1544); +if (x_1546 == 0) +{ +lean_object* x_1547; lean_object* x_1548; lean_object* x_1549; lean_object* x_1550; lean_object* x_1551; lean_object* x_1552; lean_object* x_1553; lean_object* x_1554; lean_object* x_1555; lean_object* x_1556; lean_object* x_1557; lean_object* x_1558; lean_object* x_1559; lean_object* x_1560; lean_object* x_1561; lean_object* x_1562; lean_object* x_1563; +x_1547 = lean_unsigned_to_nat(0u); +x_1548 = l_Array_extract___rarg(x_163, x_1547, x_1544); +x_1549 = l_Array_extract___rarg(x_163, x_1544, x_1542); +lean_dec(x_1542); +lean_dec(x_163); +if (lean_is_scalar(x_1540)) { + x_1550 = lean_alloc_ctor(6, 2, 0); +} else { + x_1550 = x_1540; + lean_ctor_set_tag(x_1550, 6); +} +lean_ctor_set(x_1550, 0, x_153); +lean_ctor_set(x_1550, 1, x_1548); +x_1551 = lean_ctor_get(x_1, 0); +lean_inc(x_1551); +x_1552 = l_Lean_IR_ToIR_bindVar(x_1551, x_1539, x_4, x_5, x_1538); +x_1553 = lean_ctor_get(x_1552, 0); +lean_inc(x_1553); +x_1554 = lean_ctor_get(x_1552, 1); +lean_inc(x_1554); +lean_dec(x_1552); +x_1555 = lean_ctor_get(x_1553, 0); +lean_inc(x_1555); +x_1556 = lean_ctor_get(x_1553, 1); +lean_inc(x_1556); +lean_dec(x_1553); +x_1557 = l_Lean_IR_ToIR_newVar(x_1556, x_4, x_5, x_1554); +x_1558 = lean_ctor_get(x_1557, 0); +lean_inc(x_1558); +x_1559 = lean_ctor_get(x_1557, 1); +lean_inc(x_1559); +lean_dec(x_1557); +x_1560 = lean_ctor_get(x_1558, 0); +lean_inc(x_1560); +x_1561 = lean_ctor_get(x_1558, 1); +lean_inc(x_1561); +lean_dec(x_1558); +x_1562 = lean_ctor_get(x_1, 2); +lean_inc(x_1562); +lean_dec(x_1); +lean_inc(x_5); +lean_inc(x_4); +x_1563 = l_Lean_IR_ToIR_lowerType(x_1562, x_1561, x_4, x_5, x_1559); +if (lean_obj_tag(x_1563) == 0) +{ +lean_object* x_1564; lean_object* x_1565; lean_object* x_1566; lean_object* x_1567; lean_object* x_1568; +x_1564 = lean_ctor_get(x_1563, 0); +lean_inc(x_1564); +x_1565 = lean_ctor_get(x_1563, 1); +lean_inc(x_1565); +lean_dec(x_1563); +x_1566 = lean_ctor_get(x_1564, 0); +lean_inc(x_1566); +x_1567 = lean_ctor_get(x_1564, 1); +lean_inc(x_1567); +lean_dec(x_1564); +x_1568 = l_Lean_IR_ToIR_lowerLet___lambda__3(x_2, x_1560, x_1549, x_1555, x_1550, x_1566, x_1567, x_4, x_5, x_1565); +return x_1568; +} +else +{ +lean_object* x_1569; lean_object* x_1570; lean_object* x_1571; lean_object* x_1572; +lean_dec(x_1560); +lean_dec(x_1555); +lean_dec(x_1550); +lean_dec(x_1549); +lean_dec(x_5); +lean_dec(x_4); +lean_dec(x_2); +x_1569 = lean_ctor_get(x_1563, 0); +lean_inc(x_1569); +x_1570 = lean_ctor_get(x_1563, 1); +lean_inc(x_1570); +if (lean_is_exclusive(x_1563)) { + lean_ctor_release(x_1563, 0); + lean_ctor_release(x_1563, 1); + x_1571 = x_1563; +} else { + lean_dec_ref(x_1563); + x_1571 = lean_box(0); +} +if (lean_is_scalar(x_1571)) { + x_1572 = lean_alloc_ctor(1, 2, 0); +} else { + x_1572 = x_1571; +} +lean_ctor_set(x_1572, 0, x_1569); +lean_ctor_set(x_1572, 1, x_1570); +return x_1572; +} +} +else +{ +lean_object* x_1573; lean_object* x_1574; lean_object* x_1575; lean_object* x_1576; lean_object* x_1577; lean_object* x_1578; lean_object* x_1579; lean_object* x_1580; lean_object* x_1581; +lean_dec(x_1544); +lean_dec(x_1542); +if (lean_is_scalar(x_1540)) { + x_1573 = lean_alloc_ctor(6, 2, 0); +} else { + x_1573 = x_1540; + lean_ctor_set_tag(x_1573, 6); +} +lean_ctor_set(x_1573, 0, x_153); +lean_ctor_set(x_1573, 1, x_163); +x_1574 = lean_ctor_get(x_1, 0); +lean_inc(x_1574); +x_1575 = 
l_Lean_IR_ToIR_bindVar(x_1574, x_1539, x_4, x_5, x_1538); +x_1576 = lean_ctor_get(x_1575, 0); +lean_inc(x_1576); +x_1577 = lean_ctor_get(x_1575, 1); +lean_inc(x_1577); +lean_dec(x_1575); +x_1578 = lean_ctor_get(x_1576, 0); +lean_inc(x_1578); +x_1579 = lean_ctor_get(x_1576, 1); +lean_inc(x_1579); +lean_dec(x_1576); +x_1580 = lean_ctor_get(x_1, 2); +lean_inc(x_1580); +lean_dec(x_1); +lean_inc(x_5); +lean_inc(x_4); +x_1581 = l_Lean_IR_ToIR_lowerType(x_1580, x_1579, x_4, x_5, x_1577); +if (lean_obj_tag(x_1581) == 0) +{ +lean_object* x_1582; lean_object* x_1583; lean_object* x_1584; lean_object* x_1585; lean_object* x_1586; +x_1582 = lean_ctor_get(x_1581, 0); +lean_inc(x_1582); +x_1583 = lean_ctor_get(x_1581, 1); +lean_inc(x_1583); +lean_dec(x_1581); +x_1584 = lean_ctor_get(x_1582, 0); +lean_inc(x_1584); +x_1585 = lean_ctor_get(x_1582, 1); +lean_inc(x_1585); +lean_dec(x_1582); +x_1586 = l_Lean_IR_ToIR_lowerLet___lambda__1(x_2, x_1578, x_1573, x_1584, x_1585, x_4, x_5, x_1583); +return x_1586; +} +else +{ +lean_object* x_1587; lean_object* x_1588; lean_object* x_1589; lean_object* x_1590; +lean_dec(x_1578); +lean_dec(x_1573); +lean_dec(x_5); +lean_dec(x_4); +lean_dec(x_2); +x_1587 = lean_ctor_get(x_1581, 0); +lean_inc(x_1587); +x_1588 = lean_ctor_get(x_1581, 1); +lean_inc(x_1588); +if (lean_is_exclusive(x_1581)) { + lean_ctor_release(x_1581, 0); + lean_ctor_release(x_1581, 1); + x_1589 = x_1581; +} else { + lean_dec_ref(x_1581); + x_1589 = lean_box(0); +} +if (lean_is_scalar(x_1589)) { + x_1590 = lean_alloc_ctor(1, 2, 0); +} else { + x_1590 = x_1589; +} +lean_ctor_set(x_1590, 0, x_1587); +lean_ctor_set(x_1590, 1, x_1588); +return x_1590; +} +} +} +else +{ +lean_object* x_1591; lean_object* x_1592; lean_object* x_1593; lean_object* x_1594; lean_object* x_1595; lean_object* x_1596; lean_object* x_1597; lean_object* x_1598; lean_object* x_1599; +lean_dec(x_1544); +lean_dec(x_1542); +if (lean_is_scalar(x_1540)) { + x_1591 = lean_alloc_ctor(7, 2, 0); +} else { + x_1591 = x_1540; + lean_ctor_set_tag(x_1591, 7); +} +lean_ctor_set(x_1591, 0, x_153); +lean_ctor_set(x_1591, 1, x_163); +x_1592 = lean_ctor_get(x_1, 0); +lean_inc(x_1592); +lean_dec(x_1); +x_1593 = l_Lean_IR_ToIR_bindVar(x_1592, x_1539, x_4, x_5, x_1538); +x_1594 = lean_ctor_get(x_1593, 0); +lean_inc(x_1594); +x_1595 = lean_ctor_get(x_1593, 1); +lean_inc(x_1595); +lean_dec(x_1593); +x_1596 = lean_ctor_get(x_1594, 0); +lean_inc(x_1596); +x_1597 = lean_ctor_get(x_1594, 1); +lean_inc(x_1597); +lean_dec(x_1594); +x_1598 = lean_box(7); +x_1599 = l_Lean_IR_ToIR_lowerLet___lambda__1(x_2, x_1596, x_1591, x_1598, x_1597, x_4, x_5, x_1595); +return x_1599; +} +} +} +else +{ +lean_object* x_1600; lean_object* x_1601; lean_object* x_1602; +lean_dec(x_1516); +lean_dec(x_163); +lean_dec(x_5); +lean_dec(x_4); +lean_dec(x_2); +lean_dec(x_1); +x_1600 = lean_box(13); +x_1601 = lean_alloc_ctor(0, 2, 0); +lean_ctor_set(x_1601, 0, x_1600); +lean_ctor_set(x_1601, 1, x_1505); +if (lean_is_scalar(x_1509)) { + x_1602 = lean_alloc_ctor(0, 2, 0); +} else { + x_1602 = x_1509; +} +lean_ctor_set(x_1602, 0, x_1601); +lean_ctor_set(x_1602, 1, x_1508); +return x_1602; +} +} +else +{ +lean_object* x_1603; lean_object* x_1604; lean_object* x_1605; +lean_dec(x_1516); +lean_dec(x_1509); +x_1603 = l_Lean_IR_instInhabitedArg; +x_1604 = lean_unsigned_to_nat(2u); +x_1605 = lean_array_get(x_1603, x_163, x_1604); +lean_dec(x_163); +if (lean_obj_tag(x_1605) == 0) +{ +lean_object* x_1606; lean_object* x_1607; lean_object* x_1608; lean_object* x_1609; lean_object* x_1610; lean_object* 
x_1611; lean_object* x_1612; +x_1606 = lean_ctor_get(x_1605, 0); +lean_inc(x_1606); +lean_dec(x_1605); +x_1607 = lean_ctor_get(x_1, 0); +lean_inc(x_1607); +lean_dec(x_1); +x_1608 = l_Lean_IR_ToIR_bindVarToVarId(x_1607, x_1606, x_1505, x_4, x_5, x_1508); +x_1609 = lean_ctor_get(x_1608, 0); +lean_inc(x_1609); +x_1610 = lean_ctor_get(x_1608, 1); +lean_inc(x_1610); +lean_dec(x_1608); +x_1611 = lean_ctor_get(x_1609, 1); +lean_inc(x_1611); +lean_dec(x_1609); +x_1612 = l_Lean_IR_ToIR_lowerCode(x_2, x_1611, x_4, x_5, x_1610); +return x_1612; +} +else +{ +lean_object* x_1613; lean_object* x_1614; lean_object* x_1615; lean_object* x_1616; lean_object* x_1617; lean_object* x_1618; +x_1613 = lean_ctor_get(x_1, 0); +lean_inc(x_1613); +lean_dec(x_1); +x_1614 = l_Lean_IR_ToIR_bindErased(x_1613, x_1505, x_4, x_5, x_1508); +x_1615 = lean_ctor_get(x_1614, 0); +lean_inc(x_1615); +x_1616 = lean_ctor_get(x_1614, 1); +lean_inc(x_1616); +lean_dec(x_1614); +x_1617 = lean_ctor_get(x_1615, 1); +lean_inc(x_1617); +lean_dec(x_1615); +x_1618 = l_Lean_IR_ToIR_lowerCode(x_2, x_1617, x_4, x_5, x_1616); +return x_1618; +} +} +} +case 1: +{ +lean_object* x_1619; lean_object* x_1620; lean_object* x_1647; lean_object* x_1648; +lean_dec(x_1515); +lean_dec(x_1510); +lean_dec(x_155); +lean_dec(x_154); +x_1647 = l_Lean_Compiler_LCNF_getMonoDecl_x3f(x_153, x_4, x_5, x_1508); +x_1648 = lean_ctor_get(x_1647, 0); +lean_inc(x_1648); +if (lean_obj_tag(x_1648) == 0) +{ +lean_object* x_1649; lean_object* x_1650; lean_object* x_1651; +x_1649 = lean_ctor_get(x_1647, 1); +lean_inc(x_1649); +lean_dec(x_1647); +x_1650 = lean_box(0); +x_1651 = lean_alloc_ctor(0, 2, 0); +lean_ctor_set(x_1651, 0, x_1650); +lean_ctor_set(x_1651, 1, x_1505); +x_1619 = x_1651; +x_1620 = x_1649; +goto block_1646; +} +else +{ +lean_object* x_1652; lean_object* x_1653; lean_object* x_1654; lean_object* x_1655; lean_object* x_1656; lean_object* x_1657; lean_object* x_1658; uint8_t x_1659; +x_1652 = lean_ctor_get(x_1647, 1); +lean_inc(x_1652); +if (lean_is_exclusive(x_1647)) { + lean_ctor_release(x_1647, 0); + lean_ctor_release(x_1647, 1); + x_1653 = x_1647; +} else { + lean_dec_ref(x_1647); + x_1653 = lean_box(0); +} +x_1654 = lean_ctor_get(x_1648, 0); +lean_inc(x_1654); +if (lean_is_exclusive(x_1648)) { + lean_ctor_release(x_1648, 0); + x_1655 = x_1648; +} else { + lean_dec_ref(x_1648); + x_1655 = lean_box(0); +} +x_1656 = lean_array_get_size(x_163); +x_1657 = lean_ctor_get(x_1654, 3); +lean_inc(x_1657); +lean_dec(x_1654); +x_1658 = lean_array_get_size(x_1657); +lean_dec(x_1657); +x_1659 = lean_nat_dec_lt(x_1656, x_1658); +if (x_1659 == 0) +{ +uint8_t x_1660; +x_1660 = lean_nat_dec_eq(x_1656, x_1658); +if (x_1660 == 0) +{ +lean_object* x_1661; lean_object* x_1662; lean_object* x_1663; lean_object* x_1664; lean_object* x_1665; lean_object* x_1666; lean_object* x_1667; lean_object* x_1668; lean_object* x_1669; lean_object* x_1670; lean_object* x_1671; lean_object* x_1672; lean_object* x_1673; lean_object* x_1674; lean_object* x_1675; lean_object* x_1676; lean_object* x_1677; +x_1661 = lean_unsigned_to_nat(0u); +x_1662 = l_Array_extract___rarg(x_163, x_1661, x_1658); +x_1663 = l_Array_extract___rarg(x_163, x_1658, x_1656); +lean_dec(x_1656); +if (lean_is_scalar(x_1653)) { + x_1664 = lean_alloc_ctor(6, 2, 0); +} else { + x_1664 = x_1653; + lean_ctor_set_tag(x_1664, 6); +} +lean_ctor_set(x_1664, 0, x_153); +lean_ctor_set(x_1664, 1, x_1662); +x_1665 = lean_ctor_get(x_1, 0); +lean_inc(x_1665); +x_1666 = l_Lean_IR_ToIR_bindVar(x_1665, x_1505, x_4, x_5, x_1652); +x_1667 = 
lean_ctor_get(x_1666, 0); +lean_inc(x_1667); +x_1668 = lean_ctor_get(x_1666, 1); +lean_inc(x_1668); +lean_dec(x_1666); +x_1669 = lean_ctor_get(x_1667, 0); +lean_inc(x_1669); +x_1670 = lean_ctor_get(x_1667, 1); +lean_inc(x_1670); +lean_dec(x_1667); +x_1671 = l_Lean_IR_ToIR_newVar(x_1670, x_4, x_5, x_1668); +x_1672 = lean_ctor_get(x_1671, 0); +lean_inc(x_1672); +x_1673 = lean_ctor_get(x_1671, 1); +lean_inc(x_1673); +lean_dec(x_1671); +x_1674 = lean_ctor_get(x_1672, 0); +lean_inc(x_1674); +x_1675 = lean_ctor_get(x_1672, 1); +lean_inc(x_1675); +lean_dec(x_1672); +x_1676 = lean_ctor_get(x_1, 2); +lean_inc(x_1676); +lean_inc(x_5); +lean_inc(x_4); +x_1677 = l_Lean_IR_ToIR_lowerType(x_1676, x_1675, x_4, x_5, x_1673); +if (lean_obj_tag(x_1677) == 0) +{ +lean_object* x_1678; lean_object* x_1679; lean_object* x_1680; lean_object* x_1681; lean_object* x_1682; +x_1678 = lean_ctor_get(x_1677, 0); +lean_inc(x_1678); +x_1679 = lean_ctor_get(x_1677, 1); +lean_inc(x_1679); +lean_dec(x_1677); +x_1680 = lean_ctor_get(x_1678, 0); +lean_inc(x_1680); +x_1681 = lean_ctor_get(x_1678, 1); +lean_inc(x_1681); +lean_dec(x_1678); +lean_inc(x_5); +lean_inc(x_4); +lean_inc(x_2); +x_1682 = l_Lean_IR_ToIR_lowerLet___lambda__3(x_2, x_1674, x_1663, x_1669, x_1664, x_1680, x_1681, x_4, x_5, x_1679); +if (lean_obj_tag(x_1682) == 0) +{ +lean_object* x_1683; lean_object* x_1684; lean_object* x_1685; lean_object* x_1686; lean_object* x_1687; lean_object* x_1688; lean_object* x_1689; +x_1683 = lean_ctor_get(x_1682, 0); +lean_inc(x_1683); +x_1684 = lean_ctor_get(x_1682, 1); +lean_inc(x_1684); +lean_dec(x_1682); +x_1685 = lean_ctor_get(x_1683, 0); +lean_inc(x_1685); +x_1686 = lean_ctor_get(x_1683, 1); +lean_inc(x_1686); +if (lean_is_exclusive(x_1683)) { + lean_ctor_release(x_1683, 0); + lean_ctor_release(x_1683, 1); + x_1687 = x_1683; +} else { + lean_dec_ref(x_1683); + x_1687 = lean_box(0); +} +if (lean_is_scalar(x_1655)) { + x_1688 = lean_alloc_ctor(1, 1, 0); +} else { + x_1688 = x_1655; +} +lean_ctor_set(x_1688, 0, x_1685); +if (lean_is_scalar(x_1687)) { + x_1689 = lean_alloc_ctor(0, 2, 0); +} else { + x_1689 = x_1687; +} +lean_ctor_set(x_1689, 0, x_1688); +lean_ctor_set(x_1689, 1, x_1686); +x_1619 = x_1689; +x_1620 = x_1684; +goto block_1646; +} +else +{ +lean_object* x_1690; lean_object* x_1691; lean_object* x_1692; lean_object* x_1693; +lean_dec(x_1655); +lean_dec(x_1509); +lean_dec(x_163); +lean_dec(x_5); +lean_dec(x_4); +lean_dec(x_2); +lean_dec(x_1); +x_1690 = lean_ctor_get(x_1682, 0); +lean_inc(x_1690); +x_1691 = lean_ctor_get(x_1682, 1); +lean_inc(x_1691); +if (lean_is_exclusive(x_1682)) { + lean_ctor_release(x_1682, 0); + lean_ctor_release(x_1682, 1); + x_1692 = x_1682; +} else { + lean_dec_ref(x_1682); + x_1692 = lean_box(0); +} +if (lean_is_scalar(x_1692)) { + x_1693 = lean_alloc_ctor(1, 2, 0); +} else { + x_1693 = x_1692; +} +lean_ctor_set(x_1693, 0, x_1690); +lean_ctor_set(x_1693, 1, x_1691); +return x_1693; +} +} +else +{ +lean_object* x_1694; lean_object* x_1695; lean_object* x_1696; lean_object* x_1697; +lean_dec(x_1674); +lean_dec(x_1669); +lean_dec(x_1664); +lean_dec(x_1663); +lean_dec(x_1655); +lean_dec(x_1509); +lean_dec(x_163); +lean_dec(x_5); +lean_dec(x_4); +lean_dec(x_2); +lean_dec(x_1); +x_1694 = lean_ctor_get(x_1677, 0); +lean_inc(x_1694); +x_1695 = lean_ctor_get(x_1677, 1); +lean_inc(x_1695); +if (lean_is_exclusive(x_1677)) { + lean_ctor_release(x_1677, 0); + lean_ctor_release(x_1677, 1); + x_1696 = x_1677; +} else { + lean_dec_ref(x_1677); + x_1696 = lean_box(0); +} +if (lean_is_scalar(x_1696)) { + 
x_1697 = lean_alloc_ctor(1, 2, 0); +} else { + x_1697 = x_1696; +} +lean_ctor_set(x_1697, 0, x_1694); +lean_ctor_set(x_1697, 1, x_1695); +return x_1697; +} +} +else +{ +lean_object* x_1698; lean_object* x_1699; lean_object* x_1700; lean_object* x_1701; lean_object* x_1702; lean_object* x_1703; lean_object* x_1704; lean_object* x_1705; lean_object* x_1706; +lean_dec(x_1658); +lean_dec(x_1656); +lean_inc(x_163); +if (lean_is_scalar(x_1653)) { + x_1698 = lean_alloc_ctor(6, 2, 0); +} else { + x_1698 = x_1653; + lean_ctor_set_tag(x_1698, 6); +} +lean_ctor_set(x_1698, 0, x_153); +lean_ctor_set(x_1698, 1, x_163); +x_1699 = lean_ctor_get(x_1, 0); +lean_inc(x_1699); +x_1700 = l_Lean_IR_ToIR_bindVar(x_1699, x_1505, x_4, x_5, x_1652); +x_1701 = lean_ctor_get(x_1700, 0); +lean_inc(x_1701); +x_1702 = lean_ctor_get(x_1700, 1); +lean_inc(x_1702); +lean_dec(x_1700); +x_1703 = lean_ctor_get(x_1701, 0); +lean_inc(x_1703); +x_1704 = lean_ctor_get(x_1701, 1); +lean_inc(x_1704); +lean_dec(x_1701); +x_1705 = lean_ctor_get(x_1, 2); +lean_inc(x_1705); +lean_inc(x_5); +lean_inc(x_4); +x_1706 = l_Lean_IR_ToIR_lowerType(x_1705, x_1704, x_4, x_5, x_1702); +if (lean_obj_tag(x_1706) == 0) +{ +lean_object* x_1707; lean_object* x_1708; lean_object* x_1709; lean_object* x_1710; lean_object* x_1711; +x_1707 = lean_ctor_get(x_1706, 0); +lean_inc(x_1707); +x_1708 = lean_ctor_get(x_1706, 1); +lean_inc(x_1708); +lean_dec(x_1706); +x_1709 = lean_ctor_get(x_1707, 0); +lean_inc(x_1709); +x_1710 = lean_ctor_get(x_1707, 1); +lean_inc(x_1710); +lean_dec(x_1707); +lean_inc(x_5); +lean_inc(x_4); +lean_inc(x_2); +x_1711 = l_Lean_IR_ToIR_lowerLet___lambda__1(x_2, x_1703, x_1698, x_1709, x_1710, x_4, x_5, x_1708); +if (lean_obj_tag(x_1711) == 0) +{ +lean_object* x_1712; lean_object* x_1713; lean_object* x_1714; lean_object* x_1715; lean_object* x_1716; lean_object* x_1717; lean_object* x_1718; +x_1712 = lean_ctor_get(x_1711, 0); +lean_inc(x_1712); +x_1713 = lean_ctor_get(x_1711, 1); +lean_inc(x_1713); +lean_dec(x_1711); +x_1714 = lean_ctor_get(x_1712, 0); +lean_inc(x_1714); +x_1715 = lean_ctor_get(x_1712, 1); +lean_inc(x_1715); +if (lean_is_exclusive(x_1712)) { + lean_ctor_release(x_1712, 0); + lean_ctor_release(x_1712, 1); + x_1716 = x_1712; +} else { + lean_dec_ref(x_1712); + x_1716 = lean_box(0); +} +if (lean_is_scalar(x_1655)) { + x_1717 = lean_alloc_ctor(1, 1, 0); +} else { + x_1717 = x_1655; +} +lean_ctor_set(x_1717, 0, x_1714); +if (lean_is_scalar(x_1716)) { + x_1718 = lean_alloc_ctor(0, 2, 0); +} else { + x_1718 = x_1716; +} +lean_ctor_set(x_1718, 0, x_1717); +lean_ctor_set(x_1718, 1, x_1715); +x_1619 = x_1718; +x_1620 = x_1713; +goto block_1646; +} +else +{ +lean_object* x_1719; lean_object* x_1720; lean_object* x_1721; lean_object* x_1722; +lean_dec(x_1655); +lean_dec(x_1509); +lean_dec(x_163); +lean_dec(x_5); +lean_dec(x_4); +lean_dec(x_2); +lean_dec(x_1); +x_1719 = lean_ctor_get(x_1711, 0); +lean_inc(x_1719); +x_1720 = lean_ctor_get(x_1711, 1); +lean_inc(x_1720); +if (lean_is_exclusive(x_1711)) { + lean_ctor_release(x_1711, 0); + lean_ctor_release(x_1711, 1); + x_1721 = x_1711; +} else { + lean_dec_ref(x_1711); + x_1721 = lean_box(0); +} +if (lean_is_scalar(x_1721)) { + x_1722 = lean_alloc_ctor(1, 2, 0); +} else { + x_1722 = x_1721; +} +lean_ctor_set(x_1722, 0, x_1719); +lean_ctor_set(x_1722, 1, x_1720); +return x_1722; +} +} +else +{ +lean_object* x_1723; lean_object* x_1724; lean_object* x_1725; lean_object* x_1726; +lean_dec(x_1703); +lean_dec(x_1698); +lean_dec(x_1655); +lean_dec(x_1509); +lean_dec(x_163); +lean_dec(x_5); 
+lean_dec(x_4); +lean_dec(x_2); +lean_dec(x_1); +x_1723 = lean_ctor_get(x_1706, 0); +lean_inc(x_1723); +x_1724 = lean_ctor_get(x_1706, 1); +lean_inc(x_1724); +if (lean_is_exclusive(x_1706)) { + lean_ctor_release(x_1706, 0); + lean_ctor_release(x_1706, 1); + x_1725 = x_1706; +} else { + lean_dec_ref(x_1706); + x_1725 = lean_box(0); +} +if (lean_is_scalar(x_1725)) { + x_1726 = lean_alloc_ctor(1, 2, 0); +} else { + x_1726 = x_1725; +} +lean_ctor_set(x_1726, 0, x_1723); +lean_ctor_set(x_1726, 1, x_1724); +return x_1726; +} +} +} +else +{ +lean_object* x_1727; lean_object* x_1728; lean_object* x_1729; lean_object* x_1730; lean_object* x_1731; lean_object* x_1732; lean_object* x_1733; lean_object* x_1734; lean_object* x_1735; +lean_dec(x_1658); +lean_dec(x_1656); +lean_inc(x_163); +if (lean_is_scalar(x_1653)) { + x_1727 = lean_alloc_ctor(7, 2, 0); +} else { + x_1727 = x_1653; + lean_ctor_set_tag(x_1727, 7); +} +lean_ctor_set(x_1727, 0, x_153); +lean_ctor_set(x_1727, 1, x_163); +x_1728 = lean_ctor_get(x_1, 0); +lean_inc(x_1728); +x_1729 = l_Lean_IR_ToIR_bindVar(x_1728, x_1505, x_4, x_5, x_1652); +x_1730 = lean_ctor_get(x_1729, 0); +lean_inc(x_1730); +x_1731 = lean_ctor_get(x_1729, 1); +lean_inc(x_1731); +lean_dec(x_1729); +x_1732 = lean_ctor_get(x_1730, 0); +lean_inc(x_1732); +x_1733 = lean_ctor_get(x_1730, 1); +lean_inc(x_1733); +lean_dec(x_1730); +x_1734 = lean_box(7); +lean_inc(x_5); +lean_inc(x_4); +lean_inc(x_2); +x_1735 = l_Lean_IR_ToIR_lowerLet___lambda__1(x_2, x_1732, x_1727, x_1734, x_1733, x_4, x_5, x_1731); +if (lean_obj_tag(x_1735) == 0) +{ +lean_object* x_1736; lean_object* x_1737; lean_object* x_1738; lean_object* x_1739; lean_object* x_1740; lean_object* x_1741; lean_object* x_1742; +x_1736 = lean_ctor_get(x_1735, 0); +lean_inc(x_1736); +x_1737 = lean_ctor_get(x_1735, 1); +lean_inc(x_1737); +lean_dec(x_1735); +x_1738 = lean_ctor_get(x_1736, 0); +lean_inc(x_1738); +x_1739 = lean_ctor_get(x_1736, 1); +lean_inc(x_1739); +if (lean_is_exclusive(x_1736)) { + lean_ctor_release(x_1736, 0); + lean_ctor_release(x_1736, 1); + x_1740 = x_1736; +} else { + lean_dec_ref(x_1736); + x_1740 = lean_box(0); +} +if (lean_is_scalar(x_1655)) { + x_1741 = lean_alloc_ctor(1, 1, 0); +} else { + x_1741 = x_1655; +} +lean_ctor_set(x_1741, 0, x_1738); +if (lean_is_scalar(x_1740)) { + x_1742 = lean_alloc_ctor(0, 2, 0); +} else { + x_1742 = x_1740; +} +lean_ctor_set(x_1742, 0, x_1741); +lean_ctor_set(x_1742, 1, x_1739); +x_1619 = x_1742; +x_1620 = x_1737; +goto block_1646; +} +else +{ +lean_object* x_1743; lean_object* x_1744; lean_object* x_1745; lean_object* x_1746; +lean_dec(x_1655); +lean_dec(x_1509); +lean_dec(x_163); +lean_dec(x_5); +lean_dec(x_4); +lean_dec(x_2); +lean_dec(x_1); +x_1743 = lean_ctor_get(x_1735, 0); +lean_inc(x_1743); +x_1744 = lean_ctor_get(x_1735, 1); +lean_inc(x_1744); +if (lean_is_exclusive(x_1735)) { + lean_ctor_release(x_1735, 0); + lean_ctor_release(x_1735, 1); + x_1745 = x_1735; +} else { + lean_dec_ref(x_1735); + x_1745 = lean_box(0); +} +if (lean_is_scalar(x_1745)) { + x_1746 = lean_alloc_ctor(1, 2, 0); +} else { + x_1746 = x_1745; +} +lean_ctor_set(x_1746, 0, x_1743); +lean_ctor_set(x_1746, 1, x_1744); +return x_1746; +} +} +} +block_1646: +{ +lean_object* x_1621; +x_1621 = lean_ctor_get(x_1619, 0); +lean_inc(x_1621); +if (lean_obj_tag(x_1621) == 0) +{ +lean_object* x_1622; lean_object* x_1623; lean_object* x_1624; lean_object* x_1625; lean_object* x_1626; lean_object* x_1627; lean_object* x_1628; lean_object* x_1629; lean_object* x_1630; lean_object* x_1631; +lean_dec(x_1509); 
+x_1622 = lean_ctor_get(x_1619, 1); +lean_inc(x_1622); +lean_dec(x_1619); +x_1623 = lean_alloc_ctor(6, 2, 0); +lean_ctor_set(x_1623, 0, x_153); +lean_ctor_set(x_1623, 1, x_163); +x_1624 = lean_ctor_get(x_1, 0); +lean_inc(x_1624); +x_1625 = l_Lean_IR_ToIR_bindVar(x_1624, x_1622, x_4, x_5, x_1620); +x_1626 = lean_ctor_get(x_1625, 0); +lean_inc(x_1626); +x_1627 = lean_ctor_get(x_1625, 1); +lean_inc(x_1627); +lean_dec(x_1625); +x_1628 = lean_ctor_get(x_1626, 0); +lean_inc(x_1628); +x_1629 = lean_ctor_get(x_1626, 1); +lean_inc(x_1629); +lean_dec(x_1626); +x_1630 = lean_ctor_get(x_1, 2); +lean_inc(x_1630); +lean_dec(x_1); +lean_inc(x_5); +lean_inc(x_4); +x_1631 = l_Lean_IR_ToIR_lowerType(x_1630, x_1629, x_4, x_5, x_1627); +if (lean_obj_tag(x_1631) == 0) +{ +lean_object* x_1632; lean_object* x_1633; lean_object* x_1634; lean_object* x_1635; lean_object* x_1636; +x_1632 = lean_ctor_get(x_1631, 0); +lean_inc(x_1632); +x_1633 = lean_ctor_get(x_1631, 1); +lean_inc(x_1633); +lean_dec(x_1631); +x_1634 = lean_ctor_get(x_1632, 0); +lean_inc(x_1634); +x_1635 = lean_ctor_get(x_1632, 1); +lean_inc(x_1635); +lean_dec(x_1632); +x_1636 = l_Lean_IR_ToIR_lowerLet___lambda__1(x_2, x_1628, x_1623, x_1634, x_1635, x_4, x_5, x_1633); +return x_1636; +} +else +{ +lean_object* x_1637; lean_object* x_1638; lean_object* x_1639; lean_object* x_1640; +lean_dec(x_1628); +lean_dec(x_1623); +lean_dec(x_5); +lean_dec(x_4); +lean_dec(x_2); +x_1637 = lean_ctor_get(x_1631, 0); +lean_inc(x_1637); +x_1638 = lean_ctor_get(x_1631, 1); +lean_inc(x_1638); +if (lean_is_exclusive(x_1631)) { + lean_ctor_release(x_1631, 0); + lean_ctor_release(x_1631, 1); + x_1639 = x_1631; +} else { + lean_dec_ref(x_1631); + x_1639 = lean_box(0); +} +if (lean_is_scalar(x_1639)) { + x_1640 = lean_alloc_ctor(1, 2, 0); +} else { + x_1640 = x_1639; +} +lean_ctor_set(x_1640, 0, x_1637); +lean_ctor_set(x_1640, 1, x_1638); +return x_1640; +} +} +else +{ +lean_object* x_1641; lean_object* x_1642; lean_object* x_1643; lean_object* x_1644; lean_object* x_1645; +lean_dec(x_163); +lean_dec(x_5); +lean_dec(x_4); +lean_dec(x_2); +lean_dec(x_1); +x_1641 = lean_ctor_get(x_1619, 1); +lean_inc(x_1641); +if (lean_is_exclusive(x_1619)) { + lean_ctor_release(x_1619, 0); + lean_ctor_release(x_1619, 1); + x_1642 = x_1619; +} else { + lean_dec_ref(x_1619); + x_1642 = lean_box(0); +} +x_1643 = lean_ctor_get(x_1621, 0); +lean_inc(x_1643); +lean_dec(x_1621); +if (lean_is_scalar(x_1642)) { + x_1644 = lean_alloc_ctor(0, 2, 0); +} else { + x_1644 = x_1642; +} +lean_ctor_set(x_1644, 0, x_1643); +lean_ctor_set(x_1644, 1, x_1641); +if (lean_is_scalar(x_1509)) { + x_1645 = lean_alloc_ctor(0, 2, 0); +} else { + x_1645 = x_1509; +} +lean_ctor_set(x_1645, 0, x_1644); +lean_ctor_set(x_1645, 1, x_1620); +return x_1645; +} +} +} +case 2: +{ +lean_object* x_1747; lean_object* x_1748; +lean_dec(x_1515); +lean_dec(x_1510); +lean_dec(x_1509); +lean_dec(x_163); +lean_dec(x_155); +lean_dec(x_154); +lean_dec(x_2); +lean_dec(x_1); +x_1747 = l_Lean_IR_ToIR_lowerLet___closed__18; +x_1748 = l_panic___at_Lean_IR_ToIR_lowerAlt_loop___spec__1(x_1747, x_1505, x_4, x_5, x_1508); +return x_1748; +} +case 3: +{ +lean_object* x_1749; lean_object* x_1750; lean_object* x_1777; lean_object* x_1778; +lean_dec(x_1515); +lean_dec(x_1510); +lean_dec(x_155); +lean_dec(x_154); +x_1777 = l_Lean_Compiler_LCNF_getMonoDecl_x3f(x_153, x_4, x_5, x_1508); +x_1778 = lean_ctor_get(x_1777, 0); +lean_inc(x_1778); +if (lean_obj_tag(x_1778) == 0) +{ +lean_object* x_1779; lean_object* x_1780; lean_object* x_1781; +x_1779 = 
lean_ctor_get(x_1777, 1); +lean_inc(x_1779); +lean_dec(x_1777); +x_1780 = lean_box(0); +x_1781 = lean_alloc_ctor(0, 2, 0); +lean_ctor_set(x_1781, 0, x_1780); +lean_ctor_set(x_1781, 1, x_1505); +x_1749 = x_1781; +x_1750 = x_1779; +goto block_1776; +} +else +{ +lean_object* x_1782; lean_object* x_1783; lean_object* x_1784; lean_object* x_1785; lean_object* x_1786; lean_object* x_1787; lean_object* x_1788; uint8_t x_1789; +x_1782 = lean_ctor_get(x_1777, 1); +lean_inc(x_1782); +if (lean_is_exclusive(x_1777)) { + lean_ctor_release(x_1777, 0); + lean_ctor_release(x_1777, 1); + x_1783 = x_1777; +} else { + lean_dec_ref(x_1777); + x_1783 = lean_box(0); +} +x_1784 = lean_ctor_get(x_1778, 0); +lean_inc(x_1784); +if (lean_is_exclusive(x_1778)) { + lean_ctor_release(x_1778, 0); + x_1785 = x_1778; +} else { + lean_dec_ref(x_1778); + x_1785 = lean_box(0); +} +x_1786 = lean_array_get_size(x_163); +x_1787 = lean_ctor_get(x_1784, 3); +lean_inc(x_1787); +lean_dec(x_1784); +x_1788 = lean_array_get_size(x_1787); +lean_dec(x_1787); +x_1789 = lean_nat_dec_lt(x_1786, x_1788); +if (x_1789 == 0) +{ +uint8_t x_1790; +x_1790 = lean_nat_dec_eq(x_1786, x_1788); +if (x_1790 == 0) +{ +lean_object* x_1791; lean_object* x_1792; lean_object* x_1793; lean_object* x_1794; lean_object* x_1795; lean_object* x_1796; lean_object* x_1797; lean_object* x_1798; lean_object* x_1799; lean_object* x_1800; lean_object* x_1801; lean_object* x_1802; lean_object* x_1803; lean_object* x_1804; lean_object* x_1805; lean_object* x_1806; lean_object* x_1807; +x_1791 = lean_unsigned_to_nat(0u); +x_1792 = l_Array_extract___rarg(x_163, x_1791, x_1788); +x_1793 = l_Array_extract___rarg(x_163, x_1788, x_1786); +lean_dec(x_1786); +if (lean_is_scalar(x_1783)) { + x_1794 = lean_alloc_ctor(6, 2, 0); +} else { + x_1794 = x_1783; + lean_ctor_set_tag(x_1794, 6); +} +lean_ctor_set(x_1794, 0, x_153); +lean_ctor_set(x_1794, 1, x_1792); +x_1795 = lean_ctor_get(x_1, 0); +lean_inc(x_1795); +x_1796 = l_Lean_IR_ToIR_bindVar(x_1795, x_1505, x_4, x_5, x_1782); +x_1797 = lean_ctor_get(x_1796, 0); +lean_inc(x_1797); +x_1798 = lean_ctor_get(x_1796, 1); +lean_inc(x_1798); +lean_dec(x_1796); +x_1799 = lean_ctor_get(x_1797, 0); +lean_inc(x_1799); +x_1800 = lean_ctor_get(x_1797, 1); +lean_inc(x_1800); +lean_dec(x_1797); +x_1801 = l_Lean_IR_ToIR_newVar(x_1800, x_4, x_5, x_1798); +x_1802 = lean_ctor_get(x_1801, 0); +lean_inc(x_1802); +x_1803 = lean_ctor_get(x_1801, 1); +lean_inc(x_1803); +lean_dec(x_1801); +x_1804 = lean_ctor_get(x_1802, 0); +lean_inc(x_1804); +x_1805 = lean_ctor_get(x_1802, 1); +lean_inc(x_1805); +lean_dec(x_1802); +x_1806 = lean_ctor_get(x_1, 2); +lean_inc(x_1806); +lean_inc(x_5); +lean_inc(x_4); +x_1807 = l_Lean_IR_ToIR_lowerType(x_1806, x_1805, x_4, x_5, x_1803); +if (lean_obj_tag(x_1807) == 0) +{ +lean_object* x_1808; lean_object* x_1809; lean_object* x_1810; lean_object* x_1811; lean_object* x_1812; +x_1808 = lean_ctor_get(x_1807, 0); +lean_inc(x_1808); +x_1809 = lean_ctor_get(x_1807, 1); +lean_inc(x_1809); +lean_dec(x_1807); +x_1810 = lean_ctor_get(x_1808, 0); +lean_inc(x_1810); +x_1811 = lean_ctor_get(x_1808, 1); +lean_inc(x_1811); +lean_dec(x_1808); +lean_inc(x_5); +lean_inc(x_4); +lean_inc(x_2); +x_1812 = l_Lean_IR_ToIR_lowerLet___lambda__3(x_2, x_1804, x_1793, x_1799, x_1794, x_1810, x_1811, x_4, x_5, x_1809); +if (lean_obj_tag(x_1812) == 0) +{ +lean_object* x_1813; lean_object* x_1814; lean_object* x_1815; lean_object* x_1816; lean_object* x_1817; lean_object* x_1818; lean_object* x_1819; +x_1813 = lean_ctor_get(x_1812, 0); +lean_inc(x_1813); 
+x_1814 = lean_ctor_get(x_1812, 1); +lean_inc(x_1814); +lean_dec(x_1812); +x_1815 = lean_ctor_get(x_1813, 0); +lean_inc(x_1815); +x_1816 = lean_ctor_get(x_1813, 1); +lean_inc(x_1816); +if (lean_is_exclusive(x_1813)) { + lean_ctor_release(x_1813, 0); + lean_ctor_release(x_1813, 1); + x_1817 = x_1813; +} else { + lean_dec_ref(x_1813); + x_1817 = lean_box(0); +} +if (lean_is_scalar(x_1785)) { + x_1818 = lean_alloc_ctor(1, 1, 0); +} else { + x_1818 = x_1785; +} +lean_ctor_set(x_1818, 0, x_1815); +if (lean_is_scalar(x_1817)) { + x_1819 = lean_alloc_ctor(0, 2, 0); +} else { + x_1819 = x_1817; +} +lean_ctor_set(x_1819, 0, x_1818); +lean_ctor_set(x_1819, 1, x_1816); +x_1749 = x_1819; +x_1750 = x_1814; +goto block_1776; +} +else +{ +lean_object* x_1820; lean_object* x_1821; lean_object* x_1822; lean_object* x_1823; +lean_dec(x_1785); +lean_dec(x_1509); +lean_dec(x_163); +lean_dec(x_5); +lean_dec(x_4); +lean_dec(x_2); +lean_dec(x_1); +x_1820 = lean_ctor_get(x_1812, 0); +lean_inc(x_1820); +x_1821 = lean_ctor_get(x_1812, 1); +lean_inc(x_1821); +if (lean_is_exclusive(x_1812)) { + lean_ctor_release(x_1812, 0); + lean_ctor_release(x_1812, 1); + x_1822 = x_1812; +} else { + lean_dec_ref(x_1812); + x_1822 = lean_box(0); +} +if (lean_is_scalar(x_1822)) { + x_1823 = lean_alloc_ctor(1, 2, 0); +} else { + x_1823 = x_1822; +} +lean_ctor_set(x_1823, 0, x_1820); +lean_ctor_set(x_1823, 1, x_1821); +return x_1823; +} +} +else +{ +lean_object* x_1824; lean_object* x_1825; lean_object* x_1826; lean_object* x_1827; +lean_dec(x_1804); +lean_dec(x_1799); +lean_dec(x_1794); +lean_dec(x_1793); +lean_dec(x_1785); +lean_dec(x_1509); +lean_dec(x_163); +lean_dec(x_5); +lean_dec(x_4); +lean_dec(x_2); +lean_dec(x_1); +x_1824 = lean_ctor_get(x_1807, 0); +lean_inc(x_1824); +x_1825 = lean_ctor_get(x_1807, 1); +lean_inc(x_1825); +if (lean_is_exclusive(x_1807)) { + lean_ctor_release(x_1807, 0); + lean_ctor_release(x_1807, 1); + x_1826 = x_1807; +} else { + lean_dec_ref(x_1807); + x_1826 = lean_box(0); +} +if (lean_is_scalar(x_1826)) { + x_1827 = lean_alloc_ctor(1, 2, 0); +} else { + x_1827 = x_1826; +} +lean_ctor_set(x_1827, 0, x_1824); +lean_ctor_set(x_1827, 1, x_1825); +return x_1827; +} +} +else +{ +lean_object* x_1828; lean_object* x_1829; lean_object* x_1830; lean_object* x_1831; lean_object* x_1832; lean_object* x_1833; lean_object* x_1834; lean_object* x_1835; lean_object* x_1836; +lean_dec(x_1788); +lean_dec(x_1786); +lean_inc(x_163); +if (lean_is_scalar(x_1783)) { + x_1828 = lean_alloc_ctor(6, 2, 0); +} else { + x_1828 = x_1783; + lean_ctor_set_tag(x_1828, 6); +} +lean_ctor_set(x_1828, 0, x_153); +lean_ctor_set(x_1828, 1, x_163); +x_1829 = lean_ctor_get(x_1, 0); +lean_inc(x_1829); +x_1830 = l_Lean_IR_ToIR_bindVar(x_1829, x_1505, x_4, x_5, x_1782); +x_1831 = lean_ctor_get(x_1830, 0); +lean_inc(x_1831); +x_1832 = lean_ctor_get(x_1830, 1); +lean_inc(x_1832); +lean_dec(x_1830); +x_1833 = lean_ctor_get(x_1831, 0); +lean_inc(x_1833); +x_1834 = lean_ctor_get(x_1831, 1); +lean_inc(x_1834); +lean_dec(x_1831); +x_1835 = lean_ctor_get(x_1, 2); +lean_inc(x_1835); +lean_inc(x_5); +lean_inc(x_4); +x_1836 = l_Lean_IR_ToIR_lowerType(x_1835, x_1834, x_4, x_5, x_1832); +if (lean_obj_tag(x_1836) == 0) +{ +lean_object* x_1837; lean_object* x_1838; lean_object* x_1839; lean_object* x_1840; lean_object* x_1841; +x_1837 = lean_ctor_get(x_1836, 0); +lean_inc(x_1837); +x_1838 = lean_ctor_get(x_1836, 1); +lean_inc(x_1838); +lean_dec(x_1836); +x_1839 = lean_ctor_get(x_1837, 0); +lean_inc(x_1839); +x_1840 = lean_ctor_get(x_1837, 1); +lean_inc(x_1840); 
+lean_dec(x_1837); +lean_inc(x_5); +lean_inc(x_4); +lean_inc(x_2); +x_1841 = l_Lean_IR_ToIR_lowerLet___lambda__1(x_2, x_1833, x_1828, x_1839, x_1840, x_4, x_5, x_1838); +if (lean_obj_tag(x_1841) == 0) +{ +lean_object* x_1842; lean_object* x_1843; lean_object* x_1844; lean_object* x_1845; lean_object* x_1846; lean_object* x_1847; lean_object* x_1848; +x_1842 = lean_ctor_get(x_1841, 0); +lean_inc(x_1842); +x_1843 = lean_ctor_get(x_1841, 1); +lean_inc(x_1843); +lean_dec(x_1841); +x_1844 = lean_ctor_get(x_1842, 0); +lean_inc(x_1844); +x_1845 = lean_ctor_get(x_1842, 1); +lean_inc(x_1845); +if (lean_is_exclusive(x_1842)) { + lean_ctor_release(x_1842, 0); + lean_ctor_release(x_1842, 1); + x_1846 = x_1842; +} else { + lean_dec_ref(x_1842); + x_1846 = lean_box(0); +} +if (lean_is_scalar(x_1785)) { + x_1847 = lean_alloc_ctor(1, 1, 0); +} else { + x_1847 = x_1785; +} +lean_ctor_set(x_1847, 0, x_1844); +if (lean_is_scalar(x_1846)) { + x_1848 = lean_alloc_ctor(0, 2, 0); +} else { + x_1848 = x_1846; +} +lean_ctor_set(x_1848, 0, x_1847); +lean_ctor_set(x_1848, 1, x_1845); +x_1749 = x_1848; +x_1750 = x_1843; +goto block_1776; +} +else +{ +lean_object* x_1849; lean_object* x_1850; lean_object* x_1851; lean_object* x_1852; +lean_dec(x_1785); +lean_dec(x_1509); +lean_dec(x_163); +lean_dec(x_5); +lean_dec(x_4); +lean_dec(x_2); +lean_dec(x_1); +x_1849 = lean_ctor_get(x_1841, 0); +lean_inc(x_1849); +x_1850 = lean_ctor_get(x_1841, 1); +lean_inc(x_1850); +if (lean_is_exclusive(x_1841)) { + lean_ctor_release(x_1841, 0); + lean_ctor_release(x_1841, 1); + x_1851 = x_1841; +} else { + lean_dec_ref(x_1841); + x_1851 = lean_box(0); +} +if (lean_is_scalar(x_1851)) { + x_1852 = lean_alloc_ctor(1, 2, 0); +} else { + x_1852 = x_1851; +} +lean_ctor_set(x_1852, 0, x_1849); +lean_ctor_set(x_1852, 1, x_1850); +return x_1852; +} +} +else +{ +lean_object* x_1853; lean_object* x_1854; lean_object* x_1855; lean_object* x_1856; +lean_dec(x_1833); +lean_dec(x_1828); +lean_dec(x_1785); +lean_dec(x_1509); +lean_dec(x_163); +lean_dec(x_5); +lean_dec(x_4); +lean_dec(x_2); +lean_dec(x_1); +x_1853 = lean_ctor_get(x_1836, 0); +lean_inc(x_1853); +x_1854 = lean_ctor_get(x_1836, 1); +lean_inc(x_1854); +if (lean_is_exclusive(x_1836)) { + lean_ctor_release(x_1836, 0); + lean_ctor_release(x_1836, 1); + x_1855 = x_1836; +} else { + lean_dec_ref(x_1836); + x_1855 = lean_box(0); +} +if (lean_is_scalar(x_1855)) { + x_1856 = lean_alloc_ctor(1, 2, 0); +} else { + x_1856 = x_1855; +} +lean_ctor_set(x_1856, 0, x_1853); +lean_ctor_set(x_1856, 1, x_1854); +return x_1856; +} +} +} +else +{ +lean_object* x_1857; lean_object* x_1858; lean_object* x_1859; lean_object* x_1860; lean_object* x_1861; lean_object* x_1862; lean_object* x_1863; lean_object* x_1864; lean_object* x_1865; +lean_dec(x_1788); +lean_dec(x_1786); +lean_inc(x_163); +if (lean_is_scalar(x_1783)) { + x_1857 = lean_alloc_ctor(7, 2, 0); +} else { + x_1857 = x_1783; + lean_ctor_set_tag(x_1857, 7); +} +lean_ctor_set(x_1857, 0, x_153); +lean_ctor_set(x_1857, 1, x_163); +x_1858 = lean_ctor_get(x_1, 0); +lean_inc(x_1858); +x_1859 = l_Lean_IR_ToIR_bindVar(x_1858, x_1505, x_4, x_5, x_1782); +x_1860 = lean_ctor_get(x_1859, 0); +lean_inc(x_1860); +x_1861 = lean_ctor_get(x_1859, 1); +lean_inc(x_1861); +lean_dec(x_1859); +x_1862 = lean_ctor_get(x_1860, 0); +lean_inc(x_1862); +x_1863 = lean_ctor_get(x_1860, 1); +lean_inc(x_1863); +lean_dec(x_1860); +x_1864 = lean_box(7); +lean_inc(x_5); +lean_inc(x_4); +lean_inc(x_2); +x_1865 = l_Lean_IR_ToIR_lowerLet___lambda__1(x_2, x_1862, x_1857, x_1864, x_1863, x_4, 
x_5, x_1861); +if (lean_obj_tag(x_1865) == 0) +{ +lean_object* x_1866; lean_object* x_1867; lean_object* x_1868; lean_object* x_1869; lean_object* x_1870; lean_object* x_1871; lean_object* x_1872; +x_1866 = lean_ctor_get(x_1865, 0); +lean_inc(x_1866); +x_1867 = lean_ctor_get(x_1865, 1); +lean_inc(x_1867); +lean_dec(x_1865); +x_1868 = lean_ctor_get(x_1866, 0); +lean_inc(x_1868); +x_1869 = lean_ctor_get(x_1866, 1); +lean_inc(x_1869); +if (lean_is_exclusive(x_1866)) { + lean_ctor_release(x_1866, 0); + lean_ctor_release(x_1866, 1); + x_1870 = x_1866; +} else { + lean_dec_ref(x_1866); + x_1870 = lean_box(0); +} +if (lean_is_scalar(x_1785)) { + x_1871 = lean_alloc_ctor(1, 1, 0); +} else { + x_1871 = x_1785; +} +lean_ctor_set(x_1871, 0, x_1868); +if (lean_is_scalar(x_1870)) { + x_1872 = lean_alloc_ctor(0, 2, 0); +} else { + x_1872 = x_1870; +} +lean_ctor_set(x_1872, 0, x_1871); +lean_ctor_set(x_1872, 1, x_1869); +x_1749 = x_1872; +x_1750 = x_1867; +goto block_1776; +} +else +{ +lean_object* x_1873; lean_object* x_1874; lean_object* x_1875; lean_object* x_1876; +lean_dec(x_1785); +lean_dec(x_1509); +lean_dec(x_163); +lean_dec(x_5); +lean_dec(x_4); +lean_dec(x_2); +lean_dec(x_1); +x_1873 = lean_ctor_get(x_1865, 0); +lean_inc(x_1873); +x_1874 = lean_ctor_get(x_1865, 1); +lean_inc(x_1874); +if (lean_is_exclusive(x_1865)) { + lean_ctor_release(x_1865, 0); + lean_ctor_release(x_1865, 1); + x_1875 = x_1865; +} else { + lean_dec_ref(x_1865); + x_1875 = lean_box(0); +} +if (lean_is_scalar(x_1875)) { + x_1876 = lean_alloc_ctor(1, 2, 0); +} else { + x_1876 = x_1875; +} +lean_ctor_set(x_1876, 0, x_1873); +lean_ctor_set(x_1876, 1, x_1874); +return x_1876; +} +} +} +block_1776: +{ +lean_object* x_1751; +x_1751 = lean_ctor_get(x_1749, 0); +lean_inc(x_1751); +if (lean_obj_tag(x_1751) == 0) +{ +lean_object* x_1752; lean_object* x_1753; lean_object* x_1754; lean_object* x_1755; lean_object* x_1756; lean_object* x_1757; lean_object* x_1758; lean_object* x_1759; lean_object* x_1760; lean_object* x_1761; +lean_dec(x_1509); +x_1752 = lean_ctor_get(x_1749, 1); +lean_inc(x_1752); +lean_dec(x_1749); +x_1753 = lean_alloc_ctor(6, 2, 0); +lean_ctor_set(x_1753, 0, x_153); +lean_ctor_set(x_1753, 1, x_163); +x_1754 = lean_ctor_get(x_1, 0); +lean_inc(x_1754); +x_1755 = l_Lean_IR_ToIR_bindVar(x_1754, x_1752, x_4, x_5, x_1750); +x_1756 = lean_ctor_get(x_1755, 0); +lean_inc(x_1756); +x_1757 = lean_ctor_get(x_1755, 1); +lean_inc(x_1757); +lean_dec(x_1755); +x_1758 = lean_ctor_get(x_1756, 0); +lean_inc(x_1758); +x_1759 = lean_ctor_get(x_1756, 1); +lean_inc(x_1759); +lean_dec(x_1756); +x_1760 = lean_ctor_get(x_1, 2); +lean_inc(x_1760); +lean_dec(x_1); +lean_inc(x_5); +lean_inc(x_4); +x_1761 = l_Lean_IR_ToIR_lowerType(x_1760, x_1759, x_4, x_5, x_1757); +if (lean_obj_tag(x_1761) == 0) +{ +lean_object* x_1762; lean_object* x_1763; lean_object* x_1764; lean_object* x_1765; lean_object* x_1766; +x_1762 = lean_ctor_get(x_1761, 0); +lean_inc(x_1762); +x_1763 = lean_ctor_get(x_1761, 1); +lean_inc(x_1763); +lean_dec(x_1761); +x_1764 = lean_ctor_get(x_1762, 0); +lean_inc(x_1764); +x_1765 = lean_ctor_get(x_1762, 1); +lean_inc(x_1765); +lean_dec(x_1762); +x_1766 = l_Lean_IR_ToIR_lowerLet___lambda__1(x_2, x_1758, x_1753, x_1764, x_1765, x_4, x_5, x_1763); +return x_1766; +} +else +{ +lean_object* x_1767; lean_object* x_1768; lean_object* x_1769; lean_object* x_1770; +lean_dec(x_1758); +lean_dec(x_1753); +lean_dec(x_5); +lean_dec(x_4); +lean_dec(x_2); +x_1767 = lean_ctor_get(x_1761, 0); +lean_inc(x_1767); +x_1768 = lean_ctor_get(x_1761, 1); 
+lean_inc(x_1768); +if (lean_is_exclusive(x_1761)) { + lean_ctor_release(x_1761, 0); + lean_ctor_release(x_1761, 1); + x_1769 = x_1761; +} else { + lean_dec_ref(x_1761); + x_1769 = lean_box(0); +} +if (lean_is_scalar(x_1769)) { + x_1770 = lean_alloc_ctor(1, 2, 0); +} else { + x_1770 = x_1769; +} +lean_ctor_set(x_1770, 0, x_1767); +lean_ctor_set(x_1770, 1, x_1768); +return x_1770; +} +} +else +{ +lean_object* x_1771; lean_object* x_1772; lean_object* x_1773; lean_object* x_1774; lean_object* x_1775; +lean_dec(x_163); +lean_dec(x_5); +lean_dec(x_4); +lean_dec(x_2); +lean_dec(x_1); +x_1771 = lean_ctor_get(x_1749, 1); +lean_inc(x_1771); +if (lean_is_exclusive(x_1749)) { + lean_ctor_release(x_1749, 0); + lean_ctor_release(x_1749, 1); + x_1772 = x_1749; +} else { + lean_dec_ref(x_1749); + x_1772 = lean_box(0); +} +x_1773 = lean_ctor_get(x_1751, 0); +lean_inc(x_1773); +lean_dec(x_1751); +if (lean_is_scalar(x_1772)) { + x_1774 = lean_alloc_ctor(0, 2, 0); +} else { + x_1774 = x_1772; +} +lean_ctor_set(x_1774, 0, x_1773); +lean_ctor_set(x_1774, 1, x_1771); +if (lean_is_scalar(x_1509)) { + x_1775 = lean_alloc_ctor(0, 2, 0); +} else { + x_1775 = x_1509; +} +lean_ctor_set(x_1775, 0, x_1774); +lean_ctor_set(x_1775, 1, x_1750); +return x_1775; +} +} +} +case 4: +{ +lean_object* x_1877; lean_object* x_1878; uint8_t x_1879; +lean_dec(x_1510); +lean_dec(x_1509); +lean_dec(x_155); +lean_dec(x_154); +if (lean_is_exclusive(x_1515)) { + lean_ctor_release(x_1515, 0); + x_1877 = x_1515; +} else { + lean_dec_ref(x_1515); + x_1877 = lean_box(0); +} +x_1878 = l_Lean_IR_ToIR_lowerLet___closed__20; +x_1879 = lean_name_eq(x_153, x_1878); +if (x_1879 == 0) +{ +uint8_t x_1880; lean_object* x_1881; lean_object* x_1882; lean_object* x_1883; lean_object* x_1884; lean_object* x_1885; lean_object* x_1886; lean_object* x_1887; lean_object* x_1888; lean_object* x_1889; +lean_dec(x_163); +lean_dec(x_2); +lean_dec(x_1); +x_1880 = 1; +x_1881 = l_Lean_IR_ToIR_lowerLet___closed__14; +x_1882 = l_Lean_Name_toString(x_153, x_1880, x_1881); +if (lean_is_scalar(x_1877)) { + x_1883 = lean_alloc_ctor(3, 1, 0); +} else { + x_1883 = x_1877; + lean_ctor_set_tag(x_1883, 3); +} +lean_ctor_set(x_1883, 0, x_1882); +x_1884 = l_Lean_IR_ToIR_lowerLet___closed__22; +x_1885 = lean_alloc_ctor(5, 2, 0); +lean_ctor_set(x_1885, 0, x_1884); +lean_ctor_set(x_1885, 1, x_1883); +x_1886 = l_Lean_IR_ToIR_lowerLet___closed__24; +x_1887 = lean_alloc_ctor(5, 2, 0); +lean_ctor_set(x_1887, 0, x_1885); +lean_ctor_set(x_1887, 1, x_1886); +x_1888 = l_Lean_MessageData_ofFormat(x_1887); +x_1889 = l_Lean_throwError___at_Lean_IR_ToIR_lowerLet___spec__1(x_1888, x_1505, x_4, x_5, x_1508); +lean_dec(x_5); +lean_dec(x_4); +lean_dec(x_1505); +return x_1889; +} +else +{ +lean_object* x_1890; lean_object* x_1891; lean_object* x_1892; +lean_dec(x_1877); +x_1890 = l_Lean_IR_instInhabitedArg; +x_1891 = lean_unsigned_to_nat(2u); +x_1892 = lean_array_get(x_1890, x_163, x_1891); +lean_dec(x_163); +if (lean_obj_tag(x_1892) == 0) +{ +lean_object* x_1893; lean_object* x_1894; lean_object* x_1895; lean_object* x_1896; lean_object* x_1897; lean_object* x_1898; lean_object* x_1899; +x_1893 = lean_ctor_get(x_1892, 0); +lean_inc(x_1893); +lean_dec(x_1892); +x_1894 = lean_ctor_get(x_1, 0); +lean_inc(x_1894); +lean_dec(x_1); +x_1895 = l_Lean_IR_ToIR_bindVarToVarId(x_1894, x_1893, x_1505, x_4, x_5, x_1508); +x_1896 = lean_ctor_get(x_1895, 0); +lean_inc(x_1896); +x_1897 = lean_ctor_get(x_1895, 1); +lean_inc(x_1897); +lean_dec(x_1895); +x_1898 = lean_ctor_get(x_1896, 1); +lean_inc(x_1898); 
+lean_dec(x_1896); +x_1899 = l_Lean_IR_ToIR_lowerCode(x_2, x_1898, x_4, x_5, x_1897); +return x_1899; +} +else +{ +lean_object* x_1900; lean_object* x_1901; lean_object* x_1902; lean_object* x_1903; lean_object* x_1904; lean_object* x_1905; +x_1900 = lean_ctor_get(x_1, 0); +lean_inc(x_1900); +lean_dec(x_1); +x_1901 = l_Lean_IR_ToIR_bindErased(x_1900, x_1505, x_4, x_5, x_1508); +x_1902 = lean_ctor_get(x_1901, 0); +lean_inc(x_1902); +x_1903 = lean_ctor_get(x_1901, 1); +lean_inc(x_1903); +lean_dec(x_1901); +x_1904 = lean_ctor_get(x_1902, 1); +lean_inc(x_1904); +lean_dec(x_1902); +x_1905 = l_Lean_IR_ToIR_lowerCode(x_2, x_1904, x_4, x_5, x_1903); +return x_1905; +} +} +} +case 5: +{ +lean_object* x_1906; lean_object* x_1907; +lean_dec(x_1515); +lean_dec(x_1510); +lean_dec(x_1509); +lean_dec(x_163); +lean_dec(x_155); +lean_dec(x_154); +lean_dec(x_2); +lean_dec(x_1); +x_1906 = l_Lean_IR_ToIR_lowerLet___closed__26; +x_1907 = l_panic___at_Lean_IR_ToIR_lowerAlt_loop___spec__1(x_1906, x_1505, x_4, x_5, x_1508); +return x_1907; +} +case 6: +{ +lean_object* x_1908; uint8_t x_1909; +x_1908 = lean_ctor_get(x_1515, 0); +lean_inc(x_1908); +lean_dec(x_1515); +x_1909 = l_Lean_isExtern(x_1510, x_153); +if (x_1909 == 0) +{ +lean_object* x_1910; +lean_dec(x_1509); +lean_dec(x_163); +lean_inc(x_5); +lean_inc(x_4); +x_1910 = l_Lean_IR_ToIR_getCtorInfo(x_153, x_1505, x_4, x_5, x_1508); +if (lean_obj_tag(x_1910) == 0) +{ +lean_object* x_1911; lean_object* x_1912; lean_object* x_1913; lean_object* x_1914; lean_object* x_1915; lean_object* x_1916; lean_object* x_1917; lean_object* x_1918; lean_object* x_1919; lean_object* x_1920; lean_object* x_1921; lean_object* x_1922; lean_object* x_1923; lean_object* x_1924; lean_object* x_1925; lean_object* x_1926; lean_object* x_1927; lean_object* x_1928; lean_object* x_1929; lean_object* x_1930; +x_1911 = lean_ctor_get(x_1910, 0); +lean_inc(x_1911); +x_1912 = lean_ctor_get(x_1911, 0); +lean_inc(x_1912); +x_1913 = lean_ctor_get(x_1910, 1); +lean_inc(x_1913); +lean_dec(x_1910); +x_1914 = lean_ctor_get(x_1911, 1); +lean_inc(x_1914); +lean_dec(x_1911); +x_1915 = lean_ctor_get(x_1912, 0); +lean_inc(x_1915); +x_1916 = lean_ctor_get(x_1912, 1); +lean_inc(x_1916); +lean_dec(x_1912); +x_1917 = lean_ctor_get(x_1908, 3); +lean_inc(x_1917); +lean_dec(x_1908); +x_1918 = lean_array_get_size(x_154); +x_1919 = l_Array_extract___rarg(x_154, x_1917, x_1918); +lean_dec(x_1918); +lean_dec(x_154); +x_1920 = lean_array_get_size(x_1916); +x_1921 = lean_unsigned_to_nat(0u); +x_1922 = lean_unsigned_to_nat(1u); +if (lean_is_scalar(x_155)) { + x_1923 = lean_alloc_ctor(0, 3, 0); +} else { + x_1923 = x_155; + lean_ctor_set_tag(x_1923, 0); +} +lean_ctor_set(x_1923, 0, x_1921); +lean_ctor_set(x_1923, 1, x_1920); +lean_ctor_set(x_1923, 2, x_1922); +x_1924 = l_Lean_IR_ToIR_lowerLet___closed__27; +x_1925 = l_Std_Range_forIn_x27_loop___at_Lean_IR_ToIR_lowerLet___spec__2(x_1916, x_1919, x_1923, x_1923, x_1924, x_1921, lean_box(0), lean_box(0), x_1914, x_4, x_5, x_1913); +lean_dec(x_1923); +x_1926 = lean_ctor_get(x_1925, 0); +lean_inc(x_1926); +x_1927 = lean_ctor_get(x_1925, 1); +lean_inc(x_1927); +lean_dec(x_1925); +x_1928 = lean_ctor_get(x_1926, 0); +lean_inc(x_1928); +x_1929 = lean_ctor_get(x_1926, 1); +lean_inc(x_1929); +lean_dec(x_1926); +x_1930 = l_Lean_IR_ToIR_lowerLet___lambda__4(x_1, x_2, x_1915, x_1916, x_1919, x_1928, x_1929, x_4, x_5, x_1927); +lean_dec(x_1919); +lean_dec(x_1916); +return x_1930; +} +else +{ +lean_object* x_1931; lean_object* x_1932; lean_object* x_1933; lean_object* x_1934; 
+lean_dec(x_1908); +lean_dec(x_155); +lean_dec(x_154); +lean_dec(x_5); +lean_dec(x_4); +lean_dec(x_2); +lean_dec(x_1); +x_1931 = lean_ctor_get(x_1910, 0); +lean_inc(x_1931); +x_1932 = lean_ctor_get(x_1910, 1); +lean_inc(x_1932); +if (lean_is_exclusive(x_1910)) { + lean_ctor_release(x_1910, 0); + lean_ctor_release(x_1910, 1); + x_1933 = x_1910; +} else { + lean_dec_ref(x_1910); + x_1933 = lean_box(0); +} +if (lean_is_scalar(x_1933)) { + x_1934 = lean_alloc_ctor(1, 2, 0); +} else { + x_1934 = x_1933; +} +lean_ctor_set(x_1934, 0, x_1931); +lean_ctor_set(x_1934, 1, x_1932); +return x_1934; +} +} +else +{ +lean_object* x_1935; lean_object* x_1936; lean_object* x_1963; lean_object* x_1964; +lean_dec(x_1908); +lean_dec(x_155); +lean_dec(x_154); +x_1963 = l_Lean_Compiler_LCNF_getMonoDecl_x3f(x_153, x_4, x_5, x_1508); +x_1964 = lean_ctor_get(x_1963, 0); +lean_inc(x_1964); +if (lean_obj_tag(x_1964) == 0) +{ +lean_object* x_1965; lean_object* x_1966; lean_object* x_1967; +x_1965 = lean_ctor_get(x_1963, 1); +lean_inc(x_1965); +lean_dec(x_1963); +x_1966 = lean_box(0); +x_1967 = lean_alloc_ctor(0, 2, 0); +lean_ctor_set(x_1967, 0, x_1966); +lean_ctor_set(x_1967, 1, x_1505); +x_1935 = x_1967; +x_1936 = x_1965; +goto block_1962; +} +else +{ +lean_object* x_1968; lean_object* x_1969; lean_object* x_1970; lean_object* x_1971; lean_object* x_1972; lean_object* x_1973; lean_object* x_1974; uint8_t x_1975; +x_1968 = lean_ctor_get(x_1963, 1); +lean_inc(x_1968); +if (lean_is_exclusive(x_1963)) { + lean_ctor_release(x_1963, 0); + lean_ctor_release(x_1963, 1); + x_1969 = x_1963; +} else { + lean_dec_ref(x_1963); + x_1969 = lean_box(0); +} +x_1970 = lean_ctor_get(x_1964, 0); +lean_inc(x_1970); +if (lean_is_exclusive(x_1964)) { + lean_ctor_release(x_1964, 0); + x_1971 = x_1964; +} else { + lean_dec_ref(x_1964); + x_1971 = lean_box(0); +} +x_1972 = lean_array_get_size(x_163); +x_1973 = lean_ctor_get(x_1970, 3); +lean_inc(x_1973); +lean_dec(x_1970); +x_1974 = lean_array_get_size(x_1973); +lean_dec(x_1973); +x_1975 = lean_nat_dec_lt(x_1972, x_1974); +if (x_1975 == 0) +{ +uint8_t x_1976; +x_1976 = lean_nat_dec_eq(x_1972, x_1974); +if (x_1976 == 0) +{ +lean_object* x_1977; lean_object* x_1978; lean_object* x_1979; lean_object* x_1980; lean_object* x_1981; lean_object* x_1982; lean_object* x_1983; lean_object* x_1984; lean_object* x_1985; lean_object* x_1986; lean_object* x_1987; lean_object* x_1988; lean_object* x_1989; lean_object* x_1990; lean_object* x_1991; lean_object* x_1992; lean_object* x_1993; +x_1977 = lean_unsigned_to_nat(0u); +x_1978 = l_Array_extract___rarg(x_163, x_1977, x_1974); +x_1979 = l_Array_extract___rarg(x_163, x_1974, x_1972); +lean_dec(x_1972); +if (lean_is_scalar(x_1969)) { + x_1980 = lean_alloc_ctor(6, 2, 0); +} else { + x_1980 = x_1969; + lean_ctor_set_tag(x_1980, 6); +} +lean_ctor_set(x_1980, 0, x_153); +lean_ctor_set(x_1980, 1, x_1978); +x_1981 = lean_ctor_get(x_1, 0); +lean_inc(x_1981); +x_1982 = l_Lean_IR_ToIR_bindVar(x_1981, x_1505, x_4, x_5, x_1968); +x_1983 = lean_ctor_get(x_1982, 0); +lean_inc(x_1983); +x_1984 = lean_ctor_get(x_1982, 1); +lean_inc(x_1984); +lean_dec(x_1982); +x_1985 = lean_ctor_get(x_1983, 0); +lean_inc(x_1985); +x_1986 = lean_ctor_get(x_1983, 1); +lean_inc(x_1986); +lean_dec(x_1983); +x_1987 = l_Lean_IR_ToIR_newVar(x_1986, x_4, x_5, x_1984); +x_1988 = lean_ctor_get(x_1987, 0); +lean_inc(x_1988); +x_1989 = lean_ctor_get(x_1987, 1); +lean_inc(x_1989); +lean_dec(x_1987); +x_1990 = lean_ctor_get(x_1988, 0); +lean_inc(x_1990); +x_1991 = lean_ctor_get(x_1988, 1); 
+lean_inc(x_1991); +lean_dec(x_1988); +x_1992 = lean_ctor_get(x_1, 2); +lean_inc(x_1992); +lean_inc(x_5); +lean_inc(x_4); +x_1993 = l_Lean_IR_ToIR_lowerType(x_1992, x_1991, x_4, x_5, x_1989); +if (lean_obj_tag(x_1993) == 0) +{ +lean_object* x_1994; lean_object* x_1995; lean_object* x_1996; lean_object* x_1997; lean_object* x_1998; +x_1994 = lean_ctor_get(x_1993, 0); +lean_inc(x_1994); +x_1995 = lean_ctor_get(x_1993, 1); +lean_inc(x_1995); +lean_dec(x_1993); +x_1996 = lean_ctor_get(x_1994, 0); +lean_inc(x_1996); +x_1997 = lean_ctor_get(x_1994, 1); +lean_inc(x_1997); +lean_dec(x_1994); +lean_inc(x_5); +lean_inc(x_4); +lean_inc(x_2); +x_1998 = l_Lean_IR_ToIR_lowerLet___lambda__3(x_2, x_1990, x_1979, x_1985, x_1980, x_1996, x_1997, x_4, x_5, x_1995); +if (lean_obj_tag(x_1998) == 0) +{ +lean_object* x_1999; lean_object* x_2000; lean_object* x_2001; lean_object* x_2002; lean_object* x_2003; lean_object* x_2004; lean_object* x_2005; +x_1999 = lean_ctor_get(x_1998, 0); +lean_inc(x_1999); +x_2000 = lean_ctor_get(x_1998, 1); +lean_inc(x_2000); +lean_dec(x_1998); +x_2001 = lean_ctor_get(x_1999, 0); +lean_inc(x_2001); +x_2002 = lean_ctor_get(x_1999, 1); +lean_inc(x_2002); +if (lean_is_exclusive(x_1999)) { + lean_ctor_release(x_1999, 0); + lean_ctor_release(x_1999, 1); + x_2003 = x_1999; +} else { + lean_dec_ref(x_1999); + x_2003 = lean_box(0); +} +if (lean_is_scalar(x_1971)) { + x_2004 = lean_alloc_ctor(1, 1, 0); +} else { + x_2004 = x_1971; +} +lean_ctor_set(x_2004, 0, x_2001); +if (lean_is_scalar(x_2003)) { + x_2005 = lean_alloc_ctor(0, 2, 0); +} else { + x_2005 = x_2003; +} +lean_ctor_set(x_2005, 0, x_2004); +lean_ctor_set(x_2005, 1, x_2002); +x_1935 = x_2005; +x_1936 = x_2000; +goto block_1962; +} +else +{ +lean_object* x_2006; lean_object* x_2007; lean_object* x_2008; lean_object* x_2009; +lean_dec(x_1971); +lean_dec(x_1509); +lean_dec(x_163); +lean_dec(x_5); +lean_dec(x_4); +lean_dec(x_2); +lean_dec(x_1); +x_2006 = lean_ctor_get(x_1998, 0); +lean_inc(x_2006); +x_2007 = lean_ctor_get(x_1998, 1); +lean_inc(x_2007); +if (lean_is_exclusive(x_1998)) { + lean_ctor_release(x_1998, 0); + lean_ctor_release(x_1998, 1); + x_2008 = x_1998; +} else { + lean_dec_ref(x_1998); + x_2008 = lean_box(0); +} +if (lean_is_scalar(x_2008)) { + x_2009 = lean_alloc_ctor(1, 2, 0); +} else { + x_2009 = x_2008; +} +lean_ctor_set(x_2009, 0, x_2006); +lean_ctor_set(x_2009, 1, x_2007); +return x_2009; +} +} +else +{ +lean_object* x_2010; lean_object* x_2011; lean_object* x_2012; lean_object* x_2013; +lean_dec(x_1990); +lean_dec(x_1985); +lean_dec(x_1980); +lean_dec(x_1979); +lean_dec(x_1971); +lean_dec(x_1509); +lean_dec(x_163); +lean_dec(x_5); +lean_dec(x_4); +lean_dec(x_2); +lean_dec(x_1); +x_2010 = lean_ctor_get(x_1993, 0); +lean_inc(x_2010); +x_2011 = lean_ctor_get(x_1993, 1); +lean_inc(x_2011); +if (lean_is_exclusive(x_1993)) { + lean_ctor_release(x_1993, 0); + lean_ctor_release(x_1993, 1); + x_2012 = x_1993; +} else { + lean_dec_ref(x_1993); + x_2012 = lean_box(0); +} +if (lean_is_scalar(x_2012)) { + x_2013 = lean_alloc_ctor(1, 2, 0); +} else { + x_2013 = x_2012; +} +lean_ctor_set(x_2013, 0, x_2010); +lean_ctor_set(x_2013, 1, x_2011); +return x_2013; +} +} +else +{ +lean_object* x_2014; lean_object* x_2015; lean_object* x_2016; lean_object* x_2017; lean_object* x_2018; lean_object* x_2019; lean_object* x_2020; lean_object* x_2021; lean_object* x_2022; +lean_dec(x_1974); +lean_dec(x_1972); +lean_inc(x_163); +if (lean_is_scalar(x_1969)) { + x_2014 = lean_alloc_ctor(6, 2, 0); +} else { + x_2014 = x_1969; + 
lean_ctor_set_tag(x_2014, 6); +} +lean_ctor_set(x_2014, 0, x_153); +lean_ctor_set(x_2014, 1, x_163); +x_2015 = lean_ctor_get(x_1, 0); +lean_inc(x_2015); +x_2016 = l_Lean_IR_ToIR_bindVar(x_2015, x_1505, x_4, x_5, x_1968); +x_2017 = lean_ctor_get(x_2016, 0); +lean_inc(x_2017); +x_2018 = lean_ctor_get(x_2016, 1); +lean_inc(x_2018); +lean_dec(x_2016); +x_2019 = lean_ctor_get(x_2017, 0); +lean_inc(x_2019); +x_2020 = lean_ctor_get(x_2017, 1); +lean_inc(x_2020); +lean_dec(x_2017); +x_2021 = lean_ctor_get(x_1, 2); +lean_inc(x_2021); +lean_inc(x_5); +lean_inc(x_4); +x_2022 = l_Lean_IR_ToIR_lowerType(x_2021, x_2020, x_4, x_5, x_2018); +if (lean_obj_tag(x_2022) == 0) +{ +lean_object* x_2023; lean_object* x_2024; lean_object* x_2025; lean_object* x_2026; lean_object* x_2027; +x_2023 = lean_ctor_get(x_2022, 0); +lean_inc(x_2023); +x_2024 = lean_ctor_get(x_2022, 1); +lean_inc(x_2024); +lean_dec(x_2022); +x_2025 = lean_ctor_get(x_2023, 0); +lean_inc(x_2025); +x_2026 = lean_ctor_get(x_2023, 1); +lean_inc(x_2026); +lean_dec(x_2023); +lean_inc(x_5); +lean_inc(x_4); +lean_inc(x_2); +x_2027 = l_Lean_IR_ToIR_lowerLet___lambda__1(x_2, x_2019, x_2014, x_2025, x_2026, x_4, x_5, x_2024); +if (lean_obj_tag(x_2027) == 0) +{ +lean_object* x_2028; lean_object* x_2029; lean_object* x_2030; lean_object* x_2031; lean_object* x_2032; lean_object* x_2033; lean_object* x_2034; +x_2028 = lean_ctor_get(x_2027, 0); +lean_inc(x_2028); +x_2029 = lean_ctor_get(x_2027, 1); +lean_inc(x_2029); +lean_dec(x_2027); +x_2030 = lean_ctor_get(x_2028, 0); +lean_inc(x_2030); +x_2031 = lean_ctor_get(x_2028, 1); +lean_inc(x_2031); +if (lean_is_exclusive(x_2028)) { + lean_ctor_release(x_2028, 0); + lean_ctor_release(x_2028, 1); + x_2032 = x_2028; +} else { + lean_dec_ref(x_2028); + x_2032 = lean_box(0); +} +if (lean_is_scalar(x_1971)) { + x_2033 = lean_alloc_ctor(1, 1, 0); +} else { + x_2033 = x_1971; +} +lean_ctor_set(x_2033, 0, x_2030); +if (lean_is_scalar(x_2032)) { + x_2034 = lean_alloc_ctor(0, 2, 0); +} else { + x_2034 = x_2032; +} +lean_ctor_set(x_2034, 0, x_2033); +lean_ctor_set(x_2034, 1, x_2031); +x_1935 = x_2034; +x_1936 = x_2029; +goto block_1962; +} +else +{ +lean_object* x_2035; lean_object* x_2036; lean_object* x_2037; lean_object* x_2038; +lean_dec(x_1971); +lean_dec(x_1509); +lean_dec(x_163); +lean_dec(x_5); +lean_dec(x_4); +lean_dec(x_2); +lean_dec(x_1); +x_2035 = lean_ctor_get(x_2027, 0); +lean_inc(x_2035); +x_2036 = lean_ctor_get(x_2027, 1); +lean_inc(x_2036); +if (lean_is_exclusive(x_2027)) { + lean_ctor_release(x_2027, 0); + lean_ctor_release(x_2027, 1); + x_2037 = x_2027; +} else { + lean_dec_ref(x_2027); + x_2037 = lean_box(0); +} +if (lean_is_scalar(x_2037)) { + x_2038 = lean_alloc_ctor(1, 2, 0); +} else { + x_2038 = x_2037; +} +lean_ctor_set(x_2038, 0, x_2035); +lean_ctor_set(x_2038, 1, x_2036); +return x_2038; +} +} +else +{ +lean_object* x_2039; lean_object* x_2040; lean_object* x_2041; lean_object* x_2042; +lean_dec(x_2019); +lean_dec(x_2014); +lean_dec(x_1971); +lean_dec(x_1509); +lean_dec(x_163); +lean_dec(x_5); +lean_dec(x_4); +lean_dec(x_2); +lean_dec(x_1); +x_2039 = lean_ctor_get(x_2022, 0); +lean_inc(x_2039); +x_2040 = lean_ctor_get(x_2022, 1); +lean_inc(x_2040); +if (lean_is_exclusive(x_2022)) { + lean_ctor_release(x_2022, 0); + lean_ctor_release(x_2022, 1); + x_2041 = x_2022; +} else { + lean_dec_ref(x_2022); + x_2041 = lean_box(0); +} +if (lean_is_scalar(x_2041)) { + x_2042 = lean_alloc_ctor(1, 2, 0); +} else { + x_2042 = x_2041; +} +lean_ctor_set(x_2042, 0, x_2039); +lean_ctor_set(x_2042, 1, x_2040); 
+return x_2042; +} +} +} +else +{ +lean_object* x_2043; lean_object* x_2044; lean_object* x_2045; lean_object* x_2046; lean_object* x_2047; lean_object* x_2048; lean_object* x_2049; lean_object* x_2050; lean_object* x_2051; +lean_dec(x_1974); +lean_dec(x_1972); +lean_inc(x_163); +if (lean_is_scalar(x_1969)) { + x_2043 = lean_alloc_ctor(7, 2, 0); +} else { + x_2043 = x_1969; + lean_ctor_set_tag(x_2043, 7); +} +lean_ctor_set(x_2043, 0, x_153); +lean_ctor_set(x_2043, 1, x_163); +x_2044 = lean_ctor_get(x_1, 0); +lean_inc(x_2044); +x_2045 = l_Lean_IR_ToIR_bindVar(x_2044, x_1505, x_4, x_5, x_1968); +x_2046 = lean_ctor_get(x_2045, 0); +lean_inc(x_2046); +x_2047 = lean_ctor_get(x_2045, 1); +lean_inc(x_2047); +lean_dec(x_2045); +x_2048 = lean_ctor_get(x_2046, 0); +lean_inc(x_2048); +x_2049 = lean_ctor_get(x_2046, 1); +lean_inc(x_2049); +lean_dec(x_2046); +x_2050 = lean_box(7); +lean_inc(x_5); +lean_inc(x_4); +lean_inc(x_2); +x_2051 = l_Lean_IR_ToIR_lowerLet___lambda__1(x_2, x_2048, x_2043, x_2050, x_2049, x_4, x_5, x_2047); +if (lean_obj_tag(x_2051) == 0) +{ +lean_object* x_2052; lean_object* x_2053; lean_object* x_2054; lean_object* x_2055; lean_object* x_2056; lean_object* x_2057; lean_object* x_2058; +x_2052 = lean_ctor_get(x_2051, 0); +lean_inc(x_2052); +x_2053 = lean_ctor_get(x_2051, 1); +lean_inc(x_2053); +lean_dec(x_2051); +x_2054 = lean_ctor_get(x_2052, 0); +lean_inc(x_2054); +x_2055 = lean_ctor_get(x_2052, 1); +lean_inc(x_2055); +if (lean_is_exclusive(x_2052)) { + lean_ctor_release(x_2052, 0); + lean_ctor_release(x_2052, 1); + x_2056 = x_2052; +} else { + lean_dec_ref(x_2052); + x_2056 = lean_box(0); +} +if (lean_is_scalar(x_1971)) { + x_2057 = lean_alloc_ctor(1, 1, 0); +} else { + x_2057 = x_1971; +} +lean_ctor_set(x_2057, 0, x_2054); +if (lean_is_scalar(x_2056)) { + x_2058 = lean_alloc_ctor(0, 2, 0); +} else { + x_2058 = x_2056; +} +lean_ctor_set(x_2058, 0, x_2057); +lean_ctor_set(x_2058, 1, x_2055); +x_1935 = x_2058; +x_1936 = x_2053; +goto block_1962; +} +else +{ +lean_object* x_2059; lean_object* x_2060; lean_object* x_2061; lean_object* x_2062; +lean_dec(x_1971); +lean_dec(x_1509); +lean_dec(x_163); +lean_dec(x_5); +lean_dec(x_4); +lean_dec(x_2); +lean_dec(x_1); +x_2059 = lean_ctor_get(x_2051, 0); +lean_inc(x_2059); +x_2060 = lean_ctor_get(x_2051, 1); +lean_inc(x_2060); +if (lean_is_exclusive(x_2051)) { + lean_ctor_release(x_2051, 0); + lean_ctor_release(x_2051, 1); + x_2061 = x_2051; +} else { + lean_dec_ref(x_2051); + x_2061 = lean_box(0); +} +if (lean_is_scalar(x_2061)) { + x_2062 = lean_alloc_ctor(1, 2, 0); +} else { + x_2062 = x_2061; +} +lean_ctor_set(x_2062, 0, x_2059); +lean_ctor_set(x_2062, 1, x_2060); +return x_2062; +} +} +} +block_1962: +{ +lean_object* x_1937; +x_1937 = lean_ctor_get(x_1935, 0); +lean_inc(x_1937); +if (lean_obj_tag(x_1937) == 0) +{ +lean_object* x_1938; lean_object* x_1939; lean_object* x_1940; lean_object* x_1941; lean_object* x_1942; lean_object* x_1943; lean_object* x_1944; lean_object* x_1945; lean_object* x_1946; lean_object* x_1947; +lean_dec(x_1509); +x_1938 = lean_ctor_get(x_1935, 1); +lean_inc(x_1938); +lean_dec(x_1935); +x_1939 = lean_alloc_ctor(6, 2, 0); +lean_ctor_set(x_1939, 0, x_153); +lean_ctor_set(x_1939, 1, x_163); +x_1940 = lean_ctor_get(x_1, 0); +lean_inc(x_1940); +x_1941 = l_Lean_IR_ToIR_bindVar(x_1940, x_1938, x_4, x_5, x_1936); +x_1942 = lean_ctor_get(x_1941, 0); +lean_inc(x_1942); +x_1943 = lean_ctor_get(x_1941, 1); +lean_inc(x_1943); +lean_dec(x_1941); +x_1944 = lean_ctor_get(x_1942, 0); +lean_inc(x_1944); +x_1945 = 
lean_ctor_get(x_1942, 1); +lean_inc(x_1945); +lean_dec(x_1942); +x_1946 = lean_ctor_get(x_1, 2); +lean_inc(x_1946); +lean_dec(x_1); +lean_inc(x_5); +lean_inc(x_4); +x_1947 = l_Lean_IR_ToIR_lowerType(x_1946, x_1945, x_4, x_5, x_1943); +if (lean_obj_tag(x_1947) == 0) +{ +lean_object* x_1948; lean_object* x_1949; lean_object* x_1950; lean_object* x_1951; lean_object* x_1952; +x_1948 = lean_ctor_get(x_1947, 0); +lean_inc(x_1948); +x_1949 = lean_ctor_get(x_1947, 1); +lean_inc(x_1949); +lean_dec(x_1947); +x_1950 = lean_ctor_get(x_1948, 0); +lean_inc(x_1950); +x_1951 = lean_ctor_get(x_1948, 1); +lean_inc(x_1951); +lean_dec(x_1948); +x_1952 = l_Lean_IR_ToIR_lowerLet___lambda__1(x_2, x_1944, x_1939, x_1950, x_1951, x_4, x_5, x_1949); +return x_1952; +} +else +{ +lean_object* x_1953; lean_object* x_1954; lean_object* x_1955; lean_object* x_1956; +lean_dec(x_1944); +lean_dec(x_1939); +lean_dec(x_5); +lean_dec(x_4); +lean_dec(x_2); +x_1953 = lean_ctor_get(x_1947, 0); +lean_inc(x_1953); +x_1954 = lean_ctor_get(x_1947, 1); +lean_inc(x_1954); +if (lean_is_exclusive(x_1947)) { + lean_ctor_release(x_1947, 0); + lean_ctor_release(x_1947, 1); + x_1955 = x_1947; +} else { + lean_dec_ref(x_1947); + x_1955 = lean_box(0); +} +if (lean_is_scalar(x_1955)) { + x_1956 = lean_alloc_ctor(1, 2, 0); +} else { + x_1956 = x_1955; +} +lean_ctor_set(x_1956, 0, x_1953); +lean_ctor_set(x_1956, 1, x_1954); +return x_1956; +} +} +else +{ +lean_object* x_1957; lean_object* x_1958; lean_object* x_1959; lean_object* x_1960; lean_object* x_1961; +lean_dec(x_163); +lean_dec(x_5); +lean_dec(x_4); +lean_dec(x_2); +lean_dec(x_1); +x_1957 = lean_ctor_get(x_1935, 1); +lean_inc(x_1957); +if (lean_is_exclusive(x_1935)) { + lean_ctor_release(x_1935, 0); + lean_ctor_release(x_1935, 1); + x_1958 = x_1935; +} else { + lean_dec_ref(x_1935); + x_1958 = lean_box(0); +} +x_1959 = lean_ctor_get(x_1937, 0); +lean_inc(x_1959); +lean_dec(x_1937); +if (lean_is_scalar(x_1958)) { + x_1960 = lean_alloc_ctor(0, 2, 0); +} else { + x_1960 = x_1958; +} +lean_ctor_set(x_1960, 0, x_1959); +lean_ctor_set(x_1960, 1, x_1957); +if (lean_is_scalar(x_1509)) { + x_1961 = lean_alloc_ctor(0, 2, 0); +} else { + x_1961 = x_1509; +} +lean_ctor_set(x_1961, 0, x_1960); +lean_ctor_set(x_1961, 1, x_1936); +return x_1961; +} +} +} +} +default: +{ +lean_object* x_2063; uint8_t x_2064; lean_object* x_2065; lean_object* x_2066; lean_object* x_2067; lean_object* x_2068; lean_object* x_2069; lean_object* x_2070; lean_object* x_2071; lean_object* x_2072; lean_object* x_2073; +lean_dec(x_1510); +lean_dec(x_1509); +lean_dec(x_163); +lean_dec(x_155); +lean_dec(x_154); +lean_dec(x_2); +lean_dec(x_1); +if (lean_is_exclusive(x_1515)) { + lean_ctor_release(x_1515, 0); + x_2063 = x_1515; +} else { + lean_dec_ref(x_1515); + x_2063 = lean_box(0); +} +x_2064 = 1; +x_2065 = l_Lean_IR_ToIR_lowerLet___closed__14; +x_2066 = l_Lean_Name_toString(x_153, x_2064, x_2065); +if (lean_is_scalar(x_2063)) { + x_2067 = lean_alloc_ctor(3, 1, 0); +} else { + x_2067 = x_2063; + lean_ctor_set_tag(x_2067, 3); +} +lean_ctor_set(x_2067, 0, x_2066); +x_2068 = l_Lean_IR_ToIR_lowerLet___closed__29; +x_2069 = lean_alloc_ctor(5, 2, 0); +lean_ctor_set(x_2069, 0, x_2068); +lean_ctor_set(x_2069, 1, x_2067); +x_2070 = l_Lean_IR_ToIR_lowerLet___closed__31; +x_2071 = lean_alloc_ctor(5, 2, 0); +lean_ctor_set(x_2071, 0, x_2069); +lean_ctor_set(x_2071, 1, x_2070); +x_2072 = l_Lean_MessageData_ofFormat(x_2071); +x_2073 = l_Lean_throwError___at_Lean_IR_ToIR_lowerLet___spec__1(x_2072, x_1505, x_4, x_5, x_1508); +lean_dec(x_5); 
+lean_dec(x_4); +lean_dec(x_1505); +return x_2073; +} +} +} +} +} +else +{ +uint8_t x_2074; +lean_dec(x_163); +lean_dec(x_155); +lean_dec(x_154); +lean_dec(x_5); +lean_dec(x_4); +lean_dec(x_2); +lean_dec(x_1); +x_2074 = !lean_is_exclusive(x_165); +if (x_2074 == 0) +{ +lean_object* x_2075; lean_object* x_2076; lean_object* x_2077; +x_2075 = lean_ctor_get(x_165, 0); +lean_dec(x_2075); +x_2076 = lean_ctor_get(x_167, 0); +lean_inc(x_2076); +lean_dec(x_167); +lean_ctor_set(x_165, 0, x_2076); +if (lean_is_scalar(x_161)) { + x_2077 = lean_alloc_ctor(0, 2, 0); +} else { + x_2077 = x_161; +} +lean_ctor_set(x_2077, 0, x_165); +lean_ctor_set(x_2077, 1, x_166); +return x_2077; +} +else +{ +lean_object* x_2078; lean_object* x_2079; lean_object* x_2080; lean_object* x_2081; +x_2078 = lean_ctor_get(x_165, 1); +lean_inc(x_2078); +lean_dec(x_165); +x_2079 = lean_ctor_get(x_167, 0); +lean_inc(x_2079); +lean_dec(x_167); +x_2080 = lean_alloc_ctor(0, 2, 0); +lean_ctor_set(x_2080, 0, x_2079); +lean_ctor_set(x_2080, 1, x_2078); +if (lean_is_scalar(x_161)) { + x_2081 = lean_alloc_ctor(0, 2, 0); +} else { + x_2081 = x_161; +} +lean_ctor_set(x_2081, 0, x_2080); +lean_ctor_set(x_2081, 1, x_166); +return x_2081; +} +} +} +} +else +{ +lean_object* x_2363; lean_object* x_2364; lean_object* x_2365; lean_object* x_2366; lean_object* x_2944; lean_object* x_2945; +x_2363 = lean_ctor_get(x_159, 0); +x_2364 = lean_ctor_get(x_159, 1); +lean_inc(x_2364); +lean_inc(x_2363); +lean_dec(x_159); +x_2944 = l_Lean_Compiler_LCNF_getMonoDecl_x3f(x_153, x_4, x_5, x_160); +x_2945 = lean_ctor_get(x_2944, 0); +lean_inc(x_2945); +if (lean_obj_tag(x_2945) == 0) +{ +lean_object* x_2946; lean_object* x_2947; lean_object* x_2948; +x_2946 = lean_ctor_get(x_2944, 1); +lean_inc(x_2946); +lean_dec(x_2944); +x_2947 = lean_box(0); +x_2948 = lean_alloc_ctor(0, 2, 0); +lean_ctor_set(x_2948, 0, x_2947); +lean_ctor_set(x_2948, 1, x_2364); +x_2365 = x_2948; +x_2366 = x_2946; +goto block_2943; +} +else +{ +lean_object* x_2949; lean_object* x_2950; lean_object* x_2951; lean_object* x_2952; lean_object* x_2953; lean_object* x_2954; lean_object* x_2955; uint8_t x_2956; +x_2949 = lean_ctor_get(x_2944, 1); +lean_inc(x_2949); +if (lean_is_exclusive(x_2944)) { + lean_ctor_release(x_2944, 0); + lean_ctor_release(x_2944, 1); + x_2950 = x_2944; +} else { + lean_dec_ref(x_2944); + x_2950 = lean_box(0); +} +x_2951 = lean_ctor_get(x_2945, 0); +lean_inc(x_2951); +if (lean_is_exclusive(x_2945)) { + lean_ctor_release(x_2945, 0); + x_2952 = x_2945; +} else { + lean_dec_ref(x_2945); + x_2952 = lean_box(0); +} +x_2953 = lean_array_get_size(x_2363); +x_2954 = lean_ctor_get(x_2951, 3); +lean_inc(x_2954); +lean_dec(x_2951); +x_2955 = lean_array_get_size(x_2954); +lean_dec(x_2954); +x_2956 = lean_nat_dec_lt(x_2953, x_2955); +if (x_2956 == 0) +{ +uint8_t x_2957; +x_2957 = lean_nat_dec_eq(x_2953, x_2955); +if (x_2957 == 0) +{ +lean_object* x_2958; lean_object* x_2959; lean_object* x_2960; lean_object* x_2961; lean_object* x_2962; lean_object* x_2963; lean_object* x_2964; lean_object* x_2965; lean_object* x_2966; lean_object* x_2967; lean_object* x_2968; lean_object* x_2969; lean_object* x_2970; lean_object* x_2971; lean_object* x_2972; lean_object* x_2973; lean_object* x_2974; +x_2958 = lean_unsigned_to_nat(0u); +x_2959 = l_Array_extract___rarg(x_2363, x_2958, x_2955); +x_2960 = l_Array_extract___rarg(x_2363, x_2955, x_2953); +lean_dec(x_2953); +if (lean_is_scalar(x_2950)) { + x_2961 = lean_alloc_ctor(6, 2, 0); +} else { + x_2961 = x_2950; + lean_ctor_set_tag(x_2961, 6); +} 
+lean_ctor_set(x_2961, 0, x_153); +lean_ctor_set(x_2961, 1, x_2959); +x_2962 = lean_ctor_get(x_1, 0); +lean_inc(x_2962); +x_2963 = l_Lean_IR_ToIR_bindVar(x_2962, x_2364, x_4, x_5, x_2949); +x_2964 = lean_ctor_get(x_2963, 0); +lean_inc(x_2964); +x_2965 = lean_ctor_get(x_2963, 1); +lean_inc(x_2965); +lean_dec(x_2963); +x_2966 = lean_ctor_get(x_2964, 0); +lean_inc(x_2966); +x_2967 = lean_ctor_get(x_2964, 1); +lean_inc(x_2967); +lean_dec(x_2964); +x_2968 = l_Lean_IR_ToIR_newVar(x_2967, x_4, x_5, x_2965); +x_2969 = lean_ctor_get(x_2968, 0); +lean_inc(x_2969); +x_2970 = lean_ctor_get(x_2968, 1); +lean_inc(x_2970); +lean_dec(x_2968); +x_2971 = lean_ctor_get(x_2969, 0); +lean_inc(x_2971); +x_2972 = lean_ctor_get(x_2969, 1); +lean_inc(x_2972); +lean_dec(x_2969); +x_2973 = lean_ctor_get(x_1, 2); +lean_inc(x_2973); +lean_inc(x_5); +lean_inc(x_4); +x_2974 = l_Lean_IR_ToIR_lowerType(x_2973, x_2972, x_4, x_5, x_2970); +if (lean_obj_tag(x_2974) == 0) +{ +lean_object* x_2975; lean_object* x_2976; lean_object* x_2977; lean_object* x_2978; lean_object* x_2979; +x_2975 = lean_ctor_get(x_2974, 0); +lean_inc(x_2975); +x_2976 = lean_ctor_get(x_2974, 1); +lean_inc(x_2976); +lean_dec(x_2974); +x_2977 = lean_ctor_get(x_2975, 0); +lean_inc(x_2977); +x_2978 = lean_ctor_get(x_2975, 1); +lean_inc(x_2978); +lean_dec(x_2975); +lean_inc(x_5); +lean_inc(x_4); +lean_inc(x_2); +x_2979 = l_Lean_IR_ToIR_lowerLet___lambda__3(x_2, x_2971, x_2960, x_2966, x_2961, x_2977, x_2978, x_4, x_5, x_2976); +if (lean_obj_tag(x_2979) == 0) +{ +lean_object* x_2980; lean_object* x_2981; lean_object* x_2982; lean_object* x_2983; lean_object* x_2984; lean_object* x_2985; lean_object* x_2986; +x_2980 = lean_ctor_get(x_2979, 0); +lean_inc(x_2980); +x_2981 = lean_ctor_get(x_2979, 1); +lean_inc(x_2981); +lean_dec(x_2979); +x_2982 = lean_ctor_get(x_2980, 0); +lean_inc(x_2982); +x_2983 = lean_ctor_get(x_2980, 1); +lean_inc(x_2983); +if (lean_is_exclusive(x_2980)) { + lean_ctor_release(x_2980, 0); + lean_ctor_release(x_2980, 1); + x_2984 = x_2980; +} else { + lean_dec_ref(x_2980); + x_2984 = lean_box(0); +} +if (lean_is_scalar(x_2952)) { + x_2985 = lean_alloc_ctor(1, 1, 0); +} else { + x_2985 = x_2952; +} +lean_ctor_set(x_2985, 0, x_2982); +if (lean_is_scalar(x_2984)) { + x_2986 = lean_alloc_ctor(0, 2, 0); +} else { + x_2986 = x_2984; +} +lean_ctor_set(x_2986, 0, x_2985); +lean_ctor_set(x_2986, 1, x_2983); +x_2365 = x_2986; +x_2366 = x_2981; +goto block_2943; +} +else +{ +lean_object* x_2987; lean_object* x_2988; lean_object* x_2989; lean_object* x_2990; +lean_dec(x_2952); +lean_dec(x_2363); +lean_dec(x_161); +lean_dec(x_155); +lean_dec(x_154); +lean_dec(x_5); +lean_dec(x_4); +lean_dec(x_2); +lean_dec(x_1); +x_2987 = lean_ctor_get(x_2979, 0); +lean_inc(x_2987); +x_2988 = lean_ctor_get(x_2979, 1); +lean_inc(x_2988); +if (lean_is_exclusive(x_2979)) { + lean_ctor_release(x_2979, 0); + lean_ctor_release(x_2979, 1); + x_2989 = x_2979; +} else { + lean_dec_ref(x_2979); + x_2989 = lean_box(0); +} +if (lean_is_scalar(x_2989)) { + x_2990 = lean_alloc_ctor(1, 2, 0); +} else { + x_2990 = x_2989; +} +lean_ctor_set(x_2990, 0, x_2987); +lean_ctor_set(x_2990, 1, x_2988); +return x_2990; +} +} +else +{ +lean_object* x_2991; lean_object* x_2992; lean_object* x_2993; lean_object* x_2994; +lean_dec(x_2971); +lean_dec(x_2966); +lean_dec(x_2961); +lean_dec(x_2960); +lean_dec(x_2952); +lean_dec(x_2363); +lean_dec(x_161); +lean_dec(x_155); +lean_dec(x_154); +lean_dec(x_5); +lean_dec(x_4); +lean_dec(x_2); +lean_dec(x_1); +x_2991 = lean_ctor_get(x_2974, 0); 
+lean_inc(x_2991); +x_2992 = lean_ctor_get(x_2974, 1); +lean_inc(x_2992); +if (lean_is_exclusive(x_2974)) { + lean_ctor_release(x_2974, 0); + lean_ctor_release(x_2974, 1); + x_2993 = x_2974; +} else { + lean_dec_ref(x_2974); + x_2993 = lean_box(0); +} +if (lean_is_scalar(x_2993)) { + x_2994 = lean_alloc_ctor(1, 2, 0); +} else { + x_2994 = x_2993; +} +lean_ctor_set(x_2994, 0, x_2991); +lean_ctor_set(x_2994, 1, x_2992); +return x_2994; +} +} +else +{ +lean_object* x_2995; lean_object* x_2996; lean_object* x_2997; lean_object* x_2998; lean_object* x_2999; lean_object* x_3000; lean_object* x_3001; lean_object* x_3002; lean_object* x_3003; +lean_dec(x_2955); +lean_dec(x_2953); +lean_inc(x_2363); +if (lean_is_scalar(x_2950)) { + x_2995 = lean_alloc_ctor(6, 2, 0); +} else { + x_2995 = x_2950; + lean_ctor_set_tag(x_2995, 6); +} +lean_ctor_set(x_2995, 0, x_153); +lean_ctor_set(x_2995, 1, x_2363); +x_2996 = lean_ctor_get(x_1, 0); +lean_inc(x_2996); +x_2997 = l_Lean_IR_ToIR_bindVar(x_2996, x_2364, x_4, x_5, x_2949); +x_2998 = lean_ctor_get(x_2997, 0); +lean_inc(x_2998); +x_2999 = lean_ctor_get(x_2997, 1); +lean_inc(x_2999); +lean_dec(x_2997); +x_3000 = lean_ctor_get(x_2998, 0); +lean_inc(x_3000); +x_3001 = lean_ctor_get(x_2998, 1); +lean_inc(x_3001); +lean_dec(x_2998); +x_3002 = lean_ctor_get(x_1, 2); +lean_inc(x_3002); +lean_inc(x_5); +lean_inc(x_4); +x_3003 = l_Lean_IR_ToIR_lowerType(x_3002, x_3001, x_4, x_5, x_2999); +if (lean_obj_tag(x_3003) == 0) +{ +lean_object* x_3004; lean_object* x_3005; lean_object* x_3006; lean_object* x_3007; lean_object* x_3008; +x_3004 = lean_ctor_get(x_3003, 0); +lean_inc(x_3004); +x_3005 = lean_ctor_get(x_3003, 1); +lean_inc(x_3005); +lean_dec(x_3003); +x_3006 = lean_ctor_get(x_3004, 0); +lean_inc(x_3006); +x_3007 = lean_ctor_get(x_3004, 1); +lean_inc(x_3007); +lean_dec(x_3004); +lean_inc(x_5); +lean_inc(x_4); +lean_inc(x_2); +x_3008 = l_Lean_IR_ToIR_lowerLet___lambda__1(x_2, x_3000, x_2995, x_3006, x_3007, x_4, x_5, x_3005); +if (lean_obj_tag(x_3008) == 0) +{ +lean_object* x_3009; lean_object* x_3010; lean_object* x_3011; lean_object* x_3012; lean_object* x_3013; lean_object* x_3014; lean_object* x_3015; +x_3009 = lean_ctor_get(x_3008, 0); +lean_inc(x_3009); +x_3010 = lean_ctor_get(x_3008, 1); +lean_inc(x_3010); +lean_dec(x_3008); +x_3011 = lean_ctor_get(x_3009, 0); +lean_inc(x_3011); +x_3012 = lean_ctor_get(x_3009, 1); +lean_inc(x_3012); +if (lean_is_exclusive(x_3009)) { + lean_ctor_release(x_3009, 0); + lean_ctor_release(x_3009, 1); + x_3013 = x_3009; +} else { + lean_dec_ref(x_3009); + x_3013 = lean_box(0); +} +if (lean_is_scalar(x_2952)) { + x_3014 = lean_alloc_ctor(1, 1, 0); +} else { + x_3014 = x_2952; +} +lean_ctor_set(x_3014, 0, x_3011); +if (lean_is_scalar(x_3013)) { + x_3015 = lean_alloc_ctor(0, 2, 0); +} else { + x_3015 = x_3013; +} +lean_ctor_set(x_3015, 0, x_3014); +lean_ctor_set(x_3015, 1, x_3012); +x_2365 = x_3015; +x_2366 = x_3010; +goto block_2943; +} +else +{ +lean_object* x_3016; lean_object* x_3017; lean_object* x_3018; lean_object* x_3019; +lean_dec(x_2952); +lean_dec(x_2363); +lean_dec(x_161); +lean_dec(x_155); +lean_dec(x_154); +lean_dec(x_5); +lean_dec(x_4); +lean_dec(x_2); +lean_dec(x_1); +x_3016 = lean_ctor_get(x_3008, 0); +lean_inc(x_3016); +x_3017 = lean_ctor_get(x_3008, 1); +lean_inc(x_3017); +if (lean_is_exclusive(x_3008)) { + lean_ctor_release(x_3008, 0); + lean_ctor_release(x_3008, 1); + x_3018 = x_3008; +} else { + lean_dec_ref(x_3008); + x_3018 = lean_box(0); +} +if (lean_is_scalar(x_3018)) { + x_3019 = lean_alloc_ctor(1, 2, 0); +} 
else { + x_3019 = x_3018; +} +lean_ctor_set(x_3019, 0, x_3016); +lean_ctor_set(x_3019, 1, x_3017); +return x_3019; +} +} +else +{ +lean_object* x_3020; lean_object* x_3021; lean_object* x_3022; lean_object* x_3023; +lean_dec(x_3000); +lean_dec(x_2995); +lean_dec(x_2952); +lean_dec(x_2363); +lean_dec(x_161); +lean_dec(x_155); +lean_dec(x_154); +lean_dec(x_5); +lean_dec(x_4); +lean_dec(x_2); +lean_dec(x_1); +x_3020 = lean_ctor_get(x_3003, 0); +lean_inc(x_3020); +x_3021 = lean_ctor_get(x_3003, 1); +lean_inc(x_3021); +if (lean_is_exclusive(x_3003)) { + lean_ctor_release(x_3003, 0); + lean_ctor_release(x_3003, 1); + x_3022 = x_3003; +} else { + lean_dec_ref(x_3003); + x_3022 = lean_box(0); +} +if (lean_is_scalar(x_3022)) { + x_3023 = lean_alloc_ctor(1, 2, 0); +} else { + x_3023 = x_3022; +} +lean_ctor_set(x_3023, 0, x_3020); +lean_ctor_set(x_3023, 1, x_3021); +return x_3023; +} +} +} +else +{ +lean_object* x_3024; lean_object* x_3025; lean_object* x_3026; lean_object* x_3027; lean_object* x_3028; lean_object* x_3029; lean_object* x_3030; lean_object* x_3031; lean_object* x_3032; +lean_dec(x_2955); +lean_dec(x_2953); +lean_inc(x_2363); +if (lean_is_scalar(x_2950)) { + x_3024 = lean_alloc_ctor(7, 2, 0); +} else { + x_3024 = x_2950; + lean_ctor_set_tag(x_3024, 7); +} +lean_ctor_set(x_3024, 0, x_153); +lean_ctor_set(x_3024, 1, x_2363); +x_3025 = lean_ctor_get(x_1, 0); +lean_inc(x_3025); +x_3026 = l_Lean_IR_ToIR_bindVar(x_3025, x_2364, x_4, x_5, x_2949); +x_3027 = lean_ctor_get(x_3026, 0); +lean_inc(x_3027); +x_3028 = lean_ctor_get(x_3026, 1); +lean_inc(x_3028); +lean_dec(x_3026); +x_3029 = lean_ctor_get(x_3027, 0); +lean_inc(x_3029); +x_3030 = lean_ctor_get(x_3027, 1); +lean_inc(x_3030); +lean_dec(x_3027); +x_3031 = lean_box(7); +lean_inc(x_5); +lean_inc(x_4); +lean_inc(x_2); +x_3032 = l_Lean_IR_ToIR_lowerLet___lambda__1(x_2, x_3029, x_3024, x_3031, x_3030, x_4, x_5, x_3028); +if (lean_obj_tag(x_3032) == 0) +{ +lean_object* x_3033; lean_object* x_3034; lean_object* x_3035; lean_object* x_3036; lean_object* x_3037; lean_object* x_3038; lean_object* x_3039; +x_3033 = lean_ctor_get(x_3032, 0); +lean_inc(x_3033); +x_3034 = lean_ctor_get(x_3032, 1); +lean_inc(x_3034); +lean_dec(x_3032); +x_3035 = lean_ctor_get(x_3033, 0); +lean_inc(x_3035); +x_3036 = lean_ctor_get(x_3033, 1); +lean_inc(x_3036); +if (lean_is_exclusive(x_3033)) { + lean_ctor_release(x_3033, 0); + lean_ctor_release(x_3033, 1); + x_3037 = x_3033; +} else { + lean_dec_ref(x_3033); + x_3037 = lean_box(0); +} +if (lean_is_scalar(x_2952)) { + x_3038 = lean_alloc_ctor(1, 1, 0); +} else { + x_3038 = x_2952; +} +lean_ctor_set(x_3038, 0, x_3035); +if (lean_is_scalar(x_3037)) { + x_3039 = lean_alloc_ctor(0, 2, 0); +} else { + x_3039 = x_3037; +} +lean_ctor_set(x_3039, 0, x_3038); +lean_ctor_set(x_3039, 1, x_3036); +x_2365 = x_3039; +x_2366 = x_3034; +goto block_2943; +} +else +{ +lean_object* x_3040; lean_object* x_3041; lean_object* x_3042; lean_object* x_3043; +lean_dec(x_2952); +lean_dec(x_2363); +lean_dec(x_161); +lean_dec(x_155); +lean_dec(x_154); +lean_dec(x_5); +lean_dec(x_4); +lean_dec(x_2); +lean_dec(x_1); +x_3040 = lean_ctor_get(x_3032, 0); +lean_inc(x_3040); +x_3041 = lean_ctor_get(x_3032, 1); +lean_inc(x_3041); +if (lean_is_exclusive(x_3032)) { + lean_ctor_release(x_3032, 0); + lean_ctor_release(x_3032, 1); + x_3042 = x_3032; +} else { + lean_dec_ref(x_3032); + x_3042 = lean_box(0); +} +if (lean_is_scalar(x_3042)) { + x_3043 = lean_alloc_ctor(1, 2, 0); +} else { + x_3043 = x_3042; +} +lean_ctor_set(x_3043, 0, x_3040); 
+lean_ctor_set(x_3043, 1, x_3041); +return x_3043; +} +} +} +block_2943: +{ +lean_object* x_2367; +x_2367 = lean_ctor_get(x_2365, 0); +lean_inc(x_2367); +if (lean_obj_tag(x_2367) == 0) +{ +lean_object* x_2368; lean_object* x_2369; lean_object* x_2370; lean_object* x_2371; lean_object* x_2372; lean_object* x_2373; lean_object* x_2374; uint8_t x_2375; lean_object* x_2376; +lean_dec(x_161); +x_2368 = lean_ctor_get(x_2365, 1); +lean_inc(x_2368); +if (lean_is_exclusive(x_2365)) { + lean_ctor_release(x_2365, 0); + lean_ctor_release(x_2365, 1); + x_2369 = x_2365; +} else { + lean_dec_ref(x_2365); + x_2369 = lean_box(0); +} +x_2370 = lean_st_ref_get(x_5, x_2366); +x_2371 = lean_ctor_get(x_2370, 0); +lean_inc(x_2371); +x_2372 = lean_ctor_get(x_2370, 1); +lean_inc(x_2372); +if (lean_is_exclusive(x_2370)) { + lean_ctor_release(x_2370, 0); + lean_ctor_release(x_2370, 1); + x_2373 = x_2370; +} else { + lean_dec_ref(x_2370); + x_2373 = lean_box(0); +} +x_2374 = lean_ctor_get(x_2371, 0); +lean_inc(x_2374); +lean_dec(x_2371); +x_2375 = 0; +lean_inc(x_2374); +x_2376 = l_Lean_Environment_find_x3f(x_2374, x_153, x_2375); +if (lean_obj_tag(x_2376) == 0) +{ +lean_object* x_2377; lean_object* x_2378; +lean_dec(x_2374); +lean_dec(x_2373); +lean_dec(x_2369); +lean_dec(x_2363); +lean_dec(x_155); +lean_dec(x_154); +lean_dec(x_2); +lean_dec(x_1); +x_2377 = l_Lean_IR_ToIR_lowerLet___closed__6; +x_2378 = l_panic___at_Lean_IR_ToIR_lowerAlt_loop___spec__1(x_2377, x_2368, x_4, x_5, x_2372); +return x_2378; +} +else +{ +lean_object* x_2379; +x_2379 = lean_ctor_get(x_2376, 0); +lean_inc(x_2379); +lean_dec(x_2376); +switch (lean_obj_tag(x_2379)) { +case 0: +{ +lean_object* x_2380; lean_object* x_2381; uint8_t x_2382; +lean_dec(x_2374); +lean_dec(x_155); +lean_dec(x_154); +if (lean_is_exclusive(x_2379)) { + lean_ctor_release(x_2379, 0); + x_2380 = x_2379; +} else { + lean_dec_ref(x_2379); + x_2380 = lean_box(0); +} +x_2381 = l_Lean_IR_ToIR_lowerLet___closed__9; +x_2382 = lean_name_eq(x_153, x_2381); +if (x_2382 == 0) +{ +lean_object* x_2383; uint8_t x_2384; +x_2383 = l_Lean_IR_ToIR_lowerLet___closed__11; +x_2384 = lean_name_eq(x_153, x_2383); +if (x_2384 == 0) +{ +lean_object* x_2385; lean_object* x_2386; lean_object* x_2387; +lean_dec(x_2373); +lean_dec(x_2369); +x_2385 = l_Lean_IR_ToIR_findDecl(x_153, x_2368, x_4, x_5, x_2372); +x_2386 = lean_ctor_get(x_2385, 0); +lean_inc(x_2386); +x_2387 = lean_ctor_get(x_2386, 0); +lean_inc(x_2387); +if (lean_obj_tag(x_2387) == 0) +{ +lean_object* x_2388; lean_object* x_2389; lean_object* x_2390; lean_object* x_2391; uint8_t x_2392; lean_object* x_2393; lean_object* x_2394; lean_object* x_2395; lean_object* x_2396; lean_object* x_2397; lean_object* x_2398; lean_object* x_2399; lean_object* x_2400; lean_object* x_2401; +lean_dec(x_2363); +lean_dec(x_2); +lean_dec(x_1); +x_2388 = lean_ctor_get(x_2385, 1); +lean_inc(x_2388); +if (lean_is_exclusive(x_2385)) { + lean_ctor_release(x_2385, 0); + lean_ctor_release(x_2385, 1); + x_2389 = x_2385; +} else { + lean_dec_ref(x_2385); + x_2389 = lean_box(0); +} +x_2390 = lean_ctor_get(x_2386, 1); +lean_inc(x_2390); +if (lean_is_exclusive(x_2386)) { + lean_ctor_release(x_2386, 0); + lean_ctor_release(x_2386, 1); + x_2391 = x_2386; +} else { + lean_dec_ref(x_2386); + x_2391 = lean_box(0); +} +x_2392 = 1; +x_2393 = l_Lean_IR_ToIR_lowerLet___closed__14; +x_2394 = l_Lean_Name_toString(x_153, x_2392, x_2393); +if (lean_is_scalar(x_2380)) { + x_2395 = lean_alloc_ctor(3, 1, 0); +} else { + x_2395 = x_2380; + lean_ctor_set_tag(x_2395, 3); +} 
+lean_ctor_set(x_2395, 0, x_2394); +x_2396 = l_Lean_IR_ToIR_lowerLet___closed__13; +if (lean_is_scalar(x_2391)) { + x_2397 = lean_alloc_ctor(5, 2, 0); +} else { + x_2397 = x_2391; + lean_ctor_set_tag(x_2397, 5); +} +lean_ctor_set(x_2397, 0, x_2396); +lean_ctor_set(x_2397, 1, x_2395); +x_2398 = l_Lean_IR_ToIR_lowerLet___closed__16; +if (lean_is_scalar(x_2389)) { + x_2399 = lean_alloc_ctor(5, 2, 0); +} else { + x_2399 = x_2389; + lean_ctor_set_tag(x_2399, 5); +} +lean_ctor_set(x_2399, 0, x_2397); +lean_ctor_set(x_2399, 1, x_2398); +x_2400 = l_Lean_MessageData_ofFormat(x_2399); +x_2401 = l_Lean_throwError___at_Lean_IR_ToIR_lowerLet___spec__1(x_2400, x_2390, x_4, x_5, x_2388); +lean_dec(x_5); +lean_dec(x_4); +lean_dec(x_2390); +return x_2401; +} +else +{ +lean_object* x_2402; lean_object* x_2403; lean_object* x_2404; lean_object* x_2405; lean_object* x_2406; lean_object* x_2407; lean_object* x_2408; uint8_t x_2409; +lean_dec(x_2380); +x_2402 = lean_ctor_get(x_2385, 1); +lean_inc(x_2402); +lean_dec(x_2385); +x_2403 = lean_ctor_get(x_2386, 1); +lean_inc(x_2403); +if (lean_is_exclusive(x_2386)) { + lean_ctor_release(x_2386, 0); + lean_ctor_release(x_2386, 1); + x_2404 = x_2386; +} else { + lean_dec_ref(x_2386); + x_2404 = lean_box(0); +} +x_2405 = lean_ctor_get(x_2387, 0); +lean_inc(x_2405); +lean_dec(x_2387); +x_2406 = lean_array_get_size(x_2363); +x_2407 = l_Lean_IR_Decl_params(x_2405); +lean_dec(x_2405); +x_2408 = lean_array_get_size(x_2407); +lean_dec(x_2407); +x_2409 = lean_nat_dec_lt(x_2406, x_2408); +if (x_2409 == 0) +{ +uint8_t x_2410; +x_2410 = lean_nat_dec_eq(x_2406, x_2408); +if (x_2410 == 0) +{ +lean_object* x_2411; lean_object* x_2412; lean_object* x_2413; lean_object* x_2414; lean_object* x_2415; lean_object* x_2416; lean_object* x_2417; lean_object* x_2418; lean_object* x_2419; lean_object* x_2420; lean_object* x_2421; lean_object* x_2422; lean_object* x_2423; lean_object* x_2424; lean_object* x_2425; lean_object* x_2426; lean_object* x_2427; +x_2411 = lean_unsigned_to_nat(0u); +x_2412 = l_Array_extract___rarg(x_2363, x_2411, x_2408); +x_2413 = l_Array_extract___rarg(x_2363, x_2408, x_2406); +lean_dec(x_2406); +lean_dec(x_2363); +if (lean_is_scalar(x_2404)) { + x_2414 = lean_alloc_ctor(6, 2, 0); +} else { + x_2414 = x_2404; + lean_ctor_set_tag(x_2414, 6); +} +lean_ctor_set(x_2414, 0, x_153); +lean_ctor_set(x_2414, 1, x_2412); +x_2415 = lean_ctor_get(x_1, 0); +lean_inc(x_2415); +x_2416 = l_Lean_IR_ToIR_bindVar(x_2415, x_2403, x_4, x_5, x_2402); +x_2417 = lean_ctor_get(x_2416, 0); +lean_inc(x_2417); +x_2418 = lean_ctor_get(x_2416, 1); +lean_inc(x_2418); +lean_dec(x_2416); +x_2419 = lean_ctor_get(x_2417, 0); +lean_inc(x_2419); +x_2420 = lean_ctor_get(x_2417, 1); +lean_inc(x_2420); +lean_dec(x_2417); +x_2421 = l_Lean_IR_ToIR_newVar(x_2420, x_4, x_5, x_2418); +x_2422 = lean_ctor_get(x_2421, 0); +lean_inc(x_2422); +x_2423 = lean_ctor_get(x_2421, 1); +lean_inc(x_2423); +lean_dec(x_2421); +x_2424 = lean_ctor_get(x_2422, 0); +lean_inc(x_2424); +x_2425 = lean_ctor_get(x_2422, 1); +lean_inc(x_2425); +lean_dec(x_2422); +x_2426 = lean_ctor_get(x_1, 2); +lean_inc(x_2426); +lean_dec(x_1); +lean_inc(x_5); +lean_inc(x_4); +x_2427 = l_Lean_IR_ToIR_lowerType(x_2426, x_2425, x_4, x_5, x_2423); +if (lean_obj_tag(x_2427) == 0) +{ +lean_object* x_2428; lean_object* x_2429; lean_object* x_2430; lean_object* x_2431; lean_object* x_2432; +x_2428 = lean_ctor_get(x_2427, 0); +lean_inc(x_2428); +x_2429 = lean_ctor_get(x_2427, 1); +lean_inc(x_2429); +lean_dec(x_2427); +x_2430 = lean_ctor_get(x_2428, 0); 
+lean_inc(x_2430); +x_2431 = lean_ctor_get(x_2428, 1); +lean_inc(x_2431); +lean_dec(x_2428); +x_2432 = l_Lean_IR_ToIR_lowerLet___lambda__3(x_2, x_2424, x_2413, x_2419, x_2414, x_2430, x_2431, x_4, x_5, x_2429); +return x_2432; +} +else +{ +lean_object* x_2433; lean_object* x_2434; lean_object* x_2435; lean_object* x_2436; +lean_dec(x_2424); +lean_dec(x_2419); +lean_dec(x_2414); +lean_dec(x_2413); +lean_dec(x_5); +lean_dec(x_4); +lean_dec(x_2); +x_2433 = lean_ctor_get(x_2427, 0); +lean_inc(x_2433); +x_2434 = lean_ctor_get(x_2427, 1); +lean_inc(x_2434); +if (lean_is_exclusive(x_2427)) { + lean_ctor_release(x_2427, 0); + lean_ctor_release(x_2427, 1); + x_2435 = x_2427; +} else { + lean_dec_ref(x_2427); + x_2435 = lean_box(0); +} +if (lean_is_scalar(x_2435)) { + x_2436 = lean_alloc_ctor(1, 2, 0); +} else { + x_2436 = x_2435; +} +lean_ctor_set(x_2436, 0, x_2433); +lean_ctor_set(x_2436, 1, x_2434); +return x_2436; +} +} +else +{ +lean_object* x_2437; lean_object* x_2438; lean_object* x_2439; lean_object* x_2440; lean_object* x_2441; lean_object* x_2442; lean_object* x_2443; lean_object* x_2444; lean_object* x_2445; +lean_dec(x_2408); +lean_dec(x_2406); +if (lean_is_scalar(x_2404)) { + x_2437 = lean_alloc_ctor(6, 2, 0); +} else { + x_2437 = x_2404; + lean_ctor_set_tag(x_2437, 6); +} +lean_ctor_set(x_2437, 0, x_153); +lean_ctor_set(x_2437, 1, x_2363); +x_2438 = lean_ctor_get(x_1, 0); +lean_inc(x_2438); +x_2439 = l_Lean_IR_ToIR_bindVar(x_2438, x_2403, x_4, x_5, x_2402); +x_2440 = lean_ctor_get(x_2439, 0); +lean_inc(x_2440); +x_2441 = lean_ctor_get(x_2439, 1); +lean_inc(x_2441); +lean_dec(x_2439); +x_2442 = lean_ctor_get(x_2440, 0); +lean_inc(x_2442); +x_2443 = lean_ctor_get(x_2440, 1); +lean_inc(x_2443); +lean_dec(x_2440); +x_2444 = lean_ctor_get(x_1, 2); +lean_inc(x_2444); +lean_dec(x_1); +lean_inc(x_5); +lean_inc(x_4); +x_2445 = l_Lean_IR_ToIR_lowerType(x_2444, x_2443, x_4, x_5, x_2441); +if (lean_obj_tag(x_2445) == 0) +{ +lean_object* x_2446; lean_object* x_2447; lean_object* x_2448; lean_object* x_2449; lean_object* x_2450; +x_2446 = lean_ctor_get(x_2445, 0); +lean_inc(x_2446); +x_2447 = lean_ctor_get(x_2445, 1); +lean_inc(x_2447); +lean_dec(x_2445); +x_2448 = lean_ctor_get(x_2446, 0); +lean_inc(x_2448); +x_2449 = lean_ctor_get(x_2446, 1); +lean_inc(x_2449); +lean_dec(x_2446); +x_2450 = l_Lean_IR_ToIR_lowerLet___lambda__1(x_2, x_2442, x_2437, x_2448, x_2449, x_4, x_5, x_2447); +return x_2450; +} +else +{ +lean_object* x_2451; lean_object* x_2452; lean_object* x_2453; lean_object* x_2454; +lean_dec(x_2442); +lean_dec(x_2437); +lean_dec(x_5); +lean_dec(x_4); +lean_dec(x_2); +x_2451 = lean_ctor_get(x_2445, 0); +lean_inc(x_2451); +x_2452 = lean_ctor_get(x_2445, 1); +lean_inc(x_2452); +if (lean_is_exclusive(x_2445)) { + lean_ctor_release(x_2445, 0); + lean_ctor_release(x_2445, 1); + x_2453 = x_2445; +} else { + lean_dec_ref(x_2445); + x_2453 = lean_box(0); +} +if (lean_is_scalar(x_2453)) { + x_2454 = lean_alloc_ctor(1, 2, 0); +} else { + x_2454 = x_2453; +} +lean_ctor_set(x_2454, 0, x_2451); +lean_ctor_set(x_2454, 1, x_2452); +return x_2454; +} +} +} +else +{ +lean_object* x_2455; lean_object* x_2456; lean_object* x_2457; lean_object* x_2458; lean_object* x_2459; lean_object* x_2460; lean_object* x_2461; lean_object* x_2462; lean_object* x_2463; +lean_dec(x_2408); +lean_dec(x_2406); +if (lean_is_scalar(x_2404)) { + x_2455 = lean_alloc_ctor(7, 2, 0); +} else { + x_2455 = x_2404; + lean_ctor_set_tag(x_2455, 7); +} +lean_ctor_set(x_2455, 0, x_153); +lean_ctor_set(x_2455, 1, x_2363); +x_2456 = 
lean_ctor_get(x_1, 0); +lean_inc(x_2456); +lean_dec(x_1); +x_2457 = l_Lean_IR_ToIR_bindVar(x_2456, x_2403, x_4, x_5, x_2402); +x_2458 = lean_ctor_get(x_2457, 0); +lean_inc(x_2458); +x_2459 = lean_ctor_get(x_2457, 1); +lean_inc(x_2459); +lean_dec(x_2457); +x_2460 = lean_ctor_get(x_2458, 0); +lean_inc(x_2460); +x_2461 = lean_ctor_get(x_2458, 1); +lean_inc(x_2461); +lean_dec(x_2458); +x_2462 = lean_box(7); +x_2463 = l_Lean_IR_ToIR_lowerLet___lambda__1(x_2, x_2460, x_2455, x_2462, x_2461, x_4, x_5, x_2459); +return x_2463; +} +} +} +else +{ +lean_object* x_2464; lean_object* x_2465; lean_object* x_2466; +lean_dec(x_2380); +lean_dec(x_2363); +lean_dec(x_5); +lean_dec(x_4); +lean_dec(x_2); +lean_dec(x_1); +x_2464 = lean_box(13); +if (lean_is_scalar(x_2369)) { + x_2465 = lean_alloc_ctor(0, 2, 0); +} else { + x_2465 = x_2369; +} +lean_ctor_set(x_2465, 0, x_2464); +lean_ctor_set(x_2465, 1, x_2368); +if (lean_is_scalar(x_2373)) { + x_2466 = lean_alloc_ctor(0, 2, 0); +} else { + x_2466 = x_2373; +} +lean_ctor_set(x_2466, 0, x_2465); +lean_ctor_set(x_2466, 1, x_2372); +return x_2466; +} +} +else +{ +lean_object* x_2467; lean_object* x_2468; lean_object* x_2469; +lean_dec(x_2380); +lean_dec(x_2373); +lean_dec(x_2369); +x_2467 = l_Lean_IR_instInhabitedArg; +x_2468 = lean_unsigned_to_nat(2u); +x_2469 = lean_array_get(x_2467, x_2363, x_2468); +lean_dec(x_2363); +if (lean_obj_tag(x_2469) == 0) +{ +lean_object* x_2470; lean_object* x_2471; lean_object* x_2472; lean_object* x_2473; lean_object* x_2474; lean_object* x_2475; lean_object* x_2476; +x_2470 = lean_ctor_get(x_2469, 0); +lean_inc(x_2470); +lean_dec(x_2469); +x_2471 = lean_ctor_get(x_1, 0); +lean_inc(x_2471); +lean_dec(x_1); +x_2472 = l_Lean_IR_ToIR_bindVarToVarId(x_2471, x_2470, x_2368, x_4, x_5, x_2372); +x_2473 = lean_ctor_get(x_2472, 0); +lean_inc(x_2473); +x_2474 = lean_ctor_get(x_2472, 1); +lean_inc(x_2474); +lean_dec(x_2472); +x_2475 = lean_ctor_get(x_2473, 1); +lean_inc(x_2475); +lean_dec(x_2473); +x_2476 = l_Lean_IR_ToIR_lowerCode(x_2, x_2475, x_4, x_5, x_2474); +return x_2476; +} +else +{ +lean_object* x_2477; lean_object* x_2478; lean_object* x_2479; lean_object* x_2480; lean_object* x_2481; lean_object* x_2482; +x_2477 = lean_ctor_get(x_1, 0); +lean_inc(x_2477); +lean_dec(x_1); +x_2478 = l_Lean_IR_ToIR_bindErased(x_2477, x_2368, x_4, x_5, x_2372); +x_2479 = lean_ctor_get(x_2478, 0); +lean_inc(x_2479); +x_2480 = lean_ctor_get(x_2478, 1); +lean_inc(x_2480); +lean_dec(x_2478); +x_2481 = lean_ctor_get(x_2479, 1); +lean_inc(x_2481); +lean_dec(x_2479); +x_2482 = l_Lean_IR_ToIR_lowerCode(x_2, x_2481, x_4, x_5, x_2480); +return x_2482; +} +} +} +case 1: +{ +lean_object* x_2483; lean_object* x_2484; lean_object* x_2511; lean_object* x_2512; +lean_dec(x_2379); +lean_dec(x_2374); +lean_dec(x_155); +lean_dec(x_154); +x_2511 = l_Lean_Compiler_LCNF_getMonoDecl_x3f(x_153, x_4, x_5, x_2372); +x_2512 = lean_ctor_get(x_2511, 0); +lean_inc(x_2512); +if (lean_obj_tag(x_2512) == 0) +{ +lean_object* x_2513; lean_object* x_2514; lean_object* x_2515; +x_2513 = lean_ctor_get(x_2511, 1); +lean_inc(x_2513); +lean_dec(x_2511); +x_2514 = lean_box(0); +if (lean_is_scalar(x_2369)) { + x_2515 = lean_alloc_ctor(0, 2, 0); +} else { + x_2515 = x_2369; +} +lean_ctor_set(x_2515, 0, x_2514); +lean_ctor_set(x_2515, 1, x_2368); +x_2483 = x_2515; +x_2484 = x_2513; +goto block_2510; +} +else +{ +lean_object* x_2516; lean_object* x_2517; lean_object* x_2518; lean_object* x_2519; lean_object* x_2520; lean_object* x_2521; lean_object* x_2522; uint8_t x_2523; +lean_dec(x_2369); 
+x_2516 = lean_ctor_get(x_2511, 1); +lean_inc(x_2516); +if (lean_is_exclusive(x_2511)) { + lean_ctor_release(x_2511, 0); + lean_ctor_release(x_2511, 1); + x_2517 = x_2511; +} else { + lean_dec_ref(x_2511); + x_2517 = lean_box(0); +} +x_2518 = lean_ctor_get(x_2512, 0); +lean_inc(x_2518); +if (lean_is_exclusive(x_2512)) { + lean_ctor_release(x_2512, 0); + x_2519 = x_2512; +} else { + lean_dec_ref(x_2512); + x_2519 = lean_box(0); +} +x_2520 = lean_array_get_size(x_2363); +x_2521 = lean_ctor_get(x_2518, 3); +lean_inc(x_2521); +lean_dec(x_2518); +x_2522 = lean_array_get_size(x_2521); +lean_dec(x_2521); +x_2523 = lean_nat_dec_lt(x_2520, x_2522); +if (x_2523 == 0) +{ +uint8_t x_2524; +x_2524 = lean_nat_dec_eq(x_2520, x_2522); +if (x_2524 == 0) +{ +lean_object* x_2525; lean_object* x_2526; lean_object* x_2527; lean_object* x_2528; lean_object* x_2529; lean_object* x_2530; lean_object* x_2531; lean_object* x_2532; lean_object* x_2533; lean_object* x_2534; lean_object* x_2535; lean_object* x_2536; lean_object* x_2537; lean_object* x_2538; lean_object* x_2539; lean_object* x_2540; lean_object* x_2541; +x_2525 = lean_unsigned_to_nat(0u); +x_2526 = l_Array_extract___rarg(x_2363, x_2525, x_2522); +x_2527 = l_Array_extract___rarg(x_2363, x_2522, x_2520); +lean_dec(x_2520); +if (lean_is_scalar(x_2517)) { + x_2528 = lean_alloc_ctor(6, 2, 0); +} else { + x_2528 = x_2517; + lean_ctor_set_tag(x_2528, 6); +} +lean_ctor_set(x_2528, 0, x_153); +lean_ctor_set(x_2528, 1, x_2526); +x_2529 = lean_ctor_get(x_1, 0); +lean_inc(x_2529); +x_2530 = l_Lean_IR_ToIR_bindVar(x_2529, x_2368, x_4, x_5, x_2516); +x_2531 = lean_ctor_get(x_2530, 0); +lean_inc(x_2531); +x_2532 = lean_ctor_get(x_2530, 1); +lean_inc(x_2532); +lean_dec(x_2530); +x_2533 = lean_ctor_get(x_2531, 0); +lean_inc(x_2533); +x_2534 = lean_ctor_get(x_2531, 1); +lean_inc(x_2534); +lean_dec(x_2531); +x_2535 = l_Lean_IR_ToIR_newVar(x_2534, x_4, x_5, x_2532); +x_2536 = lean_ctor_get(x_2535, 0); +lean_inc(x_2536); +x_2537 = lean_ctor_get(x_2535, 1); +lean_inc(x_2537); +lean_dec(x_2535); +x_2538 = lean_ctor_get(x_2536, 0); +lean_inc(x_2538); +x_2539 = lean_ctor_get(x_2536, 1); +lean_inc(x_2539); +lean_dec(x_2536); +x_2540 = lean_ctor_get(x_1, 2); +lean_inc(x_2540); +lean_inc(x_5); +lean_inc(x_4); +x_2541 = l_Lean_IR_ToIR_lowerType(x_2540, x_2539, x_4, x_5, x_2537); +if (lean_obj_tag(x_2541) == 0) +{ +lean_object* x_2542; lean_object* x_2543; lean_object* x_2544; lean_object* x_2545; lean_object* x_2546; +x_2542 = lean_ctor_get(x_2541, 0); +lean_inc(x_2542); +x_2543 = lean_ctor_get(x_2541, 1); +lean_inc(x_2543); +lean_dec(x_2541); +x_2544 = lean_ctor_get(x_2542, 0); +lean_inc(x_2544); +x_2545 = lean_ctor_get(x_2542, 1); +lean_inc(x_2545); +lean_dec(x_2542); +lean_inc(x_5); +lean_inc(x_4); +lean_inc(x_2); +x_2546 = l_Lean_IR_ToIR_lowerLet___lambda__3(x_2, x_2538, x_2527, x_2533, x_2528, x_2544, x_2545, x_4, x_5, x_2543); +if (lean_obj_tag(x_2546) == 0) +{ +lean_object* x_2547; lean_object* x_2548; lean_object* x_2549; lean_object* x_2550; lean_object* x_2551; lean_object* x_2552; lean_object* x_2553; +x_2547 = lean_ctor_get(x_2546, 0); +lean_inc(x_2547); +x_2548 = lean_ctor_get(x_2546, 1); +lean_inc(x_2548); +lean_dec(x_2546); +x_2549 = lean_ctor_get(x_2547, 0); +lean_inc(x_2549); +x_2550 = lean_ctor_get(x_2547, 1); +lean_inc(x_2550); +if (lean_is_exclusive(x_2547)) { + lean_ctor_release(x_2547, 0); + lean_ctor_release(x_2547, 1); + x_2551 = x_2547; +} else { + lean_dec_ref(x_2547); + x_2551 = lean_box(0); +} +if (lean_is_scalar(x_2519)) { + x_2552 = lean_alloc_ctor(1, 
1, 0); +} else { + x_2552 = x_2519; +} +lean_ctor_set(x_2552, 0, x_2549); +if (lean_is_scalar(x_2551)) { + x_2553 = lean_alloc_ctor(0, 2, 0); +} else { + x_2553 = x_2551; +} +lean_ctor_set(x_2553, 0, x_2552); +lean_ctor_set(x_2553, 1, x_2550); +x_2483 = x_2553; +x_2484 = x_2548; +goto block_2510; +} +else +{ +lean_object* x_2554; lean_object* x_2555; lean_object* x_2556; lean_object* x_2557; +lean_dec(x_2519); +lean_dec(x_2373); +lean_dec(x_2363); +lean_dec(x_5); +lean_dec(x_4); +lean_dec(x_2); +lean_dec(x_1); +x_2554 = lean_ctor_get(x_2546, 0); +lean_inc(x_2554); +x_2555 = lean_ctor_get(x_2546, 1); +lean_inc(x_2555); +if (lean_is_exclusive(x_2546)) { + lean_ctor_release(x_2546, 0); + lean_ctor_release(x_2546, 1); + x_2556 = x_2546; +} else { + lean_dec_ref(x_2546); + x_2556 = lean_box(0); +} +if (lean_is_scalar(x_2556)) { + x_2557 = lean_alloc_ctor(1, 2, 0); +} else { + x_2557 = x_2556; +} +lean_ctor_set(x_2557, 0, x_2554); +lean_ctor_set(x_2557, 1, x_2555); +return x_2557; +} +} +else +{ +lean_object* x_2558; lean_object* x_2559; lean_object* x_2560; lean_object* x_2561; +lean_dec(x_2538); +lean_dec(x_2533); +lean_dec(x_2528); +lean_dec(x_2527); +lean_dec(x_2519); +lean_dec(x_2373); +lean_dec(x_2363); +lean_dec(x_5); +lean_dec(x_4); +lean_dec(x_2); +lean_dec(x_1); +x_2558 = lean_ctor_get(x_2541, 0); +lean_inc(x_2558); +x_2559 = lean_ctor_get(x_2541, 1); +lean_inc(x_2559); +if (lean_is_exclusive(x_2541)) { + lean_ctor_release(x_2541, 0); + lean_ctor_release(x_2541, 1); + x_2560 = x_2541; +} else { + lean_dec_ref(x_2541); + x_2560 = lean_box(0); +} +if (lean_is_scalar(x_2560)) { + x_2561 = lean_alloc_ctor(1, 2, 0); +} else { + x_2561 = x_2560; +} +lean_ctor_set(x_2561, 0, x_2558); +lean_ctor_set(x_2561, 1, x_2559); +return x_2561; +} +} +else +{ +lean_object* x_2562; lean_object* x_2563; lean_object* x_2564; lean_object* x_2565; lean_object* x_2566; lean_object* x_2567; lean_object* x_2568; lean_object* x_2569; lean_object* x_2570; +lean_dec(x_2522); +lean_dec(x_2520); +lean_inc(x_2363); +if (lean_is_scalar(x_2517)) { + x_2562 = lean_alloc_ctor(6, 2, 0); +} else { + x_2562 = x_2517; + lean_ctor_set_tag(x_2562, 6); +} +lean_ctor_set(x_2562, 0, x_153); +lean_ctor_set(x_2562, 1, x_2363); +x_2563 = lean_ctor_get(x_1, 0); +lean_inc(x_2563); +x_2564 = l_Lean_IR_ToIR_bindVar(x_2563, x_2368, x_4, x_5, x_2516); +x_2565 = lean_ctor_get(x_2564, 0); +lean_inc(x_2565); +x_2566 = lean_ctor_get(x_2564, 1); +lean_inc(x_2566); +lean_dec(x_2564); +x_2567 = lean_ctor_get(x_2565, 0); +lean_inc(x_2567); +x_2568 = lean_ctor_get(x_2565, 1); +lean_inc(x_2568); +lean_dec(x_2565); +x_2569 = lean_ctor_get(x_1, 2); +lean_inc(x_2569); +lean_inc(x_5); +lean_inc(x_4); +x_2570 = l_Lean_IR_ToIR_lowerType(x_2569, x_2568, x_4, x_5, x_2566); +if (lean_obj_tag(x_2570) == 0) +{ +lean_object* x_2571; lean_object* x_2572; lean_object* x_2573; lean_object* x_2574; lean_object* x_2575; +x_2571 = lean_ctor_get(x_2570, 0); +lean_inc(x_2571); +x_2572 = lean_ctor_get(x_2570, 1); +lean_inc(x_2572); +lean_dec(x_2570); +x_2573 = lean_ctor_get(x_2571, 0); +lean_inc(x_2573); +x_2574 = lean_ctor_get(x_2571, 1); +lean_inc(x_2574); +lean_dec(x_2571); +lean_inc(x_5); +lean_inc(x_4); +lean_inc(x_2); +x_2575 = l_Lean_IR_ToIR_lowerLet___lambda__1(x_2, x_2567, x_2562, x_2573, x_2574, x_4, x_5, x_2572); +if (lean_obj_tag(x_2575) == 0) +{ +lean_object* x_2576; lean_object* x_2577; lean_object* x_2578; lean_object* x_2579; lean_object* x_2580; lean_object* x_2581; lean_object* x_2582; +x_2576 = lean_ctor_get(x_2575, 0); +lean_inc(x_2576); +x_2577 = 
lean_ctor_get(x_2575, 1); +lean_inc(x_2577); +lean_dec(x_2575); +x_2578 = lean_ctor_get(x_2576, 0); +lean_inc(x_2578); +x_2579 = lean_ctor_get(x_2576, 1); +lean_inc(x_2579); +if (lean_is_exclusive(x_2576)) { + lean_ctor_release(x_2576, 0); + lean_ctor_release(x_2576, 1); + x_2580 = x_2576; +} else { + lean_dec_ref(x_2576); + x_2580 = lean_box(0); +} +if (lean_is_scalar(x_2519)) { + x_2581 = lean_alloc_ctor(1, 1, 0); +} else { + x_2581 = x_2519; +} +lean_ctor_set(x_2581, 0, x_2578); +if (lean_is_scalar(x_2580)) { + x_2582 = lean_alloc_ctor(0, 2, 0); +} else { + x_2582 = x_2580; +} +lean_ctor_set(x_2582, 0, x_2581); +lean_ctor_set(x_2582, 1, x_2579); +x_2483 = x_2582; +x_2484 = x_2577; +goto block_2510; +} +else +{ +lean_object* x_2583; lean_object* x_2584; lean_object* x_2585; lean_object* x_2586; +lean_dec(x_2519); +lean_dec(x_2373); +lean_dec(x_2363); +lean_dec(x_5); +lean_dec(x_4); +lean_dec(x_2); +lean_dec(x_1); +x_2583 = lean_ctor_get(x_2575, 0); +lean_inc(x_2583); +x_2584 = lean_ctor_get(x_2575, 1); +lean_inc(x_2584); +if (lean_is_exclusive(x_2575)) { + lean_ctor_release(x_2575, 0); + lean_ctor_release(x_2575, 1); + x_2585 = x_2575; +} else { + lean_dec_ref(x_2575); + x_2585 = lean_box(0); +} +if (lean_is_scalar(x_2585)) { + x_2586 = lean_alloc_ctor(1, 2, 0); +} else { + x_2586 = x_2585; +} +lean_ctor_set(x_2586, 0, x_2583); +lean_ctor_set(x_2586, 1, x_2584); +return x_2586; +} +} +else +{ +lean_object* x_2587; lean_object* x_2588; lean_object* x_2589; lean_object* x_2590; +lean_dec(x_2567); +lean_dec(x_2562); +lean_dec(x_2519); +lean_dec(x_2373); +lean_dec(x_2363); +lean_dec(x_5); +lean_dec(x_4); +lean_dec(x_2); +lean_dec(x_1); +x_2587 = lean_ctor_get(x_2570, 0); +lean_inc(x_2587); +x_2588 = lean_ctor_get(x_2570, 1); +lean_inc(x_2588); +if (lean_is_exclusive(x_2570)) { + lean_ctor_release(x_2570, 0); + lean_ctor_release(x_2570, 1); + x_2589 = x_2570; +} else { + lean_dec_ref(x_2570); + x_2589 = lean_box(0); +} +if (lean_is_scalar(x_2589)) { + x_2590 = lean_alloc_ctor(1, 2, 0); +} else { + x_2590 = x_2589; +} +lean_ctor_set(x_2590, 0, x_2587); +lean_ctor_set(x_2590, 1, x_2588); +return x_2590; +} +} +} +else +{ +lean_object* x_2591; lean_object* x_2592; lean_object* x_2593; lean_object* x_2594; lean_object* x_2595; lean_object* x_2596; lean_object* x_2597; lean_object* x_2598; lean_object* x_2599; +lean_dec(x_2522); +lean_dec(x_2520); +lean_inc(x_2363); +if (lean_is_scalar(x_2517)) { + x_2591 = lean_alloc_ctor(7, 2, 0); +} else { + x_2591 = x_2517; + lean_ctor_set_tag(x_2591, 7); +} +lean_ctor_set(x_2591, 0, x_153); +lean_ctor_set(x_2591, 1, x_2363); +x_2592 = lean_ctor_get(x_1, 0); +lean_inc(x_2592); +x_2593 = l_Lean_IR_ToIR_bindVar(x_2592, x_2368, x_4, x_5, x_2516); +x_2594 = lean_ctor_get(x_2593, 0); +lean_inc(x_2594); +x_2595 = lean_ctor_get(x_2593, 1); +lean_inc(x_2595); +lean_dec(x_2593); +x_2596 = lean_ctor_get(x_2594, 0); +lean_inc(x_2596); +x_2597 = lean_ctor_get(x_2594, 1); +lean_inc(x_2597); +lean_dec(x_2594); +x_2598 = lean_box(7); +lean_inc(x_5); +lean_inc(x_4); +lean_inc(x_2); +x_2599 = l_Lean_IR_ToIR_lowerLet___lambda__1(x_2, x_2596, x_2591, x_2598, x_2597, x_4, x_5, x_2595); +if (lean_obj_tag(x_2599) == 0) +{ +lean_object* x_2600; lean_object* x_2601; lean_object* x_2602; lean_object* x_2603; lean_object* x_2604; lean_object* x_2605; lean_object* x_2606; +x_2600 = lean_ctor_get(x_2599, 0); +lean_inc(x_2600); +x_2601 = lean_ctor_get(x_2599, 1); +lean_inc(x_2601); +lean_dec(x_2599); +x_2602 = lean_ctor_get(x_2600, 0); +lean_inc(x_2602); +x_2603 = lean_ctor_get(x_2600, 
1); +lean_inc(x_2603); +if (lean_is_exclusive(x_2600)) { + lean_ctor_release(x_2600, 0); + lean_ctor_release(x_2600, 1); + x_2604 = x_2600; +} else { + lean_dec_ref(x_2600); + x_2604 = lean_box(0); +} +if (lean_is_scalar(x_2519)) { + x_2605 = lean_alloc_ctor(1, 1, 0); +} else { + x_2605 = x_2519; +} +lean_ctor_set(x_2605, 0, x_2602); +if (lean_is_scalar(x_2604)) { + x_2606 = lean_alloc_ctor(0, 2, 0); +} else { + x_2606 = x_2604; +} +lean_ctor_set(x_2606, 0, x_2605); +lean_ctor_set(x_2606, 1, x_2603); +x_2483 = x_2606; +x_2484 = x_2601; +goto block_2510; +} +else +{ +lean_object* x_2607; lean_object* x_2608; lean_object* x_2609; lean_object* x_2610; +lean_dec(x_2519); +lean_dec(x_2373); +lean_dec(x_2363); +lean_dec(x_5); +lean_dec(x_4); +lean_dec(x_2); +lean_dec(x_1); +x_2607 = lean_ctor_get(x_2599, 0); +lean_inc(x_2607); +x_2608 = lean_ctor_get(x_2599, 1); +lean_inc(x_2608); +if (lean_is_exclusive(x_2599)) { + lean_ctor_release(x_2599, 0); + lean_ctor_release(x_2599, 1); + x_2609 = x_2599; +} else { + lean_dec_ref(x_2599); + x_2609 = lean_box(0); +} +if (lean_is_scalar(x_2609)) { + x_2610 = lean_alloc_ctor(1, 2, 0); +} else { + x_2610 = x_2609; +} +lean_ctor_set(x_2610, 0, x_2607); +lean_ctor_set(x_2610, 1, x_2608); +return x_2610; +} +} +} +block_2510: +{ +lean_object* x_2485; +x_2485 = lean_ctor_get(x_2483, 0); +lean_inc(x_2485); +if (lean_obj_tag(x_2485) == 0) +{ +lean_object* x_2486; lean_object* x_2487; lean_object* x_2488; lean_object* x_2489; lean_object* x_2490; lean_object* x_2491; lean_object* x_2492; lean_object* x_2493; lean_object* x_2494; lean_object* x_2495; +lean_dec(x_2373); +x_2486 = lean_ctor_get(x_2483, 1); +lean_inc(x_2486); +lean_dec(x_2483); +x_2487 = lean_alloc_ctor(6, 2, 0); +lean_ctor_set(x_2487, 0, x_153); +lean_ctor_set(x_2487, 1, x_2363); +x_2488 = lean_ctor_get(x_1, 0); +lean_inc(x_2488); +x_2489 = l_Lean_IR_ToIR_bindVar(x_2488, x_2486, x_4, x_5, x_2484); +x_2490 = lean_ctor_get(x_2489, 0); +lean_inc(x_2490); +x_2491 = lean_ctor_get(x_2489, 1); +lean_inc(x_2491); +lean_dec(x_2489); +x_2492 = lean_ctor_get(x_2490, 0); +lean_inc(x_2492); +x_2493 = lean_ctor_get(x_2490, 1); +lean_inc(x_2493); +lean_dec(x_2490); +x_2494 = lean_ctor_get(x_1, 2); +lean_inc(x_2494); +lean_dec(x_1); +lean_inc(x_5); +lean_inc(x_4); +x_2495 = l_Lean_IR_ToIR_lowerType(x_2494, x_2493, x_4, x_5, x_2491); +if (lean_obj_tag(x_2495) == 0) +{ +lean_object* x_2496; lean_object* x_2497; lean_object* x_2498; lean_object* x_2499; lean_object* x_2500; +x_2496 = lean_ctor_get(x_2495, 0); +lean_inc(x_2496); +x_2497 = lean_ctor_get(x_2495, 1); +lean_inc(x_2497); +lean_dec(x_2495); +x_2498 = lean_ctor_get(x_2496, 0); +lean_inc(x_2498); +x_2499 = lean_ctor_get(x_2496, 1); +lean_inc(x_2499); +lean_dec(x_2496); +x_2500 = l_Lean_IR_ToIR_lowerLet___lambda__1(x_2, x_2492, x_2487, x_2498, x_2499, x_4, x_5, x_2497); +return x_2500; +} +else +{ +lean_object* x_2501; lean_object* x_2502; lean_object* x_2503; lean_object* x_2504; +lean_dec(x_2492); +lean_dec(x_2487); +lean_dec(x_5); +lean_dec(x_4); +lean_dec(x_2); +x_2501 = lean_ctor_get(x_2495, 0); +lean_inc(x_2501); +x_2502 = lean_ctor_get(x_2495, 1); +lean_inc(x_2502); +if (lean_is_exclusive(x_2495)) { + lean_ctor_release(x_2495, 0); + lean_ctor_release(x_2495, 1); + x_2503 = x_2495; +} else { + lean_dec_ref(x_2495); + x_2503 = lean_box(0); +} +if (lean_is_scalar(x_2503)) { + x_2504 = lean_alloc_ctor(1, 2, 0); +} else { + x_2504 = x_2503; +} +lean_ctor_set(x_2504, 0, x_2501); +lean_ctor_set(x_2504, 1, x_2502); +return x_2504; +} +} +else +{ +lean_object* 
x_2505; lean_object* x_2506; lean_object* x_2507; lean_object* x_2508; lean_object* x_2509; +lean_dec(x_2363); +lean_dec(x_5); +lean_dec(x_4); +lean_dec(x_2); +lean_dec(x_1); +x_2505 = lean_ctor_get(x_2483, 1); +lean_inc(x_2505); +if (lean_is_exclusive(x_2483)) { + lean_ctor_release(x_2483, 0); + lean_ctor_release(x_2483, 1); + x_2506 = x_2483; +} else { + lean_dec_ref(x_2483); + x_2506 = lean_box(0); +} +x_2507 = lean_ctor_get(x_2485, 0); +lean_inc(x_2507); +lean_dec(x_2485); +if (lean_is_scalar(x_2506)) { + x_2508 = lean_alloc_ctor(0, 2, 0); +} else { + x_2508 = x_2506; +} +lean_ctor_set(x_2508, 0, x_2507); +lean_ctor_set(x_2508, 1, x_2505); +if (lean_is_scalar(x_2373)) { + x_2509 = lean_alloc_ctor(0, 2, 0); +} else { + x_2509 = x_2373; +} +lean_ctor_set(x_2509, 0, x_2508); +lean_ctor_set(x_2509, 1, x_2484); +return x_2509; +} +} +} +case 2: +{ +lean_object* x_2611; lean_object* x_2612; +lean_dec(x_2379); +lean_dec(x_2374); +lean_dec(x_2373); +lean_dec(x_2369); +lean_dec(x_2363); +lean_dec(x_155); +lean_dec(x_154); +lean_dec(x_2); +lean_dec(x_1); +x_2611 = l_Lean_IR_ToIR_lowerLet___closed__18; +x_2612 = l_panic___at_Lean_IR_ToIR_lowerAlt_loop___spec__1(x_2611, x_2368, x_4, x_5, x_2372); +return x_2612; +} +case 3: +{ +lean_object* x_2613; lean_object* x_2614; lean_object* x_2641; lean_object* x_2642; +lean_dec(x_2379); +lean_dec(x_2374); +lean_dec(x_155); +lean_dec(x_154); +x_2641 = l_Lean_Compiler_LCNF_getMonoDecl_x3f(x_153, x_4, x_5, x_2372); +x_2642 = lean_ctor_get(x_2641, 0); +lean_inc(x_2642); +if (lean_obj_tag(x_2642) == 0) +{ +lean_object* x_2643; lean_object* x_2644; lean_object* x_2645; +x_2643 = lean_ctor_get(x_2641, 1); +lean_inc(x_2643); +lean_dec(x_2641); +x_2644 = lean_box(0); +if (lean_is_scalar(x_2369)) { + x_2645 = lean_alloc_ctor(0, 2, 0); +} else { + x_2645 = x_2369; +} +lean_ctor_set(x_2645, 0, x_2644); +lean_ctor_set(x_2645, 1, x_2368); +x_2613 = x_2645; +x_2614 = x_2643; +goto block_2640; +} +else +{ +lean_object* x_2646; lean_object* x_2647; lean_object* x_2648; lean_object* x_2649; lean_object* x_2650; lean_object* x_2651; lean_object* x_2652; uint8_t x_2653; +lean_dec(x_2369); +x_2646 = lean_ctor_get(x_2641, 1); +lean_inc(x_2646); +if (lean_is_exclusive(x_2641)) { + lean_ctor_release(x_2641, 0); + lean_ctor_release(x_2641, 1); + x_2647 = x_2641; +} else { + lean_dec_ref(x_2641); + x_2647 = lean_box(0); +} +x_2648 = lean_ctor_get(x_2642, 0); +lean_inc(x_2648); +if (lean_is_exclusive(x_2642)) { + lean_ctor_release(x_2642, 0); + x_2649 = x_2642; +} else { + lean_dec_ref(x_2642); + x_2649 = lean_box(0); +} +x_2650 = lean_array_get_size(x_2363); +x_2651 = lean_ctor_get(x_2648, 3); +lean_inc(x_2651); +lean_dec(x_2648); +x_2652 = lean_array_get_size(x_2651); +lean_dec(x_2651); +x_2653 = lean_nat_dec_lt(x_2650, x_2652); +if (x_2653 == 0) +{ +uint8_t x_2654; +x_2654 = lean_nat_dec_eq(x_2650, x_2652); +if (x_2654 == 0) +{ +lean_object* x_2655; lean_object* x_2656; lean_object* x_2657; lean_object* x_2658; lean_object* x_2659; lean_object* x_2660; lean_object* x_2661; lean_object* x_2662; lean_object* x_2663; lean_object* x_2664; lean_object* x_2665; lean_object* x_2666; lean_object* x_2667; lean_object* x_2668; lean_object* x_2669; lean_object* x_2670; lean_object* x_2671; +x_2655 = lean_unsigned_to_nat(0u); +x_2656 = l_Array_extract___rarg(x_2363, x_2655, x_2652); +x_2657 = l_Array_extract___rarg(x_2363, x_2652, x_2650); +lean_dec(x_2650); +if (lean_is_scalar(x_2647)) { + x_2658 = lean_alloc_ctor(6, 2, 0); +} else { + x_2658 = x_2647; + lean_ctor_set_tag(x_2658, 6); +} 
+lean_ctor_set(x_2658, 0, x_153); +lean_ctor_set(x_2658, 1, x_2656); +x_2659 = lean_ctor_get(x_1, 0); +lean_inc(x_2659); +x_2660 = l_Lean_IR_ToIR_bindVar(x_2659, x_2368, x_4, x_5, x_2646); +x_2661 = lean_ctor_get(x_2660, 0); +lean_inc(x_2661); +x_2662 = lean_ctor_get(x_2660, 1); +lean_inc(x_2662); +lean_dec(x_2660); +x_2663 = lean_ctor_get(x_2661, 0); +lean_inc(x_2663); +x_2664 = lean_ctor_get(x_2661, 1); +lean_inc(x_2664); +lean_dec(x_2661); +x_2665 = l_Lean_IR_ToIR_newVar(x_2664, x_4, x_5, x_2662); +x_2666 = lean_ctor_get(x_2665, 0); +lean_inc(x_2666); +x_2667 = lean_ctor_get(x_2665, 1); +lean_inc(x_2667); +lean_dec(x_2665); +x_2668 = lean_ctor_get(x_2666, 0); +lean_inc(x_2668); +x_2669 = lean_ctor_get(x_2666, 1); +lean_inc(x_2669); +lean_dec(x_2666); +x_2670 = lean_ctor_get(x_1, 2); +lean_inc(x_2670); +lean_inc(x_5); +lean_inc(x_4); +x_2671 = l_Lean_IR_ToIR_lowerType(x_2670, x_2669, x_4, x_5, x_2667); +if (lean_obj_tag(x_2671) == 0) +{ +lean_object* x_2672; lean_object* x_2673; lean_object* x_2674; lean_object* x_2675; lean_object* x_2676; +x_2672 = lean_ctor_get(x_2671, 0); +lean_inc(x_2672); +x_2673 = lean_ctor_get(x_2671, 1); +lean_inc(x_2673); +lean_dec(x_2671); +x_2674 = lean_ctor_get(x_2672, 0); +lean_inc(x_2674); +x_2675 = lean_ctor_get(x_2672, 1); +lean_inc(x_2675); +lean_dec(x_2672); +lean_inc(x_5); +lean_inc(x_4); +lean_inc(x_2); +x_2676 = l_Lean_IR_ToIR_lowerLet___lambda__3(x_2, x_2668, x_2657, x_2663, x_2658, x_2674, x_2675, x_4, x_5, x_2673); +if (lean_obj_tag(x_2676) == 0) +{ +lean_object* x_2677; lean_object* x_2678; lean_object* x_2679; lean_object* x_2680; lean_object* x_2681; lean_object* x_2682; lean_object* x_2683; +x_2677 = lean_ctor_get(x_2676, 0); +lean_inc(x_2677); +x_2678 = lean_ctor_get(x_2676, 1); +lean_inc(x_2678); +lean_dec(x_2676); +x_2679 = lean_ctor_get(x_2677, 0); +lean_inc(x_2679); +x_2680 = lean_ctor_get(x_2677, 1); +lean_inc(x_2680); +if (lean_is_exclusive(x_2677)) { + lean_ctor_release(x_2677, 0); + lean_ctor_release(x_2677, 1); + x_2681 = x_2677; +} else { + lean_dec_ref(x_2677); + x_2681 = lean_box(0); +} +if (lean_is_scalar(x_2649)) { + x_2682 = lean_alloc_ctor(1, 1, 0); +} else { + x_2682 = x_2649; +} +lean_ctor_set(x_2682, 0, x_2679); +if (lean_is_scalar(x_2681)) { + x_2683 = lean_alloc_ctor(0, 2, 0); +} else { + x_2683 = x_2681; +} +lean_ctor_set(x_2683, 0, x_2682); +lean_ctor_set(x_2683, 1, x_2680); +x_2613 = x_2683; +x_2614 = x_2678; +goto block_2640; +} +else +{ +lean_object* x_2684; lean_object* x_2685; lean_object* x_2686; lean_object* x_2687; +lean_dec(x_2649); +lean_dec(x_2373); +lean_dec(x_2363); +lean_dec(x_5); +lean_dec(x_4); +lean_dec(x_2); +lean_dec(x_1); +x_2684 = lean_ctor_get(x_2676, 0); +lean_inc(x_2684); +x_2685 = lean_ctor_get(x_2676, 1); +lean_inc(x_2685); +if (lean_is_exclusive(x_2676)) { + lean_ctor_release(x_2676, 0); + lean_ctor_release(x_2676, 1); + x_2686 = x_2676; +} else { + lean_dec_ref(x_2676); + x_2686 = lean_box(0); +} +if (lean_is_scalar(x_2686)) { + x_2687 = lean_alloc_ctor(1, 2, 0); +} else { + x_2687 = x_2686; +} +lean_ctor_set(x_2687, 0, x_2684); +lean_ctor_set(x_2687, 1, x_2685); +return x_2687; +} +} +else +{ +lean_object* x_2688; lean_object* x_2689; lean_object* x_2690; lean_object* x_2691; +lean_dec(x_2668); +lean_dec(x_2663); +lean_dec(x_2658); +lean_dec(x_2657); +lean_dec(x_2649); +lean_dec(x_2373); +lean_dec(x_2363); +lean_dec(x_5); +lean_dec(x_4); +lean_dec(x_2); +lean_dec(x_1); +x_2688 = lean_ctor_get(x_2671, 0); +lean_inc(x_2688); +x_2689 = lean_ctor_get(x_2671, 1); +lean_inc(x_2689); +if 
(lean_is_exclusive(x_2671)) { + lean_ctor_release(x_2671, 0); + lean_ctor_release(x_2671, 1); + x_2690 = x_2671; +} else { + lean_dec_ref(x_2671); + x_2690 = lean_box(0); +} +if (lean_is_scalar(x_2690)) { + x_2691 = lean_alloc_ctor(1, 2, 0); +} else { + x_2691 = x_2690; +} +lean_ctor_set(x_2691, 0, x_2688); +lean_ctor_set(x_2691, 1, x_2689); +return x_2691; +} +} +else +{ +lean_object* x_2692; lean_object* x_2693; lean_object* x_2694; lean_object* x_2695; lean_object* x_2696; lean_object* x_2697; lean_object* x_2698; lean_object* x_2699; lean_object* x_2700; +lean_dec(x_2652); +lean_dec(x_2650); +lean_inc(x_2363); +if (lean_is_scalar(x_2647)) { + x_2692 = lean_alloc_ctor(6, 2, 0); +} else { + x_2692 = x_2647; + lean_ctor_set_tag(x_2692, 6); +} +lean_ctor_set(x_2692, 0, x_153); +lean_ctor_set(x_2692, 1, x_2363); +x_2693 = lean_ctor_get(x_1, 0); +lean_inc(x_2693); +x_2694 = l_Lean_IR_ToIR_bindVar(x_2693, x_2368, x_4, x_5, x_2646); +x_2695 = lean_ctor_get(x_2694, 0); +lean_inc(x_2695); +x_2696 = lean_ctor_get(x_2694, 1); +lean_inc(x_2696); +lean_dec(x_2694); +x_2697 = lean_ctor_get(x_2695, 0); +lean_inc(x_2697); +x_2698 = lean_ctor_get(x_2695, 1); +lean_inc(x_2698); +lean_dec(x_2695); +x_2699 = lean_ctor_get(x_1, 2); +lean_inc(x_2699); +lean_inc(x_5); +lean_inc(x_4); +x_2700 = l_Lean_IR_ToIR_lowerType(x_2699, x_2698, x_4, x_5, x_2696); +if (lean_obj_tag(x_2700) == 0) +{ +lean_object* x_2701; lean_object* x_2702; lean_object* x_2703; lean_object* x_2704; lean_object* x_2705; +x_2701 = lean_ctor_get(x_2700, 0); +lean_inc(x_2701); +x_2702 = lean_ctor_get(x_2700, 1); +lean_inc(x_2702); +lean_dec(x_2700); +x_2703 = lean_ctor_get(x_2701, 0); +lean_inc(x_2703); +x_2704 = lean_ctor_get(x_2701, 1); +lean_inc(x_2704); +lean_dec(x_2701); +lean_inc(x_5); +lean_inc(x_4); +lean_inc(x_2); +x_2705 = l_Lean_IR_ToIR_lowerLet___lambda__1(x_2, x_2697, x_2692, x_2703, x_2704, x_4, x_5, x_2702); +if (lean_obj_tag(x_2705) == 0) +{ +lean_object* x_2706; lean_object* x_2707; lean_object* x_2708; lean_object* x_2709; lean_object* x_2710; lean_object* x_2711; lean_object* x_2712; +x_2706 = lean_ctor_get(x_2705, 0); +lean_inc(x_2706); +x_2707 = lean_ctor_get(x_2705, 1); +lean_inc(x_2707); +lean_dec(x_2705); +x_2708 = lean_ctor_get(x_2706, 0); +lean_inc(x_2708); +x_2709 = lean_ctor_get(x_2706, 1); +lean_inc(x_2709); +if (lean_is_exclusive(x_2706)) { + lean_ctor_release(x_2706, 0); + lean_ctor_release(x_2706, 1); + x_2710 = x_2706; +} else { + lean_dec_ref(x_2706); + x_2710 = lean_box(0); +} +if (lean_is_scalar(x_2649)) { + x_2711 = lean_alloc_ctor(1, 1, 0); +} else { + x_2711 = x_2649; +} +lean_ctor_set(x_2711, 0, x_2708); +if (lean_is_scalar(x_2710)) { + x_2712 = lean_alloc_ctor(0, 2, 0); +} else { + x_2712 = x_2710; +} +lean_ctor_set(x_2712, 0, x_2711); +lean_ctor_set(x_2712, 1, x_2709); +x_2613 = x_2712; +x_2614 = x_2707; +goto block_2640; +} +else +{ +lean_object* x_2713; lean_object* x_2714; lean_object* x_2715; lean_object* x_2716; +lean_dec(x_2649); +lean_dec(x_2373); +lean_dec(x_2363); +lean_dec(x_5); +lean_dec(x_4); +lean_dec(x_2); +lean_dec(x_1); +x_2713 = lean_ctor_get(x_2705, 0); +lean_inc(x_2713); +x_2714 = lean_ctor_get(x_2705, 1); +lean_inc(x_2714); +if (lean_is_exclusive(x_2705)) { + lean_ctor_release(x_2705, 0); + lean_ctor_release(x_2705, 1); + x_2715 = x_2705; +} else { + lean_dec_ref(x_2705); + x_2715 = lean_box(0); +} +if (lean_is_scalar(x_2715)) { + x_2716 = lean_alloc_ctor(1, 2, 0); +} else { + x_2716 = x_2715; +} +lean_ctor_set(x_2716, 0, x_2713); +lean_ctor_set(x_2716, 1, x_2714); +return x_2716; 
+} +} +else +{ +lean_object* x_2717; lean_object* x_2718; lean_object* x_2719; lean_object* x_2720; +lean_dec(x_2697); +lean_dec(x_2692); +lean_dec(x_2649); +lean_dec(x_2373); +lean_dec(x_2363); +lean_dec(x_5); +lean_dec(x_4); +lean_dec(x_2); +lean_dec(x_1); +x_2717 = lean_ctor_get(x_2700, 0); +lean_inc(x_2717); +x_2718 = lean_ctor_get(x_2700, 1); +lean_inc(x_2718); +if (lean_is_exclusive(x_2700)) { + lean_ctor_release(x_2700, 0); + lean_ctor_release(x_2700, 1); + x_2719 = x_2700; +} else { + lean_dec_ref(x_2700); + x_2719 = lean_box(0); +} +if (lean_is_scalar(x_2719)) { + x_2720 = lean_alloc_ctor(1, 2, 0); +} else { + x_2720 = x_2719; +} +lean_ctor_set(x_2720, 0, x_2717); +lean_ctor_set(x_2720, 1, x_2718); +return x_2720; +} +} +} +else +{ +lean_object* x_2721; lean_object* x_2722; lean_object* x_2723; lean_object* x_2724; lean_object* x_2725; lean_object* x_2726; lean_object* x_2727; lean_object* x_2728; lean_object* x_2729; +lean_dec(x_2652); +lean_dec(x_2650); +lean_inc(x_2363); +if (lean_is_scalar(x_2647)) { + x_2721 = lean_alloc_ctor(7, 2, 0); +} else { + x_2721 = x_2647; + lean_ctor_set_tag(x_2721, 7); +} +lean_ctor_set(x_2721, 0, x_153); +lean_ctor_set(x_2721, 1, x_2363); +x_2722 = lean_ctor_get(x_1, 0); +lean_inc(x_2722); +x_2723 = l_Lean_IR_ToIR_bindVar(x_2722, x_2368, x_4, x_5, x_2646); +x_2724 = lean_ctor_get(x_2723, 0); +lean_inc(x_2724); +x_2725 = lean_ctor_get(x_2723, 1); +lean_inc(x_2725); +lean_dec(x_2723); +x_2726 = lean_ctor_get(x_2724, 0); +lean_inc(x_2726); +x_2727 = lean_ctor_get(x_2724, 1); +lean_inc(x_2727); +lean_dec(x_2724); +x_2728 = lean_box(7); +lean_inc(x_5); +lean_inc(x_4); +lean_inc(x_2); +x_2729 = l_Lean_IR_ToIR_lowerLet___lambda__1(x_2, x_2726, x_2721, x_2728, x_2727, x_4, x_5, x_2725); +if (lean_obj_tag(x_2729) == 0) +{ +lean_object* x_2730; lean_object* x_2731; lean_object* x_2732; lean_object* x_2733; lean_object* x_2734; lean_object* x_2735; lean_object* x_2736; +x_2730 = lean_ctor_get(x_2729, 0); +lean_inc(x_2730); +x_2731 = lean_ctor_get(x_2729, 1); +lean_inc(x_2731); +lean_dec(x_2729); +x_2732 = lean_ctor_get(x_2730, 0); +lean_inc(x_2732); +x_2733 = lean_ctor_get(x_2730, 1); +lean_inc(x_2733); +if (lean_is_exclusive(x_2730)) { + lean_ctor_release(x_2730, 0); + lean_ctor_release(x_2730, 1); + x_2734 = x_2730; +} else { + lean_dec_ref(x_2730); + x_2734 = lean_box(0); +} +if (lean_is_scalar(x_2649)) { + x_2735 = lean_alloc_ctor(1, 1, 0); +} else { + x_2735 = x_2649; +} +lean_ctor_set(x_2735, 0, x_2732); +if (lean_is_scalar(x_2734)) { + x_2736 = lean_alloc_ctor(0, 2, 0); +} else { + x_2736 = x_2734; +} +lean_ctor_set(x_2736, 0, x_2735); +lean_ctor_set(x_2736, 1, x_2733); +x_2613 = x_2736; +x_2614 = x_2731; +goto block_2640; +} +else +{ +lean_object* x_2737; lean_object* x_2738; lean_object* x_2739; lean_object* x_2740; +lean_dec(x_2649); +lean_dec(x_2373); +lean_dec(x_2363); +lean_dec(x_5); +lean_dec(x_4); +lean_dec(x_2); +lean_dec(x_1); +x_2737 = lean_ctor_get(x_2729, 0); +lean_inc(x_2737); +x_2738 = lean_ctor_get(x_2729, 1); +lean_inc(x_2738); +if (lean_is_exclusive(x_2729)) { + lean_ctor_release(x_2729, 0); + lean_ctor_release(x_2729, 1); + x_2739 = x_2729; +} else { + lean_dec_ref(x_2729); + x_2739 = lean_box(0); +} +if (lean_is_scalar(x_2739)) { + x_2740 = lean_alloc_ctor(1, 2, 0); +} else { + x_2740 = x_2739; +} +lean_ctor_set(x_2740, 0, x_2737); +lean_ctor_set(x_2740, 1, x_2738); +return x_2740; +} +} +} +block_2640: +{ +lean_object* x_2615; +x_2615 = lean_ctor_get(x_2613, 0); +lean_inc(x_2615); +if (lean_obj_tag(x_2615) == 0) +{ +lean_object* 
x_2616; lean_object* x_2617; lean_object* x_2618; lean_object* x_2619; lean_object* x_2620; lean_object* x_2621; lean_object* x_2622; lean_object* x_2623; lean_object* x_2624; lean_object* x_2625; +lean_dec(x_2373); +x_2616 = lean_ctor_get(x_2613, 1); +lean_inc(x_2616); +lean_dec(x_2613); +x_2617 = lean_alloc_ctor(6, 2, 0); +lean_ctor_set(x_2617, 0, x_153); +lean_ctor_set(x_2617, 1, x_2363); +x_2618 = lean_ctor_get(x_1, 0); +lean_inc(x_2618); +x_2619 = l_Lean_IR_ToIR_bindVar(x_2618, x_2616, x_4, x_5, x_2614); +x_2620 = lean_ctor_get(x_2619, 0); +lean_inc(x_2620); +x_2621 = lean_ctor_get(x_2619, 1); +lean_inc(x_2621); +lean_dec(x_2619); +x_2622 = lean_ctor_get(x_2620, 0); +lean_inc(x_2622); +x_2623 = lean_ctor_get(x_2620, 1); +lean_inc(x_2623); +lean_dec(x_2620); +x_2624 = lean_ctor_get(x_1, 2); +lean_inc(x_2624); +lean_dec(x_1); +lean_inc(x_5); +lean_inc(x_4); +x_2625 = l_Lean_IR_ToIR_lowerType(x_2624, x_2623, x_4, x_5, x_2621); +if (lean_obj_tag(x_2625) == 0) +{ +lean_object* x_2626; lean_object* x_2627; lean_object* x_2628; lean_object* x_2629; lean_object* x_2630; +x_2626 = lean_ctor_get(x_2625, 0); +lean_inc(x_2626); +x_2627 = lean_ctor_get(x_2625, 1); +lean_inc(x_2627); +lean_dec(x_2625); +x_2628 = lean_ctor_get(x_2626, 0); +lean_inc(x_2628); +x_2629 = lean_ctor_get(x_2626, 1); +lean_inc(x_2629); +lean_dec(x_2626); +x_2630 = l_Lean_IR_ToIR_lowerLet___lambda__1(x_2, x_2622, x_2617, x_2628, x_2629, x_4, x_5, x_2627); +return x_2630; +} +else +{ +lean_object* x_2631; lean_object* x_2632; lean_object* x_2633; lean_object* x_2634; +lean_dec(x_2622); +lean_dec(x_2617); +lean_dec(x_5); +lean_dec(x_4); +lean_dec(x_2); +x_2631 = lean_ctor_get(x_2625, 0); +lean_inc(x_2631); +x_2632 = lean_ctor_get(x_2625, 1); +lean_inc(x_2632); +if (lean_is_exclusive(x_2625)) { + lean_ctor_release(x_2625, 0); + lean_ctor_release(x_2625, 1); + x_2633 = x_2625; +} else { + lean_dec_ref(x_2625); + x_2633 = lean_box(0); +} +if (lean_is_scalar(x_2633)) { + x_2634 = lean_alloc_ctor(1, 2, 0); +} else { + x_2634 = x_2633; +} +lean_ctor_set(x_2634, 0, x_2631); +lean_ctor_set(x_2634, 1, x_2632); +return x_2634; +} +} +else +{ +lean_object* x_2635; lean_object* x_2636; lean_object* x_2637; lean_object* x_2638; lean_object* x_2639; +lean_dec(x_2363); +lean_dec(x_5); +lean_dec(x_4); +lean_dec(x_2); +lean_dec(x_1); +x_2635 = lean_ctor_get(x_2613, 1); +lean_inc(x_2635); +if (lean_is_exclusive(x_2613)) { + lean_ctor_release(x_2613, 0); + lean_ctor_release(x_2613, 1); + x_2636 = x_2613; +} else { + lean_dec_ref(x_2613); + x_2636 = lean_box(0); +} +x_2637 = lean_ctor_get(x_2615, 0); +lean_inc(x_2637); +lean_dec(x_2615); +if (lean_is_scalar(x_2636)) { + x_2638 = lean_alloc_ctor(0, 2, 0); +} else { + x_2638 = x_2636; +} +lean_ctor_set(x_2638, 0, x_2637); +lean_ctor_set(x_2638, 1, x_2635); +if (lean_is_scalar(x_2373)) { + x_2639 = lean_alloc_ctor(0, 2, 0); +} else { + x_2639 = x_2373; +} +lean_ctor_set(x_2639, 0, x_2638); +lean_ctor_set(x_2639, 1, x_2614); +return x_2639; +} +} +} +case 4: +{ +lean_object* x_2741; lean_object* x_2742; uint8_t x_2743; +lean_dec(x_2374); +lean_dec(x_2373); +lean_dec(x_2369); +lean_dec(x_155); +lean_dec(x_154); +if (lean_is_exclusive(x_2379)) { + lean_ctor_release(x_2379, 0); + x_2741 = x_2379; +} else { + lean_dec_ref(x_2379); + x_2741 = lean_box(0); +} +x_2742 = l_Lean_IR_ToIR_lowerLet___closed__20; +x_2743 = lean_name_eq(x_153, x_2742); +if (x_2743 == 0) +{ +uint8_t x_2744; lean_object* x_2745; lean_object* x_2746; lean_object* x_2747; lean_object* x_2748; lean_object* x_2749; lean_object* x_2750; 
lean_object* x_2751; lean_object* x_2752; lean_object* x_2753; +lean_dec(x_2363); +lean_dec(x_2); +lean_dec(x_1); +x_2744 = 1; +x_2745 = l_Lean_IR_ToIR_lowerLet___closed__14; +x_2746 = l_Lean_Name_toString(x_153, x_2744, x_2745); +if (lean_is_scalar(x_2741)) { + x_2747 = lean_alloc_ctor(3, 1, 0); +} else { + x_2747 = x_2741; + lean_ctor_set_tag(x_2747, 3); +} +lean_ctor_set(x_2747, 0, x_2746); +x_2748 = l_Lean_IR_ToIR_lowerLet___closed__22; +x_2749 = lean_alloc_ctor(5, 2, 0); +lean_ctor_set(x_2749, 0, x_2748); +lean_ctor_set(x_2749, 1, x_2747); +x_2750 = l_Lean_IR_ToIR_lowerLet___closed__24; +x_2751 = lean_alloc_ctor(5, 2, 0); +lean_ctor_set(x_2751, 0, x_2749); +lean_ctor_set(x_2751, 1, x_2750); +x_2752 = l_Lean_MessageData_ofFormat(x_2751); +x_2753 = l_Lean_throwError___at_Lean_IR_ToIR_lowerLet___spec__1(x_2752, x_2368, x_4, x_5, x_2372); +lean_dec(x_5); +lean_dec(x_4); +lean_dec(x_2368); +return x_2753; +} +else +{ +lean_object* x_2754; lean_object* x_2755; lean_object* x_2756; +lean_dec(x_2741); +x_2754 = l_Lean_IR_instInhabitedArg; +x_2755 = lean_unsigned_to_nat(2u); +x_2756 = lean_array_get(x_2754, x_2363, x_2755); +lean_dec(x_2363); +if (lean_obj_tag(x_2756) == 0) +{ +lean_object* x_2757; lean_object* x_2758; lean_object* x_2759; lean_object* x_2760; lean_object* x_2761; lean_object* x_2762; lean_object* x_2763; +x_2757 = lean_ctor_get(x_2756, 0); +lean_inc(x_2757); +lean_dec(x_2756); +x_2758 = lean_ctor_get(x_1, 0); +lean_inc(x_2758); +lean_dec(x_1); +x_2759 = l_Lean_IR_ToIR_bindVarToVarId(x_2758, x_2757, x_2368, x_4, x_5, x_2372); +x_2760 = lean_ctor_get(x_2759, 0); +lean_inc(x_2760); +x_2761 = lean_ctor_get(x_2759, 1); +lean_inc(x_2761); +lean_dec(x_2759); +x_2762 = lean_ctor_get(x_2760, 1); +lean_inc(x_2762); +lean_dec(x_2760); +x_2763 = l_Lean_IR_ToIR_lowerCode(x_2, x_2762, x_4, x_5, x_2761); +return x_2763; +} +else +{ +lean_object* x_2764; lean_object* x_2765; lean_object* x_2766; lean_object* x_2767; lean_object* x_2768; lean_object* x_2769; +x_2764 = lean_ctor_get(x_1, 0); +lean_inc(x_2764); +lean_dec(x_1); +x_2765 = l_Lean_IR_ToIR_bindErased(x_2764, x_2368, x_4, x_5, x_2372); +x_2766 = lean_ctor_get(x_2765, 0); +lean_inc(x_2766); +x_2767 = lean_ctor_get(x_2765, 1); +lean_inc(x_2767); +lean_dec(x_2765); +x_2768 = lean_ctor_get(x_2766, 1); +lean_inc(x_2768); +lean_dec(x_2766); +x_2769 = l_Lean_IR_ToIR_lowerCode(x_2, x_2768, x_4, x_5, x_2767); +return x_2769; +} +} +} +case 5: +{ +lean_object* x_2770; lean_object* x_2771; +lean_dec(x_2379); +lean_dec(x_2374); +lean_dec(x_2373); +lean_dec(x_2369); +lean_dec(x_2363); +lean_dec(x_155); +lean_dec(x_154); +lean_dec(x_2); +lean_dec(x_1); +x_2770 = l_Lean_IR_ToIR_lowerLet___closed__26; +x_2771 = l_panic___at_Lean_IR_ToIR_lowerAlt_loop___spec__1(x_2770, x_2368, x_4, x_5, x_2372); +return x_2771; +} +case 6: +{ +lean_object* x_2772; uint8_t x_2773; +x_2772 = lean_ctor_get(x_2379, 0); +lean_inc(x_2772); +lean_dec(x_2379); +x_2773 = l_Lean_isExtern(x_2374, x_153); +if (x_2773 == 0) +{ +lean_object* x_2774; +lean_dec(x_2373); +lean_dec(x_2369); +lean_dec(x_2363); +lean_inc(x_5); +lean_inc(x_4); +x_2774 = l_Lean_IR_ToIR_getCtorInfo(x_153, x_2368, x_4, x_5, x_2372); +if (lean_obj_tag(x_2774) == 0) +{ +lean_object* x_2775; lean_object* x_2776; lean_object* x_2777; lean_object* x_2778; lean_object* x_2779; lean_object* x_2780; lean_object* x_2781; lean_object* x_2782; lean_object* x_2783; lean_object* x_2784; lean_object* x_2785; lean_object* x_2786; lean_object* x_2787; lean_object* x_2788; lean_object* x_2789; lean_object* x_2790; 
lean_object* x_2791; lean_object* x_2792; lean_object* x_2793; lean_object* x_2794; +x_2775 = lean_ctor_get(x_2774, 0); +lean_inc(x_2775); +x_2776 = lean_ctor_get(x_2775, 0); +lean_inc(x_2776); +x_2777 = lean_ctor_get(x_2774, 1); +lean_inc(x_2777); +lean_dec(x_2774); +x_2778 = lean_ctor_get(x_2775, 1); +lean_inc(x_2778); +lean_dec(x_2775); +x_2779 = lean_ctor_get(x_2776, 0); +lean_inc(x_2779); +x_2780 = lean_ctor_get(x_2776, 1); +lean_inc(x_2780); +lean_dec(x_2776); +x_2781 = lean_ctor_get(x_2772, 3); +lean_inc(x_2781); +lean_dec(x_2772); +x_2782 = lean_array_get_size(x_154); +x_2783 = l_Array_extract___rarg(x_154, x_2781, x_2782); +lean_dec(x_2782); +lean_dec(x_154); +x_2784 = lean_array_get_size(x_2780); +x_2785 = lean_unsigned_to_nat(0u); +x_2786 = lean_unsigned_to_nat(1u); +if (lean_is_scalar(x_155)) { + x_2787 = lean_alloc_ctor(0, 3, 0); +} else { + x_2787 = x_155; + lean_ctor_set_tag(x_2787, 0); +} +lean_ctor_set(x_2787, 0, x_2785); +lean_ctor_set(x_2787, 1, x_2784); +lean_ctor_set(x_2787, 2, x_2786); +x_2788 = l_Lean_IR_ToIR_lowerLet___closed__27; +x_2789 = l_Std_Range_forIn_x27_loop___at_Lean_IR_ToIR_lowerLet___spec__2(x_2780, x_2783, x_2787, x_2787, x_2788, x_2785, lean_box(0), lean_box(0), x_2778, x_4, x_5, x_2777); +lean_dec(x_2787); +x_2790 = lean_ctor_get(x_2789, 0); +lean_inc(x_2790); +x_2791 = lean_ctor_get(x_2789, 1); +lean_inc(x_2791); +lean_dec(x_2789); +x_2792 = lean_ctor_get(x_2790, 0); +lean_inc(x_2792); +x_2793 = lean_ctor_get(x_2790, 1); +lean_inc(x_2793); +lean_dec(x_2790); +x_2794 = l_Lean_IR_ToIR_lowerLet___lambda__4(x_1, x_2, x_2779, x_2780, x_2783, x_2792, x_2793, x_4, x_5, x_2791); +lean_dec(x_2783); +lean_dec(x_2780); +return x_2794; +} +else +{ +lean_object* x_2795; lean_object* x_2796; lean_object* x_2797; lean_object* x_2798; +lean_dec(x_2772); +lean_dec(x_155); +lean_dec(x_154); +lean_dec(x_5); +lean_dec(x_4); +lean_dec(x_2); +lean_dec(x_1); +x_2795 = lean_ctor_get(x_2774, 0); +lean_inc(x_2795); +x_2796 = lean_ctor_get(x_2774, 1); +lean_inc(x_2796); +if (lean_is_exclusive(x_2774)) { + lean_ctor_release(x_2774, 0); + lean_ctor_release(x_2774, 1); + x_2797 = x_2774; +} else { + lean_dec_ref(x_2774); + x_2797 = lean_box(0); +} +if (lean_is_scalar(x_2797)) { + x_2798 = lean_alloc_ctor(1, 2, 0); +} else { + x_2798 = x_2797; +} +lean_ctor_set(x_2798, 0, x_2795); +lean_ctor_set(x_2798, 1, x_2796); +return x_2798; +} +} +else +{ +lean_object* x_2799; lean_object* x_2800; lean_object* x_2827; lean_object* x_2828; +lean_dec(x_2772); +lean_dec(x_155); +lean_dec(x_154); +x_2827 = l_Lean_Compiler_LCNF_getMonoDecl_x3f(x_153, x_4, x_5, x_2372); +x_2828 = lean_ctor_get(x_2827, 0); +lean_inc(x_2828); +if (lean_obj_tag(x_2828) == 0) +{ +lean_object* x_2829; lean_object* x_2830; lean_object* x_2831; +x_2829 = lean_ctor_get(x_2827, 1); +lean_inc(x_2829); +lean_dec(x_2827); +x_2830 = lean_box(0); +if (lean_is_scalar(x_2369)) { + x_2831 = lean_alloc_ctor(0, 2, 0); +} else { + x_2831 = x_2369; +} +lean_ctor_set(x_2831, 0, x_2830); +lean_ctor_set(x_2831, 1, x_2368); +x_2799 = x_2831; +x_2800 = x_2829; +goto block_2826; +} +else +{ +lean_object* x_2832; lean_object* x_2833; lean_object* x_2834; lean_object* x_2835; lean_object* x_2836; lean_object* x_2837; lean_object* x_2838; uint8_t x_2839; +lean_dec(x_2369); +x_2832 = lean_ctor_get(x_2827, 1); +lean_inc(x_2832); +if (lean_is_exclusive(x_2827)) { + lean_ctor_release(x_2827, 0); + lean_ctor_release(x_2827, 1); + x_2833 = x_2827; +} else { + lean_dec_ref(x_2827); + x_2833 = lean_box(0); +} +x_2834 = lean_ctor_get(x_2828, 0); 
+lean_inc(x_2834); +if (lean_is_exclusive(x_2828)) { + lean_ctor_release(x_2828, 0); + x_2835 = x_2828; +} else { + lean_dec_ref(x_2828); + x_2835 = lean_box(0); +} +x_2836 = lean_array_get_size(x_2363); +x_2837 = lean_ctor_get(x_2834, 3); +lean_inc(x_2837); +lean_dec(x_2834); +x_2838 = lean_array_get_size(x_2837); +lean_dec(x_2837); +x_2839 = lean_nat_dec_lt(x_2836, x_2838); +if (x_2839 == 0) +{ +uint8_t x_2840; +x_2840 = lean_nat_dec_eq(x_2836, x_2838); +if (x_2840 == 0) +{ +lean_object* x_2841; lean_object* x_2842; lean_object* x_2843; lean_object* x_2844; lean_object* x_2845; lean_object* x_2846; lean_object* x_2847; lean_object* x_2848; lean_object* x_2849; lean_object* x_2850; lean_object* x_2851; lean_object* x_2852; lean_object* x_2853; lean_object* x_2854; lean_object* x_2855; lean_object* x_2856; lean_object* x_2857; +x_2841 = lean_unsigned_to_nat(0u); +x_2842 = l_Array_extract___rarg(x_2363, x_2841, x_2838); +x_2843 = l_Array_extract___rarg(x_2363, x_2838, x_2836); +lean_dec(x_2836); +if (lean_is_scalar(x_2833)) { + x_2844 = lean_alloc_ctor(6, 2, 0); +} else { + x_2844 = x_2833; + lean_ctor_set_tag(x_2844, 6); +} +lean_ctor_set(x_2844, 0, x_153); +lean_ctor_set(x_2844, 1, x_2842); +x_2845 = lean_ctor_get(x_1, 0); +lean_inc(x_2845); +x_2846 = l_Lean_IR_ToIR_bindVar(x_2845, x_2368, x_4, x_5, x_2832); +x_2847 = lean_ctor_get(x_2846, 0); +lean_inc(x_2847); +x_2848 = lean_ctor_get(x_2846, 1); +lean_inc(x_2848); +lean_dec(x_2846); +x_2849 = lean_ctor_get(x_2847, 0); +lean_inc(x_2849); +x_2850 = lean_ctor_get(x_2847, 1); +lean_inc(x_2850); +lean_dec(x_2847); +x_2851 = l_Lean_IR_ToIR_newVar(x_2850, x_4, x_5, x_2848); +x_2852 = lean_ctor_get(x_2851, 0); +lean_inc(x_2852); +x_2853 = lean_ctor_get(x_2851, 1); +lean_inc(x_2853); +lean_dec(x_2851); +x_2854 = lean_ctor_get(x_2852, 0); +lean_inc(x_2854); +x_2855 = lean_ctor_get(x_2852, 1); +lean_inc(x_2855); +lean_dec(x_2852); +x_2856 = lean_ctor_get(x_1, 2); +lean_inc(x_2856); +lean_inc(x_5); +lean_inc(x_4); +x_2857 = l_Lean_IR_ToIR_lowerType(x_2856, x_2855, x_4, x_5, x_2853); +if (lean_obj_tag(x_2857) == 0) +{ +lean_object* x_2858; lean_object* x_2859; lean_object* x_2860; lean_object* x_2861; lean_object* x_2862; +x_2858 = lean_ctor_get(x_2857, 0); +lean_inc(x_2858); +x_2859 = lean_ctor_get(x_2857, 1); +lean_inc(x_2859); +lean_dec(x_2857); +x_2860 = lean_ctor_get(x_2858, 0); +lean_inc(x_2860); +x_2861 = lean_ctor_get(x_2858, 1); +lean_inc(x_2861); +lean_dec(x_2858); +lean_inc(x_5); +lean_inc(x_4); +lean_inc(x_2); +x_2862 = l_Lean_IR_ToIR_lowerLet___lambda__3(x_2, x_2854, x_2843, x_2849, x_2844, x_2860, x_2861, x_4, x_5, x_2859); +if (lean_obj_tag(x_2862) == 0) +{ +lean_object* x_2863; lean_object* x_2864; lean_object* x_2865; lean_object* x_2866; lean_object* x_2867; lean_object* x_2868; lean_object* x_2869; +x_2863 = lean_ctor_get(x_2862, 0); +lean_inc(x_2863); +x_2864 = lean_ctor_get(x_2862, 1); +lean_inc(x_2864); +lean_dec(x_2862); +x_2865 = lean_ctor_get(x_2863, 0); +lean_inc(x_2865); +x_2866 = lean_ctor_get(x_2863, 1); +lean_inc(x_2866); +if (lean_is_exclusive(x_2863)) { + lean_ctor_release(x_2863, 0); + lean_ctor_release(x_2863, 1); + x_2867 = x_2863; +} else { + lean_dec_ref(x_2863); + x_2867 = lean_box(0); +} +if (lean_is_scalar(x_2835)) { + x_2868 = lean_alloc_ctor(1, 1, 0); +} else { + x_2868 = x_2835; +} +lean_ctor_set(x_2868, 0, x_2865); +if (lean_is_scalar(x_2867)) { + x_2869 = lean_alloc_ctor(0, 2, 0); +} else { + x_2869 = x_2867; +} +lean_ctor_set(x_2869, 0, x_2868); +lean_ctor_set(x_2869, 1, x_2866); +x_2799 = x_2869; 
+x_2800 = x_2864; +goto block_2826; +} +else +{ +lean_object* x_2870; lean_object* x_2871; lean_object* x_2872; lean_object* x_2873; +lean_dec(x_2835); +lean_dec(x_2373); +lean_dec(x_2363); +lean_dec(x_5); +lean_dec(x_4); +lean_dec(x_2); +lean_dec(x_1); +x_2870 = lean_ctor_get(x_2862, 0); +lean_inc(x_2870); +x_2871 = lean_ctor_get(x_2862, 1); +lean_inc(x_2871); +if (lean_is_exclusive(x_2862)) { + lean_ctor_release(x_2862, 0); + lean_ctor_release(x_2862, 1); + x_2872 = x_2862; +} else { + lean_dec_ref(x_2862); + x_2872 = lean_box(0); +} +if (lean_is_scalar(x_2872)) { + x_2873 = lean_alloc_ctor(1, 2, 0); +} else { + x_2873 = x_2872; +} +lean_ctor_set(x_2873, 0, x_2870); +lean_ctor_set(x_2873, 1, x_2871); +return x_2873; +} +} +else +{ +lean_object* x_2874; lean_object* x_2875; lean_object* x_2876; lean_object* x_2877; +lean_dec(x_2854); +lean_dec(x_2849); +lean_dec(x_2844); +lean_dec(x_2843); +lean_dec(x_2835); +lean_dec(x_2373); +lean_dec(x_2363); +lean_dec(x_5); +lean_dec(x_4); +lean_dec(x_2); +lean_dec(x_1); +x_2874 = lean_ctor_get(x_2857, 0); +lean_inc(x_2874); +x_2875 = lean_ctor_get(x_2857, 1); +lean_inc(x_2875); +if (lean_is_exclusive(x_2857)) { + lean_ctor_release(x_2857, 0); + lean_ctor_release(x_2857, 1); + x_2876 = x_2857; +} else { + lean_dec_ref(x_2857); + x_2876 = lean_box(0); +} +if (lean_is_scalar(x_2876)) { + x_2877 = lean_alloc_ctor(1, 2, 0); +} else { + x_2877 = x_2876; +} +lean_ctor_set(x_2877, 0, x_2874); +lean_ctor_set(x_2877, 1, x_2875); +return x_2877; +} +} +else +{ +lean_object* x_2878; lean_object* x_2879; lean_object* x_2880; lean_object* x_2881; lean_object* x_2882; lean_object* x_2883; lean_object* x_2884; lean_object* x_2885; lean_object* x_2886; +lean_dec(x_2838); +lean_dec(x_2836); +lean_inc(x_2363); +if (lean_is_scalar(x_2833)) { + x_2878 = lean_alloc_ctor(6, 2, 0); +} else { + x_2878 = x_2833; + lean_ctor_set_tag(x_2878, 6); +} +lean_ctor_set(x_2878, 0, x_153); +lean_ctor_set(x_2878, 1, x_2363); +x_2879 = lean_ctor_get(x_1, 0); +lean_inc(x_2879); +x_2880 = l_Lean_IR_ToIR_bindVar(x_2879, x_2368, x_4, x_5, x_2832); +x_2881 = lean_ctor_get(x_2880, 0); +lean_inc(x_2881); +x_2882 = lean_ctor_get(x_2880, 1); +lean_inc(x_2882); +lean_dec(x_2880); +x_2883 = lean_ctor_get(x_2881, 0); +lean_inc(x_2883); +x_2884 = lean_ctor_get(x_2881, 1); +lean_inc(x_2884); +lean_dec(x_2881); +x_2885 = lean_ctor_get(x_1, 2); +lean_inc(x_2885); +lean_inc(x_5); +lean_inc(x_4); +x_2886 = l_Lean_IR_ToIR_lowerType(x_2885, x_2884, x_4, x_5, x_2882); +if (lean_obj_tag(x_2886) == 0) +{ +lean_object* x_2887; lean_object* x_2888; lean_object* x_2889; lean_object* x_2890; lean_object* x_2891; +x_2887 = lean_ctor_get(x_2886, 0); +lean_inc(x_2887); +x_2888 = lean_ctor_get(x_2886, 1); +lean_inc(x_2888); +lean_dec(x_2886); +x_2889 = lean_ctor_get(x_2887, 0); +lean_inc(x_2889); +x_2890 = lean_ctor_get(x_2887, 1); +lean_inc(x_2890); +lean_dec(x_2887); +lean_inc(x_5); +lean_inc(x_4); +lean_inc(x_2); +x_2891 = l_Lean_IR_ToIR_lowerLet___lambda__1(x_2, x_2883, x_2878, x_2889, x_2890, x_4, x_5, x_2888); +if (lean_obj_tag(x_2891) == 0) +{ +lean_object* x_2892; lean_object* x_2893; lean_object* x_2894; lean_object* x_2895; lean_object* x_2896; lean_object* x_2897; lean_object* x_2898; +x_2892 = lean_ctor_get(x_2891, 0); +lean_inc(x_2892); +x_2893 = lean_ctor_get(x_2891, 1); +lean_inc(x_2893); +lean_dec(x_2891); +x_2894 = lean_ctor_get(x_2892, 0); +lean_inc(x_2894); +x_2895 = lean_ctor_get(x_2892, 1); +lean_inc(x_2895); +if (lean_is_exclusive(x_2892)) { + lean_ctor_release(x_2892, 0); + 
lean_ctor_release(x_2892, 1); + x_2896 = x_2892; +} else { + lean_dec_ref(x_2892); + x_2896 = lean_box(0); +} +if (lean_is_scalar(x_2835)) { + x_2897 = lean_alloc_ctor(1, 1, 0); +} else { + x_2897 = x_2835; +} +lean_ctor_set(x_2897, 0, x_2894); +if (lean_is_scalar(x_2896)) { + x_2898 = lean_alloc_ctor(0, 2, 0); +} else { + x_2898 = x_2896; +} +lean_ctor_set(x_2898, 0, x_2897); +lean_ctor_set(x_2898, 1, x_2895); +x_2799 = x_2898; +x_2800 = x_2893; +goto block_2826; +} +else +{ +lean_object* x_2899; lean_object* x_2900; lean_object* x_2901; lean_object* x_2902; +lean_dec(x_2835); +lean_dec(x_2373); +lean_dec(x_2363); +lean_dec(x_5); +lean_dec(x_4); +lean_dec(x_2); +lean_dec(x_1); +x_2899 = lean_ctor_get(x_2891, 0); +lean_inc(x_2899); +x_2900 = lean_ctor_get(x_2891, 1); +lean_inc(x_2900); +if (lean_is_exclusive(x_2891)) { + lean_ctor_release(x_2891, 0); + lean_ctor_release(x_2891, 1); + x_2901 = x_2891; +} else { + lean_dec_ref(x_2891); + x_2901 = lean_box(0); +} +if (lean_is_scalar(x_2901)) { + x_2902 = lean_alloc_ctor(1, 2, 0); +} else { + x_2902 = x_2901; +} +lean_ctor_set(x_2902, 0, x_2899); +lean_ctor_set(x_2902, 1, x_2900); +return x_2902; +} +} +else +{ +lean_object* x_2903; lean_object* x_2904; lean_object* x_2905; lean_object* x_2906; +lean_dec(x_2883); +lean_dec(x_2878); +lean_dec(x_2835); +lean_dec(x_2373); +lean_dec(x_2363); +lean_dec(x_5); +lean_dec(x_4); +lean_dec(x_2); +lean_dec(x_1); +x_2903 = lean_ctor_get(x_2886, 0); +lean_inc(x_2903); +x_2904 = lean_ctor_get(x_2886, 1); +lean_inc(x_2904); +if (lean_is_exclusive(x_2886)) { + lean_ctor_release(x_2886, 0); + lean_ctor_release(x_2886, 1); + x_2905 = x_2886; +} else { + lean_dec_ref(x_2886); + x_2905 = lean_box(0); +} +if (lean_is_scalar(x_2905)) { + x_2906 = lean_alloc_ctor(1, 2, 0); +} else { + x_2906 = x_2905; +} +lean_ctor_set(x_2906, 0, x_2903); +lean_ctor_set(x_2906, 1, x_2904); +return x_2906; +} +} +} +else +{ +lean_object* x_2907; lean_object* x_2908; lean_object* x_2909; lean_object* x_2910; lean_object* x_2911; lean_object* x_2912; lean_object* x_2913; lean_object* x_2914; lean_object* x_2915; +lean_dec(x_2838); +lean_dec(x_2836); +lean_inc(x_2363); +if (lean_is_scalar(x_2833)) { + x_2907 = lean_alloc_ctor(7, 2, 0); +} else { + x_2907 = x_2833; + lean_ctor_set_tag(x_2907, 7); +} +lean_ctor_set(x_2907, 0, x_153); +lean_ctor_set(x_2907, 1, x_2363); +x_2908 = lean_ctor_get(x_1, 0); +lean_inc(x_2908); +x_2909 = l_Lean_IR_ToIR_bindVar(x_2908, x_2368, x_4, x_5, x_2832); +x_2910 = lean_ctor_get(x_2909, 0); +lean_inc(x_2910); +x_2911 = lean_ctor_get(x_2909, 1); +lean_inc(x_2911); +lean_dec(x_2909); +x_2912 = lean_ctor_get(x_2910, 0); +lean_inc(x_2912); +x_2913 = lean_ctor_get(x_2910, 1); +lean_inc(x_2913); +lean_dec(x_2910); +x_2914 = lean_box(7); +lean_inc(x_5); +lean_inc(x_4); +lean_inc(x_2); +x_2915 = l_Lean_IR_ToIR_lowerLet___lambda__1(x_2, x_2912, x_2907, x_2914, x_2913, x_4, x_5, x_2911); +if (lean_obj_tag(x_2915) == 0) +{ +lean_object* x_2916; lean_object* x_2917; lean_object* x_2918; lean_object* x_2919; lean_object* x_2920; lean_object* x_2921; lean_object* x_2922; +x_2916 = lean_ctor_get(x_2915, 0); +lean_inc(x_2916); +x_2917 = lean_ctor_get(x_2915, 1); +lean_inc(x_2917); +lean_dec(x_2915); +x_2918 = lean_ctor_get(x_2916, 0); +lean_inc(x_2918); +x_2919 = lean_ctor_get(x_2916, 1); +lean_inc(x_2919); +if (lean_is_exclusive(x_2916)) { + lean_ctor_release(x_2916, 0); + lean_ctor_release(x_2916, 1); + x_2920 = x_2916; +} else { + lean_dec_ref(x_2916); + x_2920 = lean_box(0); +} +if (lean_is_scalar(x_2835)) { + x_2921 = 
lean_alloc_ctor(1, 1, 0); +} else { + x_2921 = x_2835; +} +lean_ctor_set(x_2921, 0, x_2918); +if (lean_is_scalar(x_2920)) { + x_2922 = lean_alloc_ctor(0, 2, 0); +} else { + x_2922 = x_2920; +} +lean_ctor_set(x_2922, 0, x_2921); +lean_ctor_set(x_2922, 1, x_2919); +x_2799 = x_2922; +x_2800 = x_2917; +goto block_2826; +} +else +{ +lean_object* x_2923; lean_object* x_2924; lean_object* x_2925; lean_object* x_2926; +lean_dec(x_2835); +lean_dec(x_2373); +lean_dec(x_2363); +lean_dec(x_5); +lean_dec(x_4); +lean_dec(x_2); +lean_dec(x_1); +x_2923 = lean_ctor_get(x_2915, 0); +lean_inc(x_2923); +x_2924 = lean_ctor_get(x_2915, 1); +lean_inc(x_2924); +if (lean_is_exclusive(x_2915)) { + lean_ctor_release(x_2915, 0); + lean_ctor_release(x_2915, 1); + x_2925 = x_2915; +} else { + lean_dec_ref(x_2915); + x_2925 = lean_box(0); +} +if (lean_is_scalar(x_2925)) { + x_2926 = lean_alloc_ctor(1, 2, 0); +} else { + x_2926 = x_2925; +} +lean_ctor_set(x_2926, 0, x_2923); +lean_ctor_set(x_2926, 1, x_2924); +return x_2926; +} +} +} +block_2826: +{ +lean_object* x_2801; +x_2801 = lean_ctor_get(x_2799, 0); +lean_inc(x_2801); +if (lean_obj_tag(x_2801) == 0) +{ +lean_object* x_2802; lean_object* x_2803; lean_object* x_2804; lean_object* x_2805; lean_object* x_2806; lean_object* x_2807; lean_object* x_2808; lean_object* x_2809; lean_object* x_2810; lean_object* x_2811; +lean_dec(x_2373); +x_2802 = lean_ctor_get(x_2799, 1); +lean_inc(x_2802); +lean_dec(x_2799); +x_2803 = lean_alloc_ctor(6, 2, 0); +lean_ctor_set(x_2803, 0, x_153); +lean_ctor_set(x_2803, 1, x_2363); +x_2804 = lean_ctor_get(x_1, 0); +lean_inc(x_2804); +x_2805 = l_Lean_IR_ToIR_bindVar(x_2804, x_2802, x_4, x_5, x_2800); +x_2806 = lean_ctor_get(x_2805, 0); +lean_inc(x_2806); +x_2807 = lean_ctor_get(x_2805, 1); +lean_inc(x_2807); +lean_dec(x_2805); +x_2808 = lean_ctor_get(x_2806, 0); +lean_inc(x_2808); +x_2809 = lean_ctor_get(x_2806, 1); +lean_inc(x_2809); +lean_dec(x_2806); +x_2810 = lean_ctor_get(x_1, 2); +lean_inc(x_2810); +lean_dec(x_1); +lean_inc(x_5); +lean_inc(x_4); +x_2811 = l_Lean_IR_ToIR_lowerType(x_2810, x_2809, x_4, x_5, x_2807); +if (lean_obj_tag(x_2811) == 0) +{ +lean_object* x_2812; lean_object* x_2813; lean_object* x_2814; lean_object* x_2815; lean_object* x_2816; +x_2812 = lean_ctor_get(x_2811, 0); +lean_inc(x_2812); +x_2813 = lean_ctor_get(x_2811, 1); +lean_inc(x_2813); +lean_dec(x_2811); +x_2814 = lean_ctor_get(x_2812, 0); +lean_inc(x_2814); +x_2815 = lean_ctor_get(x_2812, 1); +lean_inc(x_2815); +lean_dec(x_2812); +x_2816 = l_Lean_IR_ToIR_lowerLet___lambda__1(x_2, x_2808, x_2803, x_2814, x_2815, x_4, x_5, x_2813); +return x_2816; +} +else +{ +lean_object* x_2817; lean_object* x_2818; lean_object* x_2819; lean_object* x_2820; +lean_dec(x_2808); +lean_dec(x_2803); +lean_dec(x_5); +lean_dec(x_4); +lean_dec(x_2); +x_2817 = lean_ctor_get(x_2811, 0); +lean_inc(x_2817); +x_2818 = lean_ctor_get(x_2811, 1); +lean_inc(x_2818); +if (lean_is_exclusive(x_2811)) { + lean_ctor_release(x_2811, 0); + lean_ctor_release(x_2811, 1); + x_2819 = x_2811; +} else { + lean_dec_ref(x_2811); + x_2819 = lean_box(0); +} +if (lean_is_scalar(x_2819)) { + x_2820 = lean_alloc_ctor(1, 2, 0); +} else { + x_2820 = x_2819; +} +lean_ctor_set(x_2820, 0, x_2817); +lean_ctor_set(x_2820, 1, x_2818); +return x_2820; +} +} +else +{ +lean_object* x_2821; lean_object* x_2822; lean_object* x_2823; lean_object* x_2824; lean_object* x_2825; +lean_dec(x_2363); +lean_dec(x_5); +lean_dec(x_4); +lean_dec(x_2); +lean_dec(x_1); +x_2821 = lean_ctor_get(x_2799, 1); +lean_inc(x_2821); +if 
(lean_is_exclusive(x_2799)) { + lean_ctor_release(x_2799, 0); + lean_ctor_release(x_2799, 1); + x_2822 = x_2799; +} else { + lean_dec_ref(x_2799); + x_2822 = lean_box(0); +} +x_2823 = lean_ctor_get(x_2801, 0); +lean_inc(x_2823); +lean_dec(x_2801); +if (lean_is_scalar(x_2822)) { + x_2824 = lean_alloc_ctor(0, 2, 0); +} else { + x_2824 = x_2822; +} +lean_ctor_set(x_2824, 0, x_2823); +lean_ctor_set(x_2824, 1, x_2821); +if (lean_is_scalar(x_2373)) { + x_2825 = lean_alloc_ctor(0, 2, 0); +} else { + x_2825 = x_2373; +} +lean_ctor_set(x_2825, 0, x_2824); +lean_ctor_set(x_2825, 1, x_2800); +return x_2825; +} +} +} +} +default: +{ +lean_object* x_2927; uint8_t x_2928; lean_object* x_2929; lean_object* x_2930; lean_object* x_2931; lean_object* x_2932; lean_object* x_2933; lean_object* x_2934; lean_object* x_2935; lean_object* x_2936; lean_object* x_2937; +lean_dec(x_2374); +lean_dec(x_2373); +lean_dec(x_2369); +lean_dec(x_2363); +lean_dec(x_155); +lean_dec(x_154); +lean_dec(x_2); +lean_dec(x_1); +if (lean_is_exclusive(x_2379)) { + lean_ctor_release(x_2379, 0); + x_2927 = x_2379; +} else { + lean_dec_ref(x_2379); + x_2927 = lean_box(0); +} +x_2928 = 1; +x_2929 = l_Lean_IR_ToIR_lowerLet___closed__14; +x_2930 = l_Lean_Name_toString(x_153, x_2928, x_2929); +if (lean_is_scalar(x_2927)) { + x_2931 = lean_alloc_ctor(3, 1, 0); +} else { + x_2931 = x_2927; + lean_ctor_set_tag(x_2931, 3); +} +lean_ctor_set(x_2931, 0, x_2930); +x_2932 = l_Lean_IR_ToIR_lowerLet___closed__29; +x_2933 = lean_alloc_ctor(5, 2, 0); +lean_ctor_set(x_2933, 0, x_2932); +lean_ctor_set(x_2933, 1, x_2931); +x_2934 = l_Lean_IR_ToIR_lowerLet___closed__31; +x_2935 = lean_alloc_ctor(5, 2, 0); +lean_ctor_set(x_2935, 0, x_2933); +lean_ctor_set(x_2935, 1, x_2934); +x_2936 = l_Lean_MessageData_ofFormat(x_2935); +x_2937 = l_Lean_throwError___at_Lean_IR_ToIR_lowerLet___spec__1(x_2936, x_2368, x_4, x_5, x_2372); +lean_dec(x_5); +lean_dec(x_4); +lean_dec(x_2368); +return x_2937; +} +} +} +} +else +{ +lean_object* x_2938; lean_object* x_2939; lean_object* x_2940; lean_object* x_2941; lean_object* x_2942; +lean_dec(x_2363); +lean_dec(x_155); +lean_dec(x_154); +lean_dec(x_5); +lean_dec(x_4); +lean_dec(x_2); +lean_dec(x_1); +x_2938 = lean_ctor_get(x_2365, 1); +lean_inc(x_2938); +if (lean_is_exclusive(x_2365)) { + lean_ctor_release(x_2365, 0); + lean_ctor_release(x_2365, 1); + x_2939 = x_2365; +} else { + lean_dec_ref(x_2365); + x_2939 = lean_box(0); +} +x_2940 = lean_ctor_get(x_2367, 0); +lean_inc(x_2940); +lean_dec(x_2367); +if (lean_is_scalar(x_2939)) { + x_2941 = lean_alloc_ctor(0, 2, 0); +} else { + x_2941 = x_2939; +} +lean_ctor_set(x_2941, 0, x_2940); +lean_ctor_set(x_2941, 1, x_2938); +if (lean_is_scalar(x_161)) { + x_2942 = lean_alloc_ctor(0, 2, 0); +} else { + x_2942 = x_161; +} +lean_ctor_set(x_2942, 0, x_2941); +lean_ctor_set(x_2942, 1, x_2366); +return x_2942; +} +} +} +} +else +{ +uint8_t x_3044; +lean_dec(x_155); +lean_dec(x_154); +lean_dec(x_5); +lean_dec(x_4); +lean_dec(x_2); +lean_dec(x_1); +x_3044 = !lean_is_exclusive(x_158); +if (x_3044 == 0) +{ +return x_158; +} +else +{ +lean_object* x_3045; lean_object* x_3046; lean_object* x_3047; +x_3045 = lean_ctor_get(x_158, 0); +x_3046 = lean_ctor_get(x_158, 1); +lean_inc(x_3046); +lean_inc(x_3045); +lean_dec(x_158); +x_3047 = lean_alloc_ctor(1, 2, 0); +lean_ctor_set(x_3047, 0, x_3045); +lean_ctor_set(x_3047, 1, x_3046); +return x_3047; +} +} +} +case 1: +{ +lean_object* x_3048; +x_3048 = lean_ctor_get(x_153, 0); +lean_inc(x_3048); +switch (lean_obj_tag(x_3048)) { +case 0: +{ +lean_object* 
x_3049; lean_object* x_3050; size_t x_3051; size_t x_3052; lean_object* x_3053; +x_3049 = lean_ctor_get(x_7, 2); +lean_inc(x_3049); +if (lean_is_exclusive(x_7)) { + lean_ctor_release(x_7, 0); + lean_ctor_release(x_7, 1); + lean_ctor_release(x_7, 2); + x_3050 = x_7; +} else { + lean_dec_ref(x_7); + x_3050 = lean_box(0); +} +x_3051 = lean_array_size(x_3049); +x_3052 = 0; +lean_inc(x_5); +lean_inc(x_4); +lean_inc(x_3049); +x_3053 = l_Array_mapMUnsafe_map___at_Lean_IR_ToIR_lowerCode___spec__2(x_3051, x_3052, x_3049, x_3, x_4, x_5, x_6); +if (lean_obj_tag(x_3053) == 0) +{ +lean_object* x_3054; lean_object* x_3055; lean_object* x_3056; uint8_t x_3057; +x_3054 = lean_ctor_get(x_3053, 0); +lean_inc(x_3054); +x_3055 = lean_ctor_get(x_3053, 1); +lean_inc(x_3055); +if (lean_is_exclusive(x_3053)) { + lean_ctor_release(x_3053, 0); + lean_ctor_release(x_3053, 1); + x_3056 = x_3053; +} else { + lean_dec_ref(x_3053); + x_3056 = lean_box(0); +} +x_3057 = !lean_is_exclusive(x_3054); +if (x_3057 == 0) +{ +lean_object* x_3058; lean_object* x_3059; lean_object* x_3060; lean_object* x_3061; lean_object* x_4978; lean_object* x_4979; +x_3058 = lean_ctor_get(x_3054, 0); +x_3059 = lean_ctor_get(x_3054, 1); +lean_inc(x_153); +x_4978 = l_Lean_Compiler_LCNF_getMonoDecl_x3f(x_153, x_4, x_5, x_3055); +x_4979 = lean_ctor_get(x_4978, 0); +lean_inc(x_4979); +if (lean_obj_tag(x_4979) == 0) +{ +lean_object* x_4980; lean_object* x_4981; +x_4980 = lean_ctor_get(x_4978, 1); +lean_inc(x_4980); +lean_dec(x_4978); +x_4981 = lean_box(0); +lean_ctor_set(x_3054, 0, x_4981); +x_3060 = x_3054; +x_3061 = x_4980; +goto block_4977; +} +else +{ +uint8_t x_4982; +lean_free_object(x_3054); +x_4982 = !lean_is_exclusive(x_4978); +if (x_4982 == 0) +{ +lean_object* x_4983; lean_object* x_4984; uint8_t x_4985; +x_4983 = lean_ctor_get(x_4978, 1); +x_4984 = lean_ctor_get(x_4978, 0); +lean_dec(x_4984); +x_4985 = !lean_is_exclusive(x_4979); +if (x_4985 == 0) +{ +lean_object* x_4986; lean_object* x_4987; lean_object* x_4988; lean_object* x_4989; uint8_t x_4990; +x_4986 = lean_ctor_get(x_4979, 0); +x_4987 = lean_array_get_size(x_3058); +x_4988 = lean_ctor_get(x_4986, 3); +lean_inc(x_4988); +lean_dec(x_4986); +x_4989 = lean_array_get_size(x_4988); +lean_dec(x_4988); +x_4990 = lean_nat_dec_lt(x_4987, x_4989); +if (x_4990 == 0) +{ +uint8_t x_4991; +x_4991 = lean_nat_dec_eq(x_4987, x_4989); +if (x_4991 == 0) +{ +lean_object* x_4992; lean_object* x_4993; lean_object* x_4994; lean_object* x_4995; lean_object* x_4996; lean_object* x_4997; lean_object* x_4998; lean_object* x_4999; lean_object* x_5000; lean_object* x_5001; lean_object* x_5002; lean_object* x_5003; lean_object* x_5004; lean_object* x_5005; lean_object* x_5006; lean_object* x_5007; +x_4992 = lean_unsigned_to_nat(0u); +x_4993 = l_Array_extract___rarg(x_3058, x_4992, x_4989); +x_4994 = l_Array_extract___rarg(x_3058, x_4989, x_4987); +lean_dec(x_4987); +lean_inc(x_153); +lean_ctor_set_tag(x_4978, 6); +lean_ctor_set(x_4978, 1, x_4993); +lean_ctor_set(x_4978, 0, x_153); +x_4995 = lean_ctor_get(x_1, 0); +lean_inc(x_4995); +x_4996 = l_Lean_IR_ToIR_bindVar(x_4995, x_3059, x_4, x_5, x_4983); +x_4997 = lean_ctor_get(x_4996, 0); +lean_inc(x_4997); +x_4998 = lean_ctor_get(x_4996, 1); +lean_inc(x_4998); +lean_dec(x_4996); +x_4999 = lean_ctor_get(x_4997, 0); +lean_inc(x_4999); +x_5000 = lean_ctor_get(x_4997, 1); +lean_inc(x_5000); +lean_dec(x_4997); +x_5001 = l_Lean_IR_ToIR_newVar(x_5000, x_4, x_5, x_4998); +x_5002 = lean_ctor_get(x_5001, 0); +lean_inc(x_5002); +x_5003 = lean_ctor_get(x_5001, 1); 
+lean_inc(x_5003); +lean_dec(x_5001); +x_5004 = lean_ctor_get(x_5002, 0); +lean_inc(x_5004); +x_5005 = lean_ctor_get(x_5002, 1); +lean_inc(x_5005); +lean_dec(x_5002); +x_5006 = lean_ctor_get(x_1, 2); +lean_inc(x_5006); +lean_inc(x_5); +lean_inc(x_4); +x_5007 = l_Lean_IR_ToIR_lowerType(x_5006, x_5005, x_4, x_5, x_5003); +if (lean_obj_tag(x_5007) == 0) +{ +lean_object* x_5008; lean_object* x_5009; lean_object* x_5010; lean_object* x_5011; lean_object* x_5012; +x_5008 = lean_ctor_get(x_5007, 0); +lean_inc(x_5008); +x_5009 = lean_ctor_get(x_5007, 1); +lean_inc(x_5009); +lean_dec(x_5007); +x_5010 = lean_ctor_get(x_5008, 0); +lean_inc(x_5010); +x_5011 = lean_ctor_get(x_5008, 1); +lean_inc(x_5011); +lean_dec(x_5008); +lean_inc(x_5); +lean_inc(x_4); +lean_inc(x_2); +x_5012 = l_Lean_IR_ToIR_lowerLet___lambda__3(x_2, x_5004, x_4994, x_4999, x_4978, x_5010, x_5011, x_4, x_5, x_5009); +if (lean_obj_tag(x_5012) == 0) +{ +lean_object* x_5013; lean_object* x_5014; uint8_t x_5015; +x_5013 = lean_ctor_get(x_5012, 0); +lean_inc(x_5013); +x_5014 = lean_ctor_get(x_5012, 1); +lean_inc(x_5014); +lean_dec(x_5012); +x_5015 = !lean_is_exclusive(x_5013); +if (x_5015 == 0) +{ +lean_object* x_5016; +x_5016 = lean_ctor_get(x_5013, 0); +lean_ctor_set(x_4979, 0, x_5016); +lean_ctor_set(x_5013, 0, x_4979); +x_3060 = x_5013; +x_3061 = x_5014; +goto block_4977; +} +else +{ +lean_object* x_5017; lean_object* x_5018; lean_object* x_5019; +x_5017 = lean_ctor_get(x_5013, 0); +x_5018 = lean_ctor_get(x_5013, 1); +lean_inc(x_5018); +lean_inc(x_5017); +lean_dec(x_5013); +lean_ctor_set(x_4979, 0, x_5017); +x_5019 = lean_alloc_ctor(0, 2, 0); +lean_ctor_set(x_5019, 0, x_4979); +lean_ctor_set(x_5019, 1, x_5018); +x_3060 = x_5019; +x_3061 = x_5014; +goto block_4977; +} +} +else +{ +uint8_t x_5020; +lean_free_object(x_4979); +lean_dec(x_3058); +lean_dec(x_3056); +lean_dec(x_3050); +lean_dec(x_3049); +lean_dec(x_153); +lean_dec(x_5); +lean_dec(x_4); +lean_dec(x_2); +lean_dec(x_1); +x_5020 = !lean_is_exclusive(x_5012); +if (x_5020 == 0) +{ +return x_5012; +} +else +{ +lean_object* x_5021; lean_object* x_5022; lean_object* x_5023; +x_5021 = lean_ctor_get(x_5012, 0); +x_5022 = lean_ctor_get(x_5012, 1); +lean_inc(x_5022); +lean_inc(x_5021); +lean_dec(x_5012); +x_5023 = lean_alloc_ctor(1, 2, 0); +lean_ctor_set(x_5023, 0, x_5021); +lean_ctor_set(x_5023, 1, x_5022); +return x_5023; +} +} +} +else +{ +uint8_t x_5024; +lean_dec(x_5004); +lean_dec(x_4999); +lean_dec(x_4978); +lean_dec(x_4994); +lean_free_object(x_4979); +lean_dec(x_3058); +lean_dec(x_3056); +lean_dec(x_3050); +lean_dec(x_3049); +lean_dec(x_153); +lean_dec(x_5); +lean_dec(x_4); +lean_dec(x_2); +lean_dec(x_1); +x_5024 = !lean_is_exclusive(x_5007); +if (x_5024 == 0) +{ +return x_5007; +} +else +{ +lean_object* x_5025; lean_object* x_5026; lean_object* x_5027; +x_5025 = lean_ctor_get(x_5007, 0); +x_5026 = lean_ctor_get(x_5007, 1); +lean_inc(x_5026); +lean_inc(x_5025); +lean_dec(x_5007); +x_5027 = lean_alloc_ctor(1, 2, 0); +lean_ctor_set(x_5027, 0, x_5025); +lean_ctor_set(x_5027, 1, x_5026); +return x_5027; +} +} +} +else +{ +lean_object* x_5028; lean_object* x_5029; lean_object* x_5030; lean_object* x_5031; lean_object* x_5032; lean_object* x_5033; lean_object* x_5034; lean_object* x_5035; +lean_dec(x_4989); +lean_dec(x_4987); +lean_inc(x_3058); +lean_inc(x_153); +lean_ctor_set_tag(x_4978, 6); +lean_ctor_set(x_4978, 1, x_3058); +lean_ctor_set(x_4978, 0, x_153); +x_5028 = lean_ctor_get(x_1, 0); +lean_inc(x_5028); +x_5029 = l_Lean_IR_ToIR_bindVar(x_5028, x_3059, x_4, x_5, x_4983); 
+x_5030 = lean_ctor_get(x_5029, 0); +lean_inc(x_5030); +x_5031 = lean_ctor_get(x_5029, 1); +lean_inc(x_5031); +lean_dec(x_5029); +x_5032 = lean_ctor_get(x_5030, 0); +lean_inc(x_5032); +x_5033 = lean_ctor_get(x_5030, 1); +lean_inc(x_5033); +lean_dec(x_5030); +x_5034 = lean_ctor_get(x_1, 2); +lean_inc(x_5034); +lean_inc(x_5); +lean_inc(x_4); +x_5035 = l_Lean_IR_ToIR_lowerType(x_5034, x_5033, x_4, x_5, x_5031); +if (lean_obj_tag(x_5035) == 0) +{ +lean_object* x_5036; lean_object* x_5037; lean_object* x_5038; lean_object* x_5039; lean_object* x_5040; +x_5036 = lean_ctor_get(x_5035, 0); +lean_inc(x_5036); +x_5037 = lean_ctor_get(x_5035, 1); +lean_inc(x_5037); +lean_dec(x_5035); +x_5038 = lean_ctor_get(x_5036, 0); +lean_inc(x_5038); +x_5039 = lean_ctor_get(x_5036, 1); +lean_inc(x_5039); +lean_dec(x_5036); +lean_inc(x_5); +lean_inc(x_4); +lean_inc(x_2); +x_5040 = l_Lean_IR_ToIR_lowerLet___lambda__1(x_2, x_5032, x_4978, x_5038, x_5039, x_4, x_5, x_5037); +if (lean_obj_tag(x_5040) == 0) +{ +lean_object* x_5041; lean_object* x_5042; uint8_t x_5043; +x_5041 = lean_ctor_get(x_5040, 0); +lean_inc(x_5041); +x_5042 = lean_ctor_get(x_5040, 1); +lean_inc(x_5042); +lean_dec(x_5040); +x_5043 = !lean_is_exclusive(x_5041); +if (x_5043 == 0) +{ +lean_object* x_5044; +x_5044 = lean_ctor_get(x_5041, 0); +lean_ctor_set(x_4979, 0, x_5044); +lean_ctor_set(x_5041, 0, x_4979); +x_3060 = x_5041; +x_3061 = x_5042; +goto block_4977; +} +else +{ +lean_object* x_5045; lean_object* x_5046; lean_object* x_5047; +x_5045 = lean_ctor_get(x_5041, 0); +x_5046 = lean_ctor_get(x_5041, 1); +lean_inc(x_5046); +lean_inc(x_5045); +lean_dec(x_5041); +lean_ctor_set(x_4979, 0, x_5045); +x_5047 = lean_alloc_ctor(0, 2, 0); +lean_ctor_set(x_5047, 0, x_4979); +lean_ctor_set(x_5047, 1, x_5046); +x_3060 = x_5047; +x_3061 = x_5042; +goto block_4977; +} +} +else +{ +uint8_t x_5048; +lean_free_object(x_4979); +lean_dec(x_3058); +lean_dec(x_3056); +lean_dec(x_3050); +lean_dec(x_3049); +lean_dec(x_153); +lean_dec(x_5); +lean_dec(x_4); +lean_dec(x_2); +lean_dec(x_1); +x_5048 = !lean_is_exclusive(x_5040); +if (x_5048 == 0) +{ +return x_5040; +} +else +{ +lean_object* x_5049; lean_object* x_5050; lean_object* x_5051; +x_5049 = lean_ctor_get(x_5040, 0); +x_5050 = lean_ctor_get(x_5040, 1); +lean_inc(x_5050); +lean_inc(x_5049); +lean_dec(x_5040); +x_5051 = lean_alloc_ctor(1, 2, 0); +lean_ctor_set(x_5051, 0, x_5049); +lean_ctor_set(x_5051, 1, x_5050); +return x_5051; +} +} +} +else +{ +uint8_t x_5052; +lean_dec(x_5032); +lean_dec(x_4978); +lean_free_object(x_4979); +lean_dec(x_3058); +lean_dec(x_3056); +lean_dec(x_3050); +lean_dec(x_3049); +lean_dec(x_153); +lean_dec(x_5); +lean_dec(x_4); +lean_dec(x_2); +lean_dec(x_1); +x_5052 = !lean_is_exclusive(x_5035); +if (x_5052 == 0) +{ +return x_5035; +} +else +{ +lean_object* x_5053; lean_object* x_5054; lean_object* x_5055; +x_5053 = lean_ctor_get(x_5035, 0); +x_5054 = lean_ctor_get(x_5035, 1); +lean_inc(x_5054); +lean_inc(x_5053); +lean_dec(x_5035); +x_5055 = lean_alloc_ctor(1, 2, 0); +lean_ctor_set(x_5055, 0, x_5053); +lean_ctor_set(x_5055, 1, x_5054); +return x_5055; +} +} +} +} +else +{ +lean_object* x_5056; lean_object* x_5057; lean_object* x_5058; lean_object* x_5059; lean_object* x_5060; lean_object* x_5061; lean_object* x_5062; lean_object* x_5063; +lean_dec(x_4989); +lean_dec(x_4987); +lean_inc(x_3058); +lean_inc(x_153); +lean_ctor_set_tag(x_4978, 7); +lean_ctor_set(x_4978, 1, x_3058); +lean_ctor_set(x_4978, 0, x_153); +x_5056 = lean_ctor_get(x_1, 0); +lean_inc(x_5056); +x_5057 = 
l_Lean_IR_ToIR_bindVar(x_5056, x_3059, x_4, x_5, x_4983); +x_5058 = lean_ctor_get(x_5057, 0); +lean_inc(x_5058); +x_5059 = lean_ctor_get(x_5057, 1); +lean_inc(x_5059); +lean_dec(x_5057); +x_5060 = lean_ctor_get(x_5058, 0); +lean_inc(x_5060); +x_5061 = lean_ctor_get(x_5058, 1); +lean_inc(x_5061); +lean_dec(x_5058); +x_5062 = lean_box(7); +lean_inc(x_5); +lean_inc(x_4); +lean_inc(x_2); +x_5063 = l_Lean_IR_ToIR_lowerLet___lambda__1(x_2, x_5060, x_4978, x_5062, x_5061, x_4, x_5, x_5059); +if (lean_obj_tag(x_5063) == 0) +{ +lean_object* x_5064; lean_object* x_5065; uint8_t x_5066; +x_5064 = lean_ctor_get(x_5063, 0); +lean_inc(x_5064); +x_5065 = lean_ctor_get(x_5063, 1); +lean_inc(x_5065); +lean_dec(x_5063); +x_5066 = !lean_is_exclusive(x_5064); +if (x_5066 == 0) +{ +lean_object* x_5067; +x_5067 = lean_ctor_get(x_5064, 0); +lean_ctor_set(x_4979, 0, x_5067); +lean_ctor_set(x_5064, 0, x_4979); +x_3060 = x_5064; +x_3061 = x_5065; +goto block_4977; +} +else +{ +lean_object* x_5068; lean_object* x_5069; lean_object* x_5070; +x_5068 = lean_ctor_get(x_5064, 0); +x_5069 = lean_ctor_get(x_5064, 1); +lean_inc(x_5069); +lean_inc(x_5068); +lean_dec(x_5064); +lean_ctor_set(x_4979, 0, x_5068); +x_5070 = lean_alloc_ctor(0, 2, 0); +lean_ctor_set(x_5070, 0, x_4979); +lean_ctor_set(x_5070, 1, x_5069); +x_3060 = x_5070; +x_3061 = x_5065; +goto block_4977; +} +} +else +{ +uint8_t x_5071; +lean_free_object(x_4979); +lean_dec(x_3058); +lean_dec(x_3056); +lean_dec(x_3050); +lean_dec(x_3049); +lean_dec(x_153); +lean_dec(x_5); +lean_dec(x_4); +lean_dec(x_2); +lean_dec(x_1); +x_5071 = !lean_is_exclusive(x_5063); +if (x_5071 == 0) +{ +return x_5063; +} +else +{ +lean_object* x_5072; lean_object* x_5073; lean_object* x_5074; +x_5072 = lean_ctor_get(x_5063, 0); +x_5073 = lean_ctor_get(x_5063, 1); +lean_inc(x_5073); +lean_inc(x_5072); +lean_dec(x_5063); +x_5074 = lean_alloc_ctor(1, 2, 0); +lean_ctor_set(x_5074, 0, x_5072); +lean_ctor_set(x_5074, 1, x_5073); +return x_5074; +} +} +} +} +else +{ +lean_object* x_5075; lean_object* x_5076; lean_object* x_5077; lean_object* x_5078; uint8_t x_5079; +x_5075 = lean_ctor_get(x_4979, 0); +lean_inc(x_5075); +lean_dec(x_4979); +x_5076 = lean_array_get_size(x_3058); +x_5077 = lean_ctor_get(x_5075, 3); +lean_inc(x_5077); +lean_dec(x_5075); +x_5078 = lean_array_get_size(x_5077); +lean_dec(x_5077); +x_5079 = lean_nat_dec_lt(x_5076, x_5078); +if (x_5079 == 0) +{ +uint8_t x_5080; +x_5080 = lean_nat_dec_eq(x_5076, x_5078); +if (x_5080 == 0) +{ +lean_object* x_5081; lean_object* x_5082; lean_object* x_5083; lean_object* x_5084; lean_object* x_5085; lean_object* x_5086; lean_object* x_5087; lean_object* x_5088; lean_object* x_5089; lean_object* x_5090; lean_object* x_5091; lean_object* x_5092; lean_object* x_5093; lean_object* x_5094; lean_object* x_5095; lean_object* x_5096; +x_5081 = lean_unsigned_to_nat(0u); +x_5082 = l_Array_extract___rarg(x_3058, x_5081, x_5078); +x_5083 = l_Array_extract___rarg(x_3058, x_5078, x_5076); +lean_dec(x_5076); +lean_inc(x_153); +lean_ctor_set_tag(x_4978, 6); +lean_ctor_set(x_4978, 1, x_5082); +lean_ctor_set(x_4978, 0, x_153); +x_5084 = lean_ctor_get(x_1, 0); +lean_inc(x_5084); +x_5085 = l_Lean_IR_ToIR_bindVar(x_5084, x_3059, x_4, x_5, x_4983); +x_5086 = lean_ctor_get(x_5085, 0); +lean_inc(x_5086); +x_5087 = lean_ctor_get(x_5085, 1); +lean_inc(x_5087); +lean_dec(x_5085); +x_5088 = lean_ctor_get(x_5086, 0); +lean_inc(x_5088); +x_5089 = lean_ctor_get(x_5086, 1); +lean_inc(x_5089); +lean_dec(x_5086); +x_5090 = l_Lean_IR_ToIR_newVar(x_5089, x_4, x_5, x_5087); +x_5091 
= lean_ctor_get(x_5090, 0); +lean_inc(x_5091); +x_5092 = lean_ctor_get(x_5090, 1); +lean_inc(x_5092); +lean_dec(x_5090); +x_5093 = lean_ctor_get(x_5091, 0); +lean_inc(x_5093); +x_5094 = lean_ctor_get(x_5091, 1); +lean_inc(x_5094); +lean_dec(x_5091); +x_5095 = lean_ctor_get(x_1, 2); +lean_inc(x_5095); +lean_inc(x_5); +lean_inc(x_4); +x_5096 = l_Lean_IR_ToIR_lowerType(x_5095, x_5094, x_4, x_5, x_5092); +if (lean_obj_tag(x_5096) == 0) +{ +lean_object* x_5097; lean_object* x_5098; lean_object* x_5099; lean_object* x_5100; lean_object* x_5101; +x_5097 = lean_ctor_get(x_5096, 0); +lean_inc(x_5097); +x_5098 = lean_ctor_get(x_5096, 1); +lean_inc(x_5098); +lean_dec(x_5096); +x_5099 = lean_ctor_get(x_5097, 0); +lean_inc(x_5099); +x_5100 = lean_ctor_get(x_5097, 1); +lean_inc(x_5100); +lean_dec(x_5097); +lean_inc(x_5); +lean_inc(x_4); +lean_inc(x_2); +x_5101 = l_Lean_IR_ToIR_lowerLet___lambda__3(x_2, x_5093, x_5083, x_5088, x_4978, x_5099, x_5100, x_4, x_5, x_5098); +if (lean_obj_tag(x_5101) == 0) +{ +lean_object* x_5102; lean_object* x_5103; lean_object* x_5104; lean_object* x_5105; lean_object* x_5106; lean_object* x_5107; lean_object* x_5108; +x_5102 = lean_ctor_get(x_5101, 0); +lean_inc(x_5102); +x_5103 = lean_ctor_get(x_5101, 1); +lean_inc(x_5103); +lean_dec(x_5101); +x_5104 = lean_ctor_get(x_5102, 0); +lean_inc(x_5104); +x_5105 = lean_ctor_get(x_5102, 1); +lean_inc(x_5105); +if (lean_is_exclusive(x_5102)) { + lean_ctor_release(x_5102, 0); + lean_ctor_release(x_5102, 1); + x_5106 = x_5102; +} else { + lean_dec_ref(x_5102); + x_5106 = lean_box(0); +} +x_5107 = lean_alloc_ctor(1, 1, 0); +lean_ctor_set(x_5107, 0, x_5104); +if (lean_is_scalar(x_5106)) { + x_5108 = lean_alloc_ctor(0, 2, 0); +} else { + x_5108 = x_5106; +} +lean_ctor_set(x_5108, 0, x_5107); +lean_ctor_set(x_5108, 1, x_5105); +x_3060 = x_5108; +x_3061 = x_5103; +goto block_4977; +} +else +{ +lean_object* x_5109; lean_object* x_5110; lean_object* x_5111; lean_object* x_5112; +lean_dec(x_3058); +lean_dec(x_3056); +lean_dec(x_3050); +lean_dec(x_3049); +lean_dec(x_153); +lean_dec(x_5); +lean_dec(x_4); +lean_dec(x_2); +lean_dec(x_1); +x_5109 = lean_ctor_get(x_5101, 0); +lean_inc(x_5109); +x_5110 = lean_ctor_get(x_5101, 1); +lean_inc(x_5110); +if (lean_is_exclusive(x_5101)) { + lean_ctor_release(x_5101, 0); + lean_ctor_release(x_5101, 1); + x_5111 = x_5101; +} else { + lean_dec_ref(x_5101); + x_5111 = lean_box(0); +} +if (lean_is_scalar(x_5111)) { + x_5112 = lean_alloc_ctor(1, 2, 0); +} else { + x_5112 = x_5111; +} +lean_ctor_set(x_5112, 0, x_5109); +lean_ctor_set(x_5112, 1, x_5110); +return x_5112; +} +} +else +{ +lean_object* x_5113; lean_object* x_5114; lean_object* x_5115; lean_object* x_5116; +lean_dec(x_5093); +lean_dec(x_5088); +lean_dec(x_4978); +lean_dec(x_5083); +lean_dec(x_3058); +lean_dec(x_3056); +lean_dec(x_3050); +lean_dec(x_3049); +lean_dec(x_153); +lean_dec(x_5); +lean_dec(x_4); +lean_dec(x_2); +lean_dec(x_1); +x_5113 = lean_ctor_get(x_5096, 0); +lean_inc(x_5113); +x_5114 = lean_ctor_get(x_5096, 1); +lean_inc(x_5114); +if (lean_is_exclusive(x_5096)) { + lean_ctor_release(x_5096, 0); + lean_ctor_release(x_5096, 1); + x_5115 = x_5096; +} else { + lean_dec_ref(x_5096); + x_5115 = lean_box(0); +} +if (lean_is_scalar(x_5115)) { + x_5116 = lean_alloc_ctor(1, 2, 0); +} else { + x_5116 = x_5115; +} +lean_ctor_set(x_5116, 0, x_5113); +lean_ctor_set(x_5116, 1, x_5114); +return x_5116; +} +} +else +{ +lean_object* x_5117; lean_object* x_5118; lean_object* x_5119; lean_object* x_5120; lean_object* x_5121; lean_object* x_5122; 
lean_object* x_5123; lean_object* x_5124; +lean_dec(x_5078); +lean_dec(x_5076); +lean_inc(x_3058); +lean_inc(x_153); +lean_ctor_set_tag(x_4978, 6); +lean_ctor_set(x_4978, 1, x_3058); +lean_ctor_set(x_4978, 0, x_153); +x_5117 = lean_ctor_get(x_1, 0); +lean_inc(x_5117); +x_5118 = l_Lean_IR_ToIR_bindVar(x_5117, x_3059, x_4, x_5, x_4983); +x_5119 = lean_ctor_get(x_5118, 0); +lean_inc(x_5119); +x_5120 = lean_ctor_get(x_5118, 1); +lean_inc(x_5120); +lean_dec(x_5118); +x_5121 = lean_ctor_get(x_5119, 0); +lean_inc(x_5121); +x_5122 = lean_ctor_get(x_5119, 1); +lean_inc(x_5122); +lean_dec(x_5119); +x_5123 = lean_ctor_get(x_1, 2); +lean_inc(x_5123); +lean_inc(x_5); +lean_inc(x_4); +x_5124 = l_Lean_IR_ToIR_lowerType(x_5123, x_5122, x_4, x_5, x_5120); +if (lean_obj_tag(x_5124) == 0) +{ +lean_object* x_5125; lean_object* x_5126; lean_object* x_5127; lean_object* x_5128; lean_object* x_5129; +x_5125 = lean_ctor_get(x_5124, 0); +lean_inc(x_5125); +x_5126 = lean_ctor_get(x_5124, 1); +lean_inc(x_5126); +lean_dec(x_5124); +x_5127 = lean_ctor_get(x_5125, 0); +lean_inc(x_5127); +x_5128 = lean_ctor_get(x_5125, 1); +lean_inc(x_5128); +lean_dec(x_5125); +lean_inc(x_5); +lean_inc(x_4); +lean_inc(x_2); +x_5129 = l_Lean_IR_ToIR_lowerLet___lambda__1(x_2, x_5121, x_4978, x_5127, x_5128, x_4, x_5, x_5126); +if (lean_obj_tag(x_5129) == 0) +{ +lean_object* x_5130; lean_object* x_5131; lean_object* x_5132; lean_object* x_5133; lean_object* x_5134; lean_object* x_5135; lean_object* x_5136; +x_5130 = lean_ctor_get(x_5129, 0); +lean_inc(x_5130); +x_5131 = lean_ctor_get(x_5129, 1); +lean_inc(x_5131); +lean_dec(x_5129); +x_5132 = lean_ctor_get(x_5130, 0); +lean_inc(x_5132); +x_5133 = lean_ctor_get(x_5130, 1); +lean_inc(x_5133); +if (lean_is_exclusive(x_5130)) { + lean_ctor_release(x_5130, 0); + lean_ctor_release(x_5130, 1); + x_5134 = x_5130; +} else { + lean_dec_ref(x_5130); + x_5134 = lean_box(0); +} +x_5135 = lean_alloc_ctor(1, 1, 0); +lean_ctor_set(x_5135, 0, x_5132); +if (lean_is_scalar(x_5134)) { + x_5136 = lean_alloc_ctor(0, 2, 0); +} else { + x_5136 = x_5134; +} +lean_ctor_set(x_5136, 0, x_5135); +lean_ctor_set(x_5136, 1, x_5133); +x_3060 = x_5136; +x_3061 = x_5131; +goto block_4977; +} +else +{ +lean_object* x_5137; lean_object* x_5138; lean_object* x_5139; lean_object* x_5140; +lean_dec(x_3058); +lean_dec(x_3056); +lean_dec(x_3050); +lean_dec(x_3049); +lean_dec(x_153); +lean_dec(x_5); +lean_dec(x_4); +lean_dec(x_2); +lean_dec(x_1); +x_5137 = lean_ctor_get(x_5129, 0); +lean_inc(x_5137); +x_5138 = lean_ctor_get(x_5129, 1); +lean_inc(x_5138); +if (lean_is_exclusive(x_5129)) { + lean_ctor_release(x_5129, 0); + lean_ctor_release(x_5129, 1); + x_5139 = x_5129; +} else { + lean_dec_ref(x_5129); + x_5139 = lean_box(0); +} +if (lean_is_scalar(x_5139)) { + x_5140 = lean_alloc_ctor(1, 2, 0); +} else { + x_5140 = x_5139; +} +lean_ctor_set(x_5140, 0, x_5137); +lean_ctor_set(x_5140, 1, x_5138); +return x_5140; +} +} +else +{ +lean_object* x_5141; lean_object* x_5142; lean_object* x_5143; lean_object* x_5144; +lean_dec(x_5121); +lean_dec(x_4978); +lean_dec(x_3058); +lean_dec(x_3056); +lean_dec(x_3050); +lean_dec(x_3049); +lean_dec(x_153); +lean_dec(x_5); +lean_dec(x_4); +lean_dec(x_2); +lean_dec(x_1); +x_5141 = lean_ctor_get(x_5124, 0); +lean_inc(x_5141); +x_5142 = lean_ctor_get(x_5124, 1); +lean_inc(x_5142); +if (lean_is_exclusive(x_5124)) { + lean_ctor_release(x_5124, 0); + lean_ctor_release(x_5124, 1); + x_5143 = x_5124; +} else { + lean_dec_ref(x_5124); + x_5143 = lean_box(0); +} +if (lean_is_scalar(x_5143)) { + x_5144 = 
lean_alloc_ctor(1, 2, 0); +} else { + x_5144 = x_5143; +} +lean_ctor_set(x_5144, 0, x_5141); +lean_ctor_set(x_5144, 1, x_5142); +return x_5144; +} +} +} +else +{ +lean_object* x_5145; lean_object* x_5146; lean_object* x_5147; lean_object* x_5148; lean_object* x_5149; lean_object* x_5150; lean_object* x_5151; lean_object* x_5152; +lean_dec(x_5078); +lean_dec(x_5076); +lean_inc(x_3058); +lean_inc(x_153); +lean_ctor_set_tag(x_4978, 7); +lean_ctor_set(x_4978, 1, x_3058); +lean_ctor_set(x_4978, 0, x_153); +x_5145 = lean_ctor_get(x_1, 0); +lean_inc(x_5145); +x_5146 = l_Lean_IR_ToIR_bindVar(x_5145, x_3059, x_4, x_5, x_4983); +x_5147 = lean_ctor_get(x_5146, 0); +lean_inc(x_5147); +x_5148 = lean_ctor_get(x_5146, 1); +lean_inc(x_5148); +lean_dec(x_5146); +x_5149 = lean_ctor_get(x_5147, 0); +lean_inc(x_5149); +x_5150 = lean_ctor_get(x_5147, 1); +lean_inc(x_5150); +lean_dec(x_5147); +x_5151 = lean_box(7); +lean_inc(x_5); +lean_inc(x_4); +lean_inc(x_2); +x_5152 = l_Lean_IR_ToIR_lowerLet___lambda__1(x_2, x_5149, x_4978, x_5151, x_5150, x_4, x_5, x_5148); +if (lean_obj_tag(x_5152) == 0) +{ +lean_object* x_5153; lean_object* x_5154; lean_object* x_5155; lean_object* x_5156; lean_object* x_5157; lean_object* x_5158; lean_object* x_5159; +x_5153 = lean_ctor_get(x_5152, 0); +lean_inc(x_5153); +x_5154 = lean_ctor_get(x_5152, 1); +lean_inc(x_5154); +lean_dec(x_5152); +x_5155 = lean_ctor_get(x_5153, 0); +lean_inc(x_5155); +x_5156 = lean_ctor_get(x_5153, 1); +lean_inc(x_5156); +if (lean_is_exclusive(x_5153)) { + lean_ctor_release(x_5153, 0); + lean_ctor_release(x_5153, 1); + x_5157 = x_5153; +} else { + lean_dec_ref(x_5153); + x_5157 = lean_box(0); +} +x_5158 = lean_alloc_ctor(1, 1, 0); +lean_ctor_set(x_5158, 0, x_5155); +if (lean_is_scalar(x_5157)) { + x_5159 = lean_alloc_ctor(0, 2, 0); +} else { + x_5159 = x_5157; +} +lean_ctor_set(x_5159, 0, x_5158); +lean_ctor_set(x_5159, 1, x_5156); +x_3060 = x_5159; +x_3061 = x_5154; +goto block_4977; +} +else +{ +lean_object* x_5160; lean_object* x_5161; lean_object* x_5162; lean_object* x_5163; +lean_dec(x_3058); +lean_dec(x_3056); +lean_dec(x_3050); +lean_dec(x_3049); +lean_dec(x_153); +lean_dec(x_5); +lean_dec(x_4); +lean_dec(x_2); +lean_dec(x_1); +x_5160 = lean_ctor_get(x_5152, 0); +lean_inc(x_5160); +x_5161 = lean_ctor_get(x_5152, 1); +lean_inc(x_5161); +if (lean_is_exclusive(x_5152)) { + lean_ctor_release(x_5152, 0); + lean_ctor_release(x_5152, 1); + x_5162 = x_5152; +} else { + lean_dec_ref(x_5152); + x_5162 = lean_box(0); +} +if (lean_is_scalar(x_5162)) { + x_5163 = lean_alloc_ctor(1, 2, 0); +} else { + x_5163 = x_5162; +} +lean_ctor_set(x_5163, 0, x_5160); +lean_ctor_set(x_5163, 1, x_5161); +return x_5163; +} +} +} +} +else +{ +lean_object* x_5164; lean_object* x_5165; lean_object* x_5166; lean_object* x_5167; lean_object* x_5168; lean_object* x_5169; uint8_t x_5170; +x_5164 = lean_ctor_get(x_4978, 1); +lean_inc(x_5164); +lean_dec(x_4978); +x_5165 = lean_ctor_get(x_4979, 0); +lean_inc(x_5165); +if (lean_is_exclusive(x_4979)) { + lean_ctor_release(x_4979, 0); + x_5166 = x_4979; +} else { + lean_dec_ref(x_4979); + x_5166 = lean_box(0); +} +x_5167 = lean_array_get_size(x_3058); +x_5168 = lean_ctor_get(x_5165, 3); +lean_inc(x_5168); +lean_dec(x_5165); +x_5169 = lean_array_get_size(x_5168); +lean_dec(x_5168); +x_5170 = lean_nat_dec_lt(x_5167, x_5169); +if (x_5170 == 0) +{ +uint8_t x_5171; +x_5171 = lean_nat_dec_eq(x_5167, x_5169); +if (x_5171 == 0) +{ +lean_object* x_5172; lean_object* x_5173; lean_object* x_5174; lean_object* x_5175; lean_object* x_5176; lean_object* 
x_5177; lean_object* x_5178; lean_object* x_5179; lean_object* x_5180; lean_object* x_5181; lean_object* x_5182; lean_object* x_5183; lean_object* x_5184; lean_object* x_5185; lean_object* x_5186; lean_object* x_5187; lean_object* x_5188; +x_5172 = lean_unsigned_to_nat(0u); +x_5173 = l_Array_extract___rarg(x_3058, x_5172, x_5169); +x_5174 = l_Array_extract___rarg(x_3058, x_5169, x_5167); +lean_dec(x_5167); +lean_inc(x_153); +x_5175 = lean_alloc_ctor(6, 2, 0); +lean_ctor_set(x_5175, 0, x_153); +lean_ctor_set(x_5175, 1, x_5173); +x_5176 = lean_ctor_get(x_1, 0); +lean_inc(x_5176); +x_5177 = l_Lean_IR_ToIR_bindVar(x_5176, x_3059, x_4, x_5, x_5164); +x_5178 = lean_ctor_get(x_5177, 0); +lean_inc(x_5178); +x_5179 = lean_ctor_get(x_5177, 1); +lean_inc(x_5179); +lean_dec(x_5177); +x_5180 = lean_ctor_get(x_5178, 0); +lean_inc(x_5180); +x_5181 = lean_ctor_get(x_5178, 1); +lean_inc(x_5181); +lean_dec(x_5178); +x_5182 = l_Lean_IR_ToIR_newVar(x_5181, x_4, x_5, x_5179); +x_5183 = lean_ctor_get(x_5182, 0); +lean_inc(x_5183); +x_5184 = lean_ctor_get(x_5182, 1); +lean_inc(x_5184); +lean_dec(x_5182); +x_5185 = lean_ctor_get(x_5183, 0); +lean_inc(x_5185); +x_5186 = lean_ctor_get(x_5183, 1); +lean_inc(x_5186); +lean_dec(x_5183); +x_5187 = lean_ctor_get(x_1, 2); +lean_inc(x_5187); +lean_inc(x_5); +lean_inc(x_4); +x_5188 = l_Lean_IR_ToIR_lowerType(x_5187, x_5186, x_4, x_5, x_5184); +if (lean_obj_tag(x_5188) == 0) +{ +lean_object* x_5189; lean_object* x_5190; lean_object* x_5191; lean_object* x_5192; lean_object* x_5193; +x_5189 = lean_ctor_get(x_5188, 0); +lean_inc(x_5189); +x_5190 = lean_ctor_get(x_5188, 1); +lean_inc(x_5190); +lean_dec(x_5188); +x_5191 = lean_ctor_get(x_5189, 0); +lean_inc(x_5191); +x_5192 = lean_ctor_get(x_5189, 1); +lean_inc(x_5192); +lean_dec(x_5189); +lean_inc(x_5); +lean_inc(x_4); +lean_inc(x_2); +x_5193 = l_Lean_IR_ToIR_lowerLet___lambda__3(x_2, x_5185, x_5174, x_5180, x_5175, x_5191, x_5192, x_4, x_5, x_5190); +if (lean_obj_tag(x_5193) == 0) +{ +lean_object* x_5194; lean_object* x_5195; lean_object* x_5196; lean_object* x_5197; lean_object* x_5198; lean_object* x_5199; lean_object* x_5200; +x_5194 = lean_ctor_get(x_5193, 0); +lean_inc(x_5194); +x_5195 = lean_ctor_get(x_5193, 1); +lean_inc(x_5195); +lean_dec(x_5193); +x_5196 = lean_ctor_get(x_5194, 0); +lean_inc(x_5196); +x_5197 = lean_ctor_get(x_5194, 1); +lean_inc(x_5197); +if (lean_is_exclusive(x_5194)) { + lean_ctor_release(x_5194, 0); + lean_ctor_release(x_5194, 1); + x_5198 = x_5194; +} else { + lean_dec_ref(x_5194); + x_5198 = lean_box(0); +} +if (lean_is_scalar(x_5166)) { + x_5199 = lean_alloc_ctor(1, 1, 0); +} else { + x_5199 = x_5166; +} +lean_ctor_set(x_5199, 0, x_5196); +if (lean_is_scalar(x_5198)) { + x_5200 = lean_alloc_ctor(0, 2, 0); +} else { + x_5200 = x_5198; +} +lean_ctor_set(x_5200, 0, x_5199); +lean_ctor_set(x_5200, 1, x_5197); +x_3060 = x_5200; +x_3061 = x_5195; +goto block_4977; +} +else +{ +lean_object* x_5201; lean_object* x_5202; lean_object* x_5203; lean_object* x_5204; +lean_dec(x_5166); +lean_dec(x_3058); +lean_dec(x_3056); +lean_dec(x_3050); +lean_dec(x_3049); +lean_dec(x_153); +lean_dec(x_5); +lean_dec(x_4); +lean_dec(x_2); +lean_dec(x_1); +x_5201 = lean_ctor_get(x_5193, 0); +lean_inc(x_5201); +x_5202 = lean_ctor_get(x_5193, 1); +lean_inc(x_5202); +if (lean_is_exclusive(x_5193)) { + lean_ctor_release(x_5193, 0); + lean_ctor_release(x_5193, 1); + x_5203 = x_5193; +} else { + lean_dec_ref(x_5193); + x_5203 = lean_box(0); +} +if (lean_is_scalar(x_5203)) { + x_5204 = lean_alloc_ctor(1, 2, 0); +} else { + x_5204 
= x_5203; +} +lean_ctor_set(x_5204, 0, x_5201); +lean_ctor_set(x_5204, 1, x_5202); +return x_5204; +} +} +else +{ +lean_object* x_5205; lean_object* x_5206; lean_object* x_5207; lean_object* x_5208; +lean_dec(x_5185); +lean_dec(x_5180); +lean_dec(x_5175); +lean_dec(x_5174); +lean_dec(x_5166); +lean_dec(x_3058); +lean_dec(x_3056); +lean_dec(x_3050); +lean_dec(x_3049); +lean_dec(x_153); +lean_dec(x_5); +lean_dec(x_4); +lean_dec(x_2); +lean_dec(x_1); +x_5205 = lean_ctor_get(x_5188, 0); +lean_inc(x_5205); +x_5206 = lean_ctor_get(x_5188, 1); +lean_inc(x_5206); +if (lean_is_exclusive(x_5188)) { + lean_ctor_release(x_5188, 0); + lean_ctor_release(x_5188, 1); + x_5207 = x_5188; +} else { + lean_dec_ref(x_5188); + x_5207 = lean_box(0); +} +if (lean_is_scalar(x_5207)) { + x_5208 = lean_alloc_ctor(1, 2, 0); +} else { + x_5208 = x_5207; +} +lean_ctor_set(x_5208, 0, x_5205); +lean_ctor_set(x_5208, 1, x_5206); +return x_5208; +} +} +else +{ +lean_object* x_5209; lean_object* x_5210; lean_object* x_5211; lean_object* x_5212; lean_object* x_5213; lean_object* x_5214; lean_object* x_5215; lean_object* x_5216; lean_object* x_5217; +lean_dec(x_5169); +lean_dec(x_5167); +lean_inc(x_3058); +lean_inc(x_153); +x_5209 = lean_alloc_ctor(6, 2, 0); +lean_ctor_set(x_5209, 0, x_153); +lean_ctor_set(x_5209, 1, x_3058); +x_5210 = lean_ctor_get(x_1, 0); +lean_inc(x_5210); +x_5211 = l_Lean_IR_ToIR_bindVar(x_5210, x_3059, x_4, x_5, x_5164); +x_5212 = lean_ctor_get(x_5211, 0); +lean_inc(x_5212); +x_5213 = lean_ctor_get(x_5211, 1); +lean_inc(x_5213); +lean_dec(x_5211); +x_5214 = lean_ctor_get(x_5212, 0); +lean_inc(x_5214); +x_5215 = lean_ctor_get(x_5212, 1); +lean_inc(x_5215); +lean_dec(x_5212); +x_5216 = lean_ctor_get(x_1, 2); +lean_inc(x_5216); +lean_inc(x_5); +lean_inc(x_4); +x_5217 = l_Lean_IR_ToIR_lowerType(x_5216, x_5215, x_4, x_5, x_5213); +if (lean_obj_tag(x_5217) == 0) +{ +lean_object* x_5218; lean_object* x_5219; lean_object* x_5220; lean_object* x_5221; lean_object* x_5222; +x_5218 = lean_ctor_get(x_5217, 0); +lean_inc(x_5218); +x_5219 = lean_ctor_get(x_5217, 1); +lean_inc(x_5219); +lean_dec(x_5217); +x_5220 = lean_ctor_get(x_5218, 0); +lean_inc(x_5220); +x_5221 = lean_ctor_get(x_5218, 1); +lean_inc(x_5221); +lean_dec(x_5218); +lean_inc(x_5); +lean_inc(x_4); +lean_inc(x_2); +x_5222 = l_Lean_IR_ToIR_lowerLet___lambda__1(x_2, x_5214, x_5209, x_5220, x_5221, x_4, x_5, x_5219); +if (lean_obj_tag(x_5222) == 0) +{ +lean_object* x_5223; lean_object* x_5224; lean_object* x_5225; lean_object* x_5226; lean_object* x_5227; lean_object* x_5228; lean_object* x_5229; +x_5223 = lean_ctor_get(x_5222, 0); +lean_inc(x_5223); +x_5224 = lean_ctor_get(x_5222, 1); +lean_inc(x_5224); +lean_dec(x_5222); +x_5225 = lean_ctor_get(x_5223, 0); +lean_inc(x_5225); +x_5226 = lean_ctor_get(x_5223, 1); +lean_inc(x_5226); +if (lean_is_exclusive(x_5223)) { + lean_ctor_release(x_5223, 0); + lean_ctor_release(x_5223, 1); + x_5227 = x_5223; +} else { + lean_dec_ref(x_5223); + x_5227 = lean_box(0); +} +if (lean_is_scalar(x_5166)) { + x_5228 = lean_alloc_ctor(1, 1, 0); +} else { + x_5228 = x_5166; +} +lean_ctor_set(x_5228, 0, x_5225); +if (lean_is_scalar(x_5227)) { + x_5229 = lean_alloc_ctor(0, 2, 0); +} else { + x_5229 = x_5227; +} +lean_ctor_set(x_5229, 0, x_5228); +lean_ctor_set(x_5229, 1, x_5226); +x_3060 = x_5229; +x_3061 = x_5224; +goto block_4977; +} +else +{ +lean_object* x_5230; lean_object* x_5231; lean_object* x_5232; lean_object* x_5233; +lean_dec(x_5166); +lean_dec(x_3058); +lean_dec(x_3056); +lean_dec(x_3050); +lean_dec(x_3049); 
+lean_dec(x_153); +lean_dec(x_5); +lean_dec(x_4); +lean_dec(x_2); +lean_dec(x_1); +x_5230 = lean_ctor_get(x_5222, 0); +lean_inc(x_5230); +x_5231 = lean_ctor_get(x_5222, 1); +lean_inc(x_5231); +if (lean_is_exclusive(x_5222)) { + lean_ctor_release(x_5222, 0); + lean_ctor_release(x_5222, 1); + x_5232 = x_5222; +} else { + lean_dec_ref(x_5222); + x_5232 = lean_box(0); +} +if (lean_is_scalar(x_5232)) { + x_5233 = lean_alloc_ctor(1, 2, 0); +} else { + x_5233 = x_5232; +} +lean_ctor_set(x_5233, 0, x_5230); +lean_ctor_set(x_5233, 1, x_5231); +return x_5233; +} +} +else +{ +lean_object* x_5234; lean_object* x_5235; lean_object* x_5236; lean_object* x_5237; +lean_dec(x_5214); +lean_dec(x_5209); +lean_dec(x_5166); +lean_dec(x_3058); +lean_dec(x_3056); +lean_dec(x_3050); +lean_dec(x_3049); +lean_dec(x_153); +lean_dec(x_5); +lean_dec(x_4); +lean_dec(x_2); +lean_dec(x_1); +x_5234 = lean_ctor_get(x_5217, 0); +lean_inc(x_5234); +x_5235 = lean_ctor_get(x_5217, 1); +lean_inc(x_5235); +if (lean_is_exclusive(x_5217)) { + lean_ctor_release(x_5217, 0); + lean_ctor_release(x_5217, 1); + x_5236 = x_5217; +} else { + lean_dec_ref(x_5217); + x_5236 = lean_box(0); +} +if (lean_is_scalar(x_5236)) { + x_5237 = lean_alloc_ctor(1, 2, 0); +} else { + x_5237 = x_5236; +} +lean_ctor_set(x_5237, 0, x_5234); +lean_ctor_set(x_5237, 1, x_5235); +return x_5237; +} +} +} +else +{ +lean_object* x_5238; lean_object* x_5239; lean_object* x_5240; lean_object* x_5241; lean_object* x_5242; lean_object* x_5243; lean_object* x_5244; lean_object* x_5245; lean_object* x_5246; +lean_dec(x_5169); +lean_dec(x_5167); +lean_inc(x_3058); +lean_inc(x_153); +x_5238 = lean_alloc_ctor(7, 2, 0); +lean_ctor_set(x_5238, 0, x_153); +lean_ctor_set(x_5238, 1, x_3058); +x_5239 = lean_ctor_get(x_1, 0); +lean_inc(x_5239); +x_5240 = l_Lean_IR_ToIR_bindVar(x_5239, x_3059, x_4, x_5, x_5164); +x_5241 = lean_ctor_get(x_5240, 0); +lean_inc(x_5241); +x_5242 = lean_ctor_get(x_5240, 1); +lean_inc(x_5242); +lean_dec(x_5240); +x_5243 = lean_ctor_get(x_5241, 0); +lean_inc(x_5243); +x_5244 = lean_ctor_get(x_5241, 1); +lean_inc(x_5244); +lean_dec(x_5241); +x_5245 = lean_box(7); +lean_inc(x_5); +lean_inc(x_4); +lean_inc(x_2); +x_5246 = l_Lean_IR_ToIR_lowerLet___lambda__1(x_2, x_5243, x_5238, x_5245, x_5244, x_4, x_5, x_5242); +if (lean_obj_tag(x_5246) == 0) +{ +lean_object* x_5247; lean_object* x_5248; lean_object* x_5249; lean_object* x_5250; lean_object* x_5251; lean_object* x_5252; lean_object* x_5253; +x_5247 = lean_ctor_get(x_5246, 0); +lean_inc(x_5247); +x_5248 = lean_ctor_get(x_5246, 1); +lean_inc(x_5248); +lean_dec(x_5246); +x_5249 = lean_ctor_get(x_5247, 0); +lean_inc(x_5249); +x_5250 = lean_ctor_get(x_5247, 1); +lean_inc(x_5250); +if (lean_is_exclusive(x_5247)) { + lean_ctor_release(x_5247, 0); + lean_ctor_release(x_5247, 1); + x_5251 = x_5247; +} else { + lean_dec_ref(x_5247); + x_5251 = lean_box(0); +} +if (lean_is_scalar(x_5166)) { + x_5252 = lean_alloc_ctor(1, 1, 0); +} else { + x_5252 = x_5166; +} +lean_ctor_set(x_5252, 0, x_5249); +if (lean_is_scalar(x_5251)) { + x_5253 = lean_alloc_ctor(0, 2, 0); +} else { + x_5253 = x_5251; +} +lean_ctor_set(x_5253, 0, x_5252); +lean_ctor_set(x_5253, 1, x_5250); +x_3060 = x_5253; +x_3061 = x_5248; +goto block_4977; +} +else +{ +lean_object* x_5254; lean_object* x_5255; lean_object* x_5256; lean_object* x_5257; +lean_dec(x_5166); +lean_dec(x_3058); +lean_dec(x_3056); +lean_dec(x_3050); +lean_dec(x_3049); +lean_dec(x_153); +lean_dec(x_5); +lean_dec(x_4); +lean_dec(x_2); +lean_dec(x_1); +x_5254 = lean_ctor_get(x_5246, 0); 
+lean_inc(x_5254); +x_5255 = lean_ctor_get(x_5246, 1); +lean_inc(x_5255); +if (lean_is_exclusive(x_5246)) { + lean_ctor_release(x_5246, 0); + lean_ctor_release(x_5246, 1); + x_5256 = x_5246; +} else { + lean_dec_ref(x_5246); + x_5256 = lean_box(0); +} +if (lean_is_scalar(x_5256)) { + x_5257 = lean_alloc_ctor(1, 2, 0); +} else { + x_5257 = x_5256; +} +lean_ctor_set(x_5257, 0, x_5254); +lean_ctor_set(x_5257, 1, x_5255); +return x_5257; +} +} +} +} +block_4977: +{ +lean_object* x_3062; +x_3062 = lean_ctor_get(x_3060, 0); +lean_inc(x_3062); +if (lean_obj_tag(x_3062) == 0) +{ +uint8_t x_3063; +lean_dec(x_3056); +x_3063 = !lean_is_exclusive(x_3060); +if (x_3063 == 0) +{ +lean_object* x_3064; lean_object* x_3065; lean_object* x_3066; lean_object* x_3067; lean_object* x_3068; lean_object* x_3069; lean_object* x_3070; uint8_t x_3071; lean_object* x_3072; +x_3064 = lean_ctor_get(x_3060, 1); +x_3065 = lean_ctor_get(x_3060, 0); +lean_dec(x_3065); +x_3066 = lean_st_ref_get(x_5, x_3061); +x_3067 = lean_ctor_get(x_3066, 0); +lean_inc(x_3067); +x_3068 = lean_ctor_get(x_3066, 1); +lean_inc(x_3068); +if (lean_is_exclusive(x_3066)) { + lean_ctor_release(x_3066, 0); + lean_ctor_release(x_3066, 1); + x_3069 = x_3066; +} else { + lean_dec_ref(x_3066); + x_3069 = lean_box(0); +} +x_3070 = lean_ctor_get(x_3067, 0); +lean_inc(x_3070); +lean_dec(x_3067); +x_3071 = 0; +lean_inc(x_153); +lean_inc(x_3070); +x_3072 = l_Lean_Environment_find_x3f(x_3070, x_153, x_3071); +if (lean_obj_tag(x_3072) == 0) +{ +lean_object* x_3073; lean_object* x_3074; +lean_dec(x_3070); +lean_dec(x_3069); +lean_free_object(x_3060); +lean_dec(x_3058); +lean_dec(x_3050); +lean_dec(x_3049); +lean_dec(x_153); +lean_dec(x_2); +lean_dec(x_1); +x_3073 = l_Lean_IR_ToIR_lowerLet___closed__6; +x_3074 = l_panic___at_Lean_IR_ToIR_lowerAlt_loop___spec__1(x_3073, x_3064, x_4, x_5, x_3068); +return x_3074; +} +else +{ +lean_object* x_3075; +x_3075 = lean_ctor_get(x_3072, 0); +lean_inc(x_3075); +lean_dec(x_3072); +switch (lean_obj_tag(x_3075)) { +case 0: +{ +uint8_t x_3076; +lean_dec(x_3070); +lean_dec(x_3050); +lean_dec(x_3049); +x_3076 = !lean_is_exclusive(x_3075); +if (x_3076 == 0) +{ +lean_object* x_3077; lean_object* x_3078; uint8_t x_3079; +x_3077 = lean_ctor_get(x_3075, 0); +lean_dec(x_3077); +x_3078 = l_Lean_IR_ToIR_lowerLet___closed__9; +x_3079 = lean_name_eq(x_153, x_3078); +if (x_3079 == 0) +{ +lean_object* x_3080; uint8_t x_3081; +x_3080 = l_Lean_IR_ToIR_lowerLet___closed__11; +x_3081 = lean_name_eq(x_153, x_3080); +if (x_3081 == 0) +{ +lean_object* x_3082; lean_object* x_3083; lean_object* x_3084; +lean_dec(x_3069); +lean_free_object(x_3060); +lean_inc(x_153); +x_3082 = l_Lean_IR_ToIR_findDecl(x_153, x_3064, x_4, x_5, x_3068); +x_3083 = lean_ctor_get(x_3082, 0); +lean_inc(x_3083); +x_3084 = lean_ctor_get(x_3083, 0); +lean_inc(x_3084); +if (lean_obj_tag(x_3084) == 0) +{ +uint8_t x_3085; +lean_dec(x_3058); +lean_dec(x_2); +lean_dec(x_1); +x_3085 = !lean_is_exclusive(x_3082); +if (x_3085 == 0) +{ +lean_object* x_3086; lean_object* x_3087; uint8_t x_3088; +x_3086 = lean_ctor_get(x_3082, 1); +x_3087 = lean_ctor_get(x_3082, 0); +lean_dec(x_3087); +x_3088 = !lean_is_exclusive(x_3083); +if (x_3088 == 0) +{ +lean_object* x_3089; lean_object* x_3090; uint8_t x_3091; lean_object* x_3092; lean_object* x_3093; lean_object* x_3094; lean_object* x_3095; lean_object* x_3096; lean_object* x_3097; +x_3089 = lean_ctor_get(x_3083, 1); +x_3090 = lean_ctor_get(x_3083, 0); +lean_dec(x_3090); +x_3091 = 1; +x_3092 = l_Lean_IR_ToIR_lowerLet___closed__14; +x_3093 = 
l_Lean_Name_toString(x_153, x_3091, x_3092); +lean_ctor_set_tag(x_3075, 3); +lean_ctor_set(x_3075, 0, x_3093); +x_3094 = l_Lean_IR_ToIR_lowerLet___closed__13; +lean_ctor_set_tag(x_3083, 5); +lean_ctor_set(x_3083, 1, x_3075); +lean_ctor_set(x_3083, 0, x_3094); +x_3095 = l_Lean_IR_ToIR_lowerLet___closed__16; +lean_ctor_set_tag(x_3082, 5); +lean_ctor_set(x_3082, 1, x_3095); +x_3096 = l_Lean_MessageData_ofFormat(x_3082); +x_3097 = l_Lean_throwError___at_Lean_IR_ToIR_lowerLet___spec__1(x_3096, x_3089, x_4, x_5, x_3086); +lean_dec(x_5); +lean_dec(x_4); +lean_dec(x_3089); +return x_3097; +} +else +{ +lean_object* x_3098; uint8_t x_3099; lean_object* x_3100; lean_object* x_3101; lean_object* x_3102; lean_object* x_3103; lean_object* x_3104; lean_object* x_3105; lean_object* x_3106; +x_3098 = lean_ctor_get(x_3083, 1); +lean_inc(x_3098); +lean_dec(x_3083); +x_3099 = 1; +x_3100 = l_Lean_IR_ToIR_lowerLet___closed__14; +x_3101 = l_Lean_Name_toString(x_153, x_3099, x_3100); +lean_ctor_set_tag(x_3075, 3); +lean_ctor_set(x_3075, 0, x_3101); +x_3102 = l_Lean_IR_ToIR_lowerLet___closed__13; +x_3103 = lean_alloc_ctor(5, 2, 0); +lean_ctor_set(x_3103, 0, x_3102); +lean_ctor_set(x_3103, 1, x_3075); +x_3104 = l_Lean_IR_ToIR_lowerLet___closed__16; +lean_ctor_set_tag(x_3082, 5); +lean_ctor_set(x_3082, 1, x_3104); +lean_ctor_set(x_3082, 0, x_3103); +x_3105 = l_Lean_MessageData_ofFormat(x_3082); +x_3106 = l_Lean_throwError___at_Lean_IR_ToIR_lowerLet___spec__1(x_3105, x_3098, x_4, x_5, x_3086); +lean_dec(x_5); +lean_dec(x_4); +lean_dec(x_3098); +return x_3106; +} +} +else +{ +lean_object* x_3107; lean_object* x_3108; lean_object* x_3109; uint8_t x_3110; lean_object* x_3111; lean_object* x_3112; lean_object* x_3113; lean_object* x_3114; lean_object* x_3115; lean_object* x_3116; lean_object* x_3117; lean_object* x_3118; +x_3107 = lean_ctor_get(x_3082, 1); +lean_inc(x_3107); +lean_dec(x_3082); +x_3108 = lean_ctor_get(x_3083, 1); +lean_inc(x_3108); +if (lean_is_exclusive(x_3083)) { + lean_ctor_release(x_3083, 0); + lean_ctor_release(x_3083, 1); + x_3109 = x_3083; +} else { + lean_dec_ref(x_3083); + x_3109 = lean_box(0); +} +x_3110 = 1; +x_3111 = l_Lean_IR_ToIR_lowerLet___closed__14; +x_3112 = l_Lean_Name_toString(x_153, x_3110, x_3111); +lean_ctor_set_tag(x_3075, 3); +lean_ctor_set(x_3075, 0, x_3112); +x_3113 = l_Lean_IR_ToIR_lowerLet___closed__13; +if (lean_is_scalar(x_3109)) { + x_3114 = lean_alloc_ctor(5, 2, 0); +} else { + x_3114 = x_3109; + lean_ctor_set_tag(x_3114, 5); +} +lean_ctor_set(x_3114, 0, x_3113); +lean_ctor_set(x_3114, 1, x_3075); +x_3115 = l_Lean_IR_ToIR_lowerLet___closed__16; +x_3116 = lean_alloc_ctor(5, 2, 0); +lean_ctor_set(x_3116, 0, x_3114); +lean_ctor_set(x_3116, 1, x_3115); +x_3117 = l_Lean_MessageData_ofFormat(x_3116); +x_3118 = l_Lean_throwError___at_Lean_IR_ToIR_lowerLet___spec__1(x_3117, x_3108, x_4, x_5, x_3107); +lean_dec(x_5); +lean_dec(x_4); +lean_dec(x_3108); +return x_3118; +} +} +else +{ +lean_object* x_3119; uint8_t x_3120; +lean_free_object(x_3075); +x_3119 = lean_ctor_get(x_3082, 1); +lean_inc(x_3119); +lean_dec(x_3082); +x_3120 = !lean_is_exclusive(x_3083); +if (x_3120 == 0) +{ +lean_object* x_3121; lean_object* x_3122; lean_object* x_3123; lean_object* x_3124; lean_object* x_3125; lean_object* x_3126; uint8_t x_3127; +x_3121 = lean_ctor_get(x_3083, 1); +x_3122 = lean_ctor_get(x_3083, 0); +lean_dec(x_3122); +x_3123 = lean_ctor_get(x_3084, 0); +lean_inc(x_3123); +lean_dec(x_3084); +x_3124 = lean_array_get_size(x_3058); +x_3125 = l_Lean_IR_Decl_params(x_3123); +lean_dec(x_3123); 
+x_3126 = lean_array_get_size(x_3125); +lean_dec(x_3125); +x_3127 = lean_nat_dec_lt(x_3124, x_3126); +if (x_3127 == 0) +{ +uint8_t x_3128; +x_3128 = lean_nat_dec_eq(x_3124, x_3126); +if (x_3128 == 0) +{ +lean_object* x_3129; lean_object* x_3130; lean_object* x_3131; lean_object* x_3132; lean_object* x_3133; lean_object* x_3134; lean_object* x_3135; lean_object* x_3136; lean_object* x_3137; lean_object* x_3138; lean_object* x_3139; lean_object* x_3140; lean_object* x_3141; lean_object* x_3142; lean_object* x_3143; lean_object* x_3144; +x_3129 = lean_unsigned_to_nat(0u); +x_3130 = l_Array_extract___rarg(x_3058, x_3129, x_3126); +x_3131 = l_Array_extract___rarg(x_3058, x_3126, x_3124); +lean_dec(x_3124); +lean_dec(x_3058); +lean_ctor_set_tag(x_3083, 6); +lean_ctor_set(x_3083, 1, x_3130); +lean_ctor_set(x_3083, 0, x_153); +x_3132 = lean_ctor_get(x_1, 0); +lean_inc(x_3132); +x_3133 = l_Lean_IR_ToIR_bindVar(x_3132, x_3121, x_4, x_5, x_3119); +x_3134 = lean_ctor_get(x_3133, 0); +lean_inc(x_3134); +x_3135 = lean_ctor_get(x_3133, 1); +lean_inc(x_3135); +lean_dec(x_3133); +x_3136 = lean_ctor_get(x_3134, 0); +lean_inc(x_3136); +x_3137 = lean_ctor_get(x_3134, 1); +lean_inc(x_3137); +lean_dec(x_3134); +x_3138 = l_Lean_IR_ToIR_newVar(x_3137, x_4, x_5, x_3135); +x_3139 = lean_ctor_get(x_3138, 0); +lean_inc(x_3139); +x_3140 = lean_ctor_get(x_3138, 1); +lean_inc(x_3140); +lean_dec(x_3138); +x_3141 = lean_ctor_get(x_3139, 0); +lean_inc(x_3141); +x_3142 = lean_ctor_get(x_3139, 1); +lean_inc(x_3142); +lean_dec(x_3139); +x_3143 = lean_ctor_get(x_1, 2); +lean_inc(x_3143); +lean_dec(x_1); +lean_inc(x_5); +lean_inc(x_4); +x_3144 = l_Lean_IR_ToIR_lowerType(x_3143, x_3142, x_4, x_5, x_3140); +if (lean_obj_tag(x_3144) == 0) +{ +lean_object* x_3145; lean_object* x_3146; lean_object* x_3147; lean_object* x_3148; lean_object* x_3149; +x_3145 = lean_ctor_get(x_3144, 0); +lean_inc(x_3145); +x_3146 = lean_ctor_get(x_3144, 1); +lean_inc(x_3146); +lean_dec(x_3144); +x_3147 = lean_ctor_get(x_3145, 0); +lean_inc(x_3147); +x_3148 = lean_ctor_get(x_3145, 1); +lean_inc(x_3148); +lean_dec(x_3145); +x_3149 = l_Lean_IR_ToIR_lowerLet___lambda__3(x_2, x_3141, x_3131, x_3136, x_3083, x_3147, x_3148, x_4, x_5, x_3146); +return x_3149; +} +else +{ +uint8_t x_3150; +lean_dec(x_3141); +lean_dec(x_3136); +lean_dec(x_3083); +lean_dec(x_3131); +lean_dec(x_5); +lean_dec(x_4); +lean_dec(x_2); +x_3150 = !lean_is_exclusive(x_3144); +if (x_3150 == 0) +{ +return x_3144; +} +else +{ +lean_object* x_3151; lean_object* x_3152; lean_object* x_3153; +x_3151 = lean_ctor_get(x_3144, 0); +x_3152 = lean_ctor_get(x_3144, 1); +lean_inc(x_3152); +lean_inc(x_3151); +lean_dec(x_3144); +x_3153 = lean_alloc_ctor(1, 2, 0); +lean_ctor_set(x_3153, 0, x_3151); +lean_ctor_set(x_3153, 1, x_3152); +return x_3153; +} +} +} +else +{ +lean_object* x_3154; lean_object* x_3155; lean_object* x_3156; lean_object* x_3157; lean_object* x_3158; lean_object* x_3159; lean_object* x_3160; lean_object* x_3161; +lean_dec(x_3126); +lean_dec(x_3124); +lean_ctor_set_tag(x_3083, 6); +lean_ctor_set(x_3083, 1, x_3058); +lean_ctor_set(x_3083, 0, x_153); +x_3154 = lean_ctor_get(x_1, 0); +lean_inc(x_3154); +x_3155 = l_Lean_IR_ToIR_bindVar(x_3154, x_3121, x_4, x_5, x_3119); +x_3156 = lean_ctor_get(x_3155, 0); +lean_inc(x_3156); +x_3157 = lean_ctor_get(x_3155, 1); +lean_inc(x_3157); +lean_dec(x_3155); +x_3158 = lean_ctor_get(x_3156, 0); +lean_inc(x_3158); +x_3159 = lean_ctor_get(x_3156, 1); +lean_inc(x_3159); +lean_dec(x_3156); +x_3160 = lean_ctor_get(x_1, 2); +lean_inc(x_3160); +lean_dec(x_1); 
+lean_inc(x_5); +lean_inc(x_4); +x_3161 = l_Lean_IR_ToIR_lowerType(x_3160, x_3159, x_4, x_5, x_3157); +if (lean_obj_tag(x_3161) == 0) +{ +lean_object* x_3162; lean_object* x_3163; lean_object* x_3164; lean_object* x_3165; lean_object* x_3166; +x_3162 = lean_ctor_get(x_3161, 0); +lean_inc(x_3162); +x_3163 = lean_ctor_get(x_3161, 1); +lean_inc(x_3163); +lean_dec(x_3161); +x_3164 = lean_ctor_get(x_3162, 0); +lean_inc(x_3164); +x_3165 = lean_ctor_get(x_3162, 1); +lean_inc(x_3165); +lean_dec(x_3162); +x_3166 = l_Lean_IR_ToIR_lowerLet___lambda__1(x_2, x_3158, x_3083, x_3164, x_3165, x_4, x_5, x_3163); +return x_3166; +} +else +{ +uint8_t x_3167; +lean_dec(x_3158); +lean_dec(x_3083); +lean_dec(x_5); +lean_dec(x_4); +lean_dec(x_2); +x_3167 = !lean_is_exclusive(x_3161); +if (x_3167 == 0) +{ +return x_3161; +} +else +{ +lean_object* x_3168; lean_object* x_3169; lean_object* x_3170; +x_3168 = lean_ctor_get(x_3161, 0); +x_3169 = lean_ctor_get(x_3161, 1); +lean_inc(x_3169); +lean_inc(x_3168); +lean_dec(x_3161); +x_3170 = lean_alloc_ctor(1, 2, 0); +lean_ctor_set(x_3170, 0, x_3168); +lean_ctor_set(x_3170, 1, x_3169); +return x_3170; +} +} +} +} +else +{ +lean_object* x_3171; lean_object* x_3172; lean_object* x_3173; lean_object* x_3174; lean_object* x_3175; lean_object* x_3176; lean_object* x_3177; lean_object* x_3178; +lean_dec(x_3126); +lean_dec(x_3124); +lean_ctor_set_tag(x_3083, 7); +lean_ctor_set(x_3083, 1, x_3058); +lean_ctor_set(x_3083, 0, x_153); +x_3171 = lean_ctor_get(x_1, 0); +lean_inc(x_3171); +lean_dec(x_1); +x_3172 = l_Lean_IR_ToIR_bindVar(x_3171, x_3121, x_4, x_5, x_3119); +x_3173 = lean_ctor_get(x_3172, 0); +lean_inc(x_3173); +x_3174 = lean_ctor_get(x_3172, 1); +lean_inc(x_3174); +lean_dec(x_3172); +x_3175 = lean_ctor_get(x_3173, 0); +lean_inc(x_3175); +x_3176 = lean_ctor_get(x_3173, 1); +lean_inc(x_3176); +lean_dec(x_3173); +x_3177 = lean_box(7); +x_3178 = l_Lean_IR_ToIR_lowerLet___lambda__1(x_2, x_3175, x_3083, x_3177, x_3176, x_4, x_5, x_3174); +return x_3178; +} +} +else +{ +lean_object* x_3179; lean_object* x_3180; lean_object* x_3181; lean_object* x_3182; lean_object* x_3183; uint8_t x_3184; +x_3179 = lean_ctor_get(x_3083, 1); +lean_inc(x_3179); +lean_dec(x_3083); +x_3180 = lean_ctor_get(x_3084, 0); +lean_inc(x_3180); +lean_dec(x_3084); +x_3181 = lean_array_get_size(x_3058); +x_3182 = l_Lean_IR_Decl_params(x_3180); +lean_dec(x_3180); +x_3183 = lean_array_get_size(x_3182); +lean_dec(x_3182); +x_3184 = lean_nat_dec_lt(x_3181, x_3183); +if (x_3184 == 0) +{ +uint8_t x_3185; +x_3185 = lean_nat_dec_eq(x_3181, x_3183); +if (x_3185 == 0) +{ +lean_object* x_3186; lean_object* x_3187; lean_object* x_3188; lean_object* x_3189; lean_object* x_3190; lean_object* x_3191; lean_object* x_3192; lean_object* x_3193; lean_object* x_3194; lean_object* x_3195; lean_object* x_3196; lean_object* x_3197; lean_object* x_3198; lean_object* x_3199; lean_object* x_3200; lean_object* x_3201; lean_object* x_3202; +x_3186 = lean_unsigned_to_nat(0u); +x_3187 = l_Array_extract___rarg(x_3058, x_3186, x_3183); +x_3188 = l_Array_extract___rarg(x_3058, x_3183, x_3181); +lean_dec(x_3181); +lean_dec(x_3058); +x_3189 = lean_alloc_ctor(6, 2, 0); +lean_ctor_set(x_3189, 0, x_153); +lean_ctor_set(x_3189, 1, x_3187); +x_3190 = lean_ctor_get(x_1, 0); +lean_inc(x_3190); +x_3191 = l_Lean_IR_ToIR_bindVar(x_3190, x_3179, x_4, x_5, x_3119); +x_3192 = lean_ctor_get(x_3191, 0); +lean_inc(x_3192); +x_3193 = lean_ctor_get(x_3191, 1); +lean_inc(x_3193); +lean_dec(x_3191); +x_3194 = lean_ctor_get(x_3192, 0); +lean_inc(x_3194); +x_3195 = 
lean_ctor_get(x_3192, 1); +lean_inc(x_3195); +lean_dec(x_3192); +x_3196 = l_Lean_IR_ToIR_newVar(x_3195, x_4, x_5, x_3193); +x_3197 = lean_ctor_get(x_3196, 0); +lean_inc(x_3197); +x_3198 = lean_ctor_get(x_3196, 1); +lean_inc(x_3198); +lean_dec(x_3196); +x_3199 = lean_ctor_get(x_3197, 0); +lean_inc(x_3199); +x_3200 = lean_ctor_get(x_3197, 1); +lean_inc(x_3200); +lean_dec(x_3197); +x_3201 = lean_ctor_get(x_1, 2); +lean_inc(x_3201); +lean_dec(x_1); +lean_inc(x_5); +lean_inc(x_4); +x_3202 = l_Lean_IR_ToIR_lowerType(x_3201, x_3200, x_4, x_5, x_3198); +if (lean_obj_tag(x_3202) == 0) +{ +lean_object* x_3203; lean_object* x_3204; lean_object* x_3205; lean_object* x_3206; lean_object* x_3207; +x_3203 = lean_ctor_get(x_3202, 0); +lean_inc(x_3203); +x_3204 = lean_ctor_get(x_3202, 1); +lean_inc(x_3204); +lean_dec(x_3202); +x_3205 = lean_ctor_get(x_3203, 0); +lean_inc(x_3205); +x_3206 = lean_ctor_get(x_3203, 1); +lean_inc(x_3206); +lean_dec(x_3203); +x_3207 = l_Lean_IR_ToIR_lowerLet___lambda__3(x_2, x_3199, x_3188, x_3194, x_3189, x_3205, x_3206, x_4, x_5, x_3204); +return x_3207; +} +else +{ +lean_object* x_3208; lean_object* x_3209; lean_object* x_3210; lean_object* x_3211; +lean_dec(x_3199); +lean_dec(x_3194); +lean_dec(x_3189); +lean_dec(x_3188); +lean_dec(x_5); +lean_dec(x_4); +lean_dec(x_2); +x_3208 = lean_ctor_get(x_3202, 0); +lean_inc(x_3208); +x_3209 = lean_ctor_get(x_3202, 1); +lean_inc(x_3209); +if (lean_is_exclusive(x_3202)) { + lean_ctor_release(x_3202, 0); + lean_ctor_release(x_3202, 1); + x_3210 = x_3202; +} else { + lean_dec_ref(x_3202); + x_3210 = lean_box(0); +} +if (lean_is_scalar(x_3210)) { + x_3211 = lean_alloc_ctor(1, 2, 0); +} else { + x_3211 = x_3210; +} +lean_ctor_set(x_3211, 0, x_3208); +lean_ctor_set(x_3211, 1, x_3209); +return x_3211; +} +} +else +{ +lean_object* x_3212; lean_object* x_3213; lean_object* x_3214; lean_object* x_3215; lean_object* x_3216; lean_object* x_3217; lean_object* x_3218; lean_object* x_3219; lean_object* x_3220; +lean_dec(x_3183); +lean_dec(x_3181); +x_3212 = lean_alloc_ctor(6, 2, 0); +lean_ctor_set(x_3212, 0, x_153); +lean_ctor_set(x_3212, 1, x_3058); +x_3213 = lean_ctor_get(x_1, 0); +lean_inc(x_3213); +x_3214 = l_Lean_IR_ToIR_bindVar(x_3213, x_3179, x_4, x_5, x_3119); +x_3215 = lean_ctor_get(x_3214, 0); +lean_inc(x_3215); +x_3216 = lean_ctor_get(x_3214, 1); +lean_inc(x_3216); +lean_dec(x_3214); +x_3217 = lean_ctor_get(x_3215, 0); +lean_inc(x_3217); +x_3218 = lean_ctor_get(x_3215, 1); +lean_inc(x_3218); +lean_dec(x_3215); +x_3219 = lean_ctor_get(x_1, 2); +lean_inc(x_3219); +lean_dec(x_1); +lean_inc(x_5); +lean_inc(x_4); +x_3220 = l_Lean_IR_ToIR_lowerType(x_3219, x_3218, x_4, x_5, x_3216); +if (lean_obj_tag(x_3220) == 0) +{ +lean_object* x_3221; lean_object* x_3222; lean_object* x_3223; lean_object* x_3224; lean_object* x_3225; +x_3221 = lean_ctor_get(x_3220, 0); +lean_inc(x_3221); +x_3222 = lean_ctor_get(x_3220, 1); +lean_inc(x_3222); +lean_dec(x_3220); +x_3223 = lean_ctor_get(x_3221, 0); +lean_inc(x_3223); +x_3224 = lean_ctor_get(x_3221, 1); +lean_inc(x_3224); +lean_dec(x_3221); +x_3225 = l_Lean_IR_ToIR_lowerLet___lambda__1(x_2, x_3217, x_3212, x_3223, x_3224, x_4, x_5, x_3222); +return x_3225; +} +else +{ +lean_object* x_3226; lean_object* x_3227; lean_object* x_3228; lean_object* x_3229; +lean_dec(x_3217); +lean_dec(x_3212); +lean_dec(x_5); +lean_dec(x_4); +lean_dec(x_2); +x_3226 = lean_ctor_get(x_3220, 0); +lean_inc(x_3226); +x_3227 = lean_ctor_get(x_3220, 1); +lean_inc(x_3227); +if (lean_is_exclusive(x_3220)) { + lean_ctor_release(x_3220, 0); + 
lean_ctor_release(x_3220, 1); + x_3228 = x_3220; +} else { + lean_dec_ref(x_3220); + x_3228 = lean_box(0); +} +if (lean_is_scalar(x_3228)) { + x_3229 = lean_alloc_ctor(1, 2, 0); +} else { + x_3229 = x_3228; +} +lean_ctor_set(x_3229, 0, x_3226); +lean_ctor_set(x_3229, 1, x_3227); +return x_3229; +} +} +} +else +{ +lean_object* x_3230; lean_object* x_3231; lean_object* x_3232; lean_object* x_3233; lean_object* x_3234; lean_object* x_3235; lean_object* x_3236; lean_object* x_3237; lean_object* x_3238; +lean_dec(x_3183); +lean_dec(x_3181); +x_3230 = lean_alloc_ctor(7, 2, 0); +lean_ctor_set(x_3230, 0, x_153); +lean_ctor_set(x_3230, 1, x_3058); +x_3231 = lean_ctor_get(x_1, 0); +lean_inc(x_3231); +lean_dec(x_1); +x_3232 = l_Lean_IR_ToIR_bindVar(x_3231, x_3179, x_4, x_5, x_3119); +x_3233 = lean_ctor_get(x_3232, 0); +lean_inc(x_3233); +x_3234 = lean_ctor_get(x_3232, 1); +lean_inc(x_3234); +lean_dec(x_3232); +x_3235 = lean_ctor_get(x_3233, 0); +lean_inc(x_3235); +x_3236 = lean_ctor_get(x_3233, 1); +lean_inc(x_3236); +lean_dec(x_3233); +x_3237 = lean_box(7); +x_3238 = l_Lean_IR_ToIR_lowerLet___lambda__1(x_2, x_3235, x_3230, x_3237, x_3236, x_4, x_5, x_3234); +return x_3238; +} +} +} +} +else +{ +lean_object* x_3239; lean_object* x_3240; +lean_free_object(x_3075); +lean_dec(x_3058); +lean_dec(x_153); +lean_dec(x_5); +lean_dec(x_4); +lean_dec(x_2); +lean_dec(x_1); +x_3239 = lean_box(13); +lean_ctor_set(x_3060, 0, x_3239); +if (lean_is_scalar(x_3069)) { + x_3240 = lean_alloc_ctor(0, 2, 0); +} else { + x_3240 = x_3069; +} +lean_ctor_set(x_3240, 0, x_3060); +lean_ctor_set(x_3240, 1, x_3068); +return x_3240; +} +} +else +{ +lean_object* x_3241; lean_object* x_3242; lean_object* x_3243; +lean_free_object(x_3075); +lean_dec(x_3069); +lean_free_object(x_3060); +lean_dec(x_153); +x_3241 = l_Lean_IR_instInhabitedArg; +x_3242 = lean_unsigned_to_nat(2u); +x_3243 = lean_array_get(x_3241, x_3058, x_3242); +lean_dec(x_3058); +if (lean_obj_tag(x_3243) == 0) +{ +lean_object* x_3244; lean_object* x_3245; lean_object* x_3246; lean_object* x_3247; lean_object* x_3248; lean_object* x_3249; lean_object* x_3250; +x_3244 = lean_ctor_get(x_3243, 0); +lean_inc(x_3244); +lean_dec(x_3243); +x_3245 = lean_ctor_get(x_1, 0); +lean_inc(x_3245); +lean_dec(x_1); +x_3246 = l_Lean_IR_ToIR_bindVarToVarId(x_3245, x_3244, x_3064, x_4, x_5, x_3068); +x_3247 = lean_ctor_get(x_3246, 0); +lean_inc(x_3247); +x_3248 = lean_ctor_get(x_3246, 1); +lean_inc(x_3248); +lean_dec(x_3246); +x_3249 = lean_ctor_get(x_3247, 1); +lean_inc(x_3249); +lean_dec(x_3247); +x_3250 = l_Lean_IR_ToIR_lowerCode(x_2, x_3249, x_4, x_5, x_3248); +return x_3250; +} +else +{ +lean_object* x_3251; lean_object* x_3252; lean_object* x_3253; lean_object* x_3254; lean_object* x_3255; lean_object* x_3256; +x_3251 = lean_ctor_get(x_1, 0); +lean_inc(x_3251); +lean_dec(x_1); +x_3252 = l_Lean_IR_ToIR_bindErased(x_3251, x_3064, x_4, x_5, x_3068); +x_3253 = lean_ctor_get(x_3252, 0); +lean_inc(x_3253); +x_3254 = lean_ctor_get(x_3252, 1); +lean_inc(x_3254); +lean_dec(x_3252); +x_3255 = lean_ctor_get(x_3253, 1); +lean_inc(x_3255); +lean_dec(x_3253); +x_3256 = l_Lean_IR_ToIR_lowerCode(x_2, x_3255, x_4, x_5, x_3254); +return x_3256; +} +} +} +else +{ +lean_object* x_3257; uint8_t x_3258; +lean_dec(x_3075); +x_3257 = l_Lean_IR_ToIR_lowerLet___closed__9; +x_3258 = lean_name_eq(x_153, x_3257); +if (x_3258 == 0) +{ +lean_object* x_3259; uint8_t x_3260; +x_3259 = l_Lean_IR_ToIR_lowerLet___closed__11; +x_3260 = lean_name_eq(x_153, x_3259); +if (x_3260 == 0) +{ +lean_object* x_3261; lean_object* 
x_3262; lean_object* x_3263; +lean_dec(x_3069); +lean_free_object(x_3060); +lean_inc(x_153); +x_3261 = l_Lean_IR_ToIR_findDecl(x_153, x_3064, x_4, x_5, x_3068); +x_3262 = lean_ctor_get(x_3261, 0); +lean_inc(x_3262); +x_3263 = lean_ctor_get(x_3262, 0); +lean_inc(x_3263); +if (lean_obj_tag(x_3263) == 0) +{ +lean_object* x_3264; lean_object* x_3265; lean_object* x_3266; lean_object* x_3267; uint8_t x_3268; lean_object* x_3269; lean_object* x_3270; lean_object* x_3271; lean_object* x_3272; lean_object* x_3273; lean_object* x_3274; lean_object* x_3275; lean_object* x_3276; lean_object* x_3277; +lean_dec(x_3058); +lean_dec(x_2); +lean_dec(x_1); +x_3264 = lean_ctor_get(x_3261, 1); +lean_inc(x_3264); +if (lean_is_exclusive(x_3261)) { + lean_ctor_release(x_3261, 0); + lean_ctor_release(x_3261, 1); + x_3265 = x_3261; +} else { + lean_dec_ref(x_3261); + x_3265 = lean_box(0); +} +x_3266 = lean_ctor_get(x_3262, 1); +lean_inc(x_3266); +if (lean_is_exclusive(x_3262)) { + lean_ctor_release(x_3262, 0); + lean_ctor_release(x_3262, 1); + x_3267 = x_3262; +} else { + lean_dec_ref(x_3262); + x_3267 = lean_box(0); +} +x_3268 = 1; +x_3269 = l_Lean_IR_ToIR_lowerLet___closed__14; +x_3270 = l_Lean_Name_toString(x_153, x_3268, x_3269); +x_3271 = lean_alloc_ctor(3, 1, 0); +lean_ctor_set(x_3271, 0, x_3270); +x_3272 = l_Lean_IR_ToIR_lowerLet___closed__13; +if (lean_is_scalar(x_3267)) { + x_3273 = lean_alloc_ctor(5, 2, 0); +} else { + x_3273 = x_3267; + lean_ctor_set_tag(x_3273, 5); +} +lean_ctor_set(x_3273, 0, x_3272); +lean_ctor_set(x_3273, 1, x_3271); +x_3274 = l_Lean_IR_ToIR_lowerLet___closed__16; +if (lean_is_scalar(x_3265)) { + x_3275 = lean_alloc_ctor(5, 2, 0); +} else { + x_3275 = x_3265; + lean_ctor_set_tag(x_3275, 5); +} +lean_ctor_set(x_3275, 0, x_3273); +lean_ctor_set(x_3275, 1, x_3274); +x_3276 = l_Lean_MessageData_ofFormat(x_3275); +x_3277 = l_Lean_throwError___at_Lean_IR_ToIR_lowerLet___spec__1(x_3276, x_3266, x_4, x_5, x_3264); +lean_dec(x_5); +lean_dec(x_4); +lean_dec(x_3266); +return x_3277; +} +else +{ +lean_object* x_3278; lean_object* x_3279; lean_object* x_3280; lean_object* x_3281; lean_object* x_3282; lean_object* x_3283; lean_object* x_3284; uint8_t x_3285; +x_3278 = lean_ctor_get(x_3261, 1); +lean_inc(x_3278); +lean_dec(x_3261); +x_3279 = lean_ctor_get(x_3262, 1); +lean_inc(x_3279); +if (lean_is_exclusive(x_3262)) { + lean_ctor_release(x_3262, 0); + lean_ctor_release(x_3262, 1); + x_3280 = x_3262; +} else { + lean_dec_ref(x_3262); + x_3280 = lean_box(0); +} +x_3281 = lean_ctor_get(x_3263, 0); +lean_inc(x_3281); +lean_dec(x_3263); +x_3282 = lean_array_get_size(x_3058); +x_3283 = l_Lean_IR_Decl_params(x_3281); +lean_dec(x_3281); +x_3284 = lean_array_get_size(x_3283); +lean_dec(x_3283); +x_3285 = lean_nat_dec_lt(x_3282, x_3284); +if (x_3285 == 0) +{ +uint8_t x_3286; +x_3286 = lean_nat_dec_eq(x_3282, x_3284); +if (x_3286 == 0) +{ +lean_object* x_3287; lean_object* x_3288; lean_object* x_3289; lean_object* x_3290; lean_object* x_3291; lean_object* x_3292; lean_object* x_3293; lean_object* x_3294; lean_object* x_3295; lean_object* x_3296; lean_object* x_3297; lean_object* x_3298; lean_object* x_3299; lean_object* x_3300; lean_object* x_3301; lean_object* x_3302; lean_object* x_3303; +x_3287 = lean_unsigned_to_nat(0u); +x_3288 = l_Array_extract___rarg(x_3058, x_3287, x_3284); +x_3289 = l_Array_extract___rarg(x_3058, x_3284, x_3282); +lean_dec(x_3282); +lean_dec(x_3058); +if (lean_is_scalar(x_3280)) { + x_3290 = lean_alloc_ctor(6, 2, 0); +} else { + x_3290 = x_3280; + lean_ctor_set_tag(x_3290, 6); +} 
+lean_ctor_set(x_3290, 0, x_153); +lean_ctor_set(x_3290, 1, x_3288); +x_3291 = lean_ctor_get(x_1, 0); +lean_inc(x_3291); +x_3292 = l_Lean_IR_ToIR_bindVar(x_3291, x_3279, x_4, x_5, x_3278); +x_3293 = lean_ctor_get(x_3292, 0); +lean_inc(x_3293); +x_3294 = lean_ctor_get(x_3292, 1); +lean_inc(x_3294); +lean_dec(x_3292); +x_3295 = lean_ctor_get(x_3293, 0); +lean_inc(x_3295); +x_3296 = lean_ctor_get(x_3293, 1); +lean_inc(x_3296); +lean_dec(x_3293); +x_3297 = l_Lean_IR_ToIR_newVar(x_3296, x_4, x_5, x_3294); +x_3298 = lean_ctor_get(x_3297, 0); +lean_inc(x_3298); +x_3299 = lean_ctor_get(x_3297, 1); +lean_inc(x_3299); +lean_dec(x_3297); +x_3300 = lean_ctor_get(x_3298, 0); +lean_inc(x_3300); +x_3301 = lean_ctor_get(x_3298, 1); +lean_inc(x_3301); +lean_dec(x_3298); +x_3302 = lean_ctor_get(x_1, 2); +lean_inc(x_3302); +lean_dec(x_1); +lean_inc(x_5); +lean_inc(x_4); +x_3303 = l_Lean_IR_ToIR_lowerType(x_3302, x_3301, x_4, x_5, x_3299); +if (lean_obj_tag(x_3303) == 0) +{ +lean_object* x_3304; lean_object* x_3305; lean_object* x_3306; lean_object* x_3307; lean_object* x_3308; +x_3304 = lean_ctor_get(x_3303, 0); +lean_inc(x_3304); +x_3305 = lean_ctor_get(x_3303, 1); +lean_inc(x_3305); +lean_dec(x_3303); +x_3306 = lean_ctor_get(x_3304, 0); +lean_inc(x_3306); +x_3307 = lean_ctor_get(x_3304, 1); +lean_inc(x_3307); +lean_dec(x_3304); +x_3308 = l_Lean_IR_ToIR_lowerLet___lambda__3(x_2, x_3300, x_3289, x_3295, x_3290, x_3306, x_3307, x_4, x_5, x_3305); +return x_3308; +} +else +{ +lean_object* x_3309; lean_object* x_3310; lean_object* x_3311; lean_object* x_3312; +lean_dec(x_3300); +lean_dec(x_3295); +lean_dec(x_3290); +lean_dec(x_3289); +lean_dec(x_5); +lean_dec(x_4); +lean_dec(x_2); +x_3309 = lean_ctor_get(x_3303, 0); +lean_inc(x_3309); +x_3310 = lean_ctor_get(x_3303, 1); +lean_inc(x_3310); +if (lean_is_exclusive(x_3303)) { + lean_ctor_release(x_3303, 0); + lean_ctor_release(x_3303, 1); + x_3311 = x_3303; +} else { + lean_dec_ref(x_3303); + x_3311 = lean_box(0); +} +if (lean_is_scalar(x_3311)) { + x_3312 = lean_alloc_ctor(1, 2, 0); +} else { + x_3312 = x_3311; +} +lean_ctor_set(x_3312, 0, x_3309); +lean_ctor_set(x_3312, 1, x_3310); +return x_3312; +} +} +else +{ +lean_object* x_3313; lean_object* x_3314; lean_object* x_3315; lean_object* x_3316; lean_object* x_3317; lean_object* x_3318; lean_object* x_3319; lean_object* x_3320; lean_object* x_3321; +lean_dec(x_3284); +lean_dec(x_3282); +if (lean_is_scalar(x_3280)) { + x_3313 = lean_alloc_ctor(6, 2, 0); +} else { + x_3313 = x_3280; + lean_ctor_set_tag(x_3313, 6); +} +lean_ctor_set(x_3313, 0, x_153); +lean_ctor_set(x_3313, 1, x_3058); +x_3314 = lean_ctor_get(x_1, 0); +lean_inc(x_3314); +x_3315 = l_Lean_IR_ToIR_bindVar(x_3314, x_3279, x_4, x_5, x_3278); +x_3316 = lean_ctor_get(x_3315, 0); +lean_inc(x_3316); +x_3317 = lean_ctor_get(x_3315, 1); +lean_inc(x_3317); +lean_dec(x_3315); +x_3318 = lean_ctor_get(x_3316, 0); +lean_inc(x_3318); +x_3319 = lean_ctor_get(x_3316, 1); +lean_inc(x_3319); +lean_dec(x_3316); +x_3320 = lean_ctor_get(x_1, 2); +lean_inc(x_3320); +lean_dec(x_1); +lean_inc(x_5); +lean_inc(x_4); +x_3321 = l_Lean_IR_ToIR_lowerType(x_3320, x_3319, x_4, x_5, x_3317); +if (lean_obj_tag(x_3321) == 0) +{ +lean_object* x_3322; lean_object* x_3323; lean_object* x_3324; lean_object* x_3325; lean_object* x_3326; +x_3322 = lean_ctor_get(x_3321, 0); +lean_inc(x_3322); +x_3323 = lean_ctor_get(x_3321, 1); +lean_inc(x_3323); +lean_dec(x_3321); +x_3324 = lean_ctor_get(x_3322, 0); +lean_inc(x_3324); +x_3325 = lean_ctor_get(x_3322, 1); +lean_inc(x_3325); +lean_dec(x_3322); 
+x_3326 = l_Lean_IR_ToIR_lowerLet___lambda__1(x_2, x_3318, x_3313, x_3324, x_3325, x_4, x_5, x_3323); +return x_3326; +} +else +{ +lean_object* x_3327; lean_object* x_3328; lean_object* x_3329; lean_object* x_3330; +lean_dec(x_3318); +lean_dec(x_3313); +lean_dec(x_5); +lean_dec(x_4); +lean_dec(x_2); +x_3327 = lean_ctor_get(x_3321, 0); +lean_inc(x_3327); +x_3328 = lean_ctor_get(x_3321, 1); +lean_inc(x_3328); +if (lean_is_exclusive(x_3321)) { + lean_ctor_release(x_3321, 0); + lean_ctor_release(x_3321, 1); + x_3329 = x_3321; +} else { + lean_dec_ref(x_3321); + x_3329 = lean_box(0); +} +if (lean_is_scalar(x_3329)) { + x_3330 = lean_alloc_ctor(1, 2, 0); +} else { + x_3330 = x_3329; +} +lean_ctor_set(x_3330, 0, x_3327); +lean_ctor_set(x_3330, 1, x_3328); +return x_3330; +} +} +} +else +{ +lean_object* x_3331; lean_object* x_3332; lean_object* x_3333; lean_object* x_3334; lean_object* x_3335; lean_object* x_3336; lean_object* x_3337; lean_object* x_3338; lean_object* x_3339; +lean_dec(x_3284); +lean_dec(x_3282); +if (lean_is_scalar(x_3280)) { + x_3331 = lean_alloc_ctor(7, 2, 0); +} else { + x_3331 = x_3280; + lean_ctor_set_tag(x_3331, 7); +} +lean_ctor_set(x_3331, 0, x_153); +lean_ctor_set(x_3331, 1, x_3058); +x_3332 = lean_ctor_get(x_1, 0); +lean_inc(x_3332); +lean_dec(x_1); +x_3333 = l_Lean_IR_ToIR_bindVar(x_3332, x_3279, x_4, x_5, x_3278); +x_3334 = lean_ctor_get(x_3333, 0); +lean_inc(x_3334); +x_3335 = lean_ctor_get(x_3333, 1); +lean_inc(x_3335); +lean_dec(x_3333); +x_3336 = lean_ctor_get(x_3334, 0); +lean_inc(x_3336); +x_3337 = lean_ctor_get(x_3334, 1); +lean_inc(x_3337); +lean_dec(x_3334); +x_3338 = lean_box(7); +x_3339 = l_Lean_IR_ToIR_lowerLet___lambda__1(x_2, x_3336, x_3331, x_3338, x_3337, x_4, x_5, x_3335); +return x_3339; +} +} +} +else +{ +lean_object* x_3340; lean_object* x_3341; +lean_dec(x_3058); +lean_dec(x_153); +lean_dec(x_5); +lean_dec(x_4); +lean_dec(x_2); +lean_dec(x_1); +x_3340 = lean_box(13); +lean_ctor_set(x_3060, 0, x_3340); +if (lean_is_scalar(x_3069)) { + x_3341 = lean_alloc_ctor(0, 2, 0); +} else { + x_3341 = x_3069; +} +lean_ctor_set(x_3341, 0, x_3060); +lean_ctor_set(x_3341, 1, x_3068); +return x_3341; +} +} +else +{ +lean_object* x_3342; lean_object* x_3343; lean_object* x_3344; +lean_dec(x_3069); +lean_free_object(x_3060); +lean_dec(x_153); +x_3342 = l_Lean_IR_instInhabitedArg; +x_3343 = lean_unsigned_to_nat(2u); +x_3344 = lean_array_get(x_3342, x_3058, x_3343); +lean_dec(x_3058); +if (lean_obj_tag(x_3344) == 0) +{ +lean_object* x_3345; lean_object* x_3346; lean_object* x_3347; lean_object* x_3348; lean_object* x_3349; lean_object* x_3350; lean_object* x_3351; +x_3345 = lean_ctor_get(x_3344, 0); +lean_inc(x_3345); +lean_dec(x_3344); +x_3346 = lean_ctor_get(x_1, 0); +lean_inc(x_3346); +lean_dec(x_1); +x_3347 = l_Lean_IR_ToIR_bindVarToVarId(x_3346, x_3345, x_3064, x_4, x_5, x_3068); +x_3348 = lean_ctor_get(x_3347, 0); +lean_inc(x_3348); +x_3349 = lean_ctor_get(x_3347, 1); +lean_inc(x_3349); +lean_dec(x_3347); +x_3350 = lean_ctor_get(x_3348, 1); +lean_inc(x_3350); +lean_dec(x_3348); +x_3351 = l_Lean_IR_ToIR_lowerCode(x_2, x_3350, x_4, x_5, x_3349); +return x_3351; +} +else +{ +lean_object* x_3352; lean_object* x_3353; lean_object* x_3354; lean_object* x_3355; lean_object* x_3356; lean_object* x_3357; +x_3352 = lean_ctor_get(x_1, 0); +lean_inc(x_3352); +lean_dec(x_1); +x_3353 = l_Lean_IR_ToIR_bindErased(x_3352, x_3064, x_4, x_5, x_3068); +x_3354 = lean_ctor_get(x_3353, 0); +lean_inc(x_3354); +x_3355 = lean_ctor_get(x_3353, 1); +lean_inc(x_3355); +lean_dec(x_3353); 
+x_3356 = lean_ctor_get(x_3354, 1); +lean_inc(x_3356); +lean_dec(x_3354); +x_3357 = l_Lean_IR_ToIR_lowerCode(x_2, x_3356, x_4, x_5, x_3355); +return x_3357; +} +} +} +} +case 1: +{ +lean_object* x_3358; lean_object* x_3359; lean_object* x_3389; lean_object* x_3390; +lean_dec(x_3075); +lean_dec(x_3070); +lean_dec(x_3050); +lean_dec(x_3049); +lean_inc(x_153); +x_3389 = l_Lean_Compiler_LCNF_getMonoDecl_x3f(x_153, x_4, x_5, x_3068); +x_3390 = lean_ctor_get(x_3389, 0); +lean_inc(x_3390); +if (lean_obj_tag(x_3390) == 0) +{ +lean_object* x_3391; lean_object* x_3392; +x_3391 = lean_ctor_get(x_3389, 1); +lean_inc(x_3391); +lean_dec(x_3389); +x_3392 = lean_box(0); +lean_ctor_set(x_3060, 0, x_3392); +x_3358 = x_3060; +x_3359 = x_3391; +goto block_3388; +} +else +{ +uint8_t x_3393; +lean_free_object(x_3060); +x_3393 = !lean_is_exclusive(x_3389); +if (x_3393 == 0) +{ +lean_object* x_3394; lean_object* x_3395; uint8_t x_3396; +x_3394 = lean_ctor_get(x_3389, 1); +x_3395 = lean_ctor_get(x_3389, 0); +lean_dec(x_3395); +x_3396 = !lean_is_exclusive(x_3390); +if (x_3396 == 0) +{ +lean_object* x_3397; lean_object* x_3398; lean_object* x_3399; lean_object* x_3400; uint8_t x_3401; +x_3397 = lean_ctor_get(x_3390, 0); +x_3398 = lean_array_get_size(x_3058); +x_3399 = lean_ctor_get(x_3397, 3); +lean_inc(x_3399); +lean_dec(x_3397); +x_3400 = lean_array_get_size(x_3399); +lean_dec(x_3399); +x_3401 = lean_nat_dec_lt(x_3398, x_3400); +if (x_3401 == 0) +{ +uint8_t x_3402; +x_3402 = lean_nat_dec_eq(x_3398, x_3400); +if (x_3402 == 0) +{ +lean_object* x_3403; lean_object* x_3404; lean_object* x_3405; lean_object* x_3406; lean_object* x_3407; lean_object* x_3408; lean_object* x_3409; lean_object* x_3410; lean_object* x_3411; lean_object* x_3412; lean_object* x_3413; lean_object* x_3414; lean_object* x_3415; lean_object* x_3416; lean_object* x_3417; lean_object* x_3418; +x_3403 = lean_unsigned_to_nat(0u); +x_3404 = l_Array_extract___rarg(x_3058, x_3403, x_3400); +x_3405 = l_Array_extract___rarg(x_3058, x_3400, x_3398); +lean_dec(x_3398); +lean_inc(x_153); +lean_ctor_set_tag(x_3389, 6); +lean_ctor_set(x_3389, 1, x_3404); +lean_ctor_set(x_3389, 0, x_153); +x_3406 = lean_ctor_get(x_1, 0); +lean_inc(x_3406); +x_3407 = l_Lean_IR_ToIR_bindVar(x_3406, x_3064, x_4, x_5, x_3394); +x_3408 = lean_ctor_get(x_3407, 0); +lean_inc(x_3408); +x_3409 = lean_ctor_get(x_3407, 1); +lean_inc(x_3409); +lean_dec(x_3407); +x_3410 = lean_ctor_get(x_3408, 0); +lean_inc(x_3410); +x_3411 = lean_ctor_get(x_3408, 1); +lean_inc(x_3411); +lean_dec(x_3408); +x_3412 = l_Lean_IR_ToIR_newVar(x_3411, x_4, x_5, x_3409); +x_3413 = lean_ctor_get(x_3412, 0); +lean_inc(x_3413); +x_3414 = lean_ctor_get(x_3412, 1); +lean_inc(x_3414); +lean_dec(x_3412); +x_3415 = lean_ctor_get(x_3413, 0); +lean_inc(x_3415); +x_3416 = lean_ctor_get(x_3413, 1); +lean_inc(x_3416); +lean_dec(x_3413); +x_3417 = lean_ctor_get(x_1, 2); +lean_inc(x_3417); +lean_inc(x_5); +lean_inc(x_4); +x_3418 = l_Lean_IR_ToIR_lowerType(x_3417, x_3416, x_4, x_5, x_3414); +if (lean_obj_tag(x_3418) == 0) +{ +lean_object* x_3419; lean_object* x_3420; lean_object* x_3421; lean_object* x_3422; lean_object* x_3423; +x_3419 = lean_ctor_get(x_3418, 0); +lean_inc(x_3419); +x_3420 = lean_ctor_get(x_3418, 1); +lean_inc(x_3420); +lean_dec(x_3418); +x_3421 = lean_ctor_get(x_3419, 0); +lean_inc(x_3421); +x_3422 = lean_ctor_get(x_3419, 1); +lean_inc(x_3422); +lean_dec(x_3419); +lean_inc(x_5); +lean_inc(x_4); +lean_inc(x_2); +x_3423 = l_Lean_IR_ToIR_lowerLet___lambda__3(x_2, x_3415, x_3405, x_3410, x_3389, x_3421, x_3422, x_4, 
x_5, x_3420); +if (lean_obj_tag(x_3423) == 0) +{ +lean_object* x_3424; lean_object* x_3425; uint8_t x_3426; +x_3424 = lean_ctor_get(x_3423, 0); +lean_inc(x_3424); +x_3425 = lean_ctor_get(x_3423, 1); +lean_inc(x_3425); +lean_dec(x_3423); +x_3426 = !lean_is_exclusive(x_3424); +if (x_3426 == 0) +{ +lean_object* x_3427; +x_3427 = lean_ctor_get(x_3424, 0); +lean_ctor_set(x_3390, 0, x_3427); +lean_ctor_set(x_3424, 0, x_3390); +x_3358 = x_3424; +x_3359 = x_3425; +goto block_3388; +} +else +{ +lean_object* x_3428; lean_object* x_3429; lean_object* x_3430; +x_3428 = lean_ctor_get(x_3424, 0); +x_3429 = lean_ctor_get(x_3424, 1); +lean_inc(x_3429); +lean_inc(x_3428); +lean_dec(x_3424); +lean_ctor_set(x_3390, 0, x_3428); +x_3430 = lean_alloc_ctor(0, 2, 0); +lean_ctor_set(x_3430, 0, x_3390); +lean_ctor_set(x_3430, 1, x_3429); +x_3358 = x_3430; +x_3359 = x_3425; +goto block_3388; +} +} +else +{ +uint8_t x_3431; +lean_free_object(x_3390); +lean_dec(x_3069); +lean_dec(x_3058); +lean_dec(x_153); +lean_dec(x_5); +lean_dec(x_4); +lean_dec(x_2); +lean_dec(x_1); +x_3431 = !lean_is_exclusive(x_3423); +if (x_3431 == 0) +{ +return x_3423; +} +else +{ +lean_object* x_3432; lean_object* x_3433; lean_object* x_3434; +x_3432 = lean_ctor_get(x_3423, 0); +x_3433 = lean_ctor_get(x_3423, 1); +lean_inc(x_3433); +lean_inc(x_3432); +lean_dec(x_3423); +x_3434 = lean_alloc_ctor(1, 2, 0); +lean_ctor_set(x_3434, 0, x_3432); +lean_ctor_set(x_3434, 1, x_3433); +return x_3434; +} +} +} +else +{ +uint8_t x_3435; +lean_dec(x_3415); +lean_dec(x_3410); +lean_dec(x_3389); +lean_dec(x_3405); +lean_free_object(x_3390); +lean_dec(x_3069); +lean_dec(x_3058); +lean_dec(x_153); +lean_dec(x_5); +lean_dec(x_4); +lean_dec(x_2); +lean_dec(x_1); +x_3435 = !lean_is_exclusive(x_3418); +if (x_3435 == 0) +{ +return x_3418; +} +else +{ +lean_object* x_3436; lean_object* x_3437; lean_object* x_3438; +x_3436 = lean_ctor_get(x_3418, 0); +x_3437 = lean_ctor_get(x_3418, 1); +lean_inc(x_3437); +lean_inc(x_3436); +lean_dec(x_3418); +x_3438 = lean_alloc_ctor(1, 2, 0); +lean_ctor_set(x_3438, 0, x_3436); +lean_ctor_set(x_3438, 1, x_3437); +return x_3438; +} +} +} +else +{ +lean_object* x_3439; lean_object* x_3440; lean_object* x_3441; lean_object* x_3442; lean_object* x_3443; lean_object* x_3444; lean_object* x_3445; lean_object* x_3446; +lean_dec(x_3400); +lean_dec(x_3398); +lean_inc(x_3058); +lean_inc(x_153); +lean_ctor_set_tag(x_3389, 6); +lean_ctor_set(x_3389, 1, x_3058); +lean_ctor_set(x_3389, 0, x_153); +x_3439 = lean_ctor_get(x_1, 0); +lean_inc(x_3439); +x_3440 = l_Lean_IR_ToIR_bindVar(x_3439, x_3064, x_4, x_5, x_3394); +x_3441 = lean_ctor_get(x_3440, 0); +lean_inc(x_3441); +x_3442 = lean_ctor_get(x_3440, 1); +lean_inc(x_3442); +lean_dec(x_3440); +x_3443 = lean_ctor_get(x_3441, 0); +lean_inc(x_3443); +x_3444 = lean_ctor_get(x_3441, 1); +lean_inc(x_3444); +lean_dec(x_3441); +x_3445 = lean_ctor_get(x_1, 2); +lean_inc(x_3445); +lean_inc(x_5); +lean_inc(x_4); +x_3446 = l_Lean_IR_ToIR_lowerType(x_3445, x_3444, x_4, x_5, x_3442); +if (lean_obj_tag(x_3446) == 0) +{ +lean_object* x_3447; lean_object* x_3448; lean_object* x_3449; lean_object* x_3450; lean_object* x_3451; +x_3447 = lean_ctor_get(x_3446, 0); +lean_inc(x_3447); +x_3448 = lean_ctor_get(x_3446, 1); +lean_inc(x_3448); +lean_dec(x_3446); +x_3449 = lean_ctor_get(x_3447, 0); +lean_inc(x_3449); +x_3450 = lean_ctor_get(x_3447, 1); +lean_inc(x_3450); +lean_dec(x_3447); +lean_inc(x_5); +lean_inc(x_4); +lean_inc(x_2); +x_3451 = l_Lean_IR_ToIR_lowerLet___lambda__1(x_2, x_3443, x_3389, x_3449, x_3450, x_4, x_5, 
x_3448); +if (lean_obj_tag(x_3451) == 0) +{ +lean_object* x_3452; lean_object* x_3453; uint8_t x_3454; +x_3452 = lean_ctor_get(x_3451, 0); +lean_inc(x_3452); +x_3453 = lean_ctor_get(x_3451, 1); +lean_inc(x_3453); +lean_dec(x_3451); +x_3454 = !lean_is_exclusive(x_3452); +if (x_3454 == 0) +{ +lean_object* x_3455; +x_3455 = lean_ctor_get(x_3452, 0); +lean_ctor_set(x_3390, 0, x_3455); +lean_ctor_set(x_3452, 0, x_3390); +x_3358 = x_3452; +x_3359 = x_3453; +goto block_3388; +} +else +{ +lean_object* x_3456; lean_object* x_3457; lean_object* x_3458; +x_3456 = lean_ctor_get(x_3452, 0); +x_3457 = lean_ctor_get(x_3452, 1); +lean_inc(x_3457); +lean_inc(x_3456); +lean_dec(x_3452); +lean_ctor_set(x_3390, 0, x_3456); +x_3458 = lean_alloc_ctor(0, 2, 0); +lean_ctor_set(x_3458, 0, x_3390); +lean_ctor_set(x_3458, 1, x_3457); +x_3358 = x_3458; +x_3359 = x_3453; +goto block_3388; +} +} +else +{ +uint8_t x_3459; +lean_free_object(x_3390); +lean_dec(x_3069); +lean_dec(x_3058); +lean_dec(x_153); +lean_dec(x_5); +lean_dec(x_4); +lean_dec(x_2); +lean_dec(x_1); +x_3459 = !lean_is_exclusive(x_3451); +if (x_3459 == 0) +{ +return x_3451; +} +else +{ +lean_object* x_3460; lean_object* x_3461; lean_object* x_3462; +x_3460 = lean_ctor_get(x_3451, 0); +x_3461 = lean_ctor_get(x_3451, 1); +lean_inc(x_3461); +lean_inc(x_3460); +lean_dec(x_3451); +x_3462 = lean_alloc_ctor(1, 2, 0); +lean_ctor_set(x_3462, 0, x_3460); +lean_ctor_set(x_3462, 1, x_3461); +return x_3462; +} +} +} +else +{ +uint8_t x_3463; +lean_dec(x_3443); +lean_dec(x_3389); +lean_free_object(x_3390); +lean_dec(x_3069); +lean_dec(x_3058); +lean_dec(x_153); +lean_dec(x_5); +lean_dec(x_4); +lean_dec(x_2); +lean_dec(x_1); +x_3463 = !lean_is_exclusive(x_3446); +if (x_3463 == 0) +{ +return x_3446; +} +else +{ +lean_object* x_3464; lean_object* x_3465; lean_object* x_3466; +x_3464 = lean_ctor_get(x_3446, 0); +x_3465 = lean_ctor_get(x_3446, 1); +lean_inc(x_3465); +lean_inc(x_3464); +lean_dec(x_3446); +x_3466 = lean_alloc_ctor(1, 2, 0); +lean_ctor_set(x_3466, 0, x_3464); +lean_ctor_set(x_3466, 1, x_3465); +return x_3466; +} +} +} +} +else +{ +lean_object* x_3467; lean_object* x_3468; lean_object* x_3469; lean_object* x_3470; lean_object* x_3471; lean_object* x_3472; lean_object* x_3473; lean_object* x_3474; +lean_dec(x_3400); +lean_dec(x_3398); +lean_inc(x_3058); +lean_inc(x_153); +lean_ctor_set_tag(x_3389, 7); +lean_ctor_set(x_3389, 1, x_3058); +lean_ctor_set(x_3389, 0, x_153); +x_3467 = lean_ctor_get(x_1, 0); +lean_inc(x_3467); +x_3468 = l_Lean_IR_ToIR_bindVar(x_3467, x_3064, x_4, x_5, x_3394); +x_3469 = lean_ctor_get(x_3468, 0); +lean_inc(x_3469); +x_3470 = lean_ctor_get(x_3468, 1); +lean_inc(x_3470); +lean_dec(x_3468); +x_3471 = lean_ctor_get(x_3469, 0); +lean_inc(x_3471); +x_3472 = lean_ctor_get(x_3469, 1); +lean_inc(x_3472); +lean_dec(x_3469); +x_3473 = lean_box(7); +lean_inc(x_5); +lean_inc(x_4); +lean_inc(x_2); +x_3474 = l_Lean_IR_ToIR_lowerLet___lambda__1(x_2, x_3471, x_3389, x_3473, x_3472, x_4, x_5, x_3470); +if (lean_obj_tag(x_3474) == 0) +{ +lean_object* x_3475; lean_object* x_3476; uint8_t x_3477; +x_3475 = lean_ctor_get(x_3474, 0); +lean_inc(x_3475); +x_3476 = lean_ctor_get(x_3474, 1); +lean_inc(x_3476); +lean_dec(x_3474); +x_3477 = !lean_is_exclusive(x_3475); +if (x_3477 == 0) +{ +lean_object* x_3478; +x_3478 = lean_ctor_get(x_3475, 0); +lean_ctor_set(x_3390, 0, x_3478); +lean_ctor_set(x_3475, 0, x_3390); +x_3358 = x_3475; +x_3359 = x_3476; +goto block_3388; +} +else +{ +lean_object* x_3479; lean_object* x_3480; lean_object* x_3481; +x_3479 = 
lean_ctor_get(x_3475, 0); +x_3480 = lean_ctor_get(x_3475, 1); +lean_inc(x_3480); +lean_inc(x_3479); +lean_dec(x_3475); +lean_ctor_set(x_3390, 0, x_3479); +x_3481 = lean_alloc_ctor(0, 2, 0); +lean_ctor_set(x_3481, 0, x_3390); +lean_ctor_set(x_3481, 1, x_3480); +x_3358 = x_3481; +x_3359 = x_3476; +goto block_3388; +} +} +else +{ +uint8_t x_3482; +lean_free_object(x_3390); +lean_dec(x_3069); +lean_dec(x_3058); +lean_dec(x_153); +lean_dec(x_5); +lean_dec(x_4); +lean_dec(x_2); +lean_dec(x_1); +x_3482 = !lean_is_exclusive(x_3474); +if (x_3482 == 0) +{ +return x_3474; +} +else +{ +lean_object* x_3483; lean_object* x_3484; lean_object* x_3485; +x_3483 = lean_ctor_get(x_3474, 0); +x_3484 = lean_ctor_get(x_3474, 1); +lean_inc(x_3484); +lean_inc(x_3483); +lean_dec(x_3474); +x_3485 = lean_alloc_ctor(1, 2, 0); +lean_ctor_set(x_3485, 0, x_3483); +lean_ctor_set(x_3485, 1, x_3484); +return x_3485; +} +} +} +} +else +{ +lean_object* x_3486; lean_object* x_3487; lean_object* x_3488; lean_object* x_3489; uint8_t x_3490; +x_3486 = lean_ctor_get(x_3390, 0); +lean_inc(x_3486); +lean_dec(x_3390); +x_3487 = lean_array_get_size(x_3058); +x_3488 = lean_ctor_get(x_3486, 3); +lean_inc(x_3488); +lean_dec(x_3486); +x_3489 = lean_array_get_size(x_3488); +lean_dec(x_3488); +x_3490 = lean_nat_dec_lt(x_3487, x_3489); +if (x_3490 == 0) +{ +uint8_t x_3491; +x_3491 = lean_nat_dec_eq(x_3487, x_3489); +if (x_3491 == 0) +{ +lean_object* x_3492; lean_object* x_3493; lean_object* x_3494; lean_object* x_3495; lean_object* x_3496; lean_object* x_3497; lean_object* x_3498; lean_object* x_3499; lean_object* x_3500; lean_object* x_3501; lean_object* x_3502; lean_object* x_3503; lean_object* x_3504; lean_object* x_3505; lean_object* x_3506; lean_object* x_3507; +x_3492 = lean_unsigned_to_nat(0u); +x_3493 = l_Array_extract___rarg(x_3058, x_3492, x_3489); +x_3494 = l_Array_extract___rarg(x_3058, x_3489, x_3487); +lean_dec(x_3487); +lean_inc(x_153); +lean_ctor_set_tag(x_3389, 6); +lean_ctor_set(x_3389, 1, x_3493); +lean_ctor_set(x_3389, 0, x_153); +x_3495 = lean_ctor_get(x_1, 0); +lean_inc(x_3495); +x_3496 = l_Lean_IR_ToIR_bindVar(x_3495, x_3064, x_4, x_5, x_3394); +x_3497 = lean_ctor_get(x_3496, 0); +lean_inc(x_3497); +x_3498 = lean_ctor_get(x_3496, 1); +lean_inc(x_3498); +lean_dec(x_3496); +x_3499 = lean_ctor_get(x_3497, 0); +lean_inc(x_3499); +x_3500 = lean_ctor_get(x_3497, 1); +lean_inc(x_3500); +lean_dec(x_3497); +x_3501 = l_Lean_IR_ToIR_newVar(x_3500, x_4, x_5, x_3498); +x_3502 = lean_ctor_get(x_3501, 0); +lean_inc(x_3502); +x_3503 = lean_ctor_get(x_3501, 1); +lean_inc(x_3503); +lean_dec(x_3501); +x_3504 = lean_ctor_get(x_3502, 0); +lean_inc(x_3504); +x_3505 = lean_ctor_get(x_3502, 1); +lean_inc(x_3505); +lean_dec(x_3502); +x_3506 = lean_ctor_get(x_1, 2); +lean_inc(x_3506); +lean_inc(x_5); +lean_inc(x_4); +x_3507 = l_Lean_IR_ToIR_lowerType(x_3506, x_3505, x_4, x_5, x_3503); +if (lean_obj_tag(x_3507) == 0) +{ +lean_object* x_3508; lean_object* x_3509; lean_object* x_3510; lean_object* x_3511; lean_object* x_3512; +x_3508 = lean_ctor_get(x_3507, 0); +lean_inc(x_3508); +x_3509 = lean_ctor_get(x_3507, 1); +lean_inc(x_3509); +lean_dec(x_3507); +x_3510 = lean_ctor_get(x_3508, 0); +lean_inc(x_3510); +x_3511 = lean_ctor_get(x_3508, 1); +lean_inc(x_3511); +lean_dec(x_3508); +lean_inc(x_5); +lean_inc(x_4); +lean_inc(x_2); +x_3512 = l_Lean_IR_ToIR_lowerLet___lambda__3(x_2, x_3504, x_3494, x_3499, x_3389, x_3510, x_3511, x_4, x_5, x_3509); +if (lean_obj_tag(x_3512) == 0) +{ +lean_object* x_3513; lean_object* x_3514; lean_object* x_3515; 
lean_object* x_3516; lean_object* x_3517; lean_object* x_3518; lean_object* x_3519; +x_3513 = lean_ctor_get(x_3512, 0); +lean_inc(x_3513); +x_3514 = lean_ctor_get(x_3512, 1); +lean_inc(x_3514); +lean_dec(x_3512); +x_3515 = lean_ctor_get(x_3513, 0); +lean_inc(x_3515); +x_3516 = lean_ctor_get(x_3513, 1); +lean_inc(x_3516); +if (lean_is_exclusive(x_3513)) { + lean_ctor_release(x_3513, 0); + lean_ctor_release(x_3513, 1); + x_3517 = x_3513; +} else { + lean_dec_ref(x_3513); + x_3517 = lean_box(0); +} +x_3518 = lean_alloc_ctor(1, 1, 0); +lean_ctor_set(x_3518, 0, x_3515); +if (lean_is_scalar(x_3517)) { + x_3519 = lean_alloc_ctor(0, 2, 0); +} else { + x_3519 = x_3517; +} +lean_ctor_set(x_3519, 0, x_3518); +lean_ctor_set(x_3519, 1, x_3516); +x_3358 = x_3519; +x_3359 = x_3514; +goto block_3388; +} +else +{ +lean_object* x_3520; lean_object* x_3521; lean_object* x_3522; lean_object* x_3523; +lean_dec(x_3069); +lean_dec(x_3058); +lean_dec(x_153); +lean_dec(x_5); +lean_dec(x_4); +lean_dec(x_2); +lean_dec(x_1); +x_3520 = lean_ctor_get(x_3512, 0); +lean_inc(x_3520); +x_3521 = lean_ctor_get(x_3512, 1); +lean_inc(x_3521); +if (lean_is_exclusive(x_3512)) { + lean_ctor_release(x_3512, 0); + lean_ctor_release(x_3512, 1); + x_3522 = x_3512; +} else { + lean_dec_ref(x_3512); + x_3522 = lean_box(0); +} +if (lean_is_scalar(x_3522)) { + x_3523 = lean_alloc_ctor(1, 2, 0); +} else { + x_3523 = x_3522; +} +lean_ctor_set(x_3523, 0, x_3520); +lean_ctor_set(x_3523, 1, x_3521); +return x_3523; +} +} +else +{ +lean_object* x_3524; lean_object* x_3525; lean_object* x_3526; lean_object* x_3527; +lean_dec(x_3504); +lean_dec(x_3499); +lean_dec(x_3389); +lean_dec(x_3494); +lean_dec(x_3069); +lean_dec(x_3058); +lean_dec(x_153); +lean_dec(x_5); +lean_dec(x_4); +lean_dec(x_2); +lean_dec(x_1); +x_3524 = lean_ctor_get(x_3507, 0); +lean_inc(x_3524); +x_3525 = lean_ctor_get(x_3507, 1); +lean_inc(x_3525); +if (lean_is_exclusive(x_3507)) { + lean_ctor_release(x_3507, 0); + lean_ctor_release(x_3507, 1); + x_3526 = x_3507; +} else { + lean_dec_ref(x_3507); + x_3526 = lean_box(0); +} +if (lean_is_scalar(x_3526)) { + x_3527 = lean_alloc_ctor(1, 2, 0); +} else { + x_3527 = x_3526; +} +lean_ctor_set(x_3527, 0, x_3524); +lean_ctor_set(x_3527, 1, x_3525); +return x_3527; +} +} +else +{ +lean_object* x_3528; lean_object* x_3529; lean_object* x_3530; lean_object* x_3531; lean_object* x_3532; lean_object* x_3533; lean_object* x_3534; lean_object* x_3535; +lean_dec(x_3489); +lean_dec(x_3487); +lean_inc(x_3058); +lean_inc(x_153); +lean_ctor_set_tag(x_3389, 6); +lean_ctor_set(x_3389, 1, x_3058); +lean_ctor_set(x_3389, 0, x_153); +x_3528 = lean_ctor_get(x_1, 0); +lean_inc(x_3528); +x_3529 = l_Lean_IR_ToIR_bindVar(x_3528, x_3064, x_4, x_5, x_3394); +x_3530 = lean_ctor_get(x_3529, 0); +lean_inc(x_3530); +x_3531 = lean_ctor_get(x_3529, 1); +lean_inc(x_3531); +lean_dec(x_3529); +x_3532 = lean_ctor_get(x_3530, 0); +lean_inc(x_3532); +x_3533 = lean_ctor_get(x_3530, 1); +lean_inc(x_3533); +lean_dec(x_3530); +x_3534 = lean_ctor_get(x_1, 2); +lean_inc(x_3534); +lean_inc(x_5); +lean_inc(x_4); +x_3535 = l_Lean_IR_ToIR_lowerType(x_3534, x_3533, x_4, x_5, x_3531); +if (lean_obj_tag(x_3535) == 0) +{ +lean_object* x_3536; lean_object* x_3537; lean_object* x_3538; lean_object* x_3539; lean_object* x_3540; +x_3536 = lean_ctor_get(x_3535, 0); +lean_inc(x_3536); +x_3537 = lean_ctor_get(x_3535, 1); +lean_inc(x_3537); +lean_dec(x_3535); +x_3538 = lean_ctor_get(x_3536, 0); +lean_inc(x_3538); +x_3539 = lean_ctor_get(x_3536, 1); +lean_inc(x_3539); +lean_dec(x_3536); 
+lean_inc(x_5); +lean_inc(x_4); +lean_inc(x_2); +x_3540 = l_Lean_IR_ToIR_lowerLet___lambda__1(x_2, x_3532, x_3389, x_3538, x_3539, x_4, x_5, x_3537); +if (lean_obj_tag(x_3540) == 0) +{ +lean_object* x_3541; lean_object* x_3542; lean_object* x_3543; lean_object* x_3544; lean_object* x_3545; lean_object* x_3546; lean_object* x_3547; +x_3541 = lean_ctor_get(x_3540, 0); +lean_inc(x_3541); +x_3542 = lean_ctor_get(x_3540, 1); +lean_inc(x_3542); +lean_dec(x_3540); +x_3543 = lean_ctor_get(x_3541, 0); +lean_inc(x_3543); +x_3544 = lean_ctor_get(x_3541, 1); +lean_inc(x_3544); +if (lean_is_exclusive(x_3541)) { + lean_ctor_release(x_3541, 0); + lean_ctor_release(x_3541, 1); + x_3545 = x_3541; +} else { + lean_dec_ref(x_3541); + x_3545 = lean_box(0); +} +x_3546 = lean_alloc_ctor(1, 1, 0); +lean_ctor_set(x_3546, 0, x_3543); +if (lean_is_scalar(x_3545)) { + x_3547 = lean_alloc_ctor(0, 2, 0); +} else { + x_3547 = x_3545; +} +lean_ctor_set(x_3547, 0, x_3546); +lean_ctor_set(x_3547, 1, x_3544); +x_3358 = x_3547; +x_3359 = x_3542; +goto block_3388; +} +else +{ +lean_object* x_3548; lean_object* x_3549; lean_object* x_3550; lean_object* x_3551; +lean_dec(x_3069); +lean_dec(x_3058); +lean_dec(x_153); +lean_dec(x_5); +lean_dec(x_4); +lean_dec(x_2); +lean_dec(x_1); +x_3548 = lean_ctor_get(x_3540, 0); +lean_inc(x_3548); +x_3549 = lean_ctor_get(x_3540, 1); +lean_inc(x_3549); +if (lean_is_exclusive(x_3540)) { + lean_ctor_release(x_3540, 0); + lean_ctor_release(x_3540, 1); + x_3550 = x_3540; +} else { + lean_dec_ref(x_3540); + x_3550 = lean_box(0); +} +if (lean_is_scalar(x_3550)) { + x_3551 = lean_alloc_ctor(1, 2, 0); +} else { + x_3551 = x_3550; +} +lean_ctor_set(x_3551, 0, x_3548); +lean_ctor_set(x_3551, 1, x_3549); +return x_3551; +} +} +else +{ +lean_object* x_3552; lean_object* x_3553; lean_object* x_3554; lean_object* x_3555; +lean_dec(x_3532); +lean_dec(x_3389); +lean_dec(x_3069); +lean_dec(x_3058); +lean_dec(x_153); +lean_dec(x_5); +lean_dec(x_4); +lean_dec(x_2); +lean_dec(x_1); +x_3552 = lean_ctor_get(x_3535, 0); +lean_inc(x_3552); +x_3553 = lean_ctor_get(x_3535, 1); +lean_inc(x_3553); +if (lean_is_exclusive(x_3535)) { + lean_ctor_release(x_3535, 0); + lean_ctor_release(x_3535, 1); + x_3554 = x_3535; +} else { + lean_dec_ref(x_3535); + x_3554 = lean_box(0); +} +if (lean_is_scalar(x_3554)) { + x_3555 = lean_alloc_ctor(1, 2, 0); +} else { + x_3555 = x_3554; +} +lean_ctor_set(x_3555, 0, x_3552); +lean_ctor_set(x_3555, 1, x_3553); +return x_3555; +} +} +} +else +{ +lean_object* x_3556; lean_object* x_3557; lean_object* x_3558; lean_object* x_3559; lean_object* x_3560; lean_object* x_3561; lean_object* x_3562; lean_object* x_3563; +lean_dec(x_3489); +lean_dec(x_3487); +lean_inc(x_3058); +lean_inc(x_153); +lean_ctor_set_tag(x_3389, 7); +lean_ctor_set(x_3389, 1, x_3058); +lean_ctor_set(x_3389, 0, x_153); +x_3556 = lean_ctor_get(x_1, 0); +lean_inc(x_3556); +x_3557 = l_Lean_IR_ToIR_bindVar(x_3556, x_3064, x_4, x_5, x_3394); +x_3558 = lean_ctor_get(x_3557, 0); +lean_inc(x_3558); +x_3559 = lean_ctor_get(x_3557, 1); +lean_inc(x_3559); +lean_dec(x_3557); +x_3560 = lean_ctor_get(x_3558, 0); +lean_inc(x_3560); +x_3561 = lean_ctor_get(x_3558, 1); +lean_inc(x_3561); +lean_dec(x_3558); +x_3562 = lean_box(7); +lean_inc(x_5); +lean_inc(x_4); +lean_inc(x_2); +x_3563 = l_Lean_IR_ToIR_lowerLet___lambda__1(x_2, x_3560, x_3389, x_3562, x_3561, x_4, x_5, x_3559); +if (lean_obj_tag(x_3563) == 0) +{ +lean_object* x_3564; lean_object* x_3565; lean_object* x_3566; lean_object* x_3567; lean_object* x_3568; lean_object* x_3569; 
lean_object* x_3570; +x_3564 = lean_ctor_get(x_3563, 0); +lean_inc(x_3564); +x_3565 = lean_ctor_get(x_3563, 1); +lean_inc(x_3565); +lean_dec(x_3563); +x_3566 = lean_ctor_get(x_3564, 0); +lean_inc(x_3566); +x_3567 = lean_ctor_get(x_3564, 1); +lean_inc(x_3567); +if (lean_is_exclusive(x_3564)) { + lean_ctor_release(x_3564, 0); + lean_ctor_release(x_3564, 1); + x_3568 = x_3564; +} else { + lean_dec_ref(x_3564); + x_3568 = lean_box(0); +} +x_3569 = lean_alloc_ctor(1, 1, 0); +lean_ctor_set(x_3569, 0, x_3566); +if (lean_is_scalar(x_3568)) { + x_3570 = lean_alloc_ctor(0, 2, 0); +} else { + x_3570 = x_3568; +} +lean_ctor_set(x_3570, 0, x_3569); +lean_ctor_set(x_3570, 1, x_3567); +x_3358 = x_3570; +x_3359 = x_3565; +goto block_3388; +} +else +{ +lean_object* x_3571; lean_object* x_3572; lean_object* x_3573; lean_object* x_3574; +lean_dec(x_3069); +lean_dec(x_3058); +lean_dec(x_153); +lean_dec(x_5); +lean_dec(x_4); +lean_dec(x_2); +lean_dec(x_1); +x_3571 = lean_ctor_get(x_3563, 0); +lean_inc(x_3571); +x_3572 = lean_ctor_get(x_3563, 1); +lean_inc(x_3572); +if (lean_is_exclusive(x_3563)) { + lean_ctor_release(x_3563, 0); + lean_ctor_release(x_3563, 1); + x_3573 = x_3563; +} else { + lean_dec_ref(x_3563); + x_3573 = lean_box(0); +} +if (lean_is_scalar(x_3573)) { + x_3574 = lean_alloc_ctor(1, 2, 0); +} else { + x_3574 = x_3573; +} +lean_ctor_set(x_3574, 0, x_3571); +lean_ctor_set(x_3574, 1, x_3572); +return x_3574; +} +} +} +} +else +{ +lean_object* x_3575; lean_object* x_3576; lean_object* x_3577; lean_object* x_3578; lean_object* x_3579; lean_object* x_3580; uint8_t x_3581; +x_3575 = lean_ctor_get(x_3389, 1); +lean_inc(x_3575); +lean_dec(x_3389); +x_3576 = lean_ctor_get(x_3390, 0); +lean_inc(x_3576); +if (lean_is_exclusive(x_3390)) { + lean_ctor_release(x_3390, 0); + x_3577 = x_3390; +} else { + lean_dec_ref(x_3390); + x_3577 = lean_box(0); +} +x_3578 = lean_array_get_size(x_3058); +x_3579 = lean_ctor_get(x_3576, 3); +lean_inc(x_3579); +lean_dec(x_3576); +x_3580 = lean_array_get_size(x_3579); +lean_dec(x_3579); +x_3581 = lean_nat_dec_lt(x_3578, x_3580); +if (x_3581 == 0) +{ +uint8_t x_3582; +x_3582 = lean_nat_dec_eq(x_3578, x_3580); +if (x_3582 == 0) +{ +lean_object* x_3583; lean_object* x_3584; lean_object* x_3585; lean_object* x_3586; lean_object* x_3587; lean_object* x_3588; lean_object* x_3589; lean_object* x_3590; lean_object* x_3591; lean_object* x_3592; lean_object* x_3593; lean_object* x_3594; lean_object* x_3595; lean_object* x_3596; lean_object* x_3597; lean_object* x_3598; lean_object* x_3599; +x_3583 = lean_unsigned_to_nat(0u); +x_3584 = l_Array_extract___rarg(x_3058, x_3583, x_3580); +x_3585 = l_Array_extract___rarg(x_3058, x_3580, x_3578); +lean_dec(x_3578); +lean_inc(x_153); +x_3586 = lean_alloc_ctor(6, 2, 0); +lean_ctor_set(x_3586, 0, x_153); +lean_ctor_set(x_3586, 1, x_3584); +x_3587 = lean_ctor_get(x_1, 0); +lean_inc(x_3587); +x_3588 = l_Lean_IR_ToIR_bindVar(x_3587, x_3064, x_4, x_5, x_3575); +x_3589 = lean_ctor_get(x_3588, 0); +lean_inc(x_3589); +x_3590 = lean_ctor_get(x_3588, 1); +lean_inc(x_3590); +lean_dec(x_3588); +x_3591 = lean_ctor_get(x_3589, 0); +lean_inc(x_3591); +x_3592 = lean_ctor_get(x_3589, 1); +lean_inc(x_3592); +lean_dec(x_3589); +x_3593 = l_Lean_IR_ToIR_newVar(x_3592, x_4, x_5, x_3590); +x_3594 = lean_ctor_get(x_3593, 0); +lean_inc(x_3594); +x_3595 = lean_ctor_get(x_3593, 1); +lean_inc(x_3595); +lean_dec(x_3593); +x_3596 = lean_ctor_get(x_3594, 0); +lean_inc(x_3596); +x_3597 = lean_ctor_get(x_3594, 1); +lean_inc(x_3597); +lean_dec(x_3594); +x_3598 = lean_ctor_get(x_1, 
2); +lean_inc(x_3598); +lean_inc(x_5); +lean_inc(x_4); +x_3599 = l_Lean_IR_ToIR_lowerType(x_3598, x_3597, x_4, x_5, x_3595); +if (lean_obj_tag(x_3599) == 0) +{ +lean_object* x_3600; lean_object* x_3601; lean_object* x_3602; lean_object* x_3603; lean_object* x_3604; +x_3600 = lean_ctor_get(x_3599, 0); +lean_inc(x_3600); +x_3601 = lean_ctor_get(x_3599, 1); +lean_inc(x_3601); +lean_dec(x_3599); +x_3602 = lean_ctor_get(x_3600, 0); +lean_inc(x_3602); +x_3603 = lean_ctor_get(x_3600, 1); +lean_inc(x_3603); +lean_dec(x_3600); +lean_inc(x_5); +lean_inc(x_4); +lean_inc(x_2); +x_3604 = l_Lean_IR_ToIR_lowerLet___lambda__3(x_2, x_3596, x_3585, x_3591, x_3586, x_3602, x_3603, x_4, x_5, x_3601); +if (lean_obj_tag(x_3604) == 0) +{ +lean_object* x_3605; lean_object* x_3606; lean_object* x_3607; lean_object* x_3608; lean_object* x_3609; lean_object* x_3610; lean_object* x_3611; +x_3605 = lean_ctor_get(x_3604, 0); +lean_inc(x_3605); +x_3606 = lean_ctor_get(x_3604, 1); +lean_inc(x_3606); +lean_dec(x_3604); +x_3607 = lean_ctor_get(x_3605, 0); +lean_inc(x_3607); +x_3608 = lean_ctor_get(x_3605, 1); +lean_inc(x_3608); +if (lean_is_exclusive(x_3605)) { + lean_ctor_release(x_3605, 0); + lean_ctor_release(x_3605, 1); + x_3609 = x_3605; +} else { + lean_dec_ref(x_3605); + x_3609 = lean_box(0); +} +if (lean_is_scalar(x_3577)) { + x_3610 = lean_alloc_ctor(1, 1, 0); +} else { + x_3610 = x_3577; +} +lean_ctor_set(x_3610, 0, x_3607); +if (lean_is_scalar(x_3609)) { + x_3611 = lean_alloc_ctor(0, 2, 0); +} else { + x_3611 = x_3609; +} +lean_ctor_set(x_3611, 0, x_3610); +lean_ctor_set(x_3611, 1, x_3608); +x_3358 = x_3611; +x_3359 = x_3606; +goto block_3388; +} +else +{ +lean_object* x_3612; lean_object* x_3613; lean_object* x_3614; lean_object* x_3615; +lean_dec(x_3577); +lean_dec(x_3069); +lean_dec(x_3058); +lean_dec(x_153); +lean_dec(x_5); +lean_dec(x_4); +lean_dec(x_2); +lean_dec(x_1); +x_3612 = lean_ctor_get(x_3604, 0); +lean_inc(x_3612); +x_3613 = lean_ctor_get(x_3604, 1); +lean_inc(x_3613); +if (lean_is_exclusive(x_3604)) { + lean_ctor_release(x_3604, 0); + lean_ctor_release(x_3604, 1); + x_3614 = x_3604; +} else { + lean_dec_ref(x_3604); + x_3614 = lean_box(0); +} +if (lean_is_scalar(x_3614)) { + x_3615 = lean_alloc_ctor(1, 2, 0); +} else { + x_3615 = x_3614; +} +lean_ctor_set(x_3615, 0, x_3612); +lean_ctor_set(x_3615, 1, x_3613); +return x_3615; +} +} +else +{ +lean_object* x_3616; lean_object* x_3617; lean_object* x_3618; lean_object* x_3619; +lean_dec(x_3596); +lean_dec(x_3591); +lean_dec(x_3586); +lean_dec(x_3585); +lean_dec(x_3577); +lean_dec(x_3069); +lean_dec(x_3058); +lean_dec(x_153); +lean_dec(x_5); +lean_dec(x_4); +lean_dec(x_2); +lean_dec(x_1); +x_3616 = lean_ctor_get(x_3599, 0); +lean_inc(x_3616); +x_3617 = lean_ctor_get(x_3599, 1); +lean_inc(x_3617); +if (lean_is_exclusive(x_3599)) { + lean_ctor_release(x_3599, 0); + lean_ctor_release(x_3599, 1); + x_3618 = x_3599; +} else { + lean_dec_ref(x_3599); + x_3618 = lean_box(0); +} +if (lean_is_scalar(x_3618)) { + x_3619 = lean_alloc_ctor(1, 2, 0); +} else { + x_3619 = x_3618; +} +lean_ctor_set(x_3619, 0, x_3616); +lean_ctor_set(x_3619, 1, x_3617); +return x_3619; +} +} +else +{ +lean_object* x_3620; lean_object* x_3621; lean_object* x_3622; lean_object* x_3623; lean_object* x_3624; lean_object* x_3625; lean_object* x_3626; lean_object* x_3627; lean_object* x_3628; +lean_dec(x_3580); +lean_dec(x_3578); +lean_inc(x_3058); +lean_inc(x_153); +x_3620 = lean_alloc_ctor(6, 2, 0); +lean_ctor_set(x_3620, 0, x_153); +lean_ctor_set(x_3620, 1, x_3058); +x_3621 = 
lean_ctor_get(x_1, 0); +lean_inc(x_3621); +x_3622 = l_Lean_IR_ToIR_bindVar(x_3621, x_3064, x_4, x_5, x_3575); +x_3623 = lean_ctor_get(x_3622, 0); +lean_inc(x_3623); +x_3624 = lean_ctor_get(x_3622, 1); +lean_inc(x_3624); +lean_dec(x_3622); +x_3625 = lean_ctor_get(x_3623, 0); +lean_inc(x_3625); +x_3626 = lean_ctor_get(x_3623, 1); +lean_inc(x_3626); +lean_dec(x_3623); +x_3627 = lean_ctor_get(x_1, 2); +lean_inc(x_3627); +lean_inc(x_5); +lean_inc(x_4); +x_3628 = l_Lean_IR_ToIR_lowerType(x_3627, x_3626, x_4, x_5, x_3624); +if (lean_obj_tag(x_3628) == 0) +{ +lean_object* x_3629; lean_object* x_3630; lean_object* x_3631; lean_object* x_3632; lean_object* x_3633; +x_3629 = lean_ctor_get(x_3628, 0); +lean_inc(x_3629); +x_3630 = lean_ctor_get(x_3628, 1); +lean_inc(x_3630); +lean_dec(x_3628); +x_3631 = lean_ctor_get(x_3629, 0); +lean_inc(x_3631); +x_3632 = lean_ctor_get(x_3629, 1); +lean_inc(x_3632); +lean_dec(x_3629); +lean_inc(x_5); +lean_inc(x_4); +lean_inc(x_2); +x_3633 = l_Lean_IR_ToIR_lowerLet___lambda__1(x_2, x_3625, x_3620, x_3631, x_3632, x_4, x_5, x_3630); +if (lean_obj_tag(x_3633) == 0) +{ +lean_object* x_3634; lean_object* x_3635; lean_object* x_3636; lean_object* x_3637; lean_object* x_3638; lean_object* x_3639; lean_object* x_3640; +x_3634 = lean_ctor_get(x_3633, 0); +lean_inc(x_3634); +x_3635 = lean_ctor_get(x_3633, 1); +lean_inc(x_3635); +lean_dec(x_3633); +x_3636 = lean_ctor_get(x_3634, 0); +lean_inc(x_3636); +x_3637 = lean_ctor_get(x_3634, 1); +lean_inc(x_3637); +if (lean_is_exclusive(x_3634)) { + lean_ctor_release(x_3634, 0); + lean_ctor_release(x_3634, 1); + x_3638 = x_3634; +} else { + lean_dec_ref(x_3634); + x_3638 = lean_box(0); +} +if (lean_is_scalar(x_3577)) { + x_3639 = lean_alloc_ctor(1, 1, 0); +} else { + x_3639 = x_3577; +} +lean_ctor_set(x_3639, 0, x_3636); +if (lean_is_scalar(x_3638)) { + x_3640 = lean_alloc_ctor(0, 2, 0); +} else { + x_3640 = x_3638; +} +lean_ctor_set(x_3640, 0, x_3639); +lean_ctor_set(x_3640, 1, x_3637); +x_3358 = x_3640; +x_3359 = x_3635; +goto block_3388; +} +else +{ +lean_object* x_3641; lean_object* x_3642; lean_object* x_3643; lean_object* x_3644; +lean_dec(x_3577); +lean_dec(x_3069); +lean_dec(x_3058); +lean_dec(x_153); +lean_dec(x_5); +lean_dec(x_4); +lean_dec(x_2); +lean_dec(x_1); +x_3641 = lean_ctor_get(x_3633, 0); +lean_inc(x_3641); +x_3642 = lean_ctor_get(x_3633, 1); +lean_inc(x_3642); +if (lean_is_exclusive(x_3633)) { + lean_ctor_release(x_3633, 0); + lean_ctor_release(x_3633, 1); + x_3643 = x_3633; +} else { + lean_dec_ref(x_3633); + x_3643 = lean_box(0); +} +if (lean_is_scalar(x_3643)) { + x_3644 = lean_alloc_ctor(1, 2, 0); +} else { + x_3644 = x_3643; +} +lean_ctor_set(x_3644, 0, x_3641); +lean_ctor_set(x_3644, 1, x_3642); +return x_3644; +} +} +else +{ +lean_object* x_3645; lean_object* x_3646; lean_object* x_3647; lean_object* x_3648; +lean_dec(x_3625); +lean_dec(x_3620); +lean_dec(x_3577); +lean_dec(x_3069); +lean_dec(x_3058); +lean_dec(x_153); +lean_dec(x_5); +lean_dec(x_4); +lean_dec(x_2); +lean_dec(x_1); +x_3645 = lean_ctor_get(x_3628, 0); +lean_inc(x_3645); +x_3646 = lean_ctor_get(x_3628, 1); +lean_inc(x_3646); +if (lean_is_exclusive(x_3628)) { + lean_ctor_release(x_3628, 0); + lean_ctor_release(x_3628, 1); + x_3647 = x_3628; +} else { + lean_dec_ref(x_3628); + x_3647 = lean_box(0); +} +if (lean_is_scalar(x_3647)) { + x_3648 = lean_alloc_ctor(1, 2, 0); +} else { + x_3648 = x_3647; +} +lean_ctor_set(x_3648, 0, x_3645); +lean_ctor_set(x_3648, 1, x_3646); +return x_3648; +} +} +} +else +{ +lean_object* x_3649; lean_object* x_3650; 
lean_object* x_3651; lean_object* x_3652; lean_object* x_3653; lean_object* x_3654; lean_object* x_3655; lean_object* x_3656; lean_object* x_3657; +lean_dec(x_3580); +lean_dec(x_3578); +lean_inc(x_3058); +lean_inc(x_153); +x_3649 = lean_alloc_ctor(7, 2, 0); +lean_ctor_set(x_3649, 0, x_153); +lean_ctor_set(x_3649, 1, x_3058); +x_3650 = lean_ctor_get(x_1, 0); +lean_inc(x_3650); +x_3651 = l_Lean_IR_ToIR_bindVar(x_3650, x_3064, x_4, x_5, x_3575); +x_3652 = lean_ctor_get(x_3651, 0); +lean_inc(x_3652); +x_3653 = lean_ctor_get(x_3651, 1); +lean_inc(x_3653); +lean_dec(x_3651); +x_3654 = lean_ctor_get(x_3652, 0); +lean_inc(x_3654); +x_3655 = lean_ctor_get(x_3652, 1); +lean_inc(x_3655); +lean_dec(x_3652); +x_3656 = lean_box(7); +lean_inc(x_5); +lean_inc(x_4); +lean_inc(x_2); +x_3657 = l_Lean_IR_ToIR_lowerLet___lambda__1(x_2, x_3654, x_3649, x_3656, x_3655, x_4, x_5, x_3653); +if (lean_obj_tag(x_3657) == 0) +{ +lean_object* x_3658; lean_object* x_3659; lean_object* x_3660; lean_object* x_3661; lean_object* x_3662; lean_object* x_3663; lean_object* x_3664; +x_3658 = lean_ctor_get(x_3657, 0); +lean_inc(x_3658); +x_3659 = lean_ctor_get(x_3657, 1); +lean_inc(x_3659); +lean_dec(x_3657); +x_3660 = lean_ctor_get(x_3658, 0); +lean_inc(x_3660); +x_3661 = lean_ctor_get(x_3658, 1); +lean_inc(x_3661); +if (lean_is_exclusive(x_3658)) { + lean_ctor_release(x_3658, 0); + lean_ctor_release(x_3658, 1); + x_3662 = x_3658; +} else { + lean_dec_ref(x_3658); + x_3662 = lean_box(0); +} +if (lean_is_scalar(x_3577)) { + x_3663 = lean_alloc_ctor(1, 1, 0); +} else { + x_3663 = x_3577; +} +lean_ctor_set(x_3663, 0, x_3660); +if (lean_is_scalar(x_3662)) { + x_3664 = lean_alloc_ctor(0, 2, 0); +} else { + x_3664 = x_3662; +} +lean_ctor_set(x_3664, 0, x_3663); +lean_ctor_set(x_3664, 1, x_3661); +x_3358 = x_3664; +x_3359 = x_3659; +goto block_3388; +} +else +{ +lean_object* x_3665; lean_object* x_3666; lean_object* x_3667; lean_object* x_3668; +lean_dec(x_3577); +lean_dec(x_3069); +lean_dec(x_3058); +lean_dec(x_153); +lean_dec(x_5); +lean_dec(x_4); +lean_dec(x_2); +lean_dec(x_1); +x_3665 = lean_ctor_get(x_3657, 0); +lean_inc(x_3665); +x_3666 = lean_ctor_get(x_3657, 1); +lean_inc(x_3666); +if (lean_is_exclusive(x_3657)) { + lean_ctor_release(x_3657, 0); + lean_ctor_release(x_3657, 1); + x_3667 = x_3657; +} else { + lean_dec_ref(x_3657); + x_3667 = lean_box(0); +} +if (lean_is_scalar(x_3667)) { + x_3668 = lean_alloc_ctor(1, 2, 0); +} else { + x_3668 = x_3667; +} +lean_ctor_set(x_3668, 0, x_3665); +lean_ctor_set(x_3668, 1, x_3666); +return x_3668; +} +} +} +} +block_3388: +{ +lean_object* x_3360; +x_3360 = lean_ctor_get(x_3358, 0); +lean_inc(x_3360); +if (lean_obj_tag(x_3360) == 0) +{ +lean_object* x_3361; lean_object* x_3362; lean_object* x_3363; lean_object* x_3364; lean_object* x_3365; lean_object* x_3366; lean_object* x_3367; lean_object* x_3368; lean_object* x_3369; lean_object* x_3370; +lean_dec(x_3069); +x_3361 = lean_ctor_get(x_3358, 1); +lean_inc(x_3361); +lean_dec(x_3358); +x_3362 = lean_alloc_ctor(6, 2, 0); +lean_ctor_set(x_3362, 0, x_153); +lean_ctor_set(x_3362, 1, x_3058); +x_3363 = lean_ctor_get(x_1, 0); +lean_inc(x_3363); +x_3364 = l_Lean_IR_ToIR_bindVar(x_3363, x_3361, x_4, x_5, x_3359); +x_3365 = lean_ctor_get(x_3364, 0); +lean_inc(x_3365); +x_3366 = lean_ctor_get(x_3364, 1); +lean_inc(x_3366); +lean_dec(x_3364); +x_3367 = lean_ctor_get(x_3365, 0); +lean_inc(x_3367); +x_3368 = lean_ctor_get(x_3365, 1); +lean_inc(x_3368); +lean_dec(x_3365); +x_3369 = lean_ctor_get(x_1, 2); +lean_inc(x_3369); +lean_dec(x_1); 
+lean_inc(x_5); +lean_inc(x_4); +x_3370 = l_Lean_IR_ToIR_lowerType(x_3369, x_3368, x_4, x_5, x_3366); +if (lean_obj_tag(x_3370) == 0) +{ +lean_object* x_3371; lean_object* x_3372; lean_object* x_3373; lean_object* x_3374; lean_object* x_3375; +x_3371 = lean_ctor_get(x_3370, 0); +lean_inc(x_3371); +x_3372 = lean_ctor_get(x_3370, 1); +lean_inc(x_3372); +lean_dec(x_3370); +x_3373 = lean_ctor_get(x_3371, 0); +lean_inc(x_3373); +x_3374 = lean_ctor_get(x_3371, 1); +lean_inc(x_3374); +lean_dec(x_3371); +x_3375 = l_Lean_IR_ToIR_lowerLet___lambda__1(x_2, x_3367, x_3362, x_3373, x_3374, x_4, x_5, x_3372); +return x_3375; +} +else +{ +uint8_t x_3376; +lean_dec(x_3367); +lean_dec(x_3362); +lean_dec(x_5); +lean_dec(x_4); +lean_dec(x_2); +x_3376 = !lean_is_exclusive(x_3370); +if (x_3376 == 0) +{ +return x_3370; +} +else +{ +lean_object* x_3377; lean_object* x_3378; lean_object* x_3379; +x_3377 = lean_ctor_get(x_3370, 0); +x_3378 = lean_ctor_get(x_3370, 1); +lean_inc(x_3378); +lean_inc(x_3377); +lean_dec(x_3370); +x_3379 = lean_alloc_ctor(1, 2, 0); +lean_ctor_set(x_3379, 0, x_3377); +lean_ctor_set(x_3379, 1, x_3378); +return x_3379; +} +} +} +else +{ +uint8_t x_3380; +lean_dec(x_3058); +lean_dec(x_153); +lean_dec(x_5); +lean_dec(x_4); +lean_dec(x_2); +lean_dec(x_1); +x_3380 = !lean_is_exclusive(x_3358); +if (x_3380 == 0) +{ +lean_object* x_3381; lean_object* x_3382; lean_object* x_3383; +x_3381 = lean_ctor_get(x_3358, 0); +lean_dec(x_3381); +x_3382 = lean_ctor_get(x_3360, 0); +lean_inc(x_3382); +lean_dec(x_3360); +lean_ctor_set(x_3358, 0, x_3382); +if (lean_is_scalar(x_3069)) { + x_3383 = lean_alloc_ctor(0, 2, 0); +} else { + x_3383 = x_3069; +} +lean_ctor_set(x_3383, 0, x_3358); +lean_ctor_set(x_3383, 1, x_3359); +return x_3383; +} +else +{ +lean_object* x_3384; lean_object* x_3385; lean_object* x_3386; lean_object* x_3387; +x_3384 = lean_ctor_get(x_3358, 1); +lean_inc(x_3384); +lean_dec(x_3358); +x_3385 = lean_ctor_get(x_3360, 0); +lean_inc(x_3385); +lean_dec(x_3360); +x_3386 = lean_alloc_ctor(0, 2, 0); +lean_ctor_set(x_3386, 0, x_3385); +lean_ctor_set(x_3386, 1, x_3384); +if (lean_is_scalar(x_3069)) { + x_3387 = lean_alloc_ctor(0, 2, 0); +} else { + x_3387 = x_3069; +} +lean_ctor_set(x_3387, 0, x_3386); +lean_ctor_set(x_3387, 1, x_3359); +return x_3387; +} +} +} +} +case 2: +{ +lean_object* x_3669; lean_object* x_3670; +lean_dec(x_3075); +lean_dec(x_3070); +lean_dec(x_3069); +lean_free_object(x_3060); +lean_dec(x_3058); +lean_dec(x_3050); +lean_dec(x_3049); +lean_dec(x_153); +lean_dec(x_2); +lean_dec(x_1); +x_3669 = l_Lean_IR_ToIR_lowerLet___closed__18; +x_3670 = l_panic___at_Lean_IR_ToIR_lowerAlt_loop___spec__1(x_3669, x_3064, x_4, x_5, x_3068); +return x_3670; +} +case 3: +{ +lean_object* x_3671; lean_object* x_3672; lean_object* x_3702; lean_object* x_3703; +lean_dec(x_3075); +lean_dec(x_3070); +lean_dec(x_3050); +lean_dec(x_3049); +lean_inc(x_153); +x_3702 = l_Lean_Compiler_LCNF_getMonoDecl_x3f(x_153, x_4, x_5, x_3068); +x_3703 = lean_ctor_get(x_3702, 0); +lean_inc(x_3703); +if (lean_obj_tag(x_3703) == 0) +{ +lean_object* x_3704; lean_object* x_3705; +x_3704 = lean_ctor_get(x_3702, 1); +lean_inc(x_3704); +lean_dec(x_3702); +x_3705 = lean_box(0); +lean_ctor_set(x_3060, 0, x_3705); +x_3671 = x_3060; +x_3672 = x_3704; +goto block_3701; +} +else +{ +uint8_t x_3706; +lean_free_object(x_3060); +x_3706 = !lean_is_exclusive(x_3702); +if (x_3706 == 0) +{ +lean_object* x_3707; lean_object* x_3708; uint8_t x_3709; +x_3707 = lean_ctor_get(x_3702, 1); +x_3708 = lean_ctor_get(x_3702, 0); +lean_dec(x_3708); 
+x_3709 = !lean_is_exclusive(x_3703); +if (x_3709 == 0) +{ +lean_object* x_3710; lean_object* x_3711; lean_object* x_3712; lean_object* x_3713; uint8_t x_3714; +x_3710 = lean_ctor_get(x_3703, 0); +x_3711 = lean_array_get_size(x_3058); +x_3712 = lean_ctor_get(x_3710, 3); +lean_inc(x_3712); +lean_dec(x_3710); +x_3713 = lean_array_get_size(x_3712); +lean_dec(x_3712); +x_3714 = lean_nat_dec_lt(x_3711, x_3713); +if (x_3714 == 0) +{ +uint8_t x_3715; +x_3715 = lean_nat_dec_eq(x_3711, x_3713); +if (x_3715 == 0) +{ +lean_object* x_3716; lean_object* x_3717; lean_object* x_3718; lean_object* x_3719; lean_object* x_3720; lean_object* x_3721; lean_object* x_3722; lean_object* x_3723; lean_object* x_3724; lean_object* x_3725; lean_object* x_3726; lean_object* x_3727; lean_object* x_3728; lean_object* x_3729; lean_object* x_3730; lean_object* x_3731; +x_3716 = lean_unsigned_to_nat(0u); +x_3717 = l_Array_extract___rarg(x_3058, x_3716, x_3713); +x_3718 = l_Array_extract___rarg(x_3058, x_3713, x_3711); +lean_dec(x_3711); +lean_inc(x_153); +lean_ctor_set_tag(x_3702, 6); +lean_ctor_set(x_3702, 1, x_3717); +lean_ctor_set(x_3702, 0, x_153); +x_3719 = lean_ctor_get(x_1, 0); +lean_inc(x_3719); +x_3720 = l_Lean_IR_ToIR_bindVar(x_3719, x_3064, x_4, x_5, x_3707); +x_3721 = lean_ctor_get(x_3720, 0); +lean_inc(x_3721); +x_3722 = lean_ctor_get(x_3720, 1); +lean_inc(x_3722); +lean_dec(x_3720); +x_3723 = lean_ctor_get(x_3721, 0); +lean_inc(x_3723); +x_3724 = lean_ctor_get(x_3721, 1); +lean_inc(x_3724); +lean_dec(x_3721); +x_3725 = l_Lean_IR_ToIR_newVar(x_3724, x_4, x_5, x_3722); +x_3726 = lean_ctor_get(x_3725, 0); +lean_inc(x_3726); +x_3727 = lean_ctor_get(x_3725, 1); +lean_inc(x_3727); +lean_dec(x_3725); +x_3728 = lean_ctor_get(x_3726, 0); +lean_inc(x_3728); +x_3729 = lean_ctor_get(x_3726, 1); +lean_inc(x_3729); +lean_dec(x_3726); +x_3730 = lean_ctor_get(x_1, 2); +lean_inc(x_3730); +lean_inc(x_5); +lean_inc(x_4); +x_3731 = l_Lean_IR_ToIR_lowerType(x_3730, x_3729, x_4, x_5, x_3727); +if (lean_obj_tag(x_3731) == 0) +{ +lean_object* x_3732; lean_object* x_3733; lean_object* x_3734; lean_object* x_3735; lean_object* x_3736; +x_3732 = lean_ctor_get(x_3731, 0); +lean_inc(x_3732); +x_3733 = lean_ctor_get(x_3731, 1); +lean_inc(x_3733); +lean_dec(x_3731); +x_3734 = lean_ctor_get(x_3732, 0); +lean_inc(x_3734); +x_3735 = lean_ctor_get(x_3732, 1); +lean_inc(x_3735); +lean_dec(x_3732); +lean_inc(x_5); +lean_inc(x_4); +lean_inc(x_2); +x_3736 = l_Lean_IR_ToIR_lowerLet___lambda__3(x_2, x_3728, x_3718, x_3723, x_3702, x_3734, x_3735, x_4, x_5, x_3733); +if (lean_obj_tag(x_3736) == 0) +{ +lean_object* x_3737; lean_object* x_3738; uint8_t x_3739; +x_3737 = lean_ctor_get(x_3736, 0); +lean_inc(x_3737); +x_3738 = lean_ctor_get(x_3736, 1); +lean_inc(x_3738); +lean_dec(x_3736); +x_3739 = !lean_is_exclusive(x_3737); +if (x_3739 == 0) +{ +lean_object* x_3740; +x_3740 = lean_ctor_get(x_3737, 0); +lean_ctor_set(x_3703, 0, x_3740); +lean_ctor_set(x_3737, 0, x_3703); +x_3671 = x_3737; +x_3672 = x_3738; +goto block_3701; +} +else +{ +lean_object* x_3741; lean_object* x_3742; lean_object* x_3743; +x_3741 = lean_ctor_get(x_3737, 0); +x_3742 = lean_ctor_get(x_3737, 1); +lean_inc(x_3742); +lean_inc(x_3741); +lean_dec(x_3737); +lean_ctor_set(x_3703, 0, x_3741); +x_3743 = lean_alloc_ctor(0, 2, 0); +lean_ctor_set(x_3743, 0, x_3703); +lean_ctor_set(x_3743, 1, x_3742); +x_3671 = x_3743; +x_3672 = x_3738; +goto block_3701; +} +} +else +{ +uint8_t x_3744; +lean_free_object(x_3703); +lean_dec(x_3069); +lean_dec(x_3058); +lean_dec(x_153); +lean_dec(x_5); 
+lean_dec(x_4); +lean_dec(x_2); +lean_dec(x_1); +x_3744 = !lean_is_exclusive(x_3736); +if (x_3744 == 0) +{ +return x_3736; +} +else +{ +lean_object* x_3745; lean_object* x_3746; lean_object* x_3747; +x_3745 = lean_ctor_get(x_3736, 0); +x_3746 = lean_ctor_get(x_3736, 1); +lean_inc(x_3746); +lean_inc(x_3745); +lean_dec(x_3736); +x_3747 = lean_alloc_ctor(1, 2, 0); +lean_ctor_set(x_3747, 0, x_3745); +lean_ctor_set(x_3747, 1, x_3746); +return x_3747; +} +} +} +else +{ +uint8_t x_3748; +lean_dec(x_3728); +lean_dec(x_3723); +lean_dec(x_3702); +lean_dec(x_3718); +lean_free_object(x_3703); +lean_dec(x_3069); +lean_dec(x_3058); +lean_dec(x_153); +lean_dec(x_5); +lean_dec(x_4); +lean_dec(x_2); +lean_dec(x_1); +x_3748 = !lean_is_exclusive(x_3731); +if (x_3748 == 0) +{ +return x_3731; +} +else +{ +lean_object* x_3749; lean_object* x_3750; lean_object* x_3751; +x_3749 = lean_ctor_get(x_3731, 0); +x_3750 = lean_ctor_get(x_3731, 1); +lean_inc(x_3750); +lean_inc(x_3749); +lean_dec(x_3731); +x_3751 = lean_alloc_ctor(1, 2, 0); +lean_ctor_set(x_3751, 0, x_3749); +lean_ctor_set(x_3751, 1, x_3750); +return x_3751; +} +} +} +else +{ +lean_object* x_3752; lean_object* x_3753; lean_object* x_3754; lean_object* x_3755; lean_object* x_3756; lean_object* x_3757; lean_object* x_3758; lean_object* x_3759; +lean_dec(x_3713); +lean_dec(x_3711); +lean_inc(x_3058); +lean_inc(x_153); +lean_ctor_set_tag(x_3702, 6); +lean_ctor_set(x_3702, 1, x_3058); +lean_ctor_set(x_3702, 0, x_153); +x_3752 = lean_ctor_get(x_1, 0); +lean_inc(x_3752); +x_3753 = l_Lean_IR_ToIR_bindVar(x_3752, x_3064, x_4, x_5, x_3707); +x_3754 = lean_ctor_get(x_3753, 0); +lean_inc(x_3754); +x_3755 = lean_ctor_get(x_3753, 1); +lean_inc(x_3755); +lean_dec(x_3753); +x_3756 = lean_ctor_get(x_3754, 0); +lean_inc(x_3756); +x_3757 = lean_ctor_get(x_3754, 1); +lean_inc(x_3757); +lean_dec(x_3754); +x_3758 = lean_ctor_get(x_1, 2); +lean_inc(x_3758); +lean_inc(x_5); +lean_inc(x_4); +x_3759 = l_Lean_IR_ToIR_lowerType(x_3758, x_3757, x_4, x_5, x_3755); +if (lean_obj_tag(x_3759) == 0) +{ +lean_object* x_3760; lean_object* x_3761; lean_object* x_3762; lean_object* x_3763; lean_object* x_3764; +x_3760 = lean_ctor_get(x_3759, 0); +lean_inc(x_3760); +x_3761 = lean_ctor_get(x_3759, 1); +lean_inc(x_3761); +lean_dec(x_3759); +x_3762 = lean_ctor_get(x_3760, 0); +lean_inc(x_3762); +x_3763 = lean_ctor_get(x_3760, 1); +lean_inc(x_3763); +lean_dec(x_3760); +lean_inc(x_5); +lean_inc(x_4); +lean_inc(x_2); +x_3764 = l_Lean_IR_ToIR_lowerLet___lambda__1(x_2, x_3756, x_3702, x_3762, x_3763, x_4, x_5, x_3761); +if (lean_obj_tag(x_3764) == 0) +{ +lean_object* x_3765; lean_object* x_3766; uint8_t x_3767; +x_3765 = lean_ctor_get(x_3764, 0); +lean_inc(x_3765); +x_3766 = lean_ctor_get(x_3764, 1); +lean_inc(x_3766); +lean_dec(x_3764); +x_3767 = !lean_is_exclusive(x_3765); +if (x_3767 == 0) +{ +lean_object* x_3768; +x_3768 = lean_ctor_get(x_3765, 0); +lean_ctor_set(x_3703, 0, x_3768); +lean_ctor_set(x_3765, 0, x_3703); +x_3671 = x_3765; +x_3672 = x_3766; +goto block_3701; +} +else +{ +lean_object* x_3769; lean_object* x_3770; lean_object* x_3771; +x_3769 = lean_ctor_get(x_3765, 0); +x_3770 = lean_ctor_get(x_3765, 1); +lean_inc(x_3770); +lean_inc(x_3769); +lean_dec(x_3765); +lean_ctor_set(x_3703, 0, x_3769); +x_3771 = lean_alloc_ctor(0, 2, 0); +lean_ctor_set(x_3771, 0, x_3703); +lean_ctor_set(x_3771, 1, x_3770); +x_3671 = x_3771; +x_3672 = x_3766; +goto block_3701; +} +} +else +{ +uint8_t x_3772; +lean_free_object(x_3703); +lean_dec(x_3069); +lean_dec(x_3058); +lean_dec(x_153); +lean_dec(x_5); 
+lean_dec(x_4); +lean_dec(x_2); +lean_dec(x_1); +x_3772 = !lean_is_exclusive(x_3764); +if (x_3772 == 0) +{ +return x_3764; +} +else +{ +lean_object* x_3773; lean_object* x_3774; lean_object* x_3775; +x_3773 = lean_ctor_get(x_3764, 0); +x_3774 = lean_ctor_get(x_3764, 1); +lean_inc(x_3774); +lean_inc(x_3773); +lean_dec(x_3764); +x_3775 = lean_alloc_ctor(1, 2, 0); +lean_ctor_set(x_3775, 0, x_3773); +lean_ctor_set(x_3775, 1, x_3774); +return x_3775; +} +} +} +else +{ +uint8_t x_3776; +lean_dec(x_3756); +lean_dec(x_3702); +lean_free_object(x_3703); +lean_dec(x_3069); +lean_dec(x_3058); +lean_dec(x_153); +lean_dec(x_5); +lean_dec(x_4); +lean_dec(x_2); +lean_dec(x_1); +x_3776 = !lean_is_exclusive(x_3759); +if (x_3776 == 0) +{ +return x_3759; +} +else +{ +lean_object* x_3777; lean_object* x_3778; lean_object* x_3779; +x_3777 = lean_ctor_get(x_3759, 0); +x_3778 = lean_ctor_get(x_3759, 1); +lean_inc(x_3778); +lean_inc(x_3777); +lean_dec(x_3759); +x_3779 = lean_alloc_ctor(1, 2, 0); +lean_ctor_set(x_3779, 0, x_3777); +lean_ctor_set(x_3779, 1, x_3778); +return x_3779; +} +} +} +} +else +{ +lean_object* x_3780; lean_object* x_3781; lean_object* x_3782; lean_object* x_3783; lean_object* x_3784; lean_object* x_3785; lean_object* x_3786; lean_object* x_3787; +lean_dec(x_3713); +lean_dec(x_3711); +lean_inc(x_3058); +lean_inc(x_153); +lean_ctor_set_tag(x_3702, 7); +lean_ctor_set(x_3702, 1, x_3058); +lean_ctor_set(x_3702, 0, x_153); +x_3780 = lean_ctor_get(x_1, 0); +lean_inc(x_3780); +x_3781 = l_Lean_IR_ToIR_bindVar(x_3780, x_3064, x_4, x_5, x_3707); +x_3782 = lean_ctor_get(x_3781, 0); +lean_inc(x_3782); +x_3783 = lean_ctor_get(x_3781, 1); +lean_inc(x_3783); +lean_dec(x_3781); +x_3784 = lean_ctor_get(x_3782, 0); +lean_inc(x_3784); +x_3785 = lean_ctor_get(x_3782, 1); +lean_inc(x_3785); +lean_dec(x_3782); +x_3786 = lean_box(7); +lean_inc(x_5); +lean_inc(x_4); +lean_inc(x_2); +x_3787 = l_Lean_IR_ToIR_lowerLet___lambda__1(x_2, x_3784, x_3702, x_3786, x_3785, x_4, x_5, x_3783); +if (lean_obj_tag(x_3787) == 0) +{ +lean_object* x_3788; lean_object* x_3789; uint8_t x_3790; +x_3788 = lean_ctor_get(x_3787, 0); +lean_inc(x_3788); +x_3789 = lean_ctor_get(x_3787, 1); +lean_inc(x_3789); +lean_dec(x_3787); +x_3790 = !lean_is_exclusive(x_3788); +if (x_3790 == 0) +{ +lean_object* x_3791; +x_3791 = lean_ctor_get(x_3788, 0); +lean_ctor_set(x_3703, 0, x_3791); +lean_ctor_set(x_3788, 0, x_3703); +x_3671 = x_3788; +x_3672 = x_3789; +goto block_3701; +} +else +{ +lean_object* x_3792; lean_object* x_3793; lean_object* x_3794; +x_3792 = lean_ctor_get(x_3788, 0); +x_3793 = lean_ctor_get(x_3788, 1); +lean_inc(x_3793); +lean_inc(x_3792); +lean_dec(x_3788); +lean_ctor_set(x_3703, 0, x_3792); +x_3794 = lean_alloc_ctor(0, 2, 0); +lean_ctor_set(x_3794, 0, x_3703); +lean_ctor_set(x_3794, 1, x_3793); +x_3671 = x_3794; +x_3672 = x_3789; +goto block_3701; +} +} +else +{ +uint8_t x_3795; +lean_free_object(x_3703); +lean_dec(x_3069); +lean_dec(x_3058); +lean_dec(x_153); +lean_dec(x_5); +lean_dec(x_4); +lean_dec(x_2); +lean_dec(x_1); +x_3795 = !lean_is_exclusive(x_3787); +if (x_3795 == 0) +{ +return x_3787; +} +else +{ +lean_object* x_3796; lean_object* x_3797; lean_object* x_3798; +x_3796 = lean_ctor_get(x_3787, 0); +x_3797 = lean_ctor_get(x_3787, 1); +lean_inc(x_3797); +lean_inc(x_3796); +lean_dec(x_3787); +x_3798 = lean_alloc_ctor(1, 2, 0); +lean_ctor_set(x_3798, 0, x_3796); +lean_ctor_set(x_3798, 1, x_3797); +return x_3798; +} +} +} +} +else +{ +lean_object* x_3799; lean_object* x_3800; lean_object* x_3801; lean_object* x_3802; uint8_t x_3803; 
+x_3799 = lean_ctor_get(x_3703, 0); +lean_inc(x_3799); +lean_dec(x_3703); +x_3800 = lean_array_get_size(x_3058); +x_3801 = lean_ctor_get(x_3799, 3); +lean_inc(x_3801); +lean_dec(x_3799); +x_3802 = lean_array_get_size(x_3801); +lean_dec(x_3801); +x_3803 = lean_nat_dec_lt(x_3800, x_3802); +if (x_3803 == 0) +{ +uint8_t x_3804; +x_3804 = lean_nat_dec_eq(x_3800, x_3802); +if (x_3804 == 0) +{ +lean_object* x_3805; lean_object* x_3806; lean_object* x_3807; lean_object* x_3808; lean_object* x_3809; lean_object* x_3810; lean_object* x_3811; lean_object* x_3812; lean_object* x_3813; lean_object* x_3814; lean_object* x_3815; lean_object* x_3816; lean_object* x_3817; lean_object* x_3818; lean_object* x_3819; lean_object* x_3820; +x_3805 = lean_unsigned_to_nat(0u); +x_3806 = l_Array_extract___rarg(x_3058, x_3805, x_3802); +x_3807 = l_Array_extract___rarg(x_3058, x_3802, x_3800); +lean_dec(x_3800); +lean_inc(x_153); +lean_ctor_set_tag(x_3702, 6); +lean_ctor_set(x_3702, 1, x_3806); +lean_ctor_set(x_3702, 0, x_153); +x_3808 = lean_ctor_get(x_1, 0); +lean_inc(x_3808); +x_3809 = l_Lean_IR_ToIR_bindVar(x_3808, x_3064, x_4, x_5, x_3707); +x_3810 = lean_ctor_get(x_3809, 0); +lean_inc(x_3810); +x_3811 = lean_ctor_get(x_3809, 1); +lean_inc(x_3811); +lean_dec(x_3809); +x_3812 = lean_ctor_get(x_3810, 0); +lean_inc(x_3812); +x_3813 = lean_ctor_get(x_3810, 1); +lean_inc(x_3813); +lean_dec(x_3810); +x_3814 = l_Lean_IR_ToIR_newVar(x_3813, x_4, x_5, x_3811); +x_3815 = lean_ctor_get(x_3814, 0); +lean_inc(x_3815); +x_3816 = lean_ctor_get(x_3814, 1); +lean_inc(x_3816); +lean_dec(x_3814); +x_3817 = lean_ctor_get(x_3815, 0); +lean_inc(x_3817); +x_3818 = lean_ctor_get(x_3815, 1); +lean_inc(x_3818); +lean_dec(x_3815); +x_3819 = lean_ctor_get(x_1, 2); +lean_inc(x_3819); +lean_inc(x_5); +lean_inc(x_4); +x_3820 = l_Lean_IR_ToIR_lowerType(x_3819, x_3818, x_4, x_5, x_3816); +if (lean_obj_tag(x_3820) == 0) +{ +lean_object* x_3821; lean_object* x_3822; lean_object* x_3823; lean_object* x_3824; lean_object* x_3825; +x_3821 = lean_ctor_get(x_3820, 0); +lean_inc(x_3821); +x_3822 = lean_ctor_get(x_3820, 1); +lean_inc(x_3822); +lean_dec(x_3820); +x_3823 = lean_ctor_get(x_3821, 0); +lean_inc(x_3823); +x_3824 = lean_ctor_get(x_3821, 1); +lean_inc(x_3824); +lean_dec(x_3821); +lean_inc(x_5); +lean_inc(x_4); +lean_inc(x_2); +x_3825 = l_Lean_IR_ToIR_lowerLet___lambda__3(x_2, x_3817, x_3807, x_3812, x_3702, x_3823, x_3824, x_4, x_5, x_3822); +if (lean_obj_tag(x_3825) == 0) +{ +lean_object* x_3826; lean_object* x_3827; lean_object* x_3828; lean_object* x_3829; lean_object* x_3830; lean_object* x_3831; lean_object* x_3832; +x_3826 = lean_ctor_get(x_3825, 0); +lean_inc(x_3826); +x_3827 = lean_ctor_get(x_3825, 1); +lean_inc(x_3827); +lean_dec(x_3825); +x_3828 = lean_ctor_get(x_3826, 0); +lean_inc(x_3828); +x_3829 = lean_ctor_get(x_3826, 1); +lean_inc(x_3829); +if (lean_is_exclusive(x_3826)) { + lean_ctor_release(x_3826, 0); + lean_ctor_release(x_3826, 1); + x_3830 = x_3826; +} else { + lean_dec_ref(x_3826); + x_3830 = lean_box(0); +} +x_3831 = lean_alloc_ctor(1, 1, 0); +lean_ctor_set(x_3831, 0, x_3828); +if (lean_is_scalar(x_3830)) { + x_3832 = lean_alloc_ctor(0, 2, 0); +} else { + x_3832 = x_3830; +} +lean_ctor_set(x_3832, 0, x_3831); +lean_ctor_set(x_3832, 1, x_3829); +x_3671 = x_3832; +x_3672 = x_3827; +goto block_3701; +} +else +{ +lean_object* x_3833; lean_object* x_3834; lean_object* x_3835; lean_object* x_3836; +lean_dec(x_3069); +lean_dec(x_3058); +lean_dec(x_153); +lean_dec(x_5); +lean_dec(x_4); +lean_dec(x_2); +lean_dec(x_1); +x_3833 = 
lean_ctor_get(x_3825, 0); +lean_inc(x_3833); +x_3834 = lean_ctor_get(x_3825, 1); +lean_inc(x_3834); +if (lean_is_exclusive(x_3825)) { + lean_ctor_release(x_3825, 0); + lean_ctor_release(x_3825, 1); + x_3835 = x_3825; +} else { + lean_dec_ref(x_3825); + x_3835 = lean_box(0); +} +if (lean_is_scalar(x_3835)) { + x_3836 = lean_alloc_ctor(1, 2, 0); +} else { + x_3836 = x_3835; +} +lean_ctor_set(x_3836, 0, x_3833); +lean_ctor_set(x_3836, 1, x_3834); +return x_3836; +} +} +else +{ +lean_object* x_3837; lean_object* x_3838; lean_object* x_3839; lean_object* x_3840; +lean_dec(x_3817); +lean_dec(x_3812); +lean_dec(x_3702); +lean_dec(x_3807); +lean_dec(x_3069); +lean_dec(x_3058); +lean_dec(x_153); +lean_dec(x_5); +lean_dec(x_4); +lean_dec(x_2); +lean_dec(x_1); +x_3837 = lean_ctor_get(x_3820, 0); +lean_inc(x_3837); +x_3838 = lean_ctor_get(x_3820, 1); +lean_inc(x_3838); +if (lean_is_exclusive(x_3820)) { + lean_ctor_release(x_3820, 0); + lean_ctor_release(x_3820, 1); + x_3839 = x_3820; +} else { + lean_dec_ref(x_3820); + x_3839 = lean_box(0); +} +if (lean_is_scalar(x_3839)) { + x_3840 = lean_alloc_ctor(1, 2, 0); +} else { + x_3840 = x_3839; +} +lean_ctor_set(x_3840, 0, x_3837); +lean_ctor_set(x_3840, 1, x_3838); +return x_3840; +} +} +else +{ +lean_object* x_3841; lean_object* x_3842; lean_object* x_3843; lean_object* x_3844; lean_object* x_3845; lean_object* x_3846; lean_object* x_3847; lean_object* x_3848; +lean_dec(x_3802); +lean_dec(x_3800); +lean_inc(x_3058); +lean_inc(x_153); +lean_ctor_set_tag(x_3702, 6); +lean_ctor_set(x_3702, 1, x_3058); +lean_ctor_set(x_3702, 0, x_153); +x_3841 = lean_ctor_get(x_1, 0); +lean_inc(x_3841); +x_3842 = l_Lean_IR_ToIR_bindVar(x_3841, x_3064, x_4, x_5, x_3707); +x_3843 = lean_ctor_get(x_3842, 0); +lean_inc(x_3843); +x_3844 = lean_ctor_get(x_3842, 1); +lean_inc(x_3844); +lean_dec(x_3842); +x_3845 = lean_ctor_get(x_3843, 0); +lean_inc(x_3845); +x_3846 = lean_ctor_get(x_3843, 1); +lean_inc(x_3846); +lean_dec(x_3843); +x_3847 = lean_ctor_get(x_1, 2); +lean_inc(x_3847); +lean_inc(x_5); +lean_inc(x_4); +x_3848 = l_Lean_IR_ToIR_lowerType(x_3847, x_3846, x_4, x_5, x_3844); +if (lean_obj_tag(x_3848) == 0) +{ +lean_object* x_3849; lean_object* x_3850; lean_object* x_3851; lean_object* x_3852; lean_object* x_3853; +x_3849 = lean_ctor_get(x_3848, 0); +lean_inc(x_3849); +x_3850 = lean_ctor_get(x_3848, 1); +lean_inc(x_3850); +lean_dec(x_3848); +x_3851 = lean_ctor_get(x_3849, 0); +lean_inc(x_3851); +x_3852 = lean_ctor_get(x_3849, 1); +lean_inc(x_3852); +lean_dec(x_3849); +lean_inc(x_5); +lean_inc(x_4); +lean_inc(x_2); +x_3853 = l_Lean_IR_ToIR_lowerLet___lambda__1(x_2, x_3845, x_3702, x_3851, x_3852, x_4, x_5, x_3850); +if (lean_obj_tag(x_3853) == 0) +{ +lean_object* x_3854; lean_object* x_3855; lean_object* x_3856; lean_object* x_3857; lean_object* x_3858; lean_object* x_3859; lean_object* x_3860; +x_3854 = lean_ctor_get(x_3853, 0); +lean_inc(x_3854); +x_3855 = lean_ctor_get(x_3853, 1); +lean_inc(x_3855); +lean_dec(x_3853); +x_3856 = lean_ctor_get(x_3854, 0); +lean_inc(x_3856); +x_3857 = lean_ctor_get(x_3854, 1); +lean_inc(x_3857); +if (lean_is_exclusive(x_3854)) { + lean_ctor_release(x_3854, 0); + lean_ctor_release(x_3854, 1); + x_3858 = x_3854; +} else { + lean_dec_ref(x_3854); + x_3858 = lean_box(0); +} +x_3859 = lean_alloc_ctor(1, 1, 0); +lean_ctor_set(x_3859, 0, x_3856); +if (lean_is_scalar(x_3858)) { + x_3860 = lean_alloc_ctor(0, 2, 0); +} else { + x_3860 = x_3858; +} +lean_ctor_set(x_3860, 0, x_3859); +lean_ctor_set(x_3860, 1, x_3857); +x_3671 = x_3860; +x_3672 = x_3855; 
+goto block_3701; +} +else +{ +lean_object* x_3861; lean_object* x_3862; lean_object* x_3863; lean_object* x_3864; +lean_dec(x_3069); +lean_dec(x_3058); +lean_dec(x_153); +lean_dec(x_5); +lean_dec(x_4); +lean_dec(x_2); +lean_dec(x_1); +x_3861 = lean_ctor_get(x_3853, 0); +lean_inc(x_3861); +x_3862 = lean_ctor_get(x_3853, 1); +lean_inc(x_3862); +if (lean_is_exclusive(x_3853)) { + lean_ctor_release(x_3853, 0); + lean_ctor_release(x_3853, 1); + x_3863 = x_3853; +} else { + lean_dec_ref(x_3853); + x_3863 = lean_box(0); +} +if (lean_is_scalar(x_3863)) { + x_3864 = lean_alloc_ctor(1, 2, 0); +} else { + x_3864 = x_3863; +} +lean_ctor_set(x_3864, 0, x_3861); +lean_ctor_set(x_3864, 1, x_3862); +return x_3864; +} +} +else +{ +lean_object* x_3865; lean_object* x_3866; lean_object* x_3867; lean_object* x_3868; +lean_dec(x_3845); +lean_dec(x_3702); +lean_dec(x_3069); +lean_dec(x_3058); +lean_dec(x_153); +lean_dec(x_5); +lean_dec(x_4); +lean_dec(x_2); +lean_dec(x_1); +x_3865 = lean_ctor_get(x_3848, 0); +lean_inc(x_3865); +x_3866 = lean_ctor_get(x_3848, 1); +lean_inc(x_3866); +if (lean_is_exclusive(x_3848)) { + lean_ctor_release(x_3848, 0); + lean_ctor_release(x_3848, 1); + x_3867 = x_3848; +} else { + lean_dec_ref(x_3848); + x_3867 = lean_box(0); +} +if (lean_is_scalar(x_3867)) { + x_3868 = lean_alloc_ctor(1, 2, 0); +} else { + x_3868 = x_3867; +} +lean_ctor_set(x_3868, 0, x_3865); +lean_ctor_set(x_3868, 1, x_3866); +return x_3868; +} +} +} +else +{ +lean_object* x_3869; lean_object* x_3870; lean_object* x_3871; lean_object* x_3872; lean_object* x_3873; lean_object* x_3874; lean_object* x_3875; lean_object* x_3876; +lean_dec(x_3802); +lean_dec(x_3800); +lean_inc(x_3058); +lean_inc(x_153); +lean_ctor_set_tag(x_3702, 7); +lean_ctor_set(x_3702, 1, x_3058); +lean_ctor_set(x_3702, 0, x_153); +x_3869 = lean_ctor_get(x_1, 0); +lean_inc(x_3869); +x_3870 = l_Lean_IR_ToIR_bindVar(x_3869, x_3064, x_4, x_5, x_3707); +x_3871 = lean_ctor_get(x_3870, 0); +lean_inc(x_3871); +x_3872 = lean_ctor_get(x_3870, 1); +lean_inc(x_3872); +lean_dec(x_3870); +x_3873 = lean_ctor_get(x_3871, 0); +lean_inc(x_3873); +x_3874 = lean_ctor_get(x_3871, 1); +lean_inc(x_3874); +lean_dec(x_3871); +x_3875 = lean_box(7); +lean_inc(x_5); +lean_inc(x_4); +lean_inc(x_2); +x_3876 = l_Lean_IR_ToIR_lowerLet___lambda__1(x_2, x_3873, x_3702, x_3875, x_3874, x_4, x_5, x_3872); +if (lean_obj_tag(x_3876) == 0) +{ +lean_object* x_3877; lean_object* x_3878; lean_object* x_3879; lean_object* x_3880; lean_object* x_3881; lean_object* x_3882; lean_object* x_3883; +x_3877 = lean_ctor_get(x_3876, 0); +lean_inc(x_3877); +x_3878 = lean_ctor_get(x_3876, 1); +lean_inc(x_3878); +lean_dec(x_3876); +x_3879 = lean_ctor_get(x_3877, 0); +lean_inc(x_3879); +x_3880 = lean_ctor_get(x_3877, 1); +lean_inc(x_3880); +if (lean_is_exclusive(x_3877)) { + lean_ctor_release(x_3877, 0); + lean_ctor_release(x_3877, 1); + x_3881 = x_3877; +} else { + lean_dec_ref(x_3877); + x_3881 = lean_box(0); +} +x_3882 = lean_alloc_ctor(1, 1, 0); +lean_ctor_set(x_3882, 0, x_3879); +if (lean_is_scalar(x_3881)) { + x_3883 = lean_alloc_ctor(0, 2, 0); +} else { + x_3883 = x_3881; +} +lean_ctor_set(x_3883, 0, x_3882); +lean_ctor_set(x_3883, 1, x_3880); +x_3671 = x_3883; +x_3672 = x_3878; +goto block_3701; +} +else +{ +lean_object* x_3884; lean_object* x_3885; lean_object* x_3886; lean_object* x_3887; +lean_dec(x_3069); +lean_dec(x_3058); +lean_dec(x_153); +lean_dec(x_5); +lean_dec(x_4); +lean_dec(x_2); +lean_dec(x_1); +x_3884 = lean_ctor_get(x_3876, 0); +lean_inc(x_3884); +x_3885 = lean_ctor_get(x_3876, 
1); +lean_inc(x_3885); +if (lean_is_exclusive(x_3876)) { + lean_ctor_release(x_3876, 0); + lean_ctor_release(x_3876, 1); + x_3886 = x_3876; +} else { + lean_dec_ref(x_3876); + x_3886 = lean_box(0); +} +if (lean_is_scalar(x_3886)) { + x_3887 = lean_alloc_ctor(1, 2, 0); +} else { + x_3887 = x_3886; +} +lean_ctor_set(x_3887, 0, x_3884); +lean_ctor_set(x_3887, 1, x_3885); +return x_3887; +} +} +} +} +else +{ +lean_object* x_3888; lean_object* x_3889; lean_object* x_3890; lean_object* x_3891; lean_object* x_3892; lean_object* x_3893; uint8_t x_3894; +x_3888 = lean_ctor_get(x_3702, 1); +lean_inc(x_3888); +lean_dec(x_3702); +x_3889 = lean_ctor_get(x_3703, 0); +lean_inc(x_3889); +if (lean_is_exclusive(x_3703)) { + lean_ctor_release(x_3703, 0); + x_3890 = x_3703; +} else { + lean_dec_ref(x_3703); + x_3890 = lean_box(0); +} +x_3891 = lean_array_get_size(x_3058); +x_3892 = lean_ctor_get(x_3889, 3); +lean_inc(x_3892); +lean_dec(x_3889); +x_3893 = lean_array_get_size(x_3892); +lean_dec(x_3892); +x_3894 = lean_nat_dec_lt(x_3891, x_3893); +if (x_3894 == 0) +{ +uint8_t x_3895; +x_3895 = lean_nat_dec_eq(x_3891, x_3893); +if (x_3895 == 0) +{ +lean_object* x_3896; lean_object* x_3897; lean_object* x_3898; lean_object* x_3899; lean_object* x_3900; lean_object* x_3901; lean_object* x_3902; lean_object* x_3903; lean_object* x_3904; lean_object* x_3905; lean_object* x_3906; lean_object* x_3907; lean_object* x_3908; lean_object* x_3909; lean_object* x_3910; lean_object* x_3911; lean_object* x_3912; +x_3896 = lean_unsigned_to_nat(0u); +x_3897 = l_Array_extract___rarg(x_3058, x_3896, x_3893); +x_3898 = l_Array_extract___rarg(x_3058, x_3893, x_3891); +lean_dec(x_3891); +lean_inc(x_153); +x_3899 = lean_alloc_ctor(6, 2, 0); +lean_ctor_set(x_3899, 0, x_153); +lean_ctor_set(x_3899, 1, x_3897); +x_3900 = lean_ctor_get(x_1, 0); +lean_inc(x_3900); +x_3901 = l_Lean_IR_ToIR_bindVar(x_3900, x_3064, x_4, x_5, x_3888); +x_3902 = lean_ctor_get(x_3901, 0); +lean_inc(x_3902); +x_3903 = lean_ctor_get(x_3901, 1); +lean_inc(x_3903); +lean_dec(x_3901); +x_3904 = lean_ctor_get(x_3902, 0); +lean_inc(x_3904); +x_3905 = lean_ctor_get(x_3902, 1); +lean_inc(x_3905); +lean_dec(x_3902); +x_3906 = l_Lean_IR_ToIR_newVar(x_3905, x_4, x_5, x_3903); +x_3907 = lean_ctor_get(x_3906, 0); +lean_inc(x_3907); +x_3908 = lean_ctor_get(x_3906, 1); +lean_inc(x_3908); +lean_dec(x_3906); +x_3909 = lean_ctor_get(x_3907, 0); +lean_inc(x_3909); +x_3910 = lean_ctor_get(x_3907, 1); +lean_inc(x_3910); +lean_dec(x_3907); +x_3911 = lean_ctor_get(x_1, 2); +lean_inc(x_3911); +lean_inc(x_5); +lean_inc(x_4); +x_3912 = l_Lean_IR_ToIR_lowerType(x_3911, x_3910, x_4, x_5, x_3908); +if (lean_obj_tag(x_3912) == 0) +{ +lean_object* x_3913; lean_object* x_3914; lean_object* x_3915; lean_object* x_3916; lean_object* x_3917; +x_3913 = lean_ctor_get(x_3912, 0); +lean_inc(x_3913); +x_3914 = lean_ctor_get(x_3912, 1); +lean_inc(x_3914); +lean_dec(x_3912); +x_3915 = lean_ctor_get(x_3913, 0); +lean_inc(x_3915); +x_3916 = lean_ctor_get(x_3913, 1); +lean_inc(x_3916); +lean_dec(x_3913); +lean_inc(x_5); +lean_inc(x_4); +lean_inc(x_2); +x_3917 = l_Lean_IR_ToIR_lowerLet___lambda__3(x_2, x_3909, x_3898, x_3904, x_3899, x_3915, x_3916, x_4, x_5, x_3914); +if (lean_obj_tag(x_3917) == 0) +{ +lean_object* x_3918; lean_object* x_3919; lean_object* x_3920; lean_object* x_3921; lean_object* x_3922; lean_object* x_3923; lean_object* x_3924; +x_3918 = lean_ctor_get(x_3917, 0); +lean_inc(x_3918); +x_3919 = lean_ctor_get(x_3917, 1); +lean_inc(x_3919); +lean_dec(x_3917); +x_3920 = lean_ctor_get(x_3918, 
0); +lean_inc(x_3920); +x_3921 = lean_ctor_get(x_3918, 1); +lean_inc(x_3921); +if (lean_is_exclusive(x_3918)) { + lean_ctor_release(x_3918, 0); + lean_ctor_release(x_3918, 1); + x_3922 = x_3918; +} else { + lean_dec_ref(x_3918); + x_3922 = lean_box(0); +} +if (lean_is_scalar(x_3890)) { + x_3923 = lean_alloc_ctor(1, 1, 0); +} else { + x_3923 = x_3890; +} +lean_ctor_set(x_3923, 0, x_3920); +if (lean_is_scalar(x_3922)) { + x_3924 = lean_alloc_ctor(0, 2, 0); +} else { + x_3924 = x_3922; +} +lean_ctor_set(x_3924, 0, x_3923); +lean_ctor_set(x_3924, 1, x_3921); +x_3671 = x_3924; +x_3672 = x_3919; +goto block_3701; +} +else +{ +lean_object* x_3925; lean_object* x_3926; lean_object* x_3927; lean_object* x_3928; +lean_dec(x_3890); +lean_dec(x_3069); +lean_dec(x_3058); +lean_dec(x_153); +lean_dec(x_5); +lean_dec(x_4); +lean_dec(x_2); +lean_dec(x_1); +x_3925 = lean_ctor_get(x_3917, 0); +lean_inc(x_3925); +x_3926 = lean_ctor_get(x_3917, 1); +lean_inc(x_3926); +if (lean_is_exclusive(x_3917)) { + lean_ctor_release(x_3917, 0); + lean_ctor_release(x_3917, 1); + x_3927 = x_3917; +} else { + lean_dec_ref(x_3917); + x_3927 = lean_box(0); +} +if (lean_is_scalar(x_3927)) { + x_3928 = lean_alloc_ctor(1, 2, 0); +} else { + x_3928 = x_3927; +} +lean_ctor_set(x_3928, 0, x_3925); +lean_ctor_set(x_3928, 1, x_3926); +return x_3928; +} +} +else +{ +lean_object* x_3929; lean_object* x_3930; lean_object* x_3931; lean_object* x_3932; +lean_dec(x_3909); +lean_dec(x_3904); +lean_dec(x_3899); +lean_dec(x_3898); +lean_dec(x_3890); +lean_dec(x_3069); +lean_dec(x_3058); +lean_dec(x_153); +lean_dec(x_5); +lean_dec(x_4); +lean_dec(x_2); +lean_dec(x_1); +x_3929 = lean_ctor_get(x_3912, 0); +lean_inc(x_3929); +x_3930 = lean_ctor_get(x_3912, 1); +lean_inc(x_3930); +if (lean_is_exclusive(x_3912)) { + lean_ctor_release(x_3912, 0); + lean_ctor_release(x_3912, 1); + x_3931 = x_3912; +} else { + lean_dec_ref(x_3912); + x_3931 = lean_box(0); +} +if (lean_is_scalar(x_3931)) { + x_3932 = lean_alloc_ctor(1, 2, 0); +} else { + x_3932 = x_3931; +} +lean_ctor_set(x_3932, 0, x_3929); +lean_ctor_set(x_3932, 1, x_3930); +return x_3932; +} +} +else +{ +lean_object* x_3933; lean_object* x_3934; lean_object* x_3935; lean_object* x_3936; lean_object* x_3937; lean_object* x_3938; lean_object* x_3939; lean_object* x_3940; lean_object* x_3941; +lean_dec(x_3893); +lean_dec(x_3891); +lean_inc(x_3058); +lean_inc(x_153); +x_3933 = lean_alloc_ctor(6, 2, 0); +lean_ctor_set(x_3933, 0, x_153); +lean_ctor_set(x_3933, 1, x_3058); +x_3934 = lean_ctor_get(x_1, 0); +lean_inc(x_3934); +x_3935 = l_Lean_IR_ToIR_bindVar(x_3934, x_3064, x_4, x_5, x_3888); +x_3936 = lean_ctor_get(x_3935, 0); +lean_inc(x_3936); +x_3937 = lean_ctor_get(x_3935, 1); +lean_inc(x_3937); +lean_dec(x_3935); +x_3938 = lean_ctor_get(x_3936, 0); +lean_inc(x_3938); +x_3939 = lean_ctor_get(x_3936, 1); +lean_inc(x_3939); +lean_dec(x_3936); +x_3940 = lean_ctor_get(x_1, 2); +lean_inc(x_3940); +lean_inc(x_5); +lean_inc(x_4); +x_3941 = l_Lean_IR_ToIR_lowerType(x_3940, x_3939, x_4, x_5, x_3937); +if (lean_obj_tag(x_3941) == 0) +{ +lean_object* x_3942; lean_object* x_3943; lean_object* x_3944; lean_object* x_3945; lean_object* x_3946; +x_3942 = lean_ctor_get(x_3941, 0); +lean_inc(x_3942); +x_3943 = lean_ctor_get(x_3941, 1); +lean_inc(x_3943); +lean_dec(x_3941); +x_3944 = lean_ctor_get(x_3942, 0); +lean_inc(x_3944); +x_3945 = lean_ctor_get(x_3942, 1); +lean_inc(x_3945); +lean_dec(x_3942); +lean_inc(x_5); +lean_inc(x_4); +lean_inc(x_2); +x_3946 = l_Lean_IR_ToIR_lowerLet___lambda__1(x_2, x_3938, x_3933, x_3944, 
x_3945, x_4, x_5, x_3943); +if (lean_obj_tag(x_3946) == 0) +{ +lean_object* x_3947; lean_object* x_3948; lean_object* x_3949; lean_object* x_3950; lean_object* x_3951; lean_object* x_3952; lean_object* x_3953; +x_3947 = lean_ctor_get(x_3946, 0); +lean_inc(x_3947); +x_3948 = lean_ctor_get(x_3946, 1); +lean_inc(x_3948); +lean_dec(x_3946); +x_3949 = lean_ctor_get(x_3947, 0); +lean_inc(x_3949); +x_3950 = lean_ctor_get(x_3947, 1); +lean_inc(x_3950); +if (lean_is_exclusive(x_3947)) { + lean_ctor_release(x_3947, 0); + lean_ctor_release(x_3947, 1); + x_3951 = x_3947; +} else { + lean_dec_ref(x_3947); + x_3951 = lean_box(0); +} +if (lean_is_scalar(x_3890)) { + x_3952 = lean_alloc_ctor(1, 1, 0); +} else { + x_3952 = x_3890; +} +lean_ctor_set(x_3952, 0, x_3949); +if (lean_is_scalar(x_3951)) { + x_3953 = lean_alloc_ctor(0, 2, 0); +} else { + x_3953 = x_3951; +} +lean_ctor_set(x_3953, 0, x_3952); +lean_ctor_set(x_3953, 1, x_3950); +x_3671 = x_3953; +x_3672 = x_3948; +goto block_3701; +} +else +{ +lean_object* x_3954; lean_object* x_3955; lean_object* x_3956; lean_object* x_3957; +lean_dec(x_3890); +lean_dec(x_3069); +lean_dec(x_3058); +lean_dec(x_153); +lean_dec(x_5); +lean_dec(x_4); +lean_dec(x_2); +lean_dec(x_1); +x_3954 = lean_ctor_get(x_3946, 0); +lean_inc(x_3954); +x_3955 = lean_ctor_get(x_3946, 1); +lean_inc(x_3955); +if (lean_is_exclusive(x_3946)) { + lean_ctor_release(x_3946, 0); + lean_ctor_release(x_3946, 1); + x_3956 = x_3946; +} else { + lean_dec_ref(x_3946); + x_3956 = lean_box(0); +} +if (lean_is_scalar(x_3956)) { + x_3957 = lean_alloc_ctor(1, 2, 0); +} else { + x_3957 = x_3956; +} +lean_ctor_set(x_3957, 0, x_3954); +lean_ctor_set(x_3957, 1, x_3955); +return x_3957; +} +} +else +{ +lean_object* x_3958; lean_object* x_3959; lean_object* x_3960; lean_object* x_3961; +lean_dec(x_3938); +lean_dec(x_3933); +lean_dec(x_3890); +lean_dec(x_3069); +lean_dec(x_3058); +lean_dec(x_153); +lean_dec(x_5); +lean_dec(x_4); +lean_dec(x_2); +lean_dec(x_1); +x_3958 = lean_ctor_get(x_3941, 0); +lean_inc(x_3958); +x_3959 = lean_ctor_get(x_3941, 1); +lean_inc(x_3959); +if (lean_is_exclusive(x_3941)) { + lean_ctor_release(x_3941, 0); + lean_ctor_release(x_3941, 1); + x_3960 = x_3941; +} else { + lean_dec_ref(x_3941); + x_3960 = lean_box(0); +} +if (lean_is_scalar(x_3960)) { + x_3961 = lean_alloc_ctor(1, 2, 0); +} else { + x_3961 = x_3960; +} +lean_ctor_set(x_3961, 0, x_3958); +lean_ctor_set(x_3961, 1, x_3959); +return x_3961; +} +} +} +else +{ +lean_object* x_3962; lean_object* x_3963; lean_object* x_3964; lean_object* x_3965; lean_object* x_3966; lean_object* x_3967; lean_object* x_3968; lean_object* x_3969; lean_object* x_3970; +lean_dec(x_3893); +lean_dec(x_3891); +lean_inc(x_3058); +lean_inc(x_153); +x_3962 = lean_alloc_ctor(7, 2, 0); +lean_ctor_set(x_3962, 0, x_153); +lean_ctor_set(x_3962, 1, x_3058); +x_3963 = lean_ctor_get(x_1, 0); +lean_inc(x_3963); +x_3964 = l_Lean_IR_ToIR_bindVar(x_3963, x_3064, x_4, x_5, x_3888); +x_3965 = lean_ctor_get(x_3964, 0); +lean_inc(x_3965); +x_3966 = lean_ctor_get(x_3964, 1); +lean_inc(x_3966); +lean_dec(x_3964); +x_3967 = lean_ctor_get(x_3965, 0); +lean_inc(x_3967); +x_3968 = lean_ctor_get(x_3965, 1); +lean_inc(x_3968); +lean_dec(x_3965); +x_3969 = lean_box(7); +lean_inc(x_5); +lean_inc(x_4); +lean_inc(x_2); +x_3970 = l_Lean_IR_ToIR_lowerLet___lambda__1(x_2, x_3967, x_3962, x_3969, x_3968, x_4, x_5, x_3966); +if (lean_obj_tag(x_3970) == 0) +{ +lean_object* x_3971; lean_object* x_3972; lean_object* x_3973; lean_object* x_3974; lean_object* x_3975; lean_object* x_3976; 
lean_object* x_3977; +x_3971 = lean_ctor_get(x_3970, 0); +lean_inc(x_3971); +x_3972 = lean_ctor_get(x_3970, 1); +lean_inc(x_3972); +lean_dec(x_3970); +x_3973 = lean_ctor_get(x_3971, 0); +lean_inc(x_3973); +x_3974 = lean_ctor_get(x_3971, 1); +lean_inc(x_3974); +if (lean_is_exclusive(x_3971)) { + lean_ctor_release(x_3971, 0); + lean_ctor_release(x_3971, 1); + x_3975 = x_3971; +} else { + lean_dec_ref(x_3971); + x_3975 = lean_box(0); +} +if (lean_is_scalar(x_3890)) { + x_3976 = lean_alloc_ctor(1, 1, 0); +} else { + x_3976 = x_3890; +} +lean_ctor_set(x_3976, 0, x_3973); +if (lean_is_scalar(x_3975)) { + x_3977 = lean_alloc_ctor(0, 2, 0); +} else { + x_3977 = x_3975; +} +lean_ctor_set(x_3977, 0, x_3976); +lean_ctor_set(x_3977, 1, x_3974); +x_3671 = x_3977; +x_3672 = x_3972; +goto block_3701; +} +else +{ +lean_object* x_3978; lean_object* x_3979; lean_object* x_3980; lean_object* x_3981; +lean_dec(x_3890); +lean_dec(x_3069); +lean_dec(x_3058); +lean_dec(x_153); +lean_dec(x_5); +lean_dec(x_4); +lean_dec(x_2); +lean_dec(x_1); +x_3978 = lean_ctor_get(x_3970, 0); +lean_inc(x_3978); +x_3979 = lean_ctor_get(x_3970, 1); +lean_inc(x_3979); +if (lean_is_exclusive(x_3970)) { + lean_ctor_release(x_3970, 0); + lean_ctor_release(x_3970, 1); + x_3980 = x_3970; +} else { + lean_dec_ref(x_3970); + x_3980 = lean_box(0); +} +if (lean_is_scalar(x_3980)) { + x_3981 = lean_alloc_ctor(1, 2, 0); +} else { + x_3981 = x_3980; +} +lean_ctor_set(x_3981, 0, x_3978); +lean_ctor_set(x_3981, 1, x_3979); +return x_3981; +} +} +} +} +block_3701: +{ +lean_object* x_3673; +x_3673 = lean_ctor_get(x_3671, 0); +lean_inc(x_3673); +if (lean_obj_tag(x_3673) == 0) +{ +lean_object* x_3674; lean_object* x_3675; lean_object* x_3676; lean_object* x_3677; lean_object* x_3678; lean_object* x_3679; lean_object* x_3680; lean_object* x_3681; lean_object* x_3682; lean_object* x_3683; +lean_dec(x_3069); +x_3674 = lean_ctor_get(x_3671, 1); +lean_inc(x_3674); +lean_dec(x_3671); +x_3675 = lean_alloc_ctor(6, 2, 0); +lean_ctor_set(x_3675, 0, x_153); +lean_ctor_set(x_3675, 1, x_3058); +x_3676 = lean_ctor_get(x_1, 0); +lean_inc(x_3676); +x_3677 = l_Lean_IR_ToIR_bindVar(x_3676, x_3674, x_4, x_5, x_3672); +x_3678 = lean_ctor_get(x_3677, 0); +lean_inc(x_3678); +x_3679 = lean_ctor_get(x_3677, 1); +lean_inc(x_3679); +lean_dec(x_3677); +x_3680 = lean_ctor_get(x_3678, 0); +lean_inc(x_3680); +x_3681 = lean_ctor_get(x_3678, 1); +lean_inc(x_3681); +lean_dec(x_3678); +x_3682 = lean_ctor_get(x_1, 2); +lean_inc(x_3682); +lean_dec(x_1); +lean_inc(x_5); +lean_inc(x_4); +x_3683 = l_Lean_IR_ToIR_lowerType(x_3682, x_3681, x_4, x_5, x_3679); +if (lean_obj_tag(x_3683) == 0) +{ +lean_object* x_3684; lean_object* x_3685; lean_object* x_3686; lean_object* x_3687; lean_object* x_3688; +x_3684 = lean_ctor_get(x_3683, 0); +lean_inc(x_3684); +x_3685 = lean_ctor_get(x_3683, 1); +lean_inc(x_3685); +lean_dec(x_3683); +x_3686 = lean_ctor_get(x_3684, 0); +lean_inc(x_3686); +x_3687 = lean_ctor_get(x_3684, 1); +lean_inc(x_3687); +lean_dec(x_3684); +x_3688 = l_Lean_IR_ToIR_lowerLet___lambda__1(x_2, x_3680, x_3675, x_3686, x_3687, x_4, x_5, x_3685); +return x_3688; +} +else +{ +uint8_t x_3689; +lean_dec(x_3680); +lean_dec(x_3675); +lean_dec(x_5); +lean_dec(x_4); +lean_dec(x_2); +x_3689 = !lean_is_exclusive(x_3683); +if (x_3689 == 0) +{ +return x_3683; +} +else +{ +lean_object* x_3690; lean_object* x_3691; lean_object* x_3692; +x_3690 = lean_ctor_get(x_3683, 0); +x_3691 = lean_ctor_get(x_3683, 1); +lean_inc(x_3691); +lean_inc(x_3690); +lean_dec(x_3683); +x_3692 = lean_alloc_ctor(1, 2, 0); 
+lean_ctor_set(x_3692, 0, x_3690); +lean_ctor_set(x_3692, 1, x_3691); +return x_3692; +} +} +} +else +{ +uint8_t x_3693; +lean_dec(x_3058); +lean_dec(x_153); +lean_dec(x_5); +lean_dec(x_4); +lean_dec(x_2); +lean_dec(x_1); +x_3693 = !lean_is_exclusive(x_3671); +if (x_3693 == 0) +{ +lean_object* x_3694; lean_object* x_3695; lean_object* x_3696; +x_3694 = lean_ctor_get(x_3671, 0); +lean_dec(x_3694); +x_3695 = lean_ctor_get(x_3673, 0); +lean_inc(x_3695); +lean_dec(x_3673); +lean_ctor_set(x_3671, 0, x_3695); +if (lean_is_scalar(x_3069)) { + x_3696 = lean_alloc_ctor(0, 2, 0); +} else { + x_3696 = x_3069; +} +lean_ctor_set(x_3696, 0, x_3671); +lean_ctor_set(x_3696, 1, x_3672); +return x_3696; +} +else +{ +lean_object* x_3697; lean_object* x_3698; lean_object* x_3699; lean_object* x_3700; +x_3697 = lean_ctor_get(x_3671, 1); +lean_inc(x_3697); +lean_dec(x_3671); +x_3698 = lean_ctor_get(x_3673, 0); +lean_inc(x_3698); +lean_dec(x_3673); +x_3699 = lean_alloc_ctor(0, 2, 0); +lean_ctor_set(x_3699, 0, x_3698); +lean_ctor_set(x_3699, 1, x_3697); +if (lean_is_scalar(x_3069)) { + x_3700 = lean_alloc_ctor(0, 2, 0); +} else { + x_3700 = x_3069; +} +lean_ctor_set(x_3700, 0, x_3699); +lean_ctor_set(x_3700, 1, x_3672); +return x_3700; +} +} +} +} +case 4: +{ +uint8_t x_3982; +lean_dec(x_3070); +lean_dec(x_3069); +lean_free_object(x_3060); +lean_dec(x_3050); +lean_dec(x_3049); +x_3982 = !lean_is_exclusive(x_3075); +if (x_3982 == 0) +{ +lean_object* x_3983; lean_object* x_3984; uint8_t x_3985; +x_3983 = lean_ctor_get(x_3075, 0); +lean_dec(x_3983); +x_3984 = l_Lean_IR_ToIR_lowerLet___closed__20; +x_3985 = lean_name_eq(x_153, x_3984); +if (x_3985 == 0) +{ +uint8_t x_3986; lean_object* x_3987; lean_object* x_3988; lean_object* x_3989; lean_object* x_3990; lean_object* x_3991; lean_object* x_3992; lean_object* x_3993; lean_object* x_3994; +lean_dec(x_3058); +lean_dec(x_2); +lean_dec(x_1); +x_3986 = 1; +x_3987 = l_Lean_IR_ToIR_lowerLet___closed__14; +x_3988 = l_Lean_Name_toString(x_153, x_3986, x_3987); +lean_ctor_set_tag(x_3075, 3); +lean_ctor_set(x_3075, 0, x_3988); +x_3989 = l_Lean_IR_ToIR_lowerLet___closed__22; +x_3990 = lean_alloc_ctor(5, 2, 0); +lean_ctor_set(x_3990, 0, x_3989); +lean_ctor_set(x_3990, 1, x_3075); +x_3991 = l_Lean_IR_ToIR_lowerLet___closed__24; +x_3992 = lean_alloc_ctor(5, 2, 0); +lean_ctor_set(x_3992, 0, x_3990); +lean_ctor_set(x_3992, 1, x_3991); +x_3993 = l_Lean_MessageData_ofFormat(x_3992); +x_3994 = l_Lean_throwError___at_Lean_IR_ToIR_lowerLet___spec__1(x_3993, x_3064, x_4, x_5, x_3068); +lean_dec(x_5); +lean_dec(x_4); +lean_dec(x_3064); +return x_3994; +} +else +{ +lean_object* x_3995; lean_object* x_3996; lean_object* x_3997; +lean_free_object(x_3075); +lean_dec(x_153); +x_3995 = l_Lean_IR_instInhabitedArg; +x_3996 = lean_unsigned_to_nat(2u); +x_3997 = lean_array_get(x_3995, x_3058, x_3996); +lean_dec(x_3058); +if (lean_obj_tag(x_3997) == 0) +{ +lean_object* x_3998; lean_object* x_3999; lean_object* x_4000; lean_object* x_4001; lean_object* x_4002; lean_object* x_4003; lean_object* x_4004; +x_3998 = lean_ctor_get(x_3997, 0); +lean_inc(x_3998); +lean_dec(x_3997); +x_3999 = lean_ctor_get(x_1, 0); +lean_inc(x_3999); +lean_dec(x_1); +x_4000 = l_Lean_IR_ToIR_bindVarToVarId(x_3999, x_3998, x_3064, x_4, x_5, x_3068); +x_4001 = lean_ctor_get(x_4000, 0); +lean_inc(x_4001); +x_4002 = lean_ctor_get(x_4000, 1); +lean_inc(x_4002); +lean_dec(x_4000); +x_4003 = lean_ctor_get(x_4001, 1); +lean_inc(x_4003); +lean_dec(x_4001); +x_4004 = l_Lean_IR_ToIR_lowerCode(x_2, x_4003, x_4, x_5, x_4002); +return x_4004; 
+} +else +{ +lean_object* x_4005; lean_object* x_4006; lean_object* x_4007; lean_object* x_4008; lean_object* x_4009; lean_object* x_4010; +x_4005 = lean_ctor_get(x_1, 0); +lean_inc(x_4005); +lean_dec(x_1); +x_4006 = l_Lean_IR_ToIR_bindErased(x_4005, x_3064, x_4, x_5, x_3068); +x_4007 = lean_ctor_get(x_4006, 0); +lean_inc(x_4007); +x_4008 = lean_ctor_get(x_4006, 1); +lean_inc(x_4008); +lean_dec(x_4006); +x_4009 = lean_ctor_get(x_4007, 1); +lean_inc(x_4009); +lean_dec(x_4007); +x_4010 = l_Lean_IR_ToIR_lowerCode(x_2, x_4009, x_4, x_5, x_4008); +return x_4010; +} +} +} +else +{ +lean_object* x_4011; uint8_t x_4012; +lean_dec(x_3075); +x_4011 = l_Lean_IR_ToIR_lowerLet___closed__20; +x_4012 = lean_name_eq(x_153, x_4011); +if (x_4012 == 0) +{ +uint8_t x_4013; lean_object* x_4014; lean_object* x_4015; lean_object* x_4016; lean_object* x_4017; lean_object* x_4018; lean_object* x_4019; lean_object* x_4020; lean_object* x_4021; lean_object* x_4022; +lean_dec(x_3058); +lean_dec(x_2); +lean_dec(x_1); +x_4013 = 1; +x_4014 = l_Lean_IR_ToIR_lowerLet___closed__14; +x_4015 = l_Lean_Name_toString(x_153, x_4013, x_4014); +x_4016 = lean_alloc_ctor(3, 1, 0); +lean_ctor_set(x_4016, 0, x_4015); +x_4017 = l_Lean_IR_ToIR_lowerLet___closed__22; +x_4018 = lean_alloc_ctor(5, 2, 0); +lean_ctor_set(x_4018, 0, x_4017); +lean_ctor_set(x_4018, 1, x_4016); +x_4019 = l_Lean_IR_ToIR_lowerLet___closed__24; +x_4020 = lean_alloc_ctor(5, 2, 0); +lean_ctor_set(x_4020, 0, x_4018); +lean_ctor_set(x_4020, 1, x_4019); +x_4021 = l_Lean_MessageData_ofFormat(x_4020); +x_4022 = l_Lean_throwError___at_Lean_IR_ToIR_lowerLet___spec__1(x_4021, x_3064, x_4, x_5, x_3068); +lean_dec(x_5); +lean_dec(x_4); +lean_dec(x_3064); +return x_4022; +} +else +{ +lean_object* x_4023; lean_object* x_4024; lean_object* x_4025; +lean_dec(x_153); +x_4023 = l_Lean_IR_instInhabitedArg; +x_4024 = lean_unsigned_to_nat(2u); +x_4025 = lean_array_get(x_4023, x_3058, x_4024); +lean_dec(x_3058); +if (lean_obj_tag(x_4025) == 0) +{ +lean_object* x_4026; lean_object* x_4027; lean_object* x_4028; lean_object* x_4029; lean_object* x_4030; lean_object* x_4031; lean_object* x_4032; +x_4026 = lean_ctor_get(x_4025, 0); +lean_inc(x_4026); +lean_dec(x_4025); +x_4027 = lean_ctor_get(x_1, 0); +lean_inc(x_4027); +lean_dec(x_1); +x_4028 = l_Lean_IR_ToIR_bindVarToVarId(x_4027, x_4026, x_3064, x_4, x_5, x_3068); +x_4029 = lean_ctor_get(x_4028, 0); +lean_inc(x_4029); +x_4030 = lean_ctor_get(x_4028, 1); +lean_inc(x_4030); +lean_dec(x_4028); +x_4031 = lean_ctor_get(x_4029, 1); +lean_inc(x_4031); +lean_dec(x_4029); +x_4032 = l_Lean_IR_ToIR_lowerCode(x_2, x_4031, x_4, x_5, x_4030); +return x_4032; +} +else +{ +lean_object* x_4033; lean_object* x_4034; lean_object* x_4035; lean_object* x_4036; lean_object* x_4037; lean_object* x_4038; +x_4033 = lean_ctor_get(x_1, 0); +lean_inc(x_4033); +lean_dec(x_1); +x_4034 = l_Lean_IR_ToIR_bindErased(x_4033, x_3064, x_4, x_5, x_3068); +x_4035 = lean_ctor_get(x_4034, 0); +lean_inc(x_4035); +x_4036 = lean_ctor_get(x_4034, 1); +lean_inc(x_4036); +lean_dec(x_4034); +x_4037 = lean_ctor_get(x_4035, 1); +lean_inc(x_4037); +lean_dec(x_4035); +x_4038 = l_Lean_IR_ToIR_lowerCode(x_2, x_4037, x_4, x_5, x_4036); +return x_4038; +} +} +} +} +case 5: +{ +lean_object* x_4039; lean_object* x_4040; +lean_dec(x_3075); +lean_dec(x_3070); +lean_dec(x_3069); +lean_free_object(x_3060); +lean_dec(x_3058); +lean_dec(x_3050); +lean_dec(x_3049); +lean_dec(x_153); +lean_dec(x_2); +lean_dec(x_1); +x_4039 = l_Lean_IR_ToIR_lowerLet___closed__26; +x_4040 = 
l_panic___at_Lean_IR_ToIR_lowerAlt_loop___spec__1(x_4039, x_3064, x_4, x_5, x_3068); +return x_4040; +} +case 6: +{ +lean_object* x_4041; uint8_t x_4042; +x_4041 = lean_ctor_get(x_3075, 0); +lean_inc(x_4041); +lean_dec(x_3075); +lean_inc(x_153); +x_4042 = l_Lean_isExtern(x_3070, x_153); +if (x_4042 == 0) +{ +lean_object* x_4043; +lean_dec(x_3069); +lean_free_object(x_3060); +lean_dec(x_3058); +lean_inc(x_5); +lean_inc(x_4); +x_4043 = l_Lean_IR_ToIR_getCtorInfo(x_153, x_3064, x_4, x_5, x_3068); +if (lean_obj_tag(x_4043) == 0) +{ +lean_object* x_4044; lean_object* x_4045; lean_object* x_4046; lean_object* x_4047; lean_object* x_4048; lean_object* x_4049; lean_object* x_4050; lean_object* x_4051; lean_object* x_4052; lean_object* x_4053; lean_object* x_4054; lean_object* x_4055; lean_object* x_4056; lean_object* x_4057; lean_object* x_4058; lean_object* x_4059; lean_object* x_4060; lean_object* x_4061; lean_object* x_4062; lean_object* x_4063; +x_4044 = lean_ctor_get(x_4043, 0); +lean_inc(x_4044); +x_4045 = lean_ctor_get(x_4044, 0); +lean_inc(x_4045); +x_4046 = lean_ctor_get(x_4043, 1); +lean_inc(x_4046); +lean_dec(x_4043); +x_4047 = lean_ctor_get(x_4044, 1); +lean_inc(x_4047); +lean_dec(x_4044); +x_4048 = lean_ctor_get(x_4045, 0); +lean_inc(x_4048); +x_4049 = lean_ctor_get(x_4045, 1); +lean_inc(x_4049); +lean_dec(x_4045); +x_4050 = lean_ctor_get(x_4041, 3); +lean_inc(x_4050); +lean_dec(x_4041); +x_4051 = lean_array_get_size(x_3049); +x_4052 = l_Array_extract___rarg(x_3049, x_4050, x_4051); +lean_dec(x_4051); +lean_dec(x_3049); +x_4053 = lean_array_get_size(x_4049); +x_4054 = lean_unsigned_to_nat(0u); +x_4055 = lean_unsigned_to_nat(1u); +if (lean_is_scalar(x_3050)) { + x_4056 = lean_alloc_ctor(0, 3, 0); +} else { + x_4056 = x_3050; + lean_ctor_set_tag(x_4056, 0); +} +lean_ctor_set(x_4056, 0, x_4054); +lean_ctor_set(x_4056, 1, x_4053); +lean_ctor_set(x_4056, 2, x_4055); +x_4057 = l_Lean_IR_ToIR_lowerLet___closed__27; +x_4058 = l_Std_Range_forIn_x27_loop___at_Lean_IR_ToIR_lowerLet___spec__3(x_4049, x_4052, x_4056, x_4056, x_4057, x_4054, lean_box(0), lean_box(0), x_4047, x_4, x_5, x_4046); +lean_dec(x_4056); +x_4059 = lean_ctor_get(x_4058, 0); +lean_inc(x_4059); +x_4060 = lean_ctor_get(x_4058, 1); +lean_inc(x_4060); +lean_dec(x_4058); +x_4061 = lean_ctor_get(x_4059, 0); +lean_inc(x_4061); +x_4062 = lean_ctor_get(x_4059, 1); +lean_inc(x_4062); +lean_dec(x_4059); +x_4063 = l_Lean_IR_ToIR_lowerLet___lambda__4(x_1, x_2, x_4048, x_4049, x_4052, x_4061, x_4062, x_4, x_5, x_4060); +lean_dec(x_4052); +lean_dec(x_4049); +return x_4063; +} +else +{ +uint8_t x_4064; +lean_dec(x_4041); +lean_dec(x_3050); +lean_dec(x_3049); +lean_dec(x_5); +lean_dec(x_4); +lean_dec(x_2); +lean_dec(x_1); +x_4064 = !lean_is_exclusive(x_4043); +if (x_4064 == 0) +{ +return x_4043; +} +else +{ +lean_object* x_4065; lean_object* x_4066; lean_object* x_4067; +x_4065 = lean_ctor_get(x_4043, 0); +x_4066 = lean_ctor_get(x_4043, 1); +lean_inc(x_4066); +lean_inc(x_4065); +lean_dec(x_4043); +x_4067 = lean_alloc_ctor(1, 2, 0); +lean_ctor_set(x_4067, 0, x_4065); +lean_ctor_set(x_4067, 1, x_4066); +return x_4067; +} +} +} +else +{ +lean_object* x_4068; lean_object* x_4069; lean_object* x_4099; lean_object* x_4100; +lean_dec(x_4041); +lean_dec(x_3050); +lean_dec(x_3049); +lean_inc(x_153); +x_4099 = l_Lean_Compiler_LCNF_getMonoDecl_x3f(x_153, x_4, x_5, x_3068); +x_4100 = lean_ctor_get(x_4099, 0); +lean_inc(x_4100); +if (lean_obj_tag(x_4100) == 0) +{ +lean_object* x_4101; lean_object* x_4102; +x_4101 = lean_ctor_get(x_4099, 1); 
+lean_inc(x_4101); +lean_dec(x_4099); +x_4102 = lean_box(0); +lean_ctor_set(x_3060, 0, x_4102); +x_4068 = x_3060; +x_4069 = x_4101; +goto block_4098; +} +else +{ +uint8_t x_4103; +lean_free_object(x_3060); +x_4103 = !lean_is_exclusive(x_4099); +if (x_4103 == 0) +{ +lean_object* x_4104; lean_object* x_4105; uint8_t x_4106; +x_4104 = lean_ctor_get(x_4099, 1); +x_4105 = lean_ctor_get(x_4099, 0); +lean_dec(x_4105); +x_4106 = !lean_is_exclusive(x_4100); +if (x_4106 == 0) +{ +lean_object* x_4107; lean_object* x_4108; lean_object* x_4109; lean_object* x_4110; uint8_t x_4111; +x_4107 = lean_ctor_get(x_4100, 0); +x_4108 = lean_array_get_size(x_3058); +x_4109 = lean_ctor_get(x_4107, 3); +lean_inc(x_4109); +lean_dec(x_4107); +x_4110 = lean_array_get_size(x_4109); +lean_dec(x_4109); +x_4111 = lean_nat_dec_lt(x_4108, x_4110); +if (x_4111 == 0) +{ +uint8_t x_4112; +x_4112 = lean_nat_dec_eq(x_4108, x_4110); +if (x_4112 == 0) +{ +lean_object* x_4113; lean_object* x_4114; lean_object* x_4115; lean_object* x_4116; lean_object* x_4117; lean_object* x_4118; lean_object* x_4119; lean_object* x_4120; lean_object* x_4121; lean_object* x_4122; lean_object* x_4123; lean_object* x_4124; lean_object* x_4125; lean_object* x_4126; lean_object* x_4127; lean_object* x_4128; +x_4113 = lean_unsigned_to_nat(0u); +x_4114 = l_Array_extract___rarg(x_3058, x_4113, x_4110); +x_4115 = l_Array_extract___rarg(x_3058, x_4110, x_4108); +lean_dec(x_4108); +lean_inc(x_153); +lean_ctor_set_tag(x_4099, 6); +lean_ctor_set(x_4099, 1, x_4114); +lean_ctor_set(x_4099, 0, x_153); +x_4116 = lean_ctor_get(x_1, 0); +lean_inc(x_4116); +x_4117 = l_Lean_IR_ToIR_bindVar(x_4116, x_3064, x_4, x_5, x_4104); +x_4118 = lean_ctor_get(x_4117, 0); +lean_inc(x_4118); +x_4119 = lean_ctor_get(x_4117, 1); +lean_inc(x_4119); +lean_dec(x_4117); +x_4120 = lean_ctor_get(x_4118, 0); +lean_inc(x_4120); +x_4121 = lean_ctor_get(x_4118, 1); +lean_inc(x_4121); +lean_dec(x_4118); +x_4122 = l_Lean_IR_ToIR_newVar(x_4121, x_4, x_5, x_4119); +x_4123 = lean_ctor_get(x_4122, 0); +lean_inc(x_4123); +x_4124 = lean_ctor_get(x_4122, 1); +lean_inc(x_4124); +lean_dec(x_4122); +x_4125 = lean_ctor_get(x_4123, 0); +lean_inc(x_4125); +x_4126 = lean_ctor_get(x_4123, 1); +lean_inc(x_4126); +lean_dec(x_4123); +x_4127 = lean_ctor_get(x_1, 2); +lean_inc(x_4127); +lean_inc(x_5); +lean_inc(x_4); +x_4128 = l_Lean_IR_ToIR_lowerType(x_4127, x_4126, x_4, x_5, x_4124); +if (lean_obj_tag(x_4128) == 0) +{ +lean_object* x_4129; lean_object* x_4130; lean_object* x_4131; lean_object* x_4132; lean_object* x_4133; +x_4129 = lean_ctor_get(x_4128, 0); +lean_inc(x_4129); +x_4130 = lean_ctor_get(x_4128, 1); +lean_inc(x_4130); +lean_dec(x_4128); +x_4131 = lean_ctor_get(x_4129, 0); +lean_inc(x_4131); +x_4132 = lean_ctor_get(x_4129, 1); +lean_inc(x_4132); +lean_dec(x_4129); +lean_inc(x_5); +lean_inc(x_4); +lean_inc(x_2); +x_4133 = l_Lean_IR_ToIR_lowerLet___lambda__3(x_2, x_4125, x_4115, x_4120, x_4099, x_4131, x_4132, x_4, x_5, x_4130); +if (lean_obj_tag(x_4133) == 0) +{ +lean_object* x_4134; lean_object* x_4135; uint8_t x_4136; +x_4134 = lean_ctor_get(x_4133, 0); +lean_inc(x_4134); +x_4135 = lean_ctor_get(x_4133, 1); +lean_inc(x_4135); +lean_dec(x_4133); +x_4136 = !lean_is_exclusive(x_4134); +if (x_4136 == 0) +{ +lean_object* x_4137; +x_4137 = lean_ctor_get(x_4134, 0); +lean_ctor_set(x_4100, 0, x_4137); +lean_ctor_set(x_4134, 0, x_4100); +x_4068 = x_4134; +x_4069 = x_4135; +goto block_4098; +} +else +{ +lean_object* x_4138; lean_object* x_4139; lean_object* x_4140; +x_4138 = lean_ctor_get(x_4134, 0); +x_4139 = 
lean_ctor_get(x_4134, 1); +lean_inc(x_4139); +lean_inc(x_4138); +lean_dec(x_4134); +lean_ctor_set(x_4100, 0, x_4138); +x_4140 = lean_alloc_ctor(0, 2, 0); +lean_ctor_set(x_4140, 0, x_4100); +lean_ctor_set(x_4140, 1, x_4139); +x_4068 = x_4140; +x_4069 = x_4135; +goto block_4098; +} +} +else +{ +uint8_t x_4141; +lean_free_object(x_4100); +lean_dec(x_3069); +lean_dec(x_3058); +lean_dec(x_153); +lean_dec(x_5); +lean_dec(x_4); +lean_dec(x_2); +lean_dec(x_1); +x_4141 = !lean_is_exclusive(x_4133); +if (x_4141 == 0) +{ +return x_4133; +} +else +{ +lean_object* x_4142; lean_object* x_4143; lean_object* x_4144; +x_4142 = lean_ctor_get(x_4133, 0); +x_4143 = lean_ctor_get(x_4133, 1); +lean_inc(x_4143); +lean_inc(x_4142); +lean_dec(x_4133); +x_4144 = lean_alloc_ctor(1, 2, 0); +lean_ctor_set(x_4144, 0, x_4142); +lean_ctor_set(x_4144, 1, x_4143); +return x_4144; +} +} +} +else +{ +uint8_t x_4145; +lean_dec(x_4125); +lean_dec(x_4120); +lean_dec(x_4099); +lean_dec(x_4115); +lean_free_object(x_4100); +lean_dec(x_3069); +lean_dec(x_3058); +lean_dec(x_153); +lean_dec(x_5); +lean_dec(x_4); +lean_dec(x_2); +lean_dec(x_1); +x_4145 = !lean_is_exclusive(x_4128); +if (x_4145 == 0) +{ +return x_4128; +} +else +{ +lean_object* x_4146; lean_object* x_4147; lean_object* x_4148; +x_4146 = lean_ctor_get(x_4128, 0); +x_4147 = lean_ctor_get(x_4128, 1); +lean_inc(x_4147); +lean_inc(x_4146); +lean_dec(x_4128); +x_4148 = lean_alloc_ctor(1, 2, 0); +lean_ctor_set(x_4148, 0, x_4146); +lean_ctor_set(x_4148, 1, x_4147); +return x_4148; +} +} +} +else +{ +lean_object* x_4149; lean_object* x_4150; lean_object* x_4151; lean_object* x_4152; lean_object* x_4153; lean_object* x_4154; lean_object* x_4155; lean_object* x_4156; +lean_dec(x_4110); +lean_dec(x_4108); +lean_inc(x_3058); +lean_inc(x_153); +lean_ctor_set_tag(x_4099, 6); +lean_ctor_set(x_4099, 1, x_3058); +lean_ctor_set(x_4099, 0, x_153); +x_4149 = lean_ctor_get(x_1, 0); +lean_inc(x_4149); +x_4150 = l_Lean_IR_ToIR_bindVar(x_4149, x_3064, x_4, x_5, x_4104); +x_4151 = lean_ctor_get(x_4150, 0); +lean_inc(x_4151); +x_4152 = lean_ctor_get(x_4150, 1); +lean_inc(x_4152); +lean_dec(x_4150); +x_4153 = lean_ctor_get(x_4151, 0); +lean_inc(x_4153); +x_4154 = lean_ctor_get(x_4151, 1); +lean_inc(x_4154); +lean_dec(x_4151); +x_4155 = lean_ctor_get(x_1, 2); +lean_inc(x_4155); +lean_inc(x_5); +lean_inc(x_4); +x_4156 = l_Lean_IR_ToIR_lowerType(x_4155, x_4154, x_4, x_5, x_4152); +if (lean_obj_tag(x_4156) == 0) +{ +lean_object* x_4157; lean_object* x_4158; lean_object* x_4159; lean_object* x_4160; lean_object* x_4161; +x_4157 = lean_ctor_get(x_4156, 0); +lean_inc(x_4157); +x_4158 = lean_ctor_get(x_4156, 1); +lean_inc(x_4158); +lean_dec(x_4156); +x_4159 = lean_ctor_get(x_4157, 0); +lean_inc(x_4159); +x_4160 = lean_ctor_get(x_4157, 1); +lean_inc(x_4160); +lean_dec(x_4157); +lean_inc(x_5); +lean_inc(x_4); +lean_inc(x_2); +x_4161 = l_Lean_IR_ToIR_lowerLet___lambda__1(x_2, x_4153, x_4099, x_4159, x_4160, x_4, x_5, x_4158); +if (lean_obj_tag(x_4161) == 0) +{ +lean_object* x_4162; lean_object* x_4163; uint8_t x_4164; +x_4162 = lean_ctor_get(x_4161, 0); +lean_inc(x_4162); +x_4163 = lean_ctor_get(x_4161, 1); +lean_inc(x_4163); +lean_dec(x_4161); +x_4164 = !lean_is_exclusive(x_4162); +if (x_4164 == 0) +{ +lean_object* x_4165; +x_4165 = lean_ctor_get(x_4162, 0); +lean_ctor_set(x_4100, 0, x_4165); +lean_ctor_set(x_4162, 0, x_4100); +x_4068 = x_4162; +x_4069 = x_4163; +goto block_4098; +} +else +{ +lean_object* x_4166; lean_object* x_4167; lean_object* x_4168; +x_4166 = lean_ctor_get(x_4162, 0); +x_4167 = 
lean_ctor_get(x_4162, 1); +lean_inc(x_4167); +lean_inc(x_4166); +lean_dec(x_4162); +lean_ctor_set(x_4100, 0, x_4166); +x_4168 = lean_alloc_ctor(0, 2, 0); +lean_ctor_set(x_4168, 0, x_4100); +lean_ctor_set(x_4168, 1, x_4167); +x_4068 = x_4168; +x_4069 = x_4163; +goto block_4098; +} +} +else +{ +uint8_t x_4169; +lean_free_object(x_4100); +lean_dec(x_3069); +lean_dec(x_3058); +lean_dec(x_153); +lean_dec(x_5); +lean_dec(x_4); +lean_dec(x_2); +lean_dec(x_1); +x_4169 = !lean_is_exclusive(x_4161); +if (x_4169 == 0) +{ +return x_4161; +} +else +{ +lean_object* x_4170; lean_object* x_4171; lean_object* x_4172; +x_4170 = lean_ctor_get(x_4161, 0); +x_4171 = lean_ctor_get(x_4161, 1); +lean_inc(x_4171); +lean_inc(x_4170); +lean_dec(x_4161); +x_4172 = lean_alloc_ctor(1, 2, 0); +lean_ctor_set(x_4172, 0, x_4170); +lean_ctor_set(x_4172, 1, x_4171); +return x_4172; +} +} +} +else +{ +uint8_t x_4173; +lean_dec(x_4153); +lean_dec(x_4099); +lean_free_object(x_4100); +lean_dec(x_3069); +lean_dec(x_3058); +lean_dec(x_153); +lean_dec(x_5); +lean_dec(x_4); +lean_dec(x_2); +lean_dec(x_1); +x_4173 = !lean_is_exclusive(x_4156); +if (x_4173 == 0) +{ +return x_4156; +} +else +{ +lean_object* x_4174; lean_object* x_4175; lean_object* x_4176; +x_4174 = lean_ctor_get(x_4156, 0); +x_4175 = lean_ctor_get(x_4156, 1); +lean_inc(x_4175); +lean_inc(x_4174); +lean_dec(x_4156); +x_4176 = lean_alloc_ctor(1, 2, 0); +lean_ctor_set(x_4176, 0, x_4174); +lean_ctor_set(x_4176, 1, x_4175); +return x_4176; +} +} +} +} +else +{ +lean_object* x_4177; lean_object* x_4178; lean_object* x_4179; lean_object* x_4180; lean_object* x_4181; lean_object* x_4182; lean_object* x_4183; lean_object* x_4184; +lean_dec(x_4110); +lean_dec(x_4108); +lean_inc(x_3058); +lean_inc(x_153); +lean_ctor_set_tag(x_4099, 7); +lean_ctor_set(x_4099, 1, x_3058); +lean_ctor_set(x_4099, 0, x_153); +x_4177 = lean_ctor_get(x_1, 0); +lean_inc(x_4177); +x_4178 = l_Lean_IR_ToIR_bindVar(x_4177, x_3064, x_4, x_5, x_4104); +x_4179 = lean_ctor_get(x_4178, 0); +lean_inc(x_4179); +x_4180 = lean_ctor_get(x_4178, 1); +lean_inc(x_4180); +lean_dec(x_4178); +x_4181 = lean_ctor_get(x_4179, 0); +lean_inc(x_4181); +x_4182 = lean_ctor_get(x_4179, 1); +lean_inc(x_4182); +lean_dec(x_4179); +x_4183 = lean_box(7); +lean_inc(x_5); +lean_inc(x_4); +lean_inc(x_2); +x_4184 = l_Lean_IR_ToIR_lowerLet___lambda__1(x_2, x_4181, x_4099, x_4183, x_4182, x_4, x_5, x_4180); +if (lean_obj_tag(x_4184) == 0) +{ +lean_object* x_4185; lean_object* x_4186; uint8_t x_4187; +x_4185 = lean_ctor_get(x_4184, 0); +lean_inc(x_4185); +x_4186 = lean_ctor_get(x_4184, 1); +lean_inc(x_4186); +lean_dec(x_4184); +x_4187 = !lean_is_exclusive(x_4185); +if (x_4187 == 0) +{ +lean_object* x_4188; +x_4188 = lean_ctor_get(x_4185, 0); +lean_ctor_set(x_4100, 0, x_4188); +lean_ctor_set(x_4185, 0, x_4100); +x_4068 = x_4185; +x_4069 = x_4186; +goto block_4098; +} +else +{ +lean_object* x_4189; lean_object* x_4190; lean_object* x_4191; +x_4189 = lean_ctor_get(x_4185, 0); +x_4190 = lean_ctor_get(x_4185, 1); +lean_inc(x_4190); +lean_inc(x_4189); +lean_dec(x_4185); +lean_ctor_set(x_4100, 0, x_4189); +x_4191 = lean_alloc_ctor(0, 2, 0); +lean_ctor_set(x_4191, 0, x_4100); +lean_ctor_set(x_4191, 1, x_4190); +x_4068 = x_4191; +x_4069 = x_4186; +goto block_4098; +} +} +else +{ +uint8_t x_4192; +lean_free_object(x_4100); +lean_dec(x_3069); +lean_dec(x_3058); +lean_dec(x_153); +lean_dec(x_5); +lean_dec(x_4); +lean_dec(x_2); +lean_dec(x_1); +x_4192 = !lean_is_exclusive(x_4184); +if (x_4192 == 0) +{ +return x_4184; +} +else +{ +lean_object* x_4193; 
lean_object* x_4194; lean_object* x_4195; +x_4193 = lean_ctor_get(x_4184, 0); +x_4194 = lean_ctor_get(x_4184, 1); +lean_inc(x_4194); +lean_inc(x_4193); +lean_dec(x_4184); +x_4195 = lean_alloc_ctor(1, 2, 0); +lean_ctor_set(x_4195, 0, x_4193); +lean_ctor_set(x_4195, 1, x_4194); +return x_4195; +} +} +} +} +else +{ +lean_object* x_4196; lean_object* x_4197; lean_object* x_4198; lean_object* x_4199; uint8_t x_4200; +x_4196 = lean_ctor_get(x_4100, 0); +lean_inc(x_4196); +lean_dec(x_4100); +x_4197 = lean_array_get_size(x_3058); +x_4198 = lean_ctor_get(x_4196, 3); +lean_inc(x_4198); +lean_dec(x_4196); +x_4199 = lean_array_get_size(x_4198); +lean_dec(x_4198); +x_4200 = lean_nat_dec_lt(x_4197, x_4199); +if (x_4200 == 0) +{ +uint8_t x_4201; +x_4201 = lean_nat_dec_eq(x_4197, x_4199); +if (x_4201 == 0) +{ +lean_object* x_4202; lean_object* x_4203; lean_object* x_4204; lean_object* x_4205; lean_object* x_4206; lean_object* x_4207; lean_object* x_4208; lean_object* x_4209; lean_object* x_4210; lean_object* x_4211; lean_object* x_4212; lean_object* x_4213; lean_object* x_4214; lean_object* x_4215; lean_object* x_4216; lean_object* x_4217; +x_4202 = lean_unsigned_to_nat(0u); +x_4203 = l_Array_extract___rarg(x_3058, x_4202, x_4199); +x_4204 = l_Array_extract___rarg(x_3058, x_4199, x_4197); +lean_dec(x_4197); +lean_inc(x_153); +lean_ctor_set_tag(x_4099, 6); +lean_ctor_set(x_4099, 1, x_4203); +lean_ctor_set(x_4099, 0, x_153); +x_4205 = lean_ctor_get(x_1, 0); +lean_inc(x_4205); +x_4206 = l_Lean_IR_ToIR_bindVar(x_4205, x_3064, x_4, x_5, x_4104); +x_4207 = lean_ctor_get(x_4206, 0); +lean_inc(x_4207); +x_4208 = lean_ctor_get(x_4206, 1); +lean_inc(x_4208); +lean_dec(x_4206); +x_4209 = lean_ctor_get(x_4207, 0); +lean_inc(x_4209); +x_4210 = lean_ctor_get(x_4207, 1); +lean_inc(x_4210); +lean_dec(x_4207); +x_4211 = l_Lean_IR_ToIR_newVar(x_4210, x_4, x_5, x_4208); +x_4212 = lean_ctor_get(x_4211, 0); +lean_inc(x_4212); +x_4213 = lean_ctor_get(x_4211, 1); +lean_inc(x_4213); +lean_dec(x_4211); +x_4214 = lean_ctor_get(x_4212, 0); +lean_inc(x_4214); +x_4215 = lean_ctor_get(x_4212, 1); +lean_inc(x_4215); +lean_dec(x_4212); +x_4216 = lean_ctor_get(x_1, 2); +lean_inc(x_4216); +lean_inc(x_5); +lean_inc(x_4); +x_4217 = l_Lean_IR_ToIR_lowerType(x_4216, x_4215, x_4, x_5, x_4213); +if (lean_obj_tag(x_4217) == 0) +{ +lean_object* x_4218; lean_object* x_4219; lean_object* x_4220; lean_object* x_4221; lean_object* x_4222; +x_4218 = lean_ctor_get(x_4217, 0); +lean_inc(x_4218); +x_4219 = lean_ctor_get(x_4217, 1); +lean_inc(x_4219); +lean_dec(x_4217); +x_4220 = lean_ctor_get(x_4218, 0); +lean_inc(x_4220); +x_4221 = lean_ctor_get(x_4218, 1); +lean_inc(x_4221); +lean_dec(x_4218); +lean_inc(x_5); +lean_inc(x_4); +lean_inc(x_2); +x_4222 = l_Lean_IR_ToIR_lowerLet___lambda__3(x_2, x_4214, x_4204, x_4209, x_4099, x_4220, x_4221, x_4, x_5, x_4219); +if (lean_obj_tag(x_4222) == 0) +{ +lean_object* x_4223; lean_object* x_4224; lean_object* x_4225; lean_object* x_4226; lean_object* x_4227; lean_object* x_4228; lean_object* x_4229; +x_4223 = lean_ctor_get(x_4222, 0); +lean_inc(x_4223); +x_4224 = lean_ctor_get(x_4222, 1); +lean_inc(x_4224); +lean_dec(x_4222); +x_4225 = lean_ctor_get(x_4223, 0); +lean_inc(x_4225); +x_4226 = lean_ctor_get(x_4223, 1); +lean_inc(x_4226); +if (lean_is_exclusive(x_4223)) { + lean_ctor_release(x_4223, 0); + lean_ctor_release(x_4223, 1); + x_4227 = x_4223; +} else { + lean_dec_ref(x_4223); + x_4227 = lean_box(0); +} +x_4228 = lean_alloc_ctor(1, 1, 0); +lean_ctor_set(x_4228, 0, x_4225); +if (lean_is_scalar(x_4227)) { + 
x_4229 = lean_alloc_ctor(0, 2, 0); +} else { + x_4229 = x_4227; +} +lean_ctor_set(x_4229, 0, x_4228); +lean_ctor_set(x_4229, 1, x_4226); +x_4068 = x_4229; +x_4069 = x_4224; +goto block_4098; +} +else +{ +lean_object* x_4230; lean_object* x_4231; lean_object* x_4232; lean_object* x_4233; +lean_dec(x_3069); +lean_dec(x_3058); +lean_dec(x_153); +lean_dec(x_5); +lean_dec(x_4); +lean_dec(x_2); +lean_dec(x_1); +x_4230 = lean_ctor_get(x_4222, 0); +lean_inc(x_4230); +x_4231 = lean_ctor_get(x_4222, 1); +lean_inc(x_4231); +if (lean_is_exclusive(x_4222)) { + lean_ctor_release(x_4222, 0); + lean_ctor_release(x_4222, 1); + x_4232 = x_4222; +} else { + lean_dec_ref(x_4222); + x_4232 = lean_box(0); +} +if (lean_is_scalar(x_4232)) { + x_4233 = lean_alloc_ctor(1, 2, 0); +} else { + x_4233 = x_4232; +} +lean_ctor_set(x_4233, 0, x_4230); +lean_ctor_set(x_4233, 1, x_4231); +return x_4233; +} +} +else +{ +lean_object* x_4234; lean_object* x_4235; lean_object* x_4236; lean_object* x_4237; +lean_dec(x_4214); +lean_dec(x_4209); +lean_dec(x_4099); +lean_dec(x_4204); +lean_dec(x_3069); +lean_dec(x_3058); +lean_dec(x_153); +lean_dec(x_5); +lean_dec(x_4); +lean_dec(x_2); +lean_dec(x_1); +x_4234 = lean_ctor_get(x_4217, 0); +lean_inc(x_4234); +x_4235 = lean_ctor_get(x_4217, 1); +lean_inc(x_4235); +if (lean_is_exclusive(x_4217)) { + lean_ctor_release(x_4217, 0); + lean_ctor_release(x_4217, 1); + x_4236 = x_4217; +} else { + lean_dec_ref(x_4217); + x_4236 = lean_box(0); +} +if (lean_is_scalar(x_4236)) { + x_4237 = lean_alloc_ctor(1, 2, 0); +} else { + x_4237 = x_4236; +} +lean_ctor_set(x_4237, 0, x_4234); +lean_ctor_set(x_4237, 1, x_4235); +return x_4237; +} +} +else +{ +lean_object* x_4238; lean_object* x_4239; lean_object* x_4240; lean_object* x_4241; lean_object* x_4242; lean_object* x_4243; lean_object* x_4244; lean_object* x_4245; +lean_dec(x_4199); +lean_dec(x_4197); +lean_inc(x_3058); +lean_inc(x_153); +lean_ctor_set_tag(x_4099, 6); +lean_ctor_set(x_4099, 1, x_3058); +lean_ctor_set(x_4099, 0, x_153); +x_4238 = lean_ctor_get(x_1, 0); +lean_inc(x_4238); +x_4239 = l_Lean_IR_ToIR_bindVar(x_4238, x_3064, x_4, x_5, x_4104); +x_4240 = lean_ctor_get(x_4239, 0); +lean_inc(x_4240); +x_4241 = lean_ctor_get(x_4239, 1); +lean_inc(x_4241); +lean_dec(x_4239); +x_4242 = lean_ctor_get(x_4240, 0); +lean_inc(x_4242); +x_4243 = lean_ctor_get(x_4240, 1); +lean_inc(x_4243); +lean_dec(x_4240); +x_4244 = lean_ctor_get(x_1, 2); +lean_inc(x_4244); +lean_inc(x_5); +lean_inc(x_4); +x_4245 = l_Lean_IR_ToIR_lowerType(x_4244, x_4243, x_4, x_5, x_4241); +if (lean_obj_tag(x_4245) == 0) +{ +lean_object* x_4246; lean_object* x_4247; lean_object* x_4248; lean_object* x_4249; lean_object* x_4250; +x_4246 = lean_ctor_get(x_4245, 0); +lean_inc(x_4246); +x_4247 = lean_ctor_get(x_4245, 1); +lean_inc(x_4247); +lean_dec(x_4245); +x_4248 = lean_ctor_get(x_4246, 0); +lean_inc(x_4248); +x_4249 = lean_ctor_get(x_4246, 1); +lean_inc(x_4249); +lean_dec(x_4246); +lean_inc(x_5); +lean_inc(x_4); +lean_inc(x_2); +x_4250 = l_Lean_IR_ToIR_lowerLet___lambda__1(x_2, x_4242, x_4099, x_4248, x_4249, x_4, x_5, x_4247); +if (lean_obj_tag(x_4250) == 0) +{ +lean_object* x_4251; lean_object* x_4252; lean_object* x_4253; lean_object* x_4254; lean_object* x_4255; lean_object* x_4256; lean_object* x_4257; +x_4251 = lean_ctor_get(x_4250, 0); +lean_inc(x_4251); +x_4252 = lean_ctor_get(x_4250, 1); +lean_inc(x_4252); +lean_dec(x_4250); +x_4253 = lean_ctor_get(x_4251, 0); +lean_inc(x_4253); +x_4254 = lean_ctor_get(x_4251, 1); +lean_inc(x_4254); +if (lean_is_exclusive(x_4251)) { + 
lean_ctor_release(x_4251, 0); + lean_ctor_release(x_4251, 1); + x_4255 = x_4251; +} else { + lean_dec_ref(x_4251); + x_4255 = lean_box(0); +} +x_4256 = lean_alloc_ctor(1, 1, 0); +lean_ctor_set(x_4256, 0, x_4253); +if (lean_is_scalar(x_4255)) { + x_4257 = lean_alloc_ctor(0, 2, 0); +} else { + x_4257 = x_4255; +} +lean_ctor_set(x_4257, 0, x_4256); +lean_ctor_set(x_4257, 1, x_4254); +x_4068 = x_4257; +x_4069 = x_4252; +goto block_4098; +} +else +{ +lean_object* x_4258; lean_object* x_4259; lean_object* x_4260; lean_object* x_4261; +lean_dec(x_3069); +lean_dec(x_3058); +lean_dec(x_153); +lean_dec(x_5); +lean_dec(x_4); +lean_dec(x_2); +lean_dec(x_1); +x_4258 = lean_ctor_get(x_4250, 0); +lean_inc(x_4258); +x_4259 = lean_ctor_get(x_4250, 1); +lean_inc(x_4259); +if (lean_is_exclusive(x_4250)) { + lean_ctor_release(x_4250, 0); + lean_ctor_release(x_4250, 1); + x_4260 = x_4250; +} else { + lean_dec_ref(x_4250); + x_4260 = lean_box(0); +} +if (lean_is_scalar(x_4260)) { + x_4261 = lean_alloc_ctor(1, 2, 0); +} else { + x_4261 = x_4260; +} +lean_ctor_set(x_4261, 0, x_4258); +lean_ctor_set(x_4261, 1, x_4259); +return x_4261; +} +} +else +{ +lean_object* x_4262; lean_object* x_4263; lean_object* x_4264; lean_object* x_4265; +lean_dec(x_4242); +lean_dec(x_4099); +lean_dec(x_3069); +lean_dec(x_3058); +lean_dec(x_153); +lean_dec(x_5); +lean_dec(x_4); +lean_dec(x_2); +lean_dec(x_1); +x_4262 = lean_ctor_get(x_4245, 0); +lean_inc(x_4262); +x_4263 = lean_ctor_get(x_4245, 1); +lean_inc(x_4263); +if (lean_is_exclusive(x_4245)) { + lean_ctor_release(x_4245, 0); + lean_ctor_release(x_4245, 1); + x_4264 = x_4245; +} else { + lean_dec_ref(x_4245); + x_4264 = lean_box(0); +} +if (lean_is_scalar(x_4264)) { + x_4265 = lean_alloc_ctor(1, 2, 0); +} else { + x_4265 = x_4264; +} +lean_ctor_set(x_4265, 0, x_4262); +lean_ctor_set(x_4265, 1, x_4263); +return x_4265; +} +} +} +else +{ +lean_object* x_4266; lean_object* x_4267; lean_object* x_4268; lean_object* x_4269; lean_object* x_4270; lean_object* x_4271; lean_object* x_4272; lean_object* x_4273; +lean_dec(x_4199); +lean_dec(x_4197); +lean_inc(x_3058); +lean_inc(x_153); +lean_ctor_set_tag(x_4099, 7); +lean_ctor_set(x_4099, 1, x_3058); +lean_ctor_set(x_4099, 0, x_153); +x_4266 = lean_ctor_get(x_1, 0); +lean_inc(x_4266); +x_4267 = l_Lean_IR_ToIR_bindVar(x_4266, x_3064, x_4, x_5, x_4104); +x_4268 = lean_ctor_get(x_4267, 0); +lean_inc(x_4268); +x_4269 = lean_ctor_get(x_4267, 1); +lean_inc(x_4269); +lean_dec(x_4267); +x_4270 = lean_ctor_get(x_4268, 0); +lean_inc(x_4270); +x_4271 = lean_ctor_get(x_4268, 1); +lean_inc(x_4271); +lean_dec(x_4268); +x_4272 = lean_box(7); +lean_inc(x_5); +lean_inc(x_4); +lean_inc(x_2); +x_4273 = l_Lean_IR_ToIR_lowerLet___lambda__1(x_2, x_4270, x_4099, x_4272, x_4271, x_4, x_5, x_4269); +if (lean_obj_tag(x_4273) == 0) +{ +lean_object* x_4274; lean_object* x_4275; lean_object* x_4276; lean_object* x_4277; lean_object* x_4278; lean_object* x_4279; lean_object* x_4280; +x_4274 = lean_ctor_get(x_4273, 0); +lean_inc(x_4274); +x_4275 = lean_ctor_get(x_4273, 1); +lean_inc(x_4275); +lean_dec(x_4273); +x_4276 = lean_ctor_get(x_4274, 0); +lean_inc(x_4276); +x_4277 = lean_ctor_get(x_4274, 1); +lean_inc(x_4277); +if (lean_is_exclusive(x_4274)) { + lean_ctor_release(x_4274, 0); + lean_ctor_release(x_4274, 1); + x_4278 = x_4274; +} else { + lean_dec_ref(x_4274); + x_4278 = lean_box(0); +} +x_4279 = lean_alloc_ctor(1, 1, 0); +lean_ctor_set(x_4279, 0, x_4276); +if (lean_is_scalar(x_4278)) { + x_4280 = lean_alloc_ctor(0, 2, 0); +} else { + x_4280 = x_4278; +} 
+lean_ctor_set(x_4280, 0, x_4279); +lean_ctor_set(x_4280, 1, x_4277); +x_4068 = x_4280; +x_4069 = x_4275; +goto block_4098; +} +else +{ +lean_object* x_4281; lean_object* x_4282; lean_object* x_4283; lean_object* x_4284; +lean_dec(x_3069); +lean_dec(x_3058); +lean_dec(x_153); +lean_dec(x_5); +lean_dec(x_4); +lean_dec(x_2); +lean_dec(x_1); +x_4281 = lean_ctor_get(x_4273, 0); +lean_inc(x_4281); +x_4282 = lean_ctor_get(x_4273, 1); +lean_inc(x_4282); +if (lean_is_exclusive(x_4273)) { + lean_ctor_release(x_4273, 0); + lean_ctor_release(x_4273, 1); + x_4283 = x_4273; +} else { + lean_dec_ref(x_4273); + x_4283 = lean_box(0); +} +if (lean_is_scalar(x_4283)) { + x_4284 = lean_alloc_ctor(1, 2, 0); +} else { + x_4284 = x_4283; +} +lean_ctor_set(x_4284, 0, x_4281); +lean_ctor_set(x_4284, 1, x_4282); +return x_4284; +} +} +} +} +else +{ +lean_object* x_4285; lean_object* x_4286; lean_object* x_4287; lean_object* x_4288; lean_object* x_4289; lean_object* x_4290; uint8_t x_4291; +x_4285 = lean_ctor_get(x_4099, 1); +lean_inc(x_4285); +lean_dec(x_4099); +x_4286 = lean_ctor_get(x_4100, 0); +lean_inc(x_4286); +if (lean_is_exclusive(x_4100)) { + lean_ctor_release(x_4100, 0); + x_4287 = x_4100; +} else { + lean_dec_ref(x_4100); + x_4287 = lean_box(0); +} +x_4288 = lean_array_get_size(x_3058); +x_4289 = lean_ctor_get(x_4286, 3); +lean_inc(x_4289); +lean_dec(x_4286); +x_4290 = lean_array_get_size(x_4289); +lean_dec(x_4289); +x_4291 = lean_nat_dec_lt(x_4288, x_4290); +if (x_4291 == 0) +{ +uint8_t x_4292; +x_4292 = lean_nat_dec_eq(x_4288, x_4290); +if (x_4292 == 0) +{ +lean_object* x_4293; lean_object* x_4294; lean_object* x_4295; lean_object* x_4296; lean_object* x_4297; lean_object* x_4298; lean_object* x_4299; lean_object* x_4300; lean_object* x_4301; lean_object* x_4302; lean_object* x_4303; lean_object* x_4304; lean_object* x_4305; lean_object* x_4306; lean_object* x_4307; lean_object* x_4308; lean_object* x_4309; +x_4293 = lean_unsigned_to_nat(0u); +x_4294 = l_Array_extract___rarg(x_3058, x_4293, x_4290); +x_4295 = l_Array_extract___rarg(x_3058, x_4290, x_4288); +lean_dec(x_4288); +lean_inc(x_153); +x_4296 = lean_alloc_ctor(6, 2, 0); +lean_ctor_set(x_4296, 0, x_153); +lean_ctor_set(x_4296, 1, x_4294); +x_4297 = lean_ctor_get(x_1, 0); +lean_inc(x_4297); +x_4298 = l_Lean_IR_ToIR_bindVar(x_4297, x_3064, x_4, x_5, x_4285); +x_4299 = lean_ctor_get(x_4298, 0); +lean_inc(x_4299); +x_4300 = lean_ctor_get(x_4298, 1); +lean_inc(x_4300); +lean_dec(x_4298); +x_4301 = lean_ctor_get(x_4299, 0); +lean_inc(x_4301); +x_4302 = lean_ctor_get(x_4299, 1); +lean_inc(x_4302); +lean_dec(x_4299); +x_4303 = l_Lean_IR_ToIR_newVar(x_4302, x_4, x_5, x_4300); +x_4304 = lean_ctor_get(x_4303, 0); +lean_inc(x_4304); +x_4305 = lean_ctor_get(x_4303, 1); +lean_inc(x_4305); +lean_dec(x_4303); +x_4306 = lean_ctor_get(x_4304, 0); +lean_inc(x_4306); +x_4307 = lean_ctor_get(x_4304, 1); +lean_inc(x_4307); +lean_dec(x_4304); +x_4308 = lean_ctor_get(x_1, 2); +lean_inc(x_4308); +lean_inc(x_5); +lean_inc(x_4); +x_4309 = l_Lean_IR_ToIR_lowerType(x_4308, x_4307, x_4, x_5, x_4305); +if (lean_obj_tag(x_4309) == 0) +{ +lean_object* x_4310; lean_object* x_4311; lean_object* x_4312; lean_object* x_4313; lean_object* x_4314; +x_4310 = lean_ctor_get(x_4309, 0); +lean_inc(x_4310); +x_4311 = lean_ctor_get(x_4309, 1); +lean_inc(x_4311); +lean_dec(x_4309); +x_4312 = lean_ctor_get(x_4310, 0); +lean_inc(x_4312); +x_4313 = lean_ctor_get(x_4310, 1); +lean_inc(x_4313); +lean_dec(x_4310); +lean_inc(x_5); +lean_inc(x_4); +lean_inc(x_2); +x_4314 = 
l_Lean_IR_ToIR_lowerLet___lambda__3(x_2, x_4306, x_4295, x_4301, x_4296, x_4312, x_4313, x_4, x_5, x_4311); +if (lean_obj_tag(x_4314) == 0) +{ +lean_object* x_4315; lean_object* x_4316; lean_object* x_4317; lean_object* x_4318; lean_object* x_4319; lean_object* x_4320; lean_object* x_4321; +x_4315 = lean_ctor_get(x_4314, 0); +lean_inc(x_4315); +x_4316 = lean_ctor_get(x_4314, 1); +lean_inc(x_4316); +lean_dec(x_4314); +x_4317 = lean_ctor_get(x_4315, 0); +lean_inc(x_4317); +x_4318 = lean_ctor_get(x_4315, 1); +lean_inc(x_4318); +if (lean_is_exclusive(x_4315)) { + lean_ctor_release(x_4315, 0); + lean_ctor_release(x_4315, 1); + x_4319 = x_4315; +} else { + lean_dec_ref(x_4315); + x_4319 = lean_box(0); +} +if (lean_is_scalar(x_4287)) { + x_4320 = lean_alloc_ctor(1, 1, 0); +} else { + x_4320 = x_4287; +} +lean_ctor_set(x_4320, 0, x_4317); +if (lean_is_scalar(x_4319)) { + x_4321 = lean_alloc_ctor(0, 2, 0); +} else { + x_4321 = x_4319; +} +lean_ctor_set(x_4321, 0, x_4320); +lean_ctor_set(x_4321, 1, x_4318); +x_4068 = x_4321; +x_4069 = x_4316; +goto block_4098; +} +else +{ +lean_object* x_4322; lean_object* x_4323; lean_object* x_4324; lean_object* x_4325; +lean_dec(x_4287); +lean_dec(x_3069); +lean_dec(x_3058); +lean_dec(x_153); +lean_dec(x_5); +lean_dec(x_4); +lean_dec(x_2); +lean_dec(x_1); +x_4322 = lean_ctor_get(x_4314, 0); +lean_inc(x_4322); +x_4323 = lean_ctor_get(x_4314, 1); +lean_inc(x_4323); +if (lean_is_exclusive(x_4314)) { + lean_ctor_release(x_4314, 0); + lean_ctor_release(x_4314, 1); + x_4324 = x_4314; +} else { + lean_dec_ref(x_4314); + x_4324 = lean_box(0); +} +if (lean_is_scalar(x_4324)) { + x_4325 = lean_alloc_ctor(1, 2, 0); +} else { + x_4325 = x_4324; +} +lean_ctor_set(x_4325, 0, x_4322); +lean_ctor_set(x_4325, 1, x_4323); +return x_4325; +} +} +else +{ +lean_object* x_4326; lean_object* x_4327; lean_object* x_4328; lean_object* x_4329; +lean_dec(x_4306); +lean_dec(x_4301); +lean_dec(x_4296); +lean_dec(x_4295); +lean_dec(x_4287); +lean_dec(x_3069); +lean_dec(x_3058); +lean_dec(x_153); +lean_dec(x_5); +lean_dec(x_4); +lean_dec(x_2); +lean_dec(x_1); +x_4326 = lean_ctor_get(x_4309, 0); +lean_inc(x_4326); +x_4327 = lean_ctor_get(x_4309, 1); +lean_inc(x_4327); +if (lean_is_exclusive(x_4309)) { + lean_ctor_release(x_4309, 0); + lean_ctor_release(x_4309, 1); + x_4328 = x_4309; +} else { + lean_dec_ref(x_4309); + x_4328 = lean_box(0); +} +if (lean_is_scalar(x_4328)) { + x_4329 = lean_alloc_ctor(1, 2, 0); +} else { + x_4329 = x_4328; +} +lean_ctor_set(x_4329, 0, x_4326); +lean_ctor_set(x_4329, 1, x_4327); +return x_4329; +} +} +else +{ +lean_object* x_4330; lean_object* x_4331; lean_object* x_4332; lean_object* x_4333; lean_object* x_4334; lean_object* x_4335; lean_object* x_4336; lean_object* x_4337; lean_object* x_4338; +lean_dec(x_4290); +lean_dec(x_4288); +lean_inc(x_3058); +lean_inc(x_153); +x_4330 = lean_alloc_ctor(6, 2, 0); +lean_ctor_set(x_4330, 0, x_153); +lean_ctor_set(x_4330, 1, x_3058); +x_4331 = lean_ctor_get(x_1, 0); +lean_inc(x_4331); +x_4332 = l_Lean_IR_ToIR_bindVar(x_4331, x_3064, x_4, x_5, x_4285); +x_4333 = lean_ctor_get(x_4332, 0); +lean_inc(x_4333); +x_4334 = lean_ctor_get(x_4332, 1); +lean_inc(x_4334); +lean_dec(x_4332); +x_4335 = lean_ctor_get(x_4333, 0); +lean_inc(x_4335); +x_4336 = lean_ctor_get(x_4333, 1); +lean_inc(x_4336); +lean_dec(x_4333); +x_4337 = lean_ctor_get(x_1, 2); +lean_inc(x_4337); +lean_inc(x_5); +lean_inc(x_4); +x_4338 = l_Lean_IR_ToIR_lowerType(x_4337, x_4336, x_4, x_5, x_4334); +if (lean_obj_tag(x_4338) == 0) +{ +lean_object* x_4339; lean_object* 
x_4340; lean_object* x_4341; lean_object* x_4342; lean_object* x_4343; +x_4339 = lean_ctor_get(x_4338, 0); +lean_inc(x_4339); +x_4340 = lean_ctor_get(x_4338, 1); +lean_inc(x_4340); +lean_dec(x_4338); +x_4341 = lean_ctor_get(x_4339, 0); +lean_inc(x_4341); +x_4342 = lean_ctor_get(x_4339, 1); +lean_inc(x_4342); +lean_dec(x_4339); +lean_inc(x_5); +lean_inc(x_4); +lean_inc(x_2); +x_4343 = l_Lean_IR_ToIR_lowerLet___lambda__1(x_2, x_4335, x_4330, x_4341, x_4342, x_4, x_5, x_4340); +if (lean_obj_tag(x_4343) == 0) +{ +lean_object* x_4344; lean_object* x_4345; lean_object* x_4346; lean_object* x_4347; lean_object* x_4348; lean_object* x_4349; lean_object* x_4350; +x_4344 = lean_ctor_get(x_4343, 0); +lean_inc(x_4344); +x_4345 = lean_ctor_get(x_4343, 1); +lean_inc(x_4345); +lean_dec(x_4343); +x_4346 = lean_ctor_get(x_4344, 0); +lean_inc(x_4346); +x_4347 = lean_ctor_get(x_4344, 1); +lean_inc(x_4347); +if (lean_is_exclusive(x_4344)) { + lean_ctor_release(x_4344, 0); + lean_ctor_release(x_4344, 1); + x_4348 = x_4344; +} else { + lean_dec_ref(x_4344); + x_4348 = lean_box(0); +} +if (lean_is_scalar(x_4287)) { + x_4349 = lean_alloc_ctor(1, 1, 0); +} else { + x_4349 = x_4287; +} +lean_ctor_set(x_4349, 0, x_4346); +if (lean_is_scalar(x_4348)) { + x_4350 = lean_alloc_ctor(0, 2, 0); +} else { + x_4350 = x_4348; +} +lean_ctor_set(x_4350, 0, x_4349); +lean_ctor_set(x_4350, 1, x_4347); +x_4068 = x_4350; +x_4069 = x_4345; +goto block_4098; +} +else +{ +lean_object* x_4351; lean_object* x_4352; lean_object* x_4353; lean_object* x_4354; +lean_dec(x_4287); +lean_dec(x_3069); +lean_dec(x_3058); +lean_dec(x_153); +lean_dec(x_5); +lean_dec(x_4); +lean_dec(x_2); +lean_dec(x_1); +x_4351 = lean_ctor_get(x_4343, 0); +lean_inc(x_4351); +x_4352 = lean_ctor_get(x_4343, 1); +lean_inc(x_4352); +if (lean_is_exclusive(x_4343)) { + lean_ctor_release(x_4343, 0); + lean_ctor_release(x_4343, 1); + x_4353 = x_4343; +} else { + lean_dec_ref(x_4343); + x_4353 = lean_box(0); +} +if (lean_is_scalar(x_4353)) { + x_4354 = lean_alloc_ctor(1, 2, 0); +} else { + x_4354 = x_4353; +} +lean_ctor_set(x_4354, 0, x_4351); +lean_ctor_set(x_4354, 1, x_4352); +return x_4354; +} +} +else +{ +lean_object* x_4355; lean_object* x_4356; lean_object* x_4357; lean_object* x_4358; +lean_dec(x_4335); +lean_dec(x_4330); +lean_dec(x_4287); +lean_dec(x_3069); +lean_dec(x_3058); +lean_dec(x_153); +lean_dec(x_5); +lean_dec(x_4); +lean_dec(x_2); +lean_dec(x_1); +x_4355 = lean_ctor_get(x_4338, 0); +lean_inc(x_4355); +x_4356 = lean_ctor_get(x_4338, 1); +lean_inc(x_4356); +if (lean_is_exclusive(x_4338)) { + lean_ctor_release(x_4338, 0); + lean_ctor_release(x_4338, 1); + x_4357 = x_4338; +} else { + lean_dec_ref(x_4338); + x_4357 = lean_box(0); +} +if (lean_is_scalar(x_4357)) { + x_4358 = lean_alloc_ctor(1, 2, 0); +} else { + x_4358 = x_4357; +} +lean_ctor_set(x_4358, 0, x_4355); +lean_ctor_set(x_4358, 1, x_4356); +return x_4358; +} +} +} +else +{ +lean_object* x_4359; lean_object* x_4360; lean_object* x_4361; lean_object* x_4362; lean_object* x_4363; lean_object* x_4364; lean_object* x_4365; lean_object* x_4366; lean_object* x_4367; +lean_dec(x_4290); +lean_dec(x_4288); +lean_inc(x_3058); +lean_inc(x_153); +x_4359 = lean_alloc_ctor(7, 2, 0); +lean_ctor_set(x_4359, 0, x_153); +lean_ctor_set(x_4359, 1, x_3058); +x_4360 = lean_ctor_get(x_1, 0); +lean_inc(x_4360); +x_4361 = l_Lean_IR_ToIR_bindVar(x_4360, x_3064, x_4, x_5, x_4285); +x_4362 = lean_ctor_get(x_4361, 0); +lean_inc(x_4362); +x_4363 = lean_ctor_get(x_4361, 1); +lean_inc(x_4363); +lean_dec(x_4361); +x_4364 = 
lean_ctor_get(x_4362, 0); +lean_inc(x_4364); +x_4365 = lean_ctor_get(x_4362, 1); +lean_inc(x_4365); +lean_dec(x_4362); +x_4366 = lean_box(7); +lean_inc(x_5); +lean_inc(x_4); +lean_inc(x_2); +x_4367 = l_Lean_IR_ToIR_lowerLet___lambda__1(x_2, x_4364, x_4359, x_4366, x_4365, x_4, x_5, x_4363); +if (lean_obj_tag(x_4367) == 0) +{ +lean_object* x_4368; lean_object* x_4369; lean_object* x_4370; lean_object* x_4371; lean_object* x_4372; lean_object* x_4373; lean_object* x_4374; +x_4368 = lean_ctor_get(x_4367, 0); +lean_inc(x_4368); +x_4369 = lean_ctor_get(x_4367, 1); +lean_inc(x_4369); +lean_dec(x_4367); +x_4370 = lean_ctor_get(x_4368, 0); +lean_inc(x_4370); +x_4371 = lean_ctor_get(x_4368, 1); +lean_inc(x_4371); +if (lean_is_exclusive(x_4368)) { + lean_ctor_release(x_4368, 0); + lean_ctor_release(x_4368, 1); + x_4372 = x_4368; +} else { + lean_dec_ref(x_4368); + x_4372 = lean_box(0); +} +if (lean_is_scalar(x_4287)) { + x_4373 = lean_alloc_ctor(1, 1, 0); +} else { + x_4373 = x_4287; +} +lean_ctor_set(x_4373, 0, x_4370); +if (lean_is_scalar(x_4372)) { + x_4374 = lean_alloc_ctor(0, 2, 0); +} else { + x_4374 = x_4372; +} +lean_ctor_set(x_4374, 0, x_4373); +lean_ctor_set(x_4374, 1, x_4371); +x_4068 = x_4374; +x_4069 = x_4369; +goto block_4098; +} +else +{ +lean_object* x_4375; lean_object* x_4376; lean_object* x_4377; lean_object* x_4378; +lean_dec(x_4287); +lean_dec(x_3069); +lean_dec(x_3058); +lean_dec(x_153); +lean_dec(x_5); +lean_dec(x_4); +lean_dec(x_2); +lean_dec(x_1); +x_4375 = lean_ctor_get(x_4367, 0); +lean_inc(x_4375); +x_4376 = lean_ctor_get(x_4367, 1); +lean_inc(x_4376); +if (lean_is_exclusive(x_4367)) { + lean_ctor_release(x_4367, 0); + lean_ctor_release(x_4367, 1); + x_4377 = x_4367; +} else { + lean_dec_ref(x_4367); + x_4377 = lean_box(0); +} +if (lean_is_scalar(x_4377)) { + x_4378 = lean_alloc_ctor(1, 2, 0); +} else { + x_4378 = x_4377; +} +lean_ctor_set(x_4378, 0, x_4375); +lean_ctor_set(x_4378, 1, x_4376); +return x_4378; +} +} +} +} +block_4098: +{ +lean_object* x_4070; +x_4070 = lean_ctor_get(x_4068, 0); +lean_inc(x_4070); +if (lean_obj_tag(x_4070) == 0) +{ +lean_object* x_4071; lean_object* x_4072; lean_object* x_4073; lean_object* x_4074; lean_object* x_4075; lean_object* x_4076; lean_object* x_4077; lean_object* x_4078; lean_object* x_4079; lean_object* x_4080; +lean_dec(x_3069); +x_4071 = lean_ctor_get(x_4068, 1); +lean_inc(x_4071); +lean_dec(x_4068); +x_4072 = lean_alloc_ctor(6, 2, 0); +lean_ctor_set(x_4072, 0, x_153); +lean_ctor_set(x_4072, 1, x_3058); +x_4073 = lean_ctor_get(x_1, 0); +lean_inc(x_4073); +x_4074 = l_Lean_IR_ToIR_bindVar(x_4073, x_4071, x_4, x_5, x_4069); +x_4075 = lean_ctor_get(x_4074, 0); +lean_inc(x_4075); +x_4076 = lean_ctor_get(x_4074, 1); +lean_inc(x_4076); +lean_dec(x_4074); +x_4077 = lean_ctor_get(x_4075, 0); +lean_inc(x_4077); +x_4078 = lean_ctor_get(x_4075, 1); +lean_inc(x_4078); +lean_dec(x_4075); +x_4079 = lean_ctor_get(x_1, 2); +lean_inc(x_4079); +lean_dec(x_1); +lean_inc(x_5); +lean_inc(x_4); +x_4080 = l_Lean_IR_ToIR_lowerType(x_4079, x_4078, x_4, x_5, x_4076); +if (lean_obj_tag(x_4080) == 0) +{ +lean_object* x_4081; lean_object* x_4082; lean_object* x_4083; lean_object* x_4084; lean_object* x_4085; +x_4081 = lean_ctor_get(x_4080, 0); +lean_inc(x_4081); +x_4082 = lean_ctor_get(x_4080, 1); +lean_inc(x_4082); +lean_dec(x_4080); +x_4083 = lean_ctor_get(x_4081, 0); +lean_inc(x_4083); +x_4084 = lean_ctor_get(x_4081, 1); +lean_inc(x_4084); +lean_dec(x_4081); +x_4085 = l_Lean_IR_ToIR_lowerLet___lambda__1(x_2, x_4077, x_4072, x_4083, x_4084, x_4, x_5, 
x_4082); +return x_4085; +} +else +{ +uint8_t x_4086; +lean_dec(x_4077); +lean_dec(x_4072); +lean_dec(x_5); +lean_dec(x_4); +lean_dec(x_2); +x_4086 = !lean_is_exclusive(x_4080); +if (x_4086 == 0) +{ +return x_4080; +} +else +{ +lean_object* x_4087; lean_object* x_4088; lean_object* x_4089; +x_4087 = lean_ctor_get(x_4080, 0); +x_4088 = lean_ctor_get(x_4080, 1); +lean_inc(x_4088); +lean_inc(x_4087); +lean_dec(x_4080); +x_4089 = lean_alloc_ctor(1, 2, 0); +lean_ctor_set(x_4089, 0, x_4087); +lean_ctor_set(x_4089, 1, x_4088); +return x_4089; +} +} +} +else +{ +uint8_t x_4090; +lean_dec(x_3058); +lean_dec(x_153); +lean_dec(x_5); +lean_dec(x_4); +lean_dec(x_2); +lean_dec(x_1); +x_4090 = !lean_is_exclusive(x_4068); +if (x_4090 == 0) +{ +lean_object* x_4091; lean_object* x_4092; lean_object* x_4093; +x_4091 = lean_ctor_get(x_4068, 0); +lean_dec(x_4091); +x_4092 = lean_ctor_get(x_4070, 0); +lean_inc(x_4092); +lean_dec(x_4070); +lean_ctor_set(x_4068, 0, x_4092); +if (lean_is_scalar(x_3069)) { + x_4093 = lean_alloc_ctor(0, 2, 0); +} else { + x_4093 = x_3069; +} +lean_ctor_set(x_4093, 0, x_4068); +lean_ctor_set(x_4093, 1, x_4069); +return x_4093; +} +else +{ +lean_object* x_4094; lean_object* x_4095; lean_object* x_4096; lean_object* x_4097; +x_4094 = lean_ctor_get(x_4068, 1); +lean_inc(x_4094); +lean_dec(x_4068); +x_4095 = lean_ctor_get(x_4070, 0); +lean_inc(x_4095); +lean_dec(x_4070); +x_4096 = lean_alloc_ctor(0, 2, 0); +lean_ctor_set(x_4096, 0, x_4095); +lean_ctor_set(x_4096, 1, x_4094); +if (lean_is_scalar(x_3069)) { + x_4097 = lean_alloc_ctor(0, 2, 0); +} else { + x_4097 = x_3069; +} +lean_ctor_set(x_4097, 0, x_4096); +lean_ctor_set(x_4097, 1, x_4069); +return x_4097; +} +} +} +} +} +default: +{ +uint8_t x_4379; +lean_dec(x_3070); +lean_dec(x_3069); +lean_free_object(x_3060); +lean_dec(x_3058); +lean_dec(x_3050); +lean_dec(x_3049); +lean_dec(x_2); +lean_dec(x_1); +x_4379 = !lean_is_exclusive(x_3075); +if (x_4379 == 0) +{ +lean_object* x_4380; uint8_t x_4381; lean_object* x_4382; lean_object* x_4383; lean_object* x_4384; lean_object* x_4385; lean_object* x_4386; lean_object* x_4387; lean_object* x_4388; lean_object* x_4389; +x_4380 = lean_ctor_get(x_3075, 0); +lean_dec(x_4380); +x_4381 = 1; +x_4382 = l_Lean_IR_ToIR_lowerLet___closed__14; +x_4383 = l_Lean_Name_toString(x_153, x_4381, x_4382); +lean_ctor_set_tag(x_3075, 3); +lean_ctor_set(x_3075, 0, x_4383); +x_4384 = l_Lean_IR_ToIR_lowerLet___closed__29; +x_4385 = lean_alloc_ctor(5, 2, 0); +lean_ctor_set(x_4385, 0, x_4384); +lean_ctor_set(x_4385, 1, x_3075); +x_4386 = l_Lean_IR_ToIR_lowerLet___closed__31; +x_4387 = lean_alloc_ctor(5, 2, 0); +lean_ctor_set(x_4387, 0, x_4385); +lean_ctor_set(x_4387, 1, x_4386); +x_4388 = l_Lean_MessageData_ofFormat(x_4387); +x_4389 = l_Lean_throwError___at_Lean_IR_ToIR_lowerLet___spec__1(x_4388, x_3064, x_4, x_5, x_3068); +lean_dec(x_5); +lean_dec(x_4); +lean_dec(x_3064); +return x_4389; +} +else +{ +uint8_t x_4390; lean_object* x_4391; lean_object* x_4392; lean_object* x_4393; lean_object* x_4394; lean_object* x_4395; lean_object* x_4396; lean_object* x_4397; lean_object* x_4398; lean_object* x_4399; +lean_dec(x_3075); +x_4390 = 1; +x_4391 = l_Lean_IR_ToIR_lowerLet___closed__14; +x_4392 = l_Lean_Name_toString(x_153, x_4390, x_4391); +x_4393 = lean_alloc_ctor(3, 1, 0); +lean_ctor_set(x_4393, 0, x_4392); +x_4394 = l_Lean_IR_ToIR_lowerLet___closed__29; +x_4395 = lean_alloc_ctor(5, 2, 0); +lean_ctor_set(x_4395, 0, x_4394); +lean_ctor_set(x_4395, 1, x_4393); +x_4396 = l_Lean_IR_ToIR_lowerLet___closed__31; +x_4397 = 
lean_alloc_ctor(5, 2, 0); +lean_ctor_set(x_4397, 0, x_4395); +lean_ctor_set(x_4397, 1, x_4396); +x_4398 = l_Lean_MessageData_ofFormat(x_4397); +x_4399 = l_Lean_throwError___at_Lean_IR_ToIR_lowerLet___spec__1(x_4398, x_3064, x_4, x_5, x_3068); +lean_dec(x_5); +lean_dec(x_4); +lean_dec(x_3064); +return x_4399; +} +} +} +} +} +else +{ +lean_object* x_4400; lean_object* x_4401; lean_object* x_4402; lean_object* x_4403; lean_object* x_4404; lean_object* x_4405; uint8_t x_4406; lean_object* x_4407; +x_4400 = lean_ctor_get(x_3060, 1); +lean_inc(x_4400); +lean_dec(x_3060); +x_4401 = lean_st_ref_get(x_5, x_3061); +x_4402 = lean_ctor_get(x_4401, 0); +lean_inc(x_4402); +x_4403 = lean_ctor_get(x_4401, 1); +lean_inc(x_4403); +if (lean_is_exclusive(x_4401)) { + lean_ctor_release(x_4401, 0); + lean_ctor_release(x_4401, 1); + x_4404 = x_4401; +} else { + lean_dec_ref(x_4401); + x_4404 = lean_box(0); +} +x_4405 = lean_ctor_get(x_4402, 0); +lean_inc(x_4405); +lean_dec(x_4402); +x_4406 = 0; +lean_inc(x_153); +lean_inc(x_4405); +x_4407 = l_Lean_Environment_find_x3f(x_4405, x_153, x_4406); +if (lean_obj_tag(x_4407) == 0) +{ +lean_object* x_4408; lean_object* x_4409; +lean_dec(x_4405); +lean_dec(x_4404); +lean_dec(x_3058); +lean_dec(x_3050); +lean_dec(x_3049); +lean_dec(x_153); +lean_dec(x_2); +lean_dec(x_1); +x_4408 = l_Lean_IR_ToIR_lowerLet___closed__6; +x_4409 = l_panic___at_Lean_IR_ToIR_lowerAlt_loop___spec__1(x_4408, x_4400, x_4, x_5, x_4403); +return x_4409; +} +else +{ +lean_object* x_4410; +x_4410 = lean_ctor_get(x_4407, 0); +lean_inc(x_4410); +lean_dec(x_4407); +switch (lean_obj_tag(x_4410)) { +case 0: +{ +lean_object* x_4411; lean_object* x_4412; uint8_t x_4413; +lean_dec(x_4405); +lean_dec(x_3050); +lean_dec(x_3049); +if (lean_is_exclusive(x_4410)) { + lean_ctor_release(x_4410, 0); + x_4411 = x_4410; +} else { + lean_dec_ref(x_4410); + x_4411 = lean_box(0); +} +x_4412 = l_Lean_IR_ToIR_lowerLet___closed__9; +x_4413 = lean_name_eq(x_153, x_4412); +if (x_4413 == 0) +{ +lean_object* x_4414; uint8_t x_4415; +x_4414 = l_Lean_IR_ToIR_lowerLet___closed__11; +x_4415 = lean_name_eq(x_153, x_4414); +if (x_4415 == 0) +{ +lean_object* x_4416; lean_object* x_4417; lean_object* x_4418; +lean_dec(x_4404); +lean_inc(x_153); +x_4416 = l_Lean_IR_ToIR_findDecl(x_153, x_4400, x_4, x_5, x_4403); +x_4417 = lean_ctor_get(x_4416, 0); +lean_inc(x_4417); +x_4418 = lean_ctor_get(x_4417, 0); +lean_inc(x_4418); +if (lean_obj_tag(x_4418) == 0) +{ +lean_object* x_4419; lean_object* x_4420; lean_object* x_4421; lean_object* x_4422; uint8_t x_4423; lean_object* x_4424; lean_object* x_4425; lean_object* x_4426; lean_object* x_4427; lean_object* x_4428; lean_object* x_4429; lean_object* x_4430; lean_object* x_4431; lean_object* x_4432; +lean_dec(x_3058); +lean_dec(x_2); +lean_dec(x_1); +x_4419 = lean_ctor_get(x_4416, 1); +lean_inc(x_4419); +if (lean_is_exclusive(x_4416)) { + lean_ctor_release(x_4416, 0); + lean_ctor_release(x_4416, 1); + x_4420 = x_4416; +} else { + lean_dec_ref(x_4416); + x_4420 = lean_box(0); +} +x_4421 = lean_ctor_get(x_4417, 1); +lean_inc(x_4421); +if (lean_is_exclusive(x_4417)) { + lean_ctor_release(x_4417, 0); + lean_ctor_release(x_4417, 1); + x_4422 = x_4417; +} else { + lean_dec_ref(x_4417); + x_4422 = lean_box(0); +} +x_4423 = 1; +x_4424 = l_Lean_IR_ToIR_lowerLet___closed__14; +x_4425 = l_Lean_Name_toString(x_153, x_4423, x_4424); +if (lean_is_scalar(x_4411)) { + x_4426 = lean_alloc_ctor(3, 1, 0); +} else { + x_4426 = x_4411; + lean_ctor_set_tag(x_4426, 3); +} +lean_ctor_set(x_4426, 0, x_4425); +x_4427 = 
l_Lean_IR_ToIR_lowerLet___closed__13; +if (lean_is_scalar(x_4422)) { + x_4428 = lean_alloc_ctor(5, 2, 0); +} else { + x_4428 = x_4422; + lean_ctor_set_tag(x_4428, 5); +} +lean_ctor_set(x_4428, 0, x_4427); +lean_ctor_set(x_4428, 1, x_4426); +x_4429 = l_Lean_IR_ToIR_lowerLet___closed__16; +if (lean_is_scalar(x_4420)) { + x_4430 = lean_alloc_ctor(5, 2, 0); +} else { + x_4430 = x_4420; + lean_ctor_set_tag(x_4430, 5); +} +lean_ctor_set(x_4430, 0, x_4428); +lean_ctor_set(x_4430, 1, x_4429); +x_4431 = l_Lean_MessageData_ofFormat(x_4430); +x_4432 = l_Lean_throwError___at_Lean_IR_ToIR_lowerLet___spec__1(x_4431, x_4421, x_4, x_5, x_4419); +lean_dec(x_5); +lean_dec(x_4); +lean_dec(x_4421); +return x_4432; +} +else +{ +lean_object* x_4433; lean_object* x_4434; lean_object* x_4435; lean_object* x_4436; lean_object* x_4437; lean_object* x_4438; lean_object* x_4439; uint8_t x_4440; +lean_dec(x_4411); +x_4433 = lean_ctor_get(x_4416, 1); +lean_inc(x_4433); +lean_dec(x_4416); +x_4434 = lean_ctor_get(x_4417, 1); +lean_inc(x_4434); +if (lean_is_exclusive(x_4417)) { + lean_ctor_release(x_4417, 0); + lean_ctor_release(x_4417, 1); + x_4435 = x_4417; +} else { + lean_dec_ref(x_4417); + x_4435 = lean_box(0); +} +x_4436 = lean_ctor_get(x_4418, 0); +lean_inc(x_4436); +lean_dec(x_4418); +x_4437 = lean_array_get_size(x_3058); +x_4438 = l_Lean_IR_Decl_params(x_4436); +lean_dec(x_4436); +x_4439 = lean_array_get_size(x_4438); +lean_dec(x_4438); +x_4440 = lean_nat_dec_lt(x_4437, x_4439); +if (x_4440 == 0) +{ +uint8_t x_4441; +x_4441 = lean_nat_dec_eq(x_4437, x_4439); +if (x_4441 == 0) +{ +lean_object* x_4442; lean_object* x_4443; lean_object* x_4444; lean_object* x_4445; lean_object* x_4446; lean_object* x_4447; lean_object* x_4448; lean_object* x_4449; lean_object* x_4450; lean_object* x_4451; lean_object* x_4452; lean_object* x_4453; lean_object* x_4454; lean_object* x_4455; lean_object* x_4456; lean_object* x_4457; lean_object* x_4458; +x_4442 = lean_unsigned_to_nat(0u); +x_4443 = l_Array_extract___rarg(x_3058, x_4442, x_4439); +x_4444 = l_Array_extract___rarg(x_3058, x_4439, x_4437); +lean_dec(x_4437); +lean_dec(x_3058); +if (lean_is_scalar(x_4435)) { + x_4445 = lean_alloc_ctor(6, 2, 0); +} else { + x_4445 = x_4435; + lean_ctor_set_tag(x_4445, 6); +} +lean_ctor_set(x_4445, 0, x_153); +lean_ctor_set(x_4445, 1, x_4443); +x_4446 = lean_ctor_get(x_1, 0); +lean_inc(x_4446); +x_4447 = l_Lean_IR_ToIR_bindVar(x_4446, x_4434, x_4, x_5, x_4433); +x_4448 = lean_ctor_get(x_4447, 0); +lean_inc(x_4448); +x_4449 = lean_ctor_get(x_4447, 1); +lean_inc(x_4449); +lean_dec(x_4447); +x_4450 = lean_ctor_get(x_4448, 0); +lean_inc(x_4450); +x_4451 = lean_ctor_get(x_4448, 1); +lean_inc(x_4451); +lean_dec(x_4448); +x_4452 = l_Lean_IR_ToIR_newVar(x_4451, x_4, x_5, x_4449); +x_4453 = lean_ctor_get(x_4452, 0); +lean_inc(x_4453); +x_4454 = lean_ctor_get(x_4452, 1); +lean_inc(x_4454); +lean_dec(x_4452); +x_4455 = lean_ctor_get(x_4453, 0); +lean_inc(x_4455); +x_4456 = lean_ctor_get(x_4453, 1); +lean_inc(x_4456); +lean_dec(x_4453); +x_4457 = lean_ctor_get(x_1, 2); +lean_inc(x_4457); +lean_dec(x_1); +lean_inc(x_5); +lean_inc(x_4); +x_4458 = l_Lean_IR_ToIR_lowerType(x_4457, x_4456, x_4, x_5, x_4454); +if (lean_obj_tag(x_4458) == 0) +{ +lean_object* x_4459; lean_object* x_4460; lean_object* x_4461; lean_object* x_4462; lean_object* x_4463; +x_4459 = lean_ctor_get(x_4458, 0); +lean_inc(x_4459); +x_4460 = lean_ctor_get(x_4458, 1); +lean_inc(x_4460); +lean_dec(x_4458); +x_4461 = lean_ctor_get(x_4459, 0); +lean_inc(x_4461); +x_4462 = lean_ctor_get(x_4459, 
1); +lean_inc(x_4462); +lean_dec(x_4459); +x_4463 = l_Lean_IR_ToIR_lowerLet___lambda__3(x_2, x_4455, x_4444, x_4450, x_4445, x_4461, x_4462, x_4, x_5, x_4460); +return x_4463; +} +else +{ +lean_object* x_4464; lean_object* x_4465; lean_object* x_4466; lean_object* x_4467; +lean_dec(x_4455); +lean_dec(x_4450); +lean_dec(x_4445); +lean_dec(x_4444); +lean_dec(x_5); +lean_dec(x_4); +lean_dec(x_2); +x_4464 = lean_ctor_get(x_4458, 0); +lean_inc(x_4464); +x_4465 = lean_ctor_get(x_4458, 1); +lean_inc(x_4465); +if (lean_is_exclusive(x_4458)) { + lean_ctor_release(x_4458, 0); + lean_ctor_release(x_4458, 1); + x_4466 = x_4458; +} else { + lean_dec_ref(x_4458); + x_4466 = lean_box(0); +} +if (lean_is_scalar(x_4466)) { + x_4467 = lean_alloc_ctor(1, 2, 0); +} else { + x_4467 = x_4466; +} +lean_ctor_set(x_4467, 0, x_4464); +lean_ctor_set(x_4467, 1, x_4465); +return x_4467; +} +} +else +{ +lean_object* x_4468; lean_object* x_4469; lean_object* x_4470; lean_object* x_4471; lean_object* x_4472; lean_object* x_4473; lean_object* x_4474; lean_object* x_4475; lean_object* x_4476; +lean_dec(x_4439); +lean_dec(x_4437); +if (lean_is_scalar(x_4435)) { + x_4468 = lean_alloc_ctor(6, 2, 0); +} else { + x_4468 = x_4435; + lean_ctor_set_tag(x_4468, 6); +} +lean_ctor_set(x_4468, 0, x_153); +lean_ctor_set(x_4468, 1, x_3058); +x_4469 = lean_ctor_get(x_1, 0); +lean_inc(x_4469); +x_4470 = l_Lean_IR_ToIR_bindVar(x_4469, x_4434, x_4, x_5, x_4433); +x_4471 = lean_ctor_get(x_4470, 0); +lean_inc(x_4471); +x_4472 = lean_ctor_get(x_4470, 1); +lean_inc(x_4472); +lean_dec(x_4470); +x_4473 = lean_ctor_get(x_4471, 0); +lean_inc(x_4473); +x_4474 = lean_ctor_get(x_4471, 1); +lean_inc(x_4474); +lean_dec(x_4471); +x_4475 = lean_ctor_get(x_1, 2); +lean_inc(x_4475); +lean_dec(x_1); +lean_inc(x_5); +lean_inc(x_4); +x_4476 = l_Lean_IR_ToIR_lowerType(x_4475, x_4474, x_4, x_5, x_4472); +if (lean_obj_tag(x_4476) == 0) +{ +lean_object* x_4477; lean_object* x_4478; lean_object* x_4479; lean_object* x_4480; lean_object* x_4481; +x_4477 = lean_ctor_get(x_4476, 0); +lean_inc(x_4477); +x_4478 = lean_ctor_get(x_4476, 1); +lean_inc(x_4478); +lean_dec(x_4476); +x_4479 = lean_ctor_get(x_4477, 0); +lean_inc(x_4479); +x_4480 = lean_ctor_get(x_4477, 1); +lean_inc(x_4480); +lean_dec(x_4477); +x_4481 = l_Lean_IR_ToIR_lowerLet___lambda__1(x_2, x_4473, x_4468, x_4479, x_4480, x_4, x_5, x_4478); +return x_4481; +} +else +{ +lean_object* x_4482; lean_object* x_4483; lean_object* x_4484; lean_object* x_4485; +lean_dec(x_4473); +lean_dec(x_4468); +lean_dec(x_5); +lean_dec(x_4); +lean_dec(x_2); +x_4482 = lean_ctor_get(x_4476, 0); +lean_inc(x_4482); +x_4483 = lean_ctor_get(x_4476, 1); +lean_inc(x_4483); +if (lean_is_exclusive(x_4476)) { + lean_ctor_release(x_4476, 0); + lean_ctor_release(x_4476, 1); + x_4484 = x_4476; +} else { + lean_dec_ref(x_4476); + x_4484 = lean_box(0); +} +if (lean_is_scalar(x_4484)) { + x_4485 = lean_alloc_ctor(1, 2, 0); +} else { + x_4485 = x_4484; +} +lean_ctor_set(x_4485, 0, x_4482); +lean_ctor_set(x_4485, 1, x_4483); +return x_4485; +} +} +} +else +{ +lean_object* x_4486; lean_object* x_4487; lean_object* x_4488; lean_object* x_4489; lean_object* x_4490; lean_object* x_4491; lean_object* x_4492; lean_object* x_4493; lean_object* x_4494; +lean_dec(x_4439); +lean_dec(x_4437); +if (lean_is_scalar(x_4435)) { + x_4486 = lean_alloc_ctor(7, 2, 0); +} else { + x_4486 = x_4435; + lean_ctor_set_tag(x_4486, 7); +} +lean_ctor_set(x_4486, 0, x_153); +lean_ctor_set(x_4486, 1, x_3058); +x_4487 = lean_ctor_get(x_1, 0); +lean_inc(x_4487); +lean_dec(x_1); 
+x_4488 = l_Lean_IR_ToIR_bindVar(x_4487, x_4434, x_4, x_5, x_4433); +x_4489 = lean_ctor_get(x_4488, 0); +lean_inc(x_4489); +x_4490 = lean_ctor_get(x_4488, 1); +lean_inc(x_4490); +lean_dec(x_4488); +x_4491 = lean_ctor_get(x_4489, 0); +lean_inc(x_4491); +x_4492 = lean_ctor_get(x_4489, 1); +lean_inc(x_4492); +lean_dec(x_4489); +x_4493 = lean_box(7); +x_4494 = l_Lean_IR_ToIR_lowerLet___lambda__1(x_2, x_4491, x_4486, x_4493, x_4492, x_4, x_5, x_4490); +return x_4494; +} +} +} +else +{ +lean_object* x_4495; lean_object* x_4496; lean_object* x_4497; +lean_dec(x_4411); +lean_dec(x_3058); +lean_dec(x_153); +lean_dec(x_5); +lean_dec(x_4); +lean_dec(x_2); +lean_dec(x_1); +x_4495 = lean_box(13); +x_4496 = lean_alloc_ctor(0, 2, 0); +lean_ctor_set(x_4496, 0, x_4495); +lean_ctor_set(x_4496, 1, x_4400); +if (lean_is_scalar(x_4404)) { + x_4497 = lean_alloc_ctor(0, 2, 0); +} else { + x_4497 = x_4404; +} +lean_ctor_set(x_4497, 0, x_4496); +lean_ctor_set(x_4497, 1, x_4403); +return x_4497; +} +} +else +{ +lean_object* x_4498; lean_object* x_4499; lean_object* x_4500; +lean_dec(x_4411); +lean_dec(x_4404); +lean_dec(x_153); +x_4498 = l_Lean_IR_instInhabitedArg; +x_4499 = lean_unsigned_to_nat(2u); +x_4500 = lean_array_get(x_4498, x_3058, x_4499); +lean_dec(x_3058); +if (lean_obj_tag(x_4500) == 0) +{ +lean_object* x_4501; lean_object* x_4502; lean_object* x_4503; lean_object* x_4504; lean_object* x_4505; lean_object* x_4506; lean_object* x_4507; +x_4501 = lean_ctor_get(x_4500, 0); +lean_inc(x_4501); +lean_dec(x_4500); +x_4502 = lean_ctor_get(x_1, 0); +lean_inc(x_4502); +lean_dec(x_1); +x_4503 = l_Lean_IR_ToIR_bindVarToVarId(x_4502, x_4501, x_4400, x_4, x_5, x_4403); +x_4504 = lean_ctor_get(x_4503, 0); +lean_inc(x_4504); +x_4505 = lean_ctor_get(x_4503, 1); +lean_inc(x_4505); +lean_dec(x_4503); +x_4506 = lean_ctor_get(x_4504, 1); +lean_inc(x_4506); +lean_dec(x_4504); +x_4507 = l_Lean_IR_ToIR_lowerCode(x_2, x_4506, x_4, x_5, x_4505); +return x_4507; +} +else +{ +lean_object* x_4508; lean_object* x_4509; lean_object* x_4510; lean_object* x_4511; lean_object* x_4512; lean_object* x_4513; +x_4508 = lean_ctor_get(x_1, 0); +lean_inc(x_4508); +lean_dec(x_1); +x_4509 = l_Lean_IR_ToIR_bindErased(x_4508, x_4400, x_4, x_5, x_4403); +x_4510 = lean_ctor_get(x_4509, 0); +lean_inc(x_4510); +x_4511 = lean_ctor_get(x_4509, 1); +lean_inc(x_4511); +lean_dec(x_4509); +x_4512 = lean_ctor_get(x_4510, 1); +lean_inc(x_4512); +lean_dec(x_4510); +x_4513 = l_Lean_IR_ToIR_lowerCode(x_2, x_4512, x_4, x_5, x_4511); +return x_4513; +} +} +} +case 1: +{ +lean_object* x_4514; lean_object* x_4515; lean_object* x_4542; lean_object* x_4543; +lean_dec(x_4410); +lean_dec(x_4405); +lean_dec(x_3050); +lean_dec(x_3049); +lean_inc(x_153); +x_4542 = l_Lean_Compiler_LCNF_getMonoDecl_x3f(x_153, x_4, x_5, x_4403); +x_4543 = lean_ctor_get(x_4542, 0); +lean_inc(x_4543); +if (lean_obj_tag(x_4543) == 0) +{ +lean_object* x_4544; lean_object* x_4545; lean_object* x_4546; +x_4544 = lean_ctor_get(x_4542, 1); +lean_inc(x_4544); +lean_dec(x_4542); +x_4545 = lean_box(0); +x_4546 = lean_alloc_ctor(0, 2, 0); +lean_ctor_set(x_4546, 0, x_4545); +lean_ctor_set(x_4546, 1, x_4400); +x_4514 = x_4546; +x_4515 = x_4544; +goto block_4541; +} +else +{ +lean_object* x_4547; lean_object* x_4548; lean_object* x_4549; lean_object* x_4550; lean_object* x_4551; lean_object* x_4552; lean_object* x_4553; uint8_t x_4554; +x_4547 = lean_ctor_get(x_4542, 1); +lean_inc(x_4547); +if (lean_is_exclusive(x_4542)) { + lean_ctor_release(x_4542, 0); + lean_ctor_release(x_4542, 1); + x_4548 = x_4542; 
+} else { + lean_dec_ref(x_4542); + x_4548 = lean_box(0); +} +x_4549 = lean_ctor_get(x_4543, 0); +lean_inc(x_4549); +if (lean_is_exclusive(x_4543)) { + lean_ctor_release(x_4543, 0); + x_4550 = x_4543; +} else { + lean_dec_ref(x_4543); + x_4550 = lean_box(0); +} +x_4551 = lean_array_get_size(x_3058); +x_4552 = lean_ctor_get(x_4549, 3); +lean_inc(x_4552); +lean_dec(x_4549); +x_4553 = lean_array_get_size(x_4552); +lean_dec(x_4552); +x_4554 = lean_nat_dec_lt(x_4551, x_4553); +if (x_4554 == 0) +{ +uint8_t x_4555; +x_4555 = lean_nat_dec_eq(x_4551, x_4553); +if (x_4555 == 0) +{ +lean_object* x_4556; lean_object* x_4557; lean_object* x_4558; lean_object* x_4559; lean_object* x_4560; lean_object* x_4561; lean_object* x_4562; lean_object* x_4563; lean_object* x_4564; lean_object* x_4565; lean_object* x_4566; lean_object* x_4567; lean_object* x_4568; lean_object* x_4569; lean_object* x_4570; lean_object* x_4571; lean_object* x_4572; +x_4556 = lean_unsigned_to_nat(0u); +x_4557 = l_Array_extract___rarg(x_3058, x_4556, x_4553); +x_4558 = l_Array_extract___rarg(x_3058, x_4553, x_4551); +lean_dec(x_4551); +lean_inc(x_153); +if (lean_is_scalar(x_4548)) { + x_4559 = lean_alloc_ctor(6, 2, 0); +} else { + x_4559 = x_4548; + lean_ctor_set_tag(x_4559, 6); +} +lean_ctor_set(x_4559, 0, x_153); +lean_ctor_set(x_4559, 1, x_4557); +x_4560 = lean_ctor_get(x_1, 0); +lean_inc(x_4560); +x_4561 = l_Lean_IR_ToIR_bindVar(x_4560, x_4400, x_4, x_5, x_4547); +x_4562 = lean_ctor_get(x_4561, 0); +lean_inc(x_4562); +x_4563 = lean_ctor_get(x_4561, 1); +lean_inc(x_4563); +lean_dec(x_4561); +x_4564 = lean_ctor_get(x_4562, 0); +lean_inc(x_4564); +x_4565 = lean_ctor_get(x_4562, 1); +lean_inc(x_4565); +lean_dec(x_4562); +x_4566 = l_Lean_IR_ToIR_newVar(x_4565, x_4, x_5, x_4563); +x_4567 = lean_ctor_get(x_4566, 0); +lean_inc(x_4567); +x_4568 = lean_ctor_get(x_4566, 1); +lean_inc(x_4568); +lean_dec(x_4566); +x_4569 = lean_ctor_get(x_4567, 0); +lean_inc(x_4569); +x_4570 = lean_ctor_get(x_4567, 1); +lean_inc(x_4570); +lean_dec(x_4567); +x_4571 = lean_ctor_get(x_1, 2); +lean_inc(x_4571); +lean_inc(x_5); +lean_inc(x_4); +x_4572 = l_Lean_IR_ToIR_lowerType(x_4571, x_4570, x_4, x_5, x_4568); +if (lean_obj_tag(x_4572) == 0) +{ +lean_object* x_4573; lean_object* x_4574; lean_object* x_4575; lean_object* x_4576; lean_object* x_4577; +x_4573 = lean_ctor_get(x_4572, 0); +lean_inc(x_4573); +x_4574 = lean_ctor_get(x_4572, 1); +lean_inc(x_4574); +lean_dec(x_4572); +x_4575 = lean_ctor_get(x_4573, 0); +lean_inc(x_4575); +x_4576 = lean_ctor_get(x_4573, 1); +lean_inc(x_4576); +lean_dec(x_4573); +lean_inc(x_5); +lean_inc(x_4); +lean_inc(x_2); +x_4577 = l_Lean_IR_ToIR_lowerLet___lambda__3(x_2, x_4569, x_4558, x_4564, x_4559, x_4575, x_4576, x_4, x_5, x_4574); +if (lean_obj_tag(x_4577) == 0) +{ +lean_object* x_4578; lean_object* x_4579; lean_object* x_4580; lean_object* x_4581; lean_object* x_4582; lean_object* x_4583; lean_object* x_4584; +x_4578 = lean_ctor_get(x_4577, 0); +lean_inc(x_4578); +x_4579 = lean_ctor_get(x_4577, 1); +lean_inc(x_4579); +lean_dec(x_4577); +x_4580 = lean_ctor_get(x_4578, 0); +lean_inc(x_4580); +x_4581 = lean_ctor_get(x_4578, 1); +lean_inc(x_4581); +if (lean_is_exclusive(x_4578)) { + lean_ctor_release(x_4578, 0); + lean_ctor_release(x_4578, 1); + x_4582 = x_4578; +} else { + lean_dec_ref(x_4578); + x_4582 = lean_box(0); +} +if (lean_is_scalar(x_4550)) { + x_4583 = lean_alloc_ctor(1, 1, 0); +} else { + x_4583 = x_4550; +} +lean_ctor_set(x_4583, 0, x_4580); +if (lean_is_scalar(x_4582)) { + x_4584 = lean_alloc_ctor(0, 2, 0); +} else { + 
x_4584 = x_4582; +} +lean_ctor_set(x_4584, 0, x_4583); +lean_ctor_set(x_4584, 1, x_4581); +x_4514 = x_4584; +x_4515 = x_4579; +goto block_4541; +} +else +{ +lean_object* x_4585; lean_object* x_4586; lean_object* x_4587; lean_object* x_4588; +lean_dec(x_4550); +lean_dec(x_4404); +lean_dec(x_3058); +lean_dec(x_153); +lean_dec(x_5); +lean_dec(x_4); +lean_dec(x_2); +lean_dec(x_1); +x_4585 = lean_ctor_get(x_4577, 0); +lean_inc(x_4585); +x_4586 = lean_ctor_get(x_4577, 1); +lean_inc(x_4586); +if (lean_is_exclusive(x_4577)) { + lean_ctor_release(x_4577, 0); + lean_ctor_release(x_4577, 1); + x_4587 = x_4577; +} else { + lean_dec_ref(x_4577); + x_4587 = lean_box(0); +} +if (lean_is_scalar(x_4587)) { + x_4588 = lean_alloc_ctor(1, 2, 0); +} else { + x_4588 = x_4587; +} +lean_ctor_set(x_4588, 0, x_4585); +lean_ctor_set(x_4588, 1, x_4586); +return x_4588; +} +} +else +{ +lean_object* x_4589; lean_object* x_4590; lean_object* x_4591; lean_object* x_4592; +lean_dec(x_4569); +lean_dec(x_4564); +lean_dec(x_4559); +lean_dec(x_4558); +lean_dec(x_4550); +lean_dec(x_4404); +lean_dec(x_3058); +lean_dec(x_153); +lean_dec(x_5); +lean_dec(x_4); +lean_dec(x_2); +lean_dec(x_1); +x_4589 = lean_ctor_get(x_4572, 0); +lean_inc(x_4589); +x_4590 = lean_ctor_get(x_4572, 1); +lean_inc(x_4590); +if (lean_is_exclusive(x_4572)) { + lean_ctor_release(x_4572, 0); + lean_ctor_release(x_4572, 1); + x_4591 = x_4572; +} else { + lean_dec_ref(x_4572); + x_4591 = lean_box(0); +} +if (lean_is_scalar(x_4591)) { + x_4592 = lean_alloc_ctor(1, 2, 0); +} else { + x_4592 = x_4591; +} +lean_ctor_set(x_4592, 0, x_4589); +lean_ctor_set(x_4592, 1, x_4590); +return x_4592; +} +} +else +{ +lean_object* x_4593; lean_object* x_4594; lean_object* x_4595; lean_object* x_4596; lean_object* x_4597; lean_object* x_4598; lean_object* x_4599; lean_object* x_4600; lean_object* x_4601; +lean_dec(x_4553); +lean_dec(x_4551); +lean_inc(x_3058); +lean_inc(x_153); +if (lean_is_scalar(x_4548)) { + x_4593 = lean_alloc_ctor(6, 2, 0); +} else { + x_4593 = x_4548; + lean_ctor_set_tag(x_4593, 6); +} +lean_ctor_set(x_4593, 0, x_153); +lean_ctor_set(x_4593, 1, x_3058); +x_4594 = lean_ctor_get(x_1, 0); +lean_inc(x_4594); +x_4595 = l_Lean_IR_ToIR_bindVar(x_4594, x_4400, x_4, x_5, x_4547); +x_4596 = lean_ctor_get(x_4595, 0); +lean_inc(x_4596); +x_4597 = lean_ctor_get(x_4595, 1); +lean_inc(x_4597); +lean_dec(x_4595); +x_4598 = lean_ctor_get(x_4596, 0); +lean_inc(x_4598); +x_4599 = lean_ctor_get(x_4596, 1); +lean_inc(x_4599); +lean_dec(x_4596); +x_4600 = lean_ctor_get(x_1, 2); +lean_inc(x_4600); +lean_inc(x_5); +lean_inc(x_4); +x_4601 = l_Lean_IR_ToIR_lowerType(x_4600, x_4599, x_4, x_5, x_4597); +if (lean_obj_tag(x_4601) == 0) +{ +lean_object* x_4602; lean_object* x_4603; lean_object* x_4604; lean_object* x_4605; lean_object* x_4606; +x_4602 = lean_ctor_get(x_4601, 0); +lean_inc(x_4602); +x_4603 = lean_ctor_get(x_4601, 1); +lean_inc(x_4603); +lean_dec(x_4601); +x_4604 = lean_ctor_get(x_4602, 0); +lean_inc(x_4604); +x_4605 = lean_ctor_get(x_4602, 1); +lean_inc(x_4605); +lean_dec(x_4602); +lean_inc(x_5); +lean_inc(x_4); +lean_inc(x_2); +x_4606 = l_Lean_IR_ToIR_lowerLet___lambda__1(x_2, x_4598, x_4593, x_4604, x_4605, x_4, x_5, x_4603); +if (lean_obj_tag(x_4606) == 0) +{ +lean_object* x_4607; lean_object* x_4608; lean_object* x_4609; lean_object* x_4610; lean_object* x_4611; lean_object* x_4612; lean_object* x_4613; +x_4607 = lean_ctor_get(x_4606, 0); +lean_inc(x_4607); +x_4608 = lean_ctor_get(x_4606, 1); +lean_inc(x_4608); +lean_dec(x_4606); +x_4609 = lean_ctor_get(x_4607, 0); 
+lean_inc(x_4609); +x_4610 = lean_ctor_get(x_4607, 1); +lean_inc(x_4610); +if (lean_is_exclusive(x_4607)) { + lean_ctor_release(x_4607, 0); + lean_ctor_release(x_4607, 1); + x_4611 = x_4607; +} else { + lean_dec_ref(x_4607); + x_4611 = lean_box(0); +} +if (lean_is_scalar(x_4550)) { + x_4612 = lean_alloc_ctor(1, 1, 0); +} else { + x_4612 = x_4550; +} +lean_ctor_set(x_4612, 0, x_4609); +if (lean_is_scalar(x_4611)) { + x_4613 = lean_alloc_ctor(0, 2, 0); +} else { + x_4613 = x_4611; +} +lean_ctor_set(x_4613, 0, x_4612); +lean_ctor_set(x_4613, 1, x_4610); +x_4514 = x_4613; +x_4515 = x_4608; +goto block_4541; +} +else +{ +lean_object* x_4614; lean_object* x_4615; lean_object* x_4616; lean_object* x_4617; +lean_dec(x_4550); +lean_dec(x_4404); +lean_dec(x_3058); +lean_dec(x_153); +lean_dec(x_5); +lean_dec(x_4); +lean_dec(x_2); +lean_dec(x_1); +x_4614 = lean_ctor_get(x_4606, 0); +lean_inc(x_4614); +x_4615 = lean_ctor_get(x_4606, 1); +lean_inc(x_4615); +if (lean_is_exclusive(x_4606)) { + lean_ctor_release(x_4606, 0); + lean_ctor_release(x_4606, 1); + x_4616 = x_4606; +} else { + lean_dec_ref(x_4606); + x_4616 = lean_box(0); +} +if (lean_is_scalar(x_4616)) { + x_4617 = lean_alloc_ctor(1, 2, 0); +} else { + x_4617 = x_4616; +} +lean_ctor_set(x_4617, 0, x_4614); +lean_ctor_set(x_4617, 1, x_4615); +return x_4617; +} +} +else +{ +lean_object* x_4618; lean_object* x_4619; lean_object* x_4620; lean_object* x_4621; +lean_dec(x_4598); +lean_dec(x_4593); +lean_dec(x_4550); +lean_dec(x_4404); +lean_dec(x_3058); +lean_dec(x_153); +lean_dec(x_5); +lean_dec(x_4); +lean_dec(x_2); +lean_dec(x_1); +x_4618 = lean_ctor_get(x_4601, 0); +lean_inc(x_4618); +x_4619 = lean_ctor_get(x_4601, 1); +lean_inc(x_4619); +if (lean_is_exclusive(x_4601)) { + lean_ctor_release(x_4601, 0); + lean_ctor_release(x_4601, 1); + x_4620 = x_4601; +} else { + lean_dec_ref(x_4601); + x_4620 = lean_box(0); +} +if (lean_is_scalar(x_4620)) { + x_4621 = lean_alloc_ctor(1, 2, 0); +} else { + x_4621 = x_4620; +} +lean_ctor_set(x_4621, 0, x_4618); +lean_ctor_set(x_4621, 1, x_4619); +return x_4621; +} +} +} +else +{ +lean_object* x_4622; lean_object* x_4623; lean_object* x_4624; lean_object* x_4625; lean_object* x_4626; lean_object* x_4627; lean_object* x_4628; lean_object* x_4629; lean_object* x_4630; +lean_dec(x_4553); +lean_dec(x_4551); +lean_inc(x_3058); +lean_inc(x_153); +if (lean_is_scalar(x_4548)) { + x_4622 = lean_alloc_ctor(7, 2, 0); +} else { + x_4622 = x_4548; + lean_ctor_set_tag(x_4622, 7); +} +lean_ctor_set(x_4622, 0, x_153); +lean_ctor_set(x_4622, 1, x_3058); +x_4623 = lean_ctor_get(x_1, 0); +lean_inc(x_4623); +x_4624 = l_Lean_IR_ToIR_bindVar(x_4623, x_4400, x_4, x_5, x_4547); +x_4625 = lean_ctor_get(x_4624, 0); +lean_inc(x_4625); +x_4626 = lean_ctor_get(x_4624, 1); +lean_inc(x_4626); +lean_dec(x_4624); +x_4627 = lean_ctor_get(x_4625, 0); +lean_inc(x_4627); +x_4628 = lean_ctor_get(x_4625, 1); +lean_inc(x_4628); +lean_dec(x_4625); +x_4629 = lean_box(7); +lean_inc(x_5); +lean_inc(x_4); +lean_inc(x_2); +x_4630 = l_Lean_IR_ToIR_lowerLet___lambda__1(x_2, x_4627, x_4622, x_4629, x_4628, x_4, x_5, x_4626); +if (lean_obj_tag(x_4630) == 0) +{ +lean_object* x_4631; lean_object* x_4632; lean_object* x_4633; lean_object* x_4634; lean_object* x_4635; lean_object* x_4636; lean_object* x_4637; +x_4631 = lean_ctor_get(x_4630, 0); +lean_inc(x_4631); +x_4632 = lean_ctor_get(x_4630, 1); +lean_inc(x_4632); +lean_dec(x_4630); +x_4633 = lean_ctor_get(x_4631, 0); +lean_inc(x_4633); +x_4634 = lean_ctor_get(x_4631, 1); +lean_inc(x_4634); +if 
(lean_is_exclusive(x_4631)) { + lean_ctor_release(x_4631, 0); + lean_ctor_release(x_4631, 1); + x_4635 = x_4631; +} else { + lean_dec_ref(x_4631); + x_4635 = lean_box(0); +} +if (lean_is_scalar(x_4550)) { + x_4636 = lean_alloc_ctor(1, 1, 0); +} else { + x_4636 = x_4550; +} +lean_ctor_set(x_4636, 0, x_4633); +if (lean_is_scalar(x_4635)) { + x_4637 = lean_alloc_ctor(0, 2, 0); +} else { + x_4637 = x_4635; +} +lean_ctor_set(x_4637, 0, x_4636); +lean_ctor_set(x_4637, 1, x_4634); +x_4514 = x_4637; +x_4515 = x_4632; +goto block_4541; +} +else +{ +lean_object* x_4638; lean_object* x_4639; lean_object* x_4640; lean_object* x_4641; +lean_dec(x_4550); +lean_dec(x_4404); +lean_dec(x_3058); +lean_dec(x_153); +lean_dec(x_5); +lean_dec(x_4); +lean_dec(x_2); +lean_dec(x_1); +x_4638 = lean_ctor_get(x_4630, 0); +lean_inc(x_4638); +x_4639 = lean_ctor_get(x_4630, 1); +lean_inc(x_4639); +if (lean_is_exclusive(x_4630)) { + lean_ctor_release(x_4630, 0); + lean_ctor_release(x_4630, 1); + x_4640 = x_4630; +} else { + lean_dec_ref(x_4630); + x_4640 = lean_box(0); +} +if (lean_is_scalar(x_4640)) { + x_4641 = lean_alloc_ctor(1, 2, 0); +} else { + x_4641 = x_4640; +} +lean_ctor_set(x_4641, 0, x_4638); +lean_ctor_set(x_4641, 1, x_4639); +return x_4641; +} +} +} +block_4541: +{ +lean_object* x_4516; +x_4516 = lean_ctor_get(x_4514, 0); +lean_inc(x_4516); +if (lean_obj_tag(x_4516) == 0) +{ +lean_object* x_4517; lean_object* x_4518; lean_object* x_4519; lean_object* x_4520; lean_object* x_4521; lean_object* x_4522; lean_object* x_4523; lean_object* x_4524; lean_object* x_4525; lean_object* x_4526; +lean_dec(x_4404); +x_4517 = lean_ctor_get(x_4514, 1); +lean_inc(x_4517); +lean_dec(x_4514); +x_4518 = lean_alloc_ctor(6, 2, 0); +lean_ctor_set(x_4518, 0, x_153); +lean_ctor_set(x_4518, 1, x_3058); +x_4519 = lean_ctor_get(x_1, 0); +lean_inc(x_4519); +x_4520 = l_Lean_IR_ToIR_bindVar(x_4519, x_4517, x_4, x_5, x_4515); +x_4521 = lean_ctor_get(x_4520, 0); +lean_inc(x_4521); +x_4522 = lean_ctor_get(x_4520, 1); +lean_inc(x_4522); +lean_dec(x_4520); +x_4523 = lean_ctor_get(x_4521, 0); +lean_inc(x_4523); +x_4524 = lean_ctor_get(x_4521, 1); +lean_inc(x_4524); +lean_dec(x_4521); +x_4525 = lean_ctor_get(x_1, 2); +lean_inc(x_4525); +lean_dec(x_1); +lean_inc(x_5); +lean_inc(x_4); +x_4526 = l_Lean_IR_ToIR_lowerType(x_4525, x_4524, x_4, x_5, x_4522); +if (lean_obj_tag(x_4526) == 0) +{ +lean_object* x_4527; lean_object* x_4528; lean_object* x_4529; lean_object* x_4530; lean_object* x_4531; +x_4527 = lean_ctor_get(x_4526, 0); +lean_inc(x_4527); +x_4528 = lean_ctor_get(x_4526, 1); +lean_inc(x_4528); +lean_dec(x_4526); +x_4529 = lean_ctor_get(x_4527, 0); +lean_inc(x_4529); +x_4530 = lean_ctor_get(x_4527, 1); +lean_inc(x_4530); +lean_dec(x_4527); +x_4531 = l_Lean_IR_ToIR_lowerLet___lambda__1(x_2, x_4523, x_4518, x_4529, x_4530, x_4, x_5, x_4528); +return x_4531; +} +else +{ +lean_object* x_4532; lean_object* x_4533; lean_object* x_4534; lean_object* x_4535; +lean_dec(x_4523); +lean_dec(x_4518); +lean_dec(x_5); +lean_dec(x_4); +lean_dec(x_2); +x_4532 = lean_ctor_get(x_4526, 0); +lean_inc(x_4532); +x_4533 = lean_ctor_get(x_4526, 1); +lean_inc(x_4533); +if (lean_is_exclusive(x_4526)) { + lean_ctor_release(x_4526, 0); + lean_ctor_release(x_4526, 1); + x_4534 = x_4526; +} else { + lean_dec_ref(x_4526); + x_4534 = lean_box(0); +} +if (lean_is_scalar(x_4534)) { + x_4535 = lean_alloc_ctor(1, 2, 0); +} else { + x_4535 = x_4534; +} +lean_ctor_set(x_4535, 0, x_4532); +lean_ctor_set(x_4535, 1, x_4533); +return x_4535; +} +} +else +{ +lean_object* x_4536; 
lean_object* x_4537; lean_object* x_4538; lean_object* x_4539; lean_object* x_4540; +lean_dec(x_3058); +lean_dec(x_153); +lean_dec(x_5); +lean_dec(x_4); +lean_dec(x_2); +lean_dec(x_1); +x_4536 = lean_ctor_get(x_4514, 1); +lean_inc(x_4536); +if (lean_is_exclusive(x_4514)) { + lean_ctor_release(x_4514, 0); + lean_ctor_release(x_4514, 1); + x_4537 = x_4514; +} else { + lean_dec_ref(x_4514); + x_4537 = lean_box(0); +} +x_4538 = lean_ctor_get(x_4516, 0); +lean_inc(x_4538); +lean_dec(x_4516); +if (lean_is_scalar(x_4537)) { + x_4539 = lean_alloc_ctor(0, 2, 0); +} else { + x_4539 = x_4537; +} +lean_ctor_set(x_4539, 0, x_4538); +lean_ctor_set(x_4539, 1, x_4536); +if (lean_is_scalar(x_4404)) { + x_4540 = lean_alloc_ctor(0, 2, 0); +} else { + x_4540 = x_4404; +} +lean_ctor_set(x_4540, 0, x_4539); +lean_ctor_set(x_4540, 1, x_4515); +return x_4540; +} +} +} +case 2: +{ +lean_object* x_4642; lean_object* x_4643; +lean_dec(x_4410); +lean_dec(x_4405); +lean_dec(x_4404); +lean_dec(x_3058); +lean_dec(x_3050); +lean_dec(x_3049); +lean_dec(x_153); +lean_dec(x_2); +lean_dec(x_1); +x_4642 = l_Lean_IR_ToIR_lowerLet___closed__18; +x_4643 = l_panic___at_Lean_IR_ToIR_lowerAlt_loop___spec__1(x_4642, x_4400, x_4, x_5, x_4403); +return x_4643; +} +case 3: +{ +lean_object* x_4644; lean_object* x_4645; lean_object* x_4672; lean_object* x_4673; +lean_dec(x_4410); +lean_dec(x_4405); +lean_dec(x_3050); +lean_dec(x_3049); +lean_inc(x_153); +x_4672 = l_Lean_Compiler_LCNF_getMonoDecl_x3f(x_153, x_4, x_5, x_4403); +x_4673 = lean_ctor_get(x_4672, 0); +lean_inc(x_4673); +if (lean_obj_tag(x_4673) == 0) +{ +lean_object* x_4674; lean_object* x_4675; lean_object* x_4676; +x_4674 = lean_ctor_get(x_4672, 1); +lean_inc(x_4674); +lean_dec(x_4672); +x_4675 = lean_box(0); +x_4676 = lean_alloc_ctor(0, 2, 0); +lean_ctor_set(x_4676, 0, x_4675); +lean_ctor_set(x_4676, 1, x_4400); +x_4644 = x_4676; +x_4645 = x_4674; +goto block_4671; +} +else +{ +lean_object* x_4677; lean_object* x_4678; lean_object* x_4679; lean_object* x_4680; lean_object* x_4681; lean_object* x_4682; lean_object* x_4683; uint8_t x_4684; +x_4677 = lean_ctor_get(x_4672, 1); +lean_inc(x_4677); +if (lean_is_exclusive(x_4672)) { + lean_ctor_release(x_4672, 0); + lean_ctor_release(x_4672, 1); + x_4678 = x_4672; +} else { + lean_dec_ref(x_4672); + x_4678 = lean_box(0); +} +x_4679 = lean_ctor_get(x_4673, 0); +lean_inc(x_4679); +if (lean_is_exclusive(x_4673)) { + lean_ctor_release(x_4673, 0); + x_4680 = x_4673; +} else { + lean_dec_ref(x_4673); + x_4680 = lean_box(0); +} +x_4681 = lean_array_get_size(x_3058); +x_4682 = lean_ctor_get(x_4679, 3); +lean_inc(x_4682); +lean_dec(x_4679); +x_4683 = lean_array_get_size(x_4682); +lean_dec(x_4682); +x_4684 = lean_nat_dec_lt(x_4681, x_4683); +if (x_4684 == 0) +{ +uint8_t x_4685; +x_4685 = lean_nat_dec_eq(x_4681, x_4683); +if (x_4685 == 0) +{ +lean_object* x_4686; lean_object* x_4687; lean_object* x_4688; lean_object* x_4689; lean_object* x_4690; lean_object* x_4691; lean_object* x_4692; lean_object* x_4693; lean_object* x_4694; lean_object* x_4695; lean_object* x_4696; lean_object* x_4697; lean_object* x_4698; lean_object* x_4699; lean_object* x_4700; lean_object* x_4701; lean_object* x_4702; +x_4686 = lean_unsigned_to_nat(0u); +x_4687 = l_Array_extract___rarg(x_3058, x_4686, x_4683); +x_4688 = l_Array_extract___rarg(x_3058, x_4683, x_4681); +lean_dec(x_4681); +lean_inc(x_153); +if (lean_is_scalar(x_4678)) { + x_4689 = lean_alloc_ctor(6, 2, 0); +} else { + x_4689 = x_4678; + lean_ctor_set_tag(x_4689, 6); +} +lean_ctor_set(x_4689, 0, x_153); 
+lean_ctor_set(x_4689, 1, x_4687); +x_4690 = lean_ctor_get(x_1, 0); +lean_inc(x_4690); +x_4691 = l_Lean_IR_ToIR_bindVar(x_4690, x_4400, x_4, x_5, x_4677); +x_4692 = lean_ctor_get(x_4691, 0); +lean_inc(x_4692); +x_4693 = lean_ctor_get(x_4691, 1); +lean_inc(x_4693); +lean_dec(x_4691); +x_4694 = lean_ctor_get(x_4692, 0); +lean_inc(x_4694); +x_4695 = lean_ctor_get(x_4692, 1); +lean_inc(x_4695); +lean_dec(x_4692); +x_4696 = l_Lean_IR_ToIR_newVar(x_4695, x_4, x_5, x_4693); +x_4697 = lean_ctor_get(x_4696, 0); +lean_inc(x_4697); +x_4698 = lean_ctor_get(x_4696, 1); +lean_inc(x_4698); +lean_dec(x_4696); +x_4699 = lean_ctor_get(x_4697, 0); +lean_inc(x_4699); +x_4700 = lean_ctor_get(x_4697, 1); +lean_inc(x_4700); +lean_dec(x_4697); +x_4701 = lean_ctor_get(x_1, 2); +lean_inc(x_4701); +lean_inc(x_5); +lean_inc(x_4); +x_4702 = l_Lean_IR_ToIR_lowerType(x_4701, x_4700, x_4, x_5, x_4698); +if (lean_obj_tag(x_4702) == 0) +{ +lean_object* x_4703; lean_object* x_4704; lean_object* x_4705; lean_object* x_4706; lean_object* x_4707; +x_4703 = lean_ctor_get(x_4702, 0); +lean_inc(x_4703); +x_4704 = lean_ctor_get(x_4702, 1); +lean_inc(x_4704); +lean_dec(x_4702); +x_4705 = lean_ctor_get(x_4703, 0); +lean_inc(x_4705); +x_4706 = lean_ctor_get(x_4703, 1); +lean_inc(x_4706); +lean_dec(x_4703); +lean_inc(x_5); +lean_inc(x_4); +lean_inc(x_2); +x_4707 = l_Lean_IR_ToIR_lowerLet___lambda__3(x_2, x_4699, x_4688, x_4694, x_4689, x_4705, x_4706, x_4, x_5, x_4704); +if (lean_obj_tag(x_4707) == 0) +{ +lean_object* x_4708; lean_object* x_4709; lean_object* x_4710; lean_object* x_4711; lean_object* x_4712; lean_object* x_4713; lean_object* x_4714; +x_4708 = lean_ctor_get(x_4707, 0); +lean_inc(x_4708); +x_4709 = lean_ctor_get(x_4707, 1); +lean_inc(x_4709); +lean_dec(x_4707); +x_4710 = lean_ctor_get(x_4708, 0); +lean_inc(x_4710); +x_4711 = lean_ctor_get(x_4708, 1); +lean_inc(x_4711); +if (lean_is_exclusive(x_4708)) { + lean_ctor_release(x_4708, 0); + lean_ctor_release(x_4708, 1); + x_4712 = x_4708; +} else { + lean_dec_ref(x_4708); + x_4712 = lean_box(0); +} +if (lean_is_scalar(x_4680)) { + x_4713 = lean_alloc_ctor(1, 1, 0); +} else { + x_4713 = x_4680; +} +lean_ctor_set(x_4713, 0, x_4710); +if (lean_is_scalar(x_4712)) { + x_4714 = lean_alloc_ctor(0, 2, 0); +} else { + x_4714 = x_4712; +} +lean_ctor_set(x_4714, 0, x_4713); +lean_ctor_set(x_4714, 1, x_4711); +x_4644 = x_4714; +x_4645 = x_4709; +goto block_4671; +} +else +{ +lean_object* x_4715; lean_object* x_4716; lean_object* x_4717; lean_object* x_4718; +lean_dec(x_4680); +lean_dec(x_4404); +lean_dec(x_3058); +lean_dec(x_153); +lean_dec(x_5); +lean_dec(x_4); +lean_dec(x_2); +lean_dec(x_1); +x_4715 = lean_ctor_get(x_4707, 0); +lean_inc(x_4715); +x_4716 = lean_ctor_get(x_4707, 1); +lean_inc(x_4716); +if (lean_is_exclusive(x_4707)) { + lean_ctor_release(x_4707, 0); + lean_ctor_release(x_4707, 1); + x_4717 = x_4707; +} else { + lean_dec_ref(x_4707); + x_4717 = lean_box(0); +} +if (lean_is_scalar(x_4717)) { + x_4718 = lean_alloc_ctor(1, 2, 0); +} else { + x_4718 = x_4717; +} +lean_ctor_set(x_4718, 0, x_4715); +lean_ctor_set(x_4718, 1, x_4716); +return x_4718; +} +} +else +{ +lean_object* x_4719; lean_object* x_4720; lean_object* x_4721; lean_object* x_4722; +lean_dec(x_4699); +lean_dec(x_4694); +lean_dec(x_4689); +lean_dec(x_4688); +lean_dec(x_4680); +lean_dec(x_4404); +lean_dec(x_3058); +lean_dec(x_153); +lean_dec(x_5); +lean_dec(x_4); +lean_dec(x_2); +lean_dec(x_1); +x_4719 = lean_ctor_get(x_4702, 0); +lean_inc(x_4719); +x_4720 = lean_ctor_get(x_4702, 1); +lean_inc(x_4720); +if 
(lean_is_exclusive(x_4702)) { + lean_ctor_release(x_4702, 0); + lean_ctor_release(x_4702, 1); + x_4721 = x_4702; +} else { + lean_dec_ref(x_4702); + x_4721 = lean_box(0); +} +if (lean_is_scalar(x_4721)) { + x_4722 = lean_alloc_ctor(1, 2, 0); +} else { + x_4722 = x_4721; +} +lean_ctor_set(x_4722, 0, x_4719); +lean_ctor_set(x_4722, 1, x_4720); +return x_4722; +} +} +else +{ +lean_object* x_4723; lean_object* x_4724; lean_object* x_4725; lean_object* x_4726; lean_object* x_4727; lean_object* x_4728; lean_object* x_4729; lean_object* x_4730; lean_object* x_4731; +lean_dec(x_4683); +lean_dec(x_4681); +lean_inc(x_3058); +lean_inc(x_153); +if (lean_is_scalar(x_4678)) { + x_4723 = lean_alloc_ctor(6, 2, 0); +} else { + x_4723 = x_4678; + lean_ctor_set_tag(x_4723, 6); +} +lean_ctor_set(x_4723, 0, x_153); +lean_ctor_set(x_4723, 1, x_3058); +x_4724 = lean_ctor_get(x_1, 0); +lean_inc(x_4724); +x_4725 = l_Lean_IR_ToIR_bindVar(x_4724, x_4400, x_4, x_5, x_4677); +x_4726 = lean_ctor_get(x_4725, 0); +lean_inc(x_4726); +x_4727 = lean_ctor_get(x_4725, 1); +lean_inc(x_4727); +lean_dec(x_4725); +x_4728 = lean_ctor_get(x_4726, 0); +lean_inc(x_4728); +x_4729 = lean_ctor_get(x_4726, 1); +lean_inc(x_4729); +lean_dec(x_4726); +x_4730 = lean_ctor_get(x_1, 2); +lean_inc(x_4730); +lean_inc(x_5); +lean_inc(x_4); +x_4731 = l_Lean_IR_ToIR_lowerType(x_4730, x_4729, x_4, x_5, x_4727); +if (lean_obj_tag(x_4731) == 0) +{ +lean_object* x_4732; lean_object* x_4733; lean_object* x_4734; lean_object* x_4735; lean_object* x_4736; +x_4732 = lean_ctor_get(x_4731, 0); +lean_inc(x_4732); +x_4733 = lean_ctor_get(x_4731, 1); +lean_inc(x_4733); +lean_dec(x_4731); +x_4734 = lean_ctor_get(x_4732, 0); +lean_inc(x_4734); +x_4735 = lean_ctor_get(x_4732, 1); +lean_inc(x_4735); +lean_dec(x_4732); +lean_inc(x_5); +lean_inc(x_4); +lean_inc(x_2); +x_4736 = l_Lean_IR_ToIR_lowerLet___lambda__1(x_2, x_4728, x_4723, x_4734, x_4735, x_4, x_5, x_4733); +if (lean_obj_tag(x_4736) == 0) +{ +lean_object* x_4737; lean_object* x_4738; lean_object* x_4739; lean_object* x_4740; lean_object* x_4741; lean_object* x_4742; lean_object* x_4743; +x_4737 = lean_ctor_get(x_4736, 0); +lean_inc(x_4737); +x_4738 = lean_ctor_get(x_4736, 1); +lean_inc(x_4738); +lean_dec(x_4736); +x_4739 = lean_ctor_get(x_4737, 0); +lean_inc(x_4739); +x_4740 = lean_ctor_get(x_4737, 1); +lean_inc(x_4740); +if (lean_is_exclusive(x_4737)) { + lean_ctor_release(x_4737, 0); + lean_ctor_release(x_4737, 1); + x_4741 = x_4737; +} else { + lean_dec_ref(x_4737); + x_4741 = lean_box(0); +} +if (lean_is_scalar(x_4680)) { + x_4742 = lean_alloc_ctor(1, 1, 0); +} else { + x_4742 = x_4680; +} +lean_ctor_set(x_4742, 0, x_4739); +if (lean_is_scalar(x_4741)) { + x_4743 = lean_alloc_ctor(0, 2, 0); +} else { + x_4743 = x_4741; +} +lean_ctor_set(x_4743, 0, x_4742); +lean_ctor_set(x_4743, 1, x_4740); +x_4644 = x_4743; +x_4645 = x_4738; +goto block_4671; +} +else +{ +lean_object* x_4744; lean_object* x_4745; lean_object* x_4746; lean_object* x_4747; +lean_dec(x_4680); +lean_dec(x_4404); +lean_dec(x_3058); +lean_dec(x_153); +lean_dec(x_5); +lean_dec(x_4); +lean_dec(x_2); +lean_dec(x_1); +x_4744 = lean_ctor_get(x_4736, 0); +lean_inc(x_4744); +x_4745 = lean_ctor_get(x_4736, 1); +lean_inc(x_4745); +if (lean_is_exclusive(x_4736)) { + lean_ctor_release(x_4736, 0); + lean_ctor_release(x_4736, 1); + x_4746 = x_4736; +} else { + lean_dec_ref(x_4736); + x_4746 = lean_box(0); +} +if (lean_is_scalar(x_4746)) { + x_4747 = lean_alloc_ctor(1, 2, 0); +} else { + x_4747 = x_4746; +} +lean_ctor_set(x_4747, 0, x_4744); 
+lean_ctor_set(x_4747, 1, x_4745); +return x_4747; +} +} +else +{ +lean_object* x_4748; lean_object* x_4749; lean_object* x_4750; lean_object* x_4751; +lean_dec(x_4728); +lean_dec(x_4723); +lean_dec(x_4680); +lean_dec(x_4404); +lean_dec(x_3058); +lean_dec(x_153); +lean_dec(x_5); +lean_dec(x_4); +lean_dec(x_2); +lean_dec(x_1); +x_4748 = lean_ctor_get(x_4731, 0); +lean_inc(x_4748); +x_4749 = lean_ctor_get(x_4731, 1); +lean_inc(x_4749); +if (lean_is_exclusive(x_4731)) { + lean_ctor_release(x_4731, 0); + lean_ctor_release(x_4731, 1); + x_4750 = x_4731; +} else { + lean_dec_ref(x_4731); + x_4750 = lean_box(0); +} +if (lean_is_scalar(x_4750)) { + x_4751 = lean_alloc_ctor(1, 2, 0); +} else { + x_4751 = x_4750; +} +lean_ctor_set(x_4751, 0, x_4748); +lean_ctor_set(x_4751, 1, x_4749); +return x_4751; +} +} +} +else +{ +lean_object* x_4752; lean_object* x_4753; lean_object* x_4754; lean_object* x_4755; lean_object* x_4756; lean_object* x_4757; lean_object* x_4758; lean_object* x_4759; lean_object* x_4760; +lean_dec(x_4683); +lean_dec(x_4681); +lean_inc(x_3058); +lean_inc(x_153); +if (lean_is_scalar(x_4678)) { + x_4752 = lean_alloc_ctor(7, 2, 0); +} else { + x_4752 = x_4678; + lean_ctor_set_tag(x_4752, 7); +} +lean_ctor_set(x_4752, 0, x_153); +lean_ctor_set(x_4752, 1, x_3058); +x_4753 = lean_ctor_get(x_1, 0); +lean_inc(x_4753); +x_4754 = l_Lean_IR_ToIR_bindVar(x_4753, x_4400, x_4, x_5, x_4677); +x_4755 = lean_ctor_get(x_4754, 0); +lean_inc(x_4755); +x_4756 = lean_ctor_get(x_4754, 1); +lean_inc(x_4756); +lean_dec(x_4754); +x_4757 = lean_ctor_get(x_4755, 0); +lean_inc(x_4757); +x_4758 = lean_ctor_get(x_4755, 1); +lean_inc(x_4758); +lean_dec(x_4755); +x_4759 = lean_box(7); +lean_inc(x_5); +lean_inc(x_4); +lean_inc(x_2); +x_4760 = l_Lean_IR_ToIR_lowerLet___lambda__1(x_2, x_4757, x_4752, x_4759, x_4758, x_4, x_5, x_4756); +if (lean_obj_tag(x_4760) == 0) +{ +lean_object* x_4761; lean_object* x_4762; lean_object* x_4763; lean_object* x_4764; lean_object* x_4765; lean_object* x_4766; lean_object* x_4767; +x_4761 = lean_ctor_get(x_4760, 0); +lean_inc(x_4761); +x_4762 = lean_ctor_get(x_4760, 1); +lean_inc(x_4762); +lean_dec(x_4760); +x_4763 = lean_ctor_get(x_4761, 0); +lean_inc(x_4763); +x_4764 = lean_ctor_get(x_4761, 1); +lean_inc(x_4764); +if (lean_is_exclusive(x_4761)) { + lean_ctor_release(x_4761, 0); + lean_ctor_release(x_4761, 1); + x_4765 = x_4761; +} else { + lean_dec_ref(x_4761); + x_4765 = lean_box(0); +} +if (lean_is_scalar(x_4680)) { + x_4766 = lean_alloc_ctor(1, 1, 0); +} else { + x_4766 = x_4680; +} +lean_ctor_set(x_4766, 0, x_4763); +if (lean_is_scalar(x_4765)) { + x_4767 = lean_alloc_ctor(0, 2, 0); +} else { + x_4767 = x_4765; +} +lean_ctor_set(x_4767, 0, x_4766); +lean_ctor_set(x_4767, 1, x_4764); +x_4644 = x_4767; +x_4645 = x_4762; +goto block_4671; +} +else +{ +lean_object* x_4768; lean_object* x_4769; lean_object* x_4770; lean_object* x_4771; +lean_dec(x_4680); +lean_dec(x_4404); +lean_dec(x_3058); +lean_dec(x_153); +lean_dec(x_5); +lean_dec(x_4); +lean_dec(x_2); +lean_dec(x_1); +x_4768 = lean_ctor_get(x_4760, 0); +lean_inc(x_4768); +x_4769 = lean_ctor_get(x_4760, 1); +lean_inc(x_4769); +if (lean_is_exclusive(x_4760)) { + lean_ctor_release(x_4760, 0); + lean_ctor_release(x_4760, 1); + x_4770 = x_4760; +} else { + lean_dec_ref(x_4760); + x_4770 = lean_box(0); +} +if (lean_is_scalar(x_4770)) { + x_4771 = lean_alloc_ctor(1, 2, 0); +} else { + x_4771 = x_4770; +} +lean_ctor_set(x_4771, 0, x_4768); +lean_ctor_set(x_4771, 1, x_4769); +return x_4771; +} +} +} +block_4671: +{ +lean_object* x_4646; 
+x_4646 = lean_ctor_get(x_4644, 0); +lean_inc(x_4646); +if (lean_obj_tag(x_4646) == 0) +{ +lean_object* x_4647; lean_object* x_4648; lean_object* x_4649; lean_object* x_4650; lean_object* x_4651; lean_object* x_4652; lean_object* x_4653; lean_object* x_4654; lean_object* x_4655; lean_object* x_4656; +lean_dec(x_4404); +x_4647 = lean_ctor_get(x_4644, 1); +lean_inc(x_4647); +lean_dec(x_4644); +x_4648 = lean_alloc_ctor(6, 2, 0); +lean_ctor_set(x_4648, 0, x_153); +lean_ctor_set(x_4648, 1, x_3058); +x_4649 = lean_ctor_get(x_1, 0); +lean_inc(x_4649); +x_4650 = l_Lean_IR_ToIR_bindVar(x_4649, x_4647, x_4, x_5, x_4645); +x_4651 = lean_ctor_get(x_4650, 0); +lean_inc(x_4651); +x_4652 = lean_ctor_get(x_4650, 1); +lean_inc(x_4652); +lean_dec(x_4650); +x_4653 = lean_ctor_get(x_4651, 0); +lean_inc(x_4653); +x_4654 = lean_ctor_get(x_4651, 1); +lean_inc(x_4654); +lean_dec(x_4651); +x_4655 = lean_ctor_get(x_1, 2); +lean_inc(x_4655); +lean_dec(x_1); +lean_inc(x_5); +lean_inc(x_4); +x_4656 = l_Lean_IR_ToIR_lowerType(x_4655, x_4654, x_4, x_5, x_4652); +if (lean_obj_tag(x_4656) == 0) +{ +lean_object* x_4657; lean_object* x_4658; lean_object* x_4659; lean_object* x_4660; lean_object* x_4661; +x_4657 = lean_ctor_get(x_4656, 0); +lean_inc(x_4657); +x_4658 = lean_ctor_get(x_4656, 1); +lean_inc(x_4658); +lean_dec(x_4656); +x_4659 = lean_ctor_get(x_4657, 0); +lean_inc(x_4659); +x_4660 = lean_ctor_get(x_4657, 1); +lean_inc(x_4660); +lean_dec(x_4657); +x_4661 = l_Lean_IR_ToIR_lowerLet___lambda__1(x_2, x_4653, x_4648, x_4659, x_4660, x_4, x_5, x_4658); +return x_4661; +} +else +{ +lean_object* x_4662; lean_object* x_4663; lean_object* x_4664; lean_object* x_4665; +lean_dec(x_4653); +lean_dec(x_4648); +lean_dec(x_5); +lean_dec(x_4); +lean_dec(x_2); +x_4662 = lean_ctor_get(x_4656, 0); +lean_inc(x_4662); +x_4663 = lean_ctor_get(x_4656, 1); +lean_inc(x_4663); +if (lean_is_exclusive(x_4656)) { + lean_ctor_release(x_4656, 0); + lean_ctor_release(x_4656, 1); + x_4664 = x_4656; +} else { + lean_dec_ref(x_4656); + x_4664 = lean_box(0); +} +if (lean_is_scalar(x_4664)) { + x_4665 = lean_alloc_ctor(1, 2, 0); +} else { + x_4665 = x_4664; +} +lean_ctor_set(x_4665, 0, x_4662); +lean_ctor_set(x_4665, 1, x_4663); +return x_4665; +} +} +else +{ +lean_object* x_4666; lean_object* x_4667; lean_object* x_4668; lean_object* x_4669; lean_object* x_4670; +lean_dec(x_3058); +lean_dec(x_153); +lean_dec(x_5); +lean_dec(x_4); +lean_dec(x_2); +lean_dec(x_1); +x_4666 = lean_ctor_get(x_4644, 1); +lean_inc(x_4666); +if (lean_is_exclusive(x_4644)) { + lean_ctor_release(x_4644, 0); + lean_ctor_release(x_4644, 1); + x_4667 = x_4644; +} else { + lean_dec_ref(x_4644); + x_4667 = lean_box(0); +} +x_4668 = lean_ctor_get(x_4646, 0); +lean_inc(x_4668); +lean_dec(x_4646); +if (lean_is_scalar(x_4667)) { + x_4669 = lean_alloc_ctor(0, 2, 0); +} else { + x_4669 = x_4667; +} +lean_ctor_set(x_4669, 0, x_4668); +lean_ctor_set(x_4669, 1, x_4666); +if (lean_is_scalar(x_4404)) { + x_4670 = lean_alloc_ctor(0, 2, 0); +} else { + x_4670 = x_4404; +} +lean_ctor_set(x_4670, 0, x_4669); +lean_ctor_set(x_4670, 1, x_4645); +return x_4670; +} +} +} +case 4: +{ +lean_object* x_4772; lean_object* x_4773; uint8_t x_4774; +lean_dec(x_4405); +lean_dec(x_4404); +lean_dec(x_3050); +lean_dec(x_3049); +if (lean_is_exclusive(x_4410)) { + lean_ctor_release(x_4410, 0); + x_4772 = x_4410; +} else { + lean_dec_ref(x_4410); + x_4772 = lean_box(0); +} +x_4773 = l_Lean_IR_ToIR_lowerLet___closed__20; +x_4774 = lean_name_eq(x_153, x_4773); +if (x_4774 == 0) +{ +uint8_t x_4775; lean_object* x_4776; 
lean_object* x_4777; lean_object* x_4778; lean_object* x_4779; lean_object* x_4780; lean_object* x_4781; lean_object* x_4782; lean_object* x_4783; lean_object* x_4784; +lean_dec(x_3058); +lean_dec(x_2); +lean_dec(x_1); +x_4775 = 1; +x_4776 = l_Lean_IR_ToIR_lowerLet___closed__14; +x_4777 = l_Lean_Name_toString(x_153, x_4775, x_4776); +if (lean_is_scalar(x_4772)) { + x_4778 = lean_alloc_ctor(3, 1, 0); +} else { + x_4778 = x_4772; + lean_ctor_set_tag(x_4778, 3); +} +lean_ctor_set(x_4778, 0, x_4777); +x_4779 = l_Lean_IR_ToIR_lowerLet___closed__22; +x_4780 = lean_alloc_ctor(5, 2, 0); +lean_ctor_set(x_4780, 0, x_4779); +lean_ctor_set(x_4780, 1, x_4778); +x_4781 = l_Lean_IR_ToIR_lowerLet___closed__24; +x_4782 = lean_alloc_ctor(5, 2, 0); +lean_ctor_set(x_4782, 0, x_4780); +lean_ctor_set(x_4782, 1, x_4781); +x_4783 = l_Lean_MessageData_ofFormat(x_4782); +x_4784 = l_Lean_throwError___at_Lean_IR_ToIR_lowerLet___spec__1(x_4783, x_4400, x_4, x_5, x_4403); +lean_dec(x_5); +lean_dec(x_4); +lean_dec(x_4400); +return x_4784; +} +else +{ +lean_object* x_4785; lean_object* x_4786; lean_object* x_4787; +lean_dec(x_4772); +lean_dec(x_153); +x_4785 = l_Lean_IR_instInhabitedArg; +x_4786 = lean_unsigned_to_nat(2u); +x_4787 = lean_array_get(x_4785, x_3058, x_4786); +lean_dec(x_3058); +if (lean_obj_tag(x_4787) == 0) +{ +lean_object* x_4788; lean_object* x_4789; lean_object* x_4790; lean_object* x_4791; lean_object* x_4792; lean_object* x_4793; lean_object* x_4794; +x_4788 = lean_ctor_get(x_4787, 0); +lean_inc(x_4788); +lean_dec(x_4787); +x_4789 = lean_ctor_get(x_1, 0); +lean_inc(x_4789); +lean_dec(x_1); +x_4790 = l_Lean_IR_ToIR_bindVarToVarId(x_4789, x_4788, x_4400, x_4, x_5, x_4403); +x_4791 = lean_ctor_get(x_4790, 0); +lean_inc(x_4791); +x_4792 = lean_ctor_get(x_4790, 1); +lean_inc(x_4792); +lean_dec(x_4790); +x_4793 = lean_ctor_get(x_4791, 1); +lean_inc(x_4793); +lean_dec(x_4791); +x_4794 = l_Lean_IR_ToIR_lowerCode(x_2, x_4793, x_4, x_5, x_4792); +return x_4794; +} +else +{ +lean_object* x_4795; lean_object* x_4796; lean_object* x_4797; lean_object* x_4798; lean_object* x_4799; lean_object* x_4800; +x_4795 = lean_ctor_get(x_1, 0); +lean_inc(x_4795); +lean_dec(x_1); +x_4796 = l_Lean_IR_ToIR_bindErased(x_4795, x_4400, x_4, x_5, x_4403); +x_4797 = lean_ctor_get(x_4796, 0); +lean_inc(x_4797); +x_4798 = lean_ctor_get(x_4796, 1); +lean_inc(x_4798); +lean_dec(x_4796); +x_4799 = lean_ctor_get(x_4797, 1); +lean_inc(x_4799); +lean_dec(x_4797); +x_4800 = l_Lean_IR_ToIR_lowerCode(x_2, x_4799, x_4, x_5, x_4798); +return x_4800; +} +} +} +case 5: +{ +lean_object* x_4801; lean_object* x_4802; +lean_dec(x_4410); +lean_dec(x_4405); +lean_dec(x_4404); +lean_dec(x_3058); +lean_dec(x_3050); +lean_dec(x_3049); +lean_dec(x_153); +lean_dec(x_2); +lean_dec(x_1); +x_4801 = l_Lean_IR_ToIR_lowerLet___closed__26; +x_4802 = l_panic___at_Lean_IR_ToIR_lowerAlt_loop___spec__1(x_4801, x_4400, x_4, x_5, x_4403); +return x_4802; +} +case 6: +{ +lean_object* x_4803; uint8_t x_4804; +x_4803 = lean_ctor_get(x_4410, 0); +lean_inc(x_4803); +lean_dec(x_4410); +lean_inc(x_153); +x_4804 = l_Lean_isExtern(x_4405, x_153); +if (x_4804 == 0) +{ +lean_object* x_4805; +lean_dec(x_4404); +lean_dec(x_3058); +lean_inc(x_5); +lean_inc(x_4); +x_4805 = l_Lean_IR_ToIR_getCtorInfo(x_153, x_4400, x_4, x_5, x_4403); +if (lean_obj_tag(x_4805) == 0) +{ +lean_object* x_4806; lean_object* x_4807; lean_object* x_4808; lean_object* x_4809; lean_object* x_4810; lean_object* x_4811; lean_object* x_4812; lean_object* x_4813; lean_object* x_4814; lean_object* x_4815; lean_object* 
x_4816; lean_object* x_4817; lean_object* x_4818; lean_object* x_4819; lean_object* x_4820; lean_object* x_4821; lean_object* x_4822; lean_object* x_4823; lean_object* x_4824; lean_object* x_4825; +x_4806 = lean_ctor_get(x_4805, 0); +lean_inc(x_4806); +x_4807 = lean_ctor_get(x_4806, 0); +lean_inc(x_4807); +x_4808 = lean_ctor_get(x_4805, 1); +lean_inc(x_4808); +lean_dec(x_4805); +x_4809 = lean_ctor_get(x_4806, 1); +lean_inc(x_4809); +lean_dec(x_4806); +x_4810 = lean_ctor_get(x_4807, 0); +lean_inc(x_4810); +x_4811 = lean_ctor_get(x_4807, 1); +lean_inc(x_4811); +lean_dec(x_4807); +x_4812 = lean_ctor_get(x_4803, 3); +lean_inc(x_4812); +lean_dec(x_4803); +x_4813 = lean_array_get_size(x_3049); +x_4814 = l_Array_extract___rarg(x_3049, x_4812, x_4813); +lean_dec(x_4813); +lean_dec(x_3049); +x_4815 = lean_array_get_size(x_4811); +x_4816 = lean_unsigned_to_nat(0u); +x_4817 = lean_unsigned_to_nat(1u); +if (lean_is_scalar(x_3050)) { + x_4818 = lean_alloc_ctor(0, 3, 0); +} else { + x_4818 = x_3050; + lean_ctor_set_tag(x_4818, 0); +} +lean_ctor_set(x_4818, 0, x_4816); +lean_ctor_set(x_4818, 1, x_4815); +lean_ctor_set(x_4818, 2, x_4817); +x_4819 = l_Lean_IR_ToIR_lowerLet___closed__27; +x_4820 = l_Std_Range_forIn_x27_loop___at_Lean_IR_ToIR_lowerLet___spec__3(x_4811, x_4814, x_4818, x_4818, x_4819, x_4816, lean_box(0), lean_box(0), x_4809, x_4, x_5, x_4808); +lean_dec(x_4818); +x_4821 = lean_ctor_get(x_4820, 0); +lean_inc(x_4821); +x_4822 = lean_ctor_get(x_4820, 1); +lean_inc(x_4822); +lean_dec(x_4820); +x_4823 = lean_ctor_get(x_4821, 0); +lean_inc(x_4823); +x_4824 = lean_ctor_get(x_4821, 1); +lean_inc(x_4824); +lean_dec(x_4821); +x_4825 = l_Lean_IR_ToIR_lowerLet___lambda__4(x_1, x_2, x_4810, x_4811, x_4814, x_4823, x_4824, x_4, x_5, x_4822); +lean_dec(x_4814); +lean_dec(x_4811); +return x_4825; +} +else +{ +lean_object* x_4826; lean_object* x_4827; lean_object* x_4828; lean_object* x_4829; +lean_dec(x_4803); +lean_dec(x_3050); +lean_dec(x_3049); +lean_dec(x_5); +lean_dec(x_4); +lean_dec(x_2); +lean_dec(x_1); +x_4826 = lean_ctor_get(x_4805, 0); +lean_inc(x_4826); +x_4827 = lean_ctor_get(x_4805, 1); +lean_inc(x_4827); +if (lean_is_exclusive(x_4805)) { + lean_ctor_release(x_4805, 0); + lean_ctor_release(x_4805, 1); + x_4828 = x_4805; +} else { + lean_dec_ref(x_4805); + x_4828 = lean_box(0); +} +if (lean_is_scalar(x_4828)) { + x_4829 = lean_alloc_ctor(1, 2, 0); +} else { + x_4829 = x_4828; +} +lean_ctor_set(x_4829, 0, x_4826); +lean_ctor_set(x_4829, 1, x_4827); +return x_4829; +} +} +else +{ +lean_object* x_4830; lean_object* x_4831; lean_object* x_4858; lean_object* x_4859; +lean_dec(x_4803); +lean_dec(x_3050); +lean_dec(x_3049); +lean_inc(x_153); +x_4858 = l_Lean_Compiler_LCNF_getMonoDecl_x3f(x_153, x_4, x_5, x_4403); +x_4859 = lean_ctor_get(x_4858, 0); +lean_inc(x_4859); +if (lean_obj_tag(x_4859) == 0) +{ +lean_object* x_4860; lean_object* x_4861; lean_object* x_4862; +x_4860 = lean_ctor_get(x_4858, 1); +lean_inc(x_4860); +lean_dec(x_4858); +x_4861 = lean_box(0); +x_4862 = lean_alloc_ctor(0, 2, 0); +lean_ctor_set(x_4862, 0, x_4861); +lean_ctor_set(x_4862, 1, x_4400); +x_4830 = x_4862; +x_4831 = x_4860; +goto block_4857; +} +else +{ +lean_object* x_4863; lean_object* x_4864; lean_object* x_4865; lean_object* x_4866; lean_object* x_4867; lean_object* x_4868; lean_object* x_4869; uint8_t x_4870; +x_4863 = lean_ctor_get(x_4858, 1); +lean_inc(x_4863); +if (lean_is_exclusive(x_4858)) { + lean_ctor_release(x_4858, 0); + lean_ctor_release(x_4858, 1); + x_4864 = x_4858; +} else { + lean_dec_ref(x_4858); + x_4864 = 
lean_box(0); +} +x_4865 = lean_ctor_get(x_4859, 0); +lean_inc(x_4865); +if (lean_is_exclusive(x_4859)) { + lean_ctor_release(x_4859, 0); + x_4866 = x_4859; +} else { + lean_dec_ref(x_4859); + x_4866 = lean_box(0); +} +x_4867 = lean_array_get_size(x_3058); +x_4868 = lean_ctor_get(x_4865, 3); +lean_inc(x_4868); +lean_dec(x_4865); +x_4869 = lean_array_get_size(x_4868); +lean_dec(x_4868); +x_4870 = lean_nat_dec_lt(x_4867, x_4869); +if (x_4870 == 0) +{ +uint8_t x_4871; +x_4871 = lean_nat_dec_eq(x_4867, x_4869); +if (x_4871 == 0) +{ +lean_object* x_4872; lean_object* x_4873; lean_object* x_4874; lean_object* x_4875; lean_object* x_4876; lean_object* x_4877; lean_object* x_4878; lean_object* x_4879; lean_object* x_4880; lean_object* x_4881; lean_object* x_4882; lean_object* x_4883; lean_object* x_4884; lean_object* x_4885; lean_object* x_4886; lean_object* x_4887; lean_object* x_4888; +x_4872 = lean_unsigned_to_nat(0u); +x_4873 = l_Array_extract___rarg(x_3058, x_4872, x_4869); +x_4874 = l_Array_extract___rarg(x_3058, x_4869, x_4867); +lean_dec(x_4867); +lean_inc(x_153); +if (lean_is_scalar(x_4864)) { + x_4875 = lean_alloc_ctor(6, 2, 0); +} else { + x_4875 = x_4864; + lean_ctor_set_tag(x_4875, 6); +} +lean_ctor_set(x_4875, 0, x_153); +lean_ctor_set(x_4875, 1, x_4873); +x_4876 = lean_ctor_get(x_1, 0); +lean_inc(x_4876); +x_4877 = l_Lean_IR_ToIR_bindVar(x_4876, x_4400, x_4, x_5, x_4863); +x_4878 = lean_ctor_get(x_4877, 0); +lean_inc(x_4878); +x_4879 = lean_ctor_get(x_4877, 1); +lean_inc(x_4879); +lean_dec(x_4877); +x_4880 = lean_ctor_get(x_4878, 0); +lean_inc(x_4880); +x_4881 = lean_ctor_get(x_4878, 1); +lean_inc(x_4881); +lean_dec(x_4878); +x_4882 = l_Lean_IR_ToIR_newVar(x_4881, x_4, x_5, x_4879); +x_4883 = lean_ctor_get(x_4882, 0); +lean_inc(x_4883); +x_4884 = lean_ctor_get(x_4882, 1); +lean_inc(x_4884); +lean_dec(x_4882); +x_4885 = lean_ctor_get(x_4883, 0); +lean_inc(x_4885); +x_4886 = lean_ctor_get(x_4883, 1); +lean_inc(x_4886); +lean_dec(x_4883); +x_4887 = lean_ctor_get(x_1, 2); +lean_inc(x_4887); +lean_inc(x_5); +lean_inc(x_4); +x_4888 = l_Lean_IR_ToIR_lowerType(x_4887, x_4886, x_4, x_5, x_4884); +if (lean_obj_tag(x_4888) == 0) +{ +lean_object* x_4889; lean_object* x_4890; lean_object* x_4891; lean_object* x_4892; lean_object* x_4893; +x_4889 = lean_ctor_get(x_4888, 0); +lean_inc(x_4889); +x_4890 = lean_ctor_get(x_4888, 1); +lean_inc(x_4890); +lean_dec(x_4888); +x_4891 = lean_ctor_get(x_4889, 0); +lean_inc(x_4891); +x_4892 = lean_ctor_get(x_4889, 1); +lean_inc(x_4892); +lean_dec(x_4889); +lean_inc(x_5); +lean_inc(x_4); +lean_inc(x_2); +x_4893 = l_Lean_IR_ToIR_lowerLet___lambda__3(x_2, x_4885, x_4874, x_4880, x_4875, x_4891, x_4892, x_4, x_5, x_4890); +if (lean_obj_tag(x_4893) == 0) +{ +lean_object* x_4894; lean_object* x_4895; lean_object* x_4896; lean_object* x_4897; lean_object* x_4898; lean_object* x_4899; lean_object* x_4900; +x_4894 = lean_ctor_get(x_4893, 0); +lean_inc(x_4894); +x_4895 = lean_ctor_get(x_4893, 1); +lean_inc(x_4895); +lean_dec(x_4893); +x_4896 = lean_ctor_get(x_4894, 0); +lean_inc(x_4896); +x_4897 = lean_ctor_get(x_4894, 1); +lean_inc(x_4897); +if (lean_is_exclusive(x_4894)) { + lean_ctor_release(x_4894, 0); + lean_ctor_release(x_4894, 1); + x_4898 = x_4894; +} else { + lean_dec_ref(x_4894); + x_4898 = lean_box(0); +} +if (lean_is_scalar(x_4866)) { + x_4899 = lean_alloc_ctor(1, 1, 0); +} else { + x_4899 = x_4866; +} +lean_ctor_set(x_4899, 0, x_4896); +if (lean_is_scalar(x_4898)) { + x_4900 = lean_alloc_ctor(0, 2, 0); +} else { + x_4900 = x_4898; +} +lean_ctor_set(x_4900, 
0, x_4899); +lean_ctor_set(x_4900, 1, x_4897); +x_4830 = x_4900; +x_4831 = x_4895; +goto block_4857; +} +else +{ +lean_object* x_4901; lean_object* x_4902; lean_object* x_4903; lean_object* x_4904; +lean_dec(x_4866); +lean_dec(x_4404); +lean_dec(x_3058); +lean_dec(x_153); +lean_dec(x_5); +lean_dec(x_4); +lean_dec(x_2); +lean_dec(x_1); +x_4901 = lean_ctor_get(x_4893, 0); +lean_inc(x_4901); +x_4902 = lean_ctor_get(x_4893, 1); +lean_inc(x_4902); +if (lean_is_exclusive(x_4893)) { + lean_ctor_release(x_4893, 0); + lean_ctor_release(x_4893, 1); + x_4903 = x_4893; +} else { + lean_dec_ref(x_4893); + x_4903 = lean_box(0); +} +if (lean_is_scalar(x_4903)) { + x_4904 = lean_alloc_ctor(1, 2, 0); +} else { + x_4904 = x_4903; +} +lean_ctor_set(x_4904, 0, x_4901); +lean_ctor_set(x_4904, 1, x_4902); +return x_4904; +} +} +else +{ +lean_object* x_4905; lean_object* x_4906; lean_object* x_4907; lean_object* x_4908; +lean_dec(x_4885); +lean_dec(x_4880); +lean_dec(x_4875); +lean_dec(x_4874); +lean_dec(x_4866); +lean_dec(x_4404); +lean_dec(x_3058); +lean_dec(x_153); +lean_dec(x_5); +lean_dec(x_4); +lean_dec(x_2); +lean_dec(x_1); +x_4905 = lean_ctor_get(x_4888, 0); +lean_inc(x_4905); +x_4906 = lean_ctor_get(x_4888, 1); +lean_inc(x_4906); +if (lean_is_exclusive(x_4888)) { + lean_ctor_release(x_4888, 0); + lean_ctor_release(x_4888, 1); + x_4907 = x_4888; +} else { + lean_dec_ref(x_4888); + x_4907 = lean_box(0); +} +if (lean_is_scalar(x_4907)) { + x_4908 = lean_alloc_ctor(1, 2, 0); +} else { + x_4908 = x_4907; +} +lean_ctor_set(x_4908, 0, x_4905); +lean_ctor_set(x_4908, 1, x_4906); +return x_4908; +} +} +else +{ +lean_object* x_4909; lean_object* x_4910; lean_object* x_4911; lean_object* x_4912; lean_object* x_4913; lean_object* x_4914; lean_object* x_4915; lean_object* x_4916; lean_object* x_4917; +lean_dec(x_4869); +lean_dec(x_4867); +lean_inc(x_3058); +lean_inc(x_153); +if (lean_is_scalar(x_4864)) { + x_4909 = lean_alloc_ctor(6, 2, 0); +} else { + x_4909 = x_4864; + lean_ctor_set_tag(x_4909, 6); +} +lean_ctor_set(x_4909, 0, x_153); +lean_ctor_set(x_4909, 1, x_3058); +x_4910 = lean_ctor_get(x_1, 0); +lean_inc(x_4910); +x_4911 = l_Lean_IR_ToIR_bindVar(x_4910, x_4400, x_4, x_5, x_4863); +x_4912 = lean_ctor_get(x_4911, 0); +lean_inc(x_4912); +x_4913 = lean_ctor_get(x_4911, 1); +lean_inc(x_4913); +lean_dec(x_4911); +x_4914 = lean_ctor_get(x_4912, 0); +lean_inc(x_4914); +x_4915 = lean_ctor_get(x_4912, 1); +lean_inc(x_4915); +lean_dec(x_4912); +x_4916 = lean_ctor_get(x_1, 2); +lean_inc(x_4916); +lean_inc(x_5); +lean_inc(x_4); +x_4917 = l_Lean_IR_ToIR_lowerType(x_4916, x_4915, x_4, x_5, x_4913); +if (lean_obj_tag(x_4917) == 0) +{ +lean_object* x_4918; lean_object* x_4919; lean_object* x_4920; lean_object* x_4921; lean_object* x_4922; +x_4918 = lean_ctor_get(x_4917, 0); +lean_inc(x_4918); +x_4919 = lean_ctor_get(x_4917, 1); +lean_inc(x_4919); +lean_dec(x_4917); +x_4920 = lean_ctor_get(x_4918, 0); +lean_inc(x_4920); +x_4921 = lean_ctor_get(x_4918, 1); +lean_inc(x_4921); +lean_dec(x_4918); +lean_inc(x_5); +lean_inc(x_4); +lean_inc(x_2); +x_4922 = l_Lean_IR_ToIR_lowerLet___lambda__1(x_2, x_4914, x_4909, x_4920, x_4921, x_4, x_5, x_4919); +if (lean_obj_tag(x_4922) == 0) +{ +lean_object* x_4923; lean_object* x_4924; lean_object* x_4925; lean_object* x_4926; lean_object* x_4927; lean_object* x_4928; lean_object* x_4929; +x_4923 = lean_ctor_get(x_4922, 0); +lean_inc(x_4923); +x_4924 = lean_ctor_get(x_4922, 1); +lean_inc(x_4924); +lean_dec(x_4922); +x_4925 = lean_ctor_get(x_4923, 0); +lean_inc(x_4925); +x_4926 = 
lean_ctor_get(x_4923, 1); +lean_inc(x_4926); +if (lean_is_exclusive(x_4923)) { + lean_ctor_release(x_4923, 0); + lean_ctor_release(x_4923, 1); + x_4927 = x_4923; +} else { + lean_dec_ref(x_4923); + x_4927 = lean_box(0); +} +if (lean_is_scalar(x_4866)) { + x_4928 = lean_alloc_ctor(1, 1, 0); +} else { + x_4928 = x_4866; +} +lean_ctor_set(x_4928, 0, x_4925); +if (lean_is_scalar(x_4927)) { + x_4929 = lean_alloc_ctor(0, 2, 0); +} else { + x_4929 = x_4927; +} +lean_ctor_set(x_4929, 0, x_4928); +lean_ctor_set(x_4929, 1, x_4926); +x_4830 = x_4929; +x_4831 = x_4924; +goto block_4857; +} +else +{ +lean_object* x_4930; lean_object* x_4931; lean_object* x_4932; lean_object* x_4933; +lean_dec(x_4866); +lean_dec(x_4404); +lean_dec(x_3058); +lean_dec(x_153); +lean_dec(x_5); +lean_dec(x_4); +lean_dec(x_2); +lean_dec(x_1); +x_4930 = lean_ctor_get(x_4922, 0); +lean_inc(x_4930); +x_4931 = lean_ctor_get(x_4922, 1); +lean_inc(x_4931); +if (lean_is_exclusive(x_4922)) { + lean_ctor_release(x_4922, 0); + lean_ctor_release(x_4922, 1); + x_4932 = x_4922; +} else { + lean_dec_ref(x_4922); + x_4932 = lean_box(0); +} +if (lean_is_scalar(x_4932)) { + x_4933 = lean_alloc_ctor(1, 2, 0); +} else { + x_4933 = x_4932; +} +lean_ctor_set(x_4933, 0, x_4930); +lean_ctor_set(x_4933, 1, x_4931); +return x_4933; +} +} +else +{ +lean_object* x_4934; lean_object* x_4935; lean_object* x_4936; lean_object* x_4937; +lean_dec(x_4914); +lean_dec(x_4909); +lean_dec(x_4866); +lean_dec(x_4404); +lean_dec(x_3058); +lean_dec(x_153); +lean_dec(x_5); +lean_dec(x_4); +lean_dec(x_2); +lean_dec(x_1); +x_4934 = lean_ctor_get(x_4917, 0); +lean_inc(x_4934); +x_4935 = lean_ctor_get(x_4917, 1); +lean_inc(x_4935); +if (lean_is_exclusive(x_4917)) { + lean_ctor_release(x_4917, 0); + lean_ctor_release(x_4917, 1); + x_4936 = x_4917; +} else { + lean_dec_ref(x_4917); + x_4936 = lean_box(0); +} +if (lean_is_scalar(x_4936)) { + x_4937 = lean_alloc_ctor(1, 2, 0); +} else { + x_4937 = x_4936; +} +lean_ctor_set(x_4937, 0, x_4934); +lean_ctor_set(x_4937, 1, x_4935); +return x_4937; +} +} +} +else +{ +lean_object* x_4938; lean_object* x_4939; lean_object* x_4940; lean_object* x_4941; lean_object* x_4942; lean_object* x_4943; lean_object* x_4944; lean_object* x_4945; lean_object* x_4946; +lean_dec(x_4869); +lean_dec(x_4867); +lean_inc(x_3058); +lean_inc(x_153); +if (lean_is_scalar(x_4864)) { + x_4938 = lean_alloc_ctor(7, 2, 0); +} else { + x_4938 = x_4864; + lean_ctor_set_tag(x_4938, 7); +} +lean_ctor_set(x_4938, 0, x_153); +lean_ctor_set(x_4938, 1, x_3058); +x_4939 = lean_ctor_get(x_1, 0); +lean_inc(x_4939); +x_4940 = l_Lean_IR_ToIR_bindVar(x_4939, x_4400, x_4, x_5, x_4863); +x_4941 = lean_ctor_get(x_4940, 0); +lean_inc(x_4941); +x_4942 = lean_ctor_get(x_4940, 1); +lean_inc(x_4942); +lean_dec(x_4940); +x_4943 = lean_ctor_get(x_4941, 0); +lean_inc(x_4943); +x_4944 = lean_ctor_get(x_4941, 1); +lean_inc(x_4944); +lean_dec(x_4941); +x_4945 = lean_box(7); +lean_inc(x_5); +lean_inc(x_4); +lean_inc(x_2); +x_4946 = l_Lean_IR_ToIR_lowerLet___lambda__1(x_2, x_4943, x_4938, x_4945, x_4944, x_4, x_5, x_4942); +if (lean_obj_tag(x_4946) == 0) +{ +lean_object* x_4947; lean_object* x_4948; lean_object* x_4949; lean_object* x_4950; lean_object* x_4951; lean_object* x_4952; lean_object* x_4953; +x_4947 = lean_ctor_get(x_4946, 0); +lean_inc(x_4947); +x_4948 = lean_ctor_get(x_4946, 1); +lean_inc(x_4948); +lean_dec(x_4946); +x_4949 = lean_ctor_get(x_4947, 0); +lean_inc(x_4949); +x_4950 = lean_ctor_get(x_4947, 1); +lean_inc(x_4950); +if (lean_is_exclusive(x_4947)) { + 
lean_ctor_release(x_4947, 0); + lean_ctor_release(x_4947, 1); + x_4951 = x_4947; +} else { + lean_dec_ref(x_4947); + x_4951 = lean_box(0); +} +if (lean_is_scalar(x_4866)) { + x_4952 = lean_alloc_ctor(1, 1, 0); +} else { + x_4952 = x_4866; +} +lean_ctor_set(x_4952, 0, x_4949); +if (lean_is_scalar(x_4951)) { + x_4953 = lean_alloc_ctor(0, 2, 0); +} else { + x_4953 = x_4951; +} +lean_ctor_set(x_4953, 0, x_4952); +lean_ctor_set(x_4953, 1, x_4950); +x_4830 = x_4953; +x_4831 = x_4948; +goto block_4857; +} +else +{ +lean_object* x_4954; lean_object* x_4955; lean_object* x_4956; lean_object* x_4957; +lean_dec(x_4866); +lean_dec(x_4404); +lean_dec(x_3058); +lean_dec(x_153); +lean_dec(x_5); +lean_dec(x_4); +lean_dec(x_2); +lean_dec(x_1); +x_4954 = lean_ctor_get(x_4946, 0); +lean_inc(x_4954); +x_4955 = lean_ctor_get(x_4946, 1); +lean_inc(x_4955); +if (lean_is_exclusive(x_4946)) { + lean_ctor_release(x_4946, 0); + lean_ctor_release(x_4946, 1); + x_4956 = x_4946; +} else { + lean_dec_ref(x_4946); + x_4956 = lean_box(0); +} +if (lean_is_scalar(x_4956)) { + x_4957 = lean_alloc_ctor(1, 2, 0); +} else { + x_4957 = x_4956; +} +lean_ctor_set(x_4957, 0, x_4954); +lean_ctor_set(x_4957, 1, x_4955); +return x_4957; +} +} +} +block_4857: +{ +lean_object* x_4832; +x_4832 = lean_ctor_get(x_4830, 0); +lean_inc(x_4832); +if (lean_obj_tag(x_4832) == 0) +{ +lean_object* x_4833; lean_object* x_4834; lean_object* x_4835; lean_object* x_4836; lean_object* x_4837; lean_object* x_4838; lean_object* x_4839; lean_object* x_4840; lean_object* x_4841; lean_object* x_4842; +lean_dec(x_4404); +x_4833 = lean_ctor_get(x_4830, 1); +lean_inc(x_4833); +lean_dec(x_4830); +x_4834 = lean_alloc_ctor(6, 2, 0); +lean_ctor_set(x_4834, 0, x_153); +lean_ctor_set(x_4834, 1, x_3058); +x_4835 = lean_ctor_get(x_1, 0); +lean_inc(x_4835); +x_4836 = l_Lean_IR_ToIR_bindVar(x_4835, x_4833, x_4, x_5, x_4831); +x_4837 = lean_ctor_get(x_4836, 0); +lean_inc(x_4837); +x_4838 = lean_ctor_get(x_4836, 1); +lean_inc(x_4838); +lean_dec(x_4836); +x_4839 = lean_ctor_get(x_4837, 0); +lean_inc(x_4839); +x_4840 = lean_ctor_get(x_4837, 1); +lean_inc(x_4840); +lean_dec(x_4837); +x_4841 = lean_ctor_get(x_1, 2); +lean_inc(x_4841); +lean_dec(x_1); +lean_inc(x_5); +lean_inc(x_4); +x_4842 = l_Lean_IR_ToIR_lowerType(x_4841, x_4840, x_4, x_5, x_4838); +if (lean_obj_tag(x_4842) == 0) +{ +lean_object* x_4843; lean_object* x_4844; lean_object* x_4845; lean_object* x_4846; lean_object* x_4847; +x_4843 = lean_ctor_get(x_4842, 0); +lean_inc(x_4843); +x_4844 = lean_ctor_get(x_4842, 1); +lean_inc(x_4844); +lean_dec(x_4842); +x_4845 = lean_ctor_get(x_4843, 0); +lean_inc(x_4845); +x_4846 = lean_ctor_get(x_4843, 1); +lean_inc(x_4846); +lean_dec(x_4843); +x_4847 = l_Lean_IR_ToIR_lowerLet___lambda__1(x_2, x_4839, x_4834, x_4845, x_4846, x_4, x_5, x_4844); +return x_4847; +} +else +{ +lean_object* x_4848; lean_object* x_4849; lean_object* x_4850; lean_object* x_4851; +lean_dec(x_4839); +lean_dec(x_4834); +lean_dec(x_5); +lean_dec(x_4); +lean_dec(x_2); +x_4848 = lean_ctor_get(x_4842, 0); +lean_inc(x_4848); +x_4849 = lean_ctor_get(x_4842, 1); +lean_inc(x_4849); +if (lean_is_exclusive(x_4842)) { + lean_ctor_release(x_4842, 0); + lean_ctor_release(x_4842, 1); + x_4850 = x_4842; +} else { + lean_dec_ref(x_4842); + x_4850 = lean_box(0); +} +if (lean_is_scalar(x_4850)) { + x_4851 = lean_alloc_ctor(1, 2, 0); +} else { + x_4851 = x_4850; +} +lean_ctor_set(x_4851, 0, x_4848); +lean_ctor_set(x_4851, 1, x_4849); +return x_4851; +} +} +else +{ +lean_object* x_4852; lean_object* x_4853; lean_object* 
x_4854; lean_object* x_4855; lean_object* x_4856; +lean_dec(x_3058); +lean_dec(x_153); +lean_dec(x_5); +lean_dec(x_4); +lean_dec(x_2); +lean_dec(x_1); +x_4852 = lean_ctor_get(x_4830, 1); +lean_inc(x_4852); +if (lean_is_exclusive(x_4830)) { + lean_ctor_release(x_4830, 0); + lean_ctor_release(x_4830, 1); + x_4853 = x_4830; +} else { + lean_dec_ref(x_4830); + x_4853 = lean_box(0); +} +x_4854 = lean_ctor_get(x_4832, 0); +lean_inc(x_4854); +lean_dec(x_4832); +if (lean_is_scalar(x_4853)) { + x_4855 = lean_alloc_ctor(0, 2, 0); +} else { + x_4855 = x_4853; +} +lean_ctor_set(x_4855, 0, x_4854); +lean_ctor_set(x_4855, 1, x_4852); +if (lean_is_scalar(x_4404)) { + x_4856 = lean_alloc_ctor(0, 2, 0); +} else { + x_4856 = x_4404; +} +lean_ctor_set(x_4856, 0, x_4855); +lean_ctor_set(x_4856, 1, x_4831); +return x_4856; +} +} +} +} +default: +{ +lean_object* x_4958; uint8_t x_4959; lean_object* x_4960; lean_object* x_4961; lean_object* x_4962; lean_object* x_4963; lean_object* x_4964; lean_object* x_4965; lean_object* x_4966; lean_object* x_4967; lean_object* x_4968; +lean_dec(x_4405); +lean_dec(x_4404); +lean_dec(x_3058); +lean_dec(x_3050); +lean_dec(x_3049); +lean_dec(x_2); +lean_dec(x_1); +if (lean_is_exclusive(x_4410)) { + lean_ctor_release(x_4410, 0); + x_4958 = x_4410; +} else { + lean_dec_ref(x_4410); + x_4958 = lean_box(0); +} +x_4959 = 1; +x_4960 = l_Lean_IR_ToIR_lowerLet___closed__14; +x_4961 = l_Lean_Name_toString(x_153, x_4959, x_4960); +if (lean_is_scalar(x_4958)) { + x_4962 = lean_alloc_ctor(3, 1, 0); +} else { + x_4962 = x_4958; + lean_ctor_set_tag(x_4962, 3); +} +lean_ctor_set(x_4962, 0, x_4961); +x_4963 = l_Lean_IR_ToIR_lowerLet___closed__29; +x_4964 = lean_alloc_ctor(5, 2, 0); +lean_ctor_set(x_4964, 0, x_4963); +lean_ctor_set(x_4964, 1, x_4962); +x_4965 = l_Lean_IR_ToIR_lowerLet___closed__31; +x_4966 = lean_alloc_ctor(5, 2, 0); +lean_ctor_set(x_4966, 0, x_4964); +lean_ctor_set(x_4966, 1, x_4965); +x_4967 = l_Lean_MessageData_ofFormat(x_4966); +x_4968 = l_Lean_throwError___at_Lean_IR_ToIR_lowerLet___spec__1(x_4967, x_4400, x_4, x_5, x_4403); +lean_dec(x_5); +lean_dec(x_4); +lean_dec(x_4400); +return x_4968; +} +} +} +} +} +else +{ +uint8_t x_4969; +lean_dec(x_3058); +lean_dec(x_3050); +lean_dec(x_3049); +lean_dec(x_153); +lean_dec(x_5); +lean_dec(x_4); +lean_dec(x_2); +lean_dec(x_1); +x_4969 = !lean_is_exclusive(x_3060); +if (x_4969 == 0) +{ +lean_object* x_4970; lean_object* x_4971; lean_object* x_4972; +x_4970 = lean_ctor_get(x_3060, 0); +lean_dec(x_4970); +x_4971 = lean_ctor_get(x_3062, 0); +lean_inc(x_4971); +lean_dec(x_3062); +lean_ctor_set(x_3060, 0, x_4971); +if (lean_is_scalar(x_3056)) { + x_4972 = lean_alloc_ctor(0, 2, 0); +} else { + x_4972 = x_3056; +} +lean_ctor_set(x_4972, 0, x_3060); +lean_ctor_set(x_4972, 1, x_3061); +return x_4972; +} +else +{ +lean_object* x_4973; lean_object* x_4974; lean_object* x_4975; lean_object* x_4976; +x_4973 = lean_ctor_get(x_3060, 1); +lean_inc(x_4973); +lean_dec(x_3060); +x_4974 = lean_ctor_get(x_3062, 0); +lean_inc(x_4974); +lean_dec(x_3062); +x_4975 = lean_alloc_ctor(0, 2, 0); +lean_ctor_set(x_4975, 0, x_4974); +lean_ctor_set(x_4975, 1, x_4973); +if (lean_is_scalar(x_3056)) { + x_4976 = lean_alloc_ctor(0, 2, 0); +} else { + x_4976 = x_3056; +} +lean_ctor_set(x_4976, 0, x_4975); +lean_ctor_set(x_4976, 1, x_3061); +return x_4976; +} +} +} +} +else +{ +lean_object* x_5258; lean_object* x_5259; lean_object* x_5260; lean_object* x_5261; lean_object* x_5839; lean_object* x_5840; +x_5258 = lean_ctor_get(x_3054, 0); +x_5259 = lean_ctor_get(x_3054, 1); 
+lean_inc(x_5259); +lean_inc(x_5258); +lean_dec(x_3054); +lean_inc(x_153); +x_5839 = l_Lean_Compiler_LCNF_getMonoDecl_x3f(x_153, x_4, x_5, x_3055); +x_5840 = lean_ctor_get(x_5839, 0); +lean_inc(x_5840); +if (lean_obj_tag(x_5840) == 0) +{ +lean_object* x_5841; lean_object* x_5842; lean_object* x_5843; +x_5841 = lean_ctor_get(x_5839, 1); +lean_inc(x_5841); +lean_dec(x_5839); +x_5842 = lean_box(0); +x_5843 = lean_alloc_ctor(0, 2, 0); +lean_ctor_set(x_5843, 0, x_5842); +lean_ctor_set(x_5843, 1, x_5259); +x_5260 = x_5843; +x_5261 = x_5841; +goto block_5838; +} +else +{ +lean_object* x_5844; lean_object* x_5845; lean_object* x_5846; lean_object* x_5847; lean_object* x_5848; lean_object* x_5849; lean_object* x_5850; uint8_t x_5851; +x_5844 = lean_ctor_get(x_5839, 1); +lean_inc(x_5844); +if (lean_is_exclusive(x_5839)) { + lean_ctor_release(x_5839, 0); + lean_ctor_release(x_5839, 1); + x_5845 = x_5839; +} else { + lean_dec_ref(x_5839); + x_5845 = lean_box(0); +} +x_5846 = lean_ctor_get(x_5840, 0); +lean_inc(x_5846); +if (lean_is_exclusive(x_5840)) { + lean_ctor_release(x_5840, 0); + x_5847 = x_5840; +} else { + lean_dec_ref(x_5840); + x_5847 = lean_box(0); +} +x_5848 = lean_array_get_size(x_5258); +x_5849 = lean_ctor_get(x_5846, 3); +lean_inc(x_5849); +lean_dec(x_5846); +x_5850 = lean_array_get_size(x_5849); +lean_dec(x_5849); +x_5851 = lean_nat_dec_lt(x_5848, x_5850); +if (x_5851 == 0) +{ +uint8_t x_5852; +x_5852 = lean_nat_dec_eq(x_5848, x_5850); +if (x_5852 == 0) +{ +lean_object* x_5853; lean_object* x_5854; lean_object* x_5855; lean_object* x_5856; lean_object* x_5857; lean_object* x_5858; lean_object* x_5859; lean_object* x_5860; lean_object* x_5861; lean_object* x_5862; lean_object* x_5863; lean_object* x_5864; lean_object* x_5865; lean_object* x_5866; lean_object* x_5867; lean_object* x_5868; lean_object* x_5869; +x_5853 = lean_unsigned_to_nat(0u); +x_5854 = l_Array_extract___rarg(x_5258, x_5853, x_5850); +x_5855 = l_Array_extract___rarg(x_5258, x_5850, x_5848); +lean_dec(x_5848); +lean_inc(x_153); +if (lean_is_scalar(x_5845)) { + x_5856 = lean_alloc_ctor(6, 2, 0); +} else { + x_5856 = x_5845; + lean_ctor_set_tag(x_5856, 6); +} +lean_ctor_set(x_5856, 0, x_153); +lean_ctor_set(x_5856, 1, x_5854); +x_5857 = lean_ctor_get(x_1, 0); +lean_inc(x_5857); +x_5858 = l_Lean_IR_ToIR_bindVar(x_5857, x_5259, x_4, x_5, x_5844); +x_5859 = lean_ctor_get(x_5858, 0); +lean_inc(x_5859); +x_5860 = lean_ctor_get(x_5858, 1); +lean_inc(x_5860); +lean_dec(x_5858); +x_5861 = lean_ctor_get(x_5859, 0); +lean_inc(x_5861); +x_5862 = lean_ctor_get(x_5859, 1); +lean_inc(x_5862); +lean_dec(x_5859); +x_5863 = l_Lean_IR_ToIR_newVar(x_5862, x_4, x_5, x_5860); +x_5864 = lean_ctor_get(x_5863, 0); +lean_inc(x_5864); +x_5865 = lean_ctor_get(x_5863, 1); +lean_inc(x_5865); +lean_dec(x_5863); +x_5866 = lean_ctor_get(x_5864, 0); +lean_inc(x_5866); +x_5867 = lean_ctor_get(x_5864, 1); +lean_inc(x_5867); +lean_dec(x_5864); +x_5868 = lean_ctor_get(x_1, 2); +lean_inc(x_5868); +lean_inc(x_5); +lean_inc(x_4); +x_5869 = l_Lean_IR_ToIR_lowerType(x_5868, x_5867, x_4, x_5, x_5865); +if (lean_obj_tag(x_5869) == 0) +{ +lean_object* x_5870; lean_object* x_5871; lean_object* x_5872; lean_object* x_5873; lean_object* x_5874; +x_5870 = lean_ctor_get(x_5869, 0); +lean_inc(x_5870); +x_5871 = lean_ctor_get(x_5869, 1); +lean_inc(x_5871); +lean_dec(x_5869); +x_5872 = lean_ctor_get(x_5870, 0); +lean_inc(x_5872); +x_5873 = lean_ctor_get(x_5870, 1); +lean_inc(x_5873); +lean_dec(x_5870); +lean_inc(x_5); +lean_inc(x_4); +lean_inc(x_2); +x_5874 = 
l_Lean_IR_ToIR_lowerLet___lambda__3(x_2, x_5866, x_5855, x_5861, x_5856, x_5872, x_5873, x_4, x_5, x_5871); +if (lean_obj_tag(x_5874) == 0) +{ +lean_object* x_5875; lean_object* x_5876; lean_object* x_5877; lean_object* x_5878; lean_object* x_5879; lean_object* x_5880; lean_object* x_5881; +x_5875 = lean_ctor_get(x_5874, 0); +lean_inc(x_5875); +x_5876 = lean_ctor_get(x_5874, 1); +lean_inc(x_5876); +lean_dec(x_5874); +x_5877 = lean_ctor_get(x_5875, 0); +lean_inc(x_5877); +x_5878 = lean_ctor_get(x_5875, 1); +lean_inc(x_5878); +if (lean_is_exclusive(x_5875)) { + lean_ctor_release(x_5875, 0); + lean_ctor_release(x_5875, 1); + x_5879 = x_5875; +} else { + lean_dec_ref(x_5875); + x_5879 = lean_box(0); +} +if (lean_is_scalar(x_5847)) { + x_5880 = lean_alloc_ctor(1, 1, 0); +} else { + x_5880 = x_5847; +} +lean_ctor_set(x_5880, 0, x_5877); +if (lean_is_scalar(x_5879)) { + x_5881 = lean_alloc_ctor(0, 2, 0); +} else { + x_5881 = x_5879; +} +lean_ctor_set(x_5881, 0, x_5880); +lean_ctor_set(x_5881, 1, x_5878); +x_5260 = x_5881; +x_5261 = x_5876; +goto block_5838; +} +else +{ +lean_object* x_5882; lean_object* x_5883; lean_object* x_5884; lean_object* x_5885; +lean_dec(x_5847); +lean_dec(x_5258); +lean_dec(x_3056); +lean_dec(x_3050); +lean_dec(x_3049); +lean_dec(x_153); +lean_dec(x_5); +lean_dec(x_4); +lean_dec(x_2); +lean_dec(x_1); +x_5882 = lean_ctor_get(x_5874, 0); +lean_inc(x_5882); +x_5883 = lean_ctor_get(x_5874, 1); +lean_inc(x_5883); +if (lean_is_exclusive(x_5874)) { + lean_ctor_release(x_5874, 0); + lean_ctor_release(x_5874, 1); + x_5884 = x_5874; +} else { + lean_dec_ref(x_5874); + x_5884 = lean_box(0); +} +if (lean_is_scalar(x_5884)) { + x_5885 = lean_alloc_ctor(1, 2, 0); +} else { + x_5885 = x_5884; +} +lean_ctor_set(x_5885, 0, x_5882); +lean_ctor_set(x_5885, 1, x_5883); +return x_5885; +} +} +else +{ +lean_object* x_5886; lean_object* x_5887; lean_object* x_5888; lean_object* x_5889; +lean_dec(x_5866); +lean_dec(x_5861); +lean_dec(x_5856); +lean_dec(x_5855); +lean_dec(x_5847); +lean_dec(x_5258); +lean_dec(x_3056); +lean_dec(x_3050); +lean_dec(x_3049); +lean_dec(x_153); +lean_dec(x_5); +lean_dec(x_4); +lean_dec(x_2); +lean_dec(x_1); +x_5886 = lean_ctor_get(x_5869, 0); +lean_inc(x_5886); +x_5887 = lean_ctor_get(x_5869, 1); +lean_inc(x_5887); +if (lean_is_exclusive(x_5869)) { + lean_ctor_release(x_5869, 0); + lean_ctor_release(x_5869, 1); + x_5888 = x_5869; +} else { + lean_dec_ref(x_5869); + x_5888 = lean_box(0); +} +if (lean_is_scalar(x_5888)) { + x_5889 = lean_alloc_ctor(1, 2, 0); +} else { + x_5889 = x_5888; +} +lean_ctor_set(x_5889, 0, x_5886); +lean_ctor_set(x_5889, 1, x_5887); +return x_5889; +} +} +else +{ +lean_object* x_5890; lean_object* x_5891; lean_object* x_5892; lean_object* x_5893; lean_object* x_5894; lean_object* x_5895; lean_object* x_5896; lean_object* x_5897; lean_object* x_5898; +lean_dec(x_5850); +lean_dec(x_5848); +lean_inc(x_5258); +lean_inc(x_153); +if (lean_is_scalar(x_5845)) { + x_5890 = lean_alloc_ctor(6, 2, 0); +} else { + x_5890 = x_5845; + lean_ctor_set_tag(x_5890, 6); +} +lean_ctor_set(x_5890, 0, x_153); +lean_ctor_set(x_5890, 1, x_5258); +x_5891 = lean_ctor_get(x_1, 0); +lean_inc(x_5891); +x_5892 = l_Lean_IR_ToIR_bindVar(x_5891, x_5259, x_4, x_5, x_5844); +x_5893 = lean_ctor_get(x_5892, 0); +lean_inc(x_5893); +x_5894 = lean_ctor_get(x_5892, 1); +lean_inc(x_5894); +lean_dec(x_5892); +x_5895 = lean_ctor_get(x_5893, 0); +lean_inc(x_5895); +x_5896 = lean_ctor_get(x_5893, 1); +lean_inc(x_5896); +lean_dec(x_5893); +x_5897 = lean_ctor_get(x_1, 2); +lean_inc(x_5897); 
+lean_inc(x_5); +lean_inc(x_4); +x_5898 = l_Lean_IR_ToIR_lowerType(x_5897, x_5896, x_4, x_5, x_5894); +if (lean_obj_tag(x_5898) == 0) +{ +lean_object* x_5899; lean_object* x_5900; lean_object* x_5901; lean_object* x_5902; lean_object* x_5903; +x_5899 = lean_ctor_get(x_5898, 0); +lean_inc(x_5899); +x_5900 = lean_ctor_get(x_5898, 1); +lean_inc(x_5900); +lean_dec(x_5898); +x_5901 = lean_ctor_get(x_5899, 0); +lean_inc(x_5901); +x_5902 = lean_ctor_get(x_5899, 1); +lean_inc(x_5902); +lean_dec(x_5899); +lean_inc(x_5); +lean_inc(x_4); +lean_inc(x_2); +x_5903 = l_Lean_IR_ToIR_lowerLet___lambda__1(x_2, x_5895, x_5890, x_5901, x_5902, x_4, x_5, x_5900); +if (lean_obj_tag(x_5903) == 0) +{ +lean_object* x_5904; lean_object* x_5905; lean_object* x_5906; lean_object* x_5907; lean_object* x_5908; lean_object* x_5909; lean_object* x_5910; +x_5904 = lean_ctor_get(x_5903, 0); +lean_inc(x_5904); +x_5905 = lean_ctor_get(x_5903, 1); +lean_inc(x_5905); +lean_dec(x_5903); +x_5906 = lean_ctor_get(x_5904, 0); +lean_inc(x_5906); +x_5907 = lean_ctor_get(x_5904, 1); +lean_inc(x_5907); +if (lean_is_exclusive(x_5904)) { + lean_ctor_release(x_5904, 0); + lean_ctor_release(x_5904, 1); + x_5908 = x_5904; +} else { + lean_dec_ref(x_5904); + x_5908 = lean_box(0); +} +if (lean_is_scalar(x_5847)) { + x_5909 = lean_alloc_ctor(1, 1, 0); +} else { + x_5909 = x_5847; +} +lean_ctor_set(x_5909, 0, x_5906); +if (lean_is_scalar(x_5908)) { + x_5910 = lean_alloc_ctor(0, 2, 0); +} else { + x_5910 = x_5908; +} +lean_ctor_set(x_5910, 0, x_5909); +lean_ctor_set(x_5910, 1, x_5907); +x_5260 = x_5910; +x_5261 = x_5905; +goto block_5838; +} +else +{ +lean_object* x_5911; lean_object* x_5912; lean_object* x_5913; lean_object* x_5914; +lean_dec(x_5847); +lean_dec(x_5258); +lean_dec(x_3056); +lean_dec(x_3050); +lean_dec(x_3049); +lean_dec(x_153); +lean_dec(x_5); +lean_dec(x_4); +lean_dec(x_2); +lean_dec(x_1); +x_5911 = lean_ctor_get(x_5903, 0); +lean_inc(x_5911); +x_5912 = lean_ctor_get(x_5903, 1); +lean_inc(x_5912); +if (lean_is_exclusive(x_5903)) { + lean_ctor_release(x_5903, 0); + lean_ctor_release(x_5903, 1); + x_5913 = x_5903; +} else { + lean_dec_ref(x_5903); + x_5913 = lean_box(0); +} +if (lean_is_scalar(x_5913)) { + x_5914 = lean_alloc_ctor(1, 2, 0); +} else { + x_5914 = x_5913; +} +lean_ctor_set(x_5914, 0, x_5911); +lean_ctor_set(x_5914, 1, x_5912); +return x_5914; +} +} +else +{ +lean_object* x_5915; lean_object* x_5916; lean_object* x_5917; lean_object* x_5918; +lean_dec(x_5895); +lean_dec(x_5890); +lean_dec(x_5847); +lean_dec(x_5258); +lean_dec(x_3056); +lean_dec(x_3050); +lean_dec(x_3049); +lean_dec(x_153); +lean_dec(x_5); +lean_dec(x_4); +lean_dec(x_2); +lean_dec(x_1); +x_5915 = lean_ctor_get(x_5898, 0); +lean_inc(x_5915); +x_5916 = lean_ctor_get(x_5898, 1); +lean_inc(x_5916); +if (lean_is_exclusive(x_5898)) { + lean_ctor_release(x_5898, 0); + lean_ctor_release(x_5898, 1); + x_5917 = x_5898; +} else { + lean_dec_ref(x_5898); + x_5917 = lean_box(0); +} +if (lean_is_scalar(x_5917)) { + x_5918 = lean_alloc_ctor(1, 2, 0); +} else { + x_5918 = x_5917; +} +lean_ctor_set(x_5918, 0, x_5915); +lean_ctor_set(x_5918, 1, x_5916); +return x_5918; +} +} +} +else +{ +lean_object* x_5919; lean_object* x_5920; lean_object* x_5921; lean_object* x_5922; lean_object* x_5923; lean_object* x_5924; lean_object* x_5925; lean_object* x_5926; lean_object* x_5927; +lean_dec(x_5850); +lean_dec(x_5848); +lean_inc(x_5258); +lean_inc(x_153); +if (lean_is_scalar(x_5845)) { + x_5919 = lean_alloc_ctor(7, 2, 0); +} else { + x_5919 = x_5845; + lean_ctor_set_tag(x_5919, 
7); +} +lean_ctor_set(x_5919, 0, x_153); +lean_ctor_set(x_5919, 1, x_5258); +x_5920 = lean_ctor_get(x_1, 0); +lean_inc(x_5920); +x_5921 = l_Lean_IR_ToIR_bindVar(x_5920, x_5259, x_4, x_5, x_5844); +x_5922 = lean_ctor_get(x_5921, 0); +lean_inc(x_5922); +x_5923 = lean_ctor_get(x_5921, 1); +lean_inc(x_5923); +lean_dec(x_5921); +x_5924 = lean_ctor_get(x_5922, 0); +lean_inc(x_5924); +x_5925 = lean_ctor_get(x_5922, 1); +lean_inc(x_5925); +lean_dec(x_5922); +x_5926 = lean_box(7); +lean_inc(x_5); +lean_inc(x_4); +lean_inc(x_2); +x_5927 = l_Lean_IR_ToIR_lowerLet___lambda__1(x_2, x_5924, x_5919, x_5926, x_5925, x_4, x_5, x_5923); +if (lean_obj_tag(x_5927) == 0) +{ +lean_object* x_5928; lean_object* x_5929; lean_object* x_5930; lean_object* x_5931; lean_object* x_5932; lean_object* x_5933; lean_object* x_5934; +x_5928 = lean_ctor_get(x_5927, 0); +lean_inc(x_5928); +x_5929 = lean_ctor_get(x_5927, 1); +lean_inc(x_5929); +lean_dec(x_5927); +x_5930 = lean_ctor_get(x_5928, 0); +lean_inc(x_5930); +x_5931 = lean_ctor_get(x_5928, 1); +lean_inc(x_5931); +if (lean_is_exclusive(x_5928)) { + lean_ctor_release(x_5928, 0); + lean_ctor_release(x_5928, 1); + x_5932 = x_5928; +} else { + lean_dec_ref(x_5928); + x_5932 = lean_box(0); +} +if (lean_is_scalar(x_5847)) { + x_5933 = lean_alloc_ctor(1, 1, 0); +} else { + x_5933 = x_5847; +} +lean_ctor_set(x_5933, 0, x_5930); +if (lean_is_scalar(x_5932)) { + x_5934 = lean_alloc_ctor(0, 2, 0); +} else { + x_5934 = x_5932; +} +lean_ctor_set(x_5934, 0, x_5933); +lean_ctor_set(x_5934, 1, x_5931); +x_5260 = x_5934; +x_5261 = x_5929; +goto block_5838; +} +else +{ +lean_object* x_5935; lean_object* x_5936; lean_object* x_5937; lean_object* x_5938; +lean_dec(x_5847); +lean_dec(x_5258); +lean_dec(x_3056); +lean_dec(x_3050); +lean_dec(x_3049); +lean_dec(x_153); +lean_dec(x_5); +lean_dec(x_4); +lean_dec(x_2); +lean_dec(x_1); +x_5935 = lean_ctor_get(x_5927, 0); +lean_inc(x_5935); +x_5936 = lean_ctor_get(x_5927, 1); +lean_inc(x_5936); +if (lean_is_exclusive(x_5927)) { + lean_ctor_release(x_5927, 0); + lean_ctor_release(x_5927, 1); + x_5937 = x_5927; +} else { + lean_dec_ref(x_5927); + x_5937 = lean_box(0); +} +if (lean_is_scalar(x_5937)) { + x_5938 = lean_alloc_ctor(1, 2, 0); +} else { + x_5938 = x_5937; +} +lean_ctor_set(x_5938, 0, x_5935); +lean_ctor_set(x_5938, 1, x_5936); +return x_5938; +} +} +} +block_5838: +{ +lean_object* x_5262; +x_5262 = lean_ctor_get(x_5260, 0); +lean_inc(x_5262); +if (lean_obj_tag(x_5262) == 0) +{ +lean_object* x_5263; lean_object* x_5264; lean_object* x_5265; lean_object* x_5266; lean_object* x_5267; lean_object* x_5268; lean_object* x_5269; uint8_t x_5270; lean_object* x_5271; +lean_dec(x_3056); +x_5263 = lean_ctor_get(x_5260, 1); +lean_inc(x_5263); +if (lean_is_exclusive(x_5260)) { + lean_ctor_release(x_5260, 0); + lean_ctor_release(x_5260, 1); + x_5264 = x_5260; +} else { + lean_dec_ref(x_5260); + x_5264 = lean_box(0); +} +x_5265 = lean_st_ref_get(x_5, x_5261); +x_5266 = lean_ctor_get(x_5265, 0); +lean_inc(x_5266); +x_5267 = lean_ctor_get(x_5265, 1); +lean_inc(x_5267); +if (lean_is_exclusive(x_5265)) { + lean_ctor_release(x_5265, 0); + lean_ctor_release(x_5265, 1); + x_5268 = x_5265; +} else { + lean_dec_ref(x_5265); + x_5268 = lean_box(0); +} +x_5269 = lean_ctor_get(x_5266, 0); +lean_inc(x_5269); +lean_dec(x_5266); +x_5270 = 0; +lean_inc(x_153); +lean_inc(x_5269); +x_5271 = l_Lean_Environment_find_x3f(x_5269, x_153, x_5270); +if (lean_obj_tag(x_5271) == 0) +{ +lean_object* x_5272; lean_object* x_5273; +lean_dec(x_5269); +lean_dec(x_5268); 
+lean_dec(x_5264); +lean_dec(x_5258); +lean_dec(x_3050); +lean_dec(x_3049); +lean_dec(x_153); +lean_dec(x_2); +lean_dec(x_1); +x_5272 = l_Lean_IR_ToIR_lowerLet___closed__6; +x_5273 = l_panic___at_Lean_IR_ToIR_lowerAlt_loop___spec__1(x_5272, x_5263, x_4, x_5, x_5267); +return x_5273; +} +else +{ +lean_object* x_5274; +x_5274 = lean_ctor_get(x_5271, 0); +lean_inc(x_5274); +lean_dec(x_5271); +switch (lean_obj_tag(x_5274)) { +case 0: +{ +lean_object* x_5275; lean_object* x_5276; uint8_t x_5277; +lean_dec(x_5269); +lean_dec(x_3050); +lean_dec(x_3049); +if (lean_is_exclusive(x_5274)) { + lean_ctor_release(x_5274, 0); + x_5275 = x_5274; +} else { + lean_dec_ref(x_5274); + x_5275 = lean_box(0); +} +x_5276 = l_Lean_IR_ToIR_lowerLet___closed__9; +x_5277 = lean_name_eq(x_153, x_5276); +if (x_5277 == 0) +{ +lean_object* x_5278; uint8_t x_5279; +x_5278 = l_Lean_IR_ToIR_lowerLet___closed__11; +x_5279 = lean_name_eq(x_153, x_5278); +if (x_5279 == 0) +{ +lean_object* x_5280; lean_object* x_5281; lean_object* x_5282; +lean_dec(x_5268); +lean_dec(x_5264); +lean_inc(x_153); +x_5280 = l_Lean_IR_ToIR_findDecl(x_153, x_5263, x_4, x_5, x_5267); +x_5281 = lean_ctor_get(x_5280, 0); +lean_inc(x_5281); +x_5282 = lean_ctor_get(x_5281, 0); +lean_inc(x_5282); +if (lean_obj_tag(x_5282) == 0) +{ +lean_object* x_5283; lean_object* x_5284; lean_object* x_5285; lean_object* x_5286; uint8_t x_5287; lean_object* x_5288; lean_object* x_5289; lean_object* x_5290; lean_object* x_5291; lean_object* x_5292; lean_object* x_5293; lean_object* x_5294; lean_object* x_5295; lean_object* x_5296; +lean_dec(x_5258); +lean_dec(x_2); +lean_dec(x_1); +x_5283 = lean_ctor_get(x_5280, 1); +lean_inc(x_5283); +if (lean_is_exclusive(x_5280)) { + lean_ctor_release(x_5280, 0); + lean_ctor_release(x_5280, 1); + x_5284 = x_5280; +} else { + lean_dec_ref(x_5280); + x_5284 = lean_box(0); +} +x_5285 = lean_ctor_get(x_5281, 1); +lean_inc(x_5285); +if (lean_is_exclusive(x_5281)) { + lean_ctor_release(x_5281, 0); + lean_ctor_release(x_5281, 1); + x_5286 = x_5281; +} else { + lean_dec_ref(x_5281); + x_5286 = lean_box(0); +} +x_5287 = 1; +x_5288 = l_Lean_IR_ToIR_lowerLet___closed__14; +x_5289 = l_Lean_Name_toString(x_153, x_5287, x_5288); +if (lean_is_scalar(x_5275)) { + x_5290 = lean_alloc_ctor(3, 1, 0); +} else { + x_5290 = x_5275; + lean_ctor_set_tag(x_5290, 3); +} +lean_ctor_set(x_5290, 0, x_5289); +x_5291 = l_Lean_IR_ToIR_lowerLet___closed__13; +if (lean_is_scalar(x_5286)) { + x_5292 = lean_alloc_ctor(5, 2, 0); +} else { + x_5292 = x_5286; + lean_ctor_set_tag(x_5292, 5); +} +lean_ctor_set(x_5292, 0, x_5291); +lean_ctor_set(x_5292, 1, x_5290); +x_5293 = l_Lean_IR_ToIR_lowerLet___closed__16; +if (lean_is_scalar(x_5284)) { + x_5294 = lean_alloc_ctor(5, 2, 0); +} else { + x_5294 = x_5284; + lean_ctor_set_tag(x_5294, 5); +} +lean_ctor_set(x_5294, 0, x_5292); +lean_ctor_set(x_5294, 1, x_5293); +x_5295 = l_Lean_MessageData_ofFormat(x_5294); +x_5296 = l_Lean_throwError___at_Lean_IR_ToIR_lowerLet___spec__1(x_5295, x_5285, x_4, x_5, x_5283); +lean_dec(x_5); +lean_dec(x_4); +lean_dec(x_5285); +return x_5296; +} +else +{ +lean_object* x_5297; lean_object* x_5298; lean_object* x_5299; lean_object* x_5300; lean_object* x_5301; lean_object* x_5302; lean_object* x_5303; uint8_t x_5304; +lean_dec(x_5275); +x_5297 = lean_ctor_get(x_5280, 1); +lean_inc(x_5297); +lean_dec(x_5280); +x_5298 = lean_ctor_get(x_5281, 1); +lean_inc(x_5298); +if (lean_is_exclusive(x_5281)) { + lean_ctor_release(x_5281, 0); + lean_ctor_release(x_5281, 1); + x_5299 = x_5281; +} else { + 
lean_dec_ref(x_5281); + x_5299 = lean_box(0); +} +x_5300 = lean_ctor_get(x_5282, 0); +lean_inc(x_5300); +lean_dec(x_5282); +x_5301 = lean_array_get_size(x_5258); +x_5302 = l_Lean_IR_Decl_params(x_5300); +lean_dec(x_5300); +x_5303 = lean_array_get_size(x_5302); +lean_dec(x_5302); +x_5304 = lean_nat_dec_lt(x_5301, x_5303); +if (x_5304 == 0) +{ +uint8_t x_5305; +x_5305 = lean_nat_dec_eq(x_5301, x_5303); +if (x_5305 == 0) +{ +lean_object* x_5306; lean_object* x_5307; lean_object* x_5308; lean_object* x_5309; lean_object* x_5310; lean_object* x_5311; lean_object* x_5312; lean_object* x_5313; lean_object* x_5314; lean_object* x_5315; lean_object* x_5316; lean_object* x_5317; lean_object* x_5318; lean_object* x_5319; lean_object* x_5320; lean_object* x_5321; lean_object* x_5322; +x_5306 = lean_unsigned_to_nat(0u); +x_5307 = l_Array_extract___rarg(x_5258, x_5306, x_5303); +x_5308 = l_Array_extract___rarg(x_5258, x_5303, x_5301); +lean_dec(x_5301); +lean_dec(x_5258); +if (lean_is_scalar(x_5299)) { + x_5309 = lean_alloc_ctor(6, 2, 0); +} else { + x_5309 = x_5299; + lean_ctor_set_tag(x_5309, 6); +} +lean_ctor_set(x_5309, 0, x_153); +lean_ctor_set(x_5309, 1, x_5307); +x_5310 = lean_ctor_get(x_1, 0); +lean_inc(x_5310); +x_5311 = l_Lean_IR_ToIR_bindVar(x_5310, x_5298, x_4, x_5, x_5297); +x_5312 = lean_ctor_get(x_5311, 0); +lean_inc(x_5312); +x_5313 = lean_ctor_get(x_5311, 1); +lean_inc(x_5313); +lean_dec(x_5311); +x_5314 = lean_ctor_get(x_5312, 0); +lean_inc(x_5314); +x_5315 = lean_ctor_get(x_5312, 1); +lean_inc(x_5315); +lean_dec(x_5312); +x_5316 = l_Lean_IR_ToIR_newVar(x_5315, x_4, x_5, x_5313); +x_5317 = lean_ctor_get(x_5316, 0); +lean_inc(x_5317); +x_5318 = lean_ctor_get(x_5316, 1); +lean_inc(x_5318); +lean_dec(x_5316); +x_5319 = lean_ctor_get(x_5317, 0); +lean_inc(x_5319); +x_5320 = lean_ctor_get(x_5317, 1); +lean_inc(x_5320); +lean_dec(x_5317); +x_5321 = lean_ctor_get(x_1, 2); +lean_inc(x_5321); +lean_dec(x_1); +lean_inc(x_5); +lean_inc(x_4); +x_5322 = l_Lean_IR_ToIR_lowerType(x_5321, x_5320, x_4, x_5, x_5318); +if (lean_obj_tag(x_5322) == 0) +{ +lean_object* x_5323; lean_object* x_5324; lean_object* x_5325; lean_object* x_5326; lean_object* x_5327; +x_5323 = lean_ctor_get(x_5322, 0); +lean_inc(x_5323); +x_5324 = lean_ctor_get(x_5322, 1); +lean_inc(x_5324); +lean_dec(x_5322); +x_5325 = lean_ctor_get(x_5323, 0); +lean_inc(x_5325); +x_5326 = lean_ctor_get(x_5323, 1); +lean_inc(x_5326); +lean_dec(x_5323); +x_5327 = l_Lean_IR_ToIR_lowerLet___lambda__3(x_2, x_5319, x_5308, x_5314, x_5309, x_5325, x_5326, x_4, x_5, x_5324); +return x_5327; +} +else +{ +lean_object* x_5328; lean_object* x_5329; lean_object* x_5330; lean_object* x_5331; +lean_dec(x_5319); +lean_dec(x_5314); +lean_dec(x_5309); +lean_dec(x_5308); +lean_dec(x_5); +lean_dec(x_4); +lean_dec(x_2); +x_5328 = lean_ctor_get(x_5322, 0); +lean_inc(x_5328); +x_5329 = lean_ctor_get(x_5322, 1); +lean_inc(x_5329); +if (lean_is_exclusive(x_5322)) { + lean_ctor_release(x_5322, 0); + lean_ctor_release(x_5322, 1); + x_5330 = x_5322; +} else { + lean_dec_ref(x_5322); + x_5330 = lean_box(0); +} +if (lean_is_scalar(x_5330)) { + x_5331 = lean_alloc_ctor(1, 2, 0); +} else { + x_5331 = x_5330; +} +lean_ctor_set(x_5331, 0, x_5328); +lean_ctor_set(x_5331, 1, x_5329); +return x_5331; +} +} +else +{ +lean_object* x_5332; lean_object* x_5333; lean_object* x_5334; lean_object* x_5335; lean_object* x_5336; lean_object* x_5337; lean_object* x_5338; lean_object* x_5339; lean_object* x_5340; +lean_dec(x_5303); +lean_dec(x_5301); +if (lean_is_scalar(x_5299)) { + x_5332 = 
lean_alloc_ctor(6, 2, 0); +} else { + x_5332 = x_5299; + lean_ctor_set_tag(x_5332, 6); +} +lean_ctor_set(x_5332, 0, x_153); +lean_ctor_set(x_5332, 1, x_5258); +x_5333 = lean_ctor_get(x_1, 0); +lean_inc(x_5333); +x_5334 = l_Lean_IR_ToIR_bindVar(x_5333, x_5298, x_4, x_5, x_5297); +x_5335 = lean_ctor_get(x_5334, 0); +lean_inc(x_5335); +x_5336 = lean_ctor_get(x_5334, 1); +lean_inc(x_5336); +lean_dec(x_5334); +x_5337 = lean_ctor_get(x_5335, 0); +lean_inc(x_5337); +x_5338 = lean_ctor_get(x_5335, 1); +lean_inc(x_5338); +lean_dec(x_5335); +x_5339 = lean_ctor_get(x_1, 2); +lean_inc(x_5339); +lean_dec(x_1); +lean_inc(x_5); +lean_inc(x_4); +x_5340 = l_Lean_IR_ToIR_lowerType(x_5339, x_5338, x_4, x_5, x_5336); +if (lean_obj_tag(x_5340) == 0) +{ +lean_object* x_5341; lean_object* x_5342; lean_object* x_5343; lean_object* x_5344; lean_object* x_5345; +x_5341 = lean_ctor_get(x_5340, 0); +lean_inc(x_5341); +x_5342 = lean_ctor_get(x_5340, 1); +lean_inc(x_5342); +lean_dec(x_5340); +x_5343 = lean_ctor_get(x_5341, 0); +lean_inc(x_5343); +x_5344 = lean_ctor_get(x_5341, 1); +lean_inc(x_5344); +lean_dec(x_5341); +x_5345 = l_Lean_IR_ToIR_lowerLet___lambda__1(x_2, x_5337, x_5332, x_5343, x_5344, x_4, x_5, x_5342); +return x_5345; +} +else +{ +lean_object* x_5346; lean_object* x_5347; lean_object* x_5348; lean_object* x_5349; +lean_dec(x_5337); +lean_dec(x_5332); +lean_dec(x_5); +lean_dec(x_4); +lean_dec(x_2); +x_5346 = lean_ctor_get(x_5340, 0); +lean_inc(x_5346); +x_5347 = lean_ctor_get(x_5340, 1); +lean_inc(x_5347); +if (lean_is_exclusive(x_5340)) { + lean_ctor_release(x_5340, 0); + lean_ctor_release(x_5340, 1); + x_5348 = x_5340; +} else { + lean_dec_ref(x_5340); + x_5348 = lean_box(0); +} +if (lean_is_scalar(x_5348)) { + x_5349 = lean_alloc_ctor(1, 2, 0); +} else { + x_5349 = x_5348; +} +lean_ctor_set(x_5349, 0, x_5346); +lean_ctor_set(x_5349, 1, x_5347); +return x_5349; +} +} +} +else +{ +lean_object* x_5350; lean_object* x_5351; lean_object* x_5352; lean_object* x_5353; lean_object* x_5354; lean_object* x_5355; lean_object* x_5356; lean_object* x_5357; lean_object* x_5358; +lean_dec(x_5303); +lean_dec(x_5301); +if (lean_is_scalar(x_5299)) { + x_5350 = lean_alloc_ctor(7, 2, 0); +} else { + x_5350 = x_5299; + lean_ctor_set_tag(x_5350, 7); +} +lean_ctor_set(x_5350, 0, x_153); +lean_ctor_set(x_5350, 1, x_5258); +x_5351 = lean_ctor_get(x_1, 0); +lean_inc(x_5351); +lean_dec(x_1); +x_5352 = l_Lean_IR_ToIR_bindVar(x_5351, x_5298, x_4, x_5, x_5297); +x_5353 = lean_ctor_get(x_5352, 0); +lean_inc(x_5353); +x_5354 = lean_ctor_get(x_5352, 1); +lean_inc(x_5354); +lean_dec(x_5352); +x_5355 = lean_ctor_get(x_5353, 0); +lean_inc(x_5355); +x_5356 = lean_ctor_get(x_5353, 1); +lean_inc(x_5356); +lean_dec(x_5353); +x_5357 = lean_box(7); +x_5358 = l_Lean_IR_ToIR_lowerLet___lambda__1(x_2, x_5355, x_5350, x_5357, x_5356, x_4, x_5, x_5354); +return x_5358; +} +} +} +else +{ +lean_object* x_5359; lean_object* x_5360; lean_object* x_5361; +lean_dec(x_5275); +lean_dec(x_5258); +lean_dec(x_153); +lean_dec(x_5); +lean_dec(x_4); +lean_dec(x_2); +lean_dec(x_1); +x_5359 = lean_box(13); +if (lean_is_scalar(x_5264)) { + x_5360 = lean_alloc_ctor(0, 2, 0); +} else { + x_5360 = x_5264; +} +lean_ctor_set(x_5360, 0, x_5359); +lean_ctor_set(x_5360, 1, x_5263); +if (lean_is_scalar(x_5268)) { + x_5361 = lean_alloc_ctor(0, 2, 0); +} else { + x_5361 = x_5268; +} +lean_ctor_set(x_5361, 0, x_5360); +lean_ctor_set(x_5361, 1, x_5267); +return x_5361; +} +} +else +{ +lean_object* x_5362; lean_object* x_5363; lean_object* x_5364; +lean_dec(x_5275); 
+lean_dec(x_5268); +lean_dec(x_5264); +lean_dec(x_153); +x_5362 = l_Lean_IR_instInhabitedArg; +x_5363 = lean_unsigned_to_nat(2u); +x_5364 = lean_array_get(x_5362, x_5258, x_5363); +lean_dec(x_5258); +if (lean_obj_tag(x_5364) == 0) +{ +lean_object* x_5365; lean_object* x_5366; lean_object* x_5367; lean_object* x_5368; lean_object* x_5369; lean_object* x_5370; lean_object* x_5371; +x_5365 = lean_ctor_get(x_5364, 0); +lean_inc(x_5365); +lean_dec(x_5364); +x_5366 = lean_ctor_get(x_1, 0); +lean_inc(x_5366); +lean_dec(x_1); +x_5367 = l_Lean_IR_ToIR_bindVarToVarId(x_5366, x_5365, x_5263, x_4, x_5, x_5267); +x_5368 = lean_ctor_get(x_5367, 0); +lean_inc(x_5368); +x_5369 = lean_ctor_get(x_5367, 1); +lean_inc(x_5369); +lean_dec(x_5367); +x_5370 = lean_ctor_get(x_5368, 1); +lean_inc(x_5370); +lean_dec(x_5368); +x_5371 = l_Lean_IR_ToIR_lowerCode(x_2, x_5370, x_4, x_5, x_5369); +return x_5371; +} +else +{ +lean_object* x_5372; lean_object* x_5373; lean_object* x_5374; lean_object* x_5375; lean_object* x_5376; lean_object* x_5377; +x_5372 = lean_ctor_get(x_1, 0); +lean_inc(x_5372); +lean_dec(x_1); +x_5373 = l_Lean_IR_ToIR_bindErased(x_5372, x_5263, x_4, x_5, x_5267); +x_5374 = lean_ctor_get(x_5373, 0); +lean_inc(x_5374); +x_5375 = lean_ctor_get(x_5373, 1); +lean_inc(x_5375); +lean_dec(x_5373); +x_5376 = lean_ctor_get(x_5374, 1); +lean_inc(x_5376); +lean_dec(x_5374); +x_5377 = l_Lean_IR_ToIR_lowerCode(x_2, x_5376, x_4, x_5, x_5375); +return x_5377; +} +} +} +case 1: +{ +lean_object* x_5378; lean_object* x_5379; lean_object* x_5406; lean_object* x_5407; +lean_dec(x_5274); +lean_dec(x_5269); +lean_dec(x_3050); +lean_dec(x_3049); +lean_inc(x_153); +x_5406 = l_Lean_Compiler_LCNF_getMonoDecl_x3f(x_153, x_4, x_5, x_5267); +x_5407 = lean_ctor_get(x_5406, 0); +lean_inc(x_5407); +if (lean_obj_tag(x_5407) == 0) +{ +lean_object* x_5408; lean_object* x_5409; lean_object* x_5410; +x_5408 = lean_ctor_get(x_5406, 1); +lean_inc(x_5408); +lean_dec(x_5406); +x_5409 = lean_box(0); +if (lean_is_scalar(x_5264)) { + x_5410 = lean_alloc_ctor(0, 2, 0); +} else { + x_5410 = x_5264; +} +lean_ctor_set(x_5410, 0, x_5409); +lean_ctor_set(x_5410, 1, x_5263); +x_5378 = x_5410; +x_5379 = x_5408; +goto block_5405; +} +else +{ +lean_object* x_5411; lean_object* x_5412; lean_object* x_5413; lean_object* x_5414; lean_object* x_5415; lean_object* x_5416; lean_object* x_5417; uint8_t x_5418; +lean_dec(x_5264); +x_5411 = lean_ctor_get(x_5406, 1); +lean_inc(x_5411); +if (lean_is_exclusive(x_5406)) { + lean_ctor_release(x_5406, 0); + lean_ctor_release(x_5406, 1); + x_5412 = x_5406; +} else { + lean_dec_ref(x_5406); + x_5412 = lean_box(0); +} +x_5413 = lean_ctor_get(x_5407, 0); +lean_inc(x_5413); +if (lean_is_exclusive(x_5407)) { + lean_ctor_release(x_5407, 0); + x_5414 = x_5407; +} else { + lean_dec_ref(x_5407); + x_5414 = lean_box(0); +} +x_5415 = lean_array_get_size(x_5258); +x_5416 = lean_ctor_get(x_5413, 3); +lean_inc(x_5416); +lean_dec(x_5413); +x_5417 = lean_array_get_size(x_5416); +lean_dec(x_5416); +x_5418 = lean_nat_dec_lt(x_5415, x_5417); +if (x_5418 == 0) +{ +uint8_t x_5419; +x_5419 = lean_nat_dec_eq(x_5415, x_5417); +if (x_5419 == 0) +{ +lean_object* x_5420; lean_object* x_5421; lean_object* x_5422; lean_object* x_5423; lean_object* x_5424; lean_object* x_5425; lean_object* x_5426; lean_object* x_5427; lean_object* x_5428; lean_object* x_5429; lean_object* x_5430; lean_object* x_5431; lean_object* x_5432; lean_object* x_5433; lean_object* x_5434; lean_object* x_5435; lean_object* x_5436; +x_5420 = lean_unsigned_to_nat(0u); +x_5421 = 
l_Array_extract___rarg(x_5258, x_5420, x_5417); +x_5422 = l_Array_extract___rarg(x_5258, x_5417, x_5415); +lean_dec(x_5415); +lean_inc(x_153); +if (lean_is_scalar(x_5412)) { + x_5423 = lean_alloc_ctor(6, 2, 0); +} else { + x_5423 = x_5412; + lean_ctor_set_tag(x_5423, 6); +} +lean_ctor_set(x_5423, 0, x_153); +lean_ctor_set(x_5423, 1, x_5421); +x_5424 = lean_ctor_get(x_1, 0); +lean_inc(x_5424); +x_5425 = l_Lean_IR_ToIR_bindVar(x_5424, x_5263, x_4, x_5, x_5411); +x_5426 = lean_ctor_get(x_5425, 0); +lean_inc(x_5426); +x_5427 = lean_ctor_get(x_5425, 1); +lean_inc(x_5427); +lean_dec(x_5425); +x_5428 = lean_ctor_get(x_5426, 0); +lean_inc(x_5428); +x_5429 = lean_ctor_get(x_5426, 1); +lean_inc(x_5429); +lean_dec(x_5426); +x_5430 = l_Lean_IR_ToIR_newVar(x_5429, x_4, x_5, x_5427); +x_5431 = lean_ctor_get(x_5430, 0); +lean_inc(x_5431); +x_5432 = lean_ctor_get(x_5430, 1); +lean_inc(x_5432); +lean_dec(x_5430); +x_5433 = lean_ctor_get(x_5431, 0); +lean_inc(x_5433); +x_5434 = lean_ctor_get(x_5431, 1); +lean_inc(x_5434); +lean_dec(x_5431); +x_5435 = lean_ctor_get(x_1, 2); +lean_inc(x_5435); +lean_inc(x_5); +lean_inc(x_4); +x_5436 = l_Lean_IR_ToIR_lowerType(x_5435, x_5434, x_4, x_5, x_5432); +if (lean_obj_tag(x_5436) == 0) +{ +lean_object* x_5437; lean_object* x_5438; lean_object* x_5439; lean_object* x_5440; lean_object* x_5441; +x_5437 = lean_ctor_get(x_5436, 0); +lean_inc(x_5437); +x_5438 = lean_ctor_get(x_5436, 1); +lean_inc(x_5438); +lean_dec(x_5436); +x_5439 = lean_ctor_get(x_5437, 0); +lean_inc(x_5439); +x_5440 = lean_ctor_get(x_5437, 1); +lean_inc(x_5440); +lean_dec(x_5437); +lean_inc(x_5); +lean_inc(x_4); +lean_inc(x_2); +x_5441 = l_Lean_IR_ToIR_lowerLet___lambda__3(x_2, x_5433, x_5422, x_5428, x_5423, x_5439, x_5440, x_4, x_5, x_5438); +if (lean_obj_tag(x_5441) == 0) +{ +lean_object* x_5442; lean_object* x_5443; lean_object* x_5444; lean_object* x_5445; lean_object* x_5446; lean_object* x_5447; lean_object* x_5448; +x_5442 = lean_ctor_get(x_5441, 0); +lean_inc(x_5442); +x_5443 = lean_ctor_get(x_5441, 1); +lean_inc(x_5443); +lean_dec(x_5441); +x_5444 = lean_ctor_get(x_5442, 0); +lean_inc(x_5444); +x_5445 = lean_ctor_get(x_5442, 1); +lean_inc(x_5445); +if (lean_is_exclusive(x_5442)) { + lean_ctor_release(x_5442, 0); + lean_ctor_release(x_5442, 1); + x_5446 = x_5442; +} else { + lean_dec_ref(x_5442); + x_5446 = lean_box(0); +} +if (lean_is_scalar(x_5414)) { + x_5447 = lean_alloc_ctor(1, 1, 0); +} else { + x_5447 = x_5414; +} +lean_ctor_set(x_5447, 0, x_5444); +if (lean_is_scalar(x_5446)) { + x_5448 = lean_alloc_ctor(0, 2, 0); +} else { + x_5448 = x_5446; +} +lean_ctor_set(x_5448, 0, x_5447); +lean_ctor_set(x_5448, 1, x_5445); +x_5378 = x_5448; +x_5379 = x_5443; +goto block_5405; +} +else +{ +lean_object* x_5449; lean_object* x_5450; lean_object* x_5451; lean_object* x_5452; +lean_dec(x_5414); +lean_dec(x_5268); +lean_dec(x_5258); +lean_dec(x_153); +lean_dec(x_5); +lean_dec(x_4); +lean_dec(x_2); +lean_dec(x_1); +x_5449 = lean_ctor_get(x_5441, 0); +lean_inc(x_5449); +x_5450 = lean_ctor_get(x_5441, 1); +lean_inc(x_5450); +if (lean_is_exclusive(x_5441)) { + lean_ctor_release(x_5441, 0); + lean_ctor_release(x_5441, 1); + x_5451 = x_5441; +} else { + lean_dec_ref(x_5441); + x_5451 = lean_box(0); +} +if (lean_is_scalar(x_5451)) { + x_5452 = lean_alloc_ctor(1, 2, 0); +} else { + x_5452 = x_5451; +} +lean_ctor_set(x_5452, 0, x_5449); +lean_ctor_set(x_5452, 1, x_5450); +return x_5452; +} +} +else +{ +lean_object* x_5453; lean_object* x_5454; lean_object* x_5455; lean_object* x_5456; +lean_dec(x_5433); 
+lean_dec(x_5428); +lean_dec(x_5423); +lean_dec(x_5422); +lean_dec(x_5414); +lean_dec(x_5268); +lean_dec(x_5258); +lean_dec(x_153); +lean_dec(x_5); +lean_dec(x_4); +lean_dec(x_2); +lean_dec(x_1); +x_5453 = lean_ctor_get(x_5436, 0); +lean_inc(x_5453); +x_5454 = lean_ctor_get(x_5436, 1); +lean_inc(x_5454); +if (lean_is_exclusive(x_5436)) { + lean_ctor_release(x_5436, 0); + lean_ctor_release(x_5436, 1); + x_5455 = x_5436; +} else { + lean_dec_ref(x_5436); + x_5455 = lean_box(0); +} +if (lean_is_scalar(x_5455)) { + x_5456 = lean_alloc_ctor(1, 2, 0); +} else { + x_5456 = x_5455; +} +lean_ctor_set(x_5456, 0, x_5453); +lean_ctor_set(x_5456, 1, x_5454); +return x_5456; +} +} +else +{ +lean_object* x_5457; lean_object* x_5458; lean_object* x_5459; lean_object* x_5460; lean_object* x_5461; lean_object* x_5462; lean_object* x_5463; lean_object* x_5464; lean_object* x_5465; +lean_dec(x_5417); +lean_dec(x_5415); +lean_inc(x_5258); +lean_inc(x_153); +if (lean_is_scalar(x_5412)) { + x_5457 = lean_alloc_ctor(6, 2, 0); +} else { + x_5457 = x_5412; + lean_ctor_set_tag(x_5457, 6); +} +lean_ctor_set(x_5457, 0, x_153); +lean_ctor_set(x_5457, 1, x_5258); +x_5458 = lean_ctor_get(x_1, 0); +lean_inc(x_5458); +x_5459 = l_Lean_IR_ToIR_bindVar(x_5458, x_5263, x_4, x_5, x_5411); +x_5460 = lean_ctor_get(x_5459, 0); +lean_inc(x_5460); +x_5461 = lean_ctor_get(x_5459, 1); +lean_inc(x_5461); +lean_dec(x_5459); +x_5462 = lean_ctor_get(x_5460, 0); +lean_inc(x_5462); +x_5463 = lean_ctor_get(x_5460, 1); +lean_inc(x_5463); +lean_dec(x_5460); +x_5464 = lean_ctor_get(x_1, 2); +lean_inc(x_5464); +lean_inc(x_5); +lean_inc(x_4); +x_5465 = l_Lean_IR_ToIR_lowerType(x_5464, x_5463, x_4, x_5, x_5461); +if (lean_obj_tag(x_5465) == 0) +{ +lean_object* x_5466; lean_object* x_5467; lean_object* x_5468; lean_object* x_5469; lean_object* x_5470; +x_5466 = lean_ctor_get(x_5465, 0); +lean_inc(x_5466); +x_5467 = lean_ctor_get(x_5465, 1); +lean_inc(x_5467); +lean_dec(x_5465); +x_5468 = lean_ctor_get(x_5466, 0); +lean_inc(x_5468); +x_5469 = lean_ctor_get(x_5466, 1); +lean_inc(x_5469); +lean_dec(x_5466); +lean_inc(x_5); +lean_inc(x_4); +lean_inc(x_2); +x_5470 = l_Lean_IR_ToIR_lowerLet___lambda__1(x_2, x_5462, x_5457, x_5468, x_5469, x_4, x_5, x_5467); +if (lean_obj_tag(x_5470) == 0) +{ +lean_object* x_5471; lean_object* x_5472; lean_object* x_5473; lean_object* x_5474; lean_object* x_5475; lean_object* x_5476; lean_object* x_5477; +x_5471 = lean_ctor_get(x_5470, 0); +lean_inc(x_5471); +x_5472 = lean_ctor_get(x_5470, 1); +lean_inc(x_5472); +lean_dec(x_5470); +x_5473 = lean_ctor_get(x_5471, 0); +lean_inc(x_5473); +x_5474 = lean_ctor_get(x_5471, 1); +lean_inc(x_5474); +if (lean_is_exclusive(x_5471)) { + lean_ctor_release(x_5471, 0); + lean_ctor_release(x_5471, 1); + x_5475 = x_5471; +} else { + lean_dec_ref(x_5471); + x_5475 = lean_box(0); +} +if (lean_is_scalar(x_5414)) { + x_5476 = lean_alloc_ctor(1, 1, 0); +} else { + x_5476 = x_5414; +} +lean_ctor_set(x_5476, 0, x_5473); +if (lean_is_scalar(x_5475)) { + x_5477 = lean_alloc_ctor(0, 2, 0); +} else { + x_5477 = x_5475; +} +lean_ctor_set(x_5477, 0, x_5476); +lean_ctor_set(x_5477, 1, x_5474); +x_5378 = x_5477; +x_5379 = x_5472; +goto block_5405; +} +else +{ +lean_object* x_5478; lean_object* x_5479; lean_object* x_5480; lean_object* x_5481; +lean_dec(x_5414); +lean_dec(x_5268); +lean_dec(x_5258); +lean_dec(x_153); +lean_dec(x_5); +lean_dec(x_4); +lean_dec(x_2); +lean_dec(x_1); +x_5478 = lean_ctor_get(x_5470, 0); +lean_inc(x_5478); +x_5479 = lean_ctor_get(x_5470, 1); +lean_inc(x_5479); +if 
(lean_is_exclusive(x_5470)) { + lean_ctor_release(x_5470, 0); + lean_ctor_release(x_5470, 1); + x_5480 = x_5470; +} else { + lean_dec_ref(x_5470); + x_5480 = lean_box(0); +} +if (lean_is_scalar(x_5480)) { + x_5481 = lean_alloc_ctor(1, 2, 0); +} else { + x_5481 = x_5480; +} +lean_ctor_set(x_5481, 0, x_5478); +lean_ctor_set(x_5481, 1, x_5479); +return x_5481; +} +} +else +{ +lean_object* x_5482; lean_object* x_5483; lean_object* x_5484; lean_object* x_5485; +lean_dec(x_5462); +lean_dec(x_5457); +lean_dec(x_5414); +lean_dec(x_5268); +lean_dec(x_5258); +lean_dec(x_153); +lean_dec(x_5); +lean_dec(x_4); +lean_dec(x_2); +lean_dec(x_1); +x_5482 = lean_ctor_get(x_5465, 0); +lean_inc(x_5482); +x_5483 = lean_ctor_get(x_5465, 1); +lean_inc(x_5483); +if (lean_is_exclusive(x_5465)) { + lean_ctor_release(x_5465, 0); + lean_ctor_release(x_5465, 1); + x_5484 = x_5465; +} else { + lean_dec_ref(x_5465); + x_5484 = lean_box(0); +} +if (lean_is_scalar(x_5484)) { + x_5485 = lean_alloc_ctor(1, 2, 0); +} else { + x_5485 = x_5484; +} +lean_ctor_set(x_5485, 0, x_5482); +lean_ctor_set(x_5485, 1, x_5483); +return x_5485; +} +} +} +else +{ +lean_object* x_5486; lean_object* x_5487; lean_object* x_5488; lean_object* x_5489; lean_object* x_5490; lean_object* x_5491; lean_object* x_5492; lean_object* x_5493; lean_object* x_5494; +lean_dec(x_5417); +lean_dec(x_5415); +lean_inc(x_5258); +lean_inc(x_153); +if (lean_is_scalar(x_5412)) { + x_5486 = lean_alloc_ctor(7, 2, 0); +} else { + x_5486 = x_5412; + lean_ctor_set_tag(x_5486, 7); +} +lean_ctor_set(x_5486, 0, x_153); +lean_ctor_set(x_5486, 1, x_5258); +x_5487 = lean_ctor_get(x_1, 0); +lean_inc(x_5487); +x_5488 = l_Lean_IR_ToIR_bindVar(x_5487, x_5263, x_4, x_5, x_5411); +x_5489 = lean_ctor_get(x_5488, 0); +lean_inc(x_5489); +x_5490 = lean_ctor_get(x_5488, 1); +lean_inc(x_5490); +lean_dec(x_5488); +x_5491 = lean_ctor_get(x_5489, 0); +lean_inc(x_5491); +x_5492 = lean_ctor_get(x_5489, 1); +lean_inc(x_5492); +lean_dec(x_5489); +x_5493 = lean_box(7); +lean_inc(x_5); +lean_inc(x_4); +lean_inc(x_2); +x_5494 = l_Lean_IR_ToIR_lowerLet___lambda__1(x_2, x_5491, x_5486, x_5493, x_5492, x_4, x_5, x_5490); +if (lean_obj_tag(x_5494) == 0) +{ +lean_object* x_5495; lean_object* x_5496; lean_object* x_5497; lean_object* x_5498; lean_object* x_5499; lean_object* x_5500; lean_object* x_5501; +x_5495 = lean_ctor_get(x_5494, 0); +lean_inc(x_5495); +x_5496 = lean_ctor_get(x_5494, 1); +lean_inc(x_5496); +lean_dec(x_5494); +x_5497 = lean_ctor_get(x_5495, 0); +lean_inc(x_5497); +x_5498 = lean_ctor_get(x_5495, 1); +lean_inc(x_5498); +if (lean_is_exclusive(x_5495)) { + lean_ctor_release(x_5495, 0); + lean_ctor_release(x_5495, 1); + x_5499 = x_5495; +} else { + lean_dec_ref(x_5495); + x_5499 = lean_box(0); +} +if (lean_is_scalar(x_5414)) { + x_5500 = lean_alloc_ctor(1, 1, 0); +} else { + x_5500 = x_5414; +} +lean_ctor_set(x_5500, 0, x_5497); +if (lean_is_scalar(x_5499)) { + x_5501 = lean_alloc_ctor(0, 2, 0); +} else { + x_5501 = x_5499; +} +lean_ctor_set(x_5501, 0, x_5500); +lean_ctor_set(x_5501, 1, x_5498); +x_5378 = x_5501; +x_5379 = x_5496; +goto block_5405; +} +else +{ +lean_object* x_5502; lean_object* x_5503; lean_object* x_5504; lean_object* x_5505; +lean_dec(x_5414); +lean_dec(x_5268); +lean_dec(x_5258); +lean_dec(x_153); +lean_dec(x_5); +lean_dec(x_4); +lean_dec(x_2); +lean_dec(x_1); +x_5502 = lean_ctor_get(x_5494, 0); +lean_inc(x_5502); +x_5503 = lean_ctor_get(x_5494, 1); +lean_inc(x_5503); +if (lean_is_exclusive(x_5494)) { + lean_ctor_release(x_5494, 0); + lean_ctor_release(x_5494, 1); + 
x_5504 = x_5494; +} else { + lean_dec_ref(x_5494); + x_5504 = lean_box(0); +} +if (lean_is_scalar(x_5504)) { + x_5505 = lean_alloc_ctor(1, 2, 0); +} else { + x_5505 = x_5504; +} +lean_ctor_set(x_5505, 0, x_5502); +lean_ctor_set(x_5505, 1, x_5503); +return x_5505; +} +} +} +block_5405: +{ +lean_object* x_5380; +x_5380 = lean_ctor_get(x_5378, 0); +lean_inc(x_5380); +if (lean_obj_tag(x_5380) == 0) +{ +lean_object* x_5381; lean_object* x_5382; lean_object* x_5383; lean_object* x_5384; lean_object* x_5385; lean_object* x_5386; lean_object* x_5387; lean_object* x_5388; lean_object* x_5389; lean_object* x_5390; +lean_dec(x_5268); +x_5381 = lean_ctor_get(x_5378, 1); +lean_inc(x_5381); +lean_dec(x_5378); +x_5382 = lean_alloc_ctor(6, 2, 0); +lean_ctor_set(x_5382, 0, x_153); +lean_ctor_set(x_5382, 1, x_5258); +x_5383 = lean_ctor_get(x_1, 0); +lean_inc(x_5383); +x_5384 = l_Lean_IR_ToIR_bindVar(x_5383, x_5381, x_4, x_5, x_5379); +x_5385 = lean_ctor_get(x_5384, 0); +lean_inc(x_5385); +x_5386 = lean_ctor_get(x_5384, 1); +lean_inc(x_5386); +lean_dec(x_5384); +x_5387 = lean_ctor_get(x_5385, 0); +lean_inc(x_5387); +x_5388 = lean_ctor_get(x_5385, 1); +lean_inc(x_5388); +lean_dec(x_5385); +x_5389 = lean_ctor_get(x_1, 2); +lean_inc(x_5389); +lean_dec(x_1); +lean_inc(x_5); +lean_inc(x_4); +x_5390 = l_Lean_IR_ToIR_lowerType(x_5389, x_5388, x_4, x_5, x_5386); +if (lean_obj_tag(x_5390) == 0) +{ +lean_object* x_5391; lean_object* x_5392; lean_object* x_5393; lean_object* x_5394; lean_object* x_5395; +x_5391 = lean_ctor_get(x_5390, 0); +lean_inc(x_5391); +x_5392 = lean_ctor_get(x_5390, 1); +lean_inc(x_5392); +lean_dec(x_5390); +x_5393 = lean_ctor_get(x_5391, 0); +lean_inc(x_5393); +x_5394 = lean_ctor_get(x_5391, 1); +lean_inc(x_5394); +lean_dec(x_5391); +x_5395 = l_Lean_IR_ToIR_lowerLet___lambda__1(x_2, x_5387, x_5382, x_5393, x_5394, x_4, x_5, x_5392); +return x_5395; +} +else +{ +lean_object* x_5396; lean_object* x_5397; lean_object* x_5398; lean_object* x_5399; +lean_dec(x_5387); +lean_dec(x_5382); +lean_dec(x_5); +lean_dec(x_4); +lean_dec(x_2); +x_5396 = lean_ctor_get(x_5390, 0); +lean_inc(x_5396); +x_5397 = lean_ctor_get(x_5390, 1); +lean_inc(x_5397); +if (lean_is_exclusive(x_5390)) { + lean_ctor_release(x_5390, 0); + lean_ctor_release(x_5390, 1); + x_5398 = x_5390; +} else { + lean_dec_ref(x_5390); + x_5398 = lean_box(0); +} +if (lean_is_scalar(x_5398)) { + x_5399 = lean_alloc_ctor(1, 2, 0); +} else { + x_5399 = x_5398; +} +lean_ctor_set(x_5399, 0, x_5396); +lean_ctor_set(x_5399, 1, x_5397); +return x_5399; +} +} +else +{ +lean_object* x_5400; lean_object* x_5401; lean_object* x_5402; lean_object* x_5403; lean_object* x_5404; +lean_dec(x_5258); +lean_dec(x_153); +lean_dec(x_5); +lean_dec(x_4); +lean_dec(x_2); +lean_dec(x_1); +x_5400 = lean_ctor_get(x_5378, 1); +lean_inc(x_5400); +if (lean_is_exclusive(x_5378)) { + lean_ctor_release(x_5378, 0); + lean_ctor_release(x_5378, 1); + x_5401 = x_5378; +} else { + lean_dec_ref(x_5378); + x_5401 = lean_box(0); +} +x_5402 = lean_ctor_get(x_5380, 0); +lean_inc(x_5402); +lean_dec(x_5380); +if (lean_is_scalar(x_5401)) { + x_5403 = lean_alloc_ctor(0, 2, 0); +} else { + x_5403 = x_5401; +} +lean_ctor_set(x_5403, 0, x_5402); +lean_ctor_set(x_5403, 1, x_5400); +if (lean_is_scalar(x_5268)) { + x_5404 = lean_alloc_ctor(0, 2, 0); +} else { + x_5404 = x_5268; +} +lean_ctor_set(x_5404, 0, x_5403); +lean_ctor_set(x_5404, 1, x_5379); +return x_5404; +} +} +} +case 2: +{ +lean_object* x_5506; lean_object* x_5507; +lean_dec(x_5274); +lean_dec(x_5269); +lean_dec(x_5268); 
+lean_dec(x_5264); +lean_dec(x_5258); +lean_dec(x_3050); +lean_dec(x_3049); +lean_dec(x_153); +lean_dec(x_2); +lean_dec(x_1); +x_5506 = l_Lean_IR_ToIR_lowerLet___closed__18; +x_5507 = l_panic___at_Lean_IR_ToIR_lowerAlt_loop___spec__1(x_5506, x_5263, x_4, x_5, x_5267); +return x_5507; +} +case 3: +{ +lean_object* x_5508; lean_object* x_5509; lean_object* x_5536; lean_object* x_5537; +lean_dec(x_5274); +lean_dec(x_5269); +lean_dec(x_3050); +lean_dec(x_3049); +lean_inc(x_153); +x_5536 = l_Lean_Compiler_LCNF_getMonoDecl_x3f(x_153, x_4, x_5, x_5267); +x_5537 = lean_ctor_get(x_5536, 0); +lean_inc(x_5537); +if (lean_obj_tag(x_5537) == 0) +{ +lean_object* x_5538; lean_object* x_5539; lean_object* x_5540; +x_5538 = lean_ctor_get(x_5536, 1); +lean_inc(x_5538); +lean_dec(x_5536); +x_5539 = lean_box(0); +if (lean_is_scalar(x_5264)) { + x_5540 = lean_alloc_ctor(0, 2, 0); +} else { + x_5540 = x_5264; +} +lean_ctor_set(x_5540, 0, x_5539); +lean_ctor_set(x_5540, 1, x_5263); +x_5508 = x_5540; +x_5509 = x_5538; +goto block_5535; +} +else +{ +lean_object* x_5541; lean_object* x_5542; lean_object* x_5543; lean_object* x_5544; lean_object* x_5545; lean_object* x_5546; lean_object* x_5547; uint8_t x_5548; +lean_dec(x_5264); +x_5541 = lean_ctor_get(x_5536, 1); +lean_inc(x_5541); +if (lean_is_exclusive(x_5536)) { + lean_ctor_release(x_5536, 0); + lean_ctor_release(x_5536, 1); + x_5542 = x_5536; +} else { + lean_dec_ref(x_5536); + x_5542 = lean_box(0); +} +x_5543 = lean_ctor_get(x_5537, 0); +lean_inc(x_5543); +if (lean_is_exclusive(x_5537)) { + lean_ctor_release(x_5537, 0); + x_5544 = x_5537; +} else { + lean_dec_ref(x_5537); + x_5544 = lean_box(0); +} +x_5545 = lean_array_get_size(x_5258); +x_5546 = lean_ctor_get(x_5543, 3); +lean_inc(x_5546); +lean_dec(x_5543); +x_5547 = lean_array_get_size(x_5546); +lean_dec(x_5546); +x_5548 = lean_nat_dec_lt(x_5545, x_5547); +if (x_5548 == 0) +{ +uint8_t x_5549; +x_5549 = lean_nat_dec_eq(x_5545, x_5547); +if (x_5549 == 0) +{ +lean_object* x_5550; lean_object* x_5551; lean_object* x_5552; lean_object* x_5553; lean_object* x_5554; lean_object* x_5555; lean_object* x_5556; lean_object* x_5557; lean_object* x_5558; lean_object* x_5559; lean_object* x_5560; lean_object* x_5561; lean_object* x_5562; lean_object* x_5563; lean_object* x_5564; lean_object* x_5565; lean_object* x_5566; +x_5550 = lean_unsigned_to_nat(0u); +x_5551 = l_Array_extract___rarg(x_5258, x_5550, x_5547); +x_5552 = l_Array_extract___rarg(x_5258, x_5547, x_5545); +lean_dec(x_5545); +lean_inc(x_153); +if (lean_is_scalar(x_5542)) { + x_5553 = lean_alloc_ctor(6, 2, 0); +} else { + x_5553 = x_5542; + lean_ctor_set_tag(x_5553, 6); +} +lean_ctor_set(x_5553, 0, x_153); +lean_ctor_set(x_5553, 1, x_5551); +x_5554 = lean_ctor_get(x_1, 0); +lean_inc(x_5554); +x_5555 = l_Lean_IR_ToIR_bindVar(x_5554, x_5263, x_4, x_5, x_5541); +x_5556 = lean_ctor_get(x_5555, 0); +lean_inc(x_5556); +x_5557 = lean_ctor_get(x_5555, 1); +lean_inc(x_5557); +lean_dec(x_5555); +x_5558 = lean_ctor_get(x_5556, 0); +lean_inc(x_5558); +x_5559 = lean_ctor_get(x_5556, 1); +lean_inc(x_5559); +lean_dec(x_5556); +x_5560 = l_Lean_IR_ToIR_newVar(x_5559, x_4, x_5, x_5557); +x_5561 = lean_ctor_get(x_5560, 0); +lean_inc(x_5561); +x_5562 = lean_ctor_get(x_5560, 1); +lean_inc(x_5562); +lean_dec(x_5560); +x_5563 = lean_ctor_get(x_5561, 0); +lean_inc(x_5563); +x_5564 = lean_ctor_get(x_5561, 1); +lean_inc(x_5564); +lean_dec(x_5561); +x_5565 = lean_ctor_get(x_1, 2); +lean_inc(x_5565); +lean_inc(x_5); +lean_inc(x_4); +x_5566 = l_Lean_IR_ToIR_lowerType(x_5565, x_5564, x_4, 
x_5, x_5562); +if (lean_obj_tag(x_5566) == 0) +{ +lean_object* x_5567; lean_object* x_5568; lean_object* x_5569; lean_object* x_5570; lean_object* x_5571; +x_5567 = lean_ctor_get(x_5566, 0); +lean_inc(x_5567); +x_5568 = lean_ctor_get(x_5566, 1); +lean_inc(x_5568); +lean_dec(x_5566); +x_5569 = lean_ctor_get(x_5567, 0); +lean_inc(x_5569); +x_5570 = lean_ctor_get(x_5567, 1); +lean_inc(x_5570); +lean_dec(x_5567); +lean_inc(x_5); +lean_inc(x_4); +lean_inc(x_2); +x_5571 = l_Lean_IR_ToIR_lowerLet___lambda__3(x_2, x_5563, x_5552, x_5558, x_5553, x_5569, x_5570, x_4, x_5, x_5568); +if (lean_obj_tag(x_5571) == 0) +{ +lean_object* x_5572; lean_object* x_5573; lean_object* x_5574; lean_object* x_5575; lean_object* x_5576; lean_object* x_5577; lean_object* x_5578; +x_5572 = lean_ctor_get(x_5571, 0); +lean_inc(x_5572); +x_5573 = lean_ctor_get(x_5571, 1); +lean_inc(x_5573); +lean_dec(x_5571); +x_5574 = lean_ctor_get(x_5572, 0); +lean_inc(x_5574); +x_5575 = lean_ctor_get(x_5572, 1); +lean_inc(x_5575); +if (lean_is_exclusive(x_5572)) { + lean_ctor_release(x_5572, 0); + lean_ctor_release(x_5572, 1); + x_5576 = x_5572; +} else { + lean_dec_ref(x_5572); + x_5576 = lean_box(0); +} +if (lean_is_scalar(x_5544)) { + x_5577 = lean_alloc_ctor(1, 1, 0); +} else { + x_5577 = x_5544; +} +lean_ctor_set(x_5577, 0, x_5574); +if (lean_is_scalar(x_5576)) { + x_5578 = lean_alloc_ctor(0, 2, 0); +} else { + x_5578 = x_5576; +} +lean_ctor_set(x_5578, 0, x_5577); +lean_ctor_set(x_5578, 1, x_5575); +x_5508 = x_5578; +x_5509 = x_5573; +goto block_5535; +} +else +{ +lean_object* x_5579; lean_object* x_5580; lean_object* x_5581; lean_object* x_5582; +lean_dec(x_5544); +lean_dec(x_5268); +lean_dec(x_5258); +lean_dec(x_153); +lean_dec(x_5); +lean_dec(x_4); +lean_dec(x_2); +lean_dec(x_1); +x_5579 = lean_ctor_get(x_5571, 0); +lean_inc(x_5579); +x_5580 = lean_ctor_get(x_5571, 1); +lean_inc(x_5580); +if (lean_is_exclusive(x_5571)) { + lean_ctor_release(x_5571, 0); + lean_ctor_release(x_5571, 1); + x_5581 = x_5571; +} else { + lean_dec_ref(x_5571); + x_5581 = lean_box(0); +} +if (lean_is_scalar(x_5581)) { + x_5582 = lean_alloc_ctor(1, 2, 0); +} else { + x_5582 = x_5581; +} +lean_ctor_set(x_5582, 0, x_5579); +lean_ctor_set(x_5582, 1, x_5580); +return x_5582; +} +} +else +{ +lean_object* x_5583; lean_object* x_5584; lean_object* x_5585; lean_object* x_5586; +lean_dec(x_5563); +lean_dec(x_5558); +lean_dec(x_5553); +lean_dec(x_5552); +lean_dec(x_5544); +lean_dec(x_5268); +lean_dec(x_5258); +lean_dec(x_153); +lean_dec(x_5); +lean_dec(x_4); +lean_dec(x_2); +lean_dec(x_1); +x_5583 = lean_ctor_get(x_5566, 0); +lean_inc(x_5583); +x_5584 = lean_ctor_get(x_5566, 1); +lean_inc(x_5584); +if (lean_is_exclusive(x_5566)) { + lean_ctor_release(x_5566, 0); + lean_ctor_release(x_5566, 1); + x_5585 = x_5566; +} else { + lean_dec_ref(x_5566); + x_5585 = lean_box(0); +} +if (lean_is_scalar(x_5585)) { + x_5586 = lean_alloc_ctor(1, 2, 0); +} else { + x_5586 = x_5585; +} +lean_ctor_set(x_5586, 0, x_5583); +lean_ctor_set(x_5586, 1, x_5584); +return x_5586; +} +} +else +{ +lean_object* x_5587; lean_object* x_5588; lean_object* x_5589; lean_object* x_5590; lean_object* x_5591; lean_object* x_5592; lean_object* x_5593; lean_object* x_5594; lean_object* x_5595; +lean_dec(x_5547); +lean_dec(x_5545); +lean_inc(x_5258); +lean_inc(x_153); +if (lean_is_scalar(x_5542)) { + x_5587 = lean_alloc_ctor(6, 2, 0); +} else { + x_5587 = x_5542; + lean_ctor_set_tag(x_5587, 6); +} +lean_ctor_set(x_5587, 0, x_153); +lean_ctor_set(x_5587, 1, x_5258); +x_5588 = lean_ctor_get(x_1, 0); 
+lean_inc(x_5588); +x_5589 = l_Lean_IR_ToIR_bindVar(x_5588, x_5263, x_4, x_5, x_5541); +x_5590 = lean_ctor_get(x_5589, 0); +lean_inc(x_5590); +x_5591 = lean_ctor_get(x_5589, 1); +lean_inc(x_5591); +lean_dec(x_5589); +x_5592 = lean_ctor_get(x_5590, 0); +lean_inc(x_5592); +x_5593 = lean_ctor_get(x_5590, 1); +lean_inc(x_5593); +lean_dec(x_5590); +x_5594 = lean_ctor_get(x_1, 2); +lean_inc(x_5594); +lean_inc(x_5); +lean_inc(x_4); +x_5595 = l_Lean_IR_ToIR_lowerType(x_5594, x_5593, x_4, x_5, x_5591); +if (lean_obj_tag(x_5595) == 0) +{ +lean_object* x_5596; lean_object* x_5597; lean_object* x_5598; lean_object* x_5599; lean_object* x_5600; +x_5596 = lean_ctor_get(x_5595, 0); +lean_inc(x_5596); +x_5597 = lean_ctor_get(x_5595, 1); +lean_inc(x_5597); +lean_dec(x_5595); +x_5598 = lean_ctor_get(x_5596, 0); +lean_inc(x_5598); +x_5599 = lean_ctor_get(x_5596, 1); +lean_inc(x_5599); +lean_dec(x_5596); +lean_inc(x_5); +lean_inc(x_4); +lean_inc(x_2); +x_5600 = l_Lean_IR_ToIR_lowerLet___lambda__1(x_2, x_5592, x_5587, x_5598, x_5599, x_4, x_5, x_5597); +if (lean_obj_tag(x_5600) == 0) +{ +lean_object* x_5601; lean_object* x_5602; lean_object* x_5603; lean_object* x_5604; lean_object* x_5605; lean_object* x_5606; lean_object* x_5607; +x_5601 = lean_ctor_get(x_5600, 0); +lean_inc(x_5601); +x_5602 = lean_ctor_get(x_5600, 1); +lean_inc(x_5602); +lean_dec(x_5600); +x_5603 = lean_ctor_get(x_5601, 0); +lean_inc(x_5603); +x_5604 = lean_ctor_get(x_5601, 1); +lean_inc(x_5604); +if (lean_is_exclusive(x_5601)) { + lean_ctor_release(x_5601, 0); + lean_ctor_release(x_5601, 1); + x_5605 = x_5601; +} else { + lean_dec_ref(x_5601); + x_5605 = lean_box(0); +} +if (lean_is_scalar(x_5544)) { + x_5606 = lean_alloc_ctor(1, 1, 0); +} else { + x_5606 = x_5544; +} +lean_ctor_set(x_5606, 0, x_5603); +if (lean_is_scalar(x_5605)) { + x_5607 = lean_alloc_ctor(0, 2, 0); +} else { + x_5607 = x_5605; +} +lean_ctor_set(x_5607, 0, x_5606); +lean_ctor_set(x_5607, 1, x_5604); +x_5508 = x_5607; +x_5509 = x_5602; +goto block_5535; +} +else +{ +lean_object* x_5608; lean_object* x_5609; lean_object* x_5610; lean_object* x_5611; +lean_dec(x_5544); +lean_dec(x_5268); +lean_dec(x_5258); +lean_dec(x_153); +lean_dec(x_5); +lean_dec(x_4); +lean_dec(x_2); +lean_dec(x_1); +x_5608 = lean_ctor_get(x_5600, 0); +lean_inc(x_5608); +x_5609 = lean_ctor_get(x_5600, 1); +lean_inc(x_5609); +if (lean_is_exclusive(x_5600)) { + lean_ctor_release(x_5600, 0); + lean_ctor_release(x_5600, 1); + x_5610 = x_5600; +} else { + lean_dec_ref(x_5600); + x_5610 = lean_box(0); +} +if (lean_is_scalar(x_5610)) { + x_5611 = lean_alloc_ctor(1, 2, 0); +} else { + x_5611 = x_5610; +} +lean_ctor_set(x_5611, 0, x_5608); +lean_ctor_set(x_5611, 1, x_5609); +return x_5611; +} +} +else +{ +lean_object* x_5612; lean_object* x_5613; lean_object* x_5614; lean_object* x_5615; +lean_dec(x_5592); +lean_dec(x_5587); +lean_dec(x_5544); +lean_dec(x_5268); +lean_dec(x_5258); +lean_dec(x_153); +lean_dec(x_5); +lean_dec(x_4); +lean_dec(x_2); +lean_dec(x_1); +x_5612 = lean_ctor_get(x_5595, 0); +lean_inc(x_5612); +x_5613 = lean_ctor_get(x_5595, 1); +lean_inc(x_5613); +if (lean_is_exclusive(x_5595)) { + lean_ctor_release(x_5595, 0); + lean_ctor_release(x_5595, 1); + x_5614 = x_5595; +} else { + lean_dec_ref(x_5595); + x_5614 = lean_box(0); +} +if (lean_is_scalar(x_5614)) { + x_5615 = lean_alloc_ctor(1, 2, 0); +} else { + x_5615 = x_5614; +} +lean_ctor_set(x_5615, 0, x_5612); +lean_ctor_set(x_5615, 1, x_5613); +return x_5615; +} +} +} +else +{ +lean_object* x_5616; lean_object* x_5617; lean_object* x_5618; 
lean_object* x_5619; lean_object* x_5620; lean_object* x_5621; lean_object* x_5622; lean_object* x_5623; lean_object* x_5624; +lean_dec(x_5547); +lean_dec(x_5545); +lean_inc(x_5258); +lean_inc(x_153); +if (lean_is_scalar(x_5542)) { + x_5616 = lean_alloc_ctor(7, 2, 0); +} else { + x_5616 = x_5542; + lean_ctor_set_tag(x_5616, 7); +} +lean_ctor_set(x_5616, 0, x_153); +lean_ctor_set(x_5616, 1, x_5258); +x_5617 = lean_ctor_get(x_1, 0); +lean_inc(x_5617); +x_5618 = l_Lean_IR_ToIR_bindVar(x_5617, x_5263, x_4, x_5, x_5541); +x_5619 = lean_ctor_get(x_5618, 0); +lean_inc(x_5619); +x_5620 = lean_ctor_get(x_5618, 1); +lean_inc(x_5620); +lean_dec(x_5618); +x_5621 = lean_ctor_get(x_5619, 0); +lean_inc(x_5621); +x_5622 = lean_ctor_get(x_5619, 1); +lean_inc(x_5622); +lean_dec(x_5619); +x_5623 = lean_box(7); +lean_inc(x_5); +lean_inc(x_4); +lean_inc(x_2); +x_5624 = l_Lean_IR_ToIR_lowerLet___lambda__1(x_2, x_5621, x_5616, x_5623, x_5622, x_4, x_5, x_5620); +if (lean_obj_tag(x_5624) == 0) +{ +lean_object* x_5625; lean_object* x_5626; lean_object* x_5627; lean_object* x_5628; lean_object* x_5629; lean_object* x_5630; lean_object* x_5631; +x_5625 = lean_ctor_get(x_5624, 0); +lean_inc(x_5625); +x_5626 = lean_ctor_get(x_5624, 1); +lean_inc(x_5626); +lean_dec(x_5624); +x_5627 = lean_ctor_get(x_5625, 0); +lean_inc(x_5627); +x_5628 = lean_ctor_get(x_5625, 1); +lean_inc(x_5628); +if (lean_is_exclusive(x_5625)) { + lean_ctor_release(x_5625, 0); + lean_ctor_release(x_5625, 1); + x_5629 = x_5625; +} else { + lean_dec_ref(x_5625); + x_5629 = lean_box(0); +} +if (lean_is_scalar(x_5544)) { + x_5630 = lean_alloc_ctor(1, 1, 0); +} else { + x_5630 = x_5544; +} +lean_ctor_set(x_5630, 0, x_5627); +if (lean_is_scalar(x_5629)) { + x_5631 = lean_alloc_ctor(0, 2, 0); +} else { + x_5631 = x_5629; +} +lean_ctor_set(x_5631, 0, x_5630); +lean_ctor_set(x_5631, 1, x_5628); +x_5508 = x_5631; +x_5509 = x_5626; +goto block_5535; +} +else +{ +lean_object* x_5632; lean_object* x_5633; lean_object* x_5634; lean_object* x_5635; +lean_dec(x_5544); +lean_dec(x_5268); +lean_dec(x_5258); +lean_dec(x_153); +lean_dec(x_5); +lean_dec(x_4); +lean_dec(x_2); +lean_dec(x_1); +x_5632 = lean_ctor_get(x_5624, 0); +lean_inc(x_5632); +x_5633 = lean_ctor_get(x_5624, 1); +lean_inc(x_5633); +if (lean_is_exclusive(x_5624)) { + lean_ctor_release(x_5624, 0); + lean_ctor_release(x_5624, 1); + x_5634 = x_5624; +} else { + lean_dec_ref(x_5624); + x_5634 = lean_box(0); +} +if (lean_is_scalar(x_5634)) { + x_5635 = lean_alloc_ctor(1, 2, 0); +} else { + x_5635 = x_5634; +} +lean_ctor_set(x_5635, 0, x_5632); +lean_ctor_set(x_5635, 1, x_5633); +return x_5635; +} +} +} +block_5535: +{ +lean_object* x_5510; +x_5510 = lean_ctor_get(x_5508, 0); +lean_inc(x_5510); +if (lean_obj_tag(x_5510) == 0) +{ +lean_object* x_5511; lean_object* x_5512; lean_object* x_5513; lean_object* x_5514; lean_object* x_5515; lean_object* x_5516; lean_object* x_5517; lean_object* x_5518; lean_object* x_5519; lean_object* x_5520; +lean_dec(x_5268); +x_5511 = lean_ctor_get(x_5508, 1); +lean_inc(x_5511); +lean_dec(x_5508); +x_5512 = lean_alloc_ctor(6, 2, 0); +lean_ctor_set(x_5512, 0, x_153); +lean_ctor_set(x_5512, 1, x_5258); +x_5513 = lean_ctor_get(x_1, 0); +lean_inc(x_5513); +x_5514 = l_Lean_IR_ToIR_bindVar(x_5513, x_5511, x_4, x_5, x_5509); +x_5515 = lean_ctor_get(x_5514, 0); +lean_inc(x_5515); +x_5516 = lean_ctor_get(x_5514, 1); +lean_inc(x_5516); +lean_dec(x_5514); +x_5517 = lean_ctor_get(x_5515, 0); +lean_inc(x_5517); +x_5518 = lean_ctor_get(x_5515, 1); +lean_inc(x_5518); +lean_dec(x_5515); +x_5519 
= lean_ctor_get(x_1, 2); +lean_inc(x_5519); +lean_dec(x_1); +lean_inc(x_5); +lean_inc(x_4); +x_5520 = l_Lean_IR_ToIR_lowerType(x_5519, x_5518, x_4, x_5, x_5516); +if (lean_obj_tag(x_5520) == 0) +{ +lean_object* x_5521; lean_object* x_5522; lean_object* x_5523; lean_object* x_5524; lean_object* x_5525; +x_5521 = lean_ctor_get(x_5520, 0); +lean_inc(x_5521); +x_5522 = lean_ctor_get(x_5520, 1); +lean_inc(x_5522); +lean_dec(x_5520); +x_5523 = lean_ctor_get(x_5521, 0); +lean_inc(x_5523); +x_5524 = lean_ctor_get(x_5521, 1); +lean_inc(x_5524); +lean_dec(x_5521); +x_5525 = l_Lean_IR_ToIR_lowerLet___lambda__1(x_2, x_5517, x_5512, x_5523, x_5524, x_4, x_5, x_5522); +return x_5525; +} +else +{ +lean_object* x_5526; lean_object* x_5527; lean_object* x_5528; lean_object* x_5529; +lean_dec(x_5517); +lean_dec(x_5512); +lean_dec(x_5); +lean_dec(x_4); +lean_dec(x_2); +x_5526 = lean_ctor_get(x_5520, 0); +lean_inc(x_5526); +x_5527 = lean_ctor_get(x_5520, 1); +lean_inc(x_5527); +if (lean_is_exclusive(x_5520)) { + lean_ctor_release(x_5520, 0); + lean_ctor_release(x_5520, 1); + x_5528 = x_5520; +} else { + lean_dec_ref(x_5520); + x_5528 = lean_box(0); +} +if (lean_is_scalar(x_5528)) { + x_5529 = lean_alloc_ctor(1, 2, 0); +} else { + x_5529 = x_5528; +} +lean_ctor_set(x_5529, 0, x_5526); +lean_ctor_set(x_5529, 1, x_5527); +return x_5529; +} +} +else +{ +lean_object* x_5530; lean_object* x_5531; lean_object* x_5532; lean_object* x_5533; lean_object* x_5534; +lean_dec(x_5258); +lean_dec(x_153); +lean_dec(x_5); +lean_dec(x_4); +lean_dec(x_2); +lean_dec(x_1); +x_5530 = lean_ctor_get(x_5508, 1); +lean_inc(x_5530); +if (lean_is_exclusive(x_5508)) { + lean_ctor_release(x_5508, 0); + lean_ctor_release(x_5508, 1); + x_5531 = x_5508; +} else { + lean_dec_ref(x_5508); + x_5531 = lean_box(0); +} +x_5532 = lean_ctor_get(x_5510, 0); +lean_inc(x_5532); +lean_dec(x_5510); +if (lean_is_scalar(x_5531)) { + x_5533 = lean_alloc_ctor(0, 2, 0); +} else { + x_5533 = x_5531; +} +lean_ctor_set(x_5533, 0, x_5532); +lean_ctor_set(x_5533, 1, x_5530); +if (lean_is_scalar(x_5268)) { + x_5534 = lean_alloc_ctor(0, 2, 0); +} else { + x_5534 = x_5268; +} +lean_ctor_set(x_5534, 0, x_5533); +lean_ctor_set(x_5534, 1, x_5509); +return x_5534; +} +} +} +case 4: +{ +lean_object* x_5636; lean_object* x_5637; uint8_t x_5638; +lean_dec(x_5269); +lean_dec(x_5268); +lean_dec(x_5264); +lean_dec(x_3050); +lean_dec(x_3049); +if (lean_is_exclusive(x_5274)) { + lean_ctor_release(x_5274, 0); + x_5636 = x_5274; +} else { + lean_dec_ref(x_5274); + x_5636 = lean_box(0); +} +x_5637 = l_Lean_IR_ToIR_lowerLet___closed__20; +x_5638 = lean_name_eq(x_153, x_5637); +if (x_5638 == 0) +{ +uint8_t x_5639; lean_object* x_5640; lean_object* x_5641; lean_object* x_5642; lean_object* x_5643; lean_object* x_5644; lean_object* x_5645; lean_object* x_5646; lean_object* x_5647; lean_object* x_5648; +lean_dec(x_5258); +lean_dec(x_2); +lean_dec(x_1); +x_5639 = 1; +x_5640 = l_Lean_IR_ToIR_lowerLet___closed__14; +x_5641 = l_Lean_Name_toString(x_153, x_5639, x_5640); +if (lean_is_scalar(x_5636)) { + x_5642 = lean_alloc_ctor(3, 1, 0); +} else { + x_5642 = x_5636; + lean_ctor_set_tag(x_5642, 3); +} +lean_ctor_set(x_5642, 0, x_5641); +x_5643 = l_Lean_IR_ToIR_lowerLet___closed__22; +x_5644 = lean_alloc_ctor(5, 2, 0); +lean_ctor_set(x_5644, 0, x_5643); +lean_ctor_set(x_5644, 1, x_5642); +x_5645 = l_Lean_IR_ToIR_lowerLet___closed__24; +x_5646 = lean_alloc_ctor(5, 2, 0); +lean_ctor_set(x_5646, 0, x_5644); +lean_ctor_set(x_5646, 1, x_5645); +x_5647 = l_Lean_MessageData_ofFormat(x_5646); +x_5648 = 
l_Lean_throwError___at_Lean_IR_ToIR_lowerLet___spec__1(x_5647, x_5263, x_4, x_5, x_5267); +lean_dec(x_5); +lean_dec(x_4); +lean_dec(x_5263); +return x_5648; +} +else +{ +lean_object* x_5649; lean_object* x_5650; lean_object* x_5651; +lean_dec(x_5636); +lean_dec(x_153); +x_5649 = l_Lean_IR_instInhabitedArg; +x_5650 = lean_unsigned_to_nat(2u); +x_5651 = lean_array_get(x_5649, x_5258, x_5650); +lean_dec(x_5258); +if (lean_obj_tag(x_5651) == 0) +{ +lean_object* x_5652; lean_object* x_5653; lean_object* x_5654; lean_object* x_5655; lean_object* x_5656; lean_object* x_5657; lean_object* x_5658; +x_5652 = lean_ctor_get(x_5651, 0); +lean_inc(x_5652); +lean_dec(x_5651); +x_5653 = lean_ctor_get(x_1, 0); +lean_inc(x_5653); +lean_dec(x_1); +x_5654 = l_Lean_IR_ToIR_bindVarToVarId(x_5653, x_5652, x_5263, x_4, x_5, x_5267); +x_5655 = lean_ctor_get(x_5654, 0); +lean_inc(x_5655); +x_5656 = lean_ctor_get(x_5654, 1); +lean_inc(x_5656); +lean_dec(x_5654); +x_5657 = lean_ctor_get(x_5655, 1); +lean_inc(x_5657); +lean_dec(x_5655); +x_5658 = l_Lean_IR_ToIR_lowerCode(x_2, x_5657, x_4, x_5, x_5656); +return x_5658; +} +else +{ +lean_object* x_5659; lean_object* x_5660; lean_object* x_5661; lean_object* x_5662; lean_object* x_5663; lean_object* x_5664; +x_5659 = lean_ctor_get(x_1, 0); +lean_inc(x_5659); +lean_dec(x_1); +x_5660 = l_Lean_IR_ToIR_bindErased(x_5659, x_5263, x_4, x_5, x_5267); +x_5661 = lean_ctor_get(x_5660, 0); +lean_inc(x_5661); +x_5662 = lean_ctor_get(x_5660, 1); +lean_inc(x_5662); +lean_dec(x_5660); +x_5663 = lean_ctor_get(x_5661, 1); +lean_inc(x_5663); +lean_dec(x_5661); +x_5664 = l_Lean_IR_ToIR_lowerCode(x_2, x_5663, x_4, x_5, x_5662); +return x_5664; +} +} +} +case 5: +{ +lean_object* x_5665; lean_object* x_5666; +lean_dec(x_5274); +lean_dec(x_5269); +lean_dec(x_5268); +lean_dec(x_5264); +lean_dec(x_5258); +lean_dec(x_3050); +lean_dec(x_3049); +lean_dec(x_153); +lean_dec(x_2); +lean_dec(x_1); +x_5665 = l_Lean_IR_ToIR_lowerLet___closed__26; +x_5666 = l_panic___at_Lean_IR_ToIR_lowerAlt_loop___spec__1(x_5665, x_5263, x_4, x_5, x_5267); +return x_5666; +} +case 6: +{ +lean_object* x_5667; uint8_t x_5668; +x_5667 = lean_ctor_get(x_5274, 0); +lean_inc(x_5667); +lean_dec(x_5274); +lean_inc(x_153); +x_5668 = l_Lean_isExtern(x_5269, x_153); +if (x_5668 == 0) +{ +lean_object* x_5669; +lean_dec(x_5268); +lean_dec(x_5264); +lean_dec(x_5258); +lean_inc(x_5); +lean_inc(x_4); +x_5669 = l_Lean_IR_ToIR_getCtorInfo(x_153, x_5263, x_4, x_5, x_5267); +if (lean_obj_tag(x_5669) == 0) +{ +lean_object* x_5670; lean_object* x_5671; lean_object* x_5672; lean_object* x_5673; lean_object* x_5674; lean_object* x_5675; lean_object* x_5676; lean_object* x_5677; lean_object* x_5678; lean_object* x_5679; lean_object* x_5680; lean_object* x_5681; lean_object* x_5682; lean_object* x_5683; lean_object* x_5684; lean_object* x_5685; lean_object* x_5686; lean_object* x_5687; lean_object* x_5688; lean_object* x_5689; +x_5670 = lean_ctor_get(x_5669, 0); +lean_inc(x_5670); +x_5671 = lean_ctor_get(x_5670, 0); +lean_inc(x_5671); +x_5672 = lean_ctor_get(x_5669, 1); +lean_inc(x_5672); +lean_dec(x_5669); +x_5673 = lean_ctor_get(x_5670, 1); +lean_inc(x_5673); +lean_dec(x_5670); +x_5674 = lean_ctor_get(x_5671, 0); +lean_inc(x_5674); +x_5675 = lean_ctor_get(x_5671, 1); +lean_inc(x_5675); +lean_dec(x_5671); +x_5676 = lean_ctor_get(x_5667, 3); +lean_inc(x_5676); +lean_dec(x_5667); +x_5677 = lean_array_get_size(x_3049); +x_5678 = l_Array_extract___rarg(x_3049, x_5676, x_5677); +lean_dec(x_5677); +lean_dec(x_3049); +x_5679 = 
lean_array_get_size(x_5675); +x_5680 = lean_unsigned_to_nat(0u); +x_5681 = lean_unsigned_to_nat(1u); +if (lean_is_scalar(x_3050)) { + x_5682 = lean_alloc_ctor(0, 3, 0); +} else { + x_5682 = x_3050; + lean_ctor_set_tag(x_5682, 0); +} +lean_ctor_set(x_5682, 0, x_5680); +lean_ctor_set(x_5682, 1, x_5679); +lean_ctor_set(x_5682, 2, x_5681); +x_5683 = l_Lean_IR_ToIR_lowerLet___closed__27; +x_5684 = l_Std_Range_forIn_x27_loop___at_Lean_IR_ToIR_lowerLet___spec__3(x_5675, x_5678, x_5682, x_5682, x_5683, x_5680, lean_box(0), lean_box(0), x_5673, x_4, x_5, x_5672); +lean_dec(x_5682); +x_5685 = lean_ctor_get(x_5684, 0); +lean_inc(x_5685); +x_5686 = lean_ctor_get(x_5684, 1); +lean_inc(x_5686); +lean_dec(x_5684); +x_5687 = lean_ctor_get(x_5685, 0); +lean_inc(x_5687); +x_5688 = lean_ctor_get(x_5685, 1); +lean_inc(x_5688); +lean_dec(x_5685); +x_5689 = l_Lean_IR_ToIR_lowerLet___lambda__4(x_1, x_2, x_5674, x_5675, x_5678, x_5687, x_5688, x_4, x_5, x_5686); +lean_dec(x_5678); +lean_dec(x_5675); +return x_5689; +} +else +{ +lean_object* x_5690; lean_object* x_5691; lean_object* x_5692; lean_object* x_5693; +lean_dec(x_5667); +lean_dec(x_3050); +lean_dec(x_3049); +lean_dec(x_5); +lean_dec(x_4); +lean_dec(x_2); +lean_dec(x_1); +x_5690 = lean_ctor_get(x_5669, 0); +lean_inc(x_5690); +x_5691 = lean_ctor_get(x_5669, 1); +lean_inc(x_5691); +if (lean_is_exclusive(x_5669)) { + lean_ctor_release(x_5669, 0); + lean_ctor_release(x_5669, 1); + x_5692 = x_5669; +} else { + lean_dec_ref(x_5669); + x_5692 = lean_box(0); +} +if (lean_is_scalar(x_5692)) { + x_5693 = lean_alloc_ctor(1, 2, 0); +} else { + x_5693 = x_5692; +} +lean_ctor_set(x_5693, 0, x_5690); +lean_ctor_set(x_5693, 1, x_5691); +return x_5693; +} +} +else +{ +lean_object* x_5694; lean_object* x_5695; lean_object* x_5722; lean_object* x_5723; +lean_dec(x_5667); +lean_dec(x_3050); +lean_dec(x_3049); +lean_inc(x_153); +x_5722 = l_Lean_Compiler_LCNF_getMonoDecl_x3f(x_153, x_4, x_5, x_5267); +x_5723 = lean_ctor_get(x_5722, 0); +lean_inc(x_5723); +if (lean_obj_tag(x_5723) == 0) +{ +lean_object* x_5724; lean_object* x_5725; lean_object* x_5726; +x_5724 = lean_ctor_get(x_5722, 1); +lean_inc(x_5724); +lean_dec(x_5722); +x_5725 = lean_box(0); +if (lean_is_scalar(x_5264)) { + x_5726 = lean_alloc_ctor(0, 2, 0); +} else { + x_5726 = x_5264; +} +lean_ctor_set(x_5726, 0, x_5725); +lean_ctor_set(x_5726, 1, x_5263); +x_5694 = x_5726; +x_5695 = x_5724; +goto block_5721; +} +else +{ +lean_object* x_5727; lean_object* x_5728; lean_object* x_5729; lean_object* x_5730; lean_object* x_5731; lean_object* x_5732; lean_object* x_5733; uint8_t x_5734; +lean_dec(x_5264); +x_5727 = lean_ctor_get(x_5722, 1); +lean_inc(x_5727); +if (lean_is_exclusive(x_5722)) { + lean_ctor_release(x_5722, 0); + lean_ctor_release(x_5722, 1); + x_5728 = x_5722; +} else { + lean_dec_ref(x_5722); + x_5728 = lean_box(0); +} +x_5729 = lean_ctor_get(x_5723, 0); +lean_inc(x_5729); +if (lean_is_exclusive(x_5723)) { + lean_ctor_release(x_5723, 0); + x_5730 = x_5723; +} else { + lean_dec_ref(x_5723); + x_5730 = lean_box(0); +} +x_5731 = lean_array_get_size(x_5258); +x_5732 = lean_ctor_get(x_5729, 3); +lean_inc(x_5732); +lean_dec(x_5729); +x_5733 = lean_array_get_size(x_5732); +lean_dec(x_5732); +x_5734 = lean_nat_dec_lt(x_5731, x_5733); +if (x_5734 == 0) +{ +uint8_t x_5735; +x_5735 = lean_nat_dec_eq(x_5731, x_5733); +if (x_5735 == 0) +{ +lean_object* x_5736; lean_object* x_5737; lean_object* x_5738; lean_object* x_5739; lean_object* x_5740; lean_object* x_5741; lean_object* x_5742; lean_object* x_5743; lean_object* 
x_5744; lean_object* x_5745; lean_object* x_5746; lean_object* x_5747; lean_object* x_5748; lean_object* x_5749; lean_object* x_5750; lean_object* x_5751; lean_object* x_5752; +x_5736 = lean_unsigned_to_nat(0u); +x_5737 = l_Array_extract___rarg(x_5258, x_5736, x_5733); +x_5738 = l_Array_extract___rarg(x_5258, x_5733, x_5731); +lean_dec(x_5731); +lean_inc(x_153); +if (lean_is_scalar(x_5728)) { + x_5739 = lean_alloc_ctor(6, 2, 0); +} else { + x_5739 = x_5728; + lean_ctor_set_tag(x_5739, 6); +} +lean_ctor_set(x_5739, 0, x_153); +lean_ctor_set(x_5739, 1, x_5737); +x_5740 = lean_ctor_get(x_1, 0); +lean_inc(x_5740); +x_5741 = l_Lean_IR_ToIR_bindVar(x_5740, x_5263, x_4, x_5, x_5727); +x_5742 = lean_ctor_get(x_5741, 0); +lean_inc(x_5742); +x_5743 = lean_ctor_get(x_5741, 1); +lean_inc(x_5743); +lean_dec(x_5741); +x_5744 = lean_ctor_get(x_5742, 0); +lean_inc(x_5744); +x_5745 = lean_ctor_get(x_5742, 1); +lean_inc(x_5745); +lean_dec(x_5742); +x_5746 = l_Lean_IR_ToIR_newVar(x_5745, x_4, x_5, x_5743); +x_5747 = lean_ctor_get(x_5746, 0); +lean_inc(x_5747); +x_5748 = lean_ctor_get(x_5746, 1); +lean_inc(x_5748); +lean_dec(x_5746); +x_5749 = lean_ctor_get(x_5747, 0); +lean_inc(x_5749); +x_5750 = lean_ctor_get(x_5747, 1); +lean_inc(x_5750); +lean_dec(x_5747); +x_5751 = lean_ctor_get(x_1, 2); +lean_inc(x_5751); +lean_inc(x_5); +lean_inc(x_4); +x_5752 = l_Lean_IR_ToIR_lowerType(x_5751, x_5750, x_4, x_5, x_5748); +if (lean_obj_tag(x_5752) == 0) +{ +lean_object* x_5753; lean_object* x_5754; lean_object* x_5755; lean_object* x_5756; lean_object* x_5757; +x_5753 = lean_ctor_get(x_5752, 0); +lean_inc(x_5753); +x_5754 = lean_ctor_get(x_5752, 1); +lean_inc(x_5754); +lean_dec(x_5752); +x_5755 = lean_ctor_get(x_5753, 0); +lean_inc(x_5755); +x_5756 = lean_ctor_get(x_5753, 1); +lean_inc(x_5756); +lean_dec(x_5753); +lean_inc(x_5); +lean_inc(x_4); +lean_inc(x_2); +x_5757 = l_Lean_IR_ToIR_lowerLet___lambda__3(x_2, x_5749, x_5738, x_5744, x_5739, x_5755, x_5756, x_4, x_5, x_5754); +if (lean_obj_tag(x_5757) == 0) +{ +lean_object* x_5758; lean_object* x_5759; lean_object* x_5760; lean_object* x_5761; lean_object* x_5762; lean_object* x_5763; lean_object* x_5764; +x_5758 = lean_ctor_get(x_5757, 0); +lean_inc(x_5758); +x_5759 = lean_ctor_get(x_5757, 1); +lean_inc(x_5759); +lean_dec(x_5757); +x_5760 = lean_ctor_get(x_5758, 0); +lean_inc(x_5760); +x_5761 = lean_ctor_get(x_5758, 1); +lean_inc(x_5761); +if (lean_is_exclusive(x_5758)) { + lean_ctor_release(x_5758, 0); + lean_ctor_release(x_5758, 1); + x_5762 = x_5758; +} else { + lean_dec_ref(x_5758); + x_5762 = lean_box(0); +} +if (lean_is_scalar(x_5730)) { + x_5763 = lean_alloc_ctor(1, 1, 0); +} else { + x_5763 = x_5730; +} +lean_ctor_set(x_5763, 0, x_5760); +if (lean_is_scalar(x_5762)) { + x_5764 = lean_alloc_ctor(0, 2, 0); +} else { + x_5764 = x_5762; +} +lean_ctor_set(x_5764, 0, x_5763); +lean_ctor_set(x_5764, 1, x_5761); +x_5694 = x_5764; +x_5695 = x_5759; +goto block_5721; +} +else +{ +lean_object* x_5765; lean_object* x_5766; lean_object* x_5767; lean_object* x_5768; +lean_dec(x_5730); +lean_dec(x_5268); +lean_dec(x_5258); +lean_dec(x_153); +lean_dec(x_5); +lean_dec(x_4); +lean_dec(x_2); +lean_dec(x_1); +x_5765 = lean_ctor_get(x_5757, 0); +lean_inc(x_5765); +x_5766 = lean_ctor_get(x_5757, 1); +lean_inc(x_5766); +if (lean_is_exclusive(x_5757)) { + lean_ctor_release(x_5757, 0); + lean_ctor_release(x_5757, 1); + x_5767 = x_5757; +} else { + lean_dec_ref(x_5757); + x_5767 = lean_box(0); +} +if (lean_is_scalar(x_5767)) { + x_5768 = lean_alloc_ctor(1, 2, 0); +} else { + x_5768 = 
x_5767; +} +lean_ctor_set(x_5768, 0, x_5765); +lean_ctor_set(x_5768, 1, x_5766); +return x_5768; +} +} +else +{ +lean_object* x_5769; lean_object* x_5770; lean_object* x_5771; lean_object* x_5772; +lean_dec(x_5749); +lean_dec(x_5744); +lean_dec(x_5739); +lean_dec(x_5738); +lean_dec(x_5730); +lean_dec(x_5268); +lean_dec(x_5258); +lean_dec(x_153); +lean_dec(x_5); +lean_dec(x_4); +lean_dec(x_2); +lean_dec(x_1); +x_5769 = lean_ctor_get(x_5752, 0); +lean_inc(x_5769); +x_5770 = lean_ctor_get(x_5752, 1); +lean_inc(x_5770); +if (lean_is_exclusive(x_5752)) { + lean_ctor_release(x_5752, 0); + lean_ctor_release(x_5752, 1); + x_5771 = x_5752; +} else { + lean_dec_ref(x_5752); + x_5771 = lean_box(0); +} +if (lean_is_scalar(x_5771)) { + x_5772 = lean_alloc_ctor(1, 2, 0); +} else { + x_5772 = x_5771; +} +lean_ctor_set(x_5772, 0, x_5769); +lean_ctor_set(x_5772, 1, x_5770); +return x_5772; +} +} +else +{ +lean_object* x_5773; lean_object* x_5774; lean_object* x_5775; lean_object* x_5776; lean_object* x_5777; lean_object* x_5778; lean_object* x_5779; lean_object* x_5780; lean_object* x_5781; +lean_dec(x_5733); +lean_dec(x_5731); +lean_inc(x_5258); +lean_inc(x_153); +if (lean_is_scalar(x_5728)) { + x_5773 = lean_alloc_ctor(6, 2, 0); +} else { + x_5773 = x_5728; + lean_ctor_set_tag(x_5773, 6); +} +lean_ctor_set(x_5773, 0, x_153); +lean_ctor_set(x_5773, 1, x_5258); +x_5774 = lean_ctor_get(x_1, 0); +lean_inc(x_5774); +x_5775 = l_Lean_IR_ToIR_bindVar(x_5774, x_5263, x_4, x_5, x_5727); +x_5776 = lean_ctor_get(x_5775, 0); +lean_inc(x_5776); +x_5777 = lean_ctor_get(x_5775, 1); +lean_inc(x_5777); +lean_dec(x_5775); +x_5778 = lean_ctor_get(x_5776, 0); +lean_inc(x_5778); +x_5779 = lean_ctor_get(x_5776, 1); +lean_inc(x_5779); +lean_dec(x_5776); +x_5780 = lean_ctor_get(x_1, 2); +lean_inc(x_5780); +lean_inc(x_5); +lean_inc(x_4); +x_5781 = l_Lean_IR_ToIR_lowerType(x_5780, x_5779, x_4, x_5, x_5777); +if (lean_obj_tag(x_5781) == 0) +{ +lean_object* x_5782; lean_object* x_5783; lean_object* x_5784; lean_object* x_5785; lean_object* x_5786; +x_5782 = lean_ctor_get(x_5781, 0); +lean_inc(x_5782); +x_5783 = lean_ctor_get(x_5781, 1); +lean_inc(x_5783); +lean_dec(x_5781); +x_5784 = lean_ctor_get(x_5782, 0); +lean_inc(x_5784); +x_5785 = lean_ctor_get(x_5782, 1); +lean_inc(x_5785); +lean_dec(x_5782); +lean_inc(x_5); +lean_inc(x_4); +lean_inc(x_2); +x_5786 = l_Lean_IR_ToIR_lowerLet___lambda__1(x_2, x_5778, x_5773, x_5784, x_5785, x_4, x_5, x_5783); +if (lean_obj_tag(x_5786) == 0) +{ +lean_object* x_5787; lean_object* x_5788; lean_object* x_5789; lean_object* x_5790; lean_object* x_5791; lean_object* x_5792; lean_object* x_5793; +x_5787 = lean_ctor_get(x_5786, 0); +lean_inc(x_5787); +x_5788 = lean_ctor_get(x_5786, 1); +lean_inc(x_5788); +lean_dec(x_5786); +x_5789 = lean_ctor_get(x_5787, 0); +lean_inc(x_5789); +x_5790 = lean_ctor_get(x_5787, 1); +lean_inc(x_5790); +if (lean_is_exclusive(x_5787)) { + lean_ctor_release(x_5787, 0); + lean_ctor_release(x_5787, 1); + x_5791 = x_5787; +} else { + lean_dec_ref(x_5787); + x_5791 = lean_box(0); +} +if (lean_is_scalar(x_5730)) { + x_5792 = lean_alloc_ctor(1, 1, 0); +} else { + x_5792 = x_5730; +} +lean_ctor_set(x_5792, 0, x_5789); +if (lean_is_scalar(x_5791)) { + x_5793 = lean_alloc_ctor(0, 2, 0); +} else { + x_5793 = x_5791; +} +lean_ctor_set(x_5793, 0, x_5792); +lean_ctor_set(x_5793, 1, x_5790); +x_5694 = x_5793; +x_5695 = x_5788; +goto block_5721; +} +else +{ +lean_object* x_5794; lean_object* x_5795; lean_object* x_5796; lean_object* x_5797; +lean_dec(x_5730); +lean_dec(x_5268); 
+lean_dec(x_5258); +lean_dec(x_153); +lean_dec(x_5); +lean_dec(x_4); +lean_dec(x_2); +lean_dec(x_1); +x_5794 = lean_ctor_get(x_5786, 0); +lean_inc(x_5794); +x_5795 = lean_ctor_get(x_5786, 1); +lean_inc(x_5795); +if (lean_is_exclusive(x_5786)) { + lean_ctor_release(x_5786, 0); + lean_ctor_release(x_5786, 1); + x_5796 = x_5786; +} else { + lean_dec_ref(x_5786); + x_5796 = lean_box(0); +} +if (lean_is_scalar(x_5796)) { + x_5797 = lean_alloc_ctor(1, 2, 0); +} else { + x_5797 = x_5796; +} +lean_ctor_set(x_5797, 0, x_5794); +lean_ctor_set(x_5797, 1, x_5795); +return x_5797; +} +} +else +{ +lean_object* x_5798; lean_object* x_5799; lean_object* x_5800; lean_object* x_5801; +lean_dec(x_5778); +lean_dec(x_5773); +lean_dec(x_5730); +lean_dec(x_5268); +lean_dec(x_5258); +lean_dec(x_153); +lean_dec(x_5); +lean_dec(x_4); +lean_dec(x_2); +lean_dec(x_1); +x_5798 = lean_ctor_get(x_5781, 0); +lean_inc(x_5798); +x_5799 = lean_ctor_get(x_5781, 1); +lean_inc(x_5799); +if (lean_is_exclusive(x_5781)) { + lean_ctor_release(x_5781, 0); + lean_ctor_release(x_5781, 1); + x_5800 = x_5781; +} else { + lean_dec_ref(x_5781); + x_5800 = lean_box(0); +} +if (lean_is_scalar(x_5800)) { + x_5801 = lean_alloc_ctor(1, 2, 0); +} else { + x_5801 = x_5800; +} +lean_ctor_set(x_5801, 0, x_5798); +lean_ctor_set(x_5801, 1, x_5799); +return x_5801; +} +} +} +else +{ +lean_object* x_5802; lean_object* x_5803; lean_object* x_5804; lean_object* x_5805; lean_object* x_5806; lean_object* x_5807; lean_object* x_5808; lean_object* x_5809; lean_object* x_5810; +lean_dec(x_5733); +lean_dec(x_5731); +lean_inc(x_5258); +lean_inc(x_153); +if (lean_is_scalar(x_5728)) { + x_5802 = lean_alloc_ctor(7, 2, 0); +} else { + x_5802 = x_5728; + lean_ctor_set_tag(x_5802, 7); +} +lean_ctor_set(x_5802, 0, x_153); +lean_ctor_set(x_5802, 1, x_5258); +x_5803 = lean_ctor_get(x_1, 0); +lean_inc(x_5803); +x_5804 = l_Lean_IR_ToIR_bindVar(x_5803, x_5263, x_4, x_5, x_5727); +x_5805 = lean_ctor_get(x_5804, 0); +lean_inc(x_5805); +x_5806 = lean_ctor_get(x_5804, 1); +lean_inc(x_5806); +lean_dec(x_5804); +x_5807 = lean_ctor_get(x_5805, 0); +lean_inc(x_5807); +x_5808 = lean_ctor_get(x_5805, 1); +lean_inc(x_5808); +lean_dec(x_5805); +x_5809 = lean_box(7); +lean_inc(x_5); +lean_inc(x_4); +lean_inc(x_2); +x_5810 = l_Lean_IR_ToIR_lowerLet___lambda__1(x_2, x_5807, x_5802, x_5809, x_5808, x_4, x_5, x_5806); +if (lean_obj_tag(x_5810) == 0) +{ +lean_object* x_5811; lean_object* x_5812; lean_object* x_5813; lean_object* x_5814; lean_object* x_5815; lean_object* x_5816; lean_object* x_5817; +x_5811 = lean_ctor_get(x_5810, 0); +lean_inc(x_5811); +x_5812 = lean_ctor_get(x_5810, 1); +lean_inc(x_5812); +lean_dec(x_5810); +x_5813 = lean_ctor_get(x_5811, 0); +lean_inc(x_5813); +x_5814 = lean_ctor_get(x_5811, 1); +lean_inc(x_5814); +if (lean_is_exclusive(x_5811)) { + lean_ctor_release(x_5811, 0); + lean_ctor_release(x_5811, 1); + x_5815 = x_5811; +} else { + lean_dec_ref(x_5811); + x_5815 = lean_box(0); +} +if (lean_is_scalar(x_5730)) { + x_5816 = lean_alloc_ctor(1, 1, 0); +} else { + x_5816 = x_5730; +} +lean_ctor_set(x_5816, 0, x_5813); +if (lean_is_scalar(x_5815)) { + x_5817 = lean_alloc_ctor(0, 2, 0); +} else { + x_5817 = x_5815; +} +lean_ctor_set(x_5817, 0, x_5816); +lean_ctor_set(x_5817, 1, x_5814); +x_5694 = x_5817; +x_5695 = x_5812; +goto block_5721; +} +else +{ +lean_object* x_5818; lean_object* x_5819; lean_object* x_5820; lean_object* x_5821; +lean_dec(x_5730); +lean_dec(x_5268); +lean_dec(x_5258); +lean_dec(x_153); +lean_dec(x_5); +lean_dec(x_4); +lean_dec(x_2); 
+lean_dec(x_1); +x_5818 = lean_ctor_get(x_5810, 0); +lean_inc(x_5818); +x_5819 = lean_ctor_get(x_5810, 1); +lean_inc(x_5819); +if (lean_is_exclusive(x_5810)) { + lean_ctor_release(x_5810, 0); + lean_ctor_release(x_5810, 1); + x_5820 = x_5810; +} else { + lean_dec_ref(x_5810); + x_5820 = lean_box(0); +} +if (lean_is_scalar(x_5820)) { + x_5821 = lean_alloc_ctor(1, 2, 0); +} else { + x_5821 = x_5820; +} +lean_ctor_set(x_5821, 0, x_5818); +lean_ctor_set(x_5821, 1, x_5819); +return x_5821; +} +} +} +block_5721: +{ +lean_object* x_5696; +x_5696 = lean_ctor_get(x_5694, 0); +lean_inc(x_5696); +if (lean_obj_tag(x_5696) == 0) +{ +lean_object* x_5697; lean_object* x_5698; lean_object* x_5699; lean_object* x_5700; lean_object* x_5701; lean_object* x_5702; lean_object* x_5703; lean_object* x_5704; lean_object* x_5705; lean_object* x_5706; +lean_dec(x_5268); +x_5697 = lean_ctor_get(x_5694, 1); +lean_inc(x_5697); +lean_dec(x_5694); +x_5698 = lean_alloc_ctor(6, 2, 0); +lean_ctor_set(x_5698, 0, x_153); +lean_ctor_set(x_5698, 1, x_5258); +x_5699 = lean_ctor_get(x_1, 0); +lean_inc(x_5699); +x_5700 = l_Lean_IR_ToIR_bindVar(x_5699, x_5697, x_4, x_5, x_5695); +x_5701 = lean_ctor_get(x_5700, 0); +lean_inc(x_5701); +x_5702 = lean_ctor_get(x_5700, 1); +lean_inc(x_5702); +lean_dec(x_5700); +x_5703 = lean_ctor_get(x_5701, 0); +lean_inc(x_5703); +x_5704 = lean_ctor_get(x_5701, 1); +lean_inc(x_5704); +lean_dec(x_5701); +x_5705 = lean_ctor_get(x_1, 2); +lean_inc(x_5705); +lean_dec(x_1); +lean_inc(x_5); +lean_inc(x_4); +x_5706 = l_Lean_IR_ToIR_lowerType(x_5705, x_5704, x_4, x_5, x_5702); +if (lean_obj_tag(x_5706) == 0) +{ +lean_object* x_5707; lean_object* x_5708; lean_object* x_5709; lean_object* x_5710; lean_object* x_5711; +x_5707 = lean_ctor_get(x_5706, 0); +lean_inc(x_5707); +x_5708 = lean_ctor_get(x_5706, 1); +lean_inc(x_5708); +lean_dec(x_5706); +x_5709 = lean_ctor_get(x_5707, 0); +lean_inc(x_5709); +x_5710 = lean_ctor_get(x_5707, 1); +lean_inc(x_5710); +lean_dec(x_5707); +x_5711 = l_Lean_IR_ToIR_lowerLet___lambda__1(x_2, x_5703, x_5698, x_5709, x_5710, x_4, x_5, x_5708); +return x_5711; +} +else +{ +lean_object* x_5712; lean_object* x_5713; lean_object* x_5714; lean_object* x_5715; +lean_dec(x_5703); +lean_dec(x_5698); +lean_dec(x_5); +lean_dec(x_4); +lean_dec(x_2); +x_5712 = lean_ctor_get(x_5706, 0); +lean_inc(x_5712); +x_5713 = lean_ctor_get(x_5706, 1); +lean_inc(x_5713); +if (lean_is_exclusive(x_5706)) { + lean_ctor_release(x_5706, 0); + lean_ctor_release(x_5706, 1); + x_5714 = x_5706; +} else { + lean_dec_ref(x_5706); + x_5714 = lean_box(0); +} +if (lean_is_scalar(x_5714)) { + x_5715 = lean_alloc_ctor(1, 2, 0); +} else { + x_5715 = x_5714; +} +lean_ctor_set(x_5715, 0, x_5712); +lean_ctor_set(x_5715, 1, x_5713); +return x_5715; +} +} +else +{ +lean_object* x_5716; lean_object* x_5717; lean_object* x_5718; lean_object* x_5719; lean_object* x_5720; +lean_dec(x_5258); +lean_dec(x_153); +lean_dec(x_5); +lean_dec(x_4); +lean_dec(x_2); +lean_dec(x_1); +x_5716 = lean_ctor_get(x_5694, 1); +lean_inc(x_5716); +if (lean_is_exclusive(x_5694)) { + lean_ctor_release(x_5694, 0); + lean_ctor_release(x_5694, 1); + x_5717 = x_5694; +} else { + lean_dec_ref(x_5694); + x_5717 = lean_box(0); +} +x_5718 = lean_ctor_get(x_5696, 0); +lean_inc(x_5718); +lean_dec(x_5696); +if (lean_is_scalar(x_5717)) { + x_5719 = lean_alloc_ctor(0, 2, 0); +} else { + x_5719 = x_5717; +} +lean_ctor_set(x_5719, 0, x_5718); +lean_ctor_set(x_5719, 1, x_5716); +if (lean_is_scalar(x_5268)) { + x_5720 = lean_alloc_ctor(0, 2, 0); +} else { + x_5720 = x_5268; 
+} +lean_ctor_set(x_5720, 0, x_5719); +lean_ctor_set(x_5720, 1, x_5695); +return x_5720; +} +} +} +} +default: +{ +lean_object* x_5822; uint8_t x_5823; lean_object* x_5824; lean_object* x_5825; lean_object* x_5826; lean_object* x_5827; lean_object* x_5828; lean_object* x_5829; lean_object* x_5830; lean_object* x_5831; lean_object* x_5832; +lean_dec(x_5269); +lean_dec(x_5268); +lean_dec(x_5264); +lean_dec(x_5258); +lean_dec(x_3050); +lean_dec(x_3049); +lean_dec(x_2); +lean_dec(x_1); +if (lean_is_exclusive(x_5274)) { + lean_ctor_release(x_5274, 0); + x_5822 = x_5274; +} else { + lean_dec_ref(x_5274); + x_5822 = lean_box(0); +} +x_5823 = 1; +x_5824 = l_Lean_IR_ToIR_lowerLet___closed__14; +x_5825 = l_Lean_Name_toString(x_153, x_5823, x_5824); +if (lean_is_scalar(x_5822)) { + x_5826 = lean_alloc_ctor(3, 1, 0); +} else { + x_5826 = x_5822; + lean_ctor_set_tag(x_5826, 3); +} +lean_ctor_set(x_5826, 0, x_5825); +x_5827 = l_Lean_IR_ToIR_lowerLet___closed__29; +x_5828 = lean_alloc_ctor(5, 2, 0); +lean_ctor_set(x_5828, 0, x_5827); +lean_ctor_set(x_5828, 1, x_5826); +x_5829 = l_Lean_IR_ToIR_lowerLet___closed__31; +x_5830 = lean_alloc_ctor(5, 2, 0); +lean_ctor_set(x_5830, 0, x_5828); +lean_ctor_set(x_5830, 1, x_5829); +x_5831 = l_Lean_MessageData_ofFormat(x_5830); +x_5832 = l_Lean_throwError___at_Lean_IR_ToIR_lowerLet___spec__1(x_5831, x_5263, x_4, x_5, x_5267); +lean_dec(x_5); +lean_dec(x_4); +lean_dec(x_5263); +return x_5832; +} +} +} +} +else +{ +lean_object* x_5833; lean_object* x_5834; lean_object* x_5835; lean_object* x_5836; lean_object* x_5837; +lean_dec(x_5258); +lean_dec(x_3050); +lean_dec(x_3049); +lean_dec(x_153); +lean_dec(x_5); +lean_dec(x_4); +lean_dec(x_2); +lean_dec(x_1); +x_5833 = lean_ctor_get(x_5260, 1); +lean_inc(x_5833); +if (lean_is_exclusive(x_5260)) { + lean_ctor_release(x_5260, 0); + lean_ctor_release(x_5260, 1); + x_5834 = x_5260; +} else { + lean_dec_ref(x_5260); + x_5834 = lean_box(0); +} +x_5835 = lean_ctor_get(x_5262, 0); +lean_inc(x_5835); +lean_dec(x_5262); +if (lean_is_scalar(x_5834)) { + x_5836 = lean_alloc_ctor(0, 2, 0); +} else { + x_5836 = x_5834; +} +lean_ctor_set(x_5836, 0, x_5835); +lean_ctor_set(x_5836, 1, x_5833); +if (lean_is_scalar(x_3056)) { + x_5837 = lean_alloc_ctor(0, 2, 0); +} else { + x_5837 = x_3056; +} +lean_ctor_set(x_5837, 0, x_5836); +lean_ctor_set(x_5837, 1, x_5261); +return x_5837; +} +} +} +} +else +{ +uint8_t x_5939; +lean_dec(x_3050); +lean_dec(x_3049); +lean_dec(x_153); +lean_dec(x_5); +lean_dec(x_4); +lean_dec(x_2); +lean_dec(x_1); +x_5939 = !lean_is_exclusive(x_3053); +if (x_5939 == 0) +{ +return x_3053; +} +else +{ +lean_object* x_5940; lean_object* x_5941; lean_object* x_5942; +x_5940 = lean_ctor_get(x_3053, 0); +x_5941 = lean_ctor_get(x_3053, 1); +lean_inc(x_5941); +lean_inc(x_5940); +lean_dec(x_3053); +x_5942 = lean_alloc_ctor(1, 2, 0); +lean_ctor_set(x_5942, 0, x_5940); +lean_ctor_set(x_5942, 1, x_5941); +return x_5942; +} +} +} +case 1: +{ +lean_object* x_5943; +x_5943 = lean_ctor_get(x_3048, 0); +lean_inc(x_5943); +switch (lean_obj_tag(x_5943)) { +case 0: +{ +lean_object* x_5944; lean_object* x_5945; lean_object* x_5946; lean_object* x_5947; lean_object* x_5948; uint8_t x_5949; +x_5944 = lean_ctor_get(x_7, 2); +lean_inc(x_5944); +if (lean_is_exclusive(x_7)) { + lean_ctor_release(x_7, 0); + lean_ctor_release(x_7, 1); + lean_ctor_release(x_7, 2); + x_5945 = x_7; +} else { + lean_dec_ref(x_7); + x_5945 = lean_box(0); +} +x_5946 = lean_ctor_get(x_153, 1); +lean_inc(x_5946); +x_5947 = lean_ctor_get(x_3048, 1); +lean_inc(x_5947); 
+lean_dec(x_3048); +x_5948 = l_Lean_IR_ToIR_lowerLet___closed__32; +x_5949 = lean_string_dec_eq(x_5947, x_5948); +lean_dec(x_5947); +if (x_5949 == 0) +{ +size_t x_5950; size_t x_5951; lean_object* x_5952; +lean_dec(x_5946); +x_5950 = lean_array_size(x_5944); +x_5951 = 0; +lean_inc(x_5); +lean_inc(x_4); +lean_inc(x_5944); +x_5952 = l_Array_mapMUnsafe_map___at_Lean_IR_ToIR_lowerCode___spec__2(x_5950, x_5951, x_5944, x_3, x_4, x_5, x_6); +if (lean_obj_tag(x_5952) == 0) +{ +lean_object* x_5953; lean_object* x_5954; lean_object* x_5955; uint8_t x_5956; +x_5953 = lean_ctor_get(x_5952, 0); +lean_inc(x_5953); +x_5954 = lean_ctor_get(x_5952, 1); +lean_inc(x_5954); +if (lean_is_exclusive(x_5952)) { + lean_ctor_release(x_5952, 0); + lean_ctor_release(x_5952, 1); + x_5955 = x_5952; +} else { + lean_dec_ref(x_5952); + x_5955 = lean_box(0); +} +x_5956 = !lean_is_exclusive(x_5953); +if (x_5956 == 0) +{ +lean_object* x_5957; lean_object* x_5958; lean_object* x_5959; lean_object* x_5960; lean_object* x_7877; lean_object* x_7878; +x_5957 = lean_ctor_get(x_5953, 0); +x_5958 = lean_ctor_get(x_5953, 1); +lean_inc(x_153); +x_7877 = l_Lean_Compiler_LCNF_getMonoDecl_x3f(x_153, x_4, x_5, x_5954); +x_7878 = lean_ctor_get(x_7877, 0); +lean_inc(x_7878); +if (lean_obj_tag(x_7878) == 0) +{ +lean_object* x_7879; lean_object* x_7880; +x_7879 = lean_ctor_get(x_7877, 1); +lean_inc(x_7879); +lean_dec(x_7877); +x_7880 = lean_box(0); +lean_ctor_set(x_5953, 0, x_7880); +x_5959 = x_5953; +x_5960 = x_7879; +goto block_7876; +} +else +{ +uint8_t x_7881; +lean_free_object(x_5953); +x_7881 = !lean_is_exclusive(x_7877); +if (x_7881 == 0) +{ +lean_object* x_7882; lean_object* x_7883; uint8_t x_7884; +x_7882 = lean_ctor_get(x_7877, 1); +x_7883 = lean_ctor_get(x_7877, 0); +lean_dec(x_7883); +x_7884 = !lean_is_exclusive(x_7878); +if (x_7884 == 0) +{ +lean_object* x_7885; lean_object* x_7886; lean_object* x_7887; lean_object* x_7888; uint8_t x_7889; +x_7885 = lean_ctor_get(x_7878, 0); +x_7886 = lean_array_get_size(x_5957); +x_7887 = lean_ctor_get(x_7885, 3); +lean_inc(x_7887); +lean_dec(x_7885); +x_7888 = lean_array_get_size(x_7887); +lean_dec(x_7887); +x_7889 = lean_nat_dec_lt(x_7886, x_7888); +if (x_7889 == 0) +{ +uint8_t x_7890; +x_7890 = lean_nat_dec_eq(x_7886, x_7888); +if (x_7890 == 0) +{ +lean_object* x_7891; lean_object* x_7892; lean_object* x_7893; lean_object* x_7894; lean_object* x_7895; lean_object* x_7896; lean_object* x_7897; lean_object* x_7898; lean_object* x_7899; lean_object* x_7900; lean_object* x_7901; lean_object* x_7902; lean_object* x_7903; lean_object* x_7904; lean_object* x_7905; lean_object* x_7906; +x_7891 = lean_unsigned_to_nat(0u); +x_7892 = l_Array_extract___rarg(x_5957, x_7891, x_7888); +x_7893 = l_Array_extract___rarg(x_5957, x_7888, x_7886); +lean_dec(x_7886); +lean_inc(x_153); +lean_ctor_set_tag(x_7877, 6); +lean_ctor_set(x_7877, 1, x_7892); +lean_ctor_set(x_7877, 0, x_153); +x_7894 = lean_ctor_get(x_1, 0); +lean_inc(x_7894); +x_7895 = l_Lean_IR_ToIR_bindVar(x_7894, x_5958, x_4, x_5, x_7882); +x_7896 = lean_ctor_get(x_7895, 0); +lean_inc(x_7896); +x_7897 = lean_ctor_get(x_7895, 1); +lean_inc(x_7897); +lean_dec(x_7895); +x_7898 = lean_ctor_get(x_7896, 0); +lean_inc(x_7898); +x_7899 = lean_ctor_get(x_7896, 1); +lean_inc(x_7899); +lean_dec(x_7896); +x_7900 = l_Lean_IR_ToIR_newVar(x_7899, x_4, x_5, x_7897); +x_7901 = lean_ctor_get(x_7900, 0); +lean_inc(x_7901); +x_7902 = lean_ctor_get(x_7900, 1); +lean_inc(x_7902); +lean_dec(x_7900); +x_7903 = lean_ctor_get(x_7901, 0); +lean_inc(x_7903); +x_7904 = 
lean_ctor_get(x_7901, 1); +lean_inc(x_7904); +lean_dec(x_7901); +x_7905 = lean_ctor_get(x_1, 2); +lean_inc(x_7905); +lean_inc(x_5); +lean_inc(x_4); +x_7906 = l_Lean_IR_ToIR_lowerType(x_7905, x_7904, x_4, x_5, x_7902); +if (lean_obj_tag(x_7906) == 0) +{ +lean_object* x_7907; lean_object* x_7908; lean_object* x_7909; lean_object* x_7910; lean_object* x_7911; +x_7907 = lean_ctor_get(x_7906, 0); +lean_inc(x_7907); +x_7908 = lean_ctor_get(x_7906, 1); +lean_inc(x_7908); +lean_dec(x_7906); +x_7909 = lean_ctor_get(x_7907, 0); +lean_inc(x_7909); +x_7910 = lean_ctor_get(x_7907, 1); +lean_inc(x_7910); +lean_dec(x_7907); +lean_inc(x_5); +lean_inc(x_4); +lean_inc(x_2); +x_7911 = l_Lean_IR_ToIR_lowerLet___lambda__3(x_2, x_7903, x_7893, x_7898, x_7877, x_7909, x_7910, x_4, x_5, x_7908); +if (lean_obj_tag(x_7911) == 0) +{ +lean_object* x_7912; lean_object* x_7913; uint8_t x_7914; +x_7912 = lean_ctor_get(x_7911, 0); +lean_inc(x_7912); +x_7913 = lean_ctor_get(x_7911, 1); +lean_inc(x_7913); +lean_dec(x_7911); +x_7914 = !lean_is_exclusive(x_7912); +if (x_7914 == 0) +{ +lean_object* x_7915; +x_7915 = lean_ctor_get(x_7912, 0); +lean_ctor_set(x_7878, 0, x_7915); +lean_ctor_set(x_7912, 0, x_7878); +x_5959 = x_7912; +x_5960 = x_7913; +goto block_7876; +} +else +{ +lean_object* x_7916; lean_object* x_7917; lean_object* x_7918; +x_7916 = lean_ctor_get(x_7912, 0); +x_7917 = lean_ctor_get(x_7912, 1); +lean_inc(x_7917); +lean_inc(x_7916); +lean_dec(x_7912); +lean_ctor_set(x_7878, 0, x_7916); +x_7918 = lean_alloc_ctor(0, 2, 0); +lean_ctor_set(x_7918, 0, x_7878); +lean_ctor_set(x_7918, 1, x_7917); +x_5959 = x_7918; +x_5960 = x_7913; +goto block_7876; +} +} +else +{ +uint8_t x_7919; +lean_free_object(x_7878); +lean_dec(x_5957); +lean_dec(x_5955); +lean_dec(x_5945); +lean_dec(x_5944); +lean_dec(x_153); +lean_dec(x_5); +lean_dec(x_4); +lean_dec(x_2); +lean_dec(x_1); +x_7919 = !lean_is_exclusive(x_7911); +if (x_7919 == 0) +{ +return x_7911; +} +else +{ +lean_object* x_7920; lean_object* x_7921; lean_object* x_7922; +x_7920 = lean_ctor_get(x_7911, 0); +x_7921 = lean_ctor_get(x_7911, 1); +lean_inc(x_7921); +lean_inc(x_7920); +lean_dec(x_7911); +x_7922 = lean_alloc_ctor(1, 2, 0); +lean_ctor_set(x_7922, 0, x_7920); +lean_ctor_set(x_7922, 1, x_7921); +return x_7922; +} +} +} +else +{ +uint8_t x_7923; +lean_dec(x_7903); +lean_dec(x_7898); +lean_dec(x_7877); +lean_dec(x_7893); +lean_free_object(x_7878); +lean_dec(x_5957); +lean_dec(x_5955); +lean_dec(x_5945); +lean_dec(x_5944); +lean_dec(x_153); +lean_dec(x_5); +lean_dec(x_4); +lean_dec(x_2); +lean_dec(x_1); +x_7923 = !lean_is_exclusive(x_7906); +if (x_7923 == 0) +{ +return x_7906; +} +else +{ +lean_object* x_7924; lean_object* x_7925; lean_object* x_7926; +x_7924 = lean_ctor_get(x_7906, 0); +x_7925 = lean_ctor_get(x_7906, 1); +lean_inc(x_7925); +lean_inc(x_7924); +lean_dec(x_7906); +x_7926 = lean_alloc_ctor(1, 2, 0); +lean_ctor_set(x_7926, 0, x_7924); +lean_ctor_set(x_7926, 1, x_7925); +return x_7926; +} +} +} +else +{ +lean_object* x_7927; lean_object* x_7928; lean_object* x_7929; lean_object* x_7930; lean_object* x_7931; lean_object* x_7932; lean_object* x_7933; lean_object* x_7934; +lean_dec(x_7888); +lean_dec(x_7886); +lean_inc(x_5957); +lean_inc(x_153); +lean_ctor_set_tag(x_7877, 6); +lean_ctor_set(x_7877, 1, x_5957); +lean_ctor_set(x_7877, 0, x_153); +x_7927 = lean_ctor_get(x_1, 0); +lean_inc(x_7927); +x_7928 = l_Lean_IR_ToIR_bindVar(x_7927, x_5958, x_4, x_5, x_7882); +x_7929 = lean_ctor_get(x_7928, 0); +lean_inc(x_7929); +x_7930 = lean_ctor_get(x_7928, 1); 
+lean_inc(x_7930); +lean_dec(x_7928); +x_7931 = lean_ctor_get(x_7929, 0); +lean_inc(x_7931); +x_7932 = lean_ctor_get(x_7929, 1); +lean_inc(x_7932); +lean_dec(x_7929); +x_7933 = lean_ctor_get(x_1, 2); +lean_inc(x_7933); +lean_inc(x_5); +lean_inc(x_4); +x_7934 = l_Lean_IR_ToIR_lowerType(x_7933, x_7932, x_4, x_5, x_7930); +if (lean_obj_tag(x_7934) == 0) +{ +lean_object* x_7935; lean_object* x_7936; lean_object* x_7937; lean_object* x_7938; lean_object* x_7939; +x_7935 = lean_ctor_get(x_7934, 0); +lean_inc(x_7935); +x_7936 = lean_ctor_get(x_7934, 1); +lean_inc(x_7936); +lean_dec(x_7934); +x_7937 = lean_ctor_get(x_7935, 0); +lean_inc(x_7937); +x_7938 = lean_ctor_get(x_7935, 1); +lean_inc(x_7938); +lean_dec(x_7935); +lean_inc(x_5); +lean_inc(x_4); +lean_inc(x_2); +x_7939 = l_Lean_IR_ToIR_lowerLet___lambda__1(x_2, x_7931, x_7877, x_7937, x_7938, x_4, x_5, x_7936); +if (lean_obj_tag(x_7939) == 0) +{ +lean_object* x_7940; lean_object* x_7941; uint8_t x_7942; +x_7940 = lean_ctor_get(x_7939, 0); +lean_inc(x_7940); +x_7941 = lean_ctor_get(x_7939, 1); +lean_inc(x_7941); +lean_dec(x_7939); +x_7942 = !lean_is_exclusive(x_7940); +if (x_7942 == 0) +{ +lean_object* x_7943; +x_7943 = lean_ctor_get(x_7940, 0); +lean_ctor_set(x_7878, 0, x_7943); +lean_ctor_set(x_7940, 0, x_7878); +x_5959 = x_7940; +x_5960 = x_7941; +goto block_7876; +} +else +{ +lean_object* x_7944; lean_object* x_7945; lean_object* x_7946; +x_7944 = lean_ctor_get(x_7940, 0); +x_7945 = lean_ctor_get(x_7940, 1); +lean_inc(x_7945); +lean_inc(x_7944); +lean_dec(x_7940); +lean_ctor_set(x_7878, 0, x_7944); +x_7946 = lean_alloc_ctor(0, 2, 0); +lean_ctor_set(x_7946, 0, x_7878); +lean_ctor_set(x_7946, 1, x_7945); +x_5959 = x_7946; +x_5960 = x_7941; +goto block_7876; +} +} +else +{ +uint8_t x_7947; +lean_free_object(x_7878); +lean_dec(x_5957); +lean_dec(x_5955); +lean_dec(x_5945); +lean_dec(x_5944); +lean_dec(x_153); +lean_dec(x_5); +lean_dec(x_4); +lean_dec(x_2); +lean_dec(x_1); +x_7947 = !lean_is_exclusive(x_7939); +if (x_7947 == 0) +{ +return x_7939; +} +else +{ +lean_object* x_7948; lean_object* x_7949; lean_object* x_7950; +x_7948 = lean_ctor_get(x_7939, 0); +x_7949 = lean_ctor_get(x_7939, 1); +lean_inc(x_7949); +lean_inc(x_7948); +lean_dec(x_7939); +x_7950 = lean_alloc_ctor(1, 2, 0); +lean_ctor_set(x_7950, 0, x_7948); +lean_ctor_set(x_7950, 1, x_7949); +return x_7950; +} +} +} +else +{ +uint8_t x_7951; +lean_dec(x_7931); +lean_dec(x_7877); +lean_free_object(x_7878); +lean_dec(x_5957); +lean_dec(x_5955); +lean_dec(x_5945); +lean_dec(x_5944); +lean_dec(x_153); +lean_dec(x_5); +lean_dec(x_4); +lean_dec(x_2); +lean_dec(x_1); +x_7951 = !lean_is_exclusive(x_7934); +if (x_7951 == 0) +{ +return x_7934; +} +else +{ +lean_object* x_7952; lean_object* x_7953; lean_object* x_7954; +x_7952 = lean_ctor_get(x_7934, 0); +x_7953 = lean_ctor_get(x_7934, 1); +lean_inc(x_7953); +lean_inc(x_7952); +lean_dec(x_7934); +x_7954 = lean_alloc_ctor(1, 2, 0); +lean_ctor_set(x_7954, 0, x_7952); +lean_ctor_set(x_7954, 1, x_7953); +return x_7954; +} +} +} +} +else +{ +lean_object* x_7955; lean_object* x_7956; lean_object* x_7957; lean_object* x_7958; lean_object* x_7959; lean_object* x_7960; lean_object* x_7961; lean_object* x_7962; +lean_dec(x_7888); +lean_dec(x_7886); +lean_inc(x_5957); +lean_inc(x_153); +lean_ctor_set_tag(x_7877, 7); +lean_ctor_set(x_7877, 1, x_5957); +lean_ctor_set(x_7877, 0, x_153); +x_7955 = lean_ctor_get(x_1, 0); +lean_inc(x_7955); +x_7956 = l_Lean_IR_ToIR_bindVar(x_7955, x_5958, x_4, x_5, x_7882); +x_7957 = lean_ctor_get(x_7956, 0); +lean_inc(x_7957); 
+x_7958 = lean_ctor_get(x_7956, 1); +lean_inc(x_7958); +lean_dec(x_7956); +x_7959 = lean_ctor_get(x_7957, 0); +lean_inc(x_7959); +x_7960 = lean_ctor_get(x_7957, 1); +lean_inc(x_7960); +lean_dec(x_7957); +x_7961 = lean_box(7); +lean_inc(x_5); +lean_inc(x_4); +lean_inc(x_2); +x_7962 = l_Lean_IR_ToIR_lowerLet___lambda__1(x_2, x_7959, x_7877, x_7961, x_7960, x_4, x_5, x_7958); +if (lean_obj_tag(x_7962) == 0) +{ +lean_object* x_7963; lean_object* x_7964; uint8_t x_7965; +x_7963 = lean_ctor_get(x_7962, 0); +lean_inc(x_7963); +x_7964 = lean_ctor_get(x_7962, 1); +lean_inc(x_7964); +lean_dec(x_7962); +x_7965 = !lean_is_exclusive(x_7963); +if (x_7965 == 0) +{ +lean_object* x_7966; +x_7966 = lean_ctor_get(x_7963, 0); +lean_ctor_set(x_7878, 0, x_7966); +lean_ctor_set(x_7963, 0, x_7878); +x_5959 = x_7963; +x_5960 = x_7964; +goto block_7876; +} +else +{ +lean_object* x_7967; lean_object* x_7968; lean_object* x_7969; +x_7967 = lean_ctor_get(x_7963, 0); +x_7968 = lean_ctor_get(x_7963, 1); +lean_inc(x_7968); +lean_inc(x_7967); +lean_dec(x_7963); +lean_ctor_set(x_7878, 0, x_7967); +x_7969 = lean_alloc_ctor(0, 2, 0); +lean_ctor_set(x_7969, 0, x_7878); +lean_ctor_set(x_7969, 1, x_7968); +x_5959 = x_7969; +x_5960 = x_7964; +goto block_7876; +} +} +else +{ +uint8_t x_7970; +lean_free_object(x_7878); +lean_dec(x_5957); +lean_dec(x_5955); +lean_dec(x_5945); +lean_dec(x_5944); +lean_dec(x_153); +lean_dec(x_5); +lean_dec(x_4); +lean_dec(x_2); +lean_dec(x_1); +x_7970 = !lean_is_exclusive(x_7962); +if (x_7970 == 0) +{ +return x_7962; +} +else +{ +lean_object* x_7971; lean_object* x_7972; lean_object* x_7973; +x_7971 = lean_ctor_get(x_7962, 0); +x_7972 = lean_ctor_get(x_7962, 1); +lean_inc(x_7972); +lean_inc(x_7971); +lean_dec(x_7962); +x_7973 = lean_alloc_ctor(1, 2, 0); +lean_ctor_set(x_7973, 0, x_7971); +lean_ctor_set(x_7973, 1, x_7972); +return x_7973; +} +} +} +} +else +{ +lean_object* x_7974; lean_object* x_7975; lean_object* x_7976; lean_object* x_7977; uint8_t x_7978; +x_7974 = lean_ctor_get(x_7878, 0); +lean_inc(x_7974); +lean_dec(x_7878); +x_7975 = lean_array_get_size(x_5957); +x_7976 = lean_ctor_get(x_7974, 3); +lean_inc(x_7976); +lean_dec(x_7974); +x_7977 = lean_array_get_size(x_7976); +lean_dec(x_7976); +x_7978 = lean_nat_dec_lt(x_7975, x_7977); +if (x_7978 == 0) +{ +uint8_t x_7979; +x_7979 = lean_nat_dec_eq(x_7975, x_7977); +if (x_7979 == 0) +{ +lean_object* x_7980; lean_object* x_7981; lean_object* x_7982; lean_object* x_7983; lean_object* x_7984; lean_object* x_7985; lean_object* x_7986; lean_object* x_7987; lean_object* x_7988; lean_object* x_7989; lean_object* x_7990; lean_object* x_7991; lean_object* x_7992; lean_object* x_7993; lean_object* x_7994; lean_object* x_7995; +x_7980 = lean_unsigned_to_nat(0u); +x_7981 = l_Array_extract___rarg(x_5957, x_7980, x_7977); +x_7982 = l_Array_extract___rarg(x_5957, x_7977, x_7975); +lean_dec(x_7975); +lean_inc(x_153); +lean_ctor_set_tag(x_7877, 6); +lean_ctor_set(x_7877, 1, x_7981); +lean_ctor_set(x_7877, 0, x_153); +x_7983 = lean_ctor_get(x_1, 0); +lean_inc(x_7983); +x_7984 = l_Lean_IR_ToIR_bindVar(x_7983, x_5958, x_4, x_5, x_7882); +x_7985 = lean_ctor_get(x_7984, 0); +lean_inc(x_7985); +x_7986 = lean_ctor_get(x_7984, 1); +lean_inc(x_7986); +lean_dec(x_7984); +x_7987 = lean_ctor_get(x_7985, 0); +lean_inc(x_7987); +x_7988 = lean_ctor_get(x_7985, 1); +lean_inc(x_7988); +lean_dec(x_7985); +x_7989 = l_Lean_IR_ToIR_newVar(x_7988, x_4, x_5, x_7986); +x_7990 = lean_ctor_get(x_7989, 0); +lean_inc(x_7990); +x_7991 = lean_ctor_get(x_7989, 1); +lean_inc(x_7991); 
+lean_dec(x_7989); +x_7992 = lean_ctor_get(x_7990, 0); +lean_inc(x_7992); +x_7993 = lean_ctor_get(x_7990, 1); +lean_inc(x_7993); +lean_dec(x_7990); +x_7994 = lean_ctor_get(x_1, 2); +lean_inc(x_7994); +lean_inc(x_5); +lean_inc(x_4); +x_7995 = l_Lean_IR_ToIR_lowerType(x_7994, x_7993, x_4, x_5, x_7991); +if (lean_obj_tag(x_7995) == 0) +{ +lean_object* x_7996; lean_object* x_7997; lean_object* x_7998; lean_object* x_7999; lean_object* x_8000; +x_7996 = lean_ctor_get(x_7995, 0); +lean_inc(x_7996); +x_7997 = lean_ctor_get(x_7995, 1); +lean_inc(x_7997); +lean_dec(x_7995); +x_7998 = lean_ctor_get(x_7996, 0); +lean_inc(x_7998); +x_7999 = lean_ctor_get(x_7996, 1); +lean_inc(x_7999); +lean_dec(x_7996); +lean_inc(x_5); +lean_inc(x_4); +lean_inc(x_2); +x_8000 = l_Lean_IR_ToIR_lowerLet___lambda__3(x_2, x_7992, x_7982, x_7987, x_7877, x_7998, x_7999, x_4, x_5, x_7997); +if (lean_obj_tag(x_8000) == 0) +{ +lean_object* x_8001; lean_object* x_8002; lean_object* x_8003; lean_object* x_8004; lean_object* x_8005; lean_object* x_8006; lean_object* x_8007; +x_8001 = lean_ctor_get(x_8000, 0); +lean_inc(x_8001); +x_8002 = lean_ctor_get(x_8000, 1); +lean_inc(x_8002); +lean_dec(x_8000); +x_8003 = lean_ctor_get(x_8001, 0); +lean_inc(x_8003); +x_8004 = lean_ctor_get(x_8001, 1); +lean_inc(x_8004); +if (lean_is_exclusive(x_8001)) { + lean_ctor_release(x_8001, 0); + lean_ctor_release(x_8001, 1); + x_8005 = x_8001; +} else { + lean_dec_ref(x_8001); + x_8005 = lean_box(0); +} +x_8006 = lean_alloc_ctor(1, 1, 0); +lean_ctor_set(x_8006, 0, x_8003); +if (lean_is_scalar(x_8005)) { + x_8007 = lean_alloc_ctor(0, 2, 0); +} else { + x_8007 = x_8005; +} +lean_ctor_set(x_8007, 0, x_8006); +lean_ctor_set(x_8007, 1, x_8004); +x_5959 = x_8007; +x_5960 = x_8002; +goto block_7876; +} +else +{ +lean_object* x_8008; lean_object* x_8009; lean_object* x_8010; lean_object* x_8011; +lean_dec(x_5957); +lean_dec(x_5955); +lean_dec(x_5945); +lean_dec(x_5944); +lean_dec(x_153); +lean_dec(x_5); +lean_dec(x_4); +lean_dec(x_2); +lean_dec(x_1); +x_8008 = lean_ctor_get(x_8000, 0); +lean_inc(x_8008); +x_8009 = lean_ctor_get(x_8000, 1); +lean_inc(x_8009); +if (lean_is_exclusive(x_8000)) { + lean_ctor_release(x_8000, 0); + lean_ctor_release(x_8000, 1); + x_8010 = x_8000; +} else { + lean_dec_ref(x_8000); + x_8010 = lean_box(0); +} +if (lean_is_scalar(x_8010)) { + x_8011 = lean_alloc_ctor(1, 2, 0); +} else { + x_8011 = x_8010; +} +lean_ctor_set(x_8011, 0, x_8008); +lean_ctor_set(x_8011, 1, x_8009); +return x_8011; +} +} +else +{ +lean_object* x_8012; lean_object* x_8013; lean_object* x_8014; lean_object* x_8015; +lean_dec(x_7992); +lean_dec(x_7987); +lean_dec(x_7877); +lean_dec(x_7982); +lean_dec(x_5957); +lean_dec(x_5955); +lean_dec(x_5945); +lean_dec(x_5944); +lean_dec(x_153); +lean_dec(x_5); +lean_dec(x_4); +lean_dec(x_2); +lean_dec(x_1); +x_8012 = lean_ctor_get(x_7995, 0); +lean_inc(x_8012); +x_8013 = lean_ctor_get(x_7995, 1); +lean_inc(x_8013); +if (lean_is_exclusive(x_7995)) { + lean_ctor_release(x_7995, 0); + lean_ctor_release(x_7995, 1); + x_8014 = x_7995; +} else { + lean_dec_ref(x_7995); + x_8014 = lean_box(0); +} +if (lean_is_scalar(x_8014)) { + x_8015 = lean_alloc_ctor(1, 2, 0); +} else { + x_8015 = x_8014; +} +lean_ctor_set(x_8015, 0, x_8012); +lean_ctor_set(x_8015, 1, x_8013); +return x_8015; +} +} +else +{ +lean_object* x_8016; lean_object* x_8017; lean_object* x_8018; lean_object* x_8019; lean_object* x_8020; lean_object* x_8021; lean_object* x_8022; lean_object* x_8023; +lean_dec(x_7977); +lean_dec(x_7975); +lean_inc(x_5957); 
+lean_inc(x_153); +lean_ctor_set_tag(x_7877, 6); +lean_ctor_set(x_7877, 1, x_5957); +lean_ctor_set(x_7877, 0, x_153); +x_8016 = lean_ctor_get(x_1, 0); +lean_inc(x_8016); +x_8017 = l_Lean_IR_ToIR_bindVar(x_8016, x_5958, x_4, x_5, x_7882); +x_8018 = lean_ctor_get(x_8017, 0); +lean_inc(x_8018); +x_8019 = lean_ctor_get(x_8017, 1); +lean_inc(x_8019); +lean_dec(x_8017); +x_8020 = lean_ctor_get(x_8018, 0); +lean_inc(x_8020); +x_8021 = lean_ctor_get(x_8018, 1); +lean_inc(x_8021); +lean_dec(x_8018); +x_8022 = lean_ctor_get(x_1, 2); +lean_inc(x_8022); +lean_inc(x_5); +lean_inc(x_4); +x_8023 = l_Lean_IR_ToIR_lowerType(x_8022, x_8021, x_4, x_5, x_8019); +if (lean_obj_tag(x_8023) == 0) +{ +lean_object* x_8024; lean_object* x_8025; lean_object* x_8026; lean_object* x_8027; lean_object* x_8028; +x_8024 = lean_ctor_get(x_8023, 0); +lean_inc(x_8024); +x_8025 = lean_ctor_get(x_8023, 1); +lean_inc(x_8025); +lean_dec(x_8023); +x_8026 = lean_ctor_get(x_8024, 0); +lean_inc(x_8026); +x_8027 = lean_ctor_get(x_8024, 1); +lean_inc(x_8027); +lean_dec(x_8024); +lean_inc(x_5); +lean_inc(x_4); +lean_inc(x_2); +x_8028 = l_Lean_IR_ToIR_lowerLet___lambda__1(x_2, x_8020, x_7877, x_8026, x_8027, x_4, x_5, x_8025); +if (lean_obj_tag(x_8028) == 0) +{ +lean_object* x_8029; lean_object* x_8030; lean_object* x_8031; lean_object* x_8032; lean_object* x_8033; lean_object* x_8034; lean_object* x_8035; +x_8029 = lean_ctor_get(x_8028, 0); +lean_inc(x_8029); +x_8030 = lean_ctor_get(x_8028, 1); +lean_inc(x_8030); +lean_dec(x_8028); +x_8031 = lean_ctor_get(x_8029, 0); +lean_inc(x_8031); +x_8032 = lean_ctor_get(x_8029, 1); +lean_inc(x_8032); +if (lean_is_exclusive(x_8029)) { + lean_ctor_release(x_8029, 0); + lean_ctor_release(x_8029, 1); + x_8033 = x_8029; +} else { + lean_dec_ref(x_8029); + x_8033 = lean_box(0); +} +x_8034 = lean_alloc_ctor(1, 1, 0); +lean_ctor_set(x_8034, 0, x_8031); +if (lean_is_scalar(x_8033)) { + x_8035 = lean_alloc_ctor(0, 2, 0); +} else { + x_8035 = x_8033; +} +lean_ctor_set(x_8035, 0, x_8034); +lean_ctor_set(x_8035, 1, x_8032); +x_5959 = x_8035; +x_5960 = x_8030; +goto block_7876; +} +else +{ +lean_object* x_8036; lean_object* x_8037; lean_object* x_8038; lean_object* x_8039; +lean_dec(x_5957); +lean_dec(x_5955); +lean_dec(x_5945); +lean_dec(x_5944); +lean_dec(x_153); +lean_dec(x_5); +lean_dec(x_4); +lean_dec(x_2); +lean_dec(x_1); +x_8036 = lean_ctor_get(x_8028, 0); +lean_inc(x_8036); +x_8037 = lean_ctor_get(x_8028, 1); +lean_inc(x_8037); +if (lean_is_exclusive(x_8028)) { + lean_ctor_release(x_8028, 0); + lean_ctor_release(x_8028, 1); + x_8038 = x_8028; +} else { + lean_dec_ref(x_8028); + x_8038 = lean_box(0); +} +if (lean_is_scalar(x_8038)) { + x_8039 = lean_alloc_ctor(1, 2, 0); +} else { + x_8039 = x_8038; +} +lean_ctor_set(x_8039, 0, x_8036); +lean_ctor_set(x_8039, 1, x_8037); +return x_8039; +} +} +else +{ +lean_object* x_8040; lean_object* x_8041; lean_object* x_8042; lean_object* x_8043; +lean_dec(x_8020); +lean_dec(x_7877); +lean_dec(x_5957); +lean_dec(x_5955); +lean_dec(x_5945); +lean_dec(x_5944); +lean_dec(x_153); +lean_dec(x_5); +lean_dec(x_4); +lean_dec(x_2); +lean_dec(x_1); +x_8040 = lean_ctor_get(x_8023, 0); +lean_inc(x_8040); +x_8041 = lean_ctor_get(x_8023, 1); +lean_inc(x_8041); +if (lean_is_exclusive(x_8023)) { + lean_ctor_release(x_8023, 0); + lean_ctor_release(x_8023, 1); + x_8042 = x_8023; +} else { + lean_dec_ref(x_8023); + x_8042 = lean_box(0); +} +if (lean_is_scalar(x_8042)) { + x_8043 = lean_alloc_ctor(1, 2, 0); +} else { + x_8043 = x_8042; +} +lean_ctor_set(x_8043, 0, x_8040); 
+lean_ctor_set(x_8043, 1, x_8041); +return x_8043; +} +} +} +else +{ +lean_object* x_8044; lean_object* x_8045; lean_object* x_8046; lean_object* x_8047; lean_object* x_8048; lean_object* x_8049; lean_object* x_8050; lean_object* x_8051; +lean_dec(x_7977); +lean_dec(x_7975); +lean_inc(x_5957); +lean_inc(x_153); +lean_ctor_set_tag(x_7877, 7); +lean_ctor_set(x_7877, 1, x_5957); +lean_ctor_set(x_7877, 0, x_153); +x_8044 = lean_ctor_get(x_1, 0); +lean_inc(x_8044); +x_8045 = l_Lean_IR_ToIR_bindVar(x_8044, x_5958, x_4, x_5, x_7882); +x_8046 = lean_ctor_get(x_8045, 0); +lean_inc(x_8046); +x_8047 = lean_ctor_get(x_8045, 1); +lean_inc(x_8047); +lean_dec(x_8045); +x_8048 = lean_ctor_get(x_8046, 0); +lean_inc(x_8048); +x_8049 = lean_ctor_get(x_8046, 1); +lean_inc(x_8049); +lean_dec(x_8046); +x_8050 = lean_box(7); +lean_inc(x_5); +lean_inc(x_4); +lean_inc(x_2); +x_8051 = l_Lean_IR_ToIR_lowerLet___lambda__1(x_2, x_8048, x_7877, x_8050, x_8049, x_4, x_5, x_8047); +if (lean_obj_tag(x_8051) == 0) +{ +lean_object* x_8052; lean_object* x_8053; lean_object* x_8054; lean_object* x_8055; lean_object* x_8056; lean_object* x_8057; lean_object* x_8058; +x_8052 = lean_ctor_get(x_8051, 0); +lean_inc(x_8052); +x_8053 = lean_ctor_get(x_8051, 1); +lean_inc(x_8053); +lean_dec(x_8051); +x_8054 = lean_ctor_get(x_8052, 0); +lean_inc(x_8054); +x_8055 = lean_ctor_get(x_8052, 1); +lean_inc(x_8055); +if (lean_is_exclusive(x_8052)) { + lean_ctor_release(x_8052, 0); + lean_ctor_release(x_8052, 1); + x_8056 = x_8052; +} else { + lean_dec_ref(x_8052); + x_8056 = lean_box(0); +} +x_8057 = lean_alloc_ctor(1, 1, 0); +lean_ctor_set(x_8057, 0, x_8054); +if (lean_is_scalar(x_8056)) { + x_8058 = lean_alloc_ctor(0, 2, 0); +} else { + x_8058 = x_8056; +} +lean_ctor_set(x_8058, 0, x_8057); +lean_ctor_set(x_8058, 1, x_8055); +x_5959 = x_8058; +x_5960 = x_8053; +goto block_7876; +} +else +{ +lean_object* x_8059; lean_object* x_8060; lean_object* x_8061; lean_object* x_8062; +lean_dec(x_5957); +lean_dec(x_5955); +lean_dec(x_5945); +lean_dec(x_5944); +lean_dec(x_153); +lean_dec(x_5); +lean_dec(x_4); +lean_dec(x_2); +lean_dec(x_1); +x_8059 = lean_ctor_get(x_8051, 0); +lean_inc(x_8059); +x_8060 = lean_ctor_get(x_8051, 1); +lean_inc(x_8060); +if (lean_is_exclusive(x_8051)) { + lean_ctor_release(x_8051, 0); + lean_ctor_release(x_8051, 1); + x_8061 = x_8051; +} else { + lean_dec_ref(x_8051); + x_8061 = lean_box(0); +} +if (lean_is_scalar(x_8061)) { + x_8062 = lean_alloc_ctor(1, 2, 0); +} else { + x_8062 = x_8061; +} +lean_ctor_set(x_8062, 0, x_8059); +lean_ctor_set(x_8062, 1, x_8060); +return x_8062; +} +} +} +} +else +{ +lean_object* x_8063; lean_object* x_8064; lean_object* x_8065; lean_object* x_8066; lean_object* x_8067; lean_object* x_8068; uint8_t x_8069; +x_8063 = lean_ctor_get(x_7877, 1); +lean_inc(x_8063); +lean_dec(x_7877); +x_8064 = lean_ctor_get(x_7878, 0); +lean_inc(x_8064); +if (lean_is_exclusive(x_7878)) { + lean_ctor_release(x_7878, 0); + x_8065 = x_7878; +} else { + lean_dec_ref(x_7878); + x_8065 = lean_box(0); +} +x_8066 = lean_array_get_size(x_5957); +x_8067 = lean_ctor_get(x_8064, 3); +lean_inc(x_8067); +lean_dec(x_8064); +x_8068 = lean_array_get_size(x_8067); +lean_dec(x_8067); +x_8069 = lean_nat_dec_lt(x_8066, x_8068); +if (x_8069 == 0) +{ +uint8_t x_8070; +x_8070 = lean_nat_dec_eq(x_8066, x_8068); +if (x_8070 == 0) +{ +lean_object* x_8071; lean_object* x_8072; lean_object* x_8073; lean_object* x_8074; lean_object* x_8075; lean_object* x_8076; lean_object* x_8077; lean_object* x_8078; lean_object* x_8079; lean_object* x_8080; 
lean_object* x_8081; lean_object* x_8082; lean_object* x_8083; lean_object* x_8084; lean_object* x_8085; lean_object* x_8086; lean_object* x_8087; +x_8071 = lean_unsigned_to_nat(0u); +x_8072 = l_Array_extract___rarg(x_5957, x_8071, x_8068); +x_8073 = l_Array_extract___rarg(x_5957, x_8068, x_8066); +lean_dec(x_8066); +lean_inc(x_153); +x_8074 = lean_alloc_ctor(6, 2, 0); +lean_ctor_set(x_8074, 0, x_153); +lean_ctor_set(x_8074, 1, x_8072); +x_8075 = lean_ctor_get(x_1, 0); +lean_inc(x_8075); +x_8076 = l_Lean_IR_ToIR_bindVar(x_8075, x_5958, x_4, x_5, x_8063); +x_8077 = lean_ctor_get(x_8076, 0); +lean_inc(x_8077); +x_8078 = lean_ctor_get(x_8076, 1); +lean_inc(x_8078); +lean_dec(x_8076); +x_8079 = lean_ctor_get(x_8077, 0); +lean_inc(x_8079); +x_8080 = lean_ctor_get(x_8077, 1); +lean_inc(x_8080); +lean_dec(x_8077); +x_8081 = l_Lean_IR_ToIR_newVar(x_8080, x_4, x_5, x_8078); +x_8082 = lean_ctor_get(x_8081, 0); +lean_inc(x_8082); +x_8083 = lean_ctor_get(x_8081, 1); +lean_inc(x_8083); +lean_dec(x_8081); +x_8084 = lean_ctor_get(x_8082, 0); +lean_inc(x_8084); +x_8085 = lean_ctor_get(x_8082, 1); +lean_inc(x_8085); +lean_dec(x_8082); +x_8086 = lean_ctor_get(x_1, 2); +lean_inc(x_8086); +lean_inc(x_5); +lean_inc(x_4); +x_8087 = l_Lean_IR_ToIR_lowerType(x_8086, x_8085, x_4, x_5, x_8083); +if (lean_obj_tag(x_8087) == 0) +{ +lean_object* x_8088; lean_object* x_8089; lean_object* x_8090; lean_object* x_8091; lean_object* x_8092; +x_8088 = lean_ctor_get(x_8087, 0); +lean_inc(x_8088); +x_8089 = lean_ctor_get(x_8087, 1); +lean_inc(x_8089); +lean_dec(x_8087); +x_8090 = lean_ctor_get(x_8088, 0); +lean_inc(x_8090); +x_8091 = lean_ctor_get(x_8088, 1); +lean_inc(x_8091); +lean_dec(x_8088); +lean_inc(x_5); +lean_inc(x_4); +lean_inc(x_2); +x_8092 = l_Lean_IR_ToIR_lowerLet___lambda__3(x_2, x_8084, x_8073, x_8079, x_8074, x_8090, x_8091, x_4, x_5, x_8089); +if (lean_obj_tag(x_8092) == 0) +{ +lean_object* x_8093; lean_object* x_8094; lean_object* x_8095; lean_object* x_8096; lean_object* x_8097; lean_object* x_8098; lean_object* x_8099; +x_8093 = lean_ctor_get(x_8092, 0); +lean_inc(x_8093); +x_8094 = lean_ctor_get(x_8092, 1); +lean_inc(x_8094); +lean_dec(x_8092); +x_8095 = lean_ctor_get(x_8093, 0); +lean_inc(x_8095); +x_8096 = lean_ctor_get(x_8093, 1); +lean_inc(x_8096); +if (lean_is_exclusive(x_8093)) { + lean_ctor_release(x_8093, 0); + lean_ctor_release(x_8093, 1); + x_8097 = x_8093; +} else { + lean_dec_ref(x_8093); + x_8097 = lean_box(0); +} +if (lean_is_scalar(x_8065)) { + x_8098 = lean_alloc_ctor(1, 1, 0); +} else { + x_8098 = x_8065; +} +lean_ctor_set(x_8098, 0, x_8095); +if (lean_is_scalar(x_8097)) { + x_8099 = lean_alloc_ctor(0, 2, 0); +} else { + x_8099 = x_8097; +} +lean_ctor_set(x_8099, 0, x_8098); +lean_ctor_set(x_8099, 1, x_8096); +x_5959 = x_8099; +x_5960 = x_8094; +goto block_7876; +} +else +{ +lean_object* x_8100; lean_object* x_8101; lean_object* x_8102; lean_object* x_8103; +lean_dec(x_8065); +lean_dec(x_5957); +lean_dec(x_5955); +lean_dec(x_5945); +lean_dec(x_5944); +lean_dec(x_153); +lean_dec(x_5); +lean_dec(x_4); +lean_dec(x_2); +lean_dec(x_1); +x_8100 = lean_ctor_get(x_8092, 0); +lean_inc(x_8100); +x_8101 = lean_ctor_get(x_8092, 1); +lean_inc(x_8101); +if (lean_is_exclusive(x_8092)) { + lean_ctor_release(x_8092, 0); + lean_ctor_release(x_8092, 1); + x_8102 = x_8092; +} else { + lean_dec_ref(x_8092); + x_8102 = lean_box(0); +} +if (lean_is_scalar(x_8102)) { + x_8103 = lean_alloc_ctor(1, 2, 0); +} else { + x_8103 = x_8102; +} +lean_ctor_set(x_8103, 0, x_8100); +lean_ctor_set(x_8103, 1, x_8101); +return 
x_8103; +} +} +else +{ +lean_object* x_8104; lean_object* x_8105; lean_object* x_8106; lean_object* x_8107; +lean_dec(x_8084); +lean_dec(x_8079); +lean_dec(x_8074); +lean_dec(x_8073); +lean_dec(x_8065); +lean_dec(x_5957); +lean_dec(x_5955); +lean_dec(x_5945); +lean_dec(x_5944); +lean_dec(x_153); +lean_dec(x_5); +lean_dec(x_4); +lean_dec(x_2); +lean_dec(x_1); +x_8104 = lean_ctor_get(x_8087, 0); +lean_inc(x_8104); +x_8105 = lean_ctor_get(x_8087, 1); +lean_inc(x_8105); +if (lean_is_exclusive(x_8087)) { + lean_ctor_release(x_8087, 0); + lean_ctor_release(x_8087, 1); + x_8106 = x_8087; +} else { + lean_dec_ref(x_8087); + x_8106 = lean_box(0); +} +if (lean_is_scalar(x_8106)) { + x_8107 = lean_alloc_ctor(1, 2, 0); +} else { + x_8107 = x_8106; +} +lean_ctor_set(x_8107, 0, x_8104); +lean_ctor_set(x_8107, 1, x_8105); +return x_8107; +} +} +else +{ +lean_object* x_8108; lean_object* x_8109; lean_object* x_8110; lean_object* x_8111; lean_object* x_8112; lean_object* x_8113; lean_object* x_8114; lean_object* x_8115; lean_object* x_8116; +lean_dec(x_8068); +lean_dec(x_8066); +lean_inc(x_5957); +lean_inc(x_153); +x_8108 = lean_alloc_ctor(6, 2, 0); +lean_ctor_set(x_8108, 0, x_153); +lean_ctor_set(x_8108, 1, x_5957); +x_8109 = lean_ctor_get(x_1, 0); +lean_inc(x_8109); +x_8110 = l_Lean_IR_ToIR_bindVar(x_8109, x_5958, x_4, x_5, x_8063); +x_8111 = lean_ctor_get(x_8110, 0); +lean_inc(x_8111); +x_8112 = lean_ctor_get(x_8110, 1); +lean_inc(x_8112); +lean_dec(x_8110); +x_8113 = lean_ctor_get(x_8111, 0); +lean_inc(x_8113); +x_8114 = lean_ctor_get(x_8111, 1); +lean_inc(x_8114); +lean_dec(x_8111); +x_8115 = lean_ctor_get(x_1, 2); +lean_inc(x_8115); +lean_inc(x_5); +lean_inc(x_4); +x_8116 = l_Lean_IR_ToIR_lowerType(x_8115, x_8114, x_4, x_5, x_8112); +if (lean_obj_tag(x_8116) == 0) +{ +lean_object* x_8117; lean_object* x_8118; lean_object* x_8119; lean_object* x_8120; lean_object* x_8121; +x_8117 = lean_ctor_get(x_8116, 0); +lean_inc(x_8117); +x_8118 = lean_ctor_get(x_8116, 1); +lean_inc(x_8118); +lean_dec(x_8116); +x_8119 = lean_ctor_get(x_8117, 0); +lean_inc(x_8119); +x_8120 = lean_ctor_get(x_8117, 1); +lean_inc(x_8120); +lean_dec(x_8117); +lean_inc(x_5); +lean_inc(x_4); +lean_inc(x_2); +x_8121 = l_Lean_IR_ToIR_lowerLet___lambda__1(x_2, x_8113, x_8108, x_8119, x_8120, x_4, x_5, x_8118); +if (lean_obj_tag(x_8121) == 0) +{ +lean_object* x_8122; lean_object* x_8123; lean_object* x_8124; lean_object* x_8125; lean_object* x_8126; lean_object* x_8127; lean_object* x_8128; +x_8122 = lean_ctor_get(x_8121, 0); +lean_inc(x_8122); +x_8123 = lean_ctor_get(x_8121, 1); +lean_inc(x_8123); +lean_dec(x_8121); +x_8124 = lean_ctor_get(x_8122, 0); +lean_inc(x_8124); +x_8125 = lean_ctor_get(x_8122, 1); +lean_inc(x_8125); +if (lean_is_exclusive(x_8122)) { + lean_ctor_release(x_8122, 0); + lean_ctor_release(x_8122, 1); + x_8126 = x_8122; +} else { + lean_dec_ref(x_8122); + x_8126 = lean_box(0); +} +if (lean_is_scalar(x_8065)) { + x_8127 = lean_alloc_ctor(1, 1, 0); +} else { + x_8127 = x_8065; +} +lean_ctor_set(x_8127, 0, x_8124); +if (lean_is_scalar(x_8126)) { + x_8128 = lean_alloc_ctor(0, 2, 0); +} else { + x_8128 = x_8126; +} +lean_ctor_set(x_8128, 0, x_8127); +lean_ctor_set(x_8128, 1, x_8125); +x_5959 = x_8128; +x_5960 = x_8123; +goto block_7876; +} +else +{ +lean_object* x_8129; lean_object* x_8130; lean_object* x_8131; lean_object* x_8132; +lean_dec(x_8065); +lean_dec(x_5957); +lean_dec(x_5955); +lean_dec(x_5945); +lean_dec(x_5944); +lean_dec(x_153); +lean_dec(x_5); +lean_dec(x_4); +lean_dec(x_2); +lean_dec(x_1); +x_8129 = 
lean_ctor_get(x_8121, 0); +lean_inc(x_8129); +x_8130 = lean_ctor_get(x_8121, 1); +lean_inc(x_8130); +if (lean_is_exclusive(x_8121)) { + lean_ctor_release(x_8121, 0); + lean_ctor_release(x_8121, 1); + x_8131 = x_8121; +} else { + lean_dec_ref(x_8121); + x_8131 = lean_box(0); +} +if (lean_is_scalar(x_8131)) { + x_8132 = lean_alloc_ctor(1, 2, 0); +} else { + x_8132 = x_8131; +} +lean_ctor_set(x_8132, 0, x_8129); +lean_ctor_set(x_8132, 1, x_8130); +return x_8132; +} +} +else +{ +lean_object* x_8133; lean_object* x_8134; lean_object* x_8135; lean_object* x_8136; +lean_dec(x_8113); +lean_dec(x_8108); +lean_dec(x_8065); +lean_dec(x_5957); +lean_dec(x_5955); +lean_dec(x_5945); +lean_dec(x_5944); +lean_dec(x_153); +lean_dec(x_5); +lean_dec(x_4); +lean_dec(x_2); +lean_dec(x_1); +x_8133 = lean_ctor_get(x_8116, 0); +lean_inc(x_8133); +x_8134 = lean_ctor_get(x_8116, 1); +lean_inc(x_8134); +if (lean_is_exclusive(x_8116)) { + lean_ctor_release(x_8116, 0); + lean_ctor_release(x_8116, 1); + x_8135 = x_8116; +} else { + lean_dec_ref(x_8116); + x_8135 = lean_box(0); +} +if (lean_is_scalar(x_8135)) { + x_8136 = lean_alloc_ctor(1, 2, 0); +} else { + x_8136 = x_8135; +} +lean_ctor_set(x_8136, 0, x_8133); +lean_ctor_set(x_8136, 1, x_8134); +return x_8136; +} +} +} +else +{ +lean_object* x_8137; lean_object* x_8138; lean_object* x_8139; lean_object* x_8140; lean_object* x_8141; lean_object* x_8142; lean_object* x_8143; lean_object* x_8144; lean_object* x_8145; +lean_dec(x_8068); +lean_dec(x_8066); +lean_inc(x_5957); +lean_inc(x_153); +x_8137 = lean_alloc_ctor(7, 2, 0); +lean_ctor_set(x_8137, 0, x_153); +lean_ctor_set(x_8137, 1, x_5957); +x_8138 = lean_ctor_get(x_1, 0); +lean_inc(x_8138); +x_8139 = l_Lean_IR_ToIR_bindVar(x_8138, x_5958, x_4, x_5, x_8063); +x_8140 = lean_ctor_get(x_8139, 0); +lean_inc(x_8140); +x_8141 = lean_ctor_get(x_8139, 1); +lean_inc(x_8141); +lean_dec(x_8139); +x_8142 = lean_ctor_get(x_8140, 0); +lean_inc(x_8142); +x_8143 = lean_ctor_get(x_8140, 1); +lean_inc(x_8143); +lean_dec(x_8140); +x_8144 = lean_box(7); +lean_inc(x_5); +lean_inc(x_4); +lean_inc(x_2); +x_8145 = l_Lean_IR_ToIR_lowerLet___lambda__1(x_2, x_8142, x_8137, x_8144, x_8143, x_4, x_5, x_8141); +if (lean_obj_tag(x_8145) == 0) +{ +lean_object* x_8146; lean_object* x_8147; lean_object* x_8148; lean_object* x_8149; lean_object* x_8150; lean_object* x_8151; lean_object* x_8152; +x_8146 = lean_ctor_get(x_8145, 0); +lean_inc(x_8146); +x_8147 = lean_ctor_get(x_8145, 1); +lean_inc(x_8147); +lean_dec(x_8145); +x_8148 = lean_ctor_get(x_8146, 0); +lean_inc(x_8148); +x_8149 = lean_ctor_get(x_8146, 1); +lean_inc(x_8149); +if (lean_is_exclusive(x_8146)) { + lean_ctor_release(x_8146, 0); + lean_ctor_release(x_8146, 1); + x_8150 = x_8146; +} else { + lean_dec_ref(x_8146); + x_8150 = lean_box(0); +} +if (lean_is_scalar(x_8065)) { + x_8151 = lean_alloc_ctor(1, 1, 0); +} else { + x_8151 = x_8065; +} +lean_ctor_set(x_8151, 0, x_8148); +if (lean_is_scalar(x_8150)) { + x_8152 = lean_alloc_ctor(0, 2, 0); +} else { + x_8152 = x_8150; +} +lean_ctor_set(x_8152, 0, x_8151); +lean_ctor_set(x_8152, 1, x_8149); +x_5959 = x_8152; +x_5960 = x_8147; +goto block_7876; +} +else +{ +lean_object* x_8153; lean_object* x_8154; lean_object* x_8155; lean_object* x_8156; +lean_dec(x_8065); +lean_dec(x_5957); +lean_dec(x_5955); +lean_dec(x_5945); +lean_dec(x_5944); +lean_dec(x_153); +lean_dec(x_5); +lean_dec(x_4); +lean_dec(x_2); +lean_dec(x_1); +x_8153 = lean_ctor_get(x_8145, 0); +lean_inc(x_8153); +x_8154 = lean_ctor_get(x_8145, 1); +lean_inc(x_8154); +if 
(lean_is_exclusive(x_8145)) { + lean_ctor_release(x_8145, 0); + lean_ctor_release(x_8145, 1); + x_8155 = x_8145; +} else { + lean_dec_ref(x_8145); + x_8155 = lean_box(0); +} +if (lean_is_scalar(x_8155)) { + x_8156 = lean_alloc_ctor(1, 2, 0); +} else { + x_8156 = x_8155; +} +lean_ctor_set(x_8156, 0, x_8153); +lean_ctor_set(x_8156, 1, x_8154); +return x_8156; +} +} +} +} +block_7876: +{ +lean_object* x_5961; +x_5961 = lean_ctor_get(x_5959, 0); +lean_inc(x_5961); +if (lean_obj_tag(x_5961) == 0) +{ +uint8_t x_5962; +lean_dec(x_5955); +x_5962 = !lean_is_exclusive(x_5959); +if (x_5962 == 0) +{ +lean_object* x_5963; lean_object* x_5964; lean_object* x_5965; lean_object* x_5966; lean_object* x_5967; lean_object* x_5968; lean_object* x_5969; uint8_t x_5970; lean_object* x_5971; +x_5963 = lean_ctor_get(x_5959, 1); +x_5964 = lean_ctor_get(x_5959, 0); +lean_dec(x_5964); +x_5965 = lean_st_ref_get(x_5, x_5960); +x_5966 = lean_ctor_get(x_5965, 0); +lean_inc(x_5966); +x_5967 = lean_ctor_get(x_5965, 1); +lean_inc(x_5967); +if (lean_is_exclusive(x_5965)) { + lean_ctor_release(x_5965, 0); + lean_ctor_release(x_5965, 1); + x_5968 = x_5965; +} else { + lean_dec_ref(x_5965); + x_5968 = lean_box(0); +} +x_5969 = lean_ctor_get(x_5966, 0); +lean_inc(x_5969); +lean_dec(x_5966); +x_5970 = 0; +lean_inc(x_153); +lean_inc(x_5969); +x_5971 = l_Lean_Environment_find_x3f(x_5969, x_153, x_5970); +if (lean_obj_tag(x_5971) == 0) +{ +lean_object* x_5972; lean_object* x_5973; +lean_dec(x_5969); +lean_dec(x_5968); +lean_free_object(x_5959); +lean_dec(x_5957); +lean_dec(x_5945); +lean_dec(x_5944); +lean_dec(x_153); +lean_dec(x_2); +lean_dec(x_1); +x_5972 = l_Lean_IR_ToIR_lowerLet___closed__6; +x_5973 = l_panic___at_Lean_IR_ToIR_lowerAlt_loop___spec__1(x_5972, x_5963, x_4, x_5, x_5967); +return x_5973; +} +else +{ +lean_object* x_5974; +x_5974 = lean_ctor_get(x_5971, 0); +lean_inc(x_5974); +lean_dec(x_5971); +switch (lean_obj_tag(x_5974)) { +case 0: +{ +uint8_t x_5975; +lean_dec(x_5969); +lean_dec(x_5945); +lean_dec(x_5944); +x_5975 = !lean_is_exclusive(x_5974); +if (x_5975 == 0) +{ +lean_object* x_5976; lean_object* x_5977; uint8_t x_5978; +x_5976 = lean_ctor_get(x_5974, 0); +lean_dec(x_5976); +x_5977 = l_Lean_IR_ToIR_lowerLet___closed__9; +x_5978 = lean_name_eq(x_153, x_5977); +if (x_5978 == 0) +{ +lean_object* x_5979; uint8_t x_5980; +x_5979 = l_Lean_IR_ToIR_lowerLet___closed__11; +x_5980 = lean_name_eq(x_153, x_5979); +if (x_5980 == 0) +{ +lean_object* x_5981; lean_object* x_5982; lean_object* x_5983; +lean_dec(x_5968); +lean_free_object(x_5959); +lean_inc(x_153); +x_5981 = l_Lean_IR_ToIR_findDecl(x_153, x_5963, x_4, x_5, x_5967); +x_5982 = lean_ctor_get(x_5981, 0); +lean_inc(x_5982); +x_5983 = lean_ctor_get(x_5982, 0); +lean_inc(x_5983); +if (lean_obj_tag(x_5983) == 0) +{ +uint8_t x_5984; +lean_dec(x_5957); +lean_dec(x_2); +lean_dec(x_1); +x_5984 = !lean_is_exclusive(x_5981); +if (x_5984 == 0) +{ +lean_object* x_5985; lean_object* x_5986; uint8_t x_5987; +x_5985 = lean_ctor_get(x_5981, 1); +x_5986 = lean_ctor_get(x_5981, 0); +lean_dec(x_5986); +x_5987 = !lean_is_exclusive(x_5982); +if (x_5987 == 0) +{ +lean_object* x_5988; lean_object* x_5989; uint8_t x_5990; lean_object* x_5991; lean_object* x_5992; lean_object* x_5993; lean_object* x_5994; lean_object* x_5995; lean_object* x_5996; +x_5988 = lean_ctor_get(x_5982, 1); +x_5989 = lean_ctor_get(x_5982, 0); +lean_dec(x_5989); +x_5990 = 1; +x_5991 = l_Lean_IR_ToIR_lowerLet___closed__14; +x_5992 = l_Lean_Name_toString(x_153, x_5990, x_5991); +lean_ctor_set_tag(x_5974, 3); 
+lean_ctor_set(x_5974, 0, x_5992); +x_5993 = l_Lean_IR_ToIR_lowerLet___closed__13; +lean_ctor_set_tag(x_5982, 5); +lean_ctor_set(x_5982, 1, x_5974); +lean_ctor_set(x_5982, 0, x_5993); +x_5994 = l_Lean_IR_ToIR_lowerLet___closed__16; +lean_ctor_set_tag(x_5981, 5); +lean_ctor_set(x_5981, 1, x_5994); +x_5995 = l_Lean_MessageData_ofFormat(x_5981); +x_5996 = l_Lean_throwError___at_Lean_IR_ToIR_lowerLet___spec__1(x_5995, x_5988, x_4, x_5, x_5985); +lean_dec(x_5); +lean_dec(x_4); +lean_dec(x_5988); +return x_5996; +} +else +{ +lean_object* x_5997; uint8_t x_5998; lean_object* x_5999; lean_object* x_6000; lean_object* x_6001; lean_object* x_6002; lean_object* x_6003; lean_object* x_6004; lean_object* x_6005; +x_5997 = lean_ctor_get(x_5982, 1); +lean_inc(x_5997); +lean_dec(x_5982); +x_5998 = 1; +x_5999 = l_Lean_IR_ToIR_lowerLet___closed__14; +x_6000 = l_Lean_Name_toString(x_153, x_5998, x_5999); +lean_ctor_set_tag(x_5974, 3); +lean_ctor_set(x_5974, 0, x_6000); +x_6001 = l_Lean_IR_ToIR_lowerLet___closed__13; +x_6002 = lean_alloc_ctor(5, 2, 0); +lean_ctor_set(x_6002, 0, x_6001); +lean_ctor_set(x_6002, 1, x_5974); +x_6003 = l_Lean_IR_ToIR_lowerLet___closed__16; +lean_ctor_set_tag(x_5981, 5); +lean_ctor_set(x_5981, 1, x_6003); +lean_ctor_set(x_5981, 0, x_6002); +x_6004 = l_Lean_MessageData_ofFormat(x_5981); +x_6005 = l_Lean_throwError___at_Lean_IR_ToIR_lowerLet___spec__1(x_6004, x_5997, x_4, x_5, x_5985); +lean_dec(x_5); +lean_dec(x_4); +lean_dec(x_5997); +return x_6005; +} +} +else +{ +lean_object* x_6006; lean_object* x_6007; lean_object* x_6008; uint8_t x_6009; lean_object* x_6010; lean_object* x_6011; lean_object* x_6012; lean_object* x_6013; lean_object* x_6014; lean_object* x_6015; lean_object* x_6016; lean_object* x_6017; +x_6006 = lean_ctor_get(x_5981, 1); +lean_inc(x_6006); +lean_dec(x_5981); +x_6007 = lean_ctor_get(x_5982, 1); +lean_inc(x_6007); +if (lean_is_exclusive(x_5982)) { + lean_ctor_release(x_5982, 0); + lean_ctor_release(x_5982, 1); + x_6008 = x_5982; +} else { + lean_dec_ref(x_5982); + x_6008 = lean_box(0); +} +x_6009 = 1; +x_6010 = l_Lean_IR_ToIR_lowerLet___closed__14; +x_6011 = l_Lean_Name_toString(x_153, x_6009, x_6010); +lean_ctor_set_tag(x_5974, 3); +lean_ctor_set(x_5974, 0, x_6011); +x_6012 = l_Lean_IR_ToIR_lowerLet___closed__13; +if (lean_is_scalar(x_6008)) { + x_6013 = lean_alloc_ctor(5, 2, 0); +} else { + x_6013 = x_6008; + lean_ctor_set_tag(x_6013, 5); +} +lean_ctor_set(x_6013, 0, x_6012); +lean_ctor_set(x_6013, 1, x_5974); +x_6014 = l_Lean_IR_ToIR_lowerLet___closed__16; +x_6015 = lean_alloc_ctor(5, 2, 0); +lean_ctor_set(x_6015, 0, x_6013); +lean_ctor_set(x_6015, 1, x_6014); +x_6016 = l_Lean_MessageData_ofFormat(x_6015); +x_6017 = l_Lean_throwError___at_Lean_IR_ToIR_lowerLet___spec__1(x_6016, x_6007, x_4, x_5, x_6006); +lean_dec(x_5); +lean_dec(x_4); +lean_dec(x_6007); +return x_6017; +} +} +else +{ +lean_object* x_6018; uint8_t x_6019; +lean_free_object(x_5974); +x_6018 = lean_ctor_get(x_5981, 1); +lean_inc(x_6018); +lean_dec(x_5981); +x_6019 = !lean_is_exclusive(x_5982); +if (x_6019 == 0) +{ +lean_object* x_6020; lean_object* x_6021; lean_object* x_6022; lean_object* x_6023; lean_object* x_6024; lean_object* x_6025; uint8_t x_6026; +x_6020 = lean_ctor_get(x_5982, 1); +x_6021 = lean_ctor_get(x_5982, 0); +lean_dec(x_6021); +x_6022 = lean_ctor_get(x_5983, 0); +lean_inc(x_6022); +lean_dec(x_5983); +x_6023 = lean_array_get_size(x_5957); +x_6024 = l_Lean_IR_Decl_params(x_6022); +lean_dec(x_6022); +x_6025 = lean_array_get_size(x_6024); +lean_dec(x_6024); +x_6026 = 
lean_nat_dec_lt(x_6023, x_6025); +if (x_6026 == 0) +{ +uint8_t x_6027; +x_6027 = lean_nat_dec_eq(x_6023, x_6025); +if (x_6027 == 0) +{ +lean_object* x_6028; lean_object* x_6029; lean_object* x_6030; lean_object* x_6031; lean_object* x_6032; lean_object* x_6033; lean_object* x_6034; lean_object* x_6035; lean_object* x_6036; lean_object* x_6037; lean_object* x_6038; lean_object* x_6039; lean_object* x_6040; lean_object* x_6041; lean_object* x_6042; lean_object* x_6043; +x_6028 = lean_unsigned_to_nat(0u); +x_6029 = l_Array_extract___rarg(x_5957, x_6028, x_6025); +x_6030 = l_Array_extract___rarg(x_5957, x_6025, x_6023); +lean_dec(x_6023); +lean_dec(x_5957); +lean_ctor_set_tag(x_5982, 6); +lean_ctor_set(x_5982, 1, x_6029); +lean_ctor_set(x_5982, 0, x_153); +x_6031 = lean_ctor_get(x_1, 0); +lean_inc(x_6031); +x_6032 = l_Lean_IR_ToIR_bindVar(x_6031, x_6020, x_4, x_5, x_6018); +x_6033 = lean_ctor_get(x_6032, 0); +lean_inc(x_6033); +x_6034 = lean_ctor_get(x_6032, 1); +lean_inc(x_6034); +lean_dec(x_6032); +x_6035 = lean_ctor_get(x_6033, 0); +lean_inc(x_6035); +x_6036 = lean_ctor_get(x_6033, 1); +lean_inc(x_6036); +lean_dec(x_6033); +x_6037 = l_Lean_IR_ToIR_newVar(x_6036, x_4, x_5, x_6034); +x_6038 = lean_ctor_get(x_6037, 0); +lean_inc(x_6038); +x_6039 = lean_ctor_get(x_6037, 1); +lean_inc(x_6039); +lean_dec(x_6037); +x_6040 = lean_ctor_get(x_6038, 0); +lean_inc(x_6040); +x_6041 = lean_ctor_get(x_6038, 1); +lean_inc(x_6041); +lean_dec(x_6038); +x_6042 = lean_ctor_get(x_1, 2); +lean_inc(x_6042); +lean_dec(x_1); +lean_inc(x_5); +lean_inc(x_4); +x_6043 = l_Lean_IR_ToIR_lowerType(x_6042, x_6041, x_4, x_5, x_6039); +if (lean_obj_tag(x_6043) == 0) +{ +lean_object* x_6044; lean_object* x_6045; lean_object* x_6046; lean_object* x_6047; lean_object* x_6048; +x_6044 = lean_ctor_get(x_6043, 0); +lean_inc(x_6044); +x_6045 = lean_ctor_get(x_6043, 1); +lean_inc(x_6045); +lean_dec(x_6043); +x_6046 = lean_ctor_get(x_6044, 0); +lean_inc(x_6046); +x_6047 = lean_ctor_get(x_6044, 1); +lean_inc(x_6047); +lean_dec(x_6044); +x_6048 = l_Lean_IR_ToIR_lowerLet___lambda__3(x_2, x_6040, x_6030, x_6035, x_5982, x_6046, x_6047, x_4, x_5, x_6045); +return x_6048; +} +else +{ +uint8_t x_6049; +lean_dec(x_6040); +lean_dec(x_6035); +lean_dec(x_5982); +lean_dec(x_6030); +lean_dec(x_5); +lean_dec(x_4); +lean_dec(x_2); +x_6049 = !lean_is_exclusive(x_6043); +if (x_6049 == 0) +{ +return x_6043; +} +else +{ +lean_object* x_6050; lean_object* x_6051; lean_object* x_6052; +x_6050 = lean_ctor_get(x_6043, 0); +x_6051 = lean_ctor_get(x_6043, 1); +lean_inc(x_6051); +lean_inc(x_6050); +lean_dec(x_6043); +x_6052 = lean_alloc_ctor(1, 2, 0); +lean_ctor_set(x_6052, 0, x_6050); +lean_ctor_set(x_6052, 1, x_6051); +return x_6052; +} +} +} +else +{ +lean_object* x_6053; lean_object* x_6054; lean_object* x_6055; lean_object* x_6056; lean_object* x_6057; lean_object* x_6058; lean_object* x_6059; lean_object* x_6060; +lean_dec(x_6025); +lean_dec(x_6023); +lean_ctor_set_tag(x_5982, 6); +lean_ctor_set(x_5982, 1, x_5957); +lean_ctor_set(x_5982, 0, x_153); +x_6053 = lean_ctor_get(x_1, 0); +lean_inc(x_6053); +x_6054 = l_Lean_IR_ToIR_bindVar(x_6053, x_6020, x_4, x_5, x_6018); +x_6055 = lean_ctor_get(x_6054, 0); +lean_inc(x_6055); +x_6056 = lean_ctor_get(x_6054, 1); +lean_inc(x_6056); +lean_dec(x_6054); +x_6057 = lean_ctor_get(x_6055, 0); +lean_inc(x_6057); +x_6058 = lean_ctor_get(x_6055, 1); +lean_inc(x_6058); +lean_dec(x_6055); +x_6059 = lean_ctor_get(x_1, 2); +lean_inc(x_6059); +lean_dec(x_1); +lean_inc(x_5); +lean_inc(x_4); +x_6060 = 
l_Lean_IR_ToIR_lowerType(x_6059, x_6058, x_4, x_5, x_6056); +if (lean_obj_tag(x_6060) == 0) +{ +lean_object* x_6061; lean_object* x_6062; lean_object* x_6063; lean_object* x_6064; lean_object* x_6065; +x_6061 = lean_ctor_get(x_6060, 0); +lean_inc(x_6061); +x_6062 = lean_ctor_get(x_6060, 1); +lean_inc(x_6062); +lean_dec(x_6060); +x_6063 = lean_ctor_get(x_6061, 0); +lean_inc(x_6063); +x_6064 = lean_ctor_get(x_6061, 1); +lean_inc(x_6064); +lean_dec(x_6061); +x_6065 = l_Lean_IR_ToIR_lowerLet___lambda__1(x_2, x_6057, x_5982, x_6063, x_6064, x_4, x_5, x_6062); +return x_6065; +} +else +{ +uint8_t x_6066; +lean_dec(x_6057); +lean_dec(x_5982); +lean_dec(x_5); +lean_dec(x_4); +lean_dec(x_2); +x_6066 = !lean_is_exclusive(x_6060); +if (x_6066 == 0) +{ +return x_6060; +} +else +{ +lean_object* x_6067; lean_object* x_6068; lean_object* x_6069; +x_6067 = lean_ctor_get(x_6060, 0); +x_6068 = lean_ctor_get(x_6060, 1); +lean_inc(x_6068); +lean_inc(x_6067); +lean_dec(x_6060); +x_6069 = lean_alloc_ctor(1, 2, 0); +lean_ctor_set(x_6069, 0, x_6067); +lean_ctor_set(x_6069, 1, x_6068); +return x_6069; +} +} +} +} +else +{ +lean_object* x_6070; lean_object* x_6071; lean_object* x_6072; lean_object* x_6073; lean_object* x_6074; lean_object* x_6075; lean_object* x_6076; lean_object* x_6077; +lean_dec(x_6025); +lean_dec(x_6023); +lean_ctor_set_tag(x_5982, 7); +lean_ctor_set(x_5982, 1, x_5957); +lean_ctor_set(x_5982, 0, x_153); +x_6070 = lean_ctor_get(x_1, 0); +lean_inc(x_6070); +lean_dec(x_1); +x_6071 = l_Lean_IR_ToIR_bindVar(x_6070, x_6020, x_4, x_5, x_6018); +x_6072 = lean_ctor_get(x_6071, 0); +lean_inc(x_6072); +x_6073 = lean_ctor_get(x_6071, 1); +lean_inc(x_6073); +lean_dec(x_6071); +x_6074 = lean_ctor_get(x_6072, 0); +lean_inc(x_6074); +x_6075 = lean_ctor_get(x_6072, 1); +lean_inc(x_6075); +lean_dec(x_6072); +x_6076 = lean_box(7); +x_6077 = l_Lean_IR_ToIR_lowerLet___lambda__1(x_2, x_6074, x_5982, x_6076, x_6075, x_4, x_5, x_6073); +return x_6077; +} +} +else +{ +lean_object* x_6078; lean_object* x_6079; lean_object* x_6080; lean_object* x_6081; lean_object* x_6082; uint8_t x_6083; +x_6078 = lean_ctor_get(x_5982, 1); +lean_inc(x_6078); +lean_dec(x_5982); +x_6079 = lean_ctor_get(x_5983, 0); +lean_inc(x_6079); +lean_dec(x_5983); +x_6080 = lean_array_get_size(x_5957); +x_6081 = l_Lean_IR_Decl_params(x_6079); +lean_dec(x_6079); +x_6082 = lean_array_get_size(x_6081); +lean_dec(x_6081); +x_6083 = lean_nat_dec_lt(x_6080, x_6082); +if (x_6083 == 0) +{ +uint8_t x_6084; +x_6084 = lean_nat_dec_eq(x_6080, x_6082); +if (x_6084 == 0) +{ +lean_object* x_6085; lean_object* x_6086; lean_object* x_6087; lean_object* x_6088; lean_object* x_6089; lean_object* x_6090; lean_object* x_6091; lean_object* x_6092; lean_object* x_6093; lean_object* x_6094; lean_object* x_6095; lean_object* x_6096; lean_object* x_6097; lean_object* x_6098; lean_object* x_6099; lean_object* x_6100; lean_object* x_6101; +x_6085 = lean_unsigned_to_nat(0u); +x_6086 = l_Array_extract___rarg(x_5957, x_6085, x_6082); +x_6087 = l_Array_extract___rarg(x_5957, x_6082, x_6080); +lean_dec(x_6080); +lean_dec(x_5957); +x_6088 = lean_alloc_ctor(6, 2, 0); +lean_ctor_set(x_6088, 0, x_153); +lean_ctor_set(x_6088, 1, x_6086); +x_6089 = lean_ctor_get(x_1, 0); +lean_inc(x_6089); +x_6090 = l_Lean_IR_ToIR_bindVar(x_6089, x_6078, x_4, x_5, x_6018); +x_6091 = lean_ctor_get(x_6090, 0); +lean_inc(x_6091); +x_6092 = lean_ctor_get(x_6090, 1); +lean_inc(x_6092); +lean_dec(x_6090); +x_6093 = lean_ctor_get(x_6091, 0); +lean_inc(x_6093); +x_6094 = lean_ctor_get(x_6091, 1); 
+lean_inc(x_6094); +lean_dec(x_6091); +x_6095 = l_Lean_IR_ToIR_newVar(x_6094, x_4, x_5, x_6092); +x_6096 = lean_ctor_get(x_6095, 0); +lean_inc(x_6096); +x_6097 = lean_ctor_get(x_6095, 1); +lean_inc(x_6097); +lean_dec(x_6095); +x_6098 = lean_ctor_get(x_6096, 0); +lean_inc(x_6098); +x_6099 = lean_ctor_get(x_6096, 1); +lean_inc(x_6099); +lean_dec(x_6096); +x_6100 = lean_ctor_get(x_1, 2); +lean_inc(x_6100); +lean_dec(x_1); +lean_inc(x_5); +lean_inc(x_4); +x_6101 = l_Lean_IR_ToIR_lowerType(x_6100, x_6099, x_4, x_5, x_6097); +if (lean_obj_tag(x_6101) == 0) +{ +lean_object* x_6102; lean_object* x_6103; lean_object* x_6104; lean_object* x_6105; lean_object* x_6106; +x_6102 = lean_ctor_get(x_6101, 0); +lean_inc(x_6102); +x_6103 = lean_ctor_get(x_6101, 1); +lean_inc(x_6103); +lean_dec(x_6101); +x_6104 = lean_ctor_get(x_6102, 0); +lean_inc(x_6104); +x_6105 = lean_ctor_get(x_6102, 1); +lean_inc(x_6105); +lean_dec(x_6102); +x_6106 = l_Lean_IR_ToIR_lowerLet___lambda__3(x_2, x_6098, x_6087, x_6093, x_6088, x_6104, x_6105, x_4, x_5, x_6103); +return x_6106; +} +else +{ +lean_object* x_6107; lean_object* x_6108; lean_object* x_6109; lean_object* x_6110; +lean_dec(x_6098); +lean_dec(x_6093); +lean_dec(x_6088); +lean_dec(x_6087); +lean_dec(x_5); +lean_dec(x_4); +lean_dec(x_2); +x_6107 = lean_ctor_get(x_6101, 0); +lean_inc(x_6107); +x_6108 = lean_ctor_get(x_6101, 1); +lean_inc(x_6108); +if (lean_is_exclusive(x_6101)) { + lean_ctor_release(x_6101, 0); + lean_ctor_release(x_6101, 1); + x_6109 = x_6101; +} else { + lean_dec_ref(x_6101); + x_6109 = lean_box(0); +} +if (lean_is_scalar(x_6109)) { + x_6110 = lean_alloc_ctor(1, 2, 0); +} else { + x_6110 = x_6109; +} +lean_ctor_set(x_6110, 0, x_6107); +lean_ctor_set(x_6110, 1, x_6108); +return x_6110; +} +} +else +{ +lean_object* x_6111; lean_object* x_6112; lean_object* x_6113; lean_object* x_6114; lean_object* x_6115; lean_object* x_6116; lean_object* x_6117; lean_object* x_6118; lean_object* x_6119; +lean_dec(x_6082); +lean_dec(x_6080); +x_6111 = lean_alloc_ctor(6, 2, 0); +lean_ctor_set(x_6111, 0, x_153); +lean_ctor_set(x_6111, 1, x_5957); +x_6112 = lean_ctor_get(x_1, 0); +lean_inc(x_6112); +x_6113 = l_Lean_IR_ToIR_bindVar(x_6112, x_6078, x_4, x_5, x_6018); +x_6114 = lean_ctor_get(x_6113, 0); +lean_inc(x_6114); +x_6115 = lean_ctor_get(x_6113, 1); +lean_inc(x_6115); +lean_dec(x_6113); +x_6116 = lean_ctor_get(x_6114, 0); +lean_inc(x_6116); +x_6117 = lean_ctor_get(x_6114, 1); +lean_inc(x_6117); +lean_dec(x_6114); +x_6118 = lean_ctor_get(x_1, 2); +lean_inc(x_6118); +lean_dec(x_1); +lean_inc(x_5); +lean_inc(x_4); +x_6119 = l_Lean_IR_ToIR_lowerType(x_6118, x_6117, x_4, x_5, x_6115); +if (lean_obj_tag(x_6119) == 0) +{ +lean_object* x_6120; lean_object* x_6121; lean_object* x_6122; lean_object* x_6123; lean_object* x_6124; +x_6120 = lean_ctor_get(x_6119, 0); +lean_inc(x_6120); +x_6121 = lean_ctor_get(x_6119, 1); +lean_inc(x_6121); +lean_dec(x_6119); +x_6122 = lean_ctor_get(x_6120, 0); +lean_inc(x_6122); +x_6123 = lean_ctor_get(x_6120, 1); +lean_inc(x_6123); +lean_dec(x_6120); +x_6124 = l_Lean_IR_ToIR_lowerLet___lambda__1(x_2, x_6116, x_6111, x_6122, x_6123, x_4, x_5, x_6121); +return x_6124; +} +else +{ +lean_object* x_6125; lean_object* x_6126; lean_object* x_6127; lean_object* x_6128; +lean_dec(x_6116); +lean_dec(x_6111); +lean_dec(x_5); +lean_dec(x_4); +lean_dec(x_2); +x_6125 = lean_ctor_get(x_6119, 0); +lean_inc(x_6125); +x_6126 = lean_ctor_get(x_6119, 1); +lean_inc(x_6126); +if (lean_is_exclusive(x_6119)) { + lean_ctor_release(x_6119, 0); + lean_ctor_release(x_6119, 
1); + x_6127 = x_6119; +} else { + lean_dec_ref(x_6119); + x_6127 = lean_box(0); +} +if (lean_is_scalar(x_6127)) { + x_6128 = lean_alloc_ctor(1, 2, 0); +} else { + x_6128 = x_6127; +} +lean_ctor_set(x_6128, 0, x_6125); +lean_ctor_set(x_6128, 1, x_6126); +return x_6128; +} +} +} +else +{ +lean_object* x_6129; lean_object* x_6130; lean_object* x_6131; lean_object* x_6132; lean_object* x_6133; lean_object* x_6134; lean_object* x_6135; lean_object* x_6136; lean_object* x_6137; +lean_dec(x_6082); +lean_dec(x_6080); +x_6129 = lean_alloc_ctor(7, 2, 0); +lean_ctor_set(x_6129, 0, x_153); +lean_ctor_set(x_6129, 1, x_5957); +x_6130 = lean_ctor_get(x_1, 0); +lean_inc(x_6130); +lean_dec(x_1); +x_6131 = l_Lean_IR_ToIR_bindVar(x_6130, x_6078, x_4, x_5, x_6018); +x_6132 = lean_ctor_get(x_6131, 0); +lean_inc(x_6132); +x_6133 = lean_ctor_get(x_6131, 1); +lean_inc(x_6133); +lean_dec(x_6131); +x_6134 = lean_ctor_get(x_6132, 0); +lean_inc(x_6134); +x_6135 = lean_ctor_get(x_6132, 1); +lean_inc(x_6135); +lean_dec(x_6132); +x_6136 = lean_box(7); +x_6137 = l_Lean_IR_ToIR_lowerLet___lambda__1(x_2, x_6134, x_6129, x_6136, x_6135, x_4, x_5, x_6133); +return x_6137; +} +} +} +} +else +{ +lean_object* x_6138; lean_object* x_6139; +lean_free_object(x_5974); +lean_dec(x_5957); +lean_dec(x_153); +lean_dec(x_5); +lean_dec(x_4); +lean_dec(x_2); +lean_dec(x_1); +x_6138 = lean_box(13); +lean_ctor_set(x_5959, 0, x_6138); +if (lean_is_scalar(x_5968)) { + x_6139 = lean_alloc_ctor(0, 2, 0); +} else { + x_6139 = x_5968; +} +lean_ctor_set(x_6139, 0, x_5959); +lean_ctor_set(x_6139, 1, x_5967); +return x_6139; +} +} +else +{ +lean_object* x_6140; lean_object* x_6141; lean_object* x_6142; +lean_free_object(x_5974); +lean_dec(x_5968); +lean_free_object(x_5959); +lean_dec(x_153); +x_6140 = l_Lean_IR_instInhabitedArg; +x_6141 = lean_unsigned_to_nat(2u); +x_6142 = lean_array_get(x_6140, x_5957, x_6141); +lean_dec(x_5957); +if (lean_obj_tag(x_6142) == 0) +{ +lean_object* x_6143; lean_object* x_6144; lean_object* x_6145; lean_object* x_6146; lean_object* x_6147; lean_object* x_6148; lean_object* x_6149; +x_6143 = lean_ctor_get(x_6142, 0); +lean_inc(x_6143); +lean_dec(x_6142); +x_6144 = lean_ctor_get(x_1, 0); +lean_inc(x_6144); +lean_dec(x_1); +x_6145 = l_Lean_IR_ToIR_bindVarToVarId(x_6144, x_6143, x_5963, x_4, x_5, x_5967); +x_6146 = lean_ctor_get(x_6145, 0); +lean_inc(x_6146); +x_6147 = lean_ctor_get(x_6145, 1); +lean_inc(x_6147); +lean_dec(x_6145); +x_6148 = lean_ctor_get(x_6146, 1); +lean_inc(x_6148); +lean_dec(x_6146); +x_6149 = l_Lean_IR_ToIR_lowerCode(x_2, x_6148, x_4, x_5, x_6147); +return x_6149; +} +else +{ +lean_object* x_6150; lean_object* x_6151; lean_object* x_6152; lean_object* x_6153; lean_object* x_6154; lean_object* x_6155; +x_6150 = lean_ctor_get(x_1, 0); +lean_inc(x_6150); +lean_dec(x_1); +x_6151 = l_Lean_IR_ToIR_bindErased(x_6150, x_5963, x_4, x_5, x_5967); +x_6152 = lean_ctor_get(x_6151, 0); +lean_inc(x_6152); +x_6153 = lean_ctor_get(x_6151, 1); +lean_inc(x_6153); +lean_dec(x_6151); +x_6154 = lean_ctor_get(x_6152, 1); +lean_inc(x_6154); +lean_dec(x_6152); +x_6155 = l_Lean_IR_ToIR_lowerCode(x_2, x_6154, x_4, x_5, x_6153); +return x_6155; +} +} +} +else +{ +lean_object* x_6156; uint8_t x_6157; +lean_dec(x_5974); +x_6156 = l_Lean_IR_ToIR_lowerLet___closed__9; +x_6157 = lean_name_eq(x_153, x_6156); +if (x_6157 == 0) +{ +lean_object* x_6158; uint8_t x_6159; +x_6158 = l_Lean_IR_ToIR_lowerLet___closed__11; +x_6159 = lean_name_eq(x_153, x_6158); +if (x_6159 == 0) +{ +lean_object* x_6160; lean_object* x_6161; lean_object* 
x_6162; +lean_dec(x_5968); +lean_free_object(x_5959); +lean_inc(x_153); +x_6160 = l_Lean_IR_ToIR_findDecl(x_153, x_5963, x_4, x_5, x_5967); +x_6161 = lean_ctor_get(x_6160, 0); +lean_inc(x_6161); +x_6162 = lean_ctor_get(x_6161, 0); +lean_inc(x_6162); +if (lean_obj_tag(x_6162) == 0) +{ +lean_object* x_6163; lean_object* x_6164; lean_object* x_6165; lean_object* x_6166; uint8_t x_6167; lean_object* x_6168; lean_object* x_6169; lean_object* x_6170; lean_object* x_6171; lean_object* x_6172; lean_object* x_6173; lean_object* x_6174; lean_object* x_6175; lean_object* x_6176; +lean_dec(x_5957); +lean_dec(x_2); +lean_dec(x_1); +x_6163 = lean_ctor_get(x_6160, 1); +lean_inc(x_6163); +if (lean_is_exclusive(x_6160)) { + lean_ctor_release(x_6160, 0); + lean_ctor_release(x_6160, 1); + x_6164 = x_6160; +} else { + lean_dec_ref(x_6160); + x_6164 = lean_box(0); +} +x_6165 = lean_ctor_get(x_6161, 1); +lean_inc(x_6165); +if (lean_is_exclusive(x_6161)) { + lean_ctor_release(x_6161, 0); + lean_ctor_release(x_6161, 1); + x_6166 = x_6161; +} else { + lean_dec_ref(x_6161); + x_6166 = lean_box(0); +} +x_6167 = 1; +x_6168 = l_Lean_IR_ToIR_lowerLet___closed__14; +x_6169 = l_Lean_Name_toString(x_153, x_6167, x_6168); +x_6170 = lean_alloc_ctor(3, 1, 0); +lean_ctor_set(x_6170, 0, x_6169); +x_6171 = l_Lean_IR_ToIR_lowerLet___closed__13; +if (lean_is_scalar(x_6166)) { + x_6172 = lean_alloc_ctor(5, 2, 0); +} else { + x_6172 = x_6166; + lean_ctor_set_tag(x_6172, 5); +} +lean_ctor_set(x_6172, 0, x_6171); +lean_ctor_set(x_6172, 1, x_6170); +x_6173 = l_Lean_IR_ToIR_lowerLet___closed__16; +if (lean_is_scalar(x_6164)) { + x_6174 = lean_alloc_ctor(5, 2, 0); +} else { + x_6174 = x_6164; + lean_ctor_set_tag(x_6174, 5); +} +lean_ctor_set(x_6174, 0, x_6172); +lean_ctor_set(x_6174, 1, x_6173); +x_6175 = l_Lean_MessageData_ofFormat(x_6174); +x_6176 = l_Lean_throwError___at_Lean_IR_ToIR_lowerLet___spec__1(x_6175, x_6165, x_4, x_5, x_6163); +lean_dec(x_5); +lean_dec(x_4); +lean_dec(x_6165); +return x_6176; +} +else +{ +lean_object* x_6177; lean_object* x_6178; lean_object* x_6179; lean_object* x_6180; lean_object* x_6181; lean_object* x_6182; lean_object* x_6183; uint8_t x_6184; +x_6177 = lean_ctor_get(x_6160, 1); +lean_inc(x_6177); +lean_dec(x_6160); +x_6178 = lean_ctor_get(x_6161, 1); +lean_inc(x_6178); +if (lean_is_exclusive(x_6161)) { + lean_ctor_release(x_6161, 0); + lean_ctor_release(x_6161, 1); + x_6179 = x_6161; +} else { + lean_dec_ref(x_6161); + x_6179 = lean_box(0); +} +x_6180 = lean_ctor_get(x_6162, 0); +lean_inc(x_6180); +lean_dec(x_6162); +x_6181 = lean_array_get_size(x_5957); +x_6182 = l_Lean_IR_Decl_params(x_6180); +lean_dec(x_6180); +x_6183 = lean_array_get_size(x_6182); +lean_dec(x_6182); +x_6184 = lean_nat_dec_lt(x_6181, x_6183); +if (x_6184 == 0) +{ +uint8_t x_6185; +x_6185 = lean_nat_dec_eq(x_6181, x_6183); +if (x_6185 == 0) +{ +lean_object* x_6186; lean_object* x_6187; lean_object* x_6188; lean_object* x_6189; lean_object* x_6190; lean_object* x_6191; lean_object* x_6192; lean_object* x_6193; lean_object* x_6194; lean_object* x_6195; lean_object* x_6196; lean_object* x_6197; lean_object* x_6198; lean_object* x_6199; lean_object* x_6200; lean_object* x_6201; lean_object* x_6202; +x_6186 = lean_unsigned_to_nat(0u); +x_6187 = l_Array_extract___rarg(x_5957, x_6186, x_6183); +x_6188 = l_Array_extract___rarg(x_5957, x_6183, x_6181); +lean_dec(x_6181); +lean_dec(x_5957); +if (lean_is_scalar(x_6179)) { + x_6189 = lean_alloc_ctor(6, 2, 0); +} else { + x_6189 = x_6179; + lean_ctor_set_tag(x_6189, 6); +} +lean_ctor_set(x_6189, 
0, x_153); +lean_ctor_set(x_6189, 1, x_6187); +x_6190 = lean_ctor_get(x_1, 0); +lean_inc(x_6190); +x_6191 = l_Lean_IR_ToIR_bindVar(x_6190, x_6178, x_4, x_5, x_6177); +x_6192 = lean_ctor_get(x_6191, 0); +lean_inc(x_6192); +x_6193 = lean_ctor_get(x_6191, 1); +lean_inc(x_6193); +lean_dec(x_6191); +x_6194 = lean_ctor_get(x_6192, 0); +lean_inc(x_6194); +x_6195 = lean_ctor_get(x_6192, 1); +lean_inc(x_6195); +lean_dec(x_6192); +x_6196 = l_Lean_IR_ToIR_newVar(x_6195, x_4, x_5, x_6193); +x_6197 = lean_ctor_get(x_6196, 0); +lean_inc(x_6197); +x_6198 = lean_ctor_get(x_6196, 1); +lean_inc(x_6198); +lean_dec(x_6196); +x_6199 = lean_ctor_get(x_6197, 0); +lean_inc(x_6199); +x_6200 = lean_ctor_get(x_6197, 1); +lean_inc(x_6200); +lean_dec(x_6197); +x_6201 = lean_ctor_get(x_1, 2); +lean_inc(x_6201); +lean_dec(x_1); +lean_inc(x_5); +lean_inc(x_4); +x_6202 = l_Lean_IR_ToIR_lowerType(x_6201, x_6200, x_4, x_5, x_6198); +if (lean_obj_tag(x_6202) == 0) +{ +lean_object* x_6203; lean_object* x_6204; lean_object* x_6205; lean_object* x_6206; lean_object* x_6207; +x_6203 = lean_ctor_get(x_6202, 0); +lean_inc(x_6203); +x_6204 = lean_ctor_get(x_6202, 1); +lean_inc(x_6204); +lean_dec(x_6202); +x_6205 = lean_ctor_get(x_6203, 0); +lean_inc(x_6205); +x_6206 = lean_ctor_get(x_6203, 1); +lean_inc(x_6206); +lean_dec(x_6203); +x_6207 = l_Lean_IR_ToIR_lowerLet___lambda__3(x_2, x_6199, x_6188, x_6194, x_6189, x_6205, x_6206, x_4, x_5, x_6204); +return x_6207; +} +else +{ +lean_object* x_6208; lean_object* x_6209; lean_object* x_6210; lean_object* x_6211; +lean_dec(x_6199); +lean_dec(x_6194); +lean_dec(x_6189); +lean_dec(x_6188); +lean_dec(x_5); +lean_dec(x_4); +lean_dec(x_2); +x_6208 = lean_ctor_get(x_6202, 0); +lean_inc(x_6208); +x_6209 = lean_ctor_get(x_6202, 1); +lean_inc(x_6209); +if (lean_is_exclusive(x_6202)) { + lean_ctor_release(x_6202, 0); + lean_ctor_release(x_6202, 1); + x_6210 = x_6202; +} else { + lean_dec_ref(x_6202); + x_6210 = lean_box(0); +} +if (lean_is_scalar(x_6210)) { + x_6211 = lean_alloc_ctor(1, 2, 0); +} else { + x_6211 = x_6210; +} +lean_ctor_set(x_6211, 0, x_6208); +lean_ctor_set(x_6211, 1, x_6209); +return x_6211; +} +} +else +{ +lean_object* x_6212; lean_object* x_6213; lean_object* x_6214; lean_object* x_6215; lean_object* x_6216; lean_object* x_6217; lean_object* x_6218; lean_object* x_6219; lean_object* x_6220; +lean_dec(x_6183); +lean_dec(x_6181); +if (lean_is_scalar(x_6179)) { + x_6212 = lean_alloc_ctor(6, 2, 0); +} else { + x_6212 = x_6179; + lean_ctor_set_tag(x_6212, 6); +} +lean_ctor_set(x_6212, 0, x_153); +lean_ctor_set(x_6212, 1, x_5957); +x_6213 = lean_ctor_get(x_1, 0); +lean_inc(x_6213); +x_6214 = l_Lean_IR_ToIR_bindVar(x_6213, x_6178, x_4, x_5, x_6177); +x_6215 = lean_ctor_get(x_6214, 0); +lean_inc(x_6215); +x_6216 = lean_ctor_get(x_6214, 1); +lean_inc(x_6216); +lean_dec(x_6214); +x_6217 = lean_ctor_get(x_6215, 0); +lean_inc(x_6217); +x_6218 = lean_ctor_get(x_6215, 1); +lean_inc(x_6218); +lean_dec(x_6215); +x_6219 = lean_ctor_get(x_1, 2); +lean_inc(x_6219); +lean_dec(x_1); +lean_inc(x_5); +lean_inc(x_4); +x_6220 = l_Lean_IR_ToIR_lowerType(x_6219, x_6218, x_4, x_5, x_6216); +if (lean_obj_tag(x_6220) == 0) +{ +lean_object* x_6221; lean_object* x_6222; lean_object* x_6223; lean_object* x_6224; lean_object* x_6225; +x_6221 = lean_ctor_get(x_6220, 0); +lean_inc(x_6221); +x_6222 = lean_ctor_get(x_6220, 1); +lean_inc(x_6222); +lean_dec(x_6220); +x_6223 = lean_ctor_get(x_6221, 0); +lean_inc(x_6223); +x_6224 = lean_ctor_get(x_6221, 1); +lean_inc(x_6224); +lean_dec(x_6221); +x_6225 = 
l_Lean_IR_ToIR_lowerLet___lambda__1(x_2, x_6217, x_6212, x_6223, x_6224, x_4, x_5, x_6222); +return x_6225; +} +else +{ +lean_object* x_6226; lean_object* x_6227; lean_object* x_6228; lean_object* x_6229; +lean_dec(x_6217); +lean_dec(x_6212); +lean_dec(x_5); +lean_dec(x_4); +lean_dec(x_2); +x_6226 = lean_ctor_get(x_6220, 0); +lean_inc(x_6226); +x_6227 = lean_ctor_get(x_6220, 1); +lean_inc(x_6227); +if (lean_is_exclusive(x_6220)) { + lean_ctor_release(x_6220, 0); + lean_ctor_release(x_6220, 1); + x_6228 = x_6220; +} else { + lean_dec_ref(x_6220); + x_6228 = lean_box(0); +} +if (lean_is_scalar(x_6228)) { + x_6229 = lean_alloc_ctor(1, 2, 0); +} else { + x_6229 = x_6228; +} +lean_ctor_set(x_6229, 0, x_6226); +lean_ctor_set(x_6229, 1, x_6227); +return x_6229; +} +} +} +else +{ +lean_object* x_6230; lean_object* x_6231; lean_object* x_6232; lean_object* x_6233; lean_object* x_6234; lean_object* x_6235; lean_object* x_6236; lean_object* x_6237; lean_object* x_6238; +lean_dec(x_6183); +lean_dec(x_6181); +if (lean_is_scalar(x_6179)) { + x_6230 = lean_alloc_ctor(7, 2, 0); +} else { + x_6230 = x_6179; + lean_ctor_set_tag(x_6230, 7); +} +lean_ctor_set(x_6230, 0, x_153); +lean_ctor_set(x_6230, 1, x_5957); +x_6231 = lean_ctor_get(x_1, 0); +lean_inc(x_6231); +lean_dec(x_1); +x_6232 = l_Lean_IR_ToIR_bindVar(x_6231, x_6178, x_4, x_5, x_6177); +x_6233 = lean_ctor_get(x_6232, 0); +lean_inc(x_6233); +x_6234 = lean_ctor_get(x_6232, 1); +lean_inc(x_6234); +lean_dec(x_6232); +x_6235 = lean_ctor_get(x_6233, 0); +lean_inc(x_6235); +x_6236 = lean_ctor_get(x_6233, 1); +lean_inc(x_6236); +lean_dec(x_6233); +x_6237 = lean_box(7); +x_6238 = l_Lean_IR_ToIR_lowerLet___lambda__1(x_2, x_6235, x_6230, x_6237, x_6236, x_4, x_5, x_6234); +return x_6238; +} +} +} +else +{ +lean_object* x_6239; lean_object* x_6240; +lean_dec(x_5957); +lean_dec(x_153); +lean_dec(x_5); +lean_dec(x_4); +lean_dec(x_2); +lean_dec(x_1); +x_6239 = lean_box(13); +lean_ctor_set(x_5959, 0, x_6239); +if (lean_is_scalar(x_5968)) { + x_6240 = lean_alloc_ctor(0, 2, 0); +} else { + x_6240 = x_5968; +} +lean_ctor_set(x_6240, 0, x_5959); +lean_ctor_set(x_6240, 1, x_5967); +return x_6240; +} +} +else +{ +lean_object* x_6241; lean_object* x_6242; lean_object* x_6243; +lean_dec(x_5968); +lean_free_object(x_5959); +lean_dec(x_153); +x_6241 = l_Lean_IR_instInhabitedArg; +x_6242 = lean_unsigned_to_nat(2u); +x_6243 = lean_array_get(x_6241, x_5957, x_6242); +lean_dec(x_5957); +if (lean_obj_tag(x_6243) == 0) +{ +lean_object* x_6244; lean_object* x_6245; lean_object* x_6246; lean_object* x_6247; lean_object* x_6248; lean_object* x_6249; lean_object* x_6250; +x_6244 = lean_ctor_get(x_6243, 0); +lean_inc(x_6244); +lean_dec(x_6243); +x_6245 = lean_ctor_get(x_1, 0); +lean_inc(x_6245); +lean_dec(x_1); +x_6246 = l_Lean_IR_ToIR_bindVarToVarId(x_6245, x_6244, x_5963, x_4, x_5, x_5967); +x_6247 = lean_ctor_get(x_6246, 0); +lean_inc(x_6247); +x_6248 = lean_ctor_get(x_6246, 1); +lean_inc(x_6248); +lean_dec(x_6246); +x_6249 = lean_ctor_get(x_6247, 1); +lean_inc(x_6249); +lean_dec(x_6247); +x_6250 = l_Lean_IR_ToIR_lowerCode(x_2, x_6249, x_4, x_5, x_6248); +return x_6250; +} +else +{ +lean_object* x_6251; lean_object* x_6252; lean_object* x_6253; lean_object* x_6254; lean_object* x_6255; lean_object* x_6256; +x_6251 = lean_ctor_get(x_1, 0); +lean_inc(x_6251); +lean_dec(x_1); +x_6252 = l_Lean_IR_ToIR_bindErased(x_6251, x_5963, x_4, x_5, x_5967); +x_6253 = lean_ctor_get(x_6252, 0); +lean_inc(x_6253); +x_6254 = lean_ctor_get(x_6252, 1); +lean_inc(x_6254); +lean_dec(x_6252); +x_6255 = 
lean_ctor_get(x_6253, 1); +lean_inc(x_6255); +lean_dec(x_6253); +x_6256 = l_Lean_IR_ToIR_lowerCode(x_2, x_6255, x_4, x_5, x_6254); +return x_6256; +} +} +} +} +case 1: +{ +lean_object* x_6257; lean_object* x_6258; lean_object* x_6288; lean_object* x_6289; +lean_dec(x_5974); +lean_dec(x_5969); +lean_dec(x_5945); +lean_dec(x_5944); +lean_inc(x_153); +x_6288 = l_Lean_Compiler_LCNF_getMonoDecl_x3f(x_153, x_4, x_5, x_5967); +x_6289 = lean_ctor_get(x_6288, 0); +lean_inc(x_6289); +if (lean_obj_tag(x_6289) == 0) +{ +lean_object* x_6290; lean_object* x_6291; +x_6290 = lean_ctor_get(x_6288, 1); +lean_inc(x_6290); +lean_dec(x_6288); +x_6291 = lean_box(0); +lean_ctor_set(x_5959, 0, x_6291); +x_6257 = x_5959; +x_6258 = x_6290; +goto block_6287; +} +else +{ +uint8_t x_6292; +lean_free_object(x_5959); +x_6292 = !lean_is_exclusive(x_6288); +if (x_6292 == 0) +{ +lean_object* x_6293; lean_object* x_6294; uint8_t x_6295; +x_6293 = lean_ctor_get(x_6288, 1); +x_6294 = lean_ctor_get(x_6288, 0); +lean_dec(x_6294); +x_6295 = !lean_is_exclusive(x_6289); +if (x_6295 == 0) +{ +lean_object* x_6296; lean_object* x_6297; lean_object* x_6298; lean_object* x_6299; uint8_t x_6300; +x_6296 = lean_ctor_get(x_6289, 0); +x_6297 = lean_array_get_size(x_5957); +x_6298 = lean_ctor_get(x_6296, 3); +lean_inc(x_6298); +lean_dec(x_6296); +x_6299 = lean_array_get_size(x_6298); +lean_dec(x_6298); +x_6300 = lean_nat_dec_lt(x_6297, x_6299); +if (x_6300 == 0) +{ +uint8_t x_6301; +x_6301 = lean_nat_dec_eq(x_6297, x_6299); +if (x_6301 == 0) +{ +lean_object* x_6302; lean_object* x_6303; lean_object* x_6304; lean_object* x_6305; lean_object* x_6306; lean_object* x_6307; lean_object* x_6308; lean_object* x_6309; lean_object* x_6310; lean_object* x_6311; lean_object* x_6312; lean_object* x_6313; lean_object* x_6314; lean_object* x_6315; lean_object* x_6316; lean_object* x_6317; +x_6302 = lean_unsigned_to_nat(0u); +x_6303 = l_Array_extract___rarg(x_5957, x_6302, x_6299); +x_6304 = l_Array_extract___rarg(x_5957, x_6299, x_6297); +lean_dec(x_6297); +lean_inc(x_153); +lean_ctor_set_tag(x_6288, 6); +lean_ctor_set(x_6288, 1, x_6303); +lean_ctor_set(x_6288, 0, x_153); +x_6305 = lean_ctor_get(x_1, 0); +lean_inc(x_6305); +x_6306 = l_Lean_IR_ToIR_bindVar(x_6305, x_5963, x_4, x_5, x_6293); +x_6307 = lean_ctor_get(x_6306, 0); +lean_inc(x_6307); +x_6308 = lean_ctor_get(x_6306, 1); +lean_inc(x_6308); +lean_dec(x_6306); +x_6309 = lean_ctor_get(x_6307, 0); +lean_inc(x_6309); +x_6310 = lean_ctor_get(x_6307, 1); +lean_inc(x_6310); +lean_dec(x_6307); +x_6311 = l_Lean_IR_ToIR_newVar(x_6310, x_4, x_5, x_6308); +x_6312 = lean_ctor_get(x_6311, 0); +lean_inc(x_6312); +x_6313 = lean_ctor_get(x_6311, 1); +lean_inc(x_6313); +lean_dec(x_6311); +x_6314 = lean_ctor_get(x_6312, 0); +lean_inc(x_6314); +x_6315 = lean_ctor_get(x_6312, 1); +lean_inc(x_6315); +lean_dec(x_6312); +x_6316 = lean_ctor_get(x_1, 2); +lean_inc(x_6316); +lean_inc(x_5); +lean_inc(x_4); +x_6317 = l_Lean_IR_ToIR_lowerType(x_6316, x_6315, x_4, x_5, x_6313); +if (lean_obj_tag(x_6317) == 0) +{ +lean_object* x_6318; lean_object* x_6319; lean_object* x_6320; lean_object* x_6321; lean_object* x_6322; +x_6318 = lean_ctor_get(x_6317, 0); +lean_inc(x_6318); +x_6319 = lean_ctor_get(x_6317, 1); +lean_inc(x_6319); +lean_dec(x_6317); +x_6320 = lean_ctor_get(x_6318, 0); +lean_inc(x_6320); +x_6321 = lean_ctor_get(x_6318, 1); +lean_inc(x_6321); +lean_dec(x_6318); +lean_inc(x_5); +lean_inc(x_4); +lean_inc(x_2); +x_6322 = l_Lean_IR_ToIR_lowerLet___lambda__3(x_2, x_6314, x_6304, x_6309, x_6288, x_6320, x_6321, x_4, x_5, 
x_6319); +if (lean_obj_tag(x_6322) == 0) +{ +lean_object* x_6323; lean_object* x_6324; uint8_t x_6325; +x_6323 = lean_ctor_get(x_6322, 0); +lean_inc(x_6323); +x_6324 = lean_ctor_get(x_6322, 1); +lean_inc(x_6324); +lean_dec(x_6322); +x_6325 = !lean_is_exclusive(x_6323); +if (x_6325 == 0) +{ +lean_object* x_6326; +x_6326 = lean_ctor_get(x_6323, 0); +lean_ctor_set(x_6289, 0, x_6326); +lean_ctor_set(x_6323, 0, x_6289); +x_6257 = x_6323; +x_6258 = x_6324; +goto block_6287; +} +else +{ +lean_object* x_6327; lean_object* x_6328; lean_object* x_6329; +x_6327 = lean_ctor_get(x_6323, 0); +x_6328 = lean_ctor_get(x_6323, 1); +lean_inc(x_6328); +lean_inc(x_6327); +lean_dec(x_6323); +lean_ctor_set(x_6289, 0, x_6327); +x_6329 = lean_alloc_ctor(0, 2, 0); +lean_ctor_set(x_6329, 0, x_6289); +lean_ctor_set(x_6329, 1, x_6328); +x_6257 = x_6329; +x_6258 = x_6324; +goto block_6287; +} +} +else +{ +uint8_t x_6330; +lean_free_object(x_6289); +lean_dec(x_5968); +lean_dec(x_5957); +lean_dec(x_153); +lean_dec(x_5); +lean_dec(x_4); +lean_dec(x_2); +lean_dec(x_1); +x_6330 = !lean_is_exclusive(x_6322); +if (x_6330 == 0) +{ +return x_6322; +} +else +{ +lean_object* x_6331; lean_object* x_6332; lean_object* x_6333; +x_6331 = lean_ctor_get(x_6322, 0); +x_6332 = lean_ctor_get(x_6322, 1); +lean_inc(x_6332); +lean_inc(x_6331); +lean_dec(x_6322); +x_6333 = lean_alloc_ctor(1, 2, 0); +lean_ctor_set(x_6333, 0, x_6331); +lean_ctor_set(x_6333, 1, x_6332); +return x_6333; +} +} +} +else +{ +uint8_t x_6334; +lean_dec(x_6314); +lean_dec(x_6309); +lean_dec(x_6288); +lean_dec(x_6304); +lean_free_object(x_6289); +lean_dec(x_5968); +lean_dec(x_5957); +lean_dec(x_153); +lean_dec(x_5); +lean_dec(x_4); +lean_dec(x_2); +lean_dec(x_1); +x_6334 = !lean_is_exclusive(x_6317); +if (x_6334 == 0) +{ +return x_6317; +} +else +{ +lean_object* x_6335; lean_object* x_6336; lean_object* x_6337; +x_6335 = lean_ctor_get(x_6317, 0); +x_6336 = lean_ctor_get(x_6317, 1); +lean_inc(x_6336); +lean_inc(x_6335); +lean_dec(x_6317); +x_6337 = lean_alloc_ctor(1, 2, 0); +lean_ctor_set(x_6337, 0, x_6335); +lean_ctor_set(x_6337, 1, x_6336); +return x_6337; +} +} +} +else +{ +lean_object* x_6338; lean_object* x_6339; lean_object* x_6340; lean_object* x_6341; lean_object* x_6342; lean_object* x_6343; lean_object* x_6344; lean_object* x_6345; +lean_dec(x_6299); +lean_dec(x_6297); +lean_inc(x_5957); +lean_inc(x_153); +lean_ctor_set_tag(x_6288, 6); +lean_ctor_set(x_6288, 1, x_5957); +lean_ctor_set(x_6288, 0, x_153); +x_6338 = lean_ctor_get(x_1, 0); +lean_inc(x_6338); +x_6339 = l_Lean_IR_ToIR_bindVar(x_6338, x_5963, x_4, x_5, x_6293); +x_6340 = lean_ctor_get(x_6339, 0); +lean_inc(x_6340); +x_6341 = lean_ctor_get(x_6339, 1); +lean_inc(x_6341); +lean_dec(x_6339); +x_6342 = lean_ctor_get(x_6340, 0); +lean_inc(x_6342); +x_6343 = lean_ctor_get(x_6340, 1); +lean_inc(x_6343); +lean_dec(x_6340); +x_6344 = lean_ctor_get(x_1, 2); +lean_inc(x_6344); +lean_inc(x_5); +lean_inc(x_4); +x_6345 = l_Lean_IR_ToIR_lowerType(x_6344, x_6343, x_4, x_5, x_6341); +if (lean_obj_tag(x_6345) == 0) +{ +lean_object* x_6346; lean_object* x_6347; lean_object* x_6348; lean_object* x_6349; lean_object* x_6350; +x_6346 = lean_ctor_get(x_6345, 0); +lean_inc(x_6346); +x_6347 = lean_ctor_get(x_6345, 1); +lean_inc(x_6347); +lean_dec(x_6345); +x_6348 = lean_ctor_get(x_6346, 0); +lean_inc(x_6348); +x_6349 = lean_ctor_get(x_6346, 1); +lean_inc(x_6349); +lean_dec(x_6346); +lean_inc(x_5); +lean_inc(x_4); +lean_inc(x_2); +x_6350 = l_Lean_IR_ToIR_lowerLet___lambda__1(x_2, x_6342, x_6288, x_6348, x_6349, x_4, x_5, 
x_6347); +if (lean_obj_tag(x_6350) == 0) +{ +lean_object* x_6351; lean_object* x_6352; uint8_t x_6353; +x_6351 = lean_ctor_get(x_6350, 0); +lean_inc(x_6351); +x_6352 = lean_ctor_get(x_6350, 1); +lean_inc(x_6352); +lean_dec(x_6350); +x_6353 = !lean_is_exclusive(x_6351); +if (x_6353 == 0) +{ +lean_object* x_6354; +x_6354 = lean_ctor_get(x_6351, 0); +lean_ctor_set(x_6289, 0, x_6354); +lean_ctor_set(x_6351, 0, x_6289); +x_6257 = x_6351; +x_6258 = x_6352; +goto block_6287; +} +else +{ +lean_object* x_6355; lean_object* x_6356; lean_object* x_6357; +x_6355 = lean_ctor_get(x_6351, 0); +x_6356 = lean_ctor_get(x_6351, 1); +lean_inc(x_6356); +lean_inc(x_6355); +lean_dec(x_6351); +lean_ctor_set(x_6289, 0, x_6355); +x_6357 = lean_alloc_ctor(0, 2, 0); +lean_ctor_set(x_6357, 0, x_6289); +lean_ctor_set(x_6357, 1, x_6356); +x_6257 = x_6357; +x_6258 = x_6352; +goto block_6287; +} +} +else +{ +uint8_t x_6358; +lean_free_object(x_6289); +lean_dec(x_5968); +lean_dec(x_5957); +lean_dec(x_153); +lean_dec(x_5); +lean_dec(x_4); +lean_dec(x_2); +lean_dec(x_1); +x_6358 = !lean_is_exclusive(x_6350); +if (x_6358 == 0) +{ +return x_6350; +} +else +{ +lean_object* x_6359; lean_object* x_6360; lean_object* x_6361; +x_6359 = lean_ctor_get(x_6350, 0); +x_6360 = lean_ctor_get(x_6350, 1); +lean_inc(x_6360); +lean_inc(x_6359); +lean_dec(x_6350); +x_6361 = lean_alloc_ctor(1, 2, 0); +lean_ctor_set(x_6361, 0, x_6359); +lean_ctor_set(x_6361, 1, x_6360); +return x_6361; +} +} +} +else +{ +uint8_t x_6362; +lean_dec(x_6342); +lean_dec(x_6288); +lean_free_object(x_6289); +lean_dec(x_5968); +lean_dec(x_5957); +lean_dec(x_153); +lean_dec(x_5); +lean_dec(x_4); +lean_dec(x_2); +lean_dec(x_1); +x_6362 = !lean_is_exclusive(x_6345); +if (x_6362 == 0) +{ +return x_6345; +} +else +{ +lean_object* x_6363; lean_object* x_6364; lean_object* x_6365; +x_6363 = lean_ctor_get(x_6345, 0); +x_6364 = lean_ctor_get(x_6345, 1); +lean_inc(x_6364); +lean_inc(x_6363); +lean_dec(x_6345); +x_6365 = lean_alloc_ctor(1, 2, 0); +lean_ctor_set(x_6365, 0, x_6363); +lean_ctor_set(x_6365, 1, x_6364); +return x_6365; +} +} +} +} +else +{ +lean_object* x_6366; lean_object* x_6367; lean_object* x_6368; lean_object* x_6369; lean_object* x_6370; lean_object* x_6371; lean_object* x_6372; lean_object* x_6373; +lean_dec(x_6299); +lean_dec(x_6297); +lean_inc(x_5957); +lean_inc(x_153); +lean_ctor_set_tag(x_6288, 7); +lean_ctor_set(x_6288, 1, x_5957); +lean_ctor_set(x_6288, 0, x_153); +x_6366 = lean_ctor_get(x_1, 0); +lean_inc(x_6366); +x_6367 = l_Lean_IR_ToIR_bindVar(x_6366, x_5963, x_4, x_5, x_6293); +x_6368 = lean_ctor_get(x_6367, 0); +lean_inc(x_6368); +x_6369 = lean_ctor_get(x_6367, 1); +lean_inc(x_6369); +lean_dec(x_6367); +x_6370 = lean_ctor_get(x_6368, 0); +lean_inc(x_6370); +x_6371 = lean_ctor_get(x_6368, 1); +lean_inc(x_6371); +lean_dec(x_6368); +x_6372 = lean_box(7); +lean_inc(x_5); +lean_inc(x_4); +lean_inc(x_2); +x_6373 = l_Lean_IR_ToIR_lowerLet___lambda__1(x_2, x_6370, x_6288, x_6372, x_6371, x_4, x_5, x_6369); +if (lean_obj_tag(x_6373) == 0) +{ +lean_object* x_6374; lean_object* x_6375; uint8_t x_6376; +x_6374 = lean_ctor_get(x_6373, 0); +lean_inc(x_6374); +x_6375 = lean_ctor_get(x_6373, 1); +lean_inc(x_6375); +lean_dec(x_6373); +x_6376 = !lean_is_exclusive(x_6374); +if (x_6376 == 0) +{ +lean_object* x_6377; +x_6377 = lean_ctor_get(x_6374, 0); +lean_ctor_set(x_6289, 0, x_6377); +lean_ctor_set(x_6374, 0, x_6289); +x_6257 = x_6374; +x_6258 = x_6375; +goto block_6287; +} +else +{ +lean_object* x_6378; lean_object* x_6379; lean_object* x_6380; +x_6378 = 
lean_ctor_get(x_6374, 0); +x_6379 = lean_ctor_get(x_6374, 1); +lean_inc(x_6379); +lean_inc(x_6378); +lean_dec(x_6374); +lean_ctor_set(x_6289, 0, x_6378); +x_6380 = lean_alloc_ctor(0, 2, 0); +lean_ctor_set(x_6380, 0, x_6289); +lean_ctor_set(x_6380, 1, x_6379); +x_6257 = x_6380; +x_6258 = x_6375; +goto block_6287; +} +} +else +{ +uint8_t x_6381; +lean_free_object(x_6289); +lean_dec(x_5968); +lean_dec(x_5957); +lean_dec(x_153); +lean_dec(x_5); +lean_dec(x_4); +lean_dec(x_2); +lean_dec(x_1); +x_6381 = !lean_is_exclusive(x_6373); +if (x_6381 == 0) +{ +return x_6373; +} +else +{ +lean_object* x_6382; lean_object* x_6383; lean_object* x_6384; +x_6382 = lean_ctor_get(x_6373, 0); +x_6383 = lean_ctor_get(x_6373, 1); +lean_inc(x_6383); +lean_inc(x_6382); +lean_dec(x_6373); +x_6384 = lean_alloc_ctor(1, 2, 0); +lean_ctor_set(x_6384, 0, x_6382); +lean_ctor_set(x_6384, 1, x_6383); +return x_6384; +} +} +} +} +else +{ +lean_object* x_6385; lean_object* x_6386; lean_object* x_6387; lean_object* x_6388; uint8_t x_6389; +x_6385 = lean_ctor_get(x_6289, 0); +lean_inc(x_6385); +lean_dec(x_6289); +x_6386 = lean_array_get_size(x_5957); +x_6387 = lean_ctor_get(x_6385, 3); +lean_inc(x_6387); +lean_dec(x_6385); +x_6388 = lean_array_get_size(x_6387); +lean_dec(x_6387); +x_6389 = lean_nat_dec_lt(x_6386, x_6388); +if (x_6389 == 0) +{ +uint8_t x_6390; +x_6390 = lean_nat_dec_eq(x_6386, x_6388); +if (x_6390 == 0) +{ +lean_object* x_6391; lean_object* x_6392; lean_object* x_6393; lean_object* x_6394; lean_object* x_6395; lean_object* x_6396; lean_object* x_6397; lean_object* x_6398; lean_object* x_6399; lean_object* x_6400; lean_object* x_6401; lean_object* x_6402; lean_object* x_6403; lean_object* x_6404; lean_object* x_6405; lean_object* x_6406; +x_6391 = lean_unsigned_to_nat(0u); +x_6392 = l_Array_extract___rarg(x_5957, x_6391, x_6388); +x_6393 = l_Array_extract___rarg(x_5957, x_6388, x_6386); +lean_dec(x_6386); +lean_inc(x_153); +lean_ctor_set_tag(x_6288, 6); +lean_ctor_set(x_6288, 1, x_6392); +lean_ctor_set(x_6288, 0, x_153); +x_6394 = lean_ctor_get(x_1, 0); +lean_inc(x_6394); +x_6395 = l_Lean_IR_ToIR_bindVar(x_6394, x_5963, x_4, x_5, x_6293); +x_6396 = lean_ctor_get(x_6395, 0); +lean_inc(x_6396); +x_6397 = lean_ctor_get(x_6395, 1); +lean_inc(x_6397); +lean_dec(x_6395); +x_6398 = lean_ctor_get(x_6396, 0); +lean_inc(x_6398); +x_6399 = lean_ctor_get(x_6396, 1); +lean_inc(x_6399); +lean_dec(x_6396); +x_6400 = l_Lean_IR_ToIR_newVar(x_6399, x_4, x_5, x_6397); +x_6401 = lean_ctor_get(x_6400, 0); +lean_inc(x_6401); +x_6402 = lean_ctor_get(x_6400, 1); +lean_inc(x_6402); +lean_dec(x_6400); +x_6403 = lean_ctor_get(x_6401, 0); +lean_inc(x_6403); +x_6404 = lean_ctor_get(x_6401, 1); +lean_inc(x_6404); +lean_dec(x_6401); +x_6405 = lean_ctor_get(x_1, 2); +lean_inc(x_6405); +lean_inc(x_5); +lean_inc(x_4); +x_6406 = l_Lean_IR_ToIR_lowerType(x_6405, x_6404, x_4, x_5, x_6402); +if (lean_obj_tag(x_6406) == 0) +{ +lean_object* x_6407; lean_object* x_6408; lean_object* x_6409; lean_object* x_6410; lean_object* x_6411; +x_6407 = lean_ctor_get(x_6406, 0); +lean_inc(x_6407); +x_6408 = lean_ctor_get(x_6406, 1); +lean_inc(x_6408); +lean_dec(x_6406); +x_6409 = lean_ctor_get(x_6407, 0); +lean_inc(x_6409); +x_6410 = lean_ctor_get(x_6407, 1); +lean_inc(x_6410); +lean_dec(x_6407); +lean_inc(x_5); +lean_inc(x_4); +lean_inc(x_2); +x_6411 = l_Lean_IR_ToIR_lowerLet___lambda__3(x_2, x_6403, x_6393, x_6398, x_6288, x_6409, x_6410, x_4, x_5, x_6408); +if (lean_obj_tag(x_6411) == 0) +{ +lean_object* x_6412; lean_object* x_6413; lean_object* x_6414; 
lean_object* x_6415; lean_object* x_6416; lean_object* x_6417; lean_object* x_6418; +x_6412 = lean_ctor_get(x_6411, 0); +lean_inc(x_6412); +x_6413 = lean_ctor_get(x_6411, 1); +lean_inc(x_6413); +lean_dec(x_6411); +x_6414 = lean_ctor_get(x_6412, 0); +lean_inc(x_6414); +x_6415 = lean_ctor_get(x_6412, 1); +lean_inc(x_6415); +if (lean_is_exclusive(x_6412)) { + lean_ctor_release(x_6412, 0); + lean_ctor_release(x_6412, 1); + x_6416 = x_6412; +} else { + lean_dec_ref(x_6412); + x_6416 = lean_box(0); +} +x_6417 = lean_alloc_ctor(1, 1, 0); +lean_ctor_set(x_6417, 0, x_6414); +if (lean_is_scalar(x_6416)) { + x_6418 = lean_alloc_ctor(0, 2, 0); +} else { + x_6418 = x_6416; +} +lean_ctor_set(x_6418, 0, x_6417); +lean_ctor_set(x_6418, 1, x_6415); +x_6257 = x_6418; +x_6258 = x_6413; +goto block_6287; +} +else +{ +lean_object* x_6419; lean_object* x_6420; lean_object* x_6421; lean_object* x_6422; +lean_dec(x_5968); +lean_dec(x_5957); +lean_dec(x_153); +lean_dec(x_5); +lean_dec(x_4); +lean_dec(x_2); +lean_dec(x_1); +x_6419 = lean_ctor_get(x_6411, 0); +lean_inc(x_6419); +x_6420 = lean_ctor_get(x_6411, 1); +lean_inc(x_6420); +if (lean_is_exclusive(x_6411)) { + lean_ctor_release(x_6411, 0); + lean_ctor_release(x_6411, 1); + x_6421 = x_6411; +} else { + lean_dec_ref(x_6411); + x_6421 = lean_box(0); +} +if (lean_is_scalar(x_6421)) { + x_6422 = lean_alloc_ctor(1, 2, 0); +} else { + x_6422 = x_6421; +} +lean_ctor_set(x_6422, 0, x_6419); +lean_ctor_set(x_6422, 1, x_6420); +return x_6422; +} +} +else +{ +lean_object* x_6423; lean_object* x_6424; lean_object* x_6425; lean_object* x_6426; +lean_dec(x_6403); +lean_dec(x_6398); +lean_dec(x_6288); +lean_dec(x_6393); +lean_dec(x_5968); +lean_dec(x_5957); +lean_dec(x_153); +lean_dec(x_5); +lean_dec(x_4); +lean_dec(x_2); +lean_dec(x_1); +x_6423 = lean_ctor_get(x_6406, 0); +lean_inc(x_6423); +x_6424 = lean_ctor_get(x_6406, 1); +lean_inc(x_6424); +if (lean_is_exclusive(x_6406)) { + lean_ctor_release(x_6406, 0); + lean_ctor_release(x_6406, 1); + x_6425 = x_6406; +} else { + lean_dec_ref(x_6406); + x_6425 = lean_box(0); +} +if (lean_is_scalar(x_6425)) { + x_6426 = lean_alloc_ctor(1, 2, 0); +} else { + x_6426 = x_6425; +} +lean_ctor_set(x_6426, 0, x_6423); +lean_ctor_set(x_6426, 1, x_6424); +return x_6426; +} +} +else +{ +lean_object* x_6427; lean_object* x_6428; lean_object* x_6429; lean_object* x_6430; lean_object* x_6431; lean_object* x_6432; lean_object* x_6433; lean_object* x_6434; +lean_dec(x_6388); +lean_dec(x_6386); +lean_inc(x_5957); +lean_inc(x_153); +lean_ctor_set_tag(x_6288, 6); +lean_ctor_set(x_6288, 1, x_5957); +lean_ctor_set(x_6288, 0, x_153); +x_6427 = lean_ctor_get(x_1, 0); +lean_inc(x_6427); +x_6428 = l_Lean_IR_ToIR_bindVar(x_6427, x_5963, x_4, x_5, x_6293); +x_6429 = lean_ctor_get(x_6428, 0); +lean_inc(x_6429); +x_6430 = lean_ctor_get(x_6428, 1); +lean_inc(x_6430); +lean_dec(x_6428); +x_6431 = lean_ctor_get(x_6429, 0); +lean_inc(x_6431); +x_6432 = lean_ctor_get(x_6429, 1); +lean_inc(x_6432); +lean_dec(x_6429); +x_6433 = lean_ctor_get(x_1, 2); +lean_inc(x_6433); +lean_inc(x_5); +lean_inc(x_4); +x_6434 = l_Lean_IR_ToIR_lowerType(x_6433, x_6432, x_4, x_5, x_6430); +if (lean_obj_tag(x_6434) == 0) +{ +lean_object* x_6435; lean_object* x_6436; lean_object* x_6437; lean_object* x_6438; lean_object* x_6439; +x_6435 = lean_ctor_get(x_6434, 0); +lean_inc(x_6435); +x_6436 = lean_ctor_get(x_6434, 1); +lean_inc(x_6436); +lean_dec(x_6434); +x_6437 = lean_ctor_get(x_6435, 0); +lean_inc(x_6437); +x_6438 = lean_ctor_get(x_6435, 1); +lean_inc(x_6438); +lean_dec(x_6435); 
+lean_inc(x_5); +lean_inc(x_4); +lean_inc(x_2); +x_6439 = l_Lean_IR_ToIR_lowerLet___lambda__1(x_2, x_6431, x_6288, x_6437, x_6438, x_4, x_5, x_6436); +if (lean_obj_tag(x_6439) == 0) +{ +lean_object* x_6440; lean_object* x_6441; lean_object* x_6442; lean_object* x_6443; lean_object* x_6444; lean_object* x_6445; lean_object* x_6446; +x_6440 = lean_ctor_get(x_6439, 0); +lean_inc(x_6440); +x_6441 = lean_ctor_get(x_6439, 1); +lean_inc(x_6441); +lean_dec(x_6439); +x_6442 = lean_ctor_get(x_6440, 0); +lean_inc(x_6442); +x_6443 = lean_ctor_get(x_6440, 1); +lean_inc(x_6443); +if (lean_is_exclusive(x_6440)) { + lean_ctor_release(x_6440, 0); + lean_ctor_release(x_6440, 1); + x_6444 = x_6440; +} else { + lean_dec_ref(x_6440); + x_6444 = lean_box(0); +} +x_6445 = lean_alloc_ctor(1, 1, 0); +lean_ctor_set(x_6445, 0, x_6442); +if (lean_is_scalar(x_6444)) { + x_6446 = lean_alloc_ctor(0, 2, 0); +} else { + x_6446 = x_6444; +} +lean_ctor_set(x_6446, 0, x_6445); +lean_ctor_set(x_6446, 1, x_6443); +x_6257 = x_6446; +x_6258 = x_6441; +goto block_6287; +} +else +{ +lean_object* x_6447; lean_object* x_6448; lean_object* x_6449; lean_object* x_6450; +lean_dec(x_5968); +lean_dec(x_5957); +lean_dec(x_153); +lean_dec(x_5); +lean_dec(x_4); +lean_dec(x_2); +lean_dec(x_1); +x_6447 = lean_ctor_get(x_6439, 0); +lean_inc(x_6447); +x_6448 = lean_ctor_get(x_6439, 1); +lean_inc(x_6448); +if (lean_is_exclusive(x_6439)) { + lean_ctor_release(x_6439, 0); + lean_ctor_release(x_6439, 1); + x_6449 = x_6439; +} else { + lean_dec_ref(x_6439); + x_6449 = lean_box(0); +} +if (lean_is_scalar(x_6449)) { + x_6450 = lean_alloc_ctor(1, 2, 0); +} else { + x_6450 = x_6449; +} +lean_ctor_set(x_6450, 0, x_6447); +lean_ctor_set(x_6450, 1, x_6448); +return x_6450; +} +} +else +{ +lean_object* x_6451; lean_object* x_6452; lean_object* x_6453; lean_object* x_6454; +lean_dec(x_6431); +lean_dec(x_6288); +lean_dec(x_5968); +lean_dec(x_5957); +lean_dec(x_153); +lean_dec(x_5); +lean_dec(x_4); +lean_dec(x_2); +lean_dec(x_1); +x_6451 = lean_ctor_get(x_6434, 0); +lean_inc(x_6451); +x_6452 = lean_ctor_get(x_6434, 1); +lean_inc(x_6452); +if (lean_is_exclusive(x_6434)) { + lean_ctor_release(x_6434, 0); + lean_ctor_release(x_6434, 1); + x_6453 = x_6434; +} else { + lean_dec_ref(x_6434); + x_6453 = lean_box(0); +} +if (lean_is_scalar(x_6453)) { + x_6454 = lean_alloc_ctor(1, 2, 0); +} else { + x_6454 = x_6453; +} +lean_ctor_set(x_6454, 0, x_6451); +lean_ctor_set(x_6454, 1, x_6452); +return x_6454; +} +} +} +else +{ +lean_object* x_6455; lean_object* x_6456; lean_object* x_6457; lean_object* x_6458; lean_object* x_6459; lean_object* x_6460; lean_object* x_6461; lean_object* x_6462; +lean_dec(x_6388); +lean_dec(x_6386); +lean_inc(x_5957); +lean_inc(x_153); +lean_ctor_set_tag(x_6288, 7); +lean_ctor_set(x_6288, 1, x_5957); +lean_ctor_set(x_6288, 0, x_153); +x_6455 = lean_ctor_get(x_1, 0); +lean_inc(x_6455); +x_6456 = l_Lean_IR_ToIR_bindVar(x_6455, x_5963, x_4, x_5, x_6293); +x_6457 = lean_ctor_get(x_6456, 0); +lean_inc(x_6457); +x_6458 = lean_ctor_get(x_6456, 1); +lean_inc(x_6458); +lean_dec(x_6456); +x_6459 = lean_ctor_get(x_6457, 0); +lean_inc(x_6459); +x_6460 = lean_ctor_get(x_6457, 1); +lean_inc(x_6460); +lean_dec(x_6457); +x_6461 = lean_box(7); +lean_inc(x_5); +lean_inc(x_4); +lean_inc(x_2); +x_6462 = l_Lean_IR_ToIR_lowerLet___lambda__1(x_2, x_6459, x_6288, x_6461, x_6460, x_4, x_5, x_6458); +if (lean_obj_tag(x_6462) == 0) +{ +lean_object* x_6463; lean_object* x_6464; lean_object* x_6465; lean_object* x_6466; lean_object* x_6467; lean_object* x_6468; 
lean_object* x_6469; +x_6463 = lean_ctor_get(x_6462, 0); +lean_inc(x_6463); +x_6464 = lean_ctor_get(x_6462, 1); +lean_inc(x_6464); +lean_dec(x_6462); +x_6465 = lean_ctor_get(x_6463, 0); +lean_inc(x_6465); +x_6466 = lean_ctor_get(x_6463, 1); +lean_inc(x_6466); +if (lean_is_exclusive(x_6463)) { + lean_ctor_release(x_6463, 0); + lean_ctor_release(x_6463, 1); + x_6467 = x_6463; +} else { + lean_dec_ref(x_6463); + x_6467 = lean_box(0); +} +x_6468 = lean_alloc_ctor(1, 1, 0); +lean_ctor_set(x_6468, 0, x_6465); +if (lean_is_scalar(x_6467)) { + x_6469 = lean_alloc_ctor(0, 2, 0); +} else { + x_6469 = x_6467; +} +lean_ctor_set(x_6469, 0, x_6468); +lean_ctor_set(x_6469, 1, x_6466); +x_6257 = x_6469; +x_6258 = x_6464; +goto block_6287; +} +else +{ +lean_object* x_6470; lean_object* x_6471; lean_object* x_6472; lean_object* x_6473; +lean_dec(x_5968); +lean_dec(x_5957); +lean_dec(x_153); +lean_dec(x_5); +lean_dec(x_4); +lean_dec(x_2); +lean_dec(x_1); +x_6470 = lean_ctor_get(x_6462, 0); +lean_inc(x_6470); +x_6471 = lean_ctor_get(x_6462, 1); +lean_inc(x_6471); +if (lean_is_exclusive(x_6462)) { + lean_ctor_release(x_6462, 0); + lean_ctor_release(x_6462, 1); + x_6472 = x_6462; +} else { + lean_dec_ref(x_6462); + x_6472 = lean_box(0); +} +if (lean_is_scalar(x_6472)) { + x_6473 = lean_alloc_ctor(1, 2, 0); +} else { + x_6473 = x_6472; +} +lean_ctor_set(x_6473, 0, x_6470); +lean_ctor_set(x_6473, 1, x_6471); +return x_6473; +} +} +} +} +else +{ +lean_object* x_6474; lean_object* x_6475; lean_object* x_6476; lean_object* x_6477; lean_object* x_6478; lean_object* x_6479; uint8_t x_6480; +x_6474 = lean_ctor_get(x_6288, 1); +lean_inc(x_6474); +lean_dec(x_6288); +x_6475 = lean_ctor_get(x_6289, 0); +lean_inc(x_6475); +if (lean_is_exclusive(x_6289)) { + lean_ctor_release(x_6289, 0); + x_6476 = x_6289; +} else { + lean_dec_ref(x_6289); + x_6476 = lean_box(0); +} +x_6477 = lean_array_get_size(x_5957); +x_6478 = lean_ctor_get(x_6475, 3); +lean_inc(x_6478); +lean_dec(x_6475); +x_6479 = lean_array_get_size(x_6478); +lean_dec(x_6478); +x_6480 = lean_nat_dec_lt(x_6477, x_6479); +if (x_6480 == 0) +{ +uint8_t x_6481; +x_6481 = lean_nat_dec_eq(x_6477, x_6479); +if (x_6481 == 0) +{ +lean_object* x_6482; lean_object* x_6483; lean_object* x_6484; lean_object* x_6485; lean_object* x_6486; lean_object* x_6487; lean_object* x_6488; lean_object* x_6489; lean_object* x_6490; lean_object* x_6491; lean_object* x_6492; lean_object* x_6493; lean_object* x_6494; lean_object* x_6495; lean_object* x_6496; lean_object* x_6497; lean_object* x_6498; +x_6482 = lean_unsigned_to_nat(0u); +x_6483 = l_Array_extract___rarg(x_5957, x_6482, x_6479); +x_6484 = l_Array_extract___rarg(x_5957, x_6479, x_6477); +lean_dec(x_6477); +lean_inc(x_153); +x_6485 = lean_alloc_ctor(6, 2, 0); +lean_ctor_set(x_6485, 0, x_153); +lean_ctor_set(x_6485, 1, x_6483); +x_6486 = lean_ctor_get(x_1, 0); +lean_inc(x_6486); +x_6487 = l_Lean_IR_ToIR_bindVar(x_6486, x_5963, x_4, x_5, x_6474); +x_6488 = lean_ctor_get(x_6487, 0); +lean_inc(x_6488); +x_6489 = lean_ctor_get(x_6487, 1); +lean_inc(x_6489); +lean_dec(x_6487); +x_6490 = lean_ctor_get(x_6488, 0); +lean_inc(x_6490); +x_6491 = lean_ctor_get(x_6488, 1); +lean_inc(x_6491); +lean_dec(x_6488); +x_6492 = l_Lean_IR_ToIR_newVar(x_6491, x_4, x_5, x_6489); +x_6493 = lean_ctor_get(x_6492, 0); +lean_inc(x_6493); +x_6494 = lean_ctor_get(x_6492, 1); +lean_inc(x_6494); +lean_dec(x_6492); +x_6495 = lean_ctor_get(x_6493, 0); +lean_inc(x_6495); +x_6496 = lean_ctor_get(x_6493, 1); +lean_inc(x_6496); +lean_dec(x_6493); +x_6497 = lean_ctor_get(x_1, 
2); +lean_inc(x_6497); +lean_inc(x_5); +lean_inc(x_4); +x_6498 = l_Lean_IR_ToIR_lowerType(x_6497, x_6496, x_4, x_5, x_6494); +if (lean_obj_tag(x_6498) == 0) +{ +lean_object* x_6499; lean_object* x_6500; lean_object* x_6501; lean_object* x_6502; lean_object* x_6503; +x_6499 = lean_ctor_get(x_6498, 0); +lean_inc(x_6499); +x_6500 = lean_ctor_get(x_6498, 1); +lean_inc(x_6500); +lean_dec(x_6498); +x_6501 = lean_ctor_get(x_6499, 0); +lean_inc(x_6501); +x_6502 = lean_ctor_get(x_6499, 1); +lean_inc(x_6502); +lean_dec(x_6499); +lean_inc(x_5); +lean_inc(x_4); +lean_inc(x_2); +x_6503 = l_Lean_IR_ToIR_lowerLet___lambda__3(x_2, x_6495, x_6484, x_6490, x_6485, x_6501, x_6502, x_4, x_5, x_6500); +if (lean_obj_tag(x_6503) == 0) +{ +lean_object* x_6504; lean_object* x_6505; lean_object* x_6506; lean_object* x_6507; lean_object* x_6508; lean_object* x_6509; lean_object* x_6510; +x_6504 = lean_ctor_get(x_6503, 0); +lean_inc(x_6504); +x_6505 = lean_ctor_get(x_6503, 1); +lean_inc(x_6505); +lean_dec(x_6503); +x_6506 = lean_ctor_get(x_6504, 0); +lean_inc(x_6506); +x_6507 = lean_ctor_get(x_6504, 1); +lean_inc(x_6507); +if (lean_is_exclusive(x_6504)) { + lean_ctor_release(x_6504, 0); + lean_ctor_release(x_6504, 1); + x_6508 = x_6504; +} else { + lean_dec_ref(x_6504); + x_6508 = lean_box(0); +} +if (lean_is_scalar(x_6476)) { + x_6509 = lean_alloc_ctor(1, 1, 0); +} else { + x_6509 = x_6476; +} +lean_ctor_set(x_6509, 0, x_6506); +if (lean_is_scalar(x_6508)) { + x_6510 = lean_alloc_ctor(0, 2, 0); +} else { + x_6510 = x_6508; +} +lean_ctor_set(x_6510, 0, x_6509); +lean_ctor_set(x_6510, 1, x_6507); +x_6257 = x_6510; +x_6258 = x_6505; +goto block_6287; +} +else +{ +lean_object* x_6511; lean_object* x_6512; lean_object* x_6513; lean_object* x_6514; +lean_dec(x_6476); +lean_dec(x_5968); +lean_dec(x_5957); +lean_dec(x_153); +lean_dec(x_5); +lean_dec(x_4); +lean_dec(x_2); +lean_dec(x_1); +x_6511 = lean_ctor_get(x_6503, 0); +lean_inc(x_6511); +x_6512 = lean_ctor_get(x_6503, 1); +lean_inc(x_6512); +if (lean_is_exclusive(x_6503)) { + lean_ctor_release(x_6503, 0); + lean_ctor_release(x_6503, 1); + x_6513 = x_6503; +} else { + lean_dec_ref(x_6503); + x_6513 = lean_box(0); +} +if (lean_is_scalar(x_6513)) { + x_6514 = lean_alloc_ctor(1, 2, 0); +} else { + x_6514 = x_6513; +} +lean_ctor_set(x_6514, 0, x_6511); +lean_ctor_set(x_6514, 1, x_6512); +return x_6514; +} +} +else +{ +lean_object* x_6515; lean_object* x_6516; lean_object* x_6517; lean_object* x_6518; +lean_dec(x_6495); +lean_dec(x_6490); +lean_dec(x_6485); +lean_dec(x_6484); +lean_dec(x_6476); +lean_dec(x_5968); +lean_dec(x_5957); +lean_dec(x_153); +lean_dec(x_5); +lean_dec(x_4); +lean_dec(x_2); +lean_dec(x_1); +x_6515 = lean_ctor_get(x_6498, 0); +lean_inc(x_6515); +x_6516 = lean_ctor_get(x_6498, 1); +lean_inc(x_6516); +if (lean_is_exclusive(x_6498)) { + lean_ctor_release(x_6498, 0); + lean_ctor_release(x_6498, 1); + x_6517 = x_6498; +} else { + lean_dec_ref(x_6498); + x_6517 = lean_box(0); +} +if (lean_is_scalar(x_6517)) { + x_6518 = lean_alloc_ctor(1, 2, 0); +} else { + x_6518 = x_6517; +} +lean_ctor_set(x_6518, 0, x_6515); +lean_ctor_set(x_6518, 1, x_6516); +return x_6518; +} +} +else +{ +lean_object* x_6519; lean_object* x_6520; lean_object* x_6521; lean_object* x_6522; lean_object* x_6523; lean_object* x_6524; lean_object* x_6525; lean_object* x_6526; lean_object* x_6527; +lean_dec(x_6479); +lean_dec(x_6477); +lean_inc(x_5957); +lean_inc(x_153); +x_6519 = lean_alloc_ctor(6, 2, 0); +lean_ctor_set(x_6519, 0, x_153); +lean_ctor_set(x_6519, 1, x_5957); +x_6520 = 
lean_ctor_get(x_1, 0); +lean_inc(x_6520); +x_6521 = l_Lean_IR_ToIR_bindVar(x_6520, x_5963, x_4, x_5, x_6474); +x_6522 = lean_ctor_get(x_6521, 0); +lean_inc(x_6522); +x_6523 = lean_ctor_get(x_6521, 1); +lean_inc(x_6523); +lean_dec(x_6521); +x_6524 = lean_ctor_get(x_6522, 0); +lean_inc(x_6524); +x_6525 = lean_ctor_get(x_6522, 1); +lean_inc(x_6525); +lean_dec(x_6522); +x_6526 = lean_ctor_get(x_1, 2); +lean_inc(x_6526); +lean_inc(x_5); +lean_inc(x_4); +x_6527 = l_Lean_IR_ToIR_lowerType(x_6526, x_6525, x_4, x_5, x_6523); +if (lean_obj_tag(x_6527) == 0) +{ +lean_object* x_6528; lean_object* x_6529; lean_object* x_6530; lean_object* x_6531; lean_object* x_6532; +x_6528 = lean_ctor_get(x_6527, 0); +lean_inc(x_6528); +x_6529 = lean_ctor_get(x_6527, 1); +lean_inc(x_6529); +lean_dec(x_6527); +x_6530 = lean_ctor_get(x_6528, 0); +lean_inc(x_6530); +x_6531 = lean_ctor_get(x_6528, 1); +lean_inc(x_6531); +lean_dec(x_6528); +lean_inc(x_5); +lean_inc(x_4); +lean_inc(x_2); +x_6532 = l_Lean_IR_ToIR_lowerLet___lambda__1(x_2, x_6524, x_6519, x_6530, x_6531, x_4, x_5, x_6529); +if (lean_obj_tag(x_6532) == 0) +{ +lean_object* x_6533; lean_object* x_6534; lean_object* x_6535; lean_object* x_6536; lean_object* x_6537; lean_object* x_6538; lean_object* x_6539; +x_6533 = lean_ctor_get(x_6532, 0); +lean_inc(x_6533); +x_6534 = lean_ctor_get(x_6532, 1); +lean_inc(x_6534); +lean_dec(x_6532); +x_6535 = lean_ctor_get(x_6533, 0); +lean_inc(x_6535); +x_6536 = lean_ctor_get(x_6533, 1); +lean_inc(x_6536); +if (lean_is_exclusive(x_6533)) { + lean_ctor_release(x_6533, 0); + lean_ctor_release(x_6533, 1); + x_6537 = x_6533; +} else { + lean_dec_ref(x_6533); + x_6537 = lean_box(0); +} +if (lean_is_scalar(x_6476)) { + x_6538 = lean_alloc_ctor(1, 1, 0); +} else { + x_6538 = x_6476; +} +lean_ctor_set(x_6538, 0, x_6535); +if (lean_is_scalar(x_6537)) { + x_6539 = lean_alloc_ctor(0, 2, 0); +} else { + x_6539 = x_6537; +} +lean_ctor_set(x_6539, 0, x_6538); +lean_ctor_set(x_6539, 1, x_6536); +x_6257 = x_6539; +x_6258 = x_6534; +goto block_6287; +} +else +{ +lean_object* x_6540; lean_object* x_6541; lean_object* x_6542; lean_object* x_6543; +lean_dec(x_6476); +lean_dec(x_5968); +lean_dec(x_5957); +lean_dec(x_153); +lean_dec(x_5); +lean_dec(x_4); +lean_dec(x_2); +lean_dec(x_1); +x_6540 = lean_ctor_get(x_6532, 0); +lean_inc(x_6540); +x_6541 = lean_ctor_get(x_6532, 1); +lean_inc(x_6541); +if (lean_is_exclusive(x_6532)) { + lean_ctor_release(x_6532, 0); + lean_ctor_release(x_6532, 1); + x_6542 = x_6532; +} else { + lean_dec_ref(x_6532); + x_6542 = lean_box(0); +} +if (lean_is_scalar(x_6542)) { + x_6543 = lean_alloc_ctor(1, 2, 0); +} else { + x_6543 = x_6542; +} +lean_ctor_set(x_6543, 0, x_6540); +lean_ctor_set(x_6543, 1, x_6541); +return x_6543; +} +} +else +{ +lean_object* x_6544; lean_object* x_6545; lean_object* x_6546; lean_object* x_6547; +lean_dec(x_6524); +lean_dec(x_6519); +lean_dec(x_6476); +lean_dec(x_5968); +lean_dec(x_5957); +lean_dec(x_153); +lean_dec(x_5); +lean_dec(x_4); +lean_dec(x_2); +lean_dec(x_1); +x_6544 = lean_ctor_get(x_6527, 0); +lean_inc(x_6544); +x_6545 = lean_ctor_get(x_6527, 1); +lean_inc(x_6545); +if (lean_is_exclusive(x_6527)) { + lean_ctor_release(x_6527, 0); + lean_ctor_release(x_6527, 1); + x_6546 = x_6527; +} else { + lean_dec_ref(x_6527); + x_6546 = lean_box(0); +} +if (lean_is_scalar(x_6546)) { + x_6547 = lean_alloc_ctor(1, 2, 0); +} else { + x_6547 = x_6546; +} +lean_ctor_set(x_6547, 0, x_6544); +lean_ctor_set(x_6547, 1, x_6545); +return x_6547; +} +} +} +else +{ +lean_object* x_6548; lean_object* x_6549; 
lean_object* x_6550; lean_object* x_6551; lean_object* x_6552; lean_object* x_6553; lean_object* x_6554; lean_object* x_6555; lean_object* x_6556; +lean_dec(x_6479); +lean_dec(x_6477); +lean_inc(x_5957); +lean_inc(x_153); +x_6548 = lean_alloc_ctor(7, 2, 0); +lean_ctor_set(x_6548, 0, x_153); +lean_ctor_set(x_6548, 1, x_5957); +x_6549 = lean_ctor_get(x_1, 0); +lean_inc(x_6549); +x_6550 = l_Lean_IR_ToIR_bindVar(x_6549, x_5963, x_4, x_5, x_6474); +x_6551 = lean_ctor_get(x_6550, 0); +lean_inc(x_6551); +x_6552 = lean_ctor_get(x_6550, 1); +lean_inc(x_6552); +lean_dec(x_6550); +x_6553 = lean_ctor_get(x_6551, 0); +lean_inc(x_6553); +x_6554 = lean_ctor_get(x_6551, 1); +lean_inc(x_6554); +lean_dec(x_6551); +x_6555 = lean_box(7); +lean_inc(x_5); +lean_inc(x_4); +lean_inc(x_2); +x_6556 = l_Lean_IR_ToIR_lowerLet___lambda__1(x_2, x_6553, x_6548, x_6555, x_6554, x_4, x_5, x_6552); +if (lean_obj_tag(x_6556) == 0) +{ +lean_object* x_6557; lean_object* x_6558; lean_object* x_6559; lean_object* x_6560; lean_object* x_6561; lean_object* x_6562; lean_object* x_6563; +x_6557 = lean_ctor_get(x_6556, 0); +lean_inc(x_6557); +x_6558 = lean_ctor_get(x_6556, 1); +lean_inc(x_6558); +lean_dec(x_6556); +x_6559 = lean_ctor_get(x_6557, 0); +lean_inc(x_6559); +x_6560 = lean_ctor_get(x_6557, 1); +lean_inc(x_6560); +if (lean_is_exclusive(x_6557)) { + lean_ctor_release(x_6557, 0); + lean_ctor_release(x_6557, 1); + x_6561 = x_6557; +} else { + lean_dec_ref(x_6557); + x_6561 = lean_box(0); +} +if (lean_is_scalar(x_6476)) { + x_6562 = lean_alloc_ctor(1, 1, 0); +} else { + x_6562 = x_6476; +} +lean_ctor_set(x_6562, 0, x_6559); +if (lean_is_scalar(x_6561)) { + x_6563 = lean_alloc_ctor(0, 2, 0); +} else { + x_6563 = x_6561; +} +lean_ctor_set(x_6563, 0, x_6562); +lean_ctor_set(x_6563, 1, x_6560); +x_6257 = x_6563; +x_6258 = x_6558; +goto block_6287; +} +else +{ +lean_object* x_6564; lean_object* x_6565; lean_object* x_6566; lean_object* x_6567; +lean_dec(x_6476); +lean_dec(x_5968); +lean_dec(x_5957); +lean_dec(x_153); +lean_dec(x_5); +lean_dec(x_4); +lean_dec(x_2); +lean_dec(x_1); +x_6564 = lean_ctor_get(x_6556, 0); +lean_inc(x_6564); +x_6565 = lean_ctor_get(x_6556, 1); +lean_inc(x_6565); +if (lean_is_exclusive(x_6556)) { + lean_ctor_release(x_6556, 0); + lean_ctor_release(x_6556, 1); + x_6566 = x_6556; +} else { + lean_dec_ref(x_6556); + x_6566 = lean_box(0); +} +if (lean_is_scalar(x_6566)) { + x_6567 = lean_alloc_ctor(1, 2, 0); +} else { + x_6567 = x_6566; +} +lean_ctor_set(x_6567, 0, x_6564); +lean_ctor_set(x_6567, 1, x_6565); +return x_6567; +} +} +} +} +block_6287: +{ +lean_object* x_6259; +x_6259 = lean_ctor_get(x_6257, 0); +lean_inc(x_6259); +if (lean_obj_tag(x_6259) == 0) +{ +lean_object* x_6260; lean_object* x_6261; lean_object* x_6262; lean_object* x_6263; lean_object* x_6264; lean_object* x_6265; lean_object* x_6266; lean_object* x_6267; lean_object* x_6268; lean_object* x_6269; +lean_dec(x_5968); +x_6260 = lean_ctor_get(x_6257, 1); +lean_inc(x_6260); +lean_dec(x_6257); +x_6261 = lean_alloc_ctor(6, 2, 0); +lean_ctor_set(x_6261, 0, x_153); +lean_ctor_set(x_6261, 1, x_5957); +x_6262 = lean_ctor_get(x_1, 0); +lean_inc(x_6262); +x_6263 = l_Lean_IR_ToIR_bindVar(x_6262, x_6260, x_4, x_5, x_6258); +x_6264 = lean_ctor_get(x_6263, 0); +lean_inc(x_6264); +x_6265 = lean_ctor_get(x_6263, 1); +lean_inc(x_6265); +lean_dec(x_6263); +x_6266 = lean_ctor_get(x_6264, 0); +lean_inc(x_6266); +x_6267 = lean_ctor_get(x_6264, 1); +lean_inc(x_6267); +lean_dec(x_6264); +x_6268 = lean_ctor_get(x_1, 2); +lean_inc(x_6268); +lean_dec(x_1); 
+lean_inc(x_5); +lean_inc(x_4); +x_6269 = l_Lean_IR_ToIR_lowerType(x_6268, x_6267, x_4, x_5, x_6265); +if (lean_obj_tag(x_6269) == 0) +{ +lean_object* x_6270; lean_object* x_6271; lean_object* x_6272; lean_object* x_6273; lean_object* x_6274; +x_6270 = lean_ctor_get(x_6269, 0); +lean_inc(x_6270); +x_6271 = lean_ctor_get(x_6269, 1); +lean_inc(x_6271); +lean_dec(x_6269); +x_6272 = lean_ctor_get(x_6270, 0); +lean_inc(x_6272); +x_6273 = lean_ctor_get(x_6270, 1); +lean_inc(x_6273); +lean_dec(x_6270); +x_6274 = l_Lean_IR_ToIR_lowerLet___lambda__1(x_2, x_6266, x_6261, x_6272, x_6273, x_4, x_5, x_6271); +return x_6274; +} +else +{ +uint8_t x_6275; +lean_dec(x_6266); +lean_dec(x_6261); +lean_dec(x_5); +lean_dec(x_4); +lean_dec(x_2); +x_6275 = !lean_is_exclusive(x_6269); +if (x_6275 == 0) +{ +return x_6269; +} +else +{ +lean_object* x_6276; lean_object* x_6277; lean_object* x_6278; +x_6276 = lean_ctor_get(x_6269, 0); +x_6277 = lean_ctor_get(x_6269, 1); +lean_inc(x_6277); +lean_inc(x_6276); +lean_dec(x_6269); +x_6278 = lean_alloc_ctor(1, 2, 0); +lean_ctor_set(x_6278, 0, x_6276); +lean_ctor_set(x_6278, 1, x_6277); +return x_6278; +} +} +} +else +{ +uint8_t x_6279; +lean_dec(x_5957); +lean_dec(x_153); +lean_dec(x_5); +lean_dec(x_4); +lean_dec(x_2); +lean_dec(x_1); +x_6279 = !lean_is_exclusive(x_6257); +if (x_6279 == 0) +{ +lean_object* x_6280; lean_object* x_6281; lean_object* x_6282; +x_6280 = lean_ctor_get(x_6257, 0); +lean_dec(x_6280); +x_6281 = lean_ctor_get(x_6259, 0); +lean_inc(x_6281); +lean_dec(x_6259); +lean_ctor_set(x_6257, 0, x_6281); +if (lean_is_scalar(x_5968)) { + x_6282 = lean_alloc_ctor(0, 2, 0); +} else { + x_6282 = x_5968; +} +lean_ctor_set(x_6282, 0, x_6257); +lean_ctor_set(x_6282, 1, x_6258); +return x_6282; +} +else +{ +lean_object* x_6283; lean_object* x_6284; lean_object* x_6285; lean_object* x_6286; +x_6283 = lean_ctor_get(x_6257, 1); +lean_inc(x_6283); +lean_dec(x_6257); +x_6284 = lean_ctor_get(x_6259, 0); +lean_inc(x_6284); +lean_dec(x_6259); +x_6285 = lean_alloc_ctor(0, 2, 0); +lean_ctor_set(x_6285, 0, x_6284); +lean_ctor_set(x_6285, 1, x_6283); +if (lean_is_scalar(x_5968)) { + x_6286 = lean_alloc_ctor(0, 2, 0); +} else { + x_6286 = x_5968; +} +lean_ctor_set(x_6286, 0, x_6285); +lean_ctor_set(x_6286, 1, x_6258); +return x_6286; +} +} +} +} +case 2: +{ +lean_object* x_6568; lean_object* x_6569; +lean_dec(x_5974); +lean_dec(x_5969); +lean_dec(x_5968); +lean_free_object(x_5959); +lean_dec(x_5957); +lean_dec(x_5945); +lean_dec(x_5944); +lean_dec(x_153); +lean_dec(x_2); +lean_dec(x_1); +x_6568 = l_Lean_IR_ToIR_lowerLet___closed__18; +x_6569 = l_panic___at_Lean_IR_ToIR_lowerAlt_loop___spec__1(x_6568, x_5963, x_4, x_5, x_5967); +return x_6569; +} +case 3: +{ +lean_object* x_6570; lean_object* x_6571; lean_object* x_6601; lean_object* x_6602; +lean_dec(x_5974); +lean_dec(x_5969); +lean_dec(x_5945); +lean_dec(x_5944); +lean_inc(x_153); +x_6601 = l_Lean_Compiler_LCNF_getMonoDecl_x3f(x_153, x_4, x_5, x_5967); +x_6602 = lean_ctor_get(x_6601, 0); +lean_inc(x_6602); +if (lean_obj_tag(x_6602) == 0) +{ +lean_object* x_6603; lean_object* x_6604; +x_6603 = lean_ctor_get(x_6601, 1); +lean_inc(x_6603); +lean_dec(x_6601); +x_6604 = lean_box(0); +lean_ctor_set(x_5959, 0, x_6604); +x_6570 = x_5959; +x_6571 = x_6603; +goto block_6600; +} +else +{ +uint8_t x_6605; +lean_free_object(x_5959); +x_6605 = !lean_is_exclusive(x_6601); +if (x_6605 == 0) +{ +lean_object* x_6606; lean_object* x_6607; uint8_t x_6608; +x_6606 = lean_ctor_get(x_6601, 1); +x_6607 = lean_ctor_get(x_6601, 0); +lean_dec(x_6607); 
+x_6608 = !lean_is_exclusive(x_6602); +if (x_6608 == 0) +{ +lean_object* x_6609; lean_object* x_6610; lean_object* x_6611; lean_object* x_6612; uint8_t x_6613; +x_6609 = lean_ctor_get(x_6602, 0); +x_6610 = lean_array_get_size(x_5957); +x_6611 = lean_ctor_get(x_6609, 3); +lean_inc(x_6611); +lean_dec(x_6609); +x_6612 = lean_array_get_size(x_6611); +lean_dec(x_6611); +x_6613 = lean_nat_dec_lt(x_6610, x_6612); +if (x_6613 == 0) +{ +uint8_t x_6614; +x_6614 = lean_nat_dec_eq(x_6610, x_6612); +if (x_6614 == 0) +{ +lean_object* x_6615; lean_object* x_6616; lean_object* x_6617; lean_object* x_6618; lean_object* x_6619; lean_object* x_6620; lean_object* x_6621; lean_object* x_6622; lean_object* x_6623; lean_object* x_6624; lean_object* x_6625; lean_object* x_6626; lean_object* x_6627; lean_object* x_6628; lean_object* x_6629; lean_object* x_6630; +x_6615 = lean_unsigned_to_nat(0u); +x_6616 = l_Array_extract___rarg(x_5957, x_6615, x_6612); +x_6617 = l_Array_extract___rarg(x_5957, x_6612, x_6610); +lean_dec(x_6610); +lean_inc(x_153); +lean_ctor_set_tag(x_6601, 6); +lean_ctor_set(x_6601, 1, x_6616); +lean_ctor_set(x_6601, 0, x_153); +x_6618 = lean_ctor_get(x_1, 0); +lean_inc(x_6618); +x_6619 = l_Lean_IR_ToIR_bindVar(x_6618, x_5963, x_4, x_5, x_6606); +x_6620 = lean_ctor_get(x_6619, 0); +lean_inc(x_6620); +x_6621 = lean_ctor_get(x_6619, 1); +lean_inc(x_6621); +lean_dec(x_6619); +x_6622 = lean_ctor_get(x_6620, 0); +lean_inc(x_6622); +x_6623 = lean_ctor_get(x_6620, 1); +lean_inc(x_6623); +lean_dec(x_6620); +x_6624 = l_Lean_IR_ToIR_newVar(x_6623, x_4, x_5, x_6621); +x_6625 = lean_ctor_get(x_6624, 0); +lean_inc(x_6625); +x_6626 = lean_ctor_get(x_6624, 1); +lean_inc(x_6626); +lean_dec(x_6624); +x_6627 = lean_ctor_get(x_6625, 0); +lean_inc(x_6627); +x_6628 = lean_ctor_get(x_6625, 1); +lean_inc(x_6628); +lean_dec(x_6625); +x_6629 = lean_ctor_get(x_1, 2); +lean_inc(x_6629); +lean_inc(x_5); +lean_inc(x_4); +x_6630 = l_Lean_IR_ToIR_lowerType(x_6629, x_6628, x_4, x_5, x_6626); +if (lean_obj_tag(x_6630) == 0) +{ +lean_object* x_6631; lean_object* x_6632; lean_object* x_6633; lean_object* x_6634; lean_object* x_6635; +x_6631 = lean_ctor_get(x_6630, 0); +lean_inc(x_6631); +x_6632 = lean_ctor_get(x_6630, 1); +lean_inc(x_6632); +lean_dec(x_6630); +x_6633 = lean_ctor_get(x_6631, 0); +lean_inc(x_6633); +x_6634 = lean_ctor_get(x_6631, 1); +lean_inc(x_6634); +lean_dec(x_6631); +lean_inc(x_5); +lean_inc(x_4); +lean_inc(x_2); +x_6635 = l_Lean_IR_ToIR_lowerLet___lambda__3(x_2, x_6627, x_6617, x_6622, x_6601, x_6633, x_6634, x_4, x_5, x_6632); +if (lean_obj_tag(x_6635) == 0) +{ +lean_object* x_6636; lean_object* x_6637; uint8_t x_6638; +x_6636 = lean_ctor_get(x_6635, 0); +lean_inc(x_6636); +x_6637 = lean_ctor_get(x_6635, 1); +lean_inc(x_6637); +lean_dec(x_6635); +x_6638 = !lean_is_exclusive(x_6636); +if (x_6638 == 0) +{ +lean_object* x_6639; +x_6639 = lean_ctor_get(x_6636, 0); +lean_ctor_set(x_6602, 0, x_6639); +lean_ctor_set(x_6636, 0, x_6602); +x_6570 = x_6636; +x_6571 = x_6637; +goto block_6600; +} +else +{ +lean_object* x_6640; lean_object* x_6641; lean_object* x_6642; +x_6640 = lean_ctor_get(x_6636, 0); +x_6641 = lean_ctor_get(x_6636, 1); +lean_inc(x_6641); +lean_inc(x_6640); +lean_dec(x_6636); +lean_ctor_set(x_6602, 0, x_6640); +x_6642 = lean_alloc_ctor(0, 2, 0); +lean_ctor_set(x_6642, 0, x_6602); +lean_ctor_set(x_6642, 1, x_6641); +x_6570 = x_6642; +x_6571 = x_6637; +goto block_6600; +} +} +else +{ +uint8_t x_6643; +lean_free_object(x_6602); +lean_dec(x_5968); +lean_dec(x_5957); +lean_dec(x_153); +lean_dec(x_5); 
+lean_dec(x_4); +lean_dec(x_2); +lean_dec(x_1); +x_6643 = !lean_is_exclusive(x_6635); +if (x_6643 == 0) +{ +return x_6635; +} +else +{ +lean_object* x_6644; lean_object* x_6645; lean_object* x_6646; +x_6644 = lean_ctor_get(x_6635, 0); +x_6645 = lean_ctor_get(x_6635, 1); +lean_inc(x_6645); +lean_inc(x_6644); +lean_dec(x_6635); +x_6646 = lean_alloc_ctor(1, 2, 0); +lean_ctor_set(x_6646, 0, x_6644); +lean_ctor_set(x_6646, 1, x_6645); +return x_6646; +} +} +} +else +{ +uint8_t x_6647; +lean_dec(x_6627); +lean_dec(x_6622); +lean_dec(x_6601); +lean_dec(x_6617); +lean_free_object(x_6602); +lean_dec(x_5968); +lean_dec(x_5957); +lean_dec(x_153); +lean_dec(x_5); +lean_dec(x_4); +lean_dec(x_2); +lean_dec(x_1); +x_6647 = !lean_is_exclusive(x_6630); +if (x_6647 == 0) +{ +return x_6630; +} +else +{ +lean_object* x_6648; lean_object* x_6649; lean_object* x_6650; +x_6648 = lean_ctor_get(x_6630, 0); +x_6649 = lean_ctor_get(x_6630, 1); +lean_inc(x_6649); +lean_inc(x_6648); +lean_dec(x_6630); +x_6650 = lean_alloc_ctor(1, 2, 0); +lean_ctor_set(x_6650, 0, x_6648); +lean_ctor_set(x_6650, 1, x_6649); +return x_6650; +} +} +} +else +{ +lean_object* x_6651; lean_object* x_6652; lean_object* x_6653; lean_object* x_6654; lean_object* x_6655; lean_object* x_6656; lean_object* x_6657; lean_object* x_6658; +lean_dec(x_6612); +lean_dec(x_6610); +lean_inc(x_5957); +lean_inc(x_153); +lean_ctor_set_tag(x_6601, 6); +lean_ctor_set(x_6601, 1, x_5957); +lean_ctor_set(x_6601, 0, x_153); +x_6651 = lean_ctor_get(x_1, 0); +lean_inc(x_6651); +x_6652 = l_Lean_IR_ToIR_bindVar(x_6651, x_5963, x_4, x_5, x_6606); +x_6653 = lean_ctor_get(x_6652, 0); +lean_inc(x_6653); +x_6654 = lean_ctor_get(x_6652, 1); +lean_inc(x_6654); +lean_dec(x_6652); +x_6655 = lean_ctor_get(x_6653, 0); +lean_inc(x_6655); +x_6656 = lean_ctor_get(x_6653, 1); +lean_inc(x_6656); +lean_dec(x_6653); +x_6657 = lean_ctor_get(x_1, 2); +lean_inc(x_6657); +lean_inc(x_5); +lean_inc(x_4); +x_6658 = l_Lean_IR_ToIR_lowerType(x_6657, x_6656, x_4, x_5, x_6654); +if (lean_obj_tag(x_6658) == 0) +{ +lean_object* x_6659; lean_object* x_6660; lean_object* x_6661; lean_object* x_6662; lean_object* x_6663; +x_6659 = lean_ctor_get(x_6658, 0); +lean_inc(x_6659); +x_6660 = lean_ctor_get(x_6658, 1); +lean_inc(x_6660); +lean_dec(x_6658); +x_6661 = lean_ctor_get(x_6659, 0); +lean_inc(x_6661); +x_6662 = lean_ctor_get(x_6659, 1); +lean_inc(x_6662); +lean_dec(x_6659); +lean_inc(x_5); +lean_inc(x_4); +lean_inc(x_2); +x_6663 = l_Lean_IR_ToIR_lowerLet___lambda__1(x_2, x_6655, x_6601, x_6661, x_6662, x_4, x_5, x_6660); +if (lean_obj_tag(x_6663) == 0) +{ +lean_object* x_6664; lean_object* x_6665; uint8_t x_6666; +x_6664 = lean_ctor_get(x_6663, 0); +lean_inc(x_6664); +x_6665 = lean_ctor_get(x_6663, 1); +lean_inc(x_6665); +lean_dec(x_6663); +x_6666 = !lean_is_exclusive(x_6664); +if (x_6666 == 0) +{ +lean_object* x_6667; +x_6667 = lean_ctor_get(x_6664, 0); +lean_ctor_set(x_6602, 0, x_6667); +lean_ctor_set(x_6664, 0, x_6602); +x_6570 = x_6664; +x_6571 = x_6665; +goto block_6600; +} +else +{ +lean_object* x_6668; lean_object* x_6669; lean_object* x_6670; +x_6668 = lean_ctor_get(x_6664, 0); +x_6669 = lean_ctor_get(x_6664, 1); +lean_inc(x_6669); +lean_inc(x_6668); +lean_dec(x_6664); +lean_ctor_set(x_6602, 0, x_6668); +x_6670 = lean_alloc_ctor(0, 2, 0); +lean_ctor_set(x_6670, 0, x_6602); +lean_ctor_set(x_6670, 1, x_6669); +x_6570 = x_6670; +x_6571 = x_6665; +goto block_6600; +} +} +else +{ +uint8_t x_6671; +lean_free_object(x_6602); +lean_dec(x_5968); +lean_dec(x_5957); +lean_dec(x_153); +lean_dec(x_5); 
+lean_dec(x_4); +lean_dec(x_2); +lean_dec(x_1); +x_6671 = !lean_is_exclusive(x_6663); +if (x_6671 == 0) +{ +return x_6663; +} +else +{ +lean_object* x_6672; lean_object* x_6673; lean_object* x_6674; +x_6672 = lean_ctor_get(x_6663, 0); +x_6673 = lean_ctor_get(x_6663, 1); +lean_inc(x_6673); +lean_inc(x_6672); +lean_dec(x_6663); +x_6674 = lean_alloc_ctor(1, 2, 0); +lean_ctor_set(x_6674, 0, x_6672); +lean_ctor_set(x_6674, 1, x_6673); +return x_6674; +} +} +} +else +{ +uint8_t x_6675; +lean_dec(x_6655); +lean_dec(x_6601); +lean_free_object(x_6602); +lean_dec(x_5968); +lean_dec(x_5957); +lean_dec(x_153); +lean_dec(x_5); +lean_dec(x_4); +lean_dec(x_2); +lean_dec(x_1); +x_6675 = !lean_is_exclusive(x_6658); +if (x_6675 == 0) +{ +return x_6658; +} +else +{ +lean_object* x_6676; lean_object* x_6677; lean_object* x_6678; +x_6676 = lean_ctor_get(x_6658, 0); +x_6677 = lean_ctor_get(x_6658, 1); +lean_inc(x_6677); +lean_inc(x_6676); +lean_dec(x_6658); +x_6678 = lean_alloc_ctor(1, 2, 0); +lean_ctor_set(x_6678, 0, x_6676); +lean_ctor_set(x_6678, 1, x_6677); +return x_6678; +} +} +} +} +else +{ +lean_object* x_6679; lean_object* x_6680; lean_object* x_6681; lean_object* x_6682; lean_object* x_6683; lean_object* x_6684; lean_object* x_6685; lean_object* x_6686; +lean_dec(x_6612); +lean_dec(x_6610); +lean_inc(x_5957); +lean_inc(x_153); +lean_ctor_set_tag(x_6601, 7); +lean_ctor_set(x_6601, 1, x_5957); +lean_ctor_set(x_6601, 0, x_153); +x_6679 = lean_ctor_get(x_1, 0); +lean_inc(x_6679); +x_6680 = l_Lean_IR_ToIR_bindVar(x_6679, x_5963, x_4, x_5, x_6606); +x_6681 = lean_ctor_get(x_6680, 0); +lean_inc(x_6681); +x_6682 = lean_ctor_get(x_6680, 1); +lean_inc(x_6682); +lean_dec(x_6680); +x_6683 = lean_ctor_get(x_6681, 0); +lean_inc(x_6683); +x_6684 = lean_ctor_get(x_6681, 1); +lean_inc(x_6684); +lean_dec(x_6681); +x_6685 = lean_box(7); +lean_inc(x_5); +lean_inc(x_4); +lean_inc(x_2); +x_6686 = l_Lean_IR_ToIR_lowerLet___lambda__1(x_2, x_6683, x_6601, x_6685, x_6684, x_4, x_5, x_6682); +if (lean_obj_tag(x_6686) == 0) +{ +lean_object* x_6687; lean_object* x_6688; uint8_t x_6689; +x_6687 = lean_ctor_get(x_6686, 0); +lean_inc(x_6687); +x_6688 = lean_ctor_get(x_6686, 1); +lean_inc(x_6688); +lean_dec(x_6686); +x_6689 = !lean_is_exclusive(x_6687); +if (x_6689 == 0) +{ +lean_object* x_6690; +x_6690 = lean_ctor_get(x_6687, 0); +lean_ctor_set(x_6602, 0, x_6690); +lean_ctor_set(x_6687, 0, x_6602); +x_6570 = x_6687; +x_6571 = x_6688; +goto block_6600; +} +else +{ +lean_object* x_6691; lean_object* x_6692; lean_object* x_6693; +x_6691 = lean_ctor_get(x_6687, 0); +x_6692 = lean_ctor_get(x_6687, 1); +lean_inc(x_6692); +lean_inc(x_6691); +lean_dec(x_6687); +lean_ctor_set(x_6602, 0, x_6691); +x_6693 = lean_alloc_ctor(0, 2, 0); +lean_ctor_set(x_6693, 0, x_6602); +lean_ctor_set(x_6693, 1, x_6692); +x_6570 = x_6693; +x_6571 = x_6688; +goto block_6600; +} +} +else +{ +uint8_t x_6694; +lean_free_object(x_6602); +lean_dec(x_5968); +lean_dec(x_5957); +lean_dec(x_153); +lean_dec(x_5); +lean_dec(x_4); +lean_dec(x_2); +lean_dec(x_1); +x_6694 = !lean_is_exclusive(x_6686); +if (x_6694 == 0) +{ +return x_6686; +} +else +{ +lean_object* x_6695; lean_object* x_6696; lean_object* x_6697; +x_6695 = lean_ctor_get(x_6686, 0); +x_6696 = lean_ctor_get(x_6686, 1); +lean_inc(x_6696); +lean_inc(x_6695); +lean_dec(x_6686); +x_6697 = lean_alloc_ctor(1, 2, 0); +lean_ctor_set(x_6697, 0, x_6695); +lean_ctor_set(x_6697, 1, x_6696); +return x_6697; +} +} +} +} +else +{ +lean_object* x_6698; lean_object* x_6699; lean_object* x_6700; lean_object* x_6701; uint8_t x_6702; 
+x_6698 = lean_ctor_get(x_6602, 0); +lean_inc(x_6698); +lean_dec(x_6602); +x_6699 = lean_array_get_size(x_5957); +x_6700 = lean_ctor_get(x_6698, 3); +lean_inc(x_6700); +lean_dec(x_6698); +x_6701 = lean_array_get_size(x_6700); +lean_dec(x_6700); +x_6702 = lean_nat_dec_lt(x_6699, x_6701); +if (x_6702 == 0) +{ +uint8_t x_6703; +x_6703 = lean_nat_dec_eq(x_6699, x_6701); +if (x_6703 == 0) +{ +lean_object* x_6704; lean_object* x_6705; lean_object* x_6706; lean_object* x_6707; lean_object* x_6708; lean_object* x_6709; lean_object* x_6710; lean_object* x_6711; lean_object* x_6712; lean_object* x_6713; lean_object* x_6714; lean_object* x_6715; lean_object* x_6716; lean_object* x_6717; lean_object* x_6718; lean_object* x_6719; +x_6704 = lean_unsigned_to_nat(0u); +x_6705 = l_Array_extract___rarg(x_5957, x_6704, x_6701); +x_6706 = l_Array_extract___rarg(x_5957, x_6701, x_6699); +lean_dec(x_6699); +lean_inc(x_153); +lean_ctor_set_tag(x_6601, 6); +lean_ctor_set(x_6601, 1, x_6705); +lean_ctor_set(x_6601, 0, x_153); +x_6707 = lean_ctor_get(x_1, 0); +lean_inc(x_6707); +x_6708 = l_Lean_IR_ToIR_bindVar(x_6707, x_5963, x_4, x_5, x_6606); +x_6709 = lean_ctor_get(x_6708, 0); +lean_inc(x_6709); +x_6710 = lean_ctor_get(x_6708, 1); +lean_inc(x_6710); +lean_dec(x_6708); +x_6711 = lean_ctor_get(x_6709, 0); +lean_inc(x_6711); +x_6712 = lean_ctor_get(x_6709, 1); +lean_inc(x_6712); +lean_dec(x_6709); +x_6713 = l_Lean_IR_ToIR_newVar(x_6712, x_4, x_5, x_6710); +x_6714 = lean_ctor_get(x_6713, 0); +lean_inc(x_6714); +x_6715 = lean_ctor_get(x_6713, 1); +lean_inc(x_6715); +lean_dec(x_6713); +x_6716 = lean_ctor_get(x_6714, 0); +lean_inc(x_6716); +x_6717 = lean_ctor_get(x_6714, 1); +lean_inc(x_6717); +lean_dec(x_6714); +x_6718 = lean_ctor_get(x_1, 2); +lean_inc(x_6718); +lean_inc(x_5); +lean_inc(x_4); +x_6719 = l_Lean_IR_ToIR_lowerType(x_6718, x_6717, x_4, x_5, x_6715); +if (lean_obj_tag(x_6719) == 0) +{ +lean_object* x_6720; lean_object* x_6721; lean_object* x_6722; lean_object* x_6723; lean_object* x_6724; +x_6720 = lean_ctor_get(x_6719, 0); +lean_inc(x_6720); +x_6721 = lean_ctor_get(x_6719, 1); +lean_inc(x_6721); +lean_dec(x_6719); +x_6722 = lean_ctor_get(x_6720, 0); +lean_inc(x_6722); +x_6723 = lean_ctor_get(x_6720, 1); +lean_inc(x_6723); +lean_dec(x_6720); +lean_inc(x_5); +lean_inc(x_4); +lean_inc(x_2); +x_6724 = l_Lean_IR_ToIR_lowerLet___lambda__3(x_2, x_6716, x_6706, x_6711, x_6601, x_6722, x_6723, x_4, x_5, x_6721); +if (lean_obj_tag(x_6724) == 0) +{ +lean_object* x_6725; lean_object* x_6726; lean_object* x_6727; lean_object* x_6728; lean_object* x_6729; lean_object* x_6730; lean_object* x_6731; +x_6725 = lean_ctor_get(x_6724, 0); +lean_inc(x_6725); +x_6726 = lean_ctor_get(x_6724, 1); +lean_inc(x_6726); +lean_dec(x_6724); +x_6727 = lean_ctor_get(x_6725, 0); +lean_inc(x_6727); +x_6728 = lean_ctor_get(x_6725, 1); +lean_inc(x_6728); +if (lean_is_exclusive(x_6725)) { + lean_ctor_release(x_6725, 0); + lean_ctor_release(x_6725, 1); + x_6729 = x_6725; +} else { + lean_dec_ref(x_6725); + x_6729 = lean_box(0); +} +x_6730 = lean_alloc_ctor(1, 1, 0); +lean_ctor_set(x_6730, 0, x_6727); +if (lean_is_scalar(x_6729)) { + x_6731 = lean_alloc_ctor(0, 2, 0); +} else { + x_6731 = x_6729; +} +lean_ctor_set(x_6731, 0, x_6730); +lean_ctor_set(x_6731, 1, x_6728); +x_6570 = x_6731; +x_6571 = x_6726; +goto block_6600; +} +else +{ +lean_object* x_6732; lean_object* x_6733; lean_object* x_6734; lean_object* x_6735; +lean_dec(x_5968); +lean_dec(x_5957); +lean_dec(x_153); +lean_dec(x_5); +lean_dec(x_4); +lean_dec(x_2); +lean_dec(x_1); +x_6732 = 
lean_ctor_get(x_6724, 0); +lean_inc(x_6732); +x_6733 = lean_ctor_get(x_6724, 1); +lean_inc(x_6733); +if (lean_is_exclusive(x_6724)) { + lean_ctor_release(x_6724, 0); + lean_ctor_release(x_6724, 1); + x_6734 = x_6724; +} else { + lean_dec_ref(x_6724); + x_6734 = lean_box(0); +} +if (lean_is_scalar(x_6734)) { + x_6735 = lean_alloc_ctor(1, 2, 0); +} else { + x_6735 = x_6734; +} +lean_ctor_set(x_6735, 0, x_6732); +lean_ctor_set(x_6735, 1, x_6733); +return x_6735; +} +} +else +{ +lean_object* x_6736; lean_object* x_6737; lean_object* x_6738; lean_object* x_6739; +lean_dec(x_6716); +lean_dec(x_6711); +lean_dec(x_6601); +lean_dec(x_6706); +lean_dec(x_5968); +lean_dec(x_5957); +lean_dec(x_153); +lean_dec(x_5); +lean_dec(x_4); +lean_dec(x_2); +lean_dec(x_1); +x_6736 = lean_ctor_get(x_6719, 0); +lean_inc(x_6736); +x_6737 = lean_ctor_get(x_6719, 1); +lean_inc(x_6737); +if (lean_is_exclusive(x_6719)) { + lean_ctor_release(x_6719, 0); + lean_ctor_release(x_6719, 1); + x_6738 = x_6719; +} else { + lean_dec_ref(x_6719); + x_6738 = lean_box(0); +} +if (lean_is_scalar(x_6738)) { + x_6739 = lean_alloc_ctor(1, 2, 0); +} else { + x_6739 = x_6738; +} +lean_ctor_set(x_6739, 0, x_6736); +lean_ctor_set(x_6739, 1, x_6737); +return x_6739; +} +} +else +{ +lean_object* x_6740; lean_object* x_6741; lean_object* x_6742; lean_object* x_6743; lean_object* x_6744; lean_object* x_6745; lean_object* x_6746; lean_object* x_6747; +lean_dec(x_6701); +lean_dec(x_6699); +lean_inc(x_5957); +lean_inc(x_153); +lean_ctor_set_tag(x_6601, 6); +lean_ctor_set(x_6601, 1, x_5957); +lean_ctor_set(x_6601, 0, x_153); +x_6740 = lean_ctor_get(x_1, 0); +lean_inc(x_6740); +x_6741 = l_Lean_IR_ToIR_bindVar(x_6740, x_5963, x_4, x_5, x_6606); +x_6742 = lean_ctor_get(x_6741, 0); +lean_inc(x_6742); +x_6743 = lean_ctor_get(x_6741, 1); +lean_inc(x_6743); +lean_dec(x_6741); +x_6744 = lean_ctor_get(x_6742, 0); +lean_inc(x_6744); +x_6745 = lean_ctor_get(x_6742, 1); +lean_inc(x_6745); +lean_dec(x_6742); +x_6746 = lean_ctor_get(x_1, 2); +lean_inc(x_6746); +lean_inc(x_5); +lean_inc(x_4); +x_6747 = l_Lean_IR_ToIR_lowerType(x_6746, x_6745, x_4, x_5, x_6743); +if (lean_obj_tag(x_6747) == 0) +{ +lean_object* x_6748; lean_object* x_6749; lean_object* x_6750; lean_object* x_6751; lean_object* x_6752; +x_6748 = lean_ctor_get(x_6747, 0); +lean_inc(x_6748); +x_6749 = lean_ctor_get(x_6747, 1); +lean_inc(x_6749); +lean_dec(x_6747); +x_6750 = lean_ctor_get(x_6748, 0); +lean_inc(x_6750); +x_6751 = lean_ctor_get(x_6748, 1); +lean_inc(x_6751); +lean_dec(x_6748); +lean_inc(x_5); +lean_inc(x_4); +lean_inc(x_2); +x_6752 = l_Lean_IR_ToIR_lowerLet___lambda__1(x_2, x_6744, x_6601, x_6750, x_6751, x_4, x_5, x_6749); +if (lean_obj_tag(x_6752) == 0) +{ +lean_object* x_6753; lean_object* x_6754; lean_object* x_6755; lean_object* x_6756; lean_object* x_6757; lean_object* x_6758; lean_object* x_6759; +x_6753 = lean_ctor_get(x_6752, 0); +lean_inc(x_6753); +x_6754 = lean_ctor_get(x_6752, 1); +lean_inc(x_6754); +lean_dec(x_6752); +x_6755 = lean_ctor_get(x_6753, 0); +lean_inc(x_6755); +x_6756 = lean_ctor_get(x_6753, 1); +lean_inc(x_6756); +if (lean_is_exclusive(x_6753)) { + lean_ctor_release(x_6753, 0); + lean_ctor_release(x_6753, 1); + x_6757 = x_6753; +} else { + lean_dec_ref(x_6753); + x_6757 = lean_box(0); +} +x_6758 = lean_alloc_ctor(1, 1, 0); +lean_ctor_set(x_6758, 0, x_6755); +if (lean_is_scalar(x_6757)) { + x_6759 = lean_alloc_ctor(0, 2, 0); +} else { + x_6759 = x_6757; +} +lean_ctor_set(x_6759, 0, x_6758); +lean_ctor_set(x_6759, 1, x_6756); +x_6570 = x_6759; +x_6571 = x_6754; 
+goto block_6600; +} +else +{ +lean_object* x_6760; lean_object* x_6761; lean_object* x_6762; lean_object* x_6763; +lean_dec(x_5968); +lean_dec(x_5957); +lean_dec(x_153); +lean_dec(x_5); +lean_dec(x_4); +lean_dec(x_2); +lean_dec(x_1); +x_6760 = lean_ctor_get(x_6752, 0); +lean_inc(x_6760); +x_6761 = lean_ctor_get(x_6752, 1); +lean_inc(x_6761); +if (lean_is_exclusive(x_6752)) { + lean_ctor_release(x_6752, 0); + lean_ctor_release(x_6752, 1); + x_6762 = x_6752; +} else { + lean_dec_ref(x_6752); + x_6762 = lean_box(0); +} +if (lean_is_scalar(x_6762)) { + x_6763 = lean_alloc_ctor(1, 2, 0); +} else { + x_6763 = x_6762; +} +lean_ctor_set(x_6763, 0, x_6760); +lean_ctor_set(x_6763, 1, x_6761); +return x_6763; +} +} +else +{ +lean_object* x_6764; lean_object* x_6765; lean_object* x_6766; lean_object* x_6767; +lean_dec(x_6744); +lean_dec(x_6601); +lean_dec(x_5968); +lean_dec(x_5957); +lean_dec(x_153); +lean_dec(x_5); +lean_dec(x_4); +lean_dec(x_2); +lean_dec(x_1); +x_6764 = lean_ctor_get(x_6747, 0); +lean_inc(x_6764); +x_6765 = lean_ctor_get(x_6747, 1); +lean_inc(x_6765); +if (lean_is_exclusive(x_6747)) { + lean_ctor_release(x_6747, 0); + lean_ctor_release(x_6747, 1); + x_6766 = x_6747; +} else { + lean_dec_ref(x_6747); + x_6766 = lean_box(0); +} +if (lean_is_scalar(x_6766)) { + x_6767 = lean_alloc_ctor(1, 2, 0); +} else { + x_6767 = x_6766; +} +lean_ctor_set(x_6767, 0, x_6764); +lean_ctor_set(x_6767, 1, x_6765); +return x_6767; +} +} +} +else +{ +lean_object* x_6768; lean_object* x_6769; lean_object* x_6770; lean_object* x_6771; lean_object* x_6772; lean_object* x_6773; lean_object* x_6774; lean_object* x_6775; +lean_dec(x_6701); +lean_dec(x_6699); +lean_inc(x_5957); +lean_inc(x_153); +lean_ctor_set_tag(x_6601, 7); +lean_ctor_set(x_6601, 1, x_5957); +lean_ctor_set(x_6601, 0, x_153); +x_6768 = lean_ctor_get(x_1, 0); +lean_inc(x_6768); +x_6769 = l_Lean_IR_ToIR_bindVar(x_6768, x_5963, x_4, x_5, x_6606); +x_6770 = lean_ctor_get(x_6769, 0); +lean_inc(x_6770); +x_6771 = lean_ctor_get(x_6769, 1); +lean_inc(x_6771); +lean_dec(x_6769); +x_6772 = lean_ctor_get(x_6770, 0); +lean_inc(x_6772); +x_6773 = lean_ctor_get(x_6770, 1); +lean_inc(x_6773); +lean_dec(x_6770); +x_6774 = lean_box(7); +lean_inc(x_5); +lean_inc(x_4); +lean_inc(x_2); +x_6775 = l_Lean_IR_ToIR_lowerLet___lambda__1(x_2, x_6772, x_6601, x_6774, x_6773, x_4, x_5, x_6771); +if (lean_obj_tag(x_6775) == 0) +{ +lean_object* x_6776; lean_object* x_6777; lean_object* x_6778; lean_object* x_6779; lean_object* x_6780; lean_object* x_6781; lean_object* x_6782; +x_6776 = lean_ctor_get(x_6775, 0); +lean_inc(x_6776); +x_6777 = lean_ctor_get(x_6775, 1); +lean_inc(x_6777); +lean_dec(x_6775); +x_6778 = lean_ctor_get(x_6776, 0); +lean_inc(x_6778); +x_6779 = lean_ctor_get(x_6776, 1); +lean_inc(x_6779); +if (lean_is_exclusive(x_6776)) { + lean_ctor_release(x_6776, 0); + lean_ctor_release(x_6776, 1); + x_6780 = x_6776; +} else { + lean_dec_ref(x_6776); + x_6780 = lean_box(0); +} +x_6781 = lean_alloc_ctor(1, 1, 0); +lean_ctor_set(x_6781, 0, x_6778); +if (lean_is_scalar(x_6780)) { + x_6782 = lean_alloc_ctor(0, 2, 0); +} else { + x_6782 = x_6780; +} +lean_ctor_set(x_6782, 0, x_6781); +lean_ctor_set(x_6782, 1, x_6779); +x_6570 = x_6782; +x_6571 = x_6777; +goto block_6600; +} +else +{ +lean_object* x_6783; lean_object* x_6784; lean_object* x_6785; lean_object* x_6786; +lean_dec(x_5968); +lean_dec(x_5957); +lean_dec(x_153); +lean_dec(x_5); +lean_dec(x_4); +lean_dec(x_2); +lean_dec(x_1); +x_6783 = lean_ctor_get(x_6775, 0); +lean_inc(x_6783); +x_6784 = lean_ctor_get(x_6775, 
1); +lean_inc(x_6784); +if (lean_is_exclusive(x_6775)) { + lean_ctor_release(x_6775, 0); + lean_ctor_release(x_6775, 1); + x_6785 = x_6775; +} else { + lean_dec_ref(x_6775); + x_6785 = lean_box(0); +} +if (lean_is_scalar(x_6785)) { + x_6786 = lean_alloc_ctor(1, 2, 0); +} else { + x_6786 = x_6785; +} +lean_ctor_set(x_6786, 0, x_6783); +lean_ctor_set(x_6786, 1, x_6784); +return x_6786; +} +} +} +} +else +{ +lean_object* x_6787; lean_object* x_6788; lean_object* x_6789; lean_object* x_6790; lean_object* x_6791; lean_object* x_6792; uint8_t x_6793; +x_6787 = lean_ctor_get(x_6601, 1); +lean_inc(x_6787); +lean_dec(x_6601); +x_6788 = lean_ctor_get(x_6602, 0); +lean_inc(x_6788); +if (lean_is_exclusive(x_6602)) { + lean_ctor_release(x_6602, 0); + x_6789 = x_6602; +} else { + lean_dec_ref(x_6602); + x_6789 = lean_box(0); +} +x_6790 = lean_array_get_size(x_5957); +x_6791 = lean_ctor_get(x_6788, 3); +lean_inc(x_6791); +lean_dec(x_6788); +x_6792 = lean_array_get_size(x_6791); +lean_dec(x_6791); +x_6793 = lean_nat_dec_lt(x_6790, x_6792); +if (x_6793 == 0) +{ +uint8_t x_6794; +x_6794 = lean_nat_dec_eq(x_6790, x_6792); +if (x_6794 == 0) +{ +lean_object* x_6795; lean_object* x_6796; lean_object* x_6797; lean_object* x_6798; lean_object* x_6799; lean_object* x_6800; lean_object* x_6801; lean_object* x_6802; lean_object* x_6803; lean_object* x_6804; lean_object* x_6805; lean_object* x_6806; lean_object* x_6807; lean_object* x_6808; lean_object* x_6809; lean_object* x_6810; lean_object* x_6811; +x_6795 = lean_unsigned_to_nat(0u); +x_6796 = l_Array_extract___rarg(x_5957, x_6795, x_6792); +x_6797 = l_Array_extract___rarg(x_5957, x_6792, x_6790); +lean_dec(x_6790); +lean_inc(x_153); +x_6798 = lean_alloc_ctor(6, 2, 0); +lean_ctor_set(x_6798, 0, x_153); +lean_ctor_set(x_6798, 1, x_6796); +x_6799 = lean_ctor_get(x_1, 0); +lean_inc(x_6799); +x_6800 = l_Lean_IR_ToIR_bindVar(x_6799, x_5963, x_4, x_5, x_6787); +x_6801 = lean_ctor_get(x_6800, 0); +lean_inc(x_6801); +x_6802 = lean_ctor_get(x_6800, 1); +lean_inc(x_6802); +lean_dec(x_6800); +x_6803 = lean_ctor_get(x_6801, 0); +lean_inc(x_6803); +x_6804 = lean_ctor_get(x_6801, 1); +lean_inc(x_6804); +lean_dec(x_6801); +x_6805 = l_Lean_IR_ToIR_newVar(x_6804, x_4, x_5, x_6802); +x_6806 = lean_ctor_get(x_6805, 0); +lean_inc(x_6806); +x_6807 = lean_ctor_get(x_6805, 1); +lean_inc(x_6807); +lean_dec(x_6805); +x_6808 = lean_ctor_get(x_6806, 0); +lean_inc(x_6808); +x_6809 = lean_ctor_get(x_6806, 1); +lean_inc(x_6809); +lean_dec(x_6806); +x_6810 = lean_ctor_get(x_1, 2); +lean_inc(x_6810); +lean_inc(x_5); +lean_inc(x_4); +x_6811 = l_Lean_IR_ToIR_lowerType(x_6810, x_6809, x_4, x_5, x_6807); +if (lean_obj_tag(x_6811) == 0) +{ +lean_object* x_6812; lean_object* x_6813; lean_object* x_6814; lean_object* x_6815; lean_object* x_6816; +x_6812 = lean_ctor_get(x_6811, 0); +lean_inc(x_6812); +x_6813 = lean_ctor_get(x_6811, 1); +lean_inc(x_6813); +lean_dec(x_6811); +x_6814 = lean_ctor_get(x_6812, 0); +lean_inc(x_6814); +x_6815 = lean_ctor_get(x_6812, 1); +lean_inc(x_6815); +lean_dec(x_6812); +lean_inc(x_5); +lean_inc(x_4); +lean_inc(x_2); +x_6816 = l_Lean_IR_ToIR_lowerLet___lambda__3(x_2, x_6808, x_6797, x_6803, x_6798, x_6814, x_6815, x_4, x_5, x_6813); +if (lean_obj_tag(x_6816) == 0) +{ +lean_object* x_6817; lean_object* x_6818; lean_object* x_6819; lean_object* x_6820; lean_object* x_6821; lean_object* x_6822; lean_object* x_6823; +x_6817 = lean_ctor_get(x_6816, 0); +lean_inc(x_6817); +x_6818 = lean_ctor_get(x_6816, 1); +lean_inc(x_6818); +lean_dec(x_6816); +x_6819 = lean_ctor_get(x_6817, 
0); +lean_inc(x_6819); +x_6820 = lean_ctor_get(x_6817, 1); +lean_inc(x_6820); +if (lean_is_exclusive(x_6817)) { + lean_ctor_release(x_6817, 0); + lean_ctor_release(x_6817, 1); + x_6821 = x_6817; +} else { + lean_dec_ref(x_6817); + x_6821 = lean_box(0); +} +if (lean_is_scalar(x_6789)) { + x_6822 = lean_alloc_ctor(1, 1, 0); +} else { + x_6822 = x_6789; +} +lean_ctor_set(x_6822, 0, x_6819); +if (lean_is_scalar(x_6821)) { + x_6823 = lean_alloc_ctor(0, 2, 0); +} else { + x_6823 = x_6821; +} +lean_ctor_set(x_6823, 0, x_6822); +lean_ctor_set(x_6823, 1, x_6820); +x_6570 = x_6823; +x_6571 = x_6818; +goto block_6600; +} +else +{ +lean_object* x_6824; lean_object* x_6825; lean_object* x_6826; lean_object* x_6827; +lean_dec(x_6789); +lean_dec(x_5968); +lean_dec(x_5957); +lean_dec(x_153); +lean_dec(x_5); +lean_dec(x_4); +lean_dec(x_2); +lean_dec(x_1); +x_6824 = lean_ctor_get(x_6816, 0); +lean_inc(x_6824); +x_6825 = lean_ctor_get(x_6816, 1); +lean_inc(x_6825); +if (lean_is_exclusive(x_6816)) { + lean_ctor_release(x_6816, 0); + lean_ctor_release(x_6816, 1); + x_6826 = x_6816; +} else { + lean_dec_ref(x_6816); + x_6826 = lean_box(0); +} +if (lean_is_scalar(x_6826)) { + x_6827 = lean_alloc_ctor(1, 2, 0); +} else { + x_6827 = x_6826; +} +lean_ctor_set(x_6827, 0, x_6824); +lean_ctor_set(x_6827, 1, x_6825); +return x_6827; +} +} +else +{ +lean_object* x_6828; lean_object* x_6829; lean_object* x_6830; lean_object* x_6831; +lean_dec(x_6808); +lean_dec(x_6803); +lean_dec(x_6798); +lean_dec(x_6797); +lean_dec(x_6789); +lean_dec(x_5968); +lean_dec(x_5957); +lean_dec(x_153); +lean_dec(x_5); +lean_dec(x_4); +lean_dec(x_2); +lean_dec(x_1); +x_6828 = lean_ctor_get(x_6811, 0); +lean_inc(x_6828); +x_6829 = lean_ctor_get(x_6811, 1); +lean_inc(x_6829); +if (lean_is_exclusive(x_6811)) { + lean_ctor_release(x_6811, 0); + lean_ctor_release(x_6811, 1); + x_6830 = x_6811; +} else { + lean_dec_ref(x_6811); + x_6830 = lean_box(0); +} +if (lean_is_scalar(x_6830)) { + x_6831 = lean_alloc_ctor(1, 2, 0); +} else { + x_6831 = x_6830; +} +lean_ctor_set(x_6831, 0, x_6828); +lean_ctor_set(x_6831, 1, x_6829); +return x_6831; +} +} +else +{ +lean_object* x_6832; lean_object* x_6833; lean_object* x_6834; lean_object* x_6835; lean_object* x_6836; lean_object* x_6837; lean_object* x_6838; lean_object* x_6839; lean_object* x_6840; +lean_dec(x_6792); +lean_dec(x_6790); +lean_inc(x_5957); +lean_inc(x_153); +x_6832 = lean_alloc_ctor(6, 2, 0); +lean_ctor_set(x_6832, 0, x_153); +lean_ctor_set(x_6832, 1, x_5957); +x_6833 = lean_ctor_get(x_1, 0); +lean_inc(x_6833); +x_6834 = l_Lean_IR_ToIR_bindVar(x_6833, x_5963, x_4, x_5, x_6787); +x_6835 = lean_ctor_get(x_6834, 0); +lean_inc(x_6835); +x_6836 = lean_ctor_get(x_6834, 1); +lean_inc(x_6836); +lean_dec(x_6834); +x_6837 = lean_ctor_get(x_6835, 0); +lean_inc(x_6837); +x_6838 = lean_ctor_get(x_6835, 1); +lean_inc(x_6838); +lean_dec(x_6835); +x_6839 = lean_ctor_get(x_1, 2); +lean_inc(x_6839); +lean_inc(x_5); +lean_inc(x_4); +x_6840 = l_Lean_IR_ToIR_lowerType(x_6839, x_6838, x_4, x_5, x_6836); +if (lean_obj_tag(x_6840) == 0) +{ +lean_object* x_6841; lean_object* x_6842; lean_object* x_6843; lean_object* x_6844; lean_object* x_6845; +x_6841 = lean_ctor_get(x_6840, 0); +lean_inc(x_6841); +x_6842 = lean_ctor_get(x_6840, 1); +lean_inc(x_6842); +lean_dec(x_6840); +x_6843 = lean_ctor_get(x_6841, 0); +lean_inc(x_6843); +x_6844 = lean_ctor_get(x_6841, 1); +lean_inc(x_6844); +lean_dec(x_6841); +lean_inc(x_5); +lean_inc(x_4); +lean_inc(x_2); +x_6845 = l_Lean_IR_ToIR_lowerLet___lambda__1(x_2, x_6837, x_6832, x_6843, 
x_6844, x_4, x_5, x_6842); +if (lean_obj_tag(x_6845) == 0) +{ +lean_object* x_6846; lean_object* x_6847; lean_object* x_6848; lean_object* x_6849; lean_object* x_6850; lean_object* x_6851; lean_object* x_6852; +x_6846 = lean_ctor_get(x_6845, 0); +lean_inc(x_6846); +x_6847 = lean_ctor_get(x_6845, 1); +lean_inc(x_6847); +lean_dec(x_6845); +x_6848 = lean_ctor_get(x_6846, 0); +lean_inc(x_6848); +x_6849 = lean_ctor_get(x_6846, 1); +lean_inc(x_6849); +if (lean_is_exclusive(x_6846)) { + lean_ctor_release(x_6846, 0); + lean_ctor_release(x_6846, 1); + x_6850 = x_6846; +} else { + lean_dec_ref(x_6846); + x_6850 = lean_box(0); +} +if (lean_is_scalar(x_6789)) { + x_6851 = lean_alloc_ctor(1, 1, 0); +} else { + x_6851 = x_6789; +} +lean_ctor_set(x_6851, 0, x_6848); +if (lean_is_scalar(x_6850)) { + x_6852 = lean_alloc_ctor(0, 2, 0); +} else { + x_6852 = x_6850; +} +lean_ctor_set(x_6852, 0, x_6851); +lean_ctor_set(x_6852, 1, x_6849); +x_6570 = x_6852; +x_6571 = x_6847; +goto block_6600; +} +else +{ +lean_object* x_6853; lean_object* x_6854; lean_object* x_6855; lean_object* x_6856; +lean_dec(x_6789); +lean_dec(x_5968); +lean_dec(x_5957); +lean_dec(x_153); +lean_dec(x_5); +lean_dec(x_4); +lean_dec(x_2); +lean_dec(x_1); +x_6853 = lean_ctor_get(x_6845, 0); +lean_inc(x_6853); +x_6854 = lean_ctor_get(x_6845, 1); +lean_inc(x_6854); +if (lean_is_exclusive(x_6845)) { + lean_ctor_release(x_6845, 0); + lean_ctor_release(x_6845, 1); + x_6855 = x_6845; +} else { + lean_dec_ref(x_6845); + x_6855 = lean_box(0); +} +if (lean_is_scalar(x_6855)) { + x_6856 = lean_alloc_ctor(1, 2, 0); +} else { + x_6856 = x_6855; +} +lean_ctor_set(x_6856, 0, x_6853); +lean_ctor_set(x_6856, 1, x_6854); +return x_6856; +} +} +else +{ +lean_object* x_6857; lean_object* x_6858; lean_object* x_6859; lean_object* x_6860; +lean_dec(x_6837); +lean_dec(x_6832); +lean_dec(x_6789); +lean_dec(x_5968); +lean_dec(x_5957); +lean_dec(x_153); +lean_dec(x_5); +lean_dec(x_4); +lean_dec(x_2); +lean_dec(x_1); +x_6857 = lean_ctor_get(x_6840, 0); +lean_inc(x_6857); +x_6858 = lean_ctor_get(x_6840, 1); +lean_inc(x_6858); +if (lean_is_exclusive(x_6840)) { + lean_ctor_release(x_6840, 0); + lean_ctor_release(x_6840, 1); + x_6859 = x_6840; +} else { + lean_dec_ref(x_6840); + x_6859 = lean_box(0); +} +if (lean_is_scalar(x_6859)) { + x_6860 = lean_alloc_ctor(1, 2, 0); +} else { + x_6860 = x_6859; +} +lean_ctor_set(x_6860, 0, x_6857); +lean_ctor_set(x_6860, 1, x_6858); +return x_6860; +} +} +} +else +{ +lean_object* x_6861; lean_object* x_6862; lean_object* x_6863; lean_object* x_6864; lean_object* x_6865; lean_object* x_6866; lean_object* x_6867; lean_object* x_6868; lean_object* x_6869; +lean_dec(x_6792); +lean_dec(x_6790); +lean_inc(x_5957); +lean_inc(x_153); +x_6861 = lean_alloc_ctor(7, 2, 0); +lean_ctor_set(x_6861, 0, x_153); +lean_ctor_set(x_6861, 1, x_5957); +x_6862 = lean_ctor_get(x_1, 0); +lean_inc(x_6862); +x_6863 = l_Lean_IR_ToIR_bindVar(x_6862, x_5963, x_4, x_5, x_6787); +x_6864 = lean_ctor_get(x_6863, 0); +lean_inc(x_6864); +x_6865 = lean_ctor_get(x_6863, 1); +lean_inc(x_6865); +lean_dec(x_6863); +x_6866 = lean_ctor_get(x_6864, 0); +lean_inc(x_6866); +x_6867 = lean_ctor_get(x_6864, 1); +lean_inc(x_6867); +lean_dec(x_6864); +x_6868 = lean_box(7); +lean_inc(x_5); +lean_inc(x_4); +lean_inc(x_2); +x_6869 = l_Lean_IR_ToIR_lowerLet___lambda__1(x_2, x_6866, x_6861, x_6868, x_6867, x_4, x_5, x_6865); +if (lean_obj_tag(x_6869) == 0) +{ +lean_object* x_6870; lean_object* x_6871; lean_object* x_6872; lean_object* x_6873; lean_object* x_6874; lean_object* x_6875; 
lean_object* x_6876; +x_6870 = lean_ctor_get(x_6869, 0); +lean_inc(x_6870); +x_6871 = lean_ctor_get(x_6869, 1); +lean_inc(x_6871); +lean_dec(x_6869); +x_6872 = lean_ctor_get(x_6870, 0); +lean_inc(x_6872); +x_6873 = lean_ctor_get(x_6870, 1); +lean_inc(x_6873); +if (lean_is_exclusive(x_6870)) { + lean_ctor_release(x_6870, 0); + lean_ctor_release(x_6870, 1); + x_6874 = x_6870; +} else { + lean_dec_ref(x_6870); + x_6874 = lean_box(0); +} +if (lean_is_scalar(x_6789)) { + x_6875 = lean_alloc_ctor(1, 1, 0); +} else { + x_6875 = x_6789; +} +lean_ctor_set(x_6875, 0, x_6872); +if (lean_is_scalar(x_6874)) { + x_6876 = lean_alloc_ctor(0, 2, 0); +} else { + x_6876 = x_6874; +} +lean_ctor_set(x_6876, 0, x_6875); +lean_ctor_set(x_6876, 1, x_6873); +x_6570 = x_6876; +x_6571 = x_6871; +goto block_6600; +} +else +{ +lean_object* x_6877; lean_object* x_6878; lean_object* x_6879; lean_object* x_6880; +lean_dec(x_6789); +lean_dec(x_5968); +lean_dec(x_5957); +lean_dec(x_153); +lean_dec(x_5); +lean_dec(x_4); +lean_dec(x_2); +lean_dec(x_1); +x_6877 = lean_ctor_get(x_6869, 0); +lean_inc(x_6877); +x_6878 = lean_ctor_get(x_6869, 1); +lean_inc(x_6878); +if (lean_is_exclusive(x_6869)) { + lean_ctor_release(x_6869, 0); + lean_ctor_release(x_6869, 1); + x_6879 = x_6869; +} else { + lean_dec_ref(x_6869); + x_6879 = lean_box(0); +} +if (lean_is_scalar(x_6879)) { + x_6880 = lean_alloc_ctor(1, 2, 0); +} else { + x_6880 = x_6879; +} +lean_ctor_set(x_6880, 0, x_6877); +lean_ctor_set(x_6880, 1, x_6878); +return x_6880; +} +} +} +} +block_6600: +{ +lean_object* x_6572; +x_6572 = lean_ctor_get(x_6570, 0); +lean_inc(x_6572); +if (lean_obj_tag(x_6572) == 0) +{ +lean_object* x_6573; lean_object* x_6574; lean_object* x_6575; lean_object* x_6576; lean_object* x_6577; lean_object* x_6578; lean_object* x_6579; lean_object* x_6580; lean_object* x_6581; lean_object* x_6582; +lean_dec(x_5968); +x_6573 = lean_ctor_get(x_6570, 1); +lean_inc(x_6573); +lean_dec(x_6570); +x_6574 = lean_alloc_ctor(6, 2, 0); +lean_ctor_set(x_6574, 0, x_153); +lean_ctor_set(x_6574, 1, x_5957); +x_6575 = lean_ctor_get(x_1, 0); +lean_inc(x_6575); +x_6576 = l_Lean_IR_ToIR_bindVar(x_6575, x_6573, x_4, x_5, x_6571); +x_6577 = lean_ctor_get(x_6576, 0); +lean_inc(x_6577); +x_6578 = lean_ctor_get(x_6576, 1); +lean_inc(x_6578); +lean_dec(x_6576); +x_6579 = lean_ctor_get(x_6577, 0); +lean_inc(x_6579); +x_6580 = lean_ctor_get(x_6577, 1); +lean_inc(x_6580); +lean_dec(x_6577); +x_6581 = lean_ctor_get(x_1, 2); +lean_inc(x_6581); +lean_dec(x_1); +lean_inc(x_5); +lean_inc(x_4); +x_6582 = l_Lean_IR_ToIR_lowerType(x_6581, x_6580, x_4, x_5, x_6578); +if (lean_obj_tag(x_6582) == 0) +{ +lean_object* x_6583; lean_object* x_6584; lean_object* x_6585; lean_object* x_6586; lean_object* x_6587; +x_6583 = lean_ctor_get(x_6582, 0); +lean_inc(x_6583); +x_6584 = lean_ctor_get(x_6582, 1); +lean_inc(x_6584); +lean_dec(x_6582); +x_6585 = lean_ctor_get(x_6583, 0); +lean_inc(x_6585); +x_6586 = lean_ctor_get(x_6583, 1); +lean_inc(x_6586); +lean_dec(x_6583); +x_6587 = l_Lean_IR_ToIR_lowerLet___lambda__1(x_2, x_6579, x_6574, x_6585, x_6586, x_4, x_5, x_6584); +return x_6587; +} +else +{ +uint8_t x_6588; +lean_dec(x_6579); +lean_dec(x_6574); +lean_dec(x_5); +lean_dec(x_4); +lean_dec(x_2); +x_6588 = !lean_is_exclusive(x_6582); +if (x_6588 == 0) +{ +return x_6582; +} +else +{ +lean_object* x_6589; lean_object* x_6590; lean_object* x_6591; +x_6589 = lean_ctor_get(x_6582, 0); +x_6590 = lean_ctor_get(x_6582, 1); +lean_inc(x_6590); +lean_inc(x_6589); +lean_dec(x_6582); +x_6591 = lean_alloc_ctor(1, 2, 0); 
+lean_ctor_set(x_6591, 0, x_6589); +lean_ctor_set(x_6591, 1, x_6590); +return x_6591; +} +} +} +else +{ +uint8_t x_6592; +lean_dec(x_5957); +lean_dec(x_153); +lean_dec(x_5); +lean_dec(x_4); +lean_dec(x_2); +lean_dec(x_1); +x_6592 = !lean_is_exclusive(x_6570); +if (x_6592 == 0) +{ +lean_object* x_6593; lean_object* x_6594; lean_object* x_6595; +x_6593 = lean_ctor_get(x_6570, 0); +lean_dec(x_6593); +x_6594 = lean_ctor_get(x_6572, 0); +lean_inc(x_6594); +lean_dec(x_6572); +lean_ctor_set(x_6570, 0, x_6594); +if (lean_is_scalar(x_5968)) { + x_6595 = lean_alloc_ctor(0, 2, 0); +} else { + x_6595 = x_5968; +} +lean_ctor_set(x_6595, 0, x_6570); +lean_ctor_set(x_6595, 1, x_6571); +return x_6595; +} +else +{ +lean_object* x_6596; lean_object* x_6597; lean_object* x_6598; lean_object* x_6599; +x_6596 = lean_ctor_get(x_6570, 1); +lean_inc(x_6596); +lean_dec(x_6570); +x_6597 = lean_ctor_get(x_6572, 0); +lean_inc(x_6597); +lean_dec(x_6572); +x_6598 = lean_alloc_ctor(0, 2, 0); +lean_ctor_set(x_6598, 0, x_6597); +lean_ctor_set(x_6598, 1, x_6596); +if (lean_is_scalar(x_5968)) { + x_6599 = lean_alloc_ctor(0, 2, 0); +} else { + x_6599 = x_5968; +} +lean_ctor_set(x_6599, 0, x_6598); +lean_ctor_set(x_6599, 1, x_6571); +return x_6599; +} +} +} +} +case 4: +{ +uint8_t x_6881; +lean_dec(x_5969); +lean_dec(x_5968); +lean_free_object(x_5959); +lean_dec(x_5945); +lean_dec(x_5944); +x_6881 = !lean_is_exclusive(x_5974); +if (x_6881 == 0) +{ +lean_object* x_6882; lean_object* x_6883; uint8_t x_6884; +x_6882 = lean_ctor_get(x_5974, 0); +lean_dec(x_6882); +x_6883 = l_Lean_IR_ToIR_lowerLet___closed__20; +x_6884 = lean_name_eq(x_153, x_6883); +if (x_6884 == 0) +{ +uint8_t x_6885; lean_object* x_6886; lean_object* x_6887; lean_object* x_6888; lean_object* x_6889; lean_object* x_6890; lean_object* x_6891; lean_object* x_6892; lean_object* x_6893; +lean_dec(x_5957); +lean_dec(x_2); +lean_dec(x_1); +x_6885 = 1; +x_6886 = l_Lean_IR_ToIR_lowerLet___closed__14; +x_6887 = l_Lean_Name_toString(x_153, x_6885, x_6886); +lean_ctor_set_tag(x_5974, 3); +lean_ctor_set(x_5974, 0, x_6887); +x_6888 = l_Lean_IR_ToIR_lowerLet___closed__22; +x_6889 = lean_alloc_ctor(5, 2, 0); +lean_ctor_set(x_6889, 0, x_6888); +lean_ctor_set(x_6889, 1, x_5974); +x_6890 = l_Lean_IR_ToIR_lowerLet___closed__24; +x_6891 = lean_alloc_ctor(5, 2, 0); +lean_ctor_set(x_6891, 0, x_6889); +lean_ctor_set(x_6891, 1, x_6890); +x_6892 = l_Lean_MessageData_ofFormat(x_6891); +x_6893 = l_Lean_throwError___at_Lean_IR_ToIR_lowerLet___spec__1(x_6892, x_5963, x_4, x_5, x_5967); +lean_dec(x_5); +lean_dec(x_4); +lean_dec(x_5963); +return x_6893; +} +else +{ +lean_object* x_6894; lean_object* x_6895; lean_object* x_6896; +lean_free_object(x_5974); +lean_dec(x_153); +x_6894 = l_Lean_IR_instInhabitedArg; +x_6895 = lean_unsigned_to_nat(2u); +x_6896 = lean_array_get(x_6894, x_5957, x_6895); +lean_dec(x_5957); +if (lean_obj_tag(x_6896) == 0) +{ +lean_object* x_6897; lean_object* x_6898; lean_object* x_6899; lean_object* x_6900; lean_object* x_6901; lean_object* x_6902; lean_object* x_6903; +x_6897 = lean_ctor_get(x_6896, 0); +lean_inc(x_6897); +lean_dec(x_6896); +x_6898 = lean_ctor_get(x_1, 0); +lean_inc(x_6898); +lean_dec(x_1); +x_6899 = l_Lean_IR_ToIR_bindVarToVarId(x_6898, x_6897, x_5963, x_4, x_5, x_5967); +x_6900 = lean_ctor_get(x_6899, 0); +lean_inc(x_6900); +x_6901 = lean_ctor_get(x_6899, 1); +lean_inc(x_6901); +lean_dec(x_6899); +x_6902 = lean_ctor_get(x_6900, 1); +lean_inc(x_6902); +lean_dec(x_6900); +x_6903 = l_Lean_IR_ToIR_lowerCode(x_2, x_6902, x_4, x_5, x_6901); +return x_6903; 
+} +else +{ +lean_object* x_6904; lean_object* x_6905; lean_object* x_6906; lean_object* x_6907; lean_object* x_6908; lean_object* x_6909; +x_6904 = lean_ctor_get(x_1, 0); +lean_inc(x_6904); +lean_dec(x_1); +x_6905 = l_Lean_IR_ToIR_bindErased(x_6904, x_5963, x_4, x_5, x_5967); +x_6906 = lean_ctor_get(x_6905, 0); +lean_inc(x_6906); +x_6907 = lean_ctor_get(x_6905, 1); +lean_inc(x_6907); +lean_dec(x_6905); +x_6908 = lean_ctor_get(x_6906, 1); +lean_inc(x_6908); +lean_dec(x_6906); +x_6909 = l_Lean_IR_ToIR_lowerCode(x_2, x_6908, x_4, x_5, x_6907); +return x_6909; +} +} +} +else +{ +lean_object* x_6910; uint8_t x_6911; +lean_dec(x_5974); +x_6910 = l_Lean_IR_ToIR_lowerLet___closed__20; +x_6911 = lean_name_eq(x_153, x_6910); +if (x_6911 == 0) +{ +uint8_t x_6912; lean_object* x_6913; lean_object* x_6914; lean_object* x_6915; lean_object* x_6916; lean_object* x_6917; lean_object* x_6918; lean_object* x_6919; lean_object* x_6920; lean_object* x_6921; +lean_dec(x_5957); +lean_dec(x_2); +lean_dec(x_1); +x_6912 = 1; +x_6913 = l_Lean_IR_ToIR_lowerLet___closed__14; +x_6914 = l_Lean_Name_toString(x_153, x_6912, x_6913); +x_6915 = lean_alloc_ctor(3, 1, 0); +lean_ctor_set(x_6915, 0, x_6914); +x_6916 = l_Lean_IR_ToIR_lowerLet___closed__22; +x_6917 = lean_alloc_ctor(5, 2, 0); +lean_ctor_set(x_6917, 0, x_6916); +lean_ctor_set(x_6917, 1, x_6915); +x_6918 = l_Lean_IR_ToIR_lowerLet___closed__24; +x_6919 = lean_alloc_ctor(5, 2, 0); +lean_ctor_set(x_6919, 0, x_6917); +lean_ctor_set(x_6919, 1, x_6918); +x_6920 = l_Lean_MessageData_ofFormat(x_6919); +x_6921 = l_Lean_throwError___at_Lean_IR_ToIR_lowerLet___spec__1(x_6920, x_5963, x_4, x_5, x_5967); +lean_dec(x_5); +lean_dec(x_4); +lean_dec(x_5963); +return x_6921; +} +else +{ +lean_object* x_6922; lean_object* x_6923; lean_object* x_6924; +lean_dec(x_153); +x_6922 = l_Lean_IR_instInhabitedArg; +x_6923 = lean_unsigned_to_nat(2u); +x_6924 = lean_array_get(x_6922, x_5957, x_6923); +lean_dec(x_5957); +if (lean_obj_tag(x_6924) == 0) +{ +lean_object* x_6925; lean_object* x_6926; lean_object* x_6927; lean_object* x_6928; lean_object* x_6929; lean_object* x_6930; lean_object* x_6931; +x_6925 = lean_ctor_get(x_6924, 0); +lean_inc(x_6925); +lean_dec(x_6924); +x_6926 = lean_ctor_get(x_1, 0); +lean_inc(x_6926); +lean_dec(x_1); +x_6927 = l_Lean_IR_ToIR_bindVarToVarId(x_6926, x_6925, x_5963, x_4, x_5, x_5967); +x_6928 = lean_ctor_get(x_6927, 0); +lean_inc(x_6928); +x_6929 = lean_ctor_get(x_6927, 1); +lean_inc(x_6929); +lean_dec(x_6927); +x_6930 = lean_ctor_get(x_6928, 1); +lean_inc(x_6930); +lean_dec(x_6928); +x_6931 = l_Lean_IR_ToIR_lowerCode(x_2, x_6930, x_4, x_5, x_6929); +return x_6931; +} +else +{ +lean_object* x_6932; lean_object* x_6933; lean_object* x_6934; lean_object* x_6935; lean_object* x_6936; lean_object* x_6937; +x_6932 = lean_ctor_get(x_1, 0); +lean_inc(x_6932); +lean_dec(x_1); +x_6933 = l_Lean_IR_ToIR_bindErased(x_6932, x_5963, x_4, x_5, x_5967); +x_6934 = lean_ctor_get(x_6933, 0); +lean_inc(x_6934); +x_6935 = lean_ctor_get(x_6933, 1); +lean_inc(x_6935); +lean_dec(x_6933); +x_6936 = lean_ctor_get(x_6934, 1); +lean_inc(x_6936); +lean_dec(x_6934); +x_6937 = l_Lean_IR_ToIR_lowerCode(x_2, x_6936, x_4, x_5, x_6935); +return x_6937; +} +} +} +} +case 5: +{ +lean_object* x_6938; lean_object* x_6939; +lean_dec(x_5974); +lean_dec(x_5969); +lean_dec(x_5968); +lean_free_object(x_5959); +lean_dec(x_5957); +lean_dec(x_5945); +lean_dec(x_5944); +lean_dec(x_153); +lean_dec(x_2); +lean_dec(x_1); +x_6938 = l_Lean_IR_ToIR_lowerLet___closed__26; +x_6939 = 
l_panic___at_Lean_IR_ToIR_lowerAlt_loop___spec__1(x_6938, x_5963, x_4, x_5, x_5967); +return x_6939; +} +case 6: +{ +lean_object* x_6940; uint8_t x_6941; +x_6940 = lean_ctor_get(x_5974, 0); +lean_inc(x_6940); +lean_dec(x_5974); +lean_inc(x_153); +x_6941 = l_Lean_isExtern(x_5969, x_153); +if (x_6941 == 0) +{ +lean_object* x_6942; +lean_dec(x_5968); +lean_free_object(x_5959); +lean_dec(x_5957); +lean_inc(x_5); +lean_inc(x_4); +x_6942 = l_Lean_IR_ToIR_getCtorInfo(x_153, x_5963, x_4, x_5, x_5967); +if (lean_obj_tag(x_6942) == 0) +{ +lean_object* x_6943; lean_object* x_6944; lean_object* x_6945; lean_object* x_6946; lean_object* x_6947; lean_object* x_6948; lean_object* x_6949; lean_object* x_6950; lean_object* x_6951; lean_object* x_6952; lean_object* x_6953; lean_object* x_6954; lean_object* x_6955; lean_object* x_6956; lean_object* x_6957; lean_object* x_6958; lean_object* x_6959; lean_object* x_6960; lean_object* x_6961; lean_object* x_6962; +x_6943 = lean_ctor_get(x_6942, 0); +lean_inc(x_6943); +x_6944 = lean_ctor_get(x_6943, 0); +lean_inc(x_6944); +x_6945 = lean_ctor_get(x_6942, 1); +lean_inc(x_6945); +lean_dec(x_6942); +x_6946 = lean_ctor_get(x_6943, 1); +lean_inc(x_6946); +lean_dec(x_6943); +x_6947 = lean_ctor_get(x_6944, 0); +lean_inc(x_6947); +x_6948 = lean_ctor_get(x_6944, 1); +lean_inc(x_6948); +lean_dec(x_6944); +x_6949 = lean_ctor_get(x_6940, 3); +lean_inc(x_6949); +lean_dec(x_6940); +x_6950 = lean_array_get_size(x_5944); +x_6951 = l_Array_extract___rarg(x_5944, x_6949, x_6950); +lean_dec(x_6950); +lean_dec(x_5944); +x_6952 = lean_array_get_size(x_6948); +x_6953 = lean_unsigned_to_nat(0u); +x_6954 = lean_unsigned_to_nat(1u); +if (lean_is_scalar(x_5945)) { + x_6955 = lean_alloc_ctor(0, 3, 0); +} else { + x_6955 = x_5945; + lean_ctor_set_tag(x_6955, 0); +} +lean_ctor_set(x_6955, 0, x_6953); +lean_ctor_set(x_6955, 1, x_6952); +lean_ctor_set(x_6955, 2, x_6954); +x_6956 = l_Lean_IR_ToIR_lowerLet___closed__27; +x_6957 = l_Std_Range_forIn_x27_loop___at_Lean_IR_ToIR_lowerLet___spec__4(x_6948, x_6951, x_6955, x_6955, x_6956, x_6953, lean_box(0), lean_box(0), x_6946, x_4, x_5, x_6945); +lean_dec(x_6955); +x_6958 = lean_ctor_get(x_6957, 0); +lean_inc(x_6958); +x_6959 = lean_ctor_get(x_6957, 1); +lean_inc(x_6959); +lean_dec(x_6957); +x_6960 = lean_ctor_get(x_6958, 0); +lean_inc(x_6960); +x_6961 = lean_ctor_get(x_6958, 1); +lean_inc(x_6961); +lean_dec(x_6958); +x_6962 = l_Lean_IR_ToIR_lowerLet___lambda__4(x_1, x_2, x_6947, x_6948, x_6951, x_6960, x_6961, x_4, x_5, x_6959); +lean_dec(x_6951); +lean_dec(x_6948); +return x_6962; +} +else +{ +uint8_t x_6963; +lean_dec(x_6940); +lean_dec(x_5945); +lean_dec(x_5944); +lean_dec(x_5); +lean_dec(x_4); +lean_dec(x_2); +lean_dec(x_1); +x_6963 = !lean_is_exclusive(x_6942); +if (x_6963 == 0) +{ +return x_6942; +} +else +{ +lean_object* x_6964; lean_object* x_6965; lean_object* x_6966; +x_6964 = lean_ctor_get(x_6942, 0); +x_6965 = lean_ctor_get(x_6942, 1); +lean_inc(x_6965); +lean_inc(x_6964); +lean_dec(x_6942); +x_6966 = lean_alloc_ctor(1, 2, 0); +lean_ctor_set(x_6966, 0, x_6964); +lean_ctor_set(x_6966, 1, x_6965); +return x_6966; +} +} +} +else +{ +lean_object* x_6967; lean_object* x_6968; lean_object* x_6998; lean_object* x_6999; +lean_dec(x_6940); +lean_dec(x_5945); +lean_dec(x_5944); +lean_inc(x_153); +x_6998 = l_Lean_Compiler_LCNF_getMonoDecl_x3f(x_153, x_4, x_5, x_5967); +x_6999 = lean_ctor_get(x_6998, 0); +lean_inc(x_6999); +if (lean_obj_tag(x_6999) == 0) +{ +lean_object* x_7000; lean_object* x_7001; +x_7000 = lean_ctor_get(x_6998, 1); 
+lean_inc(x_7000); +lean_dec(x_6998); +x_7001 = lean_box(0); +lean_ctor_set(x_5959, 0, x_7001); +x_6967 = x_5959; +x_6968 = x_7000; +goto block_6997; +} +else +{ +uint8_t x_7002; +lean_free_object(x_5959); +x_7002 = !lean_is_exclusive(x_6998); +if (x_7002 == 0) +{ +lean_object* x_7003; lean_object* x_7004; uint8_t x_7005; +x_7003 = lean_ctor_get(x_6998, 1); +x_7004 = lean_ctor_get(x_6998, 0); +lean_dec(x_7004); +x_7005 = !lean_is_exclusive(x_6999); +if (x_7005 == 0) +{ +lean_object* x_7006; lean_object* x_7007; lean_object* x_7008; lean_object* x_7009; uint8_t x_7010; +x_7006 = lean_ctor_get(x_6999, 0); +x_7007 = lean_array_get_size(x_5957); +x_7008 = lean_ctor_get(x_7006, 3); +lean_inc(x_7008); +lean_dec(x_7006); +x_7009 = lean_array_get_size(x_7008); +lean_dec(x_7008); +x_7010 = lean_nat_dec_lt(x_7007, x_7009); +if (x_7010 == 0) +{ +uint8_t x_7011; +x_7011 = lean_nat_dec_eq(x_7007, x_7009); +if (x_7011 == 0) +{ +lean_object* x_7012; lean_object* x_7013; lean_object* x_7014; lean_object* x_7015; lean_object* x_7016; lean_object* x_7017; lean_object* x_7018; lean_object* x_7019; lean_object* x_7020; lean_object* x_7021; lean_object* x_7022; lean_object* x_7023; lean_object* x_7024; lean_object* x_7025; lean_object* x_7026; lean_object* x_7027; +x_7012 = lean_unsigned_to_nat(0u); +x_7013 = l_Array_extract___rarg(x_5957, x_7012, x_7009); +x_7014 = l_Array_extract___rarg(x_5957, x_7009, x_7007); +lean_dec(x_7007); +lean_inc(x_153); +lean_ctor_set_tag(x_6998, 6); +lean_ctor_set(x_6998, 1, x_7013); +lean_ctor_set(x_6998, 0, x_153); +x_7015 = lean_ctor_get(x_1, 0); +lean_inc(x_7015); +x_7016 = l_Lean_IR_ToIR_bindVar(x_7015, x_5963, x_4, x_5, x_7003); +x_7017 = lean_ctor_get(x_7016, 0); +lean_inc(x_7017); +x_7018 = lean_ctor_get(x_7016, 1); +lean_inc(x_7018); +lean_dec(x_7016); +x_7019 = lean_ctor_get(x_7017, 0); +lean_inc(x_7019); +x_7020 = lean_ctor_get(x_7017, 1); +lean_inc(x_7020); +lean_dec(x_7017); +x_7021 = l_Lean_IR_ToIR_newVar(x_7020, x_4, x_5, x_7018); +x_7022 = lean_ctor_get(x_7021, 0); +lean_inc(x_7022); +x_7023 = lean_ctor_get(x_7021, 1); +lean_inc(x_7023); +lean_dec(x_7021); +x_7024 = lean_ctor_get(x_7022, 0); +lean_inc(x_7024); +x_7025 = lean_ctor_get(x_7022, 1); +lean_inc(x_7025); +lean_dec(x_7022); +x_7026 = lean_ctor_get(x_1, 2); +lean_inc(x_7026); +lean_inc(x_5); +lean_inc(x_4); +x_7027 = l_Lean_IR_ToIR_lowerType(x_7026, x_7025, x_4, x_5, x_7023); +if (lean_obj_tag(x_7027) == 0) +{ +lean_object* x_7028; lean_object* x_7029; lean_object* x_7030; lean_object* x_7031; lean_object* x_7032; +x_7028 = lean_ctor_get(x_7027, 0); +lean_inc(x_7028); +x_7029 = lean_ctor_get(x_7027, 1); +lean_inc(x_7029); +lean_dec(x_7027); +x_7030 = lean_ctor_get(x_7028, 0); +lean_inc(x_7030); +x_7031 = lean_ctor_get(x_7028, 1); +lean_inc(x_7031); +lean_dec(x_7028); +lean_inc(x_5); +lean_inc(x_4); +lean_inc(x_2); +x_7032 = l_Lean_IR_ToIR_lowerLet___lambda__3(x_2, x_7024, x_7014, x_7019, x_6998, x_7030, x_7031, x_4, x_5, x_7029); +if (lean_obj_tag(x_7032) == 0) +{ +lean_object* x_7033; lean_object* x_7034; uint8_t x_7035; +x_7033 = lean_ctor_get(x_7032, 0); +lean_inc(x_7033); +x_7034 = lean_ctor_get(x_7032, 1); +lean_inc(x_7034); +lean_dec(x_7032); +x_7035 = !lean_is_exclusive(x_7033); +if (x_7035 == 0) +{ +lean_object* x_7036; +x_7036 = lean_ctor_get(x_7033, 0); +lean_ctor_set(x_6999, 0, x_7036); +lean_ctor_set(x_7033, 0, x_6999); +x_6967 = x_7033; +x_6968 = x_7034; +goto block_6997; +} +else +{ +lean_object* x_7037; lean_object* x_7038; lean_object* x_7039; +x_7037 = lean_ctor_get(x_7033, 0); +x_7038 = 
lean_ctor_get(x_7033, 1); +lean_inc(x_7038); +lean_inc(x_7037); +lean_dec(x_7033); +lean_ctor_set(x_6999, 0, x_7037); +x_7039 = lean_alloc_ctor(0, 2, 0); +lean_ctor_set(x_7039, 0, x_6999); +lean_ctor_set(x_7039, 1, x_7038); +x_6967 = x_7039; +x_6968 = x_7034; +goto block_6997; +} +} +else +{ +uint8_t x_7040; +lean_free_object(x_6999); +lean_dec(x_5968); +lean_dec(x_5957); +lean_dec(x_153); +lean_dec(x_5); +lean_dec(x_4); +lean_dec(x_2); +lean_dec(x_1); +x_7040 = !lean_is_exclusive(x_7032); +if (x_7040 == 0) +{ +return x_7032; +} +else +{ +lean_object* x_7041; lean_object* x_7042; lean_object* x_7043; +x_7041 = lean_ctor_get(x_7032, 0); +x_7042 = lean_ctor_get(x_7032, 1); +lean_inc(x_7042); +lean_inc(x_7041); +lean_dec(x_7032); +x_7043 = lean_alloc_ctor(1, 2, 0); +lean_ctor_set(x_7043, 0, x_7041); +lean_ctor_set(x_7043, 1, x_7042); +return x_7043; +} +} +} +else +{ +uint8_t x_7044; +lean_dec(x_7024); +lean_dec(x_7019); +lean_dec(x_6998); +lean_dec(x_7014); +lean_free_object(x_6999); +lean_dec(x_5968); +lean_dec(x_5957); +lean_dec(x_153); +lean_dec(x_5); +lean_dec(x_4); +lean_dec(x_2); +lean_dec(x_1); +x_7044 = !lean_is_exclusive(x_7027); +if (x_7044 == 0) +{ +return x_7027; +} +else +{ +lean_object* x_7045; lean_object* x_7046; lean_object* x_7047; +x_7045 = lean_ctor_get(x_7027, 0); +x_7046 = lean_ctor_get(x_7027, 1); +lean_inc(x_7046); +lean_inc(x_7045); +lean_dec(x_7027); +x_7047 = lean_alloc_ctor(1, 2, 0); +lean_ctor_set(x_7047, 0, x_7045); +lean_ctor_set(x_7047, 1, x_7046); +return x_7047; +} +} +} +else +{ +lean_object* x_7048; lean_object* x_7049; lean_object* x_7050; lean_object* x_7051; lean_object* x_7052; lean_object* x_7053; lean_object* x_7054; lean_object* x_7055; +lean_dec(x_7009); +lean_dec(x_7007); +lean_inc(x_5957); +lean_inc(x_153); +lean_ctor_set_tag(x_6998, 6); +lean_ctor_set(x_6998, 1, x_5957); +lean_ctor_set(x_6998, 0, x_153); +x_7048 = lean_ctor_get(x_1, 0); +lean_inc(x_7048); +x_7049 = l_Lean_IR_ToIR_bindVar(x_7048, x_5963, x_4, x_5, x_7003); +x_7050 = lean_ctor_get(x_7049, 0); +lean_inc(x_7050); +x_7051 = lean_ctor_get(x_7049, 1); +lean_inc(x_7051); +lean_dec(x_7049); +x_7052 = lean_ctor_get(x_7050, 0); +lean_inc(x_7052); +x_7053 = lean_ctor_get(x_7050, 1); +lean_inc(x_7053); +lean_dec(x_7050); +x_7054 = lean_ctor_get(x_1, 2); +lean_inc(x_7054); +lean_inc(x_5); +lean_inc(x_4); +x_7055 = l_Lean_IR_ToIR_lowerType(x_7054, x_7053, x_4, x_5, x_7051); +if (lean_obj_tag(x_7055) == 0) +{ +lean_object* x_7056; lean_object* x_7057; lean_object* x_7058; lean_object* x_7059; lean_object* x_7060; +x_7056 = lean_ctor_get(x_7055, 0); +lean_inc(x_7056); +x_7057 = lean_ctor_get(x_7055, 1); +lean_inc(x_7057); +lean_dec(x_7055); +x_7058 = lean_ctor_get(x_7056, 0); +lean_inc(x_7058); +x_7059 = lean_ctor_get(x_7056, 1); +lean_inc(x_7059); +lean_dec(x_7056); +lean_inc(x_5); +lean_inc(x_4); +lean_inc(x_2); +x_7060 = l_Lean_IR_ToIR_lowerLet___lambda__1(x_2, x_7052, x_6998, x_7058, x_7059, x_4, x_5, x_7057); +if (lean_obj_tag(x_7060) == 0) +{ +lean_object* x_7061; lean_object* x_7062; uint8_t x_7063; +x_7061 = lean_ctor_get(x_7060, 0); +lean_inc(x_7061); +x_7062 = lean_ctor_get(x_7060, 1); +lean_inc(x_7062); +lean_dec(x_7060); +x_7063 = !lean_is_exclusive(x_7061); +if (x_7063 == 0) +{ +lean_object* x_7064; +x_7064 = lean_ctor_get(x_7061, 0); +lean_ctor_set(x_6999, 0, x_7064); +lean_ctor_set(x_7061, 0, x_6999); +x_6967 = x_7061; +x_6968 = x_7062; +goto block_6997; +} +else +{ +lean_object* x_7065; lean_object* x_7066; lean_object* x_7067; +x_7065 = lean_ctor_get(x_7061, 0); +x_7066 = 
lean_ctor_get(x_7061, 1); +lean_inc(x_7066); +lean_inc(x_7065); +lean_dec(x_7061); +lean_ctor_set(x_6999, 0, x_7065); +x_7067 = lean_alloc_ctor(0, 2, 0); +lean_ctor_set(x_7067, 0, x_6999); +lean_ctor_set(x_7067, 1, x_7066); +x_6967 = x_7067; +x_6968 = x_7062; +goto block_6997; +} +} +else +{ +uint8_t x_7068; +lean_free_object(x_6999); +lean_dec(x_5968); +lean_dec(x_5957); +lean_dec(x_153); +lean_dec(x_5); +lean_dec(x_4); +lean_dec(x_2); +lean_dec(x_1); +x_7068 = !lean_is_exclusive(x_7060); +if (x_7068 == 0) +{ +return x_7060; +} +else +{ +lean_object* x_7069; lean_object* x_7070; lean_object* x_7071; +x_7069 = lean_ctor_get(x_7060, 0); +x_7070 = lean_ctor_get(x_7060, 1); +lean_inc(x_7070); +lean_inc(x_7069); +lean_dec(x_7060); +x_7071 = lean_alloc_ctor(1, 2, 0); +lean_ctor_set(x_7071, 0, x_7069); +lean_ctor_set(x_7071, 1, x_7070); +return x_7071; +} +} +} +else +{ +uint8_t x_7072; +lean_dec(x_7052); +lean_dec(x_6998); +lean_free_object(x_6999); +lean_dec(x_5968); +lean_dec(x_5957); +lean_dec(x_153); +lean_dec(x_5); +lean_dec(x_4); +lean_dec(x_2); +lean_dec(x_1); +x_7072 = !lean_is_exclusive(x_7055); +if (x_7072 == 0) +{ +return x_7055; +} +else +{ +lean_object* x_7073; lean_object* x_7074; lean_object* x_7075; +x_7073 = lean_ctor_get(x_7055, 0); +x_7074 = lean_ctor_get(x_7055, 1); +lean_inc(x_7074); +lean_inc(x_7073); +lean_dec(x_7055); +x_7075 = lean_alloc_ctor(1, 2, 0); +lean_ctor_set(x_7075, 0, x_7073); +lean_ctor_set(x_7075, 1, x_7074); +return x_7075; +} +} +} +} +else +{ +lean_object* x_7076; lean_object* x_7077; lean_object* x_7078; lean_object* x_7079; lean_object* x_7080; lean_object* x_7081; lean_object* x_7082; lean_object* x_7083; +lean_dec(x_7009); +lean_dec(x_7007); +lean_inc(x_5957); +lean_inc(x_153); +lean_ctor_set_tag(x_6998, 7); +lean_ctor_set(x_6998, 1, x_5957); +lean_ctor_set(x_6998, 0, x_153); +x_7076 = lean_ctor_get(x_1, 0); +lean_inc(x_7076); +x_7077 = l_Lean_IR_ToIR_bindVar(x_7076, x_5963, x_4, x_5, x_7003); +x_7078 = lean_ctor_get(x_7077, 0); +lean_inc(x_7078); +x_7079 = lean_ctor_get(x_7077, 1); +lean_inc(x_7079); +lean_dec(x_7077); +x_7080 = lean_ctor_get(x_7078, 0); +lean_inc(x_7080); +x_7081 = lean_ctor_get(x_7078, 1); +lean_inc(x_7081); +lean_dec(x_7078); +x_7082 = lean_box(7); +lean_inc(x_5); +lean_inc(x_4); +lean_inc(x_2); +x_7083 = l_Lean_IR_ToIR_lowerLet___lambda__1(x_2, x_7080, x_6998, x_7082, x_7081, x_4, x_5, x_7079); +if (lean_obj_tag(x_7083) == 0) +{ +lean_object* x_7084; lean_object* x_7085; uint8_t x_7086; +x_7084 = lean_ctor_get(x_7083, 0); +lean_inc(x_7084); +x_7085 = lean_ctor_get(x_7083, 1); +lean_inc(x_7085); +lean_dec(x_7083); +x_7086 = !lean_is_exclusive(x_7084); +if (x_7086 == 0) +{ +lean_object* x_7087; +x_7087 = lean_ctor_get(x_7084, 0); +lean_ctor_set(x_6999, 0, x_7087); +lean_ctor_set(x_7084, 0, x_6999); +x_6967 = x_7084; +x_6968 = x_7085; +goto block_6997; +} +else +{ +lean_object* x_7088; lean_object* x_7089; lean_object* x_7090; +x_7088 = lean_ctor_get(x_7084, 0); +x_7089 = lean_ctor_get(x_7084, 1); +lean_inc(x_7089); +lean_inc(x_7088); +lean_dec(x_7084); +lean_ctor_set(x_6999, 0, x_7088); +x_7090 = lean_alloc_ctor(0, 2, 0); +lean_ctor_set(x_7090, 0, x_6999); +lean_ctor_set(x_7090, 1, x_7089); +x_6967 = x_7090; +x_6968 = x_7085; +goto block_6997; +} +} +else +{ +uint8_t x_7091; +lean_free_object(x_6999); +lean_dec(x_5968); +lean_dec(x_5957); +lean_dec(x_153); +lean_dec(x_5); +lean_dec(x_4); +lean_dec(x_2); +lean_dec(x_1); +x_7091 = !lean_is_exclusive(x_7083); +if (x_7091 == 0) +{ +return x_7083; +} +else +{ +lean_object* x_7092; 
lean_object* x_7093; lean_object* x_7094; +x_7092 = lean_ctor_get(x_7083, 0); +x_7093 = lean_ctor_get(x_7083, 1); +lean_inc(x_7093); +lean_inc(x_7092); +lean_dec(x_7083); +x_7094 = lean_alloc_ctor(1, 2, 0); +lean_ctor_set(x_7094, 0, x_7092); +lean_ctor_set(x_7094, 1, x_7093); +return x_7094; +} +} +} +} +else +{ +lean_object* x_7095; lean_object* x_7096; lean_object* x_7097; lean_object* x_7098; uint8_t x_7099; +x_7095 = lean_ctor_get(x_6999, 0); +lean_inc(x_7095); +lean_dec(x_6999); +x_7096 = lean_array_get_size(x_5957); +x_7097 = lean_ctor_get(x_7095, 3); +lean_inc(x_7097); +lean_dec(x_7095); +x_7098 = lean_array_get_size(x_7097); +lean_dec(x_7097); +x_7099 = lean_nat_dec_lt(x_7096, x_7098); +if (x_7099 == 0) +{ +uint8_t x_7100; +x_7100 = lean_nat_dec_eq(x_7096, x_7098); +if (x_7100 == 0) +{ +lean_object* x_7101; lean_object* x_7102; lean_object* x_7103; lean_object* x_7104; lean_object* x_7105; lean_object* x_7106; lean_object* x_7107; lean_object* x_7108; lean_object* x_7109; lean_object* x_7110; lean_object* x_7111; lean_object* x_7112; lean_object* x_7113; lean_object* x_7114; lean_object* x_7115; lean_object* x_7116; +x_7101 = lean_unsigned_to_nat(0u); +x_7102 = l_Array_extract___rarg(x_5957, x_7101, x_7098); +x_7103 = l_Array_extract___rarg(x_5957, x_7098, x_7096); +lean_dec(x_7096); +lean_inc(x_153); +lean_ctor_set_tag(x_6998, 6); +lean_ctor_set(x_6998, 1, x_7102); +lean_ctor_set(x_6998, 0, x_153); +x_7104 = lean_ctor_get(x_1, 0); +lean_inc(x_7104); +x_7105 = l_Lean_IR_ToIR_bindVar(x_7104, x_5963, x_4, x_5, x_7003); +x_7106 = lean_ctor_get(x_7105, 0); +lean_inc(x_7106); +x_7107 = lean_ctor_get(x_7105, 1); +lean_inc(x_7107); +lean_dec(x_7105); +x_7108 = lean_ctor_get(x_7106, 0); +lean_inc(x_7108); +x_7109 = lean_ctor_get(x_7106, 1); +lean_inc(x_7109); +lean_dec(x_7106); +x_7110 = l_Lean_IR_ToIR_newVar(x_7109, x_4, x_5, x_7107); +x_7111 = lean_ctor_get(x_7110, 0); +lean_inc(x_7111); +x_7112 = lean_ctor_get(x_7110, 1); +lean_inc(x_7112); +lean_dec(x_7110); +x_7113 = lean_ctor_get(x_7111, 0); +lean_inc(x_7113); +x_7114 = lean_ctor_get(x_7111, 1); +lean_inc(x_7114); +lean_dec(x_7111); +x_7115 = lean_ctor_get(x_1, 2); +lean_inc(x_7115); +lean_inc(x_5); +lean_inc(x_4); +x_7116 = l_Lean_IR_ToIR_lowerType(x_7115, x_7114, x_4, x_5, x_7112); +if (lean_obj_tag(x_7116) == 0) +{ +lean_object* x_7117; lean_object* x_7118; lean_object* x_7119; lean_object* x_7120; lean_object* x_7121; +x_7117 = lean_ctor_get(x_7116, 0); +lean_inc(x_7117); +x_7118 = lean_ctor_get(x_7116, 1); +lean_inc(x_7118); +lean_dec(x_7116); +x_7119 = lean_ctor_get(x_7117, 0); +lean_inc(x_7119); +x_7120 = lean_ctor_get(x_7117, 1); +lean_inc(x_7120); +lean_dec(x_7117); +lean_inc(x_5); +lean_inc(x_4); +lean_inc(x_2); +x_7121 = l_Lean_IR_ToIR_lowerLet___lambda__3(x_2, x_7113, x_7103, x_7108, x_6998, x_7119, x_7120, x_4, x_5, x_7118); +if (lean_obj_tag(x_7121) == 0) +{ +lean_object* x_7122; lean_object* x_7123; lean_object* x_7124; lean_object* x_7125; lean_object* x_7126; lean_object* x_7127; lean_object* x_7128; +x_7122 = lean_ctor_get(x_7121, 0); +lean_inc(x_7122); +x_7123 = lean_ctor_get(x_7121, 1); +lean_inc(x_7123); +lean_dec(x_7121); +x_7124 = lean_ctor_get(x_7122, 0); +lean_inc(x_7124); +x_7125 = lean_ctor_get(x_7122, 1); +lean_inc(x_7125); +if (lean_is_exclusive(x_7122)) { + lean_ctor_release(x_7122, 0); + lean_ctor_release(x_7122, 1); + x_7126 = x_7122; +} else { + lean_dec_ref(x_7122); + x_7126 = lean_box(0); +} +x_7127 = lean_alloc_ctor(1, 1, 0); +lean_ctor_set(x_7127, 0, x_7124); +if (lean_is_scalar(x_7126)) { + 
x_7128 = lean_alloc_ctor(0, 2, 0); +} else { + x_7128 = x_7126; +} +lean_ctor_set(x_7128, 0, x_7127); +lean_ctor_set(x_7128, 1, x_7125); +x_6967 = x_7128; +x_6968 = x_7123; +goto block_6997; +} +else +{ +lean_object* x_7129; lean_object* x_7130; lean_object* x_7131; lean_object* x_7132; +lean_dec(x_5968); +lean_dec(x_5957); +lean_dec(x_153); +lean_dec(x_5); +lean_dec(x_4); +lean_dec(x_2); +lean_dec(x_1); +x_7129 = lean_ctor_get(x_7121, 0); +lean_inc(x_7129); +x_7130 = lean_ctor_get(x_7121, 1); +lean_inc(x_7130); +if (lean_is_exclusive(x_7121)) { + lean_ctor_release(x_7121, 0); + lean_ctor_release(x_7121, 1); + x_7131 = x_7121; +} else { + lean_dec_ref(x_7121); + x_7131 = lean_box(0); +} +if (lean_is_scalar(x_7131)) { + x_7132 = lean_alloc_ctor(1, 2, 0); +} else { + x_7132 = x_7131; +} +lean_ctor_set(x_7132, 0, x_7129); +lean_ctor_set(x_7132, 1, x_7130); +return x_7132; +} +} +else +{ +lean_object* x_7133; lean_object* x_7134; lean_object* x_7135; lean_object* x_7136; +lean_dec(x_7113); +lean_dec(x_7108); +lean_dec(x_6998); +lean_dec(x_7103); +lean_dec(x_5968); +lean_dec(x_5957); +lean_dec(x_153); +lean_dec(x_5); +lean_dec(x_4); +lean_dec(x_2); +lean_dec(x_1); +x_7133 = lean_ctor_get(x_7116, 0); +lean_inc(x_7133); +x_7134 = lean_ctor_get(x_7116, 1); +lean_inc(x_7134); +if (lean_is_exclusive(x_7116)) { + lean_ctor_release(x_7116, 0); + lean_ctor_release(x_7116, 1); + x_7135 = x_7116; +} else { + lean_dec_ref(x_7116); + x_7135 = lean_box(0); +} +if (lean_is_scalar(x_7135)) { + x_7136 = lean_alloc_ctor(1, 2, 0); +} else { + x_7136 = x_7135; +} +lean_ctor_set(x_7136, 0, x_7133); +lean_ctor_set(x_7136, 1, x_7134); +return x_7136; +} +} +else +{ +lean_object* x_7137; lean_object* x_7138; lean_object* x_7139; lean_object* x_7140; lean_object* x_7141; lean_object* x_7142; lean_object* x_7143; lean_object* x_7144; +lean_dec(x_7098); +lean_dec(x_7096); +lean_inc(x_5957); +lean_inc(x_153); +lean_ctor_set_tag(x_6998, 6); +lean_ctor_set(x_6998, 1, x_5957); +lean_ctor_set(x_6998, 0, x_153); +x_7137 = lean_ctor_get(x_1, 0); +lean_inc(x_7137); +x_7138 = l_Lean_IR_ToIR_bindVar(x_7137, x_5963, x_4, x_5, x_7003); +x_7139 = lean_ctor_get(x_7138, 0); +lean_inc(x_7139); +x_7140 = lean_ctor_get(x_7138, 1); +lean_inc(x_7140); +lean_dec(x_7138); +x_7141 = lean_ctor_get(x_7139, 0); +lean_inc(x_7141); +x_7142 = lean_ctor_get(x_7139, 1); +lean_inc(x_7142); +lean_dec(x_7139); +x_7143 = lean_ctor_get(x_1, 2); +lean_inc(x_7143); +lean_inc(x_5); +lean_inc(x_4); +x_7144 = l_Lean_IR_ToIR_lowerType(x_7143, x_7142, x_4, x_5, x_7140); +if (lean_obj_tag(x_7144) == 0) +{ +lean_object* x_7145; lean_object* x_7146; lean_object* x_7147; lean_object* x_7148; lean_object* x_7149; +x_7145 = lean_ctor_get(x_7144, 0); +lean_inc(x_7145); +x_7146 = lean_ctor_get(x_7144, 1); +lean_inc(x_7146); +lean_dec(x_7144); +x_7147 = lean_ctor_get(x_7145, 0); +lean_inc(x_7147); +x_7148 = lean_ctor_get(x_7145, 1); +lean_inc(x_7148); +lean_dec(x_7145); +lean_inc(x_5); +lean_inc(x_4); +lean_inc(x_2); +x_7149 = l_Lean_IR_ToIR_lowerLet___lambda__1(x_2, x_7141, x_6998, x_7147, x_7148, x_4, x_5, x_7146); +if (lean_obj_tag(x_7149) == 0) +{ +lean_object* x_7150; lean_object* x_7151; lean_object* x_7152; lean_object* x_7153; lean_object* x_7154; lean_object* x_7155; lean_object* x_7156; +x_7150 = lean_ctor_get(x_7149, 0); +lean_inc(x_7150); +x_7151 = lean_ctor_get(x_7149, 1); +lean_inc(x_7151); +lean_dec(x_7149); +x_7152 = lean_ctor_get(x_7150, 0); +lean_inc(x_7152); +x_7153 = lean_ctor_get(x_7150, 1); +lean_inc(x_7153); +if (lean_is_exclusive(x_7150)) { + 
lean_ctor_release(x_7150, 0); + lean_ctor_release(x_7150, 1); + x_7154 = x_7150; +} else { + lean_dec_ref(x_7150); + x_7154 = lean_box(0); +} +x_7155 = lean_alloc_ctor(1, 1, 0); +lean_ctor_set(x_7155, 0, x_7152); +if (lean_is_scalar(x_7154)) { + x_7156 = lean_alloc_ctor(0, 2, 0); +} else { + x_7156 = x_7154; +} +lean_ctor_set(x_7156, 0, x_7155); +lean_ctor_set(x_7156, 1, x_7153); +x_6967 = x_7156; +x_6968 = x_7151; +goto block_6997; +} +else +{ +lean_object* x_7157; lean_object* x_7158; lean_object* x_7159; lean_object* x_7160; +lean_dec(x_5968); +lean_dec(x_5957); +lean_dec(x_153); +lean_dec(x_5); +lean_dec(x_4); +lean_dec(x_2); +lean_dec(x_1); +x_7157 = lean_ctor_get(x_7149, 0); +lean_inc(x_7157); +x_7158 = lean_ctor_get(x_7149, 1); +lean_inc(x_7158); +if (lean_is_exclusive(x_7149)) { + lean_ctor_release(x_7149, 0); + lean_ctor_release(x_7149, 1); + x_7159 = x_7149; +} else { + lean_dec_ref(x_7149); + x_7159 = lean_box(0); +} +if (lean_is_scalar(x_7159)) { + x_7160 = lean_alloc_ctor(1, 2, 0); +} else { + x_7160 = x_7159; +} +lean_ctor_set(x_7160, 0, x_7157); +lean_ctor_set(x_7160, 1, x_7158); +return x_7160; +} +} +else +{ +lean_object* x_7161; lean_object* x_7162; lean_object* x_7163; lean_object* x_7164; +lean_dec(x_7141); +lean_dec(x_6998); +lean_dec(x_5968); +lean_dec(x_5957); +lean_dec(x_153); +lean_dec(x_5); +lean_dec(x_4); +lean_dec(x_2); +lean_dec(x_1); +x_7161 = lean_ctor_get(x_7144, 0); +lean_inc(x_7161); +x_7162 = lean_ctor_get(x_7144, 1); +lean_inc(x_7162); +if (lean_is_exclusive(x_7144)) { + lean_ctor_release(x_7144, 0); + lean_ctor_release(x_7144, 1); + x_7163 = x_7144; +} else { + lean_dec_ref(x_7144); + x_7163 = lean_box(0); +} +if (lean_is_scalar(x_7163)) { + x_7164 = lean_alloc_ctor(1, 2, 0); +} else { + x_7164 = x_7163; +} +lean_ctor_set(x_7164, 0, x_7161); +lean_ctor_set(x_7164, 1, x_7162); +return x_7164; +} +} +} +else +{ +lean_object* x_7165; lean_object* x_7166; lean_object* x_7167; lean_object* x_7168; lean_object* x_7169; lean_object* x_7170; lean_object* x_7171; lean_object* x_7172; +lean_dec(x_7098); +lean_dec(x_7096); +lean_inc(x_5957); +lean_inc(x_153); +lean_ctor_set_tag(x_6998, 7); +lean_ctor_set(x_6998, 1, x_5957); +lean_ctor_set(x_6998, 0, x_153); +x_7165 = lean_ctor_get(x_1, 0); +lean_inc(x_7165); +x_7166 = l_Lean_IR_ToIR_bindVar(x_7165, x_5963, x_4, x_5, x_7003); +x_7167 = lean_ctor_get(x_7166, 0); +lean_inc(x_7167); +x_7168 = lean_ctor_get(x_7166, 1); +lean_inc(x_7168); +lean_dec(x_7166); +x_7169 = lean_ctor_get(x_7167, 0); +lean_inc(x_7169); +x_7170 = lean_ctor_get(x_7167, 1); +lean_inc(x_7170); +lean_dec(x_7167); +x_7171 = lean_box(7); +lean_inc(x_5); +lean_inc(x_4); +lean_inc(x_2); +x_7172 = l_Lean_IR_ToIR_lowerLet___lambda__1(x_2, x_7169, x_6998, x_7171, x_7170, x_4, x_5, x_7168); +if (lean_obj_tag(x_7172) == 0) +{ +lean_object* x_7173; lean_object* x_7174; lean_object* x_7175; lean_object* x_7176; lean_object* x_7177; lean_object* x_7178; lean_object* x_7179; +x_7173 = lean_ctor_get(x_7172, 0); +lean_inc(x_7173); +x_7174 = lean_ctor_get(x_7172, 1); +lean_inc(x_7174); +lean_dec(x_7172); +x_7175 = lean_ctor_get(x_7173, 0); +lean_inc(x_7175); +x_7176 = lean_ctor_get(x_7173, 1); +lean_inc(x_7176); +if (lean_is_exclusive(x_7173)) { + lean_ctor_release(x_7173, 0); + lean_ctor_release(x_7173, 1); + x_7177 = x_7173; +} else { + lean_dec_ref(x_7173); + x_7177 = lean_box(0); +} +x_7178 = lean_alloc_ctor(1, 1, 0); +lean_ctor_set(x_7178, 0, x_7175); +if (lean_is_scalar(x_7177)) { + x_7179 = lean_alloc_ctor(0, 2, 0); +} else { + x_7179 = x_7177; +} 
+lean_ctor_set(x_7179, 0, x_7178); +lean_ctor_set(x_7179, 1, x_7176); +x_6967 = x_7179; +x_6968 = x_7174; +goto block_6997; +} +else +{ +lean_object* x_7180; lean_object* x_7181; lean_object* x_7182; lean_object* x_7183; +lean_dec(x_5968); +lean_dec(x_5957); +lean_dec(x_153); +lean_dec(x_5); +lean_dec(x_4); +lean_dec(x_2); +lean_dec(x_1); +x_7180 = lean_ctor_get(x_7172, 0); +lean_inc(x_7180); +x_7181 = lean_ctor_get(x_7172, 1); +lean_inc(x_7181); +if (lean_is_exclusive(x_7172)) { + lean_ctor_release(x_7172, 0); + lean_ctor_release(x_7172, 1); + x_7182 = x_7172; +} else { + lean_dec_ref(x_7172); + x_7182 = lean_box(0); +} +if (lean_is_scalar(x_7182)) { + x_7183 = lean_alloc_ctor(1, 2, 0); +} else { + x_7183 = x_7182; +} +lean_ctor_set(x_7183, 0, x_7180); +lean_ctor_set(x_7183, 1, x_7181); +return x_7183; +} +} +} +} +else +{ +lean_object* x_7184; lean_object* x_7185; lean_object* x_7186; lean_object* x_7187; lean_object* x_7188; lean_object* x_7189; uint8_t x_7190; +x_7184 = lean_ctor_get(x_6998, 1); +lean_inc(x_7184); +lean_dec(x_6998); +x_7185 = lean_ctor_get(x_6999, 0); +lean_inc(x_7185); +if (lean_is_exclusive(x_6999)) { + lean_ctor_release(x_6999, 0); + x_7186 = x_6999; +} else { + lean_dec_ref(x_6999); + x_7186 = lean_box(0); +} +x_7187 = lean_array_get_size(x_5957); +x_7188 = lean_ctor_get(x_7185, 3); +lean_inc(x_7188); +lean_dec(x_7185); +x_7189 = lean_array_get_size(x_7188); +lean_dec(x_7188); +x_7190 = lean_nat_dec_lt(x_7187, x_7189); +if (x_7190 == 0) +{ +uint8_t x_7191; +x_7191 = lean_nat_dec_eq(x_7187, x_7189); +if (x_7191 == 0) +{ +lean_object* x_7192; lean_object* x_7193; lean_object* x_7194; lean_object* x_7195; lean_object* x_7196; lean_object* x_7197; lean_object* x_7198; lean_object* x_7199; lean_object* x_7200; lean_object* x_7201; lean_object* x_7202; lean_object* x_7203; lean_object* x_7204; lean_object* x_7205; lean_object* x_7206; lean_object* x_7207; lean_object* x_7208; +x_7192 = lean_unsigned_to_nat(0u); +x_7193 = l_Array_extract___rarg(x_5957, x_7192, x_7189); +x_7194 = l_Array_extract___rarg(x_5957, x_7189, x_7187); +lean_dec(x_7187); +lean_inc(x_153); +x_7195 = lean_alloc_ctor(6, 2, 0); +lean_ctor_set(x_7195, 0, x_153); +lean_ctor_set(x_7195, 1, x_7193); +x_7196 = lean_ctor_get(x_1, 0); +lean_inc(x_7196); +x_7197 = l_Lean_IR_ToIR_bindVar(x_7196, x_5963, x_4, x_5, x_7184); +x_7198 = lean_ctor_get(x_7197, 0); +lean_inc(x_7198); +x_7199 = lean_ctor_get(x_7197, 1); +lean_inc(x_7199); +lean_dec(x_7197); +x_7200 = lean_ctor_get(x_7198, 0); +lean_inc(x_7200); +x_7201 = lean_ctor_get(x_7198, 1); +lean_inc(x_7201); +lean_dec(x_7198); +x_7202 = l_Lean_IR_ToIR_newVar(x_7201, x_4, x_5, x_7199); +x_7203 = lean_ctor_get(x_7202, 0); +lean_inc(x_7203); +x_7204 = lean_ctor_get(x_7202, 1); +lean_inc(x_7204); +lean_dec(x_7202); +x_7205 = lean_ctor_get(x_7203, 0); +lean_inc(x_7205); +x_7206 = lean_ctor_get(x_7203, 1); +lean_inc(x_7206); +lean_dec(x_7203); +x_7207 = lean_ctor_get(x_1, 2); +lean_inc(x_7207); +lean_inc(x_5); +lean_inc(x_4); +x_7208 = l_Lean_IR_ToIR_lowerType(x_7207, x_7206, x_4, x_5, x_7204); +if (lean_obj_tag(x_7208) == 0) +{ +lean_object* x_7209; lean_object* x_7210; lean_object* x_7211; lean_object* x_7212; lean_object* x_7213; +x_7209 = lean_ctor_get(x_7208, 0); +lean_inc(x_7209); +x_7210 = lean_ctor_get(x_7208, 1); +lean_inc(x_7210); +lean_dec(x_7208); +x_7211 = lean_ctor_get(x_7209, 0); +lean_inc(x_7211); +x_7212 = lean_ctor_get(x_7209, 1); +lean_inc(x_7212); +lean_dec(x_7209); +lean_inc(x_5); +lean_inc(x_4); +lean_inc(x_2); +x_7213 = 
l_Lean_IR_ToIR_lowerLet___lambda__3(x_2, x_7205, x_7194, x_7200, x_7195, x_7211, x_7212, x_4, x_5, x_7210); +if (lean_obj_tag(x_7213) == 0) +{ +lean_object* x_7214; lean_object* x_7215; lean_object* x_7216; lean_object* x_7217; lean_object* x_7218; lean_object* x_7219; lean_object* x_7220; +x_7214 = lean_ctor_get(x_7213, 0); +lean_inc(x_7214); +x_7215 = lean_ctor_get(x_7213, 1); +lean_inc(x_7215); +lean_dec(x_7213); +x_7216 = lean_ctor_get(x_7214, 0); +lean_inc(x_7216); +x_7217 = lean_ctor_get(x_7214, 1); +lean_inc(x_7217); +if (lean_is_exclusive(x_7214)) { + lean_ctor_release(x_7214, 0); + lean_ctor_release(x_7214, 1); + x_7218 = x_7214; +} else { + lean_dec_ref(x_7214); + x_7218 = lean_box(0); +} +if (lean_is_scalar(x_7186)) { + x_7219 = lean_alloc_ctor(1, 1, 0); +} else { + x_7219 = x_7186; +} +lean_ctor_set(x_7219, 0, x_7216); +if (lean_is_scalar(x_7218)) { + x_7220 = lean_alloc_ctor(0, 2, 0); +} else { + x_7220 = x_7218; +} +lean_ctor_set(x_7220, 0, x_7219); +lean_ctor_set(x_7220, 1, x_7217); +x_6967 = x_7220; +x_6968 = x_7215; +goto block_6997; +} +else +{ +lean_object* x_7221; lean_object* x_7222; lean_object* x_7223; lean_object* x_7224; +lean_dec(x_7186); +lean_dec(x_5968); +lean_dec(x_5957); +lean_dec(x_153); +lean_dec(x_5); +lean_dec(x_4); +lean_dec(x_2); +lean_dec(x_1); +x_7221 = lean_ctor_get(x_7213, 0); +lean_inc(x_7221); +x_7222 = lean_ctor_get(x_7213, 1); +lean_inc(x_7222); +if (lean_is_exclusive(x_7213)) { + lean_ctor_release(x_7213, 0); + lean_ctor_release(x_7213, 1); + x_7223 = x_7213; +} else { + lean_dec_ref(x_7213); + x_7223 = lean_box(0); +} +if (lean_is_scalar(x_7223)) { + x_7224 = lean_alloc_ctor(1, 2, 0); +} else { + x_7224 = x_7223; +} +lean_ctor_set(x_7224, 0, x_7221); +lean_ctor_set(x_7224, 1, x_7222); +return x_7224; +} +} +else +{ +lean_object* x_7225; lean_object* x_7226; lean_object* x_7227; lean_object* x_7228; +lean_dec(x_7205); +lean_dec(x_7200); +lean_dec(x_7195); +lean_dec(x_7194); +lean_dec(x_7186); +lean_dec(x_5968); +lean_dec(x_5957); +lean_dec(x_153); +lean_dec(x_5); +lean_dec(x_4); +lean_dec(x_2); +lean_dec(x_1); +x_7225 = lean_ctor_get(x_7208, 0); +lean_inc(x_7225); +x_7226 = lean_ctor_get(x_7208, 1); +lean_inc(x_7226); +if (lean_is_exclusive(x_7208)) { + lean_ctor_release(x_7208, 0); + lean_ctor_release(x_7208, 1); + x_7227 = x_7208; +} else { + lean_dec_ref(x_7208); + x_7227 = lean_box(0); +} +if (lean_is_scalar(x_7227)) { + x_7228 = lean_alloc_ctor(1, 2, 0); +} else { + x_7228 = x_7227; +} +lean_ctor_set(x_7228, 0, x_7225); +lean_ctor_set(x_7228, 1, x_7226); +return x_7228; +} +} +else +{ +lean_object* x_7229; lean_object* x_7230; lean_object* x_7231; lean_object* x_7232; lean_object* x_7233; lean_object* x_7234; lean_object* x_7235; lean_object* x_7236; lean_object* x_7237; +lean_dec(x_7189); +lean_dec(x_7187); +lean_inc(x_5957); +lean_inc(x_153); +x_7229 = lean_alloc_ctor(6, 2, 0); +lean_ctor_set(x_7229, 0, x_153); +lean_ctor_set(x_7229, 1, x_5957); +x_7230 = lean_ctor_get(x_1, 0); +lean_inc(x_7230); +x_7231 = l_Lean_IR_ToIR_bindVar(x_7230, x_5963, x_4, x_5, x_7184); +x_7232 = lean_ctor_get(x_7231, 0); +lean_inc(x_7232); +x_7233 = lean_ctor_get(x_7231, 1); +lean_inc(x_7233); +lean_dec(x_7231); +x_7234 = lean_ctor_get(x_7232, 0); +lean_inc(x_7234); +x_7235 = lean_ctor_get(x_7232, 1); +lean_inc(x_7235); +lean_dec(x_7232); +x_7236 = lean_ctor_get(x_1, 2); +lean_inc(x_7236); +lean_inc(x_5); +lean_inc(x_4); +x_7237 = l_Lean_IR_ToIR_lowerType(x_7236, x_7235, x_4, x_5, x_7233); +if (lean_obj_tag(x_7237) == 0) +{ +lean_object* x_7238; lean_object* 
x_7239; lean_object* x_7240; lean_object* x_7241; lean_object* x_7242; +x_7238 = lean_ctor_get(x_7237, 0); +lean_inc(x_7238); +x_7239 = lean_ctor_get(x_7237, 1); +lean_inc(x_7239); +lean_dec(x_7237); +x_7240 = lean_ctor_get(x_7238, 0); +lean_inc(x_7240); +x_7241 = lean_ctor_get(x_7238, 1); +lean_inc(x_7241); +lean_dec(x_7238); +lean_inc(x_5); +lean_inc(x_4); +lean_inc(x_2); +x_7242 = l_Lean_IR_ToIR_lowerLet___lambda__1(x_2, x_7234, x_7229, x_7240, x_7241, x_4, x_5, x_7239); +if (lean_obj_tag(x_7242) == 0) +{ +lean_object* x_7243; lean_object* x_7244; lean_object* x_7245; lean_object* x_7246; lean_object* x_7247; lean_object* x_7248; lean_object* x_7249; +x_7243 = lean_ctor_get(x_7242, 0); +lean_inc(x_7243); +x_7244 = lean_ctor_get(x_7242, 1); +lean_inc(x_7244); +lean_dec(x_7242); +x_7245 = lean_ctor_get(x_7243, 0); +lean_inc(x_7245); +x_7246 = lean_ctor_get(x_7243, 1); +lean_inc(x_7246); +if (lean_is_exclusive(x_7243)) { + lean_ctor_release(x_7243, 0); + lean_ctor_release(x_7243, 1); + x_7247 = x_7243; +} else { + lean_dec_ref(x_7243); + x_7247 = lean_box(0); +} +if (lean_is_scalar(x_7186)) { + x_7248 = lean_alloc_ctor(1, 1, 0); +} else { + x_7248 = x_7186; +} +lean_ctor_set(x_7248, 0, x_7245); +if (lean_is_scalar(x_7247)) { + x_7249 = lean_alloc_ctor(0, 2, 0); +} else { + x_7249 = x_7247; +} +lean_ctor_set(x_7249, 0, x_7248); +lean_ctor_set(x_7249, 1, x_7246); +x_6967 = x_7249; +x_6968 = x_7244; +goto block_6997; +} +else +{ +lean_object* x_7250; lean_object* x_7251; lean_object* x_7252; lean_object* x_7253; +lean_dec(x_7186); +lean_dec(x_5968); +lean_dec(x_5957); +lean_dec(x_153); +lean_dec(x_5); +lean_dec(x_4); +lean_dec(x_2); +lean_dec(x_1); +x_7250 = lean_ctor_get(x_7242, 0); +lean_inc(x_7250); +x_7251 = lean_ctor_get(x_7242, 1); +lean_inc(x_7251); +if (lean_is_exclusive(x_7242)) { + lean_ctor_release(x_7242, 0); + lean_ctor_release(x_7242, 1); + x_7252 = x_7242; +} else { + lean_dec_ref(x_7242); + x_7252 = lean_box(0); +} +if (lean_is_scalar(x_7252)) { + x_7253 = lean_alloc_ctor(1, 2, 0); +} else { + x_7253 = x_7252; +} +lean_ctor_set(x_7253, 0, x_7250); +lean_ctor_set(x_7253, 1, x_7251); +return x_7253; +} +} +else +{ +lean_object* x_7254; lean_object* x_7255; lean_object* x_7256; lean_object* x_7257; +lean_dec(x_7234); +lean_dec(x_7229); +lean_dec(x_7186); +lean_dec(x_5968); +lean_dec(x_5957); +lean_dec(x_153); +lean_dec(x_5); +lean_dec(x_4); +lean_dec(x_2); +lean_dec(x_1); +x_7254 = lean_ctor_get(x_7237, 0); +lean_inc(x_7254); +x_7255 = lean_ctor_get(x_7237, 1); +lean_inc(x_7255); +if (lean_is_exclusive(x_7237)) { + lean_ctor_release(x_7237, 0); + lean_ctor_release(x_7237, 1); + x_7256 = x_7237; +} else { + lean_dec_ref(x_7237); + x_7256 = lean_box(0); +} +if (lean_is_scalar(x_7256)) { + x_7257 = lean_alloc_ctor(1, 2, 0); +} else { + x_7257 = x_7256; +} +lean_ctor_set(x_7257, 0, x_7254); +lean_ctor_set(x_7257, 1, x_7255); +return x_7257; +} +} +} +else +{ +lean_object* x_7258; lean_object* x_7259; lean_object* x_7260; lean_object* x_7261; lean_object* x_7262; lean_object* x_7263; lean_object* x_7264; lean_object* x_7265; lean_object* x_7266; +lean_dec(x_7189); +lean_dec(x_7187); +lean_inc(x_5957); +lean_inc(x_153); +x_7258 = lean_alloc_ctor(7, 2, 0); +lean_ctor_set(x_7258, 0, x_153); +lean_ctor_set(x_7258, 1, x_5957); +x_7259 = lean_ctor_get(x_1, 0); +lean_inc(x_7259); +x_7260 = l_Lean_IR_ToIR_bindVar(x_7259, x_5963, x_4, x_5, x_7184); +x_7261 = lean_ctor_get(x_7260, 0); +lean_inc(x_7261); +x_7262 = lean_ctor_get(x_7260, 1); +lean_inc(x_7262); +lean_dec(x_7260); +x_7263 = 
lean_ctor_get(x_7261, 0); +lean_inc(x_7263); +x_7264 = lean_ctor_get(x_7261, 1); +lean_inc(x_7264); +lean_dec(x_7261); +x_7265 = lean_box(7); +lean_inc(x_5); +lean_inc(x_4); +lean_inc(x_2); +x_7266 = l_Lean_IR_ToIR_lowerLet___lambda__1(x_2, x_7263, x_7258, x_7265, x_7264, x_4, x_5, x_7262); +if (lean_obj_tag(x_7266) == 0) +{ +lean_object* x_7267; lean_object* x_7268; lean_object* x_7269; lean_object* x_7270; lean_object* x_7271; lean_object* x_7272; lean_object* x_7273; +x_7267 = lean_ctor_get(x_7266, 0); +lean_inc(x_7267); +x_7268 = lean_ctor_get(x_7266, 1); +lean_inc(x_7268); +lean_dec(x_7266); +x_7269 = lean_ctor_get(x_7267, 0); +lean_inc(x_7269); +x_7270 = lean_ctor_get(x_7267, 1); +lean_inc(x_7270); +if (lean_is_exclusive(x_7267)) { + lean_ctor_release(x_7267, 0); + lean_ctor_release(x_7267, 1); + x_7271 = x_7267; +} else { + lean_dec_ref(x_7267); + x_7271 = lean_box(0); +} +if (lean_is_scalar(x_7186)) { + x_7272 = lean_alloc_ctor(1, 1, 0); +} else { + x_7272 = x_7186; +} +lean_ctor_set(x_7272, 0, x_7269); +if (lean_is_scalar(x_7271)) { + x_7273 = lean_alloc_ctor(0, 2, 0); +} else { + x_7273 = x_7271; +} +lean_ctor_set(x_7273, 0, x_7272); +lean_ctor_set(x_7273, 1, x_7270); +x_6967 = x_7273; +x_6968 = x_7268; +goto block_6997; +} +else +{ +lean_object* x_7274; lean_object* x_7275; lean_object* x_7276; lean_object* x_7277; +lean_dec(x_7186); +lean_dec(x_5968); +lean_dec(x_5957); +lean_dec(x_153); +lean_dec(x_5); +lean_dec(x_4); +lean_dec(x_2); +lean_dec(x_1); +x_7274 = lean_ctor_get(x_7266, 0); +lean_inc(x_7274); +x_7275 = lean_ctor_get(x_7266, 1); +lean_inc(x_7275); +if (lean_is_exclusive(x_7266)) { + lean_ctor_release(x_7266, 0); + lean_ctor_release(x_7266, 1); + x_7276 = x_7266; +} else { + lean_dec_ref(x_7266); + x_7276 = lean_box(0); +} +if (lean_is_scalar(x_7276)) { + x_7277 = lean_alloc_ctor(1, 2, 0); +} else { + x_7277 = x_7276; +} +lean_ctor_set(x_7277, 0, x_7274); +lean_ctor_set(x_7277, 1, x_7275); +return x_7277; +} +} +} +} +block_6997: +{ +lean_object* x_6969; +x_6969 = lean_ctor_get(x_6967, 0); +lean_inc(x_6969); +if (lean_obj_tag(x_6969) == 0) +{ +lean_object* x_6970; lean_object* x_6971; lean_object* x_6972; lean_object* x_6973; lean_object* x_6974; lean_object* x_6975; lean_object* x_6976; lean_object* x_6977; lean_object* x_6978; lean_object* x_6979; +lean_dec(x_5968); +x_6970 = lean_ctor_get(x_6967, 1); +lean_inc(x_6970); +lean_dec(x_6967); +x_6971 = lean_alloc_ctor(6, 2, 0); +lean_ctor_set(x_6971, 0, x_153); +lean_ctor_set(x_6971, 1, x_5957); +x_6972 = lean_ctor_get(x_1, 0); +lean_inc(x_6972); +x_6973 = l_Lean_IR_ToIR_bindVar(x_6972, x_6970, x_4, x_5, x_6968); +x_6974 = lean_ctor_get(x_6973, 0); +lean_inc(x_6974); +x_6975 = lean_ctor_get(x_6973, 1); +lean_inc(x_6975); +lean_dec(x_6973); +x_6976 = lean_ctor_get(x_6974, 0); +lean_inc(x_6976); +x_6977 = lean_ctor_get(x_6974, 1); +lean_inc(x_6977); +lean_dec(x_6974); +x_6978 = lean_ctor_get(x_1, 2); +lean_inc(x_6978); +lean_dec(x_1); +lean_inc(x_5); +lean_inc(x_4); +x_6979 = l_Lean_IR_ToIR_lowerType(x_6978, x_6977, x_4, x_5, x_6975); +if (lean_obj_tag(x_6979) == 0) +{ +lean_object* x_6980; lean_object* x_6981; lean_object* x_6982; lean_object* x_6983; lean_object* x_6984; +x_6980 = lean_ctor_get(x_6979, 0); +lean_inc(x_6980); +x_6981 = lean_ctor_get(x_6979, 1); +lean_inc(x_6981); +lean_dec(x_6979); +x_6982 = lean_ctor_get(x_6980, 0); +lean_inc(x_6982); +x_6983 = lean_ctor_get(x_6980, 1); +lean_inc(x_6983); +lean_dec(x_6980); +x_6984 = l_Lean_IR_ToIR_lowerLet___lambda__1(x_2, x_6976, x_6971, x_6982, x_6983, x_4, x_5, 
x_6981); +return x_6984; +} +else +{ +uint8_t x_6985; +lean_dec(x_6976); +lean_dec(x_6971); +lean_dec(x_5); +lean_dec(x_4); +lean_dec(x_2); +x_6985 = !lean_is_exclusive(x_6979); +if (x_6985 == 0) +{ +return x_6979; +} +else +{ +lean_object* x_6986; lean_object* x_6987; lean_object* x_6988; +x_6986 = lean_ctor_get(x_6979, 0); +x_6987 = lean_ctor_get(x_6979, 1); +lean_inc(x_6987); +lean_inc(x_6986); +lean_dec(x_6979); +x_6988 = lean_alloc_ctor(1, 2, 0); +lean_ctor_set(x_6988, 0, x_6986); +lean_ctor_set(x_6988, 1, x_6987); +return x_6988; +} +} +} +else +{ +uint8_t x_6989; +lean_dec(x_5957); +lean_dec(x_153); +lean_dec(x_5); +lean_dec(x_4); +lean_dec(x_2); +lean_dec(x_1); +x_6989 = !lean_is_exclusive(x_6967); +if (x_6989 == 0) +{ +lean_object* x_6990; lean_object* x_6991; lean_object* x_6992; +x_6990 = lean_ctor_get(x_6967, 0); +lean_dec(x_6990); +x_6991 = lean_ctor_get(x_6969, 0); +lean_inc(x_6991); +lean_dec(x_6969); +lean_ctor_set(x_6967, 0, x_6991); +if (lean_is_scalar(x_5968)) { + x_6992 = lean_alloc_ctor(0, 2, 0); +} else { + x_6992 = x_5968; +} +lean_ctor_set(x_6992, 0, x_6967); +lean_ctor_set(x_6992, 1, x_6968); +return x_6992; +} +else +{ +lean_object* x_6993; lean_object* x_6994; lean_object* x_6995; lean_object* x_6996; +x_6993 = lean_ctor_get(x_6967, 1); +lean_inc(x_6993); +lean_dec(x_6967); +x_6994 = lean_ctor_get(x_6969, 0); +lean_inc(x_6994); +lean_dec(x_6969); +x_6995 = lean_alloc_ctor(0, 2, 0); +lean_ctor_set(x_6995, 0, x_6994); +lean_ctor_set(x_6995, 1, x_6993); +if (lean_is_scalar(x_5968)) { + x_6996 = lean_alloc_ctor(0, 2, 0); +} else { + x_6996 = x_5968; +} +lean_ctor_set(x_6996, 0, x_6995); +lean_ctor_set(x_6996, 1, x_6968); +return x_6996; +} +} +} +} +} +default: +{ +uint8_t x_7278; +lean_dec(x_5969); +lean_dec(x_5968); +lean_free_object(x_5959); +lean_dec(x_5957); +lean_dec(x_5945); +lean_dec(x_5944); +lean_dec(x_2); +lean_dec(x_1); +x_7278 = !lean_is_exclusive(x_5974); +if (x_7278 == 0) +{ +lean_object* x_7279; uint8_t x_7280; lean_object* x_7281; lean_object* x_7282; lean_object* x_7283; lean_object* x_7284; lean_object* x_7285; lean_object* x_7286; lean_object* x_7287; lean_object* x_7288; +x_7279 = lean_ctor_get(x_5974, 0); +lean_dec(x_7279); +x_7280 = 1; +x_7281 = l_Lean_IR_ToIR_lowerLet___closed__14; +x_7282 = l_Lean_Name_toString(x_153, x_7280, x_7281); +lean_ctor_set_tag(x_5974, 3); +lean_ctor_set(x_5974, 0, x_7282); +x_7283 = l_Lean_IR_ToIR_lowerLet___closed__29; +x_7284 = lean_alloc_ctor(5, 2, 0); +lean_ctor_set(x_7284, 0, x_7283); +lean_ctor_set(x_7284, 1, x_5974); +x_7285 = l_Lean_IR_ToIR_lowerLet___closed__31; +x_7286 = lean_alloc_ctor(5, 2, 0); +lean_ctor_set(x_7286, 0, x_7284); +lean_ctor_set(x_7286, 1, x_7285); +x_7287 = l_Lean_MessageData_ofFormat(x_7286); +x_7288 = l_Lean_throwError___at_Lean_IR_ToIR_lowerLet___spec__1(x_7287, x_5963, x_4, x_5, x_5967); +lean_dec(x_5); +lean_dec(x_4); +lean_dec(x_5963); +return x_7288; +} +else +{ +uint8_t x_7289; lean_object* x_7290; lean_object* x_7291; lean_object* x_7292; lean_object* x_7293; lean_object* x_7294; lean_object* x_7295; lean_object* x_7296; lean_object* x_7297; lean_object* x_7298; +lean_dec(x_5974); +x_7289 = 1; +x_7290 = l_Lean_IR_ToIR_lowerLet___closed__14; +x_7291 = l_Lean_Name_toString(x_153, x_7289, x_7290); +x_7292 = lean_alloc_ctor(3, 1, 0); +lean_ctor_set(x_7292, 0, x_7291); +x_7293 = l_Lean_IR_ToIR_lowerLet___closed__29; +x_7294 = lean_alloc_ctor(5, 2, 0); +lean_ctor_set(x_7294, 0, x_7293); +lean_ctor_set(x_7294, 1, x_7292); +x_7295 = l_Lean_IR_ToIR_lowerLet___closed__31; +x_7296 = 
lean_alloc_ctor(5, 2, 0); +lean_ctor_set(x_7296, 0, x_7294); +lean_ctor_set(x_7296, 1, x_7295); +x_7297 = l_Lean_MessageData_ofFormat(x_7296); +x_7298 = l_Lean_throwError___at_Lean_IR_ToIR_lowerLet___spec__1(x_7297, x_5963, x_4, x_5, x_5967); +lean_dec(x_5); +lean_dec(x_4); +lean_dec(x_5963); +return x_7298; +} +} +} +} +} +else +{ +lean_object* x_7299; lean_object* x_7300; lean_object* x_7301; lean_object* x_7302; lean_object* x_7303; lean_object* x_7304; uint8_t x_7305; lean_object* x_7306; +x_7299 = lean_ctor_get(x_5959, 1); +lean_inc(x_7299); +lean_dec(x_5959); +x_7300 = lean_st_ref_get(x_5, x_5960); +x_7301 = lean_ctor_get(x_7300, 0); +lean_inc(x_7301); +x_7302 = lean_ctor_get(x_7300, 1); +lean_inc(x_7302); +if (lean_is_exclusive(x_7300)) { + lean_ctor_release(x_7300, 0); + lean_ctor_release(x_7300, 1); + x_7303 = x_7300; +} else { + lean_dec_ref(x_7300); + x_7303 = lean_box(0); +} +x_7304 = lean_ctor_get(x_7301, 0); +lean_inc(x_7304); +lean_dec(x_7301); +x_7305 = 0; +lean_inc(x_153); +lean_inc(x_7304); +x_7306 = l_Lean_Environment_find_x3f(x_7304, x_153, x_7305); +if (lean_obj_tag(x_7306) == 0) +{ +lean_object* x_7307; lean_object* x_7308; +lean_dec(x_7304); +lean_dec(x_7303); +lean_dec(x_5957); +lean_dec(x_5945); +lean_dec(x_5944); +lean_dec(x_153); +lean_dec(x_2); +lean_dec(x_1); +x_7307 = l_Lean_IR_ToIR_lowerLet___closed__6; +x_7308 = l_panic___at_Lean_IR_ToIR_lowerAlt_loop___spec__1(x_7307, x_7299, x_4, x_5, x_7302); +return x_7308; +} +else +{ +lean_object* x_7309; +x_7309 = lean_ctor_get(x_7306, 0); +lean_inc(x_7309); +lean_dec(x_7306); +switch (lean_obj_tag(x_7309)) { +case 0: +{ +lean_object* x_7310; lean_object* x_7311; uint8_t x_7312; +lean_dec(x_7304); +lean_dec(x_5945); +lean_dec(x_5944); +if (lean_is_exclusive(x_7309)) { + lean_ctor_release(x_7309, 0); + x_7310 = x_7309; +} else { + lean_dec_ref(x_7309); + x_7310 = lean_box(0); +} +x_7311 = l_Lean_IR_ToIR_lowerLet___closed__9; +x_7312 = lean_name_eq(x_153, x_7311); +if (x_7312 == 0) +{ +lean_object* x_7313; uint8_t x_7314; +x_7313 = l_Lean_IR_ToIR_lowerLet___closed__11; +x_7314 = lean_name_eq(x_153, x_7313); +if (x_7314 == 0) +{ +lean_object* x_7315; lean_object* x_7316; lean_object* x_7317; +lean_dec(x_7303); +lean_inc(x_153); +x_7315 = l_Lean_IR_ToIR_findDecl(x_153, x_7299, x_4, x_5, x_7302); +x_7316 = lean_ctor_get(x_7315, 0); +lean_inc(x_7316); +x_7317 = lean_ctor_get(x_7316, 0); +lean_inc(x_7317); +if (lean_obj_tag(x_7317) == 0) +{ +lean_object* x_7318; lean_object* x_7319; lean_object* x_7320; lean_object* x_7321; uint8_t x_7322; lean_object* x_7323; lean_object* x_7324; lean_object* x_7325; lean_object* x_7326; lean_object* x_7327; lean_object* x_7328; lean_object* x_7329; lean_object* x_7330; lean_object* x_7331; +lean_dec(x_5957); +lean_dec(x_2); +lean_dec(x_1); +x_7318 = lean_ctor_get(x_7315, 1); +lean_inc(x_7318); +if (lean_is_exclusive(x_7315)) { + lean_ctor_release(x_7315, 0); + lean_ctor_release(x_7315, 1); + x_7319 = x_7315; +} else { + lean_dec_ref(x_7315); + x_7319 = lean_box(0); +} +x_7320 = lean_ctor_get(x_7316, 1); +lean_inc(x_7320); +if (lean_is_exclusive(x_7316)) { + lean_ctor_release(x_7316, 0); + lean_ctor_release(x_7316, 1); + x_7321 = x_7316; +} else { + lean_dec_ref(x_7316); + x_7321 = lean_box(0); +} +x_7322 = 1; +x_7323 = l_Lean_IR_ToIR_lowerLet___closed__14; +x_7324 = l_Lean_Name_toString(x_153, x_7322, x_7323); +if (lean_is_scalar(x_7310)) { + x_7325 = lean_alloc_ctor(3, 1, 0); +} else { + x_7325 = x_7310; + lean_ctor_set_tag(x_7325, 3); +} +lean_ctor_set(x_7325, 0, x_7324); +x_7326 = 
l_Lean_IR_ToIR_lowerLet___closed__13; +if (lean_is_scalar(x_7321)) { + x_7327 = lean_alloc_ctor(5, 2, 0); +} else { + x_7327 = x_7321; + lean_ctor_set_tag(x_7327, 5); +} +lean_ctor_set(x_7327, 0, x_7326); +lean_ctor_set(x_7327, 1, x_7325); +x_7328 = l_Lean_IR_ToIR_lowerLet___closed__16; +if (lean_is_scalar(x_7319)) { + x_7329 = lean_alloc_ctor(5, 2, 0); +} else { + x_7329 = x_7319; + lean_ctor_set_tag(x_7329, 5); +} +lean_ctor_set(x_7329, 0, x_7327); +lean_ctor_set(x_7329, 1, x_7328); +x_7330 = l_Lean_MessageData_ofFormat(x_7329); +x_7331 = l_Lean_throwError___at_Lean_IR_ToIR_lowerLet___spec__1(x_7330, x_7320, x_4, x_5, x_7318); +lean_dec(x_5); +lean_dec(x_4); +lean_dec(x_7320); +return x_7331; +} +else +{ +lean_object* x_7332; lean_object* x_7333; lean_object* x_7334; lean_object* x_7335; lean_object* x_7336; lean_object* x_7337; lean_object* x_7338; uint8_t x_7339; +lean_dec(x_7310); +x_7332 = lean_ctor_get(x_7315, 1); +lean_inc(x_7332); +lean_dec(x_7315); +x_7333 = lean_ctor_get(x_7316, 1); +lean_inc(x_7333); +if (lean_is_exclusive(x_7316)) { + lean_ctor_release(x_7316, 0); + lean_ctor_release(x_7316, 1); + x_7334 = x_7316; +} else { + lean_dec_ref(x_7316); + x_7334 = lean_box(0); +} +x_7335 = lean_ctor_get(x_7317, 0); +lean_inc(x_7335); +lean_dec(x_7317); +x_7336 = lean_array_get_size(x_5957); +x_7337 = l_Lean_IR_Decl_params(x_7335); +lean_dec(x_7335); +x_7338 = lean_array_get_size(x_7337); +lean_dec(x_7337); +x_7339 = lean_nat_dec_lt(x_7336, x_7338); +if (x_7339 == 0) +{ +uint8_t x_7340; +x_7340 = lean_nat_dec_eq(x_7336, x_7338); +if (x_7340 == 0) +{ +lean_object* x_7341; lean_object* x_7342; lean_object* x_7343; lean_object* x_7344; lean_object* x_7345; lean_object* x_7346; lean_object* x_7347; lean_object* x_7348; lean_object* x_7349; lean_object* x_7350; lean_object* x_7351; lean_object* x_7352; lean_object* x_7353; lean_object* x_7354; lean_object* x_7355; lean_object* x_7356; lean_object* x_7357; +x_7341 = lean_unsigned_to_nat(0u); +x_7342 = l_Array_extract___rarg(x_5957, x_7341, x_7338); +x_7343 = l_Array_extract___rarg(x_5957, x_7338, x_7336); +lean_dec(x_7336); +lean_dec(x_5957); +if (lean_is_scalar(x_7334)) { + x_7344 = lean_alloc_ctor(6, 2, 0); +} else { + x_7344 = x_7334; + lean_ctor_set_tag(x_7344, 6); +} +lean_ctor_set(x_7344, 0, x_153); +lean_ctor_set(x_7344, 1, x_7342); +x_7345 = lean_ctor_get(x_1, 0); +lean_inc(x_7345); +x_7346 = l_Lean_IR_ToIR_bindVar(x_7345, x_7333, x_4, x_5, x_7332); +x_7347 = lean_ctor_get(x_7346, 0); +lean_inc(x_7347); +x_7348 = lean_ctor_get(x_7346, 1); +lean_inc(x_7348); +lean_dec(x_7346); +x_7349 = lean_ctor_get(x_7347, 0); +lean_inc(x_7349); +x_7350 = lean_ctor_get(x_7347, 1); +lean_inc(x_7350); +lean_dec(x_7347); +x_7351 = l_Lean_IR_ToIR_newVar(x_7350, x_4, x_5, x_7348); +x_7352 = lean_ctor_get(x_7351, 0); +lean_inc(x_7352); +x_7353 = lean_ctor_get(x_7351, 1); +lean_inc(x_7353); +lean_dec(x_7351); +x_7354 = lean_ctor_get(x_7352, 0); +lean_inc(x_7354); +x_7355 = lean_ctor_get(x_7352, 1); +lean_inc(x_7355); +lean_dec(x_7352); +x_7356 = lean_ctor_get(x_1, 2); +lean_inc(x_7356); +lean_dec(x_1); +lean_inc(x_5); +lean_inc(x_4); +x_7357 = l_Lean_IR_ToIR_lowerType(x_7356, x_7355, x_4, x_5, x_7353); +if (lean_obj_tag(x_7357) == 0) +{ +lean_object* x_7358; lean_object* x_7359; lean_object* x_7360; lean_object* x_7361; lean_object* x_7362; +x_7358 = lean_ctor_get(x_7357, 0); +lean_inc(x_7358); +x_7359 = lean_ctor_get(x_7357, 1); +lean_inc(x_7359); +lean_dec(x_7357); +x_7360 = lean_ctor_get(x_7358, 0); +lean_inc(x_7360); +x_7361 = lean_ctor_get(x_7358, 
1); +lean_inc(x_7361); +lean_dec(x_7358); +x_7362 = l_Lean_IR_ToIR_lowerLet___lambda__3(x_2, x_7354, x_7343, x_7349, x_7344, x_7360, x_7361, x_4, x_5, x_7359); +return x_7362; +} +else +{ +lean_object* x_7363; lean_object* x_7364; lean_object* x_7365; lean_object* x_7366; +lean_dec(x_7354); +lean_dec(x_7349); +lean_dec(x_7344); +lean_dec(x_7343); +lean_dec(x_5); +lean_dec(x_4); +lean_dec(x_2); +x_7363 = lean_ctor_get(x_7357, 0); +lean_inc(x_7363); +x_7364 = lean_ctor_get(x_7357, 1); +lean_inc(x_7364); +if (lean_is_exclusive(x_7357)) { + lean_ctor_release(x_7357, 0); + lean_ctor_release(x_7357, 1); + x_7365 = x_7357; +} else { + lean_dec_ref(x_7357); + x_7365 = lean_box(0); +} +if (lean_is_scalar(x_7365)) { + x_7366 = lean_alloc_ctor(1, 2, 0); +} else { + x_7366 = x_7365; +} +lean_ctor_set(x_7366, 0, x_7363); +lean_ctor_set(x_7366, 1, x_7364); +return x_7366; +} +} +else +{ +lean_object* x_7367; lean_object* x_7368; lean_object* x_7369; lean_object* x_7370; lean_object* x_7371; lean_object* x_7372; lean_object* x_7373; lean_object* x_7374; lean_object* x_7375; +lean_dec(x_7338); +lean_dec(x_7336); +if (lean_is_scalar(x_7334)) { + x_7367 = lean_alloc_ctor(6, 2, 0); +} else { + x_7367 = x_7334; + lean_ctor_set_tag(x_7367, 6); +} +lean_ctor_set(x_7367, 0, x_153); +lean_ctor_set(x_7367, 1, x_5957); +x_7368 = lean_ctor_get(x_1, 0); +lean_inc(x_7368); +x_7369 = l_Lean_IR_ToIR_bindVar(x_7368, x_7333, x_4, x_5, x_7332); +x_7370 = lean_ctor_get(x_7369, 0); +lean_inc(x_7370); +x_7371 = lean_ctor_get(x_7369, 1); +lean_inc(x_7371); +lean_dec(x_7369); +x_7372 = lean_ctor_get(x_7370, 0); +lean_inc(x_7372); +x_7373 = lean_ctor_get(x_7370, 1); +lean_inc(x_7373); +lean_dec(x_7370); +x_7374 = lean_ctor_get(x_1, 2); +lean_inc(x_7374); +lean_dec(x_1); +lean_inc(x_5); +lean_inc(x_4); +x_7375 = l_Lean_IR_ToIR_lowerType(x_7374, x_7373, x_4, x_5, x_7371); +if (lean_obj_tag(x_7375) == 0) +{ +lean_object* x_7376; lean_object* x_7377; lean_object* x_7378; lean_object* x_7379; lean_object* x_7380; +x_7376 = lean_ctor_get(x_7375, 0); +lean_inc(x_7376); +x_7377 = lean_ctor_get(x_7375, 1); +lean_inc(x_7377); +lean_dec(x_7375); +x_7378 = lean_ctor_get(x_7376, 0); +lean_inc(x_7378); +x_7379 = lean_ctor_get(x_7376, 1); +lean_inc(x_7379); +lean_dec(x_7376); +x_7380 = l_Lean_IR_ToIR_lowerLet___lambda__1(x_2, x_7372, x_7367, x_7378, x_7379, x_4, x_5, x_7377); +return x_7380; +} +else +{ +lean_object* x_7381; lean_object* x_7382; lean_object* x_7383; lean_object* x_7384; +lean_dec(x_7372); +lean_dec(x_7367); +lean_dec(x_5); +lean_dec(x_4); +lean_dec(x_2); +x_7381 = lean_ctor_get(x_7375, 0); +lean_inc(x_7381); +x_7382 = lean_ctor_get(x_7375, 1); +lean_inc(x_7382); +if (lean_is_exclusive(x_7375)) { + lean_ctor_release(x_7375, 0); + lean_ctor_release(x_7375, 1); + x_7383 = x_7375; +} else { + lean_dec_ref(x_7375); + x_7383 = lean_box(0); +} +if (lean_is_scalar(x_7383)) { + x_7384 = lean_alloc_ctor(1, 2, 0); +} else { + x_7384 = x_7383; +} +lean_ctor_set(x_7384, 0, x_7381); +lean_ctor_set(x_7384, 1, x_7382); +return x_7384; +} +} +} +else +{ +lean_object* x_7385; lean_object* x_7386; lean_object* x_7387; lean_object* x_7388; lean_object* x_7389; lean_object* x_7390; lean_object* x_7391; lean_object* x_7392; lean_object* x_7393; +lean_dec(x_7338); +lean_dec(x_7336); +if (lean_is_scalar(x_7334)) { + x_7385 = lean_alloc_ctor(7, 2, 0); +} else { + x_7385 = x_7334; + lean_ctor_set_tag(x_7385, 7); +} +lean_ctor_set(x_7385, 0, x_153); +lean_ctor_set(x_7385, 1, x_5957); +x_7386 = lean_ctor_get(x_1, 0); +lean_inc(x_7386); +lean_dec(x_1); 
+x_7387 = l_Lean_IR_ToIR_bindVar(x_7386, x_7333, x_4, x_5, x_7332); +x_7388 = lean_ctor_get(x_7387, 0); +lean_inc(x_7388); +x_7389 = lean_ctor_get(x_7387, 1); +lean_inc(x_7389); +lean_dec(x_7387); +x_7390 = lean_ctor_get(x_7388, 0); +lean_inc(x_7390); +x_7391 = lean_ctor_get(x_7388, 1); +lean_inc(x_7391); +lean_dec(x_7388); +x_7392 = lean_box(7); +x_7393 = l_Lean_IR_ToIR_lowerLet___lambda__1(x_2, x_7390, x_7385, x_7392, x_7391, x_4, x_5, x_7389); +return x_7393; +} +} +} +else +{ +lean_object* x_7394; lean_object* x_7395; lean_object* x_7396; +lean_dec(x_7310); +lean_dec(x_5957); +lean_dec(x_153); +lean_dec(x_5); +lean_dec(x_4); +lean_dec(x_2); +lean_dec(x_1); +x_7394 = lean_box(13); +x_7395 = lean_alloc_ctor(0, 2, 0); +lean_ctor_set(x_7395, 0, x_7394); +lean_ctor_set(x_7395, 1, x_7299); +if (lean_is_scalar(x_7303)) { + x_7396 = lean_alloc_ctor(0, 2, 0); +} else { + x_7396 = x_7303; +} +lean_ctor_set(x_7396, 0, x_7395); +lean_ctor_set(x_7396, 1, x_7302); +return x_7396; +} +} +else +{ +lean_object* x_7397; lean_object* x_7398; lean_object* x_7399; +lean_dec(x_7310); +lean_dec(x_7303); +lean_dec(x_153); +x_7397 = l_Lean_IR_instInhabitedArg; +x_7398 = lean_unsigned_to_nat(2u); +x_7399 = lean_array_get(x_7397, x_5957, x_7398); +lean_dec(x_5957); +if (lean_obj_tag(x_7399) == 0) +{ +lean_object* x_7400; lean_object* x_7401; lean_object* x_7402; lean_object* x_7403; lean_object* x_7404; lean_object* x_7405; lean_object* x_7406; +x_7400 = lean_ctor_get(x_7399, 0); +lean_inc(x_7400); +lean_dec(x_7399); +x_7401 = lean_ctor_get(x_1, 0); +lean_inc(x_7401); +lean_dec(x_1); +x_7402 = l_Lean_IR_ToIR_bindVarToVarId(x_7401, x_7400, x_7299, x_4, x_5, x_7302); +x_7403 = lean_ctor_get(x_7402, 0); +lean_inc(x_7403); +x_7404 = lean_ctor_get(x_7402, 1); +lean_inc(x_7404); +lean_dec(x_7402); +x_7405 = lean_ctor_get(x_7403, 1); +lean_inc(x_7405); +lean_dec(x_7403); +x_7406 = l_Lean_IR_ToIR_lowerCode(x_2, x_7405, x_4, x_5, x_7404); +return x_7406; +} +else +{ +lean_object* x_7407; lean_object* x_7408; lean_object* x_7409; lean_object* x_7410; lean_object* x_7411; lean_object* x_7412; +x_7407 = lean_ctor_get(x_1, 0); +lean_inc(x_7407); +lean_dec(x_1); +x_7408 = l_Lean_IR_ToIR_bindErased(x_7407, x_7299, x_4, x_5, x_7302); +x_7409 = lean_ctor_get(x_7408, 0); +lean_inc(x_7409); +x_7410 = lean_ctor_get(x_7408, 1); +lean_inc(x_7410); +lean_dec(x_7408); +x_7411 = lean_ctor_get(x_7409, 1); +lean_inc(x_7411); +lean_dec(x_7409); +x_7412 = l_Lean_IR_ToIR_lowerCode(x_2, x_7411, x_4, x_5, x_7410); +return x_7412; +} +} +} +case 1: +{ +lean_object* x_7413; lean_object* x_7414; lean_object* x_7441; lean_object* x_7442; +lean_dec(x_7309); +lean_dec(x_7304); +lean_dec(x_5945); +lean_dec(x_5944); +lean_inc(x_153); +x_7441 = l_Lean_Compiler_LCNF_getMonoDecl_x3f(x_153, x_4, x_5, x_7302); +x_7442 = lean_ctor_get(x_7441, 0); +lean_inc(x_7442); +if (lean_obj_tag(x_7442) == 0) +{ +lean_object* x_7443; lean_object* x_7444; lean_object* x_7445; +x_7443 = lean_ctor_get(x_7441, 1); +lean_inc(x_7443); +lean_dec(x_7441); +x_7444 = lean_box(0); +x_7445 = lean_alloc_ctor(0, 2, 0); +lean_ctor_set(x_7445, 0, x_7444); +lean_ctor_set(x_7445, 1, x_7299); +x_7413 = x_7445; +x_7414 = x_7443; +goto block_7440; +} +else +{ +lean_object* x_7446; lean_object* x_7447; lean_object* x_7448; lean_object* x_7449; lean_object* x_7450; lean_object* x_7451; lean_object* x_7452; uint8_t x_7453; +x_7446 = lean_ctor_get(x_7441, 1); +lean_inc(x_7446); +if (lean_is_exclusive(x_7441)) { + lean_ctor_release(x_7441, 0); + lean_ctor_release(x_7441, 1); + x_7447 = x_7441; 
+} else { + lean_dec_ref(x_7441); + x_7447 = lean_box(0); +} +x_7448 = lean_ctor_get(x_7442, 0); +lean_inc(x_7448); +if (lean_is_exclusive(x_7442)) { + lean_ctor_release(x_7442, 0); + x_7449 = x_7442; +} else { + lean_dec_ref(x_7442); + x_7449 = lean_box(0); +} +x_7450 = lean_array_get_size(x_5957); +x_7451 = lean_ctor_get(x_7448, 3); +lean_inc(x_7451); +lean_dec(x_7448); +x_7452 = lean_array_get_size(x_7451); +lean_dec(x_7451); +x_7453 = lean_nat_dec_lt(x_7450, x_7452); +if (x_7453 == 0) +{ +uint8_t x_7454; +x_7454 = lean_nat_dec_eq(x_7450, x_7452); +if (x_7454 == 0) +{ +lean_object* x_7455; lean_object* x_7456; lean_object* x_7457; lean_object* x_7458; lean_object* x_7459; lean_object* x_7460; lean_object* x_7461; lean_object* x_7462; lean_object* x_7463; lean_object* x_7464; lean_object* x_7465; lean_object* x_7466; lean_object* x_7467; lean_object* x_7468; lean_object* x_7469; lean_object* x_7470; lean_object* x_7471; +x_7455 = lean_unsigned_to_nat(0u); +x_7456 = l_Array_extract___rarg(x_5957, x_7455, x_7452); +x_7457 = l_Array_extract___rarg(x_5957, x_7452, x_7450); +lean_dec(x_7450); +lean_inc(x_153); +if (lean_is_scalar(x_7447)) { + x_7458 = lean_alloc_ctor(6, 2, 0); +} else { + x_7458 = x_7447; + lean_ctor_set_tag(x_7458, 6); +} +lean_ctor_set(x_7458, 0, x_153); +lean_ctor_set(x_7458, 1, x_7456); +x_7459 = lean_ctor_get(x_1, 0); +lean_inc(x_7459); +x_7460 = l_Lean_IR_ToIR_bindVar(x_7459, x_7299, x_4, x_5, x_7446); +x_7461 = lean_ctor_get(x_7460, 0); +lean_inc(x_7461); +x_7462 = lean_ctor_get(x_7460, 1); +lean_inc(x_7462); +lean_dec(x_7460); +x_7463 = lean_ctor_get(x_7461, 0); +lean_inc(x_7463); +x_7464 = lean_ctor_get(x_7461, 1); +lean_inc(x_7464); +lean_dec(x_7461); +x_7465 = l_Lean_IR_ToIR_newVar(x_7464, x_4, x_5, x_7462); +x_7466 = lean_ctor_get(x_7465, 0); +lean_inc(x_7466); +x_7467 = lean_ctor_get(x_7465, 1); +lean_inc(x_7467); +lean_dec(x_7465); +x_7468 = lean_ctor_get(x_7466, 0); +lean_inc(x_7468); +x_7469 = lean_ctor_get(x_7466, 1); +lean_inc(x_7469); +lean_dec(x_7466); +x_7470 = lean_ctor_get(x_1, 2); +lean_inc(x_7470); +lean_inc(x_5); +lean_inc(x_4); +x_7471 = l_Lean_IR_ToIR_lowerType(x_7470, x_7469, x_4, x_5, x_7467); +if (lean_obj_tag(x_7471) == 0) +{ +lean_object* x_7472; lean_object* x_7473; lean_object* x_7474; lean_object* x_7475; lean_object* x_7476; +x_7472 = lean_ctor_get(x_7471, 0); +lean_inc(x_7472); +x_7473 = lean_ctor_get(x_7471, 1); +lean_inc(x_7473); +lean_dec(x_7471); +x_7474 = lean_ctor_get(x_7472, 0); +lean_inc(x_7474); +x_7475 = lean_ctor_get(x_7472, 1); +lean_inc(x_7475); +lean_dec(x_7472); +lean_inc(x_5); +lean_inc(x_4); +lean_inc(x_2); +x_7476 = l_Lean_IR_ToIR_lowerLet___lambda__3(x_2, x_7468, x_7457, x_7463, x_7458, x_7474, x_7475, x_4, x_5, x_7473); +if (lean_obj_tag(x_7476) == 0) +{ +lean_object* x_7477; lean_object* x_7478; lean_object* x_7479; lean_object* x_7480; lean_object* x_7481; lean_object* x_7482; lean_object* x_7483; +x_7477 = lean_ctor_get(x_7476, 0); +lean_inc(x_7477); +x_7478 = lean_ctor_get(x_7476, 1); +lean_inc(x_7478); +lean_dec(x_7476); +x_7479 = lean_ctor_get(x_7477, 0); +lean_inc(x_7479); +x_7480 = lean_ctor_get(x_7477, 1); +lean_inc(x_7480); +if (lean_is_exclusive(x_7477)) { + lean_ctor_release(x_7477, 0); + lean_ctor_release(x_7477, 1); + x_7481 = x_7477; +} else { + lean_dec_ref(x_7477); + x_7481 = lean_box(0); +} +if (lean_is_scalar(x_7449)) { + x_7482 = lean_alloc_ctor(1, 1, 0); +} else { + x_7482 = x_7449; +} +lean_ctor_set(x_7482, 0, x_7479); +if (lean_is_scalar(x_7481)) { + x_7483 = lean_alloc_ctor(0, 2, 0); +} else { + 
x_7483 = x_7481; +} +lean_ctor_set(x_7483, 0, x_7482); +lean_ctor_set(x_7483, 1, x_7480); +x_7413 = x_7483; +x_7414 = x_7478; +goto block_7440; +} +else +{ +lean_object* x_7484; lean_object* x_7485; lean_object* x_7486; lean_object* x_7487; +lean_dec(x_7449); +lean_dec(x_7303); +lean_dec(x_5957); +lean_dec(x_153); +lean_dec(x_5); +lean_dec(x_4); +lean_dec(x_2); +lean_dec(x_1); +x_7484 = lean_ctor_get(x_7476, 0); +lean_inc(x_7484); +x_7485 = lean_ctor_get(x_7476, 1); +lean_inc(x_7485); +if (lean_is_exclusive(x_7476)) { + lean_ctor_release(x_7476, 0); + lean_ctor_release(x_7476, 1); + x_7486 = x_7476; +} else { + lean_dec_ref(x_7476); + x_7486 = lean_box(0); +} +if (lean_is_scalar(x_7486)) { + x_7487 = lean_alloc_ctor(1, 2, 0); +} else { + x_7487 = x_7486; +} +lean_ctor_set(x_7487, 0, x_7484); +lean_ctor_set(x_7487, 1, x_7485); +return x_7487; +} +} +else +{ +lean_object* x_7488; lean_object* x_7489; lean_object* x_7490; lean_object* x_7491; +lean_dec(x_7468); +lean_dec(x_7463); +lean_dec(x_7458); +lean_dec(x_7457); +lean_dec(x_7449); +lean_dec(x_7303); +lean_dec(x_5957); +lean_dec(x_153); +lean_dec(x_5); +lean_dec(x_4); +lean_dec(x_2); +lean_dec(x_1); +x_7488 = lean_ctor_get(x_7471, 0); +lean_inc(x_7488); +x_7489 = lean_ctor_get(x_7471, 1); +lean_inc(x_7489); +if (lean_is_exclusive(x_7471)) { + lean_ctor_release(x_7471, 0); + lean_ctor_release(x_7471, 1); + x_7490 = x_7471; +} else { + lean_dec_ref(x_7471); + x_7490 = lean_box(0); +} +if (lean_is_scalar(x_7490)) { + x_7491 = lean_alloc_ctor(1, 2, 0); +} else { + x_7491 = x_7490; +} +lean_ctor_set(x_7491, 0, x_7488); +lean_ctor_set(x_7491, 1, x_7489); +return x_7491; +} +} +else +{ +lean_object* x_7492; lean_object* x_7493; lean_object* x_7494; lean_object* x_7495; lean_object* x_7496; lean_object* x_7497; lean_object* x_7498; lean_object* x_7499; lean_object* x_7500; +lean_dec(x_7452); +lean_dec(x_7450); +lean_inc(x_5957); +lean_inc(x_153); +if (lean_is_scalar(x_7447)) { + x_7492 = lean_alloc_ctor(6, 2, 0); +} else { + x_7492 = x_7447; + lean_ctor_set_tag(x_7492, 6); +} +lean_ctor_set(x_7492, 0, x_153); +lean_ctor_set(x_7492, 1, x_5957); +x_7493 = lean_ctor_get(x_1, 0); +lean_inc(x_7493); +x_7494 = l_Lean_IR_ToIR_bindVar(x_7493, x_7299, x_4, x_5, x_7446); +x_7495 = lean_ctor_get(x_7494, 0); +lean_inc(x_7495); +x_7496 = lean_ctor_get(x_7494, 1); +lean_inc(x_7496); +lean_dec(x_7494); +x_7497 = lean_ctor_get(x_7495, 0); +lean_inc(x_7497); +x_7498 = lean_ctor_get(x_7495, 1); +lean_inc(x_7498); +lean_dec(x_7495); +x_7499 = lean_ctor_get(x_1, 2); +lean_inc(x_7499); +lean_inc(x_5); +lean_inc(x_4); +x_7500 = l_Lean_IR_ToIR_lowerType(x_7499, x_7498, x_4, x_5, x_7496); +if (lean_obj_tag(x_7500) == 0) +{ +lean_object* x_7501; lean_object* x_7502; lean_object* x_7503; lean_object* x_7504; lean_object* x_7505; +x_7501 = lean_ctor_get(x_7500, 0); +lean_inc(x_7501); +x_7502 = lean_ctor_get(x_7500, 1); +lean_inc(x_7502); +lean_dec(x_7500); +x_7503 = lean_ctor_get(x_7501, 0); +lean_inc(x_7503); +x_7504 = lean_ctor_get(x_7501, 1); +lean_inc(x_7504); +lean_dec(x_7501); +lean_inc(x_5); +lean_inc(x_4); +lean_inc(x_2); +x_7505 = l_Lean_IR_ToIR_lowerLet___lambda__1(x_2, x_7497, x_7492, x_7503, x_7504, x_4, x_5, x_7502); +if (lean_obj_tag(x_7505) == 0) +{ +lean_object* x_7506; lean_object* x_7507; lean_object* x_7508; lean_object* x_7509; lean_object* x_7510; lean_object* x_7511; lean_object* x_7512; +x_7506 = lean_ctor_get(x_7505, 0); +lean_inc(x_7506); +x_7507 = lean_ctor_get(x_7505, 1); +lean_inc(x_7507); +lean_dec(x_7505); +x_7508 = lean_ctor_get(x_7506, 0); 
+lean_inc(x_7508); +x_7509 = lean_ctor_get(x_7506, 1); +lean_inc(x_7509); +if (lean_is_exclusive(x_7506)) { + lean_ctor_release(x_7506, 0); + lean_ctor_release(x_7506, 1); + x_7510 = x_7506; +} else { + lean_dec_ref(x_7506); + x_7510 = lean_box(0); +} +if (lean_is_scalar(x_7449)) { + x_7511 = lean_alloc_ctor(1, 1, 0); +} else { + x_7511 = x_7449; +} +lean_ctor_set(x_7511, 0, x_7508); +if (lean_is_scalar(x_7510)) { + x_7512 = lean_alloc_ctor(0, 2, 0); +} else { + x_7512 = x_7510; +} +lean_ctor_set(x_7512, 0, x_7511); +lean_ctor_set(x_7512, 1, x_7509); +x_7413 = x_7512; +x_7414 = x_7507; +goto block_7440; +} +else +{ +lean_object* x_7513; lean_object* x_7514; lean_object* x_7515; lean_object* x_7516; +lean_dec(x_7449); +lean_dec(x_7303); +lean_dec(x_5957); +lean_dec(x_153); +lean_dec(x_5); +lean_dec(x_4); +lean_dec(x_2); +lean_dec(x_1); +x_7513 = lean_ctor_get(x_7505, 0); +lean_inc(x_7513); +x_7514 = lean_ctor_get(x_7505, 1); +lean_inc(x_7514); +if (lean_is_exclusive(x_7505)) { + lean_ctor_release(x_7505, 0); + lean_ctor_release(x_7505, 1); + x_7515 = x_7505; +} else { + lean_dec_ref(x_7505); + x_7515 = lean_box(0); +} +if (lean_is_scalar(x_7515)) { + x_7516 = lean_alloc_ctor(1, 2, 0); +} else { + x_7516 = x_7515; +} +lean_ctor_set(x_7516, 0, x_7513); +lean_ctor_set(x_7516, 1, x_7514); +return x_7516; +} +} +else +{ +lean_object* x_7517; lean_object* x_7518; lean_object* x_7519; lean_object* x_7520; +lean_dec(x_7497); +lean_dec(x_7492); +lean_dec(x_7449); +lean_dec(x_7303); +lean_dec(x_5957); +lean_dec(x_153); +lean_dec(x_5); +lean_dec(x_4); +lean_dec(x_2); +lean_dec(x_1); +x_7517 = lean_ctor_get(x_7500, 0); +lean_inc(x_7517); +x_7518 = lean_ctor_get(x_7500, 1); +lean_inc(x_7518); +if (lean_is_exclusive(x_7500)) { + lean_ctor_release(x_7500, 0); + lean_ctor_release(x_7500, 1); + x_7519 = x_7500; +} else { + lean_dec_ref(x_7500); + x_7519 = lean_box(0); +} +if (lean_is_scalar(x_7519)) { + x_7520 = lean_alloc_ctor(1, 2, 0); +} else { + x_7520 = x_7519; +} +lean_ctor_set(x_7520, 0, x_7517); +lean_ctor_set(x_7520, 1, x_7518); +return x_7520; +} +} +} +else +{ +lean_object* x_7521; lean_object* x_7522; lean_object* x_7523; lean_object* x_7524; lean_object* x_7525; lean_object* x_7526; lean_object* x_7527; lean_object* x_7528; lean_object* x_7529; +lean_dec(x_7452); +lean_dec(x_7450); +lean_inc(x_5957); +lean_inc(x_153); +if (lean_is_scalar(x_7447)) { + x_7521 = lean_alloc_ctor(7, 2, 0); +} else { + x_7521 = x_7447; + lean_ctor_set_tag(x_7521, 7); +} +lean_ctor_set(x_7521, 0, x_153); +lean_ctor_set(x_7521, 1, x_5957); +x_7522 = lean_ctor_get(x_1, 0); +lean_inc(x_7522); +x_7523 = l_Lean_IR_ToIR_bindVar(x_7522, x_7299, x_4, x_5, x_7446); +x_7524 = lean_ctor_get(x_7523, 0); +lean_inc(x_7524); +x_7525 = lean_ctor_get(x_7523, 1); +lean_inc(x_7525); +lean_dec(x_7523); +x_7526 = lean_ctor_get(x_7524, 0); +lean_inc(x_7526); +x_7527 = lean_ctor_get(x_7524, 1); +lean_inc(x_7527); +lean_dec(x_7524); +x_7528 = lean_box(7); +lean_inc(x_5); +lean_inc(x_4); +lean_inc(x_2); +x_7529 = l_Lean_IR_ToIR_lowerLet___lambda__1(x_2, x_7526, x_7521, x_7528, x_7527, x_4, x_5, x_7525); +if (lean_obj_tag(x_7529) == 0) +{ +lean_object* x_7530; lean_object* x_7531; lean_object* x_7532; lean_object* x_7533; lean_object* x_7534; lean_object* x_7535; lean_object* x_7536; +x_7530 = lean_ctor_get(x_7529, 0); +lean_inc(x_7530); +x_7531 = lean_ctor_get(x_7529, 1); +lean_inc(x_7531); +lean_dec(x_7529); +x_7532 = lean_ctor_get(x_7530, 0); +lean_inc(x_7532); +x_7533 = lean_ctor_get(x_7530, 1); +lean_inc(x_7533); +if 
(lean_is_exclusive(x_7530)) { + lean_ctor_release(x_7530, 0); + lean_ctor_release(x_7530, 1); + x_7534 = x_7530; +} else { + lean_dec_ref(x_7530); + x_7534 = lean_box(0); +} +if (lean_is_scalar(x_7449)) { + x_7535 = lean_alloc_ctor(1, 1, 0); +} else { + x_7535 = x_7449; +} +lean_ctor_set(x_7535, 0, x_7532); +if (lean_is_scalar(x_7534)) { + x_7536 = lean_alloc_ctor(0, 2, 0); +} else { + x_7536 = x_7534; +} +lean_ctor_set(x_7536, 0, x_7535); +lean_ctor_set(x_7536, 1, x_7533); +x_7413 = x_7536; +x_7414 = x_7531; +goto block_7440; +} +else +{ +lean_object* x_7537; lean_object* x_7538; lean_object* x_7539; lean_object* x_7540; +lean_dec(x_7449); +lean_dec(x_7303); +lean_dec(x_5957); +lean_dec(x_153); +lean_dec(x_5); +lean_dec(x_4); +lean_dec(x_2); +lean_dec(x_1); +x_7537 = lean_ctor_get(x_7529, 0); +lean_inc(x_7537); +x_7538 = lean_ctor_get(x_7529, 1); +lean_inc(x_7538); +if (lean_is_exclusive(x_7529)) { + lean_ctor_release(x_7529, 0); + lean_ctor_release(x_7529, 1); + x_7539 = x_7529; +} else { + lean_dec_ref(x_7529); + x_7539 = lean_box(0); +} +if (lean_is_scalar(x_7539)) { + x_7540 = lean_alloc_ctor(1, 2, 0); +} else { + x_7540 = x_7539; +} +lean_ctor_set(x_7540, 0, x_7537); +lean_ctor_set(x_7540, 1, x_7538); +return x_7540; +} +} +} +block_7440: +{ +lean_object* x_7415; +x_7415 = lean_ctor_get(x_7413, 0); +lean_inc(x_7415); +if (lean_obj_tag(x_7415) == 0) +{ +lean_object* x_7416; lean_object* x_7417; lean_object* x_7418; lean_object* x_7419; lean_object* x_7420; lean_object* x_7421; lean_object* x_7422; lean_object* x_7423; lean_object* x_7424; lean_object* x_7425; +lean_dec(x_7303); +x_7416 = lean_ctor_get(x_7413, 1); +lean_inc(x_7416); +lean_dec(x_7413); +x_7417 = lean_alloc_ctor(6, 2, 0); +lean_ctor_set(x_7417, 0, x_153); +lean_ctor_set(x_7417, 1, x_5957); +x_7418 = lean_ctor_get(x_1, 0); +lean_inc(x_7418); +x_7419 = l_Lean_IR_ToIR_bindVar(x_7418, x_7416, x_4, x_5, x_7414); +x_7420 = lean_ctor_get(x_7419, 0); +lean_inc(x_7420); +x_7421 = lean_ctor_get(x_7419, 1); +lean_inc(x_7421); +lean_dec(x_7419); +x_7422 = lean_ctor_get(x_7420, 0); +lean_inc(x_7422); +x_7423 = lean_ctor_get(x_7420, 1); +lean_inc(x_7423); +lean_dec(x_7420); +x_7424 = lean_ctor_get(x_1, 2); +lean_inc(x_7424); +lean_dec(x_1); +lean_inc(x_5); +lean_inc(x_4); +x_7425 = l_Lean_IR_ToIR_lowerType(x_7424, x_7423, x_4, x_5, x_7421); +if (lean_obj_tag(x_7425) == 0) +{ +lean_object* x_7426; lean_object* x_7427; lean_object* x_7428; lean_object* x_7429; lean_object* x_7430; +x_7426 = lean_ctor_get(x_7425, 0); +lean_inc(x_7426); +x_7427 = lean_ctor_get(x_7425, 1); +lean_inc(x_7427); +lean_dec(x_7425); +x_7428 = lean_ctor_get(x_7426, 0); +lean_inc(x_7428); +x_7429 = lean_ctor_get(x_7426, 1); +lean_inc(x_7429); +lean_dec(x_7426); +x_7430 = l_Lean_IR_ToIR_lowerLet___lambda__1(x_2, x_7422, x_7417, x_7428, x_7429, x_4, x_5, x_7427); +return x_7430; +} +else +{ +lean_object* x_7431; lean_object* x_7432; lean_object* x_7433; lean_object* x_7434; +lean_dec(x_7422); +lean_dec(x_7417); +lean_dec(x_5); +lean_dec(x_4); +lean_dec(x_2); +x_7431 = lean_ctor_get(x_7425, 0); +lean_inc(x_7431); +x_7432 = lean_ctor_get(x_7425, 1); +lean_inc(x_7432); +if (lean_is_exclusive(x_7425)) { + lean_ctor_release(x_7425, 0); + lean_ctor_release(x_7425, 1); + x_7433 = x_7425; +} else { + lean_dec_ref(x_7425); + x_7433 = lean_box(0); +} +if (lean_is_scalar(x_7433)) { + x_7434 = lean_alloc_ctor(1, 2, 0); +} else { + x_7434 = x_7433; +} +lean_ctor_set(x_7434, 0, x_7431); +lean_ctor_set(x_7434, 1, x_7432); +return x_7434; +} +} +else +{ +lean_object* x_7435; 
lean_object* x_7436; lean_object* x_7437; lean_object* x_7438; lean_object* x_7439; +lean_dec(x_5957); +lean_dec(x_153); +lean_dec(x_5); +lean_dec(x_4); +lean_dec(x_2); +lean_dec(x_1); +x_7435 = lean_ctor_get(x_7413, 1); +lean_inc(x_7435); +if (lean_is_exclusive(x_7413)) { + lean_ctor_release(x_7413, 0); + lean_ctor_release(x_7413, 1); + x_7436 = x_7413; +} else { + lean_dec_ref(x_7413); + x_7436 = lean_box(0); +} +x_7437 = lean_ctor_get(x_7415, 0); +lean_inc(x_7437); +lean_dec(x_7415); +if (lean_is_scalar(x_7436)) { + x_7438 = lean_alloc_ctor(0, 2, 0); +} else { + x_7438 = x_7436; +} +lean_ctor_set(x_7438, 0, x_7437); +lean_ctor_set(x_7438, 1, x_7435); +if (lean_is_scalar(x_7303)) { + x_7439 = lean_alloc_ctor(0, 2, 0); +} else { + x_7439 = x_7303; +} +lean_ctor_set(x_7439, 0, x_7438); +lean_ctor_set(x_7439, 1, x_7414); +return x_7439; +} +} +} +case 2: +{ +lean_object* x_7541; lean_object* x_7542; +lean_dec(x_7309); +lean_dec(x_7304); +lean_dec(x_7303); +lean_dec(x_5957); +lean_dec(x_5945); +lean_dec(x_5944); +lean_dec(x_153); +lean_dec(x_2); +lean_dec(x_1); +x_7541 = l_Lean_IR_ToIR_lowerLet___closed__18; +x_7542 = l_panic___at_Lean_IR_ToIR_lowerAlt_loop___spec__1(x_7541, x_7299, x_4, x_5, x_7302); +return x_7542; +} +case 3: +{ +lean_object* x_7543; lean_object* x_7544; lean_object* x_7571; lean_object* x_7572; +lean_dec(x_7309); +lean_dec(x_7304); +lean_dec(x_5945); +lean_dec(x_5944); +lean_inc(x_153); +x_7571 = l_Lean_Compiler_LCNF_getMonoDecl_x3f(x_153, x_4, x_5, x_7302); +x_7572 = lean_ctor_get(x_7571, 0); +lean_inc(x_7572); +if (lean_obj_tag(x_7572) == 0) +{ +lean_object* x_7573; lean_object* x_7574; lean_object* x_7575; +x_7573 = lean_ctor_get(x_7571, 1); +lean_inc(x_7573); +lean_dec(x_7571); +x_7574 = lean_box(0); +x_7575 = lean_alloc_ctor(0, 2, 0); +lean_ctor_set(x_7575, 0, x_7574); +lean_ctor_set(x_7575, 1, x_7299); +x_7543 = x_7575; +x_7544 = x_7573; +goto block_7570; +} +else +{ +lean_object* x_7576; lean_object* x_7577; lean_object* x_7578; lean_object* x_7579; lean_object* x_7580; lean_object* x_7581; lean_object* x_7582; uint8_t x_7583; +x_7576 = lean_ctor_get(x_7571, 1); +lean_inc(x_7576); +if (lean_is_exclusive(x_7571)) { + lean_ctor_release(x_7571, 0); + lean_ctor_release(x_7571, 1); + x_7577 = x_7571; +} else { + lean_dec_ref(x_7571); + x_7577 = lean_box(0); +} +x_7578 = lean_ctor_get(x_7572, 0); +lean_inc(x_7578); +if (lean_is_exclusive(x_7572)) { + lean_ctor_release(x_7572, 0); + x_7579 = x_7572; +} else { + lean_dec_ref(x_7572); + x_7579 = lean_box(0); +} +x_7580 = lean_array_get_size(x_5957); +x_7581 = lean_ctor_get(x_7578, 3); +lean_inc(x_7581); +lean_dec(x_7578); +x_7582 = lean_array_get_size(x_7581); +lean_dec(x_7581); +x_7583 = lean_nat_dec_lt(x_7580, x_7582); +if (x_7583 == 0) +{ +uint8_t x_7584; +x_7584 = lean_nat_dec_eq(x_7580, x_7582); +if (x_7584 == 0) +{ +lean_object* x_7585; lean_object* x_7586; lean_object* x_7587; lean_object* x_7588; lean_object* x_7589; lean_object* x_7590; lean_object* x_7591; lean_object* x_7592; lean_object* x_7593; lean_object* x_7594; lean_object* x_7595; lean_object* x_7596; lean_object* x_7597; lean_object* x_7598; lean_object* x_7599; lean_object* x_7600; lean_object* x_7601; +x_7585 = lean_unsigned_to_nat(0u); +x_7586 = l_Array_extract___rarg(x_5957, x_7585, x_7582); +x_7587 = l_Array_extract___rarg(x_5957, x_7582, x_7580); +lean_dec(x_7580); +lean_inc(x_153); +if (lean_is_scalar(x_7577)) { + x_7588 = lean_alloc_ctor(6, 2, 0); +} else { + x_7588 = x_7577; + lean_ctor_set_tag(x_7588, 6); +} +lean_ctor_set(x_7588, 0, x_153); 
+lean_ctor_set(x_7588, 1, x_7586); +x_7589 = lean_ctor_get(x_1, 0); +lean_inc(x_7589); +x_7590 = l_Lean_IR_ToIR_bindVar(x_7589, x_7299, x_4, x_5, x_7576); +x_7591 = lean_ctor_get(x_7590, 0); +lean_inc(x_7591); +x_7592 = lean_ctor_get(x_7590, 1); +lean_inc(x_7592); +lean_dec(x_7590); +x_7593 = lean_ctor_get(x_7591, 0); +lean_inc(x_7593); +x_7594 = lean_ctor_get(x_7591, 1); +lean_inc(x_7594); +lean_dec(x_7591); +x_7595 = l_Lean_IR_ToIR_newVar(x_7594, x_4, x_5, x_7592); +x_7596 = lean_ctor_get(x_7595, 0); +lean_inc(x_7596); +x_7597 = lean_ctor_get(x_7595, 1); +lean_inc(x_7597); +lean_dec(x_7595); +x_7598 = lean_ctor_get(x_7596, 0); +lean_inc(x_7598); +x_7599 = lean_ctor_get(x_7596, 1); +lean_inc(x_7599); +lean_dec(x_7596); +x_7600 = lean_ctor_get(x_1, 2); +lean_inc(x_7600); +lean_inc(x_5); +lean_inc(x_4); +x_7601 = l_Lean_IR_ToIR_lowerType(x_7600, x_7599, x_4, x_5, x_7597); +if (lean_obj_tag(x_7601) == 0) +{ +lean_object* x_7602; lean_object* x_7603; lean_object* x_7604; lean_object* x_7605; lean_object* x_7606; +x_7602 = lean_ctor_get(x_7601, 0); +lean_inc(x_7602); +x_7603 = lean_ctor_get(x_7601, 1); +lean_inc(x_7603); +lean_dec(x_7601); +x_7604 = lean_ctor_get(x_7602, 0); +lean_inc(x_7604); +x_7605 = lean_ctor_get(x_7602, 1); +lean_inc(x_7605); +lean_dec(x_7602); +lean_inc(x_5); +lean_inc(x_4); +lean_inc(x_2); +x_7606 = l_Lean_IR_ToIR_lowerLet___lambda__3(x_2, x_7598, x_7587, x_7593, x_7588, x_7604, x_7605, x_4, x_5, x_7603); +if (lean_obj_tag(x_7606) == 0) +{ +lean_object* x_7607; lean_object* x_7608; lean_object* x_7609; lean_object* x_7610; lean_object* x_7611; lean_object* x_7612; lean_object* x_7613; +x_7607 = lean_ctor_get(x_7606, 0); +lean_inc(x_7607); +x_7608 = lean_ctor_get(x_7606, 1); +lean_inc(x_7608); +lean_dec(x_7606); +x_7609 = lean_ctor_get(x_7607, 0); +lean_inc(x_7609); +x_7610 = lean_ctor_get(x_7607, 1); +lean_inc(x_7610); +if (lean_is_exclusive(x_7607)) { + lean_ctor_release(x_7607, 0); + lean_ctor_release(x_7607, 1); + x_7611 = x_7607; +} else { + lean_dec_ref(x_7607); + x_7611 = lean_box(0); +} +if (lean_is_scalar(x_7579)) { + x_7612 = lean_alloc_ctor(1, 1, 0); +} else { + x_7612 = x_7579; +} +lean_ctor_set(x_7612, 0, x_7609); +if (lean_is_scalar(x_7611)) { + x_7613 = lean_alloc_ctor(0, 2, 0); +} else { + x_7613 = x_7611; +} +lean_ctor_set(x_7613, 0, x_7612); +lean_ctor_set(x_7613, 1, x_7610); +x_7543 = x_7613; +x_7544 = x_7608; +goto block_7570; +} +else +{ +lean_object* x_7614; lean_object* x_7615; lean_object* x_7616; lean_object* x_7617; +lean_dec(x_7579); +lean_dec(x_7303); +lean_dec(x_5957); +lean_dec(x_153); +lean_dec(x_5); +lean_dec(x_4); +lean_dec(x_2); +lean_dec(x_1); +x_7614 = lean_ctor_get(x_7606, 0); +lean_inc(x_7614); +x_7615 = lean_ctor_get(x_7606, 1); +lean_inc(x_7615); +if (lean_is_exclusive(x_7606)) { + lean_ctor_release(x_7606, 0); + lean_ctor_release(x_7606, 1); + x_7616 = x_7606; +} else { + lean_dec_ref(x_7606); + x_7616 = lean_box(0); +} +if (lean_is_scalar(x_7616)) { + x_7617 = lean_alloc_ctor(1, 2, 0); +} else { + x_7617 = x_7616; +} +lean_ctor_set(x_7617, 0, x_7614); +lean_ctor_set(x_7617, 1, x_7615); +return x_7617; +} +} +else +{ +lean_object* x_7618; lean_object* x_7619; lean_object* x_7620; lean_object* x_7621; +lean_dec(x_7598); +lean_dec(x_7593); +lean_dec(x_7588); +lean_dec(x_7587); +lean_dec(x_7579); +lean_dec(x_7303); +lean_dec(x_5957); +lean_dec(x_153); +lean_dec(x_5); +lean_dec(x_4); +lean_dec(x_2); +lean_dec(x_1); +x_7618 = lean_ctor_get(x_7601, 0); +lean_inc(x_7618); +x_7619 = lean_ctor_get(x_7601, 1); +lean_inc(x_7619); +if 
(lean_is_exclusive(x_7601)) { + lean_ctor_release(x_7601, 0); + lean_ctor_release(x_7601, 1); + x_7620 = x_7601; +} else { + lean_dec_ref(x_7601); + x_7620 = lean_box(0); +} +if (lean_is_scalar(x_7620)) { + x_7621 = lean_alloc_ctor(1, 2, 0); +} else { + x_7621 = x_7620; +} +lean_ctor_set(x_7621, 0, x_7618); +lean_ctor_set(x_7621, 1, x_7619); +return x_7621; +} +} +else +{ +lean_object* x_7622; lean_object* x_7623; lean_object* x_7624; lean_object* x_7625; lean_object* x_7626; lean_object* x_7627; lean_object* x_7628; lean_object* x_7629; lean_object* x_7630; +lean_dec(x_7582); +lean_dec(x_7580); +lean_inc(x_5957); +lean_inc(x_153); +if (lean_is_scalar(x_7577)) { + x_7622 = lean_alloc_ctor(6, 2, 0); +} else { + x_7622 = x_7577; + lean_ctor_set_tag(x_7622, 6); +} +lean_ctor_set(x_7622, 0, x_153); +lean_ctor_set(x_7622, 1, x_5957); +x_7623 = lean_ctor_get(x_1, 0); +lean_inc(x_7623); +x_7624 = l_Lean_IR_ToIR_bindVar(x_7623, x_7299, x_4, x_5, x_7576); +x_7625 = lean_ctor_get(x_7624, 0); +lean_inc(x_7625); +x_7626 = lean_ctor_get(x_7624, 1); +lean_inc(x_7626); +lean_dec(x_7624); +x_7627 = lean_ctor_get(x_7625, 0); +lean_inc(x_7627); +x_7628 = lean_ctor_get(x_7625, 1); +lean_inc(x_7628); +lean_dec(x_7625); +x_7629 = lean_ctor_get(x_1, 2); +lean_inc(x_7629); +lean_inc(x_5); +lean_inc(x_4); +x_7630 = l_Lean_IR_ToIR_lowerType(x_7629, x_7628, x_4, x_5, x_7626); +if (lean_obj_tag(x_7630) == 0) +{ +lean_object* x_7631; lean_object* x_7632; lean_object* x_7633; lean_object* x_7634; lean_object* x_7635; +x_7631 = lean_ctor_get(x_7630, 0); +lean_inc(x_7631); +x_7632 = lean_ctor_get(x_7630, 1); +lean_inc(x_7632); +lean_dec(x_7630); +x_7633 = lean_ctor_get(x_7631, 0); +lean_inc(x_7633); +x_7634 = lean_ctor_get(x_7631, 1); +lean_inc(x_7634); +lean_dec(x_7631); +lean_inc(x_5); +lean_inc(x_4); +lean_inc(x_2); +x_7635 = l_Lean_IR_ToIR_lowerLet___lambda__1(x_2, x_7627, x_7622, x_7633, x_7634, x_4, x_5, x_7632); +if (lean_obj_tag(x_7635) == 0) +{ +lean_object* x_7636; lean_object* x_7637; lean_object* x_7638; lean_object* x_7639; lean_object* x_7640; lean_object* x_7641; lean_object* x_7642; +x_7636 = lean_ctor_get(x_7635, 0); +lean_inc(x_7636); +x_7637 = lean_ctor_get(x_7635, 1); +lean_inc(x_7637); +lean_dec(x_7635); +x_7638 = lean_ctor_get(x_7636, 0); +lean_inc(x_7638); +x_7639 = lean_ctor_get(x_7636, 1); +lean_inc(x_7639); +if (lean_is_exclusive(x_7636)) { + lean_ctor_release(x_7636, 0); + lean_ctor_release(x_7636, 1); + x_7640 = x_7636; +} else { + lean_dec_ref(x_7636); + x_7640 = lean_box(0); +} +if (lean_is_scalar(x_7579)) { + x_7641 = lean_alloc_ctor(1, 1, 0); +} else { + x_7641 = x_7579; +} +lean_ctor_set(x_7641, 0, x_7638); +if (lean_is_scalar(x_7640)) { + x_7642 = lean_alloc_ctor(0, 2, 0); +} else { + x_7642 = x_7640; +} +lean_ctor_set(x_7642, 0, x_7641); +lean_ctor_set(x_7642, 1, x_7639); +x_7543 = x_7642; +x_7544 = x_7637; +goto block_7570; +} +else +{ +lean_object* x_7643; lean_object* x_7644; lean_object* x_7645; lean_object* x_7646; +lean_dec(x_7579); +lean_dec(x_7303); +lean_dec(x_5957); +lean_dec(x_153); +lean_dec(x_5); +lean_dec(x_4); +lean_dec(x_2); +lean_dec(x_1); +x_7643 = lean_ctor_get(x_7635, 0); +lean_inc(x_7643); +x_7644 = lean_ctor_get(x_7635, 1); +lean_inc(x_7644); +if (lean_is_exclusive(x_7635)) { + lean_ctor_release(x_7635, 0); + lean_ctor_release(x_7635, 1); + x_7645 = x_7635; +} else { + lean_dec_ref(x_7635); + x_7645 = lean_box(0); +} +if (lean_is_scalar(x_7645)) { + x_7646 = lean_alloc_ctor(1, 2, 0); +} else { + x_7646 = x_7645; +} +lean_ctor_set(x_7646, 0, x_7643); 
+lean_ctor_set(x_7646, 1, x_7644); +return x_7646; +} +} +else +{ +lean_object* x_7647; lean_object* x_7648; lean_object* x_7649; lean_object* x_7650; +lean_dec(x_7627); +lean_dec(x_7622); +lean_dec(x_7579); +lean_dec(x_7303); +lean_dec(x_5957); +lean_dec(x_153); +lean_dec(x_5); +lean_dec(x_4); +lean_dec(x_2); +lean_dec(x_1); +x_7647 = lean_ctor_get(x_7630, 0); +lean_inc(x_7647); +x_7648 = lean_ctor_get(x_7630, 1); +lean_inc(x_7648); +if (lean_is_exclusive(x_7630)) { + lean_ctor_release(x_7630, 0); + lean_ctor_release(x_7630, 1); + x_7649 = x_7630; +} else { + lean_dec_ref(x_7630); + x_7649 = lean_box(0); +} +if (lean_is_scalar(x_7649)) { + x_7650 = lean_alloc_ctor(1, 2, 0); +} else { + x_7650 = x_7649; +} +lean_ctor_set(x_7650, 0, x_7647); +lean_ctor_set(x_7650, 1, x_7648); +return x_7650; +} +} +} +else +{ +lean_object* x_7651; lean_object* x_7652; lean_object* x_7653; lean_object* x_7654; lean_object* x_7655; lean_object* x_7656; lean_object* x_7657; lean_object* x_7658; lean_object* x_7659; +lean_dec(x_7582); +lean_dec(x_7580); +lean_inc(x_5957); +lean_inc(x_153); +if (lean_is_scalar(x_7577)) { + x_7651 = lean_alloc_ctor(7, 2, 0); +} else { + x_7651 = x_7577; + lean_ctor_set_tag(x_7651, 7); +} +lean_ctor_set(x_7651, 0, x_153); +lean_ctor_set(x_7651, 1, x_5957); +x_7652 = lean_ctor_get(x_1, 0); +lean_inc(x_7652); +x_7653 = l_Lean_IR_ToIR_bindVar(x_7652, x_7299, x_4, x_5, x_7576); +x_7654 = lean_ctor_get(x_7653, 0); +lean_inc(x_7654); +x_7655 = lean_ctor_get(x_7653, 1); +lean_inc(x_7655); +lean_dec(x_7653); +x_7656 = lean_ctor_get(x_7654, 0); +lean_inc(x_7656); +x_7657 = lean_ctor_get(x_7654, 1); +lean_inc(x_7657); +lean_dec(x_7654); +x_7658 = lean_box(7); +lean_inc(x_5); +lean_inc(x_4); +lean_inc(x_2); +x_7659 = l_Lean_IR_ToIR_lowerLet___lambda__1(x_2, x_7656, x_7651, x_7658, x_7657, x_4, x_5, x_7655); +if (lean_obj_tag(x_7659) == 0) +{ +lean_object* x_7660; lean_object* x_7661; lean_object* x_7662; lean_object* x_7663; lean_object* x_7664; lean_object* x_7665; lean_object* x_7666; +x_7660 = lean_ctor_get(x_7659, 0); +lean_inc(x_7660); +x_7661 = lean_ctor_get(x_7659, 1); +lean_inc(x_7661); +lean_dec(x_7659); +x_7662 = lean_ctor_get(x_7660, 0); +lean_inc(x_7662); +x_7663 = lean_ctor_get(x_7660, 1); +lean_inc(x_7663); +if (lean_is_exclusive(x_7660)) { + lean_ctor_release(x_7660, 0); + lean_ctor_release(x_7660, 1); + x_7664 = x_7660; +} else { + lean_dec_ref(x_7660); + x_7664 = lean_box(0); +} +if (lean_is_scalar(x_7579)) { + x_7665 = lean_alloc_ctor(1, 1, 0); +} else { + x_7665 = x_7579; +} +lean_ctor_set(x_7665, 0, x_7662); +if (lean_is_scalar(x_7664)) { + x_7666 = lean_alloc_ctor(0, 2, 0); +} else { + x_7666 = x_7664; +} +lean_ctor_set(x_7666, 0, x_7665); +lean_ctor_set(x_7666, 1, x_7663); +x_7543 = x_7666; +x_7544 = x_7661; +goto block_7570; +} +else +{ +lean_object* x_7667; lean_object* x_7668; lean_object* x_7669; lean_object* x_7670; +lean_dec(x_7579); +lean_dec(x_7303); +lean_dec(x_5957); +lean_dec(x_153); +lean_dec(x_5); +lean_dec(x_4); +lean_dec(x_2); +lean_dec(x_1); +x_7667 = lean_ctor_get(x_7659, 0); +lean_inc(x_7667); +x_7668 = lean_ctor_get(x_7659, 1); +lean_inc(x_7668); +if (lean_is_exclusive(x_7659)) { + lean_ctor_release(x_7659, 0); + lean_ctor_release(x_7659, 1); + x_7669 = x_7659; +} else { + lean_dec_ref(x_7659); + x_7669 = lean_box(0); +} +if (lean_is_scalar(x_7669)) { + x_7670 = lean_alloc_ctor(1, 2, 0); +} else { + x_7670 = x_7669; +} +lean_ctor_set(x_7670, 0, x_7667); +lean_ctor_set(x_7670, 1, x_7668); +return x_7670; +} +} +} +block_7570: +{ +lean_object* x_7545; 
+x_7545 = lean_ctor_get(x_7543, 0); +lean_inc(x_7545); +if (lean_obj_tag(x_7545) == 0) +{ +lean_object* x_7546; lean_object* x_7547; lean_object* x_7548; lean_object* x_7549; lean_object* x_7550; lean_object* x_7551; lean_object* x_7552; lean_object* x_7553; lean_object* x_7554; lean_object* x_7555; +lean_dec(x_7303); +x_7546 = lean_ctor_get(x_7543, 1); +lean_inc(x_7546); +lean_dec(x_7543); +x_7547 = lean_alloc_ctor(6, 2, 0); +lean_ctor_set(x_7547, 0, x_153); +lean_ctor_set(x_7547, 1, x_5957); +x_7548 = lean_ctor_get(x_1, 0); +lean_inc(x_7548); +x_7549 = l_Lean_IR_ToIR_bindVar(x_7548, x_7546, x_4, x_5, x_7544); +x_7550 = lean_ctor_get(x_7549, 0); +lean_inc(x_7550); +x_7551 = lean_ctor_get(x_7549, 1); +lean_inc(x_7551); +lean_dec(x_7549); +x_7552 = lean_ctor_get(x_7550, 0); +lean_inc(x_7552); +x_7553 = lean_ctor_get(x_7550, 1); +lean_inc(x_7553); +lean_dec(x_7550); +x_7554 = lean_ctor_get(x_1, 2); +lean_inc(x_7554); +lean_dec(x_1); +lean_inc(x_5); +lean_inc(x_4); +x_7555 = l_Lean_IR_ToIR_lowerType(x_7554, x_7553, x_4, x_5, x_7551); +if (lean_obj_tag(x_7555) == 0) +{ +lean_object* x_7556; lean_object* x_7557; lean_object* x_7558; lean_object* x_7559; lean_object* x_7560; +x_7556 = lean_ctor_get(x_7555, 0); +lean_inc(x_7556); +x_7557 = lean_ctor_get(x_7555, 1); +lean_inc(x_7557); +lean_dec(x_7555); +x_7558 = lean_ctor_get(x_7556, 0); +lean_inc(x_7558); +x_7559 = lean_ctor_get(x_7556, 1); +lean_inc(x_7559); +lean_dec(x_7556); +x_7560 = l_Lean_IR_ToIR_lowerLet___lambda__1(x_2, x_7552, x_7547, x_7558, x_7559, x_4, x_5, x_7557); +return x_7560; +} +else +{ +lean_object* x_7561; lean_object* x_7562; lean_object* x_7563; lean_object* x_7564; +lean_dec(x_7552); +lean_dec(x_7547); +lean_dec(x_5); +lean_dec(x_4); +lean_dec(x_2); +x_7561 = lean_ctor_get(x_7555, 0); +lean_inc(x_7561); +x_7562 = lean_ctor_get(x_7555, 1); +lean_inc(x_7562); +if (lean_is_exclusive(x_7555)) { + lean_ctor_release(x_7555, 0); + lean_ctor_release(x_7555, 1); + x_7563 = x_7555; +} else { + lean_dec_ref(x_7555); + x_7563 = lean_box(0); +} +if (lean_is_scalar(x_7563)) { + x_7564 = lean_alloc_ctor(1, 2, 0); +} else { + x_7564 = x_7563; +} +lean_ctor_set(x_7564, 0, x_7561); +lean_ctor_set(x_7564, 1, x_7562); +return x_7564; +} +} +else +{ +lean_object* x_7565; lean_object* x_7566; lean_object* x_7567; lean_object* x_7568; lean_object* x_7569; +lean_dec(x_5957); +lean_dec(x_153); +lean_dec(x_5); +lean_dec(x_4); +lean_dec(x_2); +lean_dec(x_1); +x_7565 = lean_ctor_get(x_7543, 1); +lean_inc(x_7565); +if (lean_is_exclusive(x_7543)) { + lean_ctor_release(x_7543, 0); + lean_ctor_release(x_7543, 1); + x_7566 = x_7543; +} else { + lean_dec_ref(x_7543); + x_7566 = lean_box(0); +} +x_7567 = lean_ctor_get(x_7545, 0); +lean_inc(x_7567); +lean_dec(x_7545); +if (lean_is_scalar(x_7566)) { + x_7568 = lean_alloc_ctor(0, 2, 0); +} else { + x_7568 = x_7566; +} +lean_ctor_set(x_7568, 0, x_7567); +lean_ctor_set(x_7568, 1, x_7565); +if (lean_is_scalar(x_7303)) { + x_7569 = lean_alloc_ctor(0, 2, 0); +} else { + x_7569 = x_7303; +} +lean_ctor_set(x_7569, 0, x_7568); +lean_ctor_set(x_7569, 1, x_7544); +return x_7569; +} +} +} +case 4: +{ +lean_object* x_7671; lean_object* x_7672; uint8_t x_7673; +lean_dec(x_7304); +lean_dec(x_7303); +lean_dec(x_5945); +lean_dec(x_5944); +if (lean_is_exclusive(x_7309)) { + lean_ctor_release(x_7309, 0); + x_7671 = x_7309; +} else { + lean_dec_ref(x_7309); + x_7671 = lean_box(0); +} +x_7672 = l_Lean_IR_ToIR_lowerLet___closed__20; +x_7673 = lean_name_eq(x_153, x_7672); +if (x_7673 == 0) +{ +uint8_t x_7674; lean_object* x_7675; 
lean_object* x_7676; lean_object* x_7677; lean_object* x_7678; lean_object* x_7679; lean_object* x_7680; lean_object* x_7681; lean_object* x_7682; lean_object* x_7683; +lean_dec(x_5957); +lean_dec(x_2); +lean_dec(x_1); +x_7674 = 1; +x_7675 = l_Lean_IR_ToIR_lowerLet___closed__14; +x_7676 = l_Lean_Name_toString(x_153, x_7674, x_7675); +if (lean_is_scalar(x_7671)) { + x_7677 = lean_alloc_ctor(3, 1, 0); +} else { + x_7677 = x_7671; + lean_ctor_set_tag(x_7677, 3); +} +lean_ctor_set(x_7677, 0, x_7676); +x_7678 = l_Lean_IR_ToIR_lowerLet___closed__22; +x_7679 = lean_alloc_ctor(5, 2, 0); +lean_ctor_set(x_7679, 0, x_7678); +lean_ctor_set(x_7679, 1, x_7677); +x_7680 = l_Lean_IR_ToIR_lowerLet___closed__24; +x_7681 = lean_alloc_ctor(5, 2, 0); +lean_ctor_set(x_7681, 0, x_7679); +lean_ctor_set(x_7681, 1, x_7680); +x_7682 = l_Lean_MessageData_ofFormat(x_7681); +x_7683 = l_Lean_throwError___at_Lean_IR_ToIR_lowerLet___spec__1(x_7682, x_7299, x_4, x_5, x_7302); +lean_dec(x_5); +lean_dec(x_4); +lean_dec(x_7299); +return x_7683; +} +else +{ +lean_object* x_7684; lean_object* x_7685; lean_object* x_7686; +lean_dec(x_7671); +lean_dec(x_153); +x_7684 = l_Lean_IR_instInhabitedArg; +x_7685 = lean_unsigned_to_nat(2u); +x_7686 = lean_array_get(x_7684, x_5957, x_7685); +lean_dec(x_5957); +if (lean_obj_tag(x_7686) == 0) +{ +lean_object* x_7687; lean_object* x_7688; lean_object* x_7689; lean_object* x_7690; lean_object* x_7691; lean_object* x_7692; lean_object* x_7693; +x_7687 = lean_ctor_get(x_7686, 0); +lean_inc(x_7687); +lean_dec(x_7686); +x_7688 = lean_ctor_get(x_1, 0); +lean_inc(x_7688); +lean_dec(x_1); +x_7689 = l_Lean_IR_ToIR_bindVarToVarId(x_7688, x_7687, x_7299, x_4, x_5, x_7302); +x_7690 = lean_ctor_get(x_7689, 0); +lean_inc(x_7690); +x_7691 = lean_ctor_get(x_7689, 1); +lean_inc(x_7691); +lean_dec(x_7689); +x_7692 = lean_ctor_get(x_7690, 1); +lean_inc(x_7692); +lean_dec(x_7690); +x_7693 = l_Lean_IR_ToIR_lowerCode(x_2, x_7692, x_4, x_5, x_7691); +return x_7693; +} +else +{ +lean_object* x_7694; lean_object* x_7695; lean_object* x_7696; lean_object* x_7697; lean_object* x_7698; lean_object* x_7699; +x_7694 = lean_ctor_get(x_1, 0); +lean_inc(x_7694); +lean_dec(x_1); +x_7695 = l_Lean_IR_ToIR_bindErased(x_7694, x_7299, x_4, x_5, x_7302); +x_7696 = lean_ctor_get(x_7695, 0); +lean_inc(x_7696); +x_7697 = lean_ctor_get(x_7695, 1); +lean_inc(x_7697); +lean_dec(x_7695); +x_7698 = lean_ctor_get(x_7696, 1); +lean_inc(x_7698); +lean_dec(x_7696); +x_7699 = l_Lean_IR_ToIR_lowerCode(x_2, x_7698, x_4, x_5, x_7697); +return x_7699; +} +} +} +case 5: +{ +lean_object* x_7700; lean_object* x_7701; +lean_dec(x_7309); +lean_dec(x_7304); +lean_dec(x_7303); +lean_dec(x_5957); +lean_dec(x_5945); +lean_dec(x_5944); +lean_dec(x_153); +lean_dec(x_2); +lean_dec(x_1); +x_7700 = l_Lean_IR_ToIR_lowerLet___closed__26; +x_7701 = l_panic___at_Lean_IR_ToIR_lowerAlt_loop___spec__1(x_7700, x_7299, x_4, x_5, x_7302); +return x_7701; +} +case 6: +{ +lean_object* x_7702; uint8_t x_7703; +x_7702 = lean_ctor_get(x_7309, 0); +lean_inc(x_7702); +lean_dec(x_7309); +lean_inc(x_153); +x_7703 = l_Lean_isExtern(x_7304, x_153); +if (x_7703 == 0) +{ +lean_object* x_7704; +lean_dec(x_7303); +lean_dec(x_5957); +lean_inc(x_5); +lean_inc(x_4); +x_7704 = l_Lean_IR_ToIR_getCtorInfo(x_153, x_7299, x_4, x_5, x_7302); +if (lean_obj_tag(x_7704) == 0) +{ +lean_object* x_7705; lean_object* x_7706; lean_object* x_7707; lean_object* x_7708; lean_object* x_7709; lean_object* x_7710; lean_object* x_7711; lean_object* x_7712; lean_object* x_7713; lean_object* x_7714; lean_object* 
x_7715; lean_object* x_7716; lean_object* x_7717; lean_object* x_7718; lean_object* x_7719; lean_object* x_7720; lean_object* x_7721; lean_object* x_7722; lean_object* x_7723; lean_object* x_7724; +x_7705 = lean_ctor_get(x_7704, 0); +lean_inc(x_7705); +x_7706 = lean_ctor_get(x_7705, 0); +lean_inc(x_7706); +x_7707 = lean_ctor_get(x_7704, 1); +lean_inc(x_7707); +lean_dec(x_7704); +x_7708 = lean_ctor_get(x_7705, 1); +lean_inc(x_7708); +lean_dec(x_7705); +x_7709 = lean_ctor_get(x_7706, 0); +lean_inc(x_7709); +x_7710 = lean_ctor_get(x_7706, 1); +lean_inc(x_7710); +lean_dec(x_7706); +x_7711 = lean_ctor_get(x_7702, 3); +lean_inc(x_7711); +lean_dec(x_7702); +x_7712 = lean_array_get_size(x_5944); +x_7713 = l_Array_extract___rarg(x_5944, x_7711, x_7712); +lean_dec(x_7712); +lean_dec(x_5944); +x_7714 = lean_array_get_size(x_7710); +x_7715 = lean_unsigned_to_nat(0u); +x_7716 = lean_unsigned_to_nat(1u); +if (lean_is_scalar(x_5945)) { + x_7717 = lean_alloc_ctor(0, 3, 0); +} else { + x_7717 = x_5945; + lean_ctor_set_tag(x_7717, 0); +} +lean_ctor_set(x_7717, 0, x_7715); +lean_ctor_set(x_7717, 1, x_7714); +lean_ctor_set(x_7717, 2, x_7716); +x_7718 = l_Lean_IR_ToIR_lowerLet___closed__27; +x_7719 = l_Std_Range_forIn_x27_loop___at_Lean_IR_ToIR_lowerLet___spec__4(x_7710, x_7713, x_7717, x_7717, x_7718, x_7715, lean_box(0), lean_box(0), x_7708, x_4, x_5, x_7707); +lean_dec(x_7717); +x_7720 = lean_ctor_get(x_7719, 0); +lean_inc(x_7720); +x_7721 = lean_ctor_get(x_7719, 1); +lean_inc(x_7721); +lean_dec(x_7719); +x_7722 = lean_ctor_get(x_7720, 0); +lean_inc(x_7722); +x_7723 = lean_ctor_get(x_7720, 1); +lean_inc(x_7723); +lean_dec(x_7720); +x_7724 = l_Lean_IR_ToIR_lowerLet___lambda__4(x_1, x_2, x_7709, x_7710, x_7713, x_7722, x_7723, x_4, x_5, x_7721); +lean_dec(x_7713); +lean_dec(x_7710); +return x_7724; +} +else +{ +lean_object* x_7725; lean_object* x_7726; lean_object* x_7727; lean_object* x_7728; +lean_dec(x_7702); +lean_dec(x_5945); +lean_dec(x_5944); +lean_dec(x_5); +lean_dec(x_4); +lean_dec(x_2); +lean_dec(x_1); +x_7725 = lean_ctor_get(x_7704, 0); +lean_inc(x_7725); +x_7726 = lean_ctor_get(x_7704, 1); +lean_inc(x_7726); +if (lean_is_exclusive(x_7704)) { + lean_ctor_release(x_7704, 0); + lean_ctor_release(x_7704, 1); + x_7727 = x_7704; +} else { + lean_dec_ref(x_7704); + x_7727 = lean_box(0); +} +if (lean_is_scalar(x_7727)) { + x_7728 = lean_alloc_ctor(1, 2, 0); +} else { + x_7728 = x_7727; +} +lean_ctor_set(x_7728, 0, x_7725); +lean_ctor_set(x_7728, 1, x_7726); +return x_7728; +} +} +else +{ +lean_object* x_7729; lean_object* x_7730; lean_object* x_7757; lean_object* x_7758; +lean_dec(x_7702); +lean_dec(x_5945); +lean_dec(x_5944); +lean_inc(x_153); +x_7757 = l_Lean_Compiler_LCNF_getMonoDecl_x3f(x_153, x_4, x_5, x_7302); +x_7758 = lean_ctor_get(x_7757, 0); +lean_inc(x_7758); +if (lean_obj_tag(x_7758) == 0) +{ +lean_object* x_7759; lean_object* x_7760; lean_object* x_7761; +x_7759 = lean_ctor_get(x_7757, 1); +lean_inc(x_7759); +lean_dec(x_7757); +x_7760 = lean_box(0); +x_7761 = lean_alloc_ctor(0, 2, 0); +lean_ctor_set(x_7761, 0, x_7760); +lean_ctor_set(x_7761, 1, x_7299); +x_7729 = x_7761; +x_7730 = x_7759; +goto block_7756; +} +else +{ +lean_object* x_7762; lean_object* x_7763; lean_object* x_7764; lean_object* x_7765; lean_object* x_7766; lean_object* x_7767; lean_object* x_7768; uint8_t x_7769; +x_7762 = lean_ctor_get(x_7757, 1); +lean_inc(x_7762); +if (lean_is_exclusive(x_7757)) { + lean_ctor_release(x_7757, 0); + lean_ctor_release(x_7757, 1); + x_7763 = x_7757; +} else { + lean_dec_ref(x_7757); + x_7763 = 
lean_box(0); +} +x_7764 = lean_ctor_get(x_7758, 0); +lean_inc(x_7764); +if (lean_is_exclusive(x_7758)) { + lean_ctor_release(x_7758, 0); + x_7765 = x_7758; +} else { + lean_dec_ref(x_7758); + x_7765 = lean_box(0); +} +x_7766 = lean_array_get_size(x_5957); +x_7767 = lean_ctor_get(x_7764, 3); +lean_inc(x_7767); +lean_dec(x_7764); +x_7768 = lean_array_get_size(x_7767); +lean_dec(x_7767); +x_7769 = lean_nat_dec_lt(x_7766, x_7768); +if (x_7769 == 0) +{ +uint8_t x_7770; +x_7770 = lean_nat_dec_eq(x_7766, x_7768); +if (x_7770 == 0) +{ +lean_object* x_7771; lean_object* x_7772; lean_object* x_7773; lean_object* x_7774; lean_object* x_7775; lean_object* x_7776; lean_object* x_7777; lean_object* x_7778; lean_object* x_7779; lean_object* x_7780; lean_object* x_7781; lean_object* x_7782; lean_object* x_7783; lean_object* x_7784; lean_object* x_7785; lean_object* x_7786; lean_object* x_7787; +x_7771 = lean_unsigned_to_nat(0u); +x_7772 = l_Array_extract___rarg(x_5957, x_7771, x_7768); +x_7773 = l_Array_extract___rarg(x_5957, x_7768, x_7766); +lean_dec(x_7766); +lean_inc(x_153); +if (lean_is_scalar(x_7763)) { + x_7774 = lean_alloc_ctor(6, 2, 0); +} else { + x_7774 = x_7763; + lean_ctor_set_tag(x_7774, 6); +} +lean_ctor_set(x_7774, 0, x_153); +lean_ctor_set(x_7774, 1, x_7772); +x_7775 = lean_ctor_get(x_1, 0); +lean_inc(x_7775); +x_7776 = l_Lean_IR_ToIR_bindVar(x_7775, x_7299, x_4, x_5, x_7762); +x_7777 = lean_ctor_get(x_7776, 0); +lean_inc(x_7777); +x_7778 = lean_ctor_get(x_7776, 1); +lean_inc(x_7778); +lean_dec(x_7776); +x_7779 = lean_ctor_get(x_7777, 0); +lean_inc(x_7779); +x_7780 = lean_ctor_get(x_7777, 1); +lean_inc(x_7780); +lean_dec(x_7777); +x_7781 = l_Lean_IR_ToIR_newVar(x_7780, x_4, x_5, x_7778); +x_7782 = lean_ctor_get(x_7781, 0); +lean_inc(x_7782); +x_7783 = lean_ctor_get(x_7781, 1); +lean_inc(x_7783); +lean_dec(x_7781); +x_7784 = lean_ctor_get(x_7782, 0); +lean_inc(x_7784); +x_7785 = lean_ctor_get(x_7782, 1); +lean_inc(x_7785); +lean_dec(x_7782); +x_7786 = lean_ctor_get(x_1, 2); +lean_inc(x_7786); +lean_inc(x_5); +lean_inc(x_4); +x_7787 = l_Lean_IR_ToIR_lowerType(x_7786, x_7785, x_4, x_5, x_7783); +if (lean_obj_tag(x_7787) == 0) +{ +lean_object* x_7788; lean_object* x_7789; lean_object* x_7790; lean_object* x_7791; lean_object* x_7792; +x_7788 = lean_ctor_get(x_7787, 0); +lean_inc(x_7788); +x_7789 = lean_ctor_get(x_7787, 1); +lean_inc(x_7789); +lean_dec(x_7787); +x_7790 = lean_ctor_get(x_7788, 0); +lean_inc(x_7790); +x_7791 = lean_ctor_get(x_7788, 1); +lean_inc(x_7791); +lean_dec(x_7788); +lean_inc(x_5); +lean_inc(x_4); +lean_inc(x_2); +x_7792 = l_Lean_IR_ToIR_lowerLet___lambda__3(x_2, x_7784, x_7773, x_7779, x_7774, x_7790, x_7791, x_4, x_5, x_7789); +if (lean_obj_tag(x_7792) == 0) +{ +lean_object* x_7793; lean_object* x_7794; lean_object* x_7795; lean_object* x_7796; lean_object* x_7797; lean_object* x_7798; lean_object* x_7799; +x_7793 = lean_ctor_get(x_7792, 0); +lean_inc(x_7793); +x_7794 = lean_ctor_get(x_7792, 1); +lean_inc(x_7794); +lean_dec(x_7792); +x_7795 = lean_ctor_get(x_7793, 0); +lean_inc(x_7795); +x_7796 = lean_ctor_get(x_7793, 1); +lean_inc(x_7796); +if (lean_is_exclusive(x_7793)) { + lean_ctor_release(x_7793, 0); + lean_ctor_release(x_7793, 1); + x_7797 = x_7793; +} else { + lean_dec_ref(x_7793); + x_7797 = lean_box(0); +} +if (lean_is_scalar(x_7765)) { + x_7798 = lean_alloc_ctor(1, 1, 0); +} else { + x_7798 = x_7765; +} +lean_ctor_set(x_7798, 0, x_7795); +if (lean_is_scalar(x_7797)) { + x_7799 = lean_alloc_ctor(0, 2, 0); +} else { + x_7799 = x_7797; +} +lean_ctor_set(x_7799, 
0, x_7798); +lean_ctor_set(x_7799, 1, x_7796); +x_7729 = x_7799; +x_7730 = x_7794; +goto block_7756; +} +else +{ +lean_object* x_7800; lean_object* x_7801; lean_object* x_7802; lean_object* x_7803; +lean_dec(x_7765); +lean_dec(x_7303); +lean_dec(x_5957); +lean_dec(x_153); +lean_dec(x_5); +lean_dec(x_4); +lean_dec(x_2); +lean_dec(x_1); +x_7800 = lean_ctor_get(x_7792, 0); +lean_inc(x_7800); +x_7801 = lean_ctor_get(x_7792, 1); +lean_inc(x_7801); +if (lean_is_exclusive(x_7792)) { + lean_ctor_release(x_7792, 0); + lean_ctor_release(x_7792, 1); + x_7802 = x_7792; +} else { + lean_dec_ref(x_7792); + x_7802 = lean_box(0); +} +if (lean_is_scalar(x_7802)) { + x_7803 = lean_alloc_ctor(1, 2, 0); +} else { + x_7803 = x_7802; +} +lean_ctor_set(x_7803, 0, x_7800); +lean_ctor_set(x_7803, 1, x_7801); +return x_7803; +} +} +else +{ +lean_object* x_7804; lean_object* x_7805; lean_object* x_7806; lean_object* x_7807; +lean_dec(x_7784); +lean_dec(x_7779); +lean_dec(x_7774); +lean_dec(x_7773); +lean_dec(x_7765); +lean_dec(x_7303); +lean_dec(x_5957); +lean_dec(x_153); +lean_dec(x_5); +lean_dec(x_4); +lean_dec(x_2); +lean_dec(x_1); +x_7804 = lean_ctor_get(x_7787, 0); +lean_inc(x_7804); +x_7805 = lean_ctor_get(x_7787, 1); +lean_inc(x_7805); +if (lean_is_exclusive(x_7787)) { + lean_ctor_release(x_7787, 0); + lean_ctor_release(x_7787, 1); + x_7806 = x_7787; +} else { + lean_dec_ref(x_7787); + x_7806 = lean_box(0); +} +if (lean_is_scalar(x_7806)) { + x_7807 = lean_alloc_ctor(1, 2, 0); +} else { + x_7807 = x_7806; +} +lean_ctor_set(x_7807, 0, x_7804); +lean_ctor_set(x_7807, 1, x_7805); +return x_7807; +} +} +else +{ +lean_object* x_7808; lean_object* x_7809; lean_object* x_7810; lean_object* x_7811; lean_object* x_7812; lean_object* x_7813; lean_object* x_7814; lean_object* x_7815; lean_object* x_7816; +lean_dec(x_7768); +lean_dec(x_7766); +lean_inc(x_5957); +lean_inc(x_153); +if (lean_is_scalar(x_7763)) { + x_7808 = lean_alloc_ctor(6, 2, 0); +} else { + x_7808 = x_7763; + lean_ctor_set_tag(x_7808, 6); +} +lean_ctor_set(x_7808, 0, x_153); +lean_ctor_set(x_7808, 1, x_5957); +x_7809 = lean_ctor_get(x_1, 0); +lean_inc(x_7809); +x_7810 = l_Lean_IR_ToIR_bindVar(x_7809, x_7299, x_4, x_5, x_7762); +x_7811 = lean_ctor_get(x_7810, 0); +lean_inc(x_7811); +x_7812 = lean_ctor_get(x_7810, 1); +lean_inc(x_7812); +lean_dec(x_7810); +x_7813 = lean_ctor_get(x_7811, 0); +lean_inc(x_7813); +x_7814 = lean_ctor_get(x_7811, 1); +lean_inc(x_7814); +lean_dec(x_7811); +x_7815 = lean_ctor_get(x_1, 2); +lean_inc(x_7815); +lean_inc(x_5); +lean_inc(x_4); +x_7816 = l_Lean_IR_ToIR_lowerType(x_7815, x_7814, x_4, x_5, x_7812); +if (lean_obj_tag(x_7816) == 0) +{ +lean_object* x_7817; lean_object* x_7818; lean_object* x_7819; lean_object* x_7820; lean_object* x_7821; +x_7817 = lean_ctor_get(x_7816, 0); +lean_inc(x_7817); +x_7818 = lean_ctor_get(x_7816, 1); +lean_inc(x_7818); +lean_dec(x_7816); +x_7819 = lean_ctor_get(x_7817, 0); +lean_inc(x_7819); +x_7820 = lean_ctor_get(x_7817, 1); +lean_inc(x_7820); +lean_dec(x_7817); +lean_inc(x_5); +lean_inc(x_4); +lean_inc(x_2); +x_7821 = l_Lean_IR_ToIR_lowerLet___lambda__1(x_2, x_7813, x_7808, x_7819, x_7820, x_4, x_5, x_7818); +if (lean_obj_tag(x_7821) == 0) +{ +lean_object* x_7822; lean_object* x_7823; lean_object* x_7824; lean_object* x_7825; lean_object* x_7826; lean_object* x_7827; lean_object* x_7828; +x_7822 = lean_ctor_get(x_7821, 0); +lean_inc(x_7822); +x_7823 = lean_ctor_get(x_7821, 1); +lean_inc(x_7823); +lean_dec(x_7821); +x_7824 = lean_ctor_get(x_7822, 0); +lean_inc(x_7824); +x_7825 = 
lean_ctor_get(x_7822, 1); +lean_inc(x_7825); +if (lean_is_exclusive(x_7822)) { + lean_ctor_release(x_7822, 0); + lean_ctor_release(x_7822, 1); + x_7826 = x_7822; +} else { + lean_dec_ref(x_7822); + x_7826 = lean_box(0); +} +if (lean_is_scalar(x_7765)) { + x_7827 = lean_alloc_ctor(1, 1, 0); +} else { + x_7827 = x_7765; +} +lean_ctor_set(x_7827, 0, x_7824); +if (lean_is_scalar(x_7826)) { + x_7828 = lean_alloc_ctor(0, 2, 0); +} else { + x_7828 = x_7826; +} +lean_ctor_set(x_7828, 0, x_7827); +lean_ctor_set(x_7828, 1, x_7825); +x_7729 = x_7828; +x_7730 = x_7823; +goto block_7756; +} +else +{ +lean_object* x_7829; lean_object* x_7830; lean_object* x_7831; lean_object* x_7832; +lean_dec(x_7765); +lean_dec(x_7303); +lean_dec(x_5957); +lean_dec(x_153); +lean_dec(x_5); +lean_dec(x_4); +lean_dec(x_2); +lean_dec(x_1); +x_7829 = lean_ctor_get(x_7821, 0); +lean_inc(x_7829); +x_7830 = lean_ctor_get(x_7821, 1); +lean_inc(x_7830); +if (lean_is_exclusive(x_7821)) { + lean_ctor_release(x_7821, 0); + lean_ctor_release(x_7821, 1); + x_7831 = x_7821; +} else { + lean_dec_ref(x_7821); + x_7831 = lean_box(0); +} +if (lean_is_scalar(x_7831)) { + x_7832 = lean_alloc_ctor(1, 2, 0); +} else { + x_7832 = x_7831; +} +lean_ctor_set(x_7832, 0, x_7829); +lean_ctor_set(x_7832, 1, x_7830); +return x_7832; +} +} +else +{ +lean_object* x_7833; lean_object* x_7834; lean_object* x_7835; lean_object* x_7836; +lean_dec(x_7813); +lean_dec(x_7808); +lean_dec(x_7765); +lean_dec(x_7303); +lean_dec(x_5957); +lean_dec(x_153); +lean_dec(x_5); +lean_dec(x_4); +lean_dec(x_2); +lean_dec(x_1); +x_7833 = lean_ctor_get(x_7816, 0); +lean_inc(x_7833); +x_7834 = lean_ctor_get(x_7816, 1); +lean_inc(x_7834); +if (lean_is_exclusive(x_7816)) { + lean_ctor_release(x_7816, 0); + lean_ctor_release(x_7816, 1); + x_7835 = x_7816; +} else { + lean_dec_ref(x_7816); + x_7835 = lean_box(0); +} +if (lean_is_scalar(x_7835)) { + x_7836 = lean_alloc_ctor(1, 2, 0); +} else { + x_7836 = x_7835; +} +lean_ctor_set(x_7836, 0, x_7833); +lean_ctor_set(x_7836, 1, x_7834); +return x_7836; +} +} +} +else +{ +lean_object* x_7837; lean_object* x_7838; lean_object* x_7839; lean_object* x_7840; lean_object* x_7841; lean_object* x_7842; lean_object* x_7843; lean_object* x_7844; lean_object* x_7845; +lean_dec(x_7768); +lean_dec(x_7766); +lean_inc(x_5957); +lean_inc(x_153); +if (lean_is_scalar(x_7763)) { + x_7837 = lean_alloc_ctor(7, 2, 0); +} else { + x_7837 = x_7763; + lean_ctor_set_tag(x_7837, 7); +} +lean_ctor_set(x_7837, 0, x_153); +lean_ctor_set(x_7837, 1, x_5957); +x_7838 = lean_ctor_get(x_1, 0); +lean_inc(x_7838); +x_7839 = l_Lean_IR_ToIR_bindVar(x_7838, x_7299, x_4, x_5, x_7762); +x_7840 = lean_ctor_get(x_7839, 0); +lean_inc(x_7840); +x_7841 = lean_ctor_get(x_7839, 1); +lean_inc(x_7841); +lean_dec(x_7839); +x_7842 = lean_ctor_get(x_7840, 0); +lean_inc(x_7842); +x_7843 = lean_ctor_get(x_7840, 1); +lean_inc(x_7843); +lean_dec(x_7840); +x_7844 = lean_box(7); +lean_inc(x_5); +lean_inc(x_4); +lean_inc(x_2); +x_7845 = l_Lean_IR_ToIR_lowerLet___lambda__1(x_2, x_7842, x_7837, x_7844, x_7843, x_4, x_5, x_7841); +if (lean_obj_tag(x_7845) == 0) +{ +lean_object* x_7846; lean_object* x_7847; lean_object* x_7848; lean_object* x_7849; lean_object* x_7850; lean_object* x_7851; lean_object* x_7852; +x_7846 = lean_ctor_get(x_7845, 0); +lean_inc(x_7846); +x_7847 = lean_ctor_get(x_7845, 1); +lean_inc(x_7847); +lean_dec(x_7845); +x_7848 = lean_ctor_get(x_7846, 0); +lean_inc(x_7848); +x_7849 = lean_ctor_get(x_7846, 1); +lean_inc(x_7849); +if (lean_is_exclusive(x_7846)) { + 
lean_ctor_release(x_7846, 0); + lean_ctor_release(x_7846, 1); + x_7850 = x_7846; +} else { + lean_dec_ref(x_7846); + x_7850 = lean_box(0); +} +if (lean_is_scalar(x_7765)) { + x_7851 = lean_alloc_ctor(1, 1, 0); +} else { + x_7851 = x_7765; +} +lean_ctor_set(x_7851, 0, x_7848); +if (lean_is_scalar(x_7850)) { + x_7852 = lean_alloc_ctor(0, 2, 0); +} else { + x_7852 = x_7850; +} +lean_ctor_set(x_7852, 0, x_7851); +lean_ctor_set(x_7852, 1, x_7849); +x_7729 = x_7852; +x_7730 = x_7847; +goto block_7756; +} +else +{ +lean_object* x_7853; lean_object* x_7854; lean_object* x_7855; lean_object* x_7856; +lean_dec(x_7765); +lean_dec(x_7303); +lean_dec(x_5957); +lean_dec(x_153); +lean_dec(x_5); +lean_dec(x_4); +lean_dec(x_2); +lean_dec(x_1); +x_7853 = lean_ctor_get(x_7845, 0); +lean_inc(x_7853); +x_7854 = lean_ctor_get(x_7845, 1); +lean_inc(x_7854); +if (lean_is_exclusive(x_7845)) { + lean_ctor_release(x_7845, 0); + lean_ctor_release(x_7845, 1); + x_7855 = x_7845; +} else { + lean_dec_ref(x_7845); + x_7855 = lean_box(0); +} +if (lean_is_scalar(x_7855)) { + x_7856 = lean_alloc_ctor(1, 2, 0); +} else { + x_7856 = x_7855; +} +lean_ctor_set(x_7856, 0, x_7853); +lean_ctor_set(x_7856, 1, x_7854); +return x_7856; +} +} +} +block_7756: +{ +lean_object* x_7731; +x_7731 = lean_ctor_get(x_7729, 0); +lean_inc(x_7731); +if (lean_obj_tag(x_7731) == 0) +{ +lean_object* x_7732; lean_object* x_7733; lean_object* x_7734; lean_object* x_7735; lean_object* x_7736; lean_object* x_7737; lean_object* x_7738; lean_object* x_7739; lean_object* x_7740; lean_object* x_7741; +lean_dec(x_7303); +x_7732 = lean_ctor_get(x_7729, 1); +lean_inc(x_7732); +lean_dec(x_7729); +x_7733 = lean_alloc_ctor(6, 2, 0); +lean_ctor_set(x_7733, 0, x_153); +lean_ctor_set(x_7733, 1, x_5957); +x_7734 = lean_ctor_get(x_1, 0); +lean_inc(x_7734); +x_7735 = l_Lean_IR_ToIR_bindVar(x_7734, x_7732, x_4, x_5, x_7730); +x_7736 = lean_ctor_get(x_7735, 0); +lean_inc(x_7736); +x_7737 = lean_ctor_get(x_7735, 1); +lean_inc(x_7737); +lean_dec(x_7735); +x_7738 = lean_ctor_get(x_7736, 0); +lean_inc(x_7738); +x_7739 = lean_ctor_get(x_7736, 1); +lean_inc(x_7739); +lean_dec(x_7736); +x_7740 = lean_ctor_get(x_1, 2); +lean_inc(x_7740); +lean_dec(x_1); +lean_inc(x_5); +lean_inc(x_4); +x_7741 = l_Lean_IR_ToIR_lowerType(x_7740, x_7739, x_4, x_5, x_7737); +if (lean_obj_tag(x_7741) == 0) +{ +lean_object* x_7742; lean_object* x_7743; lean_object* x_7744; lean_object* x_7745; lean_object* x_7746; +x_7742 = lean_ctor_get(x_7741, 0); +lean_inc(x_7742); +x_7743 = lean_ctor_get(x_7741, 1); +lean_inc(x_7743); +lean_dec(x_7741); +x_7744 = lean_ctor_get(x_7742, 0); +lean_inc(x_7744); +x_7745 = lean_ctor_get(x_7742, 1); +lean_inc(x_7745); +lean_dec(x_7742); +x_7746 = l_Lean_IR_ToIR_lowerLet___lambda__1(x_2, x_7738, x_7733, x_7744, x_7745, x_4, x_5, x_7743); +return x_7746; +} +else +{ +lean_object* x_7747; lean_object* x_7748; lean_object* x_7749; lean_object* x_7750; +lean_dec(x_7738); +lean_dec(x_7733); +lean_dec(x_5); +lean_dec(x_4); +lean_dec(x_2); +x_7747 = lean_ctor_get(x_7741, 0); +lean_inc(x_7747); +x_7748 = lean_ctor_get(x_7741, 1); +lean_inc(x_7748); +if (lean_is_exclusive(x_7741)) { + lean_ctor_release(x_7741, 0); + lean_ctor_release(x_7741, 1); + x_7749 = x_7741; +} else { + lean_dec_ref(x_7741); + x_7749 = lean_box(0); +} +if (lean_is_scalar(x_7749)) { + x_7750 = lean_alloc_ctor(1, 2, 0); +} else { + x_7750 = x_7749; +} +lean_ctor_set(x_7750, 0, x_7747); +lean_ctor_set(x_7750, 1, x_7748); +return x_7750; +} +} +else +{ +lean_object* x_7751; lean_object* x_7752; lean_object* 
x_7753; lean_object* x_7754; lean_object* x_7755; +lean_dec(x_5957); +lean_dec(x_153); +lean_dec(x_5); +lean_dec(x_4); +lean_dec(x_2); +lean_dec(x_1); +x_7751 = lean_ctor_get(x_7729, 1); +lean_inc(x_7751); +if (lean_is_exclusive(x_7729)) { + lean_ctor_release(x_7729, 0); + lean_ctor_release(x_7729, 1); + x_7752 = x_7729; +} else { + lean_dec_ref(x_7729); + x_7752 = lean_box(0); +} +x_7753 = lean_ctor_get(x_7731, 0); +lean_inc(x_7753); +lean_dec(x_7731); +if (lean_is_scalar(x_7752)) { + x_7754 = lean_alloc_ctor(0, 2, 0); +} else { + x_7754 = x_7752; +} +lean_ctor_set(x_7754, 0, x_7753); +lean_ctor_set(x_7754, 1, x_7751); +if (lean_is_scalar(x_7303)) { + x_7755 = lean_alloc_ctor(0, 2, 0); +} else { + x_7755 = x_7303; +} +lean_ctor_set(x_7755, 0, x_7754); +lean_ctor_set(x_7755, 1, x_7730); +return x_7755; +} +} +} +} +default: +{ +lean_object* x_7857; uint8_t x_7858; lean_object* x_7859; lean_object* x_7860; lean_object* x_7861; lean_object* x_7862; lean_object* x_7863; lean_object* x_7864; lean_object* x_7865; lean_object* x_7866; lean_object* x_7867; +lean_dec(x_7304); +lean_dec(x_7303); +lean_dec(x_5957); +lean_dec(x_5945); +lean_dec(x_5944); +lean_dec(x_2); +lean_dec(x_1); +if (lean_is_exclusive(x_7309)) { + lean_ctor_release(x_7309, 0); + x_7857 = x_7309; +} else { + lean_dec_ref(x_7309); + x_7857 = lean_box(0); +} +x_7858 = 1; +x_7859 = l_Lean_IR_ToIR_lowerLet___closed__14; +x_7860 = l_Lean_Name_toString(x_153, x_7858, x_7859); +if (lean_is_scalar(x_7857)) { + x_7861 = lean_alloc_ctor(3, 1, 0); +} else { + x_7861 = x_7857; + lean_ctor_set_tag(x_7861, 3); +} +lean_ctor_set(x_7861, 0, x_7860); +x_7862 = l_Lean_IR_ToIR_lowerLet___closed__29; +x_7863 = lean_alloc_ctor(5, 2, 0); +lean_ctor_set(x_7863, 0, x_7862); +lean_ctor_set(x_7863, 1, x_7861); +x_7864 = l_Lean_IR_ToIR_lowerLet___closed__31; +x_7865 = lean_alloc_ctor(5, 2, 0); +lean_ctor_set(x_7865, 0, x_7863); +lean_ctor_set(x_7865, 1, x_7864); +x_7866 = l_Lean_MessageData_ofFormat(x_7865); +x_7867 = l_Lean_throwError___at_Lean_IR_ToIR_lowerLet___spec__1(x_7866, x_7299, x_4, x_5, x_7302); +lean_dec(x_5); +lean_dec(x_4); +lean_dec(x_7299); +return x_7867; +} +} +} +} +} +else +{ +uint8_t x_7868; +lean_dec(x_5957); +lean_dec(x_5945); +lean_dec(x_5944); +lean_dec(x_153); +lean_dec(x_5); +lean_dec(x_4); +lean_dec(x_2); +lean_dec(x_1); +x_7868 = !lean_is_exclusive(x_5959); +if (x_7868 == 0) +{ +lean_object* x_7869; lean_object* x_7870; lean_object* x_7871; +x_7869 = lean_ctor_get(x_5959, 0); +lean_dec(x_7869); +x_7870 = lean_ctor_get(x_5961, 0); +lean_inc(x_7870); +lean_dec(x_5961); +lean_ctor_set(x_5959, 0, x_7870); +if (lean_is_scalar(x_5955)) { + x_7871 = lean_alloc_ctor(0, 2, 0); +} else { + x_7871 = x_5955; +} +lean_ctor_set(x_7871, 0, x_5959); +lean_ctor_set(x_7871, 1, x_5960); +return x_7871; +} +else +{ +lean_object* x_7872; lean_object* x_7873; lean_object* x_7874; lean_object* x_7875; +x_7872 = lean_ctor_get(x_5959, 1); +lean_inc(x_7872); +lean_dec(x_5959); +x_7873 = lean_ctor_get(x_5961, 0); +lean_inc(x_7873); +lean_dec(x_5961); +x_7874 = lean_alloc_ctor(0, 2, 0); +lean_ctor_set(x_7874, 0, x_7873); +lean_ctor_set(x_7874, 1, x_7872); +if (lean_is_scalar(x_5955)) { + x_7875 = lean_alloc_ctor(0, 2, 0); +} else { + x_7875 = x_5955; +} +lean_ctor_set(x_7875, 0, x_7874); +lean_ctor_set(x_7875, 1, x_5960); +return x_7875; +} +} +} +} +else +{ +lean_object* x_8157; lean_object* x_8158; lean_object* x_8159; lean_object* x_8160; lean_object* x_8738; lean_object* x_8739; +x_8157 = lean_ctor_get(x_5953, 0); +x_8158 = lean_ctor_get(x_5953, 1); 
+lean_inc(x_8158); +lean_inc(x_8157); +lean_dec(x_5953); +lean_inc(x_153); +x_8738 = l_Lean_Compiler_LCNF_getMonoDecl_x3f(x_153, x_4, x_5, x_5954); +x_8739 = lean_ctor_get(x_8738, 0); +lean_inc(x_8739); +if (lean_obj_tag(x_8739) == 0) +{ +lean_object* x_8740; lean_object* x_8741; lean_object* x_8742; +x_8740 = lean_ctor_get(x_8738, 1); +lean_inc(x_8740); +lean_dec(x_8738); +x_8741 = lean_box(0); +x_8742 = lean_alloc_ctor(0, 2, 0); +lean_ctor_set(x_8742, 0, x_8741); +lean_ctor_set(x_8742, 1, x_8158); +x_8159 = x_8742; +x_8160 = x_8740; +goto block_8737; +} +else +{ +lean_object* x_8743; lean_object* x_8744; lean_object* x_8745; lean_object* x_8746; lean_object* x_8747; lean_object* x_8748; lean_object* x_8749; uint8_t x_8750; +x_8743 = lean_ctor_get(x_8738, 1); +lean_inc(x_8743); +if (lean_is_exclusive(x_8738)) { + lean_ctor_release(x_8738, 0); + lean_ctor_release(x_8738, 1); + x_8744 = x_8738; +} else { + lean_dec_ref(x_8738); + x_8744 = lean_box(0); +} +x_8745 = lean_ctor_get(x_8739, 0); +lean_inc(x_8745); +if (lean_is_exclusive(x_8739)) { + lean_ctor_release(x_8739, 0); + x_8746 = x_8739; +} else { + lean_dec_ref(x_8739); + x_8746 = lean_box(0); +} +x_8747 = lean_array_get_size(x_8157); +x_8748 = lean_ctor_get(x_8745, 3); +lean_inc(x_8748); +lean_dec(x_8745); +x_8749 = lean_array_get_size(x_8748); +lean_dec(x_8748); +x_8750 = lean_nat_dec_lt(x_8747, x_8749); +if (x_8750 == 0) +{ +uint8_t x_8751; +x_8751 = lean_nat_dec_eq(x_8747, x_8749); +if (x_8751 == 0) +{ +lean_object* x_8752; lean_object* x_8753; lean_object* x_8754; lean_object* x_8755; lean_object* x_8756; lean_object* x_8757; lean_object* x_8758; lean_object* x_8759; lean_object* x_8760; lean_object* x_8761; lean_object* x_8762; lean_object* x_8763; lean_object* x_8764; lean_object* x_8765; lean_object* x_8766; lean_object* x_8767; lean_object* x_8768; +x_8752 = lean_unsigned_to_nat(0u); +x_8753 = l_Array_extract___rarg(x_8157, x_8752, x_8749); +x_8754 = l_Array_extract___rarg(x_8157, x_8749, x_8747); +lean_dec(x_8747); +lean_inc(x_153); +if (lean_is_scalar(x_8744)) { + x_8755 = lean_alloc_ctor(6, 2, 0); +} else { + x_8755 = x_8744; + lean_ctor_set_tag(x_8755, 6); +} +lean_ctor_set(x_8755, 0, x_153); +lean_ctor_set(x_8755, 1, x_8753); +x_8756 = lean_ctor_get(x_1, 0); +lean_inc(x_8756); +x_8757 = l_Lean_IR_ToIR_bindVar(x_8756, x_8158, x_4, x_5, x_8743); +x_8758 = lean_ctor_get(x_8757, 0); +lean_inc(x_8758); +x_8759 = lean_ctor_get(x_8757, 1); +lean_inc(x_8759); +lean_dec(x_8757); +x_8760 = lean_ctor_get(x_8758, 0); +lean_inc(x_8760); +x_8761 = lean_ctor_get(x_8758, 1); +lean_inc(x_8761); +lean_dec(x_8758); +x_8762 = l_Lean_IR_ToIR_newVar(x_8761, x_4, x_5, x_8759); +x_8763 = lean_ctor_get(x_8762, 0); +lean_inc(x_8763); +x_8764 = lean_ctor_get(x_8762, 1); +lean_inc(x_8764); +lean_dec(x_8762); +x_8765 = lean_ctor_get(x_8763, 0); +lean_inc(x_8765); +x_8766 = lean_ctor_get(x_8763, 1); +lean_inc(x_8766); +lean_dec(x_8763); +x_8767 = lean_ctor_get(x_1, 2); +lean_inc(x_8767); +lean_inc(x_5); +lean_inc(x_4); +x_8768 = l_Lean_IR_ToIR_lowerType(x_8767, x_8766, x_4, x_5, x_8764); +if (lean_obj_tag(x_8768) == 0) +{ +lean_object* x_8769; lean_object* x_8770; lean_object* x_8771; lean_object* x_8772; lean_object* x_8773; +x_8769 = lean_ctor_get(x_8768, 0); +lean_inc(x_8769); +x_8770 = lean_ctor_get(x_8768, 1); +lean_inc(x_8770); +lean_dec(x_8768); +x_8771 = lean_ctor_get(x_8769, 0); +lean_inc(x_8771); +x_8772 = lean_ctor_get(x_8769, 1); +lean_inc(x_8772); +lean_dec(x_8769); +lean_inc(x_5); +lean_inc(x_4); +lean_inc(x_2); +x_8773 = 
l_Lean_IR_ToIR_lowerLet___lambda__3(x_2, x_8765, x_8754, x_8760, x_8755, x_8771, x_8772, x_4, x_5, x_8770); +if (lean_obj_tag(x_8773) == 0) +{ +lean_object* x_8774; lean_object* x_8775; lean_object* x_8776; lean_object* x_8777; lean_object* x_8778; lean_object* x_8779; lean_object* x_8780; +x_8774 = lean_ctor_get(x_8773, 0); +lean_inc(x_8774); +x_8775 = lean_ctor_get(x_8773, 1); +lean_inc(x_8775); +lean_dec(x_8773); +x_8776 = lean_ctor_get(x_8774, 0); +lean_inc(x_8776); +x_8777 = lean_ctor_get(x_8774, 1); +lean_inc(x_8777); +if (lean_is_exclusive(x_8774)) { + lean_ctor_release(x_8774, 0); + lean_ctor_release(x_8774, 1); + x_8778 = x_8774; +} else { + lean_dec_ref(x_8774); + x_8778 = lean_box(0); +} +if (lean_is_scalar(x_8746)) { + x_8779 = lean_alloc_ctor(1, 1, 0); +} else { + x_8779 = x_8746; +} +lean_ctor_set(x_8779, 0, x_8776); +if (lean_is_scalar(x_8778)) { + x_8780 = lean_alloc_ctor(0, 2, 0); +} else { + x_8780 = x_8778; +} +lean_ctor_set(x_8780, 0, x_8779); +lean_ctor_set(x_8780, 1, x_8777); +x_8159 = x_8780; +x_8160 = x_8775; +goto block_8737; +} +else +{ +lean_object* x_8781; lean_object* x_8782; lean_object* x_8783; lean_object* x_8784; +lean_dec(x_8746); +lean_dec(x_8157); +lean_dec(x_5955); +lean_dec(x_5945); +lean_dec(x_5944); +lean_dec(x_153); +lean_dec(x_5); +lean_dec(x_4); +lean_dec(x_2); +lean_dec(x_1); +x_8781 = lean_ctor_get(x_8773, 0); +lean_inc(x_8781); +x_8782 = lean_ctor_get(x_8773, 1); +lean_inc(x_8782); +if (lean_is_exclusive(x_8773)) { + lean_ctor_release(x_8773, 0); + lean_ctor_release(x_8773, 1); + x_8783 = x_8773; +} else { + lean_dec_ref(x_8773); + x_8783 = lean_box(0); +} +if (lean_is_scalar(x_8783)) { + x_8784 = lean_alloc_ctor(1, 2, 0); +} else { + x_8784 = x_8783; +} +lean_ctor_set(x_8784, 0, x_8781); +lean_ctor_set(x_8784, 1, x_8782); +return x_8784; +} +} +else +{ +lean_object* x_8785; lean_object* x_8786; lean_object* x_8787; lean_object* x_8788; +lean_dec(x_8765); +lean_dec(x_8760); +lean_dec(x_8755); +lean_dec(x_8754); +lean_dec(x_8746); +lean_dec(x_8157); +lean_dec(x_5955); +lean_dec(x_5945); +lean_dec(x_5944); +lean_dec(x_153); +lean_dec(x_5); +lean_dec(x_4); +lean_dec(x_2); +lean_dec(x_1); +x_8785 = lean_ctor_get(x_8768, 0); +lean_inc(x_8785); +x_8786 = lean_ctor_get(x_8768, 1); +lean_inc(x_8786); +if (lean_is_exclusive(x_8768)) { + lean_ctor_release(x_8768, 0); + lean_ctor_release(x_8768, 1); + x_8787 = x_8768; +} else { + lean_dec_ref(x_8768); + x_8787 = lean_box(0); +} +if (lean_is_scalar(x_8787)) { + x_8788 = lean_alloc_ctor(1, 2, 0); +} else { + x_8788 = x_8787; +} +lean_ctor_set(x_8788, 0, x_8785); +lean_ctor_set(x_8788, 1, x_8786); +return x_8788; +} +} +else +{ +lean_object* x_8789; lean_object* x_8790; lean_object* x_8791; lean_object* x_8792; lean_object* x_8793; lean_object* x_8794; lean_object* x_8795; lean_object* x_8796; lean_object* x_8797; +lean_dec(x_8749); +lean_dec(x_8747); +lean_inc(x_8157); +lean_inc(x_153); +if (lean_is_scalar(x_8744)) { + x_8789 = lean_alloc_ctor(6, 2, 0); +} else { + x_8789 = x_8744; + lean_ctor_set_tag(x_8789, 6); +} +lean_ctor_set(x_8789, 0, x_153); +lean_ctor_set(x_8789, 1, x_8157); +x_8790 = lean_ctor_get(x_1, 0); +lean_inc(x_8790); +x_8791 = l_Lean_IR_ToIR_bindVar(x_8790, x_8158, x_4, x_5, x_8743); +x_8792 = lean_ctor_get(x_8791, 0); +lean_inc(x_8792); +x_8793 = lean_ctor_get(x_8791, 1); +lean_inc(x_8793); +lean_dec(x_8791); +x_8794 = lean_ctor_get(x_8792, 0); +lean_inc(x_8794); +x_8795 = lean_ctor_get(x_8792, 1); +lean_inc(x_8795); +lean_dec(x_8792); +x_8796 = lean_ctor_get(x_1, 2); +lean_inc(x_8796); 
+lean_inc(x_5); +lean_inc(x_4); +x_8797 = l_Lean_IR_ToIR_lowerType(x_8796, x_8795, x_4, x_5, x_8793); +if (lean_obj_tag(x_8797) == 0) +{ +lean_object* x_8798; lean_object* x_8799; lean_object* x_8800; lean_object* x_8801; lean_object* x_8802; +x_8798 = lean_ctor_get(x_8797, 0); +lean_inc(x_8798); +x_8799 = lean_ctor_get(x_8797, 1); +lean_inc(x_8799); +lean_dec(x_8797); +x_8800 = lean_ctor_get(x_8798, 0); +lean_inc(x_8800); +x_8801 = lean_ctor_get(x_8798, 1); +lean_inc(x_8801); +lean_dec(x_8798); +lean_inc(x_5); +lean_inc(x_4); +lean_inc(x_2); +x_8802 = l_Lean_IR_ToIR_lowerLet___lambda__1(x_2, x_8794, x_8789, x_8800, x_8801, x_4, x_5, x_8799); +if (lean_obj_tag(x_8802) == 0) +{ +lean_object* x_8803; lean_object* x_8804; lean_object* x_8805; lean_object* x_8806; lean_object* x_8807; lean_object* x_8808; lean_object* x_8809; +x_8803 = lean_ctor_get(x_8802, 0); +lean_inc(x_8803); +x_8804 = lean_ctor_get(x_8802, 1); +lean_inc(x_8804); +lean_dec(x_8802); +x_8805 = lean_ctor_get(x_8803, 0); +lean_inc(x_8805); +x_8806 = lean_ctor_get(x_8803, 1); +lean_inc(x_8806); +if (lean_is_exclusive(x_8803)) { + lean_ctor_release(x_8803, 0); + lean_ctor_release(x_8803, 1); + x_8807 = x_8803; +} else { + lean_dec_ref(x_8803); + x_8807 = lean_box(0); +} +if (lean_is_scalar(x_8746)) { + x_8808 = lean_alloc_ctor(1, 1, 0); +} else { + x_8808 = x_8746; +} +lean_ctor_set(x_8808, 0, x_8805); +if (lean_is_scalar(x_8807)) { + x_8809 = lean_alloc_ctor(0, 2, 0); +} else { + x_8809 = x_8807; +} +lean_ctor_set(x_8809, 0, x_8808); +lean_ctor_set(x_8809, 1, x_8806); +x_8159 = x_8809; +x_8160 = x_8804; +goto block_8737; +} +else +{ +lean_object* x_8810; lean_object* x_8811; lean_object* x_8812; lean_object* x_8813; +lean_dec(x_8746); +lean_dec(x_8157); +lean_dec(x_5955); +lean_dec(x_5945); +lean_dec(x_5944); +lean_dec(x_153); +lean_dec(x_5); +lean_dec(x_4); +lean_dec(x_2); +lean_dec(x_1); +x_8810 = lean_ctor_get(x_8802, 0); +lean_inc(x_8810); +x_8811 = lean_ctor_get(x_8802, 1); +lean_inc(x_8811); +if (lean_is_exclusive(x_8802)) { + lean_ctor_release(x_8802, 0); + lean_ctor_release(x_8802, 1); + x_8812 = x_8802; +} else { + lean_dec_ref(x_8802); + x_8812 = lean_box(0); +} +if (lean_is_scalar(x_8812)) { + x_8813 = lean_alloc_ctor(1, 2, 0); +} else { + x_8813 = x_8812; +} +lean_ctor_set(x_8813, 0, x_8810); +lean_ctor_set(x_8813, 1, x_8811); +return x_8813; +} +} +else +{ +lean_object* x_8814; lean_object* x_8815; lean_object* x_8816; lean_object* x_8817; +lean_dec(x_8794); +lean_dec(x_8789); +lean_dec(x_8746); +lean_dec(x_8157); +lean_dec(x_5955); +lean_dec(x_5945); +lean_dec(x_5944); +lean_dec(x_153); +lean_dec(x_5); +lean_dec(x_4); +lean_dec(x_2); +lean_dec(x_1); +x_8814 = lean_ctor_get(x_8797, 0); +lean_inc(x_8814); +x_8815 = lean_ctor_get(x_8797, 1); +lean_inc(x_8815); +if (lean_is_exclusive(x_8797)) { + lean_ctor_release(x_8797, 0); + lean_ctor_release(x_8797, 1); + x_8816 = x_8797; +} else { + lean_dec_ref(x_8797); + x_8816 = lean_box(0); +} +if (lean_is_scalar(x_8816)) { + x_8817 = lean_alloc_ctor(1, 2, 0); +} else { + x_8817 = x_8816; +} +lean_ctor_set(x_8817, 0, x_8814); +lean_ctor_set(x_8817, 1, x_8815); +return x_8817; +} +} +} +else +{ +lean_object* x_8818; lean_object* x_8819; lean_object* x_8820; lean_object* x_8821; lean_object* x_8822; lean_object* x_8823; lean_object* x_8824; lean_object* x_8825; lean_object* x_8826; +lean_dec(x_8749); +lean_dec(x_8747); +lean_inc(x_8157); +lean_inc(x_153); +if (lean_is_scalar(x_8744)) { + x_8818 = lean_alloc_ctor(7, 2, 0); +} else { + x_8818 = x_8744; + lean_ctor_set_tag(x_8818, 
7); +} +lean_ctor_set(x_8818, 0, x_153); +lean_ctor_set(x_8818, 1, x_8157); +x_8819 = lean_ctor_get(x_1, 0); +lean_inc(x_8819); +x_8820 = l_Lean_IR_ToIR_bindVar(x_8819, x_8158, x_4, x_5, x_8743); +x_8821 = lean_ctor_get(x_8820, 0); +lean_inc(x_8821); +x_8822 = lean_ctor_get(x_8820, 1); +lean_inc(x_8822); +lean_dec(x_8820); +x_8823 = lean_ctor_get(x_8821, 0); +lean_inc(x_8823); +x_8824 = lean_ctor_get(x_8821, 1); +lean_inc(x_8824); +lean_dec(x_8821); +x_8825 = lean_box(7); +lean_inc(x_5); +lean_inc(x_4); +lean_inc(x_2); +x_8826 = l_Lean_IR_ToIR_lowerLet___lambda__1(x_2, x_8823, x_8818, x_8825, x_8824, x_4, x_5, x_8822); +if (lean_obj_tag(x_8826) == 0) +{ +lean_object* x_8827; lean_object* x_8828; lean_object* x_8829; lean_object* x_8830; lean_object* x_8831; lean_object* x_8832; lean_object* x_8833; +x_8827 = lean_ctor_get(x_8826, 0); +lean_inc(x_8827); +x_8828 = lean_ctor_get(x_8826, 1); +lean_inc(x_8828); +lean_dec(x_8826); +x_8829 = lean_ctor_get(x_8827, 0); +lean_inc(x_8829); +x_8830 = lean_ctor_get(x_8827, 1); +lean_inc(x_8830); +if (lean_is_exclusive(x_8827)) { + lean_ctor_release(x_8827, 0); + lean_ctor_release(x_8827, 1); + x_8831 = x_8827; +} else { + lean_dec_ref(x_8827); + x_8831 = lean_box(0); +} +if (lean_is_scalar(x_8746)) { + x_8832 = lean_alloc_ctor(1, 1, 0); +} else { + x_8832 = x_8746; +} +lean_ctor_set(x_8832, 0, x_8829); +if (lean_is_scalar(x_8831)) { + x_8833 = lean_alloc_ctor(0, 2, 0); +} else { + x_8833 = x_8831; +} +lean_ctor_set(x_8833, 0, x_8832); +lean_ctor_set(x_8833, 1, x_8830); +x_8159 = x_8833; +x_8160 = x_8828; +goto block_8737; +} +else +{ +lean_object* x_8834; lean_object* x_8835; lean_object* x_8836; lean_object* x_8837; +lean_dec(x_8746); +lean_dec(x_8157); +lean_dec(x_5955); +lean_dec(x_5945); +lean_dec(x_5944); +lean_dec(x_153); +lean_dec(x_5); +lean_dec(x_4); +lean_dec(x_2); +lean_dec(x_1); +x_8834 = lean_ctor_get(x_8826, 0); +lean_inc(x_8834); +x_8835 = lean_ctor_get(x_8826, 1); +lean_inc(x_8835); +if (lean_is_exclusive(x_8826)) { + lean_ctor_release(x_8826, 0); + lean_ctor_release(x_8826, 1); + x_8836 = x_8826; +} else { + lean_dec_ref(x_8826); + x_8836 = lean_box(0); +} +if (lean_is_scalar(x_8836)) { + x_8837 = lean_alloc_ctor(1, 2, 0); +} else { + x_8837 = x_8836; +} +lean_ctor_set(x_8837, 0, x_8834); +lean_ctor_set(x_8837, 1, x_8835); +return x_8837; +} +} +} +block_8737: +{ +lean_object* x_8161; +x_8161 = lean_ctor_get(x_8159, 0); +lean_inc(x_8161); +if (lean_obj_tag(x_8161) == 0) +{ +lean_object* x_8162; lean_object* x_8163; lean_object* x_8164; lean_object* x_8165; lean_object* x_8166; lean_object* x_8167; lean_object* x_8168; uint8_t x_8169; lean_object* x_8170; +lean_dec(x_5955); +x_8162 = lean_ctor_get(x_8159, 1); +lean_inc(x_8162); +if (lean_is_exclusive(x_8159)) { + lean_ctor_release(x_8159, 0); + lean_ctor_release(x_8159, 1); + x_8163 = x_8159; +} else { + lean_dec_ref(x_8159); + x_8163 = lean_box(0); +} +x_8164 = lean_st_ref_get(x_5, x_8160); +x_8165 = lean_ctor_get(x_8164, 0); +lean_inc(x_8165); +x_8166 = lean_ctor_get(x_8164, 1); +lean_inc(x_8166); +if (lean_is_exclusive(x_8164)) { + lean_ctor_release(x_8164, 0); + lean_ctor_release(x_8164, 1); + x_8167 = x_8164; +} else { + lean_dec_ref(x_8164); + x_8167 = lean_box(0); +} +x_8168 = lean_ctor_get(x_8165, 0); +lean_inc(x_8168); +lean_dec(x_8165); +x_8169 = 0; +lean_inc(x_153); +lean_inc(x_8168); +x_8170 = l_Lean_Environment_find_x3f(x_8168, x_153, x_8169); +if (lean_obj_tag(x_8170) == 0) +{ +lean_object* x_8171; lean_object* x_8172; +lean_dec(x_8168); +lean_dec(x_8167); 
+lean_dec(x_8163); +lean_dec(x_8157); +lean_dec(x_5945); +lean_dec(x_5944); +lean_dec(x_153); +lean_dec(x_2); +lean_dec(x_1); +x_8171 = l_Lean_IR_ToIR_lowerLet___closed__6; +x_8172 = l_panic___at_Lean_IR_ToIR_lowerAlt_loop___spec__1(x_8171, x_8162, x_4, x_5, x_8166); +return x_8172; +} +else +{ +lean_object* x_8173; +x_8173 = lean_ctor_get(x_8170, 0); +lean_inc(x_8173); +lean_dec(x_8170); +switch (lean_obj_tag(x_8173)) { +case 0: +{ +lean_object* x_8174; lean_object* x_8175; uint8_t x_8176; +lean_dec(x_8168); +lean_dec(x_5945); +lean_dec(x_5944); +if (lean_is_exclusive(x_8173)) { + lean_ctor_release(x_8173, 0); + x_8174 = x_8173; +} else { + lean_dec_ref(x_8173); + x_8174 = lean_box(0); +} +x_8175 = l_Lean_IR_ToIR_lowerLet___closed__9; +x_8176 = lean_name_eq(x_153, x_8175); +if (x_8176 == 0) +{ +lean_object* x_8177; uint8_t x_8178; +x_8177 = l_Lean_IR_ToIR_lowerLet___closed__11; +x_8178 = lean_name_eq(x_153, x_8177); +if (x_8178 == 0) +{ +lean_object* x_8179; lean_object* x_8180; lean_object* x_8181; +lean_dec(x_8167); +lean_dec(x_8163); +lean_inc(x_153); +x_8179 = l_Lean_IR_ToIR_findDecl(x_153, x_8162, x_4, x_5, x_8166); +x_8180 = lean_ctor_get(x_8179, 0); +lean_inc(x_8180); +x_8181 = lean_ctor_get(x_8180, 0); +lean_inc(x_8181); +if (lean_obj_tag(x_8181) == 0) +{ +lean_object* x_8182; lean_object* x_8183; lean_object* x_8184; lean_object* x_8185; uint8_t x_8186; lean_object* x_8187; lean_object* x_8188; lean_object* x_8189; lean_object* x_8190; lean_object* x_8191; lean_object* x_8192; lean_object* x_8193; lean_object* x_8194; lean_object* x_8195; +lean_dec(x_8157); +lean_dec(x_2); +lean_dec(x_1); +x_8182 = lean_ctor_get(x_8179, 1); +lean_inc(x_8182); +if (lean_is_exclusive(x_8179)) { + lean_ctor_release(x_8179, 0); + lean_ctor_release(x_8179, 1); + x_8183 = x_8179; +} else { + lean_dec_ref(x_8179); + x_8183 = lean_box(0); +} +x_8184 = lean_ctor_get(x_8180, 1); +lean_inc(x_8184); +if (lean_is_exclusive(x_8180)) { + lean_ctor_release(x_8180, 0); + lean_ctor_release(x_8180, 1); + x_8185 = x_8180; +} else { + lean_dec_ref(x_8180); + x_8185 = lean_box(0); +} +x_8186 = 1; +x_8187 = l_Lean_IR_ToIR_lowerLet___closed__14; +x_8188 = l_Lean_Name_toString(x_153, x_8186, x_8187); +if (lean_is_scalar(x_8174)) { + x_8189 = lean_alloc_ctor(3, 1, 0); +} else { + x_8189 = x_8174; + lean_ctor_set_tag(x_8189, 3); +} +lean_ctor_set(x_8189, 0, x_8188); +x_8190 = l_Lean_IR_ToIR_lowerLet___closed__13; +if (lean_is_scalar(x_8185)) { + x_8191 = lean_alloc_ctor(5, 2, 0); +} else { + x_8191 = x_8185; + lean_ctor_set_tag(x_8191, 5); +} +lean_ctor_set(x_8191, 0, x_8190); +lean_ctor_set(x_8191, 1, x_8189); +x_8192 = l_Lean_IR_ToIR_lowerLet___closed__16; +if (lean_is_scalar(x_8183)) { + x_8193 = lean_alloc_ctor(5, 2, 0); +} else { + x_8193 = x_8183; + lean_ctor_set_tag(x_8193, 5); +} +lean_ctor_set(x_8193, 0, x_8191); +lean_ctor_set(x_8193, 1, x_8192); +x_8194 = l_Lean_MessageData_ofFormat(x_8193); +x_8195 = l_Lean_throwError___at_Lean_IR_ToIR_lowerLet___spec__1(x_8194, x_8184, x_4, x_5, x_8182); +lean_dec(x_5); +lean_dec(x_4); +lean_dec(x_8184); +return x_8195; +} +else +{ +lean_object* x_8196; lean_object* x_8197; lean_object* x_8198; lean_object* x_8199; lean_object* x_8200; lean_object* x_8201; lean_object* x_8202; uint8_t x_8203; +lean_dec(x_8174); +x_8196 = lean_ctor_get(x_8179, 1); +lean_inc(x_8196); +lean_dec(x_8179); +x_8197 = lean_ctor_get(x_8180, 1); +lean_inc(x_8197); +if (lean_is_exclusive(x_8180)) { + lean_ctor_release(x_8180, 0); + lean_ctor_release(x_8180, 1); + x_8198 = x_8180; +} else { + 
lean_dec_ref(x_8180); + x_8198 = lean_box(0); +} +x_8199 = lean_ctor_get(x_8181, 0); +lean_inc(x_8199); +lean_dec(x_8181); +x_8200 = lean_array_get_size(x_8157); +x_8201 = l_Lean_IR_Decl_params(x_8199); +lean_dec(x_8199); +x_8202 = lean_array_get_size(x_8201); +lean_dec(x_8201); +x_8203 = lean_nat_dec_lt(x_8200, x_8202); +if (x_8203 == 0) +{ +uint8_t x_8204; +x_8204 = lean_nat_dec_eq(x_8200, x_8202); +if (x_8204 == 0) +{ +lean_object* x_8205; lean_object* x_8206; lean_object* x_8207; lean_object* x_8208; lean_object* x_8209; lean_object* x_8210; lean_object* x_8211; lean_object* x_8212; lean_object* x_8213; lean_object* x_8214; lean_object* x_8215; lean_object* x_8216; lean_object* x_8217; lean_object* x_8218; lean_object* x_8219; lean_object* x_8220; lean_object* x_8221; +x_8205 = lean_unsigned_to_nat(0u); +x_8206 = l_Array_extract___rarg(x_8157, x_8205, x_8202); +x_8207 = l_Array_extract___rarg(x_8157, x_8202, x_8200); +lean_dec(x_8200); +lean_dec(x_8157); +if (lean_is_scalar(x_8198)) { + x_8208 = lean_alloc_ctor(6, 2, 0); +} else { + x_8208 = x_8198; + lean_ctor_set_tag(x_8208, 6); +} +lean_ctor_set(x_8208, 0, x_153); +lean_ctor_set(x_8208, 1, x_8206); +x_8209 = lean_ctor_get(x_1, 0); +lean_inc(x_8209); +x_8210 = l_Lean_IR_ToIR_bindVar(x_8209, x_8197, x_4, x_5, x_8196); +x_8211 = lean_ctor_get(x_8210, 0); +lean_inc(x_8211); +x_8212 = lean_ctor_get(x_8210, 1); +lean_inc(x_8212); +lean_dec(x_8210); +x_8213 = lean_ctor_get(x_8211, 0); +lean_inc(x_8213); +x_8214 = lean_ctor_get(x_8211, 1); +lean_inc(x_8214); +lean_dec(x_8211); +x_8215 = l_Lean_IR_ToIR_newVar(x_8214, x_4, x_5, x_8212); +x_8216 = lean_ctor_get(x_8215, 0); +lean_inc(x_8216); +x_8217 = lean_ctor_get(x_8215, 1); +lean_inc(x_8217); +lean_dec(x_8215); +x_8218 = lean_ctor_get(x_8216, 0); +lean_inc(x_8218); +x_8219 = lean_ctor_get(x_8216, 1); +lean_inc(x_8219); +lean_dec(x_8216); +x_8220 = lean_ctor_get(x_1, 2); +lean_inc(x_8220); +lean_dec(x_1); +lean_inc(x_5); +lean_inc(x_4); +x_8221 = l_Lean_IR_ToIR_lowerType(x_8220, x_8219, x_4, x_5, x_8217); +if (lean_obj_tag(x_8221) == 0) +{ +lean_object* x_8222; lean_object* x_8223; lean_object* x_8224; lean_object* x_8225; lean_object* x_8226; +x_8222 = lean_ctor_get(x_8221, 0); +lean_inc(x_8222); +x_8223 = lean_ctor_get(x_8221, 1); +lean_inc(x_8223); +lean_dec(x_8221); +x_8224 = lean_ctor_get(x_8222, 0); +lean_inc(x_8224); +x_8225 = lean_ctor_get(x_8222, 1); +lean_inc(x_8225); +lean_dec(x_8222); +x_8226 = l_Lean_IR_ToIR_lowerLet___lambda__3(x_2, x_8218, x_8207, x_8213, x_8208, x_8224, x_8225, x_4, x_5, x_8223); +return x_8226; +} +else +{ +lean_object* x_8227; lean_object* x_8228; lean_object* x_8229; lean_object* x_8230; +lean_dec(x_8218); +lean_dec(x_8213); +lean_dec(x_8208); +lean_dec(x_8207); +lean_dec(x_5); +lean_dec(x_4); +lean_dec(x_2); +x_8227 = lean_ctor_get(x_8221, 0); +lean_inc(x_8227); +x_8228 = lean_ctor_get(x_8221, 1); +lean_inc(x_8228); +if (lean_is_exclusive(x_8221)) { + lean_ctor_release(x_8221, 0); + lean_ctor_release(x_8221, 1); + x_8229 = x_8221; +} else { + lean_dec_ref(x_8221); + x_8229 = lean_box(0); +} +if (lean_is_scalar(x_8229)) { + x_8230 = lean_alloc_ctor(1, 2, 0); +} else { + x_8230 = x_8229; +} +lean_ctor_set(x_8230, 0, x_8227); +lean_ctor_set(x_8230, 1, x_8228); +return x_8230; +} +} +else +{ +lean_object* x_8231; lean_object* x_8232; lean_object* x_8233; lean_object* x_8234; lean_object* x_8235; lean_object* x_8236; lean_object* x_8237; lean_object* x_8238; lean_object* x_8239; +lean_dec(x_8202); +lean_dec(x_8200); +if (lean_is_scalar(x_8198)) { + x_8231 = 
lean_alloc_ctor(6, 2, 0); +} else { + x_8231 = x_8198; + lean_ctor_set_tag(x_8231, 6); +} +lean_ctor_set(x_8231, 0, x_153); +lean_ctor_set(x_8231, 1, x_8157); +x_8232 = lean_ctor_get(x_1, 0); +lean_inc(x_8232); +x_8233 = l_Lean_IR_ToIR_bindVar(x_8232, x_8197, x_4, x_5, x_8196); +x_8234 = lean_ctor_get(x_8233, 0); +lean_inc(x_8234); +x_8235 = lean_ctor_get(x_8233, 1); +lean_inc(x_8235); +lean_dec(x_8233); +x_8236 = lean_ctor_get(x_8234, 0); +lean_inc(x_8236); +x_8237 = lean_ctor_get(x_8234, 1); +lean_inc(x_8237); +lean_dec(x_8234); +x_8238 = lean_ctor_get(x_1, 2); +lean_inc(x_8238); +lean_dec(x_1); +lean_inc(x_5); +lean_inc(x_4); +x_8239 = l_Lean_IR_ToIR_lowerType(x_8238, x_8237, x_4, x_5, x_8235); +if (lean_obj_tag(x_8239) == 0) +{ +lean_object* x_8240; lean_object* x_8241; lean_object* x_8242; lean_object* x_8243; lean_object* x_8244; +x_8240 = lean_ctor_get(x_8239, 0); +lean_inc(x_8240); +x_8241 = lean_ctor_get(x_8239, 1); +lean_inc(x_8241); +lean_dec(x_8239); +x_8242 = lean_ctor_get(x_8240, 0); +lean_inc(x_8242); +x_8243 = lean_ctor_get(x_8240, 1); +lean_inc(x_8243); +lean_dec(x_8240); +x_8244 = l_Lean_IR_ToIR_lowerLet___lambda__1(x_2, x_8236, x_8231, x_8242, x_8243, x_4, x_5, x_8241); +return x_8244; +} +else +{ +lean_object* x_8245; lean_object* x_8246; lean_object* x_8247; lean_object* x_8248; +lean_dec(x_8236); +lean_dec(x_8231); +lean_dec(x_5); +lean_dec(x_4); +lean_dec(x_2); +x_8245 = lean_ctor_get(x_8239, 0); +lean_inc(x_8245); +x_8246 = lean_ctor_get(x_8239, 1); +lean_inc(x_8246); +if (lean_is_exclusive(x_8239)) { + lean_ctor_release(x_8239, 0); + lean_ctor_release(x_8239, 1); + x_8247 = x_8239; +} else { + lean_dec_ref(x_8239); + x_8247 = lean_box(0); +} +if (lean_is_scalar(x_8247)) { + x_8248 = lean_alloc_ctor(1, 2, 0); +} else { + x_8248 = x_8247; +} +lean_ctor_set(x_8248, 0, x_8245); +lean_ctor_set(x_8248, 1, x_8246); +return x_8248; +} +} +} +else +{ +lean_object* x_8249; lean_object* x_8250; lean_object* x_8251; lean_object* x_8252; lean_object* x_8253; lean_object* x_8254; lean_object* x_8255; lean_object* x_8256; lean_object* x_8257; +lean_dec(x_8202); +lean_dec(x_8200); +if (lean_is_scalar(x_8198)) { + x_8249 = lean_alloc_ctor(7, 2, 0); +} else { + x_8249 = x_8198; + lean_ctor_set_tag(x_8249, 7); +} +lean_ctor_set(x_8249, 0, x_153); +lean_ctor_set(x_8249, 1, x_8157); +x_8250 = lean_ctor_get(x_1, 0); +lean_inc(x_8250); +lean_dec(x_1); +x_8251 = l_Lean_IR_ToIR_bindVar(x_8250, x_8197, x_4, x_5, x_8196); +x_8252 = lean_ctor_get(x_8251, 0); +lean_inc(x_8252); +x_8253 = lean_ctor_get(x_8251, 1); +lean_inc(x_8253); +lean_dec(x_8251); +x_8254 = lean_ctor_get(x_8252, 0); +lean_inc(x_8254); +x_8255 = lean_ctor_get(x_8252, 1); +lean_inc(x_8255); +lean_dec(x_8252); +x_8256 = lean_box(7); +x_8257 = l_Lean_IR_ToIR_lowerLet___lambda__1(x_2, x_8254, x_8249, x_8256, x_8255, x_4, x_5, x_8253); +return x_8257; +} +} +} +else +{ +lean_object* x_8258; lean_object* x_8259; lean_object* x_8260; +lean_dec(x_8174); +lean_dec(x_8157); +lean_dec(x_153); +lean_dec(x_5); +lean_dec(x_4); +lean_dec(x_2); +lean_dec(x_1); +x_8258 = lean_box(13); +if (lean_is_scalar(x_8163)) { + x_8259 = lean_alloc_ctor(0, 2, 0); +} else { + x_8259 = x_8163; +} +lean_ctor_set(x_8259, 0, x_8258); +lean_ctor_set(x_8259, 1, x_8162); +if (lean_is_scalar(x_8167)) { + x_8260 = lean_alloc_ctor(0, 2, 0); +} else { + x_8260 = x_8167; +} +lean_ctor_set(x_8260, 0, x_8259); +lean_ctor_set(x_8260, 1, x_8166); +return x_8260; +} +} +else +{ +lean_object* x_8261; lean_object* x_8262; lean_object* x_8263; +lean_dec(x_8174); 
+lean_dec(x_8167); +lean_dec(x_8163); +lean_dec(x_153); +x_8261 = l_Lean_IR_instInhabitedArg; +x_8262 = lean_unsigned_to_nat(2u); +x_8263 = lean_array_get(x_8261, x_8157, x_8262); +lean_dec(x_8157); +if (lean_obj_tag(x_8263) == 0) +{ +lean_object* x_8264; lean_object* x_8265; lean_object* x_8266; lean_object* x_8267; lean_object* x_8268; lean_object* x_8269; lean_object* x_8270; +x_8264 = lean_ctor_get(x_8263, 0); +lean_inc(x_8264); +lean_dec(x_8263); +x_8265 = lean_ctor_get(x_1, 0); +lean_inc(x_8265); +lean_dec(x_1); +x_8266 = l_Lean_IR_ToIR_bindVarToVarId(x_8265, x_8264, x_8162, x_4, x_5, x_8166); +x_8267 = lean_ctor_get(x_8266, 0); +lean_inc(x_8267); +x_8268 = lean_ctor_get(x_8266, 1); +lean_inc(x_8268); +lean_dec(x_8266); +x_8269 = lean_ctor_get(x_8267, 1); +lean_inc(x_8269); +lean_dec(x_8267); +x_8270 = l_Lean_IR_ToIR_lowerCode(x_2, x_8269, x_4, x_5, x_8268); +return x_8270; +} +else +{ +lean_object* x_8271; lean_object* x_8272; lean_object* x_8273; lean_object* x_8274; lean_object* x_8275; lean_object* x_8276; +x_8271 = lean_ctor_get(x_1, 0); +lean_inc(x_8271); +lean_dec(x_1); +x_8272 = l_Lean_IR_ToIR_bindErased(x_8271, x_8162, x_4, x_5, x_8166); +x_8273 = lean_ctor_get(x_8272, 0); +lean_inc(x_8273); +x_8274 = lean_ctor_get(x_8272, 1); +lean_inc(x_8274); +lean_dec(x_8272); +x_8275 = lean_ctor_get(x_8273, 1); +lean_inc(x_8275); +lean_dec(x_8273); +x_8276 = l_Lean_IR_ToIR_lowerCode(x_2, x_8275, x_4, x_5, x_8274); +return x_8276; +} +} +} +case 1: +{ +lean_object* x_8277; lean_object* x_8278; lean_object* x_8305; lean_object* x_8306; +lean_dec(x_8173); +lean_dec(x_8168); +lean_dec(x_5945); +lean_dec(x_5944); +lean_inc(x_153); +x_8305 = l_Lean_Compiler_LCNF_getMonoDecl_x3f(x_153, x_4, x_5, x_8166); +x_8306 = lean_ctor_get(x_8305, 0); +lean_inc(x_8306); +if (lean_obj_tag(x_8306) == 0) +{ +lean_object* x_8307; lean_object* x_8308; lean_object* x_8309; +x_8307 = lean_ctor_get(x_8305, 1); +lean_inc(x_8307); +lean_dec(x_8305); +x_8308 = lean_box(0); +if (lean_is_scalar(x_8163)) { + x_8309 = lean_alloc_ctor(0, 2, 0); +} else { + x_8309 = x_8163; +} +lean_ctor_set(x_8309, 0, x_8308); +lean_ctor_set(x_8309, 1, x_8162); +x_8277 = x_8309; +x_8278 = x_8307; +goto block_8304; +} +else +{ +lean_object* x_8310; lean_object* x_8311; lean_object* x_8312; lean_object* x_8313; lean_object* x_8314; lean_object* x_8315; lean_object* x_8316; uint8_t x_8317; +lean_dec(x_8163); +x_8310 = lean_ctor_get(x_8305, 1); +lean_inc(x_8310); +if (lean_is_exclusive(x_8305)) { + lean_ctor_release(x_8305, 0); + lean_ctor_release(x_8305, 1); + x_8311 = x_8305; +} else { + lean_dec_ref(x_8305); + x_8311 = lean_box(0); +} +x_8312 = lean_ctor_get(x_8306, 0); +lean_inc(x_8312); +if (lean_is_exclusive(x_8306)) { + lean_ctor_release(x_8306, 0); + x_8313 = x_8306; +} else { + lean_dec_ref(x_8306); + x_8313 = lean_box(0); +} +x_8314 = lean_array_get_size(x_8157); +x_8315 = lean_ctor_get(x_8312, 3); +lean_inc(x_8315); +lean_dec(x_8312); +x_8316 = lean_array_get_size(x_8315); +lean_dec(x_8315); +x_8317 = lean_nat_dec_lt(x_8314, x_8316); +if (x_8317 == 0) +{ +uint8_t x_8318; +x_8318 = lean_nat_dec_eq(x_8314, x_8316); +if (x_8318 == 0) +{ +lean_object* x_8319; lean_object* x_8320; lean_object* x_8321; lean_object* x_8322; lean_object* x_8323; lean_object* x_8324; lean_object* x_8325; lean_object* x_8326; lean_object* x_8327; lean_object* x_8328; lean_object* x_8329; lean_object* x_8330; lean_object* x_8331; lean_object* x_8332; lean_object* x_8333; lean_object* x_8334; lean_object* x_8335; +x_8319 = lean_unsigned_to_nat(0u); +x_8320 = 
l_Array_extract___rarg(x_8157, x_8319, x_8316); +x_8321 = l_Array_extract___rarg(x_8157, x_8316, x_8314); +lean_dec(x_8314); +lean_inc(x_153); +if (lean_is_scalar(x_8311)) { + x_8322 = lean_alloc_ctor(6, 2, 0); +} else { + x_8322 = x_8311; + lean_ctor_set_tag(x_8322, 6); +} +lean_ctor_set(x_8322, 0, x_153); +lean_ctor_set(x_8322, 1, x_8320); +x_8323 = lean_ctor_get(x_1, 0); +lean_inc(x_8323); +x_8324 = l_Lean_IR_ToIR_bindVar(x_8323, x_8162, x_4, x_5, x_8310); +x_8325 = lean_ctor_get(x_8324, 0); +lean_inc(x_8325); +x_8326 = lean_ctor_get(x_8324, 1); +lean_inc(x_8326); +lean_dec(x_8324); +x_8327 = lean_ctor_get(x_8325, 0); +lean_inc(x_8327); +x_8328 = lean_ctor_get(x_8325, 1); +lean_inc(x_8328); +lean_dec(x_8325); +x_8329 = l_Lean_IR_ToIR_newVar(x_8328, x_4, x_5, x_8326); +x_8330 = lean_ctor_get(x_8329, 0); +lean_inc(x_8330); +x_8331 = lean_ctor_get(x_8329, 1); +lean_inc(x_8331); +lean_dec(x_8329); +x_8332 = lean_ctor_get(x_8330, 0); +lean_inc(x_8332); +x_8333 = lean_ctor_get(x_8330, 1); +lean_inc(x_8333); +lean_dec(x_8330); +x_8334 = lean_ctor_get(x_1, 2); +lean_inc(x_8334); +lean_inc(x_5); +lean_inc(x_4); +x_8335 = l_Lean_IR_ToIR_lowerType(x_8334, x_8333, x_4, x_5, x_8331); +if (lean_obj_tag(x_8335) == 0) +{ +lean_object* x_8336; lean_object* x_8337; lean_object* x_8338; lean_object* x_8339; lean_object* x_8340; +x_8336 = lean_ctor_get(x_8335, 0); +lean_inc(x_8336); +x_8337 = lean_ctor_get(x_8335, 1); +lean_inc(x_8337); +lean_dec(x_8335); +x_8338 = lean_ctor_get(x_8336, 0); +lean_inc(x_8338); +x_8339 = lean_ctor_get(x_8336, 1); +lean_inc(x_8339); +lean_dec(x_8336); +lean_inc(x_5); +lean_inc(x_4); +lean_inc(x_2); +x_8340 = l_Lean_IR_ToIR_lowerLet___lambda__3(x_2, x_8332, x_8321, x_8327, x_8322, x_8338, x_8339, x_4, x_5, x_8337); +if (lean_obj_tag(x_8340) == 0) +{ +lean_object* x_8341; lean_object* x_8342; lean_object* x_8343; lean_object* x_8344; lean_object* x_8345; lean_object* x_8346; lean_object* x_8347; +x_8341 = lean_ctor_get(x_8340, 0); +lean_inc(x_8341); +x_8342 = lean_ctor_get(x_8340, 1); +lean_inc(x_8342); +lean_dec(x_8340); +x_8343 = lean_ctor_get(x_8341, 0); +lean_inc(x_8343); +x_8344 = lean_ctor_get(x_8341, 1); +lean_inc(x_8344); +if (lean_is_exclusive(x_8341)) { + lean_ctor_release(x_8341, 0); + lean_ctor_release(x_8341, 1); + x_8345 = x_8341; +} else { + lean_dec_ref(x_8341); + x_8345 = lean_box(0); +} +if (lean_is_scalar(x_8313)) { + x_8346 = lean_alloc_ctor(1, 1, 0); +} else { + x_8346 = x_8313; +} +lean_ctor_set(x_8346, 0, x_8343); +if (lean_is_scalar(x_8345)) { + x_8347 = lean_alloc_ctor(0, 2, 0); +} else { + x_8347 = x_8345; +} +lean_ctor_set(x_8347, 0, x_8346); +lean_ctor_set(x_8347, 1, x_8344); +x_8277 = x_8347; +x_8278 = x_8342; +goto block_8304; +} +else +{ +lean_object* x_8348; lean_object* x_8349; lean_object* x_8350; lean_object* x_8351; +lean_dec(x_8313); +lean_dec(x_8167); +lean_dec(x_8157); +lean_dec(x_153); +lean_dec(x_5); +lean_dec(x_4); +lean_dec(x_2); +lean_dec(x_1); +x_8348 = lean_ctor_get(x_8340, 0); +lean_inc(x_8348); +x_8349 = lean_ctor_get(x_8340, 1); +lean_inc(x_8349); +if (lean_is_exclusive(x_8340)) { + lean_ctor_release(x_8340, 0); + lean_ctor_release(x_8340, 1); + x_8350 = x_8340; +} else { + lean_dec_ref(x_8340); + x_8350 = lean_box(0); +} +if (lean_is_scalar(x_8350)) { + x_8351 = lean_alloc_ctor(1, 2, 0); +} else { + x_8351 = x_8350; +} +lean_ctor_set(x_8351, 0, x_8348); +lean_ctor_set(x_8351, 1, x_8349); +return x_8351; +} +} +else +{ +lean_object* x_8352; lean_object* x_8353; lean_object* x_8354; lean_object* x_8355; +lean_dec(x_8332); 
+lean_dec(x_8327); +lean_dec(x_8322); +lean_dec(x_8321); +lean_dec(x_8313); +lean_dec(x_8167); +lean_dec(x_8157); +lean_dec(x_153); +lean_dec(x_5); +lean_dec(x_4); +lean_dec(x_2); +lean_dec(x_1); +x_8352 = lean_ctor_get(x_8335, 0); +lean_inc(x_8352); +x_8353 = lean_ctor_get(x_8335, 1); +lean_inc(x_8353); +if (lean_is_exclusive(x_8335)) { + lean_ctor_release(x_8335, 0); + lean_ctor_release(x_8335, 1); + x_8354 = x_8335; +} else { + lean_dec_ref(x_8335); + x_8354 = lean_box(0); +} +if (lean_is_scalar(x_8354)) { + x_8355 = lean_alloc_ctor(1, 2, 0); +} else { + x_8355 = x_8354; +} +lean_ctor_set(x_8355, 0, x_8352); +lean_ctor_set(x_8355, 1, x_8353); +return x_8355; +} +} +else +{ +lean_object* x_8356; lean_object* x_8357; lean_object* x_8358; lean_object* x_8359; lean_object* x_8360; lean_object* x_8361; lean_object* x_8362; lean_object* x_8363; lean_object* x_8364; +lean_dec(x_8316); +lean_dec(x_8314); +lean_inc(x_8157); +lean_inc(x_153); +if (lean_is_scalar(x_8311)) { + x_8356 = lean_alloc_ctor(6, 2, 0); +} else { + x_8356 = x_8311; + lean_ctor_set_tag(x_8356, 6); +} +lean_ctor_set(x_8356, 0, x_153); +lean_ctor_set(x_8356, 1, x_8157); +x_8357 = lean_ctor_get(x_1, 0); +lean_inc(x_8357); +x_8358 = l_Lean_IR_ToIR_bindVar(x_8357, x_8162, x_4, x_5, x_8310); +x_8359 = lean_ctor_get(x_8358, 0); +lean_inc(x_8359); +x_8360 = lean_ctor_get(x_8358, 1); +lean_inc(x_8360); +lean_dec(x_8358); +x_8361 = lean_ctor_get(x_8359, 0); +lean_inc(x_8361); +x_8362 = lean_ctor_get(x_8359, 1); +lean_inc(x_8362); +lean_dec(x_8359); +x_8363 = lean_ctor_get(x_1, 2); +lean_inc(x_8363); +lean_inc(x_5); +lean_inc(x_4); +x_8364 = l_Lean_IR_ToIR_lowerType(x_8363, x_8362, x_4, x_5, x_8360); +if (lean_obj_tag(x_8364) == 0) +{ +lean_object* x_8365; lean_object* x_8366; lean_object* x_8367; lean_object* x_8368; lean_object* x_8369; +x_8365 = lean_ctor_get(x_8364, 0); +lean_inc(x_8365); +x_8366 = lean_ctor_get(x_8364, 1); +lean_inc(x_8366); +lean_dec(x_8364); +x_8367 = lean_ctor_get(x_8365, 0); +lean_inc(x_8367); +x_8368 = lean_ctor_get(x_8365, 1); +lean_inc(x_8368); +lean_dec(x_8365); +lean_inc(x_5); +lean_inc(x_4); +lean_inc(x_2); +x_8369 = l_Lean_IR_ToIR_lowerLet___lambda__1(x_2, x_8361, x_8356, x_8367, x_8368, x_4, x_5, x_8366); +if (lean_obj_tag(x_8369) == 0) +{ +lean_object* x_8370; lean_object* x_8371; lean_object* x_8372; lean_object* x_8373; lean_object* x_8374; lean_object* x_8375; lean_object* x_8376; +x_8370 = lean_ctor_get(x_8369, 0); +lean_inc(x_8370); +x_8371 = lean_ctor_get(x_8369, 1); +lean_inc(x_8371); +lean_dec(x_8369); +x_8372 = lean_ctor_get(x_8370, 0); +lean_inc(x_8372); +x_8373 = lean_ctor_get(x_8370, 1); +lean_inc(x_8373); +if (lean_is_exclusive(x_8370)) { + lean_ctor_release(x_8370, 0); + lean_ctor_release(x_8370, 1); + x_8374 = x_8370; +} else { + lean_dec_ref(x_8370); + x_8374 = lean_box(0); +} +if (lean_is_scalar(x_8313)) { + x_8375 = lean_alloc_ctor(1, 1, 0); +} else { + x_8375 = x_8313; +} +lean_ctor_set(x_8375, 0, x_8372); +if (lean_is_scalar(x_8374)) { + x_8376 = lean_alloc_ctor(0, 2, 0); +} else { + x_8376 = x_8374; +} +lean_ctor_set(x_8376, 0, x_8375); +lean_ctor_set(x_8376, 1, x_8373); +x_8277 = x_8376; +x_8278 = x_8371; +goto block_8304; +} +else +{ +lean_object* x_8377; lean_object* x_8378; lean_object* x_8379; lean_object* x_8380; +lean_dec(x_8313); +lean_dec(x_8167); +lean_dec(x_8157); +lean_dec(x_153); +lean_dec(x_5); +lean_dec(x_4); +lean_dec(x_2); +lean_dec(x_1); +x_8377 = lean_ctor_get(x_8369, 0); +lean_inc(x_8377); +x_8378 = lean_ctor_get(x_8369, 1); +lean_inc(x_8378); +if 
(lean_is_exclusive(x_8369)) { + lean_ctor_release(x_8369, 0); + lean_ctor_release(x_8369, 1); + x_8379 = x_8369; +} else { + lean_dec_ref(x_8369); + x_8379 = lean_box(0); +} +if (lean_is_scalar(x_8379)) { + x_8380 = lean_alloc_ctor(1, 2, 0); +} else { + x_8380 = x_8379; +} +lean_ctor_set(x_8380, 0, x_8377); +lean_ctor_set(x_8380, 1, x_8378); +return x_8380; +} +} +else +{ +lean_object* x_8381; lean_object* x_8382; lean_object* x_8383; lean_object* x_8384; +lean_dec(x_8361); +lean_dec(x_8356); +lean_dec(x_8313); +lean_dec(x_8167); +lean_dec(x_8157); +lean_dec(x_153); +lean_dec(x_5); +lean_dec(x_4); +lean_dec(x_2); +lean_dec(x_1); +x_8381 = lean_ctor_get(x_8364, 0); +lean_inc(x_8381); +x_8382 = lean_ctor_get(x_8364, 1); +lean_inc(x_8382); +if (lean_is_exclusive(x_8364)) { + lean_ctor_release(x_8364, 0); + lean_ctor_release(x_8364, 1); + x_8383 = x_8364; +} else { + lean_dec_ref(x_8364); + x_8383 = lean_box(0); +} +if (lean_is_scalar(x_8383)) { + x_8384 = lean_alloc_ctor(1, 2, 0); +} else { + x_8384 = x_8383; +} +lean_ctor_set(x_8384, 0, x_8381); +lean_ctor_set(x_8384, 1, x_8382); +return x_8384; +} +} +} +else +{ +lean_object* x_8385; lean_object* x_8386; lean_object* x_8387; lean_object* x_8388; lean_object* x_8389; lean_object* x_8390; lean_object* x_8391; lean_object* x_8392; lean_object* x_8393; +lean_dec(x_8316); +lean_dec(x_8314); +lean_inc(x_8157); +lean_inc(x_153); +if (lean_is_scalar(x_8311)) { + x_8385 = lean_alloc_ctor(7, 2, 0); +} else { + x_8385 = x_8311; + lean_ctor_set_tag(x_8385, 7); +} +lean_ctor_set(x_8385, 0, x_153); +lean_ctor_set(x_8385, 1, x_8157); +x_8386 = lean_ctor_get(x_1, 0); +lean_inc(x_8386); +x_8387 = l_Lean_IR_ToIR_bindVar(x_8386, x_8162, x_4, x_5, x_8310); +x_8388 = lean_ctor_get(x_8387, 0); +lean_inc(x_8388); +x_8389 = lean_ctor_get(x_8387, 1); +lean_inc(x_8389); +lean_dec(x_8387); +x_8390 = lean_ctor_get(x_8388, 0); +lean_inc(x_8390); +x_8391 = lean_ctor_get(x_8388, 1); +lean_inc(x_8391); +lean_dec(x_8388); +x_8392 = lean_box(7); +lean_inc(x_5); +lean_inc(x_4); +lean_inc(x_2); +x_8393 = l_Lean_IR_ToIR_lowerLet___lambda__1(x_2, x_8390, x_8385, x_8392, x_8391, x_4, x_5, x_8389); +if (lean_obj_tag(x_8393) == 0) +{ +lean_object* x_8394; lean_object* x_8395; lean_object* x_8396; lean_object* x_8397; lean_object* x_8398; lean_object* x_8399; lean_object* x_8400; +x_8394 = lean_ctor_get(x_8393, 0); +lean_inc(x_8394); +x_8395 = lean_ctor_get(x_8393, 1); +lean_inc(x_8395); +lean_dec(x_8393); +x_8396 = lean_ctor_get(x_8394, 0); +lean_inc(x_8396); +x_8397 = lean_ctor_get(x_8394, 1); +lean_inc(x_8397); +if (lean_is_exclusive(x_8394)) { + lean_ctor_release(x_8394, 0); + lean_ctor_release(x_8394, 1); + x_8398 = x_8394; +} else { + lean_dec_ref(x_8394); + x_8398 = lean_box(0); +} +if (lean_is_scalar(x_8313)) { + x_8399 = lean_alloc_ctor(1, 1, 0); +} else { + x_8399 = x_8313; +} +lean_ctor_set(x_8399, 0, x_8396); +if (lean_is_scalar(x_8398)) { + x_8400 = lean_alloc_ctor(0, 2, 0); +} else { + x_8400 = x_8398; +} +lean_ctor_set(x_8400, 0, x_8399); +lean_ctor_set(x_8400, 1, x_8397); +x_8277 = x_8400; +x_8278 = x_8395; +goto block_8304; +} +else +{ +lean_object* x_8401; lean_object* x_8402; lean_object* x_8403; lean_object* x_8404; +lean_dec(x_8313); +lean_dec(x_8167); +lean_dec(x_8157); +lean_dec(x_153); +lean_dec(x_5); +lean_dec(x_4); +lean_dec(x_2); +lean_dec(x_1); +x_8401 = lean_ctor_get(x_8393, 0); +lean_inc(x_8401); +x_8402 = lean_ctor_get(x_8393, 1); +lean_inc(x_8402); +if (lean_is_exclusive(x_8393)) { + lean_ctor_release(x_8393, 0); + lean_ctor_release(x_8393, 1); + 
x_8403 = x_8393; +} else { + lean_dec_ref(x_8393); + x_8403 = lean_box(0); +} +if (lean_is_scalar(x_8403)) { + x_8404 = lean_alloc_ctor(1, 2, 0); +} else { + x_8404 = x_8403; +} +lean_ctor_set(x_8404, 0, x_8401); +lean_ctor_set(x_8404, 1, x_8402); +return x_8404; +} +} +} +block_8304: +{ +lean_object* x_8279; +x_8279 = lean_ctor_get(x_8277, 0); +lean_inc(x_8279); +if (lean_obj_tag(x_8279) == 0) +{ +lean_object* x_8280; lean_object* x_8281; lean_object* x_8282; lean_object* x_8283; lean_object* x_8284; lean_object* x_8285; lean_object* x_8286; lean_object* x_8287; lean_object* x_8288; lean_object* x_8289; +lean_dec(x_8167); +x_8280 = lean_ctor_get(x_8277, 1); +lean_inc(x_8280); +lean_dec(x_8277); +x_8281 = lean_alloc_ctor(6, 2, 0); +lean_ctor_set(x_8281, 0, x_153); +lean_ctor_set(x_8281, 1, x_8157); +x_8282 = lean_ctor_get(x_1, 0); +lean_inc(x_8282); +x_8283 = l_Lean_IR_ToIR_bindVar(x_8282, x_8280, x_4, x_5, x_8278); +x_8284 = lean_ctor_get(x_8283, 0); +lean_inc(x_8284); +x_8285 = lean_ctor_get(x_8283, 1); +lean_inc(x_8285); +lean_dec(x_8283); +x_8286 = lean_ctor_get(x_8284, 0); +lean_inc(x_8286); +x_8287 = lean_ctor_get(x_8284, 1); +lean_inc(x_8287); +lean_dec(x_8284); +x_8288 = lean_ctor_get(x_1, 2); +lean_inc(x_8288); +lean_dec(x_1); +lean_inc(x_5); +lean_inc(x_4); +x_8289 = l_Lean_IR_ToIR_lowerType(x_8288, x_8287, x_4, x_5, x_8285); +if (lean_obj_tag(x_8289) == 0) +{ +lean_object* x_8290; lean_object* x_8291; lean_object* x_8292; lean_object* x_8293; lean_object* x_8294; +x_8290 = lean_ctor_get(x_8289, 0); +lean_inc(x_8290); +x_8291 = lean_ctor_get(x_8289, 1); +lean_inc(x_8291); +lean_dec(x_8289); +x_8292 = lean_ctor_get(x_8290, 0); +lean_inc(x_8292); +x_8293 = lean_ctor_get(x_8290, 1); +lean_inc(x_8293); +lean_dec(x_8290); +x_8294 = l_Lean_IR_ToIR_lowerLet___lambda__1(x_2, x_8286, x_8281, x_8292, x_8293, x_4, x_5, x_8291); +return x_8294; +} +else +{ +lean_object* x_8295; lean_object* x_8296; lean_object* x_8297; lean_object* x_8298; +lean_dec(x_8286); +lean_dec(x_8281); +lean_dec(x_5); +lean_dec(x_4); +lean_dec(x_2); +x_8295 = lean_ctor_get(x_8289, 0); +lean_inc(x_8295); +x_8296 = lean_ctor_get(x_8289, 1); +lean_inc(x_8296); +if (lean_is_exclusive(x_8289)) { + lean_ctor_release(x_8289, 0); + lean_ctor_release(x_8289, 1); + x_8297 = x_8289; +} else { + lean_dec_ref(x_8289); + x_8297 = lean_box(0); +} +if (lean_is_scalar(x_8297)) { + x_8298 = lean_alloc_ctor(1, 2, 0); +} else { + x_8298 = x_8297; +} +lean_ctor_set(x_8298, 0, x_8295); +lean_ctor_set(x_8298, 1, x_8296); +return x_8298; +} +} +else +{ +lean_object* x_8299; lean_object* x_8300; lean_object* x_8301; lean_object* x_8302; lean_object* x_8303; +lean_dec(x_8157); +lean_dec(x_153); +lean_dec(x_5); +lean_dec(x_4); +lean_dec(x_2); +lean_dec(x_1); +x_8299 = lean_ctor_get(x_8277, 1); +lean_inc(x_8299); +if (lean_is_exclusive(x_8277)) { + lean_ctor_release(x_8277, 0); + lean_ctor_release(x_8277, 1); + x_8300 = x_8277; +} else { + lean_dec_ref(x_8277); + x_8300 = lean_box(0); +} +x_8301 = lean_ctor_get(x_8279, 0); +lean_inc(x_8301); +lean_dec(x_8279); +if (lean_is_scalar(x_8300)) { + x_8302 = lean_alloc_ctor(0, 2, 0); +} else { + x_8302 = x_8300; +} +lean_ctor_set(x_8302, 0, x_8301); +lean_ctor_set(x_8302, 1, x_8299); +if (lean_is_scalar(x_8167)) { + x_8303 = lean_alloc_ctor(0, 2, 0); +} else { + x_8303 = x_8167; +} +lean_ctor_set(x_8303, 0, x_8302); +lean_ctor_set(x_8303, 1, x_8278); +return x_8303; +} +} +} +case 2: +{ +lean_object* x_8405; lean_object* x_8406; +lean_dec(x_8173); +lean_dec(x_8168); +lean_dec(x_8167); 
+lean_dec(x_8163); +lean_dec(x_8157); +lean_dec(x_5945); +lean_dec(x_5944); +lean_dec(x_153); +lean_dec(x_2); +lean_dec(x_1); +x_8405 = l_Lean_IR_ToIR_lowerLet___closed__18; +x_8406 = l_panic___at_Lean_IR_ToIR_lowerAlt_loop___spec__1(x_8405, x_8162, x_4, x_5, x_8166); +return x_8406; +} +case 3: +{ +lean_object* x_8407; lean_object* x_8408; lean_object* x_8435; lean_object* x_8436; +lean_dec(x_8173); +lean_dec(x_8168); +lean_dec(x_5945); +lean_dec(x_5944); +lean_inc(x_153); +x_8435 = l_Lean_Compiler_LCNF_getMonoDecl_x3f(x_153, x_4, x_5, x_8166); +x_8436 = lean_ctor_get(x_8435, 0); +lean_inc(x_8436); +if (lean_obj_tag(x_8436) == 0) +{ +lean_object* x_8437; lean_object* x_8438; lean_object* x_8439; +x_8437 = lean_ctor_get(x_8435, 1); +lean_inc(x_8437); +lean_dec(x_8435); +x_8438 = lean_box(0); +if (lean_is_scalar(x_8163)) { + x_8439 = lean_alloc_ctor(0, 2, 0); +} else { + x_8439 = x_8163; +} +lean_ctor_set(x_8439, 0, x_8438); +lean_ctor_set(x_8439, 1, x_8162); +x_8407 = x_8439; +x_8408 = x_8437; +goto block_8434; +} +else +{ +lean_object* x_8440; lean_object* x_8441; lean_object* x_8442; lean_object* x_8443; lean_object* x_8444; lean_object* x_8445; lean_object* x_8446; uint8_t x_8447; +lean_dec(x_8163); +x_8440 = lean_ctor_get(x_8435, 1); +lean_inc(x_8440); +if (lean_is_exclusive(x_8435)) { + lean_ctor_release(x_8435, 0); + lean_ctor_release(x_8435, 1); + x_8441 = x_8435; +} else { + lean_dec_ref(x_8435); + x_8441 = lean_box(0); +} +x_8442 = lean_ctor_get(x_8436, 0); +lean_inc(x_8442); +if (lean_is_exclusive(x_8436)) { + lean_ctor_release(x_8436, 0); + x_8443 = x_8436; +} else { + lean_dec_ref(x_8436); + x_8443 = lean_box(0); +} +x_8444 = lean_array_get_size(x_8157); +x_8445 = lean_ctor_get(x_8442, 3); +lean_inc(x_8445); +lean_dec(x_8442); +x_8446 = lean_array_get_size(x_8445); +lean_dec(x_8445); +x_8447 = lean_nat_dec_lt(x_8444, x_8446); +if (x_8447 == 0) +{ +uint8_t x_8448; +x_8448 = lean_nat_dec_eq(x_8444, x_8446); +if (x_8448 == 0) +{ +lean_object* x_8449; lean_object* x_8450; lean_object* x_8451; lean_object* x_8452; lean_object* x_8453; lean_object* x_8454; lean_object* x_8455; lean_object* x_8456; lean_object* x_8457; lean_object* x_8458; lean_object* x_8459; lean_object* x_8460; lean_object* x_8461; lean_object* x_8462; lean_object* x_8463; lean_object* x_8464; lean_object* x_8465; +x_8449 = lean_unsigned_to_nat(0u); +x_8450 = l_Array_extract___rarg(x_8157, x_8449, x_8446); +x_8451 = l_Array_extract___rarg(x_8157, x_8446, x_8444); +lean_dec(x_8444); +lean_inc(x_153); +if (lean_is_scalar(x_8441)) { + x_8452 = lean_alloc_ctor(6, 2, 0); +} else { + x_8452 = x_8441; + lean_ctor_set_tag(x_8452, 6); +} +lean_ctor_set(x_8452, 0, x_153); +lean_ctor_set(x_8452, 1, x_8450); +x_8453 = lean_ctor_get(x_1, 0); +lean_inc(x_8453); +x_8454 = l_Lean_IR_ToIR_bindVar(x_8453, x_8162, x_4, x_5, x_8440); +x_8455 = lean_ctor_get(x_8454, 0); +lean_inc(x_8455); +x_8456 = lean_ctor_get(x_8454, 1); +lean_inc(x_8456); +lean_dec(x_8454); +x_8457 = lean_ctor_get(x_8455, 0); +lean_inc(x_8457); +x_8458 = lean_ctor_get(x_8455, 1); +lean_inc(x_8458); +lean_dec(x_8455); +x_8459 = l_Lean_IR_ToIR_newVar(x_8458, x_4, x_5, x_8456); +x_8460 = lean_ctor_get(x_8459, 0); +lean_inc(x_8460); +x_8461 = lean_ctor_get(x_8459, 1); +lean_inc(x_8461); +lean_dec(x_8459); +x_8462 = lean_ctor_get(x_8460, 0); +lean_inc(x_8462); +x_8463 = lean_ctor_get(x_8460, 1); +lean_inc(x_8463); +lean_dec(x_8460); +x_8464 = lean_ctor_get(x_1, 2); +lean_inc(x_8464); +lean_inc(x_5); +lean_inc(x_4); +x_8465 = l_Lean_IR_ToIR_lowerType(x_8464, x_8463, x_4, 
x_5, x_8461); +if (lean_obj_tag(x_8465) == 0) +{ +lean_object* x_8466; lean_object* x_8467; lean_object* x_8468; lean_object* x_8469; lean_object* x_8470; +x_8466 = lean_ctor_get(x_8465, 0); +lean_inc(x_8466); +x_8467 = lean_ctor_get(x_8465, 1); +lean_inc(x_8467); +lean_dec(x_8465); +x_8468 = lean_ctor_get(x_8466, 0); +lean_inc(x_8468); +x_8469 = lean_ctor_get(x_8466, 1); +lean_inc(x_8469); +lean_dec(x_8466); +lean_inc(x_5); +lean_inc(x_4); +lean_inc(x_2); +x_8470 = l_Lean_IR_ToIR_lowerLet___lambda__3(x_2, x_8462, x_8451, x_8457, x_8452, x_8468, x_8469, x_4, x_5, x_8467); +if (lean_obj_tag(x_8470) == 0) +{ +lean_object* x_8471; lean_object* x_8472; lean_object* x_8473; lean_object* x_8474; lean_object* x_8475; lean_object* x_8476; lean_object* x_8477; +x_8471 = lean_ctor_get(x_8470, 0); +lean_inc(x_8471); +x_8472 = lean_ctor_get(x_8470, 1); +lean_inc(x_8472); +lean_dec(x_8470); +x_8473 = lean_ctor_get(x_8471, 0); +lean_inc(x_8473); +x_8474 = lean_ctor_get(x_8471, 1); +lean_inc(x_8474); +if (lean_is_exclusive(x_8471)) { + lean_ctor_release(x_8471, 0); + lean_ctor_release(x_8471, 1); + x_8475 = x_8471; +} else { + lean_dec_ref(x_8471); + x_8475 = lean_box(0); +} +if (lean_is_scalar(x_8443)) { + x_8476 = lean_alloc_ctor(1, 1, 0); +} else { + x_8476 = x_8443; +} +lean_ctor_set(x_8476, 0, x_8473); +if (lean_is_scalar(x_8475)) { + x_8477 = lean_alloc_ctor(0, 2, 0); +} else { + x_8477 = x_8475; +} +lean_ctor_set(x_8477, 0, x_8476); +lean_ctor_set(x_8477, 1, x_8474); +x_8407 = x_8477; +x_8408 = x_8472; +goto block_8434; +} +else +{ +lean_object* x_8478; lean_object* x_8479; lean_object* x_8480; lean_object* x_8481; +lean_dec(x_8443); +lean_dec(x_8167); +lean_dec(x_8157); +lean_dec(x_153); +lean_dec(x_5); +lean_dec(x_4); +lean_dec(x_2); +lean_dec(x_1); +x_8478 = lean_ctor_get(x_8470, 0); +lean_inc(x_8478); +x_8479 = lean_ctor_get(x_8470, 1); +lean_inc(x_8479); +if (lean_is_exclusive(x_8470)) { + lean_ctor_release(x_8470, 0); + lean_ctor_release(x_8470, 1); + x_8480 = x_8470; +} else { + lean_dec_ref(x_8470); + x_8480 = lean_box(0); +} +if (lean_is_scalar(x_8480)) { + x_8481 = lean_alloc_ctor(1, 2, 0); +} else { + x_8481 = x_8480; +} +lean_ctor_set(x_8481, 0, x_8478); +lean_ctor_set(x_8481, 1, x_8479); +return x_8481; +} +} +else +{ +lean_object* x_8482; lean_object* x_8483; lean_object* x_8484; lean_object* x_8485; +lean_dec(x_8462); +lean_dec(x_8457); +lean_dec(x_8452); +lean_dec(x_8451); +lean_dec(x_8443); +lean_dec(x_8167); +lean_dec(x_8157); +lean_dec(x_153); +lean_dec(x_5); +lean_dec(x_4); +lean_dec(x_2); +lean_dec(x_1); +x_8482 = lean_ctor_get(x_8465, 0); +lean_inc(x_8482); +x_8483 = lean_ctor_get(x_8465, 1); +lean_inc(x_8483); +if (lean_is_exclusive(x_8465)) { + lean_ctor_release(x_8465, 0); + lean_ctor_release(x_8465, 1); + x_8484 = x_8465; +} else { + lean_dec_ref(x_8465); + x_8484 = lean_box(0); +} +if (lean_is_scalar(x_8484)) { + x_8485 = lean_alloc_ctor(1, 2, 0); +} else { + x_8485 = x_8484; +} +lean_ctor_set(x_8485, 0, x_8482); +lean_ctor_set(x_8485, 1, x_8483); +return x_8485; +} +} +else +{ +lean_object* x_8486; lean_object* x_8487; lean_object* x_8488; lean_object* x_8489; lean_object* x_8490; lean_object* x_8491; lean_object* x_8492; lean_object* x_8493; lean_object* x_8494; +lean_dec(x_8446); +lean_dec(x_8444); +lean_inc(x_8157); +lean_inc(x_153); +if (lean_is_scalar(x_8441)) { + x_8486 = lean_alloc_ctor(6, 2, 0); +} else { + x_8486 = x_8441; + lean_ctor_set_tag(x_8486, 6); +} +lean_ctor_set(x_8486, 0, x_153); +lean_ctor_set(x_8486, 1, x_8157); +x_8487 = lean_ctor_get(x_1, 0); 
+lean_inc(x_8487); +x_8488 = l_Lean_IR_ToIR_bindVar(x_8487, x_8162, x_4, x_5, x_8440); +x_8489 = lean_ctor_get(x_8488, 0); +lean_inc(x_8489); +x_8490 = lean_ctor_get(x_8488, 1); +lean_inc(x_8490); +lean_dec(x_8488); +x_8491 = lean_ctor_get(x_8489, 0); +lean_inc(x_8491); +x_8492 = lean_ctor_get(x_8489, 1); +lean_inc(x_8492); +lean_dec(x_8489); +x_8493 = lean_ctor_get(x_1, 2); +lean_inc(x_8493); +lean_inc(x_5); +lean_inc(x_4); +x_8494 = l_Lean_IR_ToIR_lowerType(x_8493, x_8492, x_4, x_5, x_8490); +if (lean_obj_tag(x_8494) == 0) +{ +lean_object* x_8495; lean_object* x_8496; lean_object* x_8497; lean_object* x_8498; lean_object* x_8499; +x_8495 = lean_ctor_get(x_8494, 0); +lean_inc(x_8495); +x_8496 = lean_ctor_get(x_8494, 1); +lean_inc(x_8496); +lean_dec(x_8494); +x_8497 = lean_ctor_get(x_8495, 0); +lean_inc(x_8497); +x_8498 = lean_ctor_get(x_8495, 1); +lean_inc(x_8498); +lean_dec(x_8495); +lean_inc(x_5); +lean_inc(x_4); +lean_inc(x_2); +x_8499 = l_Lean_IR_ToIR_lowerLet___lambda__1(x_2, x_8491, x_8486, x_8497, x_8498, x_4, x_5, x_8496); +if (lean_obj_tag(x_8499) == 0) +{ +lean_object* x_8500; lean_object* x_8501; lean_object* x_8502; lean_object* x_8503; lean_object* x_8504; lean_object* x_8505; lean_object* x_8506; +x_8500 = lean_ctor_get(x_8499, 0); +lean_inc(x_8500); +x_8501 = lean_ctor_get(x_8499, 1); +lean_inc(x_8501); +lean_dec(x_8499); +x_8502 = lean_ctor_get(x_8500, 0); +lean_inc(x_8502); +x_8503 = lean_ctor_get(x_8500, 1); +lean_inc(x_8503); +if (lean_is_exclusive(x_8500)) { + lean_ctor_release(x_8500, 0); + lean_ctor_release(x_8500, 1); + x_8504 = x_8500; +} else { + lean_dec_ref(x_8500); + x_8504 = lean_box(0); +} +if (lean_is_scalar(x_8443)) { + x_8505 = lean_alloc_ctor(1, 1, 0); +} else { + x_8505 = x_8443; +} +lean_ctor_set(x_8505, 0, x_8502); +if (lean_is_scalar(x_8504)) { + x_8506 = lean_alloc_ctor(0, 2, 0); +} else { + x_8506 = x_8504; +} +lean_ctor_set(x_8506, 0, x_8505); +lean_ctor_set(x_8506, 1, x_8503); +x_8407 = x_8506; +x_8408 = x_8501; +goto block_8434; +} +else +{ +lean_object* x_8507; lean_object* x_8508; lean_object* x_8509; lean_object* x_8510; +lean_dec(x_8443); +lean_dec(x_8167); +lean_dec(x_8157); +lean_dec(x_153); +lean_dec(x_5); +lean_dec(x_4); +lean_dec(x_2); +lean_dec(x_1); +x_8507 = lean_ctor_get(x_8499, 0); +lean_inc(x_8507); +x_8508 = lean_ctor_get(x_8499, 1); +lean_inc(x_8508); +if (lean_is_exclusive(x_8499)) { + lean_ctor_release(x_8499, 0); + lean_ctor_release(x_8499, 1); + x_8509 = x_8499; +} else { + lean_dec_ref(x_8499); + x_8509 = lean_box(0); +} +if (lean_is_scalar(x_8509)) { + x_8510 = lean_alloc_ctor(1, 2, 0); +} else { + x_8510 = x_8509; +} +lean_ctor_set(x_8510, 0, x_8507); +lean_ctor_set(x_8510, 1, x_8508); +return x_8510; +} +} +else +{ +lean_object* x_8511; lean_object* x_8512; lean_object* x_8513; lean_object* x_8514; +lean_dec(x_8491); +lean_dec(x_8486); +lean_dec(x_8443); +lean_dec(x_8167); +lean_dec(x_8157); +lean_dec(x_153); +lean_dec(x_5); +lean_dec(x_4); +lean_dec(x_2); +lean_dec(x_1); +x_8511 = lean_ctor_get(x_8494, 0); +lean_inc(x_8511); +x_8512 = lean_ctor_get(x_8494, 1); +lean_inc(x_8512); +if (lean_is_exclusive(x_8494)) { + lean_ctor_release(x_8494, 0); + lean_ctor_release(x_8494, 1); + x_8513 = x_8494; +} else { + lean_dec_ref(x_8494); + x_8513 = lean_box(0); +} +if (lean_is_scalar(x_8513)) { + x_8514 = lean_alloc_ctor(1, 2, 0); +} else { + x_8514 = x_8513; +} +lean_ctor_set(x_8514, 0, x_8511); +lean_ctor_set(x_8514, 1, x_8512); +return x_8514; +} +} +} +else +{ +lean_object* x_8515; lean_object* x_8516; lean_object* x_8517; 
lean_object* x_8518; lean_object* x_8519; lean_object* x_8520; lean_object* x_8521; lean_object* x_8522; lean_object* x_8523; +lean_dec(x_8446); +lean_dec(x_8444); +lean_inc(x_8157); +lean_inc(x_153); +if (lean_is_scalar(x_8441)) { + x_8515 = lean_alloc_ctor(7, 2, 0); +} else { + x_8515 = x_8441; + lean_ctor_set_tag(x_8515, 7); +} +lean_ctor_set(x_8515, 0, x_153); +lean_ctor_set(x_8515, 1, x_8157); +x_8516 = lean_ctor_get(x_1, 0); +lean_inc(x_8516); +x_8517 = l_Lean_IR_ToIR_bindVar(x_8516, x_8162, x_4, x_5, x_8440); +x_8518 = lean_ctor_get(x_8517, 0); +lean_inc(x_8518); +x_8519 = lean_ctor_get(x_8517, 1); +lean_inc(x_8519); +lean_dec(x_8517); +x_8520 = lean_ctor_get(x_8518, 0); +lean_inc(x_8520); +x_8521 = lean_ctor_get(x_8518, 1); +lean_inc(x_8521); +lean_dec(x_8518); +x_8522 = lean_box(7); +lean_inc(x_5); +lean_inc(x_4); +lean_inc(x_2); +x_8523 = l_Lean_IR_ToIR_lowerLet___lambda__1(x_2, x_8520, x_8515, x_8522, x_8521, x_4, x_5, x_8519); +if (lean_obj_tag(x_8523) == 0) +{ +lean_object* x_8524; lean_object* x_8525; lean_object* x_8526; lean_object* x_8527; lean_object* x_8528; lean_object* x_8529; lean_object* x_8530; +x_8524 = lean_ctor_get(x_8523, 0); +lean_inc(x_8524); +x_8525 = lean_ctor_get(x_8523, 1); +lean_inc(x_8525); +lean_dec(x_8523); +x_8526 = lean_ctor_get(x_8524, 0); +lean_inc(x_8526); +x_8527 = lean_ctor_get(x_8524, 1); +lean_inc(x_8527); +if (lean_is_exclusive(x_8524)) { + lean_ctor_release(x_8524, 0); + lean_ctor_release(x_8524, 1); + x_8528 = x_8524; +} else { + lean_dec_ref(x_8524); + x_8528 = lean_box(0); +} +if (lean_is_scalar(x_8443)) { + x_8529 = lean_alloc_ctor(1, 1, 0); +} else { + x_8529 = x_8443; +} +lean_ctor_set(x_8529, 0, x_8526); +if (lean_is_scalar(x_8528)) { + x_8530 = lean_alloc_ctor(0, 2, 0); +} else { + x_8530 = x_8528; +} +lean_ctor_set(x_8530, 0, x_8529); +lean_ctor_set(x_8530, 1, x_8527); +x_8407 = x_8530; +x_8408 = x_8525; +goto block_8434; +} +else +{ +lean_object* x_8531; lean_object* x_8532; lean_object* x_8533; lean_object* x_8534; +lean_dec(x_8443); +lean_dec(x_8167); +lean_dec(x_8157); +lean_dec(x_153); +lean_dec(x_5); +lean_dec(x_4); +lean_dec(x_2); +lean_dec(x_1); +x_8531 = lean_ctor_get(x_8523, 0); +lean_inc(x_8531); +x_8532 = lean_ctor_get(x_8523, 1); +lean_inc(x_8532); +if (lean_is_exclusive(x_8523)) { + lean_ctor_release(x_8523, 0); + lean_ctor_release(x_8523, 1); + x_8533 = x_8523; +} else { + lean_dec_ref(x_8523); + x_8533 = lean_box(0); +} +if (lean_is_scalar(x_8533)) { + x_8534 = lean_alloc_ctor(1, 2, 0); +} else { + x_8534 = x_8533; +} +lean_ctor_set(x_8534, 0, x_8531); +lean_ctor_set(x_8534, 1, x_8532); +return x_8534; +} +} +} +block_8434: +{ +lean_object* x_8409; +x_8409 = lean_ctor_get(x_8407, 0); +lean_inc(x_8409); +if (lean_obj_tag(x_8409) == 0) +{ +lean_object* x_8410; lean_object* x_8411; lean_object* x_8412; lean_object* x_8413; lean_object* x_8414; lean_object* x_8415; lean_object* x_8416; lean_object* x_8417; lean_object* x_8418; lean_object* x_8419; +lean_dec(x_8167); +x_8410 = lean_ctor_get(x_8407, 1); +lean_inc(x_8410); +lean_dec(x_8407); +x_8411 = lean_alloc_ctor(6, 2, 0); +lean_ctor_set(x_8411, 0, x_153); +lean_ctor_set(x_8411, 1, x_8157); +x_8412 = lean_ctor_get(x_1, 0); +lean_inc(x_8412); +x_8413 = l_Lean_IR_ToIR_bindVar(x_8412, x_8410, x_4, x_5, x_8408); +x_8414 = lean_ctor_get(x_8413, 0); +lean_inc(x_8414); +x_8415 = lean_ctor_get(x_8413, 1); +lean_inc(x_8415); +lean_dec(x_8413); +x_8416 = lean_ctor_get(x_8414, 0); +lean_inc(x_8416); +x_8417 = lean_ctor_get(x_8414, 1); +lean_inc(x_8417); +lean_dec(x_8414); +x_8418 
= lean_ctor_get(x_1, 2); +lean_inc(x_8418); +lean_dec(x_1); +lean_inc(x_5); +lean_inc(x_4); +x_8419 = l_Lean_IR_ToIR_lowerType(x_8418, x_8417, x_4, x_5, x_8415); +if (lean_obj_tag(x_8419) == 0) +{ +lean_object* x_8420; lean_object* x_8421; lean_object* x_8422; lean_object* x_8423; lean_object* x_8424; +x_8420 = lean_ctor_get(x_8419, 0); +lean_inc(x_8420); +x_8421 = lean_ctor_get(x_8419, 1); +lean_inc(x_8421); +lean_dec(x_8419); +x_8422 = lean_ctor_get(x_8420, 0); +lean_inc(x_8422); +x_8423 = lean_ctor_get(x_8420, 1); +lean_inc(x_8423); +lean_dec(x_8420); +x_8424 = l_Lean_IR_ToIR_lowerLet___lambda__1(x_2, x_8416, x_8411, x_8422, x_8423, x_4, x_5, x_8421); +return x_8424; +} +else +{ +lean_object* x_8425; lean_object* x_8426; lean_object* x_8427; lean_object* x_8428; +lean_dec(x_8416); +lean_dec(x_8411); +lean_dec(x_5); +lean_dec(x_4); +lean_dec(x_2); +x_8425 = lean_ctor_get(x_8419, 0); +lean_inc(x_8425); +x_8426 = lean_ctor_get(x_8419, 1); +lean_inc(x_8426); +if (lean_is_exclusive(x_8419)) { + lean_ctor_release(x_8419, 0); + lean_ctor_release(x_8419, 1); + x_8427 = x_8419; +} else { + lean_dec_ref(x_8419); + x_8427 = lean_box(0); +} +if (lean_is_scalar(x_8427)) { + x_8428 = lean_alloc_ctor(1, 2, 0); +} else { + x_8428 = x_8427; +} +lean_ctor_set(x_8428, 0, x_8425); +lean_ctor_set(x_8428, 1, x_8426); +return x_8428; +} +} +else +{ +lean_object* x_8429; lean_object* x_8430; lean_object* x_8431; lean_object* x_8432; lean_object* x_8433; +lean_dec(x_8157); +lean_dec(x_153); +lean_dec(x_5); +lean_dec(x_4); +lean_dec(x_2); +lean_dec(x_1); +x_8429 = lean_ctor_get(x_8407, 1); +lean_inc(x_8429); +if (lean_is_exclusive(x_8407)) { + lean_ctor_release(x_8407, 0); + lean_ctor_release(x_8407, 1); + x_8430 = x_8407; +} else { + lean_dec_ref(x_8407); + x_8430 = lean_box(0); +} +x_8431 = lean_ctor_get(x_8409, 0); +lean_inc(x_8431); +lean_dec(x_8409); +if (lean_is_scalar(x_8430)) { + x_8432 = lean_alloc_ctor(0, 2, 0); +} else { + x_8432 = x_8430; +} +lean_ctor_set(x_8432, 0, x_8431); +lean_ctor_set(x_8432, 1, x_8429); +if (lean_is_scalar(x_8167)) { + x_8433 = lean_alloc_ctor(0, 2, 0); +} else { + x_8433 = x_8167; +} +lean_ctor_set(x_8433, 0, x_8432); +lean_ctor_set(x_8433, 1, x_8408); +return x_8433; +} +} +} +case 4: +{ +lean_object* x_8535; lean_object* x_8536; uint8_t x_8537; +lean_dec(x_8168); +lean_dec(x_8167); +lean_dec(x_8163); +lean_dec(x_5945); +lean_dec(x_5944); +if (lean_is_exclusive(x_8173)) { + lean_ctor_release(x_8173, 0); + x_8535 = x_8173; +} else { + lean_dec_ref(x_8173); + x_8535 = lean_box(0); +} +x_8536 = l_Lean_IR_ToIR_lowerLet___closed__20; +x_8537 = lean_name_eq(x_153, x_8536); +if (x_8537 == 0) +{ +uint8_t x_8538; lean_object* x_8539; lean_object* x_8540; lean_object* x_8541; lean_object* x_8542; lean_object* x_8543; lean_object* x_8544; lean_object* x_8545; lean_object* x_8546; lean_object* x_8547; +lean_dec(x_8157); +lean_dec(x_2); +lean_dec(x_1); +x_8538 = 1; +x_8539 = l_Lean_IR_ToIR_lowerLet___closed__14; +x_8540 = l_Lean_Name_toString(x_153, x_8538, x_8539); +if (lean_is_scalar(x_8535)) { + x_8541 = lean_alloc_ctor(3, 1, 0); +} else { + x_8541 = x_8535; + lean_ctor_set_tag(x_8541, 3); +} +lean_ctor_set(x_8541, 0, x_8540); +x_8542 = l_Lean_IR_ToIR_lowerLet___closed__22; +x_8543 = lean_alloc_ctor(5, 2, 0); +lean_ctor_set(x_8543, 0, x_8542); +lean_ctor_set(x_8543, 1, x_8541); +x_8544 = l_Lean_IR_ToIR_lowerLet___closed__24; +x_8545 = lean_alloc_ctor(5, 2, 0); +lean_ctor_set(x_8545, 0, x_8543); +lean_ctor_set(x_8545, 1, x_8544); +x_8546 = l_Lean_MessageData_ofFormat(x_8545); +x_8547 = 
l_Lean_throwError___at_Lean_IR_ToIR_lowerLet___spec__1(x_8546, x_8162, x_4, x_5, x_8166); +lean_dec(x_5); +lean_dec(x_4); +lean_dec(x_8162); +return x_8547; +} +else +{ +lean_object* x_8548; lean_object* x_8549; lean_object* x_8550; +lean_dec(x_8535); +lean_dec(x_153); +x_8548 = l_Lean_IR_instInhabitedArg; +x_8549 = lean_unsigned_to_nat(2u); +x_8550 = lean_array_get(x_8548, x_8157, x_8549); +lean_dec(x_8157); +if (lean_obj_tag(x_8550) == 0) +{ +lean_object* x_8551; lean_object* x_8552; lean_object* x_8553; lean_object* x_8554; lean_object* x_8555; lean_object* x_8556; lean_object* x_8557; +x_8551 = lean_ctor_get(x_8550, 0); +lean_inc(x_8551); +lean_dec(x_8550); +x_8552 = lean_ctor_get(x_1, 0); +lean_inc(x_8552); +lean_dec(x_1); +x_8553 = l_Lean_IR_ToIR_bindVarToVarId(x_8552, x_8551, x_8162, x_4, x_5, x_8166); +x_8554 = lean_ctor_get(x_8553, 0); +lean_inc(x_8554); +x_8555 = lean_ctor_get(x_8553, 1); +lean_inc(x_8555); +lean_dec(x_8553); +x_8556 = lean_ctor_get(x_8554, 1); +lean_inc(x_8556); +lean_dec(x_8554); +x_8557 = l_Lean_IR_ToIR_lowerCode(x_2, x_8556, x_4, x_5, x_8555); +return x_8557; +} +else +{ +lean_object* x_8558; lean_object* x_8559; lean_object* x_8560; lean_object* x_8561; lean_object* x_8562; lean_object* x_8563; +x_8558 = lean_ctor_get(x_1, 0); +lean_inc(x_8558); +lean_dec(x_1); +x_8559 = l_Lean_IR_ToIR_bindErased(x_8558, x_8162, x_4, x_5, x_8166); +x_8560 = lean_ctor_get(x_8559, 0); +lean_inc(x_8560); +x_8561 = lean_ctor_get(x_8559, 1); +lean_inc(x_8561); +lean_dec(x_8559); +x_8562 = lean_ctor_get(x_8560, 1); +lean_inc(x_8562); +lean_dec(x_8560); +x_8563 = l_Lean_IR_ToIR_lowerCode(x_2, x_8562, x_4, x_5, x_8561); +return x_8563; +} +} +} +case 5: +{ +lean_object* x_8564; lean_object* x_8565; +lean_dec(x_8173); +lean_dec(x_8168); +lean_dec(x_8167); +lean_dec(x_8163); +lean_dec(x_8157); +lean_dec(x_5945); +lean_dec(x_5944); +lean_dec(x_153); +lean_dec(x_2); +lean_dec(x_1); +x_8564 = l_Lean_IR_ToIR_lowerLet___closed__26; +x_8565 = l_panic___at_Lean_IR_ToIR_lowerAlt_loop___spec__1(x_8564, x_8162, x_4, x_5, x_8166); +return x_8565; +} +case 6: +{ +lean_object* x_8566; uint8_t x_8567; +x_8566 = lean_ctor_get(x_8173, 0); +lean_inc(x_8566); +lean_dec(x_8173); +lean_inc(x_153); +x_8567 = l_Lean_isExtern(x_8168, x_153); +if (x_8567 == 0) +{ +lean_object* x_8568; +lean_dec(x_8167); +lean_dec(x_8163); +lean_dec(x_8157); +lean_inc(x_5); +lean_inc(x_4); +x_8568 = l_Lean_IR_ToIR_getCtorInfo(x_153, x_8162, x_4, x_5, x_8166); +if (lean_obj_tag(x_8568) == 0) +{ +lean_object* x_8569; lean_object* x_8570; lean_object* x_8571; lean_object* x_8572; lean_object* x_8573; lean_object* x_8574; lean_object* x_8575; lean_object* x_8576; lean_object* x_8577; lean_object* x_8578; lean_object* x_8579; lean_object* x_8580; lean_object* x_8581; lean_object* x_8582; lean_object* x_8583; lean_object* x_8584; lean_object* x_8585; lean_object* x_8586; lean_object* x_8587; lean_object* x_8588; +x_8569 = lean_ctor_get(x_8568, 0); +lean_inc(x_8569); +x_8570 = lean_ctor_get(x_8569, 0); +lean_inc(x_8570); +x_8571 = lean_ctor_get(x_8568, 1); +lean_inc(x_8571); +lean_dec(x_8568); +x_8572 = lean_ctor_get(x_8569, 1); +lean_inc(x_8572); +lean_dec(x_8569); +x_8573 = lean_ctor_get(x_8570, 0); +lean_inc(x_8573); +x_8574 = lean_ctor_get(x_8570, 1); +lean_inc(x_8574); +lean_dec(x_8570); +x_8575 = lean_ctor_get(x_8566, 3); +lean_inc(x_8575); +lean_dec(x_8566); +x_8576 = lean_array_get_size(x_5944); +x_8577 = l_Array_extract___rarg(x_5944, x_8575, x_8576); +lean_dec(x_8576); +lean_dec(x_5944); +x_8578 = 
lean_array_get_size(x_8574); +x_8579 = lean_unsigned_to_nat(0u); +x_8580 = lean_unsigned_to_nat(1u); +if (lean_is_scalar(x_5945)) { + x_8581 = lean_alloc_ctor(0, 3, 0); +} else { + x_8581 = x_5945; + lean_ctor_set_tag(x_8581, 0); +} +lean_ctor_set(x_8581, 0, x_8579); +lean_ctor_set(x_8581, 1, x_8578); +lean_ctor_set(x_8581, 2, x_8580); +x_8582 = l_Lean_IR_ToIR_lowerLet___closed__27; +x_8583 = l_Std_Range_forIn_x27_loop___at_Lean_IR_ToIR_lowerLet___spec__4(x_8574, x_8577, x_8581, x_8581, x_8582, x_8579, lean_box(0), lean_box(0), x_8572, x_4, x_5, x_8571); +lean_dec(x_8581); +x_8584 = lean_ctor_get(x_8583, 0); +lean_inc(x_8584); +x_8585 = lean_ctor_get(x_8583, 1); +lean_inc(x_8585); +lean_dec(x_8583); +x_8586 = lean_ctor_get(x_8584, 0); +lean_inc(x_8586); +x_8587 = lean_ctor_get(x_8584, 1); +lean_inc(x_8587); +lean_dec(x_8584); +x_8588 = l_Lean_IR_ToIR_lowerLet___lambda__4(x_1, x_2, x_8573, x_8574, x_8577, x_8586, x_8587, x_4, x_5, x_8585); +lean_dec(x_8577); +lean_dec(x_8574); +return x_8588; +} +else +{ +lean_object* x_8589; lean_object* x_8590; lean_object* x_8591; lean_object* x_8592; +lean_dec(x_8566); +lean_dec(x_5945); +lean_dec(x_5944); +lean_dec(x_5); +lean_dec(x_4); +lean_dec(x_2); +lean_dec(x_1); +x_8589 = lean_ctor_get(x_8568, 0); +lean_inc(x_8589); +x_8590 = lean_ctor_get(x_8568, 1); +lean_inc(x_8590); +if (lean_is_exclusive(x_8568)) { + lean_ctor_release(x_8568, 0); + lean_ctor_release(x_8568, 1); + x_8591 = x_8568; +} else { + lean_dec_ref(x_8568); + x_8591 = lean_box(0); +} +if (lean_is_scalar(x_8591)) { + x_8592 = lean_alloc_ctor(1, 2, 0); +} else { + x_8592 = x_8591; +} +lean_ctor_set(x_8592, 0, x_8589); +lean_ctor_set(x_8592, 1, x_8590); +return x_8592; +} +} +else +{ +lean_object* x_8593; lean_object* x_8594; lean_object* x_8621; lean_object* x_8622; +lean_dec(x_8566); +lean_dec(x_5945); +lean_dec(x_5944); +lean_inc(x_153); +x_8621 = l_Lean_Compiler_LCNF_getMonoDecl_x3f(x_153, x_4, x_5, x_8166); +x_8622 = lean_ctor_get(x_8621, 0); +lean_inc(x_8622); +if (lean_obj_tag(x_8622) == 0) +{ +lean_object* x_8623; lean_object* x_8624; lean_object* x_8625; +x_8623 = lean_ctor_get(x_8621, 1); +lean_inc(x_8623); +lean_dec(x_8621); +x_8624 = lean_box(0); +if (lean_is_scalar(x_8163)) { + x_8625 = lean_alloc_ctor(0, 2, 0); +} else { + x_8625 = x_8163; +} +lean_ctor_set(x_8625, 0, x_8624); +lean_ctor_set(x_8625, 1, x_8162); +x_8593 = x_8625; +x_8594 = x_8623; +goto block_8620; +} +else +{ +lean_object* x_8626; lean_object* x_8627; lean_object* x_8628; lean_object* x_8629; lean_object* x_8630; lean_object* x_8631; lean_object* x_8632; uint8_t x_8633; +lean_dec(x_8163); +x_8626 = lean_ctor_get(x_8621, 1); +lean_inc(x_8626); +if (lean_is_exclusive(x_8621)) { + lean_ctor_release(x_8621, 0); + lean_ctor_release(x_8621, 1); + x_8627 = x_8621; +} else { + lean_dec_ref(x_8621); + x_8627 = lean_box(0); +} +x_8628 = lean_ctor_get(x_8622, 0); +lean_inc(x_8628); +if (lean_is_exclusive(x_8622)) { + lean_ctor_release(x_8622, 0); + x_8629 = x_8622; +} else { + lean_dec_ref(x_8622); + x_8629 = lean_box(0); +} +x_8630 = lean_array_get_size(x_8157); +x_8631 = lean_ctor_get(x_8628, 3); +lean_inc(x_8631); +lean_dec(x_8628); +x_8632 = lean_array_get_size(x_8631); +lean_dec(x_8631); +x_8633 = lean_nat_dec_lt(x_8630, x_8632); +if (x_8633 == 0) +{ +uint8_t x_8634; +x_8634 = lean_nat_dec_eq(x_8630, x_8632); +if (x_8634 == 0) +{ +lean_object* x_8635; lean_object* x_8636; lean_object* x_8637; lean_object* x_8638; lean_object* x_8639; lean_object* x_8640; lean_object* x_8641; lean_object* x_8642; lean_object* 
x_8643; lean_object* x_8644; lean_object* x_8645; lean_object* x_8646; lean_object* x_8647; lean_object* x_8648; lean_object* x_8649; lean_object* x_8650; lean_object* x_8651; +x_8635 = lean_unsigned_to_nat(0u); +x_8636 = l_Array_extract___rarg(x_8157, x_8635, x_8632); +x_8637 = l_Array_extract___rarg(x_8157, x_8632, x_8630); +lean_dec(x_8630); +lean_inc(x_153); +if (lean_is_scalar(x_8627)) { + x_8638 = lean_alloc_ctor(6, 2, 0); +} else { + x_8638 = x_8627; + lean_ctor_set_tag(x_8638, 6); +} +lean_ctor_set(x_8638, 0, x_153); +lean_ctor_set(x_8638, 1, x_8636); +x_8639 = lean_ctor_get(x_1, 0); +lean_inc(x_8639); +x_8640 = l_Lean_IR_ToIR_bindVar(x_8639, x_8162, x_4, x_5, x_8626); +x_8641 = lean_ctor_get(x_8640, 0); +lean_inc(x_8641); +x_8642 = lean_ctor_get(x_8640, 1); +lean_inc(x_8642); +lean_dec(x_8640); +x_8643 = lean_ctor_get(x_8641, 0); +lean_inc(x_8643); +x_8644 = lean_ctor_get(x_8641, 1); +lean_inc(x_8644); +lean_dec(x_8641); +x_8645 = l_Lean_IR_ToIR_newVar(x_8644, x_4, x_5, x_8642); +x_8646 = lean_ctor_get(x_8645, 0); +lean_inc(x_8646); +x_8647 = lean_ctor_get(x_8645, 1); +lean_inc(x_8647); +lean_dec(x_8645); +x_8648 = lean_ctor_get(x_8646, 0); +lean_inc(x_8648); +x_8649 = lean_ctor_get(x_8646, 1); +lean_inc(x_8649); +lean_dec(x_8646); +x_8650 = lean_ctor_get(x_1, 2); +lean_inc(x_8650); +lean_inc(x_5); +lean_inc(x_4); +x_8651 = l_Lean_IR_ToIR_lowerType(x_8650, x_8649, x_4, x_5, x_8647); +if (lean_obj_tag(x_8651) == 0) +{ +lean_object* x_8652; lean_object* x_8653; lean_object* x_8654; lean_object* x_8655; lean_object* x_8656; +x_8652 = lean_ctor_get(x_8651, 0); +lean_inc(x_8652); +x_8653 = lean_ctor_get(x_8651, 1); +lean_inc(x_8653); +lean_dec(x_8651); +x_8654 = lean_ctor_get(x_8652, 0); +lean_inc(x_8654); +x_8655 = lean_ctor_get(x_8652, 1); +lean_inc(x_8655); +lean_dec(x_8652); +lean_inc(x_5); +lean_inc(x_4); +lean_inc(x_2); +x_8656 = l_Lean_IR_ToIR_lowerLet___lambda__3(x_2, x_8648, x_8637, x_8643, x_8638, x_8654, x_8655, x_4, x_5, x_8653); +if (lean_obj_tag(x_8656) == 0) +{ +lean_object* x_8657; lean_object* x_8658; lean_object* x_8659; lean_object* x_8660; lean_object* x_8661; lean_object* x_8662; lean_object* x_8663; +x_8657 = lean_ctor_get(x_8656, 0); +lean_inc(x_8657); +x_8658 = lean_ctor_get(x_8656, 1); +lean_inc(x_8658); +lean_dec(x_8656); +x_8659 = lean_ctor_get(x_8657, 0); +lean_inc(x_8659); +x_8660 = lean_ctor_get(x_8657, 1); +lean_inc(x_8660); +if (lean_is_exclusive(x_8657)) { + lean_ctor_release(x_8657, 0); + lean_ctor_release(x_8657, 1); + x_8661 = x_8657; +} else { + lean_dec_ref(x_8657); + x_8661 = lean_box(0); +} +if (lean_is_scalar(x_8629)) { + x_8662 = lean_alloc_ctor(1, 1, 0); +} else { + x_8662 = x_8629; +} +lean_ctor_set(x_8662, 0, x_8659); +if (lean_is_scalar(x_8661)) { + x_8663 = lean_alloc_ctor(0, 2, 0); +} else { + x_8663 = x_8661; +} +lean_ctor_set(x_8663, 0, x_8662); +lean_ctor_set(x_8663, 1, x_8660); +x_8593 = x_8663; +x_8594 = x_8658; +goto block_8620; +} +else +{ +lean_object* x_8664; lean_object* x_8665; lean_object* x_8666; lean_object* x_8667; +lean_dec(x_8629); +lean_dec(x_8167); +lean_dec(x_8157); +lean_dec(x_153); +lean_dec(x_5); +lean_dec(x_4); +lean_dec(x_2); +lean_dec(x_1); +x_8664 = lean_ctor_get(x_8656, 0); +lean_inc(x_8664); +x_8665 = lean_ctor_get(x_8656, 1); +lean_inc(x_8665); +if (lean_is_exclusive(x_8656)) { + lean_ctor_release(x_8656, 0); + lean_ctor_release(x_8656, 1); + x_8666 = x_8656; +} else { + lean_dec_ref(x_8656); + x_8666 = lean_box(0); +} +if (lean_is_scalar(x_8666)) { + x_8667 = lean_alloc_ctor(1, 2, 0); +} else { + x_8667 = 
x_8666; +} +lean_ctor_set(x_8667, 0, x_8664); +lean_ctor_set(x_8667, 1, x_8665); +return x_8667; +} +} +else +{ +lean_object* x_8668; lean_object* x_8669; lean_object* x_8670; lean_object* x_8671; +lean_dec(x_8648); +lean_dec(x_8643); +lean_dec(x_8638); +lean_dec(x_8637); +lean_dec(x_8629); +lean_dec(x_8167); +lean_dec(x_8157); +lean_dec(x_153); +lean_dec(x_5); +lean_dec(x_4); +lean_dec(x_2); +lean_dec(x_1); +x_8668 = lean_ctor_get(x_8651, 0); +lean_inc(x_8668); +x_8669 = lean_ctor_get(x_8651, 1); +lean_inc(x_8669); +if (lean_is_exclusive(x_8651)) { + lean_ctor_release(x_8651, 0); + lean_ctor_release(x_8651, 1); + x_8670 = x_8651; +} else { + lean_dec_ref(x_8651); + x_8670 = lean_box(0); +} +if (lean_is_scalar(x_8670)) { + x_8671 = lean_alloc_ctor(1, 2, 0); +} else { + x_8671 = x_8670; +} +lean_ctor_set(x_8671, 0, x_8668); +lean_ctor_set(x_8671, 1, x_8669); +return x_8671; +} +} +else +{ +lean_object* x_8672; lean_object* x_8673; lean_object* x_8674; lean_object* x_8675; lean_object* x_8676; lean_object* x_8677; lean_object* x_8678; lean_object* x_8679; lean_object* x_8680; +lean_dec(x_8632); +lean_dec(x_8630); +lean_inc(x_8157); +lean_inc(x_153); +if (lean_is_scalar(x_8627)) { + x_8672 = lean_alloc_ctor(6, 2, 0); +} else { + x_8672 = x_8627; + lean_ctor_set_tag(x_8672, 6); +} +lean_ctor_set(x_8672, 0, x_153); +lean_ctor_set(x_8672, 1, x_8157); +x_8673 = lean_ctor_get(x_1, 0); +lean_inc(x_8673); +x_8674 = l_Lean_IR_ToIR_bindVar(x_8673, x_8162, x_4, x_5, x_8626); +x_8675 = lean_ctor_get(x_8674, 0); +lean_inc(x_8675); +x_8676 = lean_ctor_get(x_8674, 1); +lean_inc(x_8676); +lean_dec(x_8674); +x_8677 = lean_ctor_get(x_8675, 0); +lean_inc(x_8677); +x_8678 = lean_ctor_get(x_8675, 1); +lean_inc(x_8678); +lean_dec(x_8675); +x_8679 = lean_ctor_get(x_1, 2); +lean_inc(x_8679); +lean_inc(x_5); +lean_inc(x_4); +x_8680 = l_Lean_IR_ToIR_lowerType(x_8679, x_8678, x_4, x_5, x_8676); +if (lean_obj_tag(x_8680) == 0) +{ +lean_object* x_8681; lean_object* x_8682; lean_object* x_8683; lean_object* x_8684; lean_object* x_8685; +x_8681 = lean_ctor_get(x_8680, 0); +lean_inc(x_8681); +x_8682 = lean_ctor_get(x_8680, 1); +lean_inc(x_8682); +lean_dec(x_8680); +x_8683 = lean_ctor_get(x_8681, 0); +lean_inc(x_8683); +x_8684 = lean_ctor_get(x_8681, 1); +lean_inc(x_8684); +lean_dec(x_8681); +lean_inc(x_5); +lean_inc(x_4); +lean_inc(x_2); +x_8685 = l_Lean_IR_ToIR_lowerLet___lambda__1(x_2, x_8677, x_8672, x_8683, x_8684, x_4, x_5, x_8682); +if (lean_obj_tag(x_8685) == 0) +{ +lean_object* x_8686; lean_object* x_8687; lean_object* x_8688; lean_object* x_8689; lean_object* x_8690; lean_object* x_8691; lean_object* x_8692; +x_8686 = lean_ctor_get(x_8685, 0); +lean_inc(x_8686); +x_8687 = lean_ctor_get(x_8685, 1); +lean_inc(x_8687); +lean_dec(x_8685); +x_8688 = lean_ctor_get(x_8686, 0); +lean_inc(x_8688); +x_8689 = lean_ctor_get(x_8686, 1); +lean_inc(x_8689); +if (lean_is_exclusive(x_8686)) { + lean_ctor_release(x_8686, 0); + lean_ctor_release(x_8686, 1); + x_8690 = x_8686; +} else { + lean_dec_ref(x_8686); + x_8690 = lean_box(0); +} +if (lean_is_scalar(x_8629)) { + x_8691 = lean_alloc_ctor(1, 1, 0); +} else { + x_8691 = x_8629; +} +lean_ctor_set(x_8691, 0, x_8688); +if (lean_is_scalar(x_8690)) { + x_8692 = lean_alloc_ctor(0, 2, 0); +} else { + x_8692 = x_8690; +} +lean_ctor_set(x_8692, 0, x_8691); +lean_ctor_set(x_8692, 1, x_8689); +x_8593 = x_8692; +x_8594 = x_8687; +goto block_8620; +} +else +{ +lean_object* x_8693; lean_object* x_8694; lean_object* x_8695; lean_object* x_8696; +lean_dec(x_8629); +lean_dec(x_8167); 
+lean_dec(x_8157); +lean_dec(x_153); +lean_dec(x_5); +lean_dec(x_4); +lean_dec(x_2); +lean_dec(x_1); +x_8693 = lean_ctor_get(x_8685, 0); +lean_inc(x_8693); +x_8694 = lean_ctor_get(x_8685, 1); +lean_inc(x_8694); +if (lean_is_exclusive(x_8685)) { + lean_ctor_release(x_8685, 0); + lean_ctor_release(x_8685, 1); + x_8695 = x_8685; +} else { + lean_dec_ref(x_8685); + x_8695 = lean_box(0); +} +if (lean_is_scalar(x_8695)) { + x_8696 = lean_alloc_ctor(1, 2, 0); +} else { + x_8696 = x_8695; +} +lean_ctor_set(x_8696, 0, x_8693); +lean_ctor_set(x_8696, 1, x_8694); +return x_8696; +} +} +else +{ +lean_object* x_8697; lean_object* x_8698; lean_object* x_8699; lean_object* x_8700; +lean_dec(x_8677); +lean_dec(x_8672); +lean_dec(x_8629); +lean_dec(x_8167); +lean_dec(x_8157); +lean_dec(x_153); +lean_dec(x_5); +lean_dec(x_4); +lean_dec(x_2); +lean_dec(x_1); +x_8697 = lean_ctor_get(x_8680, 0); +lean_inc(x_8697); +x_8698 = lean_ctor_get(x_8680, 1); +lean_inc(x_8698); +if (lean_is_exclusive(x_8680)) { + lean_ctor_release(x_8680, 0); + lean_ctor_release(x_8680, 1); + x_8699 = x_8680; +} else { + lean_dec_ref(x_8680); + x_8699 = lean_box(0); +} +if (lean_is_scalar(x_8699)) { + x_8700 = lean_alloc_ctor(1, 2, 0); +} else { + x_8700 = x_8699; +} +lean_ctor_set(x_8700, 0, x_8697); +lean_ctor_set(x_8700, 1, x_8698); +return x_8700; +} +} +} +else +{ +lean_object* x_8701; lean_object* x_8702; lean_object* x_8703; lean_object* x_8704; lean_object* x_8705; lean_object* x_8706; lean_object* x_8707; lean_object* x_8708; lean_object* x_8709; +lean_dec(x_8632); +lean_dec(x_8630); +lean_inc(x_8157); +lean_inc(x_153); +if (lean_is_scalar(x_8627)) { + x_8701 = lean_alloc_ctor(7, 2, 0); +} else { + x_8701 = x_8627; + lean_ctor_set_tag(x_8701, 7); +} +lean_ctor_set(x_8701, 0, x_153); +lean_ctor_set(x_8701, 1, x_8157); +x_8702 = lean_ctor_get(x_1, 0); +lean_inc(x_8702); +x_8703 = l_Lean_IR_ToIR_bindVar(x_8702, x_8162, x_4, x_5, x_8626); +x_8704 = lean_ctor_get(x_8703, 0); +lean_inc(x_8704); +x_8705 = lean_ctor_get(x_8703, 1); +lean_inc(x_8705); +lean_dec(x_8703); +x_8706 = lean_ctor_get(x_8704, 0); +lean_inc(x_8706); +x_8707 = lean_ctor_get(x_8704, 1); +lean_inc(x_8707); +lean_dec(x_8704); +x_8708 = lean_box(7); +lean_inc(x_5); +lean_inc(x_4); +lean_inc(x_2); +x_8709 = l_Lean_IR_ToIR_lowerLet___lambda__1(x_2, x_8706, x_8701, x_8708, x_8707, x_4, x_5, x_8705); +if (lean_obj_tag(x_8709) == 0) +{ +lean_object* x_8710; lean_object* x_8711; lean_object* x_8712; lean_object* x_8713; lean_object* x_8714; lean_object* x_8715; lean_object* x_8716; +x_8710 = lean_ctor_get(x_8709, 0); +lean_inc(x_8710); +x_8711 = lean_ctor_get(x_8709, 1); +lean_inc(x_8711); +lean_dec(x_8709); +x_8712 = lean_ctor_get(x_8710, 0); +lean_inc(x_8712); +x_8713 = lean_ctor_get(x_8710, 1); +lean_inc(x_8713); +if (lean_is_exclusive(x_8710)) { + lean_ctor_release(x_8710, 0); + lean_ctor_release(x_8710, 1); + x_8714 = x_8710; +} else { + lean_dec_ref(x_8710); + x_8714 = lean_box(0); +} +if (lean_is_scalar(x_8629)) { + x_8715 = lean_alloc_ctor(1, 1, 0); +} else { + x_8715 = x_8629; +} +lean_ctor_set(x_8715, 0, x_8712); +if (lean_is_scalar(x_8714)) { + x_8716 = lean_alloc_ctor(0, 2, 0); +} else { + x_8716 = x_8714; +} +lean_ctor_set(x_8716, 0, x_8715); +lean_ctor_set(x_8716, 1, x_8713); +x_8593 = x_8716; +x_8594 = x_8711; +goto block_8620; +} +else +{ +lean_object* x_8717; lean_object* x_8718; lean_object* x_8719; lean_object* x_8720; +lean_dec(x_8629); +lean_dec(x_8167); +lean_dec(x_8157); +lean_dec(x_153); +lean_dec(x_5); +lean_dec(x_4); +lean_dec(x_2); 
+lean_dec(x_1); +x_8717 = lean_ctor_get(x_8709, 0); +lean_inc(x_8717); +x_8718 = lean_ctor_get(x_8709, 1); +lean_inc(x_8718); +if (lean_is_exclusive(x_8709)) { + lean_ctor_release(x_8709, 0); + lean_ctor_release(x_8709, 1); + x_8719 = x_8709; +} else { + lean_dec_ref(x_8709); + x_8719 = lean_box(0); +} +if (lean_is_scalar(x_8719)) { + x_8720 = lean_alloc_ctor(1, 2, 0); +} else { + x_8720 = x_8719; +} +lean_ctor_set(x_8720, 0, x_8717); +lean_ctor_set(x_8720, 1, x_8718); +return x_8720; +} +} +} +block_8620: +{ +lean_object* x_8595; +x_8595 = lean_ctor_get(x_8593, 0); +lean_inc(x_8595); +if (lean_obj_tag(x_8595) == 0) +{ +lean_object* x_8596; lean_object* x_8597; lean_object* x_8598; lean_object* x_8599; lean_object* x_8600; lean_object* x_8601; lean_object* x_8602; lean_object* x_8603; lean_object* x_8604; lean_object* x_8605; +lean_dec(x_8167); +x_8596 = lean_ctor_get(x_8593, 1); +lean_inc(x_8596); +lean_dec(x_8593); +x_8597 = lean_alloc_ctor(6, 2, 0); +lean_ctor_set(x_8597, 0, x_153); +lean_ctor_set(x_8597, 1, x_8157); +x_8598 = lean_ctor_get(x_1, 0); +lean_inc(x_8598); +x_8599 = l_Lean_IR_ToIR_bindVar(x_8598, x_8596, x_4, x_5, x_8594); +x_8600 = lean_ctor_get(x_8599, 0); +lean_inc(x_8600); +x_8601 = lean_ctor_get(x_8599, 1); +lean_inc(x_8601); +lean_dec(x_8599); +x_8602 = lean_ctor_get(x_8600, 0); +lean_inc(x_8602); +x_8603 = lean_ctor_get(x_8600, 1); +lean_inc(x_8603); +lean_dec(x_8600); +x_8604 = lean_ctor_get(x_1, 2); +lean_inc(x_8604); +lean_dec(x_1); +lean_inc(x_5); +lean_inc(x_4); +x_8605 = l_Lean_IR_ToIR_lowerType(x_8604, x_8603, x_4, x_5, x_8601); +if (lean_obj_tag(x_8605) == 0) +{ +lean_object* x_8606; lean_object* x_8607; lean_object* x_8608; lean_object* x_8609; lean_object* x_8610; +x_8606 = lean_ctor_get(x_8605, 0); +lean_inc(x_8606); +x_8607 = lean_ctor_get(x_8605, 1); +lean_inc(x_8607); +lean_dec(x_8605); +x_8608 = lean_ctor_get(x_8606, 0); +lean_inc(x_8608); +x_8609 = lean_ctor_get(x_8606, 1); +lean_inc(x_8609); +lean_dec(x_8606); +x_8610 = l_Lean_IR_ToIR_lowerLet___lambda__1(x_2, x_8602, x_8597, x_8608, x_8609, x_4, x_5, x_8607); +return x_8610; +} +else +{ +lean_object* x_8611; lean_object* x_8612; lean_object* x_8613; lean_object* x_8614; +lean_dec(x_8602); +lean_dec(x_8597); +lean_dec(x_5); +lean_dec(x_4); +lean_dec(x_2); +x_8611 = lean_ctor_get(x_8605, 0); +lean_inc(x_8611); +x_8612 = lean_ctor_get(x_8605, 1); +lean_inc(x_8612); +if (lean_is_exclusive(x_8605)) { + lean_ctor_release(x_8605, 0); + lean_ctor_release(x_8605, 1); + x_8613 = x_8605; +} else { + lean_dec_ref(x_8605); + x_8613 = lean_box(0); +} +if (lean_is_scalar(x_8613)) { + x_8614 = lean_alloc_ctor(1, 2, 0); +} else { + x_8614 = x_8613; +} +lean_ctor_set(x_8614, 0, x_8611); +lean_ctor_set(x_8614, 1, x_8612); +return x_8614; +} +} +else +{ +lean_object* x_8615; lean_object* x_8616; lean_object* x_8617; lean_object* x_8618; lean_object* x_8619; +lean_dec(x_8157); +lean_dec(x_153); +lean_dec(x_5); +lean_dec(x_4); +lean_dec(x_2); +lean_dec(x_1); +x_8615 = lean_ctor_get(x_8593, 1); +lean_inc(x_8615); +if (lean_is_exclusive(x_8593)) { + lean_ctor_release(x_8593, 0); + lean_ctor_release(x_8593, 1); + x_8616 = x_8593; +} else { + lean_dec_ref(x_8593); + x_8616 = lean_box(0); +} +x_8617 = lean_ctor_get(x_8595, 0); +lean_inc(x_8617); +lean_dec(x_8595); +if (lean_is_scalar(x_8616)) { + x_8618 = lean_alloc_ctor(0, 2, 0); +} else { + x_8618 = x_8616; +} +lean_ctor_set(x_8618, 0, x_8617); +lean_ctor_set(x_8618, 1, x_8615); +if (lean_is_scalar(x_8167)) { + x_8619 = lean_alloc_ctor(0, 2, 0); +} else { + x_8619 = x_8167; 
+} +lean_ctor_set(x_8619, 0, x_8618); +lean_ctor_set(x_8619, 1, x_8594); +return x_8619; +} +} +} +} +default: +{ +lean_object* x_8721; uint8_t x_8722; lean_object* x_8723; lean_object* x_8724; lean_object* x_8725; lean_object* x_8726; lean_object* x_8727; lean_object* x_8728; lean_object* x_8729; lean_object* x_8730; lean_object* x_8731; +lean_dec(x_8168); +lean_dec(x_8167); +lean_dec(x_8163); +lean_dec(x_8157); +lean_dec(x_5945); +lean_dec(x_5944); +lean_dec(x_2); +lean_dec(x_1); +if (lean_is_exclusive(x_8173)) { + lean_ctor_release(x_8173, 0); + x_8721 = x_8173; +} else { + lean_dec_ref(x_8173); + x_8721 = lean_box(0); +} +x_8722 = 1; +x_8723 = l_Lean_IR_ToIR_lowerLet___closed__14; +x_8724 = l_Lean_Name_toString(x_153, x_8722, x_8723); +if (lean_is_scalar(x_8721)) { + x_8725 = lean_alloc_ctor(3, 1, 0); +} else { + x_8725 = x_8721; + lean_ctor_set_tag(x_8725, 3); +} +lean_ctor_set(x_8725, 0, x_8724); +x_8726 = l_Lean_IR_ToIR_lowerLet___closed__29; +x_8727 = lean_alloc_ctor(5, 2, 0); +lean_ctor_set(x_8727, 0, x_8726); +lean_ctor_set(x_8727, 1, x_8725); +x_8728 = l_Lean_IR_ToIR_lowerLet___closed__31; +x_8729 = lean_alloc_ctor(5, 2, 0); +lean_ctor_set(x_8729, 0, x_8727); +lean_ctor_set(x_8729, 1, x_8728); +x_8730 = l_Lean_MessageData_ofFormat(x_8729); +x_8731 = l_Lean_throwError___at_Lean_IR_ToIR_lowerLet___spec__1(x_8730, x_8162, x_4, x_5, x_8166); +lean_dec(x_5); +lean_dec(x_4); +lean_dec(x_8162); +return x_8731; +} +} +} +} +else +{ +lean_object* x_8732; lean_object* x_8733; lean_object* x_8734; lean_object* x_8735; lean_object* x_8736; +lean_dec(x_8157); +lean_dec(x_5945); +lean_dec(x_5944); +lean_dec(x_153); +lean_dec(x_5); +lean_dec(x_4); +lean_dec(x_2); +lean_dec(x_1); +x_8732 = lean_ctor_get(x_8159, 1); +lean_inc(x_8732); +if (lean_is_exclusive(x_8159)) { + lean_ctor_release(x_8159, 0); + lean_ctor_release(x_8159, 1); + x_8733 = x_8159; +} else { + lean_dec_ref(x_8159); + x_8733 = lean_box(0); +} +x_8734 = lean_ctor_get(x_8161, 0); +lean_inc(x_8734); +lean_dec(x_8161); +if (lean_is_scalar(x_8733)) { + x_8735 = lean_alloc_ctor(0, 2, 0); +} else { + x_8735 = x_8733; +} +lean_ctor_set(x_8735, 0, x_8734); +lean_ctor_set(x_8735, 1, x_8732); +if (lean_is_scalar(x_5955)) { + x_8736 = lean_alloc_ctor(0, 2, 0); +} else { + x_8736 = x_5955; +} +lean_ctor_set(x_8736, 0, x_8735); +lean_ctor_set(x_8736, 1, x_8160); +return x_8736; +} +} +} +} +else +{ +uint8_t x_8838; +lean_dec(x_5945); +lean_dec(x_5944); +lean_dec(x_153); +lean_dec(x_5); +lean_dec(x_4); +lean_dec(x_2); +lean_dec(x_1); +x_8838 = !lean_is_exclusive(x_5952); +if (x_8838 == 0) +{ +return x_5952; +} +else +{ +lean_object* x_8839; lean_object* x_8840; lean_object* x_8841; +x_8839 = lean_ctor_get(x_5952, 0); +x_8840 = lean_ctor_get(x_5952, 1); +lean_inc(x_8840); +lean_inc(x_8839); +lean_dec(x_5952); +x_8841 = lean_alloc_ctor(1, 2, 0); +lean_ctor_set(x_8841, 0, x_8839); +lean_ctor_set(x_8841, 1, x_8840); +return x_8841; +} +} +} +else +{ +lean_object* x_8842; uint8_t x_8843; +lean_dec(x_153); +x_8842 = l_Lean_IR_ToIR_lowerLet___closed__33; +x_8843 = lean_string_dec_eq(x_5946, x_8842); +if (x_8843 == 0) +{ +lean_object* x_8844; lean_object* x_8845; size_t x_8846; size_t x_8847; lean_object* x_8848; +x_8844 = l_Lean_Name_str___override(x_5943, x_5948); +x_8845 = l_Lean_Name_str___override(x_8844, x_5946); +x_8846 = lean_array_size(x_5944); +x_8847 = 0; +lean_inc(x_5); +lean_inc(x_4); +lean_inc(x_5944); +x_8848 = l_Array_mapMUnsafe_map___at_Lean_IR_ToIR_lowerCode___spec__2(x_8846, x_8847, x_5944, x_3, x_4, x_5, x_6); +if 
(lean_obj_tag(x_8848) == 0) +{ +lean_object* x_8849; lean_object* x_8850; lean_object* x_8851; uint8_t x_8852; +x_8849 = lean_ctor_get(x_8848, 0); +lean_inc(x_8849); +x_8850 = lean_ctor_get(x_8848, 1); +lean_inc(x_8850); +if (lean_is_exclusive(x_8848)) { + lean_ctor_release(x_8848, 0); + lean_ctor_release(x_8848, 1); + x_8851 = x_8848; +} else { + lean_dec_ref(x_8848); + x_8851 = lean_box(0); +} +x_8852 = !lean_is_exclusive(x_8849); +if (x_8852 == 0) +{ +lean_object* x_8853; lean_object* x_8854; lean_object* x_8855; lean_object* x_8856; lean_object* x_10773; lean_object* x_10774; +x_8853 = lean_ctor_get(x_8849, 0); +x_8854 = lean_ctor_get(x_8849, 1); +lean_inc(x_8845); +x_10773 = l_Lean_Compiler_LCNF_getMonoDecl_x3f(x_8845, x_4, x_5, x_8850); +x_10774 = lean_ctor_get(x_10773, 0); +lean_inc(x_10774); +if (lean_obj_tag(x_10774) == 0) +{ +lean_object* x_10775; lean_object* x_10776; +x_10775 = lean_ctor_get(x_10773, 1); +lean_inc(x_10775); +lean_dec(x_10773); +x_10776 = lean_box(0); +lean_ctor_set(x_8849, 0, x_10776); +x_8855 = x_8849; +x_8856 = x_10775; +goto block_10772; +} +else +{ +uint8_t x_10777; +lean_free_object(x_8849); +x_10777 = !lean_is_exclusive(x_10773); +if (x_10777 == 0) +{ +lean_object* x_10778; lean_object* x_10779; uint8_t x_10780; +x_10778 = lean_ctor_get(x_10773, 1); +x_10779 = lean_ctor_get(x_10773, 0); +lean_dec(x_10779); +x_10780 = !lean_is_exclusive(x_10774); +if (x_10780 == 0) +{ +lean_object* x_10781; lean_object* x_10782; lean_object* x_10783; lean_object* x_10784; uint8_t x_10785; +x_10781 = lean_ctor_get(x_10774, 0); +x_10782 = lean_array_get_size(x_8853); +x_10783 = lean_ctor_get(x_10781, 3); +lean_inc(x_10783); +lean_dec(x_10781); +x_10784 = lean_array_get_size(x_10783); +lean_dec(x_10783); +x_10785 = lean_nat_dec_lt(x_10782, x_10784); +if (x_10785 == 0) +{ +uint8_t x_10786; +x_10786 = lean_nat_dec_eq(x_10782, x_10784); +if (x_10786 == 0) +{ +lean_object* x_10787; lean_object* x_10788; lean_object* x_10789; lean_object* x_10790; lean_object* x_10791; lean_object* x_10792; lean_object* x_10793; lean_object* x_10794; lean_object* x_10795; lean_object* x_10796; lean_object* x_10797; lean_object* x_10798; lean_object* x_10799; lean_object* x_10800; lean_object* x_10801; lean_object* x_10802; +x_10787 = lean_unsigned_to_nat(0u); +x_10788 = l_Array_extract___rarg(x_8853, x_10787, x_10784); +x_10789 = l_Array_extract___rarg(x_8853, x_10784, x_10782); +lean_dec(x_10782); +lean_inc(x_8845); +lean_ctor_set_tag(x_10773, 6); +lean_ctor_set(x_10773, 1, x_10788); +lean_ctor_set(x_10773, 0, x_8845); +x_10790 = lean_ctor_get(x_1, 0); +lean_inc(x_10790); +x_10791 = l_Lean_IR_ToIR_bindVar(x_10790, x_8854, x_4, x_5, x_10778); +x_10792 = lean_ctor_get(x_10791, 0); +lean_inc(x_10792); +x_10793 = lean_ctor_get(x_10791, 1); +lean_inc(x_10793); +lean_dec(x_10791); +x_10794 = lean_ctor_get(x_10792, 0); +lean_inc(x_10794); +x_10795 = lean_ctor_get(x_10792, 1); +lean_inc(x_10795); +lean_dec(x_10792); +x_10796 = l_Lean_IR_ToIR_newVar(x_10795, x_4, x_5, x_10793); +x_10797 = lean_ctor_get(x_10796, 0); +lean_inc(x_10797); +x_10798 = lean_ctor_get(x_10796, 1); +lean_inc(x_10798); +lean_dec(x_10796); +x_10799 = lean_ctor_get(x_10797, 0); +lean_inc(x_10799); +x_10800 = lean_ctor_get(x_10797, 1); +lean_inc(x_10800); +lean_dec(x_10797); +x_10801 = lean_ctor_get(x_1, 2); +lean_inc(x_10801); +lean_inc(x_5); +lean_inc(x_4); +x_10802 = l_Lean_IR_ToIR_lowerType(x_10801, x_10800, x_4, x_5, x_10798); +if (lean_obj_tag(x_10802) == 0) +{ +lean_object* x_10803; lean_object* x_10804; lean_object* x_10805; 
lean_object* x_10806; lean_object* x_10807; +x_10803 = lean_ctor_get(x_10802, 0); +lean_inc(x_10803); +x_10804 = lean_ctor_get(x_10802, 1); +lean_inc(x_10804); +lean_dec(x_10802); +x_10805 = lean_ctor_get(x_10803, 0); +lean_inc(x_10805); +x_10806 = lean_ctor_get(x_10803, 1); +lean_inc(x_10806); +lean_dec(x_10803); +lean_inc(x_5); +lean_inc(x_4); +lean_inc(x_2); +x_10807 = l_Lean_IR_ToIR_lowerLet___lambda__3(x_2, x_10799, x_10789, x_10794, x_10773, x_10805, x_10806, x_4, x_5, x_10804); +if (lean_obj_tag(x_10807) == 0) +{ +lean_object* x_10808; lean_object* x_10809; uint8_t x_10810; +x_10808 = lean_ctor_get(x_10807, 0); +lean_inc(x_10808); +x_10809 = lean_ctor_get(x_10807, 1); +lean_inc(x_10809); +lean_dec(x_10807); +x_10810 = !lean_is_exclusive(x_10808); +if (x_10810 == 0) +{ +lean_object* x_10811; +x_10811 = lean_ctor_get(x_10808, 0); +lean_ctor_set(x_10774, 0, x_10811); +lean_ctor_set(x_10808, 0, x_10774); +x_8855 = x_10808; +x_8856 = x_10809; +goto block_10772; +} +else +{ +lean_object* x_10812; lean_object* x_10813; lean_object* x_10814; +x_10812 = lean_ctor_get(x_10808, 0); +x_10813 = lean_ctor_get(x_10808, 1); +lean_inc(x_10813); +lean_inc(x_10812); +lean_dec(x_10808); +lean_ctor_set(x_10774, 0, x_10812); +x_10814 = lean_alloc_ctor(0, 2, 0); +lean_ctor_set(x_10814, 0, x_10774); +lean_ctor_set(x_10814, 1, x_10813); +x_8855 = x_10814; +x_8856 = x_10809; +goto block_10772; +} +} +else +{ +uint8_t x_10815; +lean_free_object(x_10774); +lean_dec(x_8853); +lean_dec(x_8851); +lean_dec(x_8845); +lean_dec(x_5945); +lean_dec(x_5944); +lean_dec(x_5); +lean_dec(x_4); +lean_dec(x_2); +lean_dec(x_1); +x_10815 = !lean_is_exclusive(x_10807); +if (x_10815 == 0) +{ +return x_10807; +} +else +{ +lean_object* x_10816; lean_object* x_10817; lean_object* x_10818; +x_10816 = lean_ctor_get(x_10807, 0); +x_10817 = lean_ctor_get(x_10807, 1); +lean_inc(x_10817); +lean_inc(x_10816); +lean_dec(x_10807); +x_10818 = lean_alloc_ctor(1, 2, 0); +lean_ctor_set(x_10818, 0, x_10816); +lean_ctor_set(x_10818, 1, x_10817); +return x_10818; +} +} +} +else +{ +uint8_t x_10819; +lean_dec(x_10799); +lean_dec(x_10794); +lean_dec(x_10773); +lean_dec(x_10789); +lean_free_object(x_10774); +lean_dec(x_8853); +lean_dec(x_8851); +lean_dec(x_8845); +lean_dec(x_5945); +lean_dec(x_5944); +lean_dec(x_5); +lean_dec(x_4); +lean_dec(x_2); +lean_dec(x_1); +x_10819 = !lean_is_exclusive(x_10802); +if (x_10819 == 0) +{ +return x_10802; +} +else +{ +lean_object* x_10820; lean_object* x_10821; lean_object* x_10822; +x_10820 = lean_ctor_get(x_10802, 0); +x_10821 = lean_ctor_get(x_10802, 1); +lean_inc(x_10821); +lean_inc(x_10820); +lean_dec(x_10802); +x_10822 = lean_alloc_ctor(1, 2, 0); +lean_ctor_set(x_10822, 0, x_10820); +lean_ctor_set(x_10822, 1, x_10821); +return x_10822; +} +} +} +else +{ +lean_object* x_10823; lean_object* x_10824; lean_object* x_10825; lean_object* x_10826; lean_object* x_10827; lean_object* x_10828; lean_object* x_10829; lean_object* x_10830; +lean_dec(x_10784); +lean_dec(x_10782); +lean_inc(x_8853); +lean_inc(x_8845); +lean_ctor_set_tag(x_10773, 6); +lean_ctor_set(x_10773, 1, x_8853); +lean_ctor_set(x_10773, 0, x_8845); +x_10823 = lean_ctor_get(x_1, 0); +lean_inc(x_10823); +x_10824 = l_Lean_IR_ToIR_bindVar(x_10823, x_8854, x_4, x_5, x_10778); +x_10825 = lean_ctor_get(x_10824, 0); +lean_inc(x_10825); +x_10826 = lean_ctor_get(x_10824, 1); +lean_inc(x_10826); +lean_dec(x_10824); +x_10827 = lean_ctor_get(x_10825, 0); +lean_inc(x_10827); +x_10828 = lean_ctor_get(x_10825, 1); +lean_inc(x_10828); +lean_dec(x_10825); +x_10829 = 
lean_ctor_get(x_1, 2); +lean_inc(x_10829); +lean_inc(x_5); +lean_inc(x_4); +x_10830 = l_Lean_IR_ToIR_lowerType(x_10829, x_10828, x_4, x_5, x_10826); +if (lean_obj_tag(x_10830) == 0) +{ +lean_object* x_10831; lean_object* x_10832; lean_object* x_10833; lean_object* x_10834; lean_object* x_10835; +x_10831 = lean_ctor_get(x_10830, 0); +lean_inc(x_10831); +x_10832 = lean_ctor_get(x_10830, 1); +lean_inc(x_10832); +lean_dec(x_10830); +x_10833 = lean_ctor_get(x_10831, 0); +lean_inc(x_10833); +x_10834 = lean_ctor_get(x_10831, 1); +lean_inc(x_10834); +lean_dec(x_10831); +lean_inc(x_5); +lean_inc(x_4); +lean_inc(x_2); +x_10835 = l_Lean_IR_ToIR_lowerLet___lambda__1(x_2, x_10827, x_10773, x_10833, x_10834, x_4, x_5, x_10832); +if (lean_obj_tag(x_10835) == 0) +{ +lean_object* x_10836; lean_object* x_10837; uint8_t x_10838; +x_10836 = lean_ctor_get(x_10835, 0); +lean_inc(x_10836); +x_10837 = lean_ctor_get(x_10835, 1); +lean_inc(x_10837); +lean_dec(x_10835); +x_10838 = !lean_is_exclusive(x_10836); +if (x_10838 == 0) +{ +lean_object* x_10839; +x_10839 = lean_ctor_get(x_10836, 0); +lean_ctor_set(x_10774, 0, x_10839); +lean_ctor_set(x_10836, 0, x_10774); +x_8855 = x_10836; +x_8856 = x_10837; +goto block_10772; +} +else +{ +lean_object* x_10840; lean_object* x_10841; lean_object* x_10842; +x_10840 = lean_ctor_get(x_10836, 0); +x_10841 = lean_ctor_get(x_10836, 1); +lean_inc(x_10841); +lean_inc(x_10840); +lean_dec(x_10836); +lean_ctor_set(x_10774, 0, x_10840); +x_10842 = lean_alloc_ctor(0, 2, 0); +lean_ctor_set(x_10842, 0, x_10774); +lean_ctor_set(x_10842, 1, x_10841); +x_8855 = x_10842; +x_8856 = x_10837; +goto block_10772; +} +} +else +{ +uint8_t x_10843; +lean_free_object(x_10774); +lean_dec(x_8853); +lean_dec(x_8851); +lean_dec(x_8845); +lean_dec(x_5945); +lean_dec(x_5944); +lean_dec(x_5); +lean_dec(x_4); +lean_dec(x_2); +lean_dec(x_1); +x_10843 = !lean_is_exclusive(x_10835); +if (x_10843 == 0) +{ +return x_10835; +} +else +{ +lean_object* x_10844; lean_object* x_10845; lean_object* x_10846; +x_10844 = lean_ctor_get(x_10835, 0); +x_10845 = lean_ctor_get(x_10835, 1); +lean_inc(x_10845); +lean_inc(x_10844); +lean_dec(x_10835); +x_10846 = lean_alloc_ctor(1, 2, 0); +lean_ctor_set(x_10846, 0, x_10844); +lean_ctor_set(x_10846, 1, x_10845); +return x_10846; +} +} +} +else +{ +uint8_t x_10847; +lean_dec(x_10827); +lean_dec(x_10773); +lean_free_object(x_10774); +lean_dec(x_8853); +lean_dec(x_8851); +lean_dec(x_8845); +lean_dec(x_5945); +lean_dec(x_5944); +lean_dec(x_5); +lean_dec(x_4); +lean_dec(x_2); +lean_dec(x_1); +x_10847 = !lean_is_exclusive(x_10830); +if (x_10847 == 0) +{ +return x_10830; +} +else +{ +lean_object* x_10848; lean_object* x_10849; lean_object* x_10850; +x_10848 = lean_ctor_get(x_10830, 0); +x_10849 = lean_ctor_get(x_10830, 1); +lean_inc(x_10849); +lean_inc(x_10848); +lean_dec(x_10830); +x_10850 = lean_alloc_ctor(1, 2, 0); +lean_ctor_set(x_10850, 0, x_10848); +lean_ctor_set(x_10850, 1, x_10849); +return x_10850; +} +} +} +} +else +{ +lean_object* x_10851; lean_object* x_10852; lean_object* x_10853; lean_object* x_10854; lean_object* x_10855; lean_object* x_10856; lean_object* x_10857; lean_object* x_10858; +lean_dec(x_10784); +lean_dec(x_10782); +lean_inc(x_8853); +lean_inc(x_8845); +lean_ctor_set_tag(x_10773, 7); +lean_ctor_set(x_10773, 1, x_8853); +lean_ctor_set(x_10773, 0, x_8845); +x_10851 = lean_ctor_get(x_1, 0); +lean_inc(x_10851); +x_10852 = l_Lean_IR_ToIR_bindVar(x_10851, x_8854, x_4, x_5, x_10778); +x_10853 = lean_ctor_get(x_10852, 0); +lean_inc(x_10853); +x_10854 = 
lean_ctor_get(x_10852, 1); +lean_inc(x_10854); +lean_dec(x_10852); +x_10855 = lean_ctor_get(x_10853, 0); +lean_inc(x_10855); +x_10856 = lean_ctor_get(x_10853, 1); +lean_inc(x_10856); +lean_dec(x_10853); +x_10857 = lean_box(7); +lean_inc(x_5); +lean_inc(x_4); +lean_inc(x_2); +x_10858 = l_Lean_IR_ToIR_lowerLet___lambda__1(x_2, x_10855, x_10773, x_10857, x_10856, x_4, x_5, x_10854); +if (lean_obj_tag(x_10858) == 0) +{ +lean_object* x_10859; lean_object* x_10860; uint8_t x_10861; +x_10859 = lean_ctor_get(x_10858, 0); +lean_inc(x_10859); +x_10860 = lean_ctor_get(x_10858, 1); +lean_inc(x_10860); +lean_dec(x_10858); +x_10861 = !lean_is_exclusive(x_10859); +if (x_10861 == 0) +{ +lean_object* x_10862; +x_10862 = lean_ctor_get(x_10859, 0); +lean_ctor_set(x_10774, 0, x_10862); +lean_ctor_set(x_10859, 0, x_10774); +x_8855 = x_10859; +x_8856 = x_10860; +goto block_10772; +} +else +{ +lean_object* x_10863; lean_object* x_10864; lean_object* x_10865; +x_10863 = lean_ctor_get(x_10859, 0); +x_10864 = lean_ctor_get(x_10859, 1); +lean_inc(x_10864); +lean_inc(x_10863); +lean_dec(x_10859); +lean_ctor_set(x_10774, 0, x_10863); +x_10865 = lean_alloc_ctor(0, 2, 0); +lean_ctor_set(x_10865, 0, x_10774); +lean_ctor_set(x_10865, 1, x_10864); +x_8855 = x_10865; +x_8856 = x_10860; +goto block_10772; +} +} +else +{ +uint8_t x_10866; +lean_free_object(x_10774); +lean_dec(x_8853); +lean_dec(x_8851); +lean_dec(x_8845); +lean_dec(x_5945); +lean_dec(x_5944); +lean_dec(x_5); +lean_dec(x_4); +lean_dec(x_2); +lean_dec(x_1); +x_10866 = !lean_is_exclusive(x_10858); +if (x_10866 == 0) +{ +return x_10858; +} +else +{ +lean_object* x_10867; lean_object* x_10868; lean_object* x_10869; +x_10867 = lean_ctor_get(x_10858, 0); +x_10868 = lean_ctor_get(x_10858, 1); +lean_inc(x_10868); +lean_inc(x_10867); +lean_dec(x_10858); +x_10869 = lean_alloc_ctor(1, 2, 0); +lean_ctor_set(x_10869, 0, x_10867); +lean_ctor_set(x_10869, 1, x_10868); +return x_10869; +} +} +} +} +else +{ +lean_object* x_10870; lean_object* x_10871; lean_object* x_10872; lean_object* x_10873; uint8_t x_10874; +x_10870 = lean_ctor_get(x_10774, 0); +lean_inc(x_10870); +lean_dec(x_10774); +x_10871 = lean_array_get_size(x_8853); +x_10872 = lean_ctor_get(x_10870, 3); +lean_inc(x_10872); +lean_dec(x_10870); +x_10873 = lean_array_get_size(x_10872); +lean_dec(x_10872); +x_10874 = lean_nat_dec_lt(x_10871, x_10873); +if (x_10874 == 0) +{ +uint8_t x_10875; +x_10875 = lean_nat_dec_eq(x_10871, x_10873); +if (x_10875 == 0) +{ +lean_object* x_10876; lean_object* x_10877; lean_object* x_10878; lean_object* x_10879; lean_object* x_10880; lean_object* x_10881; lean_object* x_10882; lean_object* x_10883; lean_object* x_10884; lean_object* x_10885; lean_object* x_10886; lean_object* x_10887; lean_object* x_10888; lean_object* x_10889; lean_object* x_10890; lean_object* x_10891; +x_10876 = lean_unsigned_to_nat(0u); +x_10877 = l_Array_extract___rarg(x_8853, x_10876, x_10873); +x_10878 = l_Array_extract___rarg(x_8853, x_10873, x_10871); +lean_dec(x_10871); +lean_inc(x_8845); +lean_ctor_set_tag(x_10773, 6); +lean_ctor_set(x_10773, 1, x_10877); +lean_ctor_set(x_10773, 0, x_8845); +x_10879 = lean_ctor_get(x_1, 0); +lean_inc(x_10879); +x_10880 = l_Lean_IR_ToIR_bindVar(x_10879, x_8854, x_4, x_5, x_10778); +x_10881 = lean_ctor_get(x_10880, 0); +lean_inc(x_10881); +x_10882 = lean_ctor_get(x_10880, 1); +lean_inc(x_10882); +lean_dec(x_10880); +x_10883 = lean_ctor_get(x_10881, 0); +lean_inc(x_10883); +x_10884 = lean_ctor_get(x_10881, 1); +lean_inc(x_10884); +lean_dec(x_10881); +x_10885 = 
l_Lean_IR_ToIR_newVar(x_10884, x_4, x_5, x_10882); +x_10886 = lean_ctor_get(x_10885, 0); +lean_inc(x_10886); +x_10887 = lean_ctor_get(x_10885, 1); +lean_inc(x_10887); +lean_dec(x_10885); +x_10888 = lean_ctor_get(x_10886, 0); +lean_inc(x_10888); +x_10889 = lean_ctor_get(x_10886, 1); +lean_inc(x_10889); +lean_dec(x_10886); +x_10890 = lean_ctor_get(x_1, 2); +lean_inc(x_10890); +lean_inc(x_5); +lean_inc(x_4); +x_10891 = l_Lean_IR_ToIR_lowerType(x_10890, x_10889, x_4, x_5, x_10887); +if (lean_obj_tag(x_10891) == 0) +{ +lean_object* x_10892; lean_object* x_10893; lean_object* x_10894; lean_object* x_10895; lean_object* x_10896; +x_10892 = lean_ctor_get(x_10891, 0); +lean_inc(x_10892); +x_10893 = lean_ctor_get(x_10891, 1); +lean_inc(x_10893); +lean_dec(x_10891); +x_10894 = lean_ctor_get(x_10892, 0); +lean_inc(x_10894); +x_10895 = lean_ctor_get(x_10892, 1); +lean_inc(x_10895); +lean_dec(x_10892); +lean_inc(x_5); +lean_inc(x_4); +lean_inc(x_2); +x_10896 = l_Lean_IR_ToIR_lowerLet___lambda__3(x_2, x_10888, x_10878, x_10883, x_10773, x_10894, x_10895, x_4, x_5, x_10893); +if (lean_obj_tag(x_10896) == 0) +{ +lean_object* x_10897; lean_object* x_10898; lean_object* x_10899; lean_object* x_10900; lean_object* x_10901; lean_object* x_10902; lean_object* x_10903; +x_10897 = lean_ctor_get(x_10896, 0); +lean_inc(x_10897); +x_10898 = lean_ctor_get(x_10896, 1); +lean_inc(x_10898); +lean_dec(x_10896); +x_10899 = lean_ctor_get(x_10897, 0); +lean_inc(x_10899); +x_10900 = lean_ctor_get(x_10897, 1); +lean_inc(x_10900); +if (lean_is_exclusive(x_10897)) { + lean_ctor_release(x_10897, 0); + lean_ctor_release(x_10897, 1); + x_10901 = x_10897; +} else { + lean_dec_ref(x_10897); + x_10901 = lean_box(0); +} +x_10902 = lean_alloc_ctor(1, 1, 0); +lean_ctor_set(x_10902, 0, x_10899); +if (lean_is_scalar(x_10901)) { + x_10903 = lean_alloc_ctor(0, 2, 0); +} else { + x_10903 = x_10901; +} +lean_ctor_set(x_10903, 0, x_10902); +lean_ctor_set(x_10903, 1, x_10900); +x_8855 = x_10903; +x_8856 = x_10898; +goto block_10772; +} +else +{ +lean_object* x_10904; lean_object* x_10905; lean_object* x_10906; lean_object* x_10907; +lean_dec(x_8853); +lean_dec(x_8851); +lean_dec(x_8845); +lean_dec(x_5945); +lean_dec(x_5944); +lean_dec(x_5); +lean_dec(x_4); +lean_dec(x_2); +lean_dec(x_1); +x_10904 = lean_ctor_get(x_10896, 0); +lean_inc(x_10904); +x_10905 = lean_ctor_get(x_10896, 1); +lean_inc(x_10905); +if (lean_is_exclusive(x_10896)) { + lean_ctor_release(x_10896, 0); + lean_ctor_release(x_10896, 1); + x_10906 = x_10896; +} else { + lean_dec_ref(x_10896); + x_10906 = lean_box(0); +} +if (lean_is_scalar(x_10906)) { + x_10907 = lean_alloc_ctor(1, 2, 0); +} else { + x_10907 = x_10906; +} +lean_ctor_set(x_10907, 0, x_10904); +lean_ctor_set(x_10907, 1, x_10905); +return x_10907; +} +} +else +{ +lean_object* x_10908; lean_object* x_10909; lean_object* x_10910; lean_object* x_10911; +lean_dec(x_10888); +lean_dec(x_10883); +lean_dec(x_10773); +lean_dec(x_10878); +lean_dec(x_8853); +lean_dec(x_8851); +lean_dec(x_8845); +lean_dec(x_5945); +lean_dec(x_5944); +lean_dec(x_5); +lean_dec(x_4); +lean_dec(x_2); +lean_dec(x_1); +x_10908 = lean_ctor_get(x_10891, 0); +lean_inc(x_10908); +x_10909 = lean_ctor_get(x_10891, 1); +lean_inc(x_10909); +if (lean_is_exclusive(x_10891)) { + lean_ctor_release(x_10891, 0); + lean_ctor_release(x_10891, 1); + x_10910 = x_10891; +} else { + lean_dec_ref(x_10891); + x_10910 = lean_box(0); +} +if (lean_is_scalar(x_10910)) { + x_10911 = lean_alloc_ctor(1, 2, 0); +} else { + x_10911 = x_10910; +} +lean_ctor_set(x_10911, 0, x_10908); 
+lean_ctor_set(x_10911, 1, x_10909); +return x_10911; +} +} +else +{ +lean_object* x_10912; lean_object* x_10913; lean_object* x_10914; lean_object* x_10915; lean_object* x_10916; lean_object* x_10917; lean_object* x_10918; lean_object* x_10919; +lean_dec(x_10873); +lean_dec(x_10871); +lean_inc(x_8853); +lean_inc(x_8845); +lean_ctor_set_tag(x_10773, 6); +lean_ctor_set(x_10773, 1, x_8853); +lean_ctor_set(x_10773, 0, x_8845); +x_10912 = lean_ctor_get(x_1, 0); +lean_inc(x_10912); +x_10913 = l_Lean_IR_ToIR_bindVar(x_10912, x_8854, x_4, x_5, x_10778); +x_10914 = lean_ctor_get(x_10913, 0); +lean_inc(x_10914); +x_10915 = lean_ctor_get(x_10913, 1); +lean_inc(x_10915); +lean_dec(x_10913); +x_10916 = lean_ctor_get(x_10914, 0); +lean_inc(x_10916); +x_10917 = lean_ctor_get(x_10914, 1); +lean_inc(x_10917); +lean_dec(x_10914); +x_10918 = lean_ctor_get(x_1, 2); +lean_inc(x_10918); +lean_inc(x_5); +lean_inc(x_4); +x_10919 = l_Lean_IR_ToIR_lowerType(x_10918, x_10917, x_4, x_5, x_10915); +if (lean_obj_tag(x_10919) == 0) +{ +lean_object* x_10920; lean_object* x_10921; lean_object* x_10922; lean_object* x_10923; lean_object* x_10924; +x_10920 = lean_ctor_get(x_10919, 0); +lean_inc(x_10920); +x_10921 = lean_ctor_get(x_10919, 1); +lean_inc(x_10921); +lean_dec(x_10919); +x_10922 = lean_ctor_get(x_10920, 0); +lean_inc(x_10922); +x_10923 = lean_ctor_get(x_10920, 1); +lean_inc(x_10923); +lean_dec(x_10920); +lean_inc(x_5); +lean_inc(x_4); +lean_inc(x_2); +x_10924 = l_Lean_IR_ToIR_lowerLet___lambda__1(x_2, x_10916, x_10773, x_10922, x_10923, x_4, x_5, x_10921); +if (lean_obj_tag(x_10924) == 0) +{ +lean_object* x_10925; lean_object* x_10926; lean_object* x_10927; lean_object* x_10928; lean_object* x_10929; lean_object* x_10930; lean_object* x_10931; +x_10925 = lean_ctor_get(x_10924, 0); +lean_inc(x_10925); +x_10926 = lean_ctor_get(x_10924, 1); +lean_inc(x_10926); +lean_dec(x_10924); +x_10927 = lean_ctor_get(x_10925, 0); +lean_inc(x_10927); +x_10928 = lean_ctor_get(x_10925, 1); +lean_inc(x_10928); +if (lean_is_exclusive(x_10925)) { + lean_ctor_release(x_10925, 0); + lean_ctor_release(x_10925, 1); + x_10929 = x_10925; +} else { + lean_dec_ref(x_10925); + x_10929 = lean_box(0); +} +x_10930 = lean_alloc_ctor(1, 1, 0); +lean_ctor_set(x_10930, 0, x_10927); +if (lean_is_scalar(x_10929)) { + x_10931 = lean_alloc_ctor(0, 2, 0); +} else { + x_10931 = x_10929; +} +lean_ctor_set(x_10931, 0, x_10930); +lean_ctor_set(x_10931, 1, x_10928); +x_8855 = x_10931; +x_8856 = x_10926; +goto block_10772; +} +else +{ +lean_object* x_10932; lean_object* x_10933; lean_object* x_10934; lean_object* x_10935; +lean_dec(x_8853); +lean_dec(x_8851); +lean_dec(x_8845); +lean_dec(x_5945); +lean_dec(x_5944); +lean_dec(x_5); +lean_dec(x_4); +lean_dec(x_2); +lean_dec(x_1); +x_10932 = lean_ctor_get(x_10924, 0); +lean_inc(x_10932); +x_10933 = lean_ctor_get(x_10924, 1); +lean_inc(x_10933); +if (lean_is_exclusive(x_10924)) { + lean_ctor_release(x_10924, 0); + lean_ctor_release(x_10924, 1); + x_10934 = x_10924; +} else { + lean_dec_ref(x_10924); + x_10934 = lean_box(0); +} +if (lean_is_scalar(x_10934)) { + x_10935 = lean_alloc_ctor(1, 2, 0); +} else { + x_10935 = x_10934; +} +lean_ctor_set(x_10935, 0, x_10932); +lean_ctor_set(x_10935, 1, x_10933); +return x_10935; +} +} +else +{ +lean_object* x_10936; lean_object* x_10937; lean_object* x_10938; lean_object* x_10939; +lean_dec(x_10916); +lean_dec(x_10773); +lean_dec(x_8853); +lean_dec(x_8851); +lean_dec(x_8845); +lean_dec(x_5945); +lean_dec(x_5944); +lean_dec(x_5); +lean_dec(x_4); +lean_dec(x_2); 
+lean_dec(x_1); +x_10936 = lean_ctor_get(x_10919, 0); +lean_inc(x_10936); +x_10937 = lean_ctor_get(x_10919, 1); +lean_inc(x_10937); +if (lean_is_exclusive(x_10919)) { + lean_ctor_release(x_10919, 0); + lean_ctor_release(x_10919, 1); + x_10938 = x_10919; +} else { + lean_dec_ref(x_10919); + x_10938 = lean_box(0); +} +if (lean_is_scalar(x_10938)) { + x_10939 = lean_alloc_ctor(1, 2, 0); +} else { + x_10939 = x_10938; +} +lean_ctor_set(x_10939, 0, x_10936); +lean_ctor_set(x_10939, 1, x_10937); +return x_10939; +} +} +} +else +{ +lean_object* x_10940; lean_object* x_10941; lean_object* x_10942; lean_object* x_10943; lean_object* x_10944; lean_object* x_10945; lean_object* x_10946; lean_object* x_10947; +lean_dec(x_10873); +lean_dec(x_10871); +lean_inc(x_8853); +lean_inc(x_8845); +lean_ctor_set_tag(x_10773, 7); +lean_ctor_set(x_10773, 1, x_8853); +lean_ctor_set(x_10773, 0, x_8845); +x_10940 = lean_ctor_get(x_1, 0); +lean_inc(x_10940); +x_10941 = l_Lean_IR_ToIR_bindVar(x_10940, x_8854, x_4, x_5, x_10778); +x_10942 = lean_ctor_get(x_10941, 0); +lean_inc(x_10942); +x_10943 = lean_ctor_get(x_10941, 1); +lean_inc(x_10943); +lean_dec(x_10941); +x_10944 = lean_ctor_get(x_10942, 0); +lean_inc(x_10944); +x_10945 = lean_ctor_get(x_10942, 1); +lean_inc(x_10945); +lean_dec(x_10942); +x_10946 = lean_box(7); +lean_inc(x_5); +lean_inc(x_4); +lean_inc(x_2); +x_10947 = l_Lean_IR_ToIR_lowerLet___lambda__1(x_2, x_10944, x_10773, x_10946, x_10945, x_4, x_5, x_10943); +if (lean_obj_tag(x_10947) == 0) +{ +lean_object* x_10948; lean_object* x_10949; lean_object* x_10950; lean_object* x_10951; lean_object* x_10952; lean_object* x_10953; lean_object* x_10954; +x_10948 = lean_ctor_get(x_10947, 0); +lean_inc(x_10948); +x_10949 = lean_ctor_get(x_10947, 1); +lean_inc(x_10949); +lean_dec(x_10947); +x_10950 = lean_ctor_get(x_10948, 0); +lean_inc(x_10950); +x_10951 = lean_ctor_get(x_10948, 1); +lean_inc(x_10951); +if (lean_is_exclusive(x_10948)) { + lean_ctor_release(x_10948, 0); + lean_ctor_release(x_10948, 1); + x_10952 = x_10948; +} else { + lean_dec_ref(x_10948); + x_10952 = lean_box(0); +} +x_10953 = lean_alloc_ctor(1, 1, 0); +lean_ctor_set(x_10953, 0, x_10950); +if (lean_is_scalar(x_10952)) { + x_10954 = lean_alloc_ctor(0, 2, 0); +} else { + x_10954 = x_10952; +} +lean_ctor_set(x_10954, 0, x_10953); +lean_ctor_set(x_10954, 1, x_10951); +x_8855 = x_10954; +x_8856 = x_10949; +goto block_10772; +} +else +{ +lean_object* x_10955; lean_object* x_10956; lean_object* x_10957; lean_object* x_10958; +lean_dec(x_8853); +lean_dec(x_8851); +lean_dec(x_8845); +lean_dec(x_5945); +lean_dec(x_5944); +lean_dec(x_5); +lean_dec(x_4); +lean_dec(x_2); +lean_dec(x_1); +x_10955 = lean_ctor_get(x_10947, 0); +lean_inc(x_10955); +x_10956 = lean_ctor_get(x_10947, 1); +lean_inc(x_10956); +if (lean_is_exclusive(x_10947)) { + lean_ctor_release(x_10947, 0); + lean_ctor_release(x_10947, 1); + x_10957 = x_10947; +} else { + lean_dec_ref(x_10947); + x_10957 = lean_box(0); +} +if (lean_is_scalar(x_10957)) { + x_10958 = lean_alloc_ctor(1, 2, 0); +} else { + x_10958 = x_10957; +} +lean_ctor_set(x_10958, 0, x_10955); +lean_ctor_set(x_10958, 1, x_10956); +return x_10958; +} +} +} +} +else +{ +lean_object* x_10959; lean_object* x_10960; lean_object* x_10961; lean_object* x_10962; lean_object* x_10963; lean_object* x_10964; uint8_t x_10965; +x_10959 = lean_ctor_get(x_10773, 1); +lean_inc(x_10959); +lean_dec(x_10773); +x_10960 = lean_ctor_get(x_10774, 0); +lean_inc(x_10960); +if (lean_is_exclusive(x_10774)) { + lean_ctor_release(x_10774, 0); + x_10961 = x_10774; 
+} else { + lean_dec_ref(x_10774); + x_10961 = lean_box(0); +} +x_10962 = lean_array_get_size(x_8853); +x_10963 = lean_ctor_get(x_10960, 3); +lean_inc(x_10963); +lean_dec(x_10960); +x_10964 = lean_array_get_size(x_10963); +lean_dec(x_10963); +x_10965 = lean_nat_dec_lt(x_10962, x_10964); +if (x_10965 == 0) +{ +uint8_t x_10966; +x_10966 = lean_nat_dec_eq(x_10962, x_10964); +if (x_10966 == 0) +{ +lean_object* x_10967; lean_object* x_10968; lean_object* x_10969; lean_object* x_10970; lean_object* x_10971; lean_object* x_10972; lean_object* x_10973; lean_object* x_10974; lean_object* x_10975; lean_object* x_10976; lean_object* x_10977; lean_object* x_10978; lean_object* x_10979; lean_object* x_10980; lean_object* x_10981; lean_object* x_10982; lean_object* x_10983; +x_10967 = lean_unsigned_to_nat(0u); +x_10968 = l_Array_extract___rarg(x_8853, x_10967, x_10964); +x_10969 = l_Array_extract___rarg(x_8853, x_10964, x_10962); +lean_dec(x_10962); +lean_inc(x_8845); +x_10970 = lean_alloc_ctor(6, 2, 0); +lean_ctor_set(x_10970, 0, x_8845); +lean_ctor_set(x_10970, 1, x_10968); +x_10971 = lean_ctor_get(x_1, 0); +lean_inc(x_10971); +x_10972 = l_Lean_IR_ToIR_bindVar(x_10971, x_8854, x_4, x_5, x_10959); +x_10973 = lean_ctor_get(x_10972, 0); +lean_inc(x_10973); +x_10974 = lean_ctor_get(x_10972, 1); +lean_inc(x_10974); +lean_dec(x_10972); +x_10975 = lean_ctor_get(x_10973, 0); +lean_inc(x_10975); +x_10976 = lean_ctor_get(x_10973, 1); +lean_inc(x_10976); +lean_dec(x_10973); +x_10977 = l_Lean_IR_ToIR_newVar(x_10976, x_4, x_5, x_10974); +x_10978 = lean_ctor_get(x_10977, 0); +lean_inc(x_10978); +x_10979 = lean_ctor_get(x_10977, 1); +lean_inc(x_10979); +lean_dec(x_10977); +x_10980 = lean_ctor_get(x_10978, 0); +lean_inc(x_10980); +x_10981 = lean_ctor_get(x_10978, 1); +lean_inc(x_10981); +lean_dec(x_10978); +x_10982 = lean_ctor_get(x_1, 2); +lean_inc(x_10982); +lean_inc(x_5); +lean_inc(x_4); +x_10983 = l_Lean_IR_ToIR_lowerType(x_10982, x_10981, x_4, x_5, x_10979); +if (lean_obj_tag(x_10983) == 0) +{ +lean_object* x_10984; lean_object* x_10985; lean_object* x_10986; lean_object* x_10987; lean_object* x_10988; +x_10984 = lean_ctor_get(x_10983, 0); +lean_inc(x_10984); +x_10985 = lean_ctor_get(x_10983, 1); +lean_inc(x_10985); +lean_dec(x_10983); +x_10986 = lean_ctor_get(x_10984, 0); +lean_inc(x_10986); +x_10987 = lean_ctor_get(x_10984, 1); +lean_inc(x_10987); +lean_dec(x_10984); +lean_inc(x_5); +lean_inc(x_4); +lean_inc(x_2); +x_10988 = l_Lean_IR_ToIR_lowerLet___lambda__3(x_2, x_10980, x_10969, x_10975, x_10970, x_10986, x_10987, x_4, x_5, x_10985); +if (lean_obj_tag(x_10988) == 0) +{ +lean_object* x_10989; lean_object* x_10990; lean_object* x_10991; lean_object* x_10992; lean_object* x_10993; lean_object* x_10994; lean_object* x_10995; +x_10989 = lean_ctor_get(x_10988, 0); +lean_inc(x_10989); +x_10990 = lean_ctor_get(x_10988, 1); +lean_inc(x_10990); +lean_dec(x_10988); +x_10991 = lean_ctor_get(x_10989, 0); +lean_inc(x_10991); +x_10992 = lean_ctor_get(x_10989, 1); +lean_inc(x_10992); +if (lean_is_exclusive(x_10989)) { + lean_ctor_release(x_10989, 0); + lean_ctor_release(x_10989, 1); + x_10993 = x_10989; +} else { + lean_dec_ref(x_10989); + x_10993 = lean_box(0); +} +if (lean_is_scalar(x_10961)) { + x_10994 = lean_alloc_ctor(1, 1, 0); +} else { + x_10994 = x_10961; +} +lean_ctor_set(x_10994, 0, x_10991); +if (lean_is_scalar(x_10993)) { + x_10995 = lean_alloc_ctor(0, 2, 0); +} else { + x_10995 = x_10993; +} +lean_ctor_set(x_10995, 0, x_10994); +lean_ctor_set(x_10995, 1, x_10992); +x_8855 = x_10995; +x_8856 = x_10990; +goto 
block_10772; +} +else +{ +lean_object* x_10996; lean_object* x_10997; lean_object* x_10998; lean_object* x_10999; +lean_dec(x_10961); +lean_dec(x_8853); +lean_dec(x_8851); +lean_dec(x_8845); +lean_dec(x_5945); +lean_dec(x_5944); +lean_dec(x_5); +lean_dec(x_4); +lean_dec(x_2); +lean_dec(x_1); +x_10996 = lean_ctor_get(x_10988, 0); +lean_inc(x_10996); +x_10997 = lean_ctor_get(x_10988, 1); +lean_inc(x_10997); +if (lean_is_exclusive(x_10988)) { + lean_ctor_release(x_10988, 0); + lean_ctor_release(x_10988, 1); + x_10998 = x_10988; +} else { + lean_dec_ref(x_10988); + x_10998 = lean_box(0); +} +if (lean_is_scalar(x_10998)) { + x_10999 = lean_alloc_ctor(1, 2, 0); +} else { + x_10999 = x_10998; +} +lean_ctor_set(x_10999, 0, x_10996); +lean_ctor_set(x_10999, 1, x_10997); +return x_10999; +} +} +else +{ +lean_object* x_11000; lean_object* x_11001; lean_object* x_11002; lean_object* x_11003; +lean_dec(x_10980); +lean_dec(x_10975); +lean_dec(x_10970); +lean_dec(x_10969); +lean_dec(x_10961); +lean_dec(x_8853); +lean_dec(x_8851); +lean_dec(x_8845); +lean_dec(x_5945); +lean_dec(x_5944); +lean_dec(x_5); +lean_dec(x_4); +lean_dec(x_2); +lean_dec(x_1); +x_11000 = lean_ctor_get(x_10983, 0); +lean_inc(x_11000); +x_11001 = lean_ctor_get(x_10983, 1); +lean_inc(x_11001); +if (lean_is_exclusive(x_10983)) { + lean_ctor_release(x_10983, 0); + lean_ctor_release(x_10983, 1); + x_11002 = x_10983; +} else { + lean_dec_ref(x_10983); + x_11002 = lean_box(0); +} +if (lean_is_scalar(x_11002)) { + x_11003 = lean_alloc_ctor(1, 2, 0); +} else { + x_11003 = x_11002; +} +lean_ctor_set(x_11003, 0, x_11000); +lean_ctor_set(x_11003, 1, x_11001); +return x_11003; +} +} +else +{ +lean_object* x_11004; lean_object* x_11005; lean_object* x_11006; lean_object* x_11007; lean_object* x_11008; lean_object* x_11009; lean_object* x_11010; lean_object* x_11011; lean_object* x_11012; +lean_dec(x_10964); +lean_dec(x_10962); +lean_inc(x_8853); +lean_inc(x_8845); +x_11004 = lean_alloc_ctor(6, 2, 0); +lean_ctor_set(x_11004, 0, x_8845); +lean_ctor_set(x_11004, 1, x_8853); +x_11005 = lean_ctor_get(x_1, 0); +lean_inc(x_11005); +x_11006 = l_Lean_IR_ToIR_bindVar(x_11005, x_8854, x_4, x_5, x_10959); +x_11007 = lean_ctor_get(x_11006, 0); +lean_inc(x_11007); +x_11008 = lean_ctor_get(x_11006, 1); +lean_inc(x_11008); +lean_dec(x_11006); +x_11009 = lean_ctor_get(x_11007, 0); +lean_inc(x_11009); +x_11010 = lean_ctor_get(x_11007, 1); +lean_inc(x_11010); +lean_dec(x_11007); +x_11011 = lean_ctor_get(x_1, 2); +lean_inc(x_11011); +lean_inc(x_5); +lean_inc(x_4); +x_11012 = l_Lean_IR_ToIR_lowerType(x_11011, x_11010, x_4, x_5, x_11008); +if (lean_obj_tag(x_11012) == 0) +{ +lean_object* x_11013; lean_object* x_11014; lean_object* x_11015; lean_object* x_11016; lean_object* x_11017; +x_11013 = lean_ctor_get(x_11012, 0); +lean_inc(x_11013); +x_11014 = lean_ctor_get(x_11012, 1); +lean_inc(x_11014); +lean_dec(x_11012); +x_11015 = lean_ctor_get(x_11013, 0); +lean_inc(x_11015); +x_11016 = lean_ctor_get(x_11013, 1); +lean_inc(x_11016); +lean_dec(x_11013); +lean_inc(x_5); +lean_inc(x_4); +lean_inc(x_2); +x_11017 = l_Lean_IR_ToIR_lowerLet___lambda__1(x_2, x_11009, x_11004, x_11015, x_11016, x_4, x_5, x_11014); +if (lean_obj_tag(x_11017) == 0) +{ +lean_object* x_11018; lean_object* x_11019; lean_object* x_11020; lean_object* x_11021; lean_object* x_11022; lean_object* x_11023; lean_object* x_11024; +x_11018 = lean_ctor_get(x_11017, 0); +lean_inc(x_11018); +x_11019 = lean_ctor_get(x_11017, 1); +lean_inc(x_11019); +lean_dec(x_11017); +x_11020 = lean_ctor_get(x_11018, 0); 
+lean_inc(x_11020); +x_11021 = lean_ctor_get(x_11018, 1); +lean_inc(x_11021); +if (lean_is_exclusive(x_11018)) { + lean_ctor_release(x_11018, 0); + lean_ctor_release(x_11018, 1); + x_11022 = x_11018; +} else { + lean_dec_ref(x_11018); + x_11022 = lean_box(0); +} +if (lean_is_scalar(x_10961)) { + x_11023 = lean_alloc_ctor(1, 1, 0); +} else { + x_11023 = x_10961; +} +lean_ctor_set(x_11023, 0, x_11020); +if (lean_is_scalar(x_11022)) { + x_11024 = lean_alloc_ctor(0, 2, 0); +} else { + x_11024 = x_11022; +} +lean_ctor_set(x_11024, 0, x_11023); +lean_ctor_set(x_11024, 1, x_11021); +x_8855 = x_11024; +x_8856 = x_11019; +goto block_10772; +} +else +{ +lean_object* x_11025; lean_object* x_11026; lean_object* x_11027; lean_object* x_11028; +lean_dec(x_10961); +lean_dec(x_8853); +lean_dec(x_8851); +lean_dec(x_8845); +lean_dec(x_5945); +lean_dec(x_5944); +lean_dec(x_5); +lean_dec(x_4); +lean_dec(x_2); +lean_dec(x_1); +x_11025 = lean_ctor_get(x_11017, 0); +lean_inc(x_11025); +x_11026 = lean_ctor_get(x_11017, 1); +lean_inc(x_11026); +if (lean_is_exclusive(x_11017)) { + lean_ctor_release(x_11017, 0); + lean_ctor_release(x_11017, 1); + x_11027 = x_11017; +} else { + lean_dec_ref(x_11017); + x_11027 = lean_box(0); +} +if (lean_is_scalar(x_11027)) { + x_11028 = lean_alloc_ctor(1, 2, 0); +} else { + x_11028 = x_11027; +} +lean_ctor_set(x_11028, 0, x_11025); +lean_ctor_set(x_11028, 1, x_11026); +return x_11028; +} +} +else +{ +lean_object* x_11029; lean_object* x_11030; lean_object* x_11031; lean_object* x_11032; +lean_dec(x_11009); +lean_dec(x_11004); +lean_dec(x_10961); +lean_dec(x_8853); +lean_dec(x_8851); +lean_dec(x_8845); +lean_dec(x_5945); +lean_dec(x_5944); +lean_dec(x_5); +lean_dec(x_4); +lean_dec(x_2); +lean_dec(x_1); +x_11029 = lean_ctor_get(x_11012, 0); +lean_inc(x_11029); +x_11030 = lean_ctor_get(x_11012, 1); +lean_inc(x_11030); +if (lean_is_exclusive(x_11012)) { + lean_ctor_release(x_11012, 0); + lean_ctor_release(x_11012, 1); + x_11031 = x_11012; +} else { + lean_dec_ref(x_11012); + x_11031 = lean_box(0); +} +if (lean_is_scalar(x_11031)) { + x_11032 = lean_alloc_ctor(1, 2, 0); +} else { + x_11032 = x_11031; +} +lean_ctor_set(x_11032, 0, x_11029); +lean_ctor_set(x_11032, 1, x_11030); +return x_11032; +} +} +} +else +{ +lean_object* x_11033; lean_object* x_11034; lean_object* x_11035; lean_object* x_11036; lean_object* x_11037; lean_object* x_11038; lean_object* x_11039; lean_object* x_11040; lean_object* x_11041; +lean_dec(x_10964); +lean_dec(x_10962); +lean_inc(x_8853); +lean_inc(x_8845); +x_11033 = lean_alloc_ctor(7, 2, 0); +lean_ctor_set(x_11033, 0, x_8845); +lean_ctor_set(x_11033, 1, x_8853); +x_11034 = lean_ctor_get(x_1, 0); +lean_inc(x_11034); +x_11035 = l_Lean_IR_ToIR_bindVar(x_11034, x_8854, x_4, x_5, x_10959); +x_11036 = lean_ctor_get(x_11035, 0); +lean_inc(x_11036); +x_11037 = lean_ctor_get(x_11035, 1); +lean_inc(x_11037); +lean_dec(x_11035); +x_11038 = lean_ctor_get(x_11036, 0); +lean_inc(x_11038); +x_11039 = lean_ctor_get(x_11036, 1); +lean_inc(x_11039); +lean_dec(x_11036); +x_11040 = lean_box(7); +lean_inc(x_5); +lean_inc(x_4); +lean_inc(x_2); +x_11041 = l_Lean_IR_ToIR_lowerLet___lambda__1(x_2, x_11038, x_11033, x_11040, x_11039, x_4, x_5, x_11037); +if (lean_obj_tag(x_11041) == 0) +{ +lean_object* x_11042; lean_object* x_11043; lean_object* x_11044; lean_object* x_11045; lean_object* x_11046; lean_object* x_11047; lean_object* x_11048; +x_11042 = lean_ctor_get(x_11041, 0); +lean_inc(x_11042); +x_11043 = lean_ctor_get(x_11041, 1); +lean_inc(x_11043); +lean_dec(x_11041); +x_11044 = 
lean_ctor_get(x_11042, 0); +lean_inc(x_11044); +x_11045 = lean_ctor_get(x_11042, 1); +lean_inc(x_11045); +if (lean_is_exclusive(x_11042)) { + lean_ctor_release(x_11042, 0); + lean_ctor_release(x_11042, 1); + x_11046 = x_11042; +} else { + lean_dec_ref(x_11042); + x_11046 = lean_box(0); +} +if (lean_is_scalar(x_10961)) { + x_11047 = lean_alloc_ctor(1, 1, 0); +} else { + x_11047 = x_10961; +} +lean_ctor_set(x_11047, 0, x_11044); +if (lean_is_scalar(x_11046)) { + x_11048 = lean_alloc_ctor(0, 2, 0); +} else { + x_11048 = x_11046; +} +lean_ctor_set(x_11048, 0, x_11047); +lean_ctor_set(x_11048, 1, x_11045); +x_8855 = x_11048; +x_8856 = x_11043; +goto block_10772; +} +else +{ +lean_object* x_11049; lean_object* x_11050; lean_object* x_11051; lean_object* x_11052; +lean_dec(x_10961); +lean_dec(x_8853); +lean_dec(x_8851); +lean_dec(x_8845); +lean_dec(x_5945); +lean_dec(x_5944); +lean_dec(x_5); +lean_dec(x_4); +lean_dec(x_2); +lean_dec(x_1); +x_11049 = lean_ctor_get(x_11041, 0); +lean_inc(x_11049); +x_11050 = lean_ctor_get(x_11041, 1); +lean_inc(x_11050); +if (lean_is_exclusive(x_11041)) { + lean_ctor_release(x_11041, 0); + lean_ctor_release(x_11041, 1); + x_11051 = x_11041; +} else { + lean_dec_ref(x_11041); + x_11051 = lean_box(0); +} +if (lean_is_scalar(x_11051)) { + x_11052 = lean_alloc_ctor(1, 2, 0); +} else { + x_11052 = x_11051; +} +lean_ctor_set(x_11052, 0, x_11049); +lean_ctor_set(x_11052, 1, x_11050); +return x_11052; +} +} +} +} +block_10772: +{ +lean_object* x_8857; +x_8857 = lean_ctor_get(x_8855, 0); +lean_inc(x_8857); +if (lean_obj_tag(x_8857) == 0) +{ +uint8_t x_8858; +lean_dec(x_8851); +x_8858 = !lean_is_exclusive(x_8855); +if (x_8858 == 0) +{ +lean_object* x_8859; lean_object* x_8860; lean_object* x_8861; lean_object* x_8862; lean_object* x_8863; lean_object* x_8864; lean_object* x_8865; uint8_t x_8866; lean_object* x_8867; +x_8859 = lean_ctor_get(x_8855, 1); +x_8860 = lean_ctor_get(x_8855, 0); +lean_dec(x_8860); +x_8861 = lean_st_ref_get(x_5, x_8856); +x_8862 = lean_ctor_get(x_8861, 0); +lean_inc(x_8862); +x_8863 = lean_ctor_get(x_8861, 1); +lean_inc(x_8863); +if (lean_is_exclusive(x_8861)) { + lean_ctor_release(x_8861, 0); + lean_ctor_release(x_8861, 1); + x_8864 = x_8861; +} else { + lean_dec_ref(x_8861); + x_8864 = lean_box(0); +} +x_8865 = lean_ctor_get(x_8862, 0); +lean_inc(x_8865); +lean_dec(x_8862); +x_8866 = 0; +lean_inc(x_8845); +lean_inc(x_8865); +x_8867 = l_Lean_Environment_find_x3f(x_8865, x_8845, x_8866); +if (lean_obj_tag(x_8867) == 0) +{ +lean_object* x_8868; lean_object* x_8869; +lean_dec(x_8865); +lean_dec(x_8864); +lean_free_object(x_8855); +lean_dec(x_8853); +lean_dec(x_8845); +lean_dec(x_5945); +lean_dec(x_5944); +lean_dec(x_2); +lean_dec(x_1); +x_8868 = l_Lean_IR_ToIR_lowerLet___closed__6; +x_8869 = l_panic___at_Lean_IR_ToIR_lowerAlt_loop___spec__1(x_8868, x_8859, x_4, x_5, x_8863); +return x_8869; +} +else +{ +lean_object* x_8870; +x_8870 = lean_ctor_get(x_8867, 0); +lean_inc(x_8870); +lean_dec(x_8867); +switch (lean_obj_tag(x_8870)) { +case 0: +{ +uint8_t x_8871; +lean_dec(x_8865); +lean_dec(x_5945); +lean_dec(x_5944); +x_8871 = !lean_is_exclusive(x_8870); +if (x_8871 == 0) +{ +lean_object* x_8872; lean_object* x_8873; uint8_t x_8874; +x_8872 = lean_ctor_get(x_8870, 0); +lean_dec(x_8872); +x_8873 = l_Lean_IR_ToIR_lowerLet___closed__9; +x_8874 = lean_name_eq(x_8845, x_8873); +if (x_8874 == 0) +{ +lean_object* x_8875; uint8_t x_8876; +x_8875 = l_Lean_IR_ToIR_lowerLet___closed__11; +x_8876 = lean_name_eq(x_8845, x_8875); +if (x_8876 == 0) +{ +lean_object* 
x_8877; lean_object* x_8878; lean_object* x_8879; +lean_dec(x_8864); +lean_free_object(x_8855); +lean_inc(x_8845); +x_8877 = l_Lean_IR_ToIR_findDecl(x_8845, x_8859, x_4, x_5, x_8863); +x_8878 = lean_ctor_get(x_8877, 0); +lean_inc(x_8878); +x_8879 = lean_ctor_get(x_8878, 0); +lean_inc(x_8879); +if (lean_obj_tag(x_8879) == 0) +{ +uint8_t x_8880; +lean_dec(x_8853); +lean_dec(x_2); +lean_dec(x_1); +x_8880 = !lean_is_exclusive(x_8877); +if (x_8880 == 0) +{ +lean_object* x_8881; lean_object* x_8882; uint8_t x_8883; +x_8881 = lean_ctor_get(x_8877, 1); +x_8882 = lean_ctor_get(x_8877, 0); +lean_dec(x_8882); +x_8883 = !lean_is_exclusive(x_8878); +if (x_8883 == 0) +{ +lean_object* x_8884; lean_object* x_8885; uint8_t x_8886; lean_object* x_8887; lean_object* x_8888; lean_object* x_8889; lean_object* x_8890; lean_object* x_8891; lean_object* x_8892; +x_8884 = lean_ctor_get(x_8878, 1); +x_8885 = lean_ctor_get(x_8878, 0); +lean_dec(x_8885); +x_8886 = 1; +x_8887 = l_Lean_IR_ToIR_lowerLet___closed__14; +x_8888 = l_Lean_Name_toString(x_8845, x_8886, x_8887); +lean_ctor_set_tag(x_8870, 3); +lean_ctor_set(x_8870, 0, x_8888); +x_8889 = l_Lean_IR_ToIR_lowerLet___closed__13; +lean_ctor_set_tag(x_8878, 5); +lean_ctor_set(x_8878, 1, x_8870); +lean_ctor_set(x_8878, 0, x_8889); +x_8890 = l_Lean_IR_ToIR_lowerLet___closed__16; +lean_ctor_set_tag(x_8877, 5); +lean_ctor_set(x_8877, 1, x_8890); +x_8891 = l_Lean_MessageData_ofFormat(x_8877); +x_8892 = l_Lean_throwError___at_Lean_IR_ToIR_lowerLet___spec__1(x_8891, x_8884, x_4, x_5, x_8881); +lean_dec(x_5); +lean_dec(x_4); +lean_dec(x_8884); +return x_8892; +} +else +{ +lean_object* x_8893; uint8_t x_8894; lean_object* x_8895; lean_object* x_8896; lean_object* x_8897; lean_object* x_8898; lean_object* x_8899; lean_object* x_8900; lean_object* x_8901; +x_8893 = lean_ctor_get(x_8878, 1); +lean_inc(x_8893); +lean_dec(x_8878); +x_8894 = 1; +x_8895 = l_Lean_IR_ToIR_lowerLet___closed__14; +x_8896 = l_Lean_Name_toString(x_8845, x_8894, x_8895); +lean_ctor_set_tag(x_8870, 3); +lean_ctor_set(x_8870, 0, x_8896); +x_8897 = l_Lean_IR_ToIR_lowerLet___closed__13; +x_8898 = lean_alloc_ctor(5, 2, 0); +lean_ctor_set(x_8898, 0, x_8897); +lean_ctor_set(x_8898, 1, x_8870); +x_8899 = l_Lean_IR_ToIR_lowerLet___closed__16; +lean_ctor_set_tag(x_8877, 5); +lean_ctor_set(x_8877, 1, x_8899); +lean_ctor_set(x_8877, 0, x_8898); +x_8900 = l_Lean_MessageData_ofFormat(x_8877); +x_8901 = l_Lean_throwError___at_Lean_IR_ToIR_lowerLet___spec__1(x_8900, x_8893, x_4, x_5, x_8881); +lean_dec(x_5); +lean_dec(x_4); +lean_dec(x_8893); +return x_8901; +} +} +else +{ +lean_object* x_8902; lean_object* x_8903; lean_object* x_8904; uint8_t x_8905; lean_object* x_8906; lean_object* x_8907; lean_object* x_8908; lean_object* x_8909; lean_object* x_8910; lean_object* x_8911; lean_object* x_8912; lean_object* x_8913; +x_8902 = lean_ctor_get(x_8877, 1); +lean_inc(x_8902); +lean_dec(x_8877); +x_8903 = lean_ctor_get(x_8878, 1); +lean_inc(x_8903); +if (lean_is_exclusive(x_8878)) { + lean_ctor_release(x_8878, 0); + lean_ctor_release(x_8878, 1); + x_8904 = x_8878; +} else { + lean_dec_ref(x_8878); + x_8904 = lean_box(0); +} +x_8905 = 1; +x_8906 = l_Lean_IR_ToIR_lowerLet___closed__14; +x_8907 = l_Lean_Name_toString(x_8845, x_8905, x_8906); +lean_ctor_set_tag(x_8870, 3); +lean_ctor_set(x_8870, 0, x_8907); +x_8908 = l_Lean_IR_ToIR_lowerLet___closed__13; +if (lean_is_scalar(x_8904)) { + x_8909 = lean_alloc_ctor(5, 2, 0); +} else { + x_8909 = x_8904; + lean_ctor_set_tag(x_8909, 5); +} +lean_ctor_set(x_8909, 0, x_8908); 
+lean_ctor_set(x_8909, 1, x_8870); +x_8910 = l_Lean_IR_ToIR_lowerLet___closed__16; +x_8911 = lean_alloc_ctor(5, 2, 0); +lean_ctor_set(x_8911, 0, x_8909); +lean_ctor_set(x_8911, 1, x_8910); +x_8912 = l_Lean_MessageData_ofFormat(x_8911); +x_8913 = l_Lean_throwError___at_Lean_IR_ToIR_lowerLet___spec__1(x_8912, x_8903, x_4, x_5, x_8902); +lean_dec(x_5); +lean_dec(x_4); +lean_dec(x_8903); +return x_8913; +} +} +else +{ +lean_object* x_8914; uint8_t x_8915; +lean_free_object(x_8870); +x_8914 = lean_ctor_get(x_8877, 1); +lean_inc(x_8914); +lean_dec(x_8877); +x_8915 = !lean_is_exclusive(x_8878); +if (x_8915 == 0) +{ +lean_object* x_8916; lean_object* x_8917; lean_object* x_8918; lean_object* x_8919; lean_object* x_8920; lean_object* x_8921; uint8_t x_8922; +x_8916 = lean_ctor_get(x_8878, 1); +x_8917 = lean_ctor_get(x_8878, 0); +lean_dec(x_8917); +x_8918 = lean_ctor_get(x_8879, 0); +lean_inc(x_8918); +lean_dec(x_8879); +x_8919 = lean_array_get_size(x_8853); +x_8920 = l_Lean_IR_Decl_params(x_8918); +lean_dec(x_8918); +x_8921 = lean_array_get_size(x_8920); +lean_dec(x_8920); +x_8922 = lean_nat_dec_lt(x_8919, x_8921); +if (x_8922 == 0) +{ +uint8_t x_8923; +x_8923 = lean_nat_dec_eq(x_8919, x_8921); +if (x_8923 == 0) +{ +lean_object* x_8924; lean_object* x_8925; lean_object* x_8926; lean_object* x_8927; lean_object* x_8928; lean_object* x_8929; lean_object* x_8930; lean_object* x_8931; lean_object* x_8932; lean_object* x_8933; lean_object* x_8934; lean_object* x_8935; lean_object* x_8936; lean_object* x_8937; lean_object* x_8938; lean_object* x_8939; +x_8924 = lean_unsigned_to_nat(0u); +x_8925 = l_Array_extract___rarg(x_8853, x_8924, x_8921); +x_8926 = l_Array_extract___rarg(x_8853, x_8921, x_8919); +lean_dec(x_8919); +lean_dec(x_8853); +lean_ctor_set_tag(x_8878, 6); +lean_ctor_set(x_8878, 1, x_8925); +lean_ctor_set(x_8878, 0, x_8845); +x_8927 = lean_ctor_get(x_1, 0); +lean_inc(x_8927); +x_8928 = l_Lean_IR_ToIR_bindVar(x_8927, x_8916, x_4, x_5, x_8914); +x_8929 = lean_ctor_get(x_8928, 0); +lean_inc(x_8929); +x_8930 = lean_ctor_get(x_8928, 1); +lean_inc(x_8930); +lean_dec(x_8928); +x_8931 = lean_ctor_get(x_8929, 0); +lean_inc(x_8931); +x_8932 = lean_ctor_get(x_8929, 1); +lean_inc(x_8932); +lean_dec(x_8929); +x_8933 = l_Lean_IR_ToIR_newVar(x_8932, x_4, x_5, x_8930); +x_8934 = lean_ctor_get(x_8933, 0); +lean_inc(x_8934); +x_8935 = lean_ctor_get(x_8933, 1); +lean_inc(x_8935); +lean_dec(x_8933); +x_8936 = lean_ctor_get(x_8934, 0); +lean_inc(x_8936); +x_8937 = lean_ctor_get(x_8934, 1); +lean_inc(x_8937); +lean_dec(x_8934); +x_8938 = lean_ctor_get(x_1, 2); +lean_inc(x_8938); +lean_dec(x_1); +lean_inc(x_5); +lean_inc(x_4); +x_8939 = l_Lean_IR_ToIR_lowerType(x_8938, x_8937, x_4, x_5, x_8935); +if (lean_obj_tag(x_8939) == 0) +{ +lean_object* x_8940; lean_object* x_8941; lean_object* x_8942; lean_object* x_8943; lean_object* x_8944; +x_8940 = lean_ctor_get(x_8939, 0); +lean_inc(x_8940); +x_8941 = lean_ctor_get(x_8939, 1); +lean_inc(x_8941); +lean_dec(x_8939); +x_8942 = lean_ctor_get(x_8940, 0); +lean_inc(x_8942); +x_8943 = lean_ctor_get(x_8940, 1); +lean_inc(x_8943); +lean_dec(x_8940); +x_8944 = l_Lean_IR_ToIR_lowerLet___lambda__3(x_2, x_8936, x_8926, x_8931, x_8878, x_8942, x_8943, x_4, x_5, x_8941); +return x_8944; +} +else +{ +uint8_t x_8945; +lean_dec(x_8936); +lean_dec(x_8931); +lean_dec(x_8878); +lean_dec(x_8926); +lean_dec(x_5); +lean_dec(x_4); +lean_dec(x_2); +x_8945 = !lean_is_exclusive(x_8939); +if (x_8945 == 0) +{ +return x_8939; +} +else +{ +lean_object* x_8946; lean_object* x_8947; lean_object* 
x_8948; +x_8946 = lean_ctor_get(x_8939, 0); +x_8947 = lean_ctor_get(x_8939, 1); +lean_inc(x_8947); +lean_inc(x_8946); +lean_dec(x_8939); +x_8948 = lean_alloc_ctor(1, 2, 0); +lean_ctor_set(x_8948, 0, x_8946); +lean_ctor_set(x_8948, 1, x_8947); +return x_8948; +} +} +} +else +{ +lean_object* x_8949; lean_object* x_8950; lean_object* x_8951; lean_object* x_8952; lean_object* x_8953; lean_object* x_8954; lean_object* x_8955; lean_object* x_8956; +lean_dec(x_8921); +lean_dec(x_8919); +lean_ctor_set_tag(x_8878, 6); +lean_ctor_set(x_8878, 1, x_8853); +lean_ctor_set(x_8878, 0, x_8845); +x_8949 = lean_ctor_get(x_1, 0); +lean_inc(x_8949); +x_8950 = l_Lean_IR_ToIR_bindVar(x_8949, x_8916, x_4, x_5, x_8914); +x_8951 = lean_ctor_get(x_8950, 0); +lean_inc(x_8951); +x_8952 = lean_ctor_get(x_8950, 1); +lean_inc(x_8952); +lean_dec(x_8950); +x_8953 = lean_ctor_get(x_8951, 0); +lean_inc(x_8953); +x_8954 = lean_ctor_get(x_8951, 1); +lean_inc(x_8954); +lean_dec(x_8951); +x_8955 = lean_ctor_get(x_1, 2); +lean_inc(x_8955); +lean_dec(x_1); +lean_inc(x_5); +lean_inc(x_4); +x_8956 = l_Lean_IR_ToIR_lowerType(x_8955, x_8954, x_4, x_5, x_8952); +if (lean_obj_tag(x_8956) == 0) +{ +lean_object* x_8957; lean_object* x_8958; lean_object* x_8959; lean_object* x_8960; lean_object* x_8961; +x_8957 = lean_ctor_get(x_8956, 0); +lean_inc(x_8957); +x_8958 = lean_ctor_get(x_8956, 1); +lean_inc(x_8958); +lean_dec(x_8956); +x_8959 = lean_ctor_get(x_8957, 0); +lean_inc(x_8959); +x_8960 = lean_ctor_get(x_8957, 1); +lean_inc(x_8960); +lean_dec(x_8957); +x_8961 = l_Lean_IR_ToIR_lowerLet___lambda__1(x_2, x_8953, x_8878, x_8959, x_8960, x_4, x_5, x_8958); +return x_8961; +} +else +{ +uint8_t x_8962; +lean_dec(x_8953); +lean_dec(x_8878); +lean_dec(x_5); +lean_dec(x_4); +lean_dec(x_2); +x_8962 = !lean_is_exclusive(x_8956); +if (x_8962 == 0) +{ +return x_8956; +} +else +{ +lean_object* x_8963; lean_object* x_8964; lean_object* x_8965; +x_8963 = lean_ctor_get(x_8956, 0); +x_8964 = lean_ctor_get(x_8956, 1); +lean_inc(x_8964); +lean_inc(x_8963); +lean_dec(x_8956); +x_8965 = lean_alloc_ctor(1, 2, 0); +lean_ctor_set(x_8965, 0, x_8963); +lean_ctor_set(x_8965, 1, x_8964); +return x_8965; +} +} +} +} +else +{ +lean_object* x_8966; lean_object* x_8967; lean_object* x_8968; lean_object* x_8969; lean_object* x_8970; lean_object* x_8971; lean_object* x_8972; lean_object* x_8973; +lean_dec(x_8921); +lean_dec(x_8919); +lean_ctor_set_tag(x_8878, 7); +lean_ctor_set(x_8878, 1, x_8853); +lean_ctor_set(x_8878, 0, x_8845); +x_8966 = lean_ctor_get(x_1, 0); +lean_inc(x_8966); +lean_dec(x_1); +x_8967 = l_Lean_IR_ToIR_bindVar(x_8966, x_8916, x_4, x_5, x_8914); +x_8968 = lean_ctor_get(x_8967, 0); +lean_inc(x_8968); +x_8969 = lean_ctor_get(x_8967, 1); +lean_inc(x_8969); +lean_dec(x_8967); +x_8970 = lean_ctor_get(x_8968, 0); +lean_inc(x_8970); +x_8971 = lean_ctor_get(x_8968, 1); +lean_inc(x_8971); +lean_dec(x_8968); +x_8972 = lean_box(7); +x_8973 = l_Lean_IR_ToIR_lowerLet___lambda__1(x_2, x_8970, x_8878, x_8972, x_8971, x_4, x_5, x_8969); +return x_8973; +} +} +else +{ +lean_object* x_8974; lean_object* x_8975; lean_object* x_8976; lean_object* x_8977; lean_object* x_8978; uint8_t x_8979; +x_8974 = lean_ctor_get(x_8878, 1); +lean_inc(x_8974); +lean_dec(x_8878); +x_8975 = lean_ctor_get(x_8879, 0); +lean_inc(x_8975); +lean_dec(x_8879); +x_8976 = lean_array_get_size(x_8853); +x_8977 = l_Lean_IR_Decl_params(x_8975); +lean_dec(x_8975); +x_8978 = lean_array_get_size(x_8977); +lean_dec(x_8977); +x_8979 = lean_nat_dec_lt(x_8976, x_8978); +if (x_8979 == 0) +{ +uint8_t x_8980; 
+x_8980 = lean_nat_dec_eq(x_8976, x_8978); +if (x_8980 == 0) +{ +lean_object* x_8981; lean_object* x_8982; lean_object* x_8983; lean_object* x_8984; lean_object* x_8985; lean_object* x_8986; lean_object* x_8987; lean_object* x_8988; lean_object* x_8989; lean_object* x_8990; lean_object* x_8991; lean_object* x_8992; lean_object* x_8993; lean_object* x_8994; lean_object* x_8995; lean_object* x_8996; lean_object* x_8997; +x_8981 = lean_unsigned_to_nat(0u); +x_8982 = l_Array_extract___rarg(x_8853, x_8981, x_8978); +x_8983 = l_Array_extract___rarg(x_8853, x_8978, x_8976); +lean_dec(x_8976); +lean_dec(x_8853); +x_8984 = lean_alloc_ctor(6, 2, 0); +lean_ctor_set(x_8984, 0, x_8845); +lean_ctor_set(x_8984, 1, x_8982); +x_8985 = lean_ctor_get(x_1, 0); +lean_inc(x_8985); +x_8986 = l_Lean_IR_ToIR_bindVar(x_8985, x_8974, x_4, x_5, x_8914); +x_8987 = lean_ctor_get(x_8986, 0); +lean_inc(x_8987); +x_8988 = lean_ctor_get(x_8986, 1); +lean_inc(x_8988); +lean_dec(x_8986); +x_8989 = lean_ctor_get(x_8987, 0); +lean_inc(x_8989); +x_8990 = lean_ctor_get(x_8987, 1); +lean_inc(x_8990); +lean_dec(x_8987); +x_8991 = l_Lean_IR_ToIR_newVar(x_8990, x_4, x_5, x_8988); +x_8992 = lean_ctor_get(x_8991, 0); +lean_inc(x_8992); +x_8993 = lean_ctor_get(x_8991, 1); +lean_inc(x_8993); +lean_dec(x_8991); +x_8994 = lean_ctor_get(x_8992, 0); +lean_inc(x_8994); +x_8995 = lean_ctor_get(x_8992, 1); +lean_inc(x_8995); +lean_dec(x_8992); +x_8996 = lean_ctor_get(x_1, 2); +lean_inc(x_8996); +lean_dec(x_1); +lean_inc(x_5); +lean_inc(x_4); +x_8997 = l_Lean_IR_ToIR_lowerType(x_8996, x_8995, x_4, x_5, x_8993); +if (lean_obj_tag(x_8997) == 0) +{ +lean_object* x_8998; lean_object* x_8999; lean_object* x_9000; lean_object* x_9001; lean_object* x_9002; +x_8998 = lean_ctor_get(x_8997, 0); +lean_inc(x_8998); +x_8999 = lean_ctor_get(x_8997, 1); +lean_inc(x_8999); +lean_dec(x_8997); +x_9000 = lean_ctor_get(x_8998, 0); +lean_inc(x_9000); +x_9001 = lean_ctor_get(x_8998, 1); +lean_inc(x_9001); +lean_dec(x_8998); +x_9002 = l_Lean_IR_ToIR_lowerLet___lambda__3(x_2, x_8994, x_8983, x_8989, x_8984, x_9000, x_9001, x_4, x_5, x_8999); +return x_9002; +} +else +{ +lean_object* x_9003; lean_object* x_9004; lean_object* x_9005; lean_object* x_9006; +lean_dec(x_8994); +lean_dec(x_8989); +lean_dec(x_8984); +lean_dec(x_8983); +lean_dec(x_5); +lean_dec(x_4); +lean_dec(x_2); +x_9003 = lean_ctor_get(x_8997, 0); +lean_inc(x_9003); +x_9004 = lean_ctor_get(x_8997, 1); +lean_inc(x_9004); +if (lean_is_exclusive(x_8997)) { + lean_ctor_release(x_8997, 0); + lean_ctor_release(x_8997, 1); + x_9005 = x_8997; +} else { + lean_dec_ref(x_8997); + x_9005 = lean_box(0); +} +if (lean_is_scalar(x_9005)) { + x_9006 = lean_alloc_ctor(1, 2, 0); +} else { + x_9006 = x_9005; +} +lean_ctor_set(x_9006, 0, x_9003); +lean_ctor_set(x_9006, 1, x_9004); +return x_9006; +} +} +else +{ +lean_object* x_9007; lean_object* x_9008; lean_object* x_9009; lean_object* x_9010; lean_object* x_9011; lean_object* x_9012; lean_object* x_9013; lean_object* x_9014; lean_object* x_9015; +lean_dec(x_8978); +lean_dec(x_8976); +x_9007 = lean_alloc_ctor(6, 2, 0); +lean_ctor_set(x_9007, 0, x_8845); +lean_ctor_set(x_9007, 1, x_8853); +x_9008 = lean_ctor_get(x_1, 0); +lean_inc(x_9008); +x_9009 = l_Lean_IR_ToIR_bindVar(x_9008, x_8974, x_4, x_5, x_8914); +x_9010 = lean_ctor_get(x_9009, 0); +lean_inc(x_9010); +x_9011 = lean_ctor_get(x_9009, 1); +lean_inc(x_9011); +lean_dec(x_9009); +x_9012 = lean_ctor_get(x_9010, 0); +lean_inc(x_9012); +x_9013 = lean_ctor_get(x_9010, 1); +lean_inc(x_9013); +lean_dec(x_9010); +x_9014 = 
lean_ctor_get(x_1, 2); +lean_inc(x_9014); +lean_dec(x_1); +lean_inc(x_5); +lean_inc(x_4); +x_9015 = l_Lean_IR_ToIR_lowerType(x_9014, x_9013, x_4, x_5, x_9011); +if (lean_obj_tag(x_9015) == 0) +{ +lean_object* x_9016; lean_object* x_9017; lean_object* x_9018; lean_object* x_9019; lean_object* x_9020; +x_9016 = lean_ctor_get(x_9015, 0); +lean_inc(x_9016); +x_9017 = lean_ctor_get(x_9015, 1); +lean_inc(x_9017); +lean_dec(x_9015); +x_9018 = lean_ctor_get(x_9016, 0); +lean_inc(x_9018); +x_9019 = lean_ctor_get(x_9016, 1); +lean_inc(x_9019); +lean_dec(x_9016); +x_9020 = l_Lean_IR_ToIR_lowerLet___lambda__1(x_2, x_9012, x_9007, x_9018, x_9019, x_4, x_5, x_9017); +return x_9020; +} +else +{ +lean_object* x_9021; lean_object* x_9022; lean_object* x_9023; lean_object* x_9024; +lean_dec(x_9012); +lean_dec(x_9007); +lean_dec(x_5); +lean_dec(x_4); +lean_dec(x_2); +x_9021 = lean_ctor_get(x_9015, 0); +lean_inc(x_9021); +x_9022 = lean_ctor_get(x_9015, 1); +lean_inc(x_9022); +if (lean_is_exclusive(x_9015)) { + lean_ctor_release(x_9015, 0); + lean_ctor_release(x_9015, 1); + x_9023 = x_9015; +} else { + lean_dec_ref(x_9015); + x_9023 = lean_box(0); +} +if (lean_is_scalar(x_9023)) { + x_9024 = lean_alloc_ctor(1, 2, 0); +} else { + x_9024 = x_9023; +} +lean_ctor_set(x_9024, 0, x_9021); +lean_ctor_set(x_9024, 1, x_9022); +return x_9024; +} +} +} +else +{ +lean_object* x_9025; lean_object* x_9026; lean_object* x_9027; lean_object* x_9028; lean_object* x_9029; lean_object* x_9030; lean_object* x_9031; lean_object* x_9032; lean_object* x_9033; +lean_dec(x_8978); +lean_dec(x_8976); +x_9025 = lean_alloc_ctor(7, 2, 0); +lean_ctor_set(x_9025, 0, x_8845); +lean_ctor_set(x_9025, 1, x_8853); +x_9026 = lean_ctor_get(x_1, 0); +lean_inc(x_9026); +lean_dec(x_1); +x_9027 = l_Lean_IR_ToIR_bindVar(x_9026, x_8974, x_4, x_5, x_8914); +x_9028 = lean_ctor_get(x_9027, 0); +lean_inc(x_9028); +x_9029 = lean_ctor_get(x_9027, 1); +lean_inc(x_9029); +lean_dec(x_9027); +x_9030 = lean_ctor_get(x_9028, 0); +lean_inc(x_9030); +x_9031 = lean_ctor_get(x_9028, 1); +lean_inc(x_9031); +lean_dec(x_9028); +x_9032 = lean_box(7); +x_9033 = l_Lean_IR_ToIR_lowerLet___lambda__1(x_2, x_9030, x_9025, x_9032, x_9031, x_4, x_5, x_9029); +return x_9033; +} +} +} +} +else +{ +lean_object* x_9034; lean_object* x_9035; +lean_free_object(x_8870); +lean_dec(x_8853); +lean_dec(x_8845); +lean_dec(x_5); +lean_dec(x_4); +lean_dec(x_2); +lean_dec(x_1); +x_9034 = lean_box(13); +lean_ctor_set(x_8855, 0, x_9034); +if (lean_is_scalar(x_8864)) { + x_9035 = lean_alloc_ctor(0, 2, 0); +} else { + x_9035 = x_8864; +} +lean_ctor_set(x_9035, 0, x_8855); +lean_ctor_set(x_9035, 1, x_8863); +return x_9035; +} +} +else +{ +lean_object* x_9036; lean_object* x_9037; lean_object* x_9038; +lean_free_object(x_8870); +lean_dec(x_8864); +lean_free_object(x_8855); +lean_dec(x_8845); +x_9036 = l_Lean_IR_instInhabitedArg; +x_9037 = lean_unsigned_to_nat(2u); +x_9038 = lean_array_get(x_9036, x_8853, x_9037); +lean_dec(x_8853); +if (lean_obj_tag(x_9038) == 0) +{ +lean_object* x_9039; lean_object* x_9040; lean_object* x_9041; lean_object* x_9042; lean_object* x_9043; lean_object* x_9044; lean_object* x_9045; +x_9039 = lean_ctor_get(x_9038, 0); +lean_inc(x_9039); +lean_dec(x_9038); +x_9040 = lean_ctor_get(x_1, 0); +lean_inc(x_9040); +lean_dec(x_1); +x_9041 = l_Lean_IR_ToIR_bindVarToVarId(x_9040, x_9039, x_8859, x_4, x_5, x_8863); +x_9042 = lean_ctor_get(x_9041, 0); +lean_inc(x_9042); +x_9043 = lean_ctor_get(x_9041, 1); +lean_inc(x_9043); +lean_dec(x_9041); +x_9044 = lean_ctor_get(x_9042, 1); 
+lean_inc(x_9044); +lean_dec(x_9042); +x_9045 = l_Lean_IR_ToIR_lowerCode(x_2, x_9044, x_4, x_5, x_9043); +return x_9045; +} +else +{ +lean_object* x_9046; lean_object* x_9047; lean_object* x_9048; lean_object* x_9049; lean_object* x_9050; lean_object* x_9051; +x_9046 = lean_ctor_get(x_1, 0); +lean_inc(x_9046); +lean_dec(x_1); +x_9047 = l_Lean_IR_ToIR_bindErased(x_9046, x_8859, x_4, x_5, x_8863); +x_9048 = lean_ctor_get(x_9047, 0); +lean_inc(x_9048); +x_9049 = lean_ctor_get(x_9047, 1); +lean_inc(x_9049); +lean_dec(x_9047); +x_9050 = lean_ctor_get(x_9048, 1); +lean_inc(x_9050); +lean_dec(x_9048); +x_9051 = l_Lean_IR_ToIR_lowerCode(x_2, x_9050, x_4, x_5, x_9049); +return x_9051; +} +} +} +else +{ +lean_object* x_9052; uint8_t x_9053; +lean_dec(x_8870); +x_9052 = l_Lean_IR_ToIR_lowerLet___closed__9; +x_9053 = lean_name_eq(x_8845, x_9052); +if (x_9053 == 0) +{ +lean_object* x_9054; uint8_t x_9055; +x_9054 = l_Lean_IR_ToIR_lowerLet___closed__11; +x_9055 = lean_name_eq(x_8845, x_9054); +if (x_9055 == 0) +{ +lean_object* x_9056; lean_object* x_9057; lean_object* x_9058; +lean_dec(x_8864); +lean_free_object(x_8855); +lean_inc(x_8845); +x_9056 = l_Lean_IR_ToIR_findDecl(x_8845, x_8859, x_4, x_5, x_8863); +x_9057 = lean_ctor_get(x_9056, 0); +lean_inc(x_9057); +x_9058 = lean_ctor_get(x_9057, 0); +lean_inc(x_9058); +if (lean_obj_tag(x_9058) == 0) +{ +lean_object* x_9059; lean_object* x_9060; lean_object* x_9061; lean_object* x_9062; uint8_t x_9063; lean_object* x_9064; lean_object* x_9065; lean_object* x_9066; lean_object* x_9067; lean_object* x_9068; lean_object* x_9069; lean_object* x_9070; lean_object* x_9071; lean_object* x_9072; +lean_dec(x_8853); +lean_dec(x_2); +lean_dec(x_1); +x_9059 = lean_ctor_get(x_9056, 1); +lean_inc(x_9059); +if (lean_is_exclusive(x_9056)) { + lean_ctor_release(x_9056, 0); + lean_ctor_release(x_9056, 1); + x_9060 = x_9056; +} else { + lean_dec_ref(x_9056); + x_9060 = lean_box(0); +} +x_9061 = lean_ctor_get(x_9057, 1); +lean_inc(x_9061); +if (lean_is_exclusive(x_9057)) { + lean_ctor_release(x_9057, 0); + lean_ctor_release(x_9057, 1); + x_9062 = x_9057; +} else { + lean_dec_ref(x_9057); + x_9062 = lean_box(0); +} +x_9063 = 1; +x_9064 = l_Lean_IR_ToIR_lowerLet___closed__14; +x_9065 = l_Lean_Name_toString(x_8845, x_9063, x_9064); +x_9066 = lean_alloc_ctor(3, 1, 0); +lean_ctor_set(x_9066, 0, x_9065); +x_9067 = l_Lean_IR_ToIR_lowerLet___closed__13; +if (lean_is_scalar(x_9062)) { + x_9068 = lean_alloc_ctor(5, 2, 0); +} else { + x_9068 = x_9062; + lean_ctor_set_tag(x_9068, 5); +} +lean_ctor_set(x_9068, 0, x_9067); +lean_ctor_set(x_9068, 1, x_9066); +x_9069 = l_Lean_IR_ToIR_lowerLet___closed__16; +if (lean_is_scalar(x_9060)) { + x_9070 = lean_alloc_ctor(5, 2, 0); +} else { + x_9070 = x_9060; + lean_ctor_set_tag(x_9070, 5); +} +lean_ctor_set(x_9070, 0, x_9068); +lean_ctor_set(x_9070, 1, x_9069); +x_9071 = l_Lean_MessageData_ofFormat(x_9070); +x_9072 = l_Lean_throwError___at_Lean_IR_ToIR_lowerLet___spec__1(x_9071, x_9061, x_4, x_5, x_9059); +lean_dec(x_5); +lean_dec(x_4); +lean_dec(x_9061); +return x_9072; +} +else +{ +lean_object* x_9073; lean_object* x_9074; lean_object* x_9075; lean_object* x_9076; lean_object* x_9077; lean_object* x_9078; lean_object* x_9079; uint8_t x_9080; +x_9073 = lean_ctor_get(x_9056, 1); +lean_inc(x_9073); +lean_dec(x_9056); +x_9074 = lean_ctor_get(x_9057, 1); +lean_inc(x_9074); +if (lean_is_exclusive(x_9057)) { + lean_ctor_release(x_9057, 0); + lean_ctor_release(x_9057, 1); + x_9075 = x_9057; +} else { + lean_dec_ref(x_9057); + x_9075 = lean_box(0); +} 
+x_9076 = lean_ctor_get(x_9058, 0); +lean_inc(x_9076); +lean_dec(x_9058); +x_9077 = lean_array_get_size(x_8853); +x_9078 = l_Lean_IR_Decl_params(x_9076); +lean_dec(x_9076); +x_9079 = lean_array_get_size(x_9078); +lean_dec(x_9078); +x_9080 = lean_nat_dec_lt(x_9077, x_9079); +if (x_9080 == 0) +{ +uint8_t x_9081; +x_9081 = lean_nat_dec_eq(x_9077, x_9079); +if (x_9081 == 0) +{ +lean_object* x_9082; lean_object* x_9083; lean_object* x_9084; lean_object* x_9085; lean_object* x_9086; lean_object* x_9087; lean_object* x_9088; lean_object* x_9089; lean_object* x_9090; lean_object* x_9091; lean_object* x_9092; lean_object* x_9093; lean_object* x_9094; lean_object* x_9095; lean_object* x_9096; lean_object* x_9097; lean_object* x_9098; +x_9082 = lean_unsigned_to_nat(0u); +x_9083 = l_Array_extract___rarg(x_8853, x_9082, x_9079); +x_9084 = l_Array_extract___rarg(x_8853, x_9079, x_9077); +lean_dec(x_9077); +lean_dec(x_8853); +if (lean_is_scalar(x_9075)) { + x_9085 = lean_alloc_ctor(6, 2, 0); +} else { + x_9085 = x_9075; + lean_ctor_set_tag(x_9085, 6); +} +lean_ctor_set(x_9085, 0, x_8845); +lean_ctor_set(x_9085, 1, x_9083); +x_9086 = lean_ctor_get(x_1, 0); +lean_inc(x_9086); +x_9087 = l_Lean_IR_ToIR_bindVar(x_9086, x_9074, x_4, x_5, x_9073); +x_9088 = lean_ctor_get(x_9087, 0); +lean_inc(x_9088); +x_9089 = lean_ctor_get(x_9087, 1); +lean_inc(x_9089); +lean_dec(x_9087); +x_9090 = lean_ctor_get(x_9088, 0); +lean_inc(x_9090); +x_9091 = lean_ctor_get(x_9088, 1); +lean_inc(x_9091); +lean_dec(x_9088); +x_9092 = l_Lean_IR_ToIR_newVar(x_9091, x_4, x_5, x_9089); +x_9093 = lean_ctor_get(x_9092, 0); +lean_inc(x_9093); +x_9094 = lean_ctor_get(x_9092, 1); +lean_inc(x_9094); +lean_dec(x_9092); +x_9095 = lean_ctor_get(x_9093, 0); +lean_inc(x_9095); +x_9096 = lean_ctor_get(x_9093, 1); +lean_inc(x_9096); +lean_dec(x_9093); +x_9097 = lean_ctor_get(x_1, 2); +lean_inc(x_9097); +lean_dec(x_1); +lean_inc(x_5); +lean_inc(x_4); +x_9098 = l_Lean_IR_ToIR_lowerType(x_9097, x_9096, x_4, x_5, x_9094); +if (lean_obj_tag(x_9098) == 0) +{ +lean_object* x_9099; lean_object* x_9100; lean_object* x_9101; lean_object* x_9102; lean_object* x_9103; +x_9099 = lean_ctor_get(x_9098, 0); +lean_inc(x_9099); +x_9100 = lean_ctor_get(x_9098, 1); +lean_inc(x_9100); +lean_dec(x_9098); +x_9101 = lean_ctor_get(x_9099, 0); +lean_inc(x_9101); +x_9102 = lean_ctor_get(x_9099, 1); +lean_inc(x_9102); +lean_dec(x_9099); +x_9103 = l_Lean_IR_ToIR_lowerLet___lambda__3(x_2, x_9095, x_9084, x_9090, x_9085, x_9101, x_9102, x_4, x_5, x_9100); +return x_9103; +} +else +{ +lean_object* x_9104; lean_object* x_9105; lean_object* x_9106; lean_object* x_9107; +lean_dec(x_9095); +lean_dec(x_9090); +lean_dec(x_9085); +lean_dec(x_9084); +lean_dec(x_5); +lean_dec(x_4); +lean_dec(x_2); +x_9104 = lean_ctor_get(x_9098, 0); +lean_inc(x_9104); +x_9105 = lean_ctor_get(x_9098, 1); +lean_inc(x_9105); +if (lean_is_exclusive(x_9098)) { + lean_ctor_release(x_9098, 0); + lean_ctor_release(x_9098, 1); + x_9106 = x_9098; +} else { + lean_dec_ref(x_9098); + x_9106 = lean_box(0); +} +if (lean_is_scalar(x_9106)) { + x_9107 = lean_alloc_ctor(1, 2, 0); +} else { + x_9107 = x_9106; +} +lean_ctor_set(x_9107, 0, x_9104); +lean_ctor_set(x_9107, 1, x_9105); +return x_9107; +} +} +else +{ +lean_object* x_9108; lean_object* x_9109; lean_object* x_9110; lean_object* x_9111; lean_object* x_9112; lean_object* x_9113; lean_object* x_9114; lean_object* x_9115; lean_object* x_9116; +lean_dec(x_9079); +lean_dec(x_9077); +if (lean_is_scalar(x_9075)) { + x_9108 = lean_alloc_ctor(6, 2, 0); +} else { + x_9108 = 
x_9075; + lean_ctor_set_tag(x_9108, 6); +} +lean_ctor_set(x_9108, 0, x_8845); +lean_ctor_set(x_9108, 1, x_8853); +x_9109 = lean_ctor_get(x_1, 0); +lean_inc(x_9109); +x_9110 = l_Lean_IR_ToIR_bindVar(x_9109, x_9074, x_4, x_5, x_9073); +x_9111 = lean_ctor_get(x_9110, 0); +lean_inc(x_9111); +x_9112 = lean_ctor_get(x_9110, 1); +lean_inc(x_9112); +lean_dec(x_9110); +x_9113 = lean_ctor_get(x_9111, 0); +lean_inc(x_9113); +x_9114 = lean_ctor_get(x_9111, 1); +lean_inc(x_9114); +lean_dec(x_9111); +x_9115 = lean_ctor_get(x_1, 2); +lean_inc(x_9115); +lean_dec(x_1); +lean_inc(x_5); +lean_inc(x_4); +x_9116 = l_Lean_IR_ToIR_lowerType(x_9115, x_9114, x_4, x_5, x_9112); +if (lean_obj_tag(x_9116) == 0) +{ +lean_object* x_9117; lean_object* x_9118; lean_object* x_9119; lean_object* x_9120; lean_object* x_9121; +x_9117 = lean_ctor_get(x_9116, 0); +lean_inc(x_9117); +x_9118 = lean_ctor_get(x_9116, 1); +lean_inc(x_9118); +lean_dec(x_9116); +x_9119 = lean_ctor_get(x_9117, 0); +lean_inc(x_9119); +x_9120 = lean_ctor_get(x_9117, 1); +lean_inc(x_9120); +lean_dec(x_9117); +x_9121 = l_Lean_IR_ToIR_lowerLet___lambda__1(x_2, x_9113, x_9108, x_9119, x_9120, x_4, x_5, x_9118); +return x_9121; +} +else +{ +lean_object* x_9122; lean_object* x_9123; lean_object* x_9124; lean_object* x_9125; +lean_dec(x_9113); +lean_dec(x_9108); +lean_dec(x_5); +lean_dec(x_4); +lean_dec(x_2); +x_9122 = lean_ctor_get(x_9116, 0); +lean_inc(x_9122); +x_9123 = lean_ctor_get(x_9116, 1); +lean_inc(x_9123); +if (lean_is_exclusive(x_9116)) { + lean_ctor_release(x_9116, 0); + lean_ctor_release(x_9116, 1); + x_9124 = x_9116; +} else { + lean_dec_ref(x_9116); + x_9124 = lean_box(0); +} +if (lean_is_scalar(x_9124)) { + x_9125 = lean_alloc_ctor(1, 2, 0); +} else { + x_9125 = x_9124; +} +lean_ctor_set(x_9125, 0, x_9122); +lean_ctor_set(x_9125, 1, x_9123); +return x_9125; +} +} +} +else +{ +lean_object* x_9126; lean_object* x_9127; lean_object* x_9128; lean_object* x_9129; lean_object* x_9130; lean_object* x_9131; lean_object* x_9132; lean_object* x_9133; lean_object* x_9134; +lean_dec(x_9079); +lean_dec(x_9077); +if (lean_is_scalar(x_9075)) { + x_9126 = lean_alloc_ctor(7, 2, 0); +} else { + x_9126 = x_9075; + lean_ctor_set_tag(x_9126, 7); +} +lean_ctor_set(x_9126, 0, x_8845); +lean_ctor_set(x_9126, 1, x_8853); +x_9127 = lean_ctor_get(x_1, 0); +lean_inc(x_9127); +lean_dec(x_1); +x_9128 = l_Lean_IR_ToIR_bindVar(x_9127, x_9074, x_4, x_5, x_9073); +x_9129 = lean_ctor_get(x_9128, 0); +lean_inc(x_9129); +x_9130 = lean_ctor_get(x_9128, 1); +lean_inc(x_9130); +lean_dec(x_9128); +x_9131 = lean_ctor_get(x_9129, 0); +lean_inc(x_9131); +x_9132 = lean_ctor_get(x_9129, 1); +lean_inc(x_9132); +lean_dec(x_9129); +x_9133 = lean_box(7); +x_9134 = l_Lean_IR_ToIR_lowerLet___lambda__1(x_2, x_9131, x_9126, x_9133, x_9132, x_4, x_5, x_9130); +return x_9134; +} +} +} +else +{ +lean_object* x_9135; lean_object* x_9136; +lean_dec(x_8853); +lean_dec(x_8845); +lean_dec(x_5); +lean_dec(x_4); +lean_dec(x_2); +lean_dec(x_1); +x_9135 = lean_box(13); +lean_ctor_set(x_8855, 0, x_9135); +if (lean_is_scalar(x_8864)) { + x_9136 = lean_alloc_ctor(0, 2, 0); +} else { + x_9136 = x_8864; +} +lean_ctor_set(x_9136, 0, x_8855); +lean_ctor_set(x_9136, 1, x_8863); +return x_9136; +} +} +else +{ +lean_object* x_9137; lean_object* x_9138; lean_object* x_9139; +lean_dec(x_8864); +lean_free_object(x_8855); +lean_dec(x_8845); +x_9137 = l_Lean_IR_instInhabitedArg; +x_9138 = lean_unsigned_to_nat(2u); +x_9139 = lean_array_get(x_9137, x_8853, x_9138); +lean_dec(x_8853); +if (lean_obj_tag(x_9139) == 0) +{ 
+lean_object* x_9140; lean_object* x_9141; lean_object* x_9142; lean_object* x_9143; lean_object* x_9144; lean_object* x_9145; lean_object* x_9146; +x_9140 = lean_ctor_get(x_9139, 0); +lean_inc(x_9140); +lean_dec(x_9139); +x_9141 = lean_ctor_get(x_1, 0); +lean_inc(x_9141); +lean_dec(x_1); +x_9142 = l_Lean_IR_ToIR_bindVarToVarId(x_9141, x_9140, x_8859, x_4, x_5, x_8863); +x_9143 = lean_ctor_get(x_9142, 0); +lean_inc(x_9143); +x_9144 = lean_ctor_get(x_9142, 1); +lean_inc(x_9144); +lean_dec(x_9142); +x_9145 = lean_ctor_get(x_9143, 1); +lean_inc(x_9145); +lean_dec(x_9143); +x_9146 = l_Lean_IR_ToIR_lowerCode(x_2, x_9145, x_4, x_5, x_9144); +return x_9146; +} +else +{ +lean_object* x_9147; lean_object* x_9148; lean_object* x_9149; lean_object* x_9150; lean_object* x_9151; lean_object* x_9152; +x_9147 = lean_ctor_get(x_1, 0); +lean_inc(x_9147); +lean_dec(x_1); +x_9148 = l_Lean_IR_ToIR_bindErased(x_9147, x_8859, x_4, x_5, x_8863); +x_9149 = lean_ctor_get(x_9148, 0); +lean_inc(x_9149); +x_9150 = lean_ctor_get(x_9148, 1); +lean_inc(x_9150); +lean_dec(x_9148); +x_9151 = lean_ctor_get(x_9149, 1); +lean_inc(x_9151); +lean_dec(x_9149); +x_9152 = l_Lean_IR_ToIR_lowerCode(x_2, x_9151, x_4, x_5, x_9150); +return x_9152; +} +} +} +} +case 1: +{ +lean_object* x_9153; lean_object* x_9154; lean_object* x_9184; lean_object* x_9185; +lean_dec(x_8870); +lean_dec(x_8865); +lean_dec(x_5945); +lean_dec(x_5944); +lean_inc(x_8845); +x_9184 = l_Lean_Compiler_LCNF_getMonoDecl_x3f(x_8845, x_4, x_5, x_8863); +x_9185 = lean_ctor_get(x_9184, 0); +lean_inc(x_9185); +if (lean_obj_tag(x_9185) == 0) +{ +lean_object* x_9186; lean_object* x_9187; +x_9186 = lean_ctor_get(x_9184, 1); +lean_inc(x_9186); +lean_dec(x_9184); +x_9187 = lean_box(0); +lean_ctor_set(x_8855, 0, x_9187); +x_9153 = x_8855; +x_9154 = x_9186; +goto block_9183; +} +else +{ +uint8_t x_9188; +lean_free_object(x_8855); +x_9188 = !lean_is_exclusive(x_9184); +if (x_9188 == 0) +{ +lean_object* x_9189; lean_object* x_9190; uint8_t x_9191; +x_9189 = lean_ctor_get(x_9184, 1); +x_9190 = lean_ctor_get(x_9184, 0); +lean_dec(x_9190); +x_9191 = !lean_is_exclusive(x_9185); +if (x_9191 == 0) +{ +lean_object* x_9192; lean_object* x_9193; lean_object* x_9194; lean_object* x_9195; uint8_t x_9196; +x_9192 = lean_ctor_get(x_9185, 0); +x_9193 = lean_array_get_size(x_8853); +x_9194 = lean_ctor_get(x_9192, 3); +lean_inc(x_9194); +lean_dec(x_9192); +x_9195 = lean_array_get_size(x_9194); +lean_dec(x_9194); +x_9196 = lean_nat_dec_lt(x_9193, x_9195); +if (x_9196 == 0) +{ +uint8_t x_9197; +x_9197 = lean_nat_dec_eq(x_9193, x_9195); +if (x_9197 == 0) +{ +lean_object* x_9198; lean_object* x_9199; lean_object* x_9200; lean_object* x_9201; lean_object* x_9202; lean_object* x_9203; lean_object* x_9204; lean_object* x_9205; lean_object* x_9206; lean_object* x_9207; lean_object* x_9208; lean_object* x_9209; lean_object* x_9210; lean_object* x_9211; lean_object* x_9212; lean_object* x_9213; +x_9198 = lean_unsigned_to_nat(0u); +x_9199 = l_Array_extract___rarg(x_8853, x_9198, x_9195); +x_9200 = l_Array_extract___rarg(x_8853, x_9195, x_9193); +lean_dec(x_9193); +lean_inc(x_8845); +lean_ctor_set_tag(x_9184, 6); +lean_ctor_set(x_9184, 1, x_9199); +lean_ctor_set(x_9184, 0, x_8845); +x_9201 = lean_ctor_get(x_1, 0); +lean_inc(x_9201); +x_9202 = l_Lean_IR_ToIR_bindVar(x_9201, x_8859, x_4, x_5, x_9189); +x_9203 = lean_ctor_get(x_9202, 0); +lean_inc(x_9203); +x_9204 = lean_ctor_get(x_9202, 1); +lean_inc(x_9204); +lean_dec(x_9202); +x_9205 = lean_ctor_get(x_9203, 0); +lean_inc(x_9205); +x_9206 = 
lean_ctor_get(x_9203, 1); +lean_inc(x_9206); +lean_dec(x_9203); +x_9207 = l_Lean_IR_ToIR_newVar(x_9206, x_4, x_5, x_9204); +x_9208 = lean_ctor_get(x_9207, 0); +lean_inc(x_9208); +x_9209 = lean_ctor_get(x_9207, 1); +lean_inc(x_9209); +lean_dec(x_9207); +x_9210 = lean_ctor_get(x_9208, 0); +lean_inc(x_9210); +x_9211 = lean_ctor_get(x_9208, 1); +lean_inc(x_9211); +lean_dec(x_9208); +x_9212 = lean_ctor_get(x_1, 2); +lean_inc(x_9212); +lean_inc(x_5); +lean_inc(x_4); +x_9213 = l_Lean_IR_ToIR_lowerType(x_9212, x_9211, x_4, x_5, x_9209); +if (lean_obj_tag(x_9213) == 0) +{ +lean_object* x_9214; lean_object* x_9215; lean_object* x_9216; lean_object* x_9217; lean_object* x_9218; +x_9214 = lean_ctor_get(x_9213, 0); +lean_inc(x_9214); +x_9215 = lean_ctor_get(x_9213, 1); +lean_inc(x_9215); +lean_dec(x_9213); +x_9216 = lean_ctor_get(x_9214, 0); +lean_inc(x_9216); +x_9217 = lean_ctor_get(x_9214, 1); +lean_inc(x_9217); +lean_dec(x_9214); +lean_inc(x_5); +lean_inc(x_4); +lean_inc(x_2); +x_9218 = l_Lean_IR_ToIR_lowerLet___lambda__3(x_2, x_9210, x_9200, x_9205, x_9184, x_9216, x_9217, x_4, x_5, x_9215); +if (lean_obj_tag(x_9218) == 0) +{ +lean_object* x_9219; lean_object* x_9220; uint8_t x_9221; +x_9219 = lean_ctor_get(x_9218, 0); +lean_inc(x_9219); +x_9220 = lean_ctor_get(x_9218, 1); +lean_inc(x_9220); +lean_dec(x_9218); +x_9221 = !lean_is_exclusive(x_9219); +if (x_9221 == 0) +{ +lean_object* x_9222; +x_9222 = lean_ctor_get(x_9219, 0); +lean_ctor_set(x_9185, 0, x_9222); +lean_ctor_set(x_9219, 0, x_9185); +x_9153 = x_9219; +x_9154 = x_9220; +goto block_9183; +} +else +{ +lean_object* x_9223; lean_object* x_9224; lean_object* x_9225; +x_9223 = lean_ctor_get(x_9219, 0); +x_9224 = lean_ctor_get(x_9219, 1); +lean_inc(x_9224); +lean_inc(x_9223); +lean_dec(x_9219); +lean_ctor_set(x_9185, 0, x_9223); +x_9225 = lean_alloc_ctor(0, 2, 0); +lean_ctor_set(x_9225, 0, x_9185); +lean_ctor_set(x_9225, 1, x_9224); +x_9153 = x_9225; +x_9154 = x_9220; +goto block_9183; +} +} +else +{ +uint8_t x_9226; +lean_free_object(x_9185); +lean_dec(x_8864); +lean_dec(x_8853); +lean_dec(x_8845); +lean_dec(x_5); +lean_dec(x_4); +lean_dec(x_2); +lean_dec(x_1); +x_9226 = !lean_is_exclusive(x_9218); +if (x_9226 == 0) +{ +return x_9218; +} +else +{ +lean_object* x_9227; lean_object* x_9228; lean_object* x_9229; +x_9227 = lean_ctor_get(x_9218, 0); +x_9228 = lean_ctor_get(x_9218, 1); +lean_inc(x_9228); +lean_inc(x_9227); +lean_dec(x_9218); +x_9229 = lean_alloc_ctor(1, 2, 0); +lean_ctor_set(x_9229, 0, x_9227); +lean_ctor_set(x_9229, 1, x_9228); +return x_9229; +} +} +} +else +{ +uint8_t x_9230; +lean_dec(x_9210); +lean_dec(x_9205); +lean_dec(x_9184); +lean_dec(x_9200); +lean_free_object(x_9185); +lean_dec(x_8864); +lean_dec(x_8853); +lean_dec(x_8845); +lean_dec(x_5); +lean_dec(x_4); +lean_dec(x_2); +lean_dec(x_1); +x_9230 = !lean_is_exclusive(x_9213); +if (x_9230 == 0) +{ +return x_9213; +} +else +{ +lean_object* x_9231; lean_object* x_9232; lean_object* x_9233; +x_9231 = lean_ctor_get(x_9213, 0); +x_9232 = lean_ctor_get(x_9213, 1); +lean_inc(x_9232); +lean_inc(x_9231); +lean_dec(x_9213); +x_9233 = lean_alloc_ctor(1, 2, 0); +lean_ctor_set(x_9233, 0, x_9231); +lean_ctor_set(x_9233, 1, x_9232); +return x_9233; +} +} +} +else +{ +lean_object* x_9234; lean_object* x_9235; lean_object* x_9236; lean_object* x_9237; lean_object* x_9238; lean_object* x_9239; lean_object* x_9240; lean_object* x_9241; +lean_dec(x_9195); +lean_dec(x_9193); +lean_inc(x_8853); +lean_inc(x_8845); +lean_ctor_set_tag(x_9184, 6); +lean_ctor_set(x_9184, 1, x_8853); 
+lean_ctor_set(x_9184, 0, x_8845); +x_9234 = lean_ctor_get(x_1, 0); +lean_inc(x_9234); +x_9235 = l_Lean_IR_ToIR_bindVar(x_9234, x_8859, x_4, x_5, x_9189); +x_9236 = lean_ctor_get(x_9235, 0); +lean_inc(x_9236); +x_9237 = lean_ctor_get(x_9235, 1); +lean_inc(x_9237); +lean_dec(x_9235); +x_9238 = lean_ctor_get(x_9236, 0); +lean_inc(x_9238); +x_9239 = lean_ctor_get(x_9236, 1); +lean_inc(x_9239); +lean_dec(x_9236); +x_9240 = lean_ctor_get(x_1, 2); +lean_inc(x_9240); +lean_inc(x_5); +lean_inc(x_4); +x_9241 = l_Lean_IR_ToIR_lowerType(x_9240, x_9239, x_4, x_5, x_9237); +if (lean_obj_tag(x_9241) == 0) +{ +lean_object* x_9242; lean_object* x_9243; lean_object* x_9244; lean_object* x_9245; lean_object* x_9246; +x_9242 = lean_ctor_get(x_9241, 0); +lean_inc(x_9242); +x_9243 = lean_ctor_get(x_9241, 1); +lean_inc(x_9243); +lean_dec(x_9241); +x_9244 = lean_ctor_get(x_9242, 0); +lean_inc(x_9244); +x_9245 = lean_ctor_get(x_9242, 1); +lean_inc(x_9245); +lean_dec(x_9242); +lean_inc(x_5); +lean_inc(x_4); +lean_inc(x_2); +x_9246 = l_Lean_IR_ToIR_lowerLet___lambda__1(x_2, x_9238, x_9184, x_9244, x_9245, x_4, x_5, x_9243); +if (lean_obj_tag(x_9246) == 0) +{ +lean_object* x_9247; lean_object* x_9248; uint8_t x_9249; +x_9247 = lean_ctor_get(x_9246, 0); +lean_inc(x_9247); +x_9248 = lean_ctor_get(x_9246, 1); +lean_inc(x_9248); +lean_dec(x_9246); +x_9249 = !lean_is_exclusive(x_9247); +if (x_9249 == 0) +{ +lean_object* x_9250; +x_9250 = lean_ctor_get(x_9247, 0); +lean_ctor_set(x_9185, 0, x_9250); +lean_ctor_set(x_9247, 0, x_9185); +x_9153 = x_9247; +x_9154 = x_9248; +goto block_9183; +} +else +{ +lean_object* x_9251; lean_object* x_9252; lean_object* x_9253; +x_9251 = lean_ctor_get(x_9247, 0); +x_9252 = lean_ctor_get(x_9247, 1); +lean_inc(x_9252); +lean_inc(x_9251); +lean_dec(x_9247); +lean_ctor_set(x_9185, 0, x_9251); +x_9253 = lean_alloc_ctor(0, 2, 0); +lean_ctor_set(x_9253, 0, x_9185); +lean_ctor_set(x_9253, 1, x_9252); +x_9153 = x_9253; +x_9154 = x_9248; +goto block_9183; +} +} +else +{ +uint8_t x_9254; +lean_free_object(x_9185); +lean_dec(x_8864); +lean_dec(x_8853); +lean_dec(x_8845); +lean_dec(x_5); +lean_dec(x_4); +lean_dec(x_2); +lean_dec(x_1); +x_9254 = !lean_is_exclusive(x_9246); +if (x_9254 == 0) +{ +return x_9246; +} +else +{ +lean_object* x_9255; lean_object* x_9256; lean_object* x_9257; +x_9255 = lean_ctor_get(x_9246, 0); +x_9256 = lean_ctor_get(x_9246, 1); +lean_inc(x_9256); +lean_inc(x_9255); +lean_dec(x_9246); +x_9257 = lean_alloc_ctor(1, 2, 0); +lean_ctor_set(x_9257, 0, x_9255); +lean_ctor_set(x_9257, 1, x_9256); +return x_9257; +} +} +} +else +{ +uint8_t x_9258; +lean_dec(x_9238); +lean_dec(x_9184); +lean_free_object(x_9185); +lean_dec(x_8864); +lean_dec(x_8853); +lean_dec(x_8845); +lean_dec(x_5); +lean_dec(x_4); +lean_dec(x_2); +lean_dec(x_1); +x_9258 = !lean_is_exclusive(x_9241); +if (x_9258 == 0) +{ +return x_9241; +} +else +{ +lean_object* x_9259; lean_object* x_9260; lean_object* x_9261; +x_9259 = lean_ctor_get(x_9241, 0); +x_9260 = lean_ctor_get(x_9241, 1); +lean_inc(x_9260); +lean_inc(x_9259); +lean_dec(x_9241); +x_9261 = lean_alloc_ctor(1, 2, 0); +lean_ctor_set(x_9261, 0, x_9259); +lean_ctor_set(x_9261, 1, x_9260); +return x_9261; +} +} +} +} +else +{ +lean_object* x_9262; lean_object* x_9263; lean_object* x_9264; lean_object* x_9265; lean_object* x_9266; lean_object* x_9267; lean_object* x_9268; lean_object* x_9269; +lean_dec(x_9195); +lean_dec(x_9193); +lean_inc(x_8853); +lean_inc(x_8845); +lean_ctor_set_tag(x_9184, 7); +lean_ctor_set(x_9184, 1, x_8853); +lean_ctor_set(x_9184, 0, x_8845); 
+x_9262 = lean_ctor_get(x_1, 0); +lean_inc(x_9262); +x_9263 = l_Lean_IR_ToIR_bindVar(x_9262, x_8859, x_4, x_5, x_9189); +x_9264 = lean_ctor_get(x_9263, 0); +lean_inc(x_9264); +x_9265 = lean_ctor_get(x_9263, 1); +lean_inc(x_9265); +lean_dec(x_9263); +x_9266 = lean_ctor_get(x_9264, 0); +lean_inc(x_9266); +x_9267 = lean_ctor_get(x_9264, 1); +lean_inc(x_9267); +lean_dec(x_9264); +x_9268 = lean_box(7); +lean_inc(x_5); +lean_inc(x_4); +lean_inc(x_2); +x_9269 = l_Lean_IR_ToIR_lowerLet___lambda__1(x_2, x_9266, x_9184, x_9268, x_9267, x_4, x_5, x_9265); +if (lean_obj_tag(x_9269) == 0) +{ +lean_object* x_9270; lean_object* x_9271; uint8_t x_9272; +x_9270 = lean_ctor_get(x_9269, 0); +lean_inc(x_9270); +x_9271 = lean_ctor_get(x_9269, 1); +lean_inc(x_9271); +lean_dec(x_9269); +x_9272 = !lean_is_exclusive(x_9270); +if (x_9272 == 0) +{ +lean_object* x_9273; +x_9273 = lean_ctor_get(x_9270, 0); +lean_ctor_set(x_9185, 0, x_9273); +lean_ctor_set(x_9270, 0, x_9185); +x_9153 = x_9270; +x_9154 = x_9271; +goto block_9183; +} +else +{ +lean_object* x_9274; lean_object* x_9275; lean_object* x_9276; +x_9274 = lean_ctor_get(x_9270, 0); +x_9275 = lean_ctor_get(x_9270, 1); +lean_inc(x_9275); +lean_inc(x_9274); +lean_dec(x_9270); +lean_ctor_set(x_9185, 0, x_9274); +x_9276 = lean_alloc_ctor(0, 2, 0); +lean_ctor_set(x_9276, 0, x_9185); +lean_ctor_set(x_9276, 1, x_9275); +x_9153 = x_9276; +x_9154 = x_9271; +goto block_9183; +} +} +else +{ +uint8_t x_9277; +lean_free_object(x_9185); +lean_dec(x_8864); +lean_dec(x_8853); +lean_dec(x_8845); +lean_dec(x_5); +lean_dec(x_4); +lean_dec(x_2); +lean_dec(x_1); +x_9277 = !lean_is_exclusive(x_9269); +if (x_9277 == 0) +{ +return x_9269; +} +else +{ +lean_object* x_9278; lean_object* x_9279; lean_object* x_9280; +x_9278 = lean_ctor_get(x_9269, 0); +x_9279 = lean_ctor_get(x_9269, 1); +lean_inc(x_9279); +lean_inc(x_9278); +lean_dec(x_9269); +x_9280 = lean_alloc_ctor(1, 2, 0); +lean_ctor_set(x_9280, 0, x_9278); +lean_ctor_set(x_9280, 1, x_9279); +return x_9280; +} +} +} +} +else +{ +lean_object* x_9281; lean_object* x_9282; lean_object* x_9283; lean_object* x_9284; uint8_t x_9285; +x_9281 = lean_ctor_get(x_9185, 0); +lean_inc(x_9281); +lean_dec(x_9185); +x_9282 = lean_array_get_size(x_8853); +x_9283 = lean_ctor_get(x_9281, 3); +lean_inc(x_9283); +lean_dec(x_9281); +x_9284 = lean_array_get_size(x_9283); +lean_dec(x_9283); +x_9285 = lean_nat_dec_lt(x_9282, x_9284); +if (x_9285 == 0) +{ +uint8_t x_9286; +x_9286 = lean_nat_dec_eq(x_9282, x_9284); +if (x_9286 == 0) +{ +lean_object* x_9287; lean_object* x_9288; lean_object* x_9289; lean_object* x_9290; lean_object* x_9291; lean_object* x_9292; lean_object* x_9293; lean_object* x_9294; lean_object* x_9295; lean_object* x_9296; lean_object* x_9297; lean_object* x_9298; lean_object* x_9299; lean_object* x_9300; lean_object* x_9301; lean_object* x_9302; +x_9287 = lean_unsigned_to_nat(0u); +x_9288 = l_Array_extract___rarg(x_8853, x_9287, x_9284); +x_9289 = l_Array_extract___rarg(x_8853, x_9284, x_9282); +lean_dec(x_9282); +lean_inc(x_8845); +lean_ctor_set_tag(x_9184, 6); +lean_ctor_set(x_9184, 1, x_9288); +lean_ctor_set(x_9184, 0, x_8845); +x_9290 = lean_ctor_get(x_1, 0); +lean_inc(x_9290); +x_9291 = l_Lean_IR_ToIR_bindVar(x_9290, x_8859, x_4, x_5, x_9189); +x_9292 = lean_ctor_get(x_9291, 0); +lean_inc(x_9292); +x_9293 = lean_ctor_get(x_9291, 1); +lean_inc(x_9293); +lean_dec(x_9291); +x_9294 = lean_ctor_get(x_9292, 0); +lean_inc(x_9294); +x_9295 = lean_ctor_get(x_9292, 1); +lean_inc(x_9295); +lean_dec(x_9292); +x_9296 = l_Lean_IR_ToIR_newVar(x_9295, 
x_4, x_5, x_9293); +x_9297 = lean_ctor_get(x_9296, 0); +lean_inc(x_9297); +x_9298 = lean_ctor_get(x_9296, 1); +lean_inc(x_9298); +lean_dec(x_9296); +x_9299 = lean_ctor_get(x_9297, 0); +lean_inc(x_9299); +x_9300 = lean_ctor_get(x_9297, 1); +lean_inc(x_9300); +lean_dec(x_9297); +x_9301 = lean_ctor_get(x_1, 2); +lean_inc(x_9301); +lean_inc(x_5); +lean_inc(x_4); +x_9302 = l_Lean_IR_ToIR_lowerType(x_9301, x_9300, x_4, x_5, x_9298); +if (lean_obj_tag(x_9302) == 0) +{ +lean_object* x_9303; lean_object* x_9304; lean_object* x_9305; lean_object* x_9306; lean_object* x_9307; +x_9303 = lean_ctor_get(x_9302, 0); +lean_inc(x_9303); +x_9304 = lean_ctor_get(x_9302, 1); +lean_inc(x_9304); +lean_dec(x_9302); +x_9305 = lean_ctor_get(x_9303, 0); +lean_inc(x_9305); +x_9306 = lean_ctor_get(x_9303, 1); +lean_inc(x_9306); +lean_dec(x_9303); +lean_inc(x_5); +lean_inc(x_4); +lean_inc(x_2); +x_9307 = l_Lean_IR_ToIR_lowerLet___lambda__3(x_2, x_9299, x_9289, x_9294, x_9184, x_9305, x_9306, x_4, x_5, x_9304); +if (lean_obj_tag(x_9307) == 0) +{ +lean_object* x_9308; lean_object* x_9309; lean_object* x_9310; lean_object* x_9311; lean_object* x_9312; lean_object* x_9313; lean_object* x_9314; +x_9308 = lean_ctor_get(x_9307, 0); +lean_inc(x_9308); +x_9309 = lean_ctor_get(x_9307, 1); +lean_inc(x_9309); +lean_dec(x_9307); +x_9310 = lean_ctor_get(x_9308, 0); +lean_inc(x_9310); +x_9311 = lean_ctor_get(x_9308, 1); +lean_inc(x_9311); +if (lean_is_exclusive(x_9308)) { + lean_ctor_release(x_9308, 0); + lean_ctor_release(x_9308, 1); + x_9312 = x_9308; +} else { + lean_dec_ref(x_9308); + x_9312 = lean_box(0); +} +x_9313 = lean_alloc_ctor(1, 1, 0); +lean_ctor_set(x_9313, 0, x_9310); +if (lean_is_scalar(x_9312)) { + x_9314 = lean_alloc_ctor(0, 2, 0); +} else { + x_9314 = x_9312; +} +lean_ctor_set(x_9314, 0, x_9313); +lean_ctor_set(x_9314, 1, x_9311); +x_9153 = x_9314; +x_9154 = x_9309; +goto block_9183; +} +else +{ +lean_object* x_9315; lean_object* x_9316; lean_object* x_9317; lean_object* x_9318; +lean_dec(x_8864); +lean_dec(x_8853); +lean_dec(x_8845); +lean_dec(x_5); +lean_dec(x_4); +lean_dec(x_2); +lean_dec(x_1); +x_9315 = lean_ctor_get(x_9307, 0); +lean_inc(x_9315); +x_9316 = lean_ctor_get(x_9307, 1); +lean_inc(x_9316); +if (lean_is_exclusive(x_9307)) { + lean_ctor_release(x_9307, 0); + lean_ctor_release(x_9307, 1); + x_9317 = x_9307; +} else { + lean_dec_ref(x_9307); + x_9317 = lean_box(0); +} +if (lean_is_scalar(x_9317)) { + x_9318 = lean_alloc_ctor(1, 2, 0); +} else { + x_9318 = x_9317; +} +lean_ctor_set(x_9318, 0, x_9315); +lean_ctor_set(x_9318, 1, x_9316); +return x_9318; +} +} +else +{ +lean_object* x_9319; lean_object* x_9320; lean_object* x_9321; lean_object* x_9322; +lean_dec(x_9299); +lean_dec(x_9294); +lean_dec(x_9184); +lean_dec(x_9289); +lean_dec(x_8864); +lean_dec(x_8853); +lean_dec(x_8845); +lean_dec(x_5); +lean_dec(x_4); +lean_dec(x_2); +lean_dec(x_1); +x_9319 = lean_ctor_get(x_9302, 0); +lean_inc(x_9319); +x_9320 = lean_ctor_get(x_9302, 1); +lean_inc(x_9320); +if (lean_is_exclusive(x_9302)) { + lean_ctor_release(x_9302, 0); + lean_ctor_release(x_9302, 1); + x_9321 = x_9302; +} else { + lean_dec_ref(x_9302); + x_9321 = lean_box(0); +} +if (lean_is_scalar(x_9321)) { + x_9322 = lean_alloc_ctor(1, 2, 0); +} else { + x_9322 = x_9321; +} +lean_ctor_set(x_9322, 0, x_9319); +lean_ctor_set(x_9322, 1, x_9320); +return x_9322; +} +} +else +{ +lean_object* x_9323; lean_object* x_9324; lean_object* x_9325; lean_object* x_9326; lean_object* x_9327; lean_object* x_9328; lean_object* x_9329; lean_object* x_9330; 
+lean_dec(x_9284); +lean_dec(x_9282); +lean_inc(x_8853); +lean_inc(x_8845); +lean_ctor_set_tag(x_9184, 6); +lean_ctor_set(x_9184, 1, x_8853); +lean_ctor_set(x_9184, 0, x_8845); +x_9323 = lean_ctor_get(x_1, 0); +lean_inc(x_9323); +x_9324 = l_Lean_IR_ToIR_bindVar(x_9323, x_8859, x_4, x_5, x_9189); +x_9325 = lean_ctor_get(x_9324, 0); +lean_inc(x_9325); +x_9326 = lean_ctor_get(x_9324, 1); +lean_inc(x_9326); +lean_dec(x_9324); +x_9327 = lean_ctor_get(x_9325, 0); +lean_inc(x_9327); +x_9328 = lean_ctor_get(x_9325, 1); +lean_inc(x_9328); +lean_dec(x_9325); +x_9329 = lean_ctor_get(x_1, 2); +lean_inc(x_9329); +lean_inc(x_5); +lean_inc(x_4); +x_9330 = l_Lean_IR_ToIR_lowerType(x_9329, x_9328, x_4, x_5, x_9326); +if (lean_obj_tag(x_9330) == 0) +{ +lean_object* x_9331; lean_object* x_9332; lean_object* x_9333; lean_object* x_9334; lean_object* x_9335; +x_9331 = lean_ctor_get(x_9330, 0); +lean_inc(x_9331); +x_9332 = lean_ctor_get(x_9330, 1); +lean_inc(x_9332); +lean_dec(x_9330); +x_9333 = lean_ctor_get(x_9331, 0); +lean_inc(x_9333); +x_9334 = lean_ctor_get(x_9331, 1); +lean_inc(x_9334); +lean_dec(x_9331); +lean_inc(x_5); +lean_inc(x_4); +lean_inc(x_2); +x_9335 = l_Lean_IR_ToIR_lowerLet___lambda__1(x_2, x_9327, x_9184, x_9333, x_9334, x_4, x_5, x_9332); +if (lean_obj_tag(x_9335) == 0) +{ +lean_object* x_9336; lean_object* x_9337; lean_object* x_9338; lean_object* x_9339; lean_object* x_9340; lean_object* x_9341; lean_object* x_9342; +x_9336 = lean_ctor_get(x_9335, 0); +lean_inc(x_9336); +x_9337 = lean_ctor_get(x_9335, 1); +lean_inc(x_9337); +lean_dec(x_9335); +x_9338 = lean_ctor_get(x_9336, 0); +lean_inc(x_9338); +x_9339 = lean_ctor_get(x_9336, 1); +lean_inc(x_9339); +if (lean_is_exclusive(x_9336)) { + lean_ctor_release(x_9336, 0); + lean_ctor_release(x_9336, 1); + x_9340 = x_9336; +} else { + lean_dec_ref(x_9336); + x_9340 = lean_box(0); +} +x_9341 = lean_alloc_ctor(1, 1, 0); +lean_ctor_set(x_9341, 0, x_9338); +if (lean_is_scalar(x_9340)) { + x_9342 = lean_alloc_ctor(0, 2, 0); +} else { + x_9342 = x_9340; +} +lean_ctor_set(x_9342, 0, x_9341); +lean_ctor_set(x_9342, 1, x_9339); +x_9153 = x_9342; +x_9154 = x_9337; +goto block_9183; +} +else +{ +lean_object* x_9343; lean_object* x_9344; lean_object* x_9345; lean_object* x_9346; +lean_dec(x_8864); +lean_dec(x_8853); +lean_dec(x_8845); +lean_dec(x_5); +lean_dec(x_4); +lean_dec(x_2); +lean_dec(x_1); +x_9343 = lean_ctor_get(x_9335, 0); +lean_inc(x_9343); +x_9344 = lean_ctor_get(x_9335, 1); +lean_inc(x_9344); +if (lean_is_exclusive(x_9335)) { + lean_ctor_release(x_9335, 0); + lean_ctor_release(x_9335, 1); + x_9345 = x_9335; +} else { + lean_dec_ref(x_9335); + x_9345 = lean_box(0); +} +if (lean_is_scalar(x_9345)) { + x_9346 = lean_alloc_ctor(1, 2, 0); +} else { + x_9346 = x_9345; +} +lean_ctor_set(x_9346, 0, x_9343); +lean_ctor_set(x_9346, 1, x_9344); +return x_9346; +} +} +else +{ +lean_object* x_9347; lean_object* x_9348; lean_object* x_9349; lean_object* x_9350; +lean_dec(x_9327); +lean_dec(x_9184); +lean_dec(x_8864); +lean_dec(x_8853); +lean_dec(x_8845); +lean_dec(x_5); +lean_dec(x_4); +lean_dec(x_2); +lean_dec(x_1); +x_9347 = lean_ctor_get(x_9330, 0); +lean_inc(x_9347); +x_9348 = lean_ctor_get(x_9330, 1); +lean_inc(x_9348); +if (lean_is_exclusive(x_9330)) { + lean_ctor_release(x_9330, 0); + lean_ctor_release(x_9330, 1); + x_9349 = x_9330; +} else { + lean_dec_ref(x_9330); + x_9349 = lean_box(0); +} +if (lean_is_scalar(x_9349)) { + x_9350 = lean_alloc_ctor(1, 2, 0); +} else { + x_9350 = x_9349; +} +lean_ctor_set(x_9350, 0, x_9347); +lean_ctor_set(x_9350, 1, 
x_9348); +return x_9350; +} +} +} +else +{ +lean_object* x_9351; lean_object* x_9352; lean_object* x_9353; lean_object* x_9354; lean_object* x_9355; lean_object* x_9356; lean_object* x_9357; lean_object* x_9358; +lean_dec(x_9284); +lean_dec(x_9282); +lean_inc(x_8853); +lean_inc(x_8845); +lean_ctor_set_tag(x_9184, 7); +lean_ctor_set(x_9184, 1, x_8853); +lean_ctor_set(x_9184, 0, x_8845); +x_9351 = lean_ctor_get(x_1, 0); +lean_inc(x_9351); +x_9352 = l_Lean_IR_ToIR_bindVar(x_9351, x_8859, x_4, x_5, x_9189); +x_9353 = lean_ctor_get(x_9352, 0); +lean_inc(x_9353); +x_9354 = lean_ctor_get(x_9352, 1); +lean_inc(x_9354); +lean_dec(x_9352); +x_9355 = lean_ctor_get(x_9353, 0); +lean_inc(x_9355); +x_9356 = lean_ctor_get(x_9353, 1); +lean_inc(x_9356); +lean_dec(x_9353); +x_9357 = lean_box(7); +lean_inc(x_5); +lean_inc(x_4); +lean_inc(x_2); +x_9358 = l_Lean_IR_ToIR_lowerLet___lambda__1(x_2, x_9355, x_9184, x_9357, x_9356, x_4, x_5, x_9354); +if (lean_obj_tag(x_9358) == 0) +{ +lean_object* x_9359; lean_object* x_9360; lean_object* x_9361; lean_object* x_9362; lean_object* x_9363; lean_object* x_9364; lean_object* x_9365; +x_9359 = lean_ctor_get(x_9358, 0); +lean_inc(x_9359); +x_9360 = lean_ctor_get(x_9358, 1); +lean_inc(x_9360); +lean_dec(x_9358); +x_9361 = lean_ctor_get(x_9359, 0); +lean_inc(x_9361); +x_9362 = lean_ctor_get(x_9359, 1); +lean_inc(x_9362); +if (lean_is_exclusive(x_9359)) { + lean_ctor_release(x_9359, 0); + lean_ctor_release(x_9359, 1); + x_9363 = x_9359; +} else { + lean_dec_ref(x_9359); + x_9363 = lean_box(0); +} +x_9364 = lean_alloc_ctor(1, 1, 0); +lean_ctor_set(x_9364, 0, x_9361); +if (lean_is_scalar(x_9363)) { + x_9365 = lean_alloc_ctor(0, 2, 0); +} else { + x_9365 = x_9363; +} +lean_ctor_set(x_9365, 0, x_9364); +lean_ctor_set(x_9365, 1, x_9362); +x_9153 = x_9365; +x_9154 = x_9360; +goto block_9183; +} +else +{ +lean_object* x_9366; lean_object* x_9367; lean_object* x_9368; lean_object* x_9369; +lean_dec(x_8864); +lean_dec(x_8853); +lean_dec(x_8845); +lean_dec(x_5); +lean_dec(x_4); +lean_dec(x_2); +lean_dec(x_1); +x_9366 = lean_ctor_get(x_9358, 0); +lean_inc(x_9366); +x_9367 = lean_ctor_get(x_9358, 1); +lean_inc(x_9367); +if (lean_is_exclusive(x_9358)) { + lean_ctor_release(x_9358, 0); + lean_ctor_release(x_9358, 1); + x_9368 = x_9358; +} else { + lean_dec_ref(x_9358); + x_9368 = lean_box(0); +} +if (lean_is_scalar(x_9368)) { + x_9369 = lean_alloc_ctor(1, 2, 0); +} else { + x_9369 = x_9368; +} +lean_ctor_set(x_9369, 0, x_9366); +lean_ctor_set(x_9369, 1, x_9367); +return x_9369; +} +} +} +} +else +{ +lean_object* x_9370; lean_object* x_9371; lean_object* x_9372; lean_object* x_9373; lean_object* x_9374; lean_object* x_9375; uint8_t x_9376; +x_9370 = lean_ctor_get(x_9184, 1); +lean_inc(x_9370); +lean_dec(x_9184); +x_9371 = lean_ctor_get(x_9185, 0); +lean_inc(x_9371); +if (lean_is_exclusive(x_9185)) { + lean_ctor_release(x_9185, 0); + x_9372 = x_9185; +} else { + lean_dec_ref(x_9185); + x_9372 = lean_box(0); +} +x_9373 = lean_array_get_size(x_8853); +x_9374 = lean_ctor_get(x_9371, 3); +lean_inc(x_9374); +lean_dec(x_9371); +x_9375 = lean_array_get_size(x_9374); +lean_dec(x_9374); +x_9376 = lean_nat_dec_lt(x_9373, x_9375); +if (x_9376 == 0) +{ +uint8_t x_9377; +x_9377 = lean_nat_dec_eq(x_9373, x_9375); +if (x_9377 == 0) +{ +lean_object* x_9378; lean_object* x_9379; lean_object* x_9380; lean_object* x_9381; lean_object* x_9382; lean_object* x_9383; lean_object* x_9384; lean_object* x_9385; lean_object* x_9386; lean_object* x_9387; lean_object* x_9388; lean_object* x_9389; lean_object* 
x_9390; lean_object* x_9391; lean_object* x_9392; lean_object* x_9393; lean_object* x_9394; +x_9378 = lean_unsigned_to_nat(0u); +x_9379 = l_Array_extract___rarg(x_8853, x_9378, x_9375); +x_9380 = l_Array_extract___rarg(x_8853, x_9375, x_9373); +lean_dec(x_9373); +lean_inc(x_8845); +x_9381 = lean_alloc_ctor(6, 2, 0); +lean_ctor_set(x_9381, 0, x_8845); +lean_ctor_set(x_9381, 1, x_9379); +x_9382 = lean_ctor_get(x_1, 0); +lean_inc(x_9382); +x_9383 = l_Lean_IR_ToIR_bindVar(x_9382, x_8859, x_4, x_5, x_9370); +x_9384 = lean_ctor_get(x_9383, 0); +lean_inc(x_9384); +x_9385 = lean_ctor_get(x_9383, 1); +lean_inc(x_9385); +lean_dec(x_9383); +x_9386 = lean_ctor_get(x_9384, 0); +lean_inc(x_9386); +x_9387 = lean_ctor_get(x_9384, 1); +lean_inc(x_9387); +lean_dec(x_9384); +x_9388 = l_Lean_IR_ToIR_newVar(x_9387, x_4, x_5, x_9385); +x_9389 = lean_ctor_get(x_9388, 0); +lean_inc(x_9389); +x_9390 = lean_ctor_get(x_9388, 1); +lean_inc(x_9390); +lean_dec(x_9388); +x_9391 = lean_ctor_get(x_9389, 0); +lean_inc(x_9391); +x_9392 = lean_ctor_get(x_9389, 1); +lean_inc(x_9392); +lean_dec(x_9389); +x_9393 = lean_ctor_get(x_1, 2); +lean_inc(x_9393); +lean_inc(x_5); +lean_inc(x_4); +x_9394 = l_Lean_IR_ToIR_lowerType(x_9393, x_9392, x_4, x_5, x_9390); +if (lean_obj_tag(x_9394) == 0) +{ +lean_object* x_9395; lean_object* x_9396; lean_object* x_9397; lean_object* x_9398; lean_object* x_9399; +x_9395 = lean_ctor_get(x_9394, 0); +lean_inc(x_9395); +x_9396 = lean_ctor_get(x_9394, 1); +lean_inc(x_9396); +lean_dec(x_9394); +x_9397 = lean_ctor_get(x_9395, 0); +lean_inc(x_9397); +x_9398 = lean_ctor_get(x_9395, 1); +lean_inc(x_9398); +lean_dec(x_9395); +lean_inc(x_5); +lean_inc(x_4); +lean_inc(x_2); +x_9399 = l_Lean_IR_ToIR_lowerLet___lambda__3(x_2, x_9391, x_9380, x_9386, x_9381, x_9397, x_9398, x_4, x_5, x_9396); +if (lean_obj_tag(x_9399) == 0) +{ +lean_object* x_9400; lean_object* x_9401; lean_object* x_9402; lean_object* x_9403; lean_object* x_9404; lean_object* x_9405; lean_object* x_9406; +x_9400 = lean_ctor_get(x_9399, 0); +lean_inc(x_9400); +x_9401 = lean_ctor_get(x_9399, 1); +lean_inc(x_9401); +lean_dec(x_9399); +x_9402 = lean_ctor_get(x_9400, 0); +lean_inc(x_9402); +x_9403 = lean_ctor_get(x_9400, 1); +lean_inc(x_9403); +if (lean_is_exclusive(x_9400)) { + lean_ctor_release(x_9400, 0); + lean_ctor_release(x_9400, 1); + x_9404 = x_9400; +} else { + lean_dec_ref(x_9400); + x_9404 = lean_box(0); +} +if (lean_is_scalar(x_9372)) { + x_9405 = lean_alloc_ctor(1, 1, 0); +} else { + x_9405 = x_9372; +} +lean_ctor_set(x_9405, 0, x_9402); +if (lean_is_scalar(x_9404)) { + x_9406 = lean_alloc_ctor(0, 2, 0); +} else { + x_9406 = x_9404; +} +lean_ctor_set(x_9406, 0, x_9405); +lean_ctor_set(x_9406, 1, x_9403); +x_9153 = x_9406; +x_9154 = x_9401; +goto block_9183; +} +else +{ +lean_object* x_9407; lean_object* x_9408; lean_object* x_9409; lean_object* x_9410; +lean_dec(x_9372); +lean_dec(x_8864); +lean_dec(x_8853); +lean_dec(x_8845); +lean_dec(x_5); +lean_dec(x_4); +lean_dec(x_2); +lean_dec(x_1); +x_9407 = lean_ctor_get(x_9399, 0); +lean_inc(x_9407); +x_9408 = lean_ctor_get(x_9399, 1); +lean_inc(x_9408); +if (lean_is_exclusive(x_9399)) { + lean_ctor_release(x_9399, 0); + lean_ctor_release(x_9399, 1); + x_9409 = x_9399; +} else { + lean_dec_ref(x_9399); + x_9409 = lean_box(0); +} +if (lean_is_scalar(x_9409)) { + x_9410 = lean_alloc_ctor(1, 2, 0); +} else { + x_9410 = x_9409; +} +lean_ctor_set(x_9410, 0, x_9407); +lean_ctor_set(x_9410, 1, x_9408); +return x_9410; +} +} +else +{ +lean_object* x_9411; lean_object* x_9412; lean_object* x_9413; 
lean_object* x_9414; +lean_dec(x_9391); +lean_dec(x_9386); +lean_dec(x_9381); +lean_dec(x_9380); +lean_dec(x_9372); +lean_dec(x_8864); +lean_dec(x_8853); +lean_dec(x_8845); +lean_dec(x_5); +lean_dec(x_4); +lean_dec(x_2); +lean_dec(x_1); +x_9411 = lean_ctor_get(x_9394, 0); +lean_inc(x_9411); +x_9412 = lean_ctor_get(x_9394, 1); +lean_inc(x_9412); +if (lean_is_exclusive(x_9394)) { + lean_ctor_release(x_9394, 0); + lean_ctor_release(x_9394, 1); + x_9413 = x_9394; +} else { + lean_dec_ref(x_9394); + x_9413 = lean_box(0); +} +if (lean_is_scalar(x_9413)) { + x_9414 = lean_alloc_ctor(1, 2, 0); +} else { + x_9414 = x_9413; +} +lean_ctor_set(x_9414, 0, x_9411); +lean_ctor_set(x_9414, 1, x_9412); +return x_9414; +} +} +else +{ +lean_object* x_9415; lean_object* x_9416; lean_object* x_9417; lean_object* x_9418; lean_object* x_9419; lean_object* x_9420; lean_object* x_9421; lean_object* x_9422; lean_object* x_9423; +lean_dec(x_9375); +lean_dec(x_9373); +lean_inc(x_8853); +lean_inc(x_8845); +x_9415 = lean_alloc_ctor(6, 2, 0); +lean_ctor_set(x_9415, 0, x_8845); +lean_ctor_set(x_9415, 1, x_8853); +x_9416 = lean_ctor_get(x_1, 0); +lean_inc(x_9416); +x_9417 = l_Lean_IR_ToIR_bindVar(x_9416, x_8859, x_4, x_5, x_9370); +x_9418 = lean_ctor_get(x_9417, 0); +lean_inc(x_9418); +x_9419 = lean_ctor_get(x_9417, 1); +lean_inc(x_9419); +lean_dec(x_9417); +x_9420 = lean_ctor_get(x_9418, 0); +lean_inc(x_9420); +x_9421 = lean_ctor_get(x_9418, 1); +lean_inc(x_9421); +lean_dec(x_9418); +x_9422 = lean_ctor_get(x_1, 2); +lean_inc(x_9422); +lean_inc(x_5); +lean_inc(x_4); +x_9423 = l_Lean_IR_ToIR_lowerType(x_9422, x_9421, x_4, x_5, x_9419); +if (lean_obj_tag(x_9423) == 0) +{ +lean_object* x_9424; lean_object* x_9425; lean_object* x_9426; lean_object* x_9427; lean_object* x_9428; +x_9424 = lean_ctor_get(x_9423, 0); +lean_inc(x_9424); +x_9425 = lean_ctor_get(x_9423, 1); +lean_inc(x_9425); +lean_dec(x_9423); +x_9426 = lean_ctor_get(x_9424, 0); +lean_inc(x_9426); +x_9427 = lean_ctor_get(x_9424, 1); +lean_inc(x_9427); +lean_dec(x_9424); +lean_inc(x_5); +lean_inc(x_4); +lean_inc(x_2); +x_9428 = l_Lean_IR_ToIR_lowerLet___lambda__1(x_2, x_9420, x_9415, x_9426, x_9427, x_4, x_5, x_9425); +if (lean_obj_tag(x_9428) == 0) +{ +lean_object* x_9429; lean_object* x_9430; lean_object* x_9431; lean_object* x_9432; lean_object* x_9433; lean_object* x_9434; lean_object* x_9435; +x_9429 = lean_ctor_get(x_9428, 0); +lean_inc(x_9429); +x_9430 = lean_ctor_get(x_9428, 1); +lean_inc(x_9430); +lean_dec(x_9428); +x_9431 = lean_ctor_get(x_9429, 0); +lean_inc(x_9431); +x_9432 = lean_ctor_get(x_9429, 1); +lean_inc(x_9432); +if (lean_is_exclusive(x_9429)) { + lean_ctor_release(x_9429, 0); + lean_ctor_release(x_9429, 1); + x_9433 = x_9429; +} else { + lean_dec_ref(x_9429); + x_9433 = lean_box(0); +} +if (lean_is_scalar(x_9372)) { + x_9434 = lean_alloc_ctor(1, 1, 0); +} else { + x_9434 = x_9372; +} +lean_ctor_set(x_9434, 0, x_9431); +if (lean_is_scalar(x_9433)) { + x_9435 = lean_alloc_ctor(0, 2, 0); +} else { + x_9435 = x_9433; +} +lean_ctor_set(x_9435, 0, x_9434); +lean_ctor_set(x_9435, 1, x_9432); +x_9153 = x_9435; +x_9154 = x_9430; +goto block_9183; +} +else +{ +lean_object* x_9436; lean_object* x_9437; lean_object* x_9438; lean_object* x_9439; +lean_dec(x_9372); +lean_dec(x_8864); +lean_dec(x_8853); +lean_dec(x_8845); +lean_dec(x_5); +lean_dec(x_4); +lean_dec(x_2); +lean_dec(x_1); +x_9436 = lean_ctor_get(x_9428, 0); +lean_inc(x_9436); +x_9437 = lean_ctor_get(x_9428, 1); +lean_inc(x_9437); +if (lean_is_exclusive(x_9428)) { + lean_ctor_release(x_9428, 0); + 
lean_ctor_release(x_9428, 1); + x_9438 = x_9428; +} else { + lean_dec_ref(x_9428); + x_9438 = lean_box(0); +} +if (lean_is_scalar(x_9438)) { + x_9439 = lean_alloc_ctor(1, 2, 0); +} else { + x_9439 = x_9438; +} +lean_ctor_set(x_9439, 0, x_9436); +lean_ctor_set(x_9439, 1, x_9437); +return x_9439; +} +} +else +{ +lean_object* x_9440; lean_object* x_9441; lean_object* x_9442; lean_object* x_9443; +lean_dec(x_9420); +lean_dec(x_9415); +lean_dec(x_9372); +lean_dec(x_8864); +lean_dec(x_8853); +lean_dec(x_8845); +lean_dec(x_5); +lean_dec(x_4); +lean_dec(x_2); +lean_dec(x_1); +x_9440 = lean_ctor_get(x_9423, 0); +lean_inc(x_9440); +x_9441 = lean_ctor_get(x_9423, 1); +lean_inc(x_9441); +if (lean_is_exclusive(x_9423)) { + lean_ctor_release(x_9423, 0); + lean_ctor_release(x_9423, 1); + x_9442 = x_9423; +} else { + lean_dec_ref(x_9423); + x_9442 = lean_box(0); +} +if (lean_is_scalar(x_9442)) { + x_9443 = lean_alloc_ctor(1, 2, 0); +} else { + x_9443 = x_9442; +} +lean_ctor_set(x_9443, 0, x_9440); +lean_ctor_set(x_9443, 1, x_9441); +return x_9443; +} +} +} +else +{ +lean_object* x_9444; lean_object* x_9445; lean_object* x_9446; lean_object* x_9447; lean_object* x_9448; lean_object* x_9449; lean_object* x_9450; lean_object* x_9451; lean_object* x_9452; +lean_dec(x_9375); +lean_dec(x_9373); +lean_inc(x_8853); +lean_inc(x_8845); +x_9444 = lean_alloc_ctor(7, 2, 0); +lean_ctor_set(x_9444, 0, x_8845); +lean_ctor_set(x_9444, 1, x_8853); +x_9445 = lean_ctor_get(x_1, 0); +lean_inc(x_9445); +x_9446 = l_Lean_IR_ToIR_bindVar(x_9445, x_8859, x_4, x_5, x_9370); +x_9447 = lean_ctor_get(x_9446, 0); +lean_inc(x_9447); +x_9448 = lean_ctor_get(x_9446, 1); +lean_inc(x_9448); +lean_dec(x_9446); +x_9449 = lean_ctor_get(x_9447, 0); +lean_inc(x_9449); +x_9450 = lean_ctor_get(x_9447, 1); +lean_inc(x_9450); +lean_dec(x_9447); +x_9451 = lean_box(7); +lean_inc(x_5); +lean_inc(x_4); +lean_inc(x_2); +x_9452 = l_Lean_IR_ToIR_lowerLet___lambda__1(x_2, x_9449, x_9444, x_9451, x_9450, x_4, x_5, x_9448); +if (lean_obj_tag(x_9452) == 0) +{ +lean_object* x_9453; lean_object* x_9454; lean_object* x_9455; lean_object* x_9456; lean_object* x_9457; lean_object* x_9458; lean_object* x_9459; +x_9453 = lean_ctor_get(x_9452, 0); +lean_inc(x_9453); +x_9454 = lean_ctor_get(x_9452, 1); +lean_inc(x_9454); +lean_dec(x_9452); +x_9455 = lean_ctor_get(x_9453, 0); +lean_inc(x_9455); +x_9456 = lean_ctor_get(x_9453, 1); +lean_inc(x_9456); +if (lean_is_exclusive(x_9453)) { + lean_ctor_release(x_9453, 0); + lean_ctor_release(x_9453, 1); + x_9457 = x_9453; +} else { + lean_dec_ref(x_9453); + x_9457 = lean_box(0); +} +if (lean_is_scalar(x_9372)) { + x_9458 = lean_alloc_ctor(1, 1, 0); +} else { + x_9458 = x_9372; +} +lean_ctor_set(x_9458, 0, x_9455); +if (lean_is_scalar(x_9457)) { + x_9459 = lean_alloc_ctor(0, 2, 0); +} else { + x_9459 = x_9457; +} +lean_ctor_set(x_9459, 0, x_9458); +lean_ctor_set(x_9459, 1, x_9456); +x_9153 = x_9459; +x_9154 = x_9454; +goto block_9183; +} +else +{ +lean_object* x_9460; lean_object* x_9461; lean_object* x_9462; lean_object* x_9463; +lean_dec(x_9372); +lean_dec(x_8864); +lean_dec(x_8853); +lean_dec(x_8845); +lean_dec(x_5); +lean_dec(x_4); +lean_dec(x_2); +lean_dec(x_1); +x_9460 = lean_ctor_get(x_9452, 0); +lean_inc(x_9460); +x_9461 = lean_ctor_get(x_9452, 1); +lean_inc(x_9461); +if (lean_is_exclusive(x_9452)) { + lean_ctor_release(x_9452, 0); + lean_ctor_release(x_9452, 1); + x_9462 = x_9452; +} else { + lean_dec_ref(x_9452); + x_9462 = lean_box(0); +} +if (lean_is_scalar(x_9462)) { + x_9463 = lean_alloc_ctor(1, 2, 0); +} else { + 
x_9463 = x_9462; +} +lean_ctor_set(x_9463, 0, x_9460); +lean_ctor_set(x_9463, 1, x_9461); +return x_9463; +} +} +} +} +block_9183: +{ +lean_object* x_9155; +x_9155 = lean_ctor_get(x_9153, 0); +lean_inc(x_9155); +if (lean_obj_tag(x_9155) == 0) +{ +lean_object* x_9156; lean_object* x_9157; lean_object* x_9158; lean_object* x_9159; lean_object* x_9160; lean_object* x_9161; lean_object* x_9162; lean_object* x_9163; lean_object* x_9164; lean_object* x_9165; +lean_dec(x_8864); +x_9156 = lean_ctor_get(x_9153, 1); +lean_inc(x_9156); +lean_dec(x_9153); +x_9157 = lean_alloc_ctor(6, 2, 0); +lean_ctor_set(x_9157, 0, x_8845); +lean_ctor_set(x_9157, 1, x_8853); +x_9158 = lean_ctor_get(x_1, 0); +lean_inc(x_9158); +x_9159 = l_Lean_IR_ToIR_bindVar(x_9158, x_9156, x_4, x_5, x_9154); +x_9160 = lean_ctor_get(x_9159, 0); +lean_inc(x_9160); +x_9161 = lean_ctor_get(x_9159, 1); +lean_inc(x_9161); +lean_dec(x_9159); +x_9162 = lean_ctor_get(x_9160, 0); +lean_inc(x_9162); +x_9163 = lean_ctor_get(x_9160, 1); +lean_inc(x_9163); +lean_dec(x_9160); +x_9164 = lean_ctor_get(x_1, 2); +lean_inc(x_9164); +lean_dec(x_1); +lean_inc(x_5); +lean_inc(x_4); +x_9165 = l_Lean_IR_ToIR_lowerType(x_9164, x_9163, x_4, x_5, x_9161); +if (lean_obj_tag(x_9165) == 0) +{ +lean_object* x_9166; lean_object* x_9167; lean_object* x_9168; lean_object* x_9169; lean_object* x_9170; +x_9166 = lean_ctor_get(x_9165, 0); +lean_inc(x_9166); +x_9167 = lean_ctor_get(x_9165, 1); +lean_inc(x_9167); +lean_dec(x_9165); +x_9168 = lean_ctor_get(x_9166, 0); +lean_inc(x_9168); +x_9169 = lean_ctor_get(x_9166, 1); +lean_inc(x_9169); +lean_dec(x_9166); +x_9170 = l_Lean_IR_ToIR_lowerLet___lambda__1(x_2, x_9162, x_9157, x_9168, x_9169, x_4, x_5, x_9167); +return x_9170; +} +else +{ +uint8_t x_9171; +lean_dec(x_9162); +lean_dec(x_9157); +lean_dec(x_5); +lean_dec(x_4); +lean_dec(x_2); +x_9171 = !lean_is_exclusive(x_9165); +if (x_9171 == 0) +{ +return x_9165; +} +else +{ +lean_object* x_9172; lean_object* x_9173; lean_object* x_9174; +x_9172 = lean_ctor_get(x_9165, 0); +x_9173 = lean_ctor_get(x_9165, 1); +lean_inc(x_9173); +lean_inc(x_9172); +lean_dec(x_9165); +x_9174 = lean_alloc_ctor(1, 2, 0); +lean_ctor_set(x_9174, 0, x_9172); +lean_ctor_set(x_9174, 1, x_9173); +return x_9174; +} +} +} +else +{ +uint8_t x_9175; +lean_dec(x_8853); +lean_dec(x_8845); +lean_dec(x_5); +lean_dec(x_4); +lean_dec(x_2); +lean_dec(x_1); +x_9175 = !lean_is_exclusive(x_9153); +if (x_9175 == 0) +{ +lean_object* x_9176; lean_object* x_9177; lean_object* x_9178; +x_9176 = lean_ctor_get(x_9153, 0); +lean_dec(x_9176); +x_9177 = lean_ctor_get(x_9155, 0); +lean_inc(x_9177); +lean_dec(x_9155); +lean_ctor_set(x_9153, 0, x_9177); +if (lean_is_scalar(x_8864)) { + x_9178 = lean_alloc_ctor(0, 2, 0); +} else { + x_9178 = x_8864; +} +lean_ctor_set(x_9178, 0, x_9153); +lean_ctor_set(x_9178, 1, x_9154); +return x_9178; +} +else +{ +lean_object* x_9179; lean_object* x_9180; lean_object* x_9181; lean_object* x_9182; +x_9179 = lean_ctor_get(x_9153, 1); +lean_inc(x_9179); +lean_dec(x_9153); +x_9180 = lean_ctor_get(x_9155, 0); +lean_inc(x_9180); +lean_dec(x_9155); +x_9181 = lean_alloc_ctor(0, 2, 0); +lean_ctor_set(x_9181, 0, x_9180); +lean_ctor_set(x_9181, 1, x_9179); +if (lean_is_scalar(x_8864)) { + x_9182 = lean_alloc_ctor(0, 2, 0); +} else { + x_9182 = x_8864; +} +lean_ctor_set(x_9182, 0, x_9181); +lean_ctor_set(x_9182, 1, x_9154); +return x_9182; +} +} +} +} +case 2: +{ +lean_object* x_9464; lean_object* x_9465; +lean_dec(x_8870); +lean_dec(x_8865); +lean_dec(x_8864); +lean_free_object(x_8855); +lean_dec(x_8853); 
+lean_dec(x_8845); +lean_dec(x_5945); +lean_dec(x_5944); +lean_dec(x_2); +lean_dec(x_1); +x_9464 = l_Lean_IR_ToIR_lowerLet___closed__18; +x_9465 = l_panic___at_Lean_IR_ToIR_lowerAlt_loop___spec__1(x_9464, x_8859, x_4, x_5, x_8863); +return x_9465; +} +case 3: +{ +lean_object* x_9466; lean_object* x_9467; lean_object* x_9497; lean_object* x_9498; +lean_dec(x_8870); +lean_dec(x_8865); +lean_dec(x_5945); +lean_dec(x_5944); +lean_inc(x_8845); +x_9497 = l_Lean_Compiler_LCNF_getMonoDecl_x3f(x_8845, x_4, x_5, x_8863); +x_9498 = lean_ctor_get(x_9497, 0); +lean_inc(x_9498); +if (lean_obj_tag(x_9498) == 0) +{ +lean_object* x_9499; lean_object* x_9500; +x_9499 = lean_ctor_get(x_9497, 1); +lean_inc(x_9499); +lean_dec(x_9497); +x_9500 = lean_box(0); +lean_ctor_set(x_8855, 0, x_9500); +x_9466 = x_8855; +x_9467 = x_9499; +goto block_9496; +} +else +{ +uint8_t x_9501; +lean_free_object(x_8855); +x_9501 = !lean_is_exclusive(x_9497); +if (x_9501 == 0) +{ +lean_object* x_9502; lean_object* x_9503; uint8_t x_9504; +x_9502 = lean_ctor_get(x_9497, 1); +x_9503 = lean_ctor_get(x_9497, 0); +lean_dec(x_9503); +x_9504 = !lean_is_exclusive(x_9498); +if (x_9504 == 0) +{ +lean_object* x_9505; lean_object* x_9506; lean_object* x_9507; lean_object* x_9508; uint8_t x_9509; +x_9505 = lean_ctor_get(x_9498, 0); +x_9506 = lean_array_get_size(x_8853); +x_9507 = lean_ctor_get(x_9505, 3); +lean_inc(x_9507); +lean_dec(x_9505); +x_9508 = lean_array_get_size(x_9507); +lean_dec(x_9507); +x_9509 = lean_nat_dec_lt(x_9506, x_9508); +if (x_9509 == 0) +{ +uint8_t x_9510; +x_9510 = lean_nat_dec_eq(x_9506, x_9508); +if (x_9510 == 0) +{ +lean_object* x_9511; lean_object* x_9512; lean_object* x_9513; lean_object* x_9514; lean_object* x_9515; lean_object* x_9516; lean_object* x_9517; lean_object* x_9518; lean_object* x_9519; lean_object* x_9520; lean_object* x_9521; lean_object* x_9522; lean_object* x_9523; lean_object* x_9524; lean_object* x_9525; lean_object* x_9526; +x_9511 = lean_unsigned_to_nat(0u); +x_9512 = l_Array_extract___rarg(x_8853, x_9511, x_9508); +x_9513 = l_Array_extract___rarg(x_8853, x_9508, x_9506); +lean_dec(x_9506); +lean_inc(x_8845); +lean_ctor_set_tag(x_9497, 6); +lean_ctor_set(x_9497, 1, x_9512); +lean_ctor_set(x_9497, 0, x_8845); +x_9514 = lean_ctor_get(x_1, 0); +lean_inc(x_9514); +x_9515 = l_Lean_IR_ToIR_bindVar(x_9514, x_8859, x_4, x_5, x_9502); +x_9516 = lean_ctor_get(x_9515, 0); +lean_inc(x_9516); +x_9517 = lean_ctor_get(x_9515, 1); +lean_inc(x_9517); +lean_dec(x_9515); +x_9518 = lean_ctor_get(x_9516, 0); +lean_inc(x_9518); +x_9519 = lean_ctor_get(x_9516, 1); +lean_inc(x_9519); +lean_dec(x_9516); +x_9520 = l_Lean_IR_ToIR_newVar(x_9519, x_4, x_5, x_9517); +x_9521 = lean_ctor_get(x_9520, 0); +lean_inc(x_9521); +x_9522 = lean_ctor_get(x_9520, 1); +lean_inc(x_9522); +lean_dec(x_9520); +x_9523 = lean_ctor_get(x_9521, 0); +lean_inc(x_9523); +x_9524 = lean_ctor_get(x_9521, 1); +lean_inc(x_9524); +lean_dec(x_9521); +x_9525 = lean_ctor_get(x_1, 2); +lean_inc(x_9525); +lean_inc(x_5); +lean_inc(x_4); +x_9526 = l_Lean_IR_ToIR_lowerType(x_9525, x_9524, x_4, x_5, x_9522); +if (lean_obj_tag(x_9526) == 0) +{ +lean_object* x_9527; lean_object* x_9528; lean_object* x_9529; lean_object* x_9530; lean_object* x_9531; +x_9527 = lean_ctor_get(x_9526, 0); +lean_inc(x_9527); +x_9528 = lean_ctor_get(x_9526, 1); +lean_inc(x_9528); +lean_dec(x_9526); +x_9529 = lean_ctor_get(x_9527, 0); +lean_inc(x_9529); +x_9530 = lean_ctor_get(x_9527, 1); +lean_inc(x_9530); +lean_dec(x_9527); +lean_inc(x_5); +lean_inc(x_4); +lean_inc(x_2); +x_9531 = 
l_Lean_IR_ToIR_lowerLet___lambda__3(x_2, x_9523, x_9513, x_9518, x_9497, x_9529, x_9530, x_4, x_5, x_9528); +if (lean_obj_tag(x_9531) == 0) +{ +lean_object* x_9532; lean_object* x_9533; uint8_t x_9534; +x_9532 = lean_ctor_get(x_9531, 0); +lean_inc(x_9532); +x_9533 = lean_ctor_get(x_9531, 1); +lean_inc(x_9533); +lean_dec(x_9531); +x_9534 = !lean_is_exclusive(x_9532); +if (x_9534 == 0) +{ +lean_object* x_9535; +x_9535 = lean_ctor_get(x_9532, 0); +lean_ctor_set(x_9498, 0, x_9535); +lean_ctor_set(x_9532, 0, x_9498); +x_9466 = x_9532; +x_9467 = x_9533; +goto block_9496; +} +else +{ +lean_object* x_9536; lean_object* x_9537; lean_object* x_9538; +x_9536 = lean_ctor_get(x_9532, 0); +x_9537 = lean_ctor_get(x_9532, 1); +lean_inc(x_9537); +lean_inc(x_9536); +lean_dec(x_9532); +lean_ctor_set(x_9498, 0, x_9536); +x_9538 = lean_alloc_ctor(0, 2, 0); +lean_ctor_set(x_9538, 0, x_9498); +lean_ctor_set(x_9538, 1, x_9537); +x_9466 = x_9538; +x_9467 = x_9533; +goto block_9496; +} +} +else +{ +uint8_t x_9539; +lean_free_object(x_9498); +lean_dec(x_8864); +lean_dec(x_8853); +lean_dec(x_8845); +lean_dec(x_5); +lean_dec(x_4); +lean_dec(x_2); +lean_dec(x_1); +x_9539 = !lean_is_exclusive(x_9531); +if (x_9539 == 0) +{ +return x_9531; +} +else +{ +lean_object* x_9540; lean_object* x_9541; lean_object* x_9542; +x_9540 = lean_ctor_get(x_9531, 0); +x_9541 = lean_ctor_get(x_9531, 1); +lean_inc(x_9541); +lean_inc(x_9540); +lean_dec(x_9531); +x_9542 = lean_alloc_ctor(1, 2, 0); +lean_ctor_set(x_9542, 0, x_9540); +lean_ctor_set(x_9542, 1, x_9541); +return x_9542; +} +} +} +else +{ +uint8_t x_9543; +lean_dec(x_9523); +lean_dec(x_9518); +lean_dec(x_9497); +lean_dec(x_9513); +lean_free_object(x_9498); +lean_dec(x_8864); +lean_dec(x_8853); +lean_dec(x_8845); +lean_dec(x_5); +lean_dec(x_4); +lean_dec(x_2); +lean_dec(x_1); +x_9543 = !lean_is_exclusive(x_9526); +if (x_9543 == 0) +{ +return x_9526; +} +else +{ +lean_object* x_9544; lean_object* x_9545; lean_object* x_9546; +x_9544 = lean_ctor_get(x_9526, 0); +x_9545 = lean_ctor_get(x_9526, 1); +lean_inc(x_9545); +lean_inc(x_9544); +lean_dec(x_9526); +x_9546 = lean_alloc_ctor(1, 2, 0); +lean_ctor_set(x_9546, 0, x_9544); +lean_ctor_set(x_9546, 1, x_9545); +return x_9546; +} +} +} +else +{ +lean_object* x_9547; lean_object* x_9548; lean_object* x_9549; lean_object* x_9550; lean_object* x_9551; lean_object* x_9552; lean_object* x_9553; lean_object* x_9554; +lean_dec(x_9508); +lean_dec(x_9506); +lean_inc(x_8853); +lean_inc(x_8845); +lean_ctor_set_tag(x_9497, 6); +lean_ctor_set(x_9497, 1, x_8853); +lean_ctor_set(x_9497, 0, x_8845); +x_9547 = lean_ctor_get(x_1, 0); +lean_inc(x_9547); +x_9548 = l_Lean_IR_ToIR_bindVar(x_9547, x_8859, x_4, x_5, x_9502); +x_9549 = lean_ctor_get(x_9548, 0); +lean_inc(x_9549); +x_9550 = lean_ctor_get(x_9548, 1); +lean_inc(x_9550); +lean_dec(x_9548); +x_9551 = lean_ctor_get(x_9549, 0); +lean_inc(x_9551); +x_9552 = lean_ctor_get(x_9549, 1); +lean_inc(x_9552); +lean_dec(x_9549); +x_9553 = lean_ctor_get(x_1, 2); +lean_inc(x_9553); +lean_inc(x_5); +lean_inc(x_4); +x_9554 = l_Lean_IR_ToIR_lowerType(x_9553, x_9552, x_4, x_5, x_9550); +if (lean_obj_tag(x_9554) == 0) +{ +lean_object* x_9555; lean_object* x_9556; lean_object* x_9557; lean_object* x_9558; lean_object* x_9559; +x_9555 = lean_ctor_get(x_9554, 0); +lean_inc(x_9555); +x_9556 = lean_ctor_get(x_9554, 1); +lean_inc(x_9556); +lean_dec(x_9554); +x_9557 = lean_ctor_get(x_9555, 0); +lean_inc(x_9557); +x_9558 = lean_ctor_get(x_9555, 1); +lean_inc(x_9558); +lean_dec(x_9555); +lean_inc(x_5); +lean_inc(x_4); 
+lean_inc(x_2); +x_9559 = l_Lean_IR_ToIR_lowerLet___lambda__1(x_2, x_9551, x_9497, x_9557, x_9558, x_4, x_5, x_9556); +if (lean_obj_tag(x_9559) == 0) +{ +lean_object* x_9560; lean_object* x_9561; uint8_t x_9562; +x_9560 = lean_ctor_get(x_9559, 0); +lean_inc(x_9560); +x_9561 = lean_ctor_get(x_9559, 1); +lean_inc(x_9561); +lean_dec(x_9559); +x_9562 = !lean_is_exclusive(x_9560); +if (x_9562 == 0) +{ +lean_object* x_9563; +x_9563 = lean_ctor_get(x_9560, 0); +lean_ctor_set(x_9498, 0, x_9563); +lean_ctor_set(x_9560, 0, x_9498); +x_9466 = x_9560; +x_9467 = x_9561; +goto block_9496; +} +else +{ +lean_object* x_9564; lean_object* x_9565; lean_object* x_9566; +x_9564 = lean_ctor_get(x_9560, 0); +x_9565 = lean_ctor_get(x_9560, 1); +lean_inc(x_9565); +lean_inc(x_9564); +lean_dec(x_9560); +lean_ctor_set(x_9498, 0, x_9564); +x_9566 = lean_alloc_ctor(0, 2, 0); +lean_ctor_set(x_9566, 0, x_9498); +lean_ctor_set(x_9566, 1, x_9565); +x_9466 = x_9566; +x_9467 = x_9561; +goto block_9496; +} +} +else +{ +uint8_t x_9567; +lean_free_object(x_9498); +lean_dec(x_8864); +lean_dec(x_8853); +lean_dec(x_8845); +lean_dec(x_5); +lean_dec(x_4); +lean_dec(x_2); +lean_dec(x_1); +x_9567 = !lean_is_exclusive(x_9559); +if (x_9567 == 0) +{ +return x_9559; +} +else +{ +lean_object* x_9568; lean_object* x_9569; lean_object* x_9570; +x_9568 = lean_ctor_get(x_9559, 0); +x_9569 = lean_ctor_get(x_9559, 1); +lean_inc(x_9569); +lean_inc(x_9568); +lean_dec(x_9559); +x_9570 = lean_alloc_ctor(1, 2, 0); +lean_ctor_set(x_9570, 0, x_9568); +lean_ctor_set(x_9570, 1, x_9569); +return x_9570; +} +} +} +else +{ +uint8_t x_9571; +lean_dec(x_9551); +lean_dec(x_9497); +lean_free_object(x_9498); +lean_dec(x_8864); +lean_dec(x_8853); +lean_dec(x_8845); +lean_dec(x_5); +lean_dec(x_4); +lean_dec(x_2); +lean_dec(x_1); +x_9571 = !lean_is_exclusive(x_9554); +if (x_9571 == 0) +{ +return x_9554; +} +else +{ +lean_object* x_9572; lean_object* x_9573; lean_object* x_9574; +x_9572 = lean_ctor_get(x_9554, 0); +x_9573 = lean_ctor_get(x_9554, 1); +lean_inc(x_9573); +lean_inc(x_9572); +lean_dec(x_9554); +x_9574 = lean_alloc_ctor(1, 2, 0); +lean_ctor_set(x_9574, 0, x_9572); +lean_ctor_set(x_9574, 1, x_9573); +return x_9574; +} +} +} +} +else +{ +lean_object* x_9575; lean_object* x_9576; lean_object* x_9577; lean_object* x_9578; lean_object* x_9579; lean_object* x_9580; lean_object* x_9581; lean_object* x_9582; +lean_dec(x_9508); +lean_dec(x_9506); +lean_inc(x_8853); +lean_inc(x_8845); +lean_ctor_set_tag(x_9497, 7); +lean_ctor_set(x_9497, 1, x_8853); +lean_ctor_set(x_9497, 0, x_8845); +x_9575 = lean_ctor_get(x_1, 0); +lean_inc(x_9575); +x_9576 = l_Lean_IR_ToIR_bindVar(x_9575, x_8859, x_4, x_5, x_9502); +x_9577 = lean_ctor_get(x_9576, 0); +lean_inc(x_9577); +x_9578 = lean_ctor_get(x_9576, 1); +lean_inc(x_9578); +lean_dec(x_9576); +x_9579 = lean_ctor_get(x_9577, 0); +lean_inc(x_9579); +x_9580 = lean_ctor_get(x_9577, 1); +lean_inc(x_9580); +lean_dec(x_9577); +x_9581 = lean_box(7); +lean_inc(x_5); +lean_inc(x_4); +lean_inc(x_2); +x_9582 = l_Lean_IR_ToIR_lowerLet___lambda__1(x_2, x_9579, x_9497, x_9581, x_9580, x_4, x_5, x_9578); +if (lean_obj_tag(x_9582) == 0) +{ +lean_object* x_9583; lean_object* x_9584; uint8_t x_9585; +x_9583 = lean_ctor_get(x_9582, 0); +lean_inc(x_9583); +x_9584 = lean_ctor_get(x_9582, 1); +lean_inc(x_9584); +lean_dec(x_9582); +x_9585 = !lean_is_exclusive(x_9583); +if (x_9585 == 0) +{ +lean_object* x_9586; +x_9586 = lean_ctor_get(x_9583, 0); +lean_ctor_set(x_9498, 0, x_9586); +lean_ctor_set(x_9583, 0, x_9498); +x_9466 = x_9583; +x_9467 = x_9584; 
+goto block_9496; +} +else +{ +lean_object* x_9587; lean_object* x_9588; lean_object* x_9589; +x_9587 = lean_ctor_get(x_9583, 0); +x_9588 = lean_ctor_get(x_9583, 1); +lean_inc(x_9588); +lean_inc(x_9587); +lean_dec(x_9583); +lean_ctor_set(x_9498, 0, x_9587); +x_9589 = lean_alloc_ctor(0, 2, 0); +lean_ctor_set(x_9589, 0, x_9498); +lean_ctor_set(x_9589, 1, x_9588); +x_9466 = x_9589; +x_9467 = x_9584; +goto block_9496; +} +} +else +{ +uint8_t x_9590; +lean_free_object(x_9498); +lean_dec(x_8864); +lean_dec(x_8853); +lean_dec(x_8845); +lean_dec(x_5); +lean_dec(x_4); +lean_dec(x_2); +lean_dec(x_1); +x_9590 = !lean_is_exclusive(x_9582); +if (x_9590 == 0) +{ +return x_9582; +} +else +{ +lean_object* x_9591; lean_object* x_9592; lean_object* x_9593; +x_9591 = lean_ctor_get(x_9582, 0); +x_9592 = lean_ctor_get(x_9582, 1); +lean_inc(x_9592); +lean_inc(x_9591); +lean_dec(x_9582); +x_9593 = lean_alloc_ctor(1, 2, 0); +lean_ctor_set(x_9593, 0, x_9591); +lean_ctor_set(x_9593, 1, x_9592); +return x_9593; +} +} +} +} +else +{ +lean_object* x_9594; lean_object* x_9595; lean_object* x_9596; lean_object* x_9597; uint8_t x_9598; +x_9594 = lean_ctor_get(x_9498, 0); +lean_inc(x_9594); +lean_dec(x_9498); +x_9595 = lean_array_get_size(x_8853); +x_9596 = lean_ctor_get(x_9594, 3); +lean_inc(x_9596); +lean_dec(x_9594); +x_9597 = lean_array_get_size(x_9596); +lean_dec(x_9596); +x_9598 = lean_nat_dec_lt(x_9595, x_9597); +if (x_9598 == 0) +{ +uint8_t x_9599; +x_9599 = lean_nat_dec_eq(x_9595, x_9597); +if (x_9599 == 0) +{ +lean_object* x_9600; lean_object* x_9601; lean_object* x_9602; lean_object* x_9603; lean_object* x_9604; lean_object* x_9605; lean_object* x_9606; lean_object* x_9607; lean_object* x_9608; lean_object* x_9609; lean_object* x_9610; lean_object* x_9611; lean_object* x_9612; lean_object* x_9613; lean_object* x_9614; lean_object* x_9615; +x_9600 = lean_unsigned_to_nat(0u); +x_9601 = l_Array_extract___rarg(x_8853, x_9600, x_9597); +x_9602 = l_Array_extract___rarg(x_8853, x_9597, x_9595); +lean_dec(x_9595); +lean_inc(x_8845); +lean_ctor_set_tag(x_9497, 6); +lean_ctor_set(x_9497, 1, x_9601); +lean_ctor_set(x_9497, 0, x_8845); +x_9603 = lean_ctor_get(x_1, 0); +lean_inc(x_9603); +x_9604 = l_Lean_IR_ToIR_bindVar(x_9603, x_8859, x_4, x_5, x_9502); +x_9605 = lean_ctor_get(x_9604, 0); +lean_inc(x_9605); +x_9606 = lean_ctor_get(x_9604, 1); +lean_inc(x_9606); +lean_dec(x_9604); +x_9607 = lean_ctor_get(x_9605, 0); +lean_inc(x_9607); +x_9608 = lean_ctor_get(x_9605, 1); +lean_inc(x_9608); +lean_dec(x_9605); +x_9609 = l_Lean_IR_ToIR_newVar(x_9608, x_4, x_5, x_9606); +x_9610 = lean_ctor_get(x_9609, 0); +lean_inc(x_9610); +x_9611 = lean_ctor_get(x_9609, 1); +lean_inc(x_9611); +lean_dec(x_9609); +x_9612 = lean_ctor_get(x_9610, 0); +lean_inc(x_9612); +x_9613 = lean_ctor_get(x_9610, 1); +lean_inc(x_9613); +lean_dec(x_9610); +x_9614 = lean_ctor_get(x_1, 2); +lean_inc(x_9614); +lean_inc(x_5); +lean_inc(x_4); +x_9615 = l_Lean_IR_ToIR_lowerType(x_9614, x_9613, x_4, x_5, x_9611); +if (lean_obj_tag(x_9615) == 0) +{ +lean_object* x_9616; lean_object* x_9617; lean_object* x_9618; lean_object* x_9619; lean_object* x_9620; +x_9616 = lean_ctor_get(x_9615, 0); +lean_inc(x_9616); +x_9617 = lean_ctor_get(x_9615, 1); +lean_inc(x_9617); +lean_dec(x_9615); +x_9618 = lean_ctor_get(x_9616, 0); +lean_inc(x_9618); +x_9619 = lean_ctor_get(x_9616, 1); +lean_inc(x_9619); +lean_dec(x_9616); +lean_inc(x_5); +lean_inc(x_4); +lean_inc(x_2); +x_9620 = l_Lean_IR_ToIR_lowerLet___lambda__3(x_2, x_9612, x_9602, x_9607, x_9497, x_9618, x_9619, x_4, x_5, x_9617); 
+if (lean_obj_tag(x_9620) == 0) +{ +lean_object* x_9621; lean_object* x_9622; lean_object* x_9623; lean_object* x_9624; lean_object* x_9625; lean_object* x_9626; lean_object* x_9627; +x_9621 = lean_ctor_get(x_9620, 0); +lean_inc(x_9621); +x_9622 = lean_ctor_get(x_9620, 1); +lean_inc(x_9622); +lean_dec(x_9620); +x_9623 = lean_ctor_get(x_9621, 0); +lean_inc(x_9623); +x_9624 = lean_ctor_get(x_9621, 1); +lean_inc(x_9624); +if (lean_is_exclusive(x_9621)) { + lean_ctor_release(x_9621, 0); + lean_ctor_release(x_9621, 1); + x_9625 = x_9621; +} else { + lean_dec_ref(x_9621); + x_9625 = lean_box(0); +} +x_9626 = lean_alloc_ctor(1, 1, 0); +lean_ctor_set(x_9626, 0, x_9623); +if (lean_is_scalar(x_9625)) { + x_9627 = lean_alloc_ctor(0, 2, 0); +} else { + x_9627 = x_9625; +} +lean_ctor_set(x_9627, 0, x_9626); +lean_ctor_set(x_9627, 1, x_9624); +x_9466 = x_9627; +x_9467 = x_9622; +goto block_9496; +} +else +{ +lean_object* x_9628; lean_object* x_9629; lean_object* x_9630; lean_object* x_9631; +lean_dec(x_8864); +lean_dec(x_8853); +lean_dec(x_8845); +lean_dec(x_5); +lean_dec(x_4); +lean_dec(x_2); +lean_dec(x_1); +x_9628 = lean_ctor_get(x_9620, 0); +lean_inc(x_9628); +x_9629 = lean_ctor_get(x_9620, 1); +lean_inc(x_9629); +if (lean_is_exclusive(x_9620)) { + lean_ctor_release(x_9620, 0); + lean_ctor_release(x_9620, 1); + x_9630 = x_9620; +} else { + lean_dec_ref(x_9620); + x_9630 = lean_box(0); +} +if (lean_is_scalar(x_9630)) { + x_9631 = lean_alloc_ctor(1, 2, 0); +} else { + x_9631 = x_9630; +} +lean_ctor_set(x_9631, 0, x_9628); +lean_ctor_set(x_9631, 1, x_9629); +return x_9631; +} +} +else +{ +lean_object* x_9632; lean_object* x_9633; lean_object* x_9634; lean_object* x_9635; +lean_dec(x_9612); +lean_dec(x_9607); +lean_dec(x_9497); +lean_dec(x_9602); +lean_dec(x_8864); +lean_dec(x_8853); +lean_dec(x_8845); +lean_dec(x_5); +lean_dec(x_4); +lean_dec(x_2); +lean_dec(x_1); +x_9632 = lean_ctor_get(x_9615, 0); +lean_inc(x_9632); +x_9633 = lean_ctor_get(x_9615, 1); +lean_inc(x_9633); +if (lean_is_exclusive(x_9615)) { + lean_ctor_release(x_9615, 0); + lean_ctor_release(x_9615, 1); + x_9634 = x_9615; +} else { + lean_dec_ref(x_9615); + x_9634 = lean_box(0); +} +if (lean_is_scalar(x_9634)) { + x_9635 = lean_alloc_ctor(1, 2, 0); +} else { + x_9635 = x_9634; +} +lean_ctor_set(x_9635, 0, x_9632); +lean_ctor_set(x_9635, 1, x_9633); +return x_9635; +} +} +else +{ +lean_object* x_9636; lean_object* x_9637; lean_object* x_9638; lean_object* x_9639; lean_object* x_9640; lean_object* x_9641; lean_object* x_9642; lean_object* x_9643; +lean_dec(x_9597); +lean_dec(x_9595); +lean_inc(x_8853); +lean_inc(x_8845); +lean_ctor_set_tag(x_9497, 6); +lean_ctor_set(x_9497, 1, x_8853); +lean_ctor_set(x_9497, 0, x_8845); +x_9636 = lean_ctor_get(x_1, 0); +lean_inc(x_9636); +x_9637 = l_Lean_IR_ToIR_bindVar(x_9636, x_8859, x_4, x_5, x_9502); +x_9638 = lean_ctor_get(x_9637, 0); +lean_inc(x_9638); +x_9639 = lean_ctor_get(x_9637, 1); +lean_inc(x_9639); +lean_dec(x_9637); +x_9640 = lean_ctor_get(x_9638, 0); +lean_inc(x_9640); +x_9641 = lean_ctor_get(x_9638, 1); +lean_inc(x_9641); +lean_dec(x_9638); +x_9642 = lean_ctor_get(x_1, 2); +lean_inc(x_9642); +lean_inc(x_5); +lean_inc(x_4); +x_9643 = l_Lean_IR_ToIR_lowerType(x_9642, x_9641, x_4, x_5, x_9639); +if (lean_obj_tag(x_9643) == 0) +{ +lean_object* x_9644; lean_object* x_9645; lean_object* x_9646; lean_object* x_9647; lean_object* x_9648; +x_9644 = lean_ctor_get(x_9643, 0); +lean_inc(x_9644); +x_9645 = lean_ctor_get(x_9643, 1); +lean_inc(x_9645); +lean_dec(x_9643); +x_9646 = lean_ctor_get(x_9644, 
0); +lean_inc(x_9646); +x_9647 = lean_ctor_get(x_9644, 1); +lean_inc(x_9647); +lean_dec(x_9644); +lean_inc(x_5); +lean_inc(x_4); +lean_inc(x_2); +x_9648 = l_Lean_IR_ToIR_lowerLet___lambda__1(x_2, x_9640, x_9497, x_9646, x_9647, x_4, x_5, x_9645); +if (lean_obj_tag(x_9648) == 0) +{ +lean_object* x_9649; lean_object* x_9650; lean_object* x_9651; lean_object* x_9652; lean_object* x_9653; lean_object* x_9654; lean_object* x_9655; +x_9649 = lean_ctor_get(x_9648, 0); +lean_inc(x_9649); +x_9650 = lean_ctor_get(x_9648, 1); +lean_inc(x_9650); +lean_dec(x_9648); +x_9651 = lean_ctor_get(x_9649, 0); +lean_inc(x_9651); +x_9652 = lean_ctor_get(x_9649, 1); +lean_inc(x_9652); +if (lean_is_exclusive(x_9649)) { + lean_ctor_release(x_9649, 0); + lean_ctor_release(x_9649, 1); + x_9653 = x_9649; +} else { + lean_dec_ref(x_9649); + x_9653 = lean_box(0); +} +x_9654 = lean_alloc_ctor(1, 1, 0); +lean_ctor_set(x_9654, 0, x_9651); +if (lean_is_scalar(x_9653)) { + x_9655 = lean_alloc_ctor(0, 2, 0); +} else { + x_9655 = x_9653; +} +lean_ctor_set(x_9655, 0, x_9654); +lean_ctor_set(x_9655, 1, x_9652); +x_9466 = x_9655; +x_9467 = x_9650; +goto block_9496; +} +else +{ +lean_object* x_9656; lean_object* x_9657; lean_object* x_9658; lean_object* x_9659; +lean_dec(x_8864); +lean_dec(x_8853); +lean_dec(x_8845); +lean_dec(x_5); +lean_dec(x_4); +lean_dec(x_2); +lean_dec(x_1); +x_9656 = lean_ctor_get(x_9648, 0); +lean_inc(x_9656); +x_9657 = lean_ctor_get(x_9648, 1); +lean_inc(x_9657); +if (lean_is_exclusive(x_9648)) { + lean_ctor_release(x_9648, 0); + lean_ctor_release(x_9648, 1); + x_9658 = x_9648; +} else { + lean_dec_ref(x_9648); + x_9658 = lean_box(0); +} +if (lean_is_scalar(x_9658)) { + x_9659 = lean_alloc_ctor(1, 2, 0); +} else { + x_9659 = x_9658; +} +lean_ctor_set(x_9659, 0, x_9656); +lean_ctor_set(x_9659, 1, x_9657); +return x_9659; +} +} +else +{ +lean_object* x_9660; lean_object* x_9661; lean_object* x_9662; lean_object* x_9663; +lean_dec(x_9640); +lean_dec(x_9497); +lean_dec(x_8864); +lean_dec(x_8853); +lean_dec(x_8845); +lean_dec(x_5); +lean_dec(x_4); +lean_dec(x_2); +lean_dec(x_1); +x_9660 = lean_ctor_get(x_9643, 0); +lean_inc(x_9660); +x_9661 = lean_ctor_get(x_9643, 1); +lean_inc(x_9661); +if (lean_is_exclusive(x_9643)) { + lean_ctor_release(x_9643, 0); + lean_ctor_release(x_9643, 1); + x_9662 = x_9643; +} else { + lean_dec_ref(x_9643); + x_9662 = lean_box(0); +} +if (lean_is_scalar(x_9662)) { + x_9663 = lean_alloc_ctor(1, 2, 0); +} else { + x_9663 = x_9662; +} +lean_ctor_set(x_9663, 0, x_9660); +lean_ctor_set(x_9663, 1, x_9661); +return x_9663; +} +} +} +else +{ +lean_object* x_9664; lean_object* x_9665; lean_object* x_9666; lean_object* x_9667; lean_object* x_9668; lean_object* x_9669; lean_object* x_9670; lean_object* x_9671; +lean_dec(x_9597); +lean_dec(x_9595); +lean_inc(x_8853); +lean_inc(x_8845); +lean_ctor_set_tag(x_9497, 7); +lean_ctor_set(x_9497, 1, x_8853); +lean_ctor_set(x_9497, 0, x_8845); +x_9664 = lean_ctor_get(x_1, 0); +lean_inc(x_9664); +x_9665 = l_Lean_IR_ToIR_bindVar(x_9664, x_8859, x_4, x_5, x_9502); +x_9666 = lean_ctor_get(x_9665, 0); +lean_inc(x_9666); +x_9667 = lean_ctor_get(x_9665, 1); +lean_inc(x_9667); +lean_dec(x_9665); +x_9668 = lean_ctor_get(x_9666, 0); +lean_inc(x_9668); +x_9669 = lean_ctor_get(x_9666, 1); +lean_inc(x_9669); +lean_dec(x_9666); +x_9670 = lean_box(7); +lean_inc(x_5); +lean_inc(x_4); +lean_inc(x_2); +x_9671 = l_Lean_IR_ToIR_lowerLet___lambda__1(x_2, x_9668, x_9497, x_9670, x_9669, x_4, x_5, x_9667); +if (lean_obj_tag(x_9671) == 0) +{ +lean_object* x_9672; lean_object* 
x_9673; lean_object* x_9674; lean_object* x_9675; lean_object* x_9676; lean_object* x_9677; lean_object* x_9678; +x_9672 = lean_ctor_get(x_9671, 0); +lean_inc(x_9672); +x_9673 = lean_ctor_get(x_9671, 1); +lean_inc(x_9673); +lean_dec(x_9671); +x_9674 = lean_ctor_get(x_9672, 0); +lean_inc(x_9674); +x_9675 = lean_ctor_get(x_9672, 1); +lean_inc(x_9675); +if (lean_is_exclusive(x_9672)) { + lean_ctor_release(x_9672, 0); + lean_ctor_release(x_9672, 1); + x_9676 = x_9672; +} else { + lean_dec_ref(x_9672); + x_9676 = lean_box(0); +} +x_9677 = lean_alloc_ctor(1, 1, 0); +lean_ctor_set(x_9677, 0, x_9674); +if (lean_is_scalar(x_9676)) { + x_9678 = lean_alloc_ctor(0, 2, 0); +} else { + x_9678 = x_9676; +} +lean_ctor_set(x_9678, 0, x_9677); +lean_ctor_set(x_9678, 1, x_9675); +x_9466 = x_9678; +x_9467 = x_9673; +goto block_9496; +} +else +{ +lean_object* x_9679; lean_object* x_9680; lean_object* x_9681; lean_object* x_9682; +lean_dec(x_8864); +lean_dec(x_8853); +lean_dec(x_8845); +lean_dec(x_5); +lean_dec(x_4); +lean_dec(x_2); +lean_dec(x_1); +x_9679 = lean_ctor_get(x_9671, 0); +lean_inc(x_9679); +x_9680 = lean_ctor_get(x_9671, 1); +lean_inc(x_9680); +if (lean_is_exclusive(x_9671)) { + lean_ctor_release(x_9671, 0); + lean_ctor_release(x_9671, 1); + x_9681 = x_9671; +} else { + lean_dec_ref(x_9671); + x_9681 = lean_box(0); +} +if (lean_is_scalar(x_9681)) { + x_9682 = lean_alloc_ctor(1, 2, 0); +} else { + x_9682 = x_9681; +} +lean_ctor_set(x_9682, 0, x_9679); +lean_ctor_set(x_9682, 1, x_9680); +return x_9682; +} +} +} +} +else +{ +lean_object* x_9683; lean_object* x_9684; lean_object* x_9685; lean_object* x_9686; lean_object* x_9687; lean_object* x_9688; uint8_t x_9689; +x_9683 = lean_ctor_get(x_9497, 1); +lean_inc(x_9683); +lean_dec(x_9497); +x_9684 = lean_ctor_get(x_9498, 0); +lean_inc(x_9684); +if (lean_is_exclusive(x_9498)) { + lean_ctor_release(x_9498, 0); + x_9685 = x_9498; +} else { + lean_dec_ref(x_9498); + x_9685 = lean_box(0); +} +x_9686 = lean_array_get_size(x_8853); +x_9687 = lean_ctor_get(x_9684, 3); +lean_inc(x_9687); +lean_dec(x_9684); +x_9688 = lean_array_get_size(x_9687); +lean_dec(x_9687); +x_9689 = lean_nat_dec_lt(x_9686, x_9688); +if (x_9689 == 0) +{ +uint8_t x_9690; +x_9690 = lean_nat_dec_eq(x_9686, x_9688); +if (x_9690 == 0) +{ +lean_object* x_9691; lean_object* x_9692; lean_object* x_9693; lean_object* x_9694; lean_object* x_9695; lean_object* x_9696; lean_object* x_9697; lean_object* x_9698; lean_object* x_9699; lean_object* x_9700; lean_object* x_9701; lean_object* x_9702; lean_object* x_9703; lean_object* x_9704; lean_object* x_9705; lean_object* x_9706; lean_object* x_9707; +x_9691 = lean_unsigned_to_nat(0u); +x_9692 = l_Array_extract___rarg(x_8853, x_9691, x_9688); +x_9693 = l_Array_extract___rarg(x_8853, x_9688, x_9686); +lean_dec(x_9686); +lean_inc(x_8845); +x_9694 = lean_alloc_ctor(6, 2, 0); +lean_ctor_set(x_9694, 0, x_8845); +lean_ctor_set(x_9694, 1, x_9692); +x_9695 = lean_ctor_get(x_1, 0); +lean_inc(x_9695); +x_9696 = l_Lean_IR_ToIR_bindVar(x_9695, x_8859, x_4, x_5, x_9683); +x_9697 = lean_ctor_get(x_9696, 0); +lean_inc(x_9697); +x_9698 = lean_ctor_get(x_9696, 1); +lean_inc(x_9698); +lean_dec(x_9696); +x_9699 = lean_ctor_get(x_9697, 0); +lean_inc(x_9699); +x_9700 = lean_ctor_get(x_9697, 1); +lean_inc(x_9700); +lean_dec(x_9697); +x_9701 = l_Lean_IR_ToIR_newVar(x_9700, x_4, x_5, x_9698); +x_9702 = lean_ctor_get(x_9701, 0); +lean_inc(x_9702); +x_9703 = lean_ctor_get(x_9701, 1); +lean_inc(x_9703); +lean_dec(x_9701); +x_9704 = lean_ctor_get(x_9702, 0); +lean_inc(x_9704); +x_9705 
= lean_ctor_get(x_9702, 1); +lean_inc(x_9705); +lean_dec(x_9702); +x_9706 = lean_ctor_get(x_1, 2); +lean_inc(x_9706); +lean_inc(x_5); +lean_inc(x_4); +x_9707 = l_Lean_IR_ToIR_lowerType(x_9706, x_9705, x_4, x_5, x_9703); +if (lean_obj_tag(x_9707) == 0) +{ +lean_object* x_9708; lean_object* x_9709; lean_object* x_9710; lean_object* x_9711; lean_object* x_9712; +x_9708 = lean_ctor_get(x_9707, 0); +lean_inc(x_9708); +x_9709 = lean_ctor_get(x_9707, 1); +lean_inc(x_9709); +lean_dec(x_9707); +x_9710 = lean_ctor_get(x_9708, 0); +lean_inc(x_9710); +x_9711 = lean_ctor_get(x_9708, 1); +lean_inc(x_9711); +lean_dec(x_9708); +lean_inc(x_5); +lean_inc(x_4); +lean_inc(x_2); +x_9712 = l_Lean_IR_ToIR_lowerLet___lambda__3(x_2, x_9704, x_9693, x_9699, x_9694, x_9710, x_9711, x_4, x_5, x_9709); +if (lean_obj_tag(x_9712) == 0) +{ +lean_object* x_9713; lean_object* x_9714; lean_object* x_9715; lean_object* x_9716; lean_object* x_9717; lean_object* x_9718; lean_object* x_9719; +x_9713 = lean_ctor_get(x_9712, 0); +lean_inc(x_9713); +x_9714 = lean_ctor_get(x_9712, 1); +lean_inc(x_9714); +lean_dec(x_9712); +x_9715 = lean_ctor_get(x_9713, 0); +lean_inc(x_9715); +x_9716 = lean_ctor_get(x_9713, 1); +lean_inc(x_9716); +if (lean_is_exclusive(x_9713)) { + lean_ctor_release(x_9713, 0); + lean_ctor_release(x_9713, 1); + x_9717 = x_9713; +} else { + lean_dec_ref(x_9713); + x_9717 = lean_box(0); +} +if (lean_is_scalar(x_9685)) { + x_9718 = lean_alloc_ctor(1, 1, 0); +} else { + x_9718 = x_9685; +} +lean_ctor_set(x_9718, 0, x_9715); +if (lean_is_scalar(x_9717)) { + x_9719 = lean_alloc_ctor(0, 2, 0); +} else { + x_9719 = x_9717; +} +lean_ctor_set(x_9719, 0, x_9718); +lean_ctor_set(x_9719, 1, x_9716); +x_9466 = x_9719; +x_9467 = x_9714; +goto block_9496; +} +else +{ +lean_object* x_9720; lean_object* x_9721; lean_object* x_9722; lean_object* x_9723; +lean_dec(x_9685); +lean_dec(x_8864); +lean_dec(x_8853); +lean_dec(x_8845); +lean_dec(x_5); +lean_dec(x_4); +lean_dec(x_2); +lean_dec(x_1); +x_9720 = lean_ctor_get(x_9712, 0); +lean_inc(x_9720); +x_9721 = lean_ctor_get(x_9712, 1); +lean_inc(x_9721); +if (lean_is_exclusive(x_9712)) { + lean_ctor_release(x_9712, 0); + lean_ctor_release(x_9712, 1); + x_9722 = x_9712; +} else { + lean_dec_ref(x_9712); + x_9722 = lean_box(0); +} +if (lean_is_scalar(x_9722)) { + x_9723 = lean_alloc_ctor(1, 2, 0); +} else { + x_9723 = x_9722; +} +lean_ctor_set(x_9723, 0, x_9720); +lean_ctor_set(x_9723, 1, x_9721); +return x_9723; +} +} +else +{ +lean_object* x_9724; lean_object* x_9725; lean_object* x_9726; lean_object* x_9727; +lean_dec(x_9704); +lean_dec(x_9699); +lean_dec(x_9694); +lean_dec(x_9693); +lean_dec(x_9685); +lean_dec(x_8864); +lean_dec(x_8853); +lean_dec(x_8845); +lean_dec(x_5); +lean_dec(x_4); +lean_dec(x_2); +lean_dec(x_1); +x_9724 = lean_ctor_get(x_9707, 0); +lean_inc(x_9724); +x_9725 = lean_ctor_get(x_9707, 1); +lean_inc(x_9725); +if (lean_is_exclusive(x_9707)) { + lean_ctor_release(x_9707, 0); + lean_ctor_release(x_9707, 1); + x_9726 = x_9707; +} else { + lean_dec_ref(x_9707); + x_9726 = lean_box(0); +} +if (lean_is_scalar(x_9726)) { + x_9727 = lean_alloc_ctor(1, 2, 0); +} else { + x_9727 = x_9726; +} +lean_ctor_set(x_9727, 0, x_9724); +lean_ctor_set(x_9727, 1, x_9725); +return x_9727; +} +} +else +{ +lean_object* x_9728; lean_object* x_9729; lean_object* x_9730; lean_object* x_9731; lean_object* x_9732; lean_object* x_9733; lean_object* x_9734; lean_object* x_9735; lean_object* x_9736; +lean_dec(x_9688); +lean_dec(x_9686); +lean_inc(x_8853); +lean_inc(x_8845); +x_9728 = lean_alloc_ctor(6, 
2, 0); +lean_ctor_set(x_9728, 0, x_8845); +lean_ctor_set(x_9728, 1, x_8853); +x_9729 = lean_ctor_get(x_1, 0); +lean_inc(x_9729); +x_9730 = l_Lean_IR_ToIR_bindVar(x_9729, x_8859, x_4, x_5, x_9683); +x_9731 = lean_ctor_get(x_9730, 0); +lean_inc(x_9731); +x_9732 = lean_ctor_get(x_9730, 1); +lean_inc(x_9732); +lean_dec(x_9730); +x_9733 = lean_ctor_get(x_9731, 0); +lean_inc(x_9733); +x_9734 = lean_ctor_get(x_9731, 1); +lean_inc(x_9734); +lean_dec(x_9731); +x_9735 = lean_ctor_get(x_1, 2); +lean_inc(x_9735); +lean_inc(x_5); +lean_inc(x_4); +x_9736 = l_Lean_IR_ToIR_lowerType(x_9735, x_9734, x_4, x_5, x_9732); +if (lean_obj_tag(x_9736) == 0) +{ +lean_object* x_9737; lean_object* x_9738; lean_object* x_9739; lean_object* x_9740; lean_object* x_9741; +x_9737 = lean_ctor_get(x_9736, 0); +lean_inc(x_9737); +x_9738 = lean_ctor_get(x_9736, 1); +lean_inc(x_9738); +lean_dec(x_9736); +x_9739 = lean_ctor_get(x_9737, 0); +lean_inc(x_9739); +x_9740 = lean_ctor_get(x_9737, 1); +lean_inc(x_9740); +lean_dec(x_9737); +lean_inc(x_5); +lean_inc(x_4); +lean_inc(x_2); +x_9741 = l_Lean_IR_ToIR_lowerLet___lambda__1(x_2, x_9733, x_9728, x_9739, x_9740, x_4, x_5, x_9738); +if (lean_obj_tag(x_9741) == 0) +{ +lean_object* x_9742; lean_object* x_9743; lean_object* x_9744; lean_object* x_9745; lean_object* x_9746; lean_object* x_9747; lean_object* x_9748; +x_9742 = lean_ctor_get(x_9741, 0); +lean_inc(x_9742); +x_9743 = lean_ctor_get(x_9741, 1); +lean_inc(x_9743); +lean_dec(x_9741); +x_9744 = lean_ctor_get(x_9742, 0); +lean_inc(x_9744); +x_9745 = lean_ctor_get(x_9742, 1); +lean_inc(x_9745); +if (lean_is_exclusive(x_9742)) { + lean_ctor_release(x_9742, 0); + lean_ctor_release(x_9742, 1); + x_9746 = x_9742; +} else { + lean_dec_ref(x_9742); + x_9746 = lean_box(0); +} +if (lean_is_scalar(x_9685)) { + x_9747 = lean_alloc_ctor(1, 1, 0); +} else { + x_9747 = x_9685; +} +lean_ctor_set(x_9747, 0, x_9744); +if (lean_is_scalar(x_9746)) { + x_9748 = lean_alloc_ctor(0, 2, 0); +} else { + x_9748 = x_9746; +} +lean_ctor_set(x_9748, 0, x_9747); +lean_ctor_set(x_9748, 1, x_9745); +x_9466 = x_9748; +x_9467 = x_9743; +goto block_9496; +} +else +{ +lean_object* x_9749; lean_object* x_9750; lean_object* x_9751; lean_object* x_9752; +lean_dec(x_9685); +lean_dec(x_8864); +lean_dec(x_8853); +lean_dec(x_8845); +lean_dec(x_5); +lean_dec(x_4); +lean_dec(x_2); +lean_dec(x_1); +x_9749 = lean_ctor_get(x_9741, 0); +lean_inc(x_9749); +x_9750 = lean_ctor_get(x_9741, 1); +lean_inc(x_9750); +if (lean_is_exclusive(x_9741)) { + lean_ctor_release(x_9741, 0); + lean_ctor_release(x_9741, 1); + x_9751 = x_9741; +} else { + lean_dec_ref(x_9741); + x_9751 = lean_box(0); +} +if (lean_is_scalar(x_9751)) { + x_9752 = lean_alloc_ctor(1, 2, 0); +} else { + x_9752 = x_9751; +} +lean_ctor_set(x_9752, 0, x_9749); +lean_ctor_set(x_9752, 1, x_9750); +return x_9752; +} +} +else +{ +lean_object* x_9753; lean_object* x_9754; lean_object* x_9755; lean_object* x_9756; +lean_dec(x_9733); +lean_dec(x_9728); +lean_dec(x_9685); +lean_dec(x_8864); +lean_dec(x_8853); +lean_dec(x_8845); +lean_dec(x_5); +lean_dec(x_4); +lean_dec(x_2); +lean_dec(x_1); +x_9753 = lean_ctor_get(x_9736, 0); +lean_inc(x_9753); +x_9754 = lean_ctor_get(x_9736, 1); +lean_inc(x_9754); +if (lean_is_exclusive(x_9736)) { + lean_ctor_release(x_9736, 0); + lean_ctor_release(x_9736, 1); + x_9755 = x_9736; +} else { + lean_dec_ref(x_9736); + x_9755 = lean_box(0); +} +if (lean_is_scalar(x_9755)) { + x_9756 = lean_alloc_ctor(1, 2, 0); +} else { + x_9756 = x_9755; +} +lean_ctor_set(x_9756, 0, x_9753); +lean_ctor_set(x_9756, 1, 
x_9754); +return x_9756; +} +} +} +else +{ +lean_object* x_9757; lean_object* x_9758; lean_object* x_9759; lean_object* x_9760; lean_object* x_9761; lean_object* x_9762; lean_object* x_9763; lean_object* x_9764; lean_object* x_9765; +lean_dec(x_9688); +lean_dec(x_9686); +lean_inc(x_8853); +lean_inc(x_8845); +x_9757 = lean_alloc_ctor(7, 2, 0); +lean_ctor_set(x_9757, 0, x_8845); +lean_ctor_set(x_9757, 1, x_8853); +x_9758 = lean_ctor_get(x_1, 0); +lean_inc(x_9758); +x_9759 = l_Lean_IR_ToIR_bindVar(x_9758, x_8859, x_4, x_5, x_9683); +x_9760 = lean_ctor_get(x_9759, 0); +lean_inc(x_9760); +x_9761 = lean_ctor_get(x_9759, 1); +lean_inc(x_9761); +lean_dec(x_9759); +x_9762 = lean_ctor_get(x_9760, 0); +lean_inc(x_9762); +x_9763 = lean_ctor_get(x_9760, 1); +lean_inc(x_9763); +lean_dec(x_9760); +x_9764 = lean_box(7); +lean_inc(x_5); +lean_inc(x_4); +lean_inc(x_2); +x_9765 = l_Lean_IR_ToIR_lowerLet___lambda__1(x_2, x_9762, x_9757, x_9764, x_9763, x_4, x_5, x_9761); +if (lean_obj_tag(x_9765) == 0) +{ +lean_object* x_9766; lean_object* x_9767; lean_object* x_9768; lean_object* x_9769; lean_object* x_9770; lean_object* x_9771; lean_object* x_9772; +x_9766 = lean_ctor_get(x_9765, 0); +lean_inc(x_9766); +x_9767 = lean_ctor_get(x_9765, 1); +lean_inc(x_9767); +lean_dec(x_9765); +x_9768 = lean_ctor_get(x_9766, 0); +lean_inc(x_9768); +x_9769 = lean_ctor_get(x_9766, 1); +lean_inc(x_9769); +if (lean_is_exclusive(x_9766)) { + lean_ctor_release(x_9766, 0); + lean_ctor_release(x_9766, 1); + x_9770 = x_9766; +} else { + lean_dec_ref(x_9766); + x_9770 = lean_box(0); +} +if (lean_is_scalar(x_9685)) { + x_9771 = lean_alloc_ctor(1, 1, 0); +} else { + x_9771 = x_9685; +} +lean_ctor_set(x_9771, 0, x_9768); +if (lean_is_scalar(x_9770)) { + x_9772 = lean_alloc_ctor(0, 2, 0); +} else { + x_9772 = x_9770; +} +lean_ctor_set(x_9772, 0, x_9771); +lean_ctor_set(x_9772, 1, x_9769); +x_9466 = x_9772; +x_9467 = x_9767; +goto block_9496; +} +else +{ +lean_object* x_9773; lean_object* x_9774; lean_object* x_9775; lean_object* x_9776; +lean_dec(x_9685); +lean_dec(x_8864); +lean_dec(x_8853); +lean_dec(x_8845); +lean_dec(x_5); +lean_dec(x_4); +lean_dec(x_2); +lean_dec(x_1); +x_9773 = lean_ctor_get(x_9765, 0); +lean_inc(x_9773); +x_9774 = lean_ctor_get(x_9765, 1); +lean_inc(x_9774); +if (lean_is_exclusive(x_9765)) { + lean_ctor_release(x_9765, 0); + lean_ctor_release(x_9765, 1); + x_9775 = x_9765; +} else { + lean_dec_ref(x_9765); + x_9775 = lean_box(0); +} +if (lean_is_scalar(x_9775)) { + x_9776 = lean_alloc_ctor(1, 2, 0); +} else { + x_9776 = x_9775; +} +lean_ctor_set(x_9776, 0, x_9773); +lean_ctor_set(x_9776, 1, x_9774); +return x_9776; +} +} +} +} +block_9496: +{ +lean_object* x_9468; +x_9468 = lean_ctor_get(x_9466, 0); +lean_inc(x_9468); +if (lean_obj_tag(x_9468) == 0) +{ +lean_object* x_9469; lean_object* x_9470; lean_object* x_9471; lean_object* x_9472; lean_object* x_9473; lean_object* x_9474; lean_object* x_9475; lean_object* x_9476; lean_object* x_9477; lean_object* x_9478; +lean_dec(x_8864); +x_9469 = lean_ctor_get(x_9466, 1); +lean_inc(x_9469); +lean_dec(x_9466); +x_9470 = lean_alloc_ctor(6, 2, 0); +lean_ctor_set(x_9470, 0, x_8845); +lean_ctor_set(x_9470, 1, x_8853); +x_9471 = lean_ctor_get(x_1, 0); +lean_inc(x_9471); +x_9472 = l_Lean_IR_ToIR_bindVar(x_9471, x_9469, x_4, x_5, x_9467); +x_9473 = lean_ctor_get(x_9472, 0); +lean_inc(x_9473); +x_9474 = lean_ctor_get(x_9472, 1); +lean_inc(x_9474); +lean_dec(x_9472); +x_9475 = lean_ctor_get(x_9473, 0); +lean_inc(x_9475); +x_9476 = lean_ctor_get(x_9473, 1); +lean_inc(x_9476); 
+lean_dec(x_9473); +x_9477 = lean_ctor_get(x_1, 2); +lean_inc(x_9477); +lean_dec(x_1); +lean_inc(x_5); +lean_inc(x_4); +x_9478 = l_Lean_IR_ToIR_lowerType(x_9477, x_9476, x_4, x_5, x_9474); +if (lean_obj_tag(x_9478) == 0) +{ +lean_object* x_9479; lean_object* x_9480; lean_object* x_9481; lean_object* x_9482; lean_object* x_9483; +x_9479 = lean_ctor_get(x_9478, 0); +lean_inc(x_9479); +x_9480 = lean_ctor_get(x_9478, 1); +lean_inc(x_9480); +lean_dec(x_9478); +x_9481 = lean_ctor_get(x_9479, 0); +lean_inc(x_9481); +x_9482 = lean_ctor_get(x_9479, 1); +lean_inc(x_9482); +lean_dec(x_9479); +x_9483 = l_Lean_IR_ToIR_lowerLet___lambda__1(x_2, x_9475, x_9470, x_9481, x_9482, x_4, x_5, x_9480); +return x_9483; +} +else +{ +uint8_t x_9484; +lean_dec(x_9475); +lean_dec(x_9470); +lean_dec(x_5); +lean_dec(x_4); +lean_dec(x_2); +x_9484 = !lean_is_exclusive(x_9478); +if (x_9484 == 0) +{ +return x_9478; +} +else +{ +lean_object* x_9485; lean_object* x_9486; lean_object* x_9487; +x_9485 = lean_ctor_get(x_9478, 0); +x_9486 = lean_ctor_get(x_9478, 1); +lean_inc(x_9486); +lean_inc(x_9485); +lean_dec(x_9478); +x_9487 = lean_alloc_ctor(1, 2, 0); +lean_ctor_set(x_9487, 0, x_9485); +lean_ctor_set(x_9487, 1, x_9486); +return x_9487; +} +} +} +else +{ +uint8_t x_9488; +lean_dec(x_8853); +lean_dec(x_8845); +lean_dec(x_5); +lean_dec(x_4); +lean_dec(x_2); +lean_dec(x_1); +x_9488 = !lean_is_exclusive(x_9466); +if (x_9488 == 0) +{ +lean_object* x_9489; lean_object* x_9490; lean_object* x_9491; +x_9489 = lean_ctor_get(x_9466, 0); +lean_dec(x_9489); +x_9490 = lean_ctor_get(x_9468, 0); +lean_inc(x_9490); +lean_dec(x_9468); +lean_ctor_set(x_9466, 0, x_9490); +if (lean_is_scalar(x_8864)) { + x_9491 = lean_alloc_ctor(0, 2, 0); +} else { + x_9491 = x_8864; +} +lean_ctor_set(x_9491, 0, x_9466); +lean_ctor_set(x_9491, 1, x_9467); +return x_9491; +} +else +{ +lean_object* x_9492; lean_object* x_9493; lean_object* x_9494; lean_object* x_9495; +x_9492 = lean_ctor_get(x_9466, 1); +lean_inc(x_9492); +lean_dec(x_9466); +x_9493 = lean_ctor_get(x_9468, 0); +lean_inc(x_9493); +lean_dec(x_9468); +x_9494 = lean_alloc_ctor(0, 2, 0); +lean_ctor_set(x_9494, 0, x_9493); +lean_ctor_set(x_9494, 1, x_9492); +if (lean_is_scalar(x_8864)) { + x_9495 = lean_alloc_ctor(0, 2, 0); +} else { + x_9495 = x_8864; +} +lean_ctor_set(x_9495, 0, x_9494); +lean_ctor_set(x_9495, 1, x_9467); +return x_9495; +} +} +} +} +case 4: +{ +uint8_t x_9777; +lean_dec(x_8865); +lean_dec(x_8864); +lean_free_object(x_8855); +lean_dec(x_5945); +lean_dec(x_5944); +x_9777 = !lean_is_exclusive(x_8870); +if (x_9777 == 0) +{ +lean_object* x_9778; lean_object* x_9779; uint8_t x_9780; +x_9778 = lean_ctor_get(x_8870, 0); +lean_dec(x_9778); +x_9779 = l_Lean_IR_ToIR_lowerLet___closed__20; +x_9780 = lean_name_eq(x_8845, x_9779); +if (x_9780 == 0) +{ +uint8_t x_9781; lean_object* x_9782; lean_object* x_9783; lean_object* x_9784; lean_object* x_9785; lean_object* x_9786; lean_object* x_9787; lean_object* x_9788; lean_object* x_9789; +lean_dec(x_8853); +lean_dec(x_2); +lean_dec(x_1); +x_9781 = 1; +x_9782 = l_Lean_IR_ToIR_lowerLet___closed__14; +x_9783 = l_Lean_Name_toString(x_8845, x_9781, x_9782); +lean_ctor_set_tag(x_8870, 3); +lean_ctor_set(x_8870, 0, x_9783); +x_9784 = l_Lean_IR_ToIR_lowerLet___closed__22; +x_9785 = lean_alloc_ctor(5, 2, 0); +lean_ctor_set(x_9785, 0, x_9784); +lean_ctor_set(x_9785, 1, x_8870); +x_9786 = l_Lean_IR_ToIR_lowerLet___closed__24; +x_9787 = lean_alloc_ctor(5, 2, 0); +lean_ctor_set(x_9787, 0, x_9785); +lean_ctor_set(x_9787, 1, x_9786); +x_9788 = 
l_Lean_MessageData_ofFormat(x_9787); +x_9789 = l_Lean_throwError___at_Lean_IR_ToIR_lowerLet___spec__1(x_9788, x_8859, x_4, x_5, x_8863); +lean_dec(x_5); +lean_dec(x_4); +lean_dec(x_8859); +return x_9789; +} +else +{ +lean_object* x_9790; lean_object* x_9791; lean_object* x_9792; +lean_free_object(x_8870); +lean_dec(x_8845); +x_9790 = l_Lean_IR_instInhabitedArg; +x_9791 = lean_unsigned_to_nat(2u); +x_9792 = lean_array_get(x_9790, x_8853, x_9791); +lean_dec(x_8853); +if (lean_obj_tag(x_9792) == 0) +{ +lean_object* x_9793; lean_object* x_9794; lean_object* x_9795; lean_object* x_9796; lean_object* x_9797; lean_object* x_9798; lean_object* x_9799; +x_9793 = lean_ctor_get(x_9792, 0); +lean_inc(x_9793); +lean_dec(x_9792); +x_9794 = lean_ctor_get(x_1, 0); +lean_inc(x_9794); +lean_dec(x_1); +x_9795 = l_Lean_IR_ToIR_bindVarToVarId(x_9794, x_9793, x_8859, x_4, x_5, x_8863); +x_9796 = lean_ctor_get(x_9795, 0); +lean_inc(x_9796); +x_9797 = lean_ctor_get(x_9795, 1); +lean_inc(x_9797); +lean_dec(x_9795); +x_9798 = lean_ctor_get(x_9796, 1); +lean_inc(x_9798); +lean_dec(x_9796); +x_9799 = l_Lean_IR_ToIR_lowerCode(x_2, x_9798, x_4, x_5, x_9797); +return x_9799; +} +else +{ +lean_object* x_9800; lean_object* x_9801; lean_object* x_9802; lean_object* x_9803; lean_object* x_9804; lean_object* x_9805; +x_9800 = lean_ctor_get(x_1, 0); +lean_inc(x_9800); +lean_dec(x_1); +x_9801 = l_Lean_IR_ToIR_bindErased(x_9800, x_8859, x_4, x_5, x_8863); +x_9802 = lean_ctor_get(x_9801, 0); +lean_inc(x_9802); +x_9803 = lean_ctor_get(x_9801, 1); +lean_inc(x_9803); +lean_dec(x_9801); +x_9804 = lean_ctor_get(x_9802, 1); +lean_inc(x_9804); +lean_dec(x_9802); +x_9805 = l_Lean_IR_ToIR_lowerCode(x_2, x_9804, x_4, x_5, x_9803); +return x_9805; +} +} +} +else +{ +lean_object* x_9806; uint8_t x_9807; +lean_dec(x_8870); +x_9806 = l_Lean_IR_ToIR_lowerLet___closed__20; +x_9807 = lean_name_eq(x_8845, x_9806); +if (x_9807 == 0) +{ +uint8_t x_9808; lean_object* x_9809; lean_object* x_9810; lean_object* x_9811; lean_object* x_9812; lean_object* x_9813; lean_object* x_9814; lean_object* x_9815; lean_object* x_9816; lean_object* x_9817; +lean_dec(x_8853); +lean_dec(x_2); +lean_dec(x_1); +x_9808 = 1; +x_9809 = l_Lean_IR_ToIR_lowerLet___closed__14; +x_9810 = l_Lean_Name_toString(x_8845, x_9808, x_9809); +x_9811 = lean_alloc_ctor(3, 1, 0); +lean_ctor_set(x_9811, 0, x_9810); +x_9812 = l_Lean_IR_ToIR_lowerLet___closed__22; +x_9813 = lean_alloc_ctor(5, 2, 0); +lean_ctor_set(x_9813, 0, x_9812); +lean_ctor_set(x_9813, 1, x_9811); +x_9814 = l_Lean_IR_ToIR_lowerLet___closed__24; +x_9815 = lean_alloc_ctor(5, 2, 0); +lean_ctor_set(x_9815, 0, x_9813); +lean_ctor_set(x_9815, 1, x_9814); +x_9816 = l_Lean_MessageData_ofFormat(x_9815); +x_9817 = l_Lean_throwError___at_Lean_IR_ToIR_lowerLet___spec__1(x_9816, x_8859, x_4, x_5, x_8863); +lean_dec(x_5); +lean_dec(x_4); +lean_dec(x_8859); +return x_9817; +} +else +{ +lean_object* x_9818; lean_object* x_9819; lean_object* x_9820; +lean_dec(x_8845); +x_9818 = l_Lean_IR_instInhabitedArg; +x_9819 = lean_unsigned_to_nat(2u); +x_9820 = lean_array_get(x_9818, x_8853, x_9819); +lean_dec(x_8853); +if (lean_obj_tag(x_9820) == 0) +{ +lean_object* x_9821; lean_object* x_9822; lean_object* x_9823; lean_object* x_9824; lean_object* x_9825; lean_object* x_9826; lean_object* x_9827; +x_9821 = lean_ctor_get(x_9820, 0); +lean_inc(x_9821); +lean_dec(x_9820); +x_9822 = lean_ctor_get(x_1, 0); +lean_inc(x_9822); +lean_dec(x_1); +x_9823 = l_Lean_IR_ToIR_bindVarToVarId(x_9822, x_9821, x_8859, x_4, x_5, x_8863); +x_9824 = 
lean_ctor_get(x_9823, 0); +lean_inc(x_9824); +x_9825 = lean_ctor_get(x_9823, 1); +lean_inc(x_9825); +lean_dec(x_9823); +x_9826 = lean_ctor_get(x_9824, 1); +lean_inc(x_9826); +lean_dec(x_9824); +x_9827 = l_Lean_IR_ToIR_lowerCode(x_2, x_9826, x_4, x_5, x_9825); +return x_9827; +} +else +{ +lean_object* x_9828; lean_object* x_9829; lean_object* x_9830; lean_object* x_9831; lean_object* x_9832; lean_object* x_9833; +x_9828 = lean_ctor_get(x_1, 0); +lean_inc(x_9828); +lean_dec(x_1); +x_9829 = l_Lean_IR_ToIR_bindErased(x_9828, x_8859, x_4, x_5, x_8863); +x_9830 = lean_ctor_get(x_9829, 0); +lean_inc(x_9830); +x_9831 = lean_ctor_get(x_9829, 1); +lean_inc(x_9831); +lean_dec(x_9829); +x_9832 = lean_ctor_get(x_9830, 1); +lean_inc(x_9832); +lean_dec(x_9830); +x_9833 = l_Lean_IR_ToIR_lowerCode(x_2, x_9832, x_4, x_5, x_9831); +return x_9833; +} +} +} +} +case 5: +{ +lean_object* x_9834; lean_object* x_9835; +lean_dec(x_8870); +lean_dec(x_8865); +lean_dec(x_8864); +lean_free_object(x_8855); +lean_dec(x_8853); +lean_dec(x_8845); +lean_dec(x_5945); +lean_dec(x_5944); +lean_dec(x_2); +lean_dec(x_1); +x_9834 = l_Lean_IR_ToIR_lowerLet___closed__26; +x_9835 = l_panic___at_Lean_IR_ToIR_lowerAlt_loop___spec__1(x_9834, x_8859, x_4, x_5, x_8863); +return x_9835; +} +case 6: +{ +lean_object* x_9836; uint8_t x_9837; +x_9836 = lean_ctor_get(x_8870, 0); +lean_inc(x_9836); +lean_dec(x_8870); +lean_inc(x_8845); +x_9837 = l_Lean_isExtern(x_8865, x_8845); +if (x_9837 == 0) +{ +lean_object* x_9838; +lean_dec(x_8864); +lean_free_object(x_8855); +lean_dec(x_8853); +lean_inc(x_5); +lean_inc(x_4); +x_9838 = l_Lean_IR_ToIR_getCtorInfo(x_8845, x_8859, x_4, x_5, x_8863); +if (lean_obj_tag(x_9838) == 0) +{ +lean_object* x_9839; lean_object* x_9840; lean_object* x_9841; lean_object* x_9842; lean_object* x_9843; lean_object* x_9844; lean_object* x_9845; lean_object* x_9846; lean_object* x_9847; lean_object* x_9848; lean_object* x_9849; lean_object* x_9850; lean_object* x_9851; lean_object* x_9852; lean_object* x_9853; lean_object* x_9854; lean_object* x_9855; lean_object* x_9856; lean_object* x_9857; lean_object* x_9858; +x_9839 = lean_ctor_get(x_9838, 0); +lean_inc(x_9839); +x_9840 = lean_ctor_get(x_9839, 0); +lean_inc(x_9840); +x_9841 = lean_ctor_get(x_9838, 1); +lean_inc(x_9841); +lean_dec(x_9838); +x_9842 = lean_ctor_get(x_9839, 1); +lean_inc(x_9842); +lean_dec(x_9839); +x_9843 = lean_ctor_get(x_9840, 0); +lean_inc(x_9843); +x_9844 = lean_ctor_get(x_9840, 1); +lean_inc(x_9844); +lean_dec(x_9840); +x_9845 = lean_ctor_get(x_9836, 3); +lean_inc(x_9845); +lean_dec(x_9836); +x_9846 = lean_array_get_size(x_5944); +x_9847 = l_Array_extract___rarg(x_5944, x_9845, x_9846); +lean_dec(x_9846); +lean_dec(x_5944); +x_9848 = lean_array_get_size(x_9844); +x_9849 = lean_unsigned_to_nat(0u); +x_9850 = lean_unsigned_to_nat(1u); +if (lean_is_scalar(x_5945)) { + x_9851 = lean_alloc_ctor(0, 3, 0); +} else { + x_9851 = x_5945; + lean_ctor_set_tag(x_9851, 0); +} +lean_ctor_set(x_9851, 0, x_9849); +lean_ctor_set(x_9851, 1, x_9848); +lean_ctor_set(x_9851, 2, x_9850); +x_9852 = l_Lean_IR_ToIR_lowerLet___closed__27; +x_9853 = l_Std_Range_forIn_x27_loop___at_Lean_IR_ToIR_lowerLet___spec__5(x_9844, x_9847, x_9851, x_9851, x_9852, x_9849, lean_box(0), lean_box(0), x_9842, x_4, x_5, x_9841); +lean_dec(x_9851); +x_9854 = lean_ctor_get(x_9853, 0); +lean_inc(x_9854); +x_9855 = lean_ctor_get(x_9853, 1); +lean_inc(x_9855); +lean_dec(x_9853); +x_9856 = lean_ctor_get(x_9854, 0); +lean_inc(x_9856); +x_9857 = lean_ctor_get(x_9854, 1); +lean_inc(x_9857); 
+lean_dec(x_9854); +x_9858 = l_Lean_IR_ToIR_lowerLet___lambda__4(x_1, x_2, x_9843, x_9844, x_9847, x_9856, x_9857, x_4, x_5, x_9855); +lean_dec(x_9847); +lean_dec(x_9844); +return x_9858; +} +else +{ +uint8_t x_9859; +lean_dec(x_9836); +lean_dec(x_5945); +lean_dec(x_5944); +lean_dec(x_5); +lean_dec(x_4); +lean_dec(x_2); +lean_dec(x_1); +x_9859 = !lean_is_exclusive(x_9838); +if (x_9859 == 0) +{ +return x_9838; +} +else +{ +lean_object* x_9860; lean_object* x_9861; lean_object* x_9862; +x_9860 = lean_ctor_get(x_9838, 0); +x_9861 = lean_ctor_get(x_9838, 1); +lean_inc(x_9861); +lean_inc(x_9860); +lean_dec(x_9838); +x_9862 = lean_alloc_ctor(1, 2, 0); +lean_ctor_set(x_9862, 0, x_9860); +lean_ctor_set(x_9862, 1, x_9861); +return x_9862; +} +} +} +else +{ +lean_object* x_9863; lean_object* x_9864; lean_object* x_9894; lean_object* x_9895; +lean_dec(x_9836); +lean_dec(x_5945); +lean_dec(x_5944); +lean_inc(x_8845); +x_9894 = l_Lean_Compiler_LCNF_getMonoDecl_x3f(x_8845, x_4, x_5, x_8863); +x_9895 = lean_ctor_get(x_9894, 0); +lean_inc(x_9895); +if (lean_obj_tag(x_9895) == 0) +{ +lean_object* x_9896; lean_object* x_9897; +x_9896 = lean_ctor_get(x_9894, 1); +lean_inc(x_9896); +lean_dec(x_9894); +x_9897 = lean_box(0); +lean_ctor_set(x_8855, 0, x_9897); +x_9863 = x_8855; +x_9864 = x_9896; +goto block_9893; +} +else +{ +uint8_t x_9898; +lean_free_object(x_8855); +x_9898 = !lean_is_exclusive(x_9894); +if (x_9898 == 0) +{ +lean_object* x_9899; lean_object* x_9900; uint8_t x_9901; +x_9899 = lean_ctor_get(x_9894, 1); +x_9900 = lean_ctor_get(x_9894, 0); +lean_dec(x_9900); +x_9901 = !lean_is_exclusive(x_9895); +if (x_9901 == 0) +{ +lean_object* x_9902; lean_object* x_9903; lean_object* x_9904; lean_object* x_9905; uint8_t x_9906; +x_9902 = lean_ctor_get(x_9895, 0); +x_9903 = lean_array_get_size(x_8853); +x_9904 = lean_ctor_get(x_9902, 3); +lean_inc(x_9904); +lean_dec(x_9902); +x_9905 = lean_array_get_size(x_9904); +lean_dec(x_9904); +x_9906 = lean_nat_dec_lt(x_9903, x_9905); +if (x_9906 == 0) +{ +uint8_t x_9907; +x_9907 = lean_nat_dec_eq(x_9903, x_9905); +if (x_9907 == 0) +{ +lean_object* x_9908; lean_object* x_9909; lean_object* x_9910; lean_object* x_9911; lean_object* x_9912; lean_object* x_9913; lean_object* x_9914; lean_object* x_9915; lean_object* x_9916; lean_object* x_9917; lean_object* x_9918; lean_object* x_9919; lean_object* x_9920; lean_object* x_9921; lean_object* x_9922; lean_object* x_9923; +x_9908 = lean_unsigned_to_nat(0u); +x_9909 = l_Array_extract___rarg(x_8853, x_9908, x_9905); +x_9910 = l_Array_extract___rarg(x_8853, x_9905, x_9903); +lean_dec(x_9903); +lean_inc(x_8845); +lean_ctor_set_tag(x_9894, 6); +lean_ctor_set(x_9894, 1, x_9909); +lean_ctor_set(x_9894, 0, x_8845); +x_9911 = lean_ctor_get(x_1, 0); +lean_inc(x_9911); +x_9912 = l_Lean_IR_ToIR_bindVar(x_9911, x_8859, x_4, x_5, x_9899); +x_9913 = lean_ctor_get(x_9912, 0); +lean_inc(x_9913); +x_9914 = lean_ctor_get(x_9912, 1); +lean_inc(x_9914); +lean_dec(x_9912); +x_9915 = lean_ctor_get(x_9913, 0); +lean_inc(x_9915); +x_9916 = lean_ctor_get(x_9913, 1); +lean_inc(x_9916); +lean_dec(x_9913); +x_9917 = l_Lean_IR_ToIR_newVar(x_9916, x_4, x_5, x_9914); +x_9918 = lean_ctor_get(x_9917, 0); +lean_inc(x_9918); +x_9919 = lean_ctor_get(x_9917, 1); +lean_inc(x_9919); +lean_dec(x_9917); +x_9920 = lean_ctor_get(x_9918, 0); +lean_inc(x_9920); +x_9921 = lean_ctor_get(x_9918, 1); +lean_inc(x_9921); +lean_dec(x_9918); +x_9922 = lean_ctor_get(x_1, 2); +lean_inc(x_9922); +lean_inc(x_5); +lean_inc(x_4); +x_9923 = l_Lean_IR_ToIR_lowerType(x_9922, x_9921, x_4, 
x_5, x_9919); +if (lean_obj_tag(x_9923) == 0) +{ +lean_object* x_9924; lean_object* x_9925; lean_object* x_9926; lean_object* x_9927; lean_object* x_9928; +x_9924 = lean_ctor_get(x_9923, 0); +lean_inc(x_9924); +x_9925 = lean_ctor_get(x_9923, 1); +lean_inc(x_9925); +lean_dec(x_9923); +x_9926 = lean_ctor_get(x_9924, 0); +lean_inc(x_9926); +x_9927 = lean_ctor_get(x_9924, 1); +lean_inc(x_9927); +lean_dec(x_9924); +lean_inc(x_5); +lean_inc(x_4); +lean_inc(x_2); +x_9928 = l_Lean_IR_ToIR_lowerLet___lambda__3(x_2, x_9920, x_9910, x_9915, x_9894, x_9926, x_9927, x_4, x_5, x_9925); +if (lean_obj_tag(x_9928) == 0) +{ +lean_object* x_9929; lean_object* x_9930; uint8_t x_9931; +x_9929 = lean_ctor_get(x_9928, 0); +lean_inc(x_9929); +x_9930 = lean_ctor_get(x_9928, 1); +lean_inc(x_9930); +lean_dec(x_9928); +x_9931 = !lean_is_exclusive(x_9929); +if (x_9931 == 0) +{ +lean_object* x_9932; +x_9932 = lean_ctor_get(x_9929, 0); +lean_ctor_set(x_9895, 0, x_9932); +lean_ctor_set(x_9929, 0, x_9895); +x_9863 = x_9929; +x_9864 = x_9930; +goto block_9893; +} +else +{ +lean_object* x_9933; lean_object* x_9934; lean_object* x_9935; +x_9933 = lean_ctor_get(x_9929, 0); +x_9934 = lean_ctor_get(x_9929, 1); +lean_inc(x_9934); +lean_inc(x_9933); +lean_dec(x_9929); +lean_ctor_set(x_9895, 0, x_9933); +x_9935 = lean_alloc_ctor(0, 2, 0); +lean_ctor_set(x_9935, 0, x_9895); +lean_ctor_set(x_9935, 1, x_9934); +x_9863 = x_9935; +x_9864 = x_9930; +goto block_9893; +} +} +else +{ +uint8_t x_9936; +lean_free_object(x_9895); +lean_dec(x_8864); +lean_dec(x_8853); +lean_dec(x_8845); +lean_dec(x_5); +lean_dec(x_4); +lean_dec(x_2); +lean_dec(x_1); +x_9936 = !lean_is_exclusive(x_9928); +if (x_9936 == 0) +{ +return x_9928; +} +else +{ +lean_object* x_9937; lean_object* x_9938; lean_object* x_9939; +x_9937 = lean_ctor_get(x_9928, 0); +x_9938 = lean_ctor_get(x_9928, 1); +lean_inc(x_9938); +lean_inc(x_9937); +lean_dec(x_9928); +x_9939 = lean_alloc_ctor(1, 2, 0); +lean_ctor_set(x_9939, 0, x_9937); +lean_ctor_set(x_9939, 1, x_9938); +return x_9939; +} +} +} +else +{ +uint8_t x_9940; +lean_dec(x_9920); +lean_dec(x_9915); +lean_dec(x_9894); +lean_dec(x_9910); +lean_free_object(x_9895); +lean_dec(x_8864); +lean_dec(x_8853); +lean_dec(x_8845); +lean_dec(x_5); +lean_dec(x_4); +lean_dec(x_2); +lean_dec(x_1); +x_9940 = !lean_is_exclusive(x_9923); +if (x_9940 == 0) +{ +return x_9923; +} +else +{ +lean_object* x_9941; lean_object* x_9942; lean_object* x_9943; +x_9941 = lean_ctor_get(x_9923, 0); +x_9942 = lean_ctor_get(x_9923, 1); +lean_inc(x_9942); +lean_inc(x_9941); +lean_dec(x_9923); +x_9943 = lean_alloc_ctor(1, 2, 0); +lean_ctor_set(x_9943, 0, x_9941); +lean_ctor_set(x_9943, 1, x_9942); +return x_9943; +} +} +} +else +{ +lean_object* x_9944; lean_object* x_9945; lean_object* x_9946; lean_object* x_9947; lean_object* x_9948; lean_object* x_9949; lean_object* x_9950; lean_object* x_9951; +lean_dec(x_9905); +lean_dec(x_9903); +lean_inc(x_8853); +lean_inc(x_8845); +lean_ctor_set_tag(x_9894, 6); +lean_ctor_set(x_9894, 1, x_8853); +lean_ctor_set(x_9894, 0, x_8845); +x_9944 = lean_ctor_get(x_1, 0); +lean_inc(x_9944); +x_9945 = l_Lean_IR_ToIR_bindVar(x_9944, x_8859, x_4, x_5, x_9899); +x_9946 = lean_ctor_get(x_9945, 0); +lean_inc(x_9946); +x_9947 = lean_ctor_get(x_9945, 1); +lean_inc(x_9947); +lean_dec(x_9945); +x_9948 = lean_ctor_get(x_9946, 0); +lean_inc(x_9948); +x_9949 = lean_ctor_get(x_9946, 1); +lean_inc(x_9949); +lean_dec(x_9946); +x_9950 = lean_ctor_get(x_1, 2); +lean_inc(x_9950); +lean_inc(x_5); +lean_inc(x_4); +x_9951 = l_Lean_IR_ToIR_lowerType(x_9950, 
x_9949, x_4, x_5, x_9947); +if (lean_obj_tag(x_9951) == 0) +{ +lean_object* x_9952; lean_object* x_9953; lean_object* x_9954; lean_object* x_9955; lean_object* x_9956; +x_9952 = lean_ctor_get(x_9951, 0); +lean_inc(x_9952); +x_9953 = lean_ctor_get(x_9951, 1); +lean_inc(x_9953); +lean_dec(x_9951); +x_9954 = lean_ctor_get(x_9952, 0); +lean_inc(x_9954); +x_9955 = lean_ctor_get(x_9952, 1); +lean_inc(x_9955); +lean_dec(x_9952); +lean_inc(x_5); +lean_inc(x_4); +lean_inc(x_2); +x_9956 = l_Lean_IR_ToIR_lowerLet___lambda__1(x_2, x_9948, x_9894, x_9954, x_9955, x_4, x_5, x_9953); +if (lean_obj_tag(x_9956) == 0) +{ +lean_object* x_9957; lean_object* x_9958; uint8_t x_9959; +x_9957 = lean_ctor_get(x_9956, 0); +lean_inc(x_9957); +x_9958 = lean_ctor_get(x_9956, 1); +lean_inc(x_9958); +lean_dec(x_9956); +x_9959 = !lean_is_exclusive(x_9957); +if (x_9959 == 0) +{ +lean_object* x_9960; +x_9960 = lean_ctor_get(x_9957, 0); +lean_ctor_set(x_9895, 0, x_9960); +lean_ctor_set(x_9957, 0, x_9895); +x_9863 = x_9957; +x_9864 = x_9958; +goto block_9893; +} +else +{ +lean_object* x_9961; lean_object* x_9962; lean_object* x_9963; +x_9961 = lean_ctor_get(x_9957, 0); +x_9962 = lean_ctor_get(x_9957, 1); +lean_inc(x_9962); +lean_inc(x_9961); +lean_dec(x_9957); +lean_ctor_set(x_9895, 0, x_9961); +x_9963 = lean_alloc_ctor(0, 2, 0); +lean_ctor_set(x_9963, 0, x_9895); +lean_ctor_set(x_9963, 1, x_9962); +x_9863 = x_9963; +x_9864 = x_9958; +goto block_9893; +} +} +else +{ +uint8_t x_9964; +lean_free_object(x_9895); +lean_dec(x_8864); +lean_dec(x_8853); +lean_dec(x_8845); +lean_dec(x_5); +lean_dec(x_4); +lean_dec(x_2); +lean_dec(x_1); +x_9964 = !lean_is_exclusive(x_9956); +if (x_9964 == 0) +{ +return x_9956; +} +else +{ +lean_object* x_9965; lean_object* x_9966; lean_object* x_9967; +x_9965 = lean_ctor_get(x_9956, 0); +x_9966 = lean_ctor_get(x_9956, 1); +lean_inc(x_9966); +lean_inc(x_9965); +lean_dec(x_9956); +x_9967 = lean_alloc_ctor(1, 2, 0); +lean_ctor_set(x_9967, 0, x_9965); +lean_ctor_set(x_9967, 1, x_9966); +return x_9967; +} +} +} +else +{ +uint8_t x_9968; +lean_dec(x_9948); +lean_dec(x_9894); +lean_free_object(x_9895); +lean_dec(x_8864); +lean_dec(x_8853); +lean_dec(x_8845); +lean_dec(x_5); +lean_dec(x_4); +lean_dec(x_2); +lean_dec(x_1); +x_9968 = !lean_is_exclusive(x_9951); +if (x_9968 == 0) +{ +return x_9951; +} +else +{ +lean_object* x_9969; lean_object* x_9970; lean_object* x_9971; +x_9969 = lean_ctor_get(x_9951, 0); +x_9970 = lean_ctor_get(x_9951, 1); +lean_inc(x_9970); +lean_inc(x_9969); +lean_dec(x_9951); +x_9971 = lean_alloc_ctor(1, 2, 0); +lean_ctor_set(x_9971, 0, x_9969); +lean_ctor_set(x_9971, 1, x_9970); +return x_9971; +} +} +} +} +else +{ +lean_object* x_9972; lean_object* x_9973; lean_object* x_9974; lean_object* x_9975; lean_object* x_9976; lean_object* x_9977; lean_object* x_9978; lean_object* x_9979; +lean_dec(x_9905); +lean_dec(x_9903); +lean_inc(x_8853); +lean_inc(x_8845); +lean_ctor_set_tag(x_9894, 7); +lean_ctor_set(x_9894, 1, x_8853); +lean_ctor_set(x_9894, 0, x_8845); +x_9972 = lean_ctor_get(x_1, 0); +lean_inc(x_9972); +x_9973 = l_Lean_IR_ToIR_bindVar(x_9972, x_8859, x_4, x_5, x_9899); +x_9974 = lean_ctor_get(x_9973, 0); +lean_inc(x_9974); +x_9975 = lean_ctor_get(x_9973, 1); +lean_inc(x_9975); +lean_dec(x_9973); +x_9976 = lean_ctor_get(x_9974, 0); +lean_inc(x_9976); +x_9977 = lean_ctor_get(x_9974, 1); +lean_inc(x_9977); +lean_dec(x_9974); +x_9978 = lean_box(7); +lean_inc(x_5); +lean_inc(x_4); +lean_inc(x_2); +x_9979 = l_Lean_IR_ToIR_lowerLet___lambda__1(x_2, x_9976, x_9894, x_9978, x_9977, x_4, x_5, 
x_9975); +if (lean_obj_tag(x_9979) == 0) +{ +lean_object* x_9980; lean_object* x_9981; uint8_t x_9982; +x_9980 = lean_ctor_get(x_9979, 0); +lean_inc(x_9980); +x_9981 = lean_ctor_get(x_9979, 1); +lean_inc(x_9981); +lean_dec(x_9979); +x_9982 = !lean_is_exclusive(x_9980); +if (x_9982 == 0) +{ +lean_object* x_9983; +x_9983 = lean_ctor_get(x_9980, 0); +lean_ctor_set(x_9895, 0, x_9983); +lean_ctor_set(x_9980, 0, x_9895); +x_9863 = x_9980; +x_9864 = x_9981; +goto block_9893; +} +else +{ +lean_object* x_9984; lean_object* x_9985; lean_object* x_9986; +x_9984 = lean_ctor_get(x_9980, 0); +x_9985 = lean_ctor_get(x_9980, 1); +lean_inc(x_9985); +lean_inc(x_9984); +lean_dec(x_9980); +lean_ctor_set(x_9895, 0, x_9984); +x_9986 = lean_alloc_ctor(0, 2, 0); +lean_ctor_set(x_9986, 0, x_9895); +lean_ctor_set(x_9986, 1, x_9985); +x_9863 = x_9986; +x_9864 = x_9981; +goto block_9893; +} +} +else +{ +uint8_t x_9987; +lean_free_object(x_9895); +lean_dec(x_8864); +lean_dec(x_8853); +lean_dec(x_8845); +lean_dec(x_5); +lean_dec(x_4); +lean_dec(x_2); +lean_dec(x_1); +x_9987 = !lean_is_exclusive(x_9979); +if (x_9987 == 0) +{ +return x_9979; +} +else +{ +lean_object* x_9988; lean_object* x_9989; lean_object* x_9990; +x_9988 = lean_ctor_get(x_9979, 0); +x_9989 = lean_ctor_get(x_9979, 1); +lean_inc(x_9989); +lean_inc(x_9988); +lean_dec(x_9979); +x_9990 = lean_alloc_ctor(1, 2, 0); +lean_ctor_set(x_9990, 0, x_9988); +lean_ctor_set(x_9990, 1, x_9989); +return x_9990; +} +} +} +} +else +{ +lean_object* x_9991; lean_object* x_9992; lean_object* x_9993; lean_object* x_9994; uint8_t x_9995; +x_9991 = lean_ctor_get(x_9895, 0); +lean_inc(x_9991); +lean_dec(x_9895); +x_9992 = lean_array_get_size(x_8853); +x_9993 = lean_ctor_get(x_9991, 3); +lean_inc(x_9993); +lean_dec(x_9991); +x_9994 = lean_array_get_size(x_9993); +lean_dec(x_9993); +x_9995 = lean_nat_dec_lt(x_9992, x_9994); +if (x_9995 == 0) +{ +uint8_t x_9996; +x_9996 = lean_nat_dec_eq(x_9992, x_9994); +if (x_9996 == 0) +{ +lean_object* x_9997; lean_object* x_9998; lean_object* x_9999; lean_object* x_10000; lean_object* x_10001; lean_object* x_10002; lean_object* x_10003; lean_object* x_10004; lean_object* x_10005; lean_object* x_10006; lean_object* x_10007; lean_object* x_10008; lean_object* x_10009; lean_object* x_10010; lean_object* x_10011; lean_object* x_10012; +x_9997 = lean_unsigned_to_nat(0u); +x_9998 = l_Array_extract___rarg(x_8853, x_9997, x_9994); +x_9999 = l_Array_extract___rarg(x_8853, x_9994, x_9992); +lean_dec(x_9992); +lean_inc(x_8845); +lean_ctor_set_tag(x_9894, 6); +lean_ctor_set(x_9894, 1, x_9998); +lean_ctor_set(x_9894, 0, x_8845); +x_10000 = lean_ctor_get(x_1, 0); +lean_inc(x_10000); +x_10001 = l_Lean_IR_ToIR_bindVar(x_10000, x_8859, x_4, x_5, x_9899); +x_10002 = lean_ctor_get(x_10001, 0); +lean_inc(x_10002); +x_10003 = lean_ctor_get(x_10001, 1); +lean_inc(x_10003); +lean_dec(x_10001); +x_10004 = lean_ctor_get(x_10002, 0); +lean_inc(x_10004); +x_10005 = lean_ctor_get(x_10002, 1); +lean_inc(x_10005); +lean_dec(x_10002); +x_10006 = l_Lean_IR_ToIR_newVar(x_10005, x_4, x_5, x_10003); +x_10007 = lean_ctor_get(x_10006, 0); +lean_inc(x_10007); +x_10008 = lean_ctor_get(x_10006, 1); +lean_inc(x_10008); +lean_dec(x_10006); +x_10009 = lean_ctor_get(x_10007, 0); +lean_inc(x_10009); +x_10010 = lean_ctor_get(x_10007, 1); +lean_inc(x_10010); +lean_dec(x_10007); +x_10011 = lean_ctor_get(x_1, 2); +lean_inc(x_10011); +lean_inc(x_5); +lean_inc(x_4); +x_10012 = l_Lean_IR_ToIR_lowerType(x_10011, x_10010, x_4, x_5, x_10008); +if (lean_obj_tag(x_10012) == 0) +{ +lean_object* 
x_10013; lean_object* x_10014; lean_object* x_10015; lean_object* x_10016; lean_object* x_10017; +x_10013 = lean_ctor_get(x_10012, 0); +lean_inc(x_10013); +x_10014 = lean_ctor_get(x_10012, 1); +lean_inc(x_10014); +lean_dec(x_10012); +x_10015 = lean_ctor_get(x_10013, 0); +lean_inc(x_10015); +x_10016 = lean_ctor_get(x_10013, 1); +lean_inc(x_10016); +lean_dec(x_10013); +lean_inc(x_5); +lean_inc(x_4); +lean_inc(x_2); +x_10017 = l_Lean_IR_ToIR_lowerLet___lambda__3(x_2, x_10009, x_9999, x_10004, x_9894, x_10015, x_10016, x_4, x_5, x_10014); +if (lean_obj_tag(x_10017) == 0) +{ +lean_object* x_10018; lean_object* x_10019; lean_object* x_10020; lean_object* x_10021; lean_object* x_10022; lean_object* x_10023; lean_object* x_10024; +x_10018 = lean_ctor_get(x_10017, 0); +lean_inc(x_10018); +x_10019 = lean_ctor_get(x_10017, 1); +lean_inc(x_10019); +lean_dec(x_10017); +x_10020 = lean_ctor_get(x_10018, 0); +lean_inc(x_10020); +x_10021 = lean_ctor_get(x_10018, 1); +lean_inc(x_10021); +if (lean_is_exclusive(x_10018)) { + lean_ctor_release(x_10018, 0); + lean_ctor_release(x_10018, 1); + x_10022 = x_10018; +} else { + lean_dec_ref(x_10018); + x_10022 = lean_box(0); +} +x_10023 = lean_alloc_ctor(1, 1, 0); +lean_ctor_set(x_10023, 0, x_10020); +if (lean_is_scalar(x_10022)) { + x_10024 = lean_alloc_ctor(0, 2, 0); +} else { + x_10024 = x_10022; +} +lean_ctor_set(x_10024, 0, x_10023); +lean_ctor_set(x_10024, 1, x_10021); +x_9863 = x_10024; +x_9864 = x_10019; +goto block_9893; +} +else +{ +lean_object* x_10025; lean_object* x_10026; lean_object* x_10027; lean_object* x_10028; +lean_dec(x_8864); +lean_dec(x_8853); +lean_dec(x_8845); +lean_dec(x_5); +lean_dec(x_4); +lean_dec(x_2); +lean_dec(x_1); +x_10025 = lean_ctor_get(x_10017, 0); +lean_inc(x_10025); +x_10026 = lean_ctor_get(x_10017, 1); +lean_inc(x_10026); +if (lean_is_exclusive(x_10017)) { + lean_ctor_release(x_10017, 0); + lean_ctor_release(x_10017, 1); + x_10027 = x_10017; +} else { + lean_dec_ref(x_10017); + x_10027 = lean_box(0); +} +if (lean_is_scalar(x_10027)) { + x_10028 = lean_alloc_ctor(1, 2, 0); +} else { + x_10028 = x_10027; +} +lean_ctor_set(x_10028, 0, x_10025); +lean_ctor_set(x_10028, 1, x_10026); +return x_10028; +} +} +else +{ +lean_object* x_10029; lean_object* x_10030; lean_object* x_10031; lean_object* x_10032; +lean_dec(x_10009); +lean_dec(x_10004); +lean_dec(x_9894); +lean_dec(x_9999); +lean_dec(x_8864); +lean_dec(x_8853); +lean_dec(x_8845); +lean_dec(x_5); +lean_dec(x_4); +lean_dec(x_2); +lean_dec(x_1); +x_10029 = lean_ctor_get(x_10012, 0); +lean_inc(x_10029); +x_10030 = lean_ctor_get(x_10012, 1); +lean_inc(x_10030); +if (lean_is_exclusive(x_10012)) { + lean_ctor_release(x_10012, 0); + lean_ctor_release(x_10012, 1); + x_10031 = x_10012; +} else { + lean_dec_ref(x_10012); + x_10031 = lean_box(0); +} +if (lean_is_scalar(x_10031)) { + x_10032 = lean_alloc_ctor(1, 2, 0); +} else { + x_10032 = x_10031; +} +lean_ctor_set(x_10032, 0, x_10029); +lean_ctor_set(x_10032, 1, x_10030); +return x_10032; +} +} +else +{ +lean_object* x_10033; lean_object* x_10034; lean_object* x_10035; lean_object* x_10036; lean_object* x_10037; lean_object* x_10038; lean_object* x_10039; lean_object* x_10040; +lean_dec(x_9994); +lean_dec(x_9992); +lean_inc(x_8853); +lean_inc(x_8845); +lean_ctor_set_tag(x_9894, 6); +lean_ctor_set(x_9894, 1, x_8853); +lean_ctor_set(x_9894, 0, x_8845); +x_10033 = lean_ctor_get(x_1, 0); +lean_inc(x_10033); +x_10034 = l_Lean_IR_ToIR_bindVar(x_10033, x_8859, x_4, x_5, x_9899); +x_10035 = lean_ctor_get(x_10034, 0); +lean_inc(x_10035); +x_10036 
= lean_ctor_get(x_10034, 1); +lean_inc(x_10036); +lean_dec(x_10034); +x_10037 = lean_ctor_get(x_10035, 0); +lean_inc(x_10037); +x_10038 = lean_ctor_get(x_10035, 1); +lean_inc(x_10038); +lean_dec(x_10035); +x_10039 = lean_ctor_get(x_1, 2); +lean_inc(x_10039); +lean_inc(x_5); +lean_inc(x_4); +x_10040 = l_Lean_IR_ToIR_lowerType(x_10039, x_10038, x_4, x_5, x_10036); +if (lean_obj_tag(x_10040) == 0) +{ +lean_object* x_10041; lean_object* x_10042; lean_object* x_10043; lean_object* x_10044; lean_object* x_10045; +x_10041 = lean_ctor_get(x_10040, 0); +lean_inc(x_10041); +x_10042 = lean_ctor_get(x_10040, 1); +lean_inc(x_10042); +lean_dec(x_10040); +x_10043 = lean_ctor_get(x_10041, 0); +lean_inc(x_10043); +x_10044 = lean_ctor_get(x_10041, 1); +lean_inc(x_10044); +lean_dec(x_10041); +lean_inc(x_5); +lean_inc(x_4); +lean_inc(x_2); +x_10045 = l_Lean_IR_ToIR_lowerLet___lambda__1(x_2, x_10037, x_9894, x_10043, x_10044, x_4, x_5, x_10042); +if (lean_obj_tag(x_10045) == 0) +{ +lean_object* x_10046; lean_object* x_10047; lean_object* x_10048; lean_object* x_10049; lean_object* x_10050; lean_object* x_10051; lean_object* x_10052; +x_10046 = lean_ctor_get(x_10045, 0); +lean_inc(x_10046); +x_10047 = lean_ctor_get(x_10045, 1); +lean_inc(x_10047); +lean_dec(x_10045); +x_10048 = lean_ctor_get(x_10046, 0); +lean_inc(x_10048); +x_10049 = lean_ctor_get(x_10046, 1); +lean_inc(x_10049); +if (lean_is_exclusive(x_10046)) { + lean_ctor_release(x_10046, 0); + lean_ctor_release(x_10046, 1); + x_10050 = x_10046; +} else { + lean_dec_ref(x_10046); + x_10050 = lean_box(0); +} +x_10051 = lean_alloc_ctor(1, 1, 0); +lean_ctor_set(x_10051, 0, x_10048); +if (lean_is_scalar(x_10050)) { + x_10052 = lean_alloc_ctor(0, 2, 0); +} else { + x_10052 = x_10050; +} +lean_ctor_set(x_10052, 0, x_10051); +lean_ctor_set(x_10052, 1, x_10049); +x_9863 = x_10052; +x_9864 = x_10047; +goto block_9893; +} +else +{ +lean_object* x_10053; lean_object* x_10054; lean_object* x_10055; lean_object* x_10056; +lean_dec(x_8864); +lean_dec(x_8853); +lean_dec(x_8845); +lean_dec(x_5); +lean_dec(x_4); +lean_dec(x_2); +lean_dec(x_1); +x_10053 = lean_ctor_get(x_10045, 0); +lean_inc(x_10053); +x_10054 = lean_ctor_get(x_10045, 1); +lean_inc(x_10054); +if (lean_is_exclusive(x_10045)) { + lean_ctor_release(x_10045, 0); + lean_ctor_release(x_10045, 1); + x_10055 = x_10045; +} else { + lean_dec_ref(x_10045); + x_10055 = lean_box(0); +} +if (lean_is_scalar(x_10055)) { + x_10056 = lean_alloc_ctor(1, 2, 0); +} else { + x_10056 = x_10055; +} +lean_ctor_set(x_10056, 0, x_10053); +lean_ctor_set(x_10056, 1, x_10054); +return x_10056; +} +} +else +{ +lean_object* x_10057; lean_object* x_10058; lean_object* x_10059; lean_object* x_10060; +lean_dec(x_10037); +lean_dec(x_9894); +lean_dec(x_8864); +lean_dec(x_8853); +lean_dec(x_8845); +lean_dec(x_5); +lean_dec(x_4); +lean_dec(x_2); +lean_dec(x_1); +x_10057 = lean_ctor_get(x_10040, 0); +lean_inc(x_10057); +x_10058 = lean_ctor_get(x_10040, 1); +lean_inc(x_10058); +if (lean_is_exclusive(x_10040)) { + lean_ctor_release(x_10040, 0); + lean_ctor_release(x_10040, 1); + x_10059 = x_10040; +} else { + lean_dec_ref(x_10040); + x_10059 = lean_box(0); +} +if (lean_is_scalar(x_10059)) { + x_10060 = lean_alloc_ctor(1, 2, 0); +} else { + x_10060 = x_10059; +} +lean_ctor_set(x_10060, 0, x_10057); +lean_ctor_set(x_10060, 1, x_10058); +return x_10060; +} +} +} +else +{ +lean_object* x_10061; lean_object* x_10062; lean_object* x_10063; lean_object* x_10064; lean_object* x_10065; lean_object* x_10066; lean_object* x_10067; lean_object* x_10068; 
+lean_dec(x_9994); +lean_dec(x_9992); +lean_inc(x_8853); +lean_inc(x_8845); +lean_ctor_set_tag(x_9894, 7); +lean_ctor_set(x_9894, 1, x_8853); +lean_ctor_set(x_9894, 0, x_8845); +x_10061 = lean_ctor_get(x_1, 0); +lean_inc(x_10061); +x_10062 = l_Lean_IR_ToIR_bindVar(x_10061, x_8859, x_4, x_5, x_9899); +x_10063 = lean_ctor_get(x_10062, 0); +lean_inc(x_10063); +x_10064 = lean_ctor_get(x_10062, 1); +lean_inc(x_10064); +lean_dec(x_10062); +x_10065 = lean_ctor_get(x_10063, 0); +lean_inc(x_10065); +x_10066 = lean_ctor_get(x_10063, 1); +lean_inc(x_10066); +lean_dec(x_10063); +x_10067 = lean_box(7); +lean_inc(x_5); +lean_inc(x_4); +lean_inc(x_2); +x_10068 = l_Lean_IR_ToIR_lowerLet___lambda__1(x_2, x_10065, x_9894, x_10067, x_10066, x_4, x_5, x_10064); +if (lean_obj_tag(x_10068) == 0) +{ +lean_object* x_10069; lean_object* x_10070; lean_object* x_10071; lean_object* x_10072; lean_object* x_10073; lean_object* x_10074; lean_object* x_10075; +x_10069 = lean_ctor_get(x_10068, 0); +lean_inc(x_10069); +x_10070 = lean_ctor_get(x_10068, 1); +lean_inc(x_10070); +lean_dec(x_10068); +x_10071 = lean_ctor_get(x_10069, 0); +lean_inc(x_10071); +x_10072 = lean_ctor_get(x_10069, 1); +lean_inc(x_10072); +if (lean_is_exclusive(x_10069)) { + lean_ctor_release(x_10069, 0); + lean_ctor_release(x_10069, 1); + x_10073 = x_10069; +} else { + lean_dec_ref(x_10069); + x_10073 = lean_box(0); +} +x_10074 = lean_alloc_ctor(1, 1, 0); +lean_ctor_set(x_10074, 0, x_10071); +if (lean_is_scalar(x_10073)) { + x_10075 = lean_alloc_ctor(0, 2, 0); +} else { + x_10075 = x_10073; +} +lean_ctor_set(x_10075, 0, x_10074); +lean_ctor_set(x_10075, 1, x_10072); +x_9863 = x_10075; +x_9864 = x_10070; +goto block_9893; +} +else +{ +lean_object* x_10076; lean_object* x_10077; lean_object* x_10078; lean_object* x_10079; +lean_dec(x_8864); +lean_dec(x_8853); +lean_dec(x_8845); +lean_dec(x_5); +lean_dec(x_4); +lean_dec(x_2); +lean_dec(x_1); +x_10076 = lean_ctor_get(x_10068, 0); +lean_inc(x_10076); +x_10077 = lean_ctor_get(x_10068, 1); +lean_inc(x_10077); +if (lean_is_exclusive(x_10068)) { + lean_ctor_release(x_10068, 0); + lean_ctor_release(x_10068, 1); + x_10078 = x_10068; +} else { + lean_dec_ref(x_10068); + x_10078 = lean_box(0); +} +if (lean_is_scalar(x_10078)) { + x_10079 = lean_alloc_ctor(1, 2, 0); +} else { + x_10079 = x_10078; +} +lean_ctor_set(x_10079, 0, x_10076); +lean_ctor_set(x_10079, 1, x_10077); +return x_10079; +} +} +} +} +else +{ +lean_object* x_10080; lean_object* x_10081; lean_object* x_10082; lean_object* x_10083; lean_object* x_10084; lean_object* x_10085; uint8_t x_10086; +x_10080 = lean_ctor_get(x_9894, 1); +lean_inc(x_10080); +lean_dec(x_9894); +x_10081 = lean_ctor_get(x_9895, 0); +lean_inc(x_10081); +if (lean_is_exclusive(x_9895)) { + lean_ctor_release(x_9895, 0); + x_10082 = x_9895; +} else { + lean_dec_ref(x_9895); + x_10082 = lean_box(0); +} +x_10083 = lean_array_get_size(x_8853); +x_10084 = lean_ctor_get(x_10081, 3); +lean_inc(x_10084); +lean_dec(x_10081); +x_10085 = lean_array_get_size(x_10084); +lean_dec(x_10084); +x_10086 = lean_nat_dec_lt(x_10083, x_10085); +if (x_10086 == 0) +{ +uint8_t x_10087; +x_10087 = lean_nat_dec_eq(x_10083, x_10085); +if (x_10087 == 0) +{ +lean_object* x_10088; lean_object* x_10089; lean_object* x_10090; lean_object* x_10091; lean_object* x_10092; lean_object* x_10093; lean_object* x_10094; lean_object* x_10095; lean_object* x_10096; lean_object* x_10097; lean_object* x_10098; lean_object* x_10099; lean_object* x_10100; lean_object* x_10101; lean_object* x_10102; lean_object* x_10103; 
lean_object* x_10104; +x_10088 = lean_unsigned_to_nat(0u); +x_10089 = l_Array_extract___rarg(x_8853, x_10088, x_10085); +x_10090 = l_Array_extract___rarg(x_8853, x_10085, x_10083); +lean_dec(x_10083); +lean_inc(x_8845); +x_10091 = lean_alloc_ctor(6, 2, 0); +lean_ctor_set(x_10091, 0, x_8845); +lean_ctor_set(x_10091, 1, x_10089); +x_10092 = lean_ctor_get(x_1, 0); +lean_inc(x_10092); +x_10093 = l_Lean_IR_ToIR_bindVar(x_10092, x_8859, x_4, x_5, x_10080); +x_10094 = lean_ctor_get(x_10093, 0); +lean_inc(x_10094); +x_10095 = lean_ctor_get(x_10093, 1); +lean_inc(x_10095); +lean_dec(x_10093); +x_10096 = lean_ctor_get(x_10094, 0); +lean_inc(x_10096); +x_10097 = lean_ctor_get(x_10094, 1); +lean_inc(x_10097); +lean_dec(x_10094); +x_10098 = l_Lean_IR_ToIR_newVar(x_10097, x_4, x_5, x_10095); +x_10099 = lean_ctor_get(x_10098, 0); +lean_inc(x_10099); +x_10100 = lean_ctor_get(x_10098, 1); +lean_inc(x_10100); +lean_dec(x_10098); +x_10101 = lean_ctor_get(x_10099, 0); +lean_inc(x_10101); +x_10102 = lean_ctor_get(x_10099, 1); +lean_inc(x_10102); +lean_dec(x_10099); +x_10103 = lean_ctor_get(x_1, 2); +lean_inc(x_10103); +lean_inc(x_5); +lean_inc(x_4); +x_10104 = l_Lean_IR_ToIR_lowerType(x_10103, x_10102, x_4, x_5, x_10100); +if (lean_obj_tag(x_10104) == 0) +{ +lean_object* x_10105; lean_object* x_10106; lean_object* x_10107; lean_object* x_10108; lean_object* x_10109; +x_10105 = lean_ctor_get(x_10104, 0); +lean_inc(x_10105); +x_10106 = lean_ctor_get(x_10104, 1); +lean_inc(x_10106); +lean_dec(x_10104); +x_10107 = lean_ctor_get(x_10105, 0); +lean_inc(x_10107); +x_10108 = lean_ctor_get(x_10105, 1); +lean_inc(x_10108); +lean_dec(x_10105); +lean_inc(x_5); +lean_inc(x_4); +lean_inc(x_2); +x_10109 = l_Lean_IR_ToIR_lowerLet___lambda__3(x_2, x_10101, x_10090, x_10096, x_10091, x_10107, x_10108, x_4, x_5, x_10106); +if (lean_obj_tag(x_10109) == 0) +{ +lean_object* x_10110; lean_object* x_10111; lean_object* x_10112; lean_object* x_10113; lean_object* x_10114; lean_object* x_10115; lean_object* x_10116; +x_10110 = lean_ctor_get(x_10109, 0); +lean_inc(x_10110); +x_10111 = lean_ctor_get(x_10109, 1); +lean_inc(x_10111); +lean_dec(x_10109); +x_10112 = lean_ctor_get(x_10110, 0); +lean_inc(x_10112); +x_10113 = lean_ctor_get(x_10110, 1); +lean_inc(x_10113); +if (lean_is_exclusive(x_10110)) { + lean_ctor_release(x_10110, 0); + lean_ctor_release(x_10110, 1); + x_10114 = x_10110; +} else { + lean_dec_ref(x_10110); + x_10114 = lean_box(0); +} +if (lean_is_scalar(x_10082)) { + x_10115 = lean_alloc_ctor(1, 1, 0); +} else { + x_10115 = x_10082; +} +lean_ctor_set(x_10115, 0, x_10112); +if (lean_is_scalar(x_10114)) { + x_10116 = lean_alloc_ctor(0, 2, 0); +} else { + x_10116 = x_10114; +} +lean_ctor_set(x_10116, 0, x_10115); +lean_ctor_set(x_10116, 1, x_10113); +x_9863 = x_10116; +x_9864 = x_10111; +goto block_9893; +} +else +{ +lean_object* x_10117; lean_object* x_10118; lean_object* x_10119; lean_object* x_10120; +lean_dec(x_10082); +lean_dec(x_8864); +lean_dec(x_8853); +lean_dec(x_8845); +lean_dec(x_5); +lean_dec(x_4); +lean_dec(x_2); +lean_dec(x_1); +x_10117 = lean_ctor_get(x_10109, 0); +lean_inc(x_10117); +x_10118 = lean_ctor_get(x_10109, 1); +lean_inc(x_10118); +if (lean_is_exclusive(x_10109)) { + lean_ctor_release(x_10109, 0); + lean_ctor_release(x_10109, 1); + x_10119 = x_10109; +} else { + lean_dec_ref(x_10109); + x_10119 = lean_box(0); +} +if (lean_is_scalar(x_10119)) { + x_10120 = lean_alloc_ctor(1, 2, 0); +} else { + x_10120 = x_10119; +} +lean_ctor_set(x_10120, 0, x_10117); +lean_ctor_set(x_10120, 1, x_10118); +return x_10120; 
+} +} +else +{ +lean_object* x_10121; lean_object* x_10122; lean_object* x_10123; lean_object* x_10124; +lean_dec(x_10101); +lean_dec(x_10096); +lean_dec(x_10091); +lean_dec(x_10090); +lean_dec(x_10082); +lean_dec(x_8864); +lean_dec(x_8853); +lean_dec(x_8845); +lean_dec(x_5); +lean_dec(x_4); +lean_dec(x_2); +lean_dec(x_1); +x_10121 = lean_ctor_get(x_10104, 0); +lean_inc(x_10121); +x_10122 = lean_ctor_get(x_10104, 1); +lean_inc(x_10122); +if (lean_is_exclusive(x_10104)) { + lean_ctor_release(x_10104, 0); + lean_ctor_release(x_10104, 1); + x_10123 = x_10104; +} else { + lean_dec_ref(x_10104); + x_10123 = lean_box(0); +} +if (lean_is_scalar(x_10123)) { + x_10124 = lean_alloc_ctor(1, 2, 0); +} else { + x_10124 = x_10123; +} +lean_ctor_set(x_10124, 0, x_10121); +lean_ctor_set(x_10124, 1, x_10122); +return x_10124; +} +} +else +{ +lean_object* x_10125; lean_object* x_10126; lean_object* x_10127; lean_object* x_10128; lean_object* x_10129; lean_object* x_10130; lean_object* x_10131; lean_object* x_10132; lean_object* x_10133; +lean_dec(x_10085); +lean_dec(x_10083); +lean_inc(x_8853); +lean_inc(x_8845); +x_10125 = lean_alloc_ctor(6, 2, 0); +lean_ctor_set(x_10125, 0, x_8845); +lean_ctor_set(x_10125, 1, x_8853); +x_10126 = lean_ctor_get(x_1, 0); +lean_inc(x_10126); +x_10127 = l_Lean_IR_ToIR_bindVar(x_10126, x_8859, x_4, x_5, x_10080); +x_10128 = lean_ctor_get(x_10127, 0); +lean_inc(x_10128); +x_10129 = lean_ctor_get(x_10127, 1); +lean_inc(x_10129); +lean_dec(x_10127); +x_10130 = lean_ctor_get(x_10128, 0); +lean_inc(x_10130); +x_10131 = lean_ctor_get(x_10128, 1); +lean_inc(x_10131); +lean_dec(x_10128); +x_10132 = lean_ctor_get(x_1, 2); +lean_inc(x_10132); +lean_inc(x_5); +lean_inc(x_4); +x_10133 = l_Lean_IR_ToIR_lowerType(x_10132, x_10131, x_4, x_5, x_10129); +if (lean_obj_tag(x_10133) == 0) +{ +lean_object* x_10134; lean_object* x_10135; lean_object* x_10136; lean_object* x_10137; lean_object* x_10138; +x_10134 = lean_ctor_get(x_10133, 0); +lean_inc(x_10134); +x_10135 = lean_ctor_get(x_10133, 1); +lean_inc(x_10135); +lean_dec(x_10133); +x_10136 = lean_ctor_get(x_10134, 0); +lean_inc(x_10136); +x_10137 = lean_ctor_get(x_10134, 1); +lean_inc(x_10137); +lean_dec(x_10134); +lean_inc(x_5); +lean_inc(x_4); +lean_inc(x_2); +x_10138 = l_Lean_IR_ToIR_lowerLet___lambda__1(x_2, x_10130, x_10125, x_10136, x_10137, x_4, x_5, x_10135); +if (lean_obj_tag(x_10138) == 0) +{ +lean_object* x_10139; lean_object* x_10140; lean_object* x_10141; lean_object* x_10142; lean_object* x_10143; lean_object* x_10144; lean_object* x_10145; +x_10139 = lean_ctor_get(x_10138, 0); +lean_inc(x_10139); +x_10140 = lean_ctor_get(x_10138, 1); +lean_inc(x_10140); +lean_dec(x_10138); +x_10141 = lean_ctor_get(x_10139, 0); +lean_inc(x_10141); +x_10142 = lean_ctor_get(x_10139, 1); +lean_inc(x_10142); +if (lean_is_exclusive(x_10139)) { + lean_ctor_release(x_10139, 0); + lean_ctor_release(x_10139, 1); + x_10143 = x_10139; +} else { + lean_dec_ref(x_10139); + x_10143 = lean_box(0); +} +if (lean_is_scalar(x_10082)) { + x_10144 = lean_alloc_ctor(1, 1, 0); +} else { + x_10144 = x_10082; +} +lean_ctor_set(x_10144, 0, x_10141); +if (lean_is_scalar(x_10143)) { + x_10145 = lean_alloc_ctor(0, 2, 0); +} else { + x_10145 = x_10143; +} +lean_ctor_set(x_10145, 0, x_10144); +lean_ctor_set(x_10145, 1, x_10142); +x_9863 = x_10145; +x_9864 = x_10140; +goto block_9893; +} +else +{ +lean_object* x_10146; lean_object* x_10147; lean_object* x_10148; lean_object* x_10149; +lean_dec(x_10082); +lean_dec(x_8864); +lean_dec(x_8853); +lean_dec(x_8845); +lean_dec(x_5); 
+lean_dec(x_4); +lean_dec(x_2); +lean_dec(x_1); +x_10146 = lean_ctor_get(x_10138, 0); +lean_inc(x_10146); +x_10147 = lean_ctor_get(x_10138, 1); +lean_inc(x_10147); +if (lean_is_exclusive(x_10138)) { + lean_ctor_release(x_10138, 0); + lean_ctor_release(x_10138, 1); + x_10148 = x_10138; +} else { + lean_dec_ref(x_10138); + x_10148 = lean_box(0); +} +if (lean_is_scalar(x_10148)) { + x_10149 = lean_alloc_ctor(1, 2, 0); +} else { + x_10149 = x_10148; +} +lean_ctor_set(x_10149, 0, x_10146); +lean_ctor_set(x_10149, 1, x_10147); +return x_10149; +} +} +else +{ +lean_object* x_10150; lean_object* x_10151; lean_object* x_10152; lean_object* x_10153; +lean_dec(x_10130); +lean_dec(x_10125); +lean_dec(x_10082); +lean_dec(x_8864); +lean_dec(x_8853); +lean_dec(x_8845); +lean_dec(x_5); +lean_dec(x_4); +lean_dec(x_2); +lean_dec(x_1); +x_10150 = lean_ctor_get(x_10133, 0); +lean_inc(x_10150); +x_10151 = lean_ctor_get(x_10133, 1); +lean_inc(x_10151); +if (lean_is_exclusive(x_10133)) { + lean_ctor_release(x_10133, 0); + lean_ctor_release(x_10133, 1); + x_10152 = x_10133; +} else { + lean_dec_ref(x_10133); + x_10152 = lean_box(0); +} +if (lean_is_scalar(x_10152)) { + x_10153 = lean_alloc_ctor(1, 2, 0); +} else { + x_10153 = x_10152; +} +lean_ctor_set(x_10153, 0, x_10150); +lean_ctor_set(x_10153, 1, x_10151); +return x_10153; +} +} +} +else +{ +lean_object* x_10154; lean_object* x_10155; lean_object* x_10156; lean_object* x_10157; lean_object* x_10158; lean_object* x_10159; lean_object* x_10160; lean_object* x_10161; lean_object* x_10162; +lean_dec(x_10085); +lean_dec(x_10083); +lean_inc(x_8853); +lean_inc(x_8845); +x_10154 = lean_alloc_ctor(7, 2, 0); +lean_ctor_set(x_10154, 0, x_8845); +lean_ctor_set(x_10154, 1, x_8853); +x_10155 = lean_ctor_get(x_1, 0); +lean_inc(x_10155); +x_10156 = l_Lean_IR_ToIR_bindVar(x_10155, x_8859, x_4, x_5, x_10080); +x_10157 = lean_ctor_get(x_10156, 0); +lean_inc(x_10157); +x_10158 = lean_ctor_get(x_10156, 1); +lean_inc(x_10158); +lean_dec(x_10156); +x_10159 = lean_ctor_get(x_10157, 0); +lean_inc(x_10159); +x_10160 = lean_ctor_get(x_10157, 1); +lean_inc(x_10160); +lean_dec(x_10157); +x_10161 = lean_box(7); +lean_inc(x_5); +lean_inc(x_4); +lean_inc(x_2); +x_10162 = l_Lean_IR_ToIR_lowerLet___lambda__1(x_2, x_10159, x_10154, x_10161, x_10160, x_4, x_5, x_10158); +if (lean_obj_tag(x_10162) == 0) +{ +lean_object* x_10163; lean_object* x_10164; lean_object* x_10165; lean_object* x_10166; lean_object* x_10167; lean_object* x_10168; lean_object* x_10169; +x_10163 = lean_ctor_get(x_10162, 0); +lean_inc(x_10163); +x_10164 = lean_ctor_get(x_10162, 1); +lean_inc(x_10164); +lean_dec(x_10162); +x_10165 = lean_ctor_get(x_10163, 0); +lean_inc(x_10165); +x_10166 = lean_ctor_get(x_10163, 1); +lean_inc(x_10166); +if (lean_is_exclusive(x_10163)) { + lean_ctor_release(x_10163, 0); + lean_ctor_release(x_10163, 1); + x_10167 = x_10163; +} else { + lean_dec_ref(x_10163); + x_10167 = lean_box(0); +} +if (lean_is_scalar(x_10082)) { + x_10168 = lean_alloc_ctor(1, 1, 0); +} else { + x_10168 = x_10082; +} +lean_ctor_set(x_10168, 0, x_10165); +if (lean_is_scalar(x_10167)) { + x_10169 = lean_alloc_ctor(0, 2, 0); +} else { + x_10169 = x_10167; +} +lean_ctor_set(x_10169, 0, x_10168); +lean_ctor_set(x_10169, 1, x_10166); +x_9863 = x_10169; +x_9864 = x_10164; +goto block_9893; +} +else +{ +lean_object* x_10170; lean_object* x_10171; lean_object* x_10172; lean_object* x_10173; +lean_dec(x_10082); +lean_dec(x_8864); +lean_dec(x_8853); +lean_dec(x_8845); +lean_dec(x_5); +lean_dec(x_4); +lean_dec(x_2); +lean_dec(x_1); 
+x_10170 = lean_ctor_get(x_10162, 0); +lean_inc(x_10170); +x_10171 = lean_ctor_get(x_10162, 1); +lean_inc(x_10171); +if (lean_is_exclusive(x_10162)) { + lean_ctor_release(x_10162, 0); + lean_ctor_release(x_10162, 1); + x_10172 = x_10162; +} else { + lean_dec_ref(x_10162); + x_10172 = lean_box(0); +} +if (lean_is_scalar(x_10172)) { + x_10173 = lean_alloc_ctor(1, 2, 0); +} else { + x_10173 = x_10172; +} +lean_ctor_set(x_10173, 0, x_10170); +lean_ctor_set(x_10173, 1, x_10171); +return x_10173; +} +} +} +} +block_9893: +{ +lean_object* x_9865; +x_9865 = lean_ctor_get(x_9863, 0); +lean_inc(x_9865); +if (lean_obj_tag(x_9865) == 0) +{ +lean_object* x_9866; lean_object* x_9867; lean_object* x_9868; lean_object* x_9869; lean_object* x_9870; lean_object* x_9871; lean_object* x_9872; lean_object* x_9873; lean_object* x_9874; lean_object* x_9875; +lean_dec(x_8864); +x_9866 = lean_ctor_get(x_9863, 1); +lean_inc(x_9866); +lean_dec(x_9863); +x_9867 = lean_alloc_ctor(6, 2, 0); +lean_ctor_set(x_9867, 0, x_8845); +lean_ctor_set(x_9867, 1, x_8853); +x_9868 = lean_ctor_get(x_1, 0); +lean_inc(x_9868); +x_9869 = l_Lean_IR_ToIR_bindVar(x_9868, x_9866, x_4, x_5, x_9864); +x_9870 = lean_ctor_get(x_9869, 0); +lean_inc(x_9870); +x_9871 = lean_ctor_get(x_9869, 1); +lean_inc(x_9871); +lean_dec(x_9869); +x_9872 = lean_ctor_get(x_9870, 0); +lean_inc(x_9872); +x_9873 = lean_ctor_get(x_9870, 1); +lean_inc(x_9873); +lean_dec(x_9870); +x_9874 = lean_ctor_get(x_1, 2); +lean_inc(x_9874); +lean_dec(x_1); +lean_inc(x_5); +lean_inc(x_4); +x_9875 = l_Lean_IR_ToIR_lowerType(x_9874, x_9873, x_4, x_5, x_9871); +if (lean_obj_tag(x_9875) == 0) +{ +lean_object* x_9876; lean_object* x_9877; lean_object* x_9878; lean_object* x_9879; lean_object* x_9880; +x_9876 = lean_ctor_get(x_9875, 0); +lean_inc(x_9876); +x_9877 = lean_ctor_get(x_9875, 1); +lean_inc(x_9877); +lean_dec(x_9875); +x_9878 = lean_ctor_get(x_9876, 0); +lean_inc(x_9878); +x_9879 = lean_ctor_get(x_9876, 1); +lean_inc(x_9879); +lean_dec(x_9876); +x_9880 = l_Lean_IR_ToIR_lowerLet___lambda__1(x_2, x_9872, x_9867, x_9878, x_9879, x_4, x_5, x_9877); +return x_9880; +} +else +{ +uint8_t x_9881; +lean_dec(x_9872); +lean_dec(x_9867); +lean_dec(x_5); +lean_dec(x_4); +lean_dec(x_2); +x_9881 = !lean_is_exclusive(x_9875); +if (x_9881 == 0) +{ +return x_9875; +} +else +{ +lean_object* x_9882; lean_object* x_9883; lean_object* x_9884; +x_9882 = lean_ctor_get(x_9875, 0); +x_9883 = lean_ctor_get(x_9875, 1); +lean_inc(x_9883); +lean_inc(x_9882); +lean_dec(x_9875); +x_9884 = lean_alloc_ctor(1, 2, 0); +lean_ctor_set(x_9884, 0, x_9882); +lean_ctor_set(x_9884, 1, x_9883); +return x_9884; +} +} +} +else +{ +uint8_t x_9885; +lean_dec(x_8853); +lean_dec(x_8845); +lean_dec(x_5); +lean_dec(x_4); +lean_dec(x_2); +lean_dec(x_1); +x_9885 = !lean_is_exclusive(x_9863); +if (x_9885 == 0) +{ +lean_object* x_9886; lean_object* x_9887; lean_object* x_9888; +x_9886 = lean_ctor_get(x_9863, 0); +lean_dec(x_9886); +x_9887 = lean_ctor_get(x_9865, 0); +lean_inc(x_9887); +lean_dec(x_9865); +lean_ctor_set(x_9863, 0, x_9887); +if (lean_is_scalar(x_8864)) { + x_9888 = lean_alloc_ctor(0, 2, 0); +} else { + x_9888 = x_8864; +} +lean_ctor_set(x_9888, 0, x_9863); +lean_ctor_set(x_9888, 1, x_9864); +return x_9888; +} +else +{ +lean_object* x_9889; lean_object* x_9890; lean_object* x_9891; lean_object* x_9892; +x_9889 = lean_ctor_get(x_9863, 1); +lean_inc(x_9889); +lean_dec(x_9863); +x_9890 = lean_ctor_get(x_9865, 0); +lean_inc(x_9890); +lean_dec(x_9865); +x_9891 = lean_alloc_ctor(0, 2, 0); +lean_ctor_set(x_9891, 0, x_9890); 
+lean_ctor_set(x_9891, 1, x_9889); +if (lean_is_scalar(x_8864)) { + x_9892 = lean_alloc_ctor(0, 2, 0); +} else { + x_9892 = x_8864; +} +lean_ctor_set(x_9892, 0, x_9891); +lean_ctor_set(x_9892, 1, x_9864); +return x_9892; +} +} +} +} +} +default: +{ +uint8_t x_10174; +lean_dec(x_8865); +lean_dec(x_8864); +lean_free_object(x_8855); +lean_dec(x_8853); +lean_dec(x_5945); +lean_dec(x_5944); +lean_dec(x_2); +lean_dec(x_1); +x_10174 = !lean_is_exclusive(x_8870); +if (x_10174 == 0) +{ +lean_object* x_10175; uint8_t x_10176; lean_object* x_10177; lean_object* x_10178; lean_object* x_10179; lean_object* x_10180; lean_object* x_10181; lean_object* x_10182; lean_object* x_10183; lean_object* x_10184; +x_10175 = lean_ctor_get(x_8870, 0); +lean_dec(x_10175); +x_10176 = 1; +x_10177 = l_Lean_IR_ToIR_lowerLet___closed__14; +x_10178 = l_Lean_Name_toString(x_8845, x_10176, x_10177); +lean_ctor_set_tag(x_8870, 3); +lean_ctor_set(x_8870, 0, x_10178); +x_10179 = l_Lean_IR_ToIR_lowerLet___closed__29; +x_10180 = lean_alloc_ctor(5, 2, 0); +lean_ctor_set(x_10180, 0, x_10179); +lean_ctor_set(x_10180, 1, x_8870); +x_10181 = l_Lean_IR_ToIR_lowerLet___closed__31; +x_10182 = lean_alloc_ctor(5, 2, 0); +lean_ctor_set(x_10182, 0, x_10180); +lean_ctor_set(x_10182, 1, x_10181); +x_10183 = l_Lean_MessageData_ofFormat(x_10182); +x_10184 = l_Lean_throwError___at_Lean_IR_ToIR_lowerLet___spec__1(x_10183, x_8859, x_4, x_5, x_8863); +lean_dec(x_5); +lean_dec(x_4); +lean_dec(x_8859); +return x_10184; +} +else +{ +uint8_t x_10185; lean_object* x_10186; lean_object* x_10187; lean_object* x_10188; lean_object* x_10189; lean_object* x_10190; lean_object* x_10191; lean_object* x_10192; lean_object* x_10193; lean_object* x_10194; +lean_dec(x_8870); +x_10185 = 1; +x_10186 = l_Lean_IR_ToIR_lowerLet___closed__14; +x_10187 = l_Lean_Name_toString(x_8845, x_10185, x_10186); +x_10188 = lean_alloc_ctor(3, 1, 0); +lean_ctor_set(x_10188, 0, x_10187); +x_10189 = l_Lean_IR_ToIR_lowerLet___closed__29; +x_10190 = lean_alloc_ctor(5, 2, 0); +lean_ctor_set(x_10190, 0, x_10189); +lean_ctor_set(x_10190, 1, x_10188); +x_10191 = l_Lean_IR_ToIR_lowerLet___closed__31; +x_10192 = lean_alloc_ctor(5, 2, 0); +lean_ctor_set(x_10192, 0, x_10190); +lean_ctor_set(x_10192, 1, x_10191); +x_10193 = l_Lean_MessageData_ofFormat(x_10192); +x_10194 = l_Lean_throwError___at_Lean_IR_ToIR_lowerLet___spec__1(x_10193, x_8859, x_4, x_5, x_8863); +lean_dec(x_5); +lean_dec(x_4); +lean_dec(x_8859); +return x_10194; +} +} +} +} +} +else +{ +lean_object* x_10195; lean_object* x_10196; lean_object* x_10197; lean_object* x_10198; lean_object* x_10199; lean_object* x_10200; uint8_t x_10201; lean_object* x_10202; +x_10195 = lean_ctor_get(x_8855, 1); +lean_inc(x_10195); +lean_dec(x_8855); +x_10196 = lean_st_ref_get(x_5, x_8856); +x_10197 = lean_ctor_get(x_10196, 0); +lean_inc(x_10197); +x_10198 = lean_ctor_get(x_10196, 1); +lean_inc(x_10198); +if (lean_is_exclusive(x_10196)) { + lean_ctor_release(x_10196, 0); + lean_ctor_release(x_10196, 1); + x_10199 = x_10196; +} else { + lean_dec_ref(x_10196); + x_10199 = lean_box(0); +} +x_10200 = lean_ctor_get(x_10197, 0); +lean_inc(x_10200); +lean_dec(x_10197); +x_10201 = 0; +lean_inc(x_8845); +lean_inc(x_10200); +x_10202 = l_Lean_Environment_find_x3f(x_10200, x_8845, x_10201); +if (lean_obj_tag(x_10202) == 0) +{ +lean_object* x_10203; lean_object* x_10204; +lean_dec(x_10200); +lean_dec(x_10199); +lean_dec(x_8853); +lean_dec(x_8845); +lean_dec(x_5945); +lean_dec(x_5944); +lean_dec(x_2); +lean_dec(x_1); +x_10203 = l_Lean_IR_ToIR_lowerLet___closed__6; 
+x_10204 = l_panic___at_Lean_IR_ToIR_lowerAlt_loop___spec__1(x_10203, x_10195, x_4, x_5, x_10198); +return x_10204; +} +else +{ +lean_object* x_10205; +x_10205 = lean_ctor_get(x_10202, 0); +lean_inc(x_10205); +lean_dec(x_10202); +switch (lean_obj_tag(x_10205)) { +case 0: +{ +lean_object* x_10206; lean_object* x_10207; uint8_t x_10208; +lean_dec(x_10200); +lean_dec(x_5945); +lean_dec(x_5944); +if (lean_is_exclusive(x_10205)) { + lean_ctor_release(x_10205, 0); + x_10206 = x_10205; +} else { + lean_dec_ref(x_10205); + x_10206 = lean_box(0); +} +x_10207 = l_Lean_IR_ToIR_lowerLet___closed__9; +x_10208 = lean_name_eq(x_8845, x_10207); +if (x_10208 == 0) +{ +lean_object* x_10209; uint8_t x_10210; +x_10209 = l_Lean_IR_ToIR_lowerLet___closed__11; +x_10210 = lean_name_eq(x_8845, x_10209); +if (x_10210 == 0) +{ +lean_object* x_10211; lean_object* x_10212; lean_object* x_10213; +lean_dec(x_10199); +lean_inc(x_8845); +x_10211 = l_Lean_IR_ToIR_findDecl(x_8845, x_10195, x_4, x_5, x_10198); +x_10212 = lean_ctor_get(x_10211, 0); +lean_inc(x_10212); +x_10213 = lean_ctor_get(x_10212, 0); +lean_inc(x_10213); +if (lean_obj_tag(x_10213) == 0) +{ +lean_object* x_10214; lean_object* x_10215; lean_object* x_10216; lean_object* x_10217; uint8_t x_10218; lean_object* x_10219; lean_object* x_10220; lean_object* x_10221; lean_object* x_10222; lean_object* x_10223; lean_object* x_10224; lean_object* x_10225; lean_object* x_10226; lean_object* x_10227; +lean_dec(x_8853); +lean_dec(x_2); +lean_dec(x_1); +x_10214 = lean_ctor_get(x_10211, 1); +lean_inc(x_10214); +if (lean_is_exclusive(x_10211)) { + lean_ctor_release(x_10211, 0); + lean_ctor_release(x_10211, 1); + x_10215 = x_10211; +} else { + lean_dec_ref(x_10211); + x_10215 = lean_box(0); +} +x_10216 = lean_ctor_get(x_10212, 1); +lean_inc(x_10216); +if (lean_is_exclusive(x_10212)) { + lean_ctor_release(x_10212, 0); + lean_ctor_release(x_10212, 1); + x_10217 = x_10212; +} else { + lean_dec_ref(x_10212); + x_10217 = lean_box(0); +} +x_10218 = 1; +x_10219 = l_Lean_IR_ToIR_lowerLet___closed__14; +x_10220 = l_Lean_Name_toString(x_8845, x_10218, x_10219); +if (lean_is_scalar(x_10206)) { + x_10221 = lean_alloc_ctor(3, 1, 0); +} else { + x_10221 = x_10206; + lean_ctor_set_tag(x_10221, 3); +} +lean_ctor_set(x_10221, 0, x_10220); +x_10222 = l_Lean_IR_ToIR_lowerLet___closed__13; +if (lean_is_scalar(x_10217)) { + x_10223 = lean_alloc_ctor(5, 2, 0); +} else { + x_10223 = x_10217; + lean_ctor_set_tag(x_10223, 5); +} +lean_ctor_set(x_10223, 0, x_10222); +lean_ctor_set(x_10223, 1, x_10221); +x_10224 = l_Lean_IR_ToIR_lowerLet___closed__16; +if (lean_is_scalar(x_10215)) { + x_10225 = lean_alloc_ctor(5, 2, 0); +} else { + x_10225 = x_10215; + lean_ctor_set_tag(x_10225, 5); +} +lean_ctor_set(x_10225, 0, x_10223); +lean_ctor_set(x_10225, 1, x_10224); +x_10226 = l_Lean_MessageData_ofFormat(x_10225); +x_10227 = l_Lean_throwError___at_Lean_IR_ToIR_lowerLet___spec__1(x_10226, x_10216, x_4, x_5, x_10214); +lean_dec(x_5); +lean_dec(x_4); +lean_dec(x_10216); +return x_10227; +} +else +{ +lean_object* x_10228; lean_object* x_10229; lean_object* x_10230; lean_object* x_10231; lean_object* x_10232; lean_object* x_10233; lean_object* x_10234; uint8_t x_10235; +lean_dec(x_10206); +x_10228 = lean_ctor_get(x_10211, 1); +lean_inc(x_10228); +lean_dec(x_10211); +x_10229 = lean_ctor_get(x_10212, 1); +lean_inc(x_10229); +if (lean_is_exclusive(x_10212)) { + lean_ctor_release(x_10212, 0); + lean_ctor_release(x_10212, 1); + x_10230 = x_10212; +} else { + lean_dec_ref(x_10212); + x_10230 = lean_box(0); +} +x_10231 
= lean_ctor_get(x_10213, 0); +lean_inc(x_10231); +lean_dec(x_10213); +x_10232 = lean_array_get_size(x_8853); +x_10233 = l_Lean_IR_Decl_params(x_10231); +lean_dec(x_10231); +x_10234 = lean_array_get_size(x_10233); +lean_dec(x_10233); +x_10235 = lean_nat_dec_lt(x_10232, x_10234); +if (x_10235 == 0) +{ +uint8_t x_10236; +x_10236 = lean_nat_dec_eq(x_10232, x_10234); +if (x_10236 == 0) +{ +lean_object* x_10237; lean_object* x_10238; lean_object* x_10239; lean_object* x_10240; lean_object* x_10241; lean_object* x_10242; lean_object* x_10243; lean_object* x_10244; lean_object* x_10245; lean_object* x_10246; lean_object* x_10247; lean_object* x_10248; lean_object* x_10249; lean_object* x_10250; lean_object* x_10251; lean_object* x_10252; lean_object* x_10253; +x_10237 = lean_unsigned_to_nat(0u); +x_10238 = l_Array_extract___rarg(x_8853, x_10237, x_10234); +x_10239 = l_Array_extract___rarg(x_8853, x_10234, x_10232); +lean_dec(x_10232); +lean_dec(x_8853); +if (lean_is_scalar(x_10230)) { + x_10240 = lean_alloc_ctor(6, 2, 0); +} else { + x_10240 = x_10230; + lean_ctor_set_tag(x_10240, 6); +} +lean_ctor_set(x_10240, 0, x_8845); +lean_ctor_set(x_10240, 1, x_10238); +x_10241 = lean_ctor_get(x_1, 0); +lean_inc(x_10241); +x_10242 = l_Lean_IR_ToIR_bindVar(x_10241, x_10229, x_4, x_5, x_10228); +x_10243 = lean_ctor_get(x_10242, 0); +lean_inc(x_10243); +x_10244 = lean_ctor_get(x_10242, 1); +lean_inc(x_10244); +lean_dec(x_10242); +x_10245 = lean_ctor_get(x_10243, 0); +lean_inc(x_10245); +x_10246 = lean_ctor_get(x_10243, 1); +lean_inc(x_10246); +lean_dec(x_10243); +x_10247 = l_Lean_IR_ToIR_newVar(x_10246, x_4, x_5, x_10244); +x_10248 = lean_ctor_get(x_10247, 0); +lean_inc(x_10248); +x_10249 = lean_ctor_get(x_10247, 1); +lean_inc(x_10249); +lean_dec(x_10247); +x_10250 = lean_ctor_get(x_10248, 0); +lean_inc(x_10250); +x_10251 = lean_ctor_get(x_10248, 1); +lean_inc(x_10251); +lean_dec(x_10248); +x_10252 = lean_ctor_get(x_1, 2); +lean_inc(x_10252); +lean_dec(x_1); +lean_inc(x_5); +lean_inc(x_4); +x_10253 = l_Lean_IR_ToIR_lowerType(x_10252, x_10251, x_4, x_5, x_10249); +if (lean_obj_tag(x_10253) == 0) +{ +lean_object* x_10254; lean_object* x_10255; lean_object* x_10256; lean_object* x_10257; lean_object* x_10258; +x_10254 = lean_ctor_get(x_10253, 0); +lean_inc(x_10254); +x_10255 = lean_ctor_get(x_10253, 1); +lean_inc(x_10255); +lean_dec(x_10253); +x_10256 = lean_ctor_get(x_10254, 0); +lean_inc(x_10256); +x_10257 = lean_ctor_get(x_10254, 1); +lean_inc(x_10257); +lean_dec(x_10254); +x_10258 = l_Lean_IR_ToIR_lowerLet___lambda__3(x_2, x_10250, x_10239, x_10245, x_10240, x_10256, x_10257, x_4, x_5, x_10255); +return x_10258; +} +else +{ +lean_object* x_10259; lean_object* x_10260; lean_object* x_10261; lean_object* x_10262; +lean_dec(x_10250); +lean_dec(x_10245); +lean_dec(x_10240); +lean_dec(x_10239); +lean_dec(x_5); +lean_dec(x_4); +lean_dec(x_2); +x_10259 = lean_ctor_get(x_10253, 0); +lean_inc(x_10259); +x_10260 = lean_ctor_get(x_10253, 1); +lean_inc(x_10260); +if (lean_is_exclusive(x_10253)) { + lean_ctor_release(x_10253, 0); + lean_ctor_release(x_10253, 1); + x_10261 = x_10253; +} else { + lean_dec_ref(x_10253); + x_10261 = lean_box(0); +} +if (lean_is_scalar(x_10261)) { + x_10262 = lean_alloc_ctor(1, 2, 0); +} else { + x_10262 = x_10261; +} +lean_ctor_set(x_10262, 0, x_10259); +lean_ctor_set(x_10262, 1, x_10260); +return x_10262; +} +} +else +{ +lean_object* x_10263; lean_object* x_10264; lean_object* x_10265; lean_object* x_10266; lean_object* x_10267; lean_object* x_10268; lean_object* x_10269; lean_object* 
x_10270; lean_object* x_10271; +lean_dec(x_10234); +lean_dec(x_10232); +if (lean_is_scalar(x_10230)) { + x_10263 = lean_alloc_ctor(6, 2, 0); +} else { + x_10263 = x_10230; + lean_ctor_set_tag(x_10263, 6); +} +lean_ctor_set(x_10263, 0, x_8845); +lean_ctor_set(x_10263, 1, x_8853); +x_10264 = lean_ctor_get(x_1, 0); +lean_inc(x_10264); +x_10265 = l_Lean_IR_ToIR_bindVar(x_10264, x_10229, x_4, x_5, x_10228); +x_10266 = lean_ctor_get(x_10265, 0); +lean_inc(x_10266); +x_10267 = lean_ctor_get(x_10265, 1); +lean_inc(x_10267); +lean_dec(x_10265); +x_10268 = lean_ctor_get(x_10266, 0); +lean_inc(x_10268); +x_10269 = lean_ctor_get(x_10266, 1); +lean_inc(x_10269); +lean_dec(x_10266); +x_10270 = lean_ctor_get(x_1, 2); +lean_inc(x_10270); +lean_dec(x_1); +lean_inc(x_5); +lean_inc(x_4); +x_10271 = l_Lean_IR_ToIR_lowerType(x_10270, x_10269, x_4, x_5, x_10267); +if (lean_obj_tag(x_10271) == 0) +{ +lean_object* x_10272; lean_object* x_10273; lean_object* x_10274; lean_object* x_10275; lean_object* x_10276; +x_10272 = lean_ctor_get(x_10271, 0); +lean_inc(x_10272); +x_10273 = lean_ctor_get(x_10271, 1); +lean_inc(x_10273); +lean_dec(x_10271); +x_10274 = lean_ctor_get(x_10272, 0); +lean_inc(x_10274); +x_10275 = lean_ctor_get(x_10272, 1); +lean_inc(x_10275); +lean_dec(x_10272); +x_10276 = l_Lean_IR_ToIR_lowerLet___lambda__1(x_2, x_10268, x_10263, x_10274, x_10275, x_4, x_5, x_10273); +return x_10276; +} +else +{ +lean_object* x_10277; lean_object* x_10278; lean_object* x_10279; lean_object* x_10280; +lean_dec(x_10268); +lean_dec(x_10263); +lean_dec(x_5); +lean_dec(x_4); +lean_dec(x_2); +x_10277 = lean_ctor_get(x_10271, 0); +lean_inc(x_10277); +x_10278 = lean_ctor_get(x_10271, 1); +lean_inc(x_10278); +if (lean_is_exclusive(x_10271)) { + lean_ctor_release(x_10271, 0); + lean_ctor_release(x_10271, 1); + x_10279 = x_10271; +} else { + lean_dec_ref(x_10271); + x_10279 = lean_box(0); +} +if (lean_is_scalar(x_10279)) { + x_10280 = lean_alloc_ctor(1, 2, 0); +} else { + x_10280 = x_10279; +} +lean_ctor_set(x_10280, 0, x_10277); +lean_ctor_set(x_10280, 1, x_10278); +return x_10280; +} +} +} +else +{ +lean_object* x_10281; lean_object* x_10282; lean_object* x_10283; lean_object* x_10284; lean_object* x_10285; lean_object* x_10286; lean_object* x_10287; lean_object* x_10288; lean_object* x_10289; +lean_dec(x_10234); +lean_dec(x_10232); +if (lean_is_scalar(x_10230)) { + x_10281 = lean_alloc_ctor(7, 2, 0); +} else { + x_10281 = x_10230; + lean_ctor_set_tag(x_10281, 7); +} +lean_ctor_set(x_10281, 0, x_8845); +lean_ctor_set(x_10281, 1, x_8853); +x_10282 = lean_ctor_get(x_1, 0); +lean_inc(x_10282); +lean_dec(x_1); +x_10283 = l_Lean_IR_ToIR_bindVar(x_10282, x_10229, x_4, x_5, x_10228); +x_10284 = lean_ctor_get(x_10283, 0); +lean_inc(x_10284); +x_10285 = lean_ctor_get(x_10283, 1); +lean_inc(x_10285); +lean_dec(x_10283); +x_10286 = lean_ctor_get(x_10284, 0); +lean_inc(x_10286); +x_10287 = lean_ctor_get(x_10284, 1); +lean_inc(x_10287); +lean_dec(x_10284); +x_10288 = lean_box(7); +x_10289 = l_Lean_IR_ToIR_lowerLet___lambda__1(x_2, x_10286, x_10281, x_10288, x_10287, x_4, x_5, x_10285); +return x_10289; +} +} +} +else +{ +lean_object* x_10290; lean_object* x_10291; lean_object* x_10292; +lean_dec(x_10206); +lean_dec(x_8853); +lean_dec(x_8845); +lean_dec(x_5); +lean_dec(x_4); +lean_dec(x_2); +lean_dec(x_1); +x_10290 = lean_box(13); +x_10291 = lean_alloc_ctor(0, 2, 0); +lean_ctor_set(x_10291, 0, x_10290); +lean_ctor_set(x_10291, 1, x_10195); +if (lean_is_scalar(x_10199)) { + x_10292 = lean_alloc_ctor(0, 2, 0); +} else { + x_10292 = 
x_10199; +} +lean_ctor_set(x_10292, 0, x_10291); +lean_ctor_set(x_10292, 1, x_10198); +return x_10292; +} +} +else +{ +lean_object* x_10293; lean_object* x_10294; lean_object* x_10295; +lean_dec(x_10206); +lean_dec(x_10199); +lean_dec(x_8845); +x_10293 = l_Lean_IR_instInhabitedArg; +x_10294 = lean_unsigned_to_nat(2u); +x_10295 = lean_array_get(x_10293, x_8853, x_10294); +lean_dec(x_8853); +if (lean_obj_tag(x_10295) == 0) +{ +lean_object* x_10296; lean_object* x_10297; lean_object* x_10298; lean_object* x_10299; lean_object* x_10300; lean_object* x_10301; lean_object* x_10302; +x_10296 = lean_ctor_get(x_10295, 0); +lean_inc(x_10296); +lean_dec(x_10295); +x_10297 = lean_ctor_get(x_1, 0); +lean_inc(x_10297); +lean_dec(x_1); +x_10298 = l_Lean_IR_ToIR_bindVarToVarId(x_10297, x_10296, x_10195, x_4, x_5, x_10198); +x_10299 = lean_ctor_get(x_10298, 0); +lean_inc(x_10299); +x_10300 = lean_ctor_get(x_10298, 1); +lean_inc(x_10300); +lean_dec(x_10298); +x_10301 = lean_ctor_get(x_10299, 1); +lean_inc(x_10301); +lean_dec(x_10299); +x_10302 = l_Lean_IR_ToIR_lowerCode(x_2, x_10301, x_4, x_5, x_10300); +return x_10302; +} +else +{ +lean_object* x_10303; lean_object* x_10304; lean_object* x_10305; lean_object* x_10306; lean_object* x_10307; lean_object* x_10308; +x_10303 = lean_ctor_get(x_1, 0); +lean_inc(x_10303); +lean_dec(x_1); +x_10304 = l_Lean_IR_ToIR_bindErased(x_10303, x_10195, x_4, x_5, x_10198); +x_10305 = lean_ctor_get(x_10304, 0); +lean_inc(x_10305); +x_10306 = lean_ctor_get(x_10304, 1); +lean_inc(x_10306); +lean_dec(x_10304); +x_10307 = lean_ctor_get(x_10305, 1); +lean_inc(x_10307); +lean_dec(x_10305); +x_10308 = l_Lean_IR_ToIR_lowerCode(x_2, x_10307, x_4, x_5, x_10306); +return x_10308; +} +} +} +case 1: +{ +lean_object* x_10309; lean_object* x_10310; lean_object* x_10337; lean_object* x_10338; +lean_dec(x_10205); +lean_dec(x_10200); +lean_dec(x_5945); +lean_dec(x_5944); +lean_inc(x_8845); +x_10337 = l_Lean_Compiler_LCNF_getMonoDecl_x3f(x_8845, x_4, x_5, x_10198); +x_10338 = lean_ctor_get(x_10337, 0); +lean_inc(x_10338); +if (lean_obj_tag(x_10338) == 0) +{ +lean_object* x_10339; lean_object* x_10340; lean_object* x_10341; +x_10339 = lean_ctor_get(x_10337, 1); +lean_inc(x_10339); +lean_dec(x_10337); +x_10340 = lean_box(0); +x_10341 = lean_alloc_ctor(0, 2, 0); +lean_ctor_set(x_10341, 0, x_10340); +lean_ctor_set(x_10341, 1, x_10195); +x_10309 = x_10341; +x_10310 = x_10339; +goto block_10336; +} +else +{ +lean_object* x_10342; lean_object* x_10343; lean_object* x_10344; lean_object* x_10345; lean_object* x_10346; lean_object* x_10347; lean_object* x_10348; uint8_t x_10349; +x_10342 = lean_ctor_get(x_10337, 1); +lean_inc(x_10342); +if (lean_is_exclusive(x_10337)) { + lean_ctor_release(x_10337, 0); + lean_ctor_release(x_10337, 1); + x_10343 = x_10337; +} else { + lean_dec_ref(x_10337); + x_10343 = lean_box(0); +} +x_10344 = lean_ctor_get(x_10338, 0); +lean_inc(x_10344); +if (lean_is_exclusive(x_10338)) { + lean_ctor_release(x_10338, 0); + x_10345 = x_10338; +} else { + lean_dec_ref(x_10338); + x_10345 = lean_box(0); +} +x_10346 = lean_array_get_size(x_8853); +x_10347 = lean_ctor_get(x_10344, 3); +lean_inc(x_10347); +lean_dec(x_10344); +x_10348 = lean_array_get_size(x_10347); +lean_dec(x_10347); +x_10349 = lean_nat_dec_lt(x_10346, x_10348); +if (x_10349 == 0) +{ +uint8_t x_10350; +x_10350 = lean_nat_dec_eq(x_10346, x_10348); +if (x_10350 == 0) +{ +lean_object* x_10351; lean_object* x_10352; lean_object* x_10353; lean_object* x_10354; lean_object* x_10355; lean_object* x_10356; lean_object* x_10357; 
lean_object* x_10358; lean_object* x_10359; lean_object* x_10360; lean_object* x_10361; lean_object* x_10362; lean_object* x_10363; lean_object* x_10364; lean_object* x_10365; lean_object* x_10366; lean_object* x_10367; +x_10351 = lean_unsigned_to_nat(0u); +x_10352 = l_Array_extract___rarg(x_8853, x_10351, x_10348); +x_10353 = l_Array_extract___rarg(x_8853, x_10348, x_10346); +lean_dec(x_10346); +lean_inc(x_8845); +if (lean_is_scalar(x_10343)) { + x_10354 = lean_alloc_ctor(6, 2, 0); +} else { + x_10354 = x_10343; + lean_ctor_set_tag(x_10354, 6); +} +lean_ctor_set(x_10354, 0, x_8845); +lean_ctor_set(x_10354, 1, x_10352); +x_10355 = lean_ctor_get(x_1, 0); +lean_inc(x_10355); +x_10356 = l_Lean_IR_ToIR_bindVar(x_10355, x_10195, x_4, x_5, x_10342); +x_10357 = lean_ctor_get(x_10356, 0); +lean_inc(x_10357); +x_10358 = lean_ctor_get(x_10356, 1); +lean_inc(x_10358); +lean_dec(x_10356); +x_10359 = lean_ctor_get(x_10357, 0); +lean_inc(x_10359); +x_10360 = lean_ctor_get(x_10357, 1); +lean_inc(x_10360); +lean_dec(x_10357); +x_10361 = l_Lean_IR_ToIR_newVar(x_10360, x_4, x_5, x_10358); +x_10362 = lean_ctor_get(x_10361, 0); +lean_inc(x_10362); +x_10363 = lean_ctor_get(x_10361, 1); +lean_inc(x_10363); +lean_dec(x_10361); +x_10364 = lean_ctor_get(x_10362, 0); +lean_inc(x_10364); +x_10365 = lean_ctor_get(x_10362, 1); +lean_inc(x_10365); +lean_dec(x_10362); +x_10366 = lean_ctor_get(x_1, 2); +lean_inc(x_10366); +lean_inc(x_5); +lean_inc(x_4); +x_10367 = l_Lean_IR_ToIR_lowerType(x_10366, x_10365, x_4, x_5, x_10363); +if (lean_obj_tag(x_10367) == 0) +{ +lean_object* x_10368; lean_object* x_10369; lean_object* x_10370; lean_object* x_10371; lean_object* x_10372; +x_10368 = lean_ctor_get(x_10367, 0); +lean_inc(x_10368); +x_10369 = lean_ctor_get(x_10367, 1); +lean_inc(x_10369); +lean_dec(x_10367); +x_10370 = lean_ctor_get(x_10368, 0); +lean_inc(x_10370); +x_10371 = lean_ctor_get(x_10368, 1); +lean_inc(x_10371); +lean_dec(x_10368); +lean_inc(x_5); +lean_inc(x_4); +lean_inc(x_2); +x_10372 = l_Lean_IR_ToIR_lowerLet___lambda__3(x_2, x_10364, x_10353, x_10359, x_10354, x_10370, x_10371, x_4, x_5, x_10369); +if (lean_obj_tag(x_10372) == 0) +{ +lean_object* x_10373; lean_object* x_10374; lean_object* x_10375; lean_object* x_10376; lean_object* x_10377; lean_object* x_10378; lean_object* x_10379; +x_10373 = lean_ctor_get(x_10372, 0); +lean_inc(x_10373); +x_10374 = lean_ctor_get(x_10372, 1); +lean_inc(x_10374); +lean_dec(x_10372); +x_10375 = lean_ctor_get(x_10373, 0); +lean_inc(x_10375); +x_10376 = lean_ctor_get(x_10373, 1); +lean_inc(x_10376); +if (lean_is_exclusive(x_10373)) { + lean_ctor_release(x_10373, 0); + lean_ctor_release(x_10373, 1); + x_10377 = x_10373; +} else { + lean_dec_ref(x_10373); + x_10377 = lean_box(0); +} +if (lean_is_scalar(x_10345)) { + x_10378 = lean_alloc_ctor(1, 1, 0); +} else { + x_10378 = x_10345; +} +lean_ctor_set(x_10378, 0, x_10375); +if (lean_is_scalar(x_10377)) { + x_10379 = lean_alloc_ctor(0, 2, 0); +} else { + x_10379 = x_10377; +} +lean_ctor_set(x_10379, 0, x_10378); +lean_ctor_set(x_10379, 1, x_10376); +x_10309 = x_10379; +x_10310 = x_10374; +goto block_10336; +} +else +{ +lean_object* x_10380; lean_object* x_10381; lean_object* x_10382; lean_object* x_10383; +lean_dec(x_10345); +lean_dec(x_10199); +lean_dec(x_8853); +lean_dec(x_8845); +lean_dec(x_5); +lean_dec(x_4); +lean_dec(x_2); +lean_dec(x_1); +x_10380 = lean_ctor_get(x_10372, 0); +lean_inc(x_10380); +x_10381 = lean_ctor_get(x_10372, 1); +lean_inc(x_10381); +if (lean_is_exclusive(x_10372)) { + lean_ctor_release(x_10372, 0); + 
lean_ctor_release(x_10372, 1); + x_10382 = x_10372; +} else { + lean_dec_ref(x_10372); + x_10382 = lean_box(0); +} +if (lean_is_scalar(x_10382)) { + x_10383 = lean_alloc_ctor(1, 2, 0); +} else { + x_10383 = x_10382; +} +lean_ctor_set(x_10383, 0, x_10380); +lean_ctor_set(x_10383, 1, x_10381); +return x_10383; +} +} +else +{ +lean_object* x_10384; lean_object* x_10385; lean_object* x_10386; lean_object* x_10387; +lean_dec(x_10364); +lean_dec(x_10359); +lean_dec(x_10354); +lean_dec(x_10353); +lean_dec(x_10345); +lean_dec(x_10199); +lean_dec(x_8853); +lean_dec(x_8845); +lean_dec(x_5); +lean_dec(x_4); +lean_dec(x_2); +lean_dec(x_1); +x_10384 = lean_ctor_get(x_10367, 0); +lean_inc(x_10384); +x_10385 = lean_ctor_get(x_10367, 1); +lean_inc(x_10385); +if (lean_is_exclusive(x_10367)) { + lean_ctor_release(x_10367, 0); + lean_ctor_release(x_10367, 1); + x_10386 = x_10367; +} else { + lean_dec_ref(x_10367); + x_10386 = lean_box(0); +} +if (lean_is_scalar(x_10386)) { + x_10387 = lean_alloc_ctor(1, 2, 0); +} else { + x_10387 = x_10386; +} +lean_ctor_set(x_10387, 0, x_10384); +lean_ctor_set(x_10387, 1, x_10385); +return x_10387; +} +} +else +{ +lean_object* x_10388; lean_object* x_10389; lean_object* x_10390; lean_object* x_10391; lean_object* x_10392; lean_object* x_10393; lean_object* x_10394; lean_object* x_10395; lean_object* x_10396; +lean_dec(x_10348); +lean_dec(x_10346); +lean_inc(x_8853); +lean_inc(x_8845); +if (lean_is_scalar(x_10343)) { + x_10388 = lean_alloc_ctor(6, 2, 0); +} else { + x_10388 = x_10343; + lean_ctor_set_tag(x_10388, 6); +} +lean_ctor_set(x_10388, 0, x_8845); +lean_ctor_set(x_10388, 1, x_8853); +x_10389 = lean_ctor_get(x_1, 0); +lean_inc(x_10389); +x_10390 = l_Lean_IR_ToIR_bindVar(x_10389, x_10195, x_4, x_5, x_10342); +x_10391 = lean_ctor_get(x_10390, 0); +lean_inc(x_10391); +x_10392 = lean_ctor_get(x_10390, 1); +lean_inc(x_10392); +lean_dec(x_10390); +x_10393 = lean_ctor_get(x_10391, 0); +lean_inc(x_10393); +x_10394 = lean_ctor_get(x_10391, 1); +lean_inc(x_10394); +lean_dec(x_10391); +x_10395 = lean_ctor_get(x_1, 2); +lean_inc(x_10395); +lean_inc(x_5); +lean_inc(x_4); +x_10396 = l_Lean_IR_ToIR_lowerType(x_10395, x_10394, x_4, x_5, x_10392); +if (lean_obj_tag(x_10396) == 0) +{ +lean_object* x_10397; lean_object* x_10398; lean_object* x_10399; lean_object* x_10400; lean_object* x_10401; +x_10397 = lean_ctor_get(x_10396, 0); +lean_inc(x_10397); +x_10398 = lean_ctor_get(x_10396, 1); +lean_inc(x_10398); +lean_dec(x_10396); +x_10399 = lean_ctor_get(x_10397, 0); +lean_inc(x_10399); +x_10400 = lean_ctor_get(x_10397, 1); +lean_inc(x_10400); +lean_dec(x_10397); +lean_inc(x_5); +lean_inc(x_4); +lean_inc(x_2); +x_10401 = l_Lean_IR_ToIR_lowerLet___lambda__1(x_2, x_10393, x_10388, x_10399, x_10400, x_4, x_5, x_10398); +if (lean_obj_tag(x_10401) == 0) +{ +lean_object* x_10402; lean_object* x_10403; lean_object* x_10404; lean_object* x_10405; lean_object* x_10406; lean_object* x_10407; lean_object* x_10408; +x_10402 = lean_ctor_get(x_10401, 0); +lean_inc(x_10402); +x_10403 = lean_ctor_get(x_10401, 1); +lean_inc(x_10403); +lean_dec(x_10401); +x_10404 = lean_ctor_get(x_10402, 0); +lean_inc(x_10404); +x_10405 = lean_ctor_get(x_10402, 1); +lean_inc(x_10405); +if (lean_is_exclusive(x_10402)) { + lean_ctor_release(x_10402, 0); + lean_ctor_release(x_10402, 1); + x_10406 = x_10402; +} else { + lean_dec_ref(x_10402); + x_10406 = lean_box(0); +} +if (lean_is_scalar(x_10345)) { + x_10407 = lean_alloc_ctor(1, 1, 0); +} else { + x_10407 = x_10345; +} +lean_ctor_set(x_10407, 0, x_10404); +if 
(lean_is_scalar(x_10406)) { + x_10408 = lean_alloc_ctor(0, 2, 0); +} else { + x_10408 = x_10406; +} +lean_ctor_set(x_10408, 0, x_10407); +lean_ctor_set(x_10408, 1, x_10405); +x_10309 = x_10408; +x_10310 = x_10403; +goto block_10336; +} +else +{ +lean_object* x_10409; lean_object* x_10410; lean_object* x_10411; lean_object* x_10412; +lean_dec(x_10345); +lean_dec(x_10199); +lean_dec(x_8853); +lean_dec(x_8845); +lean_dec(x_5); +lean_dec(x_4); +lean_dec(x_2); +lean_dec(x_1); +x_10409 = lean_ctor_get(x_10401, 0); +lean_inc(x_10409); +x_10410 = lean_ctor_get(x_10401, 1); +lean_inc(x_10410); +if (lean_is_exclusive(x_10401)) { + lean_ctor_release(x_10401, 0); + lean_ctor_release(x_10401, 1); + x_10411 = x_10401; +} else { + lean_dec_ref(x_10401); + x_10411 = lean_box(0); +} +if (lean_is_scalar(x_10411)) { + x_10412 = lean_alloc_ctor(1, 2, 0); +} else { + x_10412 = x_10411; +} +lean_ctor_set(x_10412, 0, x_10409); +lean_ctor_set(x_10412, 1, x_10410); +return x_10412; +} +} +else +{ +lean_object* x_10413; lean_object* x_10414; lean_object* x_10415; lean_object* x_10416; +lean_dec(x_10393); +lean_dec(x_10388); +lean_dec(x_10345); +lean_dec(x_10199); +lean_dec(x_8853); +lean_dec(x_8845); +lean_dec(x_5); +lean_dec(x_4); +lean_dec(x_2); +lean_dec(x_1); +x_10413 = lean_ctor_get(x_10396, 0); +lean_inc(x_10413); +x_10414 = lean_ctor_get(x_10396, 1); +lean_inc(x_10414); +if (lean_is_exclusive(x_10396)) { + lean_ctor_release(x_10396, 0); + lean_ctor_release(x_10396, 1); + x_10415 = x_10396; +} else { + lean_dec_ref(x_10396); + x_10415 = lean_box(0); +} +if (lean_is_scalar(x_10415)) { + x_10416 = lean_alloc_ctor(1, 2, 0); +} else { + x_10416 = x_10415; +} +lean_ctor_set(x_10416, 0, x_10413); +lean_ctor_set(x_10416, 1, x_10414); +return x_10416; +} +} +} +else +{ +lean_object* x_10417; lean_object* x_10418; lean_object* x_10419; lean_object* x_10420; lean_object* x_10421; lean_object* x_10422; lean_object* x_10423; lean_object* x_10424; lean_object* x_10425; +lean_dec(x_10348); +lean_dec(x_10346); +lean_inc(x_8853); +lean_inc(x_8845); +if (lean_is_scalar(x_10343)) { + x_10417 = lean_alloc_ctor(7, 2, 0); +} else { + x_10417 = x_10343; + lean_ctor_set_tag(x_10417, 7); +} +lean_ctor_set(x_10417, 0, x_8845); +lean_ctor_set(x_10417, 1, x_8853); +x_10418 = lean_ctor_get(x_1, 0); +lean_inc(x_10418); +x_10419 = l_Lean_IR_ToIR_bindVar(x_10418, x_10195, x_4, x_5, x_10342); +x_10420 = lean_ctor_get(x_10419, 0); +lean_inc(x_10420); +x_10421 = lean_ctor_get(x_10419, 1); +lean_inc(x_10421); +lean_dec(x_10419); +x_10422 = lean_ctor_get(x_10420, 0); +lean_inc(x_10422); +x_10423 = lean_ctor_get(x_10420, 1); +lean_inc(x_10423); +lean_dec(x_10420); +x_10424 = lean_box(7); +lean_inc(x_5); +lean_inc(x_4); +lean_inc(x_2); +x_10425 = l_Lean_IR_ToIR_lowerLet___lambda__1(x_2, x_10422, x_10417, x_10424, x_10423, x_4, x_5, x_10421); +if (lean_obj_tag(x_10425) == 0) +{ +lean_object* x_10426; lean_object* x_10427; lean_object* x_10428; lean_object* x_10429; lean_object* x_10430; lean_object* x_10431; lean_object* x_10432; +x_10426 = lean_ctor_get(x_10425, 0); +lean_inc(x_10426); +x_10427 = lean_ctor_get(x_10425, 1); +lean_inc(x_10427); +lean_dec(x_10425); +x_10428 = lean_ctor_get(x_10426, 0); +lean_inc(x_10428); +x_10429 = lean_ctor_get(x_10426, 1); +lean_inc(x_10429); +if (lean_is_exclusive(x_10426)) { + lean_ctor_release(x_10426, 0); + lean_ctor_release(x_10426, 1); + x_10430 = x_10426; +} else { + lean_dec_ref(x_10426); + x_10430 = lean_box(0); +} +if (lean_is_scalar(x_10345)) { + x_10431 = lean_alloc_ctor(1, 1, 0); +} else { + x_10431 
= x_10345; +} +lean_ctor_set(x_10431, 0, x_10428); +if (lean_is_scalar(x_10430)) { + x_10432 = lean_alloc_ctor(0, 2, 0); +} else { + x_10432 = x_10430; +} +lean_ctor_set(x_10432, 0, x_10431); +lean_ctor_set(x_10432, 1, x_10429); +x_10309 = x_10432; +x_10310 = x_10427; +goto block_10336; +} +else +{ +lean_object* x_10433; lean_object* x_10434; lean_object* x_10435; lean_object* x_10436; +lean_dec(x_10345); +lean_dec(x_10199); +lean_dec(x_8853); +lean_dec(x_8845); +lean_dec(x_5); +lean_dec(x_4); +lean_dec(x_2); +lean_dec(x_1); +x_10433 = lean_ctor_get(x_10425, 0); +lean_inc(x_10433); +x_10434 = lean_ctor_get(x_10425, 1); +lean_inc(x_10434); +if (lean_is_exclusive(x_10425)) { + lean_ctor_release(x_10425, 0); + lean_ctor_release(x_10425, 1); + x_10435 = x_10425; +} else { + lean_dec_ref(x_10425); + x_10435 = lean_box(0); +} +if (lean_is_scalar(x_10435)) { + x_10436 = lean_alloc_ctor(1, 2, 0); +} else { + x_10436 = x_10435; +} +lean_ctor_set(x_10436, 0, x_10433); +lean_ctor_set(x_10436, 1, x_10434); +return x_10436; +} +} +} +block_10336: +{ +lean_object* x_10311; +x_10311 = lean_ctor_get(x_10309, 0); +lean_inc(x_10311); +if (lean_obj_tag(x_10311) == 0) +{ +lean_object* x_10312; lean_object* x_10313; lean_object* x_10314; lean_object* x_10315; lean_object* x_10316; lean_object* x_10317; lean_object* x_10318; lean_object* x_10319; lean_object* x_10320; lean_object* x_10321; +lean_dec(x_10199); +x_10312 = lean_ctor_get(x_10309, 1); +lean_inc(x_10312); +lean_dec(x_10309); +x_10313 = lean_alloc_ctor(6, 2, 0); +lean_ctor_set(x_10313, 0, x_8845); +lean_ctor_set(x_10313, 1, x_8853); +x_10314 = lean_ctor_get(x_1, 0); +lean_inc(x_10314); +x_10315 = l_Lean_IR_ToIR_bindVar(x_10314, x_10312, x_4, x_5, x_10310); +x_10316 = lean_ctor_get(x_10315, 0); +lean_inc(x_10316); +x_10317 = lean_ctor_get(x_10315, 1); +lean_inc(x_10317); +lean_dec(x_10315); +x_10318 = lean_ctor_get(x_10316, 0); +lean_inc(x_10318); +x_10319 = lean_ctor_get(x_10316, 1); +lean_inc(x_10319); +lean_dec(x_10316); +x_10320 = lean_ctor_get(x_1, 2); +lean_inc(x_10320); +lean_dec(x_1); +lean_inc(x_5); +lean_inc(x_4); +x_10321 = l_Lean_IR_ToIR_lowerType(x_10320, x_10319, x_4, x_5, x_10317); +if (lean_obj_tag(x_10321) == 0) +{ +lean_object* x_10322; lean_object* x_10323; lean_object* x_10324; lean_object* x_10325; lean_object* x_10326; +x_10322 = lean_ctor_get(x_10321, 0); +lean_inc(x_10322); +x_10323 = lean_ctor_get(x_10321, 1); +lean_inc(x_10323); +lean_dec(x_10321); +x_10324 = lean_ctor_get(x_10322, 0); +lean_inc(x_10324); +x_10325 = lean_ctor_get(x_10322, 1); +lean_inc(x_10325); +lean_dec(x_10322); +x_10326 = l_Lean_IR_ToIR_lowerLet___lambda__1(x_2, x_10318, x_10313, x_10324, x_10325, x_4, x_5, x_10323); +return x_10326; +} +else +{ +lean_object* x_10327; lean_object* x_10328; lean_object* x_10329; lean_object* x_10330; +lean_dec(x_10318); +lean_dec(x_10313); +lean_dec(x_5); +lean_dec(x_4); +lean_dec(x_2); +x_10327 = lean_ctor_get(x_10321, 0); +lean_inc(x_10327); +x_10328 = lean_ctor_get(x_10321, 1); +lean_inc(x_10328); +if (lean_is_exclusive(x_10321)) { + lean_ctor_release(x_10321, 0); + lean_ctor_release(x_10321, 1); + x_10329 = x_10321; +} else { + lean_dec_ref(x_10321); + x_10329 = lean_box(0); +} +if (lean_is_scalar(x_10329)) { + x_10330 = lean_alloc_ctor(1, 2, 0); +} else { + x_10330 = x_10329; +} +lean_ctor_set(x_10330, 0, x_10327); +lean_ctor_set(x_10330, 1, x_10328); +return x_10330; +} +} +else +{ +lean_object* x_10331; lean_object* x_10332; lean_object* x_10333; lean_object* x_10334; lean_object* x_10335; +lean_dec(x_8853); 
+lean_dec(x_8845); +lean_dec(x_5); +lean_dec(x_4); +lean_dec(x_2); +lean_dec(x_1); +x_10331 = lean_ctor_get(x_10309, 1); +lean_inc(x_10331); +if (lean_is_exclusive(x_10309)) { + lean_ctor_release(x_10309, 0); + lean_ctor_release(x_10309, 1); + x_10332 = x_10309; +} else { + lean_dec_ref(x_10309); + x_10332 = lean_box(0); +} +x_10333 = lean_ctor_get(x_10311, 0); +lean_inc(x_10333); +lean_dec(x_10311); +if (lean_is_scalar(x_10332)) { + x_10334 = lean_alloc_ctor(0, 2, 0); +} else { + x_10334 = x_10332; +} +lean_ctor_set(x_10334, 0, x_10333); +lean_ctor_set(x_10334, 1, x_10331); +if (lean_is_scalar(x_10199)) { + x_10335 = lean_alloc_ctor(0, 2, 0); +} else { + x_10335 = x_10199; +} +lean_ctor_set(x_10335, 0, x_10334); +lean_ctor_set(x_10335, 1, x_10310); +return x_10335; +} +} +} +case 2: +{ +lean_object* x_10437; lean_object* x_10438; +lean_dec(x_10205); +lean_dec(x_10200); +lean_dec(x_10199); +lean_dec(x_8853); +lean_dec(x_8845); +lean_dec(x_5945); +lean_dec(x_5944); +lean_dec(x_2); +lean_dec(x_1); +x_10437 = l_Lean_IR_ToIR_lowerLet___closed__18; +x_10438 = l_panic___at_Lean_IR_ToIR_lowerAlt_loop___spec__1(x_10437, x_10195, x_4, x_5, x_10198); +return x_10438; +} +case 3: +{ +lean_object* x_10439; lean_object* x_10440; lean_object* x_10467; lean_object* x_10468; +lean_dec(x_10205); +lean_dec(x_10200); +lean_dec(x_5945); +lean_dec(x_5944); +lean_inc(x_8845); +x_10467 = l_Lean_Compiler_LCNF_getMonoDecl_x3f(x_8845, x_4, x_5, x_10198); +x_10468 = lean_ctor_get(x_10467, 0); +lean_inc(x_10468); +if (lean_obj_tag(x_10468) == 0) +{ +lean_object* x_10469; lean_object* x_10470; lean_object* x_10471; +x_10469 = lean_ctor_get(x_10467, 1); +lean_inc(x_10469); +lean_dec(x_10467); +x_10470 = lean_box(0); +x_10471 = lean_alloc_ctor(0, 2, 0); +lean_ctor_set(x_10471, 0, x_10470); +lean_ctor_set(x_10471, 1, x_10195); +x_10439 = x_10471; +x_10440 = x_10469; +goto block_10466; +} +else +{ +lean_object* x_10472; lean_object* x_10473; lean_object* x_10474; lean_object* x_10475; lean_object* x_10476; lean_object* x_10477; lean_object* x_10478; uint8_t x_10479; +x_10472 = lean_ctor_get(x_10467, 1); +lean_inc(x_10472); +if (lean_is_exclusive(x_10467)) { + lean_ctor_release(x_10467, 0); + lean_ctor_release(x_10467, 1); + x_10473 = x_10467; +} else { + lean_dec_ref(x_10467); + x_10473 = lean_box(0); +} +x_10474 = lean_ctor_get(x_10468, 0); +lean_inc(x_10474); +if (lean_is_exclusive(x_10468)) { + lean_ctor_release(x_10468, 0); + x_10475 = x_10468; +} else { + lean_dec_ref(x_10468); + x_10475 = lean_box(0); +} +x_10476 = lean_array_get_size(x_8853); +x_10477 = lean_ctor_get(x_10474, 3); +lean_inc(x_10477); +lean_dec(x_10474); +x_10478 = lean_array_get_size(x_10477); +lean_dec(x_10477); +x_10479 = lean_nat_dec_lt(x_10476, x_10478); +if (x_10479 == 0) +{ +uint8_t x_10480; +x_10480 = lean_nat_dec_eq(x_10476, x_10478); +if (x_10480 == 0) +{ +lean_object* x_10481; lean_object* x_10482; lean_object* x_10483; lean_object* x_10484; lean_object* x_10485; lean_object* x_10486; lean_object* x_10487; lean_object* x_10488; lean_object* x_10489; lean_object* x_10490; lean_object* x_10491; lean_object* x_10492; lean_object* x_10493; lean_object* x_10494; lean_object* x_10495; lean_object* x_10496; lean_object* x_10497; +x_10481 = lean_unsigned_to_nat(0u); +x_10482 = l_Array_extract___rarg(x_8853, x_10481, x_10478); +x_10483 = l_Array_extract___rarg(x_8853, x_10478, x_10476); +lean_dec(x_10476); +lean_inc(x_8845); +if (lean_is_scalar(x_10473)) { + x_10484 = lean_alloc_ctor(6, 2, 0); +} else { + x_10484 = x_10473; + 
lean_ctor_set_tag(x_10484, 6); +} +lean_ctor_set(x_10484, 0, x_8845); +lean_ctor_set(x_10484, 1, x_10482); +x_10485 = lean_ctor_get(x_1, 0); +lean_inc(x_10485); +x_10486 = l_Lean_IR_ToIR_bindVar(x_10485, x_10195, x_4, x_5, x_10472); +x_10487 = lean_ctor_get(x_10486, 0); +lean_inc(x_10487); +x_10488 = lean_ctor_get(x_10486, 1); +lean_inc(x_10488); +lean_dec(x_10486); +x_10489 = lean_ctor_get(x_10487, 0); +lean_inc(x_10489); +x_10490 = lean_ctor_get(x_10487, 1); +lean_inc(x_10490); +lean_dec(x_10487); +x_10491 = l_Lean_IR_ToIR_newVar(x_10490, x_4, x_5, x_10488); +x_10492 = lean_ctor_get(x_10491, 0); +lean_inc(x_10492); +x_10493 = lean_ctor_get(x_10491, 1); +lean_inc(x_10493); +lean_dec(x_10491); +x_10494 = lean_ctor_get(x_10492, 0); +lean_inc(x_10494); +x_10495 = lean_ctor_get(x_10492, 1); +lean_inc(x_10495); +lean_dec(x_10492); +x_10496 = lean_ctor_get(x_1, 2); +lean_inc(x_10496); +lean_inc(x_5); +lean_inc(x_4); +x_10497 = l_Lean_IR_ToIR_lowerType(x_10496, x_10495, x_4, x_5, x_10493); +if (lean_obj_tag(x_10497) == 0) +{ +lean_object* x_10498; lean_object* x_10499; lean_object* x_10500; lean_object* x_10501; lean_object* x_10502; +x_10498 = lean_ctor_get(x_10497, 0); +lean_inc(x_10498); +x_10499 = lean_ctor_get(x_10497, 1); +lean_inc(x_10499); +lean_dec(x_10497); +x_10500 = lean_ctor_get(x_10498, 0); +lean_inc(x_10500); +x_10501 = lean_ctor_get(x_10498, 1); +lean_inc(x_10501); +lean_dec(x_10498); +lean_inc(x_5); +lean_inc(x_4); +lean_inc(x_2); +x_10502 = l_Lean_IR_ToIR_lowerLet___lambda__3(x_2, x_10494, x_10483, x_10489, x_10484, x_10500, x_10501, x_4, x_5, x_10499); +if (lean_obj_tag(x_10502) == 0) +{ +lean_object* x_10503; lean_object* x_10504; lean_object* x_10505; lean_object* x_10506; lean_object* x_10507; lean_object* x_10508; lean_object* x_10509; +x_10503 = lean_ctor_get(x_10502, 0); +lean_inc(x_10503); +x_10504 = lean_ctor_get(x_10502, 1); +lean_inc(x_10504); +lean_dec(x_10502); +x_10505 = lean_ctor_get(x_10503, 0); +lean_inc(x_10505); +x_10506 = lean_ctor_get(x_10503, 1); +lean_inc(x_10506); +if (lean_is_exclusive(x_10503)) { + lean_ctor_release(x_10503, 0); + lean_ctor_release(x_10503, 1); + x_10507 = x_10503; +} else { + lean_dec_ref(x_10503); + x_10507 = lean_box(0); +} +if (lean_is_scalar(x_10475)) { + x_10508 = lean_alloc_ctor(1, 1, 0); +} else { + x_10508 = x_10475; +} +lean_ctor_set(x_10508, 0, x_10505); +if (lean_is_scalar(x_10507)) { + x_10509 = lean_alloc_ctor(0, 2, 0); +} else { + x_10509 = x_10507; +} +lean_ctor_set(x_10509, 0, x_10508); +lean_ctor_set(x_10509, 1, x_10506); +x_10439 = x_10509; +x_10440 = x_10504; +goto block_10466; +} +else +{ +lean_object* x_10510; lean_object* x_10511; lean_object* x_10512; lean_object* x_10513; +lean_dec(x_10475); +lean_dec(x_10199); +lean_dec(x_8853); +lean_dec(x_8845); +lean_dec(x_5); +lean_dec(x_4); +lean_dec(x_2); +lean_dec(x_1); +x_10510 = lean_ctor_get(x_10502, 0); +lean_inc(x_10510); +x_10511 = lean_ctor_get(x_10502, 1); +lean_inc(x_10511); +if (lean_is_exclusive(x_10502)) { + lean_ctor_release(x_10502, 0); + lean_ctor_release(x_10502, 1); + x_10512 = x_10502; +} else { + lean_dec_ref(x_10502); + x_10512 = lean_box(0); +} +if (lean_is_scalar(x_10512)) { + x_10513 = lean_alloc_ctor(1, 2, 0); +} else { + x_10513 = x_10512; +} +lean_ctor_set(x_10513, 0, x_10510); +lean_ctor_set(x_10513, 1, x_10511); +return x_10513; +} +} +else +{ +lean_object* x_10514; lean_object* x_10515; lean_object* x_10516; lean_object* x_10517; +lean_dec(x_10494); +lean_dec(x_10489); +lean_dec(x_10484); +lean_dec(x_10483); +lean_dec(x_10475); 
+lean_dec(x_10199); +lean_dec(x_8853); +lean_dec(x_8845); +lean_dec(x_5); +lean_dec(x_4); +lean_dec(x_2); +lean_dec(x_1); +x_10514 = lean_ctor_get(x_10497, 0); +lean_inc(x_10514); +x_10515 = lean_ctor_get(x_10497, 1); +lean_inc(x_10515); +if (lean_is_exclusive(x_10497)) { + lean_ctor_release(x_10497, 0); + lean_ctor_release(x_10497, 1); + x_10516 = x_10497; +} else { + lean_dec_ref(x_10497); + x_10516 = lean_box(0); +} +if (lean_is_scalar(x_10516)) { + x_10517 = lean_alloc_ctor(1, 2, 0); +} else { + x_10517 = x_10516; +} +lean_ctor_set(x_10517, 0, x_10514); +lean_ctor_set(x_10517, 1, x_10515); +return x_10517; +} +} +else +{ +lean_object* x_10518; lean_object* x_10519; lean_object* x_10520; lean_object* x_10521; lean_object* x_10522; lean_object* x_10523; lean_object* x_10524; lean_object* x_10525; lean_object* x_10526; +lean_dec(x_10478); +lean_dec(x_10476); +lean_inc(x_8853); +lean_inc(x_8845); +if (lean_is_scalar(x_10473)) { + x_10518 = lean_alloc_ctor(6, 2, 0); +} else { + x_10518 = x_10473; + lean_ctor_set_tag(x_10518, 6); +} +lean_ctor_set(x_10518, 0, x_8845); +lean_ctor_set(x_10518, 1, x_8853); +x_10519 = lean_ctor_get(x_1, 0); +lean_inc(x_10519); +x_10520 = l_Lean_IR_ToIR_bindVar(x_10519, x_10195, x_4, x_5, x_10472); +x_10521 = lean_ctor_get(x_10520, 0); +lean_inc(x_10521); +x_10522 = lean_ctor_get(x_10520, 1); +lean_inc(x_10522); +lean_dec(x_10520); +x_10523 = lean_ctor_get(x_10521, 0); +lean_inc(x_10523); +x_10524 = lean_ctor_get(x_10521, 1); +lean_inc(x_10524); +lean_dec(x_10521); +x_10525 = lean_ctor_get(x_1, 2); +lean_inc(x_10525); +lean_inc(x_5); +lean_inc(x_4); +x_10526 = l_Lean_IR_ToIR_lowerType(x_10525, x_10524, x_4, x_5, x_10522); +if (lean_obj_tag(x_10526) == 0) +{ +lean_object* x_10527; lean_object* x_10528; lean_object* x_10529; lean_object* x_10530; lean_object* x_10531; +x_10527 = lean_ctor_get(x_10526, 0); +lean_inc(x_10527); +x_10528 = lean_ctor_get(x_10526, 1); +lean_inc(x_10528); +lean_dec(x_10526); +x_10529 = lean_ctor_get(x_10527, 0); +lean_inc(x_10529); +x_10530 = lean_ctor_get(x_10527, 1); +lean_inc(x_10530); +lean_dec(x_10527); +lean_inc(x_5); +lean_inc(x_4); +lean_inc(x_2); +x_10531 = l_Lean_IR_ToIR_lowerLet___lambda__1(x_2, x_10523, x_10518, x_10529, x_10530, x_4, x_5, x_10528); +if (lean_obj_tag(x_10531) == 0) +{ +lean_object* x_10532; lean_object* x_10533; lean_object* x_10534; lean_object* x_10535; lean_object* x_10536; lean_object* x_10537; lean_object* x_10538; +x_10532 = lean_ctor_get(x_10531, 0); +lean_inc(x_10532); +x_10533 = lean_ctor_get(x_10531, 1); +lean_inc(x_10533); +lean_dec(x_10531); +x_10534 = lean_ctor_get(x_10532, 0); +lean_inc(x_10534); +x_10535 = lean_ctor_get(x_10532, 1); +lean_inc(x_10535); +if (lean_is_exclusive(x_10532)) { + lean_ctor_release(x_10532, 0); + lean_ctor_release(x_10532, 1); + x_10536 = x_10532; +} else { + lean_dec_ref(x_10532); + x_10536 = lean_box(0); +} +if (lean_is_scalar(x_10475)) { + x_10537 = lean_alloc_ctor(1, 1, 0); +} else { + x_10537 = x_10475; +} +lean_ctor_set(x_10537, 0, x_10534); +if (lean_is_scalar(x_10536)) { + x_10538 = lean_alloc_ctor(0, 2, 0); +} else { + x_10538 = x_10536; +} +lean_ctor_set(x_10538, 0, x_10537); +lean_ctor_set(x_10538, 1, x_10535); +x_10439 = x_10538; +x_10440 = x_10533; +goto block_10466; +} +else +{ +lean_object* x_10539; lean_object* x_10540; lean_object* x_10541; lean_object* x_10542; +lean_dec(x_10475); +lean_dec(x_10199); +lean_dec(x_8853); +lean_dec(x_8845); +lean_dec(x_5); +lean_dec(x_4); +lean_dec(x_2); +lean_dec(x_1); +x_10539 = lean_ctor_get(x_10531, 0); 
+lean_inc(x_10539); +x_10540 = lean_ctor_get(x_10531, 1); +lean_inc(x_10540); +if (lean_is_exclusive(x_10531)) { + lean_ctor_release(x_10531, 0); + lean_ctor_release(x_10531, 1); + x_10541 = x_10531; +} else { + lean_dec_ref(x_10531); + x_10541 = lean_box(0); +} +if (lean_is_scalar(x_10541)) { + x_10542 = lean_alloc_ctor(1, 2, 0); +} else { + x_10542 = x_10541; +} +lean_ctor_set(x_10542, 0, x_10539); +lean_ctor_set(x_10542, 1, x_10540); +return x_10542; +} +} +else +{ +lean_object* x_10543; lean_object* x_10544; lean_object* x_10545; lean_object* x_10546; +lean_dec(x_10523); +lean_dec(x_10518); +lean_dec(x_10475); +lean_dec(x_10199); +lean_dec(x_8853); +lean_dec(x_8845); +lean_dec(x_5); +lean_dec(x_4); +lean_dec(x_2); +lean_dec(x_1); +x_10543 = lean_ctor_get(x_10526, 0); +lean_inc(x_10543); +x_10544 = lean_ctor_get(x_10526, 1); +lean_inc(x_10544); +if (lean_is_exclusive(x_10526)) { + lean_ctor_release(x_10526, 0); + lean_ctor_release(x_10526, 1); + x_10545 = x_10526; +} else { + lean_dec_ref(x_10526); + x_10545 = lean_box(0); +} +if (lean_is_scalar(x_10545)) { + x_10546 = lean_alloc_ctor(1, 2, 0); +} else { + x_10546 = x_10545; +} +lean_ctor_set(x_10546, 0, x_10543); +lean_ctor_set(x_10546, 1, x_10544); +return x_10546; +} +} +} +else +{ +lean_object* x_10547; lean_object* x_10548; lean_object* x_10549; lean_object* x_10550; lean_object* x_10551; lean_object* x_10552; lean_object* x_10553; lean_object* x_10554; lean_object* x_10555; +lean_dec(x_10478); +lean_dec(x_10476); +lean_inc(x_8853); +lean_inc(x_8845); +if (lean_is_scalar(x_10473)) { + x_10547 = lean_alloc_ctor(7, 2, 0); +} else { + x_10547 = x_10473; + lean_ctor_set_tag(x_10547, 7); +} +lean_ctor_set(x_10547, 0, x_8845); +lean_ctor_set(x_10547, 1, x_8853); +x_10548 = lean_ctor_get(x_1, 0); +lean_inc(x_10548); +x_10549 = l_Lean_IR_ToIR_bindVar(x_10548, x_10195, x_4, x_5, x_10472); +x_10550 = lean_ctor_get(x_10549, 0); +lean_inc(x_10550); +x_10551 = lean_ctor_get(x_10549, 1); +lean_inc(x_10551); +lean_dec(x_10549); +x_10552 = lean_ctor_get(x_10550, 0); +lean_inc(x_10552); +x_10553 = lean_ctor_get(x_10550, 1); +lean_inc(x_10553); +lean_dec(x_10550); +x_10554 = lean_box(7); +lean_inc(x_5); +lean_inc(x_4); +lean_inc(x_2); +x_10555 = l_Lean_IR_ToIR_lowerLet___lambda__1(x_2, x_10552, x_10547, x_10554, x_10553, x_4, x_5, x_10551); +if (lean_obj_tag(x_10555) == 0) +{ +lean_object* x_10556; lean_object* x_10557; lean_object* x_10558; lean_object* x_10559; lean_object* x_10560; lean_object* x_10561; lean_object* x_10562; +x_10556 = lean_ctor_get(x_10555, 0); +lean_inc(x_10556); +x_10557 = lean_ctor_get(x_10555, 1); +lean_inc(x_10557); +lean_dec(x_10555); +x_10558 = lean_ctor_get(x_10556, 0); +lean_inc(x_10558); +x_10559 = lean_ctor_get(x_10556, 1); +lean_inc(x_10559); +if (lean_is_exclusive(x_10556)) { + lean_ctor_release(x_10556, 0); + lean_ctor_release(x_10556, 1); + x_10560 = x_10556; +} else { + lean_dec_ref(x_10556); + x_10560 = lean_box(0); +} +if (lean_is_scalar(x_10475)) { + x_10561 = lean_alloc_ctor(1, 1, 0); +} else { + x_10561 = x_10475; +} +lean_ctor_set(x_10561, 0, x_10558); +if (lean_is_scalar(x_10560)) { + x_10562 = lean_alloc_ctor(0, 2, 0); +} else { + x_10562 = x_10560; +} +lean_ctor_set(x_10562, 0, x_10561); +lean_ctor_set(x_10562, 1, x_10559); +x_10439 = x_10562; +x_10440 = x_10557; +goto block_10466; +} +else +{ +lean_object* x_10563; lean_object* x_10564; lean_object* x_10565; lean_object* x_10566; +lean_dec(x_10475); +lean_dec(x_10199); +lean_dec(x_8853); +lean_dec(x_8845); +lean_dec(x_5); +lean_dec(x_4); +lean_dec(x_2); 
+lean_dec(x_1); +x_10563 = lean_ctor_get(x_10555, 0); +lean_inc(x_10563); +x_10564 = lean_ctor_get(x_10555, 1); +lean_inc(x_10564); +if (lean_is_exclusive(x_10555)) { + lean_ctor_release(x_10555, 0); + lean_ctor_release(x_10555, 1); + x_10565 = x_10555; +} else { + lean_dec_ref(x_10555); + x_10565 = lean_box(0); +} +if (lean_is_scalar(x_10565)) { + x_10566 = lean_alloc_ctor(1, 2, 0); +} else { + x_10566 = x_10565; +} +lean_ctor_set(x_10566, 0, x_10563); +lean_ctor_set(x_10566, 1, x_10564); +return x_10566; +} +} +} +block_10466: +{ +lean_object* x_10441; +x_10441 = lean_ctor_get(x_10439, 0); +lean_inc(x_10441); +if (lean_obj_tag(x_10441) == 0) +{ +lean_object* x_10442; lean_object* x_10443; lean_object* x_10444; lean_object* x_10445; lean_object* x_10446; lean_object* x_10447; lean_object* x_10448; lean_object* x_10449; lean_object* x_10450; lean_object* x_10451; +lean_dec(x_10199); +x_10442 = lean_ctor_get(x_10439, 1); +lean_inc(x_10442); +lean_dec(x_10439); +x_10443 = lean_alloc_ctor(6, 2, 0); +lean_ctor_set(x_10443, 0, x_8845); +lean_ctor_set(x_10443, 1, x_8853); +x_10444 = lean_ctor_get(x_1, 0); +lean_inc(x_10444); +x_10445 = l_Lean_IR_ToIR_bindVar(x_10444, x_10442, x_4, x_5, x_10440); +x_10446 = lean_ctor_get(x_10445, 0); +lean_inc(x_10446); +x_10447 = lean_ctor_get(x_10445, 1); +lean_inc(x_10447); +lean_dec(x_10445); +x_10448 = lean_ctor_get(x_10446, 0); +lean_inc(x_10448); +x_10449 = lean_ctor_get(x_10446, 1); +lean_inc(x_10449); +lean_dec(x_10446); +x_10450 = lean_ctor_get(x_1, 2); +lean_inc(x_10450); +lean_dec(x_1); +lean_inc(x_5); +lean_inc(x_4); +x_10451 = l_Lean_IR_ToIR_lowerType(x_10450, x_10449, x_4, x_5, x_10447); +if (lean_obj_tag(x_10451) == 0) +{ +lean_object* x_10452; lean_object* x_10453; lean_object* x_10454; lean_object* x_10455; lean_object* x_10456; +x_10452 = lean_ctor_get(x_10451, 0); +lean_inc(x_10452); +x_10453 = lean_ctor_get(x_10451, 1); +lean_inc(x_10453); +lean_dec(x_10451); +x_10454 = lean_ctor_get(x_10452, 0); +lean_inc(x_10454); +x_10455 = lean_ctor_get(x_10452, 1); +lean_inc(x_10455); +lean_dec(x_10452); +x_10456 = l_Lean_IR_ToIR_lowerLet___lambda__1(x_2, x_10448, x_10443, x_10454, x_10455, x_4, x_5, x_10453); +return x_10456; +} +else +{ +lean_object* x_10457; lean_object* x_10458; lean_object* x_10459; lean_object* x_10460; +lean_dec(x_10448); +lean_dec(x_10443); +lean_dec(x_5); +lean_dec(x_4); +lean_dec(x_2); +x_10457 = lean_ctor_get(x_10451, 0); +lean_inc(x_10457); +x_10458 = lean_ctor_get(x_10451, 1); +lean_inc(x_10458); +if (lean_is_exclusive(x_10451)) { + lean_ctor_release(x_10451, 0); + lean_ctor_release(x_10451, 1); + x_10459 = x_10451; +} else { + lean_dec_ref(x_10451); + x_10459 = lean_box(0); +} +if (lean_is_scalar(x_10459)) { + x_10460 = lean_alloc_ctor(1, 2, 0); +} else { + x_10460 = x_10459; +} +lean_ctor_set(x_10460, 0, x_10457); +lean_ctor_set(x_10460, 1, x_10458); +return x_10460; +} +} +else +{ +lean_object* x_10461; lean_object* x_10462; lean_object* x_10463; lean_object* x_10464; lean_object* x_10465; +lean_dec(x_8853); +lean_dec(x_8845); +lean_dec(x_5); +lean_dec(x_4); +lean_dec(x_2); +lean_dec(x_1); +x_10461 = lean_ctor_get(x_10439, 1); +lean_inc(x_10461); +if (lean_is_exclusive(x_10439)) { + lean_ctor_release(x_10439, 0); + lean_ctor_release(x_10439, 1); + x_10462 = x_10439; +} else { + lean_dec_ref(x_10439); + x_10462 = lean_box(0); +} +x_10463 = lean_ctor_get(x_10441, 0); +lean_inc(x_10463); +lean_dec(x_10441); +if (lean_is_scalar(x_10462)) { + x_10464 = lean_alloc_ctor(0, 2, 0); +} else { + x_10464 = x_10462; +} 
+lean_ctor_set(x_10464, 0, x_10463); +lean_ctor_set(x_10464, 1, x_10461); +if (lean_is_scalar(x_10199)) { + x_10465 = lean_alloc_ctor(0, 2, 0); +} else { + x_10465 = x_10199; +} +lean_ctor_set(x_10465, 0, x_10464); +lean_ctor_set(x_10465, 1, x_10440); +return x_10465; +} +} +} +case 4: +{ +lean_object* x_10567; lean_object* x_10568; uint8_t x_10569; +lean_dec(x_10200); +lean_dec(x_10199); +lean_dec(x_5945); +lean_dec(x_5944); +if (lean_is_exclusive(x_10205)) { + lean_ctor_release(x_10205, 0); + x_10567 = x_10205; +} else { + lean_dec_ref(x_10205); + x_10567 = lean_box(0); +} +x_10568 = l_Lean_IR_ToIR_lowerLet___closed__20; +x_10569 = lean_name_eq(x_8845, x_10568); +if (x_10569 == 0) +{ +uint8_t x_10570; lean_object* x_10571; lean_object* x_10572; lean_object* x_10573; lean_object* x_10574; lean_object* x_10575; lean_object* x_10576; lean_object* x_10577; lean_object* x_10578; lean_object* x_10579; +lean_dec(x_8853); +lean_dec(x_2); +lean_dec(x_1); +x_10570 = 1; +x_10571 = l_Lean_IR_ToIR_lowerLet___closed__14; +x_10572 = l_Lean_Name_toString(x_8845, x_10570, x_10571); +if (lean_is_scalar(x_10567)) { + x_10573 = lean_alloc_ctor(3, 1, 0); +} else { + x_10573 = x_10567; + lean_ctor_set_tag(x_10573, 3); +} +lean_ctor_set(x_10573, 0, x_10572); +x_10574 = l_Lean_IR_ToIR_lowerLet___closed__22; +x_10575 = lean_alloc_ctor(5, 2, 0); +lean_ctor_set(x_10575, 0, x_10574); +lean_ctor_set(x_10575, 1, x_10573); +x_10576 = l_Lean_IR_ToIR_lowerLet___closed__24; +x_10577 = lean_alloc_ctor(5, 2, 0); +lean_ctor_set(x_10577, 0, x_10575); +lean_ctor_set(x_10577, 1, x_10576); +x_10578 = l_Lean_MessageData_ofFormat(x_10577); +x_10579 = l_Lean_throwError___at_Lean_IR_ToIR_lowerLet___spec__1(x_10578, x_10195, x_4, x_5, x_10198); +lean_dec(x_5); +lean_dec(x_4); +lean_dec(x_10195); +return x_10579; +} +else +{ +lean_object* x_10580; lean_object* x_10581; lean_object* x_10582; +lean_dec(x_10567); +lean_dec(x_8845); +x_10580 = l_Lean_IR_instInhabitedArg; +x_10581 = lean_unsigned_to_nat(2u); +x_10582 = lean_array_get(x_10580, x_8853, x_10581); +lean_dec(x_8853); +if (lean_obj_tag(x_10582) == 0) +{ +lean_object* x_10583; lean_object* x_10584; lean_object* x_10585; lean_object* x_10586; lean_object* x_10587; lean_object* x_10588; lean_object* x_10589; +x_10583 = lean_ctor_get(x_10582, 0); +lean_inc(x_10583); +lean_dec(x_10582); +x_10584 = lean_ctor_get(x_1, 0); +lean_inc(x_10584); +lean_dec(x_1); +x_10585 = l_Lean_IR_ToIR_bindVarToVarId(x_10584, x_10583, x_10195, x_4, x_5, x_10198); +x_10586 = lean_ctor_get(x_10585, 0); +lean_inc(x_10586); +x_10587 = lean_ctor_get(x_10585, 1); +lean_inc(x_10587); +lean_dec(x_10585); +x_10588 = lean_ctor_get(x_10586, 1); +lean_inc(x_10588); +lean_dec(x_10586); +x_10589 = l_Lean_IR_ToIR_lowerCode(x_2, x_10588, x_4, x_5, x_10587); +return x_10589; +} +else +{ +lean_object* x_10590; lean_object* x_10591; lean_object* x_10592; lean_object* x_10593; lean_object* x_10594; lean_object* x_10595; +x_10590 = lean_ctor_get(x_1, 0); +lean_inc(x_10590); +lean_dec(x_1); +x_10591 = l_Lean_IR_ToIR_bindErased(x_10590, x_10195, x_4, x_5, x_10198); +x_10592 = lean_ctor_get(x_10591, 0); +lean_inc(x_10592); +x_10593 = lean_ctor_get(x_10591, 1); +lean_inc(x_10593); +lean_dec(x_10591); +x_10594 = lean_ctor_get(x_10592, 1); +lean_inc(x_10594); +lean_dec(x_10592); +x_10595 = l_Lean_IR_ToIR_lowerCode(x_2, x_10594, x_4, x_5, x_10593); +return x_10595; +} +} +} +case 5: +{ +lean_object* x_10596; lean_object* x_10597; +lean_dec(x_10205); +lean_dec(x_10200); +lean_dec(x_10199); +lean_dec(x_8853); +lean_dec(x_8845); 
+lean_dec(x_5945); +lean_dec(x_5944); +lean_dec(x_2); +lean_dec(x_1); +x_10596 = l_Lean_IR_ToIR_lowerLet___closed__26; +x_10597 = l_panic___at_Lean_IR_ToIR_lowerAlt_loop___spec__1(x_10596, x_10195, x_4, x_5, x_10198); +return x_10597; +} +case 6: +{ +lean_object* x_10598; uint8_t x_10599; +x_10598 = lean_ctor_get(x_10205, 0); +lean_inc(x_10598); +lean_dec(x_10205); +lean_inc(x_8845); +x_10599 = l_Lean_isExtern(x_10200, x_8845); +if (x_10599 == 0) +{ +lean_object* x_10600; +lean_dec(x_10199); +lean_dec(x_8853); +lean_inc(x_5); +lean_inc(x_4); +x_10600 = l_Lean_IR_ToIR_getCtorInfo(x_8845, x_10195, x_4, x_5, x_10198); +if (lean_obj_tag(x_10600) == 0) +{ +lean_object* x_10601; lean_object* x_10602; lean_object* x_10603; lean_object* x_10604; lean_object* x_10605; lean_object* x_10606; lean_object* x_10607; lean_object* x_10608; lean_object* x_10609; lean_object* x_10610; lean_object* x_10611; lean_object* x_10612; lean_object* x_10613; lean_object* x_10614; lean_object* x_10615; lean_object* x_10616; lean_object* x_10617; lean_object* x_10618; lean_object* x_10619; lean_object* x_10620; +x_10601 = lean_ctor_get(x_10600, 0); +lean_inc(x_10601); +x_10602 = lean_ctor_get(x_10601, 0); +lean_inc(x_10602); +x_10603 = lean_ctor_get(x_10600, 1); +lean_inc(x_10603); +lean_dec(x_10600); +x_10604 = lean_ctor_get(x_10601, 1); +lean_inc(x_10604); +lean_dec(x_10601); +x_10605 = lean_ctor_get(x_10602, 0); +lean_inc(x_10605); +x_10606 = lean_ctor_get(x_10602, 1); +lean_inc(x_10606); +lean_dec(x_10602); +x_10607 = lean_ctor_get(x_10598, 3); +lean_inc(x_10607); +lean_dec(x_10598); +x_10608 = lean_array_get_size(x_5944); +x_10609 = l_Array_extract___rarg(x_5944, x_10607, x_10608); +lean_dec(x_10608); +lean_dec(x_5944); +x_10610 = lean_array_get_size(x_10606); +x_10611 = lean_unsigned_to_nat(0u); +x_10612 = lean_unsigned_to_nat(1u); +if (lean_is_scalar(x_5945)) { + x_10613 = lean_alloc_ctor(0, 3, 0); +} else { + x_10613 = x_5945; + lean_ctor_set_tag(x_10613, 0); +} +lean_ctor_set(x_10613, 0, x_10611); +lean_ctor_set(x_10613, 1, x_10610); +lean_ctor_set(x_10613, 2, x_10612); +x_10614 = l_Lean_IR_ToIR_lowerLet___closed__27; +x_10615 = l_Std_Range_forIn_x27_loop___at_Lean_IR_ToIR_lowerLet___spec__5(x_10606, x_10609, x_10613, x_10613, x_10614, x_10611, lean_box(0), lean_box(0), x_10604, x_4, x_5, x_10603); +lean_dec(x_10613); +x_10616 = lean_ctor_get(x_10615, 0); +lean_inc(x_10616); +x_10617 = lean_ctor_get(x_10615, 1); +lean_inc(x_10617); +lean_dec(x_10615); +x_10618 = lean_ctor_get(x_10616, 0); +lean_inc(x_10618); +x_10619 = lean_ctor_get(x_10616, 1); +lean_inc(x_10619); +lean_dec(x_10616); +x_10620 = l_Lean_IR_ToIR_lowerLet___lambda__4(x_1, x_2, x_10605, x_10606, x_10609, x_10618, x_10619, x_4, x_5, x_10617); +lean_dec(x_10609); +lean_dec(x_10606); +return x_10620; +} +else +{ +lean_object* x_10621; lean_object* x_10622; lean_object* x_10623; lean_object* x_10624; +lean_dec(x_10598); +lean_dec(x_5945); +lean_dec(x_5944); +lean_dec(x_5); +lean_dec(x_4); +lean_dec(x_2); +lean_dec(x_1); +x_10621 = lean_ctor_get(x_10600, 0); +lean_inc(x_10621); +x_10622 = lean_ctor_get(x_10600, 1); +lean_inc(x_10622); +if (lean_is_exclusive(x_10600)) { + lean_ctor_release(x_10600, 0); + lean_ctor_release(x_10600, 1); + x_10623 = x_10600; +} else { + lean_dec_ref(x_10600); + x_10623 = lean_box(0); +} +if (lean_is_scalar(x_10623)) { + x_10624 = lean_alloc_ctor(1, 2, 0); +} else { + x_10624 = x_10623; +} +lean_ctor_set(x_10624, 0, x_10621); +lean_ctor_set(x_10624, 1, x_10622); +return x_10624; +} +} +else +{ +lean_object* x_10625; 
lean_object* x_10626; lean_object* x_10653; lean_object* x_10654; +lean_dec(x_10598); +lean_dec(x_5945); +lean_dec(x_5944); +lean_inc(x_8845); +x_10653 = l_Lean_Compiler_LCNF_getMonoDecl_x3f(x_8845, x_4, x_5, x_10198); +x_10654 = lean_ctor_get(x_10653, 0); +lean_inc(x_10654); +if (lean_obj_tag(x_10654) == 0) +{ +lean_object* x_10655; lean_object* x_10656; lean_object* x_10657; +x_10655 = lean_ctor_get(x_10653, 1); +lean_inc(x_10655); +lean_dec(x_10653); +x_10656 = lean_box(0); +x_10657 = lean_alloc_ctor(0, 2, 0); +lean_ctor_set(x_10657, 0, x_10656); +lean_ctor_set(x_10657, 1, x_10195); +x_10625 = x_10657; +x_10626 = x_10655; +goto block_10652; +} +else +{ +lean_object* x_10658; lean_object* x_10659; lean_object* x_10660; lean_object* x_10661; lean_object* x_10662; lean_object* x_10663; lean_object* x_10664; uint8_t x_10665; +x_10658 = lean_ctor_get(x_10653, 1); +lean_inc(x_10658); +if (lean_is_exclusive(x_10653)) { + lean_ctor_release(x_10653, 0); + lean_ctor_release(x_10653, 1); + x_10659 = x_10653; +} else { + lean_dec_ref(x_10653); + x_10659 = lean_box(0); +} +x_10660 = lean_ctor_get(x_10654, 0); +lean_inc(x_10660); +if (lean_is_exclusive(x_10654)) { + lean_ctor_release(x_10654, 0); + x_10661 = x_10654; +} else { + lean_dec_ref(x_10654); + x_10661 = lean_box(0); +} +x_10662 = lean_array_get_size(x_8853); +x_10663 = lean_ctor_get(x_10660, 3); +lean_inc(x_10663); +lean_dec(x_10660); +x_10664 = lean_array_get_size(x_10663); +lean_dec(x_10663); +x_10665 = lean_nat_dec_lt(x_10662, x_10664); +if (x_10665 == 0) +{ +uint8_t x_10666; +x_10666 = lean_nat_dec_eq(x_10662, x_10664); +if (x_10666 == 0) +{ +lean_object* x_10667; lean_object* x_10668; lean_object* x_10669; lean_object* x_10670; lean_object* x_10671; lean_object* x_10672; lean_object* x_10673; lean_object* x_10674; lean_object* x_10675; lean_object* x_10676; lean_object* x_10677; lean_object* x_10678; lean_object* x_10679; lean_object* x_10680; lean_object* x_10681; lean_object* x_10682; lean_object* x_10683; +x_10667 = lean_unsigned_to_nat(0u); +x_10668 = l_Array_extract___rarg(x_8853, x_10667, x_10664); +x_10669 = l_Array_extract___rarg(x_8853, x_10664, x_10662); +lean_dec(x_10662); +lean_inc(x_8845); +if (lean_is_scalar(x_10659)) { + x_10670 = lean_alloc_ctor(6, 2, 0); +} else { + x_10670 = x_10659; + lean_ctor_set_tag(x_10670, 6); +} +lean_ctor_set(x_10670, 0, x_8845); +lean_ctor_set(x_10670, 1, x_10668); +x_10671 = lean_ctor_get(x_1, 0); +lean_inc(x_10671); +x_10672 = l_Lean_IR_ToIR_bindVar(x_10671, x_10195, x_4, x_5, x_10658); +x_10673 = lean_ctor_get(x_10672, 0); +lean_inc(x_10673); +x_10674 = lean_ctor_get(x_10672, 1); +lean_inc(x_10674); +lean_dec(x_10672); +x_10675 = lean_ctor_get(x_10673, 0); +lean_inc(x_10675); +x_10676 = lean_ctor_get(x_10673, 1); +lean_inc(x_10676); +lean_dec(x_10673); +x_10677 = l_Lean_IR_ToIR_newVar(x_10676, x_4, x_5, x_10674); +x_10678 = lean_ctor_get(x_10677, 0); +lean_inc(x_10678); +x_10679 = lean_ctor_get(x_10677, 1); +lean_inc(x_10679); +lean_dec(x_10677); +x_10680 = lean_ctor_get(x_10678, 0); +lean_inc(x_10680); +x_10681 = lean_ctor_get(x_10678, 1); +lean_inc(x_10681); +lean_dec(x_10678); +x_10682 = lean_ctor_get(x_1, 2); +lean_inc(x_10682); +lean_inc(x_5); +lean_inc(x_4); +x_10683 = l_Lean_IR_ToIR_lowerType(x_10682, x_10681, x_4, x_5, x_10679); +if (lean_obj_tag(x_10683) == 0) +{ +lean_object* x_10684; lean_object* x_10685; lean_object* x_10686; lean_object* x_10687; lean_object* x_10688; +x_10684 = lean_ctor_get(x_10683, 0); +lean_inc(x_10684); +x_10685 = lean_ctor_get(x_10683, 1); 
+lean_inc(x_10685); +lean_dec(x_10683); +x_10686 = lean_ctor_get(x_10684, 0); +lean_inc(x_10686); +x_10687 = lean_ctor_get(x_10684, 1); +lean_inc(x_10687); +lean_dec(x_10684); +lean_inc(x_5); +lean_inc(x_4); +lean_inc(x_2); +x_10688 = l_Lean_IR_ToIR_lowerLet___lambda__3(x_2, x_10680, x_10669, x_10675, x_10670, x_10686, x_10687, x_4, x_5, x_10685); +if (lean_obj_tag(x_10688) == 0) +{ +lean_object* x_10689; lean_object* x_10690; lean_object* x_10691; lean_object* x_10692; lean_object* x_10693; lean_object* x_10694; lean_object* x_10695; +x_10689 = lean_ctor_get(x_10688, 0); +lean_inc(x_10689); +x_10690 = lean_ctor_get(x_10688, 1); +lean_inc(x_10690); +lean_dec(x_10688); +x_10691 = lean_ctor_get(x_10689, 0); +lean_inc(x_10691); +x_10692 = lean_ctor_get(x_10689, 1); +lean_inc(x_10692); +if (lean_is_exclusive(x_10689)) { + lean_ctor_release(x_10689, 0); + lean_ctor_release(x_10689, 1); + x_10693 = x_10689; +} else { + lean_dec_ref(x_10689); + x_10693 = lean_box(0); +} +if (lean_is_scalar(x_10661)) { + x_10694 = lean_alloc_ctor(1, 1, 0); +} else { + x_10694 = x_10661; +} +lean_ctor_set(x_10694, 0, x_10691); +if (lean_is_scalar(x_10693)) { + x_10695 = lean_alloc_ctor(0, 2, 0); +} else { + x_10695 = x_10693; +} +lean_ctor_set(x_10695, 0, x_10694); +lean_ctor_set(x_10695, 1, x_10692); +x_10625 = x_10695; +x_10626 = x_10690; +goto block_10652; +} +else +{ +lean_object* x_10696; lean_object* x_10697; lean_object* x_10698; lean_object* x_10699; +lean_dec(x_10661); +lean_dec(x_10199); +lean_dec(x_8853); +lean_dec(x_8845); +lean_dec(x_5); +lean_dec(x_4); +lean_dec(x_2); +lean_dec(x_1); +x_10696 = lean_ctor_get(x_10688, 0); +lean_inc(x_10696); +x_10697 = lean_ctor_get(x_10688, 1); +lean_inc(x_10697); +if (lean_is_exclusive(x_10688)) { + lean_ctor_release(x_10688, 0); + lean_ctor_release(x_10688, 1); + x_10698 = x_10688; +} else { + lean_dec_ref(x_10688); + x_10698 = lean_box(0); +} +if (lean_is_scalar(x_10698)) { + x_10699 = lean_alloc_ctor(1, 2, 0); +} else { + x_10699 = x_10698; +} +lean_ctor_set(x_10699, 0, x_10696); +lean_ctor_set(x_10699, 1, x_10697); +return x_10699; +} +} +else +{ +lean_object* x_10700; lean_object* x_10701; lean_object* x_10702; lean_object* x_10703; +lean_dec(x_10680); +lean_dec(x_10675); +lean_dec(x_10670); +lean_dec(x_10669); +lean_dec(x_10661); +lean_dec(x_10199); +lean_dec(x_8853); +lean_dec(x_8845); +lean_dec(x_5); +lean_dec(x_4); +lean_dec(x_2); +lean_dec(x_1); +x_10700 = lean_ctor_get(x_10683, 0); +lean_inc(x_10700); +x_10701 = lean_ctor_get(x_10683, 1); +lean_inc(x_10701); +if (lean_is_exclusive(x_10683)) { + lean_ctor_release(x_10683, 0); + lean_ctor_release(x_10683, 1); + x_10702 = x_10683; +} else { + lean_dec_ref(x_10683); + x_10702 = lean_box(0); +} +if (lean_is_scalar(x_10702)) { + x_10703 = lean_alloc_ctor(1, 2, 0); +} else { + x_10703 = x_10702; +} +lean_ctor_set(x_10703, 0, x_10700); +lean_ctor_set(x_10703, 1, x_10701); +return x_10703; +} +} +else +{ +lean_object* x_10704; lean_object* x_10705; lean_object* x_10706; lean_object* x_10707; lean_object* x_10708; lean_object* x_10709; lean_object* x_10710; lean_object* x_10711; lean_object* x_10712; +lean_dec(x_10664); +lean_dec(x_10662); +lean_inc(x_8853); +lean_inc(x_8845); +if (lean_is_scalar(x_10659)) { + x_10704 = lean_alloc_ctor(6, 2, 0); +} else { + x_10704 = x_10659; + lean_ctor_set_tag(x_10704, 6); +} +lean_ctor_set(x_10704, 0, x_8845); +lean_ctor_set(x_10704, 1, x_8853); +x_10705 = lean_ctor_get(x_1, 0); +lean_inc(x_10705); +x_10706 = l_Lean_IR_ToIR_bindVar(x_10705, x_10195, x_4, x_5, x_10658); +x_10707 = 
lean_ctor_get(x_10706, 0); +lean_inc(x_10707); +x_10708 = lean_ctor_get(x_10706, 1); +lean_inc(x_10708); +lean_dec(x_10706); +x_10709 = lean_ctor_get(x_10707, 0); +lean_inc(x_10709); +x_10710 = lean_ctor_get(x_10707, 1); +lean_inc(x_10710); +lean_dec(x_10707); +x_10711 = lean_ctor_get(x_1, 2); +lean_inc(x_10711); +lean_inc(x_5); +lean_inc(x_4); +x_10712 = l_Lean_IR_ToIR_lowerType(x_10711, x_10710, x_4, x_5, x_10708); +if (lean_obj_tag(x_10712) == 0) +{ +lean_object* x_10713; lean_object* x_10714; lean_object* x_10715; lean_object* x_10716; lean_object* x_10717; +x_10713 = lean_ctor_get(x_10712, 0); +lean_inc(x_10713); +x_10714 = lean_ctor_get(x_10712, 1); +lean_inc(x_10714); +lean_dec(x_10712); +x_10715 = lean_ctor_get(x_10713, 0); +lean_inc(x_10715); +x_10716 = lean_ctor_get(x_10713, 1); +lean_inc(x_10716); +lean_dec(x_10713); +lean_inc(x_5); +lean_inc(x_4); +lean_inc(x_2); +x_10717 = l_Lean_IR_ToIR_lowerLet___lambda__1(x_2, x_10709, x_10704, x_10715, x_10716, x_4, x_5, x_10714); +if (lean_obj_tag(x_10717) == 0) +{ +lean_object* x_10718; lean_object* x_10719; lean_object* x_10720; lean_object* x_10721; lean_object* x_10722; lean_object* x_10723; lean_object* x_10724; +x_10718 = lean_ctor_get(x_10717, 0); +lean_inc(x_10718); +x_10719 = lean_ctor_get(x_10717, 1); +lean_inc(x_10719); +lean_dec(x_10717); +x_10720 = lean_ctor_get(x_10718, 0); +lean_inc(x_10720); +x_10721 = lean_ctor_get(x_10718, 1); +lean_inc(x_10721); +if (lean_is_exclusive(x_10718)) { + lean_ctor_release(x_10718, 0); + lean_ctor_release(x_10718, 1); + x_10722 = x_10718; +} else { + lean_dec_ref(x_10718); + x_10722 = lean_box(0); +} +if (lean_is_scalar(x_10661)) { + x_10723 = lean_alloc_ctor(1, 1, 0); +} else { + x_10723 = x_10661; +} +lean_ctor_set(x_10723, 0, x_10720); +if (lean_is_scalar(x_10722)) { + x_10724 = lean_alloc_ctor(0, 2, 0); +} else { + x_10724 = x_10722; +} +lean_ctor_set(x_10724, 0, x_10723); +lean_ctor_set(x_10724, 1, x_10721); +x_10625 = x_10724; +x_10626 = x_10719; +goto block_10652; +} +else +{ +lean_object* x_10725; lean_object* x_10726; lean_object* x_10727; lean_object* x_10728; +lean_dec(x_10661); +lean_dec(x_10199); +lean_dec(x_8853); +lean_dec(x_8845); +lean_dec(x_5); +lean_dec(x_4); +lean_dec(x_2); +lean_dec(x_1); +x_10725 = lean_ctor_get(x_10717, 0); +lean_inc(x_10725); +x_10726 = lean_ctor_get(x_10717, 1); +lean_inc(x_10726); +if (lean_is_exclusive(x_10717)) { + lean_ctor_release(x_10717, 0); + lean_ctor_release(x_10717, 1); + x_10727 = x_10717; +} else { + lean_dec_ref(x_10717); + x_10727 = lean_box(0); +} +if (lean_is_scalar(x_10727)) { + x_10728 = lean_alloc_ctor(1, 2, 0); +} else { + x_10728 = x_10727; +} +lean_ctor_set(x_10728, 0, x_10725); +lean_ctor_set(x_10728, 1, x_10726); +return x_10728; +} +} +else +{ +lean_object* x_10729; lean_object* x_10730; lean_object* x_10731; lean_object* x_10732; +lean_dec(x_10709); +lean_dec(x_10704); +lean_dec(x_10661); +lean_dec(x_10199); +lean_dec(x_8853); +lean_dec(x_8845); +lean_dec(x_5); +lean_dec(x_4); +lean_dec(x_2); +lean_dec(x_1); +x_10729 = lean_ctor_get(x_10712, 0); +lean_inc(x_10729); +x_10730 = lean_ctor_get(x_10712, 1); +lean_inc(x_10730); +if (lean_is_exclusive(x_10712)) { + lean_ctor_release(x_10712, 0); + lean_ctor_release(x_10712, 1); + x_10731 = x_10712; +} else { + lean_dec_ref(x_10712); + x_10731 = lean_box(0); +} +if (lean_is_scalar(x_10731)) { + x_10732 = lean_alloc_ctor(1, 2, 0); +} else { + x_10732 = x_10731; +} +lean_ctor_set(x_10732, 0, x_10729); +lean_ctor_set(x_10732, 1, x_10730); +return x_10732; +} +} +} +else +{ +lean_object* 
x_10733; lean_object* x_10734; lean_object* x_10735; lean_object* x_10736; lean_object* x_10737; lean_object* x_10738; lean_object* x_10739; lean_object* x_10740; lean_object* x_10741; +lean_dec(x_10664); +lean_dec(x_10662); +lean_inc(x_8853); +lean_inc(x_8845); +if (lean_is_scalar(x_10659)) { + x_10733 = lean_alloc_ctor(7, 2, 0); +} else { + x_10733 = x_10659; + lean_ctor_set_tag(x_10733, 7); +} +lean_ctor_set(x_10733, 0, x_8845); +lean_ctor_set(x_10733, 1, x_8853); +x_10734 = lean_ctor_get(x_1, 0); +lean_inc(x_10734); +x_10735 = l_Lean_IR_ToIR_bindVar(x_10734, x_10195, x_4, x_5, x_10658); +x_10736 = lean_ctor_get(x_10735, 0); +lean_inc(x_10736); +x_10737 = lean_ctor_get(x_10735, 1); +lean_inc(x_10737); +lean_dec(x_10735); +x_10738 = lean_ctor_get(x_10736, 0); +lean_inc(x_10738); +x_10739 = lean_ctor_get(x_10736, 1); +lean_inc(x_10739); +lean_dec(x_10736); +x_10740 = lean_box(7); +lean_inc(x_5); +lean_inc(x_4); +lean_inc(x_2); +x_10741 = l_Lean_IR_ToIR_lowerLet___lambda__1(x_2, x_10738, x_10733, x_10740, x_10739, x_4, x_5, x_10737); +if (lean_obj_tag(x_10741) == 0) +{ +lean_object* x_10742; lean_object* x_10743; lean_object* x_10744; lean_object* x_10745; lean_object* x_10746; lean_object* x_10747; lean_object* x_10748; +x_10742 = lean_ctor_get(x_10741, 0); +lean_inc(x_10742); +x_10743 = lean_ctor_get(x_10741, 1); +lean_inc(x_10743); +lean_dec(x_10741); +x_10744 = lean_ctor_get(x_10742, 0); +lean_inc(x_10744); +x_10745 = lean_ctor_get(x_10742, 1); +lean_inc(x_10745); +if (lean_is_exclusive(x_10742)) { + lean_ctor_release(x_10742, 0); + lean_ctor_release(x_10742, 1); + x_10746 = x_10742; +} else { + lean_dec_ref(x_10742); + x_10746 = lean_box(0); +} +if (lean_is_scalar(x_10661)) { + x_10747 = lean_alloc_ctor(1, 1, 0); +} else { + x_10747 = x_10661; +} +lean_ctor_set(x_10747, 0, x_10744); +if (lean_is_scalar(x_10746)) { + x_10748 = lean_alloc_ctor(0, 2, 0); +} else { + x_10748 = x_10746; +} +lean_ctor_set(x_10748, 0, x_10747); +lean_ctor_set(x_10748, 1, x_10745); +x_10625 = x_10748; +x_10626 = x_10743; +goto block_10652; +} +else +{ +lean_object* x_10749; lean_object* x_10750; lean_object* x_10751; lean_object* x_10752; +lean_dec(x_10661); +lean_dec(x_10199); +lean_dec(x_8853); +lean_dec(x_8845); +lean_dec(x_5); +lean_dec(x_4); +lean_dec(x_2); +lean_dec(x_1); +x_10749 = lean_ctor_get(x_10741, 0); +lean_inc(x_10749); +x_10750 = lean_ctor_get(x_10741, 1); +lean_inc(x_10750); +if (lean_is_exclusive(x_10741)) { + lean_ctor_release(x_10741, 0); + lean_ctor_release(x_10741, 1); + x_10751 = x_10741; +} else { + lean_dec_ref(x_10741); + x_10751 = lean_box(0); +} +if (lean_is_scalar(x_10751)) { + x_10752 = lean_alloc_ctor(1, 2, 0); +} else { + x_10752 = x_10751; +} +lean_ctor_set(x_10752, 0, x_10749); +lean_ctor_set(x_10752, 1, x_10750); +return x_10752; +} +} +} +block_10652: +{ +lean_object* x_10627; +x_10627 = lean_ctor_get(x_10625, 0); +lean_inc(x_10627); +if (lean_obj_tag(x_10627) == 0) +{ +lean_object* x_10628; lean_object* x_10629; lean_object* x_10630; lean_object* x_10631; lean_object* x_10632; lean_object* x_10633; lean_object* x_10634; lean_object* x_10635; lean_object* x_10636; lean_object* x_10637; +lean_dec(x_10199); +x_10628 = lean_ctor_get(x_10625, 1); +lean_inc(x_10628); +lean_dec(x_10625); +x_10629 = lean_alloc_ctor(6, 2, 0); +lean_ctor_set(x_10629, 0, x_8845); +lean_ctor_set(x_10629, 1, x_8853); +x_10630 = lean_ctor_get(x_1, 0); +lean_inc(x_10630); +x_10631 = l_Lean_IR_ToIR_bindVar(x_10630, x_10628, x_4, x_5, x_10626); +x_10632 = lean_ctor_get(x_10631, 0); +lean_inc(x_10632); 
+x_10633 = lean_ctor_get(x_10631, 1); +lean_inc(x_10633); +lean_dec(x_10631); +x_10634 = lean_ctor_get(x_10632, 0); +lean_inc(x_10634); +x_10635 = lean_ctor_get(x_10632, 1); +lean_inc(x_10635); +lean_dec(x_10632); +x_10636 = lean_ctor_get(x_1, 2); +lean_inc(x_10636); +lean_dec(x_1); +lean_inc(x_5); +lean_inc(x_4); +x_10637 = l_Lean_IR_ToIR_lowerType(x_10636, x_10635, x_4, x_5, x_10633); +if (lean_obj_tag(x_10637) == 0) +{ +lean_object* x_10638; lean_object* x_10639; lean_object* x_10640; lean_object* x_10641; lean_object* x_10642; +x_10638 = lean_ctor_get(x_10637, 0); +lean_inc(x_10638); +x_10639 = lean_ctor_get(x_10637, 1); +lean_inc(x_10639); +lean_dec(x_10637); +x_10640 = lean_ctor_get(x_10638, 0); +lean_inc(x_10640); +x_10641 = lean_ctor_get(x_10638, 1); +lean_inc(x_10641); +lean_dec(x_10638); +x_10642 = l_Lean_IR_ToIR_lowerLet___lambda__1(x_2, x_10634, x_10629, x_10640, x_10641, x_4, x_5, x_10639); +return x_10642; +} +else +{ +lean_object* x_10643; lean_object* x_10644; lean_object* x_10645; lean_object* x_10646; +lean_dec(x_10634); +lean_dec(x_10629); +lean_dec(x_5); +lean_dec(x_4); +lean_dec(x_2); +x_10643 = lean_ctor_get(x_10637, 0); +lean_inc(x_10643); +x_10644 = lean_ctor_get(x_10637, 1); +lean_inc(x_10644); +if (lean_is_exclusive(x_10637)) { + lean_ctor_release(x_10637, 0); + lean_ctor_release(x_10637, 1); + x_10645 = x_10637; +} else { + lean_dec_ref(x_10637); + x_10645 = lean_box(0); +} +if (lean_is_scalar(x_10645)) { + x_10646 = lean_alloc_ctor(1, 2, 0); +} else { + x_10646 = x_10645; +} +lean_ctor_set(x_10646, 0, x_10643); +lean_ctor_set(x_10646, 1, x_10644); +return x_10646; +} +} +else +{ +lean_object* x_10647; lean_object* x_10648; lean_object* x_10649; lean_object* x_10650; lean_object* x_10651; +lean_dec(x_8853); +lean_dec(x_8845); +lean_dec(x_5); +lean_dec(x_4); +lean_dec(x_2); +lean_dec(x_1); +x_10647 = lean_ctor_get(x_10625, 1); +lean_inc(x_10647); +if (lean_is_exclusive(x_10625)) { + lean_ctor_release(x_10625, 0); + lean_ctor_release(x_10625, 1); + x_10648 = x_10625; +} else { + lean_dec_ref(x_10625); + x_10648 = lean_box(0); +} +x_10649 = lean_ctor_get(x_10627, 0); +lean_inc(x_10649); +lean_dec(x_10627); +if (lean_is_scalar(x_10648)) { + x_10650 = lean_alloc_ctor(0, 2, 0); +} else { + x_10650 = x_10648; +} +lean_ctor_set(x_10650, 0, x_10649); +lean_ctor_set(x_10650, 1, x_10647); +if (lean_is_scalar(x_10199)) { + x_10651 = lean_alloc_ctor(0, 2, 0); +} else { + x_10651 = x_10199; +} +lean_ctor_set(x_10651, 0, x_10650); +lean_ctor_set(x_10651, 1, x_10626); +return x_10651; +} +} +} +} +default: +{ +lean_object* x_10753; uint8_t x_10754; lean_object* x_10755; lean_object* x_10756; lean_object* x_10757; lean_object* x_10758; lean_object* x_10759; lean_object* x_10760; lean_object* x_10761; lean_object* x_10762; lean_object* x_10763; +lean_dec(x_10200); +lean_dec(x_10199); +lean_dec(x_8853); +lean_dec(x_5945); +lean_dec(x_5944); +lean_dec(x_2); +lean_dec(x_1); +if (lean_is_exclusive(x_10205)) { + lean_ctor_release(x_10205, 0); + x_10753 = x_10205; +} else { + lean_dec_ref(x_10205); + x_10753 = lean_box(0); +} +x_10754 = 1; +x_10755 = l_Lean_IR_ToIR_lowerLet___closed__14; +x_10756 = l_Lean_Name_toString(x_8845, x_10754, x_10755); +if (lean_is_scalar(x_10753)) { + x_10757 = lean_alloc_ctor(3, 1, 0); +} else { + x_10757 = x_10753; + lean_ctor_set_tag(x_10757, 3); +} +lean_ctor_set(x_10757, 0, x_10756); +x_10758 = l_Lean_IR_ToIR_lowerLet___closed__29; +x_10759 = lean_alloc_ctor(5, 2, 0); +lean_ctor_set(x_10759, 0, x_10758); +lean_ctor_set(x_10759, 1, x_10757); +x_10760 = 
l_Lean_IR_ToIR_lowerLet___closed__31; +x_10761 = lean_alloc_ctor(5, 2, 0); +lean_ctor_set(x_10761, 0, x_10759); +lean_ctor_set(x_10761, 1, x_10760); +x_10762 = l_Lean_MessageData_ofFormat(x_10761); +x_10763 = l_Lean_throwError___at_Lean_IR_ToIR_lowerLet___spec__1(x_10762, x_10195, x_4, x_5, x_10198); +lean_dec(x_5); +lean_dec(x_4); +lean_dec(x_10195); +return x_10763; +} +} +} +} +} +else +{ +uint8_t x_10764; +lean_dec(x_8853); +lean_dec(x_8845); +lean_dec(x_5945); +lean_dec(x_5944); +lean_dec(x_5); +lean_dec(x_4); +lean_dec(x_2); +lean_dec(x_1); +x_10764 = !lean_is_exclusive(x_8855); +if (x_10764 == 0) +{ +lean_object* x_10765; lean_object* x_10766; lean_object* x_10767; +x_10765 = lean_ctor_get(x_8855, 0); +lean_dec(x_10765); +x_10766 = lean_ctor_get(x_8857, 0); +lean_inc(x_10766); +lean_dec(x_8857); +lean_ctor_set(x_8855, 0, x_10766); +if (lean_is_scalar(x_8851)) { + x_10767 = lean_alloc_ctor(0, 2, 0); +} else { + x_10767 = x_8851; +} +lean_ctor_set(x_10767, 0, x_8855); +lean_ctor_set(x_10767, 1, x_8856); +return x_10767; +} +else +{ +lean_object* x_10768; lean_object* x_10769; lean_object* x_10770; lean_object* x_10771; +x_10768 = lean_ctor_get(x_8855, 1); +lean_inc(x_10768); +lean_dec(x_8855); +x_10769 = lean_ctor_get(x_8857, 0); +lean_inc(x_10769); +lean_dec(x_8857); +x_10770 = lean_alloc_ctor(0, 2, 0); +lean_ctor_set(x_10770, 0, x_10769); +lean_ctor_set(x_10770, 1, x_10768); +if (lean_is_scalar(x_8851)) { + x_10771 = lean_alloc_ctor(0, 2, 0); +} else { + x_10771 = x_8851; +} +lean_ctor_set(x_10771, 0, x_10770); +lean_ctor_set(x_10771, 1, x_8856); +return x_10771; +} +} +} +} +else +{ +lean_object* x_11053; lean_object* x_11054; lean_object* x_11055; lean_object* x_11056; lean_object* x_11634; lean_object* x_11635; +x_11053 = lean_ctor_get(x_8849, 0); +x_11054 = lean_ctor_get(x_8849, 1); +lean_inc(x_11054); +lean_inc(x_11053); +lean_dec(x_8849); +lean_inc(x_8845); +x_11634 = l_Lean_Compiler_LCNF_getMonoDecl_x3f(x_8845, x_4, x_5, x_8850); +x_11635 = lean_ctor_get(x_11634, 0); +lean_inc(x_11635); +if (lean_obj_tag(x_11635) == 0) +{ +lean_object* x_11636; lean_object* x_11637; lean_object* x_11638; +x_11636 = lean_ctor_get(x_11634, 1); +lean_inc(x_11636); +lean_dec(x_11634); +x_11637 = lean_box(0); +x_11638 = lean_alloc_ctor(0, 2, 0); +lean_ctor_set(x_11638, 0, x_11637); +lean_ctor_set(x_11638, 1, x_11054); +x_11055 = x_11638; +x_11056 = x_11636; +goto block_11633; +} +else +{ +lean_object* x_11639; lean_object* x_11640; lean_object* x_11641; lean_object* x_11642; lean_object* x_11643; lean_object* x_11644; lean_object* x_11645; uint8_t x_11646; +x_11639 = lean_ctor_get(x_11634, 1); +lean_inc(x_11639); +if (lean_is_exclusive(x_11634)) { + lean_ctor_release(x_11634, 0); + lean_ctor_release(x_11634, 1); + x_11640 = x_11634; +} else { + lean_dec_ref(x_11634); + x_11640 = lean_box(0); +} +x_11641 = lean_ctor_get(x_11635, 0); +lean_inc(x_11641); +if (lean_is_exclusive(x_11635)) { + lean_ctor_release(x_11635, 0); + x_11642 = x_11635; +} else { + lean_dec_ref(x_11635); + x_11642 = lean_box(0); +} +x_11643 = lean_array_get_size(x_11053); +x_11644 = lean_ctor_get(x_11641, 3); +lean_inc(x_11644); +lean_dec(x_11641); +x_11645 = lean_array_get_size(x_11644); +lean_dec(x_11644); +x_11646 = lean_nat_dec_lt(x_11643, x_11645); +if (x_11646 == 0) +{ +uint8_t x_11647; +x_11647 = lean_nat_dec_eq(x_11643, x_11645); +if (x_11647 == 0) +{ +lean_object* x_11648; lean_object* x_11649; lean_object* x_11650; lean_object* x_11651; lean_object* x_11652; lean_object* x_11653; lean_object* x_11654; lean_object* 
x_11655; lean_object* x_11656; lean_object* x_11657; lean_object* x_11658; lean_object* x_11659; lean_object* x_11660; lean_object* x_11661; lean_object* x_11662; lean_object* x_11663; lean_object* x_11664; +x_11648 = lean_unsigned_to_nat(0u); +x_11649 = l_Array_extract___rarg(x_11053, x_11648, x_11645); +x_11650 = l_Array_extract___rarg(x_11053, x_11645, x_11643); +lean_dec(x_11643); +lean_inc(x_8845); +if (lean_is_scalar(x_11640)) { + x_11651 = lean_alloc_ctor(6, 2, 0); +} else { + x_11651 = x_11640; + lean_ctor_set_tag(x_11651, 6); +} +lean_ctor_set(x_11651, 0, x_8845); +lean_ctor_set(x_11651, 1, x_11649); +x_11652 = lean_ctor_get(x_1, 0); +lean_inc(x_11652); +x_11653 = l_Lean_IR_ToIR_bindVar(x_11652, x_11054, x_4, x_5, x_11639); +x_11654 = lean_ctor_get(x_11653, 0); +lean_inc(x_11654); +x_11655 = lean_ctor_get(x_11653, 1); +lean_inc(x_11655); +lean_dec(x_11653); +x_11656 = lean_ctor_get(x_11654, 0); +lean_inc(x_11656); +x_11657 = lean_ctor_get(x_11654, 1); +lean_inc(x_11657); +lean_dec(x_11654); +x_11658 = l_Lean_IR_ToIR_newVar(x_11657, x_4, x_5, x_11655); +x_11659 = lean_ctor_get(x_11658, 0); +lean_inc(x_11659); +x_11660 = lean_ctor_get(x_11658, 1); +lean_inc(x_11660); +lean_dec(x_11658); +x_11661 = lean_ctor_get(x_11659, 0); +lean_inc(x_11661); +x_11662 = lean_ctor_get(x_11659, 1); +lean_inc(x_11662); +lean_dec(x_11659); +x_11663 = lean_ctor_get(x_1, 2); +lean_inc(x_11663); +lean_inc(x_5); +lean_inc(x_4); +x_11664 = l_Lean_IR_ToIR_lowerType(x_11663, x_11662, x_4, x_5, x_11660); +if (lean_obj_tag(x_11664) == 0) +{ +lean_object* x_11665; lean_object* x_11666; lean_object* x_11667; lean_object* x_11668; lean_object* x_11669; +x_11665 = lean_ctor_get(x_11664, 0); +lean_inc(x_11665); +x_11666 = lean_ctor_get(x_11664, 1); +lean_inc(x_11666); +lean_dec(x_11664); +x_11667 = lean_ctor_get(x_11665, 0); +lean_inc(x_11667); +x_11668 = lean_ctor_get(x_11665, 1); +lean_inc(x_11668); +lean_dec(x_11665); +lean_inc(x_5); +lean_inc(x_4); +lean_inc(x_2); +x_11669 = l_Lean_IR_ToIR_lowerLet___lambda__3(x_2, x_11661, x_11650, x_11656, x_11651, x_11667, x_11668, x_4, x_5, x_11666); +if (lean_obj_tag(x_11669) == 0) +{ +lean_object* x_11670; lean_object* x_11671; lean_object* x_11672; lean_object* x_11673; lean_object* x_11674; lean_object* x_11675; lean_object* x_11676; +x_11670 = lean_ctor_get(x_11669, 0); +lean_inc(x_11670); +x_11671 = lean_ctor_get(x_11669, 1); +lean_inc(x_11671); +lean_dec(x_11669); +x_11672 = lean_ctor_get(x_11670, 0); +lean_inc(x_11672); +x_11673 = lean_ctor_get(x_11670, 1); +lean_inc(x_11673); +if (lean_is_exclusive(x_11670)) { + lean_ctor_release(x_11670, 0); + lean_ctor_release(x_11670, 1); + x_11674 = x_11670; +} else { + lean_dec_ref(x_11670); + x_11674 = lean_box(0); +} +if (lean_is_scalar(x_11642)) { + x_11675 = lean_alloc_ctor(1, 1, 0); +} else { + x_11675 = x_11642; +} +lean_ctor_set(x_11675, 0, x_11672); +if (lean_is_scalar(x_11674)) { + x_11676 = lean_alloc_ctor(0, 2, 0); +} else { + x_11676 = x_11674; +} +lean_ctor_set(x_11676, 0, x_11675); +lean_ctor_set(x_11676, 1, x_11673); +x_11055 = x_11676; +x_11056 = x_11671; +goto block_11633; +} +else +{ +lean_object* x_11677; lean_object* x_11678; lean_object* x_11679; lean_object* x_11680; +lean_dec(x_11642); +lean_dec(x_11053); +lean_dec(x_8851); +lean_dec(x_8845); +lean_dec(x_5945); +lean_dec(x_5944); +lean_dec(x_5); +lean_dec(x_4); +lean_dec(x_2); +lean_dec(x_1); +x_11677 = lean_ctor_get(x_11669, 0); +lean_inc(x_11677); +x_11678 = lean_ctor_get(x_11669, 1); +lean_inc(x_11678); +if (lean_is_exclusive(x_11669)) { + 
lean_ctor_release(x_11669, 0); + lean_ctor_release(x_11669, 1); + x_11679 = x_11669; +} else { + lean_dec_ref(x_11669); + x_11679 = lean_box(0); +} +if (lean_is_scalar(x_11679)) { + x_11680 = lean_alloc_ctor(1, 2, 0); +} else { + x_11680 = x_11679; +} +lean_ctor_set(x_11680, 0, x_11677); +lean_ctor_set(x_11680, 1, x_11678); +return x_11680; +} +} +else +{ +lean_object* x_11681; lean_object* x_11682; lean_object* x_11683; lean_object* x_11684; +lean_dec(x_11661); +lean_dec(x_11656); +lean_dec(x_11651); +lean_dec(x_11650); +lean_dec(x_11642); +lean_dec(x_11053); +lean_dec(x_8851); +lean_dec(x_8845); +lean_dec(x_5945); +lean_dec(x_5944); +lean_dec(x_5); +lean_dec(x_4); +lean_dec(x_2); +lean_dec(x_1); +x_11681 = lean_ctor_get(x_11664, 0); +lean_inc(x_11681); +x_11682 = lean_ctor_get(x_11664, 1); +lean_inc(x_11682); +if (lean_is_exclusive(x_11664)) { + lean_ctor_release(x_11664, 0); + lean_ctor_release(x_11664, 1); + x_11683 = x_11664; +} else { + lean_dec_ref(x_11664); + x_11683 = lean_box(0); +} +if (lean_is_scalar(x_11683)) { + x_11684 = lean_alloc_ctor(1, 2, 0); +} else { + x_11684 = x_11683; +} +lean_ctor_set(x_11684, 0, x_11681); +lean_ctor_set(x_11684, 1, x_11682); +return x_11684; +} +} +else +{ +lean_object* x_11685; lean_object* x_11686; lean_object* x_11687; lean_object* x_11688; lean_object* x_11689; lean_object* x_11690; lean_object* x_11691; lean_object* x_11692; lean_object* x_11693; +lean_dec(x_11645); +lean_dec(x_11643); +lean_inc(x_11053); +lean_inc(x_8845); +if (lean_is_scalar(x_11640)) { + x_11685 = lean_alloc_ctor(6, 2, 0); +} else { + x_11685 = x_11640; + lean_ctor_set_tag(x_11685, 6); +} +lean_ctor_set(x_11685, 0, x_8845); +lean_ctor_set(x_11685, 1, x_11053); +x_11686 = lean_ctor_get(x_1, 0); +lean_inc(x_11686); +x_11687 = l_Lean_IR_ToIR_bindVar(x_11686, x_11054, x_4, x_5, x_11639); +x_11688 = lean_ctor_get(x_11687, 0); +lean_inc(x_11688); +x_11689 = lean_ctor_get(x_11687, 1); +lean_inc(x_11689); +lean_dec(x_11687); +x_11690 = lean_ctor_get(x_11688, 0); +lean_inc(x_11690); +x_11691 = lean_ctor_get(x_11688, 1); +lean_inc(x_11691); +lean_dec(x_11688); +x_11692 = lean_ctor_get(x_1, 2); +lean_inc(x_11692); +lean_inc(x_5); +lean_inc(x_4); +x_11693 = l_Lean_IR_ToIR_lowerType(x_11692, x_11691, x_4, x_5, x_11689); +if (lean_obj_tag(x_11693) == 0) +{ +lean_object* x_11694; lean_object* x_11695; lean_object* x_11696; lean_object* x_11697; lean_object* x_11698; +x_11694 = lean_ctor_get(x_11693, 0); +lean_inc(x_11694); +x_11695 = lean_ctor_get(x_11693, 1); +lean_inc(x_11695); +lean_dec(x_11693); +x_11696 = lean_ctor_get(x_11694, 0); +lean_inc(x_11696); +x_11697 = lean_ctor_get(x_11694, 1); +lean_inc(x_11697); +lean_dec(x_11694); +lean_inc(x_5); +lean_inc(x_4); +lean_inc(x_2); +x_11698 = l_Lean_IR_ToIR_lowerLet___lambda__1(x_2, x_11690, x_11685, x_11696, x_11697, x_4, x_5, x_11695); +if (lean_obj_tag(x_11698) == 0) +{ +lean_object* x_11699; lean_object* x_11700; lean_object* x_11701; lean_object* x_11702; lean_object* x_11703; lean_object* x_11704; lean_object* x_11705; +x_11699 = lean_ctor_get(x_11698, 0); +lean_inc(x_11699); +x_11700 = lean_ctor_get(x_11698, 1); +lean_inc(x_11700); +lean_dec(x_11698); +x_11701 = lean_ctor_get(x_11699, 0); +lean_inc(x_11701); +x_11702 = lean_ctor_get(x_11699, 1); +lean_inc(x_11702); +if (lean_is_exclusive(x_11699)) { + lean_ctor_release(x_11699, 0); + lean_ctor_release(x_11699, 1); + x_11703 = x_11699; +} else { + lean_dec_ref(x_11699); + x_11703 = lean_box(0); +} +if (lean_is_scalar(x_11642)) { + x_11704 = lean_alloc_ctor(1, 1, 0); +} else { + 
x_11704 = x_11642; +} +lean_ctor_set(x_11704, 0, x_11701); +if (lean_is_scalar(x_11703)) { + x_11705 = lean_alloc_ctor(0, 2, 0); +} else { + x_11705 = x_11703; +} +lean_ctor_set(x_11705, 0, x_11704); +lean_ctor_set(x_11705, 1, x_11702); +x_11055 = x_11705; +x_11056 = x_11700; +goto block_11633; +} +else +{ +lean_object* x_11706; lean_object* x_11707; lean_object* x_11708; lean_object* x_11709; +lean_dec(x_11642); +lean_dec(x_11053); +lean_dec(x_8851); +lean_dec(x_8845); +lean_dec(x_5945); +lean_dec(x_5944); +lean_dec(x_5); +lean_dec(x_4); +lean_dec(x_2); +lean_dec(x_1); +x_11706 = lean_ctor_get(x_11698, 0); +lean_inc(x_11706); +x_11707 = lean_ctor_get(x_11698, 1); +lean_inc(x_11707); +if (lean_is_exclusive(x_11698)) { + lean_ctor_release(x_11698, 0); + lean_ctor_release(x_11698, 1); + x_11708 = x_11698; +} else { + lean_dec_ref(x_11698); + x_11708 = lean_box(0); +} +if (lean_is_scalar(x_11708)) { + x_11709 = lean_alloc_ctor(1, 2, 0); +} else { + x_11709 = x_11708; +} +lean_ctor_set(x_11709, 0, x_11706); +lean_ctor_set(x_11709, 1, x_11707); +return x_11709; +} +} +else +{ +lean_object* x_11710; lean_object* x_11711; lean_object* x_11712; lean_object* x_11713; +lean_dec(x_11690); +lean_dec(x_11685); +lean_dec(x_11642); +lean_dec(x_11053); +lean_dec(x_8851); +lean_dec(x_8845); +lean_dec(x_5945); +lean_dec(x_5944); +lean_dec(x_5); +lean_dec(x_4); +lean_dec(x_2); +lean_dec(x_1); +x_11710 = lean_ctor_get(x_11693, 0); +lean_inc(x_11710); +x_11711 = lean_ctor_get(x_11693, 1); +lean_inc(x_11711); +if (lean_is_exclusive(x_11693)) { + lean_ctor_release(x_11693, 0); + lean_ctor_release(x_11693, 1); + x_11712 = x_11693; +} else { + lean_dec_ref(x_11693); + x_11712 = lean_box(0); +} +if (lean_is_scalar(x_11712)) { + x_11713 = lean_alloc_ctor(1, 2, 0); +} else { + x_11713 = x_11712; +} +lean_ctor_set(x_11713, 0, x_11710); +lean_ctor_set(x_11713, 1, x_11711); +return x_11713; +} +} +} +else +{ +lean_object* x_11714; lean_object* x_11715; lean_object* x_11716; lean_object* x_11717; lean_object* x_11718; lean_object* x_11719; lean_object* x_11720; lean_object* x_11721; lean_object* x_11722; +lean_dec(x_11645); +lean_dec(x_11643); +lean_inc(x_11053); +lean_inc(x_8845); +if (lean_is_scalar(x_11640)) { + x_11714 = lean_alloc_ctor(7, 2, 0); +} else { + x_11714 = x_11640; + lean_ctor_set_tag(x_11714, 7); +} +lean_ctor_set(x_11714, 0, x_8845); +lean_ctor_set(x_11714, 1, x_11053); +x_11715 = lean_ctor_get(x_1, 0); +lean_inc(x_11715); +x_11716 = l_Lean_IR_ToIR_bindVar(x_11715, x_11054, x_4, x_5, x_11639); +x_11717 = lean_ctor_get(x_11716, 0); +lean_inc(x_11717); +x_11718 = lean_ctor_get(x_11716, 1); +lean_inc(x_11718); +lean_dec(x_11716); +x_11719 = lean_ctor_get(x_11717, 0); +lean_inc(x_11719); +x_11720 = lean_ctor_get(x_11717, 1); +lean_inc(x_11720); +lean_dec(x_11717); +x_11721 = lean_box(7); +lean_inc(x_5); +lean_inc(x_4); +lean_inc(x_2); +x_11722 = l_Lean_IR_ToIR_lowerLet___lambda__1(x_2, x_11719, x_11714, x_11721, x_11720, x_4, x_5, x_11718); +if (lean_obj_tag(x_11722) == 0) +{ +lean_object* x_11723; lean_object* x_11724; lean_object* x_11725; lean_object* x_11726; lean_object* x_11727; lean_object* x_11728; lean_object* x_11729; +x_11723 = lean_ctor_get(x_11722, 0); +lean_inc(x_11723); +x_11724 = lean_ctor_get(x_11722, 1); +lean_inc(x_11724); +lean_dec(x_11722); +x_11725 = lean_ctor_get(x_11723, 0); +lean_inc(x_11725); +x_11726 = lean_ctor_get(x_11723, 1); +lean_inc(x_11726); +if (lean_is_exclusive(x_11723)) { + lean_ctor_release(x_11723, 0); + lean_ctor_release(x_11723, 1); + x_11727 = x_11723; +} else { + 
lean_dec_ref(x_11723); + x_11727 = lean_box(0); +} +if (lean_is_scalar(x_11642)) { + x_11728 = lean_alloc_ctor(1, 1, 0); +} else { + x_11728 = x_11642; +} +lean_ctor_set(x_11728, 0, x_11725); +if (lean_is_scalar(x_11727)) { + x_11729 = lean_alloc_ctor(0, 2, 0); +} else { + x_11729 = x_11727; +} +lean_ctor_set(x_11729, 0, x_11728); +lean_ctor_set(x_11729, 1, x_11726); +x_11055 = x_11729; +x_11056 = x_11724; +goto block_11633; +} +else +{ +lean_object* x_11730; lean_object* x_11731; lean_object* x_11732; lean_object* x_11733; +lean_dec(x_11642); +lean_dec(x_11053); +lean_dec(x_8851); +lean_dec(x_8845); +lean_dec(x_5945); +lean_dec(x_5944); +lean_dec(x_5); +lean_dec(x_4); +lean_dec(x_2); +lean_dec(x_1); +x_11730 = lean_ctor_get(x_11722, 0); +lean_inc(x_11730); +x_11731 = lean_ctor_get(x_11722, 1); +lean_inc(x_11731); +if (lean_is_exclusive(x_11722)) { + lean_ctor_release(x_11722, 0); + lean_ctor_release(x_11722, 1); + x_11732 = x_11722; +} else { + lean_dec_ref(x_11722); + x_11732 = lean_box(0); +} +if (lean_is_scalar(x_11732)) { + x_11733 = lean_alloc_ctor(1, 2, 0); +} else { + x_11733 = x_11732; +} +lean_ctor_set(x_11733, 0, x_11730); +lean_ctor_set(x_11733, 1, x_11731); +return x_11733; +} +} +} +block_11633: +{ +lean_object* x_11057; +x_11057 = lean_ctor_get(x_11055, 0); +lean_inc(x_11057); +if (lean_obj_tag(x_11057) == 0) +{ +lean_object* x_11058; lean_object* x_11059; lean_object* x_11060; lean_object* x_11061; lean_object* x_11062; lean_object* x_11063; lean_object* x_11064; uint8_t x_11065; lean_object* x_11066; +lean_dec(x_8851); +x_11058 = lean_ctor_get(x_11055, 1); +lean_inc(x_11058); +if (lean_is_exclusive(x_11055)) { + lean_ctor_release(x_11055, 0); + lean_ctor_release(x_11055, 1); + x_11059 = x_11055; +} else { + lean_dec_ref(x_11055); + x_11059 = lean_box(0); +} +x_11060 = lean_st_ref_get(x_5, x_11056); +x_11061 = lean_ctor_get(x_11060, 0); +lean_inc(x_11061); +x_11062 = lean_ctor_get(x_11060, 1); +lean_inc(x_11062); +if (lean_is_exclusive(x_11060)) { + lean_ctor_release(x_11060, 0); + lean_ctor_release(x_11060, 1); + x_11063 = x_11060; +} else { + lean_dec_ref(x_11060); + x_11063 = lean_box(0); +} +x_11064 = lean_ctor_get(x_11061, 0); +lean_inc(x_11064); +lean_dec(x_11061); +x_11065 = 0; +lean_inc(x_8845); +lean_inc(x_11064); +x_11066 = l_Lean_Environment_find_x3f(x_11064, x_8845, x_11065); +if (lean_obj_tag(x_11066) == 0) +{ +lean_object* x_11067; lean_object* x_11068; +lean_dec(x_11064); +lean_dec(x_11063); +lean_dec(x_11059); +lean_dec(x_11053); +lean_dec(x_8845); +lean_dec(x_5945); +lean_dec(x_5944); +lean_dec(x_2); +lean_dec(x_1); +x_11067 = l_Lean_IR_ToIR_lowerLet___closed__6; +x_11068 = l_panic___at_Lean_IR_ToIR_lowerAlt_loop___spec__1(x_11067, x_11058, x_4, x_5, x_11062); +return x_11068; +} +else +{ +lean_object* x_11069; +x_11069 = lean_ctor_get(x_11066, 0); +lean_inc(x_11069); +lean_dec(x_11066); +switch (lean_obj_tag(x_11069)) { +case 0: +{ +lean_object* x_11070; lean_object* x_11071; uint8_t x_11072; +lean_dec(x_11064); +lean_dec(x_5945); +lean_dec(x_5944); +if (lean_is_exclusive(x_11069)) { + lean_ctor_release(x_11069, 0); + x_11070 = x_11069; +} else { + lean_dec_ref(x_11069); + x_11070 = lean_box(0); +} +x_11071 = l_Lean_IR_ToIR_lowerLet___closed__9; +x_11072 = lean_name_eq(x_8845, x_11071); +if (x_11072 == 0) +{ +lean_object* x_11073; uint8_t x_11074; +x_11073 = l_Lean_IR_ToIR_lowerLet___closed__11; +x_11074 = lean_name_eq(x_8845, x_11073); +if (x_11074 == 0) +{ +lean_object* x_11075; lean_object* x_11076; lean_object* x_11077; +lean_dec(x_11063); 
+lean_dec(x_11059); +lean_inc(x_8845); +x_11075 = l_Lean_IR_ToIR_findDecl(x_8845, x_11058, x_4, x_5, x_11062); +x_11076 = lean_ctor_get(x_11075, 0); +lean_inc(x_11076); +x_11077 = lean_ctor_get(x_11076, 0); +lean_inc(x_11077); +if (lean_obj_tag(x_11077) == 0) +{ +lean_object* x_11078; lean_object* x_11079; lean_object* x_11080; lean_object* x_11081; uint8_t x_11082; lean_object* x_11083; lean_object* x_11084; lean_object* x_11085; lean_object* x_11086; lean_object* x_11087; lean_object* x_11088; lean_object* x_11089; lean_object* x_11090; lean_object* x_11091; +lean_dec(x_11053); +lean_dec(x_2); +lean_dec(x_1); +x_11078 = lean_ctor_get(x_11075, 1); +lean_inc(x_11078); +if (lean_is_exclusive(x_11075)) { + lean_ctor_release(x_11075, 0); + lean_ctor_release(x_11075, 1); + x_11079 = x_11075; +} else { + lean_dec_ref(x_11075); + x_11079 = lean_box(0); +} +x_11080 = lean_ctor_get(x_11076, 1); +lean_inc(x_11080); +if (lean_is_exclusive(x_11076)) { + lean_ctor_release(x_11076, 0); + lean_ctor_release(x_11076, 1); + x_11081 = x_11076; +} else { + lean_dec_ref(x_11076); + x_11081 = lean_box(0); +} +x_11082 = 1; +x_11083 = l_Lean_IR_ToIR_lowerLet___closed__14; +x_11084 = l_Lean_Name_toString(x_8845, x_11082, x_11083); +if (lean_is_scalar(x_11070)) { + x_11085 = lean_alloc_ctor(3, 1, 0); +} else { + x_11085 = x_11070; + lean_ctor_set_tag(x_11085, 3); +} +lean_ctor_set(x_11085, 0, x_11084); +x_11086 = l_Lean_IR_ToIR_lowerLet___closed__13; +if (lean_is_scalar(x_11081)) { + x_11087 = lean_alloc_ctor(5, 2, 0); +} else { + x_11087 = x_11081; + lean_ctor_set_tag(x_11087, 5); +} +lean_ctor_set(x_11087, 0, x_11086); +lean_ctor_set(x_11087, 1, x_11085); +x_11088 = l_Lean_IR_ToIR_lowerLet___closed__16; +if (lean_is_scalar(x_11079)) { + x_11089 = lean_alloc_ctor(5, 2, 0); +} else { + x_11089 = x_11079; + lean_ctor_set_tag(x_11089, 5); +} +lean_ctor_set(x_11089, 0, x_11087); +lean_ctor_set(x_11089, 1, x_11088); +x_11090 = l_Lean_MessageData_ofFormat(x_11089); +x_11091 = l_Lean_throwError___at_Lean_IR_ToIR_lowerLet___spec__1(x_11090, x_11080, x_4, x_5, x_11078); +lean_dec(x_5); +lean_dec(x_4); +lean_dec(x_11080); +return x_11091; +} +else +{ +lean_object* x_11092; lean_object* x_11093; lean_object* x_11094; lean_object* x_11095; lean_object* x_11096; lean_object* x_11097; lean_object* x_11098; uint8_t x_11099; +lean_dec(x_11070); +x_11092 = lean_ctor_get(x_11075, 1); +lean_inc(x_11092); +lean_dec(x_11075); +x_11093 = lean_ctor_get(x_11076, 1); +lean_inc(x_11093); +if (lean_is_exclusive(x_11076)) { + lean_ctor_release(x_11076, 0); + lean_ctor_release(x_11076, 1); + x_11094 = x_11076; +} else { + lean_dec_ref(x_11076); + x_11094 = lean_box(0); +} +x_11095 = lean_ctor_get(x_11077, 0); +lean_inc(x_11095); +lean_dec(x_11077); +x_11096 = lean_array_get_size(x_11053); +x_11097 = l_Lean_IR_Decl_params(x_11095); +lean_dec(x_11095); +x_11098 = lean_array_get_size(x_11097); +lean_dec(x_11097); +x_11099 = lean_nat_dec_lt(x_11096, x_11098); +if (x_11099 == 0) +{ +uint8_t x_11100; +x_11100 = lean_nat_dec_eq(x_11096, x_11098); +if (x_11100 == 0) +{ +lean_object* x_11101; lean_object* x_11102; lean_object* x_11103; lean_object* x_11104; lean_object* x_11105; lean_object* x_11106; lean_object* x_11107; lean_object* x_11108; lean_object* x_11109; lean_object* x_11110; lean_object* x_11111; lean_object* x_11112; lean_object* x_11113; lean_object* x_11114; lean_object* x_11115; lean_object* x_11116; lean_object* x_11117; +x_11101 = lean_unsigned_to_nat(0u); +x_11102 = l_Array_extract___rarg(x_11053, x_11101, x_11098); +x_11103 = 
l_Array_extract___rarg(x_11053, x_11098, x_11096); +lean_dec(x_11096); +lean_dec(x_11053); +if (lean_is_scalar(x_11094)) { + x_11104 = lean_alloc_ctor(6, 2, 0); +} else { + x_11104 = x_11094; + lean_ctor_set_tag(x_11104, 6); +} +lean_ctor_set(x_11104, 0, x_8845); +lean_ctor_set(x_11104, 1, x_11102); +x_11105 = lean_ctor_get(x_1, 0); +lean_inc(x_11105); +x_11106 = l_Lean_IR_ToIR_bindVar(x_11105, x_11093, x_4, x_5, x_11092); +x_11107 = lean_ctor_get(x_11106, 0); +lean_inc(x_11107); +x_11108 = lean_ctor_get(x_11106, 1); +lean_inc(x_11108); +lean_dec(x_11106); +x_11109 = lean_ctor_get(x_11107, 0); +lean_inc(x_11109); +x_11110 = lean_ctor_get(x_11107, 1); +lean_inc(x_11110); +lean_dec(x_11107); +x_11111 = l_Lean_IR_ToIR_newVar(x_11110, x_4, x_5, x_11108); +x_11112 = lean_ctor_get(x_11111, 0); +lean_inc(x_11112); +x_11113 = lean_ctor_get(x_11111, 1); +lean_inc(x_11113); +lean_dec(x_11111); +x_11114 = lean_ctor_get(x_11112, 0); +lean_inc(x_11114); +x_11115 = lean_ctor_get(x_11112, 1); +lean_inc(x_11115); +lean_dec(x_11112); +x_11116 = lean_ctor_get(x_1, 2); +lean_inc(x_11116); +lean_dec(x_1); +lean_inc(x_5); +lean_inc(x_4); +x_11117 = l_Lean_IR_ToIR_lowerType(x_11116, x_11115, x_4, x_5, x_11113); +if (lean_obj_tag(x_11117) == 0) +{ +lean_object* x_11118; lean_object* x_11119; lean_object* x_11120; lean_object* x_11121; lean_object* x_11122; +x_11118 = lean_ctor_get(x_11117, 0); +lean_inc(x_11118); +x_11119 = lean_ctor_get(x_11117, 1); +lean_inc(x_11119); +lean_dec(x_11117); +x_11120 = lean_ctor_get(x_11118, 0); +lean_inc(x_11120); +x_11121 = lean_ctor_get(x_11118, 1); +lean_inc(x_11121); +lean_dec(x_11118); +x_11122 = l_Lean_IR_ToIR_lowerLet___lambda__3(x_2, x_11114, x_11103, x_11109, x_11104, x_11120, x_11121, x_4, x_5, x_11119); +return x_11122; +} +else +{ +lean_object* x_11123; lean_object* x_11124; lean_object* x_11125; lean_object* x_11126; +lean_dec(x_11114); +lean_dec(x_11109); +lean_dec(x_11104); +lean_dec(x_11103); +lean_dec(x_5); +lean_dec(x_4); +lean_dec(x_2); +x_11123 = lean_ctor_get(x_11117, 0); +lean_inc(x_11123); +x_11124 = lean_ctor_get(x_11117, 1); +lean_inc(x_11124); +if (lean_is_exclusive(x_11117)) { + lean_ctor_release(x_11117, 0); + lean_ctor_release(x_11117, 1); + x_11125 = x_11117; +} else { + lean_dec_ref(x_11117); + x_11125 = lean_box(0); +} +if (lean_is_scalar(x_11125)) { + x_11126 = lean_alloc_ctor(1, 2, 0); +} else { + x_11126 = x_11125; +} +lean_ctor_set(x_11126, 0, x_11123); +lean_ctor_set(x_11126, 1, x_11124); +return x_11126; +} +} +else +{ +lean_object* x_11127; lean_object* x_11128; lean_object* x_11129; lean_object* x_11130; lean_object* x_11131; lean_object* x_11132; lean_object* x_11133; lean_object* x_11134; lean_object* x_11135; +lean_dec(x_11098); +lean_dec(x_11096); +if (lean_is_scalar(x_11094)) { + x_11127 = lean_alloc_ctor(6, 2, 0); +} else { + x_11127 = x_11094; + lean_ctor_set_tag(x_11127, 6); +} +lean_ctor_set(x_11127, 0, x_8845); +lean_ctor_set(x_11127, 1, x_11053); +x_11128 = lean_ctor_get(x_1, 0); +lean_inc(x_11128); +x_11129 = l_Lean_IR_ToIR_bindVar(x_11128, x_11093, x_4, x_5, x_11092); +x_11130 = lean_ctor_get(x_11129, 0); +lean_inc(x_11130); +x_11131 = lean_ctor_get(x_11129, 1); +lean_inc(x_11131); +lean_dec(x_11129); +x_11132 = lean_ctor_get(x_11130, 0); +lean_inc(x_11132); +x_11133 = lean_ctor_get(x_11130, 1); +lean_inc(x_11133); +lean_dec(x_11130); +x_11134 = lean_ctor_get(x_1, 2); +lean_inc(x_11134); +lean_dec(x_1); +lean_inc(x_5); +lean_inc(x_4); +x_11135 = l_Lean_IR_ToIR_lowerType(x_11134, x_11133, x_4, x_5, x_11131); +if 
(lean_obj_tag(x_11135) == 0) +{ +lean_object* x_11136; lean_object* x_11137; lean_object* x_11138; lean_object* x_11139; lean_object* x_11140; +x_11136 = lean_ctor_get(x_11135, 0); +lean_inc(x_11136); +x_11137 = lean_ctor_get(x_11135, 1); +lean_inc(x_11137); +lean_dec(x_11135); +x_11138 = lean_ctor_get(x_11136, 0); +lean_inc(x_11138); +x_11139 = lean_ctor_get(x_11136, 1); +lean_inc(x_11139); +lean_dec(x_11136); +x_11140 = l_Lean_IR_ToIR_lowerLet___lambda__1(x_2, x_11132, x_11127, x_11138, x_11139, x_4, x_5, x_11137); +return x_11140; +} +else +{ +lean_object* x_11141; lean_object* x_11142; lean_object* x_11143; lean_object* x_11144; +lean_dec(x_11132); +lean_dec(x_11127); +lean_dec(x_5); +lean_dec(x_4); +lean_dec(x_2); +x_11141 = lean_ctor_get(x_11135, 0); +lean_inc(x_11141); +x_11142 = lean_ctor_get(x_11135, 1); +lean_inc(x_11142); +if (lean_is_exclusive(x_11135)) { + lean_ctor_release(x_11135, 0); + lean_ctor_release(x_11135, 1); + x_11143 = x_11135; +} else { + lean_dec_ref(x_11135); + x_11143 = lean_box(0); +} +if (lean_is_scalar(x_11143)) { + x_11144 = lean_alloc_ctor(1, 2, 0); +} else { + x_11144 = x_11143; +} +lean_ctor_set(x_11144, 0, x_11141); +lean_ctor_set(x_11144, 1, x_11142); +return x_11144; +} +} +} +else +{ +lean_object* x_11145; lean_object* x_11146; lean_object* x_11147; lean_object* x_11148; lean_object* x_11149; lean_object* x_11150; lean_object* x_11151; lean_object* x_11152; lean_object* x_11153; +lean_dec(x_11098); +lean_dec(x_11096); +if (lean_is_scalar(x_11094)) { + x_11145 = lean_alloc_ctor(7, 2, 0); +} else { + x_11145 = x_11094; + lean_ctor_set_tag(x_11145, 7); +} +lean_ctor_set(x_11145, 0, x_8845); +lean_ctor_set(x_11145, 1, x_11053); +x_11146 = lean_ctor_get(x_1, 0); +lean_inc(x_11146); +lean_dec(x_1); +x_11147 = l_Lean_IR_ToIR_bindVar(x_11146, x_11093, x_4, x_5, x_11092); +x_11148 = lean_ctor_get(x_11147, 0); +lean_inc(x_11148); +x_11149 = lean_ctor_get(x_11147, 1); +lean_inc(x_11149); +lean_dec(x_11147); +x_11150 = lean_ctor_get(x_11148, 0); +lean_inc(x_11150); +x_11151 = lean_ctor_get(x_11148, 1); +lean_inc(x_11151); +lean_dec(x_11148); +x_11152 = lean_box(7); +x_11153 = l_Lean_IR_ToIR_lowerLet___lambda__1(x_2, x_11150, x_11145, x_11152, x_11151, x_4, x_5, x_11149); +return x_11153; +} +} +} +else +{ +lean_object* x_11154; lean_object* x_11155; lean_object* x_11156; +lean_dec(x_11070); +lean_dec(x_11053); +lean_dec(x_8845); +lean_dec(x_5); +lean_dec(x_4); +lean_dec(x_2); +lean_dec(x_1); +x_11154 = lean_box(13); +if (lean_is_scalar(x_11059)) { + x_11155 = lean_alloc_ctor(0, 2, 0); +} else { + x_11155 = x_11059; +} +lean_ctor_set(x_11155, 0, x_11154); +lean_ctor_set(x_11155, 1, x_11058); +if (lean_is_scalar(x_11063)) { + x_11156 = lean_alloc_ctor(0, 2, 0); +} else { + x_11156 = x_11063; +} +lean_ctor_set(x_11156, 0, x_11155); +lean_ctor_set(x_11156, 1, x_11062); +return x_11156; +} +} +else +{ +lean_object* x_11157; lean_object* x_11158; lean_object* x_11159; +lean_dec(x_11070); +lean_dec(x_11063); +lean_dec(x_11059); +lean_dec(x_8845); +x_11157 = l_Lean_IR_instInhabitedArg; +x_11158 = lean_unsigned_to_nat(2u); +x_11159 = lean_array_get(x_11157, x_11053, x_11158); +lean_dec(x_11053); +if (lean_obj_tag(x_11159) == 0) +{ +lean_object* x_11160; lean_object* x_11161; lean_object* x_11162; lean_object* x_11163; lean_object* x_11164; lean_object* x_11165; lean_object* x_11166; +x_11160 = lean_ctor_get(x_11159, 0); +lean_inc(x_11160); +lean_dec(x_11159); +x_11161 = lean_ctor_get(x_1, 0); +lean_inc(x_11161); +lean_dec(x_1); +x_11162 = 
l_Lean_IR_ToIR_bindVarToVarId(x_11161, x_11160, x_11058, x_4, x_5, x_11062); +x_11163 = lean_ctor_get(x_11162, 0); +lean_inc(x_11163); +x_11164 = lean_ctor_get(x_11162, 1); +lean_inc(x_11164); +lean_dec(x_11162); +x_11165 = lean_ctor_get(x_11163, 1); +lean_inc(x_11165); +lean_dec(x_11163); +x_11166 = l_Lean_IR_ToIR_lowerCode(x_2, x_11165, x_4, x_5, x_11164); +return x_11166; +} +else +{ +lean_object* x_11167; lean_object* x_11168; lean_object* x_11169; lean_object* x_11170; lean_object* x_11171; lean_object* x_11172; +x_11167 = lean_ctor_get(x_1, 0); +lean_inc(x_11167); +lean_dec(x_1); +x_11168 = l_Lean_IR_ToIR_bindErased(x_11167, x_11058, x_4, x_5, x_11062); +x_11169 = lean_ctor_get(x_11168, 0); +lean_inc(x_11169); +x_11170 = lean_ctor_get(x_11168, 1); +lean_inc(x_11170); +lean_dec(x_11168); +x_11171 = lean_ctor_get(x_11169, 1); +lean_inc(x_11171); +lean_dec(x_11169); +x_11172 = l_Lean_IR_ToIR_lowerCode(x_2, x_11171, x_4, x_5, x_11170); +return x_11172; +} +} +} +case 1: +{ +lean_object* x_11173; lean_object* x_11174; lean_object* x_11201; lean_object* x_11202; +lean_dec(x_11069); +lean_dec(x_11064); +lean_dec(x_5945); +lean_dec(x_5944); +lean_inc(x_8845); +x_11201 = l_Lean_Compiler_LCNF_getMonoDecl_x3f(x_8845, x_4, x_5, x_11062); +x_11202 = lean_ctor_get(x_11201, 0); +lean_inc(x_11202); +if (lean_obj_tag(x_11202) == 0) +{ +lean_object* x_11203; lean_object* x_11204; lean_object* x_11205; +x_11203 = lean_ctor_get(x_11201, 1); +lean_inc(x_11203); +lean_dec(x_11201); +x_11204 = lean_box(0); +if (lean_is_scalar(x_11059)) { + x_11205 = lean_alloc_ctor(0, 2, 0); +} else { + x_11205 = x_11059; +} +lean_ctor_set(x_11205, 0, x_11204); +lean_ctor_set(x_11205, 1, x_11058); +x_11173 = x_11205; +x_11174 = x_11203; +goto block_11200; +} +else +{ +lean_object* x_11206; lean_object* x_11207; lean_object* x_11208; lean_object* x_11209; lean_object* x_11210; lean_object* x_11211; lean_object* x_11212; uint8_t x_11213; +lean_dec(x_11059); +x_11206 = lean_ctor_get(x_11201, 1); +lean_inc(x_11206); +if (lean_is_exclusive(x_11201)) { + lean_ctor_release(x_11201, 0); + lean_ctor_release(x_11201, 1); + x_11207 = x_11201; +} else { + lean_dec_ref(x_11201); + x_11207 = lean_box(0); +} +x_11208 = lean_ctor_get(x_11202, 0); +lean_inc(x_11208); +if (lean_is_exclusive(x_11202)) { + lean_ctor_release(x_11202, 0); + x_11209 = x_11202; +} else { + lean_dec_ref(x_11202); + x_11209 = lean_box(0); +} +x_11210 = lean_array_get_size(x_11053); +x_11211 = lean_ctor_get(x_11208, 3); +lean_inc(x_11211); +lean_dec(x_11208); +x_11212 = lean_array_get_size(x_11211); +lean_dec(x_11211); +x_11213 = lean_nat_dec_lt(x_11210, x_11212); +if (x_11213 == 0) +{ +uint8_t x_11214; +x_11214 = lean_nat_dec_eq(x_11210, x_11212); +if (x_11214 == 0) +{ +lean_object* x_11215; lean_object* x_11216; lean_object* x_11217; lean_object* x_11218; lean_object* x_11219; lean_object* x_11220; lean_object* x_11221; lean_object* x_11222; lean_object* x_11223; lean_object* x_11224; lean_object* x_11225; lean_object* x_11226; lean_object* x_11227; lean_object* x_11228; lean_object* x_11229; lean_object* x_11230; lean_object* x_11231; +x_11215 = lean_unsigned_to_nat(0u); +x_11216 = l_Array_extract___rarg(x_11053, x_11215, x_11212); +x_11217 = l_Array_extract___rarg(x_11053, x_11212, x_11210); +lean_dec(x_11210); +lean_inc(x_8845); +if (lean_is_scalar(x_11207)) { + x_11218 = lean_alloc_ctor(6, 2, 0); +} else { + x_11218 = x_11207; + lean_ctor_set_tag(x_11218, 6); +} +lean_ctor_set(x_11218, 0, x_8845); +lean_ctor_set(x_11218, 1, x_11216); +x_11219 = 
lean_ctor_get(x_1, 0); +lean_inc(x_11219); +x_11220 = l_Lean_IR_ToIR_bindVar(x_11219, x_11058, x_4, x_5, x_11206); +x_11221 = lean_ctor_get(x_11220, 0); +lean_inc(x_11221); +x_11222 = lean_ctor_get(x_11220, 1); +lean_inc(x_11222); +lean_dec(x_11220); +x_11223 = lean_ctor_get(x_11221, 0); +lean_inc(x_11223); +x_11224 = lean_ctor_get(x_11221, 1); +lean_inc(x_11224); +lean_dec(x_11221); +x_11225 = l_Lean_IR_ToIR_newVar(x_11224, x_4, x_5, x_11222); +x_11226 = lean_ctor_get(x_11225, 0); +lean_inc(x_11226); +x_11227 = lean_ctor_get(x_11225, 1); +lean_inc(x_11227); +lean_dec(x_11225); +x_11228 = lean_ctor_get(x_11226, 0); +lean_inc(x_11228); +x_11229 = lean_ctor_get(x_11226, 1); +lean_inc(x_11229); +lean_dec(x_11226); +x_11230 = lean_ctor_get(x_1, 2); +lean_inc(x_11230); +lean_inc(x_5); +lean_inc(x_4); +x_11231 = l_Lean_IR_ToIR_lowerType(x_11230, x_11229, x_4, x_5, x_11227); +if (lean_obj_tag(x_11231) == 0) +{ +lean_object* x_11232; lean_object* x_11233; lean_object* x_11234; lean_object* x_11235; lean_object* x_11236; +x_11232 = lean_ctor_get(x_11231, 0); +lean_inc(x_11232); +x_11233 = lean_ctor_get(x_11231, 1); +lean_inc(x_11233); +lean_dec(x_11231); +x_11234 = lean_ctor_get(x_11232, 0); +lean_inc(x_11234); +x_11235 = lean_ctor_get(x_11232, 1); +lean_inc(x_11235); +lean_dec(x_11232); +lean_inc(x_5); +lean_inc(x_4); +lean_inc(x_2); +x_11236 = l_Lean_IR_ToIR_lowerLet___lambda__3(x_2, x_11228, x_11217, x_11223, x_11218, x_11234, x_11235, x_4, x_5, x_11233); +if (lean_obj_tag(x_11236) == 0) +{ +lean_object* x_11237; lean_object* x_11238; lean_object* x_11239; lean_object* x_11240; lean_object* x_11241; lean_object* x_11242; lean_object* x_11243; +x_11237 = lean_ctor_get(x_11236, 0); +lean_inc(x_11237); +x_11238 = lean_ctor_get(x_11236, 1); +lean_inc(x_11238); +lean_dec(x_11236); +x_11239 = lean_ctor_get(x_11237, 0); +lean_inc(x_11239); +x_11240 = lean_ctor_get(x_11237, 1); +lean_inc(x_11240); +if (lean_is_exclusive(x_11237)) { + lean_ctor_release(x_11237, 0); + lean_ctor_release(x_11237, 1); + x_11241 = x_11237; +} else { + lean_dec_ref(x_11237); + x_11241 = lean_box(0); +} +if (lean_is_scalar(x_11209)) { + x_11242 = lean_alloc_ctor(1, 1, 0); +} else { + x_11242 = x_11209; +} +lean_ctor_set(x_11242, 0, x_11239); +if (lean_is_scalar(x_11241)) { + x_11243 = lean_alloc_ctor(0, 2, 0); +} else { + x_11243 = x_11241; +} +lean_ctor_set(x_11243, 0, x_11242); +lean_ctor_set(x_11243, 1, x_11240); +x_11173 = x_11243; +x_11174 = x_11238; +goto block_11200; +} +else +{ +lean_object* x_11244; lean_object* x_11245; lean_object* x_11246; lean_object* x_11247; +lean_dec(x_11209); +lean_dec(x_11063); +lean_dec(x_11053); +lean_dec(x_8845); +lean_dec(x_5); +lean_dec(x_4); +lean_dec(x_2); +lean_dec(x_1); +x_11244 = lean_ctor_get(x_11236, 0); +lean_inc(x_11244); +x_11245 = lean_ctor_get(x_11236, 1); +lean_inc(x_11245); +if (lean_is_exclusive(x_11236)) { + lean_ctor_release(x_11236, 0); + lean_ctor_release(x_11236, 1); + x_11246 = x_11236; +} else { + lean_dec_ref(x_11236); + x_11246 = lean_box(0); +} +if (lean_is_scalar(x_11246)) { + x_11247 = lean_alloc_ctor(1, 2, 0); +} else { + x_11247 = x_11246; +} +lean_ctor_set(x_11247, 0, x_11244); +lean_ctor_set(x_11247, 1, x_11245); +return x_11247; +} +} +else +{ +lean_object* x_11248; lean_object* x_11249; lean_object* x_11250; lean_object* x_11251; +lean_dec(x_11228); +lean_dec(x_11223); +lean_dec(x_11218); +lean_dec(x_11217); +lean_dec(x_11209); +lean_dec(x_11063); +lean_dec(x_11053); +lean_dec(x_8845); +lean_dec(x_5); +lean_dec(x_4); +lean_dec(x_2); +lean_dec(x_1); 
+x_11248 = lean_ctor_get(x_11231, 0); +lean_inc(x_11248); +x_11249 = lean_ctor_get(x_11231, 1); +lean_inc(x_11249); +if (lean_is_exclusive(x_11231)) { + lean_ctor_release(x_11231, 0); + lean_ctor_release(x_11231, 1); + x_11250 = x_11231; +} else { + lean_dec_ref(x_11231); + x_11250 = lean_box(0); +} +if (lean_is_scalar(x_11250)) { + x_11251 = lean_alloc_ctor(1, 2, 0); +} else { + x_11251 = x_11250; +} +lean_ctor_set(x_11251, 0, x_11248); +lean_ctor_set(x_11251, 1, x_11249); +return x_11251; +} +} +else +{ +lean_object* x_11252; lean_object* x_11253; lean_object* x_11254; lean_object* x_11255; lean_object* x_11256; lean_object* x_11257; lean_object* x_11258; lean_object* x_11259; lean_object* x_11260; +lean_dec(x_11212); +lean_dec(x_11210); +lean_inc(x_11053); +lean_inc(x_8845); +if (lean_is_scalar(x_11207)) { + x_11252 = lean_alloc_ctor(6, 2, 0); +} else { + x_11252 = x_11207; + lean_ctor_set_tag(x_11252, 6); +} +lean_ctor_set(x_11252, 0, x_8845); +lean_ctor_set(x_11252, 1, x_11053); +x_11253 = lean_ctor_get(x_1, 0); +lean_inc(x_11253); +x_11254 = l_Lean_IR_ToIR_bindVar(x_11253, x_11058, x_4, x_5, x_11206); +x_11255 = lean_ctor_get(x_11254, 0); +lean_inc(x_11255); +x_11256 = lean_ctor_get(x_11254, 1); +lean_inc(x_11256); +lean_dec(x_11254); +x_11257 = lean_ctor_get(x_11255, 0); +lean_inc(x_11257); +x_11258 = lean_ctor_get(x_11255, 1); +lean_inc(x_11258); +lean_dec(x_11255); +x_11259 = lean_ctor_get(x_1, 2); +lean_inc(x_11259); +lean_inc(x_5); +lean_inc(x_4); +x_11260 = l_Lean_IR_ToIR_lowerType(x_11259, x_11258, x_4, x_5, x_11256); +if (lean_obj_tag(x_11260) == 0) +{ +lean_object* x_11261; lean_object* x_11262; lean_object* x_11263; lean_object* x_11264; lean_object* x_11265; +x_11261 = lean_ctor_get(x_11260, 0); +lean_inc(x_11261); +x_11262 = lean_ctor_get(x_11260, 1); +lean_inc(x_11262); +lean_dec(x_11260); +x_11263 = lean_ctor_get(x_11261, 0); +lean_inc(x_11263); +x_11264 = lean_ctor_get(x_11261, 1); +lean_inc(x_11264); +lean_dec(x_11261); +lean_inc(x_5); +lean_inc(x_4); +lean_inc(x_2); +x_11265 = l_Lean_IR_ToIR_lowerLet___lambda__1(x_2, x_11257, x_11252, x_11263, x_11264, x_4, x_5, x_11262); +if (lean_obj_tag(x_11265) == 0) +{ +lean_object* x_11266; lean_object* x_11267; lean_object* x_11268; lean_object* x_11269; lean_object* x_11270; lean_object* x_11271; lean_object* x_11272; +x_11266 = lean_ctor_get(x_11265, 0); +lean_inc(x_11266); +x_11267 = lean_ctor_get(x_11265, 1); +lean_inc(x_11267); +lean_dec(x_11265); +x_11268 = lean_ctor_get(x_11266, 0); +lean_inc(x_11268); +x_11269 = lean_ctor_get(x_11266, 1); +lean_inc(x_11269); +if (lean_is_exclusive(x_11266)) { + lean_ctor_release(x_11266, 0); + lean_ctor_release(x_11266, 1); + x_11270 = x_11266; +} else { + lean_dec_ref(x_11266); + x_11270 = lean_box(0); +} +if (lean_is_scalar(x_11209)) { + x_11271 = lean_alloc_ctor(1, 1, 0); +} else { + x_11271 = x_11209; +} +lean_ctor_set(x_11271, 0, x_11268); +if (lean_is_scalar(x_11270)) { + x_11272 = lean_alloc_ctor(0, 2, 0); +} else { + x_11272 = x_11270; +} +lean_ctor_set(x_11272, 0, x_11271); +lean_ctor_set(x_11272, 1, x_11269); +x_11173 = x_11272; +x_11174 = x_11267; +goto block_11200; +} +else +{ +lean_object* x_11273; lean_object* x_11274; lean_object* x_11275; lean_object* x_11276; +lean_dec(x_11209); +lean_dec(x_11063); +lean_dec(x_11053); +lean_dec(x_8845); +lean_dec(x_5); +lean_dec(x_4); +lean_dec(x_2); +lean_dec(x_1); +x_11273 = lean_ctor_get(x_11265, 0); +lean_inc(x_11273); +x_11274 = lean_ctor_get(x_11265, 1); +lean_inc(x_11274); +if (lean_is_exclusive(x_11265)) { + 
lean_ctor_release(x_11265, 0); + lean_ctor_release(x_11265, 1); + x_11275 = x_11265; +} else { + lean_dec_ref(x_11265); + x_11275 = lean_box(0); +} +if (lean_is_scalar(x_11275)) { + x_11276 = lean_alloc_ctor(1, 2, 0); +} else { + x_11276 = x_11275; +} +lean_ctor_set(x_11276, 0, x_11273); +lean_ctor_set(x_11276, 1, x_11274); +return x_11276; +} +} +else +{ +lean_object* x_11277; lean_object* x_11278; lean_object* x_11279; lean_object* x_11280; +lean_dec(x_11257); +lean_dec(x_11252); +lean_dec(x_11209); +lean_dec(x_11063); +lean_dec(x_11053); +lean_dec(x_8845); +lean_dec(x_5); +lean_dec(x_4); +lean_dec(x_2); +lean_dec(x_1); +x_11277 = lean_ctor_get(x_11260, 0); +lean_inc(x_11277); +x_11278 = lean_ctor_get(x_11260, 1); +lean_inc(x_11278); +if (lean_is_exclusive(x_11260)) { + lean_ctor_release(x_11260, 0); + lean_ctor_release(x_11260, 1); + x_11279 = x_11260; +} else { + lean_dec_ref(x_11260); + x_11279 = lean_box(0); +} +if (lean_is_scalar(x_11279)) { + x_11280 = lean_alloc_ctor(1, 2, 0); +} else { + x_11280 = x_11279; +} +lean_ctor_set(x_11280, 0, x_11277); +lean_ctor_set(x_11280, 1, x_11278); +return x_11280; +} +} +} +else +{ +lean_object* x_11281; lean_object* x_11282; lean_object* x_11283; lean_object* x_11284; lean_object* x_11285; lean_object* x_11286; lean_object* x_11287; lean_object* x_11288; lean_object* x_11289; +lean_dec(x_11212); +lean_dec(x_11210); +lean_inc(x_11053); +lean_inc(x_8845); +if (lean_is_scalar(x_11207)) { + x_11281 = lean_alloc_ctor(7, 2, 0); +} else { + x_11281 = x_11207; + lean_ctor_set_tag(x_11281, 7); +} +lean_ctor_set(x_11281, 0, x_8845); +lean_ctor_set(x_11281, 1, x_11053); +x_11282 = lean_ctor_get(x_1, 0); +lean_inc(x_11282); +x_11283 = l_Lean_IR_ToIR_bindVar(x_11282, x_11058, x_4, x_5, x_11206); +x_11284 = lean_ctor_get(x_11283, 0); +lean_inc(x_11284); +x_11285 = lean_ctor_get(x_11283, 1); +lean_inc(x_11285); +lean_dec(x_11283); +x_11286 = lean_ctor_get(x_11284, 0); +lean_inc(x_11286); +x_11287 = lean_ctor_get(x_11284, 1); +lean_inc(x_11287); +lean_dec(x_11284); +x_11288 = lean_box(7); +lean_inc(x_5); +lean_inc(x_4); +lean_inc(x_2); +x_11289 = l_Lean_IR_ToIR_lowerLet___lambda__1(x_2, x_11286, x_11281, x_11288, x_11287, x_4, x_5, x_11285); +if (lean_obj_tag(x_11289) == 0) +{ +lean_object* x_11290; lean_object* x_11291; lean_object* x_11292; lean_object* x_11293; lean_object* x_11294; lean_object* x_11295; lean_object* x_11296; +x_11290 = lean_ctor_get(x_11289, 0); +lean_inc(x_11290); +x_11291 = lean_ctor_get(x_11289, 1); +lean_inc(x_11291); +lean_dec(x_11289); +x_11292 = lean_ctor_get(x_11290, 0); +lean_inc(x_11292); +x_11293 = lean_ctor_get(x_11290, 1); +lean_inc(x_11293); +if (lean_is_exclusive(x_11290)) { + lean_ctor_release(x_11290, 0); + lean_ctor_release(x_11290, 1); + x_11294 = x_11290; +} else { + lean_dec_ref(x_11290); + x_11294 = lean_box(0); +} +if (lean_is_scalar(x_11209)) { + x_11295 = lean_alloc_ctor(1, 1, 0); +} else { + x_11295 = x_11209; +} +lean_ctor_set(x_11295, 0, x_11292); +if (lean_is_scalar(x_11294)) { + x_11296 = lean_alloc_ctor(0, 2, 0); +} else { + x_11296 = x_11294; +} +lean_ctor_set(x_11296, 0, x_11295); +lean_ctor_set(x_11296, 1, x_11293); +x_11173 = x_11296; +x_11174 = x_11291; +goto block_11200; +} +else +{ +lean_object* x_11297; lean_object* x_11298; lean_object* x_11299; lean_object* x_11300; +lean_dec(x_11209); +lean_dec(x_11063); +lean_dec(x_11053); +lean_dec(x_8845); +lean_dec(x_5); +lean_dec(x_4); +lean_dec(x_2); +lean_dec(x_1); +x_11297 = lean_ctor_get(x_11289, 0); +lean_inc(x_11297); +x_11298 = lean_ctor_get(x_11289, 
1); +lean_inc(x_11298); +if (lean_is_exclusive(x_11289)) { + lean_ctor_release(x_11289, 0); + lean_ctor_release(x_11289, 1); + x_11299 = x_11289; +} else { + lean_dec_ref(x_11289); + x_11299 = lean_box(0); +} +if (lean_is_scalar(x_11299)) { + x_11300 = lean_alloc_ctor(1, 2, 0); +} else { + x_11300 = x_11299; +} +lean_ctor_set(x_11300, 0, x_11297); +lean_ctor_set(x_11300, 1, x_11298); +return x_11300; +} +} +} +block_11200: +{ +lean_object* x_11175; +x_11175 = lean_ctor_get(x_11173, 0); +lean_inc(x_11175); +if (lean_obj_tag(x_11175) == 0) +{ +lean_object* x_11176; lean_object* x_11177; lean_object* x_11178; lean_object* x_11179; lean_object* x_11180; lean_object* x_11181; lean_object* x_11182; lean_object* x_11183; lean_object* x_11184; lean_object* x_11185; +lean_dec(x_11063); +x_11176 = lean_ctor_get(x_11173, 1); +lean_inc(x_11176); +lean_dec(x_11173); +x_11177 = lean_alloc_ctor(6, 2, 0); +lean_ctor_set(x_11177, 0, x_8845); +lean_ctor_set(x_11177, 1, x_11053); +x_11178 = lean_ctor_get(x_1, 0); +lean_inc(x_11178); +x_11179 = l_Lean_IR_ToIR_bindVar(x_11178, x_11176, x_4, x_5, x_11174); +x_11180 = lean_ctor_get(x_11179, 0); +lean_inc(x_11180); +x_11181 = lean_ctor_get(x_11179, 1); +lean_inc(x_11181); +lean_dec(x_11179); +x_11182 = lean_ctor_get(x_11180, 0); +lean_inc(x_11182); +x_11183 = lean_ctor_get(x_11180, 1); +lean_inc(x_11183); +lean_dec(x_11180); +x_11184 = lean_ctor_get(x_1, 2); +lean_inc(x_11184); +lean_dec(x_1); +lean_inc(x_5); +lean_inc(x_4); +x_11185 = l_Lean_IR_ToIR_lowerType(x_11184, x_11183, x_4, x_5, x_11181); +if (lean_obj_tag(x_11185) == 0) +{ +lean_object* x_11186; lean_object* x_11187; lean_object* x_11188; lean_object* x_11189; lean_object* x_11190; +x_11186 = lean_ctor_get(x_11185, 0); +lean_inc(x_11186); +x_11187 = lean_ctor_get(x_11185, 1); +lean_inc(x_11187); +lean_dec(x_11185); +x_11188 = lean_ctor_get(x_11186, 0); +lean_inc(x_11188); +x_11189 = lean_ctor_get(x_11186, 1); +lean_inc(x_11189); +lean_dec(x_11186); +x_11190 = l_Lean_IR_ToIR_lowerLet___lambda__1(x_2, x_11182, x_11177, x_11188, x_11189, x_4, x_5, x_11187); +return x_11190; +} +else +{ +lean_object* x_11191; lean_object* x_11192; lean_object* x_11193; lean_object* x_11194; +lean_dec(x_11182); +lean_dec(x_11177); +lean_dec(x_5); +lean_dec(x_4); +lean_dec(x_2); +x_11191 = lean_ctor_get(x_11185, 0); +lean_inc(x_11191); +x_11192 = lean_ctor_get(x_11185, 1); +lean_inc(x_11192); +if (lean_is_exclusive(x_11185)) { + lean_ctor_release(x_11185, 0); + lean_ctor_release(x_11185, 1); + x_11193 = x_11185; +} else { + lean_dec_ref(x_11185); + x_11193 = lean_box(0); +} +if (lean_is_scalar(x_11193)) { + x_11194 = lean_alloc_ctor(1, 2, 0); +} else { + x_11194 = x_11193; +} +lean_ctor_set(x_11194, 0, x_11191); +lean_ctor_set(x_11194, 1, x_11192); +return x_11194; +} +} +else +{ +lean_object* x_11195; lean_object* x_11196; lean_object* x_11197; lean_object* x_11198; lean_object* x_11199; +lean_dec(x_11053); +lean_dec(x_8845); +lean_dec(x_5); +lean_dec(x_4); +lean_dec(x_2); +lean_dec(x_1); +x_11195 = lean_ctor_get(x_11173, 1); +lean_inc(x_11195); +if (lean_is_exclusive(x_11173)) { + lean_ctor_release(x_11173, 0); + lean_ctor_release(x_11173, 1); + x_11196 = x_11173; +} else { + lean_dec_ref(x_11173); + x_11196 = lean_box(0); +} +x_11197 = lean_ctor_get(x_11175, 0); +lean_inc(x_11197); +lean_dec(x_11175); +if (lean_is_scalar(x_11196)) { + x_11198 = lean_alloc_ctor(0, 2, 0); +} else { + x_11198 = x_11196; +} +lean_ctor_set(x_11198, 0, x_11197); +lean_ctor_set(x_11198, 1, x_11195); +if (lean_is_scalar(x_11063)) { + x_11199 = 
lean_alloc_ctor(0, 2, 0); +} else { + x_11199 = x_11063; +} +lean_ctor_set(x_11199, 0, x_11198); +lean_ctor_set(x_11199, 1, x_11174); +return x_11199; +} +} +} +case 2: +{ +lean_object* x_11301; lean_object* x_11302; +lean_dec(x_11069); +lean_dec(x_11064); +lean_dec(x_11063); +lean_dec(x_11059); +lean_dec(x_11053); +lean_dec(x_8845); +lean_dec(x_5945); +lean_dec(x_5944); +lean_dec(x_2); +lean_dec(x_1); +x_11301 = l_Lean_IR_ToIR_lowerLet___closed__18; +x_11302 = l_panic___at_Lean_IR_ToIR_lowerAlt_loop___spec__1(x_11301, x_11058, x_4, x_5, x_11062); +return x_11302; +} +case 3: +{ +lean_object* x_11303; lean_object* x_11304; lean_object* x_11331; lean_object* x_11332; +lean_dec(x_11069); +lean_dec(x_11064); +lean_dec(x_5945); +lean_dec(x_5944); +lean_inc(x_8845); +x_11331 = l_Lean_Compiler_LCNF_getMonoDecl_x3f(x_8845, x_4, x_5, x_11062); +x_11332 = lean_ctor_get(x_11331, 0); +lean_inc(x_11332); +if (lean_obj_tag(x_11332) == 0) +{ +lean_object* x_11333; lean_object* x_11334; lean_object* x_11335; +x_11333 = lean_ctor_get(x_11331, 1); +lean_inc(x_11333); +lean_dec(x_11331); +x_11334 = lean_box(0); +if (lean_is_scalar(x_11059)) { + x_11335 = lean_alloc_ctor(0, 2, 0); +} else { + x_11335 = x_11059; +} +lean_ctor_set(x_11335, 0, x_11334); +lean_ctor_set(x_11335, 1, x_11058); +x_11303 = x_11335; +x_11304 = x_11333; +goto block_11330; +} +else +{ +lean_object* x_11336; lean_object* x_11337; lean_object* x_11338; lean_object* x_11339; lean_object* x_11340; lean_object* x_11341; lean_object* x_11342; uint8_t x_11343; +lean_dec(x_11059); +x_11336 = lean_ctor_get(x_11331, 1); +lean_inc(x_11336); +if (lean_is_exclusive(x_11331)) { + lean_ctor_release(x_11331, 0); + lean_ctor_release(x_11331, 1); + x_11337 = x_11331; +} else { + lean_dec_ref(x_11331); + x_11337 = lean_box(0); +} +x_11338 = lean_ctor_get(x_11332, 0); +lean_inc(x_11338); +if (lean_is_exclusive(x_11332)) { + lean_ctor_release(x_11332, 0); + x_11339 = x_11332; +} else { + lean_dec_ref(x_11332); + x_11339 = lean_box(0); +} +x_11340 = lean_array_get_size(x_11053); +x_11341 = lean_ctor_get(x_11338, 3); +lean_inc(x_11341); +lean_dec(x_11338); +x_11342 = lean_array_get_size(x_11341); +lean_dec(x_11341); +x_11343 = lean_nat_dec_lt(x_11340, x_11342); +if (x_11343 == 0) +{ +uint8_t x_11344; +x_11344 = lean_nat_dec_eq(x_11340, x_11342); +if (x_11344 == 0) +{ +lean_object* x_11345; lean_object* x_11346; lean_object* x_11347; lean_object* x_11348; lean_object* x_11349; lean_object* x_11350; lean_object* x_11351; lean_object* x_11352; lean_object* x_11353; lean_object* x_11354; lean_object* x_11355; lean_object* x_11356; lean_object* x_11357; lean_object* x_11358; lean_object* x_11359; lean_object* x_11360; lean_object* x_11361; +x_11345 = lean_unsigned_to_nat(0u); +x_11346 = l_Array_extract___rarg(x_11053, x_11345, x_11342); +x_11347 = l_Array_extract___rarg(x_11053, x_11342, x_11340); +lean_dec(x_11340); +lean_inc(x_8845); +if (lean_is_scalar(x_11337)) { + x_11348 = lean_alloc_ctor(6, 2, 0); +} else { + x_11348 = x_11337; + lean_ctor_set_tag(x_11348, 6); +} +lean_ctor_set(x_11348, 0, x_8845); +lean_ctor_set(x_11348, 1, x_11346); +x_11349 = lean_ctor_get(x_1, 0); +lean_inc(x_11349); +x_11350 = l_Lean_IR_ToIR_bindVar(x_11349, x_11058, x_4, x_5, x_11336); +x_11351 = lean_ctor_get(x_11350, 0); +lean_inc(x_11351); +x_11352 = lean_ctor_get(x_11350, 1); +lean_inc(x_11352); +lean_dec(x_11350); +x_11353 = lean_ctor_get(x_11351, 0); +lean_inc(x_11353); +x_11354 = lean_ctor_get(x_11351, 1); +lean_inc(x_11354); +lean_dec(x_11351); +x_11355 = 
l_Lean_IR_ToIR_newVar(x_11354, x_4, x_5, x_11352); +x_11356 = lean_ctor_get(x_11355, 0); +lean_inc(x_11356); +x_11357 = lean_ctor_get(x_11355, 1); +lean_inc(x_11357); +lean_dec(x_11355); +x_11358 = lean_ctor_get(x_11356, 0); +lean_inc(x_11358); +x_11359 = lean_ctor_get(x_11356, 1); +lean_inc(x_11359); +lean_dec(x_11356); +x_11360 = lean_ctor_get(x_1, 2); +lean_inc(x_11360); +lean_inc(x_5); +lean_inc(x_4); +x_11361 = l_Lean_IR_ToIR_lowerType(x_11360, x_11359, x_4, x_5, x_11357); +if (lean_obj_tag(x_11361) == 0) +{ +lean_object* x_11362; lean_object* x_11363; lean_object* x_11364; lean_object* x_11365; lean_object* x_11366; +x_11362 = lean_ctor_get(x_11361, 0); +lean_inc(x_11362); +x_11363 = lean_ctor_get(x_11361, 1); +lean_inc(x_11363); +lean_dec(x_11361); +x_11364 = lean_ctor_get(x_11362, 0); +lean_inc(x_11364); +x_11365 = lean_ctor_get(x_11362, 1); +lean_inc(x_11365); +lean_dec(x_11362); +lean_inc(x_5); +lean_inc(x_4); +lean_inc(x_2); +x_11366 = l_Lean_IR_ToIR_lowerLet___lambda__3(x_2, x_11358, x_11347, x_11353, x_11348, x_11364, x_11365, x_4, x_5, x_11363); +if (lean_obj_tag(x_11366) == 0) +{ +lean_object* x_11367; lean_object* x_11368; lean_object* x_11369; lean_object* x_11370; lean_object* x_11371; lean_object* x_11372; lean_object* x_11373; +x_11367 = lean_ctor_get(x_11366, 0); +lean_inc(x_11367); +x_11368 = lean_ctor_get(x_11366, 1); +lean_inc(x_11368); +lean_dec(x_11366); +x_11369 = lean_ctor_get(x_11367, 0); +lean_inc(x_11369); +x_11370 = lean_ctor_get(x_11367, 1); +lean_inc(x_11370); +if (lean_is_exclusive(x_11367)) { + lean_ctor_release(x_11367, 0); + lean_ctor_release(x_11367, 1); + x_11371 = x_11367; +} else { + lean_dec_ref(x_11367); + x_11371 = lean_box(0); +} +if (lean_is_scalar(x_11339)) { + x_11372 = lean_alloc_ctor(1, 1, 0); +} else { + x_11372 = x_11339; +} +lean_ctor_set(x_11372, 0, x_11369); +if (lean_is_scalar(x_11371)) { + x_11373 = lean_alloc_ctor(0, 2, 0); +} else { + x_11373 = x_11371; +} +lean_ctor_set(x_11373, 0, x_11372); +lean_ctor_set(x_11373, 1, x_11370); +x_11303 = x_11373; +x_11304 = x_11368; +goto block_11330; +} +else +{ +lean_object* x_11374; lean_object* x_11375; lean_object* x_11376; lean_object* x_11377; +lean_dec(x_11339); +lean_dec(x_11063); +lean_dec(x_11053); +lean_dec(x_8845); +lean_dec(x_5); +lean_dec(x_4); +lean_dec(x_2); +lean_dec(x_1); +x_11374 = lean_ctor_get(x_11366, 0); +lean_inc(x_11374); +x_11375 = lean_ctor_get(x_11366, 1); +lean_inc(x_11375); +if (lean_is_exclusive(x_11366)) { + lean_ctor_release(x_11366, 0); + lean_ctor_release(x_11366, 1); + x_11376 = x_11366; +} else { + lean_dec_ref(x_11366); + x_11376 = lean_box(0); +} +if (lean_is_scalar(x_11376)) { + x_11377 = lean_alloc_ctor(1, 2, 0); +} else { + x_11377 = x_11376; +} +lean_ctor_set(x_11377, 0, x_11374); +lean_ctor_set(x_11377, 1, x_11375); +return x_11377; +} +} +else +{ +lean_object* x_11378; lean_object* x_11379; lean_object* x_11380; lean_object* x_11381; +lean_dec(x_11358); +lean_dec(x_11353); +lean_dec(x_11348); +lean_dec(x_11347); +lean_dec(x_11339); +lean_dec(x_11063); +lean_dec(x_11053); +lean_dec(x_8845); +lean_dec(x_5); +lean_dec(x_4); +lean_dec(x_2); +lean_dec(x_1); +x_11378 = lean_ctor_get(x_11361, 0); +lean_inc(x_11378); +x_11379 = lean_ctor_get(x_11361, 1); +lean_inc(x_11379); +if (lean_is_exclusive(x_11361)) { + lean_ctor_release(x_11361, 0); + lean_ctor_release(x_11361, 1); + x_11380 = x_11361; +} else { + lean_dec_ref(x_11361); + x_11380 = lean_box(0); +} +if (lean_is_scalar(x_11380)) { + x_11381 = lean_alloc_ctor(1, 2, 0); +} else { + x_11381 = x_11380; +} 
+lean_ctor_set(x_11381, 0, x_11378); +lean_ctor_set(x_11381, 1, x_11379); +return x_11381; +} +} +else +{ +lean_object* x_11382; lean_object* x_11383; lean_object* x_11384; lean_object* x_11385; lean_object* x_11386; lean_object* x_11387; lean_object* x_11388; lean_object* x_11389; lean_object* x_11390; +lean_dec(x_11342); +lean_dec(x_11340); +lean_inc(x_11053); +lean_inc(x_8845); +if (lean_is_scalar(x_11337)) { + x_11382 = lean_alloc_ctor(6, 2, 0); +} else { + x_11382 = x_11337; + lean_ctor_set_tag(x_11382, 6); +} +lean_ctor_set(x_11382, 0, x_8845); +lean_ctor_set(x_11382, 1, x_11053); +x_11383 = lean_ctor_get(x_1, 0); +lean_inc(x_11383); +x_11384 = l_Lean_IR_ToIR_bindVar(x_11383, x_11058, x_4, x_5, x_11336); +x_11385 = lean_ctor_get(x_11384, 0); +lean_inc(x_11385); +x_11386 = lean_ctor_get(x_11384, 1); +lean_inc(x_11386); +lean_dec(x_11384); +x_11387 = lean_ctor_get(x_11385, 0); +lean_inc(x_11387); +x_11388 = lean_ctor_get(x_11385, 1); +lean_inc(x_11388); +lean_dec(x_11385); +x_11389 = lean_ctor_get(x_1, 2); +lean_inc(x_11389); +lean_inc(x_5); +lean_inc(x_4); +x_11390 = l_Lean_IR_ToIR_lowerType(x_11389, x_11388, x_4, x_5, x_11386); +if (lean_obj_tag(x_11390) == 0) +{ +lean_object* x_11391; lean_object* x_11392; lean_object* x_11393; lean_object* x_11394; lean_object* x_11395; +x_11391 = lean_ctor_get(x_11390, 0); +lean_inc(x_11391); +x_11392 = lean_ctor_get(x_11390, 1); +lean_inc(x_11392); +lean_dec(x_11390); +x_11393 = lean_ctor_get(x_11391, 0); +lean_inc(x_11393); +x_11394 = lean_ctor_get(x_11391, 1); +lean_inc(x_11394); +lean_dec(x_11391); +lean_inc(x_5); +lean_inc(x_4); +lean_inc(x_2); +x_11395 = l_Lean_IR_ToIR_lowerLet___lambda__1(x_2, x_11387, x_11382, x_11393, x_11394, x_4, x_5, x_11392); +if (lean_obj_tag(x_11395) == 0) +{ +lean_object* x_11396; lean_object* x_11397; lean_object* x_11398; lean_object* x_11399; lean_object* x_11400; lean_object* x_11401; lean_object* x_11402; +x_11396 = lean_ctor_get(x_11395, 0); +lean_inc(x_11396); +x_11397 = lean_ctor_get(x_11395, 1); +lean_inc(x_11397); +lean_dec(x_11395); +x_11398 = lean_ctor_get(x_11396, 0); +lean_inc(x_11398); +x_11399 = lean_ctor_get(x_11396, 1); +lean_inc(x_11399); +if (lean_is_exclusive(x_11396)) { + lean_ctor_release(x_11396, 0); + lean_ctor_release(x_11396, 1); + x_11400 = x_11396; +} else { + lean_dec_ref(x_11396); + x_11400 = lean_box(0); +} +if (lean_is_scalar(x_11339)) { + x_11401 = lean_alloc_ctor(1, 1, 0); +} else { + x_11401 = x_11339; +} +lean_ctor_set(x_11401, 0, x_11398); +if (lean_is_scalar(x_11400)) { + x_11402 = lean_alloc_ctor(0, 2, 0); +} else { + x_11402 = x_11400; +} +lean_ctor_set(x_11402, 0, x_11401); +lean_ctor_set(x_11402, 1, x_11399); +x_11303 = x_11402; +x_11304 = x_11397; +goto block_11330; +} +else +{ +lean_object* x_11403; lean_object* x_11404; lean_object* x_11405; lean_object* x_11406; +lean_dec(x_11339); +lean_dec(x_11063); +lean_dec(x_11053); +lean_dec(x_8845); +lean_dec(x_5); +lean_dec(x_4); +lean_dec(x_2); +lean_dec(x_1); +x_11403 = lean_ctor_get(x_11395, 0); +lean_inc(x_11403); +x_11404 = lean_ctor_get(x_11395, 1); +lean_inc(x_11404); +if (lean_is_exclusive(x_11395)) { + lean_ctor_release(x_11395, 0); + lean_ctor_release(x_11395, 1); + x_11405 = x_11395; +} else { + lean_dec_ref(x_11395); + x_11405 = lean_box(0); +} +if (lean_is_scalar(x_11405)) { + x_11406 = lean_alloc_ctor(1, 2, 0); +} else { + x_11406 = x_11405; +} +lean_ctor_set(x_11406, 0, x_11403); +lean_ctor_set(x_11406, 1, x_11404); +return x_11406; +} +} +else +{ +lean_object* x_11407; lean_object* x_11408; lean_object* x_11409; 
lean_object* x_11410; +lean_dec(x_11387); +lean_dec(x_11382); +lean_dec(x_11339); +lean_dec(x_11063); +lean_dec(x_11053); +lean_dec(x_8845); +lean_dec(x_5); +lean_dec(x_4); +lean_dec(x_2); +lean_dec(x_1); +x_11407 = lean_ctor_get(x_11390, 0); +lean_inc(x_11407); +x_11408 = lean_ctor_get(x_11390, 1); +lean_inc(x_11408); +if (lean_is_exclusive(x_11390)) { + lean_ctor_release(x_11390, 0); + lean_ctor_release(x_11390, 1); + x_11409 = x_11390; +} else { + lean_dec_ref(x_11390); + x_11409 = lean_box(0); +} +if (lean_is_scalar(x_11409)) { + x_11410 = lean_alloc_ctor(1, 2, 0); +} else { + x_11410 = x_11409; +} +lean_ctor_set(x_11410, 0, x_11407); +lean_ctor_set(x_11410, 1, x_11408); +return x_11410; +} +} +} +else +{ +lean_object* x_11411; lean_object* x_11412; lean_object* x_11413; lean_object* x_11414; lean_object* x_11415; lean_object* x_11416; lean_object* x_11417; lean_object* x_11418; lean_object* x_11419; +lean_dec(x_11342); +lean_dec(x_11340); +lean_inc(x_11053); +lean_inc(x_8845); +if (lean_is_scalar(x_11337)) { + x_11411 = lean_alloc_ctor(7, 2, 0); +} else { + x_11411 = x_11337; + lean_ctor_set_tag(x_11411, 7); +} +lean_ctor_set(x_11411, 0, x_8845); +lean_ctor_set(x_11411, 1, x_11053); +x_11412 = lean_ctor_get(x_1, 0); +lean_inc(x_11412); +x_11413 = l_Lean_IR_ToIR_bindVar(x_11412, x_11058, x_4, x_5, x_11336); +x_11414 = lean_ctor_get(x_11413, 0); +lean_inc(x_11414); +x_11415 = lean_ctor_get(x_11413, 1); +lean_inc(x_11415); +lean_dec(x_11413); +x_11416 = lean_ctor_get(x_11414, 0); +lean_inc(x_11416); +x_11417 = lean_ctor_get(x_11414, 1); +lean_inc(x_11417); +lean_dec(x_11414); +x_11418 = lean_box(7); +lean_inc(x_5); +lean_inc(x_4); +lean_inc(x_2); +x_11419 = l_Lean_IR_ToIR_lowerLet___lambda__1(x_2, x_11416, x_11411, x_11418, x_11417, x_4, x_5, x_11415); +if (lean_obj_tag(x_11419) == 0) +{ +lean_object* x_11420; lean_object* x_11421; lean_object* x_11422; lean_object* x_11423; lean_object* x_11424; lean_object* x_11425; lean_object* x_11426; +x_11420 = lean_ctor_get(x_11419, 0); +lean_inc(x_11420); +x_11421 = lean_ctor_get(x_11419, 1); +lean_inc(x_11421); +lean_dec(x_11419); +x_11422 = lean_ctor_get(x_11420, 0); +lean_inc(x_11422); +x_11423 = lean_ctor_get(x_11420, 1); +lean_inc(x_11423); +if (lean_is_exclusive(x_11420)) { + lean_ctor_release(x_11420, 0); + lean_ctor_release(x_11420, 1); + x_11424 = x_11420; +} else { + lean_dec_ref(x_11420); + x_11424 = lean_box(0); +} +if (lean_is_scalar(x_11339)) { + x_11425 = lean_alloc_ctor(1, 1, 0); +} else { + x_11425 = x_11339; +} +lean_ctor_set(x_11425, 0, x_11422); +if (lean_is_scalar(x_11424)) { + x_11426 = lean_alloc_ctor(0, 2, 0); +} else { + x_11426 = x_11424; +} +lean_ctor_set(x_11426, 0, x_11425); +lean_ctor_set(x_11426, 1, x_11423); +x_11303 = x_11426; +x_11304 = x_11421; +goto block_11330; +} +else +{ +lean_object* x_11427; lean_object* x_11428; lean_object* x_11429; lean_object* x_11430; +lean_dec(x_11339); +lean_dec(x_11063); +lean_dec(x_11053); +lean_dec(x_8845); +lean_dec(x_5); +lean_dec(x_4); +lean_dec(x_2); +lean_dec(x_1); +x_11427 = lean_ctor_get(x_11419, 0); +lean_inc(x_11427); +x_11428 = lean_ctor_get(x_11419, 1); +lean_inc(x_11428); +if (lean_is_exclusive(x_11419)) { + lean_ctor_release(x_11419, 0); + lean_ctor_release(x_11419, 1); + x_11429 = x_11419; +} else { + lean_dec_ref(x_11419); + x_11429 = lean_box(0); +} +if (lean_is_scalar(x_11429)) { + x_11430 = lean_alloc_ctor(1, 2, 0); +} else { + x_11430 = x_11429; +} +lean_ctor_set(x_11430, 0, x_11427); +lean_ctor_set(x_11430, 1, x_11428); +return x_11430; +} +} +} +block_11330: 
+{ +lean_object* x_11305; +x_11305 = lean_ctor_get(x_11303, 0); +lean_inc(x_11305); +if (lean_obj_tag(x_11305) == 0) +{ +lean_object* x_11306; lean_object* x_11307; lean_object* x_11308; lean_object* x_11309; lean_object* x_11310; lean_object* x_11311; lean_object* x_11312; lean_object* x_11313; lean_object* x_11314; lean_object* x_11315; +lean_dec(x_11063); +x_11306 = lean_ctor_get(x_11303, 1); +lean_inc(x_11306); +lean_dec(x_11303); +x_11307 = lean_alloc_ctor(6, 2, 0); +lean_ctor_set(x_11307, 0, x_8845); +lean_ctor_set(x_11307, 1, x_11053); +x_11308 = lean_ctor_get(x_1, 0); +lean_inc(x_11308); +x_11309 = l_Lean_IR_ToIR_bindVar(x_11308, x_11306, x_4, x_5, x_11304); +x_11310 = lean_ctor_get(x_11309, 0); +lean_inc(x_11310); +x_11311 = lean_ctor_get(x_11309, 1); +lean_inc(x_11311); +lean_dec(x_11309); +x_11312 = lean_ctor_get(x_11310, 0); +lean_inc(x_11312); +x_11313 = lean_ctor_get(x_11310, 1); +lean_inc(x_11313); +lean_dec(x_11310); +x_11314 = lean_ctor_get(x_1, 2); +lean_inc(x_11314); +lean_dec(x_1); +lean_inc(x_5); +lean_inc(x_4); +x_11315 = l_Lean_IR_ToIR_lowerType(x_11314, x_11313, x_4, x_5, x_11311); +if (lean_obj_tag(x_11315) == 0) +{ +lean_object* x_11316; lean_object* x_11317; lean_object* x_11318; lean_object* x_11319; lean_object* x_11320; +x_11316 = lean_ctor_get(x_11315, 0); +lean_inc(x_11316); +x_11317 = lean_ctor_get(x_11315, 1); +lean_inc(x_11317); +lean_dec(x_11315); +x_11318 = lean_ctor_get(x_11316, 0); +lean_inc(x_11318); +x_11319 = lean_ctor_get(x_11316, 1); +lean_inc(x_11319); +lean_dec(x_11316); +x_11320 = l_Lean_IR_ToIR_lowerLet___lambda__1(x_2, x_11312, x_11307, x_11318, x_11319, x_4, x_5, x_11317); +return x_11320; +} +else +{ +lean_object* x_11321; lean_object* x_11322; lean_object* x_11323; lean_object* x_11324; +lean_dec(x_11312); +lean_dec(x_11307); +lean_dec(x_5); +lean_dec(x_4); +lean_dec(x_2); +x_11321 = lean_ctor_get(x_11315, 0); +lean_inc(x_11321); +x_11322 = lean_ctor_get(x_11315, 1); +lean_inc(x_11322); +if (lean_is_exclusive(x_11315)) { + lean_ctor_release(x_11315, 0); + lean_ctor_release(x_11315, 1); + x_11323 = x_11315; +} else { + lean_dec_ref(x_11315); + x_11323 = lean_box(0); +} +if (lean_is_scalar(x_11323)) { + x_11324 = lean_alloc_ctor(1, 2, 0); +} else { + x_11324 = x_11323; +} +lean_ctor_set(x_11324, 0, x_11321); +lean_ctor_set(x_11324, 1, x_11322); +return x_11324; +} +} +else +{ +lean_object* x_11325; lean_object* x_11326; lean_object* x_11327; lean_object* x_11328; lean_object* x_11329; +lean_dec(x_11053); +lean_dec(x_8845); +lean_dec(x_5); +lean_dec(x_4); +lean_dec(x_2); +lean_dec(x_1); +x_11325 = lean_ctor_get(x_11303, 1); +lean_inc(x_11325); +if (lean_is_exclusive(x_11303)) { + lean_ctor_release(x_11303, 0); + lean_ctor_release(x_11303, 1); + x_11326 = x_11303; +} else { + lean_dec_ref(x_11303); + x_11326 = lean_box(0); +} +x_11327 = lean_ctor_get(x_11305, 0); +lean_inc(x_11327); +lean_dec(x_11305); +if (lean_is_scalar(x_11326)) { + x_11328 = lean_alloc_ctor(0, 2, 0); +} else { + x_11328 = x_11326; +} +lean_ctor_set(x_11328, 0, x_11327); +lean_ctor_set(x_11328, 1, x_11325); +if (lean_is_scalar(x_11063)) { + x_11329 = lean_alloc_ctor(0, 2, 0); +} else { + x_11329 = x_11063; +} +lean_ctor_set(x_11329, 0, x_11328); +lean_ctor_set(x_11329, 1, x_11304); +return x_11329; +} +} +} +case 4: +{ +lean_object* x_11431; lean_object* x_11432; uint8_t x_11433; +lean_dec(x_11064); +lean_dec(x_11063); +lean_dec(x_11059); +lean_dec(x_5945); +lean_dec(x_5944); +if (lean_is_exclusive(x_11069)) { + lean_ctor_release(x_11069, 0); + x_11431 = x_11069; +} else { 
+ lean_dec_ref(x_11069); + x_11431 = lean_box(0); +} +x_11432 = l_Lean_IR_ToIR_lowerLet___closed__20; +x_11433 = lean_name_eq(x_8845, x_11432); +if (x_11433 == 0) +{ +uint8_t x_11434; lean_object* x_11435; lean_object* x_11436; lean_object* x_11437; lean_object* x_11438; lean_object* x_11439; lean_object* x_11440; lean_object* x_11441; lean_object* x_11442; lean_object* x_11443; +lean_dec(x_11053); +lean_dec(x_2); +lean_dec(x_1); +x_11434 = 1; +x_11435 = l_Lean_IR_ToIR_lowerLet___closed__14; +x_11436 = l_Lean_Name_toString(x_8845, x_11434, x_11435); +if (lean_is_scalar(x_11431)) { + x_11437 = lean_alloc_ctor(3, 1, 0); +} else { + x_11437 = x_11431; + lean_ctor_set_tag(x_11437, 3); +} +lean_ctor_set(x_11437, 0, x_11436); +x_11438 = l_Lean_IR_ToIR_lowerLet___closed__22; +x_11439 = lean_alloc_ctor(5, 2, 0); +lean_ctor_set(x_11439, 0, x_11438); +lean_ctor_set(x_11439, 1, x_11437); +x_11440 = l_Lean_IR_ToIR_lowerLet___closed__24; +x_11441 = lean_alloc_ctor(5, 2, 0); +lean_ctor_set(x_11441, 0, x_11439); +lean_ctor_set(x_11441, 1, x_11440); +x_11442 = l_Lean_MessageData_ofFormat(x_11441); +x_11443 = l_Lean_throwError___at_Lean_IR_ToIR_lowerLet___spec__1(x_11442, x_11058, x_4, x_5, x_11062); +lean_dec(x_5); +lean_dec(x_4); +lean_dec(x_11058); +return x_11443; +} +else +{ +lean_object* x_11444; lean_object* x_11445; lean_object* x_11446; +lean_dec(x_11431); +lean_dec(x_8845); +x_11444 = l_Lean_IR_instInhabitedArg; +x_11445 = lean_unsigned_to_nat(2u); +x_11446 = lean_array_get(x_11444, x_11053, x_11445); +lean_dec(x_11053); +if (lean_obj_tag(x_11446) == 0) +{ +lean_object* x_11447; lean_object* x_11448; lean_object* x_11449; lean_object* x_11450; lean_object* x_11451; lean_object* x_11452; lean_object* x_11453; +x_11447 = lean_ctor_get(x_11446, 0); +lean_inc(x_11447); +lean_dec(x_11446); +x_11448 = lean_ctor_get(x_1, 0); +lean_inc(x_11448); +lean_dec(x_1); +x_11449 = l_Lean_IR_ToIR_bindVarToVarId(x_11448, x_11447, x_11058, x_4, x_5, x_11062); +x_11450 = lean_ctor_get(x_11449, 0); +lean_inc(x_11450); +x_11451 = lean_ctor_get(x_11449, 1); +lean_inc(x_11451); +lean_dec(x_11449); +x_11452 = lean_ctor_get(x_11450, 1); +lean_inc(x_11452); +lean_dec(x_11450); +x_11453 = l_Lean_IR_ToIR_lowerCode(x_2, x_11452, x_4, x_5, x_11451); +return x_11453; +} +else +{ +lean_object* x_11454; lean_object* x_11455; lean_object* x_11456; lean_object* x_11457; lean_object* x_11458; lean_object* x_11459; +x_11454 = lean_ctor_get(x_1, 0); +lean_inc(x_11454); +lean_dec(x_1); +x_11455 = l_Lean_IR_ToIR_bindErased(x_11454, x_11058, x_4, x_5, x_11062); +x_11456 = lean_ctor_get(x_11455, 0); +lean_inc(x_11456); +x_11457 = lean_ctor_get(x_11455, 1); +lean_inc(x_11457); +lean_dec(x_11455); +x_11458 = lean_ctor_get(x_11456, 1); +lean_inc(x_11458); +lean_dec(x_11456); +x_11459 = l_Lean_IR_ToIR_lowerCode(x_2, x_11458, x_4, x_5, x_11457); +return x_11459; +} +} +} +case 5: +{ +lean_object* x_11460; lean_object* x_11461; +lean_dec(x_11069); +lean_dec(x_11064); +lean_dec(x_11063); +lean_dec(x_11059); +lean_dec(x_11053); +lean_dec(x_8845); +lean_dec(x_5945); +lean_dec(x_5944); +lean_dec(x_2); +lean_dec(x_1); +x_11460 = l_Lean_IR_ToIR_lowerLet___closed__26; +x_11461 = l_panic___at_Lean_IR_ToIR_lowerAlt_loop___spec__1(x_11460, x_11058, x_4, x_5, x_11062); +return x_11461; +} +case 6: +{ +lean_object* x_11462; uint8_t x_11463; +x_11462 = lean_ctor_get(x_11069, 0); +lean_inc(x_11462); +lean_dec(x_11069); +lean_inc(x_8845); +x_11463 = l_Lean_isExtern(x_11064, x_8845); +if (x_11463 == 0) +{ +lean_object* x_11464; +lean_dec(x_11063); 
+lean_dec(x_11059); +lean_dec(x_11053); +lean_inc(x_5); +lean_inc(x_4); +x_11464 = l_Lean_IR_ToIR_getCtorInfo(x_8845, x_11058, x_4, x_5, x_11062); +if (lean_obj_tag(x_11464) == 0) +{ +lean_object* x_11465; lean_object* x_11466; lean_object* x_11467; lean_object* x_11468; lean_object* x_11469; lean_object* x_11470; lean_object* x_11471; lean_object* x_11472; lean_object* x_11473; lean_object* x_11474; lean_object* x_11475; lean_object* x_11476; lean_object* x_11477; lean_object* x_11478; lean_object* x_11479; lean_object* x_11480; lean_object* x_11481; lean_object* x_11482; lean_object* x_11483; lean_object* x_11484; +x_11465 = lean_ctor_get(x_11464, 0); +lean_inc(x_11465); +x_11466 = lean_ctor_get(x_11465, 0); +lean_inc(x_11466); +x_11467 = lean_ctor_get(x_11464, 1); +lean_inc(x_11467); +lean_dec(x_11464); +x_11468 = lean_ctor_get(x_11465, 1); +lean_inc(x_11468); +lean_dec(x_11465); +x_11469 = lean_ctor_get(x_11466, 0); +lean_inc(x_11469); +x_11470 = lean_ctor_get(x_11466, 1); +lean_inc(x_11470); +lean_dec(x_11466); +x_11471 = lean_ctor_get(x_11462, 3); +lean_inc(x_11471); +lean_dec(x_11462); +x_11472 = lean_array_get_size(x_5944); +x_11473 = l_Array_extract___rarg(x_5944, x_11471, x_11472); +lean_dec(x_11472); +lean_dec(x_5944); +x_11474 = lean_array_get_size(x_11470); +x_11475 = lean_unsigned_to_nat(0u); +x_11476 = lean_unsigned_to_nat(1u); +if (lean_is_scalar(x_5945)) { + x_11477 = lean_alloc_ctor(0, 3, 0); +} else { + x_11477 = x_5945; + lean_ctor_set_tag(x_11477, 0); +} +lean_ctor_set(x_11477, 0, x_11475); +lean_ctor_set(x_11477, 1, x_11474); +lean_ctor_set(x_11477, 2, x_11476); +x_11478 = l_Lean_IR_ToIR_lowerLet___closed__27; +x_11479 = l_Std_Range_forIn_x27_loop___at_Lean_IR_ToIR_lowerLet___spec__5(x_11470, x_11473, x_11477, x_11477, x_11478, x_11475, lean_box(0), lean_box(0), x_11468, x_4, x_5, x_11467); +lean_dec(x_11477); +x_11480 = lean_ctor_get(x_11479, 0); +lean_inc(x_11480); +x_11481 = lean_ctor_get(x_11479, 1); +lean_inc(x_11481); +lean_dec(x_11479); +x_11482 = lean_ctor_get(x_11480, 0); +lean_inc(x_11482); +x_11483 = lean_ctor_get(x_11480, 1); +lean_inc(x_11483); +lean_dec(x_11480); +x_11484 = l_Lean_IR_ToIR_lowerLet___lambda__4(x_1, x_2, x_11469, x_11470, x_11473, x_11482, x_11483, x_4, x_5, x_11481); +lean_dec(x_11473); +lean_dec(x_11470); +return x_11484; +} +else +{ +lean_object* x_11485; lean_object* x_11486; lean_object* x_11487; lean_object* x_11488; +lean_dec(x_11462); +lean_dec(x_5945); +lean_dec(x_5944); +lean_dec(x_5); +lean_dec(x_4); +lean_dec(x_2); +lean_dec(x_1); +x_11485 = lean_ctor_get(x_11464, 0); +lean_inc(x_11485); +x_11486 = lean_ctor_get(x_11464, 1); +lean_inc(x_11486); +if (lean_is_exclusive(x_11464)) { + lean_ctor_release(x_11464, 0); + lean_ctor_release(x_11464, 1); + x_11487 = x_11464; +} else { + lean_dec_ref(x_11464); + x_11487 = lean_box(0); +} +if (lean_is_scalar(x_11487)) { + x_11488 = lean_alloc_ctor(1, 2, 0); +} else { + x_11488 = x_11487; +} +lean_ctor_set(x_11488, 0, x_11485); +lean_ctor_set(x_11488, 1, x_11486); +return x_11488; +} +} +else +{ +lean_object* x_11489; lean_object* x_11490; lean_object* x_11517; lean_object* x_11518; +lean_dec(x_11462); +lean_dec(x_5945); +lean_dec(x_5944); +lean_inc(x_8845); +x_11517 = l_Lean_Compiler_LCNF_getMonoDecl_x3f(x_8845, x_4, x_5, x_11062); +x_11518 = lean_ctor_get(x_11517, 0); +lean_inc(x_11518); +if (lean_obj_tag(x_11518) == 0) +{ +lean_object* x_11519; lean_object* x_11520; lean_object* x_11521; +x_11519 = lean_ctor_get(x_11517, 1); +lean_inc(x_11519); +lean_dec(x_11517); +x_11520 = lean_box(0); 
+if (lean_is_scalar(x_11059)) { + x_11521 = lean_alloc_ctor(0, 2, 0); +} else { + x_11521 = x_11059; +} +lean_ctor_set(x_11521, 0, x_11520); +lean_ctor_set(x_11521, 1, x_11058); +x_11489 = x_11521; +x_11490 = x_11519; +goto block_11516; +} +else +{ +lean_object* x_11522; lean_object* x_11523; lean_object* x_11524; lean_object* x_11525; lean_object* x_11526; lean_object* x_11527; lean_object* x_11528; uint8_t x_11529; +lean_dec(x_11059); +x_11522 = lean_ctor_get(x_11517, 1); +lean_inc(x_11522); +if (lean_is_exclusive(x_11517)) { + lean_ctor_release(x_11517, 0); + lean_ctor_release(x_11517, 1); + x_11523 = x_11517; +} else { + lean_dec_ref(x_11517); + x_11523 = lean_box(0); +} +x_11524 = lean_ctor_get(x_11518, 0); +lean_inc(x_11524); +if (lean_is_exclusive(x_11518)) { + lean_ctor_release(x_11518, 0); + x_11525 = x_11518; +} else { + lean_dec_ref(x_11518); + x_11525 = lean_box(0); +} +x_11526 = lean_array_get_size(x_11053); +x_11527 = lean_ctor_get(x_11524, 3); +lean_inc(x_11527); +lean_dec(x_11524); +x_11528 = lean_array_get_size(x_11527); +lean_dec(x_11527); +x_11529 = lean_nat_dec_lt(x_11526, x_11528); +if (x_11529 == 0) +{ +uint8_t x_11530; +x_11530 = lean_nat_dec_eq(x_11526, x_11528); +if (x_11530 == 0) +{ +lean_object* x_11531; lean_object* x_11532; lean_object* x_11533; lean_object* x_11534; lean_object* x_11535; lean_object* x_11536; lean_object* x_11537; lean_object* x_11538; lean_object* x_11539; lean_object* x_11540; lean_object* x_11541; lean_object* x_11542; lean_object* x_11543; lean_object* x_11544; lean_object* x_11545; lean_object* x_11546; lean_object* x_11547; +x_11531 = lean_unsigned_to_nat(0u); +x_11532 = l_Array_extract___rarg(x_11053, x_11531, x_11528); +x_11533 = l_Array_extract___rarg(x_11053, x_11528, x_11526); +lean_dec(x_11526); +lean_inc(x_8845); +if (lean_is_scalar(x_11523)) { + x_11534 = lean_alloc_ctor(6, 2, 0); +} else { + x_11534 = x_11523; + lean_ctor_set_tag(x_11534, 6); +} +lean_ctor_set(x_11534, 0, x_8845); +lean_ctor_set(x_11534, 1, x_11532); +x_11535 = lean_ctor_get(x_1, 0); +lean_inc(x_11535); +x_11536 = l_Lean_IR_ToIR_bindVar(x_11535, x_11058, x_4, x_5, x_11522); +x_11537 = lean_ctor_get(x_11536, 0); +lean_inc(x_11537); +x_11538 = lean_ctor_get(x_11536, 1); +lean_inc(x_11538); +lean_dec(x_11536); +x_11539 = lean_ctor_get(x_11537, 0); +lean_inc(x_11539); +x_11540 = lean_ctor_get(x_11537, 1); +lean_inc(x_11540); +lean_dec(x_11537); +x_11541 = l_Lean_IR_ToIR_newVar(x_11540, x_4, x_5, x_11538); +x_11542 = lean_ctor_get(x_11541, 0); +lean_inc(x_11542); +x_11543 = lean_ctor_get(x_11541, 1); +lean_inc(x_11543); +lean_dec(x_11541); +x_11544 = lean_ctor_get(x_11542, 0); +lean_inc(x_11544); +x_11545 = lean_ctor_get(x_11542, 1); +lean_inc(x_11545); +lean_dec(x_11542); +x_11546 = lean_ctor_get(x_1, 2); +lean_inc(x_11546); +lean_inc(x_5); +lean_inc(x_4); +x_11547 = l_Lean_IR_ToIR_lowerType(x_11546, x_11545, x_4, x_5, x_11543); +if (lean_obj_tag(x_11547) == 0) +{ +lean_object* x_11548; lean_object* x_11549; lean_object* x_11550; lean_object* x_11551; lean_object* x_11552; +x_11548 = lean_ctor_get(x_11547, 0); +lean_inc(x_11548); +x_11549 = lean_ctor_get(x_11547, 1); +lean_inc(x_11549); +lean_dec(x_11547); +x_11550 = lean_ctor_get(x_11548, 0); +lean_inc(x_11550); +x_11551 = lean_ctor_get(x_11548, 1); +lean_inc(x_11551); +lean_dec(x_11548); +lean_inc(x_5); +lean_inc(x_4); +lean_inc(x_2); +x_11552 = l_Lean_IR_ToIR_lowerLet___lambda__3(x_2, x_11544, x_11533, x_11539, x_11534, x_11550, x_11551, x_4, x_5, x_11549); +if (lean_obj_tag(x_11552) == 0) +{ +lean_object* 
x_11553; lean_object* x_11554; lean_object* x_11555; lean_object* x_11556; lean_object* x_11557; lean_object* x_11558; lean_object* x_11559; +x_11553 = lean_ctor_get(x_11552, 0); +lean_inc(x_11553); +x_11554 = lean_ctor_get(x_11552, 1); +lean_inc(x_11554); +lean_dec(x_11552); +x_11555 = lean_ctor_get(x_11553, 0); +lean_inc(x_11555); +x_11556 = lean_ctor_get(x_11553, 1); +lean_inc(x_11556); +if (lean_is_exclusive(x_11553)) { + lean_ctor_release(x_11553, 0); + lean_ctor_release(x_11553, 1); + x_11557 = x_11553; +} else { + lean_dec_ref(x_11553); + x_11557 = lean_box(0); +} +if (lean_is_scalar(x_11525)) { + x_11558 = lean_alloc_ctor(1, 1, 0); +} else { + x_11558 = x_11525; +} +lean_ctor_set(x_11558, 0, x_11555); +if (lean_is_scalar(x_11557)) { + x_11559 = lean_alloc_ctor(0, 2, 0); +} else { + x_11559 = x_11557; +} +lean_ctor_set(x_11559, 0, x_11558); +lean_ctor_set(x_11559, 1, x_11556); +x_11489 = x_11559; +x_11490 = x_11554; +goto block_11516; +} +else +{ +lean_object* x_11560; lean_object* x_11561; lean_object* x_11562; lean_object* x_11563; +lean_dec(x_11525); +lean_dec(x_11063); +lean_dec(x_11053); +lean_dec(x_8845); +lean_dec(x_5); +lean_dec(x_4); +lean_dec(x_2); +lean_dec(x_1); +x_11560 = lean_ctor_get(x_11552, 0); +lean_inc(x_11560); +x_11561 = lean_ctor_get(x_11552, 1); +lean_inc(x_11561); +if (lean_is_exclusive(x_11552)) { + lean_ctor_release(x_11552, 0); + lean_ctor_release(x_11552, 1); + x_11562 = x_11552; +} else { + lean_dec_ref(x_11552); + x_11562 = lean_box(0); +} +if (lean_is_scalar(x_11562)) { + x_11563 = lean_alloc_ctor(1, 2, 0); +} else { + x_11563 = x_11562; +} +lean_ctor_set(x_11563, 0, x_11560); +lean_ctor_set(x_11563, 1, x_11561); +return x_11563; +} +} +else +{ +lean_object* x_11564; lean_object* x_11565; lean_object* x_11566; lean_object* x_11567; +lean_dec(x_11544); +lean_dec(x_11539); +lean_dec(x_11534); +lean_dec(x_11533); +lean_dec(x_11525); +lean_dec(x_11063); +lean_dec(x_11053); +lean_dec(x_8845); +lean_dec(x_5); +lean_dec(x_4); +lean_dec(x_2); +lean_dec(x_1); +x_11564 = lean_ctor_get(x_11547, 0); +lean_inc(x_11564); +x_11565 = lean_ctor_get(x_11547, 1); +lean_inc(x_11565); +if (lean_is_exclusive(x_11547)) { + lean_ctor_release(x_11547, 0); + lean_ctor_release(x_11547, 1); + x_11566 = x_11547; +} else { + lean_dec_ref(x_11547); + x_11566 = lean_box(0); +} +if (lean_is_scalar(x_11566)) { + x_11567 = lean_alloc_ctor(1, 2, 0); +} else { + x_11567 = x_11566; +} +lean_ctor_set(x_11567, 0, x_11564); +lean_ctor_set(x_11567, 1, x_11565); +return x_11567; +} +} +else +{ +lean_object* x_11568; lean_object* x_11569; lean_object* x_11570; lean_object* x_11571; lean_object* x_11572; lean_object* x_11573; lean_object* x_11574; lean_object* x_11575; lean_object* x_11576; +lean_dec(x_11528); +lean_dec(x_11526); +lean_inc(x_11053); +lean_inc(x_8845); +if (lean_is_scalar(x_11523)) { + x_11568 = lean_alloc_ctor(6, 2, 0); +} else { + x_11568 = x_11523; + lean_ctor_set_tag(x_11568, 6); +} +lean_ctor_set(x_11568, 0, x_8845); +lean_ctor_set(x_11568, 1, x_11053); +x_11569 = lean_ctor_get(x_1, 0); +lean_inc(x_11569); +x_11570 = l_Lean_IR_ToIR_bindVar(x_11569, x_11058, x_4, x_5, x_11522); +x_11571 = lean_ctor_get(x_11570, 0); +lean_inc(x_11571); +x_11572 = lean_ctor_get(x_11570, 1); +lean_inc(x_11572); +lean_dec(x_11570); +x_11573 = lean_ctor_get(x_11571, 0); +lean_inc(x_11573); +x_11574 = lean_ctor_get(x_11571, 1); +lean_inc(x_11574); +lean_dec(x_11571); +x_11575 = lean_ctor_get(x_1, 2); +lean_inc(x_11575); +lean_inc(x_5); +lean_inc(x_4); +x_11576 = l_Lean_IR_ToIR_lowerType(x_11575, 
x_11574, x_4, x_5, x_11572); +if (lean_obj_tag(x_11576) == 0) +{ +lean_object* x_11577; lean_object* x_11578; lean_object* x_11579; lean_object* x_11580; lean_object* x_11581; +x_11577 = lean_ctor_get(x_11576, 0); +lean_inc(x_11577); +x_11578 = lean_ctor_get(x_11576, 1); +lean_inc(x_11578); +lean_dec(x_11576); +x_11579 = lean_ctor_get(x_11577, 0); +lean_inc(x_11579); +x_11580 = lean_ctor_get(x_11577, 1); +lean_inc(x_11580); +lean_dec(x_11577); +lean_inc(x_5); +lean_inc(x_4); +lean_inc(x_2); +x_11581 = l_Lean_IR_ToIR_lowerLet___lambda__1(x_2, x_11573, x_11568, x_11579, x_11580, x_4, x_5, x_11578); +if (lean_obj_tag(x_11581) == 0) +{ +lean_object* x_11582; lean_object* x_11583; lean_object* x_11584; lean_object* x_11585; lean_object* x_11586; lean_object* x_11587; lean_object* x_11588; +x_11582 = lean_ctor_get(x_11581, 0); +lean_inc(x_11582); +x_11583 = lean_ctor_get(x_11581, 1); +lean_inc(x_11583); +lean_dec(x_11581); +x_11584 = lean_ctor_get(x_11582, 0); +lean_inc(x_11584); +x_11585 = lean_ctor_get(x_11582, 1); +lean_inc(x_11585); +if (lean_is_exclusive(x_11582)) { + lean_ctor_release(x_11582, 0); + lean_ctor_release(x_11582, 1); + x_11586 = x_11582; +} else { + lean_dec_ref(x_11582); + x_11586 = lean_box(0); +} +if (lean_is_scalar(x_11525)) { + x_11587 = lean_alloc_ctor(1, 1, 0); +} else { + x_11587 = x_11525; +} +lean_ctor_set(x_11587, 0, x_11584); +if (lean_is_scalar(x_11586)) { + x_11588 = lean_alloc_ctor(0, 2, 0); +} else { + x_11588 = x_11586; +} +lean_ctor_set(x_11588, 0, x_11587); +lean_ctor_set(x_11588, 1, x_11585); +x_11489 = x_11588; +x_11490 = x_11583; +goto block_11516; +} +else +{ +lean_object* x_11589; lean_object* x_11590; lean_object* x_11591; lean_object* x_11592; +lean_dec(x_11525); +lean_dec(x_11063); +lean_dec(x_11053); +lean_dec(x_8845); +lean_dec(x_5); +lean_dec(x_4); +lean_dec(x_2); +lean_dec(x_1); +x_11589 = lean_ctor_get(x_11581, 0); +lean_inc(x_11589); +x_11590 = lean_ctor_get(x_11581, 1); +lean_inc(x_11590); +if (lean_is_exclusive(x_11581)) { + lean_ctor_release(x_11581, 0); + lean_ctor_release(x_11581, 1); + x_11591 = x_11581; +} else { + lean_dec_ref(x_11581); + x_11591 = lean_box(0); +} +if (lean_is_scalar(x_11591)) { + x_11592 = lean_alloc_ctor(1, 2, 0); +} else { + x_11592 = x_11591; +} +lean_ctor_set(x_11592, 0, x_11589); +lean_ctor_set(x_11592, 1, x_11590); +return x_11592; +} +} +else +{ +lean_object* x_11593; lean_object* x_11594; lean_object* x_11595; lean_object* x_11596; +lean_dec(x_11573); +lean_dec(x_11568); +lean_dec(x_11525); +lean_dec(x_11063); +lean_dec(x_11053); +lean_dec(x_8845); +lean_dec(x_5); +lean_dec(x_4); +lean_dec(x_2); +lean_dec(x_1); +x_11593 = lean_ctor_get(x_11576, 0); +lean_inc(x_11593); +x_11594 = lean_ctor_get(x_11576, 1); +lean_inc(x_11594); +if (lean_is_exclusive(x_11576)) { + lean_ctor_release(x_11576, 0); + lean_ctor_release(x_11576, 1); + x_11595 = x_11576; +} else { + lean_dec_ref(x_11576); + x_11595 = lean_box(0); +} +if (lean_is_scalar(x_11595)) { + x_11596 = lean_alloc_ctor(1, 2, 0); +} else { + x_11596 = x_11595; +} +lean_ctor_set(x_11596, 0, x_11593); +lean_ctor_set(x_11596, 1, x_11594); +return x_11596; +} +} +} +else +{ +lean_object* x_11597; lean_object* x_11598; lean_object* x_11599; lean_object* x_11600; lean_object* x_11601; lean_object* x_11602; lean_object* x_11603; lean_object* x_11604; lean_object* x_11605; +lean_dec(x_11528); +lean_dec(x_11526); +lean_inc(x_11053); +lean_inc(x_8845); +if (lean_is_scalar(x_11523)) { + x_11597 = lean_alloc_ctor(7, 2, 0); +} else { + x_11597 = x_11523; + 
lean_ctor_set_tag(x_11597, 7); +} +lean_ctor_set(x_11597, 0, x_8845); +lean_ctor_set(x_11597, 1, x_11053); +x_11598 = lean_ctor_get(x_1, 0); +lean_inc(x_11598); +x_11599 = l_Lean_IR_ToIR_bindVar(x_11598, x_11058, x_4, x_5, x_11522); +x_11600 = lean_ctor_get(x_11599, 0); +lean_inc(x_11600); +x_11601 = lean_ctor_get(x_11599, 1); +lean_inc(x_11601); +lean_dec(x_11599); +x_11602 = lean_ctor_get(x_11600, 0); +lean_inc(x_11602); +x_11603 = lean_ctor_get(x_11600, 1); +lean_inc(x_11603); +lean_dec(x_11600); +x_11604 = lean_box(7); +lean_inc(x_5); +lean_inc(x_4); +lean_inc(x_2); +x_11605 = l_Lean_IR_ToIR_lowerLet___lambda__1(x_2, x_11602, x_11597, x_11604, x_11603, x_4, x_5, x_11601); +if (lean_obj_tag(x_11605) == 0) +{ +lean_object* x_11606; lean_object* x_11607; lean_object* x_11608; lean_object* x_11609; lean_object* x_11610; lean_object* x_11611; lean_object* x_11612; +x_11606 = lean_ctor_get(x_11605, 0); +lean_inc(x_11606); +x_11607 = lean_ctor_get(x_11605, 1); +lean_inc(x_11607); +lean_dec(x_11605); +x_11608 = lean_ctor_get(x_11606, 0); +lean_inc(x_11608); +x_11609 = lean_ctor_get(x_11606, 1); +lean_inc(x_11609); +if (lean_is_exclusive(x_11606)) { + lean_ctor_release(x_11606, 0); + lean_ctor_release(x_11606, 1); + x_11610 = x_11606; +} else { + lean_dec_ref(x_11606); + x_11610 = lean_box(0); +} +if (lean_is_scalar(x_11525)) { + x_11611 = lean_alloc_ctor(1, 1, 0); +} else { + x_11611 = x_11525; +} +lean_ctor_set(x_11611, 0, x_11608); +if (lean_is_scalar(x_11610)) { + x_11612 = lean_alloc_ctor(0, 2, 0); +} else { + x_11612 = x_11610; +} +lean_ctor_set(x_11612, 0, x_11611); +lean_ctor_set(x_11612, 1, x_11609); +x_11489 = x_11612; +x_11490 = x_11607; +goto block_11516; +} +else +{ +lean_object* x_11613; lean_object* x_11614; lean_object* x_11615; lean_object* x_11616; +lean_dec(x_11525); +lean_dec(x_11063); +lean_dec(x_11053); +lean_dec(x_8845); +lean_dec(x_5); +lean_dec(x_4); +lean_dec(x_2); +lean_dec(x_1); +x_11613 = lean_ctor_get(x_11605, 0); +lean_inc(x_11613); +x_11614 = lean_ctor_get(x_11605, 1); +lean_inc(x_11614); +if (lean_is_exclusive(x_11605)) { + lean_ctor_release(x_11605, 0); + lean_ctor_release(x_11605, 1); + x_11615 = x_11605; +} else { + lean_dec_ref(x_11605); + x_11615 = lean_box(0); +} +if (lean_is_scalar(x_11615)) { + x_11616 = lean_alloc_ctor(1, 2, 0); +} else { + x_11616 = x_11615; +} +lean_ctor_set(x_11616, 0, x_11613); +lean_ctor_set(x_11616, 1, x_11614); +return x_11616; +} +} +} +block_11516: +{ +lean_object* x_11491; +x_11491 = lean_ctor_get(x_11489, 0); +lean_inc(x_11491); +if (lean_obj_tag(x_11491) == 0) +{ +lean_object* x_11492; lean_object* x_11493; lean_object* x_11494; lean_object* x_11495; lean_object* x_11496; lean_object* x_11497; lean_object* x_11498; lean_object* x_11499; lean_object* x_11500; lean_object* x_11501; +lean_dec(x_11063); +x_11492 = lean_ctor_get(x_11489, 1); +lean_inc(x_11492); +lean_dec(x_11489); +x_11493 = lean_alloc_ctor(6, 2, 0); +lean_ctor_set(x_11493, 0, x_8845); +lean_ctor_set(x_11493, 1, x_11053); +x_11494 = lean_ctor_get(x_1, 0); +lean_inc(x_11494); +x_11495 = l_Lean_IR_ToIR_bindVar(x_11494, x_11492, x_4, x_5, x_11490); +x_11496 = lean_ctor_get(x_11495, 0); +lean_inc(x_11496); +x_11497 = lean_ctor_get(x_11495, 1); +lean_inc(x_11497); +lean_dec(x_11495); +x_11498 = lean_ctor_get(x_11496, 0); +lean_inc(x_11498); +x_11499 = lean_ctor_get(x_11496, 1); +lean_inc(x_11499); +lean_dec(x_11496); +x_11500 = lean_ctor_get(x_1, 2); +lean_inc(x_11500); +lean_dec(x_1); +lean_inc(x_5); +lean_inc(x_4); +x_11501 = l_Lean_IR_ToIR_lowerType(x_11500, 
x_11499, x_4, x_5, x_11497); +if (lean_obj_tag(x_11501) == 0) +{ +lean_object* x_11502; lean_object* x_11503; lean_object* x_11504; lean_object* x_11505; lean_object* x_11506; +x_11502 = lean_ctor_get(x_11501, 0); +lean_inc(x_11502); +x_11503 = lean_ctor_get(x_11501, 1); +lean_inc(x_11503); +lean_dec(x_11501); +x_11504 = lean_ctor_get(x_11502, 0); +lean_inc(x_11504); +x_11505 = lean_ctor_get(x_11502, 1); +lean_inc(x_11505); +lean_dec(x_11502); +x_11506 = l_Lean_IR_ToIR_lowerLet___lambda__1(x_2, x_11498, x_11493, x_11504, x_11505, x_4, x_5, x_11503); +return x_11506; +} +else +{ +lean_object* x_11507; lean_object* x_11508; lean_object* x_11509; lean_object* x_11510; +lean_dec(x_11498); +lean_dec(x_11493); +lean_dec(x_5); +lean_dec(x_4); +lean_dec(x_2); +x_11507 = lean_ctor_get(x_11501, 0); +lean_inc(x_11507); +x_11508 = lean_ctor_get(x_11501, 1); +lean_inc(x_11508); +if (lean_is_exclusive(x_11501)) { + lean_ctor_release(x_11501, 0); + lean_ctor_release(x_11501, 1); + x_11509 = x_11501; +} else { + lean_dec_ref(x_11501); + x_11509 = lean_box(0); +} +if (lean_is_scalar(x_11509)) { + x_11510 = lean_alloc_ctor(1, 2, 0); +} else { + x_11510 = x_11509; +} +lean_ctor_set(x_11510, 0, x_11507); +lean_ctor_set(x_11510, 1, x_11508); +return x_11510; +} +} +else +{ +lean_object* x_11511; lean_object* x_11512; lean_object* x_11513; lean_object* x_11514; lean_object* x_11515; +lean_dec(x_11053); +lean_dec(x_8845); +lean_dec(x_5); +lean_dec(x_4); +lean_dec(x_2); +lean_dec(x_1); +x_11511 = lean_ctor_get(x_11489, 1); +lean_inc(x_11511); +if (lean_is_exclusive(x_11489)) { + lean_ctor_release(x_11489, 0); + lean_ctor_release(x_11489, 1); + x_11512 = x_11489; +} else { + lean_dec_ref(x_11489); + x_11512 = lean_box(0); +} +x_11513 = lean_ctor_get(x_11491, 0); +lean_inc(x_11513); +lean_dec(x_11491); +if (lean_is_scalar(x_11512)) { + x_11514 = lean_alloc_ctor(0, 2, 0); +} else { + x_11514 = x_11512; +} +lean_ctor_set(x_11514, 0, x_11513); +lean_ctor_set(x_11514, 1, x_11511); +if (lean_is_scalar(x_11063)) { + x_11515 = lean_alloc_ctor(0, 2, 0); +} else { + x_11515 = x_11063; +} +lean_ctor_set(x_11515, 0, x_11514); +lean_ctor_set(x_11515, 1, x_11490); +return x_11515; +} +} +} +} +default: +{ +lean_object* x_11617; uint8_t x_11618; lean_object* x_11619; lean_object* x_11620; lean_object* x_11621; lean_object* x_11622; lean_object* x_11623; lean_object* x_11624; lean_object* x_11625; lean_object* x_11626; lean_object* x_11627; +lean_dec(x_11064); +lean_dec(x_11063); +lean_dec(x_11059); +lean_dec(x_11053); +lean_dec(x_5945); +lean_dec(x_5944); +lean_dec(x_2); +lean_dec(x_1); +if (lean_is_exclusive(x_11069)) { + lean_ctor_release(x_11069, 0); + x_11617 = x_11069; +} else { + lean_dec_ref(x_11069); + x_11617 = lean_box(0); +} +x_11618 = 1; +x_11619 = l_Lean_IR_ToIR_lowerLet___closed__14; +x_11620 = l_Lean_Name_toString(x_8845, x_11618, x_11619); +if (lean_is_scalar(x_11617)) { + x_11621 = lean_alloc_ctor(3, 1, 0); +} else { + x_11621 = x_11617; + lean_ctor_set_tag(x_11621, 3); +} +lean_ctor_set(x_11621, 0, x_11620); +x_11622 = l_Lean_IR_ToIR_lowerLet___closed__29; +x_11623 = lean_alloc_ctor(5, 2, 0); +lean_ctor_set(x_11623, 0, x_11622); +lean_ctor_set(x_11623, 1, x_11621); +x_11624 = l_Lean_IR_ToIR_lowerLet___closed__31; +x_11625 = lean_alloc_ctor(5, 2, 0); +lean_ctor_set(x_11625, 0, x_11623); +lean_ctor_set(x_11625, 1, x_11624); +x_11626 = l_Lean_MessageData_ofFormat(x_11625); +x_11627 = l_Lean_throwError___at_Lean_IR_ToIR_lowerLet___spec__1(x_11626, x_11058, x_4, x_5, x_11062); +lean_dec(x_5); +lean_dec(x_4); 
+lean_dec(x_11058); +return x_11627; +} +} +} +} +else +{ +lean_object* x_11628; lean_object* x_11629; lean_object* x_11630; lean_object* x_11631; lean_object* x_11632; +lean_dec(x_11053); +lean_dec(x_8845); +lean_dec(x_5945); +lean_dec(x_5944); +lean_dec(x_5); +lean_dec(x_4); +lean_dec(x_2); +lean_dec(x_1); +x_11628 = lean_ctor_get(x_11055, 1); +lean_inc(x_11628); +if (lean_is_exclusive(x_11055)) { + lean_ctor_release(x_11055, 0); + lean_ctor_release(x_11055, 1); + x_11629 = x_11055; +} else { + lean_dec_ref(x_11055); + x_11629 = lean_box(0); +} +x_11630 = lean_ctor_get(x_11057, 0); +lean_inc(x_11630); +lean_dec(x_11057); +if (lean_is_scalar(x_11629)) { + x_11631 = lean_alloc_ctor(0, 2, 0); +} else { + x_11631 = x_11629; +} +lean_ctor_set(x_11631, 0, x_11630); +lean_ctor_set(x_11631, 1, x_11628); +if (lean_is_scalar(x_8851)) { + x_11632 = lean_alloc_ctor(0, 2, 0); +} else { + x_11632 = x_8851; +} +lean_ctor_set(x_11632, 0, x_11631); +lean_ctor_set(x_11632, 1, x_11056); +return x_11632; +} +} +} +} +else +{ +uint8_t x_11734; +lean_dec(x_8845); +lean_dec(x_5945); +lean_dec(x_5944); +lean_dec(x_5); +lean_dec(x_4); +lean_dec(x_2); +lean_dec(x_1); +x_11734 = !lean_is_exclusive(x_8848); +if (x_11734 == 0) +{ +return x_8848; +} +else +{ +lean_object* x_11735; lean_object* x_11736; lean_object* x_11737; +x_11735 = lean_ctor_get(x_8848, 0); +x_11736 = lean_ctor_get(x_8848, 1); +lean_inc(x_11736); +lean_inc(x_11735); +lean_dec(x_8848); +x_11737 = lean_alloc_ctor(1, 2, 0); +lean_ctor_set(x_11737, 0, x_11735); +lean_ctor_set(x_11737, 1, x_11736); +return x_11737; +} +} +} +else +{ +size_t x_11738; size_t x_11739; lean_object* x_11740; +lean_dec(x_5946); +lean_dec(x_5945); +x_11738 = lean_array_size(x_5944); +x_11739 = 0; +lean_inc(x_5); +lean_inc(x_4); +x_11740 = l_Array_mapMUnsafe_map___at_Lean_IR_ToIR_lowerCode___spec__2(x_11738, x_11739, x_5944, x_3, x_4, x_5, x_6); +if (lean_obj_tag(x_11740) == 0) +{ +lean_object* x_11741; lean_object* x_11742; lean_object* x_11743; lean_object* x_11744; lean_object* x_11745; lean_object* x_11746; lean_object* x_11747; lean_object* x_11748; uint8_t x_11749; +x_11741 = lean_ctor_get(x_11740, 0); +lean_inc(x_11741); +x_11742 = lean_ctor_get(x_11740, 1); +lean_inc(x_11742); +lean_dec(x_11740); +x_11743 = lean_ctor_get(x_11741, 0); +lean_inc(x_11743); +x_11744 = lean_ctor_get(x_11741, 1); +lean_inc(x_11744); +lean_dec(x_11741); +x_11745 = lean_ctor_get(x_1, 0); +lean_inc(x_11745); +lean_dec(x_1); +x_11746 = l_Lean_IR_ToIR_bindVar(x_11745, x_11744, x_4, x_5, x_11742); +x_11747 = lean_ctor_get(x_11746, 0); +lean_inc(x_11747); +x_11748 = lean_ctor_get(x_11746, 1); +lean_inc(x_11748); +lean_dec(x_11746); +x_11749 = !lean_is_exclusive(x_11747); +if (x_11749 == 0) +{ +lean_object* x_11750; lean_object* x_11751; lean_object* x_11752; uint8_t x_11753; +x_11750 = lean_ctor_get(x_11747, 0); +x_11751 = lean_ctor_get(x_11747, 1); +x_11752 = l_Lean_IR_ToIR_newVar(x_11751, x_4, x_5, x_11748); +x_11753 = !lean_is_exclusive(x_11752); +if (x_11753 == 0) +{ +lean_object* x_11754; uint8_t x_11755; +x_11754 = lean_ctor_get(x_11752, 0); +x_11755 = !lean_is_exclusive(x_11754); +if (x_11755 == 0) +{ +lean_object* x_11756; lean_object* x_11757; lean_object* x_11758; lean_object* x_11759; +x_11756 = lean_ctor_get(x_11752, 1); +x_11757 = lean_ctor_get(x_11754, 0); +x_11758 = lean_ctor_get(x_11754, 1); +x_11759 = l_Lean_IR_ToIR_lowerCode(x_2, x_11758, x_4, x_5, x_11756); +if (lean_obj_tag(x_11759) == 0) +{ +uint8_t x_11760; +x_11760 = !lean_is_exclusive(x_11759); +if (x_11760 == 0) +{ 
+lean_object* x_11761; uint8_t x_11762; +x_11761 = lean_ctor_get(x_11759, 0); +x_11762 = !lean_is_exclusive(x_11761); +if (x_11762 == 0) +{ +lean_object* x_11763; lean_object* x_11764; lean_object* x_11765; lean_object* x_11766; lean_object* x_11767; lean_object* x_11768; lean_object* x_11769; lean_object* x_11770; lean_object* x_11771; lean_object* x_11772; lean_object* x_11773; lean_object* x_11774; +x_11763 = lean_ctor_get(x_11761, 0); +x_11764 = l_Lean_IR_instInhabitedArg; +x_11765 = lean_unsigned_to_nat(0u); +x_11766 = lean_array_get(x_11764, x_11743, x_11765); +lean_dec(x_11743); +lean_inc(x_11757); +x_11767 = lean_alloc_ctor(0, 1, 0); +lean_ctor_set(x_11767, 0, x_11757); +x_11768 = lean_box(0); +lean_ctor_set_tag(x_11754, 1); +lean_ctor_set(x_11754, 1, x_11768); +lean_ctor_set(x_11754, 0, x_11767); +lean_ctor_set_tag(x_11752, 1); +lean_ctor_set(x_11752, 1, x_11754); +lean_ctor_set(x_11752, 0, x_11766); +x_11769 = lean_array_mk(x_11752); +x_11770 = l_Lean_IR_ToIR_lowerLet___closed__35; +lean_ctor_set_tag(x_11747, 6); +lean_ctor_set(x_11747, 1, x_11769); +lean_ctor_set(x_11747, 0, x_11770); +x_11771 = lean_box(7); +x_11772 = lean_alloc_ctor(0, 4, 0); +lean_ctor_set(x_11772, 0, x_11750); +lean_ctor_set(x_11772, 1, x_11771); +lean_ctor_set(x_11772, 2, x_11747); +lean_ctor_set(x_11772, 3, x_11763); +x_11773 = l_Lean_IR_ToIR_lowerLet___closed__37; +x_11774 = lean_alloc_ctor(0, 4, 0); +lean_ctor_set(x_11774, 0, x_11757); +lean_ctor_set(x_11774, 1, x_11771); +lean_ctor_set(x_11774, 2, x_11773); +lean_ctor_set(x_11774, 3, x_11772); +lean_ctor_set(x_11761, 0, x_11774); +return x_11759; +} +else +{ +lean_object* x_11775; lean_object* x_11776; lean_object* x_11777; lean_object* x_11778; lean_object* x_11779; lean_object* x_11780; lean_object* x_11781; lean_object* x_11782; lean_object* x_11783; lean_object* x_11784; lean_object* x_11785; lean_object* x_11786; lean_object* x_11787; lean_object* x_11788; +x_11775 = lean_ctor_get(x_11761, 0); +x_11776 = lean_ctor_get(x_11761, 1); +lean_inc(x_11776); +lean_inc(x_11775); +lean_dec(x_11761); +x_11777 = l_Lean_IR_instInhabitedArg; +x_11778 = lean_unsigned_to_nat(0u); +x_11779 = lean_array_get(x_11777, x_11743, x_11778); +lean_dec(x_11743); +lean_inc(x_11757); +x_11780 = lean_alloc_ctor(0, 1, 0); +lean_ctor_set(x_11780, 0, x_11757); +x_11781 = lean_box(0); +lean_ctor_set_tag(x_11754, 1); +lean_ctor_set(x_11754, 1, x_11781); +lean_ctor_set(x_11754, 0, x_11780); +lean_ctor_set_tag(x_11752, 1); +lean_ctor_set(x_11752, 1, x_11754); +lean_ctor_set(x_11752, 0, x_11779); +x_11782 = lean_array_mk(x_11752); +x_11783 = l_Lean_IR_ToIR_lowerLet___closed__35; +lean_ctor_set_tag(x_11747, 6); +lean_ctor_set(x_11747, 1, x_11782); +lean_ctor_set(x_11747, 0, x_11783); +x_11784 = lean_box(7); +x_11785 = lean_alloc_ctor(0, 4, 0); +lean_ctor_set(x_11785, 0, x_11750); +lean_ctor_set(x_11785, 1, x_11784); +lean_ctor_set(x_11785, 2, x_11747); +lean_ctor_set(x_11785, 3, x_11775); +x_11786 = l_Lean_IR_ToIR_lowerLet___closed__37; +x_11787 = lean_alloc_ctor(0, 4, 0); +lean_ctor_set(x_11787, 0, x_11757); +lean_ctor_set(x_11787, 1, x_11784); +lean_ctor_set(x_11787, 2, x_11786); +lean_ctor_set(x_11787, 3, x_11785); +x_11788 = lean_alloc_ctor(0, 2, 0); +lean_ctor_set(x_11788, 0, x_11787); +lean_ctor_set(x_11788, 1, x_11776); +lean_ctor_set(x_11759, 0, x_11788); +return x_11759; +} +} +else +{ +lean_object* x_11789; lean_object* x_11790; lean_object* x_11791; lean_object* x_11792; lean_object* x_11793; lean_object* x_11794; lean_object* x_11795; lean_object* x_11796; lean_object* 
x_11797; lean_object* x_11798; lean_object* x_11799; lean_object* x_11800; lean_object* x_11801; lean_object* x_11802; lean_object* x_11803; lean_object* x_11804; lean_object* x_11805; lean_object* x_11806; +x_11789 = lean_ctor_get(x_11759, 0); +x_11790 = lean_ctor_get(x_11759, 1); +lean_inc(x_11790); +lean_inc(x_11789); +lean_dec(x_11759); +x_11791 = lean_ctor_get(x_11789, 0); +lean_inc(x_11791); +x_11792 = lean_ctor_get(x_11789, 1); +lean_inc(x_11792); +if (lean_is_exclusive(x_11789)) { + lean_ctor_release(x_11789, 0); + lean_ctor_release(x_11789, 1); + x_11793 = x_11789; +} else { + lean_dec_ref(x_11789); + x_11793 = lean_box(0); +} +x_11794 = l_Lean_IR_instInhabitedArg; +x_11795 = lean_unsigned_to_nat(0u); +x_11796 = lean_array_get(x_11794, x_11743, x_11795); +lean_dec(x_11743); +lean_inc(x_11757); +x_11797 = lean_alloc_ctor(0, 1, 0); +lean_ctor_set(x_11797, 0, x_11757); +x_11798 = lean_box(0); +lean_ctor_set_tag(x_11754, 1); +lean_ctor_set(x_11754, 1, x_11798); +lean_ctor_set(x_11754, 0, x_11797); +lean_ctor_set_tag(x_11752, 1); +lean_ctor_set(x_11752, 1, x_11754); +lean_ctor_set(x_11752, 0, x_11796); +x_11799 = lean_array_mk(x_11752); +x_11800 = l_Lean_IR_ToIR_lowerLet___closed__35; +lean_ctor_set_tag(x_11747, 6); +lean_ctor_set(x_11747, 1, x_11799); +lean_ctor_set(x_11747, 0, x_11800); +x_11801 = lean_box(7); +x_11802 = lean_alloc_ctor(0, 4, 0); +lean_ctor_set(x_11802, 0, x_11750); +lean_ctor_set(x_11802, 1, x_11801); +lean_ctor_set(x_11802, 2, x_11747); +lean_ctor_set(x_11802, 3, x_11791); +x_11803 = l_Lean_IR_ToIR_lowerLet___closed__37; +x_11804 = lean_alloc_ctor(0, 4, 0); +lean_ctor_set(x_11804, 0, x_11757); +lean_ctor_set(x_11804, 1, x_11801); +lean_ctor_set(x_11804, 2, x_11803); +lean_ctor_set(x_11804, 3, x_11802); +if (lean_is_scalar(x_11793)) { + x_11805 = lean_alloc_ctor(0, 2, 0); +} else { + x_11805 = x_11793; +} +lean_ctor_set(x_11805, 0, x_11804); +lean_ctor_set(x_11805, 1, x_11792); +x_11806 = lean_alloc_ctor(0, 2, 0); +lean_ctor_set(x_11806, 0, x_11805); +lean_ctor_set(x_11806, 1, x_11790); +return x_11806; +} +} +else +{ +uint8_t x_11807; +lean_free_object(x_11754); +lean_dec(x_11757); +lean_free_object(x_11752); +lean_free_object(x_11747); +lean_dec(x_11750); +lean_dec(x_11743); +x_11807 = !lean_is_exclusive(x_11759); +if (x_11807 == 0) +{ +return x_11759; +} +else +{ +lean_object* x_11808; lean_object* x_11809; lean_object* x_11810; +x_11808 = lean_ctor_get(x_11759, 0); +x_11809 = lean_ctor_get(x_11759, 1); +lean_inc(x_11809); +lean_inc(x_11808); +lean_dec(x_11759); +x_11810 = lean_alloc_ctor(1, 2, 0); +lean_ctor_set(x_11810, 0, x_11808); +lean_ctor_set(x_11810, 1, x_11809); +return x_11810; +} +} +} +else +{ +lean_object* x_11811; lean_object* x_11812; lean_object* x_11813; lean_object* x_11814; +x_11811 = lean_ctor_get(x_11752, 1); +x_11812 = lean_ctor_get(x_11754, 0); +x_11813 = lean_ctor_get(x_11754, 1); +lean_inc(x_11813); +lean_inc(x_11812); +lean_dec(x_11754); +x_11814 = l_Lean_IR_ToIR_lowerCode(x_2, x_11813, x_4, x_5, x_11811); +if (lean_obj_tag(x_11814) == 0) +{ +lean_object* x_11815; lean_object* x_11816; lean_object* x_11817; lean_object* x_11818; lean_object* x_11819; lean_object* x_11820; lean_object* x_11821; lean_object* x_11822; lean_object* x_11823; lean_object* x_11824; lean_object* x_11825; lean_object* x_11826; lean_object* x_11827; lean_object* x_11828; lean_object* x_11829; lean_object* x_11830; lean_object* x_11831; lean_object* x_11832; lean_object* x_11833; lean_object* x_11834; +x_11815 = lean_ctor_get(x_11814, 0); +lean_inc(x_11815); 
+x_11816 = lean_ctor_get(x_11814, 1); +lean_inc(x_11816); +if (lean_is_exclusive(x_11814)) { + lean_ctor_release(x_11814, 0); + lean_ctor_release(x_11814, 1); + x_11817 = x_11814; +} else { + lean_dec_ref(x_11814); + x_11817 = lean_box(0); +} +x_11818 = lean_ctor_get(x_11815, 0); +lean_inc(x_11818); +x_11819 = lean_ctor_get(x_11815, 1); +lean_inc(x_11819); +if (lean_is_exclusive(x_11815)) { + lean_ctor_release(x_11815, 0); + lean_ctor_release(x_11815, 1); + x_11820 = x_11815; +} else { + lean_dec_ref(x_11815); + x_11820 = lean_box(0); +} +x_11821 = l_Lean_IR_instInhabitedArg; +x_11822 = lean_unsigned_to_nat(0u); +x_11823 = lean_array_get(x_11821, x_11743, x_11822); +lean_dec(x_11743); +lean_inc(x_11812); +x_11824 = lean_alloc_ctor(0, 1, 0); +lean_ctor_set(x_11824, 0, x_11812); +x_11825 = lean_box(0); +x_11826 = lean_alloc_ctor(1, 2, 0); +lean_ctor_set(x_11826, 0, x_11824); +lean_ctor_set(x_11826, 1, x_11825); +lean_ctor_set_tag(x_11752, 1); +lean_ctor_set(x_11752, 1, x_11826); +lean_ctor_set(x_11752, 0, x_11823); +x_11827 = lean_array_mk(x_11752); +x_11828 = l_Lean_IR_ToIR_lowerLet___closed__35; +lean_ctor_set_tag(x_11747, 6); +lean_ctor_set(x_11747, 1, x_11827); +lean_ctor_set(x_11747, 0, x_11828); +x_11829 = lean_box(7); +x_11830 = lean_alloc_ctor(0, 4, 0); +lean_ctor_set(x_11830, 0, x_11750); +lean_ctor_set(x_11830, 1, x_11829); +lean_ctor_set(x_11830, 2, x_11747); +lean_ctor_set(x_11830, 3, x_11818); +x_11831 = l_Lean_IR_ToIR_lowerLet___closed__37; +x_11832 = lean_alloc_ctor(0, 4, 0); +lean_ctor_set(x_11832, 0, x_11812); +lean_ctor_set(x_11832, 1, x_11829); +lean_ctor_set(x_11832, 2, x_11831); +lean_ctor_set(x_11832, 3, x_11830); +if (lean_is_scalar(x_11820)) { + x_11833 = lean_alloc_ctor(0, 2, 0); +} else { + x_11833 = x_11820; +} +lean_ctor_set(x_11833, 0, x_11832); +lean_ctor_set(x_11833, 1, x_11819); +if (lean_is_scalar(x_11817)) { + x_11834 = lean_alloc_ctor(0, 2, 0); +} else { + x_11834 = x_11817; +} +lean_ctor_set(x_11834, 0, x_11833); +lean_ctor_set(x_11834, 1, x_11816); +return x_11834; +} +else +{ +lean_object* x_11835; lean_object* x_11836; lean_object* x_11837; lean_object* x_11838; +lean_dec(x_11812); +lean_free_object(x_11752); +lean_free_object(x_11747); +lean_dec(x_11750); +lean_dec(x_11743); +x_11835 = lean_ctor_get(x_11814, 0); +lean_inc(x_11835); +x_11836 = lean_ctor_get(x_11814, 1); +lean_inc(x_11836); +if (lean_is_exclusive(x_11814)) { + lean_ctor_release(x_11814, 0); + lean_ctor_release(x_11814, 1); + x_11837 = x_11814; +} else { + lean_dec_ref(x_11814); + x_11837 = lean_box(0); +} +if (lean_is_scalar(x_11837)) { + x_11838 = lean_alloc_ctor(1, 2, 0); +} else { + x_11838 = x_11837; +} +lean_ctor_set(x_11838, 0, x_11835); +lean_ctor_set(x_11838, 1, x_11836); +return x_11838; +} +} +} +else +{ +lean_object* x_11839; lean_object* x_11840; lean_object* x_11841; lean_object* x_11842; lean_object* x_11843; lean_object* x_11844; +x_11839 = lean_ctor_get(x_11752, 0); +x_11840 = lean_ctor_get(x_11752, 1); +lean_inc(x_11840); +lean_inc(x_11839); +lean_dec(x_11752); +x_11841 = lean_ctor_get(x_11839, 0); +lean_inc(x_11841); +x_11842 = lean_ctor_get(x_11839, 1); +lean_inc(x_11842); +if (lean_is_exclusive(x_11839)) { + lean_ctor_release(x_11839, 0); + lean_ctor_release(x_11839, 1); + x_11843 = x_11839; +} else { + lean_dec_ref(x_11839); + x_11843 = lean_box(0); +} +x_11844 = l_Lean_IR_ToIR_lowerCode(x_2, x_11842, x_4, x_5, x_11840); +if (lean_obj_tag(x_11844) == 0) +{ +lean_object* x_11845; lean_object* x_11846; lean_object* x_11847; lean_object* x_11848; lean_object* x_11849; 
lean_object* x_11850; lean_object* x_11851; lean_object* x_11852; lean_object* x_11853; lean_object* x_11854; lean_object* x_11855; lean_object* x_11856; lean_object* x_11857; lean_object* x_11858; lean_object* x_11859; lean_object* x_11860; lean_object* x_11861; lean_object* x_11862; lean_object* x_11863; lean_object* x_11864; lean_object* x_11865; +x_11845 = lean_ctor_get(x_11844, 0); +lean_inc(x_11845); +x_11846 = lean_ctor_get(x_11844, 1); +lean_inc(x_11846); +if (lean_is_exclusive(x_11844)) { + lean_ctor_release(x_11844, 0); + lean_ctor_release(x_11844, 1); + x_11847 = x_11844; +} else { + lean_dec_ref(x_11844); + x_11847 = lean_box(0); +} +x_11848 = lean_ctor_get(x_11845, 0); +lean_inc(x_11848); +x_11849 = lean_ctor_get(x_11845, 1); +lean_inc(x_11849); +if (lean_is_exclusive(x_11845)) { + lean_ctor_release(x_11845, 0); + lean_ctor_release(x_11845, 1); + x_11850 = x_11845; +} else { + lean_dec_ref(x_11845); + x_11850 = lean_box(0); +} +x_11851 = l_Lean_IR_instInhabitedArg; +x_11852 = lean_unsigned_to_nat(0u); +x_11853 = lean_array_get(x_11851, x_11743, x_11852); +lean_dec(x_11743); +lean_inc(x_11841); +x_11854 = lean_alloc_ctor(0, 1, 0); +lean_ctor_set(x_11854, 0, x_11841); +x_11855 = lean_box(0); +if (lean_is_scalar(x_11843)) { + x_11856 = lean_alloc_ctor(1, 2, 0); +} else { + x_11856 = x_11843; + lean_ctor_set_tag(x_11856, 1); +} +lean_ctor_set(x_11856, 0, x_11854); +lean_ctor_set(x_11856, 1, x_11855); +x_11857 = lean_alloc_ctor(1, 2, 0); +lean_ctor_set(x_11857, 0, x_11853); +lean_ctor_set(x_11857, 1, x_11856); +x_11858 = lean_array_mk(x_11857); +x_11859 = l_Lean_IR_ToIR_lowerLet___closed__35; +lean_ctor_set_tag(x_11747, 6); +lean_ctor_set(x_11747, 1, x_11858); +lean_ctor_set(x_11747, 0, x_11859); +x_11860 = lean_box(7); +x_11861 = lean_alloc_ctor(0, 4, 0); +lean_ctor_set(x_11861, 0, x_11750); +lean_ctor_set(x_11861, 1, x_11860); +lean_ctor_set(x_11861, 2, x_11747); +lean_ctor_set(x_11861, 3, x_11848); +x_11862 = l_Lean_IR_ToIR_lowerLet___closed__37; +x_11863 = lean_alloc_ctor(0, 4, 0); +lean_ctor_set(x_11863, 0, x_11841); +lean_ctor_set(x_11863, 1, x_11860); +lean_ctor_set(x_11863, 2, x_11862); +lean_ctor_set(x_11863, 3, x_11861); +if (lean_is_scalar(x_11850)) { + x_11864 = lean_alloc_ctor(0, 2, 0); +} else { + x_11864 = x_11850; +} +lean_ctor_set(x_11864, 0, x_11863); +lean_ctor_set(x_11864, 1, x_11849); +if (lean_is_scalar(x_11847)) { + x_11865 = lean_alloc_ctor(0, 2, 0); +} else { + x_11865 = x_11847; +} +lean_ctor_set(x_11865, 0, x_11864); +lean_ctor_set(x_11865, 1, x_11846); +return x_11865; +} +else +{ +lean_object* x_11866; lean_object* x_11867; lean_object* x_11868; lean_object* x_11869; +lean_dec(x_11843); +lean_dec(x_11841); +lean_free_object(x_11747); +lean_dec(x_11750); +lean_dec(x_11743); +x_11866 = lean_ctor_get(x_11844, 0); +lean_inc(x_11866); +x_11867 = lean_ctor_get(x_11844, 1); +lean_inc(x_11867); +if (lean_is_exclusive(x_11844)) { + lean_ctor_release(x_11844, 0); + lean_ctor_release(x_11844, 1); + x_11868 = x_11844; +} else { + lean_dec_ref(x_11844); + x_11868 = lean_box(0); +} +if (lean_is_scalar(x_11868)) { + x_11869 = lean_alloc_ctor(1, 2, 0); +} else { + x_11869 = x_11868; +} +lean_ctor_set(x_11869, 0, x_11866); +lean_ctor_set(x_11869, 1, x_11867); +return x_11869; +} +} +} +else +{ +lean_object* x_11870; lean_object* x_11871; lean_object* x_11872; lean_object* x_11873; lean_object* x_11874; lean_object* x_11875; lean_object* x_11876; lean_object* x_11877; lean_object* x_11878; lean_object* x_11879; +x_11870 = lean_ctor_get(x_11747, 0); +x_11871 = 
lean_ctor_get(x_11747, 1); +lean_inc(x_11871); +lean_inc(x_11870); +lean_dec(x_11747); +x_11872 = l_Lean_IR_ToIR_newVar(x_11871, x_4, x_5, x_11748); +x_11873 = lean_ctor_get(x_11872, 0); +lean_inc(x_11873); +x_11874 = lean_ctor_get(x_11872, 1); +lean_inc(x_11874); +if (lean_is_exclusive(x_11872)) { + lean_ctor_release(x_11872, 0); + lean_ctor_release(x_11872, 1); + x_11875 = x_11872; +} else { + lean_dec_ref(x_11872); + x_11875 = lean_box(0); +} +x_11876 = lean_ctor_get(x_11873, 0); +lean_inc(x_11876); +x_11877 = lean_ctor_get(x_11873, 1); +lean_inc(x_11877); +if (lean_is_exclusive(x_11873)) { + lean_ctor_release(x_11873, 0); + lean_ctor_release(x_11873, 1); + x_11878 = x_11873; +} else { + lean_dec_ref(x_11873); + x_11878 = lean_box(0); +} +x_11879 = l_Lean_IR_ToIR_lowerCode(x_2, x_11877, x_4, x_5, x_11874); +if (lean_obj_tag(x_11879) == 0) +{ +lean_object* x_11880; lean_object* x_11881; lean_object* x_11882; lean_object* x_11883; lean_object* x_11884; lean_object* x_11885; lean_object* x_11886; lean_object* x_11887; lean_object* x_11888; lean_object* x_11889; lean_object* x_11890; lean_object* x_11891; lean_object* x_11892; lean_object* x_11893; lean_object* x_11894; lean_object* x_11895; lean_object* x_11896; lean_object* x_11897; lean_object* x_11898; lean_object* x_11899; lean_object* x_11900; lean_object* x_11901; +x_11880 = lean_ctor_get(x_11879, 0); +lean_inc(x_11880); +x_11881 = lean_ctor_get(x_11879, 1); +lean_inc(x_11881); +if (lean_is_exclusive(x_11879)) { + lean_ctor_release(x_11879, 0); + lean_ctor_release(x_11879, 1); + x_11882 = x_11879; +} else { + lean_dec_ref(x_11879); + x_11882 = lean_box(0); +} +x_11883 = lean_ctor_get(x_11880, 0); +lean_inc(x_11883); +x_11884 = lean_ctor_get(x_11880, 1); +lean_inc(x_11884); +if (lean_is_exclusive(x_11880)) { + lean_ctor_release(x_11880, 0); + lean_ctor_release(x_11880, 1); + x_11885 = x_11880; +} else { + lean_dec_ref(x_11880); + x_11885 = lean_box(0); +} +x_11886 = l_Lean_IR_instInhabitedArg; +x_11887 = lean_unsigned_to_nat(0u); +x_11888 = lean_array_get(x_11886, x_11743, x_11887); +lean_dec(x_11743); +lean_inc(x_11876); +x_11889 = lean_alloc_ctor(0, 1, 0); +lean_ctor_set(x_11889, 0, x_11876); +x_11890 = lean_box(0); +if (lean_is_scalar(x_11878)) { + x_11891 = lean_alloc_ctor(1, 2, 0); +} else { + x_11891 = x_11878; + lean_ctor_set_tag(x_11891, 1); +} +lean_ctor_set(x_11891, 0, x_11889); +lean_ctor_set(x_11891, 1, x_11890); +if (lean_is_scalar(x_11875)) { + x_11892 = lean_alloc_ctor(1, 2, 0); +} else { + x_11892 = x_11875; + lean_ctor_set_tag(x_11892, 1); +} +lean_ctor_set(x_11892, 0, x_11888); +lean_ctor_set(x_11892, 1, x_11891); +x_11893 = lean_array_mk(x_11892); +x_11894 = l_Lean_IR_ToIR_lowerLet___closed__35; +x_11895 = lean_alloc_ctor(6, 2, 0); +lean_ctor_set(x_11895, 0, x_11894); +lean_ctor_set(x_11895, 1, x_11893); +x_11896 = lean_box(7); +x_11897 = lean_alloc_ctor(0, 4, 0); +lean_ctor_set(x_11897, 0, x_11870); +lean_ctor_set(x_11897, 1, x_11896); +lean_ctor_set(x_11897, 2, x_11895); +lean_ctor_set(x_11897, 3, x_11883); +x_11898 = l_Lean_IR_ToIR_lowerLet___closed__37; +x_11899 = lean_alloc_ctor(0, 4, 0); +lean_ctor_set(x_11899, 0, x_11876); +lean_ctor_set(x_11899, 1, x_11896); +lean_ctor_set(x_11899, 2, x_11898); +lean_ctor_set(x_11899, 3, x_11897); +if (lean_is_scalar(x_11885)) { + x_11900 = lean_alloc_ctor(0, 2, 0); +} else { + x_11900 = x_11885; +} +lean_ctor_set(x_11900, 0, x_11899); +lean_ctor_set(x_11900, 1, x_11884); +if (lean_is_scalar(x_11882)) { + x_11901 = lean_alloc_ctor(0, 2, 0); +} else { + x_11901 = x_11882; +} 
+lean_ctor_set(x_11901, 0, x_11900); +lean_ctor_set(x_11901, 1, x_11881); +return x_11901; +} +else +{ +lean_object* x_11902; lean_object* x_11903; lean_object* x_11904; lean_object* x_11905; +lean_dec(x_11878); +lean_dec(x_11876); +lean_dec(x_11875); +lean_dec(x_11870); +lean_dec(x_11743); +x_11902 = lean_ctor_get(x_11879, 0); +lean_inc(x_11902); +x_11903 = lean_ctor_get(x_11879, 1); +lean_inc(x_11903); +if (lean_is_exclusive(x_11879)) { + lean_ctor_release(x_11879, 0); + lean_ctor_release(x_11879, 1); + x_11904 = x_11879; +} else { + lean_dec_ref(x_11879); + x_11904 = lean_box(0); +} +if (lean_is_scalar(x_11904)) { + x_11905 = lean_alloc_ctor(1, 2, 0); +} else { + x_11905 = x_11904; +} +lean_ctor_set(x_11905, 0, x_11902); +lean_ctor_set(x_11905, 1, x_11903); +return x_11905; +} +} +} +else +{ +uint8_t x_11906; +lean_dec(x_5); +lean_dec(x_4); +lean_dec(x_2); +lean_dec(x_1); +x_11906 = !lean_is_exclusive(x_11740); +if (x_11906 == 0) +{ +return x_11740; +} +else +{ +lean_object* x_11907; lean_object* x_11908; lean_object* x_11909; +x_11907 = lean_ctor_get(x_11740, 0); +x_11908 = lean_ctor_get(x_11740, 1); +lean_inc(x_11908); +lean_inc(x_11907); +lean_dec(x_11740); +x_11909 = lean_alloc_ctor(1, 2, 0); +lean_ctor_set(x_11909, 0, x_11907); +lean_ctor_set(x_11909, 1, x_11908); +return x_11909; +} +} +} +} +} +case 1: +{ +lean_object* x_11910; lean_object* x_11911; size_t x_11912; size_t x_11913; lean_object* x_11914; +lean_dec(x_5943); +lean_dec(x_3048); +x_11910 = lean_ctor_get(x_7, 2); +lean_inc(x_11910); +if (lean_is_exclusive(x_7)) { + lean_ctor_release(x_7, 0); + lean_ctor_release(x_7, 1); + lean_ctor_release(x_7, 2); + x_11911 = x_7; +} else { + lean_dec_ref(x_7); + x_11911 = lean_box(0); +} +x_11912 = lean_array_size(x_11910); +x_11913 = 0; +lean_inc(x_5); +lean_inc(x_4); +lean_inc(x_11910); +x_11914 = l_Array_mapMUnsafe_map___at_Lean_IR_ToIR_lowerCode___spec__2(x_11912, x_11913, x_11910, x_3, x_4, x_5, x_6); +if (lean_obj_tag(x_11914) == 0) +{ +lean_object* x_11915; lean_object* x_11916; lean_object* x_11917; uint8_t x_11918; +x_11915 = lean_ctor_get(x_11914, 0); +lean_inc(x_11915); +x_11916 = lean_ctor_get(x_11914, 1); +lean_inc(x_11916); +if (lean_is_exclusive(x_11914)) { + lean_ctor_release(x_11914, 0); + lean_ctor_release(x_11914, 1); + x_11917 = x_11914; +} else { + lean_dec_ref(x_11914); + x_11917 = lean_box(0); +} +x_11918 = !lean_is_exclusive(x_11915); +if (x_11918 == 0) +{ +lean_object* x_11919; lean_object* x_11920; lean_object* x_11921; lean_object* x_11922; lean_object* x_13839; lean_object* x_13840; +x_11919 = lean_ctor_get(x_11915, 0); +x_11920 = lean_ctor_get(x_11915, 1); +lean_inc(x_153); +x_13839 = l_Lean_Compiler_LCNF_getMonoDecl_x3f(x_153, x_4, x_5, x_11916); +x_13840 = lean_ctor_get(x_13839, 0); +lean_inc(x_13840); +if (lean_obj_tag(x_13840) == 0) +{ +lean_object* x_13841; lean_object* x_13842; +x_13841 = lean_ctor_get(x_13839, 1); +lean_inc(x_13841); +lean_dec(x_13839); +x_13842 = lean_box(0); +lean_ctor_set(x_11915, 0, x_13842); +x_11921 = x_11915; +x_11922 = x_13841; +goto block_13838; +} +else +{ +uint8_t x_13843; +lean_free_object(x_11915); +x_13843 = !lean_is_exclusive(x_13839); +if (x_13843 == 0) +{ +lean_object* x_13844; lean_object* x_13845; uint8_t x_13846; +x_13844 = lean_ctor_get(x_13839, 1); +x_13845 = lean_ctor_get(x_13839, 0); +lean_dec(x_13845); +x_13846 = !lean_is_exclusive(x_13840); +if (x_13846 == 0) +{ +lean_object* x_13847; lean_object* x_13848; lean_object* x_13849; lean_object* x_13850; uint8_t x_13851; +x_13847 = lean_ctor_get(x_13840, 0); 
+x_13848 = lean_array_get_size(x_11919); +x_13849 = lean_ctor_get(x_13847, 3); +lean_inc(x_13849); +lean_dec(x_13847); +x_13850 = lean_array_get_size(x_13849); +lean_dec(x_13849); +x_13851 = lean_nat_dec_lt(x_13848, x_13850); +if (x_13851 == 0) +{ +uint8_t x_13852; +x_13852 = lean_nat_dec_eq(x_13848, x_13850); +if (x_13852 == 0) +{ +lean_object* x_13853; lean_object* x_13854; lean_object* x_13855; lean_object* x_13856; lean_object* x_13857; lean_object* x_13858; lean_object* x_13859; lean_object* x_13860; lean_object* x_13861; lean_object* x_13862; lean_object* x_13863; lean_object* x_13864; lean_object* x_13865; lean_object* x_13866; lean_object* x_13867; lean_object* x_13868; +x_13853 = lean_unsigned_to_nat(0u); +x_13854 = l_Array_extract___rarg(x_11919, x_13853, x_13850); +x_13855 = l_Array_extract___rarg(x_11919, x_13850, x_13848); +lean_dec(x_13848); +lean_inc(x_153); +lean_ctor_set_tag(x_13839, 6); +lean_ctor_set(x_13839, 1, x_13854); +lean_ctor_set(x_13839, 0, x_153); +x_13856 = lean_ctor_get(x_1, 0); +lean_inc(x_13856); +x_13857 = l_Lean_IR_ToIR_bindVar(x_13856, x_11920, x_4, x_5, x_13844); +x_13858 = lean_ctor_get(x_13857, 0); +lean_inc(x_13858); +x_13859 = lean_ctor_get(x_13857, 1); +lean_inc(x_13859); +lean_dec(x_13857); +x_13860 = lean_ctor_get(x_13858, 0); +lean_inc(x_13860); +x_13861 = lean_ctor_get(x_13858, 1); +lean_inc(x_13861); +lean_dec(x_13858); +x_13862 = l_Lean_IR_ToIR_newVar(x_13861, x_4, x_5, x_13859); +x_13863 = lean_ctor_get(x_13862, 0); +lean_inc(x_13863); +x_13864 = lean_ctor_get(x_13862, 1); +lean_inc(x_13864); +lean_dec(x_13862); +x_13865 = lean_ctor_get(x_13863, 0); +lean_inc(x_13865); +x_13866 = lean_ctor_get(x_13863, 1); +lean_inc(x_13866); +lean_dec(x_13863); +x_13867 = lean_ctor_get(x_1, 2); +lean_inc(x_13867); +lean_inc(x_5); +lean_inc(x_4); +x_13868 = l_Lean_IR_ToIR_lowerType(x_13867, x_13866, x_4, x_5, x_13864); +if (lean_obj_tag(x_13868) == 0) +{ +lean_object* x_13869; lean_object* x_13870; lean_object* x_13871; lean_object* x_13872; lean_object* x_13873; +x_13869 = lean_ctor_get(x_13868, 0); +lean_inc(x_13869); +x_13870 = lean_ctor_get(x_13868, 1); +lean_inc(x_13870); +lean_dec(x_13868); +x_13871 = lean_ctor_get(x_13869, 0); +lean_inc(x_13871); +x_13872 = lean_ctor_get(x_13869, 1); +lean_inc(x_13872); +lean_dec(x_13869); +lean_inc(x_5); +lean_inc(x_4); +lean_inc(x_2); +x_13873 = l_Lean_IR_ToIR_lowerLet___lambda__3(x_2, x_13865, x_13855, x_13860, x_13839, x_13871, x_13872, x_4, x_5, x_13870); +if (lean_obj_tag(x_13873) == 0) +{ +lean_object* x_13874; lean_object* x_13875; uint8_t x_13876; +x_13874 = lean_ctor_get(x_13873, 0); +lean_inc(x_13874); +x_13875 = lean_ctor_get(x_13873, 1); +lean_inc(x_13875); +lean_dec(x_13873); +x_13876 = !lean_is_exclusive(x_13874); +if (x_13876 == 0) +{ +lean_object* x_13877; +x_13877 = lean_ctor_get(x_13874, 0); +lean_ctor_set(x_13840, 0, x_13877); +lean_ctor_set(x_13874, 0, x_13840); +x_11921 = x_13874; +x_11922 = x_13875; +goto block_13838; +} +else +{ +lean_object* x_13878; lean_object* x_13879; lean_object* x_13880; +x_13878 = lean_ctor_get(x_13874, 0); +x_13879 = lean_ctor_get(x_13874, 1); +lean_inc(x_13879); +lean_inc(x_13878); +lean_dec(x_13874); +lean_ctor_set(x_13840, 0, x_13878); +x_13880 = lean_alloc_ctor(0, 2, 0); +lean_ctor_set(x_13880, 0, x_13840); +lean_ctor_set(x_13880, 1, x_13879); +x_11921 = x_13880; +x_11922 = x_13875; +goto block_13838; +} +} +else +{ +uint8_t x_13881; +lean_free_object(x_13840); +lean_dec(x_11919); +lean_dec(x_11917); +lean_dec(x_11911); +lean_dec(x_11910); +lean_dec(x_153); 
+lean_dec(x_5); +lean_dec(x_4); +lean_dec(x_2); +lean_dec(x_1); +x_13881 = !lean_is_exclusive(x_13873); +if (x_13881 == 0) +{ +return x_13873; +} +else +{ +lean_object* x_13882; lean_object* x_13883; lean_object* x_13884; +x_13882 = lean_ctor_get(x_13873, 0); +x_13883 = lean_ctor_get(x_13873, 1); +lean_inc(x_13883); +lean_inc(x_13882); +lean_dec(x_13873); +x_13884 = lean_alloc_ctor(1, 2, 0); +lean_ctor_set(x_13884, 0, x_13882); +lean_ctor_set(x_13884, 1, x_13883); +return x_13884; +} +} +} +else +{ +uint8_t x_13885; +lean_dec(x_13865); +lean_dec(x_13860); +lean_dec(x_13839); +lean_dec(x_13855); +lean_free_object(x_13840); +lean_dec(x_11919); +lean_dec(x_11917); +lean_dec(x_11911); +lean_dec(x_11910); +lean_dec(x_153); +lean_dec(x_5); +lean_dec(x_4); +lean_dec(x_2); +lean_dec(x_1); +x_13885 = !lean_is_exclusive(x_13868); +if (x_13885 == 0) +{ +return x_13868; +} +else +{ +lean_object* x_13886; lean_object* x_13887; lean_object* x_13888; +x_13886 = lean_ctor_get(x_13868, 0); +x_13887 = lean_ctor_get(x_13868, 1); +lean_inc(x_13887); +lean_inc(x_13886); +lean_dec(x_13868); +x_13888 = lean_alloc_ctor(1, 2, 0); +lean_ctor_set(x_13888, 0, x_13886); +lean_ctor_set(x_13888, 1, x_13887); +return x_13888; +} +} +} +else +{ +lean_object* x_13889; lean_object* x_13890; lean_object* x_13891; lean_object* x_13892; lean_object* x_13893; lean_object* x_13894; lean_object* x_13895; lean_object* x_13896; +lean_dec(x_13850); +lean_dec(x_13848); +lean_inc(x_11919); +lean_inc(x_153); +lean_ctor_set_tag(x_13839, 6); +lean_ctor_set(x_13839, 1, x_11919); +lean_ctor_set(x_13839, 0, x_153); +x_13889 = lean_ctor_get(x_1, 0); +lean_inc(x_13889); +x_13890 = l_Lean_IR_ToIR_bindVar(x_13889, x_11920, x_4, x_5, x_13844); +x_13891 = lean_ctor_get(x_13890, 0); +lean_inc(x_13891); +x_13892 = lean_ctor_get(x_13890, 1); +lean_inc(x_13892); +lean_dec(x_13890); +x_13893 = lean_ctor_get(x_13891, 0); +lean_inc(x_13893); +x_13894 = lean_ctor_get(x_13891, 1); +lean_inc(x_13894); +lean_dec(x_13891); +x_13895 = lean_ctor_get(x_1, 2); +lean_inc(x_13895); +lean_inc(x_5); +lean_inc(x_4); +x_13896 = l_Lean_IR_ToIR_lowerType(x_13895, x_13894, x_4, x_5, x_13892); +if (lean_obj_tag(x_13896) == 0) +{ +lean_object* x_13897; lean_object* x_13898; lean_object* x_13899; lean_object* x_13900; lean_object* x_13901; +x_13897 = lean_ctor_get(x_13896, 0); +lean_inc(x_13897); +x_13898 = lean_ctor_get(x_13896, 1); +lean_inc(x_13898); +lean_dec(x_13896); +x_13899 = lean_ctor_get(x_13897, 0); +lean_inc(x_13899); +x_13900 = lean_ctor_get(x_13897, 1); +lean_inc(x_13900); +lean_dec(x_13897); +lean_inc(x_5); +lean_inc(x_4); +lean_inc(x_2); +x_13901 = l_Lean_IR_ToIR_lowerLet___lambda__1(x_2, x_13893, x_13839, x_13899, x_13900, x_4, x_5, x_13898); +if (lean_obj_tag(x_13901) == 0) +{ +lean_object* x_13902; lean_object* x_13903; uint8_t x_13904; +x_13902 = lean_ctor_get(x_13901, 0); +lean_inc(x_13902); +x_13903 = lean_ctor_get(x_13901, 1); +lean_inc(x_13903); +lean_dec(x_13901); +x_13904 = !lean_is_exclusive(x_13902); +if (x_13904 == 0) +{ +lean_object* x_13905; +x_13905 = lean_ctor_get(x_13902, 0); +lean_ctor_set(x_13840, 0, x_13905); +lean_ctor_set(x_13902, 0, x_13840); +x_11921 = x_13902; +x_11922 = x_13903; +goto block_13838; +} +else +{ +lean_object* x_13906; lean_object* x_13907; lean_object* x_13908; +x_13906 = lean_ctor_get(x_13902, 0); +x_13907 = lean_ctor_get(x_13902, 1); +lean_inc(x_13907); +lean_inc(x_13906); +lean_dec(x_13902); +lean_ctor_set(x_13840, 0, x_13906); +x_13908 = lean_alloc_ctor(0, 2, 0); +lean_ctor_set(x_13908, 0, x_13840); 
+lean_ctor_set(x_13908, 1, x_13907); +x_11921 = x_13908; +x_11922 = x_13903; +goto block_13838; +} +} +else +{ +uint8_t x_13909; +lean_free_object(x_13840); +lean_dec(x_11919); +lean_dec(x_11917); +lean_dec(x_11911); +lean_dec(x_11910); +lean_dec(x_153); +lean_dec(x_5); +lean_dec(x_4); +lean_dec(x_2); +lean_dec(x_1); +x_13909 = !lean_is_exclusive(x_13901); +if (x_13909 == 0) +{ +return x_13901; +} +else +{ +lean_object* x_13910; lean_object* x_13911; lean_object* x_13912; +x_13910 = lean_ctor_get(x_13901, 0); +x_13911 = lean_ctor_get(x_13901, 1); +lean_inc(x_13911); +lean_inc(x_13910); +lean_dec(x_13901); +x_13912 = lean_alloc_ctor(1, 2, 0); +lean_ctor_set(x_13912, 0, x_13910); +lean_ctor_set(x_13912, 1, x_13911); +return x_13912; +} +} +} +else +{ +uint8_t x_13913; +lean_dec(x_13893); +lean_dec(x_13839); +lean_free_object(x_13840); +lean_dec(x_11919); +lean_dec(x_11917); +lean_dec(x_11911); +lean_dec(x_11910); +lean_dec(x_153); +lean_dec(x_5); +lean_dec(x_4); +lean_dec(x_2); +lean_dec(x_1); +x_13913 = !lean_is_exclusive(x_13896); +if (x_13913 == 0) +{ +return x_13896; +} +else +{ +lean_object* x_13914; lean_object* x_13915; lean_object* x_13916; +x_13914 = lean_ctor_get(x_13896, 0); +x_13915 = lean_ctor_get(x_13896, 1); +lean_inc(x_13915); +lean_inc(x_13914); +lean_dec(x_13896); +x_13916 = lean_alloc_ctor(1, 2, 0); +lean_ctor_set(x_13916, 0, x_13914); +lean_ctor_set(x_13916, 1, x_13915); +return x_13916; +} +} +} +} +else +{ +lean_object* x_13917; lean_object* x_13918; lean_object* x_13919; lean_object* x_13920; lean_object* x_13921; lean_object* x_13922; lean_object* x_13923; lean_object* x_13924; +lean_dec(x_13850); +lean_dec(x_13848); +lean_inc(x_11919); +lean_inc(x_153); +lean_ctor_set_tag(x_13839, 7); +lean_ctor_set(x_13839, 1, x_11919); +lean_ctor_set(x_13839, 0, x_153); +x_13917 = lean_ctor_get(x_1, 0); +lean_inc(x_13917); +x_13918 = l_Lean_IR_ToIR_bindVar(x_13917, x_11920, x_4, x_5, x_13844); +x_13919 = lean_ctor_get(x_13918, 0); +lean_inc(x_13919); +x_13920 = lean_ctor_get(x_13918, 1); +lean_inc(x_13920); +lean_dec(x_13918); +x_13921 = lean_ctor_get(x_13919, 0); +lean_inc(x_13921); +x_13922 = lean_ctor_get(x_13919, 1); +lean_inc(x_13922); +lean_dec(x_13919); +x_13923 = lean_box(7); +lean_inc(x_5); +lean_inc(x_4); +lean_inc(x_2); +x_13924 = l_Lean_IR_ToIR_lowerLet___lambda__1(x_2, x_13921, x_13839, x_13923, x_13922, x_4, x_5, x_13920); +if (lean_obj_tag(x_13924) == 0) +{ +lean_object* x_13925; lean_object* x_13926; uint8_t x_13927; +x_13925 = lean_ctor_get(x_13924, 0); +lean_inc(x_13925); +x_13926 = lean_ctor_get(x_13924, 1); +lean_inc(x_13926); +lean_dec(x_13924); +x_13927 = !lean_is_exclusive(x_13925); +if (x_13927 == 0) +{ +lean_object* x_13928; +x_13928 = lean_ctor_get(x_13925, 0); +lean_ctor_set(x_13840, 0, x_13928); +lean_ctor_set(x_13925, 0, x_13840); +x_11921 = x_13925; +x_11922 = x_13926; +goto block_13838; +} +else +{ +lean_object* x_13929; lean_object* x_13930; lean_object* x_13931; +x_13929 = lean_ctor_get(x_13925, 0); +x_13930 = lean_ctor_get(x_13925, 1); +lean_inc(x_13930); +lean_inc(x_13929); +lean_dec(x_13925); +lean_ctor_set(x_13840, 0, x_13929); +x_13931 = lean_alloc_ctor(0, 2, 0); +lean_ctor_set(x_13931, 0, x_13840); +lean_ctor_set(x_13931, 1, x_13930); +x_11921 = x_13931; +x_11922 = x_13926; +goto block_13838; +} +} +else +{ +uint8_t x_13932; +lean_free_object(x_13840); +lean_dec(x_11919); +lean_dec(x_11917); +lean_dec(x_11911); +lean_dec(x_11910); +lean_dec(x_153); +lean_dec(x_5); +lean_dec(x_4); +lean_dec(x_2); +lean_dec(x_1); +x_13932 = 
!lean_is_exclusive(x_13924); +if (x_13932 == 0) +{ +return x_13924; +} +else +{ +lean_object* x_13933; lean_object* x_13934; lean_object* x_13935; +x_13933 = lean_ctor_get(x_13924, 0); +x_13934 = lean_ctor_get(x_13924, 1); +lean_inc(x_13934); +lean_inc(x_13933); +lean_dec(x_13924); +x_13935 = lean_alloc_ctor(1, 2, 0); +lean_ctor_set(x_13935, 0, x_13933); +lean_ctor_set(x_13935, 1, x_13934); +return x_13935; +} +} +} +} +else +{ +lean_object* x_13936; lean_object* x_13937; lean_object* x_13938; lean_object* x_13939; uint8_t x_13940; +x_13936 = lean_ctor_get(x_13840, 0); +lean_inc(x_13936); +lean_dec(x_13840); +x_13937 = lean_array_get_size(x_11919); +x_13938 = lean_ctor_get(x_13936, 3); +lean_inc(x_13938); +lean_dec(x_13936); +x_13939 = lean_array_get_size(x_13938); +lean_dec(x_13938); +x_13940 = lean_nat_dec_lt(x_13937, x_13939); +if (x_13940 == 0) +{ +uint8_t x_13941; +x_13941 = lean_nat_dec_eq(x_13937, x_13939); +if (x_13941 == 0) +{ +lean_object* x_13942; lean_object* x_13943; lean_object* x_13944; lean_object* x_13945; lean_object* x_13946; lean_object* x_13947; lean_object* x_13948; lean_object* x_13949; lean_object* x_13950; lean_object* x_13951; lean_object* x_13952; lean_object* x_13953; lean_object* x_13954; lean_object* x_13955; lean_object* x_13956; lean_object* x_13957; +x_13942 = lean_unsigned_to_nat(0u); +x_13943 = l_Array_extract___rarg(x_11919, x_13942, x_13939); +x_13944 = l_Array_extract___rarg(x_11919, x_13939, x_13937); +lean_dec(x_13937); +lean_inc(x_153); +lean_ctor_set_tag(x_13839, 6); +lean_ctor_set(x_13839, 1, x_13943); +lean_ctor_set(x_13839, 0, x_153); +x_13945 = lean_ctor_get(x_1, 0); +lean_inc(x_13945); +x_13946 = l_Lean_IR_ToIR_bindVar(x_13945, x_11920, x_4, x_5, x_13844); +x_13947 = lean_ctor_get(x_13946, 0); +lean_inc(x_13947); +x_13948 = lean_ctor_get(x_13946, 1); +lean_inc(x_13948); +lean_dec(x_13946); +x_13949 = lean_ctor_get(x_13947, 0); +lean_inc(x_13949); +x_13950 = lean_ctor_get(x_13947, 1); +lean_inc(x_13950); +lean_dec(x_13947); +x_13951 = l_Lean_IR_ToIR_newVar(x_13950, x_4, x_5, x_13948); +x_13952 = lean_ctor_get(x_13951, 0); +lean_inc(x_13952); +x_13953 = lean_ctor_get(x_13951, 1); +lean_inc(x_13953); +lean_dec(x_13951); +x_13954 = lean_ctor_get(x_13952, 0); +lean_inc(x_13954); +x_13955 = lean_ctor_get(x_13952, 1); +lean_inc(x_13955); +lean_dec(x_13952); +x_13956 = lean_ctor_get(x_1, 2); +lean_inc(x_13956); +lean_inc(x_5); +lean_inc(x_4); +x_13957 = l_Lean_IR_ToIR_lowerType(x_13956, x_13955, x_4, x_5, x_13953); +if (lean_obj_tag(x_13957) == 0) +{ +lean_object* x_13958; lean_object* x_13959; lean_object* x_13960; lean_object* x_13961; lean_object* x_13962; +x_13958 = lean_ctor_get(x_13957, 0); +lean_inc(x_13958); +x_13959 = lean_ctor_get(x_13957, 1); +lean_inc(x_13959); +lean_dec(x_13957); +x_13960 = lean_ctor_get(x_13958, 0); +lean_inc(x_13960); +x_13961 = lean_ctor_get(x_13958, 1); +lean_inc(x_13961); +lean_dec(x_13958); +lean_inc(x_5); +lean_inc(x_4); +lean_inc(x_2); +x_13962 = l_Lean_IR_ToIR_lowerLet___lambda__3(x_2, x_13954, x_13944, x_13949, x_13839, x_13960, x_13961, x_4, x_5, x_13959); +if (lean_obj_tag(x_13962) == 0) +{ +lean_object* x_13963; lean_object* x_13964; lean_object* x_13965; lean_object* x_13966; lean_object* x_13967; lean_object* x_13968; lean_object* x_13969; +x_13963 = lean_ctor_get(x_13962, 0); +lean_inc(x_13963); +x_13964 = lean_ctor_get(x_13962, 1); +lean_inc(x_13964); +lean_dec(x_13962); +x_13965 = lean_ctor_get(x_13963, 0); +lean_inc(x_13965); +x_13966 = lean_ctor_get(x_13963, 1); +lean_inc(x_13966); +if 
(lean_is_exclusive(x_13963)) { + lean_ctor_release(x_13963, 0); + lean_ctor_release(x_13963, 1); + x_13967 = x_13963; +} else { + lean_dec_ref(x_13963); + x_13967 = lean_box(0); +} +x_13968 = lean_alloc_ctor(1, 1, 0); +lean_ctor_set(x_13968, 0, x_13965); +if (lean_is_scalar(x_13967)) { + x_13969 = lean_alloc_ctor(0, 2, 0); +} else { + x_13969 = x_13967; +} +lean_ctor_set(x_13969, 0, x_13968); +lean_ctor_set(x_13969, 1, x_13966); +x_11921 = x_13969; +x_11922 = x_13964; +goto block_13838; +} +else +{ +lean_object* x_13970; lean_object* x_13971; lean_object* x_13972; lean_object* x_13973; +lean_dec(x_11919); +lean_dec(x_11917); +lean_dec(x_11911); +lean_dec(x_11910); +lean_dec(x_153); +lean_dec(x_5); +lean_dec(x_4); +lean_dec(x_2); +lean_dec(x_1); +x_13970 = lean_ctor_get(x_13962, 0); +lean_inc(x_13970); +x_13971 = lean_ctor_get(x_13962, 1); +lean_inc(x_13971); +if (lean_is_exclusive(x_13962)) { + lean_ctor_release(x_13962, 0); + lean_ctor_release(x_13962, 1); + x_13972 = x_13962; +} else { + lean_dec_ref(x_13962); + x_13972 = lean_box(0); +} +if (lean_is_scalar(x_13972)) { + x_13973 = lean_alloc_ctor(1, 2, 0); +} else { + x_13973 = x_13972; +} +lean_ctor_set(x_13973, 0, x_13970); +lean_ctor_set(x_13973, 1, x_13971); +return x_13973; +} +} +else +{ +lean_object* x_13974; lean_object* x_13975; lean_object* x_13976; lean_object* x_13977; +lean_dec(x_13954); +lean_dec(x_13949); +lean_dec(x_13839); +lean_dec(x_13944); +lean_dec(x_11919); +lean_dec(x_11917); +lean_dec(x_11911); +lean_dec(x_11910); +lean_dec(x_153); +lean_dec(x_5); +lean_dec(x_4); +lean_dec(x_2); +lean_dec(x_1); +x_13974 = lean_ctor_get(x_13957, 0); +lean_inc(x_13974); +x_13975 = lean_ctor_get(x_13957, 1); +lean_inc(x_13975); +if (lean_is_exclusive(x_13957)) { + lean_ctor_release(x_13957, 0); + lean_ctor_release(x_13957, 1); + x_13976 = x_13957; +} else { + lean_dec_ref(x_13957); + x_13976 = lean_box(0); +} +if (lean_is_scalar(x_13976)) { + x_13977 = lean_alloc_ctor(1, 2, 0); +} else { + x_13977 = x_13976; +} +lean_ctor_set(x_13977, 0, x_13974); +lean_ctor_set(x_13977, 1, x_13975); +return x_13977; +} +} +else +{ +lean_object* x_13978; lean_object* x_13979; lean_object* x_13980; lean_object* x_13981; lean_object* x_13982; lean_object* x_13983; lean_object* x_13984; lean_object* x_13985; +lean_dec(x_13939); +lean_dec(x_13937); +lean_inc(x_11919); +lean_inc(x_153); +lean_ctor_set_tag(x_13839, 6); +lean_ctor_set(x_13839, 1, x_11919); +lean_ctor_set(x_13839, 0, x_153); +x_13978 = lean_ctor_get(x_1, 0); +lean_inc(x_13978); +x_13979 = l_Lean_IR_ToIR_bindVar(x_13978, x_11920, x_4, x_5, x_13844); +x_13980 = lean_ctor_get(x_13979, 0); +lean_inc(x_13980); +x_13981 = lean_ctor_get(x_13979, 1); +lean_inc(x_13981); +lean_dec(x_13979); +x_13982 = lean_ctor_get(x_13980, 0); +lean_inc(x_13982); +x_13983 = lean_ctor_get(x_13980, 1); +lean_inc(x_13983); +lean_dec(x_13980); +x_13984 = lean_ctor_get(x_1, 2); +lean_inc(x_13984); +lean_inc(x_5); +lean_inc(x_4); +x_13985 = l_Lean_IR_ToIR_lowerType(x_13984, x_13983, x_4, x_5, x_13981); +if (lean_obj_tag(x_13985) == 0) +{ +lean_object* x_13986; lean_object* x_13987; lean_object* x_13988; lean_object* x_13989; lean_object* x_13990; +x_13986 = lean_ctor_get(x_13985, 0); +lean_inc(x_13986); +x_13987 = lean_ctor_get(x_13985, 1); +lean_inc(x_13987); +lean_dec(x_13985); +x_13988 = lean_ctor_get(x_13986, 0); +lean_inc(x_13988); +x_13989 = lean_ctor_get(x_13986, 1); +lean_inc(x_13989); +lean_dec(x_13986); +lean_inc(x_5); +lean_inc(x_4); +lean_inc(x_2); +x_13990 = l_Lean_IR_ToIR_lowerLet___lambda__1(x_2, x_13982, 
x_13839, x_13988, x_13989, x_4, x_5, x_13987); +if (lean_obj_tag(x_13990) == 0) +{ +lean_object* x_13991; lean_object* x_13992; lean_object* x_13993; lean_object* x_13994; lean_object* x_13995; lean_object* x_13996; lean_object* x_13997; +x_13991 = lean_ctor_get(x_13990, 0); +lean_inc(x_13991); +x_13992 = lean_ctor_get(x_13990, 1); +lean_inc(x_13992); +lean_dec(x_13990); +x_13993 = lean_ctor_get(x_13991, 0); +lean_inc(x_13993); +x_13994 = lean_ctor_get(x_13991, 1); +lean_inc(x_13994); +if (lean_is_exclusive(x_13991)) { + lean_ctor_release(x_13991, 0); + lean_ctor_release(x_13991, 1); + x_13995 = x_13991; +} else { + lean_dec_ref(x_13991); + x_13995 = lean_box(0); +} +x_13996 = lean_alloc_ctor(1, 1, 0); +lean_ctor_set(x_13996, 0, x_13993); +if (lean_is_scalar(x_13995)) { + x_13997 = lean_alloc_ctor(0, 2, 0); +} else { + x_13997 = x_13995; +} +lean_ctor_set(x_13997, 0, x_13996); +lean_ctor_set(x_13997, 1, x_13994); +x_11921 = x_13997; +x_11922 = x_13992; +goto block_13838; +} +else +{ +lean_object* x_13998; lean_object* x_13999; lean_object* x_14000; lean_object* x_14001; +lean_dec(x_11919); +lean_dec(x_11917); +lean_dec(x_11911); +lean_dec(x_11910); +lean_dec(x_153); +lean_dec(x_5); +lean_dec(x_4); +lean_dec(x_2); +lean_dec(x_1); +x_13998 = lean_ctor_get(x_13990, 0); +lean_inc(x_13998); +x_13999 = lean_ctor_get(x_13990, 1); +lean_inc(x_13999); +if (lean_is_exclusive(x_13990)) { + lean_ctor_release(x_13990, 0); + lean_ctor_release(x_13990, 1); + x_14000 = x_13990; +} else { + lean_dec_ref(x_13990); + x_14000 = lean_box(0); +} +if (lean_is_scalar(x_14000)) { + x_14001 = lean_alloc_ctor(1, 2, 0); +} else { + x_14001 = x_14000; +} +lean_ctor_set(x_14001, 0, x_13998); +lean_ctor_set(x_14001, 1, x_13999); +return x_14001; +} +} +else +{ +lean_object* x_14002; lean_object* x_14003; lean_object* x_14004; lean_object* x_14005; +lean_dec(x_13982); +lean_dec(x_13839); +lean_dec(x_11919); +lean_dec(x_11917); +lean_dec(x_11911); +lean_dec(x_11910); +lean_dec(x_153); +lean_dec(x_5); +lean_dec(x_4); +lean_dec(x_2); +lean_dec(x_1); +x_14002 = lean_ctor_get(x_13985, 0); +lean_inc(x_14002); +x_14003 = lean_ctor_get(x_13985, 1); +lean_inc(x_14003); +if (lean_is_exclusive(x_13985)) { + lean_ctor_release(x_13985, 0); + lean_ctor_release(x_13985, 1); + x_14004 = x_13985; +} else { + lean_dec_ref(x_13985); + x_14004 = lean_box(0); +} +if (lean_is_scalar(x_14004)) { + x_14005 = lean_alloc_ctor(1, 2, 0); +} else { + x_14005 = x_14004; +} +lean_ctor_set(x_14005, 0, x_14002); +lean_ctor_set(x_14005, 1, x_14003); +return x_14005; +} +} +} +else +{ +lean_object* x_14006; lean_object* x_14007; lean_object* x_14008; lean_object* x_14009; lean_object* x_14010; lean_object* x_14011; lean_object* x_14012; lean_object* x_14013; +lean_dec(x_13939); +lean_dec(x_13937); +lean_inc(x_11919); +lean_inc(x_153); +lean_ctor_set_tag(x_13839, 7); +lean_ctor_set(x_13839, 1, x_11919); +lean_ctor_set(x_13839, 0, x_153); +x_14006 = lean_ctor_get(x_1, 0); +lean_inc(x_14006); +x_14007 = l_Lean_IR_ToIR_bindVar(x_14006, x_11920, x_4, x_5, x_13844); +x_14008 = lean_ctor_get(x_14007, 0); +lean_inc(x_14008); +x_14009 = lean_ctor_get(x_14007, 1); +lean_inc(x_14009); +lean_dec(x_14007); +x_14010 = lean_ctor_get(x_14008, 0); +lean_inc(x_14010); +x_14011 = lean_ctor_get(x_14008, 1); +lean_inc(x_14011); +lean_dec(x_14008); +x_14012 = lean_box(7); +lean_inc(x_5); +lean_inc(x_4); +lean_inc(x_2); +x_14013 = l_Lean_IR_ToIR_lowerLet___lambda__1(x_2, x_14010, x_13839, x_14012, x_14011, x_4, x_5, x_14009); +if (lean_obj_tag(x_14013) == 0) +{ +lean_object* 
x_14014; lean_object* x_14015; lean_object* x_14016; lean_object* x_14017; lean_object* x_14018; lean_object* x_14019; lean_object* x_14020; +x_14014 = lean_ctor_get(x_14013, 0); +lean_inc(x_14014); +x_14015 = lean_ctor_get(x_14013, 1); +lean_inc(x_14015); +lean_dec(x_14013); +x_14016 = lean_ctor_get(x_14014, 0); +lean_inc(x_14016); +x_14017 = lean_ctor_get(x_14014, 1); +lean_inc(x_14017); +if (lean_is_exclusive(x_14014)) { + lean_ctor_release(x_14014, 0); + lean_ctor_release(x_14014, 1); + x_14018 = x_14014; +} else { + lean_dec_ref(x_14014); + x_14018 = lean_box(0); +} +x_14019 = lean_alloc_ctor(1, 1, 0); +lean_ctor_set(x_14019, 0, x_14016); +if (lean_is_scalar(x_14018)) { + x_14020 = lean_alloc_ctor(0, 2, 0); +} else { + x_14020 = x_14018; +} +lean_ctor_set(x_14020, 0, x_14019); +lean_ctor_set(x_14020, 1, x_14017); +x_11921 = x_14020; +x_11922 = x_14015; +goto block_13838; +} +else +{ +lean_object* x_14021; lean_object* x_14022; lean_object* x_14023; lean_object* x_14024; +lean_dec(x_11919); +lean_dec(x_11917); +lean_dec(x_11911); +lean_dec(x_11910); +lean_dec(x_153); +lean_dec(x_5); +lean_dec(x_4); +lean_dec(x_2); +lean_dec(x_1); +x_14021 = lean_ctor_get(x_14013, 0); +lean_inc(x_14021); +x_14022 = lean_ctor_get(x_14013, 1); +lean_inc(x_14022); +if (lean_is_exclusive(x_14013)) { + lean_ctor_release(x_14013, 0); + lean_ctor_release(x_14013, 1); + x_14023 = x_14013; +} else { + lean_dec_ref(x_14013); + x_14023 = lean_box(0); +} +if (lean_is_scalar(x_14023)) { + x_14024 = lean_alloc_ctor(1, 2, 0); +} else { + x_14024 = x_14023; +} +lean_ctor_set(x_14024, 0, x_14021); +lean_ctor_set(x_14024, 1, x_14022); +return x_14024; +} +} +} +} +else +{ +lean_object* x_14025; lean_object* x_14026; lean_object* x_14027; lean_object* x_14028; lean_object* x_14029; lean_object* x_14030; uint8_t x_14031; +x_14025 = lean_ctor_get(x_13839, 1); +lean_inc(x_14025); +lean_dec(x_13839); +x_14026 = lean_ctor_get(x_13840, 0); +lean_inc(x_14026); +if (lean_is_exclusive(x_13840)) { + lean_ctor_release(x_13840, 0); + x_14027 = x_13840; +} else { + lean_dec_ref(x_13840); + x_14027 = lean_box(0); +} +x_14028 = lean_array_get_size(x_11919); +x_14029 = lean_ctor_get(x_14026, 3); +lean_inc(x_14029); +lean_dec(x_14026); +x_14030 = lean_array_get_size(x_14029); +lean_dec(x_14029); +x_14031 = lean_nat_dec_lt(x_14028, x_14030); +if (x_14031 == 0) +{ +uint8_t x_14032; +x_14032 = lean_nat_dec_eq(x_14028, x_14030); +if (x_14032 == 0) +{ +lean_object* x_14033; lean_object* x_14034; lean_object* x_14035; lean_object* x_14036; lean_object* x_14037; lean_object* x_14038; lean_object* x_14039; lean_object* x_14040; lean_object* x_14041; lean_object* x_14042; lean_object* x_14043; lean_object* x_14044; lean_object* x_14045; lean_object* x_14046; lean_object* x_14047; lean_object* x_14048; lean_object* x_14049; +x_14033 = lean_unsigned_to_nat(0u); +x_14034 = l_Array_extract___rarg(x_11919, x_14033, x_14030); +x_14035 = l_Array_extract___rarg(x_11919, x_14030, x_14028); +lean_dec(x_14028); +lean_inc(x_153); +x_14036 = lean_alloc_ctor(6, 2, 0); +lean_ctor_set(x_14036, 0, x_153); +lean_ctor_set(x_14036, 1, x_14034); +x_14037 = lean_ctor_get(x_1, 0); +lean_inc(x_14037); +x_14038 = l_Lean_IR_ToIR_bindVar(x_14037, x_11920, x_4, x_5, x_14025); +x_14039 = lean_ctor_get(x_14038, 0); +lean_inc(x_14039); +x_14040 = lean_ctor_get(x_14038, 1); +lean_inc(x_14040); +lean_dec(x_14038); +x_14041 = lean_ctor_get(x_14039, 0); +lean_inc(x_14041); +x_14042 = lean_ctor_get(x_14039, 1); +lean_inc(x_14042); +lean_dec(x_14039); +x_14043 = 
l_Lean_IR_ToIR_newVar(x_14042, x_4, x_5, x_14040); +x_14044 = lean_ctor_get(x_14043, 0); +lean_inc(x_14044); +x_14045 = lean_ctor_get(x_14043, 1); +lean_inc(x_14045); +lean_dec(x_14043); +x_14046 = lean_ctor_get(x_14044, 0); +lean_inc(x_14046); +x_14047 = lean_ctor_get(x_14044, 1); +lean_inc(x_14047); +lean_dec(x_14044); +x_14048 = lean_ctor_get(x_1, 2); +lean_inc(x_14048); +lean_inc(x_5); +lean_inc(x_4); +x_14049 = l_Lean_IR_ToIR_lowerType(x_14048, x_14047, x_4, x_5, x_14045); +if (lean_obj_tag(x_14049) == 0) +{ +lean_object* x_14050; lean_object* x_14051; lean_object* x_14052; lean_object* x_14053; lean_object* x_14054; +x_14050 = lean_ctor_get(x_14049, 0); +lean_inc(x_14050); +x_14051 = lean_ctor_get(x_14049, 1); +lean_inc(x_14051); +lean_dec(x_14049); +x_14052 = lean_ctor_get(x_14050, 0); +lean_inc(x_14052); +x_14053 = lean_ctor_get(x_14050, 1); +lean_inc(x_14053); +lean_dec(x_14050); +lean_inc(x_5); +lean_inc(x_4); +lean_inc(x_2); +x_14054 = l_Lean_IR_ToIR_lowerLet___lambda__3(x_2, x_14046, x_14035, x_14041, x_14036, x_14052, x_14053, x_4, x_5, x_14051); +if (lean_obj_tag(x_14054) == 0) +{ +lean_object* x_14055; lean_object* x_14056; lean_object* x_14057; lean_object* x_14058; lean_object* x_14059; lean_object* x_14060; lean_object* x_14061; +x_14055 = lean_ctor_get(x_14054, 0); +lean_inc(x_14055); +x_14056 = lean_ctor_get(x_14054, 1); +lean_inc(x_14056); +lean_dec(x_14054); +x_14057 = lean_ctor_get(x_14055, 0); +lean_inc(x_14057); +x_14058 = lean_ctor_get(x_14055, 1); +lean_inc(x_14058); +if (lean_is_exclusive(x_14055)) { + lean_ctor_release(x_14055, 0); + lean_ctor_release(x_14055, 1); + x_14059 = x_14055; +} else { + lean_dec_ref(x_14055); + x_14059 = lean_box(0); +} +if (lean_is_scalar(x_14027)) { + x_14060 = lean_alloc_ctor(1, 1, 0); +} else { + x_14060 = x_14027; +} +lean_ctor_set(x_14060, 0, x_14057); +if (lean_is_scalar(x_14059)) { + x_14061 = lean_alloc_ctor(0, 2, 0); +} else { + x_14061 = x_14059; +} +lean_ctor_set(x_14061, 0, x_14060); +lean_ctor_set(x_14061, 1, x_14058); +x_11921 = x_14061; +x_11922 = x_14056; +goto block_13838; +} +else +{ +lean_object* x_14062; lean_object* x_14063; lean_object* x_14064; lean_object* x_14065; +lean_dec(x_14027); +lean_dec(x_11919); +lean_dec(x_11917); +lean_dec(x_11911); +lean_dec(x_11910); +lean_dec(x_153); +lean_dec(x_5); +lean_dec(x_4); +lean_dec(x_2); +lean_dec(x_1); +x_14062 = lean_ctor_get(x_14054, 0); +lean_inc(x_14062); +x_14063 = lean_ctor_get(x_14054, 1); +lean_inc(x_14063); +if (lean_is_exclusive(x_14054)) { + lean_ctor_release(x_14054, 0); + lean_ctor_release(x_14054, 1); + x_14064 = x_14054; +} else { + lean_dec_ref(x_14054); + x_14064 = lean_box(0); +} +if (lean_is_scalar(x_14064)) { + x_14065 = lean_alloc_ctor(1, 2, 0); +} else { + x_14065 = x_14064; +} +lean_ctor_set(x_14065, 0, x_14062); +lean_ctor_set(x_14065, 1, x_14063); +return x_14065; +} +} +else +{ +lean_object* x_14066; lean_object* x_14067; lean_object* x_14068; lean_object* x_14069; +lean_dec(x_14046); +lean_dec(x_14041); +lean_dec(x_14036); +lean_dec(x_14035); +lean_dec(x_14027); +lean_dec(x_11919); +lean_dec(x_11917); +lean_dec(x_11911); +lean_dec(x_11910); +lean_dec(x_153); +lean_dec(x_5); +lean_dec(x_4); +lean_dec(x_2); +lean_dec(x_1); +x_14066 = lean_ctor_get(x_14049, 0); +lean_inc(x_14066); +x_14067 = lean_ctor_get(x_14049, 1); +lean_inc(x_14067); +if (lean_is_exclusive(x_14049)) { + lean_ctor_release(x_14049, 0); + lean_ctor_release(x_14049, 1); + x_14068 = x_14049; +} else { + lean_dec_ref(x_14049); + x_14068 = lean_box(0); +} +if 
(lean_is_scalar(x_14068)) { + x_14069 = lean_alloc_ctor(1, 2, 0); +} else { + x_14069 = x_14068; +} +lean_ctor_set(x_14069, 0, x_14066); +lean_ctor_set(x_14069, 1, x_14067); +return x_14069; +} +} +else +{ +lean_object* x_14070; lean_object* x_14071; lean_object* x_14072; lean_object* x_14073; lean_object* x_14074; lean_object* x_14075; lean_object* x_14076; lean_object* x_14077; lean_object* x_14078; +lean_dec(x_14030); +lean_dec(x_14028); +lean_inc(x_11919); +lean_inc(x_153); +x_14070 = lean_alloc_ctor(6, 2, 0); +lean_ctor_set(x_14070, 0, x_153); +lean_ctor_set(x_14070, 1, x_11919); +x_14071 = lean_ctor_get(x_1, 0); +lean_inc(x_14071); +x_14072 = l_Lean_IR_ToIR_bindVar(x_14071, x_11920, x_4, x_5, x_14025); +x_14073 = lean_ctor_get(x_14072, 0); +lean_inc(x_14073); +x_14074 = lean_ctor_get(x_14072, 1); +lean_inc(x_14074); +lean_dec(x_14072); +x_14075 = lean_ctor_get(x_14073, 0); +lean_inc(x_14075); +x_14076 = lean_ctor_get(x_14073, 1); +lean_inc(x_14076); +lean_dec(x_14073); +x_14077 = lean_ctor_get(x_1, 2); +lean_inc(x_14077); +lean_inc(x_5); +lean_inc(x_4); +x_14078 = l_Lean_IR_ToIR_lowerType(x_14077, x_14076, x_4, x_5, x_14074); +if (lean_obj_tag(x_14078) == 0) +{ +lean_object* x_14079; lean_object* x_14080; lean_object* x_14081; lean_object* x_14082; lean_object* x_14083; +x_14079 = lean_ctor_get(x_14078, 0); +lean_inc(x_14079); +x_14080 = lean_ctor_get(x_14078, 1); +lean_inc(x_14080); +lean_dec(x_14078); +x_14081 = lean_ctor_get(x_14079, 0); +lean_inc(x_14081); +x_14082 = lean_ctor_get(x_14079, 1); +lean_inc(x_14082); +lean_dec(x_14079); +lean_inc(x_5); +lean_inc(x_4); +lean_inc(x_2); +x_14083 = l_Lean_IR_ToIR_lowerLet___lambda__1(x_2, x_14075, x_14070, x_14081, x_14082, x_4, x_5, x_14080); +if (lean_obj_tag(x_14083) == 0) +{ +lean_object* x_14084; lean_object* x_14085; lean_object* x_14086; lean_object* x_14087; lean_object* x_14088; lean_object* x_14089; lean_object* x_14090; +x_14084 = lean_ctor_get(x_14083, 0); +lean_inc(x_14084); +x_14085 = lean_ctor_get(x_14083, 1); +lean_inc(x_14085); +lean_dec(x_14083); +x_14086 = lean_ctor_get(x_14084, 0); +lean_inc(x_14086); +x_14087 = lean_ctor_get(x_14084, 1); +lean_inc(x_14087); +if (lean_is_exclusive(x_14084)) { + lean_ctor_release(x_14084, 0); + lean_ctor_release(x_14084, 1); + x_14088 = x_14084; +} else { + lean_dec_ref(x_14084); + x_14088 = lean_box(0); +} +if (lean_is_scalar(x_14027)) { + x_14089 = lean_alloc_ctor(1, 1, 0); +} else { + x_14089 = x_14027; +} +lean_ctor_set(x_14089, 0, x_14086); +if (lean_is_scalar(x_14088)) { + x_14090 = lean_alloc_ctor(0, 2, 0); +} else { + x_14090 = x_14088; +} +lean_ctor_set(x_14090, 0, x_14089); +lean_ctor_set(x_14090, 1, x_14087); +x_11921 = x_14090; +x_11922 = x_14085; +goto block_13838; +} +else +{ +lean_object* x_14091; lean_object* x_14092; lean_object* x_14093; lean_object* x_14094; +lean_dec(x_14027); +lean_dec(x_11919); +lean_dec(x_11917); +lean_dec(x_11911); +lean_dec(x_11910); +lean_dec(x_153); +lean_dec(x_5); +lean_dec(x_4); +lean_dec(x_2); +lean_dec(x_1); +x_14091 = lean_ctor_get(x_14083, 0); +lean_inc(x_14091); +x_14092 = lean_ctor_get(x_14083, 1); +lean_inc(x_14092); +if (lean_is_exclusive(x_14083)) { + lean_ctor_release(x_14083, 0); + lean_ctor_release(x_14083, 1); + x_14093 = x_14083; +} else { + lean_dec_ref(x_14083); + x_14093 = lean_box(0); +} +if (lean_is_scalar(x_14093)) { + x_14094 = lean_alloc_ctor(1, 2, 0); +} else { + x_14094 = x_14093; +} +lean_ctor_set(x_14094, 0, x_14091); +lean_ctor_set(x_14094, 1, x_14092); +return x_14094; +} +} +else +{ +lean_object* x_14095; 
lean_object* x_14096; lean_object* x_14097; lean_object* x_14098; +lean_dec(x_14075); +lean_dec(x_14070); +lean_dec(x_14027); +lean_dec(x_11919); +lean_dec(x_11917); +lean_dec(x_11911); +lean_dec(x_11910); +lean_dec(x_153); +lean_dec(x_5); +lean_dec(x_4); +lean_dec(x_2); +lean_dec(x_1); +x_14095 = lean_ctor_get(x_14078, 0); +lean_inc(x_14095); +x_14096 = lean_ctor_get(x_14078, 1); +lean_inc(x_14096); +if (lean_is_exclusive(x_14078)) { + lean_ctor_release(x_14078, 0); + lean_ctor_release(x_14078, 1); + x_14097 = x_14078; +} else { + lean_dec_ref(x_14078); + x_14097 = lean_box(0); +} +if (lean_is_scalar(x_14097)) { + x_14098 = lean_alloc_ctor(1, 2, 0); +} else { + x_14098 = x_14097; +} +lean_ctor_set(x_14098, 0, x_14095); +lean_ctor_set(x_14098, 1, x_14096); +return x_14098; +} +} +} +else +{ +lean_object* x_14099; lean_object* x_14100; lean_object* x_14101; lean_object* x_14102; lean_object* x_14103; lean_object* x_14104; lean_object* x_14105; lean_object* x_14106; lean_object* x_14107; +lean_dec(x_14030); +lean_dec(x_14028); +lean_inc(x_11919); +lean_inc(x_153); +x_14099 = lean_alloc_ctor(7, 2, 0); +lean_ctor_set(x_14099, 0, x_153); +lean_ctor_set(x_14099, 1, x_11919); +x_14100 = lean_ctor_get(x_1, 0); +lean_inc(x_14100); +x_14101 = l_Lean_IR_ToIR_bindVar(x_14100, x_11920, x_4, x_5, x_14025); +x_14102 = lean_ctor_get(x_14101, 0); +lean_inc(x_14102); +x_14103 = lean_ctor_get(x_14101, 1); +lean_inc(x_14103); +lean_dec(x_14101); +x_14104 = lean_ctor_get(x_14102, 0); +lean_inc(x_14104); +x_14105 = lean_ctor_get(x_14102, 1); +lean_inc(x_14105); +lean_dec(x_14102); +x_14106 = lean_box(7); +lean_inc(x_5); +lean_inc(x_4); +lean_inc(x_2); +x_14107 = l_Lean_IR_ToIR_lowerLet___lambda__1(x_2, x_14104, x_14099, x_14106, x_14105, x_4, x_5, x_14103); +if (lean_obj_tag(x_14107) == 0) +{ +lean_object* x_14108; lean_object* x_14109; lean_object* x_14110; lean_object* x_14111; lean_object* x_14112; lean_object* x_14113; lean_object* x_14114; +x_14108 = lean_ctor_get(x_14107, 0); +lean_inc(x_14108); +x_14109 = lean_ctor_get(x_14107, 1); +lean_inc(x_14109); +lean_dec(x_14107); +x_14110 = lean_ctor_get(x_14108, 0); +lean_inc(x_14110); +x_14111 = lean_ctor_get(x_14108, 1); +lean_inc(x_14111); +if (lean_is_exclusive(x_14108)) { + lean_ctor_release(x_14108, 0); + lean_ctor_release(x_14108, 1); + x_14112 = x_14108; +} else { + lean_dec_ref(x_14108); + x_14112 = lean_box(0); +} +if (lean_is_scalar(x_14027)) { + x_14113 = lean_alloc_ctor(1, 1, 0); +} else { + x_14113 = x_14027; +} +lean_ctor_set(x_14113, 0, x_14110); +if (lean_is_scalar(x_14112)) { + x_14114 = lean_alloc_ctor(0, 2, 0); +} else { + x_14114 = x_14112; +} +lean_ctor_set(x_14114, 0, x_14113); +lean_ctor_set(x_14114, 1, x_14111); +x_11921 = x_14114; +x_11922 = x_14109; +goto block_13838; +} +else +{ +lean_object* x_14115; lean_object* x_14116; lean_object* x_14117; lean_object* x_14118; +lean_dec(x_14027); +lean_dec(x_11919); +lean_dec(x_11917); +lean_dec(x_11911); +lean_dec(x_11910); +lean_dec(x_153); +lean_dec(x_5); +lean_dec(x_4); +lean_dec(x_2); +lean_dec(x_1); +x_14115 = lean_ctor_get(x_14107, 0); +lean_inc(x_14115); +x_14116 = lean_ctor_get(x_14107, 1); +lean_inc(x_14116); +if (lean_is_exclusive(x_14107)) { + lean_ctor_release(x_14107, 0); + lean_ctor_release(x_14107, 1); + x_14117 = x_14107; +} else { + lean_dec_ref(x_14107); + x_14117 = lean_box(0); +} +if (lean_is_scalar(x_14117)) { + x_14118 = lean_alloc_ctor(1, 2, 0); +} else { + x_14118 = x_14117; +} +lean_ctor_set(x_14118, 0, x_14115); +lean_ctor_set(x_14118, 1, x_14116); +return x_14118; +} 
+} +} +} +block_13838: +{ +lean_object* x_11923; +x_11923 = lean_ctor_get(x_11921, 0); +lean_inc(x_11923); +if (lean_obj_tag(x_11923) == 0) +{ +uint8_t x_11924; +lean_dec(x_11917); +x_11924 = !lean_is_exclusive(x_11921); +if (x_11924 == 0) +{ +lean_object* x_11925; lean_object* x_11926; lean_object* x_11927; lean_object* x_11928; lean_object* x_11929; lean_object* x_11930; lean_object* x_11931; uint8_t x_11932; lean_object* x_11933; +x_11925 = lean_ctor_get(x_11921, 1); +x_11926 = lean_ctor_get(x_11921, 0); +lean_dec(x_11926); +x_11927 = lean_st_ref_get(x_5, x_11922); +x_11928 = lean_ctor_get(x_11927, 0); +lean_inc(x_11928); +x_11929 = lean_ctor_get(x_11927, 1); +lean_inc(x_11929); +if (lean_is_exclusive(x_11927)) { + lean_ctor_release(x_11927, 0); + lean_ctor_release(x_11927, 1); + x_11930 = x_11927; +} else { + lean_dec_ref(x_11927); + x_11930 = lean_box(0); +} +x_11931 = lean_ctor_get(x_11928, 0); +lean_inc(x_11931); +lean_dec(x_11928); +x_11932 = 0; +lean_inc(x_153); +lean_inc(x_11931); +x_11933 = l_Lean_Environment_find_x3f(x_11931, x_153, x_11932); +if (lean_obj_tag(x_11933) == 0) +{ +lean_object* x_11934; lean_object* x_11935; +lean_dec(x_11931); +lean_dec(x_11930); +lean_free_object(x_11921); +lean_dec(x_11919); +lean_dec(x_11911); +lean_dec(x_11910); +lean_dec(x_153); +lean_dec(x_2); +lean_dec(x_1); +x_11934 = l_Lean_IR_ToIR_lowerLet___closed__6; +x_11935 = l_panic___at_Lean_IR_ToIR_lowerAlt_loop___spec__1(x_11934, x_11925, x_4, x_5, x_11929); +return x_11935; +} +else +{ +lean_object* x_11936; +x_11936 = lean_ctor_get(x_11933, 0); +lean_inc(x_11936); +lean_dec(x_11933); +switch (lean_obj_tag(x_11936)) { +case 0: +{ +uint8_t x_11937; +lean_dec(x_11931); +lean_dec(x_11911); +lean_dec(x_11910); +x_11937 = !lean_is_exclusive(x_11936); +if (x_11937 == 0) +{ +lean_object* x_11938; lean_object* x_11939; uint8_t x_11940; +x_11938 = lean_ctor_get(x_11936, 0); +lean_dec(x_11938); +x_11939 = l_Lean_IR_ToIR_lowerLet___closed__9; +x_11940 = lean_name_eq(x_153, x_11939); +if (x_11940 == 0) +{ +lean_object* x_11941; uint8_t x_11942; +x_11941 = l_Lean_IR_ToIR_lowerLet___closed__11; +x_11942 = lean_name_eq(x_153, x_11941); +if (x_11942 == 0) +{ +lean_object* x_11943; lean_object* x_11944; lean_object* x_11945; +lean_dec(x_11930); +lean_free_object(x_11921); +lean_inc(x_153); +x_11943 = l_Lean_IR_ToIR_findDecl(x_153, x_11925, x_4, x_5, x_11929); +x_11944 = lean_ctor_get(x_11943, 0); +lean_inc(x_11944); +x_11945 = lean_ctor_get(x_11944, 0); +lean_inc(x_11945); +if (lean_obj_tag(x_11945) == 0) +{ +uint8_t x_11946; +lean_dec(x_11919); +lean_dec(x_2); +lean_dec(x_1); +x_11946 = !lean_is_exclusive(x_11943); +if (x_11946 == 0) +{ +lean_object* x_11947; lean_object* x_11948; uint8_t x_11949; +x_11947 = lean_ctor_get(x_11943, 1); +x_11948 = lean_ctor_get(x_11943, 0); +lean_dec(x_11948); +x_11949 = !lean_is_exclusive(x_11944); +if (x_11949 == 0) +{ +lean_object* x_11950; lean_object* x_11951; uint8_t x_11952; lean_object* x_11953; lean_object* x_11954; lean_object* x_11955; lean_object* x_11956; lean_object* x_11957; lean_object* x_11958; +x_11950 = lean_ctor_get(x_11944, 1); +x_11951 = lean_ctor_get(x_11944, 0); +lean_dec(x_11951); +x_11952 = 1; +x_11953 = l_Lean_IR_ToIR_lowerLet___closed__14; +x_11954 = l_Lean_Name_toString(x_153, x_11952, x_11953); +lean_ctor_set_tag(x_11936, 3); +lean_ctor_set(x_11936, 0, x_11954); +x_11955 = l_Lean_IR_ToIR_lowerLet___closed__13; +lean_ctor_set_tag(x_11944, 5); +lean_ctor_set(x_11944, 1, x_11936); +lean_ctor_set(x_11944, 0, x_11955); +x_11956 = 
l_Lean_IR_ToIR_lowerLet___closed__16; +lean_ctor_set_tag(x_11943, 5); +lean_ctor_set(x_11943, 1, x_11956); +x_11957 = l_Lean_MessageData_ofFormat(x_11943); +x_11958 = l_Lean_throwError___at_Lean_IR_ToIR_lowerLet___spec__1(x_11957, x_11950, x_4, x_5, x_11947); +lean_dec(x_5); +lean_dec(x_4); +lean_dec(x_11950); +return x_11958; +} +else +{ +lean_object* x_11959; uint8_t x_11960; lean_object* x_11961; lean_object* x_11962; lean_object* x_11963; lean_object* x_11964; lean_object* x_11965; lean_object* x_11966; lean_object* x_11967; +x_11959 = lean_ctor_get(x_11944, 1); +lean_inc(x_11959); +lean_dec(x_11944); +x_11960 = 1; +x_11961 = l_Lean_IR_ToIR_lowerLet___closed__14; +x_11962 = l_Lean_Name_toString(x_153, x_11960, x_11961); +lean_ctor_set_tag(x_11936, 3); +lean_ctor_set(x_11936, 0, x_11962); +x_11963 = l_Lean_IR_ToIR_lowerLet___closed__13; +x_11964 = lean_alloc_ctor(5, 2, 0); +lean_ctor_set(x_11964, 0, x_11963); +lean_ctor_set(x_11964, 1, x_11936); +x_11965 = l_Lean_IR_ToIR_lowerLet___closed__16; +lean_ctor_set_tag(x_11943, 5); +lean_ctor_set(x_11943, 1, x_11965); +lean_ctor_set(x_11943, 0, x_11964); +x_11966 = l_Lean_MessageData_ofFormat(x_11943); +x_11967 = l_Lean_throwError___at_Lean_IR_ToIR_lowerLet___spec__1(x_11966, x_11959, x_4, x_5, x_11947); +lean_dec(x_5); +lean_dec(x_4); +lean_dec(x_11959); +return x_11967; +} +} +else +{ +lean_object* x_11968; lean_object* x_11969; lean_object* x_11970; uint8_t x_11971; lean_object* x_11972; lean_object* x_11973; lean_object* x_11974; lean_object* x_11975; lean_object* x_11976; lean_object* x_11977; lean_object* x_11978; lean_object* x_11979; +x_11968 = lean_ctor_get(x_11943, 1); +lean_inc(x_11968); +lean_dec(x_11943); +x_11969 = lean_ctor_get(x_11944, 1); +lean_inc(x_11969); +if (lean_is_exclusive(x_11944)) { + lean_ctor_release(x_11944, 0); + lean_ctor_release(x_11944, 1); + x_11970 = x_11944; +} else { + lean_dec_ref(x_11944); + x_11970 = lean_box(0); +} +x_11971 = 1; +x_11972 = l_Lean_IR_ToIR_lowerLet___closed__14; +x_11973 = l_Lean_Name_toString(x_153, x_11971, x_11972); +lean_ctor_set_tag(x_11936, 3); +lean_ctor_set(x_11936, 0, x_11973); +x_11974 = l_Lean_IR_ToIR_lowerLet___closed__13; +if (lean_is_scalar(x_11970)) { + x_11975 = lean_alloc_ctor(5, 2, 0); +} else { + x_11975 = x_11970; + lean_ctor_set_tag(x_11975, 5); +} +lean_ctor_set(x_11975, 0, x_11974); +lean_ctor_set(x_11975, 1, x_11936); +x_11976 = l_Lean_IR_ToIR_lowerLet___closed__16; +x_11977 = lean_alloc_ctor(5, 2, 0); +lean_ctor_set(x_11977, 0, x_11975); +lean_ctor_set(x_11977, 1, x_11976); +x_11978 = l_Lean_MessageData_ofFormat(x_11977); +x_11979 = l_Lean_throwError___at_Lean_IR_ToIR_lowerLet___spec__1(x_11978, x_11969, x_4, x_5, x_11968); +lean_dec(x_5); +lean_dec(x_4); +lean_dec(x_11969); +return x_11979; +} +} +else +{ +lean_object* x_11980; uint8_t x_11981; +lean_free_object(x_11936); +x_11980 = lean_ctor_get(x_11943, 1); +lean_inc(x_11980); +lean_dec(x_11943); +x_11981 = !lean_is_exclusive(x_11944); +if (x_11981 == 0) +{ +lean_object* x_11982; lean_object* x_11983; lean_object* x_11984; lean_object* x_11985; lean_object* x_11986; lean_object* x_11987; uint8_t x_11988; +x_11982 = lean_ctor_get(x_11944, 1); +x_11983 = lean_ctor_get(x_11944, 0); +lean_dec(x_11983); +x_11984 = lean_ctor_get(x_11945, 0); +lean_inc(x_11984); +lean_dec(x_11945); +x_11985 = lean_array_get_size(x_11919); +x_11986 = l_Lean_IR_Decl_params(x_11984); +lean_dec(x_11984); +x_11987 = lean_array_get_size(x_11986); +lean_dec(x_11986); +x_11988 = lean_nat_dec_lt(x_11985, x_11987); +if (x_11988 == 0) +{ 
+uint8_t x_11989; +x_11989 = lean_nat_dec_eq(x_11985, x_11987); +if (x_11989 == 0) +{ +lean_object* x_11990; lean_object* x_11991; lean_object* x_11992; lean_object* x_11993; lean_object* x_11994; lean_object* x_11995; lean_object* x_11996; lean_object* x_11997; lean_object* x_11998; lean_object* x_11999; lean_object* x_12000; lean_object* x_12001; lean_object* x_12002; lean_object* x_12003; lean_object* x_12004; lean_object* x_12005; +x_11990 = lean_unsigned_to_nat(0u); +x_11991 = l_Array_extract___rarg(x_11919, x_11990, x_11987); +x_11992 = l_Array_extract___rarg(x_11919, x_11987, x_11985); +lean_dec(x_11985); +lean_dec(x_11919); +lean_ctor_set_tag(x_11944, 6); +lean_ctor_set(x_11944, 1, x_11991); +lean_ctor_set(x_11944, 0, x_153); +x_11993 = lean_ctor_get(x_1, 0); +lean_inc(x_11993); +x_11994 = l_Lean_IR_ToIR_bindVar(x_11993, x_11982, x_4, x_5, x_11980); +x_11995 = lean_ctor_get(x_11994, 0); +lean_inc(x_11995); +x_11996 = lean_ctor_get(x_11994, 1); +lean_inc(x_11996); +lean_dec(x_11994); +x_11997 = lean_ctor_get(x_11995, 0); +lean_inc(x_11997); +x_11998 = lean_ctor_get(x_11995, 1); +lean_inc(x_11998); +lean_dec(x_11995); +x_11999 = l_Lean_IR_ToIR_newVar(x_11998, x_4, x_5, x_11996); +x_12000 = lean_ctor_get(x_11999, 0); +lean_inc(x_12000); +x_12001 = lean_ctor_get(x_11999, 1); +lean_inc(x_12001); +lean_dec(x_11999); +x_12002 = lean_ctor_get(x_12000, 0); +lean_inc(x_12002); +x_12003 = lean_ctor_get(x_12000, 1); +lean_inc(x_12003); +lean_dec(x_12000); +x_12004 = lean_ctor_get(x_1, 2); +lean_inc(x_12004); +lean_dec(x_1); +lean_inc(x_5); +lean_inc(x_4); +x_12005 = l_Lean_IR_ToIR_lowerType(x_12004, x_12003, x_4, x_5, x_12001); +if (lean_obj_tag(x_12005) == 0) +{ +lean_object* x_12006; lean_object* x_12007; lean_object* x_12008; lean_object* x_12009; lean_object* x_12010; +x_12006 = lean_ctor_get(x_12005, 0); +lean_inc(x_12006); +x_12007 = lean_ctor_get(x_12005, 1); +lean_inc(x_12007); +lean_dec(x_12005); +x_12008 = lean_ctor_get(x_12006, 0); +lean_inc(x_12008); +x_12009 = lean_ctor_get(x_12006, 1); +lean_inc(x_12009); +lean_dec(x_12006); +x_12010 = l_Lean_IR_ToIR_lowerLet___lambda__3(x_2, x_12002, x_11992, x_11997, x_11944, x_12008, x_12009, x_4, x_5, x_12007); +return x_12010; +} +else +{ +uint8_t x_12011; +lean_dec(x_12002); +lean_dec(x_11997); +lean_dec(x_11944); +lean_dec(x_11992); +lean_dec(x_5); +lean_dec(x_4); +lean_dec(x_2); +x_12011 = !lean_is_exclusive(x_12005); +if (x_12011 == 0) +{ +return x_12005; +} +else +{ +lean_object* x_12012; lean_object* x_12013; lean_object* x_12014; +x_12012 = lean_ctor_get(x_12005, 0); +x_12013 = lean_ctor_get(x_12005, 1); +lean_inc(x_12013); +lean_inc(x_12012); +lean_dec(x_12005); +x_12014 = lean_alloc_ctor(1, 2, 0); +lean_ctor_set(x_12014, 0, x_12012); +lean_ctor_set(x_12014, 1, x_12013); +return x_12014; +} +} +} +else +{ +lean_object* x_12015; lean_object* x_12016; lean_object* x_12017; lean_object* x_12018; lean_object* x_12019; lean_object* x_12020; lean_object* x_12021; lean_object* x_12022; +lean_dec(x_11987); +lean_dec(x_11985); +lean_ctor_set_tag(x_11944, 6); +lean_ctor_set(x_11944, 1, x_11919); +lean_ctor_set(x_11944, 0, x_153); +x_12015 = lean_ctor_get(x_1, 0); +lean_inc(x_12015); +x_12016 = l_Lean_IR_ToIR_bindVar(x_12015, x_11982, x_4, x_5, x_11980); +x_12017 = lean_ctor_get(x_12016, 0); +lean_inc(x_12017); +x_12018 = lean_ctor_get(x_12016, 1); +lean_inc(x_12018); +lean_dec(x_12016); +x_12019 = lean_ctor_get(x_12017, 0); +lean_inc(x_12019); +x_12020 = lean_ctor_get(x_12017, 1); +lean_inc(x_12020); +lean_dec(x_12017); +x_12021 = 
lean_ctor_get(x_1, 2); +lean_inc(x_12021); +lean_dec(x_1); +lean_inc(x_5); +lean_inc(x_4); +x_12022 = l_Lean_IR_ToIR_lowerType(x_12021, x_12020, x_4, x_5, x_12018); +if (lean_obj_tag(x_12022) == 0) +{ +lean_object* x_12023; lean_object* x_12024; lean_object* x_12025; lean_object* x_12026; lean_object* x_12027; +x_12023 = lean_ctor_get(x_12022, 0); +lean_inc(x_12023); +x_12024 = lean_ctor_get(x_12022, 1); +lean_inc(x_12024); +lean_dec(x_12022); +x_12025 = lean_ctor_get(x_12023, 0); +lean_inc(x_12025); +x_12026 = lean_ctor_get(x_12023, 1); +lean_inc(x_12026); +lean_dec(x_12023); +x_12027 = l_Lean_IR_ToIR_lowerLet___lambda__1(x_2, x_12019, x_11944, x_12025, x_12026, x_4, x_5, x_12024); +return x_12027; +} +else +{ +uint8_t x_12028; +lean_dec(x_12019); +lean_dec(x_11944); +lean_dec(x_5); +lean_dec(x_4); +lean_dec(x_2); +x_12028 = !lean_is_exclusive(x_12022); +if (x_12028 == 0) +{ +return x_12022; +} +else +{ +lean_object* x_12029; lean_object* x_12030; lean_object* x_12031; +x_12029 = lean_ctor_get(x_12022, 0); +x_12030 = lean_ctor_get(x_12022, 1); +lean_inc(x_12030); +lean_inc(x_12029); +lean_dec(x_12022); +x_12031 = lean_alloc_ctor(1, 2, 0); +lean_ctor_set(x_12031, 0, x_12029); +lean_ctor_set(x_12031, 1, x_12030); +return x_12031; +} +} +} +} +else +{ +lean_object* x_12032; lean_object* x_12033; lean_object* x_12034; lean_object* x_12035; lean_object* x_12036; lean_object* x_12037; lean_object* x_12038; lean_object* x_12039; +lean_dec(x_11987); +lean_dec(x_11985); +lean_ctor_set_tag(x_11944, 7); +lean_ctor_set(x_11944, 1, x_11919); +lean_ctor_set(x_11944, 0, x_153); +x_12032 = lean_ctor_get(x_1, 0); +lean_inc(x_12032); +lean_dec(x_1); +x_12033 = l_Lean_IR_ToIR_bindVar(x_12032, x_11982, x_4, x_5, x_11980); +x_12034 = lean_ctor_get(x_12033, 0); +lean_inc(x_12034); +x_12035 = lean_ctor_get(x_12033, 1); +lean_inc(x_12035); +lean_dec(x_12033); +x_12036 = lean_ctor_get(x_12034, 0); +lean_inc(x_12036); +x_12037 = lean_ctor_get(x_12034, 1); +lean_inc(x_12037); +lean_dec(x_12034); +x_12038 = lean_box(7); +x_12039 = l_Lean_IR_ToIR_lowerLet___lambda__1(x_2, x_12036, x_11944, x_12038, x_12037, x_4, x_5, x_12035); +return x_12039; +} +} +else +{ +lean_object* x_12040; lean_object* x_12041; lean_object* x_12042; lean_object* x_12043; lean_object* x_12044; uint8_t x_12045; +x_12040 = lean_ctor_get(x_11944, 1); +lean_inc(x_12040); +lean_dec(x_11944); +x_12041 = lean_ctor_get(x_11945, 0); +lean_inc(x_12041); +lean_dec(x_11945); +x_12042 = lean_array_get_size(x_11919); +x_12043 = l_Lean_IR_Decl_params(x_12041); +lean_dec(x_12041); +x_12044 = lean_array_get_size(x_12043); +lean_dec(x_12043); +x_12045 = lean_nat_dec_lt(x_12042, x_12044); +if (x_12045 == 0) +{ +uint8_t x_12046; +x_12046 = lean_nat_dec_eq(x_12042, x_12044); +if (x_12046 == 0) +{ +lean_object* x_12047; lean_object* x_12048; lean_object* x_12049; lean_object* x_12050; lean_object* x_12051; lean_object* x_12052; lean_object* x_12053; lean_object* x_12054; lean_object* x_12055; lean_object* x_12056; lean_object* x_12057; lean_object* x_12058; lean_object* x_12059; lean_object* x_12060; lean_object* x_12061; lean_object* x_12062; lean_object* x_12063; +x_12047 = lean_unsigned_to_nat(0u); +x_12048 = l_Array_extract___rarg(x_11919, x_12047, x_12044); +x_12049 = l_Array_extract___rarg(x_11919, x_12044, x_12042); +lean_dec(x_12042); +lean_dec(x_11919); +x_12050 = lean_alloc_ctor(6, 2, 0); +lean_ctor_set(x_12050, 0, x_153); +lean_ctor_set(x_12050, 1, x_12048); +x_12051 = lean_ctor_get(x_1, 0); +lean_inc(x_12051); +x_12052 = l_Lean_IR_ToIR_bindVar(x_12051, 
x_12040, x_4, x_5, x_11980); +x_12053 = lean_ctor_get(x_12052, 0); +lean_inc(x_12053); +x_12054 = lean_ctor_get(x_12052, 1); +lean_inc(x_12054); +lean_dec(x_12052); +x_12055 = lean_ctor_get(x_12053, 0); +lean_inc(x_12055); +x_12056 = lean_ctor_get(x_12053, 1); +lean_inc(x_12056); +lean_dec(x_12053); +x_12057 = l_Lean_IR_ToIR_newVar(x_12056, x_4, x_5, x_12054); +x_12058 = lean_ctor_get(x_12057, 0); +lean_inc(x_12058); +x_12059 = lean_ctor_get(x_12057, 1); +lean_inc(x_12059); +lean_dec(x_12057); +x_12060 = lean_ctor_get(x_12058, 0); +lean_inc(x_12060); +x_12061 = lean_ctor_get(x_12058, 1); +lean_inc(x_12061); +lean_dec(x_12058); +x_12062 = lean_ctor_get(x_1, 2); +lean_inc(x_12062); +lean_dec(x_1); +lean_inc(x_5); +lean_inc(x_4); +x_12063 = l_Lean_IR_ToIR_lowerType(x_12062, x_12061, x_4, x_5, x_12059); +if (lean_obj_tag(x_12063) == 0) +{ +lean_object* x_12064; lean_object* x_12065; lean_object* x_12066; lean_object* x_12067; lean_object* x_12068; +x_12064 = lean_ctor_get(x_12063, 0); +lean_inc(x_12064); +x_12065 = lean_ctor_get(x_12063, 1); +lean_inc(x_12065); +lean_dec(x_12063); +x_12066 = lean_ctor_get(x_12064, 0); +lean_inc(x_12066); +x_12067 = lean_ctor_get(x_12064, 1); +lean_inc(x_12067); +lean_dec(x_12064); +x_12068 = l_Lean_IR_ToIR_lowerLet___lambda__3(x_2, x_12060, x_12049, x_12055, x_12050, x_12066, x_12067, x_4, x_5, x_12065); +return x_12068; +} +else +{ +lean_object* x_12069; lean_object* x_12070; lean_object* x_12071; lean_object* x_12072; +lean_dec(x_12060); +lean_dec(x_12055); +lean_dec(x_12050); +lean_dec(x_12049); +lean_dec(x_5); +lean_dec(x_4); +lean_dec(x_2); +x_12069 = lean_ctor_get(x_12063, 0); +lean_inc(x_12069); +x_12070 = lean_ctor_get(x_12063, 1); +lean_inc(x_12070); +if (lean_is_exclusive(x_12063)) { + lean_ctor_release(x_12063, 0); + lean_ctor_release(x_12063, 1); + x_12071 = x_12063; +} else { + lean_dec_ref(x_12063); + x_12071 = lean_box(0); +} +if (lean_is_scalar(x_12071)) { + x_12072 = lean_alloc_ctor(1, 2, 0); +} else { + x_12072 = x_12071; +} +lean_ctor_set(x_12072, 0, x_12069); +lean_ctor_set(x_12072, 1, x_12070); +return x_12072; +} +} +else +{ +lean_object* x_12073; lean_object* x_12074; lean_object* x_12075; lean_object* x_12076; lean_object* x_12077; lean_object* x_12078; lean_object* x_12079; lean_object* x_12080; lean_object* x_12081; +lean_dec(x_12044); +lean_dec(x_12042); +x_12073 = lean_alloc_ctor(6, 2, 0); +lean_ctor_set(x_12073, 0, x_153); +lean_ctor_set(x_12073, 1, x_11919); +x_12074 = lean_ctor_get(x_1, 0); +lean_inc(x_12074); +x_12075 = l_Lean_IR_ToIR_bindVar(x_12074, x_12040, x_4, x_5, x_11980); +x_12076 = lean_ctor_get(x_12075, 0); +lean_inc(x_12076); +x_12077 = lean_ctor_get(x_12075, 1); +lean_inc(x_12077); +lean_dec(x_12075); +x_12078 = lean_ctor_get(x_12076, 0); +lean_inc(x_12078); +x_12079 = lean_ctor_get(x_12076, 1); +lean_inc(x_12079); +lean_dec(x_12076); +x_12080 = lean_ctor_get(x_1, 2); +lean_inc(x_12080); +lean_dec(x_1); +lean_inc(x_5); +lean_inc(x_4); +x_12081 = l_Lean_IR_ToIR_lowerType(x_12080, x_12079, x_4, x_5, x_12077); +if (lean_obj_tag(x_12081) == 0) +{ +lean_object* x_12082; lean_object* x_12083; lean_object* x_12084; lean_object* x_12085; lean_object* x_12086; +x_12082 = lean_ctor_get(x_12081, 0); +lean_inc(x_12082); +x_12083 = lean_ctor_get(x_12081, 1); +lean_inc(x_12083); +lean_dec(x_12081); +x_12084 = lean_ctor_get(x_12082, 0); +lean_inc(x_12084); +x_12085 = lean_ctor_get(x_12082, 1); +lean_inc(x_12085); +lean_dec(x_12082); +x_12086 = l_Lean_IR_ToIR_lowerLet___lambda__1(x_2, x_12078, x_12073, x_12084, x_12085, x_4, x_5, 
x_12083); +return x_12086; +} +else +{ +lean_object* x_12087; lean_object* x_12088; lean_object* x_12089; lean_object* x_12090; +lean_dec(x_12078); +lean_dec(x_12073); +lean_dec(x_5); +lean_dec(x_4); +lean_dec(x_2); +x_12087 = lean_ctor_get(x_12081, 0); +lean_inc(x_12087); +x_12088 = lean_ctor_get(x_12081, 1); +lean_inc(x_12088); +if (lean_is_exclusive(x_12081)) { + lean_ctor_release(x_12081, 0); + lean_ctor_release(x_12081, 1); + x_12089 = x_12081; +} else { + lean_dec_ref(x_12081); + x_12089 = lean_box(0); +} +if (lean_is_scalar(x_12089)) { + x_12090 = lean_alloc_ctor(1, 2, 0); +} else { + x_12090 = x_12089; +} +lean_ctor_set(x_12090, 0, x_12087); +lean_ctor_set(x_12090, 1, x_12088); +return x_12090; +} +} +} +else +{ +lean_object* x_12091; lean_object* x_12092; lean_object* x_12093; lean_object* x_12094; lean_object* x_12095; lean_object* x_12096; lean_object* x_12097; lean_object* x_12098; lean_object* x_12099; +lean_dec(x_12044); +lean_dec(x_12042); +x_12091 = lean_alloc_ctor(7, 2, 0); +lean_ctor_set(x_12091, 0, x_153); +lean_ctor_set(x_12091, 1, x_11919); +x_12092 = lean_ctor_get(x_1, 0); +lean_inc(x_12092); +lean_dec(x_1); +x_12093 = l_Lean_IR_ToIR_bindVar(x_12092, x_12040, x_4, x_5, x_11980); +x_12094 = lean_ctor_get(x_12093, 0); +lean_inc(x_12094); +x_12095 = lean_ctor_get(x_12093, 1); +lean_inc(x_12095); +lean_dec(x_12093); +x_12096 = lean_ctor_get(x_12094, 0); +lean_inc(x_12096); +x_12097 = lean_ctor_get(x_12094, 1); +lean_inc(x_12097); +lean_dec(x_12094); +x_12098 = lean_box(7); +x_12099 = l_Lean_IR_ToIR_lowerLet___lambda__1(x_2, x_12096, x_12091, x_12098, x_12097, x_4, x_5, x_12095); +return x_12099; +} +} +} +} +else +{ +lean_object* x_12100; lean_object* x_12101; +lean_free_object(x_11936); +lean_dec(x_11919); +lean_dec(x_153); +lean_dec(x_5); +lean_dec(x_4); +lean_dec(x_2); +lean_dec(x_1); +x_12100 = lean_box(13); +lean_ctor_set(x_11921, 0, x_12100); +if (lean_is_scalar(x_11930)) { + x_12101 = lean_alloc_ctor(0, 2, 0); +} else { + x_12101 = x_11930; +} +lean_ctor_set(x_12101, 0, x_11921); +lean_ctor_set(x_12101, 1, x_11929); +return x_12101; +} +} +else +{ +lean_object* x_12102; lean_object* x_12103; lean_object* x_12104; +lean_free_object(x_11936); +lean_dec(x_11930); +lean_free_object(x_11921); +lean_dec(x_153); +x_12102 = l_Lean_IR_instInhabitedArg; +x_12103 = lean_unsigned_to_nat(2u); +x_12104 = lean_array_get(x_12102, x_11919, x_12103); +lean_dec(x_11919); +if (lean_obj_tag(x_12104) == 0) +{ +lean_object* x_12105; lean_object* x_12106; lean_object* x_12107; lean_object* x_12108; lean_object* x_12109; lean_object* x_12110; lean_object* x_12111; +x_12105 = lean_ctor_get(x_12104, 0); +lean_inc(x_12105); +lean_dec(x_12104); +x_12106 = lean_ctor_get(x_1, 0); +lean_inc(x_12106); +lean_dec(x_1); +x_12107 = l_Lean_IR_ToIR_bindVarToVarId(x_12106, x_12105, x_11925, x_4, x_5, x_11929); +x_12108 = lean_ctor_get(x_12107, 0); +lean_inc(x_12108); +x_12109 = lean_ctor_get(x_12107, 1); +lean_inc(x_12109); +lean_dec(x_12107); +x_12110 = lean_ctor_get(x_12108, 1); +lean_inc(x_12110); +lean_dec(x_12108); +x_12111 = l_Lean_IR_ToIR_lowerCode(x_2, x_12110, x_4, x_5, x_12109); +return x_12111; +} +else +{ +lean_object* x_12112; lean_object* x_12113; lean_object* x_12114; lean_object* x_12115; lean_object* x_12116; lean_object* x_12117; +x_12112 = lean_ctor_get(x_1, 0); +lean_inc(x_12112); +lean_dec(x_1); +x_12113 = l_Lean_IR_ToIR_bindErased(x_12112, x_11925, x_4, x_5, x_11929); +x_12114 = lean_ctor_get(x_12113, 0); +lean_inc(x_12114); +x_12115 = lean_ctor_get(x_12113, 1); +lean_inc(x_12115); 
+lean_dec(x_12113); +x_12116 = lean_ctor_get(x_12114, 1); +lean_inc(x_12116); +lean_dec(x_12114); +x_12117 = l_Lean_IR_ToIR_lowerCode(x_2, x_12116, x_4, x_5, x_12115); +return x_12117; +} +} +} +else +{ +lean_object* x_12118; uint8_t x_12119; +lean_dec(x_11936); +x_12118 = l_Lean_IR_ToIR_lowerLet___closed__9; +x_12119 = lean_name_eq(x_153, x_12118); +if (x_12119 == 0) +{ +lean_object* x_12120; uint8_t x_12121; +x_12120 = l_Lean_IR_ToIR_lowerLet___closed__11; +x_12121 = lean_name_eq(x_153, x_12120); +if (x_12121 == 0) +{ +lean_object* x_12122; lean_object* x_12123; lean_object* x_12124; +lean_dec(x_11930); +lean_free_object(x_11921); +lean_inc(x_153); +x_12122 = l_Lean_IR_ToIR_findDecl(x_153, x_11925, x_4, x_5, x_11929); +x_12123 = lean_ctor_get(x_12122, 0); +lean_inc(x_12123); +x_12124 = lean_ctor_get(x_12123, 0); +lean_inc(x_12124); +if (lean_obj_tag(x_12124) == 0) +{ +lean_object* x_12125; lean_object* x_12126; lean_object* x_12127; lean_object* x_12128; uint8_t x_12129; lean_object* x_12130; lean_object* x_12131; lean_object* x_12132; lean_object* x_12133; lean_object* x_12134; lean_object* x_12135; lean_object* x_12136; lean_object* x_12137; lean_object* x_12138; +lean_dec(x_11919); +lean_dec(x_2); +lean_dec(x_1); +x_12125 = lean_ctor_get(x_12122, 1); +lean_inc(x_12125); +if (lean_is_exclusive(x_12122)) { + lean_ctor_release(x_12122, 0); + lean_ctor_release(x_12122, 1); + x_12126 = x_12122; +} else { + lean_dec_ref(x_12122); + x_12126 = lean_box(0); +} +x_12127 = lean_ctor_get(x_12123, 1); +lean_inc(x_12127); +if (lean_is_exclusive(x_12123)) { + lean_ctor_release(x_12123, 0); + lean_ctor_release(x_12123, 1); + x_12128 = x_12123; +} else { + lean_dec_ref(x_12123); + x_12128 = lean_box(0); +} +x_12129 = 1; +x_12130 = l_Lean_IR_ToIR_lowerLet___closed__14; +x_12131 = l_Lean_Name_toString(x_153, x_12129, x_12130); +x_12132 = lean_alloc_ctor(3, 1, 0); +lean_ctor_set(x_12132, 0, x_12131); +x_12133 = l_Lean_IR_ToIR_lowerLet___closed__13; +if (lean_is_scalar(x_12128)) { + x_12134 = lean_alloc_ctor(5, 2, 0); +} else { + x_12134 = x_12128; + lean_ctor_set_tag(x_12134, 5); +} +lean_ctor_set(x_12134, 0, x_12133); +lean_ctor_set(x_12134, 1, x_12132); +x_12135 = l_Lean_IR_ToIR_lowerLet___closed__16; +if (lean_is_scalar(x_12126)) { + x_12136 = lean_alloc_ctor(5, 2, 0); +} else { + x_12136 = x_12126; + lean_ctor_set_tag(x_12136, 5); +} +lean_ctor_set(x_12136, 0, x_12134); +lean_ctor_set(x_12136, 1, x_12135); +x_12137 = l_Lean_MessageData_ofFormat(x_12136); +x_12138 = l_Lean_throwError___at_Lean_IR_ToIR_lowerLet___spec__1(x_12137, x_12127, x_4, x_5, x_12125); +lean_dec(x_5); +lean_dec(x_4); +lean_dec(x_12127); +return x_12138; +} +else +{ +lean_object* x_12139; lean_object* x_12140; lean_object* x_12141; lean_object* x_12142; lean_object* x_12143; lean_object* x_12144; lean_object* x_12145; uint8_t x_12146; +x_12139 = lean_ctor_get(x_12122, 1); +lean_inc(x_12139); +lean_dec(x_12122); +x_12140 = lean_ctor_get(x_12123, 1); +lean_inc(x_12140); +if (lean_is_exclusive(x_12123)) { + lean_ctor_release(x_12123, 0); + lean_ctor_release(x_12123, 1); + x_12141 = x_12123; +} else { + lean_dec_ref(x_12123); + x_12141 = lean_box(0); +} +x_12142 = lean_ctor_get(x_12124, 0); +lean_inc(x_12142); +lean_dec(x_12124); +x_12143 = lean_array_get_size(x_11919); +x_12144 = l_Lean_IR_Decl_params(x_12142); +lean_dec(x_12142); +x_12145 = lean_array_get_size(x_12144); +lean_dec(x_12144); +x_12146 = lean_nat_dec_lt(x_12143, x_12145); +if (x_12146 == 0) +{ +uint8_t x_12147; +x_12147 = lean_nat_dec_eq(x_12143, x_12145); +if (x_12147 
== 0) +{ +lean_object* x_12148; lean_object* x_12149; lean_object* x_12150; lean_object* x_12151; lean_object* x_12152; lean_object* x_12153; lean_object* x_12154; lean_object* x_12155; lean_object* x_12156; lean_object* x_12157; lean_object* x_12158; lean_object* x_12159; lean_object* x_12160; lean_object* x_12161; lean_object* x_12162; lean_object* x_12163; lean_object* x_12164; +x_12148 = lean_unsigned_to_nat(0u); +x_12149 = l_Array_extract___rarg(x_11919, x_12148, x_12145); +x_12150 = l_Array_extract___rarg(x_11919, x_12145, x_12143); +lean_dec(x_12143); +lean_dec(x_11919); +if (lean_is_scalar(x_12141)) { + x_12151 = lean_alloc_ctor(6, 2, 0); +} else { + x_12151 = x_12141; + lean_ctor_set_tag(x_12151, 6); +} +lean_ctor_set(x_12151, 0, x_153); +lean_ctor_set(x_12151, 1, x_12149); +x_12152 = lean_ctor_get(x_1, 0); +lean_inc(x_12152); +x_12153 = l_Lean_IR_ToIR_bindVar(x_12152, x_12140, x_4, x_5, x_12139); +x_12154 = lean_ctor_get(x_12153, 0); +lean_inc(x_12154); +x_12155 = lean_ctor_get(x_12153, 1); +lean_inc(x_12155); +lean_dec(x_12153); +x_12156 = lean_ctor_get(x_12154, 0); +lean_inc(x_12156); +x_12157 = lean_ctor_get(x_12154, 1); +lean_inc(x_12157); +lean_dec(x_12154); +x_12158 = l_Lean_IR_ToIR_newVar(x_12157, x_4, x_5, x_12155); +x_12159 = lean_ctor_get(x_12158, 0); +lean_inc(x_12159); +x_12160 = lean_ctor_get(x_12158, 1); +lean_inc(x_12160); +lean_dec(x_12158); +x_12161 = lean_ctor_get(x_12159, 0); +lean_inc(x_12161); +x_12162 = lean_ctor_get(x_12159, 1); +lean_inc(x_12162); +lean_dec(x_12159); +x_12163 = lean_ctor_get(x_1, 2); +lean_inc(x_12163); +lean_dec(x_1); +lean_inc(x_5); +lean_inc(x_4); +x_12164 = l_Lean_IR_ToIR_lowerType(x_12163, x_12162, x_4, x_5, x_12160); +if (lean_obj_tag(x_12164) == 0) +{ +lean_object* x_12165; lean_object* x_12166; lean_object* x_12167; lean_object* x_12168; lean_object* x_12169; +x_12165 = lean_ctor_get(x_12164, 0); +lean_inc(x_12165); +x_12166 = lean_ctor_get(x_12164, 1); +lean_inc(x_12166); +lean_dec(x_12164); +x_12167 = lean_ctor_get(x_12165, 0); +lean_inc(x_12167); +x_12168 = lean_ctor_get(x_12165, 1); +lean_inc(x_12168); +lean_dec(x_12165); +x_12169 = l_Lean_IR_ToIR_lowerLet___lambda__3(x_2, x_12161, x_12150, x_12156, x_12151, x_12167, x_12168, x_4, x_5, x_12166); +return x_12169; +} +else +{ +lean_object* x_12170; lean_object* x_12171; lean_object* x_12172; lean_object* x_12173; +lean_dec(x_12161); +lean_dec(x_12156); +lean_dec(x_12151); +lean_dec(x_12150); +lean_dec(x_5); +lean_dec(x_4); +lean_dec(x_2); +x_12170 = lean_ctor_get(x_12164, 0); +lean_inc(x_12170); +x_12171 = lean_ctor_get(x_12164, 1); +lean_inc(x_12171); +if (lean_is_exclusive(x_12164)) { + lean_ctor_release(x_12164, 0); + lean_ctor_release(x_12164, 1); + x_12172 = x_12164; +} else { + lean_dec_ref(x_12164); + x_12172 = lean_box(0); +} +if (lean_is_scalar(x_12172)) { + x_12173 = lean_alloc_ctor(1, 2, 0); +} else { + x_12173 = x_12172; +} +lean_ctor_set(x_12173, 0, x_12170); +lean_ctor_set(x_12173, 1, x_12171); +return x_12173; +} +} +else +{ +lean_object* x_12174; lean_object* x_12175; lean_object* x_12176; lean_object* x_12177; lean_object* x_12178; lean_object* x_12179; lean_object* x_12180; lean_object* x_12181; lean_object* x_12182; +lean_dec(x_12145); +lean_dec(x_12143); +if (lean_is_scalar(x_12141)) { + x_12174 = lean_alloc_ctor(6, 2, 0); +} else { + x_12174 = x_12141; + lean_ctor_set_tag(x_12174, 6); +} +lean_ctor_set(x_12174, 0, x_153); +lean_ctor_set(x_12174, 1, x_11919); +x_12175 = lean_ctor_get(x_1, 0); +lean_inc(x_12175); +x_12176 = l_Lean_IR_ToIR_bindVar(x_12175, 
x_12140, x_4, x_5, x_12139); +x_12177 = lean_ctor_get(x_12176, 0); +lean_inc(x_12177); +x_12178 = lean_ctor_get(x_12176, 1); +lean_inc(x_12178); +lean_dec(x_12176); +x_12179 = lean_ctor_get(x_12177, 0); +lean_inc(x_12179); +x_12180 = lean_ctor_get(x_12177, 1); +lean_inc(x_12180); +lean_dec(x_12177); +x_12181 = lean_ctor_get(x_1, 2); +lean_inc(x_12181); +lean_dec(x_1); +lean_inc(x_5); +lean_inc(x_4); +x_12182 = l_Lean_IR_ToIR_lowerType(x_12181, x_12180, x_4, x_5, x_12178); +if (lean_obj_tag(x_12182) == 0) +{ +lean_object* x_12183; lean_object* x_12184; lean_object* x_12185; lean_object* x_12186; lean_object* x_12187; +x_12183 = lean_ctor_get(x_12182, 0); +lean_inc(x_12183); +x_12184 = lean_ctor_get(x_12182, 1); +lean_inc(x_12184); +lean_dec(x_12182); +x_12185 = lean_ctor_get(x_12183, 0); +lean_inc(x_12185); +x_12186 = lean_ctor_get(x_12183, 1); +lean_inc(x_12186); +lean_dec(x_12183); +x_12187 = l_Lean_IR_ToIR_lowerLet___lambda__1(x_2, x_12179, x_12174, x_12185, x_12186, x_4, x_5, x_12184); +return x_12187; +} +else +{ +lean_object* x_12188; lean_object* x_12189; lean_object* x_12190; lean_object* x_12191; +lean_dec(x_12179); +lean_dec(x_12174); +lean_dec(x_5); +lean_dec(x_4); +lean_dec(x_2); +x_12188 = lean_ctor_get(x_12182, 0); +lean_inc(x_12188); +x_12189 = lean_ctor_get(x_12182, 1); +lean_inc(x_12189); +if (lean_is_exclusive(x_12182)) { + lean_ctor_release(x_12182, 0); + lean_ctor_release(x_12182, 1); + x_12190 = x_12182; +} else { + lean_dec_ref(x_12182); + x_12190 = lean_box(0); +} +if (lean_is_scalar(x_12190)) { + x_12191 = lean_alloc_ctor(1, 2, 0); +} else { + x_12191 = x_12190; +} +lean_ctor_set(x_12191, 0, x_12188); +lean_ctor_set(x_12191, 1, x_12189); +return x_12191; +} +} +} +else +{ +lean_object* x_12192; lean_object* x_12193; lean_object* x_12194; lean_object* x_12195; lean_object* x_12196; lean_object* x_12197; lean_object* x_12198; lean_object* x_12199; lean_object* x_12200; +lean_dec(x_12145); +lean_dec(x_12143); +if (lean_is_scalar(x_12141)) { + x_12192 = lean_alloc_ctor(7, 2, 0); +} else { + x_12192 = x_12141; + lean_ctor_set_tag(x_12192, 7); +} +lean_ctor_set(x_12192, 0, x_153); +lean_ctor_set(x_12192, 1, x_11919); +x_12193 = lean_ctor_get(x_1, 0); +lean_inc(x_12193); +lean_dec(x_1); +x_12194 = l_Lean_IR_ToIR_bindVar(x_12193, x_12140, x_4, x_5, x_12139); +x_12195 = lean_ctor_get(x_12194, 0); +lean_inc(x_12195); +x_12196 = lean_ctor_get(x_12194, 1); +lean_inc(x_12196); +lean_dec(x_12194); +x_12197 = lean_ctor_get(x_12195, 0); +lean_inc(x_12197); +x_12198 = lean_ctor_get(x_12195, 1); +lean_inc(x_12198); +lean_dec(x_12195); +x_12199 = lean_box(7); +x_12200 = l_Lean_IR_ToIR_lowerLet___lambda__1(x_2, x_12197, x_12192, x_12199, x_12198, x_4, x_5, x_12196); +return x_12200; +} +} +} +else +{ +lean_object* x_12201; lean_object* x_12202; +lean_dec(x_11919); +lean_dec(x_153); +lean_dec(x_5); +lean_dec(x_4); +lean_dec(x_2); +lean_dec(x_1); +x_12201 = lean_box(13); +lean_ctor_set(x_11921, 0, x_12201); +if (lean_is_scalar(x_11930)) { + x_12202 = lean_alloc_ctor(0, 2, 0); +} else { + x_12202 = x_11930; +} +lean_ctor_set(x_12202, 0, x_11921); +lean_ctor_set(x_12202, 1, x_11929); +return x_12202; +} +} +else +{ +lean_object* x_12203; lean_object* x_12204; lean_object* x_12205; +lean_dec(x_11930); +lean_free_object(x_11921); +lean_dec(x_153); +x_12203 = l_Lean_IR_instInhabitedArg; +x_12204 = lean_unsigned_to_nat(2u); +x_12205 = lean_array_get(x_12203, x_11919, x_12204); +lean_dec(x_11919); +if (lean_obj_tag(x_12205) == 0) +{ +lean_object* x_12206; lean_object* x_12207; lean_object* 
x_12208; lean_object* x_12209; lean_object* x_12210; lean_object* x_12211; lean_object* x_12212; +x_12206 = lean_ctor_get(x_12205, 0); +lean_inc(x_12206); +lean_dec(x_12205); +x_12207 = lean_ctor_get(x_1, 0); +lean_inc(x_12207); +lean_dec(x_1); +x_12208 = l_Lean_IR_ToIR_bindVarToVarId(x_12207, x_12206, x_11925, x_4, x_5, x_11929); +x_12209 = lean_ctor_get(x_12208, 0); +lean_inc(x_12209); +x_12210 = lean_ctor_get(x_12208, 1); +lean_inc(x_12210); +lean_dec(x_12208); +x_12211 = lean_ctor_get(x_12209, 1); +lean_inc(x_12211); +lean_dec(x_12209); +x_12212 = l_Lean_IR_ToIR_lowerCode(x_2, x_12211, x_4, x_5, x_12210); +return x_12212; +} +else +{ +lean_object* x_12213; lean_object* x_12214; lean_object* x_12215; lean_object* x_12216; lean_object* x_12217; lean_object* x_12218; +x_12213 = lean_ctor_get(x_1, 0); +lean_inc(x_12213); +lean_dec(x_1); +x_12214 = l_Lean_IR_ToIR_bindErased(x_12213, x_11925, x_4, x_5, x_11929); +x_12215 = lean_ctor_get(x_12214, 0); +lean_inc(x_12215); +x_12216 = lean_ctor_get(x_12214, 1); +lean_inc(x_12216); +lean_dec(x_12214); +x_12217 = lean_ctor_get(x_12215, 1); +lean_inc(x_12217); +lean_dec(x_12215); +x_12218 = l_Lean_IR_ToIR_lowerCode(x_2, x_12217, x_4, x_5, x_12216); +return x_12218; +} +} +} +} +case 1: +{ +lean_object* x_12219; lean_object* x_12220; lean_object* x_12250; lean_object* x_12251; +lean_dec(x_11936); +lean_dec(x_11931); +lean_dec(x_11911); +lean_dec(x_11910); +lean_inc(x_153); +x_12250 = l_Lean_Compiler_LCNF_getMonoDecl_x3f(x_153, x_4, x_5, x_11929); +x_12251 = lean_ctor_get(x_12250, 0); +lean_inc(x_12251); +if (lean_obj_tag(x_12251) == 0) +{ +lean_object* x_12252; lean_object* x_12253; +x_12252 = lean_ctor_get(x_12250, 1); +lean_inc(x_12252); +lean_dec(x_12250); +x_12253 = lean_box(0); +lean_ctor_set(x_11921, 0, x_12253); +x_12219 = x_11921; +x_12220 = x_12252; +goto block_12249; +} +else +{ +uint8_t x_12254; +lean_free_object(x_11921); +x_12254 = !lean_is_exclusive(x_12250); +if (x_12254 == 0) +{ +lean_object* x_12255; lean_object* x_12256; uint8_t x_12257; +x_12255 = lean_ctor_get(x_12250, 1); +x_12256 = lean_ctor_get(x_12250, 0); +lean_dec(x_12256); +x_12257 = !lean_is_exclusive(x_12251); +if (x_12257 == 0) +{ +lean_object* x_12258; lean_object* x_12259; lean_object* x_12260; lean_object* x_12261; uint8_t x_12262; +x_12258 = lean_ctor_get(x_12251, 0); +x_12259 = lean_array_get_size(x_11919); +x_12260 = lean_ctor_get(x_12258, 3); +lean_inc(x_12260); +lean_dec(x_12258); +x_12261 = lean_array_get_size(x_12260); +lean_dec(x_12260); +x_12262 = lean_nat_dec_lt(x_12259, x_12261); +if (x_12262 == 0) +{ +uint8_t x_12263; +x_12263 = lean_nat_dec_eq(x_12259, x_12261); +if (x_12263 == 0) +{ +lean_object* x_12264; lean_object* x_12265; lean_object* x_12266; lean_object* x_12267; lean_object* x_12268; lean_object* x_12269; lean_object* x_12270; lean_object* x_12271; lean_object* x_12272; lean_object* x_12273; lean_object* x_12274; lean_object* x_12275; lean_object* x_12276; lean_object* x_12277; lean_object* x_12278; lean_object* x_12279; +x_12264 = lean_unsigned_to_nat(0u); +x_12265 = l_Array_extract___rarg(x_11919, x_12264, x_12261); +x_12266 = l_Array_extract___rarg(x_11919, x_12261, x_12259); +lean_dec(x_12259); +lean_inc(x_153); +lean_ctor_set_tag(x_12250, 6); +lean_ctor_set(x_12250, 1, x_12265); +lean_ctor_set(x_12250, 0, x_153); +x_12267 = lean_ctor_get(x_1, 0); +lean_inc(x_12267); +x_12268 = l_Lean_IR_ToIR_bindVar(x_12267, x_11925, x_4, x_5, x_12255); +x_12269 = lean_ctor_get(x_12268, 0); +lean_inc(x_12269); +x_12270 = lean_ctor_get(x_12268, 1); 
+lean_inc(x_12270); +lean_dec(x_12268); +x_12271 = lean_ctor_get(x_12269, 0); +lean_inc(x_12271); +x_12272 = lean_ctor_get(x_12269, 1); +lean_inc(x_12272); +lean_dec(x_12269); +x_12273 = l_Lean_IR_ToIR_newVar(x_12272, x_4, x_5, x_12270); +x_12274 = lean_ctor_get(x_12273, 0); +lean_inc(x_12274); +x_12275 = lean_ctor_get(x_12273, 1); +lean_inc(x_12275); +lean_dec(x_12273); +x_12276 = lean_ctor_get(x_12274, 0); +lean_inc(x_12276); +x_12277 = lean_ctor_get(x_12274, 1); +lean_inc(x_12277); +lean_dec(x_12274); +x_12278 = lean_ctor_get(x_1, 2); +lean_inc(x_12278); +lean_inc(x_5); +lean_inc(x_4); +x_12279 = l_Lean_IR_ToIR_lowerType(x_12278, x_12277, x_4, x_5, x_12275); +if (lean_obj_tag(x_12279) == 0) +{ +lean_object* x_12280; lean_object* x_12281; lean_object* x_12282; lean_object* x_12283; lean_object* x_12284; +x_12280 = lean_ctor_get(x_12279, 0); +lean_inc(x_12280); +x_12281 = lean_ctor_get(x_12279, 1); +lean_inc(x_12281); +lean_dec(x_12279); +x_12282 = lean_ctor_get(x_12280, 0); +lean_inc(x_12282); +x_12283 = lean_ctor_get(x_12280, 1); +lean_inc(x_12283); +lean_dec(x_12280); +lean_inc(x_5); +lean_inc(x_4); +lean_inc(x_2); +x_12284 = l_Lean_IR_ToIR_lowerLet___lambda__3(x_2, x_12276, x_12266, x_12271, x_12250, x_12282, x_12283, x_4, x_5, x_12281); +if (lean_obj_tag(x_12284) == 0) +{ +lean_object* x_12285; lean_object* x_12286; uint8_t x_12287; +x_12285 = lean_ctor_get(x_12284, 0); +lean_inc(x_12285); +x_12286 = lean_ctor_get(x_12284, 1); +lean_inc(x_12286); +lean_dec(x_12284); +x_12287 = !lean_is_exclusive(x_12285); +if (x_12287 == 0) +{ +lean_object* x_12288; +x_12288 = lean_ctor_get(x_12285, 0); +lean_ctor_set(x_12251, 0, x_12288); +lean_ctor_set(x_12285, 0, x_12251); +x_12219 = x_12285; +x_12220 = x_12286; +goto block_12249; +} +else +{ +lean_object* x_12289; lean_object* x_12290; lean_object* x_12291; +x_12289 = lean_ctor_get(x_12285, 0); +x_12290 = lean_ctor_get(x_12285, 1); +lean_inc(x_12290); +lean_inc(x_12289); +lean_dec(x_12285); +lean_ctor_set(x_12251, 0, x_12289); +x_12291 = lean_alloc_ctor(0, 2, 0); +lean_ctor_set(x_12291, 0, x_12251); +lean_ctor_set(x_12291, 1, x_12290); +x_12219 = x_12291; +x_12220 = x_12286; +goto block_12249; +} +} +else +{ +uint8_t x_12292; +lean_free_object(x_12251); +lean_dec(x_11930); +lean_dec(x_11919); +lean_dec(x_153); +lean_dec(x_5); +lean_dec(x_4); +lean_dec(x_2); +lean_dec(x_1); +x_12292 = !lean_is_exclusive(x_12284); +if (x_12292 == 0) +{ +return x_12284; +} +else +{ +lean_object* x_12293; lean_object* x_12294; lean_object* x_12295; +x_12293 = lean_ctor_get(x_12284, 0); +x_12294 = lean_ctor_get(x_12284, 1); +lean_inc(x_12294); +lean_inc(x_12293); +lean_dec(x_12284); +x_12295 = lean_alloc_ctor(1, 2, 0); +lean_ctor_set(x_12295, 0, x_12293); +lean_ctor_set(x_12295, 1, x_12294); +return x_12295; +} +} +} +else +{ +uint8_t x_12296; +lean_dec(x_12276); +lean_dec(x_12271); +lean_dec(x_12250); +lean_dec(x_12266); +lean_free_object(x_12251); +lean_dec(x_11930); +lean_dec(x_11919); +lean_dec(x_153); +lean_dec(x_5); +lean_dec(x_4); +lean_dec(x_2); +lean_dec(x_1); +x_12296 = !lean_is_exclusive(x_12279); +if (x_12296 == 0) +{ +return x_12279; +} +else +{ +lean_object* x_12297; lean_object* x_12298; lean_object* x_12299; +x_12297 = lean_ctor_get(x_12279, 0); +x_12298 = lean_ctor_get(x_12279, 1); +lean_inc(x_12298); +lean_inc(x_12297); +lean_dec(x_12279); +x_12299 = lean_alloc_ctor(1, 2, 0); +lean_ctor_set(x_12299, 0, x_12297); +lean_ctor_set(x_12299, 1, x_12298); +return x_12299; +} +} +} +else +{ +lean_object* x_12300; lean_object* x_12301; lean_object* x_12302; 
lean_object* x_12303; lean_object* x_12304; lean_object* x_12305; lean_object* x_12306; lean_object* x_12307; +lean_dec(x_12261); +lean_dec(x_12259); +lean_inc(x_11919); +lean_inc(x_153); +lean_ctor_set_tag(x_12250, 6); +lean_ctor_set(x_12250, 1, x_11919); +lean_ctor_set(x_12250, 0, x_153); +x_12300 = lean_ctor_get(x_1, 0); +lean_inc(x_12300); +x_12301 = l_Lean_IR_ToIR_bindVar(x_12300, x_11925, x_4, x_5, x_12255); +x_12302 = lean_ctor_get(x_12301, 0); +lean_inc(x_12302); +x_12303 = lean_ctor_get(x_12301, 1); +lean_inc(x_12303); +lean_dec(x_12301); +x_12304 = lean_ctor_get(x_12302, 0); +lean_inc(x_12304); +x_12305 = lean_ctor_get(x_12302, 1); +lean_inc(x_12305); +lean_dec(x_12302); +x_12306 = lean_ctor_get(x_1, 2); +lean_inc(x_12306); +lean_inc(x_5); +lean_inc(x_4); +x_12307 = l_Lean_IR_ToIR_lowerType(x_12306, x_12305, x_4, x_5, x_12303); +if (lean_obj_tag(x_12307) == 0) +{ +lean_object* x_12308; lean_object* x_12309; lean_object* x_12310; lean_object* x_12311; lean_object* x_12312; +x_12308 = lean_ctor_get(x_12307, 0); +lean_inc(x_12308); +x_12309 = lean_ctor_get(x_12307, 1); +lean_inc(x_12309); +lean_dec(x_12307); +x_12310 = lean_ctor_get(x_12308, 0); +lean_inc(x_12310); +x_12311 = lean_ctor_get(x_12308, 1); +lean_inc(x_12311); +lean_dec(x_12308); +lean_inc(x_5); +lean_inc(x_4); +lean_inc(x_2); +x_12312 = l_Lean_IR_ToIR_lowerLet___lambda__1(x_2, x_12304, x_12250, x_12310, x_12311, x_4, x_5, x_12309); +if (lean_obj_tag(x_12312) == 0) +{ +lean_object* x_12313; lean_object* x_12314; uint8_t x_12315; +x_12313 = lean_ctor_get(x_12312, 0); +lean_inc(x_12313); +x_12314 = lean_ctor_get(x_12312, 1); +lean_inc(x_12314); +lean_dec(x_12312); +x_12315 = !lean_is_exclusive(x_12313); +if (x_12315 == 0) +{ +lean_object* x_12316; +x_12316 = lean_ctor_get(x_12313, 0); +lean_ctor_set(x_12251, 0, x_12316); +lean_ctor_set(x_12313, 0, x_12251); +x_12219 = x_12313; +x_12220 = x_12314; +goto block_12249; +} +else +{ +lean_object* x_12317; lean_object* x_12318; lean_object* x_12319; +x_12317 = lean_ctor_get(x_12313, 0); +x_12318 = lean_ctor_get(x_12313, 1); +lean_inc(x_12318); +lean_inc(x_12317); +lean_dec(x_12313); +lean_ctor_set(x_12251, 0, x_12317); +x_12319 = lean_alloc_ctor(0, 2, 0); +lean_ctor_set(x_12319, 0, x_12251); +lean_ctor_set(x_12319, 1, x_12318); +x_12219 = x_12319; +x_12220 = x_12314; +goto block_12249; +} +} +else +{ +uint8_t x_12320; +lean_free_object(x_12251); +lean_dec(x_11930); +lean_dec(x_11919); +lean_dec(x_153); +lean_dec(x_5); +lean_dec(x_4); +lean_dec(x_2); +lean_dec(x_1); +x_12320 = !lean_is_exclusive(x_12312); +if (x_12320 == 0) +{ +return x_12312; +} +else +{ +lean_object* x_12321; lean_object* x_12322; lean_object* x_12323; +x_12321 = lean_ctor_get(x_12312, 0); +x_12322 = lean_ctor_get(x_12312, 1); +lean_inc(x_12322); +lean_inc(x_12321); +lean_dec(x_12312); +x_12323 = lean_alloc_ctor(1, 2, 0); +lean_ctor_set(x_12323, 0, x_12321); +lean_ctor_set(x_12323, 1, x_12322); +return x_12323; +} +} +} +else +{ +uint8_t x_12324; +lean_dec(x_12304); +lean_dec(x_12250); +lean_free_object(x_12251); +lean_dec(x_11930); +lean_dec(x_11919); +lean_dec(x_153); +lean_dec(x_5); +lean_dec(x_4); +lean_dec(x_2); +lean_dec(x_1); +x_12324 = !lean_is_exclusive(x_12307); +if (x_12324 == 0) +{ +return x_12307; +} +else +{ +lean_object* x_12325; lean_object* x_12326; lean_object* x_12327; +x_12325 = lean_ctor_get(x_12307, 0); +x_12326 = lean_ctor_get(x_12307, 1); +lean_inc(x_12326); +lean_inc(x_12325); +lean_dec(x_12307); +x_12327 = lean_alloc_ctor(1, 2, 0); +lean_ctor_set(x_12327, 0, x_12325); 
+lean_ctor_set(x_12327, 1, x_12326); +return x_12327; +} +} +} +} +else +{ +lean_object* x_12328; lean_object* x_12329; lean_object* x_12330; lean_object* x_12331; lean_object* x_12332; lean_object* x_12333; lean_object* x_12334; lean_object* x_12335; +lean_dec(x_12261); +lean_dec(x_12259); +lean_inc(x_11919); +lean_inc(x_153); +lean_ctor_set_tag(x_12250, 7); +lean_ctor_set(x_12250, 1, x_11919); +lean_ctor_set(x_12250, 0, x_153); +x_12328 = lean_ctor_get(x_1, 0); +lean_inc(x_12328); +x_12329 = l_Lean_IR_ToIR_bindVar(x_12328, x_11925, x_4, x_5, x_12255); +x_12330 = lean_ctor_get(x_12329, 0); +lean_inc(x_12330); +x_12331 = lean_ctor_get(x_12329, 1); +lean_inc(x_12331); +lean_dec(x_12329); +x_12332 = lean_ctor_get(x_12330, 0); +lean_inc(x_12332); +x_12333 = lean_ctor_get(x_12330, 1); +lean_inc(x_12333); +lean_dec(x_12330); +x_12334 = lean_box(7); +lean_inc(x_5); +lean_inc(x_4); +lean_inc(x_2); +x_12335 = l_Lean_IR_ToIR_lowerLet___lambda__1(x_2, x_12332, x_12250, x_12334, x_12333, x_4, x_5, x_12331); +if (lean_obj_tag(x_12335) == 0) +{ +lean_object* x_12336; lean_object* x_12337; uint8_t x_12338; +x_12336 = lean_ctor_get(x_12335, 0); +lean_inc(x_12336); +x_12337 = lean_ctor_get(x_12335, 1); +lean_inc(x_12337); +lean_dec(x_12335); +x_12338 = !lean_is_exclusive(x_12336); +if (x_12338 == 0) +{ +lean_object* x_12339; +x_12339 = lean_ctor_get(x_12336, 0); +lean_ctor_set(x_12251, 0, x_12339); +lean_ctor_set(x_12336, 0, x_12251); +x_12219 = x_12336; +x_12220 = x_12337; +goto block_12249; +} +else +{ +lean_object* x_12340; lean_object* x_12341; lean_object* x_12342; +x_12340 = lean_ctor_get(x_12336, 0); +x_12341 = lean_ctor_get(x_12336, 1); +lean_inc(x_12341); +lean_inc(x_12340); +lean_dec(x_12336); +lean_ctor_set(x_12251, 0, x_12340); +x_12342 = lean_alloc_ctor(0, 2, 0); +lean_ctor_set(x_12342, 0, x_12251); +lean_ctor_set(x_12342, 1, x_12341); +x_12219 = x_12342; +x_12220 = x_12337; +goto block_12249; +} +} +else +{ +uint8_t x_12343; +lean_free_object(x_12251); +lean_dec(x_11930); +lean_dec(x_11919); +lean_dec(x_153); +lean_dec(x_5); +lean_dec(x_4); +lean_dec(x_2); +lean_dec(x_1); +x_12343 = !lean_is_exclusive(x_12335); +if (x_12343 == 0) +{ +return x_12335; +} +else +{ +lean_object* x_12344; lean_object* x_12345; lean_object* x_12346; +x_12344 = lean_ctor_get(x_12335, 0); +x_12345 = lean_ctor_get(x_12335, 1); +lean_inc(x_12345); +lean_inc(x_12344); +lean_dec(x_12335); +x_12346 = lean_alloc_ctor(1, 2, 0); +lean_ctor_set(x_12346, 0, x_12344); +lean_ctor_set(x_12346, 1, x_12345); +return x_12346; +} +} +} +} +else +{ +lean_object* x_12347; lean_object* x_12348; lean_object* x_12349; lean_object* x_12350; uint8_t x_12351; +x_12347 = lean_ctor_get(x_12251, 0); +lean_inc(x_12347); +lean_dec(x_12251); +x_12348 = lean_array_get_size(x_11919); +x_12349 = lean_ctor_get(x_12347, 3); +lean_inc(x_12349); +lean_dec(x_12347); +x_12350 = lean_array_get_size(x_12349); +lean_dec(x_12349); +x_12351 = lean_nat_dec_lt(x_12348, x_12350); +if (x_12351 == 0) +{ +uint8_t x_12352; +x_12352 = lean_nat_dec_eq(x_12348, x_12350); +if (x_12352 == 0) +{ +lean_object* x_12353; lean_object* x_12354; lean_object* x_12355; lean_object* x_12356; lean_object* x_12357; lean_object* x_12358; lean_object* x_12359; lean_object* x_12360; lean_object* x_12361; lean_object* x_12362; lean_object* x_12363; lean_object* x_12364; lean_object* x_12365; lean_object* x_12366; lean_object* x_12367; lean_object* x_12368; +x_12353 = lean_unsigned_to_nat(0u); +x_12354 = l_Array_extract___rarg(x_11919, x_12353, x_12350); +x_12355 = 
l_Array_extract___rarg(x_11919, x_12350, x_12348); +lean_dec(x_12348); +lean_inc(x_153); +lean_ctor_set_tag(x_12250, 6); +lean_ctor_set(x_12250, 1, x_12354); +lean_ctor_set(x_12250, 0, x_153); +x_12356 = lean_ctor_get(x_1, 0); +lean_inc(x_12356); +x_12357 = l_Lean_IR_ToIR_bindVar(x_12356, x_11925, x_4, x_5, x_12255); +x_12358 = lean_ctor_get(x_12357, 0); +lean_inc(x_12358); +x_12359 = lean_ctor_get(x_12357, 1); +lean_inc(x_12359); +lean_dec(x_12357); +x_12360 = lean_ctor_get(x_12358, 0); +lean_inc(x_12360); +x_12361 = lean_ctor_get(x_12358, 1); +lean_inc(x_12361); +lean_dec(x_12358); +x_12362 = l_Lean_IR_ToIR_newVar(x_12361, x_4, x_5, x_12359); +x_12363 = lean_ctor_get(x_12362, 0); +lean_inc(x_12363); +x_12364 = lean_ctor_get(x_12362, 1); +lean_inc(x_12364); +lean_dec(x_12362); +x_12365 = lean_ctor_get(x_12363, 0); +lean_inc(x_12365); +x_12366 = lean_ctor_get(x_12363, 1); +lean_inc(x_12366); +lean_dec(x_12363); +x_12367 = lean_ctor_get(x_1, 2); +lean_inc(x_12367); +lean_inc(x_5); +lean_inc(x_4); +x_12368 = l_Lean_IR_ToIR_lowerType(x_12367, x_12366, x_4, x_5, x_12364); +if (lean_obj_tag(x_12368) == 0) +{ +lean_object* x_12369; lean_object* x_12370; lean_object* x_12371; lean_object* x_12372; lean_object* x_12373; +x_12369 = lean_ctor_get(x_12368, 0); +lean_inc(x_12369); +x_12370 = lean_ctor_get(x_12368, 1); +lean_inc(x_12370); +lean_dec(x_12368); +x_12371 = lean_ctor_get(x_12369, 0); +lean_inc(x_12371); +x_12372 = lean_ctor_get(x_12369, 1); +lean_inc(x_12372); +lean_dec(x_12369); +lean_inc(x_5); +lean_inc(x_4); +lean_inc(x_2); +x_12373 = l_Lean_IR_ToIR_lowerLet___lambda__3(x_2, x_12365, x_12355, x_12360, x_12250, x_12371, x_12372, x_4, x_5, x_12370); +if (lean_obj_tag(x_12373) == 0) +{ +lean_object* x_12374; lean_object* x_12375; lean_object* x_12376; lean_object* x_12377; lean_object* x_12378; lean_object* x_12379; lean_object* x_12380; +x_12374 = lean_ctor_get(x_12373, 0); +lean_inc(x_12374); +x_12375 = lean_ctor_get(x_12373, 1); +lean_inc(x_12375); +lean_dec(x_12373); +x_12376 = lean_ctor_get(x_12374, 0); +lean_inc(x_12376); +x_12377 = lean_ctor_get(x_12374, 1); +lean_inc(x_12377); +if (lean_is_exclusive(x_12374)) { + lean_ctor_release(x_12374, 0); + lean_ctor_release(x_12374, 1); + x_12378 = x_12374; +} else { + lean_dec_ref(x_12374); + x_12378 = lean_box(0); +} +x_12379 = lean_alloc_ctor(1, 1, 0); +lean_ctor_set(x_12379, 0, x_12376); +if (lean_is_scalar(x_12378)) { + x_12380 = lean_alloc_ctor(0, 2, 0); +} else { + x_12380 = x_12378; +} +lean_ctor_set(x_12380, 0, x_12379); +lean_ctor_set(x_12380, 1, x_12377); +x_12219 = x_12380; +x_12220 = x_12375; +goto block_12249; +} +else +{ +lean_object* x_12381; lean_object* x_12382; lean_object* x_12383; lean_object* x_12384; +lean_dec(x_11930); +lean_dec(x_11919); +lean_dec(x_153); +lean_dec(x_5); +lean_dec(x_4); +lean_dec(x_2); +lean_dec(x_1); +x_12381 = lean_ctor_get(x_12373, 0); +lean_inc(x_12381); +x_12382 = lean_ctor_get(x_12373, 1); +lean_inc(x_12382); +if (lean_is_exclusive(x_12373)) { + lean_ctor_release(x_12373, 0); + lean_ctor_release(x_12373, 1); + x_12383 = x_12373; +} else { + lean_dec_ref(x_12373); + x_12383 = lean_box(0); +} +if (lean_is_scalar(x_12383)) { + x_12384 = lean_alloc_ctor(1, 2, 0); +} else { + x_12384 = x_12383; +} +lean_ctor_set(x_12384, 0, x_12381); +lean_ctor_set(x_12384, 1, x_12382); +return x_12384; +} +} +else +{ +lean_object* x_12385; lean_object* x_12386; lean_object* x_12387; lean_object* x_12388; +lean_dec(x_12365); +lean_dec(x_12360); +lean_dec(x_12250); +lean_dec(x_12355); +lean_dec(x_11930); 
+lean_dec(x_11919); +lean_dec(x_153); +lean_dec(x_5); +lean_dec(x_4); +lean_dec(x_2); +lean_dec(x_1); +x_12385 = lean_ctor_get(x_12368, 0); +lean_inc(x_12385); +x_12386 = lean_ctor_get(x_12368, 1); +lean_inc(x_12386); +if (lean_is_exclusive(x_12368)) { + lean_ctor_release(x_12368, 0); + lean_ctor_release(x_12368, 1); + x_12387 = x_12368; +} else { + lean_dec_ref(x_12368); + x_12387 = lean_box(0); +} +if (lean_is_scalar(x_12387)) { + x_12388 = lean_alloc_ctor(1, 2, 0); +} else { + x_12388 = x_12387; +} +lean_ctor_set(x_12388, 0, x_12385); +lean_ctor_set(x_12388, 1, x_12386); +return x_12388; +} +} +else +{ +lean_object* x_12389; lean_object* x_12390; lean_object* x_12391; lean_object* x_12392; lean_object* x_12393; lean_object* x_12394; lean_object* x_12395; lean_object* x_12396; +lean_dec(x_12350); +lean_dec(x_12348); +lean_inc(x_11919); +lean_inc(x_153); +lean_ctor_set_tag(x_12250, 6); +lean_ctor_set(x_12250, 1, x_11919); +lean_ctor_set(x_12250, 0, x_153); +x_12389 = lean_ctor_get(x_1, 0); +lean_inc(x_12389); +x_12390 = l_Lean_IR_ToIR_bindVar(x_12389, x_11925, x_4, x_5, x_12255); +x_12391 = lean_ctor_get(x_12390, 0); +lean_inc(x_12391); +x_12392 = lean_ctor_get(x_12390, 1); +lean_inc(x_12392); +lean_dec(x_12390); +x_12393 = lean_ctor_get(x_12391, 0); +lean_inc(x_12393); +x_12394 = lean_ctor_get(x_12391, 1); +lean_inc(x_12394); +lean_dec(x_12391); +x_12395 = lean_ctor_get(x_1, 2); +lean_inc(x_12395); +lean_inc(x_5); +lean_inc(x_4); +x_12396 = l_Lean_IR_ToIR_lowerType(x_12395, x_12394, x_4, x_5, x_12392); +if (lean_obj_tag(x_12396) == 0) +{ +lean_object* x_12397; lean_object* x_12398; lean_object* x_12399; lean_object* x_12400; lean_object* x_12401; +x_12397 = lean_ctor_get(x_12396, 0); +lean_inc(x_12397); +x_12398 = lean_ctor_get(x_12396, 1); +lean_inc(x_12398); +lean_dec(x_12396); +x_12399 = lean_ctor_get(x_12397, 0); +lean_inc(x_12399); +x_12400 = lean_ctor_get(x_12397, 1); +lean_inc(x_12400); +lean_dec(x_12397); +lean_inc(x_5); +lean_inc(x_4); +lean_inc(x_2); +x_12401 = l_Lean_IR_ToIR_lowerLet___lambda__1(x_2, x_12393, x_12250, x_12399, x_12400, x_4, x_5, x_12398); +if (lean_obj_tag(x_12401) == 0) +{ +lean_object* x_12402; lean_object* x_12403; lean_object* x_12404; lean_object* x_12405; lean_object* x_12406; lean_object* x_12407; lean_object* x_12408; +x_12402 = lean_ctor_get(x_12401, 0); +lean_inc(x_12402); +x_12403 = lean_ctor_get(x_12401, 1); +lean_inc(x_12403); +lean_dec(x_12401); +x_12404 = lean_ctor_get(x_12402, 0); +lean_inc(x_12404); +x_12405 = lean_ctor_get(x_12402, 1); +lean_inc(x_12405); +if (lean_is_exclusive(x_12402)) { + lean_ctor_release(x_12402, 0); + lean_ctor_release(x_12402, 1); + x_12406 = x_12402; +} else { + lean_dec_ref(x_12402); + x_12406 = lean_box(0); +} +x_12407 = lean_alloc_ctor(1, 1, 0); +lean_ctor_set(x_12407, 0, x_12404); +if (lean_is_scalar(x_12406)) { + x_12408 = lean_alloc_ctor(0, 2, 0); +} else { + x_12408 = x_12406; +} +lean_ctor_set(x_12408, 0, x_12407); +lean_ctor_set(x_12408, 1, x_12405); +x_12219 = x_12408; +x_12220 = x_12403; +goto block_12249; +} +else +{ +lean_object* x_12409; lean_object* x_12410; lean_object* x_12411; lean_object* x_12412; +lean_dec(x_11930); +lean_dec(x_11919); +lean_dec(x_153); +lean_dec(x_5); +lean_dec(x_4); +lean_dec(x_2); +lean_dec(x_1); +x_12409 = lean_ctor_get(x_12401, 0); +lean_inc(x_12409); +x_12410 = lean_ctor_get(x_12401, 1); +lean_inc(x_12410); +if (lean_is_exclusive(x_12401)) { + lean_ctor_release(x_12401, 0); + lean_ctor_release(x_12401, 1); + x_12411 = x_12401; +} else { + lean_dec_ref(x_12401); + x_12411 = 
lean_box(0); +} +if (lean_is_scalar(x_12411)) { + x_12412 = lean_alloc_ctor(1, 2, 0); +} else { + x_12412 = x_12411; +} +lean_ctor_set(x_12412, 0, x_12409); +lean_ctor_set(x_12412, 1, x_12410); +return x_12412; +} +} +else +{ +lean_object* x_12413; lean_object* x_12414; lean_object* x_12415; lean_object* x_12416; +lean_dec(x_12393); +lean_dec(x_12250); +lean_dec(x_11930); +lean_dec(x_11919); +lean_dec(x_153); +lean_dec(x_5); +lean_dec(x_4); +lean_dec(x_2); +lean_dec(x_1); +x_12413 = lean_ctor_get(x_12396, 0); +lean_inc(x_12413); +x_12414 = lean_ctor_get(x_12396, 1); +lean_inc(x_12414); +if (lean_is_exclusive(x_12396)) { + lean_ctor_release(x_12396, 0); + lean_ctor_release(x_12396, 1); + x_12415 = x_12396; +} else { + lean_dec_ref(x_12396); + x_12415 = lean_box(0); +} +if (lean_is_scalar(x_12415)) { + x_12416 = lean_alloc_ctor(1, 2, 0); +} else { + x_12416 = x_12415; +} +lean_ctor_set(x_12416, 0, x_12413); +lean_ctor_set(x_12416, 1, x_12414); +return x_12416; +} +} +} +else +{ +lean_object* x_12417; lean_object* x_12418; lean_object* x_12419; lean_object* x_12420; lean_object* x_12421; lean_object* x_12422; lean_object* x_12423; lean_object* x_12424; +lean_dec(x_12350); +lean_dec(x_12348); +lean_inc(x_11919); +lean_inc(x_153); +lean_ctor_set_tag(x_12250, 7); +lean_ctor_set(x_12250, 1, x_11919); +lean_ctor_set(x_12250, 0, x_153); +x_12417 = lean_ctor_get(x_1, 0); +lean_inc(x_12417); +x_12418 = l_Lean_IR_ToIR_bindVar(x_12417, x_11925, x_4, x_5, x_12255); +x_12419 = lean_ctor_get(x_12418, 0); +lean_inc(x_12419); +x_12420 = lean_ctor_get(x_12418, 1); +lean_inc(x_12420); +lean_dec(x_12418); +x_12421 = lean_ctor_get(x_12419, 0); +lean_inc(x_12421); +x_12422 = lean_ctor_get(x_12419, 1); +lean_inc(x_12422); +lean_dec(x_12419); +x_12423 = lean_box(7); +lean_inc(x_5); +lean_inc(x_4); +lean_inc(x_2); +x_12424 = l_Lean_IR_ToIR_lowerLet___lambda__1(x_2, x_12421, x_12250, x_12423, x_12422, x_4, x_5, x_12420); +if (lean_obj_tag(x_12424) == 0) +{ +lean_object* x_12425; lean_object* x_12426; lean_object* x_12427; lean_object* x_12428; lean_object* x_12429; lean_object* x_12430; lean_object* x_12431; +x_12425 = lean_ctor_get(x_12424, 0); +lean_inc(x_12425); +x_12426 = lean_ctor_get(x_12424, 1); +lean_inc(x_12426); +lean_dec(x_12424); +x_12427 = lean_ctor_get(x_12425, 0); +lean_inc(x_12427); +x_12428 = lean_ctor_get(x_12425, 1); +lean_inc(x_12428); +if (lean_is_exclusive(x_12425)) { + lean_ctor_release(x_12425, 0); + lean_ctor_release(x_12425, 1); + x_12429 = x_12425; +} else { + lean_dec_ref(x_12425); + x_12429 = lean_box(0); +} +x_12430 = lean_alloc_ctor(1, 1, 0); +lean_ctor_set(x_12430, 0, x_12427); +if (lean_is_scalar(x_12429)) { + x_12431 = lean_alloc_ctor(0, 2, 0); +} else { + x_12431 = x_12429; +} +lean_ctor_set(x_12431, 0, x_12430); +lean_ctor_set(x_12431, 1, x_12428); +x_12219 = x_12431; +x_12220 = x_12426; +goto block_12249; +} +else +{ +lean_object* x_12432; lean_object* x_12433; lean_object* x_12434; lean_object* x_12435; +lean_dec(x_11930); +lean_dec(x_11919); +lean_dec(x_153); +lean_dec(x_5); +lean_dec(x_4); +lean_dec(x_2); +lean_dec(x_1); +x_12432 = lean_ctor_get(x_12424, 0); +lean_inc(x_12432); +x_12433 = lean_ctor_get(x_12424, 1); +lean_inc(x_12433); +if (lean_is_exclusive(x_12424)) { + lean_ctor_release(x_12424, 0); + lean_ctor_release(x_12424, 1); + x_12434 = x_12424; +} else { + lean_dec_ref(x_12424); + x_12434 = lean_box(0); +} +if (lean_is_scalar(x_12434)) { + x_12435 = lean_alloc_ctor(1, 2, 0); +} else { + x_12435 = x_12434; +} +lean_ctor_set(x_12435, 0, x_12432); 
+lean_ctor_set(x_12435, 1, x_12433); +return x_12435; +} +} +} +} +else +{ +lean_object* x_12436; lean_object* x_12437; lean_object* x_12438; lean_object* x_12439; lean_object* x_12440; lean_object* x_12441; uint8_t x_12442; +x_12436 = lean_ctor_get(x_12250, 1); +lean_inc(x_12436); +lean_dec(x_12250); +x_12437 = lean_ctor_get(x_12251, 0); +lean_inc(x_12437); +if (lean_is_exclusive(x_12251)) { + lean_ctor_release(x_12251, 0); + x_12438 = x_12251; +} else { + lean_dec_ref(x_12251); + x_12438 = lean_box(0); +} +x_12439 = lean_array_get_size(x_11919); +x_12440 = lean_ctor_get(x_12437, 3); +lean_inc(x_12440); +lean_dec(x_12437); +x_12441 = lean_array_get_size(x_12440); +lean_dec(x_12440); +x_12442 = lean_nat_dec_lt(x_12439, x_12441); +if (x_12442 == 0) +{ +uint8_t x_12443; +x_12443 = lean_nat_dec_eq(x_12439, x_12441); +if (x_12443 == 0) +{ +lean_object* x_12444; lean_object* x_12445; lean_object* x_12446; lean_object* x_12447; lean_object* x_12448; lean_object* x_12449; lean_object* x_12450; lean_object* x_12451; lean_object* x_12452; lean_object* x_12453; lean_object* x_12454; lean_object* x_12455; lean_object* x_12456; lean_object* x_12457; lean_object* x_12458; lean_object* x_12459; lean_object* x_12460; +x_12444 = lean_unsigned_to_nat(0u); +x_12445 = l_Array_extract___rarg(x_11919, x_12444, x_12441); +x_12446 = l_Array_extract___rarg(x_11919, x_12441, x_12439); +lean_dec(x_12439); +lean_inc(x_153); +x_12447 = lean_alloc_ctor(6, 2, 0); +lean_ctor_set(x_12447, 0, x_153); +lean_ctor_set(x_12447, 1, x_12445); +x_12448 = lean_ctor_get(x_1, 0); +lean_inc(x_12448); +x_12449 = l_Lean_IR_ToIR_bindVar(x_12448, x_11925, x_4, x_5, x_12436); +x_12450 = lean_ctor_get(x_12449, 0); +lean_inc(x_12450); +x_12451 = lean_ctor_get(x_12449, 1); +lean_inc(x_12451); +lean_dec(x_12449); +x_12452 = lean_ctor_get(x_12450, 0); +lean_inc(x_12452); +x_12453 = lean_ctor_get(x_12450, 1); +lean_inc(x_12453); +lean_dec(x_12450); +x_12454 = l_Lean_IR_ToIR_newVar(x_12453, x_4, x_5, x_12451); +x_12455 = lean_ctor_get(x_12454, 0); +lean_inc(x_12455); +x_12456 = lean_ctor_get(x_12454, 1); +lean_inc(x_12456); +lean_dec(x_12454); +x_12457 = lean_ctor_get(x_12455, 0); +lean_inc(x_12457); +x_12458 = lean_ctor_get(x_12455, 1); +lean_inc(x_12458); +lean_dec(x_12455); +x_12459 = lean_ctor_get(x_1, 2); +lean_inc(x_12459); +lean_inc(x_5); +lean_inc(x_4); +x_12460 = l_Lean_IR_ToIR_lowerType(x_12459, x_12458, x_4, x_5, x_12456); +if (lean_obj_tag(x_12460) == 0) +{ +lean_object* x_12461; lean_object* x_12462; lean_object* x_12463; lean_object* x_12464; lean_object* x_12465; +x_12461 = lean_ctor_get(x_12460, 0); +lean_inc(x_12461); +x_12462 = lean_ctor_get(x_12460, 1); +lean_inc(x_12462); +lean_dec(x_12460); +x_12463 = lean_ctor_get(x_12461, 0); +lean_inc(x_12463); +x_12464 = lean_ctor_get(x_12461, 1); +lean_inc(x_12464); +lean_dec(x_12461); +lean_inc(x_5); +lean_inc(x_4); +lean_inc(x_2); +x_12465 = l_Lean_IR_ToIR_lowerLet___lambda__3(x_2, x_12457, x_12446, x_12452, x_12447, x_12463, x_12464, x_4, x_5, x_12462); +if (lean_obj_tag(x_12465) == 0) +{ +lean_object* x_12466; lean_object* x_12467; lean_object* x_12468; lean_object* x_12469; lean_object* x_12470; lean_object* x_12471; lean_object* x_12472; +x_12466 = lean_ctor_get(x_12465, 0); +lean_inc(x_12466); +x_12467 = lean_ctor_get(x_12465, 1); +lean_inc(x_12467); +lean_dec(x_12465); +x_12468 = lean_ctor_get(x_12466, 0); +lean_inc(x_12468); +x_12469 = lean_ctor_get(x_12466, 1); +lean_inc(x_12469); +if (lean_is_exclusive(x_12466)) { + lean_ctor_release(x_12466, 0); + lean_ctor_release(x_12466, 
1); + x_12470 = x_12466; +} else { + lean_dec_ref(x_12466); + x_12470 = lean_box(0); +} +if (lean_is_scalar(x_12438)) { + x_12471 = lean_alloc_ctor(1, 1, 0); +} else { + x_12471 = x_12438; +} +lean_ctor_set(x_12471, 0, x_12468); +if (lean_is_scalar(x_12470)) { + x_12472 = lean_alloc_ctor(0, 2, 0); +} else { + x_12472 = x_12470; +} +lean_ctor_set(x_12472, 0, x_12471); +lean_ctor_set(x_12472, 1, x_12469); +x_12219 = x_12472; +x_12220 = x_12467; +goto block_12249; +} +else +{ +lean_object* x_12473; lean_object* x_12474; lean_object* x_12475; lean_object* x_12476; +lean_dec(x_12438); +lean_dec(x_11930); +lean_dec(x_11919); +lean_dec(x_153); +lean_dec(x_5); +lean_dec(x_4); +lean_dec(x_2); +lean_dec(x_1); +x_12473 = lean_ctor_get(x_12465, 0); +lean_inc(x_12473); +x_12474 = lean_ctor_get(x_12465, 1); +lean_inc(x_12474); +if (lean_is_exclusive(x_12465)) { + lean_ctor_release(x_12465, 0); + lean_ctor_release(x_12465, 1); + x_12475 = x_12465; +} else { + lean_dec_ref(x_12465); + x_12475 = lean_box(0); +} +if (lean_is_scalar(x_12475)) { + x_12476 = lean_alloc_ctor(1, 2, 0); +} else { + x_12476 = x_12475; +} +lean_ctor_set(x_12476, 0, x_12473); +lean_ctor_set(x_12476, 1, x_12474); +return x_12476; +} +} +else +{ +lean_object* x_12477; lean_object* x_12478; lean_object* x_12479; lean_object* x_12480; +lean_dec(x_12457); +lean_dec(x_12452); +lean_dec(x_12447); +lean_dec(x_12446); +lean_dec(x_12438); +lean_dec(x_11930); +lean_dec(x_11919); +lean_dec(x_153); +lean_dec(x_5); +lean_dec(x_4); +lean_dec(x_2); +lean_dec(x_1); +x_12477 = lean_ctor_get(x_12460, 0); +lean_inc(x_12477); +x_12478 = lean_ctor_get(x_12460, 1); +lean_inc(x_12478); +if (lean_is_exclusive(x_12460)) { + lean_ctor_release(x_12460, 0); + lean_ctor_release(x_12460, 1); + x_12479 = x_12460; +} else { + lean_dec_ref(x_12460); + x_12479 = lean_box(0); +} +if (lean_is_scalar(x_12479)) { + x_12480 = lean_alloc_ctor(1, 2, 0); +} else { + x_12480 = x_12479; +} +lean_ctor_set(x_12480, 0, x_12477); +lean_ctor_set(x_12480, 1, x_12478); +return x_12480; +} +} +else +{ +lean_object* x_12481; lean_object* x_12482; lean_object* x_12483; lean_object* x_12484; lean_object* x_12485; lean_object* x_12486; lean_object* x_12487; lean_object* x_12488; lean_object* x_12489; +lean_dec(x_12441); +lean_dec(x_12439); +lean_inc(x_11919); +lean_inc(x_153); +x_12481 = lean_alloc_ctor(6, 2, 0); +lean_ctor_set(x_12481, 0, x_153); +lean_ctor_set(x_12481, 1, x_11919); +x_12482 = lean_ctor_get(x_1, 0); +lean_inc(x_12482); +x_12483 = l_Lean_IR_ToIR_bindVar(x_12482, x_11925, x_4, x_5, x_12436); +x_12484 = lean_ctor_get(x_12483, 0); +lean_inc(x_12484); +x_12485 = lean_ctor_get(x_12483, 1); +lean_inc(x_12485); +lean_dec(x_12483); +x_12486 = lean_ctor_get(x_12484, 0); +lean_inc(x_12486); +x_12487 = lean_ctor_get(x_12484, 1); +lean_inc(x_12487); +lean_dec(x_12484); +x_12488 = lean_ctor_get(x_1, 2); +lean_inc(x_12488); +lean_inc(x_5); +lean_inc(x_4); +x_12489 = l_Lean_IR_ToIR_lowerType(x_12488, x_12487, x_4, x_5, x_12485); +if (lean_obj_tag(x_12489) == 0) +{ +lean_object* x_12490; lean_object* x_12491; lean_object* x_12492; lean_object* x_12493; lean_object* x_12494; +x_12490 = lean_ctor_get(x_12489, 0); +lean_inc(x_12490); +x_12491 = lean_ctor_get(x_12489, 1); +lean_inc(x_12491); +lean_dec(x_12489); +x_12492 = lean_ctor_get(x_12490, 0); +lean_inc(x_12492); +x_12493 = lean_ctor_get(x_12490, 1); +lean_inc(x_12493); +lean_dec(x_12490); +lean_inc(x_5); +lean_inc(x_4); +lean_inc(x_2); +x_12494 = l_Lean_IR_ToIR_lowerLet___lambda__1(x_2, x_12486, x_12481, x_12492, x_12493, x_4, x_5, 
x_12491); +if (lean_obj_tag(x_12494) == 0) +{ +lean_object* x_12495; lean_object* x_12496; lean_object* x_12497; lean_object* x_12498; lean_object* x_12499; lean_object* x_12500; lean_object* x_12501; +x_12495 = lean_ctor_get(x_12494, 0); +lean_inc(x_12495); +x_12496 = lean_ctor_get(x_12494, 1); +lean_inc(x_12496); +lean_dec(x_12494); +x_12497 = lean_ctor_get(x_12495, 0); +lean_inc(x_12497); +x_12498 = lean_ctor_get(x_12495, 1); +lean_inc(x_12498); +if (lean_is_exclusive(x_12495)) { + lean_ctor_release(x_12495, 0); + lean_ctor_release(x_12495, 1); + x_12499 = x_12495; +} else { + lean_dec_ref(x_12495); + x_12499 = lean_box(0); +} +if (lean_is_scalar(x_12438)) { + x_12500 = lean_alloc_ctor(1, 1, 0); +} else { + x_12500 = x_12438; +} +lean_ctor_set(x_12500, 0, x_12497); +if (lean_is_scalar(x_12499)) { + x_12501 = lean_alloc_ctor(0, 2, 0); +} else { + x_12501 = x_12499; +} +lean_ctor_set(x_12501, 0, x_12500); +lean_ctor_set(x_12501, 1, x_12498); +x_12219 = x_12501; +x_12220 = x_12496; +goto block_12249; +} +else +{ +lean_object* x_12502; lean_object* x_12503; lean_object* x_12504; lean_object* x_12505; +lean_dec(x_12438); +lean_dec(x_11930); +lean_dec(x_11919); +lean_dec(x_153); +lean_dec(x_5); +lean_dec(x_4); +lean_dec(x_2); +lean_dec(x_1); +x_12502 = lean_ctor_get(x_12494, 0); +lean_inc(x_12502); +x_12503 = lean_ctor_get(x_12494, 1); +lean_inc(x_12503); +if (lean_is_exclusive(x_12494)) { + lean_ctor_release(x_12494, 0); + lean_ctor_release(x_12494, 1); + x_12504 = x_12494; +} else { + lean_dec_ref(x_12494); + x_12504 = lean_box(0); +} +if (lean_is_scalar(x_12504)) { + x_12505 = lean_alloc_ctor(1, 2, 0); +} else { + x_12505 = x_12504; +} +lean_ctor_set(x_12505, 0, x_12502); +lean_ctor_set(x_12505, 1, x_12503); +return x_12505; +} +} +else +{ +lean_object* x_12506; lean_object* x_12507; lean_object* x_12508; lean_object* x_12509; +lean_dec(x_12486); +lean_dec(x_12481); +lean_dec(x_12438); +lean_dec(x_11930); +lean_dec(x_11919); +lean_dec(x_153); +lean_dec(x_5); +lean_dec(x_4); +lean_dec(x_2); +lean_dec(x_1); +x_12506 = lean_ctor_get(x_12489, 0); +lean_inc(x_12506); +x_12507 = lean_ctor_get(x_12489, 1); +lean_inc(x_12507); +if (lean_is_exclusive(x_12489)) { + lean_ctor_release(x_12489, 0); + lean_ctor_release(x_12489, 1); + x_12508 = x_12489; +} else { + lean_dec_ref(x_12489); + x_12508 = lean_box(0); +} +if (lean_is_scalar(x_12508)) { + x_12509 = lean_alloc_ctor(1, 2, 0); +} else { + x_12509 = x_12508; +} +lean_ctor_set(x_12509, 0, x_12506); +lean_ctor_set(x_12509, 1, x_12507); +return x_12509; +} +} +} +else +{ +lean_object* x_12510; lean_object* x_12511; lean_object* x_12512; lean_object* x_12513; lean_object* x_12514; lean_object* x_12515; lean_object* x_12516; lean_object* x_12517; lean_object* x_12518; +lean_dec(x_12441); +lean_dec(x_12439); +lean_inc(x_11919); +lean_inc(x_153); +x_12510 = lean_alloc_ctor(7, 2, 0); +lean_ctor_set(x_12510, 0, x_153); +lean_ctor_set(x_12510, 1, x_11919); +x_12511 = lean_ctor_get(x_1, 0); +lean_inc(x_12511); +x_12512 = l_Lean_IR_ToIR_bindVar(x_12511, x_11925, x_4, x_5, x_12436); +x_12513 = lean_ctor_get(x_12512, 0); +lean_inc(x_12513); +x_12514 = lean_ctor_get(x_12512, 1); +lean_inc(x_12514); +lean_dec(x_12512); +x_12515 = lean_ctor_get(x_12513, 0); +lean_inc(x_12515); +x_12516 = lean_ctor_get(x_12513, 1); +lean_inc(x_12516); +lean_dec(x_12513); +x_12517 = lean_box(7); +lean_inc(x_5); +lean_inc(x_4); +lean_inc(x_2); +x_12518 = l_Lean_IR_ToIR_lowerLet___lambda__1(x_2, x_12515, x_12510, x_12517, x_12516, x_4, x_5, x_12514); +if (lean_obj_tag(x_12518) == 0) +{ 
+lean_object* x_12519; lean_object* x_12520; lean_object* x_12521; lean_object* x_12522; lean_object* x_12523; lean_object* x_12524; lean_object* x_12525; +x_12519 = lean_ctor_get(x_12518, 0); +lean_inc(x_12519); +x_12520 = lean_ctor_get(x_12518, 1); +lean_inc(x_12520); +lean_dec(x_12518); +x_12521 = lean_ctor_get(x_12519, 0); +lean_inc(x_12521); +x_12522 = lean_ctor_get(x_12519, 1); +lean_inc(x_12522); +if (lean_is_exclusive(x_12519)) { + lean_ctor_release(x_12519, 0); + lean_ctor_release(x_12519, 1); + x_12523 = x_12519; +} else { + lean_dec_ref(x_12519); + x_12523 = lean_box(0); +} +if (lean_is_scalar(x_12438)) { + x_12524 = lean_alloc_ctor(1, 1, 0); +} else { + x_12524 = x_12438; +} +lean_ctor_set(x_12524, 0, x_12521); +if (lean_is_scalar(x_12523)) { + x_12525 = lean_alloc_ctor(0, 2, 0); +} else { + x_12525 = x_12523; +} +lean_ctor_set(x_12525, 0, x_12524); +lean_ctor_set(x_12525, 1, x_12522); +x_12219 = x_12525; +x_12220 = x_12520; +goto block_12249; +} +else +{ +lean_object* x_12526; lean_object* x_12527; lean_object* x_12528; lean_object* x_12529; +lean_dec(x_12438); +lean_dec(x_11930); +lean_dec(x_11919); +lean_dec(x_153); +lean_dec(x_5); +lean_dec(x_4); +lean_dec(x_2); +lean_dec(x_1); +x_12526 = lean_ctor_get(x_12518, 0); +lean_inc(x_12526); +x_12527 = lean_ctor_get(x_12518, 1); +lean_inc(x_12527); +if (lean_is_exclusive(x_12518)) { + lean_ctor_release(x_12518, 0); + lean_ctor_release(x_12518, 1); + x_12528 = x_12518; +} else { + lean_dec_ref(x_12518); + x_12528 = lean_box(0); +} +if (lean_is_scalar(x_12528)) { + x_12529 = lean_alloc_ctor(1, 2, 0); +} else { + x_12529 = x_12528; +} +lean_ctor_set(x_12529, 0, x_12526); +lean_ctor_set(x_12529, 1, x_12527); +return x_12529; +} +} +} +} +block_12249: +{ +lean_object* x_12221; +x_12221 = lean_ctor_get(x_12219, 0); +lean_inc(x_12221); +if (lean_obj_tag(x_12221) == 0) +{ +lean_object* x_12222; lean_object* x_12223; lean_object* x_12224; lean_object* x_12225; lean_object* x_12226; lean_object* x_12227; lean_object* x_12228; lean_object* x_12229; lean_object* x_12230; lean_object* x_12231; +lean_dec(x_11930); +x_12222 = lean_ctor_get(x_12219, 1); +lean_inc(x_12222); +lean_dec(x_12219); +x_12223 = lean_alloc_ctor(6, 2, 0); +lean_ctor_set(x_12223, 0, x_153); +lean_ctor_set(x_12223, 1, x_11919); +x_12224 = lean_ctor_get(x_1, 0); +lean_inc(x_12224); +x_12225 = l_Lean_IR_ToIR_bindVar(x_12224, x_12222, x_4, x_5, x_12220); +x_12226 = lean_ctor_get(x_12225, 0); +lean_inc(x_12226); +x_12227 = lean_ctor_get(x_12225, 1); +lean_inc(x_12227); +lean_dec(x_12225); +x_12228 = lean_ctor_get(x_12226, 0); +lean_inc(x_12228); +x_12229 = lean_ctor_get(x_12226, 1); +lean_inc(x_12229); +lean_dec(x_12226); +x_12230 = lean_ctor_get(x_1, 2); +lean_inc(x_12230); +lean_dec(x_1); +lean_inc(x_5); +lean_inc(x_4); +x_12231 = l_Lean_IR_ToIR_lowerType(x_12230, x_12229, x_4, x_5, x_12227); +if (lean_obj_tag(x_12231) == 0) +{ +lean_object* x_12232; lean_object* x_12233; lean_object* x_12234; lean_object* x_12235; lean_object* x_12236; +x_12232 = lean_ctor_get(x_12231, 0); +lean_inc(x_12232); +x_12233 = lean_ctor_get(x_12231, 1); +lean_inc(x_12233); +lean_dec(x_12231); +x_12234 = lean_ctor_get(x_12232, 0); +lean_inc(x_12234); +x_12235 = lean_ctor_get(x_12232, 1); +lean_inc(x_12235); +lean_dec(x_12232); +x_12236 = l_Lean_IR_ToIR_lowerLet___lambda__1(x_2, x_12228, x_12223, x_12234, x_12235, x_4, x_5, x_12233); +return x_12236; +} +else +{ +uint8_t x_12237; +lean_dec(x_12228); +lean_dec(x_12223); +lean_dec(x_5); +lean_dec(x_4); +lean_dec(x_2); +x_12237 = 
!lean_is_exclusive(x_12231); +if (x_12237 == 0) +{ +return x_12231; +} +else +{ +lean_object* x_12238; lean_object* x_12239; lean_object* x_12240; +x_12238 = lean_ctor_get(x_12231, 0); +x_12239 = lean_ctor_get(x_12231, 1); +lean_inc(x_12239); +lean_inc(x_12238); +lean_dec(x_12231); +x_12240 = lean_alloc_ctor(1, 2, 0); +lean_ctor_set(x_12240, 0, x_12238); +lean_ctor_set(x_12240, 1, x_12239); +return x_12240; +} +} +} +else +{ +uint8_t x_12241; +lean_dec(x_11919); +lean_dec(x_153); +lean_dec(x_5); +lean_dec(x_4); +lean_dec(x_2); +lean_dec(x_1); +x_12241 = !lean_is_exclusive(x_12219); +if (x_12241 == 0) +{ +lean_object* x_12242; lean_object* x_12243; lean_object* x_12244; +x_12242 = lean_ctor_get(x_12219, 0); +lean_dec(x_12242); +x_12243 = lean_ctor_get(x_12221, 0); +lean_inc(x_12243); +lean_dec(x_12221); +lean_ctor_set(x_12219, 0, x_12243); +if (lean_is_scalar(x_11930)) { + x_12244 = lean_alloc_ctor(0, 2, 0); +} else { + x_12244 = x_11930; +} +lean_ctor_set(x_12244, 0, x_12219); +lean_ctor_set(x_12244, 1, x_12220); +return x_12244; +} +else +{ +lean_object* x_12245; lean_object* x_12246; lean_object* x_12247; lean_object* x_12248; +x_12245 = lean_ctor_get(x_12219, 1); +lean_inc(x_12245); +lean_dec(x_12219); +x_12246 = lean_ctor_get(x_12221, 0); +lean_inc(x_12246); +lean_dec(x_12221); +x_12247 = lean_alloc_ctor(0, 2, 0); +lean_ctor_set(x_12247, 0, x_12246); +lean_ctor_set(x_12247, 1, x_12245); +if (lean_is_scalar(x_11930)) { + x_12248 = lean_alloc_ctor(0, 2, 0); +} else { + x_12248 = x_11930; +} +lean_ctor_set(x_12248, 0, x_12247); +lean_ctor_set(x_12248, 1, x_12220); +return x_12248; +} +} +} +} +case 2: +{ +lean_object* x_12530; lean_object* x_12531; +lean_dec(x_11936); +lean_dec(x_11931); +lean_dec(x_11930); +lean_free_object(x_11921); +lean_dec(x_11919); +lean_dec(x_11911); +lean_dec(x_11910); +lean_dec(x_153); +lean_dec(x_2); +lean_dec(x_1); +x_12530 = l_Lean_IR_ToIR_lowerLet___closed__18; +x_12531 = l_panic___at_Lean_IR_ToIR_lowerAlt_loop___spec__1(x_12530, x_11925, x_4, x_5, x_11929); +return x_12531; +} +case 3: +{ +lean_object* x_12532; lean_object* x_12533; lean_object* x_12563; lean_object* x_12564; +lean_dec(x_11936); +lean_dec(x_11931); +lean_dec(x_11911); +lean_dec(x_11910); +lean_inc(x_153); +x_12563 = l_Lean_Compiler_LCNF_getMonoDecl_x3f(x_153, x_4, x_5, x_11929); +x_12564 = lean_ctor_get(x_12563, 0); +lean_inc(x_12564); +if (lean_obj_tag(x_12564) == 0) +{ +lean_object* x_12565; lean_object* x_12566; +x_12565 = lean_ctor_get(x_12563, 1); +lean_inc(x_12565); +lean_dec(x_12563); +x_12566 = lean_box(0); +lean_ctor_set(x_11921, 0, x_12566); +x_12532 = x_11921; +x_12533 = x_12565; +goto block_12562; +} +else +{ +uint8_t x_12567; +lean_free_object(x_11921); +x_12567 = !lean_is_exclusive(x_12563); +if (x_12567 == 0) +{ +lean_object* x_12568; lean_object* x_12569; uint8_t x_12570; +x_12568 = lean_ctor_get(x_12563, 1); +x_12569 = lean_ctor_get(x_12563, 0); +lean_dec(x_12569); +x_12570 = !lean_is_exclusive(x_12564); +if (x_12570 == 0) +{ +lean_object* x_12571; lean_object* x_12572; lean_object* x_12573; lean_object* x_12574; uint8_t x_12575; +x_12571 = lean_ctor_get(x_12564, 0); +x_12572 = lean_array_get_size(x_11919); +x_12573 = lean_ctor_get(x_12571, 3); +lean_inc(x_12573); +lean_dec(x_12571); +x_12574 = lean_array_get_size(x_12573); +lean_dec(x_12573); +x_12575 = lean_nat_dec_lt(x_12572, x_12574); +if (x_12575 == 0) +{ +uint8_t x_12576; +x_12576 = lean_nat_dec_eq(x_12572, x_12574); +if (x_12576 == 0) +{ +lean_object* x_12577; lean_object* x_12578; lean_object* x_12579; lean_object* 
x_12580; lean_object* x_12581; lean_object* x_12582; lean_object* x_12583; lean_object* x_12584; lean_object* x_12585; lean_object* x_12586; lean_object* x_12587; lean_object* x_12588; lean_object* x_12589; lean_object* x_12590; lean_object* x_12591; lean_object* x_12592; +x_12577 = lean_unsigned_to_nat(0u); +x_12578 = l_Array_extract___rarg(x_11919, x_12577, x_12574); +x_12579 = l_Array_extract___rarg(x_11919, x_12574, x_12572); +lean_dec(x_12572); +lean_inc(x_153); +lean_ctor_set_tag(x_12563, 6); +lean_ctor_set(x_12563, 1, x_12578); +lean_ctor_set(x_12563, 0, x_153); +x_12580 = lean_ctor_get(x_1, 0); +lean_inc(x_12580); +x_12581 = l_Lean_IR_ToIR_bindVar(x_12580, x_11925, x_4, x_5, x_12568); +x_12582 = lean_ctor_get(x_12581, 0); +lean_inc(x_12582); +x_12583 = lean_ctor_get(x_12581, 1); +lean_inc(x_12583); +lean_dec(x_12581); +x_12584 = lean_ctor_get(x_12582, 0); +lean_inc(x_12584); +x_12585 = lean_ctor_get(x_12582, 1); +lean_inc(x_12585); +lean_dec(x_12582); +x_12586 = l_Lean_IR_ToIR_newVar(x_12585, x_4, x_5, x_12583); +x_12587 = lean_ctor_get(x_12586, 0); +lean_inc(x_12587); +x_12588 = lean_ctor_get(x_12586, 1); +lean_inc(x_12588); +lean_dec(x_12586); +x_12589 = lean_ctor_get(x_12587, 0); +lean_inc(x_12589); +x_12590 = lean_ctor_get(x_12587, 1); +lean_inc(x_12590); +lean_dec(x_12587); +x_12591 = lean_ctor_get(x_1, 2); +lean_inc(x_12591); +lean_inc(x_5); +lean_inc(x_4); +x_12592 = l_Lean_IR_ToIR_lowerType(x_12591, x_12590, x_4, x_5, x_12588); +if (lean_obj_tag(x_12592) == 0) +{ +lean_object* x_12593; lean_object* x_12594; lean_object* x_12595; lean_object* x_12596; lean_object* x_12597; +x_12593 = lean_ctor_get(x_12592, 0); +lean_inc(x_12593); +x_12594 = lean_ctor_get(x_12592, 1); +lean_inc(x_12594); +lean_dec(x_12592); +x_12595 = lean_ctor_get(x_12593, 0); +lean_inc(x_12595); +x_12596 = lean_ctor_get(x_12593, 1); +lean_inc(x_12596); +lean_dec(x_12593); +lean_inc(x_5); +lean_inc(x_4); +lean_inc(x_2); +x_12597 = l_Lean_IR_ToIR_lowerLet___lambda__3(x_2, x_12589, x_12579, x_12584, x_12563, x_12595, x_12596, x_4, x_5, x_12594); +if (lean_obj_tag(x_12597) == 0) +{ +lean_object* x_12598; lean_object* x_12599; uint8_t x_12600; +x_12598 = lean_ctor_get(x_12597, 0); +lean_inc(x_12598); +x_12599 = lean_ctor_get(x_12597, 1); +lean_inc(x_12599); +lean_dec(x_12597); +x_12600 = !lean_is_exclusive(x_12598); +if (x_12600 == 0) +{ +lean_object* x_12601; +x_12601 = lean_ctor_get(x_12598, 0); +lean_ctor_set(x_12564, 0, x_12601); +lean_ctor_set(x_12598, 0, x_12564); +x_12532 = x_12598; +x_12533 = x_12599; +goto block_12562; +} +else +{ +lean_object* x_12602; lean_object* x_12603; lean_object* x_12604; +x_12602 = lean_ctor_get(x_12598, 0); +x_12603 = lean_ctor_get(x_12598, 1); +lean_inc(x_12603); +lean_inc(x_12602); +lean_dec(x_12598); +lean_ctor_set(x_12564, 0, x_12602); +x_12604 = lean_alloc_ctor(0, 2, 0); +lean_ctor_set(x_12604, 0, x_12564); +lean_ctor_set(x_12604, 1, x_12603); +x_12532 = x_12604; +x_12533 = x_12599; +goto block_12562; +} +} +else +{ +uint8_t x_12605; +lean_free_object(x_12564); +lean_dec(x_11930); +lean_dec(x_11919); +lean_dec(x_153); +lean_dec(x_5); +lean_dec(x_4); +lean_dec(x_2); +lean_dec(x_1); +x_12605 = !lean_is_exclusive(x_12597); +if (x_12605 == 0) +{ +return x_12597; +} +else +{ +lean_object* x_12606; lean_object* x_12607; lean_object* x_12608; +x_12606 = lean_ctor_get(x_12597, 0); +x_12607 = lean_ctor_get(x_12597, 1); +lean_inc(x_12607); +lean_inc(x_12606); +lean_dec(x_12597); +x_12608 = lean_alloc_ctor(1, 2, 0); +lean_ctor_set(x_12608, 0, x_12606); +lean_ctor_set(x_12608, 1, 
x_12607); +return x_12608; +} +} +} +else +{ +uint8_t x_12609; +lean_dec(x_12589); +lean_dec(x_12584); +lean_dec(x_12563); +lean_dec(x_12579); +lean_free_object(x_12564); +lean_dec(x_11930); +lean_dec(x_11919); +lean_dec(x_153); +lean_dec(x_5); +lean_dec(x_4); +lean_dec(x_2); +lean_dec(x_1); +x_12609 = !lean_is_exclusive(x_12592); +if (x_12609 == 0) +{ +return x_12592; +} +else +{ +lean_object* x_12610; lean_object* x_12611; lean_object* x_12612; +x_12610 = lean_ctor_get(x_12592, 0); +x_12611 = lean_ctor_get(x_12592, 1); +lean_inc(x_12611); +lean_inc(x_12610); +lean_dec(x_12592); +x_12612 = lean_alloc_ctor(1, 2, 0); +lean_ctor_set(x_12612, 0, x_12610); +lean_ctor_set(x_12612, 1, x_12611); +return x_12612; +} +} +} +else +{ +lean_object* x_12613; lean_object* x_12614; lean_object* x_12615; lean_object* x_12616; lean_object* x_12617; lean_object* x_12618; lean_object* x_12619; lean_object* x_12620; +lean_dec(x_12574); +lean_dec(x_12572); +lean_inc(x_11919); +lean_inc(x_153); +lean_ctor_set_tag(x_12563, 6); +lean_ctor_set(x_12563, 1, x_11919); +lean_ctor_set(x_12563, 0, x_153); +x_12613 = lean_ctor_get(x_1, 0); +lean_inc(x_12613); +x_12614 = l_Lean_IR_ToIR_bindVar(x_12613, x_11925, x_4, x_5, x_12568); +x_12615 = lean_ctor_get(x_12614, 0); +lean_inc(x_12615); +x_12616 = lean_ctor_get(x_12614, 1); +lean_inc(x_12616); +lean_dec(x_12614); +x_12617 = lean_ctor_get(x_12615, 0); +lean_inc(x_12617); +x_12618 = lean_ctor_get(x_12615, 1); +lean_inc(x_12618); +lean_dec(x_12615); +x_12619 = lean_ctor_get(x_1, 2); +lean_inc(x_12619); +lean_inc(x_5); +lean_inc(x_4); +x_12620 = l_Lean_IR_ToIR_lowerType(x_12619, x_12618, x_4, x_5, x_12616); +if (lean_obj_tag(x_12620) == 0) +{ +lean_object* x_12621; lean_object* x_12622; lean_object* x_12623; lean_object* x_12624; lean_object* x_12625; +x_12621 = lean_ctor_get(x_12620, 0); +lean_inc(x_12621); +x_12622 = lean_ctor_get(x_12620, 1); +lean_inc(x_12622); +lean_dec(x_12620); +x_12623 = lean_ctor_get(x_12621, 0); +lean_inc(x_12623); +x_12624 = lean_ctor_get(x_12621, 1); +lean_inc(x_12624); +lean_dec(x_12621); +lean_inc(x_5); +lean_inc(x_4); +lean_inc(x_2); +x_12625 = l_Lean_IR_ToIR_lowerLet___lambda__1(x_2, x_12617, x_12563, x_12623, x_12624, x_4, x_5, x_12622); +if (lean_obj_tag(x_12625) == 0) +{ +lean_object* x_12626; lean_object* x_12627; uint8_t x_12628; +x_12626 = lean_ctor_get(x_12625, 0); +lean_inc(x_12626); +x_12627 = lean_ctor_get(x_12625, 1); +lean_inc(x_12627); +lean_dec(x_12625); +x_12628 = !lean_is_exclusive(x_12626); +if (x_12628 == 0) +{ +lean_object* x_12629; +x_12629 = lean_ctor_get(x_12626, 0); +lean_ctor_set(x_12564, 0, x_12629); +lean_ctor_set(x_12626, 0, x_12564); +x_12532 = x_12626; +x_12533 = x_12627; +goto block_12562; +} +else +{ +lean_object* x_12630; lean_object* x_12631; lean_object* x_12632; +x_12630 = lean_ctor_get(x_12626, 0); +x_12631 = lean_ctor_get(x_12626, 1); +lean_inc(x_12631); +lean_inc(x_12630); +lean_dec(x_12626); +lean_ctor_set(x_12564, 0, x_12630); +x_12632 = lean_alloc_ctor(0, 2, 0); +lean_ctor_set(x_12632, 0, x_12564); +lean_ctor_set(x_12632, 1, x_12631); +x_12532 = x_12632; +x_12533 = x_12627; +goto block_12562; +} +} +else +{ +uint8_t x_12633; +lean_free_object(x_12564); +lean_dec(x_11930); +lean_dec(x_11919); +lean_dec(x_153); +lean_dec(x_5); +lean_dec(x_4); +lean_dec(x_2); +lean_dec(x_1); +x_12633 = !lean_is_exclusive(x_12625); +if (x_12633 == 0) +{ +return x_12625; +} +else +{ +lean_object* x_12634; lean_object* x_12635; lean_object* x_12636; +x_12634 = lean_ctor_get(x_12625, 0); +x_12635 = lean_ctor_get(x_12625, 1); 
+lean_inc(x_12635); +lean_inc(x_12634); +lean_dec(x_12625); +x_12636 = lean_alloc_ctor(1, 2, 0); +lean_ctor_set(x_12636, 0, x_12634); +lean_ctor_set(x_12636, 1, x_12635); +return x_12636; +} +} +} +else +{ +uint8_t x_12637; +lean_dec(x_12617); +lean_dec(x_12563); +lean_free_object(x_12564); +lean_dec(x_11930); +lean_dec(x_11919); +lean_dec(x_153); +lean_dec(x_5); +lean_dec(x_4); +lean_dec(x_2); +lean_dec(x_1); +x_12637 = !lean_is_exclusive(x_12620); +if (x_12637 == 0) +{ +return x_12620; +} +else +{ +lean_object* x_12638; lean_object* x_12639; lean_object* x_12640; +x_12638 = lean_ctor_get(x_12620, 0); +x_12639 = lean_ctor_get(x_12620, 1); +lean_inc(x_12639); +lean_inc(x_12638); +lean_dec(x_12620); +x_12640 = lean_alloc_ctor(1, 2, 0); +lean_ctor_set(x_12640, 0, x_12638); +lean_ctor_set(x_12640, 1, x_12639); +return x_12640; +} +} +} +} +else +{ +lean_object* x_12641; lean_object* x_12642; lean_object* x_12643; lean_object* x_12644; lean_object* x_12645; lean_object* x_12646; lean_object* x_12647; lean_object* x_12648; +lean_dec(x_12574); +lean_dec(x_12572); +lean_inc(x_11919); +lean_inc(x_153); +lean_ctor_set_tag(x_12563, 7); +lean_ctor_set(x_12563, 1, x_11919); +lean_ctor_set(x_12563, 0, x_153); +x_12641 = lean_ctor_get(x_1, 0); +lean_inc(x_12641); +x_12642 = l_Lean_IR_ToIR_bindVar(x_12641, x_11925, x_4, x_5, x_12568); +x_12643 = lean_ctor_get(x_12642, 0); +lean_inc(x_12643); +x_12644 = lean_ctor_get(x_12642, 1); +lean_inc(x_12644); +lean_dec(x_12642); +x_12645 = lean_ctor_get(x_12643, 0); +lean_inc(x_12645); +x_12646 = lean_ctor_get(x_12643, 1); +lean_inc(x_12646); +lean_dec(x_12643); +x_12647 = lean_box(7); +lean_inc(x_5); +lean_inc(x_4); +lean_inc(x_2); +x_12648 = l_Lean_IR_ToIR_lowerLet___lambda__1(x_2, x_12645, x_12563, x_12647, x_12646, x_4, x_5, x_12644); +if (lean_obj_tag(x_12648) == 0) +{ +lean_object* x_12649; lean_object* x_12650; uint8_t x_12651; +x_12649 = lean_ctor_get(x_12648, 0); +lean_inc(x_12649); +x_12650 = lean_ctor_get(x_12648, 1); +lean_inc(x_12650); +lean_dec(x_12648); +x_12651 = !lean_is_exclusive(x_12649); +if (x_12651 == 0) +{ +lean_object* x_12652; +x_12652 = lean_ctor_get(x_12649, 0); +lean_ctor_set(x_12564, 0, x_12652); +lean_ctor_set(x_12649, 0, x_12564); +x_12532 = x_12649; +x_12533 = x_12650; +goto block_12562; +} +else +{ +lean_object* x_12653; lean_object* x_12654; lean_object* x_12655; +x_12653 = lean_ctor_get(x_12649, 0); +x_12654 = lean_ctor_get(x_12649, 1); +lean_inc(x_12654); +lean_inc(x_12653); +lean_dec(x_12649); +lean_ctor_set(x_12564, 0, x_12653); +x_12655 = lean_alloc_ctor(0, 2, 0); +lean_ctor_set(x_12655, 0, x_12564); +lean_ctor_set(x_12655, 1, x_12654); +x_12532 = x_12655; +x_12533 = x_12650; +goto block_12562; +} +} +else +{ +uint8_t x_12656; +lean_free_object(x_12564); +lean_dec(x_11930); +lean_dec(x_11919); +lean_dec(x_153); +lean_dec(x_5); +lean_dec(x_4); +lean_dec(x_2); +lean_dec(x_1); +x_12656 = !lean_is_exclusive(x_12648); +if (x_12656 == 0) +{ +return x_12648; +} +else +{ +lean_object* x_12657; lean_object* x_12658; lean_object* x_12659; +x_12657 = lean_ctor_get(x_12648, 0); +x_12658 = lean_ctor_get(x_12648, 1); +lean_inc(x_12658); +lean_inc(x_12657); +lean_dec(x_12648); +x_12659 = lean_alloc_ctor(1, 2, 0); +lean_ctor_set(x_12659, 0, x_12657); +lean_ctor_set(x_12659, 1, x_12658); +return x_12659; +} +} +} +} +else +{ +lean_object* x_12660; lean_object* x_12661; lean_object* x_12662; lean_object* x_12663; uint8_t x_12664; +x_12660 = lean_ctor_get(x_12564, 0); +lean_inc(x_12660); +lean_dec(x_12564); +x_12661 = 
lean_array_get_size(x_11919); +x_12662 = lean_ctor_get(x_12660, 3); +lean_inc(x_12662); +lean_dec(x_12660); +x_12663 = lean_array_get_size(x_12662); +lean_dec(x_12662); +x_12664 = lean_nat_dec_lt(x_12661, x_12663); +if (x_12664 == 0) +{ +uint8_t x_12665; +x_12665 = lean_nat_dec_eq(x_12661, x_12663); +if (x_12665 == 0) +{ +lean_object* x_12666; lean_object* x_12667; lean_object* x_12668; lean_object* x_12669; lean_object* x_12670; lean_object* x_12671; lean_object* x_12672; lean_object* x_12673; lean_object* x_12674; lean_object* x_12675; lean_object* x_12676; lean_object* x_12677; lean_object* x_12678; lean_object* x_12679; lean_object* x_12680; lean_object* x_12681; +x_12666 = lean_unsigned_to_nat(0u); +x_12667 = l_Array_extract___rarg(x_11919, x_12666, x_12663); +x_12668 = l_Array_extract___rarg(x_11919, x_12663, x_12661); +lean_dec(x_12661); +lean_inc(x_153); +lean_ctor_set_tag(x_12563, 6); +lean_ctor_set(x_12563, 1, x_12667); +lean_ctor_set(x_12563, 0, x_153); +x_12669 = lean_ctor_get(x_1, 0); +lean_inc(x_12669); +x_12670 = l_Lean_IR_ToIR_bindVar(x_12669, x_11925, x_4, x_5, x_12568); +x_12671 = lean_ctor_get(x_12670, 0); +lean_inc(x_12671); +x_12672 = lean_ctor_get(x_12670, 1); +lean_inc(x_12672); +lean_dec(x_12670); +x_12673 = lean_ctor_get(x_12671, 0); +lean_inc(x_12673); +x_12674 = lean_ctor_get(x_12671, 1); +lean_inc(x_12674); +lean_dec(x_12671); +x_12675 = l_Lean_IR_ToIR_newVar(x_12674, x_4, x_5, x_12672); +x_12676 = lean_ctor_get(x_12675, 0); +lean_inc(x_12676); +x_12677 = lean_ctor_get(x_12675, 1); +lean_inc(x_12677); +lean_dec(x_12675); +x_12678 = lean_ctor_get(x_12676, 0); +lean_inc(x_12678); +x_12679 = lean_ctor_get(x_12676, 1); +lean_inc(x_12679); +lean_dec(x_12676); +x_12680 = lean_ctor_get(x_1, 2); +lean_inc(x_12680); +lean_inc(x_5); +lean_inc(x_4); +x_12681 = l_Lean_IR_ToIR_lowerType(x_12680, x_12679, x_4, x_5, x_12677); +if (lean_obj_tag(x_12681) == 0) +{ +lean_object* x_12682; lean_object* x_12683; lean_object* x_12684; lean_object* x_12685; lean_object* x_12686; +x_12682 = lean_ctor_get(x_12681, 0); +lean_inc(x_12682); +x_12683 = lean_ctor_get(x_12681, 1); +lean_inc(x_12683); +lean_dec(x_12681); +x_12684 = lean_ctor_get(x_12682, 0); +lean_inc(x_12684); +x_12685 = lean_ctor_get(x_12682, 1); +lean_inc(x_12685); +lean_dec(x_12682); +lean_inc(x_5); +lean_inc(x_4); +lean_inc(x_2); +x_12686 = l_Lean_IR_ToIR_lowerLet___lambda__3(x_2, x_12678, x_12668, x_12673, x_12563, x_12684, x_12685, x_4, x_5, x_12683); +if (lean_obj_tag(x_12686) == 0) +{ +lean_object* x_12687; lean_object* x_12688; lean_object* x_12689; lean_object* x_12690; lean_object* x_12691; lean_object* x_12692; lean_object* x_12693; +x_12687 = lean_ctor_get(x_12686, 0); +lean_inc(x_12687); +x_12688 = lean_ctor_get(x_12686, 1); +lean_inc(x_12688); +lean_dec(x_12686); +x_12689 = lean_ctor_get(x_12687, 0); +lean_inc(x_12689); +x_12690 = lean_ctor_get(x_12687, 1); +lean_inc(x_12690); +if (lean_is_exclusive(x_12687)) { + lean_ctor_release(x_12687, 0); + lean_ctor_release(x_12687, 1); + x_12691 = x_12687; +} else { + lean_dec_ref(x_12687); + x_12691 = lean_box(0); +} +x_12692 = lean_alloc_ctor(1, 1, 0); +lean_ctor_set(x_12692, 0, x_12689); +if (lean_is_scalar(x_12691)) { + x_12693 = lean_alloc_ctor(0, 2, 0); +} else { + x_12693 = x_12691; +} +lean_ctor_set(x_12693, 0, x_12692); +lean_ctor_set(x_12693, 1, x_12690); +x_12532 = x_12693; +x_12533 = x_12688; +goto block_12562; +} +else +{ +lean_object* x_12694; lean_object* x_12695; lean_object* x_12696; lean_object* x_12697; +lean_dec(x_11930); +lean_dec(x_11919); 
+lean_dec(x_153); +lean_dec(x_5); +lean_dec(x_4); +lean_dec(x_2); +lean_dec(x_1); +x_12694 = lean_ctor_get(x_12686, 0); +lean_inc(x_12694); +x_12695 = lean_ctor_get(x_12686, 1); +lean_inc(x_12695); +if (lean_is_exclusive(x_12686)) { + lean_ctor_release(x_12686, 0); + lean_ctor_release(x_12686, 1); + x_12696 = x_12686; +} else { + lean_dec_ref(x_12686); + x_12696 = lean_box(0); +} +if (lean_is_scalar(x_12696)) { + x_12697 = lean_alloc_ctor(1, 2, 0); +} else { + x_12697 = x_12696; +} +lean_ctor_set(x_12697, 0, x_12694); +lean_ctor_set(x_12697, 1, x_12695); +return x_12697; +} +} +else +{ +lean_object* x_12698; lean_object* x_12699; lean_object* x_12700; lean_object* x_12701; +lean_dec(x_12678); +lean_dec(x_12673); +lean_dec(x_12563); +lean_dec(x_12668); +lean_dec(x_11930); +lean_dec(x_11919); +lean_dec(x_153); +lean_dec(x_5); +lean_dec(x_4); +lean_dec(x_2); +lean_dec(x_1); +x_12698 = lean_ctor_get(x_12681, 0); +lean_inc(x_12698); +x_12699 = lean_ctor_get(x_12681, 1); +lean_inc(x_12699); +if (lean_is_exclusive(x_12681)) { + lean_ctor_release(x_12681, 0); + lean_ctor_release(x_12681, 1); + x_12700 = x_12681; +} else { + lean_dec_ref(x_12681); + x_12700 = lean_box(0); +} +if (lean_is_scalar(x_12700)) { + x_12701 = lean_alloc_ctor(1, 2, 0); +} else { + x_12701 = x_12700; +} +lean_ctor_set(x_12701, 0, x_12698); +lean_ctor_set(x_12701, 1, x_12699); +return x_12701; +} +} +else +{ +lean_object* x_12702; lean_object* x_12703; lean_object* x_12704; lean_object* x_12705; lean_object* x_12706; lean_object* x_12707; lean_object* x_12708; lean_object* x_12709; +lean_dec(x_12663); +lean_dec(x_12661); +lean_inc(x_11919); +lean_inc(x_153); +lean_ctor_set_tag(x_12563, 6); +lean_ctor_set(x_12563, 1, x_11919); +lean_ctor_set(x_12563, 0, x_153); +x_12702 = lean_ctor_get(x_1, 0); +lean_inc(x_12702); +x_12703 = l_Lean_IR_ToIR_bindVar(x_12702, x_11925, x_4, x_5, x_12568); +x_12704 = lean_ctor_get(x_12703, 0); +lean_inc(x_12704); +x_12705 = lean_ctor_get(x_12703, 1); +lean_inc(x_12705); +lean_dec(x_12703); +x_12706 = lean_ctor_get(x_12704, 0); +lean_inc(x_12706); +x_12707 = lean_ctor_get(x_12704, 1); +lean_inc(x_12707); +lean_dec(x_12704); +x_12708 = lean_ctor_get(x_1, 2); +lean_inc(x_12708); +lean_inc(x_5); +lean_inc(x_4); +x_12709 = l_Lean_IR_ToIR_lowerType(x_12708, x_12707, x_4, x_5, x_12705); +if (lean_obj_tag(x_12709) == 0) +{ +lean_object* x_12710; lean_object* x_12711; lean_object* x_12712; lean_object* x_12713; lean_object* x_12714; +x_12710 = lean_ctor_get(x_12709, 0); +lean_inc(x_12710); +x_12711 = lean_ctor_get(x_12709, 1); +lean_inc(x_12711); +lean_dec(x_12709); +x_12712 = lean_ctor_get(x_12710, 0); +lean_inc(x_12712); +x_12713 = lean_ctor_get(x_12710, 1); +lean_inc(x_12713); +lean_dec(x_12710); +lean_inc(x_5); +lean_inc(x_4); +lean_inc(x_2); +x_12714 = l_Lean_IR_ToIR_lowerLet___lambda__1(x_2, x_12706, x_12563, x_12712, x_12713, x_4, x_5, x_12711); +if (lean_obj_tag(x_12714) == 0) +{ +lean_object* x_12715; lean_object* x_12716; lean_object* x_12717; lean_object* x_12718; lean_object* x_12719; lean_object* x_12720; lean_object* x_12721; +x_12715 = lean_ctor_get(x_12714, 0); +lean_inc(x_12715); +x_12716 = lean_ctor_get(x_12714, 1); +lean_inc(x_12716); +lean_dec(x_12714); +x_12717 = lean_ctor_get(x_12715, 0); +lean_inc(x_12717); +x_12718 = lean_ctor_get(x_12715, 1); +lean_inc(x_12718); +if (lean_is_exclusive(x_12715)) { + lean_ctor_release(x_12715, 0); + lean_ctor_release(x_12715, 1); + x_12719 = x_12715; +} else { + lean_dec_ref(x_12715); + x_12719 = lean_box(0); +} +x_12720 = lean_alloc_ctor(1, 1, 0); 
+lean_ctor_set(x_12720, 0, x_12717); +if (lean_is_scalar(x_12719)) { + x_12721 = lean_alloc_ctor(0, 2, 0); +} else { + x_12721 = x_12719; +} +lean_ctor_set(x_12721, 0, x_12720); +lean_ctor_set(x_12721, 1, x_12718); +x_12532 = x_12721; +x_12533 = x_12716; +goto block_12562; +} +else +{ +lean_object* x_12722; lean_object* x_12723; lean_object* x_12724; lean_object* x_12725; +lean_dec(x_11930); +lean_dec(x_11919); +lean_dec(x_153); +lean_dec(x_5); +lean_dec(x_4); +lean_dec(x_2); +lean_dec(x_1); +x_12722 = lean_ctor_get(x_12714, 0); +lean_inc(x_12722); +x_12723 = lean_ctor_get(x_12714, 1); +lean_inc(x_12723); +if (lean_is_exclusive(x_12714)) { + lean_ctor_release(x_12714, 0); + lean_ctor_release(x_12714, 1); + x_12724 = x_12714; +} else { + lean_dec_ref(x_12714); + x_12724 = lean_box(0); +} +if (lean_is_scalar(x_12724)) { + x_12725 = lean_alloc_ctor(1, 2, 0); +} else { + x_12725 = x_12724; +} +lean_ctor_set(x_12725, 0, x_12722); +lean_ctor_set(x_12725, 1, x_12723); +return x_12725; +} +} +else +{ +lean_object* x_12726; lean_object* x_12727; lean_object* x_12728; lean_object* x_12729; +lean_dec(x_12706); +lean_dec(x_12563); +lean_dec(x_11930); +lean_dec(x_11919); +lean_dec(x_153); +lean_dec(x_5); +lean_dec(x_4); +lean_dec(x_2); +lean_dec(x_1); +x_12726 = lean_ctor_get(x_12709, 0); +lean_inc(x_12726); +x_12727 = lean_ctor_get(x_12709, 1); +lean_inc(x_12727); +if (lean_is_exclusive(x_12709)) { + lean_ctor_release(x_12709, 0); + lean_ctor_release(x_12709, 1); + x_12728 = x_12709; +} else { + lean_dec_ref(x_12709); + x_12728 = lean_box(0); +} +if (lean_is_scalar(x_12728)) { + x_12729 = lean_alloc_ctor(1, 2, 0); +} else { + x_12729 = x_12728; +} +lean_ctor_set(x_12729, 0, x_12726); +lean_ctor_set(x_12729, 1, x_12727); +return x_12729; +} +} +} +else +{ +lean_object* x_12730; lean_object* x_12731; lean_object* x_12732; lean_object* x_12733; lean_object* x_12734; lean_object* x_12735; lean_object* x_12736; lean_object* x_12737; +lean_dec(x_12663); +lean_dec(x_12661); +lean_inc(x_11919); +lean_inc(x_153); +lean_ctor_set_tag(x_12563, 7); +lean_ctor_set(x_12563, 1, x_11919); +lean_ctor_set(x_12563, 0, x_153); +x_12730 = lean_ctor_get(x_1, 0); +lean_inc(x_12730); +x_12731 = l_Lean_IR_ToIR_bindVar(x_12730, x_11925, x_4, x_5, x_12568); +x_12732 = lean_ctor_get(x_12731, 0); +lean_inc(x_12732); +x_12733 = lean_ctor_get(x_12731, 1); +lean_inc(x_12733); +lean_dec(x_12731); +x_12734 = lean_ctor_get(x_12732, 0); +lean_inc(x_12734); +x_12735 = lean_ctor_get(x_12732, 1); +lean_inc(x_12735); +lean_dec(x_12732); +x_12736 = lean_box(7); +lean_inc(x_5); +lean_inc(x_4); +lean_inc(x_2); +x_12737 = l_Lean_IR_ToIR_lowerLet___lambda__1(x_2, x_12734, x_12563, x_12736, x_12735, x_4, x_5, x_12733); +if (lean_obj_tag(x_12737) == 0) +{ +lean_object* x_12738; lean_object* x_12739; lean_object* x_12740; lean_object* x_12741; lean_object* x_12742; lean_object* x_12743; lean_object* x_12744; +x_12738 = lean_ctor_get(x_12737, 0); +lean_inc(x_12738); +x_12739 = lean_ctor_get(x_12737, 1); +lean_inc(x_12739); +lean_dec(x_12737); +x_12740 = lean_ctor_get(x_12738, 0); +lean_inc(x_12740); +x_12741 = lean_ctor_get(x_12738, 1); +lean_inc(x_12741); +if (lean_is_exclusive(x_12738)) { + lean_ctor_release(x_12738, 0); + lean_ctor_release(x_12738, 1); + x_12742 = x_12738; +} else { + lean_dec_ref(x_12738); + x_12742 = lean_box(0); +} +x_12743 = lean_alloc_ctor(1, 1, 0); +lean_ctor_set(x_12743, 0, x_12740); +if (lean_is_scalar(x_12742)) { + x_12744 = lean_alloc_ctor(0, 2, 0); +} else { + x_12744 = x_12742; +} +lean_ctor_set(x_12744, 0, x_12743); 
+lean_ctor_set(x_12744, 1, x_12741); +x_12532 = x_12744; +x_12533 = x_12739; +goto block_12562; +} +else +{ +lean_object* x_12745; lean_object* x_12746; lean_object* x_12747; lean_object* x_12748; +lean_dec(x_11930); +lean_dec(x_11919); +lean_dec(x_153); +lean_dec(x_5); +lean_dec(x_4); +lean_dec(x_2); +lean_dec(x_1); +x_12745 = lean_ctor_get(x_12737, 0); +lean_inc(x_12745); +x_12746 = lean_ctor_get(x_12737, 1); +lean_inc(x_12746); +if (lean_is_exclusive(x_12737)) { + lean_ctor_release(x_12737, 0); + lean_ctor_release(x_12737, 1); + x_12747 = x_12737; +} else { + lean_dec_ref(x_12737); + x_12747 = lean_box(0); +} +if (lean_is_scalar(x_12747)) { + x_12748 = lean_alloc_ctor(1, 2, 0); +} else { + x_12748 = x_12747; +} +lean_ctor_set(x_12748, 0, x_12745); +lean_ctor_set(x_12748, 1, x_12746); +return x_12748; +} +} +} +} +else +{ +lean_object* x_12749; lean_object* x_12750; lean_object* x_12751; lean_object* x_12752; lean_object* x_12753; lean_object* x_12754; uint8_t x_12755; +x_12749 = lean_ctor_get(x_12563, 1); +lean_inc(x_12749); +lean_dec(x_12563); +x_12750 = lean_ctor_get(x_12564, 0); +lean_inc(x_12750); +if (lean_is_exclusive(x_12564)) { + lean_ctor_release(x_12564, 0); + x_12751 = x_12564; +} else { + lean_dec_ref(x_12564); + x_12751 = lean_box(0); +} +x_12752 = lean_array_get_size(x_11919); +x_12753 = lean_ctor_get(x_12750, 3); +lean_inc(x_12753); +lean_dec(x_12750); +x_12754 = lean_array_get_size(x_12753); +lean_dec(x_12753); +x_12755 = lean_nat_dec_lt(x_12752, x_12754); +if (x_12755 == 0) +{ +uint8_t x_12756; +x_12756 = lean_nat_dec_eq(x_12752, x_12754); +if (x_12756 == 0) +{ +lean_object* x_12757; lean_object* x_12758; lean_object* x_12759; lean_object* x_12760; lean_object* x_12761; lean_object* x_12762; lean_object* x_12763; lean_object* x_12764; lean_object* x_12765; lean_object* x_12766; lean_object* x_12767; lean_object* x_12768; lean_object* x_12769; lean_object* x_12770; lean_object* x_12771; lean_object* x_12772; lean_object* x_12773; +x_12757 = lean_unsigned_to_nat(0u); +x_12758 = l_Array_extract___rarg(x_11919, x_12757, x_12754); +x_12759 = l_Array_extract___rarg(x_11919, x_12754, x_12752); +lean_dec(x_12752); +lean_inc(x_153); +x_12760 = lean_alloc_ctor(6, 2, 0); +lean_ctor_set(x_12760, 0, x_153); +lean_ctor_set(x_12760, 1, x_12758); +x_12761 = lean_ctor_get(x_1, 0); +lean_inc(x_12761); +x_12762 = l_Lean_IR_ToIR_bindVar(x_12761, x_11925, x_4, x_5, x_12749); +x_12763 = lean_ctor_get(x_12762, 0); +lean_inc(x_12763); +x_12764 = lean_ctor_get(x_12762, 1); +lean_inc(x_12764); +lean_dec(x_12762); +x_12765 = lean_ctor_get(x_12763, 0); +lean_inc(x_12765); +x_12766 = lean_ctor_get(x_12763, 1); +lean_inc(x_12766); +lean_dec(x_12763); +x_12767 = l_Lean_IR_ToIR_newVar(x_12766, x_4, x_5, x_12764); +x_12768 = lean_ctor_get(x_12767, 0); +lean_inc(x_12768); +x_12769 = lean_ctor_get(x_12767, 1); +lean_inc(x_12769); +lean_dec(x_12767); +x_12770 = lean_ctor_get(x_12768, 0); +lean_inc(x_12770); +x_12771 = lean_ctor_get(x_12768, 1); +lean_inc(x_12771); +lean_dec(x_12768); +x_12772 = lean_ctor_get(x_1, 2); +lean_inc(x_12772); +lean_inc(x_5); +lean_inc(x_4); +x_12773 = l_Lean_IR_ToIR_lowerType(x_12772, x_12771, x_4, x_5, x_12769); +if (lean_obj_tag(x_12773) == 0) +{ +lean_object* x_12774; lean_object* x_12775; lean_object* x_12776; lean_object* x_12777; lean_object* x_12778; +x_12774 = lean_ctor_get(x_12773, 0); +lean_inc(x_12774); +x_12775 = lean_ctor_get(x_12773, 1); +lean_inc(x_12775); +lean_dec(x_12773); +x_12776 = lean_ctor_get(x_12774, 0); +lean_inc(x_12776); +x_12777 = 
lean_ctor_get(x_12774, 1); +lean_inc(x_12777); +lean_dec(x_12774); +lean_inc(x_5); +lean_inc(x_4); +lean_inc(x_2); +x_12778 = l_Lean_IR_ToIR_lowerLet___lambda__3(x_2, x_12770, x_12759, x_12765, x_12760, x_12776, x_12777, x_4, x_5, x_12775); +if (lean_obj_tag(x_12778) == 0) +{ +lean_object* x_12779; lean_object* x_12780; lean_object* x_12781; lean_object* x_12782; lean_object* x_12783; lean_object* x_12784; lean_object* x_12785; +x_12779 = lean_ctor_get(x_12778, 0); +lean_inc(x_12779); +x_12780 = lean_ctor_get(x_12778, 1); +lean_inc(x_12780); +lean_dec(x_12778); +x_12781 = lean_ctor_get(x_12779, 0); +lean_inc(x_12781); +x_12782 = lean_ctor_get(x_12779, 1); +lean_inc(x_12782); +if (lean_is_exclusive(x_12779)) { + lean_ctor_release(x_12779, 0); + lean_ctor_release(x_12779, 1); + x_12783 = x_12779; +} else { + lean_dec_ref(x_12779); + x_12783 = lean_box(0); +} +if (lean_is_scalar(x_12751)) { + x_12784 = lean_alloc_ctor(1, 1, 0); +} else { + x_12784 = x_12751; +} +lean_ctor_set(x_12784, 0, x_12781); +if (lean_is_scalar(x_12783)) { + x_12785 = lean_alloc_ctor(0, 2, 0); +} else { + x_12785 = x_12783; +} +lean_ctor_set(x_12785, 0, x_12784); +lean_ctor_set(x_12785, 1, x_12782); +x_12532 = x_12785; +x_12533 = x_12780; +goto block_12562; +} +else +{ +lean_object* x_12786; lean_object* x_12787; lean_object* x_12788; lean_object* x_12789; +lean_dec(x_12751); +lean_dec(x_11930); +lean_dec(x_11919); +lean_dec(x_153); +lean_dec(x_5); +lean_dec(x_4); +lean_dec(x_2); +lean_dec(x_1); +x_12786 = lean_ctor_get(x_12778, 0); +lean_inc(x_12786); +x_12787 = lean_ctor_get(x_12778, 1); +lean_inc(x_12787); +if (lean_is_exclusive(x_12778)) { + lean_ctor_release(x_12778, 0); + lean_ctor_release(x_12778, 1); + x_12788 = x_12778; +} else { + lean_dec_ref(x_12778); + x_12788 = lean_box(0); +} +if (lean_is_scalar(x_12788)) { + x_12789 = lean_alloc_ctor(1, 2, 0); +} else { + x_12789 = x_12788; +} +lean_ctor_set(x_12789, 0, x_12786); +lean_ctor_set(x_12789, 1, x_12787); +return x_12789; +} +} +else +{ +lean_object* x_12790; lean_object* x_12791; lean_object* x_12792; lean_object* x_12793; +lean_dec(x_12770); +lean_dec(x_12765); +lean_dec(x_12760); +lean_dec(x_12759); +lean_dec(x_12751); +lean_dec(x_11930); +lean_dec(x_11919); +lean_dec(x_153); +lean_dec(x_5); +lean_dec(x_4); +lean_dec(x_2); +lean_dec(x_1); +x_12790 = lean_ctor_get(x_12773, 0); +lean_inc(x_12790); +x_12791 = lean_ctor_get(x_12773, 1); +lean_inc(x_12791); +if (lean_is_exclusive(x_12773)) { + lean_ctor_release(x_12773, 0); + lean_ctor_release(x_12773, 1); + x_12792 = x_12773; +} else { + lean_dec_ref(x_12773); + x_12792 = lean_box(0); +} +if (lean_is_scalar(x_12792)) { + x_12793 = lean_alloc_ctor(1, 2, 0); +} else { + x_12793 = x_12792; +} +lean_ctor_set(x_12793, 0, x_12790); +lean_ctor_set(x_12793, 1, x_12791); +return x_12793; +} +} +else +{ +lean_object* x_12794; lean_object* x_12795; lean_object* x_12796; lean_object* x_12797; lean_object* x_12798; lean_object* x_12799; lean_object* x_12800; lean_object* x_12801; lean_object* x_12802; +lean_dec(x_12754); +lean_dec(x_12752); +lean_inc(x_11919); +lean_inc(x_153); +x_12794 = lean_alloc_ctor(6, 2, 0); +lean_ctor_set(x_12794, 0, x_153); +lean_ctor_set(x_12794, 1, x_11919); +x_12795 = lean_ctor_get(x_1, 0); +lean_inc(x_12795); +x_12796 = l_Lean_IR_ToIR_bindVar(x_12795, x_11925, x_4, x_5, x_12749); +x_12797 = lean_ctor_get(x_12796, 0); +lean_inc(x_12797); +x_12798 = lean_ctor_get(x_12796, 1); +lean_inc(x_12798); +lean_dec(x_12796); +x_12799 = lean_ctor_get(x_12797, 0); +lean_inc(x_12799); +x_12800 = 
lean_ctor_get(x_12797, 1); +lean_inc(x_12800); +lean_dec(x_12797); +x_12801 = lean_ctor_get(x_1, 2); +lean_inc(x_12801); +lean_inc(x_5); +lean_inc(x_4); +x_12802 = l_Lean_IR_ToIR_lowerType(x_12801, x_12800, x_4, x_5, x_12798); +if (lean_obj_tag(x_12802) == 0) +{ +lean_object* x_12803; lean_object* x_12804; lean_object* x_12805; lean_object* x_12806; lean_object* x_12807; +x_12803 = lean_ctor_get(x_12802, 0); +lean_inc(x_12803); +x_12804 = lean_ctor_get(x_12802, 1); +lean_inc(x_12804); +lean_dec(x_12802); +x_12805 = lean_ctor_get(x_12803, 0); +lean_inc(x_12805); +x_12806 = lean_ctor_get(x_12803, 1); +lean_inc(x_12806); +lean_dec(x_12803); +lean_inc(x_5); +lean_inc(x_4); +lean_inc(x_2); +x_12807 = l_Lean_IR_ToIR_lowerLet___lambda__1(x_2, x_12799, x_12794, x_12805, x_12806, x_4, x_5, x_12804); +if (lean_obj_tag(x_12807) == 0) +{ +lean_object* x_12808; lean_object* x_12809; lean_object* x_12810; lean_object* x_12811; lean_object* x_12812; lean_object* x_12813; lean_object* x_12814; +x_12808 = lean_ctor_get(x_12807, 0); +lean_inc(x_12808); +x_12809 = lean_ctor_get(x_12807, 1); +lean_inc(x_12809); +lean_dec(x_12807); +x_12810 = lean_ctor_get(x_12808, 0); +lean_inc(x_12810); +x_12811 = lean_ctor_get(x_12808, 1); +lean_inc(x_12811); +if (lean_is_exclusive(x_12808)) { + lean_ctor_release(x_12808, 0); + lean_ctor_release(x_12808, 1); + x_12812 = x_12808; +} else { + lean_dec_ref(x_12808); + x_12812 = lean_box(0); +} +if (lean_is_scalar(x_12751)) { + x_12813 = lean_alloc_ctor(1, 1, 0); +} else { + x_12813 = x_12751; +} +lean_ctor_set(x_12813, 0, x_12810); +if (lean_is_scalar(x_12812)) { + x_12814 = lean_alloc_ctor(0, 2, 0); +} else { + x_12814 = x_12812; +} +lean_ctor_set(x_12814, 0, x_12813); +lean_ctor_set(x_12814, 1, x_12811); +x_12532 = x_12814; +x_12533 = x_12809; +goto block_12562; +} +else +{ +lean_object* x_12815; lean_object* x_12816; lean_object* x_12817; lean_object* x_12818; +lean_dec(x_12751); +lean_dec(x_11930); +lean_dec(x_11919); +lean_dec(x_153); +lean_dec(x_5); +lean_dec(x_4); +lean_dec(x_2); +lean_dec(x_1); +x_12815 = lean_ctor_get(x_12807, 0); +lean_inc(x_12815); +x_12816 = lean_ctor_get(x_12807, 1); +lean_inc(x_12816); +if (lean_is_exclusive(x_12807)) { + lean_ctor_release(x_12807, 0); + lean_ctor_release(x_12807, 1); + x_12817 = x_12807; +} else { + lean_dec_ref(x_12807); + x_12817 = lean_box(0); +} +if (lean_is_scalar(x_12817)) { + x_12818 = lean_alloc_ctor(1, 2, 0); +} else { + x_12818 = x_12817; +} +lean_ctor_set(x_12818, 0, x_12815); +lean_ctor_set(x_12818, 1, x_12816); +return x_12818; +} +} +else +{ +lean_object* x_12819; lean_object* x_12820; lean_object* x_12821; lean_object* x_12822; +lean_dec(x_12799); +lean_dec(x_12794); +lean_dec(x_12751); +lean_dec(x_11930); +lean_dec(x_11919); +lean_dec(x_153); +lean_dec(x_5); +lean_dec(x_4); +lean_dec(x_2); +lean_dec(x_1); +x_12819 = lean_ctor_get(x_12802, 0); +lean_inc(x_12819); +x_12820 = lean_ctor_get(x_12802, 1); +lean_inc(x_12820); +if (lean_is_exclusive(x_12802)) { + lean_ctor_release(x_12802, 0); + lean_ctor_release(x_12802, 1); + x_12821 = x_12802; +} else { + lean_dec_ref(x_12802); + x_12821 = lean_box(0); +} +if (lean_is_scalar(x_12821)) { + x_12822 = lean_alloc_ctor(1, 2, 0); +} else { + x_12822 = x_12821; +} +lean_ctor_set(x_12822, 0, x_12819); +lean_ctor_set(x_12822, 1, x_12820); +return x_12822; +} +} +} +else +{ +lean_object* x_12823; lean_object* x_12824; lean_object* x_12825; lean_object* x_12826; lean_object* x_12827; lean_object* x_12828; lean_object* x_12829; lean_object* x_12830; lean_object* x_12831; 
+lean_dec(x_12754); +lean_dec(x_12752); +lean_inc(x_11919); +lean_inc(x_153); +x_12823 = lean_alloc_ctor(7, 2, 0); +lean_ctor_set(x_12823, 0, x_153); +lean_ctor_set(x_12823, 1, x_11919); +x_12824 = lean_ctor_get(x_1, 0); +lean_inc(x_12824); +x_12825 = l_Lean_IR_ToIR_bindVar(x_12824, x_11925, x_4, x_5, x_12749); +x_12826 = lean_ctor_get(x_12825, 0); +lean_inc(x_12826); +x_12827 = lean_ctor_get(x_12825, 1); +lean_inc(x_12827); +lean_dec(x_12825); +x_12828 = lean_ctor_get(x_12826, 0); +lean_inc(x_12828); +x_12829 = lean_ctor_get(x_12826, 1); +lean_inc(x_12829); +lean_dec(x_12826); +x_12830 = lean_box(7); +lean_inc(x_5); +lean_inc(x_4); +lean_inc(x_2); +x_12831 = l_Lean_IR_ToIR_lowerLet___lambda__1(x_2, x_12828, x_12823, x_12830, x_12829, x_4, x_5, x_12827); +if (lean_obj_tag(x_12831) == 0) +{ +lean_object* x_12832; lean_object* x_12833; lean_object* x_12834; lean_object* x_12835; lean_object* x_12836; lean_object* x_12837; lean_object* x_12838; +x_12832 = lean_ctor_get(x_12831, 0); +lean_inc(x_12832); +x_12833 = lean_ctor_get(x_12831, 1); +lean_inc(x_12833); +lean_dec(x_12831); +x_12834 = lean_ctor_get(x_12832, 0); +lean_inc(x_12834); +x_12835 = lean_ctor_get(x_12832, 1); +lean_inc(x_12835); +if (lean_is_exclusive(x_12832)) { + lean_ctor_release(x_12832, 0); + lean_ctor_release(x_12832, 1); + x_12836 = x_12832; +} else { + lean_dec_ref(x_12832); + x_12836 = lean_box(0); +} +if (lean_is_scalar(x_12751)) { + x_12837 = lean_alloc_ctor(1, 1, 0); +} else { + x_12837 = x_12751; +} +lean_ctor_set(x_12837, 0, x_12834); +if (lean_is_scalar(x_12836)) { + x_12838 = lean_alloc_ctor(0, 2, 0); +} else { + x_12838 = x_12836; +} +lean_ctor_set(x_12838, 0, x_12837); +lean_ctor_set(x_12838, 1, x_12835); +x_12532 = x_12838; +x_12533 = x_12833; +goto block_12562; +} +else +{ +lean_object* x_12839; lean_object* x_12840; lean_object* x_12841; lean_object* x_12842; +lean_dec(x_12751); +lean_dec(x_11930); +lean_dec(x_11919); +lean_dec(x_153); +lean_dec(x_5); +lean_dec(x_4); +lean_dec(x_2); +lean_dec(x_1); +x_12839 = lean_ctor_get(x_12831, 0); +lean_inc(x_12839); +x_12840 = lean_ctor_get(x_12831, 1); +lean_inc(x_12840); +if (lean_is_exclusive(x_12831)) { + lean_ctor_release(x_12831, 0); + lean_ctor_release(x_12831, 1); + x_12841 = x_12831; +} else { + lean_dec_ref(x_12831); + x_12841 = lean_box(0); +} +if (lean_is_scalar(x_12841)) { + x_12842 = lean_alloc_ctor(1, 2, 0); +} else { + x_12842 = x_12841; +} +lean_ctor_set(x_12842, 0, x_12839); +lean_ctor_set(x_12842, 1, x_12840); +return x_12842; +} +} +} +} +block_12562: +{ +lean_object* x_12534; +x_12534 = lean_ctor_get(x_12532, 0); +lean_inc(x_12534); +if (lean_obj_tag(x_12534) == 0) +{ +lean_object* x_12535; lean_object* x_12536; lean_object* x_12537; lean_object* x_12538; lean_object* x_12539; lean_object* x_12540; lean_object* x_12541; lean_object* x_12542; lean_object* x_12543; lean_object* x_12544; +lean_dec(x_11930); +x_12535 = lean_ctor_get(x_12532, 1); +lean_inc(x_12535); +lean_dec(x_12532); +x_12536 = lean_alloc_ctor(6, 2, 0); +lean_ctor_set(x_12536, 0, x_153); +lean_ctor_set(x_12536, 1, x_11919); +x_12537 = lean_ctor_get(x_1, 0); +lean_inc(x_12537); +x_12538 = l_Lean_IR_ToIR_bindVar(x_12537, x_12535, x_4, x_5, x_12533); +x_12539 = lean_ctor_get(x_12538, 0); +lean_inc(x_12539); +x_12540 = lean_ctor_get(x_12538, 1); +lean_inc(x_12540); +lean_dec(x_12538); +x_12541 = lean_ctor_get(x_12539, 0); +lean_inc(x_12541); +x_12542 = lean_ctor_get(x_12539, 1); +lean_inc(x_12542); +lean_dec(x_12539); +x_12543 = lean_ctor_get(x_1, 2); +lean_inc(x_12543); +lean_dec(x_1); 
+lean_inc(x_5); +lean_inc(x_4); +x_12544 = l_Lean_IR_ToIR_lowerType(x_12543, x_12542, x_4, x_5, x_12540); +if (lean_obj_tag(x_12544) == 0) +{ +lean_object* x_12545; lean_object* x_12546; lean_object* x_12547; lean_object* x_12548; lean_object* x_12549; +x_12545 = lean_ctor_get(x_12544, 0); +lean_inc(x_12545); +x_12546 = lean_ctor_get(x_12544, 1); +lean_inc(x_12546); +lean_dec(x_12544); +x_12547 = lean_ctor_get(x_12545, 0); +lean_inc(x_12547); +x_12548 = lean_ctor_get(x_12545, 1); +lean_inc(x_12548); +lean_dec(x_12545); +x_12549 = l_Lean_IR_ToIR_lowerLet___lambda__1(x_2, x_12541, x_12536, x_12547, x_12548, x_4, x_5, x_12546); +return x_12549; +} +else +{ +uint8_t x_12550; +lean_dec(x_12541); +lean_dec(x_12536); +lean_dec(x_5); +lean_dec(x_4); +lean_dec(x_2); +x_12550 = !lean_is_exclusive(x_12544); +if (x_12550 == 0) +{ +return x_12544; +} +else +{ +lean_object* x_12551; lean_object* x_12552; lean_object* x_12553; +x_12551 = lean_ctor_get(x_12544, 0); +x_12552 = lean_ctor_get(x_12544, 1); +lean_inc(x_12552); +lean_inc(x_12551); +lean_dec(x_12544); +x_12553 = lean_alloc_ctor(1, 2, 0); +lean_ctor_set(x_12553, 0, x_12551); +lean_ctor_set(x_12553, 1, x_12552); +return x_12553; +} +} +} +else +{ +uint8_t x_12554; +lean_dec(x_11919); +lean_dec(x_153); +lean_dec(x_5); +lean_dec(x_4); +lean_dec(x_2); +lean_dec(x_1); +x_12554 = !lean_is_exclusive(x_12532); +if (x_12554 == 0) +{ +lean_object* x_12555; lean_object* x_12556; lean_object* x_12557; +x_12555 = lean_ctor_get(x_12532, 0); +lean_dec(x_12555); +x_12556 = lean_ctor_get(x_12534, 0); +lean_inc(x_12556); +lean_dec(x_12534); +lean_ctor_set(x_12532, 0, x_12556); +if (lean_is_scalar(x_11930)) { + x_12557 = lean_alloc_ctor(0, 2, 0); +} else { + x_12557 = x_11930; +} +lean_ctor_set(x_12557, 0, x_12532); +lean_ctor_set(x_12557, 1, x_12533); +return x_12557; +} +else +{ +lean_object* x_12558; lean_object* x_12559; lean_object* x_12560; lean_object* x_12561; +x_12558 = lean_ctor_get(x_12532, 1); +lean_inc(x_12558); +lean_dec(x_12532); +x_12559 = lean_ctor_get(x_12534, 0); +lean_inc(x_12559); +lean_dec(x_12534); +x_12560 = lean_alloc_ctor(0, 2, 0); +lean_ctor_set(x_12560, 0, x_12559); +lean_ctor_set(x_12560, 1, x_12558); +if (lean_is_scalar(x_11930)) { + x_12561 = lean_alloc_ctor(0, 2, 0); +} else { + x_12561 = x_11930; +} +lean_ctor_set(x_12561, 0, x_12560); +lean_ctor_set(x_12561, 1, x_12533); +return x_12561; +} +} +} +} +case 4: +{ +uint8_t x_12843; +lean_dec(x_11931); +lean_dec(x_11930); +lean_free_object(x_11921); +lean_dec(x_11911); +lean_dec(x_11910); +x_12843 = !lean_is_exclusive(x_11936); +if (x_12843 == 0) +{ +lean_object* x_12844; lean_object* x_12845; uint8_t x_12846; +x_12844 = lean_ctor_get(x_11936, 0); +lean_dec(x_12844); +x_12845 = l_Lean_IR_ToIR_lowerLet___closed__20; +x_12846 = lean_name_eq(x_153, x_12845); +if (x_12846 == 0) +{ +uint8_t x_12847; lean_object* x_12848; lean_object* x_12849; lean_object* x_12850; lean_object* x_12851; lean_object* x_12852; lean_object* x_12853; lean_object* x_12854; lean_object* x_12855; +lean_dec(x_11919); +lean_dec(x_2); +lean_dec(x_1); +x_12847 = 1; +x_12848 = l_Lean_IR_ToIR_lowerLet___closed__14; +x_12849 = l_Lean_Name_toString(x_153, x_12847, x_12848); +lean_ctor_set_tag(x_11936, 3); +lean_ctor_set(x_11936, 0, x_12849); +x_12850 = l_Lean_IR_ToIR_lowerLet___closed__22; +x_12851 = lean_alloc_ctor(5, 2, 0); +lean_ctor_set(x_12851, 0, x_12850); +lean_ctor_set(x_12851, 1, x_11936); +x_12852 = l_Lean_IR_ToIR_lowerLet___closed__24; +x_12853 = lean_alloc_ctor(5, 2, 0); +lean_ctor_set(x_12853, 0, x_12851); 
+lean_ctor_set(x_12853, 1, x_12852); +x_12854 = l_Lean_MessageData_ofFormat(x_12853); +x_12855 = l_Lean_throwError___at_Lean_IR_ToIR_lowerLet___spec__1(x_12854, x_11925, x_4, x_5, x_11929); +lean_dec(x_5); +lean_dec(x_4); +lean_dec(x_11925); +return x_12855; +} +else +{ +lean_object* x_12856; lean_object* x_12857; lean_object* x_12858; +lean_free_object(x_11936); +lean_dec(x_153); +x_12856 = l_Lean_IR_instInhabitedArg; +x_12857 = lean_unsigned_to_nat(2u); +x_12858 = lean_array_get(x_12856, x_11919, x_12857); +lean_dec(x_11919); +if (lean_obj_tag(x_12858) == 0) +{ +lean_object* x_12859; lean_object* x_12860; lean_object* x_12861; lean_object* x_12862; lean_object* x_12863; lean_object* x_12864; lean_object* x_12865; +x_12859 = lean_ctor_get(x_12858, 0); +lean_inc(x_12859); +lean_dec(x_12858); +x_12860 = lean_ctor_get(x_1, 0); +lean_inc(x_12860); +lean_dec(x_1); +x_12861 = l_Lean_IR_ToIR_bindVarToVarId(x_12860, x_12859, x_11925, x_4, x_5, x_11929); +x_12862 = lean_ctor_get(x_12861, 0); +lean_inc(x_12862); +x_12863 = lean_ctor_get(x_12861, 1); +lean_inc(x_12863); +lean_dec(x_12861); +x_12864 = lean_ctor_get(x_12862, 1); +lean_inc(x_12864); +lean_dec(x_12862); +x_12865 = l_Lean_IR_ToIR_lowerCode(x_2, x_12864, x_4, x_5, x_12863); +return x_12865; +} +else +{ +lean_object* x_12866; lean_object* x_12867; lean_object* x_12868; lean_object* x_12869; lean_object* x_12870; lean_object* x_12871; +x_12866 = lean_ctor_get(x_1, 0); +lean_inc(x_12866); +lean_dec(x_1); +x_12867 = l_Lean_IR_ToIR_bindErased(x_12866, x_11925, x_4, x_5, x_11929); +x_12868 = lean_ctor_get(x_12867, 0); +lean_inc(x_12868); +x_12869 = lean_ctor_get(x_12867, 1); +lean_inc(x_12869); +lean_dec(x_12867); +x_12870 = lean_ctor_get(x_12868, 1); +lean_inc(x_12870); +lean_dec(x_12868); +x_12871 = l_Lean_IR_ToIR_lowerCode(x_2, x_12870, x_4, x_5, x_12869); +return x_12871; +} +} +} +else +{ +lean_object* x_12872; uint8_t x_12873; +lean_dec(x_11936); +x_12872 = l_Lean_IR_ToIR_lowerLet___closed__20; +x_12873 = lean_name_eq(x_153, x_12872); +if (x_12873 == 0) +{ +uint8_t x_12874; lean_object* x_12875; lean_object* x_12876; lean_object* x_12877; lean_object* x_12878; lean_object* x_12879; lean_object* x_12880; lean_object* x_12881; lean_object* x_12882; lean_object* x_12883; +lean_dec(x_11919); +lean_dec(x_2); +lean_dec(x_1); +x_12874 = 1; +x_12875 = l_Lean_IR_ToIR_lowerLet___closed__14; +x_12876 = l_Lean_Name_toString(x_153, x_12874, x_12875); +x_12877 = lean_alloc_ctor(3, 1, 0); +lean_ctor_set(x_12877, 0, x_12876); +x_12878 = l_Lean_IR_ToIR_lowerLet___closed__22; +x_12879 = lean_alloc_ctor(5, 2, 0); +lean_ctor_set(x_12879, 0, x_12878); +lean_ctor_set(x_12879, 1, x_12877); +x_12880 = l_Lean_IR_ToIR_lowerLet___closed__24; +x_12881 = lean_alloc_ctor(5, 2, 0); +lean_ctor_set(x_12881, 0, x_12879); +lean_ctor_set(x_12881, 1, x_12880); +x_12882 = l_Lean_MessageData_ofFormat(x_12881); +x_12883 = l_Lean_throwError___at_Lean_IR_ToIR_lowerLet___spec__1(x_12882, x_11925, x_4, x_5, x_11929); +lean_dec(x_5); +lean_dec(x_4); +lean_dec(x_11925); +return x_12883; +} +else +{ +lean_object* x_12884; lean_object* x_12885; lean_object* x_12886; +lean_dec(x_153); +x_12884 = l_Lean_IR_instInhabitedArg; +x_12885 = lean_unsigned_to_nat(2u); +x_12886 = lean_array_get(x_12884, x_11919, x_12885); +lean_dec(x_11919); +if (lean_obj_tag(x_12886) == 0) +{ +lean_object* x_12887; lean_object* x_12888; lean_object* x_12889; lean_object* x_12890; lean_object* x_12891; lean_object* x_12892; lean_object* x_12893; +x_12887 = lean_ctor_get(x_12886, 0); +lean_inc(x_12887); 
+lean_dec(x_12886); +x_12888 = lean_ctor_get(x_1, 0); +lean_inc(x_12888); +lean_dec(x_1); +x_12889 = l_Lean_IR_ToIR_bindVarToVarId(x_12888, x_12887, x_11925, x_4, x_5, x_11929); +x_12890 = lean_ctor_get(x_12889, 0); +lean_inc(x_12890); +x_12891 = lean_ctor_get(x_12889, 1); +lean_inc(x_12891); +lean_dec(x_12889); +x_12892 = lean_ctor_get(x_12890, 1); +lean_inc(x_12892); +lean_dec(x_12890); +x_12893 = l_Lean_IR_ToIR_lowerCode(x_2, x_12892, x_4, x_5, x_12891); +return x_12893; +} +else +{ +lean_object* x_12894; lean_object* x_12895; lean_object* x_12896; lean_object* x_12897; lean_object* x_12898; lean_object* x_12899; +x_12894 = lean_ctor_get(x_1, 0); +lean_inc(x_12894); +lean_dec(x_1); +x_12895 = l_Lean_IR_ToIR_bindErased(x_12894, x_11925, x_4, x_5, x_11929); +x_12896 = lean_ctor_get(x_12895, 0); +lean_inc(x_12896); +x_12897 = lean_ctor_get(x_12895, 1); +lean_inc(x_12897); +lean_dec(x_12895); +x_12898 = lean_ctor_get(x_12896, 1); +lean_inc(x_12898); +lean_dec(x_12896); +x_12899 = l_Lean_IR_ToIR_lowerCode(x_2, x_12898, x_4, x_5, x_12897); +return x_12899; +} +} +} +} +case 5: +{ +lean_object* x_12900; lean_object* x_12901; +lean_dec(x_11936); +lean_dec(x_11931); +lean_dec(x_11930); +lean_free_object(x_11921); +lean_dec(x_11919); +lean_dec(x_11911); +lean_dec(x_11910); +lean_dec(x_153); +lean_dec(x_2); +lean_dec(x_1); +x_12900 = l_Lean_IR_ToIR_lowerLet___closed__26; +x_12901 = l_panic___at_Lean_IR_ToIR_lowerAlt_loop___spec__1(x_12900, x_11925, x_4, x_5, x_11929); +return x_12901; +} +case 6: +{ +lean_object* x_12902; uint8_t x_12903; +x_12902 = lean_ctor_get(x_11936, 0); +lean_inc(x_12902); +lean_dec(x_11936); +lean_inc(x_153); +x_12903 = l_Lean_isExtern(x_11931, x_153); +if (x_12903 == 0) +{ +lean_object* x_12904; +lean_dec(x_11930); +lean_free_object(x_11921); +lean_dec(x_11919); +lean_inc(x_5); +lean_inc(x_4); +x_12904 = l_Lean_IR_ToIR_getCtorInfo(x_153, x_11925, x_4, x_5, x_11929); +if (lean_obj_tag(x_12904) == 0) +{ +lean_object* x_12905; lean_object* x_12906; lean_object* x_12907; lean_object* x_12908; lean_object* x_12909; lean_object* x_12910; lean_object* x_12911; lean_object* x_12912; lean_object* x_12913; lean_object* x_12914; lean_object* x_12915; lean_object* x_12916; lean_object* x_12917; lean_object* x_12918; lean_object* x_12919; lean_object* x_12920; lean_object* x_12921; lean_object* x_12922; lean_object* x_12923; lean_object* x_12924; +x_12905 = lean_ctor_get(x_12904, 0); +lean_inc(x_12905); +x_12906 = lean_ctor_get(x_12905, 0); +lean_inc(x_12906); +x_12907 = lean_ctor_get(x_12904, 1); +lean_inc(x_12907); +lean_dec(x_12904); +x_12908 = lean_ctor_get(x_12905, 1); +lean_inc(x_12908); +lean_dec(x_12905); +x_12909 = lean_ctor_get(x_12906, 0); +lean_inc(x_12909); +x_12910 = lean_ctor_get(x_12906, 1); +lean_inc(x_12910); +lean_dec(x_12906); +x_12911 = lean_ctor_get(x_12902, 3); +lean_inc(x_12911); +lean_dec(x_12902); +x_12912 = lean_array_get_size(x_11910); +x_12913 = l_Array_extract___rarg(x_11910, x_12911, x_12912); +lean_dec(x_12912); +lean_dec(x_11910); +x_12914 = lean_array_get_size(x_12910); +x_12915 = lean_unsigned_to_nat(0u); +x_12916 = lean_unsigned_to_nat(1u); +if (lean_is_scalar(x_11911)) { + x_12917 = lean_alloc_ctor(0, 3, 0); +} else { + x_12917 = x_11911; + lean_ctor_set_tag(x_12917, 0); +} +lean_ctor_set(x_12917, 0, x_12915); +lean_ctor_set(x_12917, 1, x_12914); +lean_ctor_set(x_12917, 2, x_12916); +x_12918 = l_Lean_IR_ToIR_lowerLet___closed__27; +x_12919 = l_Std_Range_forIn_x27_loop___at_Lean_IR_ToIR_lowerLet___spec__6(x_12910, x_12913, x_12917, x_12917, x_12918, 
x_12915, lean_box(0), lean_box(0), x_12908, x_4, x_5, x_12907); +lean_dec(x_12917); +x_12920 = lean_ctor_get(x_12919, 0); +lean_inc(x_12920); +x_12921 = lean_ctor_get(x_12919, 1); +lean_inc(x_12921); +lean_dec(x_12919); +x_12922 = lean_ctor_get(x_12920, 0); +lean_inc(x_12922); +x_12923 = lean_ctor_get(x_12920, 1); +lean_inc(x_12923); +lean_dec(x_12920); +x_12924 = l_Lean_IR_ToIR_lowerLet___lambda__4(x_1, x_2, x_12909, x_12910, x_12913, x_12922, x_12923, x_4, x_5, x_12921); +lean_dec(x_12913); +lean_dec(x_12910); +return x_12924; +} +else +{ +uint8_t x_12925; +lean_dec(x_12902); +lean_dec(x_11911); +lean_dec(x_11910); +lean_dec(x_5); +lean_dec(x_4); +lean_dec(x_2); +lean_dec(x_1); +x_12925 = !lean_is_exclusive(x_12904); +if (x_12925 == 0) +{ +return x_12904; +} +else +{ +lean_object* x_12926; lean_object* x_12927; lean_object* x_12928; +x_12926 = lean_ctor_get(x_12904, 0); +x_12927 = lean_ctor_get(x_12904, 1); +lean_inc(x_12927); +lean_inc(x_12926); +lean_dec(x_12904); +x_12928 = lean_alloc_ctor(1, 2, 0); +lean_ctor_set(x_12928, 0, x_12926); +lean_ctor_set(x_12928, 1, x_12927); +return x_12928; +} +} +} +else +{ +lean_object* x_12929; lean_object* x_12930; lean_object* x_12960; lean_object* x_12961; +lean_dec(x_12902); +lean_dec(x_11911); +lean_dec(x_11910); +lean_inc(x_153); +x_12960 = l_Lean_Compiler_LCNF_getMonoDecl_x3f(x_153, x_4, x_5, x_11929); +x_12961 = lean_ctor_get(x_12960, 0); +lean_inc(x_12961); +if (lean_obj_tag(x_12961) == 0) +{ +lean_object* x_12962; lean_object* x_12963; +x_12962 = lean_ctor_get(x_12960, 1); +lean_inc(x_12962); +lean_dec(x_12960); +x_12963 = lean_box(0); +lean_ctor_set(x_11921, 0, x_12963); +x_12929 = x_11921; +x_12930 = x_12962; +goto block_12959; +} +else +{ +uint8_t x_12964; +lean_free_object(x_11921); +x_12964 = !lean_is_exclusive(x_12960); +if (x_12964 == 0) +{ +lean_object* x_12965; lean_object* x_12966; uint8_t x_12967; +x_12965 = lean_ctor_get(x_12960, 1); +x_12966 = lean_ctor_get(x_12960, 0); +lean_dec(x_12966); +x_12967 = !lean_is_exclusive(x_12961); +if (x_12967 == 0) +{ +lean_object* x_12968; lean_object* x_12969; lean_object* x_12970; lean_object* x_12971; uint8_t x_12972; +x_12968 = lean_ctor_get(x_12961, 0); +x_12969 = lean_array_get_size(x_11919); +x_12970 = lean_ctor_get(x_12968, 3); +lean_inc(x_12970); +lean_dec(x_12968); +x_12971 = lean_array_get_size(x_12970); +lean_dec(x_12970); +x_12972 = lean_nat_dec_lt(x_12969, x_12971); +if (x_12972 == 0) +{ +uint8_t x_12973; +x_12973 = lean_nat_dec_eq(x_12969, x_12971); +if (x_12973 == 0) +{ +lean_object* x_12974; lean_object* x_12975; lean_object* x_12976; lean_object* x_12977; lean_object* x_12978; lean_object* x_12979; lean_object* x_12980; lean_object* x_12981; lean_object* x_12982; lean_object* x_12983; lean_object* x_12984; lean_object* x_12985; lean_object* x_12986; lean_object* x_12987; lean_object* x_12988; lean_object* x_12989; +x_12974 = lean_unsigned_to_nat(0u); +x_12975 = l_Array_extract___rarg(x_11919, x_12974, x_12971); +x_12976 = l_Array_extract___rarg(x_11919, x_12971, x_12969); +lean_dec(x_12969); +lean_inc(x_153); +lean_ctor_set_tag(x_12960, 6); +lean_ctor_set(x_12960, 1, x_12975); +lean_ctor_set(x_12960, 0, x_153); +x_12977 = lean_ctor_get(x_1, 0); +lean_inc(x_12977); +x_12978 = l_Lean_IR_ToIR_bindVar(x_12977, x_11925, x_4, x_5, x_12965); +x_12979 = lean_ctor_get(x_12978, 0); +lean_inc(x_12979); +x_12980 = lean_ctor_get(x_12978, 1); +lean_inc(x_12980); +lean_dec(x_12978); +x_12981 = lean_ctor_get(x_12979, 0); +lean_inc(x_12981); +x_12982 = lean_ctor_get(x_12979, 1); 
+lean_inc(x_12982); +lean_dec(x_12979); +x_12983 = l_Lean_IR_ToIR_newVar(x_12982, x_4, x_5, x_12980); +x_12984 = lean_ctor_get(x_12983, 0); +lean_inc(x_12984); +x_12985 = lean_ctor_get(x_12983, 1); +lean_inc(x_12985); +lean_dec(x_12983); +x_12986 = lean_ctor_get(x_12984, 0); +lean_inc(x_12986); +x_12987 = lean_ctor_get(x_12984, 1); +lean_inc(x_12987); +lean_dec(x_12984); +x_12988 = lean_ctor_get(x_1, 2); +lean_inc(x_12988); +lean_inc(x_5); +lean_inc(x_4); +x_12989 = l_Lean_IR_ToIR_lowerType(x_12988, x_12987, x_4, x_5, x_12985); +if (lean_obj_tag(x_12989) == 0) +{ +lean_object* x_12990; lean_object* x_12991; lean_object* x_12992; lean_object* x_12993; lean_object* x_12994; +x_12990 = lean_ctor_get(x_12989, 0); +lean_inc(x_12990); +x_12991 = lean_ctor_get(x_12989, 1); +lean_inc(x_12991); +lean_dec(x_12989); +x_12992 = lean_ctor_get(x_12990, 0); +lean_inc(x_12992); +x_12993 = lean_ctor_get(x_12990, 1); +lean_inc(x_12993); +lean_dec(x_12990); +lean_inc(x_5); +lean_inc(x_4); +lean_inc(x_2); +x_12994 = l_Lean_IR_ToIR_lowerLet___lambda__3(x_2, x_12986, x_12976, x_12981, x_12960, x_12992, x_12993, x_4, x_5, x_12991); +if (lean_obj_tag(x_12994) == 0) +{ +lean_object* x_12995; lean_object* x_12996; uint8_t x_12997; +x_12995 = lean_ctor_get(x_12994, 0); +lean_inc(x_12995); +x_12996 = lean_ctor_get(x_12994, 1); +lean_inc(x_12996); +lean_dec(x_12994); +x_12997 = !lean_is_exclusive(x_12995); +if (x_12997 == 0) +{ +lean_object* x_12998; +x_12998 = lean_ctor_get(x_12995, 0); +lean_ctor_set(x_12961, 0, x_12998); +lean_ctor_set(x_12995, 0, x_12961); +x_12929 = x_12995; +x_12930 = x_12996; +goto block_12959; +} +else +{ +lean_object* x_12999; lean_object* x_13000; lean_object* x_13001; +x_12999 = lean_ctor_get(x_12995, 0); +x_13000 = lean_ctor_get(x_12995, 1); +lean_inc(x_13000); +lean_inc(x_12999); +lean_dec(x_12995); +lean_ctor_set(x_12961, 0, x_12999); +x_13001 = lean_alloc_ctor(0, 2, 0); +lean_ctor_set(x_13001, 0, x_12961); +lean_ctor_set(x_13001, 1, x_13000); +x_12929 = x_13001; +x_12930 = x_12996; +goto block_12959; +} +} +else +{ +uint8_t x_13002; +lean_free_object(x_12961); +lean_dec(x_11930); +lean_dec(x_11919); +lean_dec(x_153); +lean_dec(x_5); +lean_dec(x_4); +lean_dec(x_2); +lean_dec(x_1); +x_13002 = !lean_is_exclusive(x_12994); +if (x_13002 == 0) +{ +return x_12994; +} +else +{ +lean_object* x_13003; lean_object* x_13004; lean_object* x_13005; +x_13003 = lean_ctor_get(x_12994, 0); +x_13004 = lean_ctor_get(x_12994, 1); +lean_inc(x_13004); +lean_inc(x_13003); +lean_dec(x_12994); +x_13005 = lean_alloc_ctor(1, 2, 0); +lean_ctor_set(x_13005, 0, x_13003); +lean_ctor_set(x_13005, 1, x_13004); +return x_13005; +} +} +} +else +{ +uint8_t x_13006; +lean_dec(x_12986); +lean_dec(x_12981); +lean_dec(x_12960); +lean_dec(x_12976); +lean_free_object(x_12961); +lean_dec(x_11930); +lean_dec(x_11919); +lean_dec(x_153); +lean_dec(x_5); +lean_dec(x_4); +lean_dec(x_2); +lean_dec(x_1); +x_13006 = !lean_is_exclusive(x_12989); +if (x_13006 == 0) +{ +return x_12989; +} +else +{ +lean_object* x_13007; lean_object* x_13008; lean_object* x_13009; +x_13007 = lean_ctor_get(x_12989, 0); +x_13008 = lean_ctor_get(x_12989, 1); +lean_inc(x_13008); +lean_inc(x_13007); +lean_dec(x_12989); +x_13009 = lean_alloc_ctor(1, 2, 0); +lean_ctor_set(x_13009, 0, x_13007); +lean_ctor_set(x_13009, 1, x_13008); +return x_13009; +} +} +} +else +{ +lean_object* x_13010; lean_object* x_13011; lean_object* x_13012; lean_object* x_13013; lean_object* x_13014; lean_object* x_13015; lean_object* x_13016; lean_object* x_13017; +lean_dec(x_12971); 
+lean_dec(x_12969); +lean_inc(x_11919); +lean_inc(x_153); +lean_ctor_set_tag(x_12960, 6); +lean_ctor_set(x_12960, 1, x_11919); +lean_ctor_set(x_12960, 0, x_153); +x_13010 = lean_ctor_get(x_1, 0); +lean_inc(x_13010); +x_13011 = l_Lean_IR_ToIR_bindVar(x_13010, x_11925, x_4, x_5, x_12965); +x_13012 = lean_ctor_get(x_13011, 0); +lean_inc(x_13012); +x_13013 = lean_ctor_get(x_13011, 1); +lean_inc(x_13013); +lean_dec(x_13011); +x_13014 = lean_ctor_get(x_13012, 0); +lean_inc(x_13014); +x_13015 = lean_ctor_get(x_13012, 1); +lean_inc(x_13015); +lean_dec(x_13012); +x_13016 = lean_ctor_get(x_1, 2); +lean_inc(x_13016); +lean_inc(x_5); +lean_inc(x_4); +x_13017 = l_Lean_IR_ToIR_lowerType(x_13016, x_13015, x_4, x_5, x_13013); +if (lean_obj_tag(x_13017) == 0) +{ +lean_object* x_13018; lean_object* x_13019; lean_object* x_13020; lean_object* x_13021; lean_object* x_13022; +x_13018 = lean_ctor_get(x_13017, 0); +lean_inc(x_13018); +x_13019 = lean_ctor_get(x_13017, 1); +lean_inc(x_13019); +lean_dec(x_13017); +x_13020 = lean_ctor_get(x_13018, 0); +lean_inc(x_13020); +x_13021 = lean_ctor_get(x_13018, 1); +lean_inc(x_13021); +lean_dec(x_13018); +lean_inc(x_5); +lean_inc(x_4); +lean_inc(x_2); +x_13022 = l_Lean_IR_ToIR_lowerLet___lambda__1(x_2, x_13014, x_12960, x_13020, x_13021, x_4, x_5, x_13019); +if (lean_obj_tag(x_13022) == 0) +{ +lean_object* x_13023; lean_object* x_13024; uint8_t x_13025; +x_13023 = lean_ctor_get(x_13022, 0); +lean_inc(x_13023); +x_13024 = lean_ctor_get(x_13022, 1); +lean_inc(x_13024); +lean_dec(x_13022); +x_13025 = !lean_is_exclusive(x_13023); +if (x_13025 == 0) +{ +lean_object* x_13026; +x_13026 = lean_ctor_get(x_13023, 0); +lean_ctor_set(x_12961, 0, x_13026); +lean_ctor_set(x_13023, 0, x_12961); +x_12929 = x_13023; +x_12930 = x_13024; +goto block_12959; +} +else +{ +lean_object* x_13027; lean_object* x_13028; lean_object* x_13029; +x_13027 = lean_ctor_get(x_13023, 0); +x_13028 = lean_ctor_get(x_13023, 1); +lean_inc(x_13028); +lean_inc(x_13027); +lean_dec(x_13023); +lean_ctor_set(x_12961, 0, x_13027); +x_13029 = lean_alloc_ctor(0, 2, 0); +lean_ctor_set(x_13029, 0, x_12961); +lean_ctor_set(x_13029, 1, x_13028); +x_12929 = x_13029; +x_12930 = x_13024; +goto block_12959; +} +} +else +{ +uint8_t x_13030; +lean_free_object(x_12961); +lean_dec(x_11930); +lean_dec(x_11919); +lean_dec(x_153); +lean_dec(x_5); +lean_dec(x_4); +lean_dec(x_2); +lean_dec(x_1); +x_13030 = !lean_is_exclusive(x_13022); +if (x_13030 == 0) +{ +return x_13022; +} +else +{ +lean_object* x_13031; lean_object* x_13032; lean_object* x_13033; +x_13031 = lean_ctor_get(x_13022, 0); +x_13032 = lean_ctor_get(x_13022, 1); +lean_inc(x_13032); +lean_inc(x_13031); +lean_dec(x_13022); +x_13033 = lean_alloc_ctor(1, 2, 0); +lean_ctor_set(x_13033, 0, x_13031); +lean_ctor_set(x_13033, 1, x_13032); +return x_13033; +} +} +} +else +{ +uint8_t x_13034; +lean_dec(x_13014); +lean_dec(x_12960); +lean_free_object(x_12961); +lean_dec(x_11930); +lean_dec(x_11919); +lean_dec(x_153); +lean_dec(x_5); +lean_dec(x_4); +lean_dec(x_2); +lean_dec(x_1); +x_13034 = !lean_is_exclusive(x_13017); +if (x_13034 == 0) +{ +return x_13017; +} +else +{ +lean_object* x_13035; lean_object* x_13036; lean_object* x_13037; +x_13035 = lean_ctor_get(x_13017, 0); +x_13036 = lean_ctor_get(x_13017, 1); +lean_inc(x_13036); +lean_inc(x_13035); +lean_dec(x_13017); +x_13037 = lean_alloc_ctor(1, 2, 0); +lean_ctor_set(x_13037, 0, x_13035); +lean_ctor_set(x_13037, 1, x_13036); +return x_13037; +} +} +} +} +else +{ +lean_object* x_13038; lean_object* x_13039; lean_object* x_13040; 
lean_object* x_13041; lean_object* x_13042; lean_object* x_13043; lean_object* x_13044; lean_object* x_13045; +lean_dec(x_12971); +lean_dec(x_12969); +lean_inc(x_11919); +lean_inc(x_153); +lean_ctor_set_tag(x_12960, 7); +lean_ctor_set(x_12960, 1, x_11919); +lean_ctor_set(x_12960, 0, x_153); +x_13038 = lean_ctor_get(x_1, 0); +lean_inc(x_13038); +x_13039 = l_Lean_IR_ToIR_bindVar(x_13038, x_11925, x_4, x_5, x_12965); +x_13040 = lean_ctor_get(x_13039, 0); +lean_inc(x_13040); +x_13041 = lean_ctor_get(x_13039, 1); +lean_inc(x_13041); +lean_dec(x_13039); +x_13042 = lean_ctor_get(x_13040, 0); +lean_inc(x_13042); +x_13043 = lean_ctor_get(x_13040, 1); +lean_inc(x_13043); +lean_dec(x_13040); +x_13044 = lean_box(7); +lean_inc(x_5); +lean_inc(x_4); +lean_inc(x_2); +x_13045 = l_Lean_IR_ToIR_lowerLet___lambda__1(x_2, x_13042, x_12960, x_13044, x_13043, x_4, x_5, x_13041); +if (lean_obj_tag(x_13045) == 0) +{ +lean_object* x_13046; lean_object* x_13047; uint8_t x_13048; +x_13046 = lean_ctor_get(x_13045, 0); +lean_inc(x_13046); +x_13047 = lean_ctor_get(x_13045, 1); +lean_inc(x_13047); +lean_dec(x_13045); +x_13048 = !lean_is_exclusive(x_13046); +if (x_13048 == 0) +{ +lean_object* x_13049; +x_13049 = lean_ctor_get(x_13046, 0); +lean_ctor_set(x_12961, 0, x_13049); +lean_ctor_set(x_13046, 0, x_12961); +x_12929 = x_13046; +x_12930 = x_13047; +goto block_12959; +} +else +{ +lean_object* x_13050; lean_object* x_13051; lean_object* x_13052; +x_13050 = lean_ctor_get(x_13046, 0); +x_13051 = lean_ctor_get(x_13046, 1); +lean_inc(x_13051); +lean_inc(x_13050); +lean_dec(x_13046); +lean_ctor_set(x_12961, 0, x_13050); +x_13052 = lean_alloc_ctor(0, 2, 0); +lean_ctor_set(x_13052, 0, x_12961); +lean_ctor_set(x_13052, 1, x_13051); +x_12929 = x_13052; +x_12930 = x_13047; +goto block_12959; +} +} +else +{ +uint8_t x_13053; +lean_free_object(x_12961); +lean_dec(x_11930); +lean_dec(x_11919); +lean_dec(x_153); +lean_dec(x_5); +lean_dec(x_4); +lean_dec(x_2); +lean_dec(x_1); +x_13053 = !lean_is_exclusive(x_13045); +if (x_13053 == 0) +{ +return x_13045; +} +else +{ +lean_object* x_13054; lean_object* x_13055; lean_object* x_13056; +x_13054 = lean_ctor_get(x_13045, 0); +x_13055 = lean_ctor_get(x_13045, 1); +lean_inc(x_13055); +lean_inc(x_13054); +lean_dec(x_13045); +x_13056 = lean_alloc_ctor(1, 2, 0); +lean_ctor_set(x_13056, 0, x_13054); +lean_ctor_set(x_13056, 1, x_13055); +return x_13056; +} +} +} +} +else +{ +lean_object* x_13057; lean_object* x_13058; lean_object* x_13059; lean_object* x_13060; uint8_t x_13061; +x_13057 = lean_ctor_get(x_12961, 0); +lean_inc(x_13057); +lean_dec(x_12961); +x_13058 = lean_array_get_size(x_11919); +x_13059 = lean_ctor_get(x_13057, 3); +lean_inc(x_13059); +lean_dec(x_13057); +x_13060 = lean_array_get_size(x_13059); +lean_dec(x_13059); +x_13061 = lean_nat_dec_lt(x_13058, x_13060); +if (x_13061 == 0) +{ +uint8_t x_13062; +x_13062 = lean_nat_dec_eq(x_13058, x_13060); +if (x_13062 == 0) +{ +lean_object* x_13063; lean_object* x_13064; lean_object* x_13065; lean_object* x_13066; lean_object* x_13067; lean_object* x_13068; lean_object* x_13069; lean_object* x_13070; lean_object* x_13071; lean_object* x_13072; lean_object* x_13073; lean_object* x_13074; lean_object* x_13075; lean_object* x_13076; lean_object* x_13077; lean_object* x_13078; +x_13063 = lean_unsigned_to_nat(0u); +x_13064 = l_Array_extract___rarg(x_11919, x_13063, x_13060); +x_13065 = l_Array_extract___rarg(x_11919, x_13060, x_13058); +lean_dec(x_13058); +lean_inc(x_153); +lean_ctor_set_tag(x_12960, 6); +lean_ctor_set(x_12960, 1, x_13064); 
+lean_ctor_set(x_12960, 0, x_153); +x_13066 = lean_ctor_get(x_1, 0); +lean_inc(x_13066); +x_13067 = l_Lean_IR_ToIR_bindVar(x_13066, x_11925, x_4, x_5, x_12965); +x_13068 = lean_ctor_get(x_13067, 0); +lean_inc(x_13068); +x_13069 = lean_ctor_get(x_13067, 1); +lean_inc(x_13069); +lean_dec(x_13067); +x_13070 = lean_ctor_get(x_13068, 0); +lean_inc(x_13070); +x_13071 = lean_ctor_get(x_13068, 1); +lean_inc(x_13071); +lean_dec(x_13068); +x_13072 = l_Lean_IR_ToIR_newVar(x_13071, x_4, x_5, x_13069); +x_13073 = lean_ctor_get(x_13072, 0); +lean_inc(x_13073); +x_13074 = lean_ctor_get(x_13072, 1); +lean_inc(x_13074); +lean_dec(x_13072); +x_13075 = lean_ctor_get(x_13073, 0); +lean_inc(x_13075); +x_13076 = lean_ctor_get(x_13073, 1); +lean_inc(x_13076); +lean_dec(x_13073); +x_13077 = lean_ctor_get(x_1, 2); +lean_inc(x_13077); +lean_inc(x_5); +lean_inc(x_4); +x_13078 = l_Lean_IR_ToIR_lowerType(x_13077, x_13076, x_4, x_5, x_13074); +if (lean_obj_tag(x_13078) == 0) +{ +lean_object* x_13079; lean_object* x_13080; lean_object* x_13081; lean_object* x_13082; lean_object* x_13083; +x_13079 = lean_ctor_get(x_13078, 0); +lean_inc(x_13079); +x_13080 = lean_ctor_get(x_13078, 1); +lean_inc(x_13080); +lean_dec(x_13078); +x_13081 = lean_ctor_get(x_13079, 0); +lean_inc(x_13081); +x_13082 = lean_ctor_get(x_13079, 1); +lean_inc(x_13082); +lean_dec(x_13079); +lean_inc(x_5); +lean_inc(x_4); +lean_inc(x_2); +x_13083 = l_Lean_IR_ToIR_lowerLet___lambda__3(x_2, x_13075, x_13065, x_13070, x_12960, x_13081, x_13082, x_4, x_5, x_13080); +if (lean_obj_tag(x_13083) == 0) +{ +lean_object* x_13084; lean_object* x_13085; lean_object* x_13086; lean_object* x_13087; lean_object* x_13088; lean_object* x_13089; lean_object* x_13090; +x_13084 = lean_ctor_get(x_13083, 0); +lean_inc(x_13084); +x_13085 = lean_ctor_get(x_13083, 1); +lean_inc(x_13085); +lean_dec(x_13083); +x_13086 = lean_ctor_get(x_13084, 0); +lean_inc(x_13086); +x_13087 = lean_ctor_get(x_13084, 1); +lean_inc(x_13087); +if (lean_is_exclusive(x_13084)) { + lean_ctor_release(x_13084, 0); + lean_ctor_release(x_13084, 1); + x_13088 = x_13084; +} else { + lean_dec_ref(x_13084); + x_13088 = lean_box(0); +} +x_13089 = lean_alloc_ctor(1, 1, 0); +lean_ctor_set(x_13089, 0, x_13086); +if (lean_is_scalar(x_13088)) { + x_13090 = lean_alloc_ctor(0, 2, 0); +} else { + x_13090 = x_13088; +} +lean_ctor_set(x_13090, 0, x_13089); +lean_ctor_set(x_13090, 1, x_13087); +x_12929 = x_13090; +x_12930 = x_13085; +goto block_12959; +} +else +{ +lean_object* x_13091; lean_object* x_13092; lean_object* x_13093; lean_object* x_13094; +lean_dec(x_11930); +lean_dec(x_11919); +lean_dec(x_153); +lean_dec(x_5); +lean_dec(x_4); +lean_dec(x_2); +lean_dec(x_1); +x_13091 = lean_ctor_get(x_13083, 0); +lean_inc(x_13091); +x_13092 = lean_ctor_get(x_13083, 1); +lean_inc(x_13092); +if (lean_is_exclusive(x_13083)) { + lean_ctor_release(x_13083, 0); + lean_ctor_release(x_13083, 1); + x_13093 = x_13083; +} else { + lean_dec_ref(x_13083); + x_13093 = lean_box(0); +} +if (lean_is_scalar(x_13093)) { + x_13094 = lean_alloc_ctor(1, 2, 0); +} else { + x_13094 = x_13093; +} +lean_ctor_set(x_13094, 0, x_13091); +lean_ctor_set(x_13094, 1, x_13092); +return x_13094; +} +} +else +{ +lean_object* x_13095; lean_object* x_13096; lean_object* x_13097; lean_object* x_13098; +lean_dec(x_13075); +lean_dec(x_13070); +lean_dec(x_12960); +lean_dec(x_13065); +lean_dec(x_11930); +lean_dec(x_11919); +lean_dec(x_153); +lean_dec(x_5); +lean_dec(x_4); +lean_dec(x_2); +lean_dec(x_1); +x_13095 = lean_ctor_get(x_13078, 0); +lean_inc(x_13095); +x_13096 = 
lean_ctor_get(x_13078, 1); +lean_inc(x_13096); +if (lean_is_exclusive(x_13078)) { + lean_ctor_release(x_13078, 0); + lean_ctor_release(x_13078, 1); + x_13097 = x_13078; +} else { + lean_dec_ref(x_13078); + x_13097 = lean_box(0); +} +if (lean_is_scalar(x_13097)) { + x_13098 = lean_alloc_ctor(1, 2, 0); +} else { + x_13098 = x_13097; +} +lean_ctor_set(x_13098, 0, x_13095); +lean_ctor_set(x_13098, 1, x_13096); +return x_13098; +} +} +else +{ +lean_object* x_13099; lean_object* x_13100; lean_object* x_13101; lean_object* x_13102; lean_object* x_13103; lean_object* x_13104; lean_object* x_13105; lean_object* x_13106; +lean_dec(x_13060); +lean_dec(x_13058); +lean_inc(x_11919); +lean_inc(x_153); +lean_ctor_set_tag(x_12960, 6); +lean_ctor_set(x_12960, 1, x_11919); +lean_ctor_set(x_12960, 0, x_153); +x_13099 = lean_ctor_get(x_1, 0); +lean_inc(x_13099); +x_13100 = l_Lean_IR_ToIR_bindVar(x_13099, x_11925, x_4, x_5, x_12965); +x_13101 = lean_ctor_get(x_13100, 0); +lean_inc(x_13101); +x_13102 = lean_ctor_get(x_13100, 1); +lean_inc(x_13102); +lean_dec(x_13100); +x_13103 = lean_ctor_get(x_13101, 0); +lean_inc(x_13103); +x_13104 = lean_ctor_get(x_13101, 1); +lean_inc(x_13104); +lean_dec(x_13101); +x_13105 = lean_ctor_get(x_1, 2); +lean_inc(x_13105); +lean_inc(x_5); +lean_inc(x_4); +x_13106 = l_Lean_IR_ToIR_lowerType(x_13105, x_13104, x_4, x_5, x_13102); +if (lean_obj_tag(x_13106) == 0) +{ +lean_object* x_13107; lean_object* x_13108; lean_object* x_13109; lean_object* x_13110; lean_object* x_13111; +x_13107 = lean_ctor_get(x_13106, 0); +lean_inc(x_13107); +x_13108 = lean_ctor_get(x_13106, 1); +lean_inc(x_13108); +lean_dec(x_13106); +x_13109 = lean_ctor_get(x_13107, 0); +lean_inc(x_13109); +x_13110 = lean_ctor_get(x_13107, 1); +lean_inc(x_13110); +lean_dec(x_13107); +lean_inc(x_5); +lean_inc(x_4); +lean_inc(x_2); +x_13111 = l_Lean_IR_ToIR_lowerLet___lambda__1(x_2, x_13103, x_12960, x_13109, x_13110, x_4, x_5, x_13108); +if (lean_obj_tag(x_13111) == 0) +{ +lean_object* x_13112; lean_object* x_13113; lean_object* x_13114; lean_object* x_13115; lean_object* x_13116; lean_object* x_13117; lean_object* x_13118; +x_13112 = lean_ctor_get(x_13111, 0); +lean_inc(x_13112); +x_13113 = lean_ctor_get(x_13111, 1); +lean_inc(x_13113); +lean_dec(x_13111); +x_13114 = lean_ctor_get(x_13112, 0); +lean_inc(x_13114); +x_13115 = lean_ctor_get(x_13112, 1); +lean_inc(x_13115); +if (lean_is_exclusive(x_13112)) { + lean_ctor_release(x_13112, 0); + lean_ctor_release(x_13112, 1); + x_13116 = x_13112; +} else { + lean_dec_ref(x_13112); + x_13116 = lean_box(0); +} +x_13117 = lean_alloc_ctor(1, 1, 0); +lean_ctor_set(x_13117, 0, x_13114); +if (lean_is_scalar(x_13116)) { + x_13118 = lean_alloc_ctor(0, 2, 0); +} else { + x_13118 = x_13116; +} +lean_ctor_set(x_13118, 0, x_13117); +lean_ctor_set(x_13118, 1, x_13115); +x_12929 = x_13118; +x_12930 = x_13113; +goto block_12959; +} +else +{ +lean_object* x_13119; lean_object* x_13120; lean_object* x_13121; lean_object* x_13122; +lean_dec(x_11930); +lean_dec(x_11919); +lean_dec(x_153); +lean_dec(x_5); +lean_dec(x_4); +lean_dec(x_2); +lean_dec(x_1); +x_13119 = lean_ctor_get(x_13111, 0); +lean_inc(x_13119); +x_13120 = lean_ctor_get(x_13111, 1); +lean_inc(x_13120); +if (lean_is_exclusive(x_13111)) { + lean_ctor_release(x_13111, 0); + lean_ctor_release(x_13111, 1); + x_13121 = x_13111; +} else { + lean_dec_ref(x_13111); + x_13121 = lean_box(0); +} +if (lean_is_scalar(x_13121)) { + x_13122 = lean_alloc_ctor(1, 2, 0); +} else { + x_13122 = x_13121; +} +lean_ctor_set(x_13122, 0, x_13119); 
+lean_ctor_set(x_13122, 1, x_13120); +return x_13122; +} +} +else +{ +lean_object* x_13123; lean_object* x_13124; lean_object* x_13125; lean_object* x_13126; +lean_dec(x_13103); +lean_dec(x_12960); +lean_dec(x_11930); +lean_dec(x_11919); +lean_dec(x_153); +lean_dec(x_5); +lean_dec(x_4); +lean_dec(x_2); +lean_dec(x_1); +x_13123 = lean_ctor_get(x_13106, 0); +lean_inc(x_13123); +x_13124 = lean_ctor_get(x_13106, 1); +lean_inc(x_13124); +if (lean_is_exclusive(x_13106)) { + lean_ctor_release(x_13106, 0); + lean_ctor_release(x_13106, 1); + x_13125 = x_13106; +} else { + lean_dec_ref(x_13106); + x_13125 = lean_box(0); +} +if (lean_is_scalar(x_13125)) { + x_13126 = lean_alloc_ctor(1, 2, 0); +} else { + x_13126 = x_13125; +} +lean_ctor_set(x_13126, 0, x_13123); +lean_ctor_set(x_13126, 1, x_13124); +return x_13126; +} +} +} +else +{ +lean_object* x_13127; lean_object* x_13128; lean_object* x_13129; lean_object* x_13130; lean_object* x_13131; lean_object* x_13132; lean_object* x_13133; lean_object* x_13134; +lean_dec(x_13060); +lean_dec(x_13058); +lean_inc(x_11919); +lean_inc(x_153); +lean_ctor_set_tag(x_12960, 7); +lean_ctor_set(x_12960, 1, x_11919); +lean_ctor_set(x_12960, 0, x_153); +x_13127 = lean_ctor_get(x_1, 0); +lean_inc(x_13127); +x_13128 = l_Lean_IR_ToIR_bindVar(x_13127, x_11925, x_4, x_5, x_12965); +x_13129 = lean_ctor_get(x_13128, 0); +lean_inc(x_13129); +x_13130 = lean_ctor_get(x_13128, 1); +lean_inc(x_13130); +lean_dec(x_13128); +x_13131 = lean_ctor_get(x_13129, 0); +lean_inc(x_13131); +x_13132 = lean_ctor_get(x_13129, 1); +lean_inc(x_13132); +lean_dec(x_13129); +x_13133 = lean_box(7); +lean_inc(x_5); +lean_inc(x_4); +lean_inc(x_2); +x_13134 = l_Lean_IR_ToIR_lowerLet___lambda__1(x_2, x_13131, x_12960, x_13133, x_13132, x_4, x_5, x_13130); +if (lean_obj_tag(x_13134) == 0) +{ +lean_object* x_13135; lean_object* x_13136; lean_object* x_13137; lean_object* x_13138; lean_object* x_13139; lean_object* x_13140; lean_object* x_13141; +x_13135 = lean_ctor_get(x_13134, 0); +lean_inc(x_13135); +x_13136 = lean_ctor_get(x_13134, 1); +lean_inc(x_13136); +lean_dec(x_13134); +x_13137 = lean_ctor_get(x_13135, 0); +lean_inc(x_13137); +x_13138 = lean_ctor_get(x_13135, 1); +lean_inc(x_13138); +if (lean_is_exclusive(x_13135)) { + lean_ctor_release(x_13135, 0); + lean_ctor_release(x_13135, 1); + x_13139 = x_13135; +} else { + lean_dec_ref(x_13135); + x_13139 = lean_box(0); +} +x_13140 = lean_alloc_ctor(1, 1, 0); +lean_ctor_set(x_13140, 0, x_13137); +if (lean_is_scalar(x_13139)) { + x_13141 = lean_alloc_ctor(0, 2, 0); +} else { + x_13141 = x_13139; +} +lean_ctor_set(x_13141, 0, x_13140); +lean_ctor_set(x_13141, 1, x_13138); +x_12929 = x_13141; +x_12930 = x_13136; +goto block_12959; +} +else +{ +lean_object* x_13142; lean_object* x_13143; lean_object* x_13144; lean_object* x_13145; +lean_dec(x_11930); +lean_dec(x_11919); +lean_dec(x_153); +lean_dec(x_5); +lean_dec(x_4); +lean_dec(x_2); +lean_dec(x_1); +x_13142 = lean_ctor_get(x_13134, 0); +lean_inc(x_13142); +x_13143 = lean_ctor_get(x_13134, 1); +lean_inc(x_13143); +if (lean_is_exclusive(x_13134)) { + lean_ctor_release(x_13134, 0); + lean_ctor_release(x_13134, 1); + x_13144 = x_13134; +} else { + lean_dec_ref(x_13134); + x_13144 = lean_box(0); +} +if (lean_is_scalar(x_13144)) { + x_13145 = lean_alloc_ctor(1, 2, 0); +} else { + x_13145 = x_13144; +} +lean_ctor_set(x_13145, 0, x_13142); +lean_ctor_set(x_13145, 1, x_13143); +return x_13145; +} +} +} +} +else +{ +lean_object* x_13146; lean_object* x_13147; lean_object* x_13148; lean_object* x_13149; lean_object* 
x_13150; lean_object* x_13151; uint8_t x_13152; +x_13146 = lean_ctor_get(x_12960, 1); +lean_inc(x_13146); +lean_dec(x_12960); +x_13147 = lean_ctor_get(x_12961, 0); +lean_inc(x_13147); +if (lean_is_exclusive(x_12961)) { + lean_ctor_release(x_12961, 0); + x_13148 = x_12961; +} else { + lean_dec_ref(x_12961); + x_13148 = lean_box(0); +} +x_13149 = lean_array_get_size(x_11919); +x_13150 = lean_ctor_get(x_13147, 3); +lean_inc(x_13150); +lean_dec(x_13147); +x_13151 = lean_array_get_size(x_13150); +lean_dec(x_13150); +x_13152 = lean_nat_dec_lt(x_13149, x_13151); +if (x_13152 == 0) +{ +uint8_t x_13153; +x_13153 = lean_nat_dec_eq(x_13149, x_13151); +if (x_13153 == 0) +{ +lean_object* x_13154; lean_object* x_13155; lean_object* x_13156; lean_object* x_13157; lean_object* x_13158; lean_object* x_13159; lean_object* x_13160; lean_object* x_13161; lean_object* x_13162; lean_object* x_13163; lean_object* x_13164; lean_object* x_13165; lean_object* x_13166; lean_object* x_13167; lean_object* x_13168; lean_object* x_13169; lean_object* x_13170; +x_13154 = lean_unsigned_to_nat(0u); +x_13155 = l_Array_extract___rarg(x_11919, x_13154, x_13151); +x_13156 = l_Array_extract___rarg(x_11919, x_13151, x_13149); +lean_dec(x_13149); +lean_inc(x_153); +x_13157 = lean_alloc_ctor(6, 2, 0); +lean_ctor_set(x_13157, 0, x_153); +lean_ctor_set(x_13157, 1, x_13155); +x_13158 = lean_ctor_get(x_1, 0); +lean_inc(x_13158); +x_13159 = l_Lean_IR_ToIR_bindVar(x_13158, x_11925, x_4, x_5, x_13146); +x_13160 = lean_ctor_get(x_13159, 0); +lean_inc(x_13160); +x_13161 = lean_ctor_get(x_13159, 1); +lean_inc(x_13161); +lean_dec(x_13159); +x_13162 = lean_ctor_get(x_13160, 0); +lean_inc(x_13162); +x_13163 = lean_ctor_get(x_13160, 1); +lean_inc(x_13163); +lean_dec(x_13160); +x_13164 = l_Lean_IR_ToIR_newVar(x_13163, x_4, x_5, x_13161); +x_13165 = lean_ctor_get(x_13164, 0); +lean_inc(x_13165); +x_13166 = lean_ctor_get(x_13164, 1); +lean_inc(x_13166); +lean_dec(x_13164); +x_13167 = lean_ctor_get(x_13165, 0); +lean_inc(x_13167); +x_13168 = lean_ctor_get(x_13165, 1); +lean_inc(x_13168); +lean_dec(x_13165); +x_13169 = lean_ctor_get(x_1, 2); +lean_inc(x_13169); +lean_inc(x_5); +lean_inc(x_4); +x_13170 = l_Lean_IR_ToIR_lowerType(x_13169, x_13168, x_4, x_5, x_13166); +if (lean_obj_tag(x_13170) == 0) +{ +lean_object* x_13171; lean_object* x_13172; lean_object* x_13173; lean_object* x_13174; lean_object* x_13175; +x_13171 = lean_ctor_get(x_13170, 0); +lean_inc(x_13171); +x_13172 = lean_ctor_get(x_13170, 1); +lean_inc(x_13172); +lean_dec(x_13170); +x_13173 = lean_ctor_get(x_13171, 0); +lean_inc(x_13173); +x_13174 = lean_ctor_get(x_13171, 1); +lean_inc(x_13174); +lean_dec(x_13171); +lean_inc(x_5); +lean_inc(x_4); +lean_inc(x_2); +x_13175 = l_Lean_IR_ToIR_lowerLet___lambda__3(x_2, x_13167, x_13156, x_13162, x_13157, x_13173, x_13174, x_4, x_5, x_13172); +if (lean_obj_tag(x_13175) == 0) +{ +lean_object* x_13176; lean_object* x_13177; lean_object* x_13178; lean_object* x_13179; lean_object* x_13180; lean_object* x_13181; lean_object* x_13182; +x_13176 = lean_ctor_get(x_13175, 0); +lean_inc(x_13176); +x_13177 = lean_ctor_get(x_13175, 1); +lean_inc(x_13177); +lean_dec(x_13175); +x_13178 = lean_ctor_get(x_13176, 0); +lean_inc(x_13178); +x_13179 = lean_ctor_get(x_13176, 1); +lean_inc(x_13179); +if (lean_is_exclusive(x_13176)) { + lean_ctor_release(x_13176, 0); + lean_ctor_release(x_13176, 1); + x_13180 = x_13176; +} else { + lean_dec_ref(x_13176); + x_13180 = lean_box(0); +} +if (lean_is_scalar(x_13148)) { + x_13181 = lean_alloc_ctor(1, 1, 0); +} else { + 
x_13181 = x_13148; +} +lean_ctor_set(x_13181, 0, x_13178); +if (lean_is_scalar(x_13180)) { + x_13182 = lean_alloc_ctor(0, 2, 0); +} else { + x_13182 = x_13180; +} +lean_ctor_set(x_13182, 0, x_13181); +lean_ctor_set(x_13182, 1, x_13179); +x_12929 = x_13182; +x_12930 = x_13177; +goto block_12959; +} +else +{ +lean_object* x_13183; lean_object* x_13184; lean_object* x_13185; lean_object* x_13186; +lean_dec(x_13148); +lean_dec(x_11930); +lean_dec(x_11919); +lean_dec(x_153); +lean_dec(x_5); +lean_dec(x_4); +lean_dec(x_2); +lean_dec(x_1); +x_13183 = lean_ctor_get(x_13175, 0); +lean_inc(x_13183); +x_13184 = lean_ctor_get(x_13175, 1); +lean_inc(x_13184); +if (lean_is_exclusive(x_13175)) { + lean_ctor_release(x_13175, 0); + lean_ctor_release(x_13175, 1); + x_13185 = x_13175; +} else { + lean_dec_ref(x_13175); + x_13185 = lean_box(0); +} +if (lean_is_scalar(x_13185)) { + x_13186 = lean_alloc_ctor(1, 2, 0); +} else { + x_13186 = x_13185; +} +lean_ctor_set(x_13186, 0, x_13183); +lean_ctor_set(x_13186, 1, x_13184); +return x_13186; +} +} +else +{ +lean_object* x_13187; lean_object* x_13188; lean_object* x_13189; lean_object* x_13190; +lean_dec(x_13167); +lean_dec(x_13162); +lean_dec(x_13157); +lean_dec(x_13156); +lean_dec(x_13148); +lean_dec(x_11930); +lean_dec(x_11919); +lean_dec(x_153); +lean_dec(x_5); +lean_dec(x_4); +lean_dec(x_2); +lean_dec(x_1); +x_13187 = lean_ctor_get(x_13170, 0); +lean_inc(x_13187); +x_13188 = lean_ctor_get(x_13170, 1); +lean_inc(x_13188); +if (lean_is_exclusive(x_13170)) { + lean_ctor_release(x_13170, 0); + lean_ctor_release(x_13170, 1); + x_13189 = x_13170; +} else { + lean_dec_ref(x_13170); + x_13189 = lean_box(0); +} +if (lean_is_scalar(x_13189)) { + x_13190 = lean_alloc_ctor(1, 2, 0); +} else { + x_13190 = x_13189; +} +lean_ctor_set(x_13190, 0, x_13187); +lean_ctor_set(x_13190, 1, x_13188); +return x_13190; +} +} +else +{ +lean_object* x_13191; lean_object* x_13192; lean_object* x_13193; lean_object* x_13194; lean_object* x_13195; lean_object* x_13196; lean_object* x_13197; lean_object* x_13198; lean_object* x_13199; +lean_dec(x_13151); +lean_dec(x_13149); +lean_inc(x_11919); +lean_inc(x_153); +x_13191 = lean_alloc_ctor(6, 2, 0); +lean_ctor_set(x_13191, 0, x_153); +lean_ctor_set(x_13191, 1, x_11919); +x_13192 = lean_ctor_get(x_1, 0); +lean_inc(x_13192); +x_13193 = l_Lean_IR_ToIR_bindVar(x_13192, x_11925, x_4, x_5, x_13146); +x_13194 = lean_ctor_get(x_13193, 0); +lean_inc(x_13194); +x_13195 = lean_ctor_get(x_13193, 1); +lean_inc(x_13195); +lean_dec(x_13193); +x_13196 = lean_ctor_get(x_13194, 0); +lean_inc(x_13196); +x_13197 = lean_ctor_get(x_13194, 1); +lean_inc(x_13197); +lean_dec(x_13194); +x_13198 = lean_ctor_get(x_1, 2); +lean_inc(x_13198); +lean_inc(x_5); +lean_inc(x_4); +x_13199 = l_Lean_IR_ToIR_lowerType(x_13198, x_13197, x_4, x_5, x_13195); +if (lean_obj_tag(x_13199) == 0) +{ +lean_object* x_13200; lean_object* x_13201; lean_object* x_13202; lean_object* x_13203; lean_object* x_13204; +x_13200 = lean_ctor_get(x_13199, 0); +lean_inc(x_13200); +x_13201 = lean_ctor_get(x_13199, 1); +lean_inc(x_13201); +lean_dec(x_13199); +x_13202 = lean_ctor_get(x_13200, 0); +lean_inc(x_13202); +x_13203 = lean_ctor_get(x_13200, 1); +lean_inc(x_13203); +lean_dec(x_13200); +lean_inc(x_5); +lean_inc(x_4); +lean_inc(x_2); +x_13204 = l_Lean_IR_ToIR_lowerLet___lambda__1(x_2, x_13196, x_13191, x_13202, x_13203, x_4, x_5, x_13201); +if (lean_obj_tag(x_13204) == 0) +{ +lean_object* x_13205; lean_object* x_13206; lean_object* x_13207; lean_object* x_13208; lean_object* x_13209; lean_object* 
x_13210; lean_object* x_13211; +x_13205 = lean_ctor_get(x_13204, 0); +lean_inc(x_13205); +x_13206 = lean_ctor_get(x_13204, 1); +lean_inc(x_13206); +lean_dec(x_13204); +x_13207 = lean_ctor_get(x_13205, 0); +lean_inc(x_13207); +x_13208 = lean_ctor_get(x_13205, 1); +lean_inc(x_13208); +if (lean_is_exclusive(x_13205)) { + lean_ctor_release(x_13205, 0); + lean_ctor_release(x_13205, 1); + x_13209 = x_13205; +} else { + lean_dec_ref(x_13205); + x_13209 = lean_box(0); +} +if (lean_is_scalar(x_13148)) { + x_13210 = lean_alloc_ctor(1, 1, 0); +} else { + x_13210 = x_13148; +} +lean_ctor_set(x_13210, 0, x_13207); +if (lean_is_scalar(x_13209)) { + x_13211 = lean_alloc_ctor(0, 2, 0); +} else { + x_13211 = x_13209; +} +lean_ctor_set(x_13211, 0, x_13210); +lean_ctor_set(x_13211, 1, x_13208); +x_12929 = x_13211; +x_12930 = x_13206; +goto block_12959; +} +else +{ +lean_object* x_13212; lean_object* x_13213; lean_object* x_13214; lean_object* x_13215; +lean_dec(x_13148); +lean_dec(x_11930); +lean_dec(x_11919); +lean_dec(x_153); +lean_dec(x_5); +lean_dec(x_4); +lean_dec(x_2); +lean_dec(x_1); +x_13212 = lean_ctor_get(x_13204, 0); +lean_inc(x_13212); +x_13213 = lean_ctor_get(x_13204, 1); +lean_inc(x_13213); +if (lean_is_exclusive(x_13204)) { + lean_ctor_release(x_13204, 0); + lean_ctor_release(x_13204, 1); + x_13214 = x_13204; +} else { + lean_dec_ref(x_13204); + x_13214 = lean_box(0); +} +if (lean_is_scalar(x_13214)) { + x_13215 = lean_alloc_ctor(1, 2, 0); +} else { + x_13215 = x_13214; +} +lean_ctor_set(x_13215, 0, x_13212); +lean_ctor_set(x_13215, 1, x_13213); +return x_13215; +} +} +else +{ +lean_object* x_13216; lean_object* x_13217; lean_object* x_13218; lean_object* x_13219; +lean_dec(x_13196); +lean_dec(x_13191); +lean_dec(x_13148); +lean_dec(x_11930); +lean_dec(x_11919); +lean_dec(x_153); +lean_dec(x_5); +lean_dec(x_4); +lean_dec(x_2); +lean_dec(x_1); +x_13216 = lean_ctor_get(x_13199, 0); +lean_inc(x_13216); +x_13217 = lean_ctor_get(x_13199, 1); +lean_inc(x_13217); +if (lean_is_exclusive(x_13199)) { + lean_ctor_release(x_13199, 0); + lean_ctor_release(x_13199, 1); + x_13218 = x_13199; +} else { + lean_dec_ref(x_13199); + x_13218 = lean_box(0); +} +if (lean_is_scalar(x_13218)) { + x_13219 = lean_alloc_ctor(1, 2, 0); +} else { + x_13219 = x_13218; +} +lean_ctor_set(x_13219, 0, x_13216); +lean_ctor_set(x_13219, 1, x_13217); +return x_13219; +} +} +} +else +{ +lean_object* x_13220; lean_object* x_13221; lean_object* x_13222; lean_object* x_13223; lean_object* x_13224; lean_object* x_13225; lean_object* x_13226; lean_object* x_13227; lean_object* x_13228; +lean_dec(x_13151); +lean_dec(x_13149); +lean_inc(x_11919); +lean_inc(x_153); +x_13220 = lean_alloc_ctor(7, 2, 0); +lean_ctor_set(x_13220, 0, x_153); +lean_ctor_set(x_13220, 1, x_11919); +x_13221 = lean_ctor_get(x_1, 0); +lean_inc(x_13221); +x_13222 = l_Lean_IR_ToIR_bindVar(x_13221, x_11925, x_4, x_5, x_13146); +x_13223 = lean_ctor_get(x_13222, 0); +lean_inc(x_13223); +x_13224 = lean_ctor_get(x_13222, 1); +lean_inc(x_13224); +lean_dec(x_13222); +x_13225 = lean_ctor_get(x_13223, 0); +lean_inc(x_13225); +x_13226 = lean_ctor_get(x_13223, 1); +lean_inc(x_13226); +lean_dec(x_13223); +x_13227 = lean_box(7); +lean_inc(x_5); +lean_inc(x_4); +lean_inc(x_2); +x_13228 = l_Lean_IR_ToIR_lowerLet___lambda__1(x_2, x_13225, x_13220, x_13227, x_13226, x_4, x_5, x_13224); +if (lean_obj_tag(x_13228) == 0) +{ +lean_object* x_13229; lean_object* x_13230; lean_object* x_13231; lean_object* x_13232; lean_object* x_13233; lean_object* x_13234; lean_object* x_13235; +x_13229 = 
lean_ctor_get(x_13228, 0); +lean_inc(x_13229); +x_13230 = lean_ctor_get(x_13228, 1); +lean_inc(x_13230); +lean_dec(x_13228); +x_13231 = lean_ctor_get(x_13229, 0); +lean_inc(x_13231); +x_13232 = lean_ctor_get(x_13229, 1); +lean_inc(x_13232); +if (lean_is_exclusive(x_13229)) { + lean_ctor_release(x_13229, 0); + lean_ctor_release(x_13229, 1); + x_13233 = x_13229; +} else { + lean_dec_ref(x_13229); + x_13233 = lean_box(0); +} +if (lean_is_scalar(x_13148)) { + x_13234 = lean_alloc_ctor(1, 1, 0); +} else { + x_13234 = x_13148; +} +lean_ctor_set(x_13234, 0, x_13231); +if (lean_is_scalar(x_13233)) { + x_13235 = lean_alloc_ctor(0, 2, 0); +} else { + x_13235 = x_13233; +} +lean_ctor_set(x_13235, 0, x_13234); +lean_ctor_set(x_13235, 1, x_13232); +x_12929 = x_13235; +x_12930 = x_13230; +goto block_12959; +} +else +{ +lean_object* x_13236; lean_object* x_13237; lean_object* x_13238; lean_object* x_13239; +lean_dec(x_13148); +lean_dec(x_11930); +lean_dec(x_11919); +lean_dec(x_153); +lean_dec(x_5); +lean_dec(x_4); +lean_dec(x_2); +lean_dec(x_1); +x_13236 = lean_ctor_get(x_13228, 0); +lean_inc(x_13236); +x_13237 = lean_ctor_get(x_13228, 1); +lean_inc(x_13237); +if (lean_is_exclusive(x_13228)) { + lean_ctor_release(x_13228, 0); + lean_ctor_release(x_13228, 1); + x_13238 = x_13228; +} else { + lean_dec_ref(x_13228); + x_13238 = lean_box(0); +} +if (lean_is_scalar(x_13238)) { + x_13239 = lean_alloc_ctor(1, 2, 0); +} else { + x_13239 = x_13238; +} +lean_ctor_set(x_13239, 0, x_13236); +lean_ctor_set(x_13239, 1, x_13237); +return x_13239; +} +} +} +} +block_12959: +{ +lean_object* x_12931; +x_12931 = lean_ctor_get(x_12929, 0); +lean_inc(x_12931); +if (lean_obj_tag(x_12931) == 0) +{ +lean_object* x_12932; lean_object* x_12933; lean_object* x_12934; lean_object* x_12935; lean_object* x_12936; lean_object* x_12937; lean_object* x_12938; lean_object* x_12939; lean_object* x_12940; lean_object* x_12941; +lean_dec(x_11930); +x_12932 = lean_ctor_get(x_12929, 1); +lean_inc(x_12932); +lean_dec(x_12929); +x_12933 = lean_alloc_ctor(6, 2, 0); +lean_ctor_set(x_12933, 0, x_153); +lean_ctor_set(x_12933, 1, x_11919); +x_12934 = lean_ctor_get(x_1, 0); +lean_inc(x_12934); +x_12935 = l_Lean_IR_ToIR_bindVar(x_12934, x_12932, x_4, x_5, x_12930); +x_12936 = lean_ctor_get(x_12935, 0); +lean_inc(x_12936); +x_12937 = lean_ctor_get(x_12935, 1); +lean_inc(x_12937); +lean_dec(x_12935); +x_12938 = lean_ctor_get(x_12936, 0); +lean_inc(x_12938); +x_12939 = lean_ctor_get(x_12936, 1); +lean_inc(x_12939); +lean_dec(x_12936); +x_12940 = lean_ctor_get(x_1, 2); +lean_inc(x_12940); +lean_dec(x_1); +lean_inc(x_5); +lean_inc(x_4); +x_12941 = l_Lean_IR_ToIR_lowerType(x_12940, x_12939, x_4, x_5, x_12937); +if (lean_obj_tag(x_12941) == 0) +{ +lean_object* x_12942; lean_object* x_12943; lean_object* x_12944; lean_object* x_12945; lean_object* x_12946; +x_12942 = lean_ctor_get(x_12941, 0); +lean_inc(x_12942); +x_12943 = lean_ctor_get(x_12941, 1); +lean_inc(x_12943); +lean_dec(x_12941); +x_12944 = lean_ctor_get(x_12942, 0); +lean_inc(x_12944); +x_12945 = lean_ctor_get(x_12942, 1); +lean_inc(x_12945); +lean_dec(x_12942); +x_12946 = l_Lean_IR_ToIR_lowerLet___lambda__1(x_2, x_12938, x_12933, x_12944, x_12945, x_4, x_5, x_12943); +return x_12946; +} +else +{ +uint8_t x_12947; +lean_dec(x_12938); +lean_dec(x_12933); +lean_dec(x_5); +lean_dec(x_4); +lean_dec(x_2); +x_12947 = !lean_is_exclusive(x_12941); +if (x_12947 == 0) +{ +return x_12941; +} +else +{ +lean_object* x_12948; lean_object* x_12949; lean_object* x_12950; +x_12948 = lean_ctor_get(x_12941, 0); 
+x_12949 = lean_ctor_get(x_12941, 1); +lean_inc(x_12949); +lean_inc(x_12948); +lean_dec(x_12941); +x_12950 = lean_alloc_ctor(1, 2, 0); +lean_ctor_set(x_12950, 0, x_12948); +lean_ctor_set(x_12950, 1, x_12949); +return x_12950; +} +} +} +else +{ +uint8_t x_12951; +lean_dec(x_11919); +lean_dec(x_153); +lean_dec(x_5); +lean_dec(x_4); +lean_dec(x_2); +lean_dec(x_1); +x_12951 = !lean_is_exclusive(x_12929); +if (x_12951 == 0) +{ +lean_object* x_12952; lean_object* x_12953; lean_object* x_12954; +x_12952 = lean_ctor_get(x_12929, 0); +lean_dec(x_12952); +x_12953 = lean_ctor_get(x_12931, 0); +lean_inc(x_12953); +lean_dec(x_12931); +lean_ctor_set(x_12929, 0, x_12953); +if (lean_is_scalar(x_11930)) { + x_12954 = lean_alloc_ctor(0, 2, 0); +} else { + x_12954 = x_11930; +} +lean_ctor_set(x_12954, 0, x_12929); +lean_ctor_set(x_12954, 1, x_12930); +return x_12954; +} +else +{ +lean_object* x_12955; lean_object* x_12956; lean_object* x_12957; lean_object* x_12958; +x_12955 = lean_ctor_get(x_12929, 1); +lean_inc(x_12955); +lean_dec(x_12929); +x_12956 = lean_ctor_get(x_12931, 0); +lean_inc(x_12956); +lean_dec(x_12931); +x_12957 = lean_alloc_ctor(0, 2, 0); +lean_ctor_set(x_12957, 0, x_12956); +lean_ctor_set(x_12957, 1, x_12955); +if (lean_is_scalar(x_11930)) { + x_12958 = lean_alloc_ctor(0, 2, 0); +} else { + x_12958 = x_11930; +} +lean_ctor_set(x_12958, 0, x_12957); +lean_ctor_set(x_12958, 1, x_12930); +return x_12958; +} +} +} +} +} +default: +{ +uint8_t x_13240; +lean_dec(x_11931); +lean_dec(x_11930); +lean_free_object(x_11921); +lean_dec(x_11919); +lean_dec(x_11911); +lean_dec(x_11910); +lean_dec(x_2); +lean_dec(x_1); +x_13240 = !lean_is_exclusive(x_11936); +if (x_13240 == 0) +{ +lean_object* x_13241; uint8_t x_13242; lean_object* x_13243; lean_object* x_13244; lean_object* x_13245; lean_object* x_13246; lean_object* x_13247; lean_object* x_13248; lean_object* x_13249; lean_object* x_13250; +x_13241 = lean_ctor_get(x_11936, 0); +lean_dec(x_13241); +x_13242 = 1; +x_13243 = l_Lean_IR_ToIR_lowerLet___closed__14; +x_13244 = l_Lean_Name_toString(x_153, x_13242, x_13243); +lean_ctor_set_tag(x_11936, 3); +lean_ctor_set(x_11936, 0, x_13244); +x_13245 = l_Lean_IR_ToIR_lowerLet___closed__29; +x_13246 = lean_alloc_ctor(5, 2, 0); +lean_ctor_set(x_13246, 0, x_13245); +lean_ctor_set(x_13246, 1, x_11936); +x_13247 = l_Lean_IR_ToIR_lowerLet___closed__31; +x_13248 = lean_alloc_ctor(5, 2, 0); +lean_ctor_set(x_13248, 0, x_13246); +lean_ctor_set(x_13248, 1, x_13247); +x_13249 = l_Lean_MessageData_ofFormat(x_13248); +x_13250 = l_Lean_throwError___at_Lean_IR_ToIR_lowerLet___spec__1(x_13249, x_11925, x_4, x_5, x_11929); +lean_dec(x_5); +lean_dec(x_4); +lean_dec(x_11925); +return x_13250; +} +else +{ +uint8_t x_13251; lean_object* x_13252; lean_object* x_13253; lean_object* x_13254; lean_object* x_13255; lean_object* x_13256; lean_object* x_13257; lean_object* x_13258; lean_object* x_13259; lean_object* x_13260; +lean_dec(x_11936); +x_13251 = 1; +x_13252 = l_Lean_IR_ToIR_lowerLet___closed__14; +x_13253 = l_Lean_Name_toString(x_153, x_13251, x_13252); +x_13254 = lean_alloc_ctor(3, 1, 0); +lean_ctor_set(x_13254, 0, x_13253); +x_13255 = l_Lean_IR_ToIR_lowerLet___closed__29; +x_13256 = lean_alloc_ctor(5, 2, 0); +lean_ctor_set(x_13256, 0, x_13255); +lean_ctor_set(x_13256, 1, x_13254); +x_13257 = l_Lean_IR_ToIR_lowerLet___closed__31; +x_13258 = lean_alloc_ctor(5, 2, 0); +lean_ctor_set(x_13258, 0, x_13256); +lean_ctor_set(x_13258, 1, x_13257); +x_13259 = l_Lean_MessageData_ofFormat(x_13258); +x_13260 = 
l_Lean_throwError___at_Lean_IR_ToIR_lowerLet___spec__1(x_13259, x_11925, x_4, x_5, x_11929); +lean_dec(x_5); +lean_dec(x_4); +lean_dec(x_11925); +return x_13260; +} +} +} +} +} +else +{ +lean_object* x_13261; lean_object* x_13262; lean_object* x_13263; lean_object* x_13264; lean_object* x_13265; lean_object* x_13266; uint8_t x_13267; lean_object* x_13268; +x_13261 = lean_ctor_get(x_11921, 1); +lean_inc(x_13261); +lean_dec(x_11921); +x_13262 = lean_st_ref_get(x_5, x_11922); +x_13263 = lean_ctor_get(x_13262, 0); +lean_inc(x_13263); +x_13264 = lean_ctor_get(x_13262, 1); +lean_inc(x_13264); +if (lean_is_exclusive(x_13262)) { + lean_ctor_release(x_13262, 0); + lean_ctor_release(x_13262, 1); + x_13265 = x_13262; +} else { + lean_dec_ref(x_13262); + x_13265 = lean_box(0); +} +x_13266 = lean_ctor_get(x_13263, 0); +lean_inc(x_13266); +lean_dec(x_13263); +x_13267 = 0; +lean_inc(x_153); +lean_inc(x_13266); +x_13268 = l_Lean_Environment_find_x3f(x_13266, x_153, x_13267); +if (lean_obj_tag(x_13268) == 0) +{ +lean_object* x_13269; lean_object* x_13270; +lean_dec(x_13266); +lean_dec(x_13265); +lean_dec(x_11919); +lean_dec(x_11911); +lean_dec(x_11910); +lean_dec(x_153); +lean_dec(x_2); +lean_dec(x_1); +x_13269 = l_Lean_IR_ToIR_lowerLet___closed__6; +x_13270 = l_panic___at_Lean_IR_ToIR_lowerAlt_loop___spec__1(x_13269, x_13261, x_4, x_5, x_13264); +return x_13270; +} +else +{ +lean_object* x_13271; +x_13271 = lean_ctor_get(x_13268, 0); +lean_inc(x_13271); +lean_dec(x_13268); +switch (lean_obj_tag(x_13271)) { +case 0: +{ +lean_object* x_13272; lean_object* x_13273; uint8_t x_13274; +lean_dec(x_13266); +lean_dec(x_11911); +lean_dec(x_11910); +if (lean_is_exclusive(x_13271)) { + lean_ctor_release(x_13271, 0); + x_13272 = x_13271; +} else { + lean_dec_ref(x_13271); + x_13272 = lean_box(0); +} +x_13273 = l_Lean_IR_ToIR_lowerLet___closed__9; +x_13274 = lean_name_eq(x_153, x_13273); +if (x_13274 == 0) +{ +lean_object* x_13275; uint8_t x_13276; +x_13275 = l_Lean_IR_ToIR_lowerLet___closed__11; +x_13276 = lean_name_eq(x_153, x_13275); +if (x_13276 == 0) +{ +lean_object* x_13277; lean_object* x_13278; lean_object* x_13279; +lean_dec(x_13265); +lean_inc(x_153); +x_13277 = l_Lean_IR_ToIR_findDecl(x_153, x_13261, x_4, x_5, x_13264); +x_13278 = lean_ctor_get(x_13277, 0); +lean_inc(x_13278); +x_13279 = lean_ctor_get(x_13278, 0); +lean_inc(x_13279); +if (lean_obj_tag(x_13279) == 0) +{ +lean_object* x_13280; lean_object* x_13281; lean_object* x_13282; lean_object* x_13283; uint8_t x_13284; lean_object* x_13285; lean_object* x_13286; lean_object* x_13287; lean_object* x_13288; lean_object* x_13289; lean_object* x_13290; lean_object* x_13291; lean_object* x_13292; lean_object* x_13293; +lean_dec(x_11919); +lean_dec(x_2); +lean_dec(x_1); +x_13280 = lean_ctor_get(x_13277, 1); +lean_inc(x_13280); +if (lean_is_exclusive(x_13277)) { + lean_ctor_release(x_13277, 0); + lean_ctor_release(x_13277, 1); + x_13281 = x_13277; +} else { + lean_dec_ref(x_13277); + x_13281 = lean_box(0); +} +x_13282 = lean_ctor_get(x_13278, 1); +lean_inc(x_13282); +if (lean_is_exclusive(x_13278)) { + lean_ctor_release(x_13278, 0); + lean_ctor_release(x_13278, 1); + x_13283 = x_13278; +} else { + lean_dec_ref(x_13278); + x_13283 = lean_box(0); +} +x_13284 = 1; +x_13285 = l_Lean_IR_ToIR_lowerLet___closed__14; +x_13286 = l_Lean_Name_toString(x_153, x_13284, x_13285); +if (lean_is_scalar(x_13272)) { + x_13287 = lean_alloc_ctor(3, 1, 0); +} else { + x_13287 = x_13272; + lean_ctor_set_tag(x_13287, 3); +} +lean_ctor_set(x_13287, 0, x_13286); +x_13288 = 
l_Lean_IR_ToIR_lowerLet___closed__13; +if (lean_is_scalar(x_13283)) { + x_13289 = lean_alloc_ctor(5, 2, 0); +} else { + x_13289 = x_13283; + lean_ctor_set_tag(x_13289, 5); +} +lean_ctor_set(x_13289, 0, x_13288); +lean_ctor_set(x_13289, 1, x_13287); +x_13290 = l_Lean_IR_ToIR_lowerLet___closed__16; +if (lean_is_scalar(x_13281)) { + x_13291 = lean_alloc_ctor(5, 2, 0); +} else { + x_13291 = x_13281; + lean_ctor_set_tag(x_13291, 5); +} +lean_ctor_set(x_13291, 0, x_13289); +lean_ctor_set(x_13291, 1, x_13290); +x_13292 = l_Lean_MessageData_ofFormat(x_13291); +x_13293 = l_Lean_throwError___at_Lean_IR_ToIR_lowerLet___spec__1(x_13292, x_13282, x_4, x_5, x_13280); +lean_dec(x_5); +lean_dec(x_4); +lean_dec(x_13282); +return x_13293; +} +else +{ +lean_object* x_13294; lean_object* x_13295; lean_object* x_13296; lean_object* x_13297; lean_object* x_13298; lean_object* x_13299; lean_object* x_13300; uint8_t x_13301; +lean_dec(x_13272); +x_13294 = lean_ctor_get(x_13277, 1); +lean_inc(x_13294); +lean_dec(x_13277); +x_13295 = lean_ctor_get(x_13278, 1); +lean_inc(x_13295); +if (lean_is_exclusive(x_13278)) { + lean_ctor_release(x_13278, 0); + lean_ctor_release(x_13278, 1); + x_13296 = x_13278; +} else { + lean_dec_ref(x_13278); + x_13296 = lean_box(0); +} +x_13297 = lean_ctor_get(x_13279, 0); +lean_inc(x_13297); +lean_dec(x_13279); +x_13298 = lean_array_get_size(x_11919); +x_13299 = l_Lean_IR_Decl_params(x_13297); +lean_dec(x_13297); +x_13300 = lean_array_get_size(x_13299); +lean_dec(x_13299); +x_13301 = lean_nat_dec_lt(x_13298, x_13300); +if (x_13301 == 0) +{ +uint8_t x_13302; +x_13302 = lean_nat_dec_eq(x_13298, x_13300); +if (x_13302 == 0) +{ +lean_object* x_13303; lean_object* x_13304; lean_object* x_13305; lean_object* x_13306; lean_object* x_13307; lean_object* x_13308; lean_object* x_13309; lean_object* x_13310; lean_object* x_13311; lean_object* x_13312; lean_object* x_13313; lean_object* x_13314; lean_object* x_13315; lean_object* x_13316; lean_object* x_13317; lean_object* x_13318; lean_object* x_13319; +x_13303 = lean_unsigned_to_nat(0u); +x_13304 = l_Array_extract___rarg(x_11919, x_13303, x_13300); +x_13305 = l_Array_extract___rarg(x_11919, x_13300, x_13298); +lean_dec(x_13298); +lean_dec(x_11919); +if (lean_is_scalar(x_13296)) { + x_13306 = lean_alloc_ctor(6, 2, 0); +} else { + x_13306 = x_13296; + lean_ctor_set_tag(x_13306, 6); +} +lean_ctor_set(x_13306, 0, x_153); +lean_ctor_set(x_13306, 1, x_13304); +x_13307 = lean_ctor_get(x_1, 0); +lean_inc(x_13307); +x_13308 = l_Lean_IR_ToIR_bindVar(x_13307, x_13295, x_4, x_5, x_13294); +x_13309 = lean_ctor_get(x_13308, 0); +lean_inc(x_13309); +x_13310 = lean_ctor_get(x_13308, 1); +lean_inc(x_13310); +lean_dec(x_13308); +x_13311 = lean_ctor_get(x_13309, 0); +lean_inc(x_13311); +x_13312 = lean_ctor_get(x_13309, 1); +lean_inc(x_13312); +lean_dec(x_13309); +x_13313 = l_Lean_IR_ToIR_newVar(x_13312, x_4, x_5, x_13310); +x_13314 = lean_ctor_get(x_13313, 0); +lean_inc(x_13314); +x_13315 = lean_ctor_get(x_13313, 1); +lean_inc(x_13315); +lean_dec(x_13313); +x_13316 = lean_ctor_get(x_13314, 0); +lean_inc(x_13316); +x_13317 = lean_ctor_get(x_13314, 1); +lean_inc(x_13317); +lean_dec(x_13314); +x_13318 = lean_ctor_get(x_1, 2); +lean_inc(x_13318); +lean_dec(x_1); +lean_inc(x_5); +lean_inc(x_4); +x_13319 = l_Lean_IR_ToIR_lowerType(x_13318, x_13317, x_4, x_5, x_13315); +if (lean_obj_tag(x_13319) == 0) +{ +lean_object* x_13320; lean_object* x_13321; lean_object* x_13322; lean_object* x_13323; lean_object* x_13324; +x_13320 = lean_ctor_get(x_13319, 0); +lean_inc(x_13320); 
+x_13321 = lean_ctor_get(x_13319, 1); +lean_inc(x_13321); +lean_dec(x_13319); +x_13322 = lean_ctor_get(x_13320, 0); +lean_inc(x_13322); +x_13323 = lean_ctor_get(x_13320, 1); +lean_inc(x_13323); +lean_dec(x_13320); +x_13324 = l_Lean_IR_ToIR_lowerLet___lambda__3(x_2, x_13316, x_13305, x_13311, x_13306, x_13322, x_13323, x_4, x_5, x_13321); +return x_13324; +} +else +{ +lean_object* x_13325; lean_object* x_13326; lean_object* x_13327; lean_object* x_13328; +lean_dec(x_13316); +lean_dec(x_13311); +lean_dec(x_13306); +lean_dec(x_13305); +lean_dec(x_5); +lean_dec(x_4); +lean_dec(x_2); +x_13325 = lean_ctor_get(x_13319, 0); +lean_inc(x_13325); +x_13326 = lean_ctor_get(x_13319, 1); +lean_inc(x_13326); +if (lean_is_exclusive(x_13319)) { + lean_ctor_release(x_13319, 0); + lean_ctor_release(x_13319, 1); + x_13327 = x_13319; +} else { + lean_dec_ref(x_13319); + x_13327 = lean_box(0); +} +if (lean_is_scalar(x_13327)) { + x_13328 = lean_alloc_ctor(1, 2, 0); +} else { + x_13328 = x_13327; +} +lean_ctor_set(x_13328, 0, x_13325); +lean_ctor_set(x_13328, 1, x_13326); +return x_13328; +} +} +else +{ +lean_object* x_13329; lean_object* x_13330; lean_object* x_13331; lean_object* x_13332; lean_object* x_13333; lean_object* x_13334; lean_object* x_13335; lean_object* x_13336; lean_object* x_13337; +lean_dec(x_13300); +lean_dec(x_13298); +if (lean_is_scalar(x_13296)) { + x_13329 = lean_alloc_ctor(6, 2, 0); +} else { + x_13329 = x_13296; + lean_ctor_set_tag(x_13329, 6); +} +lean_ctor_set(x_13329, 0, x_153); +lean_ctor_set(x_13329, 1, x_11919); +x_13330 = lean_ctor_get(x_1, 0); +lean_inc(x_13330); +x_13331 = l_Lean_IR_ToIR_bindVar(x_13330, x_13295, x_4, x_5, x_13294); +x_13332 = lean_ctor_get(x_13331, 0); +lean_inc(x_13332); +x_13333 = lean_ctor_get(x_13331, 1); +lean_inc(x_13333); +lean_dec(x_13331); +x_13334 = lean_ctor_get(x_13332, 0); +lean_inc(x_13334); +x_13335 = lean_ctor_get(x_13332, 1); +lean_inc(x_13335); +lean_dec(x_13332); +x_13336 = lean_ctor_get(x_1, 2); +lean_inc(x_13336); +lean_dec(x_1); +lean_inc(x_5); +lean_inc(x_4); +x_13337 = l_Lean_IR_ToIR_lowerType(x_13336, x_13335, x_4, x_5, x_13333); +if (lean_obj_tag(x_13337) == 0) +{ +lean_object* x_13338; lean_object* x_13339; lean_object* x_13340; lean_object* x_13341; lean_object* x_13342; +x_13338 = lean_ctor_get(x_13337, 0); +lean_inc(x_13338); +x_13339 = lean_ctor_get(x_13337, 1); +lean_inc(x_13339); +lean_dec(x_13337); +x_13340 = lean_ctor_get(x_13338, 0); +lean_inc(x_13340); +x_13341 = lean_ctor_get(x_13338, 1); +lean_inc(x_13341); +lean_dec(x_13338); +x_13342 = l_Lean_IR_ToIR_lowerLet___lambda__1(x_2, x_13334, x_13329, x_13340, x_13341, x_4, x_5, x_13339); +return x_13342; +} +else +{ +lean_object* x_13343; lean_object* x_13344; lean_object* x_13345; lean_object* x_13346; +lean_dec(x_13334); +lean_dec(x_13329); +lean_dec(x_5); +lean_dec(x_4); +lean_dec(x_2); +x_13343 = lean_ctor_get(x_13337, 0); +lean_inc(x_13343); +x_13344 = lean_ctor_get(x_13337, 1); +lean_inc(x_13344); +if (lean_is_exclusive(x_13337)) { + lean_ctor_release(x_13337, 0); + lean_ctor_release(x_13337, 1); + x_13345 = x_13337; +} else { + lean_dec_ref(x_13337); + x_13345 = lean_box(0); +} +if (lean_is_scalar(x_13345)) { + x_13346 = lean_alloc_ctor(1, 2, 0); +} else { + x_13346 = x_13345; +} +lean_ctor_set(x_13346, 0, x_13343); +lean_ctor_set(x_13346, 1, x_13344); +return x_13346; +} +} +} +else +{ +lean_object* x_13347; lean_object* x_13348; lean_object* x_13349; lean_object* x_13350; lean_object* x_13351; lean_object* x_13352; lean_object* x_13353; lean_object* x_13354; 
lean_object* x_13355; +lean_dec(x_13300); +lean_dec(x_13298); +if (lean_is_scalar(x_13296)) { + x_13347 = lean_alloc_ctor(7, 2, 0); +} else { + x_13347 = x_13296; + lean_ctor_set_tag(x_13347, 7); +} +lean_ctor_set(x_13347, 0, x_153); +lean_ctor_set(x_13347, 1, x_11919); +x_13348 = lean_ctor_get(x_1, 0); +lean_inc(x_13348); +lean_dec(x_1); +x_13349 = l_Lean_IR_ToIR_bindVar(x_13348, x_13295, x_4, x_5, x_13294); +x_13350 = lean_ctor_get(x_13349, 0); +lean_inc(x_13350); +x_13351 = lean_ctor_get(x_13349, 1); +lean_inc(x_13351); +lean_dec(x_13349); +x_13352 = lean_ctor_get(x_13350, 0); +lean_inc(x_13352); +x_13353 = lean_ctor_get(x_13350, 1); +lean_inc(x_13353); +lean_dec(x_13350); +x_13354 = lean_box(7); +x_13355 = l_Lean_IR_ToIR_lowerLet___lambda__1(x_2, x_13352, x_13347, x_13354, x_13353, x_4, x_5, x_13351); +return x_13355; +} +} +} +else +{ +lean_object* x_13356; lean_object* x_13357; lean_object* x_13358; +lean_dec(x_13272); +lean_dec(x_11919); +lean_dec(x_153); +lean_dec(x_5); +lean_dec(x_4); +lean_dec(x_2); +lean_dec(x_1); +x_13356 = lean_box(13); +x_13357 = lean_alloc_ctor(0, 2, 0); +lean_ctor_set(x_13357, 0, x_13356); +lean_ctor_set(x_13357, 1, x_13261); +if (lean_is_scalar(x_13265)) { + x_13358 = lean_alloc_ctor(0, 2, 0); +} else { + x_13358 = x_13265; +} +lean_ctor_set(x_13358, 0, x_13357); +lean_ctor_set(x_13358, 1, x_13264); +return x_13358; +} +} +else +{ +lean_object* x_13359; lean_object* x_13360; lean_object* x_13361; +lean_dec(x_13272); +lean_dec(x_13265); +lean_dec(x_153); +x_13359 = l_Lean_IR_instInhabitedArg; +x_13360 = lean_unsigned_to_nat(2u); +x_13361 = lean_array_get(x_13359, x_11919, x_13360); +lean_dec(x_11919); +if (lean_obj_tag(x_13361) == 0) +{ +lean_object* x_13362; lean_object* x_13363; lean_object* x_13364; lean_object* x_13365; lean_object* x_13366; lean_object* x_13367; lean_object* x_13368; +x_13362 = lean_ctor_get(x_13361, 0); +lean_inc(x_13362); +lean_dec(x_13361); +x_13363 = lean_ctor_get(x_1, 0); +lean_inc(x_13363); +lean_dec(x_1); +x_13364 = l_Lean_IR_ToIR_bindVarToVarId(x_13363, x_13362, x_13261, x_4, x_5, x_13264); +x_13365 = lean_ctor_get(x_13364, 0); +lean_inc(x_13365); +x_13366 = lean_ctor_get(x_13364, 1); +lean_inc(x_13366); +lean_dec(x_13364); +x_13367 = lean_ctor_get(x_13365, 1); +lean_inc(x_13367); +lean_dec(x_13365); +x_13368 = l_Lean_IR_ToIR_lowerCode(x_2, x_13367, x_4, x_5, x_13366); +return x_13368; +} +else +{ +lean_object* x_13369; lean_object* x_13370; lean_object* x_13371; lean_object* x_13372; lean_object* x_13373; lean_object* x_13374; +x_13369 = lean_ctor_get(x_1, 0); +lean_inc(x_13369); +lean_dec(x_1); +x_13370 = l_Lean_IR_ToIR_bindErased(x_13369, x_13261, x_4, x_5, x_13264); +x_13371 = lean_ctor_get(x_13370, 0); +lean_inc(x_13371); +x_13372 = lean_ctor_get(x_13370, 1); +lean_inc(x_13372); +lean_dec(x_13370); +x_13373 = lean_ctor_get(x_13371, 1); +lean_inc(x_13373); +lean_dec(x_13371); +x_13374 = l_Lean_IR_ToIR_lowerCode(x_2, x_13373, x_4, x_5, x_13372); +return x_13374; +} +} +} +case 1: +{ +lean_object* x_13375; lean_object* x_13376; lean_object* x_13403; lean_object* x_13404; +lean_dec(x_13271); +lean_dec(x_13266); +lean_dec(x_11911); +lean_dec(x_11910); +lean_inc(x_153); +x_13403 = l_Lean_Compiler_LCNF_getMonoDecl_x3f(x_153, x_4, x_5, x_13264); +x_13404 = lean_ctor_get(x_13403, 0); +lean_inc(x_13404); +if (lean_obj_tag(x_13404) == 0) +{ +lean_object* x_13405; lean_object* x_13406; lean_object* x_13407; +x_13405 = lean_ctor_get(x_13403, 1); +lean_inc(x_13405); +lean_dec(x_13403); +x_13406 = lean_box(0); +x_13407 = 
lean_alloc_ctor(0, 2, 0); +lean_ctor_set(x_13407, 0, x_13406); +lean_ctor_set(x_13407, 1, x_13261); +x_13375 = x_13407; +x_13376 = x_13405; +goto block_13402; +} +else +{ +lean_object* x_13408; lean_object* x_13409; lean_object* x_13410; lean_object* x_13411; lean_object* x_13412; lean_object* x_13413; lean_object* x_13414; uint8_t x_13415; +x_13408 = lean_ctor_get(x_13403, 1); +lean_inc(x_13408); +if (lean_is_exclusive(x_13403)) { + lean_ctor_release(x_13403, 0); + lean_ctor_release(x_13403, 1); + x_13409 = x_13403; +} else { + lean_dec_ref(x_13403); + x_13409 = lean_box(0); +} +x_13410 = lean_ctor_get(x_13404, 0); +lean_inc(x_13410); +if (lean_is_exclusive(x_13404)) { + lean_ctor_release(x_13404, 0); + x_13411 = x_13404; +} else { + lean_dec_ref(x_13404); + x_13411 = lean_box(0); +} +x_13412 = lean_array_get_size(x_11919); +x_13413 = lean_ctor_get(x_13410, 3); +lean_inc(x_13413); +lean_dec(x_13410); +x_13414 = lean_array_get_size(x_13413); +lean_dec(x_13413); +x_13415 = lean_nat_dec_lt(x_13412, x_13414); +if (x_13415 == 0) +{ +uint8_t x_13416; +x_13416 = lean_nat_dec_eq(x_13412, x_13414); +if (x_13416 == 0) +{ +lean_object* x_13417; lean_object* x_13418; lean_object* x_13419; lean_object* x_13420; lean_object* x_13421; lean_object* x_13422; lean_object* x_13423; lean_object* x_13424; lean_object* x_13425; lean_object* x_13426; lean_object* x_13427; lean_object* x_13428; lean_object* x_13429; lean_object* x_13430; lean_object* x_13431; lean_object* x_13432; lean_object* x_13433; +x_13417 = lean_unsigned_to_nat(0u); +x_13418 = l_Array_extract___rarg(x_11919, x_13417, x_13414); +x_13419 = l_Array_extract___rarg(x_11919, x_13414, x_13412); +lean_dec(x_13412); +lean_inc(x_153); +if (lean_is_scalar(x_13409)) { + x_13420 = lean_alloc_ctor(6, 2, 0); +} else { + x_13420 = x_13409; + lean_ctor_set_tag(x_13420, 6); +} +lean_ctor_set(x_13420, 0, x_153); +lean_ctor_set(x_13420, 1, x_13418); +x_13421 = lean_ctor_get(x_1, 0); +lean_inc(x_13421); +x_13422 = l_Lean_IR_ToIR_bindVar(x_13421, x_13261, x_4, x_5, x_13408); +x_13423 = lean_ctor_get(x_13422, 0); +lean_inc(x_13423); +x_13424 = lean_ctor_get(x_13422, 1); +lean_inc(x_13424); +lean_dec(x_13422); +x_13425 = lean_ctor_get(x_13423, 0); +lean_inc(x_13425); +x_13426 = lean_ctor_get(x_13423, 1); +lean_inc(x_13426); +lean_dec(x_13423); +x_13427 = l_Lean_IR_ToIR_newVar(x_13426, x_4, x_5, x_13424); +x_13428 = lean_ctor_get(x_13427, 0); +lean_inc(x_13428); +x_13429 = lean_ctor_get(x_13427, 1); +lean_inc(x_13429); +lean_dec(x_13427); +x_13430 = lean_ctor_get(x_13428, 0); +lean_inc(x_13430); +x_13431 = lean_ctor_get(x_13428, 1); +lean_inc(x_13431); +lean_dec(x_13428); +x_13432 = lean_ctor_get(x_1, 2); +lean_inc(x_13432); +lean_inc(x_5); +lean_inc(x_4); +x_13433 = l_Lean_IR_ToIR_lowerType(x_13432, x_13431, x_4, x_5, x_13429); +if (lean_obj_tag(x_13433) == 0) +{ +lean_object* x_13434; lean_object* x_13435; lean_object* x_13436; lean_object* x_13437; lean_object* x_13438; +x_13434 = lean_ctor_get(x_13433, 0); +lean_inc(x_13434); +x_13435 = lean_ctor_get(x_13433, 1); +lean_inc(x_13435); +lean_dec(x_13433); +x_13436 = lean_ctor_get(x_13434, 0); +lean_inc(x_13436); +x_13437 = lean_ctor_get(x_13434, 1); +lean_inc(x_13437); +lean_dec(x_13434); +lean_inc(x_5); +lean_inc(x_4); +lean_inc(x_2); +x_13438 = l_Lean_IR_ToIR_lowerLet___lambda__3(x_2, x_13430, x_13419, x_13425, x_13420, x_13436, x_13437, x_4, x_5, x_13435); +if (lean_obj_tag(x_13438) == 0) +{ +lean_object* x_13439; lean_object* x_13440; lean_object* x_13441; lean_object* x_13442; lean_object* x_13443; 
lean_object* x_13444; lean_object* x_13445; +x_13439 = lean_ctor_get(x_13438, 0); +lean_inc(x_13439); +x_13440 = lean_ctor_get(x_13438, 1); +lean_inc(x_13440); +lean_dec(x_13438); +x_13441 = lean_ctor_get(x_13439, 0); +lean_inc(x_13441); +x_13442 = lean_ctor_get(x_13439, 1); +lean_inc(x_13442); +if (lean_is_exclusive(x_13439)) { + lean_ctor_release(x_13439, 0); + lean_ctor_release(x_13439, 1); + x_13443 = x_13439; +} else { + lean_dec_ref(x_13439); + x_13443 = lean_box(0); +} +if (lean_is_scalar(x_13411)) { + x_13444 = lean_alloc_ctor(1, 1, 0); +} else { + x_13444 = x_13411; +} +lean_ctor_set(x_13444, 0, x_13441); +if (lean_is_scalar(x_13443)) { + x_13445 = lean_alloc_ctor(0, 2, 0); +} else { + x_13445 = x_13443; +} +lean_ctor_set(x_13445, 0, x_13444); +lean_ctor_set(x_13445, 1, x_13442); +x_13375 = x_13445; +x_13376 = x_13440; +goto block_13402; +} +else +{ +lean_object* x_13446; lean_object* x_13447; lean_object* x_13448; lean_object* x_13449; +lean_dec(x_13411); +lean_dec(x_13265); +lean_dec(x_11919); +lean_dec(x_153); +lean_dec(x_5); +lean_dec(x_4); +lean_dec(x_2); +lean_dec(x_1); +x_13446 = lean_ctor_get(x_13438, 0); +lean_inc(x_13446); +x_13447 = lean_ctor_get(x_13438, 1); +lean_inc(x_13447); +if (lean_is_exclusive(x_13438)) { + lean_ctor_release(x_13438, 0); + lean_ctor_release(x_13438, 1); + x_13448 = x_13438; +} else { + lean_dec_ref(x_13438); + x_13448 = lean_box(0); +} +if (lean_is_scalar(x_13448)) { + x_13449 = lean_alloc_ctor(1, 2, 0); +} else { + x_13449 = x_13448; +} +lean_ctor_set(x_13449, 0, x_13446); +lean_ctor_set(x_13449, 1, x_13447); +return x_13449; +} +} +else +{ +lean_object* x_13450; lean_object* x_13451; lean_object* x_13452; lean_object* x_13453; +lean_dec(x_13430); +lean_dec(x_13425); +lean_dec(x_13420); +lean_dec(x_13419); +lean_dec(x_13411); +lean_dec(x_13265); +lean_dec(x_11919); +lean_dec(x_153); +lean_dec(x_5); +lean_dec(x_4); +lean_dec(x_2); +lean_dec(x_1); +x_13450 = lean_ctor_get(x_13433, 0); +lean_inc(x_13450); +x_13451 = lean_ctor_get(x_13433, 1); +lean_inc(x_13451); +if (lean_is_exclusive(x_13433)) { + lean_ctor_release(x_13433, 0); + lean_ctor_release(x_13433, 1); + x_13452 = x_13433; +} else { + lean_dec_ref(x_13433); + x_13452 = lean_box(0); +} +if (lean_is_scalar(x_13452)) { + x_13453 = lean_alloc_ctor(1, 2, 0); +} else { + x_13453 = x_13452; +} +lean_ctor_set(x_13453, 0, x_13450); +lean_ctor_set(x_13453, 1, x_13451); +return x_13453; +} +} +else +{ +lean_object* x_13454; lean_object* x_13455; lean_object* x_13456; lean_object* x_13457; lean_object* x_13458; lean_object* x_13459; lean_object* x_13460; lean_object* x_13461; lean_object* x_13462; +lean_dec(x_13414); +lean_dec(x_13412); +lean_inc(x_11919); +lean_inc(x_153); +if (lean_is_scalar(x_13409)) { + x_13454 = lean_alloc_ctor(6, 2, 0); +} else { + x_13454 = x_13409; + lean_ctor_set_tag(x_13454, 6); +} +lean_ctor_set(x_13454, 0, x_153); +lean_ctor_set(x_13454, 1, x_11919); +x_13455 = lean_ctor_get(x_1, 0); +lean_inc(x_13455); +x_13456 = l_Lean_IR_ToIR_bindVar(x_13455, x_13261, x_4, x_5, x_13408); +x_13457 = lean_ctor_get(x_13456, 0); +lean_inc(x_13457); +x_13458 = lean_ctor_get(x_13456, 1); +lean_inc(x_13458); +lean_dec(x_13456); +x_13459 = lean_ctor_get(x_13457, 0); +lean_inc(x_13459); +x_13460 = lean_ctor_get(x_13457, 1); +lean_inc(x_13460); +lean_dec(x_13457); +x_13461 = lean_ctor_get(x_1, 2); +lean_inc(x_13461); +lean_inc(x_5); +lean_inc(x_4); +x_13462 = l_Lean_IR_ToIR_lowerType(x_13461, x_13460, x_4, x_5, x_13458); +if (lean_obj_tag(x_13462) == 0) +{ +lean_object* x_13463; lean_object* 
x_13464; lean_object* x_13465; lean_object* x_13466; lean_object* x_13467; +x_13463 = lean_ctor_get(x_13462, 0); +lean_inc(x_13463); +x_13464 = lean_ctor_get(x_13462, 1); +lean_inc(x_13464); +lean_dec(x_13462); +x_13465 = lean_ctor_get(x_13463, 0); +lean_inc(x_13465); +x_13466 = lean_ctor_get(x_13463, 1); +lean_inc(x_13466); +lean_dec(x_13463); +lean_inc(x_5); +lean_inc(x_4); +lean_inc(x_2); +x_13467 = l_Lean_IR_ToIR_lowerLet___lambda__1(x_2, x_13459, x_13454, x_13465, x_13466, x_4, x_5, x_13464); +if (lean_obj_tag(x_13467) == 0) +{ +lean_object* x_13468; lean_object* x_13469; lean_object* x_13470; lean_object* x_13471; lean_object* x_13472; lean_object* x_13473; lean_object* x_13474; +x_13468 = lean_ctor_get(x_13467, 0); +lean_inc(x_13468); +x_13469 = lean_ctor_get(x_13467, 1); +lean_inc(x_13469); +lean_dec(x_13467); +x_13470 = lean_ctor_get(x_13468, 0); +lean_inc(x_13470); +x_13471 = lean_ctor_get(x_13468, 1); +lean_inc(x_13471); +if (lean_is_exclusive(x_13468)) { + lean_ctor_release(x_13468, 0); + lean_ctor_release(x_13468, 1); + x_13472 = x_13468; +} else { + lean_dec_ref(x_13468); + x_13472 = lean_box(0); +} +if (lean_is_scalar(x_13411)) { + x_13473 = lean_alloc_ctor(1, 1, 0); +} else { + x_13473 = x_13411; +} +lean_ctor_set(x_13473, 0, x_13470); +if (lean_is_scalar(x_13472)) { + x_13474 = lean_alloc_ctor(0, 2, 0); +} else { + x_13474 = x_13472; +} +lean_ctor_set(x_13474, 0, x_13473); +lean_ctor_set(x_13474, 1, x_13471); +x_13375 = x_13474; +x_13376 = x_13469; +goto block_13402; +} +else +{ +lean_object* x_13475; lean_object* x_13476; lean_object* x_13477; lean_object* x_13478; +lean_dec(x_13411); +lean_dec(x_13265); +lean_dec(x_11919); +lean_dec(x_153); +lean_dec(x_5); +lean_dec(x_4); +lean_dec(x_2); +lean_dec(x_1); +x_13475 = lean_ctor_get(x_13467, 0); +lean_inc(x_13475); +x_13476 = lean_ctor_get(x_13467, 1); +lean_inc(x_13476); +if (lean_is_exclusive(x_13467)) { + lean_ctor_release(x_13467, 0); + lean_ctor_release(x_13467, 1); + x_13477 = x_13467; +} else { + lean_dec_ref(x_13467); + x_13477 = lean_box(0); +} +if (lean_is_scalar(x_13477)) { + x_13478 = lean_alloc_ctor(1, 2, 0); +} else { + x_13478 = x_13477; +} +lean_ctor_set(x_13478, 0, x_13475); +lean_ctor_set(x_13478, 1, x_13476); +return x_13478; +} +} +else +{ +lean_object* x_13479; lean_object* x_13480; lean_object* x_13481; lean_object* x_13482; +lean_dec(x_13459); +lean_dec(x_13454); +lean_dec(x_13411); +lean_dec(x_13265); +lean_dec(x_11919); +lean_dec(x_153); +lean_dec(x_5); +lean_dec(x_4); +lean_dec(x_2); +lean_dec(x_1); +x_13479 = lean_ctor_get(x_13462, 0); +lean_inc(x_13479); +x_13480 = lean_ctor_get(x_13462, 1); +lean_inc(x_13480); +if (lean_is_exclusive(x_13462)) { + lean_ctor_release(x_13462, 0); + lean_ctor_release(x_13462, 1); + x_13481 = x_13462; +} else { + lean_dec_ref(x_13462); + x_13481 = lean_box(0); +} +if (lean_is_scalar(x_13481)) { + x_13482 = lean_alloc_ctor(1, 2, 0); +} else { + x_13482 = x_13481; +} +lean_ctor_set(x_13482, 0, x_13479); +lean_ctor_set(x_13482, 1, x_13480); +return x_13482; +} +} +} +else +{ +lean_object* x_13483; lean_object* x_13484; lean_object* x_13485; lean_object* x_13486; lean_object* x_13487; lean_object* x_13488; lean_object* x_13489; lean_object* x_13490; lean_object* x_13491; +lean_dec(x_13414); +lean_dec(x_13412); +lean_inc(x_11919); +lean_inc(x_153); +if (lean_is_scalar(x_13409)) { + x_13483 = lean_alloc_ctor(7, 2, 0); +} else { + x_13483 = x_13409; + lean_ctor_set_tag(x_13483, 7); +} +lean_ctor_set(x_13483, 0, x_153); +lean_ctor_set(x_13483, 1, x_11919); +x_13484 = 
lean_ctor_get(x_1, 0); +lean_inc(x_13484); +x_13485 = l_Lean_IR_ToIR_bindVar(x_13484, x_13261, x_4, x_5, x_13408); +x_13486 = lean_ctor_get(x_13485, 0); +lean_inc(x_13486); +x_13487 = lean_ctor_get(x_13485, 1); +lean_inc(x_13487); +lean_dec(x_13485); +x_13488 = lean_ctor_get(x_13486, 0); +lean_inc(x_13488); +x_13489 = lean_ctor_get(x_13486, 1); +lean_inc(x_13489); +lean_dec(x_13486); +x_13490 = lean_box(7); +lean_inc(x_5); +lean_inc(x_4); +lean_inc(x_2); +x_13491 = l_Lean_IR_ToIR_lowerLet___lambda__1(x_2, x_13488, x_13483, x_13490, x_13489, x_4, x_5, x_13487); +if (lean_obj_tag(x_13491) == 0) +{ +lean_object* x_13492; lean_object* x_13493; lean_object* x_13494; lean_object* x_13495; lean_object* x_13496; lean_object* x_13497; lean_object* x_13498; +x_13492 = lean_ctor_get(x_13491, 0); +lean_inc(x_13492); +x_13493 = lean_ctor_get(x_13491, 1); +lean_inc(x_13493); +lean_dec(x_13491); +x_13494 = lean_ctor_get(x_13492, 0); +lean_inc(x_13494); +x_13495 = lean_ctor_get(x_13492, 1); +lean_inc(x_13495); +if (lean_is_exclusive(x_13492)) { + lean_ctor_release(x_13492, 0); + lean_ctor_release(x_13492, 1); + x_13496 = x_13492; +} else { + lean_dec_ref(x_13492); + x_13496 = lean_box(0); +} +if (lean_is_scalar(x_13411)) { + x_13497 = lean_alloc_ctor(1, 1, 0); +} else { + x_13497 = x_13411; +} +lean_ctor_set(x_13497, 0, x_13494); +if (lean_is_scalar(x_13496)) { + x_13498 = lean_alloc_ctor(0, 2, 0); +} else { + x_13498 = x_13496; +} +lean_ctor_set(x_13498, 0, x_13497); +lean_ctor_set(x_13498, 1, x_13495); +x_13375 = x_13498; +x_13376 = x_13493; +goto block_13402; +} +else +{ +lean_object* x_13499; lean_object* x_13500; lean_object* x_13501; lean_object* x_13502; +lean_dec(x_13411); +lean_dec(x_13265); +lean_dec(x_11919); +lean_dec(x_153); +lean_dec(x_5); +lean_dec(x_4); +lean_dec(x_2); +lean_dec(x_1); +x_13499 = lean_ctor_get(x_13491, 0); +lean_inc(x_13499); +x_13500 = lean_ctor_get(x_13491, 1); +lean_inc(x_13500); +if (lean_is_exclusive(x_13491)) { + lean_ctor_release(x_13491, 0); + lean_ctor_release(x_13491, 1); + x_13501 = x_13491; +} else { + lean_dec_ref(x_13491); + x_13501 = lean_box(0); +} +if (lean_is_scalar(x_13501)) { + x_13502 = lean_alloc_ctor(1, 2, 0); +} else { + x_13502 = x_13501; +} +lean_ctor_set(x_13502, 0, x_13499); +lean_ctor_set(x_13502, 1, x_13500); +return x_13502; +} +} +} +block_13402: +{ +lean_object* x_13377; +x_13377 = lean_ctor_get(x_13375, 0); +lean_inc(x_13377); +if (lean_obj_tag(x_13377) == 0) +{ +lean_object* x_13378; lean_object* x_13379; lean_object* x_13380; lean_object* x_13381; lean_object* x_13382; lean_object* x_13383; lean_object* x_13384; lean_object* x_13385; lean_object* x_13386; lean_object* x_13387; +lean_dec(x_13265); +x_13378 = lean_ctor_get(x_13375, 1); +lean_inc(x_13378); +lean_dec(x_13375); +x_13379 = lean_alloc_ctor(6, 2, 0); +lean_ctor_set(x_13379, 0, x_153); +lean_ctor_set(x_13379, 1, x_11919); +x_13380 = lean_ctor_get(x_1, 0); +lean_inc(x_13380); +x_13381 = l_Lean_IR_ToIR_bindVar(x_13380, x_13378, x_4, x_5, x_13376); +x_13382 = lean_ctor_get(x_13381, 0); +lean_inc(x_13382); +x_13383 = lean_ctor_get(x_13381, 1); +lean_inc(x_13383); +lean_dec(x_13381); +x_13384 = lean_ctor_get(x_13382, 0); +lean_inc(x_13384); +x_13385 = lean_ctor_get(x_13382, 1); +lean_inc(x_13385); +lean_dec(x_13382); +x_13386 = lean_ctor_get(x_1, 2); +lean_inc(x_13386); +lean_dec(x_1); +lean_inc(x_5); +lean_inc(x_4); +x_13387 = l_Lean_IR_ToIR_lowerType(x_13386, x_13385, x_4, x_5, x_13383); +if (lean_obj_tag(x_13387) == 0) +{ +lean_object* x_13388; lean_object* x_13389; lean_object* 
x_13390; lean_object* x_13391; lean_object* x_13392; +x_13388 = lean_ctor_get(x_13387, 0); +lean_inc(x_13388); +x_13389 = lean_ctor_get(x_13387, 1); +lean_inc(x_13389); +lean_dec(x_13387); +x_13390 = lean_ctor_get(x_13388, 0); +lean_inc(x_13390); +x_13391 = lean_ctor_get(x_13388, 1); +lean_inc(x_13391); +lean_dec(x_13388); +x_13392 = l_Lean_IR_ToIR_lowerLet___lambda__1(x_2, x_13384, x_13379, x_13390, x_13391, x_4, x_5, x_13389); +return x_13392; +} +else +{ +lean_object* x_13393; lean_object* x_13394; lean_object* x_13395; lean_object* x_13396; +lean_dec(x_13384); +lean_dec(x_13379); +lean_dec(x_5); +lean_dec(x_4); +lean_dec(x_2); +x_13393 = lean_ctor_get(x_13387, 0); +lean_inc(x_13393); +x_13394 = lean_ctor_get(x_13387, 1); +lean_inc(x_13394); +if (lean_is_exclusive(x_13387)) { + lean_ctor_release(x_13387, 0); + lean_ctor_release(x_13387, 1); + x_13395 = x_13387; +} else { + lean_dec_ref(x_13387); + x_13395 = lean_box(0); +} +if (lean_is_scalar(x_13395)) { + x_13396 = lean_alloc_ctor(1, 2, 0); +} else { + x_13396 = x_13395; +} +lean_ctor_set(x_13396, 0, x_13393); +lean_ctor_set(x_13396, 1, x_13394); +return x_13396; +} +} +else +{ +lean_object* x_13397; lean_object* x_13398; lean_object* x_13399; lean_object* x_13400; lean_object* x_13401; +lean_dec(x_11919); +lean_dec(x_153); +lean_dec(x_5); +lean_dec(x_4); +lean_dec(x_2); +lean_dec(x_1); +x_13397 = lean_ctor_get(x_13375, 1); +lean_inc(x_13397); +if (lean_is_exclusive(x_13375)) { + lean_ctor_release(x_13375, 0); + lean_ctor_release(x_13375, 1); + x_13398 = x_13375; +} else { + lean_dec_ref(x_13375); + x_13398 = lean_box(0); +} +x_13399 = lean_ctor_get(x_13377, 0); +lean_inc(x_13399); +lean_dec(x_13377); +if (lean_is_scalar(x_13398)) { + x_13400 = lean_alloc_ctor(0, 2, 0); +} else { + x_13400 = x_13398; +} +lean_ctor_set(x_13400, 0, x_13399); +lean_ctor_set(x_13400, 1, x_13397); +if (lean_is_scalar(x_13265)) { + x_13401 = lean_alloc_ctor(0, 2, 0); +} else { + x_13401 = x_13265; +} +lean_ctor_set(x_13401, 0, x_13400); +lean_ctor_set(x_13401, 1, x_13376); +return x_13401; +} +} +} +case 2: +{ +lean_object* x_13503; lean_object* x_13504; +lean_dec(x_13271); +lean_dec(x_13266); +lean_dec(x_13265); +lean_dec(x_11919); +lean_dec(x_11911); +lean_dec(x_11910); +lean_dec(x_153); +lean_dec(x_2); +lean_dec(x_1); +x_13503 = l_Lean_IR_ToIR_lowerLet___closed__18; +x_13504 = l_panic___at_Lean_IR_ToIR_lowerAlt_loop___spec__1(x_13503, x_13261, x_4, x_5, x_13264); +return x_13504; +} +case 3: +{ +lean_object* x_13505; lean_object* x_13506; lean_object* x_13533; lean_object* x_13534; +lean_dec(x_13271); +lean_dec(x_13266); +lean_dec(x_11911); +lean_dec(x_11910); +lean_inc(x_153); +x_13533 = l_Lean_Compiler_LCNF_getMonoDecl_x3f(x_153, x_4, x_5, x_13264); +x_13534 = lean_ctor_get(x_13533, 0); +lean_inc(x_13534); +if (lean_obj_tag(x_13534) == 0) +{ +lean_object* x_13535; lean_object* x_13536; lean_object* x_13537; +x_13535 = lean_ctor_get(x_13533, 1); +lean_inc(x_13535); +lean_dec(x_13533); +x_13536 = lean_box(0); +x_13537 = lean_alloc_ctor(0, 2, 0); +lean_ctor_set(x_13537, 0, x_13536); +lean_ctor_set(x_13537, 1, x_13261); +x_13505 = x_13537; +x_13506 = x_13535; +goto block_13532; +} +else +{ +lean_object* x_13538; lean_object* x_13539; lean_object* x_13540; lean_object* x_13541; lean_object* x_13542; lean_object* x_13543; lean_object* x_13544; uint8_t x_13545; +x_13538 = lean_ctor_get(x_13533, 1); +lean_inc(x_13538); +if (lean_is_exclusive(x_13533)) { + lean_ctor_release(x_13533, 0); + lean_ctor_release(x_13533, 1); + x_13539 = x_13533; +} else { + 
lean_dec_ref(x_13533); + x_13539 = lean_box(0); +} +x_13540 = lean_ctor_get(x_13534, 0); +lean_inc(x_13540); +if (lean_is_exclusive(x_13534)) { + lean_ctor_release(x_13534, 0); + x_13541 = x_13534; +} else { + lean_dec_ref(x_13534); + x_13541 = lean_box(0); +} +x_13542 = lean_array_get_size(x_11919); +x_13543 = lean_ctor_get(x_13540, 3); +lean_inc(x_13543); +lean_dec(x_13540); +x_13544 = lean_array_get_size(x_13543); +lean_dec(x_13543); +x_13545 = lean_nat_dec_lt(x_13542, x_13544); +if (x_13545 == 0) +{ +uint8_t x_13546; +x_13546 = lean_nat_dec_eq(x_13542, x_13544); +if (x_13546 == 0) +{ +lean_object* x_13547; lean_object* x_13548; lean_object* x_13549; lean_object* x_13550; lean_object* x_13551; lean_object* x_13552; lean_object* x_13553; lean_object* x_13554; lean_object* x_13555; lean_object* x_13556; lean_object* x_13557; lean_object* x_13558; lean_object* x_13559; lean_object* x_13560; lean_object* x_13561; lean_object* x_13562; lean_object* x_13563; +x_13547 = lean_unsigned_to_nat(0u); +x_13548 = l_Array_extract___rarg(x_11919, x_13547, x_13544); +x_13549 = l_Array_extract___rarg(x_11919, x_13544, x_13542); +lean_dec(x_13542); +lean_inc(x_153); +if (lean_is_scalar(x_13539)) { + x_13550 = lean_alloc_ctor(6, 2, 0); +} else { + x_13550 = x_13539; + lean_ctor_set_tag(x_13550, 6); +} +lean_ctor_set(x_13550, 0, x_153); +lean_ctor_set(x_13550, 1, x_13548); +x_13551 = lean_ctor_get(x_1, 0); +lean_inc(x_13551); +x_13552 = l_Lean_IR_ToIR_bindVar(x_13551, x_13261, x_4, x_5, x_13538); +x_13553 = lean_ctor_get(x_13552, 0); +lean_inc(x_13553); +x_13554 = lean_ctor_get(x_13552, 1); +lean_inc(x_13554); +lean_dec(x_13552); +x_13555 = lean_ctor_get(x_13553, 0); +lean_inc(x_13555); +x_13556 = lean_ctor_get(x_13553, 1); +lean_inc(x_13556); +lean_dec(x_13553); +x_13557 = l_Lean_IR_ToIR_newVar(x_13556, x_4, x_5, x_13554); +x_13558 = lean_ctor_get(x_13557, 0); +lean_inc(x_13558); +x_13559 = lean_ctor_get(x_13557, 1); +lean_inc(x_13559); +lean_dec(x_13557); +x_13560 = lean_ctor_get(x_13558, 0); +lean_inc(x_13560); +x_13561 = lean_ctor_get(x_13558, 1); +lean_inc(x_13561); +lean_dec(x_13558); +x_13562 = lean_ctor_get(x_1, 2); +lean_inc(x_13562); +lean_inc(x_5); +lean_inc(x_4); +x_13563 = l_Lean_IR_ToIR_lowerType(x_13562, x_13561, x_4, x_5, x_13559); +if (lean_obj_tag(x_13563) == 0) +{ +lean_object* x_13564; lean_object* x_13565; lean_object* x_13566; lean_object* x_13567; lean_object* x_13568; +x_13564 = lean_ctor_get(x_13563, 0); +lean_inc(x_13564); +x_13565 = lean_ctor_get(x_13563, 1); +lean_inc(x_13565); +lean_dec(x_13563); +x_13566 = lean_ctor_get(x_13564, 0); +lean_inc(x_13566); +x_13567 = lean_ctor_get(x_13564, 1); +lean_inc(x_13567); +lean_dec(x_13564); +lean_inc(x_5); +lean_inc(x_4); +lean_inc(x_2); +x_13568 = l_Lean_IR_ToIR_lowerLet___lambda__3(x_2, x_13560, x_13549, x_13555, x_13550, x_13566, x_13567, x_4, x_5, x_13565); +if (lean_obj_tag(x_13568) == 0) +{ +lean_object* x_13569; lean_object* x_13570; lean_object* x_13571; lean_object* x_13572; lean_object* x_13573; lean_object* x_13574; lean_object* x_13575; +x_13569 = lean_ctor_get(x_13568, 0); +lean_inc(x_13569); +x_13570 = lean_ctor_get(x_13568, 1); +lean_inc(x_13570); +lean_dec(x_13568); +x_13571 = lean_ctor_get(x_13569, 0); +lean_inc(x_13571); +x_13572 = lean_ctor_get(x_13569, 1); +lean_inc(x_13572); +if (lean_is_exclusive(x_13569)) { + lean_ctor_release(x_13569, 0); + lean_ctor_release(x_13569, 1); + x_13573 = x_13569; +} else { + lean_dec_ref(x_13569); + x_13573 = lean_box(0); +} +if (lean_is_scalar(x_13541)) { + x_13574 = lean_alloc_ctor(1, 
1, 0); +} else { + x_13574 = x_13541; +} +lean_ctor_set(x_13574, 0, x_13571); +if (lean_is_scalar(x_13573)) { + x_13575 = lean_alloc_ctor(0, 2, 0); +} else { + x_13575 = x_13573; +} +lean_ctor_set(x_13575, 0, x_13574); +lean_ctor_set(x_13575, 1, x_13572); +x_13505 = x_13575; +x_13506 = x_13570; +goto block_13532; +} +else +{ +lean_object* x_13576; lean_object* x_13577; lean_object* x_13578; lean_object* x_13579; +lean_dec(x_13541); +lean_dec(x_13265); +lean_dec(x_11919); +lean_dec(x_153); +lean_dec(x_5); +lean_dec(x_4); +lean_dec(x_2); +lean_dec(x_1); +x_13576 = lean_ctor_get(x_13568, 0); +lean_inc(x_13576); +x_13577 = lean_ctor_get(x_13568, 1); +lean_inc(x_13577); +if (lean_is_exclusive(x_13568)) { + lean_ctor_release(x_13568, 0); + lean_ctor_release(x_13568, 1); + x_13578 = x_13568; +} else { + lean_dec_ref(x_13568); + x_13578 = lean_box(0); +} +if (lean_is_scalar(x_13578)) { + x_13579 = lean_alloc_ctor(1, 2, 0); +} else { + x_13579 = x_13578; +} +lean_ctor_set(x_13579, 0, x_13576); +lean_ctor_set(x_13579, 1, x_13577); +return x_13579; +} +} +else +{ +lean_object* x_13580; lean_object* x_13581; lean_object* x_13582; lean_object* x_13583; +lean_dec(x_13560); +lean_dec(x_13555); +lean_dec(x_13550); +lean_dec(x_13549); +lean_dec(x_13541); +lean_dec(x_13265); +lean_dec(x_11919); +lean_dec(x_153); +lean_dec(x_5); +lean_dec(x_4); +lean_dec(x_2); +lean_dec(x_1); +x_13580 = lean_ctor_get(x_13563, 0); +lean_inc(x_13580); +x_13581 = lean_ctor_get(x_13563, 1); +lean_inc(x_13581); +if (lean_is_exclusive(x_13563)) { + lean_ctor_release(x_13563, 0); + lean_ctor_release(x_13563, 1); + x_13582 = x_13563; +} else { + lean_dec_ref(x_13563); + x_13582 = lean_box(0); +} +if (lean_is_scalar(x_13582)) { + x_13583 = lean_alloc_ctor(1, 2, 0); +} else { + x_13583 = x_13582; +} +lean_ctor_set(x_13583, 0, x_13580); +lean_ctor_set(x_13583, 1, x_13581); +return x_13583; +} +} +else +{ +lean_object* x_13584; lean_object* x_13585; lean_object* x_13586; lean_object* x_13587; lean_object* x_13588; lean_object* x_13589; lean_object* x_13590; lean_object* x_13591; lean_object* x_13592; +lean_dec(x_13544); +lean_dec(x_13542); +lean_inc(x_11919); +lean_inc(x_153); +if (lean_is_scalar(x_13539)) { + x_13584 = lean_alloc_ctor(6, 2, 0); +} else { + x_13584 = x_13539; + lean_ctor_set_tag(x_13584, 6); +} +lean_ctor_set(x_13584, 0, x_153); +lean_ctor_set(x_13584, 1, x_11919); +x_13585 = lean_ctor_get(x_1, 0); +lean_inc(x_13585); +x_13586 = l_Lean_IR_ToIR_bindVar(x_13585, x_13261, x_4, x_5, x_13538); +x_13587 = lean_ctor_get(x_13586, 0); +lean_inc(x_13587); +x_13588 = lean_ctor_get(x_13586, 1); +lean_inc(x_13588); +lean_dec(x_13586); +x_13589 = lean_ctor_get(x_13587, 0); +lean_inc(x_13589); +x_13590 = lean_ctor_get(x_13587, 1); +lean_inc(x_13590); +lean_dec(x_13587); +x_13591 = lean_ctor_get(x_1, 2); +lean_inc(x_13591); +lean_inc(x_5); +lean_inc(x_4); +x_13592 = l_Lean_IR_ToIR_lowerType(x_13591, x_13590, x_4, x_5, x_13588); +if (lean_obj_tag(x_13592) == 0) +{ +lean_object* x_13593; lean_object* x_13594; lean_object* x_13595; lean_object* x_13596; lean_object* x_13597; +x_13593 = lean_ctor_get(x_13592, 0); +lean_inc(x_13593); +x_13594 = lean_ctor_get(x_13592, 1); +lean_inc(x_13594); +lean_dec(x_13592); +x_13595 = lean_ctor_get(x_13593, 0); +lean_inc(x_13595); +x_13596 = lean_ctor_get(x_13593, 1); +lean_inc(x_13596); +lean_dec(x_13593); +lean_inc(x_5); +lean_inc(x_4); +lean_inc(x_2); +x_13597 = l_Lean_IR_ToIR_lowerLet___lambda__1(x_2, x_13589, x_13584, x_13595, x_13596, x_4, x_5, x_13594); +if (lean_obj_tag(x_13597) == 0) +{ 
+lean_object* x_13598; lean_object* x_13599; lean_object* x_13600; lean_object* x_13601; lean_object* x_13602; lean_object* x_13603; lean_object* x_13604; +x_13598 = lean_ctor_get(x_13597, 0); +lean_inc(x_13598); +x_13599 = lean_ctor_get(x_13597, 1); +lean_inc(x_13599); +lean_dec(x_13597); +x_13600 = lean_ctor_get(x_13598, 0); +lean_inc(x_13600); +x_13601 = lean_ctor_get(x_13598, 1); +lean_inc(x_13601); +if (lean_is_exclusive(x_13598)) { + lean_ctor_release(x_13598, 0); + lean_ctor_release(x_13598, 1); + x_13602 = x_13598; +} else { + lean_dec_ref(x_13598); + x_13602 = lean_box(0); +} +if (lean_is_scalar(x_13541)) { + x_13603 = lean_alloc_ctor(1, 1, 0); +} else { + x_13603 = x_13541; +} +lean_ctor_set(x_13603, 0, x_13600); +if (lean_is_scalar(x_13602)) { + x_13604 = lean_alloc_ctor(0, 2, 0); +} else { + x_13604 = x_13602; +} +lean_ctor_set(x_13604, 0, x_13603); +lean_ctor_set(x_13604, 1, x_13601); +x_13505 = x_13604; +x_13506 = x_13599; +goto block_13532; +} +else +{ +lean_object* x_13605; lean_object* x_13606; lean_object* x_13607; lean_object* x_13608; +lean_dec(x_13541); +lean_dec(x_13265); +lean_dec(x_11919); +lean_dec(x_153); +lean_dec(x_5); +lean_dec(x_4); +lean_dec(x_2); +lean_dec(x_1); +x_13605 = lean_ctor_get(x_13597, 0); +lean_inc(x_13605); +x_13606 = lean_ctor_get(x_13597, 1); +lean_inc(x_13606); +if (lean_is_exclusive(x_13597)) { + lean_ctor_release(x_13597, 0); + lean_ctor_release(x_13597, 1); + x_13607 = x_13597; +} else { + lean_dec_ref(x_13597); + x_13607 = lean_box(0); +} +if (lean_is_scalar(x_13607)) { + x_13608 = lean_alloc_ctor(1, 2, 0); +} else { + x_13608 = x_13607; +} +lean_ctor_set(x_13608, 0, x_13605); +lean_ctor_set(x_13608, 1, x_13606); +return x_13608; +} +} +else +{ +lean_object* x_13609; lean_object* x_13610; lean_object* x_13611; lean_object* x_13612; +lean_dec(x_13589); +lean_dec(x_13584); +lean_dec(x_13541); +lean_dec(x_13265); +lean_dec(x_11919); +lean_dec(x_153); +lean_dec(x_5); +lean_dec(x_4); +lean_dec(x_2); +lean_dec(x_1); +x_13609 = lean_ctor_get(x_13592, 0); +lean_inc(x_13609); +x_13610 = lean_ctor_get(x_13592, 1); +lean_inc(x_13610); +if (lean_is_exclusive(x_13592)) { + lean_ctor_release(x_13592, 0); + lean_ctor_release(x_13592, 1); + x_13611 = x_13592; +} else { + lean_dec_ref(x_13592); + x_13611 = lean_box(0); +} +if (lean_is_scalar(x_13611)) { + x_13612 = lean_alloc_ctor(1, 2, 0); +} else { + x_13612 = x_13611; +} +lean_ctor_set(x_13612, 0, x_13609); +lean_ctor_set(x_13612, 1, x_13610); +return x_13612; +} +} +} +else +{ +lean_object* x_13613; lean_object* x_13614; lean_object* x_13615; lean_object* x_13616; lean_object* x_13617; lean_object* x_13618; lean_object* x_13619; lean_object* x_13620; lean_object* x_13621; +lean_dec(x_13544); +lean_dec(x_13542); +lean_inc(x_11919); +lean_inc(x_153); +if (lean_is_scalar(x_13539)) { + x_13613 = lean_alloc_ctor(7, 2, 0); +} else { + x_13613 = x_13539; + lean_ctor_set_tag(x_13613, 7); +} +lean_ctor_set(x_13613, 0, x_153); +lean_ctor_set(x_13613, 1, x_11919); +x_13614 = lean_ctor_get(x_1, 0); +lean_inc(x_13614); +x_13615 = l_Lean_IR_ToIR_bindVar(x_13614, x_13261, x_4, x_5, x_13538); +x_13616 = lean_ctor_get(x_13615, 0); +lean_inc(x_13616); +x_13617 = lean_ctor_get(x_13615, 1); +lean_inc(x_13617); +lean_dec(x_13615); +x_13618 = lean_ctor_get(x_13616, 0); +lean_inc(x_13618); +x_13619 = lean_ctor_get(x_13616, 1); +lean_inc(x_13619); +lean_dec(x_13616); +x_13620 = lean_box(7); +lean_inc(x_5); +lean_inc(x_4); +lean_inc(x_2); +x_13621 = l_Lean_IR_ToIR_lowerLet___lambda__1(x_2, x_13618, x_13613, x_13620, x_13619, 
x_4, x_5, x_13617); +if (lean_obj_tag(x_13621) == 0) +{ +lean_object* x_13622; lean_object* x_13623; lean_object* x_13624; lean_object* x_13625; lean_object* x_13626; lean_object* x_13627; lean_object* x_13628; +x_13622 = lean_ctor_get(x_13621, 0); +lean_inc(x_13622); +x_13623 = lean_ctor_get(x_13621, 1); +lean_inc(x_13623); +lean_dec(x_13621); +x_13624 = lean_ctor_get(x_13622, 0); +lean_inc(x_13624); +x_13625 = lean_ctor_get(x_13622, 1); +lean_inc(x_13625); +if (lean_is_exclusive(x_13622)) { + lean_ctor_release(x_13622, 0); + lean_ctor_release(x_13622, 1); + x_13626 = x_13622; +} else { + lean_dec_ref(x_13622); + x_13626 = lean_box(0); +} +if (lean_is_scalar(x_13541)) { + x_13627 = lean_alloc_ctor(1, 1, 0); +} else { + x_13627 = x_13541; +} +lean_ctor_set(x_13627, 0, x_13624); +if (lean_is_scalar(x_13626)) { + x_13628 = lean_alloc_ctor(0, 2, 0); +} else { + x_13628 = x_13626; +} +lean_ctor_set(x_13628, 0, x_13627); +lean_ctor_set(x_13628, 1, x_13625); +x_13505 = x_13628; +x_13506 = x_13623; +goto block_13532; +} +else +{ +lean_object* x_13629; lean_object* x_13630; lean_object* x_13631; lean_object* x_13632; +lean_dec(x_13541); +lean_dec(x_13265); +lean_dec(x_11919); +lean_dec(x_153); +lean_dec(x_5); +lean_dec(x_4); +lean_dec(x_2); +lean_dec(x_1); +x_13629 = lean_ctor_get(x_13621, 0); +lean_inc(x_13629); +x_13630 = lean_ctor_get(x_13621, 1); +lean_inc(x_13630); +if (lean_is_exclusive(x_13621)) { + lean_ctor_release(x_13621, 0); + lean_ctor_release(x_13621, 1); + x_13631 = x_13621; +} else { + lean_dec_ref(x_13621); + x_13631 = lean_box(0); +} +if (lean_is_scalar(x_13631)) { + x_13632 = lean_alloc_ctor(1, 2, 0); +} else { + x_13632 = x_13631; +} +lean_ctor_set(x_13632, 0, x_13629); +lean_ctor_set(x_13632, 1, x_13630); +return x_13632; +} +} +} +block_13532: +{ +lean_object* x_13507; +x_13507 = lean_ctor_get(x_13505, 0); +lean_inc(x_13507); +if (lean_obj_tag(x_13507) == 0) +{ +lean_object* x_13508; lean_object* x_13509; lean_object* x_13510; lean_object* x_13511; lean_object* x_13512; lean_object* x_13513; lean_object* x_13514; lean_object* x_13515; lean_object* x_13516; lean_object* x_13517; +lean_dec(x_13265); +x_13508 = lean_ctor_get(x_13505, 1); +lean_inc(x_13508); +lean_dec(x_13505); +x_13509 = lean_alloc_ctor(6, 2, 0); +lean_ctor_set(x_13509, 0, x_153); +lean_ctor_set(x_13509, 1, x_11919); +x_13510 = lean_ctor_get(x_1, 0); +lean_inc(x_13510); +x_13511 = l_Lean_IR_ToIR_bindVar(x_13510, x_13508, x_4, x_5, x_13506); +x_13512 = lean_ctor_get(x_13511, 0); +lean_inc(x_13512); +x_13513 = lean_ctor_get(x_13511, 1); +lean_inc(x_13513); +lean_dec(x_13511); +x_13514 = lean_ctor_get(x_13512, 0); +lean_inc(x_13514); +x_13515 = lean_ctor_get(x_13512, 1); +lean_inc(x_13515); +lean_dec(x_13512); +x_13516 = lean_ctor_get(x_1, 2); +lean_inc(x_13516); +lean_dec(x_1); +lean_inc(x_5); +lean_inc(x_4); +x_13517 = l_Lean_IR_ToIR_lowerType(x_13516, x_13515, x_4, x_5, x_13513); +if (lean_obj_tag(x_13517) == 0) +{ +lean_object* x_13518; lean_object* x_13519; lean_object* x_13520; lean_object* x_13521; lean_object* x_13522; +x_13518 = lean_ctor_get(x_13517, 0); +lean_inc(x_13518); +x_13519 = lean_ctor_get(x_13517, 1); +lean_inc(x_13519); +lean_dec(x_13517); +x_13520 = lean_ctor_get(x_13518, 0); +lean_inc(x_13520); +x_13521 = lean_ctor_get(x_13518, 1); +lean_inc(x_13521); +lean_dec(x_13518); +x_13522 = l_Lean_IR_ToIR_lowerLet___lambda__1(x_2, x_13514, x_13509, x_13520, x_13521, x_4, x_5, x_13519); +return x_13522; +} +else +{ +lean_object* x_13523; lean_object* x_13524; lean_object* x_13525; lean_object* x_13526; 
+lean_dec(x_13514); +lean_dec(x_13509); +lean_dec(x_5); +lean_dec(x_4); +lean_dec(x_2); +x_13523 = lean_ctor_get(x_13517, 0); +lean_inc(x_13523); +x_13524 = lean_ctor_get(x_13517, 1); +lean_inc(x_13524); +if (lean_is_exclusive(x_13517)) { + lean_ctor_release(x_13517, 0); + lean_ctor_release(x_13517, 1); + x_13525 = x_13517; +} else { + lean_dec_ref(x_13517); + x_13525 = lean_box(0); +} +if (lean_is_scalar(x_13525)) { + x_13526 = lean_alloc_ctor(1, 2, 0); +} else { + x_13526 = x_13525; +} +lean_ctor_set(x_13526, 0, x_13523); +lean_ctor_set(x_13526, 1, x_13524); +return x_13526; +} +} +else +{ +lean_object* x_13527; lean_object* x_13528; lean_object* x_13529; lean_object* x_13530; lean_object* x_13531; +lean_dec(x_11919); +lean_dec(x_153); +lean_dec(x_5); +lean_dec(x_4); +lean_dec(x_2); +lean_dec(x_1); +x_13527 = lean_ctor_get(x_13505, 1); +lean_inc(x_13527); +if (lean_is_exclusive(x_13505)) { + lean_ctor_release(x_13505, 0); + lean_ctor_release(x_13505, 1); + x_13528 = x_13505; +} else { + lean_dec_ref(x_13505); + x_13528 = lean_box(0); +} +x_13529 = lean_ctor_get(x_13507, 0); +lean_inc(x_13529); +lean_dec(x_13507); +if (lean_is_scalar(x_13528)) { + x_13530 = lean_alloc_ctor(0, 2, 0); +} else { + x_13530 = x_13528; +} +lean_ctor_set(x_13530, 0, x_13529); +lean_ctor_set(x_13530, 1, x_13527); +if (lean_is_scalar(x_13265)) { + x_13531 = lean_alloc_ctor(0, 2, 0); +} else { + x_13531 = x_13265; +} +lean_ctor_set(x_13531, 0, x_13530); +lean_ctor_set(x_13531, 1, x_13506); +return x_13531; +} +} +} +case 4: +{ +lean_object* x_13633; lean_object* x_13634; uint8_t x_13635; +lean_dec(x_13266); +lean_dec(x_13265); +lean_dec(x_11911); +lean_dec(x_11910); +if (lean_is_exclusive(x_13271)) { + lean_ctor_release(x_13271, 0); + x_13633 = x_13271; +} else { + lean_dec_ref(x_13271); + x_13633 = lean_box(0); +} +x_13634 = l_Lean_IR_ToIR_lowerLet___closed__20; +x_13635 = lean_name_eq(x_153, x_13634); +if (x_13635 == 0) +{ +uint8_t x_13636; lean_object* x_13637; lean_object* x_13638; lean_object* x_13639; lean_object* x_13640; lean_object* x_13641; lean_object* x_13642; lean_object* x_13643; lean_object* x_13644; lean_object* x_13645; +lean_dec(x_11919); +lean_dec(x_2); +lean_dec(x_1); +x_13636 = 1; +x_13637 = l_Lean_IR_ToIR_lowerLet___closed__14; +x_13638 = l_Lean_Name_toString(x_153, x_13636, x_13637); +if (lean_is_scalar(x_13633)) { + x_13639 = lean_alloc_ctor(3, 1, 0); +} else { + x_13639 = x_13633; + lean_ctor_set_tag(x_13639, 3); +} +lean_ctor_set(x_13639, 0, x_13638); +x_13640 = l_Lean_IR_ToIR_lowerLet___closed__22; +x_13641 = lean_alloc_ctor(5, 2, 0); +lean_ctor_set(x_13641, 0, x_13640); +lean_ctor_set(x_13641, 1, x_13639); +x_13642 = l_Lean_IR_ToIR_lowerLet___closed__24; +x_13643 = lean_alloc_ctor(5, 2, 0); +lean_ctor_set(x_13643, 0, x_13641); +lean_ctor_set(x_13643, 1, x_13642); +x_13644 = l_Lean_MessageData_ofFormat(x_13643); +x_13645 = l_Lean_throwError___at_Lean_IR_ToIR_lowerLet___spec__1(x_13644, x_13261, x_4, x_5, x_13264); +lean_dec(x_5); +lean_dec(x_4); +lean_dec(x_13261); +return x_13645; +} +else +{ +lean_object* x_13646; lean_object* x_13647; lean_object* x_13648; +lean_dec(x_13633); +lean_dec(x_153); +x_13646 = l_Lean_IR_instInhabitedArg; +x_13647 = lean_unsigned_to_nat(2u); +x_13648 = lean_array_get(x_13646, x_11919, x_13647); +lean_dec(x_11919); +if (lean_obj_tag(x_13648) == 0) +{ +lean_object* x_13649; lean_object* x_13650; lean_object* x_13651; lean_object* x_13652; lean_object* x_13653; lean_object* x_13654; lean_object* x_13655; +x_13649 = lean_ctor_get(x_13648, 0); +lean_inc(x_13649); 
+lean_dec(x_13648); +x_13650 = lean_ctor_get(x_1, 0); +lean_inc(x_13650); +lean_dec(x_1); +x_13651 = l_Lean_IR_ToIR_bindVarToVarId(x_13650, x_13649, x_13261, x_4, x_5, x_13264); +x_13652 = lean_ctor_get(x_13651, 0); +lean_inc(x_13652); +x_13653 = lean_ctor_get(x_13651, 1); +lean_inc(x_13653); +lean_dec(x_13651); +x_13654 = lean_ctor_get(x_13652, 1); +lean_inc(x_13654); +lean_dec(x_13652); +x_13655 = l_Lean_IR_ToIR_lowerCode(x_2, x_13654, x_4, x_5, x_13653); +return x_13655; +} +else +{ +lean_object* x_13656; lean_object* x_13657; lean_object* x_13658; lean_object* x_13659; lean_object* x_13660; lean_object* x_13661; +x_13656 = lean_ctor_get(x_1, 0); +lean_inc(x_13656); +lean_dec(x_1); +x_13657 = l_Lean_IR_ToIR_bindErased(x_13656, x_13261, x_4, x_5, x_13264); +x_13658 = lean_ctor_get(x_13657, 0); +lean_inc(x_13658); +x_13659 = lean_ctor_get(x_13657, 1); +lean_inc(x_13659); +lean_dec(x_13657); +x_13660 = lean_ctor_get(x_13658, 1); +lean_inc(x_13660); +lean_dec(x_13658); +x_13661 = l_Lean_IR_ToIR_lowerCode(x_2, x_13660, x_4, x_5, x_13659); +return x_13661; +} +} +} +case 5: +{ +lean_object* x_13662; lean_object* x_13663; +lean_dec(x_13271); +lean_dec(x_13266); +lean_dec(x_13265); +lean_dec(x_11919); +lean_dec(x_11911); +lean_dec(x_11910); +lean_dec(x_153); +lean_dec(x_2); +lean_dec(x_1); +x_13662 = l_Lean_IR_ToIR_lowerLet___closed__26; +x_13663 = l_panic___at_Lean_IR_ToIR_lowerAlt_loop___spec__1(x_13662, x_13261, x_4, x_5, x_13264); +return x_13663; +} +case 6: +{ +lean_object* x_13664; uint8_t x_13665; +x_13664 = lean_ctor_get(x_13271, 0); +lean_inc(x_13664); +lean_dec(x_13271); +lean_inc(x_153); +x_13665 = l_Lean_isExtern(x_13266, x_153); +if (x_13665 == 0) +{ +lean_object* x_13666; +lean_dec(x_13265); +lean_dec(x_11919); +lean_inc(x_5); +lean_inc(x_4); +x_13666 = l_Lean_IR_ToIR_getCtorInfo(x_153, x_13261, x_4, x_5, x_13264); +if (lean_obj_tag(x_13666) == 0) +{ +lean_object* x_13667; lean_object* x_13668; lean_object* x_13669; lean_object* x_13670; lean_object* x_13671; lean_object* x_13672; lean_object* x_13673; lean_object* x_13674; lean_object* x_13675; lean_object* x_13676; lean_object* x_13677; lean_object* x_13678; lean_object* x_13679; lean_object* x_13680; lean_object* x_13681; lean_object* x_13682; lean_object* x_13683; lean_object* x_13684; lean_object* x_13685; lean_object* x_13686; +x_13667 = lean_ctor_get(x_13666, 0); +lean_inc(x_13667); +x_13668 = lean_ctor_get(x_13667, 0); +lean_inc(x_13668); +x_13669 = lean_ctor_get(x_13666, 1); +lean_inc(x_13669); +lean_dec(x_13666); +x_13670 = lean_ctor_get(x_13667, 1); +lean_inc(x_13670); +lean_dec(x_13667); +x_13671 = lean_ctor_get(x_13668, 0); +lean_inc(x_13671); +x_13672 = lean_ctor_get(x_13668, 1); +lean_inc(x_13672); +lean_dec(x_13668); +x_13673 = lean_ctor_get(x_13664, 3); +lean_inc(x_13673); +lean_dec(x_13664); +x_13674 = lean_array_get_size(x_11910); +x_13675 = l_Array_extract___rarg(x_11910, x_13673, x_13674); +lean_dec(x_13674); +lean_dec(x_11910); +x_13676 = lean_array_get_size(x_13672); +x_13677 = lean_unsigned_to_nat(0u); +x_13678 = lean_unsigned_to_nat(1u); +if (lean_is_scalar(x_11911)) { + x_13679 = lean_alloc_ctor(0, 3, 0); +} else { + x_13679 = x_11911; + lean_ctor_set_tag(x_13679, 0); +} +lean_ctor_set(x_13679, 0, x_13677); +lean_ctor_set(x_13679, 1, x_13676); +lean_ctor_set(x_13679, 2, x_13678); +x_13680 = l_Lean_IR_ToIR_lowerLet___closed__27; +x_13681 = l_Std_Range_forIn_x27_loop___at_Lean_IR_ToIR_lowerLet___spec__6(x_13672, x_13675, x_13679, x_13679, x_13680, x_13677, lean_box(0), lean_box(0), x_13670, x_4, x_5, 
x_13669); +lean_dec(x_13679); +x_13682 = lean_ctor_get(x_13681, 0); +lean_inc(x_13682); +x_13683 = lean_ctor_get(x_13681, 1); +lean_inc(x_13683); +lean_dec(x_13681); +x_13684 = lean_ctor_get(x_13682, 0); +lean_inc(x_13684); +x_13685 = lean_ctor_get(x_13682, 1); +lean_inc(x_13685); +lean_dec(x_13682); +x_13686 = l_Lean_IR_ToIR_lowerLet___lambda__4(x_1, x_2, x_13671, x_13672, x_13675, x_13684, x_13685, x_4, x_5, x_13683); +lean_dec(x_13675); +lean_dec(x_13672); +return x_13686; +} +else +{ +lean_object* x_13687; lean_object* x_13688; lean_object* x_13689; lean_object* x_13690; +lean_dec(x_13664); +lean_dec(x_11911); +lean_dec(x_11910); +lean_dec(x_5); +lean_dec(x_4); +lean_dec(x_2); +lean_dec(x_1); +x_13687 = lean_ctor_get(x_13666, 0); +lean_inc(x_13687); +x_13688 = lean_ctor_get(x_13666, 1); +lean_inc(x_13688); +if (lean_is_exclusive(x_13666)) { + lean_ctor_release(x_13666, 0); + lean_ctor_release(x_13666, 1); + x_13689 = x_13666; +} else { + lean_dec_ref(x_13666); + x_13689 = lean_box(0); +} +if (lean_is_scalar(x_13689)) { + x_13690 = lean_alloc_ctor(1, 2, 0); +} else { + x_13690 = x_13689; +} +lean_ctor_set(x_13690, 0, x_13687); +lean_ctor_set(x_13690, 1, x_13688); +return x_13690; +} +} +else +{ +lean_object* x_13691; lean_object* x_13692; lean_object* x_13719; lean_object* x_13720; +lean_dec(x_13664); +lean_dec(x_11911); +lean_dec(x_11910); +lean_inc(x_153); +x_13719 = l_Lean_Compiler_LCNF_getMonoDecl_x3f(x_153, x_4, x_5, x_13264); +x_13720 = lean_ctor_get(x_13719, 0); +lean_inc(x_13720); +if (lean_obj_tag(x_13720) == 0) +{ +lean_object* x_13721; lean_object* x_13722; lean_object* x_13723; +x_13721 = lean_ctor_get(x_13719, 1); +lean_inc(x_13721); +lean_dec(x_13719); +x_13722 = lean_box(0); +x_13723 = lean_alloc_ctor(0, 2, 0); +lean_ctor_set(x_13723, 0, x_13722); +lean_ctor_set(x_13723, 1, x_13261); +x_13691 = x_13723; +x_13692 = x_13721; +goto block_13718; +} +else +{ +lean_object* x_13724; lean_object* x_13725; lean_object* x_13726; lean_object* x_13727; lean_object* x_13728; lean_object* x_13729; lean_object* x_13730; uint8_t x_13731; +x_13724 = lean_ctor_get(x_13719, 1); +lean_inc(x_13724); +if (lean_is_exclusive(x_13719)) { + lean_ctor_release(x_13719, 0); + lean_ctor_release(x_13719, 1); + x_13725 = x_13719; +} else { + lean_dec_ref(x_13719); + x_13725 = lean_box(0); +} +x_13726 = lean_ctor_get(x_13720, 0); +lean_inc(x_13726); +if (lean_is_exclusive(x_13720)) { + lean_ctor_release(x_13720, 0); + x_13727 = x_13720; +} else { + lean_dec_ref(x_13720); + x_13727 = lean_box(0); +} +x_13728 = lean_array_get_size(x_11919); +x_13729 = lean_ctor_get(x_13726, 3); +lean_inc(x_13729); +lean_dec(x_13726); +x_13730 = lean_array_get_size(x_13729); +lean_dec(x_13729); +x_13731 = lean_nat_dec_lt(x_13728, x_13730); +if (x_13731 == 0) +{ +uint8_t x_13732; +x_13732 = lean_nat_dec_eq(x_13728, x_13730); +if (x_13732 == 0) +{ +lean_object* x_13733; lean_object* x_13734; lean_object* x_13735; lean_object* x_13736; lean_object* x_13737; lean_object* x_13738; lean_object* x_13739; lean_object* x_13740; lean_object* x_13741; lean_object* x_13742; lean_object* x_13743; lean_object* x_13744; lean_object* x_13745; lean_object* x_13746; lean_object* x_13747; lean_object* x_13748; lean_object* x_13749; +x_13733 = lean_unsigned_to_nat(0u); +x_13734 = l_Array_extract___rarg(x_11919, x_13733, x_13730); +x_13735 = l_Array_extract___rarg(x_11919, x_13730, x_13728); +lean_dec(x_13728); +lean_inc(x_153); +if (lean_is_scalar(x_13725)) { + x_13736 = lean_alloc_ctor(6, 2, 0); +} else { + x_13736 = x_13725; + 
lean_ctor_set_tag(x_13736, 6); +} +lean_ctor_set(x_13736, 0, x_153); +lean_ctor_set(x_13736, 1, x_13734); +x_13737 = lean_ctor_get(x_1, 0); +lean_inc(x_13737); +x_13738 = l_Lean_IR_ToIR_bindVar(x_13737, x_13261, x_4, x_5, x_13724); +x_13739 = lean_ctor_get(x_13738, 0); +lean_inc(x_13739); +x_13740 = lean_ctor_get(x_13738, 1); +lean_inc(x_13740); +lean_dec(x_13738); +x_13741 = lean_ctor_get(x_13739, 0); +lean_inc(x_13741); +x_13742 = lean_ctor_get(x_13739, 1); +lean_inc(x_13742); +lean_dec(x_13739); +x_13743 = l_Lean_IR_ToIR_newVar(x_13742, x_4, x_5, x_13740); +x_13744 = lean_ctor_get(x_13743, 0); +lean_inc(x_13744); +x_13745 = lean_ctor_get(x_13743, 1); +lean_inc(x_13745); +lean_dec(x_13743); +x_13746 = lean_ctor_get(x_13744, 0); +lean_inc(x_13746); +x_13747 = lean_ctor_get(x_13744, 1); +lean_inc(x_13747); +lean_dec(x_13744); +x_13748 = lean_ctor_get(x_1, 2); +lean_inc(x_13748); +lean_inc(x_5); +lean_inc(x_4); +x_13749 = l_Lean_IR_ToIR_lowerType(x_13748, x_13747, x_4, x_5, x_13745); +if (lean_obj_tag(x_13749) == 0) +{ +lean_object* x_13750; lean_object* x_13751; lean_object* x_13752; lean_object* x_13753; lean_object* x_13754; +x_13750 = lean_ctor_get(x_13749, 0); +lean_inc(x_13750); +x_13751 = lean_ctor_get(x_13749, 1); +lean_inc(x_13751); +lean_dec(x_13749); +x_13752 = lean_ctor_get(x_13750, 0); +lean_inc(x_13752); +x_13753 = lean_ctor_get(x_13750, 1); +lean_inc(x_13753); +lean_dec(x_13750); +lean_inc(x_5); +lean_inc(x_4); +lean_inc(x_2); +x_13754 = l_Lean_IR_ToIR_lowerLet___lambda__3(x_2, x_13746, x_13735, x_13741, x_13736, x_13752, x_13753, x_4, x_5, x_13751); +if (lean_obj_tag(x_13754) == 0) +{ +lean_object* x_13755; lean_object* x_13756; lean_object* x_13757; lean_object* x_13758; lean_object* x_13759; lean_object* x_13760; lean_object* x_13761; +x_13755 = lean_ctor_get(x_13754, 0); +lean_inc(x_13755); +x_13756 = lean_ctor_get(x_13754, 1); +lean_inc(x_13756); +lean_dec(x_13754); +x_13757 = lean_ctor_get(x_13755, 0); +lean_inc(x_13757); +x_13758 = lean_ctor_get(x_13755, 1); +lean_inc(x_13758); +if (lean_is_exclusive(x_13755)) { + lean_ctor_release(x_13755, 0); + lean_ctor_release(x_13755, 1); + x_13759 = x_13755; +} else { + lean_dec_ref(x_13755); + x_13759 = lean_box(0); +} +if (lean_is_scalar(x_13727)) { + x_13760 = lean_alloc_ctor(1, 1, 0); +} else { + x_13760 = x_13727; +} +lean_ctor_set(x_13760, 0, x_13757); +if (lean_is_scalar(x_13759)) { + x_13761 = lean_alloc_ctor(0, 2, 0); +} else { + x_13761 = x_13759; +} +lean_ctor_set(x_13761, 0, x_13760); +lean_ctor_set(x_13761, 1, x_13758); +x_13691 = x_13761; +x_13692 = x_13756; +goto block_13718; +} +else +{ +lean_object* x_13762; lean_object* x_13763; lean_object* x_13764; lean_object* x_13765; +lean_dec(x_13727); +lean_dec(x_13265); +lean_dec(x_11919); +lean_dec(x_153); +lean_dec(x_5); +lean_dec(x_4); +lean_dec(x_2); +lean_dec(x_1); +x_13762 = lean_ctor_get(x_13754, 0); +lean_inc(x_13762); +x_13763 = lean_ctor_get(x_13754, 1); +lean_inc(x_13763); +if (lean_is_exclusive(x_13754)) { + lean_ctor_release(x_13754, 0); + lean_ctor_release(x_13754, 1); + x_13764 = x_13754; +} else { + lean_dec_ref(x_13754); + x_13764 = lean_box(0); +} +if (lean_is_scalar(x_13764)) { + x_13765 = lean_alloc_ctor(1, 2, 0); +} else { + x_13765 = x_13764; +} +lean_ctor_set(x_13765, 0, x_13762); +lean_ctor_set(x_13765, 1, x_13763); +return x_13765; +} +} +else +{ +lean_object* x_13766; lean_object* x_13767; lean_object* x_13768; lean_object* x_13769; +lean_dec(x_13746); +lean_dec(x_13741); +lean_dec(x_13736); +lean_dec(x_13735); +lean_dec(x_13727); 
+lean_dec(x_13265); +lean_dec(x_11919); +lean_dec(x_153); +lean_dec(x_5); +lean_dec(x_4); +lean_dec(x_2); +lean_dec(x_1); +x_13766 = lean_ctor_get(x_13749, 0); +lean_inc(x_13766); +x_13767 = lean_ctor_get(x_13749, 1); +lean_inc(x_13767); +if (lean_is_exclusive(x_13749)) { + lean_ctor_release(x_13749, 0); + lean_ctor_release(x_13749, 1); + x_13768 = x_13749; +} else { + lean_dec_ref(x_13749); + x_13768 = lean_box(0); +} +if (lean_is_scalar(x_13768)) { + x_13769 = lean_alloc_ctor(1, 2, 0); +} else { + x_13769 = x_13768; +} +lean_ctor_set(x_13769, 0, x_13766); +lean_ctor_set(x_13769, 1, x_13767); +return x_13769; +} +} +else +{ +lean_object* x_13770; lean_object* x_13771; lean_object* x_13772; lean_object* x_13773; lean_object* x_13774; lean_object* x_13775; lean_object* x_13776; lean_object* x_13777; lean_object* x_13778; +lean_dec(x_13730); +lean_dec(x_13728); +lean_inc(x_11919); +lean_inc(x_153); +if (lean_is_scalar(x_13725)) { + x_13770 = lean_alloc_ctor(6, 2, 0); +} else { + x_13770 = x_13725; + lean_ctor_set_tag(x_13770, 6); +} +lean_ctor_set(x_13770, 0, x_153); +lean_ctor_set(x_13770, 1, x_11919); +x_13771 = lean_ctor_get(x_1, 0); +lean_inc(x_13771); +x_13772 = l_Lean_IR_ToIR_bindVar(x_13771, x_13261, x_4, x_5, x_13724); +x_13773 = lean_ctor_get(x_13772, 0); +lean_inc(x_13773); +x_13774 = lean_ctor_get(x_13772, 1); +lean_inc(x_13774); +lean_dec(x_13772); +x_13775 = lean_ctor_get(x_13773, 0); +lean_inc(x_13775); +x_13776 = lean_ctor_get(x_13773, 1); +lean_inc(x_13776); +lean_dec(x_13773); +x_13777 = lean_ctor_get(x_1, 2); +lean_inc(x_13777); +lean_inc(x_5); +lean_inc(x_4); +x_13778 = l_Lean_IR_ToIR_lowerType(x_13777, x_13776, x_4, x_5, x_13774); +if (lean_obj_tag(x_13778) == 0) +{ +lean_object* x_13779; lean_object* x_13780; lean_object* x_13781; lean_object* x_13782; lean_object* x_13783; +x_13779 = lean_ctor_get(x_13778, 0); +lean_inc(x_13779); +x_13780 = lean_ctor_get(x_13778, 1); +lean_inc(x_13780); +lean_dec(x_13778); +x_13781 = lean_ctor_get(x_13779, 0); +lean_inc(x_13781); +x_13782 = lean_ctor_get(x_13779, 1); +lean_inc(x_13782); +lean_dec(x_13779); +lean_inc(x_5); +lean_inc(x_4); +lean_inc(x_2); +x_13783 = l_Lean_IR_ToIR_lowerLet___lambda__1(x_2, x_13775, x_13770, x_13781, x_13782, x_4, x_5, x_13780); +if (lean_obj_tag(x_13783) == 0) +{ +lean_object* x_13784; lean_object* x_13785; lean_object* x_13786; lean_object* x_13787; lean_object* x_13788; lean_object* x_13789; lean_object* x_13790; +x_13784 = lean_ctor_get(x_13783, 0); +lean_inc(x_13784); +x_13785 = lean_ctor_get(x_13783, 1); +lean_inc(x_13785); +lean_dec(x_13783); +x_13786 = lean_ctor_get(x_13784, 0); +lean_inc(x_13786); +x_13787 = lean_ctor_get(x_13784, 1); +lean_inc(x_13787); +if (lean_is_exclusive(x_13784)) { + lean_ctor_release(x_13784, 0); + lean_ctor_release(x_13784, 1); + x_13788 = x_13784; +} else { + lean_dec_ref(x_13784); + x_13788 = lean_box(0); +} +if (lean_is_scalar(x_13727)) { + x_13789 = lean_alloc_ctor(1, 1, 0); +} else { + x_13789 = x_13727; +} +lean_ctor_set(x_13789, 0, x_13786); +if (lean_is_scalar(x_13788)) { + x_13790 = lean_alloc_ctor(0, 2, 0); +} else { + x_13790 = x_13788; +} +lean_ctor_set(x_13790, 0, x_13789); +lean_ctor_set(x_13790, 1, x_13787); +x_13691 = x_13790; +x_13692 = x_13785; +goto block_13718; +} +else +{ +lean_object* x_13791; lean_object* x_13792; lean_object* x_13793; lean_object* x_13794; +lean_dec(x_13727); +lean_dec(x_13265); +lean_dec(x_11919); +lean_dec(x_153); +lean_dec(x_5); +lean_dec(x_4); +lean_dec(x_2); +lean_dec(x_1); +x_13791 = lean_ctor_get(x_13783, 0); 
+lean_inc(x_13791); +x_13792 = lean_ctor_get(x_13783, 1); +lean_inc(x_13792); +if (lean_is_exclusive(x_13783)) { + lean_ctor_release(x_13783, 0); + lean_ctor_release(x_13783, 1); + x_13793 = x_13783; +} else { + lean_dec_ref(x_13783); + x_13793 = lean_box(0); +} +if (lean_is_scalar(x_13793)) { + x_13794 = lean_alloc_ctor(1, 2, 0); +} else { + x_13794 = x_13793; +} +lean_ctor_set(x_13794, 0, x_13791); +lean_ctor_set(x_13794, 1, x_13792); +return x_13794; +} +} +else +{ +lean_object* x_13795; lean_object* x_13796; lean_object* x_13797; lean_object* x_13798; +lean_dec(x_13775); +lean_dec(x_13770); +lean_dec(x_13727); +lean_dec(x_13265); +lean_dec(x_11919); +lean_dec(x_153); +lean_dec(x_5); +lean_dec(x_4); +lean_dec(x_2); +lean_dec(x_1); +x_13795 = lean_ctor_get(x_13778, 0); +lean_inc(x_13795); +x_13796 = lean_ctor_get(x_13778, 1); +lean_inc(x_13796); +if (lean_is_exclusive(x_13778)) { + lean_ctor_release(x_13778, 0); + lean_ctor_release(x_13778, 1); + x_13797 = x_13778; +} else { + lean_dec_ref(x_13778); + x_13797 = lean_box(0); +} +if (lean_is_scalar(x_13797)) { + x_13798 = lean_alloc_ctor(1, 2, 0); +} else { + x_13798 = x_13797; +} +lean_ctor_set(x_13798, 0, x_13795); +lean_ctor_set(x_13798, 1, x_13796); +return x_13798; +} +} +} +else +{ +lean_object* x_13799; lean_object* x_13800; lean_object* x_13801; lean_object* x_13802; lean_object* x_13803; lean_object* x_13804; lean_object* x_13805; lean_object* x_13806; lean_object* x_13807; +lean_dec(x_13730); +lean_dec(x_13728); +lean_inc(x_11919); +lean_inc(x_153); +if (lean_is_scalar(x_13725)) { + x_13799 = lean_alloc_ctor(7, 2, 0); +} else { + x_13799 = x_13725; + lean_ctor_set_tag(x_13799, 7); +} +lean_ctor_set(x_13799, 0, x_153); +lean_ctor_set(x_13799, 1, x_11919); +x_13800 = lean_ctor_get(x_1, 0); +lean_inc(x_13800); +x_13801 = l_Lean_IR_ToIR_bindVar(x_13800, x_13261, x_4, x_5, x_13724); +x_13802 = lean_ctor_get(x_13801, 0); +lean_inc(x_13802); +x_13803 = lean_ctor_get(x_13801, 1); +lean_inc(x_13803); +lean_dec(x_13801); +x_13804 = lean_ctor_get(x_13802, 0); +lean_inc(x_13804); +x_13805 = lean_ctor_get(x_13802, 1); +lean_inc(x_13805); +lean_dec(x_13802); +x_13806 = lean_box(7); +lean_inc(x_5); +lean_inc(x_4); +lean_inc(x_2); +x_13807 = l_Lean_IR_ToIR_lowerLet___lambda__1(x_2, x_13804, x_13799, x_13806, x_13805, x_4, x_5, x_13803); +if (lean_obj_tag(x_13807) == 0) +{ +lean_object* x_13808; lean_object* x_13809; lean_object* x_13810; lean_object* x_13811; lean_object* x_13812; lean_object* x_13813; lean_object* x_13814; +x_13808 = lean_ctor_get(x_13807, 0); +lean_inc(x_13808); +x_13809 = lean_ctor_get(x_13807, 1); +lean_inc(x_13809); +lean_dec(x_13807); +x_13810 = lean_ctor_get(x_13808, 0); +lean_inc(x_13810); +x_13811 = lean_ctor_get(x_13808, 1); +lean_inc(x_13811); +if (lean_is_exclusive(x_13808)) { + lean_ctor_release(x_13808, 0); + lean_ctor_release(x_13808, 1); + x_13812 = x_13808; +} else { + lean_dec_ref(x_13808); + x_13812 = lean_box(0); +} +if (lean_is_scalar(x_13727)) { + x_13813 = lean_alloc_ctor(1, 1, 0); +} else { + x_13813 = x_13727; +} +lean_ctor_set(x_13813, 0, x_13810); +if (lean_is_scalar(x_13812)) { + x_13814 = lean_alloc_ctor(0, 2, 0); +} else { + x_13814 = x_13812; +} +lean_ctor_set(x_13814, 0, x_13813); +lean_ctor_set(x_13814, 1, x_13811); +x_13691 = x_13814; +x_13692 = x_13809; +goto block_13718; +} +else +{ +lean_object* x_13815; lean_object* x_13816; lean_object* x_13817; lean_object* x_13818; +lean_dec(x_13727); +lean_dec(x_13265); +lean_dec(x_11919); +lean_dec(x_153); +lean_dec(x_5); +lean_dec(x_4); +lean_dec(x_2); 
+lean_dec(x_1); +x_13815 = lean_ctor_get(x_13807, 0); +lean_inc(x_13815); +x_13816 = lean_ctor_get(x_13807, 1); +lean_inc(x_13816); +if (lean_is_exclusive(x_13807)) { + lean_ctor_release(x_13807, 0); + lean_ctor_release(x_13807, 1); + x_13817 = x_13807; +} else { + lean_dec_ref(x_13807); + x_13817 = lean_box(0); +} +if (lean_is_scalar(x_13817)) { + x_13818 = lean_alloc_ctor(1, 2, 0); +} else { + x_13818 = x_13817; +} +lean_ctor_set(x_13818, 0, x_13815); +lean_ctor_set(x_13818, 1, x_13816); +return x_13818; +} +} +} +block_13718: +{ +lean_object* x_13693; +x_13693 = lean_ctor_get(x_13691, 0); +lean_inc(x_13693); +if (lean_obj_tag(x_13693) == 0) +{ +lean_object* x_13694; lean_object* x_13695; lean_object* x_13696; lean_object* x_13697; lean_object* x_13698; lean_object* x_13699; lean_object* x_13700; lean_object* x_13701; lean_object* x_13702; lean_object* x_13703; +lean_dec(x_13265); +x_13694 = lean_ctor_get(x_13691, 1); +lean_inc(x_13694); +lean_dec(x_13691); +x_13695 = lean_alloc_ctor(6, 2, 0); +lean_ctor_set(x_13695, 0, x_153); +lean_ctor_set(x_13695, 1, x_11919); +x_13696 = lean_ctor_get(x_1, 0); +lean_inc(x_13696); +x_13697 = l_Lean_IR_ToIR_bindVar(x_13696, x_13694, x_4, x_5, x_13692); +x_13698 = lean_ctor_get(x_13697, 0); +lean_inc(x_13698); +x_13699 = lean_ctor_get(x_13697, 1); +lean_inc(x_13699); +lean_dec(x_13697); +x_13700 = lean_ctor_get(x_13698, 0); +lean_inc(x_13700); +x_13701 = lean_ctor_get(x_13698, 1); +lean_inc(x_13701); +lean_dec(x_13698); +x_13702 = lean_ctor_get(x_1, 2); +lean_inc(x_13702); +lean_dec(x_1); +lean_inc(x_5); +lean_inc(x_4); +x_13703 = l_Lean_IR_ToIR_lowerType(x_13702, x_13701, x_4, x_5, x_13699); +if (lean_obj_tag(x_13703) == 0) +{ +lean_object* x_13704; lean_object* x_13705; lean_object* x_13706; lean_object* x_13707; lean_object* x_13708; +x_13704 = lean_ctor_get(x_13703, 0); +lean_inc(x_13704); +x_13705 = lean_ctor_get(x_13703, 1); +lean_inc(x_13705); +lean_dec(x_13703); +x_13706 = lean_ctor_get(x_13704, 0); +lean_inc(x_13706); +x_13707 = lean_ctor_get(x_13704, 1); +lean_inc(x_13707); +lean_dec(x_13704); +x_13708 = l_Lean_IR_ToIR_lowerLet___lambda__1(x_2, x_13700, x_13695, x_13706, x_13707, x_4, x_5, x_13705); +return x_13708; +} +else +{ +lean_object* x_13709; lean_object* x_13710; lean_object* x_13711; lean_object* x_13712; +lean_dec(x_13700); +lean_dec(x_13695); +lean_dec(x_5); +lean_dec(x_4); +lean_dec(x_2); +x_13709 = lean_ctor_get(x_13703, 0); +lean_inc(x_13709); +x_13710 = lean_ctor_get(x_13703, 1); +lean_inc(x_13710); +if (lean_is_exclusive(x_13703)) { + lean_ctor_release(x_13703, 0); + lean_ctor_release(x_13703, 1); + x_13711 = x_13703; +} else { + lean_dec_ref(x_13703); + x_13711 = lean_box(0); +} +if (lean_is_scalar(x_13711)) { + x_13712 = lean_alloc_ctor(1, 2, 0); +} else { + x_13712 = x_13711; +} +lean_ctor_set(x_13712, 0, x_13709); +lean_ctor_set(x_13712, 1, x_13710); +return x_13712; +} +} +else +{ +lean_object* x_13713; lean_object* x_13714; lean_object* x_13715; lean_object* x_13716; lean_object* x_13717; +lean_dec(x_11919); +lean_dec(x_153); +lean_dec(x_5); +lean_dec(x_4); +lean_dec(x_2); +lean_dec(x_1); +x_13713 = lean_ctor_get(x_13691, 1); +lean_inc(x_13713); +if (lean_is_exclusive(x_13691)) { + lean_ctor_release(x_13691, 0); + lean_ctor_release(x_13691, 1); + x_13714 = x_13691; +} else { + lean_dec_ref(x_13691); + x_13714 = lean_box(0); +} +x_13715 = lean_ctor_get(x_13693, 0); +lean_inc(x_13715); +lean_dec(x_13693); +if (lean_is_scalar(x_13714)) { + x_13716 = lean_alloc_ctor(0, 2, 0); +} else { + x_13716 = x_13714; +} 
+lean_ctor_set(x_13716, 0, x_13715); +lean_ctor_set(x_13716, 1, x_13713); +if (lean_is_scalar(x_13265)) { + x_13717 = lean_alloc_ctor(0, 2, 0); +} else { + x_13717 = x_13265; +} +lean_ctor_set(x_13717, 0, x_13716); +lean_ctor_set(x_13717, 1, x_13692); +return x_13717; +} +} +} +} +default: +{ +lean_object* x_13819; uint8_t x_13820; lean_object* x_13821; lean_object* x_13822; lean_object* x_13823; lean_object* x_13824; lean_object* x_13825; lean_object* x_13826; lean_object* x_13827; lean_object* x_13828; lean_object* x_13829; +lean_dec(x_13266); +lean_dec(x_13265); +lean_dec(x_11919); +lean_dec(x_11911); +lean_dec(x_11910); +lean_dec(x_2); +lean_dec(x_1); +if (lean_is_exclusive(x_13271)) { + lean_ctor_release(x_13271, 0); + x_13819 = x_13271; +} else { + lean_dec_ref(x_13271); + x_13819 = lean_box(0); +} +x_13820 = 1; +x_13821 = l_Lean_IR_ToIR_lowerLet___closed__14; +x_13822 = l_Lean_Name_toString(x_153, x_13820, x_13821); +if (lean_is_scalar(x_13819)) { + x_13823 = lean_alloc_ctor(3, 1, 0); +} else { + x_13823 = x_13819; + lean_ctor_set_tag(x_13823, 3); +} +lean_ctor_set(x_13823, 0, x_13822); +x_13824 = l_Lean_IR_ToIR_lowerLet___closed__29; +x_13825 = lean_alloc_ctor(5, 2, 0); +lean_ctor_set(x_13825, 0, x_13824); +lean_ctor_set(x_13825, 1, x_13823); +x_13826 = l_Lean_IR_ToIR_lowerLet___closed__31; +x_13827 = lean_alloc_ctor(5, 2, 0); +lean_ctor_set(x_13827, 0, x_13825); +lean_ctor_set(x_13827, 1, x_13826); +x_13828 = l_Lean_MessageData_ofFormat(x_13827); +x_13829 = l_Lean_throwError___at_Lean_IR_ToIR_lowerLet___spec__1(x_13828, x_13261, x_4, x_5, x_13264); +lean_dec(x_5); +lean_dec(x_4); +lean_dec(x_13261); +return x_13829; +} +} +} +} +} +else +{ +uint8_t x_13830; +lean_dec(x_11919); +lean_dec(x_11911); +lean_dec(x_11910); +lean_dec(x_153); +lean_dec(x_5); +lean_dec(x_4); +lean_dec(x_2); +lean_dec(x_1); +x_13830 = !lean_is_exclusive(x_11921); +if (x_13830 == 0) +{ +lean_object* x_13831; lean_object* x_13832; lean_object* x_13833; +x_13831 = lean_ctor_get(x_11921, 0); +lean_dec(x_13831); +x_13832 = lean_ctor_get(x_11923, 0); +lean_inc(x_13832); +lean_dec(x_11923); +lean_ctor_set(x_11921, 0, x_13832); +if (lean_is_scalar(x_11917)) { + x_13833 = lean_alloc_ctor(0, 2, 0); +} else { + x_13833 = x_11917; +} +lean_ctor_set(x_13833, 0, x_11921); +lean_ctor_set(x_13833, 1, x_11922); +return x_13833; +} +else +{ +lean_object* x_13834; lean_object* x_13835; lean_object* x_13836; lean_object* x_13837; +x_13834 = lean_ctor_get(x_11921, 1); +lean_inc(x_13834); +lean_dec(x_11921); +x_13835 = lean_ctor_get(x_11923, 0); +lean_inc(x_13835); +lean_dec(x_11923); +x_13836 = lean_alloc_ctor(0, 2, 0); +lean_ctor_set(x_13836, 0, x_13835); +lean_ctor_set(x_13836, 1, x_13834); +if (lean_is_scalar(x_11917)) { + x_13837 = lean_alloc_ctor(0, 2, 0); +} else { + x_13837 = x_11917; +} +lean_ctor_set(x_13837, 0, x_13836); +lean_ctor_set(x_13837, 1, x_11922); +return x_13837; +} +} +} +} +else +{ +lean_object* x_14119; lean_object* x_14120; lean_object* x_14121; lean_object* x_14122; lean_object* x_14700; lean_object* x_14701; +x_14119 = lean_ctor_get(x_11915, 0); +x_14120 = lean_ctor_get(x_11915, 1); +lean_inc(x_14120); +lean_inc(x_14119); +lean_dec(x_11915); +lean_inc(x_153); +x_14700 = l_Lean_Compiler_LCNF_getMonoDecl_x3f(x_153, x_4, x_5, x_11916); +x_14701 = lean_ctor_get(x_14700, 0); +lean_inc(x_14701); +if (lean_obj_tag(x_14701) == 0) +{ +lean_object* x_14702; lean_object* x_14703; lean_object* x_14704; +x_14702 = lean_ctor_get(x_14700, 1); +lean_inc(x_14702); +lean_dec(x_14700); +x_14703 = lean_box(0); +x_14704 = 
lean_alloc_ctor(0, 2, 0); +lean_ctor_set(x_14704, 0, x_14703); +lean_ctor_set(x_14704, 1, x_14120); +x_14121 = x_14704; +x_14122 = x_14702; +goto block_14699; +} +else +{ +lean_object* x_14705; lean_object* x_14706; lean_object* x_14707; lean_object* x_14708; lean_object* x_14709; lean_object* x_14710; lean_object* x_14711; uint8_t x_14712; +x_14705 = lean_ctor_get(x_14700, 1); +lean_inc(x_14705); +if (lean_is_exclusive(x_14700)) { + lean_ctor_release(x_14700, 0); + lean_ctor_release(x_14700, 1); + x_14706 = x_14700; +} else { + lean_dec_ref(x_14700); + x_14706 = lean_box(0); +} +x_14707 = lean_ctor_get(x_14701, 0); +lean_inc(x_14707); +if (lean_is_exclusive(x_14701)) { + lean_ctor_release(x_14701, 0); + x_14708 = x_14701; +} else { + lean_dec_ref(x_14701); + x_14708 = lean_box(0); +} +x_14709 = lean_array_get_size(x_14119); +x_14710 = lean_ctor_get(x_14707, 3); +lean_inc(x_14710); +lean_dec(x_14707); +x_14711 = lean_array_get_size(x_14710); +lean_dec(x_14710); +x_14712 = lean_nat_dec_lt(x_14709, x_14711); +if (x_14712 == 0) +{ +uint8_t x_14713; +x_14713 = lean_nat_dec_eq(x_14709, x_14711); +if (x_14713 == 0) +{ +lean_object* x_14714; lean_object* x_14715; lean_object* x_14716; lean_object* x_14717; lean_object* x_14718; lean_object* x_14719; lean_object* x_14720; lean_object* x_14721; lean_object* x_14722; lean_object* x_14723; lean_object* x_14724; lean_object* x_14725; lean_object* x_14726; lean_object* x_14727; lean_object* x_14728; lean_object* x_14729; lean_object* x_14730; +x_14714 = lean_unsigned_to_nat(0u); +x_14715 = l_Array_extract___rarg(x_14119, x_14714, x_14711); +x_14716 = l_Array_extract___rarg(x_14119, x_14711, x_14709); +lean_dec(x_14709); +lean_inc(x_153); +if (lean_is_scalar(x_14706)) { + x_14717 = lean_alloc_ctor(6, 2, 0); +} else { + x_14717 = x_14706; + lean_ctor_set_tag(x_14717, 6); +} +lean_ctor_set(x_14717, 0, x_153); +lean_ctor_set(x_14717, 1, x_14715); +x_14718 = lean_ctor_get(x_1, 0); +lean_inc(x_14718); +x_14719 = l_Lean_IR_ToIR_bindVar(x_14718, x_14120, x_4, x_5, x_14705); +x_14720 = lean_ctor_get(x_14719, 0); +lean_inc(x_14720); +x_14721 = lean_ctor_get(x_14719, 1); +lean_inc(x_14721); +lean_dec(x_14719); +x_14722 = lean_ctor_get(x_14720, 0); +lean_inc(x_14722); +x_14723 = lean_ctor_get(x_14720, 1); +lean_inc(x_14723); +lean_dec(x_14720); +x_14724 = l_Lean_IR_ToIR_newVar(x_14723, x_4, x_5, x_14721); +x_14725 = lean_ctor_get(x_14724, 0); +lean_inc(x_14725); +x_14726 = lean_ctor_get(x_14724, 1); +lean_inc(x_14726); +lean_dec(x_14724); +x_14727 = lean_ctor_get(x_14725, 0); +lean_inc(x_14727); +x_14728 = lean_ctor_get(x_14725, 1); +lean_inc(x_14728); +lean_dec(x_14725); +x_14729 = lean_ctor_get(x_1, 2); +lean_inc(x_14729); +lean_inc(x_5); +lean_inc(x_4); +x_14730 = l_Lean_IR_ToIR_lowerType(x_14729, x_14728, x_4, x_5, x_14726); +if (lean_obj_tag(x_14730) == 0) +{ +lean_object* x_14731; lean_object* x_14732; lean_object* x_14733; lean_object* x_14734; lean_object* x_14735; +x_14731 = lean_ctor_get(x_14730, 0); +lean_inc(x_14731); +x_14732 = lean_ctor_get(x_14730, 1); +lean_inc(x_14732); +lean_dec(x_14730); +x_14733 = lean_ctor_get(x_14731, 0); +lean_inc(x_14733); +x_14734 = lean_ctor_get(x_14731, 1); +lean_inc(x_14734); +lean_dec(x_14731); +lean_inc(x_5); +lean_inc(x_4); +lean_inc(x_2); +x_14735 = l_Lean_IR_ToIR_lowerLet___lambda__3(x_2, x_14727, x_14716, x_14722, x_14717, x_14733, x_14734, x_4, x_5, x_14732); +if (lean_obj_tag(x_14735) == 0) +{ +lean_object* x_14736; lean_object* x_14737; lean_object* x_14738; lean_object* x_14739; lean_object* x_14740; 
lean_object* x_14741; lean_object* x_14742; +x_14736 = lean_ctor_get(x_14735, 0); +lean_inc(x_14736); +x_14737 = lean_ctor_get(x_14735, 1); +lean_inc(x_14737); +lean_dec(x_14735); +x_14738 = lean_ctor_get(x_14736, 0); +lean_inc(x_14738); +x_14739 = lean_ctor_get(x_14736, 1); +lean_inc(x_14739); +if (lean_is_exclusive(x_14736)) { + lean_ctor_release(x_14736, 0); + lean_ctor_release(x_14736, 1); + x_14740 = x_14736; +} else { + lean_dec_ref(x_14736); + x_14740 = lean_box(0); +} +if (lean_is_scalar(x_14708)) { + x_14741 = lean_alloc_ctor(1, 1, 0); +} else { + x_14741 = x_14708; +} +lean_ctor_set(x_14741, 0, x_14738); +if (lean_is_scalar(x_14740)) { + x_14742 = lean_alloc_ctor(0, 2, 0); +} else { + x_14742 = x_14740; +} +lean_ctor_set(x_14742, 0, x_14741); +lean_ctor_set(x_14742, 1, x_14739); +x_14121 = x_14742; +x_14122 = x_14737; +goto block_14699; +} +else +{ +lean_object* x_14743; lean_object* x_14744; lean_object* x_14745; lean_object* x_14746; +lean_dec(x_14708); +lean_dec(x_14119); +lean_dec(x_11917); +lean_dec(x_11911); +lean_dec(x_11910); +lean_dec(x_153); +lean_dec(x_5); +lean_dec(x_4); +lean_dec(x_2); +lean_dec(x_1); +x_14743 = lean_ctor_get(x_14735, 0); +lean_inc(x_14743); +x_14744 = lean_ctor_get(x_14735, 1); +lean_inc(x_14744); +if (lean_is_exclusive(x_14735)) { + lean_ctor_release(x_14735, 0); + lean_ctor_release(x_14735, 1); + x_14745 = x_14735; +} else { + lean_dec_ref(x_14735); + x_14745 = lean_box(0); +} +if (lean_is_scalar(x_14745)) { + x_14746 = lean_alloc_ctor(1, 2, 0); +} else { + x_14746 = x_14745; +} +lean_ctor_set(x_14746, 0, x_14743); +lean_ctor_set(x_14746, 1, x_14744); +return x_14746; +} +} +else +{ +lean_object* x_14747; lean_object* x_14748; lean_object* x_14749; lean_object* x_14750; +lean_dec(x_14727); +lean_dec(x_14722); +lean_dec(x_14717); +lean_dec(x_14716); +lean_dec(x_14708); +lean_dec(x_14119); +lean_dec(x_11917); +lean_dec(x_11911); +lean_dec(x_11910); +lean_dec(x_153); +lean_dec(x_5); +lean_dec(x_4); +lean_dec(x_2); +lean_dec(x_1); +x_14747 = lean_ctor_get(x_14730, 0); +lean_inc(x_14747); +x_14748 = lean_ctor_get(x_14730, 1); +lean_inc(x_14748); +if (lean_is_exclusive(x_14730)) { + lean_ctor_release(x_14730, 0); + lean_ctor_release(x_14730, 1); + x_14749 = x_14730; +} else { + lean_dec_ref(x_14730); + x_14749 = lean_box(0); +} +if (lean_is_scalar(x_14749)) { + x_14750 = lean_alloc_ctor(1, 2, 0); +} else { + x_14750 = x_14749; +} +lean_ctor_set(x_14750, 0, x_14747); +lean_ctor_set(x_14750, 1, x_14748); +return x_14750; +} +} +else +{ +lean_object* x_14751; lean_object* x_14752; lean_object* x_14753; lean_object* x_14754; lean_object* x_14755; lean_object* x_14756; lean_object* x_14757; lean_object* x_14758; lean_object* x_14759; +lean_dec(x_14711); +lean_dec(x_14709); +lean_inc(x_14119); +lean_inc(x_153); +if (lean_is_scalar(x_14706)) { + x_14751 = lean_alloc_ctor(6, 2, 0); +} else { + x_14751 = x_14706; + lean_ctor_set_tag(x_14751, 6); +} +lean_ctor_set(x_14751, 0, x_153); +lean_ctor_set(x_14751, 1, x_14119); +x_14752 = lean_ctor_get(x_1, 0); +lean_inc(x_14752); +x_14753 = l_Lean_IR_ToIR_bindVar(x_14752, x_14120, x_4, x_5, x_14705); +x_14754 = lean_ctor_get(x_14753, 0); +lean_inc(x_14754); +x_14755 = lean_ctor_get(x_14753, 1); +lean_inc(x_14755); +lean_dec(x_14753); +x_14756 = lean_ctor_get(x_14754, 0); +lean_inc(x_14756); +x_14757 = lean_ctor_get(x_14754, 1); +lean_inc(x_14757); +lean_dec(x_14754); +x_14758 = lean_ctor_get(x_1, 2); +lean_inc(x_14758); +lean_inc(x_5); +lean_inc(x_4); +x_14759 = l_Lean_IR_ToIR_lowerType(x_14758, x_14757, x_4, x_5, 
x_14755); +if (lean_obj_tag(x_14759) == 0) +{ +lean_object* x_14760; lean_object* x_14761; lean_object* x_14762; lean_object* x_14763; lean_object* x_14764; +x_14760 = lean_ctor_get(x_14759, 0); +lean_inc(x_14760); +x_14761 = lean_ctor_get(x_14759, 1); +lean_inc(x_14761); +lean_dec(x_14759); +x_14762 = lean_ctor_get(x_14760, 0); +lean_inc(x_14762); +x_14763 = lean_ctor_get(x_14760, 1); +lean_inc(x_14763); +lean_dec(x_14760); +lean_inc(x_5); +lean_inc(x_4); +lean_inc(x_2); +x_14764 = l_Lean_IR_ToIR_lowerLet___lambda__1(x_2, x_14756, x_14751, x_14762, x_14763, x_4, x_5, x_14761); +if (lean_obj_tag(x_14764) == 0) +{ +lean_object* x_14765; lean_object* x_14766; lean_object* x_14767; lean_object* x_14768; lean_object* x_14769; lean_object* x_14770; lean_object* x_14771; +x_14765 = lean_ctor_get(x_14764, 0); +lean_inc(x_14765); +x_14766 = lean_ctor_get(x_14764, 1); +lean_inc(x_14766); +lean_dec(x_14764); +x_14767 = lean_ctor_get(x_14765, 0); +lean_inc(x_14767); +x_14768 = lean_ctor_get(x_14765, 1); +lean_inc(x_14768); +if (lean_is_exclusive(x_14765)) { + lean_ctor_release(x_14765, 0); + lean_ctor_release(x_14765, 1); + x_14769 = x_14765; +} else { + lean_dec_ref(x_14765); + x_14769 = lean_box(0); +} +if (lean_is_scalar(x_14708)) { + x_14770 = lean_alloc_ctor(1, 1, 0); +} else { + x_14770 = x_14708; +} +lean_ctor_set(x_14770, 0, x_14767); +if (lean_is_scalar(x_14769)) { + x_14771 = lean_alloc_ctor(0, 2, 0); +} else { + x_14771 = x_14769; +} +lean_ctor_set(x_14771, 0, x_14770); +lean_ctor_set(x_14771, 1, x_14768); +x_14121 = x_14771; +x_14122 = x_14766; +goto block_14699; +} +else +{ +lean_object* x_14772; lean_object* x_14773; lean_object* x_14774; lean_object* x_14775; +lean_dec(x_14708); +lean_dec(x_14119); +lean_dec(x_11917); +lean_dec(x_11911); +lean_dec(x_11910); +lean_dec(x_153); +lean_dec(x_5); +lean_dec(x_4); +lean_dec(x_2); +lean_dec(x_1); +x_14772 = lean_ctor_get(x_14764, 0); +lean_inc(x_14772); +x_14773 = lean_ctor_get(x_14764, 1); +lean_inc(x_14773); +if (lean_is_exclusive(x_14764)) { + lean_ctor_release(x_14764, 0); + lean_ctor_release(x_14764, 1); + x_14774 = x_14764; +} else { + lean_dec_ref(x_14764); + x_14774 = lean_box(0); +} +if (lean_is_scalar(x_14774)) { + x_14775 = lean_alloc_ctor(1, 2, 0); +} else { + x_14775 = x_14774; +} +lean_ctor_set(x_14775, 0, x_14772); +lean_ctor_set(x_14775, 1, x_14773); +return x_14775; +} +} +else +{ +lean_object* x_14776; lean_object* x_14777; lean_object* x_14778; lean_object* x_14779; +lean_dec(x_14756); +lean_dec(x_14751); +lean_dec(x_14708); +lean_dec(x_14119); +lean_dec(x_11917); +lean_dec(x_11911); +lean_dec(x_11910); +lean_dec(x_153); +lean_dec(x_5); +lean_dec(x_4); +lean_dec(x_2); +lean_dec(x_1); +x_14776 = lean_ctor_get(x_14759, 0); +lean_inc(x_14776); +x_14777 = lean_ctor_get(x_14759, 1); +lean_inc(x_14777); +if (lean_is_exclusive(x_14759)) { + lean_ctor_release(x_14759, 0); + lean_ctor_release(x_14759, 1); + x_14778 = x_14759; +} else { + lean_dec_ref(x_14759); + x_14778 = lean_box(0); +} +if (lean_is_scalar(x_14778)) { + x_14779 = lean_alloc_ctor(1, 2, 0); +} else { + x_14779 = x_14778; +} +lean_ctor_set(x_14779, 0, x_14776); +lean_ctor_set(x_14779, 1, x_14777); +return x_14779; +} +} +} +else +{ +lean_object* x_14780; lean_object* x_14781; lean_object* x_14782; lean_object* x_14783; lean_object* x_14784; lean_object* x_14785; lean_object* x_14786; lean_object* x_14787; lean_object* x_14788; +lean_dec(x_14711); +lean_dec(x_14709); +lean_inc(x_14119); +lean_inc(x_153); +if (lean_is_scalar(x_14706)) { + x_14780 = lean_alloc_ctor(7, 2, 
0); +} else { + x_14780 = x_14706; + lean_ctor_set_tag(x_14780, 7); +} +lean_ctor_set(x_14780, 0, x_153); +lean_ctor_set(x_14780, 1, x_14119); +x_14781 = lean_ctor_get(x_1, 0); +lean_inc(x_14781); +x_14782 = l_Lean_IR_ToIR_bindVar(x_14781, x_14120, x_4, x_5, x_14705); +x_14783 = lean_ctor_get(x_14782, 0); +lean_inc(x_14783); +x_14784 = lean_ctor_get(x_14782, 1); +lean_inc(x_14784); +lean_dec(x_14782); +x_14785 = lean_ctor_get(x_14783, 0); +lean_inc(x_14785); +x_14786 = lean_ctor_get(x_14783, 1); +lean_inc(x_14786); +lean_dec(x_14783); +x_14787 = lean_box(7); +lean_inc(x_5); +lean_inc(x_4); +lean_inc(x_2); +x_14788 = l_Lean_IR_ToIR_lowerLet___lambda__1(x_2, x_14785, x_14780, x_14787, x_14786, x_4, x_5, x_14784); +if (lean_obj_tag(x_14788) == 0) +{ +lean_object* x_14789; lean_object* x_14790; lean_object* x_14791; lean_object* x_14792; lean_object* x_14793; lean_object* x_14794; lean_object* x_14795; +x_14789 = lean_ctor_get(x_14788, 0); +lean_inc(x_14789); +x_14790 = lean_ctor_get(x_14788, 1); +lean_inc(x_14790); +lean_dec(x_14788); +x_14791 = lean_ctor_get(x_14789, 0); +lean_inc(x_14791); +x_14792 = lean_ctor_get(x_14789, 1); +lean_inc(x_14792); +if (lean_is_exclusive(x_14789)) { + lean_ctor_release(x_14789, 0); + lean_ctor_release(x_14789, 1); + x_14793 = x_14789; +} else { + lean_dec_ref(x_14789); + x_14793 = lean_box(0); +} +if (lean_is_scalar(x_14708)) { + x_14794 = lean_alloc_ctor(1, 1, 0); +} else { + x_14794 = x_14708; +} +lean_ctor_set(x_14794, 0, x_14791); +if (lean_is_scalar(x_14793)) { + x_14795 = lean_alloc_ctor(0, 2, 0); +} else { + x_14795 = x_14793; +} +lean_ctor_set(x_14795, 0, x_14794); +lean_ctor_set(x_14795, 1, x_14792); +x_14121 = x_14795; +x_14122 = x_14790; +goto block_14699; +} +else +{ +lean_object* x_14796; lean_object* x_14797; lean_object* x_14798; lean_object* x_14799; +lean_dec(x_14708); +lean_dec(x_14119); +lean_dec(x_11917); +lean_dec(x_11911); +lean_dec(x_11910); +lean_dec(x_153); +lean_dec(x_5); +lean_dec(x_4); +lean_dec(x_2); +lean_dec(x_1); +x_14796 = lean_ctor_get(x_14788, 0); +lean_inc(x_14796); +x_14797 = lean_ctor_get(x_14788, 1); +lean_inc(x_14797); +if (lean_is_exclusive(x_14788)) { + lean_ctor_release(x_14788, 0); + lean_ctor_release(x_14788, 1); + x_14798 = x_14788; +} else { + lean_dec_ref(x_14788); + x_14798 = lean_box(0); +} +if (lean_is_scalar(x_14798)) { + x_14799 = lean_alloc_ctor(1, 2, 0); +} else { + x_14799 = x_14798; +} +lean_ctor_set(x_14799, 0, x_14796); +lean_ctor_set(x_14799, 1, x_14797); +return x_14799; +} +} +} +block_14699: +{ +lean_object* x_14123; +x_14123 = lean_ctor_get(x_14121, 0); +lean_inc(x_14123); +if (lean_obj_tag(x_14123) == 0) +{ +lean_object* x_14124; lean_object* x_14125; lean_object* x_14126; lean_object* x_14127; lean_object* x_14128; lean_object* x_14129; lean_object* x_14130; uint8_t x_14131; lean_object* x_14132; +lean_dec(x_11917); +x_14124 = lean_ctor_get(x_14121, 1); +lean_inc(x_14124); +if (lean_is_exclusive(x_14121)) { + lean_ctor_release(x_14121, 0); + lean_ctor_release(x_14121, 1); + x_14125 = x_14121; +} else { + lean_dec_ref(x_14121); + x_14125 = lean_box(0); +} +x_14126 = lean_st_ref_get(x_5, x_14122); +x_14127 = lean_ctor_get(x_14126, 0); +lean_inc(x_14127); +x_14128 = lean_ctor_get(x_14126, 1); +lean_inc(x_14128); +if (lean_is_exclusive(x_14126)) { + lean_ctor_release(x_14126, 0); + lean_ctor_release(x_14126, 1); + x_14129 = x_14126; +} else { + lean_dec_ref(x_14126); + x_14129 = lean_box(0); +} +x_14130 = lean_ctor_get(x_14127, 0); +lean_inc(x_14130); +lean_dec(x_14127); +x_14131 = 0; 
+lean_inc(x_153); +lean_inc(x_14130); +x_14132 = l_Lean_Environment_find_x3f(x_14130, x_153, x_14131); +if (lean_obj_tag(x_14132) == 0) +{ +lean_object* x_14133; lean_object* x_14134; +lean_dec(x_14130); +lean_dec(x_14129); +lean_dec(x_14125); +lean_dec(x_14119); +lean_dec(x_11911); +lean_dec(x_11910); +lean_dec(x_153); +lean_dec(x_2); +lean_dec(x_1); +x_14133 = l_Lean_IR_ToIR_lowerLet___closed__6; +x_14134 = l_panic___at_Lean_IR_ToIR_lowerAlt_loop___spec__1(x_14133, x_14124, x_4, x_5, x_14128); +return x_14134; +} +else +{ +lean_object* x_14135; +x_14135 = lean_ctor_get(x_14132, 0); +lean_inc(x_14135); +lean_dec(x_14132); +switch (lean_obj_tag(x_14135)) { +case 0: +{ +lean_object* x_14136; lean_object* x_14137; uint8_t x_14138; +lean_dec(x_14130); +lean_dec(x_11911); +lean_dec(x_11910); +if (lean_is_exclusive(x_14135)) { + lean_ctor_release(x_14135, 0); + x_14136 = x_14135; +} else { + lean_dec_ref(x_14135); + x_14136 = lean_box(0); +} +x_14137 = l_Lean_IR_ToIR_lowerLet___closed__9; +x_14138 = lean_name_eq(x_153, x_14137); +if (x_14138 == 0) +{ +lean_object* x_14139; uint8_t x_14140; +x_14139 = l_Lean_IR_ToIR_lowerLet___closed__11; +x_14140 = lean_name_eq(x_153, x_14139); +if (x_14140 == 0) +{ +lean_object* x_14141; lean_object* x_14142; lean_object* x_14143; +lean_dec(x_14129); +lean_dec(x_14125); +lean_inc(x_153); +x_14141 = l_Lean_IR_ToIR_findDecl(x_153, x_14124, x_4, x_5, x_14128); +x_14142 = lean_ctor_get(x_14141, 0); +lean_inc(x_14142); +x_14143 = lean_ctor_get(x_14142, 0); +lean_inc(x_14143); +if (lean_obj_tag(x_14143) == 0) +{ +lean_object* x_14144; lean_object* x_14145; lean_object* x_14146; lean_object* x_14147; uint8_t x_14148; lean_object* x_14149; lean_object* x_14150; lean_object* x_14151; lean_object* x_14152; lean_object* x_14153; lean_object* x_14154; lean_object* x_14155; lean_object* x_14156; lean_object* x_14157; +lean_dec(x_14119); +lean_dec(x_2); +lean_dec(x_1); +x_14144 = lean_ctor_get(x_14141, 1); +lean_inc(x_14144); +if (lean_is_exclusive(x_14141)) { + lean_ctor_release(x_14141, 0); + lean_ctor_release(x_14141, 1); + x_14145 = x_14141; +} else { + lean_dec_ref(x_14141); + x_14145 = lean_box(0); +} +x_14146 = lean_ctor_get(x_14142, 1); +lean_inc(x_14146); +if (lean_is_exclusive(x_14142)) { + lean_ctor_release(x_14142, 0); + lean_ctor_release(x_14142, 1); + x_14147 = x_14142; +} else { + lean_dec_ref(x_14142); + x_14147 = lean_box(0); +} +x_14148 = 1; +x_14149 = l_Lean_IR_ToIR_lowerLet___closed__14; +x_14150 = l_Lean_Name_toString(x_153, x_14148, x_14149); +if (lean_is_scalar(x_14136)) { + x_14151 = lean_alloc_ctor(3, 1, 0); +} else { + x_14151 = x_14136; + lean_ctor_set_tag(x_14151, 3); +} +lean_ctor_set(x_14151, 0, x_14150); +x_14152 = l_Lean_IR_ToIR_lowerLet___closed__13; +if (lean_is_scalar(x_14147)) { + x_14153 = lean_alloc_ctor(5, 2, 0); +} else { + x_14153 = x_14147; + lean_ctor_set_tag(x_14153, 5); +} +lean_ctor_set(x_14153, 0, x_14152); +lean_ctor_set(x_14153, 1, x_14151); +x_14154 = l_Lean_IR_ToIR_lowerLet___closed__16; +if (lean_is_scalar(x_14145)) { + x_14155 = lean_alloc_ctor(5, 2, 0); +} else { + x_14155 = x_14145; + lean_ctor_set_tag(x_14155, 5); +} +lean_ctor_set(x_14155, 0, x_14153); +lean_ctor_set(x_14155, 1, x_14154); +x_14156 = l_Lean_MessageData_ofFormat(x_14155); +x_14157 = l_Lean_throwError___at_Lean_IR_ToIR_lowerLet___spec__1(x_14156, x_14146, x_4, x_5, x_14144); +lean_dec(x_5); +lean_dec(x_4); +lean_dec(x_14146); +return x_14157; +} +else +{ +lean_object* x_14158; lean_object* x_14159; lean_object* x_14160; lean_object* x_14161; lean_object* 
x_14162; lean_object* x_14163; lean_object* x_14164; uint8_t x_14165; +lean_dec(x_14136); +x_14158 = lean_ctor_get(x_14141, 1); +lean_inc(x_14158); +lean_dec(x_14141); +x_14159 = lean_ctor_get(x_14142, 1); +lean_inc(x_14159); +if (lean_is_exclusive(x_14142)) { + lean_ctor_release(x_14142, 0); + lean_ctor_release(x_14142, 1); + x_14160 = x_14142; +} else { + lean_dec_ref(x_14142); + x_14160 = lean_box(0); +} +x_14161 = lean_ctor_get(x_14143, 0); +lean_inc(x_14161); +lean_dec(x_14143); +x_14162 = lean_array_get_size(x_14119); +x_14163 = l_Lean_IR_Decl_params(x_14161); +lean_dec(x_14161); +x_14164 = lean_array_get_size(x_14163); +lean_dec(x_14163); +x_14165 = lean_nat_dec_lt(x_14162, x_14164); +if (x_14165 == 0) +{ +uint8_t x_14166; +x_14166 = lean_nat_dec_eq(x_14162, x_14164); +if (x_14166 == 0) +{ +lean_object* x_14167; lean_object* x_14168; lean_object* x_14169; lean_object* x_14170; lean_object* x_14171; lean_object* x_14172; lean_object* x_14173; lean_object* x_14174; lean_object* x_14175; lean_object* x_14176; lean_object* x_14177; lean_object* x_14178; lean_object* x_14179; lean_object* x_14180; lean_object* x_14181; lean_object* x_14182; lean_object* x_14183; +x_14167 = lean_unsigned_to_nat(0u); +x_14168 = l_Array_extract___rarg(x_14119, x_14167, x_14164); +x_14169 = l_Array_extract___rarg(x_14119, x_14164, x_14162); +lean_dec(x_14162); +lean_dec(x_14119); +if (lean_is_scalar(x_14160)) { + x_14170 = lean_alloc_ctor(6, 2, 0); +} else { + x_14170 = x_14160; + lean_ctor_set_tag(x_14170, 6); +} +lean_ctor_set(x_14170, 0, x_153); +lean_ctor_set(x_14170, 1, x_14168); +x_14171 = lean_ctor_get(x_1, 0); +lean_inc(x_14171); +x_14172 = l_Lean_IR_ToIR_bindVar(x_14171, x_14159, x_4, x_5, x_14158); +x_14173 = lean_ctor_get(x_14172, 0); +lean_inc(x_14173); +x_14174 = lean_ctor_get(x_14172, 1); +lean_inc(x_14174); +lean_dec(x_14172); +x_14175 = lean_ctor_get(x_14173, 0); +lean_inc(x_14175); +x_14176 = lean_ctor_get(x_14173, 1); +lean_inc(x_14176); +lean_dec(x_14173); +x_14177 = l_Lean_IR_ToIR_newVar(x_14176, x_4, x_5, x_14174); +x_14178 = lean_ctor_get(x_14177, 0); +lean_inc(x_14178); +x_14179 = lean_ctor_get(x_14177, 1); +lean_inc(x_14179); +lean_dec(x_14177); +x_14180 = lean_ctor_get(x_14178, 0); +lean_inc(x_14180); +x_14181 = lean_ctor_get(x_14178, 1); +lean_inc(x_14181); +lean_dec(x_14178); +x_14182 = lean_ctor_get(x_1, 2); +lean_inc(x_14182); +lean_dec(x_1); +lean_inc(x_5); +lean_inc(x_4); +x_14183 = l_Lean_IR_ToIR_lowerType(x_14182, x_14181, x_4, x_5, x_14179); +if (lean_obj_tag(x_14183) == 0) +{ +lean_object* x_14184; lean_object* x_14185; lean_object* x_14186; lean_object* x_14187; lean_object* x_14188; +x_14184 = lean_ctor_get(x_14183, 0); +lean_inc(x_14184); +x_14185 = lean_ctor_get(x_14183, 1); +lean_inc(x_14185); +lean_dec(x_14183); +x_14186 = lean_ctor_get(x_14184, 0); +lean_inc(x_14186); +x_14187 = lean_ctor_get(x_14184, 1); +lean_inc(x_14187); +lean_dec(x_14184); +x_14188 = l_Lean_IR_ToIR_lowerLet___lambda__3(x_2, x_14180, x_14169, x_14175, x_14170, x_14186, x_14187, x_4, x_5, x_14185); +return x_14188; +} +else +{ +lean_object* x_14189; lean_object* x_14190; lean_object* x_14191; lean_object* x_14192; +lean_dec(x_14180); +lean_dec(x_14175); +lean_dec(x_14170); +lean_dec(x_14169); +lean_dec(x_5); +lean_dec(x_4); +lean_dec(x_2); +x_14189 = lean_ctor_get(x_14183, 0); +lean_inc(x_14189); +x_14190 = lean_ctor_get(x_14183, 1); +lean_inc(x_14190); +if (lean_is_exclusive(x_14183)) { + lean_ctor_release(x_14183, 0); + lean_ctor_release(x_14183, 1); + x_14191 = x_14183; +} else { + 
lean_dec_ref(x_14183); + x_14191 = lean_box(0); +} +if (lean_is_scalar(x_14191)) { + x_14192 = lean_alloc_ctor(1, 2, 0); +} else { + x_14192 = x_14191; +} +lean_ctor_set(x_14192, 0, x_14189); +lean_ctor_set(x_14192, 1, x_14190); +return x_14192; +} +} +else +{ +lean_object* x_14193; lean_object* x_14194; lean_object* x_14195; lean_object* x_14196; lean_object* x_14197; lean_object* x_14198; lean_object* x_14199; lean_object* x_14200; lean_object* x_14201; +lean_dec(x_14164); +lean_dec(x_14162); +if (lean_is_scalar(x_14160)) { + x_14193 = lean_alloc_ctor(6, 2, 0); +} else { + x_14193 = x_14160; + lean_ctor_set_tag(x_14193, 6); +} +lean_ctor_set(x_14193, 0, x_153); +lean_ctor_set(x_14193, 1, x_14119); +x_14194 = lean_ctor_get(x_1, 0); +lean_inc(x_14194); +x_14195 = l_Lean_IR_ToIR_bindVar(x_14194, x_14159, x_4, x_5, x_14158); +x_14196 = lean_ctor_get(x_14195, 0); +lean_inc(x_14196); +x_14197 = lean_ctor_get(x_14195, 1); +lean_inc(x_14197); +lean_dec(x_14195); +x_14198 = lean_ctor_get(x_14196, 0); +lean_inc(x_14198); +x_14199 = lean_ctor_get(x_14196, 1); +lean_inc(x_14199); +lean_dec(x_14196); +x_14200 = lean_ctor_get(x_1, 2); +lean_inc(x_14200); +lean_dec(x_1); +lean_inc(x_5); +lean_inc(x_4); +x_14201 = l_Lean_IR_ToIR_lowerType(x_14200, x_14199, x_4, x_5, x_14197); +if (lean_obj_tag(x_14201) == 0) +{ +lean_object* x_14202; lean_object* x_14203; lean_object* x_14204; lean_object* x_14205; lean_object* x_14206; +x_14202 = lean_ctor_get(x_14201, 0); +lean_inc(x_14202); +x_14203 = lean_ctor_get(x_14201, 1); +lean_inc(x_14203); +lean_dec(x_14201); +x_14204 = lean_ctor_get(x_14202, 0); +lean_inc(x_14204); +x_14205 = lean_ctor_get(x_14202, 1); +lean_inc(x_14205); +lean_dec(x_14202); +x_14206 = l_Lean_IR_ToIR_lowerLet___lambda__1(x_2, x_14198, x_14193, x_14204, x_14205, x_4, x_5, x_14203); +return x_14206; +} +else +{ +lean_object* x_14207; lean_object* x_14208; lean_object* x_14209; lean_object* x_14210; +lean_dec(x_14198); +lean_dec(x_14193); +lean_dec(x_5); +lean_dec(x_4); +lean_dec(x_2); +x_14207 = lean_ctor_get(x_14201, 0); +lean_inc(x_14207); +x_14208 = lean_ctor_get(x_14201, 1); +lean_inc(x_14208); +if (lean_is_exclusive(x_14201)) { + lean_ctor_release(x_14201, 0); + lean_ctor_release(x_14201, 1); + x_14209 = x_14201; +} else { + lean_dec_ref(x_14201); + x_14209 = lean_box(0); +} +if (lean_is_scalar(x_14209)) { + x_14210 = lean_alloc_ctor(1, 2, 0); +} else { + x_14210 = x_14209; +} +lean_ctor_set(x_14210, 0, x_14207); +lean_ctor_set(x_14210, 1, x_14208); +return x_14210; +} +} +} +else +{ +lean_object* x_14211; lean_object* x_14212; lean_object* x_14213; lean_object* x_14214; lean_object* x_14215; lean_object* x_14216; lean_object* x_14217; lean_object* x_14218; lean_object* x_14219; +lean_dec(x_14164); +lean_dec(x_14162); +if (lean_is_scalar(x_14160)) { + x_14211 = lean_alloc_ctor(7, 2, 0); +} else { + x_14211 = x_14160; + lean_ctor_set_tag(x_14211, 7); +} +lean_ctor_set(x_14211, 0, x_153); +lean_ctor_set(x_14211, 1, x_14119); +x_14212 = lean_ctor_get(x_1, 0); +lean_inc(x_14212); +lean_dec(x_1); +x_14213 = l_Lean_IR_ToIR_bindVar(x_14212, x_14159, x_4, x_5, x_14158); +x_14214 = lean_ctor_get(x_14213, 0); +lean_inc(x_14214); +x_14215 = lean_ctor_get(x_14213, 1); +lean_inc(x_14215); +lean_dec(x_14213); +x_14216 = lean_ctor_get(x_14214, 0); +lean_inc(x_14216); +x_14217 = lean_ctor_get(x_14214, 1); +lean_inc(x_14217); +lean_dec(x_14214); +x_14218 = lean_box(7); +x_14219 = l_Lean_IR_ToIR_lowerLet___lambda__1(x_2, x_14216, x_14211, x_14218, x_14217, x_4, x_5, x_14215); +return x_14219; +} +} +} 
+else +{ +lean_object* x_14220; lean_object* x_14221; lean_object* x_14222; +lean_dec(x_14136); +lean_dec(x_14119); +lean_dec(x_153); +lean_dec(x_5); +lean_dec(x_4); +lean_dec(x_2); +lean_dec(x_1); +x_14220 = lean_box(13); +if (lean_is_scalar(x_14125)) { + x_14221 = lean_alloc_ctor(0, 2, 0); +} else { + x_14221 = x_14125; +} +lean_ctor_set(x_14221, 0, x_14220); +lean_ctor_set(x_14221, 1, x_14124); +if (lean_is_scalar(x_14129)) { + x_14222 = lean_alloc_ctor(0, 2, 0); +} else { + x_14222 = x_14129; +} +lean_ctor_set(x_14222, 0, x_14221); +lean_ctor_set(x_14222, 1, x_14128); +return x_14222; +} +} +else +{ +lean_object* x_14223; lean_object* x_14224; lean_object* x_14225; +lean_dec(x_14136); +lean_dec(x_14129); +lean_dec(x_14125); +lean_dec(x_153); +x_14223 = l_Lean_IR_instInhabitedArg; +x_14224 = lean_unsigned_to_nat(2u); +x_14225 = lean_array_get(x_14223, x_14119, x_14224); +lean_dec(x_14119); +if (lean_obj_tag(x_14225) == 0) +{ +lean_object* x_14226; lean_object* x_14227; lean_object* x_14228; lean_object* x_14229; lean_object* x_14230; lean_object* x_14231; lean_object* x_14232; +x_14226 = lean_ctor_get(x_14225, 0); +lean_inc(x_14226); +lean_dec(x_14225); +x_14227 = lean_ctor_get(x_1, 0); +lean_inc(x_14227); +lean_dec(x_1); +x_14228 = l_Lean_IR_ToIR_bindVarToVarId(x_14227, x_14226, x_14124, x_4, x_5, x_14128); +x_14229 = lean_ctor_get(x_14228, 0); +lean_inc(x_14229); +x_14230 = lean_ctor_get(x_14228, 1); +lean_inc(x_14230); +lean_dec(x_14228); +x_14231 = lean_ctor_get(x_14229, 1); +lean_inc(x_14231); +lean_dec(x_14229); +x_14232 = l_Lean_IR_ToIR_lowerCode(x_2, x_14231, x_4, x_5, x_14230); +return x_14232; +} +else +{ +lean_object* x_14233; lean_object* x_14234; lean_object* x_14235; lean_object* x_14236; lean_object* x_14237; lean_object* x_14238; +x_14233 = lean_ctor_get(x_1, 0); +lean_inc(x_14233); +lean_dec(x_1); +x_14234 = l_Lean_IR_ToIR_bindErased(x_14233, x_14124, x_4, x_5, x_14128); +x_14235 = lean_ctor_get(x_14234, 0); +lean_inc(x_14235); +x_14236 = lean_ctor_get(x_14234, 1); +lean_inc(x_14236); +lean_dec(x_14234); +x_14237 = lean_ctor_get(x_14235, 1); +lean_inc(x_14237); +lean_dec(x_14235); +x_14238 = l_Lean_IR_ToIR_lowerCode(x_2, x_14237, x_4, x_5, x_14236); +return x_14238; +} +} +} +case 1: +{ +lean_object* x_14239; lean_object* x_14240; lean_object* x_14267; lean_object* x_14268; +lean_dec(x_14135); +lean_dec(x_14130); +lean_dec(x_11911); +lean_dec(x_11910); +lean_inc(x_153); +x_14267 = l_Lean_Compiler_LCNF_getMonoDecl_x3f(x_153, x_4, x_5, x_14128); +x_14268 = lean_ctor_get(x_14267, 0); +lean_inc(x_14268); +if (lean_obj_tag(x_14268) == 0) +{ +lean_object* x_14269; lean_object* x_14270; lean_object* x_14271; +x_14269 = lean_ctor_get(x_14267, 1); +lean_inc(x_14269); +lean_dec(x_14267); +x_14270 = lean_box(0); +if (lean_is_scalar(x_14125)) { + x_14271 = lean_alloc_ctor(0, 2, 0); +} else { + x_14271 = x_14125; +} +lean_ctor_set(x_14271, 0, x_14270); +lean_ctor_set(x_14271, 1, x_14124); +x_14239 = x_14271; +x_14240 = x_14269; +goto block_14266; +} +else +{ +lean_object* x_14272; lean_object* x_14273; lean_object* x_14274; lean_object* x_14275; lean_object* x_14276; lean_object* x_14277; lean_object* x_14278; uint8_t x_14279; +lean_dec(x_14125); +x_14272 = lean_ctor_get(x_14267, 1); +lean_inc(x_14272); +if (lean_is_exclusive(x_14267)) { + lean_ctor_release(x_14267, 0); + lean_ctor_release(x_14267, 1); + x_14273 = x_14267; +} else { + lean_dec_ref(x_14267); + x_14273 = lean_box(0); +} +x_14274 = lean_ctor_get(x_14268, 0); +lean_inc(x_14274); +if (lean_is_exclusive(x_14268)) { + 
lean_ctor_release(x_14268, 0); + x_14275 = x_14268; +} else { + lean_dec_ref(x_14268); + x_14275 = lean_box(0); +} +x_14276 = lean_array_get_size(x_14119); +x_14277 = lean_ctor_get(x_14274, 3); +lean_inc(x_14277); +lean_dec(x_14274); +x_14278 = lean_array_get_size(x_14277); +lean_dec(x_14277); +x_14279 = lean_nat_dec_lt(x_14276, x_14278); +if (x_14279 == 0) +{ +uint8_t x_14280; +x_14280 = lean_nat_dec_eq(x_14276, x_14278); +if (x_14280 == 0) +{ +lean_object* x_14281; lean_object* x_14282; lean_object* x_14283; lean_object* x_14284; lean_object* x_14285; lean_object* x_14286; lean_object* x_14287; lean_object* x_14288; lean_object* x_14289; lean_object* x_14290; lean_object* x_14291; lean_object* x_14292; lean_object* x_14293; lean_object* x_14294; lean_object* x_14295; lean_object* x_14296; lean_object* x_14297; +x_14281 = lean_unsigned_to_nat(0u); +x_14282 = l_Array_extract___rarg(x_14119, x_14281, x_14278); +x_14283 = l_Array_extract___rarg(x_14119, x_14278, x_14276); +lean_dec(x_14276); +lean_inc(x_153); +if (lean_is_scalar(x_14273)) { + x_14284 = lean_alloc_ctor(6, 2, 0); +} else { + x_14284 = x_14273; + lean_ctor_set_tag(x_14284, 6); +} +lean_ctor_set(x_14284, 0, x_153); +lean_ctor_set(x_14284, 1, x_14282); +x_14285 = lean_ctor_get(x_1, 0); +lean_inc(x_14285); +x_14286 = l_Lean_IR_ToIR_bindVar(x_14285, x_14124, x_4, x_5, x_14272); +x_14287 = lean_ctor_get(x_14286, 0); +lean_inc(x_14287); +x_14288 = lean_ctor_get(x_14286, 1); +lean_inc(x_14288); +lean_dec(x_14286); +x_14289 = lean_ctor_get(x_14287, 0); +lean_inc(x_14289); +x_14290 = lean_ctor_get(x_14287, 1); +lean_inc(x_14290); +lean_dec(x_14287); +x_14291 = l_Lean_IR_ToIR_newVar(x_14290, x_4, x_5, x_14288); +x_14292 = lean_ctor_get(x_14291, 0); +lean_inc(x_14292); +x_14293 = lean_ctor_get(x_14291, 1); +lean_inc(x_14293); +lean_dec(x_14291); +x_14294 = lean_ctor_get(x_14292, 0); +lean_inc(x_14294); +x_14295 = lean_ctor_get(x_14292, 1); +lean_inc(x_14295); +lean_dec(x_14292); +x_14296 = lean_ctor_get(x_1, 2); +lean_inc(x_14296); +lean_inc(x_5); +lean_inc(x_4); +x_14297 = l_Lean_IR_ToIR_lowerType(x_14296, x_14295, x_4, x_5, x_14293); +if (lean_obj_tag(x_14297) == 0) +{ +lean_object* x_14298; lean_object* x_14299; lean_object* x_14300; lean_object* x_14301; lean_object* x_14302; +x_14298 = lean_ctor_get(x_14297, 0); +lean_inc(x_14298); +x_14299 = lean_ctor_get(x_14297, 1); +lean_inc(x_14299); +lean_dec(x_14297); +x_14300 = lean_ctor_get(x_14298, 0); +lean_inc(x_14300); +x_14301 = lean_ctor_get(x_14298, 1); +lean_inc(x_14301); +lean_dec(x_14298); +lean_inc(x_5); +lean_inc(x_4); +lean_inc(x_2); +x_14302 = l_Lean_IR_ToIR_lowerLet___lambda__3(x_2, x_14294, x_14283, x_14289, x_14284, x_14300, x_14301, x_4, x_5, x_14299); +if (lean_obj_tag(x_14302) == 0) +{ +lean_object* x_14303; lean_object* x_14304; lean_object* x_14305; lean_object* x_14306; lean_object* x_14307; lean_object* x_14308; lean_object* x_14309; +x_14303 = lean_ctor_get(x_14302, 0); +lean_inc(x_14303); +x_14304 = lean_ctor_get(x_14302, 1); +lean_inc(x_14304); +lean_dec(x_14302); +x_14305 = lean_ctor_get(x_14303, 0); +lean_inc(x_14305); +x_14306 = lean_ctor_get(x_14303, 1); +lean_inc(x_14306); +if (lean_is_exclusive(x_14303)) { + lean_ctor_release(x_14303, 0); + lean_ctor_release(x_14303, 1); + x_14307 = x_14303; +} else { + lean_dec_ref(x_14303); + x_14307 = lean_box(0); +} +if (lean_is_scalar(x_14275)) { + x_14308 = lean_alloc_ctor(1, 1, 0); +} else { + x_14308 = x_14275; +} +lean_ctor_set(x_14308, 0, x_14305); +if (lean_is_scalar(x_14307)) { + x_14309 = lean_alloc_ctor(0, 2, 
0); +} else { + x_14309 = x_14307; +} +lean_ctor_set(x_14309, 0, x_14308); +lean_ctor_set(x_14309, 1, x_14306); +x_14239 = x_14309; +x_14240 = x_14304; +goto block_14266; +} +else +{ +lean_object* x_14310; lean_object* x_14311; lean_object* x_14312; lean_object* x_14313; +lean_dec(x_14275); +lean_dec(x_14129); +lean_dec(x_14119); +lean_dec(x_153); +lean_dec(x_5); +lean_dec(x_4); +lean_dec(x_2); +lean_dec(x_1); +x_14310 = lean_ctor_get(x_14302, 0); +lean_inc(x_14310); +x_14311 = lean_ctor_get(x_14302, 1); +lean_inc(x_14311); +if (lean_is_exclusive(x_14302)) { + lean_ctor_release(x_14302, 0); + lean_ctor_release(x_14302, 1); + x_14312 = x_14302; +} else { + lean_dec_ref(x_14302); + x_14312 = lean_box(0); +} +if (lean_is_scalar(x_14312)) { + x_14313 = lean_alloc_ctor(1, 2, 0); +} else { + x_14313 = x_14312; +} +lean_ctor_set(x_14313, 0, x_14310); +lean_ctor_set(x_14313, 1, x_14311); +return x_14313; +} +} +else +{ +lean_object* x_14314; lean_object* x_14315; lean_object* x_14316; lean_object* x_14317; +lean_dec(x_14294); +lean_dec(x_14289); +lean_dec(x_14284); +lean_dec(x_14283); +lean_dec(x_14275); +lean_dec(x_14129); +lean_dec(x_14119); +lean_dec(x_153); +lean_dec(x_5); +lean_dec(x_4); +lean_dec(x_2); +lean_dec(x_1); +x_14314 = lean_ctor_get(x_14297, 0); +lean_inc(x_14314); +x_14315 = lean_ctor_get(x_14297, 1); +lean_inc(x_14315); +if (lean_is_exclusive(x_14297)) { + lean_ctor_release(x_14297, 0); + lean_ctor_release(x_14297, 1); + x_14316 = x_14297; +} else { + lean_dec_ref(x_14297); + x_14316 = lean_box(0); +} +if (lean_is_scalar(x_14316)) { + x_14317 = lean_alloc_ctor(1, 2, 0); +} else { + x_14317 = x_14316; +} +lean_ctor_set(x_14317, 0, x_14314); +lean_ctor_set(x_14317, 1, x_14315); +return x_14317; +} +} +else +{ +lean_object* x_14318; lean_object* x_14319; lean_object* x_14320; lean_object* x_14321; lean_object* x_14322; lean_object* x_14323; lean_object* x_14324; lean_object* x_14325; lean_object* x_14326; +lean_dec(x_14278); +lean_dec(x_14276); +lean_inc(x_14119); +lean_inc(x_153); +if (lean_is_scalar(x_14273)) { + x_14318 = lean_alloc_ctor(6, 2, 0); +} else { + x_14318 = x_14273; + lean_ctor_set_tag(x_14318, 6); +} +lean_ctor_set(x_14318, 0, x_153); +lean_ctor_set(x_14318, 1, x_14119); +x_14319 = lean_ctor_get(x_1, 0); +lean_inc(x_14319); +x_14320 = l_Lean_IR_ToIR_bindVar(x_14319, x_14124, x_4, x_5, x_14272); +x_14321 = lean_ctor_get(x_14320, 0); +lean_inc(x_14321); +x_14322 = lean_ctor_get(x_14320, 1); +lean_inc(x_14322); +lean_dec(x_14320); +x_14323 = lean_ctor_get(x_14321, 0); +lean_inc(x_14323); +x_14324 = lean_ctor_get(x_14321, 1); +lean_inc(x_14324); +lean_dec(x_14321); +x_14325 = lean_ctor_get(x_1, 2); +lean_inc(x_14325); +lean_inc(x_5); +lean_inc(x_4); +x_14326 = l_Lean_IR_ToIR_lowerType(x_14325, x_14324, x_4, x_5, x_14322); +if (lean_obj_tag(x_14326) == 0) +{ +lean_object* x_14327; lean_object* x_14328; lean_object* x_14329; lean_object* x_14330; lean_object* x_14331; +x_14327 = lean_ctor_get(x_14326, 0); +lean_inc(x_14327); +x_14328 = lean_ctor_get(x_14326, 1); +lean_inc(x_14328); +lean_dec(x_14326); +x_14329 = lean_ctor_get(x_14327, 0); +lean_inc(x_14329); +x_14330 = lean_ctor_get(x_14327, 1); +lean_inc(x_14330); +lean_dec(x_14327); +lean_inc(x_5); +lean_inc(x_4); +lean_inc(x_2); +x_14331 = l_Lean_IR_ToIR_lowerLet___lambda__1(x_2, x_14323, x_14318, x_14329, x_14330, x_4, x_5, x_14328); +if (lean_obj_tag(x_14331) == 0) +{ +lean_object* x_14332; lean_object* x_14333; lean_object* x_14334; lean_object* x_14335; lean_object* x_14336; lean_object* x_14337; lean_object* 
x_14338; +x_14332 = lean_ctor_get(x_14331, 0); +lean_inc(x_14332); +x_14333 = lean_ctor_get(x_14331, 1); +lean_inc(x_14333); +lean_dec(x_14331); +x_14334 = lean_ctor_get(x_14332, 0); +lean_inc(x_14334); +x_14335 = lean_ctor_get(x_14332, 1); +lean_inc(x_14335); +if (lean_is_exclusive(x_14332)) { + lean_ctor_release(x_14332, 0); + lean_ctor_release(x_14332, 1); + x_14336 = x_14332; +} else { + lean_dec_ref(x_14332); + x_14336 = lean_box(0); +} +if (lean_is_scalar(x_14275)) { + x_14337 = lean_alloc_ctor(1, 1, 0); +} else { + x_14337 = x_14275; +} +lean_ctor_set(x_14337, 0, x_14334); +if (lean_is_scalar(x_14336)) { + x_14338 = lean_alloc_ctor(0, 2, 0); +} else { + x_14338 = x_14336; +} +lean_ctor_set(x_14338, 0, x_14337); +lean_ctor_set(x_14338, 1, x_14335); +x_14239 = x_14338; +x_14240 = x_14333; +goto block_14266; +} +else +{ +lean_object* x_14339; lean_object* x_14340; lean_object* x_14341; lean_object* x_14342; +lean_dec(x_14275); +lean_dec(x_14129); +lean_dec(x_14119); +lean_dec(x_153); +lean_dec(x_5); +lean_dec(x_4); +lean_dec(x_2); +lean_dec(x_1); +x_14339 = lean_ctor_get(x_14331, 0); +lean_inc(x_14339); +x_14340 = lean_ctor_get(x_14331, 1); +lean_inc(x_14340); +if (lean_is_exclusive(x_14331)) { + lean_ctor_release(x_14331, 0); + lean_ctor_release(x_14331, 1); + x_14341 = x_14331; +} else { + lean_dec_ref(x_14331); + x_14341 = lean_box(0); +} +if (lean_is_scalar(x_14341)) { + x_14342 = lean_alloc_ctor(1, 2, 0); +} else { + x_14342 = x_14341; +} +lean_ctor_set(x_14342, 0, x_14339); +lean_ctor_set(x_14342, 1, x_14340); +return x_14342; +} +} +else +{ +lean_object* x_14343; lean_object* x_14344; lean_object* x_14345; lean_object* x_14346; +lean_dec(x_14323); +lean_dec(x_14318); +lean_dec(x_14275); +lean_dec(x_14129); +lean_dec(x_14119); +lean_dec(x_153); +lean_dec(x_5); +lean_dec(x_4); +lean_dec(x_2); +lean_dec(x_1); +x_14343 = lean_ctor_get(x_14326, 0); +lean_inc(x_14343); +x_14344 = lean_ctor_get(x_14326, 1); +lean_inc(x_14344); +if (lean_is_exclusive(x_14326)) { + lean_ctor_release(x_14326, 0); + lean_ctor_release(x_14326, 1); + x_14345 = x_14326; +} else { + lean_dec_ref(x_14326); + x_14345 = lean_box(0); +} +if (lean_is_scalar(x_14345)) { + x_14346 = lean_alloc_ctor(1, 2, 0); +} else { + x_14346 = x_14345; +} +lean_ctor_set(x_14346, 0, x_14343); +lean_ctor_set(x_14346, 1, x_14344); +return x_14346; +} +} +} +else +{ +lean_object* x_14347; lean_object* x_14348; lean_object* x_14349; lean_object* x_14350; lean_object* x_14351; lean_object* x_14352; lean_object* x_14353; lean_object* x_14354; lean_object* x_14355; +lean_dec(x_14278); +lean_dec(x_14276); +lean_inc(x_14119); +lean_inc(x_153); +if (lean_is_scalar(x_14273)) { + x_14347 = lean_alloc_ctor(7, 2, 0); +} else { + x_14347 = x_14273; + lean_ctor_set_tag(x_14347, 7); +} +lean_ctor_set(x_14347, 0, x_153); +lean_ctor_set(x_14347, 1, x_14119); +x_14348 = lean_ctor_get(x_1, 0); +lean_inc(x_14348); +x_14349 = l_Lean_IR_ToIR_bindVar(x_14348, x_14124, x_4, x_5, x_14272); +x_14350 = lean_ctor_get(x_14349, 0); +lean_inc(x_14350); +x_14351 = lean_ctor_get(x_14349, 1); +lean_inc(x_14351); +lean_dec(x_14349); +x_14352 = lean_ctor_get(x_14350, 0); +lean_inc(x_14352); +x_14353 = lean_ctor_get(x_14350, 1); +lean_inc(x_14353); +lean_dec(x_14350); +x_14354 = lean_box(7); +lean_inc(x_5); +lean_inc(x_4); +lean_inc(x_2); +x_14355 = l_Lean_IR_ToIR_lowerLet___lambda__1(x_2, x_14352, x_14347, x_14354, x_14353, x_4, x_5, x_14351); +if (lean_obj_tag(x_14355) == 0) +{ +lean_object* x_14356; lean_object* x_14357; lean_object* x_14358; lean_object* x_14359; 
lean_object* x_14360; lean_object* x_14361; lean_object* x_14362; +x_14356 = lean_ctor_get(x_14355, 0); +lean_inc(x_14356); +x_14357 = lean_ctor_get(x_14355, 1); +lean_inc(x_14357); +lean_dec(x_14355); +x_14358 = lean_ctor_get(x_14356, 0); +lean_inc(x_14358); +x_14359 = lean_ctor_get(x_14356, 1); +lean_inc(x_14359); +if (lean_is_exclusive(x_14356)) { + lean_ctor_release(x_14356, 0); + lean_ctor_release(x_14356, 1); + x_14360 = x_14356; +} else { + lean_dec_ref(x_14356); + x_14360 = lean_box(0); +} +if (lean_is_scalar(x_14275)) { + x_14361 = lean_alloc_ctor(1, 1, 0); +} else { + x_14361 = x_14275; +} +lean_ctor_set(x_14361, 0, x_14358); +if (lean_is_scalar(x_14360)) { + x_14362 = lean_alloc_ctor(0, 2, 0); +} else { + x_14362 = x_14360; +} +lean_ctor_set(x_14362, 0, x_14361); +lean_ctor_set(x_14362, 1, x_14359); +x_14239 = x_14362; +x_14240 = x_14357; +goto block_14266; +} +else +{ +lean_object* x_14363; lean_object* x_14364; lean_object* x_14365; lean_object* x_14366; +lean_dec(x_14275); +lean_dec(x_14129); +lean_dec(x_14119); +lean_dec(x_153); +lean_dec(x_5); +lean_dec(x_4); +lean_dec(x_2); +lean_dec(x_1); +x_14363 = lean_ctor_get(x_14355, 0); +lean_inc(x_14363); +x_14364 = lean_ctor_get(x_14355, 1); +lean_inc(x_14364); +if (lean_is_exclusive(x_14355)) { + lean_ctor_release(x_14355, 0); + lean_ctor_release(x_14355, 1); + x_14365 = x_14355; +} else { + lean_dec_ref(x_14355); + x_14365 = lean_box(0); +} +if (lean_is_scalar(x_14365)) { + x_14366 = lean_alloc_ctor(1, 2, 0); +} else { + x_14366 = x_14365; +} +lean_ctor_set(x_14366, 0, x_14363); +lean_ctor_set(x_14366, 1, x_14364); +return x_14366; +} +} +} +block_14266: +{ +lean_object* x_14241; +x_14241 = lean_ctor_get(x_14239, 0); +lean_inc(x_14241); +if (lean_obj_tag(x_14241) == 0) +{ +lean_object* x_14242; lean_object* x_14243; lean_object* x_14244; lean_object* x_14245; lean_object* x_14246; lean_object* x_14247; lean_object* x_14248; lean_object* x_14249; lean_object* x_14250; lean_object* x_14251; +lean_dec(x_14129); +x_14242 = lean_ctor_get(x_14239, 1); +lean_inc(x_14242); +lean_dec(x_14239); +x_14243 = lean_alloc_ctor(6, 2, 0); +lean_ctor_set(x_14243, 0, x_153); +lean_ctor_set(x_14243, 1, x_14119); +x_14244 = lean_ctor_get(x_1, 0); +lean_inc(x_14244); +x_14245 = l_Lean_IR_ToIR_bindVar(x_14244, x_14242, x_4, x_5, x_14240); +x_14246 = lean_ctor_get(x_14245, 0); +lean_inc(x_14246); +x_14247 = lean_ctor_get(x_14245, 1); +lean_inc(x_14247); +lean_dec(x_14245); +x_14248 = lean_ctor_get(x_14246, 0); +lean_inc(x_14248); +x_14249 = lean_ctor_get(x_14246, 1); +lean_inc(x_14249); +lean_dec(x_14246); +x_14250 = lean_ctor_get(x_1, 2); +lean_inc(x_14250); +lean_dec(x_1); +lean_inc(x_5); +lean_inc(x_4); +x_14251 = l_Lean_IR_ToIR_lowerType(x_14250, x_14249, x_4, x_5, x_14247); +if (lean_obj_tag(x_14251) == 0) +{ +lean_object* x_14252; lean_object* x_14253; lean_object* x_14254; lean_object* x_14255; lean_object* x_14256; +x_14252 = lean_ctor_get(x_14251, 0); +lean_inc(x_14252); +x_14253 = lean_ctor_get(x_14251, 1); +lean_inc(x_14253); +lean_dec(x_14251); +x_14254 = lean_ctor_get(x_14252, 0); +lean_inc(x_14254); +x_14255 = lean_ctor_get(x_14252, 1); +lean_inc(x_14255); +lean_dec(x_14252); +x_14256 = l_Lean_IR_ToIR_lowerLet___lambda__1(x_2, x_14248, x_14243, x_14254, x_14255, x_4, x_5, x_14253); +return x_14256; +} +else +{ +lean_object* x_14257; lean_object* x_14258; lean_object* x_14259; lean_object* x_14260; +lean_dec(x_14248); +lean_dec(x_14243); +lean_dec(x_5); +lean_dec(x_4); +lean_dec(x_2); +x_14257 = lean_ctor_get(x_14251, 0); 
+lean_inc(x_14257); +x_14258 = lean_ctor_get(x_14251, 1); +lean_inc(x_14258); +if (lean_is_exclusive(x_14251)) { + lean_ctor_release(x_14251, 0); + lean_ctor_release(x_14251, 1); + x_14259 = x_14251; +} else { + lean_dec_ref(x_14251); + x_14259 = lean_box(0); +} +if (lean_is_scalar(x_14259)) { + x_14260 = lean_alloc_ctor(1, 2, 0); +} else { + x_14260 = x_14259; +} +lean_ctor_set(x_14260, 0, x_14257); +lean_ctor_set(x_14260, 1, x_14258); +return x_14260; +} +} +else +{ +lean_object* x_14261; lean_object* x_14262; lean_object* x_14263; lean_object* x_14264; lean_object* x_14265; +lean_dec(x_14119); +lean_dec(x_153); +lean_dec(x_5); +lean_dec(x_4); +lean_dec(x_2); +lean_dec(x_1); +x_14261 = lean_ctor_get(x_14239, 1); +lean_inc(x_14261); +if (lean_is_exclusive(x_14239)) { + lean_ctor_release(x_14239, 0); + lean_ctor_release(x_14239, 1); + x_14262 = x_14239; +} else { + lean_dec_ref(x_14239); + x_14262 = lean_box(0); +} +x_14263 = lean_ctor_get(x_14241, 0); +lean_inc(x_14263); +lean_dec(x_14241); +if (lean_is_scalar(x_14262)) { + x_14264 = lean_alloc_ctor(0, 2, 0); +} else { + x_14264 = x_14262; +} +lean_ctor_set(x_14264, 0, x_14263); +lean_ctor_set(x_14264, 1, x_14261); +if (lean_is_scalar(x_14129)) { + x_14265 = lean_alloc_ctor(0, 2, 0); +} else { + x_14265 = x_14129; +} +lean_ctor_set(x_14265, 0, x_14264); +lean_ctor_set(x_14265, 1, x_14240); +return x_14265; +} +} +} +case 2: +{ +lean_object* x_14367; lean_object* x_14368; +lean_dec(x_14135); +lean_dec(x_14130); +lean_dec(x_14129); +lean_dec(x_14125); +lean_dec(x_14119); +lean_dec(x_11911); +lean_dec(x_11910); +lean_dec(x_153); +lean_dec(x_2); +lean_dec(x_1); +x_14367 = l_Lean_IR_ToIR_lowerLet___closed__18; +x_14368 = l_panic___at_Lean_IR_ToIR_lowerAlt_loop___spec__1(x_14367, x_14124, x_4, x_5, x_14128); +return x_14368; +} +case 3: +{ +lean_object* x_14369; lean_object* x_14370; lean_object* x_14397; lean_object* x_14398; +lean_dec(x_14135); +lean_dec(x_14130); +lean_dec(x_11911); +lean_dec(x_11910); +lean_inc(x_153); +x_14397 = l_Lean_Compiler_LCNF_getMonoDecl_x3f(x_153, x_4, x_5, x_14128); +x_14398 = lean_ctor_get(x_14397, 0); +lean_inc(x_14398); +if (lean_obj_tag(x_14398) == 0) +{ +lean_object* x_14399; lean_object* x_14400; lean_object* x_14401; +x_14399 = lean_ctor_get(x_14397, 1); +lean_inc(x_14399); +lean_dec(x_14397); +x_14400 = lean_box(0); +if (lean_is_scalar(x_14125)) { + x_14401 = lean_alloc_ctor(0, 2, 0); +} else { + x_14401 = x_14125; +} +lean_ctor_set(x_14401, 0, x_14400); +lean_ctor_set(x_14401, 1, x_14124); +x_14369 = x_14401; +x_14370 = x_14399; +goto block_14396; +} +else +{ +lean_object* x_14402; lean_object* x_14403; lean_object* x_14404; lean_object* x_14405; lean_object* x_14406; lean_object* x_14407; lean_object* x_14408; uint8_t x_14409; +lean_dec(x_14125); +x_14402 = lean_ctor_get(x_14397, 1); +lean_inc(x_14402); +if (lean_is_exclusive(x_14397)) { + lean_ctor_release(x_14397, 0); + lean_ctor_release(x_14397, 1); + x_14403 = x_14397; +} else { + lean_dec_ref(x_14397); + x_14403 = lean_box(0); +} +x_14404 = lean_ctor_get(x_14398, 0); +lean_inc(x_14404); +if (lean_is_exclusive(x_14398)) { + lean_ctor_release(x_14398, 0); + x_14405 = x_14398; +} else { + lean_dec_ref(x_14398); + x_14405 = lean_box(0); +} +x_14406 = lean_array_get_size(x_14119); +x_14407 = lean_ctor_get(x_14404, 3); +lean_inc(x_14407); +lean_dec(x_14404); +x_14408 = lean_array_get_size(x_14407); +lean_dec(x_14407); +x_14409 = lean_nat_dec_lt(x_14406, x_14408); +if (x_14409 == 0) +{ +uint8_t x_14410; +x_14410 = lean_nat_dec_eq(x_14406, x_14408); +if 
(x_14410 == 0) +{ +lean_object* x_14411; lean_object* x_14412; lean_object* x_14413; lean_object* x_14414; lean_object* x_14415; lean_object* x_14416; lean_object* x_14417; lean_object* x_14418; lean_object* x_14419; lean_object* x_14420; lean_object* x_14421; lean_object* x_14422; lean_object* x_14423; lean_object* x_14424; lean_object* x_14425; lean_object* x_14426; lean_object* x_14427; +x_14411 = lean_unsigned_to_nat(0u); +x_14412 = l_Array_extract___rarg(x_14119, x_14411, x_14408); +x_14413 = l_Array_extract___rarg(x_14119, x_14408, x_14406); +lean_dec(x_14406); +lean_inc(x_153); +if (lean_is_scalar(x_14403)) { + x_14414 = lean_alloc_ctor(6, 2, 0); +} else { + x_14414 = x_14403; + lean_ctor_set_tag(x_14414, 6); +} +lean_ctor_set(x_14414, 0, x_153); +lean_ctor_set(x_14414, 1, x_14412); +x_14415 = lean_ctor_get(x_1, 0); +lean_inc(x_14415); +x_14416 = l_Lean_IR_ToIR_bindVar(x_14415, x_14124, x_4, x_5, x_14402); +x_14417 = lean_ctor_get(x_14416, 0); +lean_inc(x_14417); +x_14418 = lean_ctor_get(x_14416, 1); +lean_inc(x_14418); +lean_dec(x_14416); +x_14419 = lean_ctor_get(x_14417, 0); +lean_inc(x_14419); +x_14420 = lean_ctor_get(x_14417, 1); +lean_inc(x_14420); +lean_dec(x_14417); +x_14421 = l_Lean_IR_ToIR_newVar(x_14420, x_4, x_5, x_14418); +x_14422 = lean_ctor_get(x_14421, 0); +lean_inc(x_14422); +x_14423 = lean_ctor_get(x_14421, 1); +lean_inc(x_14423); +lean_dec(x_14421); +x_14424 = lean_ctor_get(x_14422, 0); +lean_inc(x_14424); +x_14425 = lean_ctor_get(x_14422, 1); +lean_inc(x_14425); +lean_dec(x_14422); +x_14426 = lean_ctor_get(x_1, 2); +lean_inc(x_14426); +lean_inc(x_5); +lean_inc(x_4); +x_14427 = l_Lean_IR_ToIR_lowerType(x_14426, x_14425, x_4, x_5, x_14423); +if (lean_obj_tag(x_14427) == 0) +{ +lean_object* x_14428; lean_object* x_14429; lean_object* x_14430; lean_object* x_14431; lean_object* x_14432; +x_14428 = lean_ctor_get(x_14427, 0); +lean_inc(x_14428); +x_14429 = lean_ctor_get(x_14427, 1); +lean_inc(x_14429); +lean_dec(x_14427); +x_14430 = lean_ctor_get(x_14428, 0); +lean_inc(x_14430); +x_14431 = lean_ctor_get(x_14428, 1); +lean_inc(x_14431); +lean_dec(x_14428); +lean_inc(x_5); +lean_inc(x_4); +lean_inc(x_2); +x_14432 = l_Lean_IR_ToIR_lowerLet___lambda__3(x_2, x_14424, x_14413, x_14419, x_14414, x_14430, x_14431, x_4, x_5, x_14429); +if (lean_obj_tag(x_14432) == 0) +{ +lean_object* x_14433; lean_object* x_14434; lean_object* x_14435; lean_object* x_14436; lean_object* x_14437; lean_object* x_14438; lean_object* x_14439; +x_14433 = lean_ctor_get(x_14432, 0); +lean_inc(x_14433); +x_14434 = lean_ctor_get(x_14432, 1); +lean_inc(x_14434); +lean_dec(x_14432); +x_14435 = lean_ctor_get(x_14433, 0); +lean_inc(x_14435); +x_14436 = lean_ctor_get(x_14433, 1); +lean_inc(x_14436); +if (lean_is_exclusive(x_14433)) { + lean_ctor_release(x_14433, 0); + lean_ctor_release(x_14433, 1); + x_14437 = x_14433; +} else { + lean_dec_ref(x_14433); + x_14437 = lean_box(0); +} +if (lean_is_scalar(x_14405)) { + x_14438 = lean_alloc_ctor(1, 1, 0); +} else { + x_14438 = x_14405; +} +lean_ctor_set(x_14438, 0, x_14435); +if (lean_is_scalar(x_14437)) { + x_14439 = lean_alloc_ctor(0, 2, 0); +} else { + x_14439 = x_14437; +} +lean_ctor_set(x_14439, 0, x_14438); +lean_ctor_set(x_14439, 1, x_14436); +x_14369 = x_14439; +x_14370 = x_14434; +goto block_14396; +} +else +{ +lean_object* x_14440; lean_object* x_14441; lean_object* x_14442; lean_object* x_14443; +lean_dec(x_14405); +lean_dec(x_14129); +lean_dec(x_14119); +lean_dec(x_153); +lean_dec(x_5); +lean_dec(x_4); +lean_dec(x_2); +lean_dec(x_1); +x_14440 = 
lean_ctor_get(x_14432, 0); +lean_inc(x_14440); +x_14441 = lean_ctor_get(x_14432, 1); +lean_inc(x_14441); +if (lean_is_exclusive(x_14432)) { + lean_ctor_release(x_14432, 0); + lean_ctor_release(x_14432, 1); + x_14442 = x_14432; +} else { + lean_dec_ref(x_14432); + x_14442 = lean_box(0); +} +if (lean_is_scalar(x_14442)) { + x_14443 = lean_alloc_ctor(1, 2, 0); +} else { + x_14443 = x_14442; +} +lean_ctor_set(x_14443, 0, x_14440); +lean_ctor_set(x_14443, 1, x_14441); +return x_14443; +} +} +else +{ +lean_object* x_14444; lean_object* x_14445; lean_object* x_14446; lean_object* x_14447; +lean_dec(x_14424); +lean_dec(x_14419); +lean_dec(x_14414); +lean_dec(x_14413); +lean_dec(x_14405); +lean_dec(x_14129); +lean_dec(x_14119); +lean_dec(x_153); +lean_dec(x_5); +lean_dec(x_4); +lean_dec(x_2); +lean_dec(x_1); +x_14444 = lean_ctor_get(x_14427, 0); +lean_inc(x_14444); +x_14445 = lean_ctor_get(x_14427, 1); +lean_inc(x_14445); +if (lean_is_exclusive(x_14427)) { + lean_ctor_release(x_14427, 0); + lean_ctor_release(x_14427, 1); + x_14446 = x_14427; +} else { + lean_dec_ref(x_14427); + x_14446 = lean_box(0); +} +if (lean_is_scalar(x_14446)) { + x_14447 = lean_alloc_ctor(1, 2, 0); +} else { + x_14447 = x_14446; +} +lean_ctor_set(x_14447, 0, x_14444); +lean_ctor_set(x_14447, 1, x_14445); +return x_14447; +} +} +else +{ +lean_object* x_14448; lean_object* x_14449; lean_object* x_14450; lean_object* x_14451; lean_object* x_14452; lean_object* x_14453; lean_object* x_14454; lean_object* x_14455; lean_object* x_14456; +lean_dec(x_14408); +lean_dec(x_14406); +lean_inc(x_14119); +lean_inc(x_153); +if (lean_is_scalar(x_14403)) { + x_14448 = lean_alloc_ctor(6, 2, 0); +} else { + x_14448 = x_14403; + lean_ctor_set_tag(x_14448, 6); +} +lean_ctor_set(x_14448, 0, x_153); +lean_ctor_set(x_14448, 1, x_14119); +x_14449 = lean_ctor_get(x_1, 0); +lean_inc(x_14449); +x_14450 = l_Lean_IR_ToIR_bindVar(x_14449, x_14124, x_4, x_5, x_14402); +x_14451 = lean_ctor_get(x_14450, 0); +lean_inc(x_14451); +x_14452 = lean_ctor_get(x_14450, 1); +lean_inc(x_14452); +lean_dec(x_14450); +x_14453 = lean_ctor_get(x_14451, 0); +lean_inc(x_14453); +x_14454 = lean_ctor_get(x_14451, 1); +lean_inc(x_14454); +lean_dec(x_14451); +x_14455 = lean_ctor_get(x_1, 2); +lean_inc(x_14455); +lean_inc(x_5); +lean_inc(x_4); +x_14456 = l_Lean_IR_ToIR_lowerType(x_14455, x_14454, x_4, x_5, x_14452); +if (lean_obj_tag(x_14456) == 0) +{ +lean_object* x_14457; lean_object* x_14458; lean_object* x_14459; lean_object* x_14460; lean_object* x_14461; +x_14457 = lean_ctor_get(x_14456, 0); +lean_inc(x_14457); +x_14458 = lean_ctor_get(x_14456, 1); +lean_inc(x_14458); +lean_dec(x_14456); +x_14459 = lean_ctor_get(x_14457, 0); +lean_inc(x_14459); +x_14460 = lean_ctor_get(x_14457, 1); +lean_inc(x_14460); +lean_dec(x_14457); +lean_inc(x_5); +lean_inc(x_4); +lean_inc(x_2); +x_14461 = l_Lean_IR_ToIR_lowerLet___lambda__1(x_2, x_14453, x_14448, x_14459, x_14460, x_4, x_5, x_14458); +if (lean_obj_tag(x_14461) == 0) +{ +lean_object* x_14462; lean_object* x_14463; lean_object* x_14464; lean_object* x_14465; lean_object* x_14466; lean_object* x_14467; lean_object* x_14468; +x_14462 = lean_ctor_get(x_14461, 0); +lean_inc(x_14462); +x_14463 = lean_ctor_get(x_14461, 1); +lean_inc(x_14463); +lean_dec(x_14461); +x_14464 = lean_ctor_get(x_14462, 0); +lean_inc(x_14464); +x_14465 = lean_ctor_get(x_14462, 1); +lean_inc(x_14465); +if (lean_is_exclusive(x_14462)) { + lean_ctor_release(x_14462, 0); + lean_ctor_release(x_14462, 1); + x_14466 = x_14462; +} else { + lean_dec_ref(x_14462); + x_14466 = 
lean_box(0); +} +if (lean_is_scalar(x_14405)) { + x_14467 = lean_alloc_ctor(1, 1, 0); +} else { + x_14467 = x_14405; +} +lean_ctor_set(x_14467, 0, x_14464); +if (lean_is_scalar(x_14466)) { + x_14468 = lean_alloc_ctor(0, 2, 0); +} else { + x_14468 = x_14466; +} +lean_ctor_set(x_14468, 0, x_14467); +lean_ctor_set(x_14468, 1, x_14465); +x_14369 = x_14468; +x_14370 = x_14463; +goto block_14396; +} +else +{ +lean_object* x_14469; lean_object* x_14470; lean_object* x_14471; lean_object* x_14472; +lean_dec(x_14405); +lean_dec(x_14129); +lean_dec(x_14119); +lean_dec(x_153); +lean_dec(x_5); +lean_dec(x_4); +lean_dec(x_2); +lean_dec(x_1); +x_14469 = lean_ctor_get(x_14461, 0); +lean_inc(x_14469); +x_14470 = lean_ctor_get(x_14461, 1); +lean_inc(x_14470); +if (lean_is_exclusive(x_14461)) { + lean_ctor_release(x_14461, 0); + lean_ctor_release(x_14461, 1); + x_14471 = x_14461; +} else { + lean_dec_ref(x_14461); + x_14471 = lean_box(0); +} +if (lean_is_scalar(x_14471)) { + x_14472 = lean_alloc_ctor(1, 2, 0); +} else { + x_14472 = x_14471; +} +lean_ctor_set(x_14472, 0, x_14469); +lean_ctor_set(x_14472, 1, x_14470); +return x_14472; +} +} +else +{ +lean_object* x_14473; lean_object* x_14474; lean_object* x_14475; lean_object* x_14476; +lean_dec(x_14453); +lean_dec(x_14448); +lean_dec(x_14405); +lean_dec(x_14129); +lean_dec(x_14119); +lean_dec(x_153); +lean_dec(x_5); +lean_dec(x_4); +lean_dec(x_2); +lean_dec(x_1); +x_14473 = lean_ctor_get(x_14456, 0); +lean_inc(x_14473); +x_14474 = lean_ctor_get(x_14456, 1); +lean_inc(x_14474); +if (lean_is_exclusive(x_14456)) { + lean_ctor_release(x_14456, 0); + lean_ctor_release(x_14456, 1); + x_14475 = x_14456; +} else { + lean_dec_ref(x_14456); + x_14475 = lean_box(0); +} +if (lean_is_scalar(x_14475)) { + x_14476 = lean_alloc_ctor(1, 2, 0); +} else { + x_14476 = x_14475; +} +lean_ctor_set(x_14476, 0, x_14473); +lean_ctor_set(x_14476, 1, x_14474); +return x_14476; +} +} +} +else +{ +lean_object* x_14477; lean_object* x_14478; lean_object* x_14479; lean_object* x_14480; lean_object* x_14481; lean_object* x_14482; lean_object* x_14483; lean_object* x_14484; lean_object* x_14485; +lean_dec(x_14408); +lean_dec(x_14406); +lean_inc(x_14119); +lean_inc(x_153); +if (lean_is_scalar(x_14403)) { + x_14477 = lean_alloc_ctor(7, 2, 0); +} else { + x_14477 = x_14403; + lean_ctor_set_tag(x_14477, 7); +} +lean_ctor_set(x_14477, 0, x_153); +lean_ctor_set(x_14477, 1, x_14119); +x_14478 = lean_ctor_get(x_1, 0); +lean_inc(x_14478); +x_14479 = l_Lean_IR_ToIR_bindVar(x_14478, x_14124, x_4, x_5, x_14402); +x_14480 = lean_ctor_get(x_14479, 0); +lean_inc(x_14480); +x_14481 = lean_ctor_get(x_14479, 1); +lean_inc(x_14481); +lean_dec(x_14479); +x_14482 = lean_ctor_get(x_14480, 0); +lean_inc(x_14482); +x_14483 = lean_ctor_get(x_14480, 1); +lean_inc(x_14483); +lean_dec(x_14480); +x_14484 = lean_box(7); +lean_inc(x_5); +lean_inc(x_4); +lean_inc(x_2); +x_14485 = l_Lean_IR_ToIR_lowerLet___lambda__1(x_2, x_14482, x_14477, x_14484, x_14483, x_4, x_5, x_14481); +if (lean_obj_tag(x_14485) == 0) +{ +lean_object* x_14486; lean_object* x_14487; lean_object* x_14488; lean_object* x_14489; lean_object* x_14490; lean_object* x_14491; lean_object* x_14492; +x_14486 = lean_ctor_get(x_14485, 0); +lean_inc(x_14486); +x_14487 = lean_ctor_get(x_14485, 1); +lean_inc(x_14487); +lean_dec(x_14485); +x_14488 = lean_ctor_get(x_14486, 0); +lean_inc(x_14488); +x_14489 = lean_ctor_get(x_14486, 1); +lean_inc(x_14489); +if (lean_is_exclusive(x_14486)) { + lean_ctor_release(x_14486, 0); + lean_ctor_release(x_14486, 1); + x_14490 = 
x_14486; +} else { + lean_dec_ref(x_14486); + x_14490 = lean_box(0); +} +if (lean_is_scalar(x_14405)) { + x_14491 = lean_alloc_ctor(1, 1, 0); +} else { + x_14491 = x_14405; +} +lean_ctor_set(x_14491, 0, x_14488); +if (lean_is_scalar(x_14490)) { + x_14492 = lean_alloc_ctor(0, 2, 0); +} else { + x_14492 = x_14490; +} +lean_ctor_set(x_14492, 0, x_14491); +lean_ctor_set(x_14492, 1, x_14489); +x_14369 = x_14492; +x_14370 = x_14487; +goto block_14396; +} +else +{ +lean_object* x_14493; lean_object* x_14494; lean_object* x_14495; lean_object* x_14496; +lean_dec(x_14405); +lean_dec(x_14129); +lean_dec(x_14119); +lean_dec(x_153); +lean_dec(x_5); +lean_dec(x_4); +lean_dec(x_2); +lean_dec(x_1); +x_14493 = lean_ctor_get(x_14485, 0); +lean_inc(x_14493); +x_14494 = lean_ctor_get(x_14485, 1); +lean_inc(x_14494); +if (lean_is_exclusive(x_14485)) { + lean_ctor_release(x_14485, 0); + lean_ctor_release(x_14485, 1); + x_14495 = x_14485; +} else { + lean_dec_ref(x_14485); + x_14495 = lean_box(0); +} +if (lean_is_scalar(x_14495)) { + x_14496 = lean_alloc_ctor(1, 2, 0); +} else { + x_14496 = x_14495; +} +lean_ctor_set(x_14496, 0, x_14493); +lean_ctor_set(x_14496, 1, x_14494); +return x_14496; +} +} +} +block_14396: +{ +lean_object* x_14371; +x_14371 = lean_ctor_get(x_14369, 0); +lean_inc(x_14371); +if (lean_obj_tag(x_14371) == 0) +{ +lean_object* x_14372; lean_object* x_14373; lean_object* x_14374; lean_object* x_14375; lean_object* x_14376; lean_object* x_14377; lean_object* x_14378; lean_object* x_14379; lean_object* x_14380; lean_object* x_14381; +lean_dec(x_14129); +x_14372 = lean_ctor_get(x_14369, 1); +lean_inc(x_14372); +lean_dec(x_14369); +x_14373 = lean_alloc_ctor(6, 2, 0); +lean_ctor_set(x_14373, 0, x_153); +lean_ctor_set(x_14373, 1, x_14119); +x_14374 = lean_ctor_get(x_1, 0); +lean_inc(x_14374); +x_14375 = l_Lean_IR_ToIR_bindVar(x_14374, x_14372, x_4, x_5, x_14370); +x_14376 = lean_ctor_get(x_14375, 0); +lean_inc(x_14376); +x_14377 = lean_ctor_get(x_14375, 1); +lean_inc(x_14377); +lean_dec(x_14375); +x_14378 = lean_ctor_get(x_14376, 0); +lean_inc(x_14378); +x_14379 = lean_ctor_get(x_14376, 1); +lean_inc(x_14379); +lean_dec(x_14376); +x_14380 = lean_ctor_get(x_1, 2); +lean_inc(x_14380); +lean_dec(x_1); +lean_inc(x_5); +lean_inc(x_4); +x_14381 = l_Lean_IR_ToIR_lowerType(x_14380, x_14379, x_4, x_5, x_14377); +if (lean_obj_tag(x_14381) == 0) +{ +lean_object* x_14382; lean_object* x_14383; lean_object* x_14384; lean_object* x_14385; lean_object* x_14386; +x_14382 = lean_ctor_get(x_14381, 0); +lean_inc(x_14382); +x_14383 = lean_ctor_get(x_14381, 1); +lean_inc(x_14383); +lean_dec(x_14381); +x_14384 = lean_ctor_get(x_14382, 0); +lean_inc(x_14384); +x_14385 = lean_ctor_get(x_14382, 1); +lean_inc(x_14385); +lean_dec(x_14382); +x_14386 = l_Lean_IR_ToIR_lowerLet___lambda__1(x_2, x_14378, x_14373, x_14384, x_14385, x_4, x_5, x_14383); +return x_14386; +} +else +{ +lean_object* x_14387; lean_object* x_14388; lean_object* x_14389; lean_object* x_14390; +lean_dec(x_14378); +lean_dec(x_14373); +lean_dec(x_5); +lean_dec(x_4); +lean_dec(x_2); +x_14387 = lean_ctor_get(x_14381, 0); +lean_inc(x_14387); +x_14388 = lean_ctor_get(x_14381, 1); +lean_inc(x_14388); +if (lean_is_exclusive(x_14381)) { + lean_ctor_release(x_14381, 0); + lean_ctor_release(x_14381, 1); + x_14389 = x_14381; +} else { + lean_dec_ref(x_14381); + x_14389 = lean_box(0); +} +if (lean_is_scalar(x_14389)) { + x_14390 = lean_alloc_ctor(1, 2, 0); +} else { + x_14390 = x_14389; +} +lean_ctor_set(x_14390, 0, x_14387); +lean_ctor_set(x_14390, 1, x_14388); +return 
x_14390; +} +} +else +{ +lean_object* x_14391; lean_object* x_14392; lean_object* x_14393; lean_object* x_14394; lean_object* x_14395; +lean_dec(x_14119); +lean_dec(x_153); +lean_dec(x_5); +lean_dec(x_4); +lean_dec(x_2); +lean_dec(x_1); +x_14391 = lean_ctor_get(x_14369, 1); +lean_inc(x_14391); +if (lean_is_exclusive(x_14369)) { + lean_ctor_release(x_14369, 0); + lean_ctor_release(x_14369, 1); + x_14392 = x_14369; +} else { + lean_dec_ref(x_14369); + x_14392 = lean_box(0); +} +x_14393 = lean_ctor_get(x_14371, 0); +lean_inc(x_14393); +lean_dec(x_14371); +if (lean_is_scalar(x_14392)) { + x_14394 = lean_alloc_ctor(0, 2, 0); +} else { + x_14394 = x_14392; +} +lean_ctor_set(x_14394, 0, x_14393); +lean_ctor_set(x_14394, 1, x_14391); +if (lean_is_scalar(x_14129)) { + x_14395 = lean_alloc_ctor(0, 2, 0); +} else { + x_14395 = x_14129; +} +lean_ctor_set(x_14395, 0, x_14394); +lean_ctor_set(x_14395, 1, x_14370); +return x_14395; +} +} +} +case 4: +{ +lean_object* x_14497; lean_object* x_14498; uint8_t x_14499; +lean_dec(x_14130); +lean_dec(x_14129); +lean_dec(x_14125); +lean_dec(x_11911); +lean_dec(x_11910); +if (lean_is_exclusive(x_14135)) { + lean_ctor_release(x_14135, 0); + x_14497 = x_14135; +} else { + lean_dec_ref(x_14135); + x_14497 = lean_box(0); +} +x_14498 = l_Lean_IR_ToIR_lowerLet___closed__20; +x_14499 = lean_name_eq(x_153, x_14498); +if (x_14499 == 0) +{ +uint8_t x_14500; lean_object* x_14501; lean_object* x_14502; lean_object* x_14503; lean_object* x_14504; lean_object* x_14505; lean_object* x_14506; lean_object* x_14507; lean_object* x_14508; lean_object* x_14509; +lean_dec(x_14119); +lean_dec(x_2); +lean_dec(x_1); +x_14500 = 1; +x_14501 = l_Lean_IR_ToIR_lowerLet___closed__14; +x_14502 = l_Lean_Name_toString(x_153, x_14500, x_14501); +if (lean_is_scalar(x_14497)) { + x_14503 = lean_alloc_ctor(3, 1, 0); +} else { + x_14503 = x_14497; + lean_ctor_set_tag(x_14503, 3); +} +lean_ctor_set(x_14503, 0, x_14502); +x_14504 = l_Lean_IR_ToIR_lowerLet___closed__22; +x_14505 = lean_alloc_ctor(5, 2, 0); +lean_ctor_set(x_14505, 0, x_14504); +lean_ctor_set(x_14505, 1, x_14503); +x_14506 = l_Lean_IR_ToIR_lowerLet___closed__24; +x_14507 = lean_alloc_ctor(5, 2, 0); +lean_ctor_set(x_14507, 0, x_14505); +lean_ctor_set(x_14507, 1, x_14506); +x_14508 = l_Lean_MessageData_ofFormat(x_14507); +x_14509 = l_Lean_throwError___at_Lean_IR_ToIR_lowerLet___spec__1(x_14508, x_14124, x_4, x_5, x_14128); +lean_dec(x_5); +lean_dec(x_4); +lean_dec(x_14124); +return x_14509; +} +else +{ +lean_object* x_14510; lean_object* x_14511; lean_object* x_14512; +lean_dec(x_14497); +lean_dec(x_153); +x_14510 = l_Lean_IR_instInhabitedArg; +x_14511 = lean_unsigned_to_nat(2u); +x_14512 = lean_array_get(x_14510, x_14119, x_14511); +lean_dec(x_14119); +if (lean_obj_tag(x_14512) == 0) +{ +lean_object* x_14513; lean_object* x_14514; lean_object* x_14515; lean_object* x_14516; lean_object* x_14517; lean_object* x_14518; lean_object* x_14519; +x_14513 = lean_ctor_get(x_14512, 0); +lean_inc(x_14513); +lean_dec(x_14512); +x_14514 = lean_ctor_get(x_1, 0); +lean_inc(x_14514); +lean_dec(x_1); +x_14515 = l_Lean_IR_ToIR_bindVarToVarId(x_14514, x_14513, x_14124, x_4, x_5, x_14128); +x_14516 = lean_ctor_get(x_14515, 0); +lean_inc(x_14516); +x_14517 = lean_ctor_get(x_14515, 1); +lean_inc(x_14517); +lean_dec(x_14515); +x_14518 = lean_ctor_get(x_14516, 1); +lean_inc(x_14518); +lean_dec(x_14516); +x_14519 = l_Lean_IR_ToIR_lowerCode(x_2, x_14518, x_4, x_5, x_14517); +return x_14519; +} +else +{ +lean_object* x_14520; lean_object* x_14521; lean_object* 
x_14522; lean_object* x_14523; lean_object* x_14524; lean_object* x_14525; +x_14520 = lean_ctor_get(x_1, 0); +lean_inc(x_14520); +lean_dec(x_1); +x_14521 = l_Lean_IR_ToIR_bindErased(x_14520, x_14124, x_4, x_5, x_14128); +x_14522 = lean_ctor_get(x_14521, 0); +lean_inc(x_14522); +x_14523 = lean_ctor_get(x_14521, 1); +lean_inc(x_14523); +lean_dec(x_14521); +x_14524 = lean_ctor_get(x_14522, 1); +lean_inc(x_14524); +lean_dec(x_14522); +x_14525 = l_Lean_IR_ToIR_lowerCode(x_2, x_14524, x_4, x_5, x_14523); +return x_14525; +} +} +} +case 5: +{ +lean_object* x_14526; lean_object* x_14527; +lean_dec(x_14135); +lean_dec(x_14130); +lean_dec(x_14129); +lean_dec(x_14125); +lean_dec(x_14119); +lean_dec(x_11911); +lean_dec(x_11910); +lean_dec(x_153); +lean_dec(x_2); +lean_dec(x_1); +x_14526 = l_Lean_IR_ToIR_lowerLet___closed__26; +x_14527 = l_panic___at_Lean_IR_ToIR_lowerAlt_loop___spec__1(x_14526, x_14124, x_4, x_5, x_14128); +return x_14527; +} +case 6: +{ +lean_object* x_14528; uint8_t x_14529; +x_14528 = lean_ctor_get(x_14135, 0); +lean_inc(x_14528); +lean_dec(x_14135); +lean_inc(x_153); +x_14529 = l_Lean_isExtern(x_14130, x_153); +if (x_14529 == 0) +{ +lean_object* x_14530; +lean_dec(x_14129); +lean_dec(x_14125); +lean_dec(x_14119); +lean_inc(x_5); +lean_inc(x_4); +x_14530 = l_Lean_IR_ToIR_getCtorInfo(x_153, x_14124, x_4, x_5, x_14128); +if (lean_obj_tag(x_14530) == 0) +{ +lean_object* x_14531; lean_object* x_14532; lean_object* x_14533; lean_object* x_14534; lean_object* x_14535; lean_object* x_14536; lean_object* x_14537; lean_object* x_14538; lean_object* x_14539; lean_object* x_14540; lean_object* x_14541; lean_object* x_14542; lean_object* x_14543; lean_object* x_14544; lean_object* x_14545; lean_object* x_14546; lean_object* x_14547; lean_object* x_14548; lean_object* x_14549; lean_object* x_14550; +x_14531 = lean_ctor_get(x_14530, 0); +lean_inc(x_14531); +x_14532 = lean_ctor_get(x_14531, 0); +lean_inc(x_14532); +x_14533 = lean_ctor_get(x_14530, 1); +lean_inc(x_14533); +lean_dec(x_14530); +x_14534 = lean_ctor_get(x_14531, 1); +lean_inc(x_14534); +lean_dec(x_14531); +x_14535 = lean_ctor_get(x_14532, 0); +lean_inc(x_14535); +x_14536 = lean_ctor_get(x_14532, 1); +lean_inc(x_14536); +lean_dec(x_14532); +x_14537 = lean_ctor_get(x_14528, 3); +lean_inc(x_14537); +lean_dec(x_14528); +x_14538 = lean_array_get_size(x_11910); +x_14539 = l_Array_extract___rarg(x_11910, x_14537, x_14538); +lean_dec(x_14538); +lean_dec(x_11910); +x_14540 = lean_array_get_size(x_14536); +x_14541 = lean_unsigned_to_nat(0u); +x_14542 = lean_unsigned_to_nat(1u); +if (lean_is_scalar(x_11911)) { + x_14543 = lean_alloc_ctor(0, 3, 0); +} else { + x_14543 = x_11911; + lean_ctor_set_tag(x_14543, 0); +} +lean_ctor_set(x_14543, 0, x_14541); +lean_ctor_set(x_14543, 1, x_14540); +lean_ctor_set(x_14543, 2, x_14542); +x_14544 = l_Lean_IR_ToIR_lowerLet___closed__27; +x_14545 = l_Std_Range_forIn_x27_loop___at_Lean_IR_ToIR_lowerLet___spec__6(x_14536, x_14539, x_14543, x_14543, x_14544, x_14541, lean_box(0), lean_box(0), x_14534, x_4, x_5, x_14533); +lean_dec(x_14543); +x_14546 = lean_ctor_get(x_14545, 0); +lean_inc(x_14546); +x_14547 = lean_ctor_get(x_14545, 1); +lean_inc(x_14547); +lean_dec(x_14545); +x_14548 = lean_ctor_get(x_14546, 0); +lean_inc(x_14548); +x_14549 = lean_ctor_get(x_14546, 1); +lean_inc(x_14549); +lean_dec(x_14546); +x_14550 = l_Lean_IR_ToIR_lowerLet___lambda__4(x_1, x_2, x_14535, x_14536, x_14539, x_14548, x_14549, x_4, x_5, x_14547); +lean_dec(x_14539); +lean_dec(x_14536); +return x_14550; +} +else +{ +lean_object* 
x_14551; lean_object* x_14552; lean_object* x_14553; lean_object* x_14554; +lean_dec(x_14528); +lean_dec(x_11911); +lean_dec(x_11910); +lean_dec(x_5); +lean_dec(x_4); +lean_dec(x_2); +lean_dec(x_1); +x_14551 = lean_ctor_get(x_14530, 0); +lean_inc(x_14551); +x_14552 = lean_ctor_get(x_14530, 1); +lean_inc(x_14552); +if (lean_is_exclusive(x_14530)) { + lean_ctor_release(x_14530, 0); + lean_ctor_release(x_14530, 1); + x_14553 = x_14530; +} else { + lean_dec_ref(x_14530); + x_14553 = lean_box(0); +} +if (lean_is_scalar(x_14553)) { + x_14554 = lean_alloc_ctor(1, 2, 0); +} else { + x_14554 = x_14553; +} +lean_ctor_set(x_14554, 0, x_14551); +lean_ctor_set(x_14554, 1, x_14552); +return x_14554; +} +} +else +{ +lean_object* x_14555; lean_object* x_14556; lean_object* x_14583; lean_object* x_14584; +lean_dec(x_14528); +lean_dec(x_11911); +lean_dec(x_11910); +lean_inc(x_153); +x_14583 = l_Lean_Compiler_LCNF_getMonoDecl_x3f(x_153, x_4, x_5, x_14128); +x_14584 = lean_ctor_get(x_14583, 0); +lean_inc(x_14584); +if (lean_obj_tag(x_14584) == 0) +{ +lean_object* x_14585; lean_object* x_14586; lean_object* x_14587; +x_14585 = lean_ctor_get(x_14583, 1); +lean_inc(x_14585); +lean_dec(x_14583); +x_14586 = lean_box(0); +if (lean_is_scalar(x_14125)) { + x_14587 = lean_alloc_ctor(0, 2, 0); +} else { + x_14587 = x_14125; +} +lean_ctor_set(x_14587, 0, x_14586); +lean_ctor_set(x_14587, 1, x_14124); +x_14555 = x_14587; +x_14556 = x_14585; +goto block_14582; +} +else +{ +lean_object* x_14588; lean_object* x_14589; lean_object* x_14590; lean_object* x_14591; lean_object* x_14592; lean_object* x_14593; lean_object* x_14594; uint8_t x_14595; +lean_dec(x_14125); +x_14588 = lean_ctor_get(x_14583, 1); +lean_inc(x_14588); +if (lean_is_exclusive(x_14583)) { + lean_ctor_release(x_14583, 0); + lean_ctor_release(x_14583, 1); + x_14589 = x_14583; +} else { + lean_dec_ref(x_14583); + x_14589 = lean_box(0); +} +x_14590 = lean_ctor_get(x_14584, 0); +lean_inc(x_14590); +if (lean_is_exclusive(x_14584)) { + lean_ctor_release(x_14584, 0); + x_14591 = x_14584; +} else { + lean_dec_ref(x_14584); + x_14591 = lean_box(0); +} +x_14592 = lean_array_get_size(x_14119); +x_14593 = lean_ctor_get(x_14590, 3); +lean_inc(x_14593); +lean_dec(x_14590); +x_14594 = lean_array_get_size(x_14593); +lean_dec(x_14593); +x_14595 = lean_nat_dec_lt(x_14592, x_14594); +if (x_14595 == 0) +{ +uint8_t x_14596; +x_14596 = lean_nat_dec_eq(x_14592, x_14594); +if (x_14596 == 0) +{ +lean_object* x_14597; lean_object* x_14598; lean_object* x_14599; lean_object* x_14600; lean_object* x_14601; lean_object* x_14602; lean_object* x_14603; lean_object* x_14604; lean_object* x_14605; lean_object* x_14606; lean_object* x_14607; lean_object* x_14608; lean_object* x_14609; lean_object* x_14610; lean_object* x_14611; lean_object* x_14612; lean_object* x_14613; +x_14597 = lean_unsigned_to_nat(0u); +x_14598 = l_Array_extract___rarg(x_14119, x_14597, x_14594); +x_14599 = l_Array_extract___rarg(x_14119, x_14594, x_14592); +lean_dec(x_14592); +lean_inc(x_153); +if (lean_is_scalar(x_14589)) { + x_14600 = lean_alloc_ctor(6, 2, 0); +} else { + x_14600 = x_14589; + lean_ctor_set_tag(x_14600, 6); +} +lean_ctor_set(x_14600, 0, x_153); +lean_ctor_set(x_14600, 1, x_14598); +x_14601 = lean_ctor_get(x_1, 0); +lean_inc(x_14601); +x_14602 = l_Lean_IR_ToIR_bindVar(x_14601, x_14124, x_4, x_5, x_14588); +x_14603 = lean_ctor_get(x_14602, 0); +lean_inc(x_14603); +x_14604 = lean_ctor_get(x_14602, 1); +lean_inc(x_14604); +lean_dec(x_14602); +x_14605 = lean_ctor_get(x_14603, 0); +lean_inc(x_14605); +x_14606 
= lean_ctor_get(x_14603, 1); +lean_inc(x_14606); +lean_dec(x_14603); +x_14607 = l_Lean_IR_ToIR_newVar(x_14606, x_4, x_5, x_14604); +x_14608 = lean_ctor_get(x_14607, 0); +lean_inc(x_14608); +x_14609 = lean_ctor_get(x_14607, 1); +lean_inc(x_14609); +lean_dec(x_14607); +x_14610 = lean_ctor_get(x_14608, 0); +lean_inc(x_14610); +x_14611 = lean_ctor_get(x_14608, 1); +lean_inc(x_14611); +lean_dec(x_14608); +x_14612 = lean_ctor_get(x_1, 2); +lean_inc(x_14612); +lean_inc(x_5); +lean_inc(x_4); +x_14613 = l_Lean_IR_ToIR_lowerType(x_14612, x_14611, x_4, x_5, x_14609); +if (lean_obj_tag(x_14613) == 0) +{ +lean_object* x_14614; lean_object* x_14615; lean_object* x_14616; lean_object* x_14617; lean_object* x_14618; +x_14614 = lean_ctor_get(x_14613, 0); +lean_inc(x_14614); +x_14615 = lean_ctor_get(x_14613, 1); +lean_inc(x_14615); +lean_dec(x_14613); +x_14616 = lean_ctor_get(x_14614, 0); +lean_inc(x_14616); +x_14617 = lean_ctor_get(x_14614, 1); +lean_inc(x_14617); +lean_dec(x_14614); +lean_inc(x_5); +lean_inc(x_4); +lean_inc(x_2); +x_14618 = l_Lean_IR_ToIR_lowerLet___lambda__3(x_2, x_14610, x_14599, x_14605, x_14600, x_14616, x_14617, x_4, x_5, x_14615); +if (lean_obj_tag(x_14618) == 0) +{ +lean_object* x_14619; lean_object* x_14620; lean_object* x_14621; lean_object* x_14622; lean_object* x_14623; lean_object* x_14624; lean_object* x_14625; +x_14619 = lean_ctor_get(x_14618, 0); +lean_inc(x_14619); +x_14620 = lean_ctor_get(x_14618, 1); +lean_inc(x_14620); +lean_dec(x_14618); +x_14621 = lean_ctor_get(x_14619, 0); +lean_inc(x_14621); +x_14622 = lean_ctor_get(x_14619, 1); +lean_inc(x_14622); +if (lean_is_exclusive(x_14619)) { + lean_ctor_release(x_14619, 0); + lean_ctor_release(x_14619, 1); + x_14623 = x_14619; +} else { + lean_dec_ref(x_14619); + x_14623 = lean_box(0); +} +if (lean_is_scalar(x_14591)) { + x_14624 = lean_alloc_ctor(1, 1, 0); +} else { + x_14624 = x_14591; +} +lean_ctor_set(x_14624, 0, x_14621); +if (lean_is_scalar(x_14623)) { + x_14625 = lean_alloc_ctor(0, 2, 0); +} else { + x_14625 = x_14623; +} +lean_ctor_set(x_14625, 0, x_14624); +lean_ctor_set(x_14625, 1, x_14622); +x_14555 = x_14625; +x_14556 = x_14620; +goto block_14582; +} +else +{ +lean_object* x_14626; lean_object* x_14627; lean_object* x_14628; lean_object* x_14629; +lean_dec(x_14591); +lean_dec(x_14129); +lean_dec(x_14119); +lean_dec(x_153); +lean_dec(x_5); +lean_dec(x_4); +lean_dec(x_2); +lean_dec(x_1); +x_14626 = lean_ctor_get(x_14618, 0); +lean_inc(x_14626); +x_14627 = lean_ctor_get(x_14618, 1); +lean_inc(x_14627); +if (lean_is_exclusive(x_14618)) { + lean_ctor_release(x_14618, 0); + lean_ctor_release(x_14618, 1); + x_14628 = x_14618; +} else { + lean_dec_ref(x_14618); + x_14628 = lean_box(0); +} +if (lean_is_scalar(x_14628)) { + x_14629 = lean_alloc_ctor(1, 2, 0); +} else { + x_14629 = x_14628; +} +lean_ctor_set(x_14629, 0, x_14626); +lean_ctor_set(x_14629, 1, x_14627); +return x_14629; +} +} +else +{ +lean_object* x_14630; lean_object* x_14631; lean_object* x_14632; lean_object* x_14633; +lean_dec(x_14610); +lean_dec(x_14605); +lean_dec(x_14600); +lean_dec(x_14599); +lean_dec(x_14591); +lean_dec(x_14129); +lean_dec(x_14119); +lean_dec(x_153); +lean_dec(x_5); +lean_dec(x_4); +lean_dec(x_2); +lean_dec(x_1); +x_14630 = lean_ctor_get(x_14613, 0); +lean_inc(x_14630); +x_14631 = lean_ctor_get(x_14613, 1); +lean_inc(x_14631); +if (lean_is_exclusive(x_14613)) { + lean_ctor_release(x_14613, 0); + lean_ctor_release(x_14613, 1); + x_14632 = x_14613; +} else { + lean_dec_ref(x_14613); + x_14632 = lean_box(0); +} +if 
(lean_is_scalar(x_14632)) { + x_14633 = lean_alloc_ctor(1, 2, 0); +} else { + x_14633 = x_14632; +} +lean_ctor_set(x_14633, 0, x_14630); +lean_ctor_set(x_14633, 1, x_14631); +return x_14633; +} +} +else +{ +lean_object* x_14634; lean_object* x_14635; lean_object* x_14636; lean_object* x_14637; lean_object* x_14638; lean_object* x_14639; lean_object* x_14640; lean_object* x_14641; lean_object* x_14642; +lean_dec(x_14594); +lean_dec(x_14592); +lean_inc(x_14119); +lean_inc(x_153); +if (lean_is_scalar(x_14589)) { + x_14634 = lean_alloc_ctor(6, 2, 0); +} else { + x_14634 = x_14589; + lean_ctor_set_tag(x_14634, 6); +} +lean_ctor_set(x_14634, 0, x_153); +lean_ctor_set(x_14634, 1, x_14119); +x_14635 = lean_ctor_get(x_1, 0); +lean_inc(x_14635); +x_14636 = l_Lean_IR_ToIR_bindVar(x_14635, x_14124, x_4, x_5, x_14588); +x_14637 = lean_ctor_get(x_14636, 0); +lean_inc(x_14637); +x_14638 = lean_ctor_get(x_14636, 1); +lean_inc(x_14638); +lean_dec(x_14636); +x_14639 = lean_ctor_get(x_14637, 0); +lean_inc(x_14639); +x_14640 = lean_ctor_get(x_14637, 1); +lean_inc(x_14640); +lean_dec(x_14637); +x_14641 = lean_ctor_get(x_1, 2); +lean_inc(x_14641); +lean_inc(x_5); +lean_inc(x_4); +x_14642 = l_Lean_IR_ToIR_lowerType(x_14641, x_14640, x_4, x_5, x_14638); +if (lean_obj_tag(x_14642) == 0) +{ +lean_object* x_14643; lean_object* x_14644; lean_object* x_14645; lean_object* x_14646; lean_object* x_14647; +x_14643 = lean_ctor_get(x_14642, 0); +lean_inc(x_14643); +x_14644 = lean_ctor_get(x_14642, 1); +lean_inc(x_14644); +lean_dec(x_14642); +x_14645 = lean_ctor_get(x_14643, 0); +lean_inc(x_14645); +x_14646 = lean_ctor_get(x_14643, 1); +lean_inc(x_14646); +lean_dec(x_14643); +lean_inc(x_5); +lean_inc(x_4); +lean_inc(x_2); +x_14647 = l_Lean_IR_ToIR_lowerLet___lambda__1(x_2, x_14639, x_14634, x_14645, x_14646, x_4, x_5, x_14644); +if (lean_obj_tag(x_14647) == 0) +{ +lean_object* x_14648; lean_object* x_14649; lean_object* x_14650; lean_object* x_14651; lean_object* x_14652; lean_object* x_14653; lean_object* x_14654; +x_14648 = lean_ctor_get(x_14647, 0); +lean_inc(x_14648); +x_14649 = lean_ctor_get(x_14647, 1); +lean_inc(x_14649); +lean_dec(x_14647); +x_14650 = lean_ctor_get(x_14648, 0); +lean_inc(x_14650); +x_14651 = lean_ctor_get(x_14648, 1); +lean_inc(x_14651); +if (lean_is_exclusive(x_14648)) { + lean_ctor_release(x_14648, 0); + lean_ctor_release(x_14648, 1); + x_14652 = x_14648; +} else { + lean_dec_ref(x_14648); + x_14652 = lean_box(0); +} +if (lean_is_scalar(x_14591)) { + x_14653 = lean_alloc_ctor(1, 1, 0); +} else { + x_14653 = x_14591; +} +lean_ctor_set(x_14653, 0, x_14650); +if (lean_is_scalar(x_14652)) { + x_14654 = lean_alloc_ctor(0, 2, 0); +} else { + x_14654 = x_14652; +} +lean_ctor_set(x_14654, 0, x_14653); +lean_ctor_set(x_14654, 1, x_14651); +x_14555 = x_14654; +x_14556 = x_14649; +goto block_14582; +} +else +{ +lean_object* x_14655; lean_object* x_14656; lean_object* x_14657; lean_object* x_14658; +lean_dec(x_14591); +lean_dec(x_14129); +lean_dec(x_14119); +lean_dec(x_153); +lean_dec(x_5); +lean_dec(x_4); +lean_dec(x_2); +lean_dec(x_1); +x_14655 = lean_ctor_get(x_14647, 0); +lean_inc(x_14655); +x_14656 = lean_ctor_get(x_14647, 1); +lean_inc(x_14656); +if (lean_is_exclusive(x_14647)) { + lean_ctor_release(x_14647, 0); + lean_ctor_release(x_14647, 1); + x_14657 = x_14647; +} else { + lean_dec_ref(x_14647); + x_14657 = lean_box(0); +} +if (lean_is_scalar(x_14657)) { + x_14658 = lean_alloc_ctor(1, 2, 0); +} else { + x_14658 = x_14657; +} +lean_ctor_set(x_14658, 0, x_14655); +lean_ctor_set(x_14658, 1, x_14656); 
+return x_14658; +} +} +else +{ +lean_object* x_14659; lean_object* x_14660; lean_object* x_14661; lean_object* x_14662; +lean_dec(x_14639); +lean_dec(x_14634); +lean_dec(x_14591); +lean_dec(x_14129); +lean_dec(x_14119); +lean_dec(x_153); +lean_dec(x_5); +lean_dec(x_4); +lean_dec(x_2); +lean_dec(x_1); +x_14659 = lean_ctor_get(x_14642, 0); +lean_inc(x_14659); +x_14660 = lean_ctor_get(x_14642, 1); +lean_inc(x_14660); +if (lean_is_exclusive(x_14642)) { + lean_ctor_release(x_14642, 0); + lean_ctor_release(x_14642, 1); + x_14661 = x_14642; +} else { + lean_dec_ref(x_14642); + x_14661 = lean_box(0); +} +if (lean_is_scalar(x_14661)) { + x_14662 = lean_alloc_ctor(1, 2, 0); +} else { + x_14662 = x_14661; +} +lean_ctor_set(x_14662, 0, x_14659); +lean_ctor_set(x_14662, 1, x_14660); +return x_14662; +} +} +} +else +{ +lean_object* x_14663; lean_object* x_14664; lean_object* x_14665; lean_object* x_14666; lean_object* x_14667; lean_object* x_14668; lean_object* x_14669; lean_object* x_14670; lean_object* x_14671; +lean_dec(x_14594); +lean_dec(x_14592); +lean_inc(x_14119); +lean_inc(x_153); +if (lean_is_scalar(x_14589)) { + x_14663 = lean_alloc_ctor(7, 2, 0); +} else { + x_14663 = x_14589; + lean_ctor_set_tag(x_14663, 7); +} +lean_ctor_set(x_14663, 0, x_153); +lean_ctor_set(x_14663, 1, x_14119); +x_14664 = lean_ctor_get(x_1, 0); +lean_inc(x_14664); +x_14665 = l_Lean_IR_ToIR_bindVar(x_14664, x_14124, x_4, x_5, x_14588); +x_14666 = lean_ctor_get(x_14665, 0); +lean_inc(x_14666); +x_14667 = lean_ctor_get(x_14665, 1); +lean_inc(x_14667); +lean_dec(x_14665); +x_14668 = lean_ctor_get(x_14666, 0); +lean_inc(x_14668); +x_14669 = lean_ctor_get(x_14666, 1); +lean_inc(x_14669); +lean_dec(x_14666); +x_14670 = lean_box(7); +lean_inc(x_5); +lean_inc(x_4); +lean_inc(x_2); +x_14671 = l_Lean_IR_ToIR_lowerLet___lambda__1(x_2, x_14668, x_14663, x_14670, x_14669, x_4, x_5, x_14667); +if (lean_obj_tag(x_14671) == 0) +{ +lean_object* x_14672; lean_object* x_14673; lean_object* x_14674; lean_object* x_14675; lean_object* x_14676; lean_object* x_14677; lean_object* x_14678; +x_14672 = lean_ctor_get(x_14671, 0); +lean_inc(x_14672); +x_14673 = lean_ctor_get(x_14671, 1); +lean_inc(x_14673); +lean_dec(x_14671); +x_14674 = lean_ctor_get(x_14672, 0); +lean_inc(x_14674); +x_14675 = lean_ctor_get(x_14672, 1); +lean_inc(x_14675); +if (lean_is_exclusive(x_14672)) { + lean_ctor_release(x_14672, 0); + lean_ctor_release(x_14672, 1); + x_14676 = x_14672; +} else { + lean_dec_ref(x_14672); + x_14676 = lean_box(0); +} +if (lean_is_scalar(x_14591)) { + x_14677 = lean_alloc_ctor(1, 1, 0); +} else { + x_14677 = x_14591; +} +lean_ctor_set(x_14677, 0, x_14674); +if (lean_is_scalar(x_14676)) { + x_14678 = lean_alloc_ctor(0, 2, 0); +} else { + x_14678 = x_14676; +} +lean_ctor_set(x_14678, 0, x_14677); +lean_ctor_set(x_14678, 1, x_14675); +x_14555 = x_14678; +x_14556 = x_14673; +goto block_14582; +} +else +{ +lean_object* x_14679; lean_object* x_14680; lean_object* x_14681; lean_object* x_14682; +lean_dec(x_14591); +lean_dec(x_14129); +lean_dec(x_14119); +lean_dec(x_153); +lean_dec(x_5); +lean_dec(x_4); +lean_dec(x_2); +lean_dec(x_1); +x_14679 = lean_ctor_get(x_14671, 0); +lean_inc(x_14679); +x_14680 = lean_ctor_get(x_14671, 1); +lean_inc(x_14680); +if (lean_is_exclusive(x_14671)) { + lean_ctor_release(x_14671, 0); + lean_ctor_release(x_14671, 1); + x_14681 = x_14671; +} else { + lean_dec_ref(x_14671); + x_14681 = lean_box(0); +} +if (lean_is_scalar(x_14681)) { + x_14682 = lean_alloc_ctor(1, 2, 0); +} else { + x_14682 = x_14681; +} 
+lean_ctor_set(x_14682, 0, x_14679); +lean_ctor_set(x_14682, 1, x_14680); +return x_14682; +} +} +} +block_14582: +{ +lean_object* x_14557; +x_14557 = lean_ctor_get(x_14555, 0); +lean_inc(x_14557); +if (lean_obj_tag(x_14557) == 0) +{ +lean_object* x_14558; lean_object* x_14559; lean_object* x_14560; lean_object* x_14561; lean_object* x_14562; lean_object* x_14563; lean_object* x_14564; lean_object* x_14565; lean_object* x_14566; lean_object* x_14567; +lean_dec(x_14129); +x_14558 = lean_ctor_get(x_14555, 1); +lean_inc(x_14558); +lean_dec(x_14555); +x_14559 = lean_alloc_ctor(6, 2, 0); +lean_ctor_set(x_14559, 0, x_153); +lean_ctor_set(x_14559, 1, x_14119); +x_14560 = lean_ctor_get(x_1, 0); +lean_inc(x_14560); +x_14561 = l_Lean_IR_ToIR_bindVar(x_14560, x_14558, x_4, x_5, x_14556); +x_14562 = lean_ctor_get(x_14561, 0); +lean_inc(x_14562); +x_14563 = lean_ctor_get(x_14561, 1); +lean_inc(x_14563); +lean_dec(x_14561); +x_14564 = lean_ctor_get(x_14562, 0); +lean_inc(x_14564); +x_14565 = lean_ctor_get(x_14562, 1); +lean_inc(x_14565); +lean_dec(x_14562); +x_14566 = lean_ctor_get(x_1, 2); +lean_inc(x_14566); +lean_dec(x_1); +lean_inc(x_5); +lean_inc(x_4); +x_14567 = l_Lean_IR_ToIR_lowerType(x_14566, x_14565, x_4, x_5, x_14563); +if (lean_obj_tag(x_14567) == 0) +{ +lean_object* x_14568; lean_object* x_14569; lean_object* x_14570; lean_object* x_14571; lean_object* x_14572; +x_14568 = lean_ctor_get(x_14567, 0); +lean_inc(x_14568); +x_14569 = lean_ctor_get(x_14567, 1); +lean_inc(x_14569); +lean_dec(x_14567); +x_14570 = lean_ctor_get(x_14568, 0); +lean_inc(x_14570); +x_14571 = lean_ctor_get(x_14568, 1); +lean_inc(x_14571); +lean_dec(x_14568); +x_14572 = l_Lean_IR_ToIR_lowerLet___lambda__1(x_2, x_14564, x_14559, x_14570, x_14571, x_4, x_5, x_14569); +return x_14572; +} +else +{ +lean_object* x_14573; lean_object* x_14574; lean_object* x_14575; lean_object* x_14576; +lean_dec(x_14564); +lean_dec(x_14559); +lean_dec(x_5); +lean_dec(x_4); +lean_dec(x_2); +x_14573 = lean_ctor_get(x_14567, 0); +lean_inc(x_14573); +x_14574 = lean_ctor_get(x_14567, 1); +lean_inc(x_14574); +if (lean_is_exclusive(x_14567)) { + lean_ctor_release(x_14567, 0); + lean_ctor_release(x_14567, 1); + x_14575 = x_14567; +} else { + lean_dec_ref(x_14567); + x_14575 = lean_box(0); +} +if (lean_is_scalar(x_14575)) { + x_14576 = lean_alloc_ctor(1, 2, 0); +} else { + x_14576 = x_14575; +} +lean_ctor_set(x_14576, 0, x_14573); +lean_ctor_set(x_14576, 1, x_14574); +return x_14576; +} +} +else +{ +lean_object* x_14577; lean_object* x_14578; lean_object* x_14579; lean_object* x_14580; lean_object* x_14581; +lean_dec(x_14119); +lean_dec(x_153); +lean_dec(x_5); +lean_dec(x_4); +lean_dec(x_2); +lean_dec(x_1); +x_14577 = lean_ctor_get(x_14555, 1); +lean_inc(x_14577); +if (lean_is_exclusive(x_14555)) { + lean_ctor_release(x_14555, 0); + lean_ctor_release(x_14555, 1); + x_14578 = x_14555; +} else { + lean_dec_ref(x_14555); + x_14578 = lean_box(0); +} +x_14579 = lean_ctor_get(x_14557, 0); +lean_inc(x_14579); +lean_dec(x_14557); +if (lean_is_scalar(x_14578)) { + x_14580 = lean_alloc_ctor(0, 2, 0); +} else { + x_14580 = x_14578; +} +lean_ctor_set(x_14580, 0, x_14579); +lean_ctor_set(x_14580, 1, x_14577); +if (lean_is_scalar(x_14129)) { + x_14581 = lean_alloc_ctor(0, 2, 0); +} else { + x_14581 = x_14129; +} +lean_ctor_set(x_14581, 0, x_14580); +lean_ctor_set(x_14581, 1, x_14556); +return x_14581; +} +} +} +} +default: +{ +lean_object* x_14683; uint8_t x_14684; lean_object* x_14685; lean_object* x_14686; lean_object* x_14687; lean_object* x_14688; lean_object* 
x_14689; lean_object* x_14690; lean_object* x_14691; lean_object* x_14692; lean_object* x_14693; +lean_dec(x_14130); +lean_dec(x_14129); +lean_dec(x_14125); +lean_dec(x_14119); +lean_dec(x_11911); +lean_dec(x_11910); +lean_dec(x_2); +lean_dec(x_1); +if (lean_is_exclusive(x_14135)) { + lean_ctor_release(x_14135, 0); + x_14683 = x_14135; +} else { + lean_dec_ref(x_14135); + x_14683 = lean_box(0); +} +x_14684 = 1; +x_14685 = l_Lean_IR_ToIR_lowerLet___closed__14; +x_14686 = l_Lean_Name_toString(x_153, x_14684, x_14685); +if (lean_is_scalar(x_14683)) { + x_14687 = lean_alloc_ctor(3, 1, 0); +} else { + x_14687 = x_14683; + lean_ctor_set_tag(x_14687, 3); +} +lean_ctor_set(x_14687, 0, x_14686); +x_14688 = l_Lean_IR_ToIR_lowerLet___closed__29; +x_14689 = lean_alloc_ctor(5, 2, 0); +lean_ctor_set(x_14689, 0, x_14688); +lean_ctor_set(x_14689, 1, x_14687); +x_14690 = l_Lean_IR_ToIR_lowerLet___closed__31; +x_14691 = lean_alloc_ctor(5, 2, 0); +lean_ctor_set(x_14691, 0, x_14689); +lean_ctor_set(x_14691, 1, x_14690); +x_14692 = l_Lean_MessageData_ofFormat(x_14691); +x_14693 = l_Lean_throwError___at_Lean_IR_ToIR_lowerLet___spec__1(x_14692, x_14124, x_4, x_5, x_14128); +lean_dec(x_5); +lean_dec(x_4); +lean_dec(x_14124); +return x_14693; +} +} +} +} +else +{ +lean_object* x_14694; lean_object* x_14695; lean_object* x_14696; lean_object* x_14697; lean_object* x_14698; +lean_dec(x_14119); +lean_dec(x_11911); +lean_dec(x_11910); +lean_dec(x_153); +lean_dec(x_5); +lean_dec(x_4); +lean_dec(x_2); +lean_dec(x_1); +x_14694 = lean_ctor_get(x_14121, 1); +lean_inc(x_14694); +if (lean_is_exclusive(x_14121)) { + lean_ctor_release(x_14121, 0); + lean_ctor_release(x_14121, 1); + x_14695 = x_14121; +} else { + lean_dec_ref(x_14121); + x_14695 = lean_box(0); +} +x_14696 = lean_ctor_get(x_14123, 0); +lean_inc(x_14696); +lean_dec(x_14123); +if (lean_is_scalar(x_14695)) { + x_14697 = lean_alloc_ctor(0, 2, 0); +} else { + x_14697 = x_14695; +} +lean_ctor_set(x_14697, 0, x_14696); +lean_ctor_set(x_14697, 1, x_14694); +if (lean_is_scalar(x_11917)) { + x_14698 = lean_alloc_ctor(0, 2, 0); +} else { + x_14698 = x_11917; +} +lean_ctor_set(x_14698, 0, x_14697); +lean_ctor_set(x_14698, 1, x_14122); +return x_14698; +} +} +} +} +else +{ +uint8_t x_14800; +lean_dec(x_11911); +lean_dec(x_11910); +lean_dec(x_153); +lean_dec(x_5); +lean_dec(x_4); +lean_dec(x_2); +lean_dec(x_1); +x_14800 = !lean_is_exclusive(x_11914); +if (x_14800 == 0) +{ +return x_11914; +} +else +{ +lean_object* x_14801; lean_object* x_14802; lean_object* x_14803; +x_14801 = lean_ctor_get(x_11914, 0); +x_14802 = lean_ctor_get(x_11914, 1); +lean_inc(x_14802); +lean_inc(x_14801); +lean_dec(x_11914); +x_14803 = lean_alloc_ctor(1, 2, 0); +lean_ctor_set(x_14803, 0, x_14801); +lean_ctor_set(x_14803, 1, x_14802); +return x_14803; +} +} +} +default: +{ +lean_object* x_14804; lean_object* x_14805; size_t x_14806; size_t x_14807; lean_object* x_14808; +lean_dec(x_5943); +lean_dec(x_3048); +x_14804 = lean_ctor_get(x_7, 2); +lean_inc(x_14804); +if (lean_is_exclusive(x_7)) { + lean_ctor_release(x_7, 0); + lean_ctor_release(x_7, 1); + lean_ctor_release(x_7, 2); + x_14805 = x_7; +} else { + lean_dec_ref(x_7); + x_14805 = lean_box(0); +} +x_14806 = lean_array_size(x_14804); +x_14807 = 0; +lean_inc(x_5); +lean_inc(x_4); +lean_inc(x_14804); +x_14808 = l_Array_mapMUnsafe_map___at_Lean_IR_ToIR_lowerCode___spec__2(x_14806, x_14807, x_14804, x_3, x_4, x_5, x_6); +if (lean_obj_tag(x_14808) == 0) +{ +lean_object* x_14809; lean_object* x_14810; lean_object* x_14811; uint8_t x_14812; +x_14809 = 
lean_ctor_get(x_14808, 0); +lean_inc(x_14809); +x_14810 = lean_ctor_get(x_14808, 1); +lean_inc(x_14810); +if (lean_is_exclusive(x_14808)) { + lean_ctor_release(x_14808, 0); + lean_ctor_release(x_14808, 1); + x_14811 = x_14808; +} else { + lean_dec_ref(x_14808); + x_14811 = lean_box(0); +} +x_14812 = !lean_is_exclusive(x_14809); +if (x_14812 == 0) +{ +lean_object* x_14813; lean_object* x_14814; lean_object* x_14815; lean_object* x_14816; lean_object* x_16733; lean_object* x_16734; +x_14813 = lean_ctor_get(x_14809, 0); +x_14814 = lean_ctor_get(x_14809, 1); +lean_inc(x_153); +x_16733 = l_Lean_Compiler_LCNF_getMonoDecl_x3f(x_153, x_4, x_5, x_14810); +x_16734 = lean_ctor_get(x_16733, 0); +lean_inc(x_16734); +if (lean_obj_tag(x_16734) == 0) +{ +lean_object* x_16735; lean_object* x_16736; +x_16735 = lean_ctor_get(x_16733, 1); +lean_inc(x_16735); +lean_dec(x_16733); +x_16736 = lean_box(0); +lean_ctor_set(x_14809, 0, x_16736); +x_14815 = x_14809; +x_14816 = x_16735; +goto block_16732; +} +else +{ +uint8_t x_16737; +lean_free_object(x_14809); +x_16737 = !lean_is_exclusive(x_16733); +if (x_16737 == 0) +{ +lean_object* x_16738; lean_object* x_16739; uint8_t x_16740; +x_16738 = lean_ctor_get(x_16733, 1); +x_16739 = lean_ctor_get(x_16733, 0); +lean_dec(x_16739); +x_16740 = !lean_is_exclusive(x_16734); +if (x_16740 == 0) +{ +lean_object* x_16741; lean_object* x_16742; lean_object* x_16743; lean_object* x_16744; uint8_t x_16745; +x_16741 = lean_ctor_get(x_16734, 0); +x_16742 = lean_array_get_size(x_14813); +x_16743 = lean_ctor_get(x_16741, 3); +lean_inc(x_16743); +lean_dec(x_16741); +x_16744 = lean_array_get_size(x_16743); +lean_dec(x_16743); +x_16745 = lean_nat_dec_lt(x_16742, x_16744); +if (x_16745 == 0) +{ +uint8_t x_16746; +x_16746 = lean_nat_dec_eq(x_16742, x_16744); +if (x_16746 == 0) +{ +lean_object* x_16747; lean_object* x_16748; lean_object* x_16749; lean_object* x_16750; lean_object* x_16751; lean_object* x_16752; lean_object* x_16753; lean_object* x_16754; lean_object* x_16755; lean_object* x_16756; lean_object* x_16757; lean_object* x_16758; lean_object* x_16759; lean_object* x_16760; lean_object* x_16761; lean_object* x_16762; +x_16747 = lean_unsigned_to_nat(0u); +x_16748 = l_Array_extract___rarg(x_14813, x_16747, x_16744); +x_16749 = l_Array_extract___rarg(x_14813, x_16744, x_16742); +lean_dec(x_16742); +lean_inc(x_153); +lean_ctor_set_tag(x_16733, 6); +lean_ctor_set(x_16733, 1, x_16748); +lean_ctor_set(x_16733, 0, x_153); +x_16750 = lean_ctor_get(x_1, 0); +lean_inc(x_16750); +x_16751 = l_Lean_IR_ToIR_bindVar(x_16750, x_14814, x_4, x_5, x_16738); +x_16752 = lean_ctor_get(x_16751, 0); +lean_inc(x_16752); +x_16753 = lean_ctor_get(x_16751, 1); +lean_inc(x_16753); +lean_dec(x_16751); +x_16754 = lean_ctor_get(x_16752, 0); +lean_inc(x_16754); +x_16755 = lean_ctor_get(x_16752, 1); +lean_inc(x_16755); +lean_dec(x_16752); +x_16756 = l_Lean_IR_ToIR_newVar(x_16755, x_4, x_5, x_16753); +x_16757 = lean_ctor_get(x_16756, 0); +lean_inc(x_16757); +x_16758 = lean_ctor_get(x_16756, 1); +lean_inc(x_16758); +lean_dec(x_16756); +x_16759 = lean_ctor_get(x_16757, 0); +lean_inc(x_16759); +x_16760 = lean_ctor_get(x_16757, 1); +lean_inc(x_16760); +lean_dec(x_16757); +x_16761 = lean_ctor_get(x_1, 2); +lean_inc(x_16761); +lean_inc(x_5); +lean_inc(x_4); +x_16762 = l_Lean_IR_ToIR_lowerType(x_16761, x_16760, x_4, x_5, x_16758); +if (lean_obj_tag(x_16762) == 0) +{ +lean_object* x_16763; lean_object* x_16764; lean_object* x_16765; lean_object* x_16766; lean_object* x_16767; +x_16763 = lean_ctor_get(x_16762, 0); 
+lean_inc(x_16763); +x_16764 = lean_ctor_get(x_16762, 1); +lean_inc(x_16764); +lean_dec(x_16762); +x_16765 = lean_ctor_get(x_16763, 0); +lean_inc(x_16765); +x_16766 = lean_ctor_get(x_16763, 1); +lean_inc(x_16766); +lean_dec(x_16763); +lean_inc(x_5); +lean_inc(x_4); +lean_inc(x_2); +x_16767 = l_Lean_IR_ToIR_lowerLet___lambda__3(x_2, x_16759, x_16749, x_16754, x_16733, x_16765, x_16766, x_4, x_5, x_16764); +if (lean_obj_tag(x_16767) == 0) +{ +lean_object* x_16768; lean_object* x_16769; uint8_t x_16770; +x_16768 = lean_ctor_get(x_16767, 0); +lean_inc(x_16768); +x_16769 = lean_ctor_get(x_16767, 1); +lean_inc(x_16769); +lean_dec(x_16767); +x_16770 = !lean_is_exclusive(x_16768); +if (x_16770 == 0) +{ +lean_object* x_16771; +x_16771 = lean_ctor_get(x_16768, 0); +lean_ctor_set(x_16734, 0, x_16771); +lean_ctor_set(x_16768, 0, x_16734); +x_14815 = x_16768; +x_14816 = x_16769; +goto block_16732; +} +else +{ +lean_object* x_16772; lean_object* x_16773; lean_object* x_16774; +x_16772 = lean_ctor_get(x_16768, 0); +x_16773 = lean_ctor_get(x_16768, 1); +lean_inc(x_16773); +lean_inc(x_16772); +lean_dec(x_16768); +lean_ctor_set(x_16734, 0, x_16772); +x_16774 = lean_alloc_ctor(0, 2, 0); +lean_ctor_set(x_16774, 0, x_16734); +lean_ctor_set(x_16774, 1, x_16773); +x_14815 = x_16774; +x_14816 = x_16769; +goto block_16732; +} +} +else +{ +uint8_t x_16775; +lean_free_object(x_16734); +lean_dec(x_14813); +lean_dec(x_14811); +lean_dec(x_14805); +lean_dec(x_14804); +lean_dec(x_153); +lean_dec(x_5); +lean_dec(x_4); +lean_dec(x_2); +lean_dec(x_1); +x_16775 = !lean_is_exclusive(x_16767); +if (x_16775 == 0) +{ +return x_16767; +} +else +{ +lean_object* x_16776; lean_object* x_16777; lean_object* x_16778; +x_16776 = lean_ctor_get(x_16767, 0); +x_16777 = lean_ctor_get(x_16767, 1); +lean_inc(x_16777); +lean_inc(x_16776); +lean_dec(x_16767); +x_16778 = lean_alloc_ctor(1, 2, 0); +lean_ctor_set(x_16778, 0, x_16776); +lean_ctor_set(x_16778, 1, x_16777); +return x_16778; +} +} +} +else +{ +uint8_t x_16779; +lean_dec(x_16759); +lean_dec(x_16754); +lean_dec(x_16733); +lean_dec(x_16749); +lean_free_object(x_16734); +lean_dec(x_14813); +lean_dec(x_14811); +lean_dec(x_14805); +lean_dec(x_14804); +lean_dec(x_153); +lean_dec(x_5); +lean_dec(x_4); +lean_dec(x_2); +lean_dec(x_1); +x_16779 = !lean_is_exclusive(x_16762); +if (x_16779 == 0) +{ +return x_16762; +} +else +{ +lean_object* x_16780; lean_object* x_16781; lean_object* x_16782; +x_16780 = lean_ctor_get(x_16762, 0); +x_16781 = lean_ctor_get(x_16762, 1); +lean_inc(x_16781); +lean_inc(x_16780); +lean_dec(x_16762); +x_16782 = lean_alloc_ctor(1, 2, 0); +lean_ctor_set(x_16782, 0, x_16780); +lean_ctor_set(x_16782, 1, x_16781); +return x_16782; +} +} +} +else +{ +lean_object* x_16783; lean_object* x_16784; lean_object* x_16785; lean_object* x_16786; lean_object* x_16787; lean_object* x_16788; lean_object* x_16789; lean_object* x_16790; +lean_dec(x_16744); +lean_dec(x_16742); +lean_inc(x_14813); +lean_inc(x_153); +lean_ctor_set_tag(x_16733, 6); +lean_ctor_set(x_16733, 1, x_14813); +lean_ctor_set(x_16733, 0, x_153); +x_16783 = lean_ctor_get(x_1, 0); +lean_inc(x_16783); +x_16784 = l_Lean_IR_ToIR_bindVar(x_16783, x_14814, x_4, x_5, x_16738); +x_16785 = lean_ctor_get(x_16784, 0); +lean_inc(x_16785); +x_16786 = lean_ctor_get(x_16784, 1); +lean_inc(x_16786); +lean_dec(x_16784); +x_16787 = lean_ctor_get(x_16785, 0); +lean_inc(x_16787); +x_16788 = lean_ctor_get(x_16785, 1); +lean_inc(x_16788); +lean_dec(x_16785); +x_16789 = lean_ctor_get(x_1, 2); +lean_inc(x_16789); +lean_inc(x_5); +lean_inc(x_4); 
+x_16790 = l_Lean_IR_ToIR_lowerType(x_16789, x_16788, x_4, x_5, x_16786); +if (lean_obj_tag(x_16790) == 0) +{ +lean_object* x_16791; lean_object* x_16792; lean_object* x_16793; lean_object* x_16794; lean_object* x_16795; +x_16791 = lean_ctor_get(x_16790, 0); +lean_inc(x_16791); +x_16792 = lean_ctor_get(x_16790, 1); +lean_inc(x_16792); +lean_dec(x_16790); +x_16793 = lean_ctor_get(x_16791, 0); +lean_inc(x_16793); +x_16794 = lean_ctor_get(x_16791, 1); +lean_inc(x_16794); +lean_dec(x_16791); +lean_inc(x_5); +lean_inc(x_4); +lean_inc(x_2); +x_16795 = l_Lean_IR_ToIR_lowerLet___lambda__1(x_2, x_16787, x_16733, x_16793, x_16794, x_4, x_5, x_16792); +if (lean_obj_tag(x_16795) == 0) +{ +lean_object* x_16796; lean_object* x_16797; uint8_t x_16798; +x_16796 = lean_ctor_get(x_16795, 0); +lean_inc(x_16796); +x_16797 = lean_ctor_get(x_16795, 1); +lean_inc(x_16797); +lean_dec(x_16795); +x_16798 = !lean_is_exclusive(x_16796); +if (x_16798 == 0) +{ +lean_object* x_16799; +x_16799 = lean_ctor_get(x_16796, 0); +lean_ctor_set(x_16734, 0, x_16799); +lean_ctor_set(x_16796, 0, x_16734); +x_14815 = x_16796; +x_14816 = x_16797; +goto block_16732; +} +else +{ +lean_object* x_16800; lean_object* x_16801; lean_object* x_16802; +x_16800 = lean_ctor_get(x_16796, 0); +x_16801 = lean_ctor_get(x_16796, 1); +lean_inc(x_16801); +lean_inc(x_16800); +lean_dec(x_16796); +lean_ctor_set(x_16734, 0, x_16800); +x_16802 = lean_alloc_ctor(0, 2, 0); +lean_ctor_set(x_16802, 0, x_16734); +lean_ctor_set(x_16802, 1, x_16801); +x_14815 = x_16802; +x_14816 = x_16797; +goto block_16732; +} +} +else +{ +uint8_t x_16803; +lean_free_object(x_16734); +lean_dec(x_14813); +lean_dec(x_14811); +lean_dec(x_14805); +lean_dec(x_14804); +lean_dec(x_153); +lean_dec(x_5); +lean_dec(x_4); +lean_dec(x_2); +lean_dec(x_1); +x_16803 = !lean_is_exclusive(x_16795); +if (x_16803 == 0) +{ +return x_16795; +} +else +{ +lean_object* x_16804; lean_object* x_16805; lean_object* x_16806; +x_16804 = lean_ctor_get(x_16795, 0); +x_16805 = lean_ctor_get(x_16795, 1); +lean_inc(x_16805); +lean_inc(x_16804); +lean_dec(x_16795); +x_16806 = lean_alloc_ctor(1, 2, 0); +lean_ctor_set(x_16806, 0, x_16804); +lean_ctor_set(x_16806, 1, x_16805); +return x_16806; +} +} +} +else +{ +uint8_t x_16807; +lean_dec(x_16787); +lean_dec(x_16733); +lean_free_object(x_16734); +lean_dec(x_14813); +lean_dec(x_14811); +lean_dec(x_14805); +lean_dec(x_14804); +lean_dec(x_153); +lean_dec(x_5); +lean_dec(x_4); +lean_dec(x_2); +lean_dec(x_1); +x_16807 = !lean_is_exclusive(x_16790); +if (x_16807 == 0) +{ +return x_16790; +} +else +{ +lean_object* x_16808; lean_object* x_16809; lean_object* x_16810; +x_16808 = lean_ctor_get(x_16790, 0); +x_16809 = lean_ctor_get(x_16790, 1); +lean_inc(x_16809); +lean_inc(x_16808); +lean_dec(x_16790); +x_16810 = lean_alloc_ctor(1, 2, 0); +lean_ctor_set(x_16810, 0, x_16808); +lean_ctor_set(x_16810, 1, x_16809); +return x_16810; +} +} +} +} +else +{ +lean_object* x_16811; lean_object* x_16812; lean_object* x_16813; lean_object* x_16814; lean_object* x_16815; lean_object* x_16816; lean_object* x_16817; lean_object* x_16818; +lean_dec(x_16744); +lean_dec(x_16742); +lean_inc(x_14813); +lean_inc(x_153); +lean_ctor_set_tag(x_16733, 7); +lean_ctor_set(x_16733, 1, x_14813); +lean_ctor_set(x_16733, 0, x_153); +x_16811 = lean_ctor_get(x_1, 0); +lean_inc(x_16811); +x_16812 = l_Lean_IR_ToIR_bindVar(x_16811, x_14814, x_4, x_5, x_16738); +x_16813 = lean_ctor_get(x_16812, 0); +lean_inc(x_16813); +x_16814 = lean_ctor_get(x_16812, 1); +lean_inc(x_16814); +lean_dec(x_16812); +x_16815 = 
lean_ctor_get(x_16813, 0); +lean_inc(x_16815); +x_16816 = lean_ctor_get(x_16813, 1); +lean_inc(x_16816); +lean_dec(x_16813); +x_16817 = lean_box(7); +lean_inc(x_5); +lean_inc(x_4); +lean_inc(x_2); +x_16818 = l_Lean_IR_ToIR_lowerLet___lambda__1(x_2, x_16815, x_16733, x_16817, x_16816, x_4, x_5, x_16814); +if (lean_obj_tag(x_16818) == 0) +{ +lean_object* x_16819; lean_object* x_16820; uint8_t x_16821; +x_16819 = lean_ctor_get(x_16818, 0); +lean_inc(x_16819); +x_16820 = lean_ctor_get(x_16818, 1); +lean_inc(x_16820); +lean_dec(x_16818); +x_16821 = !lean_is_exclusive(x_16819); +if (x_16821 == 0) +{ +lean_object* x_16822; +x_16822 = lean_ctor_get(x_16819, 0); +lean_ctor_set(x_16734, 0, x_16822); +lean_ctor_set(x_16819, 0, x_16734); +x_14815 = x_16819; +x_14816 = x_16820; +goto block_16732; +} +else +{ +lean_object* x_16823; lean_object* x_16824; lean_object* x_16825; +x_16823 = lean_ctor_get(x_16819, 0); +x_16824 = lean_ctor_get(x_16819, 1); +lean_inc(x_16824); +lean_inc(x_16823); +lean_dec(x_16819); +lean_ctor_set(x_16734, 0, x_16823); +x_16825 = lean_alloc_ctor(0, 2, 0); +lean_ctor_set(x_16825, 0, x_16734); +lean_ctor_set(x_16825, 1, x_16824); +x_14815 = x_16825; +x_14816 = x_16820; +goto block_16732; +} +} +else +{ +uint8_t x_16826; +lean_free_object(x_16734); +lean_dec(x_14813); +lean_dec(x_14811); +lean_dec(x_14805); +lean_dec(x_14804); +lean_dec(x_153); +lean_dec(x_5); +lean_dec(x_4); +lean_dec(x_2); +lean_dec(x_1); +x_16826 = !lean_is_exclusive(x_16818); +if (x_16826 == 0) +{ +return x_16818; +} +else +{ +lean_object* x_16827; lean_object* x_16828; lean_object* x_16829; +x_16827 = lean_ctor_get(x_16818, 0); +x_16828 = lean_ctor_get(x_16818, 1); +lean_inc(x_16828); +lean_inc(x_16827); +lean_dec(x_16818); +x_16829 = lean_alloc_ctor(1, 2, 0); +lean_ctor_set(x_16829, 0, x_16827); +lean_ctor_set(x_16829, 1, x_16828); +return x_16829; +} +} +} +} +else +{ +lean_object* x_16830; lean_object* x_16831; lean_object* x_16832; lean_object* x_16833; uint8_t x_16834; +x_16830 = lean_ctor_get(x_16734, 0); +lean_inc(x_16830); +lean_dec(x_16734); +x_16831 = lean_array_get_size(x_14813); +x_16832 = lean_ctor_get(x_16830, 3); +lean_inc(x_16832); +lean_dec(x_16830); +x_16833 = lean_array_get_size(x_16832); +lean_dec(x_16832); +x_16834 = lean_nat_dec_lt(x_16831, x_16833); +if (x_16834 == 0) +{ +uint8_t x_16835; +x_16835 = lean_nat_dec_eq(x_16831, x_16833); +if (x_16835 == 0) +{ +lean_object* x_16836; lean_object* x_16837; lean_object* x_16838; lean_object* x_16839; lean_object* x_16840; lean_object* x_16841; lean_object* x_16842; lean_object* x_16843; lean_object* x_16844; lean_object* x_16845; lean_object* x_16846; lean_object* x_16847; lean_object* x_16848; lean_object* x_16849; lean_object* x_16850; lean_object* x_16851; +x_16836 = lean_unsigned_to_nat(0u); +x_16837 = l_Array_extract___rarg(x_14813, x_16836, x_16833); +x_16838 = l_Array_extract___rarg(x_14813, x_16833, x_16831); +lean_dec(x_16831); +lean_inc(x_153); +lean_ctor_set_tag(x_16733, 6); +lean_ctor_set(x_16733, 1, x_16837); +lean_ctor_set(x_16733, 0, x_153); +x_16839 = lean_ctor_get(x_1, 0); +lean_inc(x_16839); +x_16840 = l_Lean_IR_ToIR_bindVar(x_16839, x_14814, x_4, x_5, x_16738); +x_16841 = lean_ctor_get(x_16840, 0); +lean_inc(x_16841); +x_16842 = lean_ctor_get(x_16840, 1); +lean_inc(x_16842); +lean_dec(x_16840); +x_16843 = lean_ctor_get(x_16841, 0); +lean_inc(x_16843); +x_16844 = lean_ctor_get(x_16841, 1); +lean_inc(x_16844); +lean_dec(x_16841); +x_16845 = l_Lean_IR_ToIR_newVar(x_16844, x_4, x_5, x_16842); +x_16846 = lean_ctor_get(x_16845, 0); 
+lean_inc(x_16846); +x_16847 = lean_ctor_get(x_16845, 1); +lean_inc(x_16847); +lean_dec(x_16845); +x_16848 = lean_ctor_get(x_16846, 0); +lean_inc(x_16848); +x_16849 = lean_ctor_get(x_16846, 1); +lean_inc(x_16849); +lean_dec(x_16846); +x_16850 = lean_ctor_get(x_1, 2); +lean_inc(x_16850); +lean_inc(x_5); +lean_inc(x_4); +x_16851 = l_Lean_IR_ToIR_lowerType(x_16850, x_16849, x_4, x_5, x_16847); +if (lean_obj_tag(x_16851) == 0) +{ +lean_object* x_16852; lean_object* x_16853; lean_object* x_16854; lean_object* x_16855; lean_object* x_16856; +x_16852 = lean_ctor_get(x_16851, 0); +lean_inc(x_16852); +x_16853 = lean_ctor_get(x_16851, 1); +lean_inc(x_16853); +lean_dec(x_16851); +x_16854 = lean_ctor_get(x_16852, 0); +lean_inc(x_16854); +x_16855 = lean_ctor_get(x_16852, 1); +lean_inc(x_16855); +lean_dec(x_16852); +lean_inc(x_5); +lean_inc(x_4); +lean_inc(x_2); +x_16856 = l_Lean_IR_ToIR_lowerLet___lambda__3(x_2, x_16848, x_16838, x_16843, x_16733, x_16854, x_16855, x_4, x_5, x_16853); +if (lean_obj_tag(x_16856) == 0) +{ +lean_object* x_16857; lean_object* x_16858; lean_object* x_16859; lean_object* x_16860; lean_object* x_16861; lean_object* x_16862; lean_object* x_16863; +x_16857 = lean_ctor_get(x_16856, 0); +lean_inc(x_16857); +x_16858 = lean_ctor_get(x_16856, 1); +lean_inc(x_16858); +lean_dec(x_16856); +x_16859 = lean_ctor_get(x_16857, 0); +lean_inc(x_16859); +x_16860 = lean_ctor_get(x_16857, 1); +lean_inc(x_16860); +if (lean_is_exclusive(x_16857)) { + lean_ctor_release(x_16857, 0); + lean_ctor_release(x_16857, 1); + x_16861 = x_16857; +} else { + lean_dec_ref(x_16857); + x_16861 = lean_box(0); +} +x_16862 = lean_alloc_ctor(1, 1, 0); +lean_ctor_set(x_16862, 0, x_16859); +if (lean_is_scalar(x_16861)) { + x_16863 = lean_alloc_ctor(0, 2, 0); +} else { + x_16863 = x_16861; +} +lean_ctor_set(x_16863, 0, x_16862); +lean_ctor_set(x_16863, 1, x_16860); +x_14815 = x_16863; +x_14816 = x_16858; +goto block_16732; +} +else +{ +lean_object* x_16864; lean_object* x_16865; lean_object* x_16866; lean_object* x_16867; +lean_dec(x_14813); +lean_dec(x_14811); +lean_dec(x_14805); +lean_dec(x_14804); +lean_dec(x_153); +lean_dec(x_5); +lean_dec(x_4); +lean_dec(x_2); +lean_dec(x_1); +x_16864 = lean_ctor_get(x_16856, 0); +lean_inc(x_16864); +x_16865 = lean_ctor_get(x_16856, 1); +lean_inc(x_16865); +if (lean_is_exclusive(x_16856)) { + lean_ctor_release(x_16856, 0); + lean_ctor_release(x_16856, 1); + x_16866 = x_16856; +} else { + lean_dec_ref(x_16856); + x_16866 = lean_box(0); +} +if (lean_is_scalar(x_16866)) { + x_16867 = lean_alloc_ctor(1, 2, 0); +} else { + x_16867 = x_16866; +} +lean_ctor_set(x_16867, 0, x_16864); +lean_ctor_set(x_16867, 1, x_16865); +return x_16867; +} +} +else +{ +lean_object* x_16868; lean_object* x_16869; lean_object* x_16870; lean_object* x_16871; +lean_dec(x_16848); +lean_dec(x_16843); +lean_dec(x_16733); +lean_dec(x_16838); +lean_dec(x_14813); +lean_dec(x_14811); +lean_dec(x_14805); +lean_dec(x_14804); +lean_dec(x_153); +lean_dec(x_5); +lean_dec(x_4); +lean_dec(x_2); +lean_dec(x_1); +x_16868 = lean_ctor_get(x_16851, 0); +lean_inc(x_16868); +x_16869 = lean_ctor_get(x_16851, 1); +lean_inc(x_16869); +if (lean_is_exclusive(x_16851)) { + lean_ctor_release(x_16851, 0); + lean_ctor_release(x_16851, 1); + x_16870 = x_16851; +} else { + lean_dec_ref(x_16851); + x_16870 = lean_box(0); +} +if (lean_is_scalar(x_16870)) { + x_16871 = lean_alloc_ctor(1, 2, 0); +} else { + x_16871 = x_16870; +} +lean_ctor_set(x_16871, 0, x_16868); +lean_ctor_set(x_16871, 1, x_16869); +return x_16871; +} +} +else +{ 
+lean_object* x_16872; lean_object* x_16873; lean_object* x_16874; lean_object* x_16875; lean_object* x_16876; lean_object* x_16877; lean_object* x_16878; lean_object* x_16879; +lean_dec(x_16833); +lean_dec(x_16831); +lean_inc(x_14813); +lean_inc(x_153); +lean_ctor_set_tag(x_16733, 6); +lean_ctor_set(x_16733, 1, x_14813); +lean_ctor_set(x_16733, 0, x_153); +x_16872 = lean_ctor_get(x_1, 0); +lean_inc(x_16872); +x_16873 = l_Lean_IR_ToIR_bindVar(x_16872, x_14814, x_4, x_5, x_16738); +x_16874 = lean_ctor_get(x_16873, 0); +lean_inc(x_16874); +x_16875 = lean_ctor_get(x_16873, 1); +lean_inc(x_16875); +lean_dec(x_16873); +x_16876 = lean_ctor_get(x_16874, 0); +lean_inc(x_16876); +x_16877 = lean_ctor_get(x_16874, 1); +lean_inc(x_16877); +lean_dec(x_16874); +x_16878 = lean_ctor_get(x_1, 2); +lean_inc(x_16878); +lean_inc(x_5); +lean_inc(x_4); +x_16879 = l_Lean_IR_ToIR_lowerType(x_16878, x_16877, x_4, x_5, x_16875); +if (lean_obj_tag(x_16879) == 0) +{ +lean_object* x_16880; lean_object* x_16881; lean_object* x_16882; lean_object* x_16883; lean_object* x_16884; +x_16880 = lean_ctor_get(x_16879, 0); +lean_inc(x_16880); +x_16881 = lean_ctor_get(x_16879, 1); +lean_inc(x_16881); +lean_dec(x_16879); +x_16882 = lean_ctor_get(x_16880, 0); +lean_inc(x_16882); +x_16883 = lean_ctor_get(x_16880, 1); +lean_inc(x_16883); +lean_dec(x_16880); +lean_inc(x_5); +lean_inc(x_4); +lean_inc(x_2); +x_16884 = l_Lean_IR_ToIR_lowerLet___lambda__1(x_2, x_16876, x_16733, x_16882, x_16883, x_4, x_5, x_16881); +if (lean_obj_tag(x_16884) == 0) +{ +lean_object* x_16885; lean_object* x_16886; lean_object* x_16887; lean_object* x_16888; lean_object* x_16889; lean_object* x_16890; lean_object* x_16891; +x_16885 = lean_ctor_get(x_16884, 0); +lean_inc(x_16885); +x_16886 = lean_ctor_get(x_16884, 1); +lean_inc(x_16886); +lean_dec(x_16884); +x_16887 = lean_ctor_get(x_16885, 0); +lean_inc(x_16887); +x_16888 = lean_ctor_get(x_16885, 1); +lean_inc(x_16888); +if (lean_is_exclusive(x_16885)) { + lean_ctor_release(x_16885, 0); + lean_ctor_release(x_16885, 1); + x_16889 = x_16885; +} else { + lean_dec_ref(x_16885); + x_16889 = lean_box(0); +} +x_16890 = lean_alloc_ctor(1, 1, 0); +lean_ctor_set(x_16890, 0, x_16887); +if (lean_is_scalar(x_16889)) { + x_16891 = lean_alloc_ctor(0, 2, 0); +} else { + x_16891 = x_16889; +} +lean_ctor_set(x_16891, 0, x_16890); +lean_ctor_set(x_16891, 1, x_16888); +x_14815 = x_16891; +x_14816 = x_16886; +goto block_16732; +} +else +{ +lean_object* x_16892; lean_object* x_16893; lean_object* x_16894; lean_object* x_16895; +lean_dec(x_14813); +lean_dec(x_14811); +lean_dec(x_14805); +lean_dec(x_14804); +lean_dec(x_153); +lean_dec(x_5); +lean_dec(x_4); +lean_dec(x_2); +lean_dec(x_1); +x_16892 = lean_ctor_get(x_16884, 0); +lean_inc(x_16892); +x_16893 = lean_ctor_get(x_16884, 1); +lean_inc(x_16893); +if (lean_is_exclusive(x_16884)) { + lean_ctor_release(x_16884, 0); + lean_ctor_release(x_16884, 1); + x_16894 = x_16884; +} else { + lean_dec_ref(x_16884); + x_16894 = lean_box(0); +} +if (lean_is_scalar(x_16894)) { + x_16895 = lean_alloc_ctor(1, 2, 0); +} else { + x_16895 = x_16894; +} +lean_ctor_set(x_16895, 0, x_16892); +lean_ctor_set(x_16895, 1, x_16893); +return x_16895; +} +} +else +{ +lean_object* x_16896; lean_object* x_16897; lean_object* x_16898; lean_object* x_16899; +lean_dec(x_16876); +lean_dec(x_16733); +lean_dec(x_14813); +lean_dec(x_14811); +lean_dec(x_14805); +lean_dec(x_14804); +lean_dec(x_153); +lean_dec(x_5); +lean_dec(x_4); +lean_dec(x_2); +lean_dec(x_1); +x_16896 = lean_ctor_get(x_16879, 0); +lean_inc(x_16896); 
+x_16897 = lean_ctor_get(x_16879, 1); +lean_inc(x_16897); +if (lean_is_exclusive(x_16879)) { + lean_ctor_release(x_16879, 0); + lean_ctor_release(x_16879, 1); + x_16898 = x_16879; +} else { + lean_dec_ref(x_16879); + x_16898 = lean_box(0); +} +if (lean_is_scalar(x_16898)) { + x_16899 = lean_alloc_ctor(1, 2, 0); +} else { + x_16899 = x_16898; +} +lean_ctor_set(x_16899, 0, x_16896); +lean_ctor_set(x_16899, 1, x_16897); +return x_16899; +} +} +} +else +{ +lean_object* x_16900; lean_object* x_16901; lean_object* x_16902; lean_object* x_16903; lean_object* x_16904; lean_object* x_16905; lean_object* x_16906; lean_object* x_16907; +lean_dec(x_16833); +lean_dec(x_16831); +lean_inc(x_14813); +lean_inc(x_153); +lean_ctor_set_tag(x_16733, 7); +lean_ctor_set(x_16733, 1, x_14813); +lean_ctor_set(x_16733, 0, x_153); +x_16900 = lean_ctor_get(x_1, 0); +lean_inc(x_16900); +x_16901 = l_Lean_IR_ToIR_bindVar(x_16900, x_14814, x_4, x_5, x_16738); +x_16902 = lean_ctor_get(x_16901, 0); +lean_inc(x_16902); +x_16903 = lean_ctor_get(x_16901, 1); +lean_inc(x_16903); +lean_dec(x_16901); +x_16904 = lean_ctor_get(x_16902, 0); +lean_inc(x_16904); +x_16905 = lean_ctor_get(x_16902, 1); +lean_inc(x_16905); +lean_dec(x_16902); +x_16906 = lean_box(7); +lean_inc(x_5); +lean_inc(x_4); +lean_inc(x_2); +x_16907 = l_Lean_IR_ToIR_lowerLet___lambda__1(x_2, x_16904, x_16733, x_16906, x_16905, x_4, x_5, x_16903); +if (lean_obj_tag(x_16907) == 0) +{ +lean_object* x_16908; lean_object* x_16909; lean_object* x_16910; lean_object* x_16911; lean_object* x_16912; lean_object* x_16913; lean_object* x_16914; +x_16908 = lean_ctor_get(x_16907, 0); +lean_inc(x_16908); +x_16909 = lean_ctor_get(x_16907, 1); +lean_inc(x_16909); +lean_dec(x_16907); +x_16910 = lean_ctor_get(x_16908, 0); +lean_inc(x_16910); +x_16911 = lean_ctor_get(x_16908, 1); +lean_inc(x_16911); +if (lean_is_exclusive(x_16908)) { + lean_ctor_release(x_16908, 0); + lean_ctor_release(x_16908, 1); + x_16912 = x_16908; +} else { + lean_dec_ref(x_16908); + x_16912 = lean_box(0); +} +x_16913 = lean_alloc_ctor(1, 1, 0); +lean_ctor_set(x_16913, 0, x_16910); +if (lean_is_scalar(x_16912)) { + x_16914 = lean_alloc_ctor(0, 2, 0); +} else { + x_16914 = x_16912; +} +lean_ctor_set(x_16914, 0, x_16913); +lean_ctor_set(x_16914, 1, x_16911); +x_14815 = x_16914; +x_14816 = x_16909; +goto block_16732; +} +else +{ +lean_object* x_16915; lean_object* x_16916; lean_object* x_16917; lean_object* x_16918; +lean_dec(x_14813); +lean_dec(x_14811); +lean_dec(x_14805); +lean_dec(x_14804); +lean_dec(x_153); +lean_dec(x_5); +lean_dec(x_4); +lean_dec(x_2); +lean_dec(x_1); +x_16915 = lean_ctor_get(x_16907, 0); +lean_inc(x_16915); +x_16916 = lean_ctor_get(x_16907, 1); +lean_inc(x_16916); +if (lean_is_exclusive(x_16907)) { + lean_ctor_release(x_16907, 0); + lean_ctor_release(x_16907, 1); + x_16917 = x_16907; +} else { + lean_dec_ref(x_16907); + x_16917 = lean_box(0); +} +if (lean_is_scalar(x_16917)) { + x_16918 = lean_alloc_ctor(1, 2, 0); +} else { + x_16918 = x_16917; +} +lean_ctor_set(x_16918, 0, x_16915); +lean_ctor_set(x_16918, 1, x_16916); +return x_16918; +} +} +} +} +else +{ +lean_object* x_16919; lean_object* x_16920; lean_object* x_16921; lean_object* x_16922; lean_object* x_16923; lean_object* x_16924; uint8_t x_16925; +x_16919 = lean_ctor_get(x_16733, 1); +lean_inc(x_16919); +lean_dec(x_16733); +x_16920 = lean_ctor_get(x_16734, 0); +lean_inc(x_16920); +if (lean_is_exclusive(x_16734)) { + lean_ctor_release(x_16734, 0); + x_16921 = x_16734; +} else { + lean_dec_ref(x_16734); + x_16921 = lean_box(0); +} 
+x_16922 = lean_array_get_size(x_14813); +x_16923 = lean_ctor_get(x_16920, 3); +lean_inc(x_16923); +lean_dec(x_16920); +x_16924 = lean_array_get_size(x_16923); +lean_dec(x_16923); +x_16925 = lean_nat_dec_lt(x_16922, x_16924); +if (x_16925 == 0) +{ +uint8_t x_16926; +x_16926 = lean_nat_dec_eq(x_16922, x_16924); +if (x_16926 == 0) +{ +lean_object* x_16927; lean_object* x_16928; lean_object* x_16929; lean_object* x_16930; lean_object* x_16931; lean_object* x_16932; lean_object* x_16933; lean_object* x_16934; lean_object* x_16935; lean_object* x_16936; lean_object* x_16937; lean_object* x_16938; lean_object* x_16939; lean_object* x_16940; lean_object* x_16941; lean_object* x_16942; lean_object* x_16943; +x_16927 = lean_unsigned_to_nat(0u); +x_16928 = l_Array_extract___rarg(x_14813, x_16927, x_16924); +x_16929 = l_Array_extract___rarg(x_14813, x_16924, x_16922); +lean_dec(x_16922); +lean_inc(x_153); +x_16930 = lean_alloc_ctor(6, 2, 0); +lean_ctor_set(x_16930, 0, x_153); +lean_ctor_set(x_16930, 1, x_16928); +x_16931 = lean_ctor_get(x_1, 0); +lean_inc(x_16931); +x_16932 = l_Lean_IR_ToIR_bindVar(x_16931, x_14814, x_4, x_5, x_16919); +x_16933 = lean_ctor_get(x_16932, 0); +lean_inc(x_16933); +x_16934 = lean_ctor_get(x_16932, 1); +lean_inc(x_16934); +lean_dec(x_16932); +x_16935 = lean_ctor_get(x_16933, 0); +lean_inc(x_16935); +x_16936 = lean_ctor_get(x_16933, 1); +lean_inc(x_16936); +lean_dec(x_16933); +x_16937 = l_Lean_IR_ToIR_newVar(x_16936, x_4, x_5, x_16934); +x_16938 = lean_ctor_get(x_16937, 0); +lean_inc(x_16938); +x_16939 = lean_ctor_get(x_16937, 1); +lean_inc(x_16939); +lean_dec(x_16937); +x_16940 = lean_ctor_get(x_16938, 0); +lean_inc(x_16940); +x_16941 = lean_ctor_get(x_16938, 1); +lean_inc(x_16941); +lean_dec(x_16938); +x_16942 = lean_ctor_get(x_1, 2); +lean_inc(x_16942); +lean_inc(x_5); +lean_inc(x_4); +x_16943 = l_Lean_IR_ToIR_lowerType(x_16942, x_16941, x_4, x_5, x_16939); +if (lean_obj_tag(x_16943) == 0) +{ +lean_object* x_16944; lean_object* x_16945; lean_object* x_16946; lean_object* x_16947; lean_object* x_16948; +x_16944 = lean_ctor_get(x_16943, 0); +lean_inc(x_16944); +x_16945 = lean_ctor_get(x_16943, 1); +lean_inc(x_16945); +lean_dec(x_16943); +x_16946 = lean_ctor_get(x_16944, 0); +lean_inc(x_16946); +x_16947 = lean_ctor_get(x_16944, 1); +lean_inc(x_16947); +lean_dec(x_16944); +lean_inc(x_5); +lean_inc(x_4); +lean_inc(x_2); +x_16948 = l_Lean_IR_ToIR_lowerLet___lambda__3(x_2, x_16940, x_16929, x_16935, x_16930, x_16946, x_16947, x_4, x_5, x_16945); +if (lean_obj_tag(x_16948) == 0) +{ +lean_object* x_16949; lean_object* x_16950; lean_object* x_16951; lean_object* x_16952; lean_object* x_16953; lean_object* x_16954; lean_object* x_16955; +x_16949 = lean_ctor_get(x_16948, 0); +lean_inc(x_16949); +x_16950 = lean_ctor_get(x_16948, 1); +lean_inc(x_16950); +lean_dec(x_16948); +x_16951 = lean_ctor_get(x_16949, 0); +lean_inc(x_16951); +x_16952 = lean_ctor_get(x_16949, 1); +lean_inc(x_16952); +if (lean_is_exclusive(x_16949)) { + lean_ctor_release(x_16949, 0); + lean_ctor_release(x_16949, 1); + x_16953 = x_16949; +} else { + lean_dec_ref(x_16949); + x_16953 = lean_box(0); +} +if (lean_is_scalar(x_16921)) { + x_16954 = lean_alloc_ctor(1, 1, 0); +} else { + x_16954 = x_16921; +} +lean_ctor_set(x_16954, 0, x_16951); +if (lean_is_scalar(x_16953)) { + x_16955 = lean_alloc_ctor(0, 2, 0); +} else { + x_16955 = x_16953; +} +lean_ctor_set(x_16955, 0, x_16954); +lean_ctor_set(x_16955, 1, x_16952); +x_14815 = x_16955; +x_14816 = x_16950; +goto block_16732; +} +else +{ +lean_object* x_16956; 
lean_object* x_16957; lean_object* x_16958; lean_object* x_16959; +lean_dec(x_16921); +lean_dec(x_14813); +lean_dec(x_14811); +lean_dec(x_14805); +lean_dec(x_14804); +lean_dec(x_153); +lean_dec(x_5); +lean_dec(x_4); +lean_dec(x_2); +lean_dec(x_1); +x_16956 = lean_ctor_get(x_16948, 0); +lean_inc(x_16956); +x_16957 = lean_ctor_get(x_16948, 1); +lean_inc(x_16957); +if (lean_is_exclusive(x_16948)) { + lean_ctor_release(x_16948, 0); + lean_ctor_release(x_16948, 1); + x_16958 = x_16948; +} else { + lean_dec_ref(x_16948); + x_16958 = lean_box(0); +} +if (lean_is_scalar(x_16958)) { + x_16959 = lean_alloc_ctor(1, 2, 0); +} else { + x_16959 = x_16958; +} +lean_ctor_set(x_16959, 0, x_16956); +lean_ctor_set(x_16959, 1, x_16957); +return x_16959; +} +} +else +{ +lean_object* x_16960; lean_object* x_16961; lean_object* x_16962; lean_object* x_16963; +lean_dec(x_16940); +lean_dec(x_16935); +lean_dec(x_16930); +lean_dec(x_16929); +lean_dec(x_16921); +lean_dec(x_14813); +lean_dec(x_14811); +lean_dec(x_14805); +lean_dec(x_14804); +lean_dec(x_153); +lean_dec(x_5); +lean_dec(x_4); +lean_dec(x_2); +lean_dec(x_1); +x_16960 = lean_ctor_get(x_16943, 0); +lean_inc(x_16960); +x_16961 = lean_ctor_get(x_16943, 1); +lean_inc(x_16961); +if (lean_is_exclusive(x_16943)) { + lean_ctor_release(x_16943, 0); + lean_ctor_release(x_16943, 1); + x_16962 = x_16943; +} else { + lean_dec_ref(x_16943); + x_16962 = lean_box(0); +} +if (lean_is_scalar(x_16962)) { + x_16963 = lean_alloc_ctor(1, 2, 0); +} else { + x_16963 = x_16962; +} +lean_ctor_set(x_16963, 0, x_16960); +lean_ctor_set(x_16963, 1, x_16961); +return x_16963; +} +} +else +{ +lean_object* x_16964; lean_object* x_16965; lean_object* x_16966; lean_object* x_16967; lean_object* x_16968; lean_object* x_16969; lean_object* x_16970; lean_object* x_16971; lean_object* x_16972; +lean_dec(x_16924); +lean_dec(x_16922); +lean_inc(x_14813); +lean_inc(x_153); +x_16964 = lean_alloc_ctor(6, 2, 0); +lean_ctor_set(x_16964, 0, x_153); +lean_ctor_set(x_16964, 1, x_14813); +x_16965 = lean_ctor_get(x_1, 0); +lean_inc(x_16965); +x_16966 = l_Lean_IR_ToIR_bindVar(x_16965, x_14814, x_4, x_5, x_16919); +x_16967 = lean_ctor_get(x_16966, 0); +lean_inc(x_16967); +x_16968 = lean_ctor_get(x_16966, 1); +lean_inc(x_16968); +lean_dec(x_16966); +x_16969 = lean_ctor_get(x_16967, 0); +lean_inc(x_16969); +x_16970 = lean_ctor_get(x_16967, 1); +lean_inc(x_16970); +lean_dec(x_16967); +x_16971 = lean_ctor_get(x_1, 2); +lean_inc(x_16971); +lean_inc(x_5); +lean_inc(x_4); +x_16972 = l_Lean_IR_ToIR_lowerType(x_16971, x_16970, x_4, x_5, x_16968); +if (lean_obj_tag(x_16972) == 0) +{ +lean_object* x_16973; lean_object* x_16974; lean_object* x_16975; lean_object* x_16976; lean_object* x_16977; +x_16973 = lean_ctor_get(x_16972, 0); +lean_inc(x_16973); +x_16974 = lean_ctor_get(x_16972, 1); +lean_inc(x_16974); +lean_dec(x_16972); +x_16975 = lean_ctor_get(x_16973, 0); +lean_inc(x_16975); +x_16976 = lean_ctor_get(x_16973, 1); +lean_inc(x_16976); +lean_dec(x_16973); +lean_inc(x_5); +lean_inc(x_4); +lean_inc(x_2); +x_16977 = l_Lean_IR_ToIR_lowerLet___lambda__1(x_2, x_16969, x_16964, x_16975, x_16976, x_4, x_5, x_16974); +if (lean_obj_tag(x_16977) == 0) +{ +lean_object* x_16978; lean_object* x_16979; lean_object* x_16980; lean_object* x_16981; lean_object* x_16982; lean_object* x_16983; lean_object* x_16984; +x_16978 = lean_ctor_get(x_16977, 0); +lean_inc(x_16978); +x_16979 = lean_ctor_get(x_16977, 1); +lean_inc(x_16979); +lean_dec(x_16977); +x_16980 = lean_ctor_get(x_16978, 0); +lean_inc(x_16980); +x_16981 = 
lean_ctor_get(x_16978, 1); +lean_inc(x_16981); +if (lean_is_exclusive(x_16978)) { + lean_ctor_release(x_16978, 0); + lean_ctor_release(x_16978, 1); + x_16982 = x_16978; +} else { + lean_dec_ref(x_16978); + x_16982 = lean_box(0); +} +if (lean_is_scalar(x_16921)) { + x_16983 = lean_alloc_ctor(1, 1, 0); +} else { + x_16983 = x_16921; +} +lean_ctor_set(x_16983, 0, x_16980); +if (lean_is_scalar(x_16982)) { + x_16984 = lean_alloc_ctor(0, 2, 0); +} else { + x_16984 = x_16982; +} +lean_ctor_set(x_16984, 0, x_16983); +lean_ctor_set(x_16984, 1, x_16981); +x_14815 = x_16984; +x_14816 = x_16979; +goto block_16732; +} +else +{ +lean_object* x_16985; lean_object* x_16986; lean_object* x_16987; lean_object* x_16988; +lean_dec(x_16921); +lean_dec(x_14813); +lean_dec(x_14811); +lean_dec(x_14805); +lean_dec(x_14804); +lean_dec(x_153); +lean_dec(x_5); +lean_dec(x_4); +lean_dec(x_2); +lean_dec(x_1); +x_16985 = lean_ctor_get(x_16977, 0); +lean_inc(x_16985); +x_16986 = lean_ctor_get(x_16977, 1); +lean_inc(x_16986); +if (lean_is_exclusive(x_16977)) { + lean_ctor_release(x_16977, 0); + lean_ctor_release(x_16977, 1); + x_16987 = x_16977; +} else { + lean_dec_ref(x_16977); + x_16987 = lean_box(0); +} +if (lean_is_scalar(x_16987)) { + x_16988 = lean_alloc_ctor(1, 2, 0); +} else { + x_16988 = x_16987; +} +lean_ctor_set(x_16988, 0, x_16985); +lean_ctor_set(x_16988, 1, x_16986); +return x_16988; +} +} +else +{ +lean_object* x_16989; lean_object* x_16990; lean_object* x_16991; lean_object* x_16992; +lean_dec(x_16969); +lean_dec(x_16964); +lean_dec(x_16921); +lean_dec(x_14813); +lean_dec(x_14811); +lean_dec(x_14805); +lean_dec(x_14804); +lean_dec(x_153); +lean_dec(x_5); +lean_dec(x_4); +lean_dec(x_2); +lean_dec(x_1); +x_16989 = lean_ctor_get(x_16972, 0); +lean_inc(x_16989); +x_16990 = lean_ctor_get(x_16972, 1); +lean_inc(x_16990); +if (lean_is_exclusive(x_16972)) { + lean_ctor_release(x_16972, 0); + lean_ctor_release(x_16972, 1); + x_16991 = x_16972; +} else { + lean_dec_ref(x_16972); + x_16991 = lean_box(0); +} +if (lean_is_scalar(x_16991)) { + x_16992 = lean_alloc_ctor(1, 2, 0); +} else { + x_16992 = x_16991; +} +lean_ctor_set(x_16992, 0, x_16989); +lean_ctor_set(x_16992, 1, x_16990); +return x_16992; +} +} +} +else +{ +lean_object* x_16993; lean_object* x_16994; lean_object* x_16995; lean_object* x_16996; lean_object* x_16997; lean_object* x_16998; lean_object* x_16999; lean_object* x_17000; lean_object* x_17001; +lean_dec(x_16924); +lean_dec(x_16922); +lean_inc(x_14813); +lean_inc(x_153); +x_16993 = lean_alloc_ctor(7, 2, 0); +lean_ctor_set(x_16993, 0, x_153); +lean_ctor_set(x_16993, 1, x_14813); +x_16994 = lean_ctor_get(x_1, 0); +lean_inc(x_16994); +x_16995 = l_Lean_IR_ToIR_bindVar(x_16994, x_14814, x_4, x_5, x_16919); +x_16996 = lean_ctor_get(x_16995, 0); +lean_inc(x_16996); +x_16997 = lean_ctor_get(x_16995, 1); +lean_inc(x_16997); +lean_dec(x_16995); +x_16998 = lean_ctor_get(x_16996, 0); +lean_inc(x_16998); +x_16999 = lean_ctor_get(x_16996, 1); +lean_inc(x_16999); +lean_dec(x_16996); +x_17000 = lean_box(7); +lean_inc(x_5); +lean_inc(x_4); +lean_inc(x_2); +x_17001 = l_Lean_IR_ToIR_lowerLet___lambda__1(x_2, x_16998, x_16993, x_17000, x_16999, x_4, x_5, x_16997); +if (lean_obj_tag(x_17001) == 0) +{ +lean_object* x_17002; lean_object* x_17003; lean_object* x_17004; lean_object* x_17005; lean_object* x_17006; lean_object* x_17007; lean_object* x_17008; +x_17002 = lean_ctor_get(x_17001, 0); +lean_inc(x_17002); +x_17003 = lean_ctor_get(x_17001, 1); +lean_inc(x_17003); +lean_dec(x_17001); +x_17004 = lean_ctor_get(x_17002, 
0); +lean_inc(x_17004); +x_17005 = lean_ctor_get(x_17002, 1); +lean_inc(x_17005); +if (lean_is_exclusive(x_17002)) { + lean_ctor_release(x_17002, 0); + lean_ctor_release(x_17002, 1); + x_17006 = x_17002; +} else { + lean_dec_ref(x_17002); + x_17006 = lean_box(0); +} +if (lean_is_scalar(x_16921)) { + x_17007 = lean_alloc_ctor(1, 1, 0); +} else { + x_17007 = x_16921; +} +lean_ctor_set(x_17007, 0, x_17004); +if (lean_is_scalar(x_17006)) { + x_17008 = lean_alloc_ctor(0, 2, 0); +} else { + x_17008 = x_17006; +} +lean_ctor_set(x_17008, 0, x_17007); +lean_ctor_set(x_17008, 1, x_17005); +x_14815 = x_17008; +x_14816 = x_17003; +goto block_16732; +} +else +{ +lean_object* x_17009; lean_object* x_17010; lean_object* x_17011; lean_object* x_17012; +lean_dec(x_16921); +lean_dec(x_14813); +lean_dec(x_14811); +lean_dec(x_14805); +lean_dec(x_14804); +lean_dec(x_153); +lean_dec(x_5); +lean_dec(x_4); +lean_dec(x_2); +lean_dec(x_1); +x_17009 = lean_ctor_get(x_17001, 0); +lean_inc(x_17009); +x_17010 = lean_ctor_get(x_17001, 1); +lean_inc(x_17010); +if (lean_is_exclusive(x_17001)) { + lean_ctor_release(x_17001, 0); + lean_ctor_release(x_17001, 1); + x_17011 = x_17001; +} else { + lean_dec_ref(x_17001); + x_17011 = lean_box(0); +} +if (lean_is_scalar(x_17011)) { + x_17012 = lean_alloc_ctor(1, 2, 0); +} else { + x_17012 = x_17011; +} +lean_ctor_set(x_17012, 0, x_17009); +lean_ctor_set(x_17012, 1, x_17010); +return x_17012; +} +} +} +} +block_16732: +{ +lean_object* x_14817; +x_14817 = lean_ctor_get(x_14815, 0); +lean_inc(x_14817); +if (lean_obj_tag(x_14817) == 0) +{ +uint8_t x_14818; +lean_dec(x_14811); +x_14818 = !lean_is_exclusive(x_14815); +if (x_14818 == 0) +{ +lean_object* x_14819; lean_object* x_14820; lean_object* x_14821; lean_object* x_14822; lean_object* x_14823; lean_object* x_14824; lean_object* x_14825; uint8_t x_14826; lean_object* x_14827; +x_14819 = lean_ctor_get(x_14815, 1); +x_14820 = lean_ctor_get(x_14815, 0); +lean_dec(x_14820); +x_14821 = lean_st_ref_get(x_5, x_14816); +x_14822 = lean_ctor_get(x_14821, 0); +lean_inc(x_14822); +x_14823 = lean_ctor_get(x_14821, 1); +lean_inc(x_14823); +if (lean_is_exclusive(x_14821)) { + lean_ctor_release(x_14821, 0); + lean_ctor_release(x_14821, 1); + x_14824 = x_14821; +} else { + lean_dec_ref(x_14821); + x_14824 = lean_box(0); +} +x_14825 = lean_ctor_get(x_14822, 0); +lean_inc(x_14825); +lean_dec(x_14822); +x_14826 = 0; +lean_inc(x_153); +lean_inc(x_14825); +x_14827 = l_Lean_Environment_find_x3f(x_14825, x_153, x_14826); +if (lean_obj_tag(x_14827) == 0) +{ +lean_object* x_14828; lean_object* x_14829; +lean_dec(x_14825); +lean_dec(x_14824); +lean_free_object(x_14815); +lean_dec(x_14813); +lean_dec(x_14805); +lean_dec(x_14804); +lean_dec(x_153); +lean_dec(x_2); +lean_dec(x_1); +x_14828 = l_Lean_IR_ToIR_lowerLet___closed__6; +x_14829 = l_panic___at_Lean_IR_ToIR_lowerAlt_loop___spec__1(x_14828, x_14819, x_4, x_5, x_14823); +return x_14829; +} +else +{ +lean_object* x_14830; +x_14830 = lean_ctor_get(x_14827, 0); +lean_inc(x_14830); +lean_dec(x_14827); +switch (lean_obj_tag(x_14830)) { +case 0: +{ +uint8_t x_14831; +lean_dec(x_14825); +lean_dec(x_14805); +lean_dec(x_14804); +x_14831 = !lean_is_exclusive(x_14830); +if (x_14831 == 0) +{ +lean_object* x_14832; lean_object* x_14833; uint8_t x_14834; +x_14832 = lean_ctor_get(x_14830, 0); +lean_dec(x_14832); +x_14833 = l_Lean_IR_ToIR_lowerLet___closed__9; +x_14834 = lean_name_eq(x_153, x_14833); +if (x_14834 == 0) +{ +lean_object* x_14835; uint8_t x_14836; +x_14835 = l_Lean_IR_ToIR_lowerLet___closed__11; +x_14836 = 
lean_name_eq(x_153, x_14835); +if (x_14836 == 0) +{ +lean_object* x_14837; lean_object* x_14838; lean_object* x_14839; +lean_dec(x_14824); +lean_free_object(x_14815); +lean_inc(x_153); +x_14837 = l_Lean_IR_ToIR_findDecl(x_153, x_14819, x_4, x_5, x_14823); +x_14838 = lean_ctor_get(x_14837, 0); +lean_inc(x_14838); +x_14839 = lean_ctor_get(x_14838, 0); +lean_inc(x_14839); +if (lean_obj_tag(x_14839) == 0) +{ +uint8_t x_14840; +lean_dec(x_14813); +lean_dec(x_2); +lean_dec(x_1); +x_14840 = !lean_is_exclusive(x_14837); +if (x_14840 == 0) +{ +lean_object* x_14841; lean_object* x_14842; uint8_t x_14843; +x_14841 = lean_ctor_get(x_14837, 1); +x_14842 = lean_ctor_get(x_14837, 0); +lean_dec(x_14842); +x_14843 = !lean_is_exclusive(x_14838); +if (x_14843 == 0) +{ +lean_object* x_14844; lean_object* x_14845; uint8_t x_14846; lean_object* x_14847; lean_object* x_14848; lean_object* x_14849; lean_object* x_14850; lean_object* x_14851; lean_object* x_14852; +x_14844 = lean_ctor_get(x_14838, 1); +x_14845 = lean_ctor_get(x_14838, 0); +lean_dec(x_14845); +x_14846 = 1; +x_14847 = l_Lean_IR_ToIR_lowerLet___closed__14; +x_14848 = l_Lean_Name_toString(x_153, x_14846, x_14847); +lean_ctor_set_tag(x_14830, 3); +lean_ctor_set(x_14830, 0, x_14848); +x_14849 = l_Lean_IR_ToIR_lowerLet___closed__13; +lean_ctor_set_tag(x_14838, 5); +lean_ctor_set(x_14838, 1, x_14830); +lean_ctor_set(x_14838, 0, x_14849); +x_14850 = l_Lean_IR_ToIR_lowerLet___closed__16; +lean_ctor_set_tag(x_14837, 5); +lean_ctor_set(x_14837, 1, x_14850); +x_14851 = l_Lean_MessageData_ofFormat(x_14837); +x_14852 = l_Lean_throwError___at_Lean_IR_ToIR_lowerLet___spec__1(x_14851, x_14844, x_4, x_5, x_14841); +lean_dec(x_5); +lean_dec(x_4); +lean_dec(x_14844); +return x_14852; +} +else +{ +lean_object* x_14853; uint8_t x_14854; lean_object* x_14855; lean_object* x_14856; lean_object* x_14857; lean_object* x_14858; lean_object* x_14859; lean_object* x_14860; lean_object* x_14861; +x_14853 = lean_ctor_get(x_14838, 1); +lean_inc(x_14853); +lean_dec(x_14838); +x_14854 = 1; +x_14855 = l_Lean_IR_ToIR_lowerLet___closed__14; +x_14856 = l_Lean_Name_toString(x_153, x_14854, x_14855); +lean_ctor_set_tag(x_14830, 3); +lean_ctor_set(x_14830, 0, x_14856); +x_14857 = l_Lean_IR_ToIR_lowerLet___closed__13; +x_14858 = lean_alloc_ctor(5, 2, 0); +lean_ctor_set(x_14858, 0, x_14857); +lean_ctor_set(x_14858, 1, x_14830); +x_14859 = l_Lean_IR_ToIR_lowerLet___closed__16; +lean_ctor_set_tag(x_14837, 5); +lean_ctor_set(x_14837, 1, x_14859); +lean_ctor_set(x_14837, 0, x_14858); +x_14860 = l_Lean_MessageData_ofFormat(x_14837); +x_14861 = l_Lean_throwError___at_Lean_IR_ToIR_lowerLet___spec__1(x_14860, x_14853, x_4, x_5, x_14841); +lean_dec(x_5); +lean_dec(x_4); +lean_dec(x_14853); +return x_14861; +} +} +else +{ +lean_object* x_14862; lean_object* x_14863; lean_object* x_14864; uint8_t x_14865; lean_object* x_14866; lean_object* x_14867; lean_object* x_14868; lean_object* x_14869; lean_object* x_14870; lean_object* x_14871; lean_object* x_14872; lean_object* x_14873; +x_14862 = lean_ctor_get(x_14837, 1); +lean_inc(x_14862); +lean_dec(x_14837); +x_14863 = lean_ctor_get(x_14838, 1); +lean_inc(x_14863); +if (lean_is_exclusive(x_14838)) { + lean_ctor_release(x_14838, 0); + lean_ctor_release(x_14838, 1); + x_14864 = x_14838; +} else { + lean_dec_ref(x_14838); + x_14864 = lean_box(0); +} +x_14865 = 1; +x_14866 = l_Lean_IR_ToIR_lowerLet___closed__14; +x_14867 = l_Lean_Name_toString(x_153, x_14865, x_14866); +lean_ctor_set_tag(x_14830, 3); +lean_ctor_set(x_14830, 0, x_14867); +x_14868 = 
l_Lean_IR_ToIR_lowerLet___closed__13; +if (lean_is_scalar(x_14864)) { + x_14869 = lean_alloc_ctor(5, 2, 0); +} else { + x_14869 = x_14864; + lean_ctor_set_tag(x_14869, 5); +} +lean_ctor_set(x_14869, 0, x_14868); +lean_ctor_set(x_14869, 1, x_14830); +x_14870 = l_Lean_IR_ToIR_lowerLet___closed__16; +x_14871 = lean_alloc_ctor(5, 2, 0); +lean_ctor_set(x_14871, 0, x_14869); +lean_ctor_set(x_14871, 1, x_14870); +x_14872 = l_Lean_MessageData_ofFormat(x_14871); +x_14873 = l_Lean_throwError___at_Lean_IR_ToIR_lowerLet___spec__1(x_14872, x_14863, x_4, x_5, x_14862); +lean_dec(x_5); +lean_dec(x_4); +lean_dec(x_14863); +return x_14873; +} +} +else +{ +lean_object* x_14874; uint8_t x_14875; +lean_free_object(x_14830); +x_14874 = lean_ctor_get(x_14837, 1); +lean_inc(x_14874); +lean_dec(x_14837); +x_14875 = !lean_is_exclusive(x_14838); +if (x_14875 == 0) +{ +lean_object* x_14876; lean_object* x_14877; lean_object* x_14878; lean_object* x_14879; lean_object* x_14880; lean_object* x_14881; uint8_t x_14882; +x_14876 = lean_ctor_get(x_14838, 1); +x_14877 = lean_ctor_get(x_14838, 0); +lean_dec(x_14877); +x_14878 = lean_ctor_get(x_14839, 0); +lean_inc(x_14878); +lean_dec(x_14839); +x_14879 = lean_array_get_size(x_14813); +x_14880 = l_Lean_IR_Decl_params(x_14878); +lean_dec(x_14878); +x_14881 = lean_array_get_size(x_14880); +lean_dec(x_14880); +x_14882 = lean_nat_dec_lt(x_14879, x_14881); +if (x_14882 == 0) +{ +uint8_t x_14883; +x_14883 = lean_nat_dec_eq(x_14879, x_14881); +if (x_14883 == 0) +{ +lean_object* x_14884; lean_object* x_14885; lean_object* x_14886; lean_object* x_14887; lean_object* x_14888; lean_object* x_14889; lean_object* x_14890; lean_object* x_14891; lean_object* x_14892; lean_object* x_14893; lean_object* x_14894; lean_object* x_14895; lean_object* x_14896; lean_object* x_14897; lean_object* x_14898; lean_object* x_14899; +x_14884 = lean_unsigned_to_nat(0u); +x_14885 = l_Array_extract___rarg(x_14813, x_14884, x_14881); +x_14886 = l_Array_extract___rarg(x_14813, x_14881, x_14879); +lean_dec(x_14879); +lean_dec(x_14813); +lean_ctor_set_tag(x_14838, 6); +lean_ctor_set(x_14838, 1, x_14885); +lean_ctor_set(x_14838, 0, x_153); +x_14887 = lean_ctor_get(x_1, 0); +lean_inc(x_14887); +x_14888 = l_Lean_IR_ToIR_bindVar(x_14887, x_14876, x_4, x_5, x_14874); +x_14889 = lean_ctor_get(x_14888, 0); +lean_inc(x_14889); +x_14890 = lean_ctor_get(x_14888, 1); +lean_inc(x_14890); +lean_dec(x_14888); +x_14891 = lean_ctor_get(x_14889, 0); +lean_inc(x_14891); +x_14892 = lean_ctor_get(x_14889, 1); +lean_inc(x_14892); +lean_dec(x_14889); +x_14893 = l_Lean_IR_ToIR_newVar(x_14892, x_4, x_5, x_14890); +x_14894 = lean_ctor_get(x_14893, 0); +lean_inc(x_14894); +x_14895 = lean_ctor_get(x_14893, 1); +lean_inc(x_14895); +lean_dec(x_14893); +x_14896 = lean_ctor_get(x_14894, 0); +lean_inc(x_14896); +x_14897 = lean_ctor_get(x_14894, 1); +lean_inc(x_14897); +lean_dec(x_14894); +x_14898 = lean_ctor_get(x_1, 2); +lean_inc(x_14898); +lean_dec(x_1); +lean_inc(x_5); +lean_inc(x_4); +x_14899 = l_Lean_IR_ToIR_lowerType(x_14898, x_14897, x_4, x_5, x_14895); +if (lean_obj_tag(x_14899) == 0) +{ +lean_object* x_14900; lean_object* x_14901; lean_object* x_14902; lean_object* x_14903; lean_object* x_14904; +x_14900 = lean_ctor_get(x_14899, 0); +lean_inc(x_14900); +x_14901 = lean_ctor_get(x_14899, 1); +lean_inc(x_14901); +lean_dec(x_14899); +x_14902 = lean_ctor_get(x_14900, 0); +lean_inc(x_14902); +x_14903 = lean_ctor_get(x_14900, 1); +lean_inc(x_14903); +lean_dec(x_14900); +x_14904 = l_Lean_IR_ToIR_lowerLet___lambda__3(x_2, x_14896, x_14886, 
x_14891, x_14838, x_14902, x_14903, x_4, x_5, x_14901); +return x_14904; +} +else +{ +uint8_t x_14905; +lean_dec(x_14896); +lean_dec(x_14891); +lean_dec(x_14838); +lean_dec(x_14886); +lean_dec(x_5); +lean_dec(x_4); +lean_dec(x_2); +x_14905 = !lean_is_exclusive(x_14899); +if (x_14905 == 0) +{ +return x_14899; +} +else +{ +lean_object* x_14906; lean_object* x_14907; lean_object* x_14908; +x_14906 = lean_ctor_get(x_14899, 0); +x_14907 = lean_ctor_get(x_14899, 1); +lean_inc(x_14907); +lean_inc(x_14906); +lean_dec(x_14899); +x_14908 = lean_alloc_ctor(1, 2, 0); +lean_ctor_set(x_14908, 0, x_14906); +lean_ctor_set(x_14908, 1, x_14907); +return x_14908; +} +} +} +else +{ +lean_object* x_14909; lean_object* x_14910; lean_object* x_14911; lean_object* x_14912; lean_object* x_14913; lean_object* x_14914; lean_object* x_14915; lean_object* x_14916; +lean_dec(x_14881); +lean_dec(x_14879); +lean_ctor_set_tag(x_14838, 6); +lean_ctor_set(x_14838, 1, x_14813); +lean_ctor_set(x_14838, 0, x_153); +x_14909 = lean_ctor_get(x_1, 0); +lean_inc(x_14909); +x_14910 = l_Lean_IR_ToIR_bindVar(x_14909, x_14876, x_4, x_5, x_14874); +x_14911 = lean_ctor_get(x_14910, 0); +lean_inc(x_14911); +x_14912 = lean_ctor_get(x_14910, 1); +lean_inc(x_14912); +lean_dec(x_14910); +x_14913 = lean_ctor_get(x_14911, 0); +lean_inc(x_14913); +x_14914 = lean_ctor_get(x_14911, 1); +lean_inc(x_14914); +lean_dec(x_14911); +x_14915 = lean_ctor_get(x_1, 2); +lean_inc(x_14915); +lean_dec(x_1); +lean_inc(x_5); +lean_inc(x_4); +x_14916 = l_Lean_IR_ToIR_lowerType(x_14915, x_14914, x_4, x_5, x_14912); +if (lean_obj_tag(x_14916) == 0) +{ +lean_object* x_14917; lean_object* x_14918; lean_object* x_14919; lean_object* x_14920; lean_object* x_14921; +x_14917 = lean_ctor_get(x_14916, 0); +lean_inc(x_14917); +x_14918 = lean_ctor_get(x_14916, 1); +lean_inc(x_14918); +lean_dec(x_14916); +x_14919 = lean_ctor_get(x_14917, 0); +lean_inc(x_14919); +x_14920 = lean_ctor_get(x_14917, 1); +lean_inc(x_14920); +lean_dec(x_14917); +x_14921 = l_Lean_IR_ToIR_lowerLet___lambda__1(x_2, x_14913, x_14838, x_14919, x_14920, x_4, x_5, x_14918); +return x_14921; +} +else +{ +uint8_t x_14922; +lean_dec(x_14913); +lean_dec(x_14838); +lean_dec(x_5); +lean_dec(x_4); +lean_dec(x_2); +x_14922 = !lean_is_exclusive(x_14916); +if (x_14922 == 0) +{ +return x_14916; +} +else +{ +lean_object* x_14923; lean_object* x_14924; lean_object* x_14925; +x_14923 = lean_ctor_get(x_14916, 0); +x_14924 = lean_ctor_get(x_14916, 1); +lean_inc(x_14924); +lean_inc(x_14923); +lean_dec(x_14916); +x_14925 = lean_alloc_ctor(1, 2, 0); +lean_ctor_set(x_14925, 0, x_14923); +lean_ctor_set(x_14925, 1, x_14924); +return x_14925; +} +} +} +} +else +{ +lean_object* x_14926; lean_object* x_14927; lean_object* x_14928; lean_object* x_14929; lean_object* x_14930; lean_object* x_14931; lean_object* x_14932; lean_object* x_14933; +lean_dec(x_14881); +lean_dec(x_14879); +lean_ctor_set_tag(x_14838, 7); +lean_ctor_set(x_14838, 1, x_14813); +lean_ctor_set(x_14838, 0, x_153); +x_14926 = lean_ctor_get(x_1, 0); +lean_inc(x_14926); +lean_dec(x_1); +x_14927 = l_Lean_IR_ToIR_bindVar(x_14926, x_14876, x_4, x_5, x_14874); +x_14928 = lean_ctor_get(x_14927, 0); +lean_inc(x_14928); +x_14929 = lean_ctor_get(x_14927, 1); +lean_inc(x_14929); +lean_dec(x_14927); +x_14930 = lean_ctor_get(x_14928, 0); +lean_inc(x_14930); +x_14931 = lean_ctor_get(x_14928, 1); +lean_inc(x_14931); +lean_dec(x_14928); +x_14932 = lean_box(7); +x_14933 = l_Lean_IR_ToIR_lowerLet___lambda__1(x_2, x_14930, x_14838, x_14932, x_14931, x_4, x_5, x_14929); +return x_14933; 
+} +} +else +{ +lean_object* x_14934; lean_object* x_14935; lean_object* x_14936; lean_object* x_14937; lean_object* x_14938; uint8_t x_14939; +x_14934 = lean_ctor_get(x_14838, 1); +lean_inc(x_14934); +lean_dec(x_14838); +x_14935 = lean_ctor_get(x_14839, 0); +lean_inc(x_14935); +lean_dec(x_14839); +x_14936 = lean_array_get_size(x_14813); +x_14937 = l_Lean_IR_Decl_params(x_14935); +lean_dec(x_14935); +x_14938 = lean_array_get_size(x_14937); +lean_dec(x_14937); +x_14939 = lean_nat_dec_lt(x_14936, x_14938); +if (x_14939 == 0) +{ +uint8_t x_14940; +x_14940 = lean_nat_dec_eq(x_14936, x_14938); +if (x_14940 == 0) +{ +lean_object* x_14941; lean_object* x_14942; lean_object* x_14943; lean_object* x_14944; lean_object* x_14945; lean_object* x_14946; lean_object* x_14947; lean_object* x_14948; lean_object* x_14949; lean_object* x_14950; lean_object* x_14951; lean_object* x_14952; lean_object* x_14953; lean_object* x_14954; lean_object* x_14955; lean_object* x_14956; lean_object* x_14957; +x_14941 = lean_unsigned_to_nat(0u); +x_14942 = l_Array_extract___rarg(x_14813, x_14941, x_14938); +x_14943 = l_Array_extract___rarg(x_14813, x_14938, x_14936); +lean_dec(x_14936); +lean_dec(x_14813); +x_14944 = lean_alloc_ctor(6, 2, 0); +lean_ctor_set(x_14944, 0, x_153); +lean_ctor_set(x_14944, 1, x_14942); +x_14945 = lean_ctor_get(x_1, 0); +lean_inc(x_14945); +x_14946 = l_Lean_IR_ToIR_bindVar(x_14945, x_14934, x_4, x_5, x_14874); +x_14947 = lean_ctor_get(x_14946, 0); +lean_inc(x_14947); +x_14948 = lean_ctor_get(x_14946, 1); +lean_inc(x_14948); +lean_dec(x_14946); +x_14949 = lean_ctor_get(x_14947, 0); +lean_inc(x_14949); +x_14950 = lean_ctor_get(x_14947, 1); +lean_inc(x_14950); +lean_dec(x_14947); +x_14951 = l_Lean_IR_ToIR_newVar(x_14950, x_4, x_5, x_14948); +x_14952 = lean_ctor_get(x_14951, 0); +lean_inc(x_14952); +x_14953 = lean_ctor_get(x_14951, 1); +lean_inc(x_14953); +lean_dec(x_14951); +x_14954 = lean_ctor_get(x_14952, 0); +lean_inc(x_14954); +x_14955 = lean_ctor_get(x_14952, 1); +lean_inc(x_14955); +lean_dec(x_14952); +x_14956 = lean_ctor_get(x_1, 2); +lean_inc(x_14956); +lean_dec(x_1); +lean_inc(x_5); +lean_inc(x_4); +x_14957 = l_Lean_IR_ToIR_lowerType(x_14956, x_14955, x_4, x_5, x_14953); +if (lean_obj_tag(x_14957) == 0) +{ +lean_object* x_14958; lean_object* x_14959; lean_object* x_14960; lean_object* x_14961; lean_object* x_14962; +x_14958 = lean_ctor_get(x_14957, 0); +lean_inc(x_14958); +x_14959 = lean_ctor_get(x_14957, 1); +lean_inc(x_14959); +lean_dec(x_14957); +x_14960 = lean_ctor_get(x_14958, 0); +lean_inc(x_14960); +x_14961 = lean_ctor_get(x_14958, 1); +lean_inc(x_14961); +lean_dec(x_14958); +x_14962 = l_Lean_IR_ToIR_lowerLet___lambda__3(x_2, x_14954, x_14943, x_14949, x_14944, x_14960, x_14961, x_4, x_5, x_14959); +return x_14962; +} +else +{ +lean_object* x_14963; lean_object* x_14964; lean_object* x_14965; lean_object* x_14966; +lean_dec(x_14954); +lean_dec(x_14949); +lean_dec(x_14944); +lean_dec(x_14943); +lean_dec(x_5); +lean_dec(x_4); +lean_dec(x_2); +x_14963 = lean_ctor_get(x_14957, 0); +lean_inc(x_14963); +x_14964 = lean_ctor_get(x_14957, 1); +lean_inc(x_14964); +if (lean_is_exclusive(x_14957)) { + lean_ctor_release(x_14957, 0); + lean_ctor_release(x_14957, 1); + x_14965 = x_14957; +} else { + lean_dec_ref(x_14957); + x_14965 = lean_box(0); +} +if (lean_is_scalar(x_14965)) { + x_14966 = lean_alloc_ctor(1, 2, 0); +} else { + x_14966 = x_14965; +} +lean_ctor_set(x_14966, 0, x_14963); +lean_ctor_set(x_14966, 1, x_14964); +return x_14966; +} +} +else +{ +lean_object* x_14967; lean_object* 
x_14968; lean_object* x_14969; lean_object* x_14970; lean_object* x_14971; lean_object* x_14972; lean_object* x_14973; lean_object* x_14974; lean_object* x_14975; +lean_dec(x_14938); +lean_dec(x_14936); +x_14967 = lean_alloc_ctor(6, 2, 0); +lean_ctor_set(x_14967, 0, x_153); +lean_ctor_set(x_14967, 1, x_14813); +x_14968 = lean_ctor_get(x_1, 0); +lean_inc(x_14968); +x_14969 = l_Lean_IR_ToIR_bindVar(x_14968, x_14934, x_4, x_5, x_14874); +x_14970 = lean_ctor_get(x_14969, 0); +lean_inc(x_14970); +x_14971 = lean_ctor_get(x_14969, 1); +lean_inc(x_14971); +lean_dec(x_14969); +x_14972 = lean_ctor_get(x_14970, 0); +lean_inc(x_14972); +x_14973 = lean_ctor_get(x_14970, 1); +lean_inc(x_14973); +lean_dec(x_14970); +x_14974 = lean_ctor_get(x_1, 2); +lean_inc(x_14974); +lean_dec(x_1); +lean_inc(x_5); +lean_inc(x_4); +x_14975 = l_Lean_IR_ToIR_lowerType(x_14974, x_14973, x_4, x_5, x_14971); +if (lean_obj_tag(x_14975) == 0) +{ +lean_object* x_14976; lean_object* x_14977; lean_object* x_14978; lean_object* x_14979; lean_object* x_14980; +x_14976 = lean_ctor_get(x_14975, 0); +lean_inc(x_14976); +x_14977 = lean_ctor_get(x_14975, 1); +lean_inc(x_14977); +lean_dec(x_14975); +x_14978 = lean_ctor_get(x_14976, 0); +lean_inc(x_14978); +x_14979 = lean_ctor_get(x_14976, 1); +lean_inc(x_14979); +lean_dec(x_14976); +x_14980 = l_Lean_IR_ToIR_lowerLet___lambda__1(x_2, x_14972, x_14967, x_14978, x_14979, x_4, x_5, x_14977); +return x_14980; +} +else +{ +lean_object* x_14981; lean_object* x_14982; lean_object* x_14983; lean_object* x_14984; +lean_dec(x_14972); +lean_dec(x_14967); +lean_dec(x_5); +lean_dec(x_4); +lean_dec(x_2); +x_14981 = lean_ctor_get(x_14975, 0); +lean_inc(x_14981); +x_14982 = lean_ctor_get(x_14975, 1); +lean_inc(x_14982); +if (lean_is_exclusive(x_14975)) { + lean_ctor_release(x_14975, 0); + lean_ctor_release(x_14975, 1); + x_14983 = x_14975; +} else { + lean_dec_ref(x_14975); + x_14983 = lean_box(0); +} +if (lean_is_scalar(x_14983)) { + x_14984 = lean_alloc_ctor(1, 2, 0); +} else { + x_14984 = x_14983; +} +lean_ctor_set(x_14984, 0, x_14981); +lean_ctor_set(x_14984, 1, x_14982); +return x_14984; +} +} +} +else +{ +lean_object* x_14985; lean_object* x_14986; lean_object* x_14987; lean_object* x_14988; lean_object* x_14989; lean_object* x_14990; lean_object* x_14991; lean_object* x_14992; lean_object* x_14993; +lean_dec(x_14938); +lean_dec(x_14936); +x_14985 = lean_alloc_ctor(7, 2, 0); +lean_ctor_set(x_14985, 0, x_153); +lean_ctor_set(x_14985, 1, x_14813); +x_14986 = lean_ctor_get(x_1, 0); +lean_inc(x_14986); +lean_dec(x_1); +x_14987 = l_Lean_IR_ToIR_bindVar(x_14986, x_14934, x_4, x_5, x_14874); +x_14988 = lean_ctor_get(x_14987, 0); +lean_inc(x_14988); +x_14989 = lean_ctor_get(x_14987, 1); +lean_inc(x_14989); +lean_dec(x_14987); +x_14990 = lean_ctor_get(x_14988, 0); +lean_inc(x_14990); +x_14991 = lean_ctor_get(x_14988, 1); +lean_inc(x_14991); +lean_dec(x_14988); +x_14992 = lean_box(7); +x_14993 = l_Lean_IR_ToIR_lowerLet___lambda__1(x_2, x_14990, x_14985, x_14992, x_14991, x_4, x_5, x_14989); +return x_14993; +} +} +} +} +else +{ +lean_object* x_14994; lean_object* x_14995; +lean_free_object(x_14830); +lean_dec(x_14813); +lean_dec(x_153); +lean_dec(x_5); +lean_dec(x_4); +lean_dec(x_2); +lean_dec(x_1); +x_14994 = lean_box(13); +lean_ctor_set(x_14815, 0, x_14994); +if (lean_is_scalar(x_14824)) { + x_14995 = lean_alloc_ctor(0, 2, 0); +} else { + x_14995 = x_14824; +} +lean_ctor_set(x_14995, 0, x_14815); +lean_ctor_set(x_14995, 1, x_14823); +return x_14995; +} +} +else +{ +lean_object* x_14996; lean_object* 
x_14997; lean_object* x_14998; +lean_free_object(x_14830); +lean_dec(x_14824); +lean_free_object(x_14815); +lean_dec(x_153); +x_14996 = l_Lean_IR_instInhabitedArg; +x_14997 = lean_unsigned_to_nat(2u); +x_14998 = lean_array_get(x_14996, x_14813, x_14997); +lean_dec(x_14813); +if (lean_obj_tag(x_14998) == 0) +{ +lean_object* x_14999; lean_object* x_15000; lean_object* x_15001; lean_object* x_15002; lean_object* x_15003; lean_object* x_15004; lean_object* x_15005; +x_14999 = lean_ctor_get(x_14998, 0); +lean_inc(x_14999); +lean_dec(x_14998); +x_15000 = lean_ctor_get(x_1, 0); +lean_inc(x_15000); +lean_dec(x_1); +x_15001 = l_Lean_IR_ToIR_bindVarToVarId(x_15000, x_14999, x_14819, x_4, x_5, x_14823); +x_15002 = lean_ctor_get(x_15001, 0); +lean_inc(x_15002); +x_15003 = lean_ctor_get(x_15001, 1); +lean_inc(x_15003); +lean_dec(x_15001); +x_15004 = lean_ctor_get(x_15002, 1); +lean_inc(x_15004); +lean_dec(x_15002); +x_15005 = l_Lean_IR_ToIR_lowerCode(x_2, x_15004, x_4, x_5, x_15003); +return x_15005; +} +else +{ +lean_object* x_15006; lean_object* x_15007; lean_object* x_15008; lean_object* x_15009; lean_object* x_15010; lean_object* x_15011; +x_15006 = lean_ctor_get(x_1, 0); +lean_inc(x_15006); +lean_dec(x_1); +x_15007 = l_Lean_IR_ToIR_bindErased(x_15006, x_14819, x_4, x_5, x_14823); +x_15008 = lean_ctor_get(x_15007, 0); +lean_inc(x_15008); +x_15009 = lean_ctor_get(x_15007, 1); +lean_inc(x_15009); +lean_dec(x_15007); +x_15010 = lean_ctor_get(x_15008, 1); +lean_inc(x_15010); +lean_dec(x_15008); +x_15011 = l_Lean_IR_ToIR_lowerCode(x_2, x_15010, x_4, x_5, x_15009); +return x_15011; +} +} +} +else +{ +lean_object* x_15012; uint8_t x_15013; +lean_dec(x_14830); +x_15012 = l_Lean_IR_ToIR_lowerLet___closed__9; +x_15013 = lean_name_eq(x_153, x_15012); +if (x_15013 == 0) +{ +lean_object* x_15014; uint8_t x_15015; +x_15014 = l_Lean_IR_ToIR_lowerLet___closed__11; +x_15015 = lean_name_eq(x_153, x_15014); +if (x_15015 == 0) +{ +lean_object* x_15016; lean_object* x_15017; lean_object* x_15018; +lean_dec(x_14824); +lean_free_object(x_14815); +lean_inc(x_153); +x_15016 = l_Lean_IR_ToIR_findDecl(x_153, x_14819, x_4, x_5, x_14823); +x_15017 = lean_ctor_get(x_15016, 0); +lean_inc(x_15017); +x_15018 = lean_ctor_get(x_15017, 0); +lean_inc(x_15018); +if (lean_obj_tag(x_15018) == 0) +{ +lean_object* x_15019; lean_object* x_15020; lean_object* x_15021; lean_object* x_15022; uint8_t x_15023; lean_object* x_15024; lean_object* x_15025; lean_object* x_15026; lean_object* x_15027; lean_object* x_15028; lean_object* x_15029; lean_object* x_15030; lean_object* x_15031; lean_object* x_15032; +lean_dec(x_14813); +lean_dec(x_2); +lean_dec(x_1); +x_15019 = lean_ctor_get(x_15016, 1); +lean_inc(x_15019); +if (lean_is_exclusive(x_15016)) { + lean_ctor_release(x_15016, 0); + lean_ctor_release(x_15016, 1); + x_15020 = x_15016; +} else { + lean_dec_ref(x_15016); + x_15020 = lean_box(0); +} +x_15021 = lean_ctor_get(x_15017, 1); +lean_inc(x_15021); +if (lean_is_exclusive(x_15017)) { + lean_ctor_release(x_15017, 0); + lean_ctor_release(x_15017, 1); + x_15022 = x_15017; +} else { + lean_dec_ref(x_15017); + x_15022 = lean_box(0); +} +x_15023 = 1; +x_15024 = l_Lean_IR_ToIR_lowerLet___closed__14; +x_15025 = l_Lean_Name_toString(x_153, x_15023, x_15024); +x_15026 = lean_alloc_ctor(3, 1, 0); +lean_ctor_set(x_15026, 0, x_15025); +x_15027 = l_Lean_IR_ToIR_lowerLet___closed__13; +if (lean_is_scalar(x_15022)) { + x_15028 = lean_alloc_ctor(5, 2, 0); +} else { + x_15028 = x_15022; + lean_ctor_set_tag(x_15028, 5); +} +lean_ctor_set(x_15028, 0, x_15027); 
+lean_ctor_set(x_15028, 1, x_15026); +x_15029 = l_Lean_IR_ToIR_lowerLet___closed__16; +if (lean_is_scalar(x_15020)) { + x_15030 = lean_alloc_ctor(5, 2, 0); +} else { + x_15030 = x_15020; + lean_ctor_set_tag(x_15030, 5); +} +lean_ctor_set(x_15030, 0, x_15028); +lean_ctor_set(x_15030, 1, x_15029); +x_15031 = l_Lean_MessageData_ofFormat(x_15030); +x_15032 = l_Lean_throwError___at_Lean_IR_ToIR_lowerLet___spec__1(x_15031, x_15021, x_4, x_5, x_15019); +lean_dec(x_5); +lean_dec(x_4); +lean_dec(x_15021); +return x_15032; +} +else +{ +lean_object* x_15033; lean_object* x_15034; lean_object* x_15035; lean_object* x_15036; lean_object* x_15037; lean_object* x_15038; lean_object* x_15039; uint8_t x_15040; +x_15033 = lean_ctor_get(x_15016, 1); +lean_inc(x_15033); +lean_dec(x_15016); +x_15034 = lean_ctor_get(x_15017, 1); +lean_inc(x_15034); +if (lean_is_exclusive(x_15017)) { + lean_ctor_release(x_15017, 0); + lean_ctor_release(x_15017, 1); + x_15035 = x_15017; +} else { + lean_dec_ref(x_15017); + x_15035 = lean_box(0); +} +x_15036 = lean_ctor_get(x_15018, 0); +lean_inc(x_15036); +lean_dec(x_15018); +x_15037 = lean_array_get_size(x_14813); +x_15038 = l_Lean_IR_Decl_params(x_15036); +lean_dec(x_15036); +x_15039 = lean_array_get_size(x_15038); +lean_dec(x_15038); +x_15040 = lean_nat_dec_lt(x_15037, x_15039); +if (x_15040 == 0) +{ +uint8_t x_15041; +x_15041 = lean_nat_dec_eq(x_15037, x_15039); +if (x_15041 == 0) +{ +lean_object* x_15042; lean_object* x_15043; lean_object* x_15044; lean_object* x_15045; lean_object* x_15046; lean_object* x_15047; lean_object* x_15048; lean_object* x_15049; lean_object* x_15050; lean_object* x_15051; lean_object* x_15052; lean_object* x_15053; lean_object* x_15054; lean_object* x_15055; lean_object* x_15056; lean_object* x_15057; lean_object* x_15058; +x_15042 = lean_unsigned_to_nat(0u); +x_15043 = l_Array_extract___rarg(x_14813, x_15042, x_15039); +x_15044 = l_Array_extract___rarg(x_14813, x_15039, x_15037); +lean_dec(x_15037); +lean_dec(x_14813); +if (lean_is_scalar(x_15035)) { + x_15045 = lean_alloc_ctor(6, 2, 0); +} else { + x_15045 = x_15035; + lean_ctor_set_tag(x_15045, 6); +} +lean_ctor_set(x_15045, 0, x_153); +lean_ctor_set(x_15045, 1, x_15043); +x_15046 = lean_ctor_get(x_1, 0); +lean_inc(x_15046); +x_15047 = l_Lean_IR_ToIR_bindVar(x_15046, x_15034, x_4, x_5, x_15033); +x_15048 = lean_ctor_get(x_15047, 0); +lean_inc(x_15048); +x_15049 = lean_ctor_get(x_15047, 1); +lean_inc(x_15049); +lean_dec(x_15047); +x_15050 = lean_ctor_get(x_15048, 0); +lean_inc(x_15050); +x_15051 = lean_ctor_get(x_15048, 1); +lean_inc(x_15051); +lean_dec(x_15048); +x_15052 = l_Lean_IR_ToIR_newVar(x_15051, x_4, x_5, x_15049); +x_15053 = lean_ctor_get(x_15052, 0); +lean_inc(x_15053); +x_15054 = lean_ctor_get(x_15052, 1); +lean_inc(x_15054); +lean_dec(x_15052); +x_15055 = lean_ctor_get(x_15053, 0); +lean_inc(x_15055); +x_15056 = lean_ctor_get(x_15053, 1); +lean_inc(x_15056); +lean_dec(x_15053); +x_15057 = lean_ctor_get(x_1, 2); +lean_inc(x_15057); +lean_dec(x_1); +lean_inc(x_5); +lean_inc(x_4); +x_15058 = l_Lean_IR_ToIR_lowerType(x_15057, x_15056, x_4, x_5, x_15054); +if (lean_obj_tag(x_15058) == 0) +{ +lean_object* x_15059; lean_object* x_15060; lean_object* x_15061; lean_object* x_15062; lean_object* x_15063; +x_15059 = lean_ctor_get(x_15058, 0); +lean_inc(x_15059); +x_15060 = lean_ctor_get(x_15058, 1); +lean_inc(x_15060); +lean_dec(x_15058); +x_15061 = lean_ctor_get(x_15059, 0); +lean_inc(x_15061); +x_15062 = lean_ctor_get(x_15059, 1); +lean_inc(x_15062); +lean_dec(x_15059); +x_15063 = 
l_Lean_IR_ToIR_lowerLet___lambda__3(x_2, x_15055, x_15044, x_15050, x_15045, x_15061, x_15062, x_4, x_5, x_15060); +return x_15063; +} +else +{ +lean_object* x_15064; lean_object* x_15065; lean_object* x_15066; lean_object* x_15067; +lean_dec(x_15055); +lean_dec(x_15050); +lean_dec(x_15045); +lean_dec(x_15044); +lean_dec(x_5); +lean_dec(x_4); +lean_dec(x_2); +x_15064 = lean_ctor_get(x_15058, 0); +lean_inc(x_15064); +x_15065 = lean_ctor_get(x_15058, 1); +lean_inc(x_15065); +if (lean_is_exclusive(x_15058)) { + lean_ctor_release(x_15058, 0); + lean_ctor_release(x_15058, 1); + x_15066 = x_15058; +} else { + lean_dec_ref(x_15058); + x_15066 = lean_box(0); +} +if (lean_is_scalar(x_15066)) { + x_15067 = lean_alloc_ctor(1, 2, 0); +} else { + x_15067 = x_15066; +} +lean_ctor_set(x_15067, 0, x_15064); +lean_ctor_set(x_15067, 1, x_15065); +return x_15067; +} +} +else +{ +lean_object* x_15068; lean_object* x_15069; lean_object* x_15070; lean_object* x_15071; lean_object* x_15072; lean_object* x_15073; lean_object* x_15074; lean_object* x_15075; lean_object* x_15076; +lean_dec(x_15039); +lean_dec(x_15037); +if (lean_is_scalar(x_15035)) { + x_15068 = lean_alloc_ctor(6, 2, 0); +} else { + x_15068 = x_15035; + lean_ctor_set_tag(x_15068, 6); +} +lean_ctor_set(x_15068, 0, x_153); +lean_ctor_set(x_15068, 1, x_14813); +x_15069 = lean_ctor_get(x_1, 0); +lean_inc(x_15069); +x_15070 = l_Lean_IR_ToIR_bindVar(x_15069, x_15034, x_4, x_5, x_15033); +x_15071 = lean_ctor_get(x_15070, 0); +lean_inc(x_15071); +x_15072 = lean_ctor_get(x_15070, 1); +lean_inc(x_15072); +lean_dec(x_15070); +x_15073 = lean_ctor_get(x_15071, 0); +lean_inc(x_15073); +x_15074 = lean_ctor_get(x_15071, 1); +lean_inc(x_15074); +lean_dec(x_15071); +x_15075 = lean_ctor_get(x_1, 2); +lean_inc(x_15075); +lean_dec(x_1); +lean_inc(x_5); +lean_inc(x_4); +x_15076 = l_Lean_IR_ToIR_lowerType(x_15075, x_15074, x_4, x_5, x_15072); +if (lean_obj_tag(x_15076) == 0) +{ +lean_object* x_15077; lean_object* x_15078; lean_object* x_15079; lean_object* x_15080; lean_object* x_15081; +x_15077 = lean_ctor_get(x_15076, 0); +lean_inc(x_15077); +x_15078 = lean_ctor_get(x_15076, 1); +lean_inc(x_15078); +lean_dec(x_15076); +x_15079 = lean_ctor_get(x_15077, 0); +lean_inc(x_15079); +x_15080 = lean_ctor_get(x_15077, 1); +lean_inc(x_15080); +lean_dec(x_15077); +x_15081 = l_Lean_IR_ToIR_lowerLet___lambda__1(x_2, x_15073, x_15068, x_15079, x_15080, x_4, x_5, x_15078); +return x_15081; +} +else +{ +lean_object* x_15082; lean_object* x_15083; lean_object* x_15084; lean_object* x_15085; +lean_dec(x_15073); +lean_dec(x_15068); +lean_dec(x_5); +lean_dec(x_4); +lean_dec(x_2); +x_15082 = lean_ctor_get(x_15076, 0); +lean_inc(x_15082); +x_15083 = lean_ctor_get(x_15076, 1); +lean_inc(x_15083); +if (lean_is_exclusive(x_15076)) { + lean_ctor_release(x_15076, 0); + lean_ctor_release(x_15076, 1); + x_15084 = x_15076; +} else { + lean_dec_ref(x_15076); + x_15084 = lean_box(0); +} +if (lean_is_scalar(x_15084)) { + x_15085 = lean_alloc_ctor(1, 2, 0); +} else { + x_15085 = x_15084; +} +lean_ctor_set(x_15085, 0, x_15082); +lean_ctor_set(x_15085, 1, x_15083); +return x_15085; +} +} +} +else +{ +lean_object* x_15086; lean_object* x_15087; lean_object* x_15088; lean_object* x_15089; lean_object* x_15090; lean_object* x_15091; lean_object* x_15092; lean_object* x_15093; lean_object* x_15094; +lean_dec(x_15039); +lean_dec(x_15037); +if (lean_is_scalar(x_15035)) { + x_15086 = lean_alloc_ctor(7, 2, 0); +} else { + x_15086 = x_15035; + lean_ctor_set_tag(x_15086, 7); +} +lean_ctor_set(x_15086, 0, x_153); 
+lean_ctor_set(x_15086, 1, x_14813); +x_15087 = lean_ctor_get(x_1, 0); +lean_inc(x_15087); +lean_dec(x_1); +x_15088 = l_Lean_IR_ToIR_bindVar(x_15087, x_15034, x_4, x_5, x_15033); +x_15089 = lean_ctor_get(x_15088, 0); +lean_inc(x_15089); +x_15090 = lean_ctor_get(x_15088, 1); +lean_inc(x_15090); +lean_dec(x_15088); +x_15091 = lean_ctor_get(x_15089, 0); +lean_inc(x_15091); +x_15092 = lean_ctor_get(x_15089, 1); +lean_inc(x_15092); +lean_dec(x_15089); +x_15093 = lean_box(7); +x_15094 = l_Lean_IR_ToIR_lowerLet___lambda__1(x_2, x_15091, x_15086, x_15093, x_15092, x_4, x_5, x_15090); +return x_15094; +} +} +} +else +{ +lean_object* x_15095; lean_object* x_15096; +lean_dec(x_14813); +lean_dec(x_153); +lean_dec(x_5); +lean_dec(x_4); +lean_dec(x_2); +lean_dec(x_1); +x_15095 = lean_box(13); +lean_ctor_set(x_14815, 0, x_15095); +if (lean_is_scalar(x_14824)) { + x_15096 = lean_alloc_ctor(0, 2, 0); +} else { + x_15096 = x_14824; +} +lean_ctor_set(x_15096, 0, x_14815); +lean_ctor_set(x_15096, 1, x_14823); +return x_15096; +} +} +else +{ +lean_object* x_15097; lean_object* x_15098; lean_object* x_15099; +lean_dec(x_14824); +lean_free_object(x_14815); +lean_dec(x_153); +x_15097 = l_Lean_IR_instInhabitedArg; +x_15098 = lean_unsigned_to_nat(2u); +x_15099 = lean_array_get(x_15097, x_14813, x_15098); +lean_dec(x_14813); +if (lean_obj_tag(x_15099) == 0) +{ +lean_object* x_15100; lean_object* x_15101; lean_object* x_15102; lean_object* x_15103; lean_object* x_15104; lean_object* x_15105; lean_object* x_15106; +x_15100 = lean_ctor_get(x_15099, 0); +lean_inc(x_15100); +lean_dec(x_15099); +x_15101 = lean_ctor_get(x_1, 0); +lean_inc(x_15101); +lean_dec(x_1); +x_15102 = l_Lean_IR_ToIR_bindVarToVarId(x_15101, x_15100, x_14819, x_4, x_5, x_14823); +x_15103 = lean_ctor_get(x_15102, 0); +lean_inc(x_15103); +x_15104 = lean_ctor_get(x_15102, 1); +lean_inc(x_15104); +lean_dec(x_15102); +x_15105 = lean_ctor_get(x_15103, 1); +lean_inc(x_15105); +lean_dec(x_15103); +x_15106 = l_Lean_IR_ToIR_lowerCode(x_2, x_15105, x_4, x_5, x_15104); +return x_15106; +} +else +{ +lean_object* x_15107; lean_object* x_15108; lean_object* x_15109; lean_object* x_15110; lean_object* x_15111; lean_object* x_15112; +x_15107 = lean_ctor_get(x_1, 0); +lean_inc(x_15107); +lean_dec(x_1); +x_15108 = l_Lean_IR_ToIR_bindErased(x_15107, x_14819, x_4, x_5, x_14823); +x_15109 = lean_ctor_get(x_15108, 0); +lean_inc(x_15109); +x_15110 = lean_ctor_get(x_15108, 1); +lean_inc(x_15110); +lean_dec(x_15108); +x_15111 = lean_ctor_get(x_15109, 1); +lean_inc(x_15111); +lean_dec(x_15109); +x_15112 = l_Lean_IR_ToIR_lowerCode(x_2, x_15111, x_4, x_5, x_15110); +return x_15112; +} +} +} +} +case 1: +{ +lean_object* x_15113; lean_object* x_15114; lean_object* x_15144; lean_object* x_15145; +lean_dec(x_14830); +lean_dec(x_14825); +lean_dec(x_14805); +lean_dec(x_14804); +lean_inc(x_153); +x_15144 = l_Lean_Compiler_LCNF_getMonoDecl_x3f(x_153, x_4, x_5, x_14823); +x_15145 = lean_ctor_get(x_15144, 0); +lean_inc(x_15145); +if (lean_obj_tag(x_15145) == 0) +{ +lean_object* x_15146; lean_object* x_15147; +x_15146 = lean_ctor_get(x_15144, 1); +lean_inc(x_15146); +lean_dec(x_15144); +x_15147 = lean_box(0); +lean_ctor_set(x_14815, 0, x_15147); +x_15113 = x_14815; +x_15114 = x_15146; +goto block_15143; +} +else +{ +uint8_t x_15148; +lean_free_object(x_14815); +x_15148 = !lean_is_exclusive(x_15144); +if (x_15148 == 0) +{ +lean_object* x_15149; lean_object* x_15150; uint8_t x_15151; +x_15149 = lean_ctor_get(x_15144, 1); +x_15150 = lean_ctor_get(x_15144, 0); +lean_dec(x_15150); +x_15151 = 
!lean_is_exclusive(x_15145); +if (x_15151 == 0) +{ +lean_object* x_15152; lean_object* x_15153; lean_object* x_15154; lean_object* x_15155; uint8_t x_15156; +x_15152 = lean_ctor_get(x_15145, 0); +x_15153 = lean_array_get_size(x_14813); +x_15154 = lean_ctor_get(x_15152, 3); +lean_inc(x_15154); +lean_dec(x_15152); +x_15155 = lean_array_get_size(x_15154); +lean_dec(x_15154); +x_15156 = lean_nat_dec_lt(x_15153, x_15155); +if (x_15156 == 0) +{ +uint8_t x_15157; +x_15157 = lean_nat_dec_eq(x_15153, x_15155); +if (x_15157 == 0) +{ +lean_object* x_15158; lean_object* x_15159; lean_object* x_15160; lean_object* x_15161; lean_object* x_15162; lean_object* x_15163; lean_object* x_15164; lean_object* x_15165; lean_object* x_15166; lean_object* x_15167; lean_object* x_15168; lean_object* x_15169; lean_object* x_15170; lean_object* x_15171; lean_object* x_15172; lean_object* x_15173; +x_15158 = lean_unsigned_to_nat(0u); +x_15159 = l_Array_extract___rarg(x_14813, x_15158, x_15155); +x_15160 = l_Array_extract___rarg(x_14813, x_15155, x_15153); +lean_dec(x_15153); +lean_inc(x_153); +lean_ctor_set_tag(x_15144, 6); +lean_ctor_set(x_15144, 1, x_15159); +lean_ctor_set(x_15144, 0, x_153); +x_15161 = lean_ctor_get(x_1, 0); +lean_inc(x_15161); +x_15162 = l_Lean_IR_ToIR_bindVar(x_15161, x_14819, x_4, x_5, x_15149); +x_15163 = lean_ctor_get(x_15162, 0); +lean_inc(x_15163); +x_15164 = lean_ctor_get(x_15162, 1); +lean_inc(x_15164); +lean_dec(x_15162); +x_15165 = lean_ctor_get(x_15163, 0); +lean_inc(x_15165); +x_15166 = lean_ctor_get(x_15163, 1); +lean_inc(x_15166); +lean_dec(x_15163); +x_15167 = l_Lean_IR_ToIR_newVar(x_15166, x_4, x_5, x_15164); +x_15168 = lean_ctor_get(x_15167, 0); +lean_inc(x_15168); +x_15169 = lean_ctor_get(x_15167, 1); +lean_inc(x_15169); +lean_dec(x_15167); +x_15170 = lean_ctor_get(x_15168, 0); +lean_inc(x_15170); +x_15171 = lean_ctor_get(x_15168, 1); +lean_inc(x_15171); +lean_dec(x_15168); +x_15172 = lean_ctor_get(x_1, 2); +lean_inc(x_15172); +lean_inc(x_5); +lean_inc(x_4); +x_15173 = l_Lean_IR_ToIR_lowerType(x_15172, x_15171, x_4, x_5, x_15169); +if (lean_obj_tag(x_15173) == 0) +{ +lean_object* x_15174; lean_object* x_15175; lean_object* x_15176; lean_object* x_15177; lean_object* x_15178; +x_15174 = lean_ctor_get(x_15173, 0); +lean_inc(x_15174); +x_15175 = lean_ctor_get(x_15173, 1); +lean_inc(x_15175); +lean_dec(x_15173); +x_15176 = lean_ctor_get(x_15174, 0); +lean_inc(x_15176); +x_15177 = lean_ctor_get(x_15174, 1); +lean_inc(x_15177); +lean_dec(x_15174); +lean_inc(x_5); +lean_inc(x_4); +lean_inc(x_2); +x_15178 = l_Lean_IR_ToIR_lowerLet___lambda__3(x_2, x_15170, x_15160, x_15165, x_15144, x_15176, x_15177, x_4, x_5, x_15175); +if (lean_obj_tag(x_15178) == 0) +{ +lean_object* x_15179; lean_object* x_15180; uint8_t x_15181; +x_15179 = lean_ctor_get(x_15178, 0); +lean_inc(x_15179); +x_15180 = lean_ctor_get(x_15178, 1); +lean_inc(x_15180); +lean_dec(x_15178); +x_15181 = !lean_is_exclusive(x_15179); +if (x_15181 == 0) +{ +lean_object* x_15182; +x_15182 = lean_ctor_get(x_15179, 0); +lean_ctor_set(x_15145, 0, x_15182); +lean_ctor_set(x_15179, 0, x_15145); +x_15113 = x_15179; +x_15114 = x_15180; +goto block_15143; +} +else +{ +lean_object* x_15183; lean_object* x_15184; lean_object* x_15185; +x_15183 = lean_ctor_get(x_15179, 0); +x_15184 = lean_ctor_get(x_15179, 1); +lean_inc(x_15184); +lean_inc(x_15183); +lean_dec(x_15179); +lean_ctor_set(x_15145, 0, x_15183); +x_15185 = lean_alloc_ctor(0, 2, 0); +lean_ctor_set(x_15185, 0, x_15145); +lean_ctor_set(x_15185, 1, x_15184); +x_15113 = x_15185; +x_15114 = 
x_15180; +goto block_15143; +} +} +else +{ +uint8_t x_15186; +lean_free_object(x_15145); +lean_dec(x_14824); +lean_dec(x_14813); +lean_dec(x_153); +lean_dec(x_5); +lean_dec(x_4); +lean_dec(x_2); +lean_dec(x_1); +x_15186 = !lean_is_exclusive(x_15178); +if (x_15186 == 0) +{ +return x_15178; +} +else +{ +lean_object* x_15187; lean_object* x_15188; lean_object* x_15189; +x_15187 = lean_ctor_get(x_15178, 0); +x_15188 = lean_ctor_get(x_15178, 1); +lean_inc(x_15188); +lean_inc(x_15187); +lean_dec(x_15178); +x_15189 = lean_alloc_ctor(1, 2, 0); +lean_ctor_set(x_15189, 0, x_15187); +lean_ctor_set(x_15189, 1, x_15188); +return x_15189; +} +} +} +else +{ +uint8_t x_15190; +lean_dec(x_15170); +lean_dec(x_15165); +lean_dec(x_15144); +lean_dec(x_15160); +lean_free_object(x_15145); +lean_dec(x_14824); +lean_dec(x_14813); +lean_dec(x_153); +lean_dec(x_5); +lean_dec(x_4); +lean_dec(x_2); +lean_dec(x_1); +x_15190 = !lean_is_exclusive(x_15173); +if (x_15190 == 0) +{ +return x_15173; +} +else +{ +lean_object* x_15191; lean_object* x_15192; lean_object* x_15193; +x_15191 = lean_ctor_get(x_15173, 0); +x_15192 = lean_ctor_get(x_15173, 1); +lean_inc(x_15192); +lean_inc(x_15191); +lean_dec(x_15173); +x_15193 = lean_alloc_ctor(1, 2, 0); +lean_ctor_set(x_15193, 0, x_15191); +lean_ctor_set(x_15193, 1, x_15192); +return x_15193; +} +} +} +else +{ +lean_object* x_15194; lean_object* x_15195; lean_object* x_15196; lean_object* x_15197; lean_object* x_15198; lean_object* x_15199; lean_object* x_15200; lean_object* x_15201; +lean_dec(x_15155); +lean_dec(x_15153); +lean_inc(x_14813); +lean_inc(x_153); +lean_ctor_set_tag(x_15144, 6); +lean_ctor_set(x_15144, 1, x_14813); +lean_ctor_set(x_15144, 0, x_153); +x_15194 = lean_ctor_get(x_1, 0); +lean_inc(x_15194); +x_15195 = l_Lean_IR_ToIR_bindVar(x_15194, x_14819, x_4, x_5, x_15149); +x_15196 = lean_ctor_get(x_15195, 0); +lean_inc(x_15196); +x_15197 = lean_ctor_get(x_15195, 1); +lean_inc(x_15197); +lean_dec(x_15195); +x_15198 = lean_ctor_get(x_15196, 0); +lean_inc(x_15198); +x_15199 = lean_ctor_get(x_15196, 1); +lean_inc(x_15199); +lean_dec(x_15196); +x_15200 = lean_ctor_get(x_1, 2); +lean_inc(x_15200); +lean_inc(x_5); +lean_inc(x_4); +x_15201 = l_Lean_IR_ToIR_lowerType(x_15200, x_15199, x_4, x_5, x_15197); +if (lean_obj_tag(x_15201) == 0) +{ +lean_object* x_15202; lean_object* x_15203; lean_object* x_15204; lean_object* x_15205; lean_object* x_15206; +x_15202 = lean_ctor_get(x_15201, 0); +lean_inc(x_15202); +x_15203 = lean_ctor_get(x_15201, 1); +lean_inc(x_15203); +lean_dec(x_15201); +x_15204 = lean_ctor_get(x_15202, 0); +lean_inc(x_15204); +x_15205 = lean_ctor_get(x_15202, 1); +lean_inc(x_15205); +lean_dec(x_15202); +lean_inc(x_5); +lean_inc(x_4); +lean_inc(x_2); +x_15206 = l_Lean_IR_ToIR_lowerLet___lambda__1(x_2, x_15198, x_15144, x_15204, x_15205, x_4, x_5, x_15203); +if (lean_obj_tag(x_15206) == 0) +{ +lean_object* x_15207; lean_object* x_15208; uint8_t x_15209; +x_15207 = lean_ctor_get(x_15206, 0); +lean_inc(x_15207); +x_15208 = lean_ctor_get(x_15206, 1); +lean_inc(x_15208); +lean_dec(x_15206); +x_15209 = !lean_is_exclusive(x_15207); +if (x_15209 == 0) +{ +lean_object* x_15210; +x_15210 = lean_ctor_get(x_15207, 0); +lean_ctor_set(x_15145, 0, x_15210); +lean_ctor_set(x_15207, 0, x_15145); +x_15113 = x_15207; +x_15114 = x_15208; +goto block_15143; +} +else +{ +lean_object* x_15211; lean_object* x_15212; lean_object* x_15213; +x_15211 = lean_ctor_get(x_15207, 0); +x_15212 = lean_ctor_get(x_15207, 1); +lean_inc(x_15212); +lean_inc(x_15211); +lean_dec(x_15207); 
+lean_ctor_set(x_15145, 0, x_15211); +x_15213 = lean_alloc_ctor(0, 2, 0); +lean_ctor_set(x_15213, 0, x_15145); +lean_ctor_set(x_15213, 1, x_15212); +x_15113 = x_15213; +x_15114 = x_15208; +goto block_15143; +} +} +else +{ +uint8_t x_15214; +lean_free_object(x_15145); +lean_dec(x_14824); +lean_dec(x_14813); +lean_dec(x_153); +lean_dec(x_5); +lean_dec(x_4); +lean_dec(x_2); +lean_dec(x_1); +x_15214 = !lean_is_exclusive(x_15206); +if (x_15214 == 0) +{ +return x_15206; +} +else +{ +lean_object* x_15215; lean_object* x_15216; lean_object* x_15217; +x_15215 = lean_ctor_get(x_15206, 0); +x_15216 = lean_ctor_get(x_15206, 1); +lean_inc(x_15216); +lean_inc(x_15215); +lean_dec(x_15206); +x_15217 = lean_alloc_ctor(1, 2, 0); +lean_ctor_set(x_15217, 0, x_15215); +lean_ctor_set(x_15217, 1, x_15216); +return x_15217; +} +} +} +else +{ +uint8_t x_15218; +lean_dec(x_15198); +lean_dec(x_15144); +lean_free_object(x_15145); +lean_dec(x_14824); +lean_dec(x_14813); +lean_dec(x_153); +lean_dec(x_5); +lean_dec(x_4); +lean_dec(x_2); +lean_dec(x_1); +x_15218 = !lean_is_exclusive(x_15201); +if (x_15218 == 0) +{ +return x_15201; +} +else +{ +lean_object* x_15219; lean_object* x_15220; lean_object* x_15221; +x_15219 = lean_ctor_get(x_15201, 0); +x_15220 = lean_ctor_get(x_15201, 1); +lean_inc(x_15220); +lean_inc(x_15219); +lean_dec(x_15201); +x_15221 = lean_alloc_ctor(1, 2, 0); +lean_ctor_set(x_15221, 0, x_15219); +lean_ctor_set(x_15221, 1, x_15220); +return x_15221; +} +} +} +} +else +{ +lean_object* x_15222; lean_object* x_15223; lean_object* x_15224; lean_object* x_15225; lean_object* x_15226; lean_object* x_15227; lean_object* x_15228; lean_object* x_15229; +lean_dec(x_15155); +lean_dec(x_15153); +lean_inc(x_14813); +lean_inc(x_153); +lean_ctor_set_tag(x_15144, 7); +lean_ctor_set(x_15144, 1, x_14813); +lean_ctor_set(x_15144, 0, x_153); +x_15222 = lean_ctor_get(x_1, 0); +lean_inc(x_15222); +x_15223 = l_Lean_IR_ToIR_bindVar(x_15222, x_14819, x_4, x_5, x_15149); +x_15224 = lean_ctor_get(x_15223, 0); +lean_inc(x_15224); +x_15225 = lean_ctor_get(x_15223, 1); +lean_inc(x_15225); +lean_dec(x_15223); +x_15226 = lean_ctor_get(x_15224, 0); +lean_inc(x_15226); +x_15227 = lean_ctor_get(x_15224, 1); +lean_inc(x_15227); +lean_dec(x_15224); +x_15228 = lean_box(7); +lean_inc(x_5); +lean_inc(x_4); +lean_inc(x_2); +x_15229 = l_Lean_IR_ToIR_lowerLet___lambda__1(x_2, x_15226, x_15144, x_15228, x_15227, x_4, x_5, x_15225); +if (lean_obj_tag(x_15229) == 0) +{ +lean_object* x_15230; lean_object* x_15231; uint8_t x_15232; +x_15230 = lean_ctor_get(x_15229, 0); +lean_inc(x_15230); +x_15231 = lean_ctor_get(x_15229, 1); +lean_inc(x_15231); +lean_dec(x_15229); +x_15232 = !lean_is_exclusive(x_15230); +if (x_15232 == 0) +{ +lean_object* x_15233; +x_15233 = lean_ctor_get(x_15230, 0); +lean_ctor_set(x_15145, 0, x_15233); +lean_ctor_set(x_15230, 0, x_15145); +x_15113 = x_15230; +x_15114 = x_15231; +goto block_15143; +} +else +{ +lean_object* x_15234; lean_object* x_15235; lean_object* x_15236; +x_15234 = lean_ctor_get(x_15230, 0); +x_15235 = lean_ctor_get(x_15230, 1); +lean_inc(x_15235); +lean_inc(x_15234); +lean_dec(x_15230); +lean_ctor_set(x_15145, 0, x_15234); +x_15236 = lean_alloc_ctor(0, 2, 0); +lean_ctor_set(x_15236, 0, x_15145); +lean_ctor_set(x_15236, 1, x_15235); +x_15113 = x_15236; +x_15114 = x_15231; +goto block_15143; +} +} +else +{ +uint8_t x_15237; +lean_free_object(x_15145); +lean_dec(x_14824); +lean_dec(x_14813); +lean_dec(x_153); +lean_dec(x_5); +lean_dec(x_4); +lean_dec(x_2); +lean_dec(x_1); +x_15237 = !lean_is_exclusive(x_15229); 
+if (x_15237 == 0) +{ +return x_15229; +} +else +{ +lean_object* x_15238; lean_object* x_15239; lean_object* x_15240; +x_15238 = lean_ctor_get(x_15229, 0); +x_15239 = lean_ctor_get(x_15229, 1); +lean_inc(x_15239); +lean_inc(x_15238); +lean_dec(x_15229); +x_15240 = lean_alloc_ctor(1, 2, 0); +lean_ctor_set(x_15240, 0, x_15238); +lean_ctor_set(x_15240, 1, x_15239); +return x_15240; +} +} +} +} +else +{ +lean_object* x_15241; lean_object* x_15242; lean_object* x_15243; lean_object* x_15244; uint8_t x_15245; +x_15241 = lean_ctor_get(x_15145, 0); +lean_inc(x_15241); +lean_dec(x_15145); +x_15242 = lean_array_get_size(x_14813); +x_15243 = lean_ctor_get(x_15241, 3); +lean_inc(x_15243); +lean_dec(x_15241); +x_15244 = lean_array_get_size(x_15243); +lean_dec(x_15243); +x_15245 = lean_nat_dec_lt(x_15242, x_15244); +if (x_15245 == 0) +{ +uint8_t x_15246; +x_15246 = lean_nat_dec_eq(x_15242, x_15244); +if (x_15246 == 0) +{ +lean_object* x_15247; lean_object* x_15248; lean_object* x_15249; lean_object* x_15250; lean_object* x_15251; lean_object* x_15252; lean_object* x_15253; lean_object* x_15254; lean_object* x_15255; lean_object* x_15256; lean_object* x_15257; lean_object* x_15258; lean_object* x_15259; lean_object* x_15260; lean_object* x_15261; lean_object* x_15262; +x_15247 = lean_unsigned_to_nat(0u); +x_15248 = l_Array_extract___rarg(x_14813, x_15247, x_15244); +x_15249 = l_Array_extract___rarg(x_14813, x_15244, x_15242); +lean_dec(x_15242); +lean_inc(x_153); +lean_ctor_set_tag(x_15144, 6); +lean_ctor_set(x_15144, 1, x_15248); +lean_ctor_set(x_15144, 0, x_153); +x_15250 = lean_ctor_get(x_1, 0); +lean_inc(x_15250); +x_15251 = l_Lean_IR_ToIR_bindVar(x_15250, x_14819, x_4, x_5, x_15149); +x_15252 = lean_ctor_get(x_15251, 0); +lean_inc(x_15252); +x_15253 = lean_ctor_get(x_15251, 1); +lean_inc(x_15253); +lean_dec(x_15251); +x_15254 = lean_ctor_get(x_15252, 0); +lean_inc(x_15254); +x_15255 = lean_ctor_get(x_15252, 1); +lean_inc(x_15255); +lean_dec(x_15252); +x_15256 = l_Lean_IR_ToIR_newVar(x_15255, x_4, x_5, x_15253); +x_15257 = lean_ctor_get(x_15256, 0); +lean_inc(x_15257); +x_15258 = lean_ctor_get(x_15256, 1); +lean_inc(x_15258); +lean_dec(x_15256); +x_15259 = lean_ctor_get(x_15257, 0); +lean_inc(x_15259); +x_15260 = lean_ctor_get(x_15257, 1); +lean_inc(x_15260); +lean_dec(x_15257); +x_15261 = lean_ctor_get(x_1, 2); +lean_inc(x_15261); +lean_inc(x_5); +lean_inc(x_4); +x_15262 = l_Lean_IR_ToIR_lowerType(x_15261, x_15260, x_4, x_5, x_15258); +if (lean_obj_tag(x_15262) == 0) +{ +lean_object* x_15263; lean_object* x_15264; lean_object* x_15265; lean_object* x_15266; lean_object* x_15267; +x_15263 = lean_ctor_get(x_15262, 0); +lean_inc(x_15263); +x_15264 = lean_ctor_get(x_15262, 1); +lean_inc(x_15264); +lean_dec(x_15262); +x_15265 = lean_ctor_get(x_15263, 0); +lean_inc(x_15265); +x_15266 = lean_ctor_get(x_15263, 1); +lean_inc(x_15266); +lean_dec(x_15263); +lean_inc(x_5); +lean_inc(x_4); +lean_inc(x_2); +x_15267 = l_Lean_IR_ToIR_lowerLet___lambda__3(x_2, x_15259, x_15249, x_15254, x_15144, x_15265, x_15266, x_4, x_5, x_15264); +if (lean_obj_tag(x_15267) == 0) +{ +lean_object* x_15268; lean_object* x_15269; lean_object* x_15270; lean_object* x_15271; lean_object* x_15272; lean_object* x_15273; lean_object* x_15274; +x_15268 = lean_ctor_get(x_15267, 0); +lean_inc(x_15268); +x_15269 = lean_ctor_get(x_15267, 1); +lean_inc(x_15269); +lean_dec(x_15267); +x_15270 = lean_ctor_get(x_15268, 0); +lean_inc(x_15270); +x_15271 = lean_ctor_get(x_15268, 1); +lean_inc(x_15271); +if (lean_is_exclusive(x_15268)) { + 
lean_ctor_release(x_15268, 0); + lean_ctor_release(x_15268, 1); + x_15272 = x_15268; +} else { + lean_dec_ref(x_15268); + x_15272 = lean_box(0); +} +x_15273 = lean_alloc_ctor(1, 1, 0); +lean_ctor_set(x_15273, 0, x_15270); +if (lean_is_scalar(x_15272)) { + x_15274 = lean_alloc_ctor(0, 2, 0); +} else { + x_15274 = x_15272; +} +lean_ctor_set(x_15274, 0, x_15273); +lean_ctor_set(x_15274, 1, x_15271); +x_15113 = x_15274; +x_15114 = x_15269; +goto block_15143; +} +else +{ +lean_object* x_15275; lean_object* x_15276; lean_object* x_15277; lean_object* x_15278; +lean_dec(x_14824); +lean_dec(x_14813); +lean_dec(x_153); +lean_dec(x_5); +lean_dec(x_4); +lean_dec(x_2); +lean_dec(x_1); +x_15275 = lean_ctor_get(x_15267, 0); +lean_inc(x_15275); +x_15276 = lean_ctor_get(x_15267, 1); +lean_inc(x_15276); +if (lean_is_exclusive(x_15267)) { + lean_ctor_release(x_15267, 0); + lean_ctor_release(x_15267, 1); + x_15277 = x_15267; +} else { + lean_dec_ref(x_15267); + x_15277 = lean_box(0); +} +if (lean_is_scalar(x_15277)) { + x_15278 = lean_alloc_ctor(1, 2, 0); +} else { + x_15278 = x_15277; +} +lean_ctor_set(x_15278, 0, x_15275); +lean_ctor_set(x_15278, 1, x_15276); +return x_15278; +} +} +else +{ +lean_object* x_15279; lean_object* x_15280; lean_object* x_15281; lean_object* x_15282; +lean_dec(x_15259); +lean_dec(x_15254); +lean_dec(x_15144); +lean_dec(x_15249); +lean_dec(x_14824); +lean_dec(x_14813); +lean_dec(x_153); +lean_dec(x_5); +lean_dec(x_4); +lean_dec(x_2); +lean_dec(x_1); +x_15279 = lean_ctor_get(x_15262, 0); +lean_inc(x_15279); +x_15280 = lean_ctor_get(x_15262, 1); +lean_inc(x_15280); +if (lean_is_exclusive(x_15262)) { + lean_ctor_release(x_15262, 0); + lean_ctor_release(x_15262, 1); + x_15281 = x_15262; +} else { + lean_dec_ref(x_15262); + x_15281 = lean_box(0); +} +if (lean_is_scalar(x_15281)) { + x_15282 = lean_alloc_ctor(1, 2, 0); +} else { + x_15282 = x_15281; +} +lean_ctor_set(x_15282, 0, x_15279); +lean_ctor_set(x_15282, 1, x_15280); +return x_15282; +} +} +else +{ +lean_object* x_15283; lean_object* x_15284; lean_object* x_15285; lean_object* x_15286; lean_object* x_15287; lean_object* x_15288; lean_object* x_15289; lean_object* x_15290; +lean_dec(x_15244); +lean_dec(x_15242); +lean_inc(x_14813); +lean_inc(x_153); +lean_ctor_set_tag(x_15144, 6); +lean_ctor_set(x_15144, 1, x_14813); +lean_ctor_set(x_15144, 0, x_153); +x_15283 = lean_ctor_get(x_1, 0); +lean_inc(x_15283); +x_15284 = l_Lean_IR_ToIR_bindVar(x_15283, x_14819, x_4, x_5, x_15149); +x_15285 = lean_ctor_get(x_15284, 0); +lean_inc(x_15285); +x_15286 = lean_ctor_get(x_15284, 1); +lean_inc(x_15286); +lean_dec(x_15284); +x_15287 = lean_ctor_get(x_15285, 0); +lean_inc(x_15287); +x_15288 = lean_ctor_get(x_15285, 1); +lean_inc(x_15288); +lean_dec(x_15285); +x_15289 = lean_ctor_get(x_1, 2); +lean_inc(x_15289); +lean_inc(x_5); +lean_inc(x_4); +x_15290 = l_Lean_IR_ToIR_lowerType(x_15289, x_15288, x_4, x_5, x_15286); +if (lean_obj_tag(x_15290) == 0) +{ +lean_object* x_15291; lean_object* x_15292; lean_object* x_15293; lean_object* x_15294; lean_object* x_15295; +x_15291 = lean_ctor_get(x_15290, 0); +lean_inc(x_15291); +x_15292 = lean_ctor_get(x_15290, 1); +lean_inc(x_15292); +lean_dec(x_15290); +x_15293 = lean_ctor_get(x_15291, 0); +lean_inc(x_15293); +x_15294 = lean_ctor_get(x_15291, 1); +lean_inc(x_15294); +lean_dec(x_15291); +lean_inc(x_5); +lean_inc(x_4); +lean_inc(x_2); +x_15295 = l_Lean_IR_ToIR_lowerLet___lambda__1(x_2, x_15287, x_15144, x_15293, x_15294, x_4, x_5, x_15292); +if (lean_obj_tag(x_15295) == 0) +{ +lean_object* x_15296; 
lean_object* x_15297; lean_object* x_15298; lean_object* x_15299; lean_object* x_15300; lean_object* x_15301; lean_object* x_15302; +x_15296 = lean_ctor_get(x_15295, 0); +lean_inc(x_15296); +x_15297 = lean_ctor_get(x_15295, 1); +lean_inc(x_15297); +lean_dec(x_15295); +x_15298 = lean_ctor_get(x_15296, 0); +lean_inc(x_15298); +x_15299 = lean_ctor_get(x_15296, 1); +lean_inc(x_15299); +if (lean_is_exclusive(x_15296)) { + lean_ctor_release(x_15296, 0); + lean_ctor_release(x_15296, 1); + x_15300 = x_15296; +} else { + lean_dec_ref(x_15296); + x_15300 = lean_box(0); +} +x_15301 = lean_alloc_ctor(1, 1, 0); +lean_ctor_set(x_15301, 0, x_15298); +if (lean_is_scalar(x_15300)) { + x_15302 = lean_alloc_ctor(0, 2, 0); +} else { + x_15302 = x_15300; +} +lean_ctor_set(x_15302, 0, x_15301); +lean_ctor_set(x_15302, 1, x_15299); +x_15113 = x_15302; +x_15114 = x_15297; +goto block_15143; +} +else +{ +lean_object* x_15303; lean_object* x_15304; lean_object* x_15305; lean_object* x_15306; +lean_dec(x_14824); +lean_dec(x_14813); +lean_dec(x_153); +lean_dec(x_5); +lean_dec(x_4); +lean_dec(x_2); +lean_dec(x_1); +x_15303 = lean_ctor_get(x_15295, 0); +lean_inc(x_15303); +x_15304 = lean_ctor_get(x_15295, 1); +lean_inc(x_15304); +if (lean_is_exclusive(x_15295)) { + lean_ctor_release(x_15295, 0); + lean_ctor_release(x_15295, 1); + x_15305 = x_15295; +} else { + lean_dec_ref(x_15295); + x_15305 = lean_box(0); +} +if (lean_is_scalar(x_15305)) { + x_15306 = lean_alloc_ctor(1, 2, 0); +} else { + x_15306 = x_15305; +} +lean_ctor_set(x_15306, 0, x_15303); +lean_ctor_set(x_15306, 1, x_15304); +return x_15306; +} +} +else +{ +lean_object* x_15307; lean_object* x_15308; lean_object* x_15309; lean_object* x_15310; +lean_dec(x_15287); +lean_dec(x_15144); +lean_dec(x_14824); +lean_dec(x_14813); +lean_dec(x_153); +lean_dec(x_5); +lean_dec(x_4); +lean_dec(x_2); +lean_dec(x_1); +x_15307 = lean_ctor_get(x_15290, 0); +lean_inc(x_15307); +x_15308 = lean_ctor_get(x_15290, 1); +lean_inc(x_15308); +if (lean_is_exclusive(x_15290)) { + lean_ctor_release(x_15290, 0); + lean_ctor_release(x_15290, 1); + x_15309 = x_15290; +} else { + lean_dec_ref(x_15290); + x_15309 = lean_box(0); +} +if (lean_is_scalar(x_15309)) { + x_15310 = lean_alloc_ctor(1, 2, 0); +} else { + x_15310 = x_15309; +} +lean_ctor_set(x_15310, 0, x_15307); +lean_ctor_set(x_15310, 1, x_15308); +return x_15310; +} +} +} +else +{ +lean_object* x_15311; lean_object* x_15312; lean_object* x_15313; lean_object* x_15314; lean_object* x_15315; lean_object* x_15316; lean_object* x_15317; lean_object* x_15318; +lean_dec(x_15244); +lean_dec(x_15242); +lean_inc(x_14813); +lean_inc(x_153); +lean_ctor_set_tag(x_15144, 7); +lean_ctor_set(x_15144, 1, x_14813); +lean_ctor_set(x_15144, 0, x_153); +x_15311 = lean_ctor_get(x_1, 0); +lean_inc(x_15311); +x_15312 = l_Lean_IR_ToIR_bindVar(x_15311, x_14819, x_4, x_5, x_15149); +x_15313 = lean_ctor_get(x_15312, 0); +lean_inc(x_15313); +x_15314 = lean_ctor_get(x_15312, 1); +lean_inc(x_15314); +lean_dec(x_15312); +x_15315 = lean_ctor_get(x_15313, 0); +lean_inc(x_15315); +x_15316 = lean_ctor_get(x_15313, 1); +lean_inc(x_15316); +lean_dec(x_15313); +x_15317 = lean_box(7); +lean_inc(x_5); +lean_inc(x_4); +lean_inc(x_2); +x_15318 = l_Lean_IR_ToIR_lowerLet___lambda__1(x_2, x_15315, x_15144, x_15317, x_15316, x_4, x_5, x_15314); +if (lean_obj_tag(x_15318) == 0) +{ +lean_object* x_15319; lean_object* x_15320; lean_object* x_15321; lean_object* x_15322; lean_object* x_15323; lean_object* x_15324; lean_object* x_15325; +x_15319 = lean_ctor_get(x_15318, 0); 
+lean_inc(x_15319); +x_15320 = lean_ctor_get(x_15318, 1); +lean_inc(x_15320); +lean_dec(x_15318); +x_15321 = lean_ctor_get(x_15319, 0); +lean_inc(x_15321); +x_15322 = lean_ctor_get(x_15319, 1); +lean_inc(x_15322); +if (lean_is_exclusive(x_15319)) { + lean_ctor_release(x_15319, 0); + lean_ctor_release(x_15319, 1); + x_15323 = x_15319; +} else { + lean_dec_ref(x_15319); + x_15323 = lean_box(0); +} +x_15324 = lean_alloc_ctor(1, 1, 0); +lean_ctor_set(x_15324, 0, x_15321); +if (lean_is_scalar(x_15323)) { + x_15325 = lean_alloc_ctor(0, 2, 0); +} else { + x_15325 = x_15323; +} +lean_ctor_set(x_15325, 0, x_15324); +lean_ctor_set(x_15325, 1, x_15322); +x_15113 = x_15325; +x_15114 = x_15320; +goto block_15143; +} +else +{ +lean_object* x_15326; lean_object* x_15327; lean_object* x_15328; lean_object* x_15329; +lean_dec(x_14824); +lean_dec(x_14813); +lean_dec(x_153); +lean_dec(x_5); +lean_dec(x_4); +lean_dec(x_2); +lean_dec(x_1); +x_15326 = lean_ctor_get(x_15318, 0); +lean_inc(x_15326); +x_15327 = lean_ctor_get(x_15318, 1); +lean_inc(x_15327); +if (lean_is_exclusive(x_15318)) { + lean_ctor_release(x_15318, 0); + lean_ctor_release(x_15318, 1); + x_15328 = x_15318; +} else { + lean_dec_ref(x_15318); + x_15328 = lean_box(0); +} +if (lean_is_scalar(x_15328)) { + x_15329 = lean_alloc_ctor(1, 2, 0); +} else { + x_15329 = x_15328; +} +lean_ctor_set(x_15329, 0, x_15326); +lean_ctor_set(x_15329, 1, x_15327); +return x_15329; +} +} +} +} +else +{ +lean_object* x_15330; lean_object* x_15331; lean_object* x_15332; lean_object* x_15333; lean_object* x_15334; lean_object* x_15335; uint8_t x_15336; +x_15330 = lean_ctor_get(x_15144, 1); +lean_inc(x_15330); +lean_dec(x_15144); +x_15331 = lean_ctor_get(x_15145, 0); +lean_inc(x_15331); +if (lean_is_exclusive(x_15145)) { + lean_ctor_release(x_15145, 0); + x_15332 = x_15145; +} else { + lean_dec_ref(x_15145); + x_15332 = lean_box(0); +} +x_15333 = lean_array_get_size(x_14813); +x_15334 = lean_ctor_get(x_15331, 3); +lean_inc(x_15334); +lean_dec(x_15331); +x_15335 = lean_array_get_size(x_15334); +lean_dec(x_15334); +x_15336 = lean_nat_dec_lt(x_15333, x_15335); +if (x_15336 == 0) +{ +uint8_t x_15337; +x_15337 = lean_nat_dec_eq(x_15333, x_15335); +if (x_15337 == 0) +{ +lean_object* x_15338; lean_object* x_15339; lean_object* x_15340; lean_object* x_15341; lean_object* x_15342; lean_object* x_15343; lean_object* x_15344; lean_object* x_15345; lean_object* x_15346; lean_object* x_15347; lean_object* x_15348; lean_object* x_15349; lean_object* x_15350; lean_object* x_15351; lean_object* x_15352; lean_object* x_15353; lean_object* x_15354; +x_15338 = lean_unsigned_to_nat(0u); +x_15339 = l_Array_extract___rarg(x_14813, x_15338, x_15335); +x_15340 = l_Array_extract___rarg(x_14813, x_15335, x_15333); +lean_dec(x_15333); +lean_inc(x_153); +x_15341 = lean_alloc_ctor(6, 2, 0); +lean_ctor_set(x_15341, 0, x_153); +lean_ctor_set(x_15341, 1, x_15339); +x_15342 = lean_ctor_get(x_1, 0); +lean_inc(x_15342); +x_15343 = l_Lean_IR_ToIR_bindVar(x_15342, x_14819, x_4, x_5, x_15330); +x_15344 = lean_ctor_get(x_15343, 0); +lean_inc(x_15344); +x_15345 = lean_ctor_get(x_15343, 1); +lean_inc(x_15345); +lean_dec(x_15343); +x_15346 = lean_ctor_get(x_15344, 0); +lean_inc(x_15346); +x_15347 = lean_ctor_get(x_15344, 1); +lean_inc(x_15347); +lean_dec(x_15344); +x_15348 = l_Lean_IR_ToIR_newVar(x_15347, x_4, x_5, x_15345); +x_15349 = lean_ctor_get(x_15348, 0); +lean_inc(x_15349); +x_15350 = lean_ctor_get(x_15348, 1); +lean_inc(x_15350); +lean_dec(x_15348); +x_15351 = lean_ctor_get(x_15349, 0); 
+lean_inc(x_15351); +x_15352 = lean_ctor_get(x_15349, 1); +lean_inc(x_15352); +lean_dec(x_15349); +x_15353 = lean_ctor_get(x_1, 2); +lean_inc(x_15353); +lean_inc(x_5); +lean_inc(x_4); +x_15354 = l_Lean_IR_ToIR_lowerType(x_15353, x_15352, x_4, x_5, x_15350); +if (lean_obj_tag(x_15354) == 0) +{ +lean_object* x_15355; lean_object* x_15356; lean_object* x_15357; lean_object* x_15358; lean_object* x_15359; +x_15355 = lean_ctor_get(x_15354, 0); +lean_inc(x_15355); +x_15356 = lean_ctor_get(x_15354, 1); +lean_inc(x_15356); +lean_dec(x_15354); +x_15357 = lean_ctor_get(x_15355, 0); +lean_inc(x_15357); +x_15358 = lean_ctor_get(x_15355, 1); +lean_inc(x_15358); +lean_dec(x_15355); +lean_inc(x_5); +lean_inc(x_4); +lean_inc(x_2); +x_15359 = l_Lean_IR_ToIR_lowerLet___lambda__3(x_2, x_15351, x_15340, x_15346, x_15341, x_15357, x_15358, x_4, x_5, x_15356); +if (lean_obj_tag(x_15359) == 0) +{ +lean_object* x_15360; lean_object* x_15361; lean_object* x_15362; lean_object* x_15363; lean_object* x_15364; lean_object* x_15365; lean_object* x_15366; +x_15360 = lean_ctor_get(x_15359, 0); +lean_inc(x_15360); +x_15361 = lean_ctor_get(x_15359, 1); +lean_inc(x_15361); +lean_dec(x_15359); +x_15362 = lean_ctor_get(x_15360, 0); +lean_inc(x_15362); +x_15363 = lean_ctor_get(x_15360, 1); +lean_inc(x_15363); +if (lean_is_exclusive(x_15360)) { + lean_ctor_release(x_15360, 0); + lean_ctor_release(x_15360, 1); + x_15364 = x_15360; +} else { + lean_dec_ref(x_15360); + x_15364 = lean_box(0); +} +if (lean_is_scalar(x_15332)) { + x_15365 = lean_alloc_ctor(1, 1, 0); +} else { + x_15365 = x_15332; +} +lean_ctor_set(x_15365, 0, x_15362); +if (lean_is_scalar(x_15364)) { + x_15366 = lean_alloc_ctor(0, 2, 0); +} else { + x_15366 = x_15364; +} +lean_ctor_set(x_15366, 0, x_15365); +lean_ctor_set(x_15366, 1, x_15363); +x_15113 = x_15366; +x_15114 = x_15361; +goto block_15143; +} +else +{ +lean_object* x_15367; lean_object* x_15368; lean_object* x_15369; lean_object* x_15370; +lean_dec(x_15332); +lean_dec(x_14824); +lean_dec(x_14813); +lean_dec(x_153); +lean_dec(x_5); +lean_dec(x_4); +lean_dec(x_2); +lean_dec(x_1); +x_15367 = lean_ctor_get(x_15359, 0); +lean_inc(x_15367); +x_15368 = lean_ctor_get(x_15359, 1); +lean_inc(x_15368); +if (lean_is_exclusive(x_15359)) { + lean_ctor_release(x_15359, 0); + lean_ctor_release(x_15359, 1); + x_15369 = x_15359; +} else { + lean_dec_ref(x_15359); + x_15369 = lean_box(0); +} +if (lean_is_scalar(x_15369)) { + x_15370 = lean_alloc_ctor(1, 2, 0); +} else { + x_15370 = x_15369; +} +lean_ctor_set(x_15370, 0, x_15367); +lean_ctor_set(x_15370, 1, x_15368); +return x_15370; +} +} +else +{ +lean_object* x_15371; lean_object* x_15372; lean_object* x_15373; lean_object* x_15374; +lean_dec(x_15351); +lean_dec(x_15346); +lean_dec(x_15341); +lean_dec(x_15340); +lean_dec(x_15332); +lean_dec(x_14824); +lean_dec(x_14813); +lean_dec(x_153); +lean_dec(x_5); +lean_dec(x_4); +lean_dec(x_2); +lean_dec(x_1); +x_15371 = lean_ctor_get(x_15354, 0); +lean_inc(x_15371); +x_15372 = lean_ctor_get(x_15354, 1); +lean_inc(x_15372); +if (lean_is_exclusive(x_15354)) { + lean_ctor_release(x_15354, 0); + lean_ctor_release(x_15354, 1); + x_15373 = x_15354; +} else { + lean_dec_ref(x_15354); + x_15373 = lean_box(0); +} +if (lean_is_scalar(x_15373)) { + x_15374 = lean_alloc_ctor(1, 2, 0); +} else { + x_15374 = x_15373; +} +lean_ctor_set(x_15374, 0, x_15371); +lean_ctor_set(x_15374, 1, x_15372); +return x_15374; +} +} +else +{ +lean_object* x_15375; lean_object* x_15376; lean_object* x_15377; lean_object* x_15378; lean_object* x_15379; 
lean_object* x_15380; lean_object* x_15381; lean_object* x_15382; lean_object* x_15383; +lean_dec(x_15335); +lean_dec(x_15333); +lean_inc(x_14813); +lean_inc(x_153); +x_15375 = lean_alloc_ctor(6, 2, 0); +lean_ctor_set(x_15375, 0, x_153); +lean_ctor_set(x_15375, 1, x_14813); +x_15376 = lean_ctor_get(x_1, 0); +lean_inc(x_15376); +x_15377 = l_Lean_IR_ToIR_bindVar(x_15376, x_14819, x_4, x_5, x_15330); +x_15378 = lean_ctor_get(x_15377, 0); +lean_inc(x_15378); +x_15379 = lean_ctor_get(x_15377, 1); +lean_inc(x_15379); +lean_dec(x_15377); +x_15380 = lean_ctor_get(x_15378, 0); +lean_inc(x_15380); +x_15381 = lean_ctor_get(x_15378, 1); +lean_inc(x_15381); +lean_dec(x_15378); +x_15382 = lean_ctor_get(x_1, 2); +lean_inc(x_15382); +lean_inc(x_5); +lean_inc(x_4); +x_15383 = l_Lean_IR_ToIR_lowerType(x_15382, x_15381, x_4, x_5, x_15379); +if (lean_obj_tag(x_15383) == 0) +{ +lean_object* x_15384; lean_object* x_15385; lean_object* x_15386; lean_object* x_15387; lean_object* x_15388; +x_15384 = lean_ctor_get(x_15383, 0); +lean_inc(x_15384); +x_15385 = lean_ctor_get(x_15383, 1); +lean_inc(x_15385); +lean_dec(x_15383); +x_15386 = lean_ctor_get(x_15384, 0); +lean_inc(x_15386); +x_15387 = lean_ctor_get(x_15384, 1); +lean_inc(x_15387); +lean_dec(x_15384); +lean_inc(x_5); +lean_inc(x_4); +lean_inc(x_2); +x_15388 = l_Lean_IR_ToIR_lowerLet___lambda__1(x_2, x_15380, x_15375, x_15386, x_15387, x_4, x_5, x_15385); +if (lean_obj_tag(x_15388) == 0) +{ +lean_object* x_15389; lean_object* x_15390; lean_object* x_15391; lean_object* x_15392; lean_object* x_15393; lean_object* x_15394; lean_object* x_15395; +x_15389 = lean_ctor_get(x_15388, 0); +lean_inc(x_15389); +x_15390 = lean_ctor_get(x_15388, 1); +lean_inc(x_15390); +lean_dec(x_15388); +x_15391 = lean_ctor_get(x_15389, 0); +lean_inc(x_15391); +x_15392 = lean_ctor_get(x_15389, 1); +lean_inc(x_15392); +if (lean_is_exclusive(x_15389)) { + lean_ctor_release(x_15389, 0); + lean_ctor_release(x_15389, 1); + x_15393 = x_15389; +} else { + lean_dec_ref(x_15389); + x_15393 = lean_box(0); +} +if (lean_is_scalar(x_15332)) { + x_15394 = lean_alloc_ctor(1, 1, 0); +} else { + x_15394 = x_15332; +} +lean_ctor_set(x_15394, 0, x_15391); +if (lean_is_scalar(x_15393)) { + x_15395 = lean_alloc_ctor(0, 2, 0); +} else { + x_15395 = x_15393; +} +lean_ctor_set(x_15395, 0, x_15394); +lean_ctor_set(x_15395, 1, x_15392); +x_15113 = x_15395; +x_15114 = x_15390; +goto block_15143; +} +else +{ +lean_object* x_15396; lean_object* x_15397; lean_object* x_15398; lean_object* x_15399; +lean_dec(x_15332); +lean_dec(x_14824); +lean_dec(x_14813); +lean_dec(x_153); +lean_dec(x_5); +lean_dec(x_4); +lean_dec(x_2); +lean_dec(x_1); +x_15396 = lean_ctor_get(x_15388, 0); +lean_inc(x_15396); +x_15397 = lean_ctor_get(x_15388, 1); +lean_inc(x_15397); +if (lean_is_exclusive(x_15388)) { + lean_ctor_release(x_15388, 0); + lean_ctor_release(x_15388, 1); + x_15398 = x_15388; +} else { + lean_dec_ref(x_15388); + x_15398 = lean_box(0); +} +if (lean_is_scalar(x_15398)) { + x_15399 = lean_alloc_ctor(1, 2, 0); +} else { + x_15399 = x_15398; +} +lean_ctor_set(x_15399, 0, x_15396); +lean_ctor_set(x_15399, 1, x_15397); +return x_15399; +} +} +else +{ +lean_object* x_15400; lean_object* x_15401; lean_object* x_15402; lean_object* x_15403; +lean_dec(x_15380); +lean_dec(x_15375); +lean_dec(x_15332); +lean_dec(x_14824); +lean_dec(x_14813); +lean_dec(x_153); +lean_dec(x_5); +lean_dec(x_4); +lean_dec(x_2); +lean_dec(x_1); +x_15400 = lean_ctor_get(x_15383, 0); +lean_inc(x_15400); +x_15401 = lean_ctor_get(x_15383, 1); +lean_inc(x_15401); 
+if (lean_is_exclusive(x_15383)) { + lean_ctor_release(x_15383, 0); + lean_ctor_release(x_15383, 1); + x_15402 = x_15383; +} else { + lean_dec_ref(x_15383); + x_15402 = lean_box(0); +} +if (lean_is_scalar(x_15402)) { + x_15403 = lean_alloc_ctor(1, 2, 0); +} else { + x_15403 = x_15402; +} +lean_ctor_set(x_15403, 0, x_15400); +lean_ctor_set(x_15403, 1, x_15401); +return x_15403; +} +} +} +else +{ +lean_object* x_15404; lean_object* x_15405; lean_object* x_15406; lean_object* x_15407; lean_object* x_15408; lean_object* x_15409; lean_object* x_15410; lean_object* x_15411; lean_object* x_15412; +lean_dec(x_15335); +lean_dec(x_15333); +lean_inc(x_14813); +lean_inc(x_153); +x_15404 = lean_alloc_ctor(7, 2, 0); +lean_ctor_set(x_15404, 0, x_153); +lean_ctor_set(x_15404, 1, x_14813); +x_15405 = lean_ctor_get(x_1, 0); +lean_inc(x_15405); +x_15406 = l_Lean_IR_ToIR_bindVar(x_15405, x_14819, x_4, x_5, x_15330); +x_15407 = lean_ctor_get(x_15406, 0); +lean_inc(x_15407); +x_15408 = lean_ctor_get(x_15406, 1); +lean_inc(x_15408); +lean_dec(x_15406); +x_15409 = lean_ctor_get(x_15407, 0); +lean_inc(x_15409); +x_15410 = lean_ctor_get(x_15407, 1); +lean_inc(x_15410); +lean_dec(x_15407); +x_15411 = lean_box(7); +lean_inc(x_5); +lean_inc(x_4); +lean_inc(x_2); +x_15412 = l_Lean_IR_ToIR_lowerLet___lambda__1(x_2, x_15409, x_15404, x_15411, x_15410, x_4, x_5, x_15408); +if (lean_obj_tag(x_15412) == 0) +{ +lean_object* x_15413; lean_object* x_15414; lean_object* x_15415; lean_object* x_15416; lean_object* x_15417; lean_object* x_15418; lean_object* x_15419; +x_15413 = lean_ctor_get(x_15412, 0); +lean_inc(x_15413); +x_15414 = lean_ctor_get(x_15412, 1); +lean_inc(x_15414); +lean_dec(x_15412); +x_15415 = lean_ctor_get(x_15413, 0); +lean_inc(x_15415); +x_15416 = lean_ctor_get(x_15413, 1); +lean_inc(x_15416); +if (lean_is_exclusive(x_15413)) { + lean_ctor_release(x_15413, 0); + lean_ctor_release(x_15413, 1); + x_15417 = x_15413; +} else { + lean_dec_ref(x_15413); + x_15417 = lean_box(0); +} +if (lean_is_scalar(x_15332)) { + x_15418 = lean_alloc_ctor(1, 1, 0); +} else { + x_15418 = x_15332; +} +lean_ctor_set(x_15418, 0, x_15415); +if (lean_is_scalar(x_15417)) { + x_15419 = lean_alloc_ctor(0, 2, 0); +} else { + x_15419 = x_15417; +} +lean_ctor_set(x_15419, 0, x_15418); +lean_ctor_set(x_15419, 1, x_15416); +x_15113 = x_15419; +x_15114 = x_15414; +goto block_15143; +} +else +{ +lean_object* x_15420; lean_object* x_15421; lean_object* x_15422; lean_object* x_15423; +lean_dec(x_15332); +lean_dec(x_14824); +lean_dec(x_14813); +lean_dec(x_153); +lean_dec(x_5); +lean_dec(x_4); +lean_dec(x_2); +lean_dec(x_1); +x_15420 = lean_ctor_get(x_15412, 0); +lean_inc(x_15420); +x_15421 = lean_ctor_get(x_15412, 1); +lean_inc(x_15421); +if (lean_is_exclusive(x_15412)) { + lean_ctor_release(x_15412, 0); + lean_ctor_release(x_15412, 1); + x_15422 = x_15412; +} else { + lean_dec_ref(x_15412); + x_15422 = lean_box(0); +} +if (lean_is_scalar(x_15422)) { + x_15423 = lean_alloc_ctor(1, 2, 0); +} else { + x_15423 = x_15422; +} +lean_ctor_set(x_15423, 0, x_15420); +lean_ctor_set(x_15423, 1, x_15421); +return x_15423; +} +} +} +} +block_15143: +{ +lean_object* x_15115; +x_15115 = lean_ctor_get(x_15113, 0); +lean_inc(x_15115); +if (lean_obj_tag(x_15115) == 0) +{ +lean_object* x_15116; lean_object* x_15117; lean_object* x_15118; lean_object* x_15119; lean_object* x_15120; lean_object* x_15121; lean_object* x_15122; lean_object* x_15123; lean_object* x_15124; lean_object* x_15125; +lean_dec(x_14824); +x_15116 = lean_ctor_get(x_15113, 1); +lean_inc(x_15116); 
+lean_dec(x_15113); +x_15117 = lean_alloc_ctor(6, 2, 0); +lean_ctor_set(x_15117, 0, x_153); +lean_ctor_set(x_15117, 1, x_14813); +x_15118 = lean_ctor_get(x_1, 0); +lean_inc(x_15118); +x_15119 = l_Lean_IR_ToIR_bindVar(x_15118, x_15116, x_4, x_5, x_15114); +x_15120 = lean_ctor_get(x_15119, 0); +lean_inc(x_15120); +x_15121 = lean_ctor_get(x_15119, 1); +lean_inc(x_15121); +lean_dec(x_15119); +x_15122 = lean_ctor_get(x_15120, 0); +lean_inc(x_15122); +x_15123 = lean_ctor_get(x_15120, 1); +lean_inc(x_15123); +lean_dec(x_15120); +x_15124 = lean_ctor_get(x_1, 2); +lean_inc(x_15124); +lean_dec(x_1); +lean_inc(x_5); +lean_inc(x_4); +x_15125 = l_Lean_IR_ToIR_lowerType(x_15124, x_15123, x_4, x_5, x_15121); +if (lean_obj_tag(x_15125) == 0) +{ +lean_object* x_15126; lean_object* x_15127; lean_object* x_15128; lean_object* x_15129; lean_object* x_15130; +x_15126 = lean_ctor_get(x_15125, 0); +lean_inc(x_15126); +x_15127 = lean_ctor_get(x_15125, 1); +lean_inc(x_15127); +lean_dec(x_15125); +x_15128 = lean_ctor_get(x_15126, 0); +lean_inc(x_15128); +x_15129 = lean_ctor_get(x_15126, 1); +lean_inc(x_15129); +lean_dec(x_15126); +x_15130 = l_Lean_IR_ToIR_lowerLet___lambda__1(x_2, x_15122, x_15117, x_15128, x_15129, x_4, x_5, x_15127); +return x_15130; +} +else +{ +uint8_t x_15131; +lean_dec(x_15122); +lean_dec(x_15117); +lean_dec(x_5); +lean_dec(x_4); +lean_dec(x_2); +x_15131 = !lean_is_exclusive(x_15125); +if (x_15131 == 0) +{ +return x_15125; +} +else +{ +lean_object* x_15132; lean_object* x_15133; lean_object* x_15134; +x_15132 = lean_ctor_get(x_15125, 0); +x_15133 = lean_ctor_get(x_15125, 1); +lean_inc(x_15133); +lean_inc(x_15132); +lean_dec(x_15125); +x_15134 = lean_alloc_ctor(1, 2, 0); +lean_ctor_set(x_15134, 0, x_15132); +lean_ctor_set(x_15134, 1, x_15133); +return x_15134; +} +} +} +else +{ +uint8_t x_15135; +lean_dec(x_14813); +lean_dec(x_153); +lean_dec(x_5); +lean_dec(x_4); +lean_dec(x_2); +lean_dec(x_1); +x_15135 = !lean_is_exclusive(x_15113); +if (x_15135 == 0) +{ +lean_object* x_15136; lean_object* x_15137; lean_object* x_15138; +x_15136 = lean_ctor_get(x_15113, 0); +lean_dec(x_15136); +x_15137 = lean_ctor_get(x_15115, 0); +lean_inc(x_15137); +lean_dec(x_15115); +lean_ctor_set(x_15113, 0, x_15137); +if (lean_is_scalar(x_14824)) { + x_15138 = lean_alloc_ctor(0, 2, 0); +} else { + x_15138 = x_14824; +} +lean_ctor_set(x_15138, 0, x_15113); +lean_ctor_set(x_15138, 1, x_15114); +return x_15138; +} +else +{ +lean_object* x_15139; lean_object* x_15140; lean_object* x_15141; lean_object* x_15142; +x_15139 = lean_ctor_get(x_15113, 1); +lean_inc(x_15139); +lean_dec(x_15113); +x_15140 = lean_ctor_get(x_15115, 0); +lean_inc(x_15140); +lean_dec(x_15115); +x_15141 = lean_alloc_ctor(0, 2, 0); +lean_ctor_set(x_15141, 0, x_15140); +lean_ctor_set(x_15141, 1, x_15139); +if (lean_is_scalar(x_14824)) { + x_15142 = lean_alloc_ctor(0, 2, 0); +} else { + x_15142 = x_14824; +} +lean_ctor_set(x_15142, 0, x_15141); +lean_ctor_set(x_15142, 1, x_15114); +return x_15142; +} +} +} +} +case 2: +{ +lean_object* x_15424; lean_object* x_15425; +lean_dec(x_14830); +lean_dec(x_14825); +lean_dec(x_14824); +lean_free_object(x_14815); +lean_dec(x_14813); +lean_dec(x_14805); +lean_dec(x_14804); +lean_dec(x_153); +lean_dec(x_2); +lean_dec(x_1); +x_15424 = l_Lean_IR_ToIR_lowerLet___closed__18; +x_15425 = l_panic___at_Lean_IR_ToIR_lowerAlt_loop___spec__1(x_15424, x_14819, x_4, x_5, x_14823); +return x_15425; +} +case 3: +{ +lean_object* x_15426; lean_object* x_15427; lean_object* x_15457; lean_object* x_15458; +lean_dec(x_14830); 
+lean_dec(x_14825); +lean_dec(x_14805); +lean_dec(x_14804); +lean_inc(x_153); +x_15457 = l_Lean_Compiler_LCNF_getMonoDecl_x3f(x_153, x_4, x_5, x_14823); +x_15458 = lean_ctor_get(x_15457, 0); +lean_inc(x_15458); +if (lean_obj_tag(x_15458) == 0) +{ +lean_object* x_15459; lean_object* x_15460; +x_15459 = lean_ctor_get(x_15457, 1); +lean_inc(x_15459); +lean_dec(x_15457); +x_15460 = lean_box(0); +lean_ctor_set(x_14815, 0, x_15460); +x_15426 = x_14815; +x_15427 = x_15459; +goto block_15456; +} +else +{ +uint8_t x_15461; +lean_free_object(x_14815); +x_15461 = !lean_is_exclusive(x_15457); +if (x_15461 == 0) +{ +lean_object* x_15462; lean_object* x_15463; uint8_t x_15464; +x_15462 = lean_ctor_get(x_15457, 1); +x_15463 = lean_ctor_get(x_15457, 0); +lean_dec(x_15463); +x_15464 = !lean_is_exclusive(x_15458); +if (x_15464 == 0) +{ +lean_object* x_15465; lean_object* x_15466; lean_object* x_15467; lean_object* x_15468; uint8_t x_15469; +x_15465 = lean_ctor_get(x_15458, 0); +x_15466 = lean_array_get_size(x_14813); +x_15467 = lean_ctor_get(x_15465, 3); +lean_inc(x_15467); +lean_dec(x_15465); +x_15468 = lean_array_get_size(x_15467); +lean_dec(x_15467); +x_15469 = lean_nat_dec_lt(x_15466, x_15468); +if (x_15469 == 0) +{ +uint8_t x_15470; +x_15470 = lean_nat_dec_eq(x_15466, x_15468); +if (x_15470 == 0) +{ +lean_object* x_15471; lean_object* x_15472; lean_object* x_15473; lean_object* x_15474; lean_object* x_15475; lean_object* x_15476; lean_object* x_15477; lean_object* x_15478; lean_object* x_15479; lean_object* x_15480; lean_object* x_15481; lean_object* x_15482; lean_object* x_15483; lean_object* x_15484; lean_object* x_15485; lean_object* x_15486; +x_15471 = lean_unsigned_to_nat(0u); +x_15472 = l_Array_extract___rarg(x_14813, x_15471, x_15468); +x_15473 = l_Array_extract___rarg(x_14813, x_15468, x_15466); +lean_dec(x_15466); +lean_inc(x_153); +lean_ctor_set_tag(x_15457, 6); +lean_ctor_set(x_15457, 1, x_15472); +lean_ctor_set(x_15457, 0, x_153); +x_15474 = lean_ctor_get(x_1, 0); +lean_inc(x_15474); +x_15475 = l_Lean_IR_ToIR_bindVar(x_15474, x_14819, x_4, x_5, x_15462); +x_15476 = lean_ctor_get(x_15475, 0); +lean_inc(x_15476); +x_15477 = lean_ctor_get(x_15475, 1); +lean_inc(x_15477); +lean_dec(x_15475); +x_15478 = lean_ctor_get(x_15476, 0); +lean_inc(x_15478); +x_15479 = lean_ctor_get(x_15476, 1); +lean_inc(x_15479); +lean_dec(x_15476); +x_15480 = l_Lean_IR_ToIR_newVar(x_15479, x_4, x_5, x_15477); +x_15481 = lean_ctor_get(x_15480, 0); +lean_inc(x_15481); +x_15482 = lean_ctor_get(x_15480, 1); +lean_inc(x_15482); +lean_dec(x_15480); +x_15483 = lean_ctor_get(x_15481, 0); +lean_inc(x_15483); +x_15484 = lean_ctor_get(x_15481, 1); +lean_inc(x_15484); +lean_dec(x_15481); +x_15485 = lean_ctor_get(x_1, 2); +lean_inc(x_15485); +lean_inc(x_5); +lean_inc(x_4); +x_15486 = l_Lean_IR_ToIR_lowerType(x_15485, x_15484, x_4, x_5, x_15482); +if (lean_obj_tag(x_15486) == 0) +{ +lean_object* x_15487; lean_object* x_15488; lean_object* x_15489; lean_object* x_15490; lean_object* x_15491; +x_15487 = lean_ctor_get(x_15486, 0); +lean_inc(x_15487); +x_15488 = lean_ctor_get(x_15486, 1); +lean_inc(x_15488); +lean_dec(x_15486); +x_15489 = lean_ctor_get(x_15487, 0); +lean_inc(x_15489); +x_15490 = lean_ctor_get(x_15487, 1); +lean_inc(x_15490); +lean_dec(x_15487); +lean_inc(x_5); +lean_inc(x_4); +lean_inc(x_2); +x_15491 = l_Lean_IR_ToIR_lowerLet___lambda__3(x_2, x_15483, x_15473, x_15478, x_15457, x_15489, x_15490, x_4, x_5, x_15488); +if (lean_obj_tag(x_15491) == 0) +{ +lean_object* x_15492; lean_object* x_15493; uint8_t x_15494; 
+x_15492 = lean_ctor_get(x_15491, 0); +lean_inc(x_15492); +x_15493 = lean_ctor_get(x_15491, 1); +lean_inc(x_15493); +lean_dec(x_15491); +x_15494 = !lean_is_exclusive(x_15492); +if (x_15494 == 0) +{ +lean_object* x_15495; +x_15495 = lean_ctor_get(x_15492, 0); +lean_ctor_set(x_15458, 0, x_15495); +lean_ctor_set(x_15492, 0, x_15458); +x_15426 = x_15492; +x_15427 = x_15493; +goto block_15456; +} +else +{ +lean_object* x_15496; lean_object* x_15497; lean_object* x_15498; +x_15496 = lean_ctor_get(x_15492, 0); +x_15497 = lean_ctor_get(x_15492, 1); +lean_inc(x_15497); +lean_inc(x_15496); +lean_dec(x_15492); +lean_ctor_set(x_15458, 0, x_15496); +x_15498 = lean_alloc_ctor(0, 2, 0); +lean_ctor_set(x_15498, 0, x_15458); +lean_ctor_set(x_15498, 1, x_15497); +x_15426 = x_15498; +x_15427 = x_15493; +goto block_15456; +} +} +else +{ +uint8_t x_15499; +lean_free_object(x_15458); +lean_dec(x_14824); +lean_dec(x_14813); +lean_dec(x_153); +lean_dec(x_5); +lean_dec(x_4); +lean_dec(x_2); +lean_dec(x_1); +x_15499 = !lean_is_exclusive(x_15491); +if (x_15499 == 0) +{ +return x_15491; +} +else +{ +lean_object* x_15500; lean_object* x_15501; lean_object* x_15502; +x_15500 = lean_ctor_get(x_15491, 0); +x_15501 = lean_ctor_get(x_15491, 1); +lean_inc(x_15501); +lean_inc(x_15500); +lean_dec(x_15491); +x_15502 = lean_alloc_ctor(1, 2, 0); +lean_ctor_set(x_15502, 0, x_15500); +lean_ctor_set(x_15502, 1, x_15501); +return x_15502; +} +} +} +else +{ +uint8_t x_15503; +lean_dec(x_15483); +lean_dec(x_15478); +lean_dec(x_15457); +lean_dec(x_15473); +lean_free_object(x_15458); +lean_dec(x_14824); +lean_dec(x_14813); +lean_dec(x_153); +lean_dec(x_5); +lean_dec(x_4); +lean_dec(x_2); +lean_dec(x_1); +x_15503 = !lean_is_exclusive(x_15486); +if (x_15503 == 0) +{ +return x_15486; +} +else +{ +lean_object* x_15504; lean_object* x_15505; lean_object* x_15506; +x_15504 = lean_ctor_get(x_15486, 0); +x_15505 = lean_ctor_get(x_15486, 1); +lean_inc(x_15505); +lean_inc(x_15504); +lean_dec(x_15486); +x_15506 = lean_alloc_ctor(1, 2, 0); +lean_ctor_set(x_15506, 0, x_15504); +lean_ctor_set(x_15506, 1, x_15505); +return x_15506; +} +} +} +else +{ +lean_object* x_15507; lean_object* x_15508; lean_object* x_15509; lean_object* x_15510; lean_object* x_15511; lean_object* x_15512; lean_object* x_15513; lean_object* x_15514; +lean_dec(x_15468); +lean_dec(x_15466); +lean_inc(x_14813); +lean_inc(x_153); +lean_ctor_set_tag(x_15457, 6); +lean_ctor_set(x_15457, 1, x_14813); +lean_ctor_set(x_15457, 0, x_153); +x_15507 = lean_ctor_get(x_1, 0); +lean_inc(x_15507); +x_15508 = l_Lean_IR_ToIR_bindVar(x_15507, x_14819, x_4, x_5, x_15462); +x_15509 = lean_ctor_get(x_15508, 0); +lean_inc(x_15509); +x_15510 = lean_ctor_get(x_15508, 1); +lean_inc(x_15510); +lean_dec(x_15508); +x_15511 = lean_ctor_get(x_15509, 0); +lean_inc(x_15511); +x_15512 = lean_ctor_get(x_15509, 1); +lean_inc(x_15512); +lean_dec(x_15509); +x_15513 = lean_ctor_get(x_1, 2); +lean_inc(x_15513); +lean_inc(x_5); +lean_inc(x_4); +x_15514 = l_Lean_IR_ToIR_lowerType(x_15513, x_15512, x_4, x_5, x_15510); +if (lean_obj_tag(x_15514) == 0) +{ +lean_object* x_15515; lean_object* x_15516; lean_object* x_15517; lean_object* x_15518; lean_object* x_15519; +x_15515 = lean_ctor_get(x_15514, 0); +lean_inc(x_15515); +x_15516 = lean_ctor_get(x_15514, 1); +lean_inc(x_15516); +lean_dec(x_15514); +x_15517 = lean_ctor_get(x_15515, 0); +lean_inc(x_15517); +x_15518 = lean_ctor_get(x_15515, 1); +lean_inc(x_15518); +lean_dec(x_15515); +lean_inc(x_5); +lean_inc(x_4); +lean_inc(x_2); +x_15519 = 
l_Lean_IR_ToIR_lowerLet___lambda__1(x_2, x_15511, x_15457, x_15517, x_15518, x_4, x_5, x_15516); +if (lean_obj_tag(x_15519) == 0) +{ +lean_object* x_15520; lean_object* x_15521; uint8_t x_15522; +x_15520 = lean_ctor_get(x_15519, 0); +lean_inc(x_15520); +x_15521 = lean_ctor_get(x_15519, 1); +lean_inc(x_15521); +lean_dec(x_15519); +x_15522 = !lean_is_exclusive(x_15520); +if (x_15522 == 0) +{ +lean_object* x_15523; +x_15523 = lean_ctor_get(x_15520, 0); +lean_ctor_set(x_15458, 0, x_15523); +lean_ctor_set(x_15520, 0, x_15458); +x_15426 = x_15520; +x_15427 = x_15521; +goto block_15456; +} +else +{ +lean_object* x_15524; lean_object* x_15525; lean_object* x_15526; +x_15524 = lean_ctor_get(x_15520, 0); +x_15525 = lean_ctor_get(x_15520, 1); +lean_inc(x_15525); +lean_inc(x_15524); +lean_dec(x_15520); +lean_ctor_set(x_15458, 0, x_15524); +x_15526 = lean_alloc_ctor(0, 2, 0); +lean_ctor_set(x_15526, 0, x_15458); +lean_ctor_set(x_15526, 1, x_15525); +x_15426 = x_15526; +x_15427 = x_15521; +goto block_15456; +} +} +else +{ +uint8_t x_15527; +lean_free_object(x_15458); +lean_dec(x_14824); +lean_dec(x_14813); +lean_dec(x_153); +lean_dec(x_5); +lean_dec(x_4); +lean_dec(x_2); +lean_dec(x_1); +x_15527 = !lean_is_exclusive(x_15519); +if (x_15527 == 0) +{ +return x_15519; +} +else +{ +lean_object* x_15528; lean_object* x_15529; lean_object* x_15530; +x_15528 = lean_ctor_get(x_15519, 0); +x_15529 = lean_ctor_get(x_15519, 1); +lean_inc(x_15529); +lean_inc(x_15528); +lean_dec(x_15519); +x_15530 = lean_alloc_ctor(1, 2, 0); +lean_ctor_set(x_15530, 0, x_15528); +lean_ctor_set(x_15530, 1, x_15529); +return x_15530; +} +} +} +else +{ +uint8_t x_15531; +lean_dec(x_15511); +lean_dec(x_15457); +lean_free_object(x_15458); +lean_dec(x_14824); +lean_dec(x_14813); +lean_dec(x_153); +lean_dec(x_5); +lean_dec(x_4); +lean_dec(x_2); +lean_dec(x_1); +x_15531 = !lean_is_exclusive(x_15514); +if (x_15531 == 0) +{ +return x_15514; +} +else +{ +lean_object* x_15532; lean_object* x_15533; lean_object* x_15534; +x_15532 = lean_ctor_get(x_15514, 0); +x_15533 = lean_ctor_get(x_15514, 1); +lean_inc(x_15533); +lean_inc(x_15532); +lean_dec(x_15514); +x_15534 = lean_alloc_ctor(1, 2, 0); +lean_ctor_set(x_15534, 0, x_15532); +lean_ctor_set(x_15534, 1, x_15533); +return x_15534; +} +} +} +} +else +{ +lean_object* x_15535; lean_object* x_15536; lean_object* x_15537; lean_object* x_15538; lean_object* x_15539; lean_object* x_15540; lean_object* x_15541; lean_object* x_15542; +lean_dec(x_15468); +lean_dec(x_15466); +lean_inc(x_14813); +lean_inc(x_153); +lean_ctor_set_tag(x_15457, 7); +lean_ctor_set(x_15457, 1, x_14813); +lean_ctor_set(x_15457, 0, x_153); +x_15535 = lean_ctor_get(x_1, 0); +lean_inc(x_15535); +x_15536 = l_Lean_IR_ToIR_bindVar(x_15535, x_14819, x_4, x_5, x_15462); +x_15537 = lean_ctor_get(x_15536, 0); +lean_inc(x_15537); +x_15538 = lean_ctor_get(x_15536, 1); +lean_inc(x_15538); +lean_dec(x_15536); +x_15539 = lean_ctor_get(x_15537, 0); +lean_inc(x_15539); +x_15540 = lean_ctor_get(x_15537, 1); +lean_inc(x_15540); +lean_dec(x_15537); +x_15541 = lean_box(7); +lean_inc(x_5); +lean_inc(x_4); +lean_inc(x_2); +x_15542 = l_Lean_IR_ToIR_lowerLet___lambda__1(x_2, x_15539, x_15457, x_15541, x_15540, x_4, x_5, x_15538); +if (lean_obj_tag(x_15542) == 0) +{ +lean_object* x_15543; lean_object* x_15544; uint8_t x_15545; +x_15543 = lean_ctor_get(x_15542, 0); +lean_inc(x_15543); +x_15544 = lean_ctor_get(x_15542, 1); +lean_inc(x_15544); +lean_dec(x_15542); +x_15545 = !lean_is_exclusive(x_15543); +if (x_15545 == 0) +{ +lean_object* x_15546; +x_15546 = 
lean_ctor_get(x_15543, 0); +lean_ctor_set(x_15458, 0, x_15546); +lean_ctor_set(x_15543, 0, x_15458); +x_15426 = x_15543; +x_15427 = x_15544; +goto block_15456; +} +else +{ +lean_object* x_15547; lean_object* x_15548; lean_object* x_15549; +x_15547 = lean_ctor_get(x_15543, 0); +x_15548 = lean_ctor_get(x_15543, 1); +lean_inc(x_15548); +lean_inc(x_15547); +lean_dec(x_15543); +lean_ctor_set(x_15458, 0, x_15547); +x_15549 = lean_alloc_ctor(0, 2, 0); +lean_ctor_set(x_15549, 0, x_15458); +lean_ctor_set(x_15549, 1, x_15548); +x_15426 = x_15549; +x_15427 = x_15544; +goto block_15456; +} +} +else +{ +uint8_t x_15550; +lean_free_object(x_15458); +lean_dec(x_14824); +lean_dec(x_14813); +lean_dec(x_153); +lean_dec(x_5); +lean_dec(x_4); +lean_dec(x_2); +lean_dec(x_1); +x_15550 = !lean_is_exclusive(x_15542); +if (x_15550 == 0) +{ +return x_15542; +} +else +{ +lean_object* x_15551; lean_object* x_15552; lean_object* x_15553; +x_15551 = lean_ctor_get(x_15542, 0); +x_15552 = lean_ctor_get(x_15542, 1); +lean_inc(x_15552); +lean_inc(x_15551); +lean_dec(x_15542); +x_15553 = lean_alloc_ctor(1, 2, 0); +lean_ctor_set(x_15553, 0, x_15551); +lean_ctor_set(x_15553, 1, x_15552); +return x_15553; +} +} +} +} +else +{ +lean_object* x_15554; lean_object* x_15555; lean_object* x_15556; lean_object* x_15557; uint8_t x_15558; +x_15554 = lean_ctor_get(x_15458, 0); +lean_inc(x_15554); +lean_dec(x_15458); +x_15555 = lean_array_get_size(x_14813); +x_15556 = lean_ctor_get(x_15554, 3); +lean_inc(x_15556); +lean_dec(x_15554); +x_15557 = lean_array_get_size(x_15556); +lean_dec(x_15556); +x_15558 = lean_nat_dec_lt(x_15555, x_15557); +if (x_15558 == 0) +{ +uint8_t x_15559; +x_15559 = lean_nat_dec_eq(x_15555, x_15557); +if (x_15559 == 0) +{ +lean_object* x_15560; lean_object* x_15561; lean_object* x_15562; lean_object* x_15563; lean_object* x_15564; lean_object* x_15565; lean_object* x_15566; lean_object* x_15567; lean_object* x_15568; lean_object* x_15569; lean_object* x_15570; lean_object* x_15571; lean_object* x_15572; lean_object* x_15573; lean_object* x_15574; lean_object* x_15575; +x_15560 = lean_unsigned_to_nat(0u); +x_15561 = l_Array_extract___rarg(x_14813, x_15560, x_15557); +x_15562 = l_Array_extract___rarg(x_14813, x_15557, x_15555); +lean_dec(x_15555); +lean_inc(x_153); +lean_ctor_set_tag(x_15457, 6); +lean_ctor_set(x_15457, 1, x_15561); +lean_ctor_set(x_15457, 0, x_153); +x_15563 = lean_ctor_get(x_1, 0); +lean_inc(x_15563); +x_15564 = l_Lean_IR_ToIR_bindVar(x_15563, x_14819, x_4, x_5, x_15462); +x_15565 = lean_ctor_get(x_15564, 0); +lean_inc(x_15565); +x_15566 = lean_ctor_get(x_15564, 1); +lean_inc(x_15566); +lean_dec(x_15564); +x_15567 = lean_ctor_get(x_15565, 0); +lean_inc(x_15567); +x_15568 = lean_ctor_get(x_15565, 1); +lean_inc(x_15568); +lean_dec(x_15565); +x_15569 = l_Lean_IR_ToIR_newVar(x_15568, x_4, x_5, x_15566); +x_15570 = lean_ctor_get(x_15569, 0); +lean_inc(x_15570); +x_15571 = lean_ctor_get(x_15569, 1); +lean_inc(x_15571); +lean_dec(x_15569); +x_15572 = lean_ctor_get(x_15570, 0); +lean_inc(x_15572); +x_15573 = lean_ctor_get(x_15570, 1); +lean_inc(x_15573); +lean_dec(x_15570); +x_15574 = lean_ctor_get(x_1, 2); +lean_inc(x_15574); +lean_inc(x_5); +lean_inc(x_4); +x_15575 = l_Lean_IR_ToIR_lowerType(x_15574, x_15573, x_4, x_5, x_15571); +if (lean_obj_tag(x_15575) == 0) +{ +lean_object* x_15576; lean_object* x_15577; lean_object* x_15578; lean_object* x_15579; lean_object* x_15580; +x_15576 = lean_ctor_get(x_15575, 0); +lean_inc(x_15576); +x_15577 = lean_ctor_get(x_15575, 1); +lean_inc(x_15577); 
+lean_dec(x_15575); +x_15578 = lean_ctor_get(x_15576, 0); +lean_inc(x_15578); +x_15579 = lean_ctor_get(x_15576, 1); +lean_inc(x_15579); +lean_dec(x_15576); +lean_inc(x_5); +lean_inc(x_4); +lean_inc(x_2); +x_15580 = l_Lean_IR_ToIR_lowerLet___lambda__3(x_2, x_15572, x_15562, x_15567, x_15457, x_15578, x_15579, x_4, x_5, x_15577); +if (lean_obj_tag(x_15580) == 0) +{ +lean_object* x_15581; lean_object* x_15582; lean_object* x_15583; lean_object* x_15584; lean_object* x_15585; lean_object* x_15586; lean_object* x_15587; +x_15581 = lean_ctor_get(x_15580, 0); +lean_inc(x_15581); +x_15582 = lean_ctor_get(x_15580, 1); +lean_inc(x_15582); +lean_dec(x_15580); +x_15583 = lean_ctor_get(x_15581, 0); +lean_inc(x_15583); +x_15584 = lean_ctor_get(x_15581, 1); +lean_inc(x_15584); +if (lean_is_exclusive(x_15581)) { + lean_ctor_release(x_15581, 0); + lean_ctor_release(x_15581, 1); + x_15585 = x_15581; +} else { + lean_dec_ref(x_15581); + x_15585 = lean_box(0); +} +x_15586 = lean_alloc_ctor(1, 1, 0); +lean_ctor_set(x_15586, 0, x_15583); +if (lean_is_scalar(x_15585)) { + x_15587 = lean_alloc_ctor(0, 2, 0); +} else { + x_15587 = x_15585; +} +lean_ctor_set(x_15587, 0, x_15586); +lean_ctor_set(x_15587, 1, x_15584); +x_15426 = x_15587; +x_15427 = x_15582; +goto block_15456; +} +else +{ +lean_object* x_15588; lean_object* x_15589; lean_object* x_15590; lean_object* x_15591; +lean_dec(x_14824); +lean_dec(x_14813); +lean_dec(x_153); +lean_dec(x_5); +lean_dec(x_4); +lean_dec(x_2); +lean_dec(x_1); +x_15588 = lean_ctor_get(x_15580, 0); +lean_inc(x_15588); +x_15589 = lean_ctor_get(x_15580, 1); +lean_inc(x_15589); +if (lean_is_exclusive(x_15580)) { + lean_ctor_release(x_15580, 0); + lean_ctor_release(x_15580, 1); + x_15590 = x_15580; +} else { + lean_dec_ref(x_15580); + x_15590 = lean_box(0); +} +if (lean_is_scalar(x_15590)) { + x_15591 = lean_alloc_ctor(1, 2, 0); +} else { + x_15591 = x_15590; +} +lean_ctor_set(x_15591, 0, x_15588); +lean_ctor_set(x_15591, 1, x_15589); +return x_15591; +} +} +else +{ +lean_object* x_15592; lean_object* x_15593; lean_object* x_15594; lean_object* x_15595; +lean_dec(x_15572); +lean_dec(x_15567); +lean_dec(x_15457); +lean_dec(x_15562); +lean_dec(x_14824); +lean_dec(x_14813); +lean_dec(x_153); +lean_dec(x_5); +lean_dec(x_4); +lean_dec(x_2); +lean_dec(x_1); +x_15592 = lean_ctor_get(x_15575, 0); +lean_inc(x_15592); +x_15593 = lean_ctor_get(x_15575, 1); +lean_inc(x_15593); +if (lean_is_exclusive(x_15575)) { + lean_ctor_release(x_15575, 0); + lean_ctor_release(x_15575, 1); + x_15594 = x_15575; +} else { + lean_dec_ref(x_15575); + x_15594 = lean_box(0); +} +if (lean_is_scalar(x_15594)) { + x_15595 = lean_alloc_ctor(1, 2, 0); +} else { + x_15595 = x_15594; +} +lean_ctor_set(x_15595, 0, x_15592); +lean_ctor_set(x_15595, 1, x_15593); +return x_15595; +} +} +else +{ +lean_object* x_15596; lean_object* x_15597; lean_object* x_15598; lean_object* x_15599; lean_object* x_15600; lean_object* x_15601; lean_object* x_15602; lean_object* x_15603; +lean_dec(x_15557); +lean_dec(x_15555); +lean_inc(x_14813); +lean_inc(x_153); +lean_ctor_set_tag(x_15457, 6); +lean_ctor_set(x_15457, 1, x_14813); +lean_ctor_set(x_15457, 0, x_153); +x_15596 = lean_ctor_get(x_1, 0); +lean_inc(x_15596); +x_15597 = l_Lean_IR_ToIR_bindVar(x_15596, x_14819, x_4, x_5, x_15462); +x_15598 = lean_ctor_get(x_15597, 0); +lean_inc(x_15598); +x_15599 = lean_ctor_get(x_15597, 1); +lean_inc(x_15599); +lean_dec(x_15597); +x_15600 = lean_ctor_get(x_15598, 0); +lean_inc(x_15600); +x_15601 = lean_ctor_get(x_15598, 1); +lean_inc(x_15601); 
+lean_dec(x_15598); +x_15602 = lean_ctor_get(x_1, 2); +lean_inc(x_15602); +lean_inc(x_5); +lean_inc(x_4); +x_15603 = l_Lean_IR_ToIR_lowerType(x_15602, x_15601, x_4, x_5, x_15599); +if (lean_obj_tag(x_15603) == 0) +{ +lean_object* x_15604; lean_object* x_15605; lean_object* x_15606; lean_object* x_15607; lean_object* x_15608; +x_15604 = lean_ctor_get(x_15603, 0); +lean_inc(x_15604); +x_15605 = lean_ctor_get(x_15603, 1); +lean_inc(x_15605); +lean_dec(x_15603); +x_15606 = lean_ctor_get(x_15604, 0); +lean_inc(x_15606); +x_15607 = lean_ctor_get(x_15604, 1); +lean_inc(x_15607); +lean_dec(x_15604); +lean_inc(x_5); +lean_inc(x_4); +lean_inc(x_2); +x_15608 = l_Lean_IR_ToIR_lowerLet___lambda__1(x_2, x_15600, x_15457, x_15606, x_15607, x_4, x_5, x_15605); +if (lean_obj_tag(x_15608) == 0) +{ +lean_object* x_15609; lean_object* x_15610; lean_object* x_15611; lean_object* x_15612; lean_object* x_15613; lean_object* x_15614; lean_object* x_15615; +x_15609 = lean_ctor_get(x_15608, 0); +lean_inc(x_15609); +x_15610 = lean_ctor_get(x_15608, 1); +lean_inc(x_15610); +lean_dec(x_15608); +x_15611 = lean_ctor_get(x_15609, 0); +lean_inc(x_15611); +x_15612 = lean_ctor_get(x_15609, 1); +lean_inc(x_15612); +if (lean_is_exclusive(x_15609)) { + lean_ctor_release(x_15609, 0); + lean_ctor_release(x_15609, 1); + x_15613 = x_15609; +} else { + lean_dec_ref(x_15609); + x_15613 = lean_box(0); +} +x_15614 = lean_alloc_ctor(1, 1, 0); +lean_ctor_set(x_15614, 0, x_15611); +if (lean_is_scalar(x_15613)) { + x_15615 = lean_alloc_ctor(0, 2, 0); +} else { + x_15615 = x_15613; +} +lean_ctor_set(x_15615, 0, x_15614); +lean_ctor_set(x_15615, 1, x_15612); +x_15426 = x_15615; +x_15427 = x_15610; +goto block_15456; +} +else +{ +lean_object* x_15616; lean_object* x_15617; lean_object* x_15618; lean_object* x_15619; +lean_dec(x_14824); +lean_dec(x_14813); +lean_dec(x_153); +lean_dec(x_5); +lean_dec(x_4); +lean_dec(x_2); +lean_dec(x_1); +x_15616 = lean_ctor_get(x_15608, 0); +lean_inc(x_15616); +x_15617 = lean_ctor_get(x_15608, 1); +lean_inc(x_15617); +if (lean_is_exclusive(x_15608)) { + lean_ctor_release(x_15608, 0); + lean_ctor_release(x_15608, 1); + x_15618 = x_15608; +} else { + lean_dec_ref(x_15608); + x_15618 = lean_box(0); +} +if (lean_is_scalar(x_15618)) { + x_15619 = lean_alloc_ctor(1, 2, 0); +} else { + x_15619 = x_15618; +} +lean_ctor_set(x_15619, 0, x_15616); +lean_ctor_set(x_15619, 1, x_15617); +return x_15619; +} +} +else +{ +lean_object* x_15620; lean_object* x_15621; lean_object* x_15622; lean_object* x_15623; +lean_dec(x_15600); +lean_dec(x_15457); +lean_dec(x_14824); +lean_dec(x_14813); +lean_dec(x_153); +lean_dec(x_5); +lean_dec(x_4); +lean_dec(x_2); +lean_dec(x_1); +x_15620 = lean_ctor_get(x_15603, 0); +lean_inc(x_15620); +x_15621 = lean_ctor_get(x_15603, 1); +lean_inc(x_15621); +if (lean_is_exclusive(x_15603)) { + lean_ctor_release(x_15603, 0); + lean_ctor_release(x_15603, 1); + x_15622 = x_15603; +} else { + lean_dec_ref(x_15603); + x_15622 = lean_box(0); +} +if (lean_is_scalar(x_15622)) { + x_15623 = lean_alloc_ctor(1, 2, 0); +} else { + x_15623 = x_15622; +} +lean_ctor_set(x_15623, 0, x_15620); +lean_ctor_set(x_15623, 1, x_15621); +return x_15623; +} +} +} +else +{ +lean_object* x_15624; lean_object* x_15625; lean_object* x_15626; lean_object* x_15627; lean_object* x_15628; lean_object* x_15629; lean_object* x_15630; lean_object* x_15631; +lean_dec(x_15557); +lean_dec(x_15555); +lean_inc(x_14813); +lean_inc(x_153); +lean_ctor_set_tag(x_15457, 7); +lean_ctor_set(x_15457, 1, x_14813); +lean_ctor_set(x_15457, 0, x_153); 
+x_15624 = lean_ctor_get(x_1, 0); +lean_inc(x_15624); +x_15625 = l_Lean_IR_ToIR_bindVar(x_15624, x_14819, x_4, x_5, x_15462); +x_15626 = lean_ctor_get(x_15625, 0); +lean_inc(x_15626); +x_15627 = lean_ctor_get(x_15625, 1); +lean_inc(x_15627); +lean_dec(x_15625); +x_15628 = lean_ctor_get(x_15626, 0); +lean_inc(x_15628); +x_15629 = lean_ctor_get(x_15626, 1); +lean_inc(x_15629); +lean_dec(x_15626); +x_15630 = lean_box(7); +lean_inc(x_5); +lean_inc(x_4); +lean_inc(x_2); +x_15631 = l_Lean_IR_ToIR_lowerLet___lambda__1(x_2, x_15628, x_15457, x_15630, x_15629, x_4, x_5, x_15627); +if (lean_obj_tag(x_15631) == 0) +{ +lean_object* x_15632; lean_object* x_15633; lean_object* x_15634; lean_object* x_15635; lean_object* x_15636; lean_object* x_15637; lean_object* x_15638; +x_15632 = lean_ctor_get(x_15631, 0); +lean_inc(x_15632); +x_15633 = lean_ctor_get(x_15631, 1); +lean_inc(x_15633); +lean_dec(x_15631); +x_15634 = lean_ctor_get(x_15632, 0); +lean_inc(x_15634); +x_15635 = lean_ctor_get(x_15632, 1); +lean_inc(x_15635); +if (lean_is_exclusive(x_15632)) { + lean_ctor_release(x_15632, 0); + lean_ctor_release(x_15632, 1); + x_15636 = x_15632; +} else { + lean_dec_ref(x_15632); + x_15636 = lean_box(0); +} +x_15637 = lean_alloc_ctor(1, 1, 0); +lean_ctor_set(x_15637, 0, x_15634); +if (lean_is_scalar(x_15636)) { + x_15638 = lean_alloc_ctor(0, 2, 0); +} else { + x_15638 = x_15636; +} +lean_ctor_set(x_15638, 0, x_15637); +lean_ctor_set(x_15638, 1, x_15635); +x_15426 = x_15638; +x_15427 = x_15633; +goto block_15456; +} +else +{ +lean_object* x_15639; lean_object* x_15640; lean_object* x_15641; lean_object* x_15642; +lean_dec(x_14824); +lean_dec(x_14813); +lean_dec(x_153); +lean_dec(x_5); +lean_dec(x_4); +lean_dec(x_2); +lean_dec(x_1); +x_15639 = lean_ctor_get(x_15631, 0); +lean_inc(x_15639); +x_15640 = lean_ctor_get(x_15631, 1); +lean_inc(x_15640); +if (lean_is_exclusive(x_15631)) { + lean_ctor_release(x_15631, 0); + lean_ctor_release(x_15631, 1); + x_15641 = x_15631; +} else { + lean_dec_ref(x_15631); + x_15641 = lean_box(0); +} +if (lean_is_scalar(x_15641)) { + x_15642 = lean_alloc_ctor(1, 2, 0); +} else { + x_15642 = x_15641; +} +lean_ctor_set(x_15642, 0, x_15639); +lean_ctor_set(x_15642, 1, x_15640); +return x_15642; +} +} +} +} +else +{ +lean_object* x_15643; lean_object* x_15644; lean_object* x_15645; lean_object* x_15646; lean_object* x_15647; lean_object* x_15648; uint8_t x_15649; +x_15643 = lean_ctor_get(x_15457, 1); +lean_inc(x_15643); +lean_dec(x_15457); +x_15644 = lean_ctor_get(x_15458, 0); +lean_inc(x_15644); +if (lean_is_exclusive(x_15458)) { + lean_ctor_release(x_15458, 0); + x_15645 = x_15458; +} else { + lean_dec_ref(x_15458); + x_15645 = lean_box(0); +} +x_15646 = lean_array_get_size(x_14813); +x_15647 = lean_ctor_get(x_15644, 3); +lean_inc(x_15647); +lean_dec(x_15644); +x_15648 = lean_array_get_size(x_15647); +lean_dec(x_15647); +x_15649 = lean_nat_dec_lt(x_15646, x_15648); +if (x_15649 == 0) +{ +uint8_t x_15650; +x_15650 = lean_nat_dec_eq(x_15646, x_15648); +if (x_15650 == 0) +{ +lean_object* x_15651; lean_object* x_15652; lean_object* x_15653; lean_object* x_15654; lean_object* x_15655; lean_object* x_15656; lean_object* x_15657; lean_object* x_15658; lean_object* x_15659; lean_object* x_15660; lean_object* x_15661; lean_object* x_15662; lean_object* x_15663; lean_object* x_15664; lean_object* x_15665; lean_object* x_15666; lean_object* x_15667; +x_15651 = lean_unsigned_to_nat(0u); +x_15652 = l_Array_extract___rarg(x_14813, x_15651, x_15648); +x_15653 = l_Array_extract___rarg(x_14813, x_15648, 
x_15646); +lean_dec(x_15646); +lean_inc(x_153); +x_15654 = lean_alloc_ctor(6, 2, 0); +lean_ctor_set(x_15654, 0, x_153); +lean_ctor_set(x_15654, 1, x_15652); +x_15655 = lean_ctor_get(x_1, 0); +lean_inc(x_15655); +x_15656 = l_Lean_IR_ToIR_bindVar(x_15655, x_14819, x_4, x_5, x_15643); +x_15657 = lean_ctor_get(x_15656, 0); +lean_inc(x_15657); +x_15658 = lean_ctor_get(x_15656, 1); +lean_inc(x_15658); +lean_dec(x_15656); +x_15659 = lean_ctor_get(x_15657, 0); +lean_inc(x_15659); +x_15660 = lean_ctor_get(x_15657, 1); +lean_inc(x_15660); +lean_dec(x_15657); +x_15661 = l_Lean_IR_ToIR_newVar(x_15660, x_4, x_5, x_15658); +x_15662 = lean_ctor_get(x_15661, 0); +lean_inc(x_15662); +x_15663 = lean_ctor_get(x_15661, 1); +lean_inc(x_15663); +lean_dec(x_15661); +x_15664 = lean_ctor_get(x_15662, 0); +lean_inc(x_15664); +x_15665 = lean_ctor_get(x_15662, 1); +lean_inc(x_15665); +lean_dec(x_15662); +x_15666 = lean_ctor_get(x_1, 2); +lean_inc(x_15666); +lean_inc(x_5); +lean_inc(x_4); +x_15667 = l_Lean_IR_ToIR_lowerType(x_15666, x_15665, x_4, x_5, x_15663); +if (lean_obj_tag(x_15667) == 0) +{ +lean_object* x_15668; lean_object* x_15669; lean_object* x_15670; lean_object* x_15671; lean_object* x_15672; +x_15668 = lean_ctor_get(x_15667, 0); +lean_inc(x_15668); +x_15669 = lean_ctor_get(x_15667, 1); +lean_inc(x_15669); +lean_dec(x_15667); +x_15670 = lean_ctor_get(x_15668, 0); +lean_inc(x_15670); +x_15671 = lean_ctor_get(x_15668, 1); +lean_inc(x_15671); +lean_dec(x_15668); +lean_inc(x_5); +lean_inc(x_4); +lean_inc(x_2); +x_15672 = l_Lean_IR_ToIR_lowerLet___lambda__3(x_2, x_15664, x_15653, x_15659, x_15654, x_15670, x_15671, x_4, x_5, x_15669); +if (lean_obj_tag(x_15672) == 0) +{ +lean_object* x_15673; lean_object* x_15674; lean_object* x_15675; lean_object* x_15676; lean_object* x_15677; lean_object* x_15678; lean_object* x_15679; +x_15673 = lean_ctor_get(x_15672, 0); +lean_inc(x_15673); +x_15674 = lean_ctor_get(x_15672, 1); +lean_inc(x_15674); +lean_dec(x_15672); +x_15675 = lean_ctor_get(x_15673, 0); +lean_inc(x_15675); +x_15676 = lean_ctor_get(x_15673, 1); +lean_inc(x_15676); +if (lean_is_exclusive(x_15673)) { + lean_ctor_release(x_15673, 0); + lean_ctor_release(x_15673, 1); + x_15677 = x_15673; +} else { + lean_dec_ref(x_15673); + x_15677 = lean_box(0); +} +if (lean_is_scalar(x_15645)) { + x_15678 = lean_alloc_ctor(1, 1, 0); +} else { + x_15678 = x_15645; +} +lean_ctor_set(x_15678, 0, x_15675); +if (lean_is_scalar(x_15677)) { + x_15679 = lean_alloc_ctor(0, 2, 0); +} else { + x_15679 = x_15677; +} +lean_ctor_set(x_15679, 0, x_15678); +lean_ctor_set(x_15679, 1, x_15676); +x_15426 = x_15679; +x_15427 = x_15674; +goto block_15456; +} +else +{ +lean_object* x_15680; lean_object* x_15681; lean_object* x_15682; lean_object* x_15683; +lean_dec(x_15645); +lean_dec(x_14824); +lean_dec(x_14813); +lean_dec(x_153); +lean_dec(x_5); +lean_dec(x_4); +lean_dec(x_2); +lean_dec(x_1); +x_15680 = lean_ctor_get(x_15672, 0); +lean_inc(x_15680); +x_15681 = lean_ctor_get(x_15672, 1); +lean_inc(x_15681); +if (lean_is_exclusive(x_15672)) { + lean_ctor_release(x_15672, 0); + lean_ctor_release(x_15672, 1); + x_15682 = x_15672; +} else { + lean_dec_ref(x_15672); + x_15682 = lean_box(0); +} +if (lean_is_scalar(x_15682)) { + x_15683 = lean_alloc_ctor(1, 2, 0); +} else { + x_15683 = x_15682; +} +lean_ctor_set(x_15683, 0, x_15680); +lean_ctor_set(x_15683, 1, x_15681); +return x_15683; +} +} +else +{ +lean_object* x_15684; lean_object* x_15685; lean_object* x_15686; lean_object* x_15687; +lean_dec(x_15664); +lean_dec(x_15659); +lean_dec(x_15654); 
+lean_dec(x_15653); +lean_dec(x_15645); +lean_dec(x_14824); +lean_dec(x_14813); +lean_dec(x_153); +lean_dec(x_5); +lean_dec(x_4); +lean_dec(x_2); +lean_dec(x_1); +x_15684 = lean_ctor_get(x_15667, 0); +lean_inc(x_15684); +x_15685 = lean_ctor_get(x_15667, 1); +lean_inc(x_15685); +if (lean_is_exclusive(x_15667)) { + lean_ctor_release(x_15667, 0); + lean_ctor_release(x_15667, 1); + x_15686 = x_15667; +} else { + lean_dec_ref(x_15667); + x_15686 = lean_box(0); +} +if (lean_is_scalar(x_15686)) { + x_15687 = lean_alloc_ctor(1, 2, 0); +} else { + x_15687 = x_15686; +} +lean_ctor_set(x_15687, 0, x_15684); +lean_ctor_set(x_15687, 1, x_15685); +return x_15687; +} +} +else +{ +lean_object* x_15688; lean_object* x_15689; lean_object* x_15690; lean_object* x_15691; lean_object* x_15692; lean_object* x_15693; lean_object* x_15694; lean_object* x_15695; lean_object* x_15696; +lean_dec(x_15648); +lean_dec(x_15646); +lean_inc(x_14813); +lean_inc(x_153); +x_15688 = lean_alloc_ctor(6, 2, 0); +lean_ctor_set(x_15688, 0, x_153); +lean_ctor_set(x_15688, 1, x_14813); +x_15689 = lean_ctor_get(x_1, 0); +lean_inc(x_15689); +x_15690 = l_Lean_IR_ToIR_bindVar(x_15689, x_14819, x_4, x_5, x_15643); +x_15691 = lean_ctor_get(x_15690, 0); +lean_inc(x_15691); +x_15692 = lean_ctor_get(x_15690, 1); +lean_inc(x_15692); +lean_dec(x_15690); +x_15693 = lean_ctor_get(x_15691, 0); +lean_inc(x_15693); +x_15694 = lean_ctor_get(x_15691, 1); +lean_inc(x_15694); +lean_dec(x_15691); +x_15695 = lean_ctor_get(x_1, 2); +lean_inc(x_15695); +lean_inc(x_5); +lean_inc(x_4); +x_15696 = l_Lean_IR_ToIR_lowerType(x_15695, x_15694, x_4, x_5, x_15692); +if (lean_obj_tag(x_15696) == 0) +{ +lean_object* x_15697; lean_object* x_15698; lean_object* x_15699; lean_object* x_15700; lean_object* x_15701; +x_15697 = lean_ctor_get(x_15696, 0); +lean_inc(x_15697); +x_15698 = lean_ctor_get(x_15696, 1); +lean_inc(x_15698); +lean_dec(x_15696); +x_15699 = lean_ctor_get(x_15697, 0); +lean_inc(x_15699); +x_15700 = lean_ctor_get(x_15697, 1); +lean_inc(x_15700); +lean_dec(x_15697); +lean_inc(x_5); +lean_inc(x_4); +lean_inc(x_2); +x_15701 = l_Lean_IR_ToIR_lowerLet___lambda__1(x_2, x_15693, x_15688, x_15699, x_15700, x_4, x_5, x_15698); +if (lean_obj_tag(x_15701) == 0) +{ +lean_object* x_15702; lean_object* x_15703; lean_object* x_15704; lean_object* x_15705; lean_object* x_15706; lean_object* x_15707; lean_object* x_15708; +x_15702 = lean_ctor_get(x_15701, 0); +lean_inc(x_15702); +x_15703 = lean_ctor_get(x_15701, 1); +lean_inc(x_15703); +lean_dec(x_15701); +x_15704 = lean_ctor_get(x_15702, 0); +lean_inc(x_15704); +x_15705 = lean_ctor_get(x_15702, 1); +lean_inc(x_15705); +if (lean_is_exclusive(x_15702)) { + lean_ctor_release(x_15702, 0); + lean_ctor_release(x_15702, 1); + x_15706 = x_15702; +} else { + lean_dec_ref(x_15702); + x_15706 = lean_box(0); +} +if (lean_is_scalar(x_15645)) { + x_15707 = lean_alloc_ctor(1, 1, 0); +} else { + x_15707 = x_15645; +} +lean_ctor_set(x_15707, 0, x_15704); +if (lean_is_scalar(x_15706)) { + x_15708 = lean_alloc_ctor(0, 2, 0); +} else { + x_15708 = x_15706; +} +lean_ctor_set(x_15708, 0, x_15707); +lean_ctor_set(x_15708, 1, x_15705); +x_15426 = x_15708; +x_15427 = x_15703; +goto block_15456; +} +else +{ +lean_object* x_15709; lean_object* x_15710; lean_object* x_15711; lean_object* x_15712; +lean_dec(x_15645); +lean_dec(x_14824); +lean_dec(x_14813); +lean_dec(x_153); +lean_dec(x_5); +lean_dec(x_4); +lean_dec(x_2); +lean_dec(x_1); +x_15709 = lean_ctor_get(x_15701, 0); +lean_inc(x_15709); +x_15710 = lean_ctor_get(x_15701, 1); 
+lean_inc(x_15710); +if (lean_is_exclusive(x_15701)) { + lean_ctor_release(x_15701, 0); + lean_ctor_release(x_15701, 1); + x_15711 = x_15701; +} else { + lean_dec_ref(x_15701); + x_15711 = lean_box(0); +} +if (lean_is_scalar(x_15711)) { + x_15712 = lean_alloc_ctor(1, 2, 0); +} else { + x_15712 = x_15711; +} +lean_ctor_set(x_15712, 0, x_15709); +lean_ctor_set(x_15712, 1, x_15710); +return x_15712; +} +} +else +{ +lean_object* x_15713; lean_object* x_15714; lean_object* x_15715; lean_object* x_15716; +lean_dec(x_15693); +lean_dec(x_15688); +lean_dec(x_15645); +lean_dec(x_14824); +lean_dec(x_14813); +lean_dec(x_153); +lean_dec(x_5); +lean_dec(x_4); +lean_dec(x_2); +lean_dec(x_1); +x_15713 = lean_ctor_get(x_15696, 0); +lean_inc(x_15713); +x_15714 = lean_ctor_get(x_15696, 1); +lean_inc(x_15714); +if (lean_is_exclusive(x_15696)) { + lean_ctor_release(x_15696, 0); + lean_ctor_release(x_15696, 1); + x_15715 = x_15696; +} else { + lean_dec_ref(x_15696); + x_15715 = lean_box(0); +} +if (lean_is_scalar(x_15715)) { + x_15716 = lean_alloc_ctor(1, 2, 0); +} else { + x_15716 = x_15715; +} +lean_ctor_set(x_15716, 0, x_15713); +lean_ctor_set(x_15716, 1, x_15714); +return x_15716; +} +} +} +else +{ +lean_object* x_15717; lean_object* x_15718; lean_object* x_15719; lean_object* x_15720; lean_object* x_15721; lean_object* x_15722; lean_object* x_15723; lean_object* x_15724; lean_object* x_15725; +lean_dec(x_15648); +lean_dec(x_15646); +lean_inc(x_14813); +lean_inc(x_153); +x_15717 = lean_alloc_ctor(7, 2, 0); +lean_ctor_set(x_15717, 0, x_153); +lean_ctor_set(x_15717, 1, x_14813); +x_15718 = lean_ctor_get(x_1, 0); +lean_inc(x_15718); +x_15719 = l_Lean_IR_ToIR_bindVar(x_15718, x_14819, x_4, x_5, x_15643); +x_15720 = lean_ctor_get(x_15719, 0); +lean_inc(x_15720); +x_15721 = lean_ctor_get(x_15719, 1); +lean_inc(x_15721); +lean_dec(x_15719); +x_15722 = lean_ctor_get(x_15720, 0); +lean_inc(x_15722); +x_15723 = lean_ctor_get(x_15720, 1); +lean_inc(x_15723); +lean_dec(x_15720); +x_15724 = lean_box(7); +lean_inc(x_5); +lean_inc(x_4); +lean_inc(x_2); +x_15725 = l_Lean_IR_ToIR_lowerLet___lambda__1(x_2, x_15722, x_15717, x_15724, x_15723, x_4, x_5, x_15721); +if (lean_obj_tag(x_15725) == 0) +{ +lean_object* x_15726; lean_object* x_15727; lean_object* x_15728; lean_object* x_15729; lean_object* x_15730; lean_object* x_15731; lean_object* x_15732; +x_15726 = lean_ctor_get(x_15725, 0); +lean_inc(x_15726); +x_15727 = lean_ctor_get(x_15725, 1); +lean_inc(x_15727); +lean_dec(x_15725); +x_15728 = lean_ctor_get(x_15726, 0); +lean_inc(x_15728); +x_15729 = lean_ctor_get(x_15726, 1); +lean_inc(x_15729); +if (lean_is_exclusive(x_15726)) { + lean_ctor_release(x_15726, 0); + lean_ctor_release(x_15726, 1); + x_15730 = x_15726; +} else { + lean_dec_ref(x_15726); + x_15730 = lean_box(0); +} +if (lean_is_scalar(x_15645)) { + x_15731 = lean_alloc_ctor(1, 1, 0); +} else { + x_15731 = x_15645; +} +lean_ctor_set(x_15731, 0, x_15728); +if (lean_is_scalar(x_15730)) { + x_15732 = lean_alloc_ctor(0, 2, 0); +} else { + x_15732 = x_15730; +} +lean_ctor_set(x_15732, 0, x_15731); +lean_ctor_set(x_15732, 1, x_15729); +x_15426 = x_15732; +x_15427 = x_15727; +goto block_15456; +} +else +{ +lean_object* x_15733; lean_object* x_15734; lean_object* x_15735; lean_object* x_15736; +lean_dec(x_15645); +lean_dec(x_14824); +lean_dec(x_14813); +lean_dec(x_153); +lean_dec(x_5); +lean_dec(x_4); +lean_dec(x_2); +lean_dec(x_1); +x_15733 = lean_ctor_get(x_15725, 0); +lean_inc(x_15733); +x_15734 = lean_ctor_get(x_15725, 1); +lean_inc(x_15734); +if 
(lean_is_exclusive(x_15725)) { + lean_ctor_release(x_15725, 0); + lean_ctor_release(x_15725, 1); + x_15735 = x_15725; +} else { + lean_dec_ref(x_15725); + x_15735 = lean_box(0); +} +if (lean_is_scalar(x_15735)) { + x_15736 = lean_alloc_ctor(1, 2, 0); +} else { + x_15736 = x_15735; +} +lean_ctor_set(x_15736, 0, x_15733); +lean_ctor_set(x_15736, 1, x_15734); +return x_15736; +} +} +} +} +block_15456: +{ +lean_object* x_15428; +x_15428 = lean_ctor_get(x_15426, 0); +lean_inc(x_15428); +if (lean_obj_tag(x_15428) == 0) +{ +lean_object* x_15429; lean_object* x_15430; lean_object* x_15431; lean_object* x_15432; lean_object* x_15433; lean_object* x_15434; lean_object* x_15435; lean_object* x_15436; lean_object* x_15437; lean_object* x_15438; +lean_dec(x_14824); +x_15429 = lean_ctor_get(x_15426, 1); +lean_inc(x_15429); +lean_dec(x_15426); +x_15430 = lean_alloc_ctor(6, 2, 0); +lean_ctor_set(x_15430, 0, x_153); +lean_ctor_set(x_15430, 1, x_14813); +x_15431 = lean_ctor_get(x_1, 0); +lean_inc(x_15431); +x_15432 = l_Lean_IR_ToIR_bindVar(x_15431, x_15429, x_4, x_5, x_15427); +x_15433 = lean_ctor_get(x_15432, 0); +lean_inc(x_15433); +x_15434 = lean_ctor_get(x_15432, 1); +lean_inc(x_15434); +lean_dec(x_15432); +x_15435 = lean_ctor_get(x_15433, 0); +lean_inc(x_15435); +x_15436 = lean_ctor_get(x_15433, 1); +lean_inc(x_15436); +lean_dec(x_15433); +x_15437 = lean_ctor_get(x_1, 2); +lean_inc(x_15437); +lean_dec(x_1); +lean_inc(x_5); +lean_inc(x_4); +x_15438 = l_Lean_IR_ToIR_lowerType(x_15437, x_15436, x_4, x_5, x_15434); +if (lean_obj_tag(x_15438) == 0) +{ +lean_object* x_15439; lean_object* x_15440; lean_object* x_15441; lean_object* x_15442; lean_object* x_15443; +x_15439 = lean_ctor_get(x_15438, 0); +lean_inc(x_15439); +x_15440 = lean_ctor_get(x_15438, 1); +lean_inc(x_15440); +lean_dec(x_15438); +x_15441 = lean_ctor_get(x_15439, 0); +lean_inc(x_15441); +x_15442 = lean_ctor_get(x_15439, 1); +lean_inc(x_15442); +lean_dec(x_15439); +x_15443 = l_Lean_IR_ToIR_lowerLet___lambda__1(x_2, x_15435, x_15430, x_15441, x_15442, x_4, x_5, x_15440); +return x_15443; +} +else +{ +uint8_t x_15444; +lean_dec(x_15435); +lean_dec(x_15430); +lean_dec(x_5); +lean_dec(x_4); +lean_dec(x_2); +x_15444 = !lean_is_exclusive(x_15438); +if (x_15444 == 0) +{ +return x_15438; +} +else +{ +lean_object* x_15445; lean_object* x_15446; lean_object* x_15447; +x_15445 = lean_ctor_get(x_15438, 0); +x_15446 = lean_ctor_get(x_15438, 1); +lean_inc(x_15446); +lean_inc(x_15445); +lean_dec(x_15438); +x_15447 = lean_alloc_ctor(1, 2, 0); +lean_ctor_set(x_15447, 0, x_15445); +lean_ctor_set(x_15447, 1, x_15446); +return x_15447; +} +} +} +else +{ +uint8_t x_15448; +lean_dec(x_14813); +lean_dec(x_153); +lean_dec(x_5); +lean_dec(x_4); +lean_dec(x_2); +lean_dec(x_1); +x_15448 = !lean_is_exclusive(x_15426); +if (x_15448 == 0) +{ +lean_object* x_15449; lean_object* x_15450; lean_object* x_15451; +x_15449 = lean_ctor_get(x_15426, 0); +lean_dec(x_15449); +x_15450 = lean_ctor_get(x_15428, 0); +lean_inc(x_15450); +lean_dec(x_15428); +lean_ctor_set(x_15426, 0, x_15450); +if (lean_is_scalar(x_14824)) { + x_15451 = lean_alloc_ctor(0, 2, 0); +} else { + x_15451 = x_14824; +} +lean_ctor_set(x_15451, 0, x_15426); +lean_ctor_set(x_15451, 1, x_15427); +return x_15451; +} +else +{ +lean_object* x_15452; lean_object* x_15453; lean_object* x_15454; lean_object* x_15455; +x_15452 = lean_ctor_get(x_15426, 1); +lean_inc(x_15452); +lean_dec(x_15426); +x_15453 = lean_ctor_get(x_15428, 0); +lean_inc(x_15453); +lean_dec(x_15428); +x_15454 = lean_alloc_ctor(0, 2, 0); 
+lean_ctor_set(x_15454, 0, x_15453); +lean_ctor_set(x_15454, 1, x_15452); +if (lean_is_scalar(x_14824)) { + x_15455 = lean_alloc_ctor(0, 2, 0); +} else { + x_15455 = x_14824; +} +lean_ctor_set(x_15455, 0, x_15454); +lean_ctor_set(x_15455, 1, x_15427); +return x_15455; +} +} +} +} +case 4: +{ +uint8_t x_15737; +lean_dec(x_14825); +lean_dec(x_14824); +lean_free_object(x_14815); +lean_dec(x_14805); +lean_dec(x_14804); +x_15737 = !lean_is_exclusive(x_14830); +if (x_15737 == 0) +{ +lean_object* x_15738; lean_object* x_15739; uint8_t x_15740; +x_15738 = lean_ctor_get(x_14830, 0); +lean_dec(x_15738); +x_15739 = l_Lean_IR_ToIR_lowerLet___closed__20; +x_15740 = lean_name_eq(x_153, x_15739); +if (x_15740 == 0) +{ +uint8_t x_15741; lean_object* x_15742; lean_object* x_15743; lean_object* x_15744; lean_object* x_15745; lean_object* x_15746; lean_object* x_15747; lean_object* x_15748; lean_object* x_15749; +lean_dec(x_14813); +lean_dec(x_2); +lean_dec(x_1); +x_15741 = 1; +x_15742 = l_Lean_IR_ToIR_lowerLet___closed__14; +x_15743 = l_Lean_Name_toString(x_153, x_15741, x_15742); +lean_ctor_set_tag(x_14830, 3); +lean_ctor_set(x_14830, 0, x_15743); +x_15744 = l_Lean_IR_ToIR_lowerLet___closed__22; +x_15745 = lean_alloc_ctor(5, 2, 0); +lean_ctor_set(x_15745, 0, x_15744); +lean_ctor_set(x_15745, 1, x_14830); +x_15746 = l_Lean_IR_ToIR_lowerLet___closed__24; +x_15747 = lean_alloc_ctor(5, 2, 0); +lean_ctor_set(x_15747, 0, x_15745); +lean_ctor_set(x_15747, 1, x_15746); +x_15748 = l_Lean_MessageData_ofFormat(x_15747); +x_15749 = l_Lean_throwError___at_Lean_IR_ToIR_lowerLet___spec__1(x_15748, x_14819, x_4, x_5, x_14823); +lean_dec(x_5); +lean_dec(x_4); +lean_dec(x_14819); +return x_15749; +} +else +{ +lean_object* x_15750; lean_object* x_15751; lean_object* x_15752; +lean_free_object(x_14830); +lean_dec(x_153); +x_15750 = l_Lean_IR_instInhabitedArg; +x_15751 = lean_unsigned_to_nat(2u); +x_15752 = lean_array_get(x_15750, x_14813, x_15751); +lean_dec(x_14813); +if (lean_obj_tag(x_15752) == 0) +{ +lean_object* x_15753; lean_object* x_15754; lean_object* x_15755; lean_object* x_15756; lean_object* x_15757; lean_object* x_15758; lean_object* x_15759; +x_15753 = lean_ctor_get(x_15752, 0); +lean_inc(x_15753); +lean_dec(x_15752); +x_15754 = lean_ctor_get(x_1, 0); +lean_inc(x_15754); +lean_dec(x_1); +x_15755 = l_Lean_IR_ToIR_bindVarToVarId(x_15754, x_15753, x_14819, x_4, x_5, x_14823); +x_15756 = lean_ctor_get(x_15755, 0); +lean_inc(x_15756); +x_15757 = lean_ctor_get(x_15755, 1); +lean_inc(x_15757); +lean_dec(x_15755); +x_15758 = lean_ctor_get(x_15756, 1); +lean_inc(x_15758); +lean_dec(x_15756); +x_15759 = l_Lean_IR_ToIR_lowerCode(x_2, x_15758, x_4, x_5, x_15757); +return x_15759; +} +else +{ +lean_object* x_15760; lean_object* x_15761; lean_object* x_15762; lean_object* x_15763; lean_object* x_15764; lean_object* x_15765; +x_15760 = lean_ctor_get(x_1, 0); +lean_inc(x_15760); +lean_dec(x_1); +x_15761 = l_Lean_IR_ToIR_bindErased(x_15760, x_14819, x_4, x_5, x_14823); +x_15762 = lean_ctor_get(x_15761, 0); +lean_inc(x_15762); +x_15763 = lean_ctor_get(x_15761, 1); +lean_inc(x_15763); +lean_dec(x_15761); +x_15764 = lean_ctor_get(x_15762, 1); +lean_inc(x_15764); +lean_dec(x_15762); +x_15765 = l_Lean_IR_ToIR_lowerCode(x_2, x_15764, x_4, x_5, x_15763); +return x_15765; +} +} +} +else +{ +lean_object* x_15766; uint8_t x_15767; +lean_dec(x_14830); +x_15766 = l_Lean_IR_ToIR_lowerLet___closed__20; +x_15767 = lean_name_eq(x_153, x_15766); +if (x_15767 == 0) +{ +uint8_t x_15768; lean_object* x_15769; lean_object* x_15770; lean_object* 
x_15771; lean_object* x_15772; lean_object* x_15773; lean_object* x_15774; lean_object* x_15775; lean_object* x_15776; lean_object* x_15777; +lean_dec(x_14813); +lean_dec(x_2); +lean_dec(x_1); +x_15768 = 1; +x_15769 = l_Lean_IR_ToIR_lowerLet___closed__14; +x_15770 = l_Lean_Name_toString(x_153, x_15768, x_15769); +x_15771 = lean_alloc_ctor(3, 1, 0); +lean_ctor_set(x_15771, 0, x_15770); +x_15772 = l_Lean_IR_ToIR_lowerLet___closed__22; +x_15773 = lean_alloc_ctor(5, 2, 0); +lean_ctor_set(x_15773, 0, x_15772); +lean_ctor_set(x_15773, 1, x_15771); +x_15774 = l_Lean_IR_ToIR_lowerLet___closed__24; +x_15775 = lean_alloc_ctor(5, 2, 0); +lean_ctor_set(x_15775, 0, x_15773); +lean_ctor_set(x_15775, 1, x_15774); +x_15776 = l_Lean_MessageData_ofFormat(x_15775); +x_15777 = l_Lean_throwError___at_Lean_IR_ToIR_lowerLet___spec__1(x_15776, x_14819, x_4, x_5, x_14823); +lean_dec(x_5); +lean_dec(x_4); +lean_dec(x_14819); +return x_15777; +} +else +{ +lean_object* x_15778; lean_object* x_15779; lean_object* x_15780; +lean_dec(x_153); +x_15778 = l_Lean_IR_instInhabitedArg; +x_15779 = lean_unsigned_to_nat(2u); +x_15780 = lean_array_get(x_15778, x_14813, x_15779); +lean_dec(x_14813); +if (lean_obj_tag(x_15780) == 0) +{ +lean_object* x_15781; lean_object* x_15782; lean_object* x_15783; lean_object* x_15784; lean_object* x_15785; lean_object* x_15786; lean_object* x_15787; +x_15781 = lean_ctor_get(x_15780, 0); +lean_inc(x_15781); +lean_dec(x_15780); +x_15782 = lean_ctor_get(x_1, 0); +lean_inc(x_15782); +lean_dec(x_1); +x_15783 = l_Lean_IR_ToIR_bindVarToVarId(x_15782, x_15781, x_14819, x_4, x_5, x_14823); +x_15784 = lean_ctor_get(x_15783, 0); +lean_inc(x_15784); +x_15785 = lean_ctor_get(x_15783, 1); +lean_inc(x_15785); +lean_dec(x_15783); +x_15786 = lean_ctor_get(x_15784, 1); +lean_inc(x_15786); +lean_dec(x_15784); +x_15787 = l_Lean_IR_ToIR_lowerCode(x_2, x_15786, x_4, x_5, x_15785); +return x_15787; +} +else +{ +lean_object* x_15788; lean_object* x_15789; lean_object* x_15790; lean_object* x_15791; lean_object* x_15792; lean_object* x_15793; +x_15788 = lean_ctor_get(x_1, 0); +lean_inc(x_15788); +lean_dec(x_1); +x_15789 = l_Lean_IR_ToIR_bindErased(x_15788, x_14819, x_4, x_5, x_14823); +x_15790 = lean_ctor_get(x_15789, 0); +lean_inc(x_15790); +x_15791 = lean_ctor_get(x_15789, 1); +lean_inc(x_15791); +lean_dec(x_15789); +x_15792 = lean_ctor_get(x_15790, 1); +lean_inc(x_15792); +lean_dec(x_15790); +x_15793 = l_Lean_IR_ToIR_lowerCode(x_2, x_15792, x_4, x_5, x_15791); +return x_15793; +} +} +} +} +case 5: +{ +lean_object* x_15794; lean_object* x_15795; +lean_dec(x_14830); +lean_dec(x_14825); +lean_dec(x_14824); +lean_free_object(x_14815); +lean_dec(x_14813); +lean_dec(x_14805); +lean_dec(x_14804); +lean_dec(x_153); +lean_dec(x_2); +lean_dec(x_1); +x_15794 = l_Lean_IR_ToIR_lowerLet___closed__26; +x_15795 = l_panic___at_Lean_IR_ToIR_lowerAlt_loop___spec__1(x_15794, x_14819, x_4, x_5, x_14823); +return x_15795; +} +case 6: +{ +lean_object* x_15796; uint8_t x_15797; +x_15796 = lean_ctor_get(x_14830, 0); +lean_inc(x_15796); +lean_dec(x_14830); +lean_inc(x_153); +x_15797 = l_Lean_isExtern(x_14825, x_153); +if (x_15797 == 0) +{ +lean_object* x_15798; +lean_dec(x_14824); +lean_free_object(x_14815); +lean_dec(x_14813); +lean_inc(x_5); +lean_inc(x_4); +x_15798 = l_Lean_IR_ToIR_getCtorInfo(x_153, x_14819, x_4, x_5, x_14823); +if (lean_obj_tag(x_15798) == 0) +{ +lean_object* x_15799; lean_object* x_15800; lean_object* x_15801; lean_object* x_15802; lean_object* x_15803; lean_object* x_15804; lean_object* x_15805; lean_object* x_15806; 
lean_object* x_15807; lean_object* x_15808; lean_object* x_15809; lean_object* x_15810; lean_object* x_15811; lean_object* x_15812; lean_object* x_15813; lean_object* x_15814; lean_object* x_15815; lean_object* x_15816; lean_object* x_15817; lean_object* x_15818; +x_15799 = lean_ctor_get(x_15798, 0); +lean_inc(x_15799); +x_15800 = lean_ctor_get(x_15799, 0); +lean_inc(x_15800); +x_15801 = lean_ctor_get(x_15798, 1); +lean_inc(x_15801); +lean_dec(x_15798); +x_15802 = lean_ctor_get(x_15799, 1); +lean_inc(x_15802); +lean_dec(x_15799); +x_15803 = lean_ctor_get(x_15800, 0); +lean_inc(x_15803); +x_15804 = lean_ctor_get(x_15800, 1); +lean_inc(x_15804); +lean_dec(x_15800); +x_15805 = lean_ctor_get(x_15796, 3); +lean_inc(x_15805); +lean_dec(x_15796); +x_15806 = lean_array_get_size(x_14804); +x_15807 = l_Array_extract___rarg(x_14804, x_15805, x_15806); +lean_dec(x_15806); +lean_dec(x_14804); +x_15808 = lean_array_get_size(x_15804); +x_15809 = lean_unsigned_to_nat(0u); +x_15810 = lean_unsigned_to_nat(1u); +if (lean_is_scalar(x_14805)) { + x_15811 = lean_alloc_ctor(0, 3, 0); +} else { + x_15811 = x_14805; + lean_ctor_set_tag(x_15811, 0); +} +lean_ctor_set(x_15811, 0, x_15809); +lean_ctor_set(x_15811, 1, x_15808); +lean_ctor_set(x_15811, 2, x_15810); +x_15812 = l_Lean_IR_ToIR_lowerLet___closed__27; +x_15813 = l_Std_Range_forIn_x27_loop___at_Lean_IR_ToIR_lowerLet___spec__7(x_15804, x_15807, x_15811, x_15811, x_15812, x_15809, lean_box(0), lean_box(0), x_15802, x_4, x_5, x_15801); +lean_dec(x_15811); +x_15814 = lean_ctor_get(x_15813, 0); +lean_inc(x_15814); +x_15815 = lean_ctor_get(x_15813, 1); +lean_inc(x_15815); +lean_dec(x_15813); +x_15816 = lean_ctor_get(x_15814, 0); +lean_inc(x_15816); +x_15817 = lean_ctor_get(x_15814, 1); +lean_inc(x_15817); +lean_dec(x_15814); +x_15818 = l_Lean_IR_ToIR_lowerLet___lambda__4(x_1, x_2, x_15803, x_15804, x_15807, x_15816, x_15817, x_4, x_5, x_15815); +lean_dec(x_15807); +lean_dec(x_15804); +return x_15818; +} +else +{ +uint8_t x_15819; +lean_dec(x_15796); +lean_dec(x_14805); +lean_dec(x_14804); +lean_dec(x_5); +lean_dec(x_4); +lean_dec(x_2); +lean_dec(x_1); +x_15819 = !lean_is_exclusive(x_15798); +if (x_15819 == 0) +{ +return x_15798; +} +else +{ +lean_object* x_15820; lean_object* x_15821; lean_object* x_15822; +x_15820 = lean_ctor_get(x_15798, 0); +x_15821 = lean_ctor_get(x_15798, 1); +lean_inc(x_15821); +lean_inc(x_15820); +lean_dec(x_15798); +x_15822 = lean_alloc_ctor(1, 2, 0); +lean_ctor_set(x_15822, 0, x_15820); +lean_ctor_set(x_15822, 1, x_15821); +return x_15822; +} +} +} +else +{ +lean_object* x_15823; lean_object* x_15824; lean_object* x_15854; lean_object* x_15855; +lean_dec(x_15796); +lean_dec(x_14805); +lean_dec(x_14804); +lean_inc(x_153); +x_15854 = l_Lean_Compiler_LCNF_getMonoDecl_x3f(x_153, x_4, x_5, x_14823); +x_15855 = lean_ctor_get(x_15854, 0); +lean_inc(x_15855); +if (lean_obj_tag(x_15855) == 0) +{ +lean_object* x_15856; lean_object* x_15857; +x_15856 = lean_ctor_get(x_15854, 1); +lean_inc(x_15856); +lean_dec(x_15854); +x_15857 = lean_box(0); +lean_ctor_set(x_14815, 0, x_15857); +x_15823 = x_14815; +x_15824 = x_15856; +goto block_15853; +} +else +{ +uint8_t x_15858; +lean_free_object(x_14815); +x_15858 = !lean_is_exclusive(x_15854); +if (x_15858 == 0) +{ +lean_object* x_15859; lean_object* x_15860; uint8_t x_15861; +x_15859 = lean_ctor_get(x_15854, 1); +x_15860 = lean_ctor_get(x_15854, 0); +lean_dec(x_15860); +x_15861 = !lean_is_exclusive(x_15855); +if (x_15861 == 0) +{ +lean_object* x_15862; lean_object* x_15863; lean_object* x_15864; lean_object* 
x_15865; uint8_t x_15866; +x_15862 = lean_ctor_get(x_15855, 0); +x_15863 = lean_array_get_size(x_14813); +x_15864 = lean_ctor_get(x_15862, 3); +lean_inc(x_15864); +lean_dec(x_15862); +x_15865 = lean_array_get_size(x_15864); +lean_dec(x_15864); +x_15866 = lean_nat_dec_lt(x_15863, x_15865); +if (x_15866 == 0) +{ +uint8_t x_15867; +x_15867 = lean_nat_dec_eq(x_15863, x_15865); +if (x_15867 == 0) +{ +lean_object* x_15868; lean_object* x_15869; lean_object* x_15870; lean_object* x_15871; lean_object* x_15872; lean_object* x_15873; lean_object* x_15874; lean_object* x_15875; lean_object* x_15876; lean_object* x_15877; lean_object* x_15878; lean_object* x_15879; lean_object* x_15880; lean_object* x_15881; lean_object* x_15882; lean_object* x_15883; +x_15868 = lean_unsigned_to_nat(0u); +x_15869 = l_Array_extract___rarg(x_14813, x_15868, x_15865); +x_15870 = l_Array_extract___rarg(x_14813, x_15865, x_15863); +lean_dec(x_15863); +lean_inc(x_153); +lean_ctor_set_tag(x_15854, 6); +lean_ctor_set(x_15854, 1, x_15869); +lean_ctor_set(x_15854, 0, x_153); +x_15871 = lean_ctor_get(x_1, 0); +lean_inc(x_15871); +x_15872 = l_Lean_IR_ToIR_bindVar(x_15871, x_14819, x_4, x_5, x_15859); +x_15873 = lean_ctor_get(x_15872, 0); +lean_inc(x_15873); +x_15874 = lean_ctor_get(x_15872, 1); +lean_inc(x_15874); +lean_dec(x_15872); +x_15875 = lean_ctor_get(x_15873, 0); +lean_inc(x_15875); +x_15876 = lean_ctor_get(x_15873, 1); +lean_inc(x_15876); +lean_dec(x_15873); +x_15877 = l_Lean_IR_ToIR_newVar(x_15876, x_4, x_5, x_15874); +x_15878 = lean_ctor_get(x_15877, 0); +lean_inc(x_15878); +x_15879 = lean_ctor_get(x_15877, 1); +lean_inc(x_15879); +lean_dec(x_15877); +x_15880 = lean_ctor_get(x_15878, 0); +lean_inc(x_15880); +x_15881 = lean_ctor_get(x_15878, 1); +lean_inc(x_15881); +lean_dec(x_15878); +x_15882 = lean_ctor_get(x_1, 2); +lean_inc(x_15882); +lean_inc(x_5); +lean_inc(x_4); +x_15883 = l_Lean_IR_ToIR_lowerType(x_15882, x_15881, x_4, x_5, x_15879); +if (lean_obj_tag(x_15883) == 0) +{ +lean_object* x_15884; lean_object* x_15885; lean_object* x_15886; lean_object* x_15887; lean_object* x_15888; +x_15884 = lean_ctor_get(x_15883, 0); +lean_inc(x_15884); +x_15885 = lean_ctor_get(x_15883, 1); +lean_inc(x_15885); +lean_dec(x_15883); +x_15886 = lean_ctor_get(x_15884, 0); +lean_inc(x_15886); +x_15887 = lean_ctor_get(x_15884, 1); +lean_inc(x_15887); +lean_dec(x_15884); +lean_inc(x_5); +lean_inc(x_4); +lean_inc(x_2); +x_15888 = l_Lean_IR_ToIR_lowerLet___lambda__3(x_2, x_15880, x_15870, x_15875, x_15854, x_15886, x_15887, x_4, x_5, x_15885); +if (lean_obj_tag(x_15888) == 0) +{ +lean_object* x_15889; lean_object* x_15890; uint8_t x_15891; +x_15889 = lean_ctor_get(x_15888, 0); +lean_inc(x_15889); +x_15890 = lean_ctor_get(x_15888, 1); +lean_inc(x_15890); +lean_dec(x_15888); +x_15891 = !lean_is_exclusive(x_15889); +if (x_15891 == 0) +{ +lean_object* x_15892; +x_15892 = lean_ctor_get(x_15889, 0); +lean_ctor_set(x_15855, 0, x_15892); +lean_ctor_set(x_15889, 0, x_15855); +x_15823 = x_15889; +x_15824 = x_15890; +goto block_15853; +} +else +{ +lean_object* x_15893; lean_object* x_15894; lean_object* x_15895; +x_15893 = lean_ctor_get(x_15889, 0); +x_15894 = lean_ctor_get(x_15889, 1); +lean_inc(x_15894); +lean_inc(x_15893); +lean_dec(x_15889); +lean_ctor_set(x_15855, 0, x_15893); +x_15895 = lean_alloc_ctor(0, 2, 0); +lean_ctor_set(x_15895, 0, x_15855); +lean_ctor_set(x_15895, 1, x_15894); +x_15823 = x_15895; +x_15824 = x_15890; +goto block_15853; +} +} +else +{ +uint8_t x_15896; +lean_free_object(x_15855); +lean_dec(x_14824); +lean_dec(x_14813); 
+lean_dec(x_153); +lean_dec(x_5); +lean_dec(x_4); +lean_dec(x_2); +lean_dec(x_1); +x_15896 = !lean_is_exclusive(x_15888); +if (x_15896 == 0) +{ +return x_15888; +} +else +{ +lean_object* x_15897; lean_object* x_15898; lean_object* x_15899; +x_15897 = lean_ctor_get(x_15888, 0); +x_15898 = lean_ctor_get(x_15888, 1); +lean_inc(x_15898); +lean_inc(x_15897); +lean_dec(x_15888); +x_15899 = lean_alloc_ctor(1, 2, 0); +lean_ctor_set(x_15899, 0, x_15897); +lean_ctor_set(x_15899, 1, x_15898); +return x_15899; +} +} +} +else +{ +uint8_t x_15900; +lean_dec(x_15880); +lean_dec(x_15875); +lean_dec(x_15854); +lean_dec(x_15870); +lean_free_object(x_15855); +lean_dec(x_14824); +lean_dec(x_14813); +lean_dec(x_153); +lean_dec(x_5); +lean_dec(x_4); +lean_dec(x_2); +lean_dec(x_1); +x_15900 = !lean_is_exclusive(x_15883); +if (x_15900 == 0) +{ +return x_15883; +} +else +{ +lean_object* x_15901; lean_object* x_15902; lean_object* x_15903; +x_15901 = lean_ctor_get(x_15883, 0); +x_15902 = lean_ctor_get(x_15883, 1); +lean_inc(x_15902); +lean_inc(x_15901); +lean_dec(x_15883); +x_15903 = lean_alloc_ctor(1, 2, 0); +lean_ctor_set(x_15903, 0, x_15901); +lean_ctor_set(x_15903, 1, x_15902); +return x_15903; +} +} +} +else +{ +lean_object* x_15904; lean_object* x_15905; lean_object* x_15906; lean_object* x_15907; lean_object* x_15908; lean_object* x_15909; lean_object* x_15910; lean_object* x_15911; +lean_dec(x_15865); +lean_dec(x_15863); +lean_inc(x_14813); +lean_inc(x_153); +lean_ctor_set_tag(x_15854, 6); +lean_ctor_set(x_15854, 1, x_14813); +lean_ctor_set(x_15854, 0, x_153); +x_15904 = lean_ctor_get(x_1, 0); +lean_inc(x_15904); +x_15905 = l_Lean_IR_ToIR_bindVar(x_15904, x_14819, x_4, x_5, x_15859); +x_15906 = lean_ctor_get(x_15905, 0); +lean_inc(x_15906); +x_15907 = lean_ctor_get(x_15905, 1); +lean_inc(x_15907); +lean_dec(x_15905); +x_15908 = lean_ctor_get(x_15906, 0); +lean_inc(x_15908); +x_15909 = lean_ctor_get(x_15906, 1); +lean_inc(x_15909); +lean_dec(x_15906); +x_15910 = lean_ctor_get(x_1, 2); +lean_inc(x_15910); +lean_inc(x_5); +lean_inc(x_4); +x_15911 = l_Lean_IR_ToIR_lowerType(x_15910, x_15909, x_4, x_5, x_15907); +if (lean_obj_tag(x_15911) == 0) +{ +lean_object* x_15912; lean_object* x_15913; lean_object* x_15914; lean_object* x_15915; lean_object* x_15916; +x_15912 = lean_ctor_get(x_15911, 0); +lean_inc(x_15912); +x_15913 = lean_ctor_get(x_15911, 1); +lean_inc(x_15913); +lean_dec(x_15911); +x_15914 = lean_ctor_get(x_15912, 0); +lean_inc(x_15914); +x_15915 = lean_ctor_get(x_15912, 1); +lean_inc(x_15915); +lean_dec(x_15912); +lean_inc(x_5); +lean_inc(x_4); +lean_inc(x_2); +x_15916 = l_Lean_IR_ToIR_lowerLet___lambda__1(x_2, x_15908, x_15854, x_15914, x_15915, x_4, x_5, x_15913); +if (lean_obj_tag(x_15916) == 0) +{ +lean_object* x_15917; lean_object* x_15918; uint8_t x_15919; +x_15917 = lean_ctor_get(x_15916, 0); +lean_inc(x_15917); +x_15918 = lean_ctor_get(x_15916, 1); +lean_inc(x_15918); +lean_dec(x_15916); +x_15919 = !lean_is_exclusive(x_15917); +if (x_15919 == 0) +{ +lean_object* x_15920; +x_15920 = lean_ctor_get(x_15917, 0); +lean_ctor_set(x_15855, 0, x_15920); +lean_ctor_set(x_15917, 0, x_15855); +x_15823 = x_15917; +x_15824 = x_15918; +goto block_15853; +} +else +{ +lean_object* x_15921; lean_object* x_15922; lean_object* x_15923; +x_15921 = lean_ctor_get(x_15917, 0); +x_15922 = lean_ctor_get(x_15917, 1); +lean_inc(x_15922); +lean_inc(x_15921); +lean_dec(x_15917); +lean_ctor_set(x_15855, 0, x_15921); +x_15923 = lean_alloc_ctor(0, 2, 0); +lean_ctor_set(x_15923, 0, x_15855); +lean_ctor_set(x_15923, 1, x_15922); 
+x_15823 = x_15923; +x_15824 = x_15918; +goto block_15853; +} +} +else +{ +uint8_t x_15924; +lean_free_object(x_15855); +lean_dec(x_14824); +lean_dec(x_14813); +lean_dec(x_153); +lean_dec(x_5); +lean_dec(x_4); +lean_dec(x_2); +lean_dec(x_1); +x_15924 = !lean_is_exclusive(x_15916); +if (x_15924 == 0) +{ +return x_15916; +} +else +{ +lean_object* x_15925; lean_object* x_15926; lean_object* x_15927; +x_15925 = lean_ctor_get(x_15916, 0); +x_15926 = lean_ctor_get(x_15916, 1); +lean_inc(x_15926); +lean_inc(x_15925); +lean_dec(x_15916); +x_15927 = lean_alloc_ctor(1, 2, 0); +lean_ctor_set(x_15927, 0, x_15925); +lean_ctor_set(x_15927, 1, x_15926); +return x_15927; +} +} +} +else +{ +uint8_t x_15928; +lean_dec(x_15908); +lean_dec(x_15854); +lean_free_object(x_15855); +lean_dec(x_14824); +lean_dec(x_14813); +lean_dec(x_153); +lean_dec(x_5); +lean_dec(x_4); +lean_dec(x_2); +lean_dec(x_1); +x_15928 = !lean_is_exclusive(x_15911); +if (x_15928 == 0) +{ +return x_15911; +} +else +{ +lean_object* x_15929; lean_object* x_15930; lean_object* x_15931; +x_15929 = lean_ctor_get(x_15911, 0); +x_15930 = lean_ctor_get(x_15911, 1); +lean_inc(x_15930); +lean_inc(x_15929); +lean_dec(x_15911); +x_15931 = lean_alloc_ctor(1, 2, 0); +lean_ctor_set(x_15931, 0, x_15929); +lean_ctor_set(x_15931, 1, x_15930); +return x_15931; +} +} +} +} +else +{ +lean_object* x_15932; lean_object* x_15933; lean_object* x_15934; lean_object* x_15935; lean_object* x_15936; lean_object* x_15937; lean_object* x_15938; lean_object* x_15939; +lean_dec(x_15865); +lean_dec(x_15863); +lean_inc(x_14813); +lean_inc(x_153); +lean_ctor_set_tag(x_15854, 7); +lean_ctor_set(x_15854, 1, x_14813); +lean_ctor_set(x_15854, 0, x_153); +x_15932 = lean_ctor_get(x_1, 0); +lean_inc(x_15932); +x_15933 = l_Lean_IR_ToIR_bindVar(x_15932, x_14819, x_4, x_5, x_15859); +x_15934 = lean_ctor_get(x_15933, 0); +lean_inc(x_15934); +x_15935 = lean_ctor_get(x_15933, 1); +lean_inc(x_15935); +lean_dec(x_15933); +x_15936 = lean_ctor_get(x_15934, 0); +lean_inc(x_15936); +x_15937 = lean_ctor_get(x_15934, 1); +lean_inc(x_15937); +lean_dec(x_15934); +x_15938 = lean_box(7); +lean_inc(x_5); +lean_inc(x_4); +lean_inc(x_2); +x_15939 = l_Lean_IR_ToIR_lowerLet___lambda__1(x_2, x_15936, x_15854, x_15938, x_15937, x_4, x_5, x_15935); +if (lean_obj_tag(x_15939) == 0) +{ +lean_object* x_15940; lean_object* x_15941; uint8_t x_15942; +x_15940 = lean_ctor_get(x_15939, 0); +lean_inc(x_15940); +x_15941 = lean_ctor_get(x_15939, 1); +lean_inc(x_15941); +lean_dec(x_15939); +x_15942 = !lean_is_exclusive(x_15940); +if (x_15942 == 0) +{ +lean_object* x_15943; +x_15943 = lean_ctor_get(x_15940, 0); +lean_ctor_set(x_15855, 0, x_15943); +lean_ctor_set(x_15940, 0, x_15855); +x_15823 = x_15940; +x_15824 = x_15941; +goto block_15853; +} +else +{ +lean_object* x_15944; lean_object* x_15945; lean_object* x_15946; +x_15944 = lean_ctor_get(x_15940, 0); +x_15945 = lean_ctor_get(x_15940, 1); +lean_inc(x_15945); +lean_inc(x_15944); +lean_dec(x_15940); +lean_ctor_set(x_15855, 0, x_15944); +x_15946 = lean_alloc_ctor(0, 2, 0); +lean_ctor_set(x_15946, 0, x_15855); +lean_ctor_set(x_15946, 1, x_15945); +x_15823 = x_15946; +x_15824 = x_15941; +goto block_15853; +} +} +else +{ +uint8_t x_15947; +lean_free_object(x_15855); +lean_dec(x_14824); +lean_dec(x_14813); +lean_dec(x_153); +lean_dec(x_5); +lean_dec(x_4); +lean_dec(x_2); +lean_dec(x_1); +x_15947 = !lean_is_exclusive(x_15939); +if (x_15947 == 0) +{ +return x_15939; +} +else +{ +lean_object* x_15948; lean_object* x_15949; lean_object* x_15950; +x_15948 = 
lean_ctor_get(x_15939, 0); +x_15949 = lean_ctor_get(x_15939, 1); +lean_inc(x_15949); +lean_inc(x_15948); +lean_dec(x_15939); +x_15950 = lean_alloc_ctor(1, 2, 0); +lean_ctor_set(x_15950, 0, x_15948); +lean_ctor_set(x_15950, 1, x_15949); +return x_15950; +} +} +} +} +else +{ +lean_object* x_15951; lean_object* x_15952; lean_object* x_15953; lean_object* x_15954; uint8_t x_15955; +x_15951 = lean_ctor_get(x_15855, 0); +lean_inc(x_15951); +lean_dec(x_15855); +x_15952 = lean_array_get_size(x_14813); +x_15953 = lean_ctor_get(x_15951, 3); +lean_inc(x_15953); +lean_dec(x_15951); +x_15954 = lean_array_get_size(x_15953); +lean_dec(x_15953); +x_15955 = lean_nat_dec_lt(x_15952, x_15954); +if (x_15955 == 0) +{ +uint8_t x_15956; +x_15956 = lean_nat_dec_eq(x_15952, x_15954); +if (x_15956 == 0) +{ +lean_object* x_15957; lean_object* x_15958; lean_object* x_15959; lean_object* x_15960; lean_object* x_15961; lean_object* x_15962; lean_object* x_15963; lean_object* x_15964; lean_object* x_15965; lean_object* x_15966; lean_object* x_15967; lean_object* x_15968; lean_object* x_15969; lean_object* x_15970; lean_object* x_15971; lean_object* x_15972; +x_15957 = lean_unsigned_to_nat(0u); +x_15958 = l_Array_extract___rarg(x_14813, x_15957, x_15954); +x_15959 = l_Array_extract___rarg(x_14813, x_15954, x_15952); +lean_dec(x_15952); +lean_inc(x_153); +lean_ctor_set_tag(x_15854, 6); +lean_ctor_set(x_15854, 1, x_15958); +lean_ctor_set(x_15854, 0, x_153); +x_15960 = lean_ctor_get(x_1, 0); +lean_inc(x_15960); +x_15961 = l_Lean_IR_ToIR_bindVar(x_15960, x_14819, x_4, x_5, x_15859); +x_15962 = lean_ctor_get(x_15961, 0); +lean_inc(x_15962); +x_15963 = lean_ctor_get(x_15961, 1); +lean_inc(x_15963); +lean_dec(x_15961); +x_15964 = lean_ctor_get(x_15962, 0); +lean_inc(x_15964); +x_15965 = lean_ctor_get(x_15962, 1); +lean_inc(x_15965); +lean_dec(x_15962); +x_15966 = l_Lean_IR_ToIR_newVar(x_15965, x_4, x_5, x_15963); +x_15967 = lean_ctor_get(x_15966, 0); +lean_inc(x_15967); +x_15968 = lean_ctor_get(x_15966, 1); +lean_inc(x_15968); +lean_dec(x_15966); +x_15969 = lean_ctor_get(x_15967, 0); +lean_inc(x_15969); +x_15970 = lean_ctor_get(x_15967, 1); +lean_inc(x_15970); +lean_dec(x_15967); +x_15971 = lean_ctor_get(x_1, 2); +lean_inc(x_15971); +lean_inc(x_5); +lean_inc(x_4); +x_15972 = l_Lean_IR_ToIR_lowerType(x_15971, x_15970, x_4, x_5, x_15968); +if (lean_obj_tag(x_15972) == 0) +{ +lean_object* x_15973; lean_object* x_15974; lean_object* x_15975; lean_object* x_15976; lean_object* x_15977; +x_15973 = lean_ctor_get(x_15972, 0); +lean_inc(x_15973); +x_15974 = lean_ctor_get(x_15972, 1); +lean_inc(x_15974); +lean_dec(x_15972); +x_15975 = lean_ctor_get(x_15973, 0); +lean_inc(x_15975); +x_15976 = lean_ctor_get(x_15973, 1); +lean_inc(x_15976); +lean_dec(x_15973); +lean_inc(x_5); +lean_inc(x_4); +lean_inc(x_2); +x_15977 = l_Lean_IR_ToIR_lowerLet___lambda__3(x_2, x_15969, x_15959, x_15964, x_15854, x_15975, x_15976, x_4, x_5, x_15974); +if (lean_obj_tag(x_15977) == 0) +{ +lean_object* x_15978; lean_object* x_15979; lean_object* x_15980; lean_object* x_15981; lean_object* x_15982; lean_object* x_15983; lean_object* x_15984; +x_15978 = lean_ctor_get(x_15977, 0); +lean_inc(x_15978); +x_15979 = lean_ctor_get(x_15977, 1); +lean_inc(x_15979); +lean_dec(x_15977); +x_15980 = lean_ctor_get(x_15978, 0); +lean_inc(x_15980); +x_15981 = lean_ctor_get(x_15978, 1); +lean_inc(x_15981); +if (lean_is_exclusive(x_15978)) { + lean_ctor_release(x_15978, 0); + lean_ctor_release(x_15978, 1); + x_15982 = x_15978; +} else { + lean_dec_ref(x_15978); + x_15982 = 
lean_box(0); +} +x_15983 = lean_alloc_ctor(1, 1, 0); +lean_ctor_set(x_15983, 0, x_15980); +if (lean_is_scalar(x_15982)) { + x_15984 = lean_alloc_ctor(0, 2, 0); +} else { + x_15984 = x_15982; +} +lean_ctor_set(x_15984, 0, x_15983); +lean_ctor_set(x_15984, 1, x_15981); +x_15823 = x_15984; +x_15824 = x_15979; +goto block_15853; +} +else +{ +lean_object* x_15985; lean_object* x_15986; lean_object* x_15987; lean_object* x_15988; +lean_dec(x_14824); +lean_dec(x_14813); +lean_dec(x_153); +lean_dec(x_5); +lean_dec(x_4); +lean_dec(x_2); +lean_dec(x_1); +x_15985 = lean_ctor_get(x_15977, 0); +lean_inc(x_15985); +x_15986 = lean_ctor_get(x_15977, 1); +lean_inc(x_15986); +if (lean_is_exclusive(x_15977)) { + lean_ctor_release(x_15977, 0); + lean_ctor_release(x_15977, 1); + x_15987 = x_15977; +} else { + lean_dec_ref(x_15977); + x_15987 = lean_box(0); +} +if (lean_is_scalar(x_15987)) { + x_15988 = lean_alloc_ctor(1, 2, 0); +} else { + x_15988 = x_15987; +} +lean_ctor_set(x_15988, 0, x_15985); +lean_ctor_set(x_15988, 1, x_15986); +return x_15988; +} +} +else +{ +lean_object* x_15989; lean_object* x_15990; lean_object* x_15991; lean_object* x_15992; +lean_dec(x_15969); +lean_dec(x_15964); +lean_dec(x_15854); +lean_dec(x_15959); +lean_dec(x_14824); +lean_dec(x_14813); +lean_dec(x_153); +lean_dec(x_5); +lean_dec(x_4); +lean_dec(x_2); +lean_dec(x_1); +x_15989 = lean_ctor_get(x_15972, 0); +lean_inc(x_15989); +x_15990 = lean_ctor_get(x_15972, 1); +lean_inc(x_15990); +if (lean_is_exclusive(x_15972)) { + lean_ctor_release(x_15972, 0); + lean_ctor_release(x_15972, 1); + x_15991 = x_15972; +} else { + lean_dec_ref(x_15972); + x_15991 = lean_box(0); +} +if (lean_is_scalar(x_15991)) { + x_15992 = lean_alloc_ctor(1, 2, 0); +} else { + x_15992 = x_15991; +} +lean_ctor_set(x_15992, 0, x_15989); +lean_ctor_set(x_15992, 1, x_15990); +return x_15992; +} +} +else +{ +lean_object* x_15993; lean_object* x_15994; lean_object* x_15995; lean_object* x_15996; lean_object* x_15997; lean_object* x_15998; lean_object* x_15999; lean_object* x_16000; +lean_dec(x_15954); +lean_dec(x_15952); +lean_inc(x_14813); +lean_inc(x_153); +lean_ctor_set_tag(x_15854, 6); +lean_ctor_set(x_15854, 1, x_14813); +lean_ctor_set(x_15854, 0, x_153); +x_15993 = lean_ctor_get(x_1, 0); +lean_inc(x_15993); +x_15994 = l_Lean_IR_ToIR_bindVar(x_15993, x_14819, x_4, x_5, x_15859); +x_15995 = lean_ctor_get(x_15994, 0); +lean_inc(x_15995); +x_15996 = lean_ctor_get(x_15994, 1); +lean_inc(x_15996); +lean_dec(x_15994); +x_15997 = lean_ctor_get(x_15995, 0); +lean_inc(x_15997); +x_15998 = lean_ctor_get(x_15995, 1); +lean_inc(x_15998); +lean_dec(x_15995); +x_15999 = lean_ctor_get(x_1, 2); +lean_inc(x_15999); +lean_inc(x_5); +lean_inc(x_4); +x_16000 = l_Lean_IR_ToIR_lowerType(x_15999, x_15998, x_4, x_5, x_15996); +if (lean_obj_tag(x_16000) == 0) +{ +lean_object* x_16001; lean_object* x_16002; lean_object* x_16003; lean_object* x_16004; lean_object* x_16005; +x_16001 = lean_ctor_get(x_16000, 0); +lean_inc(x_16001); +x_16002 = lean_ctor_get(x_16000, 1); +lean_inc(x_16002); +lean_dec(x_16000); +x_16003 = lean_ctor_get(x_16001, 0); +lean_inc(x_16003); +x_16004 = lean_ctor_get(x_16001, 1); +lean_inc(x_16004); +lean_dec(x_16001); +lean_inc(x_5); +lean_inc(x_4); +lean_inc(x_2); +x_16005 = l_Lean_IR_ToIR_lowerLet___lambda__1(x_2, x_15997, x_15854, x_16003, x_16004, x_4, x_5, x_16002); +if (lean_obj_tag(x_16005) == 0) +{ +lean_object* x_16006; lean_object* x_16007; lean_object* x_16008; lean_object* x_16009; lean_object* x_16010; lean_object* x_16011; lean_object* x_16012; +x_16006 
= lean_ctor_get(x_16005, 0); +lean_inc(x_16006); +x_16007 = lean_ctor_get(x_16005, 1); +lean_inc(x_16007); +lean_dec(x_16005); +x_16008 = lean_ctor_get(x_16006, 0); +lean_inc(x_16008); +x_16009 = lean_ctor_get(x_16006, 1); +lean_inc(x_16009); +if (lean_is_exclusive(x_16006)) { + lean_ctor_release(x_16006, 0); + lean_ctor_release(x_16006, 1); + x_16010 = x_16006; +} else { + lean_dec_ref(x_16006); + x_16010 = lean_box(0); +} +x_16011 = lean_alloc_ctor(1, 1, 0); +lean_ctor_set(x_16011, 0, x_16008); +if (lean_is_scalar(x_16010)) { + x_16012 = lean_alloc_ctor(0, 2, 0); +} else { + x_16012 = x_16010; +} +lean_ctor_set(x_16012, 0, x_16011); +lean_ctor_set(x_16012, 1, x_16009); +x_15823 = x_16012; +x_15824 = x_16007; +goto block_15853; +} +else +{ +lean_object* x_16013; lean_object* x_16014; lean_object* x_16015; lean_object* x_16016; +lean_dec(x_14824); +lean_dec(x_14813); +lean_dec(x_153); +lean_dec(x_5); +lean_dec(x_4); +lean_dec(x_2); +lean_dec(x_1); +x_16013 = lean_ctor_get(x_16005, 0); +lean_inc(x_16013); +x_16014 = lean_ctor_get(x_16005, 1); +lean_inc(x_16014); +if (lean_is_exclusive(x_16005)) { + lean_ctor_release(x_16005, 0); + lean_ctor_release(x_16005, 1); + x_16015 = x_16005; +} else { + lean_dec_ref(x_16005); + x_16015 = lean_box(0); +} +if (lean_is_scalar(x_16015)) { + x_16016 = lean_alloc_ctor(1, 2, 0); +} else { + x_16016 = x_16015; +} +lean_ctor_set(x_16016, 0, x_16013); +lean_ctor_set(x_16016, 1, x_16014); +return x_16016; +} +} +else +{ +lean_object* x_16017; lean_object* x_16018; lean_object* x_16019; lean_object* x_16020; +lean_dec(x_15997); +lean_dec(x_15854); +lean_dec(x_14824); +lean_dec(x_14813); +lean_dec(x_153); +lean_dec(x_5); +lean_dec(x_4); +lean_dec(x_2); +lean_dec(x_1); +x_16017 = lean_ctor_get(x_16000, 0); +lean_inc(x_16017); +x_16018 = lean_ctor_get(x_16000, 1); +lean_inc(x_16018); +if (lean_is_exclusive(x_16000)) { + lean_ctor_release(x_16000, 0); + lean_ctor_release(x_16000, 1); + x_16019 = x_16000; +} else { + lean_dec_ref(x_16000); + x_16019 = lean_box(0); +} +if (lean_is_scalar(x_16019)) { + x_16020 = lean_alloc_ctor(1, 2, 0); +} else { + x_16020 = x_16019; +} +lean_ctor_set(x_16020, 0, x_16017); +lean_ctor_set(x_16020, 1, x_16018); +return x_16020; +} +} +} +else +{ +lean_object* x_16021; lean_object* x_16022; lean_object* x_16023; lean_object* x_16024; lean_object* x_16025; lean_object* x_16026; lean_object* x_16027; lean_object* x_16028; +lean_dec(x_15954); +lean_dec(x_15952); +lean_inc(x_14813); +lean_inc(x_153); +lean_ctor_set_tag(x_15854, 7); +lean_ctor_set(x_15854, 1, x_14813); +lean_ctor_set(x_15854, 0, x_153); +x_16021 = lean_ctor_get(x_1, 0); +lean_inc(x_16021); +x_16022 = l_Lean_IR_ToIR_bindVar(x_16021, x_14819, x_4, x_5, x_15859); +x_16023 = lean_ctor_get(x_16022, 0); +lean_inc(x_16023); +x_16024 = lean_ctor_get(x_16022, 1); +lean_inc(x_16024); +lean_dec(x_16022); +x_16025 = lean_ctor_get(x_16023, 0); +lean_inc(x_16025); +x_16026 = lean_ctor_get(x_16023, 1); +lean_inc(x_16026); +lean_dec(x_16023); +x_16027 = lean_box(7); +lean_inc(x_5); +lean_inc(x_4); +lean_inc(x_2); +x_16028 = l_Lean_IR_ToIR_lowerLet___lambda__1(x_2, x_16025, x_15854, x_16027, x_16026, x_4, x_5, x_16024); +if (lean_obj_tag(x_16028) == 0) +{ +lean_object* x_16029; lean_object* x_16030; lean_object* x_16031; lean_object* x_16032; lean_object* x_16033; lean_object* x_16034; lean_object* x_16035; +x_16029 = lean_ctor_get(x_16028, 0); +lean_inc(x_16029); +x_16030 = lean_ctor_get(x_16028, 1); +lean_inc(x_16030); +lean_dec(x_16028); +x_16031 = lean_ctor_get(x_16029, 0); 
+lean_inc(x_16031); +x_16032 = lean_ctor_get(x_16029, 1); +lean_inc(x_16032); +if (lean_is_exclusive(x_16029)) { + lean_ctor_release(x_16029, 0); + lean_ctor_release(x_16029, 1); + x_16033 = x_16029; +} else { + lean_dec_ref(x_16029); + x_16033 = lean_box(0); +} +x_16034 = lean_alloc_ctor(1, 1, 0); +lean_ctor_set(x_16034, 0, x_16031); +if (lean_is_scalar(x_16033)) { + x_16035 = lean_alloc_ctor(0, 2, 0); +} else { + x_16035 = x_16033; +} +lean_ctor_set(x_16035, 0, x_16034); +lean_ctor_set(x_16035, 1, x_16032); +x_15823 = x_16035; +x_15824 = x_16030; +goto block_15853; +} +else +{ +lean_object* x_16036; lean_object* x_16037; lean_object* x_16038; lean_object* x_16039; +lean_dec(x_14824); +lean_dec(x_14813); +lean_dec(x_153); +lean_dec(x_5); +lean_dec(x_4); +lean_dec(x_2); +lean_dec(x_1); +x_16036 = lean_ctor_get(x_16028, 0); +lean_inc(x_16036); +x_16037 = lean_ctor_get(x_16028, 1); +lean_inc(x_16037); +if (lean_is_exclusive(x_16028)) { + lean_ctor_release(x_16028, 0); + lean_ctor_release(x_16028, 1); + x_16038 = x_16028; +} else { + lean_dec_ref(x_16028); + x_16038 = lean_box(0); +} +if (lean_is_scalar(x_16038)) { + x_16039 = lean_alloc_ctor(1, 2, 0); +} else { + x_16039 = x_16038; +} +lean_ctor_set(x_16039, 0, x_16036); +lean_ctor_set(x_16039, 1, x_16037); +return x_16039; +} +} +} +} +else +{ +lean_object* x_16040; lean_object* x_16041; lean_object* x_16042; lean_object* x_16043; lean_object* x_16044; lean_object* x_16045; uint8_t x_16046; +x_16040 = lean_ctor_get(x_15854, 1); +lean_inc(x_16040); +lean_dec(x_15854); +x_16041 = lean_ctor_get(x_15855, 0); +lean_inc(x_16041); +if (lean_is_exclusive(x_15855)) { + lean_ctor_release(x_15855, 0); + x_16042 = x_15855; +} else { + lean_dec_ref(x_15855); + x_16042 = lean_box(0); +} +x_16043 = lean_array_get_size(x_14813); +x_16044 = lean_ctor_get(x_16041, 3); +lean_inc(x_16044); +lean_dec(x_16041); +x_16045 = lean_array_get_size(x_16044); +lean_dec(x_16044); +x_16046 = lean_nat_dec_lt(x_16043, x_16045); +if (x_16046 == 0) +{ +uint8_t x_16047; +x_16047 = lean_nat_dec_eq(x_16043, x_16045); +if (x_16047 == 0) +{ +lean_object* x_16048; lean_object* x_16049; lean_object* x_16050; lean_object* x_16051; lean_object* x_16052; lean_object* x_16053; lean_object* x_16054; lean_object* x_16055; lean_object* x_16056; lean_object* x_16057; lean_object* x_16058; lean_object* x_16059; lean_object* x_16060; lean_object* x_16061; lean_object* x_16062; lean_object* x_16063; lean_object* x_16064; +x_16048 = lean_unsigned_to_nat(0u); +x_16049 = l_Array_extract___rarg(x_14813, x_16048, x_16045); +x_16050 = l_Array_extract___rarg(x_14813, x_16045, x_16043); +lean_dec(x_16043); +lean_inc(x_153); +x_16051 = lean_alloc_ctor(6, 2, 0); +lean_ctor_set(x_16051, 0, x_153); +lean_ctor_set(x_16051, 1, x_16049); +x_16052 = lean_ctor_get(x_1, 0); +lean_inc(x_16052); +x_16053 = l_Lean_IR_ToIR_bindVar(x_16052, x_14819, x_4, x_5, x_16040); +x_16054 = lean_ctor_get(x_16053, 0); +lean_inc(x_16054); +x_16055 = lean_ctor_get(x_16053, 1); +lean_inc(x_16055); +lean_dec(x_16053); +x_16056 = lean_ctor_get(x_16054, 0); +lean_inc(x_16056); +x_16057 = lean_ctor_get(x_16054, 1); +lean_inc(x_16057); +lean_dec(x_16054); +x_16058 = l_Lean_IR_ToIR_newVar(x_16057, x_4, x_5, x_16055); +x_16059 = lean_ctor_get(x_16058, 0); +lean_inc(x_16059); +x_16060 = lean_ctor_get(x_16058, 1); +lean_inc(x_16060); +lean_dec(x_16058); +x_16061 = lean_ctor_get(x_16059, 0); +lean_inc(x_16061); +x_16062 = lean_ctor_get(x_16059, 1); +lean_inc(x_16062); +lean_dec(x_16059); +x_16063 = lean_ctor_get(x_1, 2); +lean_inc(x_16063); 
+lean_inc(x_5); +lean_inc(x_4); +x_16064 = l_Lean_IR_ToIR_lowerType(x_16063, x_16062, x_4, x_5, x_16060); +if (lean_obj_tag(x_16064) == 0) +{ +lean_object* x_16065; lean_object* x_16066; lean_object* x_16067; lean_object* x_16068; lean_object* x_16069; +x_16065 = lean_ctor_get(x_16064, 0); +lean_inc(x_16065); +x_16066 = lean_ctor_get(x_16064, 1); +lean_inc(x_16066); +lean_dec(x_16064); +x_16067 = lean_ctor_get(x_16065, 0); +lean_inc(x_16067); +x_16068 = lean_ctor_get(x_16065, 1); +lean_inc(x_16068); +lean_dec(x_16065); +lean_inc(x_5); +lean_inc(x_4); +lean_inc(x_2); +x_16069 = l_Lean_IR_ToIR_lowerLet___lambda__3(x_2, x_16061, x_16050, x_16056, x_16051, x_16067, x_16068, x_4, x_5, x_16066); +if (lean_obj_tag(x_16069) == 0) +{ +lean_object* x_16070; lean_object* x_16071; lean_object* x_16072; lean_object* x_16073; lean_object* x_16074; lean_object* x_16075; lean_object* x_16076; +x_16070 = lean_ctor_get(x_16069, 0); +lean_inc(x_16070); +x_16071 = lean_ctor_get(x_16069, 1); +lean_inc(x_16071); +lean_dec(x_16069); +x_16072 = lean_ctor_get(x_16070, 0); +lean_inc(x_16072); +x_16073 = lean_ctor_get(x_16070, 1); +lean_inc(x_16073); +if (lean_is_exclusive(x_16070)) { + lean_ctor_release(x_16070, 0); + lean_ctor_release(x_16070, 1); + x_16074 = x_16070; +} else { + lean_dec_ref(x_16070); + x_16074 = lean_box(0); +} +if (lean_is_scalar(x_16042)) { + x_16075 = lean_alloc_ctor(1, 1, 0); +} else { + x_16075 = x_16042; +} +lean_ctor_set(x_16075, 0, x_16072); +if (lean_is_scalar(x_16074)) { + x_16076 = lean_alloc_ctor(0, 2, 0); +} else { + x_16076 = x_16074; +} +lean_ctor_set(x_16076, 0, x_16075); +lean_ctor_set(x_16076, 1, x_16073); +x_15823 = x_16076; +x_15824 = x_16071; +goto block_15853; +} +else +{ +lean_object* x_16077; lean_object* x_16078; lean_object* x_16079; lean_object* x_16080; +lean_dec(x_16042); +lean_dec(x_14824); +lean_dec(x_14813); +lean_dec(x_153); +lean_dec(x_5); +lean_dec(x_4); +lean_dec(x_2); +lean_dec(x_1); +x_16077 = lean_ctor_get(x_16069, 0); +lean_inc(x_16077); +x_16078 = lean_ctor_get(x_16069, 1); +lean_inc(x_16078); +if (lean_is_exclusive(x_16069)) { + lean_ctor_release(x_16069, 0); + lean_ctor_release(x_16069, 1); + x_16079 = x_16069; +} else { + lean_dec_ref(x_16069); + x_16079 = lean_box(0); +} +if (lean_is_scalar(x_16079)) { + x_16080 = lean_alloc_ctor(1, 2, 0); +} else { + x_16080 = x_16079; +} +lean_ctor_set(x_16080, 0, x_16077); +lean_ctor_set(x_16080, 1, x_16078); +return x_16080; +} +} +else +{ +lean_object* x_16081; lean_object* x_16082; lean_object* x_16083; lean_object* x_16084; +lean_dec(x_16061); +lean_dec(x_16056); +lean_dec(x_16051); +lean_dec(x_16050); +lean_dec(x_16042); +lean_dec(x_14824); +lean_dec(x_14813); +lean_dec(x_153); +lean_dec(x_5); +lean_dec(x_4); +lean_dec(x_2); +lean_dec(x_1); +x_16081 = lean_ctor_get(x_16064, 0); +lean_inc(x_16081); +x_16082 = lean_ctor_get(x_16064, 1); +lean_inc(x_16082); +if (lean_is_exclusive(x_16064)) { + lean_ctor_release(x_16064, 0); + lean_ctor_release(x_16064, 1); + x_16083 = x_16064; +} else { + lean_dec_ref(x_16064); + x_16083 = lean_box(0); +} +if (lean_is_scalar(x_16083)) { + x_16084 = lean_alloc_ctor(1, 2, 0); +} else { + x_16084 = x_16083; +} +lean_ctor_set(x_16084, 0, x_16081); +lean_ctor_set(x_16084, 1, x_16082); +return x_16084; +} +} +else +{ +lean_object* x_16085; lean_object* x_16086; lean_object* x_16087; lean_object* x_16088; lean_object* x_16089; lean_object* x_16090; lean_object* x_16091; lean_object* x_16092; lean_object* x_16093; +lean_dec(x_16045); +lean_dec(x_16043); +lean_inc(x_14813); 
+lean_inc(x_153); +x_16085 = lean_alloc_ctor(6, 2, 0); +lean_ctor_set(x_16085, 0, x_153); +lean_ctor_set(x_16085, 1, x_14813); +x_16086 = lean_ctor_get(x_1, 0); +lean_inc(x_16086); +x_16087 = l_Lean_IR_ToIR_bindVar(x_16086, x_14819, x_4, x_5, x_16040); +x_16088 = lean_ctor_get(x_16087, 0); +lean_inc(x_16088); +x_16089 = lean_ctor_get(x_16087, 1); +lean_inc(x_16089); +lean_dec(x_16087); +x_16090 = lean_ctor_get(x_16088, 0); +lean_inc(x_16090); +x_16091 = lean_ctor_get(x_16088, 1); +lean_inc(x_16091); +lean_dec(x_16088); +x_16092 = lean_ctor_get(x_1, 2); +lean_inc(x_16092); +lean_inc(x_5); +lean_inc(x_4); +x_16093 = l_Lean_IR_ToIR_lowerType(x_16092, x_16091, x_4, x_5, x_16089); +if (lean_obj_tag(x_16093) == 0) +{ +lean_object* x_16094; lean_object* x_16095; lean_object* x_16096; lean_object* x_16097; lean_object* x_16098; +x_16094 = lean_ctor_get(x_16093, 0); +lean_inc(x_16094); +x_16095 = lean_ctor_get(x_16093, 1); +lean_inc(x_16095); +lean_dec(x_16093); +x_16096 = lean_ctor_get(x_16094, 0); +lean_inc(x_16096); +x_16097 = lean_ctor_get(x_16094, 1); +lean_inc(x_16097); +lean_dec(x_16094); +lean_inc(x_5); +lean_inc(x_4); +lean_inc(x_2); +x_16098 = l_Lean_IR_ToIR_lowerLet___lambda__1(x_2, x_16090, x_16085, x_16096, x_16097, x_4, x_5, x_16095); +if (lean_obj_tag(x_16098) == 0) +{ +lean_object* x_16099; lean_object* x_16100; lean_object* x_16101; lean_object* x_16102; lean_object* x_16103; lean_object* x_16104; lean_object* x_16105; +x_16099 = lean_ctor_get(x_16098, 0); +lean_inc(x_16099); +x_16100 = lean_ctor_get(x_16098, 1); +lean_inc(x_16100); +lean_dec(x_16098); +x_16101 = lean_ctor_get(x_16099, 0); +lean_inc(x_16101); +x_16102 = lean_ctor_get(x_16099, 1); +lean_inc(x_16102); +if (lean_is_exclusive(x_16099)) { + lean_ctor_release(x_16099, 0); + lean_ctor_release(x_16099, 1); + x_16103 = x_16099; +} else { + lean_dec_ref(x_16099); + x_16103 = lean_box(0); +} +if (lean_is_scalar(x_16042)) { + x_16104 = lean_alloc_ctor(1, 1, 0); +} else { + x_16104 = x_16042; +} +lean_ctor_set(x_16104, 0, x_16101); +if (lean_is_scalar(x_16103)) { + x_16105 = lean_alloc_ctor(0, 2, 0); +} else { + x_16105 = x_16103; +} +lean_ctor_set(x_16105, 0, x_16104); +lean_ctor_set(x_16105, 1, x_16102); +x_15823 = x_16105; +x_15824 = x_16100; +goto block_15853; +} +else +{ +lean_object* x_16106; lean_object* x_16107; lean_object* x_16108; lean_object* x_16109; +lean_dec(x_16042); +lean_dec(x_14824); +lean_dec(x_14813); +lean_dec(x_153); +lean_dec(x_5); +lean_dec(x_4); +lean_dec(x_2); +lean_dec(x_1); +x_16106 = lean_ctor_get(x_16098, 0); +lean_inc(x_16106); +x_16107 = lean_ctor_get(x_16098, 1); +lean_inc(x_16107); +if (lean_is_exclusive(x_16098)) { + lean_ctor_release(x_16098, 0); + lean_ctor_release(x_16098, 1); + x_16108 = x_16098; +} else { + lean_dec_ref(x_16098); + x_16108 = lean_box(0); +} +if (lean_is_scalar(x_16108)) { + x_16109 = lean_alloc_ctor(1, 2, 0); +} else { + x_16109 = x_16108; +} +lean_ctor_set(x_16109, 0, x_16106); +lean_ctor_set(x_16109, 1, x_16107); +return x_16109; +} +} +else +{ +lean_object* x_16110; lean_object* x_16111; lean_object* x_16112; lean_object* x_16113; +lean_dec(x_16090); +lean_dec(x_16085); +lean_dec(x_16042); +lean_dec(x_14824); +lean_dec(x_14813); +lean_dec(x_153); +lean_dec(x_5); +lean_dec(x_4); +lean_dec(x_2); +lean_dec(x_1); +x_16110 = lean_ctor_get(x_16093, 0); +lean_inc(x_16110); +x_16111 = lean_ctor_get(x_16093, 1); +lean_inc(x_16111); +if (lean_is_exclusive(x_16093)) { + lean_ctor_release(x_16093, 0); + lean_ctor_release(x_16093, 1); + x_16112 = x_16093; +} else { + 
lean_dec_ref(x_16093); + x_16112 = lean_box(0); +} +if (lean_is_scalar(x_16112)) { + x_16113 = lean_alloc_ctor(1, 2, 0); +} else { + x_16113 = x_16112; +} +lean_ctor_set(x_16113, 0, x_16110); +lean_ctor_set(x_16113, 1, x_16111); +return x_16113; +} +} +} +else +{ +lean_object* x_16114; lean_object* x_16115; lean_object* x_16116; lean_object* x_16117; lean_object* x_16118; lean_object* x_16119; lean_object* x_16120; lean_object* x_16121; lean_object* x_16122; +lean_dec(x_16045); +lean_dec(x_16043); +lean_inc(x_14813); +lean_inc(x_153); +x_16114 = lean_alloc_ctor(7, 2, 0); +lean_ctor_set(x_16114, 0, x_153); +lean_ctor_set(x_16114, 1, x_14813); +x_16115 = lean_ctor_get(x_1, 0); +lean_inc(x_16115); +x_16116 = l_Lean_IR_ToIR_bindVar(x_16115, x_14819, x_4, x_5, x_16040); +x_16117 = lean_ctor_get(x_16116, 0); +lean_inc(x_16117); +x_16118 = lean_ctor_get(x_16116, 1); +lean_inc(x_16118); +lean_dec(x_16116); +x_16119 = lean_ctor_get(x_16117, 0); +lean_inc(x_16119); +x_16120 = lean_ctor_get(x_16117, 1); +lean_inc(x_16120); +lean_dec(x_16117); +x_16121 = lean_box(7); +lean_inc(x_5); +lean_inc(x_4); +lean_inc(x_2); +x_16122 = l_Lean_IR_ToIR_lowerLet___lambda__1(x_2, x_16119, x_16114, x_16121, x_16120, x_4, x_5, x_16118); +if (lean_obj_tag(x_16122) == 0) +{ +lean_object* x_16123; lean_object* x_16124; lean_object* x_16125; lean_object* x_16126; lean_object* x_16127; lean_object* x_16128; lean_object* x_16129; +x_16123 = lean_ctor_get(x_16122, 0); +lean_inc(x_16123); +x_16124 = lean_ctor_get(x_16122, 1); +lean_inc(x_16124); +lean_dec(x_16122); +x_16125 = lean_ctor_get(x_16123, 0); +lean_inc(x_16125); +x_16126 = lean_ctor_get(x_16123, 1); +lean_inc(x_16126); +if (lean_is_exclusive(x_16123)) { + lean_ctor_release(x_16123, 0); + lean_ctor_release(x_16123, 1); + x_16127 = x_16123; +} else { + lean_dec_ref(x_16123); + x_16127 = lean_box(0); +} +if (lean_is_scalar(x_16042)) { + x_16128 = lean_alloc_ctor(1, 1, 0); +} else { + x_16128 = x_16042; +} +lean_ctor_set(x_16128, 0, x_16125); +if (lean_is_scalar(x_16127)) { + x_16129 = lean_alloc_ctor(0, 2, 0); +} else { + x_16129 = x_16127; +} +lean_ctor_set(x_16129, 0, x_16128); +lean_ctor_set(x_16129, 1, x_16126); +x_15823 = x_16129; +x_15824 = x_16124; +goto block_15853; +} +else +{ +lean_object* x_16130; lean_object* x_16131; lean_object* x_16132; lean_object* x_16133; +lean_dec(x_16042); +lean_dec(x_14824); +lean_dec(x_14813); +lean_dec(x_153); +lean_dec(x_5); +lean_dec(x_4); +lean_dec(x_2); +lean_dec(x_1); +x_16130 = lean_ctor_get(x_16122, 0); +lean_inc(x_16130); +x_16131 = lean_ctor_get(x_16122, 1); +lean_inc(x_16131); +if (lean_is_exclusive(x_16122)) { + lean_ctor_release(x_16122, 0); + lean_ctor_release(x_16122, 1); + x_16132 = x_16122; +} else { + lean_dec_ref(x_16122); + x_16132 = lean_box(0); +} +if (lean_is_scalar(x_16132)) { + x_16133 = lean_alloc_ctor(1, 2, 0); +} else { + x_16133 = x_16132; +} +lean_ctor_set(x_16133, 0, x_16130); +lean_ctor_set(x_16133, 1, x_16131); +return x_16133; +} +} +} +} +block_15853: +{ +lean_object* x_15825; +x_15825 = lean_ctor_get(x_15823, 0); +lean_inc(x_15825); +if (lean_obj_tag(x_15825) == 0) +{ +lean_object* x_15826; lean_object* x_15827; lean_object* x_15828; lean_object* x_15829; lean_object* x_15830; lean_object* x_15831; lean_object* x_15832; lean_object* x_15833; lean_object* x_15834; lean_object* x_15835; +lean_dec(x_14824); +x_15826 = lean_ctor_get(x_15823, 1); +lean_inc(x_15826); +lean_dec(x_15823); +x_15827 = lean_alloc_ctor(6, 2, 0); +lean_ctor_set(x_15827, 0, x_153); +lean_ctor_set(x_15827, 1, x_14813); 
+x_15828 = lean_ctor_get(x_1, 0); +lean_inc(x_15828); +x_15829 = l_Lean_IR_ToIR_bindVar(x_15828, x_15826, x_4, x_5, x_15824); +x_15830 = lean_ctor_get(x_15829, 0); +lean_inc(x_15830); +x_15831 = lean_ctor_get(x_15829, 1); +lean_inc(x_15831); +lean_dec(x_15829); +x_15832 = lean_ctor_get(x_15830, 0); +lean_inc(x_15832); +x_15833 = lean_ctor_get(x_15830, 1); +lean_inc(x_15833); +lean_dec(x_15830); +x_15834 = lean_ctor_get(x_1, 2); +lean_inc(x_15834); +lean_dec(x_1); +lean_inc(x_5); +lean_inc(x_4); +x_15835 = l_Lean_IR_ToIR_lowerType(x_15834, x_15833, x_4, x_5, x_15831); +if (lean_obj_tag(x_15835) == 0) +{ +lean_object* x_15836; lean_object* x_15837; lean_object* x_15838; lean_object* x_15839; lean_object* x_15840; +x_15836 = lean_ctor_get(x_15835, 0); +lean_inc(x_15836); +x_15837 = lean_ctor_get(x_15835, 1); +lean_inc(x_15837); +lean_dec(x_15835); +x_15838 = lean_ctor_get(x_15836, 0); +lean_inc(x_15838); +x_15839 = lean_ctor_get(x_15836, 1); +lean_inc(x_15839); +lean_dec(x_15836); +x_15840 = l_Lean_IR_ToIR_lowerLet___lambda__1(x_2, x_15832, x_15827, x_15838, x_15839, x_4, x_5, x_15837); +return x_15840; +} +else +{ +uint8_t x_15841; +lean_dec(x_15832); +lean_dec(x_15827); +lean_dec(x_5); +lean_dec(x_4); +lean_dec(x_2); +x_15841 = !lean_is_exclusive(x_15835); +if (x_15841 == 0) +{ +return x_15835; +} +else +{ +lean_object* x_15842; lean_object* x_15843; lean_object* x_15844; +x_15842 = lean_ctor_get(x_15835, 0); +x_15843 = lean_ctor_get(x_15835, 1); +lean_inc(x_15843); +lean_inc(x_15842); +lean_dec(x_15835); +x_15844 = lean_alloc_ctor(1, 2, 0); +lean_ctor_set(x_15844, 0, x_15842); +lean_ctor_set(x_15844, 1, x_15843); +return x_15844; +} +} +} +else +{ +uint8_t x_15845; +lean_dec(x_14813); +lean_dec(x_153); +lean_dec(x_5); +lean_dec(x_4); +lean_dec(x_2); +lean_dec(x_1); +x_15845 = !lean_is_exclusive(x_15823); +if (x_15845 == 0) +{ +lean_object* x_15846; lean_object* x_15847; lean_object* x_15848; +x_15846 = lean_ctor_get(x_15823, 0); +lean_dec(x_15846); +x_15847 = lean_ctor_get(x_15825, 0); +lean_inc(x_15847); +lean_dec(x_15825); +lean_ctor_set(x_15823, 0, x_15847); +if (lean_is_scalar(x_14824)) { + x_15848 = lean_alloc_ctor(0, 2, 0); +} else { + x_15848 = x_14824; +} +lean_ctor_set(x_15848, 0, x_15823); +lean_ctor_set(x_15848, 1, x_15824); +return x_15848; +} +else +{ +lean_object* x_15849; lean_object* x_15850; lean_object* x_15851; lean_object* x_15852; +x_15849 = lean_ctor_get(x_15823, 1); +lean_inc(x_15849); +lean_dec(x_15823); +x_15850 = lean_ctor_get(x_15825, 0); +lean_inc(x_15850); +lean_dec(x_15825); +x_15851 = lean_alloc_ctor(0, 2, 0); +lean_ctor_set(x_15851, 0, x_15850); +lean_ctor_set(x_15851, 1, x_15849); +if (lean_is_scalar(x_14824)) { + x_15852 = lean_alloc_ctor(0, 2, 0); +} else { + x_15852 = x_14824; +} +lean_ctor_set(x_15852, 0, x_15851); +lean_ctor_set(x_15852, 1, x_15824); +return x_15852; +} +} +} +} +} +default: +{ +uint8_t x_16134; +lean_dec(x_14825); +lean_dec(x_14824); +lean_free_object(x_14815); +lean_dec(x_14813); +lean_dec(x_14805); +lean_dec(x_14804); +lean_dec(x_2); +lean_dec(x_1); +x_16134 = !lean_is_exclusive(x_14830); +if (x_16134 == 0) +{ +lean_object* x_16135; uint8_t x_16136; lean_object* x_16137; lean_object* x_16138; lean_object* x_16139; lean_object* x_16140; lean_object* x_16141; lean_object* x_16142; lean_object* x_16143; lean_object* x_16144; +x_16135 = lean_ctor_get(x_14830, 0); +lean_dec(x_16135); +x_16136 = 1; +x_16137 = l_Lean_IR_ToIR_lowerLet___closed__14; +x_16138 = l_Lean_Name_toString(x_153, x_16136, x_16137); +lean_ctor_set_tag(x_14830, 3); 
+lean_ctor_set(x_14830, 0, x_16138); +x_16139 = l_Lean_IR_ToIR_lowerLet___closed__29; +x_16140 = lean_alloc_ctor(5, 2, 0); +lean_ctor_set(x_16140, 0, x_16139); +lean_ctor_set(x_16140, 1, x_14830); +x_16141 = l_Lean_IR_ToIR_lowerLet___closed__31; +x_16142 = lean_alloc_ctor(5, 2, 0); +lean_ctor_set(x_16142, 0, x_16140); +lean_ctor_set(x_16142, 1, x_16141); +x_16143 = l_Lean_MessageData_ofFormat(x_16142); +x_16144 = l_Lean_throwError___at_Lean_IR_ToIR_lowerLet___spec__1(x_16143, x_14819, x_4, x_5, x_14823); +lean_dec(x_5); +lean_dec(x_4); +lean_dec(x_14819); +return x_16144; +} +else +{ +uint8_t x_16145; lean_object* x_16146; lean_object* x_16147; lean_object* x_16148; lean_object* x_16149; lean_object* x_16150; lean_object* x_16151; lean_object* x_16152; lean_object* x_16153; lean_object* x_16154; +lean_dec(x_14830); +x_16145 = 1; +x_16146 = l_Lean_IR_ToIR_lowerLet___closed__14; +x_16147 = l_Lean_Name_toString(x_153, x_16145, x_16146); +x_16148 = lean_alloc_ctor(3, 1, 0); +lean_ctor_set(x_16148, 0, x_16147); +x_16149 = l_Lean_IR_ToIR_lowerLet___closed__29; +x_16150 = lean_alloc_ctor(5, 2, 0); +lean_ctor_set(x_16150, 0, x_16149); +lean_ctor_set(x_16150, 1, x_16148); +x_16151 = l_Lean_IR_ToIR_lowerLet___closed__31; +x_16152 = lean_alloc_ctor(5, 2, 0); +lean_ctor_set(x_16152, 0, x_16150); +lean_ctor_set(x_16152, 1, x_16151); +x_16153 = l_Lean_MessageData_ofFormat(x_16152); +x_16154 = l_Lean_throwError___at_Lean_IR_ToIR_lowerLet___spec__1(x_16153, x_14819, x_4, x_5, x_14823); +lean_dec(x_5); +lean_dec(x_4); +lean_dec(x_14819); +return x_16154; +} +} +} +} +} +else +{ +lean_object* x_16155; lean_object* x_16156; lean_object* x_16157; lean_object* x_16158; lean_object* x_16159; lean_object* x_16160; uint8_t x_16161; lean_object* x_16162; +x_16155 = lean_ctor_get(x_14815, 1); +lean_inc(x_16155); +lean_dec(x_14815); +x_16156 = lean_st_ref_get(x_5, x_14816); +x_16157 = lean_ctor_get(x_16156, 0); +lean_inc(x_16157); +x_16158 = lean_ctor_get(x_16156, 1); +lean_inc(x_16158); +if (lean_is_exclusive(x_16156)) { + lean_ctor_release(x_16156, 0); + lean_ctor_release(x_16156, 1); + x_16159 = x_16156; +} else { + lean_dec_ref(x_16156); + x_16159 = lean_box(0); +} +x_16160 = lean_ctor_get(x_16157, 0); +lean_inc(x_16160); +lean_dec(x_16157); +x_16161 = 0; +lean_inc(x_153); +lean_inc(x_16160); +x_16162 = l_Lean_Environment_find_x3f(x_16160, x_153, x_16161); +if (lean_obj_tag(x_16162) == 0) +{ +lean_object* x_16163; lean_object* x_16164; +lean_dec(x_16160); +lean_dec(x_16159); +lean_dec(x_14813); +lean_dec(x_14805); +lean_dec(x_14804); +lean_dec(x_153); +lean_dec(x_2); +lean_dec(x_1); +x_16163 = l_Lean_IR_ToIR_lowerLet___closed__6; +x_16164 = l_panic___at_Lean_IR_ToIR_lowerAlt_loop___spec__1(x_16163, x_16155, x_4, x_5, x_16158); +return x_16164; +} +else +{ +lean_object* x_16165; +x_16165 = lean_ctor_get(x_16162, 0); +lean_inc(x_16165); +lean_dec(x_16162); +switch (lean_obj_tag(x_16165)) { +case 0: +{ +lean_object* x_16166; lean_object* x_16167; uint8_t x_16168; +lean_dec(x_16160); +lean_dec(x_14805); +lean_dec(x_14804); +if (lean_is_exclusive(x_16165)) { + lean_ctor_release(x_16165, 0); + x_16166 = x_16165; +} else { + lean_dec_ref(x_16165); + x_16166 = lean_box(0); +} +x_16167 = l_Lean_IR_ToIR_lowerLet___closed__9; +x_16168 = lean_name_eq(x_153, x_16167); +if (x_16168 == 0) +{ +lean_object* x_16169; uint8_t x_16170; +x_16169 = l_Lean_IR_ToIR_lowerLet___closed__11; +x_16170 = lean_name_eq(x_153, x_16169); +if (x_16170 == 0) +{ +lean_object* x_16171; lean_object* x_16172; lean_object* x_16173; +lean_dec(x_16159); 
+lean_inc(x_153); +x_16171 = l_Lean_IR_ToIR_findDecl(x_153, x_16155, x_4, x_5, x_16158); +x_16172 = lean_ctor_get(x_16171, 0); +lean_inc(x_16172); +x_16173 = lean_ctor_get(x_16172, 0); +lean_inc(x_16173); +if (lean_obj_tag(x_16173) == 0) +{ +lean_object* x_16174; lean_object* x_16175; lean_object* x_16176; lean_object* x_16177; uint8_t x_16178; lean_object* x_16179; lean_object* x_16180; lean_object* x_16181; lean_object* x_16182; lean_object* x_16183; lean_object* x_16184; lean_object* x_16185; lean_object* x_16186; lean_object* x_16187; +lean_dec(x_14813); +lean_dec(x_2); +lean_dec(x_1); +x_16174 = lean_ctor_get(x_16171, 1); +lean_inc(x_16174); +if (lean_is_exclusive(x_16171)) { + lean_ctor_release(x_16171, 0); + lean_ctor_release(x_16171, 1); + x_16175 = x_16171; +} else { + lean_dec_ref(x_16171); + x_16175 = lean_box(0); +} +x_16176 = lean_ctor_get(x_16172, 1); +lean_inc(x_16176); +if (lean_is_exclusive(x_16172)) { + lean_ctor_release(x_16172, 0); + lean_ctor_release(x_16172, 1); + x_16177 = x_16172; +} else { + lean_dec_ref(x_16172); + x_16177 = lean_box(0); +} +x_16178 = 1; +x_16179 = l_Lean_IR_ToIR_lowerLet___closed__14; +x_16180 = l_Lean_Name_toString(x_153, x_16178, x_16179); +if (lean_is_scalar(x_16166)) { + x_16181 = lean_alloc_ctor(3, 1, 0); +} else { + x_16181 = x_16166; + lean_ctor_set_tag(x_16181, 3); +} +lean_ctor_set(x_16181, 0, x_16180); +x_16182 = l_Lean_IR_ToIR_lowerLet___closed__13; +if (lean_is_scalar(x_16177)) { + x_16183 = lean_alloc_ctor(5, 2, 0); +} else { + x_16183 = x_16177; + lean_ctor_set_tag(x_16183, 5); +} +lean_ctor_set(x_16183, 0, x_16182); +lean_ctor_set(x_16183, 1, x_16181); +x_16184 = l_Lean_IR_ToIR_lowerLet___closed__16; +if (lean_is_scalar(x_16175)) { + x_16185 = lean_alloc_ctor(5, 2, 0); +} else { + x_16185 = x_16175; + lean_ctor_set_tag(x_16185, 5); +} +lean_ctor_set(x_16185, 0, x_16183); +lean_ctor_set(x_16185, 1, x_16184); +x_16186 = l_Lean_MessageData_ofFormat(x_16185); +x_16187 = l_Lean_throwError___at_Lean_IR_ToIR_lowerLet___spec__1(x_16186, x_16176, x_4, x_5, x_16174); +lean_dec(x_5); +lean_dec(x_4); +lean_dec(x_16176); +return x_16187; +} +else +{ +lean_object* x_16188; lean_object* x_16189; lean_object* x_16190; lean_object* x_16191; lean_object* x_16192; lean_object* x_16193; lean_object* x_16194; uint8_t x_16195; +lean_dec(x_16166); +x_16188 = lean_ctor_get(x_16171, 1); +lean_inc(x_16188); +lean_dec(x_16171); +x_16189 = lean_ctor_get(x_16172, 1); +lean_inc(x_16189); +if (lean_is_exclusive(x_16172)) { + lean_ctor_release(x_16172, 0); + lean_ctor_release(x_16172, 1); + x_16190 = x_16172; +} else { + lean_dec_ref(x_16172); + x_16190 = lean_box(0); +} +x_16191 = lean_ctor_get(x_16173, 0); +lean_inc(x_16191); +lean_dec(x_16173); +x_16192 = lean_array_get_size(x_14813); +x_16193 = l_Lean_IR_Decl_params(x_16191); +lean_dec(x_16191); +x_16194 = lean_array_get_size(x_16193); +lean_dec(x_16193); +x_16195 = lean_nat_dec_lt(x_16192, x_16194); +if (x_16195 == 0) +{ +uint8_t x_16196; +x_16196 = lean_nat_dec_eq(x_16192, x_16194); +if (x_16196 == 0) +{ +lean_object* x_16197; lean_object* x_16198; lean_object* x_16199; lean_object* x_16200; lean_object* x_16201; lean_object* x_16202; lean_object* x_16203; lean_object* x_16204; lean_object* x_16205; lean_object* x_16206; lean_object* x_16207; lean_object* x_16208; lean_object* x_16209; lean_object* x_16210; lean_object* x_16211; lean_object* x_16212; lean_object* x_16213; +x_16197 = lean_unsigned_to_nat(0u); +x_16198 = l_Array_extract___rarg(x_14813, x_16197, x_16194); +x_16199 = 
l_Array_extract___rarg(x_14813, x_16194, x_16192); +lean_dec(x_16192); +lean_dec(x_14813); +if (lean_is_scalar(x_16190)) { + x_16200 = lean_alloc_ctor(6, 2, 0); +} else { + x_16200 = x_16190; + lean_ctor_set_tag(x_16200, 6); +} +lean_ctor_set(x_16200, 0, x_153); +lean_ctor_set(x_16200, 1, x_16198); +x_16201 = lean_ctor_get(x_1, 0); +lean_inc(x_16201); +x_16202 = l_Lean_IR_ToIR_bindVar(x_16201, x_16189, x_4, x_5, x_16188); +x_16203 = lean_ctor_get(x_16202, 0); +lean_inc(x_16203); +x_16204 = lean_ctor_get(x_16202, 1); +lean_inc(x_16204); +lean_dec(x_16202); +x_16205 = lean_ctor_get(x_16203, 0); +lean_inc(x_16205); +x_16206 = lean_ctor_get(x_16203, 1); +lean_inc(x_16206); +lean_dec(x_16203); +x_16207 = l_Lean_IR_ToIR_newVar(x_16206, x_4, x_5, x_16204); +x_16208 = lean_ctor_get(x_16207, 0); +lean_inc(x_16208); +x_16209 = lean_ctor_get(x_16207, 1); +lean_inc(x_16209); +lean_dec(x_16207); +x_16210 = lean_ctor_get(x_16208, 0); +lean_inc(x_16210); +x_16211 = lean_ctor_get(x_16208, 1); +lean_inc(x_16211); +lean_dec(x_16208); +x_16212 = lean_ctor_get(x_1, 2); +lean_inc(x_16212); +lean_dec(x_1); +lean_inc(x_5); +lean_inc(x_4); +x_16213 = l_Lean_IR_ToIR_lowerType(x_16212, x_16211, x_4, x_5, x_16209); +if (lean_obj_tag(x_16213) == 0) +{ +lean_object* x_16214; lean_object* x_16215; lean_object* x_16216; lean_object* x_16217; lean_object* x_16218; +x_16214 = lean_ctor_get(x_16213, 0); +lean_inc(x_16214); +x_16215 = lean_ctor_get(x_16213, 1); +lean_inc(x_16215); +lean_dec(x_16213); +x_16216 = lean_ctor_get(x_16214, 0); +lean_inc(x_16216); +x_16217 = lean_ctor_get(x_16214, 1); +lean_inc(x_16217); +lean_dec(x_16214); +x_16218 = l_Lean_IR_ToIR_lowerLet___lambda__3(x_2, x_16210, x_16199, x_16205, x_16200, x_16216, x_16217, x_4, x_5, x_16215); +return x_16218; +} +else +{ +lean_object* x_16219; lean_object* x_16220; lean_object* x_16221; lean_object* x_16222; +lean_dec(x_16210); +lean_dec(x_16205); +lean_dec(x_16200); +lean_dec(x_16199); +lean_dec(x_5); +lean_dec(x_4); +lean_dec(x_2); +x_16219 = lean_ctor_get(x_16213, 0); +lean_inc(x_16219); +x_16220 = lean_ctor_get(x_16213, 1); +lean_inc(x_16220); +if (lean_is_exclusive(x_16213)) { + lean_ctor_release(x_16213, 0); + lean_ctor_release(x_16213, 1); + x_16221 = x_16213; +} else { + lean_dec_ref(x_16213); + x_16221 = lean_box(0); +} +if (lean_is_scalar(x_16221)) { + x_16222 = lean_alloc_ctor(1, 2, 0); +} else { + x_16222 = x_16221; +} +lean_ctor_set(x_16222, 0, x_16219); +lean_ctor_set(x_16222, 1, x_16220); +return x_16222; +} +} +else +{ +lean_object* x_16223; lean_object* x_16224; lean_object* x_16225; lean_object* x_16226; lean_object* x_16227; lean_object* x_16228; lean_object* x_16229; lean_object* x_16230; lean_object* x_16231; +lean_dec(x_16194); +lean_dec(x_16192); +if (lean_is_scalar(x_16190)) { + x_16223 = lean_alloc_ctor(6, 2, 0); +} else { + x_16223 = x_16190; + lean_ctor_set_tag(x_16223, 6); +} +lean_ctor_set(x_16223, 0, x_153); +lean_ctor_set(x_16223, 1, x_14813); +x_16224 = lean_ctor_get(x_1, 0); +lean_inc(x_16224); +x_16225 = l_Lean_IR_ToIR_bindVar(x_16224, x_16189, x_4, x_5, x_16188); +x_16226 = lean_ctor_get(x_16225, 0); +lean_inc(x_16226); +x_16227 = lean_ctor_get(x_16225, 1); +lean_inc(x_16227); +lean_dec(x_16225); +x_16228 = lean_ctor_get(x_16226, 0); +lean_inc(x_16228); +x_16229 = lean_ctor_get(x_16226, 1); +lean_inc(x_16229); +lean_dec(x_16226); +x_16230 = lean_ctor_get(x_1, 2); +lean_inc(x_16230); +lean_dec(x_1); +lean_inc(x_5); +lean_inc(x_4); +x_16231 = l_Lean_IR_ToIR_lowerType(x_16230, x_16229, x_4, x_5, x_16227); +if 
(lean_obj_tag(x_16231) == 0) +{ +lean_object* x_16232; lean_object* x_16233; lean_object* x_16234; lean_object* x_16235; lean_object* x_16236; +x_16232 = lean_ctor_get(x_16231, 0); +lean_inc(x_16232); +x_16233 = lean_ctor_get(x_16231, 1); +lean_inc(x_16233); +lean_dec(x_16231); +x_16234 = lean_ctor_get(x_16232, 0); +lean_inc(x_16234); +x_16235 = lean_ctor_get(x_16232, 1); +lean_inc(x_16235); +lean_dec(x_16232); +x_16236 = l_Lean_IR_ToIR_lowerLet___lambda__1(x_2, x_16228, x_16223, x_16234, x_16235, x_4, x_5, x_16233); +return x_16236; +} +else +{ +lean_object* x_16237; lean_object* x_16238; lean_object* x_16239; lean_object* x_16240; +lean_dec(x_16228); +lean_dec(x_16223); +lean_dec(x_5); +lean_dec(x_4); +lean_dec(x_2); +x_16237 = lean_ctor_get(x_16231, 0); +lean_inc(x_16237); +x_16238 = lean_ctor_get(x_16231, 1); +lean_inc(x_16238); +if (lean_is_exclusive(x_16231)) { + lean_ctor_release(x_16231, 0); + lean_ctor_release(x_16231, 1); + x_16239 = x_16231; +} else { + lean_dec_ref(x_16231); + x_16239 = lean_box(0); +} +if (lean_is_scalar(x_16239)) { + x_16240 = lean_alloc_ctor(1, 2, 0); +} else { + x_16240 = x_16239; +} +lean_ctor_set(x_16240, 0, x_16237); +lean_ctor_set(x_16240, 1, x_16238); +return x_16240; +} +} +} +else +{ +lean_object* x_16241; lean_object* x_16242; lean_object* x_16243; lean_object* x_16244; lean_object* x_16245; lean_object* x_16246; lean_object* x_16247; lean_object* x_16248; lean_object* x_16249; +lean_dec(x_16194); +lean_dec(x_16192); +if (lean_is_scalar(x_16190)) { + x_16241 = lean_alloc_ctor(7, 2, 0); +} else { + x_16241 = x_16190; + lean_ctor_set_tag(x_16241, 7); +} +lean_ctor_set(x_16241, 0, x_153); +lean_ctor_set(x_16241, 1, x_14813); +x_16242 = lean_ctor_get(x_1, 0); +lean_inc(x_16242); +lean_dec(x_1); +x_16243 = l_Lean_IR_ToIR_bindVar(x_16242, x_16189, x_4, x_5, x_16188); +x_16244 = lean_ctor_get(x_16243, 0); +lean_inc(x_16244); +x_16245 = lean_ctor_get(x_16243, 1); +lean_inc(x_16245); +lean_dec(x_16243); +x_16246 = lean_ctor_get(x_16244, 0); +lean_inc(x_16246); +x_16247 = lean_ctor_get(x_16244, 1); +lean_inc(x_16247); +lean_dec(x_16244); +x_16248 = lean_box(7); +x_16249 = l_Lean_IR_ToIR_lowerLet___lambda__1(x_2, x_16246, x_16241, x_16248, x_16247, x_4, x_5, x_16245); +return x_16249; +} +} +} +else +{ +lean_object* x_16250; lean_object* x_16251; lean_object* x_16252; +lean_dec(x_16166); +lean_dec(x_14813); +lean_dec(x_153); +lean_dec(x_5); +lean_dec(x_4); +lean_dec(x_2); +lean_dec(x_1); +x_16250 = lean_box(13); +x_16251 = lean_alloc_ctor(0, 2, 0); +lean_ctor_set(x_16251, 0, x_16250); +lean_ctor_set(x_16251, 1, x_16155); +if (lean_is_scalar(x_16159)) { + x_16252 = lean_alloc_ctor(0, 2, 0); +} else { + x_16252 = x_16159; +} +lean_ctor_set(x_16252, 0, x_16251); +lean_ctor_set(x_16252, 1, x_16158); +return x_16252; +} +} +else +{ +lean_object* x_16253; lean_object* x_16254; lean_object* x_16255; +lean_dec(x_16166); +lean_dec(x_16159); +lean_dec(x_153); +x_16253 = l_Lean_IR_instInhabitedArg; +x_16254 = lean_unsigned_to_nat(2u); +x_16255 = lean_array_get(x_16253, x_14813, x_16254); +lean_dec(x_14813); +if (lean_obj_tag(x_16255) == 0) +{ +lean_object* x_16256; lean_object* x_16257; lean_object* x_16258; lean_object* x_16259; lean_object* x_16260; lean_object* x_16261; lean_object* x_16262; +x_16256 = lean_ctor_get(x_16255, 0); +lean_inc(x_16256); +lean_dec(x_16255); +x_16257 = lean_ctor_get(x_1, 0); +lean_inc(x_16257); +lean_dec(x_1); +x_16258 = l_Lean_IR_ToIR_bindVarToVarId(x_16257, x_16256, x_16155, x_4, x_5, x_16158); +x_16259 = lean_ctor_get(x_16258, 0); 
+lean_inc(x_16259); +x_16260 = lean_ctor_get(x_16258, 1); +lean_inc(x_16260); +lean_dec(x_16258); +x_16261 = lean_ctor_get(x_16259, 1); +lean_inc(x_16261); +lean_dec(x_16259); +x_16262 = l_Lean_IR_ToIR_lowerCode(x_2, x_16261, x_4, x_5, x_16260); +return x_16262; +} +else +{ +lean_object* x_16263; lean_object* x_16264; lean_object* x_16265; lean_object* x_16266; lean_object* x_16267; lean_object* x_16268; +x_16263 = lean_ctor_get(x_1, 0); +lean_inc(x_16263); +lean_dec(x_1); +x_16264 = l_Lean_IR_ToIR_bindErased(x_16263, x_16155, x_4, x_5, x_16158); +x_16265 = lean_ctor_get(x_16264, 0); +lean_inc(x_16265); +x_16266 = lean_ctor_get(x_16264, 1); +lean_inc(x_16266); +lean_dec(x_16264); +x_16267 = lean_ctor_get(x_16265, 1); +lean_inc(x_16267); +lean_dec(x_16265); +x_16268 = l_Lean_IR_ToIR_lowerCode(x_2, x_16267, x_4, x_5, x_16266); +return x_16268; +} +} +} +case 1: +{ +lean_object* x_16269; lean_object* x_16270; lean_object* x_16297; lean_object* x_16298; +lean_dec(x_16165); +lean_dec(x_16160); +lean_dec(x_14805); +lean_dec(x_14804); +lean_inc(x_153); +x_16297 = l_Lean_Compiler_LCNF_getMonoDecl_x3f(x_153, x_4, x_5, x_16158); +x_16298 = lean_ctor_get(x_16297, 0); +lean_inc(x_16298); +if (lean_obj_tag(x_16298) == 0) +{ +lean_object* x_16299; lean_object* x_16300; lean_object* x_16301; +x_16299 = lean_ctor_get(x_16297, 1); +lean_inc(x_16299); +lean_dec(x_16297); +x_16300 = lean_box(0); +x_16301 = lean_alloc_ctor(0, 2, 0); +lean_ctor_set(x_16301, 0, x_16300); +lean_ctor_set(x_16301, 1, x_16155); +x_16269 = x_16301; +x_16270 = x_16299; +goto block_16296; +} +else +{ +lean_object* x_16302; lean_object* x_16303; lean_object* x_16304; lean_object* x_16305; lean_object* x_16306; lean_object* x_16307; lean_object* x_16308; uint8_t x_16309; +x_16302 = lean_ctor_get(x_16297, 1); +lean_inc(x_16302); +if (lean_is_exclusive(x_16297)) { + lean_ctor_release(x_16297, 0); + lean_ctor_release(x_16297, 1); + x_16303 = x_16297; +} else { + lean_dec_ref(x_16297); + x_16303 = lean_box(0); +} +x_16304 = lean_ctor_get(x_16298, 0); +lean_inc(x_16304); +if (lean_is_exclusive(x_16298)) { + lean_ctor_release(x_16298, 0); + x_16305 = x_16298; +} else { + lean_dec_ref(x_16298); + x_16305 = lean_box(0); +} +x_16306 = lean_array_get_size(x_14813); +x_16307 = lean_ctor_get(x_16304, 3); +lean_inc(x_16307); +lean_dec(x_16304); +x_16308 = lean_array_get_size(x_16307); +lean_dec(x_16307); +x_16309 = lean_nat_dec_lt(x_16306, x_16308); +if (x_16309 == 0) +{ +uint8_t x_16310; +x_16310 = lean_nat_dec_eq(x_16306, x_16308); +if (x_16310 == 0) +{ +lean_object* x_16311; lean_object* x_16312; lean_object* x_16313; lean_object* x_16314; lean_object* x_16315; lean_object* x_16316; lean_object* x_16317; lean_object* x_16318; lean_object* x_16319; lean_object* x_16320; lean_object* x_16321; lean_object* x_16322; lean_object* x_16323; lean_object* x_16324; lean_object* x_16325; lean_object* x_16326; lean_object* x_16327; +x_16311 = lean_unsigned_to_nat(0u); +x_16312 = l_Array_extract___rarg(x_14813, x_16311, x_16308); +x_16313 = l_Array_extract___rarg(x_14813, x_16308, x_16306); +lean_dec(x_16306); +lean_inc(x_153); +if (lean_is_scalar(x_16303)) { + x_16314 = lean_alloc_ctor(6, 2, 0); +} else { + x_16314 = x_16303; + lean_ctor_set_tag(x_16314, 6); +} +lean_ctor_set(x_16314, 0, x_153); +lean_ctor_set(x_16314, 1, x_16312); +x_16315 = lean_ctor_get(x_1, 0); +lean_inc(x_16315); +x_16316 = l_Lean_IR_ToIR_bindVar(x_16315, x_16155, x_4, x_5, x_16302); +x_16317 = lean_ctor_get(x_16316, 0); +lean_inc(x_16317); +x_16318 = lean_ctor_get(x_16316, 1); 
+lean_inc(x_16318); +lean_dec(x_16316); +x_16319 = lean_ctor_get(x_16317, 0); +lean_inc(x_16319); +x_16320 = lean_ctor_get(x_16317, 1); +lean_inc(x_16320); +lean_dec(x_16317); +x_16321 = l_Lean_IR_ToIR_newVar(x_16320, x_4, x_5, x_16318); +x_16322 = lean_ctor_get(x_16321, 0); +lean_inc(x_16322); +x_16323 = lean_ctor_get(x_16321, 1); +lean_inc(x_16323); +lean_dec(x_16321); +x_16324 = lean_ctor_get(x_16322, 0); +lean_inc(x_16324); +x_16325 = lean_ctor_get(x_16322, 1); +lean_inc(x_16325); +lean_dec(x_16322); +x_16326 = lean_ctor_get(x_1, 2); +lean_inc(x_16326); +lean_inc(x_5); +lean_inc(x_4); +x_16327 = l_Lean_IR_ToIR_lowerType(x_16326, x_16325, x_4, x_5, x_16323); +if (lean_obj_tag(x_16327) == 0) +{ +lean_object* x_16328; lean_object* x_16329; lean_object* x_16330; lean_object* x_16331; lean_object* x_16332; +x_16328 = lean_ctor_get(x_16327, 0); +lean_inc(x_16328); +x_16329 = lean_ctor_get(x_16327, 1); +lean_inc(x_16329); +lean_dec(x_16327); +x_16330 = lean_ctor_get(x_16328, 0); +lean_inc(x_16330); +x_16331 = lean_ctor_get(x_16328, 1); +lean_inc(x_16331); +lean_dec(x_16328); +lean_inc(x_5); +lean_inc(x_4); +lean_inc(x_2); +x_16332 = l_Lean_IR_ToIR_lowerLet___lambda__3(x_2, x_16324, x_16313, x_16319, x_16314, x_16330, x_16331, x_4, x_5, x_16329); +if (lean_obj_tag(x_16332) == 0) +{ +lean_object* x_16333; lean_object* x_16334; lean_object* x_16335; lean_object* x_16336; lean_object* x_16337; lean_object* x_16338; lean_object* x_16339; +x_16333 = lean_ctor_get(x_16332, 0); +lean_inc(x_16333); +x_16334 = lean_ctor_get(x_16332, 1); +lean_inc(x_16334); +lean_dec(x_16332); +x_16335 = lean_ctor_get(x_16333, 0); +lean_inc(x_16335); +x_16336 = lean_ctor_get(x_16333, 1); +lean_inc(x_16336); +if (lean_is_exclusive(x_16333)) { + lean_ctor_release(x_16333, 0); + lean_ctor_release(x_16333, 1); + x_16337 = x_16333; +} else { + lean_dec_ref(x_16333); + x_16337 = lean_box(0); +} +if (lean_is_scalar(x_16305)) { + x_16338 = lean_alloc_ctor(1, 1, 0); +} else { + x_16338 = x_16305; +} +lean_ctor_set(x_16338, 0, x_16335); +if (lean_is_scalar(x_16337)) { + x_16339 = lean_alloc_ctor(0, 2, 0); +} else { + x_16339 = x_16337; +} +lean_ctor_set(x_16339, 0, x_16338); +lean_ctor_set(x_16339, 1, x_16336); +x_16269 = x_16339; +x_16270 = x_16334; +goto block_16296; +} +else +{ +lean_object* x_16340; lean_object* x_16341; lean_object* x_16342; lean_object* x_16343; +lean_dec(x_16305); +lean_dec(x_16159); +lean_dec(x_14813); +lean_dec(x_153); +lean_dec(x_5); +lean_dec(x_4); +lean_dec(x_2); +lean_dec(x_1); +x_16340 = lean_ctor_get(x_16332, 0); +lean_inc(x_16340); +x_16341 = lean_ctor_get(x_16332, 1); +lean_inc(x_16341); +if (lean_is_exclusive(x_16332)) { + lean_ctor_release(x_16332, 0); + lean_ctor_release(x_16332, 1); + x_16342 = x_16332; +} else { + lean_dec_ref(x_16332); + x_16342 = lean_box(0); +} +if (lean_is_scalar(x_16342)) { + x_16343 = lean_alloc_ctor(1, 2, 0); +} else { + x_16343 = x_16342; +} +lean_ctor_set(x_16343, 0, x_16340); +lean_ctor_set(x_16343, 1, x_16341); +return x_16343; +} +} +else +{ +lean_object* x_16344; lean_object* x_16345; lean_object* x_16346; lean_object* x_16347; +lean_dec(x_16324); +lean_dec(x_16319); +lean_dec(x_16314); +lean_dec(x_16313); +lean_dec(x_16305); +lean_dec(x_16159); +lean_dec(x_14813); +lean_dec(x_153); +lean_dec(x_5); +lean_dec(x_4); +lean_dec(x_2); +lean_dec(x_1); +x_16344 = lean_ctor_get(x_16327, 0); +lean_inc(x_16344); +x_16345 = lean_ctor_get(x_16327, 1); +lean_inc(x_16345); +if (lean_is_exclusive(x_16327)) { + lean_ctor_release(x_16327, 0); + lean_ctor_release(x_16327, 1); + 
x_16346 = x_16327; +} else { + lean_dec_ref(x_16327); + x_16346 = lean_box(0); +} +if (lean_is_scalar(x_16346)) { + x_16347 = lean_alloc_ctor(1, 2, 0); +} else { + x_16347 = x_16346; +} +lean_ctor_set(x_16347, 0, x_16344); +lean_ctor_set(x_16347, 1, x_16345); +return x_16347; +} +} +else +{ +lean_object* x_16348; lean_object* x_16349; lean_object* x_16350; lean_object* x_16351; lean_object* x_16352; lean_object* x_16353; lean_object* x_16354; lean_object* x_16355; lean_object* x_16356; +lean_dec(x_16308); +lean_dec(x_16306); +lean_inc(x_14813); +lean_inc(x_153); +if (lean_is_scalar(x_16303)) { + x_16348 = lean_alloc_ctor(6, 2, 0); +} else { + x_16348 = x_16303; + lean_ctor_set_tag(x_16348, 6); +} +lean_ctor_set(x_16348, 0, x_153); +lean_ctor_set(x_16348, 1, x_14813); +x_16349 = lean_ctor_get(x_1, 0); +lean_inc(x_16349); +x_16350 = l_Lean_IR_ToIR_bindVar(x_16349, x_16155, x_4, x_5, x_16302); +x_16351 = lean_ctor_get(x_16350, 0); +lean_inc(x_16351); +x_16352 = lean_ctor_get(x_16350, 1); +lean_inc(x_16352); +lean_dec(x_16350); +x_16353 = lean_ctor_get(x_16351, 0); +lean_inc(x_16353); +x_16354 = lean_ctor_get(x_16351, 1); +lean_inc(x_16354); +lean_dec(x_16351); +x_16355 = lean_ctor_get(x_1, 2); +lean_inc(x_16355); +lean_inc(x_5); +lean_inc(x_4); +x_16356 = l_Lean_IR_ToIR_lowerType(x_16355, x_16354, x_4, x_5, x_16352); +if (lean_obj_tag(x_16356) == 0) +{ +lean_object* x_16357; lean_object* x_16358; lean_object* x_16359; lean_object* x_16360; lean_object* x_16361; +x_16357 = lean_ctor_get(x_16356, 0); +lean_inc(x_16357); +x_16358 = lean_ctor_get(x_16356, 1); +lean_inc(x_16358); +lean_dec(x_16356); +x_16359 = lean_ctor_get(x_16357, 0); +lean_inc(x_16359); +x_16360 = lean_ctor_get(x_16357, 1); +lean_inc(x_16360); +lean_dec(x_16357); +lean_inc(x_5); +lean_inc(x_4); +lean_inc(x_2); +x_16361 = l_Lean_IR_ToIR_lowerLet___lambda__1(x_2, x_16353, x_16348, x_16359, x_16360, x_4, x_5, x_16358); +if (lean_obj_tag(x_16361) == 0) +{ +lean_object* x_16362; lean_object* x_16363; lean_object* x_16364; lean_object* x_16365; lean_object* x_16366; lean_object* x_16367; lean_object* x_16368; +x_16362 = lean_ctor_get(x_16361, 0); +lean_inc(x_16362); +x_16363 = lean_ctor_get(x_16361, 1); +lean_inc(x_16363); +lean_dec(x_16361); +x_16364 = lean_ctor_get(x_16362, 0); +lean_inc(x_16364); +x_16365 = lean_ctor_get(x_16362, 1); +lean_inc(x_16365); +if (lean_is_exclusive(x_16362)) { + lean_ctor_release(x_16362, 0); + lean_ctor_release(x_16362, 1); + x_16366 = x_16362; +} else { + lean_dec_ref(x_16362); + x_16366 = lean_box(0); +} +if (lean_is_scalar(x_16305)) { + x_16367 = lean_alloc_ctor(1, 1, 0); +} else { + x_16367 = x_16305; +} +lean_ctor_set(x_16367, 0, x_16364); +if (lean_is_scalar(x_16366)) { + x_16368 = lean_alloc_ctor(0, 2, 0); +} else { + x_16368 = x_16366; +} +lean_ctor_set(x_16368, 0, x_16367); +lean_ctor_set(x_16368, 1, x_16365); +x_16269 = x_16368; +x_16270 = x_16363; +goto block_16296; +} +else +{ +lean_object* x_16369; lean_object* x_16370; lean_object* x_16371; lean_object* x_16372; +lean_dec(x_16305); +lean_dec(x_16159); +lean_dec(x_14813); +lean_dec(x_153); +lean_dec(x_5); +lean_dec(x_4); +lean_dec(x_2); +lean_dec(x_1); +x_16369 = lean_ctor_get(x_16361, 0); +lean_inc(x_16369); +x_16370 = lean_ctor_get(x_16361, 1); +lean_inc(x_16370); +if (lean_is_exclusive(x_16361)) { + lean_ctor_release(x_16361, 0); + lean_ctor_release(x_16361, 1); + x_16371 = x_16361; +} else { + lean_dec_ref(x_16361); + x_16371 = lean_box(0); +} +if (lean_is_scalar(x_16371)) { + x_16372 = lean_alloc_ctor(1, 2, 0); +} else { + x_16372 = 
x_16371; +} +lean_ctor_set(x_16372, 0, x_16369); +lean_ctor_set(x_16372, 1, x_16370); +return x_16372; +} +} +else +{ +lean_object* x_16373; lean_object* x_16374; lean_object* x_16375; lean_object* x_16376; +lean_dec(x_16353); +lean_dec(x_16348); +lean_dec(x_16305); +lean_dec(x_16159); +lean_dec(x_14813); +lean_dec(x_153); +lean_dec(x_5); +lean_dec(x_4); +lean_dec(x_2); +lean_dec(x_1); +x_16373 = lean_ctor_get(x_16356, 0); +lean_inc(x_16373); +x_16374 = lean_ctor_get(x_16356, 1); +lean_inc(x_16374); +if (lean_is_exclusive(x_16356)) { + lean_ctor_release(x_16356, 0); + lean_ctor_release(x_16356, 1); + x_16375 = x_16356; +} else { + lean_dec_ref(x_16356); + x_16375 = lean_box(0); +} +if (lean_is_scalar(x_16375)) { + x_16376 = lean_alloc_ctor(1, 2, 0); +} else { + x_16376 = x_16375; +} +lean_ctor_set(x_16376, 0, x_16373); +lean_ctor_set(x_16376, 1, x_16374); +return x_16376; +} +} +} +else +{ +lean_object* x_16377; lean_object* x_16378; lean_object* x_16379; lean_object* x_16380; lean_object* x_16381; lean_object* x_16382; lean_object* x_16383; lean_object* x_16384; lean_object* x_16385; +lean_dec(x_16308); +lean_dec(x_16306); +lean_inc(x_14813); +lean_inc(x_153); +if (lean_is_scalar(x_16303)) { + x_16377 = lean_alloc_ctor(7, 2, 0); +} else { + x_16377 = x_16303; + lean_ctor_set_tag(x_16377, 7); +} +lean_ctor_set(x_16377, 0, x_153); +lean_ctor_set(x_16377, 1, x_14813); +x_16378 = lean_ctor_get(x_1, 0); +lean_inc(x_16378); +x_16379 = l_Lean_IR_ToIR_bindVar(x_16378, x_16155, x_4, x_5, x_16302); +x_16380 = lean_ctor_get(x_16379, 0); +lean_inc(x_16380); +x_16381 = lean_ctor_get(x_16379, 1); +lean_inc(x_16381); +lean_dec(x_16379); +x_16382 = lean_ctor_get(x_16380, 0); +lean_inc(x_16382); +x_16383 = lean_ctor_get(x_16380, 1); +lean_inc(x_16383); +lean_dec(x_16380); +x_16384 = lean_box(7); +lean_inc(x_5); +lean_inc(x_4); +lean_inc(x_2); +x_16385 = l_Lean_IR_ToIR_lowerLet___lambda__1(x_2, x_16382, x_16377, x_16384, x_16383, x_4, x_5, x_16381); +if (lean_obj_tag(x_16385) == 0) +{ +lean_object* x_16386; lean_object* x_16387; lean_object* x_16388; lean_object* x_16389; lean_object* x_16390; lean_object* x_16391; lean_object* x_16392; +x_16386 = lean_ctor_get(x_16385, 0); +lean_inc(x_16386); +x_16387 = lean_ctor_get(x_16385, 1); +lean_inc(x_16387); +lean_dec(x_16385); +x_16388 = lean_ctor_get(x_16386, 0); +lean_inc(x_16388); +x_16389 = lean_ctor_get(x_16386, 1); +lean_inc(x_16389); +if (lean_is_exclusive(x_16386)) { + lean_ctor_release(x_16386, 0); + lean_ctor_release(x_16386, 1); + x_16390 = x_16386; +} else { + lean_dec_ref(x_16386); + x_16390 = lean_box(0); +} +if (lean_is_scalar(x_16305)) { + x_16391 = lean_alloc_ctor(1, 1, 0); +} else { + x_16391 = x_16305; +} +lean_ctor_set(x_16391, 0, x_16388); +if (lean_is_scalar(x_16390)) { + x_16392 = lean_alloc_ctor(0, 2, 0); +} else { + x_16392 = x_16390; +} +lean_ctor_set(x_16392, 0, x_16391); +lean_ctor_set(x_16392, 1, x_16389); +x_16269 = x_16392; +x_16270 = x_16387; +goto block_16296; +} +else +{ +lean_object* x_16393; lean_object* x_16394; lean_object* x_16395; lean_object* x_16396; +lean_dec(x_16305); +lean_dec(x_16159); +lean_dec(x_14813); +lean_dec(x_153); +lean_dec(x_5); +lean_dec(x_4); +lean_dec(x_2); +lean_dec(x_1); +x_16393 = lean_ctor_get(x_16385, 0); +lean_inc(x_16393); +x_16394 = lean_ctor_get(x_16385, 1); +lean_inc(x_16394); +if (lean_is_exclusive(x_16385)) { + lean_ctor_release(x_16385, 0); + lean_ctor_release(x_16385, 1); + x_16395 = x_16385; +} else { + lean_dec_ref(x_16385); + x_16395 = lean_box(0); +} +if (lean_is_scalar(x_16395)) { + 
x_16396 = lean_alloc_ctor(1, 2, 0); +} else { + x_16396 = x_16395; +} +lean_ctor_set(x_16396, 0, x_16393); +lean_ctor_set(x_16396, 1, x_16394); +return x_16396; +} +} +} +block_16296: +{ +lean_object* x_16271; +x_16271 = lean_ctor_get(x_16269, 0); +lean_inc(x_16271); +if (lean_obj_tag(x_16271) == 0) +{ +lean_object* x_16272; lean_object* x_16273; lean_object* x_16274; lean_object* x_16275; lean_object* x_16276; lean_object* x_16277; lean_object* x_16278; lean_object* x_16279; lean_object* x_16280; lean_object* x_16281; +lean_dec(x_16159); +x_16272 = lean_ctor_get(x_16269, 1); +lean_inc(x_16272); +lean_dec(x_16269); +x_16273 = lean_alloc_ctor(6, 2, 0); +lean_ctor_set(x_16273, 0, x_153); +lean_ctor_set(x_16273, 1, x_14813); +x_16274 = lean_ctor_get(x_1, 0); +lean_inc(x_16274); +x_16275 = l_Lean_IR_ToIR_bindVar(x_16274, x_16272, x_4, x_5, x_16270); +x_16276 = lean_ctor_get(x_16275, 0); +lean_inc(x_16276); +x_16277 = lean_ctor_get(x_16275, 1); +lean_inc(x_16277); +lean_dec(x_16275); +x_16278 = lean_ctor_get(x_16276, 0); +lean_inc(x_16278); +x_16279 = lean_ctor_get(x_16276, 1); +lean_inc(x_16279); +lean_dec(x_16276); +x_16280 = lean_ctor_get(x_1, 2); +lean_inc(x_16280); +lean_dec(x_1); +lean_inc(x_5); +lean_inc(x_4); +x_16281 = l_Lean_IR_ToIR_lowerType(x_16280, x_16279, x_4, x_5, x_16277); +if (lean_obj_tag(x_16281) == 0) +{ +lean_object* x_16282; lean_object* x_16283; lean_object* x_16284; lean_object* x_16285; lean_object* x_16286; +x_16282 = lean_ctor_get(x_16281, 0); +lean_inc(x_16282); +x_16283 = lean_ctor_get(x_16281, 1); +lean_inc(x_16283); +lean_dec(x_16281); +x_16284 = lean_ctor_get(x_16282, 0); +lean_inc(x_16284); +x_16285 = lean_ctor_get(x_16282, 1); +lean_inc(x_16285); +lean_dec(x_16282); +x_16286 = l_Lean_IR_ToIR_lowerLet___lambda__1(x_2, x_16278, x_16273, x_16284, x_16285, x_4, x_5, x_16283); +return x_16286; +} +else +{ +lean_object* x_16287; lean_object* x_16288; lean_object* x_16289; lean_object* x_16290; +lean_dec(x_16278); +lean_dec(x_16273); +lean_dec(x_5); +lean_dec(x_4); +lean_dec(x_2); +x_16287 = lean_ctor_get(x_16281, 0); +lean_inc(x_16287); +x_16288 = lean_ctor_get(x_16281, 1); +lean_inc(x_16288); +if (lean_is_exclusive(x_16281)) { + lean_ctor_release(x_16281, 0); + lean_ctor_release(x_16281, 1); + x_16289 = x_16281; +} else { + lean_dec_ref(x_16281); + x_16289 = lean_box(0); +} +if (lean_is_scalar(x_16289)) { + x_16290 = lean_alloc_ctor(1, 2, 0); +} else { + x_16290 = x_16289; +} +lean_ctor_set(x_16290, 0, x_16287); +lean_ctor_set(x_16290, 1, x_16288); +return x_16290; +} +} +else +{ +lean_object* x_16291; lean_object* x_16292; lean_object* x_16293; lean_object* x_16294; lean_object* x_16295; +lean_dec(x_14813); +lean_dec(x_153); +lean_dec(x_5); +lean_dec(x_4); +lean_dec(x_2); +lean_dec(x_1); +x_16291 = lean_ctor_get(x_16269, 1); +lean_inc(x_16291); +if (lean_is_exclusive(x_16269)) { + lean_ctor_release(x_16269, 0); + lean_ctor_release(x_16269, 1); + x_16292 = x_16269; +} else { + lean_dec_ref(x_16269); + x_16292 = lean_box(0); +} +x_16293 = lean_ctor_get(x_16271, 0); +lean_inc(x_16293); +lean_dec(x_16271); +if (lean_is_scalar(x_16292)) { + x_16294 = lean_alloc_ctor(0, 2, 0); +} else { + x_16294 = x_16292; +} +lean_ctor_set(x_16294, 0, x_16293); +lean_ctor_set(x_16294, 1, x_16291); +if (lean_is_scalar(x_16159)) { + x_16295 = lean_alloc_ctor(0, 2, 0); +} else { + x_16295 = x_16159; +} +lean_ctor_set(x_16295, 0, x_16294); +lean_ctor_set(x_16295, 1, x_16270); +return x_16295; +} +} +} +case 2: +{ +lean_object* x_16397; lean_object* x_16398; +lean_dec(x_16165); 
+lean_dec(x_16160); +lean_dec(x_16159); +lean_dec(x_14813); +lean_dec(x_14805); +lean_dec(x_14804); +lean_dec(x_153); +lean_dec(x_2); +lean_dec(x_1); +x_16397 = l_Lean_IR_ToIR_lowerLet___closed__18; +x_16398 = l_panic___at_Lean_IR_ToIR_lowerAlt_loop___spec__1(x_16397, x_16155, x_4, x_5, x_16158); +return x_16398; +} +case 3: +{ +lean_object* x_16399; lean_object* x_16400; lean_object* x_16427; lean_object* x_16428; +lean_dec(x_16165); +lean_dec(x_16160); +lean_dec(x_14805); +lean_dec(x_14804); +lean_inc(x_153); +x_16427 = l_Lean_Compiler_LCNF_getMonoDecl_x3f(x_153, x_4, x_5, x_16158); +x_16428 = lean_ctor_get(x_16427, 0); +lean_inc(x_16428); +if (lean_obj_tag(x_16428) == 0) +{ +lean_object* x_16429; lean_object* x_16430; lean_object* x_16431; +x_16429 = lean_ctor_get(x_16427, 1); +lean_inc(x_16429); +lean_dec(x_16427); +x_16430 = lean_box(0); +x_16431 = lean_alloc_ctor(0, 2, 0); +lean_ctor_set(x_16431, 0, x_16430); +lean_ctor_set(x_16431, 1, x_16155); +x_16399 = x_16431; +x_16400 = x_16429; +goto block_16426; +} +else +{ +lean_object* x_16432; lean_object* x_16433; lean_object* x_16434; lean_object* x_16435; lean_object* x_16436; lean_object* x_16437; lean_object* x_16438; uint8_t x_16439; +x_16432 = lean_ctor_get(x_16427, 1); +lean_inc(x_16432); +if (lean_is_exclusive(x_16427)) { + lean_ctor_release(x_16427, 0); + lean_ctor_release(x_16427, 1); + x_16433 = x_16427; +} else { + lean_dec_ref(x_16427); + x_16433 = lean_box(0); +} +x_16434 = lean_ctor_get(x_16428, 0); +lean_inc(x_16434); +if (lean_is_exclusive(x_16428)) { + lean_ctor_release(x_16428, 0); + x_16435 = x_16428; +} else { + lean_dec_ref(x_16428); + x_16435 = lean_box(0); +} +x_16436 = lean_array_get_size(x_14813); +x_16437 = lean_ctor_get(x_16434, 3); +lean_inc(x_16437); +lean_dec(x_16434); +x_16438 = lean_array_get_size(x_16437); +lean_dec(x_16437); +x_16439 = lean_nat_dec_lt(x_16436, x_16438); +if (x_16439 == 0) +{ +uint8_t x_16440; +x_16440 = lean_nat_dec_eq(x_16436, x_16438); +if (x_16440 == 0) +{ +lean_object* x_16441; lean_object* x_16442; lean_object* x_16443; lean_object* x_16444; lean_object* x_16445; lean_object* x_16446; lean_object* x_16447; lean_object* x_16448; lean_object* x_16449; lean_object* x_16450; lean_object* x_16451; lean_object* x_16452; lean_object* x_16453; lean_object* x_16454; lean_object* x_16455; lean_object* x_16456; lean_object* x_16457; +x_16441 = lean_unsigned_to_nat(0u); +x_16442 = l_Array_extract___rarg(x_14813, x_16441, x_16438); +x_16443 = l_Array_extract___rarg(x_14813, x_16438, x_16436); +lean_dec(x_16436); +lean_inc(x_153); +if (lean_is_scalar(x_16433)) { + x_16444 = lean_alloc_ctor(6, 2, 0); +} else { + x_16444 = x_16433; + lean_ctor_set_tag(x_16444, 6); +} +lean_ctor_set(x_16444, 0, x_153); +lean_ctor_set(x_16444, 1, x_16442); +x_16445 = lean_ctor_get(x_1, 0); +lean_inc(x_16445); +x_16446 = l_Lean_IR_ToIR_bindVar(x_16445, x_16155, x_4, x_5, x_16432); +x_16447 = lean_ctor_get(x_16446, 0); +lean_inc(x_16447); +x_16448 = lean_ctor_get(x_16446, 1); +lean_inc(x_16448); +lean_dec(x_16446); +x_16449 = lean_ctor_get(x_16447, 0); +lean_inc(x_16449); +x_16450 = lean_ctor_get(x_16447, 1); +lean_inc(x_16450); +lean_dec(x_16447); +x_16451 = l_Lean_IR_ToIR_newVar(x_16450, x_4, x_5, x_16448); +x_16452 = lean_ctor_get(x_16451, 0); +lean_inc(x_16452); +x_16453 = lean_ctor_get(x_16451, 1); +lean_inc(x_16453); +lean_dec(x_16451); +x_16454 = lean_ctor_get(x_16452, 0); +lean_inc(x_16454); +x_16455 = lean_ctor_get(x_16452, 1); +lean_inc(x_16455); +lean_dec(x_16452); +x_16456 = lean_ctor_get(x_1, 2); 
+lean_inc(x_16456); +lean_inc(x_5); +lean_inc(x_4); +x_16457 = l_Lean_IR_ToIR_lowerType(x_16456, x_16455, x_4, x_5, x_16453); +if (lean_obj_tag(x_16457) == 0) +{ +lean_object* x_16458; lean_object* x_16459; lean_object* x_16460; lean_object* x_16461; lean_object* x_16462; +x_16458 = lean_ctor_get(x_16457, 0); +lean_inc(x_16458); +x_16459 = lean_ctor_get(x_16457, 1); +lean_inc(x_16459); +lean_dec(x_16457); +x_16460 = lean_ctor_get(x_16458, 0); +lean_inc(x_16460); +x_16461 = lean_ctor_get(x_16458, 1); +lean_inc(x_16461); +lean_dec(x_16458); +lean_inc(x_5); +lean_inc(x_4); +lean_inc(x_2); +x_16462 = l_Lean_IR_ToIR_lowerLet___lambda__3(x_2, x_16454, x_16443, x_16449, x_16444, x_16460, x_16461, x_4, x_5, x_16459); +if (lean_obj_tag(x_16462) == 0) +{ +lean_object* x_16463; lean_object* x_16464; lean_object* x_16465; lean_object* x_16466; lean_object* x_16467; lean_object* x_16468; lean_object* x_16469; +x_16463 = lean_ctor_get(x_16462, 0); +lean_inc(x_16463); +x_16464 = lean_ctor_get(x_16462, 1); +lean_inc(x_16464); +lean_dec(x_16462); +x_16465 = lean_ctor_get(x_16463, 0); +lean_inc(x_16465); +x_16466 = lean_ctor_get(x_16463, 1); +lean_inc(x_16466); +if (lean_is_exclusive(x_16463)) { + lean_ctor_release(x_16463, 0); + lean_ctor_release(x_16463, 1); + x_16467 = x_16463; +} else { + lean_dec_ref(x_16463); + x_16467 = lean_box(0); +} +if (lean_is_scalar(x_16435)) { + x_16468 = lean_alloc_ctor(1, 1, 0); +} else { + x_16468 = x_16435; +} +lean_ctor_set(x_16468, 0, x_16465); +if (lean_is_scalar(x_16467)) { + x_16469 = lean_alloc_ctor(0, 2, 0); +} else { + x_16469 = x_16467; +} +lean_ctor_set(x_16469, 0, x_16468); +lean_ctor_set(x_16469, 1, x_16466); +x_16399 = x_16469; +x_16400 = x_16464; +goto block_16426; +} +else +{ +lean_object* x_16470; lean_object* x_16471; lean_object* x_16472; lean_object* x_16473; +lean_dec(x_16435); +lean_dec(x_16159); +lean_dec(x_14813); +lean_dec(x_153); +lean_dec(x_5); +lean_dec(x_4); +lean_dec(x_2); +lean_dec(x_1); +x_16470 = lean_ctor_get(x_16462, 0); +lean_inc(x_16470); +x_16471 = lean_ctor_get(x_16462, 1); +lean_inc(x_16471); +if (lean_is_exclusive(x_16462)) { + lean_ctor_release(x_16462, 0); + lean_ctor_release(x_16462, 1); + x_16472 = x_16462; +} else { + lean_dec_ref(x_16462); + x_16472 = lean_box(0); +} +if (lean_is_scalar(x_16472)) { + x_16473 = lean_alloc_ctor(1, 2, 0); +} else { + x_16473 = x_16472; +} +lean_ctor_set(x_16473, 0, x_16470); +lean_ctor_set(x_16473, 1, x_16471); +return x_16473; +} +} +else +{ +lean_object* x_16474; lean_object* x_16475; lean_object* x_16476; lean_object* x_16477; +lean_dec(x_16454); +lean_dec(x_16449); +lean_dec(x_16444); +lean_dec(x_16443); +lean_dec(x_16435); +lean_dec(x_16159); +lean_dec(x_14813); +lean_dec(x_153); +lean_dec(x_5); +lean_dec(x_4); +lean_dec(x_2); +lean_dec(x_1); +x_16474 = lean_ctor_get(x_16457, 0); +lean_inc(x_16474); +x_16475 = lean_ctor_get(x_16457, 1); +lean_inc(x_16475); +if (lean_is_exclusive(x_16457)) { + lean_ctor_release(x_16457, 0); + lean_ctor_release(x_16457, 1); + x_16476 = x_16457; +} else { + lean_dec_ref(x_16457); + x_16476 = lean_box(0); +} +if (lean_is_scalar(x_16476)) { + x_16477 = lean_alloc_ctor(1, 2, 0); +} else { + x_16477 = x_16476; +} +lean_ctor_set(x_16477, 0, x_16474); +lean_ctor_set(x_16477, 1, x_16475); +return x_16477; +} +} +else +{ +lean_object* x_16478; lean_object* x_16479; lean_object* x_16480; lean_object* x_16481; lean_object* x_16482; lean_object* x_16483; lean_object* x_16484; lean_object* x_16485; lean_object* x_16486; +lean_dec(x_16438); +lean_dec(x_16436); 
+lean_inc(x_14813); +lean_inc(x_153); +if (lean_is_scalar(x_16433)) { + x_16478 = lean_alloc_ctor(6, 2, 0); +} else { + x_16478 = x_16433; + lean_ctor_set_tag(x_16478, 6); +} +lean_ctor_set(x_16478, 0, x_153); +lean_ctor_set(x_16478, 1, x_14813); +x_16479 = lean_ctor_get(x_1, 0); +lean_inc(x_16479); +x_16480 = l_Lean_IR_ToIR_bindVar(x_16479, x_16155, x_4, x_5, x_16432); +x_16481 = lean_ctor_get(x_16480, 0); +lean_inc(x_16481); +x_16482 = lean_ctor_get(x_16480, 1); +lean_inc(x_16482); +lean_dec(x_16480); +x_16483 = lean_ctor_get(x_16481, 0); +lean_inc(x_16483); +x_16484 = lean_ctor_get(x_16481, 1); +lean_inc(x_16484); +lean_dec(x_16481); +x_16485 = lean_ctor_get(x_1, 2); +lean_inc(x_16485); +lean_inc(x_5); +lean_inc(x_4); +x_16486 = l_Lean_IR_ToIR_lowerType(x_16485, x_16484, x_4, x_5, x_16482); +if (lean_obj_tag(x_16486) == 0) +{ +lean_object* x_16487; lean_object* x_16488; lean_object* x_16489; lean_object* x_16490; lean_object* x_16491; +x_16487 = lean_ctor_get(x_16486, 0); +lean_inc(x_16487); +x_16488 = lean_ctor_get(x_16486, 1); +lean_inc(x_16488); +lean_dec(x_16486); +x_16489 = lean_ctor_get(x_16487, 0); +lean_inc(x_16489); +x_16490 = lean_ctor_get(x_16487, 1); +lean_inc(x_16490); +lean_dec(x_16487); +lean_inc(x_5); +lean_inc(x_4); +lean_inc(x_2); +x_16491 = l_Lean_IR_ToIR_lowerLet___lambda__1(x_2, x_16483, x_16478, x_16489, x_16490, x_4, x_5, x_16488); +if (lean_obj_tag(x_16491) == 0) +{ +lean_object* x_16492; lean_object* x_16493; lean_object* x_16494; lean_object* x_16495; lean_object* x_16496; lean_object* x_16497; lean_object* x_16498; +x_16492 = lean_ctor_get(x_16491, 0); +lean_inc(x_16492); +x_16493 = lean_ctor_get(x_16491, 1); +lean_inc(x_16493); +lean_dec(x_16491); +x_16494 = lean_ctor_get(x_16492, 0); +lean_inc(x_16494); +x_16495 = lean_ctor_get(x_16492, 1); +lean_inc(x_16495); +if (lean_is_exclusive(x_16492)) { + lean_ctor_release(x_16492, 0); + lean_ctor_release(x_16492, 1); + x_16496 = x_16492; +} else { + lean_dec_ref(x_16492); + x_16496 = lean_box(0); +} +if (lean_is_scalar(x_16435)) { + x_16497 = lean_alloc_ctor(1, 1, 0); +} else { + x_16497 = x_16435; +} +lean_ctor_set(x_16497, 0, x_16494); +if (lean_is_scalar(x_16496)) { + x_16498 = lean_alloc_ctor(0, 2, 0); +} else { + x_16498 = x_16496; +} +lean_ctor_set(x_16498, 0, x_16497); +lean_ctor_set(x_16498, 1, x_16495); +x_16399 = x_16498; +x_16400 = x_16493; +goto block_16426; +} +else +{ +lean_object* x_16499; lean_object* x_16500; lean_object* x_16501; lean_object* x_16502; +lean_dec(x_16435); +lean_dec(x_16159); +lean_dec(x_14813); +lean_dec(x_153); +lean_dec(x_5); +lean_dec(x_4); +lean_dec(x_2); +lean_dec(x_1); +x_16499 = lean_ctor_get(x_16491, 0); +lean_inc(x_16499); +x_16500 = lean_ctor_get(x_16491, 1); +lean_inc(x_16500); +if (lean_is_exclusive(x_16491)) { + lean_ctor_release(x_16491, 0); + lean_ctor_release(x_16491, 1); + x_16501 = x_16491; +} else { + lean_dec_ref(x_16491); + x_16501 = lean_box(0); +} +if (lean_is_scalar(x_16501)) { + x_16502 = lean_alloc_ctor(1, 2, 0); +} else { + x_16502 = x_16501; +} +lean_ctor_set(x_16502, 0, x_16499); +lean_ctor_set(x_16502, 1, x_16500); +return x_16502; +} +} +else +{ +lean_object* x_16503; lean_object* x_16504; lean_object* x_16505; lean_object* x_16506; +lean_dec(x_16483); +lean_dec(x_16478); +lean_dec(x_16435); +lean_dec(x_16159); +lean_dec(x_14813); +lean_dec(x_153); +lean_dec(x_5); +lean_dec(x_4); +lean_dec(x_2); +lean_dec(x_1); +x_16503 = lean_ctor_get(x_16486, 0); +lean_inc(x_16503); +x_16504 = lean_ctor_get(x_16486, 1); +lean_inc(x_16504); +if 
(lean_is_exclusive(x_16486)) { + lean_ctor_release(x_16486, 0); + lean_ctor_release(x_16486, 1); + x_16505 = x_16486; +} else { + lean_dec_ref(x_16486); + x_16505 = lean_box(0); +} +if (lean_is_scalar(x_16505)) { + x_16506 = lean_alloc_ctor(1, 2, 0); +} else { + x_16506 = x_16505; +} +lean_ctor_set(x_16506, 0, x_16503); +lean_ctor_set(x_16506, 1, x_16504); +return x_16506; +} +} +} +else +{ +lean_object* x_16507; lean_object* x_16508; lean_object* x_16509; lean_object* x_16510; lean_object* x_16511; lean_object* x_16512; lean_object* x_16513; lean_object* x_16514; lean_object* x_16515; +lean_dec(x_16438); +lean_dec(x_16436); +lean_inc(x_14813); +lean_inc(x_153); +if (lean_is_scalar(x_16433)) { + x_16507 = lean_alloc_ctor(7, 2, 0); +} else { + x_16507 = x_16433; + lean_ctor_set_tag(x_16507, 7); +} +lean_ctor_set(x_16507, 0, x_153); +lean_ctor_set(x_16507, 1, x_14813); +x_16508 = lean_ctor_get(x_1, 0); +lean_inc(x_16508); +x_16509 = l_Lean_IR_ToIR_bindVar(x_16508, x_16155, x_4, x_5, x_16432); +x_16510 = lean_ctor_get(x_16509, 0); +lean_inc(x_16510); +x_16511 = lean_ctor_get(x_16509, 1); +lean_inc(x_16511); +lean_dec(x_16509); +x_16512 = lean_ctor_get(x_16510, 0); +lean_inc(x_16512); +x_16513 = lean_ctor_get(x_16510, 1); +lean_inc(x_16513); +lean_dec(x_16510); +x_16514 = lean_box(7); +lean_inc(x_5); +lean_inc(x_4); +lean_inc(x_2); +x_16515 = l_Lean_IR_ToIR_lowerLet___lambda__1(x_2, x_16512, x_16507, x_16514, x_16513, x_4, x_5, x_16511); +if (lean_obj_tag(x_16515) == 0) +{ +lean_object* x_16516; lean_object* x_16517; lean_object* x_16518; lean_object* x_16519; lean_object* x_16520; lean_object* x_16521; lean_object* x_16522; +x_16516 = lean_ctor_get(x_16515, 0); +lean_inc(x_16516); +x_16517 = lean_ctor_get(x_16515, 1); +lean_inc(x_16517); +lean_dec(x_16515); +x_16518 = lean_ctor_get(x_16516, 0); +lean_inc(x_16518); +x_16519 = lean_ctor_get(x_16516, 1); +lean_inc(x_16519); +if (lean_is_exclusive(x_16516)) { + lean_ctor_release(x_16516, 0); + lean_ctor_release(x_16516, 1); + x_16520 = x_16516; +} else { + lean_dec_ref(x_16516); + x_16520 = lean_box(0); +} +if (lean_is_scalar(x_16435)) { + x_16521 = lean_alloc_ctor(1, 1, 0); +} else { + x_16521 = x_16435; +} +lean_ctor_set(x_16521, 0, x_16518); +if (lean_is_scalar(x_16520)) { + x_16522 = lean_alloc_ctor(0, 2, 0); +} else { + x_16522 = x_16520; +} +lean_ctor_set(x_16522, 0, x_16521); +lean_ctor_set(x_16522, 1, x_16519); +x_16399 = x_16522; +x_16400 = x_16517; +goto block_16426; +} +else +{ +lean_object* x_16523; lean_object* x_16524; lean_object* x_16525; lean_object* x_16526; +lean_dec(x_16435); +lean_dec(x_16159); +lean_dec(x_14813); +lean_dec(x_153); +lean_dec(x_5); +lean_dec(x_4); +lean_dec(x_2); +lean_dec(x_1); +x_16523 = lean_ctor_get(x_16515, 0); +lean_inc(x_16523); +x_16524 = lean_ctor_get(x_16515, 1); +lean_inc(x_16524); +if (lean_is_exclusive(x_16515)) { + lean_ctor_release(x_16515, 0); + lean_ctor_release(x_16515, 1); + x_16525 = x_16515; +} else { + lean_dec_ref(x_16515); + x_16525 = lean_box(0); +} +if (lean_is_scalar(x_16525)) { + x_16526 = lean_alloc_ctor(1, 2, 0); +} else { + x_16526 = x_16525; +} +lean_ctor_set(x_16526, 0, x_16523); +lean_ctor_set(x_16526, 1, x_16524); +return x_16526; +} +} +} +block_16426: +{ +lean_object* x_16401; +x_16401 = lean_ctor_get(x_16399, 0); +lean_inc(x_16401); +if (lean_obj_tag(x_16401) == 0) +{ +lean_object* x_16402; lean_object* x_16403; lean_object* x_16404; lean_object* x_16405; lean_object* x_16406; lean_object* x_16407; lean_object* x_16408; lean_object* x_16409; lean_object* x_16410; 
lean_object* x_16411; +lean_dec(x_16159); +x_16402 = lean_ctor_get(x_16399, 1); +lean_inc(x_16402); +lean_dec(x_16399); +x_16403 = lean_alloc_ctor(6, 2, 0); +lean_ctor_set(x_16403, 0, x_153); +lean_ctor_set(x_16403, 1, x_14813); +x_16404 = lean_ctor_get(x_1, 0); +lean_inc(x_16404); +x_16405 = l_Lean_IR_ToIR_bindVar(x_16404, x_16402, x_4, x_5, x_16400); +x_16406 = lean_ctor_get(x_16405, 0); +lean_inc(x_16406); +x_16407 = lean_ctor_get(x_16405, 1); +lean_inc(x_16407); +lean_dec(x_16405); +x_16408 = lean_ctor_get(x_16406, 0); +lean_inc(x_16408); +x_16409 = lean_ctor_get(x_16406, 1); +lean_inc(x_16409); +lean_dec(x_16406); +x_16410 = lean_ctor_get(x_1, 2); +lean_inc(x_16410); +lean_dec(x_1); +lean_inc(x_5); +lean_inc(x_4); +x_16411 = l_Lean_IR_ToIR_lowerType(x_16410, x_16409, x_4, x_5, x_16407); +if (lean_obj_tag(x_16411) == 0) +{ +lean_object* x_16412; lean_object* x_16413; lean_object* x_16414; lean_object* x_16415; lean_object* x_16416; +x_16412 = lean_ctor_get(x_16411, 0); +lean_inc(x_16412); +x_16413 = lean_ctor_get(x_16411, 1); +lean_inc(x_16413); +lean_dec(x_16411); +x_16414 = lean_ctor_get(x_16412, 0); +lean_inc(x_16414); +x_16415 = lean_ctor_get(x_16412, 1); +lean_inc(x_16415); +lean_dec(x_16412); +x_16416 = l_Lean_IR_ToIR_lowerLet___lambda__1(x_2, x_16408, x_16403, x_16414, x_16415, x_4, x_5, x_16413); +return x_16416; +} +else +{ +lean_object* x_16417; lean_object* x_16418; lean_object* x_16419; lean_object* x_16420; +lean_dec(x_16408); +lean_dec(x_16403); +lean_dec(x_5); +lean_dec(x_4); +lean_dec(x_2); +x_16417 = lean_ctor_get(x_16411, 0); +lean_inc(x_16417); +x_16418 = lean_ctor_get(x_16411, 1); +lean_inc(x_16418); +if (lean_is_exclusive(x_16411)) { + lean_ctor_release(x_16411, 0); + lean_ctor_release(x_16411, 1); + x_16419 = x_16411; +} else { + lean_dec_ref(x_16411); + x_16419 = lean_box(0); +} +if (lean_is_scalar(x_16419)) { + x_16420 = lean_alloc_ctor(1, 2, 0); +} else { + x_16420 = x_16419; +} +lean_ctor_set(x_16420, 0, x_16417); +lean_ctor_set(x_16420, 1, x_16418); +return x_16420; +} +} +else +{ +lean_object* x_16421; lean_object* x_16422; lean_object* x_16423; lean_object* x_16424; lean_object* x_16425; +lean_dec(x_14813); +lean_dec(x_153); +lean_dec(x_5); +lean_dec(x_4); +lean_dec(x_2); +lean_dec(x_1); +x_16421 = lean_ctor_get(x_16399, 1); +lean_inc(x_16421); +if (lean_is_exclusive(x_16399)) { + lean_ctor_release(x_16399, 0); + lean_ctor_release(x_16399, 1); + x_16422 = x_16399; +} else { + lean_dec_ref(x_16399); + x_16422 = lean_box(0); +} +x_16423 = lean_ctor_get(x_16401, 0); +lean_inc(x_16423); +lean_dec(x_16401); +if (lean_is_scalar(x_16422)) { + x_16424 = lean_alloc_ctor(0, 2, 0); +} else { + x_16424 = x_16422; +} +lean_ctor_set(x_16424, 0, x_16423); +lean_ctor_set(x_16424, 1, x_16421); +if (lean_is_scalar(x_16159)) { + x_16425 = lean_alloc_ctor(0, 2, 0); +} else { + x_16425 = x_16159; +} +lean_ctor_set(x_16425, 0, x_16424); +lean_ctor_set(x_16425, 1, x_16400); +return x_16425; +} +} +} +case 4: +{ +lean_object* x_16527; lean_object* x_16528; uint8_t x_16529; +lean_dec(x_16160); +lean_dec(x_16159); +lean_dec(x_14805); +lean_dec(x_14804); +if (lean_is_exclusive(x_16165)) { + lean_ctor_release(x_16165, 0); + x_16527 = x_16165; +} else { + lean_dec_ref(x_16165); + x_16527 = lean_box(0); +} +x_16528 = l_Lean_IR_ToIR_lowerLet___closed__20; +x_16529 = lean_name_eq(x_153, x_16528); +if (x_16529 == 0) +{ +uint8_t x_16530; lean_object* x_16531; lean_object* x_16532; lean_object* x_16533; lean_object* x_16534; lean_object* x_16535; lean_object* x_16536; lean_object* x_16537; 
lean_object* x_16538; lean_object* x_16539; +lean_dec(x_14813); +lean_dec(x_2); +lean_dec(x_1); +x_16530 = 1; +x_16531 = l_Lean_IR_ToIR_lowerLet___closed__14; +x_16532 = l_Lean_Name_toString(x_153, x_16530, x_16531); +if (lean_is_scalar(x_16527)) { + x_16533 = lean_alloc_ctor(3, 1, 0); +} else { + x_16533 = x_16527; + lean_ctor_set_tag(x_16533, 3); +} +lean_ctor_set(x_16533, 0, x_16532); +x_16534 = l_Lean_IR_ToIR_lowerLet___closed__22; +x_16535 = lean_alloc_ctor(5, 2, 0); +lean_ctor_set(x_16535, 0, x_16534); +lean_ctor_set(x_16535, 1, x_16533); +x_16536 = l_Lean_IR_ToIR_lowerLet___closed__24; +x_16537 = lean_alloc_ctor(5, 2, 0); +lean_ctor_set(x_16537, 0, x_16535); +lean_ctor_set(x_16537, 1, x_16536); +x_16538 = l_Lean_MessageData_ofFormat(x_16537); +x_16539 = l_Lean_throwError___at_Lean_IR_ToIR_lowerLet___spec__1(x_16538, x_16155, x_4, x_5, x_16158); +lean_dec(x_5); +lean_dec(x_4); +lean_dec(x_16155); +return x_16539; +} +else +{ +lean_object* x_16540; lean_object* x_16541; lean_object* x_16542; +lean_dec(x_16527); +lean_dec(x_153); +x_16540 = l_Lean_IR_instInhabitedArg; +x_16541 = lean_unsigned_to_nat(2u); +x_16542 = lean_array_get(x_16540, x_14813, x_16541); +lean_dec(x_14813); +if (lean_obj_tag(x_16542) == 0) +{ +lean_object* x_16543; lean_object* x_16544; lean_object* x_16545; lean_object* x_16546; lean_object* x_16547; lean_object* x_16548; lean_object* x_16549; +x_16543 = lean_ctor_get(x_16542, 0); +lean_inc(x_16543); +lean_dec(x_16542); +x_16544 = lean_ctor_get(x_1, 0); +lean_inc(x_16544); +lean_dec(x_1); +x_16545 = l_Lean_IR_ToIR_bindVarToVarId(x_16544, x_16543, x_16155, x_4, x_5, x_16158); +x_16546 = lean_ctor_get(x_16545, 0); +lean_inc(x_16546); +x_16547 = lean_ctor_get(x_16545, 1); +lean_inc(x_16547); +lean_dec(x_16545); +x_16548 = lean_ctor_get(x_16546, 1); +lean_inc(x_16548); +lean_dec(x_16546); +x_16549 = l_Lean_IR_ToIR_lowerCode(x_2, x_16548, x_4, x_5, x_16547); +return x_16549; +} +else +{ +lean_object* x_16550; lean_object* x_16551; lean_object* x_16552; lean_object* x_16553; lean_object* x_16554; lean_object* x_16555; +x_16550 = lean_ctor_get(x_1, 0); +lean_inc(x_16550); +lean_dec(x_1); +x_16551 = l_Lean_IR_ToIR_bindErased(x_16550, x_16155, x_4, x_5, x_16158); +x_16552 = lean_ctor_get(x_16551, 0); +lean_inc(x_16552); +x_16553 = lean_ctor_get(x_16551, 1); +lean_inc(x_16553); +lean_dec(x_16551); +x_16554 = lean_ctor_get(x_16552, 1); +lean_inc(x_16554); +lean_dec(x_16552); +x_16555 = l_Lean_IR_ToIR_lowerCode(x_2, x_16554, x_4, x_5, x_16553); +return x_16555; +} +} +} +case 5: +{ +lean_object* x_16556; lean_object* x_16557; +lean_dec(x_16165); +lean_dec(x_16160); +lean_dec(x_16159); +lean_dec(x_14813); +lean_dec(x_14805); +lean_dec(x_14804); +lean_dec(x_153); +lean_dec(x_2); +lean_dec(x_1); +x_16556 = l_Lean_IR_ToIR_lowerLet___closed__26; +x_16557 = l_panic___at_Lean_IR_ToIR_lowerAlt_loop___spec__1(x_16556, x_16155, x_4, x_5, x_16158); +return x_16557; +} +case 6: +{ +lean_object* x_16558; uint8_t x_16559; +x_16558 = lean_ctor_get(x_16165, 0); +lean_inc(x_16558); +lean_dec(x_16165); +lean_inc(x_153); +x_16559 = l_Lean_isExtern(x_16160, x_153); +if (x_16559 == 0) +{ +lean_object* x_16560; +lean_dec(x_16159); +lean_dec(x_14813); +lean_inc(x_5); +lean_inc(x_4); +x_16560 = l_Lean_IR_ToIR_getCtorInfo(x_153, x_16155, x_4, x_5, x_16158); +if (lean_obj_tag(x_16560) == 0) +{ +lean_object* x_16561; lean_object* x_16562; lean_object* x_16563; lean_object* x_16564; lean_object* x_16565; lean_object* x_16566; lean_object* x_16567; lean_object* x_16568; lean_object* x_16569; lean_object* 
x_16570; lean_object* x_16571; lean_object* x_16572; lean_object* x_16573; lean_object* x_16574; lean_object* x_16575; lean_object* x_16576; lean_object* x_16577; lean_object* x_16578; lean_object* x_16579; lean_object* x_16580; +x_16561 = lean_ctor_get(x_16560, 0); +lean_inc(x_16561); +x_16562 = lean_ctor_get(x_16561, 0); +lean_inc(x_16562); +x_16563 = lean_ctor_get(x_16560, 1); +lean_inc(x_16563); +lean_dec(x_16560); +x_16564 = lean_ctor_get(x_16561, 1); +lean_inc(x_16564); +lean_dec(x_16561); +x_16565 = lean_ctor_get(x_16562, 0); +lean_inc(x_16565); +x_16566 = lean_ctor_get(x_16562, 1); +lean_inc(x_16566); +lean_dec(x_16562); +x_16567 = lean_ctor_get(x_16558, 3); +lean_inc(x_16567); +lean_dec(x_16558); +x_16568 = lean_array_get_size(x_14804); +x_16569 = l_Array_extract___rarg(x_14804, x_16567, x_16568); +lean_dec(x_16568); +lean_dec(x_14804); +x_16570 = lean_array_get_size(x_16566); +x_16571 = lean_unsigned_to_nat(0u); +x_16572 = lean_unsigned_to_nat(1u); +if (lean_is_scalar(x_14805)) { + x_16573 = lean_alloc_ctor(0, 3, 0); +} else { + x_16573 = x_14805; + lean_ctor_set_tag(x_16573, 0); +} +lean_ctor_set(x_16573, 0, x_16571); +lean_ctor_set(x_16573, 1, x_16570); +lean_ctor_set(x_16573, 2, x_16572); +x_16574 = l_Lean_IR_ToIR_lowerLet___closed__27; +x_16575 = l_Std_Range_forIn_x27_loop___at_Lean_IR_ToIR_lowerLet___spec__7(x_16566, x_16569, x_16573, x_16573, x_16574, x_16571, lean_box(0), lean_box(0), x_16564, x_4, x_5, x_16563); +lean_dec(x_16573); +x_16576 = lean_ctor_get(x_16575, 0); +lean_inc(x_16576); +x_16577 = lean_ctor_get(x_16575, 1); +lean_inc(x_16577); +lean_dec(x_16575); +x_16578 = lean_ctor_get(x_16576, 0); +lean_inc(x_16578); +x_16579 = lean_ctor_get(x_16576, 1); +lean_inc(x_16579); +lean_dec(x_16576); +x_16580 = l_Lean_IR_ToIR_lowerLet___lambda__4(x_1, x_2, x_16565, x_16566, x_16569, x_16578, x_16579, x_4, x_5, x_16577); +lean_dec(x_16569); +lean_dec(x_16566); +return x_16580; +} +else +{ +lean_object* x_16581; lean_object* x_16582; lean_object* x_16583; lean_object* x_16584; +lean_dec(x_16558); +lean_dec(x_14805); +lean_dec(x_14804); +lean_dec(x_5); +lean_dec(x_4); +lean_dec(x_2); +lean_dec(x_1); +x_16581 = lean_ctor_get(x_16560, 0); +lean_inc(x_16581); +x_16582 = lean_ctor_get(x_16560, 1); +lean_inc(x_16582); +if (lean_is_exclusive(x_16560)) { + lean_ctor_release(x_16560, 0); + lean_ctor_release(x_16560, 1); + x_16583 = x_16560; +} else { + lean_dec_ref(x_16560); + x_16583 = lean_box(0); +} +if (lean_is_scalar(x_16583)) { + x_16584 = lean_alloc_ctor(1, 2, 0); +} else { + x_16584 = x_16583; +} +lean_ctor_set(x_16584, 0, x_16581); +lean_ctor_set(x_16584, 1, x_16582); +return x_16584; +} +} +else +{ +lean_object* x_16585; lean_object* x_16586; lean_object* x_16613; lean_object* x_16614; +lean_dec(x_16558); +lean_dec(x_14805); +lean_dec(x_14804); +lean_inc(x_153); +x_16613 = l_Lean_Compiler_LCNF_getMonoDecl_x3f(x_153, x_4, x_5, x_16158); +x_16614 = lean_ctor_get(x_16613, 0); +lean_inc(x_16614); +if (lean_obj_tag(x_16614) == 0) +{ +lean_object* x_16615; lean_object* x_16616; lean_object* x_16617; +x_16615 = lean_ctor_get(x_16613, 1); +lean_inc(x_16615); +lean_dec(x_16613); +x_16616 = lean_box(0); +x_16617 = lean_alloc_ctor(0, 2, 0); +lean_ctor_set(x_16617, 0, x_16616); +lean_ctor_set(x_16617, 1, x_16155); +x_16585 = x_16617; +x_16586 = x_16615; +goto block_16612; +} +else +{ +lean_object* x_16618; lean_object* x_16619; lean_object* x_16620; lean_object* x_16621; lean_object* x_16622; lean_object* x_16623; lean_object* x_16624; uint8_t x_16625; +x_16618 = lean_ctor_get(x_16613, 
1); +lean_inc(x_16618); +if (lean_is_exclusive(x_16613)) { + lean_ctor_release(x_16613, 0); + lean_ctor_release(x_16613, 1); + x_16619 = x_16613; +} else { + lean_dec_ref(x_16613); + x_16619 = lean_box(0); +} +x_16620 = lean_ctor_get(x_16614, 0); +lean_inc(x_16620); +if (lean_is_exclusive(x_16614)) { + lean_ctor_release(x_16614, 0); + x_16621 = x_16614; +} else { + lean_dec_ref(x_16614); + x_16621 = lean_box(0); +} +x_16622 = lean_array_get_size(x_14813); +x_16623 = lean_ctor_get(x_16620, 3); +lean_inc(x_16623); +lean_dec(x_16620); +x_16624 = lean_array_get_size(x_16623); +lean_dec(x_16623); +x_16625 = lean_nat_dec_lt(x_16622, x_16624); +if (x_16625 == 0) +{ +uint8_t x_16626; +x_16626 = lean_nat_dec_eq(x_16622, x_16624); +if (x_16626 == 0) +{ +lean_object* x_16627; lean_object* x_16628; lean_object* x_16629; lean_object* x_16630; lean_object* x_16631; lean_object* x_16632; lean_object* x_16633; lean_object* x_16634; lean_object* x_16635; lean_object* x_16636; lean_object* x_16637; lean_object* x_16638; lean_object* x_16639; lean_object* x_16640; lean_object* x_16641; lean_object* x_16642; lean_object* x_16643; +x_16627 = lean_unsigned_to_nat(0u); +x_16628 = l_Array_extract___rarg(x_14813, x_16627, x_16624); +x_16629 = l_Array_extract___rarg(x_14813, x_16624, x_16622); +lean_dec(x_16622); +lean_inc(x_153); +if (lean_is_scalar(x_16619)) { + x_16630 = lean_alloc_ctor(6, 2, 0); +} else { + x_16630 = x_16619; + lean_ctor_set_tag(x_16630, 6); +} +lean_ctor_set(x_16630, 0, x_153); +lean_ctor_set(x_16630, 1, x_16628); +x_16631 = lean_ctor_get(x_1, 0); +lean_inc(x_16631); +x_16632 = l_Lean_IR_ToIR_bindVar(x_16631, x_16155, x_4, x_5, x_16618); +x_16633 = lean_ctor_get(x_16632, 0); +lean_inc(x_16633); +x_16634 = lean_ctor_get(x_16632, 1); +lean_inc(x_16634); +lean_dec(x_16632); +x_16635 = lean_ctor_get(x_16633, 0); +lean_inc(x_16635); +x_16636 = lean_ctor_get(x_16633, 1); +lean_inc(x_16636); +lean_dec(x_16633); +x_16637 = l_Lean_IR_ToIR_newVar(x_16636, x_4, x_5, x_16634); +x_16638 = lean_ctor_get(x_16637, 0); +lean_inc(x_16638); +x_16639 = lean_ctor_get(x_16637, 1); +lean_inc(x_16639); +lean_dec(x_16637); +x_16640 = lean_ctor_get(x_16638, 0); +lean_inc(x_16640); +x_16641 = lean_ctor_get(x_16638, 1); +lean_inc(x_16641); +lean_dec(x_16638); +x_16642 = lean_ctor_get(x_1, 2); +lean_inc(x_16642); +lean_inc(x_5); +lean_inc(x_4); +x_16643 = l_Lean_IR_ToIR_lowerType(x_16642, x_16641, x_4, x_5, x_16639); +if (lean_obj_tag(x_16643) == 0) +{ +lean_object* x_16644; lean_object* x_16645; lean_object* x_16646; lean_object* x_16647; lean_object* x_16648; +x_16644 = lean_ctor_get(x_16643, 0); +lean_inc(x_16644); +x_16645 = lean_ctor_get(x_16643, 1); +lean_inc(x_16645); +lean_dec(x_16643); +x_16646 = lean_ctor_get(x_16644, 0); +lean_inc(x_16646); +x_16647 = lean_ctor_get(x_16644, 1); +lean_inc(x_16647); +lean_dec(x_16644); +lean_inc(x_5); +lean_inc(x_4); +lean_inc(x_2); +x_16648 = l_Lean_IR_ToIR_lowerLet___lambda__3(x_2, x_16640, x_16629, x_16635, x_16630, x_16646, x_16647, x_4, x_5, x_16645); +if (lean_obj_tag(x_16648) == 0) +{ +lean_object* x_16649; lean_object* x_16650; lean_object* x_16651; lean_object* x_16652; lean_object* x_16653; lean_object* x_16654; lean_object* x_16655; +x_16649 = lean_ctor_get(x_16648, 0); +lean_inc(x_16649); +x_16650 = lean_ctor_get(x_16648, 1); +lean_inc(x_16650); +lean_dec(x_16648); +x_16651 = lean_ctor_get(x_16649, 0); +lean_inc(x_16651); +x_16652 = lean_ctor_get(x_16649, 1); +lean_inc(x_16652); +if (lean_is_exclusive(x_16649)) { + lean_ctor_release(x_16649, 0); + 
lean_ctor_release(x_16649, 1); + x_16653 = x_16649; +} else { + lean_dec_ref(x_16649); + x_16653 = lean_box(0); +} +if (lean_is_scalar(x_16621)) { + x_16654 = lean_alloc_ctor(1, 1, 0); +} else { + x_16654 = x_16621; +} +lean_ctor_set(x_16654, 0, x_16651); +if (lean_is_scalar(x_16653)) { + x_16655 = lean_alloc_ctor(0, 2, 0); +} else { + x_16655 = x_16653; +} +lean_ctor_set(x_16655, 0, x_16654); +lean_ctor_set(x_16655, 1, x_16652); +x_16585 = x_16655; +x_16586 = x_16650; +goto block_16612; +} +else +{ +lean_object* x_16656; lean_object* x_16657; lean_object* x_16658; lean_object* x_16659; +lean_dec(x_16621); +lean_dec(x_16159); +lean_dec(x_14813); +lean_dec(x_153); +lean_dec(x_5); +lean_dec(x_4); +lean_dec(x_2); +lean_dec(x_1); +x_16656 = lean_ctor_get(x_16648, 0); +lean_inc(x_16656); +x_16657 = lean_ctor_get(x_16648, 1); +lean_inc(x_16657); +if (lean_is_exclusive(x_16648)) { + lean_ctor_release(x_16648, 0); + lean_ctor_release(x_16648, 1); + x_16658 = x_16648; +} else { + lean_dec_ref(x_16648); + x_16658 = lean_box(0); +} +if (lean_is_scalar(x_16658)) { + x_16659 = lean_alloc_ctor(1, 2, 0); +} else { + x_16659 = x_16658; +} +lean_ctor_set(x_16659, 0, x_16656); +lean_ctor_set(x_16659, 1, x_16657); +return x_16659; +} +} +else +{ +lean_object* x_16660; lean_object* x_16661; lean_object* x_16662; lean_object* x_16663; +lean_dec(x_16640); +lean_dec(x_16635); +lean_dec(x_16630); +lean_dec(x_16629); +lean_dec(x_16621); +lean_dec(x_16159); +lean_dec(x_14813); +lean_dec(x_153); +lean_dec(x_5); +lean_dec(x_4); +lean_dec(x_2); +lean_dec(x_1); +x_16660 = lean_ctor_get(x_16643, 0); +lean_inc(x_16660); +x_16661 = lean_ctor_get(x_16643, 1); +lean_inc(x_16661); +if (lean_is_exclusive(x_16643)) { + lean_ctor_release(x_16643, 0); + lean_ctor_release(x_16643, 1); + x_16662 = x_16643; +} else { + lean_dec_ref(x_16643); + x_16662 = lean_box(0); +} +if (lean_is_scalar(x_16662)) { + x_16663 = lean_alloc_ctor(1, 2, 0); +} else { + x_16663 = x_16662; +} +lean_ctor_set(x_16663, 0, x_16660); +lean_ctor_set(x_16663, 1, x_16661); +return x_16663; +} +} +else +{ +lean_object* x_16664; lean_object* x_16665; lean_object* x_16666; lean_object* x_16667; lean_object* x_16668; lean_object* x_16669; lean_object* x_16670; lean_object* x_16671; lean_object* x_16672; +lean_dec(x_16624); +lean_dec(x_16622); +lean_inc(x_14813); +lean_inc(x_153); +if (lean_is_scalar(x_16619)) { + x_16664 = lean_alloc_ctor(6, 2, 0); +} else { + x_16664 = x_16619; + lean_ctor_set_tag(x_16664, 6); +} +lean_ctor_set(x_16664, 0, x_153); +lean_ctor_set(x_16664, 1, x_14813); +x_16665 = lean_ctor_get(x_1, 0); +lean_inc(x_16665); +x_16666 = l_Lean_IR_ToIR_bindVar(x_16665, x_16155, x_4, x_5, x_16618); +x_16667 = lean_ctor_get(x_16666, 0); +lean_inc(x_16667); +x_16668 = lean_ctor_get(x_16666, 1); +lean_inc(x_16668); +lean_dec(x_16666); +x_16669 = lean_ctor_get(x_16667, 0); +lean_inc(x_16669); +x_16670 = lean_ctor_get(x_16667, 1); +lean_inc(x_16670); +lean_dec(x_16667); +x_16671 = lean_ctor_get(x_1, 2); +lean_inc(x_16671); +lean_inc(x_5); +lean_inc(x_4); +x_16672 = l_Lean_IR_ToIR_lowerType(x_16671, x_16670, x_4, x_5, x_16668); +if (lean_obj_tag(x_16672) == 0) +{ +lean_object* x_16673; lean_object* x_16674; lean_object* x_16675; lean_object* x_16676; lean_object* x_16677; +x_16673 = lean_ctor_get(x_16672, 0); +lean_inc(x_16673); +x_16674 = lean_ctor_get(x_16672, 1); +lean_inc(x_16674); +lean_dec(x_16672); +x_16675 = lean_ctor_get(x_16673, 0); +lean_inc(x_16675); +x_16676 = lean_ctor_get(x_16673, 1); +lean_inc(x_16676); +lean_dec(x_16673); +lean_inc(x_5); 
+lean_inc(x_4); +lean_inc(x_2); +x_16677 = l_Lean_IR_ToIR_lowerLet___lambda__1(x_2, x_16669, x_16664, x_16675, x_16676, x_4, x_5, x_16674); +if (lean_obj_tag(x_16677) == 0) +{ +lean_object* x_16678; lean_object* x_16679; lean_object* x_16680; lean_object* x_16681; lean_object* x_16682; lean_object* x_16683; lean_object* x_16684; +x_16678 = lean_ctor_get(x_16677, 0); +lean_inc(x_16678); +x_16679 = lean_ctor_get(x_16677, 1); +lean_inc(x_16679); +lean_dec(x_16677); +x_16680 = lean_ctor_get(x_16678, 0); +lean_inc(x_16680); +x_16681 = lean_ctor_get(x_16678, 1); +lean_inc(x_16681); +if (lean_is_exclusive(x_16678)) { + lean_ctor_release(x_16678, 0); + lean_ctor_release(x_16678, 1); + x_16682 = x_16678; +} else { + lean_dec_ref(x_16678); + x_16682 = lean_box(0); +} +if (lean_is_scalar(x_16621)) { + x_16683 = lean_alloc_ctor(1, 1, 0); +} else { + x_16683 = x_16621; +} +lean_ctor_set(x_16683, 0, x_16680); +if (lean_is_scalar(x_16682)) { + x_16684 = lean_alloc_ctor(0, 2, 0); +} else { + x_16684 = x_16682; +} +lean_ctor_set(x_16684, 0, x_16683); +lean_ctor_set(x_16684, 1, x_16681); +x_16585 = x_16684; +x_16586 = x_16679; +goto block_16612; +} +else +{ +lean_object* x_16685; lean_object* x_16686; lean_object* x_16687; lean_object* x_16688; +lean_dec(x_16621); +lean_dec(x_16159); +lean_dec(x_14813); +lean_dec(x_153); +lean_dec(x_5); +lean_dec(x_4); +lean_dec(x_2); +lean_dec(x_1); +x_16685 = lean_ctor_get(x_16677, 0); +lean_inc(x_16685); +x_16686 = lean_ctor_get(x_16677, 1); +lean_inc(x_16686); +if (lean_is_exclusive(x_16677)) { + lean_ctor_release(x_16677, 0); + lean_ctor_release(x_16677, 1); + x_16687 = x_16677; +} else { + lean_dec_ref(x_16677); + x_16687 = lean_box(0); +} +if (lean_is_scalar(x_16687)) { + x_16688 = lean_alloc_ctor(1, 2, 0); +} else { + x_16688 = x_16687; +} +lean_ctor_set(x_16688, 0, x_16685); +lean_ctor_set(x_16688, 1, x_16686); +return x_16688; +} +} +else +{ +lean_object* x_16689; lean_object* x_16690; lean_object* x_16691; lean_object* x_16692; +lean_dec(x_16669); +lean_dec(x_16664); +lean_dec(x_16621); +lean_dec(x_16159); +lean_dec(x_14813); +lean_dec(x_153); +lean_dec(x_5); +lean_dec(x_4); +lean_dec(x_2); +lean_dec(x_1); +x_16689 = lean_ctor_get(x_16672, 0); +lean_inc(x_16689); +x_16690 = lean_ctor_get(x_16672, 1); +lean_inc(x_16690); +if (lean_is_exclusive(x_16672)) { + lean_ctor_release(x_16672, 0); + lean_ctor_release(x_16672, 1); + x_16691 = x_16672; +} else { + lean_dec_ref(x_16672); + x_16691 = lean_box(0); +} +if (lean_is_scalar(x_16691)) { + x_16692 = lean_alloc_ctor(1, 2, 0); +} else { + x_16692 = x_16691; +} +lean_ctor_set(x_16692, 0, x_16689); +lean_ctor_set(x_16692, 1, x_16690); +return x_16692; +} +} +} +else +{ +lean_object* x_16693; lean_object* x_16694; lean_object* x_16695; lean_object* x_16696; lean_object* x_16697; lean_object* x_16698; lean_object* x_16699; lean_object* x_16700; lean_object* x_16701; +lean_dec(x_16624); +lean_dec(x_16622); +lean_inc(x_14813); +lean_inc(x_153); +if (lean_is_scalar(x_16619)) { + x_16693 = lean_alloc_ctor(7, 2, 0); +} else { + x_16693 = x_16619; + lean_ctor_set_tag(x_16693, 7); +} +lean_ctor_set(x_16693, 0, x_153); +lean_ctor_set(x_16693, 1, x_14813); +x_16694 = lean_ctor_get(x_1, 0); +lean_inc(x_16694); +x_16695 = l_Lean_IR_ToIR_bindVar(x_16694, x_16155, x_4, x_5, x_16618); +x_16696 = lean_ctor_get(x_16695, 0); +lean_inc(x_16696); +x_16697 = lean_ctor_get(x_16695, 1); +lean_inc(x_16697); +lean_dec(x_16695); +x_16698 = lean_ctor_get(x_16696, 0); +lean_inc(x_16698); +x_16699 = lean_ctor_get(x_16696, 1); +lean_inc(x_16699); 
+lean_dec(x_16696); +x_16700 = lean_box(7); +lean_inc(x_5); +lean_inc(x_4); +lean_inc(x_2); +x_16701 = l_Lean_IR_ToIR_lowerLet___lambda__1(x_2, x_16698, x_16693, x_16700, x_16699, x_4, x_5, x_16697); +if (lean_obj_tag(x_16701) == 0) +{ +lean_object* x_16702; lean_object* x_16703; lean_object* x_16704; lean_object* x_16705; lean_object* x_16706; lean_object* x_16707; lean_object* x_16708; +x_16702 = lean_ctor_get(x_16701, 0); +lean_inc(x_16702); +x_16703 = lean_ctor_get(x_16701, 1); +lean_inc(x_16703); +lean_dec(x_16701); +x_16704 = lean_ctor_get(x_16702, 0); +lean_inc(x_16704); +x_16705 = lean_ctor_get(x_16702, 1); +lean_inc(x_16705); +if (lean_is_exclusive(x_16702)) { + lean_ctor_release(x_16702, 0); + lean_ctor_release(x_16702, 1); + x_16706 = x_16702; +} else { + lean_dec_ref(x_16702); + x_16706 = lean_box(0); +} +if (lean_is_scalar(x_16621)) { + x_16707 = lean_alloc_ctor(1, 1, 0); +} else { + x_16707 = x_16621; +} +lean_ctor_set(x_16707, 0, x_16704); +if (lean_is_scalar(x_16706)) { + x_16708 = lean_alloc_ctor(0, 2, 0); +} else { + x_16708 = x_16706; +} +lean_ctor_set(x_16708, 0, x_16707); +lean_ctor_set(x_16708, 1, x_16705); +x_16585 = x_16708; +x_16586 = x_16703; +goto block_16612; +} +else +{ +lean_object* x_16709; lean_object* x_16710; lean_object* x_16711; lean_object* x_16712; +lean_dec(x_16621); +lean_dec(x_16159); +lean_dec(x_14813); +lean_dec(x_153); +lean_dec(x_5); +lean_dec(x_4); +lean_dec(x_2); +lean_dec(x_1); +x_16709 = lean_ctor_get(x_16701, 0); +lean_inc(x_16709); +x_16710 = lean_ctor_get(x_16701, 1); +lean_inc(x_16710); +if (lean_is_exclusive(x_16701)) { + lean_ctor_release(x_16701, 0); + lean_ctor_release(x_16701, 1); + x_16711 = x_16701; +} else { + lean_dec_ref(x_16701); + x_16711 = lean_box(0); +} +if (lean_is_scalar(x_16711)) { + x_16712 = lean_alloc_ctor(1, 2, 0); +} else { + x_16712 = x_16711; +} +lean_ctor_set(x_16712, 0, x_16709); +lean_ctor_set(x_16712, 1, x_16710); +return x_16712; +} +} +} +block_16612: +{ +lean_object* x_16587; +x_16587 = lean_ctor_get(x_16585, 0); +lean_inc(x_16587); +if (lean_obj_tag(x_16587) == 0) +{ +lean_object* x_16588; lean_object* x_16589; lean_object* x_16590; lean_object* x_16591; lean_object* x_16592; lean_object* x_16593; lean_object* x_16594; lean_object* x_16595; lean_object* x_16596; lean_object* x_16597; +lean_dec(x_16159); +x_16588 = lean_ctor_get(x_16585, 1); +lean_inc(x_16588); +lean_dec(x_16585); +x_16589 = lean_alloc_ctor(6, 2, 0); +lean_ctor_set(x_16589, 0, x_153); +lean_ctor_set(x_16589, 1, x_14813); +x_16590 = lean_ctor_get(x_1, 0); +lean_inc(x_16590); +x_16591 = l_Lean_IR_ToIR_bindVar(x_16590, x_16588, x_4, x_5, x_16586); +x_16592 = lean_ctor_get(x_16591, 0); +lean_inc(x_16592); +x_16593 = lean_ctor_get(x_16591, 1); +lean_inc(x_16593); +lean_dec(x_16591); +x_16594 = lean_ctor_get(x_16592, 0); +lean_inc(x_16594); +x_16595 = lean_ctor_get(x_16592, 1); +lean_inc(x_16595); +lean_dec(x_16592); +x_16596 = lean_ctor_get(x_1, 2); +lean_inc(x_16596); +lean_dec(x_1); +lean_inc(x_5); +lean_inc(x_4); +x_16597 = l_Lean_IR_ToIR_lowerType(x_16596, x_16595, x_4, x_5, x_16593); +if (lean_obj_tag(x_16597) == 0) +{ +lean_object* x_16598; lean_object* x_16599; lean_object* x_16600; lean_object* x_16601; lean_object* x_16602; +x_16598 = lean_ctor_get(x_16597, 0); +lean_inc(x_16598); +x_16599 = lean_ctor_get(x_16597, 1); +lean_inc(x_16599); +lean_dec(x_16597); +x_16600 = lean_ctor_get(x_16598, 0); +lean_inc(x_16600); +x_16601 = lean_ctor_get(x_16598, 1); +lean_inc(x_16601); +lean_dec(x_16598); +x_16602 = 
l_Lean_IR_ToIR_lowerLet___lambda__1(x_2, x_16594, x_16589, x_16600, x_16601, x_4, x_5, x_16599); +return x_16602; +} +else +{ +lean_object* x_16603; lean_object* x_16604; lean_object* x_16605; lean_object* x_16606; +lean_dec(x_16594); +lean_dec(x_16589); +lean_dec(x_5); +lean_dec(x_4); +lean_dec(x_2); +x_16603 = lean_ctor_get(x_16597, 0); +lean_inc(x_16603); +x_16604 = lean_ctor_get(x_16597, 1); +lean_inc(x_16604); +if (lean_is_exclusive(x_16597)) { + lean_ctor_release(x_16597, 0); + lean_ctor_release(x_16597, 1); + x_16605 = x_16597; +} else { + lean_dec_ref(x_16597); + x_16605 = lean_box(0); +} +if (lean_is_scalar(x_16605)) { + x_16606 = lean_alloc_ctor(1, 2, 0); +} else { + x_16606 = x_16605; +} +lean_ctor_set(x_16606, 0, x_16603); +lean_ctor_set(x_16606, 1, x_16604); +return x_16606; +} +} +else +{ +lean_object* x_16607; lean_object* x_16608; lean_object* x_16609; lean_object* x_16610; lean_object* x_16611; +lean_dec(x_14813); +lean_dec(x_153); +lean_dec(x_5); +lean_dec(x_4); +lean_dec(x_2); +lean_dec(x_1); +x_16607 = lean_ctor_get(x_16585, 1); +lean_inc(x_16607); +if (lean_is_exclusive(x_16585)) { + lean_ctor_release(x_16585, 0); + lean_ctor_release(x_16585, 1); + x_16608 = x_16585; +} else { + lean_dec_ref(x_16585); + x_16608 = lean_box(0); +} +x_16609 = lean_ctor_get(x_16587, 0); +lean_inc(x_16609); +lean_dec(x_16587); +if (lean_is_scalar(x_16608)) { + x_16610 = lean_alloc_ctor(0, 2, 0); +} else { + x_16610 = x_16608; +} +lean_ctor_set(x_16610, 0, x_16609); +lean_ctor_set(x_16610, 1, x_16607); +if (lean_is_scalar(x_16159)) { + x_16611 = lean_alloc_ctor(0, 2, 0); +} else { + x_16611 = x_16159; +} +lean_ctor_set(x_16611, 0, x_16610); +lean_ctor_set(x_16611, 1, x_16586); +return x_16611; +} +} +} +} +default: +{ +lean_object* x_16713; uint8_t x_16714; lean_object* x_16715; lean_object* x_16716; lean_object* x_16717; lean_object* x_16718; lean_object* x_16719; lean_object* x_16720; lean_object* x_16721; lean_object* x_16722; lean_object* x_16723; +lean_dec(x_16160); +lean_dec(x_16159); +lean_dec(x_14813); +lean_dec(x_14805); +lean_dec(x_14804); +lean_dec(x_2); +lean_dec(x_1); +if (lean_is_exclusive(x_16165)) { + lean_ctor_release(x_16165, 0); + x_16713 = x_16165; +} else { + lean_dec_ref(x_16165); + x_16713 = lean_box(0); +} +x_16714 = 1; +x_16715 = l_Lean_IR_ToIR_lowerLet___closed__14; +x_16716 = l_Lean_Name_toString(x_153, x_16714, x_16715); +if (lean_is_scalar(x_16713)) { + x_16717 = lean_alloc_ctor(3, 1, 0); +} else { + x_16717 = x_16713; + lean_ctor_set_tag(x_16717, 3); +} +lean_ctor_set(x_16717, 0, x_16716); +x_16718 = l_Lean_IR_ToIR_lowerLet___closed__29; +x_16719 = lean_alloc_ctor(5, 2, 0); +lean_ctor_set(x_16719, 0, x_16718); +lean_ctor_set(x_16719, 1, x_16717); +x_16720 = l_Lean_IR_ToIR_lowerLet___closed__31; +x_16721 = lean_alloc_ctor(5, 2, 0); +lean_ctor_set(x_16721, 0, x_16719); +lean_ctor_set(x_16721, 1, x_16720); +x_16722 = l_Lean_MessageData_ofFormat(x_16721); +x_16723 = l_Lean_throwError___at_Lean_IR_ToIR_lowerLet___spec__1(x_16722, x_16155, x_4, x_5, x_16158); +lean_dec(x_5); +lean_dec(x_4); +lean_dec(x_16155); +return x_16723; +} +} +} +} +} +else +{ +uint8_t x_16724; +lean_dec(x_14813); +lean_dec(x_14805); +lean_dec(x_14804); +lean_dec(x_153); +lean_dec(x_5); +lean_dec(x_4); +lean_dec(x_2); +lean_dec(x_1); +x_16724 = !lean_is_exclusive(x_14815); +if (x_16724 == 0) +{ +lean_object* x_16725; lean_object* x_16726; lean_object* x_16727; +x_16725 = lean_ctor_get(x_14815, 0); +lean_dec(x_16725); +x_16726 = lean_ctor_get(x_14817, 0); +lean_inc(x_16726); +lean_dec(x_14817); 
+lean_ctor_set(x_14815, 0, x_16726); +if (lean_is_scalar(x_14811)) { + x_16727 = lean_alloc_ctor(0, 2, 0); +} else { + x_16727 = x_14811; +} +lean_ctor_set(x_16727, 0, x_14815); +lean_ctor_set(x_16727, 1, x_14816); +return x_16727; +} +else +{ +lean_object* x_16728; lean_object* x_16729; lean_object* x_16730; lean_object* x_16731; +x_16728 = lean_ctor_get(x_14815, 1); +lean_inc(x_16728); +lean_dec(x_14815); +x_16729 = lean_ctor_get(x_14817, 0); +lean_inc(x_16729); +lean_dec(x_14817); +x_16730 = lean_alloc_ctor(0, 2, 0); +lean_ctor_set(x_16730, 0, x_16729); +lean_ctor_set(x_16730, 1, x_16728); +if (lean_is_scalar(x_14811)) { + x_16731 = lean_alloc_ctor(0, 2, 0); +} else { + x_16731 = x_14811; +} +lean_ctor_set(x_16731, 0, x_16730); +lean_ctor_set(x_16731, 1, x_14816); +return x_16731; +} +} +} +} +else +{ +lean_object* x_17013; lean_object* x_17014; lean_object* x_17015; lean_object* x_17016; lean_object* x_17594; lean_object* x_17595; +x_17013 = lean_ctor_get(x_14809, 0); +x_17014 = lean_ctor_get(x_14809, 1); +lean_inc(x_17014); +lean_inc(x_17013); +lean_dec(x_14809); +lean_inc(x_153); +x_17594 = l_Lean_Compiler_LCNF_getMonoDecl_x3f(x_153, x_4, x_5, x_14810); +x_17595 = lean_ctor_get(x_17594, 0); +lean_inc(x_17595); +if (lean_obj_tag(x_17595) == 0) +{ +lean_object* x_17596; lean_object* x_17597; lean_object* x_17598; +x_17596 = lean_ctor_get(x_17594, 1); +lean_inc(x_17596); +lean_dec(x_17594); +x_17597 = lean_box(0); +x_17598 = lean_alloc_ctor(0, 2, 0); +lean_ctor_set(x_17598, 0, x_17597); +lean_ctor_set(x_17598, 1, x_17014); +x_17015 = x_17598; +x_17016 = x_17596; +goto block_17593; +} +else +{ +lean_object* x_17599; lean_object* x_17600; lean_object* x_17601; lean_object* x_17602; lean_object* x_17603; lean_object* x_17604; lean_object* x_17605; uint8_t x_17606; +x_17599 = lean_ctor_get(x_17594, 1); +lean_inc(x_17599); +if (lean_is_exclusive(x_17594)) { + lean_ctor_release(x_17594, 0); + lean_ctor_release(x_17594, 1); + x_17600 = x_17594; +} else { + lean_dec_ref(x_17594); + x_17600 = lean_box(0); +} +x_17601 = lean_ctor_get(x_17595, 0); +lean_inc(x_17601); +if (lean_is_exclusive(x_17595)) { + lean_ctor_release(x_17595, 0); + x_17602 = x_17595; +} else { + lean_dec_ref(x_17595); + x_17602 = lean_box(0); +} +x_17603 = lean_array_get_size(x_17013); +x_17604 = lean_ctor_get(x_17601, 3); +lean_inc(x_17604); +lean_dec(x_17601); +x_17605 = lean_array_get_size(x_17604); +lean_dec(x_17604); +x_17606 = lean_nat_dec_lt(x_17603, x_17605); +if (x_17606 == 0) +{ +uint8_t x_17607; +x_17607 = lean_nat_dec_eq(x_17603, x_17605); +if (x_17607 == 0) +{ +lean_object* x_17608; lean_object* x_17609; lean_object* x_17610; lean_object* x_17611; lean_object* x_17612; lean_object* x_17613; lean_object* x_17614; lean_object* x_17615; lean_object* x_17616; lean_object* x_17617; lean_object* x_17618; lean_object* x_17619; lean_object* x_17620; lean_object* x_17621; lean_object* x_17622; lean_object* x_17623; lean_object* x_17624; +x_17608 = lean_unsigned_to_nat(0u); +x_17609 = l_Array_extract___rarg(x_17013, x_17608, x_17605); +x_17610 = l_Array_extract___rarg(x_17013, x_17605, x_17603); +lean_dec(x_17603); +lean_inc(x_153); +if (lean_is_scalar(x_17600)) { + x_17611 = lean_alloc_ctor(6, 2, 0); +} else { + x_17611 = x_17600; + lean_ctor_set_tag(x_17611, 6); +} +lean_ctor_set(x_17611, 0, x_153); +lean_ctor_set(x_17611, 1, x_17609); +x_17612 = lean_ctor_get(x_1, 0); +lean_inc(x_17612); +x_17613 = l_Lean_IR_ToIR_bindVar(x_17612, x_17014, x_4, x_5, x_17599); +x_17614 = lean_ctor_get(x_17613, 0); +lean_inc(x_17614); 
+x_17615 = lean_ctor_get(x_17613, 1); +lean_inc(x_17615); +lean_dec(x_17613); +x_17616 = lean_ctor_get(x_17614, 0); +lean_inc(x_17616); +x_17617 = lean_ctor_get(x_17614, 1); +lean_inc(x_17617); +lean_dec(x_17614); +x_17618 = l_Lean_IR_ToIR_newVar(x_17617, x_4, x_5, x_17615); +x_17619 = lean_ctor_get(x_17618, 0); +lean_inc(x_17619); +x_17620 = lean_ctor_get(x_17618, 1); +lean_inc(x_17620); +lean_dec(x_17618); +x_17621 = lean_ctor_get(x_17619, 0); +lean_inc(x_17621); +x_17622 = lean_ctor_get(x_17619, 1); +lean_inc(x_17622); +lean_dec(x_17619); +x_17623 = lean_ctor_get(x_1, 2); +lean_inc(x_17623); +lean_inc(x_5); +lean_inc(x_4); +x_17624 = l_Lean_IR_ToIR_lowerType(x_17623, x_17622, x_4, x_5, x_17620); +if (lean_obj_tag(x_17624) == 0) +{ +lean_object* x_17625; lean_object* x_17626; lean_object* x_17627; lean_object* x_17628; lean_object* x_17629; +x_17625 = lean_ctor_get(x_17624, 0); +lean_inc(x_17625); +x_17626 = lean_ctor_get(x_17624, 1); +lean_inc(x_17626); +lean_dec(x_17624); +x_17627 = lean_ctor_get(x_17625, 0); +lean_inc(x_17627); +x_17628 = lean_ctor_get(x_17625, 1); +lean_inc(x_17628); +lean_dec(x_17625); +lean_inc(x_5); +lean_inc(x_4); +lean_inc(x_2); +x_17629 = l_Lean_IR_ToIR_lowerLet___lambda__3(x_2, x_17621, x_17610, x_17616, x_17611, x_17627, x_17628, x_4, x_5, x_17626); +if (lean_obj_tag(x_17629) == 0) +{ +lean_object* x_17630; lean_object* x_17631; lean_object* x_17632; lean_object* x_17633; lean_object* x_17634; lean_object* x_17635; lean_object* x_17636; +x_17630 = lean_ctor_get(x_17629, 0); +lean_inc(x_17630); +x_17631 = lean_ctor_get(x_17629, 1); +lean_inc(x_17631); +lean_dec(x_17629); +x_17632 = lean_ctor_get(x_17630, 0); +lean_inc(x_17632); +x_17633 = lean_ctor_get(x_17630, 1); +lean_inc(x_17633); +if (lean_is_exclusive(x_17630)) { + lean_ctor_release(x_17630, 0); + lean_ctor_release(x_17630, 1); + x_17634 = x_17630; +} else { + lean_dec_ref(x_17630); + x_17634 = lean_box(0); +} +if (lean_is_scalar(x_17602)) { + x_17635 = lean_alloc_ctor(1, 1, 0); +} else { + x_17635 = x_17602; +} +lean_ctor_set(x_17635, 0, x_17632); +if (lean_is_scalar(x_17634)) { + x_17636 = lean_alloc_ctor(0, 2, 0); +} else { + x_17636 = x_17634; +} +lean_ctor_set(x_17636, 0, x_17635); +lean_ctor_set(x_17636, 1, x_17633); +x_17015 = x_17636; +x_17016 = x_17631; +goto block_17593; +} +else +{ +lean_object* x_17637; lean_object* x_17638; lean_object* x_17639; lean_object* x_17640; +lean_dec(x_17602); +lean_dec(x_17013); +lean_dec(x_14811); +lean_dec(x_14805); +lean_dec(x_14804); +lean_dec(x_153); +lean_dec(x_5); +lean_dec(x_4); +lean_dec(x_2); +lean_dec(x_1); +x_17637 = lean_ctor_get(x_17629, 0); +lean_inc(x_17637); +x_17638 = lean_ctor_get(x_17629, 1); +lean_inc(x_17638); +if (lean_is_exclusive(x_17629)) { + lean_ctor_release(x_17629, 0); + lean_ctor_release(x_17629, 1); + x_17639 = x_17629; +} else { + lean_dec_ref(x_17629); + x_17639 = lean_box(0); +} +if (lean_is_scalar(x_17639)) { + x_17640 = lean_alloc_ctor(1, 2, 0); +} else { + x_17640 = x_17639; +} +lean_ctor_set(x_17640, 0, x_17637); +lean_ctor_set(x_17640, 1, x_17638); +return x_17640; +} +} +else +{ +lean_object* x_17641; lean_object* x_17642; lean_object* x_17643; lean_object* x_17644; +lean_dec(x_17621); +lean_dec(x_17616); +lean_dec(x_17611); +lean_dec(x_17610); +lean_dec(x_17602); +lean_dec(x_17013); +lean_dec(x_14811); +lean_dec(x_14805); +lean_dec(x_14804); +lean_dec(x_153); +lean_dec(x_5); +lean_dec(x_4); +lean_dec(x_2); +lean_dec(x_1); +x_17641 = lean_ctor_get(x_17624, 0); +lean_inc(x_17641); +x_17642 = lean_ctor_get(x_17624, 1); 
+lean_inc(x_17642); +if (lean_is_exclusive(x_17624)) { + lean_ctor_release(x_17624, 0); + lean_ctor_release(x_17624, 1); + x_17643 = x_17624; +} else { + lean_dec_ref(x_17624); + x_17643 = lean_box(0); +} +if (lean_is_scalar(x_17643)) { + x_17644 = lean_alloc_ctor(1, 2, 0); +} else { + x_17644 = x_17643; +} +lean_ctor_set(x_17644, 0, x_17641); +lean_ctor_set(x_17644, 1, x_17642); +return x_17644; +} +} +else +{ +lean_object* x_17645; lean_object* x_17646; lean_object* x_17647; lean_object* x_17648; lean_object* x_17649; lean_object* x_17650; lean_object* x_17651; lean_object* x_17652; lean_object* x_17653; +lean_dec(x_17605); +lean_dec(x_17603); +lean_inc(x_17013); +lean_inc(x_153); +if (lean_is_scalar(x_17600)) { + x_17645 = lean_alloc_ctor(6, 2, 0); +} else { + x_17645 = x_17600; + lean_ctor_set_tag(x_17645, 6); +} +lean_ctor_set(x_17645, 0, x_153); +lean_ctor_set(x_17645, 1, x_17013); +x_17646 = lean_ctor_get(x_1, 0); +lean_inc(x_17646); +x_17647 = l_Lean_IR_ToIR_bindVar(x_17646, x_17014, x_4, x_5, x_17599); +x_17648 = lean_ctor_get(x_17647, 0); +lean_inc(x_17648); +x_17649 = lean_ctor_get(x_17647, 1); +lean_inc(x_17649); +lean_dec(x_17647); +x_17650 = lean_ctor_get(x_17648, 0); +lean_inc(x_17650); +x_17651 = lean_ctor_get(x_17648, 1); +lean_inc(x_17651); +lean_dec(x_17648); +x_17652 = lean_ctor_get(x_1, 2); +lean_inc(x_17652); +lean_inc(x_5); +lean_inc(x_4); +x_17653 = l_Lean_IR_ToIR_lowerType(x_17652, x_17651, x_4, x_5, x_17649); +if (lean_obj_tag(x_17653) == 0) +{ +lean_object* x_17654; lean_object* x_17655; lean_object* x_17656; lean_object* x_17657; lean_object* x_17658; +x_17654 = lean_ctor_get(x_17653, 0); +lean_inc(x_17654); +x_17655 = lean_ctor_get(x_17653, 1); +lean_inc(x_17655); +lean_dec(x_17653); +x_17656 = lean_ctor_get(x_17654, 0); +lean_inc(x_17656); +x_17657 = lean_ctor_get(x_17654, 1); +lean_inc(x_17657); +lean_dec(x_17654); +lean_inc(x_5); +lean_inc(x_4); +lean_inc(x_2); +x_17658 = l_Lean_IR_ToIR_lowerLet___lambda__1(x_2, x_17650, x_17645, x_17656, x_17657, x_4, x_5, x_17655); +if (lean_obj_tag(x_17658) == 0) +{ +lean_object* x_17659; lean_object* x_17660; lean_object* x_17661; lean_object* x_17662; lean_object* x_17663; lean_object* x_17664; lean_object* x_17665; +x_17659 = lean_ctor_get(x_17658, 0); +lean_inc(x_17659); +x_17660 = lean_ctor_get(x_17658, 1); +lean_inc(x_17660); +lean_dec(x_17658); +x_17661 = lean_ctor_get(x_17659, 0); +lean_inc(x_17661); +x_17662 = lean_ctor_get(x_17659, 1); +lean_inc(x_17662); +if (lean_is_exclusive(x_17659)) { + lean_ctor_release(x_17659, 0); + lean_ctor_release(x_17659, 1); + x_17663 = x_17659; +} else { + lean_dec_ref(x_17659); + x_17663 = lean_box(0); +} +if (lean_is_scalar(x_17602)) { + x_17664 = lean_alloc_ctor(1, 1, 0); +} else { + x_17664 = x_17602; +} +lean_ctor_set(x_17664, 0, x_17661); +if (lean_is_scalar(x_17663)) { + x_17665 = lean_alloc_ctor(0, 2, 0); +} else { + x_17665 = x_17663; +} +lean_ctor_set(x_17665, 0, x_17664); +lean_ctor_set(x_17665, 1, x_17662); +x_17015 = x_17665; +x_17016 = x_17660; +goto block_17593; +} +else +{ +lean_object* x_17666; lean_object* x_17667; lean_object* x_17668; lean_object* x_17669; +lean_dec(x_17602); +lean_dec(x_17013); +lean_dec(x_14811); +lean_dec(x_14805); +lean_dec(x_14804); +lean_dec(x_153); +lean_dec(x_5); +lean_dec(x_4); +lean_dec(x_2); +lean_dec(x_1); +x_17666 = lean_ctor_get(x_17658, 0); +lean_inc(x_17666); +x_17667 = lean_ctor_get(x_17658, 1); +lean_inc(x_17667); +if (lean_is_exclusive(x_17658)) { + lean_ctor_release(x_17658, 0); + lean_ctor_release(x_17658, 1); + x_17668 = 
x_17658; +} else { + lean_dec_ref(x_17658); + x_17668 = lean_box(0); +} +if (lean_is_scalar(x_17668)) { + x_17669 = lean_alloc_ctor(1, 2, 0); +} else { + x_17669 = x_17668; +} +lean_ctor_set(x_17669, 0, x_17666); +lean_ctor_set(x_17669, 1, x_17667); +return x_17669; +} +} +else +{ +lean_object* x_17670; lean_object* x_17671; lean_object* x_17672; lean_object* x_17673; +lean_dec(x_17650); +lean_dec(x_17645); +lean_dec(x_17602); +lean_dec(x_17013); +lean_dec(x_14811); +lean_dec(x_14805); +lean_dec(x_14804); +lean_dec(x_153); +lean_dec(x_5); +lean_dec(x_4); +lean_dec(x_2); +lean_dec(x_1); +x_17670 = lean_ctor_get(x_17653, 0); +lean_inc(x_17670); +x_17671 = lean_ctor_get(x_17653, 1); +lean_inc(x_17671); +if (lean_is_exclusive(x_17653)) { + lean_ctor_release(x_17653, 0); + lean_ctor_release(x_17653, 1); + x_17672 = x_17653; +} else { + lean_dec_ref(x_17653); + x_17672 = lean_box(0); +} +if (lean_is_scalar(x_17672)) { + x_17673 = lean_alloc_ctor(1, 2, 0); +} else { + x_17673 = x_17672; +} +lean_ctor_set(x_17673, 0, x_17670); +lean_ctor_set(x_17673, 1, x_17671); +return x_17673; +} +} +} +else +{ +lean_object* x_17674; lean_object* x_17675; lean_object* x_17676; lean_object* x_17677; lean_object* x_17678; lean_object* x_17679; lean_object* x_17680; lean_object* x_17681; lean_object* x_17682; +lean_dec(x_17605); +lean_dec(x_17603); +lean_inc(x_17013); +lean_inc(x_153); +if (lean_is_scalar(x_17600)) { + x_17674 = lean_alloc_ctor(7, 2, 0); +} else { + x_17674 = x_17600; + lean_ctor_set_tag(x_17674, 7); +} +lean_ctor_set(x_17674, 0, x_153); +lean_ctor_set(x_17674, 1, x_17013); +x_17675 = lean_ctor_get(x_1, 0); +lean_inc(x_17675); +x_17676 = l_Lean_IR_ToIR_bindVar(x_17675, x_17014, x_4, x_5, x_17599); +x_17677 = lean_ctor_get(x_17676, 0); +lean_inc(x_17677); +x_17678 = lean_ctor_get(x_17676, 1); +lean_inc(x_17678); +lean_dec(x_17676); +x_17679 = lean_ctor_get(x_17677, 0); +lean_inc(x_17679); +x_17680 = lean_ctor_get(x_17677, 1); +lean_inc(x_17680); +lean_dec(x_17677); +x_17681 = lean_box(7); +lean_inc(x_5); +lean_inc(x_4); +lean_inc(x_2); +x_17682 = l_Lean_IR_ToIR_lowerLet___lambda__1(x_2, x_17679, x_17674, x_17681, x_17680, x_4, x_5, x_17678); +if (lean_obj_tag(x_17682) == 0) +{ +lean_object* x_17683; lean_object* x_17684; lean_object* x_17685; lean_object* x_17686; lean_object* x_17687; lean_object* x_17688; lean_object* x_17689; +x_17683 = lean_ctor_get(x_17682, 0); +lean_inc(x_17683); +x_17684 = lean_ctor_get(x_17682, 1); +lean_inc(x_17684); +lean_dec(x_17682); +x_17685 = lean_ctor_get(x_17683, 0); +lean_inc(x_17685); +x_17686 = lean_ctor_get(x_17683, 1); +lean_inc(x_17686); +if (lean_is_exclusive(x_17683)) { + lean_ctor_release(x_17683, 0); + lean_ctor_release(x_17683, 1); + x_17687 = x_17683; +} else { + lean_dec_ref(x_17683); + x_17687 = lean_box(0); +} +if (lean_is_scalar(x_17602)) { + x_17688 = lean_alloc_ctor(1, 1, 0); +} else { + x_17688 = x_17602; +} +lean_ctor_set(x_17688, 0, x_17685); +if (lean_is_scalar(x_17687)) { + x_17689 = lean_alloc_ctor(0, 2, 0); +} else { + x_17689 = x_17687; +} +lean_ctor_set(x_17689, 0, x_17688); +lean_ctor_set(x_17689, 1, x_17686); +x_17015 = x_17689; +x_17016 = x_17684; +goto block_17593; +} +else +{ +lean_object* x_17690; lean_object* x_17691; lean_object* x_17692; lean_object* x_17693; +lean_dec(x_17602); +lean_dec(x_17013); +lean_dec(x_14811); +lean_dec(x_14805); +lean_dec(x_14804); +lean_dec(x_153); +lean_dec(x_5); +lean_dec(x_4); +lean_dec(x_2); +lean_dec(x_1); +x_17690 = lean_ctor_get(x_17682, 0); +lean_inc(x_17690); +x_17691 = lean_ctor_get(x_17682, 
1); +lean_inc(x_17691); +if (lean_is_exclusive(x_17682)) { + lean_ctor_release(x_17682, 0); + lean_ctor_release(x_17682, 1); + x_17692 = x_17682; +} else { + lean_dec_ref(x_17682); + x_17692 = lean_box(0); +} +if (lean_is_scalar(x_17692)) { + x_17693 = lean_alloc_ctor(1, 2, 0); +} else { + x_17693 = x_17692; +} +lean_ctor_set(x_17693, 0, x_17690); +lean_ctor_set(x_17693, 1, x_17691); +return x_17693; +} +} +} +block_17593: +{ +lean_object* x_17017; +x_17017 = lean_ctor_get(x_17015, 0); +lean_inc(x_17017); +if (lean_obj_tag(x_17017) == 0) +{ +lean_object* x_17018; lean_object* x_17019; lean_object* x_17020; lean_object* x_17021; lean_object* x_17022; lean_object* x_17023; lean_object* x_17024; uint8_t x_17025; lean_object* x_17026; +lean_dec(x_14811); +x_17018 = lean_ctor_get(x_17015, 1); +lean_inc(x_17018); +if (lean_is_exclusive(x_17015)) { + lean_ctor_release(x_17015, 0); + lean_ctor_release(x_17015, 1); + x_17019 = x_17015; +} else { + lean_dec_ref(x_17015); + x_17019 = lean_box(0); +} +x_17020 = lean_st_ref_get(x_5, x_17016); +x_17021 = lean_ctor_get(x_17020, 0); +lean_inc(x_17021); +x_17022 = lean_ctor_get(x_17020, 1); +lean_inc(x_17022); +if (lean_is_exclusive(x_17020)) { + lean_ctor_release(x_17020, 0); + lean_ctor_release(x_17020, 1); + x_17023 = x_17020; +} else { + lean_dec_ref(x_17020); + x_17023 = lean_box(0); +} +x_17024 = lean_ctor_get(x_17021, 0); +lean_inc(x_17024); +lean_dec(x_17021); +x_17025 = 0; +lean_inc(x_153); +lean_inc(x_17024); +x_17026 = l_Lean_Environment_find_x3f(x_17024, x_153, x_17025); +if (lean_obj_tag(x_17026) == 0) +{ +lean_object* x_17027; lean_object* x_17028; +lean_dec(x_17024); +lean_dec(x_17023); +lean_dec(x_17019); +lean_dec(x_17013); +lean_dec(x_14805); +lean_dec(x_14804); +lean_dec(x_153); +lean_dec(x_2); +lean_dec(x_1); +x_17027 = l_Lean_IR_ToIR_lowerLet___closed__6; +x_17028 = l_panic___at_Lean_IR_ToIR_lowerAlt_loop___spec__1(x_17027, x_17018, x_4, x_5, x_17022); +return x_17028; +} +else +{ +lean_object* x_17029; +x_17029 = lean_ctor_get(x_17026, 0); +lean_inc(x_17029); +lean_dec(x_17026); +switch (lean_obj_tag(x_17029)) { +case 0: +{ +lean_object* x_17030; lean_object* x_17031; uint8_t x_17032; +lean_dec(x_17024); +lean_dec(x_14805); +lean_dec(x_14804); +if (lean_is_exclusive(x_17029)) { + lean_ctor_release(x_17029, 0); + x_17030 = x_17029; +} else { + lean_dec_ref(x_17029); + x_17030 = lean_box(0); +} +x_17031 = l_Lean_IR_ToIR_lowerLet___closed__9; +x_17032 = lean_name_eq(x_153, x_17031); +if (x_17032 == 0) +{ +lean_object* x_17033; uint8_t x_17034; +x_17033 = l_Lean_IR_ToIR_lowerLet___closed__11; +x_17034 = lean_name_eq(x_153, x_17033); +if (x_17034 == 0) +{ +lean_object* x_17035; lean_object* x_17036; lean_object* x_17037; +lean_dec(x_17023); +lean_dec(x_17019); +lean_inc(x_153); +x_17035 = l_Lean_IR_ToIR_findDecl(x_153, x_17018, x_4, x_5, x_17022); +x_17036 = lean_ctor_get(x_17035, 0); +lean_inc(x_17036); +x_17037 = lean_ctor_get(x_17036, 0); +lean_inc(x_17037); +if (lean_obj_tag(x_17037) == 0) +{ +lean_object* x_17038; lean_object* x_17039; lean_object* x_17040; lean_object* x_17041; uint8_t x_17042; lean_object* x_17043; lean_object* x_17044; lean_object* x_17045; lean_object* x_17046; lean_object* x_17047; lean_object* x_17048; lean_object* x_17049; lean_object* x_17050; lean_object* x_17051; +lean_dec(x_17013); +lean_dec(x_2); +lean_dec(x_1); +x_17038 = lean_ctor_get(x_17035, 1); +lean_inc(x_17038); +if (lean_is_exclusive(x_17035)) { + lean_ctor_release(x_17035, 0); + lean_ctor_release(x_17035, 1); + x_17039 = x_17035; +} else { + 
lean_dec_ref(x_17035); + x_17039 = lean_box(0); +} +x_17040 = lean_ctor_get(x_17036, 1); +lean_inc(x_17040); +if (lean_is_exclusive(x_17036)) { + lean_ctor_release(x_17036, 0); + lean_ctor_release(x_17036, 1); + x_17041 = x_17036; +} else { + lean_dec_ref(x_17036); + x_17041 = lean_box(0); +} +x_17042 = 1; +x_17043 = l_Lean_IR_ToIR_lowerLet___closed__14; +x_17044 = l_Lean_Name_toString(x_153, x_17042, x_17043); +if (lean_is_scalar(x_17030)) { + x_17045 = lean_alloc_ctor(3, 1, 0); +} else { + x_17045 = x_17030; + lean_ctor_set_tag(x_17045, 3); +} +lean_ctor_set(x_17045, 0, x_17044); +x_17046 = l_Lean_IR_ToIR_lowerLet___closed__13; +if (lean_is_scalar(x_17041)) { + x_17047 = lean_alloc_ctor(5, 2, 0); +} else { + x_17047 = x_17041; + lean_ctor_set_tag(x_17047, 5); +} +lean_ctor_set(x_17047, 0, x_17046); +lean_ctor_set(x_17047, 1, x_17045); +x_17048 = l_Lean_IR_ToIR_lowerLet___closed__16; +if (lean_is_scalar(x_17039)) { + x_17049 = lean_alloc_ctor(5, 2, 0); +} else { + x_17049 = x_17039; + lean_ctor_set_tag(x_17049, 5); +} +lean_ctor_set(x_17049, 0, x_17047); +lean_ctor_set(x_17049, 1, x_17048); +x_17050 = l_Lean_MessageData_ofFormat(x_17049); +x_17051 = l_Lean_throwError___at_Lean_IR_ToIR_lowerLet___spec__1(x_17050, x_17040, x_4, x_5, x_17038); +lean_dec(x_5); +lean_dec(x_4); +lean_dec(x_17040); +return x_17051; +} +else +{ +lean_object* x_17052; lean_object* x_17053; lean_object* x_17054; lean_object* x_17055; lean_object* x_17056; lean_object* x_17057; lean_object* x_17058; uint8_t x_17059; +lean_dec(x_17030); +x_17052 = lean_ctor_get(x_17035, 1); +lean_inc(x_17052); +lean_dec(x_17035); +x_17053 = lean_ctor_get(x_17036, 1); +lean_inc(x_17053); +if (lean_is_exclusive(x_17036)) { + lean_ctor_release(x_17036, 0); + lean_ctor_release(x_17036, 1); + x_17054 = x_17036; +} else { + lean_dec_ref(x_17036); + x_17054 = lean_box(0); +} +x_17055 = lean_ctor_get(x_17037, 0); +lean_inc(x_17055); +lean_dec(x_17037); +x_17056 = lean_array_get_size(x_17013); +x_17057 = l_Lean_IR_Decl_params(x_17055); +lean_dec(x_17055); +x_17058 = lean_array_get_size(x_17057); +lean_dec(x_17057); +x_17059 = lean_nat_dec_lt(x_17056, x_17058); +if (x_17059 == 0) +{ +uint8_t x_17060; +x_17060 = lean_nat_dec_eq(x_17056, x_17058); +if (x_17060 == 0) +{ +lean_object* x_17061; lean_object* x_17062; lean_object* x_17063; lean_object* x_17064; lean_object* x_17065; lean_object* x_17066; lean_object* x_17067; lean_object* x_17068; lean_object* x_17069; lean_object* x_17070; lean_object* x_17071; lean_object* x_17072; lean_object* x_17073; lean_object* x_17074; lean_object* x_17075; lean_object* x_17076; lean_object* x_17077; +x_17061 = lean_unsigned_to_nat(0u); +x_17062 = l_Array_extract___rarg(x_17013, x_17061, x_17058); +x_17063 = l_Array_extract___rarg(x_17013, x_17058, x_17056); +lean_dec(x_17056); +lean_dec(x_17013); +if (lean_is_scalar(x_17054)) { + x_17064 = lean_alloc_ctor(6, 2, 0); +} else { + x_17064 = x_17054; + lean_ctor_set_tag(x_17064, 6); +} +lean_ctor_set(x_17064, 0, x_153); +lean_ctor_set(x_17064, 1, x_17062); +x_17065 = lean_ctor_get(x_1, 0); +lean_inc(x_17065); +x_17066 = l_Lean_IR_ToIR_bindVar(x_17065, x_17053, x_4, x_5, x_17052); +x_17067 = lean_ctor_get(x_17066, 0); +lean_inc(x_17067); +x_17068 = lean_ctor_get(x_17066, 1); +lean_inc(x_17068); +lean_dec(x_17066); +x_17069 = lean_ctor_get(x_17067, 0); +lean_inc(x_17069); +x_17070 = lean_ctor_get(x_17067, 1); +lean_inc(x_17070); +lean_dec(x_17067); +x_17071 = l_Lean_IR_ToIR_newVar(x_17070, x_4, x_5, x_17068); +x_17072 = lean_ctor_get(x_17071, 0); 
+lean_inc(x_17072); +x_17073 = lean_ctor_get(x_17071, 1); +lean_inc(x_17073); +lean_dec(x_17071); +x_17074 = lean_ctor_get(x_17072, 0); +lean_inc(x_17074); +x_17075 = lean_ctor_get(x_17072, 1); +lean_inc(x_17075); +lean_dec(x_17072); +x_17076 = lean_ctor_get(x_1, 2); +lean_inc(x_17076); +lean_dec(x_1); +lean_inc(x_5); +lean_inc(x_4); +x_17077 = l_Lean_IR_ToIR_lowerType(x_17076, x_17075, x_4, x_5, x_17073); +if (lean_obj_tag(x_17077) == 0) +{ +lean_object* x_17078; lean_object* x_17079; lean_object* x_17080; lean_object* x_17081; lean_object* x_17082; +x_17078 = lean_ctor_get(x_17077, 0); +lean_inc(x_17078); +x_17079 = lean_ctor_get(x_17077, 1); +lean_inc(x_17079); +lean_dec(x_17077); +x_17080 = lean_ctor_get(x_17078, 0); +lean_inc(x_17080); +x_17081 = lean_ctor_get(x_17078, 1); +lean_inc(x_17081); +lean_dec(x_17078); +x_17082 = l_Lean_IR_ToIR_lowerLet___lambda__3(x_2, x_17074, x_17063, x_17069, x_17064, x_17080, x_17081, x_4, x_5, x_17079); +return x_17082; +} +else +{ +lean_object* x_17083; lean_object* x_17084; lean_object* x_17085; lean_object* x_17086; +lean_dec(x_17074); +lean_dec(x_17069); +lean_dec(x_17064); +lean_dec(x_17063); +lean_dec(x_5); +lean_dec(x_4); +lean_dec(x_2); +x_17083 = lean_ctor_get(x_17077, 0); +lean_inc(x_17083); +x_17084 = lean_ctor_get(x_17077, 1); +lean_inc(x_17084); +if (lean_is_exclusive(x_17077)) { + lean_ctor_release(x_17077, 0); + lean_ctor_release(x_17077, 1); + x_17085 = x_17077; +} else { + lean_dec_ref(x_17077); + x_17085 = lean_box(0); +} +if (lean_is_scalar(x_17085)) { + x_17086 = lean_alloc_ctor(1, 2, 0); +} else { + x_17086 = x_17085; +} +lean_ctor_set(x_17086, 0, x_17083); +lean_ctor_set(x_17086, 1, x_17084); +return x_17086; +} +} +else +{ +lean_object* x_17087; lean_object* x_17088; lean_object* x_17089; lean_object* x_17090; lean_object* x_17091; lean_object* x_17092; lean_object* x_17093; lean_object* x_17094; lean_object* x_17095; +lean_dec(x_17058); +lean_dec(x_17056); +if (lean_is_scalar(x_17054)) { + x_17087 = lean_alloc_ctor(6, 2, 0); +} else { + x_17087 = x_17054; + lean_ctor_set_tag(x_17087, 6); +} +lean_ctor_set(x_17087, 0, x_153); +lean_ctor_set(x_17087, 1, x_17013); +x_17088 = lean_ctor_get(x_1, 0); +lean_inc(x_17088); +x_17089 = l_Lean_IR_ToIR_bindVar(x_17088, x_17053, x_4, x_5, x_17052); +x_17090 = lean_ctor_get(x_17089, 0); +lean_inc(x_17090); +x_17091 = lean_ctor_get(x_17089, 1); +lean_inc(x_17091); +lean_dec(x_17089); +x_17092 = lean_ctor_get(x_17090, 0); +lean_inc(x_17092); +x_17093 = lean_ctor_get(x_17090, 1); +lean_inc(x_17093); +lean_dec(x_17090); +x_17094 = lean_ctor_get(x_1, 2); +lean_inc(x_17094); +lean_dec(x_1); +lean_inc(x_5); +lean_inc(x_4); +x_17095 = l_Lean_IR_ToIR_lowerType(x_17094, x_17093, x_4, x_5, x_17091); +if (lean_obj_tag(x_17095) == 0) +{ +lean_object* x_17096; lean_object* x_17097; lean_object* x_17098; lean_object* x_17099; lean_object* x_17100; +x_17096 = lean_ctor_get(x_17095, 0); +lean_inc(x_17096); +x_17097 = lean_ctor_get(x_17095, 1); +lean_inc(x_17097); +lean_dec(x_17095); +x_17098 = lean_ctor_get(x_17096, 0); +lean_inc(x_17098); +x_17099 = lean_ctor_get(x_17096, 1); +lean_inc(x_17099); +lean_dec(x_17096); +x_17100 = l_Lean_IR_ToIR_lowerLet___lambda__1(x_2, x_17092, x_17087, x_17098, x_17099, x_4, x_5, x_17097); +return x_17100; +} +else +{ +lean_object* x_17101; lean_object* x_17102; lean_object* x_17103; lean_object* x_17104; +lean_dec(x_17092); +lean_dec(x_17087); +lean_dec(x_5); +lean_dec(x_4); +lean_dec(x_2); +x_17101 = lean_ctor_get(x_17095, 0); +lean_inc(x_17101); +x_17102 = 
lean_ctor_get(x_17095, 1); +lean_inc(x_17102); +if (lean_is_exclusive(x_17095)) { + lean_ctor_release(x_17095, 0); + lean_ctor_release(x_17095, 1); + x_17103 = x_17095; +} else { + lean_dec_ref(x_17095); + x_17103 = lean_box(0); +} +if (lean_is_scalar(x_17103)) { + x_17104 = lean_alloc_ctor(1, 2, 0); +} else { + x_17104 = x_17103; +} +lean_ctor_set(x_17104, 0, x_17101); +lean_ctor_set(x_17104, 1, x_17102); +return x_17104; +} +} +} +else +{ +lean_object* x_17105; lean_object* x_17106; lean_object* x_17107; lean_object* x_17108; lean_object* x_17109; lean_object* x_17110; lean_object* x_17111; lean_object* x_17112; lean_object* x_17113; +lean_dec(x_17058); +lean_dec(x_17056); +if (lean_is_scalar(x_17054)) { + x_17105 = lean_alloc_ctor(7, 2, 0); +} else { + x_17105 = x_17054; + lean_ctor_set_tag(x_17105, 7); +} +lean_ctor_set(x_17105, 0, x_153); +lean_ctor_set(x_17105, 1, x_17013); +x_17106 = lean_ctor_get(x_1, 0); +lean_inc(x_17106); +lean_dec(x_1); +x_17107 = l_Lean_IR_ToIR_bindVar(x_17106, x_17053, x_4, x_5, x_17052); +x_17108 = lean_ctor_get(x_17107, 0); +lean_inc(x_17108); +x_17109 = lean_ctor_get(x_17107, 1); +lean_inc(x_17109); +lean_dec(x_17107); +x_17110 = lean_ctor_get(x_17108, 0); +lean_inc(x_17110); +x_17111 = lean_ctor_get(x_17108, 1); +lean_inc(x_17111); +lean_dec(x_17108); +x_17112 = lean_box(7); +x_17113 = l_Lean_IR_ToIR_lowerLet___lambda__1(x_2, x_17110, x_17105, x_17112, x_17111, x_4, x_5, x_17109); +return x_17113; +} +} +} +else +{ +lean_object* x_17114; lean_object* x_17115; lean_object* x_17116; +lean_dec(x_17030); +lean_dec(x_17013); +lean_dec(x_153); +lean_dec(x_5); +lean_dec(x_4); +lean_dec(x_2); +lean_dec(x_1); +x_17114 = lean_box(13); +if (lean_is_scalar(x_17019)) { + x_17115 = lean_alloc_ctor(0, 2, 0); +} else { + x_17115 = x_17019; +} +lean_ctor_set(x_17115, 0, x_17114); +lean_ctor_set(x_17115, 1, x_17018); +if (lean_is_scalar(x_17023)) { + x_17116 = lean_alloc_ctor(0, 2, 0); +} else { + x_17116 = x_17023; +} +lean_ctor_set(x_17116, 0, x_17115); +lean_ctor_set(x_17116, 1, x_17022); +return x_17116; +} +} +else +{ +lean_object* x_17117; lean_object* x_17118; lean_object* x_17119; +lean_dec(x_17030); +lean_dec(x_17023); +lean_dec(x_17019); +lean_dec(x_153); +x_17117 = l_Lean_IR_instInhabitedArg; +x_17118 = lean_unsigned_to_nat(2u); +x_17119 = lean_array_get(x_17117, x_17013, x_17118); +lean_dec(x_17013); +if (lean_obj_tag(x_17119) == 0) +{ +lean_object* x_17120; lean_object* x_17121; lean_object* x_17122; lean_object* x_17123; lean_object* x_17124; lean_object* x_17125; lean_object* x_17126; +x_17120 = lean_ctor_get(x_17119, 0); +lean_inc(x_17120); +lean_dec(x_17119); +x_17121 = lean_ctor_get(x_1, 0); +lean_inc(x_17121); +lean_dec(x_1); +x_17122 = l_Lean_IR_ToIR_bindVarToVarId(x_17121, x_17120, x_17018, x_4, x_5, x_17022); +x_17123 = lean_ctor_get(x_17122, 0); +lean_inc(x_17123); +x_17124 = lean_ctor_get(x_17122, 1); +lean_inc(x_17124); +lean_dec(x_17122); +x_17125 = lean_ctor_get(x_17123, 1); +lean_inc(x_17125); +lean_dec(x_17123); +x_17126 = l_Lean_IR_ToIR_lowerCode(x_2, x_17125, x_4, x_5, x_17124); +return x_17126; +} +else +{ +lean_object* x_17127; lean_object* x_17128; lean_object* x_17129; lean_object* x_17130; lean_object* x_17131; lean_object* x_17132; +x_17127 = lean_ctor_get(x_1, 0); +lean_inc(x_17127); +lean_dec(x_1); +x_17128 = l_Lean_IR_ToIR_bindErased(x_17127, x_17018, x_4, x_5, x_17022); +x_17129 = lean_ctor_get(x_17128, 0); +lean_inc(x_17129); +x_17130 = lean_ctor_get(x_17128, 1); +lean_inc(x_17130); +lean_dec(x_17128); +x_17131 = 
lean_ctor_get(x_17129, 1); +lean_inc(x_17131); +lean_dec(x_17129); +x_17132 = l_Lean_IR_ToIR_lowerCode(x_2, x_17131, x_4, x_5, x_17130); +return x_17132; +} +} +} +case 1: +{ +lean_object* x_17133; lean_object* x_17134; lean_object* x_17161; lean_object* x_17162; +lean_dec(x_17029); +lean_dec(x_17024); +lean_dec(x_14805); +lean_dec(x_14804); +lean_inc(x_153); +x_17161 = l_Lean_Compiler_LCNF_getMonoDecl_x3f(x_153, x_4, x_5, x_17022); +x_17162 = lean_ctor_get(x_17161, 0); +lean_inc(x_17162); +if (lean_obj_tag(x_17162) == 0) +{ +lean_object* x_17163; lean_object* x_17164; lean_object* x_17165; +x_17163 = lean_ctor_get(x_17161, 1); +lean_inc(x_17163); +lean_dec(x_17161); +x_17164 = lean_box(0); +if (lean_is_scalar(x_17019)) { + x_17165 = lean_alloc_ctor(0, 2, 0); +} else { + x_17165 = x_17019; +} +lean_ctor_set(x_17165, 0, x_17164); +lean_ctor_set(x_17165, 1, x_17018); +x_17133 = x_17165; +x_17134 = x_17163; +goto block_17160; +} +else +{ +lean_object* x_17166; lean_object* x_17167; lean_object* x_17168; lean_object* x_17169; lean_object* x_17170; lean_object* x_17171; lean_object* x_17172; uint8_t x_17173; +lean_dec(x_17019); +x_17166 = lean_ctor_get(x_17161, 1); +lean_inc(x_17166); +if (lean_is_exclusive(x_17161)) { + lean_ctor_release(x_17161, 0); + lean_ctor_release(x_17161, 1); + x_17167 = x_17161; +} else { + lean_dec_ref(x_17161); + x_17167 = lean_box(0); +} +x_17168 = lean_ctor_get(x_17162, 0); +lean_inc(x_17168); +if (lean_is_exclusive(x_17162)) { + lean_ctor_release(x_17162, 0); + x_17169 = x_17162; +} else { + lean_dec_ref(x_17162); + x_17169 = lean_box(0); +} +x_17170 = lean_array_get_size(x_17013); +x_17171 = lean_ctor_get(x_17168, 3); +lean_inc(x_17171); +lean_dec(x_17168); +x_17172 = lean_array_get_size(x_17171); +lean_dec(x_17171); +x_17173 = lean_nat_dec_lt(x_17170, x_17172); +if (x_17173 == 0) +{ +uint8_t x_17174; +x_17174 = lean_nat_dec_eq(x_17170, x_17172); +if (x_17174 == 0) +{ +lean_object* x_17175; lean_object* x_17176; lean_object* x_17177; lean_object* x_17178; lean_object* x_17179; lean_object* x_17180; lean_object* x_17181; lean_object* x_17182; lean_object* x_17183; lean_object* x_17184; lean_object* x_17185; lean_object* x_17186; lean_object* x_17187; lean_object* x_17188; lean_object* x_17189; lean_object* x_17190; lean_object* x_17191; +x_17175 = lean_unsigned_to_nat(0u); +x_17176 = l_Array_extract___rarg(x_17013, x_17175, x_17172); +x_17177 = l_Array_extract___rarg(x_17013, x_17172, x_17170); +lean_dec(x_17170); +lean_inc(x_153); +if (lean_is_scalar(x_17167)) { + x_17178 = lean_alloc_ctor(6, 2, 0); +} else { + x_17178 = x_17167; + lean_ctor_set_tag(x_17178, 6); +} +lean_ctor_set(x_17178, 0, x_153); +lean_ctor_set(x_17178, 1, x_17176); +x_17179 = lean_ctor_get(x_1, 0); +lean_inc(x_17179); +x_17180 = l_Lean_IR_ToIR_bindVar(x_17179, x_17018, x_4, x_5, x_17166); +x_17181 = lean_ctor_get(x_17180, 0); +lean_inc(x_17181); +x_17182 = lean_ctor_get(x_17180, 1); +lean_inc(x_17182); +lean_dec(x_17180); +x_17183 = lean_ctor_get(x_17181, 0); +lean_inc(x_17183); +x_17184 = lean_ctor_get(x_17181, 1); +lean_inc(x_17184); +lean_dec(x_17181); +x_17185 = l_Lean_IR_ToIR_newVar(x_17184, x_4, x_5, x_17182); +x_17186 = lean_ctor_get(x_17185, 0); +lean_inc(x_17186); +x_17187 = lean_ctor_get(x_17185, 1); +lean_inc(x_17187); +lean_dec(x_17185); +x_17188 = lean_ctor_get(x_17186, 0); +lean_inc(x_17188); +x_17189 = lean_ctor_get(x_17186, 1); +lean_inc(x_17189); +lean_dec(x_17186); +x_17190 = lean_ctor_get(x_1, 2); +lean_inc(x_17190); +lean_inc(x_5); +lean_inc(x_4); +x_17191 = 
l_Lean_IR_ToIR_lowerType(x_17190, x_17189, x_4, x_5, x_17187); +if (lean_obj_tag(x_17191) == 0) +{ +lean_object* x_17192; lean_object* x_17193; lean_object* x_17194; lean_object* x_17195; lean_object* x_17196; +x_17192 = lean_ctor_get(x_17191, 0); +lean_inc(x_17192); +x_17193 = lean_ctor_get(x_17191, 1); +lean_inc(x_17193); +lean_dec(x_17191); +x_17194 = lean_ctor_get(x_17192, 0); +lean_inc(x_17194); +x_17195 = lean_ctor_get(x_17192, 1); +lean_inc(x_17195); +lean_dec(x_17192); +lean_inc(x_5); +lean_inc(x_4); +lean_inc(x_2); +x_17196 = l_Lean_IR_ToIR_lowerLet___lambda__3(x_2, x_17188, x_17177, x_17183, x_17178, x_17194, x_17195, x_4, x_5, x_17193); +if (lean_obj_tag(x_17196) == 0) +{ +lean_object* x_17197; lean_object* x_17198; lean_object* x_17199; lean_object* x_17200; lean_object* x_17201; lean_object* x_17202; lean_object* x_17203; +x_17197 = lean_ctor_get(x_17196, 0); +lean_inc(x_17197); +x_17198 = lean_ctor_get(x_17196, 1); +lean_inc(x_17198); +lean_dec(x_17196); +x_17199 = lean_ctor_get(x_17197, 0); +lean_inc(x_17199); +x_17200 = lean_ctor_get(x_17197, 1); +lean_inc(x_17200); +if (lean_is_exclusive(x_17197)) { + lean_ctor_release(x_17197, 0); + lean_ctor_release(x_17197, 1); + x_17201 = x_17197; +} else { + lean_dec_ref(x_17197); + x_17201 = lean_box(0); +} +if (lean_is_scalar(x_17169)) { + x_17202 = lean_alloc_ctor(1, 1, 0); +} else { + x_17202 = x_17169; +} +lean_ctor_set(x_17202, 0, x_17199); +if (lean_is_scalar(x_17201)) { + x_17203 = lean_alloc_ctor(0, 2, 0); +} else { + x_17203 = x_17201; +} +lean_ctor_set(x_17203, 0, x_17202); +lean_ctor_set(x_17203, 1, x_17200); +x_17133 = x_17203; +x_17134 = x_17198; +goto block_17160; +} +else +{ +lean_object* x_17204; lean_object* x_17205; lean_object* x_17206; lean_object* x_17207; +lean_dec(x_17169); +lean_dec(x_17023); +lean_dec(x_17013); +lean_dec(x_153); +lean_dec(x_5); +lean_dec(x_4); +lean_dec(x_2); +lean_dec(x_1); +x_17204 = lean_ctor_get(x_17196, 0); +lean_inc(x_17204); +x_17205 = lean_ctor_get(x_17196, 1); +lean_inc(x_17205); +if (lean_is_exclusive(x_17196)) { + lean_ctor_release(x_17196, 0); + lean_ctor_release(x_17196, 1); + x_17206 = x_17196; +} else { + lean_dec_ref(x_17196); + x_17206 = lean_box(0); +} +if (lean_is_scalar(x_17206)) { + x_17207 = lean_alloc_ctor(1, 2, 0); +} else { + x_17207 = x_17206; +} +lean_ctor_set(x_17207, 0, x_17204); +lean_ctor_set(x_17207, 1, x_17205); +return x_17207; +} +} +else +{ +lean_object* x_17208; lean_object* x_17209; lean_object* x_17210; lean_object* x_17211; +lean_dec(x_17188); +lean_dec(x_17183); +lean_dec(x_17178); +lean_dec(x_17177); +lean_dec(x_17169); +lean_dec(x_17023); +lean_dec(x_17013); +lean_dec(x_153); +lean_dec(x_5); +lean_dec(x_4); +lean_dec(x_2); +lean_dec(x_1); +x_17208 = lean_ctor_get(x_17191, 0); +lean_inc(x_17208); +x_17209 = lean_ctor_get(x_17191, 1); +lean_inc(x_17209); +if (lean_is_exclusive(x_17191)) { + lean_ctor_release(x_17191, 0); + lean_ctor_release(x_17191, 1); + x_17210 = x_17191; +} else { + lean_dec_ref(x_17191); + x_17210 = lean_box(0); +} +if (lean_is_scalar(x_17210)) { + x_17211 = lean_alloc_ctor(1, 2, 0); +} else { + x_17211 = x_17210; +} +lean_ctor_set(x_17211, 0, x_17208); +lean_ctor_set(x_17211, 1, x_17209); +return x_17211; +} +} +else +{ +lean_object* x_17212; lean_object* x_17213; lean_object* x_17214; lean_object* x_17215; lean_object* x_17216; lean_object* x_17217; lean_object* x_17218; lean_object* x_17219; lean_object* x_17220; +lean_dec(x_17172); +lean_dec(x_17170); +lean_inc(x_17013); +lean_inc(x_153); +if (lean_is_scalar(x_17167)) { + 
x_17212 = lean_alloc_ctor(6, 2, 0); +} else { + x_17212 = x_17167; + lean_ctor_set_tag(x_17212, 6); +} +lean_ctor_set(x_17212, 0, x_153); +lean_ctor_set(x_17212, 1, x_17013); +x_17213 = lean_ctor_get(x_1, 0); +lean_inc(x_17213); +x_17214 = l_Lean_IR_ToIR_bindVar(x_17213, x_17018, x_4, x_5, x_17166); +x_17215 = lean_ctor_get(x_17214, 0); +lean_inc(x_17215); +x_17216 = lean_ctor_get(x_17214, 1); +lean_inc(x_17216); +lean_dec(x_17214); +x_17217 = lean_ctor_get(x_17215, 0); +lean_inc(x_17217); +x_17218 = lean_ctor_get(x_17215, 1); +lean_inc(x_17218); +lean_dec(x_17215); +x_17219 = lean_ctor_get(x_1, 2); +lean_inc(x_17219); +lean_inc(x_5); +lean_inc(x_4); +x_17220 = l_Lean_IR_ToIR_lowerType(x_17219, x_17218, x_4, x_5, x_17216); +if (lean_obj_tag(x_17220) == 0) +{ +lean_object* x_17221; lean_object* x_17222; lean_object* x_17223; lean_object* x_17224; lean_object* x_17225; +x_17221 = lean_ctor_get(x_17220, 0); +lean_inc(x_17221); +x_17222 = lean_ctor_get(x_17220, 1); +lean_inc(x_17222); +lean_dec(x_17220); +x_17223 = lean_ctor_get(x_17221, 0); +lean_inc(x_17223); +x_17224 = lean_ctor_get(x_17221, 1); +lean_inc(x_17224); +lean_dec(x_17221); +lean_inc(x_5); +lean_inc(x_4); +lean_inc(x_2); +x_17225 = l_Lean_IR_ToIR_lowerLet___lambda__1(x_2, x_17217, x_17212, x_17223, x_17224, x_4, x_5, x_17222); +if (lean_obj_tag(x_17225) == 0) +{ +lean_object* x_17226; lean_object* x_17227; lean_object* x_17228; lean_object* x_17229; lean_object* x_17230; lean_object* x_17231; lean_object* x_17232; +x_17226 = lean_ctor_get(x_17225, 0); +lean_inc(x_17226); +x_17227 = lean_ctor_get(x_17225, 1); +lean_inc(x_17227); +lean_dec(x_17225); +x_17228 = lean_ctor_get(x_17226, 0); +lean_inc(x_17228); +x_17229 = lean_ctor_get(x_17226, 1); +lean_inc(x_17229); +if (lean_is_exclusive(x_17226)) { + lean_ctor_release(x_17226, 0); + lean_ctor_release(x_17226, 1); + x_17230 = x_17226; +} else { + lean_dec_ref(x_17226); + x_17230 = lean_box(0); +} +if (lean_is_scalar(x_17169)) { + x_17231 = lean_alloc_ctor(1, 1, 0); +} else { + x_17231 = x_17169; +} +lean_ctor_set(x_17231, 0, x_17228); +if (lean_is_scalar(x_17230)) { + x_17232 = lean_alloc_ctor(0, 2, 0); +} else { + x_17232 = x_17230; +} +lean_ctor_set(x_17232, 0, x_17231); +lean_ctor_set(x_17232, 1, x_17229); +x_17133 = x_17232; +x_17134 = x_17227; +goto block_17160; +} +else +{ +lean_object* x_17233; lean_object* x_17234; lean_object* x_17235; lean_object* x_17236; +lean_dec(x_17169); +lean_dec(x_17023); +lean_dec(x_17013); +lean_dec(x_153); +lean_dec(x_5); +lean_dec(x_4); +lean_dec(x_2); +lean_dec(x_1); +x_17233 = lean_ctor_get(x_17225, 0); +lean_inc(x_17233); +x_17234 = lean_ctor_get(x_17225, 1); +lean_inc(x_17234); +if (lean_is_exclusive(x_17225)) { + lean_ctor_release(x_17225, 0); + lean_ctor_release(x_17225, 1); + x_17235 = x_17225; +} else { + lean_dec_ref(x_17225); + x_17235 = lean_box(0); +} +if (lean_is_scalar(x_17235)) { + x_17236 = lean_alloc_ctor(1, 2, 0); +} else { + x_17236 = x_17235; +} +lean_ctor_set(x_17236, 0, x_17233); +lean_ctor_set(x_17236, 1, x_17234); +return x_17236; +} +} +else +{ +lean_object* x_17237; lean_object* x_17238; lean_object* x_17239; lean_object* x_17240; +lean_dec(x_17217); +lean_dec(x_17212); +lean_dec(x_17169); +lean_dec(x_17023); +lean_dec(x_17013); +lean_dec(x_153); +lean_dec(x_5); +lean_dec(x_4); +lean_dec(x_2); +lean_dec(x_1); +x_17237 = lean_ctor_get(x_17220, 0); +lean_inc(x_17237); +x_17238 = lean_ctor_get(x_17220, 1); +lean_inc(x_17238); +if (lean_is_exclusive(x_17220)) { + lean_ctor_release(x_17220, 0); + lean_ctor_release(x_17220, 
1); + x_17239 = x_17220; +} else { + lean_dec_ref(x_17220); + x_17239 = lean_box(0); +} +if (lean_is_scalar(x_17239)) { + x_17240 = lean_alloc_ctor(1, 2, 0); +} else { + x_17240 = x_17239; +} +lean_ctor_set(x_17240, 0, x_17237); +lean_ctor_set(x_17240, 1, x_17238); +return x_17240; +} +} +} +else +{ +lean_object* x_17241; lean_object* x_17242; lean_object* x_17243; lean_object* x_17244; lean_object* x_17245; lean_object* x_17246; lean_object* x_17247; lean_object* x_17248; lean_object* x_17249; +lean_dec(x_17172); +lean_dec(x_17170); +lean_inc(x_17013); +lean_inc(x_153); +if (lean_is_scalar(x_17167)) { + x_17241 = lean_alloc_ctor(7, 2, 0); +} else { + x_17241 = x_17167; + lean_ctor_set_tag(x_17241, 7); +} +lean_ctor_set(x_17241, 0, x_153); +lean_ctor_set(x_17241, 1, x_17013); +x_17242 = lean_ctor_get(x_1, 0); +lean_inc(x_17242); +x_17243 = l_Lean_IR_ToIR_bindVar(x_17242, x_17018, x_4, x_5, x_17166); +x_17244 = lean_ctor_get(x_17243, 0); +lean_inc(x_17244); +x_17245 = lean_ctor_get(x_17243, 1); +lean_inc(x_17245); +lean_dec(x_17243); +x_17246 = lean_ctor_get(x_17244, 0); +lean_inc(x_17246); +x_17247 = lean_ctor_get(x_17244, 1); +lean_inc(x_17247); +lean_dec(x_17244); +x_17248 = lean_box(7); +lean_inc(x_5); +lean_inc(x_4); +lean_inc(x_2); +x_17249 = l_Lean_IR_ToIR_lowerLet___lambda__1(x_2, x_17246, x_17241, x_17248, x_17247, x_4, x_5, x_17245); +if (lean_obj_tag(x_17249) == 0) +{ +lean_object* x_17250; lean_object* x_17251; lean_object* x_17252; lean_object* x_17253; lean_object* x_17254; lean_object* x_17255; lean_object* x_17256; +x_17250 = lean_ctor_get(x_17249, 0); +lean_inc(x_17250); +x_17251 = lean_ctor_get(x_17249, 1); +lean_inc(x_17251); +lean_dec(x_17249); +x_17252 = lean_ctor_get(x_17250, 0); +lean_inc(x_17252); +x_17253 = lean_ctor_get(x_17250, 1); +lean_inc(x_17253); +if (lean_is_exclusive(x_17250)) { + lean_ctor_release(x_17250, 0); + lean_ctor_release(x_17250, 1); + x_17254 = x_17250; +} else { + lean_dec_ref(x_17250); + x_17254 = lean_box(0); +} +if (lean_is_scalar(x_17169)) { + x_17255 = lean_alloc_ctor(1, 1, 0); +} else { + x_17255 = x_17169; +} +lean_ctor_set(x_17255, 0, x_17252); +if (lean_is_scalar(x_17254)) { + x_17256 = lean_alloc_ctor(0, 2, 0); +} else { + x_17256 = x_17254; +} +lean_ctor_set(x_17256, 0, x_17255); +lean_ctor_set(x_17256, 1, x_17253); +x_17133 = x_17256; +x_17134 = x_17251; +goto block_17160; +} +else +{ +lean_object* x_17257; lean_object* x_17258; lean_object* x_17259; lean_object* x_17260; +lean_dec(x_17169); +lean_dec(x_17023); +lean_dec(x_17013); +lean_dec(x_153); +lean_dec(x_5); +lean_dec(x_4); +lean_dec(x_2); +lean_dec(x_1); +x_17257 = lean_ctor_get(x_17249, 0); +lean_inc(x_17257); +x_17258 = lean_ctor_get(x_17249, 1); +lean_inc(x_17258); +if (lean_is_exclusive(x_17249)) { + lean_ctor_release(x_17249, 0); + lean_ctor_release(x_17249, 1); + x_17259 = x_17249; +} else { + lean_dec_ref(x_17249); + x_17259 = lean_box(0); +} +if (lean_is_scalar(x_17259)) { + x_17260 = lean_alloc_ctor(1, 2, 0); +} else { + x_17260 = x_17259; +} +lean_ctor_set(x_17260, 0, x_17257); +lean_ctor_set(x_17260, 1, x_17258); +return x_17260; +} +} +} +block_17160: +{ +lean_object* x_17135; +x_17135 = lean_ctor_get(x_17133, 0); +lean_inc(x_17135); +if (lean_obj_tag(x_17135) == 0) +{ +lean_object* x_17136; lean_object* x_17137; lean_object* x_17138; lean_object* x_17139; lean_object* x_17140; lean_object* x_17141; lean_object* x_17142; lean_object* x_17143; lean_object* x_17144; lean_object* x_17145; +lean_dec(x_17023); +x_17136 = lean_ctor_get(x_17133, 1); +lean_inc(x_17136); 
+lean_dec(x_17133); +x_17137 = lean_alloc_ctor(6, 2, 0); +lean_ctor_set(x_17137, 0, x_153); +lean_ctor_set(x_17137, 1, x_17013); +x_17138 = lean_ctor_get(x_1, 0); +lean_inc(x_17138); +x_17139 = l_Lean_IR_ToIR_bindVar(x_17138, x_17136, x_4, x_5, x_17134); +x_17140 = lean_ctor_get(x_17139, 0); +lean_inc(x_17140); +x_17141 = lean_ctor_get(x_17139, 1); +lean_inc(x_17141); +lean_dec(x_17139); +x_17142 = lean_ctor_get(x_17140, 0); +lean_inc(x_17142); +x_17143 = lean_ctor_get(x_17140, 1); +lean_inc(x_17143); +lean_dec(x_17140); +x_17144 = lean_ctor_get(x_1, 2); +lean_inc(x_17144); +lean_dec(x_1); +lean_inc(x_5); +lean_inc(x_4); +x_17145 = l_Lean_IR_ToIR_lowerType(x_17144, x_17143, x_4, x_5, x_17141); +if (lean_obj_tag(x_17145) == 0) +{ +lean_object* x_17146; lean_object* x_17147; lean_object* x_17148; lean_object* x_17149; lean_object* x_17150; +x_17146 = lean_ctor_get(x_17145, 0); +lean_inc(x_17146); +x_17147 = lean_ctor_get(x_17145, 1); +lean_inc(x_17147); +lean_dec(x_17145); +x_17148 = lean_ctor_get(x_17146, 0); +lean_inc(x_17148); +x_17149 = lean_ctor_get(x_17146, 1); +lean_inc(x_17149); +lean_dec(x_17146); +x_17150 = l_Lean_IR_ToIR_lowerLet___lambda__1(x_2, x_17142, x_17137, x_17148, x_17149, x_4, x_5, x_17147); +return x_17150; +} +else +{ +lean_object* x_17151; lean_object* x_17152; lean_object* x_17153; lean_object* x_17154; +lean_dec(x_17142); +lean_dec(x_17137); +lean_dec(x_5); +lean_dec(x_4); +lean_dec(x_2); +x_17151 = lean_ctor_get(x_17145, 0); +lean_inc(x_17151); +x_17152 = lean_ctor_get(x_17145, 1); +lean_inc(x_17152); +if (lean_is_exclusive(x_17145)) { + lean_ctor_release(x_17145, 0); + lean_ctor_release(x_17145, 1); + x_17153 = x_17145; +} else { + lean_dec_ref(x_17145); + x_17153 = lean_box(0); +} +if (lean_is_scalar(x_17153)) { + x_17154 = lean_alloc_ctor(1, 2, 0); +} else { + x_17154 = x_17153; +} +lean_ctor_set(x_17154, 0, x_17151); +lean_ctor_set(x_17154, 1, x_17152); +return x_17154; +} +} +else +{ +lean_object* x_17155; lean_object* x_17156; lean_object* x_17157; lean_object* x_17158; lean_object* x_17159; +lean_dec(x_17013); +lean_dec(x_153); +lean_dec(x_5); +lean_dec(x_4); +lean_dec(x_2); +lean_dec(x_1); +x_17155 = lean_ctor_get(x_17133, 1); +lean_inc(x_17155); +if (lean_is_exclusive(x_17133)) { + lean_ctor_release(x_17133, 0); + lean_ctor_release(x_17133, 1); + x_17156 = x_17133; +} else { + lean_dec_ref(x_17133); + x_17156 = lean_box(0); +} +x_17157 = lean_ctor_get(x_17135, 0); +lean_inc(x_17157); +lean_dec(x_17135); +if (lean_is_scalar(x_17156)) { + x_17158 = lean_alloc_ctor(0, 2, 0); +} else { + x_17158 = x_17156; +} +lean_ctor_set(x_17158, 0, x_17157); +lean_ctor_set(x_17158, 1, x_17155); +if (lean_is_scalar(x_17023)) { + x_17159 = lean_alloc_ctor(0, 2, 0); +} else { + x_17159 = x_17023; +} +lean_ctor_set(x_17159, 0, x_17158); +lean_ctor_set(x_17159, 1, x_17134); +return x_17159; +} +} +} +case 2: +{ +lean_object* x_17261; lean_object* x_17262; +lean_dec(x_17029); +lean_dec(x_17024); +lean_dec(x_17023); +lean_dec(x_17019); +lean_dec(x_17013); +lean_dec(x_14805); +lean_dec(x_14804); +lean_dec(x_153); +lean_dec(x_2); +lean_dec(x_1); +x_17261 = l_Lean_IR_ToIR_lowerLet___closed__18; +x_17262 = l_panic___at_Lean_IR_ToIR_lowerAlt_loop___spec__1(x_17261, x_17018, x_4, x_5, x_17022); +return x_17262; +} +case 3: +{ +lean_object* x_17263; lean_object* x_17264; lean_object* x_17291; lean_object* x_17292; +lean_dec(x_17029); +lean_dec(x_17024); +lean_dec(x_14805); +lean_dec(x_14804); +lean_inc(x_153); +x_17291 = l_Lean_Compiler_LCNF_getMonoDecl_x3f(x_153, x_4, x_5, x_17022); 
+x_17292 = lean_ctor_get(x_17291, 0); +lean_inc(x_17292); +if (lean_obj_tag(x_17292) == 0) +{ +lean_object* x_17293; lean_object* x_17294; lean_object* x_17295; +x_17293 = lean_ctor_get(x_17291, 1); +lean_inc(x_17293); +lean_dec(x_17291); +x_17294 = lean_box(0); +if (lean_is_scalar(x_17019)) { + x_17295 = lean_alloc_ctor(0, 2, 0); +} else { + x_17295 = x_17019; +} +lean_ctor_set(x_17295, 0, x_17294); +lean_ctor_set(x_17295, 1, x_17018); +x_17263 = x_17295; +x_17264 = x_17293; +goto block_17290; +} +else +{ +lean_object* x_17296; lean_object* x_17297; lean_object* x_17298; lean_object* x_17299; lean_object* x_17300; lean_object* x_17301; lean_object* x_17302; uint8_t x_17303; +lean_dec(x_17019); +x_17296 = lean_ctor_get(x_17291, 1); +lean_inc(x_17296); +if (lean_is_exclusive(x_17291)) { + lean_ctor_release(x_17291, 0); + lean_ctor_release(x_17291, 1); + x_17297 = x_17291; +} else { + lean_dec_ref(x_17291); + x_17297 = lean_box(0); +} +x_17298 = lean_ctor_get(x_17292, 0); +lean_inc(x_17298); +if (lean_is_exclusive(x_17292)) { + lean_ctor_release(x_17292, 0); + x_17299 = x_17292; +} else { + lean_dec_ref(x_17292); + x_17299 = lean_box(0); +} +x_17300 = lean_array_get_size(x_17013); +x_17301 = lean_ctor_get(x_17298, 3); +lean_inc(x_17301); +lean_dec(x_17298); +x_17302 = lean_array_get_size(x_17301); +lean_dec(x_17301); +x_17303 = lean_nat_dec_lt(x_17300, x_17302); +if (x_17303 == 0) +{ +uint8_t x_17304; +x_17304 = lean_nat_dec_eq(x_17300, x_17302); +if (x_17304 == 0) +{ +lean_object* x_17305; lean_object* x_17306; lean_object* x_17307; lean_object* x_17308; lean_object* x_17309; lean_object* x_17310; lean_object* x_17311; lean_object* x_17312; lean_object* x_17313; lean_object* x_17314; lean_object* x_17315; lean_object* x_17316; lean_object* x_17317; lean_object* x_17318; lean_object* x_17319; lean_object* x_17320; lean_object* x_17321; +x_17305 = lean_unsigned_to_nat(0u); +x_17306 = l_Array_extract___rarg(x_17013, x_17305, x_17302); +x_17307 = l_Array_extract___rarg(x_17013, x_17302, x_17300); +lean_dec(x_17300); +lean_inc(x_153); +if (lean_is_scalar(x_17297)) { + x_17308 = lean_alloc_ctor(6, 2, 0); +} else { + x_17308 = x_17297; + lean_ctor_set_tag(x_17308, 6); +} +lean_ctor_set(x_17308, 0, x_153); +lean_ctor_set(x_17308, 1, x_17306); +x_17309 = lean_ctor_get(x_1, 0); +lean_inc(x_17309); +x_17310 = l_Lean_IR_ToIR_bindVar(x_17309, x_17018, x_4, x_5, x_17296); +x_17311 = lean_ctor_get(x_17310, 0); +lean_inc(x_17311); +x_17312 = lean_ctor_get(x_17310, 1); +lean_inc(x_17312); +lean_dec(x_17310); +x_17313 = lean_ctor_get(x_17311, 0); +lean_inc(x_17313); +x_17314 = lean_ctor_get(x_17311, 1); +lean_inc(x_17314); +lean_dec(x_17311); +x_17315 = l_Lean_IR_ToIR_newVar(x_17314, x_4, x_5, x_17312); +x_17316 = lean_ctor_get(x_17315, 0); +lean_inc(x_17316); +x_17317 = lean_ctor_get(x_17315, 1); +lean_inc(x_17317); +lean_dec(x_17315); +x_17318 = lean_ctor_get(x_17316, 0); +lean_inc(x_17318); +x_17319 = lean_ctor_get(x_17316, 1); +lean_inc(x_17319); +lean_dec(x_17316); +x_17320 = lean_ctor_get(x_1, 2); +lean_inc(x_17320); +lean_inc(x_5); +lean_inc(x_4); +x_17321 = l_Lean_IR_ToIR_lowerType(x_17320, x_17319, x_4, x_5, x_17317); +if (lean_obj_tag(x_17321) == 0) +{ +lean_object* x_17322; lean_object* x_17323; lean_object* x_17324; lean_object* x_17325; lean_object* x_17326; +x_17322 = lean_ctor_get(x_17321, 0); +lean_inc(x_17322); +x_17323 = lean_ctor_get(x_17321, 1); +lean_inc(x_17323); +lean_dec(x_17321); +x_17324 = lean_ctor_get(x_17322, 0); +lean_inc(x_17324); +x_17325 = lean_ctor_get(x_17322, 1); 
+lean_inc(x_17325); +lean_dec(x_17322); +lean_inc(x_5); +lean_inc(x_4); +lean_inc(x_2); +x_17326 = l_Lean_IR_ToIR_lowerLet___lambda__3(x_2, x_17318, x_17307, x_17313, x_17308, x_17324, x_17325, x_4, x_5, x_17323); +if (lean_obj_tag(x_17326) == 0) +{ +lean_object* x_17327; lean_object* x_17328; lean_object* x_17329; lean_object* x_17330; lean_object* x_17331; lean_object* x_17332; lean_object* x_17333; +x_17327 = lean_ctor_get(x_17326, 0); +lean_inc(x_17327); +x_17328 = lean_ctor_get(x_17326, 1); +lean_inc(x_17328); +lean_dec(x_17326); +x_17329 = lean_ctor_get(x_17327, 0); +lean_inc(x_17329); +x_17330 = lean_ctor_get(x_17327, 1); +lean_inc(x_17330); +if (lean_is_exclusive(x_17327)) { + lean_ctor_release(x_17327, 0); + lean_ctor_release(x_17327, 1); + x_17331 = x_17327; +} else { + lean_dec_ref(x_17327); + x_17331 = lean_box(0); +} +if (lean_is_scalar(x_17299)) { + x_17332 = lean_alloc_ctor(1, 1, 0); +} else { + x_17332 = x_17299; +} +lean_ctor_set(x_17332, 0, x_17329); +if (lean_is_scalar(x_17331)) { + x_17333 = lean_alloc_ctor(0, 2, 0); +} else { + x_17333 = x_17331; +} +lean_ctor_set(x_17333, 0, x_17332); +lean_ctor_set(x_17333, 1, x_17330); +x_17263 = x_17333; +x_17264 = x_17328; +goto block_17290; +} +else +{ +lean_object* x_17334; lean_object* x_17335; lean_object* x_17336; lean_object* x_17337; +lean_dec(x_17299); +lean_dec(x_17023); +lean_dec(x_17013); +lean_dec(x_153); +lean_dec(x_5); +lean_dec(x_4); +lean_dec(x_2); +lean_dec(x_1); +x_17334 = lean_ctor_get(x_17326, 0); +lean_inc(x_17334); +x_17335 = lean_ctor_get(x_17326, 1); +lean_inc(x_17335); +if (lean_is_exclusive(x_17326)) { + lean_ctor_release(x_17326, 0); + lean_ctor_release(x_17326, 1); + x_17336 = x_17326; +} else { + lean_dec_ref(x_17326); + x_17336 = lean_box(0); +} +if (lean_is_scalar(x_17336)) { + x_17337 = lean_alloc_ctor(1, 2, 0); +} else { + x_17337 = x_17336; +} +lean_ctor_set(x_17337, 0, x_17334); +lean_ctor_set(x_17337, 1, x_17335); +return x_17337; +} +} +else +{ +lean_object* x_17338; lean_object* x_17339; lean_object* x_17340; lean_object* x_17341; +lean_dec(x_17318); +lean_dec(x_17313); +lean_dec(x_17308); +lean_dec(x_17307); +lean_dec(x_17299); +lean_dec(x_17023); +lean_dec(x_17013); +lean_dec(x_153); +lean_dec(x_5); +lean_dec(x_4); +lean_dec(x_2); +lean_dec(x_1); +x_17338 = lean_ctor_get(x_17321, 0); +lean_inc(x_17338); +x_17339 = lean_ctor_get(x_17321, 1); +lean_inc(x_17339); +if (lean_is_exclusive(x_17321)) { + lean_ctor_release(x_17321, 0); + lean_ctor_release(x_17321, 1); + x_17340 = x_17321; +} else { + lean_dec_ref(x_17321); + x_17340 = lean_box(0); +} +if (lean_is_scalar(x_17340)) { + x_17341 = lean_alloc_ctor(1, 2, 0); +} else { + x_17341 = x_17340; +} +lean_ctor_set(x_17341, 0, x_17338); +lean_ctor_set(x_17341, 1, x_17339); +return x_17341; +} +} +else +{ +lean_object* x_17342; lean_object* x_17343; lean_object* x_17344; lean_object* x_17345; lean_object* x_17346; lean_object* x_17347; lean_object* x_17348; lean_object* x_17349; lean_object* x_17350; +lean_dec(x_17302); +lean_dec(x_17300); +lean_inc(x_17013); +lean_inc(x_153); +if (lean_is_scalar(x_17297)) { + x_17342 = lean_alloc_ctor(6, 2, 0); +} else { + x_17342 = x_17297; + lean_ctor_set_tag(x_17342, 6); +} +lean_ctor_set(x_17342, 0, x_153); +lean_ctor_set(x_17342, 1, x_17013); +x_17343 = lean_ctor_get(x_1, 0); +lean_inc(x_17343); +x_17344 = l_Lean_IR_ToIR_bindVar(x_17343, x_17018, x_4, x_5, x_17296); +x_17345 = lean_ctor_get(x_17344, 0); +lean_inc(x_17345); +x_17346 = lean_ctor_get(x_17344, 1); +lean_inc(x_17346); +lean_dec(x_17344); +x_17347 = 
lean_ctor_get(x_17345, 0); +lean_inc(x_17347); +x_17348 = lean_ctor_get(x_17345, 1); +lean_inc(x_17348); +lean_dec(x_17345); +x_17349 = lean_ctor_get(x_1, 2); +lean_inc(x_17349); +lean_inc(x_5); +lean_inc(x_4); +x_17350 = l_Lean_IR_ToIR_lowerType(x_17349, x_17348, x_4, x_5, x_17346); +if (lean_obj_tag(x_17350) == 0) +{ +lean_object* x_17351; lean_object* x_17352; lean_object* x_17353; lean_object* x_17354; lean_object* x_17355; +x_17351 = lean_ctor_get(x_17350, 0); +lean_inc(x_17351); +x_17352 = lean_ctor_get(x_17350, 1); +lean_inc(x_17352); +lean_dec(x_17350); +x_17353 = lean_ctor_get(x_17351, 0); +lean_inc(x_17353); +x_17354 = lean_ctor_get(x_17351, 1); +lean_inc(x_17354); +lean_dec(x_17351); +lean_inc(x_5); +lean_inc(x_4); +lean_inc(x_2); +x_17355 = l_Lean_IR_ToIR_lowerLet___lambda__1(x_2, x_17347, x_17342, x_17353, x_17354, x_4, x_5, x_17352); +if (lean_obj_tag(x_17355) == 0) +{ +lean_object* x_17356; lean_object* x_17357; lean_object* x_17358; lean_object* x_17359; lean_object* x_17360; lean_object* x_17361; lean_object* x_17362; +x_17356 = lean_ctor_get(x_17355, 0); +lean_inc(x_17356); +x_17357 = lean_ctor_get(x_17355, 1); +lean_inc(x_17357); +lean_dec(x_17355); +x_17358 = lean_ctor_get(x_17356, 0); +lean_inc(x_17358); +x_17359 = lean_ctor_get(x_17356, 1); +lean_inc(x_17359); +if (lean_is_exclusive(x_17356)) { + lean_ctor_release(x_17356, 0); + lean_ctor_release(x_17356, 1); + x_17360 = x_17356; +} else { + lean_dec_ref(x_17356); + x_17360 = lean_box(0); +} +if (lean_is_scalar(x_17299)) { + x_17361 = lean_alloc_ctor(1, 1, 0); +} else { + x_17361 = x_17299; +} +lean_ctor_set(x_17361, 0, x_17358); +if (lean_is_scalar(x_17360)) { + x_17362 = lean_alloc_ctor(0, 2, 0); +} else { + x_17362 = x_17360; +} +lean_ctor_set(x_17362, 0, x_17361); +lean_ctor_set(x_17362, 1, x_17359); +x_17263 = x_17362; +x_17264 = x_17357; +goto block_17290; +} +else +{ +lean_object* x_17363; lean_object* x_17364; lean_object* x_17365; lean_object* x_17366; +lean_dec(x_17299); +lean_dec(x_17023); +lean_dec(x_17013); +lean_dec(x_153); +lean_dec(x_5); +lean_dec(x_4); +lean_dec(x_2); +lean_dec(x_1); +x_17363 = lean_ctor_get(x_17355, 0); +lean_inc(x_17363); +x_17364 = lean_ctor_get(x_17355, 1); +lean_inc(x_17364); +if (lean_is_exclusive(x_17355)) { + lean_ctor_release(x_17355, 0); + lean_ctor_release(x_17355, 1); + x_17365 = x_17355; +} else { + lean_dec_ref(x_17355); + x_17365 = lean_box(0); +} +if (lean_is_scalar(x_17365)) { + x_17366 = lean_alloc_ctor(1, 2, 0); +} else { + x_17366 = x_17365; +} +lean_ctor_set(x_17366, 0, x_17363); +lean_ctor_set(x_17366, 1, x_17364); +return x_17366; +} +} +else +{ +lean_object* x_17367; lean_object* x_17368; lean_object* x_17369; lean_object* x_17370; +lean_dec(x_17347); +lean_dec(x_17342); +lean_dec(x_17299); +lean_dec(x_17023); +lean_dec(x_17013); +lean_dec(x_153); +lean_dec(x_5); +lean_dec(x_4); +lean_dec(x_2); +lean_dec(x_1); +x_17367 = lean_ctor_get(x_17350, 0); +lean_inc(x_17367); +x_17368 = lean_ctor_get(x_17350, 1); +lean_inc(x_17368); +if (lean_is_exclusive(x_17350)) { + lean_ctor_release(x_17350, 0); + lean_ctor_release(x_17350, 1); + x_17369 = x_17350; +} else { + lean_dec_ref(x_17350); + x_17369 = lean_box(0); +} +if (lean_is_scalar(x_17369)) { + x_17370 = lean_alloc_ctor(1, 2, 0); +} else { + x_17370 = x_17369; +} +lean_ctor_set(x_17370, 0, x_17367); +lean_ctor_set(x_17370, 1, x_17368); +return x_17370; +} +} +} +else +{ +lean_object* x_17371; lean_object* x_17372; lean_object* x_17373; lean_object* x_17374; lean_object* x_17375; lean_object* x_17376; lean_object* 
x_17377; lean_object* x_17378; lean_object* x_17379; +lean_dec(x_17302); +lean_dec(x_17300); +lean_inc(x_17013); +lean_inc(x_153); +if (lean_is_scalar(x_17297)) { + x_17371 = lean_alloc_ctor(7, 2, 0); +} else { + x_17371 = x_17297; + lean_ctor_set_tag(x_17371, 7); +} +lean_ctor_set(x_17371, 0, x_153); +lean_ctor_set(x_17371, 1, x_17013); +x_17372 = lean_ctor_get(x_1, 0); +lean_inc(x_17372); +x_17373 = l_Lean_IR_ToIR_bindVar(x_17372, x_17018, x_4, x_5, x_17296); +x_17374 = lean_ctor_get(x_17373, 0); +lean_inc(x_17374); +x_17375 = lean_ctor_get(x_17373, 1); +lean_inc(x_17375); +lean_dec(x_17373); +x_17376 = lean_ctor_get(x_17374, 0); +lean_inc(x_17376); +x_17377 = lean_ctor_get(x_17374, 1); +lean_inc(x_17377); +lean_dec(x_17374); +x_17378 = lean_box(7); +lean_inc(x_5); +lean_inc(x_4); +lean_inc(x_2); +x_17379 = l_Lean_IR_ToIR_lowerLet___lambda__1(x_2, x_17376, x_17371, x_17378, x_17377, x_4, x_5, x_17375); +if (lean_obj_tag(x_17379) == 0) +{ +lean_object* x_17380; lean_object* x_17381; lean_object* x_17382; lean_object* x_17383; lean_object* x_17384; lean_object* x_17385; lean_object* x_17386; +x_17380 = lean_ctor_get(x_17379, 0); +lean_inc(x_17380); +x_17381 = lean_ctor_get(x_17379, 1); +lean_inc(x_17381); +lean_dec(x_17379); +x_17382 = lean_ctor_get(x_17380, 0); +lean_inc(x_17382); +x_17383 = lean_ctor_get(x_17380, 1); +lean_inc(x_17383); +if (lean_is_exclusive(x_17380)) { + lean_ctor_release(x_17380, 0); + lean_ctor_release(x_17380, 1); + x_17384 = x_17380; +} else { + lean_dec_ref(x_17380); + x_17384 = lean_box(0); +} +if (lean_is_scalar(x_17299)) { + x_17385 = lean_alloc_ctor(1, 1, 0); +} else { + x_17385 = x_17299; +} +lean_ctor_set(x_17385, 0, x_17382); +if (lean_is_scalar(x_17384)) { + x_17386 = lean_alloc_ctor(0, 2, 0); +} else { + x_17386 = x_17384; +} +lean_ctor_set(x_17386, 0, x_17385); +lean_ctor_set(x_17386, 1, x_17383); +x_17263 = x_17386; +x_17264 = x_17381; +goto block_17290; +} +else +{ +lean_object* x_17387; lean_object* x_17388; lean_object* x_17389; lean_object* x_17390; +lean_dec(x_17299); +lean_dec(x_17023); +lean_dec(x_17013); +lean_dec(x_153); +lean_dec(x_5); +lean_dec(x_4); +lean_dec(x_2); +lean_dec(x_1); +x_17387 = lean_ctor_get(x_17379, 0); +lean_inc(x_17387); +x_17388 = lean_ctor_get(x_17379, 1); +lean_inc(x_17388); +if (lean_is_exclusive(x_17379)) { + lean_ctor_release(x_17379, 0); + lean_ctor_release(x_17379, 1); + x_17389 = x_17379; +} else { + lean_dec_ref(x_17379); + x_17389 = lean_box(0); +} +if (lean_is_scalar(x_17389)) { + x_17390 = lean_alloc_ctor(1, 2, 0); +} else { + x_17390 = x_17389; +} +lean_ctor_set(x_17390, 0, x_17387); +lean_ctor_set(x_17390, 1, x_17388); +return x_17390; +} +} +} +block_17290: +{ +lean_object* x_17265; +x_17265 = lean_ctor_get(x_17263, 0); +lean_inc(x_17265); +if (lean_obj_tag(x_17265) == 0) +{ +lean_object* x_17266; lean_object* x_17267; lean_object* x_17268; lean_object* x_17269; lean_object* x_17270; lean_object* x_17271; lean_object* x_17272; lean_object* x_17273; lean_object* x_17274; lean_object* x_17275; +lean_dec(x_17023); +x_17266 = lean_ctor_get(x_17263, 1); +lean_inc(x_17266); +lean_dec(x_17263); +x_17267 = lean_alloc_ctor(6, 2, 0); +lean_ctor_set(x_17267, 0, x_153); +lean_ctor_set(x_17267, 1, x_17013); +x_17268 = lean_ctor_get(x_1, 0); +lean_inc(x_17268); +x_17269 = l_Lean_IR_ToIR_bindVar(x_17268, x_17266, x_4, x_5, x_17264); +x_17270 = lean_ctor_get(x_17269, 0); +lean_inc(x_17270); +x_17271 = lean_ctor_get(x_17269, 1); +lean_inc(x_17271); +lean_dec(x_17269); +x_17272 = lean_ctor_get(x_17270, 0); +lean_inc(x_17272); 
+x_17273 = lean_ctor_get(x_17270, 1); +lean_inc(x_17273); +lean_dec(x_17270); +x_17274 = lean_ctor_get(x_1, 2); +lean_inc(x_17274); +lean_dec(x_1); +lean_inc(x_5); +lean_inc(x_4); +x_17275 = l_Lean_IR_ToIR_lowerType(x_17274, x_17273, x_4, x_5, x_17271); +if (lean_obj_tag(x_17275) == 0) +{ +lean_object* x_17276; lean_object* x_17277; lean_object* x_17278; lean_object* x_17279; lean_object* x_17280; +x_17276 = lean_ctor_get(x_17275, 0); +lean_inc(x_17276); +x_17277 = lean_ctor_get(x_17275, 1); +lean_inc(x_17277); +lean_dec(x_17275); +x_17278 = lean_ctor_get(x_17276, 0); +lean_inc(x_17278); +x_17279 = lean_ctor_get(x_17276, 1); +lean_inc(x_17279); +lean_dec(x_17276); +x_17280 = l_Lean_IR_ToIR_lowerLet___lambda__1(x_2, x_17272, x_17267, x_17278, x_17279, x_4, x_5, x_17277); +return x_17280; +} +else +{ +lean_object* x_17281; lean_object* x_17282; lean_object* x_17283; lean_object* x_17284; +lean_dec(x_17272); +lean_dec(x_17267); +lean_dec(x_5); +lean_dec(x_4); +lean_dec(x_2); +x_17281 = lean_ctor_get(x_17275, 0); +lean_inc(x_17281); +x_17282 = lean_ctor_get(x_17275, 1); +lean_inc(x_17282); +if (lean_is_exclusive(x_17275)) { + lean_ctor_release(x_17275, 0); + lean_ctor_release(x_17275, 1); + x_17283 = x_17275; +} else { + lean_dec_ref(x_17275); + x_17283 = lean_box(0); +} +if (lean_is_scalar(x_17283)) { + x_17284 = lean_alloc_ctor(1, 2, 0); +} else { + x_17284 = x_17283; +} +lean_ctor_set(x_17284, 0, x_17281); +lean_ctor_set(x_17284, 1, x_17282); +return x_17284; +} +} +else +{ +lean_object* x_17285; lean_object* x_17286; lean_object* x_17287; lean_object* x_17288; lean_object* x_17289; +lean_dec(x_17013); +lean_dec(x_153); +lean_dec(x_5); +lean_dec(x_4); +lean_dec(x_2); +lean_dec(x_1); +x_17285 = lean_ctor_get(x_17263, 1); +lean_inc(x_17285); +if (lean_is_exclusive(x_17263)) { + lean_ctor_release(x_17263, 0); + lean_ctor_release(x_17263, 1); + x_17286 = x_17263; +} else { + lean_dec_ref(x_17263); + x_17286 = lean_box(0); +} +x_17287 = lean_ctor_get(x_17265, 0); +lean_inc(x_17287); +lean_dec(x_17265); +if (lean_is_scalar(x_17286)) { + x_17288 = lean_alloc_ctor(0, 2, 0); +} else { + x_17288 = x_17286; +} +lean_ctor_set(x_17288, 0, x_17287); +lean_ctor_set(x_17288, 1, x_17285); +if (lean_is_scalar(x_17023)) { + x_17289 = lean_alloc_ctor(0, 2, 0); +} else { + x_17289 = x_17023; +} +lean_ctor_set(x_17289, 0, x_17288); +lean_ctor_set(x_17289, 1, x_17264); +return x_17289; +} +} +} +case 4: +{ +lean_object* x_17391; lean_object* x_17392; uint8_t x_17393; +lean_dec(x_17024); +lean_dec(x_17023); +lean_dec(x_17019); +lean_dec(x_14805); +lean_dec(x_14804); +if (lean_is_exclusive(x_17029)) { + lean_ctor_release(x_17029, 0); + x_17391 = x_17029; +} else { + lean_dec_ref(x_17029); + x_17391 = lean_box(0); +} +x_17392 = l_Lean_IR_ToIR_lowerLet___closed__20; +x_17393 = lean_name_eq(x_153, x_17392); +if (x_17393 == 0) +{ +uint8_t x_17394; lean_object* x_17395; lean_object* x_17396; lean_object* x_17397; lean_object* x_17398; lean_object* x_17399; lean_object* x_17400; lean_object* x_17401; lean_object* x_17402; lean_object* x_17403; +lean_dec(x_17013); +lean_dec(x_2); +lean_dec(x_1); +x_17394 = 1; +x_17395 = l_Lean_IR_ToIR_lowerLet___closed__14; +x_17396 = l_Lean_Name_toString(x_153, x_17394, x_17395); +if (lean_is_scalar(x_17391)) { + x_17397 = lean_alloc_ctor(3, 1, 0); +} else { + x_17397 = x_17391; + lean_ctor_set_tag(x_17397, 3); +} +lean_ctor_set(x_17397, 0, x_17396); +x_17398 = l_Lean_IR_ToIR_lowerLet___closed__22; +x_17399 = lean_alloc_ctor(5, 2, 0); +lean_ctor_set(x_17399, 0, x_17398); 
+lean_ctor_set(x_17399, 1, x_17397); +x_17400 = l_Lean_IR_ToIR_lowerLet___closed__24; +x_17401 = lean_alloc_ctor(5, 2, 0); +lean_ctor_set(x_17401, 0, x_17399); +lean_ctor_set(x_17401, 1, x_17400); +x_17402 = l_Lean_MessageData_ofFormat(x_17401); +x_17403 = l_Lean_throwError___at_Lean_IR_ToIR_lowerLet___spec__1(x_17402, x_17018, x_4, x_5, x_17022); +lean_dec(x_5); +lean_dec(x_4); +lean_dec(x_17018); +return x_17403; +} +else +{ +lean_object* x_17404; lean_object* x_17405; lean_object* x_17406; +lean_dec(x_17391); +lean_dec(x_153); +x_17404 = l_Lean_IR_instInhabitedArg; +x_17405 = lean_unsigned_to_nat(2u); +x_17406 = lean_array_get(x_17404, x_17013, x_17405); +lean_dec(x_17013); +if (lean_obj_tag(x_17406) == 0) +{ +lean_object* x_17407; lean_object* x_17408; lean_object* x_17409; lean_object* x_17410; lean_object* x_17411; lean_object* x_17412; lean_object* x_17413; +x_17407 = lean_ctor_get(x_17406, 0); +lean_inc(x_17407); +lean_dec(x_17406); +x_17408 = lean_ctor_get(x_1, 0); +lean_inc(x_17408); +lean_dec(x_1); +x_17409 = l_Lean_IR_ToIR_bindVarToVarId(x_17408, x_17407, x_17018, x_4, x_5, x_17022); +x_17410 = lean_ctor_get(x_17409, 0); +lean_inc(x_17410); +x_17411 = lean_ctor_get(x_17409, 1); +lean_inc(x_17411); +lean_dec(x_17409); +x_17412 = lean_ctor_get(x_17410, 1); +lean_inc(x_17412); +lean_dec(x_17410); +x_17413 = l_Lean_IR_ToIR_lowerCode(x_2, x_17412, x_4, x_5, x_17411); +return x_17413; +} +else +{ +lean_object* x_17414; lean_object* x_17415; lean_object* x_17416; lean_object* x_17417; lean_object* x_17418; lean_object* x_17419; +x_17414 = lean_ctor_get(x_1, 0); +lean_inc(x_17414); +lean_dec(x_1); +x_17415 = l_Lean_IR_ToIR_bindErased(x_17414, x_17018, x_4, x_5, x_17022); +x_17416 = lean_ctor_get(x_17415, 0); +lean_inc(x_17416); +x_17417 = lean_ctor_get(x_17415, 1); +lean_inc(x_17417); +lean_dec(x_17415); +x_17418 = lean_ctor_get(x_17416, 1); +lean_inc(x_17418); +lean_dec(x_17416); +x_17419 = l_Lean_IR_ToIR_lowerCode(x_2, x_17418, x_4, x_5, x_17417); +return x_17419; +} +} +} +case 5: +{ +lean_object* x_17420; lean_object* x_17421; +lean_dec(x_17029); +lean_dec(x_17024); +lean_dec(x_17023); +lean_dec(x_17019); +lean_dec(x_17013); +lean_dec(x_14805); +lean_dec(x_14804); +lean_dec(x_153); +lean_dec(x_2); +lean_dec(x_1); +x_17420 = l_Lean_IR_ToIR_lowerLet___closed__26; +x_17421 = l_panic___at_Lean_IR_ToIR_lowerAlt_loop___spec__1(x_17420, x_17018, x_4, x_5, x_17022); +return x_17421; +} +case 6: +{ +lean_object* x_17422; uint8_t x_17423; +x_17422 = lean_ctor_get(x_17029, 0); +lean_inc(x_17422); +lean_dec(x_17029); +lean_inc(x_153); +x_17423 = l_Lean_isExtern(x_17024, x_153); +if (x_17423 == 0) +{ +lean_object* x_17424; +lean_dec(x_17023); +lean_dec(x_17019); +lean_dec(x_17013); +lean_inc(x_5); +lean_inc(x_4); +x_17424 = l_Lean_IR_ToIR_getCtorInfo(x_153, x_17018, x_4, x_5, x_17022); +if (lean_obj_tag(x_17424) == 0) +{ +lean_object* x_17425; lean_object* x_17426; lean_object* x_17427; lean_object* x_17428; lean_object* x_17429; lean_object* x_17430; lean_object* x_17431; lean_object* x_17432; lean_object* x_17433; lean_object* x_17434; lean_object* x_17435; lean_object* x_17436; lean_object* x_17437; lean_object* x_17438; lean_object* x_17439; lean_object* x_17440; lean_object* x_17441; lean_object* x_17442; lean_object* x_17443; lean_object* x_17444; +x_17425 = lean_ctor_get(x_17424, 0); +lean_inc(x_17425); +x_17426 = lean_ctor_get(x_17425, 0); +lean_inc(x_17426); +x_17427 = lean_ctor_get(x_17424, 1); +lean_inc(x_17427); +lean_dec(x_17424); +x_17428 = lean_ctor_get(x_17425, 1); 
+lean_inc(x_17428); +lean_dec(x_17425); +x_17429 = lean_ctor_get(x_17426, 0); +lean_inc(x_17429); +x_17430 = lean_ctor_get(x_17426, 1); +lean_inc(x_17430); +lean_dec(x_17426); +x_17431 = lean_ctor_get(x_17422, 3); +lean_inc(x_17431); +lean_dec(x_17422); +x_17432 = lean_array_get_size(x_14804); +x_17433 = l_Array_extract___rarg(x_14804, x_17431, x_17432); +lean_dec(x_17432); +lean_dec(x_14804); +x_17434 = lean_array_get_size(x_17430); +x_17435 = lean_unsigned_to_nat(0u); +x_17436 = lean_unsigned_to_nat(1u); +if (lean_is_scalar(x_14805)) { + x_17437 = lean_alloc_ctor(0, 3, 0); +} else { + x_17437 = x_14805; + lean_ctor_set_tag(x_17437, 0); +} +lean_ctor_set(x_17437, 0, x_17435); +lean_ctor_set(x_17437, 1, x_17434); +lean_ctor_set(x_17437, 2, x_17436); +x_17438 = l_Lean_IR_ToIR_lowerLet___closed__27; +x_17439 = l_Std_Range_forIn_x27_loop___at_Lean_IR_ToIR_lowerLet___spec__7(x_17430, x_17433, x_17437, x_17437, x_17438, x_17435, lean_box(0), lean_box(0), x_17428, x_4, x_5, x_17427); +lean_dec(x_17437); +x_17440 = lean_ctor_get(x_17439, 0); +lean_inc(x_17440); +x_17441 = lean_ctor_get(x_17439, 1); +lean_inc(x_17441); +lean_dec(x_17439); +x_17442 = lean_ctor_get(x_17440, 0); +lean_inc(x_17442); +x_17443 = lean_ctor_get(x_17440, 1); +lean_inc(x_17443); +lean_dec(x_17440); +x_17444 = l_Lean_IR_ToIR_lowerLet___lambda__4(x_1, x_2, x_17429, x_17430, x_17433, x_17442, x_17443, x_4, x_5, x_17441); +lean_dec(x_17433); +lean_dec(x_17430); +return x_17444; +} +else +{ +lean_object* x_17445; lean_object* x_17446; lean_object* x_17447; lean_object* x_17448; +lean_dec(x_17422); +lean_dec(x_14805); +lean_dec(x_14804); +lean_dec(x_5); +lean_dec(x_4); +lean_dec(x_2); +lean_dec(x_1); +x_17445 = lean_ctor_get(x_17424, 0); +lean_inc(x_17445); +x_17446 = lean_ctor_get(x_17424, 1); +lean_inc(x_17446); +if (lean_is_exclusive(x_17424)) { + lean_ctor_release(x_17424, 0); + lean_ctor_release(x_17424, 1); + x_17447 = x_17424; +} else { + lean_dec_ref(x_17424); + x_17447 = lean_box(0); +} +if (lean_is_scalar(x_17447)) { + x_17448 = lean_alloc_ctor(1, 2, 0); +} else { + x_17448 = x_17447; +} +lean_ctor_set(x_17448, 0, x_17445); +lean_ctor_set(x_17448, 1, x_17446); +return x_17448; +} +} +else +{ +lean_object* x_17449; lean_object* x_17450; lean_object* x_17477; lean_object* x_17478; +lean_dec(x_17422); +lean_dec(x_14805); +lean_dec(x_14804); +lean_inc(x_153); +x_17477 = l_Lean_Compiler_LCNF_getMonoDecl_x3f(x_153, x_4, x_5, x_17022); +x_17478 = lean_ctor_get(x_17477, 0); +lean_inc(x_17478); +if (lean_obj_tag(x_17478) == 0) +{ +lean_object* x_17479; lean_object* x_17480; lean_object* x_17481; +x_17479 = lean_ctor_get(x_17477, 1); +lean_inc(x_17479); +lean_dec(x_17477); +x_17480 = lean_box(0); +if (lean_is_scalar(x_17019)) { + x_17481 = lean_alloc_ctor(0, 2, 0); +} else { + x_17481 = x_17019; +} +lean_ctor_set(x_17481, 0, x_17480); +lean_ctor_set(x_17481, 1, x_17018); +x_17449 = x_17481; +x_17450 = x_17479; +goto block_17476; +} +else +{ +lean_object* x_17482; lean_object* x_17483; lean_object* x_17484; lean_object* x_17485; lean_object* x_17486; lean_object* x_17487; lean_object* x_17488; uint8_t x_17489; +lean_dec(x_17019); +x_17482 = lean_ctor_get(x_17477, 1); +lean_inc(x_17482); +if (lean_is_exclusive(x_17477)) { + lean_ctor_release(x_17477, 0); + lean_ctor_release(x_17477, 1); + x_17483 = x_17477; +} else { + lean_dec_ref(x_17477); + x_17483 = lean_box(0); +} +x_17484 = lean_ctor_get(x_17478, 0); +lean_inc(x_17484); +if (lean_is_exclusive(x_17478)) { + lean_ctor_release(x_17478, 0); + x_17485 = x_17478; +} else { + 
lean_dec_ref(x_17478); + x_17485 = lean_box(0); +} +x_17486 = lean_array_get_size(x_17013); +x_17487 = lean_ctor_get(x_17484, 3); +lean_inc(x_17487); +lean_dec(x_17484); +x_17488 = lean_array_get_size(x_17487); +lean_dec(x_17487); +x_17489 = lean_nat_dec_lt(x_17486, x_17488); +if (x_17489 == 0) +{ +uint8_t x_17490; +x_17490 = lean_nat_dec_eq(x_17486, x_17488); +if (x_17490 == 0) +{ +lean_object* x_17491; lean_object* x_17492; lean_object* x_17493; lean_object* x_17494; lean_object* x_17495; lean_object* x_17496; lean_object* x_17497; lean_object* x_17498; lean_object* x_17499; lean_object* x_17500; lean_object* x_17501; lean_object* x_17502; lean_object* x_17503; lean_object* x_17504; lean_object* x_17505; lean_object* x_17506; lean_object* x_17507; +x_17491 = lean_unsigned_to_nat(0u); +x_17492 = l_Array_extract___rarg(x_17013, x_17491, x_17488); +x_17493 = l_Array_extract___rarg(x_17013, x_17488, x_17486); +lean_dec(x_17486); +lean_inc(x_153); +if (lean_is_scalar(x_17483)) { + x_17494 = lean_alloc_ctor(6, 2, 0); +} else { + x_17494 = x_17483; + lean_ctor_set_tag(x_17494, 6); +} +lean_ctor_set(x_17494, 0, x_153); +lean_ctor_set(x_17494, 1, x_17492); +x_17495 = lean_ctor_get(x_1, 0); +lean_inc(x_17495); +x_17496 = l_Lean_IR_ToIR_bindVar(x_17495, x_17018, x_4, x_5, x_17482); +x_17497 = lean_ctor_get(x_17496, 0); +lean_inc(x_17497); +x_17498 = lean_ctor_get(x_17496, 1); +lean_inc(x_17498); +lean_dec(x_17496); +x_17499 = lean_ctor_get(x_17497, 0); +lean_inc(x_17499); +x_17500 = lean_ctor_get(x_17497, 1); +lean_inc(x_17500); +lean_dec(x_17497); +x_17501 = l_Lean_IR_ToIR_newVar(x_17500, x_4, x_5, x_17498); +x_17502 = lean_ctor_get(x_17501, 0); +lean_inc(x_17502); +x_17503 = lean_ctor_get(x_17501, 1); +lean_inc(x_17503); +lean_dec(x_17501); +x_17504 = lean_ctor_get(x_17502, 0); +lean_inc(x_17504); +x_17505 = lean_ctor_get(x_17502, 1); +lean_inc(x_17505); +lean_dec(x_17502); +x_17506 = lean_ctor_get(x_1, 2); +lean_inc(x_17506); +lean_inc(x_5); +lean_inc(x_4); +x_17507 = l_Lean_IR_ToIR_lowerType(x_17506, x_17505, x_4, x_5, x_17503); +if (lean_obj_tag(x_17507) == 0) +{ +lean_object* x_17508; lean_object* x_17509; lean_object* x_17510; lean_object* x_17511; lean_object* x_17512; +x_17508 = lean_ctor_get(x_17507, 0); +lean_inc(x_17508); +x_17509 = lean_ctor_get(x_17507, 1); +lean_inc(x_17509); +lean_dec(x_17507); +x_17510 = lean_ctor_get(x_17508, 0); +lean_inc(x_17510); +x_17511 = lean_ctor_get(x_17508, 1); +lean_inc(x_17511); +lean_dec(x_17508); +lean_inc(x_5); +lean_inc(x_4); +lean_inc(x_2); +x_17512 = l_Lean_IR_ToIR_lowerLet___lambda__3(x_2, x_17504, x_17493, x_17499, x_17494, x_17510, x_17511, x_4, x_5, x_17509); +if (lean_obj_tag(x_17512) == 0) +{ +lean_object* x_17513; lean_object* x_17514; lean_object* x_17515; lean_object* x_17516; lean_object* x_17517; lean_object* x_17518; lean_object* x_17519; +x_17513 = lean_ctor_get(x_17512, 0); +lean_inc(x_17513); +x_17514 = lean_ctor_get(x_17512, 1); +lean_inc(x_17514); +lean_dec(x_17512); +x_17515 = lean_ctor_get(x_17513, 0); +lean_inc(x_17515); +x_17516 = lean_ctor_get(x_17513, 1); +lean_inc(x_17516); +if (lean_is_exclusive(x_17513)) { + lean_ctor_release(x_17513, 0); + lean_ctor_release(x_17513, 1); + x_17517 = x_17513; +} else { + lean_dec_ref(x_17513); + x_17517 = lean_box(0); +} +if (lean_is_scalar(x_17485)) { + x_17518 = lean_alloc_ctor(1, 1, 0); +} else { + x_17518 = x_17485; +} +lean_ctor_set(x_17518, 0, x_17515); +if (lean_is_scalar(x_17517)) { + x_17519 = lean_alloc_ctor(0, 2, 0); +} else { + x_17519 = x_17517; +} +lean_ctor_set(x_17519, 0, 
x_17518); +lean_ctor_set(x_17519, 1, x_17516); +x_17449 = x_17519; +x_17450 = x_17514; +goto block_17476; +} +else +{ +lean_object* x_17520; lean_object* x_17521; lean_object* x_17522; lean_object* x_17523; +lean_dec(x_17485); +lean_dec(x_17023); +lean_dec(x_17013); +lean_dec(x_153); +lean_dec(x_5); +lean_dec(x_4); +lean_dec(x_2); +lean_dec(x_1); +x_17520 = lean_ctor_get(x_17512, 0); +lean_inc(x_17520); +x_17521 = lean_ctor_get(x_17512, 1); +lean_inc(x_17521); +if (lean_is_exclusive(x_17512)) { + lean_ctor_release(x_17512, 0); + lean_ctor_release(x_17512, 1); + x_17522 = x_17512; +} else { + lean_dec_ref(x_17512); + x_17522 = lean_box(0); +} +if (lean_is_scalar(x_17522)) { + x_17523 = lean_alloc_ctor(1, 2, 0); +} else { + x_17523 = x_17522; +} +lean_ctor_set(x_17523, 0, x_17520); +lean_ctor_set(x_17523, 1, x_17521); +return x_17523; +} +} +else +{ +lean_object* x_17524; lean_object* x_17525; lean_object* x_17526; lean_object* x_17527; +lean_dec(x_17504); +lean_dec(x_17499); +lean_dec(x_17494); +lean_dec(x_17493); +lean_dec(x_17485); +lean_dec(x_17023); +lean_dec(x_17013); +lean_dec(x_153); +lean_dec(x_5); +lean_dec(x_4); +lean_dec(x_2); +lean_dec(x_1); +x_17524 = lean_ctor_get(x_17507, 0); +lean_inc(x_17524); +x_17525 = lean_ctor_get(x_17507, 1); +lean_inc(x_17525); +if (lean_is_exclusive(x_17507)) { + lean_ctor_release(x_17507, 0); + lean_ctor_release(x_17507, 1); + x_17526 = x_17507; +} else { + lean_dec_ref(x_17507); + x_17526 = lean_box(0); +} +if (lean_is_scalar(x_17526)) { + x_17527 = lean_alloc_ctor(1, 2, 0); +} else { + x_17527 = x_17526; +} +lean_ctor_set(x_17527, 0, x_17524); +lean_ctor_set(x_17527, 1, x_17525); +return x_17527; +} +} +else +{ +lean_object* x_17528; lean_object* x_17529; lean_object* x_17530; lean_object* x_17531; lean_object* x_17532; lean_object* x_17533; lean_object* x_17534; lean_object* x_17535; lean_object* x_17536; +lean_dec(x_17488); +lean_dec(x_17486); +lean_inc(x_17013); +lean_inc(x_153); +if (lean_is_scalar(x_17483)) { + x_17528 = lean_alloc_ctor(6, 2, 0); +} else { + x_17528 = x_17483; + lean_ctor_set_tag(x_17528, 6); +} +lean_ctor_set(x_17528, 0, x_153); +lean_ctor_set(x_17528, 1, x_17013); +x_17529 = lean_ctor_get(x_1, 0); +lean_inc(x_17529); +x_17530 = l_Lean_IR_ToIR_bindVar(x_17529, x_17018, x_4, x_5, x_17482); +x_17531 = lean_ctor_get(x_17530, 0); +lean_inc(x_17531); +x_17532 = lean_ctor_get(x_17530, 1); +lean_inc(x_17532); +lean_dec(x_17530); +x_17533 = lean_ctor_get(x_17531, 0); +lean_inc(x_17533); +x_17534 = lean_ctor_get(x_17531, 1); +lean_inc(x_17534); +lean_dec(x_17531); +x_17535 = lean_ctor_get(x_1, 2); +lean_inc(x_17535); +lean_inc(x_5); +lean_inc(x_4); +x_17536 = l_Lean_IR_ToIR_lowerType(x_17535, x_17534, x_4, x_5, x_17532); +if (lean_obj_tag(x_17536) == 0) +{ +lean_object* x_17537; lean_object* x_17538; lean_object* x_17539; lean_object* x_17540; lean_object* x_17541; +x_17537 = lean_ctor_get(x_17536, 0); +lean_inc(x_17537); +x_17538 = lean_ctor_get(x_17536, 1); +lean_inc(x_17538); +lean_dec(x_17536); +x_17539 = lean_ctor_get(x_17537, 0); +lean_inc(x_17539); +x_17540 = lean_ctor_get(x_17537, 1); +lean_inc(x_17540); +lean_dec(x_17537); +lean_inc(x_5); +lean_inc(x_4); +lean_inc(x_2); +x_17541 = l_Lean_IR_ToIR_lowerLet___lambda__1(x_2, x_17533, x_17528, x_17539, x_17540, x_4, x_5, x_17538); +if (lean_obj_tag(x_17541) == 0) +{ +lean_object* x_17542; lean_object* x_17543; lean_object* x_17544; lean_object* x_17545; lean_object* x_17546; lean_object* x_17547; lean_object* x_17548; +x_17542 = lean_ctor_get(x_17541, 0); +lean_inc(x_17542); 
+x_17543 = lean_ctor_get(x_17541, 1); +lean_inc(x_17543); +lean_dec(x_17541); +x_17544 = lean_ctor_get(x_17542, 0); +lean_inc(x_17544); +x_17545 = lean_ctor_get(x_17542, 1); +lean_inc(x_17545); +if (lean_is_exclusive(x_17542)) { + lean_ctor_release(x_17542, 0); + lean_ctor_release(x_17542, 1); + x_17546 = x_17542; +} else { + lean_dec_ref(x_17542); + x_17546 = lean_box(0); +} +if (lean_is_scalar(x_17485)) { + x_17547 = lean_alloc_ctor(1, 1, 0); +} else { + x_17547 = x_17485; +} +lean_ctor_set(x_17547, 0, x_17544); +if (lean_is_scalar(x_17546)) { + x_17548 = lean_alloc_ctor(0, 2, 0); +} else { + x_17548 = x_17546; +} +lean_ctor_set(x_17548, 0, x_17547); +lean_ctor_set(x_17548, 1, x_17545); +x_17449 = x_17548; +x_17450 = x_17543; +goto block_17476; +} +else +{ +lean_object* x_17549; lean_object* x_17550; lean_object* x_17551; lean_object* x_17552; +lean_dec(x_17485); +lean_dec(x_17023); +lean_dec(x_17013); +lean_dec(x_153); +lean_dec(x_5); +lean_dec(x_4); +lean_dec(x_2); +lean_dec(x_1); +x_17549 = lean_ctor_get(x_17541, 0); +lean_inc(x_17549); +x_17550 = lean_ctor_get(x_17541, 1); +lean_inc(x_17550); +if (lean_is_exclusive(x_17541)) { + lean_ctor_release(x_17541, 0); + lean_ctor_release(x_17541, 1); + x_17551 = x_17541; +} else { + lean_dec_ref(x_17541); + x_17551 = lean_box(0); +} +if (lean_is_scalar(x_17551)) { + x_17552 = lean_alloc_ctor(1, 2, 0); +} else { + x_17552 = x_17551; +} +lean_ctor_set(x_17552, 0, x_17549); +lean_ctor_set(x_17552, 1, x_17550); +return x_17552; +} +} +else +{ +lean_object* x_17553; lean_object* x_17554; lean_object* x_17555; lean_object* x_17556; +lean_dec(x_17533); +lean_dec(x_17528); +lean_dec(x_17485); +lean_dec(x_17023); +lean_dec(x_17013); +lean_dec(x_153); +lean_dec(x_5); +lean_dec(x_4); +lean_dec(x_2); +lean_dec(x_1); +x_17553 = lean_ctor_get(x_17536, 0); +lean_inc(x_17553); +x_17554 = lean_ctor_get(x_17536, 1); +lean_inc(x_17554); +if (lean_is_exclusive(x_17536)) { + lean_ctor_release(x_17536, 0); + lean_ctor_release(x_17536, 1); + x_17555 = x_17536; +} else { + lean_dec_ref(x_17536); + x_17555 = lean_box(0); +} +if (lean_is_scalar(x_17555)) { + x_17556 = lean_alloc_ctor(1, 2, 0); +} else { + x_17556 = x_17555; +} +lean_ctor_set(x_17556, 0, x_17553); +lean_ctor_set(x_17556, 1, x_17554); +return x_17556; +} +} +} +else +{ +lean_object* x_17557; lean_object* x_17558; lean_object* x_17559; lean_object* x_17560; lean_object* x_17561; lean_object* x_17562; lean_object* x_17563; lean_object* x_17564; lean_object* x_17565; +lean_dec(x_17488); +lean_dec(x_17486); +lean_inc(x_17013); +lean_inc(x_153); +if (lean_is_scalar(x_17483)) { + x_17557 = lean_alloc_ctor(7, 2, 0); +} else { + x_17557 = x_17483; + lean_ctor_set_tag(x_17557, 7); +} +lean_ctor_set(x_17557, 0, x_153); +lean_ctor_set(x_17557, 1, x_17013); +x_17558 = lean_ctor_get(x_1, 0); +lean_inc(x_17558); +x_17559 = l_Lean_IR_ToIR_bindVar(x_17558, x_17018, x_4, x_5, x_17482); +x_17560 = lean_ctor_get(x_17559, 0); +lean_inc(x_17560); +x_17561 = lean_ctor_get(x_17559, 1); +lean_inc(x_17561); +lean_dec(x_17559); +x_17562 = lean_ctor_get(x_17560, 0); +lean_inc(x_17562); +x_17563 = lean_ctor_get(x_17560, 1); +lean_inc(x_17563); +lean_dec(x_17560); +x_17564 = lean_box(7); +lean_inc(x_5); +lean_inc(x_4); +lean_inc(x_2); +x_17565 = l_Lean_IR_ToIR_lowerLet___lambda__1(x_2, x_17562, x_17557, x_17564, x_17563, x_4, x_5, x_17561); +if (lean_obj_tag(x_17565) == 0) +{ +lean_object* x_17566; lean_object* x_17567; lean_object* x_17568; lean_object* x_17569; lean_object* x_17570; lean_object* x_17571; lean_object* x_17572; 
+x_17566 = lean_ctor_get(x_17565, 0); +lean_inc(x_17566); +x_17567 = lean_ctor_get(x_17565, 1); +lean_inc(x_17567); +lean_dec(x_17565); +x_17568 = lean_ctor_get(x_17566, 0); +lean_inc(x_17568); +x_17569 = lean_ctor_get(x_17566, 1); +lean_inc(x_17569); +if (lean_is_exclusive(x_17566)) { + lean_ctor_release(x_17566, 0); + lean_ctor_release(x_17566, 1); + x_17570 = x_17566; +} else { + lean_dec_ref(x_17566); + x_17570 = lean_box(0); +} +if (lean_is_scalar(x_17485)) { + x_17571 = lean_alloc_ctor(1, 1, 0); +} else { + x_17571 = x_17485; +} +lean_ctor_set(x_17571, 0, x_17568); +if (lean_is_scalar(x_17570)) { + x_17572 = lean_alloc_ctor(0, 2, 0); +} else { + x_17572 = x_17570; +} +lean_ctor_set(x_17572, 0, x_17571); +lean_ctor_set(x_17572, 1, x_17569); +x_17449 = x_17572; +x_17450 = x_17567; +goto block_17476; +} +else +{ +lean_object* x_17573; lean_object* x_17574; lean_object* x_17575; lean_object* x_17576; +lean_dec(x_17485); +lean_dec(x_17023); +lean_dec(x_17013); +lean_dec(x_153); +lean_dec(x_5); +lean_dec(x_4); +lean_dec(x_2); +lean_dec(x_1); +x_17573 = lean_ctor_get(x_17565, 0); +lean_inc(x_17573); +x_17574 = lean_ctor_get(x_17565, 1); +lean_inc(x_17574); +if (lean_is_exclusive(x_17565)) { + lean_ctor_release(x_17565, 0); + lean_ctor_release(x_17565, 1); + x_17575 = x_17565; +} else { + lean_dec_ref(x_17565); + x_17575 = lean_box(0); +} +if (lean_is_scalar(x_17575)) { + x_17576 = lean_alloc_ctor(1, 2, 0); +} else { + x_17576 = x_17575; +} +lean_ctor_set(x_17576, 0, x_17573); +lean_ctor_set(x_17576, 1, x_17574); +return x_17576; +} +} +} +block_17476: +{ +lean_object* x_17451; +x_17451 = lean_ctor_get(x_17449, 0); +lean_inc(x_17451); +if (lean_obj_tag(x_17451) == 0) +{ +lean_object* x_17452; lean_object* x_17453; lean_object* x_17454; lean_object* x_17455; lean_object* x_17456; lean_object* x_17457; lean_object* x_17458; lean_object* x_17459; lean_object* x_17460; lean_object* x_17461; +lean_dec(x_17023); +x_17452 = lean_ctor_get(x_17449, 1); +lean_inc(x_17452); +lean_dec(x_17449); +x_17453 = lean_alloc_ctor(6, 2, 0); +lean_ctor_set(x_17453, 0, x_153); +lean_ctor_set(x_17453, 1, x_17013); +x_17454 = lean_ctor_get(x_1, 0); +lean_inc(x_17454); +x_17455 = l_Lean_IR_ToIR_bindVar(x_17454, x_17452, x_4, x_5, x_17450); +x_17456 = lean_ctor_get(x_17455, 0); +lean_inc(x_17456); +x_17457 = lean_ctor_get(x_17455, 1); +lean_inc(x_17457); +lean_dec(x_17455); +x_17458 = lean_ctor_get(x_17456, 0); +lean_inc(x_17458); +x_17459 = lean_ctor_get(x_17456, 1); +lean_inc(x_17459); +lean_dec(x_17456); +x_17460 = lean_ctor_get(x_1, 2); +lean_inc(x_17460); +lean_dec(x_1); +lean_inc(x_5); +lean_inc(x_4); +x_17461 = l_Lean_IR_ToIR_lowerType(x_17460, x_17459, x_4, x_5, x_17457); +if (lean_obj_tag(x_17461) == 0) +{ +lean_object* x_17462; lean_object* x_17463; lean_object* x_17464; lean_object* x_17465; lean_object* x_17466; +x_17462 = lean_ctor_get(x_17461, 0); +lean_inc(x_17462); +x_17463 = lean_ctor_get(x_17461, 1); +lean_inc(x_17463); +lean_dec(x_17461); +x_17464 = lean_ctor_get(x_17462, 0); +lean_inc(x_17464); +x_17465 = lean_ctor_get(x_17462, 1); +lean_inc(x_17465); +lean_dec(x_17462); +x_17466 = l_Lean_IR_ToIR_lowerLet___lambda__1(x_2, x_17458, x_17453, x_17464, x_17465, x_4, x_5, x_17463); +return x_17466; +} +else +{ +lean_object* x_17467; lean_object* x_17468; lean_object* x_17469; lean_object* x_17470; +lean_dec(x_17458); +lean_dec(x_17453); +lean_dec(x_5); +lean_dec(x_4); +lean_dec(x_2); +x_17467 = lean_ctor_get(x_17461, 0); +lean_inc(x_17467); +x_17468 = lean_ctor_get(x_17461, 1); +lean_inc(x_17468); +if 
(lean_is_exclusive(x_17461)) { + lean_ctor_release(x_17461, 0); + lean_ctor_release(x_17461, 1); + x_17469 = x_17461; +} else { + lean_dec_ref(x_17461); + x_17469 = lean_box(0); +} +if (lean_is_scalar(x_17469)) { + x_17470 = lean_alloc_ctor(1, 2, 0); +} else { + x_17470 = x_17469; +} +lean_ctor_set(x_17470, 0, x_17467); +lean_ctor_set(x_17470, 1, x_17468); +return x_17470; +} +} +else +{ +lean_object* x_17471; lean_object* x_17472; lean_object* x_17473; lean_object* x_17474; lean_object* x_17475; +lean_dec(x_17013); +lean_dec(x_153); +lean_dec(x_5); +lean_dec(x_4); +lean_dec(x_2); +lean_dec(x_1); +x_17471 = lean_ctor_get(x_17449, 1); +lean_inc(x_17471); +if (lean_is_exclusive(x_17449)) { + lean_ctor_release(x_17449, 0); + lean_ctor_release(x_17449, 1); + x_17472 = x_17449; +} else { + lean_dec_ref(x_17449); + x_17472 = lean_box(0); +} +x_17473 = lean_ctor_get(x_17451, 0); +lean_inc(x_17473); +lean_dec(x_17451); +if (lean_is_scalar(x_17472)) { + x_17474 = lean_alloc_ctor(0, 2, 0); +} else { + x_17474 = x_17472; +} +lean_ctor_set(x_17474, 0, x_17473); +lean_ctor_set(x_17474, 1, x_17471); +if (lean_is_scalar(x_17023)) { + x_17475 = lean_alloc_ctor(0, 2, 0); +} else { + x_17475 = x_17023; +} +lean_ctor_set(x_17475, 0, x_17474); +lean_ctor_set(x_17475, 1, x_17450); +return x_17475; +} +} +} +} +default: +{ +lean_object* x_17577; uint8_t x_17578; lean_object* x_17579; lean_object* x_17580; lean_object* x_17581; lean_object* x_17582; lean_object* x_17583; lean_object* x_17584; lean_object* x_17585; lean_object* x_17586; lean_object* x_17587; +lean_dec(x_17024); +lean_dec(x_17023); +lean_dec(x_17019); +lean_dec(x_17013); +lean_dec(x_14805); +lean_dec(x_14804); +lean_dec(x_2); +lean_dec(x_1); +if (lean_is_exclusive(x_17029)) { + lean_ctor_release(x_17029, 0); + x_17577 = x_17029; +} else { + lean_dec_ref(x_17029); + x_17577 = lean_box(0); +} +x_17578 = 1; +x_17579 = l_Lean_IR_ToIR_lowerLet___closed__14; +x_17580 = l_Lean_Name_toString(x_153, x_17578, x_17579); +if (lean_is_scalar(x_17577)) { + x_17581 = lean_alloc_ctor(3, 1, 0); +} else { + x_17581 = x_17577; + lean_ctor_set_tag(x_17581, 3); +} +lean_ctor_set(x_17581, 0, x_17580); +x_17582 = l_Lean_IR_ToIR_lowerLet___closed__29; +x_17583 = lean_alloc_ctor(5, 2, 0); +lean_ctor_set(x_17583, 0, x_17582); +lean_ctor_set(x_17583, 1, x_17581); +x_17584 = l_Lean_IR_ToIR_lowerLet___closed__31; +x_17585 = lean_alloc_ctor(5, 2, 0); +lean_ctor_set(x_17585, 0, x_17583); +lean_ctor_set(x_17585, 1, x_17584); +x_17586 = l_Lean_MessageData_ofFormat(x_17585); +x_17587 = l_Lean_throwError___at_Lean_IR_ToIR_lowerLet___spec__1(x_17586, x_17018, x_4, x_5, x_17022); +lean_dec(x_5); +lean_dec(x_4); +lean_dec(x_17018); +return x_17587; +} +} +} +} +else +{ +lean_object* x_17588; lean_object* x_17589; lean_object* x_17590; lean_object* x_17591; lean_object* x_17592; +lean_dec(x_17013); +lean_dec(x_14805); +lean_dec(x_14804); +lean_dec(x_153); +lean_dec(x_5); +lean_dec(x_4); +lean_dec(x_2); +lean_dec(x_1); +x_17588 = lean_ctor_get(x_17015, 1); +lean_inc(x_17588); +if (lean_is_exclusive(x_17015)) { + lean_ctor_release(x_17015, 0); + lean_ctor_release(x_17015, 1); + x_17589 = x_17015; +} else { + lean_dec_ref(x_17015); + x_17589 = lean_box(0); +} +x_17590 = lean_ctor_get(x_17017, 0); +lean_inc(x_17590); +lean_dec(x_17017); +if (lean_is_scalar(x_17589)) { + x_17591 = lean_alloc_ctor(0, 2, 0); +} else { + x_17591 = x_17589; +} +lean_ctor_set(x_17591, 0, x_17590); +lean_ctor_set(x_17591, 1, x_17588); +if (lean_is_scalar(x_14811)) { + x_17592 = lean_alloc_ctor(0, 2, 0); +} else { 
+ x_17592 = x_14811; +} +lean_ctor_set(x_17592, 0, x_17591); +lean_ctor_set(x_17592, 1, x_17016); +return x_17592; +} +} +} +} +else +{ +uint8_t x_17694; +lean_dec(x_14805); +lean_dec(x_14804); +lean_dec(x_153); +lean_dec(x_5); +lean_dec(x_4); +lean_dec(x_2); +lean_dec(x_1); +x_17694 = !lean_is_exclusive(x_14808); +if (x_17694 == 0) +{ +return x_14808; +} +else +{ +lean_object* x_17695; lean_object* x_17696; lean_object* x_17697; +x_17695 = lean_ctor_get(x_14808, 0); +x_17696 = lean_ctor_get(x_14808, 1); +lean_inc(x_17696); +lean_inc(x_17695); +lean_dec(x_14808); +x_17697 = lean_alloc_ctor(1, 2, 0); +lean_ctor_set(x_17697, 0, x_17695); +lean_ctor_set(x_17697, 1, x_17696); +return x_17697; +} +} +} +} +} +default: +{ +lean_object* x_17698; lean_object* x_17699; size_t x_17700; size_t x_17701; lean_object* x_17702; +lean_dec(x_3048); +x_17698 = lean_ctor_get(x_7, 2); +lean_inc(x_17698); +if (lean_is_exclusive(x_7)) { + lean_ctor_release(x_7, 0); + lean_ctor_release(x_7, 1); + lean_ctor_release(x_7, 2); + x_17699 = x_7; +} else { + lean_dec_ref(x_7); + x_17699 = lean_box(0); +} +x_17700 = lean_array_size(x_17698); +x_17701 = 0; +lean_inc(x_5); +lean_inc(x_4); +lean_inc(x_17698); +x_17702 = l_Array_mapMUnsafe_map___at_Lean_IR_ToIR_lowerCode___spec__2(x_17700, x_17701, x_17698, x_3, x_4, x_5, x_6); +if (lean_obj_tag(x_17702) == 0) +{ +lean_object* x_17703; lean_object* x_17704; lean_object* x_17705; uint8_t x_17706; +x_17703 = lean_ctor_get(x_17702, 0); +lean_inc(x_17703); +x_17704 = lean_ctor_get(x_17702, 1); +lean_inc(x_17704); +if (lean_is_exclusive(x_17702)) { + lean_ctor_release(x_17702, 0); + lean_ctor_release(x_17702, 1); + x_17705 = x_17702; +} else { + lean_dec_ref(x_17702); + x_17705 = lean_box(0); +} +x_17706 = !lean_is_exclusive(x_17703); +if (x_17706 == 0) +{ +lean_object* x_17707; lean_object* x_17708; lean_object* x_17709; lean_object* x_17710; lean_object* x_19627; lean_object* x_19628; +x_17707 = lean_ctor_get(x_17703, 0); +x_17708 = lean_ctor_get(x_17703, 1); +lean_inc(x_153); +x_19627 = l_Lean_Compiler_LCNF_getMonoDecl_x3f(x_153, x_4, x_5, x_17704); +x_19628 = lean_ctor_get(x_19627, 0); +lean_inc(x_19628); +if (lean_obj_tag(x_19628) == 0) +{ +lean_object* x_19629; lean_object* x_19630; +x_19629 = lean_ctor_get(x_19627, 1); +lean_inc(x_19629); +lean_dec(x_19627); +x_19630 = lean_box(0); +lean_ctor_set(x_17703, 0, x_19630); +x_17709 = x_17703; +x_17710 = x_19629; +goto block_19626; +} +else +{ +uint8_t x_19631; +lean_free_object(x_17703); +x_19631 = !lean_is_exclusive(x_19627); +if (x_19631 == 0) +{ +lean_object* x_19632; lean_object* x_19633; uint8_t x_19634; +x_19632 = lean_ctor_get(x_19627, 1); +x_19633 = lean_ctor_get(x_19627, 0); +lean_dec(x_19633); +x_19634 = !lean_is_exclusive(x_19628); +if (x_19634 == 0) +{ +lean_object* x_19635; lean_object* x_19636; lean_object* x_19637; lean_object* x_19638; uint8_t x_19639; +x_19635 = lean_ctor_get(x_19628, 0); +x_19636 = lean_array_get_size(x_17707); +x_19637 = lean_ctor_get(x_19635, 3); +lean_inc(x_19637); +lean_dec(x_19635); +x_19638 = lean_array_get_size(x_19637); +lean_dec(x_19637); +x_19639 = lean_nat_dec_lt(x_19636, x_19638); +if (x_19639 == 0) +{ +uint8_t x_19640; +x_19640 = lean_nat_dec_eq(x_19636, x_19638); +if (x_19640 == 0) +{ +lean_object* x_19641; lean_object* x_19642; lean_object* x_19643; lean_object* x_19644; lean_object* x_19645; lean_object* x_19646; lean_object* x_19647; lean_object* x_19648; lean_object* x_19649; lean_object* x_19650; lean_object* x_19651; lean_object* x_19652; lean_object* x_19653; lean_object* 
x_19654; lean_object* x_19655; lean_object* x_19656; +x_19641 = lean_unsigned_to_nat(0u); +x_19642 = l_Array_extract___rarg(x_17707, x_19641, x_19638); +x_19643 = l_Array_extract___rarg(x_17707, x_19638, x_19636); +lean_dec(x_19636); +lean_inc(x_153); +lean_ctor_set_tag(x_19627, 6); +lean_ctor_set(x_19627, 1, x_19642); +lean_ctor_set(x_19627, 0, x_153); +x_19644 = lean_ctor_get(x_1, 0); +lean_inc(x_19644); +x_19645 = l_Lean_IR_ToIR_bindVar(x_19644, x_17708, x_4, x_5, x_19632); +x_19646 = lean_ctor_get(x_19645, 0); +lean_inc(x_19646); +x_19647 = lean_ctor_get(x_19645, 1); +lean_inc(x_19647); +lean_dec(x_19645); +x_19648 = lean_ctor_get(x_19646, 0); +lean_inc(x_19648); +x_19649 = lean_ctor_get(x_19646, 1); +lean_inc(x_19649); +lean_dec(x_19646); +x_19650 = l_Lean_IR_ToIR_newVar(x_19649, x_4, x_5, x_19647); +x_19651 = lean_ctor_get(x_19650, 0); +lean_inc(x_19651); +x_19652 = lean_ctor_get(x_19650, 1); +lean_inc(x_19652); +lean_dec(x_19650); +x_19653 = lean_ctor_get(x_19651, 0); +lean_inc(x_19653); +x_19654 = lean_ctor_get(x_19651, 1); +lean_inc(x_19654); +lean_dec(x_19651); +x_19655 = lean_ctor_get(x_1, 2); +lean_inc(x_19655); +lean_inc(x_5); +lean_inc(x_4); +x_19656 = l_Lean_IR_ToIR_lowerType(x_19655, x_19654, x_4, x_5, x_19652); +if (lean_obj_tag(x_19656) == 0) +{ +lean_object* x_19657; lean_object* x_19658; lean_object* x_19659; lean_object* x_19660; lean_object* x_19661; +x_19657 = lean_ctor_get(x_19656, 0); +lean_inc(x_19657); +x_19658 = lean_ctor_get(x_19656, 1); +lean_inc(x_19658); +lean_dec(x_19656); +x_19659 = lean_ctor_get(x_19657, 0); +lean_inc(x_19659); +x_19660 = lean_ctor_get(x_19657, 1); +lean_inc(x_19660); +lean_dec(x_19657); +lean_inc(x_5); +lean_inc(x_4); +lean_inc(x_2); +x_19661 = l_Lean_IR_ToIR_lowerLet___lambda__3(x_2, x_19653, x_19643, x_19648, x_19627, x_19659, x_19660, x_4, x_5, x_19658); +if (lean_obj_tag(x_19661) == 0) +{ +lean_object* x_19662; lean_object* x_19663; uint8_t x_19664; +x_19662 = lean_ctor_get(x_19661, 0); +lean_inc(x_19662); +x_19663 = lean_ctor_get(x_19661, 1); +lean_inc(x_19663); +lean_dec(x_19661); +x_19664 = !lean_is_exclusive(x_19662); +if (x_19664 == 0) +{ +lean_object* x_19665; +x_19665 = lean_ctor_get(x_19662, 0); +lean_ctor_set(x_19628, 0, x_19665); +lean_ctor_set(x_19662, 0, x_19628); +x_17709 = x_19662; +x_17710 = x_19663; +goto block_19626; +} +else +{ +lean_object* x_19666; lean_object* x_19667; lean_object* x_19668; +x_19666 = lean_ctor_get(x_19662, 0); +x_19667 = lean_ctor_get(x_19662, 1); +lean_inc(x_19667); +lean_inc(x_19666); +lean_dec(x_19662); +lean_ctor_set(x_19628, 0, x_19666); +x_19668 = lean_alloc_ctor(0, 2, 0); +lean_ctor_set(x_19668, 0, x_19628); +lean_ctor_set(x_19668, 1, x_19667); +x_17709 = x_19668; +x_17710 = x_19663; +goto block_19626; +} +} +else +{ +uint8_t x_19669; +lean_free_object(x_19628); +lean_dec(x_17707); +lean_dec(x_17705); +lean_dec(x_17699); +lean_dec(x_17698); +lean_dec(x_153); +lean_dec(x_5); +lean_dec(x_4); +lean_dec(x_2); +lean_dec(x_1); +x_19669 = !lean_is_exclusive(x_19661); +if (x_19669 == 0) +{ +return x_19661; +} +else +{ +lean_object* x_19670; lean_object* x_19671; lean_object* x_19672; +x_19670 = lean_ctor_get(x_19661, 0); +x_19671 = lean_ctor_get(x_19661, 1); +lean_inc(x_19671); +lean_inc(x_19670); +lean_dec(x_19661); +x_19672 = lean_alloc_ctor(1, 2, 0); +lean_ctor_set(x_19672, 0, x_19670); +lean_ctor_set(x_19672, 1, x_19671); +return x_19672; +} +} +} +else +{ +uint8_t x_19673; +lean_dec(x_19653); +lean_dec(x_19648); +lean_dec(x_19627); +lean_dec(x_19643); +lean_free_object(x_19628); 
+lean_dec(x_17707); +lean_dec(x_17705); +lean_dec(x_17699); +lean_dec(x_17698); +lean_dec(x_153); +lean_dec(x_5); +lean_dec(x_4); +lean_dec(x_2); +lean_dec(x_1); +x_19673 = !lean_is_exclusive(x_19656); +if (x_19673 == 0) +{ +return x_19656; +} +else +{ +lean_object* x_19674; lean_object* x_19675; lean_object* x_19676; +x_19674 = lean_ctor_get(x_19656, 0); +x_19675 = lean_ctor_get(x_19656, 1); +lean_inc(x_19675); +lean_inc(x_19674); +lean_dec(x_19656); +x_19676 = lean_alloc_ctor(1, 2, 0); +lean_ctor_set(x_19676, 0, x_19674); +lean_ctor_set(x_19676, 1, x_19675); +return x_19676; +} +} +} +else +{ +lean_object* x_19677; lean_object* x_19678; lean_object* x_19679; lean_object* x_19680; lean_object* x_19681; lean_object* x_19682; lean_object* x_19683; lean_object* x_19684; +lean_dec(x_19638); +lean_dec(x_19636); +lean_inc(x_17707); +lean_inc(x_153); +lean_ctor_set_tag(x_19627, 6); +lean_ctor_set(x_19627, 1, x_17707); +lean_ctor_set(x_19627, 0, x_153); +x_19677 = lean_ctor_get(x_1, 0); +lean_inc(x_19677); +x_19678 = l_Lean_IR_ToIR_bindVar(x_19677, x_17708, x_4, x_5, x_19632); +x_19679 = lean_ctor_get(x_19678, 0); +lean_inc(x_19679); +x_19680 = lean_ctor_get(x_19678, 1); +lean_inc(x_19680); +lean_dec(x_19678); +x_19681 = lean_ctor_get(x_19679, 0); +lean_inc(x_19681); +x_19682 = lean_ctor_get(x_19679, 1); +lean_inc(x_19682); +lean_dec(x_19679); +x_19683 = lean_ctor_get(x_1, 2); +lean_inc(x_19683); +lean_inc(x_5); +lean_inc(x_4); +x_19684 = l_Lean_IR_ToIR_lowerType(x_19683, x_19682, x_4, x_5, x_19680); +if (lean_obj_tag(x_19684) == 0) +{ +lean_object* x_19685; lean_object* x_19686; lean_object* x_19687; lean_object* x_19688; lean_object* x_19689; +x_19685 = lean_ctor_get(x_19684, 0); +lean_inc(x_19685); +x_19686 = lean_ctor_get(x_19684, 1); +lean_inc(x_19686); +lean_dec(x_19684); +x_19687 = lean_ctor_get(x_19685, 0); +lean_inc(x_19687); +x_19688 = lean_ctor_get(x_19685, 1); +lean_inc(x_19688); +lean_dec(x_19685); +lean_inc(x_5); +lean_inc(x_4); +lean_inc(x_2); +x_19689 = l_Lean_IR_ToIR_lowerLet___lambda__1(x_2, x_19681, x_19627, x_19687, x_19688, x_4, x_5, x_19686); +if (lean_obj_tag(x_19689) == 0) +{ +lean_object* x_19690; lean_object* x_19691; uint8_t x_19692; +x_19690 = lean_ctor_get(x_19689, 0); +lean_inc(x_19690); +x_19691 = lean_ctor_get(x_19689, 1); +lean_inc(x_19691); +lean_dec(x_19689); +x_19692 = !lean_is_exclusive(x_19690); +if (x_19692 == 0) +{ +lean_object* x_19693; +x_19693 = lean_ctor_get(x_19690, 0); +lean_ctor_set(x_19628, 0, x_19693); +lean_ctor_set(x_19690, 0, x_19628); +x_17709 = x_19690; +x_17710 = x_19691; +goto block_19626; +} +else +{ +lean_object* x_19694; lean_object* x_19695; lean_object* x_19696; +x_19694 = lean_ctor_get(x_19690, 0); +x_19695 = lean_ctor_get(x_19690, 1); +lean_inc(x_19695); +lean_inc(x_19694); +lean_dec(x_19690); +lean_ctor_set(x_19628, 0, x_19694); +x_19696 = lean_alloc_ctor(0, 2, 0); +lean_ctor_set(x_19696, 0, x_19628); +lean_ctor_set(x_19696, 1, x_19695); +x_17709 = x_19696; +x_17710 = x_19691; +goto block_19626; +} +} +else +{ +uint8_t x_19697; +lean_free_object(x_19628); +lean_dec(x_17707); +lean_dec(x_17705); +lean_dec(x_17699); +lean_dec(x_17698); +lean_dec(x_153); +lean_dec(x_5); +lean_dec(x_4); +lean_dec(x_2); +lean_dec(x_1); +x_19697 = !lean_is_exclusive(x_19689); +if (x_19697 == 0) +{ +return x_19689; +} +else +{ +lean_object* x_19698; lean_object* x_19699; lean_object* x_19700; +x_19698 = lean_ctor_get(x_19689, 0); +x_19699 = lean_ctor_get(x_19689, 1); +lean_inc(x_19699); +lean_inc(x_19698); +lean_dec(x_19689); +x_19700 = lean_alloc_ctor(1, 
2, 0); +lean_ctor_set(x_19700, 0, x_19698); +lean_ctor_set(x_19700, 1, x_19699); +return x_19700; +} +} +} +else +{ +uint8_t x_19701; +lean_dec(x_19681); +lean_dec(x_19627); +lean_free_object(x_19628); +lean_dec(x_17707); +lean_dec(x_17705); +lean_dec(x_17699); +lean_dec(x_17698); +lean_dec(x_153); +lean_dec(x_5); +lean_dec(x_4); +lean_dec(x_2); +lean_dec(x_1); +x_19701 = !lean_is_exclusive(x_19684); +if (x_19701 == 0) +{ +return x_19684; +} +else +{ +lean_object* x_19702; lean_object* x_19703; lean_object* x_19704; +x_19702 = lean_ctor_get(x_19684, 0); +x_19703 = lean_ctor_get(x_19684, 1); +lean_inc(x_19703); +lean_inc(x_19702); +lean_dec(x_19684); +x_19704 = lean_alloc_ctor(1, 2, 0); +lean_ctor_set(x_19704, 0, x_19702); +lean_ctor_set(x_19704, 1, x_19703); +return x_19704; +} +} +} +} +else +{ +lean_object* x_19705; lean_object* x_19706; lean_object* x_19707; lean_object* x_19708; lean_object* x_19709; lean_object* x_19710; lean_object* x_19711; lean_object* x_19712; +lean_dec(x_19638); +lean_dec(x_19636); +lean_inc(x_17707); +lean_inc(x_153); +lean_ctor_set_tag(x_19627, 7); +lean_ctor_set(x_19627, 1, x_17707); +lean_ctor_set(x_19627, 0, x_153); +x_19705 = lean_ctor_get(x_1, 0); +lean_inc(x_19705); +x_19706 = l_Lean_IR_ToIR_bindVar(x_19705, x_17708, x_4, x_5, x_19632); +x_19707 = lean_ctor_get(x_19706, 0); +lean_inc(x_19707); +x_19708 = lean_ctor_get(x_19706, 1); +lean_inc(x_19708); +lean_dec(x_19706); +x_19709 = lean_ctor_get(x_19707, 0); +lean_inc(x_19709); +x_19710 = lean_ctor_get(x_19707, 1); +lean_inc(x_19710); +lean_dec(x_19707); +x_19711 = lean_box(7); +lean_inc(x_5); +lean_inc(x_4); +lean_inc(x_2); +x_19712 = l_Lean_IR_ToIR_lowerLet___lambda__1(x_2, x_19709, x_19627, x_19711, x_19710, x_4, x_5, x_19708); +if (lean_obj_tag(x_19712) == 0) +{ +lean_object* x_19713; lean_object* x_19714; uint8_t x_19715; +x_19713 = lean_ctor_get(x_19712, 0); +lean_inc(x_19713); +x_19714 = lean_ctor_get(x_19712, 1); +lean_inc(x_19714); +lean_dec(x_19712); +x_19715 = !lean_is_exclusive(x_19713); +if (x_19715 == 0) +{ +lean_object* x_19716; +x_19716 = lean_ctor_get(x_19713, 0); +lean_ctor_set(x_19628, 0, x_19716); +lean_ctor_set(x_19713, 0, x_19628); +x_17709 = x_19713; +x_17710 = x_19714; +goto block_19626; +} +else +{ +lean_object* x_19717; lean_object* x_19718; lean_object* x_19719; +x_19717 = lean_ctor_get(x_19713, 0); +x_19718 = lean_ctor_get(x_19713, 1); +lean_inc(x_19718); +lean_inc(x_19717); +lean_dec(x_19713); +lean_ctor_set(x_19628, 0, x_19717); +x_19719 = lean_alloc_ctor(0, 2, 0); +lean_ctor_set(x_19719, 0, x_19628); +lean_ctor_set(x_19719, 1, x_19718); +x_17709 = x_19719; +x_17710 = x_19714; +goto block_19626; +} +} +else +{ +uint8_t x_19720; +lean_free_object(x_19628); +lean_dec(x_17707); +lean_dec(x_17705); +lean_dec(x_17699); +lean_dec(x_17698); +lean_dec(x_153); +lean_dec(x_5); +lean_dec(x_4); +lean_dec(x_2); +lean_dec(x_1); +x_19720 = !lean_is_exclusive(x_19712); +if (x_19720 == 0) +{ +return x_19712; +} +else +{ +lean_object* x_19721; lean_object* x_19722; lean_object* x_19723; +x_19721 = lean_ctor_get(x_19712, 0); +x_19722 = lean_ctor_get(x_19712, 1); +lean_inc(x_19722); +lean_inc(x_19721); +lean_dec(x_19712); +x_19723 = lean_alloc_ctor(1, 2, 0); +lean_ctor_set(x_19723, 0, x_19721); +lean_ctor_set(x_19723, 1, x_19722); +return x_19723; +} +} +} +} +else +{ +lean_object* x_19724; lean_object* x_19725; lean_object* x_19726; lean_object* x_19727; uint8_t x_19728; +x_19724 = lean_ctor_get(x_19628, 0); +lean_inc(x_19724); +lean_dec(x_19628); +x_19725 = lean_array_get_size(x_17707); 
+x_19726 = lean_ctor_get(x_19724, 3); +lean_inc(x_19726); +lean_dec(x_19724); +x_19727 = lean_array_get_size(x_19726); +lean_dec(x_19726); +x_19728 = lean_nat_dec_lt(x_19725, x_19727); +if (x_19728 == 0) +{ +uint8_t x_19729; +x_19729 = lean_nat_dec_eq(x_19725, x_19727); +if (x_19729 == 0) +{ +lean_object* x_19730; lean_object* x_19731; lean_object* x_19732; lean_object* x_19733; lean_object* x_19734; lean_object* x_19735; lean_object* x_19736; lean_object* x_19737; lean_object* x_19738; lean_object* x_19739; lean_object* x_19740; lean_object* x_19741; lean_object* x_19742; lean_object* x_19743; lean_object* x_19744; lean_object* x_19745; +x_19730 = lean_unsigned_to_nat(0u); +x_19731 = l_Array_extract___rarg(x_17707, x_19730, x_19727); +x_19732 = l_Array_extract___rarg(x_17707, x_19727, x_19725); +lean_dec(x_19725); +lean_inc(x_153); +lean_ctor_set_tag(x_19627, 6); +lean_ctor_set(x_19627, 1, x_19731); +lean_ctor_set(x_19627, 0, x_153); +x_19733 = lean_ctor_get(x_1, 0); +lean_inc(x_19733); +x_19734 = l_Lean_IR_ToIR_bindVar(x_19733, x_17708, x_4, x_5, x_19632); +x_19735 = lean_ctor_get(x_19734, 0); +lean_inc(x_19735); +x_19736 = lean_ctor_get(x_19734, 1); +lean_inc(x_19736); +lean_dec(x_19734); +x_19737 = lean_ctor_get(x_19735, 0); +lean_inc(x_19737); +x_19738 = lean_ctor_get(x_19735, 1); +lean_inc(x_19738); +lean_dec(x_19735); +x_19739 = l_Lean_IR_ToIR_newVar(x_19738, x_4, x_5, x_19736); +x_19740 = lean_ctor_get(x_19739, 0); +lean_inc(x_19740); +x_19741 = lean_ctor_get(x_19739, 1); +lean_inc(x_19741); +lean_dec(x_19739); +x_19742 = lean_ctor_get(x_19740, 0); +lean_inc(x_19742); +x_19743 = lean_ctor_get(x_19740, 1); +lean_inc(x_19743); +lean_dec(x_19740); +x_19744 = lean_ctor_get(x_1, 2); +lean_inc(x_19744); +lean_inc(x_5); +lean_inc(x_4); +x_19745 = l_Lean_IR_ToIR_lowerType(x_19744, x_19743, x_4, x_5, x_19741); +if (lean_obj_tag(x_19745) == 0) +{ +lean_object* x_19746; lean_object* x_19747; lean_object* x_19748; lean_object* x_19749; lean_object* x_19750; +x_19746 = lean_ctor_get(x_19745, 0); +lean_inc(x_19746); +x_19747 = lean_ctor_get(x_19745, 1); +lean_inc(x_19747); +lean_dec(x_19745); +x_19748 = lean_ctor_get(x_19746, 0); +lean_inc(x_19748); +x_19749 = lean_ctor_get(x_19746, 1); +lean_inc(x_19749); +lean_dec(x_19746); +lean_inc(x_5); +lean_inc(x_4); +lean_inc(x_2); +x_19750 = l_Lean_IR_ToIR_lowerLet___lambda__3(x_2, x_19742, x_19732, x_19737, x_19627, x_19748, x_19749, x_4, x_5, x_19747); +if (lean_obj_tag(x_19750) == 0) +{ +lean_object* x_19751; lean_object* x_19752; lean_object* x_19753; lean_object* x_19754; lean_object* x_19755; lean_object* x_19756; lean_object* x_19757; +x_19751 = lean_ctor_get(x_19750, 0); +lean_inc(x_19751); +x_19752 = lean_ctor_get(x_19750, 1); +lean_inc(x_19752); +lean_dec(x_19750); +x_19753 = lean_ctor_get(x_19751, 0); +lean_inc(x_19753); +x_19754 = lean_ctor_get(x_19751, 1); +lean_inc(x_19754); +if (lean_is_exclusive(x_19751)) { + lean_ctor_release(x_19751, 0); + lean_ctor_release(x_19751, 1); + x_19755 = x_19751; +} else { + lean_dec_ref(x_19751); + x_19755 = lean_box(0); +} +x_19756 = lean_alloc_ctor(1, 1, 0); +lean_ctor_set(x_19756, 0, x_19753); +if (lean_is_scalar(x_19755)) { + x_19757 = lean_alloc_ctor(0, 2, 0); +} else { + x_19757 = x_19755; +} +lean_ctor_set(x_19757, 0, x_19756); +lean_ctor_set(x_19757, 1, x_19754); +x_17709 = x_19757; +x_17710 = x_19752; +goto block_19626; +} +else +{ +lean_object* x_19758; lean_object* x_19759; lean_object* x_19760; lean_object* x_19761; +lean_dec(x_17707); +lean_dec(x_17705); +lean_dec(x_17699); +lean_dec(x_17698); 
+lean_dec(x_153); +lean_dec(x_5); +lean_dec(x_4); +lean_dec(x_2); +lean_dec(x_1); +x_19758 = lean_ctor_get(x_19750, 0); +lean_inc(x_19758); +x_19759 = lean_ctor_get(x_19750, 1); +lean_inc(x_19759); +if (lean_is_exclusive(x_19750)) { + lean_ctor_release(x_19750, 0); + lean_ctor_release(x_19750, 1); + x_19760 = x_19750; +} else { + lean_dec_ref(x_19750); + x_19760 = lean_box(0); +} +if (lean_is_scalar(x_19760)) { + x_19761 = lean_alloc_ctor(1, 2, 0); +} else { + x_19761 = x_19760; +} +lean_ctor_set(x_19761, 0, x_19758); +lean_ctor_set(x_19761, 1, x_19759); +return x_19761; +} +} +else +{ +lean_object* x_19762; lean_object* x_19763; lean_object* x_19764; lean_object* x_19765; +lean_dec(x_19742); +lean_dec(x_19737); +lean_dec(x_19627); +lean_dec(x_19732); +lean_dec(x_17707); +lean_dec(x_17705); +lean_dec(x_17699); +lean_dec(x_17698); +lean_dec(x_153); +lean_dec(x_5); +lean_dec(x_4); +lean_dec(x_2); +lean_dec(x_1); +x_19762 = lean_ctor_get(x_19745, 0); +lean_inc(x_19762); +x_19763 = lean_ctor_get(x_19745, 1); +lean_inc(x_19763); +if (lean_is_exclusive(x_19745)) { + lean_ctor_release(x_19745, 0); + lean_ctor_release(x_19745, 1); + x_19764 = x_19745; +} else { + lean_dec_ref(x_19745); + x_19764 = lean_box(0); +} +if (lean_is_scalar(x_19764)) { + x_19765 = lean_alloc_ctor(1, 2, 0); +} else { + x_19765 = x_19764; +} +lean_ctor_set(x_19765, 0, x_19762); +lean_ctor_set(x_19765, 1, x_19763); +return x_19765; +} +} +else +{ +lean_object* x_19766; lean_object* x_19767; lean_object* x_19768; lean_object* x_19769; lean_object* x_19770; lean_object* x_19771; lean_object* x_19772; lean_object* x_19773; +lean_dec(x_19727); +lean_dec(x_19725); +lean_inc(x_17707); +lean_inc(x_153); +lean_ctor_set_tag(x_19627, 6); +lean_ctor_set(x_19627, 1, x_17707); +lean_ctor_set(x_19627, 0, x_153); +x_19766 = lean_ctor_get(x_1, 0); +lean_inc(x_19766); +x_19767 = l_Lean_IR_ToIR_bindVar(x_19766, x_17708, x_4, x_5, x_19632); +x_19768 = lean_ctor_get(x_19767, 0); +lean_inc(x_19768); +x_19769 = lean_ctor_get(x_19767, 1); +lean_inc(x_19769); +lean_dec(x_19767); +x_19770 = lean_ctor_get(x_19768, 0); +lean_inc(x_19770); +x_19771 = lean_ctor_get(x_19768, 1); +lean_inc(x_19771); +lean_dec(x_19768); +x_19772 = lean_ctor_get(x_1, 2); +lean_inc(x_19772); +lean_inc(x_5); +lean_inc(x_4); +x_19773 = l_Lean_IR_ToIR_lowerType(x_19772, x_19771, x_4, x_5, x_19769); +if (lean_obj_tag(x_19773) == 0) +{ +lean_object* x_19774; lean_object* x_19775; lean_object* x_19776; lean_object* x_19777; lean_object* x_19778; +x_19774 = lean_ctor_get(x_19773, 0); +lean_inc(x_19774); +x_19775 = lean_ctor_get(x_19773, 1); +lean_inc(x_19775); +lean_dec(x_19773); +x_19776 = lean_ctor_get(x_19774, 0); +lean_inc(x_19776); +x_19777 = lean_ctor_get(x_19774, 1); +lean_inc(x_19777); +lean_dec(x_19774); +lean_inc(x_5); +lean_inc(x_4); +lean_inc(x_2); +x_19778 = l_Lean_IR_ToIR_lowerLet___lambda__1(x_2, x_19770, x_19627, x_19776, x_19777, x_4, x_5, x_19775); +if (lean_obj_tag(x_19778) == 0) +{ +lean_object* x_19779; lean_object* x_19780; lean_object* x_19781; lean_object* x_19782; lean_object* x_19783; lean_object* x_19784; lean_object* x_19785; +x_19779 = lean_ctor_get(x_19778, 0); +lean_inc(x_19779); +x_19780 = lean_ctor_get(x_19778, 1); +lean_inc(x_19780); +lean_dec(x_19778); +x_19781 = lean_ctor_get(x_19779, 0); +lean_inc(x_19781); +x_19782 = lean_ctor_get(x_19779, 1); +lean_inc(x_19782); +if (lean_is_exclusive(x_19779)) { + lean_ctor_release(x_19779, 0); + lean_ctor_release(x_19779, 1); + x_19783 = x_19779; +} else { + lean_dec_ref(x_19779); + x_19783 = lean_box(0); +} 
+x_19784 = lean_alloc_ctor(1, 1, 0); +lean_ctor_set(x_19784, 0, x_19781); +if (lean_is_scalar(x_19783)) { + x_19785 = lean_alloc_ctor(0, 2, 0); +} else { + x_19785 = x_19783; +} +lean_ctor_set(x_19785, 0, x_19784); +lean_ctor_set(x_19785, 1, x_19782); +x_17709 = x_19785; +x_17710 = x_19780; +goto block_19626; +} +else +{ +lean_object* x_19786; lean_object* x_19787; lean_object* x_19788; lean_object* x_19789; +lean_dec(x_17707); +lean_dec(x_17705); +lean_dec(x_17699); +lean_dec(x_17698); +lean_dec(x_153); +lean_dec(x_5); +lean_dec(x_4); +lean_dec(x_2); +lean_dec(x_1); +x_19786 = lean_ctor_get(x_19778, 0); +lean_inc(x_19786); +x_19787 = lean_ctor_get(x_19778, 1); +lean_inc(x_19787); +if (lean_is_exclusive(x_19778)) { + lean_ctor_release(x_19778, 0); + lean_ctor_release(x_19778, 1); + x_19788 = x_19778; +} else { + lean_dec_ref(x_19778); + x_19788 = lean_box(0); +} +if (lean_is_scalar(x_19788)) { + x_19789 = lean_alloc_ctor(1, 2, 0); +} else { + x_19789 = x_19788; +} +lean_ctor_set(x_19789, 0, x_19786); +lean_ctor_set(x_19789, 1, x_19787); +return x_19789; +} +} +else +{ +lean_object* x_19790; lean_object* x_19791; lean_object* x_19792; lean_object* x_19793; +lean_dec(x_19770); +lean_dec(x_19627); +lean_dec(x_17707); +lean_dec(x_17705); +lean_dec(x_17699); +lean_dec(x_17698); +lean_dec(x_153); +lean_dec(x_5); +lean_dec(x_4); +lean_dec(x_2); +lean_dec(x_1); +x_19790 = lean_ctor_get(x_19773, 0); +lean_inc(x_19790); +x_19791 = lean_ctor_get(x_19773, 1); +lean_inc(x_19791); +if (lean_is_exclusive(x_19773)) { + lean_ctor_release(x_19773, 0); + lean_ctor_release(x_19773, 1); + x_19792 = x_19773; +} else { + lean_dec_ref(x_19773); + x_19792 = lean_box(0); +} +if (lean_is_scalar(x_19792)) { + x_19793 = lean_alloc_ctor(1, 2, 0); +} else { + x_19793 = x_19792; +} +lean_ctor_set(x_19793, 0, x_19790); +lean_ctor_set(x_19793, 1, x_19791); +return x_19793; +} +} +} +else +{ +lean_object* x_19794; lean_object* x_19795; lean_object* x_19796; lean_object* x_19797; lean_object* x_19798; lean_object* x_19799; lean_object* x_19800; lean_object* x_19801; +lean_dec(x_19727); +lean_dec(x_19725); +lean_inc(x_17707); +lean_inc(x_153); +lean_ctor_set_tag(x_19627, 7); +lean_ctor_set(x_19627, 1, x_17707); +lean_ctor_set(x_19627, 0, x_153); +x_19794 = lean_ctor_get(x_1, 0); +lean_inc(x_19794); +x_19795 = l_Lean_IR_ToIR_bindVar(x_19794, x_17708, x_4, x_5, x_19632); +x_19796 = lean_ctor_get(x_19795, 0); +lean_inc(x_19796); +x_19797 = lean_ctor_get(x_19795, 1); +lean_inc(x_19797); +lean_dec(x_19795); +x_19798 = lean_ctor_get(x_19796, 0); +lean_inc(x_19798); +x_19799 = lean_ctor_get(x_19796, 1); +lean_inc(x_19799); +lean_dec(x_19796); +x_19800 = lean_box(7); +lean_inc(x_5); +lean_inc(x_4); +lean_inc(x_2); +x_19801 = l_Lean_IR_ToIR_lowerLet___lambda__1(x_2, x_19798, x_19627, x_19800, x_19799, x_4, x_5, x_19797); +if (lean_obj_tag(x_19801) == 0) +{ +lean_object* x_19802; lean_object* x_19803; lean_object* x_19804; lean_object* x_19805; lean_object* x_19806; lean_object* x_19807; lean_object* x_19808; +x_19802 = lean_ctor_get(x_19801, 0); +lean_inc(x_19802); +x_19803 = lean_ctor_get(x_19801, 1); +lean_inc(x_19803); +lean_dec(x_19801); +x_19804 = lean_ctor_get(x_19802, 0); +lean_inc(x_19804); +x_19805 = lean_ctor_get(x_19802, 1); +lean_inc(x_19805); +if (lean_is_exclusive(x_19802)) { + lean_ctor_release(x_19802, 0); + lean_ctor_release(x_19802, 1); + x_19806 = x_19802; +} else { + lean_dec_ref(x_19802); + x_19806 = lean_box(0); +} +x_19807 = lean_alloc_ctor(1, 1, 0); +lean_ctor_set(x_19807, 0, x_19804); +if 
(lean_is_scalar(x_19806)) { + x_19808 = lean_alloc_ctor(0, 2, 0); +} else { + x_19808 = x_19806; +} +lean_ctor_set(x_19808, 0, x_19807); +lean_ctor_set(x_19808, 1, x_19805); +x_17709 = x_19808; +x_17710 = x_19803; +goto block_19626; +} +else +{ +lean_object* x_19809; lean_object* x_19810; lean_object* x_19811; lean_object* x_19812; +lean_dec(x_17707); +lean_dec(x_17705); +lean_dec(x_17699); +lean_dec(x_17698); +lean_dec(x_153); +lean_dec(x_5); +lean_dec(x_4); +lean_dec(x_2); +lean_dec(x_1); +x_19809 = lean_ctor_get(x_19801, 0); +lean_inc(x_19809); +x_19810 = lean_ctor_get(x_19801, 1); +lean_inc(x_19810); +if (lean_is_exclusive(x_19801)) { + lean_ctor_release(x_19801, 0); + lean_ctor_release(x_19801, 1); + x_19811 = x_19801; +} else { + lean_dec_ref(x_19801); + x_19811 = lean_box(0); +} +if (lean_is_scalar(x_19811)) { + x_19812 = lean_alloc_ctor(1, 2, 0); +} else { + x_19812 = x_19811; +} +lean_ctor_set(x_19812, 0, x_19809); +lean_ctor_set(x_19812, 1, x_19810); +return x_19812; +} +} +} +} +else +{ +lean_object* x_19813; lean_object* x_19814; lean_object* x_19815; lean_object* x_19816; lean_object* x_19817; lean_object* x_19818; uint8_t x_19819; +x_19813 = lean_ctor_get(x_19627, 1); +lean_inc(x_19813); +lean_dec(x_19627); +x_19814 = lean_ctor_get(x_19628, 0); +lean_inc(x_19814); +if (lean_is_exclusive(x_19628)) { + lean_ctor_release(x_19628, 0); + x_19815 = x_19628; +} else { + lean_dec_ref(x_19628); + x_19815 = lean_box(0); +} +x_19816 = lean_array_get_size(x_17707); +x_19817 = lean_ctor_get(x_19814, 3); +lean_inc(x_19817); +lean_dec(x_19814); +x_19818 = lean_array_get_size(x_19817); +lean_dec(x_19817); +x_19819 = lean_nat_dec_lt(x_19816, x_19818); +if (x_19819 == 0) +{ +uint8_t x_19820; +x_19820 = lean_nat_dec_eq(x_19816, x_19818); +if (x_19820 == 0) +{ +lean_object* x_19821; lean_object* x_19822; lean_object* x_19823; lean_object* x_19824; lean_object* x_19825; lean_object* x_19826; lean_object* x_19827; lean_object* x_19828; lean_object* x_19829; lean_object* x_19830; lean_object* x_19831; lean_object* x_19832; lean_object* x_19833; lean_object* x_19834; lean_object* x_19835; lean_object* x_19836; lean_object* x_19837; +x_19821 = lean_unsigned_to_nat(0u); +x_19822 = l_Array_extract___rarg(x_17707, x_19821, x_19818); +x_19823 = l_Array_extract___rarg(x_17707, x_19818, x_19816); +lean_dec(x_19816); +lean_inc(x_153); +x_19824 = lean_alloc_ctor(6, 2, 0); +lean_ctor_set(x_19824, 0, x_153); +lean_ctor_set(x_19824, 1, x_19822); +x_19825 = lean_ctor_get(x_1, 0); +lean_inc(x_19825); +x_19826 = l_Lean_IR_ToIR_bindVar(x_19825, x_17708, x_4, x_5, x_19813); +x_19827 = lean_ctor_get(x_19826, 0); +lean_inc(x_19827); +x_19828 = lean_ctor_get(x_19826, 1); +lean_inc(x_19828); +lean_dec(x_19826); +x_19829 = lean_ctor_get(x_19827, 0); +lean_inc(x_19829); +x_19830 = lean_ctor_get(x_19827, 1); +lean_inc(x_19830); +lean_dec(x_19827); +x_19831 = l_Lean_IR_ToIR_newVar(x_19830, x_4, x_5, x_19828); +x_19832 = lean_ctor_get(x_19831, 0); +lean_inc(x_19832); +x_19833 = lean_ctor_get(x_19831, 1); +lean_inc(x_19833); +lean_dec(x_19831); +x_19834 = lean_ctor_get(x_19832, 0); +lean_inc(x_19834); +x_19835 = lean_ctor_get(x_19832, 1); +lean_inc(x_19835); +lean_dec(x_19832); +x_19836 = lean_ctor_get(x_1, 2); +lean_inc(x_19836); +lean_inc(x_5); +lean_inc(x_4); +x_19837 = l_Lean_IR_ToIR_lowerType(x_19836, x_19835, x_4, x_5, x_19833); +if (lean_obj_tag(x_19837) == 0) +{ +lean_object* x_19838; lean_object* x_19839; lean_object* x_19840; lean_object* x_19841; lean_object* x_19842; +x_19838 = lean_ctor_get(x_19837, 0); 
+lean_inc(x_19838); +x_19839 = lean_ctor_get(x_19837, 1); +lean_inc(x_19839); +lean_dec(x_19837); +x_19840 = lean_ctor_get(x_19838, 0); +lean_inc(x_19840); +x_19841 = lean_ctor_get(x_19838, 1); +lean_inc(x_19841); +lean_dec(x_19838); +lean_inc(x_5); +lean_inc(x_4); +lean_inc(x_2); +x_19842 = l_Lean_IR_ToIR_lowerLet___lambda__3(x_2, x_19834, x_19823, x_19829, x_19824, x_19840, x_19841, x_4, x_5, x_19839); +if (lean_obj_tag(x_19842) == 0) +{ +lean_object* x_19843; lean_object* x_19844; lean_object* x_19845; lean_object* x_19846; lean_object* x_19847; lean_object* x_19848; lean_object* x_19849; +x_19843 = lean_ctor_get(x_19842, 0); +lean_inc(x_19843); +x_19844 = lean_ctor_get(x_19842, 1); +lean_inc(x_19844); +lean_dec(x_19842); +x_19845 = lean_ctor_get(x_19843, 0); +lean_inc(x_19845); +x_19846 = lean_ctor_get(x_19843, 1); +lean_inc(x_19846); +if (lean_is_exclusive(x_19843)) { + lean_ctor_release(x_19843, 0); + lean_ctor_release(x_19843, 1); + x_19847 = x_19843; +} else { + lean_dec_ref(x_19843); + x_19847 = lean_box(0); +} +if (lean_is_scalar(x_19815)) { + x_19848 = lean_alloc_ctor(1, 1, 0); +} else { + x_19848 = x_19815; +} +lean_ctor_set(x_19848, 0, x_19845); +if (lean_is_scalar(x_19847)) { + x_19849 = lean_alloc_ctor(0, 2, 0); +} else { + x_19849 = x_19847; +} +lean_ctor_set(x_19849, 0, x_19848); +lean_ctor_set(x_19849, 1, x_19846); +x_17709 = x_19849; +x_17710 = x_19844; +goto block_19626; +} +else +{ +lean_object* x_19850; lean_object* x_19851; lean_object* x_19852; lean_object* x_19853; +lean_dec(x_19815); +lean_dec(x_17707); +lean_dec(x_17705); +lean_dec(x_17699); +lean_dec(x_17698); +lean_dec(x_153); +lean_dec(x_5); +lean_dec(x_4); +lean_dec(x_2); +lean_dec(x_1); +x_19850 = lean_ctor_get(x_19842, 0); +lean_inc(x_19850); +x_19851 = lean_ctor_get(x_19842, 1); +lean_inc(x_19851); +if (lean_is_exclusive(x_19842)) { + lean_ctor_release(x_19842, 0); + lean_ctor_release(x_19842, 1); + x_19852 = x_19842; +} else { + lean_dec_ref(x_19842); + x_19852 = lean_box(0); +} +if (lean_is_scalar(x_19852)) { + x_19853 = lean_alloc_ctor(1, 2, 0); +} else { + x_19853 = x_19852; +} +lean_ctor_set(x_19853, 0, x_19850); +lean_ctor_set(x_19853, 1, x_19851); +return x_19853; +} +} +else +{ +lean_object* x_19854; lean_object* x_19855; lean_object* x_19856; lean_object* x_19857; +lean_dec(x_19834); +lean_dec(x_19829); +lean_dec(x_19824); +lean_dec(x_19823); +lean_dec(x_19815); +lean_dec(x_17707); +lean_dec(x_17705); +lean_dec(x_17699); +lean_dec(x_17698); +lean_dec(x_153); +lean_dec(x_5); +lean_dec(x_4); +lean_dec(x_2); +lean_dec(x_1); +x_19854 = lean_ctor_get(x_19837, 0); +lean_inc(x_19854); +x_19855 = lean_ctor_get(x_19837, 1); +lean_inc(x_19855); +if (lean_is_exclusive(x_19837)) { + lean_ctor_release(x_19837, 0); + lean_ctor_release(x_19837, 1); + x_19856 = x_19837; +} else { + lean_dec_ref(x_19837); + x_19856 = lean_box(0); +} +if (lean_is_scalar(x_19856)) { + x_19857 = lean_alloc_ctor(1, 2, 0); +} else { + x_19857 = x_19856; +} +lean_ctor_set(x_19857, 0, x_19854); +lean_ctor_set(x_19857, 1, x_19855); +return x_19857; +} +} +else +{ +lean_object* x_19858; lean_object* x_19859; lean_object* x_19860; lean_object* x_19861; lean_object* x_19862; lean_object* x_19863; lean_object* x_19864; lean_object* x_19865; lean_object* x_19866; +lean_dec(x_19818); +lean_dec(x_19816); +lean_inc(x_17707); +lean_inc(x_153); +x_19858 = lean_alloc_ctor(6, 2, 0); +lean_ctor_set(x_19858, 0, x_153); +lean_ctor_set(x_19858, 1, x_17707); +x_19859 = lean_ctor_get(x_1, 0); +lean_inc(x_19859); +x_19860 = l_Lean_IR_ToIR_bindVar(x_19859, 
x_17708, x_4, x_5, x_19813); +x_19861 = lean_ctor_get(x_19860, 0); +lean_inc(x_19861); +x_19862 = lean_ctor_get(x_19860, 1); +lean_inc(x_19862); +lean_dec(x_19860); +x_19863 = lean_ctor_get(x_19861, 0); +lean_inc(x_19863); +x_19864 = lean_ctor_get(x_19861, 1); +lean_inc(x_19864); +lean_dec(x_19861); +x_19865 = lean_ctor_get(x_1, 2); +lean_inc(x_19865); +lean_inc(x_5); +lean_inc(x_4); +x_19866 = l_Lean_IR_ToIR_lowerType(x_19865, x_19864, x_4, x_5, x_19862); +if (lean_obj_tag(x_19866) == 0) +{ +lean_object* x_19867; lean_object* x_19868; lean_object* x_19869; lean_object* x_19870; lean_object* x_19871; +x_19867 = lean_ctor_get(x_19866, 0); +lean_inc(x_19867); +x_19868 = lean_ctor_get(x_19866, 1); +lean_inc(x_19868); +lean_dec(x_19866); +x_19869 = lean_ctor_get(x_19867, 0); +lean_inc(x_19869); +x_19870 = lean_ctor_get(x_19867, 1); +lean_inc(x_19870); +lean_dec(x_19867); +lean_inc(x_5); +lean_inc(x_4); +lean_inc(x_2); +x_19871 = l_Lean_IR_ToIR_lowerLet___lambda__1(x_2, x_19863, x_19858, x_19869, x_19870, x_4, x_5, x_19868); +if (lean_obj_tag(x_19871) == 0) +{ +lean_object* x_19872; lean_object* x_19873; lean_object* x_19874; lean_object* x_19875; lean_object* x_19876; lean_object* x_19877; lean_object* x_19878; +x_19872 = lean_ctor_get(x_19871, 0); +lean_inc(x_19872); +x_19873 = lean_ctor_get(x_19871, 1); +lean_inc(x_19873); +lean_dec(x_19871); +x_19874 = lean_ctor_get(x_19872, 0); +lean_inc(x_19874); +x_19875 = lean_ctor_get(x_19872, 1); +lean_inc(x_19875); +if (lean_is_exclusive(x_19872)) { + lean_ctor_release(x_19872, 0); + lean_ctor_release(x_19872, 1); + x_19876 = x_19872; +} else { + lean_dec_ref(x_19872); + x_19876 = lean_box(0); +} +if (lean_is_scalar(x_19815)) { + x_19877 = lean_alloc_ctor(1, 1, 0); +} else { + x_19877 = x_19815; +} +lean_ctor_set(x_19877, 0, x_19874); +if (lean_is_scalar(x_19876)) { + x_19878 = lean_alloc_ctor(0, 2, 0); +} else { + x_19878 = x_19876; +} +lean_ctor_set(x_19878, 0, x_19877); +lean_ctor_set(x_19878, 1, x_19875); +x_17709 = x_19878; +x_17710 = x_19873; +goto block_19626; +} +else +{ +lean_object* x_19879; lean_object* x_19880; lean_object* x_19881; lean_object* x_19882; +lean_dec(x_19815); +lean_dec(x_17707); +lean_dec(x_17705); +lean_dec(x_17699); +lean_dec(x_17698); +lean_dec(x_153); +lean_dec(x_5); +lean_dec(x_4); +lean_dec(x_2); +lean_dec(x_1); +x_19879 = lean_ctor_get(x_19871, 0); +lean_inc(x_19879); +x_19880 = lean_ctor_get(x_19871, 1); +lean_inc(x_19880); +if (lean_is_exclusive(x_19871)) { + lean_ctor_release(x_19871, 0); + lean_ctor_release(x_19871, 1); + x_19881 = x_19871; +} else { + lean_dec_ref(x_19871); + x_19881 = lean_box(0); +} +if (lean_is_scalar(x_19881)) { + x_19882 = lean_alloc_ctor(1, 2, 0); +} else { + x_19882 = x_19881; +} +lean_ctor_set(x_19882, 0, x_19879); +lean_ctor_set(x_19882, 1, x_19880); +return x_19882; +} +} +else +{ +lean_object* x_19883; lean_object* x_19884; lean_object* x_19885; lean_object* x_19886; +lean_dec(x_19863); +lean_dec(x_19858); +lean_dec(x_19815); +lean_dec(x_17707); +lean_dec(x_17705); +lean_dec(x_17699); +lean_dec(x_17698); +lean_dec(x_153); +lean_dec(x_5); +lean_dec(x_4); +lean_dec(x_2); +lean_dec(x_1); +x_19883 = lean_ctor_get(x_19866, 0); +lean_inc(x_19883); +x_19884 = lean_ctor_get(x_19866, 1); +lean_inc(x_19884); +if (lean_is_exclusive(x_19866)) { + lean_ctor_release(x_19866, 0); + lean_ctor_release(x_19866, 1); + x_19885 = x_19866; +} else { + lean_dec_ref(x_19866); + x_19885 = lean_box(0); +} +if (lean_is_scalar(x_19885)) { + x_19886 = lean_alloc_ctor(1, 2, 0); +} else { + x_19886 = x_19885; +} 
+lean_ctor_set(x_19886, 0, x_19883); +lean_ctor_set(x_19886, 1, x_19884); +return x_19886; +} +} +} +else +{ +lean_object* x_19887; lean_object* x_19888; lean_object* x_19889; lean_object* x_19890; lean_object* x_19891; lean_object* x_19892; lean_object* x_19893; lean_object* x_19894; lean_object* x_19895; +lean_dec(x_19818); +lean_dec(x_19816); +lean_inc(x_17707); +lean_inc(x_153); +x_19887 = lean_alloc_ctor(7, 2, 0); +lean_ctor_set(x_19887, 0, x_153); +lean_ctor_set(x_19887, 1, x_17707); +x_19888 = lean_ctor_get(x_1, 0); +lean_inc(x_19888); +x_19889 = l_Lean_IR_ToIR_bindVar(x_19888, x_17708, x_4, x_5, x_19813); +x_19890 = lean_ctor_get(x_19889, 0); +lean_inc(x_19890); +x_19891 = lean_ctor_get(x_19889, 1); +lean_inc(x_19891); +lean_dec(x_19889); +x_19892 = lean_ctor_get(x_19890, 0); +lean_inc(x_19892); +x_19893 = lean_ctor_get(x_19890, 1); +lean_inc(x_19893); +lean_dec(x_19890); +x_19894 = lean_box(7); +lean_inc(x_5); +lean_inc(x_4); +lean_inc(x_2); +x_19895 = l_Lean_IR_ToIR_lowerLet___lambda__1(x_2, x_19892, x_19887, x_19894, x_19893, x_4, x_5, x_19891); +if (lean_obj_tag(x_19895) == 0) +{ +lean_object* x_19896; lean_object* x_19897; lean_object* x_19898; lean_object* x_19899; lean_object* x_19900; lean_object* x_19901; lean_object* x_19902; +x_19896 = lean_ctor_get(x_19895, 0); +lean_inc(x_19896); +x_19897 = lean_ctor_get(x_19895, 1); +lean_inc(x_19897); +lean_dec(x_19895); +x_19898 = lean_ctor_get(x_19896, 0); +lean_inc(x_19898); +x_19899 = lean_ctor_get(x_19896, 1); +lean_inc(x_19899); +if (lean_is_exclusive(x_19896)) { + lean_ctor_release(x_19896, 0); + lean_ctor_release(x_19896, 1); + x_19900 = x_19896; +} else { + lean_dec_ref(x_19896); + x_19900 = lean_box(0); +} +if (lean_is_scalar(x_19815)) { + x_19901 = lean_alloc_ctor(1, 1, 0); +} else { + x_19901 = x_19815; +} +lean_ctor_set(x_19901, 0, x_19898); +if (lean_is_scalar(x_19900)) { + x_19902 = lean_alloc_ctor(0, 2, 0); +} else { + x_19902 = x_19900; +} +lean_ctor_set(x_19902, 0, x_19901); +lean_ctor_set(x_19902, 1, x_19899); +x_17709 = x_19902; +x_17710 = x_19897; +goto block_19626; +} +else +{ +lean_object* x_19903; lean_object* x_19904; lean_object* x_19905; lean_object* x_19906; +lean_dec(x_19815); +lean_dec(x_17707); +lean_dec(x_17705); +lean_dec(x_17699); +lean_dec(x_17698); +lean_dec(x_153); +lean_dec(x_5); +lean_dec(x_4); +lean_dec(x_2); +lean_dec(x_1); +x_19903 = lean_ctor_get(x_19895, 0); +lean_inc(x_19903); +x_19904 = lean_ctor_get(x_19895, 1); +lean_inc(x_19904); +if (lean_is_exclusive(x_19895)) { + lean_ctor_release(x_19895, 0); + lean_ctor_release(x_19895, 1); + x_19905 = x_19895; +} else { + lean_dec_ref(x_19895); + x_19905 = lean_box(0); +} +if (lean_is_scalar(x_19905)) { + x_19906 = lean_alloc_ctor(1, 2, 0); +} else { + x_19906 = x_19905; +} +lean_ctor_set(x_19906, 0, x_19903); +lean_ctor_set(x_19906, 1, x_19904); +return x_19906; +} +} +} +} +block_19626: +{ +lean_object* x_17711; +x_17711 = lean_ctor_get(x_17709, 0); +lean_inc(x_17711); +if (lean_obj_tag(x_17711) == 0) +{ +uint8_t x_17712; +lean_dec(x_17705); +x_17712 = !lean_is_exclusive(x_17709); +if (x_17712 == 0) +{ +lean_object* x_17713; lean_object* x_17714; lean_object* x_17715; lean_object* x_17716; lean_object* x_17717; lean_object* x_17718; lean_object* x_17719; uint8_t x_17720; lean_object* x_17721; +x_17713 = lean_ctor_get(x_17709, 1); +x_17714 = lean_ctor_get(x_17709, 0); +lean_dec(x_17714); +x_17715 = lean_st_ref_get(x_5, x_17710); +x_17716 = lean_ctor_get(x_17715, 0); +lean_inc(x_17716); +x_17717 = lean_ctor_get(x_17715, 1); +lean_inc(x_17717); 
+if (lean_is_exclusive(x_17715)) { + lean_ctor_release(x_17715, 0); + lean_ctor_release(x_17715, 1); + x_17718 = x_17715; +} else { + lean_dec_ref(x_17715); + x_17718 = lean_box(0); +} +x_17719 = lean_ctor_get(x_17716, 0); +lean_inc(x_17719); +lean_dec(x_17716); +x_17720 = 0; +lean_inc(x_153); +lean_inc(x_17719); +x_17721 = l_Lean_Environment_find_x3f(x_17719, x_153, x_17720); +if (lean_obj_tag(x_17721) == 0) +{ +lean_object* x_17722; lean_object* x_17723; +lean_dec(x_17719); +lean_dec(x_17718); +lean_free_object(x_17709); +lean_dec(x_17707); +lean_dec(x_17699); +lean_dec(x_17698); +lean_dec(x_153); +lean_dec(x_2); +lean_dec(x_1); +x_17722 = l_Lean_IR_ToIR_lowerLet___closed__6; +x_17723 = l_panic___at_Lean_IR_ToIR_lowerAlt_loop___spec__1(x_17722, x_17713, x_4, x_5, x_17717); +return x_17723; +} +else +{ +lean_object* x_17724; +x_17724 = lean_ctor_get(x_17721, 0); +lean_inc(x_17724); +lean_dec(x_17721); +switch (lean_obj_tag(x_17724)) { +case 0: +{ +uint8_t x_17725; +lean_dec(x_17719); +lean_dec(x_17699); +lean_dec(x_17698); +x_17725 = !lean_is_exclusive(x_17724); +if (x_17725 == 0) +{ +lean_object* x_17726; lean_object* x_17727; uint8_t x_17728; +x_17726 = lean_ctor_get(x_17724, 0); +lean_dec(x_17726); +x_17727 = l_Lean_IR_ToIR_lowerLet___closed__9; +x_17728 = lean_name_eq(x_153, x_17727); +if (x_17728 == 0) +{ +lean_object* x_17729; uint8_t x_17730; +x_17729 = l_Lean_IR_ToIR_lowerLet___closed__11; +x_17730 = lean_name_eq(x_153, x_17729); +if (x_17730 == 0) +{ +lean_object* x_17731; lean_object* x_17732; lean_object* x_17733; +lean_dec(x_17718); +lean_free_object(x_17709); +lean_inc(x_153); +x_17731 = l_Lean_IR_ToIR_findDecl(x_153, x_17713, x_4, x_5, x_17717); +x_17732 = lean_ctor_get(x_17731, 0); +lean_inc(x_17732); +x_17733 = lean_ctor_get(x_17732, 0); +lean_inc(x_17733); +if (lean_obj_tag(x_17733) == 0) +{ +uint8_t x_17734; +lean_dec(x_17707); +lean_dec(x_2); +lean_dec(x_1); +x_17734 = !lean_is_exclusive(x_17731); +if (x_17734 == 0) +{ +lean_object* x_17735; lean_object* x_17736; uint8_t x_17737; +x_17735 = lean_ctor_get(x_17731, 1); +x_17736 = lean_ctor_get(x_17731, 0); +lean_dec(x_17736); +x_17737 = !lean_is_exclusive(x_17732); +if (x_17737 == 0) +{ +lean_object* x_17738; lean_object* x_17739; uint8_t x_17740; lean_object* x_17741; lean_object* x_17742; lean_object* x_17743; lean_object* x_17744; lean_object* x_17745; lean_object* x_17746; +x_17738 = lean_ctor_get(x_17732, 1); +x_17739 = lean_ctor_get(x_17732, 0); +lean_dec(x_17739); +x_17740 = 1; +x_17741 = l_Lean_IR_ToIR_lowerLet___closed__14; +x_17742 = l_Lean_Name_toString(x_153, x_17740, x_17741); +lean_ctor_set_tag(x_17724, 3); +lean_ctor_set(x_17724, 0, x_17742); +x_17743 = l_Lean_IR_ToIR_lowerLet___closed__13; +lean_ctor_set_tag(x_17732, 5); +lean_ctor_set(x_17732, 1, x_17724); +lean_ctor_set(x_17732, 0, x_17743); +x_17744 = l_Lean_IR_ToIR_lowerLet___closed__16; +lean_ctor_set_tag(x_17731, 5); +lean_ctor_set(x_17731, 1, x_17744); +x_17745 = l_Lean_MessageData_ofFormat(x_17731); +x_17746 = l_Lean_throwError___at_Lean_IR_ToIR_lowerLet___spec__1(x_17745, x_17738, x_4, x_5, x_17735); +lean_dec(x_5); +lean_dec(x_4); +lean_dec(x_17738); +return x_17746; +} +else +{ +lean_object* x_17747; uint8_t x_17748; lean_object* x_17749; lean_object* x_17750; lean_object* x_17751; lean_object* x_17752; lean_object* x_17753; lean_object* x_17754; lean_object* x_17755; +x_17747 = lean_ctor_get(x_17732, 1); +lean_inc(x_17747); +lean_dec(x_17732); +x_17748 = 1; +x_17749 = l_Lean_IR_ToIR_lowerLet___closed__14; +x_17750 = l_Lean_Name_toString(x_153, 
x_17748, x_17749); +lean_ctor_set_tag(x_17724, 3); +lean_ctor_set(x_17724, 0, x_17750); +x_17751 = l_Lean_IR_ToIR_lowerLet___closed__13; +x_17752 = lean_alloc_ctor(5, 2, 0); +lean_ctor_set(x_17752, 0, x_17751); +lean_ctor_set(x_17752, 1, x_17724); +x_17753 = l_Lean_IR_ToIR_lowerLet___closed__16; +lean_ctor_set_tag(x_17731, 5); +lean_ctor_set(x_17731, 1, x_17753); +lean_ctor_set(x_17731, 0, x_17752); +x_17754 = l_Lean_MessageData_ofFormat(x_17731); +x_17755 = l_Lean_throwError___at_Lean_IR_ToIR_lowerLet___spec__1(x_17754, x_17747, x_4, x_5, x_17735); +lean_dec(x_5); +lean_dec(x_4); +lean_dec(x_17747); +return x_17755; +} +} +else +{ +lean_object* x_17756; lean_object* x_17757; lean_object* x_17758; uint8_t x_17759; lean_object* x_17760; lean_object* x_17761; lean_object* x_17762; lean_object* x_17763; lean_object* x_17764; lean_object* x_17765; lean_object* x_17766; lean_object* x_17767; +x_17756 = lean_ctor_get(x_17731, 1); +lean_inc(x_17756); +lean_dec(x_17731); +x_17757 = lean_ctor_get(x_17732, 1); +lean_inc(x_17757); +if (lean_is_exclusive(x_17732)) { + lean_ctor_release(x_17732, 0); + lean_ctor_release(x_17732, 1); + x_17758 = x_17732; +} else { + lean_dec_ref(x_17732); + x_17758 = lean_box(0); +} +x_17759 = 1; +x_17760 = l_Lean_IR_ToIR_lowerLet___closed__14; +x_17761 = l_Lean_Name_toString(x_153, x_17759, x_17760); +lean_ctor_set_tag(x_17724, 3); +lean_ctor_set(x_17724, 0, x_17761); +x_17762 = l_Lean_IR_ToIR_lowerLet___closed__13; +if (lean_is_scalar(x_17758)) { + x_17763 = lean_alloc_ctor(5, 2, 0); +} else { + x_17763 = x_17758; + lean_ctor_set_tag(x_17763, 5); +} +lean_ctor_set(x_17763, 0, x_17762); +lean_ctor_set(x_17763, 1, x_17724); +x_17764 = l_Lean_IR_ToIR_lowerLet___closed__16; +x_17765 = lean_alloc_ctor(5, 2, 0); +lean_ctor_set(x_17765, 0, x_17763); +lean_ctor_set(x_17765, 1, x_17764); +x_17766 = l_Lean_MessageData_ofFormat(x_17765); +x_17767 = l_Lean_throwError___at_Lean_IR_ToIR_lowerLet___spec__1(x_17766, x_17757, x_4, x_5, x_17756); +lean_dec(x_5); +lean_dec(x_4); +lean_dec(x_17757); +return x_17767; +} +} +else +{ +lean_object* x_17768; uint8_t x_17769; +lean_free_object(x_17724); +x_17768 = lean_ctor_get(x_17731, 1); +lean_inc(x_17768); +lean_dec(x_17731); +x_17769 = !lean_is_exclusive(x_17732); +if (x_17769 == 0) +{ +lean_object* x_17770; lean_object* x_17771; lean_object* x_17772; lean_object* x_17773; lean_object* x_17774; lean_object* x_17775; uint8_t x_17776; +x_17770 = lean_ctor_get(x_17732, 1); +x_17771 = lean_ctor_get(x_17732, 0); +lean_dec(x_17771); +x_17772 = lean_ctor_get(x_17733, 0); +lean_inc(x_17772); +lean_dec(x_17733); +x_17773 = lean_array_get_size(x_17707); +x_17774 = l_Lean_IR_Decl_params(x_17772); +lean_dec(x_17772); +x_17775 = lean_array_get_size(x_17774); +lean_dec(x_17774); +x_17776 = lean_nat_dec_lt(x_17773, x_17775); +if (x_17776 == 0) +{ +uint8_t x_17777; +x_17777 = lean_nat_dec_eq(x_17773, x_17775); +if (x_17777 == 0) +{ +lean_object* x_17778; lean_object* x_17779; lean_object* x_17780; lean_object* x_17781; lean_object* x_17782; lean_object* x_17783; lean_object* x_17784; lean_object* x_17785; lean_object* x_17786; lean_object* x_17787; lean_object* x_17788; lean_object* x_17789; lean_object* x_17790; lean_object* x_17791; lean_object* x_17792; lean_object* x_17793; +x_17778 = lean_unsigned_to_nat(0u); +x_17779 = l_Array_extract___rarg(x_17707, x_17778, x_17775); +x_17780 = l_Array_extract___rarg(x_17707, x_17775, x_17773); +lean_dec(x_17773); +lean_dec(x_17707); +lean_ctor_set_tag(x_17732, 6); +lean_ctor_set(x_17732, 1, x_17779); 
+lean_ctor_set(x_17732, 0, x_153); +x_17781 = lean_ctor_get(x_1, 0); +lean_inc(x_17781); +x_17782 = l_Lean_IR_ToIR_bindVar(x_17781, x_17770, x_4, x_5, x_17768); +x_17783 = lean_ctor_get(x_17782, 0); +lean_inc(x_17783); +x_17784 = lean_ctor_get(x_17782, 1); +lean_inc(x_17784); +lean_dec(x_17782); +x_17785 = lean_ctor_get(x_17783, 0); +lean_inc(x_17785); +x_17786 = lean_ctor_get(x_17783, 1); +lean_inc(x_17786); +lean_dec(x_17783); +x_17787 = l_Lean_IR_ToIR_newVar(x_17786, x_4, x_5, x_17784); +x_17788 = lean_ctor_get(x_17787, 0); +lean_inc(x_17788); +x_17789 = lean_ctor_get(x_17787, 1); +lean_inc(x_17789); +lean_dec(x_17787); +x_17790 = lean_ctor_get(x_17788, 0); +lean_inc(x_17790); +x_17791 = lean_ctor_get(x_17788, 1); +lean_inc(x_17791); +lean_dec(x_17788); +x_17792 = lean_ctor_get(x_1, 2); +lean_inc(x_17792); +lean_dec(x_1); +lean_inc(x_5); +lean_inc(x_4); +x_17793 = l_Lean_IR_ToIR_lowerType(x_17792, x_17791, x_4, x_5, x_17789); +if (lean_obj_tag(x_17793) == 0) +{ +lean_object* x_17794; lean_object* x_17795; lean_object* x_17796; lean_object* x_17797; lean_object* x_17798; +x_17794 = lean_ctor_get(x_17793, 0); +lean_inc(x_17794); +x_17795 = lean_ctor_get(x_17793, 1); +lean_inc(x_17795); +lean_dec(x_17793); +x_17796 = lean_ctor_get(x_17794, 0); +lean_inc(x_17796); +x_17797 = lean_ctor_get(x_17794, 1); +lean_inc(x_17797); +lean_dec(x_17794); +x_17798 = l_Lean_IR_ToIR_lowerLet___lambda__3(x_2, x_17790, x_17780, x_17785, x_17732, x_17796, x_17797, x_4, x_5, x_17795); +return x_17798; +} +else +{ +uint8_t x_17799; +lean_dec(x_17790); +lean_dec(x_17785); +lean_dec(x_17732); +lean_dec(x_17780); +lean_dec(x_5); +lean_dec(x_4); +lean_dec(x_2); +x_17799 = !lean_is_exclusive(x_17793); +if (x_17799 == 0) +{ +return x_17793; +} +else +{ +lean_object* x_17800; lean_object* x_17801; lean_object* x_17802; +x_17800 = lean_ctor_get(x_17793, 0); +x_17801 = lean_ctor_get(x_17793, 1); +lean_inc(x_17801); +lean_inc(x_17800); +lean_dec(x_17793); +x_17802 = lean_alloc_ctor(1, 2, 0); +lean_ctor_set(x_17802, 0, x_17800); +lean_ctor_set(x_17802, 1, x_17801); +return x_17802; +} +} +} +else +{ +lean_object* x_17803; lean_object* x_17804; lean_object* x_17805; lean_object* x_17806; lean_object* x_17807; lean_object* x_17808; lean_object* x_17809; lean_object* x_17810; +lean_dec(x_17775); +lean_dec(x_17773); +lean_ctor_set_tag(x_17732, 6); +lean_ctor_set(x_17732, 1, x_17707); +lean_ctor_set(x_17732, 0, x_153); +x_17803 = lean_ctor_get(x_1, 0); +lean_inc(x_17803); +x_17804 = l_Lean_IR_ToIR_bindVar(x_17803, x_17770, x_4, x_5, x_17768); +x_17805 = lean_ctor_get(x_17804, 0); +lean_inc(x_17805); +x_17806 = lean_ctor_get(x_17804, 1); +lean_inc(x_17806); +lean_dec(x_17804); +x_17807 = lean_ctor_get(x_17805, 0); +lean_inc(x_17807); +x_17808 = lean_ctor_get(x_17805, 1); +lean_inc(x_17808); +lean_dec(x_17805); +x_17809 = lean_ctor_get(x_1, 2); +lean_inc(x_17809); +lean_dec(x_1); +lean_inc(x_5); +lean_inc(x_4); +x_17810 = l_Lean_IR_ToIR_lowerType(x_17809, x_17808, x_4, x_5, x_17806); +if (lean_obj_tag(x_17810) == 0) +{ +lean_object* x_17811; lean_object* x_17812; lean_object* x_17813; lean_object* x_17814; lean_object* x_17815; +x_17811 = lean_ctor_get(x_17810, 0); +lean_inc(x_17811); +x_17812 = lean_ctor_get(x_17810, 1); +lean_inc(x_17812); +lean_dec(x_17810); +x_17813 = lean_ctor_get(x_17811, 0); +lean_inc(x_17813); +x_17814 = lean_ctor_get(x_17811, 1); +lean_inc(x_17814); +lean_dec(x_17811); +x_17815 = l_Lean_IR_ToIR_lowerLet___lambda__1(x_2, x_17807, x_17732, x_17813, x_17814, x_4, x_5, x_17812); +return x_17815; +} +else +{ 
+uint8_t x_17816; +lean_dec(x_17807); +lean_dec(x_17732); +lean_dec(x_5); +lean_dec(x_4); +lean_dec(x_2); +x_17816 = !lean_is_exclusive(x_17810); +if (x_17816 == 0) +{ +return x_17810; +} +else +{ +lean_object* x_17817; lean_object* x_17818; lean_object* x_17819; +x_17817 = lean_ctor_get(x_17810, 0); +x_17818 = lean_ctor_get(x_17810, 1); +lean_inc(x_17818); +lean_inc(x_17817); +lean_dec(x_17810); +x_17819 = lean_alloc_ctor(1, 2, 0); +lean_ctor_set(x_17819, 0, x_17817); +lean_ctor_set(x_17819, 1, x_17818); +return x_17819; +} +} +} +} +else +{ +lean_object* x_17820; lean_object* x_17821; lean_object* x_17822; lean_object* x_17823; lean_object* x_17824; lean_object* x_17825; lean_object* x_17826; lean_object* x_17827; +lean_dec(x_17775); +lean_dec(x_17773); +lean_ctor_set_tag(x_17732, 7); +lean_ctor_set(x_17732, 1, x_17707); +lean_ctor_set(x_17732, 0, x_153); +x_17820 = lean_ctor_get(x_1, 0); +lean_inc(x_17820); +lean_dec(x_1); +x_17821 = l_Lean_IR_ToIR_bindVar(x_17820, x_17770, x_4, x_5, x_17768); +x_17822 = lean_ctor_get(x_17821, 0); +lean_inc(x_17822); +x_17823 = lean_ctor_get(x_17821, 1); +lean_inc(x_17823); +lean_dec(x_17821); +x_17824 = lean_ctor_get(x_17822, 0); +lean_inc(x_17824); +x_17825 = lean_ctor_get(x_17822, 1); +lean_inc(x_17825); +lean_dec(x_17822); +x_17826 = lean_box(7); +x_17827 = l_Lean_IR_ToIR_lowerLet___lambda__1(x_2, x_17824, x_17732, x_17826, x_17825, x_4, x_5, x_17823); +return x_17827; +} +} +else +{ +lean_object* x_17828; lean_object* x_17829; lean_object* x_17830; lean_object* x_17831; lean_object* x_17832; uint8_t x_17833; +x_17828 = lean_ctor_get(x_17732, 1); +lean_inc(x_17828); +lean_dec(x_17732); +x_17829 = lean_ctor_get(x_17733, 0); +lean_inc(x_17829); +lean_dec(x_17733); +x_17830 = lean_array_get_size(x_17707); +x_17831 = l_Lean_IR_Decl_params(x_17829); +lean_dec(x_17829); +x_17832 = lean_array_get_size(x_17831); +lean_dec(x_17831); +x_17833 = lean_nat_dec_lt(x_17830, x_17832); +if (x_17833 == 0) +{ +uint8_t x_17834; +x_17834 = lean_nat_dec_eq(x_17830, x_17832); +if (x_17834 == 0) +{ +lean_object* x_17835; lean_object* x_17836; lean_object* x_17837; lean_object* x_17838; lean_object* x_17839; lean_object* x_17840; lean_object* x_17841; lean_object* x_17842; lean_object* x_17843; lean_object* x_17844; lean_object* x_17845; lean_object* x_17846; lean_object* x_17847; lean_object* x_17848; lean_object* x_17849; lean_object* x_17850; lean_object* x_17851; +x_17835 = lean_unsigned_to_nat(0u); +x_17836 = l_Array_extract___rarg(x_17707, x_17835, x_17832); +x_17837 = l_Array_extract___rarg(x_17707, x_17832, x_17830); +lean_dec(x_17830); +lean_dec(x_17707); +x_17838 = lean_alloc_ctor(6, 2, 0); +lean_ctor_set(x_17838, 0, x_153); +lean_ctor_set(x_17838, 1, x_17836); +x_17839 = lean_ctor_get(x_1, 0); +lean_inc(x_17839); +x_17840 = l_Lean_IR_ToIR_bindVar(x_17839, x_17828, x_4, x_5, x_17768); +x_17841 = lean_ctor_get(x_17840, 0); +lean_inc(x_17841); +x_17842 = lean_ctor_get(x_17840, 1); +lean_inc(x_17842); +lean_dec(x_17840); +x_17843 = lean_ctor_get(x_17841, 0); +lean_inc(x_17843); +x_17844 = lean_ctor_get(x_17841, 1); +lean_inc(x_17844); +lean_dec(x_17841); +x_17845 = l_Lean_IR_ToIR_newVar(x_17844, x_4, x_5, x_17842); +x_17846 = lean_ctor_get(x_17845, 0); +lean_inc(x_17846); +x_17847 = lean_ctor_get(x_17845, 1); +lean_inc(x_17847); +lean_dec(x_17845); +x_17848 = lean_ctor_get(x_17846, 0); +lean_inc(x_17848); +x_17849 = lean_ctor_get(x_17846, 1); +lean_inc(x_17849); +lean_dec(x_17846); +x_17850 = lean_ctor_get(x_1, 2); +lean_inc(x_17850); +lean_dec(x_1); +lean_inc(x_5); 
+lean_inc(x_4); +x_17851 = l_Lean_IR_ToIR_lowerType(x_17850, x_17849, x_4, x_5, x_17847); +if (lean_obj_tag(x_17851) == 0) +{ +lean_object* x_17852; lean_object* x_17853; lean_object* x_17854; lean_object* x_17855; lean_object* x_17856; +x_17852 = lean_ctor_get(x_17851, 0); +lean_inc(x_17852); +x_17853 = lean_ctor_get(x_17851, 1); +lean_inc(x_17853); +lean_dec(x_17851); +x_17854 = lean_ctor_get(x_17852, 0); +lean_inc(x_17854); +x_17855 = lean_ctor_get(x_17852, 1); +lean_inc(x_17855); +lean_dec(x_17852); +x_17856 = l_Lean_IR_ToIR_lowerLet___lambda__3(x_2, x_17848, x_17837, x_17843, x_17838, x_17854, x_17855, x_4, x_5, x_17853); +return x_17856; +} +else +{ +lean_object* x_17857; lean_object* x_17858; lean_object* x_17859; lean_object* x_17860; +lean_dec(x_17848); +lean_dec(x_17843); +lean_dec(x_17838); +lean_dec(x_17837); +lean_dec(x_5); +lean_dec(x_4); +lean_dec(x_2); +x_17857 = lean_ctor_get(x_17851, 0); +lean_inc(x_17857); +x_17858 = lean_ctor_get(x_17851, 1); +lean_inc(x_17858); +if (lean_is_exclusive(x_17851)) { + lean_ctor_release(x_17851, 0); + lean_ctor_release(x_17851, 1); + x_17859 = x_17851; +} else { + lean_dec_ref(x_17851); + x_17859 = lean_box(0); +} +if (lean_is_scalar(x_17859)) { + x_17860 = lean_alloc_ctor(1, 2, 0); +} else { + x_17860 = x_17859; +} +lean_ctor_set(x_17860, 0, x_17857); +lean_ctor_set(x_17860, 1, x_17858); +return x_17860; +} +} +else +{ +lean_object* x_17861; lean_object* x_17862; lean_object* x_17863; lean_object* x_17864; lean_object* x_17865; lean_object* x_17866; lean_object* x_17867; lean_object* x_17868; lean_object* x_17869; +lean_dec(x_17832); +lean_dec(x_17830); +x_17861 = lean_alloc_ctor(6, 2, 0); +lean_ctor_set(x_17861, 0, x_153); +lean_ctor_set(x_17861, 1, x_17707); +x_17862 = lean_ctor_get(x_1, 0); +lean_inc(x_17862); +x_17863 = l_Lean_IR_ToIR_bindVar(x_17862, x_17828, x_4, x_5, x_17768); +x_17864 = lean_ctor_get(x_17863, 0); +lean_inc(x_17864); +x_17865 = lean_ctor_get(x_17863, 1); +lean_inc(x_17865); +lean_dec(x_17863); +x_17866 = lean_ctor_get(x_17864, 0); +lean_inc(x_17866); +x_17867 = lean_ctor_get(x_17864, 1); +lean_inc(x_17867); +lean_dec(x_17864); +x_17868 = lean_ctor_get(x_1, 2); +lean_inc(x_17868); +lean_dec(x_1); +lean_inc(x_5); +lean_inc(x_4); +x_17869 = l_Lean_IR_ToIR_lowerType(x_17868, x_17867, x_4, x_5, x_17865); +if (lean_obj_tag(x_17869) == 0) +{ +lean_object* x_17870; lean_object* x_17871; lean_object* x_17872; lean_object* x_17873; lean_object* x_17874; +x_17870 = lean_ctor_get(x_17869, 0); +lean_inc(x_17870); +x_17871 = lean_ctor_get(x_17869, 1); +lean_inc(x_17871); +lean_dec(x_17869); +x_17872 = lean_ctor_get(x_17870, 0); +lean_inc(x_17872); +x_17873 = lean_ctor_get(x_17870, 1); +lean_inc(x_17873); +lean_dec(x_17870); +x_17874 = l_Lean_IR_ToIR_lowerLet___lambda__1(x_2, x_17866, x_17861, x_17872, x_17873, x_4, x_5, x_17871); +return x_17874; +} +else +{ +lean_object* x_17875; lean_object* x_17876; lean_object* x_17877; lean_object* x_17878; +lean_dec(x_17866); +lean_dec(x_17861); +lean_dec(x_5); +lean_dec(x_4); +lean_dec(x_2); +x_17875 = lean_ctor_get(x_17869, 0); +lean_inc(x_17875); +x_17876 = lean_ctor_get(x_17869, 1); +lean_inc(x_17876); +if (lean_is_exclusive(x_17869)) { + lean_ctor_release(x_17869, 0); + lean_ctor_release(x_17869, 1); + x_17877 = x_17869; +} else { + lean_dec_ref(x_17869); + x_17877 = lean_box(0); +} +if (lean_is_scalar(x_17877)) { + x_17878 = lean_alloc_ctor(1, 2, 0); +} else { + x_17878 = x_17877; +} +lean_ctor_set(x_17878, 0, x_17875); +lean_ctor_set(x_17878, 1, x_17876); +return x_17878; +} +} +} 
+else +{ +lean_object* x_17879; lean_object* x_17880; lean_object* x_17881; lean_object* x_17882; lean_object* x_17883; lean_object* x_17884; lean_object* x_17885; lean_object* x_17886; lean_object* x_17887; +lean_dec(x_17832); +lean_dec(x_17830); +x_17879 = lean_alloc_ctor(7, 2, 0); +lean_ctor_set(x_17879, 0, x_153); +lean_ctor_set(x_17879, 1, x_17707); +x_17880 = lean_ctor_get(x_1, 0); +lean_inc(x_17880); +lean_dec(x_1); +x_17881 = l_Lean_IR_ToIR_bindVar(x_17880, x_17828, x_4, x_5, x_17768); +x_17882 = lean_ctor_get(x_17881, 0); +lean_inc(x_17882); +x_17883 = lean_ctor_get(x_17881, 1); +lean_inc(x_17883); +lean_dec(x_17881); +x_17884 = lean_ctor_get(x_17882, 0); +lean_inc(x_17884); +x_17885 = lean_ctor_get(x_17882, 1); +lean_inc(x_17885); +lean_dec(x_17882); +x_17886 = lean_box(7); +x_17887 = l_Lean_IR_ToIR_lowerLet___lambda__1(x_2, x_17884, x_17879, x_17886, x_17885, x_4, x_5, x_17883); +return x_17887; +} +} +} +} +else +{ +lean_object* x_17888; lean_object* x_17889; +lean_free_object(x_17724); +lean_dec(x_17707); +lean_dec(x_153); +lean_dec(x_5); +lean_dec(x_4); +lean_dec(x_2); +lean_dec(x_1); +x_17888 = lean_box(13); +lean_ctor_set(x_17709, 0, x_17888); +if (lean_is_scalar(x_17718)) { + x_17889 = lean_alloc_ctor(0, 2, 0); +} else { + x_17889 = x_17718; +} +lean_ctor_set(x_17889, 0, x_17709); +lean_ctor_set(x_17889, 1, x_17717); +return x_17889; +} +} +else +{ +lean_object* x_17890; lean_object* x_17891; lean_object* x_17892; +lean_free_object(x_17724); +lean_dec(x_17718); +lean_free_object(x_17709); +lean_dec(x_153); +x_17890 = l_Lean_IR_instInhabitedArg; +x_17891 = lean_unsigned_to_nat(2u); +x_17892 = lean_array_get(x_17890, x_17707, x_17891); +lean_dec(x_17707); +if (lean_obj_tag(x_17892) == 0) +{ +lean_object* x_17893; lean_object* x_17894; lean_object* x_17895; lean_object* x_17896; lean_object* x_17897; lean_object* x_17898; lean_object* x_17899; +x_17893 = lean_ctor_get(x_17892, 0); +lean_inc(x_17893); +lean_dec(x_17892); +x_17894 = lean_ctor_get(x_1, 0); +lean_inc(x_17894); +lean_dec(x_1); +x_17895 = l_Lean_IR_ToIR_bindVarToVarId(x_17894, x_17893, x_17713, x_4, x_5, x_17717); +x_17896 = lean_ctor_get(x_17895, 0); +lean_inc(x_17896); +x_17897 = lean_ctor_get(x_17895, 1); +lean_inc(x_17897); +lean_dec(x_17895); +x_17898 = lean_ctor_get(x_17896, 1); +lean_inc(x_17898); +lean_dec(x_17896); +x_17899 = l_Lean_IR_ToIR_lowerCode(x_2, x_17898, x_4, x_5, x_17897); +return x_17899; +} +else +{ +lean_object* x_17900; lean_object* x_17901; lean_object* x_17902; lean_object* x_17903; lean_object* x_17904; lean_object* x_17905; +x_17900 = lean_ctor_get(x_1, 0); +lean_inc(x_17900); +lean_dec(x_1); +x_17901 = l_Lean_IR_ToIR_bindErased(x_17900, x_17713, x_4, x_5, x_17717); +x_17902 = lean_ctor_get(x_17901, 0); +lean_inc(x_17902); +x_17903 = lean_ctor_get(x_17901, 1); +lean_inc(x_17903); +lean_dec(x_17901); +x_17904 = lean_ctor_get(x_17902, 1); +lean_inc(x_17904); +lean_dec(x_17902); +x_17905 = l_Lean_IR_ToIR_lowerCode(x_2, x_17904, x_4, x_5, x_17903); +return x_17905; +} +} +} +else +{ +lean_object* x_17906; uint8_t x_17907; +lean_dec(x_17724); +x_17906 = l_Lean_IR_ToIR_lowerLet___closed__9; +x_17907 = lean_name_eq(x_153, x_17906); +if (x_17907 == 0) +{ +lean_object* x_17908; uint8_t x_17909; +x_17908 = l_Lean_IR_ToIR_lowerLet___closed__11; +x_17909 = lean_name_eq(x_153, x_17908); +if (x_17909 == 0) +{ +lean_object* x_17910; lean_object* x_17911; lean_object* x_17912; +lean_dec(x_17718); +lean_free_object(x_17709); +lean_inc(x_153); +x_17910 = l_Lean_IR_ToIR_findDecl(x_153, x_17713, x_4, x_5, 
x_17717); +x_17911 = lean_ctor_get(x_17910, 0); +lean_inc(x_17911); +x_17912 = lean_ctor_get(x_17911, 0); +lean_inc(x_17912); +if (lean_obj_tag(x_17912) == 0) +{ +lean_object* x_17913; lean_object* x_17914; lean_object* x_17915; lean_object* x_17916; uint8_t x_17917; lean_object* x_17918; lean_object* x_17919; lean_object* x_17920; lean_object* x_17921; lean_object* x_17922; lean_object* x_17923; lean_object* x_17924; lean_object* x_17925; lean_object* x_17926; +lean_dec(x_17707); +lean_dec(x_2); +lean_dec(x_1); +x_17913 = lean_ctor_get(x_17910, 1); +lean_inc(x_17913); +if (lean_is_exclusive(x_17910)) { + lean_ctor_release(x_17910, 0); + lean_ctor_release(x_17910, 1); + x_17914 = x_17910; +} else { + lean_dec_ref(x_17910); + x_17914 = lean_box(0); +} +x_17915 = lean_ctor_get(x_17911, 1); +lean_inc(x_17915); +if (lean_is_exclusive(x_17911)) { + lean_ctor_release(x_17911, 0); + lean_ctor_release(x_17911, 1); + x_17916 = x_17911; +} else { + lean_dec_ref(x_17911); + x_17916 = lean_box(0); +} +x_17917 = 1; +x_17918 = l_Lean_IR_ToIR_lowerLet___closed__14; +x_17919 = l_Lean_Name_toString(x_153, x_17917, x_17918); +x_17920 = lean_alloc_ctor(3, 1, 0); +lean_ctor_set(x_17920, 0, x_17919); +x_17921 = l_Lean_IR_ToIR_lowerLet___closed__13; +if (lean_is_scalar(x_17916)) { + x_17922 = lean_alloc_ctor(5, 2, 0); +} else { + x_17922 = x_17916; + lean_ctor_set_tag(x_17922, 5); +} +lean_ctor_set(x_17922, 0, x_17921); +lean_ctor_set(x_17922, 1, x_17920); +x_17923 = l_Lean_IR_ToIR_lowerLet___closed__16; +if (lean_is_scalar(x_17914)) { + x_17924 = lean_alloc_ctor(5, 2, 0); +} else { + x_17924 = x_17914; + lean_ctor_set_tag(x_17924, 5); +} +lean_ctor_set(x_17924, 0, x_17922); +lean_ctor_set(x_17924, 1, x_17923); +x_17925 = l_Lean_MessageData_ofFormat(x_17924); +x_17926 = l_Lean_throwError___at_Lean_IR_ToIR_lowerLet___spec__1(x_17925, x_17915, x_4, x_5, x_17913); +lean_dec(x_5); +lean_dec(x_4); +lean_dec(x_17915); +return x_17926; +} +else +{ +lean_object* x_17927; lean_object* x_17928; lean_object* x_17929; lean_object* x_17930; lean_object* x_17931; lean_object* x_17932; lean_object* x_17933; uint8_t x_17934; +x_17927 = lean_ctor_get(x_17910, 1); +lean_inc(x_17927); +lean_dec(x_17910); +x_17928 = lean_ctor_get(x_17911, 1); +lean_inc(x_17928); +if (lean_is_exclusive(x_17911)) { + lean_ctor_release(x_17911, 0); + lean_ctor_release(x_17911, 1); + x_17929 = x_17911; +} else { + lean_dec_ref(x_17911); + x_17929 = lean_box(0); +} +x_17930 = lean_ctor_get(x_17912, 0); +lean_inc(x_17930); +lean_dec(x_17912); +x_17931 = lean_array_get_size(x_17707); +x_17932 = l_Lean_IR_Decl_params(x_17930); +lean_dec(x_17930); +x_17933 = lean_array_get_size(x_17932); +lean_dec(x_17932); +x_17934 = lean_nat_dec_lt(x_17931, x_17933); +if (x_17934 == 0) +{ +uint8_t x_17935; +x_17935 = lean_nat_dec_eq(x_17931, x_17933); +if (x_17935 == 0) +{ +lean_object* x_17936; lean_object* x_17937; lean_object* x_17938; lean_object* x_17939; lean_object* x_17940; lean_object* x_17941; lean_object* x_17942; lean_object* x_17943; lean_object* x_17944; lean_object* x_17945; lean_object* x_17946; lean_object* x_17947; lean_object* x_17948; lean_object* x_17949; lean_object* x_17950; lean_object* x_17951; lean_object* x_17952; +x_17936 = lean_unsigned_to_nat(0u); +x_17937 = l_Array_extract___rarg(x_17707, x_17936, x_17933); +x_17938 = l_Array_extract___rarg(x_17707, x_17933, x_17931); +lean_dec(x_17931); +lean_dec(x_17707); +if (lean_is_scalar(x_17929)) { + x_17939 = lean_alloc_ctor(6, 2, 0); +} else { + x_17939 = x_17929; + lean_ctor_set_tag(x_17939, 6); 
+} +lean_ctor_set(x_17939, 0, x_153); +lean_ctor_set(x_17939, 1, x_17937); +x_17940 = lean_ctor_get(x_1, 0); +lean_inc(x_17940); +x_17941 = l_Lean_IR_ToIR_bindVar(x_17940, x_17928, x_4, x_5, x_17927); +x_17942 = lean_ctor_get(x_17941, 0); +lean_inc(x_17942); +x_17943 = lean_ctor_get(x_17941, 1); +lean_inc(x_17943); +lean_dec(x_17941); +x_17944 = lean_ctor_get(x_17942, 0); +lean_inc(x_17944); +x_17945 = lean_ctor_get(x_17942, 1); +lean_inc(x_17945); +lean_dec(x_17942); +x_17946 = l_Lean_IR_ToIR_newVar(x_17945, x_4, x_5, x_17943); +x_17947 = lean_ctor_get(x_17946, 0); +lean_inc(x_17947); +x_17948 = lean_ctor_get(x_17946, 1); +lean_inc(x_17948); +lean_dec(x_17946); +x_17949 = lean_ctor_get(x_17947, 0); +lean_inc(x_17949); +x_17950 = lean_ctor_get(x_17947, 1); +lean_inc(x_17950); +lean_dec(x_17947); +x_17951 = lean_ctor_get(x_1, 2); +lean_inc(x_17951); +lean_dec(x_1); +lean_inc(x_5); +lean_inc(x_4); +x_17952 = l_Lean_IR_ToIR_lowerType(x_17951, x_17950, x_4, x_5, x_17948); +if (lean_obj_tag(x_17952) == 0) +{ +lean_object* x_17953; lean_object* x_17954; lean_object* x_17955; lean_object* x_17956; lean_object* x_17957; +x_17953 = lean_ctor_get(x_17952, 0); +lean_inc(x_17953); +x_17954 = lean_ctor_get(x_17952, 1); +lean_inc(x_17954); +lean_dec(x_17952); +x_17955 = lean_ctor_get(x_17953, 0); +lean_inc(x_17955); +x_17956 = lean_ctor_get(x_17953, 1); +lean_inc(x_17956); +lean_dec(x_17953); +x_17957 = l_Lean_IR_ToIR_lowerLet___lambda__3(x_2, x_17949, x_17938, x_17944, x_17939, x_17955, x_17956, x_4, x_5, x_17954); +return x_17957; +} +else +{ +lean_object* x_17958; lean_object* x_17959; lean_object* x_17960; lean_object* x_17961; +lean_dec(x_17949); +lean_dec(x_17944); +lean_dec(x_17939); +lean_dec(x_17938); +lean_dec(x_5); +lean_dec(x_4); +lean_dec(x_2); +x_17958 = lean_ctor_get(x_17952, 0); +lean_inc(x_17958); +x_17959 = lean_ctor_get(x_17952, 1); +lean_inc(x_17959); +if (lean_is_exclusive(x_17952)) { + lean_ctor_release(x_17952, 0); + lean_ctor_release(x_17952, 1); + x_17960 = x_17952; +} else { + lean_dec_ref(x_17952); + x_17960 = lean_box(0); +} +if (lean_is_scalar(x_17960)) { + x_17961 = lean_alloc_ctor(1, 2, 0); +} else { + x_17961 = x_17960; +} +lean_ctor_set(x_17961, 0, x_17958); +lean_ctor_set(x_17961, 1, x_17959); +return x_17961; +} +} +else +{ +lean_object* x_17962; lean_object* x_17963; lean_object* x_17964; lean_object* x_17965; lean_object* x_17966; lean_object* x_17967; lean_object* x_17968; lean_object* x_17969; lean_object* x_17970; +lean_dec(x_17933); +lean_dec(x_17931); +if (lean_is_scalar(x_17929)) { + x_17962 = lean_alloc_ctor(6, 2, 0); +} else { + x_17962 = x_17929; + lean_ctor_set_tag(x_17962, 6); +} +lean_ctor_set(x_17962, 0, x_153); +lean_ctor_set(x_17962, 1, x_17707); +x_17963 = lean_ctor_get(x_1, 0); +lean_inc(x_17963); +x_17964 = l_Lean_IR_ToIR_bindVar(x_17963, x_17928, x_4, x_5, x_17927); +x_17965 = lean_ctor_get(x_17964, 0); +lean_inc(x_17965); +x_17966 = lean_ctor_get(x_17964, 1); +lean_inc(x_17966); +lean_dec(x_17964); +x_17967 = lean_ctor_get(x_17965, 0); +lean_inc(x_17967); +x_17968 = lean_ctor_get(x_17965, 1); +lean_inc(x_17968); +lean_dec(x_17965); +x_17969 = lean_ctor_get(x_1, 2); +lean_inc(x_17969); +lean_dec(x_1); +lean_inc(x_5); +lean_inc(x_4); +x_17970 = l_Lean_IR_ToIR_lowerType(x_17969, x_17968, x_4, x_5, x_17966); +if (lean_obj_tag(x_17970) == 0) +{ +lean_object* x_17971; lean_object* x_17972; lean_object* x_17973; lean_object* x_17974; lean_object* x_17975; +x_17971 = lean_ctor_get(x_17970, 0); +lean_inc(x_17971); +x_17972 = lean_ctor_get(x_17970, 1); 
+lean_inc(x_17972); +lean_dec(x_17970); +x_17973 = lean_ctor_get(x_17971, 0); +lean_inc(x_17973); +x_17974 = lean_ctor_get(x_17971, 1); +lean_inc(x_17974); +lean_dec(x_17971); +x_17975 = l_Lean_IR_ToIR_lowerLet___lambda__1(x_2, x_17967, x_17962, x_17973, x_17974, x_4, x_5, x_17972); +return x_17975; +} +else +{ +lean_object* x_17976; lean_object* x_17977; lean_object* x_17978; lean_object* x_17979; +lean_dec(x_17967); +lean_dec(x_17962); +lean_dec(x_5); +lean_dec(x_4); +lean_dec(x_2); +x_17976 = lean_ctor_get(x_17970, 0); +lean_inc(x_17976); +x_17977 = lean_ctor_get(x_17970, 1); +lean_inc(x_17977); +if (lean_is_exclusive(x_17970)) { + lean_ctor_release(x_17970, 0); + lean_ctor_release(x_17970, 1); + x_17978 = x_17970; +} else { + lean_dec_ref(x_17970); + x_17978 = lean_box(0); +} +if (lean_is_scalar(x_17978)) { + x_17979 = lean_alloc_ctor(1, 2, 0); +} else { + x_17979 = x_17978; +} +lean_ctor_set(x_17979, 0, x_17976); +lean_ctor_set(x_17979, 1, x_17977); +return x_17979; +} +} +} +else +{ +lean_object* x_17980; lean_object* x_17981; lean_object* x_17982; lean_object* x_17983; lean_object* x_17984; lean_object* x_17985; lean_object* x_17986; lean_object* x_17987; lean_object* x_17988; +lean_dec(x_17933); +lean_dec(x_17931); +if (lean_is_scalar(x_17929)) { + x_17980 = lean_alloc_ctor(7, 2, 0); +} else { + x_17980 = x_17929; + lean_ctor_set_tag(x_17980, 7); +} +lean_ctor_set(x_17980, 0, x_153); +lean_ctor_set(x_17980, 1, x_17707); +x_17981 = lean_ctor_get(x_1, 0); +lean_inc(x_17981); +lean_dec(x_1); +x_17982 = l_Lean_IR_ToIR_bindVar(x_17981, x_17928, x_4, x_5, x_17927); +x_17983 = lean_ctor_get(x_17982, 0); +lean_inc(x_17983); +x_17984 = lean_ctor_get(x_17982, 1); +lean_inc(x_17984); +lean_dec(x_17982); +x_17985 = lean_ctor_get(x_17983, 0); +lean_inc(x_17985); +x_17986 = lean_ctor_get(x_17983, 1); +lean_inc(x_17986); +lean_dec(x_17983); +x_17987 = lean_box(7); +x_17988 = l_Lean_IR_ToIR_lowerLet___lambda__1(x_2, x_17985, x_17980, x_17987, x_17986, x_4, x_5, x_17984); +return x_17988; +} +} +} +else +{ +lean_object* x_17989; lean_object* x_17990; +lean_dec(x_17707); +lean_dec(x_153); +lean_dec(x_5); +lean_dec(x_4); +lean_dec(x_2); +lean_dec(x_1); +x_17989 = lean_box(13); +lean_ctor_set(x_17709, 0, x_17989); +if (lean_is_scalar(x_17718)) { + x_17990 = lean_alloc_ctor(0, 2, 0); +} else { + x_17990 = x_17718; +} +lean_ctor_set(x_17990, 0, x_17709); +lean_ctor_set(x_17990, 1, x_17717); +return x_17990; +} +} +else +{ +lean_object* x_17991; lean_object* x_17992; lean_object* x_17993; +lean_dec(x_17718); +lean_free_object(x_17709); +lean_dec(x_153); +x_17991 = l_Lean_IR_instInhabitedArg; +x_17992 = lean_unsigned_to_nat(2u); +x_17993 = lean_array_get(x_17991, x_17707, x_17992); +lean_dec(x_17707); +if (lean_obj_tag(x_17993) == 0) +{ +lean_object* x_17994; lean_object* x_17995; lean_object* x_17996; lean_object* x_17997; lean_object* x_17998; lean_object* x_17999; lean_object* x_18000; +x_17994 = lean_ctor_get(x_17993, 0); +lean_inc(x_17994); +lean_dec(x_17993); +x_17995 = lean_ctor_get(x_1, 0); +lean_inc(x_17995); +lean_dec(x_1); +x_17996 = l_Lean_IR_ToIR_bindVarToVarId(x_17995, x_17994, x_17713, x_4, x_5, x_17717); +x_17997 = lean_ctor_get(x_17996, 0); +lean_inc(x_17997); +x_17998 = lean_ctor_get(x_17996, 1); +lean_inc(x_17998); +lean_dec(x_17996); +x_17999 = lean_ctor_get(x_17997, 1); +lean_inc(x_17999); +lean_dec(x_17997); +x_18000 = l_Lean_IR_ToIR_lowerCode(x_2, x_17999, x_4, x_5, x_17998); +return x_18000; +} +else +{ +lean_object* x_18001; lean_object* x_18002; lean_object* x_18003; lean_object* 
x_18004; lean_object* x_18005; lean_object* x_18006; +x_18001 = lean_ctor_get(x_1, 0); +lean_inc(x_18001); +lean_dec(x_1); +x_18002 = l_Lean_IR_ToIR_bindErased(x_18001, x_17713, x_4, x_5, x_17717); +x_18003 = lean_ctor_get(x_18002, 0); +lean_inc(x_18003); +x_18004 = lean_ctor_get(x_18002, 1); +lean_inc(x_18004); +lean_dec(x_18002); +x_18005 = lean_ctor_get(x_18003, 1); +lean_inc(x_18005); +lean_dec(x_18003); +x_18006 = l_Lean_IR_ToIR_lowerCode(x_2, x_18005, x_4, x_5, x_18004); +return x_18006; +} +} +} +} +case 1: +{ +lean_object* x_18007; lean_object* x_18008; lean_object* x_18038; lean_object* x_18039; +lean_dec(x_17724); +lean_dec(x_17719); +lean_dec(x_17699); +lean_dec(x_17698); +lean_inc(x_153); +x_18038 = l_Lean_Compiler_LCNF_getMonoDecl_x3f(x_153, x_4, x_5, x_17717); +x_18039 = lean_ctor_get(x_18038, 0); +lean_inc(x_18039); +if (lean_obj_tag(x_18039) == 0) +{ +lean_object* x_18040; lean_object* x_18041; +x_18040 = lean_ctor_get(x_18038, 1); +lean_inc(x_18040); +lean_dec(x_18038); +x_18041 = lean_box(0); +lean_ctor_set(x_17709, 0, x_18041); +x_18007 = x_17709; +x_18008 = x_18040; +goto block_18037; +} +else +{ +uint8_t x_18042; +lean_free_object(x_17709); +x_18042 = !lean_is_exclusive(x_18038); +if (x_18042 == 0) +{ +lean_object* x_18043; lean_object* x_18044; uint8_t x_18045; +x_18043 = lean_ctor_get(x_18038, 1); +x_18044 = lean_ctor_get(x_18038, 0); +lean_dec(x_18044); +x_18045 = !lean_is_exclusive(x_18039); +if (x_18045 == 0) +{ +lean_object* x_18046; lean_object* x_18047; lean_object* x_18048; lean_object* x_18049; uint8_t x_18050; +x_18046 = lean_ctor_get(x_18039, 0); +x_18047 = lean_array_get_size(x_17707); +x_18048 = lean_ctor_get(x_18046, 3); +lean_inc(x_18048); +lean_dec(x_18046); +x_18049 = lean_array_get_size(x_18048); +lean_dec(x_18048); +x_18050 = lean_nat_dec_lt(x_18047, x_18049); +if (x_18050 == 0) +{ +uint8_t x_18051; +x_18051 = lean_nat_dec_eq(x_18047, x_18049); +if (x_18051 == 0) +{ +lean_object* x_18052; lean_object* x_18053; lean_object* x_18054; lean_object* x_18055; lean_object* x_18056; lean_object* x_18057; lean_object* x_18058; lean_object* x_18059; lean_object* x_18060; lean_object* x_18061; lean_object* x_18062; lean_object* x_18063; lean_object* x_18064; lean_object* x_18065; lean_object* x_18066; lean_object* x_18067; +x_18052 = lean_unsigned_to_nat(0u); +x_18053 = l_Array_extract___rarg(x_17707, x_18052, x_18049); +x_18054 = l_Array_extract___rarg(x_17707, x_18049, x_18047); +lean_dec(x_18047); +lean_inc(x_153); +lean_ctor_set_tag(x_18038, 6); +lean_ctor_set(x_18038, 1, x_18053); +lean_ctor_set(x_18038, 0, x_153); +x_18055 = lean_ctor_get(x_1, 0); +lean_inc(x_18055); +x_18056 = l_Lean_IR_ToIR_bindVar(x_18055, x_17713, x_4, x_5, x_18043); +x_18057 = lean_ctor_get(x_18056, 0); +lean_inc(x_18057); +x_18058 = lean_ctor_get(x_18056, 1); +lean_inc(x_18058); +lean_dec(x_18056); +x_18059 = lean_ctor_get(x_18057, 0); +lean_inc(x_18059); +x_18060 = lean_ctor_get(x_18057, 1); +lean_inc(x_18060); +lean_dec(x_18057); +x_18061 = l_Lean_IR_ToIR_newVar(x_18060, x_4, x_5, x_18058); +x_18062 = lean_ctor_get(x_18061, 0); +lean_inc(x_18062); +x_18063 = lean_ctor_get(x_18061, 1); +lean_inc(x_18063); +lean_dec(x_18061); +x_18064 = lean_ctor_get(x_18062, 0); +lean_inc(x_18064); +x_18065 = lean_ctor_get(x_18062, 1); +lean_inc(x_18065); +lean_dec(x_18062); +x_18066 = lean_ctor_get(x_1, 2); +lean_inc(x_18066); +lean_inc(x_5); +lean_inc(x_4); +x_18067 = l_Lean_IR_ToIR_lowerType(x_18066, x_18065, x_4, x_5, x_18063); +if (lean_obj_tag(x_18067) == 0) +{ +lean_object* x_18068; 
lean_object* x_18069; lean_object* x_18070; lean_object* x_18071; lean_object* x_18072; +x_18068 = lean_ctor_get(x_18067, 0); +lean_inc(x_18068); +x_18069 = lean_ctor_get(x_18067, 1); +lean_inc(x_18069); +lean_dec(x_18067); +x_18070 = lean_ctor_get(x_18068, 0); +lean_inc(x_18070); +x_18071 = lean_ctor_get(x_18068, 1); +lean_inc(x_18071); +lean_dec(x_18068); +lean_inc(x_5); +lean_inc(x_4); +lean_inc(x_2); +x_18072 = l_Lean_IR_ToIR_lowerLet___lambda__3(x_2, x_18064, x_18054, x_18059, x_18038, x_18070, x_18071, x_4, x_5, x_18069); +if (lean_obj_tag(x_18072) == 0) +{ +lean_object* x_18073; lean_object* x_18074; uint8_t x_18075; +x_18073 = lean_ctor_get(x_18072, 0); +lean_inc(x_18073); +x_18074 = lean_ctor_get(x_18072, 1); +lean_inc(x_18074); +lean_dec(x_18072); +x_18075 = !lean_is_exclusive(x_18073); +if (x_18075 == 0) +{ +lean_object* x_18076; +x_18076 = lean_ctor_get(x_18073, 0); +lean_ctor_set(x_18039, 0, x_18076); +lean_ctor_set(x_18073, 0, x_18039); +x_18007 = x_18073; +x_18008 = x_18074; +goto block_18037; +} +else +{ +lean_object* x_18077; lean_object* x_18078; lean_object* x_18079; +x_18077 = lean_ctor_get(x_18073, 0); +x_18078 = lean_ctor_get(x_18073, 1); +lean_inc(x_18078); +lean_inc(x_18077); +lean_dec(x_18073); +lean_ctor_set(x_18039, 0, x_18077); +x_18079 = lean_alloc_ctor(0, 2, 0); +lean_ctor_set(x_18079, 0, x_18039); +lean_ctor_set(x_18079, 1, x_18078); +x_18007 = x_18079; +x_18008 = x_18074; +goto block_18037; +} +} +else +{ +uint8_t x_18080; +lean_free_object(x_18039); +lean_dec(x_17718); +lean_dec(x_17707); +lean_dec(x_153); +lean_dec(x_5); +lean_dec(x_4); +lean_dec(x_2); +lean_dec(x_1); +x_18080 = !lean_is_exclusive(x_18072); +if (x_18080 == 0) +{ +return x_18072; +} +else +{ +lean_object* x_18081; lean_object* x_18082; lean_object* x_18083; +x_18081 = lean_ctor_get(x_18072, 0); +x_18082 = lean_ctor_get(x_18072, 1); +lean_inc(x_18082); +lean_inc(x_18081); +lean_dec(x_18072); +x_18083 = lean_alloc_ctor(1, 2, 0); +lean_ctor_set(x_18083, 0, x_18081); +lean_ctor_set(x_18083, 1, x_18082); +return x_18083; +} +} +} +else +{ +uint8_t x_18084; +lean_dec(x_18064); +lean_dec(x_18059); +lean_dec(x_18038); +lean_dec(x_18054); +lean_free_object(x_18039); +lean_dec(x_17718); +lean_dec(x_17707); +lean_dec(x_153); +lean_dec(x_5); +lean_dec(x_4); +lean_dec(x_2); +lean_dec(x_1); +x_18084 = !lean_is_exclusive(x_18067); +if (x_18084 == 0) +{ +return x_18067; +} +else +{ +lean_object* x_18085; lean_object* x_18086; lean_object* x_18087; +x_18085 = lean_ctor_get(x_18067, 0); +x_18086 = lean_ctor_get(x_18067, 1); +lean_inc(x_18086); +lean_inc(x_18085); +lean_dec(x_18067); +x_18087 = lean_alloc_ctor(1, 2, 0); +lean_ctor_set(x_18087, 0, x_18085); +lean_ctor_set(x_18087, 1, x_18086); +return x_18087; +} +} +} +else +{ +lean_object* x_18088; lean_object* x_18089; lean_object* x_18090; lean_object* x_18091; lean_object* x_18092; lean_object* x_18093; lean_object* x_18094; lean_object* x_18095; +lean_dec(x_18049); +lean_dec(x_18047); +lean_inc(x_17707); +lean_inc(x_153); +lean_ctor_set_tag(x_18038, 6); +lean_ctor_set(x_18038, 1, x_17707); +lean_ctor_set(x_18038, 0, x_153); +x_18088 = lean_ctor_get(x_1, 0); +lean_inc(x_18088); +x_18089 = l_Lean_IR_ToIR_bindVar(x_18088, x_17713, x_4, x_5, x_18043); +x_18090 = lean_ctor_get(x_18089, 0); +lean_inc(x_18090); +x_18091 = lean_ctor_get(x_18089, 1); +lean_inc(x_18091); +lean_dec(x_18089); +x_18092 = lean_ctor_get(x_18090, 0); +lean_inc(x_18092); +x_18093 = lean_ctor_get(x_18090, 1); +lean_inc(x_18093); +lean_dec(x_18090); +x_18094 = lean_ctor_get(x_1, 2); 
+lean_inc(x_18094); +lean_inc(x_5); +lean_inc(x_4); +x_18095 = l_Lean_IR_ToIR_lowerType(x_18094, x_18093, x_4, x_5, x_18091); +if (lean_obj_tag(x_18095) == 0) +{ +lean_object* x_18096; lean_object* x_18097; lean_object* x_18098; lean_object* x_18099; lean_object* x_18100; +x_18096 = lean_ctor_get(x_18095, 0); +lean_inc(x_18096); +x_18097 = lean_ctor_get(x_18095, 1); +lean_inc(x_18097); +lean_dec(x_18095); +x_18098 = lean_ctor_get(x_18096, 0); +lean_inc(x_18098); +x_18099 = lean_ctor_get(x_18096, 1); +lean_inc(x_18099); +lean_dec(x_18096); +lean_inc(x_5); +lean_inc(x_4); +lean_inc(x_2); +x_18100 = l_Lean_IR_ToIR_lowerLet___lambda__1(x_2, x_18092, x_18038, x_18098, x_18099, x_4, x_5, x_18097); +if (lean_obj_tag(x_18100) == 0) +{ +lean_object* x_18101; lean_object* x_18102; uint8_t x_18103; +x_18101 = lean_ctor_get(x_18100, 0); +lean_inc(x_18101); +x_18102 = lean_ctor_get(x_18100, 1); +lean_inc(x_18102); +lean_dec(x_18100); +x_18103 = !lean_is_exclusive(x_18101); +if (x_18103 == 0) +{ +lean_object* x_18104; +x_18104 = lean_ctor_get(x_18101, 0); +lean_ctor_set(x_18039, 0, x_18104); +lean_ctor_set(x_18101, 0, x_18039); +x_18007 = x_18101; +x_18008 = x_18102; +goto block_18037; +} +else +{ +lean_object* x_18105; lean_object* x_18106; lean_object* x_18107; +x_18105 = lean_ctor_get(x_18101, 0); +x_18106 = lean_ctor_get(x_18101, 1); +lean_inc(x_18106); +lean_inc(x_18105); +lean_dec(x_18101); +lean_ctor_set(x_18039, 0, x_18105); +x_18107 = lean_alloc_ctor(0, 2, 0); +lean_ctor_set(x_18107, 0, x_18039); +lean_ctor_set(x_18107, 1, x_18106); +x_18007 = x_18107; +x_18008 = x_18102; +goto block_18037; +} +} +else +{ +uint8_t x_18108; +lean_free_object(x_18039); +lean_dec(x_17718); +lean_dec(x_17707); +lean_dec(x_153); +lean_dec(x_5); +lean_dec(x_4); +lean_dec(x_2); +lean_dec(x_1); +x_18108 = !lean_is_exclusive(x_18100); +if (x_18108 == 0) +{ +return x_18100; +} +else +{ +lean_object* x_18109; lean_object* x_18110; lean_object* x_18111; +x_18109 = lean_ctor_get(x_18100, 0); +x_18110 = lean_ctor_get(x_18100, 1); +lean_inc(x_18110); +lean_inc(x_18109); +lean_dec(x_18100); +x_18111 = lean_alloc_ctor(1, 2, 0); +lean_ctor_set(x_18111, 0, x_18109); +lean_ctor_set(x_18111, 1, x_18110); +return x_18111; +} +} +} +else +{ +uint8_t x_18112; +lean_dec(x_18092); +lean_dec(x_18038); +lean_free_object(x_18039); +lean_dec(x_17718); +lean_dec(x_17707); +lean_dec(x_153); +lean_dec(x_5); +lean_dec(x_4); +lean_dec(x_2); +lean_dec(x_1); +x_18112 = !lean_is_exclusive(x_18095); +if (x_18112 == 0) +{ +return x_18095; +} +else +{ +lean_object* x_18113; lean_object* x_18114; lean_object* x_18115; +x_18113 = lean_ctor_get(x_18095, 0); +x_18114 = lean_ctor_get(x_18095, 1); +lean_inc(x_18114); +lean_inc(x_18113); +lean_dec(x_18095); +x_18115 = lean_alloc_ctor(1, 2, 0); +lean_ctor_set(x_18115, 0, x_18113); +lean_ctor_set(x_18115, 1, x_18114); +return x_18115; +} +} +} +} +else +{ +lean_object* x_18116; lean_object* x_18117; lean_object* x_18118; lean_object* x_18119; lean_object* x_18120; lean_object* x_18121; lean_object* x_18122; lean_object* x_18123; +lean_dec(x_18049); +lean_dec(x_18047); +lean_inc(x_17707); +lean_inc(x_153); +lean_ctor_set_tag(x_18038, 7); +lean_ctor_set(x_18038, 1, x_17707); +lean_ctor_set(x_18038, 0, x_153); +x_18116 = lean_ctor_get(x_1, 0); +lean_inc(x_18116); +x_18117 = l_Lean_IR_ToIR_bindVar(x_18116, x_17713, x_4, x_5, x_18043); +x_18118 = lean_ctor_get(x_18117, 0); +lean_inc(x_18118); +x_18119 = lean_ctor_get(x_18117, 1); +lean_inc(x_18119); +lean_dec(x_18117); +x_18120 = lean_ctor_get(x_18118, 0); 
+lean_inc(x_18120); +x_18121 = lean_ctor_get(x_18118, 1); +lean_inc(x_18121); +lean_dec(x_18118); +x_18122 = lean_box(7); +lean_inc(x_5); +lean_inc(x_4); +lean_inc(x_2); +x_18123 = l_Lean_IR_ToIR_lowerLet___lambda__1(x_2, x_18120, x_18038, x_18122, x_18121, x_4, x_5, x_18119); +if (lean_obj_tag(x_18123) == 0) +{ +lean_object* x_18124; lean_object* x_18125; uint8_t x_18126; +x_18124 = lean_ctor_get(x_18123, 0); +lean_inc(x_18124); +x_18125 = lean_ctor_get(x_18123, 1); +lean_inc(x_18125); +lean_dec(x_18123); +x_18126 = !lean_is_exclusive(x_18124); +if (x_18126 == 0) +{ +lean_object* x_18127; +x_18127 = lean_ctor_get(x_18124, 0); +lean_ctor_set(x_18039, 0, x_18127); +lean_ctor_set(x_18124, 0, x_18039); +x_18007 = x_18124; +x_18008 = x_18125; +goto block_18037; +} +else +{ +lean_object* x_18128; lean_object* x_18129; lean_object* x_18130; +x_18128 = lean_ctor_get(x_18124, 0); +x_18129 = lean_ctor_get(x_18124, 1); +lean_inc(x_18129); +lean_inc(x_18128); +lean_dec(x_18124); +lean_ctor_set(x_18039, 0, x_18128); +x_18130 = lean_alloc_ctor(0, 2, 0); +lean_ctor_set(x_18130, 0, x_18039); +lean_ctor_set(x_18130, 1, x_18129); +x_18007 = x_18130; +x_18008 = x_18125; +goto block_18037; +} +} +else +{ +uint8_t x_18131; +lean_free_object(x_18039); +lean_dec(x_17718); +lean_dec(x_17707); +lean_dec(x_153); +lean_dec(x_5); +lean_dec(x_4); +lean_dec(x_2); +lean_dec(x_1); +x_18131 = !lean_is_exclusive(x_18123); +if (x_18131 == 0) +{ +return x_18123; +} +else +{ +lean_object* x_18132; lean_object* x_18133; lean_object* x_18134; +x_18132 = lean_ctor_get(x_18123, 0); +x_18133 = lean_ctor_get(x_18123, 1); +lean_inc(x_18133); +lean_inc(x_18132); +lean_dec(x_18123); +x_18134 = lean_alloc_ctor(1, 2, 0); +lean_ctor_set(x_18134, 0, x_18132); +lean_ctor_set(x_18134, 1, x_18133); +return x_18134; +} +} +} +} +else +{ +lean_object* x_18135; lean_object* x_18136; lean_object* x_18137; lean_object* x_18138; uint8_t x_18139; +x_18135 = lean_ctor_get(x_18039, 0); +lean_inc(x_18135); +lean_dec(x_18039); +x_18136 = lean_array_get_size(x_17707); +x_18137 = lean_ctor_get(x_18135, 3); +lean_inc(x_18137); +lean_dec(x_18135); +x_18138 = lean_array_get_size(x_18137); +lean_dec(x_18137); +x_18139 = lean_nat_dec_lt(x_18136, x_18138); +if (x_18139 == 0) +{ +uint8_t x_18140; +x_18140 = lean_nat_dec_eq(x_18136, x_18138); +if (x_18140 == 0) +{ +lean_object* x_18141; lean_object* x_18142; lean_object* x_18143; lean_object* x_18144; lean_object* x_18145; lean_object* x_18146; lean_object* x_18147; lean_object* x_18148; lean_object* x_18149; lean_object* x_18150; lean_object* x_18151; lean_object* x_18152; lean_object* x_18153; lean_object* x_18154; lean_object* x_18155; lean_object* x_18156; +x_18141 = lean_unsigned_to_nat(0u); +x_18142 = l_Array_extract___rarg(x_17707, x_18141, x_18138); +x_18143 = l_Array_extract___rarg(x_17707, x_18138, x_18136); +lean_dec(x_18136); +lean_inc(x_153); +lean_ctor_set_tag(x_18038, 6); +lean_ctor_set(x_18038, 1, x_18142); +lean_ctor_set(x_18038, 0, x_153); +x_18144 = lean_ctor_get(x_1, 0); +lean_inc(x_18144); +x_18145 = l_Lean_IR_ToIR_bindVar(x_18144, x_17713, x_4, x_5, x_18043); +x_18146 = lean_ctor_get(x_18145, 0); +lean_inc(x_18146); +x_18147 = lean_ctor_get(x_18145, 1); +lean_inc(x_18147); +lean_dec(x_18145); +x_18148 = lean_ctor_get(x_18146, 0); +lean_inc(x_18148); +x_18149 = lean_ctor_get(x_18146, 1); +lean_inc(x_18149); +lean_dec(x_18146); +x_18150 = l_Lean_IR_ToIR_newVar(x_18149, x_4, x_5, x_18147); +x_18151 = lean_ctor_get(x_18150, 0); +lean_inc(x_18151); +x_18152 = lean_ctor_get(x_18150, 1); 
+lean_inc(x_18152); +lean_dec(x_18150); +x_18153 = lean_ctor_get(x_18151, 0); +lean_inc(x_18153); +x_18154 = lean_ctor_get(x_18151, 1); +lean_inc(x_18154); +lean_dec(x_18151); +x_18155 = lean_ctor_get(x_1, 2); +lean_inc(x_18155); +lean_inc(x_5); +lean_inc(x_4); +x_18156 = l_Lean_IR_ToIR_lowerType(x_18155, x_18154, x_4, x_5, x_18152); +if (lean_obj_tag(x_18156) == 0) +{ +lean_object* x_18157; lean_object* x_18158; lean_object* x_18159; lean_object* x_18160; lean_object* x_18161; +x_18157 = lean_ctor_get(x_18156, 0); +lean_inc(x_18157); +x_18158 = lean_ctor_get(x_18156, 1); +lean_inc(x_18158); +lean_dec(x_18156); +x_18159 = lean_ctor_get(x_18157, 0); +lean_inc(x_18159); +x_18160 = lean_ctor_get(x_18157, 1); +lean_inc(x_18160); +lean_dec(x_18157); +lean_inc(x_5); +lean_inc(x_4); +lean_inc(x_2); +x_18161 = l_Lean_IR_ToIR_lowerLet___lambda__3(x_2, x_18153, x_18143, x_18148, x_18038, x_18159, x_18160, x_4, x_5, x_18158); +if (lean_obj_tag(x_18161) == 0) +{ +lean_object* x_18162; lean_object* x_18163; lean_object* x_18164; lean_object* x_18165; lean_object* x_18166; lean_object* x_18167; lean_object* x_18168; +x_18162 = lean_ctor_get(x_18161, 0); +lean_inc(x_18162); +x_18163 = lean_ctor_get(x_18161, 1); +lean_inc(x_18163); +lean_dec(x_18161); +x_18164 = lean_ctor_get(x_18162, 0); +lean_inc(x_18164); +x_18165 = lean_ctor_get(x_18162, 1); +lean_inc(x_18165); +if (lean_is_exclusive(x_18162)) { + lean_ctor_release(x_18162, 0); + lean_ctor_release(x_18162, 1); + x_18166 = x_18162; +} else { + lean_dec_ref(x_18162); + x_18166 = lean_box(0); +} +x_18167 = lean_alloc_ctor(1, 1, 0); +lean_ctor_set(x_18167, 0, x_18164); +if (lean_is_scalar(x_18166)) { + x_18168 = lean_alloc_ctor(0, 2, 0); +} else { + x_18168 = x_18166; +} +lean_ctor_set(x_18168, 0, x_18167); +lean_ctor_set(x_18168, 1, x_18165); +x_18007 = x_18168; +x_18008 = x_18163; +goto block_18037; +} +else +{ +lean_object* x_18169; lean_object* x_18170; lean_object* x_18171; lean_object* x_18172; +lean_dec(x_17718); +lean_dec(x_17707); +lean_dec(x_153); +lean_dec(x_5); +lean_dec(x_4); +lean_dec(x_2); +lean_dec(x_1); +x_18169 = lean_ctor_get(x_18161, 0); +lean_inc(x_18169); +x_18170 = lean_ctor_get(x_18161, 1); +lean_inc(x_18170); +if (lean_is_exclusive(x_18161)) { + lean_ctor_release(x_18161, 0); + lean_ctor_release(x_18161, 1); + x_18171 = x_18161; +} else { + lean_dec_ref(x_18161); + x_18171 = lean_box(0); +} +if (lean_is_scalar(x_18171)) { + x_18172 = lean_alloc_ctor(1, 2, 0); +} else { + x_18172 = x_18171; +} +lean_ctor_set(x_18172, 0, x_18169); +lean_ctor_set(x_18172, 1, x_18170); +return x_18172; +} +} +else +{ +lean_object* x_18173; lean_object* x_18174; lean_object* x_18175; lean_object* x_18176; +lean_dec(x_18153); +lean_dec(x_18148); +lean_dec(x_18038); +lean_dec(x_18143); +lean_dec(x_17718); +lean_dec(x_17707); +lean_dec(x_153); +lean_dec(x_5); +lean_dec(x_4); +lean_dec(x_2); +lean_dec(x_1); +x_18173 = lean_ctor_get(x_18156, 0); +lean_inc(x_18173); +x_18174 = lean_ctor_get(x_18156, 1); +lean_inc(x_18174); +if (lean_is_exclusive(x_18156)) { + lean_ctor_release(x_18156, 0); + lean_ctor_release(x_18156, 1); + x_18175 = x_18156; +} else { + lean_dec_ref(x_18156); + x_18175 = lean_box(0); +} +if (lean_is_scalar(x_18175)) { + x_18176 = lean_alloc_ctor(1, 2, 0); +} else { + x_18176 = x_18175; +} +lean_ctor_set(x_18176, 0, x_18173); +lean_ctor_set(x_18176, 1, x_18174); +return x_18176; +} +} +else +{ +lean_object* x_18177; lean_object* x_18178; lean_object* x_18179; lean_object* x_18180; lean_object* x_18181; lean_object* x_18182; lean_object* 
x_18183; lean_object* x_18184; +lean_dec(x_18138); +lean_dec(x_18136); +lean_inc(x_17707); +lean_inc(x_153); +lean_ctor_set_tag(x_18038, 6); +lean_ctor_set(x_18038, 1, x_17707); +lean_ctor_set(x_18038, 0, x_153); +x_18177 = lean_ctor_get(x_1, 0); +lean_inc(x_18177); +x_18178 = l_Lean_IR_ToIR_bindVar(x_18177, x_17713, x_4, x_5, x_18043); +x_18179 = lean_ctor_get(x_18178, 0); +lean_inc(x_18179); +x_18180 = lean_ctor_get(x_18178, 1); +lean_inc(x_18180); +lean_dec(x_18178); +x_18181 = lean_ctor_get(x_18179, 0); +lean_inc(x_18181); +x_18182 = lean_ctor_get(x_18179, 1); +lean_inc(x_18182); +lean_dec(x_18179); +x_18183 = lean_ctor_get(x_1, 2); +lean_inc(x_18183); +lean_inc(x_5); +lean_inc(x_4); +x_18184 = l_Lean_IR_ToIR_lowerType(x_18183, x_18182, x_4, x_5, x_18180); +if (lean_obj_tag(x_18184) == 0) +{ +lean_object* x_18185; lean_object* x_18186; lean_object* x_18187; lean_object* x_18188; lean_object* x_18189; +x_18185 = lean_ctor_get(x_18184, 0); +lean_inc(x_18185); +x_18186 = lean_ctor_get(x_18184, 1); +lean_inc(x_18186); +lean_dec(x_18184); +x_18187 = lean_ctor_get(x_18185, 0); +lean_inc(x_18187); +x_18188 = lean_ctor_get(x_18185, 1); +lean_inc(x_18188); +lean_dec(x_18185); +lean_inc(x_5); +lean_inc(x_4); +lean_inc(x_2); +x_18189 = l_Lean_IR_ToIR_lowerLet___lambda__1(x_2, x_18181, x_18038, x_18187, x_18188, x_4, x_5, x_18186); +if (lean_obj_tag(x_18189) == 0) +{ +lean_object* x_18190; lean_object* x_18191; lean_object* x_18192; lean_object* x_18193; lean_object* x_18194; lean_object* x_18195; lean_object* x_18196; +x_18190 = lean_ctor_get(x_18189, 0); +lean_inc(x_18190); +x_18191 = lean_ctor_get(x_18189, 1); +lean_inc(x_18191); +lean_dec(x_18189); +x_18192 = lean_ctor_get(x_18190, 0); +lean_inc(x_18192); +x_18193 = lean_ctor_get(x_18190, 1); +lean_inc(x_18193); +if (lean_is_exclusive(x_18190)) { + lean_ctor_release(x_18190, 0); + lean_ctor_release(x_18190, 1); + x_18194 = x_18190; +} else { + lean_dec_ref(x_18190); + x_18194 = lean_box(0); +} +x_18195 = lean_alloc_ctor(1, 1, 0); +lean_ctor_set(x_18195, 0, x_18192); +if (lean_is_scalar(x_18194)) { + x_18196 = lean_alloc_ctor(0, 2, 0); +} else { + x_18196 = x_18194; +} +lean_ctor_set(x_18196, 0, x_18195); +lean_ctor_set(x_18196, 1, x_18193); +x_18007 = x_18196; +x_18008 = x_18191; +goto block_18037; +} +else +{ +lean_object* x_18197; lean_object* x_18198; lean_object* x_18199; lean_object* x_18200; +lean_dec(x_17718); +lean_dec(x_17707); +lean_dec(x_153); +lean_dec(x_5); +lean_dec(x_4); +lean_dec(x_2); +lean_dec(x_1); +x_18197 = lean_ctor_get(x_18189, 0); +lean_inc(x_18197); +x_18198 = lean_ctor_get(x_18189, 1); +lean_inc(x_18198); +if (lean_is_exclusive(x_18189)) { + lean_ctor_release(x_18189, 0); + lean_ctor_release(x_18189, 1); + x_18199 = x_18189; +} else { + lean_dec_ref(x_18189); + x_18199 = lean_box(0); +} +if (lean_is_scalar(x_18199)) { + x_18200 = lean_alloc_ctor(1, 2, 0); +} else { + x_18200 = x_18199; +} +lean_ctor_set(x_18200, 0, x_18197); +lean_ctor_set(x_18200, 1, x_18198); +return x_18200; +} +} +else +{ +lean_object* x_18201; lean_object* x_18202; lean_object* x_18203; lean_object* x_18204; +lean_dec(x_18181); +lean_dec(x_18038); +lean_dec(x_17718); +lean_dec(x_17707); +lean_dec(x_153); +lean_dec(x_5); +lean_dec(x_4); +lean_dec(x_2); +lean_dec(x_1); +x_18201 = lean_ctor_get(x_18184, 0); +lean_inc(x_18201); +x_18202 = lean_ctor_get(x_18184, 1); +lean_inc(x_18202); +if (lean_is_exclusive(x_18184)) { + lean_ctor_release(x_18184, 0); + lean_ctor_release(x_18184, 1); + x_18203 = x_18184; +} else { + lean_dec_ref(x_18184); + x_18203 = 
lean_box(0); +} +if (lean_is_scalar(x_18203)) { + x_18204 = lean_alloc_ctor(1, 2, 0); +} else { + x_18204 = x_18203; +} +lean_ctor_set(x_18204, 0, x_18201); +lean_ctor_set(x_18204, 1, x_18202); +return x_18204; +} +} +} +else +{ +lean_object* x_18205; lean_object* x_18206; lean_object* x_18207; lean_object* x_18208; lean_object* x_18209; lean_object* x_18210; lean_object* x_18211; lean_object* x_18212; +lean_dec(x_18138); +lean_dec(x_18136); +lean_inc(x_17707); +lean_inc(x_153); +lean_ctor_set_tag(x_18038, 7); +lean_ctor_set(x_18038, 1, x_17707); +lean_ctor_set(x_18038, 0, x_153); +x_18205 = lean_ctor_get(x_1, 0); +lean_inc(x_18205); +x_18206 = l_Lean_IR_ToIR_bindVar(x_18205, x_17713, x_4, x_5, x_18043); +x_18207 = lean_ctor_get(x_18206, 0); +lean_inc(x_18207); +x_18208 = lean_ctor_get(x_18206, 1); +lean_inc(x_18208); +lean_dec(x_18206); +x_18209 = lean_ctor_get(x_18207, 0); +lean_inc(x_18209); +x_18210 = lean_ctor_get(x_18207, 1); +lean_inc(x_18210); +lean_dec(x_18207); +x_18211 = lean_box(7); +lean_inc(x_5); +lean_inc(x_4); +lean_inc(x_2); +x_18212 = l_Lean_IR_ToIR_lowerLet___lambda__1(x_2, x_18209, x_18038, x_18211, x_18210, x_4, x_5, x_18208); +if (lean_obj_tag(x_18212) == 0) +{ +lean_object* x_18213; lean_object* x_18214; lean_object* x_18215; lean_object* x_18216; lean_object* x_18217; lean_object* x_18218; lean_object* x_18219; +x_18213 = lean_ctor_get(x_18212, 0); +lean_inc(x_18213); +x_18214 = lean_ctor_get(x_18212, 1); +lean_inc(x_18214); +lean_dec(x_18212); +x_18215 = lean_ctor_get(x_18213, 0); +lean_inc(x_18215); +x_18216 = lean_ctor_get(x_18213, 1); +lean_inc(x_18216); +if (lean_is_exclusive(x_18213)) { + lean_ctor_release(x_18213, 0); + lean_ctor_release(x_18213, 1); + x_18217 = x_18213; +} else { + lean_dec_ref(x_18213); + x_18217 = lean_box(0); +} +x_18218 = lean_alloc_ctor(1, 1, 0); +lean_ctor_set(x_18218, 0, x_18215); +if (lean_is_scalar(x_18217)) { + x_18219 = lean_alloc_ctor(0, 2, 0); +} else { + x_18219 = x_18217; +} +lean_ctor_set(x_18219, 0, x_18218); +lean_ctor_set(x_18219, 1, x_18216); +x_18007 = x_18219; +x_18008 = x_18214; +goto block_18037; +} +else +{ +lean_object* x_18220; lean_object* x_18221; lean_object* x_18222; lean_object* x_18223; +lean_dec(x_17718); +lean_dec(x_17707); +lean_dec(x_153); +lean_dec(x_5); +lean_dec(x_4); +lean_dec(x_2); +lean_dec(x_1); +x_18220 = lean_ctor_get(x_18212, 0); +lean_inc(x_18220); +x_18221 = lean_ctor_get(x_18212, 1); +lean_inc(x_18221); +if (lean_is_exclusive(x_18212)) { + lean_ctor_release(x_18212, 0); + lean_ctor_release(x_18212, 1); + x_18222 = x_18212; +} else { + lean_dec_ref(x_18212); + x_18222 = lean_box(0); +} +if (lean_is_scalar(x_18222)) { + x_18223 = lean_alloc_ctor(1, 2, 0); +} else { + x_18223 = x_18222; +} +lean_ctor_set(x_18223, 0, x_18220); +lean_ctor_set(x_18223, 1, x_18221); +return x_18223; +} +} +} +} +else +{ +lean_object* x_18224; lean_object* x_18225; lean_object* x_18226; lean_object* x_18227; lean_object* x_18228; lean_object* x_18229; uint8_t x_18230; +x_18224 = lean_ctor_get(x_18038, 1); +lean_inc(x_18224); +lean_dec(x_18038); +x_18225 = lean_ctor_get(x_18039, 0); +lean_inc(x_18225); +if (lean_is_exclusive(x_18039)) { + lean_ctor_release(x_18039, 0); + x_18226 = x_18039; +} else { + lean_dec_ref(x_18039); + x_18226 = lean_box(0); +} +x_18227 = lean_array_get_size(x_17707); +x_18228 = lean_ctor_get(x_18225, 3); +lean_inc(x_18228); +lean_dec(x_18225); +x_18229 = lean_array_get_size(x_18228); +lean_dec(x_18228); +x_18230 = lean_nat_dec_lt(x_18227, x_18229); +if (x_18230 == 0) +{ +uint8_t x_18231; 
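+/* Note (hedged annotation on the generated code): x_18227 appears to hold the
+   number of argument values gathered for this application and x_18229 the
+   parameter count of the declaration returned by getMonoDecl_x3f. The equality
+   test below seems to separate an exact (saturated) call, lowered with
+   constructor tag 6, from an over-application, which is split into a saturated
+   call plus an application of its result; the earlier lean_nat_dec_lt branch
+   handles the under-applied case as a partial application (tag 7). */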
+x_18231 = lean_nat_dec_eq(x_18227, x_18229); +if (x_18231 == 0) +{ +lean_object* x_18232; lean_object* x_18233; lean_object* x_18234; lean_object* x_18235; lean_object* x_18236; lean_object* x_18237; lean_object* x_18238; lean_object* x_18239; lean_object* x_18240; lean_object* x_18241; lean_object* x_18242; lean_object* x_18243; lean_object* x_18244; lean_object* x_18245; lean_object* x_18246; lean_object* x_18247; lean_object* x_18248; +x_18232 = lean_unsigned_to_nat(0u); +x_18233 = l_Array_extract___rarg(x_17707, x_18232, x_18229); +x_18234 = l_Array_extract___rarg(x_17707, x_18229, x_18227); +lean_dec(x_18227); +lean_inc(x_153); +x_18235 = lean_alloc_ctor(6, 2, 0); +lean_ctor_set(x_18235, 0, x_153); +lean_ctor_set(x_18235, 1, x_18233); +x_18236 = lean_ctor_get(x_1, 0); +lean_inc(x_18236); +x_18237 = l_Lean_IR_ToIR_bindVar(x_18236, x_17713, x_4, x_5, x_18224); +x_18238 = lean_ctor_get(x_18237, 0); +lean_inc(x_18238); +x_18239 = lean_ctor_get(x_18237, 1); +lean_inc(x_18239); +lean_dec(x_18237); +x_18240 = lean_ctor_get(x_18238, 0); +lean_inc(x_18240); +x_18241 = lean_ctor_get(x_18238, 1); +lean_inc(x_18241); +lean_dec(x_18238); +x_18242 = l_Lean_IR_ToIR_newVar(x_18241, x_4, x_5, x_18239); +x_18243 = lean_ctor_get(x_18242, 0); +lean_inc(x_18243); +x_18244 = lean_ctor_get(x_18242, 1); +lean_inc(x_18244); +lean_dec(x_18242); +x_18245 = lean_ctor_get(x_18243, 0); +lean_inc(x_18245); +x_18246 = lean_ctor_get(x_18243, 1); +lean_inc(x_18246); +lean_dec(x_18243); +x_18247 = lean_ctor_get(x_1, 2); +lean_inc(x_18247); +lean_inc(x_5); +lean_inc(x_4); +x_18248 = l_Lean_IR_ToIR_lowerType(x_18247, x_18246, x_4, x_5, x_18244); +if (lean_obj_tag(x_18248) == 0) +{ +lean_object* x_18249; lean_object* x_18250; lean_object* x_18251; lean_object* x_18252; lean_object* x_18253; +x_18249 = lean_ctor_get(x_18248, 0); +lean_inc(x_18249); +x_18250 = lean_ctor_get(x_18248, 1); +lean_inc(x_18250); +lean_dec(x_18248); +x_18251 = lean_ctor_get(x_18249, 0); +lean_inc(x_18251); +x_18252 = lean_ctor_get(x_18249, 1); +lean_inc(x_18252); +lean_dec(x_18249); +lean_inc(x_5); +lean_inc(x_4); +lean_inc(x_2); +x_18253 = l_Lean_IR_ToIR_lowerLet___lambda__3(x_2, x_18245, x_18234, x_18240, x_18235, x_18251, x_18252, x_4, x_5, x_18250); +if (lean_obj_tag(x_18253) == 0) +{ +lean_object* x_18254; lean_object* x_18255; lean_object* x_18256; lean_object* x_18257; lean_object* x_18258; lean_object* x_18259; lean_object* x_18260; +x_18254 = lean_ctor_get(x_18253, 0); +lean_inc(x_18254); +x_18255 = lean_ctor_get(x_18253, 1); +lean_inc(x_18255); +lean_dec(x_18253); +x_18256 = lean_ctor_get(x_18254, 0); +lean_inc(x_18256); +x_18257 = lean_ctor_get(x_18254, 1); +lean_inc(x_18257); +if (lean_is_exclusive(x_18254)) { + lean_ctor_release(x_18254, 0); + lean_ctor_release(x_18254, 1); + x_18258 = x_18254; +} else { + lean_dec_ref(x_18254); + x_18258 = lean_box(0); +} +if (lean_is_scalar(x_18226)) { + x_18259 = lean_alloc_ctor(1, 1, 0); +} else { + x_18259 = x_18226; +} +lean_ctor_set(x_18259, 0, x_18256); +if (lean_is_scalar(x_18258)) { + x_18260 = lean_alloc_ctor(0, 2, 0); +} else { + x_18260 = x_18258; +} +lean_ctor_set(x_18260, 0, x_18259); +lean_ctor_set(x_18260, 1, x_18257); +x_18007 = x_18260; +x_18008 = x_18255; +goto block_18037; +} +else +{ +lean_object* x_18261; lean_object* x_18262; lean_object* x_18263; lean_object* x_18264; +lean_dec(x_18226); +lean_dec(x_17718); +lean_dec(x_17707); +lean_dec(x_153); +lean_dec(x_5); +lean_dec(x_4); +lean_dec(x_2); +lean_dec(x_1); +x_18261 = lean_ctor_get(x_18253, 0); +lean_inc(x_18261); +x_18262 = 
lean_ctor_get(x_18253, 1); +lean_inc(x_18262); +if (lean_is_exclusive(x_18253)) { + lean_ctor_release(x_18253, 0); + lean_ctor_release(x_18253, 1); + x_18263 = x_18253; +} else { + lean_dec_ref(x_18253); + x_18263 = lean_box(0); +} +if (lean_is_scalar(x_18263)) { + x_18264 = lean_alloc_ctor(1, 2, 0); +} else { + x_18264 = x_18263; +} +lean_ctor_set(x_18264, 0, x_18261); +lean_ctor_set(x_18264, 1, x_18262); +return x_18264; +} +} +else +{ +lean_object* x_18265; lean_object* x_18266; lean_object* x_18267; lean_object* x_18268; +lean_dec(x_18245); +lean_dec(x_18240); +lean_dec(x_18235); +lean_dec(x_18234); +lean_dec(x_18226); +lean_dec(x_17718); +lean_dec(x_17707); +lean_dec(x_153); +lean_dec(x_5); +lean_dec(x_4); +lean_dec(x_2); +lean_dec(x_1); +x_18265 = lean_ctor_get(x_18248, 0); +lean_inc(x_18265); +x_18266 = lean_ctor_get(x_18248, 1); +lean_inc(x_18266); +if (lean_is_exclusive(x_18248)) { + lean_ctor_release(x_18248, 0); + lean_ctor_release(x_18248, 1); + x_18267 = x_18248; +} else { + lean_dec_ref(x_18248); + x_18267 = lean_box(0); +} +if (lean_is_scalar(x_18267)) { + x_18268 = lean_alloc_ctor(1, 2, 0); +} else { + x_18268 = x_18267; +} +lean_ctor_set(x_18268, 0, x_18265); +lean_ctor_set(x_18268, 1, x_18266); +return x_18268; +} +} +else +{ +lean_object* x_18269; lean_object* x_18270; lean_object* x_18271; lean_object* x_18272; lean_object* x_18273; lean_object* x_18274; lean_object* x_18275; lean_object* x_18276; lean_object* x_18277; +lean_dec(x_18229); +lean_dec(x_18227); +lean_inc(x_17707); +lean_inc(x_153); +x_18269 = lean_alloc_ctor(6, 2, 0); +lean_ctor_set(x_18269, 0, x_153); +lean_ctor_set(x_18269, 1, x_17707); +x_18270 = lean_ctor_get(x_1, 0); +lean_inc(x_18270); +x_18271 = l_Lean_IR_ToIR_bindVar(x_18270, x_17713, x_4, x_5, x_18224); +x_18272 = lean_ctor_get(x_18271, 0); +lean_inc(x_18272); +x_18273 = lean_ctor_get(x_18271, 1); +lean_inc(x_18273); +lean_dec(x_18271); +x_18274 = lean_ctor_get(x_18272, 0); +lean_inc(x_18274); +x_18275 = lean_ctor_get(x_18272, 1); +lean_inc(x_18275); +lean_dec(x_18272); +x_18276 = lean_ctor_get(x_1, 2); +lean_inc(x_18276); +lean_inc(x_5); +lean_inc(x_4); +x_18277 = l_Lean_IR_ToIR_lowerType(x_18276, x_18275, x_4, x_5, x_18273); +if (lean_obj_tag(x_18277) == 0) +{ +lean_object* x_18278; lean_object* x_18279; lean_object* x_18280; lean_object* x_18281; lean_object* x_18282; +x_18278 = lean_ctor_get(x_18277, 0); +lean_inc(x_18278); +x_18279 = lean_ctor_get(x_18277, 1); +lean_inc(x_18279); +lean_dec(x_18277); +x_18280 = lean_ctor_get(x_18278, 0); +lean_inc(x_18280); +x_18281 = lean_ctor_get(x_18278, 1); +lean_inc(x_18281); +lean_dec(x_18278); +lean_inc(x_5); +lean_inc(x_4); +lean_inc(x_2); +x_18282 = l_Lean_IR_ToIR_lowerLet___lambda__1(x_2, x_18274, x_18269, x_18280, x_18281, x_4, x_5, x_18279); +if (lean_obj_tag(x_18282) == 0) +{ +lean_object* x_18283; lean_object* x_18284; lean_object* x_18285; lean_object* x_18286; lean_object* x_18287; lean_object* x_18288; lean_object* x_18289; +x_18283 = lean_ctor_get(x_18282, 0); +lean_inc(x_18283); +x_18284 = lean_ctor_get(x_18282, 1); +lean_inc(x_18284); +lean_dec(x_18282); +x_18285 = lean_ctor_get(x_18283, 0); +lean_inc(x_18285); +x_18286 = lean_ctor_get(x_18283, 1); +lean_inc(x_18286); +if (lean_is_exclusive(x_18283)) { + lean_ctor_release(x_18283, 0); + lean_ctor_release(x_18283, 1); + x_18287 = x_18283; +} else { + lean_dec_ref(x_18283); + x_18287 = lean_box(0); +} +if (lean_is_scalar(x_18226)) { + x_18288 = lean_alloc_ctor(1, 1, 0); +} else { + x_18288 = x_18226; +} +lean_ctor_set(x_18288, 0, x_18285); 
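+/* Note (hedged annotation): x_18287 is either the result cell released just
+   above (when it was uniquely referenced) or the lean_box(0) placeholder;
+   lean_is_scalar distinguishes the two, so a fresh pair is presumably only
+   allocated when no cell is available for in-place reuse. */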
+if (lean_is_scalar(x_18287)) { + x_18289 = lean_alloc_ctor(0, 2, 0); +} else { + x_18289 = x_18287; +} +lean_ctor_set(x_18289, 0, x_18288); +lean_ctor_set(x_18289, 1, x_18286); +x_18007 = x_18289; +x_18008 = x_18284; +goto block_18037; +} +else +{ +lean_object* x_18290; lean_object* x_18291; lean_object* x_18292; lean_object* x_18293; +lean_dec(x_18226); +lean_dec(x_17718); +lean_dec(x_17707); +lean_dec(x_153); +lean_dec(x_5); +lean_dec(x_4); +lean_dec(x_2); +lean_dec(x_1); +x_18290 = lean_ctor_get(x_18282, 0); +lean_inc(x_18290); +x_18291 = lean_ctor_get(x_18282, 1); +lean_inc(x_18291); +if (lean_is_exclusive(x_18282)) { + lean_ctor_release(x_18282, 0); + lean_ctor_release(x_18282, 1); + x_18292 = x_18282; +} else { + lean_dec_ref(x_18282); + x_18292 = lean_box(0); +} +if (lean_is_scalar(x_18292)) { + x_18293 = lean_alloc_ctor(1, 2, 0); +} else { + x_18293 = x_18292; +} +lean_ctor_set(x_18293, 0, x_18290); +lean_ctor_set(x_18293, 1, x_18291); +return x_18293; +} +} +else +{ +lean_object* x_18294; lean_object* x_18295; lean_object* x_18296; lean_object* x_18297; +lean_dec(x_18274); +lean_dec(x_18269); +lean_dec(x_18226); +lean_dec(x_17718); +lean_dec(x_17707); +lean_dec(x_153); +lean_dec(x_5); +lean_dec(x_4); +lean_dec(x_2); +lean_dec(x_1); +x_18294 = lean_ctor_get(x_18277, 0); +lean_inc(x_18294); +x_18295 = lean_ctor_get(x_18277, 1); +lean_inc(x_18295); +if (lean_is_exclusive(x_18277)) { + lean_ctor_release(x_18277, 0); + lean_ctor_release(x_18277, 1); + x_18296 = x_18277; +} else { + lean_dec_ref(x_18277); + x_18296 = lean_box(0); +} +if (lean_is_scalar(x_18296)) { + x_18297 = lean_alloc_ctor(1, 2, 0); +} else { + x_18297 = x_18296; +} +lean_ctor_set(x_18297, 0, x_18294); +lean_ctor_set(x_18297, 1, x_18295); +return x_18297; +} +} +} +else +{ +lean_object* x_18298; lean_object* x_18299; lean_object* x_18300; lean_object* x_18301; lean_object* x_18302; lean_object* x_18303; lean_object* x_18304; lean_object* x_18305; lean_object* x_18306; +lean_dec(x_18229); +lean_dec(x_18227); +lean_inc(x_17707); +lean_inc(x_153); +x_18298 = lean_alloc_ctor(7, 2, 0); +lean_ctor_set(x_18298, 0, x_153); +lean_ctor_set(x_18298, 1, x_17707); +x_18299 = lean_ctor_get(x_1, 0); +lean_inc(x_18299); +x_18300 = l_Lean_IR_ToIR_bindVar(x_18299, x_17713, x_4, x_5, x_18224); +x_18301 = lean_ctor_get(x_18300, 0); +lean_inc(x_18301); +x_18302 = lean_ctor_get(x_18300, 1); +lean_inc(x_18302); +lean_dec(x_18300); +x_18303 = lean_ctor_get(x_18301, 0); +lean_inc(x_18303); +x_18304 = lean_ctor_get(x_18301, 1); +lean_inc(x_18304); +lean_dec(x_18301); +x_18305 = lean_box(7); +lean_inc(x_5); +lean_inc(x_4); +lean_inc(x_2); +x_18306 = l_Lean_IR_ToIR_lowerLet___lambda__1(x_2, x_18303, x_18298, x_18305, x_18304, x_4, x_5, x_18302); +if (lean_obj_tag(x_18306) == 0) +{ +lean_object* x_18307; lean_object* x_18308; lean_object* x_18309; lean_object* x_18310; lean_object* x_18311; lean_object* x_18312; lean_object* x_18313; +x_18307 = lean_ctor_get(x_18306, 0); +lean_inc(x_18307); +x_18308 = lean_ctor_get(x_18306, 1); +lean_inc(x_18308); +lean_dec(x_18306); +x_18309 = lean_ctor_get(x_18307, 0); +lean_inc(x_18309); +x_18310 = lean_ctor_get(x_18307, 1); +lean_inc(x_18310); +if (lean_is_exclusive(x_18307)) { + lean_ctor_release(x_18307, 0); + lean_ctor_release(x_18307, 1); + x_18311 = x_18307; +} else { + lean_dec_ref(x_18307); + x_18311 = lean_box(0); +} +if (lean_is_scalar(x_18226)) { + x_18312 = lean_alloc_ctor(1, 1, 0); +} else { + x_18312 = x_18226; +} +lean_ctor_set(x_18312, 0, x_18309); +if (lean_is_scalar(x_18311)) { + x_18313 = 
lean_alloc_ctor(0, 2, 0); +} else { + x_18313 = x_18311; +} +lean_ctor_set(x_18313, 0, x_18312); +lean_ctor_set(x_18313, 1, x_18310); +x_18007 = x_18313; +x_18008 = x_18308; +goto block_18037; +} +else +{ +lean_object* x_18314; lean_object* x_18315; lean_object* x_18316; lean_object* x_18317; +lean_dec(x_18226); +lean_dec(x_17718); +lean_dec(x_17707); +lean_dec(x_153); +lean_dec(x_5); +lean_dec(x_4); +lean_dec(x_2); +lean_dec(x_1); +x_18314 = lean_ctor_get(x_18306, 0); +lean_inc(x_18314); +x_18315 = lean_ctor_get(x_18306, 1); +lean_inc(x_18315); +if (lean_is_exclusive(x_18306)) { + lean_ctor_release(x_18306, 0); + lean_ctor_release(x_18306, 1); + x_18316 = x_18306; +} else { + lean_dec_ref(x_18306); + x_18316 = lean_box(0); +} +if (lean_is_scalar(x_18316)) { + x_18317 = lean_alloc_ctor(1, 2, 0); +} else { + x_18317 = x_18316; +} +lean_ctor_set(x_18317, 0, x_18314); +lean_ctor_set(x_18317, 1, x_18315); +return x_18317; +} +} +} +} +block_18037: +{ +lean_object* x_18009; +x_18009 = lean_ctor_get(x_18007, 0); +lean_inc(x_18009); +if (lean_obj_tag(x_18009) == 0) +{ +lean_object* x_18010; lean_object* x_18011; lean_object* x_18012; lean_object* x_18013; lean_object* x_18014; lean_object* x_18015; lean_object* x_18016; lean_object* x_18017; lean_object* x_18018; lean_object* x_18019; +lean_dec(x_17718); +x_18010 = lean_ctor_get(x_18007, 1); +lean_inc(x_18010); +lean_dec(x_18007); +x_18011 = lean_alloc_ctor(6, 2, 0); +lean_ctor_set(x_18011, 0, x_153); +lean_ctor_set(x_18011, 1, x_17707); +x_18012 = lean_ctor_get(x_1, 0); +lean_inc(x_18012); +x_18013 = l_Lean_IR_ToIR_bindVar(x_18012, x_18010, x_4, x_5, x_18008); +x_18014 = lean_ctor_get(x_18013, 0); +lean_inc(x_18014); +x_18015 = lean_ctor_get(x_18013, 1); +lean_inc(x_18015); +lean_dec(x_18013); +x_18016 = lean_ctor_get(x_18014, 0); +lean_inc(x_18016); +x_18017 = lean_ctor_get(x_18014, 1); +lean_inc(x_18017); +lean_dec(x_18014); +x_18018 = lean_ctor_get(x_1, 2); +lean_inc(x_18018); +lean_dec(x_1); +lean_inc(x_5); +lean_inc(x_4); +x_18019 = l_Lean_IR_ToIR_lowerType(x_18018, x_18017, x_4, x_5, x_18015); +if (lean_obj_tag(x_18019) == 0) +{ +lean_object* x_18020; lean_object* x_18021; lean_object* x_18022; lean_object* x_18023; lean_object* x_18024; +x_18020 = lean_ctor_get(x_18019, 0); +lean_inc(x_18020); +x_18021 = lean_ctor_get(x_18019, 1); +lean_inc(x_18021); +lean_dec(x_18019); +x_18022 = lean_ctor_get(x_18020, 0); +lean_inc(x_18022); +x_18023 = lean_ctor_get(x_18020, 1); +lean_inc(x_18023); +lean_dec(x_18020); +x_18024 = l_Lean_IR_ToIR_lowerLet___lambda__1(x_2, x_18016, x_18011, x_18022, x_18023, x_4, x_5, x_18021); +return x_18024; +} +else +{ +uint8_t x_18025; +lean_dec(x_18016); +lean_dec(x_18011); +lean_dec(x_5); +lean_dec(x_4); +lean_dec(x_2); +x_18025 = !lean_is_exclusive(x_18019); +if (x_18025 == 0) +{ +return x_18019; +} +else +{ +lean_object* x_18026; lean_object* x_18027; lean_object* x_18028; +x_18026 = lean_ctor_get(x_18019, 0); +x_18027 = lean_ctor_get(x_18019, 1); +lean_inc(x_18027); +lean_inc(x_18026); +lean_dec(x_18019); +x_18028 = lean_alloc_ctor(1, 2, 0); +lean_ctor_set(x_18028, 0, x_18026); +lean_ctor_set(x_18028, 1, x_18027); +return x_18028; +} +} +} +else +{ +uint8_t x_18029; +lean_dec(x_17707); +lean_dec(x_153); +lean_dec(x_5); +lean_dec(x_4); +lean_dec(x_2); +lean_dec(x_1); +x_18029 = !lean_is_exclusive(x_18007); +if (x_18029 == 0) +{ +lean_object* x_18030; lean_object* x_18031; lean_object* x_18032; +x_18030 = lean_ctor_get(x_18007, 0); +lean_dec(x_18030); +x_18031 = lean_ctor_get(x_18009, 0); +lean_inc(x_18031); 
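+/* Note (hedged annotation): reaching this point means the !lean_is_exclusive
+   test above came back false, i.e. x_18007 is uniquely referenced, so its
+   first field is overwritten in place with the unwrapped payload x_18031
+   rather than allocating a new result pair. */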
+lean_dec(x_18009); +lean_ctor_set(x_18007, 0, x_18031); +if (lean_is_scalar(x_17718)) { + x_18032 = lean_alloc_ctor(0, 2, 0); +} else { + x_18032 = x_17718; +} +lean_ctor_set(x_18032, 0, x_18007); +lean_ctor_set(x_18032, 1, x_18008); +return x_18032; +} +else +{ +lean_object* x_18033; lean_object* x_18034; lean_object* x_18035; lean_object* x_18036; +x_18033 = lean_ctor_get(x_18007, 1); +lean_inc(x_18033); +lean_dec(x_18007); +x_18034 = lean_ctor_get(x_18009, 0); +lean_inc(x_18034); +lean_dec(x_18009); +x_18035 = lean_alloc_ctor(0, 2, 0); +lean_ctor_set(x_18035, 0, x_18034); +lean_ctor_set(x_18035, 1, x_18033); +if (lean_is_scalar(x_17718)) { + x_18036 = lean_alloc_ctor(0, 2, 0); +} else { + x_18036 = x_17718; +} +lean_ctor_set(x_18036, 0, x_18035); +lean_ctor_set(x_18036, 1, x_18008); +return x_18036; +} +} +} +} +case 2: +{ +lean_object* x_18318; lean_object* x_18319; +lean_dec(x_17724); +lean_dec(x_17719); +lean_dec(x_17718); +lean_free_object(x_17709); +lean_dec(x_17707); +lean_dec(x_17699); +lean_dec(x_17698); +lean_dec(x_153); +lean_dec(x_2); +lean_dec(x_1); +x_18318 = l_Lean_IR_ToIR_lowerLet___closed__18; +x_18319 = l_panic___at_Lean_IR_ToIR_lowerAlt_loop___spec__1(x_18318, x_17713, x_4, x_5, x_17717); +return x_18319; +} +case 3: +{ +lean_object* x_18320; lean_object* x_18321; lean_object* x_18351; lean_object* x_18352; +lean_dec(x_17724); +lean_dec(x_17719); +lean_dec(x_17699); +lean_dec(x_17698); +lean_inc(x_153); +x_18351 = l_Lean_Compiler_LCNF_getMonoDecl_x3f(x_153, x_4, x_5, x_17717); +x_18352 = lean_ctor_get(x_18351, 0); +lean_inc(x_18352); +if (lean_obj_tag(x_18352) == 0) +{ +lean_object* x_18353; lean_object* x_18354; +x_18353 = lean_ctor_get(x_18351, 1); +lean_inc(x_18353); +lean_dec(x_18351); +x_18354 = lean_box(0); +lean_ctor_set(x_17709, 0, x_18354); +x_18320 = x_17709; +x_18321 = x_18353; +goto block_18350; +} +else +{ +uint8_t x_18355; +lean_free_object(x_17709); +x_18355 = !lean_is_exclusive(x_18351); +if (x_18355 == 0) +{ +lean_object* x_18356; lean_object* x_18357; uint8_t x_18358; +x_18356 = lean_ctor_get(x_18351, 1); +x_18357 = lean_ctor_get(x_18351, 0); +lean_dec(x_18357); +x_18358 = !lean_is_exclusive(x_18352); +if (x_18358 == 0) +{ +lean_object* x_18359; lean_object* x_18360; lean_object* x_18361; lean_object* x_18362; uint8_t x_18363; +x_18359 = lean_ctor_get(x_18352, 0); +x_18360 = lean_array_get_size(x_17707); +x_18361 = lean_ctor_get(x_18359, 3); +lean_inc(x_18361); +lean_dec(x_18359); +x_18362 = lean_array_get_size(x_18361); +lean_dec(x_18361); +x_18363 = lean_nat_dec_lt(x_18360, x_18362); +if (x_18363 == 0) +{ +uint8_t x_18364; +x_18364 = lean_nat_dec_eq(x_18360, x_18362); +if (x_18364 == 0) +{ +lean_object* x_18365; lean_object* x_18366; lean_object* x_18367; lean_object* x_18368; lean_object* x_18369; lean_object* x_18370; lean_object* x_18371; lean_object* x_18372; lean_object* x_18373; lean_object* x_18374; lean_object* x_18375; lean_object* x_18376; lean_object* x_18377; lean_object* x_18378; lean_object* x_18379; lean_object* x_18380; +x_18365 = lean_unsigned_to_nat(0u); +x_18366 = l_Array_extract___rarg(x_17707, x_18365, x_18362); +x_18367 = l_Array_extract___rarg(x_17707, x_18362, x_18360); +lean_dec(x_18360); +lean_inc(x_153); +lean_ctor_set_tag(x_18351, 6); +lean_ctor_set(x_18351, 1, x_18366); +lean_ctor_set(x_18351, 0, x_153); +x_18368 = lean_ctor_get(x_1, 0); +lean_inc(x_18368); +x_18369 = l_Lean_IR_ToIR_bindVar(x_18368, x_17713, x_4, x_5, x_18356); +x_18370 = lean_ctor_get(x_18369, 0); +lean_inc(x_18370); +x_18371 = lean_ctor_get(x_18369, 
1); +lean_inc(x_18371); +lean_dec(x_18369); +x_18372 = lean_ctor_get(x_18370, 0); +lean_inc(x_18372); +x_18373 = lean_ctor_get(x_18370, 1); +lean_inc(x_18373); +lean_dec(x_18370); +x_18374 = l_Lean_IR_ToIR_newVar(x_18373, x_4, x_5, x_18371); +x_18375 = lean_ctor_get(x_18374, 0); +lean_inc(x_18375); +x_18376 = lean_ctor_get(x_18374, 1); +lean_inc(x_18376); +lean_dec(x_18374); +x_18377 = lean_ctor_get(x_18375, 0); +lean_inc(x_18377); +x_18378 = lean_ctor_get(x_18375, 1); +lean_inc(x_18378); +lean_dec(x_18375); +x_18379 = lean_ctor_get(x_1, 2); +lean_inc(x_18379); +lean_inc(x_5); +lean_inc(x_4); +x_18380 = l_Lean_IR_ToIR_lowerType(x_18379, x_18378, x_4, x_5, x_18376); +if (lean_obj_tag(x_18380) == 0) +{ +lean_object* x_18381; lean_object* x_18382; lean_object* x_18383; lean_object* x_18384; lean_object* x_18385; +x_18381 = lean_ctor_get(x_18380, 0); +lean_inc(x_18381); +x_18382 = lean_ctor_get(x_18380, 1); +lean_inc(x_18382); +lean_dec(x_18380); +x_18383 = lean_ctor_get(x_18381, 0); +lean_inc(x_18383); +x_18384 = lean_ctor_get(x_18381, 1); +lean_inc(x_18384); +lean_dec(x_18381); +lean_inc(x_5); +lean_inc(x_4); +lean_inc(x_2); +x_18385 = l_Lean_IR_ToIR_lowerLet___lambda__3(x_2, x_18377, x_18367, x_18372, x_18351, x_18383, x_18384, x_4, x_5, x_18382); +if (lean_obj_tag(x_18385) == 0) +{ +lean_object* x_18386; lean_object* x_18387; uint8_t x_18388; +x_18386 = lean_ctor_get(x_18385, 0); +lean_inc(x_18386); +x_18387 = lean_ctor_get(x_18385, 1); +lean_inc(x_18387); +lean_dec(x_18385); +x_18388 = !lean_is_exclusive(x_18386); +if (x_18388 == 0) +{ +lean_object* x_18389; +x_18389 = lean_ctor_get(x_18386, 0); +lean_ctor_set(x_18352, 0, x_18389); +lean_ctor_set(x_18386, 0, x_18352); +x_18320 = x_18386; +x_18321 = x_18387; +goto block_18350; +} +else +{ +lean_object* x_18390; lean_object* x_18391; lean_object* x_18392; +x_18390 = lean_ctor_get(x_18386, 0); +x_18391 = lean_ctor_get(x_18386, 1); +lean_inc(x_18391); +lean_inc(x_18390); +lean_dec(x_18386); +lean_ctor_set(x_18352, 0, x_18390); +x_18392 = lean_alloc_ctor(0, 2, 0); +lean_ctor_set(x_18392, 0, x_18352); +lean_ctor_set(x_18392, 1, x_18391); +x_18320 = x_18392; +x_18321 = x_18387; +goto block_18350; +} +} +else +{ +uint8_t x_18393; +lean_free_object(x_18352); +lean_dec(x_17718); +lean_dec(x_17707); +lean_dec(x_153); +lean_dec(x_5); +lean_dec(x_4); +lean_dec(x_2); +lean_dec(x_1); +x_18393 = !lean_is_exclusive(x_18385); +if (x_18393 == 0) +{ +return x_18385; +} +else +{ +lean_object* x_18394; lean_object* x_18395; lean_object* x_18396; +x_18394 = lean_ctor_get(x_18385, 0); +x_18395 = lean_ctor_get(x_18385, 1); +lean_inc(x_18395); +lean_inc(x_18394); +lean_dec(x_18385); +x_18396 = lean_alloc_ctor(1, 2, 0); +lean_ctor_set(x_18396, 0, x_18394); +lean_ctor_set(x_18396, 1, x_18395); +return x_18396; +} +} +} +else +{ +uint8_t x_18397; +lean_dec(x_18377); +lean_dec(x_18372); +lean_dec(x_18351); +lean_dec(x_18367); +lean_free_object(x_18352); +lean_dec(x_17718); +lean_dec(x_17707); +lean_dec(x_153); +lean_dec(x_5); +lean_dec(x_4); +lean_dec(x_2); +lean_dec(x_1); +x_18397 = !lean_is_exclusive(x_18380); +if (x_18397 == 0) +{ +return x_18380; +} +else +{ +lean_object* x_18398; lean_object* x_18399; lean_object* x_18400; +x_18398 = lean_ctor_get(x_18380, 0); +x_18399 = lean_ctor_get(x_18380, 1); +lean_inc(x_18399); +lean_inc(x_18398); +lean_dec(x_18380); +x_18400 = lean_alloc_ctor(1, 2, 0); +lean_ctor_set(x_18400, 0, x_18398); +lean_ctor_set(x_18400, 1, x_18399); +return x_18400; +} +} +} +else +{ +lean_object* x_18401; lean_object* x_18402; lean_object* 
x_18403; lean_object* x_18404; lean_object* x_18405; lean_object* x_18406; lean_object* x_18407; lean_object* x_18408; +lean_dec(x_18362); +lean_dec(x_18360); +lean_inc(x_17707); +lean_inc(x_153); +lean_ctor_set_tag(x_18351, 6); +lean_ctor_set(x_18351, 1, x_17707); +lean_ctor_set(x_18351, 0, x_153); +x_18401 = lean_ctor_get(x_1, 0); +lean_inc(x_18401); +x_18402 = l_Lean_IR_ToIR_bindVar(x_18401, x_17713, x_4, x_5, x_18356); +x_18403 = lean_ctor_get(x_18402, 0); +lean_inc(x_18403); +x_18404 = lean_ctor_get(x_18402, 1); +lean_inc(x_18404); +lean_dec(x_18402); +x_18405 = lean_ctor_get(x_18403, 0); +lean_inc(x_18405); +x_18406 = lean_ctor_get(x_18403, 1); +lean_inc(x_18406); +lean_dec(x_18403); +x_18407 = lean_ctor_get(x_1, 2); +lean_inc(x_18407); +lean_inc(x_5); +lean_inc(x_4); +x_18408 = l_Lean_IR_ToIR_lowerType(x_18407, x_18406, x_4, x_5, x_18404); +if (lean_obj_tag(x_18408) == 0) +{ +lean_object* x_18409; lean_object* x_18410; lean_object* x_18411; lean_object* x_18412; lean_object* x_18413; +x_18409 = lean_ctor_get(x_18408, 0); +lean_inc(x_18409); +x_18410 = lean_ctor_get(x_18408, 1); +lean_inc(x_18410); +lean_dec(x_18408); +x_18411 = lean_ctor_get(x_18409, 0); +lean_inc(x_18411); +x_18412 = lean_ctor_get(x_18409, 1); +lean_inc(x_18412); +lean_dec(x_18409); +lean_inc(x_5); +lean_inc(x_4); +lean_inc(x_2); +x_18413 = l_Lean_IR_ToIR_lowerLet___lambda__1(x_2, x_18405, x_18351, x_18411, x_18412, x_4, x_5, x_18410); +if (lean_obj_tag(x_18413) == 0) +{ +lean_object* x_18414; lean_object* x_18415; uint8_t x_18416; +x_18414 = lean_ctor_get(x_18413, 0); +lean_inc(x_18414); +x_18415 = lean_ctor_get(x_18413, 1); +lean_inc(x_18415); +lean_dec(x_18413); +x_18416 = !lean_is_exclusive(x_18414); +if (x_18416 == 0) +{ +lean_object* x_18417; +x_18417 = lean_ctor_get(x_18414, 0); +lean_ctor_set(x_18352, 0, x_18417); +lean_ctor_set(x_18414, 0, x_18352); +x_18320 = x_18414; +x_18321 = x_18415; +goto block_18350; +} +else +{ +lean_object* x_18418; lean_object* x_18419; lean_object* x_18420; +x_18418 = lean_ctor_get(x_18414, 0); +x_18419 = lean_ctor_get(x_18414, 1); +lean_inc(x_18419); +lean_inc(x_18418); +lean_dec(x_18414); +lean_ctor_set(x_18352, 0, x_18418); +x_18420 = lean_alloc_ctor(0, 2, 0); +lean_ctor_set(x_18420, 0, x_18352); +lean_ctor_set(x_18420, 1, x_18419); +x_18320 = x_18420; +x_18321 = x_18415; +goto block_18350; +} +} +else +{ +uint8_t x_18421; +lean_free_object(x_18352); +lean_dec(x_17718); +lean_dec(x_17707); +lean_dec(x_153); +lean_dec(x_5); +lean_dec(x_4); +lean_dec(x_2); +lean_dec(x_1); +x_18421 = !lean_is_exclusive(x_18413); +if (x_18421 == 0) +{ +return x_18413; +} +else +{ +lean_object* x_18422; lean_object* x_18423; lean_object* x_18424; +x_18422 = lean_ctor_get(x_18413, 0); +x_18423 = lean_ctor_get(x_18413, 1); +lean_inc(x_18423); +lean_inc(x_18422); +lean_dec(x_18413); +x_18424 = lean_alloc_ctor(1, 2, 0); +lean_ctor_set(x_18424, 0, x_18422); +lean_ctor_set(x_18424, 1, x_18423); +return x_18424; +} +} +} +else +{ +uint8_t x_18425; +lean_dec(x_18405); +lean_dec(x_18351); +lean_free_object(x_18352); +lean_dec(x_17718); +lean_dec(x_17707); +lean_dec(x_153); +lean_dec(x_5); +lean_dec(x_4); +lean_dec(x_2); +lean_dec(x_1); +x_18425 = !lean_is_exclusive(x_18408); +if (x_18425 == 0) +{ +return x_18408; +} +else +{ +lean_object* x_18426; lean_object* x_18427; lean_object* x_18428; +x_18426 = lean_ctor_get(x_18408, 0); +x_18427 = lean_ctor_get(x_18408, 1); +lean_inc(x_18427); +lean_inc(x_18426); +lean_dec(x_18408); +x_18428 = lean_alloc_ctor(1, 2, 0); +lean_ctor_set(x_18428, 0, x_18426); 
+lean_ctor_set(x_18428, 1, x_18427); +return x_18428; +} +} +} +} +else +{ +lean_object* x_18429; lean_object* x_18430; lean_object* x_18431; lean_object* x_18432; lean_object* x_18433; lean_object* x_18434; lean_object* x_18435; lean_object* x_18436; +lean_dec(x_18362); +lean_dec(x_18360); +lean_inc(x_17707); +lean_inc(x_153); +lean_ctor_set_tag(x_18351, 7); +lean_ctor_set(x_18351, 1, x_17707); +lean_ctor_set(x_18351, 0, x_153); +x_18429 = lean_ctor_get(x_1, 0); +lean_inc(x_18429); +x_18430 = l_Lean_IR_ToIR_bindVar(x_18429, x_17713, x_4, x_5, x_18356); +x_18431 = lean_ctor_get(x_18430, 0); +lean_inc(x_18431); +x_18432 = lean_ctor_get(x_18430, 1); +lean_inc(x_18432); +lean_dec(x_18430); +x_18433 = lean_ctor_get(x_18431, 0); +lean_inc(x_18433); +x_18434 = lean_ctor_get(x_18431, 1); +lean_inc(x_18434); +lean_dec(x_18431); +x_18435 = lean_box(7); +lean_inc(x_5); +lean_inc(x_4); +lean_inc(x_2); +x_18436 = l_Lean_IR_ToIR_lowerLet___lambda__1(x_2, x_18433, x_18351, x_18435, x_18434, x_4, x_5, x_18432); +if (lean_obj_tag(x_18436) == 0) +{ +lean_object* x_18437; lean_object* x_18438; uint8_t x_18439; +x_18437 = lean_ctor_get(x_18436, 0); +lean_inc(x_18437); +x_18438 = lean_ctor_get(x_18436, 1); +lean_inc(x_18438); +lean_dec(x_18436); +x_18439 = !lean_is_exclusive(x_18437); +if (x_18439 == 0) +{ +lean_object* x_18440; +x_18440 = lean_ctor_get(x_18437, 0); +lean_ctor_set(x_18352, 0, x_18440); +lean_ctor_set(x_18437, 0, x_18352); +x_18320 = x_18437; +x_18321 = x_18438; +goto block_18350; +} +else +{ +lean_object* x_18441; lean_object* x_18442; lean_object* x_18443; +x_18441 = lean_ctor_get(x_18437, 0); +x_18442 = lean_ctor_get(x_18437, 1); +lean_inc(x_18442); +lean_inc(x_18441); +lean_dec(x_18437); +lean_ctor_set(x_18352, 0, x_18441); +x_18443 = lean_alloc_ctor(0, 2, 0); +lean_ctor_set(x_18443, 0, x_18352); +lean_ctor_set(x_18443, 1, x_18442); +x_18320 = x_18443; +x_18321 = x_18438; +goto block_18350; +} +} +else +{ +uint8_t x_18444; +lean_free_object(x_18352); +lean_dec(x_17718); +lean_dec(x_17707); +lean_dec(x_153); +lean_dec(x_5); +lean_dec(x_4); +lean_dec(x_2); +lean_dec(x_1); +x_18444 = !lean_is_exclusive(x_18436); +if (x_18444 == 0) +{ +return x_18436; +} +else +{ +lean_object* x_18445; lean_object* x_18446; lean_object* x_18447; +x_18445 = lean_ctor_get(x_18436, 0); +x_18446 = lean_ctor_get(x_18436, 1); +lean_inc(x_18446); +lean_inc(x_18445); +lean_dec(x_18436); +x_18447 = lean_alloc_ctor(1, 2, 0); +lean_ctor_set(x_18447, 0, x_18445); +lean_ctor_set(x_18447, 1, x_18446); +return x_18447; +} +} +} +} +else +{ +lean_object* x_18448; lean_object* x_18449; lean_object* x_18450; lean_object* x_18451; uint8_t x_18452; +x_18448 = lean_ctor_get(x_18352, 0); +lean_inc(x_18448); +lean_dec(x_18352); +x_18449 = lean_array_get_size(x_17707); +x_18450 = lean_ctor_get(x_18448, 3); +lean_inc(x_18450); +lean_dec(x_18448); +x_18451 = lean_array_get_size(x_18450); +lean_dec(x_18450); +x_18452 = lean_nat_dec_lt(x_18449, x_18451); +if (x_18452 == 0) +{ +uint8_t x_18453; +x_18453 = lean_nat_dec_eq(x_18449, x_18451); +if (x_18453 == 0) +{ +lean_object* x_18454; lean_object* x_18455; lean_object* x_18456; lean_object* x_18457; lean_object* x_18458; lean_object* x_18459; lean_object* x_18460; lean_object* x_18461; lean_object* x_18462; lean_object* x_18463; lean_object* x_18464; lean_object* x_18465; lean_object* x_18466; lean_object* x_18467; lean_object* x_18468; lean_object* x_18469; +x_18454 = lean_unsigned_to_nat(0u); +x_18455 = l_Array_extract___rarg(x_17707, x_18454, x_18451); +x_18456 = 
l_Array_extract___rarg(x_17707, x_18451, x_18449); +lean_dec(x_18449); +lean_inc(x_153); +lean_ctor_set_tag(x_18351, 6); +lean_ctor_set(x_18351, 1, x_18455); +lean_ctor_set(x_18351, 0, x_153); +x_18457 = lean_ctor_get(x_1, 0); +lean_inc(x_18457); +x_18458 = l_Lean_IR_ToIR_bindVar(x_18457, x_17713, x_4, x_5, x_18356); +x_18459 = lean_ctor_get(x_18458, 0); +lean_inc(x_18459); +x_18460 = lean_ctor_get(x_18458, 1); +lean_inc(x_18460); +lean_dec(x_18458); +x_18461 = lean_ctor_get(x_18459, 0); +lean_inc(x_18461); +x_18462 = lean_ctor_get(x_18459, 1); +lean_inc(x_18462); +lean_dec(x_18459); +x_18463 = l_Lean_IR_ToIR_newVar(x_18462, x_4, x_5, x_18460); +x_18464 = lean_ctor_get(x_18463, 0); +lean_inc(x_18464); +x_18465 = lean_ctor_get(x_18463, 1); +lean_inc(x_18465); +lean_dec(x_18463); +x_18466 = lean_ctor_get(x_18464, 0); +lean_inc(x_18466); +x_18467 = lean_ctor_get(x_18464, 1); +lean_inc(x_18467); +lean_dec(x_18464); +x_18468 = lean_ctor_get(x_1, 2); +lean_inc(x_18468); +lean_inc(x_5); +lean_inc(x_4); +x_18469 = l_Lean_IR_ToIR_lowerType(x_18468, x_18467, x_4, x_5, x_18465); +if (lean_obj_tag(x_18469) == 0) +{ +lean_object* x_18470; lean_object* x_18471; lean_object* x_18472; lean_object* x_18473; lean_object* x_18474; +x_18470 = lean_ctor_get(x_18469, 0); +lean_inc(x_18470); +x_18471 = lean_ctor_get(x_18469, 1); +lean_inc(x_18471); +lean_dec(x_18469); +x_18472 = lean_ctor_get(x_18470, 0); +lean_inc(x_18472); +x_18473 = lean_ctor_get(x_18470, 1); +lean_inc(x_18473); +lean_dec(x_18470); +lean_inc(x_5); +lean_inc(x_4); +lean_inc(x_2); +x_18474 = l_Lean_IR_ToIR_lowerLet___lambda__3(x_2, x_18466, x_18456, x_18461, x_18351, x_18472, x_18473, x_4, x_5, x_18471); +if (lean_obj_tag(x_18474) == 0) +{ +lean_object* x_18475; lean_object* x_18476; lean_object* x_18477; lean_object* x_18478; lean_object* x_18479; lean_object* x_18480; lean_object* x_18481; +x_18475 = lean_ctor_get(x_18474, 0); +lean_inc(x_18475); +x_18476 = lean_ctor_get(x_18474, 1); +lean_inc(x_18476); +lean_dec(x_18474); +x_18477 = lean_ctor_get(x_18475, 0); +lean_inc(x_18477); +x_18478 = lean_ctor_get(x_18475, 1); +lean_inc(x_18478); +if (lean_is_exclusive(x_18475)) { + lean_ctor_release(x_18475, 0); + lean_ctor_release(x_18475, 1); + x_18479 = x_18475; +} else { + lean_dec_ref(x_18475); + x_18479 = lean_box(0); +} +x_18480 = lean_alloc_ctor(1, 1, 0); +lean_ctor_set(x_18480, 0, x_18477); +if (lean_is_scalar(x_18479)) { + x_18481 = lean_alloc_ctor(0, 2, 0); +} else { + x_18481 = x_18479; +} +lean_ctor_set(x_18481, 0, x_18480); +lean_ctor_set(x_18481, 1, x_18478); +x_18320 = x_18481; +x_18321 = x_18476; +goto block_18350; +} +else +{ +lean_object* x_18482; lean_object* x_18483; lean_object* x_18484; lean_object* x_18485; +lean_dec(x_17718); +lean_dec(x_17707); +lean_dec(x_153); +lean_dec(x_5); +lean_dec(x_4); +lean_dec(x_2); +lean_dec(x_1); +x_18482 = lean_ctor_get(x_18474, 0); +lean_inc(x_18482); +x_18483 = lean_ctor_get(x_18474, 1); +lean_inc(x_18483); +if (lean_is_exclusive(x_18474)) { + lean_ctor_release(x_18474, 0); + lean_ctor_release(x_18474, 1); + x_18484 = x_18474; +} else { + lean_dec_ref(x_18474); + x_18484 = lean_box(0); +} +if (lean_is_scalar(x_18484)) { + x_18485 = lean_alloc_ctor(1, 2, 0); +} else { + x_18485 = x_18484; +} +lean_ctor_set(x_18485, 0, x_18482); +lean_ctor_set(x_18485, 1, x_18483); +return x_18485; +} +} +else +{ +lean_object* x_18486; lean_object* x_18487; lean_object* x_18488; lean_object* x_18489; +lean_dec(x_18466); +lean_dec(x_18461); +lean_dec(x_18351); +lean_dec(x_18456); +lean_dec(x_17718); 
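+/* Note (hedged annotation): this appears to be the error branch of lowerType
+   (x_18469 carries a non-zero tag), so the references still held for this
+   case, including the argument array and context objects released below, are
+   dropped before the error pair is repackaged and returned. */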
+lean_dec(x_17707); +lean_dec(x_153); +lean_dec(x_5); +lean_dec(x_4); +lean_dec(x_2); +lean_dec(x_1); +x_18486 = lean_ctor_get(x_18469, 0); +lean_inc(x_18486); +x_18487 = lean_ctor_get(x_18469, 1); +lean_inc(x_18487); +if (lean_is_exclusive(x_18469)) { + lean_ctor_release(x_18469, 0); + lean_ctor_release(x_18469, 1); + x_18488 = x_18469; +} else { + lean_dec_ref(x_18469); + x_18488 = lean_box(0); +} +if (lean_is_scalar(x_18488)) { + x_18489 = lean_alloc_ctor(1, 2, 0); +} else { + x_18489 = x_18488; +} +lean_ctor_set(x_18489, 0, x_18486); +lean_ctor_set(x_18489, 1, x_18487); +return x_18489; +} +} +else +{ +lean_object* x_18490; lean_object* x_18491; lean_object* x_18492; lean_object* x_18493; lean_object* x_18494; lean_object* x_18495; lean_object* x_18496; lean_object* x_18497; +lean_dec(x_18451); +lean_dec(x_18449); +lean_inc(x_17707); +lean_inc(x_153); +lean_ctor_set_tag(x_18351, 6); +lean_ctor_set(x_18351, 1, x_17707); +lean_ctor_set(x_18351, 0, x_153); +x_18490 = lean_ctor_get(x_1, 0); +lean_inc(x_18490); +x_18491 = l_Lean_IR_ToIR_bindVar(x_18490, x_17713, x_4, x_5, x_18356); +x_18492 = lean_ctor_get(x_18491, 0); +lean_inc(x_18492); +x_18493 = lean_ctor_get(x_18491, 1); +lean_inc(x_18493); +lean_dec(x_18491); +x_18494 = lean_ctor_get(x_18492, 0); +lean_inc(x_18494); +x_18495 = lean_ctor_get(x_18492, 1); +lean_inc(x_18495); +lean_dec(x_18492); +x_18496 = lean_ctor_get(x_1, 2); +lean_inc(x_18496); +lean_inc(x_5); +lean_inc(x_4); +x_18497 = l_Lean_IR_ToIR_lowerType(x_18496, x_18495, x_4, x_5, x_18493); +if (lean_obj_tag(x_18497) == 0) +{ +lean_object* x_18498; lean_object* x_18499; lean_object* x_18500; lean_object* x_18501; lean_object* x_18502; +x_18498 = lean_ctor_get(x_18497, 0); +lean_inc(x_18498); +x_18499 = lean_ctor_get(x_18497, 1); +lean_inc(x_18499); +lean_dec(x_18497); +x_18500 = lean_ctor_get(x_18498, 0); +lean_inc(x_18500); +x_18501 = lean_ctor_get(x_18498, 1); +lean_inc(x_18501); +lean_dec(x_18498); +lean_inc(x_5); +lean_inc(x_4); +lean_inc(x_2); +x_18502 = l_Lean_IR_ToIR_lowerLet___lambda__1(x_2, x_18494, x_18351, x_18500, x_18501, x_4, x_5, x_18499); +if (lean_obj_tag(x_18502) == 0) +{ +lean_object* x_18503; lean_object* x_18504; lean_object* x_18505; lean_object* x_18506; lean_object* x_18507; lean_object* x_18508; lean_object* x_18509; +x_18503 = lean_ctor_get(x_18502, 0); +lean_inc(x_18503); +x_18504 = lean_ctor_get(x_18502, 1); +lean_inc(x_18504); +lean_dec(x_18502); +x_18505 = lean_ctor_get(x_18503, 0); +lean_inc(x_18505); +x_18506 = lean_ctor_get(x_18503, 1); +lean_inc(x_18506); +if (lean_is_exclusive(x_18503)) { + lean_ctor_release(x_18503, 0); + lean_ctor_release(x_18503, 1); + x_18507 = x_18503; +} else { + lean_dec_ref(x_18503); + x_18507 = lean_box(0); +} +x_18508 = lean_alloc_ctor(1, 1, 0); +lean_ctor_set(x_18508, 0, x_18505); +if (lean_is_scalar(x_18507)) { + x_18509 = lean_alloc_ctor(0, 2, 0); +} else { + x_18509 = x_18507; +} +lean_ctor_set(x_18509, 0, x_18508); +lean_ctor_set(x_18509, 1, x_18506); +x_18320 = x_18509; +x_18321 = x_18504; +goto block_18350; +} +else +{ +lean_object* x_18510; lean_object* x_18511; lean_object* x_18512; lean_object* x_18513; +lean_dec(x_17718); +lean_dec(x_17707); +lean_dec(x_153); +lean_dec(x_5); +lean_dec(x_4); +lean_dec(x_2); +lean_dec(x_1); +x_18510 = lean_ctor_get(x_18502, 0); +lean_inc(x_18510); +x_18511 = lean_ctor_get(x_18502, 1); +lean_inc(x_18511); +if (lean_is_exclusive(x_18502)) { + lean_ctor_release(x_18502, 0); + lean_ctor_release(x_18502, 1); + x_18512 = x_18502; +} else { + lean_dec_ref(x_18502); + x_18512 = 
lean_box(0); +} +if (lean_is_scalar(x_18512)) { + x_18513 = lean_alloc_ctor(1, 2, 0); +} else { + x_18513 = x_18512; +} +lean_ctor_set(x_18513, 0, x_18510); +lean_ctor_set(x_18513, 1, x_18511); +return x_18513; +} +} +else +{ +lean_object* x_18514; lean_object* x_18515; lean_object* x_18516; lean_object* x_18517; +lean_dec(x_18494); +lean_dec(x_18351); +lean_dec(x_17718); +lean_dec(x_17707); +lean_dec(x_153); +lean_dec(x_5); +lean_dec(x_4); +lean_dec(x_2); +lean_dec(x_1); +x_18514 = lean_ctor_get(x_18497, 0); +lean_inc(x_18514); +x_18515 = lean_ctor_get(x_18497, 1); +lean_inc(x_18515); +if (lean_is_exclusive(x_18497)) { + lean_ctor_release(x_18497, 0); + lean_ctor_release(x_18497, 1); + x_18516 = x_18497; +} else { + lean_dec_ref(x_18497); + x_18516 = lean_box(0); +} +if (lean_is_scalar(x_18516)) { + x_18517 = lean_alloc_ctor(1, 2, 0); +} else { + x_18517 = x_18516; +} +lean_ctor_set(x_18517, 0, x_18514); +lean_ctor_set(x_18517, 1, x_18515); +return x_18517; +} +} +} +else +{ +lean_object* x_18518; lean_object* x_18519; lean_object* x_18520; lean_object* x_18521; lean_object* x_18522; lean_object* x_18523; lean_object* x_18524; lean_object* x_18525; +lean_dec(x_18451); +lean_dec(x_18449); +lean_inc(x_17707); +lean_inc(x_153); +lean_ctor_set_tag(x_18351, 7); +lean_ctor_set(x_18351, 1, x_17707); +lean_ctor_set(x_18351, 0, x_153); +x_18518 = lean_ctor_get(x_1, 0); +lean_inc(x_18518); +x_18519 = l_Lean_IR_ToIR_bindVar(x_18518, x_17713, x_4, x_5, x_18356); +x_18520 = lean_ctor_get(x_18519, 0); +lean_inc(x_18520); +x_18521 = lean_ctor_get(x_18519, 1); +lean_inc(x_18521); +lean_dec(x_18519); +x_18522 = lean_ctor_get(x_18520, 0); +lean_inc(x_18522); +x_18523 = lean_ctor_get(x_18520, 1); +lean_inc(x_18523); +lean_dec(x_18520); +x_18524 = lean_box(7); +lean_inc(x_5); +lean_inc(x_4); +lean_inc(x_2); +x_18525 = l_Lean_IR_ToIR_lowerLet___lambda__1(x_2, x_18522, x_18351, x_18524, x_18523, x_4, x_5, x_18521); +if (lean_obj_tag(x_18525) == 0) +{ +lean_object* x_18526; lean_object* x_18527; lean_object* x_18528; lean_object* x_18529; lean_object* x_18530; lean_object* x_18531; lean_object* x_18532; +x_18526 = lean_ctor_get(x_18525, 0); +lean_inc(x_18526); +x_18527 = lean_ctor_get(x_18525, 1); +lean_inc(x_18527); +lean_dec(x_18525); +x_18528 = lean_ctor_get(x_18526, 0); +lean_inc(x_18528); +x_18529 = lean_ctor_get(x_18526, 1); +lean_inc(x_18529); +if (lean_is_exclusive(x_18526)) { + lean_ctor_release(x_18526, 0); + lean_ctor_release(x_18526, 1); + x_18530 = x_18526; +} else { + lean_dec_ref(x_18526); + x_18530 = lean_box(0); +} +x_18531 = lean_alloc_ctor(1, 1, 0); +lean_ctor_set(x_18531, 0, x_18528); +if (lean_is_scalar(x_18530)) { + x_18532 = lean_alloc_ctor(0, 2, 0); +} else { + x_18532 = x_18530; +} +lean_ctor_set(x_18532, 0, x_18531); +lean_ctor_set(x_18532, 1, x_18529); +x_18320 = x_18532; +x_18321 = x_18527; +goto block_18350; +} +else +{ +lean_object* x_18533; lean_object* x_18534; lean_object* x_18535; lean_object* x_18536; +lean_dec(x_17718); +lean_dec(x_17707); +lean_dec(x_153); +lean_dec(x_5); +lean_dec(x_4); +lean_dec(x_2); +lean_dec(x_1); +x_18533 = lean_ctor_get(x_18525, 0); +lean_inc(x_18533); +x_18534 = lean_ctor_get(x_18525, 1); +lean_inc(x_18534); +if (lean_is_exclusive(x_18525)) { + lean_ctor_release(x_18525, 0); + lean_ctor_release(x_18525, 1); + x_18535 = x_18525; +} else { + lean_dec_ref(x_18525); + x_18535 = lean_box(0); +} +if (lean_is_scalar(x_18535)) { + x_18536 = lean_alloc_ctor(1, 2, 0); +} else { + x_18536 = x_18535; +} +lean_ctor_set(x_18536, 0, x_18533); 
+lean_ctor_set(x_18536, 1, x_18534); +return x_18536; +} +} +} +} +else +{ +lean_object* x_18537; lean_object* x_18538; lean_object* x_18539; lean_object* x_18540; lean_object* x_18541; lean_object* x_18542; uint8_t x_18543; +x_18537 = lean_ctor_get(x_18351, 1); +lean_inc(x_18537); +lean_dec(x_18351); +x_18538 = lean_ctor_get(x_18352, 0); +lean_inc(x_18538); +if (lean_is_exclusive(x_18352)) { + lean_ctor_release(x_18352, 0); + x_18539 = x_18352; +} else { + lean_dec_ref(x_18352); + x_18539 = lean_box(0); +} +x_18540 = lean_array_get_size(x_17707); +x_18541 = lean_ctor_get(x_18538, 3); +lean_inc(x_18541); +lean_dec(x_18538); +x_18542 = lean_array_get_size(x_18541); +lean_dec(x_18541); +x_18543 = lean_nat_dec_lt(x_18540, x_18542); +if (x_18543 == 0) +{ +uint8_t x_18544; +x_18544 = lean_nat_dec_eq(x_18540, x_18542); +if (x_18544 == 0) +{ +lean_object* x_18545; lean_object* x_18546; lean_object* x_18547; lean_object* x_18548; lean_object* x_18549; lean_object* x_18550; lean_object* x_18551; lean_object* x_18552; lean_object* x_18553; lean_object* x_18554; lean_object* x_18555; lean_object* x_18556; lean_object* x_18557; lean_object* x_18558; lean_object* x_18559; lean_object* x_18560; lean_object* x_18561; +x_18545 = lean_unsigned_to_nat(0u); +x_18546 = l_Array_extract___rarg(x_17707, x_18545, x_18542); +x_18547 = l_Array_extract___rarg(x_17707, x_18542, x_18540); +lean_dec(x_18540); +lean_inc(x_153); +x_18548 = lean_alloc_ctor(6, 2, 0); +lean_ctor_set(x_18548, 0, x_153); +lean_ctor_set(x_18548, 1, x_18546); +x_18549 = lean_ctor_get(x_1, 0); +lean_inc(x_18549); +x_18550 = l_Lean_IR_ToIR_bindVar(x_18549, x_17713, x_4, x_5, x_18537); +x_18551 = lean_ctor_get(x_18550, 0); +lean_inc(x_18551); +x_18552 = lean_ctor_get(x_18550, 1); +lean_inc(x_18552); +lean_dec(x_18550); +x_18553 = lean_ctor_get(x_18551, 0); +lean_inc(x_18553); +x_18554 = lean_ctor_get(x_18551, 1); +lean_inc(x_18554); +lean_dec(x_18551); +x_18555 = l_Lean_IR_ToIR_newVar(x_18554, x_4, x_5, x_18552); +x_18556 = lean_ctor_get(x_18555, 0); +lean_inc(x_18556); +x_18557 = lean_ctor_get(x_18555, 1); +lean_inc(x_18557); +lean_dec(x_18555); +x_18558 = lean_ctor_get(x_18556, 0); +lean_inc(x_18558); +x_18559 = lean_ctor_get(x_18556, 1); +lean_inc(x_18559); +lean_dec(x_18556); +x_18560 = lean_ctor_get(x_1, 2); +lean_inc(x_18560); +lean_inc(x_5); +lean_inc(x_4); +x_18561 = l_Lean_IR_ToIR_lowerType(x_18560, x_18559, x_4, x_5, x_18557); +if (lean_obj_tag(x_18561) == 0) +{ +lean_object* x_18562; lean_object* x_18563; lean_object* x_18564; lean_object* x_18565; lean_object* x_18566; +x_18562 = lean_ctor_get(x_18561, 0); +lean_inc(x_18562); +x_18563 = lean_ctor_get(x_18561, 1); +lean_inc(x_18563); +lean_dec(x_18561); +x_18564 = lean_ctor_get(x_18562, 0); +lean_inc(x_18564); +x_18565 = lean_ctor_get(x_18562, 1); +lean_inc(x_18565); +lean_dec(x_18562); +lean_inc(x_5); +lean_inc(x_4); +lean_inc(x_2); +x_18566 = l_Lean_IR_ToIR_lowerLet___lambda__3(x_2, x_18558, x_18547, x_18553, x_18548, x_18564, x_18565, x_4, x_5, x_18563); +if (lean_obj_tag(x_18566) == 0) +{ +lean_object* x_18567; lean_object* x_18568; lean_object* x_18569; lean_object* x_18570; lean_object* x_18571; lean_object* x_18572; lean_object* x_18573; +x_18567 = lean_ctor_get(x_18566, 0); +lean_inc(x_18567); +x_18568 = lean_ctor_get(x_18566, 1); +lean_inc(x_18568); +lean_dec(x_18566); +x_18569 = lean_ctor_get(x_18567, 0); +lean_inc(x_18569); +x_18570 = lean_ctor_get(x_18567, 1); +lean_inc(x_18570); +if (lean_is_exclusive(x_18567)) { + lean_ctor_release(x_18567, 0); + lean_ctor_release(x_18567, 
1); + x_18571 = x_18567; +} else { + lean_dec_ref(x_18567); + x_18571 = lean_box(0); +} +if (lean_is_scalar(x_18539)) { + x_18572 = lean_alloc_ctor(1, 1, 0); +} else { + x_18572 = x_18539; +} +lean_ctor_set(x_18572, 0, x_18569); +if (lean_is_scalar(x_18571)) { + x_18573 = lean_alloc_ctor(0, 2, 0); +} else { + x_18573 = x_18571; +} +lean_ctor_set(x_18573, 0, x_18572); +lean_ctor_set(x_18573, 1, x_18570); +x_18320 = x_18573; +x_18321 = x_18568; +goto block_18350; +} +else +{ +lean_object* x_18574; lean_object* x_18575; lean_object* x_18576; lean_object* x_18577; +lean_dec(x_18539); +lean_dec(x_17718); +lean_dec(x_17707); +lean_dec(x_153); +lean_dec(x_5); +lean_dec(x_4); +lean_dec(x_2); +lean_dec(x_1); +x_18574 = lean_ctor_get(x_18566, 0); +lean_inc(x_18574); +x_18575 = lean_ctor_get(x_18566, 1); +lean_inc(x_18575); +if (lean_is_exclusive(x_18566)) { + lean_ctor_release(x_18566, 0); + lean_ctor_release(x_18566, 1); + x_18576 = x_18566; +} else { + lean_dec_ref(x_18566); + x_18576 = lean_box(0); +} +if (lean_is_scalar(x_18576)) { + x_18577 = lean_alloc_ctor(1, 2, 0); +} else { + x_18577 = x_18576; +} +lean_ctor_set(x_18577, 0, x_18574); +lean_ctor_set(x_18577, 1, x_18575); +return x_18577; +} +} +else +{ +lean_object* x_18578; lean_object* x_18579; lean_object* x_18580; lean_object* x_18581; +lean_dec(x_18558); +lean_dec(x_18553); +lean_dec(x_18548); +lean_dec(x_18547); +lean_dec(x_18539); +lean_dec(x_17718); +lean_dec(x_17707); +lean_dec(x_153); +lean_dec(x_5); +lean_dec(x_4); +lean_dec(x_2); +lean_dec(x_1); +x_18578 = lean_ctor_get(x_18561, 0); +lean_inc(x_18578); +x_18579 = lean_ctor_get(x_18561, 1); +lean_inc(x_18579); +if (lean_is_exclusive(x_18561)) { + lean_ctor_release(x_18561, 0); + lean_ctor_release(x_18561, 1); + x_18580 = x_18561; +} else { + lean_dec_ref(x_18561); + x_18580 = lean_box(0); +} +if (lean_is_scalar(x_18580)) { + x_18581 = lean_alloc_ctor(1, 2, 0); +} else { + x_18581 = x_18580; +} +lean_ctor_set(x_18581, 0, x_18578); +lean_ctor_set(x_18581, 1, x_18579); +return x_18581; +} +} +else +{ +lean_object* x_18582; lean_object* x_18583; lean_object* x_18584; lean_object* x_18585; lean_object* x_18586; lean_object* x_18587; lean_object* x_18588; lean_object* x_18589; lean_object* x_18590; +lean_dec(x_18542); +lean_dec(x_18540); +lean_inc(x_17707); +lean_inc(x_153); +x_18582 = lean_alloc_ctor(6, 2, 0); +lean_ctor_set(x_18582, 0, x_153); +lean_ctor_set(x_18582, 1, x_17707); +x_18583 = lean_ctor_get(x_1, 0); +lean_inc(x_18583); +x_18584 = l_Lean_IR_ToIR_bindVar(x_18583, x_17713, x_4, x_5, x_18537); +x_18585 = lean_ctor_get(x_18584, 0); +lean_inc(x_18585); +x_18586 = lean_ctor_get(x_18584, 1); +lean_inc(x_18586); +lean_dec(x_18584); +x_18587 = lean_ctor_get(x_18585, 0); +lean_inc(x_18587); +x_18588 = lean_ctor_get(x_18585, 1); +lean_inc(x_18588); +lean_dec(x_18585); +x_18589 = lean_ctor_get(x_1, 2); +lean_inc(x_18589); +lean_inc(x_5); +lean_inc(x_4); +x_18590 = l_Lean_IR_ToIR_lowerType(x_18589, x_18588, x_4, x_5, x_18586); +if (lean_obj_tag(x_18590) == 0) +{ +lean_object* x_18591; lean_object* x_18592; lean_object* x_18593; lean_object* x_18594; lean_object* x_18595; +x_18591 = lean_ctor_get(x_18590, 0); +lean_inc(x_18591); +x_18592 = lean_ctor_get(x_18590, 1); +lean_inc(x_18592); +lean_dec(x_18590); +x_18593 = lean_ctor_get(x_18591, 0); +lean_inc(x_18593); +x_18594 = lean_ctor_get(x_18591, 1); +lean_inc(x_18594); +lean_dec(x_18591); +lean_inc(x_5); +lean_inc(x_4); +lean_inc(x_2); +x_18595 = l_Lean_IR_ToIR_lowerLet___lambda__1(x_2, x_18587, x_18582, x_18593, x_18594, x_4, x_5, 
x_18592); +if (lean_obj_tag(x_18595) == 0) +{ +lean_object* x_18596; lean_object* x_18597; lean_object* x_18598; lean_object* x_18599; lean_object* x_18600; lean_object* x_18601; lean_object* x_18602; +x_18596 = lean_ctor_get(x_18595, 0); +lean_inc(x_18596); +x_18597 = lean_ctor_get(x_18595, 1); +lean_inc(x_18597); +lean_dec(x_18595); +x_18598 = lean_ctor_get(x_18596, 0); +lean_inc(x_18598); +x_18599 = lean_ctor_get(x_18596, 1); +lean_inc(x_18599); +if (lean_is_exclusive(x_18596)) { + lean_ctor_release(x_18596, 0); + lean_ctor_release(x_18596, 1); + x_18600 = x_18596; +} else { + lean_dec_ref(x_18596); + x_18600 = lean_box(0); +} +if (lean_is_scalar(x_18539)) { + x_18601 = lean_alloc_ctor(1, 1, 0); +} else { + x_18601 = x_18539; +} +lean_ctor_set(x_18601, 0, x_18598); +if (lean_is_scalar(x_18600)) { + x_18602 = lean_alloc_ctor(0, 2, 0); +} else { + x_18602 = x_18600; +} +lean_ctor_set(x_18602, 0, x_18601); +lean_ctor_set(x_18602, 1, x_18599); +x_18320 = x_18602; +x_18321 = x_18597; +goto block_18350; +} +else +{ +lean_object* x_18603; lean_object* x_18604; lean_object* x_18605; lean_object* x_18606; +lean_dec(x_18539); +lean_dec(x_17718); +lean_dec(x_17707); +lean_dec(x_153); +lean_dec(x_5); +lean_dec(x_4); +lean_dec(x_2); +lean_dec(x_1); +x_18603 = lean_ctor_get(x_18595, 0); +lean_inc(x_18603); +x_18604 = lean_ctor_get(x_18595, 1); +lean_inc(x_18604); +if (lean_is_exclusive(x_18595)) { + lean_ctor_release(x_18595, 0); + lean_ctor_release(x_18595, 1); + x_18605 = x_18595; +} else { + lean_dec_ref(x_18595); + x_18605 = lean_box(0); +} +if (lean_is_scalar(x_18605)) { + x_18606 = lean_alloc_ctor(1, 2, 0); +} else { + x_18606 = x_18605; +} +lean_ctor_set(x_18606, 0, x_18603); +lean_ctor_set(x_18606, 1, x_18604); +return x_18606; +} +} +else +{ +lean_object* x_18607; lean_object* x_18608; lean_object* x_18609; lean_object* x_18610; +lean_dec(x_18587); +lean_dec(x_18582); +lean_dec(x_18539); +lean_dec(x_17718); +lean_dec(x_17707); +lean_dec(x_153); +lean_dec(x_5); +lean_dec(x_4); +lean_dec(x_2); +lean_dec(x_1); +x_18607 = lean_ctor_get(x_18590, 0); +lean_inc(x_18607); +x_18608 = lean_ctor_get(x_18590, 1); +lean_inc(x_18608); +if (lean_is_exclusive(x_18590)) { + lean_ctor_release(x_18590, 0); + lean_ctor_release(x_18590, 1); + x_18609 = x_18590; +} else { + lean_dec_ref(x_18590); + x_18609 = lean_box(0); +} +if (lean_is_scalar(x_18609)) { + x_18610 = lean_alloc_ctor(1, 2, 0); +} else { + x_18610 = x_18609; +} +lean_ctor_set(x_18610, 0, x_18607); +lean_ctor_set(x_18610, 1, x_18608); +return x_18610; +} +} +} +else +{ +lean_object* x_18611; lean_object* x_18612; lean_object* x_18613; lean_object* x_18614; lean_object* x_18615; lean_object* x_18616; lean_object* x_18617; lean_object* x_18618; lean_object* x_18619; +lean_dec(x_18542); +lean_dec(x_18540); +lean_inc(x_17707); +lean_inc(x_153); +x_18611 = lean_alloc_ctor(7, 2, 0); +lean_ctor_set(x_18611, 0, x_153); +lean_ctor_set(x_18611, 1, x_17707); +x_18612 = lean_ctor_get(x_1, 0); +lean_inc(x_18612); +x_18613 = l_Lean_IR_ToIR_bindVar(x_18612, x_17713, x_4, x_5, x_18537); +x_18614 = lean_ctor_get(x_18613, 0); +lean_inc(x_18614); +x_18615 = lean_ctor_get(x_18613, 1); +lean_inc(x_18615); +lean_dec(x_18613); +x_18616 = lean_ctor_get(x_18614, 0); +lean_inc(x_18616); +x_18617 = lean_ctor_get(x_18614, 1); +lean_inc(x_18617); +lean_dec(x_18614); +x_18618 = lean_box(7); +lean_inc(x_5); +lean_inc(x_4); +lean_inc(x_2); +x_18619 = l_Lean_IR_ToIR_lowerLet___lambda__1(x_2, x_18616, x_18611, x_18618, x_18617, x_4, x_5, x_18615); +if (lean_obj_tag(x_18619) == 0) +{ 
+lean_object* x_18620; lean_object* x_18621; lean_object* x_18622; lean_object* x_18623; lean_object* x_18624; lean_object* x_18625; lean_object* x_18626; +x_18620 = lean_ctor_get(x_18619, 0); +lean_inc(x_18620); +x_18621 = lean_ctor_get(x_18619, 1); +lean_inc(x_18621); +lean_dec(x_18619); +x_18622 = lean_ctor_get(x_18620, 0); +lean_inc(x_18622); +x_18623 = lean_ctor_get(x_18620, 1); +lean_inc(x_18623); +if (lean_is_exclusive(x_18620)) { + lean_ctor_release(x_18620, 0); + lean_ctor_release(x_18620, 1); + x_18624 = x_18620; +} else { + lean_dec_ref(x_18620); + x_18624 = lean_box(0); +} +if (lean_is_scalar(x_18539)) { + x_18625 = lean_alloc_ctor(1, 1, 0); +} else { + x_18625 = x_18539; +} +lean_ctor_set(x_18625, 0, x_18622); +if (lean_is_scalar(x_18624)) { + x_18626 = lean_alloc_ctor(0, 2, 0); +} else { + x_18626 = x_18624; +} +lean_ctor_set(x_18626, 0, x_18625); +lean_ctor_set(x_18626, 1, x_18623); +x_18320 = x_18626; +x_18321 = x_18621; +goto block_18350; +} +else +{ +lean_object* x_18627; lean_object* x_18628; lean_object* x_18629; lean_object* x_18630; +lean_dec(x_18539); +lean_dec(x_17718); +lean_dec(x_17707); +lean_dec(x_153); +lean_dec(x_5); +lean_dec(x_4); +lean_dec(x_2); +lean_dec(x_1); +x_18627 = lean_ctor_get(x_18619, 0); +lean_inc(x_18627); +x_18628 = lean_ctor_get(x_18619, 1); +lean_inc(x_18628); +if (lean_is_exclusive(x_18619)) { + lean_ctor_release(x_18619, 0); + lean_ctor_release(x_18619, 1); + x_18629 = x_18619; +} else { + lean_dec_ref(x_18619); + x_18629 = lean_box(0); +} +if (lean_is_scalar(x_18629)) { + x_18630 = lean_alloc_ctor(1, 2, 0); +} else { + x_18630 = x_18629; +} +lean_ctor_set(x_18630, 0, x_18627); +lean_ctor_set(x_18630, 1, x_18628); +return x_18630; +} +} +} +} +block_18350: +{ +lean_object* x_18322; +x_18322 = lean_ctor_get(x_18320, 0); +lean_inc(x_18322); +if (lean_obj_tag(x_18322) == 0) +{ +lean_object* x_18323; lean_object* x_18324; lean_object* x_18325; lean_object* x_18326; lean_object* x_18327; lean_object* x_18328; lean_object* x_18329; lean_object* x_18330; lean_object* x_18331; lean_object* x_18332; +lean_dec(x_17718); +x_18323 = lean_ctor_get(x_18320, 1); +lean_inc(x_18323); +lean_dec(x_18320); +x_18324 = lean_alloc_ctor(6, 2, 0); +lean_ctor_set(x_18324, 0, x_153); +lean_ctor_set(x_18324, 1, x_17707); +x_18325 = lean_ctor_get(x_1, 0); +lean_inc(x_18325); +x_18326 = l_Lean_IR_ToIR_bindVar(x_18325, x_18323, x_4, x_5, x_18321); +x_18327 = lean_ctor_get(x_18326, 0); +lean_inc(x_18327); +x_18328 = lean_ctor_get(x_18326, 1); +lean_inc(x_18328); +lean_dec(x_18326); +x_18329 = lean_ctor_get(x_18327, 0); +lean_inc(x_18329); +x_18330 = lean_ctor_get(x_18327, 1); +lean_inc(x_18330); +lean_dec(x_18327); +x_18331 = lean_ctor_get(x_1, 2); +lean_inc(x_18331); +lean_dec(x_1); +lean_inc(x_5); +lean_inc(x_4); +x_18332 = l_Lean_IR_ToIR_lowerType(x_18331, x_18330, x_4, x_5, x_18328); +if (lean_obj_tag(x_18332) == 0) +{ +lean_object* x_18333; lean_object* x_18334; lean_object* x_18335; lean_object* x_18336; lean_object* x_18337; +x_18333 = lean_ctor_get(x_18332, 0); +lean_inc(x_18333); +x_18334 = lean_ctor_get(x_18332, 1); +lean_inc(x_18334); +lean_dec(x_18332); +x_18335 = lean_ctor_get(x_18333, 0); +lean_inc(x_18335); +x_18336 = lean_ctor_get(x_18333, 1); +lean_inc(x_18336); +lean_dec(x_18333); +x_18337 = l_Lean_IR_ToIR_lowerLet___lambda__1(x_2, x_18329, x_18324, x_18335, x_18336, x_4, x_5, x_18334); +return x_18337; +} +else +{ +uint8_t x_18338; +lean_dec(x_18329); +lean_dec(x_18324); +lean_dec(x_5); +lean_dec(x_4); +lean_dec(x_2); +x_18338 = 
!lean_is_exclusive(x_18332); +if (x_18338 == 0) +{ +return x_18332; +} +else +{ +lean_object* x_18339; lean_object* x_18340; lean_object* x_18341; +x_18339 = lean_ctor_get(x_18332, 0); +x_18340 = lean_ctor_get(x_18332, 1); +lean_inc(x_18340); +lean_inc(x_18339); +lean_dec(x_18332); +x_18341 = lean_alloc_ctor(1, 2, 0); +lean_ctor_set(x_18341, 0, x_18339); +lean_ctor_set(x_18341, 1, x_18340); +return x_18341; +} +} +} +else +{ +uint8_t x_18342; +lean_dec(x_17707); +lean_dec(x_153); +lean_dec(x_5); +lean_dec(x_4); +lean_dec(x_2); +lean_dec(x_1); +x_18342 = !lean_is_exclusive(x_18320); +if (x_18342 == 0) +{ +lean_object* x_18343; lean_object* x_18344; lean_object* x_18345; +x_18343 = lean_ctor_get(x_18320, 0); +lean_dec(x_18343); +x_18344 = lean_ctor_get(x_18322, 0); +lean_inc(x_18344); +lean_dec(x_18322); +lean_ctor_set(x_18320, 0, x_18344); +if (lean_is_scalar(x_17718)) { + x_18345 = lean_alloc_ctor(0, 2, 0); +} else { + x_18345 = x_17718; +} +lean_ctor_set(x_18345, 0, x_18320); +lean_ctor_set(x_18345, 1, x_18321); +return x_18345; +} +else +{ +lean_object* x_18346; lean_object* x_18347; lean_object* x_18348; lean_object* x_18349; +x_18346 = lean_ctor_get(x_18320, 1); +lean_inc(x_18346); +lean_dec(x_18320); +x_18347 = lean_ctor_get(x_18322, 0); +lean_inc(x_18347); +lean_dec(x_18322); +x_18348 = lean_alloc_ctor(0, 2, 0); +lean_ctor_set(x_18348, 0, x_18347); +lean_ctor_set(x_18348, 1, x_18346); +if (lean_is_scalar(x_17718)) { + x_18349 = lean_alloc_ctor(0, 2, 0); +} else { + x_18349 = x_17718; +} +lean_ctor_set(x_18349, 0, x_18348); +lean_ctor_set(x_18349, 1, x_18321); +return x_18349; +} +} +} +} +case 4: +{ +uint8_t x_18631; +lean_dec(x_17719); +lean_dec(x_17718); +lean_free_object(x_17709); +lean_dec(x_17699); +lean_dec(x_17698); +x_18631 = !lean_is_exclusive(x_17724); +if (x_18631 == 0) +{ +lean_object* x_18632; lean_object* x_18633; uint8_t x_18634; +x_18632 = lean_ctor_get(x_17724, 0); +lean_dec(x_18632); +x_18633 = l_Lean_IR_ToIR_lowerLet___closed__20; +x_18634 = lean_name_eq(x_153, x_18633); +if (x_18634 == 0) +{ +uint8_t x_18635; lean_object* x_18636; lean_object* x_18637; lean_object* x_18638; lean_object* x_18639; lean_object* x_18640; lean_object* x_18641; lean_object* x_18642; lean_object* x_18643; +lean_dec(x_17707); +lean_dec(x_2); +lean_dec(x_1); +x_18635 = 1; +x_18636 = l_Lean_IR_ToIR_lowerLet___closed__14; +x_18637 = l_Lean_Name_toString(x_153, x_18635, x_18636); +lean_ctor_set_tag(x_17724, 3); +lean_ctor_set(x_17724, 0, x_18637); +x_18638 = l_Lean_IR_ToIR_lowerLet___closed__22; +x_18639 = lean_alloc_ctor(5, 2, 0); +lean_ctor_set(x_18639, 0, x_18638); +lean_ctor_set(x_18639, 1, x_17724); +x_18640 = l_Lean_IR_ToIR_lowerLet___closed__24; +x_18641 = lean_alloc_ctor(5, 2, 0); +lean_ctor_set(x_18641, 0, x_18639); +lean_ctor_set(x_18641, 1, x_18640); +x_18642 = l_Lean_MessageData_ofFormat(x_18641); +x_18643 = l_Lean_throwError___at_Lean_IR_ToIR_lowerLet___spec__1(x_18642, x_17713, x_4, x_5, x_17717); +lean_dec(x_5); +lean_dec(x_4); +lean_dec(x_17713); +return x_18643; +} +else +{ +lean_object* x_18644; lean_object* x_18645; lean_object* x_18646; +lean_free_object(x_17724); +lean_dec(x_153); +x_18644 = l_Lean_IR_instInhabitedArg; +x_18645 = lean_unsigned_to_nat(2u); +x_18646 = lean_array_get(x_18644, x_17707, x_18645); +lean_dec(x_17707); +if (lean_obj_tag(x_18646) == 0) +{ +lean_object* x_18647; lean_object* x_18648; lean_object* x_18649; lean_object* x_18650; lean_object* x_18651; lean_object* x_18652; lean_object* x_18653; +x_18647 = lean_ctor_get(x_18646, 0); +lean_inc(x_18647); 
+lean_dec(x_18646); +x_18648 = lean_ctor_get(x_1, 0); +lean_inc(x_18648); +lean_dec(x_1); +x_18649 = l_Lean_IR_ToIR_bindVarToVarId(x_18648, x_18647, x_17713, x_4, x_5, x_17717); +x_18650 = lean_ctor_get(x_18649, 0); +lean_inc(x_18650); +x_18651 = lean_ctor_get(x_18649, 1); +lean_inc(x_18651); +lean_dec(x_18649); +x_18652 = lean_ctor_get(x_18650, 1); +lean_inc(x_18652); +lean_dec(x_18650); +x_18653 = l_Lean_IR_ToIR_lowerCode(x_2, x_18652, x_4, x_5, x_18651); +return x_18653; +} +else +{ +lean_object* x_18654; lean_object* x_18655; lean_object* x_18656; lean_object* x_18657; lean_object* x_18658; lean_object* x_18659; +x_18654 = lean_ctor_get(x_1, 0); +lean_inc(x_18654); +lean_dec(x_1); +x_18655 = l_Lean_IR_ToIR_bindErased(x_18654, x_17713, x_4, x_5, x_17717); +x_18656 = lean_ctor_get(x_18655, 0); +lean_inc(x_18656); +x_18657 = lean_ctor_get(x_18655, 1); +lean_inc(x_18657); +lean_dec(x_18655); +x_18658 = lean_ctor_get(x_18656, 1); +lean_inc(x_18658); +lean_dec(x_18656); +x_18659 = l_Lean_IR_ToIR_lowerCode(x_2, x_18658, x_4, x_5, x_18657); +return x_18659; +} +} +} +else +{ +lean_object* x_18660; uint8_t x_18661; +lean_dec(x_17724); +x_18660 = l_Lean_IR_ToIR_lowerLet___closed__20; +x_18661 = lean_name_eq(x_153, x_18660); +if (x_18661 == 0) +{ +uint8_t x_18662; lean_object* x_18663; lean_object* x_18664; lean_object* x_18665; lean_object* x_18666; lean_object* x_18667; lean_object* x_18668; lean_object* x_18669; lean_object* x_18670; lean_object* x_18671; +lean_dec(x_17707); +lean_dec(x_2); +lean_dec(x_1); +x_18662 = 1; +x_18663 = l_Lean_IR_ToIR_lowerLet___closed__14; +x_18664 = l_Lean_Name_toString(x_153, x_18662, x_18663); +x_18665 = lean_alloc_ctor(3, 1, 0); +lean_ctor_set(x_18665, 0, x_18664); +x_18666 = l_Lean_IR_ToIR_lowerLet___closed__22; +x_18667 = lean_alloc_ctor(5, 2, 0); +lean_ctor_set(x_18667, 0, x_18666); +lean_ctor_set(x_18667, 1, x_18665); +x_18668 = l_Lean_IR_ToIR_lowerLet___closed__24; +x_18669 = lean_alloc_ctor(5, 2, 0); +lean_ctor_set(x_18669, 0, x_18667); +lean_ctor_set(x_18669, 1, x_18668); +x_18670 = l_Lean_MessageData_ofFormat(x_18669); +x_18671 = l_Lean_throwError___at_Lean_IR_ToIR_lowerLet___spec__1(x_18670, x_17713, x_4, x_5, x_17717); +lean_dec(x_5); +lean_dec(x_4); +lean_dec(x_17713); +return x_18671; +} +else +{ +lean_object* x_18672; lean_object* x_18673; lean_object* x_18674; +lean_dec(x_153); +x_18672 = l_Lean_IR_instInhabitedArg; +x_18673 = lean_unsigned_to_nat(2u); +x_18674 = lean_array_get(x_18672, x_17707, x_18673); +lean_dec(x_17707); +if (lean_obj_tag(x_18674) == 0) +{ +lean_object* x_18675; lean_object* x_18676; lean_object* x_18677; lean_object* x_18678; lean_object* x_18679; lean_object* x_18680; lean_object* x_18681; +x_18675 = lean_ctor_get(x_18674, 0); +lean_inc(x_18675); +lean_dec(x_18674); +x_18676 = lean_ctor_get(x_1, 0); +lean_inc(x_18676); +lean_dec(x_1); +x_18677 = l_Lean_IR_ToIR_bindVarToVarId(x_18676, x_18675, x_17713, x_4, x_5, x_17717); +x_18678 = lean_ctor_get(x_18677, 0); +lean_inc(x_18678); +x_18679 = lean_ctor_get(x_18677, 1); +lean_inc(x_18679); +lean_dec(x_18677); +x_18680 = lean_ctor_get(x_18678, 1); +lean_inc(x_18680); +lean_dec(x_18678); +x_18681 = l_Lean_IR_ToIR_lowerCode(x_2, x_18680, x_4, x_5, x_18679); +return x_18681; +} +else +{ +lean_object* x_18682; lean_object* x_18683; lean_object* x_18684; lean_object* x_18685; lean_object* x_18686; lean_object* x_18687; +x_18682 = lean_ctor_get(x_1, 0); +lean_inc(x_18682); +lean_dec(x_1); +x_18683 = l_Lean_IR_ToIR_bindErased(x_18682, x_17713, x_4, x_5, x_17717); +x_18684 = 
lean_ctor_get(x_18683, 0); +lean_inc(x_18684); +x_18685 = lean_ctor_get(x_18683, 1); +lean_inc(x_18685); +lean_dec(x_18683); +x_18686 = lean_ctor_get(x_18684, 1); +lean_inc(x_18686); +lean_dec(x_18684); +x_18687 = l_Lean_IR_ToIR_lowerCode(x_2, x_18686, x_4, x_5, x_18685); +return x_18687; +} +} +} +} +case 5: +{ +lean_object* x_18688; lean_object* x_18689; +lean_dec(x_17724); +lean_dec(x_17719); +lean_dec(x_17718); +lean_free_object(x_17709); +lean_dec(x_17707); +lean_dec(x_17699); +lean_dec(x_17698); +lean_dec(x_153); +lean_dec(x_2); +lean_dec(x_1); +x_18688 = l_Lean_IR_ToIR_lowerLet___closed__26; +x_18689 = l_panic___at_Lean_IR_ToIR_lowerAlt_loop___spec__1(x_18688, x_17713, x_4, x_5, x_17717); +return x_18689; +} +case 6: +{ +lean_object* x_18690; uint8_t x_18691; +x_18690 = lean_ctor_get(x_17724, 0); +lean_inc(x_18690); +lean_dec(x_17724); +lean_inc(x_153); +x_18691 = l_Lean_isExtern(x_17719, x_153); +if (x_18691 == 0) +{ +lean_object* x_18692; +lean_dec(x_17718); +lean_free_object(x_17709); +lean_dec(x_17707); +lean_inc(x_5); +lean_inc(x_4); +x_18692 = l_Lean_IR_ToIR_getCtorInfo(x_153, x_17713, x_4, x_5, x_17717); +if (lean_obj_tag(x_18692) == 0) +{ +lean_object* x_18693; lean_object* x_18694; lean_object* x_18695; lean_object* x_18696; lean_object* x_18697; lean_object* x_18698; lean_object* x_18699; lean_object* x_18700; lean_object* x_18701; lean_object* x_18702; lean_object* x_18703; lean_object* x_18704; lean_object* x_18705; lean_object* x_18706; lean_object* x_18707; lean_object* x_18708; lean_object* x_18709; lean_object* x_18710; lean_object* x_18711; lean_object* x_18712; +x_18693 = lean_ctor_get(x_18692, 0); +lean_inc(x_18693); +x_18694 = lean_ctor_get(x_18693, 0); +lean_inc(x_18694); +x_18695 = lean_ctor_get(x_18692, 1); +lean_inc(x_18695); +lean_dec(x_18692); +x_18696 = lean_ctor_get(x_18693, 1); +lean_inc(x_18696); +lean_dec(x_18693); +x_18697 = lean_ctor_get(x_18694, 0); +lean_inc(x_18697); +x_18698 = lean_ctor_get(x_18694, 1); +lean_inc(x_18698); +lean_dec(x_18694); +x_18699 = lean_ctor_get(x_18690, 3); +lean_inc(x_18699); +lean_dec(x_18690); +x_18700 = lean_array_get_size(x_17698); +x_18701 = l_Array_extract___rarg(x_17698, x_18699, x_18700); +lean_dec(x_18700); +lean_dec(x_17698); +x_18702 = lean_array_get_size(x_18698); +x_18703 = lean_unsigned_to_nat(0u); +x_18704 = lean_unsigned_to_nat(1u); +if (lean_is_scalar(x_17699)) { + x_18705 = lean_alloc_ctor(0, 3, 0); +} else { + x_18705 = x_17699; + lean_ctor_set_tag(x_18705, 0); +} +lean_ctor_set(x_18705, 0, x_18703); +lean_ctor_set(x_18705, 1, x_18702); +lean_ctor_set(x_18705, 2, x_18704); +x_18706 = l_Lean_IR_ToIR_lowerLet___closed__27; +x_18707 = l_Std_Range_forIn_x27_loop___at_Lean_IR_ToIR_lowerLet___spec__8(x_18698, x_18701, x_18705, x_18705, x_18706, x_18703, lean_box(0), lean_box(0), x_18696, x_4, x_5, x_18695); +lean_dec(x_18705); +x_18708 = lean_ctor_get(x_18707, 0); +lean_inc(x_18708); +x_18709 = lean_ctor_get(x_18707, 1); +lean_inc(x_18709); +lean_dec(x_18707); +x_18710 = lean_ctor_get(x_18708, 0); +lean_inc(x_18710); +x_18711 = lean_ctor_get(x_18708, 1); +lean_inc(x_18711); +lean_dec(x_18708); +x_18712 = l_Lean_IR_ToIR_lowerLet___lambda__4(x_1, x_2, x_18697, x_18698, x_18701, x_18710, x_18711, x_4, x_5, x_18709); +lean_dec(x_18701); +lean_dec(x_18698); +return x_18712; +} +else +{ +uint8_t x_18713; +lean_dec(x_18690); +lean_dec(x_17699); +lean_dec(x_17698); +lean_dec(x_5); +lean_dec(x_4); +lean_dec(x_2); +lean_dec(x_1); +x_18713 = !lean_is_exclusive(x_18692); +if (x_18713 == 0) +{ +return x_18692; +} +else +{ 
+lean_object* x_18714; lean_object* x_18715; lean_object* x_18716; +x_18714 = lean_ctor_get(x_18692, 0); +x_18715 = lean_ctor_get(x_18692, 1); +lean_inc(x_18715); +lean_inc(x_18714); +lean_dec(x_18692); +x_18716 = lean_alloc_ctor(1, 2, 0); +lean_ctor_set(x_18716, 0, x_18714); +lean_ctor_set(x_18716, 1, x_18715); +return x_18716; +} +} +} +else +{ +lean_object* x_18717; lean_object* x_18718; lean_object* x_18748; lean_object* x_18749; +lean_dec(x_18690); +lean_dec(x_17699); +lean_dec(x_17698); +lean_inc(x_153); +x_18748 = l_Lean_Compiler_LCNF_getMonoDecl_x3f(x_153, x_4, x_5, x_17717); +x_18749 = lean_ctor_get(x_18748, 0); +lean_inc(x_18749); +if (lean_obj_tag(x_18749) == 0) +{ +lean_object* x_18750; lean_object* x_18751; +x_18750 = lean_ctor_get(x_18748, 1); +lean_inc(x_18750); +lean_dec(x_18748); +x_18751 = lean_box(0); +lean_ctor_set(x_17709, 0, x_18751); +x_18717 = x_17709; +x_18718 = x_18750; +goto block_18747; +} +else +{ +uint8_t x_18752; +lean_free_object(x_17709); +x_18752 = !lean_is_exclusive(x_18748); +if (x_18752 == 0) +{ +lean_object* x_18753; lean_object* x_18754; uint8_t x_18755; +x_18753 = lean_ctor_get(x_18748, 1); +x_18754 = lean_ctor_get(x_18748, 0); +lean_dec(x_18754); +x_18755 = !lean_is_exclusive(x_18749); +if (x_18755 == 0) +{ +lean_object* x_18756; lean_object* x_18757; lean_object* x_18758; lean_object* x_18759; uint8_t x_18760; +x_18756 = lean_ctor_get(x_18749, 0); +x_18757 = lean_array_get_size(x_17707); +x_18758 = lean_ctor_get(x_18756, 3); +lean_inc(x_18758); +lean_dec(x_18756); +x_18759 = lean_array_get_size(x_18758); +lean_dec(x_18758); +x_18760 = lean_nat_dec_lt(x_18757, x_18759); +if (x_18760 == 0) +{ +uint8_t x_18761; +x_18761 = lean_nat_dec_eq(x_18757, x_18759); +if (x_18761 == 0) +{ +lean_object* x_18762; lean_object* x_18763; lean_object* x_18764; lean_object* x_18765; lean_object* x_18766; lean_object* x_18767; lean_object* x_18768; lean_object* x_18769; lean_object* x_18770; lean_object* x_18771; lean_object* x_18772; lean_object* x_18773; lean_object* x_18774; lean_object* x_18775; lean_object* x_18776; lean_object* x_18777; +x_18762 = lean_unsigned_to_nat(0u); +x_18763 = l_Array_extract___rarg(x_17707, x_18762, x_18759); +x_18764 = l_Array_extract___rarg(x_17707, x_18759, x_18757); +lean_dec(x_18757); +lean_inc(x_153); +lean_ctor_set_tag(x_18748, 6); +lean_ctor_set(x_18748, 1, x_18763); +lean_ctor_set(x_18748, 0, x_153); +x_18765 = lean_ctor_get(x_1, 0); +lean_inc(x_18765); +x_18766 = l_Lean_IR_ToIR_bindVar(x_18765, x_17713, x_4, x_5, x_18753); +x_18767 = lean_ctor_get(x_18766, 0); +lean_inc(x_18767); +x_18768 = lean_ctor_get(x_18766, 1); +lean_inc(x_18768); +lean_dec(x_18766); +x_18769 = lean_ctor_get(x_18767, 0); +lean_inc(x_18769); +x_18770 = lean_ctor_get(x_18767, 1); +lean_inc(x_18770); +lean_dec(x_18767); +x_18771 = l_Lean_IR_ToIR_newVar(x_18770, x_4, x_5, x_18768); +x_18772 = lean_ctor_get(x_18771, 0); +lean_inc(x_18772); +x_18773 = lean_ctor_get(x_18771, 1); +lean_inc(x_18773); +lean_dec(x_18771); +x_18774 = lean_ctor_get(x_18772, 0); +lean_inc(x_18774); +x_18775 = lean_ctor_get(x_18772, 1); +lean_inc(x_18775); +lean_dec(x_18772); +x_18776 = lean_ctor_get(x_1, 2); +lean_inc(x_18776); +lean_inc(x_5); +lean_inc(x_4); +x_18777 = l_Lean_IR_ToIR_lowerType(x_18776, x_18775, x_4, x_5, x_18773); +if (lean_obj_tag(x_18777) == 0) +{ +lean_object* x_18778; lean_object* x_18779; lean_object* x_18780; lean_object* x_18781; lean_object* x_18782; +x_18778 = lean_ctor_get(x_18777, 0); +lean_inc(x_18778); +x_18779 = lean_ctor_get(x_18777, 1); 
+lean_inc(x_18779); +lean_dec(x_18777); +x_18780 = lean_ctor_get(x_18778, 0); +lean_inc(x_18780); +x_18781 = lean_ctor_get(x_18778, 1); +lean_inc(x_18781); +lean_dec(x_18778); +lean_inc(x_5); +lean_inc(x_4); +lean_inc(x_2); +x_18782 = l_Lean_IR_ToIR_lowerLet___lambda__3(x_2, x_18774, x_18764, x_18769, x_18748, x_18780, x_18781, x_4, x_5, x_18779); +if (lean_obj_tag(x_18782) == 0) +{ +lean_object* x_18783; lean_object* x_18784; uint8_t x_18785; +x_18783 = lean_ctor_get(x_18782, 0); +lean_inc(x_18783); +x_18784 = lean_ctor_get(x_18782, 1); +lean_inc(x_18784); +lean_dec(x_18782); +x_18785 = !lean_is_exclusive(x_18783); +if (x_18785 == 0) +{ +lean_object* x_18786; +x_18786 = lean_ctor_get(x_18783, 0); +lean_ctor_set(x_18749, 0, x_18786); +lean_ctor_set(x_18783, 0, x_18749); +x_18717 = x_18783; +x_18718 = x_18784; +goto block_18747; +} +else +{ +lean_object* x_18787; lean_object* x_18788; lean_object* x_18789; +x_18787 = lean_ctor_get(x_18783, 0); +x_18788 = lean_ctor_get(x_18783, 1); +lean_inc(x_18788); +lean_inc(x_18787); +lean_dec(x_18783); +lean_ctor_set(x_18749, 0, x_18787); +x_18789 = lean_alloc_ctor(0, 2, 0); +lean_ctor_set(x_18789, 0, x_18749); +lean_ctor_set(x_18789, 1, x_18788); +x_18717 = x_18789; +x_18718 = x_18784; +goto block_18747; +} +} +else +{ +uint8_t x_18790; +lean_free_object(x_18749); +lean_dec(x_17718); +lean_dec(x_17707); +lean_dec(x_153); +lean_dec(x_5); +lean_dec(x_4); +lean_dec(x_2); +lean_dec(x_1); +x_18790 = !lean_is_exclusive(x_18782); +if (x_18790 == 0) +{ +return x_18782; +} +else +{ +lean_object* x_18791; lean_object* x_18792; lean_object* x_18793; +x_18791 = lean_ctor_get(x_18782, 0); +x_18792 = lean_ctor_get(x_18782, 1); +lean_inc(x_18792); +lean_inc(x_18791); +lean_dec(x_18782); +x_18793 = lean_alloc_ctor(1, 2, 0); +lean_ctor_set(x_18793, 0, x_18791); +lean_ctor_set(x_18793, 1, x_18792); +return x_18793; +} +} +} +else +{ +uint8_t x_18794; +lean_dec(x_18774); +lean_dec(x_18769); +lean_dec(x_18748); +lean_dec(x_18764); +lean_free_object(x_18749); +lean_dec(x_17718); +lean_dec(x_17707); +lean_dec(x_153); +lean_dec(x_5); +lean_dec(x_4); +lean_dec(x_2); +lean_dec(x_1); +x_18794 = !lean_is_exclusive(x_18777); +if (x_18794 == 0) +{ +return x_18777; +} +else +{ +lean_object* x_18795; lean_object* x_18796; lean_object* x_18797; +x_18795 = lean_ctor_get(x_18777, 0); +x_18796 = lean_ctor_get(x_18777, 1); +lean_inc(x_18796); +lean_inc(x_18795); +lean_dec(x_18777); +x_18797 = lean_alloc_ctor(1, 2, 0); +lean_ctor_set(x_18797, 0, x_18795); +lean_ctor_set(x_18797, 1, x_18796); +return x_18797; +} +} +} +else +{ +lean_object* x_18798; lean_object* x_18799; lean_object* x_18800; lean_object* x_18801; lean_object* x_18802; lean_object* x_18803; lean_object* x_18804; lean_object* x_18805; +lean_dec(x_18759); +lean_dec(x_18757); +lean_inc(x_17707); +lean_inc(x_153); +lean_ctor_set_tag(x_18748, 6); +lean_ctor_set(x_18748, 1, x_17707); +lean_ctor_set(x_18748, 0, x_153); +x_18798 = lean_ctor_get(x_1, 0); +lean_inc(x_18798); +x_18799 = l_Lean_IR_ToIR_bindVar(x_18798, x_17713, x_4, x_5, x_18753); +x_18800 = lean_ctor_get(x_18799, 0); +lean_inc(x_18800); +x_18801 = lean_ctor_get(x_18799, 1); +lean_inc(x_18801); +lean_dec(x_18799); +x_18802 = lean_ctor_get(x_18800, 0); +lean_inc(x_18802); +x_18803 = lean_ctor_get(x_18800, 1); +lean_inc(x_18803); +lean_dec(x_18800); +x_18804 = lean_ctor_get(x_1, 2); +lean_inc(x_18804); +lean_inc(x_5); +lean_inc(x_4); +x_18805 = l_Lean_IR_ToIR_lowerType(x_18804, x_18803, x_4, x_5, x_18801); +if (lean_obj_tag(x_18805) == 0) +{ +lean_object* x_18806; 
lean_object* x_18807; lean_object* x_18808; lean_object* x_18809; lean_object* x_18810; +x_18806 = lean_ctor_get(x_18805, 0); +lean_inc(x_18806); +x_18807 = lean_ctor_get(x_18805, 1); +lean_inc(x_18807); +lean_dec(x_18805); +x_18808 = lean_ctor_get(x_18806, 0); +lean_inc(x_18808); +x_18809 = lean_ctor_get(x_18806, 1); +lean_inc(x_18809); +lean_dec(x_18806); +lean_inc(x_5); +lean_inc(x_4); +lean_inc(x_2); +x_18810 = l_Lean_IR_ToIR_lowerLet___lambda__1(x_2, x_18802, x_18748, x_18808, x_18809, x_4, x_5, x_18807); +if (lean_obj_tag(x_18810) == 0) +{ +lean_object* x_18811; lean_object* x_18812; uint8_t x_18813; +x_18811 = lean_ctor_get(x_18810, 0); +lean_inc(x_18811); +x_18812 = lean_ctor_get(x_18810, 1); +lean_inc(x_18812); +lean_dec(x_18810); +x_18813 = !lean_is_exclusive(x_18811); +if (x_18813 == 0) +{ +lean_object* x_18814; +x_18814 = lean_ctor_get(x_18811, 0); +lean_ctor_set(x_18749, 0, x_18814); +lean_ctor_set(x_18811, 0, x_18749); +x_18717 = x_18811; +x_18718 = x_18812; +goto block_18747; +} +else +{ +lean_object* x_18815; lean_object* x_18816; lean_object* x_18817; +x_18815 = lean_ctor_get(x_18811, 0); +x_18816 = lean_ctor_get(x_18811, 1); +lean_inc(x_18816); +lean_inc(x_18815); +lean_dec(x_18811); +lean_ctor_set(x_18749, 0, x_18815); +x_18817 = lean_alloc_ctor(0, 2, 0); +lean_ctor_set(x_18817, 0, x_18749); +lean_ctor_set(x_18817, 1, x_18816); +x_18717 = x_18817; +x_18718 = x_18812; +goto block_18747; +} +} +else +{ +uint8_t x_18818; +lean_free_object(x_18749); +lean_dec(x_17718); +lean_dec(x_17707); +lean_dec(x_153); +lean_dec(x_5); +lean_dec(x_4); +lean_dec(x_2); +lean_dec(x_1); +x_18818 = !lean_is_exclusive(x_18810); +if (x_18818 == 0) +{ +return x_18810; +} +else +{ +lean_object* x_18819; lean_object* x_18820; lean_object* x_18821; +x_18819 = lean_ctor_get(x_18810, 0); +x_18820 = lean_ctor_get(x_18810, 1); +lean_inc(x_18820); +lean_inc(x_18819); +lean_dec(x_18810); +x_18821 = lean_alloc_ctor(1, 2, 0); +lean_ctor_set(x_18821, 0, x_18819); +lean_ctor_set(x_18821, 1, x_18820); +return x_18821; +} +} +} +else +{ +uint8_t x_18822; +lean_dec(x_18802); +lean_dec(x_18748); +lean_free_object(x_18749); +lean_dec(x_17718); +lean_dec(x_17707); +lean_dec(x_153); +lean_dec(x_5); +lean_dec(x_4); +lean_dec(x_2); +lean_dec(x_1); +x_18822 = !lean_is_exclusive(x_18805); +if (x_18822 == 0) +{ +return x_18805; +} +else +{ +lean_object* x_18823; lean_object* x_18824; lean_object* x_18825; +x_18823 = lean_ctor_get(x_18805, 0); +x_18824 = lean_ctor_get(x_18805, 1); +lean_inc(x_18824); +lean_inc(x_18823); +lean_dec(x_18805); +x_18825 = lean_alloc_ctor(1, 2, 0); +lean_ctor_set(x_18825, 0, x_18823); +lean_ctor_set(x_18825, 1, x_18824); +return x_18825; +} +} +} +} +else +{ +lean_object* x_18826; lean_object* x_18827; lean_object* x_18828; lean_object* x_18829; lean_object* x_18830; lean_object* x_18831; lean_object* x_18832; lean_object* x_18833; +lean_dec(x_18759); +lean_dec(x_18757); +lean_inc(x_17707); +lean_inc(x_153); +lean_ctor_set_tag(x_18748, 7); +lean_ctor_set(x_18748, 1, x_17707); +lean_ctor_set(x_18748, 0, x_153); +x_18826 = lean_ctor_get(x_1, 0); +lean_inc(x_18826); +x_18827 = l_Lean_IR_ToIR_bindVar(x_18826, x_17713, x_4, x_5, x_18753); +x_18828 = lean_ctor_get(x_18827, 0); +lean_inc(x_18828); +x_18829 = lean_ctor_get(x_18827, 1); +lean_inc(x_18829); +lean_dec(x_18827); +x_18830 = lean_ctor_get(x_18828, 0); +lean_inc(x_18830); +x_18831 = lean_ctor_get(x_18828, 1); +lean_inc(x_18831); +lean_dec(x_18828); +x_18832 = lean_box(7); +lean_inc(x_5); +lean_inc(x_4); +lean_inc(x_2); +x_18833 = 
l_Lean_IR_ToIR_lowerLet___lambda__1(x_2, x_18830, x_18748, x_18832, x_18831, x_4, x_5, x_18829); +if (lean_obj_tag(x_18833) == 0) +{ +lean_object* x_18834; lean_object* x_18835; uint8_t x_18836; +x_18834 = lean_ctor_get(x_18833, 0); +lean_inc(x_18834); +x_18835 = lean_ctor_get(x_18833, 1); +lean_inc(x_18835); +lean_dec(x_18833); +x_18836 = !lean_is_exclusive(x_18834); +if (x_18836 == 0) +{ +lean_object* x_18837; +x_18837 = lean_ctor_get(x_18834, 0); +lean_ctor_set(x_18749, 0, x_18837); +lean_ctor_set(x_18834, 0, x_18749); +x_18717 = x_18834; +x_18718 = x_18835; +goto block_18747; +} +else +{ +lean_object* x_18838; lean_object* x_18839; lean_object* x_18840; +x_18838 = lean_ctor_get(x_18834, 0); +x_18839 = lean_ctor_get(x_18834, 1); +lean_inc(x_18839); +lean_inc(x_18838); +lean_dec(x_18834); +lean_ctor_set(x_18749, 0, x_18838); +x_18840 = lean_alloc_ctor(0, 2, 0); +lean_ctor_set(x_18840, 0, x_18749); +lean_ctor_set(x_18840, 1, x_18839); +x_18717 = x_18840; +x_18718 = x_18835; +goto block_18747; +} +} +else +{ +uint8_t x_18841; +lean_free_object(x_18749); +lean_dec(x_17718); +lean_dec(x_17707); +lean_dec(x_153); +lean_dec(x_5); +lean_dec(x_4); +lean_dec(x_2); +lean_dec(x_1); +x_18841 = !lean_is_exclusive(x_18833); +if (x_18841 == 0) +{ +return x_18833; +} +else +{ +lean_object* x_18842; lean_object* x_18843; lean_object* x_18844; +x_18842 = lean_ctor_get(x_18833, 0); +x_18843 = lean_ctor_get(x_18833, 1); +lean_inc(x_18843); +lean_inc(x_18842); +lean_dec(x_18833); +x_18844 = lean_alloc_ctor(1, 2, 0); +lean_ctor_set(x_18844, 0, x_18842); +lean_ctor_set(x_18844, 1, x_18843); +return x_18844; +} +} +} +} +else +{ +lean_object* x_18845; lean_object* x_18846; lean_object* x_18847; lean_object* x_18848; uint8_t x_18849; +x_18845 = lean_ctor_get(x_18749, 0); +lean_inc(x_18845); +lean_dec(x_18749); +x_18846 = lean_array_get_size(x_17707); +x_18847 = lean_ctor_get(x_18845, 3); +lean_inc(x_18847); +lean_dec(x_18845); +x_18848 = lean_array_get_size(x_18847); +lean_dec(x_18847); +x_18849 = lean_nat_dec_lt(x_18846, x_18848); +if (x_18849 == 0) +{ +uint8_t x_18850; +x_18850 = lean_nat_dec_eq(x_18846, x_18848); +if (x_18850 == 0) +{ +lean_object* x_18851; lean_object* x_18852; lean_object* x_18853; lean_object* x_18854; lean_object* x_18855; lean_object* x_18856; lean_object* x_18857; lean_object* x_18858; lean_object* x_18859; lean_object* x_18860; lean_object* x_18861; lean_object* x_18862; lean_object* x_18863; lean_object* x_18864; lean_object* x_18865; lean_object* x_18866; +x_18851 = lean_unsigned_to_nat(0u); +x_18852 = l_Array_extract___rarg(x_17707, x_18851, x_18848); +x_18853 = l_Array_extract___rarg(x_17707, x_18848, x_18846); +lean_dec(x_18846); +lean_inc(x_153); +lean_ctor_set_tag(x_18748, 6); +lean_ctor_set(x_18748, 1, x_18852); +lean_ctor_set(x_18748, 0, x_153); +x_18854 = lean_ctor_get(x_1, 0); +lean_inc(x_18854); +x_18855 = l_Lean_IR_ToIR_bindVar(x_18854, x_17713, x_4, x_5, x_18753); +x_18856 = lean_ctor_get(x_18855, 0); +lean_inc(x_18856); +x_18857 = lean_ctor_get(x_18855, 1); +lean_inc(x_18857); +lean_dec(x_18855); +x_18858 = lean_ctor_get(x_18856, 0); +lean_inc(x_18858); +x_18859 = lean_ctor_get(x_18856, 1); +lean_inc(x_18859); +lean_dec(x_18856); +x_18860 = l_Lean_IR_ToIR_newVar(x_18859, x_4, x_5, x_18857); +x_18861 = lean_ctor_get(x_18860, 0); +lean_inc(x_18861); +x_18862 = lean_ctor_get(x_18860, 1); +lean_inc(x_18862); +lean_dec(x_18860); +x_18863 = lean_ctor_get(x_18861, 0); +lean_inc(x_18863); +x_18864 = lean_ctor_get(x_18861, 1); +lean_inc(x_18864); +lean_dec(x_18861); +x_18865 = 
lean_ctor_get(x_1, 2); +lean_inc(x_18865); +lean_inc(x_5); +lean_inc(x_4); +x_18866 = l_Lean_IR_ToIR_lowerType(x_18865, x_18864, x_4, x_5, x_18862); +if (lean_obj_tag(x_18866) == 0) +{ +lean_object* x_18867; lean_object* x_18868; lean_object* x_18869; lean_object* x_18870; lean_object* x_18871; +x_18867 = lean_ctor_get(x_18866, 0); +lean_inc(x_18867); +x_18868 = lean_ctor_get(x_18866, 1); +lean_inc(x_18868); +lean_dec(x_18866); +x_18869 = lean_ctor_get(x_18867, 0); +lean_inc(x_18869); +x_18870 = lean_ctor_get(x_18867, 1); +lean_inc(x_18870); +lean_dec(x_18867); +lean_inc(x_5); +lean_inc(x_4); +lean_inc(x_2); +x_18871 = l_Lean_IR_ToIR_lowerLet___lambda__3(x_2, x_18863, x_18853, x_18858, x_18748, x_18869, x_18870, x_4, x_5, x_18868); +if (lean_obj_tag(x_18871) == 0) +{ +lean_object* x_18872; lean_object* x_18873; lean_object* x_18874; lean_object* x_18875; lean_object* x_18876; lean_object* x_18877; lean_object* x_18878; +x_18872 = lean_ctor_get(x_18871, 0); +lean_inc(x_18872); +x_18873 = lean_ctor_get(x_18871, 1); +lean_inc(x_18873); +lean_dec(x_18871); +x_18874 = lean_ctor_get(x_18872, 0); +lean_inc(x_18874); +x_18875 = lean_ctor_get(x_18872, 1); +lean_inc(x_18875); +if (lean_is_exclusive(x_18872)) { + lean_ctor_release(x_18872, 0); + lean_ctor_release(x_18872, 1); + x_18876 = x_18872; +} else { + lean_dec_ref(x_18872); + x_18876 = lean_box(0); +} +x_18877 = lean_alloc_ctor(1, 1, 0); +lean_ctor_set(x_18877, 0, x_18874); +if (lean_is_scalar(x_18876)) { + x_18878 = lean_alloc_ctor(0, 2, 0); +} else { + x_18878 = x_18876; +} +lean_ctor_set(x_18878, 0, x_18877); +lean_ctor_set(x_18878, 1, x_18875); +x_18717 = x_18878; +x_18718 = x_18873; +goto block_18747; +} +else +{ +lean_object* x_18879; lean_object* x_18880; lean_object* x_18881; lean_object* x_18882; +lean_dec(x_17718); +lean_dec(x_17707); +lean_dec(x_153); +lean_dec(x_5); +lean_dec(x_4); +lean_dec(x_2); +lean_dec(x_1); +x_18879 = lean_ctor_get(x_18871, 0); +lean_inc(x_18879); +x_18880 = lean_ctor_get(x_18871, 1); +lean_inc(x_18880); +if (lean_is_exclusive(x_18871)) { + lean_ctor_release(x_18871, 0); + lean_ctor_release(x_18871, 1); + x_18881 = x_18871; +} else { + lean_dec_ref(x_18871); + x_18881 = lean_box(0); +} +if (lean_is_scalar(x_18881)) { + x_18882 = lean_alloc_ctor(1, 2, 0); +} else { + x_18882 = x_18881; +} +lean_ctor_set(x_18882, 0, x_18879); +lean_ctor_set(x_18882, 1, x_18880); +return x_18882; +} +} +else +{ +lean_object* x_18883; lean_object* x_18884; lean_object* x_18885; lean_object* x_18886; +lean_dec(x_18863); +lean_dec(x_18858); +lean_dec(x_18748); +lean_dec(x_18853); +lean_dec(x_17718); +lean_dec(x_17707); +lean_dec(x_153); +lean_dec(x_5); +lean_dec(x_4); +lean_dec(x_2); +lean_dec(x_1); +x_18883 = lean_ctor_get(x_18866, 0); +lean_inc(x_18883); +x_18884 = lean_ctor_get(x_18866, 1); +lean_inc(x_18884); +if (lean_is_exclusive(x_18866)) { + lean_ctor_release(x_18866, 0); + lean_ctor_release(x_18866, 1); + x_18885 = x_18866; +} else { + lean_dec_ref(x_18866); + x_18885 = lean_box(0); +} +if (lean_is_scalar(x_18885)) { + x_18886 = lean_alloc_ctor(1, 2, 0); +} else { + x_18886 = x_18885; +} +lean_ctor_set(x_18886, 0, x_18883); +lean_ctor_set(x_18886, 1, x_18884); +return x_18886; +} +} +else +{ +lean_object* x_18887; lean_object* x_18888; lean_object* x_18889; lean_object* x_18890; lean_object* x_18891; lean_object* x_18892; lean_object* x_18893; lean_object* x_18894; +lean_dec(x_18848); +lean_dec(x_18846); +lean_inc(x_17707); +lean_inc(x_153); +lean_ctor_set_tag(x_18748, 6); +lean_ctor_set(x_18748, 1, x_17707); 
+lean_ctor_set(x_18748, 0, x_153); +x_18887 = lean_ctor_get(x_1, 0); +lean_inc(x_18887); +x_18888 = l_Lean_IR_ToIR_bindVar(x_18887, x_17713, x_4, x_5, x_18753); +x_18889 = lean_ctor_get(x_18888, 0); +lean_inc(x_18889); +x_18890 = lean_ctor_get(x_18888, 1); +lean_inc(x_18890); +lean_dec(x_18888); +x_18891 = lean_ctor_get(x_18889, 0); +lean_inc(x_18891); +x_18892 = lean_ctor_get(x_18889, 1); +lean_inc(x_18892); +lean_dec(x_18889); +x_18893 = lean_ctor_get(x_1, 2); +lean_inc(x_18893); +lean_inc(x_5); +lean_inc(x_4); +x_18894 = l_Lean_IR_ToIR_lowerType(x_18893, x_18892, x_4, x_5, x_18890); +if (lean_obj_tag(x_18894) == 0) +{ +lean_object* x_18895; lean_object* x_18896; lean_object* x_18897; lean_object* x_18898; lean_object* x_18899; +x_18895 = lean_ctor_get(x_18894, 0); +lean_inc(x_18895); +x_18896 = lean_ctor_get(x_18894, 1); +lean_inc(x_18896); +lean_dec(x_18894); +x_18897 = lean_ctor_get(x_18895, 0); +lean_inc(x_18897); +x_18898 = lean_ctor_get(x_18895, 1); +lean_inc(x_18898); +lean_dec(x_18895); +lean_inc(x_5); +lean_inc(x_4); +lean_inc(x_2); +x_18899 = l_Lean_IR_ToIR_lowerLet___lambda__1(x_2, x_18891, x_18748, x_18897, x_18898, x_4, x_5, x_18896); +if (lean_obj_tag(x_18899) == 0) +{ +lean_object* x_18900; lean_object* x_18901; lean_object* x_18902; lean_object* x_18903; lean_object* x_18904; lean_object* x_18905; lean_object* x_18906; +x_18900 = lean_ctor_get(x_18899, 0); +lean_inc(x_18900); +x_18901 = lean_ctor_get(x_18899, 1); +lean_inc(x_18901); +lean_dec(x_18899); +x_18902 = lean_ctor_get(x_18900, 0); +lean_inc(x_18902); +x_18903 = lean_ctor_get(x_18900, 1); +lean_inc(x_18903); +if (lean_is_exclusive(x_18900)) { + lean_ctor_release(x_18900, 0); + lean_ctor_release(x_18900, 1); + x_18904 = x_18900; +} else { + lean_dec_ref(x_18900); + x_18904 = lean_box(0); +} +x_18905 = lean_alloc_ctor(1, 1, 0); +lean_ctor_set(x_18905, 0, x_18902); +if (lean_is_scalar(x_18904)) { + x_18906 = lean_alloc_ctor(0, 2, 0); +} else { + x_18906 = x_18904; +} +lean_ctor_set(x_18906, 0, x_18905); +lean_ctor_set(x_18906, 1, x_18903); +x_18717 = x_18906; +x_18718 = x_18901; +goto block_18747; +} +else +{ +lean_object* x_18907; lean_object* x_18908; lean_object* x_18909; lean_object* x_18910; +lean_dec(x_17718); +lean_dec(x_17707); +lean_dec(x_153); +lean_dec(x_5); +lean_dec(x_4); +lean_dec(x_2); +lean_dec(x_1); +x_18907 = lean_ctor_get(x_18899, 0); +lean_inc(x_18907); +x_18908 = lean_ctor_get(x_18899, 1); +lean_inc(x_18908); +if (lean_is_exclusive(x_18899)) { + lean_ctor_release(x_18899, 0); + lean_ctor_release(x_18899, 1); + x_18909 = x_18899; +} else { + lean_dec_ref(x_18899); + x_18909 = lean_box(0); +} +if (lean_is_scalar(x_18909)) { + x_18910 = lean_alloc_ctor(1, 2, 0); +} else { + x_18910 = x_18909; +} +lean_ctor_set(x_18910, 0, x_18907); +lean_ctor_set(x_18910, 1, x_18908); +return x_18910; +} +} +else +{ +lean_object* x_18911; lean_object* x_18912; lean_object* x_18913; lean_object* x_18914; +lean_dec(x_18891); +lean_dec(x_18748); +lean_dec(x_17718); +lean_dec(x_17707); +lean_dec(x_153); +lean_dec(x_5); +lean_dec(x_4); +lean_dec(x_2); +lean_dec(x_1); +x_18911 = lean_ctor_get(x_18894, 0); +lean_inc(x_18911); +x_18912 = lean_ctor_get(x_18894, 1); +lean_inc(x_18912); +if (lean_is_exclusive(x_18894)) { + lean_ctor_release(x_18894, 0); + lean_ctor_release(x_18894, 1); + x_18913 = x_18894; +} else { + lean_dec_ref(x_18894); + x_18913 = lean_box(0); +} +if (lean_is_scalar(x_18913)) { + x_18914 = lean_alloc_ctor(1, 2, 0); +} else { + x_18914 = x_18913; +} +lean_ctor_set(x_18914, 0, x_18911); 
+lean_ctor_set(x_18914, 1, x_18912); +return x_18914; +} +} +} +else +{ +lean_object* x_18915; lean_object* x_18916; lean_object* x_18917; lean_object* x_18918; lean_object* x_18919; lean_object* x_18920; lean_object* x_18921; lean_object* x_18922; +lean_dec(x_18848); +lean_dec(x_18846); +lean_inc(x_17707); +lean_inc(x_153); +lean_ctor_set_tag(x_18748, 7); +lean_ctor_set(x_18748, 1, x_17707); +lean_ctor_set(x_18748, 0, x_153); +x_18915 = lean_ctor_get(x_1, 0); +lean_inc(x_18915); +x_18916 = l_Lean_IR_ToIR_bindVar(x_18915, x_17713, x_4, x_5, x_18753); +x_18917 = lean_ctor_get(x_18916, 0); +lean_inc(x_18917); +x_18918 = lean_ctor_get(x_18916, 1); +lean_inc(x_18918); +lean_dec(x_18916); +x_18919 = lean_ctor_get(x_18917, 0); +lean_inc(x_18919); +x_18920 = lean_ctor_get(x_18917, 1); +lean_inc(x_18920); +lean_dec(x_18917); +x_18921 = lean_box(7); +lean_inc(x_5); +lean_inc(x_4); +lean_inc(x_2); +x_18922 = l_Lean_IR_ToIR_lowerLet___lambda__1(x_2, x_18919, x_18748, x_18921, x_18920, x_4, x_5, x_18918); +if (lean_obj_tag(x_18922) == 0) +{ +lean_object* x_18923; lean_object* x_18924; lean_object* x_18925; lean_object* x_18926; lean_object* x_18927; lean_object* x_18928; lean_object* x_18929; +x_18923 = lean_ctor_get(x_18922, 0); +lean_inc(x_18923); +x_18924 = lean_ctor_get(x_18922, 1); +lean_inc(x_18924); +lean_dec(x_18922); +x_18925 = lean_ctor_get(x_18923, 0); +lean_inc(x_18925); +x_18926 = lean_ctor_get(x_18923, 1); +lean_inc(x_18926); +if (lean_is_exclusive(x_18923)) { + lean_ctor_release(x_18923, 0); + lean_ctor_release(x_18923, 1); + x_18927 = x_18923; +} else { + lean_dec_ref(x_18923); + x_18927 = lean_box(0); +} +x_18928 = lean_alloc_ctor(1, 1, 0); +lean_ctor_set(x_18928, 0, x_18925); +if (lean_is_scalar(x_18927)) { + x_18929 = lean_alloc_ctor(0, 2, 0); +} else { + x_18929 = x_18927; +} +lean_ctor_set(x_18929, 0, x_18928); +lean_ctor_set(x_18929, 1, x_18926); +x_18717 = x_18929; +x_18718 = x_18924; +goto block_18747; +} +else +{ +lean_object* x_18930; lean_object* x_18931; lean_object* x_18932; lean_object* x_18933; +lean_dec(x_17718); +lean_dec(x_17707); +lean_dec(x_153); +lean_dec(x_5); +lean_dec(x_4); +lean_dec(x_2); +lean_dec(x_1); +x_18930 = lean_ctor_get(x_18922, 0); +lean_inc(x_18930); +x_18931 = lean_ctor_get(x_18922, 1); +lean_inc(x_18931); +if (lean_is_exclusive(x_18922)) { + lean_ctor_release(x_18922, 0); + lean_ctor_release(x_18922, 1); + x_18932 = x_18922; +} else { + lean_dec_ref(x_18922); + x_18932 = lean_box(0); +} +if (lean_is_scalar(x_18932)) { + x_18933 = lean_alloc_ctor(1, 2, 0); +} else { + x_18933 = x_18932; +} +lean_ctor_set(x_18933, 0, x_18930); +lean_ctor_set(x_18933, 1, x_18931); +return x_18933; +} +} +} +} +else +{ +lean_object* x_18934; lean_object* x_18935; lean_object* x_18936; lean_object* x_18937; lean_object* x_18938; lean_object* x_18939; uint8_t x_18940; +x_18934 = lean_ctor_get(x_18748, 1); +lean_inc(x_18934); +lean_dec(x_18748); +x_18935 = lean_ctor_get(x_18749, 0); +lean_inc(x_18935); +if (lean_is_exclusive(x_18749)) { + lean_ctor_release(x_18749, 0); + x_18936 = x_18749; +} else { + lean_dec_ref(x_18749); + x_18936 = lean_box(0); +} +x_18937 = lean_array_get_size(x_17707); +x_18938 = lean_ctor_get(x_18935, 3); +lean_inc(x_18938); +lean_dec(x_18935); +x_18939 = lean_array_get_size(x_18938); +lean_dec(x_18938); +x_18940 = lean_nat_dec_lt(x_18937, x_18939); +if (x_18940 == 0) +{ +uint8_t x_18941; +x_18941 = lean_nat_dec_eq(x_18937, x_18939); +if (x_18941 == 0) +{ +lean_object* x_18942; lean_object* x_18943; lean_object* x_18944; lean_object* x_18945; 
lean_object* x_18946; lean_object* x_18947; lean_object* x_18948; lean_object* x_18949; lean_object* x_18950; lean_object* x_18951; lean_object* x_18952; lean_object* x_18953; lean_object* x_18954; lean_object* x_18955; lean_object* x_18956; lean_object* x_18957; lean_object* x_18958; +x_18942 = lean_unsigned_to_nat(0u); +x_18943 = l_Array_extract___rarg(x_17707, x_18942, x_18939); +x_18944 = l_Array_extract___rarg(x_17707, x_18939, x_18937); +lean_dec(x_18937); +lean_inc(x_153); +x_18945 = lean_alloc_ctor(6, 2, 0); +lean_ctor_set(x_18945, 0, x_153); +lean_ctor_set(x_18945, 1, x_18943); +x_18946 = lean_ctor_get(x_1, 0); +lean_inc(x_18946); +x_18947 = l_Lean_IR_ToIR_bindVar(x_18946, x_17713, x_4, x_5, x_18934); +x_18948 = lean_ctor_get(x_18947, 0); +lean_inc(x_18948); +x_18949 = lean_ctor_get(x_18947, 1); +lean_inc(x_18949); +lean_dec(x_18947); +x_18950 = lean_ctor_get(x_18948, 0); +lean_inc(x_18950); +x_18951 = lean_ctor_get(x_18948, 1); +lean_inc(x_18951); +lean_dec(x_18948); +x_18952 = l_Lean_IR_ToIR_newVar(x_18951, x_4, x_5, x_18949); +x_18953 = lean_ctor_get(x_18952, 0); +lean_inc(x_18953); +x_18954 = lean_ctor_get(x_18952, 1); +lean_inc(x_18954); +lean_dec(x_18952); +x_18955 = lean_ctor_get(x_18953, 0); +lean_inc(x_18955); +x_18956 = lean_ctor_get(x_18953, 1); +lean_inc(x_18956); +lean_dec(x_18953); +x_18957 = lean_ctor_get(x_1, 2); +lean_inc(x_18957); +lean_inc(x_5); +lean_inc(x_4); +x_18958 = l_Lean_IR_ToIR_lowerType(x_18957, x_18956, x_4, x_5, x_18954); +if (lean_obj_tag(x_18958) == 0) +{ +lean_object* x_18959; lean_object* x_18960; lean_object* x_18961; lean_object* x_18962; lean_object* x_18963; +x_18959 = lean_ctor_get(x_18958, 0); +lean_inc(x_18959); +x_18960 = lean_ctor_get(x_18958, 1); +lean_inc(x_18960); +lean_dec(x_18958); +x_18961 = lean_ctor_get(x_18959, 0); +lean_inc(x_18961); +x_18962 = lean_ctor_get(x_18959, 1); +lean_inc(x_18962); +lean_dec(x_18959); +lean_inc(x_5); +lean_inc(x_4); +lean_inc(x_2); +x_18963 = l_Lean_IR_ToIR_lowerLet___lambda__3(x_2, x_18955, x_18944, x_18950, x_18945, x_18961, x_18962, x_4, x_5, x_18960); +if (lean_obj_tag(x_18963) == 0) +{ +lean_object* x_18964; lean_object* x_18965; lean_object* x_18966; lean_object* x_18967; lean_object* x_18968; lean_object* x_18969; lean_object* x_18970; +x_18964 = lean_ctor_get(x_18963, 0); +lean_inc(x_18964); +x_18965 = lean_ctor_get(x_18963, 1); +lean_inc(x_18965); +lean_dec(x_18963); +x_18966 = lean_ctor_get(x_18964, 0); +lean_inc(x_18966); +x_18967 = lean_ctor_get(x_18964, 1); +lean_inc(x_18967); +if (lean_is_exclusive(x_18964)) { + lean_ctor_release(x_18964, 0); + lean_ctor_release(x_18964, 1); + x_18968 = x_18964; +} else { + lean_dec_ref(x_18964); + x_18968 = lean_box(0); +} +if (lean_is_scalar(x_18936)) { + x_18969 = lean_alloc_ctor(1, 1, 0); +} else { + x_18969 = x_18936; +} +lean_ctor_set(x_18969, 0, x_18966); +if (lean_is_scalar(x_18968)) { + x_18970 = lean_alloc_ctor(0, 2, 0); +} else { + x_18970 = x_18968; +} +lean_ctor_set(x_18970, 0, x_18969); +lean_ctor_set(x_18970, 1, x_18967); +x_18717 = x_18970; +x_18718 = x_18965; +goto block_18747; +} +else +{ +lean_object* x_18971; lean_object* x_18972; lean_object* x_18973; lean_object* x_18974; +lean_dec(x_18936); +lean_dec(x_17718); +lean_dec(x_17707); +lean_dec(x_153); +lean_dec(x_5); +lean_dec(x_4); +lean_dec(x_2); +lean_dec(x_1); +x_18971 = lean_ctor_get(x_18963, 0); +lean_inc(x_18971); +x_18972 = lean_ctor_get(x_18963, 1); +lean_inc(x_18972); +if (lean_is_exclusive(x_18963)) { + lean_ctor_release(x_18963, 0); + lean_ctor_release(x_18963, 1); + x_18973 
= x_18963; +} else { + lean_dec_ref(x_18963); + x_18973 = lean_box(0); +} +if (lean_is_scalar(x_18973)) { + x_18974 = lean_alloc_ctor(1, 2, 0); +} else { + x_18974 = x_18973; +} +lean_ctor_set(x_18974, 0, x_18971); +lean_ctor_set(x_18974, 1, x_18972); +return x_18974; +} +} +else +{ +lean_object* x_18975; lean_object* x_18976; lean_object* x_18977; lean_object* x_18978; +lean_dec(x_18955); +lean_dec(x_18950); +lean_dec(x_18945); +lean_dec(x_18944); +lean_dec(x_18936); +lean_dec(x_17718); +lean_dec(x_17707); +lean_dec(x_153); +lean_dec(x_5); +lean_dec(x_4); +lean_dec(x_2); +lean_dec(x_1); +x_18975 = lean_ctor_get(x_18958, 0); +lean_inc(x_18975); +x_18976 = lean_ctor_get(x_18958, 1); +lean_inc(x_18976); +if (lean_is_exclusive(x_18958)) { + lean_ctor_release(x_18958, 0); + lean_ctor_release(x_18958, 1); + x_18977 = x_18958; +} else { + lean_dec_ref(x_18958); + x_18977 = lean_box(0); +} +if (lean_is_scalar(x_18977)) { + x_18978 = lean_alloc_ctor(1, 2, 0); +} else { + x_18978 = x_18977; +} +lean_ctor_set(x_18978, 0, x_18975); +lean_ctor_set(x_18978, 1, x_18976); +return x_18978; +} +} +else +{ +lean_object* x_18979; lean_object* x_18980; lean_object* x_18981; lean_object* x_18982; lean_object* x_18983; lean_object* x_18984; lean_object* x_18985; lean_object* x_18986; lean_object* x_18987; +lean_dec(x_18939); +lean_dec(x_18937); +lean_inc(x_17707); +lean_inc(x_153); +x_18979 = lean_alloc_ctor(6, 2, 0); +lean_ctor_set(x_18979, 0, x_153); +lean_ctor_set(x_18979, 1, x_17707); +x_18980 = lean_ctor_get(x_1, 0); +lean_inc(x_18980); +x_18981 = l_Lean_IR_ToIR_bindVar(x_18980, x_17713, x_4, x_5, x_18934); +x_18982 = lean_ctor_get(x_18981, 0); +lean_inc(x_18982); +x_18983 = lean_ctor_get(x_18981, 1); +lean_inc(x_18983); +lean_dec(x_18981); +x_18984 = lean_ctor_get(x_18982, 0); +lean_inc(x_18984); +x_18985 = lean_ctor_get(x_18982, 1); +lean_inc(x_18985); +lean_dec(x_18982); +x_18986 = lean_ctor_get(x_1, 2); +lean_inc(x_18986); +lean_inc(x_5); +lean_inc(x_4); +x_18987 = l_Lean_IR_ToIR_lowerType(x_18986, x_18985, x_4, x_5, x_18983); +if (lean_obj_tag(x_18987) == 0) +{ +lean_object* x_18988; lean_object* x_18989; lean_object* x_18990; lean_object* x_18991; lean_object* x_18992; +x_18988 = lean_ctor_get(x_18987, 0); +lean_inc(x_18988); +x_18989 = lean_ctor_get(x_18987, 1); +lean_inc(x_18989); +lean_dec(x_18987); +x_18990 = lean_ctor_get(x_18988, 0); +lean_inc(x_18990); +x_18991 = lean_ctor_get(x_18988, 1); +lean_inc(x_18991); +lean_dec(x_18988); +lean_inc(x_5); +lean_inc(x_4); +lean_inc(x_2); +x_18992 = l_Lean_IR_ToIR_lowerLet___lambda__1(x_2, x_18984, x_18979, x_18990, x_18991, x_4, x_5, x_18989); +if (lean_obj_tag(x_18992) == 0) +{ +lean_object* x_18993; lean_object* x_18994; lean_object* x_18995; lean_object* x_18996; lean_object* x_18997; lean_object* x_18998; lean_object* x_18999; +x_18993 = lean_ctor_get(x_18992, 0); +lean_inc(x_18993); +x_18994 = lean_ctor_get(x_18992, 1); +lean_inc(x_18994); +lean_dec(x_18992); +x_18995 = lean_ctor_get(x_18993, 0); +lean_inc(x_18995); +x_18996 = lean_ctor_get(x_18993, 1); +lean_inc(x_18996); +if (lean_is_exclusive(x_18993)) { + lean_ctor_release(x_18993, 0); + lean_ctor_release(x_18993, 1); + x_18997 = x_18993; +} else { + lean_dec_ref(x_18993); + x_18997 = lean_box(0); +} +if (lean_is_scalar(x_18936)) { + x_18998 = lean_alloc_ctor(1, 1, 0); +} else { + x_18998 = x_18936; +} +lean_ctor_set(x_18998, 0, x_18995); +if (lean_is_scalar(x_18997)) { + x_18999 = lean_alloc_ctor(0, 2, 0); +} else { + x_18999 = x_18997; +} +lean_ctor_set(x_18999, 0, x_18998); 
+lean_ctor_set(x_18999, 1, x_18996); +x_18717 = x_18999; +x_18718 = x_18994; +goto block_18747; +} +else +{ +lean_object* x_19000; lean_object* x_19001; lean_object* x_19002; lean_object* x_19003; +lean_dec(x_18936); +lean_dec(x_17718); +lean_dec(x_17707); +lean_dec(x_153); +lean_dec(x_5); +lean_dec(x_4); +lean_dec(x_2); +lean_dec(x_1); +x_19000 = lean_ctor_get(x_18992, 0); +lean_inc(x_19000); +x_19001 = lean_ctor_get(x_18992, 1); +lean_inc(x_19001); +if (lean_is_exclusive(x_18992)) { + lean_ctor_release(x_18992, 0); + lean_ctor_release(x_18992, 1); + x_19002 = x_18992; +} else { + lean_dec_ref(x_18992); + x_19002 = lean_box(0); +} +if (lean_is_scalar(x_19002)) { + x_19003 = lean_alloc_ctor(1, 2, 0); +} else { + x_19003 = x_19002; +} +lean_ctor_set(x_19003, 0, x_19000); +lean_ctor_set(x_19003, 1, x_19001); +return x_19003; +} +} +else +{ +lean_object* x_19004; lean_object* x_19005; lean_object* x_19006; lean_object* x_19007; +lean_dec(x_18984); +lean_dec(x_18979); +lean_dec(x_18936); +lean_dec(x_17718); +lean_dec(x_17707); +lean_dec(x_153); +lean_dec(x_5); +lean_dec(x_4); +lean_dec(x_2); +lean_dec(x_1); +x_19004 = lean_ctor_get(x_18987, 0); +lean_inc(x_19004); +x_19005 = lean_ctor_get(x_18987, 1); +lean_inc(x_19005); +if (lean_is_exclusive(x_18987)) { + lean_ctor_release(x_18987, 0); + lean_ctor_release(x_18987, 1); + x_19006 = x_18987; +} else { + lean_dec_ref(x_18987); + x_19006 = lean_box(0); +} +if (lean_is_scalar(x_19006)) { + x_19007 = lean_alloc_ctor(1, 2, 0); +} else { + x_19007 = x_19006; +} +lean_ctor_set(x_19007, 0, x_19004); +lean_ctor_set(x_19007, 1, x_19005); +return x_19007; +} +} +} +else +{ +lean_object* x_19008; lean_object* x_19009; lean_object* x_19010; lean_object* x_19011; lean_object* x_19012; lean_object* x_19013; lean_object* x_19014; lean_object* x_19015; lean_object* x_19016; +lean_dec(x_18939); +lean_dec(x_18937); +lean_inc(x_17707); +lean_inc(x_153); +x_19008 = lean_alloc_ctor(7, 2, 0); +lean_ctor_set(x_19008, 0, x_153); +lean_ctor_set(x_19008, 1, x_17707); +x_19009 = lean_ctor_get(x_1, 0); +lean_inc(x_19009); +x_19010 = l_Lean_IR_ToIR_bindVar(x_19009, x_17713, x_4, x_5, x_18934); +x_19011 = lean_ctor_get(x_19010, 0); +lean_inc(x_19011); +x_19012 = lean_ctor_get(x_19010, 1); +lean_inc(x_19012); +lean_dec(x_19010); +x_19013 = lean_ctor_get(x_19011, 0); +lean_inc(x_19013); +x_19014 = lean_ctor_get(x_19011, 1); +lean_inc(x_19014); +lean_dec(x_19011); +x_19015 = lean_box(7); +lean_inc(x_5); +lean_inc(x_4); +lean_inc(x_2); +x_19016 = l_Lean_IR_ToIR_lowerLet___lambda__1(x_2, x_19013, x_19008, x_19015, x_19014, x_4, x_5, x_19012); +if (lean_obj_tag(x_19016) == 0) +{ +lean_object* x_19017; lean_object* x_19018; lean_object* x_19019; lean_object* x_19020; lean_object* x_19021; lean_object* x_19022; lean_object* x_19023; +x_19017 = lean_ctor_get(x_19016, 0); +lean_inc(x_19017); +x_19018 = lean_ctor_get(x_19016, 1); +lean_inc(x_19018); +lean_dec(x_19016); +x_19019 = lean_ctor_get(x_19017, 0); +lean_inc(x_19019); +x_19020 = lean_ctor_get(x_19017, 1); +lean_inc(x_19020); +if (lean_is_exclusive(x_19017)) { + lean_ctor_release(x_19017, 0); + lean_ctor_release(x_19017, 1); + x_19021 = x_19017; +} else { + lean_dec_ref(x_19017); + x_19021 = lean_box(0); +} +if (lean_is_scalar(x_18936)) { + x_19022 = lean_alloc_ctor(1, 1, 0); +} else { + x_19022 = x_18936; +} +lean_ctor_set(x_19022, 0, x_19019); +if (lean_is_scalar(x_19021)) { + x_19023 = lean_alloc_ctor(0, 2, 0); +} else { + x_19023 = x_19021; +} +lean_ctor_set(x_19023, 0, x_19022); +lean_ctor_set(x_19023, 1, x_19020); +x_18717 
= x_19023; +x_18718 = x_19018; +goto block_18747; +} +else +{ +lean_object* x_19024; lean_object* x_19025; lean_object* x_19026; lean_object* x_19027; +lean_dec(x_18936); +lean_dec(x_17718); +lean_dec(x_17707); +lean_dec(x_153); +lean_dec(x_5); +lean_dec(x_4); +lean_dec(x_2); +lean_dec(x_1); +x_19024 = lean_ctor_get(x_19016, 0); +lean_inc(x_19024); +x_19025 = lean_ctor_get(x_19016, 1); +lean_inc(x_19025); +if (lean_is_exclusive(x_19016)) { + lean_ctor_release(x_19016, 0); + lean_ctor_release(x_19016, 1); + x_19026 = x_19016; +} else { + lean_dec_ref(x_19016); + x_19026 = lean_box(0); +} +if (lean_is_scalar(x_19026)) { + x_19027 = lean_alloc_ctor(1, 2, 0); +} else { + x_19027 = x_19026; +} +lean_ctor_set(x_19027, 0, x_19024); +lean_ctor_set(x_19027, 1, x_19025); +return x_19027; +} +} +} +} +block_18747: +{ +lean_object* x_18719; +x_18719 = lean_ctor_get(x_18717, 0); +lean_inc(x_18719); +if (lean_obj_tag(x_18719) == 0) +{ +lean_object* x_18720; lean_object* x_18721; lean_object* x_18722; lean_object* x_18723; lean_object* x_18724; lean_object* x_18725; lean_object* x_18726; lean_object* x_18727; lean_object* x_18728; lean_object* x_18729; +lean_dec(x_17718); +x_18720 = lean_ctor_get(x_18717, 1); +lean_inc(x_18720); +lean_dec(x_18717); +x_18721 = lean_alloc_ctor(6, 2, 0); +lean_ctor_set(x_18721, 0, x_153); +lean_ctor_set(x_18721, 1, x_17707); +x_18722 = lean_ctor_get(x_1, 0); +lean_inc(x_18722); +x_18723 = l_Lean_IR_ToIR_bindVar(x_18722, x_18720, x_4, x_5, x_18718); +x_18724 = lean_ctor_get(x_18723, 0); +lean_inc(x_18724); +x_18725 = lean_ctor_get(x_18723, 1); +lean_inc(x_18725); +lean_dec(x_18723); +x_18726 = lean_ctor_get(x_18724, 0); +lean_inc(x_18726); +x_18727 = lean_ctor_get(x_18724, 1); +lean_inc(x_18727); +lean_dec(x_18724); +x_18728 = lean_ctor_get(x_1, 2); +lean_inc(x_18728); +lean_dec(x_1); +lean_inc(x_5); +lean_inc(x_4); +x_18729 = l_Lean_IR_ToIR_lowerType(x_18728, x_18727, x_4, x_5, x_18725); +if (lean_obj_tag(x_18729) == 0) +{ +lean_object* x_18730; lean_object* x_18731; lean_object* x_18732; lean_object* x_18733; lean_object* x_18734; +x_18730 = lean_ctor_get(x_18729, 0); +lean_inc(x_18730); +x_18731 = lean_ctor_get(x_18729, 1); +lean_inc(x_18731); +lean_dec(x_18729); +x_18732 = lean_ctor_get(x_18730, 0); +lean_inc(x_18732); +x_18733 = lean_ctor_get(x_18730, 1); +lean_inc(x_18733); +lean_dec(x_18730); +x_18734 = l_Lean_IR_ToIR_lowerLet___lambda__1(x_2, x_18726, x_18721, x_18732, x_18733, x_4, x_5, x_18731); +return x_18734; +} +else +{ +uint8_t x_18735; +lean_dec(x_18726); +lean_dec(x_18721); +lean_dec(x_5); +lean_dec(x_4); +lean_dec(x_2); +x_18735 = !lean_is_exclusive(x_18729); +if (x_18735 == 0) +{ +return x_18729; +} +else +{ +lean_object* x_18736; lean_object* x_18737; lean_object* x_18738; +x_18736 = lean_ctor_get(x_18729, 0); +x_18737 = lean_ctor_get(x_18729, 1); +lean_inc(x_18737); +lean_inc(x_18736); +lean_dec(x_18729); +x_18738 = lean_alloc_ctor(1, 2, 0); +lean_ctor_set(x_18738, 0, x_18736); +lean_ctor_set(x_18738, 1, x_18737); +return x_18738; +} +} +} +else +{ +uint8_t x_18739; +lean_dec(x_17707); +lean_dec(x_153); +lean_dec(x_5); +lean_dec(x_4); +lean_dec(x_2); +lean_dec(x_1); +x_18739 = !lean_is_exclusive(x_18717); +if (x_18739 == 0) +{ +lean_object* x_18740; lean_object* x_18741; lean_object* x_18742; +x_18740 = lean_ctor_get(x_18717, 0); +lean_dec(x_18740); +x_18741 = lean_ctor_get(x_18719, 0); +lean_inc(x_18741); +lean_dec(x_18719); +lean_ctor_set(x_18717, 0, x_18741); +if (lean_is_scalar(x_17718)) { + x_18742 = lean_alloc_ctor(0, 2, 0); +} else { + x_18742 = 
x_17718; +} +lean_ctor_set(x_18742, 0, x_18717); +lean_ctor_set(x_18742, 1, x_18718); +return x_18742; +} +else +{ +lean_object* x_18743; lean_object* x_18744; lean_object* x_18745; lean_object* x_18746; +x_18743 = lean_ctor_get(x_18717, 1); +lean_inc(x_18743); +lean_dec(x_18717); +x_18744 = lean_ctor_get(x_18719, 0); +lean_inc(x_18744); +lean_dec(x_18719); +x_18745 = lean_alloc_ctor(0, 2, 0); +lean_ctor_set(x_18745, 0, x_18744); +lean_ctor_set(x_18745, 1, x_18743); +if (lean_is_scalar(x_17718)) { + x_18746 = lean_alloc_ctor(0, 2, 0); +} else { + x_18746 = x_17718; +} +lean_ctor_set(x_18746, 0, x_18745); +lean_ctor_set(x_18746, 1, x_18718); +return x_18746; +} +} +} +} +} +default: +{ +uint8_t x_19028; +lean_dec(x_17719); +lean_dec(x_17718); +lean_free_object(x_17709); +lean_dec(x_17707); +lean_dec(x_17699); +lean_dec(x_17698); +lean_dec(x_2); +lean_dec(x_1); +x_19028 = !lean_is_exclusive(x_17724); +if (x_19028 == 0) +{ +lean_object* x_19029; uint8_t x_19030; lean_object* x_19031; lean_object* x_19032; lean_object* x_19033; lean_object* x_19034; lean_object* x_19035; lean_object* x_19036; lean_object* x_19037; lean_object* x_19038; +x_19029 = lean_ctor_get(x_17724, 0); +lean_dec(x_19029); +x_19030 = 1; +x_19031 = l_Lean_IR_ToIR_lowerLet___closed__14; +x_19032 = l_Lean_Name_toString(x_153, x_19030, x_19031); +lean_ctor_set_tag(x_17724, 3); +lean_ctor_set(x_17724, 0, x_19032); +x_19033 = l_Lean_IR_ToIR_lowerLet___closed__29; +x_19034 = lean_alloc_ctor(5, 2, 0); +lean_ctor_set(x_19034, 0, x_19033); +lean_ctor_set(x_19034, 1, x_17724); +x_19035 = l_Lean_IR_ToIR_lowerLet___closed__31; +x_19036 = lean_alloc_ctor(5, 2, 0); +lean_ctor_set(x_19036, 0, x_19034); +lean_ctor_set(x_19036, 1, x_19035); +x_19037 = l_Lean_MessageData_ofFormat(x_19036); +x_19038 = l_Lean_throwError___at_Lean_IR_ToIR_lowerLet___spec__1(x_19037, x_17713, x_4, x_5, x_17717); +lean_dec(x_5); +lean_dec(x_4); +lean_dec(x_17713); +return x_19038; +} +else +{ +uint8_t x_19039; lean_object* x_19040; lean_object* x_19041; lean_object* x_19042; lean_object* x_19043; lean_object* x_19044; lean_object* x_19045; lean_object* x_19046; lean_object* x_19047; lean_object* x_19048; +lean_dec(x_17724); +x_19039 = 1; +x_19040 = l_Lean_IR_ToIR_lowerLet___closed__14; +x_19041 = l_Lean_Name_toString(x_153, x_19039, x_19040); +x_19042 = lean_alloc_ctor(3, 1, 0); +lean_ctor_set(x_19042, 0, x_19041); +x_19043 = l_Lean_IR_ToIR_lowerLet___closed__29; +x_19044 = lean_alloc_ctor(5, 2, 0); +lean_ctor_set(x_19044, 0, x_19043); +lean_ctor_set(x_19044, 1, x_19042); +x_19045 = l_Lean_IR_ToIR_lowerLet___closed__31; +x_19046 = lean_alloc_ctor(5, 2, 0); +lean_ctor_set(x_19046, 0, x_19044); +lean_ctor_set(x_19046, 1, x_19045); +x_19047 = l_Lean_MessageData_ofFormat(x_19046); +x_19048 = l_Lean_throwError___at_Lean_IR_ToIR_lowerLet___spec__1(x_19047, x_17713, x_4, x_5, x_17717); +lean_dec(x_5); +lean_dec(x_4); +lean_dec(x_17713); +return x_19048; +} +} +} +} +} +else +{ +lean_object* x_19049; lean_object* x_19050; lean_object* x_19051; lean_object* x_19052; lean_object* x_19053; lean_object* x_19054; uint8_t x_19055; lean_object* x_19056; +x_19049 = lean_ctor_get(x_17709, 1); +lean_inc(x_19049); +lean_dec(x_17709); +x_19050 = lean_st_ref_get(x_5, x_17710); +x_19051 = lean_ctor_get(x_19050, 0); +lean_inc(x_19051); +x_19052 = lean_ctor_get(x_19050, 1); +lean_inc(x_19052); +if (lean_is_exclusive(x_19050)) { + lean_ctor_release(x_19050, 0); + lean_ctor_release(x_19050, 1); + x_19053 = x_19050; +} else { + lean_dec_ref(x_19050); + x_19053 = lean_box(0); +} +x_19054 = 
lean_ctor_get(x_19051, 0); +lean_inc(x_19054); +lean_dec(x_19051); +x_19055 = 0; +lean_inc(x_153); +lean_inc(x_19054); +x_19056 = l_Lean_Environment_find_x3f(x_19054, x_153, x_19055); +if (lean_obj_tag(x_19056) == 0) +{ +lean_object* x_19057; lean_object* x_19058; +lean_dec(x_19054); +lean_dec(x_19053); +lean_dec(x_17707); +lean_dec(x_17699); +lean_dec(x_17698); +lean_dec(x_153); +lean_dec(x_2); +lean_dec(x_1); +x_19057 = l_Lean_IR_ToIR_lowerLet___closed__6; +x_19058 = l_panic___at_Lean_IR_ToIR_lowerAlt_loop___spec__1(x_19057, x_19049, x_4, x_5, x_19052); +return x_19058; +} +else +{ +lean_object* x_19059; +x_19059 = lean_ctor_get(x_19056, 0); +lean_inc(x_19059); +lean_dec(x_19056); +switch (lean_obj_tag(x_19059)) { +case 0: +{ +lean_object* x_19060; lean_object* x_19061; uint8_t x_19062; +lean_dec(x_19054); +lean_dec(x_17699); +lean_dec(x_17698); +if (lean_is_exclusive(x_19059)) { + lean_ctor_release(x_19059, 0); + x_19060 = x_19059; +} else { + lean_dec_ref(x_19059); + x_19060 = lean_box(0); +} +x_19061 = l_Lean_IR_ToIR_lowerLet___closed__9; +x_19062 = lean_name_eq(x_153, x_19061); +if (x_19062 == 0) +{ +lean_object* x_19063; uint8_t x_19064; +x_19063 = l_Lean_IR_ToIR_lowerLet___closed__11; +x_19064 = lean_name_eq(x_153, x_19063); +if (x_19064 == 0) +{ +lean_object* x_19065; lean_object* x_19066; lean_object* x_19067; +lean_dec(x_19053); +lean_inc(x_153); +x_19065 = l_Lean_IR_ToIR_findDecl(x_153, x_19049, x_4, x_5, x_19052); +x_19066 = lean_ctor_get(x_19065, 0); +lean_inc(x_19066); +x_19067 = lean_ctor_get(x_19066, 0); +lean_inc(x_19067); +if (lean_obj_tag(x_19067) == 0) +{ +lean_object* x_19068; lean_object* x_19069; lean_object* x_19070; lean_object* x_19071; uint8_t x_19072; lean_object* x_19073; lean_object* x_19074; lean_object* x_19075; lean_object* x_19076; lean_object* x_19077; lean_object* x_19078; lean_object* x_19079; lean_object* x_19080; lean_object* x_19081; +lean_dec(x_17707); +lean_dec(x_2); +lean_dec(x_1); +x_19068 = lean_ctor_get(x_19065, 1); +lean_inc(x_19068); +if (lean_is_exclusive(x_19065)) { + lean_ctor_release(x_19065, 0); + lean_ctor_release(x_19065, 1); + x_19069 = x_19065; +} else { + lean_dec_ref(x_19065); + x_19069 = lean_box(0); +} +x_19070 = lean_ctor_get(x_19066, 1); +lean_inc(x_19070); +if (lean_is_exclusive(x_19066)) { + lean_ctor_release(x_19066, 0); + lean_ctor_release(x_19066, 1); + x_19071 = x_19066; +} else { + lean_dec_ref(x_19066); + x_19071 = lean_box(0); +} +x_19072 = 1; +x_19073 = l_Lean_IR_ToIR_lowerLet___closed__14; +x_19074 = l_Lean_Name_toString(x_153, x_19072, x_19073); +if (lean_is_scalar(x_19060)) { + x_19075 = lean_alloc_ctor(3, 1, 0); +} else { + x_19075 = x_19060; + lean_ctor_set_tag(x_19075, 3); +} +lean_ctor_set(x_19075, 0, x_19074); +x_19076 = l_Lean_IR_ToIR_lowerLet___closed__13; +if (lean_is_scalar(x_19071)) { + x_19077 = lean_alloc_ctor(5, 2, 0); +} else { + x_19077 = x_19071; + lean_ctor_set_tag(x_19077, 5); +} +lean_ctor_set(x_19077, 0, x_19076); +lean_ctor_set(x_19077, 1, x_19075); +x_19078 = l_Lean_IR_ToIR_lowerLet___closed__16; +if (lean_is_scalar(x_19069)) { + x_19079 = lean_alloc_ctor(5, 2, 0); +} else { + x_19079 = x_19069; + lean_ctor_set_tag(x_19079, 5); +} +lean_ctor_set(x_19079, 0, x_19077); +lean_ctor_set(x_19079, 1, x_19078); +x_19080 = l_Lean_MessageData_ofFormat(x_19079); +x_19081 = l_Lean_throwError___at_Lean_IR_ToIR_lowerLet___spec__1(x_19080, x_19070, x_4, x_5, x_19068); +lean_dec(x_5); +lean_dec(x_4); +lean_dec(x_19070); +return x_19081; +} +else +{ +lean_object* x_19082; lean_object* x_19083; lean_object* 
x_19084; lean_object* x_19085; lean_object* x_19086; lean_object* x_19087; lean_object* x_19088; uint8_t x_19089; +lean_dec(x_19060); +x_19082 = lean_ctor_get(x_19065, 1); +lean_inc(x_19082); +lean_dec(x_19065); +x_19083 = lean_ctor_get(x_19066, 1); +lean_inc(x_19083); +if (lean_is_exclusive(x_19066)) { + lean_ctor_release(x_19066, 0); + lean_ctor_release(x_19066, 1); + x_19084 = x_19066; +} else { + lean_dec_ref(x_19066); + x_19084 = lean_box(0); +} +x_19085 = lean_ctor_get(x_19067, 0); +lean_inc(x_19085); +lean_dec(x_19067); +x_19086 = lean_array_get_size(x_17707); +x_19087 = l_Lean_IR_Decl_params(x_19085); +lean_dec(x_19085); +x_19088 = lean_array_get_size(x_19087); +lean_dec(x_19087); +x_19089 = lean_nat_dec_lt(x_19086, x_19088); +if (x_19089 == 0) +{ +uint8_t x_19090; +x_19090 = lean_nat_dec_eq(x_19086, x_19088); +if (x_19090 == 0) +{ +lean_object* x_19091; lean_object* x_19092; lean_object* x_19093; lean_object* x_19094; lean_object* x_19095; lean_object* x_19096; lean_object* x_19097; lean_object* x_19098; lean_object* x_19099; lean_object* x_19100; lean_object* x_19101; lean_object* x_19102; lean_object* x_19103; lean_object* x_19104; lean_object* x_19105; lean_object* x_19106; lean_object* x_19107; +x_19091 = lean_unsigned_to_nat(0u); +x_19092 = l_Array_extract___rarg(x_17707, x_19091, x_19088); +x_19093 = l_Array_extract___rarg(x_17707, x_19088, x_19086); +lean_dec(x_19086); +lean_dec(x_17707); +if (lean_is_scalar(x_19084)) { + x_19094 = lean_alloc_ctor(6, 2, 0); +} else { + x_19094 = x_19084; + lean_ctor_set_tag(x_19094, 6); +} +lean_ctor_set(x_19094, 0, x_153); +lean_ctor_set(x_19094, 1, x_19092); +x_19095 = lean_ctor_get(x_1, 0); +lean_inc(x_19095); +x_19096 = l_Lean_IR_ToIR_bindVar(x_19095, x_19083, x_4, x_5, x_19082); +x_19097 = lean_ctor_get(x_19096, 0); +lean_inc(x_19097); +x_19098 = lean_ctor_get(x_19096, 1); +lean_inc(x_19098); +lean_dec(x_19096); +x_19099 = lean_ctor_get(x_19097, 0); +lean_inc(x_19099); +x_19100 = lean_ctor_get(x_19097, 1); +lean_inc(x_19100); +lean_dec(x_19097); +x_19101 = l_Lean_IR_ToIR_newVar(x_19100, x_4, x_5, x_19098); +x_19102 = lean_ctor_get(x_19101, 0); +lean_inc(x_19102); +x_19103 = lean_ctor_get(x_19101, 1); +lean_inc(x_19103); +lean_dec(x_19101); +x_19104 = lean_ctor_get(x_19102, 0); +lean_inc(x_19104); +x_19105 = lean_ctor_get(x_19102, 1); +lean_inc(x_19105); +lean_dec(x_19102); +x_19106 = lean_ctor_get(x_1, 2); +lean_inc(x_19106); +lean_dec(x_1); +lean_inc(x_5); +lean_inc(x_4); +x_19107 = l_Lean_IR_ToIR_lowerType(x_19106, x_19105, x_4, x_5, x_19103); +if (lean_obj_tag(x_19107) == 0) +{ +lean_object* x_19108; lean_object* x_19109; lean_object* x_19110; lean_object* x_19111; lean_object* x_19112; +x_19108 = lean_ctor_get(x_19107, 0); +lean_inc(x_19108); +x_19109 = lean_ctor_get(x_19107, 1); +lean_inc(x_19109); +lean_dec(x_19107); +x_19110 = lean_ctor_get(x_19108, 0); +lean_inc(x_19110); +x_19111 = lean_ctor_get(x_19108, 1); +lean_inc(x_19111); +lean_dec(x_19108); +x_19112 = l_Lean_IR_ToIR_lowerLet___lambda__3(x_2, x_19104, x_19093, x_19099, x_19094, x_19110, x_19111, x_4, x_5, x_19109); +return x_19112; +} +else +{ +lean_object* x_19113; lean_object* x_19114; lean_object* x_19115; lean_object* x_19116; +lean_dec(x_19104); +lean_dec(x_19099); +lean_dec(x_19094); +lean_dec(x_19093); +lean_dec(x_5); +lean_dec(x_4); +lean_dec(x_2); +x_19113 = lean_ctor_get(x_19107, 0); +lean_inc(x_19113); +x_19114 = lean_ctor_get(x_19107, 1); +lean_inc(x_19114); +if (lean_is_exclusive(x_19107)) { + lean_ctor_release(x_19107, 0); + lean_ctor_release(x_19107, 1); + 
x_19115 = x_19107; +} else { + lean_dec_ref(x_19107); + x_19115 = lean_box(0); +} +if (lean_is_scalar(x_19115)) { + x_19116 = lean_alloc_ctor(1, 2, 0); +} else { + x_19116 = x_19115; +} +lean_ctor_set(x_19116, 0, x_19113); +lean_ctor_set(x_19116, 1, x_19114); +return x_19116; +} +} +else +{ +lean_object* x_19117; lean_object* x_19118; lean_object* x_19119; lean_object* x_19120; lean_object* x_19121; lean_object* x_19122; lean_object* x_19123; lean_object* x_19124; lean_object* x_19125; +lean_dec(x_19088); +lean_dec(x_19086); +if (lean_is_scalar(x_19084)) { + x_19117 = lean_alloc_ctor(6, 2, 0); +} else { + x_19117 = x_19084; + lean_ctor_set_tag(x_19117, 6); +} +lean_ctor_set(x_19117, 0, x_153); +lean_ctor_set(x_19117, 1, x_17707); +x_19118 = lean_ctor_get(x_1, 0); +lean_inc(x_19118); +x_19119 = l_Lean_IR_ToIR_bindVar(x_19118, x_19083, x_4, x_5, x_19082); +x_19120 = lean_ctor_get(x_19119, 0); +lean_inc(x_19120); +x_19121 = lean_ctor_get(x_19119, 1); +lean_inc(x_19121); +lean_dec(x_19119); +x_19122 = lean_ctor_get(x_19120, 0); +lean_inc(x_19122); +x_19123 = lean_ctor_get(x_19120, 1); +lean_inc(x_19123); +lean_dec(x_19120); +x_19124 = lean_ctor_get(x_1, 2); +lean_inc(x_19124); +lean_dec(x_1); +lean_inc(x_5); +lean_inc(x_4); +x_19125 = l_Lean_IR_ToIR_lowerType(x_19124, x_19123, x_4, x_5, x_19121); +if (lean_obj_tag(x_19125) == 0) +{ +lean_object* x_19126; lean_object* x_19127; lean_object* x_19128; lean_object* x_19129; lean_object* x_19130; +x_19126 = lean_ctor_get(x_19125, 0); +lean_inc(x_19126); +x_19127 = lean_ctor_get(x_19125, 1); +lean_inc(x_19127); +lean_dec(x_19125); +x_19128 = lean_ctor_get(x_19126, 0); +lean_inc(x_19128); +x_19129 = lean_ctor_get(x_19126, 1); +lean_inc(x_19129); +lean_dec(x_19126); +x_19130 = l_Lean_IR_ToIR_lowerLet___lambda__1(x_2, x_19122, x_19117, x_19128, x_19129, x_4, x_5, x_19127); +return x_19130; +} +else +{ +lean_object* x_19131; lean_object* x_19132; lean_object* x_19133; lean_object* x_19134; +lean_dec(x_19122); +lean_dec(x_19117); +lean_dec(x_5); +lean_dec(x_4); +lean_dec(x_2); +x_19131 = lean_ctor_get(x_19125, 0); +lean_inc(x_19131); +x_19132 = lean_ctor_get(x_19125, 1); +lean_inc(x_19132); +if (lean_is_exclusive(x_19125)) { + lean_ctor_release(x_19125, 0); + lean_ctor_release(x_19125, 1); + x_19133 = x_19125; +} else { + lean_dec_ref(x_19125); + x_19133 = lean_box(0); +} +if (lean_is_scalar(x_19133)) { + x_19134 = lean_alloc_ctor(1, 2, 0); +} else { + x_19134 = x_19133; +} +lean_ctor_set(x_19134, 0, x_19131); +lean_ctor_set(x_19134, 1, x_19132); +return x_19134; +} +} +} +else +{ +lean_object* x_19135; lean_object* x_19136; lean_object* x_19137; lean_object* x_19138; lean_object* x_19139; lean_object* x_19140; lean_object* x_19141; lean_object* x_19142; lean_object* x_19143; +lean_dec(x_19088); +lean_dec(x_19086); +if (lean_is_scalar(x_19084)) { + x_19135 = lean_alloc_ctor(7, 2, 0); +} else { + x_19135 = x_19084; + lean_ctor_set_tag(x_19135, 7); +} +lean_ctor_set(x_19135, 0, x_153); +lean_ctor_set(x_19135, 1, x_17707); +x_19136 = lean_ctor_get(x_1, 0); +lean_inc(x_19136); +lean_dec(x_1); +x_19137 = l_Lean_IR_ToIR_bindVar(x_19136, x_19083, x_4, x_5, x_19082); +x_19138 = lean_ctor_get(x_19137, 0); +lean_inc(x_19138); +x_19139 = lean_ctor_get(x_19137, 1); +lean_inc(x_19139); +lean_dec(x_19137); +x_19140 = lean_ctor_get(x_19138, 0); +lean_inc(x_19140); +x_19141 = lean_ctor_get(x_19138, 1); +lean_inc(x_19141); +lean_dec(x_19138); +x_19142 = lean_box(7); +x_19143 = l_Lean_IR_ToIR_lowerLet___lambda__1(x_2, x_19140, x_19135, x_19142, x_19141, x_4, x_5, x_19139); 
+return x_19143; +} +} +} +else +{ +lean_object* x_19144; lean_object* x_19145; lean_object* x_19146; +lean_dec(x_19060); +lean_dec(x_17707); +lean_dec(x_153); +lean_dec(x_5); +lean_dec(x_4); +lean_dec(x_2); +lean_dec(x_1); +x_19144 = lean_box(13); +x_19145 = lean_alloc_ctor(0, 2, 0); +lean_ctor_set(x_19145, 0, x_19144); +lean_ctor_set(x_19145, 1, x_19049); +if (lean_is_scalar(x_19053)) { + x_19146 = lean_alloc_ctor(0, 2, 0); +} else { + x_19146 = x_19053; +} +lean_ctor_set(x_19146, 0, x_19145); +lean_ctor_set(x_19146, 1, x_19052); +return x_19146; +} +} +else +{ +lean_object* x_19147; lean_object* x_19148; lean_object* x_19149; +lean_dec(x_19060); +lean_dec(x_19053); +lean_dec(x_153); +x_19147 = l_Lean_IR_instInhabitedArg; +x_19148 = lean_unsigned_to_nat(2u); +x_19149 = lean_array_get(x_19147, x_17707, x_19148); +lean_dec(x_17707); +if (lean_obj_tag(x_19149) == 0) +{ +lean_object* x_19150; lean_object* x_19151; lean_object* x_19152; lean_object* x_19153; lean_object* x_19154; lean_object* x_19155; lean_object* x_19156; +x_19150 = lean_ctor_get(x_19149, 0); +lean_inc(x_19150); +lean_dec(x_19149); +x_19151 = lean_ctor_get(x_1, 0); +lean_inc(x_19151); +lean_dec(x_1); +x_19152 = l_Lean_IR_ToIR_bindVarToVarId(x_19151, x_19150, x_19049, x_4, x_5, x_19052); +x_19153 = lean_ctor_get(x_19152, 0); +lean_inc(x_19153); +x_19154 = lean_ctor_get(x_19152, 1); +lean_inc(x_19154); +lean_dec(x_19152); +x_19155 = lean_ctor_get(x_19153, 1); +lean_inc(x_19155); +lean_dec(x_19153); +x_19156 = l_Lean_IR_ToIR_lowerCode(x_2, x_19155, x_4, x_5, x_19154); +return x_19156; +} +else +{ +lean_object* x_19157; lean_object* x_19158; lean_object* x_19159; lean_object* x_19160; lean_object* x_19161; lean_object* x_19162; +x_19157 = lean_ctor_get(x_1, 0); +lean_inc(x_19157); +lean_dec(x_1); +x_19158 = l_Lean_IR_ToIR_bindErased(x_19157, x_19049, x_4, x_5, x_19052); +x_19159 = lean_ctor_get(x_19158, 0); +lean_inc(x_19159); +x_19160 = lean_ctor_get(x_19158, 1); +lean_inc(x_19160); +lean_dec(x_19158); +x_19161 = lean_ctor_get(x_19159, 1); +lean_inc(x_19161); +lean_dec(x_19159); +x_19162 = l_Lean_IR_ToIR_lowerCode(x_2, x_19161, x_4, x_5, x_19160); +return x_19162; +} +} +} +case 1: +{ +lean_object* x_19163; lean_object* x_19164; lean_object* x_19191; lean_object* x_19192; +lean_dec(x_19059); +lean_dec(x_19054); +lean_dec(x_17699); +lean_dec(x_17698); +lean_inc(x_153); +x_19191 = l_Lean_Compiler_LCNF_getMonoDecl_x3f(x_153, x_4, x_5, x_19052); +x_19192 = lean_ctor_get(x_19191, 0); +lean_inc(x_19192); +if (lean_obj_tag(x_19192) == 0) +{ +lean_object* x_19193; lean_object* x_19194; lean_object* x_19195; +x_19193 = lean_ctor_get(x_19191, 1); +lean_inc(x_19193); +lean_dec(x_19191); +x_19194 = lean_box(0); +x_19195 = lean_alloc_ctor(0, 2, 0); +lean_ctor_set(x_19195, 0, x_19194); +lean_ctor_set(x_19195, 1, x_19049); +x_19163 = x_19195; +x_19164 = x_19193; +goto block_19190; +} +else +{ +lean_object* x_19196; lean_object* x_19197; lean_object* x_19198; lean_object* x_19199; lean_object* x_19200; lean_object* x_19201; lean_object* x_19202; uint8_t x_19203; +x_19196 = lean_ctor_get(x_19191, 1); +lean_inc(x_19196); +if (lean_is_exclusive(x_19191)) { + lean_ctor_release(x_19191, 0); + lean_ctor_release(x_19191, 1); + x_19197 = x_19191; +} else { + lean_dec_ref(x_19191); + x_19197 = lean_box(0); +} +x_19198 = lean_ctor_get(x_19192, 0); +lean_inc(x_19198); +if (lean_is_exclusive(x_19192)) { + lean_ctor_release(x_19192, 0); + x_19199 = x_19192; +} else { + lean_dec_ref(x_19192); + x_19199 = lean_box(0); +} +x_19200 = 
lean_array_get_size(x_17707); +x_19201 = lean_ctor_get(x_19198, 3); +lean_inc(x_19201); +lean_dec(x_19198); +x_19202 = lean_array_get_size(x_19201); +lean_dec(x_19201); +x_19203 = lean_nat_dec_lt(x_19200, x_19202); +if (x_19203 == 0) +{ +uint8_t x_19204; +x_19204 = lean_nat_dec_eq(x_19200, x_19202); +if (x_19204 == 0) +{ +lean_object* x_19205; lean_object* x_19206; lean_object* x_19207; lean_object* x_19208; lean_object* x_19209; lean_object* x_19210; lean_object* x_19211; lean_object* x_19212; lean_object* x_19213; lean_object* x_19214; lean_object* x_19215; lean_object* x_19216; lean_object* x_19217; lean_object* x_19218; lean_object* x_19219; lean_object* x_19220; lean_object* x_19221; +x_19205 = lean_unsigned_to_nat(0u); +x_19206 = l_Array_extract___rarg(x_17707, x_19205, x_19202); +x_19207 = l_Array_extract___rarg(x_17707, x_19202, x_19200); +lean_dec(x_19200); +lean_inc(x_153); +if (lean_is_scalar(x_19197)) { + x_19208 = lean_alloc_ctor(6, 2, 0); +} else { + x_19208 = x_19197; + lean_ctor_set_tag(x_19208, 6); +} +lean_ctor_set(x_19208, 0, x_153); +lean_ctor_set(x_19208, 1, x_19206); +x_19209 = lean_ctor_get(x_1, 0); +lean_inc(x_19209); +x_19210 = l_Lean_IR_ToIR_bindVar(x_19209, x_19049, x_4, x_5, x_19196); +x_19211 = lean_ctor_get(x_19210, 0); +lean_inc(x_19211); +x_19212 = lean_ctor_get(x_19210, 1); +lean_inc(x_19212); +lean_dec(x_19210); +x_19213 = lean_ctor_get(x_19211, 0); +lean_inc(x_19213); +x_19214 = lean_ctor_get(x_19211, 1); +lean_inc(x_19214); +lean_dec(x_19211); +x_19215 = l_Lean_IR_ToIR_newVar(x_19214, x_4, x_5, x_19212); +x_19216 = lean_ctor_get(x_19215, 0); +lean_inc(x_19216); +x_19217 = lean_ctor_get(x_19215, 1); +lean_inc(x_19217); +lean_dec(x_19215); +x_19218 = lean_ctor_get(x_19216, 0); +lean_inc(x_19218); +x_19219 = lean_ctor_get(x_19216, 1); +lean_inc(x_19219); +lean_dec(x_19216); +x_19220 = lean_ctor_get(x_1, 2); +lean_inc(x_19220); +lean_inc(x_5); +lean_inc(x_4); +x_19221 = l_Lean_IR_ToIR_lowerType(x_19220, x_19219, x_4, x_5, x_19217); +if (lean_obj_tag(x_19221) == 0) +{ +lean_object* x_19222; lean_object* x_19223; lean_object* x_19224; lean_object* x_19225; lean_object* x_19226; +x_19222 = lean_ctor_get(x_19221, 0); +lean_inc(x_19222); +x_19223 = lean_ctor_get(x_19221, 1); +lean_inc(x_19223); +lean_dec(x_19221); +x_19224 = lean_ctor_get(x_19222, 0); +lean_inc(x_19224); +x_19225 = lean_ctor_get(x_19222, 1); +lean_inc(x_19225); +lean_dec(x_19222); +lean_inc(x_5); +lean_inc(x_4); +lean_inc(x_2); +x_19226 = l_Lean_IR_ToIR_lowerLet___lambda__3(x_2, x_19218, x_19207, x_19213, x_19208, x_19224, x_19225, x_4, x_5, x_19223); +if (lean_obj_tag(x_19226) == 0) +{ +lean_object* x_19227; lean_object* x_19228; lean_object* x_19229; lean_object* x_19230; lean_object* x_19231; lean_object* x_19232; lean_object* x_19233; +x_19227 = lean_ctor_get(x_19226, 0); +lean_inc(x_19227); +x_19228 = lean_ctor_get(x_19226, 1); +lean_inc(x_19228); +lean_dec(x_19226); +x_19229 = lean_ctor_get(x_19227, 0); +lean_inc(x_19229); +x_19230 = lean_ctor_get(x_19227, 1); +lean_inc(x_19230); +if (lean_is_exclusive(x_19227)) { + lean_ctor_release(x_19227, 0); + lean_ctor_release(x_19227, 1); + x_19231 = x_19227; +} else { + lean_dec_ref(x_19227); + x_19231 = lean_box(0); +} +if (lean_is_scalar(x_19199)) { + x_19232 = lean_alloc_ctor(1, 1, 0); +} else { + x_19232 = x_19199; +} +lean_ctor_set(x_19232, 0, x_19229); +if (lean_is_scalar(x_19231)) { + x_19233 = lean_alloc_ctor(0, 2, 0); +} else { + x_19233 = x_19231; +} +lean_ctor_set(x_19233, 0, x_19232); +lean_ctor_set(x_19233, 1, x_19230); +x_19163 = 
x_19233; +x_19164 = x_19228; +goto block_19190; +} +else +{ +lean_object* x_19234; lean_object* x_19235; lean_object* x_19236; lean_object* x_19237; +lean_dec(x_19199); +lean_dec(x_19053); +lean_dec(x_17707); +lean_dec(x_153); +lean_dec(x_5); +lean_dec(x_4); +lean_dec(x_2); +lean_dec(x_1); +x_19234 = lean_ctor_get(x_19226, 0); +lean_inc(x_19234); +x_19235 = lean_ctor_get(x_19226, 1); +lean_inc(x_19235); +if (lean_is_exclusive(x_19226)) { + lean_ctor_release(x_19226, 0); + lean_ctor_release(x_19226, 1); + x_19236 = x_19226; +} else { + lean_dec_ref(x_19226); + x_19236 = lean_box(0); +} +if (lean_is_scalar(x_19236)) { + x_19237 = lean_alloc_ctor(1, 2, 0); +} else { + x_19237 = x_19236; +} +lean_ctor_set(x_19237, 0, x_19234); +lean_ctor_set(x_19237, 1, x_19235); +return x_19237; +} +} +else +{ +lean_object* x_19238; lean_object* x_19239; lean_object* x_19240; lean_object* x_19241; +lean_dec(x_19218); +lean_dec(x_19213); +lean_dec(x_19208); +lean_dec(x_19207); +lean_dec(x_19199); +lean_dec(x_19053); +lean_dec(x_17707); +lean_dec(x_153); +lean_dec(x_5); +lean_dec(x_4); +lean_dec(x_2); +lean_dec(x_1); +x_19238 = lean_ctor_get(x_19221, 0); +lean_inc(x_19238); +x_19239 = lean_ctor_get(x_19221, 1); +lean_inc(x_19239); +if (lean_is_exclusive(x_19221)) { + lean_ctor_release(x_19221, 0); + lean_ctor_release(x_19221, 1); + x_19240 = x_19221; +} else { + lean_dec_ref(x_19221); + x_19240 = lean_box(0); +} +if (lean_is_scalar(x_19240)) { + x_19241 = lean_alloc_ctor(1, 2, 0); +} else { + x_19241 = x_19240; +} +lean_ctor_set(x_19241, 0, x_19238); +lean_ctor_set(x_19241, 1, x_19239); +return x_19241; +} +} +else +{ +lean_object* x_19242; lean_object* x_19243; lean_object* x_19244; lean_object* x_19245; lean_object* x_19246; lean_object* x_19247; lean_object* x_19248; lean_object* x_19249; lean_object* x_19250; +lean_dec(x_19202); +lean_dec(x_19200); +lean_inc(x_17707); +lean_inc(x_153); +if (lean_is_scalar(x_19197)) { + x_19242 = lean_alloc_ctor(6, 2, 0); +} else { + x_19242 = x_19197; + lean_ctor_set_tag(x_19242, 6); +} +lean_ctor_set(x_19242, 0, x_153); +lean_ctor_set(x_19242, 1, x_17707); +x_19243 = lean_ctor_get(x_1, 0); +lean_inc(x_19243); +x_19244 = l_Lean_IR_ToIR_bindVar(x_19243, x_19049, x_4, x_5, x_19196); +x_19245 = lean_ctor_get(x_19244, 0); +lean_inc(x_19245); +x_19246 = lean_ctor_get(x_19244, 1); +lean_inc(x_19246); +lean_dec(x_19244); +x_19247 = lean_ctor_get(x_19245, 0); +lean_inc(x_19247); +x_19248 = lean_ctor_get(x_19245, 1); +lean_inc(x_19248); +lean_dec(x_19245); +x_19249 = lean_ctor_get(x_1, 2); +lean_inc(x_19249); +lean_inc(x_5); +lean_inc(x_4); +x_19250 = l_Lean_IR_ToIR_lowerType(x_19249, x_19248, x_4, x_5, x_19246); +if (lean_obj_tag(x_19250) == 0) +{ +lean_object* x_19251; lean_object* x_19252; lean_object* x_19253; lean_object* x_19254; lean_object* x_19255; +x_19251 = lean_ctor_get(x_19250, 0); +lean_inc(x_19251); +x_19252 = lean_ctor_get(x_19250, 1); +lean_inc(x_19252); +lean_dec(x_19250); +x_19253 = lean_ctor_get(x_19251, 0); +lean_inc(x_19253); +x_19254 = lean_ctor_get(x_19251, 1); +lean_inc(x_19254); +lean_dec(x_19251); +lean_inc(x_5); +lean_inc(x_4); +lean_inc(x_2); +x_19255 = l_Lean_IR_ToIR_lowerLet___lambda__1(x_2, x_19247, x_19242, x_19253, x_19254, x_4, x_5, x_19252); +if (lean_obj_tag(x_19255) == 0) +{ +lean_object* x_19256; lean_object* x_19257; lean_object* x_19258; lean_object* x_19259; lean_object* x_19260; lean_object* x_19261; lean_object* x_19262; +x_19256 = lean_ctor_get(x_19255, 0); +lean_inc(x_19256); +x_19257 = lean_ctor_get(x_19255, 1); +lean_inc(x_19257); 
+lean_dec(x_19255); +x_19258 = lean_ctor_get(x_19256, 0); +lean_inc(x_19258); +x_19259 = lean_ctor_get(x_19256, 1); +lean_inc(x_19259); +if (lean_is_exclusive(x_19256)) { + lean_ctor_release(x_19256, 0); + lean_ctor_release(x_19256, 1); + x_19260 = x_19256; +} else { + lean_dec_ref(x_19256); + x_19260 = lean_box(0); +} +if (lean_is_scalar(x_19199)) { + x_19261 = lean_alloc_ctor(1, 1, 0); +} else { + x_19261 = x_19199; +} +lean_ctor_set(x_19261, 0, x_19258); +if (lean_is_scalar(x_19260)) { + x_19262 = lean_alloc_ctor(0, 2, 0); +} else { + x_19262 = x_19260; +} +lean_ctor_set(x_19262, 0, x_19261); +lean_ctor_set(x_19262, 1, x_19259); +x_19163 = x_19262; +x_19164 = x_19257; +goto block_19190; +} +else +{ +lean_object* x_19263; lean_object* x_19264; lean_object* x_19265; lean_object* x_19266; +lean_dec(x_19199); +lean_dec(x_19053); +lean_dec(x_17707); +lean_dec(x_153); +lean_dec(x_5); +lean_dec(x_4); +lean_dec(x_2); +lean_dec(x_1); +x_19263 = lean_ctor_get(x_19255, 0); +lean_inc(x_19263); +x_19264 = lean_ctor_get(x_19255, 1); +lean_inc(x_19264); +if (lean_is_exclusive(x_19255)) { + lean_ctor_release(x_19255, 0); + lean_ctor_release(x_19255, 1); + x_19265 = x_19255; +} else { + lean_dec_ref(x_19255); + x_19265 = lean_box(0); +} +if (lean_is_scalar(x_19265)) { + x_19266 = lean_alloc_ctor(1, 2, 0); +} else { + x_19266 = x_19265; +} +lean_ctor_set(x_19266, 0, x_19263); +lean_ctor_set(x_19266, 1, x_19264); +return x_19266; +} +} +else +{ +lean_object* x_19267; lean_object* x_19268; lean_object* x_19269; lean_object* x_19270; +lean_dec(x_19247); +lean_dec(x_19242); +lean_dec(x_19199); +lean_dec(x_19053); +lean_dec(x_17707); +lean_dec(x_153); +lean_dec(x_5); +lean_dec(x_4); +lean_dec(x_2); +lean_dec(x_1); +x_19267 = lean_ctor_get(x_19250, 0); +lean_inc(x_19267); +x_19268 = lean_ctor_get(x_19250, 1); +lean_inc(x_19268); +if (lean_is_exclusive(x_19250)) { + lean_ctor_release(x_19250, 0); + lean_ctor_release(x_19250, 1); + x_19269 = x_19250; +} else { + lean_dec_ref(x_19250); + x_19269 = lean_box(0); +} +if (lean_is_scalar(x_19269)) { + x_19270 = lean_alloc_ctor(1, 2, 0); +} else { + x_19270 = x_19269; +} +lean_ctor_set(x_19270, 0, x_19267); +lean_ctor_set(x_19270, 1, x_19268); +return x_19270; +} +} +} +else +{ +lean_object* x_19271; lean_object* x_19272; lean_object* x_19273; lean_object* x_19274; lean_object* x_19275; lean_object* x_19276; lean_object* x_19277; lean_object* x_19278; lean_object* x_19279; +lean_dec(x_19202); +lean_dec(x_19200); +lean_inc(x_17707); +lean_inc(x_153); +if (lean_is_scalar(x_19197)) { + x_19271 = lean_alloc_ctor(7, 2, 0); +} else { + x_19271 = x_19197; + lean_ctor_set_tag(x_19271, 7); +} +lean_ctor_set(x_19271, 0, x_153); +lean_ctor_set(x_19271, 1, x_17707); +x_19272 = lean_ctor_get(x_1, 0); +lean_inc(x_19272); +x_19273 = l_Lean_IR_ToIR_bindVar(x_19272, x_19049, x_4, x_5, x_19196); +x_19274 = lean_ctor_get(x_19273, 0); +lean_inc(x_19274); +x_19275 = lean_ctor_get(x_19273, 1); +lean_inc(x_19275); +lean_dec(x_19273); +x_19276 = lean_ctor_get(x_19274, 0); +lean_inc(x_19276); +x_19277 = lean_ctor_get(x_19274, 1); +lean_inc(x_19277); +lean_dec(x_19274); +x_19278 = lean_box(7); +lean_inc(x_5); +lean_inc(x_4); +lean_inc(x_2); +x_19279 = l_Lean_IR_ToIR_lowerLet___lambda__1(x_2, x_19276, x_19271, x_19278, x_19277, x_4, x_5, x_19275); +if (lean_obj_tag(x_19279) == 0) +{ +lean_object* x_19280; lean_object* x_19281; lean_object* x_19282; lean_object* x_19283; lean_object* x_19284; lean_object* x_19285; lean_object* x_19286; +x_19280 = lean_ctor_get(x_19279, 0); +lean_inc(x_19280); 
+x_19281 = lean_ctor_get(x_19279, 1); +lean_inc(x_19281); +lean_dec(x_19279); +x_19282 = lean_ctor_get(x_19280, 0); +lean_inc(x_19282); +x_19283 = lean_ctor_get(x_19280, 1); +lean_inc(x_19283); +if (lean_is_exclusive(x_19280)) { + lean_ctor_release(x_19280, 0); + lean_ctor_release(x_19280, 1); + x_19284 = x_19280; +} else { + lean_dec_ref(x_19280); + x_19284 = lean_box(0); +} +if (lean_is_scalar(x_19199)) { + x_19285 = lean_alloc_ctor(1, 1, 0); +} else { + x_19285 = x_19199; +} +lean_ctor_set(x_19285, 0, x_19282); +if (lean_is_scalar(x_19284)) { + x_19286 = lean_alloc_ctor(0, 2, 0); +} else { + x_19286 = x_19284; +} +lean_ctor_set(x_19286, 0, x_19285); +lean_ctor_set(x_19286, 1, x_19283); +x_19163 = x_19286; +x_19164 = x_19281; +goto block_19190; +} +else +{ +lean_object* x_19287; lean_object* x_19288; lean_object* x_19289; lean_object* x_19290; +lean_dec(x_19199); +lean_dec(x_19053); +lean_dec(x_17707); +lean_dec(x_153); +lean_dec(x_5); +lean_dec(x_4); +lean_dec(x_2); +lean_dec(x_1); +x_19287 = lean_ctor_get(x_19279, 0); +lean_inc(x_19287); +x_19288 = lean_ctor_get(x_19279, 1); +lean_inc(x_19288); +if (lean_is_exclusive(x_19279)) { + lean_ctor_release(x_19279, 0); + lean_ctor_release(x_19279, 1); + x_19289 = x_19279; +} else { + lean_dec_ref(x_19279); + x_19289 = lean_box(0); +} +if (lean_is_scalar(x_19289)) { + x_19290 = lean_alloc_ctor(1, 2, 0); +} else { + x_19290 = x_19289; +} +lean_ctor_set(x_19290, 0, x_19287); +lean_ctor_set(x_19290, 1, x_19288); +return x_19290; +} +} +} +block_19190: +{ +lean_object* x_19165; +x_19165 = lean_ctor_get(x_19163, 0); +lean_inc(x_19165); +if (lean_obj_tag(x_19165) == 0) +{ +lean_object* x_19166; lean_object* x_19167; lean_object* x_19168; lean_object* x_19169; lean_object* x_19170; lean_object* x_19171; lean_object* x_19172; lean_object* x_19173; lean_object* x_19174; lean_object* x_19175; +lean_dec(x_19053); +x_19166 = lean_ctor_get(x_19163, 1); +lean_inc(x_19166); +lean_dec(x_19163); +x_19167 = lean_alloc_ctor(6, 2, 0); +lean_ctor_set(x_19167, 0, x_153); +lean_ctor_set(x_19167, 1, x_17707); +x_19168 = lean_ctor_get(x_1, 0); +lean_inc(x_19168); +x_19169 = l_Lean_IR_ToIR_bindVar(x_19168, x_19166, x_4, x_5, x_19164); +x_19170 = lean_ctor_get(x_19169, 0); +lean_inc(x_19170); +x_19171 = lean_ctor_get(x_19169, 1); +lean_inc(x_19171); +lean_dec(x_19169); +x_19172 = lean_ctor_get(x_19170, 0); +lean_inc(x_19172); +x_19173 = lean_ctor_get(x_19170, 1); +lean_inc(x_19173); +lean_dec(x_19170); +x_19174 = lean_ctor_get(x_1, 2); +lean_inc(x_19174); +lean_dec(x_1); +lean_inc(x_5); +lean_inc(x_4); +x_19175 = l_Lean_IR_ToIR_lowerType(x_19174, x_19173, x_4, x_5, x_19171); +if (lean_obj_tag(x_19175) == 0) +{ +lean_object* x_19176; lean_object* x_19177; lean_object* x_19178; lean_object* x_19179; lean_object* x_19180; +x_19176 = lean_ctor_get(x_19175, 0); +lean_inc(x_19176); +x_19177 = lean_ctor_get(x_19175, 1); +lean_inc(x_19177); +lean_dec(x_19175); +x_19178 = lean_ctor_get(x_19176, 0); +lean_inc(x_19178); +x_19179 = lean_ctor_get(x_19176, 1); +lean_inc(x_19179); +lean_dec(x_19176); +x_19180 = l_Lean_IR_ToIR_lowerLet___lambda__1(x_2, x_19172, x_19167, x_19178, x_19179, x_4, x_5, x_19177); +return x_19180; +} +else +{ +lean_object* x_19181; lean_object* x_19182; lean_object* x_19183; lean_object* x_19184; +lean_dec(x_19172); +lean_dec(x_19167); +lean_dec(x_5); +lean_dec(x_4); +lean_dec(x_2); +x_19181 = lean_ctor_get(x_19175, 0); +lean_inc(x_19181); +x_19182 = lean_ctor_get(x_19175, 1); +lean_inc(x_19182); +if (lean_is_exclusive(x_19175)) { + lean_ctor_release(x_19175, 
0); + lean_ctor_release(x_19175, 1); + x_19183 = x_19175; +} else { + lean_dec_ref(x_19175); + x_19183 = lean_box(0); +} +if (lean_is_scalar(x_19183)) { + x_19184 = lean_alloc_ctor(1, 2, 0); +} else { + x_19184 = x_19183; +} +lean_ctor_set(x_19184, 0, x_19181); +lean_ctor_set(x_19184, 1, x_19182); +return x_19184; +} +} +else +{ +lean_object* x_19185; lean_object* x_19186; lean_object* x_19187; lean_object* x_19188; lean_object* x_19189; +lean_dec(x_17707); +lean_dec(x_153); +lean_dec(x_5); +lean_dec(x_4); +lean_dec(x_2); +lean_dec(x_1); +x_19185 = lean_ctor_get(x_19163, 1); +lean_inc(x_19185); +if (lean_is_exclusive(x_19163)) { + lean_ctor_release(x_19163, 0); + lean_ctor_release(x_19163, 1); + x_19186 = x_19163; +} else { + lean_dec_ref(x_19163); + x_19186 = lean_box(0); +} +x_19187 = lean_ctor_get(x_19165, 0); +lean_inc(x_19187); +lean_dec(x_19165); +if (lean_is_scalar(x_19186)) { + x_19188 = lean_alloc_ctor(0, 2, 0); +} else { + x_19188 = x_19186; +} +lean_ctor_set(x_19188, 0, x_19187); +lean_ctor_set(x_19188, 1, x_19185); +if (lean_is_scalar(x_19053)) { + x_19189 = lean_alloc_ctor(0, 2, 0); +} else { + x_19189 = x_19053; +} +lean_ctor_set(x_19189, 0, x_19188); +lean_ctor_set(x_19189, 1, x_19164); +return x_19189; +} +} +} +case 2: +{ +lean_object* x_19291; lean_object* x_19292; +lean_dec(x_19059); +lean_dec(x_19054); +lean_dec(x_19053); +lean_dec(x_17707); +lean_dec(x_17699); +lean_dec(x_17698); +lean_dec(x_153); +lean_dec(x_2); +lean_dec(x_1); +x_19291 = l_Lean_IR_ToIR_lowerLet___closed__18; +x_19292 = l_panic___at_Lean_IR_ToIR_lowerAlt_loop___spec__1(x_19291, x_19049, x_4, x_5, x_19052); +return x_19292; +} +case 3: +{ +lean_object* x_19293; lean_object* x_19294; lean_object* x_19321; lean_object* x_19322; +lean_dec(x_19059); +lean_dec(x_19054); +lean_dec(x_17699); +lean_dec(x_17698); +lean_inc(x_153); +x_19321 = l_Lean_Compiler_LCNF_getMonoDecl_x3f(x_153, x_4, x_5, x_19052); +x_19322 = lean_ctor_get(x_19321, 0); +lean_inc(x_19322); +if (lean_obj_tag(x_19322) == 0) +{ +lean_object* x_19323; lean_object* x_19324; lean_object* x_19325; +x_19323 = lean_ctor_get(x_19321, 1); +lean_inc(x_19323); +lean_dec(x_19321); +x_19324 = lean_box(0); +x_19325 = lean_alloc_ctor(0, 2, 0); +lean_ctor_set(x_19325, 0, x_19324); +lean_ctor_set(x_19325, 1, x_19049); +x_19293 = x_19325; +x_19294 = x_19323; +goto block_19320; +} +else +{ +lean_object* x_19326; lean_object* x_19327; lean_object* x_19328; lean_object* x_19329; lean_object* x_19330; lean_object* x_19331; lean_object* x_19332; uint8_t x_19333; +x_19326 = lean_ctor_get(x_19321, 1); +lean_inc(x_19326); +if (lean_is_exclusive(x_19321)) { + lean_ctor_release(x_19321, 0); + lean_ctor_release(x_19321, 1); + x_19327 = x_19321; +} else { + lean_dec_ref(x_19321); + x_19327 = lean_box(0); +} +x_19328 = lean_ctor_get(x_19322, 0); +lean_inc(x_19328); +if (lean_is_exclusive(x_19322)) { + lean_ctor_release(x_19322, 0); + x_19329 = x_19322; +} else { + lean_dec_ref(x_19322); + x_19329 = lean_box(0); +} +x_19330 = lean_array_get_size(x_17707); +x_19331 = lean_ctor_get(x_19328, 3); +lean_inc(x_19331); +lean_dec(x_19328); +x_19332 = lean_array_get_size(x_19331); +lean_dec(x_19331); +x_19333 = lean_nat_dec_lt(x_19330, x_19332); +if (x_19333 == 0) +{ +uint8_t x_19334; +x_19334 = lean_nat_dec_eq(x_19330, x_19332); +if (x_19334 == 0) +{ +lean_object* x_19335; lean_object* x_19336; lean_object* x_19337; lean_object* x_19338; lean_object* x_19339; lean_object* x_19340; lean_object* x_19341; lean_object* x_19342; lean_object* x_19343; lean_object* x_19344; lean_object* 
x_19345; lean_object* x_19346; lean_object* x_19347; lean_object* x_19348; lean_object* x_19349; lean_object* x_19350; lean_object* x_19351; +x_19335 = lean_unsigned_to_nat(0u); +x_19336 = l_Array_extract___rarg(x_17707, x_19335, x_19332); +x_19337 = l_Array_extract___rarg(x_17707, x_19332, x_19330); +lean_dec(x_19330); +lean_inc(x_153); +if (lean_is_scalar(x_19327)) { + x_19338 = lean_alloc_ctor(6, 2, 0); +} else { + x_19338 = x_19327; + lean_ctor_set_tag(x_19338, 6); +} +lean_ctor_set(x_19338, 0, x_153); +lean_ctor_set(x_19338, 1, x_19336); +x_19339 = lean_ctor_get(x_1, 0); +lean_inc(x_19339); +x_19340 = l_Lean_IR_ToIR_bindVar(x_19339, x_19049, x_4, x_5, x_19326); +x_19341 = lean_ctor_get(x_19340, 0); +lean_inc(x_19341); +x_19342 = lean_ctor_get(x_19340, 1); +lean_inc(x_19342); +lean_dec(x_19340); +x_19343 = lean_ctor_get(x_19341, 0); +lean_inc(x_19343); +x_19344 = lean_ctor_get(x_19341, 1); +lean_inc(x_19344); +lean_dec(x_19341); +x_19345 = l_Lean_IR_ToIR_newVar(x_19344, x_4, x_5, x_19342); +x_19346 = lean_ctor_get(x_19345, 0); +lean_inc(x_19346); +x_19347 = lean_ctor_get(x_19345, 1); +lean_inc(x_19347); +lean_dec(x_19345); +x_19348 = lean_ctor_get(x_19346, 0); +lean_inc(x_19348); +x_19349 = lean_ctor_get(x_19346, 1); +lean_inc(x_19349); +lean_dec(x_19346); +x_19350 = lean_ctor_get(x_1, 2); +lean_inc(x_19350); +lean_inc(x_5); +lean_inc(x_4); +x_19351 = l_Lean_IR_ToIR_lowerType(x_19350, x_19349, x_4, x_5, x_19347); +if (lean_obj_tag(x_19351) == 0) +{ +lean_object* x_19352; lean_object* x_19353; lean_object* x_19354; lean_object* x_19355; lean_object* x_19356; +x_19352 = lean_ctor_get(x_19351, 0); +lean_inc(x_19352); +x_19353 = lean_ctor_get(x_19351, 1); +lean_inc(x_19353); +lean_dec(x_19351); +x_19354 = lean_ctor_get(x_19352, 0); +lean_inc(x_19354); +x_19355 = lean_ctor_get(x_19352, 1); +lean_inc(x_19355); +lean_dec(x_19352); +lean_inc(x_5); +lean_inc(x_4); +lean_inc(x_2); +x_19356 = l_Lean_IR_ToIR_lowerLet___lambda__3(x_2, x_19348, x_19337, x_19343, x_19338, x_19354, x_19355, x_4, x_5, x_19353); +if (lean_obj_tag(x_19356) == 0) +{ +lean_object* x_19357; lean_object* x_19358; lean_object* x_19359; lean_object* x_19360; lean_object* x_19361; lean_object* x_19362; lean_object* x_19363; +x_19357 = lean_ctor_get(x_19356, 0); +lean_inc(x_19357); +x_19358 = lean_ctor_get(x_19356, 1); +lean_inc(x_19358); +lean_dec(x_19356); +x_19359 = lean_ctor_get(x_19357, 0); +lean_inc(x_19359); +x_19360 = lean_ctor_get(x_19357, 1); +lean_inc(x_19360); +if (lean_is_exclusive(x_19357)) { + lean_ctor_release(x_19357, 0); + lean_ctor_release(x_19357, 1); + x_19361 = x_19357; +} else { + lean_dec_ref(x_19357); + x_19361 = lean_box(0); +} +if (lean_is_scalar(x_19329)) { + x_19362 = lean_alloc_ctor(1, 1, 0); +} else { + x_19362 = x_19329; +} +lean_ctor_set(x_19362, 0, x_19359); +if (lean_is_scalar(x_19361)) { + x_19363 = lean_alloc_ctor(0, 2, 0); +} else { + x_19363 = x_19361; +} +lean_ctor_set(x_19363, 0, x_19362); +lean_ctor_set(x_19363, 1, x_19360); +x_19293 = x_19363; +x_19294 = x_19358; +goto block_19320; +} +else +{ +lean_object* x_19364; lean_object* x_19365; lean_object* x_19366; lean_object* x_19367; +lean_dec(x_19329); +lean_dec(x_19053); +lean_dec(x_17707); +lean_dec(x_153); +lean_dec(x_5); +lean_dec(x_4); +lean_dec(x_2); +lean_dec(x_1); +x_19364 = lean_ctor_get(x_19356, 0); +lean_inc(x_19364); +x_19365 = lean_ctor_get(x_19356, 1); +lean_inc(x_19365); +if (lean_is_exclusive(x_19356)) { + lean_ctor_release(x_19356, 0); + lean_ctor_release(x_19356, 1); + x_19366 = x_19356; +} else { + 
lean_dec_ref(x_19356); + x_19366 = lean_box(0); +} +if (lean_is_scalar(x_19366)) { + x_19367 = lean_alloc_ctor(1, 2, 0); +} else { + x_19367 = x_19366; +} +lean_ctor_set(x_19367, 0, x_19364); +lean_ctor_set(x_19367, 1, x_19365); +return x_19367; +} +} +else +{ +lean_object* x_19368; lean_object* x_19369; lean_object* x_19370; lean_object* x_19371; +lean_dec(x_19348); +lean_dec(x_19343); +lean_dec(x_19338); +lean_dec(x_19337); +lean_dec(x_19329); +lean_dec(x_19053); +lean_dec(x_17707); +lean_dec(x_153); +lean_dec(x_5); +lean_dec(x_4); +lean_dec(x_2); +lean_dec(x_1); +x_19368 = lean_ctor_get(x_19351, 0); +lean_inc(x_19368); +x_19369 = lean_ctor_get(x_19351, 1); +lean_inc(x_19369); +if (lean_is_exclusive(x_19351)) { + lean_ctor_release(x_19351, 0); + lean_ctor_release(x_19351, 1); + x_19370 = x_19351; +} else { + lean_dec_ref(x_19351); + x_19370 = lean_box(0); +} +if (lean_is_scalar(x_19370)) { + x_19371 = lean_alloc_ctor(1, 2, 0); +} else { + x_19371 = x_19370; +} +lean_ctor_set(x_19371, 0, x_19368); +lean_ctor_set(x_19371, 1, x_19369); +return x_19371; +} +} +else +{ +lean_object* x_19372; lean_object* x_19373; lean_object* x_19374; lean_object* x_19375; lean_object* x_19376; lean_object* x_19377; lean_object* x_19378; lean_object* x_19379; lean_object* x_19380; +lean_dec(x_19332); +lean_dec(x_19330); +lean_inc(x_17707); +lean_inc(x_153); +if (lean_is_scalar(x_19327)) { + x_19372 = lean_alloc_ctor(6, 2, 0); +} else { + x_19372 = x_19327; + lean_ctor_set_tag(x_19372, 6); +} +lean_ctor_set(x_19372, 0, x_153); +lean_ctor_set(x_19372, 1, x_17707); +x_19373 = lean_ctor_get(x_1, 0); +lean_inc(x_19373); +x_19374 = l_Lean_IR_ToIR_bindVar(x_19373, x_19049, x_4, x_5, x_19326); +x_19375 = lean_ctor_get(x_19374, 0); +lean_inc(x_19375); +x_19376 = lean_ctor_get(x_19374, 1); +lean_inc(x_19376); +lean_dec(x_19374); +x_19377 = lean_ctor_get(x_19375, 0); +lean_inc(x_19377); +x_19378 = lean_ctor_get(x_19375, 1); +lean_inc(x_19378); +lean_dec(x_19375); +x_19379 = lean_ctor_get(x_1, 2); +lean_inc(x_19379); +lean_inc(x_5); +lean_inc(x_4); +x_19380 = l_Lean_IR_ToIR_lowerType(x_19379, x_19378, x_4, x_5, x_19376); +if (lean_obj_tag(x_19380) == 0) +{ +lean_object* x_19381; lean_object* x_19382; lean_object* x_19383; lean_object* x_19384; lean_object* x_19385; +x_19381 = lean_ctor_get(x_19380, 0); +lean_inc(x_19381); +x_19382 = lean_ctor_get(x_19380, 1); +lean_inc(x_19382); +lean_dec(x_19380); +x_19383 = lean_ctor_get(x_19381, 0); +lean_inc(x_19383); +x_19384 = lean_ctor_get(x_19381, 1); +lean_inc(x_19384); +lean_dec(x_19381); +lean_inc(x_5); +lean_inc(x_4); +lean_inc(x_2); +x_19385 = l_Lean_IR_ToIR_lowerLet___lambda__1(x_2, x_19377, x_19372, x_19383, x_19384, x_4, x_5, x_19382); +if (lean_obj_tag(x_19385) == 0) +{ +lean_object* x_19386; lean_object* x_19387; lean_object* x_19388; lean_object* x_19389; lean_object* x_19390; lean_object* x_19391; lean_object* x_19392; +x_19386 = lean_ctor_get(x_19385, 0); +lean_inc(x_19386); +x_19387 = lean_ctor_get(x_19385, 1); +lean_inc(x_19387); +lean_dec(x_19385); +x_19388 = lean_ctor_get(x_19386, 0); +lean_inc(x_19388); +x_19389 = lean_ctor_get(x_19386, 1); +lean_inc(x_19389); +if (lean_is_exclusive(x_19386)) { + lean_ctor_release(x_19386, 0); + lean_ctor_release(x_19386, 1); + x_19390 = x_19386; +} else { + lean_dec_ref(x_19386); + x_19390 = lean_box(0); +} +if (lean_is_scalar(x_19329)) { + x_19391 = lean_alloc_ctor(1, 1, 0); +} else { + x_19391 = x_19329; +} +lean_ctor_set(x_19391, 0, x_19388); +if (lean_is_scalar(x_19390)) { + x_19392 = lean_alloc_ctor(0, 2, 0); +} else { + 
x_19392 = x_19390; +} +lean_ctor_set(x_19392, 0, x_19391); +lean_ctor_set(x_19392, 1, x_19389); +x_19293 = x_19392; +x_19294 = x_19387; +goto block_19320; +} +else +{ +lean_object* x_19393; lean_object* x_19394; lean_object* x_19395; lean_object* x_19396; +lean_dec(x_19329); +lean_dec(x_19053); +lean_dec(x_17707); +lean_dec(x_153); +lean_dec(x_5); +lean_dec(x_4); +lean_dec(x_2); +lean_dec(x_1); +x_19393 = lean_ctor_get(x_19385, 0); +lean_inc(x_19393); +x_19394 = lean_ctor_get(x_19385, 1); +lean_inc(x_19394); +if (lean_is_exclusive(x_19385)) { + lean_ctor_release(x_19385, 0); + lean_ctor_release(x_19385, 1); + x_19395 = x_19385; +} else { + lean_dec_ref(x_19385); + x_19395 = lean_box(0); +} +if (lean_is_scalar(x_19395)) { + x_19396 = lean_alloc_ctor(1, 2, 0); +} else { + x_19396 = x_19395; +} +lean_ctor_set(x_19396, 0, x_19393); +lean_ctor_set(x_19396, 1, x_19394); +return x_19396; +} +} +else +{ +lean_object* x_19397; lean_object* x_19398; lean_object* x_19399; lean_object* x_19400; +lean_dec(x_19377); +lean_dec(x_19372); +lean_dec(x_19329); +lean_dec(x_19053); +lean_dec(x_17707); +lean_dec(x_153); +lean_dec(x_5); +lean_dec(x_4); +lean_dec(x_2); +lean_dec(x_1); +x_19397 = lean_ctor_get(x_19380, 0); +lean_inc(x_19397); +x_19398 = lean_ctor_get(x_19380, 1); +lean_inc(x_19398); +if (lean_is_exclusive(x_19380)) { + lean_ctor_release(x_19380, 0); + lean_ctor_release(x_19380, 1); + x_19399 = x_19380; +} else { + lean_dec_ref(x_19380); + x_19399 = lean_box(0); +} +if (lean_is_scalar(x_19399)) { + x_19400 = lean_alloc_ctor(1, 2, 0); +} else { + x_19400 = x_19399; +} +lean_ctor_set(x_19400, 0, x_19397); +lean_ctor_set(x_19400, 1, x_19398); +return x_19400; +} +} +} +else +{ +lean_object* x_19401; lean_object* x_19402; lean_object* x_19403; lean_object* x_19404; lean_object* x_19405; lean_object* x_19406; lean_object* x_19407; lean_object* x_19408; lean_object* x_19409; +lean_dec(x_19332); +lean_dec(x_19330); +lean_inc(x_17707); +lean_inc(x_153); +if (lean_is_scalar(x_19327)) { + x_19401 = lean_alloc_ctor(7, 2, 0); +} else { + x_19401 = x_19327; + lean_ctor_set_tag(x_19401, 7); +} +lean_ctor_set(x_19401, 0, x_153); +lean_ctor_set(x_19401, 1, x_17707); +x_19402 = lean_ctor_get(x_1, 0); +lean_inc(x_19402); +x_19403 = l_Lean_IR_ToIR_bindVar(x_19402, x_19049, x_4, x_5, x_19326); +x_19404 = lean_ctor_get(x_19403, 0); +lean_inc(x_19404); +x_19405 = lean_ctor_get(x_19403, 1); +lean_inc(x_19405); +lean_dec(x_19403); +x_19406 = lean_ctor_get(x_19404, 0); +lean_inc(x_19406); +x_19407 = lean_ctor_get(x_19404, 1); +lean_inc(x_19407); +lean_dec(x_19404); +x_19408 = lean_box(7); +lean_inc(x_5); +lean_inc(x_4); +lean_inc(x_2); +x_19409 = l_Lean_IR_ToIR_lowerLet___lambda__1(x_2, x_19406, x_19401, x_19408, x_19407, x_4, x_5, x_19405); +if (lean_obj_tag(x_19409) == 0) +{ +lean_object* x_19410; lean_object* x_19411; lean_object* x_19412; lean_object* x_19413; lean_object* x_19414; lean_object* x_19415; lean_object* x_19416; +x_19410 = lean_ctor_get(x_19409, 0); +lean_inc(x_19410); +x_19411 = lean_ctor_get(x_19409, 1); +lean_inc(x_19411); +lean_dec(x_19409); +x_19412 = lean_ctor_get(x_19410, 0); +lean_inc(x_19412); +x_19413 = lean_ctor_get(x_19410, 1); +lean_inc(x_19413); +if (lean_is_exclusive(x_19410)) { + lean_ctor_release(x_19410, 0); + lean_ctor_release(x_19410, 1); + x_19414 = x_19410; +} else { + lean_dec_ref(x_19410); + x_19414 = lean_box(0); +} +if (lean_is_scalar(x_19329)) { + x_19415 = lean_alloc_ctor(1, 1, 0); +} else { + x_19415 = x_19329; +} +lean_ctor_set(x_19415, 0, x_19412); +if 
(lean_is_scalar(x_19414)) { + x_19416 = lean_alloc_ctor(0, 2, 0); +} else { + x_19416 = x_19414; +} +lean_ctor_set(x_19416, 0, x_19415); +lean_ctor_set(x_19416, 1, x_19413); +x_19293 = x_19416; +x_19294 = x_19411; +goto block_19320; +} +else +{ +lean_object* x_19417; lean_object* x_19418; lean_object* x_19419; lean_object* x_19420; +lean_dec(x_19329); +lean_dec(x_19053); +lean_dec(x_17707); +lean_dec(x_153); +lean_dec(x_5); +lean_dec(x_4); +lean_dec(x_2); +lean_dec(x_1); +x_19417 = lean_ctor_get(x_19409, 0); +lean_inc(x_19417); +x_19418 = lean_ctor_get(x_19409, 1); +lean_inc(x_19418); +if (lean_is_exclusive(x_19409)) { + lean_ctor_release(x_19409, 0); + lean_ctor_release(x_19409, 1); + x_19419 = x_19409; +} else { + lean_dec_ref(x_19409); + x_19419 = lean_box(0); +} +if (lean_is_scalar(x_19419)) { + x_19420 = lean_alloc_ctor(1, 2, 0); +} else { + x_19420 = x_19419; +} +lean_ctor_set(x_19420, 0, x_19417); +lean_ctor_set(x_19420, 1, x_19418); +return x_19420; +} +} +} +block_19320: +{ +lean_object* x_19295; +x_19295 = lean_ctor_get(x_19293, 0); +lean_inc(x_19295); +if (lean_obj_tag(x_19295) == 0) +{ +lean_object* x_19296; lean_object* x_19297; lean_object* x_19298; lean_object* x_19299; lean_object* x_19300; lean_object* x_19301; lean_object* x_19302; lean_object* x_19303; lean_object* x_19304; lean_object* x_19305; +lean_dec(x_19053); +x_19296 = lean_ctor_get(x_19293, 1); +lean_inc(x_19296); +lean_dec(x_19293); +x_19297 = lean_alloc_ctor(6, 2, 0); +lean_ctor_set(x_19297, 0, x_153); +lean_ctor_set(x_19297, 1, x_17707); +x_19298 = lean_ctor_get(x_1, 0); +lean_inc(x_19298); +x_19299 = l_Lean_IR_ToIR_bindVar(x_19298, x_19296, x_4, x_5, x_19294); +x_19300 = lean_ctor_get(x_19299, 0); +lean_inc(x_19300); +x_19301 = lean_ctor_get(x_19299, 1); +lean_inc(x_19301); +lean_dec(x_19299); +x_19302 = lean_ctor_get(x_19300, 0); +lean_inc(x_19302); +x_19303 = lean_ctor_get(x_19300, 1); +lean_inc(x_19303); +lean_dec(x_19300); +x_19304 = lean_ctor_get(x_1, 2); +lean_inc(x_19304); +lean_dec(x_1); +lean_inc(x_5); +lean_inc(x_4); +x_19305 = l_Lean_IR_ToIR_lowerType(x_19304, x_19303, x_4, x_5, x_19301); +if (lean_obj_tag(x_19305) == 0) +{ +lean_object* x_19306; lean_object* x_19307; lean_object* x_19308; lean_object* x_19309; lean_object* x_19310; +x_19306 = lean_ctor_get(x_19305, 0); +lean_inc(x_19306); +x_19307 = lean_ctor_get(x_19305, 1); +lean_inc(x_19307); +lean_dec(x_19305); +x_19308 = lean_ctor_get(x_19306, 0); +lean_inc(x_19308); +x_19309 = lean_ctor_get(x_19306, 1); +lean_inc(x_19309); +lean_dec(x_19306); +x_19310 = l_Lean_IR_ToIR_lowerLet___lambda__1(x_2, x_19302, x_19297, x_19308, x_19309, x_4, x_5, x_19307); +return x_19310; +} +else +{ +lean_object* x_19311; lean_object* x_19312; lean_object* x_19313; lean_object* x_19314; +lean_dec(x_19302); +lean_dec(x_19297); +lean_dec(x_5); +lean_dec(x_4); +lean_dec(x_2); +x_19311 = lean_ctor_get(x_19305, 0); +lean_inc(x_19311); +x_19312 = lean_ctor_get(x_19305, 1); +lean_inc(x_19312); +if (lean_is_exclusive(x_19305)) { + lean_ctor_release(x_19305, 0); + lean_ctor_release(x_19305, 1); + x_19313 = x_19305; +} else { + lean_dec_ref(x_19305); + x_19313 = lean_box(0); +} +if (lean_is_scalar(x_19313)) { + x_19314 = lean_alloc_ctor(1, 2, 0); +} else { + x_19314 = x_19313; +} +lean_ctor_set(x_19314, 0, x_19311); +lean_ctor_set(x_19314, 1, x_19312); +return x_19314; +} +} +else +{ +lean_object* x_19315; lean_object* x_19316; lean_object* x_19317; lean_object* x_19318; lean_object* x_19319; +lean_dec(x_17707); +lean_dec(x_153); +lean_dec(x_5); +lean_dec(x_4); 
+lean_dec(x_2); +lean_dec(x_1); +x_19315 = lean_ctor_get(x_19293, 1); +lean_inc(x_19315); +if (lean_is_exclusive(x_19293)) { + lean_ctor_release(x_19293, 0); + lean_ctor_release(x_19293, 1); + x_19316 = x_19293; +} else { + lean_dec_ref(x_19293); + x_19316 = lean_box(0); +} +x_19317 = lean_ctor_get(x_19295, 0); +lean_inc(x_19317); +lean_dec(x_19295); +if (lean_is_scalar(x_19316)) { + x_19318 = lean_alloc_ctor(0, 2, 0); +} else { + x_19318 = x_19316; +} +lean_ctor_set(x_19318, 0, x_19317); +lean_ctor_set(x_19318, 1, x_19315); +if (lean_is_scalar(x_19053)) { + x_19319 = lean_alloc_ctor(0, 2, 0); +} else { + x_19319 = x_19053; +} +lean_ctor_set(x_19319, 0, x_19318); +lean_ctor_set(x_19319, 1, x_19294); +return x_19319; +} +} +} +case 4: +{ +lean_object* x_19421; lean_object* x_19422; uint8_t x_19423; +lean_dec(x_19054); +lean_dec(x_19053); +lean_dec(x_17699); +lean_dec(x_17698); +if (lean_is_exclusive(x_19059)) { + lean_ctor_release(x_19059, 0); + x_19421 = x_19059; +} else { + lean_dec_ref(x_19059); + x_19421 = lean_box(0); +} +x_19422 = l_Lean_IR_ToIR_lowerLet___closed__20; +x_19423 = lean_name_eq(x_153, x_19422); +if (x_19423 == 0) +{ +uint8_t x_19424; lean_object* x_19425; lean_object* x_19426; lean_object* x_19427; lean_object* x_19428; lean_object* x_19429; lean_object* x_19430; lean_object* x_19431; lean_object* x_19432; lean_object* x_19433; +lean_dec(x_17707); +lean_dec(x_2); +lean_dec(x_1); +x_19424 = 1; +x_19425 = l_Lean_IR_ToIR_lowerLet___closed__14; +x_19426 = l_Lean_Name_toString(x_153, x_19424, x_19425); +if (lean_is_scalar(x_19421)) { + x_19427 = lean_alloc_ctor(3, 1, 0); +} else { + x_19427 = x_19421; + lean_ctor_set_tag(x_19427, 3); +} +lean_ctor_set(x_19427, 0, x_19426); +x_19428 = l_Lean_IR_ToIR_lowerLet___closed__22; +x_19429 = lean_alloc_ctor(5, 2, 0); +lean_ctor_set(x_19429, 0, x_19428); +lean_ctor_set(x_19429, 1, x_19427); +x_19430 = l_Lean_IR_ToIR_lowerLet___closed__24; +x_19431 = lean_alloc_ctor(5, 2, 0); +lean_ctor_set(x_19431, 0, x_19429); +lean_ctor_set(x_19431, 1, x_19430); +x_19432 = l_Lean_MessageData_ofFormat(x_19431); +x_19433 = l_Lean_throwError___at_Lean_IR_ToIR_lowerLet___spec__1(x_19432, x_19049, x_4, x_5, x_19052); +lean_dec(x_5); +lean_dec(x_4); +lean_dec(x_19049); +return x_19433; +} +else +{ +lean_object* x_19434; lean_object* x_19435; lean_object* x_19436; +lean_dec(x_19421); +lean_dec(x_153); +x_19434 = l_Lean_IR_instInhabitedArg; +x_19435 = lean_unsigned_to_nat(2u); +x_19436 = lean_array_get(x_19434, x_17707, x_19435); +lean_dec(x_17707); +if (lean_obj_tag(x_19436) == 0) +{ +lean_object* x_19437; lean_object* x_19438; lean_object* x_19439; lean_object* x_19440; lean_object* x_19441; lean_object* x_19442; lean_object* x_19443; +x_19437 = lean_ctor_get(x_19436, 0); +lean_inc(x_19437); +lean_dec(x_19436); +x_19438 = lean_ctor_get(x_1, 0); +lean_inc(x_19438); +lean_dec(x_1); +x_19439 = l_Lean_IR_ToIR_bindVarToVarId(x_19438, x_19437, x_19049, x_4, x_5, x_19052); +x_19440 = lean_ctor_get(x_19439, 0); +lean_inc(x_19440); +x_19441 = lean_ctor_get(x_19439, 1); +lean_inc(x_19441); +lean_dec(x_19439); +x_19442 = lean_ctor_get(x_19440, 1); +lean_inc(x_19442); +lean_dec(x_19440); +x_19443 = l_Lean_IR_ToIR_lowerCode(x_2, x_19442, x_4, x_5, x_19441); +return x_19443; +} +else +{ +lean_object* x_19444; lean_object* x_19445; lean_object* x_19446; lean_object* x_19447; lean_object* x_19448; lean_object* x_19449; +x_19444 = lean_ctor_get(x_1, 0); +lean_inc(x_19444); +lean_dec(x_1); +x_19445 = l_Lean_IR_ToIR_bindErased(x_19444, x_19049, x_4, x_5, x_19052); +x_19446 = 
lean_ctor_get(x_19445, 0); +lean_inc(x_19446); +x_19447 = lean_ctor_get(x_19445, 1); +lean_inc(x_19447); +lean_dec(x_19445); +x_19448 = lean_ctor_get(x_19446, 1); +lean_inc(x_19448); +lean_dec(x_19446); +x_19449 = l_Lean_IR_ToIR_lowerCode(x_2, x_19448, x_4, x_5, x_19447); +return x_19449; +} +} +} +case 5: +{ +lean_object* x_19450; lean_object* x_19451; +lean_dec(x_19059); +lean_dec(x_19054); +lean_dec(x_19053); +lean_dec(x_17707); +lean_dec(x_17699); +lean_dec(x_17698); +lean_dec(x_153); +lean_dec(x_2); +lean_dec(x_1); +x_19450 = l_Lean_IR_ToIR_lowerLet___closed__26; +x_19451 = l_panic___at_Lean_IR_ToIR_lowerAlt_loop___spec__1(x_19450, x_19049, x_4, x_5, x_19052); +return x_19451; +} +case 6: +{ +lean_object* x_19452; uint8_t x_19453; +x_19452 = lean_ctor_get(x_19059, 0); +lean_inc(x_19452); +lean_dec(x_19059); +lean_inc(x_153); +x_19453 = l_Lean_isExtern(x_19054, x_153); +if (x_19453 == 0) +{ +lean_object* x_19454; +lean_dec(x_19053); +lean_dec(x_17707); +lean_inc(x_5); +lean_inc(x_4); +x_19454 = l_Lean_IR_ToIR_getCtorInfo(x_153, x_19049, x_4, x_5, x_19052); +if (lean_obj_tag(x_19454) == 0) +{ +lean_object* x_19455; lean_object* x_19456; lean_object* x_19457; lean_object* x_19458; lean_object* x_19459; lean_object* x_19460; lean_object* x_19461; lean_object* x_19462; lean_object* x_19463; lean_object* x_19464; lean_object* x_19465; lean_object* x_19466; lean_object* x_19467; lean_object* x_19468; lean_object* x_19469; lean_object* x_19470; lean_object* x_19471; lean_object* x_19472; lean_object* x_19473; lean_object* x_19474; +x_19455 = lean_ctor_get(x_19454, 0); +lean_inc(x_19455); +x_19456 = lean_ctor_get(x_19455, 0); +lean_inc(x_19456); +x_19457 = lean_ctor_get(x_19454, 1); +lean_inc(x_19457); +lean_dec(x_19454); +x_19458 = lean_ctor_get(x_19455, 1); +lean_inc(x_19458); +lean_dec(x_19455); +x_19459 = lean_ctor_get(x_19456, 0); +lean_inc(x_19459); +x_19460 = lean_ctor_get(x_19456, 1); +lean_inc(x_19460); +lean_dec(x_19456); +x_19461 = lean_ctor_get(x_19452, 3); +lean_inc(x_19461); +lean_dec(x_19452); +x_19462 = lean_array_get_size(x_17698); +x_19463 = l_Array_extract___rarg(x_17698, x_19461, x_19462); +lean_dec(x_19462); +lean_dec(x_17698); +x_19464 = lean_array_get_size(x_19460); +x_19465 = lean_unsigned_to_nat(0u); +x_19466 = lean_unsigned_to_nat(1u); +if (lean_is_scalar(x_17699)) { + x_19467 = lean_alloc_ctor(0, 3, 0); +} else { + x_19467 = x_17699; + lean_ctor_set_tag(x_19467, 0); +} +lean_ctor_set(x_19467, 0, x_19465); +lean_ctor_set(x_19467, 1, x_19464); +lean_ctor_set(x_19467, 2, x_19466); +x_19468 = l_Lean_IR_ToIR_lowerLet___closed__27; +x_19469 = l_Std_Range_forIn_x27_loop___at_Lean_IR_ToIR_lowerLet___spec__8(x_19460, x_19463, x_19467, x_19467, x_19468, x_19465, lean_box(0), lean_box(0), x_19458, x_4, x_5, x_19457); +lean_dec(x_19467); +x_19470 = lean_ctor_get(x_19469, 0); +lean_inc(x_19470); +x_19471 = lean_ctor_get(x_19469, 1); +lean_inc(x_19471); +lean_dec(x_19469); +x_19472 = lean_ctor_get(x_19470, 0); +lean_inc(x_19472); +x_19473 = lean_ctor_get(x_19470, 1); +lean_inc(x_19473); +lean_dec(x_19470); +x_19474 = l_Lean_IR_ToIR_lowerLet___lambda__4(x_1, x_2, x_19459, x_19460, x_19463, x_19472, x_19473, x_4, x_5, x_19471); +lean_dec(x_19463); +lean_dec(x_19460); +return x_19474; +} +else +{ +lean_object* x_19475; lean_object* x_19476; lean_object* x_19477; lean_object* x_19478; +lean_dec(x_19452); +lean_dec(x_17699); +lean_dec(x_17698); +lean_dec(x_5); +lean_dec(x_4); +lean_dec(x_2); +lean_dec(x_1); +x_19475 = lean_ctor_get(x_19454, 0); +lean_inc(x_19475); +x_19476 = 
lean_ctor_get(x_19454, 1); +lean_inc(x_19476); +if (lean_is_exclusive(x_19454)) { + lean_ctor_release(x_19454, 0); + lean_ctor_release(x_19454, 1); + x_19477 = x_19454; +} else { + lean_dec_ref(x_19454); + x_19477 = lean_box(0); +} +if (lean_is_scalar(x_19477)) { + x_19478 = lean_alloc_ctor(1, 2, 0); +} else { + x_19478 = x_19477; +} +lean_ctor_set(x_19478, 0, x_19475); +lean_ctor_set(x_19478, 1, x_19476); +return x_19478; +} +} +else +{ +lean_object* x_19479; lean_object* x_19480; lean_object* x_19507; lean_object* x_19508; +lean_dec(x_19452); +lean_dec(x_17699); +lean_dec(x_17698); +lean_inc(x_153); +x_19507 = l_Lean_Compiler_LCNF_getMonoDecl_x3f(x_153, x_4, x_5, x_19052); +x_19508 = lean_ctor_get(x_19507, 0); +lean_inc(x_19508); +if (lean_obj_tag(x_19508) == 0) +{ +lean_object* x_19509; lean_object* x_19510; lean_object* x_19511; +x_19509 = lean_ctor_get(x_19507, 1); +lean_inc(x_19509); +lean_dec(x_19507); +x_19510 = lean_box(0); +x_19511 = lean_alloc_ctor(0, 2, 0); +lean_ctor_set(x_19511, 0, x_19510); +lean_ctor_set(x_19511, 1, x_19049); +x_19479 = x_19511; +x_19480 = x_19509; +goto block_19506; +} +else +{ +lean_object* x_19512; lean_object* x_19513; lean_object* x_19514; lean_object* x_19515; lean_object* x_19516; lean_object* x_19517; lean_object* x_19518; uint8_t x_19519; +x_19512 = lean_ctor_get(x_19507, 1); +lean_inc(x_19512); +if (lean_is_exclusive(x_19507)) { + lean_ctor_release(x_19507, 0); + lean_ctor_release(x_19507, 1); + x_19513 = x_19507; +} else { + lean_dec_ref(x_19507); + x_19513 = lean_box(0); +} +x_19514 = lean_ctor_get(x_19508, 0); +lean_inc(x_19514); +if (lean_is_exclusive(x_19508)) { + lean_ctor_release(x_19508, 0); + x_19515 = x_19508; +} else { + lean_dec_ref(x_19508); + x_19515 = lean_box(0); +} +x_19516 = lean_array_get_size(x_17707); +x_19517 = lean_ctor_get(x_19514, 3); +lean_inc(x_19517); +lean_dec(x_19514); +x_19518 = lean_array_get_size(x_19517); +lean_dec(x_19517); +x_19519 = lean_nat_dec_lt(x_19516, x_19518); +if (x_19519 == 0) +{ +uint8_t x_19520; +x_19520 = lean_nat_dec_eq(x_19516, x_19518); +if (x_19520 == 0) +{ +lean_object* x_19521; lean_object* x_19522; lean_object* x_19523; lean_object* x_19524; lean_object* x_19525; lean_object* x_19526; lean_object* x_19527; lean_object* x_19528; lean_object* x_19529; lean_object* x_19530; lean_object* x_19531; lean_object* x_19532; lean_object* x_19533; lean_object* x_19534; lean_object* x_19535; lean_object* x_19536; lean_object* x_19537; +x_19521 = lean_unsigned_to_nat(0u); +x_19522 = l_Array_extract___rarg(x_17707, x_19521, x_19518); +x_19523 = l_Array_extract___rarg(x_17707, x_19518, x_19516); +lean_dec(x_19516); +lean_inc(x_153); +if (lean_is_scalar(x_19513)) { + x_19524 = lean_alloc_ctor(6, 2, 0); +} else { + x_19524 = x_19513; + lean_ctor_set_tag(x_19524, 6); +} +lean_ctor_set(x_19524, 0, x_153); +lean_ctor_set(x_19524, 1, x_19522); +x_19525 = lean_ctor_get(x_1, 0); +lean_inc(x_19525); +x_19526 = l_Lean_IR_ToIR_bindVar(x_19525, x_19049, x_4, x_5, x_19512); +x_19527 = lean_ctor_get(x_19526, 0); +lean_inc(x_19527); +x_19528 = lean_ctor_get(x_19526, 1); +lean_inc(x_19528); +lean_dec(x_19526); +x_19529 = lean_ctor_get(x_19527, 0); +lean_inc(x_19529); +x_19530 = lean_ctor_get(x_19527, 1); +lean_inc(x_19530); +lean_dec(x_19527); +x_19531 = l_Lean_IR_ToIR_newVar(x_19530, x_4, x_5, x_19528); +x_19532 = lean_ctor_get(x_19531, 0); +lean_inc(x_19532); +x_19533 = lean_ctor_get(x_19531, 1); +lean_inc(x_19533); +lean_dec(x_19531); +x_19534 = lean_ctor_get(x_19532, 0); +lean_inc(x_19534); +x_19535 = 
lean_ctor_get(x_19532, 1); +lean_inc(x_19535); +lean_dec(x_19532); +x_19536 = lean_ctor_get(x_1, 2); +lean_inc(x_19536); +lean_inc(x_5); +lean_inc(x_4); +x_19537 = l_Lean_IR_ToIR_lowerType(x_19536, x_19535, x_4, x_5, x_19533); +if (lean_obj_tag(x_19537) == 0) +{ +lean_object* x_19538; lean_object* x_19539; lean_object* x_19540; lean_object* x_19541; lean_object* x_19542; +x_19538 = lean_ctor_get(x_19537, 0); +lean_inc(x_19538); +x_19539 = lean_ctor_get(x_19537, 1); +lean_inc(x_19539); +lean_dec(x_19537); +x_19540 = lean_ctor_get(x_19538, 0); +lean_inc(x_19540); +x_19541 = lean_ctor_get(x_19538, 1); +lean_inc(x_19541); +lean_dec(x_19538); +lean_inc(x_5); +lean_inc(x_4); +lean_inc(x_2); +x_19542 = l_Lean_IR_ToIR_lowerLet___lambda__3(x_2, x_19534, x_19523, x_19529, x_19524, x_19540, x_19541, x_4, x_5, x_19539); +if (lean_obj_tag(x_19542) == 0) +{ +lean_object* x_19543; lean_object* x_19544; lean_object* x_19545; lean_object* x_19546; lean_object* x_19547; lean_object* x_19548; lean_object* x_19549; +x_19543 = lean_ctor_get(x_19542, 0); +lean_inc(x_19543); +x_19544 = lean_ctor_get(x_19542, 1); +lean_inc(x_19544); +lean_dec(x_19542); +x_19545 = lean_ctor_get(x_19543, 0); +lean_inc(x_19545); +x_19546 = lean_ctor_get(x_19543, 1); +lean_inc(x_19546); +if (lean_is_exclusive(x_19543)) { + lean_ctor_release(x_19543, 0); + lean_ctor_release(x_19543, 1); + x_19547 = x_19543; +} else { + lean_dec_ref(x_19543); + x_19547 = lean_box(0); +} +if (lean_is_scalar(x_19515)) { + x_19548 = lean_alloc_ctor(1, 1, 0); +} else { + x_19548 = x_19515; +} +lean_ctor_set(x_19548, 0, x_19545); +if (lean_is_scalar(x_19547)) { + x_19549 = lean_alloc_ctor(0, 2, 0); +} else { + x_19549 = x_19547; +} +lean_ctor_set(x_19549, 0, x_19548); +lean_ctor_set(x_19549, 1, x_19546); +x_19479 = x_19549; +x_19480 = x_19544; +goto block_19506; +} +else +{ +lean_object* x_19550; lean_object* x_19551; lean_object* x_19552; lean_object* x_19553; +lean_dec(x_19515); +lean_dec(x_19053); +lean_dec(x_17707); +lean_dec(x_153); +lean_dec(x_5); +lean_dec(x_4); +lean_dec(x_2); +lean_dec(x_1); +x_19550 = lean_ctor_get(x_19542, 0); +lean_inc(x_19550); +x_19551 = lean_ctor_get(x_19542, 1); +lean_inc(x_19551); +if (lean_is_exclusive(x_19542)) { + lean_ctor_release(x_19542, 0); + lean_ctor_release(x_19542, 1); + x_19552 = x_19542; +} else { + lean_dec_ref(x_19542); + x_19552 = lean_box(0); +} +if (lean_is_scalar(x_19552)) { + x_19553 = lean_alloc_ctor(1, 2, 0); +} else { + x_19553 = x_19552; +} +lean_ctor_set(x_19553, 0, x_19550); +lean_ctor_set(x_19553, 1, x_19551); +return x_19553; +} +} +else +{ +lean_object* x_19554; lean_object* x_19555; lean_object* x_19556; lean_object* x_19557; +lean_dec(x_19534); +lean_dec(x_19529); +lean_dec(x_19524); +lean_dec(x_19523); +lean_dec(x_19515); +lean_dec(x_19053); +lean_dec(x_17707); +lean_dec(x_153); +lean_dec(x_5); +lean_dec(x_4); +lean_dec(x_2); +lean_dec(x_1); +x_19554 = lean_ctor_get(x_19537, 0); +lean_inc(x_19554); +x_19555 = lean_ctor_get(x_19537, 1); +lean_inc(x_19555); +if (lean_is_exclusive(x_19537)) { + lean_ctor_release(x_19537, 0); + lean_ctor_release(x_19537, 1); + x_19556 = x_19537; +} else { + lean_dec_ref(x_19537); + x_19556 = lean_box(0); +} +if (lean_is_scalar(x_19556)) { + x_19557 = lean_alloc_ctor(1, 2, 0); +} else { + x_19557 = x_19556; +} +lean_ctor_set(x_19557, 0, x_19554); +lean_ctor_set(x_19557, 1, x_19555); +return x_19557; +} +} +else +{ +lean_object* x_19558; lean_object* x_19559; lean_object* x_19560; lean_object* x_19561; lean_object* x_19562; lean_object* x_19563; lean_object* 
x_19564; lean_object* x_19565; lean_object* x_19566; +lean_dec(x_19518); +lean_dec(x_19516); +lean_inc(x_17707); +lean_inc(x_153); +if (lean_is_scalar(x_19513)) { + x_19558 = lean_alloc_ctor(6, 2, 0); +} else { + x_19558 = x_19513; + lean_ctor_set_tag(x_19558, 6); +} +lean_ctor_set(x_19558, 0, x_153); +lean_ctor_set(x_19558, 1, x_17707); +x_19559 = lean_ctor_get(x_1, 0); +lean_inc(x_19559); +x_19560 = l_Lean_IR_ToIR_bindVar(x_19559, x_19049, x_4, x_5, x_19512); +x_19561 = lean_ctor_get(x_19560, 0); +lean_inc(x_19561); +x_19562 = lean_ctor_get(x_19560, 1); +lean_inc(x_19562); +lean_dec(x_19560); +x_19563 = lean_ctor_get(x_19561, 0); +lean_inc(x_19563); +x_19564 = lean_ctor_get(x_19561, 1); +lean_inc(x_19564); +lean_dec(x_19561); +x_19565 = lean_ctor_get(x_1, 2); +lean_inc(x_19565); +lean_inc(x_5); +lean_inc(x_4); +x_19566 = l_Lean_IR_ToIR_lowerType(x_19565, x_19564, x_4, x_5, x_19562); +if (lean_obj_tag(x_19566) == 0) +{ +lean_object* x_19567; lean_object* x_19568; lean_object* x_19569; lean_object* x_19570; lean_object* x_19571; +x_19567 = lean_ctor_get(x_19566, 0); +lean_inc(x_19567); +x_19568 = lean_ctor_get(x_19566, 1); +lean_inc(x_19568); +lean_dec(x_19566); +x_19569 = lean_ctor_get(x_19567, 0); +lean_inc(x_19569); +x_19570 = lean_ctor_get(x_19567, 1); +lean_inc(x_19570); +lean_dec(x_19567); +lean_inc(x_5); +lean_inc(x_4); +lean_inc(x_2); +x_19571 = l_Lean_IR_ToIR_lowerLet___lambda__1(x_2, x_19563, x_19558, x_19569, x_19570, x_4, x_5, x_19568); +if (lean_obj_tag(x_19571) == 0) +{ +lean_object* x_19572; lean_object* x_19573; lean_object* x_19574; lean_object* x_19575; lean_object* x_19576; lean_object* x_19577; lean_object* x_19578; +x_19572 = lean_ctor_get(x_19571, 0); +lean_inc(x_19572); +x_19573 = lean_ctor_get(x_19571, 1); +lean_inc(x_19573); +lean_dec(x_19571); +x_19574 = lean_ctor_get(x_19572, 0); +lean_inc(x_19574); +x_19575 = lean_ctor_get(x_19572, 1); +lean_inc(x_19575); +if (lean_is_exclusive(x_19572)) { + lean_ctor_release(x_19572, 0); + lean_ctor_release(x_19572, 1); + x_19576 = x_19572; +} else { + lean_dec_ref(x_19572); + x_19576 = lean_box(0); +} +if (lean_is_scalar(x_19515)) { + x_19577 = lean_alloc_ctor(1, 1, 0); +} else { + x_19577 = x_19515; +} +lean_ctor_set(x_19577, 0, x_19574); +if (lean_is_scalar(x_19576)) { + x_19578 = lean_alloc_ctor(0, 2, 0); +} else { + x_19578 = x_19576; +} +lean_ctor_set(x_19578, 0, x_19577); +lean_ctor_set(x_19578, 1, x_19575); +x_19479 = x_19578; +x_19480 = x_19573; +goto block_19506; +} +else +{ +lean_object* x_19579; lean_object* x_19580; lean_object* x_19581; lean_object* x_19582; +lean_dec(x_19515); +lean_dec(x_19053); +lean_dec(x_17707); +lean_dec(x_153); +lean_dec(x_5); +lean_dec(x_4); +lean_dec(x_2); +lean_dec(x_1); +x_19579 = lean_ctor_get(x_19571, 0); +lean_inc(x_19579); +x_19580 = lean_ctor_get(x_19571, 1); +lean_inc(x_19580); +if (lean_is_exclusive(x_19571)) { + lean_ctor_release(x_19571, 0); + lean_ctor_release(x_19571, 1); + x_19581 = x_19571; +} else { + lean_dec_ref(x_19571); + x_19581 = lean_box(0); +} +if (lean_is_scalar(x_19581)) { + x_19582 = lean_alloc_ctor(1, 2, 0); +} else { + x_19582 = x_19581; +} +lean_ctor_set(x_19582, 0, x_19579); +lean_ctor_set(x_19582, 1, x_19580); +return x_19582; +} +} +else +{ +lean_object* x_19583; lean_object* x_19584; lean_object* x_19585; lean_object* x_19586; +lean_dec(x_19563); +lean_dec(x_19558); +lean_dec(x_19515); +lean_dec(x_19053); +lean_dec(x_17707); +lean_dec(x_153); +lean_dec(x_5); +lean_dec(x_4); +lean_dec(x_2); +lean_dec(x_1); +x_19583 = lean_ctor_get(x_19566, 0); 
+lean_inc(x_19583); +x_19584 = lean_ctor_get(x_19566, 1); +lean_inc(x_19584); +if (lean_is_exclusive(x_19566)) { + lean_ctor_release(x_19566, 0); + lean_ctor_release(x_19566, 1); + x_19585 = x_19566; +} else { + lean_dec_ref(x_19566); + x_19585 = lean_box(0); +} +if (lean_is_scalar(x_19585)) { + x_19586 = lean_alloc_ctor(1, 2, 0); +} else { + x_19586 = x_19585; +} +lean_ctor_set(x_19586, 0, x_19583); +lean_ctor_set(x_19586, 1, x_19584); +return x_19586; +} +} +} +else +{ +lean_object* x_19587; lean_object* x_19588; lean_object* x_19589; lean_object* x_19590; lean_object* x_19591; lean_object* x_19592; lean_object* x_19593; lean_object* x_19594; lean_object* x_19595; +lean_dec(x_19518); +lean_dec(x_19516); +lean_inc(x_17707); +lean_inc(x_153); +if (lean_is_scalar(x_19513)) { + x_19587 = lean_alloc_ctor(7, 2, 0); +} else { + x_19587 = x_19513; + lean_ctor_set_tag(x_19587, 7); +} +lean_ctor_set(x_19587, 0, x_153); +lean_ctor_set(x_19587, 1, x_17707); +x_19588 = lean_ctor_get(x_1, 0); +lean_inc(x_19588); +x_19589 = l_Lean_IR_ToIR_bindVar(x_19588, x_19049, x_4, x_5, x_19512); +x_19590 = lean_ctor_get(x_19589, 0); +lean_inc(x_19590); +x_19591 = lean_ctor_get(x_19589, 1); +lean_inc(x_19591); +lean_dec(x_19589); +x_19592 = lean_ctor_get(x_19590, 0); +lean_inc(x_19592); +x_19593 = lean_ctor_get(x_19590, 1); +lean_inc(x_19593); +lean_dec(x_19590); +x_19594 = lean_box(7); +lean_inc(x_5); +lean_inc(x_4); +lean_inc(x_2); +x_19595 = l_Lean_IR_ToIR_lowerLet___lambda__1(x_2, x_19592, x_19587, x_19594, x_19593, x_4, x_5, x_19591); +if (lean_obj_tag(x_19595) == 0) +{ +lean_object* x_19596; lean_object* x_19597; lean_object* x_19598; lean_object* x_19599; lean_object* x_19600; lean_object* x_19601; lean_object* x_19602; +x_19596 = lean_ctor_get(x_19595, 0); +lean_inc(x_19596); +x_19597 = lean_ctor_get(x_19595, 1); +lean_inc(x_19597); +lean_dec(x_19595); +x_19598 = lean_ctor_get(x_19596, 0); +lean_inc(x_19598); +x_19599 = lean_ctor_get(x_19596, 1); +lean_inc(x_19599); +if (lean_is_exclusive(x_19596)) { + lean_ctor_release(x_19596, 0); + lean_ctor_release(x_19596, 1); + x_19600 = x_19596; +} else { + lean_dec_ref(x_19596); + x_19600 = lean_box(0); +} +if (lean_is_scalar(x_19515)) { + x_19601 = lean_alloc_ctor(1, 1, 0); +} else { + x_19601 = x_19515; +} +lean_ctor_set(x_19601, 0, x_19598); +if (lean_is_scalar(x_19600)) { + x_19602 = lean_alloc_ctor(0, 2, 0); +} else { + x_19602 = x_19600; +} +lean_ctor_set(x_19602, 0, x_19601); +lean_ctor_set(x_19602, 1, x_19599); +x_19479 = x_19602; +x_19480 = x_19597; +goto block_19506; +} +else +{ +lean_object* x_19603; lean_object* x_19604; lean_object* x_19605; lean_object* x_19606; +lean_dec(x_19515); +lean_dec(x_19053); +lean_dec(x_17707); +lean_dec(x_153); +lean_dec(x_5); +lean_dec(x_4); +lean_dec(x_2); +lean_dec(x_1); +x_19603 = lean_ctor_get(x_19595, 0); +lean_inc(x_19603); +x_19604 = lean_ctor_get(x_19595, 1); +lean_inc(x_19604); +if (lean_is_exclusive(x_19595)) { + lean_ctor_release(x_19595, 0); + lean_ctor_release(x_19595, 1); + x_19605 = x_19595; +} else { + lean_dec_ref(x_19595); + x_19605 = lean_box(0); +} +if (lean_is_scalar(x_19605)) { + x_19606 = lean_alloc_ctor(1, 2, 0); +} else { + x_19606 = x_19605; +} +lean_ctor_set(x_19606, 0, x_19603); +lean_ctor_set(x_19606, 1, x_19604); +return x_19606; +} +} +} +block_19506: +{ +lean_object* x_19481; +x_19481 = lean_ctor_get(x_19479, 0); +lean_inc(x_19481); +if (lean_obj_tag(x_19481) == 0) +{ +lean_object* x_19482; lean_object* x_19483; lean_object* x_19484; lean_object* x_19485; lean_object* x_19486; lean_object* 
x_19487; lean_object* x_19488; lean_object* x_19489; lean_object* x_19490; lean_object* x_19491; +lean_dec(x_19053); +x_19482 = lean_ctor_get(x_19479, 1); +lean_inc(x_19482); +lean_dec(x_19479); +x_19483 = lean_alloc_ctor(6, 2, 0); +lean_ctor_set(x_19483, 0, x_153); +lean_ctor_set(x_19483, 1, x_17707); +x_19484 = lean_ctor_get(x_1, 0); +lean_inc(x_19484); +x_19485 = l_Lean_IR_ToIR_bindVar(x_19484, x_19482, x_4, x_5, x_19480); +x_19486 = lean_ctor_get(x_19485, 0); +lean_inc(x_19486); +x_19487 = lean_ctor_get(x_19485, 1); +lean_inc(x_19487); +lean_dec(x_19485); +x_19488 = lean_ctor_get(x_19486, 0); +lean_inc(x_19488); +x_19489 = lean_ctor_get(x_19486, 1); +lean_inc(x_19489); +lean_dec(x_19486); +x_19490 = lean_ctor_get(x_1, 2); +lean_inc(x_19490); +lean_dec(x_1); +lean_inc(x_5); +lean_inc(x_4); +x_19491 = l_Lean_IR_ToIR_lowerType(x_19490, x_19489, x_4, x_5, x_19487); +if (lean_obj_tag(x_19491) == 0) +{ +lean_object* x_19492; lean_object* x_19493; lean_object* x_19494; lean_object* x_19495; lean_object* x_19496; +x_19492 = lean_ctor_get(x_19491, 0); +lean_inc(x_19492); +x_19493 = lean_ctor_get(x_19491, 1); +lean_inc(x_19493); +lean_dec(x_19491); +x_19494 = lean_ctor_get(x_19492, 0); +lean_inc(x_19494); +x_19495 = lean_ctor_get(x_19492, 1); +lean_inc(x_19495); +lean_dec(x_19492); +x_19496 = l_Lean_IR_ToIR_lowerLet___lambda__1(x_2, x_19488, x_19483, x_19494, x_19495, x_4, x_5, x_19493); +return x_19496; +} +else +{ +lean_object* x_19497; lean_object* x_19498; lean_object* x_19499; lean_object* x_19500; +lean_dec(x_19488); +lean_dec(x_19483); +lean_dec(x_5); +lean_dec(x_4); +lean_dec(x_2); +x_19497 = lean_ctor_get(x_19491, 0); +lean_inc(x_19497); +x_19498 = lean_ctor_get(x_19491, 1); +lean_inc(x_19498); +if (lean_is_exclusive(x_19491)) { + lean_ctor_release(x_19491, 0); + lean_ctor_release(x_19491, 1); + x_19499 = x_19491; +} else { + lean_dec_ref(x_19491); + x_19499 = lean_box(0); +} +if (lean_is_scalar(x_19499)) { + x_19500 = lean_alloc_ctor(1, 2, 0); +} else { + x_19500 = x_19499; +} +lean_ctor_set(x_19500, 0, x_19497); +lean_ctor_set(x_19500, 1, x_19498); +return x_19500; +} +} +else +{ +lean_object* x_19501; lean_object* x_19502; lean_object* x_19503; lean_object* x_19504; lean_object* x_19505; +lean_dec(x_17707); +lean_dec(x_153); +lean_dec(x_5); +lean_dec(x_4); +lean_dec(x_2); +lean_dec(x_1); +x_19501 = lean_ctor_get(x_19479, 1); +lean_inc(x_19501); +if (lean_is_exclusive(x_19479)) { + lean_ctor_release(x_19479, 0); + lean_ctor_release(x_19479, 1); + x_19502 = x_19479; +} else { + lean_dec_ref(x_19479); + x_19502 = lean_box(0); +} +x_19503 = lean_ctor_get(x_19481, 0); +lean_inc(x_19503); +lean_dec(x_19481); +if (lean_is_scalar(x_19502)) { + x_19504 = lean_alloc_ctor(0, 2, 0); +} else { + x_19504 = x_19502; +} +lean_ctor_set(x_19504, 0, x_19503); +lean_ctor_set(x_19504, 1, x_19501); +if (lean_is_scalar(x_19053)) { + x_19505 = lean_alloc_ctor(0, 2, 0); +} else { + x_19505 = x_19053; +} +lean_ctor_set(x_19505, 0, x_19504); +lean_ctor_set(x_19505, 1, x_19480); +return x_19505; +} +} +} +} +default: +{ +lean_object* x_19607; uint8_t x_19608; lean_object* x_19609; lean_object* x_19610; lean_object* x_19611; lean_object* x_19612; lean_object* x_19613; lean_object* x_19614; lean_object* x_19615; lean_object* x_19616; lean_object* x_19617; +lean_dec(x_19054); +lean_dec(x_19053); +lean_dec(x_17707); +lean_dec(x_17699); +lean_dec(x_17698); +lean_dec(x_2); +lean_dec(x_1); +if (lean_is_exclusive(x_19059)) { + lean_ctor_release(x_19059, 0); + x_19607 = x_19059; +} else { + lean_dec_ref(x_19059); + 
x_19607 = lean_box(0); +} +x_19608 = 1; +x_19609 = l_Lean_IR_ToIR_lowerLet___closed__14; +x_19610 = l_Lean_Name_toString(x_153, x_19608, x_19609); +if (lean_is_scalar(x_19607)) { + x_19611 = lean_alloc_ctor(3, 1, 0); +} else { + x_19611 = x_19607; + lean_ctor_set_tag(x_19611, 3); +} +lean_ctor_set(x_19611, 0, x_19610); +x_19612 = l_Lean_IR_ToIR_lowerLet___closed__29; +x_19613 = lean_alloc_ctor(5, 2, 0); +lean_ctor_set(x_19613, 0, x_19612); +lean_ctor_set(x_19613, 1, x_19611); +x_19614 = l_Lean_IR_ToIR_lowerLet___closed__31; +x_19615 = lean_alloc_ctor(5, 2, 0); +lean_ctor_set(x_19615, 0, x_19613); +lean_ctor_set(x_19615, 1, x_19614); +x_19616 = l_Lean_MessageData_ofFormat(x_19615); +x_19617 = l_Lean_throwError___at_Lean_IR_ToIR_lowerLet___spec__1(x_19616, x_19049, x_4, x_5, x_19052); +lean_dec(x_5); +lean_dec(x_4); +lean_dec(x_19049); +return x_19617; +} +} +} +} +} +else +{ +uint8_t x_19618; +lean_dec(x_17707); +lean_dec(x_17699); +lean_dec(x_17698); +lean_dec(x_153); +lean_dec(x_5); +lean_dec(x_4); +lean_dec(x_2); +lean_dec(x_1); +x_19618 = !lean_is_exclusive(x_17709); +if (x_19618 == 0) +{ +lean_object* x_19619; lean_object* x_19620; lean_object* x_19621; +x_19619 = lean_ctor_get(x_17709, 0); +lean_dec(x_19619); +x_19620 = lean_ctor_get(x_17711, 0); +lean_inc(x_19620); +lean_dec(x_17711); +lean_ctor_set(x_17709, 0, x_19620); +if (lean_is_scalar(x_17705)) { + x_19621 = lean_alloc_ctor(0, 2, 0); +} else { + x_19621 = x_17705; +} +lean_ctor_set(x_19621, 0, x_17709); +lean_ctor_set(x_19621, 1, x_17710); +return x_19621; +} +else +{ +lean_object* x_19622; lean_object* x_19623; lean_object* x_19624; lean_object* x_19625; +x_19622 = lean_ctor_get(x_17709, 1); +lean_inc(x_19622); +lean_dec(x_17709); +x_19623 = lean_ctor_get(x_17711, 0); +lean_inc(x_19623); +lean_dec(x_17711); +x_19624 = lean_alloc_ctor(0, 2, 0); +lean_ctor_set(x_19624, 0, x_19623); +lean_ctor_set(x_19624, 1, x_19622); +if (lean_is_scalar(x_17705)) { + x_19625 = lean_alloc_ctor(0, 2, 0); +} else { + x_19625 = x_17705; +} +lean_ctor_set(x_19625, 0, x_19624); +lean_ctor_set(x_19625, 1, x_17710); +return x_19625; +} +} +} +} +else +{ +lean_object* x_19907; lean_object* x_19908; lean_object* x_19909; lean_object* x_19910; lean_object* x_20488; lean_object* x_20489; +x_19907 = lean_ctor_get(x_17703, 0); +x_19908 = lean_ctor_get(x_17703, 1); +lean_inc(x_19908); +lean_inc(x_19907); +lean_dec(x_17703); +lean_inc(x_153); +x_20488 = l_Lean_Compiler_LCNF_getMonoDecl_x3f(x_153, x_4, x_5, x_17704); +x_20489 = lean_ctor_get(x_20488, 0); +lean_inc(x_20489); +if (lean_obj_tag(x_20489) == 0) +{ +lean_object* x_20490; lean_object* x_20491; lean_object* x_20492; +x_20490 = lean_ctor_get(x_20488, 1); +lean_inc(x_20490); +lean_dec(x_20488); +x_20491 = lean_box(0); +x_20492 = lean_alloc_ctor(0, 2, 0); +lean_ctor_set(x_20492, 0, x_20491); +lean_ctor_set(x_20492, 1, x_19908); +x_19909 = x_20492; +x_19910 = x_20490; +goto block_20487; +} +else +{ +lean_object* x_20493; lean_object* x_20494; lean_object* x_20495; lean_object* x_20496; lean_object* x_20497; lean_object* x_20498; lean_object* x_20499; uint8_t x_20500; +x_20493 = lean_ctor_get(x_20488, 1); +lean_inc(x_20493); +if (lean_is_exclusive(x_20488)) { + lean_ctor_release(x_20488, 0); + lean_ctor_release(x_20488, 1); + x_20494 = x_20488; +} else { + lean_dec_ref(x_20488); + x_20494 = lean_box(0); +} +x_20495 = lean_ctor_get(x_20489, 0); +lean_inc(x_20495); +if (lean_is_exclusive(x_20489)) { + lean_ctor_release(x_20489, 0); + x_20496 = x_20489; +} else { + lean_dec_ref(x_20489); + x_20496 = 
lean_box(0); +} +x_20497 = lean_array_get_size(x_19907); +x_20498 = lean_ctor_get(x_20495, 3); +lean_inc(x_20498); +lean_dec(x_20495); +x_20499 = lean_array_get_size(x_20498); +lean_dec(x_20498); +x_20500 = lean_nat_dec_lt(x_20497, x_20499); +if (x_20500 == 0) +{ +uint8_t x_20501; +x_20501 = lean_nat_dec_eq(x_20497, x_20499); +if (x_20501 == 0) +{ +lean_object* x_20502; lean_object* x_20503; lean_object* x_20504; lean_object* x_20505; lean_object* x_20506; lean_object* x_20507; lean_object* x_20508; lean_object* x_20509; lean_object* x_20510; lean_object* x_20511; lean_object* x_20512; lean_object* x_20513; lean_object* x_20514; lean_object* x_20515; lean_object* x_20516; lean_object* x_20517; lean_object* x_20518; +x_20502 = lean_unsigned_to_nat(0u); +x_20503 = l_Array_extract___rarg(x_19907, x_20502, x_20499); +x_20504 = l_Array_extract___rarg(x_19907, x_20499, x_20497); +lean_dec(x_20497); +lean_inc(x_153); +if (lean_is_scalar(x_20494)) { + x_20505 = lean_alloc_ctor(6, 2, 0); +} else { + x_20505 = x_20494; + lean_ctor_set_tag(x_20505, 6); +} +lean_ctor_set(x_20505, 0, x_153); +lean_ctor_set(x_20505, 1, x_20503); +x_20506 = lean_ctor_get(x_1, 0); +lean_inc(x_20506); +x_20507 = l_Lean_IR_ToIR_bindVar(x_20506, x_19908, x_4, x_5, x_20493); +x_20508 = lean_ctor_get(x_20507, 0); +lean_inc(x_20508); +x_20509 = lean_ctor_get(x_20507, 1); +lean_inc(x_20509); +lean_dec(x_20507); +x_20510 = lean_ctor_get(x_20508, 0); +lean_inc(x_20510); +x_20511 = lean_ctor_get(x_20508, 1); +lean_inc(x_20511); +lean_dec(x_20508); +x_20512 = l_Lean_IR_ToIR_newVar(x_20511, x_4, x_5, x_20509); +x_20513 = lean_ctor_get(x_20512, 0); +lean_inc(x_20513); +x_20514 = lean_ctor_get(x_20512, 1); +lean_inc(x_20514); +lean_dec(x_20512); +x_20515 = lean_ctor_get(x_20513, 0); +lean_inc(x_20515); +x_20516 = lean_ctor_get(x_20513, 1); +lean_inc(x_20516); +lean_dec(x_20513); +x_20517 = lean_ctor_get(x_1, 2); +lean_inc(x_20517); +lean_inc(x_5); +lean_inc(x_4); +x_20518 = l_Lean_IR_ToIR_lowerType(x_20517, x_20516, x_4, x_5, x_20514); +if (lean_obj_tag(x_20518) == 0) +{ +lean_object* x_20519; lean_object* x_20520; lean_object* x_20521; lean_object* x_20522; lean_object* x_20523; +x_20519 = lean_ctor_get(x_20518, 0); +lean_inc(x_20519); +x_20520 = lean_ctor_get(x_20518, 1); +lean_inc(x_20520); +lean_dec(x_20518); +x_20521 = lean_ctor_get(x_20519, 0); +lean_inc(x_20521); +x_20522 = lean_ctor_get(x_20519, 1); +lean_inc(x_20522); +lean_dec(x_20519); +lean_inc(x_5); +lean_inc(x_4); +lean_inc(x_2); +x_20523 = l_Lean_IR_ToIR_lowerLet___lambda__3(x_2, x_20515, x_20504, x_20510, x_20505, x_20521, x_20522, x_4, x_5, x_20520); +if (lean_obj_tag(x_20523) == 0) +{ +lean_object* x_20524; lean_object* x_20525; lean_object* x_20526; lean_object* x_20527; lean_object* x_20528; lean_object* x_20529; lean_object* x_20530; +x_20524 = lean_ctor_get(x_20523, 0); +lean_inc(x_20524); +x_20525 = lean_ctor_get(x_20523, 1); +lean_inc(x_20525); +lean_dec(x_20523); +x_20526 = lean_ctor_get(x_20524, 0); +lean_inc(x_20526); +x_20527 = lean_ctor_get(x_20524, 1); +lean_inc(x_20527); +if (lean_is_exclusive(x_20524)) { + lean_ctor_release(x_20524, 0); + lean_ctor_release(x_20524, 1); + x_20528 = x_20524; +} else { + lean_dec_ref(x_20524); + x_20528 = lean_box(0); +} +if (lean_is_scalar(x_20496)) { + x_20529 = lean_alloc_ctor(1, 1, 0); +} else { + x_20529 = x_20496; +} +lean_ctor_set(x_20529, 0, x_20526); +if (lean_is_scalar(x_20528)) { + x_20530 = lean_alloc_ctor(0, 2, 0); +} else { + x_20530 = x_20528; +} +lean_ctor_set(x_20530, 0, x_20529); +lean_ctor_set(x_20530, 1, 
x_20527); +x_19909 = x_20530; +x_19910 = x_20525; +goto block_20487; +} +else +{ +lean_object* x_20531; lean_object* x_20532; lean_object* x_20533; lean_object* x_20534; +lean_dec(x_20496); +lean_dec(x_19907); +lean_dec(x_17705); +lean_dec(x_17699); +lean_dec(x_17698); +lean_dec(x_153); +lean_dec(x_5); +lean_dec(x_4); +lean_dec(x_2); +lean_dec(x_1); +x_20531 = lean_ctor_get(x_20523, 0); +lean_inc(x_20531); +x_20532 = lean_ctor_get(x_20523, 1); +lean_inc(x_20532); +if (lean_is_exclusive(x_20523)) { + lean_ctor_release(x_20523, 0); + lean_ctor_release(x_20523, 1); + x_20533 = x_20523; +} else { + lean_dec_ref(x_20523); + x_20533 = lean_box(0); +} +if (lean_is_scalar(x_20533)) { + x_20534 = lean_alloc_ctor(1, 2, 0); +} else { + x_20534 = x_20533; +} +lean_ctor_set(x_20534, 0, x_20531); +lean_ctor_set(x_20534, 1, x_20532); +return x_20534; +} +} +else +{ +lean_object* x_20535; lean_object* x_20536; lean_object* x_20537; lean_object* x_20538; +lean_dec(x_20515); +lean_dec(x_20510); +lean_dec(x_20505); +lean_dec(x_20504); +lean_dec(x_20496); +lean_dec(x_19907); +lean_dec(x_17705); +lean_dec(x_17699); +lean_dec(x_17698); +lean_dec(x_153); +lean_dec(x_5); +lean_dec(x_4); +lean_dec(x_2); +lean_dec(x_1); +x_20535 = lean_ctor_get(x_20518, 0); +lean_inc(x_20535); +x_20536 = lean_ctor_get(x_20518, 1); +lean_inc(x_20536); +if (lean_is_exclusive(x_20518)) { + lean_ctor_release(x_20518, 0); + lean_ctor_release(x_20518, 1); + x_20537 = x_20518; +} else { + lean_dec_ref(x_20518); + x_20537 = lean_box(0); +} +if (lean_is_scalar(x_20537)) { + x_20538 = lean_alloc_ctor(1, 2, 0); +} else { + x_20538 = x_20537; +} +lean_ctor_set(x_20538, 0, x_20535); +lean_ctor_set(x_20538, 1, x_20536); +return x_20538; +} +} +else +{ +lean_object* x_20539; lean_object* x_20540; lean_object* x_20541; lean_object* x_20542; lean_object* x_20543; lean_object* x_20544; lean_object* x_20545; lean_object* x_20546; lean_object* x_20547; +lean_dec(x_20499); +lean_dec(x_20497); +lean_inc(x_19907); +lean_inc(x_153); +if (lean_is_scalar(x_20494)) { + x_20539 = lean_alloc_ctor(6, 2, 0); +} else { + x_20539 = x_20494; + lean_ctor_set_tag(x_20539, 6); +} +lean_ctor_set(x_20539, 0, x_153); +lean_ctor_set(x_20539, 1, x_19907); +x_20540 = lean_ctor_get(x_1, 0); +lean_inc(x_20540); +x_20541 = l_Lean_IR_ToIR_bindVar(x_20540, x_19908, x_4, x_5, x_20493); +x_20542 = lean_ctor_get(x_20541, 0); +lean_inc(x_20542); +x_20543 = lean_ctor_get(x_20541, 1); +lean_inc(x_20543); +lean_dec(x_20541); +x_20544 = lean_ctor_get(x_20542, 0); +lean_inc(x_20544); +x_20545 = lean_ctor_get(x_20542, 1); +lean_inc(x_20545); +lean_dec(x_20542); +x_20546 = lean_ctor_get(x_1, 2); +lean_inc(x_20546); +lean_inc(x_5); +lean_inc(x_4); +x_20547 = l_Lean_IR_ToIR_lowerType(x_20546, x_20545, x_4, x_5, x_20543); +if (lean_obj_tag(x_20547) == 0) +{ +lean_object* x_20548; lean_object* x_20549; lean_object* x_20550; lean_object* x_20551; lean_object* x_20552; +x_20548 = lean_ctor_get(x_20547, 0); +lean_inc(x_20548); +x_20549 = lean_ctor_get(x_20547, 1); +lean_inc(x_20549); +lean_dec(x_20547); +x_20550 = lean_ctor_get(x_20548, 0); +lean_inc(x_20550); +x_20551 = lean_ctor_get(x_20548, 1); +lean_inc(x_20551); +lean_dec(x_20548); +lean_inc(x_5); +lean_inc(x_4); +lean_inc(x_2); +x_20552 = l_Lean_IR_ToIR_lowerLet___lambda__1(x_2, x_20544, x_20539, x_20550, x_20551, x_4, x_5, x_20549); +if (lean_obj_tag(x_20552) == 0) +{ +lean_object* x_20553; lean_object* x_20554; lean_object* x_20555; lean_object* x_20556; lean_object* x_20557; lean_object* x_20558; lean_object* x_20559; +x_20553 = 
lean_ctor_get(x_20552, 0); +lean_inc(x_20553); +x_20554 = lean_ctor_get(x_20552, 1); +lean_inc(x_20554); +lean_dec(x_20552); +x_20555 = lean_ctor_get(x_20553, 0); +lean_inc(x_20555); +x_20556 = lean_ctor_get(x_20553, 1); +lean_inc(x_20556); +if (lean_is_exclusive(x_20553)) { + lean_ctor_release(x_20553, 0); + lean_ctor_release(x_20553, 1); + x_20557 = x_20553; +} else { + lean_dec_ref(x_20553); + x_20557 = lean_box(0); +} +if (lean_is_scalar(x_20496)) { + x_20558 = lean_alloc_ctor(1, 1, 0); +} else { + x_20558 = x_20496; +} +lean_ctor_set(x_20558, 0, x_20555); +if (lean_is_scalar(x_20557)) { + x_20559 = lean_alloc_ctor(0, 2, 0); +} else { + x_20559 = x_20557; +} +lean_ctor_set(x_20559, 0, x_20558); +lean_ctor_set(x_20559, 1, x_20556); +x_19909 = x_20559; +x_19910 = x_20554; +goto block_20487; +} +else +{ +lean_object* x_20560; lean_object* x_20561; lean_object* x_20562; lean_object* x_20563; +lean_dec(x_20496); +lean_dec(x_19907); +lean_dec(x_17705); +lean_dec(x_17699); +lean_dec(x_17698); +lean_dec(x_153); +lean_dec(x_5); +lean_dec(x_4); +lean_dec(x_2); +lean_dec(x_1); +x_20560 = lean_ctor_get(x_20552, 0); +lean_inc(x_20560); +x_20561 = lean_ctor_get(x_20552, 1); +lean_inc(x_20561); +if (lean_is_exclusive(x_20552)) { + lean_ctor_release(x_20552, 0); + lean_ctor_release(x_20552, 1); + x_20562 = x_20552; +} else { + lean_dec_ref(x_20552); + x_20562 = lean_box(0); +} +if (lean_is_scalar(x_20562)) { + x_20563 = lean_alloc_ctor(1, 2, 0); +} else { + x_20563 = x_20562; +} +lean_ctor_set(x_20563, 0, x_20560); +lean_ctor_set(x_20563, 1, x_20561); +return x_20563; +} +} +else +{ +lean_object* x_20564; lean_object* x_20565; lean_object* x_20566; lean_object* x_20567; +lean_dec(x_20544); +lean_dec(x_20539); +lean_dec(x_20496); +lean_dec(x_19907); +lean_dec(x_17705); +lean_dec(x_17699); +lean_dec(x_17698); +lean_dec(x_153); +lean_dec(x_5); +lean_dec(x_4); +lean_dec(x_2); +lean_dec(x_1); +x_20564 = lean_ctor_get(x_20547, 0); +lean_inc(x_20564); +x_20565 = lean_ctor_get(x_20547, 1); +lean_inc(x_20565); +if (lean_is_exclusive(x_20547)) { + lean_ctor_release(x_20547, 0); + lean_ctor_release(x_20547, 1); + x_20566 = x_20547; +} else { + lean_dec_ref(x_20547); + x_20566 = lean_box(0); +} +if (lean_is_scalar(x_20566)) { + x_20567 = lean_alloc_ctor(1, 2, 0); +} else { + x_20567 = x_20566; +} +lean_ctor_set(x_20567, 0, x_20564); +lean_ctor_set(x_20567, 1, x_20565); +return x_20567; +} +} +} +else +{ +lean_object* x_20568; lean_object* x_20569; lean_object* x_20570; lean_object* x_20571; lean_object* x_20572; lean_object* x_20573; lean_object* x_20574; lean_object* x_20575; lean_object* x_20576; +lean_dec(x_20499); +lean_dec(x_20497); +lean_inc(x_19907); +lean_inc(x_153); +if (lean_is_scalar(x_20494)) { + x_20568 = lean_alloc_ctor(7, 2, 0); +} else { + x_20568 = x_20494; + lean_ctor_set_tag(x_20568, 7); +} +lean_ctor_set(x_20568, 0, x_153); +lean_ctor_set(x_20568, 1, x_19907); +x_20569 = lean_ctor_get(x_1, 0); +lean_inc(x_20569); +x_20570 = l_Lean_IR_ToIR_bindVar(x_20569, x_19908, x_4, x_5, x_20493); +x_20571 = lean_ctor_get(x_20570, 0); +lean_inc(x_20571); +x_20572 = lean_ctor_get(x_20570, 1); +lean_inc(x_20572); +lean_dec(x_20570); +x_20573 = lean_ctor_get(x_20571, 0); +lean_inc(x_20573); +x_20574 = lean_ctor_get(x_20571, 1); +lean_inc(x_20574); +lean_dec(x_20571); +x_20575 = lean_box(7); +lean_inc(x_5); +lean_inc(x_4); +lean_inc(x_2); +x_20576 = l_Lean_IR_ToIR_lowerLet___lambda__1(x_2, x_20573, x_20568, x_20575, x_20574, x_4, x_5, x_20572); +if (lean_obj_tag(x_20576) == 0) +{ +lean_object* x_20577; 
lean_object* x_20578; lean_object* x_20579; lean_object* x_20580; lean_object* x_20581; lean_object* x_20582; lean_object* x_20583; +x_20577 = lean_ctor_get(x_20576, 0); +lean_inc(x_20577); +x_20578 = lean_ctor_get(x_20576, 1); +lean_inc(x_20578); +lean_dec(x_20576); +x_20579 = lean_ctor_get(x_20577, 0); +lean_inc(x_20579); +x_20580 = lean_ctor_get(x_20577, 1); +lean_inc(x_20580); +if (lean_is_exclusive(x_20577)) { + lean_ctor_release(x_20577, 0); + lean_ctor_release(x_20577, 1); + x_20581 = x_20577; +} else { + lean_dec_ref(x_20577); + x_20581 = lean_box(0); +} +if (lean_is_scalar(x_20496)) { + x_20582 = lean_alloc_ctor(1, 1, 0); +} else { + x_20582 = x_20496; +} +lean_ctor_set(x_20582, 0, x_20579); +if (lean_is_scalar(x_20581)) { + x_20583 = lean_alloc_ctor(0, 2, 0); +} else { + x_20583 = x_20581; +} +lean_ctor_set(x_20583, 0, x_20582); +lean_ctor_set(x_20583, 1, x_20580); +x_19909 = x_20583; +x_19910 = x_20578; +goto block_20487; +} +else +{ +lean_object* x_20584; lean_object* x_20585; lean_object* x_20586; lean_object* x_20587; +lean_dec(x_20496); +lean_dec(x_19907); +lean_dec(x_17705); +lean_dec(x_17699); +lean_dec(x_17698); +lean_dec(x_153); +lean_dec(x_5); +lean_dec(x_4); +lean_dec(x_2); +lean_dec(x_1); +x_20584 = lean_ctor_get(x_20576, 0); +lean_inc(x_20584); +x_20585 = lean_ctor_get(x_20576, 1); +lean_inc(x_20585); +if (lean_is_exclusive(x_20576)) { + lean_ctor_release(x_20576, 0); + lean_ctor_release(x_20576, 1); + x_20586 = x_20576; +} else { + lean_dec_ref(x_20576); + x_20586 = lean_box(0); +} +if (lean_is_scalar(x_20586)) { + x_20587 = lean_alloc_ctor(1, 2, 0); +} else { + x_20587 = x_20586; +} +lean_ctor_set(x_20587, 0, x_20584); +lean_ctor_set(x_20587, 1, x_20585); +return x_20587; +} +} +} +block_20487: +{ +lean_object* x_19911; +x_19911 = lean_ctor_get(x_19909, 0); +lean_inc(x_19911); +if (lean_obj_tag(x_19911) == 0) +{ +lean_object* x_19912; lean_object* x_19913; lean_object* x_19914; lean_object* x_19915; lean_object* x_19916; lean_object* x_19917; lean_object* x_19918; uint8_t x_19919; lean_object* x_19920; +lean_dec(x_17705); +x_19912 = lean_ctor_get(x_19909, 1); +lean_inc(x_19912); +if (lean_is_exclusive(x_19909)) { + lean_ctor_release(x_19909, 0); + lean_ctor_release(x_19909, 1); + x_19913 = x_19909; +} else { + lean_dec_ref(x_19909); + x_19913 = lean_box(0); +} +x_19914 = lean_st_ref_get(x_5, x_19910); +x_19915 = lean_ctor_get(x_19914, 0); +lean_inc(x_19915); +x_19916 = lean_ctor_get(x_19914, 1); +lean_inc(x_19916); +if (lean_is_exclusive(x_19914)) { + lean_ctor_release(x_19914, 0); + lean_ctor_release(x_19914, 1); + x_19917 = x_19914; +} else { + lean_dec_ref(x_19914); + x_19917 = lean_box(0); +} +x_19918 = lean_ctor_get(x_19915, 0); +lean_inc(x_19918); +lean_dec(x_19915); +x_19919 = 0; +lean_inc(x_153); +lean_inc(x_19918); +x_19920 = l_Lean_Environment_find_x3f(x_19918, x_153, x_19919); +if (lean_obj_tag(x_19920) == 0) +{ +lean_object* x_19921; lean_object* x_19922; +lean_dec(x_19918); +lean_dec(x_19917); +lean_dec(x_19913); +lean_dec(x_19907); +lean_dec(x_17699); +lean_dec(x_17698); +lean_dec(x_153); +lean_dec(x_2); +lean_dec(x_1); +x_19921 = l_Lean_IR_ToIR_lowerLet___closed__6; +x_19922 = l_panic___at_Lean_IR_ToIR_lowerAlt_loop___spec__1(x_19921, x_19912, x_4, x_5, x_19916); +return x_19922; +} +else +{ +lean_object* x_19923; +x_19923 = lean_ctor_get(x_19920, 0); +lean_inc(x_19923); +lean_dec(x_19920); +switch (lean_obj_tag(x_19923)) { +case 0: +{ +lean_object* x_19924; lean_object* x_19925; uint8_t x_19926; +lean_dec(x_19918); +lean_dec(x_17699); 
+lean_dec(x_17698); +if (lean_is_exclusive(x_19923)) { + lean_ctor_release(x_19923, 0); + x_19924 = x_19923; +} else { + lean_dec_ref(x_19923); + x_19924 = lean_box(0); +} +x_19925 = l_Lean_IR_ToIR_lowerLet___closed__9; +x_19926 = lean_name_eq(x_153, x_19925); +if (x_19926 == 0) +{ +lean_object* x_19927; uint8_t x_19928; +x_19927 = l_Lean_IR_ToIR_lowerLet___closed__11; +x_19928 = lean_name_eq(x_153, x_19927); +if (x_19928 == 0) +{ +lean_object* x_19929; lean_object* x_19930; lean_object* x_19931; +lean_dec(x_19917); +lean_dec(x_19913); +lean_inc(x_153); +x_19929 = l_Lean_IR_ToIR_findDecl(x_153, x_19912, x_4, x_5, x_19916); +x_19930 = lean_ctor_get(x_19929, 0); +lean_inc(x_19930); +x_19931 = lean_ctor_get(x_19930, 0); +lean_inc(x_19931); +if (lean_obj_tag(x_19931) == 0) +{ +lean_object* x_19932; lean_object* x_19933; lean_object* x_19934; lean_object* x_19935; uint8_t x_19936; lean_object* x_19937; lean_object* x_19938; lean_object* x_19939; lean_object* x_19940; lean_object* x_19941; lean_object* x_19942; lean_object* x_19943; lean_object* x_19944; lean_object* x_19945; +lean_dec(x_19907); +lean_dec(x_2); +lean_dec(x_1); +x_19932 = lean_ctor_get(x_19929, 1); +lean_inc(x_19932); +if (lean_is_exclusive(x_19929)) { + lean_ctor_release(x_19929, 0); + lean_ctor_release(x_19929, 1); + x_19933 = x_19929; +} else { + lean_dec_ref(x_19929); + x_19933 = lean_box(0); +} +x_19934 = lean_ctor_get(x_19930, 1); +lean_inc(x_19934); +if (lean_is_exclusive(x_19930)) { + lean_ctor_release(x_19930, 0); + lean_ctor_release(x_19930, 1); + x_19935 = x_19930; +} else { + lean_dec_ref(x_19930); + x_19935 = lean_box(0); +} +x_19936 = 1; +x_19937 = l_Lean_IR_ToIR_lowerLet___closed__14; +x_19938 = l_Lean_Name_toString(x_153, x_19936, x_19937); +if (lean_is_scalar(x_19924)) { + x_19939 = lean_alloc_ctor(3, 1, 0); +} else { + x_19939 = x_19924; + lean_ctor_set_tag(x_19939, 3); +} +lean_ctor_set(x_19939, 0, x_19938); +x_19940 = l_Lean_IR_ToIR_lowerLet___closed__13; +if (lean_is_scalar(x_19935)) { + x_19941 = lean_alloc_ctor(5, 2, 0); +} else { + x_19941 = x_19935; + lean_ctor_set_tag(x_19941, 5); +} +lean_ctor_set(x_19941, 0, x_19940); +lean_ctor_set(x_19941, 1, x_19939); +x_19942 = l_Lean_IR_ToIR_lowerLet___closed__16; +if (lean_is_scalar(x_19933)) { + x_19943 = lean_alloc_ctor(5, 2, 0); +} else { + x_19943 = x_19933; + lean_ctor_set_tag(x_19943, 5); +} +lean_ctor_set(x_19943, 0, x_19941); +lean_ctor_set(x_19943, 1, x_19942); +x_19944 = l_Lean_MessageData_ofFormat(x_19943); +x_19945 = l_Lean_throwError___at_Lean_IR_ToIR_lowerLet___spec__1(x_19944, x_19934, x_4, x_5, x_19932); +lean_dec(x_5); +lean_dec(x_4); +lean_dec(x_19934); +return x_19945; +} +else +{ +lean_object* x_19946; lean_object* x_19947; lean_object* x_19948; lean_object* x_19949; lean_object* x_19950; lean_object* x_19951; lean_object* x_19952; uint8_t x_19953; +lean_dec(x_19924); +x_19946 = lean_ctor_get(x_19929, 1); +lean_inc(x_19946); +lean_dec(x_19929); +x_19947 = lean_ctor_get(x_19930, 1); +lean_inc(x_19947); +if (lean_is_exclusive(x_19930)) { + lean_ctor_release(x_19930, 0); + lean_ctor_release(x_19930, 1); + x_19948 = x_19930; +} else { + lean_dec_ref(x_19930); + x_19948 = lean_box(0); +} +x_19949 = lean_ctor_get(x_19931, 0); +lean_inc(x_19949); +lean_dec(x_19931); +x_19950 = lean_array_get_size(x_19907); +x_19951 = l_Lean_IR_Decl_params(x_19949); +lean_dec(x_19949); +x_19952 = lean_array_get_size(x_19951); +lean_dec(x_19951); +x_19953 = lean_nat_dec_lt(x_19950, x_19952); +if (x_19953 == 0) +{ +uint8_t x_19954; +x_19954 = lean_nat_dec_eq(x_19950, 
x_19952); +if (x_19954 == 0) +{ +lean_object* x_19955; lean_object* x_19956; lean_object* x_19957; lean_object* x_19958; lean_object* x_19959; lean_object* x_19960; lean_object* x_19961; lean_object* x_19962; lean_object* x_19963; lean_object* x_19964; lean_object* x_19965; lean_object* x_19966; lean_object* x_19967; lean_object* x_19968; lean_object* x_19969; lean_object* x_19970; lean_object* x_19971; +x_19955 = lean_unsigned_to_nat(0u); +x_19956 = l_Array_extract___rarg(x_19907, x_19955, x_19952); +x_19957 = l_Array_extract___rarg(x_19907, x_19952, x_19950); +lean_dec(x_19950); +lean_dec(x_19907); +if (lean_is_scalar(x_19948)) { + x_19958 = lean_alloc_ctor(6, 2, 0); +} else { + x_19958 = x_19948; + lean_ctor_set_tag(x_19958, 6); +} +lean_ctor_set(x_19958, 0, x_153); +lean_ctor_set(x_19958, 1, x_19956); +x_19959 = lean_ctor_get(x_1, 0); +lean_inc(x_19959); +x_19960 = l_Lean_IR_ToIR_bindVar(x_19959, x_19947, x_4, x_5, x_19946); +x_19961 = lean_ctor_get(x_19960, 0); +lean_inc(x_19961); +x_19962 = lean_ctor_get(x_19960, 1); +lean_inc(x_19962); +lean_dec(x_19960); +x_19963 = lean_ctor_get(x_19961, 0); +lean_inc(x_19963); +x_19964 = lean_ctor_get(x_19961, 1); +lean_inc(x_19964); +lean_dec(x_19961); +x_19965 = l_Lean_IR_ToIR_newVar(x_19964, x_4, x_5, x_19962); +x_19966 = lean_ctor_get(x_19965, 0); +lean_inc(x_19966); +x_19967 = lean_ctor_get(x_19965, 1); +lean_inc(x_19967); +lean_dec(x_19965); +x_19968 = lean_ctor_get(x_19966, 0); +lean_inc(x_19968); +x_19969 = lean_ctor_get(x_19966, 1); +lean_inc(x_19969); +lean_dec(x_19966); +x_19970 = lean_ctor_get(x_1, 2); +lean_inc(x_19970); +lean_dec(x_1); +lean_inc(x_5); +lean_inc(x_4); +x_19971 = l_Lean_IR_ToIR_lowerType(x_19970, x_19969, x_4, x_5, x_19967); +if (lean_obj_tag(x_19971) == 0) +{ +lean_object* x_19972; lean_object* x_19973; lean_object* x_19974; lean_object* x_19975; lean_object* x_19976; +x_19972 = lean_ctor_get(x_19971, 0); +lean_inc(x_19972); +x_19973 = lean_ctor_get(x_19971, 1); +lean_inc(x_19973); +lean_dec(x_19971); +x_19974 = lean_ctor_get(x_19972, 0); +lean_inc(x_19974); +x_19975 = lean_ctor_get(x_19972, 1); +lean_inc(x_19975); +lean_dec(x_19972); +x_19976 = l_Lean_IR_ToIR_lowerLet___lambda__3(x_2, x_19968, x_19957, x_19963, x_19958, x_19974, x_19975, x_4, x_5, x_19973); +return x_19976; +} +else +{ +lean_object* x_19977; lean_object* x_19978; lean_object* x_19979; lean_object* x_19980; +lean_dec(x_19968); +lean_dec(x_19963); +lean_dec(x_19958); +lean_dec(x_19957); +lean_dec(x_5); +lean_dec(x_4); +lean_dec(x_2); +x_19977 = lean_ctor_get(x_19971, 0); +lean_inc(x_19977); +x_19978 = lean_ctor_get(x_19971, 1); +lean_inc(x_19978); +if (lean_is_exclusive(x_19971)) { + lean_ctor_release(x_19971, 0); + lean_ctor_release(x_19971, 1); + x_19979 = x_19971; +} else { + lean_dec_ref(x_19971); + x_19979 = lean_box(0); +} +if (lean_is_scalar(x_19979)) { + x_19980 = lean_alloc_ctor(1, 2, 0); +} else { + x_19980 = x_19979; +} +lean_ctor_set(x_19980, 0, x_19977); +lean_ctor_set(x_19980, 1, x_19978); +return x_19980; +} +} +else +{ +lean_object* x_19981; lean_object* x_19982; lean_object* x_19983; lean_object* x_19984; lean_object* x_19985; lean_object* x_19986; lean_object* x_19987; lean_object* x_19988; lean_object* x_19989; +lean_dec(x_19952); +lean_dec(x_19950); +if (lean_is_scalar(x_19948)) { + x_19981 = lean_alloc_ctor(6, 2, 0); +} else { + x_19981 = x_19948; + lean_ctor_set_tag(x_19981, 6); +} +lean_ctor_set(x_19981, 0, x_153); +lean_ctor_set(x_19981, 1, x_19907); +x_19982 = lean_ctor_get(x_1, 0); +lean_inc(x_19982); +x_19983 = 
l_Lean_IR_ToIR_bindVar(x_19982, x_19947, x_4, x_5, x_19946); +x_19984 = lean_ctor_get(x_19983, 0); +lean_inc(x_19984); +x_19985 = lean_ctor_get(x_19983, 1); +lean_inc(x_19985); +lean_dec(x_19983); +x_19986 = lean_ctor_get(x_19984, 0); +lean_inc(x_19986); +x_19987 = lean_ctor_get(x_19984, 1); +lean_inc(x_19987); +lean_dec(x_19984); +x_19988 = lean_ctor_get(x_1, 2); +lean_inc(x_19988); +lean_dec(x_1); +lean_inc(x_5); +lean_inc(x_4); +x_19989 = l_Lean_IR_ToIR_lowerType(x_19988, x_19987, x_4, x_5, x_19985); +if (lean_obj_tag(x_19989) == 0) +{ +lean_object* x_19990; lean_object* x_19991; lean_object* x_19992; lean_object* x_19993; lean_object* x_19994; +x_19990 = lean_ctor_get(x_19989, 0); +lean_inc(x_19990); +x_19991 = lean_ctor_get(x_19989, 1); +lean_inc(x_19991); +lean_dec(x_19989); +x_19992 = lean_ctor_get(x_19990, 0); +lean_inc(x_19992); +x_19993 = lean_ctor_get(x_19990, 1); +lean_inc(x_19993); +lean_dec(x_19990); +x_19994 = l_Lean_IR_ToIR_lowerLet___lambda__1(x_2, x_19986, x_19981, x_19992, x_19993, x_4, x_5, x_19991); +return x_19994; +} +else +{ +lean_object* x_19995; lean_object* x_19996; lean_object* x_19997; lean_object* x_19998; +lean_dec(x_19986); +lean_dec(x_19981); +lean_dec(x_5); +lean_dec(x_4); +lean_dec(x_2); +x_19995 = lean_ctor_get(x_19989, 0); +lean_inc(x_19995); +x_19996 = lean_ctor_get(x_19989, 1); +lean_inc(x_19996); +if (lean_is_exclusive(x_19989)) { + lean_ctor_release(x_19989, 0); + lean_ctor_release(x_19989, 1); + x_19997 = x_19989; +} else { + lean_dec_ref(x_19989); + x_19997 = lean_box(0); +} +if (lean_is_scalar(x_19997)) { + x_19998 = lean_alloc_ctor(1, 2, 0); +} else { + x_19998 = x_19997; +} +lean_ctor_set(x_19998, 0, x_19995); +lean_ctor_set(x_19998, 1, x_19996); +return x_19998; +} +} +} +else +{ +lean_object* x_19999; lean_object* x_20000; lean_object* x_20001; lean_object* x_20002; lean_object* x_20003; lean_object* x_20004; lean_object* x_20005; lean_object* x_20006; lean_object* x_20007; +lean_dec(x_19952); +lean_dec(x_19950); +if (lean_is_scalar(x_19948)) { + x_19999 = lean_alloc_ctor(7, 2, 0); +} else { + x_19999 = x_19948; + lean_ctor_set_tag(x_19999, 7); +} +lean_ctor_set(x_19999, 0, x_153); +lean_ctor_set(x_19999, 1, x_19907); +x_20000 = lean_ctor_get(x_1, 0); +lean_inc(x_20000); +lean_dec(x_1); +x_20001 = l_Lean_IR_ToIR_bindVar(x_20000, x_19947, x_4, x_5, x_19946); +x_20002 = lean_ctor_get(x_20001, 0); +lean_inc(x_20002); +x_20003 = lean_ctor_get(x_20001, 1); +lean_inc(x_20003); +lean_dec(x_20001); +x_20004 = lean_ctor_get(x_20002, 0); +lean_inc(x_20004); +x_20005 = lean_ctor_get(x_20002, 1); +lean_inc(x_20005); +lean_dec(x_20002); +x_20006 = lean_box(7); +x_20007 = l_Lean_IR_ToIR_lowerLet___lambda__1(x_2, x_20004, x_19999, x_20006, x_20005, x_4, x_5, x_20003); +return x_20007; +} +} +} +else +{ +lean_object* x_20008; lean_object* x_20009; lean_object* x_20010; +lean_dec(x_19924); +lean_dec(x_19907); +lean_dec(x_153); +lean_dec(x_5); +lean_dec(x_4); +lean_dec(x_2); +lean_dec(x_1); +x_20008 = lean_box(13); +if (lean_is_scalar(x_19913)) { + x_20009 = lean_alloc_ctor(0, 2, 0); +} else { + x_20009 = x_19913; +} +lean_ctor_set(x_20009, 0, x_20008); +lean_ctor_set(x_20009, 1, x_19912); +if (lean_is_scalar(x_19917)) { + x_20010 = lean_alloc_ctor(0, 2, 0); +} else { + x_20010 = x_19917; +} +lean_ctor_set(x_20010, 0, x_20009); +lean_ctor_set(x_20010, 1, x_19916); +return x_20010; +} +} +else +{ +lean_object* x_20011; lean_object* x_20012; lean_object* x_20013; +lean_dec(x_19924); +lean_dec(x_19917); +lean_dec(x_19913); +lean_dec(x_153); +x_20011 = 
l_Lean_IR_instInhabitedArg; +x_20012 = lean_unsigned_to_nat(2u); +x_20013 = lean_array_get(x_20011, x_19907, x_20012); +lean_dec(x_19907); +if (lean_obj_tag(x_20013) == 0) +{ +lean_object* x_20014; lean_object* x_20015; lean_object* x_20016; lean_object* x_20017; lean_object* x_20018; lean_object* x_20019; lean_object* x_20020; +x_20014 = lean_ctor_get(x_20013, 0); +lean_inc(x_20014); +lean_dec(x_20013); +x_20015 = lean_ctor_get(x_1, 0); +lean_inc(x_20015); +lean_dec(x_1); +x_20016 = l_Lean_IR_ToIR_bindVarToVarId(x_20015, x_20014, x_19912, x_4, x_5, x_19916); +x_20017 = lean_ctor_get(x_20016, 0); +lean_inc(x_20017); +x_20018 = lean_ctor_get(x_20016, 1); +lean_inc(x_20018); +lean_dec(x_20016); +x_20019 = lean_ctor_get(x_20017, 1); +lean_inc(x_20019); +lean_dec(x_20017); +x_20020 = l_Lean_IR_ToIR_lowerCode(x_2, x_20019, x_4, x_5, x_20018); +return x_20020; +} +else +{ +lean_object* x_20021; lean_object* x_20022; lean_object* x_20023; lean_object* x_20024; lean_object* x_20025; lean_object* x_20026; +x_20021 = lean_ctor_get(x_1, 0); +lean_inc(x_20021); +lean_dec(x_1); +x_20022 = l_Lean_IR_ToIR_bindErased(x_20021, x_19912, x_4, x_5, x_19916); +x_20023 = lean_ctor_get(x_20022, 0); +lean_inc(x_20023); +x_20024 = lean_ctor_get(x_20022, 1); +lean_inc(x_20024); +lean_dec(x_20022); +x_20025 = lean_ctor_get(x_20023, 1); +lean_inc(x_20025); +lean_dec(x_20023); +x_20026 = l_Lean_IR_ToIR_lowerCode(x_2, x_20025, x_4, x_5, x_20024); +return x_20026; +} +} +} +case 1: +{ +lean_object* x_20027; lean_object* x_20028; lean_object* x_20055; lean_object* x_20056; +lean_dec(x_19923); +lean_dec(x_19918); +lean_dec(x_17699); +lean_dec(x_17698); +lean_inc(x_153); +x_20055 = l_Lean_Compiler_LCNF_getMonoDecl_x3f(x_153, x_4, x_5, x_19916); +x_20056 = lean_ctor_get(x_20055, 0); +lean_inc(x_20056); +if (lean_obj_tag(x_20056) == 0) +{ +lean_object* x_20057; lean_object* x_20058; lean_object* x_20059; +x_20057 = lean_ctor_get(x_20055, 1); +lean_inc(x_20057); +lean_dec(x_20055); +x_20058 = lean_box(0); +if (lean_is_scalar(x_19913)) { + x_20059 = lean_alloc_ctor(0, 2, 0); +} else { + x_20059 = x_19913; +} +lean_ctor_set(x_20059, 0, x_20058); +lean_ctor_set(x_20059, 1, x_19912); +x_20027 = x_20059; +x_20028 = x_20057; +goto block_20054; +} +else +{ +lean_object* x_20060; lean_object* x_20061; lean_object* x_20062; lean_object* x_20063; lean_object* x_20064; lean_object* x_20065; lean_object* x_20066; uint8_t x_20067; +lean_dec(x_19913); +x_20060 = lean_ctor_get(x_20055, 1); +lean_inc(x_20060); +if (lean_is_exclusive(x_20055)) { + lean_ctor_release(x_20055, 0); + lean_ctor_release(x_20055, 1); + x_20061 = x_20055; +} else { + lean_dec_ref(x_20055); + x_20061 = lean_box(0); +} +x_20062 = lean_ctor_get(x_20056, 0); +lean_inc(x_20062); +if (lean_is_exclusive(x_20056)) { + lean_ctor_release(x_20056, 0); + x_20063 = x_20056; +} else { + lean_dec_ref(x_20056); + x_20063 = lean_box(0); +} +x_20064 = lean_array_get_size(x_19907); +x_20065 = lean_ctor_get(x_20062, 3); +lean_inc(x_20065); +lean_dec(x_20062); +x_20066 = lean_array_get_size(x_20065); +lean_dec(x_20065); +x_20067 = lean_nat_dec_lt(x_20064, x_20066); +if (x_20067 == 0) +{ +uint8_t x_20068; +x_20068 = lean_nat_dec_eq(x_20064, x_20066); +if (x_20068 == 0) +{ +lean_object* x_20069; lean_object* x_20070; lean_object* x_20071; lean_object* x_20072; lean_object* x_20073; lean_object* x_20074; lean_object* x_20075; lean_object* x_20076; lean_object* x_20077; lean_object* x_20078; lean_object* x_20079; lean_object* x_20080; lean_object* x_20081; lean_object* x_20082; lean_object* 
x_20083; lean_object* x_20084; lean_object* x_20085; +x_20069 = lean_unsigned_to_nat(0u); +x_20070 = l_Array_extract___rarg(x_19907, x_20069, x_20066); +x_20071 = l_Array_extract___rarg(x_19907, x_20066, x_20064); +lean_dec(x_20064); +lean_inc(x_153); +if (lean_is_scalar(x_20061)) { + x_20072 = lean_alloc_ctor(6, 2, 0); +} else { + x_20072 = x_20061; + lean_ctor_set_tag(x_20072, 6); +} +lean_ctor_set(x_20072, 0, x_153); +lean_ctor_set(x_20072, 1, x_20070); +x_20073 = lean_ctor_get(x_1, 0); +lean_inc(x_20073); +x_20074 = l_Lean_IR_ToIR_bindVar(x_20073, x_19912, x_4, x_5, x_20060); +x_20075 = lean_ctor_get(x_20074, 0); +lean_inc(x_20075); +x_20076 = lean_ctor_get(x_20074, 1); +lean_inc(x_20076); +lean_dec(x_20074); +x_20077 = lean_ctor_get(x_20075, 0); +lean_inc(x_20077); +x_20078 = lean_ctor_get(x_20075, 1); +lean_inc(x_20078); +lean_dec(x_20075); +x_20079 = l_Lean_IR_ToIR_newVar(x_20078, x_4, x_5, x_20076); +x_20080 = lean_ctor_get(x_20079, 0); +lean_inc(x_20080); +x_20081 = lean_ctor_get(x_20079, 1); +lean_inc(x_20081); +lean_dec(x_20079); +x_20082 = lean_ctor_get(x_20080, 0); +lean_inc(x_20082); +x_20083 = lean_ctor_get(x_20080, 1); +lean_inc(x_20083); +lean_dec(x_20080); +x_20084 = lean_ctor_get(x_1, 2); +lean_inc(x_20084); +lean_inc(x_5); +lean_inc(x_4); +x_20085 = l_Lean_IR_ToIR_lowerType(x_20084, x_20083, x_4, x_5, x_20081); +if (lean_obj_tag(x_20085) == 0) +{ +lean_object* x_20086; lean_object* x_20087; lean_object* x_20088; lean_object* x_20089; lean_object* x_20090; +x_20086 = lean_ctor_get(x_20085, 0); +lean_inc(x_20086); +x_20087 = lean_ctor_get(x_20085, 1); +lean_inc(x_20087); +lean_dec(x_20085); +x_20088 = lean_ctor_get(x_20086, 0); +lean_inc(x_20088); +x_20089 = lean_ctor_get(x_20086, 1); +lean_inc(x_20089); +lean_dec(x_20086); +lean_inc(x_5); +lean_inc(x_4); +lean_inc(x_2); +x_20090 = l_Lean_IR_ToIR_lowerLet___lambda__3(x_2, x_20082, x_20071, x_20077, x_20072, x_20088, x_20089, x_4, x_5, x_20087); +if (lean_obj_tag(x_20090) == 0) +{ +lean_object* x_20091; lean_object* x_20092; lean_object* x_20093; lean_object* x_20094; lean_object* x_20095; lean_object* x_20096; lean_object* x_20097; +x_20091 = lean_ctor_get(x_20090, 0); +lean_inc(x_20091); +x_20092 = lean_ctor_get(x_20090, 1); +lean_inc(x_20092); +lean_dec(x_20090); +x_20093 = lean_ctor_get(x_20091, 0); +lean_inc(x_20093); +x_20094 = lean_ctor_get(x_20091, 1); +lean_inc(x_20094); +if (lean_is_exclusive(x_20091)) { + lean_ctor_release(x_20091, 0); + lean_ctor_release(x_20091, 1); + x_20095 = x_20091; +} else { + lean_dec_ref(x_20091); + x_20095 = lean_box(0); +} +if (lean_is_scalar(x_20063)) { + x_20096 = lean_alloc_ctor(1, 1, 0); +} else { + x_20096 = x_20063; +} +lean_ctor_set(x_20096, 0, x_20093); +if (lean_is_scalar(x_20095)) { + x_20097 = lean_alloc_ctor(0, 2, 0); +} else { + x_20097 = x_20095; +} +lean_ctor_set(x_20097, 0, x_20096); +lean_ctor_set(x_20097, 1, x_20094); +x_20027 = x_20097; +x_20028 = x_20092; +goto block_20054; +} +else +{ +lean_object* x_20098; lean_object* x_20099; lean_object* x_20100; lean_object* x_20101; +lean_dec(x_20063); +lean_dec(x_19917); +lean_dec(x_19907); +lean_dec(x_153); +lean_dec(x_5); +lean_dec(x_4); +lean_dec(x_2); +lean_dec(x_1); +x_20098 = lean_ctor_get(x_20090, 0); +lean_inc(x_20098); +x_20099 = lean_ctor_get(x_20090, 1); +lean_inc(x_20099); +if (lean_is_exclusive(x_20090)) { + lean_ctor_release(x_20090, 0); + lean_ctor_release(x_20090, 1); + x_20100 = x_20090; +} else { + lean_dec_ref(x_20090); + x_20100 = lean_box(0); +} +if (lean_is_scalar(x_20100)) { + x_20101 = 
lean_alloc_ctor(1, 2, 0); +} else { + x_20101 = x_20100; +} +lean_ctor_set(x_20101, 0, x_20098); +lean_ctor_set(x_20101, 1, x_20099); +return x_20101; +} +} +else +{ +lean_object* x_20102; lean_object* x_20103; lean_object* x_20104; lean_object* x_20105; +lean_dec(x_20082); +lean_dec(x_20077); +lean_dec(x_20072); +lean_dec(x_20071); +lean_dec(x_20063); +lean_dec(x_19917); +lean_dec(x_19907); +lean_dec(x_153); +lean_dec(x_5); +lean_dec(x_4); +lean_dec(x_2); +lean_dec(x_1); +x_20102 = lean_ctor_get(x_20085, 0); +lean_inc(x_20102); +x_20103 = lean_ctor_get(x_20085, 1); +lean_inc(x_20103); +if (lean_is_exclusive(x_20085)) { + lean_ctor_release(x_20085, 0); + lean_ctor_release(x_20085, 1); + x_20104 = x_20085; +} else { + lean_dec_ref(x_20085); + x_20104 = lean_box(0); +} +if (lean_is_scalar(x_20104)) { + x_20105 = lean_alloc_ctor(1, 2, 0); +} else { + x_20105 = x_20104; +} +lean_ctor_set(x_20105, 0, x_20102); +lean_ctor_set(x_20105, 1, x_20103); +return x_20105; +} +} +else +{ +lean_object* x_20106; lean_object* x_20107; lean_object* x_20108; lean_object* x_20109; lean_object* x_20110; lean_object* x_20111; lean_object* x_20112; lean_object* x_20113; lean_object* x_20114; +lean_dec(x_20066); +lean_dec(x_20064); +lean_inc(x_19907); +lean_inc(x_153); +if (lean_is_scalar(x_20061)) { + x_20106 = lean_alloc_ctor(6, 2, 0); +} else { + x_20106 = x_20061; + lean_ctor_set_tag(x_20106, 6); +} +lean_ctor_set(x_20106, 0, x_153); +lean_ctor_set(x_20106, 1, x_19907); +x_20107 = lean_ctor_get(x_1, 0); +lean_inc(x_20107); +x_20108 = l_Lean_IR_ToIR_bindVar(x_20107, x_19912, x_4, x_5, x_20060); +x_20109 = lean_ctor_get(x_20108, 0); +lean_inc(x_20109); +x_20110 = lean_ctor_get(x_20108, 1); +lean_inc(x_20110); +lean_dec(x_20108); +x_20111 = lean_ctor_get(x_20109, 0); +lean_inc(x_20111); +x_20112 = lean_ctor_get(x_20109, 1); +lean_inc(x_20112); +lean_dec(x_20109); +x_20113 = lean_ctor_get(x_1, 2); +lean_inc(x_20113); +lean_inc(x_5); +lean_inc(x_4); +x_20114 = l_Lean_IR_ToIR_lowerType(x_20113, x_20112, x_4, x_5, x_20110); +if (lean_obj_tag(x_20114) == 0) +{ +lean_object* x_20115; lean_object* x_20116; lean_object* x_20117; lean_object* x_20118; lean_object* x_20119; +x_20115 = lean_ctor_get(x_20114, 0); +lean_inc(x_20115); +x_20116 = lean_ctor_get(x_20114, 1); +lean_inc(x_20116); +lean_dec(x_20114); +x_20117 = lean_ctor_get(x_20115, 0); +lean_inc(x_20117); +x_20118 = lean_ctor_get(x_20115, 1); +lean_inc(x_20118); +lean_dec(x_20115); +lean_inc(x_5); +lean_inc(x_4); +lean_inc(x_2); +x_20119 = l_Lean_IR_ToIR_lowerLet___lambda__1(x_2, x_20111, x_20106, x_20117, x_20118, x_4, x_5, x_20116); +if (lean_obj_tag(x_20119) == 0) +{ +lean_object* x_20120; lean_object* x_20121; lean_object* x_20122; lean_object* x_20123; lean_object* x_20124; lean_object* x_20125; lean_object* x_20126; +x_20120 = lean_ctor_get(x_20119, 0); +lean_inc(x_20120); +x_20121 = lean_ctor_get(x_20119, 1); +lean_inc(x_20121); +lean_dec(x_20119); +x_20122 = lean_ctor_get(x_20120, 0); +lean_inc(x_20122); +x_20123 = lean_ctor_get(x_20120, 1); +lean_inc(x_20123); +if (lean_is_exclusive(x_20120)) { + lean_ctor_release(x_20120, 0); + lean_ctor_release(x_20120, 1); + x_20124 = x_20120; +} else { + lean_dec_ref(x_20120); + x_20124 = lean_box(0); +} +if (lean_is_scalar(x_20063)) { + x_20125 = lean_alloc_ctor(1, 1, 0); +} else { + x_20125 = x_20063; +} +lean_ctor_set(x_20125, 0, x_20122); +if (lean_is_scalar(x_20124)) { + x_20126 = lean_alloc_ctor(0, 2, 0); +} else { + x_20126 = x_20124; +} +lean_ctor_set(x_20126, 0, x_20125); +lean_ctor_set(x_20126, 1, x_20123); 
+x_20027 = x_20126; +x_20028 = x_20121; +goto block_20054; +} +else +{ +lean_object* x_20127; lean_object* x_20128; lean_object* x_20129; lean_object* x_20130; +lean_dec(x_20063); +lean_dec(x_19917); +lean_dec(x_19907); +lean_dec(x_153); +lean_dec(x_5); +lean_dec(x_4); +lean_dec(x_2); +lean_dec(x_1); +x_20127 = lean_ctor_get(x_20119, 0); +lean_inc(x_20127); +x_20128 = lean_ctor_get(x_20119, 1); +lean_inc(x_20128); +if (lean_is_exclusive(x_20119)) { + lean_ctor_release(x_20119, 0); + lean_ctor_release(x_20119, 1); + x_20129 = x_20119; +} else { + lean_dec_ref(x_20119); + x_20129 = lean_box(0); +} +if (lean_is_scalar(x_20129)) { + x_20130 = lean_alloc_ctor(1, 2, 0); +} else { + x_20130 = x_20129; +} +lean_ctor_set(x_20130, 0, x_20127); +lean_ctor_set(x_20130, 1, x_20128); +return x_20130; +} +} +else +{ +lean_object* x_20131; lean_object* x_20132; lean_object* x_20133; lean_object* x_20134; +lean_dec(x_20111); +lean_dec(x_20106); +lean_dec(x_20063); +lean_dec(x_19917); +lean_dec(x_19907); +lean_dec(x_153); +lean_dec(x_5); +lean_dec(x_4); +lean_dec(x_2); +lean_dec(x_1); +x_20131 = lean_ctor_get(x_20114, 0); +lean_inc(x_20131); +x_20132 = lean_ctor_get(x_20114, 1); +lean_inc(x_20132); +if (lean_is_exclusive(x_20114)) { + lean_ctor_release(x_20114, 0); + lean_ctor_release(x_20114, 1); + x_20133 = x_20114; +} else { + lean_dec_ref(x_20114); + x_20133 = lean_box(0); +} +if (lean_is_scalar(x_20133)) { + x_20134 = lean_alloc_ctor(1, 2, 0); +} else { + x_20134 = x_20133; +} +lean_ctor_set(x_20134, 0, x_20131); +lean_ctor_set(x_20134, 1, x_20132); +return x_20134; +} +} +} +else +{ +lean_object* x_20135; lean_object* x_20136; lean_object* x_20137; lean_object* x_20138; lean_object* x_20139; lean_object* x_20140; lean_object* x_20141; lean_object* x_20142; lean_object* x_20143; +lean_dec(x_20066); +lean_dec(x_20064); +lean_inc(x_19907); +lean_inc(x_153); +if (lean_is_scalar(x_20061)) { + x_20135 = lean_alloc_ctor(7, 2, 0); +} else { + x_20135 = x_20061; + lean_ctor_set_tag(x_20135, 7); +} +lean_ctor_set(x_20135, 0, x_153); +lean_ctor_set(x_20135, 1, x_19907); +x_20136 = lean_ctor_get(x_1, 0); +lean_inc(x_20136); +x_20137 = l_Lean_IR_ToIR_bindVar(x_20136, x_19912, x_4, x_5, x_20060); +x_20138 = lean_ctor_get(x_20137, 0); +lean_inc(x_20138); +x_20139 = lean_ctor_get(x_20137, 1); +lean_inc(x_20139); +lean_dec(x_20137); +x_20140 = lean_ctor_get(x_20138, 0); +lean_inc(x_20140); +x_20141 = lean_ctor_get(x_20138, 1); +lean_inc(x_20141); +lean_dec(x_20138); +x_20142 = lean_box(7); +lean_inc(x_5); +lean_inc(x_4); +lean_inc(x_2); +x_20143 = l_Lean_IR_ToIR_lowerLet___lambda__1(x_2, x_20140, x_20135, x_20142, x_20141, x_4, x_5, x_20139); +if (lean_obj_tag(x_20143) == 0) +{ +lean_object* x_20144; lean_object* x_20145; lean_object* x_20146; lean_object* x_20147; lean_object* x_20148; lean_object* x_20149; lean_object* x_20150; +x_20144 = lean_ctor_get(x_20143, 0); +lean_inc(x_20144); +x_20145 = lean_ctor_get(x_20143, 1); +lean_inc(x_20145); +lean_dec(x_20143); +x_20146 = lean_ctor_get(x_20144, 0); +lean_inc(x_20146); +x_20147 = lean_ctor_get(x_20144, 1); +lean_inc(x_20147); +if (lean_is_exclusive(x_20144)) { + lean_ctor_release(x_20144, 0); + lean_ctor_release(x_20144, 1); + x_20148 = x_20144; +} else { + lean_dec_ref(x_20144); + x_20148 = lean_box(0); +} +if (lean_is_scalar(x_20063)) { + x_20149 = lean_alloc_ctor(1, 1, 0); +} else { + x_20149 = x_20063; +} +lean_ctor_set(x_20149, 0, x_20146); +if (lean_is_scalar(x_20148)) { + x_20150 = lean_alloc_ctor(0, 2, 0); +} else { + x_20150 = x_20148; +} 
+lean_ctor_set(x_20150, 0, x_20149); +lean_ctor_set(x_20150, 1, x_20147); +x_20027 = x_20150; +x_20028 = x_20145; +goto block_20054; +} +else +{ +lean_object* x_20151; lean_object* x_20152; lean_object* x_20153; lean_object* x_20154; +lean_dec(x_20063); +lean_dec(x_19917); +lean_dec(x_19907); +lean_dec(x_153); +lean_dec(x_5); +lean_dec(x_4); +lean_dec(x_2); +lean_dec(x_1); +x_20151 = lean_ctor_get(x_20143, 0); +lean_inc(x_20151); +x_20152 = lean_ctor_get(x_20143, 1); +lean_inc(x_20152); +if (lean_is_exclusive(x_20143)) { + lean_ctor_release(x_20143, 0); + lean_ctor_release(x_20143, 1); + x_20153 = x_20143; +} else { + lean_dec_ref(x_20143); + x_20153 = lean_box(0); +} +if (lean_is_scalar(x_20153)) { + x_20154 = lean_alloc_ctor(1, 2, 0); +} else { + x_20154 = x_20153; +} +lean_ctor_set(x_20154, 0, x_20151); +lean_ctor_set(x_20154, 1, x_20152); +return x_20154; +} +} +} +block_20054: +{ +lean_object* x_20029; +x_20029 = lean_ctor_get(x_20027, 0); +lean_inc(x_20029); +if (lean_obj_tag(x_20029) == 0) +{ +lean_object* x_20030; lean_object* x_20031; lean_object* x_20032; lean_object* x_20033; lean_object* x_20034; lean_object* x_20035; lean_object* x_20036; lean_object* x_20037; lean_object* x_20038; lean_object* x_20039; +lean_dec(x_19917); +x_20030 = lean_ctor_get(x_20027, 1); +lean_inc(x_20030); +lean_dec(x_20027); +x_20031 = lean_alloc_ctor(6, 2, 0); +lean_ctor_set(x_20031, 0, x_153); +lean_ctor_set(x_20031, 1, x_19907); +x_20032 = lean_ctor_get(x_1, 0); +lean_inc(x_20032); +x_20033 = l_Lean_IR_ToIR_bindVar(x_20032, x_20030, x_4, x_5, x_20028); +x_20034 = lean_ctor_get(x_20033, 0); +lean_inc(x_20034); +x_20035 = lean_ctor_get(x_20033, 1); +lean_inc(x_20035); +lean_dec(x_20033); +x_20036 = lean_ctor_get(x_20034, 0); +lean_inc(x_20036); +x_20037 = lean_ctor_get(x_20034, 1); +lean_inc(x_20037); +lean_dec(x_20034); +x_20038 = lean_ctor_get(x_1, 2); +lean_inc(x_20038); +lean_dec(x_1); +lean_inc(x_5); +lean_inc(x_4); +x_20039 = l_Lean_IR_ToIR_lowerType(x_20038, x_20037, x_4, x_5, x_20035); +if (lean_obj_tag(x_20039) == 0) +{ +lean_object* x_20040; lean_object* x_20041; lean_object* x_20042; lean_object* x_20043; lean_object* x_20044; +x_20040 = lean_ctor_get(x_20039, 0); +lean_inc(x_20040); +x_20041 = lean_ctor_get(x_20039, 1); +lean_inc(x_20041); +lean_dec(x_20039); +x_20042 = lean_ctor_get(x_20040, 0); +lean_inc(x_20042); +x_20043 = lean_ctor_get(x_20040, 1); +lean_inc(x_20043); +lean_dec(x_20040); +x_20044 = l_Lean_IR_ToIR_lowerLet___lambda__1(x_2, x_20036, x_20031, x_20042, x_20043, x_4, x_5, x_20041); +return x_20044; +} +else +{ +lean_object* x_20045; lean_object* x_20046; lean_object* x_20047; lean_object* x_20048; +lean_dec(x_20036); +lean_dec(x_20031); +lean_dec(x_5); +lean_dec(x_4); +lean_dec(x_2); +x_20045 = lean_ctor_get(x_20039, 0); +lean_inc(x_20045); +x_20046 = lean_ctor_get(x_20039, 1); +lean_inc(x_20046); +if (lean_is_exclusive(x_20039)) { + lean_ctor_release(x_20039, 0); + lean_ctor_release(x_20039, 1); + x_20047 = x_20039; +} else { + lean_dec_ref(x_20039); + x_20047 = lean_box(0); +} +if (lean_is_scalar(x_20047)) { + x_20048 = lean_alloc_ctor(1, 2, 0); +} else { + x_20048 = x_20047; +} +lean_ctor_set(x_20048, 0, x_20045); +lean_ctor_set(x_20048, 1, x_20046); +return x_20048; +} +} +else +{ +lean_object* x_20049; lean_object* x_20050; lean_object* x_20051; lean_object* x_20052; lean_object* x_20053; +lean_dec(x_19907); +lean_dec(x_153); +lean_dec(x_5); +lean_dec(x_4); +lean_dec(x_2); +lean_dec(x_1); +x_20049 = lean_ctor_get(x_20027, 1); +lean_inc(x_20049); +if 
(lean_is_exclusive(x_20027)) { + lean_ctor_release(x_20027, 0); + lean_ctor_release(x_20027, 1); + x_20050 = x_20027; +} else { + lean_dec_ref(x_20027); + x_20050 = lean_box(0); +} +x_20051 = lean_ctor_get(x_20029, 0); +lean_inc(x_20051); +lean_dec(x_20029); +if (lean_is_scalar(x_20050)) { + x_20052 = lean_alloc_ctor(0, 2, 0); +} else { + x_20052 = x_20050; +} +lean_ctor_set(x_20052, 0, x_20051); +lean_ctor_set(x_20052, 1, x_20049); +if (lean_is_scalar(x_19917)) { + x_20053 = lean_alloc_ctor(0, 2, 0); +} else { + x_20053 = x_19917; +} +lean_ctor_set(x_20053, 0, x_20052); +lean_ctor_set(x_20053, 1, x_20028); +return x_20053; +} +} +} +case 2: +{ +lean_object* x_20155; lean_object* x_20156; +lean_dec(x_19923); +lean_dec(x_19918); +lean_dec(x_19917); +lean_dec(x_19913); +lean_dec(x_19907); +lean_dec(x_17699); +lean_dec(x_17698); +lean_dec(x_153); +lean_dec(x_2); +lean_dec(x_1); +x_20155 = l_Lean_IR_ToIR_lowerLet___closed__18; +x_20156 = l_panic___at_Lean_IR_ToIR_lowerAlt_loop___spec__1(x_20155, x_19912, x_4, x_5, x_19916); +return x_20156; +} +case 3: +{ +lean_object* x_20157; lean_object* x_20158; lean_object* x_20185; lean_object* x_20186; +lean_dec(x_19923); +lean_dec(x_19918); +lean_dec(x_17699); +lean_dec(x_17698); +lean_inc(x_153); +x_20185 = l_Lean_Compiler_LCNF_getMonoDecl_x3f(x_153, x_4, x_5, x_19916); +x_20186 = lean_ctor_get(x_20185, 0); +lean_inc(x_20186); +if (lean_obj_tag(x_20186) == 0) +{ +lean_object* x_20187; lean_object* x_20188; lean_object* x_20189; +x_20187 = lean_ctor_get(x_20185, 1); +lean_inc(x_20187); +lean_dec(x_20185); +x_20188 = lean_box(0); +if (lean_is_scalar(x_19913)) { + x_20189 = lean_alloc_ctor(0, 2, 0); +} else { + x_20189 = x_19913; +} +lean_ctor_set(x_20189, 0, x_20188); +lean_ctor_set(x_20189, 1, x_19912); +x_20157 = x_20189; +x_20158 = x_20187; +goto block_20184; +} +else +{ +lean_object* x_20190; lean_object* x_20191; lean_object* x_20192; lean_object* x_20193; lean_object* x_20194; lean_object* x_20195; lean_object* x_20196; uint8_t x_20197; +lean_dec(x_19913); +x_20190 = lean_ctor_get(x_20185, 1); +lean_inc(x_20190); +if (lean_is_exclusive(x_20185)) { + lean_ctor_release(x_20185, 0); + lean_ctor_release(x_20185, 1); + x_20191 = x_20185; +} else { + lean_dec_ref(x_20185); + x_20191 = lean_box(0); +} +x_20192 = lean_ctor_get(x_20186, 0); +lean_inc(x_20192); +if (lean_is_exclusive(x_20186)) { + lean_ctor_release(x_20186, 0); + x_20193 = x_20186; +} else { + lean_dec_ref(x_20186); + x_20193 = lean_box(0); +} +x_20194 = lean_array_get_size(x_19907); +x_20195 = lean_ctor_get(x_20192, 3); +lean_inc(x_20195); +lean_dec(x_20192); +x_20196 = lean_array_get_size(x_20195); +lean_dec(x_20195); +x_20197 = lean_nat_dec_lt(x_20194, x_20196); +if (x_20197 == 0) +{ +uint8_t x_20198; +x_20198 = lean_nat_dec_eq(x_20194, x_20196); +if (x_20198 == 0) +{ +lean_object* x_20199; lean_object* x_20200; lean_object* x_20201; lean_object* x_20202; lean_object* x_20203; lean_object* x_20204; lean_object* x_20205; lean_object* x_20206; lean_object* x_20207; lean_object* x_20208; lean_object* x_20209; lean_object* x_20210; lean_object* x_20211; lean_object* x_20212; lean_object* x_20213; lean_object* x_20214; lean_object* x_20215; +x_20199 = lean_unsigned_to_nat(0u); +x_20200 = l_Array_extract___rarg(x_19907, x_20199, x_20196); +x_20201 = l_Array_extract___rarg(x_19907, x_20196, x_20194); +lean_dec(x_20194); +lean_inc(x_153); +if (lean_is_scalar(x_20191)) { + x_20202 = lean_alloc_ctor(6, 2, 0); +} else { + x_20202 = x_20191; + lean_ctor_set_tag(x_20202, 6); +} 
+lean_ctor_set(x_20202, 0, x_153); +lean_ctor_set(x_20202, 1, x_20200); +x_20203 = lean_ctor_get(x_1, 0); +lean_inc(x_20203); +x_20204 = l_Lean_IR_ToIR_bindVar(x_20203, x_19912, x_4, x_5, x_20190); +x_20205 = lean_ctor_get(x_20204, 0); +lean_inc(x_20205); +x_20206 = lean_ctor_get(x_20204, 1); +lean_inc(x_20206); +lean_dec(x_20204); +x_20207 = lean_ctor_get(x_20205, 0); +lean_inc(x_20207); +x_20208 = lean_ctor_get(x_20205, 1); +lean_inc(x_20208); +lean_dec(x_20205); +x_20209 = l_Lean_IR_ToIR_newVar(x_20208, x_4, x_5, x_20206); +x_20210 = lean_ctor_get(x_20209, 0); +lean_inc(x_20210); +x_20211 = lean_ctor_get(x_20209, 1); +lean_inc(x_20211); +lean_dec(x_20209); +x_20212 = lean_ctor_get(x_20210, 0); +lean_inc(x_20212); +x_20213 = lean_ctor_get(x_20210, 1); +lean_inc(x_20213); +lean_dec(x_20210); +x_20214 = lean_ctor_get(x_1, 2); +lean_inc(x_20214); +lean_inc(x_5); +lean_inc(x_4); +x_20215 = l_Lean_IR_ToIR_lowerType(x_20214, x_20213, x_4, x_5, x_20211); +if (lean_obj_tag(x_20215) == 0) +{ +lean_object* x_20216; lean_object* x_20217; lean_object* x_20218; lean_object* x_20219; lean_object* x_20220; +x_20216 = lean_ctor_get(x_20215, 0); +lean_inc(x_20216); +x_20217 = lean_ctor_get(x_20215, 1); +lean_inc(x_20217); +lean_dec(x_20215); +x_20218 = lean_ctor_get(x_20216, 0); +lean_inc(x_20218); +x_20219 = lean_ctor_get(x_20216, 1); +lean_inc(x_20219); +lean_dec(x_20216); +lean_inc(x_5); +lean_inc(x_4); +lean_inc(x_2); +x_20220 = l_Lean_IR_ToIR_lowerLet___lambda__3(x_2, x_20212, x_20201, x_20207, x_20202, x_20218, x_20219, x_4, x_5, x_20217); +if (lean_obj_tag(x_20220) == 0) +{ +lean_object* x_20221; lean_object* x_20222; lean_object* x_20223; lean_object* x_20224; lean_object* x_20225; lean_object* x_20226; lean_object* x_20227; +x_20221 = lean_ctor_get(x_20220, 0); +lean_inc(x_20221); +x_20222 = lean_ctor_get(x_20220, 1); +lean_inc(x_20222); +lean_dec(x_20220); +x_20223 = lean_ctor_get(x_20221, 0); +lean_inc(x_20223); +x_20224 = lean_ctor_get(x_20221, 1); +lean_inc(x_20224); +if (lean_is_exclusive(x_20221)) { + lean_ctor_release(x_20221, 0); + lean_ctor_release(x_20221, 1); + x_20225 = x_20221; +} else { + lean_dec_ref(x_20221); + x_20225 = lean_box(0); +} +if (lean_is_scalar(x_20193)) { + x_20226 = lean_alloc_ctor(1, 1, 0); +} else { + x_20226 = x_20193; +} +lean_ctor_set(x_20226, 0, x_20223); +if (lean_is_scalar(x_20225)) { + x_20227 = lean_alloc_ctor(0, 2, 0); +} else { + x_20227 = x_20225; +} +lean_ctor_set(x_20227, 0, x_20226); +lean_ctor_set(x_20227, 1, x_20224); +x_20157 = x_20227; +x_20158 = x_20222; +goto block_20184; +} +else +{ +lean_object* x_20228; lean_object* x_20229; lean_object* x_20230; lean_object* x_20231; +lean_dec(x_20193); +lean_dec(x_19917); +lean_dec(x_19907); +lean_dec(x_153); +lean_dec(x_5); +lean_dec(x_4); +lean_dec(x_2); +lean_dec(x_1); +x_20228 = lean_ctor_get(x_20220, 0); +lean_inc(x_20228); +x_20229 = lean_ctor_get(x_20220, 1); +lean_inc(x_20229); +if (lean_is_exclusive(x_20220)) { + lean_ctor_release(x_20220, 0); + lean_ctor_release(x_20220, 1); + x_20230 = x_20220; +} else { + lean_dec_ref(x_20220); + x_20230 = lean_box(0); +} +if (lean_is_scalar(x_20230)) { + x_20231 = lean_alloc_ctor(1, 2, 0); +} else { + x_20231 = x_20230; +} +lean_ctor_set(x_20231, 0, x_20228); +lean_ctor_set(x_20231, 1, x_20229); +return x_20231; +} +} +else +{ +lean_object* x_20232; lean_object* x_20233; lean_object* x_20234; lean_object* x_20235; +lean_dec(x_20212); +lean_dec(x_20207); +lean_dec(x_20202); +lean_dec(x_20201); +lean_dec(x_20193); +lean_dec(x_19917); +lean_dec(x_19907); 
+lean_dec(x_153); +lean_dec(x_5); +lean_dec(x_4); +lean_dec(x_2); +lean_dec(x_1); +x_20232 = lean_ctor_get(x_20215, 0); +lean_inc(x_20232); +x_20233 = lean_ctor_get(x_20215, 1); +lean_inc(x_20233); +if (lean_is_exclusive(x_20215)) { + lean_ctor_release(x_20215, 0); + lean_ctor_release(x_20215, 1); + x_20234 = x_20215; +} else { + lean_dec_ref(x_20215); + x_20234 = lean_box(0); +} +if (lean_is_scalar(x_20234)) { + x_20235 = lean_alloc_ctor(1, 2, 0); +} else { + x_20235 = x_20234; +} +lean_ctor_set(x_20235, 0, x_20232); +lean_ctor_set(x_20235, 1, x_20233); +return x_20235; +} +} +else +{ +lean_object* x_20236; lean_object* x_20237; lean_object* x_20238; lean_object* x_20239; lean_object* x_20240; lean_object* x_20241; lean_object* x_20242; lean_object* x_20243; lean_object* x_20244; +lean_dec(x_20196); +lean_dec(x_20194); +lean_inc(x_19907); +lean_inc(x_153); +if (lean_is_scalar(x_20191)) { + x_20236 = lean_alloc_ctor(6, 2, 0); +} else { + x_20236 = x_20191; + lean_ctor_set_tag(x_20236, 6); +} +lean_ctor_set(x_20236, 0, x_153); +lean_ctor_set(x_20236, 1, x_19907); +x_20237 = lean_ctor_get(x_1, 0); +lean_inc(x_20237); +x_20238 = l_Lean_IR_ToIR_bindVar(x_20237, x_19912, x_4, x_5, x_20190); +x_20239 = lean_ctor_get(x_20238, 0); +lean_inc(x_20239); +x_20240 = lean_ctor_get(x_20238, 1); +lean_inc(x_20240); +lean_dec(x_20238); +x_20241 = lean_ctor_get(x_20239, 0); +lean_inc(x_20241); +x_20242 = lean_ctor_get(x_20239, 1); +lean_inc(x_20242); +lean_dec(x_20239); +x_20243 = lean_ctor_get(x_1, 2); +lean_inc(x_20243); +lean_inc(x_5); +lean_inc(x_4); +x_20244 = l_Lean_IR_ToIR_lowerType(x_20243, x_20242, x_4, x_5, x_20240); +if (lean_obj_tag(x_20244) == 0) +{ +lean_object* x_20245; lean_object* x_20246; lean_object* x_20247; lean_object* x_20248; lean_object* x_20249; +x_20245 = lean_ctor_get(x_20244, 0); +lean_inc(x_20245); +x_20246 = lean_ctor_get(x_20244, 1); +lean_inc(x_20246); +lean_dec(x_20244); +x_20247 = lean_ctor_get(x_20245, 0); +lean_inc(x_20247); +x_20248 = lean_ctor_get(x_20245, 1); +lean_inc(x_20248); +lean_dec(x_20245); +lean_inc(x_5); +lean_inc(x_4); +lean_inc(x_2); +x_20249 = l_Lean_IR_ToIR_lowerLet___lambda__1(x_2, x_20241, x_20236, x_20247, x_20248, x_4, x_5, x_20246); +if (lean_obj_tag(x_20249) == 0) +{ +lean_object* x_20250; lean_object* x_20251; lean_object* x_20252; lean_object* x_20253; lean_object* x_20254; lean_object* x_20255; lean_object* x_20256; +x_20250 = lean_ctor_get(x_20249, 0); +lean_inc(x_20250); +x_20251 = lean_ctor_get(x_20249, 1); +lean_inc(x_20251); +lean_dec(x_20249); +x_20252 = lean_ctor_get(x_20250, 0); +lean_inc(x_20252); +x_20253 = lean_ctor_get(x_20250, 1); +lean_inc(x_20253); +if (lean_is_exclusive(x_20250)) { + lean_ctor_release(x_20250, 0); + lean_ctor_release(x_20250, 1); + x_20254 = x_20250; +} else { + lean_dec_ref(x_20250); + x_20254 = lean_box(0); +} +if (lean_is_scalar(x_20193)) { + x_20255 = lean_alloc_ctor(1, 1, 0); +} else { + x_20255 = x_20193; +} +lean_ctor_set(x_20255, 0, x_20252); +if (lean_is_scalar(x_20254)) { + x_20256 = lean_alloc_ctor(0, 2, 0); +} else { + x_20256 = x_20254; +} +lean_ctor_set(x_20256, 0, x_20255); +lean_ctor_set(x_20256, 1, x_20253); +x_20157 = x_20256; +x_20158 = x_20251; +goto block_20184; +} +else +{ +lean_object* x_20257; lean_object* x_20258; lean_object* x_20259; lean_object* x_20260; +lean_dec(x_20193); +lean_dec(x_19917); +lean_dec(x_19907); +lean_dec(x_153); +lean_dec(x_5); +lean_dec(x_4); +lean_dec(x_2); +lean_dec(x_1); +x_20257 = lean_ctor_get(x_20249, 0); +lean_inc(x_20257); +x_20258 = lean_ctor_get(x_20249, 
1); +lean_inc(x_20258); +if (lean_is_exclusive(x_20249)) { + lean_ctor_release(x_20249, 0); + lean_ctor_release(x_20249, 1); + x_20259 = x_20249; +} else { + lean_dec_ref(x_20249); + x_20259 = lean_box(0); +} +if (lean_is_scalar(x_20259)) { + x_20260 = lean_alloc_ctor(1, 2, 0); +} else { + x_20260 = x_20259; +} +lean_ctor_set(x_20260, 0, x_20257); +lean_ctor_set(x_20260, 1, x_20258); +return x_20260; +} +} +else +{ +lean_object* x_20261; lean_object* x_20262; lean_object* x_20263; lean_object* x_20264; +lean_dec(x_20241); +lean_dec(x_20236); +lean_dec(x_20193); +lean_dec(x_19917); +lean_dec(x_19907); +lean_dec(x_153); +lean_dec(x_5); +lean_dec(x_4); +lean_dec(x_2); +lean_dec(x_1); +x_20261 = lean_ctor_get(x_20244, 0); +lean_inc(x_20261); +x_20262 = lean_ctor_get(x_20244, 1); +lean_inc(x_20262); +if (lean_is_exclusive(x_20244)) { + lean_ctor_release(x_20244, 0); + lean_ctor_release(x_20244, 1); + x_20263 = x_20244; +} else { + lean_dec_ref(x_20244); + x_20263 = lean_box(0); +} +if (lean_is_scalar(x_20263)) { + x_20264 = lean_alloc_ctor(1, 2, 0); +} else { + x_20264 = x_20263; +} +lean_ctor_set(x_20264, 0, x_20261); +lean_ctor_set(x_20264, 1, x_20262); +return x_20264; +} +} +} +else +{ +lean_object* x_20265; lean_object* x_20266; lean_object* x_20267; lean_object* x_20268; lean_object* x_20269; lean_object* x_20270; lean_object* x_20271; lean_object* x_20272; lean_object* x_20273; +lean_dec(x_20196); +lean_dec(x_20194); +lean_inc(x_19907); +lean_inc(x_153); +if (lean_is_scalar(x_20191)) { + x_20265 = lean_alloc_ctor(7, 2, 0); +} else { + x_20265 = x_20191; + lean_ctor_set_tag(x_20265, 7); +} +lean_ctor_set(x_20265, 0, x_153); +lean_ctor_set(x_20265, 1, x_19907); +x_20266 = lean_ctor_get(x_1, 0); +lean_inc(x_20266); +x_20267 = l_Lean_IR_ToIR_bindVar(x_20266, x_19912, x_4, x_5, x_20190); +x_20268 = lean_ctor_get(x_20267, 0); +lean_inc(x_20268); +x_20269 = lean_ctor_get(x_20267, 1); +lean_inc(x_20269); +lean_dec(x_20267); +x_20270 = lean_ctor_get(x_20268, 0); +lean_inc(x_20270); +x_20271 = lean_ctor_get(x_20268, 1); +lean_inc(x_20271); +lean_dec(x_20268); +x_20272 = lean_box(7); +lean_inc(x_5); +lean_inc(x_4); +lean_inc(x_2); +x_20273 = l_Lean_IR_ToIR_lowerLet___lambda__1(x_2, x_20270, x_20265, x_20272, x_20271, x_4, x_5, x_20269); +if (lean_obj_tag(x_20273) == 0) +{ +lean_object* x_20274; lean_object* x_20275; lean_object* x_20276; lean_object* x_20277; lean_object* x_20278; lean_object* x_20279; lean_object* x_20280; +x_20274 = lean_ctor_get(x_20273, 0); +lean_inc(x_20274); +x_20275 = lean_ctor_get(x_20273, 1); +lean_inc(x_20275); +lean_dec(x_20273); +x_20276 = lean_ctor_get(x_20274, 0); +lean_inc(x_20276); +x_20277 = lean_ctor_get(x_20274, 1); +lean_inc(x_20277); +if (lean_is_exclusive(x_20274)) { + lean_ctor_release(x_20274, 0); + lean_ctor_release(x_20274, 1); + x_20278 = x_20274; +} else { + lean_dec_ref(x_20274); + x_20278 = lean_box(0); +} +if (lean_is_scalar(x_20193)) { + x_20279 = lean_alloc_ctor(1, 1, 0); +} else { + x_20279 = x_20193; +} +lean_ctor_set(x_20279, 0, x_20276); +if (lean_is_scalar(x_20278)) { + x_20280 = lean_alloc_ctor(0, 2, 0); +} else { + x_20280 = x_20278; +} +lean_ctor_set(x_20280, 0, x_20279); +lean_ctor_set(x_20280, 1, x_20277); +x_20157 = x_20280; +x_20158 = x_20275; +goto block_20184; +} +else +{ +lean_object* x_20281; lean_object* x_20282; lean_object* x_20283; lean_object* x_20284; +lean_dec(x_20193); +lean_dec(x_19917); +lean_dec(x_19907); +lean_dec(x_153); +lean_dec(x_5); +lean_dec(x_4); +lean_dec(x_2); +lean_dec(x_1); +x_20281 = lean_ctor_get(x_20273, 0); 
+lean_inc(x_20281); +x_20282 = lean_ctor_get(x_20273, 1); +lean_inc(x_20282); +if (lean_is_exclusive(x_20273)) { + lean_ctor_release(x_20273, 0); + lean_ctor_release(x_20273, 1); + x_20283 = x_20273; +} else { + lean_dec_ref(x_20273); + x_20283 = lean_box(0); +} +if (lean_is_scalar(x_20283)) { + x_20284 = lean_alloc_ctor(1, 2, 0); +} else { + x_20284 = x_20283; +} +lean_ctor_set(x_20284, 0, x_20281); +lean_ctor_set(x_20284, 1, x_20282); +return x_20284; +} +} +} +block_20184: +{ +lean_object* x_20159; +x_20159 = lean_ctor_get(x_20157, 0); +lean_inc(x_20159); +if (lean_obj_tag(x_20159) == 0) +{ +lean_object* x_20160; lean_object* x_20161; lean_object* x_20162; lean_object* x_20163; lean_object* x_20164; lean_object* x_20165; lean_object* x_20166; lean_object* x_20167; lean_object* x_20168; lean_object* x_20169; +lean_dec(x_19917); +x_20160 = lean_ctor_get(x_20157, 1); +lean_inc(x_20160); +lean_dec(x_20157); +x_20161 = lean_alloc_ctor(6, 2, 0); +lean_ctor_set(x_20161, 0, x_153); +lean_ctor_set(x_20161, 1, x_19907); +x_20162 = lean_ctor_get(x_1, 0); +lean_inc(x_20162); +x_20163 = l_Lean_IR_ToIR_bindVar(x_20162, x_20160, x_4, x_5, x_20158); +x_20164 = lean_ctor_get(x_20163, 0); +lean_inc(x_20164); +x_20165 = lean_ctor_get(x_20163, 1); +lean_inc(x_20165); +lean_dec(x_20163); +x_20166 = lean_ctor_get(x_20164, 0); +lean_inc(x_20166); +x_20167 = lean_ctor_get(x_20164, 1); +lean_inc(x_20167); +lean_dec(x_20164); +x_20168 = lean_ctor_get(x_1, 2); +lean_inc(x_20168); +lean_dec(x_1); +lean_inc(x_5); +lean_inc(x_4); +x_20169 = l_Lean_IR_ToIR_lowerType(x_20168, x_20167, x_4, x_5, x_20165); +if (lean_obj_tag(x_20169) == 0) +{ +lean_object* x_20170; lean_object* x_20171; lean_object* x_20172; lean_object* x_20173; lean_object* x_20174; +x_20170 = lean_ctor_get(x_20169, 0); +lean_inc(x_20170); +x_20171 = lean_ctor_get(x_20169, 1); +lean_inc(x_20171); +lean_dec(x_20169); +x_20172 = lean_ctor_get(x_20170, 0); +lean_inc(x_20172); +x_20173 = lean_ctor_get(x_20170, 1); +lean_inc(x_20173); +lean_dec(x_20170); +x_20174 = l_Lean_IR_ToIR_lowerLet___lambda__1(x_2, x_20166, x_20161, x_20172, x_20173, x_4, x_5, x_20171); +return x_20174; +} +else +{ +lean_object* x_20175; lean_object* x_20176; lean_object* x_20177; lean_object* x_20178; +lean_dec(x_20166); +lean_dec(x_20161); +lean_dec(x_5); +lean_dec(x_4); +lean_dec(x_2); +x_20175 = lean_ctor_get(x_20169, 0); +lean_inc(x_20175); +x_20176 = lean_ctor_get(x_20169, 1); +lean_inc(x_20176); +if (lean_is_exclusive(x_20169)) { + lean_ctor_release(x_20169, 0); + lean_ctor_release(x_20169, 1); + x_20177 = x_20169; +} else { + lean_dec_ref(x_20169); + x_20177 = lean_box(0); +} +if (lean_is_scalar(x_20177)) { + x_20178 = lean_alloc_ctor(1, 2, 0); +} else { + x_20178 = x_20177; +} +lean_ctor_set(x_20178, 0, x_20175); +lean_ctor_set(x_20178, 1, x_20176); +return x_20178; +} +} +else +{ +lean_object* x_20179; lean_object* x_20180; lean_object* x_20181; lean_object* x_20182; lean_object* x_20183; +lean_dec(x_19907); +lean_dec(x_153); +lean_dec(x_5); +lean_dec(x_4); +lean_dec(x_2); +lean_dec(x_1); +x_20179 = lean_ctor_get(x_20157, 1); +lean_inc(x_20179); +if (lean_is_exclusive(x_20157)) { + lean_ctor_release(x_20157, 0); + lean_ctor_release(x_20157, 1); + x_20180 = x_20157; +} else { + lean_dec_ref(x_20157); + x_20180 = lean_box(0); +} +x_20181 = lean_ctor_get(x_20159, 0); +lean_inc(x_20181); +lean_dec(x_20159); +if (lean_is_scalar(x_20180)) { + x_20182 = lean_alloc_ctor(0, 2, 0); +} else { + x_20182 = x_20180; +} +lean_ctor_set(x_20182, 0, x_20181); +lean_ctor_set(x_20182, 1, 
x_20179); +if (lean_is_scalar(x_19917)) { + x_20183 = lean_alloc_ctor(0, 2, 0); +} else { + x_20183 = x_19917; +} +lean_ctor_set(x_20183, 0, x_20182); +lean_ctor_set(x_20183, 1, x_20158); +return x_20183; +} +} +} +case 4: +{ +lean_object* x_20285; lean_object* x_20286; uint8_t x_20287; +lean_dec(x_19918); +lean_dec(x_19917); +lean_dec(x_19913); +lean_dec(x_17699); +lean_dec(x_17698); +if (lean_is_exclusive(x_19923)) { + lean_ctor_release(x_19923, 0); + x_20285 = x_19923; +} else { + lean_dec_ref(x_19923); + x_20285 = lean_box(0); +} +x_20286 = l_Lean_IR_ToIR_lowerLet___closed__20; +x_20287 = lean_name_eq(x_153, x_20286); +if (x_20287 == 0) +{ +uint8_t x_20288; lean_object* x_20289; lean_object* x_20290; lean_object* x_20291; lean_object* x_20292; lean_object* x_20293; lean_object* x_20294; lean_object* x_20295; lean_object* x_20296; lean_object* x_20297; +lean_dec(x_19907); +lean_dec(x_2); +lean_dec(x_1); +x_20288 = 1; +x_20289 = l_Lean_IR_ToIR_lowerLet___closed__14; +x_20290 = l_Lean_Name_toString(x_153, x_20288, x_20289); +if (lean_is_scalar(x_20285)) { + x_20291 = lean_alloc_ctor(3, 1, 0); +} else { + x_20291 = x_20285; + lean_ctor_set_tag(x_20291, 3); +} +lean_ctor_set(x_20291, 0, x_20290); +x_20292 = l_Lean_IR_ToIR_lowerLet___closed__22; +x_20293 = lean_alloc_ctor(5, 2, 0); +lean_ctor_set(x_20293, 0, x_20292); +lean_ctor_set(x_20293, 1, x_20291); +x_20294 = l_Lean_IR_ToIR_lowerLet___closed__24; +x_20295 = lean_alloc_ctor(5, 2, 0); +lean_ctor_set(x_20295, 0, x_20293); +lean_ctor_set(x_20295, 1, x_20294); +x_20296 = l_Lean_MessageData_ofFormat(x_20295); +x_20297 = l_Lean_throwError___at_Lean_IR_ToIR_lowerLet___spec__1(x_20296, x_19912, x_4, x_5, x_19916); +lean_dec(x_5); +lean_dec(x_4); +lean_dec(x_19912); +return x_20297; +} +else +{ +lean_object* x_20298; lean_object* x_20299; lean_object* x_20300; +lean_dec(x_20285); +lean_dec(x_153); +x_20298 = l_Lean_IR_instInhabitedArg; +x_20299 = lean_unsigned_to_nat(2u); +x_20300 = lean_array_get(x_20298, x_19907, x_20299); +lean_dec(x_19907); +if (lean_obj_tag(x_20300) == 0) +{ +lean_object* x_20301; lean_object* x_20302; lean_object* x_20303; lean_object* x_20304; lean_object* x_20305; lean_object* x_20306; lean_object* x_20307; +x_20301 = lean_ctor_get(x_20300, 0); +lean_inc(x_20301); +lean_dec(x_20300); +x_20302 = lean_ctor_get(x_1, 0); +lean_inc(x_20302); +lean_dec(x_1); +x_20303 = l_Lean_IR_ToIR_bindVarToVarId(x_20302, x_20301, x_19912, x_4, x_5, x_19916); +x_20304 = lean_ctor_get(x_20303, 0); +lean_inc(x_20304); +x_20305 = lean_ctor_get(x_20303, 1); +lean_inc(x_20305); +lean_dec(x_20303); +x_20306 = lean_ctor_get(x_20304, 1); +lean_inc(x_20306); +lean_dec(x_20304); +x_20307 = l_Lean_IR_ToIR_lowerCode(x_2, x_20306, x_4, x_5, x_20305); +return x_20307; +} +else +{ +lean_object* x_20308; lean_object* x_20309; lean_object* x_20310; lean_object* x_20311; lean_object* x_20312; lean_object* x_20313; +x_20308 = lean_ctor_get(x_1, 0); +lean_inc(x_20308); +lean_dec(x_1); +x_20309 = l_Lean_IR_ToIR_bindErased(x_20308, x_19912, x_4, x_5, x_19916); +x_20310 = lean_ctor_get(x_20309, 0); +lean_inc(x_20310); +x_20311 = lean_ctor_get(x_20309, 1); +lean_inc(x_20311); +lean_dec(x_20309); +x_20312 = lean_ctor_get(x_20310, 1); +lean_inc(x_20312); +lean_dec(x_20310); +x_20313 = l_Lean_IR_ToIR_lowerCode(x_2, x_20312, x_4, x_5, x_20311); +return x_20313; +} +} +} +case 5: +{ +lean_object* x_20314; lean_object* x_20315; +lean_dec(x_19923); +lean_dec(x_19918); +lean_dec(x_19917); +lean_dec(x_19913); +lean_dec(x_19907); +lean_dec(x_17699); +lean_dec(x_17698); 
+lean_dec(x_153); +lean_dec(x_2); +lean_dec(x_1); +x_20314 = l_Lean_IR_ToIR_lowerLet___closed__26; +x_20315 = l_panic___at_Lean_IR_ToIR_lowerAlt_loop___spec__1(x_20314, x_19912, x_4, x_5, x_19916); +return x_20315; +} +case 6: +{ +lean_object* x_20316; uint8_t x_20317; +x_20316 = lean_ctor_get(x_19923, 0); +lean_inc(x_20316); +lean_dec(x_19923); +lean_inc(x_153); +x_20317 = l_Lean_isExtern(x_19918, x_153); +if (x_20317 == 0) +{ +lean_object* x_20318; +lean_dec(x_19917); +lean_dec(x_19913); +lean_dec(x_19907); +lean_inc(x_5); +lean_inc(x_4); +x_20318 = l_Lean_IR_ToIR_getCtorInfo(x_153, x_19912, x_4, x_5, x_19916); +if (lean_obj_tag(x_20318) == 0) +{ +lean_object* x_20319; lean_object* x_20320; lean_object* x_20321; lean_object* x_20322; lean_object* x_20323; lean_object* x_20324; lean_object* x_20325; lean_object* x_20326; lean_object* x_20327; lean_object* x_20328; lean_object* x_20329; lean_object* x_20330; lean_object* x_20331; lean_object* x_20332; lean_object* x_20333; lean_object* x_20334; lean_object* x_20335; lean_object* x_20336; lean_object* x_20337; lean_object* x_20338; +x_20319 = lean_ctor_get(x_20318, 0); +lean_inc(x_20319); +x_20320 = lean_ctor_get(x_20319, 0); +lean_inc(x_20320); +x_20321 = lean_ctor_get(x_20318, 1); +lean_inc(x_20321); +lean_dec(x_20318); +x_20322 = lean_ctor_get(x_20319, 1); +lean_inc(x_20322); +lean_dec(x_20319); +x_20323 = lean_ctor_get(x_20320, 0); +lean_inc(x_20323); +x_20324 = lean_ctor_get(x_20320, 1); +lean_inc(x_20324); +lean_dec(x_20320); +x_20325 = lean_ctor_get(x_20316, 3); +lean_inc(x_20325); +lean_dec(x_20316); +x_20326 = lean_array_get_size(x_17698); +x_20327 = l_Array_extract___rarg(x_17698, x_20325, x_20326); +lean_dec(x_20326); +lean_dec(x_17698); +x_20328 = lean_array_get_size(x_20324); +x_20329 = lean_unsigned_to_nat(0u); +x_20330 = lean_unsigned_to_nat(1u); +if (lean_is_scalar(x_17699)) { + x_20331 = lean_alloc_ctor(0, 3, 0); +} else { + x_20331 = x_17699; + lean_ctor_set_tag(x_20331, 0); +} +lean_ctor_set(x_20331, 0, x_20329); +lean_ctor_set(x_20331, 1, x_20328); +lean_ctor_set(x_20331, 2, x_20330); +x_20332 = l_Lean_IR_ToIR_lowerLet___closed__27; +x_20333 = l_Std_Range_forIn_x27_loop___at_Lean_IR_ToIR_lowerLet___spec__8(x_20324, x_20327, x_20331, x_20331, x_20332, x_20329, lean_box(0), lean_box(0), x_20322, x_4, x_5, x_20321); +lean_dec(x_20331); +x_20334 = lean_ctor_get(x_20333, 0); +lean_inc(x_20334); +x_20335 = lean_ctor_get(x_20333, 1); +lean_inc(x_20335); +lean_dec(x_20333); +x_20336 = lean_ctor_get(x_20334, 0); +lean_inc(x_20336); +x_20337 = lean_ctor_get(x_20334, 1); +lean_inc(x_20337); +lean_dec(x_20334); +x_20338 = l_Lean_IR_ToIR_lowerLet___lambda__4(x_1, x_2, x_20323, x_20324, x_20327, x_20336, x_20337, x_4, x_5, x_20335); +lean_dec(x_20327); +lean_dec(x_20324); +return x_20338; +} +else +{ +lean_object* x_20339; lean_object* x_20340; lean_object* x_20341; lean_object* x_20342; +lean_dec(x_20316); +lean_dec(x_17699); +lean_dec(x_17698); +lean_dec(x_5); +lean_dec(x_4); +lean_dec(x_2); +lean_dec(x_1); +x_20339 = lean_ctor_get(x_20318, 0); +lean_inc(x_20339); +x_20340 = lean_ctor_get(x_20318, 1); +lean_inc(x_20340); +if (lean_is_exclusive(x_20318)) { + lean_ctor_release(x_20318, 0); + lean_ctor_release(x_20318, 1); + x_20341 = x_20318; +} else { + lean_dec_ref(x_20318); + x_20341 = lean_box(0); +} +if (lean_is_scalar(x_20341)) { + x_20342 = lean_alloc_ctor(1, 2, 0); +} else { + x_20342 = x_20341; +} +lean_ctor_set(x_20342, 0, x_20339); +lean_ctor_set(x_20342, 1, x_20340); +return x_20342; +} +} +else +{ +lean_object* x_20343; 
lean_object* x_20344; lean_object* x_20371; lean_object* x_20372; +lean_dec(x_20316); +lean_dec(x_17699); +lean_dec(x_17698); +lean_inc(x_153); +x_20371 = l_Lean_Compiler_LCNF_getMonoDecl_x3f(x_153, x_4, x_5, x_19916); +x_20372 = lean_ctor_get(x_20371, 0); +lean_inc(x_20372); +if (lean_obj_tag(x_20372) == 0) +{ +lean_object* x_20373; lean_object* x_20374; lean_object* x_20375; +x_20373 = lean_ctor_get(x_20371, 1); +lean_inc(x_20373); +lean_dec(x_20371); +x_20374 = lean_box(0); +if (lean_is_scalar(x_19913)) { + x_20375 = lean_alloc_ctor(0, 2, 0); +} else { + x_20375 = x_19913; +} +lean_ctor_set(x_20375, 0, x_20374); +lean_ctor_set(x_20375, 1, x_19912); +x_20343 = x_20375; +x_20344 = x_20373; +goto block_20370; +} +else +{ +lean_object* x_20376; lean_object* x_20377; lean_object* x_20378; lean_object* x_20379; lean_object* x_20380; lean_object* x_20381; lean_object* x_20382; uint8_t x_20383; +lean_dec(x_19913); +x_20376 = lean_ctor_get(x_20371, 1); +lean_inc(x_20376); +if (lean_is_exclusive(x_20371)) { + lean_ctor_release(x_20371, 0); + lean_ctor_release(x_20371, 1); + x_20377 = x_20371; +} else { + lean_dec_ref(x_20371); + x_20377 = lean_box(0); +} +x_20378 = lean_ctor_get(x_20372, 0); +lean_inc(x_20378); +if (lean_is_exclusive(x_20372)) { + lean_ctor_release(x_20372, 0); + x_20379 = x_20372; +} else { + lean_dec_ref(x_20372); + x_20379 = lean_box(0); +} +x_20380 = lean_array_get_size(x_19907); +x_20381 = lean_ctor_get(x_20378, 3); +lean_inc(x_20381); +lean_dec(x_20378); +x_20382 = lean_array_get_size(x_20381); +lean_dec(x_20381); +x_20383 = lean_nat_dec_lt(x_20380, x_20382); +if (x_20383 == 0) +{ +uint8_t x_20384; +x_20384 = lean_nat_dec_eq(x_20380, x_20382); +if (x_20384 == 0) +{ +lean_object* x_20385; lean_object* x_20386; lean_object* x_20387; lean_object* x_20388; lean_object* x_20389; lean_object* x_20390; lean_object* x_20391; lean_object* x_20392; lean_object* x_20393; lean_object* x_20394; lean_object* x_20395; lean_object* x_20396; lean_object* x_20397; lean_object* x_20398; lean_object* x_20399; lean_object* x_20400; lean_object* x_20401; +x_20385 = lean_unsigned_to_nat(0u); +x_20386 = l_Array_extract___rarg(x_19907, x_20385, x_20382); +x_20387 = l_Array_extract___rarg(x_19907, x_20382, x_20380); +lean_dec(x_20380); +lean_inc(x_153); +if (lean_is_scalar(x_20377)) { + x_20388 = lean_alloc_ctor(6, 2, 0); +} else { + x_20388 = x_20377; + lean_ctor_set_tag(x_20388, 6); +} +lean_ctor_set(x_20388, 0, x_153); +lean_ctor_set(x_20388, 1, x_20386); +x_20389 = lean_ctor_get(x_1, 0); +lean_inc(x_20389); +x_20390 = l_Lean_IR_ToIR_bindVar(x_20389, x_19912, x_4, x_5, x_20376); +x_20391 = lean_ctor_get(x_20390, 0); +lean_inc(x_20391); +x_20392 = lean_ctor_get(x_20390, 1); +lean_inc(x_20392); +lean_dec(x_20390); +x_20393 = lean_ctor_get(x_20391, 0); +lean_inc(x_20393); +x_20394 = lean_ctor_get(x_20391, 1); +lean_inc(x_20394); +lean_dec(x_20391); +x_20395 = l_Lean_IR_ToIR_newVar(x_20394, x_4, x_5, x_20392); +x_20396 = lean_ctor_get(x_20395, 0); +lean_inc(x_20396); +x_20397 = lean_ctor_get(x_20395, 1); +lean_inc(x_20397); +lean_dec(x_20395); +x_20398 = lean_ctor_get(x_20396, 0); +lean_inc(x_20398); +x_20399 = lean_ctor_get(x_20396, 1); +lean_inc(x_20399); +lean_dec(x_20396); +x_20400 = lean_ctor_get(x_1, 2); +lean_inc(x_20400); +lean_inc(x_5); +lean_inc(x_4); +x_20401 = l_Lean_IR_ToIR_lowerType(x_20400, x_20399, x_4, x_5, x_20397); +if (lean_obj_tag(x_20401) == 0) +{ +lean_object* x_20402; lean_object* x_20403; lean_object* x_20404; lean_object* x_20405; lean_object* x_20406; +x_20402 = 
lean_ctor_get(x_20401, 0); +lean_inc(x_20402); +x_20403 = lean_ctor_get(x_20401, 1); +lean_inc(x_20403); +lean_dec(x_20401); +x_20404 = lean_ctor_get(x_20402, 0); +lean_inc(x_20404); +x_20405 = lean_ctor_get(x_20402, 1); +lean_inc(x_20405); +lean_dec(x_20402); +lean_inc(x_5); +lean_inc(x_4); +lean_inc(x_2); +x_20406 = l_Lean_IR_ToIR_lowerLet___lambda__3(x_2, x_20398, x_20387, x_20393, x_20388, x_20404, x_20405, x_4, x_5, x_20403); +if (lean_obj_tag(x_20406) == 0) +{ +lean_object* x_20407; lean_object* x_20408; lean_object* x_20409; lean_object* x_20410; lean_object* x_20411; lean_object* x_20412; lean_object* x_20413; +x_20407 = lean_ctor_get(x_20406, 0); +lean_inc(x_20407); +x_20408 = lean_ctor_get(x_20406, 1); +lean_inc(x_20408); +lean_dec(x_20406); +x_20409 = lean_ctor_get(x_20407, 0); +lean_inc(x_20409); +x_20410 = lean_ctor_get(x_20407, 1); +lean_inc(x_20410); +if (lean_is_exclusive(x_20407)) { + lean_ctor_release(x_20407, 0); + lean_ctor_release(x_20407, 1); + x_20411 = x_20407; +} else { + lean_dec_ref(x_20407); + x_20411 = lean_box(0); +} +if (lean_is_scalar(x_20379)) { + x_20412 = lean_alloc_ctor(1, 1, 0); +} else { + x_20412 = x_20379; +} +lean_ctor_set(x_20412, 0, x_20409); +if (lean_is_scalar(x_20411)) { + x_20413 = lean_alloc_ctor(0, 2, 0); +} else { + x_20413 = x_20411; +} +lean_ctor_set(x_20413, 0, x_20412); +lean_ctor_set(x_20413, 1, x_20410); +x_20343 = x_20413; +x_20344 = x_20408; +goto block_20370; +} +else +{ +lean_object* x_20414; lean_object* x_20415; lean_object* x_20416; lean_object* x_20417; +lean_dec(x_20379); +lean_dec(x_19917); +lean_dec(x_19907); +lean_dec(x_153); +lean_dec(x_5); +lean_dec(x_4); +lean_dec(x_2); +lean_dec(x_1); +x_20414 = lean_ctor_get(x_20406, 0); +lean_inc(x_20414); +x_20415 = lean_ctor_get(x_20406, 1); +lean_inc(x_20415); +if (lean_is_exclusive(x_20406)) { + lean_ctor_release(x_20406, 0); + lean_ctor_release(x_20406, 1); + x_20416 = x_20406; +} else { + lean_dec_ref(x_20406); + x_20416 = lean_box(0); +} +if (lean_is_scalar(x_20416)) { + x_20417 = lean_alloc_ctor(1, 2, 0); +} else { + x_20417 = x_20416; +} +lean_ctor_set(x_20417, 0, x_20414); +lean_ctor_set(x_20417, 1, x_20415); +return x_20417; +} +} +else +{ +lean_object* x_20418; lean_object* x_20419; lean_object* x_20420; lean_object* x_20421; +lean_dec(x_20398); +lean_dec(x_20393); +lean_dec(x_20388); +lean_dec(x_20387); +lean_dec(x_20379); +lean_dec(x_19917); +lean_dec(x_19907); +lean_dec(x_153); +lean_dec(x_5); +lean_dec(x_4); +lean_dec(x_2); +lean_dec(x_1); +x_20418 = lean_ctor_get(x_20401, 0); +lean_inc(x_20418); +x_20419 = lean_ctor_get(x_20401, 1); +lean_inc(x_20419); +if (lean_is_exclusive(x_20401)) { + lean_ctor_release(x_20401, 0); + lean_ctor_release(x_20401, 1); + x_20420 = x_20401; +} else { + lean_dec_ref(x_20401); + x_20420 = lean_box(0); +} +if (lean_is_scalar(x_20420)) { + x_20421 = lean_alloc_ctor(1, 2, 0); +} else { + x_20421 = x_20420; +} +lean_ctor_set(x_20421, 0, x_20418); +lean_ctor_set(x_20421, 1, x_20419); +return x_20421; +} +} +else +{ +lean_object* x_20422; lean_object* x_20423; lean_object* x_20424; lean_object* x_20425; lean_object* x_20426; lean_object* x_20427; lean_object* x_20428; lean_object* x_20429; lean_object* x_20430; +lean_dec(x_20382); +lean_dec(x_20380); +lean_inc(x_19907); +lean_inc(x_153); +if (lean_is_scalar(x_20377)) { + x_20422 = lean_alloc_ctor(6, 2, 0); +} else { + x_20422 = x_20377; + lean_ctor_set_tag(x_20422, 6); +} +lean_ctor_set(x_20422, 0, x_153); +lean_ctor_set(x_20422, 1, x_19907); +x_20423 = lean_ctor_get(x_1, 0); 
+lean_inc(x_20423); +x_20424 = l_Lean_IR_ToIR_bindVar(x_20423, x_19912, x_4, x_5, x_20376); +x_20425 = lean_ctor_get(x_20424, 0); +lean_inc(x_20425); +x_20426 = lean_ctor_get(x_20424, 1); +lean_inc(x_20426); +lean_dec(x_20424); +x_20427 = lean_ctor_get(x_20425, 0); +lean_inc(x_20427); +x_20428 = lean_ctor_get(x_20425, 1); +lean_inc(x_20428); +lean_dec(x_20425); +x_20429 = lean_ctor_get(x_1, 2); +lean_inc(x_20429); +lean_inc(x_5); +lean_inc(x_4); +x_20430 = l_Lean_IR_ToIR_lowerType(x_20429, x_20428, x_4, x_5, x_20426); +if (lean_obj_tag(x_20430) == 0) +{ +lean_object* x_20431; lean_object* x_20432; lean_object* x_20433; lean_object* x_20434; lean_object* x_20435; +x_20431 = lean_ctor_get(x_20430, 0); +lean_inc(x_20431); +x_20432 = lean_ctor_get(x_20430, 1); +lean_inc(x_20432); +lean_dec(x_20430); +x_20433 = lean_ctor_get(x_20431, 0); +lean_inc(x_20433); +x_20434 = lean_ctor_get(x_20431, 1); +lean_inc(x_20434); +lean_dec(x_20431); +lean_inc(x_5); +lean_inc(x_4); +lean_inc(x_2); +x_20435 = l_Lean_IR_ToIR_lowerLet___lambda__1(x_2, x_20427, x_20422, x_20433, x_20434, x_4, x_5, x_20432); +if (lean_obj_tag(x_20435) == 0) +{ +lean_object* x_20436; lean_object* x_20437; lean_object* x_20438; lean_object* x_20439; lean_object* x_20440; lean_object* x_20441; lean_object* x_20442; +x_20436 = lean_ctor_get(x_20435, 0); +lean_inc(x_20436); +x_20437 = lean_ctor_get(x_20435, 1); +lean_inc(x_20437); +lean_dec(x_20435); +x_20438 = lean_ctor_get(x_20436, 0); +lean_inc(x_20438); +x_20439 = lean_ctor_get(x_20436, 1); +lean_inc(x_20439); +if (lean_is_exclusive(x_20436)) { + lean_ctor_release(x_20436, 0); + lean_ctor_release(x_20436, 1); + x_20440 = x_20436; +} else { + lean_dec_ref(x_20436); + x_20440 = lean_box(0); +} +if (lean_is_scalar(x_20379)) { + x_20441 = lean_alloc_ctor(1, 1, 0); +} else { + x_20441 = x_20379; +} +lean_ctor_set(x_20441, 0, x_20438); +if (lean_is_scalar(x_20440)) { + x_20442 = lean_alloc_ctor(0, 2, 0); +} else { + x_20442 = x_20440; +} +lean_ctor_set(x_20442, 0, x_20441); +lean_ctor_set(x_20442, 1, x_20439); +x_20343 = x_20442; +x_20344 = x_20437; +goto block_20370; +} +else +{ +lean_object* x_20443; lean_object* x_20444; lean_object* x_20445; lean_object* x_20446; +lean_dec(x_20379); +lean_dec(x_19917); +lean_dec(x_19907); +lean_dec(x_153); +lean_dec(x_5); +lean_dec(x_4); +lean_dec(x_2); +lean_dec(x_1); +x_20443 = lean_ctor_get(x_20435, 0); +lean_inc(x_20443); +x_20444 = lean_ctor_get(x_20435, 1); +lean_inc(x_20444); +if (lean_is_exclusive(x_20435)) { + lean_ctor_release(x_20435, 0); + lean_ctor_release(x_20435, 1); + x_20445 = x_20435; +} else { + lean_dec_ref(x_20435); + x_20445 = lean_box(0); +} +if (lean_is_scalar(x_20445)) { + x_20446 = lean_alloc_ctor(1, 2, 0); +} else { + x_20446 = x_20445; +} +lean_ctor_set(x_20446, 0, x_20443); +lean_ctor_set(x_20446, 1, x_20444); +return x_20446; +} +} +else +{ +lean_object* x_20447; lean_object* x_20448; lean_object* x_20449; lean_object* x_20450; +lean_dec(x_20427); +lean_dec(x_20422); +lean_dec(x_20379); +lean_dec(x_19917); +lean_dec(x_19907); +lean_dec(x_153); +lean_dec(x_5); +lean_dec(x_4); +lean_dec(x_2); +lean_dec(x_1); +x_20447 = lean_ctor_get(x_20430, 0); +lean_inc(x_20447); +x_20448 = lean_ctor_get(x_20430, 1); +lean_inc(x_20448); +if (lean_is_exclusive(x_20430)) { + lean_ctor_release(x_20430, 0); + lean_ctor_release(x_20430, 1); + x_20449 = x_20430; +} else { + lean_dec_ref(x_20430); + x_20449 = lean_box(0); +} +if (lean_is_scalar(x_20449)) { + x_20450 = lean_alloc_ctor(1, 2, 0); +} else { + x_20450 = x_20449; +} 
+lean_ctor_set(x_20450, 0, x_20447); +lean_ctor_set(x_20450, 1, x_20448); +return x_20450; +} +} +} +else +{ +lean_object* x_20451; lean_object* x_20452; lean_object* x_20453; lean_object* x_20454; lean_object* x_20455; lean_object* x_20456; lean_object* x_20457; lean_object* x_20458; lean_object* x_20459; +lean_dec(x_20382); +lean_dec(x_20380); +lean_inc(x_19907); +lean_inc(x_153); +if (lean_is_scalar(x_20377)) { + x_20451 = lean_alloc_ctor(7, 2, 0); +} else { + x_20451 = x_20377; + lean_ctor_set_tag(x_20451, 7); +} +lean_ctor_set(x_20451, 0, x_153); +lean_ctor_set(x_20451, 1, x_19907); +x_20452 = lean_ctor_get(x_1, 0); +lean_inc(x_20452); +x_20453 = l_Lean_IR_ToIR_bindVar(x_20452, x_19912, x_4, x_5, x_20376); +x_20454 = lean_ctor_get(x_20453, 0); +lean_inc(x_20454); +x_20455 = lean_ctor_get(x_20453, 1); +lean_inc(x_20455); +lean_dec(x_20453); +x_20456 = lean_ctor_get(x_20454, 0); +lean_inc(x_20456); +x_20457 = lean_ctor_get(x_20454, 1); +lean_inc(x_20457); +lean_dec(x_20454); +x_20458 = lean_box(7); +lean_inc(x_5); +lean_inc(x_4); +lean_inc(x_2); +x_20459 = l_Lean_IR_ToIR_lowerLet___lambda__1(x_2, x_20456, x_20451, x_20458, x_20457, x_4, x_5, x_20455); +if (lean_obj_tag(x_20459) == 0) +{ +lean_object* x_20460; lean_object* x_20461; lean_object* x_20462; lean_object* x_20463; lean_object* x_20464; lean_object* x_20465; lean_object* x_20466; +x_20460 = lean_ctor_get(x_20459, 0); +lean_inc(x_20460); +x_20461 = lean_ctor_get(x_20459, 1); +lean_inc(x_20461); +lean_dec(x_20459); +x_20462 = lean_ctor_get(x_20460, 0); +lean_inc(x_20462); +x_20463 = lean_ctor_get(x_20460, 1); +lean_inc(x_20463); +if (lean_is_exclusive(x_20460)) { + lean_ctor_release(x_20460, 0); + lean_ctor_release(x_20460, 1); + x_20464 = x_20460; +} else { + lean_dec_ref(x_20460); + x_20464 = lean_box(0); +} +if (lean_is_scalar(x_20379)) { + x_20465 = lean_alloc_ctor(1, 1, 0); +} else { + x_20465 = x_20379; +} +lean_ctor_set(x_20465, 0, x_20462); +if (lean_is_scalar(x_20464)) { + x_20466 = lean_alloc_ctor(0, 2, 0); +} else { + x_20466 = x_20464; +} +lean_ctor_set(x_20466, 0, x_20465); +lean_ctor_set(x_20466, 1, x_20463); +x_20343 = x_20466; +x_20344 = x_20461; +goto block_20370; +} +else +{ +lean_object* x_20467; lean_object* x_20468; lean_object* x_20469; lean_object* x_20470; +lean_dec(x_20379); +lean_dec(x_19917); +lean_dec(x_19907); +lean_dec(x_153); +lean_dec(x_5); +lean_dec(x_4); +lean_dec(x_2); +lean_dec(x_1); +x_20467 = lean_ctor_get(x_20459, 0); +lean_inc(x_20467); +x_20468 = lean_ctor_get(x_20459, 1); +lean_inc(x_20468); +if (lean_is_exclusive(x_20459)) { + lean_ctor_release(x_20459, 0); + lean_ctor_release(x_20459, 1); + x_20469 = x_20459; +} else { + lean_dec_ref(x_20459); + x_20469 = lean_box(0); +} +if (lean_is_scalar(x_20469)) { + x_20470 = lean_alloc_ctor(1, 2, 0); +} else { + x_20470 = x_20469; +} +lean_ctor_set(x_20470, 0, x_20467); +lean_ctor_set(x_20470, 1, x_20468); +return x_20470; +} +} +} +block_20370: +{ +lean_object* x_20345; +x_20345 = lean_ctor_get(x_20343, 0); +lean_inc(x_20345); +if (lean_obj_tag(x_20345) == 0) +{ +lean_object* x_20346; lean_object* x_20347; lean_object* x_20348; lean_object* x_20349; lean_object* x_20350; lean_object* x_20351; lean_object* x_20352; lean_object* x_20353; lean_object* x_20354; lean_object* x_20355; +lean_dec(x_19917); +x_20346 = lean_ctor_get(x_20343, 1); +lean_inc(x_20346); +lean_dec(x_20343); +x_20347 = lean_alloc_ctor(6, 2, 0); +lean_ctor_set(x_20347, 0, x_153); +lean_ctor_set(x_20347, 1, x_19907); +x_20348 = lean_ctor_get(x_1, 0); +lean_inc(x_20348); +x_20349 = 
l_Lean_IR_ToIR_bindVar(x_20348, x_20346, x_4, x_5, x_20344); +x_20350 = lean_ctor_get(x_20349, 0); +lean_inc(x_20350); +x_20351 = lean_ctor_get(x_20349, 1); +lean_inc(x_20351); +lean_dec(x_20349); +x_20352 = lean_ctor_get(x_20350, 0); +lean_inc(x_20352); +x_20353 = lean_ctor_get(x_20350, 1); +lean_inc(x_20353); +lean_dec(x_20350); +x_20354 = lean_ctor_get(x_1, 2); +lean_inc(x_20354); +lean_dec(x_1); +lean_inc(x_5); +lean_inc(x_4); +x_20355 = l_Lean_IR_ToIR_lowerType(x_20354, x_20353, x_4, x_5, x_20351); +if (lean_obj_tag(x_20355) == 0) +{ +lean_object* x_20356; lean_object* x_20357; lean_object* x_20358; lean_object* x_20359; lean_object* x_20360; +x_20356 = lean_ctor_get(x_20355, 0); +lean_inc(x_20356); +x_20357 = lean_ctor_get(x_20355, 1); +lean_inc(x_20357); +lean_dec(x_20355); +x_20358 = lean_ctor_get(x_20356, 0); +lean_inc(x_20358); +x_20359 = lean_ctor_get(x_20356, 1); +lean_inc(x_20359); +lean_dec(x_20356); +x_20360 = l_Lean_IR_ToIR_lowerLet___lambda__1(x_2, x_20352, x_20347, x_20358, x_20359, x_4, x_5, x_20357); +return x_20360; +} +else +{ +lean_object* x_20361; lean_object* x_20362; lean_object* x_20363; lean_object* x_20364; +lean_dec(x_20352); +lean_dec(x_20347); +lean_dec(x_5); +lean_dec(x_4); +lean_dec(x_2); +x_20361 = lean_ctor_get(x_20355, 0); +lean_inc(x_20361); +x_20362 = lean_ctor_get(x_20355, 1); +lean_inc(x_20362); +if (lean_is_exclusive(x_20355)) { + lean_ctor_release(x_20355, 0); + lean_ctor_release(x_20355, 1); + x_20363 = x_20355; +} else { + lean_dec_ref(x_20355); + x_20363 = lean_box(0); +} +if (lean_is_scalar(x_20363)) { + x_20364 = lean_alloc_ctor(1, 2, 0); +} else { + x_20364 = x_20363; +} +lean_ctor_set(x_20364, 0, x_20361); +lean_ctor_set(x_20364, 1, x_20362); +return x_20364; +} +} +else +{ +lean_object* x_20365; lean_object* x_20366; lean_object* x_20367; lean_object* x_20368; lean_object* x_20369; +lean_dec(x_19907); +lean_dec(x_153); +lean_dec(x_5); +lean_dec(x_4); +lean_dec(x_2); +lean_dec(x_1); +x_20365 = lean_ctor_get(x_20343, 1); +lean_inc(x_20365); +if (lean_is_exclusive(x_20343)) { + lean_ctor_release(x_20343, 0); + lean_ctor_release(x_20343, 1); + x_20366 = x_20343; +} else { + lean_dec_ref(x_20343); + x_20366 = lean_box(0); +} +x_20367 = lean_ctor_get(x_20345, 0); +lean_inc(x_20367); +lean_dec(x_20345); +if (lean_is_scalar(x_20366)) { + x_20368 = lean_alloc_ctor(0, 2, 0); +} else { + x_20368 = x_20366; +} +lean_ctor_set(x_20368, 0, x_20367); +lean_ctor_set(x_20368, 1, x_20365); +if (lean_is_scalar(x_19917)) { + x_20369 = lean_alloc_ctor(0, 2, 0); +} else { + x_20369 = x_19917; +} +lean_ctor_set(x_20369, 0, x_20368); +lean_ctor_set(x_20369, 1, x_20344); +return x_20369; +} +} +} +} +default: +{ +lean_object* x_20471; uint8_t x_20472; lean_object* x_20473; lean_object* x_20474; lean_object* x_20475; lean_object* x_20476; lean_object* x_20477; lean_object* x_20478; lean_object* x_20479; lean_object* x_20480; lean_object* x_20481; +lean_dec(x_19918); +lean_dec(x_19917); +lean_dec(x_19913); +lean_dec(x_19907); +lean_dec(x_17699); +lean_dec(x_17698); +lean_dec(x_2); +lean_dec(x_1); +if (lean_is_exclusive(x_19923)) { + lean_ctor_release(x_19923, 0); + x_20471 = x_19923; +} else { + lean_dec_ref(x_19923); + x_20471 = lean_box(0); +} +x_20472 = 1; +x_20473 = l_Lean_IR_ToIR_lowerLet___closed__14; +x_20474 = l_Lean_Name_toString(x_153, x_20472, x_20473); +if (lean_is_scalar(x_20471)) { + x_20475 = lean_alloc_ctor(3, 1, 0); +} else { + x_20475 = x_20471; + lean_ctor_set_tag(x_20475, 3); +} +lean_ctor_set(x_20475, 0, x_20474); +x_20476 = 
l_Lean_IR_ToIR_lowerLet___closed__29; +x_20477 = lean_alloc_ctor(5, 2, 0); +lean_ctor_set(x_20477, 0, x_20476); +lean_ctor_set(x_20477, 1, x_20475); +x_20478 = l_Lean_IR_ToIR_lowerLet___closed__31; +x_20479 = lean_alloc_ctor(5, 2, 0); +lean_ctor_set(x_20479, 0, x_20477); +lean_ctor_set(x_20479, 1, x_20478); +x_20480 = l_Lean_MessageData_ofFormat(x_20479); +x_20481 = l_Lean_throwError___at_Lean_IR_ToIR_lowerLet___spec__1(x_20480, x_19912, x_4, x_5, x_19916); +lean_dec(x_5); +lean_dec(x_4); +lean_dec(x_19912); +return x_20481; +} +} +} +} +else +{ +lean_object* x_20482; lean_object* x_20483; lean_object* x_20484; lean_object* x_20485; lean_object* x_20486; +lean_dec(x_19907); +lean_dec(x_17699); +lean_dec(x_17698); +lean_dec(x_153); +lean_dec(x_5); +lean_dec(x_4); +lean_dec(x_2); +lean_dec(x_1); +x_20482 = lean_ctor_get(x_19909, 1); +lean_inc(x_20482); +if (lean_is_exclusive(x_19909)) { + lean_ctor_release(x_19909, 0); + lean_ctor_release(x_19909, 1); + x_20483 = x_19909; +} else { + lean_dec_ref(x_19909); + x_20483 = lean_box(0); +} +x_20484 = lean_ctor_get(x_19911, 0); +lean_inc(x_20484); +lean_dec(x_19911); +if (lean_is_scalar(x_20483)) { + x_20485 = lean_alloc_ctor(0, 2, 0); +} else { + x_20485 = x_20483; +} +lean_ctor_set(x_20485, 0, x_20484); +lean_ctor_set(x_20485, 1, x_20482); +if (lean_is_scalar(x_17705)) { + x_20486 = lean_alloc_ctor(0, 2, 0); +} else { + x_20486 = x_17705; +} +lean_ctor_set(x_20486, 0, x_20485); +lean_ctor_set(x_20486, 1, x_19910); +return x_20486; +} +} +} +} +else +{ +uint8_t x_20588; +lean_dec(x_17699); +lean_dec(x_17698); +lean_dec(x_153); +lean_dec(x_5); +lean_dec(x_4); +lean_dec(x_2); +lean_dec(x_1); +x_20588 = !lean_is_exclusive(x_17702); +if (x_20588 == 0) +{ +return x_17702; +} +else +{ +lean_object* x_20589; lean_object* x_20590; lean_object* x_20591; +x_20589 = lean_ctor_get(x_17702, 0); +x_20590 = lean_ctor_get(x_17702, 1); +lean_inc(x_20590); +lean_inc(x_20589); +lean_dec(x_17702); +x_20591 = lean_alloc_ctor(1, 2, 0); +lean_ctor_set(x_20591, 0, x_20589); +lean_ctor_set(x_20591, 1, x_20590); +return x_20591; +} +} +} +} +} +default: +{ +lean_object* x_20592; lean_object* x_20593; size_t x_20594; size_t x_20595; lean_object* x_20596; +x_20592 = lean_ctor_get(x_7, 2); +lean_inc(x_20592); +if (lean_is_exclusive(x_7)) { + lean_ctor_release(x_7, 0); + lean_ctor_release(x_7, 1); + lean_ctor_release(x_7, 2); + x_20593 = x_7; +} else { + lean_dec_ref(x_7); + x_20593 = lean_box(0); +} +x_20594 = lean_array_size(x_20592); +x_20595 = 0; +lean_inc(x_5); +lean_inc(x_4); +lean_inc(x_20592); +x_20596 = l_Array_mapMUnsafe_map___at_Lean_IR_ToIR_lowerCode___spec__2(x_20594, x_20595, x_20592, x_3, x_4, x_5, x_6); +if (lean_obj_tag(x_20596) == 0) +{ +lean_object* x_20597; lean_object* x_20598; lean_object* x_20599; uint8_t x_20600; +x_20597 = lean_ctor_get(x_20596, 0); +lean_inc(x_20597); +x_20598 = lean_ctor_get(x_20596, 1); +lean_inc(x_20598); +if (lean_is_exclusive(x_20596)) { + lean_ctor_release(x_20596, 0); + lean_ctor_release(x_20596, 1); + x_20599 = x_20596; +} else { + lean_dec_ref(x_20596); + x_20599 = lean_box(0); +} +x_20600 = !lean_is_exclusive(x_20597); +if (x_20600 == 0) +{ +lean_object* x_20601; lean_object* x_20602; lean_object* x_20603; lean_object* x_20604; lean_object* x_22521; lean_object* x_22522; +x_20601 = lean_ctor_get(x_20597, 0); +x_20602 = lean_ctor_get(x_20597, 1); +lean_inc(x_153); +x_22521 = l_Lean_Compiler_LCNF_getMonoDecl_x3f(x_153, x_4, x_5, x_20598); +x_22522 = lean_ctor_get(x_22521, 0); +lean_inc(x_22522); +if (lean_obj_tag(x_22522) == 
0) +{ +lean_object* x_22523; lean_object* x_22524; +x_22523 = lean_ctor_get(x_22521, 1); +lean_inc(x_22523); +lean_dec(x_22521); +x_22524 = lean_box(0); +lean_ctor_set(x_20597, 0, x_22524); +x_20603 = x_20597; +x_20604 = x_22523; +goto block_22520; +} +else +{ +uint8_t x_22525; +lean_free_object(x_20597); +x_22525 = !lean_is_exclusive(x_22521); +if (x_22525 == 0) +{ +lean_object* x_22526; lean_object* x_22527; uint8_t x_22528; +x_22526 = lean_ctor_get(x_22521, 1); +x_22527 = lean_ctor_get(x_22521, 0); +lean_dec(x_22527); +x_22528 = !lean_is_exclusive(x_22522); +if (x_22528 == 0) +{ +lean_object* x_22529; lean_object* x_22530; lean_object* x_22531; lean_object* x_22532; uint8_t x_22533; +x_22529 = lean_ctor_get(x_22522, 0); +x_22530 = lean_array_get_size(x_20601); +x_22531 = lean_ctor_get(x_22529, 3); +lean_inc(x_22531); +lean_dec(x_22529); +x_22532 = lean_array_get_size(x_22531); +lean_dec(x_22531); +x_22533 = lean_nat_dec_lt(x_22530, x_22532); +if (x_22533 == 0) +{ +uint8_t x_22534; +x_22534 = lean_nat_dec_eq(x_22530, x_22532); +if (x_22534 == 0) +{ +lean_object* x_22535; lean_object* x_22536; lean_object* x_22537; lean_object* x_22538; lean_object* x_22539; lean_object* x_22540; lean_object* x_22541; lean_object* x_22542; lean_object* x_22543; lean_object* x_22544; lean_object* x_22545; lean_object* x_22546; lean_object* x_22547; lean_object* x_22548; lean_object* x_22549; lean_object* x_22550; +x_22535 = lean_unsigned_to_nat(0u); +x_22536 = l_Array_extract___rarg(x_20601, x_22535, x_22532); +x_22537 = l_Array_extract___rarg(x_20601, x_22532, x_22530); +lean_dec(x_22530); +lean_inc(x_153); +lean_ctor_set_tag(x_22521, 6); +lean_ctor_set(x_22521, 1, x_22536); +lean_ctor_set(x_22521, 0, x_153); +x_22538 = lean_ctor_get(x_1, 0); +lean_inc(x_22538); +x_22539 = l_Lean_IR_ToIR_bindVar(x_22538, x_20602, x_4, x_5, x_22526); +x_22540 = lean_ctor_get(x_22539, 0); +lean_inc(x_22540); +x_22541 = lean_ctor_get(x_22539, 1); +lean_inc(x_22541); +lean_dec(x_22539); +x_22542 = lean_ctor_get(x_22540, 0); +lean_inc(x_22542); +x_22543 = lean_ctor_get(x_22540, 1); +lean_inc(x_22543); +lean_dec(x_22540); +x_22544 = l_Lean_IR_ToIR_newVar(x_22543, x_4, x_5, x_22541); +x_22545 = lean_ctor_get(x_22544, 0); +lean_inc(x_22545); +x_22546 = lean_ctor_get(x_22544, 1); +lean_inc(x_22546); +lean_dec(x_22544); +x_22547 = lean_ctor_get(x_22545, 0); +lean_inc(x_22547); +x_22548 = lean_ctor_get(x_22545, 1); +lean_inc(x_22548); +lean_dec(x_22545); +x_22549 = lean_ctor_get(x_1, 2); +lean_inc(x_22549); +lean_inc(x_5); +lean_inc(x_4); +x_22550 = l_Lean_IR_ToIR_lowerType(x_22549, x_22548, x_4, x_5, x_22546); +if (lean_obj_tag(x_22550) == 0) +{ +lean_object* x_22551; lean_object* x_22552; lean_object* x_22553; lean_object* x_22554; lean_object* x_22555; +x_22551 = lean_ctor_get(x_22550, 0); +lean_inc(x_22551); +x_22552 = lean_ctor_get(x_22550, 1); +lean_inc(x_22552); +lean_dec(x_22550); +x_22553 = lean_ctor_get(x_22551, 0); +lean_inc(x_22553); +x_22554 = lean_ctor_get(x_22551, 1); +lean_inc(x_22554); +lean_dec(x_22551); +lean_inc(x_5); +lean_inc(x_4); +lean_inc(x_2); +x_22555 = l_Lean_IR_ToIR_lowerLet___lambda__3(x_2, x_22547, x_22537, x_22542, x_22521, x_22553, x_22554, x_4, x_5, x_22552); +if (lean_obj_tag(x_22555) == 0) +{ +lean_object* x_22556; lean_object* x_22557; uint8_t x_22558; +x_22556 = lean_ctor_get(x_22555, 0); +lean_inc(x_22556); +x_22557 = lean_ctor_get(x_22555, 1); +lean_inc(x_22557); +lean_dec(x_22555); +x_22558 = !lean_is_exclusive(x_22556); +if (x_22558 == 0) +{ +lean_object* x_22559; +x_22559 = 
lean_ctor_get(x_22556, 0); +lean_ctor_set(x_22522, 0, x_22559); +lean_ctor_set(x_22556, 0, x_22522); +x_20603 = x_22556; +x_20604 = x_22557; +goto block_22520; +} +else +{ +lean_object* x_22560; lean_object* x_22561; lean_object* x_22562; +x_22560 = lean_ctor_get(x_22556, 0); +x_22561 = lean_ctor_get(x_22556, 1); +lean_inc(x_22561); +lean_inc(x_22560); +lean_dec(x_22556); +lean_ctor_set(x_22522, 0, x_22560); +x_22562 = lean_alloc_ctor(0, 2, 0); +lean_ctor_set(x_22562, 0, x_22522); +lean_ctor_set(x_22562, 1, x_22561); +x_20603 = x_22562; +x_20604 = x_22557; +goto block_22520; +} +} +else +{ +uint8_t x_22563; +lean_free_object(x_22522); +lean_dec(x_20601); +lean_dec(x_20599); +lean_dec(x_20593); +lean_dec(x_20592); +lean_dec(x_153); +lean_dec(x_5); +lean_dec(x_4); +lean_dec(x_2); +lean_dec(x_1); +x_22563 = !lean_is_exclusive(x_22555); +if (x_22563 == 0) +{ +return x_22555; +} +else +{ +lean_object* x_22564; lean_object* x_22565; lean_object* x_22566; +x_22564 = lean_ctor_get(x_22555, 0); +x_22565 = lean_ctor_get(x_22555, 1); +lean_inc(x_22565); +lean_inc(x_22564); +lean_dec(x_22555); +x_22566 = lean_alloc_ctor(1, 2, 0); +lean_ctor_set(x_22566, 0, x_22564); +lean_ctor_set(x_22566, 1, x_22565); +return x_22566; +} +} +} +else +{ +uint8_t x_22567; +lean_dec(x_22547); +lean_dec(x_22542); +lean_dec(x_22521); +lean_dec(x_22537); +lean_free_object(x_22522); +lean_dec(x_20601); +lean_dec(x_20599); +lean_dec(x_20593); +lean_dec(x_20592); +lean_dec(x_153); +lean_dec(x_5); +lean_dec(x_4); +lean_dec(x_2); +lean_dec(x_1); +x_22567 = !lean_is_exclusive(x_22550); +if (x_22567 == 0) +{ +return x_22550; +} +else +{ +lean_object* x_22568; lean_object* x_22569; lean_object* x_22570; +x_22568 = lean_ctor_get(x_22550, 0); +x_22569 = lean_ctor_get(x_22550, 1); +lean_inc(x_22569); +lean_inc(x_22568); +lean_dec(x_22550); +x_22570 = lean_alloc_ctor(1, 2, 0); +lean_ctor_set(x_22570, 0, x_22568); +lean_ctor_set(x_22570, 1, x_22569); +return x_22570; +} +} +} +else +{ +lean_object* x_22571; lean_object* x_22572; lean_object* x_22573; lean_object* x_22574; lean_object* x_22575; lean_object* x_22576; lean_object* x_22577; lean_object* x_22578; +lean_dec(x_22532); +lean_dec(x_22530); +lean_inc(x_20601); +lean_inc(x_153); +lean_ctor_set_tag(x_22521, 6); +lean_ctor_set(x_22521, 1, x_20601); +lean_ctor_set(x_22521, 0, x_153); +x_22571 = lean_ctor_get(x_1, 0); +lean_inc(x_22571); +x_22572 = l_Lean_IR_ToIR_bindVar(x_22571, x_20602, x_4, x_5, x_22526); +x_22573 = lean_ctor_get(x_22572, 0); +lean_inc(x_22573); +x_22574 = lean_ctor_get(x_22572, 1); +lean_inc(x_22574); +lean_dec(x_22572); +x_22575 = lean_ctor_get(x_22573, 0); +lean_inc(x_22575); +x_22576 = lean_ctor_get(x_22573, 1); +lean_inc(x_22576); +lean_dec(x_22573); +x_22577 = lean_ctor_get(x_1, 2); +lean_inc(x_22577); +lean_inc(x_5); +lean_inc(x_4); +x_22578 = l_Lean_IR_ToIR_lowerType(x_22577, x_22576, x_4, x_5, x_22574); +if (lean_obj_tag(x_22578) == 0) +{ +lean_object* x_22579; lean_object* x_22580; lean_object* x_22581; lean_object* x_22582; lean_object* x_22583; +x_22579 = lean_ctor_get(x_22578, 0); +lean_inc(x_22579); +x_22580 = lean_ctor_get(x_22578, 1); +lean_inc(x_22580); +lean_dec(x_22578); +x_22581 = lean_ctor_get(x_22579, 0); +lean_inc(x_22581); +x_22582 = lean_ctor_get(x_22579, 1); +lean_inc(x_22582); +lean_dec(x_22579); +lean_inc(x_5); +lean_inc(x_4); +lean_inc(x_2); +x_22583 = l_Lean_IR_ToIR_lowerLet___lambda__1(x_2, x_22575, x_22521, x_22581, x_22582, x_4, x_5, x_22580); +if (lean_obj_tag(x_22583) == 0) +{ +lean_object* x_22584; lean_object* x_22585; uint8_t 
x_22586; +x_22584 = lean_ctor_get(x_22583, 0); +lean_inc(x_22584); +x_22585 = lean_ctor_get(x_22583, 1); +lean_inc(x_22585); +lean_dec(x_22583); +x_22586 = !lean_is_exclusive(x_22584); +if (x_22586 == 0) +{ +lean_object* x_22587; +x_22587 = lean_ctor_get(x_22584, 0); +lean_ctor_set(x_22522, 0, x_22587); +lean_ctor_set(x_22584, 0, x_22522); +x_20603 = x_22584; +x_20604 = x_22585; +goto block_22520; +} +else +{ +lean_object* x_22588; lean_object* x_22589; lean_object* x_22590; +x_22588 = lean_ctor_get(x_22584, 0); +x_22589 = lean_ctor_get(x_22584, 1); +lean_inc(x_22589); +lean_inc(x_22588); +lean_dec(x_22584); +lean_ctor_set(x_22522, 0, x_22588); +x_22590 = lean_alloc_ctor(0, 2, 0); +lean_ctor_set(x_22590, 0, x_22522); +lean_ctor_set(x_22590, 1, x_22589); +x_20603 = x_22590; +x_20604 = x_22585; +goto block_22520; +} +} +else +{ +uint8_t x_22591; +lean_free_object(x_22522); +lean_dec(x_20601); +lean_dec(x_20599); +lean_dec(x_20593); +lean_dec(x_20592); +lean_dec(x_153); +lean_dec(x_5); +lean_dec(x_4); +lean_dec(x_2); +lean_dec(x_1); +x_22591 = !lean_is_exclusive(x_22583); +if (x_22591 == 0) +{ +return x_22583; +} +else +{ +lean_object* x_22592; lean_object* x_22593; lean_object* x_22594; +x_22592 = lean_ctor_get(x_22583, 0); +x_22593 = lean_ctor_get(x_22583, 1); +lean_inc(x_22593); +lean_inc(x_22592); +lean_dec(x_22583); +x_22594 = lean_alloc_ctor(1, 2, 0); +lean_ctor_set(x_22594, 0, x_22592); +lean_ctor_set(x_22594, 1, x_22593); +return x_22594; +} +} +} +else +{ +uint8_t x_22595; +lean_dec(x_22575); +lean_dec(x_22521); +lean_free_object(x_22522); +lean_dec(x_20601); +lean_dec(x_20599); +lean_dec(x_20593); +lean_dec(x_20592); +lean_dec(x_153); +lean_dec(x_5); +lean_dec(x_4); +lean_dec(x_2); +lean_dec(x_1); +x_22595 = !lean_is_exclusive(x_22578); +if (x_22595 == 0) +{ +return x_22578; +} +else +{ +lean_object* x_22596; lean_object* x_22597; lean_object* x_22598; +x_22596 = lean_ctor_get(x_22578, 0); +x_22597 = lean_ctor_get(x_22578, 1); +lean_inc(x_22597); +lean_inc(x_22596); +lean_dec(x_22578); +x_22598 = lean_alloc_ctor(1, 2, 0); +lean_ctor_set(x_22598, 0, x_22596); +lean_ctor_set(x_22598, 1, x_22597); +return x_22598; +} +} +} +} +else +{ +lean_object* x_22599; lean_object* x_22600; lean_object* x_22601; lean_object* x_22602; lean_object* x_22603; lean_object* x_22604; lean_object* x_22605; lean_object* x_22606; +lean_dec(x_22532); +lean_dec(x_22530); +lean_inc(x_20601); +lean_inc(x_153); +lean_ctor_set_tag(x_22521, 7); +lean_ctor_set(x_22521, 1, x_20601); +lean_ctor_set(x_22521, 0, x_153); +x_22599 = lean_ctor_get(x_1, 0); +lean_inc(x_22599); +x_22600 = l_Lean_IR_ToIR_bindVar(x_22599, x_20602, x_4, x_5, x_22526); +x_22601 = lean_ctor_get(x_22600, 0); +lean_inc(x_22601); +x_22602 = lean_ctor_get(x_22600, 1); +lean_inc(x_22602); +lean_dec(x_22600); +x_22603 = lean_ctor_get(x_22601, 0); +lean_inc(x_22603); +x_22604 = lean_ctor_get(x_22601, 1); +lean_inc(x_22604); +lean_dec(x_22601); +x_22605 = lean_box(7); +lean_inc(x_5); +lean_inc(x_4); +lean_inc(x_2); +x_22606 = l_Lean_IR_ToIR_lowerLet___lambda__1(x_2, x_22603, x_22521, x_22605, x_22604, x_4, x_5, x_22602); +if (lean_obj_tag(x_22606) == 0) +{ +lean_object* x_22607; lean_object* x_22608; uint8_t x_22609; +x_22607 = lean_ctor_get(x_22606, 0); +lean_inc(x_22607); +x_22608 = lean_ctor_get(x_22606, 1); +lean_inc(x_22608); +lean_dec(x_22606); +x_22609 = !lean_is_exclusive(x_22607); +if (x_22609 == 0) +{ +lean_object* x_22610; +x_22610 = lean_ctor_get(x_22607, 0); +lean_ctor_set(x_22522, 0, x_22610); +lean_ctor_set(x_22607, 0, x_22522); +x_20603 
= x_22607; +x_20604 = x_22608; +goto block_22520; +} +else +{ +lean_object* x_22611; lean_object* x_22612; lean_object* x_22613; +x_22611 = lean_ctor_get(x_22607, 0); +x_22612 = lean_ctor_get(x_22607, 1); +lean_inc(x_22612); +lean_inc(x_22611); +lean_dec(x_22607); +lean_ctor_set(x_22522, 0, x_22611); +x_22613 = lean_alloc_ctor(0, 2, 0); +lean_ctor_set(x_22613, 0, x_22522); +lean_ctor_set(x_22613, 1, x_22612); +x_20603 = x_22613; +x_20604 = x_22608; +goto block_22520; +} +} +else +{ +uint8_t x_22614; +lean_free_object(x_22522); +lean_dec(x_20601); +lean_dec(x_20599); +lean_dec(x_20593); +lean_dec(x_20592); +lean_dec(x_153); +lean_dec(x_5); +lean_dec(x_4); +lean_dec(x_2); +lean_dec(x_1); +x_22614 = !lean_is_exclusive(x_22606); +if (x_22614 == 0) +{ +return x_22606; +} +else +{ +lean_object* x_22615; lean_object* x_22616; lean_object* x_22617; +x_22615 = lean_ctor_get(x_22606, 0); +x_22616 = lean_ctor_get(x_22606, 1); +lean_inc(x_22616); +lean_inc(x_22615); +lean_dec(x_22606); +x_22617 = lean_alloc_ctor(1, 2, 0); +lean_ctor_set(x_22617, 0, x_22615); +lean_ctor_set(x_22617, 1, x_22616); +return x_22617; +} +} +} +} +else +{ +lean_object* x_22618; lean_object* x_22619; lean_object* x_22620; lean_object* x_22621; uint8_t x_22622; +x_22618 = lean_ctor_get(x_22522, 0); +lean_inc(x_22618); +lean_dec(x_22522); +x_22619 = lean_array_get_size(x_20601); +x_22620 = lean_ctor_get(x_22618, 3); +lean_inc(x_22620); +lean_dec(x_22618); +x_22621 = lean_array_get_size(x_22620); +lean_dec(x_22620); +x_22622 = lean_nat_dec_lt(x_22619, x_22621); +if (x_22622 == 0) +{ +uint8_t x_22623; +x_22623 = lean_nat_dec_eq(x_22619, x_22621); +if (x_22623 == 0) +{ +lean_object* x_22624; lean_object* x_22625; lean_object* x_22626; lean_object* x_22627; lean_object* x_22628; lean_object* x_22629; lean_object* x_22630; lean_object* x_22631; lean_object* x_22632; lean_object* x_22633; lean_object* x_22634; lean_object* x_22635; lean_object* x_22636; lean_object* x_22637; lean_object* x_22638; lean_object* x_22639; +x_22624 = lean_unsigned_to_nat(0u); +x_22625 = l_Array_extract___rarg(x_20601, x_22624, x_22621); +x_22626 = l_Array_extract___rarg(x_20601, x_22621, x_22619); +lean_dec(x_22619); +lean_inc(x_153); +lean_ctor_set_tag(x_22521, 6); +lean_ctor_set(x_22521, 1, x_22625); +lean_ctor_set(x_22521, 0, x_153); +x_22627 = lean_ctor_get(x_1, 0); +lean_inc(x_22627); +x_22628 = l_Lean_IR_ToIR_bindVar(x_22627, x_20602, x_4, x_5, x_22526); +x_22629 = lean_ctor_get(x_22628, 0); +lean_inc(x_22629); +x_22630 = lean_ctor_get(x_22628, 1); +lean_inc(x_22630); +lean_dec(x_22628); +x_22631 = lean_ctor_get(x_22629, 0); +lean_inc(x_22631); +x_22632 = lean_ctor_get(x_22629, 1); +lean_inc(x_22632); +lean_dec(x_22629); +x_22633 = l_Lean_IR_ToIR_newVar(x_22632, x_4, x_5, x_22630); +x_22634 = lean_ctor_get(x_22633, 0); +lean_inc(x_22634); +x_22635 = lean_ctor_get(x_22633, 1); +lean_inc(x_22635); +lean_dec(x_22633); +x_22636 = lean_ctor_get(x_22634, 0); +lean_inc(x_22636); +x_22637 = lean_ctor_get(x_22634, 1); +lean_inc(x_22637); +lean_dec(x_22634); +x_22638 = lean_ctor_get(x_1, 2); +lean_inc(x_22638); +lean_inc(x_5); +lean_inc(x_4); +x_22639 = l_Lean_IR_ToIR_lowerType(x_22638, x_22637, x_4, x_5, x_22635); +if (lean_obj_tag(x_22639) == 0) +{ +lean_object* x_22640; lean_object* x_22641; lean_object* x_22642; lean_object* x_22643; lean_object* x_22644; +x_22640 = lean_ctor_get(x_22639, 0); +lean_inc(x_22640); +x_22641 = lean_ctor_get(x_22639, 1); +lean_inc(x_22641); +lean_dec(x_22639); +x_22642 = lean_ctor_get(x_22640, 0); +lean_inc(x_22642); +x_22643 
= lean_ctor_get(x_22640, 1); +lean_inc(x_22643); +lean_dec(x_22640); +lean_inc(x_5); +lean_inc(x_4); +lean_inc(x_2); +x_22644 = l_Lean_IR_ToIR_lowerLet___lambda__3(x_2, x_22636, x_22626, x_22631, x_22521, x_22642, x_22643, x_4, x_5, x_22641); +if (lean_obj_tag(x_22644) == 0) +{ +lean_object* x_22645; lean_object* x_22646; lean_object* x_22647; lean_object* x_22648; lean_object* x_22649; lean_object* x_22650; lean_object* x_22651; +x_22645 = lean_ctor_get(x_22644, 0); +lean_inc(x_22645); +x_22646 = lean_ctor_get(x_22644, 1); +lean_inc(x_22646); +lean_dec(x_22644); +x_22647 = lean_ctor_get(x_22645, 0); +lean_inc(x_22647); +x_22648 = lean_ctor_get(x_22645, 1); +lean_inc(x_22648); +if (lean_is_exclusive(x_22645)) { + lean_ctor_release(x_22645, 0); + lean_ctor_release(x_22645, 1); + x_22649 = x_22645; +} else { + lean_dec_ref(x_22645); + x_22649 = lean_box(0); +} +x_22650 = lean_alloc_ctor(1, 1, 0); +lean_ctor_set(x_22650, 0, x_22647); +if (lean_is_scalar(x_22649)) { + x_22651 = lean_alloc_ctor(0, 2, 0); +} else { + x_22651 = x_22649; +} +lean_ctor_set(x_22651, 0, x_22650); +lean_ctor_set(x_22651, 1, x_22648); +x_20603 = x_22651; +x_20604 = x_22646; +goto block_22520; +} +else +{ +lean_object* x_22652; lean_object* x_22653; lean_object* x_22654; lean_object* x_22655; +lean_dec(x_20601); +lean_dec(x_20599); +lean_dec(x_20593); +lean_dec(x_20592); +lean_dec(x_153); +lean_dec(x_5); +lean_dec(x_4); +lean_dec(x_2); +lean_dec(x_1); +x_22652 = lean_ctor_get(x_22644, 0); +lean_inc(x_22652); +x_22653 = lean_ctor_get(x_22644, 1); +lean_inc(x_22653); +if (lean_is_exclusive(x_22644)) { + lean_ctor_release(x_22644, 0); + lean_ctor_release(x_22644, 1); + x_22654 = x_22644; +} else { + lean_dec_ref(x_22644); + x_22654 = lean_box(0); +} +if (lean_is_scalar(x_22654)) { + x_22655 = lean_alloc_ctor(1, 2, 0); +} else { + x_22655 = x_22654; +} +lean_ctor_set(x_22655, 0, x_22652); +lean_ctor_set(x_22655, 1, x_22653); +return x_22655; +} +} +else +{ +lean_object* x_22656; lean_object* x_22657; lean_object* x_22658; lean_object* x_22659; +lean_dec(x_22636); +lean_dec(x_22631); +lean_dec(x_22521); +lean_dec(x_22626); +lean_dec(x_20601); +lean_dec(x_20599); +lean_dec(x_20593); +lean_dec(x_20592); +lean_dec(x_153); +lean_dec(x_5); +lean_dec(x_4); +lean_dec(x_2); +lean_dec(x_1); +x_22656 = lean_ctor_get(x_22639, 0); +lean_inc(x_22656); +x_22657 = lean_ctor_get(x_22639, 1); +lean_inc(x_22657); +if (lean_is_exclusive(x_22639)) { + lean_ctor_release(x_22639, 0); + lean_ctor_release(x_22639, 1); + x_22658 = x_22639; +} else { + lean_dec_ref(x_22639); + x_22658 = lean_box(0); +} +if (lean_is_scalar(x_22658)) { + x_22659 = lean_alloc_ctor(1, 2, 0); +} else { + x_22659 = x_22658; +} +lean_ctor_set(x_22659, 0, x_22656); +lean_ctor_set(x_22659, 1, x_22657); +return x_22659; +} +} +else +{ +lean_object* x_22660; lean_object* x_22661; lean_object* x_22662; lean_object* x_22663; lean_object* x_22664; lean_object* x_22665; lean_object* x_22666; lean_object* x_22667; +lean_dec(x_22621); +lean_dec(x_22619); +lean_inc(x_20601); +lean_inc(x_153); +lean_ctor_set_tag(x_22521, 6); +lean_ctor_set(x_22521, 1, x_20601); +lean_ctor_set(x_22521, 0, x_153); +x_22660 = lean_ctor_get(x_1, 0); +lean_inc(x_22660); +x_22661 = l_Lean_IR_ToIR_bindVar(x_22660, x_20602, x_4, x_5, x_22526); +x_22662 = lean_ctor_get(x_22661, 0); +lean_inc(x_22662); +x_22663 = lean_ctor_get(x_22661, 1); +lean_inc(x_22663); +lean_dec(x_22661); +x_22664 = lean_ctor_get(x_22662, 0); +lean_inc(x_22664); +x_22665 = lean_ctor_get(x_22662, 1); +lean_inc(x_22665); +lean_dec(x_22662); 
+x_22666 = lean_ctor_get(x_1, 2); +lean_inc(x_22666); +lean_inc(x_5); +lean_inc(x_4); +x_22667 = l_Lean_IR_ToIR_lowerType(x_22666, x_22665, x_4, x_5, x_22663); +if (lean_obj_tag(x_22667) == 0) +{ +lean_object* x_22668; lean_object* x_22669; lean_object* x_22670; lean_object* x_22671; lean_object* x_22672; +x_22668 = lean_ctor_get(x_22667, 0); +lean_inc(x_22668); +x_22669 = lean_ctor_get(x_22667, 1); +lean_inc(x_22669); +lean_dec(x_22667); +x_22670 = lean_ctor_get(x_22668, 0); +lean_inc(x_22670); +x_22671 = lean_ctor_get(x_22668, 1); +lean_inc(x_22671); +lean_dec(x_22668); +lean_inc(x_5); +lean_inc(x_4); +lean_inc(x_2); +x_22672 = l_Lean_IR_ToIR_lowerLet___lambda__1(x_2, x_22664, x_22521, x_22670, x_22671, x_4, x_5, x_22669); +if (lean_obj_tag(x_22672) == 0) +{ +lean_object* x_22673; lean_object* x_22674; lean_object* x_22675; lean_object* x_22676; lean_object* x_22677; lean_object* x_22678; lean_object* x_22679; +x_22673 = lean_ctor_get(x_22672, 0); +lean_inc(x_22673); +x_22674 = lean_ctor_get(x_22672, 1); +lean_inc(x_22674); +lean_dec(x_22672); +x_22675 = lean_ctor_get(x_22673, 0); +lean_inc(x_22675); +x_22676 = lean_ctor_get(x_22673, 1); +lean_inc(x_22676); +if (lean_is_exclusive(x_22673)) { + lean_ctor_release(x_22673, 0); + lean_ctor_release(x_22673, 1); + x_22677 = x_22673; +} else { + lean_dec_ref(x_22673); + x_22677 = lean_box(0); +} +x_22678 = lean_alloc_ctor(1, 1, 0); +lean_ctor_set(x_22678, 0, x_22675); +if (lean_is_scalar(x_22677)) { + x_22679 = lean_alloc_ctor(0, 2, 0); +} else { + x_22679 = x_22677; +} +lean_ctor_set(x_22679, 0, x_22678); +lean_ctor_set(x_22679, 1, x_22676); +x_20603 = x_22679; +x_20604 = x_22674; +goto block_22520; +} +else +{ +lean_object* x_22680; lean_object* x_22681; lean_object* x_22682; lean_object* x_22683; +lean_dec(x_20601); +lean_dec(x_20599); +lean_dec(x_20593); +lean_dec(x_20592); +lean_dec(x_153); +lean_dec(x_5); +lean_dec(x_4); +lean_dec(x_2); +lean_dec(x_1); +x_22680 = lean_ctor_get(x_22672, 0); +lean_inc(x_22680); +x_22681 = lean_ctor_get(x_22672, 1); +lean_inc(x_22681); +if (lean_is_exclusive(x_22672)) { + lean_ctor_release(x_22672, 0); + lean_ctor_release(x_22672, 1); + x_22682 = x_22672; +} else { + lean_dec_ref(x_22672); + x_22682 = lean_box(0); +} +if (lean_is_scalar(x_22682)) { + x_22683 = lean_alloc_ctor(1, 2, 0); +} else { + x_22683 = x_22682; +} +lean_ctor_set(x_22683, 0, x_22680); +lean_ctor_set(x_22683, 1, x_22681); +return x_22683; +} +} +else +{ +lean_object* x_22684; lean_object* x_22685; lean_object* x_22686; lean_object* x_22687; +lean_dec(x_22664); +lean_dec(x_22521); +lean_dec(x_20601); +lean_dec(x_20599); +lean_dec(x_20593); +lean_dec(x_20592); +lean_dec(x_153); +lean_dec(x_5); +lean_dec(x_4); +lean_dec(x_2); +lean_dec(x_1); +x_22684 = lean_ctor_get(x_22667, 0); +lean_inc(x_22684); +x_22685 = lean_ctor_get(x_22667, 1); +lean_inc(x_22685); +if (lean_is_exclusive(x_22667)) { + lean_ctor_release(x_22667, 0); + lean_ctor_release(x_22667, 1); + x_22686 = x_22667; +} else { + lean_dec_ref(x_22667); + x_22686 = lean_box(0); +} +if (lean_is_scalar(x_22686)) { + x_22687 = lean_alloc_ctor(1, 2, 0); +} else { + x_22687 = x_22686; +} +lean_ctor_set(x_22687, 0, x_22684); +lean_ctor_set(x_22687, 1, x_22685); +return x_22687; +} +} +} +else +{ +lean_object* x_22688; lean_object* x_22689; lean_object* x_22690; lean_object* x_22691; lean_object* x_22692; lean_object* x_22693; lean_object* x_22694; lean_object* x_22695; +lean_dec(x_22621); +lean_dec(x_22619); +lean_inc(x_20601); +lean_inc(x_153); +lean_ctor_set_tag(x_22521, 7); 
+lean_ctor_set(x_22521, 1, x_20601); +lean_ctor_set(x_22521, 0, x_153); +x_22688 = lean_ctor_get(x_1, 0); +lean_inc(x_22688); +x_22689 = l_Lean_IR_ToIR_bindVar(x_22688, x_20602, x_4, x_5, x_22526); +x_22690 = lean_ctor_get(x_22689, 0); +lean_inc(x_22690); +x_22691 = lean_ctor_get(x_22689, 1); +lean_inc(x_22691); +lean_dec(x_22689); +x_22692 = lean_ctor_get(x_22690, 0); +lean_inc(x_22692); +x_22693 = lean_ctor_get(x_22690, 1); +lean_inc(x_22693); +lean_dec(x_22690); +x_22694 = lean_box(7); +lean_inc(x_5); +lean_inc(x_4); +lean_inc(x_2); +x_22695 = l_Lean_IR_ToIR_lowerLet___lambda__1(x_2, x_22692, x_22521, x_22694, x_22693, x_4, x_5, x_22691); +if (lean_obj_tag(x_22695) == 0) +{ +lean_object* x_22696; lean_object* x_22697; lean_object* x_22698; lean_object* x_22699; lean_object* x_22700; lean_object* x_22701; lean_object* x_22702; +x_22696 = lean_ctor_get(x_22695, 0); +lean_inc(x_22696); +x_22697 = lean_ctor_get(x_22695, 1); +lean_inc(x_22697); +lean_dec(x_22695); +x_22698 = lean_ctor_get(x_22696, 0); +lean_inc(x_22698); +x_22699 = lean_ctor_get(x_22696, 1); +lean_inc(x_22699); +if (lean_is_exclusive(x_22696)) { + lean_ctor_release(x_22696, 0); + lean_ctor_release(x_22696, 1); + x_22700 = x_22696; +} else { + lean_dec_ref(x_22696); + x_22700 = lean_box(0); +} +x_22701 = lean_alloc_ctor(1, 1, 0); +lean_ctor_set(x_22701, 0, x_22698); +if (lean_is_scalar(x_22700)) { + x_22702 = lean_alloc_ctor(0, 2, 0); +} else { + x_22702 = x_22700; +} +lean_ctor_set(x_22702, 0, x_22701); +lean_ctor_set(x_22702, 1, x_22699); +x_20603 = x_22702; +x_20604 = x_22697; +goto block_22520; +} +else +{ +lean_object* x_22703; lean_object* x_22704; lean_object* x_22705; lean_object* x_22706; +lean_dec(x_20601); +lean_dec(x_20599); +lean_dec(x_20593); +lean_dec(x_20592); +lean_dec(x_153); +lean_dec(x_5); +lean_dec(x_4); +lean_dec(x_2); +lean_dec(x_1); +x_22703 = lean_ctor_get(x_22695, 0); +lean_inc(x_22703); +x_22704 = lean_ctor_get(x_22695, 1); +lean_inc(x_22704); +if (lean_is_exclusive(x_22695)) { + lean_ctor_release(x_22695, 0); + lean_ctor_release(x_22695, 1); + x_22705 = x_22695; +} else { + lean_dec_ref(x_22695); + x_22705 = lean_box(0); +} +if (lean_is_scalar(x_22705)) { + x_22706 = lean_alloc_ctor(1, 2, 0); +} else { + x_22706 = x_22705; +} +lean_ctor_set(x_22706, 0, x_22703); +lean_ctor_set(x_22706, 1, x_22704); +return x_22706; +} +} +} +} +else +{ +lean_object* x_22707; lean_object* x_22708; lean_object* x_22709; lean_object* x_22710; lean_object* x_22711; lean_object* x_22712; uint8_t x_22713; +x_22707 = lean_ctor_get(x_22521, 1); +lean_inc(x_22707); +lean_dec(x_22521); +x_22708 = lean_ctor_get(x_22522, 0); +lean_inc(x_22708); +if (lean_is_exclusive(x_22522)) { + lean_ctor_release(x_22522, 0); + x_22709 = x_22522; +} else { + lean_dec_ref(x_22522); + x_22709 = lean_box(0); +} +x_22710 = lean_array_get_size(x_20601); +x_22711 = lean_ctor_get(x_22708, 3); +lean_inc(x_22711); +lean_dec(x_22708); +x_22712 = lean_array_get_size(x_22711); +lean_dec(x_22711); +x_22713 = lean_nat_dec_lt(x_22710, x_22712); +if (x_22713 == 0) +{ +uint8_t x_22714; +x_22714 = lean_nat_dec_eq(x_22710, x_22712); +if (x_22714 == 0) +{ +lean_object* x_22715; lean_object* x_22716; lean_object* x_22717; lean_object* x_22718; lean_object* x_22719; lean_object* x_22720; lean_object* x_22721; lean_object* x_22722; lean_object* x_22723; lean_object* x_22724; lean_object* x_22725; lean_object* x_22726; lean_object* x_22727; lean_object* x_22728; lean_object* x_22729; lean_object* x_22730; lean_object* x_22731; +x_22715 = lean_unsigned_to_nat(0u); 
+x_22716 = l_Array_extract___rarg(x_20601, x_22715, x_22712); +x_22717 = l_Array_extract___rarg(x_20601, x_22712, x_22710); +lean_dec(x_22710); +lean_inc(x_153); +x_22718 = lean_alloc_ctor(6, 2, 0); +lean_ctor_set(x_22718, 0, x_153); +lean_ctor_set(x_22718, 1, x_22716); +x_22719 = lean_ctor_get(x_1, 0); +lean_inc(x_22719); +x_22720 = l_Lean_IR_ToIR_bindVar(x_22719, x_20602, x_4, x_5, x_22707); +x_22721 = lean_ctor_get(x_22720, 0); +lean_inc(x_22721); +x_22722 = lean_ctor_get(x_22720, 1); +lean_inc(x_22722); +lean_dec(x_22720); +x_22723 = lean_ctor_get(x_22721, 0); +lean_inc(x_22723); +x_22724 = lean_ctor_get(x_22721, 1); +lean_inc(x_22724); +lean_dec(x_22721); +x_22725 = l_Lean_IR_ToIR_newVar(x_22724, x_4, x_5, x_22722); +x_22726 = lean_ctor_get(x_22725, 0); +lean_inc(x_22726); +x_22727 = lean_ctor_get(x_22725, 1); +lean_inc(x_22727); +lean_dec(x_22725); +x_22728 = lean_ctor_get(x_22726, 0); +lean_inc(x_22728); +x_22729 = lean_ctor_get(x_22726, 1); +lean_inc(x_22729); +lean_dec(x_22726); +x_22730 = lean_ctor_get(x_1, 2); +lean_inc(x_22730); +lean_inc(x_5); +lean_inc(x_4); +x_22731 = l_Lean_IR_ToIR_lowerType(x_22730, x_22729, x_4, x_5, x_22727); +if (lean_obj_tag(x_22731) == 0) +{ +lean_object* x_22732; lean_object* x_22733; lean_object* x_22734; lean_object* x_22735; lean_object* x_22736; +x_22732 = lean_ctor_get(x_22731, 0); +lean_inc(x_22732); +x_22733 = lean_ctor_get(x_22731, 1); +lean_inc(x_22733); +lean_dec(x_22731); +x_22734 = lean_ctor_get(x_22732, 0); +lean_inc(x_22734); +x_22735 = lean_ctor_get(x_22732, 1); +lean_inc(x_22735); +lean_dec(x_22732); +lean_inc(x_5); +lean_inc(x_4); +lean_inc(x_2); +x_22736 = l_Lean_IR_ToIR_lowerLet___lambda__3(x_2, x_22728, x_22717, x_22723, x_22718, x_22734, x_22735, x_4, x_5, x_22733); +if (lean_obj_tag(x_22736) == 0) +{ +lean_object* x_22737; lean_object* x_22738; lean_object* x_22739; lean_object* x_22740; lean_object* x_22741; lean_object* x_22742; lean_object* x_22743; +x_22737 = lean_ctor_get(x_22736, 0); +lean_inc(x_22737); +x_22738 = lean_ctor_get(x_22736, 1); +lean_inc(x_22738); +lean_dec(x_22736); +x_22739 = lean_ctor_get(x_22737, 0); +lean_inc(x_22739); +x_22740 = lean_ctor_get(x_22737, 1); +lean_inc(x_22740); +if (lean_is_exclusive(x_22737)) { + lean_ctor_release(x_22737, 0); + lean_ctor_release(x_22737, 1); + x_22741 = x_22737; +} else { + lean_dec_ref(x_22737); + x_22741 = lean_box(0); +} +if (lean_is_scalar(x_22709)) { + x_22742 = lean_alloc_ctor(1, 1, 0); +} else { + x_22742 = x_22709; +} +lean_ctor_set(x_22742, 0, x_22739); +if (lean_is_scalar(x_22741)) { + x_22743 = lean_alloc_ctor(0, 2, 0); +} else { + x_22743 = x_22741; +} +lean_ctor_set(x_22743, 0, x_22742); +lean_ctor_set(x_22743, 1, x_22740); +x_20603 = x_22743; +x_20604 = x_22738; +goto block_22520; +} +else +{ +lean_object* x_22744; lean_object* x_22745; lean_object* x_22746; lean_object* x_22747; +lean_dec(x_22709); +lean_dec(x_20601); +lean_dec(x_20599); +lean_dec(x_20593); +lean_dec(x_20592); +lean_dec(x_153); +lean_dec(x_5); +lean_dec(x_4); +lean_dec(x_2); +lean_dec(x_1); +x_22744 = lean_ctor_get(x_22736, 0); +lean_inc(x_22744); +x_22745 = lean_ctor_get(x_22736, 1); +lean_inc(x_22745); +if (lean_is_exclusive(x_22736)) { + lean_ctor_release(x_22736, 0); + lean_ctor_release(x_22736, 1); + x_22746 = x_22736; +} else { + lean_dec_ref(x_22736); + x_22746 = lean_box(0); +} +if (lean_is_scalar(x_22746)) { + x_22747 = lean_alloc_ctor(1, 2, 0); +} else { + x_22747 = x_22746; +} +lean_ctor_set(x_22747, 0, x_22744); +lean_ctor_set(x_22747, 1, x_22745); +return x_22747; +} +} +else 
+{ +lean_object* x_22748; lean_object* x_22749; lean_object* x_22750; lean_object* x_22751; +lean_dec(x_22728); +lean_dec(x_22723); +lean_dec(x_22718); +lean_dec(x_22717); +lean_dec(x_22709); +lean_dec(x_20601); +lean_dec(x_20599); +lean_dec(x_20593); +lean_dec(x_20592); +lean_dec(x_153); +lean_dec(x_5); +lean_dec(x_4); +lean_dec(x_2); +lean_dec(x_1); +x_22748 = lean_ctor_get(x_22731, 0); +lean_inc(x_22748); +x_22749 = lean_ctor_get(x_22731, 1); +lean_inc(x_22749); +if (lean_is_exclusive(x_22731)) { + lean_ctor_release(x_22731, 0); + lean_ctor_release(x_22731, 1); + x_22750 = x_22731; +} else { + lean_dec_ref(x_22731); + x_22750 = lean_box(0); +} +if (lean_is_scalar(x_22750)) { + x_22751 = lean_alloc_ctor(1, 2, 0); +} else { + x_22751 = x_22750; +} +lean_ctor_set(x_22751, 0, x_22748); +lean_ctor_set(x_22751, 1, x_22749); +return x_22751; +} +} +else +{ +lean_object* x_22752; lean_object* x_22753; lean_object* x_22754; lean_object* x_22755; lean_object* x_22756; lean_object* x_22757; lean_object* x_22758; lean_object* x_22759; lean_object* x_22760; +lean_dec(x_22712); +lean_dec(x_22710); +lean_inc(x_20601); +lean_inc(x_153); +x_22752 = lean_alloc_ctor(6, 2, 0); +lean_ctor_set(x_22752, 0, x_153); +lean_ctor_set(x_22752, 1, x_20601); +x_22753 = lean_ctor_get(x_1, 0); +lean_inc(x_22753); +x_22754 = l_Lean_IR_ToIR_bindVar(x_22753, x_20602, x_4, x_5, x_22707); +x_22755 = lean_ctor_get(x_22754, 0); +lean_inc(x_22755); +x_22756 = lean_ctor_get(x_22754, 1); +lean_inc(x_22756); +lean_dec(x_22754); +x_22757 = lean_ctor_get(x_22755, 0); +lean_inc(x_22757); +x_22758 = lean_ctor_get(x_22755, 1); +lean_inc(x_22758); +lean_dec(x_22755); +x_22759 = lean_ctor_get(x_1, 2); +lean_inc(x_22759); +lean_inc(x_5); +lean_inc(x_4); +x_22760 = l_Lean_IR_ToIR_lowerType(x_22759, x_22758, x_4, x_5, x_22756); +if (lean_obj_tag(x_22760) == 0) +{ +lean_object* x_22761; lean_object* x_22762; lean_object* x_22763; lean_object* x_22764; lean_object* x_22765; +x_22761 = lean_ctor_get(x_22760, 0); +lean_inc(x_22761); +x_22762 = lean_ctor_get(x_22760, 1); +lean_inc(x_22762); +lean_dec(x_22760); +x_22763 = lean_ctor_get(x_22761, 0); +lean_inc(x_22763); +x_22764 = lean_ctor_get(x_22761, 1); +lean_inc(x_22764); +lean_dec(x_22761); +lean_inc(x_5); +lean_inc(x_4); +lean_inc(x_2); +x_22765 = l_Lean_IR_ToIR_lowerLet___lambda__1(x_2, x_22757, x_22752, x_22763, x_22764, x_4, x_5, x_22762); +if (lean_obj_tag(x_22765) == 0) +{ +lean_object* x_22766; lean_object* x_22767; lean_object* x_22768; lean_object* x_22769; lean_object* x_22770; lean_object* x_22771; lean_object* x_22772; +x_22766 = lean_ctor_get(x_22765, 0); +lean_inc(x_22766); +x_22767 = lean_ctor_get(x_22765, 1); +lean_inc(x_22767); +lean_dec(x_22765); +x_22768 = lean_ctor_get(x_22766, 0); +lean_inc(x_22768); +x_22769 = lean_ctor_get(x_22766, 1); +lean_inc(x_22769); +if (lean_is_exclusive(x_22766)) { + lean_ctor_release(x_22766, 0); + lean_ctor_release(x_22766, 1); + x_22770 = x_22766; +} else { + lean_dec_ref(x_22766); + x_22770 = lean_box(0); +} +if (lean_is_scalar(x_22709)) { + x_22771 = lean_alloc_ctor(1, 1, 0); +} else { + x_22771 = x_22709; +} +lean_ctor_set(x_22771, 0, x_22768); +if (lean_is_scalar(x_22770)) { + x_22772 = lean_alloc_ctor(0, 2, 0); +} else { + x_22772 = x_22770; +} +lean_ctor_set(x_22772, 0, x_22771); +lean_ctor_set(x_22772, 1, x_22769); +x_20603 = x_22772; +x_20604 = x_22767; +goto block_22520; +} +else +{ +lean_object* x_22773; lean_object* x_22774; lean_object* x_22775; lean_object* x_22776; +lean_dec(x_22709); +lean_dec(x_20601); +lean_dec(x_20599); 
+lean_dec(x_20593); +lean_dec(x_20592); +lean_dec(x_153); +lean_dec(x_5); +lean_dec(x_4); +lean_dec(x_2); +lean_dec(x_1); +x_22773 = lean_ctor_get(x_22765, 0); +lean_inc(x_22773); +x_22774 = lean_ctor_get(x_22765, 1); +lean_inc(x_22774); +if (lean_is_exclusive(x_22765)) { + lean_ctor_release(x_22765, 0); + lean_ctor_release(x_22765, 1); + x_22775 = x_22765; +} else { + lean_dec_ref(x_22765); + x_22775 = lean_box(0); +} +if (lean_is_scalar(x_22775)) { + x_22776 = lean_alloc_ctor(1, 2, 0); +} else { + x_22776 = x_22775; +} +lean_ctor_set(x_22776, 0, x_22773); +lean_ctor_set(x_22776, 1, x_22774); +return x_22776; +} +} +else +{ +lean_object* x_22777; lean_object* x_22778; lean_object* x_22779; lean_object* x_22780; +lean_dec(x_22757); +lean_dec(x_22752); +lean_dec(x_22709); +lean_dec(x_20601); +lean_dec(x_20599); +lean_dec(x_20593); +lean_dec(x_20592); +lean_dec(x_153); +lean_dec(x_5); +lean_dec(x_4); +lean_dec(x_2); +lean_dec(x_1); +x_22777 = lean_ctor_get(x_22760, 0); +lean_inc(x_22777); +x_22778 = lean_ctor_get(x_22760, 1); +lean_inc(x_22778); +if (lean_is_exclusive(x_22760)) { + lean_ctor_release(x_22760, 0); + lean_ctor_release(x_22760, 1); + x_22779 = x_22760; +} else { + lean_dec_ref(x_22760); + x_22779 = lean_box(0); +} +if (lean_is_scalar(x_22779)) { + x_22780 = lean_alloc_ctor(1, 2, 0); +} else { + x_22780 = x_22779; +} +lean_ctor_set(x_22780, 0, x_22777); +lean_ctor_set(x_22780, 1, x_22778); +return x_22780; +} +} +} +else +{ +lean_object* x_22781; lean_object* x_22782; lean_object* x_22783; lean_object* x_22784; lean_object* x_22785; lean_object* x_22786; lean_object* x_22787; lean_object* x_22788; lean_object* x_22789; +lean_dec(x_22712); +lean_dec(x_22710); +lean_inc(x_20601); +lean_inc(x_153); +x_22781 = lean_alloc_ctor(7, 2, 0); +lean_ctor_set(x_22781, 0, x_153); +lean_ctor_set(x_22781, 1, x_20601); +x_22782 = lean_ctor_get(x_1, 0); +lean_inc(x_22782); +x_22783 = l_Lean_IR_ToIR_bindVar(x_22782, x_20602, x_4, x_5, x_22707); +x_22784 = lean_ctor_get(x_22783, 0); +lean_inc(x_22784); +x_22785 = lean_ctor_get(x_22783, 1); +lean_inc(x_22785); +lean_dec(x_22783); +x_22786 = lean_ctor_get(x_22784, 0); +lean_inc(x_22786); +x_22787 = lean_ctor_get(x_22784, 1); +lean_inc(x_22787); +lean_dec(x_22784); +x_22788 = lean_box(7); +lean_inc(x_5); +lean_inc(x_4); +lean_inc(x_2); +x_22789 = l_Lean_IR_ToIR_lowerLet___lambda__1(x_2, x_22786, x_22781, x_22788, x_22787, x_4, x_5, x_22785); +if (lean_obj_tag(x_22789) == 0) +{ +lean_object* x_22790; lean_object* x_22791; lean_object* x_22792; lean_object* x_22793; lean_object* x_22794; lean_object* x_22795; lean_object* x_22796; +x_22790 = lean_ctor_get(x_22789, 0); +lean_inc(x_22790); +x_22791 = lean_ctor_get(x_22789, 1); +lean_inc(x_22791); +lean_dec(x_22789); +x_22792 = lean_ctor_get(x_22790, 0); +lean_inc(x_22792); +x_22793 = lean_ctor_get(x_22790, 1); +lean_inc(x_22793); +if (lean_is_exclusive(x_22790)) { + lean_ctor_release(x_22790, 0); + lean_ctor_release(x_22790, 1); + x_22794 = x_22790; +} else { + lean_dec_ref(x_22790); + x_22794 = lean_box(0); +} +if (lean_is_scalar(x_22709)) { + x_22795 = lean_alloc_ctor(1, 1, 0); +} else { + x_22795 = x_22709; +} +lean_ctor_set(x_22795, 0, x_22792); +if (lean_is_scalar(x_22794)) { + x_22796 = lean_alloc_ctor(0, 2, 0); +} else { + x_22796 = x_22794; +} +lean_ctor_set(x_22796, 0, x_22795); +lean_ctor_set(x_22796, 1, x_22793); +x_20603 = x_22796; +x_20604 = x_22791; +goto block_22520; +} +else +{ +lean_object* x_22797; lean_object* x_22798; lean_object* x_22799; lean_object* x_22800; +lean_dec(x_22709); 
+lean_dec(x_20601); +lean_dec(x_20599); +lean_dec(x_20593); +lean_dec(x_20592); +lean_dec(x_153); +lean_dec(x_5); +lean_dec(x_4); +lean_dec(x_2); +lean_dec(x_1); +x_22797 = lean_ctor_get(x_22789, 0); +lean_inc(x_22797); +x_22798 = lean_ctor_get(x_22789, 1); +lean_inc(x_22798); +if (lean_is_exclusive(x_22789)) { + lean_ctor_release(x_22789, 0); + lean_ctor_release(x_22789, 1); + x_22799 = x_22789; +} else { + lean_dec_ref(x_22789); + x_22799 = lean_box(0); +} +if (lean_is_scalar(x_22799)) { + x_22800 = lean_alloc_ctor(1, 2, 0); +} else { + x_22800 = x_22799; +} +lean_ctor_set(x_22800, 0, x_22797); +lean_ctor_set(x_22800, 1, x_22798); +return x_22800; +} +} +} +} +block_22520: +{ +lean_object* x_20605; +x_20605 = lean_ctor_get(x_20603, 0); +lean_inc(x_20605); +if (lean_obj_tag(x_20605) == 0) +{ +uint8_t x_20606; +lean_dec(x_20599); +x_20606 = !lean_is_exclusive(x_20603); +if (x_20606 == 0) +{ +lean_object* x_20607; lean_object* x_20608; lean_object* x_20609; lean_object* x_20610; lean_object* x_20611; lean_object* x_20612; lean_object* x_20613; uint8_t x_20614; lean_object* x_20615; +x_20607 = lean_ctor_get(x_20603, 1); +x_20608 = lean_ctor_get(x_20603, 0); +lean_dec(x_20608); +x_20609 = lean_st_ref_get(x_5, x_20604); +x_20610 = lean_ctor_get(x_20609, 0); +lean_inc(x_20610); +x_20611 = lean_ctor_get(x_20609, 1); +lean_inc(x_20611); +if (lean_is_exclusive(x_20609)) { + lean_ctor_release(x_20609, 0); + lean_ctor_release(x_20609, 1); + x_20612 = x_20609; +} else { + lean_dec_ref(x_20609); + x_20612 = lean_box(0); +} +x_20613 = lean_ctor_get(x_20610, 0); +lean_inc(x_20613); +lean_dec(x_20610); +x_20614 = 0; +lean_inc(x_153); +lean_inc(x_20613); +x_20615 = l_Lean_Environment_find_x3f(x_20613, x_153, x_20614); +if (lean_obj_tag(x_20615) == 0) +{ +lean_object* x_20616; lean_object* x_20617; +lean_dec(x_20613); +lean_dec(x_20612); +lean_free_object(x_20603); +lean_dec(x_20601); +lean_dec(x_20593); +lean_dec(x_20592); +lean_dec(x_153); +lean_dec(x_2); +lean_dec(x_1); +x_20616 = l_Lean_IR_ToIR_lowerLet___closed__6; +x_20617 = l_panic___at_Lean_IR_ToIR_lowerAlt_loop___spec__1(x_20616, x_20607, x_4, x_5, x_20611); +return x_20617; +} +else +{ +lean_object* x_20618; +x_20618 = lean_ctor_get(x_20615, 0); +lean_inc(x_20618); +lean_dec(x_20615); +switch (lean_obj_tag(x_20618)) { +case 0: +{ +uint8_t x_20619; +lean_dec(x_20613); +lean_dec(x_20593); +lean_dec(x_20592); +x_20619 = !lean_is_exclusive(x_20618); +if (x_20619 == 0) +{ +lean_object* x_20620; lean_object* x_20621; uint8_t x_20622; +x_20620 = lean_ctor_get(x_20618, 0); +lean_dec(x_20620); +x_20621 = l_Lean_IR_ToIR_lowerLet___closed__9; +x_20622 = lean_name_eq(x_153, x_20621); +if (x_20622 == 0) +{ +lean_object* x_20623; uint8_t x_20624; +x_20623 = l_Lean_IR_ToIR_lowerLet___closed__11; +x_20624 = lean_name_eq(x_153, x_20623); +if (x_20624 == 0) +{ +lean_object* x_20625; lean_object* x_20626; lean_object* x_20627; +lean_dec(x_20612); +lean_free_object(x_20603); +lean_inc(x_153); +x_20625 = l_Lean_IR_ToIR_findDecl(x_153, x_20607, x_4, x_5, x_20611); +x_20626 = lean_ctor_get(x_20625, 0); +lean_inc(x_20626); +x_20627 = lean_ctor_get(x_20626, 0); +lean_inc(x_20627); +if (lean_obj_tag(x_20627) == 0) +{ +uint8_t x_20628; +lean_dec(x_20601); +lean_dec(x_2); +lean_dec(x_1); +x_20628 = !lean_is_exclusive(x_20625); +if (x_20628 == 0) +{ +lean_object* x_20629; lean_object* x_20630; uint8_t x_20631; +x_20629 = lean_ctor_get(x_20625, 1); +x_20630 = lean_ctor_get(x_20625, 0); +lean_dec(x_20630); +x_20631 = !lean_is_exclusive(x_20626); +if (x_20631 == 0) +{ 
+lean_object* x_20632; lean_object* x_20633; uint8_t x_20634; lean_object* x_20635; lean_object* x_20636; lean_object* x_20637; lean_object* x_20638; lean_object* x_20639; lean_object* x_20640; +x_20632 = lean_ctor_get(x_20626, 1); +x_20633 = lean_ctor_get(x_20626, 0); +lean_dec(x_20633); +x_20634 = 1; +x_20635 = l_Lean_IR_ToIR_lowerLet___closed__14; +x_20636 = l_Lean_Name_toString(x_153, x_20634, x_20635); +lean_ctor_set_tag(x_20618, 3); +lean_ctor_set(x_20618, 0, x_20636); +x_20637 = l_Lean_IR_ToIR_lowerLet___closed__13; +lean_ctor_set_tag(x_20626, 5); +lean_ctor_set(x_20626, 1, x_20618); +lean_ctor_set(x_20626, 0, x_20637); +x_20638 = l_Lean_IR_ToIR_lowerLet___closed__16; +lean_ctor_set_tag(x_20625, 5); +lean_ctor_set(x_20625, 1, x_20638); +x_20639 = l_Lean_MessageData_ofFormat(x_20625); +x_20640 = l_Lean_throwError___at_Lean_IR_ToIR_lowerLet___spec__1(x_20639, x_20632, x_4, x_5, x_20629); +lean_dec(x_5); +lean_dec(x_4); +lean_dec(x_20632); +return x_20640; +} +else +{ +lean_object* x_20641; uint8_t x_20642; lean_object* x_20643; lean_object* x_20644; lean_object* x_20645; lean_object* x_20646; lean_object* x_20647; lean_object* x_20648; lean_object* x_20649; +x_20641 = lean_ctor_get(x_20626, 1); +lean_inc(x_20641); +lean_dec(x_20626); +x_20642 = 1; +x_20643 = l_Lean_IR_ToIR_lowerLet___closed__14; +x_20644 = l_Lean_Name_toString(x_153, x_20642, x_20643); +lean_ctor_set_tag(x_20618, 3); +lean_ctor_set(x_20618, 0, x_20644); +x_20645 = l_Lean_IR_ToIR_lowerLet___closed__13; +x_20646 = lean_alloc_ctor(5, 2, 0); +lean_ctor_set(x_20646, 0, x_20645); +lean_ctor_set(x_20646, 1, x_20618); +x_20647 = l_Lean_IR_ToIR_lowerLet___closed__16; +lean_ctor_set_tag(x_20625, 5); +lean_ctor_set(x_20625, 1, x_20647); +lean_ctor_set(x_20625, 0, x_20646); +x_20648 = l_Lean_MessageData_ofFormat(x_20625); +x_20649 = l_Lean_throwError___at_Lean_IR_ToIR_lowerLet___spec__1(x_20648, x_20641, x_4, x_5, x_20629); +lean_dec(x_5); +lean_dec(x_4); +lean_dec(x_20641); +return x_20649; +} +} +else +{ +lean_object* x_20650; lean_object* x_20651; lean_object* x_20652; uint8_t x_20653; lean_object* x_20654; lean_object* x_20655; lean_object* x_20656; lean_object* x_20657; lean_object* x_20658; lean_object* x_20659; lean_object* x_20660; lean_object* x_20661; +x_20650 = lean_ctor_get(x_20625, 1); +lean_inc(x_20650); +lean_dec(x_20625); +x_20651 = lean_ctor_get(x_20626, 1); +lean_inc(x_20651); +if (lean_is_exclusive(x_20626)) { + lean_ctor_release(x_20626, 0); + lean_ctor_release(x_20626, 1); + x_20652 = x_20626; +} else { + lean_dec_ref(x_20626); + x_20652 = lean_box(0); +} +x_20653 = 1; +x_20654 = l_Lean_IR_ToIR_lowerLet___closed__14; +x_20655 = l_Lean_Name_toString(x_153, x_20653, x_20654); +lean_ctor_set_tag(x_20618, 3); +lean_ctor_set(x_20618, 0, x_20655); +x_20656 = l_Lean_IR_ToIR_lowerLet___closed__13; +if (lean_is_scalar(x_20652)) { + x_20657 = lean_alloc_ctor(5, 2, 0); +} else { + x_20657 = x_20652; + lean_ctor_set_tag(x_20657, 5); +} +lean_ctor_set(x_20657, 0, x_20656); +lean_ctor_set(x_20657, 1, x_20618); +x_20658 = l_Lean_IR_ToIR_lowerLet___closed__16; +x_20659 = lean_alloc_ctor(5, 2, 0); +lean_ctor_set(x_20659, 0, x_20657); +lean_ctor_set(x_20659, 1, x_20658); +x_20660 = l_Lean_MessageData_ofFormat(x_20659); +x_20661 = l_Lean_throwError___at_Lean_IR_ToIR_lowerLet___spec__1(x_20660, x_20651, x_4, x_5, x_20650); +lean_dec(x_5); +lean_dec(x_4); +lean_dec(x_20651); +return x_20661; +} +} +else +{ +lean_object* x_20662; uint8_t x_20663; +lean_free_object(x_20618); +x_20662 = lean_ctor_get(x_20625, 1); +lean_inc(x_20662); 
+lean_dec(x_20625); +x_20663 = !lean_is_exclusive(x_20626); +if (x_20663 == 0) +{ +lean_object* x_20664; lean_object* x_20665; lean_object* x_20666; lean_object* x_20667; lean_object* x_20668; lean_object* x_20669; uint8_t x_20670; +x_20664 = lean_ctor_get(x_20626, 1); +x_20665 = lean_ctor_get(x_20626, 0); +lean_dec(x_20665); +x_20666 = lean_ctor_get(x_20627, 0); +lean_inc(x_20666); +lean_dec(x_20627); +x_20667 = lean_array_get_size(x_20601); +x_20668 = l_Lean_IR_Decl_params(x_20666); +lean_dec(x_20666); +x_20669 = lean_array_get_size(x_20668); +lean_dec(x_20668); +x_20670 = lean_nat_dec_lt(x_20667, x_20669); +if (x_20670 == 0) +{ +uint8_t x_20671; +x_20671 = lean_nat_dec_eq(x_20667, x_20669); +if (x_20671 == 0) +{ +lean_object* x_20672; lean_object* x_20673; lean_object* x_20674; lean_object* x_20675; lean_object* x_20676; lean_object* x_20677; lean_object* x_20678; lean_object* x_20679; lean_object* x_20680; lean_object* x_20681; lean_object* x_20682; lean_object* x_20683; lean_object* x_20684; lean_object* x_20685; lean_object* x_20686; lean_object* x_20687; +x_20672 = lean_unsigned_to_nat(0u); +x_20673 = l_Array_extract___rarg(x_20601, x_20672, x_20669); +x_20674 = l_Array_extract___rarg(x_20601, x_20669, x_20667); +lean_dec(x_20667); +lean_dec(x_20601); +lean_ctor_set_tag(x_20626, 6); +lean_ctor_set(x_20626, 1, x_20673); +lean_ctor_set(x_20626, 0, x_153); +x_20675 = lean_ctor_get(x_1, 0); +lean_inc(x_20675); +x_20676 = l_Lean_IR_ToIR_bindVar(x_20675, x_20664, x_4, x_5, x_20662); +x_20677 = lean_ctor_get(x_20676, 0); +lean_inc(x_20677); +x_20678 = lean_ctor_get(x_20676, 1); +lean_inc(x_20678); +lean_dec(x_20676); +x_20679 = lean_ctor_get(x_20677, 0); +lean_inc(x_20679); +x_20680 = lean_ctor_get(x_20677, 1); +lean_inc(x_20680); +lean_dec(x_20677); +x_20681 = l_Lean_IR_ToIR_newVar(x_20680, x_4, x_5, x_20678); +x_20682 = lean_ctor_get(x_20681, 0); +lean_inc(x_20682); +x_20683 = lean_ctor_get(x_20681, 1); +lean_inc(x_20683); +lean_dec(x_20681); +x_20684 = lean_ctor_get(x_20682, 0); +lean_inc(x_20684); +x_20685 = lean_ctor_get(x_20682, 1); +lean_inc(x_20685); +lean_dec(x_20682); +x_20686 = lean_ctor_get(x_1, 2); +lean_inc(x_20686); +lean_dec(x_1); +lean_inc(x_5); +lean_inc(x_4); +x_20687 = l_Lean_IR_ToIR_lowerType(x_20686, x_20685, x_4, x_5, x_20683); +if (lean_obj_tag(x_20687) == 0) +{ +lean_object* x_20688; lean_object* x_20689; lean_object* x_20690; lean_object* x_20691; lean_object* x_20692; +x_20688 = lean_ctor_get(x_20687, 0); +lean_inc(x_20688); +x_20689 = lean_ctor_get(x_20687, 1); +lean_inc(x_20689); +lean_dec(x_20687); +x_20690 = lean_ctor_get(x_20688, 0); +lean_inc(x_20690); +x_20691 = lean_ctor_get(x_20688, 1); +lean_inc(x_20691); +lean_dec(x_20688); +x_20692 = l_Lean_IR_ToIR_lowerLet___lambda__3(x_2, x_20684, x_20674, x_20679, x_20626, x_20690, x_20691, x_4, x_5, x_20689); +return x_20692; +} +else +{ +uint8_t x_20693; +lean_dec(x_20684); +lean_dec(x_20679); +lean_dec(x_20626); +lean_dec(x_20674); +lean_dec(x_5); +lean_dec(x_4); +lean_dec(x_2); +x_20693 = !lean_is_exclusive(x_20687); +if (x_20693 == 0) +{ +return x_20687; +} +else +{ +lean_object* x_20694; lean_object* x_20695; lean_object* x_20696; +x_20694 = lean_ctor_get(x_20687, 0); +x_20695 = lean_ctor_get(x_20687, 1); +lean_inc(x_20695); +lean_inc(x_20694); +lean_dec(x_20687); +x_20696 = lean_alloc_ctor(1, 2, 0); +lean_ctor_set(x_20696, 0, x_20694); +lean_ctor_set(x_20696, 1, x_20695); +return x_20696; +} +} +} +else +{ +lean_object* x_20697; lean_object* x_20698; lean_object* x_20699; lean_object* x_20700; lean_object* 
x_20701; lean_object* x_20702; lean_object* x_20703; lean_object* x_20704; +lean_dec(x_20669); +lean_dec(x_20667); +lean_ctor_set_tag(x_20626, 6); +lean_ctor_set(x_20626, 1, x_20601); +lean_ctor_set(x_20626, 0, x_153); +x_20697 = lean_ctor_get(x_1, 0); +lean_inc(x_20697); +x_20698 = l_Lean_IR_ToIR_bindVar(x_20697, x_20664, x_4, x_5, x_20662); +x_20699 = lean_ctor_get(x_20698, 0); +lean_inc(x_20699); +x_20700 = lean_ctor_get(x_20698, 1); +lean_inc(x_20700); +lean_dec(x_20698); +x_20701 = lean_ctor_get(x_20699, 0); +lean_inc(x_20701); +x_20702 = lean_ctor_get(x_20699, 1); +lean_inc(x_20702); +lean_dec(x_20699); +x_20703 = lean_ctor_get(x_1, 2); +lean_inc(x_20703); +lean_dec(x_1); +lean_inc(x_5); +lean_inc(x_4); +x_20704 = l_Lean_IR_ToIR_lowerType(x_20703, x_20702, x_4, x_5, x_20700); +if (lean_obj_tag(x_20704) == 0) +{ +lean_object* x_20705; lean_object* x_20706; lean_object* x_20707; lean_object* x_20708; lean_object* x_20709; +x_20705 = lean_ctor_get(x_20704, 0); +lean_inc(x_20705); +x_20706 = lean_ctor_get(x_20704, 1); +lean_inc(x_20706); +lean_dec(x_20704); +x_20707 = lean_ctor_get(x_20705, 0); +lean_inc(x_20707); +x_20708 = lean_ctor_get(x_20705, 1); +lean_inc(x_20708); +lean_dec(x_20705); +x_20709 = l_Lean_IR_ToIR_lowerLet___lambda__1(x_2, x_20701, x_20626, x_20707, x_20708, x_4, x_5, x_20706); +return x_20709; +} +else +{ +uint8_t x_20710; +lean_dec(x_20701); +lean_dec(x_20626); +lean_dec(x_5); +lean_dec(x_4); +lean_dec(x_2); +x_20710 = !lean_is_exclusive(x_20704); +if (x_20710 == 0) +{ +return x_20704; +} +else +{ +lean_object* x_20711; lean_object* x_20712; lean_object* x_20713; +x_20711 = lean_ctor_get(x_20704, 0); +x_20712 = lean_ctor_get(x_20704, 1); +lean_inc(x_20712); +lean_inc(x_20711); +lean_dec(x_20704); +x_20713 = lean_alloc_ctor(1, 2, 0); +lean_ctor_set(x_20713, 0, x_20711); +lean_ctor_set(x_20713, 1, x_20712); +return x_20713; +} +} +} +} +else +{ +lean_object* x_20714; lean_object* x_20715; lean_object* x_20716; lean_object* x_20717; lean_object* x_20718; lean_object* x_20719; lean_object* x_20720; lean_object* x_20721; +lean_dec(x_20669); +lean_dec(x_20667); +lean_ctor_set_tag(x_20626, 7); +lean_ctor_set(x_20626, 1, x_20601); +lean_ctor_set(x_20626, 0, x_153); +x_20714 = lean_ctor_get(x_1, 0); +lean_inc(x_20714); +lean_dec(x_1); +x_20715 = l_Lean_IR_ToIR_bindVar(x_20714, x_20664, x_4, x_5, x_20662); +x_20716 = lean_ctor_get(x_20715, 0); +lean_inc(x_20716); +x_20717 = lean_ctor_get(x_20715, 1); +lean_inc(x_20717); +lean_dec(x_20715); +x_20718 = lean_ctor_get(x_20716, 0); +lean_inc(x_20718); +x_20719 = lean_ctor_get(x_20716, 1); +lean_inc(x_20719); +lean_dec(x_20716); +x_20720 = lean_box(7); +x_20721 = l_Lean_IR_ToIR_lowerLet___lambda__1(x_2, x_20718, x_20626, x_20720, x_20719, x_4, x_5, x_20717); +return x_20721; +} +} +else +{ +lean_object* x_20722; lean_object* x_20723; lean_object* x_20724; lean_object* x_20725; lean_object* x_20726; uint8_t x_20727; +x_20722 = lean_ctor_get(x_20626, 1); +lean_inc(x_20722); +lean_dec(x_20626); +x_20723 = lean_ctor_get(x_20627, 0); +lean_inc(x_20723); +lean_dec(x_20627); +x_20724 = lean_array_get_size(x_20601); +x_20725 = l_Lean_IR_Decl_params(x_20723); +lean_dec(x_20723); +x_20726 = lean_array_get_size(x_20725); +lean_dec(x_20725); +x_20727 = lean_nat_dec_lt(x_20724, x_20726); +if (x_20727 == 0) +{ +uint8_t x_20728; +x_20728 = lean_nat_dec_eq(x_20724, x_20726); +if (x_20728 == 0) +{ +lean_object* x_20729; lean_object* x_20730; lean_object* x_20731; lean_object* x_20732; lean_object* x_20733; lean_object* x_20734; lean_object* x_20735; 
lean_object* x_20736; lean_object* x_20737; lean_object* x_20738; lean_object* x_20739; lean_object* x_20740; lean_object* x_20741; lean_object* x_20742; lean_object* x_20743; lean_object* x_20744; lean_object* x_20745; +x_20729 = lean_unsigned_to_nat(0u); +x_20730 = l_Array_extract___rarg(x_20601, x_20729, x_20726); +x_20731 = l_Array_extract___rarg(x_20601, x_20726, x_20724); +lean_dec(x_20724); +lean_dec(x_20601); +x_20732 = lean_alloc_ctor(6, 2, 0); +lean_ctor_set(x_20732, 0, x_153); +lean_ctor_set(x_20732, 1, x_20730); +x_20733 = lean_ctor_get(x_1, 0); +lean_inc(x_20733); +x_20734 = l_Lean_IR_ToIR_bindVar(x_20733, x_20722, x_4, x_5, x_20662); +x_20735 = lean_ctor_get(x_20734, 0); +lean_inc(x_20735); +x_20736 = lean_ctor_get(x_20734, 1); +lean_inc(x_20736); +lean_dec(x_20734); +x_20737 = lean_ctor_get(x_20735, 0); +lean_inc(x_20737); +x_20738 = lean_ctor_get(x_20735, 1); +lean_inc(x_20738); +lean_dec(x_20735); +x_20739 = l_Lean_IR_ToIR_newVar(x_20738, x_4, x_5, x_20736); +x_20740 = lean_ctor_get(x_20739, 0); +lean_inc(x_20740); +x_20741 = lean_ctor_get(x_20739, 1); +lean_inc(x_20741); +lean_dec(x_20739); +x_20742 = lean_ctor_get(x_20740, 0); +lean_inc(x_20742); +x_20743 = lean_ctor_get(x_20740, 1); +lean_inc(x_20743); +lean_dec(x_20740); +x_20744 = lean_ctor_get(x_1, 2); +lean_inc(x_20744); +lean_dec(x_1); +lean_inc(x_5); +lean_inc(x_4); +x_20745 = l_Lean_IR_ToIR_lowerType(x_20744, x_20743, x_4, x_5, x_20741); +if (lean_obj_tag(x_20745) == 0) +{ +lean_object* x_20746; lean_object* x_20747; lean_object* x_20748; lean_object* x_20749; lean_object* x_20750; +x_20746 = lean_ctor_get(x_20745, 0); +lean_inc(x_20746); +x_20747 = lean_ctor_get(x_20745, 1); +lean_inc(x_20747); +lean_dec(x_20745); +x_20748 = lean_ctor_get(x_20746, 0); +lean_inc(x_20748); +x_20749 = lean_ctor_get(x_20746, 1); +lean_inc(x_20749); +lean_dec(x_20746); +x_20750 = l_Lean_IR_ToIR_lowerLet___lambda__3(x_2, x_20742, x_20731, x_20737, x_20732, x_20748, x_20749, x_4, x_5, x_20747); +return x_20750; +} +else +{ +lean_object* x_20751; lean_object* x_20752; lean_object* x_20753; lean_object* x_20754; +lean_dec(x_20742); +lean_dec(x_20737); +lean_dec(x_20732); +lean_dec(x_20731); +lean_dec(x_5); +lean_dec(x_4); +lean_dec(x_2); +x_20751 = lean_ctor_get(x_20745, 0); +lean_inc(x_20751); +x_20752 = lean_ctor_get(x_20745, 1); +lean_inc(x_20752); +if (lean_is_exclusive(x_20745)) { + lean_ctor_release(x_20745, 0); + lean_ctor_release(x_20745, 1); + x_20753 = x_20745; +} else { + lean_dec_ref(x_20745); + x_20753 = lean_box(0); +} +if (lean_is_scalar(x_20753)) { + x_20754 = lean_alloc_ctor(1, 2, 0); +} else { + x_20754 = x_20753; +} +lean_ctor_set(x_20754, 0, x_20751); +lean_ctor_set(x_20754, 1, x_20752); +return x_20754; +} +} +else +{ +lean_object* x_20755; lean_object* x_20756; lean_object* x_20757; lean_object* x_20758; lean_object* x_20759; lean_object* x_20760; lean_object* x_20761; lean_object* x_20762; lean_object* x_20763; +lean_dec(x_20726); +lean_dec(x_20724); +x_20755 = lean_alloc_ctor(6, 2, 0); +lean_ctor_set(x_20755, 0, x_153); +lean_ctor_set(x_20755, 1, x_20601); +x_20756 = lean_ctor_get(x_1, 0); +lean_inc(x_20756); +x_20757 = l_Lean_IR_ToIR_bindVar(x_20756, x_20722, x_4, x_5, x_20662); +x_20758 = lean_ctor_get(x_20757, 0); +lean_inc(x_20758); +x_20759 = lean_ctor_get(x_20757, 1); +lean_inc(x_20759); +lean_dec(x_20757); +x_20760 = lean_ctor_get(x_20758, 0); +lean_inc(x_20760); +x_20761 = lean_ctor_get(x_20758, 1); +lean_inc(x_20761); +lean_dec(x_20758); +x_20762 = lean_ctor_get(x_1, 2); +lean_inc(x_20762); 
+lean_dec(x_1); +lean_inc(x_5); +lean_inc(x_4); +x_20763 = l_Lean_IR_ToIR_lowerType(x_20762, x_20761, x_4, x_5, x_20759); +if (lean_obj_tag(x_20763) == 0) +{ +lean_object* x_20764; lean_object* x_20765; lean_object* x_20766; lean_object* x_20767; lean_object* x_20768; +x_20764 = lean_ctor_get(x_20763, 0); +lean_inc(x_20764); +x_20765 = lean_ctor_get(x_20763, 1); +lean_inc(x_20765); +lean_dec(x_20763); +x_20766 = lean_ctor_get(x_20764, 0); +lean_inc(x_20766); +x_20767 = lean_ctor_get(x_20764, 1); +lean_inc(x_20767); +lean_dec(x_20764); +x_20768 = l_Lean_IR_ToIR_lowerLet___lambda__1(x_2, x_20760, x_20755, x_20766, x_20767, x_4, x_5, x_20765); +return x_20768; +} +else +{ +lean_object* x_20769; lean_object* x_20770; lean_object* x_20771; lean_object* x_20772; +lean_dec(x_20760); +lean_dec(x_20755); +lean_dec(x_5); +lean_dec(x_4); +lean_dec(x_2); +x_20769 = lean_ctor_get(x_20763, 0); +lean_inc(x_20769); +x_20770 = lean_ctor_get(x_20763, 1); +lean_inc(x_20770); +if (lean_is_exclusive(x_20763)) { + lean_ctor_release(x_20763, 0); + lean_ctor_release(x_20763, 1); + x_20771 = x_20763; +} else { + lean_dec_ref(x_20763); + x_20771 = lean_box(0); +} +if (lean_is_scalar(x_20771)) { + x_20772 = lean_alloc_ctor(1, 2, 0); +} else { + x_20772 = x_20771; +} +lean_ctor_set(x_20772, 0, x_20769); +lean_ctor_set(x_20772, 1, x_20770); +return x_20772; +} +} +} +else +{ +lean_object* x_20773; lean_object* x_20774; lean_object* x_20775; lean_object* x_20776; lean_object* x_20777; lean_object* x_20778; lean_object* x_20779; lean_object* x_20780; lean_object* x_20781; +lean_dec(x_20726); +lean_dec(x_20724); +x_20773 = lean_alloc_ctor(7, 2, 0); +lean_ctor_set(x_20773, 0, x_153); +lean_ctor_set(x_20773, 1, x_20601); +x_20774 = lean_ctor_get(x_1, 0); +lean_inc(x_20774); +lean_dec(x_1); +x_20775 = l_Lean_IR_ToIR_bindVar(x_20774, x_20722, x_4, x_5, x_20662); +x_20776 = lean_ctor_get(x_20775, 0); +lean_inc(x_20776); +x_20777 = lean_ctor_get(x_20775, 1); +lean_inc(x_20777); +lean_dec(x_20775); +x_20778 = lean_ctor_get(x_20776, 0); +lean_inc(x_20778); +x_20779 = lean_ctor_get(x_20776, 1); +lean_inc(x_20779); +lean_dec(x_20776); +x_20780 = lean_box(7); +x_20781 = l_Lean_IR_ToIR_lowerLet___lambda__1(x_2, x_20778, x_20773, x_20780, x_20779, x_4, x_5, x_20777); +return x_20781; +} +} +} +} +else +{ +lean_object* x_20782; lean_object* x_20783; +lean_free_object(x_20618); +lean_dec(x_20601); +lean_dec(x_153); +lean_dec(x_5); +lean_dec(x_4); +lean_dec(x_2); +lean_dec(x_1); +x_20782 = lean_box(13); +lean_ctor_set(x_20603, 0, x_20782); +if (lean_is_scalar(x_20612)) { + x_20783 = lean_alloc_ctor(0, 2, 0); +} else { + x_20783 = x_20612; +} +lean_ctor_set(x_20783, 0, x_20603); +lean_ctor_set(x_20783, 1, x_20611); +return x_20783; +} +} +else +{ +lean_object* x_20784; lean_object* x_20785; lean_object* x_20786; +lean_free_object(x_20618); +lean_dec(x_20612); +lean_free_object(x_20603); +lean_dec(x_153); +x_20784 = l_Lean_IR_instInhabitedArg; +x_20785 = lean_unsigned_to_nat(2u); +x_20786 = lean_array_get(x_20784, x_20601, x_20785); +lean_dec(x_20601); +if (lean_obj_tag(x_20786) == 0) +{ +lean_object* x_20787; lean_object* x_20788; lean_object* x_20789; lean_object* x_20790; lean_object* x_20791; lean_object* x_20792; lean_object* x_20793; +x_20787 = lean_ctor_get(x_20786, 0); +lean_inc(x_20787); +lean_dec(x_20786); +x_20788 = lean_ctor_get(x_1, 0); +lean_inc(x_20788); +lean_dec(x_1); +x_20789 = l_Lean_IR_ToIR_bindVarToVarId(x_20788, x_20787, x_20607, x_4, x_5, x_20611); +x_20790 = lean_ctor_get(x_20789, 0); +lean_inc(x_20790); +x_20791 = 
lean_ctor_get(x_20789, 1); +lean_inc(x_20791); +lean_dec(x_20789); +x_20792 = lean_ctor_get(x_20790, 1); +lean_inc(x_20792); +lean_dec(x_20790); +x_20793 = l_Lean_IR_ToIR_lowerCode(x_2, x_20792, x_4, x_5, x_20791); +return x_20793; +} +else +{ +lean_object* x_20794; lean_object* x_20795; lean_object* x_20796; lean_object* x_20797; lean_object* x_20798; lean_object* x_20799; +x_20794 = lean_ctor_get(x_1, 0); +lean_inc(x_20794); +lean_dec(x_1); +x_20795 = l_Lean_IR_ToIR_bindErased(x_20794, x_20607, x_4, x_5, x_20611); +x_20796 = lean_ctor_get(x_20795, 0); +lean_inc(x_20796); +x_20797 = lean_ctor_get(x_20795, 1); +lean_inc(x_20797); +lean_dec(x_20795); +x_20798 = lean_ctor_get(x_20796, 1); +lean_inc(x_20798); +lean_dec(x_20796); +x_20799 = l_Lean_IR_ToIR_lowerCode(x_2, x_20798, x_4, x_5, x_20797); +return x_20799; +} +} +} +else +{ +lean_object* x_20800; uint8_t x_20801; +lean_dec(x_20618); +x_20800 = l_Lean_IR_ToIR_lowerLet___closed__9; +x_20801 = lean_name_eq(x_153, x_20800); +if (x_20801 == 0) +{ +lean_object* x_20802; uint8_t x_20803; +x_20802 = l_Lean_IR_ToIR_lowerLet___closed__11; +x_20803 = lean_name_eq(x_153, x_20802); +if (x_20803 == 0) +{ +lean_object* x_20804; lean_object* x_20805; lean_object* x_20806; +lean_dec(x_20612); +lean_free_object(x_20603); +lean_inc(x_153); +x_20804 = l_Lean_IR_ToIR_findDecl(x_153, x_20607, x_4, x_5, x_20611); +x_20805 = lean_ctor_get(x_20804, 0); +lean_inc(x_20805); +x_20806 = lean_ctor_get(x_20805, 0); +lean_inc(x_20806); +if (lean_obj_tag(x_20806) == 0) +{ +lean_object* x_20807; lean_object* x_20808; lean_object* x_20809; lean_object* x_20810; uint8_t x_20811; lean_object* x_20812; lean_object* x_20813; lean_object* x_20814; lean_object* x_20815; lean_object* x_20816; lean_object* x_20817; lean_object* x_20818; lean_object* x_20819; lean_object* x_20820; +lean_dec(x_20601); +lean_dec(x_2); +lean_dec(x_1); +x_20807 = lean_ctor_get(x_20804, 1); +lean_inc(x_20807); +if (lean_is_exclusive(x_20804)) { + lean_ctor_release(x_20804, 0); + lean_ctor_release(x_20804, 1); + x_20808 = x_20804; +} else { + lean_dec_ref(x_20804); + x_20808 = lean_box(0); +} +x_20809 = lean_ctor_get(x_20805, 1); +lean_inc(x_20809); +if (lean_is_exclusive(x_20805)) { + lean_ctor_release(x_20805, 0); + lean_ctor_release(x_20805, 1); + x_20810 = x_20805; +} else { + lean_dec_ref(x_20805); + x_20810 = lean_box(0); +} +x_20811 = 1; +x_20812 = l_Lean_IR_ToIR_lowerLet___closed__14; +x_20813 = l_Lean_Name_toString(x_153, x_20811, x_20812); +x_20814 = lean_alloc_ctor(3, 1, 0); +lean_ctor_set(x_20814, 0, x_20813); +x_20815 = l_Lean_IR_ToIR_lowerLet___closed__13; +if (lean_is_scalar(x_20810)) { + x_20816 = lean_alloc_ctor(5, 2, 0); +} else { + x_20816 = x_20810; + lean_ctor_set_tag(x_20816, 5); +} +lean_ctor_set(x_20816, 0, x_20815); +lean_ctor_set(x_20816, 1, x_20814); +x_20817 = l_Lean_IR_ToIR_lowerLet___closed__16; +if (lean_is_scalar(x_20808)) { + x_20818 = lean_alloc_ctor(5, 2, 0); +} else { + x_20818 = x_20808; + lean_ctor_set_tag(x_20818, 5); +} +lean_ctor_set(x_20818, 0, x_20816); +lean_ctor_set(x_20818, 1, x_20817); +x_20819 = l_Lean_MessageData_ofFormat(x_20818); +x_20820 = l_Lean_throwError___at_Lean_IR_ToIR_lowerLet___spec__1(x_20819, x_20809, x_4, x_5, x_20807); +lean_dec(x_5); +lean_dec(x_4); +lean_dec(x_20809); +return x_20820; +} +else +{ +lean_object* x_20821; lean_object* x_20822; lean_object* x_20823; lean_object* x_20824; lean_object* x_20825; lean_object* x_20826; lean_object* x_20827; uint8_t x_20828; +x_20821 = lean_ctor_get(x_20804, 1); +lean_inc(x_20821); 
+lean_dec(x_20804); +x_20822 = lean_ctor_get(x_20805, 1); +lean_inc(x_20822); +if (lean_is_exclusive(x_20805)) { + lean_ctor_release(x_20805, 0); + lean_ctor_release(x_20805, 1); + x_20823 = x_20805; +} else { + lean_dec_ref(x_20805); + x_20823 = lean_box(0); +} +x_20824 = lean_ctor_get(x_20806, 0); +lean_inc(x_20824); +lean_dec(x_20806); +x_20825 = lean_array_get_size(x_20601); +x_20826 = l_Lean_IR_Decl_params(x_20824); +lean_dec(x_20824); +x_20827 = lean_array_get_size(x_20826); +lean_dec(x_20826); +x_20828 = lean_nat_dec_lt(x_20825, x_20827); +if (x_20828 == 0) +{ +uint8_t x_20829; +x_20829 = lean_nat_dec_eq(x_20825, x_20827); +if (x_20829 == 0) +{ +lean_object* x_20830; lean_object* x_20831; lean_object* x_20832; lean_object* x_20833; lean_object* x_20834; lean_object* x_20835; lean_object* x_20836; lean_object* x_20837; lean_object* x_20838; lean_object* x_20839; lean_object* x_20840; lean_object* x_20841; lean_object* x_20842; lean_object* x_20843; lean_object* x_20844; lean_object* x_20845; lean_object* x_20846; +x_20830 = lean_unsigned_to_nat(0u); +x_20831 = l_Array_extract___rarg(x_20601, x_20830, x_20827); +x_20832 = l_Array_extract___rarg(x_20601, x_20827, x_20825); +lean_dec(x_20825); +lean_dec(x_20601); +if (lean_is_scalar(x_20823)) { + x_20833 = lean_alloc_ctor(6, 2, 0); +} else { + x_20833 = x_20823; + lean_ctor_set_tag(x_20833, 6); +} +lean_ctor_set(x_20833, 0, x_153); +lean_ctor_set(x_20833, 1, x_20831); +x_20834 = lean_ctor_get(x_1, 0); +lean_inc(x_20834); +x_20835 = l_Lean_IR_ToIR_bindVar(x_20834, x_20822, x_4, x_5, x_20821); +x_20836 = lean_ctor_get(x_20835, 0); +lean_inc(x_20836); +x_20837 = lean_ctor_get(x_20835, 1); +lean_inc(x_20837); +lean_dec(x_20835); +x_20838 = lean_ctor_get(x_20836, 0); +lean_inc(x_20838); +x_20839 = lean_ctor_get(x_20836, 1); +lean_inc(x_20839); +lean_dec(x_20836); +x_20840 = l_Lean_IR_ToIR_newVar(x_20839, x_4, x_5, x_20837); +x_20841 = lean_ctor_get(x_20840, 0); +lean_inc(x_20841); +x_20842 = lean_ctor_get(x_20840, 1); +lean_inc(x_20842); +lean_dec(x_20840); +x_20843 = lean_ctor_get(x_20841, 0); +lean_inc(x_20843); +x_20844 = lean_ctor_get(x_20841, 1); +lean_inc(x_20844); +lean_dec(x_20841); +x_20845 = lean_ctor_get(x_1, 2); +lean_inc(x_20845); +lean_dec(x_1); +lean_inc(x_5); +lean_inc(x_4); +x_20846 = l_Lean_IR_ToIR_lowerType(x_20845, x_20844, x_4, x_5, x_20842); +if (lean_obj_tag(x_20846) == 0) +{ +lean_object* x_20847; lean_object* x_20848; lean_object* x_20849; lean_object* x_20850; lean_object* x_20851; +x_20847 = lean_ctor_get(x_20846, 0); +lean_inc(x_20847); +x_20848 = lean_ctor_get(x_20846, 1); +lean_inc(x_20848); +lean_dec(x_20846); +x_20849 = lean_ctor_get(x_20847, 0); +lean_inc(x_20849); +x_20850 = lean_ctor_get(x_20847, 1); +lean_inc(x_20850); +lean_dec(x_20847); +x_20851 = l_Lean_IR_ToIR_lowerLet___lambda__3(x_2, x_20843, x_20832, x_20838, x_20833, x_20849, x_20850, x_4, x_5, x_20848); +return x_20851; +} +else +{ +lean_object* x_20852; lean_object* x_20853; lean_object* x_20854; lean_object* x_20855; +lean_dec(x_20843); +lean_dec(x_20838); +lean_dec(x_20833); +lean_dec(x_20832); +lean_dec(x_5); +lean_dec(x_4); +lean_dec(x_2); +x_20852 = lean_ctor_get(x_20846, 0); +lean_inc(x_20852); +x_20853 = lean_ctor_get(x_20846, 1); +lean_inc(x_20853); +if (lean_is_exclusive(x_20846)) { + lean_ctor_release(x_20846, 0); + lean_ctor_release(x_20846, 1); + x_20854 = x_20846; +} else { + lean_dec_ref(x_20846); + x_20854 = lean_box(0); +} +if (lean_is_scalar(x_20854)) { + x_20855 = lean_alloc_ctor(1, 2, 0); +} else { + x_20855 = x_20854; +} 
+lean_ctor_set(x_20855, 0, x_20852); +lean_ctor_set(x_20855, 1, x_20853); +return x_20855; +} +} +else +{ +lean_object* x_20856; lean_object* x_20857; lean_object* x_20858; lean_object* x_20859; lean_object* x_20860; lean_object* x_20861; lean_object* x_20862; lean_object* x_20863; lean_object* x_20864; +lean_dec(x_20827); +lean_dec(x_20825); +if (lean_is_scalar(x_20823)) { + x_20856 = lean_alloc_ctor(6, 2, 0); +} else { + x_20856 = x_20823; + lean_ctor_set_tag(x_20856, 6); +} +lean_ctor_set(x_20856, 0, x_153); +lean_ctor_set(x_20856, 1, x_20601); +x_20857 = lean_ctor_get(x_1, 0); +lean_inc(x_20857); +x_20858 = l_Lean_IR_ToIR_bindVar(x_20857, x_20822, x_4, x_5, x_20821); +x_20859 = lean_ctor_get(x_20858, 0); +lean_inc(x_20859); +x_20860 = lean_ctor_get(x_20858, 1); +lean_inc(x_20860); +lean_dec(x_20858); +x_20861 = lean_ctor_get(x_20859, 0); +lean_inc(x_20861); +x_20862 = lean_ctor_get(x_20859, 1); +lean_inc(x_20862); +lean_dec(x_20859); +x_20863 = lean_ctor_get(x_1, 2); +lean_inc(x_20863); +lean_dec(x_1); +lean_inc(x_5); +lean_inc(x_4); +x_20864 = l_Lean_IR_ToIR_lowerType(x_20863, x_20862, x_4, x_5, x_20860); +if (lean_obj_tag(x_20864) == 0) +{ +lean_object* x_20865; lean_object* x_20866; lean_object* x_20867; lean_object* x_20868; lean_object* x_20869; +x_20865 = lean_ctor_get(x_20864, 0); +lean_inc(x_20865); +x_20866 = lean_ctor_get(x_20864, 1); +lean_inc(x_20866); +lean_dec(x_20864); +x_20867 = lean_ctor_get(x_20865, 0); +lean_inc(x_20867); +x_20868 = lean_ctor_get(x_20865, 1); +lean_inc(x_20868); +lean_dec(x_20865); +x_20869 = l_Lean_IR_ToIR_lowerLet___lambda__1(x_2, x_20861, x_20856, x_20867, x_20868, x_4, x_5, x_20866); +return x_20869; +} +else +{ +lean_object* x_20870; lean_object* x_20871; lean_object* x_20872; lean_object* x_20873; +lean_dec(x_20861); +lean_dec(x_20856); +lean_dec(x_5); +lean_dec(x_4); +lean_dec(x_2); +x_20870 = lean_ctor_get(x_20864, 0); +lean_inc(x_20870); +x_20871 = lean_ctor_get(x_20864, 1); +lean_inc(x_20871); +if (lean_is_exclusive(x_20864)) { + lean_ctor_release(x_20864, 0); + lean_ctor_release(x_20864, 1); + x_20872 = x_20864; +} else { + lean_dec_ref(x_20864); + x_20872 = lean_box(0); +} +if (lean_is_scalar(x_20872)) { + x_20873 = lean_alloc_ctor(1, 2, 0); +} else { + x_20873 = x_20872; +} +lean_ctor_set(x_20873, 0, x_20870); +lean_ctor_set(x_20873, 1, x_20871); +return x_20873; +} +} +} +else +{ +lean_object* x_20874; lean_object* x_20875; lean_object* x_20876; lean_object* x_20877; lean_object* x_20878; lean_object* x_20879; lean_object* x_20880; lean_object* x_20881; lean_object* x_20882; +lean_dec(x_20827); +lean_dec(x_20825); +if (lean_is_scalar(x_20823)) { + x_20874 = lean_alloc_ctor(7, 2, 0); +} else { + x_20874 = x_20823; + lean_ctor_set_tag(x_20874, 7); +} +lean_ctor_set(x_20874, 0, x_153); +lean_ctor_set(x_20874, 1, x_20601); +x_20875 = lean_ctor_get(x_1, 0); +lean_inc(x_20875); +lean_dec(x_1); +x_20876 = l_Lean_IR_ToIR_bindVar(x_20875, x_20822, x_4, x_5, x_20821); +x_20877 = lean_ctor_get(x_20876, 0); +lean_inc(x_20877); +x_20878 = lean_ctor_get(x_20876, 1); +lean_inc(x_20878); +lean_dec(x_20876); +x_20879 = lean_ctor_get(x_20877, 0); +lean_inc(x_20879); +x_20880 = lean_ctor_get(x_20877, 1); +lean_inc(x_20880); +lean_dec(x_20877); +x_20881 = lean_box(7); +x_20882 = l_Lean_IR_ToIR_lowerLet___lambda__1(x_2, x_20879, x_20874, x_20881, x_20880, x_4, x_5, x_20878); +return x_20882; +} +} +} +else +{ +lean_object* x_20883; lean_object* x_20884; +lean_dec(x_20601); +lean_dec(x_153); +lean_dec(x_5); +lean_dec(x_4); +lean_dec(x_2); +lean_dec(x_1); 
+x_20883 = lean_box(13); +lean_ctor_set(x_20603, 0, x_20883); +if (lean_is_scalar(x_20612)) { + x_20884 = lean_alloc_ctor(0, 2, 0); +} else { + x_20884 = x_20612; +} +lean_ctor_set(x_20884, 0, x_20603); +lean_ctor_set(x_20884, 1, x_20611); +return x_20884; +} +} +else +{ +lean_object* x_20885; lean_object* x_20886; lean_object* x_20887; +lean_dec(x_20612); +lean_free_object(x_20603); +lean_dec(x_153); +x_20885 = l_Lean_IR_instInhabitedArg; +x_20886 = lean_unsigned_to_nat(2u); +x_20887 = lean_array_get(x_20885, x_20601, x_20886); +lean_dec(x_20601); +if (lean_obj_tag(x_20887) == 0) +{ +lean_object* x_20888; lean_object* x_20889; lean_object* x_20890; lean_object* x_20891; lean_object* x_20892; lean_object* x_20893; lean_object* x_20894; +x_20888 = lean_ctor_get(x_20887, 0); +lean_inc(x_20888); +lean_dec(x_20887); +x_20889 = lean_ctor_get(x_1, 0); +lean_inc(x_20889); +lean_dec(x_1); +x_20890 = l_Lean_IR_ToIR_bindVarToVarId(x_20889, x_20888, x_20607, x_4, x_5, x_20611); +x_20891 = lean_ctor_get(x_20890, 0); +lean_inc(x_20891); +x_20892 = lean_ctor_get(x_20890, 1); +lean_inc(x_20892); +lean_dec(x_20890); +x_20893 = lean_ctor_get(x_20891, 1); +lean_inc(x_20893); +lean_dec(x_20891); +x_20894 = l_Lean_IR_ToIR_lowerCode(x_2, x_20893, x_4, x_5, x_20892); +return x_20894; +} +else +{ +lean_object* x_20895; lean_object* x_20896; lean_object* x_20897; lean_object* x_20898; lean_object* x_20899; lean_object* x_20900; +x_20895 = lean_ctor_get(x_1, 0); +lean_inc(x_20895); +lean_dec(x_1); +x_20896 = l_Lean_IR_ToIR_bindErased(x_20895, x_20607, x_4, x_5, x_20611); +x_20897 = lean_ctor_get(x_20896, 0); +lean_inc(x_20897); +x_20898 = lean_ctor_get(x_20896, 1); +lean_inc(x_20898); +lean_dec(x_20896); +x_20899 = lean_ctor_get(x_20897, 1); +lean_inc(x_20899); +lean_dec(x_20897); +x_20900 = l_Lean_IR_ToIR_lowerCode(x_2, x_20899, x_4, x_5, x_20898); +return x_20900; +} +} +} +} +case 1: +{ +lean_object* x_20901; lean_object* x_20902; lean_object* x_20932; lean_object* x_20933; +lean_dec(x_20618); +lean_dec(x_20613); +lean_dec(x_20593); +lean_dec(x_20592); +lean_inc(x_153); +x_20932 = l_Lean_Compiler_LCNF_getMonoDecl_x3f(x_153, x_4, x_5, x_20611); +x_20933 = lean_ctor_get(x_20932, 0); +lean_inc(x_20933); +if (lean_obj_tag(x_20933) == 0) +{ +lean_object* x_20934; lean_object* x_20935; +x_20934 = lean_ctor_get(x_20932, 1); +lean_inc(x_20934); +lean_dec(x_20932); +x_20935 = lean_box(0); +lean_ctor_set(x_20603, 0, x_20935); +x_20901 = x_20603; +x_20902 = x_20934; +goto block_20931; +} +else +{ +uint8_t x_20936; +lean_free_object(x_20603); +x_20936 = !lean_is_exclusive(x_20932); +if (x_20936 == 0) +{ +lean_object* x_20937; lean_object* x_20938; uint8_t x_20939; +x_20937 = lean_ctor_get(x_20932, 1); +x_20938 = lean_ctor_get(x_20932, 0); +lean_dec(x_20938); +x_20939 = !lean_is_exclusive(x_20933); +if (x_20939 == 0) +{ +lean_object* x_20940; lean_object* x_20941; lean_object* x_20942; lean_object* x_20943; uint8_t x_20944; +x_20940 = lean_ctor_get(x_20933, 0); +x_20941 = lean_array_get_size(x_20601); +x_20942 = lean_ctor_get(x_20940, 3); +lean_inc(x_20942); +lean_dec(x_20940); +x_20943 = lean_array_get_size(x_20942); +lean_dec(x_20942); +x_20944 = lean_nat_dec_lt(x_20941, x_20943); +if (x_20944 == 0) +{ +uint8_t x_20945; +x_20945 = lean_nat_dec_eq(x_20941, x_20943); +if (x_20945 == 0) +{ +lean_object* x_20946; lean_object* x_20947; lean_object* x_20948; lean_object* x_20949; lean_object* x_20950; lean_object* x_20951; lean_object* x_20952; lean_object* x_20953; lean_object* x_20954; lean_object* x_20955; lean_object* 
x_20956; lean_object* x_20957; lean_object* x_20958; lean_object* x_20959; lean_object* x_20960; lean_object* x_20961; +x_20946 = lean_unsigned_to_nat(0u); +x_20947 = l_Array_extract___rarg(x_20601, x_20946, x_20943); +x_20948 = l_Array_extract___rarg(x_20601, x_20943, x_20941); +lean_dec(x_20941); +lean_inc(x_153); +lean_ctor_set_tag(x_20932, 6); +lean_ctor_set(x_20932, 1, x_20947); +lean_ctor_set(x_20932, 0, x_153); +x_20949 = lean_ctor_get(x_1, 0); +lean_inc(x_20949); +x_20950 = l_Lean_IR_ToIR_bindVar(x_20949, x_20607, x_4, x_5, x_20937); +x_20951 = lean_ctor_get(x_20950, 0); +lean_inc(x_20951); +x_20952 = lean_ctor_get(x_20950, 1); +lean_inc(x_20952); +lean_dec(x_20950); +x_20953 = lean_ctor_get(x_20951, 0); +lean_inc(x_20953); +x_20954 = lean_ctor_get(x_20951, 1); +lean_inc(x_20954); +lean_dec(x_20951); +x_20955 = l_Lean_IR_ToIR_newVar(x_20954, x_4, x_5, x_20952); +x_20956 = lean_ctor_get(x_20955, 0); +lean_inc(x_20956); +x_20957 = lean_ctor_get(x_20955, 1); +lean_inc(x_20957); +lean_dec(x_20955); +x_20958 = lean_ctor_get(x_20956, 0); +lean_inc(x_20958); +x_20959 = lean_ctor_get(x_20956, 1); +lean_inc(x_20959); +lean_dec(x_20956); +x_20960 = lean_ctor_get(x_1, 2); +lean_inc(x_20960); +lean_inc(x_5); +lean_inc(x_4); +x_20961 = l_Lean_IR_ToIR_lowerType(x_20960, x_20959, x_4, x_5, x_20957); +if (lean_obj_tag(x_20961) == 0) +{ +lean_object* x_20962; lean_object* x_20963; lean_object* x_20964; lean_object* x_20965; lean_object* x_20966; +x_20962 = lean_ctor_get(x_20961, 0); +lean_inc(x_20962); +x_20963 = lean_ctor_get(x_20961, 1); +lean_inc(x_20963); +lean_dec(x_20961); +x_20964 = lean_ctor_get(x_20962, 0); +lean_inc(x_20964); +x_20965 = lean_ctor_get(x_20962, 1); +lean_inc(x_20965); +lean_dec(x_20962); +lean_inc(x_5); +lean_inc(x_4); +lean_inc(x_2); +x_20966 = l_Lean_IR_ToIR_lowerLet___lambda__3(x_2, x_20958, x_20948, x_20953, x_20932, x_20964, x_20965, x_4, x_5, x_20963); +if (lean_obj_tag(x_20966) == 0) +{ +lean_object* x_20967; lean_object* x_20968; uint8_t x_20969; +x_20967 = lean_ctor_get(x_20966, 0); +lean_inc(x_20967); +x_20968 = lean_ctor_get(x_20966, 1); +lean_inc(x_20968); +lean_dec(x_20966); +x_20969 = !lean_is_exclusive(x_20967); +if (x_20969 == 0) +{ +lean_object* x_20970; +x_20970 = lean_ctor_get(x_20967, 0); +lean_ctor_set(x_20933, 0, x_20970); +lean_ctor_set(x_20967, 0, x_20933); +x_20901 = x_20967; +x_20902 = x_20968; +goto block_20931; +} +else +{ +lean_object* x_20971; lean_object* x_20972; lean_object* x_20973; +x_20971 = lean_ctor_get(x_20967, 0); +x_20972 = lean_ctor_get(x_20967, 1); +lean_inc(x_20972); +lean_inc(x_20971); +lean_dec(x_20967); +lean_ctor_set(x_20933, 0, x_20971); +x_20973 = lean_alloc_ctor(0, 2, 0); +lean_ctor_set(x_20973, 0, x_20933); +lean_ctor_set(x_20973, 1, x_20972); +x_20901 = x_20973; +x_20902 = x_20968; +goto block_20931; +} +} +else +{ +uint8_t x_20974; +lean_free_object(x_20933); +lean_dec(x_20612); +lean_dec(x_20601); +lean_dec(x_153); +lean_dec(x_5); +lean_dec(x_4); +lean_dec(x_2); +lean_dec(x_1); +x_20974 = !lean_is_exclusive(x_20966); +if (x_20974 == 0) +{ +return x_20966; +} +else +{ +lean_object* x_20975; lean_object* x_20976; lean_object* x_20977; +x_20975 = lean_ctor_get(x_20966, 0); +x_20976 = lean_ctor_get(x_20966, 1); +lean_inc(x_20976); +lean_inc(x_20975); +lean_dec(x_20966); +x_20977 = lean_alloc_ctor(1, 2, 0); +lean_ctor_set(x_20977, 0, x_20975); +lean_ctor_set(x_20977, 1, x_20976); +return x_20977; +} +} +} +else +{ +uint8_t x_20978; +lean_dec(x_20958); +lean_dec(x_20953); +lean_dec(x_20932); +lean_dec(x_20948); 
+lean_free_object(x_20933); +lean_dec(x_20612); +lean_dec(x_20601); +lean_dec(x_153); +lean_dec(x_5); +lean_dec(x_4); +lean_dec(x_2); +lean_dec(x_1); +x_20978 = !lean_is_exclusive(x_20961); +if (x_20978 == 0) +{ +return x_20961; +} +else +{ +lean_object* x_20979; lean_object* x_20980; lean_object* x_20981; +x_20979 = lean_ctor_get(x_20961, 0); +x_20980 = lean_ctor_get(x_20961, 1); +lean_inc(x_20980); +lean_inc(x_20979); +lean_dec(x_20961); +x_20981 = lean_alloc_ctor(1, 2, 0); +lean_ctor_set(x_20981, 0, x_20979); +lean_ctor_set(x_20981, 1, x_20980); +return x_20981; +} +} +} +else +{ +lean_object* x_20982; lean_object* x_20983; lean_object* x_20984; lean_object* x_20985; lean_object* x_20986; lean_object* x_20987; lean_object* x_20988; lean_object* x_20989; +lean_dec(x_20943); +lean_dec(x_20941); +lean_inc(x_20601); +lean_inc(x_153); +lean_ctor_set_tag(x_20932, 6); +lean_ctor_set(x_20932, 1, x_20601); +lean_ctor_set(x_20932, 0, x_153); +x_20982 = lean_ctor_get(x_1, 0); +lean_inc(x_20982); +x_20983 = l_Lean_IR_ToIR_bindVar(x_20982, x_20607, x_4, x_5, x_20937); +x_20984 = lean_ctor_get(x_20983, 0); +lean_inc(x_20984); +x_20985 = lean_ctor_get(x_20983, 1); +lean_inc(x_20985); +lean_dec(x_20983); +x_20986 = lean_ctor_get(x_20984, 0); +lean_inc(x_20986); +x_20987 = lean_ctor_get(x_20984, 1); +lean_inc(x_20987); +lean_dec(x_20984); +x_20988 = lean_ctor_get(x_1, 2); +lean_inc(x_20988); +lean_inc(x_5); +lean_inc(x_4); +x_20989 = l_Lean_IR_ToIR_lowerType(x_20988, x_20987, x_4, x_5, x_20985); +if (lean_obj_tag(x_20989) == 0) +{ +lean_object* x_20990; lean_object* x_20991; lean_object* x_20992; lean_object* x_20993; lean_object* x_20994; +x_20990 = lean_ctor_get(x_20989, 0); +lean_inc(x_20990); +x_20991 = lean_ctor_get(x_20989, 1); +lean_inc(x_20991); +lean_dec(x_20989); +x_20992 = lean_ctor_get(x_20990, 0); +lean_inc(x_20992); +x_20993 = lean_ctor_get(x_20990, 1); +lean_inc(x_20993); +lean_dec(x_20990); +lean_inc(x_5); +lean_inc(x_4); +lean_inc(x_2); +x_20994 = l_Lean_IR_ToIR_lowerLet___lambda__1(x_2, x_20986, x_20932, x_20992, x_20993, x_4, x_5, x_20991); +if (lean_obj_tag(x_20994) == 0) +{ +lean_object* x_20995; lean_object* x_20996; uint8_t x_20997; +x_20995 = lean_ctor_get(x_20994, 0); +lean_inc(x_20995); +x_20996 = lean_ctor_get(x_20994, 1); +lean_inc(x_20996); +lean_dec(x_20994); +x_20997 = !lean_is_exclusive(x_20995); +if (x_20997 == 0) +{ +lean_object* x_20998; +x_20998 = lean_ctor_get(x_20995, 0); +lean_ctor_set(x_20933, 0, x_20998); +lean_ctor_set(x_20995, 0, x_20933); +x_20901 = x_20995; +x_20902 = x_20996; +goto block_20931; +} +else +{ +lean_object* x_20999; lean_object* x_21000; lean_object* x_21001; +x_20999 = lean_ctor_get(x_20995, 0); +x_21000 = lean_ctor_get(x_20995, 1); +lean_inc(x_21000); +lean_inc(x_20999); +lean_dec(x_20995); +lean_ctor_set(x_20933, 0, x_20999); +x_21001 = lean_alloc_ctor(0, 2, 0); +lean_ctor_set(x_21001, 0, x_20933); +lean_ctor_set(x_21001, 1, x_21000); +x_20901 = x_21001; +x_20902 = x_20996; +goto block_20931; +} +} +else +{ +uint8_t x_21002; +lean_free_object(x_20933); +lean_dec(x_20612); +lean_dec(x_20601); +lean_dec(x_153); +lean_dec(x_5); +lean_dec(x_4); +lean_dec(x_2); +lean_dec(x_1); +x_21002 = !lean_is_exclusive(x_20994); +if (x_21002 == 0) +{ +return x_20994; +} +else +{ +lean_object* x_21003; lean_object* x_21004; lean_object* x_21005; +x_21003 = lean_ctor_get(x_20994, 0); +x_21004 = lean_ctor_get(x_20994, 1); +lean_inc(x_21004); +lean_inc(x_21003); +lean_dec(x_20994); +x_21005 = lean_alloc_ctor(1, 2, 0); +lean_ctor_set(x_21005, 0, x_21003); 
+lean_ctor_set(x_21005, 1, x_21004); +return x_21005; +} +} +} +else +{ +uint8_t x_21006; +lean_dec(x_20986); +lean_dec(x_20932); +lean_free_object(x_20933); +lean_dec(x_20612); +lean_dec(x_20601); +lean_dec(x_153); +lean_dec(x_5); +lean_dec(x_4); +lean_dec(x_2); +lean_dec(x_1); +x_21006 = !lean_is_exclusive(x_20989); +if (x_21006 == 0) +{ +return x_20989; +} +else +{ +lean_object* x_21007; lean_object* x_21008; lean_object* x_21009; +x_21007 = lean_ctor_get(x_20989, 0); +x_21008 = lean_ctor_get(x_20989, 1); +lean_inc(x_21008); +lean_inc(x_21007); +lean_dec(x_20989); +x_21009 = lean_alloc_ctor(1, 2, 0); +lean_ctor_set(x_21009, 0, x_21007); +lean_ctor_set(x_21009, 1, x_21008); +return x_21009; +} +} +} +} +else +{ +lean_object* x_21010; lean_object* x_21011; lean_object* x_21012; lean_object* x_21013; lean_object* x_21014; lean_object* x_21015; lean_object* x_21016; lean_object* x_21017; +lean_dec(x_20943); +lean_dec(x_20941); +lean_inc(x_20601); +lean_inc(x_153); +lean_ctor_set_tag(x_20932, 7); +lean_ctor_set(x_20932, 1, x_20601); +lean_ctor_set(x_20932, 0, x_153); +x_21010 = lean_ctor_get(x_1, 0); +lean_inc(x_21010); +x_21011 = l_Lean_IR_ToIR_bindVar(x_21010, x_20607, x_4, x_5, x_20937); +x_21012 = lean_ctor_get(x_21011, 0); +lean_inc(x_21012); +x_21013 = lean_ctor_get(x_21011, 1); +lean_inc(x_21013); +lean_dec(x_21011); +x_21014 = lean_ctor_get(x_21012, 0); +lean_inc(x_21014); +x_21015 = lean_ctor_get(x_21012, 1); +lean_inc(x_21015); +lean_dec(x_21012); +x_21016 = lean_box(7); +lean_inc(x_5); +lean_inc(x_4); +lean_inc(x_2); +x_21017 = l_Lean_IR_ToIR_lowerLet___lambda__1(x_2, x_21014, x_20932, x_21016, x_21015, x_4, x_5, x_21013); +if (lean_obj_tag(x_21017) == 0) +{ +lean_object* x_21018; lean_object* x_21019; uint8_t x_21020; +x_21018 = lean_ctor_get(x_21017, 0); +lean_inc(x_21018); +x_21019 = lean_ctor_get(x_21017, 1); +lean_inc(x_21019); +lean_dec(x_21017); +x_21020 = !lean_is_exclusive(x_21018); +if (x_21020 == 0) +{ +lean_object* x_21021; +x_21021 = lean_ctor_get(x_21018, 0); +lean_ctor_set(x_20933, 0, x_21021); +lean_ctor_set(x_21018, 0, x_20933); +x_20901 = x_21018; +x_20902 = x_21019; +goto block_20931; +} +else +{ +lean_object* x_21022; lean_object* x_21023; lean_object* x_21024; +x_21022 = lean_ctor_get(x_21018, 0); +x_21023 = lean_ctor_get(x_21018, 1); +lean_inc(x_21023); +lean_inc(x_21022); +lean_dec(x_21018); +lean_ctor_set(x_20933, 0, x_21022); +x_21024 = lean_alloc_ctor(0, 2, 0); +lean_ctor_set(x_21024, 0, x_20933); +lean_ctor_set(x_21024, 1, x_21023); +x_20901 = x_21024; +x_20902 = x_21019; +goto block_20931; +} +} +else +{ +uint8_t x_21025; +lean_free_object(x_20933); +lean_dec(x_20612); +lean_dec(x_20601); +lean_dec(x_153); +lean_dec(x_5); +lean_dec(x_4); +lean_dec(x_2); +lean_dec(x_1); +x_21025 = !lean_is_exclusive(x_21017); +if (x_21025 == 0) +{ +return x_21017; +} +else +{ +lean_object* x_21026; lean_object* x_21027; lean_object* x_21028; +x_21026 = lean_ctor_get(x_21017, 0); +x_21027 = lean_ctor_get(x_21017, 1); +lean_inc(x_21027); +lean_inc(x_21026); +lean_dec(x_21017); +x_21028 = lean_alloc_ctor(1, 2, 0); +lean_ctor_set(x_21028, 0, x_21026); +lean_ctor_set(x_21028, 1, x_21027); +return x_21028; +} +} +} +} +else +{ +lean_object* x_21029; lean_object* x_21030; lean_object* x_21031; lean_object* x_21032; uint8_t x_21033; +x_21029 = lean_ctor_get(x_20933, 0); +lean_inc(x_21029); +lean_dec(x_20933); +x_21030 = lean_array_get_size(x_20601); +x_21031 = lean_ctor_get(x_21029, 3); +lean_inc(x_21031); +lean_dec(x_21029); +x_21032 = lean_array_get_size(x_21031); 
+lean_dec(x_21031); +x_21033 = lean_nat_dec_lt(x_21030, x_21032); +if (x_21033 == 0) +{ +uint8_t x_21034; +x_21034 = lean_nat_dec_eq(x_21030, x_21032); +if (x_21034 == 0) +{ +lean_object* x_21035; lean_object* x_21036; lean_object* x_21037; lean_object* x_21038; lean_object* x_21039; lean_object* x_21040; lean_object* x_21041; lean_object* x_21042; lean_object* x_21043; lean_object* x_21044; lean_object* x_21045; lean_object* x_21046; lean_object* x_21047; lean_object* x_21048; lean_object* x_21049; lean_object* x_21050; +x_21035 = lean_unsigned_to_nat(0u); +x_21036 = l_Array_extract___rarg(x_20601, x_21035, x_21032); +x_21037 = l_Array_extract___rarg(x_20601, x_21032, x_21030); +lean_dec(x_21030); +lean_inc(x_153); +lean_ctor_set_tag(x_20932, 6); +lean_ctor_set(x_20932, 1, x_21036); +lean_ctor_set(x_20932, 0, x_153); +x_21038 = lean_ctor_get(x_1, 0); +lean_inc(x_21038); +x_21039 = l_Lean_IR_ToIR_bindVar(x_21038, x_20607, x_4, x_5, x_20937); +x_21040 = lean_ctor_get(x_21039, 0); +lean_inc(x_21040); +x_21041 = lean_ctor_get(x_21039, 1); +lean_inc(x_21041); +lean_dec(x_21039); +x_21042 = lean_ctor_get(x_21040, 0); +lean_inc(x_21042); +x_21043 = lean_ctor_get(x_21040, 1); +lean_inc(x_21043); +lean_dec(x_21040); +x_21044 = l_Lean_IR_ToIR_newVar(x_21043, x_4, x_5, x_21041); +x_21045 = lean_ctor_get(x_21044, 0); +lean_inc(x_21045); +x_21046 = lean_ctor_get(x_21044, 1); +lean_inc(x_21046); +lean_dec(x_21044); +x_21047 = lean_ctor_get(x_21045, 0); +lean_inc(x_21047); +x_21048 = lean_ctor_get(x_21045, 1); +lean_inc(x_21048); +lean_dec(x_21045); +x_21049 = lean_ctor_get(x_1, 2); +lean_inc(x_21049); +lean_inc(x_5); +lean_inc(x_4); +x_21050 = l_Lean_IR_ToIR_lowerType(x_21049, x_21048, x_4, x_5, x_21046); +if (lean_obj_tag(x_21050) == 0) +{ +lean_object* x_21051; lean_object* x_21052; lean_object* x_21053; lean_object* x_21054; lean_object* x_21055; +x_21051 = lean_ctor_get(x_21050, 0); +lean_inc(x_21051); +x_21052 = lean_ctor_get(x_21050, 1); +lean_inc(x_21052); +lean_dec(x_21050); +x_21053 = lean_ctor_get(x_21051, 0); +lean_inc(x_21053); +x_21054 = lean_ctor_get(x_21051, 1); +lean_inc(x_21054); +lean_dec(x_21051); +lean_inc(x_5); +lean_inc(x_4); +lean_inc(x_2); +x_21055 = l_Lean_IR_ToIR_lowerLet___lambda__3(x_2, x_21047, x_21037, x_21042, x_20932, x_21053, x_21054, x_4, x_5, x_21052); +if (lean_obj_tag(x_21055) == 0) +{ +lean_object* x_21056; lean_object* x_21057; lean_object* x_21058; lean_object* x_21059; lean_object* x_21060; lean_object* x_21061; lean_object* x_21062; +x_21056 = lean_ctor_get(x_21055, 0); +lean_inc(x_21056); +x_21057 = lean_ctor_get(x_21055, 1); +lean_inc(x_21057); +lean_dec(x_21055); +x_21058 = lean_ctor_get(x_21056, 0); +lean_inc(x_21058); +x_21059 = lean_ctor_get(x_21056, 1); +lean_inc(x_21059); +if (lean_is_exclusive(x_21056)) { + lean_ctor_release(x_21056, 0); + lean_ctor_release(x_21056, 1); + x_21060 = x_21056; +} else { + lean_dec_ref(x_21056); + x_21060 = lean_box(0); +} +x_21061 = lean_alloc_ctor(1, 1, 0); +lean_ctor_set(x_21061, 0, x_21058); +if (lean_is_scalar(x_21060)) { + x_21062 = lean_alloc_ctor(0, 2, 0); +} else { + x_21062 = x_21060; +} +lean_ctor_set(x_21062, 0, x_21061); +lean_ctor_set(x_21062, 1, x_21059); +x_20901 = x_21062; +x_20902 = x_21057; +goto block_20931; +} +else +{ +lean_object* x_21063; lean_object* x_21064; lean_object* x_21065; lean_object* x_21066; +lean_dec(x_20612); +lean_dec(x_20601); +lean_dec(x_153); +lean_dec(x_5); +lean_dec(x_4); +lean_dec(x_2); +lean_dec(x_1); +x_21063 = lean_ctor_get(x_21055, 0); +lean_inc(x_21063); +x_21064 = 
lean_ctor_get(x_21055, 1); +lean_inc(x_21064); +if (lean_is_exclusive(x_21055)) { + lean_ctor_release(x_21055, 0); + lean_ctor_release(x_21055, 1); + x_21065 = x_21055; +} else { + lean_dec_ref(x_21055); + x_21065 = lean_box(0); +} +if (lean_is_scalar(x_21065)) { + x_21066 = lean_alloc_ctor(1, 2, 0); +} else { + x_21066 = x_21065; +} +lean_ctor_set(x_21066, 0, x_21063); +lean_ctor_set(x_21066, 1, x_21064); +return x_21066; +} +} +else +{ +lean_object* x_21067; lean_object* x_21068; lean_object* x_21069; lean_object* x_21070; +lean_dec(x_21047); +lean_dec(x_21042); +lean_dec(x_20932); +lean_dec(x_21037); +lean_dec(x_20612); +lean_dec(x_20601); +lean_dec(x_153); +lean_dec(x_5); +lean_dec(x_4); +lean_dec(x_2); +lean_dec(x_1); +x_21067 = lean_ctor_get(x_21050, 0); +lean_inc(x_21067); +x_21068 = lean_ctor_get(x_21050, 1); +lean_inc(x_21068); +if (lean_is_exclusive(x_21050)) { + lean_ctor_release(x_21050, 0); + lean_ctor_release(x_21050, 1); + x_21069 = x_21050; +} else { + lean_dec_ref(x_21050); + x_21069 = lean_box(0); +} +if (lean_is_scalar(x_21069)) { + x_21070 = lean_alloc_ctor(1, 2, 0); +} else { + x_21070 = x_21069; +} +lean_ctor_set(x_21070, 0, x_21067); +lean_ctor_set(x_21070, 1, x_21068); +return x_21070; +} +} +else +{ +lean_object* x_21071; lean_object* x_21072; lean_object* x_21073; lean_object* x_21074; lean_object* x_21075; lean_object* x_21076; lean_object* x_21077; lean_object* x_21078; +lean_dec(x_21032); +lean_dec(x_21030); +lean_inc(x_20601); +lean_inc(x_153); +lean_ctor_set_tag(x_20932, 6); +lean_ctor_set(x_20932, 1, x_20601); +lean_ctor_set(x_20932, 0, x_153); +x_21071 = lean_ctor_get(x_1, 0); +lean_inc(x_21071); +x_21072 = l_Lean_IR_ToIR_bindVar(x_21071, x_20607, x_4, x_5, x_20937); +x_21073 = lean_ctor_get(x_21072, 0); +lean_inc(x_21073); +x_21074 = lean_ctor_get(x_21072, 1); +lean_inc(x_21074); +lean_dec(x_21072); +x_21075 = lean_ctor_get(x_21073, 0); +lean_inc(x_21075); +x_21076 = lean_ctor_get(x_21073, 1); +lean_inc(x_21076); +lean_dec(x_21073); +x_21077 = lean_ctor_get(x_1, 2); +lean_inc(x_21077); +lean_inc(x_5); +lean_inc(x_4); +x_21078 = l_Lean_IR_ToIR_lowerType(x_21077, x_21076, x_4, x_5, x_21074); +if (lean_obj_tag(x_21078) == 0) +{ +lean_object* x_21079; lean_object* x_21080; lean_object* x_21081; lean_object* x_21082; lean_object* x_21083; +x_21079 = lean_ctor_get(x_21078, 0); +lean_inc(x_21079); +x_21080 = lean_ctor_get(x_21078, 1); +lean_inc(x_21080); +lean_dec(x_21078); +x_21081 = lean_ctor_get(x_21079, 0); +lean_inc(x_21081); +x_21082 = lean_ctor_get(x_21079, 1); +lean_inc(x_21082); +lean_dec(x_21079); +lean_inc(x_5); +lean_inc(x_4); +lean_inc(x_2); +x_21083 = l_Lean_IR_ToIR_lowerLet___lambda__1(x_2, x_21075, x_20932, x_21081, x_21082, x_4, x_5, x_21080); +if (lean_obj_tag(x_21083) == 0) +{ +lean_object* x_21084; lean_object* x_21085; lean_object* x_21086; lean_object* x_21087; lean_object* x_21088; lean_object* x_21089; lean_object* x_21090; +x_21084 = lean_ctor_get(x_21083, 0); +lean_inc(x_21084); +x_21085 = lean_ctor_get(x_21083, 1); +lean_inc(x_21085); +lean_dec(x_21083); +x_21086 = lean_ctor_get(x_21084, 0); +lean_inc(x_21086); +x_21087 = lean_ctor_get(x_21084, 1); +lean_inc(x_21087); +if (lean_is_exclusive(x_21084)) { + lean_ctor_release(x_21084, 0); + lean_ctor_release(x_21084, 1); + x_21088 = x_21084; +} else { + lean_dec_ref(x_21084); + x_21088 = lean_box(0); +} +x_21089 = lean_alloc_ctor(1, 1, 0); +lean_ctor_set(x_21089, 0, x_21086); +if (lean_is_scalar(x_21088)) { + x_21090 = lean_alloc_ctor(0, 2, 0); +} else { + x_21090 = x_21088; +} 
+lean_ctor_set(x_21090, 0, x_21089); +lean_ctor_set(x_21090, 1, x_21087); +x_20901 = x_21090; +x_20902 = x_21085; +goto block_20931; +} +else +{ +lean_object* x_21091; lean_object* x_21092; lean_object* x_21093; lean_object* x_21094; +lean_dec(x_20612); +lean_dec(x_20601); +lean_dec(x_153); +lean_dec(x_5); +lean_dec(x_4); +lean_dec(x_2); +lean_dec(x_1); +x_21091 = lean_ctor_get(x_21083, 0); +lean_inc(x_21091); +x_21092 = lean_ctor_get(x_21083, 1); +lean_inc(x_21092); +if (lean_is_exclusive(x_21083)) { + lean_ctor_release(x_21083, 0); + lean_ctor_release(x_21083, 1); + x_21093 = x_21083; +} else { + lean_dec_ref(x_21083); + x_21093 = lean_box(0); +} +if (lean_is_scalar(x_21093)) { + x_21094 = lean_alloc_ctor(1, 2, 0); +} else { + x_21094 = x_21093; +} +lean_ctor_set(x_21094, 0, x_21091); +lean_ctor_set(x_21094, 1, x_21092); +return x_21094; +} +} +else +{ +lean_object* x_21095; lean_object* x_21096; lean_object* x_21097; lean_object* x_21098; +lean_dec(x_21075); +lean_dec(x_20932); +lean_dec(x_20612); +lean_dec(x_20601); +lean_dec(x_153); +lean_dec(x_5); +lean_dec(x_4); +lean_dec(x_2); +lean_dec(x_1); +x_21095 = lean_ctor_get(x_21078, 0); +lean_inc(x_21095); +x_21096 = lean_ctor_get(x_21078, 1); +lean_inc(x_21096); +if (lean_is_exclusive(x_21078)) { + lean_ctor_release(x_21078, 0); + lean_ctor_release(x_21078, 1); + x_21097 = x_21078; +} else { + lean_dec_ref(x_21078); + x_21097 = lean_box(0); +} +if (lean_is_scalar(x_21097)) { + x_21098 = lean_alloc_ctor(1, 2, 0); +} else { + x_21098 = x_21097; +} +lean_ctor_set(x_21098, 0, x_21095); +lean_ctor_set(x_21098, 1, x_21096); +return x_21098; +} +} +} +else +{ +lean_object* x_21099; lean_object* x_21100; lean_object* x_21101; lean_object* x_21102; lean_object* x_21103; lean_object* x_21104; lean_object* x_21105; lean_object* x_21106; +lean_dec(x_21032); +lean_dec(x_21030); +lean_inc(x_20601); +lean_inc(x_153); +lean_ctor_set_tag(x_20932, 7); +lean_ctor_set(x_20932, 1, x_20601); +lean_ctor_set(x_20932, 0, x_153); +x_21099 = lean_ctor_get(x_1, 0); +lean_inc(x_21099); +x_21100 = l_Lean_IR_ToIR_bindVar(x_21099, x_20607, x_4, x_5, x_20937); +x_21101 = lean_ctor_get(x_21100, 0); +lean_inc(x_21101); +x_21102 = lean_ctor_get(x_21100, 1); +lean_inc(x_21102); +lean_dec(x_21100); +x_21103 = lean_ctor_get(x_21101, 0); +lean_inc(x_21103); +x_21104 = lean_ctor_get(x_21101, 1); +lean_inc(x_21104); +lean_dec(x_21101); +x_21105 = lean_box(7); +lean_inc(x_5); +lean_inc(x_4); +lean_inc(x_2); +x_21106 = l_Lean_IR_ToIR_lowerLet___lambda__1(x_2, x_21103, x_20932, x_21105, x_21104, x_4, x_5, x_21102); +if (lean_obj_tag(x_21106) == 0) +{ +lean_object* x_21107; lean_object* x_21108; lean_object* x_21109; lean_object* x_21110; lean_object* x_21111; lean_object* x_21112; lean_object* x_21113; +x_21107 = lean_ctor_get(x_21106, 0); +lean_inc(x_21107); +x_21108 = lean_ctor_get(x_21106, 1); +lean_inc(x_21108); +lean_dec(x_21106); +x_21109 = lean_ctor_get(x_21107, 0); +lean_inc(x_21109); +x_21110 = lean_ctor_get(x_21107, 1); +lean_inc(x_21110); +if (lean_is_exclusive(x_21107)) { + lean_ctor_release(x_21107, 0); + lean_ctor_release(x_21107, 1); + x_21111 = x_21107; +} else { + lean_dec_ref(x_21107); + x_21111 = lean_box(0); +} +x_21112 = lean_alloc_ctor(1, 1, 0); +lean_ctor_set(x_21112, 0, x_21109); +if (lean_is_scalar(x_21111)) { + x_21113 = lean_alloc_ctor(0, 2, 0); +} else { + x_21113 = x_21111; +} +lean_ctor_set(x_21113, 0, x_21112); +lean_ctor_set(x_21113, 1, x_21110); +x_20901 = x_21113; +x_20902 = x_21108; +goto block_20931; +} +else +{ +lean_object* x_21114; 
lean_object* x_21115; lean_object* x_21116; lean_object* x_21117; +lean_dec(x_20612); +lean_dec(x_20601); +lean_dec(x_153); +lean_dec(x_5); +lean_dec(x_4); +lean_dec(x_2); +lean_dec(x_1); +x_21114 = lean_ctor_get(x_21106, 0); +lean_inc(x_21114); +x_21115 = lean_ctor_get(x_21106, 1); +lean_inc(x_21115); +if (lean_is_exclusive(x_21106)) { + lean_ctor_release(x_21106, 0); + lean_ctor_release(x_21106, 1); + x_21116 = x_21106; +} else { + lean_dec_ref(x_21106); + x_21116 = lean_box(0); +} +if (lean_is_scalar(x_21116)) { + x_21117 = lean_alloc_ctor(1, 2, 0); +} else { + x_21117 = x_21116; +} +lean_ctor_set(x_21117, 0, x_21114); +lean_ctor_set(x_21117, 1, x_21115); +return x_21117; +} +} +} +} +else +{ +lean_object* x_21118; lean_object* x_21119; lean_object* x_21120; lean_object* x_21121; lean_object* x_21122; lean_object* x_21123; uint8_t x_21124; +x_21118 = lean_ctor_get(x_20932, 1); +lean_inc(x_21118); +lean_dec(x_20932); +x_21119 = lean_ctor_get(x_20933, 0); +lean_inc(x_21119); +if (lean_is_exclusive(x_20933)) { + lean_ctor_release(x_20933, 0); + x_21120 = x_20933; +} else { + lean_dec_ref(x_20933); + x_21120 = lean_box(0); +} +x_21121 = lean_array_get_size(x_20601); +x_21122 = lean_ctor_get(x_21119, 3); +lean_inc(x_21122); +lean_dec(x_21119); +x_21123 = lean_array_get_size(x_21122); +lean_dec(x_21122); +x_21124 = lean_nat_dec_lt(x_21121, x_21123); +if (x_21124 == 0) +{ +uint8_t x_21125; +x_21125 = lean_nat_dec_eq(x_21121, x_21123); +if (x_21125 == 0) +{ +lean_object* x_21126; lean_object* x_21127; lean_object* x_21128; lean_object* x_21129; lean_object* x_21130; lean_object* x_21131; lean_object* x_21132; lean_object* x_21133; lean_object* x_21134; lean_object* x_21135; lean_object* x_21136; lean_object* x_21137; lean_object* x_21138; lean_object* x_21139; lean_object* x_21140; lean_object* x_21141; lean_object* x_21142; +x_21126 = lean_unsigned_to_nat(0u); +x_21127 = l_Array_extract___rarg(x_20601, x_21126, x_21123); +x_21128 = l_Array_extract___rarg(x_20601, x_21123, x_21121); +lean_dec(x_21121); +lean_inc(x_153); +x_21129 = lean_alloc_ctor(6, 2, 0); +lean_ctor_set(x_21129, 0, x_153); +lean_ctor_set(x_21129, 1, x_21127); +x_21130 = lean_ctor_get(x_1, 0); +lean_inc(x_21130); +x_21131 = l_Lean_IR_ToIR_bindVar(x_21130, x_20607, x_4, x_5, x_21118); +x_21132 = lean_ctor_get(x_21131, 0); +lean_inc(x_21132); +x_21133 = lean_ctor_get(x_21131, 1); +lean_inc(x_21133); +lean_dec(x_21131); +x_21134 = lean_ctor_get(x_21132, 0); +lean_inc(x_21134); +x_21135 = lean_ctor_get(x_21132, 1); +lean_inc(x_21135); +lean_dec(x_21132); +x_21136 = l_Lean_IR_ToIR_newVar(x_21135, x_4, x_5, x_21133); +x_21137 = lean_ctor_get(x_21136, 0); +lean_inc(x_21137); +x_21138 = lean_ctor_get(x_21136, 1); +lean_inc(x_21138); +lean_dec(x_21136); +x_21139 = lean_ctor_get(x_21137, 0); +lean_inc(x_21139); +x_21140 = lean_ctor_get(x_21137, 1); +lean_inc(x_21140); +lean_dec(x_21137); +x_21141 = lean_ctor_get(x_1, 2); +lean_inc(x_21141); +lean_inc(x_5); +lean_inc(x_4); +x_21142 = l_Lean_IR_ToIR_lowerType(x_21141, x_21140, x_4, x_5, x_21138); +if (lean_obj_tag(x_21142) == 0) +{ +lean_object* x_21143; lean_object* x_21144; lean_object* x_21145; lean_object* x_21146; lean_object* x_21147; +x_21143 = lean_ctor_get(x_21142, 0); +lean_inc(x_21143); +x_21144 = lean_ctor_get(x_21142, 1); +lean_inc(x_21144); +lean_dec(x_21142); +x_21145 = lean_ctor_get(x_21143, 0); +lean_inc(x_21145); +x_21146 = lean_ctor_get(x_21143, 1); +lean_inc(x_21146); +lean_dec(x_21143); +lean_inc(x_5); +lean_inc(x_4); +lean_inc(x_2); +x_21147 = 
l_Lean_IR_ToIR_lowerLet___lambda__3(x_2, x_21139, x_21128, x_21134, x_21129, x_21145, x_21146, x_4, x_5, x_21144); +if (lean_obj_tag(x_21147) == 0) +{ +lean_object* x_21148; lean_object* x_21149; lean_object* x_21150; lean_object* x_21151; lean_object* x_21152; lean_object* x_21153; lean_object* x_21154; +x_21148 = lean_ctor_get(x_21147, 0); +lean_inc(x_21148); +x_21149 = lean_ctor_get(x_21147, 1); +lean_inc(x_21149); +lean_dec(x_21147); +x_21150 = lean_ctor_get(x_21148, 0); +lean_inc(x_21150); +x_21151 = lean_ctor_get(x_21148, 1); +lean_inc(x_21151); +if (lean_is_exclusive(x_21148)) { + lean_ctor_release(x_21148, 0); + lean_ctor_release(x_21148, 1); + x_21152 = x_21148; +} else { + lean_dec_ref(x_21148); + x_21152 = lean_box(0); +} +if (lean_is_scalar(x_21120)) { + x_21153 = lean_alloc_ctor(1, 1, 0); +} else { + x_21153 = x_21120; +} +lean_ctor_set(x_21153, 0, x_21150); +if (lean_is_scalar(x_21152)) { + x_21154 = lean_alloc_ctor(0, 2, 0); +} else { + x_21154 = x_21152; +} +lean_ctor_set(x_21154, 0, x_21153); +lean_ctor_set(x_21154, 1, x_21151); +x_20901 = x_21154; +x_20902 = x_21149; +goto block_20931; +} +else +{ +lean_object* x_21155; lean_object* x_21156; lean_object* x_21157; lean_object* x_21158; +lean_dec(x_21120); +lean_dec(x_20612); +lean_dec(x_20601); +lean_dec(x_153); +lean_dec(x_5); +lean_dec(x_4); +lean_dec(x_2); +lean_dec(x_1); +x_21155 = lean_ctor_get(x_21147, 0); +lean_inc(x_21155); +x_21156 = lean_ctor_get(x_21147, 1); +lean_inc(x_21156); +if (lean_is_exclusive(x_21147)) { + lean_ctor_release(x_21147, 0); + lean_ctor_release(x_21147, 1); + x_21157 = x_21147; +} else { + lean_dec_ref(x_21147); + x_21157 = lean_box(0); +} +if (lean_is_scalar(x_21157)) { + x_21158 = lean_alloc_ctor(1, 2, 0); +} else { + x_21158 = x_21157; +} +lean_ctor_set(x_21158, 0, x_21155); +lean_ctor_set(x_21158, 1, x_21156); +return x_21158; +} +} +else +{ +lean_object* x_21159; lean_object* x_21160; lean_object* x_21161; lean_object* x_21162; +lean_dec(x_21139); +lean_dec(x_21134); +lean_dec(x_21129); +lean_dec(x_21128); +lean_dec(x_21120); +lean_dec(x_20612); +lean_dec(x_20601); +lean_dec(x_153); +lean_dec(x_5); +lean_dec(x_4); +lean_dec(x_2); +lean_dec(x_1); +x_21159 = lean_ctor_get(x_21142, 0); +lean_inc(x_21159); +x_21160 = lean_ctor_get(x_21142, 1); +lean_inc(x_21160); +if (lean_is_exclusive(x_21142)) { + lean_ctor_release(x_21142, 0); + lean_ctor_release(x_21142, 1); + x_21161 = x_21142; +} else { + lean_dec_ref(x_21142); + x_21161 = lean_box(0); +} +if (lean_is_scalar(x_21161)) { + x_21162 = lean_alloc_ctor(1, 2, 0); +} else { + x_21162 = x_21161; +} +lean_ctor_set(x_21162, 0, x_21159); +lean_ctor_set(x_21162, 1, x_21160); +return x_21162; +} +} +else +{ +lean_object* x_21163; lean_object* x_21164; lean_object* x_21165; lean_object* x_21166; lean_object* x_21167; lean_object* x_21168; lean_object* x_21169; lean_object* x_21170; lean_object* x_21171; +lean_dec(x_21123); +lean_dec(x_21121); +lean_inc(x_20601); +lean_inc(x_153); +x_21163 = lean_alloc_ctor(6, 2, 0); +lean_ctor_set(x_21163, 0, x_153); +lean_ctor_set(x_21163, 1, x_20601); +x_21164 = lean_ctor_get(x_1, 0); +lean_inc(x_21164); +x_21165 = l_Lean_IR_ToIR_bindVar(x_21164, x_20607, x_4, x_5, x_21118); +x_21166 = lean_ctor_get(x_21165, 0); +lean_inc(x_21166); +x_21167 = lean_ctor_get(x_21165, 1); +lean_inc(x_21167); +lean_dec(x_21165); +x_21168 = lean_ctor_get(x_21166, 0); +lean_inc(x_21168); +x_21169 = lean_ctor_get(x_21166, 1); +lean_inc(x_21169); +lean_dec(x_21166); +x_21170 = lean_ctor_get(x_1, 2); +lean_inc(x_21170); +lean_inc(x_5); 
+lean_inc(x_4); +x_21171 = l_Lean_IR_ToIR_lowerType(x_21170, x_21169, x_4, x_5, x_21167); +if (lean_obj_tag(x_21171) == 0) +{ +lean_object* x_21172; lean_object* x_21173; lean_object* x_21174; lean_object* x_21175; lean_object* x_21176; +x_21172 = lean_ctor_get(x_21171, 0); +lean_inc(x_21172); +x_21173 = lean_ctor_get(x_21171, 1); +lean_inc(x_21173); +lean_dec(x_21171); +x_21174 = lean_ctor_get(x_21172, 0); +lean_inc(x_21174); +x_21175 = lean_ctor_get(x_21172, 1); +lean_inc(x_21175); +lean_dec(x_21172); +lean_inc(x_5); +lean_inc(x_4); +lean_inc(x_2); +x_21176 = l_Lean_IR_ToIR_lowerLet___lambda__1(x_2, x_21168, x_21163, x_21174, x_21175, x_4, x_5, x_21173); +if (lean_obj_tag(x_21176) == 0) +{ +lean_object* x_21177; lean_object* x_21178; lean_object* x_21179; lean_object* x_21180; lean_object* x_21181; lean_object* x_21182; lean_object* x_21183; +x_21177 = lean_ctor_get(x_21176, 0); +lean_inc(x_21177); +x_21178 = lean_ctor_get(x_21176, 1); +lean_inc(x_21178); +lean_dec(x_21176); +x_21179 = lean_ctor_get(x_21177, 0); +lean_inc(x_21179); +x_21180 = lean_ctor_get(x_21177, 1); +lean_inc(x_21180); +if (lean_is_exclusive(x_21177)) { + lean_ctor_release(x_21177, 0); + lean_ctor_release(x_21177, 1); + x_21181 = x_21177; +} else { + lean_dec_ref(x_21177); + x_21181 = lean_box(0); +} +if (lean_is_scalar(x_21120)) { + x_21182 = lean_alloc_ctor(1, 1, 0); +} else { + x_21182 = x_21120; +} +lean_ctor_set(x_21182, 0, x_21179); +if (lean_is_scalar(x_21181)) { + x_21183 = lean_alloc_ctor(0, 2, 0); +} else { + x_21183 = x_21181; +} +lean_ctor_set(x_21183, 0, x_21182); +lean_ctor_set(x_21183, 1, x_21180); +x_20901 = x_21183; +x_20902 = x_21178; +goto block_20931; +} +else +{ +lean_object* x_21184; lean_object* x_21185; lean_object* x_21186; lean_object* x_21187; +lean_dec(x_21120); +lean_dec(x_20612); +lean_dec(x_20601); +lean_dec(x_153); +lean_dec(x_5); +lean_dec(x_4); +lean_dec(x_2); +lean_dec(x_1); +x_21184 = lean_ctor_get(x_21176, 0); +lean_inc(x_21184); +x_21185 = lean_ctor_get(x_21176, 1); +lean_inc(x_21185); +if (lean_is_exclusive(x_21176)) { + lean_ctor_release(x_21176, 0); + lean_ctor_release(x_21176, 1); + x_21186 = x_21176; +} else { + lean_dec_ref(x_21176); + x_21186 = lean_box(0); +} +if (lean_is_scalar(x_21186)) { + x_21187 = lean_alloc_ctor(1, 2, 0); +} else { + x_21187 = x_21186; +} +lean_ctor_set(x_21187, 0, x_21184); +lean_ctor_set(x_21187, 1, x_21185); +return x_21187; +} +} +else +{ +lean_object* x_21188; lean_object* x_21189; lean_object* x_21190; lean_object* x_21191; +lean_dec(x_21168); +lean_dec(x_21163); +lean_dec(x_21120); +lean_dec(x_20612); +lean_dec(x_20601); +lean_dec(x_153); +lean_dec(x_5); +lean_dec(x_4); +lean_dec(x_2); +lean_dec(x_1); +x_21188 = lean_ctor_get(x_21171, 0); +lean_inc(x_21188); +x_21189 = lean_ctor_get(x_21171, 1); +lean_inc(x_21189); +if (lean_is_exclusive(x_21171)) { + lean_ctor_release(x_21171, 0); + lean_ctor_release(x_21171, 1); + x_21190 = x_21171; +} else { + lean_dec_ref(x_21171); + x_21190 = lean_box(0); +} +if (lean_is_scalar(x_21190)) { + x_21191 = lean_alloc_ctor(1, 2, 0); +} else { + x_21191 = x_21190; +} +lean_ctor_set(x_21191, 0, x_21188); +lean_ctor_set(x_21191, 1, x_21189); +return x_21191; +} +} +} +else +{ +lean_object* x_21192; lean_object* x_21193; lean_object* x_21194; lean_object* x_21195; lean_object* x_21196; lean_object* x_21197; lean_object* x_21198; lean_object* x_21199; lean_object* x_21200; +lean_dec(x_21123); +lean_dec(x_21121); +lean_inc(x_20601); +lean_inc(x_153); +x_21192 = lean_alloc_ctor(7, 2, 0); +lean_ctor_set(x_21192, 0, 
x_153); +lean_ctor_set(x_21192, 1, x_20601); +x_21193 = lean_ctor_get(x_1, 0); +lean_inc(x_21193); +x_21194 = l_Lean_IR_ToIR_bindVar(x_21193, x_20607, x_4, x_5, x_21118); +x_21195 = lean_ctor_get(x_21194, 0); +lean_inc(x_21195); +x_21196 = lean_ctor_get(x_21194, 1); +lean_inc(x_21196); +lean_dec(x_21194); +x_21197 = lean_ctor_get(x_21195, 0); +lean_inc(x_21197); +x_21198 = lean_ctor_get(x_21195, 1); +lean_inc(x_21198); +lean_dec(x_21195); +x_21199 = lean_box(7); +lean_inc(x_5); +lean_inc(x_4); +lean_inc(x_2); +x_21200 = l_Lean_IR_ToIR_lowerLet___lambda__1(x_2, x_21197, x_21192, x_21199, x_21198, x_4, x_5, x_21196); +if (lean_obj_tag(x_21200) == 0) +{ +lean_object* x_21201; lean_object* x_21202; lean_object* x_21203; lean_object* x_21204; lean_object* x_21205; lean_object* x_21206; lean_object* x_21207; +x_21201 = lean_ctor_get(x_21200, 0); +lean_inc(x_21201); +x_21202 = lean_ctor_get(x_21200, 1); +lean_inc(x_21202); +lean_dec(x_21200); +x_21203 = lean_ctor_get(x_21201, 0); +lean_inc(x_21203); +x_21204 = lean_ctor_get(x_21201, 1); +lean_inc(x_21204); +if (lean_is_exclusive(x_21201)) { + lean_ctor_release(x_21201, 0); + lean_ctor_release(x_21201, 1); + x_21205 = x_21201; +} else { + lean_dec_ref(x_21201); + x_21205 = lean_box(0); +} +if (lean_is_scalar(x_21120)) { + x_21206 = lean_alloc_ctor(1, 1, 0); +} else { + x_21206 = x_21120; +} +lean_ctor_set(x_21206, 0, x_21203); +if (lean_is_scalar(x_21205)) { + x_21207 = lean_alloc_ctor(0, 2, 0); +} else { + x_21207 = x_21205; +} +lean_ctor_set(x_21207, 0, x_21206); +lean_ctor_set(x_21207, 1, x_21204); +x_20901 = x_21207; +x_20902 = x_21202; +goto block_20931; +} +else +{ +lean_object* x_21208; lean_object* x_21209; lean_object* x_21210; lean_object* x_21211; +lean_dec(x_21120); +lean_dec(x_20612); +lean_dec(x_20601); +lean_dec(x_153); +lean_dec(x_5); +lean_dec(x_4); +lean_dec(x_2); +lean_dec(x_1); +x_21208 = lean_ctor_get(x_21200, 0); +lean_inc(x_21208); +x_21209 = lean_ctor_get(x_21200, 1); +lean_inc(x_21209); +if (lean_is_exclusive(x_21200)) { + lean_ctor_release(x_21200, 0); + lean_ctor_release(x_21200, 1); + x_21210 = x_21200; +} else { + lean_dec_ref(x_21200); + x_21210 = lean_box(0); +} +if (lean_is_scalar(x_21210)) { + x_21211 = lean_alloc_ctor(1, 2, 0); +} else { + x_21211 = x_21210; +} +lean_ctor_set(x_21211, 0, x_21208); +lean_ctor_set(x_21211, 1, x_21209); +return x_21211; +} +} +} +} +block_20931: +{ +lean_object* x_20903; +x_20903 = lean_ctor_get(x_20901, 0); +lean_inc(x_20903); +if (lean_obj_tag(x_20903) == 0) +{ +lean_object* x_20904; lean_object* x_20905; lean_object* x_20906; lean_object* x_20907; lean_object* x_20908; lean_object* x_20909; lean_object* x_20910; lean_object* x_20911; lean_object* x_20912; lean_object* x_20913; +lean_dec(x_20612); +x_20904 = lean_ctor_get(x_20901, 1); +lean_inc(x_20904); +lean_dec(x_20901); +x_20905 = lean_alloc_ctor(6, 2, 0); +lean_ctor_set(x_20905, 0, x_153); +lean_ctor_set(x_20905, 1, x_20601); +x_20906 = lean_ctor_get(x_1, 0); +lean_inc(x_20906); +x_20907 = l_Lean_IR_ToIR_bindVar(x_20906, x_20904, x_4, x_5, x_20902); +x_20908 = lean_ctor_get(x_20907, 0); +lean_inc(x_20908); +x_20909 = lean_ctor_get(x_20907, 1); +lean_inc(x_20909); +lean_dec(x_20907); +x_20910 = lean_ctor_get(x_20908, 0); +lean_inc(x_20910); +x_20911 = lean_ctor_get(x_20908, 1); +lean_inc(x_20911); +lean_dec(x_20908); +x_20912 = lean_ctor_get(x_1, 2); +lean_inc(x_20912); +lean_dec(x_1); +lean_inc(x_5); +lean_inc(x_4); +x_20913 = l_Lean_IR_ToIR_lowerType(x_20912, x_20911, x_4, x_5, x_20909); +if (lean_obj_tag(x_20913) == 0) +{ 
+lean_object* x_20914; lean_object* x_20915; lean_object* x_20916; lean_object* x_20917; lean_object* x_20918; +x_20914 = lean_ctor_get(x_20913, 0); +lean_inc(x_20914); +x_20915 = lean_ctor_get(x_20913, 1); +lean_inc(x_20915); +lean_dec(x_20913); +x_20916 = lean_ctor_get(x_20914, 0); +lean_inc(x_20916); +x_20917 = lean_ctor_get(x_20914, 1); +lean_inc(x_20917); +lean_dec(x_20914); +x_20918 = l_Lean_IR_ToIR_lowerLet___lambda__1(x_2, x_20910, x_20905, x_20916, x_20917, x_4, x_5, x_20915); +return x_20918; +} +else +{ +uint8_t x_20919; +lean_dec(x_20910); +lean_dec(x_20905); +lean_dec(x_5); +lean_dec(x_4); +lean_dec(x_2); +x_20919 = !lean_is_exclusive(x_20913); +if (x_20919 == 0) +{ +return x_20913; +} +else +{ +lean_object* x_20920; lean_object* x_20921; lean_object* x_20922; +x_20920 = lean_ctor_get(x_20913, 0); +x_20921 = lean_ctor_get(x_20913, 1); +lean_inc(x_20921); +lean_inc(x_20920); +lean_dec(x_20913); +x_20922 = lean_alloc_ctor(1, 2, 0); +lean_ctor_set(x_20922, 0, x_20920); +lean_ctor_set(x_20922, 1, x_20921); +return x_20922; +} +} +} +else +{ +uint8_t x_20923; +lean_dec(x_20601); +lean_dec(x_153); +lean_dec(x_5); +lean_dec(x_4); +lean_dec(x_2); +lean_dec(x_1); +x_20923 = !lean_is_exclusive(x_20901); +if (x_20923 == 0) +{ +lean_object* x_20924; lean_object* x_20925; lean_object* x_20926; +x_20924 = lean_ctor_get(x_20901, 0); +lean_dec(x_20924); +x_20925 = lean_ctor_get(x_20903, 0); +lean_inc(x_20925); +lean_dec(x_20903); +lean_ctor_set(x_20901, 0, x_20925); +if (lean_is_scalar(x_20612)) { + x_20926 = lean_alloc_ctor(0, 2, 0); +} else { + x_20926 = x_20612; +} +lean_ctor_set(x_20926, 0, x_20901); +lean_ctor_set(x_20926, 1, x_20902); +return x_20926; +} +else +{ +lean_object* x_20927; lean_object* x_20928; lean_object* x_20929; lean_object* x_20930; +x_20927 = lean_ctor_get(x_20901, 1); +lean_inc(x_20927); +lean_dec(x_20901); +x_20928 = lean_ctor_get(x_20903, 0); +lean_inc(x_20928); +lean_dec(x_20903); +x_20929 = lean_alloc_ctor(0, 2, 0); +lean_ctor_set(x_20929, 0, x_20928); +lean_ctor_set(x_20929, 1, x_20927); +if (lean_is_scalar(x_20612)) { + x_20930 = lean_alloc_ctor(0, 2, 0); +} else { + x_20930 = x_20612; +} +lean_ctor_set(x_20930, 0, x_20929); +lean_ctor_set(x_20930, 1, x_20902); +return x_20930; +} +} +} +} +case 2: +{ +lean_object* x_21212; lean_object* x_21213; +lean_dec(x_20618); +lean_dec(x_20613); +lean_dec(x_20612); +lean_free_object(x_20603); +lean_dec(x_20601); +lean_dec(x_20593); +lean_dec(x_20592); +lean_dec(x_153); +lean_dec(x_2); +lean_dec(x_1); +x_21212 = l_Lean_IR_ToIR_lowerLet___closed__18; +x_21213 = l_panic___at_Lean_IR_ToIR_lowerAlt_loop___spec__1(x_21212, x_20607, x_4, x_5, x_20611); +return x_21213; +} +case 3: +{ +lean_object* x_21214; lean_object* x_21215; lean_object* x_21245; lean_object* x_21246; +lean_dec(x_20618); +lean_dec(x_20613); +lean_dec(x_20593); +lean_dec(x_20592); +lean_inc(x_153); +x_21245 = l_Lean_Compiler_LCNF_getMonoDecl_x3f(x_153, x_4, x_5, x_20611); +x_21246 = lean_ctor_get(x_21245, 0); +lean_inc(x_21246); +if (lean_obj_tag(x_21246) == 0) +{ +lean_object* x_21247; lean_object* x_21248; +x_21247 = lean_ctor_get(x_21245, 1); +lean_inc(x_21247); +lean_dec(x_21245); +x_21248 = lean_box(0); +lean_ctor_set(x_20603, 0, x_21248); +x_21214 = x_20603; +x_21215 = x_21247; +goto block_21244; +} +else +{ +uint8_t x_21249; +lean_free_object(x_20603); +x_21249 = !lean_is_exclusive(x_21245); +if (x_21249 == 0) +{ +lean_object* x_21250; lean_object* x_21251; uint8_t x_21252; +x_21250 = lean_ctor_get(x_21245, 1); +x_21251 = lean_ctor_get(x_21245, 0); 
+lean_dec(x_21251); +x_21252 = !lean_is_exclusive(x_21246); +if (x_21252 == 0) +{ +lean_object* x_21253; lean_object* x_21254; lean_object* x_21255; lean_object* x_21256; uint8_t x_21257; +x_21253 = lean_ctor_get(x_21246, 0); +x_21254 = lean_array_get_size(x_20601); +x_21255 = lean_ctor_get(x_21253, 3); +lean_inc(x_21255); +lean_dec(x_21253); +x_21256 = lean_array_get_size(x_21255); +lean_dec(x_21255); +x_21257 = lean_nat_dec_lt(x_21254, x_21256); +if (x_21257 == 0) +{ +uint8_t x_21258; +x_21258 = lean_nat_dec_eq(x_21254, x_21256); +if (x_21258 == 0) +{ +lean_object* x_21259; lean_object* x_21260; lean_object* x_21261; lean_object* x_21262; lean_object* x_21263; lean_object* x_21264; lean_object* x_21265; lean_object* x_21266; lean_object* x_21267; lean_object* x_21268; lean_object* x_21269; lean_object* x_21270; lean_object* x_21271; lean_object* x_21272; lean_object* x_21273; lean_object* x_21274; +x_21259 = lean_unsigned_to_nat(0u); +x_21260 = l_Array_extract___rarg(x_20601, x_21259, x_21256); +x_21261 = l_Array_extract___rarg(x_20601, x_21256, x_21254); +lean_dec(x_21254); +lean_inc(x_153); +lean_ctor_set_tag(x_21245, 6); +lean_ctor_set(x_21245, 1, x_21260); +lean_ctor_set(x_21245, 0, x_153); +x_21262 = lean_ctor_get(x_1, 0); +lean_inc(x_21262); +x_21263 = l_Lean_IR_ToIR_bindVar(x_21262, x_20607, x_4, x_5, x_21250); +x_21264 = lean_ctor_get(x_21263, 0); +lean_inc(x_21264); +x_21265 = lean_ctor_get(x_21263, 1); +lean_inc(x_21265); +lean_dec(x_21263); +x_21266 = lean_ctor_get(x_21264, 0); +lean_inc(x_21266); +x_21267 = lean_ctor_get(x_21264, 1); +lean_inc(x_21267); +lean_dec(x_21264); +x_21268 = l_Lean_IR_ToIR_newVar(x_21267, x_4, x_5, x_21265); +x_21269 = lean_ctor_get(x_21268, 0); +lean_inc(x_21269); +x_21270 = lean_ctor_get(x_21268, 1); +lean_inc(x_21270); +lean_dec(x_21268); +x_21271 = lean_ctor_get(x_21269, 0); +lean_inc(x_21271); +x_21272 = lean_ctor_get(x_21269, 1); +lean_inc(x_21272); +lean_dec(x_21269); +x_21273 = lean_ctor_get(x_1, 2); +lean_inc(x_21273); +lean_inc(x_5); +lean_inc(x_4); +x_21274 = l_Lean_IR_ToIR_lowerType(x_21273, x_21272, x_4, x_5, x_21270); +if (lean_obj_tag(x_21274) == 0) +{ +lean_object* x_21275; lean_object* x_21276; lean_object* x_21277; lean_object* x_21278; lean_object* x_21279; +x_21275 = lean_ctor_get(x_21274, 0); +lean_inc(x_21275); +x_21276 = lean_ctor_get(x_21274, 1); +lean_inc(x_21276); +lean_dec(x_21274); +x_21277 = lean_ctor_get(x_21275, 0); +lean_inc(x_21277); +x_21278 = lean_ctor_get(x_21275, 1); +lean_inc(x_21278); +lean_dec(x_21275); +lean_inc(x_5); +lean_inc(x_4); +lean_inc(x_2); +x_21279 = l_Lean_IR_ToIR_lowerLet___lambda__3(x_2, x_21271, x_21261, x_21266, x_21245, x_21277, x_21278, x_4, x_5, x_21276); +if (lean_obj_tag(x_21279) == 0) +{ +lean_object* x_21280; lean_object* x_21281; uint8_t x_21282; +x_21280 = lean_ctor_get(x_21279, 0); +lean_inc(x_21280); +x_21281 = lean_ctor_get(x_21279, 1); +lean_inc(x_21281); +lean_dec(x_21279); +x_21282 = !lean_is_exclusive(x_21280); +if (x_21282 == 0) +{ +lean_object* x_21283; +x_21283 = lean_ctor_get(x_21280, 0); +lean_ctor_set(x_21246, 0, x_21283); +lean_ctor_set(x_21280, 0, x_21246); +x_21214 = x_21280; +x_21215 = x_21281; +goto block_21244; +} +else +{ +lean_object* x_21284; lean_object* x_21285; lean_object* x_21286; +x_21284 = lean_ctor_get(x_21280, 0); +x_21285 = lean_ctor_get(x_21280, 1); +lean_inc(x_21285); +lean_inc(x_21284); +lean_dec(x_21280); +lean_ctor_set(x_21246, 0, x_21284); +x_21286 = lean_alloc_ctor(0, 2, 0); +lean_ctor_set(x_21286, 0, x_21246); +lean_ctor_set(x_21286, 1, x_21285); 
+x_21214 = x_21286; +x_21215 = x_21281; +goto block_21244; +} +} +else +{ +uint8_t x_21287; +lean_free_object(x_21246); +lean_dec(x_20612); +lean_dec(x_20601); +lean_dec(x_153); +lean_dec(x_5); +lean_dec(x_4); +lean_dec(x_2); +lean_dec(x_1); +x_21287 = !lean_is_exclusive(x_21279); +if (x_21287 == 0) +{ +return x_21279; +} +else +{ +lean_object* x_21288; lean_object* x_21289; lean_object* x_21290; +x_21288 = lean_ctor_get(x_21279, 0); +x_21289 = lean_ctor_get(x_21279, 1); +lean_inc(x_21289); +lean_inc(x_21288); +lean_dec(x_21279); +x_21290 = lean_alloc_ctor(1, 2, 0); +lean_ctor_set(x_21290, 0, x_21288); +lean_ctor_set(x_21290, 1, x_21289); +return x_21290; +} +} +} +else +{ +uint8_t x_21291; +lean_dec(x_21271); +lean_dec(x_21266); +lean_dec(x_21245); +lean_dec(x_21261); +lean_free_object(x_21246); +lean_dec(x_20612); +lean_dec(x_20601); +lean_dec(x_153); +lean_dec(x_5); +lean_dec(x_4); +lean_dec(x_2); +lean_dec(x_1); +x_21291 = !lean_is_exclusive(x_21274); +if (x_21291 == 0) +{ +return x_21274; +} +else +{ +lean_object* x_21292; lean_object* x_21293; lean_object* x_21294; +x_21292 = lean_ctor_get(x_21274, 0); +x_21293 = lean_ctor_get(x_21274, 1); +lean_inc(x_21293); +lean_inc(x_21292); +lean_dec(x_21274); +x_21294 = lean_alloc_ctor(1, 2, 0); +lean_ctor_set(x_21294, 0, x_21292); +lean_ctor_set(x_21294, 1, x_21293); +return x_21294; +} +} +} +else +{ +lean_object* x_21295; lean_object* x_21296; lean_object* x_21297; lean_object* x_21298; lean_object* x_21299; lean_object* x_21300; lean_object* x_21301; lean_object* x_21302; +lean_dec(x_21256); +lean_dec(x_21254); +lean_inc(x_20601); +lean_inc(x_153); +lean_ctor_set_tag(x_21245, 6); +lean_ctor_set(x_21245, 1, x_20601); +lean_ctor_set(x_21245, 0, x_153); +x_21295 = lean_ctor_get(x_1, 0); +lean_inc(x_21295); +x_21296 = l_Lean_IR_ToIR_bindVar(x_21295, x_20607, x_4, x_5, x_21250); +x_21297 = lean_ctor_get(x_21296, 0); +lean_inc(x_21297); +x_21298 = lean_ctor_get(x_21296, 1); +lean_inc(x_21298); +lean_dec(x_21296); +x_21299 = lean_ctor_get(x_21297, 0); +lean_inc(x_21299); +x_21300 = lean_ctor_get(x_21297, 1); +lean_inc(x_21300); +lean_dec(x_21297); +x_21301 = lean_ctor_get(x_1, 2); +lean_inc(x_21301); +lean_inc(x_5); +lean_inc(x_4); +x_21302 = l_Lean_IR_ToIR_lowerType(x_21301, x_21300, x_4, x_5, x_21298); +if (lean_obj_tag(x_21302) == 0) +{ +lean_object* x_21303; lean_object* x_21304; lean_object* x_21305; lean_object* x_21306; lean_object* x_21307; +x_21303 = lean_ctor_get(x_21302, 0); +lean_inc(x_21303); +x_21304 = lean_ctor_get(x_21302, 1); +lean_inc(x_21304); +lean_dec(x_21302); +x_21305 = lean_ctor_get(x_21303, 0); +lean_inc(x_21305); +x_21306 = lean_ctor_get(x_21303, 1); +lean_inc(x_21306); +lean_dec(x_21303); +lean_inc(x_5); +lean_inc(x_4); +lean_inc(x_2); +x_21307 = l_Lean_IR_ToIR_lowerLet___lambda__1(x_2, x_21299, x_21245, x_21305, x_21306, x_4, x_5, x_21304); +if (lean_obj_tag(x_21307) == 0) +{ +lean_object* x_21308; lean_object* x_21309; uint8_t x_21310; +x_21308 = lean_ctor_get(x_21307, 0); +lean_inc(x_21308); +x_21309 = lean_ctor_get(x_21307, 1); +lean_inc(x_21309); +lean_dec(x_21307); +x_21310 = !lean_is_exclusive(x_21308); +if (x_21310 == 0) +{ +lean_object* x_21311; +x_21311 = lean_ctor_get(x_21308, 0); +lean_ctor_set(x_21246, 0, x_21311); +lean_ctor_set(x_21308, 0, x_21246); +x_21214 = x_21308; +x_21215 = x_21309; +goto block_21244; +} +else +{ +lean_object* x_21312; lean_object* x_21313; lean_object* x_21314; +x_21312 = lean_ctor_get(x_21308, 0); +x_21313 = lean_ctor_get(x_21308, 1); +lean_inc(x_21313); +lean_inc(x_21312); 
+lean_dec(x_21308); +lean_ctor_set(x_21246, 0, x_21312); +x_21314 = lean_alloc_ctor(0, 2, 0); +lean_ctor_set(x_21314, 0, x_21246); +lean_ctor_set(x_21314, 1, x_21313); +x_21214 = x_21314; +x_21215 = x_21309; +goto block_21244; +} +} +else +{ +uint8_t x_21315; +lean_free_object(x_21246); +lean_dec(x_20612); +lean_dec(x_20601); +lean_dec(x_153); +lean_dec(x_5); +lean_dec(x_4); +lean_dec(x_2); +lean_dec(x_1); +x_21315 = !lean_is_exclusive(x_21307); +if (x_21315 == 0) +{ +return x_21307; +} +else +{ +lean_object* x_21316; lean_object* x_21317; lean_object* x_21318; +x_21316 = lean_ctor_get(x_21307, 0); +x_21317 = lean_ctor_get(x_21307, 1); +lean_inc(x_21317); +lean_inc(x_21316); +lean_dec(x_21307); +x_21318 = lean_alloc_ctor(1, 2, 0); +lean_ctor_set(x_21318, 0, x_21316); +lean_ctor_set(x_21318, 1, x_21317); +return x_21318; +} +} +} +else +{ +uint8_t x_21319; +lean_dec(x_21299); +lean_dec(x_21245); +lean_free_object(x_21246); +lean_dec(x_20612); +lean_dec(x_20601); +lean_dec(x_153); +lean_dec(x_5); +lean_dec(x_4); +lean_dec(x_2); +lean_dec(x_1); +x_21319 = !lean_is_exclusive(x_21302); +if (x_21319 == 0) +{ +return x_21302; +} +else +{ +lean_object* x_21320; lean_object* x_21321; lean_object* x_21322; +x_21320 = lean_ctor_get(x_21302, 0); +x_21321 = lean_ctor_get(x_21302, 1); +lean_inc(x_21321); +lean_inc(x_21320); +lean_dec(x_21302); +x_21322 = lean_alloc_ctor(1, 2, 0); +lean_ctor_set(x_21322, 0, x_21320); +lean_ctor_set(x_21322, 1, x_21321); +return x_21322; +} +} +} +} +else +{ +lean_object* x_21323; lean_object* x_21324; lean_object* x_21325; lean_object* x_21326; lean_object* x_21327; lean_object* x_21328; lean_object* x_21329; lean_object* x_21330; +lean_dec(x_21256); +lean_dec(x_21254); +lean_inc(x_20601); +lean_inc(x_153); +lean_ctor_set_tag(x_21245, 7); +lean_ctor_set(x_21245, 1, x_20601); +lean_ctor_set(x_21245, 0, x_153); +x_21323 = lean_ctor_get(x_1, 0); +lean_inc(x_21323); +x_21324 = l_Lean_IR_ToIR_bindVar(x_21323, x_20607, x_4, x_5, x_21250); +x_21325 = lean_ctor_get(x_21324, 0); +lean_inc(x_21325); +x_21326 = lean_ctor_get(x_21324, 1); +lean_inc(x_21326); +lean_dec(x_21324); +x_21327 = lean_ctor_get(x_21325, 0); +lean_inc(x_21327); +x_21328 = lean_ctor_get(x_21325, 1); +lean_inc(x_21328); +lean_dec(x_21325); +x_21329 = lean_box(7); +lean_inc(x_5); +lean_inc(x_4); +lean_inc(x_2); +x_21330 = l_Lean_IR_ToIR_lowerLet___lambda__1(x_2, x_21327, x_21245, x_21329, x_21328, x_4, x_5, x_21326); +if (lean_obj_tag(x_21330) == 0) +{ +lean_object* x_21331; lean_object* x_21332; uint8_t x_21333; +x_21331 = lean_ctor_get(x_21330, 0); +lean_inc(x_21331); +x_21332 = lean_ctor_get(x_21330, 1); +lean_inc(x_21332); +lean_dec(x_21330); +x_21333 = !lean_is_exclusive(x_21331); +if (x_21333 == 0) +{ +lean_object* x_21334; +x_21334 = lean_ctor_get(x_21331, 0); +lean_ctor_set(x_21246, 0, x_21334); +lean_ctor_set(x_21331, 0, x_21246); +x_21214 = x_21331; +x_21215 = x_21332; +goto block_21244; +} +else +{ +lean_object* x_21335; lean_object* x_21336; lean_object* x_21337; +x_21335 = lean_ctor_get(x_21331, 0); +x_21336 = lean_ctor_get(x_21331, 1); +lean_inc(x_21336); +lean_inc(x_21335); +lean_dec(x_21331); +lean_ctor_set(x_21246, 0, x_21335); +x_21337 = lean_alloc_ctor(0, 2, 0); +lean_ctor_set(x_21337, 0, x_21246); +lean_ctor_set(x_21337, 1, x_21336); +x_21214 = x_21337; +x_21215 = x_21332; +goto block_21244; +} +} +else +{ +uint8_t x_21338; +lean_free_object(x_21246); +lean_dec(x_20612); +lean_dec(x_20601); +lean_dec(x_153); +lean_dec(x_5); +lean_dec(x_4); +lean_dec(x_2); +lean_dec(x_1); +x_21338 = 
!lean_is_exclusive(x_21330); +if (x_21338 == 0) +{ +return x_21330; +} +else +{ +lean_object* x_21339; lean_object* x_21340; lean_object* x_21341; +x_21339 = lean_ctor_get(x_21330, 0); +x_21340 = lean_ctor_get(x_21330, 1); +lean_inc(x_21340); +lean_inc(x_21339); +lean_dec(x_21330); +x_21341 = lean_alloc_ctor(1, 2, 0); +lean_ctor_set(x_21341, 0, x_21339); +lean_ctor_set(x_21341, 1, x_21340); +return x_21341; +} +} +} +} +else +{ +lean_object* x_21342; lean_object* x_21343; lean_object* x_21344; lean_object* x_21345; uint8_t x_21346; +x_21342 = lean_ctor_get(x_21246, 0); +lean_inc(x_21342); +lean_dec(x_21246); +x_21343 = lean_array_get_size(x_20601); +x_21344 = lean_ctor_get(x_21342, 3); +lean_inc(x_21344); +lean_dec(x_21342); +x_21345 = lean_array_get_size(x_21344); +lean_dec(x_21344); +x_21346 = lean_nat_dec_lt(x_21343, x_21345); +if (x_21346 == 0) +{ +uint8_t x_21347; +x_21347 = lean_nat_dec_eq(x_21343, x_21345); +if (x_21347 == 0) +{ +lean_object* x_21348; lean_object* x_21349; lean_object* x_21350; lean_object* x_21351; lean_object* x_21352; lean_object* x_21353; lean_object* x_21354; lean_object* x_21355; lean_object* x_21356; lean_object* x_21357; lean_object* x_21358; lean_object* x_21359; lean_object* x_21360; lean_object* x_21361; lean_object* x_21362; lean_object* x_21363; +x_21348 = lean_unsigned_to_nat(0u); +x_21349 = l_Array_extract___rarg(x_20601, x_21348, x_21345); +x_21350 = l_Array_extract___rarg(x_20601, x_21345, x_21343); +lean_dec(x_21343); +lean_inc(x_153); +lean_ctor_set_tag(x_21245, 6); +lean_ctor_set(x_21245, 1, x_21349); +lean_ctor_set(x_21245, 0, x_153); +x_21351 = lean_ctor_get(x_1, 0); +lean_inc(x_21351); +x_21352 = l_Lean_IR_ToIR_bindVar(x_21351, x_20607, x_4, x_5, x_21250); +x_21353 = lean_ctor_get(x_21352, 0); +lean_inc(x_21353); +x_21354 = lean_ctor_get(x_21352, 1); +lean_inc(x_21354); +lean_dec(x_21352); +x_21355 = lean_ctor_get(x_21353, 0); +lean_inc(x_21355); +x_21356 = lean_ctor_get(x_21353, 1); +lean_inc(x_21356); +lean_dec(x_21353); +x_21357 = l_Lean_IR_ToIR_newVar(x_21356, x_4, x_5, x_21354); +x_21358 = lean_ctor_get(x_21357, 0); +lean_inc(x_21358); +x_21359 = lean_ctor_get(x_21357, 1); +lean_inc(x_21359); +lean_dec(x_21357); +x_21360 = lean_ctor_get(x_21358, 0); +lean_inc(x_21360); +x_21361 = lean_ctor_get(x_21358, 1); +lean_inc(x_21361); +lean_dec(x_21358); +x_21362 = lean_ctor_get(x_1, 2); +lean_inc(x_21362); +lean_inc(x_5); +lean_inc(x_4); +x_21363 = l_Lean_IR_ToIR_lowerType(x_21362, x_21361, x_4, x_5, x_21359); +if (lean_obj_tag(x_21363) == 0) +{ +lean_object* x_21364; lean_object* x_21365; lean_object* x_21366; lean_object* x_21367; lean_object* x_21368; +x_21364 = lean_ctor_get(x_21363, 0); +lean_inc(x_21364); +x_21365 = lean_ctor_get(x_21363, 1); +lean_inc(x_21365); +lean_dec(x_21363); +x_21366 = lean_ctor_get(x_21364, 0); +lean_inc(x_21366); +x_21367 = lean_ctor_get(x_21364, 1); +lean_inc(x_21367); +lean_dec(x_21364); +lean_inc(x_5); +lean_inc(x_4); +lean_inc(x_2); +x_21368 = l_Lean_IR_ToIR_lowerLet___lambda__3(x_2, x_21360, x_21350, x_21355, x_21245, x_21366, x_21367, x_4, x_5, x_21365); +if (lean_obj_tag(x_21368) == 0) +{ +lean_object* x_21369; lean_object* x_21370; lean_object* x_21371; lean_object* x_21372; lean_object* x_21373; lean_object* x_21374; lean_object* x_21375; +x_21369 = lean_ctor_get(x_21368, 0); +lean_inc(x_21369); +x_21370 = lean_ctor_get(x_21368, 1); +lean_inc(x_21370); +lean_dec(x_21368); +x_21371 = lean_ctor_get(x_21369, 0); +lean_inc(x_21371); +x_21372 = lean_ctor_get(x_21369, 1); +lean_inc(x_21372); +if 
(lean_is_exclusive(x_21369)) { + lean_ctor_release(x_21369, 0); + lean_ctor_release(x_21369, 1); + x_21373 = x_21369; +} else { + lean_dec_ref(x_21369); + x_21373 = lean_box(0); +} +x_21374 = lean_alloc_ctor(1, 1, 0); +lean_ctor_set(x_21374, 0, x_21371); +if (lean_is_scalar(x_21373)) { + x_21375 = lean_alloc_ctor(0, 2, 0); +} else { + x_21375 = x_21373; +} +lean_ctor_set(x_21375, 0, x_21374); +lean_ctor_set(x_21375, 1, x_21372); +x_21214 = x_21375; +x_21215 = x_21370; +goto block_21244; +} +else +{ +lean_object* x_21376; lean_object* x_21377; lean_object* x_21378; lean_object* x_21379; +lean_dec(x_20612); +lean_dec(x_20601); +lean_dec(x_153); +lean_dec(x_5); +lean_dec(x_4); +lean_dec(x_2); +lean_dec(x_1); +x_21376 = lean_ctor_get(x_21368, 0); +lean_inc(x_21376); +x_21377 = lean_ctor_get(x_21368, 1); +lean_inc(x_21377); +if (lean_is_exclusive(x_21368)) { + lean_ctor_release(x_21368, 0); + lean_ctor_release(x_21368, 1); + x_21378 = x_21368; +} else { + lean_dec_ref(x_21368); + x_21378 = lean_box(0); +} +if (lean_is_scalar(x_21378)) { + x_21379 = lean_alloc_ctor(1, 2, 0); +} else { + x_21379 = x_21378; +} +lean_ctor_set(x_21379, 0, x_21376); +lean_ctor_set(x_21379, 1, x_21377); +return x_21379; +} +} +else +{ +lean_object* x_21380; lean_object* x_21381; lean_object* x_21382; lean_object* x_21383; +lean_dec(x_21360); +lean_dec(x_21355); +lean_dec(x_21245); +lean_dec(x_21350); +lean_dec(x_20612); +lean_dec(x_20601); +lean_dec(x_153); +lean_dec(x_5); +lean_dec(x_4); +lean_dec(x_2); +lean_dec(x_1); +x_21380 = lean_ctor_get(x_21363, 0); +lean_inc(x_21380); +x_21381 = lean_ctor_get(x_21363, 1); +lean_inc(x_21381); +if (lean_is_exclusive(x_21363)) { + lean_ctor_release(x_21363, 0); + lean_ctor_release(x_21363, 1); + x_21382 = x_21363; +} else { + lean_dec_ref(x_21363); + x_21382 = lean_box(0); +} +if (lean_is_scalar(x_21382)) { + x_21383 = lean_alloc_ctor(1, 2, 0); +} else { + x_21383 = x_21382; +} +lean_ctor_set(x_21383, 0, x_21380); +lean_ctor_set(x_21383, 1, x_21381); +return x_21383; +} +} +else +{ +lean_object* x_21384; lean_object* x_21385; lean_object* x_21386; lean_object* x_21387; lean_object* x_21388; lean_object* x_21389; lean_object* x_21390; lean_object* x_21391; +lean_dec(x_21345); +lean_dec(x_21343); +lean_inc(x_20601); +lean_inc(x_153); +lean_ctor_set_tag(x_21245, 6); +lean_ctor_set(x_21245, 1, x_20601); +lean_ctor_set(x_21245, 0, x_153); +x_21384 = lean_ctor_get(x_1, 0); +lean_inc(x_21384); +x_21385 = l_Lean_IR_ToIR_bindVar(x_21384, x_20607, x_4, x_5, x_21250); +x_21386 = lean_ctor_get(x_21385, 0); +lean_inc(x_21386); +x_21387 = lean_ctor_get(x_21385, 1); +lean_inc(x_21387); +lean_dec(x_21385); +x_21388 = lean_ctor_get(x_21386, 0); +lean_inc(x_21388); +x_21389 = lean_ctor_get(x_21386, 1); +lean_inc(x_21389); +lean_dec(x_21386); +x_21390 = lean_ctor_get(x_1, 2); +lean_inc(x_21390); +lean_inc(x_5); +lean_inc(x_4); +x_21391 = l_Lean_IR_ToIR_lowerType(x_21390, x_21389, x_4, x_5, x_21387); +if (lean_obj_tag(x_21391) == 0) +{ +lean_object* x_21392; lean_object* x_21393; lean_object* x_21394; lean_object* x_21395; lean_object* x_21396; +x_21392 = lean_ctor_get(x_21391, 0); +lean_inc(x_21392); +x_21393 = lean_ctor_get(x_21391, 1); +lean_inc(x_21393); +lean_dec(x_21391); +x_21394 = lean_ctor_get(x_21392, 0); +lean_inc(x_21394); +x_21395 = lean_ctor_get(x_21392, 1); +lean_inc(x_21395); +lean_dec(x_21392); +lean_inc(x_5); +lean_inc(x_4); +lean_inc(x_2); +x_21396 = l_Lean_IR_ToIR_lowerLet___lambda__1(x_2, x_21388, x_21245, x_21394, x_21395, x_4, x_5, x_21393); +if (lean_obj_tag(x_21396) == 0) 
+{ +lean_object* x_21397; lean_object* x_21398; lean_object* x_21399; lean_object* x_21400; lean_object* x_21401; lean_object* x_21402; lean_object* x_21403; +x_21397 = lean_ctor_get(x_21396, 0); +lean_inc(x_21397); +x_21398 = lean_ctor_get(x_21396, 1); +lean_inc(x_21398); +lean_dec(x_21396); +x_21399 = lean_ctor_get(x_21397, 0); +lean_inc(x_21399); +x_21400 = lean_ctor_get(x_21397, 1); +lean_inc(x_21400); +if (lean_is_exclusive(x_21397)) { + lean_ctor_release(x_21397, 0); + lean_ctor_release(x_21397, 1); + x_21401 = x_21397; +} else { + lean_dec_ref(x_21397); + x_21401 = lean_box(0); +} +x_21402 = lean_alloc_ctor(1, 1, 0); +lean_ctor_set(x_21402, 0, x_21399); +if (lean_is_scalar(x_21401)) { + x_21403 = lean_alloc_ctor(0, 2, 0); +} else { + x_21403 = x_21401; +} +lean_ctor_set(x_21403, 0, x_21402); +lean_ctor_set(x_21403, 1, x_21400); +x_21214 = x_21403; +x_21215 = x_21398; +goto block_21244; +} +else +{ +lean_object* x_21404; lean_object* x_21405; lean_object* x_21406; lean_object* x_21407; +lean_dec(x_20612); +lean_dec(x_20601); +lean_dec(x_153); +lean_dec(x_5); +lean_dec(x_4); +lean_dec(x_2); +lean_dec(x_1); +x_21404 = lean_ctor_get(x_21396, 0); +lean_inc(x_21404); +x_21405 = lean_ctor_get(x_21396, 1); +lean_inc(x_21405); +if (lean_is_exclusive(x_21396)) { + lean_ctor_release(x_21396, 0); + lean_ctor_release(x_21396, 1); + x_21406 = x_21396; +} else { + lean_dec_ref(x_21396); + x_21406 = lean_box(0); +} +if (lean_is_scalar(x_21406)) { + x_21407 = lean_alloc_ctor(1, 2, 0); +} else { + x_21407 = x_21406; +} +lean_ctor_set(x_21407, 0, x_21404); +lean_ctor_set(x_21407, 1, x_21405); +return x_21407; +} +} +else +{ +lean_object* x_21408; lean_object* x_21409; lean_object* x_21410; lean_object* x_21411; +lean_dec(x_21388); +lean_dec(x_21245); +lean_dec(x_20612); +lean_dec(x_20601); +lean_dec(x_153); +lean_dec(x_5); +lean_dec(x_4); +lean_dec(x_2); +lean_dec(x_1); +x_21408 = lean_ctor_get(x_21391, 0); +lean_inc(x_21408); +x_21409 = lean_ctor_get(x_21391, 1); +lean_inc(x_21409); +if (lean_is_exclusive(x_21391)) { + lean_ctor_release(x_21391, 0); + lean_ctor_release(x_21391, 1); + x_21410 = x_21391; +} else { + lean_dec_ref(x_21391); + x_21410 = lean_box(0); +} +if (lean_is_scalar(x_21410)) { + x_21411 = lean_alloc_ctor(1, 2, 0); +} else { + x_21411 = x_21410; +} +lean_ctor_set(x_21411, 0, x_21408); +lean_ctor_set(x_21411, 1, x_21409); +return x_21411; +} +} +} +else +{ +lean_object* x_21412; lean_object* x_21413; lean_object* x_21414; lean_object* x_21415; lean_object* x_21416; lean_object* x_21417; lean_object* x_21418; lean_object* x_21419; +lean_dec(x_21345); +lean_dec(x_21343); +lean_inc(x_20601); +lean_inc(x_153); +lean_ctor_set_tag(x_21245, 7); +lean_ctor_set(x_21245, 1, x_20601); +lean_ctor_set(x_21245, 0, x_153); +x_21412 = lean_ctor_get(x_1, 0); +lean_inc(x_21412); +x_21413 = l_Lean_IR_ToIR_bindVar(x_21412, x_20607, x_4, x_5, x_21250); +x_21414 = lean_ctor_get(x_21413, 0); +lean_inc(x_21414); +x_21415 = lean_ctor_get(x_21413, 1); +lean_inc(x_21415); +lean_dec(x_21413); +x_21416 = lean_ctor_get(x_21414, 0); +lean_inc(x_21416); +x_21417 = lean_ctor_get(x_21414, 1); +lean_inc(x_21417); +lean_dec(x_21414); +x_21418 = lean_box(7); +lean_inc(x_5); +lean_inc(x_4); +lean_inc(x_2); +x_21419 = l_Lean_IR_ToIR_lowerLet___lambda__1(x_2, x_21416, x_21245, x_21418, x_21417, x_4, x_5, x_21415); +if (lean_obj_tag(x_21419) == 0) +{ +lean_object* x_21420; lean_object* x_21421; lean_object* x_21422; lean_object* x_21423; lean_object* x_21424; lean_object* x_21425; lean_object* x_21426; +x_21420 = 
lean_ctor_get(x_21419, 0); +lean_inc(x_21420); +x_21421 = lean_ctor_get(x_21419, 1); +lean_inc(x_21421); +lean_dec(x_21419); +x_21422 = lean_ctor_get(x_21420, 0); +lean_inc(x_21422); +x_21423 = lean_ctor_get(x_21420, 1); +lean_inc(x_21423); +if (lean_is_exclusive(x_21420)) { + lean_ctor_release(x_21420, 0); + lean_ctor_release(x_21420, 1); + x_21424 = x_21420; +} else { + lean_dec_ref(x_21420); + x_21424 = lean_box(0); +} +x_21425 = lean_alloc_ctor(1, 1, 0); +lean_ctor_set(x_21425, 0, x_21422); +if (lean_is_scalar(x_21424)) { + x_21426 = lean_alloc_ctor(0, 2, 0); +} else { + x_21426 = x_21424; +} +lean_ctor_set(x_21426, 0, x_21425); +lean_ctor_set(x_21426, 1, x_21423); +x_21214 = x_21426; +x_21215 = x_21421; +goto block_21244; +} +else +{ +lean_object* x_21427; lean_object* x_21428; lean_object* x_21429; lean_object* x_21430; +lean_dec(x_20612); +lean_dec(x_20601); +lean_dec(x_153); +lean_dec(x_5); +lean_dec(x_4); +lean_dec(x_2); +lean_dec(x_1); +x_21427 = lean_ctor_get(x_21419, 0); +lean_inc(x_21427); +x_21428 = lean_ctor_get(x_21419, 1); +lean_inc(x_21428); +if (lean_is_exclusive(x_21419)) { + lean_ctor_release(x_21419, 0); + lean_ctor_release(x_21419, 1); + x_21429 = x_21419; +} else { + lean_dec_ref(x_21419); + x_21429 = lean_box(0); +} +if (lean_is_scalar(x_21429)) { + x_21430 = lean_alloc_ctor(1, 2, 0); +} else { + x_21430 = x_21429; +} +lean_ctor_set(x_21430, 0, x_21427); +lean_ctor_set(x_21430, 1, x_21428); +return x_21430; +} +} +} +} +else +{ +lean_object* x_21431; lean_object* x_21432; lean_object* x_21433; lean_object* x_21434; lean_object* x_21435; lean_object* x_21436; uint8_t x_21437; +x_21431 = lean_ctor_get(x_21245, 1); +lean_inc(x_21431); +lean_dec(x_21245); +x_21432 = lean_ctor_get(x_21246, 0); +lean_inc(x_21432); +if (lean_is_exclusive(x_21246)) { + lean_ctor_release(x_21246, 0); + x_21433 = x_21246; +} else { + lean_dec_ref(x_21246); + x_21433 = lean_box(0); +} +x_21434 = lean_array_get_size(x_20601); +x_21435 = lean_ctor_get(x_21432, 3); +lean_inc(x_21435); +lean_dec(x_21432); +x_21436 = lean_array_get_size(x_21435); +lean_dec(x_21435); +x_21437 = lean_nat_dec_lt(x_21434, x_21436); +if (x_21437 == 0) +{ +uint8_t x_21438; +x_21438 = lean_nat_dec_eq(x_21434, x_21436); +if (x_21438 == 0) +{ +lean_object* x_21439; lean_object* x_21440; lean_object* x_21441; lean_object* x_21442; lean_object* x_21443; lean_object* x_21444; lean_object* x_21445; lean_object* x_21446; lean_object* x_21447; lean_object* x_21448; lean_object* x_21449; lean_object* x_21450; lean_object* x_21451; lean_object* x_21452; lean_object* x_21453; lean_object* x_21454; lean_object* x_21455; +x_21439 = lean_unsigned_to_nat(0u); +x_21440 = l_Array_extract___rarg(x_20601, x_21439, x_21436); +x_21441 = l_Array_extract___rarg(x_20601, x_21436, x_21434); +lean_dec(x_21434); +lean_inc(x_153); +x_21442 = lean_alloc_ctor(6, 2, 0); +lean_ctor_set(x_21442, 0, x_153); +lean_ctor_set(x_21442, 1, x_21440); +x_21443 = lean_ctor_get(x_1, 0); +lean_inc(x_21443); +x_21444 = l_Lean_IR_ToIR_bindVar(x_21443, x_20607, x_4, x_5, x_21431); +x_21445 = lean_ctor_get(x_21444, 0); +lean_inc(x_21445); +x_21446 = lean_ctor_get(x_21444, 1); +lean_inc(x_21446); +lean_dec(x_21444); +x_21447 = lean_ctor_get(x_21445, 0); +lean_inc(x_21447); +x_21448 = lean_ctor_get(x_21445, 1); +lean_inc(x_21448); +lean_dec(x_21445); +x_21449 = l_Lean_IR_ToIR_newVar(x_21448, x_4, x_5, x_21446); +x_21450 = lean_ctor_get(x_21449, 0); +lean_inc(x_21450); +x_21451 = lean_ctor_get(x_21449, 1); +lean_inc(x_21451); +lean_dec(x_21449); +x_21452 = 
lean_ctor_get(x_21450, 0); +lean_inc(x_21452); +x_21453 = lean_ctor_get(x_21450, 1); +lean_inc(x_21453); +lean_dec(x_21450); +x_21454 = lean_ctor_get(x_1, 2); +lean_inc(x_21454); +lean_inc(x_5); +lean_inc(x_4); +x_21455 = l_Lean_IR_ToIR_lowerType(x_21454, x_21453, x_4, x_5, x_21451); +if (lean_obj_tag(x_21455) == 0) +{ +lean_object* x_21456; lean_object* x_21457; lean_object* x_21458; lean_object* x_21459; lean_object* x_21460; +x_21456 = lean_ctor_get(x_21455, 0); +lean_inc(x_21456); +x_21457 = lean_ctor_get(x_21455, 1); +lean_inc(x_21457); +lean_dec(x_21455); +x_21458 = lean_ctor_get(x_21456, 0); +lean_inc(x_21458); +x_21459 = lean_ctor_get(x_21456, 1); +lean_inc(x_21459); +lean_dec(x_21456); +lean_inc(x_5); +lean_inc(x_4); +lean_inc(x_2); +x_21460 = l_Lean_IR_ToIR_lowerLet___lambda__3(x_2, x_21452, x_21441, x_21447, x_21442, x_21458, x_21459, x_4, x_5, x_21457); +if (lean_obj_tag(x_21460) == 0) +{ +lean_object* x_21461; lean_object* x_21462; lean_object* x_21463; lean_object* x_21464; lean_object* x_21465; lean_object* x_21466; lean_object* x_21467; +x_21461 = lean_ctor_get(x_21460, 0); +lean_inc(x_21461); +x_21462 = lean_ctor_get(x_21460, 1); +lean_inc(x_21462); +lean_dec(x_21460); +x_21463 = lean_ctor_get(x_21461, 0); +lean_inc(x_21463); +x_21464 = lean_ctor_get(x_21461, 1); +lean_inc(x_21464); +if (lean_is_exclusive(x_21461)) { + lean_ctor_release(x_21461, 0); + lean_ctor_release(x_21461, 1); + x_21465 = x_21461; +} else { + lean_dec_ref(x_21461); + x_21465 = lean_box(0); +} +if (lean_is_scalar(x_21433)) { + x_21466 = lean_alloc_ctor(1, 1, 0); +} else { + x_21466 = x_21433; +} +lean_ctor_set(x_21466, 0, x_21463); +if (lean_is_scalar(x_21465)) { + x_21467 = lean_alloc_ctor(0, 2, 0); +} else { + x_21467 = x_21465; +} +lean_ctor_set(x_21467, 0, x_21466); +lean_ctor_set(x_21467, 1, x_21464); +x_21214 = x_21467; +x_21215 = x_21462; +goto block_21244; +} +else +{ +lean_object* x_21468; lean_object* x_21469; lean_object* x_21470; lean_object* x_21471; +lean_dec(x_21433); +lean_dec(x_20612); +lean_dec(x_20601); +lean_dec(x_153); +lean_dec(x_5); +lean_dec(x_4); +lean_dec(x_2); +lean_dec(x_1); +x_21468 = lean_ctor_get(x_21460, 0); +lean_inc(x_21468); +x_21469 = lean_ctor_get(x_21460, 1); +lean_inc(x_21469); +if (lean_is_exclusive(x_21460)) { + lean_ctor_release(x_21460, 0); + lean_ctor_release(x_21460, 1); + x_21470 = x_21460; +} else { + lean_dec_ref(x_21460); + x_21470 = lean_box(0); +} +if (lean_is_scalar(x_21470)) { + x_21471 = lean_alloc_ctor(1, 2, 0); +} else { + x_21471 = x_21470; +} +lean_ctor_set(x_21471, 0, x_21468); +lean_ctor_set(x_21471, 1, x_21469); +return x_21471; +} +} +else +{ +lean_object* x_21472; lean_object* x_21473; lean_object* x_21474; lean_object* x_21475; +lean_dec(x_21452); +lean_dec(x_21447); +lean_dec(x_21442); +lean_dec(x_21441); +lean_dec(x_21433); +lean_dec(x_20612); +lean_dec(x_20601); +lean_dec(x_153); +lean_dec(x_5); +lean_dec(x_4); +lean_dec(x_2); +lean_dec(x_1); +x_21472 = lean_ctor_get(x_21455, 0); +lean_inc(x_21472); +x_21473 = lean_ctor_get(x_21455, 1); +lean_inc(x_21473); +if (lean_is_exclusive(x_21455)) { + lean_ctor_release(x_21455, 0); + lean_ctor_release(x_21455, 1); + x_21474 = x_21455; +} else { + lean_dec_ref(x_21455); + x_21474 = lean_box(0); +} +if (lean_is_scalar(x_21474)) { + x_21475 = lean_alloc_ctor(1, 2, 0); +} else { + x_21475 = x_21474; +} +lean_ctor_set(x_21475, 0, x_21472); +lean_ctor_set(x_21475, 1, x_21473); +return x_21475; +} +} +else +{ +lean_object* x_21476; lean_object* x_21477; lean_object* x_21478; lean_object* x_21479; 
lean_object* x_21480; lean_object* x_21481; lean_object* x_21482; lean_object* x_21483; lean_object* x_21484; +lean_dec(x_21436); +lean_dec(x_21434); +lean_inc(x_20601); +lean_inc(x_153); +x_21476 = lean_alloc_ctor(6, 2, 0); +lean_ctor_set(x_21476, 0, x_153); +lean_ctor_set(x_21476, 1, x_20601); +x_21477 = lean_ctor_get(x_1, 0); +lean_inc(x_21477); +x_21478 = l_Lean_IR_ToIR_bindVar(x_21477, x_20607, x_4, x_5, x_21431); +x_21479 = lean_ctor_get(x_21478, 0); +lean_inc(x_21479); +x_21480 = lean_ctor_get(x_21478, 1); +lean_inc(x_21480); +lean_dec(x_21478); +x_21481 = lean_ctor_get(x_21479, 0); +lean_inc(x_21481); +x_21482 = lean_ctor_get(x_21479, 1); +lean_inc(x_21482); +lean_dec(x_21479); +x_21483 = lean_ctor_get(x_1, 2); +lean_inc(x_21483); +lean_inc(x_5); +lean_inc(x_4); +x_21484 = l_Lean_IR_ToIR_lowerType(x_21483, x_21482, x_4, x_5, x_21480); +if (lean_obj_tag(x_21484) == 0) +{ +lean_object* x_21485; lean_object* x_21486; lean_object* x_21487; lean_object* x_21488; lean_object* x_21489; +x_21485 = lean_ctor_get(x_21484, 0); +lean_inc(x_21485); +x_21486 = lean_ctor_get(x_21484, 1); +lean_inc(x_21486); +lean_dec(x_21484); +x_21487 = lean_ctor_get(x_21485, 0); +lean_inc(x_21487); +x_21488 = lean_ctor_get(x_21485, 1); +lean_inc(x_21488); +lean_dec(x_21485); +lean_inc(x_5); +lean_inc(x_4); +lean_inc(x_2); +x_21489 = l_Lean_IR_ToIR_lowerLet___lambda__1(x_2, x_21481, x_21476, x_21487, x_21488, x_4, x_5, x_21486); +if (lean_obj_tag(x_21489) == 0) +{ +lean_object* x_21490; lean_object* x_21491; lean_object* x_21492; lean_object* x_21493; lean_object* x_21494; lean_object* x_21495; lean_object* x_21496; +x_21490 = lean_ctor_get(x_21489, 0); +lean_inc(x_21490); +x_21491 = lean_ctor_get(x_21489, 1); +lean_inc(x_21491); +lean_dec(x_21489); +x_21492 = lean_ctor_get(x_21490, 0); +lean_inc(x_21492); +x_21493 = lean_ctor_get(x_21490, 1); +lean_inc(x_21493); +if (lean_is_exclusive(x_21490)) { + lean_ctor_release(x_21490, 0); + lean_ctor_release(x_21490, 1); + x_21494 = x_21490; +} else { + lean_dec_ref(x_21490); + x_21494 = lean_box(0); +} +if (lean_is_scalar(x_21433)) { + x_21495 = lean_alloc_ctor(1, 1, 0); +} else { + x_21495 = x_21433; +} +lean_ctor_set(x_21495, 0, x_21492); +if (lean_is_scalar(x_21494)) { + x_21496 = lean_alloc_ctor(0, 2, 0); +} else { + x_21496 = x_21494; +} +lean_ctor_set(x_21496, 0, x_21495); +lean_ctor_set(x_21496, 1, x_21493); +x_21214 = x_21496; +x_21215 = x_21491; +goto block_21244; +} +else +{ +lean_object* x_21497; lean_object* x_21498; lean_object* x_21499; lean_object* x_21500; +lean_dec(x_21433); +lean_dec(x_20612); +lean_dec(x_20601); +lean_dec(x_153); +lean_dec(x_5); +lean_dec(x_4); +lean_dec(x_2); +lean_dec(x_1); +x_21497 = lean_ctor_get(x_21489, 0); +lean_inc(x_21497); +x_21498 = lean_ctor_get(x_21489, 1); +lean_inc(x_21498); +if (lean_is_exclusive(x_21489)) { + lean_ctor_release(x_21489, 0); + lean_ctor_release(x_21489, 1); + x_21499 = x_21489; +} else { + lean_dec_ref(x_21489); + x_21499 = lean_box(0); +} +if (lean_is_scalar(x_21499)) { + x_21500 = lean_alloc_ctor(1, 2, 0); +} else { + x_21500 = x_21499; +} +lean_ctor_set(x_21500, 0, x_21497); +lean_ctor_set(x_21500, 1, x_21498); +return x_21500; +} +} +else +{ +lean_object* x_21501; lean_object* x_21502; lean_object* x_21503; lean_object* x_21504; +lean_dec(x_21481); +lean_dec(x_21476); +lean_dec(x_21433); +lean_dec(x_20612); +lean_dec(x_20601); +lean_dec(x_153); +lean_dec(x_5); +lean_dec(x_4); +lean_dec(x_2); +lean_dec(x_1); +x_21501 = lean_ctor_get(x_21484, 0); +lean_inc(x_21501); +x_21502 = lean_ctor_get(x_21484, 
1); +lean_inc(x_21502); +if (lean_is_exclusive(x_21484)) { + lean_ctor_release(x_21484, 0); + lean_ctor_release(x_21484, 1); + x_21503 = x_21484; +} else { + lean_dec_ref(x_21484); + x_21503 = lean_box(0); +} +if (lean_is_scalar(x_21503)) { + x_21504 = lean_alloc_ctor(1, 2, 0); +} else { + x_21504 = x_21503; +} +lean_ctor_set(x_21504, 0, x_21501); +lean_ctor_set(x_21504, 1, x_21502); +return x_21504; +} +} +} +else +{ +lean_object* x_21505; lean_object* x_21506; lean_object* x_21507; lean_object* x_21508; lean_object* x_21509; lean_object* x_21510; lean_object* x_21511; lean_object* x_21512; lean_object* x_21513; +lean_dec(x_21436); +lean_dec(x_21434); +lean_inc(x_20601); +lean_inc(x_153); +x_21505 = lean_alloc_ctor(7, 2, 0); +lean_ctor_set(x_21505, 0, x_153); +lean_ctor_set(x_21505, 1, x_20601); +x_21506 = lean_ctor_get(x_1, 0); +lean_inc(x_21506); +x_21507 = l_Lean_IR_ToIR_bindVar(x_21506, x_20607, x_4, x_5, x_21431); +x_21508 = lean_ctor_get(x_21507, 0); +lean_inc(x_21508); +x_21509 = lean_ctor_get(x_21507, 1); +lean_inc(x_21509); +lean_dec(x_21507); +x_21510 = lean_ctor_get(x_21508, 0); +lean_inc(x_21510); +x_21511 = lean_ctor_get(x_21508, 1); +lean_inc(x_21511); +lean_dec(x_21508); +x_21512 = lean_box(7); +lean_inc(x_5); +lean_inc(x_4); +lean_inc(x_2); +x_21513 = l_Lean_IR_ToIR_lowerLet___lambda__1(x_2, x_21510, x_21505, x_21512, x_21511, x_4, x_5, x_21509); +if (lean_obj_tag(x_21513) == 0) +{ +lean_object* x_21514; lean_object* x_21515; lean_object* x_21516; lean_object* x_21517; lean_object* x_21518; lean_object* x_21519; lean_object* x_21520; +x_21514 = lean_ctor_get(x_21513, 0); +lean_inc(x_21514); +x_21515 = lean_ctor_get(x_21513, 1); +lean_inc(x_21515); +lean_dec(x_21513); +x_21516 = lean_ctor_get(x_21514, 0); +lean_inc(x_21516); +x_21517 = lean_ctor_get(x_21514, 1); +lean_inc(x_21517); +if (lean_is_exclusive(x_21514)) { + lean_ctor_release(x_21514, 0); + lean_ctor_release(x_21514, 1); + x_21518 = x_21514; +} else { + lean_dec_ref(x_21514); + x_21518 = lean_box(0); +} +if (lean_is_scalar(x_21433)) { + x_21519 = lean_alloc_ctor(1, 1, 0); +} else { + x_21519 = x_21433; +} +lean_ctor_set(x_21519, 0, x_21516); +if (lean_is_scalar(x_21518)) { + x_21520 = lean_alloc_ctor(0, 2, 0); +} else { + x_21520 = x_21518; +} +lean_ctor_set(x_21520, 0, x_21519); +lean_ctor_set(x_21520, 1, x_21517); +x_21214 = x_21520; +x_21215 = x_21515; +goto block_21244; +} +else +{ +lean_object* x_21521; lean_object* x_21522; lean_object* x_21523; lean_object* x_21524; +lean_dec(x_21433); +lean_dec(x_20612); +lean_dec(x_20601); +lean_dec(x_153); +lean_dec(x_5); +lean_dec(x_4); +lean_dec(x_2); +lean_dec(x_1); +x_21521 = lean_ctor_get(x_21513, 0); +lean_inc(x_21521); +x_21522 = lean_ctor_get(x_21513, 1); +lean_inc(x_21522); +if (lean_is_exclusive(x_21513)) { + lean_ctor_release(x_21513, 0); + lean_ctor_release(x_21513, 1); + x_21523 = x_21513; +} else { + lean_dec_ref(x_21513); + x_21523 = lean_box(0); +} +if (lean_is_scalar(x_21523)) { + x_21524 = lean_alloc_ctor(1, 2, 0); +} else { + x_21524 = x_21523; +} +lean_ctor_set(x_21524, 0, x_21521); +lean_ctor_set(x_21524, 1, x_21522); +return x_21524; +} +} +} +} +block_21244: +{ +lean_object* x_21216; +x_21216 = lean_ctor_get(x_21214, 0); +lean_inc(x_21216); +if (lean_obj_tag(x_21216) == 0) +{ +lean_object* x_21217; lean_object* x_21218; lean_object* x_21219; lean_object* x_21220; lean_object* x_21221; lean_object* x_21222; lean_object* x_21223; lean_object* x_21224; lean_object* x_21225; lean_object* x_21226; +lean_dec(x_20612); +x_21217 = lean_ctor_get(x_21214, 
1); +lean_inc(x_21217); +lean_dec(x_21214); +x_21218 = lean_alloc_ctor(6, 2, 0); +lean_ctor_set(x_21218, 0, x_153); +lean_ctor_set(x_21218, 1, x_20601); +x_21219 = lean_ctor_get(x_1, 0); +lean_inc(x_21219); +x_21220 = l_Lean_IR_ToIR_bindVar(x_21219, x_21217, x_4, x_5, x_21215); +x_21221 = lean_ctor_get(x_21220, 0); +lean_inc(x_21221); +x_21222 = lean_ctor_get(x_21220, 1); +lean_inc(x_21222); +lean_dec(x_21220); +x_21223 = lean_ctor_get(x_21221, 0); +lean_inc(x_21223); +x_21224 = lean_ctor_get(x_21221, 1); +lean_inc(x_21224); +lean_dec(x_21221); +x_21225 = lean_ctor_get(x_1, 2); +lean_inc(x_21225); +lean_dec(x_1); +lean_inc(x_5); +lean_inc(x_4); +x_21226 = l_Lean_IR_ToIR_lowerType(x_21225, x_21224, x_4, x_5, x_21222); +if (lean_obj_tag(x_21226) == 0) +{ +lean_object* x_21227; lean_object* x_21228; lean_object* x_21229; lean_object* x_21230; lean_object* x_21231; +x_21227 = lean_ctor_get(x_21226, 0); +lean_inc(x_21227); +x_21228 = lean_ctor_get(x_21226, 1); +lean_inc(x_21228); +lean_dec(x_21226); +x_21229 = lean_ctor_get(x_21227, 0); +lean_inc(x_21229); +x_21230 = lean_ctor_get(x_21227, 1); +lean_inc(x_21230); +lean_dec(x_21227); +x_21231 = l_Lean_IR_ToIR_lowerLet___lambda__1(x_2, x_21223, x_21218, x_21229, x_21230, x_4, x_5, x_21228); +return x_21231; +} +else +{ +uint8_t x_21232; +lean_dec(x_21223); +lean_dec(x_21218); +lean_dec(x_5); +lean_dec(x_4); +lean_dec(x_2); +x_21232 = !lean_is_exclusive(x_21226); +if (x_21232 == 0) +{ +return x_21226; +} +else +{ +lean_object* x_21233; lean_object* x_21234; lean_object* x_21235; +x_21233 = lean_ctor_get(x_21226, 0); +x_21234 = lean_ctor_get(x_21226, 1); +lean_inc(x_21234); +lean_inc(x_21233); +lean_dec(x_21226); +x_21235 = lean_alloc_ctor(1, 2, 0); +lean_ctor_set(x_21235, 0, x_21233); +lean_ctor_set(x_21235, 1, x_21234); +return x_21235; +} +} +} +else +{ +uint8_t x_21236; +lean_dec(x_20601); +lean_dec(x_153); +lean_dec(x_5); +lean_dec(x_4); +lean_dec(x_2); +lean_dec(x_1); +x_21236 = !lean_is_exclusive(x_21214); +if (x_21236 == 0) +{ +lean_object* x_21237; lean_object* x_21238; lean_object* x_21239; +x_21237 = lean_ctor_get(x_21214, 0); +lean_dec(x_21237); +x_21238 = lean_ctor_get(x_21216, 0); +lean_inc(x_21238); +lean_dec(x_21216); +lean_ctor_set(x_21214, 0, x_21238); +if (lean_is_scalar(x_20612)) { + x_21239 = lean_alloc_ctor(0, 2, 0); +} else { + x_21239 = x_20612; +} +lean_ctor_set(x_21239, 0, x_21214); +lean_ctor_set(x_21239, 1, x_21215); +return x_21239; +} +else +{ +lean_object* x_21240; lean_object* x_21241; lean_object* x_21242; lean_object* x_21243; +x_21240 = lean_ctor_get(x_21214, 1); +lean_inc(x_21240); +lean_dec(x_21214); +x_21241 = lean_ctor_get(x_21216, 0); +lean_inc(x_21241); +lean_dec(x_21216); +x_21242 = lean_alloc_ctor(0, 2, 0); +lean_ctor_set(x_21242, 0, x_21241); +lean_ctor_set(x_21242, 1, x_21240); +if (lean_is_scalar(x_20612)) { + x_21243 = lean_alloc_ctor(0, 2, 0); +} else { + x_21243 = x_20612; +} +lean_ctor_set(x_21243, 0, x_21242); +lean_ctor_set(x_21243, 1, x_21215); +return x_21243; +} +} +} +} +case 4: +{ +uint8_t x_21525; +lean_dec(x_20613); +lean_dec(x_20612); +lean_free_object(x_20603); +lean_dec(x_20593); +lean_dec(x_20592); +x_21525 = !lean_is_exclusive(x_20618); +if (x_21525 == 0) +{ +lean_object* x_21526; lean_object* x_21527; uint8_t x_21528; +x_21526 = lean_ctor_get(x_20618, 0); +lean_dec(x_21526); +x_21527 = l_Lean_IR_ToIR_lowerLet___closed__20; +x_21528 = lean_name_eq(x_153, x_21527); +if (x_21528 == 0) +{ +uint8_t x_21529; lean_object* x_21530; lean_object* x_21531; lean_object* x_21532; lean_object* 
x_21533; lean_object* x_21534; lean_object* x_21535; lean_object* x_21536; lean_object* x_21537; +lean_dec(x_20601); +lean_dec(x_2); +lean_dec(x_1); +x_21529 = 1; +x_21530 = l_Lean_IR_ToIR_lowerLet___closed__14; +x_21531 = l_Lean_Name_toString(x_153, x_21529, x_21530); +lean_ctor_set_tag(x_20618, 3); +lean_ctor_set(x_20618, 0, x_21531); +x_21532 = l_Lean_IR_ToIR_lowerLet___closed__22; +x_21533 = lean_alloc_ctor(5, 2, 0); +lean_ctor_set(x_21533, 0, x_21532); +lean_ctor_set(x_21533, 1, x_20618); +x_21534 = l_Lean_IR_ToIR_lowerLet___closed__24; +x_21535 = lean_alloc_ctor(5, 2, 0); +lean_ctor_set(x_21535, 0, x_21533); +lean_ctor_set(x_21535, 1, x_21534); +x_21536 = l_Lean_MessageData_ofFormat(x_21535); +x_21537 = l_Lean_throwError___at_Lean_IR_ToIR_lowerLet___spec__1(x_21536, x_20607, x_4, x_5, x_20611); +lean_dec(x_5); +lean_dec(x_4); +lean_dec(x_20607); +return x_21537; +} +else +{ +lean_object* x_21538; lean_object* x_21539; lean_object* x_21540; +lean_free_object(x_20618); +lean_dec(x_153); +x_21538 = l_Lean_IR_instInhabitedArg; +x_21539 = lean_unsigned_to_nat(2u); +x_21540 = lean_array_get(x_21538, x_20601, x_21539); +lean_dec(x_20601); +if (lean_obj_tag(x_21540) == 0) +{ +lean_object* x_21541; lean_object* x_21542; lean_object* x_21543; lean_object* x_21544; lean_object* x_21545; lean_object* x_21546; lean_object* x_21547; +x_21541 = lean_ctor_get(x_21540, 0); +lean_inc(x_21541); +lean_dec(x_21540); +x_21542 = lean_ctor_get(x_1, 0); +lean_inc(x_21542); +lean_dec(x_1); +x_21543 = l_Lean_IR_ToIR_bindVarToVarId(x_21542, x_21541, x_20607, x_4, x_5, x_20611); +x_21544 = lean_ctor_get(x_21543, 0); +lean_inc(x_21544); +x_21545 = lean_ctor_get(x_21543, 1); +lean_inc(x_21545); +lean_dec(x_21543); +x_21546 = lean_ctor_get(x_21544, 1); +lean_inc(x_21546); +lean_dec(x_21544); +x_21547 = l_Lean_IR_ToIR_lowerCode(x_2, x_21546, x_4, x_5, x_21545); +return x_21547; +} +else +{ +lean_object* x_21548; lean_object* x_21549; lean_object* x_21550; lean_object* x_21551; lean_object* x_21552; lean_object* x_21553; +x_21548 = lean_ctor_get(x_1, 0); +lean_inc(x_21548); +lean_dec(x_1); +x_21549 = l_Lean_IR_ToIR_bindErased(x_21548, x_20607, x_4, x_5, x_20611); +x_21550 = lean_ctor_get(x_21549, 0); +lean_inc(x_21550); +x_21551 = lean_ctor_get(x_21549, 1); +lean_inc(x_21551); +lean_dec(x_21549); +x_21552 = lean_ctor_get(x_21550, 1); +lean_inc(x_21552); +lean_dec(x_21550); +x_21553 = l_Lean_IR_ToIR_lowerCode(x_2, x_21552, x_4, x_5, x_21551); +return x_21553; +} +} +} +else +{ +lean_object* x_21554; uint8_t x_21555; +lean_dec(x_20618); +x_21554 = l_Lean_IR_ToIR_lowerLet___closed__20; +x_21555 = lean_name_eq(x_153, x_21554); +if (x_21555 == 0) +{ +uint8_t x_21556; lean_object* x_21557; lean_object* x_21558; lean_object* x_21559; lean_object* x_21560; lean_object* x_21561; lean_object* x_21562; lean_object* x_21563; lean_object* x_21564; lean_object* x_21565; +lean_dec(x_20601); +lean_dec(x_2); +lean_dec(x_1); +x_21556 = 1; +x_21557 = l_Lean_IR_ToIR_lowerLet___closed__14; +x_21558 = l_Lean_Name_toString(x_153, x_21556, x_21557); +x_21559 = lean_alloc_ctor(3, 1, 0); +lean_ctor_set(x_21559, 0, x_21558); +x_21560 = l_Lean_IR_ToIR_lowerLet___closed__22; +x_21561 = lean_alloc_ctor(5, 2, 0); +lean_ctor_set(x_21561, 0, x_21560); +lean_ctor_set(x_21561, 1, x_21559); +x_21562 = l_Lean_IR_ToIR_lowerLet___closed__24; +x_21563 = lean_alloc_ctor(5, 2, 0); +lean_ctor_set(x_21563, 0, x_21561); +lean_ctor_set(x_21563, 1, x_21562); +x_21564 = l_Lean_MessageData_ofFormat(x_21563); +x_21565 = 
l_Lean_throwError___at_Lean_IR_ToIR_lowerLet___spec__1(x_21564, x_20607, x_4, x_5, x_20611); +lean_dec(x_5); +lean_dec(x_4); +lean_dec(x_20607); +return x_21565; +} +else +{ +lean_object* x_21566; lean_object* x_21567; lean_object* x_21568; +lean_dec(x_153); +x_21566 = l_Lean_IR_instInhabitedArg; +x_21567 = lean_unsigned_to_nat(2u); +x_21568 = lean_array_get(x_21566, x_20601, x_21567); +lean_dec(x_20601); +if (lean_obj_tag(x_21568) == 0) +{ +lean_object* x_21569; lean_object* x_21570; lean_object* x_21571; lean_object* x_21572; lean_object* x_21573; lean_object* x_21574; lean_object* x_21575; +x_21569 = lean_ctor_get(x_21568, 0); +lean_inc(x_21569); +lean_dec(x_21568); +x_21570 = lean_ctor_get(x_1, 0); +lean_inc(x_21570); +lean_dec(x_1); +x_21571 = l_Lean_IR_ToIR_bindVarToVarId(x_21570, x_21569, x_20607, x_4, x_5, x_20611); +x_21572 = lean_ctor_get(x_21571, 0); +lean_inc(x_21572); +x_21573 = lean_ctor_get(x_21571, 1); +lean_inc(x_21573); +lean_dec(x_21571); +x_21574 = lean_ctor_get(x_21572, 1); +lean_inc(x_21574); +lean_dec(x_21572); +x_21575 = l_Lean_IR_ToIR_lowerCode(x_2, x_21574, x_4, x_5, x_21573); +return x_21575; +} +else +{ +lean_object* x_21576; lean_object* x_21577; lean_object* x_21578; lean_object* x_21579; lean_object* x_21580; lean_object* x_21581; +x_21576 = lean_ctor_get(x_1, 0); +lean_inc(x_21576); +lean_dec(x_1); +x_21577 = l_Lean_IR_ToIR_bindErased(x_21576, x_20607, x_4, x_5, x_20611); +x_21578 = lean_ctor_get(x_21577, 0); +lean_inc(x_21578); +x_21579 = lean_ctor_get(x_21577, 1); +lean_inc(x_21579); +lean_dec(x_21577); +x_21580 = lean_ctor_get(x_21578, 1); +lean_inc(x_21580); +lean_dec(x_21578); +x_21581 = l_Lean_IR_ToIR_lowerCode(x_2, x_21580, x_4, x_5, x_21579); +return x_21581; +} +} +} +} +case 5: +{ +lean_object* x_21582; lean_object* x_21583; +lean_dec(x_20618); +lean_dec(x_20613); +lean_dec(x_20612); +lean_free_object(x_20603); +lean_dec(x_20601); +lean_dec(x_20593); +lean_dec(x_20592); +lean_dec(x_153); +lean_dec(x_2); +lean_dec(x_1); +x_21582 = l_Lean_IR_ToIR_lowerLet___closed__26; +x_21583 = l_panic___at_Lean_IR_ToIR_lowerAlt_loop___spec__1(x_21582, x_20607, x_4, x_5, x_20611); +return x_21583; +} +case 6: +{ +lean_object* x_21584; uint8_t x_21585; +x_21584 = lean_ctor_get(x_20618, 0); +lean_inc(x_21584); +lean_dec(x_20618); +lean_inc(x_153); +x_21585 = l_Lean_isExtern(x_20613, x_153); +if (x_21585 == 0) +{ +lean_object* x_21586; +lean_dec(x_20612); +lean_free_object(x_20603); +lean_dec(x_20601); +lean_inc(x_5); +lean_inc(x_4); +x_21586 = l_Lean_IR_ToIR_getCtorInfo(x_153, x_20607, x_4, x_5, x_20611); +if (lean_obj_tag(x_21586) == 0) +{ +lean_object* x_21587; lean_object* x_21588; lean_object* x_21589; lean_object* x_21590; lean_object* x_21591; lean_object* x_21592; lean_object* x_21593; lean_object* x_21594; lean_object* x_21595; lean_object* x_21596; lean_object* x_21597; lean_object* x_21598; lean_object* x_21599; lean_object* x_21600; lean_object* x_21601; lean_object* x_21602; lean_object* x_21603; lean_object* x_21604; lean_object* x_21605; lean_object* x_21606; +x_21587 = lean_ctor_get(x_21586, 0); +lean_inc(x_21587); +x_21588 = lean_ctor_get(x_21587, 0); +lean_inc(x_21588); +x_21589 = lean_ctor_get(x_21586, 1); +lean_inc(x_21589); +lean_dec(x_21586); +x_21590 = lean_ctor_get(x_21587, 1); +lean_inc(x_21590); +lean_dec(x_21587); +x_21591 = lean_ctor_get(x_21588, 0); +lean_inc(x_21591); +x_21592 = lean_ctor_get(x_21588, 1); +lean_inc(x_21592); +lean_dec(x_21588); +x_21593 = lean_ctor_get(x_21584, 3); +lean_inc(x_21593); +lean_dec(x_21584); +x_21594 = 
lean_array_get_size(x_20592); +x_21595 = l_Array_extract___rarg(x_20592, x_21593, x_21594); +lean_dec(x_21594); +lean_dec(x_20592); +x_21596 = lean_array_get_size(x_21592); +x_21597 = lean_unsigned_to_nat(0u); +x_21598 = lean_unsigned_to_nat(1u); +if (lean_is_scalar(x_20593)) { + x_21599 = lean_alloc_ctor(0, 3, 0); +} else { + x_21599 = x_20593; + lean_ctor_set_tag(x_21599, 0); +} +lean_ctor_set(x_21599, 0, x_21597); +lean_ctor_set(x_21599, 1, x_21596); +lean_ctor_set(x_21599, 2, x_21598); +x_21600 = l_Lean_IR_ToIR_lowerLet___closed__27; +x_21601 = l_Std_Range_forIn_x27_loop___at_Lean_IR_ToIR_lowerLet___spec__9(x_21592, x_21595, x_21599, x_21599, x_21600, x_21597, lean_box(0), lean_box(0), x_21590, x_4, x_5, x_21589); +lean_dec(x_21599); +x_21602 = lean_ctor_get(x_21601, 0); +lean_inc(x_21602); +x_21603 = lean_ctor_get(x_21601, 1); +lean_inc(x_21603); +lean_dec(x_21601); +x_21604 = lean_ctor_get(x_21602, 0); +lean_inc(x_21604); +x_21605 = lean_ctor_get(x_21602, 1); +lean_inc(x_21605); +lean_dec(x_21602); +x_21606 = l_Lean_IR_ToIR_lowerLet___lambda__4(x_1, x_2, x_21591, x_21592, x_21595, x_21604, x_21605, x_4, x_5, x_21603); +lean_dec(x_21595); +lean_dec(x_21592); +return x_21606; +} +else +{ +uint8_t x_21607; +lean_dec(x_21584); +lean_dec(x_20593); +lean_dec(x_20592); +lean_dec(x_5); +lean_dec(x_4); +lean_dec(x_2); +lean_dec(x_1); +x_21607 = !lean_is_exclusive(x_21586); +if (x_21607 == 0) +{ +return x_21586; +} +else +{ +lean_object* x_21608; lean_object* x_21609; lean_object* x_21610; +x_21608 = lean_ctor_get(x_21586, 0); +x_21609 = lean_ctor_get(x_21586, 1); +lean_inc(x_21609); +lean_inc(x_21608); +lean_dec(x_21586); +x_21610 = lean_alloc_ctor(1, 2, 0); +lean_ctor_set(x_21610, 0, x_21608); +lean_ctor_set(x_21610, 1, x_21609); +return x_21610; +} +} +} +else +{ +lean_object* x_21611; lean_object* x_21612; lean_object* x_21642; lean_object* x_21643; +lean_dec(x_21584); +lean_dec(x_20593); +lean_dec(x_20592); +lean_inc(x_153); +x_21642 = l_Lean_Compiler_LCNF_getMonoDecl_x3f(x_153, x_4, x_5, x_20611); +x_21643 = lean_ctor_get(x_21642, 0); +lean_inc(x_21643); +if (lean_obj_tag(x_21643) == 0) +{ +lean_object* x_21644; lean_object* x_21645; +x_21644 = lean_ctor_get(x_21642, 1); +lean_inc(x_21644); +lean_dec(x_21642); +x_21645 = lean_box(0); +lean_ctor_set(x_20603, 0, x_21645); +x_21611 = x_20603; +x_21612 = x_21644; +goto block_21641; +} +else +{ +uint8_t x_21646; +lean_free_object(x_20603); +x_21646 = !lean_is_exclusive(x_21642); +if (x_21646 == 0) +{ +lean_object* x_21647; lean_object* x_21648; uint8_t x_21649; +x_21647 = lean_ctor_get(x_21642, 1); +x_21648 = lean_ctor_get(x_21642, 0); +lean_dec(x_21648); +x_21649 = !lean_is_exclusive(x_21643); +if (x_21649 == 0) +{ +lean_object* x_21650; lean_object* x_21651; lean_object* x_21652; lean_object* x_21653; uint8_t x_21654; +x_21650 = lean_ctor_get(x_21643, 0); +x_21651 = lean_array_get_size(x_20601); +x_21652 = lean_ctor_get(x_21650, 3); +lean_inc(x_21652); +lean_dec(x_21650); +x_21653 = lean_array_get_size(x_21652); +lean_dec(x_21652); +x_21654 = lean_nat_dec_lt(x_21651, x_21653); +if (x_21654 == 0) +{ +uint8_t x_21655; +x_21655 = lean_nat_dec_eq(x_21651, x_21653); +if (x_21655 == 0) +{ +lean_object* x_21656; lean_object* x_21657; lean_object* x_21658; lean_object* x_21659; lean_object* x_21660; lean_object* x_21661; lean_object* x_21662; lean_object* x_21663; lean_object* x_21664; lean_object* x_21665; lean_object* x_21666; lean_object* x_21667; lean_object* x_21668; lean_object* x_21669; lean_object* x_21670; lean_object* x_21671; +x_21656 = 
lean_unsigned_to_nat(0u); +x_21657 = l_Array_extract___rarg(x_20601, x_21656, x_21653); +x_21658 = l_Array_extract___rarg(x_20601, x_21653, x_21651); +lean_dec(x_21651); +lean_inc(x_153); +lean_ctor_set_tag(x_21642, 6); +lean_ctor_set(x_21642, 1, x_21657); +lean_ctor_set(x_21642, 0, x_153); +x_21659 = lean_ctor_get(x_1, 0); +lean_inc(x_21659); +x_21660 = l_Lean_IR_ToIR_bindVar(x_21659, x_20607, x_4, x_5, x_21647); +x_21661 = lean_ctor_get(x_21660, 0); +lean_inc(x_21661); +x_21662 = lean_ctor_get(x_21660, 1); +lean_inc(x_21662); +lean_dec(x_21660); +x_21663 = lean_ctor_get(x_21661, 0); +lean_inc(x_21663); +x_21664 = lean_ctor_get(x_21661, 1); +lean_inc(x_21664); +lean_dec(x_21661); +x_21665 = l_Lean_IR_ToIR_newVar(x_21664, x_4, x_5, x_21662); +x_21666 = lean_ctor_get(x_21665, 0); +lean_inc(x_21666); +x_21667 = lean_ctor_get(x_21665, 1); +lean_inc(x_21667); +lean_dec(x_21665); +x_21668 = lean_ctor_get(x_21666, 0); +lean_inc(x_21668); +x_21669 = lean_ctor_get(x_21666, 1); +lean_inc(x_21669); +lean_dec(x_21666); +x_21670 = lean_ctor_get(x_1, 2); +lean_inc(x_21670); +lean_inc(x_5); +lean_inc(x_4); +x_21671 = l_Lean_IR_ToIR_lowerType(x_21670, x_21669, x_4, x_5, x_21667); +if (lean_obj_tag(x_21671) == 0) +{ +lean_object* x_21672; lean_object* x_21673; lean_object* x_21674; lean_object* x_21675; lean_object* x_21676; +x_21672 = lean_ctor_get(x_21671, 0); +lean_inc(x_21672); +x_21673 = lean_ctor_get(x_21671, 1); +lean_inc(x_21673); +lean_dec(x_21671); +x_21674 = lean_ctor_get(x_21672, 0); +lean_inc(x_21674); +x_21675 = lean_ctor_get(x_21672, 1); +lean_inc(x_21675); +lean_dec(x_21672); +lean_inc(x_5); +lean_inc(x_4); +lean_inc(x_2); +x_21676 = l_Lean_IR_ToIR_lowerLet___lambda__3(x_2, x_21668, x_21658, x_21663, x_21642, x_21674, x_21675, x_4, x_5, x_21673); +if (lean_obj_tag(x_21676) == 0) +{ +lean_object* x_21677; lean_object* x_21678; uint8_t x_21679; +x_21677 = lean_ctor_get(x_21676, 0); +lean_inc(x_21677); +x_21678 = lean_ctor_get(x_21676, 1); +lean_inc(x_21678); +lean_dec(x_21676); +x_21679 = !lean_is_exclusive(x_21677); +if (x_21679 == 0) +{ +lean_object* x_21680; +x_21680 = lean_ctor_get(x_21677, 0); +lean_ctor_set(x_21643, 0, x_21680); +lean_ctor_set(x_21677, 0, x_21643); +x_21611 = x_21677; +x_21612 = x_21678; +goto block_21641; +} +else +{ +lean_object* x_21681; lean_object* x_21682; lean_object* x_21683; +x_21681 = lean_ctor_get(x_21677, 0); +x_21682 = lean_ctor_get(x_21677, 1); +lean_inc(x_21682); +lean_inc(x_21681); +lean_dec(x_21677); +lean_ctor_set(x_21643, 0, x_21681); +x_21683 = lean_alloc_ctor(0, 2, 0); +lean_ctor_set(x_21683, 0, x_21643); +lean_ctor_set(x_21683, 1, x_21682); +x_21611 = x_21683; +x_21612 = x_21678; +goto block_21641; +} +} +else +{ +uint8_t x_21684; +lean_free_object(x_21643); +lean_dec(x_20612); +lean_dec(x_20601); +lean_dec(x_153); +lean_dec(x_5); +lean_dec(x_4); +lean_dec(x_2); +lean_dec(x_1); +x_21684 = !lean_is_exclusive(x_21676); +if (x_21684 == 0) +{ +return x_21676; +} +else +{ +lean_object* x_21685; lean_object* x_21686; lean_object* x_21687; +x_21685 = lean_ctor_get(x_21676, 0); +x_21686 = lean_ctor_get(x_21676, 1); +lean_inc(x_21686); +lean_inc(x_21685); +lean_dec(x_21676); +x_21687 = lean_alloc_ctor(1, 2, 0); +lean_ctor_set(x_21687, 0, x_21685); +lean_ctor_set(x_21687, 1, x_21686); +return x_21687; +} +} +} +else +{ +uint8_t x_21688; +lean_dec(x_21668); +lean_dec(x_21663); +lean_dec(x_21642); +lean_dec(x_21658); +lean_free_object(x_21643); +lean_dec(x_20612); +lean_dec(x_20601); +lean_dec(x_153); +lean_dec(x_5); +lean_dec(x_4); +lean_dec(x_2); 
+lean_dec(x_1); +x_21688 = !lean_is_exclusive(x_21671); +if (x_21688 == 0) +{ +return x_21671; +} +else +{ +lean_object* x_21689; lean_object* x_21690; lean_object* x_21691; +x_21689 = lean_ctor_get(x_21671, 0); +x_21690 = lean_ctor_get(x_21671, 1); +lean_inc(x_21690); +lean_inc(x_21689); +lean_dec(x_21671); +x_21691 = lean_alloc_ctor(1, 2, 0); +lean_ctor_set(x_21691, 0, x_21689); +lean_ctor_set(x_21691, 1, x_21690); +return x_21691; +} +} +} +else +{ +lean_object* x_21692; lean_object* x_21693; lean_object* x_21694; lean_object* x_21695; lean_object* x_21696; lean_object* x_21697; lean_object* x_21698; lean_object* x_21699; +lean_dec(x_21653); +lean_dec(x_21651); +lean_inc(x_20601); +lean_inc(x_153); +lean_ctor_set_tag(x_21642, 6); +lean_ctor_set(x_21642, 1, x_20601); +lean_ctor_set(x_21642, 0, x_153); +x_21692 = lean_ctor_get(x_1, 0); +lean_inc(x_21692); +x_21693 = l_Lean_IR_ToIR_bindVar(x_21692, x_20607, x_4, x_5, x_21647); +x_21694 = lean_ctor_get(x_21693, 0); +lean_inc(x_21694); +x_21695 = lean_ctor_get(x_21693, 1); +lean_inc(x_21695); +lean_dec(x_21693); +x_21696 = lean_ctor_get(x_21694, 0); +lean_inc(x_21696); +x_21697 = lean_ctor_get(x_21694, 1); +lean_inc(x_21697); +lean_dec(x_21694); +x_21698 = lean_ctor_get(x_1, 2); +lean_inc(x_21698); +lean_inc(x_5); +lean_inc(x_4); +x_21699 = l_Lean_IR_ToIR_lowerType(x_21698, x_21697, x_4, x_5, x_21695); +if (lean_obj_tag(x_21699) == 0) +{ +lean_object* x_21700; lean_object* x_21701; lean_object* x_21702; lean_object* x_21703; lean_object* x_21704; +x_21700 = lean_ctor_get(x_21699, 0); +lean_inc(x_21700); +x_21701 = lean_ctor_get(x_21699, 1); +lean_inc(x_21701); +lean_dec(x_21699); +x_21702 = lean_ctor_get(x_21700, 0); +lean_inc(x_21702); +x_21703 = lean_ctor_get(x_21700, 1); +lean_inc(x_21703); +lean_dec(x_21700); +lean_inc(x_5); +lean_inc(x_4); +lean_inc(x_2); +x_21704 = l_Lean_IR_ToIR_lowerLet___lambda__1(x_2, x_21696, x_21642, x_21702, x_21703, x_4, x_5, x_21701); +if (lean_obj_tag(x_21704) == 0) +{ +lean_object* x_21705; lean_object* x_21706; uint8_t x_21707; +x_21705 = lean_ctor_get(x_21704, 0); +lean_inc(x_21705); +x_21706 = lean_ctor_get(x_21704, 1); +lean_inc(x_21706); +lean_dec(x_21704); +x_21707 = !lean_is_exclusive(x_21705); +if (x_21707 == 0) +{ +lean_object* x_21708; +x_21708 = lean_ctor_get(x_21705, 0); +lean_ctor_set(x_21643, 0, x_21708); +lean_ctor_set(x_21705, 0, x_21643); +x_21611 = x_21705; +x_21612 = x_21706; +goto block_21641; +} +else +{ +lean_object* x_21709; lean_object* x_21710; lean_object* x_21711; +x_21709 = lean_ctor_get(x_21705, 0); +x_21710 = lean_ctor_get(x_21705, 1); +lean_inc(x_21710); +lean_inc(x_21709); +lean_dec(x_21705); +lean_ctor_set(x_21643, 0, x_21709); +x_21711 = lean_alloc_ctor(0, 2, 0); +lean_ctor_set(x_21711, 0, x_21643); +lean_ctor_set(x_21711, 1, x_21710); +x_21611 = x_21711; +x_21612 = x_21706; +goto block_21641; +} +} +else +{ +uint8_t x_21712; +lean_free_object(x_21643); +lean_dec(x_20612); +lean_dec(x_20601); +lean_dec(x_153); +lean_dec(x_5); +lean_dec(x_4); +lean_dec(x_2); +lean_dec(x_1); +x_21712 = !lean_is_exclusive(x_21704); +if (x_21712 == 0) +{ +return x_21704; +} +else +{ +lean_object* x_21713; lean_object* x_21714; lean_object* x_21715; +x_21713 = lean_ctor_get(x_21704, 0); +x_21714 = lean_ctor_get(x_21704, 1); +lean_inc(x_21714); +lean_inc(x_21713); +lean_dec(x_21704); +x_21715 = lean_alloc_ctor(1, 2, 0); +lean_ctor_set(x_21715, 0, x_21713); +lean_ctor_set(x_21715, 1, x_21714); +return x_21715; +} +} +} +else +{ +uint8_t x_21716; +lean_dec(x_21696); +lean_dec(x_21642); 
+lean_free_object(x_21643); +lean_dec(x_20612); +lean_dec(x_20601); +lean_dec(x_153); +lean_dec(x_5); +lean_dec(x_4); +lean_dec(x_2); +lean_dec(x_1); +x_21716 = !lean_is_exclusive(x_21699); +if (x_21716 == 0) +{ +return x_21699; +} +else +{ +lean_object* x_21717; lean_object* x_21718; lean_object* x_21719; +x_21717 = lean_ctor_get(x_21699, 0); +x_21718 = lean_ctor_get(x_21699, 1); +lean_inc(x_21718); +lean_inc(x_21717); +lean_dec(x_21699); +x_21719 = lean_alloc_ctor(1, 2, 0); +lean_ctor_set(x_21719, 0, x_21717); +lean_ctor_set(x_21719, 1, x_21718); +return x_21719; +} +} +} +} +else +{ +lean_object* x_21720; lean_object* x_21721; lean_object* x_21722; lean_object* x_21723; lean_object* x_21724; lean_object* x_21725; lean_object* x_21726; lean_object* x_21727; +lean_dec(x_21653); +lean_dec(x_21651); +lean_inc(x_20601); +lean_inc(x_153); +lean_ctor_set_tag(x_21642, 7); +lean_ctor_set(x_21642, 1, x_20601); +lean_ctor_set(x_21642, 0, x_153); +x_21720 = lean_ctor_get(x_1, 0); +lean_inc(x_21720); +x_21721 = l_Lean_IR_ToIR_bindVar(x_21720, x_20607, x_4, x_5, x_21647); +x_21722 = lean_ctor_get(x_21721, 0); +lean_inc(x_21722); +x_21723 = lean_ctor_get(x_21721, 1); +lean_inc(x_21723); +lean_dec(x_21721); +x_21724 = lean_ctor_get(x_21722, 0); +lean_inc(x_21724); +x_21725 = lean_ctor_get(x_21722, 1); +lean_inc(x_21725); +lean_dec(x_21722); +x_21726 = lean_box(7); +lean_inc(x_5); +lean_inc(x_4); +lean_inc(x_2); +x_21727 = l_Lean_IR_ToIR_lowerLet___lambda__1(x_2, x_21724, x_21642, x_21726, x_21725, x_4, x_5, x_21723); +if (lean_obj_tag(x_21727) == 0) +{ +lean_object* x_21728; lean_object* x_21729; uint8_t x_21730; +x_21728 = lean_ctor_get(x_21727, 0); +lean_inc(x_21728); +x_21729 = lean_ctor_get(x_21727, 1); +lean_inc(x_21729); +lean_dec(x_21727); +x_21730 = !lean_is_exclusive(x_21728); +if (x_21730 == 0) +{ +lean_object* x_21731; +x_21731 = lean_ctor_get(x_21728, 0); +lean_ctor_set(x_21643, 0, x_21731); +lean_ctor_set(x_21728, 0, x_21643); +x_21611 = x_21728; +x_21612 = x_21729; +goto block_21641; +} +else +{ +lean_object* x_21732; lean_object* x_21733; lean_object* x_21734; +x_21732 = lean_ctor_get(x_21728, 0); +x_21733 = lean_ctor_get(x_21728, 1); +lean_inc(x_21733); +lean_inc(x_21732); +lean_dec(x_21728); +lean_ctor_set(x_21643, 0, x_21732); +x_21734 = lean_alloc_ctor(0, 2, 0); +lean_ctor_set(x_21734, 0, x_21643); +lean_ctor_set(x_21734, 1, x_21733); +x_21611 = x_21734; +x_21612 = x_21729; +goto block_21641; +} +} +else +{ +uint8_t x_21735; +lean_free_object(x_21643); +lean_dec(x_20612); +lean_dec(x_20601); +lean_dec(x_153); +lean_dec(x_5); +lean_dec(x_4); +lean_dec(x_2); +lean_dec(x_1); +x_21735 = !lean_is_exclusive(x_21727); +if (x_21735 == 0) +{ +return x_21727; +} +else +{ +lean_object* x_21736; lean_object* x_21737; lean_object* x_21738; +x_21736 = lean_ctor_get(x_21727, 0); +x_21737 = lean_ctor_get(x_21727, 1); +lean_inc(x_21737); +lean_inc(x_21736); +lean_dec(x_21727); +x_21738 = lean_alloc_ctor(1, 2, 0); +lean_ctor_set(x_21738, 0, x_21736); +lean_ctor_set(x_21738, 1, x_21737); +return x_21738; +} +} +} +} +else +{ +lean_object* x_21739; lean_object* x_21740; lean_object* x_21741; lean_object* x_21742; uint8_t x_21743; +x_21739 = lean_ctor_get(x_21643, 0); +lean_inc(x_21739); +lean_dec(x_21643); +x_21740 = lean_array_get_size(x_20601); +x_21741 = lean_ctor_get(x_21739, 3); +lean_inc(x_21741); +lean_dec(x_21739); +x_21742 = lean_array_get_size(x_21741); +lean_dec(x_21741); +x_21743 = lean_nat_dec_lt(x_21740, x_21742); +if (x_21743 == 0) +{ +uint8_t x_21744; +x_21744 = lean_nat_dec_eq(x_21740, 
x_21742); +if (x_21744 == 0) +{ +lean_object* x_21745; lean_object* x_21746; lean_object* x_21747; lean_object* x_21748; lean_object* x_21749; lean_object* x_21750; lean_object* x_21751; lean_object* x_21752; lean_object* x_21753; lean_object* x_21754; lean_object* x_21755; lean_object* x_21756; lean_object* x_21757; lean_object* x_21758; lean_object* x_21759; lean_object* x_21760; +x_21745 = lean_unsigned_to_nat(0u); +x_21746 = l_Array_extract___rarg(x_20601, x_21745, x_21742); +x_21747 = l_Array_extract___rarg(x_20601, x_21742, x_21740); +lean_dec(x_21740); +lean_inc(x_153); +lean_ctor_set_tag(x_21642, 6); +lean_ctor_set(x_21642, 1, x_21746); +lean_ctor_set(x_21642, 0, x_153); +x_21748 = lean_ctor_get(x_1, 0); +lean_inc(x_21748); +x_21749 = l_Lean_IR_ToIR_bindVar(x_21748, x_20607, x_4, x_5, x_21647); +x_21750 = lean_ctor_get(x_21749, 0); +lean_inc(x_21750); +x_21751 = lean_ctor_get(x_21749, 1); +lean_inc(x_21751); +lean_dec(x_21749); +x_21752 = lean_ctor_get(x_21750, 0); +lean_inc(x_21752); +x_21753 = lean_ctor_get(x_21750, 1); +lean_inc(x_21753); +lean_dec(x_21750); +x_21754 = l_Lean_IR_ToIR_newVar(x_21753, x_4, x_5, x_21751); +x_21755 = lean_ctor_get(x_21754, 0); +lean_inc(x_21755); +x_21756 = lean_ctor_get(x_21754, 1); +lean_inc(x_21756); +lean_dec(x_21754); +x_21757 = lean_ctor_get(x_21755, 0); +lean_inc(x_21757); +x_21758 = lean_ctor_get(x_21755, 1); +lean_inc(x_21758); +lean_dec(x_21755); +x_21759 = lean_ctor_get(x_1, 2); +lean_inc(x_21759); +lean_inc(x_5); +lean_inc(x_4); +x_21760 = l_Lean_IR_ToIR_lowerType(x_21759, x_21758, x_4, x_5, x_21756); +if (lean_obj_tag(x_21760) == 0) +{ +lean_object* x_21761; lean_object* x_21762; lean_object* x_21763; lean_object* x_21764; lean_object* x_21765; +x_21761 = lean_ctor_get(x_21760, 0); +lean_inc(x_21761); +x_21762 = lean_ctor_get(x_21760, 1); +lean_inc(x_21762); +lean_dec(x_21760); +x_21763 = lean_ctor_get(x_21761, 0); +lean_inc(x_21763); +x_21764 = lean_ctor_get(x_21761, 1); +lean_inc(x_21764); +lean_dec(x_21761); +lean_inc(x_5); +lean_inc(x_4); +lean_inc(x_2); +x_21765 = l_Lean_IR_ToIR_lowerLet___lambda__3(x_2, x_21757, x_21747, x_21752, x_21642, x_21763, x_21764, x_4, x_5, x_21762); +if (lean_obj_tag(x_21765) == 0) +{ +lean_object* x_21766; lean_object* x_21767; lean_object* x_21768; lean_object* x_21769; lean_object* x_21770; lean_object* x_21771; lean_object* x_21772; +x_21766 = lean_ctor_get(x_21765, 0); +lean_inc(x_21766); +x_21767 = lean_ctor_get(x_21765, 1); +lean_inc(x_21767); +lean_dec(x_21765); +x_21768 = lean_ctor_get(x_21766, 0); +lean_inc(x_21768); +x_21769 = lean_ctor_get(x_21766, 1); +lean_inc(x_21769); +if (lean_is_exclusive(x_21766)) { + lean_ctor_release(x_21766, 0); + lean_ctor_release(x_21766, 1); + x_21770 = x_21766; +} else { + lean_dec_ref(x_21766); + x_21770 = lean_box(0); +} +x_21771 = lean_alloc_ctor(1, 1, 0); +lean_ctor_set(x_21771, 0, x_21768); +if (lean_is_scalar(x_21770)) { + x_21772 = lean_alloc_ctor(0, 2, 0); +} else { + x_21772 = x_21770; +} +lean_ctor_set(x_21772, 0, x_21771); +lean_ctor_set(x_21772, 1, x_21769); +x_21611 = x_21772; +x_21612 = x_21767; +goto block_21641; +} +else +{ +lean_object* x_21773; lean_object* x_21774; lean_object* x_21775; lean_object* x_21776; +lean_dec(x_20612); +lean_dec(x_20601); +lean_dec(x_153); +lean_dec(x_5); +lean_dec(x_4); +lean_dec(x_2); +lean_dec(x_1); +x_21773 = lean_ctor_get(x_21765, 0); +lean_inc(x_21773); +x_21774 = lean_ctor_get(x_21765, 1); +lean_inc(x_21774); +if (lean_is_exclusive(x_21765)) { + lean_ctor_release(x_21765, 0); + lean_ctor_release(x_21765, 1); + 
x_21775 = x_21765; +} else { + lean_dec_ref(x_21765); + x_21775 = lean_box(0); +} +if (lean_is_scalar(x_21775)) { + x_21776 = lean_alloc_ctor(1, 2, 0); +} else { + x_21776 = x_21775; +} +lean_ctor_set(x_21776, 0, x_21773); +lean_ctor_set(x_21776, 1, x_21774); +return x_21776; +} +} +else +{ +lean_object* x_21777; lean_object* x_21778; lean_object* x_21779; lean_object* x_21780; +lean_dec(x_21757); +lean_dec(x_21752); +lean_dec(x_21642); +lean_dec(x_21747); +lean_dec(x_20612); +lean_dec(x_20601); +lean_dec(x_153); +lean_dec(x_5); +lean_dec(x_4); +lean_dec(x_2); +lean_dec(x_1); +x_21777 = lean_ctor_get(x_21760, 0); +lean_inc(x_21777); +x_21778 = lean_ctor_get(x_21760, 1); +lean_inc(x_21778); +if (lean_is_exclusive(x_21760)) { + lean_ctor_release(x_21760, 0); + lean_ctor_release(x_21760, 1); + x_21779 = x_21760; +} else { + lean_dec_ref(x_21760); + x_21779 = lean_box(0); +} +if (lean_is_scalar(x_21779)) { + x_21780 = lean_alloc_ctor(1, 2, 0); +} else { + x_21780 = x_21779; +} +lean_ctor_set(x_21780, 0, x_21777); +lean_ctor_set(x_21780, 1, x_21778); +return x_21780; +} +} +else +{ +lean_object* x_21781; lean_object* x_21782; lean_object* x_21783; lean_object* x_21784; lean_object* x_21785; lean_object* x_21786; lean_object* x_21787; lean_object* x_21788; +lean_dec(x_21742); +lean_dec(x_21740); +lean_inc(x_20601); +lean_inc(x_153); +lean_ctor_set_tag(x_21642, 6); +lean_ctor_set(x_21642, 1, x_20601); +lean_ctor_set(x_21642, 0, x_153); +x_21781 = lean_ctor_get(x_1, 0); +lean_inc(x_21781); +x_21782 = l_Lean_IR_ToIR_bindVar(x_21781, x_20607, x_4, x_5, x_21647); +x_21783 = lean_ctor_get(x_21782, 0); +lean_inc(x_21783); +x_21784 = lean_ctor_get(x_21782, 1); +lean_inc(x_21784); +lean_dec(x_21782); +x_21785 = lean_ctor_get(x_21783, 0); +lean_inc(x_21785); +x_21786 = lean_ctor_get(x_21783, 1); +lean_inc(x_21786); +lean_dec(x_21783); +x_21787 = lean_ctor_get(x_1, 2); +lean_inc(x_21787); +lean_inc(x_5); +lean_inc(x_4); +x_21788 = l_Lean_IR_ToIR_lowerType(x_21787, x_21786, x_4, x_5, x_21784); +if (lean_obj_tag(x_21788) == 0) +{ +lean_object* x_21789; lean_object* x_21790; lean_object* x_21791; lean_object* x_21792; lean_object* x_21793; +x_21789 = lean_ctor_get(x_21788, 0); +lean_inc(x_21789); +x_21790 = lean_ctor_get(x_21788, 1); +lean_inc(x_21790); +lean_dec(x_21788); +x_21791 = lean_ctor_get(x_21789, 0); +lean_inc(x_21791); +x_21792 = lean_ctor_get(x_21789, 1); +lean_inc(x_21792); +lean_dec(x_21789); +lean_inc(x_5); +lean_inc(x_4); +lean_inc(x_2); +x_21793 = l_Lean_IR_ToIR_lowerLet___lambda__1(x_2, x_21785, x_21642, x_21791, x_21792, x_4, x_5, x_21790); +if (lean_obj_tag(x_21793) == 0) +{ +lean_object* x_21794; lean_object* x_21795; lean_object* x_21796; lean_object* x_21797; lean_object* x_21798; lean_object* x_21799; lean_object* x_21800; +x_21794 = lean_ctor_get(x_21793, 0); +lean_inc(x_21794); +x_21795 = lean_ctor_get(x_21793, 1); +lean_inc(x_21795); +lean_dec(x_21793); +x_21796 = lean_ctor_get(x_21794, 0); +lean_inc(x_21796); +x_21797 = lean_ctor_get(x_21794, 1); +lean_inc(x_21797); +if (lean_is_exclusive(x_21794)) { + lean_ctor_release(x_21794, 0); + lean_ctor_release(x_21794, 1); + x_21798 = x_21794; +} else { + lean_dec_ref(x_21794); + x_21798 = lean_box(0); +} +x_21799 = lean_alloc_ctor(1, 1, 0); +lean_ctor_set(x_21799, 0, x_21796); +if (lean_is_scalar(x_21798)) { + x_21800 = lean_alloc_ctor(0, 2, 0); +} else { + x_21800 = x_21798; +} +lean_ctor_set(x_21800, 0, x_21799); +lean_ctor_set(x_21800, 1, x_21797); +x_21611 = x_21800; +x_21612 = x_21795; +goto block_21641; +} +else +{ +lean_object* 
x_21801; lean_object* x_21802; lean_object* x_21803; lean_object* x_21804; +lean_dec(x_20612); +lean_dec(x_20601); +lean_dec(x_153); +lean_dec(x_5); +lean_dec(x_4); +lean_dec(x_2); +lean_dec(x_1); +x_21801 = lean_ctor_get(x_21793, 0); +lean_inc(x_21801); +x_21802 = lean_ctor_get(x_21793, 1); +lean_inc(x_21802); +if (lean_is_exclusive(x_21793)) { + lean_ctor_release(x_21793, 0); + lean_ctor_release(x_21793, 1); + x_21803 = x_21793; +} else { + lean_dec_ref(x_21793); + x_21803 = lean_box(0); +} +if (lean_is_scalar(x_21803)) { + x_21804 = lean_alloc_ctor(1, 2, 0); +} else { + x_21804 = x_21803; +} +lean_ctor_set(x_21804, 0, x_21801); +lean_ctor_set(x_21804, 1, x_21802); +return x_21804; +} +} +else +{ +lean_object* x_21805; lean_object* x_21806; lean_object* x_21807; lean_object* x_21808; +lean_dec(x_21785); +lean_dec(x_21642); +lean_dec(x_20612); +lean_dec(x_20601); +lean_dec(x_153); +lean_dec(x_5); +lean_dec(x_4); +lean_dec(x_2); +lean_dec(x_1); +x_21805 = lean_ctor_get(x_21788, 0); +lean_inc(x_21805); +x_21806 = lean_ctor_get(x_21788, 1); +lean_inc(x_21806); +if (lean_is_exclusive(x_21788)) { + lean_ctor_release(x_21788, 0); + lean_ctor_release(x_21788, 1); + x_21807 = x_21788; +} else { + lean_dec_ref(x_21788); + x_21807 = lean_box(0); +} +if (lean_is_scalar(x_21807)) { + x_21808 = lean_alloc_ctor(1, 2, 0); +} else { + x_21808 = x_21807; +} +lean_ctor_set(x_21808, 0, x_21805); +lean_ctor_set(x_21808, 1, x_21806); +return x_21808; +} +} +} +else +{ +lean_object* x_21809; lean_object* x_21810; lean_object* x_21811; lean_object* x_21812; lean_object* x_21813; lean_object* x_21814; lean_object* x_21815; lean_object* x_21816; +lean_dec(x_21742); +lean_dec(x_21740); +lean_inc(x_20601); +lean_inc(x_153); +lean_ctor_set_tag(x_21642, 7); +lean_ctor_set(x_21642, 1, x_20601); +lean_ctor_set(x_21642, 0, x_153); +x_21809 = lean_ctor_get(x_1, 0); +lean_inc(x_21809); +x_21810 = l_Lean_IR_ToIR_bindVar(x_21809, x_20607, x_4, x_5, x_21647); +x_21811 = lean_ctor_get(x_21810, 0); +lean_inc(x_21811); +x_21812 = lean_ctor_get(x_21810, 1); +lean_inc(x_21812); +lean_dec(x_21810); +x_21813 = lean_ctor_get(x_21811, 0); +lean_inc(x_21813); +x_21814 = lean_ctor_get(x_21811, 1); +lean_inc(x_21814); +lean_dec(x_21811); +x_21815 = lean_box(7); +lean_inc(x_5); +lean_inc(x_4); +lean_inc(x_2); +x_21816 = l_Lean_IR_ToIR_lowerLet___lambda__1(x_2, x_21813, x_21642, x_21815, x_21814, x_4, x_5, x_21812); +if (lean_obj_tag(x_21816) == 0) +{ +lean_object* x_21817; lean_object* x_21818; lean_object* x_21819; lean_object* x_21820; lean_object* x_21821; lean_object* x_21822; lean_object* x_21823; +x_21817 = lean_ctor_get(x_21816, 0); +lean_inc(x_21817); +x_21818 = lean_ctor_get(x_21816, 1); +lean_inc(x_21818); +lean_dec(x_21816); +x_21819 = lean_ctor_get(x_21817, 0); +lean_inc(x_21819); +x_21820 = lean_ctor_get(x_21817, 1); +lean_inc(x_21820); +if (lean_is_exclusive(x_21817)) { + lean_ctor_release(x_21817, 0); + lean_ctor_release(x_21817, 1); + x_21821 = x_21817; +} else { + lean_dec_ref(x_21817); + x_21821 = lean_box(0); +} +x_21822 = lean_alloc_ctor(1, 1, 0); +lean_ctor_set(x_21822, 0, x_21819); +if (lean_is_scalar(x_21821)) { + x_21823 = lean_alloc_ctor(0, 2, 0); +} else { + x_21823 = x_21821; +} +lean_ctor_set(x_21823, 0, x_21822); +lean_ctor_set(x_21823, 1, x_21820); +x_21611 = x_21823; +x_21612 = x_21818; +goto block_21641; +} +else +{ +lean_object* x_21824; lean_object* x_21825; lean_object* x_21826; lean_object* x_21827; +lean_dec(x_20612); +lean_dec(x_20601); +lean_dec(x_153); +lean_dec(x_5); +lean_dec(x_4); 
+lean_dec(x_2); +lean_dec(x_1); +x_21824 = lean_ctor_get(x_21816, 0); +lean_inc(x_21824); +x_21825 = lean_ctor_get(x_21816, 1); +lean_inc(x_21825); +if (lean_is_exclusive(x_21816)) { + lean_ctor_release(x_21816, 0); + lean_ctor_release(x_21816, 1); + x_21826 = x_21816; +} else { + lean_dec_ref(x_21816); + x_21826 = lean_box(0); +} +if (lean_is_scalar(x_21826)) { + x_21827 = lean_alloc_ctor(1, 2, 0); +} else { + x_21827 = x_21826; +} +lean_ctor_set(x_21827, 0, x_21824); +lean_ctor_set(x_21827, 1, x_21825); +return x_21827; +} +} +} +} +else +{ +lean_object* x_21828; lean_object* x_21829; lean_object* x_21830; lean_object* x_21831; lean_object* x_21832; lean_object* x_21833; uint8_t x_21834; +x_21828 = lean_ctor_get(x_21642, 1); +lean_inc(x_21828); +lean_dec(x_21642); +x_21829 = lean_ctor_get(x_21643, 0); +lean_inc(x_21829); +if (lean_is_exclusive(x_21643)) { + lean_ctor_release(x_21643, 0); + x_21830 = x_21643; +} else { + lean_dec_ref(x_21643); + x_21830 = lean_box(0); +} +x_21831 = lean_array_get_size(x_20601); +x_21832 = lean_ctor_get(x_21829, 3); +lean_inc(x_21832); +lean_dec(x_21829); +x_21833 = lean_array_get_size(x_21832); +lean_dec(x_21832); +x_21834 = lean_nat_dec_lt(x_21831, x_21833); +if (x_21834 == 0) +{ +uint8_t x_21835; +x_21835 = lean_nat_dec_eq(x_21831, x_21833); +if (x_21835 == 0) +{ +lean_object* x_21836; lean_object* x_21837; lean_object* x_21838; lean_object* x_21839; lean_object* x_21840; lean_object* x_21841; lean_object* x_21842; lean_object* x_21843; lean_object* x_21844; lean_object* x_21845; lean_object* x_21846; lean_object* x_21847; lean_object* x_21848; lean_object* x_21849; lean_object* x_21850; lean_object* x_21851; lean_object* x_21852; +x_21836 = lean_unsigned_to_nat(0u); +x_21837 = l_Array_extract___rarg(x_20601, x_21836, x_21833); +x_21838 = l_Array_extract___rarg(x_20601, x_21833, x_21831); +lean_dec(x_21831); +lean_inc(x_153); +x_21839 = lean_alloc_ctor(6, 2, 0); +lean_ctor_set(x_21839, 0, x_153); +lean_ctor_set(x_21839, 1, x_21837); +x_21840 = lean_ctor_get(x_1, 0); +lean_inc(x_21840); +x_21841 = l_Lean_IR_ToIR_bindVar(x_21840, x_20607, x_4, x_5, x_21828); +x_21842 = lean_ctor_get(x_21841, 0); +lean_inc(x_21842); +x_21843 = lean_ctor_get(x_21841, 1); +lean_inc(x_21843); +lean_dec(x_21841); +x_21844 = lean_ctor_get(x_21842, 0); +lean_inc(x_21844); +x_21845 = lean_ctor_get(x_21842, 1); +lean_inc(x_21845); +lean_dec(x_21842); +x_21846 = l_Lean_IR_ToIR_newVar(x_21845, x_4, x_5, x_21843); +x_21847 = lean_ctor_get(x_21846, 0); +lean_inc(x_21847); +x_21848 = lean_ctor_get(x_21846, 1); +lean_inc(x_21848); +lean_dec(x_21846); +x_21849 = lean_ctor_get(x_21847, 0); +lean_inc(x_21849); +x_21850 = lean_ctor_get(x_21847, 1); +lean_inc(x_21850); +lean_dec(x_21847); +x_21851 = lean_ctor_get(x_1, 2); +lean_inc(x_21851); +lean_inc(x_5); +lean_inc(x_4); +x_21852 = l_Lean_IR_ToIR_lowerType(x_21851, x_21850, x_4, x_5, x_21848); +if (lean_obj_tag(x_21852) == 0) +{ +lean_object* x_21853; lean_object* x_21854; lean_object* x_21855; lean_object* x_21856; lean_object* x_21857; +x_21853 = lean_ctor_get(x_21852, 0); +lean_inc(x_21853); +x_21854 = lean_ctor_get(x_21852, 1); +lean_inc(x_21854); +lean_dec(x_21852); +x_21855 = lean_ctor_get(x_21853, 0); +lean_inc(x_21855); +x_21856 = lean_ctor_get(x_21853, 1); +lean_inc(x_21856); +lean_dec(x_21853); +lean_inc(x_5); +lean_inc(x_4); +lean_inc(x_2); +x_21857 = l_Lean_IR_ToIR_lowerLet___lambda__3(x_2, x_21849, x_21838, x_21844, x_21839, x_21855, x_21856, x_4, x_5, x_21854); +if (lean_obj_tag(x_21857) == 0) +{ +lean_object* x_21858; 
lean_object* x_21859; lean_object* x_21860; lean_object* x_21861; lean_object* x_21862; lean_object* x_21863; lean_object* x_21864; +x_21858 = lean_ctor_get(x_21857, 0); +lean_inc(x_21858); +x_21859 = lean_ctor_get(x_21857, 1); +lean_inc(x_21859); +lean_dec(x_21857); +x_21860 = lean_ctor_get(x_21858, 0); +lean_inc(x_21860); +x_21861 = lean_ctor_get(x_21858, 1); +lean_inc(x_21861); +if (lean_is_exclusive(x_21858)) { + lean_ctor_release(x_21858, 0); + lean_ctor_release(x_21858, 1); + x_21862 = x_21858; +} else { + lean_dec_ref(x_21858); + x_21862 = lean_box(0); +} +if (lean_is_scalar(x_21830)) { + x_21863 = lean_alloc_ctor(1, 1, 0); +} else { + x_21863 = x_21830; +} +lean_ctor_set(x_21863, 0, x_21860); +if (lean_is_scalar(x_21862)) { + x_21864 = lean_alloc_ctor(0, 2, 0); +} else { + x_21864 = x_21862; +} +lean_ctor_set(x_21864, 0, x_21863); +lean_ctor_set(x_21864, 1, x_21861); +x_21611 = x_21864; +x_21612 = x_21859; +goto block_21641; +} +else +{ +lean_object* x_21865; lean_object* x_21866; lean_object* x_21867; lean_object* x_21868; +lean_dec(x_21830); +lean_dec(x_20612); +lean_dec(x_20601); +lean_dec(x_153); +lean_dec(x_5); +lean_dec(x_4); +lean_dec(x_2); +lean_dec(x_1); +x_21865 = lean_ctor_get(x_21857, 0); +lean_inc(x_21865); +x_21866 = lean_ctor_get(x_21857, 1); +lean_inc(x_21866); +if (lean_is_exclusive(x_21857)) { + lean_ctor_release(x_21857, 0); + lean_ctor_release(x_21857, 1); + x_21867 = x_21857; +} else { + lean_dec_ref(x_21857); + x_21867 = lean_box(0); +} +if (lean_is_scalar(x_21867)) { + x_21868 = lean_alloc_ctor(1, 2, 0); +} else { + x_21868 = x_21867; +} +lean_ctor_set(x_21868, 0, x_21865); +lean_ctor_set(x_21868, 1, x_21866); +return x_21868; +} +} +else +{ +lean_object* x_21869; lean_object* x_21870; lean_object* x_21871; lean_object* x_21872; +lean_dec(x_21849); +lean_dec(x_21844); +lean_dec(x_21839); +lean_dec(x_21838); +lean_dec(x_21830); +lean_dec(x_20612); +lean_dec(x_20601); +lean_dec(x_153); +lean_dec(x_5); +lean_dec(x_4); +lean_dec(x_2); +lean_dec(x_1); +x_21869 = lean_ctor_get(x_21852, 0); +lean_inc(x_21869); +x_21870 = lean_ctor_get(x_21852, 1); +lean_inc(x_21870); +if (lean_is_exclusive(x_21852)) { + lean_ctor_release(x_21852, 0); + lean_ctor_release(x_21852, 1); + x_21871 = x_21852; +} else { + lean_dec_ref(x_21852); + x_21871 = lean_box(0); +} +if (lean_is_scalar(x_21871)) { + x_21872 = lean_alloc_ctor(1, 2, 0); +} else { + x_21872 = x_21871; +} +lean_ctor_set(x_21872, 0, x_21869); +lean_ctor_set(x_21872, 1, x_21870); +return x_21872; +} +} +else +{ +lean_object* x_21873; lean_object* x_21874; lean_object* x_21875; lean_object* x_21876; lean_object* x_21877; lean_object* x_21878; lean_object* x_21879; lean_object* x_21880; lean_object* x_21881; +lean_dec(x_21833); +lean_dec(x_21831); +lean_inc(x_20601); +lean_inc(x_153); +x_21873 = lean_alloc_ctor(6, 2, 0); +lean_ctor_set(x_21873, 0, x_153); +lean_ctor_set(x_21873, 1, x_20601); +x_21874 = lean_ctor_get(x_1, 0); +lean_inc(x_21874); +x_21875 = l_Lean_IR_ToIR_bindVar(x_21874, x_20607, x_4, x_5, x_21828); +x_21876 = lean_ctor_get(x_21875, 0); +lean_inc(x_21876); +x_21877 = lean_ctor_get(x_21875, 1); +lean_inc(x_21877); +lean_dec(x_21875); +x_21878 = lean_ctor_get(x_21876, 0); +lean_inc(x_21878); +x_21879 = lean_ctor_get(x_21876, 1); +lean_inc(x_21879); +lean_dec(x_21876); +x_21880 = lean_ctor_get(x_1, 2); +lean_inc(x_21880); +lean_inc(x_5); +lean_inc(x_4); +x_21881 = l_Lean_IR_ToIR_lowerType(x_21880, x_21879, x_4, x_5, x_21877); +if (lean_obj_tag(x_21881) == 0) +{ +lean_object* x_21882; lean_object* x_21883; 
lean_object* x_21884; lean_object* x_21885; lean_object* x_21886; +x_21882 = lean_ctor_get(x_21881, 0); +lean_inc(x_21882); +x_21883 = lean_ctor_get(x_21881, 1); +lean_inc(x_21883); +lean_dec(x_21881); +x_21884 = lean_ctor_get(x_21882, 0); +lean_inc(x_21884); +x_21885 = lean_ctor_get(x_21882, 1); +lean_inc(x_21885); +lean_dec(x_21882); +lean_inc(x_5); +lean_inc(x_4); +lean_inc(x_2); +x_21886 = l_Lean_IR_ToIR_lowerLet___lambda__1(x_2, x_21878, x_21873, x_21884, x_21885, x_4, x_5, x_21883); +if (lean_obj_tag(x_21886) == 0) +{ +lean_object* x_21887; lean_object* x_21888; lean_object* x_21889; lean_object* x_21890; lean_object* x_21891; lean_object* x_21892; lean_object* x_21893; +x_21887 = lean_ctor_get(x_21886, 0); +lean_inc(x_21887); +x_21888 = lean_ctor_get(x_21886, 1); +lean_inc(x_21888); +lean_dec(x_21886); +x_21889 = lean_ctor_get(x_21887, 0); +lean_inc(x_21889); +x_21890 = lean_ctor_get(x_21887, 1); +lean_inc(x_21890); +if (lean_is_exclusive(x_21887)) { + lean_ctor_release(x_21887, 0); + lean_ctor_release(x_21887, 1); + x_21891 = x_21887; +} else { + lean_dec_ref(x_21887); + x_21891 = lean_box(0); +} +if (lean_is_scalar(x_21830)) { + x_21892 = lean_alloc_ctor(1, 1, 0); +} else { + x_21892 = x_21830; +} +lean_ctor_set(x_21892, 0, x_21889); +if (lean_is_scalar(x_21891)) { + x_21893 = lean_alloc_ctor(0, 2, 0); +} else { + x_21893 = x_21891; +} +lean_ctor_set(x_21893, 0, x_21892); +lean_ctor_set(x_21893, 1, x_21890); +x_21611 = x_21893; +x_21612 = x_21888; +goto block_21641; +} +else +{ +lean_object* x_21894; lean_object* x_21895; lean_object* x_21896; lean_object* x_21897; +lean_dec(x_21830); +lean_dec(x_20612); +lean_dec(x_20601); +lean_dec(x_153); +lean_dec(x_5); +lean_dec(x_4); +lean_dec(x_2); +lean_dec(x_1); +x_21894 = lean_ctor_get(x_21886, 0); +lean_inc(x_21894); +x_21895 = lean_ctor_get(x_21886, 1); +lean_inc(x_21895); +if (lean_is_exclusive(x_21886)) { + lean_ctor_release(x_21886, 0); + lean_ctor_release(x_21886, 1); + x_21896 = x_21886; +} else { + lean_dec_ref(x_21886); + x_21896 = lean_box(0); +} +if (lean_is_scalar(x_21896)) { + x_21897 = lean_alloc_ctor(1, 2, 0); +} else { + x_21897 = x_21896; +} +lean_ctor_set(x_21897, 0, x_21894); +lean_ctor_set(x_21897, 1, x_21895); +return x_21897; +} +} +else +{ +lean_object* x_21898; lean_object* x_21899; lean_object* x_21900; lean_object* x_21901; +lean_dec(x_21878); +lean_dec(x_21873); +lean_dec(x_21830); +lean_dec(x_20612); +lean_dec(x_20601); +lean_dec(x_153); +lean_dec(x_5); +lean_dec(x_4); +lean_dec(x_2); +lean_dec(x_1); +x_21898 = lean_ctor_get(x_21881, 0); +lean_inc(x_21898); +x_21899 = lean_ctor_get(x_21881, 1); +lean_inc(x_21899); +if (lean_is_exclusive(x_21881)) { + lean_ctor_release(x_21881, 0); + lean_ctor_release(x_21881, 1); + x_21900 = x_21881; +} else { + lean_dec_ref(x_21881); + x_21900 = lean_box(0); +} +if (lean_is_scalar(x_21900)) { + x_21901 = lean_alloc_ctor(1, 2, 0); +} else { + x_21901 = x_21900; +} +lean_ctor_set(x_21901, 0, x_21898); +lean_ctor_set(x_21901, 1, x_21899); +return x_21901; +} +} +} +else +{ +lean_object* x_21902; lean_object* x_21903; lean_object* x_21904; lean_object* x_21905; lean_object* x_21906; lean_object* x_21907; lean_object* x_21908; lean_object* x_21909; lean_object* x_21910; +lean_dec(x_21833); +lean_dec(x_21831); +lean_inc(x_20601); +lean_inc(x_153); +x_21902 = lean_alloc_ctor(7, 2, 0); +lean_ctor_set(x_21902, 0, x_153); +lean_ctor_set(x_21902, 1, x_20601); +x_21903 = lean_ctor_get(x_1, 0); +lean_inc(x_21903); +x_21904 = l_Lean_IR_ToIR_bindVar(x_21903, x_20607, x_4, x_5, x_21828); 
+x_21905 = lean_ctor_get(x_21904, 0); +lean_inc(x_21905); +x_21906 = lean_ctor_get(x_21904, 1); +lean_inc(x_21906); +lean_dec(x_21904); +x_21907 = lean_ctor_get(x_21905, 0); +lean_inc(x_21907); +x_21908 = lean_ctor_get(x_21905, 1); +lean_inc(x_21908); +lean_dec(x_21905); +x_21909 = lean_box(7); +lean_inc(x_5); +lean_inc(x_4); +lean_inc(x_2); +x_21910 = l_Lean_IR_ToIR_lowerLet___lambda__1(x_2, x_21907, x_21902, x_21909, x_21908, x_4, x_5, x_21906); +if (lean_obj_tag(x_21910) == 0) +{ +lean_object* x_21911; lean_object* x_21912; lean_object* x_21913; lean_object* x_21914; lean_object* x_21915; lean_object* x_21916; lean_object* x_21917; +x_21911 = lean_ctor_get(x_21910, 0); +lean_inc(x_21911); +x_21912 = lean_ctor_get(x_21910, 1); +lean_inc(x_21912); +lean_dec(x_21910); +x_21913 = lean_ctor_get(x_21911, 0); +lean_inc(x_21913); +x_21914 = lean_ctor_get(x_21911, 1); +lean_inc(x_21914); +if (lean_is_exclusive(x_21911)) { + lean_ctor_release(x_21911, 0); + lean_ctor_release(x_21911, 1); + x_21915 = x_21911; +} else { + lean_dec_ref(x_21911); + x_21915 = lean_box(0); +} +if (lean_is_scalar(x_21830)) { + x_21916 = lean_alloc_ctor(1, 1, 0); +} else { + x_21916 = x_21830; +} +lean_ctor_set(x_21916, 0, x_21913); +if (lean_is_scalar(x_21915)) { + x_21917 = lean_alloc_ctor(0, 2, 0); +} else { + x_21917 = x_21915; +} +lean_ctor_set(x_21917, 0, x_21916); +lean_ctor_set(x_21917, 1, x_21914); +x_21611 = x_21917; +x_21612 = x_21912; +goto block_21641; +} +else +{ +lean_object* x_21918; lean_object* x_21919; lean_object* x_21920; lean_object* x_21921; +lean_dec(x_21830); +lean_dec(x_20612); +lean_dec(x_20601); +lean_dec(x_153); +lean_dec(x_5); +lean_dec(x_4); +lean_dec(x_2); +lean_dec(x_1); +x_21918 = lean_ctor_get(x_21910, 0); +lean_inc(x_21918); +x_21919 = lean_ctor_get(x_21910, 1); +lean_inc(x_21919); +if (lean_is_exclusive(x_21910)) { + lean_ctor_release(x_21910, 0); + lean_ctor_release(x_21910, 1); + x_21920 = x_21910; +} else { + lean_dec_ref(x_21910); + x_21920 = lean_box(0); +} +if (lean_is_scalar(x_21920)) { + x_21921 = lean_alloc_ctor(1, 2, 0); +} else { + x_21921 = x_21920; +} +lean_ctor_set(x_21921, 0, x_21918); +lean_ctor_set(x_21921, 1, x_21919); +return x_21921; +} +} +} +} +block_21641: +{ +lean_object* x_21613; +x_21613 = lean_ctor_get(x_21611, 0); +lean_inc(x_21613); +if (lean_obj_tag(x_21613) == 0) +{ +lean_object* x_21614; lean_object* x_21615; lean_object* x_21616; lean_object* x_21617; lean_object* x_21618; lean_object* x_21619; lean_object* x_21620; lean_object* x_21621; lean_object* x_21622; lean_object* x_21623; +lean_dec(x_20612); +x_21614 = lean_ctor_get(x_21611, 1); +lean_inc(x_21614); +lean_dec(x_21611); +x_21615 = lean_alloc_ctor(6, 2, 0); +lean_ctor_set(x_21615, 0, x_153); +lean_ctor_set(x_21615, 1, x_20601); +x_21616 = lean_ctor_get(x_1, 0); +lean_inc(x_21616); +x_21617 = l_Lean_IR_ToIR_bindVar(x_21616, x_21614, x_4, x_5, x_21612); +x_21618 = lean_ctor_get(x_21617, 0); +lean_inc(x_21618); +x_21619 = lean_ctor_get(x_21617, 1); +lean_inc(x_21619); +lean_dec(x_21617); +x_21620 = lean_ctor_get(x_21618, 0); +lean_inc(x_21620); +x_21621 = lean_ctor_get(x_21618, 1); +lean_inc(x_21621); +lean_dec(x_21618); +x_21622 = lean_ctor_get(x_1, 2); +lean_inc(x_21622); +lean_dec(x_1); +lean_inc(x_5); +lean_inc(x_4); +x_21623 = l_Lean_IR_ToIR_lowerType(x_21622, x_21621, x_4, x_5, x_21619); +if (lean_obj_tag(x_21623) == 0) +{ +lean_object* x_21624; lean_object* x_21625; lean_object* x_21626; lean_object* x_21627; lean_object* x_21628; +x_21624 = lean_ctor_get(x_21623, 0); +lean_inc(x_21624); 
+x_21625 = lean_ctor_get(x_21623, 1); +lean_inc(x_21625); +lean_dec(x_21623); +x_21626 = lean_ctor_get(x_21624, 0); +lean_inc(x_21626); +x_21627 = lean_ctor_get(x_21624, 1); +lean_inc(x_21627); +lean_dec(x_21624); +x_21628 = l_Lean_IR_ToIR_lowerLet___lambda__1(x_2, x_21620, x_21615, x_21626, x_21627, x_4, x_5, x_21625); +return x_21628; +} +else +{ +uint8_t x_21629; +lean_dec(x_21620); +lean_dec(x_21615); +lean_dec(x_5); +lean_dec(x_4); +lean_dec(x_2); +x_21629 = !lean_is_exclusive(x_21623); +if (x_21629 == 0) +{ +return x_21623; +} +else +{ +lean_object* x_21630; lean_object* x_21631; lean_object* x_21632; +x_21630 = lean_ctor_get(x_21623, 0); +x_21631 = lean_ctor_get(x_21623, 1); +lean_inc(x_21631); +lean_inc(x_21630); +lean_dec(x_21623); +x_21632 = lean_alloc_ctor(1, 2, 0); +lean_ctor_set(x_21632, 0, x_21630); +lean_ctor_set(x_21632, 1, x_21631); +return x_21632; +} +} +} +else +{ +uint8_t x_21633; +lean_dec(x_20601); +lean_dec(x_153); +lean_dec(x_5); +lean_dec(x_4); +lean_dec(x_2); +lean_dec(x_1); +x_21633 = !lean_is_exclusive(x_21611); +if (x_21633 == 0) +{ +lean_object* x_21634; lean_object* x_21635; lean_object* x_21636; +x_21634 = lean_ctor_get(x_21611, 0); +lean_dec(x_21634); +x_21635 = lean_ctor_get(x_21613, 0); +lean_inc(x_21635); +lean_dec(x_21613); +lean_ctor_set(x_21611, 0, x_21635); +if (lean_is_scalar(x_20612)) { + x_21636 = lean_alloc_ctor(0, 2, 0); +} else { + x_21636 = x_20612; +} +lean_ctor_set(x_21636, 0, x_21611); +lean_ctor_set(x_21636, 1, x_21612); +return x_21636; +} +else +{ +lean_object* x_21637; lean_object* x_21638; lean_object* x_21639; lean_object* x_21640; +x_21637 = lean_ctor_get(x_21611, 1); +lean_inc(x_21637); +lean_dec(x_21611); +x_21638 = lean_ctor_get(x_21613, 0); +lean_inc(x_21638); +lean_dec(x_21613); +x_21639 = lean_alloc_ctor(0, 2, 0); +lean_ctor_set(x_21639, 0, x_21638); +lean_ctor_set(x_21639, 1, x_21637); +if (lean_is_scalar(x_20612)) { + x_21640 = lean_alloc_ctor(0, 2, 0); +} else { + x_21640 = x_20612; +} +lean_ctor_set(x_21640, 0, x_21639); +lean_ctor_set(x_21640, 1, x_21612); +return x_21640; +} +} +} +} +} +default: +{ +uint8_t x_21922; +lean_dec(x_20613); +lean_dec(x_20612); +lean_free_object(x_20603); +lean_dec(x_20601); +lean_dec(x_20593); +lean_dec(x_20592); +lean_dec(x_2); +lean_dec(x_1); +x_21922 = !lean_is_exclusive(x_20618); +if (x_21922 == 0) +{ +lean_object* x_21923; uint8_t x_21924; lean_object* x_21925; lean_object* x_21926; lean_object* x_21927; lean_object* x_21928; lean_object* x_21929; lean_object* x_21930; lean_object* x_21931; lean_object* x_21932; +x_21923 = lean_ctor_get(x_20618, 0); +lean_dec(x_21923); +x_21924 = 1; +x_21925 = l_Lean_IR_ToIR_lowerLet___closed__14; +x_21926 = l_Lean_Name_toString(x_153, x_21924, x_21925); +lean_ctor_set_tag(x_20618, 3); +lean_ctor_set(x_20618, 0, x_21926); +x_21927 = l_Lean_IR_ToIR_lowerLet___closed__29; +x_21928 = lean_alloc_ctor(5, 2, 0); +lean_ctor_set(x_21928, 0, x_21927); +lean_ctor_set(x_21928, 1, x_20618); +x_21929 = l_Lean_IR_ToIR_lowerLet___closed__31; +x_21930 = lean_alloc_ctor(5, 2, 0); +lean_ctor_set(x_21930, 0, x_21928); +lean_ctor_set(x_21930, 1, x_21929); +x_21931 = l_Lean_MessageData_ofFormat(x_21930); +x_21932 = l_Lean_throwError___at_Lean_IR_ToIR_lowerLet___spec__1(x_21931, x_20607, x_4, x_5, x_20611); +lean_dec(x_5); +lean_dec(x_4); +lean_dec(x_20607); +return x_21932; +} +else +{ +uint8_t x_21933; lean_object* x_21934; lean_object* x_21935; lean_object* x_21936; lean_object* x_21937; lean_object* x_21938; lean_object* x_21939; lean_object* x_21940; lean_object* 
x_21941; lean_object* x_21942; +lean_dec(x_20618); +x_21933 = 1; +x_21934 = l_Lean_IR_ToIR_lowerLet___closed__14; +x_21935 = l_Lean_Name_toString(x_153, x_21933, x_21934); +x_21936 = lean_alloc_ctor(3, 1, 0); +lean_ctor_set(x_21936, 0, x_21935); +x_21937 = l_Lean_IR_ToIR_lowerLet___closed__29; +x_21938 = lean_alloc_ctor(5, 2, 0); +lean_ctor_set(x_21938, 0, x_21937); +lean_ctor_set(x_21938, 1, x_21936); +x_21939 = l_Lean_IR_ToIR_lowerLet___closed__31; +x_21940 = lean_alloc_ctor(5, 2, 0); +lean_ctor_set(x_21940, 0, x_21938); +lean_ctor_set(x_21940, 1, x_21939); +x_21941 = l_Lean_MessageData_ofFormat(x_21940); +x_21942 = l_Lean_throwError___at_Lean_IR_ToIR_lowerLet___spec__1(x_21941, x_20607, x_4, x_5, x_20611); +lean_dec(x_5); +lean_dec(x_4); +lean_dec(x_20607); +return x_21942; +} +} +} +} +} +else +{ +lean_object* x_21943; lean_object* x_21944; lean_object* x_21945; lean_object* x_21946; lean_object* x_21947; lean_object* x_21948; uint8_t x_21949; lean_object* x_21950; +x_21943 = lean_ctor_get(x_20603, 1); +lean_inc(x_21943); +lean_dec(x_20603); +x_21944 = lean_st_ref_get(x_5, x_20604); +x_21945 = lean_ctor_get(x_21944, 0); +lean_inc(x_21945); +x_21946 = lean_ctor_get(x_21944, 1); +lean_inc(x_21946); +if (lean_is_exclusive(x_21944)) { + lean_ctor_release(x_21944, 0); + lean_ctor_release(x_21944, 1); + x_21947 = x_21944; +} else { + lean_dec_ref(x_21944); + x_21947 = lean_box(0); +} +x_21948 = lean_ctor_get(x_21945, 0); +lean_inc(x_21948); +lean_dec(x_21945); +x_21949 = 0; +lean_inc(x_153); +lean_inc(x_21948); +x_21950 = l_Lean_Environment_find_x3f(x_21948, x_153, x_21949); +if (lean_obj_tag(x_21950) == 0) +{ +lean_object* x_21951; lean_object* x_21952; +lean_dec(x_21948); +lean_dec(x_21947); +lean_dec(x_20601); +lean_dec(x_20593); +lean_dec(x_20592); +lean_dec(x_153); +lean_dec(x_2); +lean_dec(x_1); +x_21951 = l_Lean_IR_ToIR_lowerLet___closed__6; +x_21952 = l_panic___at_Lean_IR_ToIR_lowerAlt_loop___spec__1(x_21951, x_21943, x_4, x_5, x_21946); +return x_21952; +} +else +{ +lean_object* x_21953; +x_21953 = lean_ctor_get(x_21950, 0); +lean_inc(x_21953); +lean_dec(x_21950); +switch (lean_obj_tag(x_21953)) { +case 0: +{ +lean_object* x_21954; lean_object* x_21955; uint8_t x_21956; +lean_dec(x_21948); +lean_dec(x_20593); +lean_dec(x_20592); +if (lean_is_exclusive(x_21953)) { + lean_ctor_release(x_21953, 0); + x_21954 = x_21953; +} else { + lean_dec_ref(x_21953); + x_21954 = lean_box(0); +} +x_21955 = l_Lean_IR_ToIR_lowerLet___closed__9; +x_21956 = lean_name_eq(x_153, x_21955); +if (x_21956 == 0) +{ +lean_object* x_21957; uint8_t x_21958; +x_21957 = l_Lean_IR_ToIR_lowerLet___closed__11; +x_21958 = lean_name_eq(x_153, x_21957); +if (x_21958 == 0) +{ +lean_object* x_21959; lean_object* x_21960; lean_object* x_21961; +lean_dec(x_21947); +lean_inc(x_153); +x_21959 = l_Lean_IR_ToIR_findDecl(x_153, x_21943, x_4, x_5, x_21946); +x_21960 = lean_ctor_get(x_21959, 0); +lean_inc(x_21960); +x_21961 = lean_ctor_get(x_21960, 0); +lean_inc(x_21961); +if (lean_obj_tag(x_21961) == 0) +{ +lean_object* x_21962; lean_object* x_21963; lean_object* x_21964; lean_object* x_21965; uint8_t x_21966; lean_object* x_21967; lean_object* x_21968; lean_object* x_21969; lean_object* x_21970; lean_object* x_21971; lean_object* x_21972; lean_object* x_21973; lean_object* x_21974; lean_object* x_21975; +lean_dec(x_20601); +lean_dec(x_2); +lean_dec(x_1); +x_21962 = lean_ctor_get(x_21959, 1); +lean_inc(x_21962); +if (lean_is_exclusive(x_21959)) { + lean_ctor_release(x_21959, 0); + lean_ctor_release(x_21959, 1); + x_21963 = x_21959; 
+} else { + lean_dec_ref(x_21959); + x_21963 = lean_box(0); +} +x_21964 = lean_ctor_get(x_21960, 1); +lean_inc(x_21964); +if (lean_is_exclusive(x_21960)) { + lean_ctor_release(x_21960, 0); + lean_ctor_release(x_21960, 1); + x_21965 = x_21960; +} else { + lean_dec_ref(x_21960); + x_21965 = lean_box(0); +} +x_21966 = 1; +x_21967 = l_Lean_IR_ToIR_lowerLet___closed__14; +x_21968 = l_Lean_Name_toString(x_153, x_21966, x_21967); +if (lean_is_scalar(x_21954)) { + x_21969 = lean_alloc_ctor(3, 1, 0); +} else { + x_21969 = x_21954; + lean_ctor_set_tag(x_21969, 3); +} +lean_ctor_set(x_21969, 0, x_21968); +x_21970 = l_Lean_IR_ToIR_lowerLet___closed__13; +if (lean_is_scalar(x_21965)) { + x_21971 = lean_alloc_ctor(5, 2, 0); +} else { + x_21971 = x_21965; + lean_ctor_set_tag(x_21971, 5); +} +lean_ctor_set(x_21971, 0, x_21970); +lean_ctor_set(x_21971, 1, x_21969); +x_21972 = l_Lean_IR_ToIR_lowerLet___closed__16; +if (lean_is_scalar(x_21963)) { + x_21973 = lean_alloc_ctor(5, 2, 0); +} else { + x_21973 = x_21963; + lean_ctor_set_tag(x_21973, 5); +} +lean_ctor_set(x_21973, 0, x_21971); +lean_ctor_set(x_21973, 1, x_21972); +x_21974 = l_Lean_MessageData_ofFormat(x_21973); +x_21975 = l_Lean_throwError___at_Lean_IR_ToIR_lowerLet___spec__1(x_21974, x_21964, x_4, x_5, x_21962); +lean_dec(x_5); +lean_dec(x_4); +lean_dec(x_21964); +return x_21975; +} +else +{ +lean_object* x_21976; lean_object* x_21977; lean_object* x_21978; lean_object* x_21979; lean_object* x_21980; lean_object* x_21981; lean_object* x_21982; uint8_t x_21983; +lean_dec(x_21954); +x_21976 = lean_ctor_get(x_21959, 1); +lean_inc(x_21976); +lean_dec(x_21959); +x_21977 = lean_ctor_get(x_21960, 1); +lean_inc(x_21977); +if (lean_is_exclusive(x_21960)) { + lean_ctor_release(x_21960, 0); + lean_ctor_release(x_21960, 1); + x_21978 = x_21960; +} else { + lean_dec_ref(x_21960); + x_21978 = lean_box(0); +} +x_21979 = lean_ctor_get(x_21961, 0); +lean_inc(x_21979); +lean_dec(x_21961); +x_21980 = lean_array_get_size(x_20601); +x_21981 = l_Lean_IR_Decl_params(x_21979); +lean_dec(x_21979); +x_21982 = lean_array_get_size(x_21981); +lean_dec(x_21981); +x_21983 = lean_nat_dec_lt(x_21980, x_21982); +if (x_21983 == 0) +{ +uint8_t x_21984; +x_21984 = lean_nat_dec_eq(x_21980, x_21982); +if (x_21984 == 0) +{ +lean_object* x_21985; lean_object* x_21986; lean_object* x_21987; lean_object* x_21988; lean_object* x_21989; lean_object* x_21990; lean_object* x_21991; lean_object* x_21992; lean_object* x_21993; lean_object* x_21994; lean_object* x_21995; lean_object* x_21996; lean_object* x_21997; lean_object* x_21998; lean_object* x_21999; lean_object* x_22000; lean_object* x_22001; +x_21985 = lean_unsigned_to_nat(0u); +x_21986 = l_Array_extract___rarg(x_20601, x_21985, x_21982); +x_21987 = l_Array_extract___rarg(x_20601, x_21982, x_21980); +lean_dec(x_21980); +lean_dec(x_20601); +if (lean_is_scalar(x_21978)) { + x_21988 = lean_alloc_ctor(6, 2, 0); +} else { + x_21988 = x_21978; + lean_ctor_set_tag(x_21988, 6); +} +lean_ctor_set(x_21988, 0, x_153); +lean_ctor_set(x_21988, 1, x_21986); +x_21989 = lean_ctor_get(x_1, 0); +lean_inc(x_21989); +x_21990 = l_Lean_IR_ToIR_bindVar(x_21989, x_21977, x_4, x_5, x_21976); +x_21991 = lean_ctor_get(x_21990, 0); +lean_inc(x_21991); +x_21992 = lean_ctor_get(x_21990, 1); +lean_inc(x_21992); +lean_dec(x_21990); +x_21993 = lean_ctor_get(x_21991, 0); +lean_inc(x_21993); +x_21994 = lean_ctor_get(x_21991, 1); +lean_inc(x_21994); +lean_dec(x_21991); +x_21995 = l_Lean_IR_ToIR_newVar(x_21994, x_4, x_5, x_21992); +x_21996 = lean_ctor_get(x_21995, 0); 
+lean_inc(x_21996); +x_21997 = lean_ctor_get(x_21995, 1); +lean_inc(x_21997); +lean_dec(x_21995); +x_21998 = lean_ctor_get(x_21996, 0); +lean_inc(x_21998); +x_21999 = lean_ctor_get(x_21996, 1); +lean_inc(x_21999); +lean_dec(x_21996); +x_22000 = lean_ctor_get(x_1, 2); +lean_inc(x_22000); +lean_dec(x_1); +lean_inc(x_5); +lean_inc(x_4); +x_22001 = l_Lean_IR_ToIR_lowerType(x_22000, x_21999, x_4, x_5, x_21997); +if (lean_obj_tag(x_22001) == 0) +{ +lean_object* x_22002; lean_object* x_22003; lean_object* x_22004; lean_object* x_22005; lean_object* x_22006; +x_22002 = lean_ctor_get(x_22001, 0); +lean_inc(x_22002); +x_22003 = lean_ctor_get(x_22001, 1); +lean_inc(x_22003); +lean_dec(x_22001); +x_22004 = lean_ctor_get(x_22002, 0); +lean_inc(x_22004); +x_22005 = lean_ctor_get(x_22002, 1); +lean_inc(x_22005); +lean_dec(x_22002); +x_22006 = l_Lean_IR_ToIR_lowerLet___lambda__3(x_2, x_21998, x_21987, x_21993, x_21988, x_22004, x_22005, x_4, x_5, x_22003); +return x_22006; +} +else +{ +lean_object* x_22007; lean_object* x_22008; lean_object* x_22009; lean_object* x_22010; +lean_dec(x_21998); +lean_dec(x_21993); +lean_dec(x_21988); +lean_dec(x_21987); +lean_dec(x_5); +lean_dec(x_4); +lean_dec(x_2); +x_22007 = lean_ctor_get(x_22001, 0); +lean_inc(x_22007); +x_22008 = lean_ctor_get(x_22001, 1); +lean_inc(x_22008); +if (lean_is_exclusive(x_22001)) { + lean_ctor_release(x_22001, 0); + lean_ctor_release(x_22001, 1); + x_22009 = x_22001; +} else { + lean_dec_ref(x_22001); + x_22009 = lean_box(0); +} +if (lean_is_scalar(x_22009)) { + x_22010 = lean_alloc_ctor(1, 2, 0); +} else { + x_22010 = x_22009; +} +lean_ctor_set(x_22010, 0, x_22007); +lean_ctor_set(x_22010, 1, x_22008); +return x_22010; +} +} +else +{ +lean_object* x_22011; lean_object* x_22012; lean_object* x_22013; lean_object* x_22014; lean_object* x_22015; lean_object* x_22016; lean_object* x_22017; lean_object* x_22018; lean_object* x_22019; +lean_dec(x_21982); +lean_dec(x_21980); +if (lean_is_scalar(x_21978)) { + x_22011 = lean_alloc_ctor(6, 2, 0); +} else { + x_22011 = x_21978; + lean_ctor_set_tag(x_22011, 6); +} +lean_ctor_set(x_22011, 0, x_153); +lean_ctor_set(x_22011, 1, x_20601); +x_22012 = lean_ctor_get(x_1, 0); +lean_inc(x_22012); +x_22013 = l_Lean_IR_ToIR_bindVar(x_22012, x_21977, x_4, x_5, x_21976); +x_22014 = lean_ctor_get(x_22013, 0); +lean_inc(x_22014); +x_22015 = lean_ctor_get(x_22013, 1); +lean_inc(x_22015); +lean_dec(x_22013); +x_22016 = lean_ctor_get(x_22014, 0); +lean_inc(x_22016); +x_22017 = lean_ctor_get(x_22014, 1); +lean_inc(x_22017); +lean_dec(x_22014); +x_22018 = lean_ctor_get(x_1, 2); +lean_inc(x_22018); +lean_dec(x_1); +lean_inc(x_5); +lean_inc(x_4); +x_22019 = l_Lean_IR_ToIR_lowerType(x_22018, x_22017, x_4, x_5, x_22015); +if (lean_obj_tag(x_22019) == 0) +{ +lean_object* x_22020; lean_object* x_22021; lean_object* x_22022; lean_object* x_22023; lean_object* x_22024; +x_22020 = lean_ctor_get(x_22019, 0); +lean_inc(x_22020); +x_22021 = lean_ctor_get(x_22019, 1); +lean_inc(x_22021); +lean_dec(x_22019); +x_22022 = lean_ctor_get(x_22020, 0); +lean_inc(x_22022); +x_22023 = lean_ctor_get(x_22020, 1); +lean_inc(x_22023); +lean_dec(x_22020); +x_22024 = l_Lean_IR_ToIR_lowerLet___lambda__1(x_2, x_22016, x_22011, x_22022, x_22023, x_4, x_5, x_22021); +return x_22024; +} +else +{ +lean_object* x_22025; lean_object* x_22026; lean_object* x_22027; lean_object* x_22028; +lean_dec(x_22016); +lean_dec(x_22011); +lean_dec(x_5); +lean_dec(x_4); +lean_dec(x_2); +x_22025 = lean_ctor_get(x_22019, 0); +lean_inc(x_22025); +x_22026 = 
lean_ctor_get(x_22019, 1); +lean_inc(x_22026); +if (lean_is_exclusive(x_22019)) { + lean_ctor_release(x_22019, 0); + lean_ctor_release(x_22019, 1); + x_22027 = x_22019; +} else { + lean_dec_ref(x_22019); + x_22027 = lean_box(0); +} +if (lean_is_scalar(x_22027)) { + x_22028 = lean_alloc_ctor(1, 2, 0); +} else { + x_22028 = x_22027; +} +lean_ctor_set(x_22028, 0, x_22025); +lean_ctor_set(x_22028, 1, x_22026); +return x_22028; +} +} +} +else +{ +lean_object* x_22029; lean_object* x_22030; lean_object* x_22031; lean_object* x_22032; lean_object* x_22033; lean_object* x_22034; lean_object* x_22035; lean_object* x_22036; lean_object* x_22037; +lean_dec(x_21982); +lean_dec(x_21980); +if (lean_is_scalar(x_21978)) { + x_22029 = lean_alloc_ctor(7, 2, 0); +} else { + x_22029 = x_21978; + lean_ctor_set_tag(x_22029, 7); +} +lean_ctor_set(x_22029, 0, x_153); +lean_ctor_set(x_22029, 1, x_20601); +x_22030 = lean_ctor_get(x_1, 0); +lean_inc(x_22030); +lean_dec(x_1); +x_22031 = l_Lean_IR_ToIR_bindVar(x_22030, x_21977, x_4, x_5, x_21976); +x_22032 = lean_ctor_get(x_22031, 0); +lean_inc(x_22032); +x_22033 = lean_ctor_get(x_22031, 1); +lean_inc(x_22033); +lean_dec(x_22031); +x_22034 = lean_ctor_get(x_22032, 0); +lean_inc(x_22034); +x_22035 = lean_ctor_get(x_22032, 1); +lean_inc(x_22035); +lean_dec(x_22032); +x_22036 = lean_box(7); +x_22037 = l_Lean_IR_ToIR_lowerLet___lambda__1(x_2, x_22034, x_22029, x_22036, x_22035, x_4, x_5, x_22033); +return x_22037; +} +} +} +else +{ +lean_object* x_22038; lean_object* x_22039; lean_object* x_22040; +lean_dec(x_21954); +lean_dec(x_20601); +lean_dec(x_153); +lean_dec(x_5); +lean_dec(x_4); +lean_dec(x_2); +lean_dec(x_1); +x_22038 = lean_box(13); +x_22039 = lean_alloc_ctor(0, 2, 0); +lean_ctor_set(x_22039, 0, x_22038); +lean_ctor_set(x_22039, 1, x_21943); +if (lean_is_scalar(x_21947)) { + x_22040 = lean_alloc_ctor(0, 2, 0); +} else { + x_22040 = x_21947; +} +lean_ctor_set(x_22040, 0, x_22039); +lean_ctor_set(x_22040, 1, x_21946); +return x_22040; +} +} +else +{ +lean_object* x_22041; lean_object* x_22042; lean_object* x_22043; +lean_dec(x_21954); +lean_dec(x_21947); +lean_dec(x_153); +x_22041 = l_Lean_IR_instInhabitedArg; +x_22042 = lean_unsigned_to_nat(2u); +x_22043 = lean_array_get(x_22041, x_20601, x_22042); +lean_dec(x_20601); +if (lean_obj_tag(x_22043) == 0) +{ +lean_object* x_22044; lean_object* x_22045; lean_object* x_22046; lean_object* x_22047; lean_object* x_22048; lean_object* x_22049; lean_object* x_22050; +x_22044 = lean_ctor_get(x_22043, 0); +lean_inc(x_22044); +lean_dec(x_22043); +x_22045 = lean_ctor_get(x_1, 0); +lean_inc(x_22045); +lean_dec(x_1); +x_22046 = l_Lean_IR_ToIR_bindVarToVarId(x_22045, x_22044, x_21943, x_4, x_5, x_21946); +x_22047 = lean_ctor_get(x_22046, 0); +lean_inc(x_22047); +x_22048 = lean_ctor_get(x_22046, 1); +lean_inc(x_22048); +lean_dec(x_22046); +x_22049 = lean_ctor_get(x_22047, 1); +lean_inc(x_22049); +lean_dec(x_22047); +x_22050 = l_Lean_IR_ToIR_lowerCode(x_2, x_22049, x_4, x_5, x_22048); +return x_22050; +} +else +{ +lean_object* x_22051; lean_object* x_22052; lean_object* x_22053; lean_object* x_22054; lean_object* x_22055; lean_object* x_22056; +x_22051 = lean_ctor_get(x_1, 0); +lean_inc(x_22051); +lean_dec(x_1); +x_22052 = l_Lean_IR_ToIR_bindErased(x_22051, x_21943, x_4, x_5, x_21946); +x_22053 = lean_ctor_get(x_22052, 0); +lean_inc(x_22053); +x_22054 = lean_ctor_get(x_22052, 1); +lean_inc(x_22054); +lean_dec(x_22052); +x_22055 = lean_ctor_get(x_22053, 1); +lean_inc(x_22055); +lean_dec(x_22053); +x_22056 = 
l_Lean_IR_ToIR_lowerCode(x_2, x_22055, x_4, x_5, x_22054); +return x_22056; +} +} +} +case 1: +{ +lean_object* x_22057; lean_object* x_22058; lean_object* x_22085; lean_object* x_22086; +lean_dec(x_21953); +lean_dec(x_21948); +lean_dec(x_20593); +lean_dec(x_20592); +lean_inc(x_153); +x_22085 = l_Lean_Compiler_LCNF_getMonoDecl_x3f(x_153, x_4, x_5, x_21946); +x_22086 = lean_ctor_get(x_22085, 0); +lean_inc(x_22086); +if (lean_obj_tag(x_22086) == 0) +{ +lean_object* x_22087; lean_object* x_22088; lean_object* x_22089; +x_22087 = lean_ctor_get(x_22085, 1); +lean_inc(x_22087); +lean_dec(x_22085); +x_22088 = lean_box(0); +x_22089 = lean_alloc_ctor(0, 2, 0); +lean_ctor_set(x_22089, 0, x_22088); +lean_ctor_set(x_22089, 1, x_21943); +x_22057 = x_22089; +x_22058 = x_22087; +goto block_22084; +} +else +{ +lean_object* x_22090; lean_object* x_22091; lean_object* x_22092; lean_object* x_22093; lean_object* x_22094; lean_object* x_22095; lean_object* x_22096; uint8_t x_22097; +x_22090 = lean_ctor_get(x_22085, 1); +lean_inc(x_22090); +if (lean_is_exclusive(x_22085)) { + lean_ctor_release(x_22085, 0); + lean_ctor_release(x_22085, 1); + x_22091 = x_22085; +} else { + lean_dec_ref(x_22085); + x_22091 = lean_box(0); +} +x_22092 = lean_ctor_get(x_22086, 0); +lean_inc(x_22092); +if (lean_is_exclusive(x_22086)) { + lean_ctor_release(x_22086, 0); + x_22093 = x_22086; +} else { + lean_dec_ref(x_22086); + x_22093 = lean_box(0); +} +x_22094 = lean_array_get_size(x_20601); +x_22095 = lean_ctor_get(x_22092, 3); +lean_inc(x_22095); +lean_dec(x_22092); +x_22096 = lean_array_get_size(x_22095); +lean_dec(x_22095); +x_22097 = lean_nat_dec_lt(x_22094, x_22096); +if (x_22097 == 0) +{ +uint8_t x_22098; +x_22098 = lean_nat_dec_eq(x_22094, x_22096); +if (x_22098 == 0) +{ +lean_object* x_22099; lean_object* x_22100; lean_object* x_22101; lean_object* x_22102; lean_object* x_22103; lean_object* x_22104; lean_object* x_22105; lean_object* x_22106; lean_object* x_22107; lean_object* x_22108; lean_object* x_22109; lean_object* x_22110; lean_object* x_22111; lean_object* x_22112; lean_object* x_22113; lean_object* x_22114; lean_object* x_22115; +x_22099 = lean_unsigned_to_nat(0u); +x_22100 = l_Array_extract___rarg(x_20601, x_22099, x_22096); +x_22101 = l_Array_extract___rarg(x_20601, x_22096, x_22094); +lean_dec(x_22094); +lean_inc(x_153); +if (lean_is_scalar(x_22091)) { + x_22102 = lean_alloc_ctor(6, 2, 0); +} else { + x_22102 = x_22091; + lean_ctor_set_tag(x_22102, 6); +} +lean_ctor_set(x_22102, 0, x_153); +lean_ctor_set(x_22102, 1, x_22100); +x_22103 = lean_ctor_get(x_1, 0); +lean_inc(x_22103); +x_22104 = l_Lean_IR_ToIR_bindVar(x_22103, x_21943, x_4, x_5, x_22090); +x_22105 = lean_ctor_get(x_22104, 0); +lean_inc(x_22105); +x_22106 = lean_ctor_get(x_22104, 1); +lean_inc(x_22106); +lean_dec(x_22104); +x_22107 = lean_ctor_get(x_22105, 0); +lean_inc(x_22107); +x_22108 = lean_ctor_get(x_22105, 1); +lean_inc(x_22108); +lean_dec(x_22105); +x_22109 = l_Lean_IR_ToIR_newVar(x_22108, x_4, x_5, x_22106); +x_22110 = lean_ctor_get(x_22109, 0); +lean_inc(x_22110); +x_22111 = lean_ctor_get(x_22109, 1); +lean_inc(x_22111); +lean_dec(x_22109); +x_22112 = lean_ctor_get(x_22110, 0); +lean_inc(x_22112); +x_22113 = lean_ctor_get(x_22110, 1); +lean_inc(x_22113); +lean_dec(x_22110); +x_22114 = lean_ctor_get(x_1, 2); +lean_inc(x_22114); +lean_inc(x_5); +lean_inc(x_4); +x_22115 = l_Lean_IR_ToIR_lowerType(x_22114, x_22113, x_4, x_5, x_22111); +if (lean_obj_tag(x_22115) == 0) +{ +lean_object* x_22116; lean_object* x_22117; lean_object* x_22118; lean_object* 
x_22119; lean_object* x_22120; +x_22116 = lean_ctor_get(x_22115, 0); +lean_inc(x_22116); +x_22117 = lean_ctor_get(x_22115, 1); +lean_inc(x_22117); +lean_dec(x_22115); +x_22118 = lean_ctor_get(x_22116, 0); +lean_inc(x_22118); +x_22119 = lean_ctor_get(x_22116, 1); +lean_inc(x_22119); +lean_dec(x_22116); +lean_inc(x_5); +lean_inc(x_4); +lean_inc(x_2); +x_22120 = l_Lean_IR_ToIR_lowerLet___lambda__3(x_2, x_22112, x_22101, x_22107, x_22102, x_22118, x_22119, x_4, x_5, x_22117); +if (lean_obj_tag(x_22120) == 0) +{ +lean_object* x_22121; lean_object* x_22122; lean_object* x_22123; lean_object* x_22124; lean_object* x_22125; lean_object* x_22126; lean_object* x_22127; +x_22121 = lean_ctor_get(x_22120, 0); +lean_inc(x_22121); +x_22122 = lean_ctor_get(x_22120, 1); +lean_inc(x_22122); +lean_dec(x_22120); +x_22123 = lean_ctor_get(x_22121, 0); +lean_inc(x_22123); +x_22124 = lean_ctor_get(x_22121, 1); +lean_inc(x_22124); +if (lean_is_exclusive(x_22121)) { + lean_ctor_release(x_22121, 0); + lean_ctor_release(x_22121, 1); + x_22125 = x_22121; +} else { + lean_dec_ref(x_22121); + x_22125 = lean_box(0); +} +if (lean_is_scalar(x_22093)) { + x_22126 = lean_alloc_ctor(1, 1, 0); +} else { + x_22126 = x_22093; +} +lean_ctor_set(x_22126, 0, x_22123); +if (lean_is_scalar(x_22125)) { + x_22127 = lean_alloc_ctor(0, 2, 0); +} else { + x_22127 = x_22125; +} +lean_ctor_set(x_22127, 0, x_22126); +lean_ctor_set(x_22127, 1, x_22124); +x_22057 = x_22127; +x_22058 = x_22122; +goto block_22084; +} +else +{ +lean_object* x_22128; lean_object* x_22129; lean_object* x_22130; lean_object* x_22131; +lean_dec(x_22093); +lean_dec(x_21947); +lean_dec(x_20601); +lean_dec(x_153); +lean_dec(x_5); +lean_dec(x_4); +lean_dec(x_2); +lean_dec(x_1); +x_22128 = lean_ctor_get(x_22120, 0); +lean_inc(x_22128); +x_22129 = lean_ctor_get(x_22120, 1); +lean_inc(x_22129); +if (lean_is_exclusive(x_22120)) { + lean_ctor_release(x_22120, 0); + lean_ctor_release(x_22120, 1); + x_22130 = x_22120; +} else { + lean_dec_ref(x_22120); + x_22130 = lean_box(0); +} +if (lean_is_scalar(x_22130)) { + x_22131 = lean_alloc_ctor(1, 2, 0); +} else { + x_22131 = x_22130; +} +lean_ctor_set(x_22131, 0, x_22128); +lean_ctor_set(x_22131, 1, x_22129); +return x_22131; +} +} +else +{ +lean_object* x_22132; lean_object* x_22133; lean_object* x_22134; lean_object* x_22135; +lean_dec(x_22112); +lean_dec(x_22107); +lean_dec(x_22102); +lean_dec(x_22101); +lean_dec(x_22093); +lean_dec(x_21947); +lean_dec(x_20601); +lean_dec(x_153); +lean_dec(x_5); +lean_dec(x_4); +lean_dec(x_2); +lean_dec(x_1); +x_22132 = lean_ctor_get(x_22115, 0); +lean_inc(x_22132); +x_22133 = lean_ctor_get(x_22115, 1); +lean_inc(x_22133); +if (lean_is_exclusive(x_22115)) { + lean_ctor_release(x_22115, 0); + lean_ctor_release(x_22115, 1); + x_22134 = x_22115; +} else { + lean_dec_ref(x_22115); + x_22134 = lean_box(0); +} +if (lean_is_scalar(x_22134)) { + x_22135 = lean_alloc_ctor(1, 2, 0); +} else { + x_22135 = x_22134; +} +lean_ctor_set(x_22135, 0, x_22132); +lean_ctor_set(x_22135, 1, x_22133); +return x_22135; +} +} +else +{ +lean_object* x_22136; lean_object* x_22137; lean_object* x_22138; lean_object* x_22139; lean_object* x_22140; lean_object* x_22141; lean_object* x_22142; lean_object* x_22143; lean_object* x_22144; +lean_dec(x_22096); +lean_dec(x_22094); +lean_inc(x_20601); +lean_inc(x_153); +if (lean_is_scalar(x_22091)) { + x_22136 = lean_alloc_ctor(6, 2, 0); +} else { + x_22136 = x_22091; + lean_ctor_set_tag(x_22136, 6); +} +lean_ctor_set(x_22136, 0, x_153); +lean_ctor_set(x_22136, 1, x_20601); +x_22137 
= lean_ctor_get(x_1, 0); +lean_inc(x_22137); +x_22138 = l_Lean_IR_ToIR_bindVar(x_22137, x_21943, x_4, x_5, x_22090); +x_22139 = lean_ctor_get(x_22138, 0); +lean_inc(x_22139); +x_22140 = lean_ctor_get(x_22138, 1); +lean_inc(x_22140); +lean_dec(x_22138); +x_22141 = lean_ctor_get(x_22139, 0); +lean_inc(x_22141); +x_22142 = lean_ctor_get(x_22139, 1); +lean_inc(x_22142); +lean_dec(x_22139); +x_22143 = lean_ctor_get(x_1, 2); +lean_inc(x_22143); +lean_inc(x_5); +lean_inc(x_4); +x_22144 = l_Lean_IR_ToIR_lowerType(x_22143, x_22142, x_4, x_5, x_22140); +if (lean_obj_tag(x_22144) == 0) +{ +lean_object* x_22145; lean_object* x_22146; lean_object* x_22147; lean_object* x_22148; lean_object* x_22149; +x_22145 = lean_ctor_get(x_22144, 0); +lean_inc(x_22145); +x_22146 = lean_ctor_get(x_22144, 1); +lean_inc(x_22146); +lean_dec(x_22144); +x_22147 = lean_ctor_get(x_22145, 0); +lean_inc(x_22147); +x_22148 = lean_ctor_get(x_22145, 1); +lean_inc(x_22148); +lean_dec(x_22145); +lean_inc(x_5); +lean_inc(x_4); +lean_inc(x_2); +x_22149 = l_Lean_IR_ToIR_lowerLet___lambda__1(x_2, x_22141, x_22136, x_22147, x_22148, x_4, x_5, x_22146); +if (lean_obj_tag(x_22149) == 0) +{ +lean_object* x_22150; lean_object* x_22151; lean_object* x_22152; lean_object* x_22153; lean_object* x_22154; lean_object* x_22155; lean_object* x_22156; +x_22150 = lean_ctor_get(x_22149, 0); +lean_inc(x_22150); +x_22151 = lean_ctor_get(x_22149, 1); +lean_inc(x_22151); +lean_dec(x_22149); +x_22152 = lean_ctor_get(x_22150, 0); +lean_inc(x_22152); +x_22153 = lean_ctor_get(x_22150, 1); +lean_inc(x_22153); +if (lean_is_exclusive(x_22150)) { + lean_ctor_release(x_22150, 0); + lean_ctor_release(x_22150, 1); + x_22154 = x_22150; +} else { + lean_dec_ref(x_22150); + x_22154 = lean_box(0); +} +if (lean_is_scalar(x_22093)) { + x_22155 = lean_alloc_ctor(1, 1, 0); +} else { + x_22155 = x_22093; +} +lean_ctor_set(x_22155, 0, x_22152); +if (lean_is_scalar(x_22154)) { + x_22156 = lean_alloc_ctor(0, 2, 0); +} else { + x_22156 = x_22154; +} +lean_ctor_set(x_22156, 0, x_22155); +lean_ctor_set(x_22156, 1, x_22153); +x_22057 = x_22156; +x_22058 = x_22151; +goto block_22084; +} +else +{ +lean_object* x_22157; lean_object* x_22158; lean_object* x_22159; lean_object* x_22160; +lean_dec(x_22093); +lean_dec(x_21947); +lean_dec(x_20601); +lean_dec(x_153); +lean_dec(x_5); +lean_dec(x_4); +lean_dec(x_2); +lean_dec(x_1); +x_22157 = lean_ctor_get(x_22149, 0); +lean_inc(x_22157); +x_22158 = lean_ctor_get(x_22149, 1); +lean_inc(x_22158); +if (lean_is_exclusive(x_22149)) { + lean_ctor_release(x_22149, 0); + lean_ctor_release(x_22149, 1); + x_22159 = x_22149; +} else { + lean_dec_ref(x_22149); + x_22159 = lean_box(0); +} +if (lean_is_scalar(x_22159)) { + x_22160 = lean_alloc_ctor(1, 2, 0); +} else { + x_22160 = x_22159; +} +lean_ctor_set(x_22160, 0, x_22157); +lean_ctor_set(x_22160, 1, x_22158); +return x_22160; +} +} +else +{ +lean_object* x_22161; lean_object* x_22162; lean_object* x_22163; lean_object* x_22164; +lean_dec(x_22141); +lean_dec(x_22136); +lean_dec(x_22093); +lean_dec(x_21947); +lean_dec(x_20601); +lean_dec(x_153); +lean_dec(x_5); +lean_dec(x_4); +lean_dec(x_2); +lean_dec(x_1); +x_22161 = lean_ctor_get(x_22144, 0); +lean_inc(x_22161); +x_22162 = lean_ctor_get(x_22144, 1); +lean_inc(x_22162); +if (lean_is_exclusive(x_22144)) { + lean_ctor_release(x_22144, 0); + lean_ctor_release(x_22144, 1); + x_22163 = x_22144; +} else { + lean_dec_ref(x_22144); + x_22163 = lean_box(0); +} +if (lean_is_scalar(x_22163)) { + x_22164 = lean_alloc_ctor(1, 2, 0); +} else { + x_22164 = 
x_22163; +} +lean_ctor_set(x_22164, 0, x_22161); +lean_ctor_set(x_22164, 1, x_22162); +return x_22164; +} +} +} +else +{ +lean_object* x_22165; lean_object* x_22166; lean_object* x_22167; lean_object* x_22168; lean_object* x_22169; lean_object* x_22170; lean_object* x_22171; lean_object* x_22172; lean_object* x_22173; +lean_dec(x_22096); +lean_dec(x_22094); +lean_inc(x_20601); +lean_inc(x_153); +if (lean_is_scalar(x_22091)) { + x_22165 = lean_alloc_ctor(7, 2, 0); +} else { + x_22165 = x_22091; + lean_ctor_set_tag(x_22165, 7); +} +lean_ctor_set(x_22165, 0, x_153); +lean_ctor_set(x_22165, 1, x_20601); +x_22166 = lean_ctor_get(x_1, 0); +lean_inc(x_22166); +x_22167 = l_Lean_IR_ToIR_bindVar(x_22166, x_21943, x_4, x_5, x_22090); +x_22168 = lean_ctor_get(x_22167, 0); +lean_inc(x_22168); +x_22169 = lean_ctor_get(x_22167, 1); +lean_inc(x_22169); +lean_dec(x_22167); +x_22170 = lean_ctor_get(x_22168, 0); +lean_inc(x_22170); +x_22171 = lean_ctor_get(x_22168, 1); +lean_inc(x_22171); +lean_dec(x_22168); +x_22172 = lean_box(7); +lean_inc(x_5); +lean_inc(x_4); +lean_inc(x_2); +x_22173 = l_Lean_IR_ToIR_lowerLet___lambda__1(x_2, x_22170, x_22165, x_22172, x_22171, x_4, x_5, x_22169); +if (lean_obj_tag(x_22173) == 0) +{ +lean_object* x_22174; lean_object* x_22175; lean_object* x_22176; lean_object* x_22177; lean_object* x_22178; lean_object* x_22179; lean_object* x_22180; +x_22174 = lean_ctor_get(x_22173, 0); +lean_inc(x_22174); +x_22175 = lean_ctor_get(x_22173, 1); +lean_inc(x_22175); +lean_dec(x_22173); +x_22176 = lean_ctor_get(x_22174, 0); +lean_inc(x_22176); +x_22177 = lean_ctor_get(x_22174, 1); +lean_inc(x_22177); +if (lean_is_exclusive(x_22174)) { + lean_ctor_release(x_22174, 0); + lean_ctor_release(x_22174, 1); + x_22178 = x_22174; +} else { + lean_dec_ref(x_22174); + x_22178 = lean_box(0); +} +if (lean_is_scalar(x_22093)) { + x_22179 = lean_alloc_ctor(1, 1, 0); +} else { + x_22179 = x_22093; +} +lean_ctor_set(x_22179, 0, x_22176); +if (lean_is_scalar(x_22178)) { + x_22180 = lean_alloc_ctor(0, 2, 0); +} else { + x_22180 = x_22178; +} +lean_ctor_set(x_22180, 0, x_22179); +lean_ctor_set(x_22180, 1, x_22177); +x_22057 = x_22180; +x_22058 = x_22175; +goto block_22084; +} +else +{ +lean_object* x_22181; lean_object* x_22182; lean_object* x_22183; lean_object* x_22184; +lean_dec(x_22093); +lean_dec(x_21947); +lean_dec(x_20601); +lean_dec(x_153); +lean_dec(x_5); +lean_dec(x_4); +lean_dec(x_2); +lean_dec(x_1); +x_22181 = lean_ctor_get(x_22173, 0); +lean_inc(x_22181); +x_22182 = lean_ctor_get(x_22173, 1); +lean_inc(x_22182); +if (lean_is_exclusive(x_22173)) { + lean_ctor_release(x_22173, 0); + lean_ctor_release(x_22173, 1); + x_22183 = x_22173; +} else { + lean_dec_ref(x_22173); + x_22183 = lean_box(0); +} +if (lean_is_scalar(x_22183)) { + x_22184 = lean_alloc_ctor(1, 2, 0); +} else { + x_22184 = x_22183; +} +lean_ctor_set(x_22184, 0, x_22181); +lean_ctor_set(x_22184, 1, x_22182); +return x_22184; +} +} +} +block_22084: +{ +lean_object* x_22059; +x_22059 = lean_ctor_get(x_22057, 0); +lean_inc(x_22059); +if (lean_obj_tag(x_22059) == 0) +{ +lean_object* x_22060; lean_object* x_22061; lean_object* x_22062; lean_object* x_22063; lean_object* x_22064; lean_object* x_22065; lean_object* x_22066; lean_object* x_22067; lean_object* x_22068; lean_object* x_22069; +lean_dec(x_21947); +x_22060 = lean_ctor_get(x_22057, 1); +lean_inc(x_22060); +lean_dec(x_22057); +x_22061 = lean_alloc_ctor(6, 2, 0); +lean_ctor_set(x_22061, 0, x_153); +lean_ctor_set(x_22061, 1, x_20601); +x_22062 = lean_ctor_get(x_1, 0); +lean_inc(x_22062); 
+x_22063 = l_Lean_IR_ToIR_bindVar(x_22062, x_22060, x_4, x_5, x_22058); +x_22064 = lean_ctor_get(x_22063, 0); +lean_inc(x_22064); +x_22065 = lean_ctor_get(x_22063, 1); +lean_inc(x_22065); +lean_dec(x_22063); +x_22066 = lean_ctor_get(x_22064, 0); +lean_inc(x_22066); +x_22067 = lean_ctor_get(x_22064, 1); +lean_inc(x_22067); +lean_dec(x_22064); +x_22068 = lean_ctor_get(x_1, 2); +lean_inc(x_22068); +lean_dec(x_1); +lean_inc(x_5); +lean_inc(x_4); +x_22069 = l_Lean_IR_ToIR_lowerType(x_22068, x_22067, x_4, x_5, x_22065); +if (lean_obj_tag(x_22069) == 0) +{ +lean_object* x_22070; lean_object* x_22071; lean_object* x_22072; lean_object* x_22073; lean_object* x_22074; +x_22070 = lean_ctor_get(x_22069, 0); +lean_inc(x_22070); +x_22071 = lean_ctor_get(x_22069, 1); +lean_inc(x_22071); +lean_dec(x_22069); +x_22072 = lean_ctor_get(x_22070, 0); +lean_inc(x_22072); +x_22073 = lean_ctor_get(x_22070, 1); +lean_inc(x_22073); +lean_dec(x_22070); +x_22074 = l_Lean_IR_ToIR_lowerLet___lambda__1(x_2, x_22066, x_22061, x_22072, x_22073, x_4, x_5, x_22071); +return x_22074; +} +else +{ +lean_object* x_22075; lean_object* x_22076; lean_object* x_22077; lean_object* x_22078; +lean_dec(x_22066); +lean_dec(x_22061); +lean_dec(x_5); +lean_dec(x_4); +lean_dec(x_2); +x_22075 = lean_ctor_get(x_22069, 0); +lean_inc(x_22075); +x_22076 = lean_ctor_get(x_22069, 1); +lean_inc(x_22076); +if (lean_is_exclusive(x_22069)) { + lean_ctor_release(x_22069, 0); + lean_ctor_release(x_22069, 1); + x_22077 = x_22069; +} else { + lean_dec_ref(x_22069); + x_22077 = lean_box(0); +} +if (lean_is_scalar(x_22077)) { + x_22078 = lean_alloc_ctor(1, 2, 0); +} else { + x_22078 = x_22077; +} +lean_ctor_set(x_22078, 0, x_22075); +lean_ctor_set(x_22078, 1, x_22076); +return x_22078; +} +} +else +{ +lean_object* x_22079; lean_object* x_22080; lean_object* x_22081; lean_object* x_22082; lean_object* x_22083; +lean_dec(x_20601); +lean_dec(x_153); +lean_dec(x_5); +lean_dec(x_4); +lean_dec(x_2); +lean_dec(x_1); +x_22079 = lean_ctor_get(x_22057, 1); +lean_inc(x_22079); +if (lean_is_exclusive(x_22057)) { + lean_ctor_release(x_22057, 0); + lean_ctor_release(x_22057, 1); + x_22080 = x_22057; +} else { + lean_dec_ref(x_22057); + x_22080 = lean_box(0); +} +x_22081 = lean_ctor_get(x_22059, 0); +lean_inc(x_22081); +lean_dec(x_22059); +if (lean_is_scalar(x_22080)) { + x_22082 = lean_alloc_ctor(0, 2, 0); +} else { + x_22082 = x_22080; +} +lean_ctor_set(x_22082, 0, x_22081); +lean_ctor_set(x_22082, 1, x_22079); +if (lean_is_scalar(x_21947)) { + x_22083 = lean_alloc_ctor(0, 2, 0); +} else { + x_22083 = x_21947; +} +lean_ctor_set(x_22083, 0, x_22082); +lean_ctor_set(x_22083, 1, x_22058); +return x_22083; +} +} +} +case 2: +{ +lean_object* x_22185; lean_object* x_22186; +lean_dec(x_21953); +lean_dec(x_21948); +lean_dec(x_21947); +lean_dec(x_20601); +lean_dec(x_20593); +lean_dec(x_20592); +lean_dec(x_153); +lean_dec(x_2); +lean_dec(x_1); +x_22185 = l_Lean_IR_ToIR_lowerLet___closed__18; +x_22186 = l_panic___at_Lean_IR_ToIR_lowerAlt_loop___spec__1(x_22185, x_21943, x_4, x_5, x_21946); +return x_22186; +} +case 3: +{ +lean_object* x_22187; lean_object* x_22188; lean_object* x_22215; lean_object* x_22216; +lean_dec(x_21953); +lean_dec(x_21948); +lean_dec(x_20593); +lean_dec(x_20592); +lean_inc(x_153); +x_22215 = l_Lean_Compiler_LCNF_getMonoDecl_x3f(x_153, x_4, x_5, x_21946); +x_22216 = lean_ctor_get(x_22215, 0); +lean_inc(x_22216); +if (lean_obj_tag(x_22216) == 0) +{ +lean_object* x_22217; lean_object* x_22218; lean_object* x_22219; +x_22217 = lean_ctor_get(x_22215, 1); 
+lean_inc(x_22217); +lean_dec(x_22215); +x_22218 = lean_box(0); +x_22219 = lean_alloc_ctor(0, 2, 0); +lean_ctor_set(x_22219, 0, x_22218); +lean_ctor_set(x_22219, 1, x_21943); +x_22187 = x_22219; +x_22188 = x_22217; +goto block_22214; +} +else +{ +lean_object* x_22220; lean_object* x_22221; lean_object* x_22222; lean_object* x_22223; lean_object* x_22224; lean_object* x_22225; lean_object* x_22226; uint8_t x_22227; +x_22220 = lean_ctor_get(x_22215, 1); +lean_inc(x_22220); +if (lean_is_exclusive(x_22215)) { + lean_ctor_release(x_22215, 0); + lean_ctor_release(x_22215, 1); + x_22221 = x_22215; +} else { + lean_dec_ref(x_22215); + x_22221 = lean_box(0); +} +x_22222 = lean_ctor_get(x_22216, 0); +lean_inc(x_22222); +if (lean_is_exclusive(x_22216)) { + lean_ctor_release(x_22216, 0); + x_22223 = x_22216; +} else { + lean_dec_ref(x_22216); + x_22223 = lean_box(0); +} +x_22224 = lean_array_get_size(x_20601); +x_22225 = lean_ctor_get(x_22222, 3); +lean_inc(x_22225); +lean_dec(x_22222); +x_22226 = lean_array_get_size(x_22225); +lean_dec(x_22225); +x_22227 = lean_nat_dec_lt(x_22224, x_22226); +if (x_22227 == 0) +{ +uint8_t x_22228; +x_22228 = lean_nat_dec_eq(x_22224, x_22226); +if (x_22228 == 0) +{ +lean_object* x_22229; lean_object* x_22230; lean_object* x_22231; lean_object* x_22232; lean_object* x_22233; lean_object* x_22234; lean_object* x_22235; lean_object* x_22236; lean_object* x_22237; lean_object* x_22238; lean_object* x_22239; lean_object* x_22240; lean_object* x_22241; lean_object* x_22242; lean_object* x_22243; lean_object* x_22244; lean_object* x_22245; +x_22229 = lean_unsigned_to_nat(0u); +x_22230 = l_Array_extract___rarg(x_20601, x_22229, x_22226); +x_22231 = l_Array_extract___rarg(x_20601, x_22226, x_22224); +lean_dec(x_22224); +lean_inc(x_153); +if (lean_is_scalar(x_22221)) { + x_22232 = lean_alloc_ctor(6, 2, 0); +} else { + x_22232 = x_22221; + lean_ctor_set_tag(x_22232, 6); +} +lean_ctor_set(x_22232, 0, x_153); +lean_ctor_set(x_22232, 1, x_22230); +x_22233 = lean_ctor_get(x_1, 0); +lean_inc(x_22233); +x_22234 = l_Lean_IR_ToIR_bindVar(x_22233, x_21943, x_4, x_5, x_22220); +x_22235 = lean_ctor_get(x_22234, 0); +lean_inc(x_22235); +x_22236 = lean_ctor_get(x_22234, 1); +lean_inc(x_22236); +lean_dec(x_22234); +x_22237 = lean_ctor_get(x_22235, 0); +lean_inc(x_22237); +x_22238 = lean_ctor_get(x_22235, 1); +lean_inc(x_22238); +lean_dec(x_22235); +x_22239 = l_Lean_IR_ToIR_newVar(x_22238, x_4, x_5, x_22236); +x_22240 = lean_ctor_get(x_22239, 0); +lean_inc(x_22240); +x_22241 = lean_ctor_get(x_22239, 1); +lean_inc(x_22241); +lean_dec(x_22239); +x_22242 = lean_ctor_get(x_22240, 0); +lean_inc(x_22242); +x_22243 = lean_ctor_get(x_22240, 1); +lean_inc(x_22243); +lean_dec(x_22240); +x_22244 = lean_ctor_get(x_1, 2); +lean_inc(x_22244); +lean_inc(x_5); +lean_inc(x_4); +x_22245 = l_Lean_IR_ToIR_lowerType(x_22244, x_22243, x_4, x_5, x_22241); +if (lean_obj_tag(x_22245) == 0) +{ +lean_object* x_22246; lean_object* x_22247; lean_object* x_22248; lean_object* x_22249; lean_object* x_22250; +x_22246 = lean_ctor_get(x_22245, 0); +lean_inc(x_22246); +x_22247 = lean_ctor_get(x_22245, 1); +lean_inc(x_22247); +lean_dec(x_22245); +x_22248 = lean_ctor_get(x_22246, 0); +lean_inc(x_22248); +x_22249 = lean_ctor_get(x_22246, 1); +lean_inc(x_22249); +lean_dec(x_22246); +lean_inc(x_5); +lean_inc(x_4); +lean_inc(x_2); +x_22250 = l_Lean_IR_ToIR_lowerLet___lambda__3(x_2, x_22242, x_22231, x_22237, x_22232, x_22248, x_22249, x_4, x_5, x_22247); +if (lean_obj_tag(x_22250) == 0) +{ +lean_object* x_22251; lean_object* x_22252; 
lean_object* x_22253; lean_object* x_22254; lean_object* x_22255; lean_object* x_22256; lean_object* x_22257; +x_22251 = lean_ctor_get(x_22250, 0); +lean_inc(x_22251); +x_22252 = lean_ctor_get(x_22250, 1); +lean_inc(x_22252); +lean_dec(x_22250); +x_22253 = lean_ctor_get(x_22251, 0); +lean_inc(x_22253); +x_22254 = lean_ctor_get(x_22251, 1); +lean_inc(x_22254); +if (lean_is_exclusive(x_22251)) { + lean_ctor_release(x_22251, 0); + lean_ctor_release(x_22251, 1); + x_22255 = x_22251; +} else { + lean_dec_ref(x_22251); + x_22255 = lean_box(0); +} +if (lean_is_scalar(x_22223)) { + x_22256 = lean_alloc_ctor(1, 1, 0); +} else { + x_22256 = x_22223; +} +lean_ctor_set(x_22256, 0, x_22253); +if (lean_is_scalar(x_22255)) { + x_22257 = lean_alloc_ctor(0, 2, 0); +} else { + x_22257 = x_22255; +} +lean_ctor_set(x_22257, 0, x_22256); +lean_ctor_set(x_22257, 1, x_22254); +x_22187 = x_22257; +x_22188 = x_22252; +goto block_22214; +} +else +{ +lean_object* x_22258; lean_object* x_22259; lean_object* x_22260; lean_object* x_22261; +lean_dec(x_22223); +lean_dec(x_21947); +lean_dec(x_20601); +lean_dec(x_153); +lean_dec(x_5); +lean_dec(x_4); +lean_dec(x_2); +lean_dec(x_1); +x_22258 = lean_ctor_get(x_22250, 0); +lean_inc(x_22258); +x_22259 = lean_ctor_get(x_22250, 1); +lean_inc(x_22259); +if (lean_is_exclusive(x_22250)) { + lean_ctor_release(x_22250, 0); + lean_ctor_release(x_22250, 1); + x_22260 = x_22250; +} else { + lean_dec_ref(x_22250); + x_22260 = lean_box(0); +} +if (lean_is_scalar(x_22260)) { + x_22261 = lean_alloc_ctor(1, 2, 0); +} else { + x_22261 = x_22260; +} +lean_ctor_set(x_22261, 0, x_22258); +lean_ctor_set(x_22261, 1, x_22259); +return x_22261; +} +} +else +{ +lean_object* x_22262; lean_object* x_22263; lean_object* x_22264; lean_object* x_22265; +lean_dec(x_22242); +lean_dec(x_22237); +lean_dec(x_22232); +lean_dec(x_22231); +lean_dec(x_22223); +lean_dec(x_21947); +lean_dec(x_20601); +lean_dec(x_153); +lean_dec(x_5); +lean_dec(x_4); +lean_dec(x_2); +lean_dec(x_1); +x_22262 = lean_ctor_get(x_22245, 0); +lean_inc(x_22262); +x_22263 = lean_ctor_get(x_22245, 1); +lean_inc(x_22263); +if (lean_is_exclusive(x_22245)) { + lean_ctor_release(x_22245, 0); + lean_ctor_release(x_22245, 1); + x_22264 = x_22245; +} else { + lean_dec_ref(x_22245); + x_22264 = lean_box(0); +} +if (lean_is_scalar(x_22264)) { + x_22265 = lean_alloc_ctor(1, 2, 0); +} else { + x_22265 = x_22264; +} +lean_ctor_set(x_22265, 0, x_22262); +lean_ctor_set(x_22265, 1, x_22263); +return x_22265; +} +} +else +{ +lean_object* x_22266; lean_object* x_22267; lean_object* x_22268; lean_object* x_22269; lean_object* x_22270; lean_object* x_22271; lean_object* x_22272; lean_object* x_22273; lean_object* x_22274; +lean_dec(x_22226); +lean_dec(x_22224); +lean_inc(x_20601); +lean_inc(x_153); +if (lean_is_scalar(x_22221)) { + x_22266 = lean_alloc_ctor(6, 2, 0); +} else { + x_22266 = x_22221; + lean_ctor_set_tag(x_22266, 6); +} +lean_ctor_set(x_22266, 0, x_153); +lean_ctor_set(x_22266, 1, x_20601); +x_22267 = lean_ctor_get(x_1, 0); +lean_inc(x_22267); +x_22268 = l_Lean_IR_ToIR_bindVar(x_22267, x_21943, x_4, x_5, x_22220); +x_22269 = lean_ctor_get(x_22268, 0); +lean_inc(x_22269); +x_22270 = lean_ctor_get(x_22268, 1); +lean_inc(x_22270); +lean_dec(x_22268); +x_22271 = lean_ctor_get(x_22269, 0); +lean_inc(x_22271); +x_22272 = lean_ctor_get(x_22269, 1); +lean_inc(x_22272); +lean_dec(x_22269); +x_22273 = lean_ctor_get(x_1, 2); +lean_inc(x_22273); +lean_inc(x_5); +lean_inc(x_4); +x_22274 = l_Lean_IR_ToIR_lowerType(x_22273, x_22272, x_4, x_5, x_22270); +if 
(lean_obj_tag(x_22274) == 0) +{ +lean_object* x_22275; lean_object* x_22276; lean_object* x_22277; lean_object* x_22278; lean_object* x_22279; +x_22275 = lean_ctor_get(x_22274, 0); +lean_inc(x_22275); +x_22276 = lean_ctor_get(x_22274, 1); +lean_inc(x_22276); +lean_dec(x_22274); +x_22277 = lean_ctor_get(x_22275, 0); +lean_inc(x_22277); +x_22278 = lean_ctor_get(x_22275, 1); +lean_inc(x_22278); +lean_dec(x_22275); +lean_inc(x_5); +lean_inc(x_4); +lean_inc(x_2); +x_22279 = l_Lean_IR_ToIR_lowerLet___lambda__1(x_2, x_22271, x_22266, x_22277, x_22278, x_4, x_5, x_22276); +if (lean_obj_tag(x_22279) == 0) +{ +lean_object* x_22280; lean_object* x_22281; lean_object* x_22282; lean_object* x_22283; lean_object* x_22284; lean_object* x_22285; lean_object* x_22286; +x_22280 = lean_ctor_get(x_22279, 0); +lean_inc(x_22280); +x_22281 = lean_ctor_get(x_22279, 1); +lean_inc(x_22281); +lean_dec(x_22279); +x_22282 = lean_ctor_get(x_22280, 0); +lean_inc(x_22282); +x_22283 = lean_ctor_get(x_22280, 1); +lean_inc(x_22283); +if (lean_is_exclusive(x_22280)) { + lean_ctor_release(x_22280, 0); + lean_ctor_release(x_22280, 1); + x_22284 = x_22280; +} else { + lean_dec_ref(x_22280); + x_22284 = lean_box(0); +} +if (lean_is_scalar(x_22223)) { + x_22285 = lean_alloc_ctor(1, 1, 0); +} else { + x_22285 = x_22223; +} +lean_ctor_set(x_22285, 0, x_22282); +if (lean_is_scalar(x_22284)) { + x_22286 = lean_alloc_ctor(0, 2, 0); +} else { + x_22286 = x_22284; +} +lean_ctor_set(x_22286, 0, x_22285); +lean_ctor_set(x_22286, 1, x_22283); +x_22187 = x_22286; +x_22188 = x_22281; +goto block_22214; +} +else +{ +lean_object* x_22287; lean_object* x_22288; lean_object* x_22289; lean_object* x_22290; +lean_dec(x_22223); +lean_dec(x_21947); +lean_dec(x_20601); +lean_dec(x_153); +lean_dec(x_5); +lean_dec(x_4); +lean_dec(x_2); +lean_dec(x_1); +x_22287 = lean_ctor_get(x_22279, 0); +lean_inc(x_22287); +x_22288 = lean_ctor_get(x_22279, 1); +lean_inc(x_22288); +if (lean_is_exclusive(x_22279)) { + lean_ctor_release(x_22279, 0); + lean_ctor_release(x_22279, 1); + x_22289 = x_22279; +} else { + lean_dec_ref(x_22279); + x_22289 = lean_box(0); +} +if (lean_is_scalar(x_22289)) { + x_22290 = lean_alloc_ctor(1, 2, 0); +} else { + x_22290 = x_22289; +} +lean_ctor_set(x_22290, 0, x_22287); +lean_ctor_set(x_22290, 1, x_22288); +return x_22290; +} +} +else +{ +lean_object* x_22291; lean_object* x_22292; lean_object* x_22293; lean_object* x_22294; +lean_dec(x_22271); +lean_dec(x_22266); +lean_dec(x_22223); +lean_dec(x_21947); +lean_dec(x_20601); +lean_dec(x_153); +lean_dec(x_5); +lean_dec(x_4); +lean_dec(x_2); +lean_dec(x_1); +x_22291 = lean_ctor_get(x_22274, 0); +lean_inc(x_22291); +x_22292 = lean_ctor_get(x_22274, 1); +lean_inc(x_22292); +if (lean_is_exclusive(x_22274)) { + lean_ctor_release(x_22274, 0); + lean_ctor_release(x_22274, 1); + x_22293 = x_22274; +} else { + lean_dec_ref(x_22274); + x_22293 = lean_box(0); +} +if (lean_is_scalar(x_22293)) { + x_22294 = lean_alloc_ctor(1, 2, 0); +} else { + x_22294 = x_22293; +} +lean_ctor_set(x_22294, 0, x_22291); +lean_ctor_set(x_22294, 1, x_22292); +return x_22294; +} +} +} +else +{ +lean_object* x_22295; lean_object* x_22296; lean_object* x_22297; lean_object* x_22298; lean_object* x_22299; lean_object* x_22300; lean_object* x_22301; lean_object* x_22302; lean_object* x_22303; +lean_dec(x_22226); +lean_dec(x_22224); +lean_inc(x_20601); +lean_inc(x_153); +if (lean_is_scalar(x_22221)) { + x_22295 = lean_alloc_ctor(7, 2, 0); +} else { + x_22295 = x_22221; + lean_ctor_set_tag(x_22295, 7); +} +lean_ctor_set(x_22295, 
0, x_153); +lean_ctor_set(x_22295, 1, x_20601); +x_22296 = lean_ctor_get(x_1, 0); +lean_inc(x_22296); +x_22297 = l_Lean_IR_ToIR_bindVar(x_22296, x_21943, x_4, x_5, x_22220); +x_22298 = lean_ctor_get(x_22297, 0); +lean_inc(x_22298); +x_22299 = lean_ctor_get(x_22297, 1); +lean_inc(x_22299); +lean_dec(x_22297); +x_22300 = lean_ctor_get(x_22298, 0); +lean_inc(x_22300); +x_22301 = lean_ctor_get(x_22298, 1); +lean_inc(x_22301); +lean_dec(x_22298); +x_22302 = lean_box(7); +lean_inc(x_5); +lean_inc(x_4); +lean_inc(x_2); +x_22303 = l_Lean_IR_ToIR_lowerLet___lambda__1(x_2, x_22300, x_22295, x_22302, x_22301, x_4, x_5, x_22299); +if (lean_obj_tag(x_22303) == 0) +{ +lean_object* x_22304; lean_object* x_22305; lean_object* x_22306; lean_object* x_22307; lean_object* x_22308; lean_object* x_22309; lean_object* x_22310; +x_22304 = lean_ctor_get(x_22303, 0); +lean_inc(x_22304); +x_22305 = lean_ctor_get(x_22303, 1); +lean_inc(x_22305); +lean_dec(x_22303); +x_22306 = lean_ctor_get(x_22304, 0); +lean_inc(x_22306); +x_22307 = lean_ctor_get(x_22304, 1); +lean_inc(x_22307); +if (lean_is_exclusive(x_22304)) { + lean_ctor_release(x_22304, 0); + lean_ctor_release(x_22304, 1); + x_22308 = x_22304; +} else { + lean_dec_ref(x_22304); + x_22308 = lean_box(0); +} +if (lean_is_scalar(x_22223)) { + x_22309 = lean_alloc_ctor(1, 1, 0); +} else { + x_22309 = x_22223; +} +lean_ctor_set(x_22309, 0, x_22306); +if (lean_is_scalar(x_22308)) { + x_22310 = lean_alloc_ctor(0, 2, 0); +} else { + x_22310 = x_22308; +} +lean_ctor_set(x_22310, 0, x_22309); +lean_ctor_set(x_22310, 1, x_22307); +x_22187 = x_22310; +x_22188 = x_22305; +goto block_22214; +} +else +{ +lean_object* x_22311; lean_object* x_22312; lean_object* x_22313; lean_object* x_22314; +lean_dec(x_22223); +lean_dec(x_21947); +lean_dec(x_20601); +lean_dec(x_153); +lean_dec(x_5); +lean_dec(x_4); +lean_dec(x_2); +lean_dec(x_1); +x_22311 = lean_ctor_get(x_22303, 0); +lean_inc(x_22311); +x_22312 = lean_ctor_get(x_22303, 1); +lean_inc(x_22312); +if (lean_is_exclusive(x_22303)) { + lean_ctor_release(x_22303, 0); + lean_ctor_release(x_22303, 1); + x_22313 = x_22303; +} else { + lean_dec_ref(x_22303); + x_22313 = lean_box(0); +} +if (lean_is_scalar(x_22313)) { + x_22314 = lean_alloc_ctor(1, 2, 0); +} else { + x_22314 = x_22313; +} +lean_ctor_set(x_22314, 0, x_22311); +lean_ctor_set(x_22314, 1, x_22312); +return x_22314; +} +} +} +block_22214: +{ +lean_object* x_22189; +x_22189 = lean_ctor_get(x_22187, 0); +lean_inc(x_22189); +if (lean_obj_tag(x_22189) == 0) +{ +lean_object* x_22190; lean_object* x_22191; lean_object* x_22192; lean_object* x_22193; lean_object* x_22194; lean_object* x_22195; lean_object* x_22196; lean_object* x_22197; lean_object* x_22198; lean_object* x_22199; +lean_dec(x_21947); +x_22190 = lean_ctor_get(x_22187, 1); +lean_inc(x_22190); +lean_dec(x_22187); +x_22191 = lean_alloc_ctor(6, 2, 0); +lean_ctor_set(x_22191, 0, x_153); +lean_ctor_set(x_22191, 1, x_20601); +x_22192 = lean_ctor_get(x_1, 0); +lean_inc(x_22192); +x_22193 = l_Lean_IR_ToIR_bindVar(x_22192, x_22190, x_4, x_5, x_22188); +x_22194 = lean_ctor_get(x_22193, 0); +lean_inc(x_22194); +x_22195 = lean_ctor_get(x_22193, 1); +lean_inc(x_22195); +lean_dec(x_22193); +x_22196 = lean_ctor_get(x_22194, 0); +lean_inc(x_22196); +x_22197 = lean_ctor_get(x_22194, 1); +lean_inc(x_22197); +lean_dec(x_22194); +x_22198 = lean_ctor_get(x_1, 2); +lean_inc(x_22198); +lean_dec(x_1); +lean_inc(x_5); +lean_inc(x_4); +x_22199 = l_Lean_IR_ToIR_lowerType(x_22198, x_22197, x_4, x_5, x_22195); +if (lean_obj_tag(x_22199) == 0) +{ 
+lean_object* x_22200; lean_object* x_22201; lean_object* x_22202; lean_object* x_22203; lean_object* x_22204; +x_22200 = lean_ctor_get(x_22199, 0); +lean_inc(x_22200); +x_22201 = lean_ctor_get(x_22199, 1); +lean_inc(x_22201); +lean_dec(x_22199); +x_22202 = lean_ctor_get(x_22200, 0); +lean_inc(x_22202); +x_22203 = lean_ctor_get(x_22200, 1); +lean_inc(x_22203); +lean_dec(x_22200); +x_22204 = l_Lean_IR_ToIR_lowerLet___lambda__1(x_2, x_22196, x_22191, x_22202, x_22203, x_4, x_5, x_22201); +return x_22204; +} +else +{ +lean_object* x_22205; lean_object* x_22206; lean_object* x_22207; lean_object* x_22208; +lean_dec(x_22196); +lean_dec(x_22191); +lean_dec(x_5); +lean_dec(x_4); +lean_dec(x_2); +x_22205 = lean_ctor_get(x_22199, 0); +lean_inc(x_22205); +x_22206 = lean_ctor_get(x_22199, 1); +lean_inc(x_22206); +if (lean_is_exclusive(x_22199)) { + lean_ctor_release(x_22199, 0); + lean_ctor_release(x_22199, 1); + x_22207 = x_22199; +} else { + lean_dec_ref(x_22199); + x_22207 = lean_box(0); +} +if (lean_is_scalar(x_22207)) { + x_22208 = lean_alloc_ctor(1, 2, 0); +} else { + x_22208 = x_22207; +} +lean_ctor_set(x_22208, 0, x_22205); +lean_ctor_set(x_22208, 1, x_22206); +return x_22208; +} +} +else +{ +lean_object* x_22209; lean_object* x_22210; lean_object* x_22211; lean_object* x_22212; lean_object* x_22213; +lean_dec(x_20601); +lean_dec(x_153); +lean_dec(x_5); +lean_dec(x_4); +lean_dec(x_2); +lean_dec(x_1); +x_22209 = lean_ctor_get(x_22187, 1); +lean_inc(x_22209); +if (lean_is_exclusive(x_22187)) { + lean_ctor_release(x_22187, 0); + lean_ctor_release(x_22187, 1); + x_22210 = x_22187; +} else { + lean_dec_ref(x_22187); + x_22210 = lean_box(0); +} +x_22211 = lean_ctor_get(x_22189, 0); +lean_inc(x_22211); +lean_dec(x_22189); +if (lean_is_scalar(x_22210)) { + x_22212 = lean_alloc_ctor(0, 2, 0); +} else { + x_22212 = x_22210; +} +lean_ctor_set(x_22212, 0, x_22211); +lean_ctor_set(x_22212, 1, x_22209); +if (lean_is_scalar(x_21947)) { + x_22213 = lean_alloc_ctor(0, 2, 0); +} else { + x_22213 = x_21947; +} +lean_ctor_set(x_22213, 0, x_22212); +lean_ctor_set(x_22213, 1, x_22188); +return x_22213; +} +} +} +case 4: +{ +lean_object* x_22315; lean_object* x_22316; uint8_t x_22317; +lean_dec(x_21948); +lean_dec(x_21947); +lean_dec(x_20593); +lean_dec(x_20592); +if (lean_is_exclusive(x_21953)) { + lean_ctor_release(x_21953, 0); + x_22315 = x_21953; +} else { + lean_dec_ref(x_21953); + x_22315 = lean_box(0); +} +x_22316 = l_Lean_IR_ToIR_lowerLet___closed__20; +x_22317 = lean_name_eq(x_153, x_22316); +if (x_22317 == 0) +{ +uint8_t x_22318; lean_object* x_22319; lean_object* x_22320; lean_object* x_22321; lean_object* x_22322; lean_object* x_22323; lean_object* x_22324; lean_object* x_22325; lean_object* x_22326; lean_object* x_22327; +lean_dec(x_20601); +lean_dec(x_2); +lean_dec(x_1); +x_22318 = 1; +x_22319 = l_Lean_IR_ToIR_lowerLet___closed__14; +x_22320 = l_Lean_Name_toString(x_153, x_22318, x_22319); +if (lean_is_scalar(x_22315)) { + x_22321 = lean_alloc_ctor(3, 1, 0); +} else { + x_22321 = x_22315; + lean_ctor_set_tag(x_22321, 3); +} +lean_ctor_set(x_22321, 0, x_22320); +x_22322 = l_Lean_IR_ToIR_lowerLet___closed__22; +x_22323 = lean_alloc_ctor(5, 2, 0); +lean_ctor_set(x_22323, 0, x_22322); +lean_ctor_set(x_22323, 1, x_22321); +x_22324 = l_Lean_IR_ToIR_lowerLet___closed__24; +x_22325 = lean_alloc_ctor(5, 2, 0); +lean_ctor_set(x_22325, 0, x_22323); +lean_ctor_set(x_22325, 1, x_22324); +x_22326 = l_Lean_MessageData_ofFormat(x_22325); +x_22327 = l_Lean_throwError___at_Lean_IR_ToIR_lowerLet___spec__1(x_22326, 
x_21943, x_4, x_5, x_21946); +lean_dec(x_5); +lean_dec(x_4); +lean_dec(x_21943); +return x_22327; +} +else +{ +lean_object* x_22328; lean_object* x_22329; lean_object* x_22330; +lean_dec(x_22315); +lean_dec(x_153); +x_22328 = l_Lean_IR_instInhabitedArg; +x_22329 = lean_unsigned_to_nat(2u); +x_22330 = lean_array_get(x_22328, x_20601, x_22329); +lean_dec(x_20601); +if (lean_obj_tag(x_22330) == 0) +{ +lean_object* x_22331; lean_object* x_22332; lean_object* x_22333; lean_object* x_22334; lean_object* x_22335; lean_object* x_22336; lean_object* x_22337; +x_22331 = lean_ctor_get(x_22330, 0); +lean_inc(x_22331); +lean_dec(x_22330); +x_22332 = lean_ctor_get(x_1, 0); +lean_inc(x_22332); +lean_dec(x_1); +x_22333 = l_Lean_IR_ToIR_bindVarToVarId(x_22332, x_22331, x_21943, x_4, x_5, x_21946); +x_22334 = lean_ctor_get(x_22333, 0); +lean_inc(x_22334); +x_22335 = lean_ctor_get(x_22333, 1); +lean_inc(x_22335); +lean_dec(x_22333); +x_22336 = lean_ctor_get(x_22334, 1); +lean_inc(x_22336); +lean_dec(x_22334); +x_22337 = l_Lean_IR_ToIR_lowerCode(x_2, x_22336, x_4, x_5, x_22335); +return x_22337; +} +else +{ +lean_object* x_22338; lean_object* x_22339; lean_object* x_22340; lean_object* x_22341; lean_object* x_22342; lean_object* x_22343; +x_22338 = lean_ctor_get(x_1, 0); +lean_inc(x_22338); +lean_dec(x_1); +x_22339 = l_Lean_IR_ToIR_bindErased(x_22338, x_21943, x_4, x_5, x_21946); +x_22340 = lean_ctor_get(x_22339, 0); +lean_inc(x_22340); +x_22341 = lean_ctor_get(x_22339, 1); +lean_inc(x_22341); +lean_dec(x_22339); +x_22342 = lean_ctor_get(x_22340, 1); +lean_inc(x_22342); +lean_dec(x_22340); +x_22343 = l_Lean_IR_ToIR_lowerCode(x_2, x_22342, x_4, x_5, x_22341); +return x_22343; +} +} +} +case 5: +{ +lean_object* x_22344; lean_object* x_22345; +lean_dec(x_21953); +lean_dec(x_21948); +lean_dec(x_21947); +lean_dec(x_20601); +lean_dec(x_20593); +lean_dec(x_20592); +lean_dec(x_153); +lean_dec(x_2); +lean_dec(x_1); +x_22344 = l_Lean_IR_ToIR_lowerLet___closed__26; +x_22345 = l_panic___at_Lean_IR_ToIR_lowerAlt_loop___spec__1(x_22344, x_21943, x_4, x_5, x_21946); +return x_22345; +} +case 6: +{ +lean_object* x_22346; uint8_t x_22347; +x_22346 = lean_ctor_get(x_21953, 0); +lean_inc(x_22346); +lean_dec(x_21953); +lean_inc(x_153); +x_22347 = l_Lean_isExtern(x_21948, x_153); +if (x_22347 == 0) +{ +lean_object* x_22348; +lean_dec(x_21947); +lean_dec(x_20601); +lean_inc(x_5); +lean_inc(x_4); +x_22348 = l_Lean_IR_ToIR_getCtorInfo(x_153, x_21943, x_4, x_5, x_21946); +if (lean_obj_tag(x_22348) == 0) +{ +lean_object* x_22349; lean_object* x_22350; lean_object* x_22351; lean_object* x_22352; lean_object* x_22353; lean_object* x_22354; lean_object* x_22355; lean_object* x_22356; lean_object* x_22357; lean_object* x_22358; lean_object* x_22359; lean_object* x_22360; lean_object* x_22361; lean_object* x_22362; lean_object* x_22363; lean_object* x_22364; lean_object* x_22365; lean_object* x_22366; lean_object* x_22367; lean_object* x_22368; +x_22349 = lean_ctor_get(x_22348, 0); +lean_inc(x_22349); +x_22350 = lean_ctor_get(x_22349, 0); +lean_inc(x_22350); +x_22351 = lean_ctor_get(x_22348, 1); +lean_inc(x_22351); +lean_dec(x_22348); +x_22352 = lean_ctor_get(x_22349, 1); +lean_inc(x_22352); +lean_dec(x_22349); +x_22353 = lean_ctor_get(x_22350, 0); +lean_inc(x_22353); +x_22354 = lean_ctor_get(x_22350, 1); +lean_inc(x_22354); +lean_dec(x_22350); +x_22355 = lean_ctor_get(x_22346, 3); +lean_inc(x_22355); +lean_dec(x_22346); +x_22356 = lean_array_get_size(x_20592); +x_22357 = l_Array_extract___rarg(x_20592, x_22355, x_22356); 
+lean_dec(x_22356); +lean_dec(x_20592); +x_22358 = lean_array_get_size(x_22354); +x_22359 = lean_unsigned_to_nat(0u); +x_22360 = lean_unsigned_to_nat(1u); +if (lean_is_scalar(x_20593)) { + x_22361 = lean_alloc_ctor(0, 3, 0); +} else { + x_22361 = x_20593; + lean_ctor_set_tag(x_22361, 0); +} +lean_ctor_set(x_22361, 0, x_22359); +lean_ctor_set(x_22361, 1, x_22358); +lean_ctor_set(x_22361, 2, x_22360); +x_22362 = l_Lean_IR_ToIR_lowerLet___closed__27; +x_22363 = l_Std_Range_forIn_x27_loop___at_Lean_IR_ToIR_lowerLet___spec__9(x_22354, x_22357, x_22361, x_22361, x_22362, x_22359, lean_box(0), lean_box(0), x_22352, x_4, x_5, x_22351); +lean_dec(x_22361); +x_22364 = lean_ctor_get(x_22363, 0); +lean_inc(x_22364); +x_22365 = lean_ctor_get(x_22363, 1); +lean_inc(x_22365); +lean_dec(x_22363); +x_22366 = lean_ctor_get(x_22364, 0); +lean_inc(x_22366); +x_22367 = lean_ctor_get(x_22364, 1); +lean_inc(x_22367); +lean_dec(x_22364); +x_22368 = l_Lean_IR_ToIR_lowerLet___lambda__4(x_1, x_2, x_22353, x_22354, x_22357, x_22366, x_22367, x_4, x_5, x_22365); +lean_dec(x_22357); +lean_dec(x_22354); +return x_22368; +} +else +{ +lean_object* x_22369; lean_object* x_22370; lean_object* x_22371; lean_object* x_22372; +lean_dec(x_22346); +lean_dec(x_20593); +lean_dec(x_20592); +lean_dec(x_5); +lean_dec(x_4); +lean_dec(x_2); +lean_dec(x_1); +x_22369 = lean_ctor_get(x_22348, 0); +lean_inc(x_22369); +x_22370 = lean_ctor_get(x_22348, 1); +lean_inc(x_22370); +if (lean_is_exclusive(x_22348)) { + lean_ctor_release(x_22348, 0); + lean_ctor_release(x_22348, 1); + x_22371 = x_22348; +} else { + lean_dec_ref(x_22348); + x_22371 = lean_box(0); +} +if (lean_is_scalar(x_22371)) { + x_22372 = lean_alloc_ctor(1, 2, 0); +} else { + x_22372 = x_22371; +} +lean_ctor_set(x_22372, 0, x_22369); +lean_ctor_set(x_22372, 1, x_22370); +return x_22372; +} +} +else +{ +lean_object* x_22373; lean_object* x_22374; lean_object* x_22401; lean_object* x_22402; +lean_dec(x_22346); +lean_dec(x_20593); +lean_dec(x_20592); +lean_inc(x_153); +x_22401 = l_Lean_Compiler_LCNF_getMonoDecl_x3f(x_153, x_4, x_5, x_21946); +x_22402 = lean_ctor_get(x_22401, 0); +lean_inc(x_22402); +if (lean_obj_tag(x_22402) == 0) +{ +lean_object* x_22403; lean_object* x_22404; lean_object* x_22405; +x_22403 = lean_ctor_get(x_22401, 1); +lean_inc(x_22403); +lean_dec(x_22401); +x_22404 = lean_box(0); +x_22405 = lean_alloc_ctor(0, 2, 0); +lean_ctor_set(x_22405, 0, x_22404); +lean_ctor_set(x_22405, 1, x_21943); +x_22373 = x_22405; +x_22374 = x_22403; +goto block_22400; +} +else +{ +lean_object* x_22406; lean_object* x_22407; lean_object* x_22408; lean_object* x_22409; lean_object* x_22410; lean_object* x_22411; lean_object* x_22412; uint8_t x_22413; +x_22406 = lean_ctor_get(x_22401, 1); +lean_inc(x_22406); +if (lean_is_exclusive(x_22401)) { + lean_ctor_release(x_22401, 0); + lean_ctor_release(x_22401, 1); + x_22407 = x_22401; +} else { + lean_dec_ref(x_22401); + x_22407 = lean_box(0); +} +x_22408 = lean_ctor_get(x_22402, 0); +lean_inc(x_22408); +if (lean_is_exclusive(x_22402)) { + lean_ctor_release(x_22402, 0); + x_22409 = x_22402; +} else { + lean_dec_ref(x_22402); + x_22409 = lean_box(0); +} +x_22410 = lean_array_get_size(x_20601); +x_22411 = lean_ctor_get(x_22408, 3); +lean_inc(x_22411); +lean_dec(x_22408); +x_22412 = lean_array_get_size(x_22411); +lean_dec(x_22411); +x_22413 = lean_nat_dec_lt(x_22410, x_22412); +if (x_22413 == 0) +{ +uint8_t x_22414; +x_22414 = lean_nat_dec_eq(x_22410, x_22412); +if (x_22414 == 0) +{ +lean_object* x_22415; lean_object* x_22416; lean_object* 
x_22417; lean_object* x_22418; lean_object* x_22419; lean_object* x_22420; lean_object* x_22421; lean_object* x_22422; lean_object* x_22423; lean_object* x_22424; lean_object* x_22425; lean_object* x_22426; lean_object* x_22427; lean_object* x_22428; lean_object* x_22429; lean_object* x_22430; lean_object* x_22431; +x_22415 = lean_unsigned_to_nat(0u); +x_22416 = l_Array_extract___rarg(x_20601, x_22415, x_22412); +x_22417 = l_Array_extract___rarg(x_20601, x_22412, x_22410); +lean_dec(x_22410); +lean_inc(x_153); +if (lean_is_scalar(x_22407)) { + x_22418 = lean_alloc_ctor(6, 2, 0); +} else { + x_22418 = x_22407; + lean_ctor_set_tag(x_22418, 6); +} +lean_ctor_set(x_22418, 0, x_153); +lean_ctor_set(x_22418, 1, x_22416); +x_22419 = lean_ctor_get(x_1, 0); +lean_inc(x_22419); +x_22420 = l_Lean_IR_ToIR_bindVar(x_22419, x_21943, x_4, x_5, x_22406); +x_22421 = lean_ctor_get(x_22420, 0); +lean_inc(x_22421); +x_22422 = lean_ctor_get(x_22420, 1); +lean_inc(x_22422); +lean_dec(x_22420); +x_22423 = lean_ctor_get(x_22421, 0); +lean_inc(x_22423); +x_22424 = lean_ctor_get(x_22421, 1); +lean_inc(x_22424); +lean_dec(x_22421); +x_22425 = l_Lean_IR_ToIR_newVar(x_22424, x_4, x_5, x_22422); +x_22426 = lean_ctor_get(x_22425, 0); +lean_inc(x_22426); +x_22427 = lean_ctor_get(x_22425, 1); +lean_inc(x_22427); +lean_dec(x_22425); +x_22428 = lean_ctor_get(x_22426, 0); +lean_inc(x_22428); +x_22429 = lean_ctor_get(x_22426, 1); +lean_inc(x_22429); +lean_dec(x_22426); +x_22430 = lean_ctor_get(x_1, 2); +lean_inc(x_22430); +lean_inc(x_5); +lean_inc(x_4); +x_22431 = l_Lean_IR_ToIR_lowerType(x_22430, x_22429, x_4, x_5, x_22427); +if (lean_obj_tag(x_22431) == 0) +{ +lean_object* x_22432; lean_object* x_22433; lean_object* x_22434; lean_object* x_22435; lean_object* x_22436; +x_22432 = lean_ctor_get(x_22431, 0); +lean_inc(x_22432); +x_22433 = lean_ctor_get(x_22431, 1); +lean_inc(x_22433); +lean_dec(x_22431); +x_22434 = lean_ctor_get(x_22432, 0); +lean_inc(x_22434); +x_22435 = lean_ctor_get(x_22432, 1); +lean_inc(x_22435); +lean_dec(x_22432); +lean_inc(x_5); +lean_inc(x_4); +lean_inc(x_2); +x_22436 = l_Lean_IR_ToIR_lowerLet___lambda__3(x_2, x_22428, x_22417, x_22423, x_22418, x_22434, x_22435, x_4, x_5, x_22433); +if (lean_obj_tag(x_22436) == 0) +{ +lean_object* x_22437; lean_object* x_22438; lean_object* x_22439; lean_object* x_22440; lean_object* x_22441; lean_object* x_22442; lean_object* x_22443; +x_22437 = lean_ctor_get(x_22436, 0); +lean_inc(x_22437); +x_22438 = lean_ctor_get(x_22436, 1); +lean_inc(x_22438); +lean_dec(x_22436); +x_22439 = lean_ctor_get(x_22437, 0); +lean_inc(x_22439); +x_22440 = lean_ctor_get(x_22437, 1); +lean_inc(x_22440); +if (lean_is_exclusive(x_22437)) { + lean_ctor_release(x_22437, 0); + lean_ctor_release(x_22437, 1); + x_22441 = x_22437; +} else { + lean_dec_ref(x_22437); + x_22441 = lean_box(0); +} +if (lean_is_scalar(x_22409)) { + x_22442 = lean_alloc_ctor(1, 1, 0); +} else { + x_22442 = x_22409; +} +lean_ctor_set(x_22442, 0, x_22439); +if (lean_is_scalar(x_22441)) { + x_22443 = lean_alloc_ctor(0, 2, 0); +} else { + x_22443 = x_22441; +} +lean_ctor_set(x_22443, 0, x_22442); +lean_ctor_set(x_22443, 1, x_22440); +x_22373 = x_22443; +x_22374 = x_22438; +goto block_22400; +} +else +{ +lean_object* x_22444; lean_object* x_22445; lean_object* x_22446; lean_object* x_22447; +lean_dec(x_22409); +lean_dec(x_21947); +lean_dec(x_20601); +lean_dec(x_153); +lean_dec(x_5); +lean_dec(x_4); +lean_dec(x_2); +lean_dec(x_1); +x_22444 = lean_ctor_get(x_22436, 0); +lean_inc(x_22444); +x_22445 = lean_ctor_get(x_22436, 1); 
+lean_inc(x_22445); +if (lean_is_exclusive(x_22436)) { + lean_ctor_release(x_22436, 0); + lean_ctor_release(x_22436, 1); + x_22446 = x_22436; +} else { + lean_dec_ref(x_22436); + x_22446 = lean_box(0); +} +if (lean_is_scalar(x_22446)) { + x_22447 = lean_alloc_ctor(1, 2, 0); +} else { + x_22447 = x_22446; +} +lean_ctor_set(x_22447, 0, x_22444); +lean_ctor_set(x_22447, 1, x_22445); +return x_22447; +} +} +else +{ +lean_object* x_22448; lean_object* x_22449; lean_object* x_22450; lean_object* x_22451; +lean_dec(x_22428); +lean_dec(x_22423); +lean_dec(x_22418); +lean_dec(x_22417); +lean_dec(x_22409); +lean_dec(x_21947); +lean_dec(x_20601); +lean_dec(x_153); +lean_dec(x_5); +lean_dec(x_4); +lean_dec(x_2); +lean_dec(x_1); +x_22448 = lean_ctor_get(x_22431, 0); +lean_inc(x_22448); +x_22449 = lean_ctor_get(x_22431, 1); +lean_inc(x_22449); +if (lean_is_exclusive(x_22431)) { + lean_ctor_release(x_22431, 0); + lean_ctor_release(x_22431, 1); + x_22450 = x_22431; +} else { + lean_dec_ref(x_22431); + x_22450 = lean_box(0); +} +if (lean_is_scalar(x_22450)) { + x_22451 = lean_alloc_ctor(1, 2, 0); +} else { + x_22451 = x_22450; +} +lean_ctor_set(x_22451, 0, x_22448); +lean_ctor_set(x_22451, 1, x_22449); +return x_22451; +} +} +else +{ +lean_object* x_22452; lean_object* x_22453; lean_object* x_22454; lean_object* x_22455; lean_object* x_22456; lean_object* x_22457; lean_object* x_22458; lean_object* x_22459; lean_object* x_22460; +lean_dec(x_22412); +lean_dec(x_22410); +lean_inc(x_20601); +lean_inc(x_153); +if (lean_is_scalar(x_22407)) { + x_22452 = lean_alloc_ctor(6, 2, 0); +} else { + x_22452 = x_22407; + lean_ctor_set_tag(x_22452, 6); +} +lean_ctor_set(x_22452, 0, x_153); +lean_ctor_set(x_22452, 1, x_20601); +x_22453 = lean_ctor_get(x_1, 0); +lean_inc(x_22453); +x_22454 = l_Lean_IR_ToIR_bindVar(x_22453, x_21943, x_4, x_5, x_22406); +x_22455 = lean_ctor_get(x_22454, 0); +lean_inc(x_22455); +x_22456 = lean_ctor_get(x_22454, 1); +lean_inc(x_22456); +lean_dec(x_22454); +x_22457 = lean_ctor_get(x_22455, 0); +lean_inc(x_22457); +x_22458 = lean_ctor_get(x_22455, 1); +lean_inc(x_22458); +lean_dec(x_22455); +x_22459 = lean_ctor_get(x_1, 2); +lean_inc(x_22459); +lean_inc(x_5); +lean_inc(x_4); +x_22460 = l_Lean_IR_ToIR_lowerType(x_22459, x_22458, x_4, x_5, x_22456); +if (lean_obj_tag(x_22460) == 0) +{ +lean_object* x_22461; lean_object* x_22462; lean_object* x_22463; lean_object* x_22464; lean_object* x_22465; +x_22461 = lean_ctor_get(x_22460, 0); +lean_inc(x_22461); +x_22462 = lean_ctor_get(x_22460, 1); +lean_inc(x_22462); +lean_dec(x_22460); +x_22463 = lean_ctor_get(x_22461, 0); +lean_inc(x_22463); +x_22464 = lean_ctor_get(x_22461, 1); +lean_inc(x_22464); +lean_dec(x_22461); +lean_inc(x_5); +lean_inc(x_4); +lean_inc(x_2); +x_22465 = l_Lean_IR_ToIR_lowerLet___lambda__1(x_2, x_22457, x_22452, x_22463, x_22464, x_4, x_5, x_22462); +if (lean_obj_tag(x_22465) == 0) +{ +lean_object* x_22466; lean_object* x_22467; lean_object* x_22468; lean_object* x_22469; lean_object* x_22470; lean_object* x_22471; lean_object* x_22472; +x_22466 = lean_ctor_get(x_22465, 0); +lean_inc(x_22466); +x_22467 = lean_ctor_get(x_22465, 1); +lean_inc(x_22467); +lean_dec(x_22465); +x_22468 = lean_ctor_get(x_22466, 0); +lean_inc(x_22468); +x_22469 = lean_ctor_get(x_22466, 1); +lean_inc(x_22469); +if (lean_is_exclusive(x_22466)) { + lean_ctor_release(x_22466, 0); + lean_ctor_release(x_22466, 1); + x_22470 = x_22466; +} else { + lean_dec_ref(x_22466); + x_22470 = lean_box(0); +} +if (lean_is_scalar(x_22409)) { + x_22471 = lean_alloc_ctor(1, 1, 0); 
+} else { + x_22471 = x_22409; +} +lean_ctor_set(x_22471, 0, x_22468); +if (lean_is_scalar(x_22470)) { + x_22472 = lean_alloc_ctor(0, 2, 0); +} else { + x_22472 = x_22470; +} +lean_ctor_set(x_22472, 0, x_22471); +lean_ctor_set(x_22472, 1, x_22469); +x_22373 = x_22472; +x_22374 = x_22467; +goto block_22400; +} +else +{ +lean_object* x_22473; lean_object* x_22474; lean_object* x_22475; lean_object* x_22476; +lean_dec(x_22409); +lean_dec(x_21947); +lean_dec(x_20601); +lean_dec(x_153); +lean_dec(x_5); +lean_dec(x_4); +lean_dec(x_2); +lean_dec(x_1); +x_22473 = lean_ctor_get(x_22465, 0); +lean_inc(x_22473); +x_22474 = lean_ctor_get(x_22465, 1); +lean_inc(x_22474); +if (lean_is_exclusive(x_22465)) { + lean_ctor_release(x_22465, 0); + lean_ctor_release(x_22465, 1); + x_22475 = x_22465; +} else { + lean_dec_ref(x_22465); + x_22475 = lean_box(0); +} +if (lean_is_scalar(x_22475)) { + x_22476 = lean_alloc_ctor(1, 2, 0); +} else { + x_22476 = x_22475; +} +lean_ctor_set(x_22476, 0, x_22473); +lean_ctor_set(x_22476, 1, x_22474); +return x_22476; +} +} +else +{ +lean_object* x_22477; lean_object* x_22478; lean_object* x_22479; lean_object* x_22480; +lean_dec(x_22457); +lean_dec(x_22452); +lean_dec(x_22409); +lean_dec(x_21947); +lean_dec(x_20601); +lean_dec(x_153); +lean_dec(x_5); +lean_dec(x_4); +lean_dec(x_2); +lean_dec(x_1); +x_22477 = lean_ctor_get(x_22460, 0); +lean_inc(x_22477); +x_22478 = lean_ctor_get(x_22460, 1); +lean_inc(x_22478); +if (lean_is_exclusive(x_22460)) { + lean_ctor_release(x_22460, 0); + lean_ctor_release(x_22460, 1); + x_22479 = x_22460; +} else { + lean_dec_ref(x_22460); + x_22479 = lean_box(0); +} +if (lean_is_scalar(x_22479)) { + x_22480 = lean_alloc_ctor(1, 2, 0); +} else { + x_22480 = x_22479; +} +lean_ctor_set(x_22480, 0, x_22477); +lean_ctor_set(x_22480, 1, x_22478); +return x_22480; +} +} +} +else +{ +lean_object* x_22481; lean_object* x_22482; lean_object* x_22483; lean_object* x_22484; lean_object* x_22485; lean_object* x_22486; lean_object* x_22487; lean_object* x_22488; lean_object* x_22489; +lean_dec(x_22412); +lean_dec(x_22410); +lean_inc(x_20601); +lean_inc(x_153); +if (lean_is_scalar(x_22407)) { + x_22481 = lean_alloc_ctor(7, 2, 0); +} else { + x_22481 = x_22407; + lean_ctor_set_tag(x_22481, 7); +} +lean_ctor_set(x_22481, 0, x_153); +lean_ctor_set(x_22481, 1, x_20601); +x_22482 = lean_ctor_get(x_1, 0); +lean_inc(x_22482); +x_22483 = l_Lean_IR_ToIR_bindVar(x_22482, x_21943, x_4, x_5, x_22406); +x_22484 = lean_ctor_get(x_22483, 0); +lean_inc(x_22484); +x_22485 = lean_ctor_get(x_22483, 1); +lean_inc(x_22485); +lean_dec(x_22483); +x_22486 = lean_ctor_get(x_22484, 0); +lean_inc(x_22486); +x_22487 = lean_ctor_get(x_22484, 1); +lean_inc(x_22487); +lean_dec(x_22484); +x_22488 = lean_box(7); +lean_inc(x_5); +lean_inc(x_4); +lean_inc(x_2); +x_22489 = l_Lean_IR_ToIR_lowerLet___lambda__1(x_2, x_22486, x_22481, x_22488, x_22487, x_4, x_5, x_22485); +if (lean_obj_tag(x_22489) == 0) +{ +lean_object* x_22490; lean_object* x_22491; lean_object* x_22492; lean_object* x_22493; lean_object* x_22494; lean_object* x_22495; lean_object* x_22496; +x_22490 = lean_ctor_get(x_22489, 0); +lean_inc(x_22490); +x_22491 = lean_ctor_get(x_22489, 1); +lean_inc(x_22491); +lean_dec(x_22489); +x_22492 = lean_ctor_get(x_22490, 0); +lean_inc(x_22492); +x_22493 = lean_ctor_get(x_22490, 1); +lean_inc(x_22493); +if (lean_is_exclusive(x_22490)) { + lean_ctor_release(x_22490, 0); + lean_ctor_release(x_22490, 1); + x_22494 = x_22490; +} else { + lean_dec_ref(x_22490); + x_22494 = lean_box(0); +} +if 
(lean_is_scalar(x_22409)) { + x_22495 = lean_alloc_ctor(1, 1, 0); +} else { + x_22495 = x_22409; +} +lean_ctor_set(x_22495, 0, x_22492); +if (lean_is_scalar(x_22494)) { + x_22496 = lean_alloc_ctor(0, 2, 0); +} else { + x_22496 = x_22494; +} +lean_ctor_set(x_22496, 0, x_22495); +lean_ctor_set(x_22496, 1, x_22493); +x_22373 = x_22496; +x_22374 = x_22491; +goto block_22400; +} +else +{ +lean_object* x_22497; lean_object* x_22498; lean_object* x_22499; lean_object* x_22500; +lean_dec(x_22409); +lean_dec(x_21947); +lean_dec(x_20601); +lean_dec(x_153); +lean_dec(x_5); +lean_dec(x_4); +lean_dec(x_2); +lean_dec(x_1); +x_22497 = lean_ctor_get(x_22489, 0); +lean_inc(x_22497); +x_22498 = lean_ctor_get(x_22489, 1); +lean_inc(x_22498); +if (lean_is_exclusive(x_22489)) { + lean_ctor_release(x_22489, 0); + lean_ctor_release(x_22489, 1); + x_22499 = x_22489; +} else { + lean_dec_ref(x_22489); + x_22499 = lean_box(0); +} +if (lean_is_scalar(x_22499)) { + x_22500 = lean_alloc_ctor(1, 2, 0); +} else { + x_22500 = x_22499; +} +lean_ctor_set(x_22500, 0, x_22497); +lean_ctor_set(x_22500, 1, x_22498); +return x_22500; +} +} +} +block_22400: +{ +lean_object* x_22375; +x_22375 = lean_ctor_get(x_22373, 0); +lean_inc(x_22375); +if (lean_obj_tag(x_22375) == 0) +{ +lean_object* x_22376; lean_object* x_22377; lean_object* x_22378; lean_object* x_22379; lean_object* x_22380; lean_object* x_22381; lean_object* x_22382; lean_object* x_22383; lean_object* x_22384; lean_object* x_22385; +lean_dec(x_21947); +x_22376 = lean_ctor_get(x_22373, 1); +lean_inc(x_22376); +lean_dec(x_22373); +x_22377 = lean_alloc_ctor(6, 2, 0); +lean_ctor_set(x_22377, 0, x_153); +lean_ctor_set(x_22377, 1, x_20601); +x_22378 = lean_ctor_get(x_1, 0); +lean_inc(x_22378); +x_22379 = l_Lean_IR_ToIR_bindVar(x_22378, x_22376, x_4, x_5, x_22374); +x_22380 = lean_ctor_get(x_22379, 0); +lean_inc(x_22380); +x_22381 = lean_ctor_get(x_22379, 1); +lean_inc(x_22381); +lean_dec(x_22379); +x_22382 = lean_ctor_get(x_22380, 0); +lean_inc(x_22382); +x_22383 = lean_ctor_get(x_22380, 1); +lean_inc(x_22383); +lean_dec(x_22380); +x_22384 = lean_ctor_get(x_1, 2); +lean_inc(x_22384); +lean_dec(x_1); +lean_inc(x_5); +lean_inc(x_4); +x_22385 = l_Lean_IR_ToIR_lowerType(x_22384, x_22383, x_4, x_5, x_22381); +if (lean_obj_tag(x_22385) == 0) +{ +lean_object* x_22386; lean_object* x_22387; lean_object* x_22388; lean_object* x_22389; lean_object* x_22390; +x_22386 = lean_ctor_get(x_22385, 0); +lean_inc(x_22386); +x_22387 = lean_ctor_get(x_22385, 1); +lean_inc(x_22387); +lean_dec(x_22385); +x_22388 = lean_ctor_get(x_22386, 0); +lean_inc(x_22388); +x_22389 = lean_ctor_get(x_22386, 1); +lean_inc(x_22389); +lean_dec(x_22386); +x_22390 = l_Lean_IR_ToIR_lowerLet___lambda__1(x_2, x_22382, x_22377, x_22388, x_22389, x_4, x_5, x_22387); +return x_22390; +} +else +{ +lean_object* x_22391; lean_object* x_22392; lean_object* x_22393; lean_object* x_22394; +lean_dec(x_22382); +lean_dec(x_22377); +lean_dec(x_5); +lean_dec(x_4); +lean_dec(x_2); +x_22391 = lean_ctor_get(x_22385, 0); +lean_inc(x_22391); +x_22392 = lean_ctor_get(x_22385, 1); +lean_inc(x_22392); +if (lean_is_exclusive(x_22385)) { + lean_ctor_release(x_22385, 0); + lean_ctor_release(x_22385, 1); + x_22393 = x_22385; +} else { + lean_dec_ref(x_22385); + x_22393 = lean_box(0); +} +if (lean_is_scalar(x_22393)) { + x_22394 = lean_alloc_ctor(1, 2, 0); +} else { + x_22394 = x_22393; +} +lean_ctor_set(x_22394, 0, x_22391); +lean_ctor_set(x_22394, 1, x_22392); +return x_22394; +} +} +else +{ +lean_object* x_22395; lean_object* x_22396; 
lean_object* x_22397; lean_object* x_22398; lean_object* x_22399; +lean_dec(x_20601); +lean_dec(x_153); +lean_dec(x_5); +lean_dec(x_4); +lean_dec(x_2); +lean_dec(x_1); +x_22395 = lean_ctor_get(x_22373, 1); +lean_inc(x_22395); +if (lean_is_exclusive(x_22373)) { + lean_ctor_release(x_22373, 0); + lean_ctor_release(x_22373, 1); + x_22396 = x_22373; +} else { + lean_dec_ref(x_22373); + x_22396 = lean_box(0); +} +x_22397 = lean_ctor_get(x_22375, 0); +lean_inc(x_22397); +lean_dec(x_22375); +if (lean_is_scalar(x_22396)) { + x_22398 = lean_alloc_ctor(0, 2, 0); +} else { + x_22398 = x_22396; +} +lean_ctor_set(x_22398, 0, x_22397); +lean_ctor_set(x_22398, 1, x_22395); +if (lean_is_scalar(x_21947)) { + x_22399 = lean_alloc_ctor(0, 2, 0); +} else { + x_22399 = x_21947; +} +lean_ctor_set(x_22399, 0, x_22398); +lean_ctor_set(x_22399, 1, x_22374); +return x_22399; +} +} +} +} +default: +{ +lean_object* x_22501; uint8_t x_22502; lean_object* x_22503; lean_object* x_22504; lean_object* x_22505; lean_object* x_22506; lean_object* x_22507; lean_object* x_22508; lean_object* x_22509; lean_object* x_22510; lean_object* x_22511; +lean_dec(x_21948); +lean_dec(x_21947); +lean_dec(x_20601); +lean_dec(x_20593); +lean_dec(x_20592); +lean_dec(x_2); +lean_dec(x_1); +if (lean_is_exclusive(x_21953)) { + lean_ctor_release(x_21953, 0); + x_22501 = x_21953; +} else { + lean_dec_ref(x_21953); + x_22501 = lean_box(0); +} +x_22502 = 1; +x_22503 = l_Lean_IR_ToIR_lowerLet___closed__14; +x_22504 = l_Lean_Name_toString(x_153, x_22502, x_22503); +if (lean_is_scalar(x_22501)) { + x_22505 = lean_alloc_ctor(3, 1, 0); +} else { + x_22505 = x_22501; + lean_ctor_set_tag(x_22505, 3); +} +lean_ctor_set(x_22505, 0, x_22504); +x_22506 = l_Lean_IR_ToIR_lowerLet___closed__29; +x_22507 = lean_alloc_ctor(5, 2, 0); +lean_ctor_set(x_22507, 0, x_22506); +lean_ctor_set(x_22507, 1, x_22505); +x_22508 = l_Lean_IR_ToIR_lowerLet___closed__31; +x_22509 = lean_alloc_ctor(5, 2, 0); +lean_ctor_set(x_22509, 0, x_22507); +lean_ctor_set(x_22509, 1, x_22508); +x_22510 = l_Lean_MessageData_ofFormat(x_22509); +x_22511 = l_Lean_throwError___at_Lean_IR_ToIR_lowerLet___spec__1(x_22510, x_21943, x_4, x_5, x_21946); +lean_dec(x_5); +lean_dec(x_4); +lean_dec(x_21943); +return x_22511; +} +} +} +} +} +else +{ +uint8_t x_22512; +lean_dec(x_20601); +lean_dec(x_20593); +lean_dec(x_20592); +lean_dec(x_153); +lean_dec(x_5); +lean_dec(x_4); +lean_dec(x_2); +lean_dec(x_1); +x_22512 = !lean_is_exclusive(x_20603); +if (x_22512 == 0) +{ +lean_object* x_22513; lean_object* x_22514; lean_object* x_22515; +x_22513 = lean_ctor_get(x_20603, 0); +lean_dec(x_22513); +x_22514 = lean_ctor_get(x_20605, 0); +lean_inc(x_22514); +lean_dec(x_20605); +lean_ctor_set(x_20603, 0, x_22514); +if (lean_is_scalar(x_20599)) { + x_22515 = lean_alloc_ctor(0, 2, 0); +} else { + x_22515 = x_20599; +} +lean_ctor_set(x_22515, 0, x_20603); +lean_ctor_set(x_22515, 1, x_20604); +return x_22515; +} +else +{ +lean_object* x_22516; lean_object* x_22517; lean_object* x_22518; lean_object* x_22519; +x_22516 = lean_ctor_get(x_20603, 1); +lean_inc(x_22516); +lean_dec(x_20603); +x_22517 = lean_ctor_get(x_20605, 0); +lean_inc(x_22517); +lean_dec(x_20605); +x_22518 = lean_alloc_ctor(0, 2, 0); +lean_ctor_set(x_22518, 0, x_22517); +lean_ctor_set(x_22518, 1, x_22516); +if (lean_is_scalar(x_20599)) { + x_22519 = lean_alloc_ctor(0, 2, 0); +} else { + x_22519 = x_20599; +} +lean_ctor_set(x_22519, 0, x_22518); +lean_ctor_set(x_22519, 1, x_20604); +return x_22519; +} +} +} +} +else +{ +lean_object* x_22801; lean_object* 
x_22802; lean_object* x_22803; lean_object* x_22804; lean_object* x_23382; lean_object* x_23383; +x_22801 = lean_ctor_get(x_20597, 0); +x_22802 = lean_ctor_get(x_20597, 1); +lean_inc(x_22802); +lean_inc(x_22801); +lean_dec(x_20597); +lean_inc(x_153); +x_23382 = l_Lean_Compiler_LCNF_getMonoDecl_x3f(x_153, x_4, x_5, x_20598); +x_23383 = lean_ctor_get(x_23382, 0); +lean_inc(x_23383); +if (lean_obj_tag(x_23383) == 0) +{ +lean_object* x_23384; lean_object* x_23385; lean_object* x_23386; +x_23384 = lean_ctor_get(x_23382, 1); +lean_inc(x_23384); +lean_dec(x_23382); +x_23385 = lean_box(0); +x_23386 = lean_alloc_ctor(0, 2, 0); +lean_ctor_set(x_23386, 0, x_23385); +lean_ctor_set(x_23386, 1, x_22802); +x_22803 = x_23386; +x_22804 = x_23384; +goto block_23381; +} +else +{ +lean_object* x_23387; lean_object* x_23388; lean_object* x_23389; lean_object* x_23390; lean_object* x_23391; lean_object* x_23392; lean_object* x_23393; uint8_t x_23394; +x_23387 = lean_ctor_get(x_23382, 1); +lean_inc(x_23387); +if (lean_is_exclusive(x_23382)) { + lean_ctor_release(x_23382, 0); + lean_ctor_release(x_23382, 1); + x_23388 = x_23382; +} else { + lean_dec_ref(x_23382); + x_23388 = lean_box(0); +} +x_23389 = lean_ctor_get(x_23383, 0); +lean_inc(x_23389); +if (lean_is_exclusive(x_23383)) { + lean_ctor_release(x_23383, 0); + x_23390 = x_23383; +} else { + lean_dec_ref(x_23383); + x_23390 = lean_box(0); +} +x_23391 = lean_array_get_size(x_22801); +x_23392 = lean_ctor_get(x_23389, 3); +lean_inc(x_23392); +lean_dec(x_23389); +x_23393 = lean_array_get_size(x_23392); +lean_dec(x_23392); +x_23394 = lean_nat_dec_lt(x_23391, x_23393); +if (x_23394 == 0) +{ +uint8_t x_23395; +x_23395 = lean_nat_dec_eq(x_23391, x_23393); +if (x_23395 == 0) +{ +lean_object* x_23396; lean_object* x_23397; lean_object* x_23398; lean_object* x_23399; lean_object* x_23400; lean_object* x_23401; lean_object* x_23402; lean_object* x_23403; lean_object* x_23404; lean_object* x_23405; lean_object* x_23406; lean_object* x_23407; lean_object* x_23408; lean_object* x_23409; lean_object* x_23410; lean_object* x_23411; lean_object* x_23412; +x_23396 = lean_unsigned_to_nat(0u); +x_23397 = l_Array_extract___rarg(x_22801, x_23396, x_23393); +x_23398 = l_Array_extract___rarg(x_22801, x_23393, x_23391); +lean_dec(x_23391); +lean_inc(x_153); +if (lean_is_scalar(x_23388)) { + x_23399 = lean_alloc_ctor(6, 2, 0); +} else { + x_23399 = x_23388; + lean_ctor_set_tag(x_23399, 6); +} +lean_ctor_set(x_23399, 0, x_153); +lean_ctor_set(x_23399, 1, x_23397); +x_23400 = lean_ctor_get(x_1, 0); +lean_inc(x_23400); +x_23401 = l_Lean_IR_ToIR_bindVar(x_23400, x_22802, x_4, x_5, x_23387); +x_23402 = lean_ctor_get(x_23401, 0); +lean_inc(x_23402); +x_23403 = lean_ctor_get(x_23401, 1); +lean_inc(x_23403); +lean_dec(x_23401); +x_23404 = lean_ctor_get(x_23402, 0); +lean_inc(x_23404); +x_23405 = lean_ctor_get(x_23402, 1); +lean_inc(x_23405); +lean_dec(x_23402); +x_23406 = l_Lean_IR_ToIR_newVar(x_23405, x_4, x_5, x_23403); +x_23407 = lean_ctor_get(x_23406, 0); +lean_inc(x_23407); +x_23408 = lean_ctor_get(x_23406, 1); +lean_inc(x_23408); +lean_dec(x_23406); +x_23409 = lean_ctor_get(x_23407, 0); +lean_inc(x_23409); +x_23410 = lean_ctor_get(x_23407, 1); +lean_inc(x_23410); +lean_dec(x_23407); +x_23411 = lean_ctor_get(x_1, 2); +lean_inc(x_23411); +lean_inc(x_5); +lean_inc(x_4); +x_23412 = l_Lean_IR_ToIR_lowerType(x_23411, x_23410, x_4, x_5, x_23408); +if (lean_obj_tag(x_23412) == 0) +{ +lean_object* x_23413; lean_object* x_23414; lean_object* x_23415; lean_object* x_23416; lean_object* x_23417; 
+x_23413 = lean_ctor_get(x_23412, 0); +lean_inc(x_23413); +x_23414 = lean_ctor_get(x_23412, 1); +lean_inc(x_23414); +lean_dec(x_23412); +x_23415 = lean_ctor_get(x_23413, 0); +lean_inc(x_23415); +x_23416 = lean_ctor_get(x_23413, 1); +lean_inc(x_23416); +lean_dec(x_23413); +lean_inc(x_5); +lean_inc(x_4); +lean_inc(x_2); +x_23417 = l_Lean_IR_ToIR_lowerLet___lambda__3(x_2, x_23409, x_23398, x_23404, x_23399, x_23415, x_23416, x_4, x_5, x_23414); +if (lean_obj_tag(x_23417) == 0) +{ +lean_object* x_23418; lean_object* x_23419; lean_object* x_23420; lean_object* x_23421; lean_object* x_23422; lean_object* x_23423; lean_object* x_23424; +x_23418 = lean_ctor_get(x_23417, 0); +lean_inc(x_23418); +x_23419 = lean_ctor_get(x_23417, 1); +lean_inc(x_23419); +lean_dec(x_23417); +x_23420 = lean_ctor_get(x_23418, 0); +lean_inc(x_23420); +x_23421 = lean_ctor_get(x_23418, 1); +lean_inc(x_23421); +if (lean_is_exclusive(x_23418)) { + lean_ctor_release(x_23418, 0); + lean_ctor_release(x_23418, 1); + x_23422 = x_23418; +} else { + lean_dec_ref(x_23418); + x_23422 = lean_box(0); +} +if (lean_is_scalar(x_23390)) { + x_23423 = lean_alloc_ctor(1, 1, 0); +} else { + x_23423 = x_23390; +} +lean_ctor_set(x_23423, 0, x_23420); +if (lean_is_scalar(x_23422)) { + x_23424 = lean_alloc_ctor(0, 2, 0); +} else { + x_23424 = x_23422; +} +lean_ctor_set(x_23424, 0, x_23423); +lean_ctor_set(x_23424, 1, x_23421); +x_22803 = x_23424; +x_22804 = x_23419; +goto block_23381; +} +else +{ +lean_object* x_23425; lean_object* x_23426; lean_object* x_23427; lean_object* x_23428; +lean_dec(x_23390); +lean_dec(x_22801); +lean_dec(x_20599); +lean_dec(x_20593); +lean_dec(x_20592); +lean_dec(x_153); +lean_dec(x_5); +lean_dec(x_4); +lean_dec(x_2); +lean_dec(x_1); +x_23425 = lean_ctor_get(x_23417, 0); +lean_inc(x_23425); +x_23426 = lean_ctor_get(x_23417, 1); +lean_inc(x_23426); +if (lean_is_exclusive(x_23417)) { + lean_ctor_release(x_23417, 0); + lean_ctor_release(x_23417, 1); + x_23427 = x_23417; +} else { + lean_dec_ref(x_23417); + x_23427 = lean_box(0); +} +if (lean_is_scalar(x_23427)) { + x_23428 = lean_alloc_ctor(1, 2, 0); +} else { + x_23428 = x_23427; +} +lean_ctor_set(x_23428, 0, x_23425); +lean_ctor_set(x_23428, 1, x_23426); +return x_23428; +} +} +else +{ +lean_object* x_23429; lean_object* x_23430; lean_object* x_23431; lean_object* x_23432; +lean_dec(x_23409); +lean_dec(x_23404); +lean_dec(x_23399); +lean_dec(x_23398); +lean_dec(x_23390); +lean_dec(x_22801); +lean_dec(x_20599); +lean_dec(x_20593); +lean_dec(x_20592); +lean_dec(x_153); +lean_dec(x_5); +lean_dec(x_4); +lean_dec(x_2); +lean_dec(x_1); +x_23429 = lean_ctor_get(x_23412, 0); +lean_inc(x_23429); +x_23430 = lean_ctor_get(x_23412, 1); +lean_inc(x_23430); +if (lean_is_exclusive(x_23412)) { + lean_ctor_release(x_23412, 0); + lean_ctor_release(x_23412, 1); + x_23431 = x_23412; +} else { + lean_dec_ref(x_23412); + x_23431 = lean_box(0); +} +if (lean_is_scalar(x_23431)) { + x_23432 = lean_alloc_ctor(1, 2, 0); +} else { + x_23432 = x_23431; +} +lean_ctor_set(x_23432, 0, x_23429); +lean_ctor_set(x_23432, 1, x_23430); +return x_23432; +} +} +else +{ +lean_object* x_23433; lean_object* x_23434; lean_object* x_23435; lean_object* x_23436; lean_object* x_23437; lean_object* x_23438; lean_object* x_23439; lean_object* x_23440; lean_object* x_23441; +lean_dec(x_23393); +lean_dec(x_23391); +lean_inc(x_22801); +lean_inc(x_153); +if (lean_is_scalar(x_23388)) { + x_23433 = lean_alloc_ctor(6, 2, 0); +} else { + x_23433 = x_23388; + lean_ctor_set_tag(x_23433, 6); +} +lean_ctor_set(x_23433, 0, 
x_153); +lean_ctor_set(x_23433, 1, x_22801); +x_23434 = lean_ctor_get(x_1, 0); +lean_inc(x_23434); +x_23435 = l_Lean_IR_ToIR_bindVar(x_23434, x_22802, x_4, x_5, x_23387); +x_23436 = lean_ctor_get(x_23435, 0); +lean_inc(x_23436); +x_23437 = lean_ctor_get(x_23435, 1); +lean_inc(x_23437); +lean_dec(x_23435); +x_23438 = lean_ctor_get(x_23436, 0); +lean_inc(x_23438); +x_23439 = lean_ctor_get(x_23436, 1); +lean_inc(x_23439); +lean_dec(x_23436); +x_23440 = lean_ctor_get(x_1, 2); +lean_inc(x_23440); +lean_inc(x_5); +lean_inc(x_4); +x_23441 = l_Lean_IR_ToIR_lowerType(x_23440, x_23439, x_4, x_5, x_23437); +if (lean_obj_tag(x_23441) == 0) +{ +lean_object* x_23442; lean_object* x_23443; lean_object* x_23444; lean_object* x_23445; lean_object* x_23446; +x_23442 = lean_ctor_get(x_23441, 0); +lean_inc(x_23442); +x_23443 = lean_ctor_get(x_23441, 1); +lean_inc(x_23443); +lean_dec(x_23441); +x_23444 = lean_ctor_get(x_23442, 0); +lean_inc(x_23444); +x_23445 = lean_ctor_get(x_23442, 1); +lean_inc(x_23445); +lean_dec(x_23442); +lean_inc(x_5); +lean_inc(x_4); +lean_inc(x_2); +x_23446 = l_Lean_IR_ToIR_lowerLet___lambda__1(x_2, x_23438, x_23433, x_23444, x_23445, x_4, x_5, x_23443); +if (lean_obj_tag(x_23446) == 0) +{ +lean_object* x_23447; lean_object* x_23448; lean_object* x_23449; lean_object* x_23450; lean_object* x_23451; lean_object* x_23452; lean_object* x_23453; +x_23447 = lean_ctor_get(x_23446, 0); +lean_inc(x_23447); +x_23448 = lean_ctor_get(x_23446, 1); +lean_inc(x_23448); +lean_dec(x_23446); +x_23449 = lean_ctor_get(x_23447, 0); +lean_inc(x_23449); +x_23450 = lean_ctor_get(x_23447, 1); +lean_inc(x_23450); +if (lean_is_exclusive(x_23447)) { + lean_ctor_release(x_23447, 0); + lean_ctor_release(x_23447, 1); + x_23451 = x_23447; +} else { + lean_dec_ref(x_23447); + x_23451 = lean_box(0); +} +if (lean_is_scalar(x_23390)) { + x_23452 = lean_alloc_ctor(1, 1, 0); +} else { + x_23452 = x_23390; +} +lean_ctor_set(x_23452, 0, x_23449); +if (lean_is_scalar(x_23451)) { + x_23453 = lean_alloc_ctor(0, 2, 0); +} else { + x_23453 = x_23451; +} +lean_ctor_set(x_23453, 0, x_23452); +lean_ctor_set(x_23453, 1, x_23450); +x_22803 = x_23453; +x_22804 = x_23448; +goto block_23381; +} +else +{ +lean_object* x_23454; lean_object* x_23455; lean_object* x_23456; lean_object* x_23457; +lean_dec(x_23390); +lean_dec(x_22801); +lean_dec(x_20599); +lean_dec(x_20593); +lean_dec(x_20592); +lean_dec(x_153); +lean_dec(x_5); +lean_dec(x_4); +lean_dec(x_2); +lean_dec(x_1); +x_23454 = lean_ctor_get(x_23446, 0); +lean_inc(x_23454); +x_23455 = lean_ctor_get(x_23446, 1); +lean_inc(x_23455); +if (lean_is_exclusive(x_23446)) { + lean_ctor_release(x_23446, 0); + lean_ctor_release(x_23446, 1); + x_23456 = x_23446; +} else { + lean_dec_ref(x_23446); + x_23456 = lean_box(0); +} +if (lean_is_scalar(x_23456)) { + x_23457 = lean_alloc_ctor(1, 2, 0); +} else { + x_23457 = x_23456; +} +lean_ctor_set(x_23457, 0, x_23454); +lean_ctor_set(x_23457, 1, x_23455); +return x_23457; +} +} +else +{ +lean_object* x_23458; lean_object* x_23459; lean_object* x_23460; lean_object* x_23461; +lean_dec(x_23438); +lean_dec(x_23433); +lean_dec(x_23390); +lean_dec(x_22801); +lean_dec(x_20599); +lean_dec(x_20593); +lean_dec(x_20592); +lean_dec(x_153); +lean_dec(x_5); +lean_dec(x_4); +lean_dec(x_2); +lean_dec(x_1); +x_23458 = lean_ctor_get(x_23441, 0); +lean_inc(x_23458); +x_23459 = lean_ctor_get(x_23441, 1); +lean_inc(x_23459); +if (lean_is_exclusive(x_23441)) { + lean_ctor_release(x_23441, 0); + lean_ctor_release(x_23441, 1); + x_23460 = x_23441; +} else { + 
lean_dec_ref(x_23441); + x_23460 = lean_box(0); +} +if (lean_is_scalar(x_23460)) { + x_23461 = lean_alloc_ctor(1, 2, 0); +} else { + x_23461 = x_23460; +} +lean_ctor_set(x_23461, 0, x_23458); +lean_ctor_set(x_23461, 1, x_23459); +return x_23461; +} +} +} +else +{ +lean_object* x_23462; lean_object* x_23463; lean_object* x_23464; lean_object* x_23465; lean_object* x_23466; lean_object* x_23467; lean_object* x_23468; lean_object* x_23469; lean_object* x_23470; +lean_dec(x_23393); +lean_dec(x_23391); +lean_inc(x_22801); +lean_inc(x_153); +if (lean_is_scalar(x_23388)) { + x_23462 = lean_alloc_ctor(7, 2, 0); +} else { + x_23462 = x_23388; + lean_ctor_set_tag(x_23462, 7); +} +lean_ctor_set(x_23462, 0, x_153); +lean_ctor_set(x_23462, 1, x_22801); +x_23463 = lean_ctor_get(x_1, 0); +lean_inc(x_23463); +x_23464 = l_Lean_IR_ToIR_bindVar(x_23463, x_22802, x_4, x_5, x_23387); +x_23465 = lean_ctor_get(x_23464, 0); +lean_inc(x_23465); +x_23466 = lean_ctor_get(x_23464, 1); +lean_inc(x_23466); +lean_dec(x_23464); +x_23467 = lean_ctor_get(x_23465, 0); +lean_inc(x_23467); +x_23468 = lean_ctor_get(x_23465, 1); +lean_inc(x_23468); +lean_dec(x_23465); +x_23469 = lean_box(7); +lean_inc(x_5); +lean_inc(x_4); +lean_inc(x_2); +x_23470 = l_Lean_IR_ToIR_lowerLet___lambda__1(x_2, x_23467, x_23462, x_23469, x_23468, x_4, x_5, x_23466); +if (lean_obj_tag(x_23470) == 0) +{ +lean_object* x_23471; lean_object* x_23472; lean_object* x_23473; lean_object* x_23474; lean_object* x_23475; lean_object* x_23476; lean_object* x_23477; +x_23471 = lean_ctor_get(x_23470, 0); +lean_inc(x_23471); +x_23472 = lean_ctor_get(x_23470, 1); +lean_inc(x_23472); +lean_dec(x_23470); +x_23473 = lean_ctor_get(x_23471, 0); +lean_inc(x_23473); +x_23474 = lean_ctor_get(x_23471, 1); +lean_inc(x_23474); +if (lean_is_exclusive(x_23471)) { + lean_ctor_release(x_23471, 0); + lean_ctor_release(x_23471, 1); + x_23475 = x_23471; +} else { + lean_dec_ref(x_23471); + x_23475 = lean_box(0); +} +if (lean_is_scalar(x_23390)) { + x_23476 = lean_alloc_ctor(1, 1, 0); +} else { + x_23476 = x_23390; +} +lean_ctor_set(x_23476, 0, x_23473); +if (lean_is_scalar(x_23475)) { + x_23477 = lean_alloc_ctor(0, 2, 0); +} else { + x_23477 = x_23475; +} +lean_ctor_set(x_23477, 0, x_23476); +lean_ctor_set(x_23477, 1, x_23474); +x_22803 = x_23477; +x_22804 = x_23472; +goto block_23381; +} +else +{ +lean_object* x_23478; lean_object* x_23479; lean_object* x_23480; lean_object* x_23481; +lean_dec(x_23390); +lean_dec(x_22801); +lean_dec(x_20599); +lean_dec(x_20593); +lean_dec(x_20592); +lean_dec(x_153); +lean_dec(x_5); +lean_dec(x_4); +lean_dec(x_2); +lean_dec(x_1); +x_23478 = lean_ctor_get(x_23470, 0); +lean_inc(x_23478); +x_23479 = lean_ctor_get(x_23470, 1); +lean_inc(x_23479); +if (lean_is_exclusive(x_23470)) { + lean_ctor_release(x_23470, 0); + lean_ctor_release(x_23470, 1); + x_23480 = x_23470; +} else { + lean_dec_ref(x_23470); + x_23480 = lean_box(0); +} +if (lean_is_scalar(x_23480)) { + x_23481 = lean_alloc_ctor(1, 2, 0); +} else { + x_23481 = x_23480; +} +lean_ctor_set(x_23481, 0, x_23478); +lean_ctor_set(x_23481, 1, x_23479); +return x_23481; +} +} +} +block_23381: +{ +lean_object* x_22805; +x_22805 = lean_ctor_get(x_22803, 0); +lean_inc(x_22805); +if (lean_obj_tag(x_22805) == 0) +{ +lean_object* x_22806; lean_object* x_22807; lean_object* x_22808; lean_object* x_22809; lean_object* x_22810; lean_object* x_22811; lean_object* x_22812; uint8_t x_22813; lean_object* x_22814; +lean_dec(x_20599); +x_22806 = lean_ctor_get(x_22803, 1); +lean_inc(x_22806); +if 
(lean_is_exclusive(x_22803)) { + lean_ctor_release(x_22803, 0); + lean_ctor_release(x_22803, 1); + x_22807 = x_22803; +} else { + lean_dec_ref(x_22803); + x_22807 = lean_box(0); +} +x_22808 = lean_st_ref_get(x_5, x_22804); +x_22809 = lean_ctor_get(x_22808, 0); +lean_inc(x_22809); +x_22810 = lean_ctor_get(x_22808, 1); +lean_inc(x_22810); +if (lean_is_exclusive(x_22808)) { + lean_ctor_release(x_22808, 0); + lean_ctor_release(x_22808, 1); + x_22811 = x_22808; +} else { + lean_dec_ref(x_22808); + x_22811 = lean_box(0); +} +x_22812 = lean_ctor_get(x_22809, 0); +lean_inc(x_22812); +lean_dec(x_22809); +x_22813 = 0; +lean_inc(x_153); +lean_inc(x_22812); +x_22814 = l_Lean_Environment_find_x3f(x_22812, x_153, x_22813); +if (lean_obj_tag(x_22814) == 0) +{ +lean_object* x_22815; lean_object* x_22816; +lean_dec(x_22812); +lean_dec(x_22811); +lean_dec(x_22807); +lean_dec(x_22801); +lean_dec(x_20593); +lean_dec(x_20592); +lean_dec(x_153); +lean_dec(x_2); +lean_dec(x_1); +x_22815 = l_Lean_IR_ToIR_lowerLet___closed__6; +x_22816 = l_panic___at_Lean_IR_ToIR_lowerAlt_loop___spec__1(x_22815, x_22806, x_4, x_5, x_22810); +return x_22816; +} +else +{ +lean_object* x_22817; +x_22817 = lean_ctor_get(x_22814, 0); +lean_inc(x_22817); +lean_dec(x_22814); +switch (lean_obj_tag(x_22817)) { +case 0: +{ +lean_object* x_22818; lean_object* x_22819; uint8_t x_22820; +lean_dec(x_22812); +lean_dec(x_20593); +lean_dec(x_20592); +if (lean_is_exclusive(x_22817)) { + lean_ctor_release(x_22817, 0); + x_22818 = x_22817; +} else { + lean_dec_ref(x_22817); + x_22818 = lean_box(0); +} +x_22819 = l_Lean_IR_ToIR_lowerLet___closed__9; +x_22820 = lean_name_eq(x_153, x_22819); +if (x_22820 == 0) +{ +lean_object* x_22821; uint8_t x_22822; +x_22821 = l_Lean_IR_ToIR_lowerLet___closed__11; +x_22822 = lean_name_eq(x_153, x_22821); +if (x_22822 == 0) +{ +lean_object* x_22823; lean_object* x_22824; lean_object* x_22825; +lean_dec(x_22811); +lean_dec(x_22807); +lean_inc(x_153); +x_22823 = l_Lean_IR_ToIR_findDecl(x_153, x_22806, x_4, x_5, x_22810); +x_22824 = lean_ctor_get(x_22823, 0); +lean_inc(x_22824); +x_22825 = lean_ctor_get(x_22824, 0); +lean_inc(x_22825); +if (lean_obj_tag(x_22825) == 0) +{ +lean_object* x_22826; lean_object* x_22827; lean_object* x_22828; lean_object* x_22829; uint8_t x_22830; lean_object* x_22831; lean_object* x_22832; lean_object* x_22833; lean_object* x_22834; lean_object* x_22835; lean_object* x_22836; lean_object* x_22837; lean_object* x_22838; lean_object* x_22839; +lean_dec(x_22801); +lean_dec(x_2); +lean_dec(x_1); +x_22826 = lean_ctor_get(x_22823, 1); +lean_inc(x_22826); +if (lean_is_exclusive(x_22823)) { + lean_ctor_release(x_22823, 0); + lean_ctor_release(x_22823, 1); + x_22827 = x_22823; +} else { + lean_dec_ref(x_22823); + x_22827 = lean_box(0); +} +x_22828 = lean_ctor_get(x_22824, 1); +lean_inc(x_22828); +if (lean_is_exclusive(x_22824)) { + lean_ctor_release(x_22824, 0); + lean_ctor_release(x_22824, 1); + x_22829 = x_22824; +} else { + lean_dec_ref(x_22824); + x_22829 = lean_box(0); +} +x_22830 = 1; +x_22831 = l_Lean_IR_ToIR_lowerLet___closed__14; +x_22832 = l_Lean_Name_toString(x_153, x_22830, x_22831); +if (lean_is_scalar(x_22818)) { + x_22833 = lean_alloc_ctor(3, 1, 0); +} else { + x_22833 = x_22818; + lean_ctor_set_tag(x_22833, 3); +} +lean_ctor_set(x_22833, 0, x_22832); +x_22834 = l_Lean_IR_ToIR_lowerLet___closed__13; +if (lean_is_scalar(x_22829)) { + x_22835 = lean_alloc_ctor(5, 2, 0); +} else { + x_22835 = x_22829; + lean_ctor_set_tag(x_22835, 5); +} +lean_ctor_set(x_22835, 0, x_22834); 
+lean_ctor_set(x_22835, 1, x_22833); +x_22836 = l_Lean_IR_ToIR_lowerLet___closed__16; +if (lean_is_scalar(x_22827)) { + x_22837 = lean_alloc_ctor(5, 2, 0); +} else { + x_22837 = x_22827; + lean_ctor_set_tag(x_22837, 5); +} +lean_ctor_set(x_22837, 0, x_22835); +lean_ctor_set(x_22837, 1, x_22836); +x_22838 = l_Lean_MessageData_ofFormat(x_22837); +x_22839 = l_Lean_throwError___at_Lean_IR_ToIR_lowerLet___spec__1(x_22838, x_22828, x_4, x_5, x_22826); +lean_dec(x_5); +lean_dec(x_4); +lean_dec(x_22828); +return x_22839; +} +else +{ +lean_object* x_22840; lean_object* x_22841; lean_object* x_22842; lean_object* x_22843; lean_object* x_22844; lean_object* x_22845; lean_object* x_22846; uint8_t x_22847; +lean_dec(x_22818); +x_22840 = lean_ctor_get(x_22823, 1); +lean_inc(x_22840); +lean_dec(x_22823); +x_22841 = lean_ctor_get(x_22824, 1); +lean_inc(x_22841); +if (lean_is_exclusive(x_22824)) { + lean_ctor_release(x_22824, 0); + lean_ctor_release(x_22824, 1); + x_22842 = x_22824; +} else { + lean_dec_ref(x_22824); + x_22842 = lean_box(0); +} +x_22843 = lean_ctor_get(x_22825, 0); +lean_inc(x_22843); +lean_dec(x_22825); +x_22844 = lean_array_get_size(x_22801); +x_22845 = l_Lean_IR_Decl_params(x_22843); +lean_dec(x_22843); +x_22846 = lean_array_get_size(x_22845); +lean_dec(x_22845); +x_22847 = lean_nat_dec_lt(x_22844, x_22846); +if (x_22847 == 0) +{ +uint8_t x_22848; +x_22848 = lean_nat_dec_eq(x_22844, x_22846); +if (x_22848 == 0) +{ +lean_object* x_22849; lean_object* x_22850; lean_object* x_22851; lean_object* x_22852; lean_object* x_22853; lean_object* x_22854; lean_object* x_22855; lean_object* x_22856; lean_object* x_22857; lean_object* x_22858; lean_object* x_22859; lean_object* x_22860; lean_object* x_22861; lean_object* x_22862; lean_object* x_22863; lean_object* x_22864; lean_object* x_22865; +x_22849 = lean_unsigned_to_nat(0u); +x_22850 = l_Array_extract___rarg(x_22801, x_22849, x_22846); +x_22851 = l_Array_extract___rarg(x_22801, x_22846, x_22844); +lean_dec(x_22844); +lean_dec(x_22801); +if (lean_is_scalar(x_22842)) { + x_22852 = lean_alloc_ctor(6, 2, 0); +} else { + x_22852 = x_22842; + lean_ctor_set_tag(x_22852, 6); +} +lean_ctor_set(x_22852, 0, x_153); +lean_ctor_set(x_22852, 1, x_22850); +x_22853 = lean_ctor_get(x_1, 0); +lean_inc(x_22853); +x_22854 = l_Lean_IR_ToIR_bindVar(x_22853, x_22841, x_4, x_5, x_22840); +x_22855 = lean_ctor_get(x_22854, 0); +lean_inc(x_22855); +x_22856 = lean_ctor_get(x_22854, 1); +lean_inc(x_22856); +lean_dec(x_22854); +x_22857 = lean_ctor_get(x_22855, 0); +lean_inc(x_22857); +x_22858 = lean_ctor_get(x_22855, 1); +lean_inc(x_22858); +lean_dec(x_22855); +x_22859 = l_Lean_IR_ToIR_newVar(x_22858, x_4, x_5, x_22856); +x_22860 = lean_ctor_get(x_22859, 0); +lean_inc(x_22860); +x_22861 = lean_ctor_get(x_22859, 1); +lean_inc(x_22861); +lean_dec(x_22859); +x_22862 = lean_ctor_get(x_22860, 0); +lean_inc(x_22862); +x_22863 = lean_ctor_get(x_22860, 1); +lean_inc(x_22863); +lean_dec(x_22860); +x_22864 = lean_ctor_get(x_1, 2); +lean_inc(x_22864); +lean_dec(x_1); +lean_inc(x_5); +lean_inc(x_4); +x_22865 = l_Lean_IR_ToIR_lowerType(x_22864, x_22863, x_4, x_5, x_22861); +if (lean_obj_tag(x_22865) == 0) +{ +lean_object* x_22866; lean_object* x_22867; lean_object* x_22868; lean_object* x_22869; lean_object* x_22870; +x_22866 = lean_ctor_get(x_22865, 0); +lean_inc(x_22866); +x_22867 = lean_ctor_get(x_22865, 1); +lean_inc(x_22867); +lean_dec(x_22865); +x_22868 = lean_ctor_get(x_22866, 0); +lean_inc(x_22868); +x_22869 = lean_ctor_get(x_22866, 1); +lean_inc(x_22869); +lean_dec(x_22866); 
+x_22870 = l_Lean_IR_ToIR_lowerLet___lambda__3(x_2, x_22862, x_22851, x_22857, x_22852, x_22868, x_22869, x_4, x_5, x_22867); +return x_22870; +} +else +{ +lean_object* x_22871; lean_object* x_22872; lean_object* x_22873; lean_object* x_22874; +lean_dec(x_22862); +lean_dec(x_22857); +lean_dec(x_22852); +lean_dec(x_22851); +lean_dec(x_5); +lean_dec(x_4); +lean_dec(x_2); +x_22871 = lean_ctor_get(x_22865, 0); +lean_inc(x_22871); +x_22872 = lean_ctor_get(x_22865, 1); +lean_inc(x_22872); +if (lean_is_exclusive(x_22865)) { + lean_ctor_release(x_22865, 0); + lean_ctor_release(x_22865, 1); + x_22873 = x_22865; +} else { + lean_dec_ref(x_22865); + x_22873 = lean_box(0); +} +if (lean_is_scalar(x_22873)) { + x_22874 = lean_alloc_ctor(1, 2, 0); +} else { + x_22874 = x_22873; +} +lean_ctor_set(x_22874, 0, x_22871); +lean_ctor_set(x_22874, 1, x_22872); +return x_22874; +} +} +else +{ +lean_object* x_22875; lean_object* x_22876; lean_object* x_22877; lean_object* x_22878; lean_object* x_22879; lean_object* x_22880; lean_object* x_22881; lean_object* x_22882; lean_object* x_22883; +lean_dec(x_22846); +lean_dec(x_22844); +if (lean_is_scalar(x_22842)) { + x_22875 = lean_alloc_ctor(6, 2, 0); +} else { + x_22875 = x_22842; + lean_ctor_set_tag(x_22875, 6); +} +lean_ctor_set(x_22875, 0, x_153); +lean_ctor_set(x_22875, 1, x_22801); +x_22876 = lean_ctor_get(x_1, 0); +lean_inc(x_22876); +x_22877 = l_Lean_IR_ToIR_bindVar(x_22876, x_22841, x_4, x_5, x_22840); +x_22878 = lean_ctor_get(x_22877, 0); +lean_inc(x_22878); +x_22879 = lean_ctor_get(x_22877, 1); +lean_inc(x_22879); +lean_dec(x_22877); +x_22880 = lean_ctor_get(x_22878, 0); +lean_inc(x_22880); +x_22881 = lean_ctor_get(x_22878, 1); +lean_inc(x_22881); +lean_dec(x_22878); +x_22882 = lean_ctor_get(x_1, 2); +lean_inc(x_22882); +lean_dec(x_1); +lean_inc(x_5); +lean_inc(x_4); +x_22883 = l_Lean_IR_ToIR_lowerType(x_22882, x_22881, x_4, x_5, x_22879); +if (lean_obj_tag(x_22883) == 0) +{ +lean_object* x_22884; lean_object* x_22885; lean_object* x_22886; lean_object* x_22887; lean_object* x_22888; +x_22884 = lean_ctor_get(x_22883, 0); +lean_inc(x_22884); +x_22885 = lean_ctor_get(x_22883, 1); +lean_inc(x_22885); +lean_dec(x_22883); +x_22886 = lean_ctor_get(x_22884, 0); +lean_inc(x_22886); +x_22887 = lean_ctor_get(x_22884, 1); +lean_inc(x_22887); +lean_dec(x_22884); +x_22888 = l_Lean_IR_ToIR_lowerLet___lambda__1(x_2, x_22880, x_22875, x_22886, x_22887, x_4, x_5, x_22885); +return x_22888; +} +else +{ +lean_object* x_22889; lean_object* x_22890; lean_object* x_22891; lean_object* x_22892; +lean_dec(x_22880); +lean_dec(x_22875); +lean_dec(x_5); +lean_dec(x_4); +lean_dec(x_2); +x_22889 = lean_ctor_get(x_22883, 0); +lean_inc(x_22889); +x_22890 = lean_ctor_get(x_22883, 1); +lean_inc(x_22890); +if (lean_is_exclusive(x_22883)) { + lean_ctor_release(x_22883, 0); + lean_ctor_release(x_22883, 1); + x_22891 = x_22883; +} else { + lean_dec_ref(x_22883); + x_22891 = lean_box(0); +} +if (lean_is_scalar(x_22891)) { + x_22892 = lean_alloc_ctor(1, 2, 0); +} else { + x_22892 = x_22891; +} +lean_ctor_set(x_22892, 0, x_22889); +lean_ctor_set(x_22892, 1, x_22890); +return x_22892; +} +} +} +else +{ +lean_object* x_22893; lean_object* x_22894; lean_object* x_22895; lean_object* x_22896; lean_object* x_22897; lean_object* x_22898; lean_object* x_22899; lean_object* x_22900; lean_object* x_22901; +lean_dec(x_22846); +lean_dec(x_22844); +if (lean_is_scalar(x_22842)) { + x_22893 = lean_alloc_ctor(7, 2, 0); +} else { + x_22893 = x_22842; + lean_ctor_set_tag(x_22893, 7); +} +lean_ctor_set(x_22893, 0, 
x_153); +lean_ctor_set(x_22893, 1, x_22801); +x_22894 = lean_ctor_get(x_1, 0); +lean_inc(x_22894); +lean_dec(x_1); +x_22895 = l_Lean_IR_ToIR_bindVar(x_22894, x_22841, x_4, x_5, x_22840); +x_22896 = lean_ctor_get(x_22895, 0); +lean_inc(x_22896); +x_22897 = lean_ctor_get(x_22895, 1); +lean_inc(x_22897); +lean_dec(x_22895); +x_22898 = lean_ctor_get(x_22896, 0); +lean_inc(x_22898); +x_22899 = lean_ctor_get(x_22896, 1); +lean_inc(x_22899); +lean_dec(x_22896); +x_22900 = lean_box(7); +x_22901 = l_Lean_IR_ToIR_lowerLet___lambda__1(x_2, x_22898, x_22893, x_22900, x_22899, x_4, x_5, x_22897); +return x_22901; +} +} +} +else +{ +lean_object* x_22902; lean_object* x_22903; lean_object* x_22904; +lean_dec(x_22818); +lean_dec(x_22801); +lean_dec(x_153); +lean_dec(x_5); +lean_dec(x_4); +lean_dec(x_2); +lean_dec(x_1); +x_22902 = lean_box(13); +if (lean_is_scalar(x_22807)) { + x_22903 = lean_alloc_ctor(0, 2, 0); +} else { + x_22903 = x_22807; +} +lean_ctor_set(x_22903, 0, x_22902); +lean_ctor_set(x_22903, 1, x_22806); +if (lean_is_scalar(x_22811)) { + x_22904 = lean_alloc_ctor(0, 2, 0); +} else { + x_22904 = x_22811; +} +lean_ctor_set(x_22904, 0, x_22903); +lean_ctor_set(x_22904, 1, x_22810); +return x_22904; +} +} +else +{ +lean_object* x_22905; lean_object* x_22906; lean_object* x_22907; +lean_dec(x_22818); +lean_dec(x_22811); +lean_dec(x_22807); +lean_dec(x_153); +x_22905 = l_Lean_IR_instInhabitedArg; +x_22906 = lean_unsigned_to_nat(2u); +x_22907 = lean_array_get(x_22905, x_22801, x_22906); +lean_dec(x_22801); +if (lean_obj_tag(x_22907) == 0) +{ +lean_object* x_22908; lean_object* x_22909; lean_object* x_22910; lean_object* x_22911; lean_object* x_22912; lean_object* x_22913; lean_object* x_22914; +x_22908 = lean_ctor_get(x_22907, 0); +lean_inc(x_22908); +lean_dec(x_22907); +x_22909 = lean_ctor_get(x_1, 0); +lean_inc(x_22909); +lean_dec(x_1); +x_22910 = l_Lean_IR_ToIR_bindVarToVarId(x_22909, x_22908, x_22806, x_4, x_5, x_22810); +x_22911 = lean_ctor_get(x_22910, 0); +lean_inc(x_22911); +x_22912 = lean_ctor_get(x_22910, 1); +lean_inc(x_22912); +lean_dec(x_22910); +x_22913 = lean_ctor_get(x_22911, 1); +lean_inc(x_22913); +lean_dec(x_22911); +x_22914 = l_Lean_IR_ToIR_lowerCode(x_2, x_22913, x_4, x_5, x_22912); +return x_22914; +} +else +{ +lean_object* x_22915; lean_object* x_22916; lean_object* x_22917; lean_object* x_22918; lean_object* x_22919; lean_object* x_22920; +x_22915 = lean_ctor_get(x_1, 0); +lean_inc(x_22915); +lean_dec(x_1); +x_22916 = l_Lean_IR_ToIR_bindErased(x_22915, x_22806, x_4, x_5, x_22810); +x_22917 = lean_ctor_get(x_22916, 0); +lean_inc(x_22917); +x_22918 = lean_ctor_get(x_22916, 1); +lean_inc(x_22918); +lean_dec(x_22916); +x_22919 = lean_ctor_get(x_22917, 1); +lean_inc(x_22919); +lean_dec(x_22917); +x_22920 = l_Lean_IR_ToIR_lowerCode(x_2, x_22919, x_4, x_5, x_22918); +return x_22920; +} +} +} +case 1: +{ +lean_object* x_22921; lean_object* x_22922; lean_object* x_22949; lean_object* x_22950; +lean_dec(x_22817); +lean_dec(x_22812); +lean_dec(x_20593); +lean_dec(x_20592); +lean_inc(x_153); +x_22949 = l_Lean_Compiler_LCNF_getMonoDecl_x3f(x_153, x_4, x_5, x_22810); +x_22950 = lean_ctor_get(x_22949, 0); +lean_inc(x_22950); +if (lean_obj_tag(x_22950) == 0) +{ +lean_object* x_22951; lean_object* x_22952; lean_object* x_22953; +x_22951 = lean_ctor_get(x_22949, 1); +lean_inc(x_22951); +lean_dec(x_22949); +x_22952 = lean_box(0); +if (lean_is_scalar(x_22807)) { + x_22953 = lean_alloc_ctor(0, 2, 0); +} else { + x_22953 = x_22807; +} +lean_ctor_set(x_22953, 0, x_22952); +lean_ctor_set(x_22953, 
1, x_22806); +x_22921 = x_22953; +x_22922 = x_22951; +goto block_22948; +} +else +{ +lean_object* x_22954; lean_object* x_22955; lean_object* x_22956; lean_object* x_22957; lean_object* x_22958; lean_object* x_22959; lean_object* x_22960; uint8_t x_22961; +lean_dec(x_22807); +x_22954 = lean_ctor_get(x_22949, 1); +lean_inc(x_22954); +if (lean_is_exclusive(x_22949)) { + lean_ctor_release(x_22949, 0); + lean_ctor_release(x_22949, 1); + x_22955 = x_22949; +} else { + lean_dec_ref(x_22949); + x_22955 = lean_box(0); +} +x_22956 = lean_ctor_get(x_22950, 0); +lean_inc(x_22956); +if (lean_is_exclusive(x_22950)) { + lean_ctor_release(x_22950, 0); + x_22957 = x_22950; +} else { + lean_dec_ref(x_22950); + x_22957 = lean_box(0); +} +x_22958 = lean_array_get_size(x_22801); +x_22959 = lean_ctor_get(x_22956, 3); +lean_inc(x_22959); +lean_dec(x_22956); +x_22960 = lean_array_get_size(x_22959); +lean_dec(x_22959); +x_22961 = lean_nat_dec_lt(x_22958, x_22960); +if (x_22961 == 0) +{ +uint8_t x_22962; +x_22962 = lean_nat_dec_eq(x_22958, x_22960); +if (x_22962 == 0) +{ +lean_object* x_22963; lean_object* x_22964; lean_object* x_22965; lean_object* x_22966; lean_object* x_22967; lean_object* x_22968; lean_object* x_22969; lean_object* x_22970; lean_object* x_22971; lean_object* x_22972; lean_object* x_22973; lean_object* x_22974; lean_object* x_22975; lean_object* x_22976; lean_object* x_22977; lean_object* x_22978; lean_object* x_22979; +x_22963 = lean_unsigned_to_nat(0u); +x_22964 = l_Array_extract___rarg(x_22801, x_22963, x_22960); +x_22965 = l_Array_extract___rarg(x_22801, x_22960, x_22958); +lean_dec(x_22958); +lean_inc(x_153); +if (lean_is_scalar(x_22955)) { + x_22966 = lean_alloc_ctor(6, 2, 0); +} else { + x_22966 = x_22955; + lean_ctor_set_tag(x_22966, 6); +} +lean_ctor_set(x_22966, 0, x_153); +lean_ctor_set(x_22966, 1, x_22964); +x_22967 = lean_ctor_get(x_1, 0); +lean_inc(x_22967); +x_22968 = l_Lean_IR_ToIR_bindVar(x_22967, x_22806, x_4, x_5, x_22954); +x_22969 = lean_ctor_get(x_22968, 0); +lean_inc(x_22969); +x_22970 = lean_ctor_get(x_22968, 1); +lean_inc(x_22970); +lean_dec(x_22968); +x_22971 = lean_ctor_get(x_22969, 0); +lean_inc(x_22971); +x_22972 = lean_ctor_get(x_22969, 1); +lean_inc(x_22972); +lean_dec(x_22969); +x_22973 = l_Lean_IR_ToIR_newVar(x_22972, x_4, x_5, x_22970); +x_22974 = lean_ctor_get(x_22973, 0); +lean_inc(x_22974); +x_22975 = lean_ctor_get(x_22973, 1); +lean_inc(x_22975); +lean_dec(x_22973); +x_22976 = lean_ctor_get(x_22974, 0); +lean_inc(x_22976); +x_22977 = lean_ctor_get(x_22974, 1); +lean_inc(x_22977); +lean_dec(x_22974); +x_22978 = lean_ctor_get(x_1, 2); +lean_inc(x_22978); +lean_inc(x_5); +lean_inc(x_4); +x_22979 = l_Lean_IR_ToIR_lowerType(x_22978, x_22977, x_4, x_5, x_22975); +if (lean_obj_tag(x_22979) == 0) +{ +lean_object* x_22980; lean_object* x_22981; lean_object* x_22982; lean_object* x_22983; lean_object* x_22984; +x_22980 = lean_ctor_get(x_22979, 0); +lean_inc(x_22980); +x_22981 = lean_ctor_get(x_22979, 1); +lean_inc(x_22981); +lean_dec(x_22979); +x_22982 = lean_ctor_get(x_22980, 0); +lean_inc(x_22982); +x_22983 = lean_ctor_get(x_22980, 1); +lean_inc(x_22983); +lean_dec(x_22980); +lean_inc(x_5); +lean_inc(x_4); +lean_inc(x_2); +x_22984 = l_Lean_IR_ToIR_lowerLet___lambda__3(x_2, x_22976, x_22965, x_22971, x_22966, x_22982, x_22983, x_4, x_5, x_22981); +if (lean_obj_tag(x_22984) == 0) +{ +lean_object* x_22985; lean_object* x_22986; lean_object* x_22987; lean_object* x_22988; lean_object* x_22989; lean_object* x_22990; lean_object* x_22991; +x_22985 = 
lean_ctor_get(x_22984, 0); +lean_inc(x_22985); +x_22986 = lean_ctor_get(x_22984, 1); +lean_inc(x_22986); +lean_dec(x_22984); +x_22987 = lean_ctor_get(x_22985, 0); +lean_inc(x_22987); +x_22988 = lean_ctor_get(x_22985, 1); +lean_inc(x_22988); +if (lean_is_exclusive(x_22985)) { + lean_ctor_release(x_22985, 0); + lean_ctor_release(x_22985, 1); + x_22989 = x_22985; +} else { + lean_dec_ref(x_22985); + x_22989 = lean_box(0); +} +if (lean_is_scalar(x_22957)) { + x_22990 = lean_alloc_ctor(1, 1, 0); +} else { + x_22990 = x_22957; +} +lean_ctor_set(x_22990, 0, x_22987); +if (lean_is_scalar(x_22989)) { + x_22991 = lean_alloc_ctor(0, 2, 0); +} else { + x_22991 = x_22989; +} +lean_ctor_set(x_22991, 0, x_22990); +lean_ctor_set(x_22991, 1, x_22988); +x_22921 = x_22991; +x_22922 = x_22986; +goto block_22948; +} +else +{ +lean_object* x_22992; lean_object* x_22993; lean_object* x_22994; lean_object* x_22995; +lean_dec(x_22957); +lean_dec(x_22811); +lean_dec(x_22801); +lean_dec(x_153); +lean_dec(x_5); +lean_dec(x_4); +lean_dec(x_2); +lean_dec(x_1); +x_22992 = lean_ctor_get(x_22984, 0); +lean_inc(x_22992); +x_22993 = lean_ctor_get(x_22984, 1); +lean_inc(x_22993); +if (lean_is_exclusive(x_22984)) { + lean_ctor_release(x_22984, 0); + lean_ctor_release(x_22984, 1); + x_22994 = x_22984; +} else { + lean_dec_ref(x_22984); + x_22994 = lean_box(0); +} +if (lean_is_scalar(x_22994)) { + x_22995 = lean_alloc_ctor(1, 2, 0); +} else { + x_22995 = x_22994; +} +lean_ctor_set(x_22995, 0, x_22992); +lean_ctor_set(x_22995, 1, x_22993); +return x_22995; +} +} +else +{ +lean_object* x_22996; lean_object* x_22997; lean_object* x_22998; lean_object* x_22999; +lean_dec(x_22976); +lean_dec(x_22971); +lean_dec(x_22966); +lean_dec(x_22965); +lean_dec(x_22957); +lean_dec(x_22811); +lean_dec(x_22801); +lean_dec(x_153); +lean_dec(x_5); +lean_dec(x_4); +lean_dec(x_2); +lean_dec(x_1); +x_22996 = lean_ctor_get(x_22979, 0); +lean_inc(x_22996); +x_22997 = lean_ctor_get(x_22979, 1); +lean_inc(x_22997); +if (lean_is_exclusive(x_22979)) { + lean_ctor_release(x_22979, 0); + lean_ctor_release(x_22979, 1); + x_22998 = x_22979; +} else { + lean_dec_ref(x_22979); + x_22998 = lean_box(0); +} +if (lean_is_scalar(x_22998)) { + x_22999 = lean_alloc_ctor(1, 2, 0); +} else { + x_22999 = x_22998; +} +lean_ctor_set(x_22999, 0, x_22996); +lean_ctor_set(x_22999, 1, x_22997); +return x_22999; +} +} +else +{ +lean_object* x_23000; lean_object* x_23001; lean_object* x_23002; lean_object* x_23003; lean_object* x_23004; lean_object* x_23005; lean_object* x_23006; lean_object* x_23007; lean_object* x_23008; +lean_dec(x_22960); +lean_dec(x_22958); +lean_inc(x_22801); +lean_inc(x_153); +if (lean_is_scalar(x_22955)) { + x_23000 = lean_alloc_ctor(6, 2, 0); +} else { + x_23000 = x_22955; + lean_ctor_set_tag(x_23000, 6); +} +lean_ctor_set(x_23000, 0, x_153); +lean_ctor_set(x_23000, 1, x_22801); +x_23001 = lean_ctor_get(x_1, 0); +lean_inc(x_23001); +x_23002 = l_Lean_IR_ToIR_bindVar(x_23001, x_22806, x_4, x_5, x_22954); +x_23003 = lean_ctor_get(x_23002, 0); +lean_inc(x_23003); +x_23004 = lean_ctor_get(x_23002, 1); +lean_inc(x_23004); +lean_dec(x_23002); +x_23005 = lean_ctor_get(x_23003, 0); +lean_inc(x_23005); +x_23006 = lean_ctor_get(x_23003, 1); +lean_inc(x_23006); +lean_dec(x_23003); +x_23007 = lean_ctor_get(x_1, 2); +lean_inc(x_23007); +lean_inc(x_5); +lean_inc(x_4); +x_23008 = l_Lean_IR_ToIR_lowerType(x_23007, x_23006, x_4, x_5, x_23004); +if (lean_obj_tag(x_23008) == 0) +{ +lean_object* x_23009; lean_object* x_23010; lean_object* x_23011; lean_object* x_23012; 
lean_object* x_23013; +x_23009 = lean_ctor_get(x_23008, 0); +lean_inc(x_23009); +x_23010 = lean_ctor_get(x_23008, 1); +lean_inc(x_23010); +lean_dec(x_23008); +x_23011 = lean_ctor_get(x_23009, 0); +lean_inc(x_23011); +x_23012 = lean_ctor_get(x_23009, 1); +lean_inc(x_23012); +lean_dec(x_23009); +lean_inc(x_5); +lean_inc(x_4); +lean_inc(x_2); +x_23013 = l_Lean_IR_ToIR_lowerLet___lambda__1(x_2, x_23005, x_23000, x_23011, x_23012, x_4, x_5, x_23010); +if (lean_obj_tag(x_23013) == 0) +{ +lean_object* x_23014; lean_object* x_23015; lean_object* x_23016; lean_object* x_23017; lean_object* x_23018; lean_object* x_23019; lean_object* x_23020; +x_23014 = lean_ctor_get(x_23013, 0); +lean_inc(x_23014); +x_23015 = lean_ctor_get(x_23013, 1); +lean_inc(x_23015); +lean_dec(x_23013); +x_23016 = lean_ctor_get(x_23014, 0); +lean_inc(x_23016); +x_23017 = lean_ctor_get(x_23014, 1); +lean_inc(x_23017); +if (lean_is_exclusive(x_23014)) { + lean_ctor_release(x_23014, 0); + lean_ctor_release(x_23014, 1); + x_23018 = x_23014; +} else { + lean_dec_ref(x_23014); + x_23018 = lean_box(0); +} +if (lean_is_scalar(x_22957)) { + x_23019 = lean_alloc_ctor(1, 1, 0); +} else { + x_23019 = x_22957; +} +lean_ctor_set(x_23019, 0, x_23016); +if (lean_is_scalar(x_23018)) { + x_23020 = lean_alloc_ctor(0, 2, 0); +} else { + x_23020 = x_23018; +} +lean_ctor_set(x_23020, 0, x_23019); +lean_ctor_set(x_23020, 1, x_23017); +x_22921 = x_23020; +x_22922 = x_23015; +goto block_22948; +} +else +{ +lean_object* x_23021; lean_object* x_23022; lean_object* x_23023; lean_object* x_23024; +lean_dec(x_22957); +lean_dec(x_22811); +lean_dec(x_22801); +lean_dec(x_153); +lean_dec(x_5); +lean_dec(x_4); +lean_dec(x_2); +lean_dec(x_1); +x_23021 = lean_ctor_get(x_23013, 0); +lean_inc(x_23021); +x_23022 = lean_ctor_get(x_23013, 1); +lean_inc(x_23022); +if (lean_is_exclusive(x_23013)) { + lean_ctor_release(x_23013, 0); + lean_ctor_release(x_23013, 1); + x_23023 = x_23013; +} else { + lean_dec_ref(x_23013); + x_23023 = lean_box(0); +} +if (lean_is_scalar(x_23023)) { + x_23024 = lean_alloc_ctor(1, 2, 0); +} else { + x_23024 = x_23023; +} +lean_ctor_set(x_23024, 0, x_23021); +lean_ctor_set(x_23024, 1, x_23022); +return x_23024; +} +} +else +{ +lean_object* x_23025; lean_object* x_23026; lean_object* x_23027; lean_object* x_23028; +lean_dec(x_23005); +lean_dec(x_23000); +lean_dec(x_22957); +lean_dec(x_22811); +lean_dec(x_22801); +lean_dec(x_153); +lean_dec(x_5); +lean_dec(x_4); +lean_dec(x_2); +lean_dec(x_1); +x_23025 = lean_ctor_get(x_23008, 0); +lean_inc(x_23025); +x_23026 = lean_ctor_get(x_23008, 1); +lean_inc(x_23026); +if (lean_is_exclusive(x_23008)) { + lean_ctor_release(x_23008, 0); + lean_ctor_release(x_23008, 1); + x_23027 = x_23008; +} else { + lean_dec_ref(x_23008); + x_23027 = lean_box(0); +} +if (lean_is_scalar(x_23027)) { + x_23028 = lean_alloc_ctor(1, 2, 0); +} else { + x_23028 = x_23027; +} +lean_ctor_set(x_23028, 0, x_23025); +lean_ctor_set(x_23028, 1, x_23026); +return x_23028; +} +} +} +else +{ +lean_object* x_23029; lean_object* x_23030; lean_object* x_23031; lean_object* x_23032; lean_object* x_23033; lean_object* x_23034; lean_object* x_23035; lean_object* x_23036; lean_object* x_23037; +lean_dec(x_22960); +lean_dec(x_22958); +lean_inc(x_22801); +lean_inc(x_153); +if (lean_is_scalar(x_22955)) { + x_23029 = lean_alloc_ctor(7, 2, 0); +} else { + x_23029 = x_22955; + lean_ctor_set_tag(x_23029, 7); +} +lean_ctor_set(x_23029, 0, x_153); +lean_ctor_set(x_23029, 1, x_22801); +x_23030 = lean_ctor_get(x_1, 0); +lean_inc(x_23030); +x_23031 = 
l_Lean_IR_ToIR_bindVar(x_23030, x_22806, x_4, x_5, x_22954); +x_23032 = lean_ctor_get(x_23031, 0); +lean_inc(x_23032); +x_23033 = lean_ctor_get(x_23031, 1); +lean_inc(x_23033); +lean_dec(x_23031); +x_23034 = lean_ctor_get(x_23032, 0); +lean_inc(x_23034); +x_23035 = lean_ctor_get(x_23032, 1); +lean_inc(x_23035); +lean_dec(x_23032); +x_23036 = lean_box(7); +lean_inc(x_5); +lean_inc(x_4); +lean_inc(x_2); +x_23037 = l_Lean_IR_ToIR_lowerLet___lambda__1(x_2, x_23034, x_23029, x_23036, x_23035, x_4, x_5, x_23033); +if (lean_obj_tag(x_23037) == 0) +{ +lean_object* x_23038; lean_object* x_23039; lean_object* x_23040; lean_object* x_23041; lean_object* x_23042; lean_object* x_23043; lean_object* x_23044; +x_23038 = lean_ctor_get(x_23037, 0); +lean_inc(x_23038); +x_23039 = lean_ctor_get(x_23037, 1); +lean_inc(x_23039); +lean_dec(x_23037); +x_23040 = lean_ctor_get(x_23038, 0); +lean_inc(x_23040); +x_23041 = lean_ctor_get(x_23038, 1); +lean_inc(x_23041); +if (lean_is_exclusive(x_23038)) { + lean_ctor_release(x_23038, 0); + lean_ctor_release(x_23038, 1); + x_23042 = x_23038; +} else { + lean_dec_ref(x_23038); + x_23042 = lean_box(0); +} +if (lean_is_scalar(x_22957)) { + x_23043 = lean_alloc_ctor(1, 1, 0); +} else { + x_23043 = x_22957; +} +lean_ctor_set(x_23043, 0, x_23040); +if (lean_is_scalar(x_23042)) { + x_23044 = lean_alloc_ctor(0, 2, 0); +} else { + x_23044 = x_23042; +} +lean_ctor_set(x_23044, 0, x_23043); +lean_ctor_set(x_23044, 1, x_23041); +x_22921 = x_23044; +x_22922 = x_23039; +goto block_22948; +} +else +{ +lean_object* x_23045; lean_object* x_23046; lean_object* x_23047; lean_object* x_23048; +lean_dec(x_22957); +lean_dec(x_22811); +lean_dec(x_22801); +lean_dec(x_153); +lean_dec(x_5); +lean_dec(x_4); +lean_dec(x_2); +lean_dec(x_1); +x_23045 = lean_ctor_get(x_23037, 0); +lean_inc(x_23045); +x_23046 = lean_ctor_get(x_23037, 1); +lean_inc(x_23046); +if (lean_is_exclusive(x_23037)) { + lean_ctor_release(x_23037, 0); + lean_ctor_release(x_23037, 1); + x_23047 = x_23037; +} else { + lean_dec_ref(x_23037); + x_23047 = lean_box(0); +} +if (lean_is_scalar(x_23047)) { + x_23048 = lean_alloc_ctor(1, 2, 0); +} else { + x_23048 = x_23047; +} +lean_ctor_set(x_23048, 0, x_23045); +lean_ctor_set(x_23048, 1, x_23046); +return x_23048; +} +} +} +block_22948: +{ +lean_object* x_22923; +x_22923 = lean_ctor_get(x_22921, 0); +lean_inc(x_22923); +if (lean_obj_tag(x_22923) == 0) +{ +lean_object* x_22924; lean_object* x_22925; lean_object* x_22926; lean_object* x_22927; lean_object* x_22928; lean_object* x_22929; lean_object* x_22930; lean_object* x_22931; lean_object* x_22932; lean_object* x_22933; +lean_dec(x_22811); +x_22924 = lean_ctor_get(x_22921, 1); +lean_inc(x_22924); +lean_dec(x_22921); +x_22925 = lean_alloc_ctor(6, 2, 0); +lean_ctor_set(x_22925, 0, x_153); +lean_ctor_set(x_22925, 1, x_22801); +x_22926 = lean_ctor_get(x_1, 0); +lean_inc(x_22926); +x_22927 = l_Lean_IR_ToIR_bindVar(x_22926, x_22924, x_4, x_5, x_22922); +x_22928 = lean_ctor_get(x_22927, 0); +lean_inc(x_22928); +x_22929 = lean_ctor_get(x_22927, 1); +lean_inc(x_22929); +lean_dec(x_22927); +x_22930 = lean_ctor_get(x_22928, 0); +lean_inc(x_22930); +x_22931 = lean_ctor_get(x_22928, 1); +lean_inc(x_22931); +lean_dec(x_22928); +x_22932 = lean_ctor_get(x_1, 2); +lean_inc(x_22932); +lean_dec(x_1); +lean_inc(x_5); +lean_inc(x_4); +x_22933 = l_Lean_IR_ToIR_lowerType(x_22932, x_22931, x_4, x_5, x_22929); +if (lean_obj_tag(x_22933) == 0) +{ +lean_object* x_22934; lean_object* x_22935; lean_object* x_22936; lean_object* x_22937; lean_object* x_22938; 
+x_22934 = lean_ctor_get(x_22933, 0); +lean_inc(x_22934); +x_22935 = lean_ctor_get(x_22933, 1); +lean_inc(x_22935); +lean_dec(x_22933); +x_22936 = lean_ctor_get(x_22934, 0); +lean_inc(x_22936); +x_22937 = lean_ctor_get(x_22934, 1); +lean_inc(x_22937); +lean_dec(x_22934); +x_22938 = l_Lean_IR_ToIR_lowerLet___lambda__1(x_2, x_22930, x_22925, x_22936, x_22937, x_4, x_5, x_22935); +return x_22938; +} +else +{ +lean_object* x_22939; lean_object* x_22940; lean_object* x_22941; lean_object* x_22942; +lean_dec(x_22930); +lean_dec(x_22925); +lean_dec(x_5); +lean_dec(x_4); +lean_dec(x_2); +x_22939 = lean_ctor_get(x_22933, 0); +lean_inc(x_22939); +x_22940 = lean_ctor_get(x_22933, 1); +lean_inc(x_22940); +if (lean_is_exclusive(x_22933)) { + lean_ctor_release(x_22933, 0); + lean_ctor_release(x_22933, 1); + x_22941 = x_22933; +} else { + lean_dec_ref(x_22933); + x_22941 = lean_box(0); +} +if (lean_is_scalar(x_22941)) { + x_22942 = lean_alloc_ctor(1, 2, 0); +} else { + x_22942 = x_22941; +} +lean_ctor_set(x_22942, 0, x_22939); +lean_ctor_set(x_22942, 1, x_22940); +return x_22942; +} +} +else +{ +lean_object* x_22943; lean_object* x_22944; lean_object* x_22945; lean_object* x_22946; lean_object* x_22947; +lean_dec(x_22801); +lean_dec(x_153); +lean_dec(x_5); +lean_dec(x_4); +lean_dec(x_2); +lean_dec(x_1); +x_22943 = lean_ctor_get(x_22921, 1); +lean_inc(x_22943); +if (lean_is_exclusive(x_22921)) { + lean_ctor_release(x_22921, 0); + lean_ctor_release(x_22921, 1); + x_22944 = x_22921; +} else { + lean_dec_ref(x_22921); + x_22944 = lean_box(0); +} +x_22945 = lean_ctor_get(x_22923, 0); +lean_inc(x_22945); +lean_dec(x_22923); +if (lean_is_scalar(x_22944)) { + x_22946 = lean_alloc_ctor(0, 2, 0); +} else { + x_22946 = x_22944; +} +lean_ctor_set(x_22946, 0, x_22945); +lean_ctor_set(x_22946, 1, x_22943); +if (lean_is_scalar(x_22811)) { + x_22947 = lean_alloc_ctor(0, 2, 0); +} else { + x_22947 = x_22811; +} +lean_ctor_set(x_22947, 0, x_22946); +lean_ctor_set(x_22947, 1, x_22922); +return x_22947; +} +} +} +case 2: +{ +lean_object* x_23049; lean_object* x_23050; +lean_dec(x_22817); +lean_dec(x_22812); +lean_dec(x_22811); +lean_dec(x_22807); +lean_dec(x_22801); +lean_dec(x_20593); +lean_dec(x_20592); +lean_dec(x_153); +lean_dec(x_2); +lean_dec(x_1); +x_23049 = l_Lean_IR_ToIR_lowerLet___closed__18; +x_23050 = l_panic___at_Lean_IR_ToIR_lowerAlt_loop___spec__1(x_23049, x_22806, x_4, x_5, x_22810); +return x_23050; +} +case 3: +{ +lean_object* x_23051; lean_object* x_23052; lean_object* x_23079; lean_object* x_23080; +lean_dec(x_22817); +lean_dec(x_22812); +lean_dec(x_20593); +lean_dec(x_20592); +lean_inc(x_153); +x_23079 = l_Lean_Compiler_LCNF_getMonoDecl_x3f(x_153, x_4, x_5, x_22810); +x_23080 = lean_ctor_get(x_23079, 0); +lean_inc(x_23080); +if (lean_obj_tag(x_23080) == 0) +{ +lean_object* x_23081; lean_object* x_23082; lean_object* x_23083; +x_23081 = lean_ctor_get(x_23079, 1); +lean_inc(x_23081); +lean_dec(x_23079); +x_23082 = lean_box(0); +if (lean_is_scalar(x_22807)) { + x_23083 = lean_alloc_ctor(0, 2, 0); +} else { + x_23083 = x_22807; +} +lean_ctor_set(x_23083, 0, x_23082); +lean_ctor_set(x_23083, 1, x_22806); +x_23051 = x_23083; +x_23052 = x_23081; +goto block_23078; +} +else +{ +lean_object* x_23084; lean_object* x_23085; lean_object* x_23086; lean_object* x_23087; lean_object* x_23088; lean_object* x_23089; lean_object* x_23090; uint8_t x_23091; +lean_dec(x_22807); +x_23084 = lean_ctor_get(x_23079, 1); +lean_inc(x_23084); +if (lean_is_exclusive(x_23079)) { + lean_ctor_release(x_23079, 0); + 
lean_ctor_release(x_23079, 1); + x_23085 = x_23079; +} else { + lean_dec_ref(x_23079); + x_23085 = lean_box(0); +} +x_23086 = lean_ctor_get(x_23080, 0); +lean_inc(x_23086); +if (lean_is_exclusive(x_23080)) { + lean_ctor_release(x_23080, 0); + x_23087 = x_23080; +} else { + lean_dec_ref(x_23080); + x_23087 = lean_box(0); +} +x_23088 = lean_array_get_size(x_22801); +x_23089 = lean_ctor_get(x_23086, 3); +lean_inc(x_23089); +lean_dec(x_23086); +x_23090 = lean_array_get_size(x_23089); +lean_dec(x_23089); +x_23091 = lean_nat_dec_lt(x_23088, x_23090); +if (x_23091 == 0) +{ +uint8_t x_23092; +x_23092 = lean_nat_dec_eq(x_23088, x_23090); +if (x_23092 == 0) +{ +lean_object* x_23093; lean_object* x_23094; lean_object* x_23095; lean_object* x_23096; lean_object* x_23097; lean_object* x_23098; lean_object* x_23099; lean_object* x_23100; lean_object* x_23101; lean_object* x_23102; lean_object* x_23103; lean_object* x_23104; lean_object* x_23105; lean_object* x_23106; lean_object* x_23107; lean_object* x_23108; lean_object* x_23109; +x_23093 = lean_unsigned_to_nat(0u); +x_23094 = l_Array_extract___rarg(x_22801, x_23093, x_23090); +x_23095 = l_Array_extract___rarg(x_22801, x_23090, x_23088); +lean_dec(x_23088); +lean_inc(x_153); +if (lean_is_scalar(x_23085)) { + x_23096 = lean_alloc_ctor(6, 2, 0); +} else { + x_23096 = x_23085; + lean_ctor_set_tag(x_23096, 6); +} +lean_ctor_set(x_23096, 0, x_153); +lean_ctor_set(x_23096, 1, x_23094); +x_23097 = lean_ctor_get(x_1, 0); +lean_inc(x_23097); +x_23098 = l_Lean_IR_ToIR_bindVar(x_23097, x_22806, x_4, x_5, x_23084); +x_23099 = lean_ctor_get(x_23098, 0); +lean_inc(x_23099); +x_23100 = lean_ctor_get(x_23098, 1); +lean_inc(x_23100); +lean_dec(x_23098); +x_23101 = lean_ctor_get(x_23099, 0); +lean_inc(x_23101); +x_23102 = lean_ctor_get(x_23099, 1); +lean_inc(x_23102); +lean_dec(x_23099); +x_23103 = l_Lean_IR_ToIR_newVar(x_23102, x_4, x_5, x_23100); +x_23104 = lean_ctor_get(x_23103, 0); +lean_inc(x_23104); +x_23105 = lean_ctor_get(x_23103, 1); +lean_inc(x_23105); +lean_dec(x_23103); +x_23106 = lean_ctor_get(x_23104, 0); +lean_inc(x_23106); +x_23107 = lean_ctor_get(x_23104, 1); +lean_inc(x_23107); +lean_dec(x_23104); +x_23108 = lean_ctor_get(x_1, 2); +lean_inc(x_23108); +lean_inc(x_5); +lean_inc(x_4); +x_23109 = l_Lean_IR_ToIR_lowerType(x_23108, x_23107, x_4, x_5, x_23105); +if (lean_obj_tag(x_23109) == 0) +{ +lean_object* x_23110; lean_object* x_23111; lean_object* x_23112; lean_object* x_23113; lean_object* x_23114; +x_23110 = lean_ctor_get(x_23109, 0); +lean_inc(x_23110); +x_23111 = lean_ctor_get(x_23109, 1); +lean_inc(x_23111); +lean_dec(x_23109); +x_23112 = lean_ctor_get(x_23110, 0); +lean_inc(x_23112); +x_23113 = lean_ctor_get(x_23110, 1); +lean_inc(x_23113); +lean_dec(x_23110); +lean_inc(x_5); +lean_inc(x_4); +lean_inc(x_2); +x_23114 = l_Lean_IR_ToIR_lowerLet___lambda__3(x_2, x_23106, x_23095, x_23101, x_23096, x_23112, x_23113, x_4, x_5, x_23111); +if (lean_obj_tag(x_23114) == 0) +{ +lean_object* x_23115; lean_object* x_23116; lean_object* x_23117; lean_object* x_23118; lean_object* x_23119; lean_object* x_23120; lean_object* x_23121; +x_23115 = lean_ctor_get(x_23114, 0); +lean_inc(x_23115); +x_23116 = lean_ctor_get(x_23114, 1); +lean_inc(x_23116); +lean_dec(x_23114); +x_23117 = lean_ctor_get(x_23115, 0); +lean_inc(x_23117); +x_23118 = lean_ctor_get(x_23115, 1); +lean_inc(x_23118); +if (lean_is_exclusive(x_23115)) { + lean_ctor_release(x_23115, 0); + lean_ctor_release(x_23115, 1); + x_23119 = x_23115; +} else { + lean_dec_ref(x_23115); + x_23119 = lean_box(0); +} 
+if (lean_is_scalar(x_23087)) { + x_23120 = lean_alloc_ctor(1, 1, 0); +} else { + x_23120 = x_23087; +} +lean_ctor_set(x_23120, 0, x_23117); +if (lean_is_scalar(x_23119)) { + x_23121 = lean_alloc_ctor(0, 2, 0); +} else { + x_23121 = x_23119; +} +lean_ctor_set(x_23121, 0, x_23120); +lean_ctor_set(x_23121, 1, x_23118); +x_23051 = x_23121; +x_23052 = x_23116; +goto block_23078; +} +else +{ +lean_object* x_23122; lean_object* x_23123; lean_object* x_23124; lean_object* x_23125; +lean_dec(x_23087); +lean_dec(x_22811); +lean_dec(x_22801); +lean_dec(x_153); +lean_dec(x_5); +lean_dec(x_4); +lean_dec(x_2); +lean_dec(x_1); +x_23122 = lean_ctor_get(x_23114, 0); +lean_inc(x_23122); +x_23123 = lean_ctor_get(x_23114, 1); +lean_inc(x_23123); +if (lean_is_exclusive(x_23114)) { + lean_ctor_release(x_23114, 0); + lean_ctor_release(x_23114, 1); + x_23124 = x_23114; +} else { + lean_dec_ref(x_23114); + x_23124 = lean_box(0); +} +if (lean_is_scalar(x_23124)) { + x_23125 = lean_alloc_ctor(1, 2, 0); +} else { + x_23125 = x_23124; +} +lean_ctor_set(x_23125, 0, x_23122); +lean_ctor_set(x_23125, 1, x_23123); +return x_23125; +} +} +else +{ +lean_object* x_23126; lean_object* x_23127; lean_object* x_23128; lean_object* x_23129; +lean_dec(x_23106); +lean_dec(x_23101); +lean_dec(x_23096); +lean_dec(x_23095); +lean_dec(x_23087); +lean_dec(x_22811); +lean_dec(x_22801); +lean_dec(x_153); +lean_dec(x_5); +lean_dec(x_4); +lean_dec(x_2); +lean_dec(x_1); +x_23126 = lean_ctor_get(x_23109, 0); +lean_inc(x_23126); +x_23127 = lean_ctor_get(x_23109, 1); +lean_inc(x_23127); +if (lean_is_exclusive(x_23109)) { + lean_ctor_release(x_23109, 0); + lean_ctor_release(x_23109, 1); + x_23128 = x_23109; +} else { + lean_dec_ref(x_23109); + x_23128 = lean_box(0); +} +if (lean_is_scalar(x_23128)) { + x_23129 = lean_alloc_ctor(1, 2, 0); +} else { + x_23129 = x_23128; +} +lean_ctor_set(x_23129, 0, x_23126); +lean_ctor_set(x_23129, 1, x_23127); +return x_23129; +} +} +else +{ +lean_object* x_23130; lean_object* x_23131; lean_object* x_23132; lean_object* x_23133; lean_object* x_23134; lean_object* x_23135; lean_object* x_23136; lean_object* x_23137; lean_object* x_23138; +lean_dec(x_23090); +lean_dec(x_23088); +lean_inc(x_22801); +lean_inc(x_153); +if (lean_is_scalar(x_23085)) { + x_23130 = lean_alloc_ctor(6, 2, 0); +} else { + x_23130 = x_23085; + lean_ctor_set_tag(x_23130, 6); +} +lean_ctor_set(x_23130, 0, x_153); +lean_ctor_set(x_23130, 1, x_22801); +x_23131 = lean_ctor_get(x_1, 0); +lean_inc(x_23131); +x_23132 = l_Lean_IR_ToIR_bindVar(x_23131, x_22806, x_4, x_5, x_23084); +x_23133 = lean_ctor_get(x_23132, 0); +lean_inc(x_23133); +x_23134 = lean_ctor_get(x_23132, 1); +lean_inc(x_23134); +lean_dec(x_23132); +x_23135 = lean_ctor_get(x_23133, 0); +lean_inc(x_23135); +x_23136 = lean_ctor_get(x_23133, 1); +lean_inc(x_23136); +lean_dec(x_23133); +x_23137 = lean_ctor_get(x_1, 2); +lean_inc(x_23137); +lean_inc(x_5); +lean_inc(x_4); +x_23138 = l_Lean_IR_ToIR_lowerType(x_23137, x_23136, x_4, x_5, x_23134); +if (lean_obj_tag(x_23138) == 0) +{ +lean_object* x_23139; lean_object* x_23140; lean_object* x_23141; lean_object* x_23142; lean_object* x_23143; +x_23139 = lean_ctor_get(x_23138, 0); +lean_inc(x_23139); +x_23140 = lean_ctor_get(x_23138, 1); +lean_inc(x_23140); +lean_dec(x_23138); +x_23141 = lean_ctor_get(x_23139, 0); +lean_inc(x_23141); +x_23142 = lean_ctor_get(x_23139, 1); +lean_inc(x_23142); +lean_dec(x_23139); +lean_inc(x_5); +lean_inc(x_4); +lean_inc(x_2); +x_23143 = l_Lean_IR_ToIR_lowerLet___lambda__1(x_2, x_23135, x_23130, x_23141, x_23142, 
x_4, x_5, x_23140); +if (lean_obj_tag(x_23143) == 0) +{ +lean_object* x_23144; lean_object* x_23145; lean_object* x_23146; lean_object* x_23147; lean_object* x_23148; lean_object* x_23149; lean_object* x_23150; +x_23144 = lean_ctor_get(x_23143, 0); +lean_inc(x_23144); +x_23145 = lean_ctor_get(x_23143, 1); +lean_inc(x_23145); +lean_dec(x_23143); +x_23146 = lean_ctor_get(x_23144, 0); +lean_inc(x_23146); +x_23147 = lean_ctor_get(x_23144, 1); +lean_inc(x_23147); +if (lean_is_exclusive(x_23144)) { + lean_ctor_release(x_23144, 0); + lean_ctor_release(x_23144, 1); + x_23148 = x_23144; +} else { + lean_dec_ref(x_23144); + x_23148 = lean_box(0); +} +if (lean_is_scalar(x_23087)) { + x_23149 = lean_alloc_ctor(1, 1, 0); +} else { + x_23149 = x_23087; +} +lean_ctor_set(x_23149, 0, x_23146); +if (lean_is_scalar(x_23148)) { + x_23150 = lean_alloc_ctor(0, 2, 0); +} else { + x_23150 = x_23148; +} +lean_ctor_set(x_23150, 0, x_23149); +lean_ctor_set(x_23150, 1, x_23147); +x_23051 = x_23150; +x_23052 = x_23145; +goto block_23078; +} +else +{ +lean_object* x_23151; lean_object* x_23152; lean_object* x_23153; lean_object* x_23154; +lean_dec(x_23087); +lean_dec(x_22811); +lean_dec(x_22801); +lean_dec(x_153); +lean_dec(x_5); +lean_dec(x_4); +lean_dec(x_2); +lean_dec(x_1); +x_23151 = lean_ctor_get(x_23143, 0); +lean_inc(x_23151); +x_23152 = lean_ctor_get(x_23143, 1); +lean_inc(x_23152); +if (lean_is_exclusive(x_23143)) { + lean_ctor_release(x_23143, 0); + lean_ctor_release(x_23143, 1); + x_23153 = x_23143; +} else { + lean_dec_ref(x_23143); + x_23153 = lean_box(0); +} +if (lean_is_scalar(x_23153)) { + x_23154 = lean_alloc_ctor(1, 2, 0); +} else { + x_23154 = x_23153; +} +lean_ctor_set(x_23154, 0, x_23151); +lean_ctor_set(x_23154, 1, x_23152); +return x_23154; +} +} +else +{ +lean_object* x_23155; lean_object* x_23156; lean_object* x_23157; lean_object* x_23158; +lean_dec(x_23135); +lean_dec(x_23130); +lean_dec(x_23087); +lean_dec(x_22811); +lean_dec(x_22801); +lean_dec(x_153); +lean_dec(x_5); +lean_dec(x_4); +lean_dec(x_2); +lean_dec(x_1); +x_23155 = lean_ctor_get(x_23138, 0); +lean_inc(x_23155); +x_23156 = lean_ctor_get(x_23138, 1); +lean_inc(x_23156); +if (lean_is_exclusive(x_23138)) { + lean_ctor_release(x_23138, 0); + lean_ctor_release(x_23138, 1); + x_23157 = x_23138; +} else { + lean_dec_ref(x_23138); + x_23157 = lean_box(0); +} +if (lean_is_scalar(x_23157)) { + x_23158 = lean_alloc_ctor(1, 2, 0); +} else { + x_23158 = x_23157; +} +lean_ctor_set(x_23158, 0, x_23155); +lean_ctor_set(x_23158, 1, x_23156); +return x_23158; +} +} +} +else +{ +lean_object* x_23159; lean_object* x_23160; lean_object* x_23161; lean_object* x_23162; lean_object* x_23163; lean_object* x_23164; lean_object* x_23165; lean_object* x_23166; lean_object* x_23167; +lean_dec(x_23090); +lean_dec(x_23088); +lean_inc(x_22801); +lean_inc(x_153); +if (lean_is_scalar(x_23085)) { + x_23159 = lean_alloc_ctor(7, 2, 0); +} else { + x_23159 = x_23085; + lean_ctor_set_tag(x_23159, 7); +} +lean_ctor_set(x_23159, 0, x_153); +lean_ctor_set(x_23159, 1, x_22801); +x_23160 = lean_ctor_get(x_1, 0); +lean_inc(x_23160); +x_23161 = l_Lean_IR_ToIR_bindVar(x_23160, x_22806, x_4, x_5, x_23084); +x_23162 = lean_ctor_get(x_23161, 0); +lean_inc(x_23162); +x_23163 = lean_ctor_get(x_23161, 1); +lean_inc(x_23163); +lean_dec(x_23161); +x_23164 = lean_ctor_get(x_23162, 0); +lean_inc(x_23164); +x_23165 = lean_ctor_get(x_23162, 1); +lean_inc(x_23165); +lean_dec(x_23162); +x_23166 = lean_box(7); +lean_inc(x_5); +lean_inc(x_4); +lean_inc(x_2); +x_23167 = 
l_Lean_IR_ToIR_lowerLet___lambda__1(x_2, x_23164, x_23159, x_23166, x_23165, x_4, x_5, x_23163); +if (lean_obj_tag(x_23167) == 0) +{ +lean_object* x_23168; lean_object* x_23169; lean_object* x_23170; lean_object* x_23171; lean_object* x_23172; lean_object* x_23173; lean_object* x_23174; +x_23168 = lean_ctor_get(x_23167, 0); +lean_inc(x_23168); +x_23169 = lean_ctor_get(x_23167, 1); +lean_inc(x_23169); +lean_dec(x_23167); +x_23170 = lean_ctor_get(x_23168, 0); +lean_inc(x_23170); +x_23171 = lean_ctor_get(x_23168, 1); +lean_inc(x_23171); +if (lean_is_exclusive(x_23168)) { + lean_ctor_release(x_23168, 0); + lean_ctor_release(x_23168, 1); + x_23172 = x_23168; +} else { + lean_dec_ref(x_23168); + x_23172 = lean_box(0); +} +if (lean_is_scalar(x_23087)) { + x_23173 = lean_alloc_ctor(1, 1, 0); +} else { + x_23173 = x_23087; +} +lean_ctor_set(x_23173, 0, x_23170); +if (lean_is_scalar(x_23172)) { + x_23174 = lean_alloc_ctor(0, 2, 0); +} else { + x_23174 = x_23172; +} +lean_ctor_set(x_23174, 0, x_23173); +lean_ctor_set(x_23174, 1, x_23171); +x_23051 = x_23174; +x_23052 = x_23169; +goto block_23078; +} +else +{ +lean_object* x_23175; lean_object* x_23176; lean_object* x_23177; lean_object* x_23178; +lean_dec(x_23087); +lean_dec(x_22811); +lean_dec(x_22801); +lean_dec(x_153); +lean_dec(x_5); +lean_dec(x_4); +lean_dec(x_2); +lean_dec(x_1); +x_23175 = lean_ctor_get(x_23167, 0); +lean_inc(x_23175); +x_23176 = lean_ctor_get(x_23167, 1); +lean_inc(x_23176); +if (lean_is_exclusive(x_23167)) { + lean_ctor_release(x_23167, 0); + lean_ctor_release(x_23167, 1); + x_23177 = x_23167; +} else { + lean_dec_ref(x_23167); + x_23177 = lean_box(0); +} +if (lean_is_scalar(x_23177)) { + x_23178 = lean_alloc_ctor(1, 2, 0); +} else { + x_23178 = x_23177; +} +lean_ctor_set(x_23178, 0, x_23175); +lean_ctor_set(x_23178, 1, x_23176); +return x_23178; +} +} +} +block_23078: +{ +lean_object* x_23053; +x_23053 = lean_ctor_get(x_23051, 0); +lean_inc(x_23053); +if (lean_obj_tag(x_23053) == 0) +{ +lean_object* x_23054; lean_object* x_23055; lean_object* x_23056; lean_object* x_23057; lean_object* x_23058; lean_object* x_23059; lean_object* x_23060; lean_object* x_23061; lean_object* x_23062; lean_object* x_23063; +lean_dec(x_22811); +x_23054 = lean_ctor_get(x_23051, 1); +lean_inc(x_23054); +lean_dec(x_23051); +x_23055 = lean_alloc_ctor(6, 2, 0); +lean_ctor_set(x_23055, 0, x_153); +lean_ctor_set(x_23055, 1, x_22801); +x_23056 = lean_ctor_get(x_1, 0); +lean_inc(x_23056); +x_23057 = l_Lean_IR_ToIR_bindVar(x_23056, x_23054, x_4, x_5, x_23052); +x_23058 = lean_ctor_get(x_23057, 0); +lean_inc(x_23058); +x_23059 = lean_ctor_get(x_23057, 1); +lean_inc(x_23059); +lean_dec(x_23057); +x_23060 = lean_ctor_get(x_23058, 0); +lean_inc(x_23060); +x_23061 = lean_ctor_get(x_23058, 1); +lean_inc(x_23061); +lean_dec(x_23058); +x_23062 = lean_ctor_get(x_1, 2); +lean_inc(x_23062); +lean_dec(x_1); +lean_inc(x_5); +lean_inc(x_4); +x_23063 = l_Lean_IR_ToIR_lowerType(x_23062, x_23061, x_4, x_5, x_23059); +if (lean_obj_tag(x_23063) == 0) +{ +lean_object* x_23064; lean_object* x_23065; lean_object* x_23066; lean_object* x_23067; lean_object* x_23068; +x_23064 = lean_ctor_get(x_23063, 0); +lean_inc(x_23064); +x_23065 = lean_ctor_get(x_23063, 1); +lean_inc(x_23065); +lean_dec(x_23063); +x_23066 = lean_ctor_get(x_23064, 0); +lean_inc(x_23066); +x_23067 = lean_ctor_get(x_23064, 1); +lean_inc(x_23067); +lean_dec(x_23064); +x_23068 = l_Lean_IR_ToIR_lowerLet___lambda__1(x_2, x_23060, x_23055, x_23066, x_23067, x_4, x_5, x_23065); +return x_23068; +} +else +{ 
+lean_object* x_23069; lean_object* x_23070; lean_object* x_23071; lean_object* x_23072; +lean_dec(x_23060); +lean_dec(x_23055); +lean_dec(x_5); +lean_dec(x_4); +lean_dec(x_2); +x_23069 = lean_ctor_get(x_23063, 0); +lean_inc(x_23069); +x_23070 = lean_ctor_get(x_23063, 1); +lean_inc(x_23070); +if (lean_is_exclusive(x_23063)) { + lean_ctor_release(x_23063, 0); + lean_ctor_release(x_23063, 1); + x_23071 = x_23063; +} else { + lean_dec_ref(x_23063); + x_23071 = lean_box(0); +} +if (lean_is_scalar(x_23071)) { + x_23072 = lean_alloc_ctor(1, 2, 0); +} else { + x_23072 = x_23071; +} +lean_ctor_set(x_23072, 0, x_23069); +lean_ctor_set(x_23072, 1, x_23070); +return x_23072; +} +} +else +{ +lean_object* x_23073; lean_object* x_23074; lean_object* x_23075; lean_object* x_23076; lean_object* x_23077; +lean_dec(x_22801); +lean_dec(x_153); +lean_dec(x_5); +lean_dec(x_4); +lean_dec(x_2); +lean_dec(x_1); +x_23073 = lean_ctor_get(x_23051, 1); +lean_inc(x_23073); +if (lean_is_exclusive(x_23051)) { + lean_ctor_release(x_23051, 0); + lean_ctor_release(x_23051, 1); + x_23074 = x_23051; +} else { + lean_dec_ref(x_23051); + x_23074 = lean_box(0); +} +x_23075 = lean_ctor_get(x_23053, 0); +lean_inc(x_23075); +lean_dec(x_23053); +if (lean_is_scalar(x_23074)) { + x_23076 = lean_alloc_ctor(0, 2, 0); +} else { + x_23076 = x_23074; +} +lean_ctor_set(x_23076, 0, x_23075); +lean_ctor_set(x_23076, 1, x_23073); +if (lean_is_scalar(x_22811)) { + x_23077 = lean_alloc_ctor(0, 2, 0); +} else { + x_23077 = x_22811; +} +lean_ctor_set(x_23077, 0, x_23076); +lean_ctor_set(x_23077, 1, x_23052); +return x_23077; +} +} +} +case 4: +{ +lean_object* x_23179; lean_object* x_23180; uint8_t x_23181; +lean_dec(x_22812); +lean_dec(x_22811); +lean_dec(x_22807); +lean_dec(x_20593); +lean_dec(x_20592); +if (lean_is_exclusive(x_22817)) { + lean_ctor_release(x_22817, 0); + x_23179 = x_22817; +} else { + lean_dec_ref(x_22817); + x_23179 = lean_box(0); +} +x_23180 = l_Lean_IR_ToIR_lowerLet___closed__20; +x_23181 = lean_name_eq(x_153, x_23180); +if (x_23181 == 0) +{ +uint8_t x_23182; lean_object* x_23183; lean_object* x_23184; lean_object* x_23185; lean_object* x_23186; lean_object* x_23187; lean_object* x_23188; lean_object* x_23189; lean_object* x_23190; lean_object* x_23191; +lean_dec(x_22801); +lean_dec(x_2); +lean_dec(x_1); +x_23182 = 1; +x_23183 = l_Lean_IR_ToIR_lowerLet___closed__14; +x_23184 = l_Lean_Name_toString(x_153, x_23182, x_23183); +if (lean_is_scalar(x_23179)) { + x_23185 = lean_alloc_ctor(3, 1, 0); +} else { + x_23185 = x_23179; + lean_ctor_set_tag(x_23185, 3); +} +lean_ctor_set(x_23185, 0, x_23184); +x_23186 = l_Lean_IR_ToIR_lowerLet___closed__22; +x_23187 = lean_alloc_ctor(5, 2, 0); +lean_ctor_set(x_23187, 0, x_23186); +lean_ctor_set(x_23187, 1, x_23185); +x_23188 = l_Lean_IR_ToIR_lowerLet___closed__24; +x_23189 = lean_alloc_ctor(5, 2, 0); +lean_ctor_set(x_23189, 0, x_23187); +lean_ctor_set(x_23189, 1, x_23188); +x_23190 = l_Lean_MessageData_ofFormat(x_23189); +x_23191 = l_Lean_throwError___at_Lean_IR_ToIR_lowerLet___spec__1(x_23190, x_22806, x_4, x_5, x_22810); +lean_dec(x_5); +lean_dec(x_4); +lean_dec(x_22806); +return x_23191; +} +else +{ +lean_object* x_23192; lean_object* x_23193; lean_object* x_23194; +lean_dec(x_23179); +lean_dec(x_153); +x_23192 = l_Lean_IR_instInhabitedArg; +x_23193 = lean_unsigned_to_nat(2u); +x_23194 = lean_array_get(x_23192, x_22801, x_23193); +lean_dec(x_22801); +if (lean_obj_tag(x_23194) == 0) +{ +lean_object* x_23195; lean_object* x_23196; lean_object* x_23197; lean_object* x_23198; lean_object* 
x_23199; lean_object* x_23200; lean_object* x_23201; +x_23195 = lean_ctor_get(x_23194, 0); +lean_inc(x_23195); +lean_dec(x_23194); +x_23196 = lean_ctor_get(x_1, 0); +lean_inc(x_23196); +lean_dec(x_1); +x_23197 = l_Lean_IR_ToIR_bindVarToVarId(x_23196, x_23195, x_22806, x_4, x_5, x_22810); +x_23198 = lean_ctor_get(x_23197, 0); +lean_inc(x_23198); +x_23199 = lean_ctor_get(x_23197, 1); +lean_inc(x_23199); +lean_dec(x_23197); +x_23200 = lean_ctor_get(x_23198, 1); +lean_inc(x_23200); +lean_dec(x_23198); +x_23201 = l_Lean_IR_ToIR_lowerCode(x_2, x_23200, x_4, x_5, x_23199); +return x_23201; +} +else +{ +lean_object* x_23202; lean_object* x_23203; lean_object* x_23204; lean_object* x_23205; lean_object* x_23206; lean_object* x_23207; +x_23202 = lean_ctor_get(x_1, 0); +lean_inc(x_23202); +lean_dec(x_1); +x_23203 = l_Lean_IR_ToIR_bindErased(x_23202, x_22806, x_4, x_5, x_22810); +x_23204 = lean_ctor_get(x_23203, 0); +lean_inc(x_23204); +x_23205 = lean_ctor_get(x_23203, 1); +lean_inc(x_23205); +lean_dec(x_23203); +x_23206 = lean_ctor_get(x_23204, 1); +lean_inc(x_23206); +lean_dec(x_23204); +x_23207 = l_Lean_IR_ToIR_lowerCode(x_2, x_23206, x_4, x_5, x_23205); +return x_23207; +} +} +} +case 5: +{ +lean_object* x_23208; lean_object* x_23209; +lean_dec(x_22817); +lean_dec(x_22812); +lean_dec(x_22811); +lean_dec(x_22807); +lean_dec(x_22801); +lean_dec(x_20593); +lean_dec(x_20592); +lean_dec(x_153); +lean_dec(x_2); +lean_dec(x_1); +x_23208 = l_Lean_IR_ToIR_lowerLet___closed__26; +x_23209 = l_panic___at_Lean_IR_ToIR_lowerAlt_loop___spec__1(x_23208, x_22806, x_4, x_5, x_22810); +return x_23209; +} +case 6: +{ +lean_object* x_23210; uint8_t x_23211; +x_23210 = lean_ctor_get(x_22817, 0); +lean_inc(x_23210); +lean_dec(x_22817); +lean_inc(x_153); +x_23211 = l_Lean_isExtern(x_22812, x_153); +if (x_23211 == 0) +{ +lean_object* x_23212; +lean_dec(x_22811); +lean_dec(x_22807); +lean_dec(x_22801); +lean_inc(x_5); +lean_inc(x_4); +x_23212 = l_Lean_IR_ToIR_getCtorInfo(x_153, x_22806, x_4, x_5, x_22810); +if (lean_obj_tag(x_23212) == 0) +{ +lean_object* x_23213; lean_object* x_23214; lean_object* x_23215; lean_object* x_23216; lean_object* x_23217; lean_object* x_23218; lean_object* x_23219; lean_object* x_23220; lean_object* x_23221; lean_object* x_23222; lean_object* x_23223; lean_object* x_23224; lean_object* x_23225; lean_object* x_23226; lean_object* x_23227; lean_object* x_23228; lean_object* x_23229; lean_object* x_23230; lean_object* x_23231; lean_object* x_23232; +x_23213 = lean_ctor_get(x_23212, 0); +lean_inc(x_23213); +x_23214 = lean_ctor_get(x_23213, 0); +lean_inc(x_23214); +x_23215 = lean_ctor_get(x_23212, 1); +lean_inc(x_23215); +lean_dec(x_23212); +x_23216 = lean_ctor_get(x_23213, 1); +lean_inc(x_23216); +lean_dec(x_23213); +x_23217 = lean_ctor_get(x_23214, 0); +lean_inc(x_23217); +x_23218 = lean_ctor_get(x_23214, 1); +lean_inc(x_23218); +lean_dec(x_23214); +x_23219 = lean_ctor_get(x_23210, 3); +lean_inc(x_23219); +lean_dec(x_23210); +x_23220 = lean_array_get_size(x_20592); +x_23221 = l_Array_extract___rarg(x_20592, x_23219, x_23220); +lean_dec(x_23220); +lean_dec(x_20592); +x_23222 = lean_array_get_size(x_23218); +x_23223 = lean_unsigned_to_nat(0u); +x_23224 = lean_unsigned_to_nat(1u); +if (lean_is_scalar(x_20593)) { + x_23225 = lean_alloc_ctor(0, 3, 0); +} else { + x_23225 = x_20593; + lean_ctor_set_tag(x_23225, 0); +} +lean_ctor_set(x_23225, 0, x_23223); +lean_ctor_set(x_23225, 1, x_23222); +lean_ctor_set(x_23225, 2, x_23224); +x_23226 = l_Lean_IR_ToIR_lowerLet___closed__27; +x_23227 = 
l_Std_Range_forIn_x27_loop___at_Lean_IR_ToIR_lowerLet___spec__9(x_23218, x_23221, x_23225, x_23225, x_23226, x_23223, lean_box(0), lean_box(0), x_23216, x_4, x_5, x_23215); +lean_dec(x_23225); +x_23228 = lean_ctor_get(x_23227, 0); +lean_inc(x_23228); +x_23229 = lean_ctor_get(x_23227, 1); +lean_inc(x_23229); +lean_dec(x_23227); +x_23230 = lean_ctor_get(x_23228, 0); +lean_inc(x_23230); +x_23231 = lean_ctor_get(x_23228, 1); +lean_inc(x_23231); +lean_dec(x_23228); +x_23232 = l_Lean_IR_ToIR_lowerLet___lambda__4(x_1, x_2, x_23217, x_23218, x_23221, x_23230, x_23231, x_4, x_5, x_23229); +lean_dec(x_23221); +lean_dec(x_23218); +return x_23232; +} +else +{ +lean_object* x_23233; lean_object* x_23234; lean_object* x_23235; lean_object* x_23236; +lean_dec(x_23210); +lean_dec(x_20593); +lean_dec(x_20592); +lean_dec(x_5); +lean_dec(x_4); +lean_dec(x_2); +lean_dec(x_1); +x_23233 = lean_ctor_get(x_23212, 0); +lean_inc(x_23233); +x_23234 = lean_ctor_get(x_23212, 1); +lean_inc(x_23234); +if (lean_is_exclusive(x_23212)) { + lean_ctor_release(x_23212, 0); + lean_ctor_release(x_23212, 1); + x_23235 = x_23212; +} else { + lean_dec_ref(x_23212); + x_23235 = lean_box(0); +} +if (lean_is_scalar(x_23235)) { + x_23236 = lean_alloc_ctor(1, 2, 0); +} else { + x_23236 = x_23235; +} +lean_ctor_set(x_23236, 0, x_23233); +lean_ctor_set(x_23236, 1, x_23234); +return x_23236; +} +} +else +{ +lean_object* x_23237; lean_object* x_23238; lean_object* x_23265; lean_object* x_23266; +lean_dec(x_23210); +lean_dec(x_20593); +lean_dec(x_20592); +lean_inc(x_153); +x_23265 = l_Lean_Compiler_LCNF_getMonoDecl_x3f(x_153, x_4, x_5, x_22810); +x_23266 = lean_ctor_get(x_23265, 0); +lean_inc(x_23266); +if (lean_obj_tag(x_23266) == 0) +{ +lean_object* x_23267; lean_object* x_23268; lean_object* x_23269; +x_23267 = lean_ctor_get(x_23265, 1); +lean_inc(x_23267); +lean_dec(x_23265); +x_23268 = lean_box(0); +if (lean_is_scalar(x_22807)) { + x_23269 = lean_alloc_ctor(0, 2, 0); +} else { + x_23269 = x_22807; +} +lean_ctor_set(x_23269, 0, x_23268); +lean_ctor_set(x_23269, 1, x_22806); +x_23237 = x_23269; +x_23238 = x_23267; +goto block_23264; +} +else +{ +lean_object* x_23270; lean_object* x_23271; lean_object* x_23272; lean_object* x_23273; lean_object* x_23274; lean_object* x_23275; lean_object* x_23276; uint8_t x_23277; +lean_dec(x_22807); +x_23270 = lean_ctor_get(x_23265, 1); +lean_inc(x_23270); +if (lean_is_exclusive(x_23265)) { + lean_ctor_release(x_23265, 0); + lean_ctor_release(x_23265, 1); + x_23271 = x_23265; +} else { + lean_dec_ref(x_23265); + x_23271 = lean_box(0); +} +x_23272 = lean_ctor_get(x_23266, 0); +lean_inc(x_23272); +if (lean_is_exclusive(x_23266)) { + lean_ctor_release(x_23266, 0); + x_23273 = x_23266; +} else { + lean_dec_ref(x_23266); + x_23273 = lean_box(0); +} +x_23274 = lean_array_get_size(x_22801); +x_23275 = lean_ctor_get(x_23272, 3); +lean_inc(x_23275); +lean_dec(x_23272); +x_23276 = lean_array_get_size(x_23275); +lean_dec(x_23275); +x_23277 = lean_nat_dec_lt(x_23274, x_23276); +if (x_23277 == 0) +{ +uint8_t x_23278; +x_23278 = lean_nat_dec_eq(x_23274, x_23276); +if (x_23278 == 0) +{ +lean_object* x_23279; lean_object* x_23280; lean_object* x_23281; lean_object* x_23282; lean_object* x_23283; lean_object* x_23284; lean_object* x_23285; lean_object* x_23286; lean_object* x_23287; lean_object* x_23288; lean_object* x_23289; lean_object* x_23290; lean_object* x_23291; lean_object* x_23292; lean_object* x_23293; lean_object* x_23294; lean_object* x_23295; +x_23279 = lean_unsigned_to_nat(0u); +x_23280 = 
l_Array_extract___rarg(x_22801, x_23279, x_23276); +x_23281 = l_Array_extract___rarg(x_22801, x_23276, x_23274); +lean_dec(x_23274); +lean_inc(x_153); +if (lean_is_scalar(x_23271)) { + x_23282 = lean_alloc_ctor(6, 2, 0); +} else { + x_23282 = x_23271; + lean_ctor_set_tag(x_23282, 6); +} +lean_ctor_set(x_23282, 0, x_153); +lean_ctor_set(x_23282, 1, x_23280); +x_23283 = lean_ctor_get(x_1, 0); +lean_inc(x_23283); +x_23284 = l_Lean_IR_ToIR_bindVar(x_23283, x_22806, x_4, x_5, x_23270); +x_23285 = lean_ctor_get(x_23284, 0); +lean_inc(x_23285); +x_23286 = lean_ctor_get(x_23284, 1); +lean_inc(x_23286); +lean_dec(x_23284); +x_23287 = lean_ctor_get(x_23285, 0); +lean_inc(x_23287); +x_23288 = lean_ctor_get(x_23285, 1); +lean_inc(x_23288); +lean_dec(x_23285); +x_23289 = l_Lean_IR_ToIR_newVar(x_23288, x_4, x_5, x_23286); +x_23290 = lean_ctor_get(x_23289, 0); +lean_inc(x_23290); +x_23291 = lean_ctor_get(x_23289, 1); +lean_inc(x_23291); +lean_dec(x_23289); +x_23292 = lean_ctor_get(x_23290, 0); +lean_inc(x_23292); +x_23293 = lean_ctor_get(x_23290, 1); +lean_inc(x_23293); +lean_dec(x_23290); +x_23294 = lean_ctor_get(x_1, 2); +lean_inc(x_23294); +lean_inc(x_5); +lean_inc(x_4); +x_23295 = l_Lean_IR_ToIR_lowerType(x_23294, x_23293, x_4, x_5, x_23291); +if (lean_obj_tag(x_23295) == 0) +{ +lean_object* x_23296; lean_object* x_23297; lean_object* x_23298; lean_object* x_23299; lean_object* x_23300; +x_23296 = lean_ctor_get(x_23295, 0); +lean_inc(x_23296); +x_23297 = lean_ctor_get(x_23295, 1); +lean_inc(x_23297); +lean_dec(x_23295); +x_23298 = lean_ctor_get(x_23296, 0); +lean_inc(x_23298); +x_23299 = lean_ctor_get(x_23296, 1); +lean_inc(x_23299); +lean_dec(x_23296); +lean_inc(x_5); +lean_inc(x_4); +lean_inc(x_2); +x_23300 = l_Lean_IR_ToIR_lowerLet___lambda__3(x_2, x_23292, x_23281, x_23287, x_23282, x_23298, x_23299, x_4, x_5, x_23297); +if (lean_obj_tag(x_23300) == 0) +{ +lean_object* x_23301; lean_object* x_23302; lean_object* x_23303; lean_object* x_23304; lean_object* x_23305; lean_object* x_23306; lean_object* x_23307; +x_23301 = lean_ctor_get(x_23300, 0); +lean_inc(x_23301); +x_23302 = lean_ctor_get(x_23300, 1); +lean_inc(x_23302); +lean_dec(x_23300); +x_23303 = lean_ctor_get(x_23301, 0); +lean_inc(x_23303); +x_23304 = lean_ctor_get(x_23301, 1); +lean_inc(x_23304); +if (lean_is_exclusive(x_23301)) { + lean_ctor_release(x_23301, 0); + lean_ctor_release(x_23301, 1); + x_23305 = x_23301; +} else { + lean_dec_ref(x_23301); + x_23305 = lean_box(0); +} +if (lean_is_scalar(x_23273)) { + x_23306 = lean_alloc_ctor(1, 1, 0); +} else { + x_23306 = x_23273; +} +lean_ctor_set(x_23306, 0, x_23303); +if (lean_is_scalar(x_23305)) { + x_23307 = lean_alloc_ctor(0, 2, 0); +} else { + x_23307 = x_23305; +} +lean_ctor_set(x_23307, 0, x_23306); +lean_ctor_set(x_23307, 1, x_23304); +x_23237 = x_23307; +x_23238 = x_23302; +goto block_23264; +} +else +{ +lean_object* x_23308; lean_object* x_23309; lean_object* x_23310; lean_object* x_23311; +lean_dec(x_23273); +lean_dec(x_22811); +lean_dec(x_22801); +lean_dec(x_153); +lean_dec(x_5); +lean_dec(x_4); +lean_dec(x_2); +lean_dec(x_1); +x_23308 = lean_ctor_get(x_23300, 0); +lean_inc(x_23308); +x_23309 = lean_ctor_get(x_23300, 1); +lean_inc(x_23309); +if (lean_is_exclusive(x_23300)) { + lean_ctor_release(x_23300, 0); + lean_ctor_release(x_23300, 1); + x_23310 = x_23300; +} else { + lean_dec_ref(x_23300); + x_23310 = lean_box(0); +} +if (lean_is_scalar(x_23310)) { + x_23311 = lean_alloc_ctor(1, 2, 0); +} else { + x_23311 = x_23310; +} +lean_ctor_set(x_23311, 0, x_23308); 
+lean_ctor_set(x_23311, 1, x_23309); +return x_23311; +} +} +else +{ +lean_object* x_23312; lean_object* x_23313; lean_object* x_23314; lean_object* x_23315; +lean_dec(x_23292); +lean_dec(x_23287); +lean_dec(x_23282); +lean_dec(x_23281); +lean_dec(x_23273); +lean_dec(x_22811); +lean_dec(x_22801); +lean_dec(x_153); +lean_dec(x_5); +lean_dec(x_4); +lean_dec(x_2); +lean_dec(x_1); +x_23312 = lean_ctor_get(x_23295, 0); +lean_inc(x_23312); +x_23313 = lean_ctor_get(x_23295, 1); +lean_inc(x_23313); +if (lean_is_exclusive(x_23295)) { + lean_ctor_release(x_23295, 0); + lean_ctor_release(x_23295, 1); + x_23314 = x_23295; +} else { + lean_dec_ref(x_23295); + x_23314 = lean_box(0); +} +if (lean_is_scalar(x_23314)) { + x_23315 = lean_alloc_ctor(1, 2, 0); +} else { + x_23315 = x_23314; +} +lean_ctor_set(x_23315, 0, x_23312); +lean_ctor_set(x_23315, 1, x_23313); +return x_23315; +} +} +else +{ +lean_object* x_23316; lean_object* x_23317; lean_object* x_23318; lean_object* x_23319; lean_object* x_23320; lean_object* x_23321; lean_object* x_23322; lean_object* x_23323; lean_object* x_23324; +lean_dec(x_23276); +lean_dec(x_23274); +lean_inc(x_22801); +lean_inc(x_153); +if (lean_is_scalar(x_23271)) { + x_23316 = lean_alloc_ctor(6, 2, 0); +} else { + x_23316 = x_23271; + lean_ctor_set_tag(x_23316, 6); +} +lean_ctor_set(x_23316, 0, x_153); +lean_ctor_set(x_23316, 1, x_22801); +x_23317 = lean_ctor_get(x_1, 0); +lean_inc(x_23317); +x_23318 = l_Lean_IR_ToIR_bindVar(x_23317, x_22806, x_4, x_5, x_23270); +x_23319 = lean_ctor_get(x_23318, 0); +lean_inc(x_23319); +x_23320 = lean_ctor_get(x_23318, 1); +lean_inc(x_23320); +lean_dec(x_23318); +x_23321 = lean_ctor_get(x_23319, 0); +lean_inc(x_23321); +x_23322 = lean_ctor_get(x_23319, 1); +lean_inc(x_23322); +lean_dec(x_23319); +x_23323 = lean_ctor_get(x_1, 2); +lean_inc(x_23323); +lean_inc(x_5); +lean_inc(x_4); +x_23324 = l_Lean_IR_ToIR_lowerType(x_23323, x_23322, x_4, x_5, x_23320); +if (lean_obj_tag(x_23324) == 0) +{ +lean_object* x_23325; lean_object* x_23326; lean_object* x_23327; lean_object* x_23328; lean_object* x_23329; +x_23325 = lean_ctor_get(x_23324, 0); +lean_inc(x_23325); +x_23326 = lean_ctor_get(x_23324, 1); +lean_inc(x_23326); +lean_dec(x_23324); +x_23327 = lean_ctor_get(x_23325, 0); +lean_inc(x_23327); +x_23328 = lean_ctor_get(x_23325, 1); +lean_inc(x_23328); +lean_dec(x_23325); +lean_inc(x_5); +lean_inc(x_4); +lean_inc(x_2); +x_23329 = l_Lean_IR_ToIR_lowerLet___lambda__1(x_2, x_23321, x_23316, x_23327, x_23328, x_4, x_5, x_23326); +if (lean_obj_tag(x_23329) == 0) +{ +lean_object* x_23330; lean_object* x_23331; lean_object* x_23332; lean_object* x_23333; lean_object* x_23334; lean_object* x_23335; lean_object* x_23336; +x_23330 = lean_ctor_get(x_23329, 0); +lean_inc(x_23330); +x_23331 = lean_ctor_get(x_23329, 1); +lean_inc(x_23331); +lean_dec(x_23329); +x_23332 = lean_ctor_get(x_23330, 0); +lean_inc(x_23332); +x_23333 = lean_ctor_get(x_23330, 1); +lean_inc(x_23333); +if (lean_is_exclusive(x_23330)) { + lean_ctor_release(x_23330, 0); + lean_ctor_release(x_23330, 1); + x_23334 = x_23330; +} else { + lean_dec_ref(x_23330); + x_23334 = lean_box(0); +} +if (lean_is_scalar(x_23273)) { + x_23335 = lean_alloc_ctor(1, 1, 0); +} else { + x_23335 = x_23273; +} +lean_ctor_set(x_23335, 0, x_23332); +if (lean_is_scalar(x_23334)) { + x_23336 = lean_alloc_ctor(0, 2, 0); +} else { + x_23336 = x_23334; +} +lean_ctor_set(x_23336, 0, x_23335); +lean_ctor_set(x_23336, 1, x_23333); +x_23237 = x_23336; +x_23238 = x_23331; +goto block_23264; +} +else +{ +lean_object* x_23337; 
lean_object* x_23338; lean_object* x_23339; lean_object* x_23340; +lean_dec(x_23273); +lean_dec(x_22811); +lean_dec(x_22801); +lean_dec(x_153); +lean_dec(x_5); +lean_dec(x_4); +lean_dec(x_2); +lean_dec(x_1); +x_23337 = lean_ctor_get(x_23329, 0); +lean_inc(x_23337); +x_23338 = lean_ctor_get(x_23329, 1); +lean_inc(x_23338); +if (lean_is_exclusive(x_23329)) { + lean_ctor_release(x_23329, 0); + lean_ctor_release(x_23329, 1); + x_23339 = x_23329; +} else { + lean_dec_ref(x_23329); + x_23339 = lean_box(0); +} +if (lean_is_scalar(x_23339)) { + x_23340 = lean_alloc_ctor(1, 2, 0); +} else { + x_23340 = x_23339; +} +lean_ctor_set(x_23340, 0, x_23337); +lean_ctor_set(x_23340, 1, x_23338); +return x_23340; +} +} +else +{ +lean_object* x_23341; lean_object* x_23342; lean_object* x_23343; lean_object* x_23344; +lean_dec(x_23321); +lean_dec(x_23316); +lean_dec(x_23273); +lean_dec(x_22811); +lean_dec(x_22801); +lean_dec(x_153); +lean_dec(x_5); +lean_dec(x_4); +lean_dec(x_2); +lean_dec(x_1); +x_23341 = lean_ctor_get(x_23324, 0); +lean_inc(x_23341); +x_23342 = lean_ctor_get(x_23324, 1); +lean_inc(x_23342); +if (lean_is_exclusive(x_23324)) { + lean_ctor_release(x_23324, 0); + lean_ctor_release(x_23324, 1); + x_23343 = x_23324; +} else { + lean_dec_ref(x_23324); + x_23343 = lean_box(0); +} +if (lean_is_scalar(x_23343)) { + x_23344 = lean_alloc_ctor(1, 2, 0); +} else { + x_23344 = x_23343; +} +lean_ctor_set(x_23344, 0, x_23341); +lean_ctor_set(x_23344, 1, x_23342); +return x_23344; +} +} +} +else +{ +lean_object* x_23345; lean_object* x_23346; lean_object* x_23347; lean_object* x_23348; lean_object* x_23349; lean_object* x_23350; lean_object* x_23351; lean_object* x_23352; lean_object* x_23353; +lean_dec(x_23276); +lean_dec(x_23274); +lean_inc(x_22801); +lean_inc(x_153); +if (lean_is_scalar(x_23271)) { + x_23345 = lean_alloc_ctor(7, 2, 0); +} else { + x_23345 = x_23271; + lean_ctor_set_tag(x_23345, 7); +} +lean_ctor_set(x_23345, 0, x_153); +lean_ctor_set(x_23345, 1, x_22801); +x_23346 = lean_ctor_get(x_1, 0); +lean_inc(x_23346); +x_23347 = l_Lean_IR_ToIR_bindVar(x_23346, x_22806, x_4, x_5, x_23270); +x_23348 = lean_ctor_get(x_23347, 0); +lean_inc(x_23348); +x_23349 = lean_ctor_get(x_23347, 1); +lean_inc(x_23349); +lean_dec(x_23347); +x_23350 = lean_ctor_get(x_23348, 0); +lean_inc(x_23350); +x_23351 = lean_ctor_get(x_23348, 1); +lean_inc(x_23351); +lean_dec(x_23348); +x_23352 = lean_box(7); +lean_inc(x_5); +lean_inc(x_4); +lean_inc(x_2); +x_23353 = l_Lean_IR_ToIR_lowerLet___lambda__1(x_2, x_23350, x_23345, x_23352, x_23351, x_4, x_5, x_23349); +if (lean_obj_tag(x_23353) == 0) +{ +lean_object* x_23354; lean_object* x_23355; lean_object* x_23356; lean_object* x_23357; lean_object* x_23358; lean_object* x_23359; lean_object* x_23360; +x_23354 = lean_ctor_get(x_23353, 0); +lean_inc(x_23354); +x_23355 = lean_ctor_get(x_23353, 1); +lean_inc(x_23355); +lean_dec(x_23353); +x_23356 = lean_ctor_get(x_23354, 0); +lean_inc(x_23356); +x_23357 = lean_ctor_get(x_23354, 1); +lean_inc(x_23357); +if (lean_is_exclusive(x_23354)) { + lean_ctor_release(x_23354, 0); + lean_ctor_release(x_23354, 1); + x_23358 = x_23354; +} else { + lean_dec_ref(x_23354); + x_23358 = lean_box(0); +} +if (lean_is_scalar(x_23273)) { + x_23359 = lean_alloc_ctor(1, 1, 0); +} else { + x_23359 = x_23273; +} +lean_ctor_set(x_23359, 0, x_23356); +if (lean_is_scalar(x_23358)) { + x_23360 = lean_alloc_ctor(0, 2, 0); +} else { + x_23360 = x_23358; +} +lean_ctor_set(x_23360, 0, x_23359); +lean_ctor_set(x_23360, 1, x_23357); +x_23237 = x_23360; +x_23238 = x_23355; 
+goto block_23264; +} +else +{ +lean_object* x_23361; lean_object* x_23362; lean_object* x_23363; lean_object* x_23364; +lean_dec(x_23273); +lean_dec(x_22811); +lean_dec(x_22801); +lean_dec(x_153); +lean_dec(x_5); +lean_dec(x_4); +lean_dec(x_2); +lean_dec(x_1); +x_23361 = lean_ctor_get(x_23353, 0); +lean_inc(x_23361); +x_23362 = lean_ctor_get(x_23353, 1); +lean_inc(x_23362); +if (lean_is_exclusive(x_23353)) { + lean_ctor_release(x_23353, 0); + lean_ctor_release(x_23353, 1); + x_23363 = x_23353; +} else { + lean_dec_ref(x_23353); + x_23363 = lean_box(0); +} +if (lean_is_scalar(x_23363)) { + x_23364 = lean_alloc_ctor(1, 2, 0); +} else { + x_23364 = x_23363; +} +lean_ctor_set(x_23364, 0, x_23361); +lean_ctor_set(x_23364, 1, x_23362); +return x_23364; +} +} +} +block_23264: +{ +lean_object* x_23239; +x_23239 = lean_ctor_get(x_23237, 0); +lean_inc(x_23239); +if (lean_obj_tag(x_23239) == 0) +{ +lean_object* x_23240; lean_object* x_23241; lean_object* x_23242; lean_object* x_23243; lean_object* x_23244; lean_object* x_23245; lean_object* x_23246; lean_object* x_23247; lean_object* x_23248; lean_object* x_23249; +lean_dec(x_22811); +x_23240 = lean_ctor_get(x_23237, 1); +lean_inc(x_23240); +lean_dec(x_23237); +x_23241 = lean_alloc_ctor(6, 2, 0); +lean_ctor_set(x_23241, 0, x_153); +lean_ctor_set(x_23241, 1, x_22801); +x_23242 = lean_ctor_get(x_1, 0); +lean_inc(x_23242); +x_23243 = l_Lean_IR_ToIR_bindVar(x_23242, x_23240, x_4, x_5, x_23238); +x_23244 = lean_ctor_get(x_23243, 0); +lean_inc(x_23244); +x_23245 = lean_ctor_get(x_23243, 1); +lean_inc(x_23245); +lean_dec(x_23243); +x_23246 = lean_ctor_get(x_23244, 0); +lean_inc(x_23246); +x_23247 = lean_ctor_get(x_23244, 1); +lean_inc(x_23247); +lean_dec(x_23244); +x_23248 = lean_ctor_get(x_1, 2); +lean_inc(x_23248); +lean_dec(x_1); +lean_inc(x_5); +lean_inc(x_4); +x_23249 = l_Lean_IR_ToIR_lowerType(x_23248, x_23247, x_4, x_5, x_23245); +if (lean_obj_tag(x_23249) == 0) +{ +lean_object* x_23250; lean_object* x_23251; lean_object* x_23252; lean_object* x_23253; lean_object* x_23254; +x_23250 = lean_ctor_get(x_23249, 0); +lean_inc(x_23250); +x_23251 = lean_ctor_get(x_23249, 1); +lean_inc(x_23251); +lean_dec(x_23249); +x_23252 = lean_ctor_get(x_23250, 0); +lean_inc(x_23252); +x_23253 = lean_ctor_get(x_23250, 1); +lean_inc(x_23253); +lean_dec(x_23250); +x_23254 = l_Lean_IR_ToIR_lowerLet___lambda__1(x_2, x_23246, x_23241, x_23252, x_23253, x_4, x_5, x_23251); +return x_23254; +} +else +{ +lean_object* x_23255; lean_object* x_23256; lean_object* x_23257; lean_object* x_23258; +lean_dec(x_23246); +lean_dec(x_23241); +lean_dec(x_5); +lean_dec(x_4); +lean_dec(x_2); +x_23255 = lean_ctor_get(x_23249, 0); +lean_inc(x_23255); +x_23256 = lean_ctor_get(x_23249, 1); +lean_inc(x_23256); +if (lean_is_exclusive(x_23249)) { + lean_ctor_release(x_23249, 0); + lean_ctor_release(x_23249, 1); + x_23257 = x_23249; +} else { + lean_dec_ref(x_23249); + x_23257 = lean_box(0); +} +if (lean_is_scalar(x_23257)) { + x_23258 = lean_alloc_ctor(1, 2, 0); +} else { + x_23258 = x_23257; +} +lean_ctor_set(x_23258, 0, x_23255); +lean_ctor_set(x_23258, 1, x_23256); +return x_23258; +} +} +else +{ +lean_object* x_23259; lean_object* x_23260; lean_object* x_23261; lean_object* x_23262; lean_object* x_23263; +lean_dec(x_22801); +lean_dec(x_153); +lean_dec(x_5); +lean_dec(x_4); +lean_dec(x_2); +lean_dec(x_1); +x_23259 = lean_ctor_get(x_23237, 1); +lean_inc(x_23259); +if (lean_is_exclusive(x_23237)) { + lean_ctor_release(x_23237, 0); + lean_ctor_release(x_23237, 1); + x_23260 = x_23237; +} else { + 
lean_dec_ref(x_23237); + x_23260 = lean_box(0); +} +x_23261 = lean_ctor_get(x_23239, 0); +lean_inc(x_23261); +lean_dec(x_23239); +if (lean_is_scalar(x_23260)) { + x_23262 = lean_alloc_ctor(0, 2, 0); +} else { + x_23262 = x_23260; +} +lean_ctor_set(x_23262, 0, x_23261); +lean_ctor_set(x_23262, 1, x_23259); +if (lean_is_scalar(x_22811)) { + x_23263 = lean_alloc_ctor(0, 2, 0); +} else { + x_23263 = x_22811; +} +lean_ctor_set(x_23263, 0, x_23262); +lean_ctor_set(x_23263, 1, x_23238); +return x_23263; +} +} +} +} +default: +{ +lean_object* x_23365; uint8_t x_23366; lean_object* x_23367; lean_object* x_23368; lean_object* x_23369; lean_object* x_23370; lean_object* x_23371; lean_object* x_23372; lean_object* x_23373; lean_object* x_23374; lean_object* x_23375; +lean_dec(x_22812); +lean_dec(x_22811); +lean_dec(x_22807); +lean_dec(x_22801); +lean_dec(x_20593); +lean_dec(x_20592); +lean_dec(x_2); +lean_dec(x_1); +if (lean_is_exclusive(x_22817)) { + lean_ctor_release(x_22817, 0); + x_23365 = x_22817; +} else { + lean_dec_ref(x_22817); + x_23365 = lean_box(0); +} +x_23366 = 1; +x_23367 = l_Lean_IR_ToIR_lowerLet___closed__14; +x_23368 = l_Lean_Name_toString(x_153, x_23366, x_23367); +if (lean_is_scalar(x_23365)) { + x_23369 = lean_alloc_ctor(3, 1, 0); +} else { + x_23369 = x_23365; + lean_ctor_set_tag(x_23369, 3); +} +lean_ctor_set(x_23369, 0, x_23368); +x_23370 = l_Lean_IR_ToIR_lowerLet___closed__29; +x_23371 = lean_alloc_ctor(5, 2, 0); +lean_ctor_set(x_23371, 0, x_23370); +lean_ctor_set(x_23371, 1, x_23369); +x_23372 = l_Lean_IR_ToIR_lowerLet___closed__31; +x_23373 = lean_alloc_ctor(5, 2, 0); +lean_ctor_set(x_23373, 0, x_23371); +lean_ctor_set(x_23373, 1, x_23372); +x_23374 = l_Lean_MessageData_ofFormat(x_23373); +x_23375 = l_Lean_throwError___at_Lean_IR_ToIR_lowerLet___spec__1(x_23374, x_22806, x_4, x_5, x_22810); +lean_dec(x_5); +lean_dec(x_4); +lean_dec(x_22806); +return x_23375; +} +} +} +} +else +{ +lean_object* x_23376; lean_object* x_23377; lean_object* x_23378; lean_object* x_23379; lean_object* x_23380; +lean_dec(x_22801); +lean_dec(x_20593); +lean_dec(x_20592); +lean_dec(x_153); +lean_dec(x_5); +lean_dec(x_4); +lean_dec(x_2); +lean_dec(x_1); +x_23376 = lean_ctor_get(x_22803, 1); +lean_inc(x_23376); +if (lean_is_exclusive(x_22803)) { + lean_ctor_release(x_22803, 0); + lean_ctor_release(x_22803, 1); + x_23377 = x_22803; +} else { + lean_dec_ref(x_22803); + x_23377 = lean_box(0); +} +x_23378 = lean_ctor_get(x_22805, 0); +lean_inc(x_23378); +lean_dec(x_22805); +if (lean_is_scalar(x_23377)) { + x_23379 = lean_alloc_ctor(0, 2, 0); +} else { + x_23379 = x_23377; +} +lean_ctor_set(x_23379, 0, x_23378); +lean_ctor_set(x_23379, 1, x_23376); +if (lean_is_scalar(x_20599)) { + x_23380 = lean_alloc_ctor(0, 2, 0); +} else { + x_23380 = x_20599; +} +lean_ctor_set(x_23380, 0, x_23379); +lean_ctor_set(x_23380, 1, x_22804); +return x_23380; +} +} +} +} +else +{ +uint8_t x_23482; +lean_dec(x_20593); +lean_dec(x_20592); +lean_dec(x_153); +lean_dec(x_5); +lean_dec(x_4); +lean_dec(x_2); +lean_dec(x_1); +x_23482 = !lean_is_exclusive(x_20596); +if (x_23482 == 0) +{ +return x_20596; +} +else +{ +lean_object* x_23483; lean_object* x_23484; lean_object* x_23485; +x_23483 = lean_ctor_get(x_20596, 0); +x_23484 = lean_ctor_get(x_20596, 1); +lean_inc(x_23484); +lean_inc(x_23483); +lean_dec(x_20596); +x_23485 = lean_alloc_ctor(1, 2, 0); +lean_ctor_set(x_23485, 0, x_23483); +lean_ctor_set(x_23485, 1, x_23484); +return x_23485; +} +} +} +} +} +default: +{ +lean_object* x_23486; lean_object* x_23487; lean_object* x_23488; 
lean_object* x_23489; lean_object* x_23490; uint64_t x_23491; uint64_t x_23492; uint64_t x_23493; uint64_t x_23494; uint64_t x_23495; uint64_t x_23496; uint64_t x_23497; size_t x_23498; size_t x_23499; size_t x_23500; size_t x_23501; size_t x_23502; lean_object* x_23503; lean_object* x_23504; +x_23486 = lean_ctor_get(x_3, 0); +lean_inc(x_23486); +x_23487 = lean_ctor_get(x_7, 0); +lean_inc(x_23487); +x_23488 = lean_ctor_get(x_7, 1); +lean_inc(x_23488); +lean_dec(x_7); +x_23489 = lean_ctor_get(x_23486, 1); +lean_inc(x_23489); +lean_dec(x_23486); +x_23490 = lean_array_get_size(x_23489); +x_23491 = l___private_Lean_Expr_0__Lean_hashFVarId____x40_Lean_Expr___hyg_1730_(x_23487); +x_23492 = 32; +x_23493 = lean_uint64_shift_right(x_23491, x_23492); +x_23494 = lean_uint64_xor(x_23491, x_23493); +x_23495 = 16; +x_23496 = lean_uint64_shift_right(x_23494, x_23495); +x_23497 = lean_uint64_xor(x_23494, x_23496); +x_23498 = lean_uint64_to_usize(x_23497); +x_23499 = lean_usize_of_nat(x_23490); +lean_dec(x_23490); +x_23500 = 1; +x_23501 = lean_usize_sub(x_23499, x_23500); +x_23502 = lean_usize_land(x_23498, x_23501); +x_23503 = lean_array_uget(x_23489, x_23502); +lean_dec(x_23489); +x_23504 = l_Std_DHashMap_Internal_AssocList_get_x3f___at_Lean_IR_ToIR_lowerArg___spec__1(x_23487, x_23503); +lean_dec(x_23503); +lean_dec(x_23487); +if (lean_obj_tag(x_23504) == 0) +{ +lean_object* x_23505; lean_object* x_23506; +lean_dec(x_23488); +lean_dec(x_2); +lean_dec(x_1); +x_23505 = l_Lean_IR_ToIR_lowerLet___closed__38; +x_23506 = l_panic___at_Lean_IR_ToIR_lowerAlt_loop___spec__1(x_23505, x_3, x_4, x_5, x_6); +return x_23506; +} +else +{ +lean_object* x_23507; +x_23507 = lean_ctor_get(x_23504, 0); +lean_inc(x_23507); +lean_dec(x_23504); +switch (lean_obj_tag(x_23507)) { +case 0: +{ +lean_object* x_23508; size_t x_23509; size_t x_23510; lean_object* x_23511; +x_23508 = lean_ctor_get(x_23507, 0); +lean_inc(x_23508); +lean_dec(x_23507); +x_23509 = lean_array_size(x_23488); +x_23510 = 0; +lean_inc(x_5); +lean_inc(x_4); +x_23511 = l_Array_mapMUnsafe_map___at_Lean_IR_ToIR_lowerCode___spec__2(x_23509, x_23510, x_23488, x_3, x_4, x_5, x_6); +if (lean_obj_tag(x_23511) == 0) +{ +lean_object* x_23512; lean_object* x_23513; uint8_t x_23514; +x_23512 = lean_ctor_get(x_23511, 0); +lean_inc(x_23512); +x_23513 = lean_ctor_get(x_23511, 1); +lean_inc(x_23513); +lean_dec(x_23511); +x_23514 = !lean_is_exclusive(x_23512); +if (x_23514 == 0) +{ +lean_object* x_23515; lean_object* x_23516; lean_object* x_23517; lean_object* x_23518; lean_object* x_23519; lean_object* x_23520; lean_object* x_23521; lean_object* x_23522; lean_object* x_23523; lean_object* x_23524; +x_23515 = lean_ctor_get(x_23512, 0); +x_23516 = lean_ctor_get(x_23512, 1); +lean_ctor_set_tag(x_23512, 8); +lean_ctor_set(x_23512, 1, x_23515); +lean_ctor_set(x_23512, 0, x_23508); +x_23517 = lean_ctor_get(x_1, 0); +lean_inc(x_23517); +x_23518 = l_Lean_IR_ToIR_bindVar(x_23517, x_23516, x_4, x_5, x_23513); +x_23519 = lean_ctor_get(x_23518, 0); +lean_inc(x_23519); +x_23520 = lean_ctor_get(x_23518, 1); +lean_inc(x_23520); +lean_dec(x_23518); +x_23521 = lean_ctor_get(x_23519, 0); +lean_inc(x_23521); +x_23522 = lean_ctor_get(x_23519, 1); +lean_inc(x_23522); +lean_dec(x_23519); +x_23523 = lean_ctor_get(x_1, 2); +lean_inc(x_23523); +lean_dec(x_1); +lean_inc(x_5); +lean_inc(x_4); +x_23524 = l_Lean_IR_ToIR_lowerType(x_23523, x_23522, x_4, x_5, x_23520); +if (lean_obj_tag(x_23524) == 0) +{ +lean_object* x_23525; lean_object* x_23526; lean_object* x_23527; lean_object* x_23528; lean_object* 
x_23529; +x_23525 = lean_ctor_get(x_23524, 0); +lean_inc(x_23525); +x_23526 = lean_ctor_get(x_23524, 1); +lean_inc(x_23526); +lean_dec(x_23524); +x_23527 = lean_ctor_get(x_23525, 0); +lean_inc(x_23527); +x_23528 = lean_ctor_get(x_23525, 1); +lean_inc(x_23528); +lean_dec(x_23525); +x_23529 = l_Lean_IR_ToIR_lowerLet___lambda__1(x_2, x_23521, x_23512, x_23527, x_23528, x_4, x_5, x_23526); +return x_23529; +} +else +{ +uint8_t x_23530; +lean_dec(x_23521); +lean_dec(x_23512); +lean_dec(x_5); +lean_dec(x_4); +lean_dec(x_2); +x_23530 = !lean_is_exclusive(x_23524); +if (x_23530 == 0) +{ +return x_23524; +} +else +{ +lean_object* x_23531; lean_object* x_23532; lean_object* x_23533; +x_23531 = lean_ctor_get(x_23524, 0); +x_23532 = lean_ctor_get(x_23524, 1); +lean_inc(x_23532); +lean_inc(x_23531); +lean_dec(x_23524); +x_23533 = lean_alloc_ctor(1, 2, 0); +lean_ctor_set(x_23533, 0, x_23531); +lean_ctor_set(x_23533, 1, x_23532); +return x_23533; +} +} +} +else +{ +lean_object* x_23534; lean_object* x_23535; lean_object* x_23536; lean_object* x_23537; lean_object* x_23538; lean_object* x_23539; lean_object* x_23540; lean_object* x_23541; lean_object* x_23542; lean_object* x_23543; lean_object* x_23544; +x_23534 = lean_ctor_get(x_23512, 0); +x_23535 = lean_ctor_get(x_23512, 1); +lean_inc(x_23535); +lean_inc(x_23534); +lean_dec(x_23512); +x_23536 = lean_alloc_ctor(8, 2, 0); +lean_ctor_set(x_23536, 0, x_23508); +lean_ctor_set(x_23536, 1, x_23534); +x_23537 = lean_ctor_get(x_1, 0); +lean_inc(x_23537); +x_23538 = l_Lean_IR_ToIR_bindVar(x_23537, x_23535, x_4, x_5, x_23513); +x_23539 = lean_ctor_get(x_23538, 0); +lean_inc(x_23539); +x_23540 = lean_ctor_get(x_23538, 1); +lean_inc(x_23540); +lean_dec(x_23538); +x_23541 = lean_ctor_get(x_23539, 0); +lean_inc(x_23541); +x_23542 = lean_ctor_get(x_23539, 1); +lean_inc(x_23542); +lean_dec(x_23539); +x_23543 = lean_ctor_get(x_1, 2); +lean_inc(x_23543); +lean_dec(x_1); +lean_inc(x_5); +lean_inc(x_4); +x_23544 = l_Lean_IR_ToIR_lowerType(x_23543, x_23542, x_4, x_5, x_23540); +if (lean_obj_tag(x_23544) == 0) +{ +lean_object* x_23545; lean_object* x_23546; lean_object* x_23547; lean_object* x_23548; lean_object* x_23549; +x_23545 = lean_ctor_get(x_23544, 0); +lean_inc(x_23545); +x_23546 = lean_ctor_get(x_23544, 1); +lean_inc(x_23546); +lean_dec(x_23544); +x_23547 = lean_ctor_get(x_23545, 0); +lean_inc(x_23547); +x_23548 = lean_ctor_get(x_23545, 1); +lean_inc(x_23548); +lean_dec(x_23545); +x_23549 = l_Lean_IR_ToIR_lowerLet___lambda__1(x_2, x_23541, x_23536, x_23547, x_23548, x_4, x_5, x_23546); +return x_23549; +} +else +{ +lean_object* x_23550; lean_object* x_23551; lean_object* x_23552; lean_object* x_23553; +lean_dec(x_23541); +lean_dec(x_23536); +lean_dec(x_5); +lean_dec(x_4); +lean_dec(x_2); +x_23550 = lean_ctor_get(x_23544, 0); +lean_inc(x_23550); +x_23551 = lean_ctor_get(x_23544, 1); +lean_inc(x_23551); +if (lean_is_exclusive(x_23544)) { + lean_ctor_release(x_23544, 0); + lean_ctor_release(x_23544, 1); + x_23552 = x_23544; +} else { + lean_dec_ref(x_23544); + x_23552 = lean_box(0); +} +if (lean_is_scalar(x_23552)) { + x_23553 = lean_alloc_ctor(1, 2, 0); +} else { + x_23553 = x_23552; +} +lean_ctor_set(x_23553, 0, x_23550); +lean_ctor_set(x_23553, 1, x_23551); +return x_23553; +} +} +} +else +{ +uint8_t x_23554; +lean_dec(x_23508); +lean_dec(x_5); +lean_dec(x_4); +lean_dec(x_2); +lean_dec(x_1); +x_23554 = !lean_is_exclusive(x_23511); +if (x_23554 == 0) +{ +return x_23511; +} +else +{ +lean_object* x_23555; lean_object* x_23556; lean_object* x_23557; +x_23555 = 
lean_ctor_get(x_23511, 0); +x_23556 = lean_ctor_get(x_23511, 1); +lean_inc(x_23556); +lean_inc(x_23555); +lean_dec(x_23511); +x_23557 = lean_alloc_ctor(1, 2, 0); +lean_ctor_set(x_23557, 0, x_23555); +lean_ctor_set(x_23557, 1, x_23556); +return x_23557; +} +} +} +case 1: +{ +lean_object* x_23558; lean_object* x_23559; +lean_dec(x_23507); +lean_dec(x_23488); +lean_dec(x_2); +lean_dec(x_1); +x_23558 = l_Lean_IR_ToIR_lowerLet___closed__38; +x_23559 = l_panic___at_Lean_IR_ToIR_lowerAlt_loop___spec__1(x_23558, x_3, x_4, x_5, x_6); +return x_23559; +} +default: +{ +lean_object* x_23560; lean_object* x_23561; lean_object* x_23562; lean_object* x_23563; lean_object* x_23564; lean_object* x_23565; +lean_dec(x_23488); +x_23560 = lean_ctor_get(x_1, 0); +lean_inc(x_23560); +lean_dec(x_1); +x_23561 = l_Lean_IR_ToIR_bindErased(x_23560, x_3, x_4, x_5, x_6); +x_23562 = lean_ctor_get(x_23561, 0); +lean_inc(x_23562); +x_23563 = lean_ctor_get(x_23561, 1); +lean_inc(x_23563); +lean_dec(x_23561); +x_23564 = lean_ctor_get(x_23562, 1); +lean_inc(x_23564); +lean_dec(x_23562); +x_23565 = l_Lean_IR_ToIR_lowerCode(x_2, x_23564, x_4, x_5, x_23563); +return x_23565; +} +} +} +} +} +} +} +LEAN_EXPORT lean_object* l_Lean_IR_ToIR_lowerLet_lowerNonObjectFields(lean_object* x_1, lean_object* x_2, lean_object* x_3, lean_object* x_4, lean_object* x_5, lean_object* x_6, lean_object* x_7, lean_object* x_8, lean_object* x_9, lean_object* x_10) { +_start: +{ +lean_object* x_11; lean_object* x_12; +x_11 = lean_unsigned_to_nat(0u); +x_12 = l_Lean_IR_ToIR_lowerLet_lowerNonObjectFields_loop(x_1, x_2, x_3, x_4, x_5, x_11, x_11, x_7, x_8, x_9, x_10); +return x_12; +} +} +LEAN_EXPORT lean_object* l_Lean_IR_ToIR_lowerLet_lowerNonObjectFields_loop(lean_object* x_1, lean_object* x_2, lean_object* x_3, lean_object* x_4, lean_object* x_5, lean_object* x_6, lean_object* x_7, lean_object* x_8, lean_object* x_9, lean_object* x_10, lean_object* x_11) { +_start: +{ +lean_object* x_12; uint8_t x_13; +x_12 = lean_array_get_size(x_4); +x_13 = lean_nat_dec_lt(x_7, x_12); +lean_dec(x_12); +if (x_13 == 0) +{ +lean_object* x_14; +lean_dec(x_7); +lean_dec(x_5); +x_14 = l_Lean_IR_ToIR_lowerCode(x_1, x_8, x_9, x_10, x_11); +return x_14; +} +else +{ +lean_object* x_15; +x_15 = lean_array_fget(x_4, x_7); +if (lean_obj_tag(x_15) == 1) +{ +lean_object* x_16; lean_object* x_17; lean_object* x_18; lean_object* x_19; uint64_t x_20; uint64_t x_21; uint64_t x_22; uint64_t x_23; uint64_t x_24; uint64_t x_25; uint64_t x_26; size_t x_27; size_t x_28; size_t x_29; size_t x_30; size_t x_31; lean_object* x_32; lean_object* x_33; +x_16 = lean_ctor_get(x_8, 0); +lean_inc(x_16); +x_17 = lean_ctor_get(x_15, 0); +lean_inc(x_17); +lean_dec(x_15); +x_18 = lean_ctor_get(x_16, 1); +lean_inc(x_18); +lean_dec(x_16); +x_19 = lean_array_get_size(x_18); +x_20 = l___private_Lean_Expr_0__Lean_hashFVarId____x40_Lean_Expr___hyg_1730_(x_17); +x_21 = 32; +x_22 = lean_uint64_shift_right(x_20, x_21); +x_23 = lean_uint64_xor(x_20, x_22); +x_24 = 16; +x_25 = lean_uint64_shift_right(x_23, x_24); +x_26 = lean_uint64_xor(x_23, x_25); +x_27 = lean_uint64_to_usize(x_26); +x_28 = lean_usize_of_nat(x_19); +lean_dec(x_19); +x_29 = 1; +x_30 = lean_usize_sub(x_28, x_29); +x_31 = lean_usize_land(x_27, x_30); +x_32 = lean_array_uget(x_18, x_31); +lean_dec(x_18); +x_33 = l_Std_DHashMap_Internal_AssocList_get_x3f___at_Lean_IR_ToIR_lowerArg___spec__1(x_17, x_32); +lean_dec(x_32); +lean_dec(x_17); +if (lean_obj_tag(x_33) == 0) +{ +lean_object* x_34; lean_object* x_35; +x_34 = lean_unsigned_to_nat(1u); +x_35 = 
lean_nat_add(x_7, x_34); +lean_dec(x_7); +x_7 = x_35; +goto _start; +} +else +{ +lean_object* x_37; +x_37 = lean_ctor_get(x_33, 0); +lean_inc(x_37); +lean_dec(x_33); +if (lean_obj_tag(x_37) == 0) +{ +lean_object* x_38; lean_object* x_39; lean_object* x_40; +x_38 = lean_ctor_get(x_37, 0); +lean_inc(x_38); +lean_dec(x_37); +x_39 = l_Lean_IR_instInhabitedCtorFieldInfo; +x_40 = lean_array_get(x_39, x_3, x_7); +switch (lean_obj_tag(x_40)) { +case 2: +{ +lean_object* x_41; lean_object* x_42; lean_object* x_43; lean_object* x_44; +lean_dec(x_40); +x_41 = lean_unsigned_to_nat(1u); +x_42 = lean_nat_add(x_6, x_41); +x_43 = lean_nat_add(x_7, x_41); +lean_dec(x_7); +lean_inc(x_5); +x_44 = l_Lean_IR_ToIR_lowerLet_lowerNonObjectFields_loop(x_1, x_2, x_3, x_4, x_5, x_42, x_43, x_8, x_9, x_10, x_11); +lean_dec(x_42); +if (lean_obj_tag(x_44) == 0) +{ +uint8_t x_45; +x_45 = !lean_is_exclusive(x_44); +if (x_45 == 0) +{ +lean_object* x_46; uint8_t x_47; +x_46 = lean_ctor_get(x_44, 0); +x_47 = !lean_is_exclusive(x_46); +if (x_47 == 0) +{ +lean_object* x_48; lean_object* x_49; lean_object* x_50; lean_object* x_51; +x_48 = lean_ctor_get(x_46, 0); +x_49 = lean_ctor_get(x_2, 2); +x_50 = lean_nat_add(x_49, x_6); +x_51 = lean_alloc_ctor(4, 4, 0); +lean_ctor_set(x_51, 0, x_5); +lean_ctor_set(x_51, 1, x_50); +lean_ctor_set(x_51, 2, x_38); +lean_ctor_set(x_51, 3, x_48); +lean_ctor_set(x_46, 0, x_51); +return x_44; +} +else +{ +lean_object* x_52; lean_object* x_53; lean_object* x_54; lean_object* x_55; lean_object* x_56; lean_object* x_57; +x_52 = lean_ctor_get(x_46, 0); +x_53 = lean_ctor_get(x_46, 1); +lean_inc(x_53); +lean_inc(x_52); +lean_dec(x_46); +x_54 = lean_ctor_get(x_2, 2); +x_55 = lean_nat_add(x_54, x_6); +x_56 = lean_alloc_ctor(4, 4, 0); +lean_ctor_set(x_56, 0, x_5); +lean_ctor_set(x_56, 1, x_55); +lean_ctor_set(x_56, 2, x_38); +lean_ctor_set(x_56, 3, x_52); +x_57 = lean_alloc_ctor(0, 2, 0); +lean_ctor_set(x_57, 0, x_56); +lean_ctor_set(x_57, 1, x_53); +lean_ctor_set(x_44, 0, x_57); +return x_44; +} +} +else +{ +lean_object* x_58; lean_object* x_59; lean_object* x_60; lean_object* x_61; lean_object* x_62; lean_object* x_63; lean_object* x_64; lean_object* x_65; lean_object* x_66; lean_object* x_67; +x_58 = lean_ctor_get(x_44, 0); +x_59 = lean_ctor_get(x_44, 1); +lean_inc(x_59); +lean_inc(x_58); +lean_dec(x_44); +x_60 = lean_ctor_get(x_58, 0); +lean_inc(x_60); +x_61 = lean_ctor_get(x_58, 1); +lean_inc(x_61); +if (lean_is_exclusive(x_58)) { + lean_ctor_release(x_58, 0); + lean_ctor_release(x_58, 1); + x_62 = x_58; +} else { + lean_dec_ref(x_58); + x_62 = lean_box(0); +} +x_63 = lean_ctor_get(x_2, 2); +x_64 = lean_nat_add(x_63, x_6); +x_65 = lean_alloc_ctor(4, 4, 0); +lean_ctor_set(x_65, 0, x_5); +lean_ctor_set(x_65, 1, x_64); +lean_ctor_set(x_65, 2, x_38); +lean_ctor_set(x_65, 3, x_60); +if (lean_is_scalar(x_62)) { + x_66 = lean_alloc_ctor(0, 2, 0); +} else { + x_66 = x_62; +} +lean_ctor_set(x_66, 0, x_65); +lean_ctor_set(x_66, 1, x_61); +x_67 = lean_alloc_ctor(0, 2, 0); +lean_ctor_set(x_67, 0, x_66); +lean_ctor_set(x_67, 1, x_59); +return x_67; +} +} +else +{ +uint8_t x_68; +lean_dec(x_38); +lean_dec(x_5); +x_68 = !lean_is_exclusive(x_44); +if (x_68 == 0) +{ +return x_44; +} +else +{ +lean_object* x_69; lean_object* x_70; lean_object* x_71; +x_69 = lean_ctor_get(x_44, 0); +x_70 = lean_ctor_get(x_44, 1); +lean_inc(x_70); +lean_inc(x_69); +lean_dec(x_44); +x_71 = lean_alloc_ctor(1, 2, 0); +lean_ctor_set(x_71, 0, x_69); +lean_ctor_set(x_71, 1, x_70); +return x_71; +} +} +} +case 3: +{ +lean_object* x_72; 
lean_object* x_73; lean_object* x_74; lean_object* x_75; lean_object* x_76; +x_72 = lean_ctor_get(x_40, 1); +lean_inc(x_72); +x_73 = lean_ctor_get(x_40, 2); +lean_inc(x_73); +lean_dec(x_40); +x_74 = lean_unsigned_to_nat(1u); +x_75 = lean_nat_add(x_7, x_74); +lean_dec(x_7); +lean_inc(x_5); +x_76 = l_Lean_IR_ToIR_lowerLet_lowerNonObjectFields_loop(x_1, x_2, x_3, x_4, x_5, x_6, x_75, x_8, x_9, x_10, x_11); +if (lean_obj_tag(x_76) == 0) +{ +uint8_t x_77; +x_77 = !lean_is_exclusive(x_76); +if (x_77 == 0) +{ +lean_object* x_78; uint8_t x_79; +x_78 = lean_ctor_get(x_76, 0); +x_79 = !lean_is_exclusive(x_78); +if (x_79 == 0) +{ +lean_object* x_80; lean_object* x_81; lean_object* x_82; lean_object* x_83; lean_object* x_84; +x_80 = lean_ctor_get(x_78, 0); +x_81 = lean_ctor_get(x_2, 2); +x_82 = lean_ctor_get(x_2, 3); +x_83 = lean_nat_add(x_81, x_82); +x_84 = lean_alloc_ctor(5, 6, 0); +lean_ctor_set(x_84, 0, x_5); +lean_ctor_set(x_84, 1, x_83); +lean_ctor_set(x_84, 2, x_72); +lean_ctor_set(x_84, 3, x_38); +lean_ctor_set(x_84, 4, x_73); +lean_ctor_set(x_84, 5, x_80); +lean_ctor_set(x_78, 0, x_84); +return x_76; +} +else +{ +lean_object* x_85; lean_object* x_86; lean_object* x_87; lean_object* x_88; lean_object* x_89; lean_object* x_90; lean_object* x_91; +x_85 = lean_ctor_get(x_78, 0); +x_86 = lean_ctor_get(x_78, 1); +lean_inc(x_86); +lean_inc(x_85); +lean_dec(x_78); +x_87 = lean_ctor_get(x_2, 2); +x_88 = lean_ctor_get(x_2, 3); +x_89 = lean_nat_add(x_87, x_88); +x_90 = lean_alloc_ctor(5, 6, 0); +lean_ctor_set(x_90, 0, x_5); +lean_ctor_set(x_90, 1, x_89); +lean_ctor_set(x_90, 2, x_72); +lean_ctor_set(x_90, 3, x_38); +lean_ctor_set(x_90, 4, x_73); +lean_ctor_set(x_90, 5, x_85); +x_91 = lean_alloc_ctor(0, 2, 0); +lean_ctor_set(x_91, 0, x_90); +lean_ctor_set(x_91, 1, x_86); +lean_ctor_set(x_76, 0, x_91); +return x_76; +} +} +else +{ +lean_object* x_92; lean_object* x_93; lean_object* x_94; lean_object* x_95; lean_object* x_96; lean_object* x_97; lean_object* x_98; lean_object* x_99; lean_object* x_100; lean_object* x_101; lean_object* x_102; +x_92 = lean_ctor_get(x_76, 0); +x_93 = lean_ctor_get(x_76, 1); +lean_inc(x_93); +lean_inc(x_92); +lean_dec(x_76); +x_94 = lean_ctor_get(x_92, 0); +lean_inc(x_94); +x_95 = lean_ctor_get(x_92, 1); +lean_inc(x_95); +if (lean_is_exclusive(x_92)) { + lean_ctor_release(x_92, 0); + lean_ctor_release(x_92, 1); + x_96 = x_92; +} else { + lean_dec_ref(x_92); + x_96 = lean_box(0); +} +x_97 = lean_ctor_get(x_2, 2); +x_98 = lean_ctor_get(x_2, 3); +x_99 = lean_nat_add(x_97, x_98); +x_100 = lean_alloc_ctor(5, 6, 0); +lean_ctor_set(x_100, 0, x_5); +lean_ctor_set(x_100, 1, x_99); +lean_ctor_set(x_100, 2, x_72); +lean_ctor_set(x_100, 3, x_38); +lean_ctor_set(x_100, 4, x_73); +lean_ctor_set(x_100, 5, x_94); +if (lean_is_scalar(x_96)) { + x_101 = lean_alloc_ctor(0, 2, 0); +} else { + x_101 = x_96; +} +lean_ctor_set(x_101, 0, x_100); +lean_ctor_set(x_101, 1, x_95); +x_102 = lean_alloc_ctor(0, 2, 0); +lean_ctor_set(x_102, 0, x_101); +lean_ctor_set(x_102, 1, x_93); +return x_102; +} +} +else +{ +uint8_t x_103; +lean_dec(x_73); +lean_dec(x_72); +lean_dec(x_38); +lean_dec(x_5); +x_103 = !lean_is_exclusive(x_76); +if (x_103 == 0) +{ +return x_76; +} +else +{ +lean_object* x_104; lean_object* x_105; lean_object* x_106; +x_104 = lean_ctor_get(x_76, 0); +x_105 = lean_ctor_get(x_76, 1); +lean_inc(x_105); +lean_inc(x_104); +lean_dec(x_76); +x_106 = lean_alloc_ctor(1, 2, 0); +lean_ctor_set(x_106, 0, x_104); +lean_ctor_set(x_106, 1, x_105); +return x_106; +} +} +} +default: +{ +lean_object* x_107; 
lean_object* x_108; +lean_dec(x_40); +lean_dec(x_38); +x_107 = lean_unsigned_to_nat(1u); +x_108 = lean_nat_add(x_7, x_107); +lean_dec(x_7); +x_7 = x_108; +goto _start; +} +} +} +else +{ +lean_object* x_110; lean_object* x_111; +lean_dec(x_37); +x_110 = lean_unsigned_to_nat(1u); +x_111 = lean_nat_add(x_7, x_110); +lean_dec(x_7); +x_7 = x_111; +goto _start; +} +} +} +else +{ +lean_object* x_113; lean_object* x_114; +lean_dec(x_15); +x_113 = lean_unsigned_to_nat(1u); +x_114 = lean_nat_add(x_7, x_113); +lean_dec(x_7); +x_7 = x_114; +goto _start; +} +} +} +} +LEAN_EXPORT lean_object* l_Lean_IR_ToIR_lowerAlt_loop___boxed(lean_object* x_1, lean_object* x_2, lean_object* x_3, lean_object* x_4, lean_object* x_5, lean_object* x_6, lean_object* x_7, lean_object* x_8, lean_object* x_9, lean_object* x_10) { +_start: +{ +lean_object* x_11; +x_11 = l_Lean_IR_ToIR_lowerAlt_loop(x_1, x_2, x_3, x_4, x_5, x_6, x_7, x_8, x_9, x_10); +lean_dec(x_5); +lean_dec(x_4); +lean_dec(x_3); +return x_11; +} +} +LEAN_EXPORT lean_object* l_Array_mapMUnsafe_map___at_Lean_IR_ToIR_lowerCode___spec__1___boxed(lean_object* x_1, lean_object* x_2, lean_object* x_3, lean_object* x_4, lean_object* x_5, lean_object* x_6, lean_object* x_7) { +_start: +{ +size_t x_8; size_t x_9; lean_object* x_10; +x_8 = lean_unbox_usize(x_1); +lean_dec(x_1); +x_9 = lean_unbox_usize(x_2); +lean_dec(x_2); +x_10 = l_Array_mapMUnsafe_map___at_Lean_IR_ToIR_lowerCode___spec__1(x_8, x_9, x_3, x_4, x_5, x_6, x_7); +return x_10; +} +} +LEAN_EXPORT lean_object* l_Array_mapMUnsafe_map___at_Lean_IR_ToIR_lowerCode___spec__2___boxed(lean_object* x_1, lean_object* x_2, lean_object* x_3, lean_object* x_4, lean_object* x_5, lean_object* x_6, lean_object* x_7) { +_start: +{ +size_t x_8; size_t x_9; lean_object* x_10; +x_8 = lean_unbox_usize(x_1); +lean_dec(x_1); +x_9 = lean_unbox_usize(x_2); +lean_dec(x_2); +x_10 = l_Array_mapMUnsafe_map___at_Lean_IR_ToIR_lowerCode___spec__2(x_8, x_9, x_3, x_4, x_5, x_6, x_7); +return x_10; +} +} +LEAN_EXPORT lean_object* l_Array_mapMUnsafe_map___at_Lean_IR_ToIR_lowerCode___spec__3___boxed(lean_object* x_1, lean_object* x_2, lean_object* x_3, lean_object* x_4, lean_object* x_5, lean_object* x_6, lean_object* x_7, lean_object* x_8) { +_start: +{ +size_t x_9; size_t x_10; lean_object* x_11; +x_9 = lean_unbox_usize(x_2); +lean_dec(x_2); +x_10 = lean_unbox_usize(x_3); +lean_dec(x_3); +x_11 = l_Array_mapMUnsafe_map___at_Lean_IR_ToIR_lowerCode___spec__3(x_1, x_9, x_10, x_4, x_5, x_6, x_7, x_8); +return x_11; +} +} +LEAN_EXPORT lean_object* l_Lean_throwError___at_Lean_IR_ToIR_lowerLet___spec__1___boxed(lean_object* x_1, lean_object* x_2, lean_object* x_3, lean_object* x_4, lean_object* x_5) { +_start: +{ +lean_object* x_6; +x_6 = l_Lean_throwError___at_Lean_IR_ToIR_lowerLet___spec__1(x_1, x_2, x_3, x_4, x_5); +lean_dec(x_4); +lean_dec(x_3); +lean_dec(x_2); +return x_6; +} +} +LEAN_EXPORT lean_object* l_Std_Range_forIn_x27_loop___at_Lean_IR_ToIR_lowerLet___spec__2___boxed(lean_object* x_1, lean_object* x_2, lean_object* x_3, lean_object* x_4, lean_object* x_5, lean_object* x_6, lean_object* x_7, lean_object* x_8, lean_object* x_9, lean_object* x_10, lean_object* x_11, lean_object* x_12) { +_start: +{ +lean_object* x_13; +x_13 = l_Std_Range_forIn_x27_loop___at_Lean_IR_ToIR_lowerLet___spec__2(x_1, x_2, x_3, x_4, x_5, x_6, x_7, x_8, x_9, x_10, x_11, x_12); +lean_dec(x_11); +lean_dec(x_10); +lean_dec(x_4); +lean_dec(x_3); +lean_dec(x_2); +lean_dec(x_1); +return x_13; +} +} +LEAN_EXPORT lean_object* 
l_Std_Range_forIn_x27_loop___at_Lean_IR_ToIR_lowerLet___spec__3___boxed(lean_object* x_1, lean_object* x_2, lean_object* x_3, lean_object* x_4, lean_object* x_5, lean_object* x_6, lean_object* x_7, lean_object* x_8, lean_object* x_9, lean_object* x_10, lean_object* x_11, lean_object* x_12) { +_start: +{ +lean_object* x_13; +x_13 = l_Std_Range_forIn_x27_loop___at_Lean_IR_ToIR_lowerLet___spec__3(x_1, x_2, x_3, x_4, x_5, x_6, x_7, x_8, x_9, x_10, x_11, x_12); +lean_dec(x_11); +lean_dec(x_10); +lean_dec(x_4); +lean_dec(x_3); +lean_dec(x_2); +lean_dec(x_1); +return x_13; +} +} +LEAN_EXPORT lean_object* l_Std_Range_forIn_x27_loop___at_Lean_IR_ToIR_lowerLet___spec__4___boxed(lean_object* x_1, lean_object* x_2, lean_object* x_3, lean_object* x_4, lean_object* x_5, lean_object* x_6, lean_object* x_7, lean_object* x_8, lean_object* x_9, lean_object* x_10, lean_object* x_11, lean_object* x_12) { +_start: +{ +lean_object* x_13; +x_13 = l_Std_Range_forIn_x27_loop___at_Lean_IR_ToIR_lowerLet___spec__4(x_1, x_2, x_3, x_4, x_5, x_6, x_7, x_8, x_9, x_10, x_11, x_12); +lean_dec(x_11); +lean_dec(x_10); +lean_dec(x_4); +lean_dec(x_3); +lean_dec(x_2); +lean_dec(x_1); +return x_13; +} +} +LEAN_EXPORT lean_object* l_Std_Range_forIn_x27_loop___at_Lean_IR_ToIR_lowerLet___spec__5___boxed(lean_object* x_1, lean_object* x_2, lean_object* x_3, lean_object* x_4, lean_object* x_5, lean_object* x_6, lean_object* x_7, lean_object* x_8, lean_object* x_9, lean_object* x_10, lean_object* x_11, lean_object* x_12) { +_start: +{ +lean_object* x_13; +x_13 = l_Std_Range_forIn_x27_loop___at_Lean_IR_ToIR_lowerLet___spec__5(x_1, x_2, x_3, x_4, x_5, x_6, x_7, x_8, x_9, x_10, x_11, x_12); +lean_dec(x_11); +lean_dec(x_10); +lean_dec(x_4); +lean_dec(x_3); +lean_dec(x_2); +lean_dec(x_1); +return x_13; +} +} +LEAN_EXPORT lean_object* l_Std_Range_forIn_x27_loop___at_Lean_IR_ToIR_lowerLet___spec__6___boxed(lean_object* x_1, lean_object* x_2, lean_object* x_3, lean_object* x_4, lean_object* x_5, lean_object* x_6, lean_object* x_7, lean_object* x_8, lean_object* x_9, lean_object* x_10, lean_object* x_11, lean_object* x_12) { +_start: +{ +lean_object* x_13; +x_13 = l_Std_Range_forIn_x27_loop___at_Lean_IR_ToIR_lowerLet___spec__6(x_1, x_2, x_3, x_4, x_5, x_6, x_7, x_8, x_9, x_10, x_11, x_12); +lean_dec(x_11); +lean_dec(x_10); +lean_dec(x_4); +lean_dec(x_3); +lean_dec(x_2); +lean_dec(x_1); +return x_13; +} +} +LEAN_EXPORT lean_object* l_Std_Range_forIn_x27_loop___at_Lean_IR_ToIR_lowerLet___spec__7___boxed(lean_object* x_1, lean_object* x_2, lean_object* x_3, lean_object* x_4, lean_object* x_5, lean_object* x_6, lean_object* x_7, lean_object* x_8, lean_object* x_9, lean_object* x_10, lean_object* x_11, lean_object* x_12) { +_start: +{ +lean_object* x_13; +x_13 = l_Std_Range_forIn_x27_loop___at_Lean_IR_ToIR_lowerLet___spec__7(x_1, x_2, x_3, x_4, x_5, x_6, x_7, x_8, x_9, x_10, x_11, x_12); +lean_dec(x_11); +lean_dec(x_10); +lean_dec(x_4); +lean_dec(x_3); +lean_dec(x_2); +lean_dec(x_1); +return x_13; +} +} +LEAN_EXPORT lean_object* l_Std_Range_forIn_x27_loop___at_Lean_IR_ToIR_lowerLet___spec__8___boxed(lean_object* x_1, lean_object* x_2, lean_object* x_3, lean_object* x_4, lean_object* x_5, lean_object* x_6, lean_object* x_7, lean_object* x_8, lean_object* x_9, lean_object* x_10, lean_object* x_11, lean_object* x_12) { +_start: +{ +lean_object* x_13; +x_13 = l_Std_Range_forIn_x27_loop___at_Lean_IR_ToIR_lowerLet___spec__8(x_1, x_2, x_3, x_4, x_5, x_6, x_7, x_8, x_9, x_10, x_11, x_12); +lean_dec(x_11); +lean_dec(x_10); +lean_dec(x_4); +lean_dec(x_3); 
+lean_dec(x_2); +lean_dec(x_1); +return x_13; +} +} +LEAN_EXPORT lean_object* l_Std_Range_forIn_x27_loop___at_Lean_IR_ToIR_lowerLet___spec__9___boxed(lean_object* x_1, lean_object* x_2, lean_object* x_3, lean_object* x_4, lean_object* x_5, lean_object* x_6, lean_object* x_7, lean_object* x_8, lean_object* x_9, lean_object* x_10, lean_object* x_11, lean_object* x_12) { +_start: +{ +lean_object* x_13; +x_13 = l_Std_Range_forIn_x27_loop___at_Lean_IR_ToIR_lowerLet___spec__9(x_1, x_2, x_3, x_4, x_5, x_6, x_7, x_8, x_9, x_10, x_11, x_12); +lean_dec(x_11); +lean_dec(x_10); +lean_dec(x_4); +lean_dec(x_3); +lean_dec(x_2); +lean_dec(x_1); +return x_13; +} +} +LEAN_EXPORT lean_object* l_Lean_IR_ToIR_lowerLet___lambda__2___boxed(lean_object* x_1) { +_start: +{ +uint8_t x_2; lean_object* x_3; +x_2 = l_Lean_IR_ToIR_lowerLet___lambda__2(x_1); +lean_dec(x_1); +x_3 = lean_box(x_2); +return x_3; +} +} +LEAN_EXPORT lean_object* l_Lean_IR_ToIR_lowerLet___lambda__4___boxed(lean_object* x_1, lean_object* x_2, lean_object* x_3, lean_object* x_4, lean_object* x_5, lean_object* x_6, lean_object* x_7, lean_object* x_8, lean_object* x_9, lean_object* x_10) { +_start: +{ +lean_object* x_11; +x_11 = l_Lean_IR_ToIR_lowerLet___lambda__4(x_1, x_2, x_3, x_4, x_5, x_6, x_7, x_8, x_9, x_10); +lean_dec(x_5); +lean_dec(x_4); +return x_11; +} +} +LEAN_EXPORT lean_object* l_Lean_IR_ToIR_lowerLet_lowerNonObjectFields___boxed(lean_object* x_1, lean_object* x_2, lean_object* x_3, lean_object* x_4, lean_object* x_5, lean_object* x_6, lean_object* x_7, lean_object* x_8, lean_object* x_9, lean_object* x_10) { +_start: +{ +lean_object* x_11; +x_11 = l_Lean_IR_ToIR_lowerLet_lowerNonObjectFields(x_1, x_2, x_3, x_4, x_5, x_6, x_7, x_8, x_9, x_10); +lean_dec(x_6); +lean_dec(x_4); +lean_dec(x_3); +lean_dec(x_2); +return x_11; +} +} +LEAN_EXPORT lean_object* l_Lean_IR_ToIR_lowerLet_lowerNonObjectFields_loop___boxed(lean_object* x_1, lean_object* x_2, lean_object* x_3, lean_object* x_4, lean_object* x_5, lean_object* x_6, lean_object* x_7, lean_object* x_8, lean_object* x_9, lean_object* x_10, lean_object* x_11) { +_start: +{ +lean_object* x_12; +x_12 = l_Lean_IR_ToIR_lowerLet_lowerNonObjectFields_loop(x_1, x_2, x_3, x_4, x_5, x_6, x_7, x_8, x_9, x_10, x_11); +lean_dec(x_6); +lean_dec(x_4); +lean_dec(x_3); +lean_dec(x_2); +return x_12; +} +} +static lean_object* _init_l_Lean_IR_ToIR_lowerResultType_resultTypeForArity___closed__1() { +_start: +{ +lean_object* x_1; +x_1 = lean_mk_string_unchecked("Lean.IR.ToIR.lowerResultType.resultTypeForArity", 47, 47); +return x_1; +} +} +static lean_object* _init_l_Lean_IR_ToIR_lowerResultType_resultTypeForArity___closed__2() { +_start: +{ +lean_object* x_1; +x_1 = lean_mk_string_unchecked("invalid arity", 13, 13); +return x_1; +} +} +static lean_object* _init_l_Lean_IR_ToIR_lowerResultType_resultTypeForArity___closed__3() { +_start: +{ +lean_object* x_1; lean_object* x_2; lean_object* x_3; lean_object* x_4; lean_object* x_5; lean_object* x_6; +x_1 = l_List_forIn_x27_loop___at_Lean_IR_ToIR_lowerEnumToScalarType___spec__2___closed__1; +x_2 = l_Lean_IR_ToIR_lowerResultType_resultTypeForArity___closed__1; +x_3 = lean_unsigned_to_nat(384u); +x_4 = lean_unsigned_to_nat(11u); +x_5 = l_Lean_IR_ToIR_lowerResultType_resultTypeForArity___closed__2; +x_6 = l___private_Init_Util_0__mkPanicMessageWithDecl(x_1, x_2, x_3, x_4, x_5); +return x_6; +} +} +static lean_object* _init_l_Lean_IR_ToIR_lowerResultType_resultTypeForArity___closed__4() { +_start: +{ +lean_object* x_1; lean_object* x_2; lean_object* x_3; +x_1 = 
lean_box(0); +x_2 = l_Lean_IR_ToIR_lowerType___closed__12; +x_3 = l_Lean_Name_str___override(x_1, x_2); +return x_3; +} +} +static lean_object* _init_l_Lean_IR_ToIR_lowerResultType_resultTypeForArity___closed__5() { +_start: +{ +lean_object* x_1; lean_object* x_2; lean_object* x_3; +x_1 = lean_box(0); +x_2 = l_Lean_IR_ToIR_lowerResultType_resultTypeForArity___closed__4; +x_3 = l_Lean_Expr_const___override(x_2, x_1); +return x_3; +} +} +LEAN_EXPORT lean_object* l_Lean_IR_ToIR_lowerResultType_resultTypeForArity(lean_object* x_1, lean_object* x_2) { +_start: +{ +lean_object* x_3; uint8_t x_4; +x_3 = lean_unsigned_to_nat(0u); +x_4 = lean_nat_dec_eq(x_2, x_3); +if (x_4 == 0) +{ +switch (lean_obj_tag(x_1)) { +case 4: +{ +lean_object* x_5; +lean_dec(x_2); +x_5 = lean_ctor_get(x_1, 0); +if (lean_obj_tag(x_5) == 1) +{ +lean_object* x_6; +x_6 = lean_ctor_get(x_5, 0); +if (lean_obj_tag(x_6) == 0) +{ +lean_object* x_7; lean_object* x_8; uint8_t x_9; +x_7 = lean_ctor_get(x_5, 1); +x_8 = l_Lean_IR_ToIR_lowerType___closed__12; +x_9 = lean_string_dec_eq(x_7, x_8); +if (x_9 == 0) +{ +lean_object* x_10; lean_object* x_11; +x_10 = l_Lean_IR_ToIR_lowerResultType_resultTypeForArity___closed__3; +x_11 = l_panic___at_Lean_Expr_appFn_x21___spec__1(x_10); +return x_11; +} +else +{ +lean_object* x_12; +x_12 = l_Lean_IR_ToIR_lowerResultType_resultTypeForArity___closed__5; +return x_12; +} +} +else +{ +lean_object* x_13; lean_object* x_14; +x_13 = l_Lean_IR_ToIR_lowerResultType_resultTypeForArity___closed__3; +x_14 = l_panic___at_Lean_Expr_appFn_x21___spec__1(x_13); +return x_14; +} +} +else +{ +lean_object* x_15; lean_object* x_16; +x_15 = l_Lean_IR_ToIR_lowerResultType_resultTypeForArity___closed__3; +x_16 = l_panic___at_Lean_Expr_appFn_x21___spec__1(x_15); +return x_16; +} +} +case 7: +{ +lean_object* x_17; lean_object* x_18; lean_object* x_19; +x_17 = lean_ctor_get(x_1, 2); +x_18 = lean_unsigned_to_nat(1u); +x_19 = lean_nat_sub(x_2, x_18); +lean_dec(x_2); +x_1 = x_17; +x_2 = x_19; +goto _start; +} +default: +{ +lean_object* x_21; lean_object* x_22; +lean_dec(x_2); +x_21 = l_Lean_IR_ToIR_lowerResultType_resultTypeForArity___closed__3; +x_22 = l_panic___at_Lean_Expr_appFn_x21___spec__1(x_21); +return x_22; +} +} +} +else +{ +lean_dec(x_2); +lean_inc(x_1); +return x_1; +} +} +} +LEAN_EXPORT lean_object* l_Lean_IR_ToIR_lowerResultType_resultTypeForArity___boxed(lean_object* x_1, lean_object* x_2) { +_start: +{ +lean_object* x_3; +x_3 = l_Lean_IR_ToIR_lowerResultType_resultTypeForArity(x_1, x_2); +lean_dec(x_1); +return x_3; +} +} +LEAN_EXPORT lean_object* l_Lean_IR_ToIR_lowerResultType(lean_object* x_1, lean_object* x_2, lean_object* x_3, lean_object* x_4, lean_object* x_5, lean_object* x_6) { +_start: +{ +lean_object* x_7; lean_object* x_8; +x_7 = l_Lean_IR_ToIR_lowerResultType_resultTypeForArity(x_1, x_2); +x_8 = l_Lean_IR_ToIR_lowerType(x_7, x_3, x_4, x_5, x_6); +return x_8; +} +} +LEAN_EXPORT lean_object* l_Lean_IR_ToIR_lowerResultType___boxed(lean_object* x_1, lean_object* x_2, lean_object* x_3, lean_object* x_4, lean_object* x_5, lean_object* x_6) { +_start: +{ +lean_object* x_7; +x_7 = l_Lean_IR_ToIR_lowerResultType(x_1, x_2, x_3, x_4, x_5, x_6); +lean_dec(x_1); +return x_7; +} +} +LEAN_EXPORT lean_object* l_Lean_IR_ToIR_lowerDecl(lean_object* x_1, lean_object* x_2, lean_object* x_3, lean_object* x_4, lean_object* x_5) { +_start: +{ +lean_object* x_6; lean_object* x_7; lean_object* x_8; lean_object* x_9; size_t x_10; size_t x_11; lean_object* x_12; +x_6 = lean_ctor_get(x_1, 0); +lean_inc(x_6); +x_7 = 
lean_ctor_get(x_1, 2); +lean_inc(x_7); +x_8 = lean_ctor_get(x_1, 3); +lean_inc(x_8); +x_9 = lean_ctor_get(x_1, 4); +lean_inc(x_9); +lean_dec(x_1); +x_10 = lean_array_size(x_8); +x_11 = 0; +lean_inc(x_4); +lean_inc(x_3); +lean_inc(x_8); +x_12 = l_Array_mapMUnsafe_map___at_Lean_IR_ToIR_lowerCode___spec__1(x_10, x_11, x_8, x_2, x_3, x_4, x_5); +if (lean_obj_tag(x_12) == 0) +{ +lean_object* x_13; lean_object* x_14; lean_object* x_15; lean_object* x_16; lean_object* x_17; lean_object* x_18; +x_13 = lean_ctor_get(x_12, 0); +lean_inc(x_13); +x_14 = lean_ctor_get(x_12, 1); +lean_inc(x_14); +lean_dec(x_12); +x_15 = lean_ctor_get(x_13, 0); +lean_inc(x_15); +x_16 = lean_ctor_get(x_13, 1); +lean_inc(x_16); +lean_dec(x_13); +x_17 = lean_array_get_size(x_8); +lean_dec(x_8); +lean_inc(x_4); +lean_inc(x_3); +x_18 = l_Lean_IR_ToIR_lowerResultType(x_7, x_17, x_16, x_3, x_4, x_14); +lean_dec(x_7); +if (lean_obj_tag(x_18) == 0) +{ +lean_object* x_19; +x_19 = lean_ctor_get(x_18, 0); +lean_inc(x_19); +if (lean_obj_tag(x_9) == 0) +{ +lean_object* x_20; lean_object* x_21; lean_object* x_22; uint8_t x_23; +x_20 = lean_ctor_get(x_18, 1); +lean_inc(x_20); +lean_dec(x_18); +x_21 = lean_ctor_get(x_19, 0); +lean_inc(x_21); +x_22 = lean_ctor_get(x_19, 1); +lean_inc(x_22); +lean_dec(x_19); +x_23 = !lean_is_exclusive(x_9); +if (x_23 == 0) +{ +lean_object* x_24; lean_object* x_25; +x_24 = lean_ctor_get(x_9, 0); +x_25 = l_Lean_IR_ToIR_lowerCode(x_24, x_22, x_3, x_4, x_20); +if (lean_obj_tag(x_25) == 0) +{ +uint8_t x_26; +x_26 = !lean_is_exclusive(x_25); +if (x_26 == 0) +{ +lean_object* x_27; uint8_t x_28; +x_27 = lean_ctor_get(x_25, 0); +x_28 = !lean_is_exclusive(x_27); +if (x_28 == 0) +{ +lean_object* x_29; lean_object* x_30; lean_object* x_31; +x_29 = lean_ctor_get(x_27, 0); +x_30 = lean_box(0); +x_31 = lean_alloc_ctor(0, 5, 0); +lean_ctor_set(x_31, 0, x_6); +lean_ctor_set(x_31, 1, x_15); +lean_ctor_set(x_31, 2, x_21); +lean_ctor_set(x_31, 3, x_29); +lean_ctor_set(x_31, 4, x_30); +lean_ctor_set_tag(x_9, 1); +lean_ctor_set(x_9, 0, x_31); +lean_ctor_set(x_27, 0, x_9); +return x_25; +} +else +{ +lean_object* x_32; lean_object* x_33; lean_object* x_34; lean_object* x_35; lean_object* x_36; +x_32 = lean_ctor_get(x_27, 0); +x_33 = lean_ctor_get(x_27, 1); +lean_inc(x_33); +lean_inc(x_32); +lean_dec(x_27); +x_34 = lean_box(0); +x_35 = lean_alloc_ctor(0, 5, 0); +lean_ctor_set(x_35, 0, x_6); +lean_ctor_set(x_35, 1, x_15); +lean_ctor_set(x_35, 2, x_21); +lean_ctor_set(x_35, 3, x_32); +lean_ctor_set(x_35, 4, x_34); +lean_ctor_set_tag(x_9, 1); +lean_ctor_set(x_9, 0, x_35); +x_36 = lean_alloc_ctor(0, 2, 0); +lean_ctor_set(x_36, 0, x_9); +lean_ctor_set(x_36, 1, x_33); +lean_ctor_set(x_25, 0, x_36); +return x_25; +} +} +else +{ +lean_object* x_37; lean_object* x_38; lean_object* x_39; lean_object* x_40; lean_object* x_41; lean_object* x_42; lean_object* x_43; lean_object* x_44; lean_object* x_45; +x_37 = lean_ctor_get(x_25, 0); +x_38 = lean_ctor_get(x_25, 1); +lean_inc(x_38); +lean_inc(x_37); +lean_dec(x_25); +x_39 = lean_ctor_get(x_37, 0); +lean_inc(x_39); +x_40 = lean_ctor_get(x_37, 1); +lean_inc(x_40); +if (lean_is_exclusive(x_37)) { + lean_ctor_release(x_37, 0); + lean_ctor_release(x_37, 1); + x_41 = x_37; +} else { + lean_dec_ref(x_37); + x_41 = lean_box(0); +} +x_42 = lean_box(0); +x_43 = lean_alloc_ctor(0, 5, 0); +lean_ctor_set(x_43, 0, x_6); +lean_ctor_set(x_43, 1, x_15); +lean_ctor_set(x_43, 2, x_21); +lean_ctor_set(x_43, 3, x_39); +lean_ctor_set(x_43, 4, x_42); +lean_ctor_set_tag(x_9, 1); +lean_ctor_set(x_9, 0, x_43); +if 
(lean_is_scalar(x_41)) { + x_44 = lean_alloc_ctor(0, 2, 0); +} else { + x_44 = x_41; +} +lean_ctor_set(x_44, 0, x_9); +lean_ctor_set(x_44, 1, x_40); +x_45 = lean_alloc_ctor(0, 2, 0); +lean_ctor_set(x_45, 0, x_44); +lean_ctor_set(x_45, 1, x_38); +return x_45; +} +} +else +{ +uint8_t x_46; +lean_free_object(x_9); +lean_dec(x_21); +lean_dec(x_15); +lean_dec(x_6); +x_46 = !lean_is_exclusive(x_25); +if (x_46 == 0) +{ +return x_25; +} +else +{ +lean_object* x_47; lean_object* x_48; lean_object* x_49; +x_47 = lean_ctor_get(x_25, 0); +x_48 = lean_ctor_get(x_25, 1); +lean_inc(x_48); +lean_inc(x_47); +lean_dec(x_25); +x_49 = lean_alloc_ctor(1, 2, 0); +lean_ctor_set(x_49, 0, x_47); +lean_ctor_set(x_49, 1, x_48); +return x_49; +} +} +} +else +{ +lean_object* x_50; lean_object* x_51; +x_50 = lean_ctor_get(x_9, 0); +lean_inc(x_50); +lean_dec(x_9); +x_51 = l_Lean_IR_ToIR_lowerCode(x_50, x_22, x_3, x_4, x_20); +if (lean_obj_tag(x_51) == 0) +{ +lean_object* x_52; lean_object* x_53; lean_object* x_54; lean_object* x_55; lean_object* x_56; lean_object* x_57; lean_object* x_58; lean_object* x_59; lean_object* x_60; lean_object* x_61; lean_object* x_62; +x_52 = lean_ctor_get(x_51, 0); +lean_inc(x_52); +x_53 = lean_ctor_get(x_51, 1); +lean_inc(x_53); +if (lean_is_exclusive(x_51)) { + lean_ctor_release(x_51, 0); + lean_ctor_release(x_51, 1); + x_54 = x_51; +} else { + lean_dec_ref(x_51); + x_54 = lean_box(0); +} +x_55 = lean_ctor_get(x_52, 0); +lean_inc(x_55); +x_56 = lean_ctor_get(x_52, 1); +lean_inc(x_56); +if (lean_is_exclusive(x_52)) { + lean_ctor_release(x_52, 0); + lean_ctor_release(x_52, 1); + x_57 = x_52; +} else { + lean_dec_ref(x_52); + x_57 = lean_box(0); +} +x_58 = lean_box(0); +x_59 = lean_alloc_ctor(0, 5, 0); +lean_ctor_set(x_59, 0, x_6); +lean_ctor_set(x_59, 1, x_15); +lean_ctor_set(x_59, 2, x_21); +lean_ctor_set(x_59, 3, x_55); +lean_ctor_set(x_59, 4, x_58); +x_60 = lean_alloc_ctor(1, 1, 0); +lean_ctor_set(x_60, 0, x_59); +if (lean_is_scalar(x_57)) { + x_61 = lean_alloc_ctor(0, 2, 0); +} else { + x_61 = x_57; +} +lean_ctor_set(x_61, 0, x_60); +lean_ctor_set(x_61, 1, x_56); +if (lean_is_scalar(x_54)) { + x_62 = lean_alloc_ctor(0, 2, 0); +} else { + x_62 = x_54; +} +lean_ctor_set(x_62, 0, x_61); +lean_ctor_set(x_62, 1, x_53); +return x_62; +} +else +{ +lean_object* x_63; lean_object* x_64; lean_object* x_65; lean_object* x_66; +lean_dec(x_21); +lean_dec(x_15); +lean_dec(x_6); +x_63 = lean_ctor_get(x_51, 0); +lean_inc(x_63); +x_64 = lean_ctor_get(x_51, 1); +lean_inc(x_64); +if (lean_is_exclusive(x_51)) { + lean_ctor_release(x_51, 0); + lean_ctor_release(x_51, 1); + x_65 = x_51; +} else { + lean_dec_ref(x_51); + x_65 = lean_box(0); +} +if (lean_is_scalar(x_65)) { + x_66 = lean_alloc_ctor(1, 2, 0); +} else { + x_66 = x_65; +} +lean_ctor_set(x_66, 0, x_63); +lean_ctor_set(x_66, 1, x_64); +return x_66; +} +} +} +else +{ +uint8_t x_67; +x_67 = !lean_is_exclusive(x_18); +if (x_67 == 0) +{ +lean_object* x_68; lean_object* x_69; uint8_t x_70; +x_68 = lean_ctor_get(x_18, 1); +x_69 = lean_ctor_get(x_18, 0); +lean_dec(x_69); +x_70 = !lean_is_exclusive(x_19); +if (x_70 == 0) +{ +uint8_t x_71; +x_71 = !lean_is_exclusive(x_9); +if (x_71 == 0) +{ +lean_object* x_72; lean_object* x_73; lean_object* x_74; lean_object* x_75; uint8_t x_76; +x_72 = lean_ctor_get(x_19, 0); +x_73 = lean_ctor_get(x_19, 1); +x_74 = lean_ctor_get(x_9, 0); +x_75 = lean_ctor_get(x_74, 1); +lean_inc(x_75); +x_76 = l_List_isEmpty___rarg(x_75); +lean_dec(x_75); +if (x_76 == 0) +{ +lean_object* x_77; +lean_dec(x_4); +lean_dec(x_3); +x_77 = 
lean_alloc_ctor(1, 4, 0); +lean_ctor_set(x_77, 0, x_6); +lean_ctor_set(x_77, 1, x_15); +lean_ctor_set(x_77, 2, x_72); +lean_ctor_set(x_77, 3, x_74); +lean_ctor_set(x_9, 0, x_77); +lean_ctor_set(x_19, 0, x_9); +return x_18; +} +else +{ +lean_object* x_78; lean_object* x_79; uint8_t x_80; +lean_free_object(x_9); +lean_dec(x_74); +lean_free_object(x_19); +lean_free_object(x_18); +x_78 = lean_ir_mk_dummy_extern_decl(x_6, x_15, x_72); +x_79 = l_Lean_IR_ToIR_addDecl(x_78, x_73, x_3, x_4, x_68); +lean_dec(x_4); +lean_dec(x_3); +x_80 = !lean_is_exclusive(x_79); +if (x_80 == 0) +{ +lean_object* x_81; uint8_t x_82; +x_81 = lean_ctor_get(x_79, 0); +x_82 = !lean_is_exclusive(x_81); +if (x_82 == 0) +{ +lean_object* x_83; lean_object* x_84; +x_83 = lean_ctor_get(x_81, 0); +lean_dec(x_83); +x_84 = lean_box(0); +lean_ctor_set(x_81, 0, x_84); +return x_79; +} +else +{ +lean_object* x_85; lean_object* x_86; lean_object* x_87; +x_85 = lean_ctor_get(x_81, 1); +lean_inc(x_85); +lean_dec(x_81); +x_86 = lean_box(0); +x_87 = lean_alloc_ctor(0, 2, 0); +lean_ctor_set(x_87, 0, x_86); +lean_ctor_set(x_87, 1, x_85); +lean_ctor_set(x_79, 0, x_87); +return x_79; +} +} +else +{ +lean_object* x_88; lean_object* x_89; lean_object* x_90; lean_object* x_91; lean_object* x_92; lean_object* x_93; lean_object* x_94; +x_88 = lean_ctor_get(x_79, 0); +x_89 = lean_ctor_get(x_79, 1); +lean_inc(x_89); +lean_inc(x_88); +lean_dec(x_79); +x_90 = lean_ctor_get(x_88, 1); +lean_inc(x_90); +if (lean_is_exclusive(x_88)) { + lean_ctor_release(x_88, 0); + lean_ctor_release(x_88, 1); + x_91 = x_88; +} else { + lean_dec_ref(x_88); + x_91 = lean_box(0); +} +x_92 = lean_box(0); +if (lean_is_scalar(x_91)) { + x_93 = lean_alloc_ctor(0, 2, 0); +} else { + x_93 = x_91; +} +lean_ctor_set(x_93, 0, x_92); +lean_ctor_set(x_93, 1, x_90); +x_94 = lean_alloc_ctor(0, 2, 0); +lean_ctor_set(x_94, 0, x_93); +lean_ctor_set(x_94, 1, x_89); +return x_94; +} +} +} +else +{ +lean_object* x_95; lean_object* x_96; lean_object* x_97; lean_object* x_98; uint8_t x_99; +x_95 = lean_ctor_get(x_19, 0); +x_96 = lean_ctor_get(x_19, 1); +x_97 = lean_ctor_get(x_9, 0); +lean_inc(x_97); +lean_dec(x_9); +x_98 = lean_ctor_get(x_97, 1); +lean_inc(x_98); +x_99 = l_List_isEmpty___rarg(x_98); +lean_dec(x_98); +if (x_99 == 0) +{ +lean_object* x_100; lean_object* x_101; +lean_dec(x_4); +lean_dec(x_3); +x_100 = lean_alloc_ctor(1, 4, 0); +lean_ctor_set(x_100, 0, x_6); +lean_ctor_set(x_100, 1, x_15); +lean_ctor_set(x_100, 2, x_95); +lean_ctor_set(x_100, 3, x_97); +x_101 = lean_alloc_ctor(1, 1, 0); +lean_ctor_set(x_101, 0, x_100); +lean_ctor_set(x_19, 0, x_101); +return x_18; +} +else +{ +lean_object* x_102; lean_object* x_103; lean_object* x_104; lean_object* x_105; lean_object* x_106; lean_object* x_107; lean_object* x_108; lean_object* x_109; lean_object* x_110; lean_object* x_111; +lean_dec(x_97); +lean_free_object(x_19); +lean_free_object(x_18); +x_102 = lean_ir_mk_dummy_extern_decl(x_6, x_15, x_95); +x_103 = l_Lean_IR_ToIR_addDecl(x_102, x_96, x_3, x_4, x_68); +lean_dec(x_4); +lean_dec(x_3); +x_104 = lean_ctor_get(x_103, 0); +lean_inc(x_104); +x_105 = lean_ctor_get(x_103, 1); +lean_inc(x_105); +if (lean_is_exclusive(x_103)) { + lean_ctor_release(x_103, 0); + lean_ctor_release(x_103, 1); + x_106 = x_103; +} else { + lean_dec_ref(x_103); + x_106 = lean_box(0); +} +x_107 = lean_ctor_get(x_104, 1); +lean_inc(x_107); +if (lean_is_exclusive(x_104)) { + lean_ctor_release(x_104, 0); + lean_ctor_release(x_104, 1); + x_108 = x_104; +} else { + lean_dec_ref(x_104); + x_108 = lean_box(0); +} +x_109 
= lean_box(0); +if (lean_is_scalar(x_108)) { + x_110 = lean_alloc_ctor(0, 2, 0); +} else { + x_110 = x_108; +} +lean_ctor_set(x_110, 0, x_109); +lean_ctor_set(x_110, 1, x_107); +if (lean_is_scalar(x_106)) { + x_111 = lean_alloc_ctor(0, 2, 0); +} else { + x_111 = x_106; +} +lean_ctor_set(x_111, 0, x_110); +lean_ctor_set(x_111, 1, x_105); +return x_111; +} +} +} +else +{ +lean_object* x_112; lean_object* x_113; lean_object* x_114; lean_object* x_115; lean_object* x_116; uint8_t x_117; +x_112 = lean_ctor_get(x_19, 0); +x_113 = lean_ctor_get(x_19, 1); +lean_inc(x_113); +lean_inc(x_112); +lean_dec(x_19); +x_114 = lean_ctor_get(x_9, 0); +lean_inc(x_114); +if (lean_is_exclusive(x_9)) { + lean_ctor_release(x_9, 0); + x_115 = x_9; +} else { + lean_dec_ref(x_9); + x_115 = lean_box(0); +} +x_116 = lean_ctor_get(x_114, 1); +lean_inc(x_116); +x_117 = l_List_isEmpty___rarg(x_116); +lean_dec(x_116); +if (x_117 == 0) +{ +lean_object* x_118; lean_object* x_119; lean_object* x_120; +lean_dec(x_4); +lean_dec(x_3); +x_118 = lean_alloc_ctor(1, 4, 0); +lean_ctor_set(x_118, 0, x_6); +lean_ctor_set(x_118, 1, x_15); +lean_ctor_set(x_118, 2, x_112); +lean_ctor_set(x_118, 3, x_114); +if (lean_is_scalar(x_115)) { + x_119 = lean_alloc_ctor(1, 1, 0); +} else { + x_119 = x_115; +} +lean_ctor_set(x_119, 0, x_118); +x_120 = lean_alloc_ctor(0, 2, 0); +lean_ctor_set(x_120, 0, x_119); +lean_ctor_set(x_120, 1, x_113); +lean_ctor_set(x_18, 0, x_120); +return x_18; +} +else +{ +lean_object* x_121; lean_object* x_122; lean_object* x_123; lean_object* x_124; lean_object* x_125; lean_object* x_126; lean_object* x_127; lean_object* x_128; lean_object* x_129; lean_object* x_130; +lean_dec(x_115); +lean_dec(x_114); +lean_free_object(x_18); +x_121 = lean_ir_mk_dummy_extern_decl(x_6, x_15, x_112); +x_122 = l_Lean_IR_ToIR_addDecl(x_121, x_113, x_3, x_4, x_68); +lean_dec(x_4); +lean_dec(x_3); +x_123 = lean_ctor_get(x_122, 0); +lean_inc(x_123); +x_124 = lean_ctor_get(x_122, 1); +lean_inc(x_124); +if (lean_is_exclusive(x_122)) { + lean_ctor_release(x_122, 0); + lean_ctor_release(x_122, 1); + x_125 = x_122; +} else { + lean_dec_ref(x_122); + x_125 = lean_box(0); +} +x_126 = lean_ctor_get(x_123, 1); +lean_inc(x_126); +if (lean_is_exclusive(x_123)) { + lean_ctor_release(x_123, 0); + lean_ctor_release(x_123, 1); + x_127 = x_123; +} else { + lean_dec_ref(x_123); + x_127 = lean_box(0); +} +x_128 = lean_box(0); +if (lean_is_scalar(x_127)) { + x_129 = lean_alloc_ctor(0, 2, 0); +} else { + x_129 = x_127; +} +lean_ctor_set(x_129, 0, x_128); +lean_ctor_set(x_129, 1, x_126); +if (lean_is_scalar(x_125)) { + x_130 = lean_alloc_ctor(0, 2, 0); +} else { + x_130 = x_125; +} +lean_ctor_set(x_130, 0, x_129); +lean_ctor_set(x_130, 1, x_124); +return x_130; +} +} +} +else +{ +lean_object* x_131; lean_object* x_132; lean_object* x_133; lean_object* x_134; lean_object* x_135; lean_object* x_136; lean_object* x_137; uint8_t x_138; +x_131 = lean_ctor_get(x_18, 1); +lean_inc(x_131); +lean_dec(x_18); +x_132 = lean_ctor_get(x_19, 0); +lean_inc(x_132); +x_133 = lean_ctor_get(x_19, 1); +lean_inc(x_133); +if (lean_is_exclusive(x_19)) { + lean_ctor_release(x_19, 0); + lean_ctor_release(x_19, 1); + x_134 = x_19; +} else { + lean_dec_ref(x_19); + x_134 = lean_box(0); +} +x_135 = lean_ctor_get(x_9, 0); +lean_inc(x_135); +if (lean_is_exclusive(x_9)) { + lean_ctor_release(x_9, 0); + x_136 = x_9; +} else { + lean_dec_ref(x_9); + x_136 = lean_box(0); +} +x_137 = lean_ctor_get(x_135, 1); +lean_inc(x_137); +x_138 = l_List_isEmpty___rarg(x_137); +lean_dec(x_137); +if (x_138 == 0) 
+{ +lean_object* x_139; lean_object* x_140; lean_object* x_141; lean_object* x_142; +lean_dec(x_4); +lean_dec(x_3); +x_139 = lean_alloc_ctor(1, 4, 0); +lean_ctor_set(x_139, 0, x_6); +lean_ctor_set(x_139, 1, x_15); +lean_ctor_set(x_139, 2, x_132); +lean_ctor_set(x_139, 3, x_135); +if (lean_is_scalar(x_136)) { + x_140 = lean_alloc_ctor(1, 1, 0); +} else { + x_140 = x_136; +} +lean_ctor_set(x_140, 0, x_139); +if (lean_is_scalar(x_134)) { + x_141 = lean_alloc_ctor(0, 2, 0); +} else { + x_141 = x_134; +} +lean_ctor_set(x_141, 0, x_140); +lean_ctor_set(x_141, 1, x_133); +x_142 = lean_alloc_ctor(0, 2, 0); +lean_ctor_set(x_142, 0, x_141); +lean_ctor_set(x_142, 1, x_131); +return x_142; +} +else +{ +lean_object* x_143; lean_object* x_144; lean_object* x_145; lean_object* x_146; lean_object* x_147; lean_object* x_148; lean_object* x_149; lean_object* x_150; lean_object* x_151; lean_object* x_152; +lean_dec(x_136); +lean_dec(x_135); +lean_dec(x_134); +x_143 = lean_ir_mk_dummy_extern_decl(x_6, x_15, x_132); +x_144 = l_Lean_IR_ToIR_addDecl(x_143, x_133, x_3, x_4, x_131); +lean_dec(x_4); +lean_dec(x_3); +x_145 = lean_ctor_get(x_144, 0); +lean_inc(x_145); +x_146 = lean_ctor_get(x_144, 1); +lean_inc(x_146); +if (lean_is_exclusive(x_144)) { + lean_ctor_release(x_144, 0); + lean_ctor_release(x_144, 1); + x_147 = x_144; +} else { + lean_dec_ref(x_144); + x_147 = lean_box(0); +} +x_148 = lean_ctor_get(x_145, 1); +lean_inc(x_148); +if (lean_is_exclusive(x_145)) { + lean_ctor_release(x_145, 0); + lean_ctor_release(x_145, 1); + x_149 = x_145; +} else { + lean_dec_ref(x_145); + x_149 = lean_box(0); +} +x_150 = lean_box(0); +if (lean_is_scalar(x_149)) { + x_151 = lean_alloc_ctor(0, 2, 0); +} else { + x_151 = x_149; +} +lean_ctor_set(x_151, 0, x_150); +lean_ctor_set(x_151, 1, x_148); +if (lean_is_scalar(x_147)) { + x_152 = lean_alloc_ctor(0, 2, 0); +} else { + x_152 = x_147; +} +lean_ctor_set(x_152, 0, x_151); +lean_ctor_set(x_152, 1, x_146); +return x_152; +} +} +} +} +else +{ +uint8_t x_153; +lean_dec(x_15); +lean_dec(x_9); +lean_dec(x_6); +lean_dec(x_4); +lean_dec(x_3); +x_153 = !lean_is_exclusive(x_18); +if (x_153 == 0) +{ +return x_18; +} +else +{ +lean_object* x_154; lean_object* x_155; lean_object* x_156; +x_154 = lean_ctor_get(x_18, 0); +x_155 = lean_ctor_get(x_18, 1); +lean_inc(x_155); +lean_inc(x_154); +lean_dec(x_18); +x_156 = lean_alloc_ctor(1, 2, 0); +lean_ctor_set(x_156, 0, x_154); +lean_ctor_set(x_156, 1, x_155); +return x_156; +} +} +} +else +{ +uint8_t x_157; +lean_dec(x_9); +lean_dec(x_8); +lean_dec(x_7); +lean_dec(x_6); +lean_dec(x_4); +lean_dec(x_3); +x_157 = !lean_is_exclusive(x_12); +if (x_157 == 0) +{ +return x_12; +} +else +{ +lean_object* x_158; lean_object* x_159; lean_object* x_160; +x_158 = lean_ctor_get(x_12, 0); +x_159 = lean_ctor_get(x_12, 1); +lean_inc(x_159); +lean_inc(x_158); +lean_dec(x_12); +x_160 = lean_alloc_ctor(1, 2, 0); +lean_ctor_set(x_160, 0, x_158); +lean_ctor_set(x_160, 1, x_159); +return x_160; +} +} +} +} +LEAN_EXPORT lean_object* l_Array_forIn_x27Unsafe_loop___at_Lean_IR_toIR___spec__1(lean_object* x_1, lean_object* x_2, lean_object* x_3, size_t x_4, size_t x_5, lean_object* x_6, lean_object* x_7, lean_object* x_8, lean_object* x_9) { +_start: +{ +uint8_t x_10; +x_10 = lean_usize_dec_lt(x_5, x_4); +if (x_10 == 0) +{ +lean_object* x_11; +lean_dec(x_8); +lean_dec(x_7); +x_11 = lean_alloc_ctor(0, 2, 0); +lean_ctor_set(x_11, 0, x_6); +lean_ctor_set(x_11, 1, x_9); +return x_11; +} +else +{ +lean_object* x_12; lean_object* x_13; lean_object* x_14; +x_12 = 
lean_array_uget(x_3, x_5); +x_13 = lean_alloc_closure((void*)(l_Lean_IR_ToIR_lowerDecl), 5, 1); +lean_closure_set(x_13, 0, x_12); +lean_inc(x_8); +lean_inc(x_7); +x_14 = l_Lean_IR_ToIR_M_run___rarg(x_13, x_7, x_8, x_9); +if (lean_obj_tag(x_14) == 0) +{ +lean_object* x_15; +x_15 = lean_ctor_get(x_14, 0); +lean_inc(x_15); +if (lean_obj_tag(x_15) == 0) +{ +lean_object* x_16; size_t x_17; size_t x_18; +x_16 = lean_ctor_get(x_14, 1); +lean_inc(x_16); +lean_dec(x_14); +x_17 = 1; +x_18 = lean_usize_add(x_5, x_17); +x_5 = x_18; +x_9 = x_16; +goto _start; +} +else +{ +lean_object* x_20; lean_object* x_21; lean_object* x_22; size_t x_23; size_t x_24; +x_20 = lean_ctor_get(x_14, 1); +lean_inc(x_20); +lean_dec(x_14); +x_21 = lean_ctor_get(x_15, 0); +lean_inc(x_21); +lean_dec(x_15); +x_22 = lean_array_push(x_6, x_21); +x_23 = 1; +x_24 = lean_usize_add(x_5, x_23); +x_5 = x_24; +x_6 = x_22; +x_9 = x_20; +goto _start; +} +} +else +{ +uint8_t x_26; +lean_dec(x_8); +lean_dec(x_7); +lean_dec(x_6); +x_26 = !lean_is_exclusive(x_14); +if (x_26 == 0) +{ +return x_14; +} +else +{ +lean_object* x_27; lean_object* x_28; lean_object* x_29; +x_27 = lean_ctor_get(x_14, 0); +x_28 = lean_ctor_get(x_14, 1); +lean_inc(x_28); +lean_inc(x_27); +lean_dec(x_14); +x_29 = lean_alloc_ctor(1, 2, 0); +lean_ctor_set(x_29, 0, x_27); +lean_ctor_set(x_29, 1, x_28); +return x_29; +} +} +} +} +} +LEAN_EXPORT lean_object* l_Lean_IR_toIR(lean_object* x_1, lean_object* x_2, lean_object* x_3, lean_object* x_4) { +_start: +{ +lean_object* x_5; size_t x_6; size_t x_7; lean_object* x_8; lean_object* x_9; +x_5 = lean_box(0); +x_6 = lean_array_size(x_1); +x_7 = 0; +x_8 = l_Lean_IR_ToIR_lowerLet___closed__27; +x_9 = l_Array_forIn_x27Unsafe_loop___at_Lean_IR_toIR___spec__1(x_1, x_5, x_1, x_6, x_7, x_8, x_2, x_3, x_4); +if (lean_obj_tag(x_9) == 0) +{ +uint8_t x_10; +x_10 = !lean_is_exclusive(x_9); +if (x_10 == 0) +{ +return x_9; +} +else +{ +lean_object* x_11; lean_object* x_12; lean_object* x_13; +x_11 = lean_ctor_get(x_9, 0); +x_12 = lean_ctor_get(x_9, 1); +lean_inc(x_12); +lean_inc(x_11); +lean_dec(x_9); +x_13 = lean_alloc_ctor(0, 2, 0); +lean_ctor_set(x_13, 0, x_11); +lean_ctor_set(x_13, 1, x_12); +return x_13; +} +} +else +{ +uint8_t x_14; +x_14 = !lean_is_exclusive(x_9); +if (x_14 == 0) +{ +return x_9; +} +else +{ +lean_object* x_15; lean_object* x_16; lean_object* x_17; +x_15 = lean_ctor_get(x_9, 0); +x_16 = lean_ctor_get(x_9, 1); +lean_inc(x_16); +lean_inc(x_15); +lean_dec(x_9); +x_17 = lean_alloc_ctor(1, 2, 0); +lean_ctor_set(x_17, 0, x_15); +lean_ctor_set(x_17, 1, x_16); +return x_17; +} +} +} +} +LEAN_EXPORT lean_object* l_Array_forIn_x27Unsafe_loop___at_Lean_IR_toIR___spec__1___boxed(lean_object* x_1, lean_object* x_2, lean_object* x_3, lean_object* x_4, lean_object* x_5, lean_object* x_6, lean_object* x_7, lean_object* x_8, lean_object* x_9) { +_start: +{ +size_t x_10; size_t x_11; lean_object* x_12; +x_10 = lean_unbox_usize(x_4); +lean_dec(x_4); +x_11 = lean_unbox_usize(x_5); +lean_dec(x_5); +x_12 = l_Array_forIn_x27Unsafe_loop___at_Lean_IR_toIR___spec__1(x_1, x_2, x_3, x_10, x_11, x_6, x_7, x_8, x_9); +lean_dec(x_3); +lean_dec(x_2); +lean_dec(x_1); +return x_12; +} +} +LEAN_EXPORT lean_object* l_Lean_IR_toIR___boxed(lean_object* x_1, lean_object* x_2, lean_object* x_3, lean_object* x_4) { +_start: +{ +lean_object* x_5; +x_5 = l_Lean_IR_toIR(x_1, x_2, x_3, x_4); +lean_dec(x_1); +return x_5; +} +} +lean_object* initialize_Lean_Compiler_LCNF_Basic(uint8_t builtin, lean_object*); +lean_object* 
initialize_Lean_Compiler_LCNF_CompilerM(uint8_t builtin, lean_object*); +lean_object* initialize_Lean_Compiler_LCNF_PhaseExt(uint8_t builtin, lean_object*); +lean_object* initialize_Lean_Compiler_IR_Basic(uint8_t builtin, lean_object*); +lean_object* initialize_Lean_Compiler_IR_CompilerM(uint8_t builtin, lean_object*); +lean_object* initialize_Lean_Compiler_IR_CtorLayout(uint8_t builtin, lean_object*); +lean_object* initialize_Lean_CoreM(uint8_t builtin, lean_object*); +lean_object* initialize_Lean_Environment(uint8_t builtin, lean_object*); +static bool _G_initialized = false; +LEAN_EXPORT lean_object* initialize_Lean_Compiler_IR_ToIR(uint8_t builtin, lean_object* w) { +lean_object * res; +if (_G_initialized) return lean_io_result_mk_ok(lean_box(0)); +_G_initialized = true; +res = initialize_Lean_Compiler_LCNF_Basic(builtin, lean_io_mk_world()); +if (lean_io_result_is_error(res)) return res; +lean_dec_ref(res); +res = initialize_Lean_Compiler_LCNF_CompilerM(builtin, lean_io_mk_world()); +if (lean_io_result_is_error(res)) return res; +lean_dec_ref(res); +res = initialize_Lean_Compiler_LCNF_PhaseExt(builtin, lean_io_mk_world()); +if (lean_io_result_is_error(res)) return res; +lean_dec_ref(res); +res = initialize_Lean_Compiler_IR_Basic(builtin, lean_io_mk_world()); +if (lean_io_result_is_error(res)) return res; +lean_dec_ref(res); +res = initialize_Lean_Compiler_IR_CompilerM(builtin, lean_io_mk_world()); +if (lean_io_result_is_error(res)) return res; +lean_dec_ref(res); +res = initialize_Lean_Compiler_IR_CtorLayout(builtin, lean_io_mk_world()); +if (lean_io_result_is_error(res)) return res; +lean_dec_ref(res); +res = initialize_Lean_CoreM(builtin, lean_io_mk_world()); +if (lean_io_result_is_error(res)) return res; +lean_dec_ref(res); +res = initialize_Lean_Environment(builtin, lean_io_mk_world()); +if (lean_io_result_is_error(res)) return res; +lean_dec_ref(res); +l_Lean_IR_ToIR_M_run___rarg___closed__1 = _init_l_Lean_IR_ToIR_M_run___rarg___closed__1(); +lean_mark_persistent(l_Lean_IR_ToIR_M_run___rarg___closed__1); +l_Lean_IR_ToIR_M_run___rarg___closed__2 = _init_l_Lean_IR_ToIR_M_run___rarg___closed__2(); +lean_mark_persistent(l_Lean_IR_ToIR_M_run___rarg___closed__2); +l_Lean_IR_ToIR_M_run___rarg___closed__3 = _init_l_Lean_IR_ToIR_M_run___rarg___closed__3(); +lean_mark_persistent(l_Lean_IR_ToIR_M_run___rarg___closed__3); +l_Lean_IR_ToIR_M_run___rarg___closed__4 = _init_l_Lean_IR_ToIR_M_run___rarg___closed__4(); +lean_mark_persistent(l_Lean_IR_ToIR_M_run___rarg___closed__4); +l_Lean_IR_ToIR_addDecl___closed__1 = _init_l_Lean_IR_ToIR_addDecl___closed__1(); +lean_mark_persistent(l_Lean_IR_ToIR_addDecl___closed__1); +l_Lean_IR_ToIR_addDecl___closed__2 = _init_l_Lean_IR_ToIR_addDecl___closed__2(); +lean_mark_persistent(l_Lean_IR_ToIR_addDecl___closed__2); +l_Lean_IR_ToIR_addDecl___closed__3 = _init_l_Lean_IR_ToIR_addDecl___closed__3(); +lean_mark_persistent(l_Lean_IR_ToIR_addDecl___closed__3); +l_Lean_IR_ToIR_addDecl___closed__4 = _init_l_Lean_IR_ToIR_addDecl___closed__4(); +lean_mark_persistent(l_Lean_IR_ToIR_addDecl___closed__4); +l_panic___at_Lean_IR_ToIR_lowerEnumToScalarType___spec__1___closed__1 = _init_l_panic___at_Lean_IR_ToIR_lowerEnumToScalarType___spec__1___closed__1(); +lean_mark_persistent(l_panic___at_Lean_IR_ToIR_lowerEnumToScalarType___spec__1___closed__1); +l_panic___at_Lean_IR_ToIR_lowerEnumToScalarType___spec__1___closed__2 = _init_l_panic___at_Lean_IR_ToIR_lowerEnumToScalarType___spec__1___closed__2(); 
+lean_mark_persistent(l_panic___at_Lean_IR_ToIR_lowerEnumToScalarType___spec__1___closed__2); +l_List_forIn_x27_loop___at_Lean_IR_ToIR_lowerEnumToScalarType___spec__2___closed__1 = _init_l_List_forIn_x27_loop___at_Lean_IR_ToIR_lowerEnumToScalarType___spec__2___closed__1(); +lean_mark_persistent(l_List_forIn_x27_loop___at_Lean_IR_ToIR_lowerEnumToScalarType___spec__2___closed__1); +l_List_forIn_x27_loop___at_Lean_IR_ToIR_lowerEnumToScalarType___spec__2___closed__2 = _init_l_List_forIn_x27_loop___at_Lean_IR_ToIR_lowerEnumToScalarType___spec__2___closed__2(); +lean_mark_persistent(l_List_forIn_x27_loop___at_Lean_IR_ToIR_lowerEnumToScalarType___spec__2___closed__2); +l_List_forIn_x27_loop___at_Lean_IR_ToIR_lowerEnumToScalarType___spec__2___closed__3 = _init_l_List_forIn_x27_loop___at_Lean_IR_ToIR_lowerEnumToScalarType___spec__2___closed__3(); +lean_mark_persistent(l_List_forIn_x27_loop___at_Lean_IR_ToIR_lowerEnumToScalarType___spec__2___closed__3); +l_List_forIn_x27_loop___at_Lean_IR_ToIR_lowerEnumToScalarType___spec__2___closed__4 = _init_l_List_forIn_x27_loop___at_Lean_IR_ToIR_lowerEnumToScalarType___spec__2___closed__4(); +lean_mark_persistent(l_List_forIn_x27_loop___at_Lean_IR_ToIR_lowerEnumToScalarType___spec__2___closed__4); +l_List_forIn_x27_loop___at_Lean_IR_ToIR_lowerEnumToScalarType___spec__2___closed__5 = _init_l_List_forIn_x27_loop___at_Lean_IR_ToIR_lowerEnumToScalarType___spec__2___closed__5(); +lean_mark_persistent(l_List_forIn_x27_loop___at_Lean_IR_ToIR_lowerEnumToScalarType___spec__2___closed__5); +l_List_forIn_x27_loop___at_Lean_IR_ToIR_lowerEnumToScalarType___spec__2___closed__6 = _init_l_List_forIn_x27_loop___at_Lean_IR_ToIR_lowerEnumToScalarType___spec__2___closed__6(); +lean_mark_persistent(l_List_forIn_x27_loop___at_Lean_IR_ToIR_lowerEnumToScalarType___spec__2___closed__6); +l_List_forIn_x27_loop___at_Lean_IR_ToIR_lowerEnumToScalarType___spec__2___closed__7 = _init_l_List_forIn_x27_loop___at_Lean_IR_ToIR_lowerEnumToScalarType___spec__2___closed__7(); +lean_mark_persistent(l_List_forIn_x27_loop___at_Lean_IR_ToIR_lowerEnumToScalarType___spec__2___closed__7); +l_Lean_IR_ToIR_lowerEnumToScalarType___lambda__1___closed__1 = _init_l_Lean_IR_ToIR_lowerEnumToScalarType___lambda__1___closed__1(); +lean_mark_persistent(l_Lean_IR_ToIR_lowerEnumToScalarType___lambda__1___closed__1); +l_Lean_IR_ToIR_lowerEnumToScalarType___lambda__1___closed__2 = _init_l_Lean_IR_ToIR_lowerEnumToScalarType___lambda__1___closed__2(); +lean_mark_persistent(l_Lean_IR_ToIR_lowerEnumToScalarType___lambda__1___closed__2); +l_Lean_IR_ToIR_lowerEnumToScalarType___lambda__1___closed__3 = _init_l_Lean_IR_ToIR_lowerEnumToScalarType___lambda__1___closed__3(); +lean_mark_persistent(l_Lean_IR_ToIR_lowerEnumToScalarType___lambda__1___closed__3); +l_Lean_IR_ToIR_lowerEnumToScalarType___closed__1 = _init_l_Lean_IR_ToIR_lowerEnumToScalarType___closed__1(); +lean_mark_persistent(l_Lean_IR_ToIR_lowerEnumToScalarType___closed__1); +l_panic___at_Lean_IR_ToIR_lowerType___spec__1___closed__1 = _init_l_panic___at_Lean_IR_ToIR_lowerType___spec__1___closed__1(); +lean_mark_persistent(l_panic___at_Lean_IR_ToIR_lowerType___spec__1___closed__1); +l_Lean_IR_ToIR_lowerType___closed__1 = _init_l_Lean_IR_ToIR_lowerType___closed__1(); +lean_mark_persistent(l_Lean_IR_ToIR_lowerType___closed__1); +l_Lean_IR_ToIR_lowerType___closed__2 = _init_l_Lean_IR_ToIR_lowerType___closed__2(); +lean_mark_persistent(l_Lean_IR_ToIR_lowerType___closed__2); +l_Lean_IR_ToIR_lowerType___closed__3 = _init_l_Lean_IR_ToIR_lowerType___closed__3(); 
+lean_mark_persistent(l_Lean_IR_ToIR_lowerType___closed__3); +l_Lean_IR_ToIR_lowerType___closed__4 = _init_l_Lean_IR_ToIR_lowerType___closed__4(); +lean_mark_persistent(l_Lean_IR_ToIR_lowerType___closed__4); +l_Lean_IR_ToIR_lowerType___closed__5 = _init_l_Lean_IR_ToIR_lowerType___closed__5(); +lean_mark_persistent(l_Lean_IR_ToIR_lowerType___closed__5); +l_Lean_IR_ToIR_lowerType___closed__6 = _init_l_Lean_IR_ToIR_lowerType___closed__6(); +lean_mark_persistent(l_Lean_IR_ToIR_lowerType___closed__6); +l_Lean_IR_ToIR_lowerType___closed__7 = _init_l_Lean_IR_ToIR_lowerType___closed__7(); +lean_mark_persistent(l_Lean_IR_ToIR_lowerType___closed__7); +l_Lean_IR_ToIR_lowerType___closed__8 = _init_l_Lean_IR_ToIR_lowerType___closed__8(); +lean_mark_persistent(l_Lean_IR_ToIR_lowerType___closed__8); +l_Lean_IR_ToIR_lowerType___closed__9 = _init_l_Lean_IR_ToIR_lowerType___closed__9(); +lean_mark_persistent(l_Lean_IR_ToIR_lowerType___closed__9); +l_Lean_IR_ToIR_lowerType___closed__10 = _init_l_Lean_IR_ToIR_lowerType___closed__10(); +lean_mark_persistent(l_Lean_IR_ToIR_lowerType___closed__10); +l_Lean_IR_ToIR_lowerType___closed__11 = _init_l_Lean_IR_ToIR_lowerType___closed__11(); +lean_mark_persistent(l_Lean_IR_ToIR_lowerType___closed__11); +l_Lean_IR_ToIR_lowerType___closed__12 = _init_l_Lean_IR_ToIR_lowerType___closed__12(); +lean_mark_persistent(l_Lean_IR_ToIR_lowerType___closed__12); +l_panic___at_Lean_IR_ToIR_getCtorInfo___spec__1___closed__1 = _init_l_panic___at_Lean_IR_ToIR_getCtorInfo___spec__1___closed__1(); +lean_mark_persistent(l_panic___at_Lean_IR_ToIR_getCtorInfo___spec__1___closed__1); +l_panic___at_Lean_IR_ToIR_getCtorInfo___spec__1___closed__2 = _init_l_panic___at_Lean_IR_ToIR_getCtorInfo___spec__1___closed__2(); +lean_mark_persistent(l_panic___at_Lean_IR_ToIR_getCtorInfo___spec__1___closed__2); +l_panic___at_Lean_IR_ToIR_getCtorInfo___spec__1___closed__3 = _init_l_panic___at_Lean_IR_ToIR_getCtorInfo___spec__1___closed__3(); +lean_mark_persistent(l_panic___at_Lean_IR_ToIR_getCtorInfo___spec__1___closed__3); +l_Lean_IR_ToIR_getCtorInfo___closed__1 = _init_l_Lean_IR_ToIR_getCtorInfo___closed__1(); +lean_mark_persistent(l_Lean_IR_ToIR_getCtorInfo___closed__1); +l_Lean_IR_ToIR_getCtorInfo___closed__2 = _init_l_Lean_IR_ToIR_getCtorInfo___closed__2(); +lean_mark_persistent(l_Lean_IR_ToIR_getCtorInfo___closed__2); +l_Lean_IR_ToIR_getCtorInfo___closed__3 = _init_l_Lean_IR_ToIR_getCtorInfo___closed__3(); +lean_mark_persistent(l_Lean_IR_ToIR_getCtorInfo___closed__3); +l_panic___at_Lean_IR_ToIR_lowerArg___spec__2___closed__1 = _init_l_panic___at_Lean_IR_ToIR_lowerArg___spec__2___closed__1(); +lean_mark_persistent(l_panic___at_Lean_IR_ToIR_lowerArg___spec__2___closed__1); +l_Lean_IR_ToIR_lowerArg___closed__1 = _init_l_Lean_IR_ToIR_lowerArg___closed__1(); +lean_mark_persistent(l_Lean_IR_ToIR_lowerArg___closed__1); +l_Lean_IR_ToIR_lowerArg___closed__2 = _init_l_Lean_IR_ToIR_lowerArg___closed__2(); +lean_mark_persistent(l_Lean_IR_ToIR_lowerArg___closed__2); +l_Lean_IR_ToIR_lowerArg___closed__3 = _init_l_Lean_IR_ToIR_lowerArg___closed__3(); +lean_mark_persistent(l_Lean_IR_ToIR_lowerArg___closed__3); +l_Lean_IR_ToIR_instInhabitedTranslatedProj___closed__1 = _init_l_Lean_IR_ToIR_instInhabitedTranslatedProj___closed__1(); +lean_mark_persistent(l_Lean_IR_ToIR_instInhabitedTranslatedProj___closed__1); +l_Lean_IR_ToIR_instInhabitedTranslatedProj___closed__2 = _init_l_Lean_IR_ToIR_instInhabitedTranslatedProj___closed__2(); +lean_mark_persistent(l_Lean_IR_ToIR_instInhabitedTranslatedProj___closed__2); 
+l_Lean_IR_ToIR_instInhabitedTranslatedProj___closed__3 = _init_l_Lean_IR_ToIR_instInhabitedTranslatedProj___closed__3(); +lean_mark_persistent(l_Lean_IR_ToIR_instInhabitedTranslatedProj___closed__3); +l_Lean_IR_ToIR_instInhabitedTranslatedProj = _init_l_Lean_IR_ToIR_instInhabitedTranslatedProj(); +lean_mark_persistent(l_Lean_IR_ToIR_instInhabitedTranslatedProj); +l_Lean_IR_ToIR_lowerProj___closed__1 = _init_l_Lean_IR_ToIR_lowerProj___closed__1(); +lean_mark_persistent(l_Lean_IR_ToIR_lowerProj___closed__1); +l_panic___at_Lean_IR_ToIR_lowerAlt_loop___spec__1___closed__1 = _init_l_panic___at_Lean_IR_ToIR_lowerAlt_loop___spec__1___closed__1(); +lean_mark_persistent(l_panic___at_Lean_IR_ToIR_lowerAlt_loop___spec__1___closed__1); +l_Lean_IR_ToIR_lowerAlt_loop___closed__1 = _init_l_Lean_IR_ToIR_lowerAlt_loop___closed__1(); +lean_mark_persistent(l_Lean_IR_ToIR_lowerAlt_loop___closed__1); +l_Lean_IR_ToIR_lowerAlt_loop___closed__2 = _init_l_Lean_IR_ToIR_lowerAlt_loop___closed__2(); +lean_mark_persistent(l_Lean_IR_ToIR_lowerAlt_loop___closed__2); +l_Lean_IR_ToIR_lowerAlt_loop___closed__3 = _init_l_Lean_IR_ToIR_lowerAlt_loop___closed__3(); +lean_mark_persistent(l_Lean_IR_ToIR_lowerAlt_loop___closed__3); +l_Lean_IR_ToIR_lowerCode___closed__1 = _init_l_Lean_IR_ToIR_lowerCode___closed__1(); +lean_mark_persistent(l_Lean_IR_ToIR_lowerCode___closed__1); +l_Lean_IR_ToIR_lowerCode___closed__2 = _init_l_Lean_IR_ToIR_lowerCode___closed__2(); +lean_mark_persistent(l_Lean_IR_ToIR_lowerCode___closed__2); +l_Lean_IR_ToIR_lowerCode___closed__3 = _init_l_Lean_IR_ToIR_lowerCode___closed__3(); +lean_mark_persistent(l_Lean_IR_ToIR_lowerCode___closed__3); +l_Lean_IR_ToIR_lowerCode___closed__4 = _init_l_Lean_IR_ToIR_lowerCode___closed__4(); +lean_mark_persistent(l_Lean_IR_ToIR_lowerCode___closed__4); +l_Lean_IR_ToIR_lowerCode___closed__5 = _init_l_Lean_IR_ToIR_lowerCode___closed__5(); +lean_mark_persistent(l_Lean_IR_ToIR_lowerCode___closed__5); +l_Lean_IR_ToIR_lowerCode___closed__6 = _init_l_Lean_IR_ToIR_lowerCode___closed__6(); +lean_mark_persistent(l_Lean_IR_ToIR_lowerCode___closed__6); +l_Lean_IR_ToIR_lowerCode___closed__7 = _init_l_Lean_IR_ToIR_lowerCode___closed__7(); +lean_mark_persistent(l_Lean_IR_ToIR_lowerCode___closed__7); +l_Lean_IR_ToIR_lowerLet___closed__1 = _init_l_Lean_IR_ToIR_lowerLet___closed__1(); +lean_mark_persistent(l_Lean_IR_ToIR_lowerLet___closed__1); +l_Lean_IR_ToIR_lowerLet___closed__2 = _init_l_Lean_IR_ToIR_lowerLet___closed__2(); +lean_mark_persistent(l_Lean_IR_ToIR_lowerLet___closed__2); +l_Lean_IR_ToIR_lowerLet___closed__3 = _init_l_Lean_IR_ToIR_lowerLet___closed__3(); +lean_mark_persistent(l_Lean_IR_ToIR_lowerLet___closed__3); +l_Lean_IR_ToIR_lowerLet___closed__4 = _init_l_Lean_IR_ToIR_lowerLet___closed__4(); +lean_mark_persistent(l_Lean_IR_ToIR_lowerLet___closed__4); +l_Lean_IR_ToIR_lowerLet___closed__5 = _init_l_Lean_IR_ToIR_lowerLet___closed__5(); +lean_mark_persistent(l_Lean_IR_ToIR_lowerLet___closed__5); +l_Lean_IR_ToIR_lowerLet___closed__6 = _init_l_Lean_IR_ToIR_lowerLet___closed__6(); +lean_mark_persistent(l_Lean_IR_ToIR_lowerLet___closed__6); +l_Lean_IR_ToIR_lowerLet___closed__7 = _init_l_Lean_IR_ToIR_lowerLet___closed__7(); +lean_mark_persistent(l_Lean_IR_ToIR_lowerLet___closed__7); +l_Lean_IR_ToIR_lowerLet___closed__8 = _init_l_Lean_IR_ToIR_lowerLet___closed__8(); +lean_mark_persistent(l_Lean_IR_ToIR_lowerLet___closed__8); +l_Lean_IR_ToIR_lowerLet___closed__9 = _init_l_Lean_IR_ToIR_lowerLet___closed__9(); +lean_mark_persistent(l_Lean_IR_ToIR_lowerLet___closed__9); 
+l_Lean_IR_ToIR_lowerLet___closed__10 = _init_l_Lean_IR_ToIR_lowerLet___closed__10(); +lean_mark_persistent(l_Lean_IR_ToIR_lowerLet___closed__10); +l_Lean_IR_ToIR_lowerLet___closed__11 = _init_l_Lean_IR_ToIR_lowerLet___closed__11(); +lean_mark_persistent(l_Lean_IR_ToIR_lowerLet___closed__11); +l_Lean_IR_ToIR_lowerLet___closed__12 = _init_l_Lean_IR_ToIR_lowerLet___closed__12(); +lean_mark_persistent(l_Lean_IR_ToIR_lowerLet___closed__12); +l_Lean_IR_ToIR_lowerLet___closed__13 = _init_l_Lean_IR_ToIR_lowerLet___closed__13(); +lean_mark_persistent(l_Lean_IR_ToIR_lowerLet___closed__13); +l_Lean_IR_ToIR_lowerLet___closed__14 = _init_l_Lean_IR_ToIR_lowerLet___closed__14(); +lean_mark_persistent(l_Lean_IR_ToIR_lowerLet___closed__14); +l_Lean_IR_ToIR_lowerLet___closed__15 = _init_l_Lean_IR_ToIR_lowerLet___closed__15(); +lean_mark_persistent(l_Lean_IR_ToIR_lowerLet___closed__15); +l_Lean_IR_ToIR_lowerLet___closed__16 = _init_l_Lean_IR_ToIR_lowerLet___closed__16(); +lean_mark_persistent(l_Lean_IR_ToIR_lowerLet___closed__16); +l_Lean_IR_ToIR_lowerLet___closed__17 = _init_l_Lean_IR_ToIR_lowerLet___closed__17(); +lean_mark_persistent(l_Lean_IR_ToIR_lowerLet___closed__17); +l_Lean_IR_ToIR_lowerLet___closed__18 = _init_l_Lean_IR_ToIR_lowerLet___closed__18(); +lean_mark_persistent(l_Lean_IR_ToIR_lowerLet___closed__18); +l_Lean_IR_ToIR_lowerLet___closed__19 = _init_l_Lean_IR_ToIR_lowerLet___closed__19(); +lean_mark_persistent(l_Lean_IR_ToIR_lowerLet___closed__19); +l_Lean_IR_ToIR_lowerLet___closed__20 = _init_l_Lean_IR_ToIR_lowerLet___closed__20(); +lean_mark_persistent(l_Lean_IR_ToIR_lowerLet___closed__20); +l_Lean_IR_ToIR_lowerLet___closed__21 = _init_l_Lean_IR_ToIR_lowerLet___closed__21(); +lean_mark_persistent(l_Lean_IR_ToIR_lowerLet___closed__21); +l_Lean_IR_ToIR_lowerLet___closed__22 = _init_l_Lean_IR_ToIR_lowerLet___closed__22(); +lean_mark_persistent(l_Lean_IR_ToIR_lowerLet___closed__22); +l_Lean_IR_ToIR_lowerLet___closed__23 = _init_l_Lean_IR_ToIR_lowerLet___closed__23(); +lean_mark_persistent(l_Lean_IR_ToIR_lowerLet___closed__23); +l_Lean_IR_ToIR_lowerLet___closed__24 = _init_l_Lean_IR_ToIR_lowerLet___closed__24(); +lean_mark_persistent(l_Lean_IR_ToIR_lowerLet___closed__24); +l_Lean_IR_ToIR_lowerLet___closed__25 = _init_l_Lean_IR_ToIR_lowerLet___closed__25(); +lean_mark_persistent(l_Lean_IR_ToIR_lowerLet___closed__25); +l_Lean_IR_ToIR_lowerLet___closed__26 = _init_l_Lean_IR_ToIR_lowerLet___closed__26(); +lean_mark_persistent(l_Lean_IR_ToIR_lowerLet___closed__26); +l_Lean_IR_ToIR_lowerLet___closed__27 = _init_l_Lean_IR_ToIR_lowerLet___closed__27(); +lean_mark_persistent(l_Lean_IR_ToIR_lowerLet___closed__27); +l_Lean_IR_ToIR_lowerLet___closed__28 = _init_l_Lean_IR_ToIR_lowerLet___closed__28(); +lean_mark_persistent(l_Lean_IR_ToIR_lowerLet___closed__28); +l_Lean_IR_ToIR_lowerLet___closed__29 = _init_l_Lean_IR_ToIR_lowerLet___closed__29(); +lean_mark_persistent(l_Lean_IR_ToIR_lowerLet___closed__29); +l_Lean_IR_ToIR_lowerLet___closed__30 = _init_l_Lean_IR_ToIR_lowerLet___closed__30(); +lean_mark_persistent(l_Lean_IR_ToIR_lowerLet___closed__30); +l_Lean_IR_ToIR_lowerLet___closed__31 = _init_l_Lean_IR_ToIR_lowerLet___closed__31(); +lean_mark_persistent(l_Lean_IR_ToIR_lowerLet___closed__31); +l_Lean_IR_ToIR_lowerLet___closed__32 = _init_l_Lean_IR_ToIR_lowerLet___closed__32(); +lean_mark_persistent(l_Lean_IR_ToIR_lowerLet___closed__32); +l_Lean_IR_ToIR_lowerLet___closed__33 = _init_l_Lean_IR_ToIR_lowerLet___closed__33(); +lean_mark_persistent(l_Lean_IR_ToIR_lowerLet___closed__33); 
+l_Lean_IR_ToIR_lowerLet___closed__34 = _init_l_Lean_IR_ToIR_lowerLet___closed__34(); +lean_mark_persistent(l_Lean_IR_ToIR_lowerLet___closed__34); +l_Lean_IR_ToIR_lowerLet___closed__35 = _init_l_Lean_IR_ToIR_lowerLet___closed__35(); +lean_mark_persistent(l_Lean_IR_ToIR_lowerLet___closed__35); +l_Lean_IR_ToIR_lowerLet___closed__36 = _init_l_Lean_IR_ToIR_lowerLet___closed__36(); +lean_mark_persistent(l_Lean_IR_ToIR_lowerLet___closed__36); +l_Lean_IR_ToIR_lowerLet___closed__37 = _init_l_Lean_IR_ToIR_lowerLet___closed__37(); +lean_mark_persistent(l_Lean_IR_ToIR_lowerLet___closed__37); +l_Lean_IR_ToIR_lowerLet___closed__38 = _init_l_Lean_IR_ToIR_lowerLet___closed__38(); +lean_mark_persistent(l_Lean_IR_ToIR_lowerLet___closed__38); +l_Lean_IR_ToIR_lowerResultType_resultTypeForArity___closed__1 = _init_l_Lean_IR_ToIR_lowerResultType_resultTypeForArity___closed__1(); +lean_mark_persistent(l_Lean_IR_ToIR_lowerResultType_resultTypeForArity___closed__1); +l_Lean_IR_ToIR_lowerResultType_resultTypeForArity___closed__2 = _init_l_Lean_IR_ToIR_lowerResultType_resultTypeForArity___closed__2(); +lean_mark_persistent(l_Lean_IR_ToIR_lowerResultType_resultTypeForArity___closed__2); +l_Lean_IR_ToIR_lowerResultType_resultTypeForArity___closed__3 = _init_l_Lean_IR_ToIR_lowerResultType_resultTypeForArity___closed__3(); +lean_mark_persistent(l_Lean_IR_ToIR_lowerResultType_resultTypeForArity___closed__3); +l_Lean_IR_ToIR_lowerResultType_resultTypeForArity___closed__4 = _init_l_Lean_IR_ToIR_lowerResultType_resultTypeForArity___closed__4(); +lean_mark_persistent(l_Lean_IR_ToIR_lowerResultType_resultTypeForArity___closed__4); +l_Lean_IR_ToIR_lowerResultType_resultTypeForArity___closed__5 = _init_l_Lean_IR_ToIR_lowerResultType_resultTypeForArity___closed__5(); +lean_mark_persistent(l_Lean_IR_ToIR_lowerResultType_resultTypeForArity___closed__5); +return lean_io_result_mk_ok(lean_box(0)); +} +#ifdef __cplusplus +} +#endif diff --git a/stage0/stdlib/Lean/Compiler/LCNF/Main.c b/stage0/stdlib/Lean/Compiler/LCNF/Main.c index d49665bddd88..e71f7d4da512 100644 --- a/stage0/stdlib/Lean/Compiler/LCNF/Main.c +++ b/stage0/stdlib/Lean/Compiler/LCNF/Main.c @@ -1,6 +1,6 @@ // Lean compiler output // Module: Lean.Compiler.LCNF.Main -// Imports: Lean.Compiler.Options Lean.Compiler.ExternAttr Lean.Compiler.LCNF.PassManager Lean.Compiler.LCNF.Passes Lean.Compiler.LCNF.PrettyPrinter Lean.Compiler.LCNF.ToDecl Lean.Compiler.LCNF.Check Lean.Compiler.LCNF.PullLetDecls Lean.Compiler.LCNF.PhaseExt Lean.Compiler.LCNF.CSE +// Imports: Lean.Compiler.Options Lean.Compiler.ExternAttr Lean.Compiler.IR Lean.Compiler.IR.Basic Lean.Compiler.IR.Checker Lean.Compiler.IR.ToIR Lean.Compiler.LCNF.PassManager Lean.Compiler.LCNF.Passes Lean.Compiler.LCNF.PrettyPrinter Lean.Compiler.LCNF.ToDecl Lean.Compiler.LCNF.Check Lean.Compiler.LCNF.PullLetDecls Lean.Compiler.LCNF.PhaseExt Lean.Compiler.LCNF.CSE #include <lean/lean.h> #if defined(__clang__) #pragma clang diagnostic ignored "-Wunused-parameter" @@ -13,23 +13,26 @@ #ifdef __cplusplus extern "C" { #endif +LEAN_EXPORT lean_object* l_Lean_Compiler_LCNF_PassManager_run___lambda__5(lean_object*, lean_object*, size_t, lean_object*, lean_object*, lean_object*, lean_object*, lean_object*, lean_object*, lean_object*); LEAN_EXPORT lean_object* l_Lean_withTraceNode___at_Lean_Compiler_LCNF_main___spec__1___lambda__4(lean_object*, lean_object*, uint8_t, lean_object*, lean_object*, uint8_t, lean_object*, lean_object*, lean_object*, lean_object*, lean_object*); uint8_t lean_is_matcher(lean_object*, lean_object*); static 
lean_object* l_Array_forIn_x27Unsafe_loop___at_Lean_Compiler_LCNF_checkpoint___spec__1___lambda__2___closed__2; static lean_object* l_Array_forIn_x27Unsafe_loop___at_Lean_Compiler_LCNF_PassManager_run___spec__6___lambda__1___closed__11; +static lean_object* l_Array_forIn_x27Unsafe_loop___at_Lean_Compiler_LCNF_PassManager_run___spec__11___closed__2; static lean_object* l_Array_forIn_x27Unsafe_loop___at_Lean_Compiler_LCNF_checkpoint___spec__1___lambda__2___closed__4; -static lean_object* l_Lean_Compiler_LCNF_initFn____x40_Lean_Compiler_LCNF_Main___hyg_1452____closed__3; static lean_object* l_Lean_withTraceNode___at_Lean_Compiler_LCNF_PassManager_run___spec__2___lambda__4___closed__3; lean_object* l_Lean_Compiler_LCNF_toDecl(lean_object*, lean_object*, lean_object*, lean_object*, lean_object*, lean_object*); LEAN_EXPORT lean_object* l_Lean_Compiler_LCNF_main___lambda__1___boxed(lean_object*, lean_object*, lean_object*, lean_object*, lean_object*); static lean_object* l_Array_forIn_x27Unsafe_loop___at_Lean_Compiler_LCNF_PassManager_run___spec__6___lambda__1___closed__10; lean_object* l_Lean_getConstInfo___at_Lean_Meta_mkConstWithFreshMVarLevels___spec__1(lean_object*, lean_object*, lean_object*, lean_object*, lean_object*, lean_object*); -LEAN_EXPORT lean_object* l_Lean_Compiler_LCNF_PassManager_run___lambda__3___boxed(lean_object*, lean_object*, lean_object*, lean_object*, lean_object*, lean_object*, lean_object*); -LEAN_EXPORT lean_object* l_Array_forIn_x27Unsafe_loop___at_Lean_Compiler_LCNF_PassManager_run___spec__10(lean_object*, lean_object*, lean_object*, size_t, size_t, lean_object*, lean_object*, lean_object*, lean_object*, lean_object*, lean_object*); +static lean_object* l_Array_forIn_x27Unsafe_loop___at_Lean_Compiler_LCNF_PassManager_run___spec__11___closed__4; +LEAN_EXPORT lean_object* l_Lean_Compiler_LCNF_PassManager_run___lambda__3___boxed(lean_object*, lean_object*, lean_object*, lean_object*, lean_object*, lean_object*, lean_object*, lean_object*); static lean_object* l_Array_forIn_x27Unsafe_loop___at_Lean_Compiler_LCNF_checkpoint___spec__1___closed__4; lean_object* lean_mk_empty_array_with_capacity(lean_object*); static lean_object* l_Lean_Compiler_LCNF_compile___closed__1; -static lean_object* l_Lean_Compiler_LCNF_initFn____x40_Lean_Compiler_LCNF_Main___hyg_1452____closed__17; +LEAN_EXPORT lean_object* l_Array_foldlMUnsafe_fold___at_Lean_Compiler_LCNF_PassManager_run___spec__12___boxed(lean_object*, lean_object*, lean_object*, lean_object*, lean_object*, lean_object*, lean_object*, lean_object*, lean_object*); +static lean_object* l_Lean_Compiler_LCNF_initFn____x40_Lean_Compiler_LCNF_Main___hyg_1699____closed__4; +static lean_object* l_Lean_Compiler_LCNF_initFn____x40_Lean_Compiler_LCNF_Main___hyg_1699____closed__2; lean_object* l_Lean_PersistentArray_toArray___rarg(lean_object*); static lean_object* l_Array_forIn_x27Unsafe_loop___at_Lean_Compiler_LCNF_PassManager_run___spec__6___lambda__1___closed__3; lean_object* l_Lean_ConstantInfo_type(lean_object*); @@ -38,11 +41,12 @@ LEAN_EXPORT lean_object* l___private_Lean_Util_Trace_0__Lean_getResetTraces___at lean_object* l_Lean_Meta_isProp(lean_object*, lean_object*, lean_object*, lean_object*, lean_object*, lean_object*); double lean_float_div(double, double); static lean_object* l_Array_forIn_x27Unsafe_loop___at_Lean_Compiler_LCNF_checkpoint___spec__1___lambda__3___closed__3; -LEAN_EXPORT lean_object* l_Array_forIn_x27Unsafe_loop___at_Lean_Compiler_LCNF_PassManager_run___spec__10___boxed(lean_object*, lean_object*, lean_object*, 
lean_object*, lean_object*, lean_object*, lean_object*, lean_object*, lean_object*, lean_object*, lean_object*); LEAN_EXPORT lean_object* l_Lean_withTraceNode___at_Lean_Compiler_LCNF_PassManager_run___spec__2___lambda__4(lean_object*, lean_object*, uint8_t, lean_object*, lean_object*, uint8_t, lean_object*, lean_object*, lean_object*, lean_object*, lean_object*, lean_object*, lean_object*); LEAN_EXPORT lean_object* l_Lean_withTraceNode___at_Lean_Compiler_LCNF_PassManager_run___spec__2___lambda__1(lean_object*, lean_object*, lean_object*, lean_object*, lean_object*, lean_object*, lean_object*, lean_object*, lean_object*, lean_object*, lean_object*); lean_object* l___private_Lean_Util_Trace_0__Lean_getResetTraces___at_Lean_Core_wrapAsyncAsSnapshot___spec__3___rarg(lean_object*, lean_object*); LEAN_EXPORT lean_object* l_Array_forIn_x27Unsafe_loop___at_Lean_Compiler_LCNF_PassManager_run___spec__6___boxed(lean_object*, lean_object*, lean_object*, lean_object*, lean_object*, lean_object*, lean_object*, lean_object*, lean_object*, lean_object*, lean_object*); +LEAN_EXPORT lean_object* l_Array_forIn_x27Unsafe_loop___at_Lean_Compiler_LCNF_PassManager_run___spec__13(lean_object*, lean_object*, lean_object*, size_t, size_t, lean_object*, lean_object*, lean_object*, lean_object*, lean_object*, lean_object*); +lean_object* l_Lean_IR_LogEntry_fmt(lean_object*); static lean_object* l_Lean_Compiler_LCNF_compile___closed__5; lean_object* l_Lean_MessageData_ofList(lean_object*); static lean_object* l_Lean_withTraceNode___at_Lean_Compiler_LCNF_PassManager_run___spec__2___lambda__4___closed__1; @@ -51,23 +55,26 @@ lean_object* l_Lean_PersistentArray_push___rarg(lean_object*, lean_object*); lean_object* lean_array_push(lean_object*, lean_object*); LEAN_EXPORT lean_object* l_Lean_withTraceNode___at_Lean_Compiler_LCNF_main___spec__1___lambda__2___boxed(lean_object*, lean_object*, lean_object*, lean_object*, lean_object*, lean_object*, lean_object*, lean_object*, lean_object*, lean_object*, lean_object*, lean_object*, lean_object*); static lean_object* l_Array_forIn_x27Unsafe_loop___at_Lean_Compiler_LCNF_checkpoint___spec__1___closed__2; +static lean_object* l_Lean_Compiler_LCNF_initFn____x40_Lean_Compiler_LCNF_Main___hyg_1699____closed__8; LEAN_EXPORT lean_object* l_Lean_withTraceNode___at_Lean_Compiler_LCNF_PassManager_run___spec__2___lambda__2(lean_object*, uint8_t, lean_object*, lean_object*, lean_object*, lean_object*, uint8_t, double, double, lean_object*, lean_object*, lean_object*, lean_object*, lean_object*, lean_object*); uint8_t l_Lean_Option_get___at_Lean_Compiler_LCNF_toConfigOptions___spec__2(lean_object*, lean_object*); uint8_t lean_usize_dec_eq(size_t, size_t); lean_object* l_Lean_Compiler_LCNF_checkDeadLocalDecls(lean_object*, lean_object*, lean_object*, lean_object*, lean_object*, lean_object*); LEAN_EXPORT lean_object* l_Lean_Compiler_LCNF_PassManager_run___boxed(lean_object*, lean_object*, lean_object*, lean_object*, lean_object*, lean_object*); -LEAN_EXPORT lean_object* l_Lean_Compiler_LCNF_PassManager_run___lambda__3(lean_object*, lean_object*, lean_object*, lean_object*, lean_object*, lean_object*, lean_object*); +LEAN_EXPORT lean_object* l_Lean_Compiler_LCNF_PassManager_run___lambda__3(lean_object*, lean_object*, lean_object*, lean_object*, lean_object*, lean_object*, lean_object*, lean_object*); LEAN_EXPORT lean_object* l_Array_mapMUnsafe_map___at_Lean_Compiler_LCNF_PassManager_run___spec__1___boxed(lean_object*, lean_object*, lean_object*, lean_object*, lean_object*, lean_object*, 
lean_object*, lean_object*); lean_object* lean_mk_array(lean_object*, lean_object*); static lean_object* l_Array_forIn_x27Unsafe_loop___at_Lean_Compiler_LCNF_checkpoint___spec__1___lambda__3___closed__2; LEAN_EXPORT lean_object* l_Lean_Compiler_LCNF_PassManager_run(lean_object*, lean_object*, lean_object*, lean_object*, lean_object*, lean_object*); -LEAN_EXPORT lean_object* l_Lean_Compiler_LCNF_PassManager_run___lambda__2___boxed(lean_object*, lean_object*, lean_object*, lean_object*, lean_object*, lean_object*, lean_object*); +LEAN_EXPORT lean_object* l_Lean_Compiler_LCNF_PassManager_run___lambda__2___boxed(lean_object*, lean_object*, lean_object*, lean_object*, lean_object*, lean_object*, lean_object*, lean_object*, lean_object*, lean_object*); +LEAN_EXPORT lean_object* l_Array_forIn_x27Unsafe_loop___at_Lean_Compiler_LCNF_PassManager_run___spec__13___boxed(lean_object*, lean_object*, lean_object*, lean_object*, lean_object*, lean_object*, lean_object*, lean_object*, lean_object*, lean_object*, lean_object*); extern lean_object* l_Lean_casesOnSuffix; static lean_object* l_Array_forIn_x27Unsafe_loop___at_Lean_Compiler_LCNF_PassManager_run___spec__6___lambda__1___closed__4; LEAN_EXPORT lean_object* l_Lean_Compiler_LCNF_shouldGenerateCode(lean_object*, lean_object*, lean_object*, lean_object*); LEAN_EXPORT lean_object* l_Lean_Compiler_LCNF_checkpoint___boxed(lean_object*, lean_object*, lean_object*, lean_object*, lean_object*, lean_object*, lean_object*); uint8_t lean_float_decLt(double, double); LEAN_EXPORT lean_object* l_Lean_Compiler_LCNF_shouldGenerateCode___lambda__2___boxed(lean_object*, lean_object*, lean_object*, lean_object*, lean_object*, lean_object*); +static lean_object* l_Lean_Compiler_LCNF_initFn____x40_Lean_Compiler_LCNF_Main___hyg_1699____closed__20; uint8_t l_Lean_ConstantInfo_hasValue(lean_object*, uint8_t); lean_object* l_Lean_Compiler_LCNF_getPassManager___rarg(lean_object*, lean_object*); static lean_object* l_Array_forIn_x27Unsafe_loop___at_Lean_Compiler_LCNF_checkpoint___spec__1___lambda__3___closed__8; @@ -78,13 +85,15 @@ LEAN_EXPORT lean_object* l_MonadExcept_ofExcept___at_Lean_Compiler_LCNF_main___s static lean_object* l_Lean_Compiler_LCNF_shouldGenerateCode___closed__13; LEAN_EXPORT lean_object* l_Lean_Compiler_LCNF_shouldGenerateCode___lambda__3___boxed(lean_object*, lean_object*, lean_object*, lean_object*, lean_object*, lean_object*); lean_object* lean_io_get_num_heartbeats(lean_object*); +LEAN_EXPORT lean_object* l_Lean_setEnv___at_Lean_Compiler_LCNF_PassManager_run___spec__10(lean_object*, lean_object*, lean_object*, lean_object*, lean_object*, lean_object*); lean_object* l_Nat_nextPowerOfTwo_go(lean_object*, lean_object*, lean_object*); static lean_object* l_Lean_Compiler_LCNF_shouldGenerateCode___closed__7; extern lean_object* l_Lean_trace_profiler_useHeartbeats; lean_object* l_Lean_stringToMessageData(lean_object*); -LEAN_EXPORT lean_object* l_Array_foldlMUnsafe_fold___at_Lean_Compiler_LCNF_PassManager_run___spec__9___boxed(lean_object*, lean_object*, lean_object*, lean_object*, lean_object*, lean_object*, lean_object*, lean_object*, lean_object*); +LEAN_EXPORT lean_object* l_Lean_Compiler_LCNF_PassManager_run___lambda__6(lean_object*, lean_object*, lean_object*, lean_object*, lean_object*, lean_object*, lean_object*, lean_object*); LEAN_EXPORT lean_object* lean_lcnf_compile_decls(lean_object*, lean_object*, lean_object*, lean_object*); extern lean_object* l_Lean_maxRecDepth; +LEAN_EXPORT lean_object* 
l_Lean_setEnv___at_Lean_Compiler_LCNF_PassManager_run___spec__10___boxed(lean_object*, lean_object*, lean_object*, lean_object*, lean_object*, lean_object*); LEAN_EXPORT lean_object* l_Lean_Compiler_LCNF_shouldGenerateCode___lambda__6___boxed(lean_object*, lean_object*, lean_object*, lean_object*, lean_object*, lean_object*); LEAN_EXPORT lean_object* l_Lean_Compiler_LCNF_compile(lean_object*, lean_object*, lean_object*, lean_object*); lean_object* l_Lean_Compiler_LCNF_LCtx_toLocalContext(lean_object*); @@ -94,70 +103,71 @@ lean_object* l_Lean_Kernel_enableDiag(lean_object*, uint8_t); static lean_object* l_Lean_Compiler_LCNF_showDecl___closed__1; lean_object* l_Lean_addTrace___at_Lean_Compiler_LCNF_UnreachableBranches_elimDead_go___spec__2(lean_object*, lean_object*, lean_object*, lean_object*, lean_object*, lean_object*, lean_object*); uint8_t l_Lean_Kernel_isDiagnosticsEnabled(lean_object*); +static lean_object* l_Lean_Compiler_LCNF_PassManager_run___lambda__3___closed__1; static lean_object* l_Array_forIn_x27Unsafe_loop___at_Lean_Compiler_LCNF_PassManager_run___spec__6___lambda__1___closed__6; LEAN_EXPORT lean_object* l_Lean_addTrace___at_Lean_Compiler_LCNF_PassManager_run___spec__7___boxed(lean_object*, lean_object*, lean_object*, lean_object*, lean_object*, lean_object*, lean_object*); static lean_object* l_Array_forIn_x27Unsafe_loop___at_Lean_Compiler_LCNF_checkpoint___spec__1___lambda__2___closed__5; -static lean_object* l_Lean_Compiler_LCNF_initFn____x40_Lean_Compiler_LCNF_Main___hyg_1452____closed__14; LEAN_EXPORT lean_object* l_Array_forIn_x27Unsafe_loop___at_Lean_Compiler_LCNF_checkpoint___spec__1___lambda__1(lean_object*, lean_object*, lean_object*, lean_object*, lean_object*, lean_object*, lean_object*); static lean_object* l_Array_forIn_x27Unsafe_loop___at_Lean_Compiler_LCNF_checkpoint___spec__1___lambda__3___closed__1; -static lean_object* l_Lean_Compiler_LCNF_initFn____x40_Lean_Compiler_LCNF_Main___hyg_1452____closed__10; lean_object* l_Lean_Name_mkStr3(lean_object*, lean_object*, lean_object*); static lean_object* l_Array_forIn_x27Unsafe_loop___at_Lean_Compiler_LCNF_checkpoint___spec__1___lambda__2___closed__6; -static lean_object* l_Lean_Compiler_LCNF_initFn____x40_Lean_Compiler_LCNF_Main___hyg_1452____closed__4; LEAN_EXPORT lean_object* l_Lean_Compiler_LCNF_shouldGenerateCode___lambda__5___boxed(lean_object*, lean_object*, lean_object*, lean_object*, lean_object*, lean_object*); size_t lean_usize_of_nat(lean_object*); LEAN_EXPORT lean_object* l_Lean_Compiler_LCNF_shouldGenerateCode___lambda__3(lean_object*, lean_object*, lean_object*, lean_object*, lean_object*, lean_object*); LEAN_EXPORT lean_object* l_Array_forIn_x27Unsafe_loop___at_Lean_Compiler_LCNF_PassManager_run___spec__11(lean_object*, lean_object*, lean_object*, lean_object*, size_t, size_t, lean_object*, lean_object*, lean_object*, lean_object*, lean_object*, lean_object*); static lean_object* l_Array_forIn_x27Unsafe_loop___at_Lean_Compiler_LCNF_PassManager_run___spec__6___lambda__1___closed__18; LEAN_EXPORT lean_object* l_Lean_Compiler_LCNF_main___lambda__2(lean_object*, lean_object*, lean_object*, lean_object*, lean_object*, lean_object*); -static lean_object* l_Lean_Compiler_LCNF_initFn____x40_Lean_Compiler_LCNF_Main___hyg_1452____closed__8; +LEAN_EXPORT lean_object* l_Lean_Compiler_LCNF_PassManager_run___lambda__5___boxed(lean_object*, lean_object*, lean_object*, lean_object*, lean_object*, lean_object*, lean_object*, lean_object*, lean_object*, lean_object*); LEAN_EXPORT lean_object* 
l_MonadExcept_ofExcept___at_Lean_Compiler_LCNF_main___spec__2(lean_object*, lean_object*, lean_object*, lean_object*); lean_object* lean_st_ref_take(lean_object*, lean_object*); LEAN_EXPORT lean_object* l_Lean_withTraceNode___at_Lean_Compiler_LCNF_PassManager_run___spec__2___lambda__3___boxed(lean_object*, lean_object*, lean_object*, lean_object*, lean_object*, lean_object*, lean_object*, lean_object*, lean_object*, lean_object*, lean_object*, lean_object*, lean_object*, lean_object*, lean_object*); static lean_object* l_Array_forIn_x27Unsafe_loop___at_Lean_Compiler_LCNF_PassManager_run___spec__6___lambda__1___closed__1; -static lean_object* l_Lean_Compiler_LCNF_initFn____x40_Lean_Compiler_LCNF_Main___hyg_1452____closed__11; -LEAN_EXPORT lean_object* l_Lean_Compiler_LCNF_initFn____x40_Lean_Compiler_LCNF_Main___hyg_1452_(lean_object*); -static lean_object* l_Lean_Compiler_LCNF_initFn____x40_Lean_Compiler_LCNF_Main___hyg_1452____closed__22; +static lean_object* l_Lean_Compiler_LCNF_initFn____x40_Lean_Compiler_LCNF_Main___hyg_1699____closed__3; LEAN_EXPORT lean_object* l_Lean_Compiler_LCNF_main___lambda__1(lean_object*, lean_object*, lean_object*, lean_object*, lean_object*); static lean_object* l_Lean_Compiler_LCNF_shouldGenerateCode___closed__11; LEAN_EXPORT lean_object* l_Lean_Compiler_LCNF_main___lambda__2___boxed(lean_object*, lean_object*, lean_object*, lean_object*, lean_object*, lean_object*); -static lean_object* l_Array_forIn_x27Unsafe_loop___at_Lean_Compiler_LCNF_PassManager_run___spec__8___closed__4; -static lean_object* l_Lean_Compiler_LCNF_initFn____x40_Lean_Compiler_LCNF_Main___hyg_1452____closed__20; -static lean_object* l_Lean_Compiler_LCNF_initFn____x40_Lean_Compiler_LCNF_Main___hyg_1452____closed__6; LEAN_EXPORT lean_object* l___private_Lean_Util_Trace_0__Lean_getResetTraces___at_Lean_Compiler_LCNF_PassManager_run___spec__3___boxed(lean_object*, lean_object*, lean_object*); +static lean_object* l_Lean_Compiler_LCNF_initFn____x40_Lean_Compiler_LCNF_Main___hyg_1699____closed__15; lean_object* l_Lean_registerTraceClass(lean_object*, uint8_t, lean_object*, lean_object*); LEAN_EXPORT lean_object* l_Lean_withTraceNode___at_Lean_Compiler_LCNF_PassManager_run___spec__2___lambda__1___boxed(lean_object*, lean_object*, lean_object*, lean_object*, lean_object*, lean_object*, lean_object*, lean_object*, lean_object*, lean_object*, lean_object*); static double l_Lean_withTraceNode___at_Lean_Compiler_LCNF_PassManager_run___spec__2___lambda__4___closed__5; -LEAN_EXPORT lean_object* l_Array_foldlMUnsafe_fold___at_Lean_Compiler_LCNF_PassManager_run___spec__9(lean_object*, size_t, size_t, lean_object*, lean_object*, lean_object*, lean_object*, lean_object*, lean_object*); -static lean_object* l_Lean_Compiler_LCNF_initFn____x40_Lean_Compiler_LCNF_Main___hyg_1452____closed__12; static lean_object* l_Array_forIn_x27Unsafe_loop___at_Lean_Compiler_LCNF_checkpoint___spec__1___lambda__2___closed__7; -LEAN_EXPORT lean_object* l_Lean_Compiler_LCNF_PassManager_run___lambda__1(lean_object*, lean_object*, lean_object*, lean_object*, lean_object*, lean_object*, lean_object*); +LEAN_EXPORT lean_object* l_Lean_Compiler_LCNF_PassManager_run___lambda__1(lean_object*, lean_object*, lean_object*, size_t, lean_object*, lean_object*, lean_object*, lean_object*, lean_object*, lean_object*); lean_object* l_Lean_MessageData_ofFormat(lean_object*); lean_object* l_Lean_PersistentArray_append___rarg(lean_object*, lean_object*); lean_object* l_Lean_Compiler_LCNF_withPhase___rarg___boxed(lean_object*, lean_object*, 
lean_object*, lean_object*, lean_object*, lean_object*, lean_object*); +LEAN_EXPORT lean_object* l_Array_foldlMUnsafe_fold___at_Lean_Compiler_LCNF_PassManager_run___spec__12(lean_object*, size_t, size_t, lean_object*, lean_object*, lean_object*, lean_object*, lean_object*, lean_object*); LEAN_EXPORT lean_object* l_Array_forIn_x27Unsafe_loop___at_Lean_Compiler_LCNF_PassManager_run___spec__6___lambda__1___boxed(lean_object*, lean_object*, lean_object*, lean_object*, lean_object*, lean_object*, lean_object*); static uint64_t l_Lean_Compiler_LCNF_shouldGenerateCode___closed__2; lean_object* l_Lean_Meta_isTypeFormerType(lean_object*, lean_object*, lean_object*, lean_object*, lean_object*, lean_object*); +static lean_object* l_Lean_Compiler_LCNF_initFn____x40_Lean_Compiler_LCNF_Main___hyg_1699____closed__14; +static lean_object* l_Lean_Compiler_LCNF_initFn____x40_Lean_Compiler_LCNF_Main___hyg_1699____closed__17; lean_object* lean_st_ref_get(lean_object*, lean_object*); +static lean_object* l_Lean_Compiler_LCNF_initFn____x40_Lean_Compiler_LCNF_Main___hyg_1699____closed__19; lean_object* l_Lean_profileitM___at_Lean_traceBlock___spec__1___rarg(lean_object*, lean_object*, lean_object*, lean_object*, lean_object*, lean_object*, lean_object*); -static lean_object* l_Lean_Compiler_LCNF_initFn____x40_Lean_Compiler_LCNF_Main___hyg_1452____closed__18; static lean_object* l_Array_forIn_x27Unsafe_loop___at_Lean_Compiler_LCNF_PassManager_run___spec__6___lambda__1___closed__19; +LEAN_EXPORT lean_object* l_Lean_Compiler_LCNF_initFn____x40_Lean_Compiler_LCNF_Main___hyg_1699_(lean_object*); lean_object* lean_st_mk_ref(lean_object*, lean_object*); uint8_t l___private_Lean_Compiler_InlineAttrs_0__Lean_Compiler_hasInlineAttrCore(lean_object*, uint8_t, lean_object*); -static lean_object* l_Lean_Compiler_LCNF_initFn____x40_Lean_Compiler_LCNF_Main___hyg_1452____closed__2; +static lean_object* l_Lean_Compiler_LCNF_initFn____x40_Lean_Compiler_LCNF_Main___hyg_1699____closed__10; +LEAN_EXPORT lean_object* l_Array_forIn_x27Unsafe_loop___at_Lean_Compiler_LCNF_PassManager_run___spec__14___boxed(lean_object*, lean_object*, lean_object*, lean_object*, lean_object*, lean_object*, lean_object*, lean_object*, lean_object*, lean_object*, lean_object*); lean_object* l_Lean_Name_num___override(lean_object*, lean_object*); lean_object* l_Lean_Name_append(lean_object*, lean_object*); static lean_object* l_Array_forIn_x27Unsafe_loop___at_Lean_Compiler_LCNF_PassManager_run___spec__8___closed__1; +static lean_object* l_Lean_Compiler_LCNF_initFn____x40_Lean_Compiler_LCNF_Main___hyg_1699____closed__22; LEAN_EXPORT lean_object* l_Lean_withTraceNode___at_Lean_Compiler_LCNF_main___spec__1___lambda__2(lean_object*, uint8_t, lean_object*, lean_object*, lean_object*, lean_object*, uint8_t, double, double, lean_object*, lean_object*, lean_object*, lean_object*); lean_object* l___private_Lean_Util_Trace_0__Lean_addTraceNode___at_Lean_Core_wrapAsyncAsSnapshot___spec__4(lean_object*, lean_object*, lean_object*, lean_object*, lean_object*, lean_object*, lean_object*); static lean_object* l_Lean_withTraceNode___at_Lean_Compiler_LCNF_PassManager_run___spec__2___lambda__3___closed__2; lean_object* lean_io_mono_nanos_now(lean_object*); static lean_object* l_Array_forIn_x27Unsafe_loop___at_Lean_Compiler_LCNF_PassManager_run___spec__8___closed__2; static lean_object* l_Lean_withTraceNode___at_Lean_Compiler_LCNF_PassManager_run___spec__2___lambda__3___closed__1; +LEAN_EXPORT lean_object* l_Lean_Compiler_LCNF_PassManager_run___lambda__6___boxed(lean_object*, 
lean_object*, lean_object*, lean_object*, lean_object*, lean_object*, lean_object*, lean_object*); +LEAN_EXPORT lean_object* l_Lean_throwError___at_Lean_Compiler_LCNF_PassManager_run___spec__9___boxed(lean_object*, lean_object*, lean_object*, lean_object*, lean_object*, lean_object*); +static lean_object* l_Lean_Compiler_LCNF_initFn____x40_Lean_Compiler_LCNF_Main___hyg_1699____closed__11; LEAN_EXPORT lean_object* l_Lean_withTraceNode___at_Lean_Compiler_LCNF_main___spec__1(lean_object*, lean_object*, lean_object*, uint8_t, lean_object*, lean_object*, lean_object*, lean_object*); -static lean_object* l_Lean_Compiler_LCNF_initFn____x40_Lean_Compiler_LCNF_Main___hyg_1452____closed__21; lean_object* l_Lean_Compiler_LCNF_markRecDecls(lean_object*); LEAN_EXPORT lean_object* l_Lean_withTraceNode___at_Lean_Compiler_LCNF_PassManager_run___spec__2(lean_object*, lean_object*, lean_object*, uint8_t, lean_object*, lean_object*, lean_object*, lean_object*, lean_object*, lean_object*); +static lean_object* l_Lean_Compiler_LCNF_initFn____x40_Lean_Compiler_LCNF_Main___hyg_1699____closed__7; lean_object* l_Lean_Name_str___override(lean_object*, lean_object*); static lean_object* l_Lean_Compiler_LCNF_PassManager_run___lambda__2___closed__1; static lean_object* l_Array_forIn_x27Unsafe_loop___at_Lean_Compiler_LCNF_PassManager_run___spec__6___lambda__1___closed__17; @@ -165,10 +175,10 @@ LEAN_EXPORT lean_object* l_Lean_Compiler_LCNF_showDecl___boxed(lean_object*, lea LEAN_EXPORT lean_object* l_Lean_Compiler_LCNF_shouldGenerateCode___lambda__4___boxed(lean_object*, lean_object*, lean_object*, lean_object*, lean_object*, lean_object*); LEAN_EXPORT lean_object* l_Lean_Meta_isMatcher___at_Lean_Compiler_LCNF_shouldGenerateCode___spec__1(lean_object*, lean_object*, lean_object*, lean_object*); uint8_t l_Lean_isExtern(lean_object*, lean_object*); -static lean_object* l_Array_forIn_x27Unsafe_loop___at_Lean_Compiler_LCNF_PassManager_run___spec__8___closed__3; extern lean_object* l_Lean_trace_profiler_threshold; lean_object* l___private_Init_Util_0__mkPanicMessageWithDecl(lean_object*, lean_object*, lean_object*, lean_object*, lean_object*); -static lean_object* l_Lean_Compiler_LCNF_initFn____x40_Lean_Compiler_LCNF_Main___hyg_1452____closed__1; +static lean_object* l_Lean_Compiler_LCNF_initFn____x40_Lean_Compiler_LCNF_Main___hyg_1699____closed__16; +LEAN_EXPORT lean_object* l_Lean_Compiler_LCNF_PassManager_run___lambda__4(lean_object*, lean_object*, lean_object*, size_t, lean_object*, lean_object*, lean_object*, lean_object*, lean_object*, lean_object*); static lean_object* l_Lean_Compiler_LCNF_shouldGenerateCode___lambda__4___closed__1; LEAN_EXPORT lean_object* l_Lean_withTraceNode___at_Lean_Compiler_LCNF_PassManager_run___spec__2___lambda__3(lean_object*, uint8_t, lean_object*, lean_object*, lean_object*, uint8_t, double, double, lean_object*, lean_object*, lean_object*, lean_object*, lean_object*, lean_object*, lean_object*); LEAN_EXPORT lean_object* l_Lean_withTraceNode___at_Lean_Compiler_LCNF_main___spec__1___lambda__1(lean_object*, lean_object*, lean_object*, lean_object*, lean_object*, lean_object*, lean_object*, lean_object*, lean_object*); @@ -181,13 +191,10 @@ static lean_object* l_Array_forIn_x27Unsafe_loop___at_Lean_Compiler_LCNF_checkpo LEAN_EXPORT lean_object* l_MonadExcept_ofExcept___at_Lean_Compiler_LCNF_PassManager_run___spec__5(lean_object*, lean_object*, lean_object*, lean_object*, lean_object*, lean_object*); LEAN_EXPORT lean_object* 
l_Array_forIn_x27Unsafe_loop___at_Lean_Compiler_LCNF_PassManager_run___spec__6(lean_object*, lean_object*, lean_object*, size_t, size_t, lean_object*, lean_object*, lean_object*, lean_object*, lean_object*, lean_object*); lean_object* l_Lean_KVMap_setBool(lean_object*, lean_object*, uint8_t); -static lean_object* l_Lean_Compiler_LCNF_initFn____x40_Lean_Compiler_LCNF_Main___hyg_1452____closed__9; static lean_object* l_Array_forIn_x27Unsafe_loop___at_Lean_Compiler_LCNF_PassManager_run___spec__6___lambda__1___closed__16; LEAN_EXPORT lean_object* l___private_Lean_Util_Trace_0__Lean_addTraceNode___at_Lean_Compiler_LCNF_PassManager_run___spec__4___boxed(lean_object*, lean_object*, lean_object*, lean_object*, lean_object*, lean_object*, lean_object*, lean_object*, lean_object*); -static lean_object* l_Lean_Compiler_LCNF_initFn____x40_Lean_Compiler_LCNF_Main___hyg_1452____closed__19; LEAN_EXPORT lean_object* l_Array_forIn_x27Unsafe_loop___at_Lean_Compiler_LCNF_checkpoint___spec__1(lean_object*, lean_object*, lean_object*, lean_object*, size_t, size_t, lean_object*, lean_object*, lean_object*, lean_object*, lean_object*, lean_object*); -static lean_object* l_Lean_Compiler_LCNF_initFn____x40_Lean_Compiler_LCNF_Main___hyg_1452____closed__23; -LEAN_EXPORT lean_object* l_Lean_Compiler_LCNF_PassManager_run___lambda__2(lean_object*, lean_object*, lean_object*, lean_object*, lean_object*, lean_object*, lean_object*); +LEAN_EXPORT lean_object* l_Lean_Compiler_LCNF_PassManager_run___lambda__2(lean_object*, lean_object*, size_t, lean_object*, lean_object*, lean_object*, lean_object*, lean_object*, lean_object*, lean_object*); LEAN_EXPORT lean_object* l_Array_forIn_x27Unsafe_loop___at_Lean_Compiler_LCNF_checkpoint___spec__1___lambda__2___boxed(lean_object*, lean_object*, lean_object*, lean_object*, lean_object*, lean_object*, lean_object*, lean_object*, lean_object*, lean_object*); lean_object* l_Array_mapMUnsafe_map___at___private_Lean_Util_Trace_0__Lean_addTraceNode___spec__1(size_t, size_t, lean_object*); double l_Float_ofScientific(lean_object*, uint8_t, lean_object*); @@ -198,18 +205,22 @@ LEAN_EXPORT lean_object* l_Lean_Compiler_LCNF_shouldGenerateCode_isCompIrrelevan static lean_object* l_Lean_withTraceNode___at_Lean_Compiler_LCNF_PassManager_run___spec__2___lambda__4___closed__2; LEAN_EXPORT lean_object* l_Lean_withTraceNode___at_Lean_Compiler_LCNF_main___spec__1___lambda__3(lean_object*, uint8_t, lean_object*, lean_object*, lean_object*, uint8_t, double, double, lean_object*, lean_object*, lean_object*, lean_object*, lean_object*); LEAN_EXPORT lean_object* l_Lean_Compiler_LCNF_shouldGenerateCode___lambda__1___boxed(lean_object*, lean_object*, lean_object*, lean_object*); +static lean_object* l_Lean_Compiler_LCNF_initFn____x40_Lean_Compiler_LCNF_Main___hyg_1699____closed__9; LEAN_EXPORT lean_object* l_Lean_withTraceNode___at_Lean_Compiler_LCNF_main___spec__1___boxed(lean_object*, lean_object*, lean_object*, lean_object*, lean_object*, lean_object*, lean_object*, lean_object*); static lean_object* l_Lean_Compiler_LCNF_shouldGenerateCode___closed__6; +static lean_object* l_Lean_Compiler_LCNF_initFn____x40_Lean_Compiler_LCNF_Main___hyg_1699____closed__13; lean_object* l_Lean_Compiler_LCNF_CompilerM_run___rarg(lean_object*, lean_object*, uint8_t, lean_object*, lean_object*, lean_object*); LEAN_EXPORT lean_object* l_Array_forIn_x27Unsafe_loop___at_Lean_Compiler_LCNF_checkpoint___spec__1___lambda__3(lean_object*, lean_object*, lean_object*, lean_object*, lean_object*, lean_object*, lean_object*, 
lean_object*); -LEAN_EXPORT lean_object* l_Lean_Compiler_LCNF_PassManager_run___lambda__1___boxed(lean_object*, lean_object*, lean_object*, lean_object*, lean_object*, lean_object*, lean_object*); +LEAN_EXPORT lean_object* l_Lean_Compiler_LCNF_PassManager_run___lambda__1___boxed(lean_object*, lean_object*, lean_object*, lean_object*, lean_object*, lean_object*, lean_object*, lean_object*, lean_object*, lean_object*); static lean_object* l_Lean_Compiler_LCNF_shouldGenerateCode___closed__10; uint8_t lean_nat_dec_lt(lean_object*, lean_object*); LEAN_EXPORT lean_object* l_Lean_Compiler_LCNF_shouldGenerateCode___lambda__1(lean_object*, lean_object*, lean_object*, lean_object*); static lean_object* l_Array_forIn_x27Unsafe_loop___at_Lean_Compiler_LCNF_checkpoint___spec__1___closed__1; lean_object* l_Lean_Name_mkStr2(lean_object*, lean_object*); +LEAN_EXPORT lean_object* l_Array_forIn_x27Unsafe_loop___at_Lean_Compiler_LCNF_PassManager_run___spec__15(lean_object*, lean_object*, lean_object*, lean_object*, size_t, size_t, lean_object*, lean_object*, lean_object*, lean_object*, lean_object*, lean_object*); static lean_object* l_Array_forIn_x27Unsafe_loop___at_Lean_Compiler_LCNF_checkpoint___spec__1___lambda__3___closed__5; lean_object* l_Lean_ParametricAttribute_getParam_x3f___rarg(lean_object*, lean_object*, lean_object*, lean_object*); +static lean_object* l_Lean_Compiler_LCNF_initFn____x40_Lean_Compiler_LCNF_Main___hyg_1699____closed__1; LEAN_EXPORT lean_object* l_Lean_withTraceNode___at_Lean_Compiler_LCNF_PassManager_run___spec__2___boxed(lean_object*, lean_object*, lean_object*, lean_object*, lean_object*, lean_object*, lean_object*, lean_object*, lean_object*, lean_object*); static lean_object* l_Array_forIn_x27Unsafe_loop___at_Lean_Compiler_LCNF_checkpoint___spec__1___lambda__3___closed__4; lean_object* l_Lean_Compiler_LCNF_CompilerM_run___rarg___boxed(lean_object*, lean_object*, lean_object*, lean_object*, lean_object*, lean_object*); @@ -222,52 +233,58 @@ LEAN_EXPORT lean_object* l___private_Lean_Util_Trace_0__Lean_addTraceNode___at_L static lean_object* l_Lean_Compiler_LCNF_shouldGenerateCode___closed__14; LEAN_EXPORT lean_object* l___private_Lean_Util_Trace_0__Lean_getResetTraces___at_Lean_Compiler_LCNF_PassManager_run___spec__3(lean_object*, lean_object*, lean_object*); static lean_object* l_Array_forIn_x27Unsafe_loop___at_Lean_Compiler_LCNF_PassManager_run___spec__6___lambda__1___closed__12; -static lean_object* l_Lean_Compiler_LCNF_initFn____x40_Lean_Compiler_LCNF_Main___hyg_1452____closed__16; static double l_Lean_withTraceNode___at_Lean_Compiler_LCNF_PassManager_run___spec__2___lambda__4___closed__4; LEAN_EXPORT lean_object* l_Lean_Compiler_LCNF_shouldGenerateCode___lambda__4(lean_object*, lean_object*, lean_object*, lean_object*, lean_object*, lean_object*); -static lean_object* l_Lean_Compiler_LCNF_initFn____x40_Lean_Compiler_LCNF_Main___hyg_1452____closed__5; static lean_object* l_Array_forIn_x27Unsafe_loop___at_Lean_Compiler_LCNF_PassManager_run___spec__6___lambda__1___closed__5; LEAN_EXPORT lean_object* l_Array_forIn_x27Unsafe_loop___at_Lean_Compiler_LCNF_checkpoint___spec__1___boxed(lean_object*, lean_object*, lean_object*, lean_object*, lean_object*, lean_object*, lean_object*, lean_object*, lean_object*, lean_object*, lean_object*, lean_object*); static lean_object* l_Array_forIn_x27Unsafe_loop___at_Lean_Compiler_LCNF_PassManager_run___spec__6___lambda__1___closed__2; +static lean_object* l_Lean_Compiler_LCNF_initFn____x40_Lean_Compiler_LCNF_Main___hyg_1699____closed__12; 
LEAN_EXPORT lean_object* l_MonadExcept_ofExcept___at_Lean_Compiler_LCNF_PassManager_run___spec__5___boxed(lean_object*, lean_object*, lean_object*, lean_object*, lean_object*, lean_object*); lean_object* l_Lean_Compiler_LCNF_Decl_check(lean_object*, lean_object*, lean_object*, lean_object*, lean_object*, lean_object*); LEAN_EXPORT lean_object* l_Lean_withTraceNode___at_Lean_Compiler_LCNF_PassManager_run___spec__2___lambda__4___boxed(lean_object*, lean_object*, lean_object*, lean_object*, lean_object*, lean_object*, lean_object*, lean_object*, lean_object*, lean_object*, lean_object*, lean_object*, lean_object*); static double l_Lean_withTraceNode___at_Lean_Compiler_LCNF_PassManager_run___spec__2___lambda__2___closed__1; lean_object* l_Lean_PersistentHashMap_mkEmptyEntriesArray(lean_object*, lean_object*); lean_object* l_Lean_Compiler_LCNF_getDeclInfo_x3f(lean_object*, lean_object*, lean_object*, lean_object*); +static lean_object* l_Array_forIn_x27Unsafe_loop___at_Lean_Compiler_LCNF_PassManager_run___spec__11___closed__3; +lean_object* lean_ir_compile(lean_object*, lean_object*, lean_object*); LEAN_EXPORT lean_object* l_Lean_Compiler_LCNF_checkpoint(lean_object*, lean_object*, lean_object*, lean_object*, lean_object*, lean_object*, lean_object*); lean_object* l_Lean_Compiler_LCNF_Decl_size(lean_object*); static lean_object* l_Lean_Compiler_LCNF_shouldGenerateCode___closed__3; -static lean_object* l_Lean_Compiler_LCNF_initFn____x40_Lean_Compiler_LCNF_Main___hyg_1452____closed__13; static lean_object* l_Array_forIn_x27Unsafe_loop___at_Lean_Compiler_LCNF_PassManager_run___spec__6___lambda__1___closed__14; lean_object* lean_array_mk(lean_object*); static lean_object* l_Lean_Compiler_LCNF_showDecl___closed__2; static lean_object* l_Array_forIn_x27Unsafe_loop___at_Lean_Compiler_LCNF_checkpoint___spec__1___lambda__3___closed__9; LEAN_EXPORT lean_object* l_Array_mapMUnsafe_map___at_Lean_Compiler_LCNF_PassManager_run___spec__1(size_t, size_t, lean_object*, lean_object*, lean_object*, lean_object*, lean_object*, lean_object*); -LEAN_EXPORT lean_object* l_Array_forIn_x27Unsafe_loop___at_Lean_Compiler_LCNF_PassManager_run___spec__8___boxed(lean_object*, lean_object*, lean_object*, lean_object*, lean_object*, lean_object*, lean_object*, lean_object*, lean_object*, lean_object*, lean_object*, lean_object*); +LEAN_EXPORT lean_object* l_Array_forIn_x27Unsafe_loop___at_Lean_Compiler_LCNF_PassManager_run___spec__8___boxed(lean_object*, lean_object*, lean_object*, lean_object*, lean_object*, lean_object*, lean_object*, lean_object*, lean_object*, lean_object*, lean_object*); static lean_object* l_Array_forIn_x27Unsafe_loop___at_Lean_Compiler_LCNF_PassManager_run___spec__6___lambda__1___closed__8; static lean_object* l_Array_forIn_x27Unsafe_loop___at_Lean_Compiler_LCNF_PassManager_run___spec__6___lambda__1___closed__15; size_t lean_usize_add(size_t, size_t); lean_object* l_Lean_Compiler_LCNF_ppDecl_x27(lean_object*, lean_object*, lean_object*, lean_object*); +static lean_object* l_Lean_Compiler_LCNF_initFn____x40_Lean_Compiler_LCNF_Main___hyg_1699____closed__6; LEAN_EXPORT lean_object* l_Lean_Compiler_LCNF_showDecl(uint8_t, lean_object*, lean_object*, lean_object*, lean_object*); +lean_object* l_Lean_IR_toIR(lean_object*, lean_object*, lean_object*, lean_object*); extern lean_object* l_Lean_instInhabitedName; lean_object* lean_array_uget(lean_object*, size_t); size_t lean_array_size(lean_object*); extern lean_object* l_Lean_trace_profiler; LEAN_EXPORT lean_object* 
l_Lean_withTraceNode___at_Lean_Compiler_LCNF_main___spec__1___lambda__4___boxed(lean_object*, lean_object*, lean_object*, lean_object*, lean_object*, lean_object*, lean_object*, lean_object*, lean_object*, lean_object*, lean_object*); lean_object* lean_st_ref_set(lean_object*, lean_object*, lean_object*); +static lean_object* l_Lean_Compiler_LCNF_initFn____x40_Lean_Compiler_LCNF_Main___hyg_1699____closed__5; lean_object* l_Lean_isTracingEnabledFor___at_Lean_Core_wrapAsyncAsSnapshot___spec__2(lean_object*, lean_object*, lean_object*, lean_object*); static lean_object* l_Lean_Compiler_LCNF_main___closed__1; lean_object* l_List_mapTR_loop___at_Lean_compileDecls_doCompile___spec__1(lean_object*, lean_object*); extern lean_object* l_Lean_Compiler_implementedByAttr; static lean_object* l_Lean_Compiler_LCNF_shouldGenerateCode___closed__1; +LEAN_EXPORT lean_object* l_Array_forIn_x27Unsafe_loop___at_Lean_Compiler_LCNF_PassManager_run___spec__14(lean_object*, lean_object*, lean_object*, size_t, size_t, lean_object*, lean_object*, lean_object*, lean_object*, lean_object*, lean_object*); uint8_t l_Lean_isAuxRecursorWithSuffix(lean_object*, lean_object*, lean_object*); static lean_object* l_Array_forIn_x27Unsafe_loop___at_Lean_Compiler_LCNF_checkpoint___spec__1___closed__3; lean_object* lean_array_get_size(lean_object*); +static lean_object* l_Lean_Compiler_LCNF_initFn____x40_Lean_Compiler_LCNF_Main___hyg_1699____closed__23; static lean_object* l_Lean_Compiler_LCNF_main___lambda__1___closed__1; +static lean_object* l_Lean_Compiler_LCNF_initFn____x40_Lean_Compiler_LCNF_Main___hyg_1699____closed__21; LEAN_EXPORT lean_object* l_Lean_withTraceNode___at_Lean_Compiler_LCNF_main___spec__1___lambda__3___boxed(lean_object*, lean_object*, lean_object*, lean_object*, lean_object*, lean_object*, lean_object*, lean_object*, lean_object*, lean_object*, lean_object*, lean_object*, lean_object*); +LEAN_EXPORT lean_object* l_Array_forIn_x27Unsafe_loop___at_Lean_Compiler_LCNF_PassManager_run___spec__15___boxed(lean_object*, lean_object*, lean_object*, lean_object*, lean_object*, lean_object*, lean_object*, lean_object*, lean_object*, lean_object*, lean_object*, lean_object*); uint8_t lean_nat_dec_le(lean_object*, lean_object*); -static lean_object* l_Lean_Compiler_LCNF_initFn____x40_Lean_Compiler_LCNF_Main___hyg_1452____closed__7; uint8_t lean_usize_dec_lt(size_t, size_t); static lean_object* l_Array_forIn_x27Unsafe_loop___at_Lean_Compiler_LCNF_PassManager_run___spec__6___lambda__1___closed__9; static lean_object* l_Array_forIn_x27Unsafe_loop___at_Lean_Compiler_LCNF_checkpoint___spec__1___lambda__3___closed__7; @@ -280,22 +297,26 @@ static lean_object* l_Lean_Compiler_LCNF_compile___closed__3; LEAN_EXPORT lean_object* l_Lean_Compiler_LCNF_shouldGenerateCode___lambda__5(lean_object*, lean_object*, lean_object*, lean_object*, lean_object*, lean_object*); lean_object* l_panic___at_Lean_Compiler_LCNF_saveSpecParamInfo___spec__7(lean_object*, lean_object*, lean_object*, lean_object*, lean_object*, lean_object*); uint64_t l___private_Lean_Meta_Basic_0__Lean_Meta_Config_toKey(lean_object*); +static lean_object* l_Lean_Compiler_LCNF_initFn____x40_Lean_Compiler_LCNF_Main___hyg_1699____closed__18; lean_object* lean_array_uset(lean_object*, size_t, lean_object*); LEAN_EXPORT lean_object* l___private_Lean_Util_Trace_0__Lean_getResetTraces___at_Lean_Compiler_LCNF_PassManager_run___spec__3___rarg___boxed(lean_object*, lean_object*); lean_object* l_Lean_MessageData_ofName(lean_object*); LEAN_EXPORT lean_object* 
l_Lean_withTraceNode___at_Lean_Compiler_LCNF_PassManager_run___spec__2___lambda__2___boxed(lean_object*, lean_object*, lean_object*, lean_object*, lean_object*, lean_object*, lean_object*, lean_object*, lean_object*, lean_object*, lean_object*, lean_object*, lean_object*, lean_object*, lean_object*); lean_object* l___private_Init_Data_Repr_0__Nat_reprFast(lean_object*); LEAN_EXPORT lean_object* l_Array_forIn_x27Unsafe_loop___at_Lean_Compiler_LCNF_checkpoint___spec__1___lambda__1___boxed(lean_object*, lean_object*, lean_object*, lean_object*, lean_object*, lean_object*, lean_object*); -static lean_object* l_Lean_Compiler_LCNF_initFn____x40_Lean_Compiler_LCNF_Main___hyg_1452____closed__15; -LEAN_EXPORT lean_object* l_Array_forIn_x27Unsafe_loop___at_Lean_Compiler_LCNF_PassManager_run___spec__8(lean_object*, lean_object*, lean_object*, lean_object*, size_t, size_t, lean_object*, lean_object*, lean_object*, lean_object*, lean_object*, lean_object*); -static lean_object* l_Lean_Compiler_LCNF_PassManager_run___lambda__2___closed__2; +static lean_object* l_Array_forIn_x27Unsafe_loop___at_Lean_Compiler_LCNF_PassManager_run___spec__11___closed__1; +static lean_object* l_Lean_Compiler_LCNF_PassManager_run___lambda__3___closed__2; +LEAN_EXPORT lean_object* l_Array_forIn_x27Unsafe_loop___at_Lean_Compiler_LCNF_PassManager_run___spec__8(lean_object*, lean_object*, lean_object*, size_t, size_t, lean_object*, lean_object*, lean_object*, lean_object*, lean_object*, lean_object*); static lean_object* l_Lean_Compiler_LCNF_shouldGenerateCode___closed__5; LEAN_EXPORT lean_object* l_Lean_Compiler_LCNF_shouldGenerateCode___lambda__6(lean_object*, lean_object*, lean_object*, lean_object*, lean_object*, lean_object*); +LEAN_EXPORT lean_object* l_Lean_Compiler_LCNF_PassManager_run___lambda__4___boxed(lean_object*, lean_object*, lean_object*, lean_object*, lean_object*, lean_object*, lean_object*, lean_object*, lean_object*, lean_object*); +extern lean_object* l_Lean_compiler_enableNew; static lean_object* l_Array_forIn_x27Unsafe_loop___at_Lean_Compiler_LCNF_PassManager_run___spec__6___lambda__1___closed__13; double lean_float_sub(double, double); LEAN_EXPORT lean_object* l_Lean_Meta_isMatcher___at_Lean_Compiler_LCNF_shouldGenerateCode___spec__1___boxed(lean_object*, lean_object*, lean_object*, lean_object*); uint8_t l_Array_isEmpty___rarg(lean_object*); static lean_object* l_Lean_Compiler_LCNF_shouldGenerateCode___lambda__2___closed__1; +LEAN_EXPORT lean_object* l_Lean_throwError___at_Lean_Compiler_LCNF_PassManager_run___spec__9(lean_object*, lean_object*, lean_object*, lean_object*, lean_object*, lean_object*); LEAN_EXPORT lean_object* l_Lean_Compiler_LCNF_shouldGenerateCode_isCompIrrelevant(lean_object* x_1, lean_object* x_2, lean_object* x_3, lean_object* x_4, lean_object* x_5, lean_object* x_6) { _start: { @@ -5595,19 +5616,308 @@ static lean_object* _init_l_Array_forIn_x27Unsafe_loop___at_Lean_Compiler_LCNF_P _start: { lean_object* x_1; -x_1 = lean_mk_string_unchecked("Lean.Compiler.LCNF.Main", 23, 23); +x_1 = lean_mk_string_unchecked("IR", 2, 2); return x_1; } } static lean_object* _init_l_Array_forIn_x27Unsafe_loop___at_Lean_Compiler_LCNF_PassManager_run___spec__8___closed__2() { _start: { +lean_object* x_1; lean_object* x_2; lean_object* x_3; +x_1 = l_Array_forIn_x27Unsafe_loop___at_Lean_Compiler_LCNF_checkpoint___spec__1___lambda__3___closed__1; +x_2 = l_Array_forIn_x27Unsafe_loop___at_Lean_Compiler_LCNF_PassManager_run___spec__8___closed__1; +x_3 = l_Lean_Name_mkStr2(x_1, x_2); +return x_3; +} +} 
+LEAN_EXPORT lean_object* l_Array_forIn_x27Unsafe_loop___at_Lean_Compiler_LCNF_PassManager_run___spec__8(lean_object* x_1, lean_object* x_2, lean_object* x_3, size_t x_4, size_t x_5, lean_object* x_6, lean_object* x_7, lean_object* x_8, lean_object* x_9, lean_object* x_10, lean_object* x_11) { +_start: +{ +uint8_t x_12; +x_12 = lean_usize_dec_lt(x_5, x_4); +if (x_12 == 0) +{ +lean_object* x_13; +x_13 = lean_alloc_ctor(0, 2, 0); +lean_ctor_set(x_13, 0, x_6); +lean_ctor_set(x_13, 1, x_11); +return x_13; +} +else +{ +lean_object* x_14; lean_object* x_15; lean_object* x_16; lean_object* x_17; lean_object* x_18; lean_object* x_19; lean_object* x_20; lean_object* x_21; lean_object* x_22; size_t x_23; size_t x_24; lean_object* x_25; +lean_dec(x_6); +x_14 = lean_array_uget(x_3, x_5); +x_15 = l_Lean_IR_LogEntry_fmt(x_14); +x_16 = l_Lean_MessageData_ofFormat(x_15); +x_17 = l_Array_forIn_x27Unsafe_loop___at_Lean_Compiler_LCNF_checkpoint___spec__1___lambda__2___closed__7; +x_18 = lean_alloc_ctor(7, 2, 0); +lean_ctor_set(x_18, 0, x_17); +lean_ctor_set(x_18, 1, x_16); +x_19 = lean_alloc_ctor(7, 2, 0); +lean_ctor_set(x_19, 0, x_18); +lean_ctor_set(x_19, 1, x_17); +x_20 = l_Array_forIn_x27Unsafe_loop___at_Lean_Compiler_LCNF_PassManager_run___spec__8___closed__2; +x_21 = l_Lean_addTrace___at_Lean_Compiler_LCNF_PassManager_run___spec__7(x_20, x_19, x_7, x_8, x_9, x_10, x_11); +x_22 = lean_ctor_get(x_21, 1); +lean_inc(x_22); +lean_dec(x_21); +x_23 = 1; +x_24 = lean_usize_add(x_5, x_23); +x_25 = lean_box(0); +x_5 = x_24; +x_6 = x_25; +x_11 = x_22; +goto _start; +} +} +} +LEAN_EXPORT lean_object* l_Lean_throwError___at_Lean_Compiler_LCNF_PassManager_run___spec__9(lean_object* x_1, lean_object* x_2, lean_object* x_3, lean_object* x_4, lean_object* x_5, lean_object* x_6) { +_start: +{ +lean_object* x_7; lean_object* x_8; uint8_t x_9; +x_7 = lean_ctor_get(x_4, 5); +x_8 = lean_st_ref_get(x_5, x_6); +x_9 = !lean_is_exclusive(x_8); +if (x_9 == 0) +{ +lean_object* x_10; lean_object* x_11; lean_object* x_12; lean_object* x_13; uint8_t x_14; +x_10 = lean_ctor_get(x_8, 0); +x_11 = lean_ctor_get(x_8, 1); +x_12 = lean_ctor_get(x_10, 0); +lean_inc(x_12); +lean_dec(x_10); +x_13 = lean_st_ref_get(x_3, x_11); +x_14 = !lean_is_exclusive(x_13); +if (x_14 == 0) +{ +lean_object* x_15; lean_object* x_16; lean_object* x_17; lean_object* x_18; lean_object* x_19; lean_object* x_20; lean_object* x_21; +x_15 = lean_ctor_get(x_13, 0); +x_16 = lean_ctor_get(x_15, 0); +lean_inc(x_16); +lean_dec(x_15); +x_17 = l_Lean_Compiler_LCNF_LCtx_toLocalContext(x_16); +lean_dec(x_16); +x_18 = lean_ctor_get(x_4, 2); +x_19 = l_Lean_Compiler_LCNF_shouldGenerateCode___closed__11; +lean_inc(x_18); +x_20 = lean_alloc_ctor(0, 4, 0); +lean_ctor_set(x_20, 0, x_12); +lean_ctor_set(x_20, 1, x_19); +lean_ctor_set(x_20, 2, x_17); +lean_ctor_set(x_20, 3, x_18); +lean_ctor_set_tag(x_8, 3); +lean_ctor_set(x_8, 1, x_1); +lean_ctor_set(x_8, 0, x_20); +lean_inc(x_7); +x_21 = lean_alloc_ctor(0, 2, 0); +lean_ctor_set(x_21, 0, x_7); +lean_ctor_set(x_21, 1, x_8); +lean_ctor_set_tag(x_13, 1); +lean_ctor_set(x_13, 0, x_21); +return x_13; +} +else +{ +lean_object* x_22; lean_object* x_23; lean_object* x_24; lean_object* x_25; lean_object* x_26; lean_object* x_27; lean_object* x_28; lean_object* x_29; lean_object* x_30; +x_22 = lean_ctor_get(x_13, 0); +x_23 = lean_ctor_get(x_13, 1); +lean_inc(x_23); +lean_inc(x_22); +lean_dec(x_13); +x_24 = lean_ctor_get(x_22, 0); +lean_inc(x_24); +lean_dec(x_22); +x_25 = l_Lean_Compiler_LCNF_LCtx_toLocalContext(x_24); +lean_dec(x_24); +x_26 = 
lean_ctor_get(x_4, 2); +x_27 = l_Lean_Compiler_LCNF_shouldGenerateCode___closed__11; +lean_inc(x_26); +x_28 = lean_alloc_ctor(0, 4, 0); +lean_ctor_set(x_28, 0, x_12); +lean_ctor_set(x_28, 1, x_27); +lean_ctor_set(x_28, 2, x_25); +lean_ctor_set(x_28, 3, x_26); +lean_ctor_set_tag(x_8, 3); +lean_ctor_set(x_8, 1, x_1); +lean_ctor_set(x_8, 0, x_28); +lean_inc(x_7); +x_29 = lean_alloc_ctor(0, 2, 0); +lean_ctor_set(x_29, 0, x_7); +lean_ctor_set(x_29, 1, x_8); +x_30 = lean_alloc_ctor(1, 2, 0); +lean_ctor_set(x_30, 0, x_29); +lean_ctor_set(x_30, 1, x_23); +return x_30; +} +} +else +{ +lean_object* x_31; lean_object* x_32; lean_object* x_33; lean_object* x_34; lean_object* x_35; lean_object* x_36; lean_object* x_37; lean_object* x_38; lean_object* x_39; lean_object* x_40; lean_object* x_41; lean_object* x_42; lean_object* x_43; lean_object* x_44; lean_object* x_45; +x_31 = lean_ctor_get(x_8, 0); +x_32 = lean_ctor_get(x_8, 1); +lean_inc(x_32); +lean_inc(x_31); +lean_dec(x_8); +x_33 = lean_ctor_get(x_31, 0); +lean_inc(x_33); +lean_dec(x_31); +x_34 = lean_st_ref_get(x_3, x_32); +x_35 = lean_ctor_get(x_34, 0); +lean_inc(x_35); +x_36 = lean_ctor_get(x_34, 1); +lean_inc(x_36); +if (lean_is_exclusive(x_34)) { + lean_ctor_release(x_34, 0); + lean_ctor_release(x_34, 1); + x_37 = x_34; +} else { + lean_dec_ref(x_34); + x_37 = lean_box(0); +} +x_38 = lean_ctor_get(x_35, 0); +lean_inc(x_38); +lean_dec(x_35); +x_39 = l_Lean_Compiler_LCNF_LCtx_toLocalContext(x_38); +lean_dec(x_38); +x_40 = lean_ctor_get(x_4, 2); +x_41 = l_Lean_Compiler_LCNF_shouldGenerateCode___closed__11; +lean_inc(x_40); +x_42 = lean_alloc_ctor(0, 4, 0); +lean_ctor_set(x_42, 0, x_33); +lean_ctor_set(x_42, 1, x_41); +lean_ctor_set(x_42, 2, x_39); +lean_ctor_set(x_42, 3, x_40); +x_43 = lean_alloc_ctor(3, 2, 0); +lean_ctor_set(x_43, 0, x_42); +lean_ctor_set(x_43, 1, x_1); +lean_inc(x_7); +x_44 = lean_alloc_ctor(0, 2, 0); +lean_ctor_set(x_44, 0, x_7); +lean_ctor_set(x_44, 1, x_43); +if (lean_is_scalar(x_37)) { + x_45 = lean_alloc_ctor(1, 2, 0); +} else { + x_45 = x_37; + lean_ctor_set_tag(x_45, 1); +} +lean_ctor_set(x_45, 0, x_44); +lean_ctor_set(x_45, 1, x_36); +return x_45; +} +} +} +LEAN_EXPORT lean_object* l_Lean_setEnv___at_Lean_Compiler_LCNF_PassManager_run___spec__10(lean_object* x_1, lean_object* x_2, lean_object* x_3, lean_object* x_4, lean_object* x_5, lean_object* x_6) { +_start: +{ +lean_object* x_7; lean_object* x_8; lean_object* x_9; uint8_t x_10; +x_7 = lean_st_ref_take(x_5, x_6); +x_8 = lean_ctor_get(x_7, 0); +lean_inc(x_8); +x_9 = lean_ctor_get(x_7, 1); +lean_inc(x_9); +lean_dec(x_7); +x_10 = !lean_is_exclusive(x_8); +if (x_10 == 0) +{ +lean_object* x_11; lean_object* x_12; lean_object* x_13; lean_object* x_14; uint8_t x_15; +x_11 = lean_ctor_get(x_8, 4); +lean_dec(x_11); +x_12 = lean_ctor_get(x_8, 0); +lean_dec(x_12); +x_13 = l_Array_forIn_x27Unsafe_loop___at_Lean_Compiler_LCNF_checkpoint___spec__1___lambda__3___closed__8; +lean_ctor_set(x_8, 4, x_13); +lean_ctor_set(x_8, 0, x_1); +x_14 = lean_st_ref_set(x_5, x_8, x_9); +x_15 = !lean_is_exclusive(x_14); +if (x_15 == 0) +{ +lean_object* x_16; lean_object* x_17; +x_16 = lean_ctor_get(x_14, 0); +lean_dec(x_16); +x_17 = lean_box(0); +lean_ctor_set(x_14, 0, x_17); +return x_14; +} +else +{ +lean_object* x_18; lean_object* x_19; lean_object* x_20; +x_18 = lean_ctor_get(x_14, 1); +lean_inc(x_18); +lean_dec(x_14); +x_19 = lean_box(0); +x_20 = lean_alloc_ctor(0, 2, 0); +lean_ctor_set(x_20, 0, x_19); +lean_ctor_set(x_20, 1, x_18); +return x_20; +} +} +else +{ +lean_object* x_21; 
lean_object* x_22; lean_object* x_23; lean_object* x_24; lean_object* x_25; lean_object* x_26; lean_object* x_27; lean_object* x_28; lean_object* x_29; lean_object* x_30; lean_object* x_31; lean_object* x_32; lean_object* x_33; +x_21 = lean_ctor_get(x_8, 1); +x_22 = lean_ctor_get(x_8, 2); +x_23 = lean_ctor_get(x_8, 3); +x_24 = lean_ctor_get(x_8, 5); +x_25 = lean_ctor_get(x_8, 6); +x_26 = lean_ctor_get(x_8, 7); +lean_inc(x_26); +lean_inc(x_25); +lean_inc(x_24); +lean_inc(x_23); +lean_inc(x_22); +lean_inc(x_21); +lean_dec(x_8); +x_27 = l_Array_forIn_x27Unsafe_loop___at_Lean_Compiler_LCNF_checkpoint___spec__1___lambda__3___closed__8; +x_28 = lean_alloc_ctor(0, 8, 0); +lean_ctor_set(x_28, 0, x_1); +lean_ctor_set(x_28, 1, x_21); +lean_ctor_set(x_28, 2, x_22); +lean_ctor_set(x_28, 3, x_23); +lean_ctor_set(x_28, 4, x_27); +lean_ctor_set(x_28, 5, x_24); +lean_ctor_set(x_28, 6, x_25); +lean_ctor_set(x_28, 7, x_26); +x_29 = lean_st_ref_set(x_5, x_28, x_9); +x_30 = lean_ctor_get(x_29, 1); +lean_inc(x_30); +if (lean_is_exclusive(x_29)) { + lean_ctor_release(x_29, 0); + lean_ctor_release(x_29, 1); + x_31 = x_29; +} else { + lean_dec_ref(x_29); + x_31 = lean_box(0); +} +x_32 = lean_box(0); +if (lean_is_scalar(x_31)) { + x_33 = lean_alloc_ctor(0, 2, 0); +} else { + x_33 = x_31; +} +lean_ctor_set(x_33, 0, x_32); +lean_ctor_set(x_33, 1, x_30); +return x_33; +} +} +} +static lean_object* _init_l_Array_forIn_x27Unsafe_loop___at_Lean_Compiler_LCNF_PassManager_run___spec__11___closed__1() { +_start: +{ +lean_object* x_1; +x_1 = lean_mk_string_unchecked("Lean.Compiler.LCNF.Main", 23, 23); +return x_1; +} +} +static lean_object* _init_l_Array_forIn_x27Unsafe_loop___at_Lean_Compiler_LCNF_PassManager_run___spec__11___closed__2() { +_start: +{ lean_object* x_1; x_1 = lean_mk_string_unchecked("Lean.Compiler.LCNF.PassManager.run", 34, 34); return x_1; } } -static lean_object* _init_l_Array_forIn_x27Unsafe_loop___at_Lean_Compiler_LCNF_PassManager_run___spec__8___closed__3() { +static lean_object* _init_l_Array_forIn_x27Unsafe_loop___at_Lean_Compiler_LCNF_PassManager_run___spec__11___closed__3() { _start: { lean_object* x_1; @@ -5615,20 +5925,20 @@ x_1 = lean_mk_string_unchecked("unreachable code has been reached", 33, 33); return x_1; } } -static lean_object* _init_l_Array_forIn_x27Unsafe_loop___at_Lean_Compiler_LCNF_PassManager_run___spec__8___closed__4() { +static lean_object* _init_l_Array_forIn_x27Unsafe_loop___at_Lean_Compiler_LCNF_PassManager_run___spec__11___closed__4() { _start: { lean_object* x_1; lean_object* x_2; lean_object* x_3; lean_object* x_4; lean_object* x_5; lean_object* x_6; -x_1 = l_Array_forIn_x27Unsafe_loop___at_Lean_Compiler_LCNF_PassManager_run___spec__8___closed__1; -x_2 = l_Array_forIn_x27Unsafe_loop___at_Lean_Compiler_LCNF_PassManager_run___spec__8___closed__2; -x_3 = lean_unsigned_to_nat(84u); +x_1 = l_Array_forIn_x27Unsafe_loop___at_Lean_Compiler_LCNF_PassManager_run___spec__11___closed__1; +x_2 = l_Array_forIn_x27Unsafe_loop___at_Lean_Compiler_LCNF_PassManager_run___spec__11___closed__2; +x_3 = lean_unsigned_to_nat(88u); x_4 = lean_unsigned_to_nat(52u); -x_5 = l_Array_forIn_x27Unsafe_loop___at_Lean_Compiler_LCNF_PassManager_run___spec__8___closed__3; +x_5 = l_Array_forIn_x27Unsafe_loop___at_Lean_Compiler_LCNF_PassManager_run___spec__11___closed__3; x_6 = l___private_Init_Util_0__mkPanicMessageWithDecl(x_1, x_2, x_3, x_4, x_5); return x_6; } } -LEAN_EXPORT lean_object* l_Array_forIn_x27Unsafe_loop___at_Lean_Compiler_LCNF_PassManager_run___spec__8(lean_object* x_1, lean_object* x_2, 
lean_object* x_3, lean_object* x_4, size_t x_5, size_t x_6, lean_object* x_7, lean_object* x_8, lean_object* x_9, lean_object* x_10, lean_object* x_11, lean_object* x_12) { +LEAN_EXPORT lean_object* l_Array_forIn_x27Unsafe_loop___at_Lean_Compiler_LCNF_PassManager_run___spec__11(lean_object* x_1, lean_object* x_2, lean_object* x_3, lean_object* x_4, size_t x_5, size_t x_6, lean_object* x_7, lean_object* x_8, lean_object* x_9, lean_object* x_10, lean_object* x_11, lean_object* x_12) { _start: { uint8_t x_13; @@ -5664,7 +5974,7 @@ lean_dec(x_15); x_27 = lean_ctor_get(x_25, 1); lean_inc(x_27); lean_dec(x_25); -x_28 = l_Array_forIn_x27Unsafe_loop___at_Lean_Compiler_LCNF_PassManager_run___spec__8___closed__4; +x_28 = l_Array_forIn_x27Unsafe_loop___at_Lean_Compiler_LCNF_PassManager_run___spec__11___closed__4; lean_inc(x_11); lean_inc(x_10); lean_inc(x_9); @@ -5992,7 +6302,7 @@ goto _start; } } } -LEAN_EXPORT lean_object* l_Array_foldlMUnsafe_fold___at_Lean_Compiler_LCNF_PassManager_run___spec__9(lean_object* x_1, size_t x_2, size_t x_3, lean_object* x_4, lean_object* x_5, lean_object* x_6, lean_object* x_7, lean_object* x_8, lean_object* x_9) { +LEAN_EXPORT lean_object* l_Array_foldlMUnsafe_fold___at_Lean_Compiler_LCNF_PassManager_run___spec__12(lean_object* x_1, size_t x_2, size_t x_3, lean_object* x_4, lean_object* x_5, lean_object* x_6, lean_object* x_7, lean_object* x_8, lean_object* x_9) { _start: { uint8_t x_10; @@ -6079,7 +6389,7 @@ return x_28; } } } -LEAN_EXPORT lean_object* l_Array_forIn_x27Unsafe_loop___at_Lean_Compiler_LCNF_PassManager_run___spec__10(lean_object* x_1, lean_object* x_2, lean_object* x_3, size_t x_4, size_t x_5, lean_object* x_6, lean_object* x_7, lean_object* x_8, lean_object* x_9, lean_object* x_10, lean_object* x_11) { +LEAN_EXPORT lean_object* l_Array_forIn_x27Unsafe_loop___at_Lean_Compiler_LCNF_PassManager_run___spec__13(lean_object* x_1, lean_object* x_2, lean_object* x_3, size_t x_4, size_t x_5, lean_object* x_6, lean_object* x_7, lean_object* x_8, lean_object* x_9, lean_object* x_10, lean_object* x_11) { _start: { uint8_t x_12; @@ -6211,7 +6521,49 @@ return x_43; } } } -LEAN_EXPORT lean_object* l_Array_forIn_x27Unsafe_loop___at_Lean_Compiler_LCNF_PassManager_run___spec__11(lean_object* x_1, lean_object* x_2, lean_object* x_3, lean_object* x_4, size_t x_5, size_t x_6, lean_object* x_7, lean_object* x_8, lean_object* x_9, lean_object* x_10, lean_object* x_11, lean_object* x_12) { +LEAN_EXPORT lean_object* l_Array_forIn_x27Unsafe_loop___at_Lean_Compiler_LCNF_PassManager_run___spec__14(lean_object* x_1, lean_object* x_2, lean_object* x_3, size_t x_4, size_t x_5, lean_object* x_6, lean_object* x_7, lean_object* x_8, lean_object* x_9, lean_object* x_10, lean_object* x_11) { +_start: +{ +uint8_t x_12; +x_12 = lean_usize_dec_lt(x_5, x_4); +if (x_12 == 0) +{ +lean_object* x_13; +x_13 = lean_alloc_ctor(0, 2, 0); +lean_ctor_set(x_13, 0, x_6); +lean_ctor_set(x_13, 1, x_11); +return x_13; +} +else +{ +lean_object* x_14; lean_object* x_15; lean_object* x_16; lean_object* x_17; lean_object* x_18; lean_object* x_19; lean_object* x_20; lean_object* x_21; lean_object* x_22; size_t x_23; size_t x_24; lean_object* x_25; +lean_dec(x_6); +x_14 = lean_array_uget(x_3, x_5); +x_15 = l_Lean_IR_LogEntry_fmt(x_14); +x_16 = l_Lean_MessageData_ofFormat(x_15); +x_17 = l_Array_forIn_x27Unsafe_loop___at_Lean_Compiler_LCNF_checkpoint___spec__1___lambda__2___closed__7; +x_18 = lean_alloc_ctor(7, 2, 0); +lean_ctor_set(x_18, 0, x_17); +lean_ctor_set(x_18, 1, x_16); +x_19 = lean_alloc_ctor(7, 2, 0); 
+lean_ctor_set(x_19, 0, x_18); +lean_ctor_set(x_19, 1, x_17); +x_20 = l_Array_forIn_x27Unsafe_loop___at_Lean_Compiler_LCNF_PassManager_run___spec__8___closed__2; +x_21 = l_Lean_addTrace___at_Lean_Compiler_LCNF_PassManager_run___spec__7(x_20, x_19, x_7, x_8, x_9, x_10, x_11); +x_22 = lean_ctor_get(x_21, 1); +lean_inc(x_22); +lean_dec(x_21); +x_23 = 1; +x_24 = lean_usize_add(x_5, x_23); +x_25 = lean_box(0); +x_5 = x_24; +x_6 = x_25; +x_11 = x_22; +goto _start; +} +} +} +LEAN_EXPORT lean_object* l_Array_forIn_x27Unsafe_loop___at_Lean_Compiler_LCNF_PassManager_run___spec__15(lean_object* x_1, lean_object* x_2, lean_object* x_3, lean_object* x_4, size_t x_5, size_t x_6, lean_object* x_7, lean_object* x_8, lean_object* x_9, lean_object* x_10, lean_object* x_11, lean_object* x_12) { _start: { uint8_t x_13; @@ -6247,7 +6599,7 @@ lean_dec(x_15); x_27 = lean_ctor_get(x_25, 1); lean_inc(x_27); lean_dec(x_25); -x_28 = l_Array_forIn_x27Unsafe_loop___at_Lean_Compiler_LCNF_PassManager_run___spec__8___closed__4; +x_28 = l_Array_forIn_x27Unsafe_loop___at_Lean_Compiler_LCNF_PassManager_run___spec__11___closed__4; lean_inc(x_11); lean_inc(x_10); lean_inc(x_9); @@ -6575,408 +6927,690 @@ goto _start; } } } -LEAN_EXPORT lean_object* l_Lean_Compiler_LCNF_PassManager_run___lambda__1(lean_object* x_1, lean_object* x_2, lean_object* x_3, lean_object* x_4, lean_object* x_5, lean_object* x_6, lean_object* x_7) { -_start: -{ -lean_object* x_8; -x_8 = lean_alloc_ctor(0, 2, 0); -lean_ctor_set(x_8, 0, x_1); -lean_ctor_set(x_8, 1, x_7); -return x_8; -} -} -static lean_object* _init_l_Lean_Compiler_LCNF_PassManager_run___lambda__2___closed__1() { -_start: -{ -lean_object* x_1; -x_1 = lean_mk_string_unchecked("result", 6, 6); -return x_1; -} -} -static lean_object* _init_l_Lean_Compiler_LCNF_PassManager_run___lambda__2___closed__2() { -_start: -{ -lean_object* x_1; lean_object* x_2; lean_object* x_3; -x_1 = l_Array_forIn_x27Unsafe_loop___at_Lean_Compiler_LCNF_checkpoint___spec__1___lambda__3___closed__1; -x_2 = l_Lean_Compiler_LCNF_PassManager_run___lambda__2___closed__1; -x_3 = l_Lean_Name_mkStr2(x_1, x_2); -return x_3; -} -} -LEAN_EXPORT lean_object* l_Lean_Compiler_LCNF_PassManager_run___lambda__2(lean_object* x_1, lean_object* x_2, lean_object* x_3, lean_object* x_4, lean_object* x_5, lean_object* x_6, lean_object* x_7) { +LEAN_EXPORT lean_object* l_Lean_Compiler_LCNF_PassManager_run___lambda__1(lean_object* x_1, lean_object* x_2, lean_object* x_3, size_t x_4, lean_object* x_5, lean_object* x_6, lean_object* x_7, lean_object* x_8, lean_object* x_9, lean_object* x_10) { _start: { -size_t x_8; size_t x_9; lean_object* x_10; -x_8 = lean_array_size(x_1); -x_9 = 0; -lean_inc(x_6); -lean_inc(x_5); -lean_inc(x_4); -lean_inc(x_3); -x_10 = l_Array_mapMUnsafe_map___at_Lean_Compiler_LCNF_PassManager_run___spec__1(x_8, x_9, x_1, x_3, x_4, x_5, x_6, x_7); -if (lean_obj_tag(x_10) == 0) +lean_object* x_11; +lean_inc(x_9); +lean_inc(x_8); +x_11 = l_Lean_IR_toIR(x_1, x_8, x_9, x_10); +if (lean_obj_tag(x_11) == 0) { -lean_object* x_11; lean_object* x_12; lean_object* x_13; lean_object* x_14; lean_object* x_15; lean_object* x_16; lean_object* x_17; size_t x_18; lean_object* x_19; -x_11 = lean_ctor_get(x_10, 0); -lean_inc(x_11); -x_12 = lean_ctor_get(x_10, 1); +lean_object* x_12; lean_object* x_13; lean_object* x_14; lean_object* x_15; lean_object* x_16; lean_object* x_17; lean_object* x_18; lean_object* x_19; lean_object* x_20; size_t x_21; lean_object* x_22; lean_object* x_23; +x_12 = lean_ctor_get(x_11, 0); lean_inc(x_12); 
-lean_dec(x_10); -x_13 = l_Lean_Compiler_LCNF_markRecDecls(x_11); -x_14 = l_Lean_Compiler_LCNF_getPassManager___rarg(x_6, x_12); +x_13 = lean_ctor_get(x_11, 1); +lean_inc(x_13); +lean_dec(x_11); +x_14 = lean_st_ref_get(x_9, x_13); x_15 = lean_ctor_get(x_14, 0); lean_inc(x_15); x_16 = lean_ctor_get(x_14, 1); lean_inc(x_16); lean_dec(x_14); -x_17 = lean_box(0); -x_18 = lean_array_size(x_15); -lean_inc(x_6); -lean_inc(x_5); -lean_inc(x_4); -lean_inc(x_3); -x_19 = l_Array_forIn_x27Unsafe_loop___at_Lean_Compiler_LCNF_PassManager_run___spec__6(x_17, x_15, x_15, x_18, x_9, x_13, x_3, x_4, x_5, x_6, x_16); +x_17 = lean_ctor_get(x_15, 0); +lean_inc(x_17); lean_dec(x_15); -if (lean_obj_tag(x_19) == 0) -{ -lean_object* x_20; lean_object* x_21; lean_object* x_22; lean_object* x_23; lean_object* x_24; uint8_t x_25; -x_20 = lean_ctor_get(x_19, 0); +lean_inc(x_12); +x_18 = lean_ir_compile(x_17, x_2, x_12); +x_19 = lean_ctor_get(x_18, 0); +lean_inc(x_19); +x_20 = lean_ctor_get(x_18, 1); lean_inc(x_20); -x_21 = lean_ctor_get(x_19, 1); -lean_inc(x_21); +lean_dec(x_18); +x_21 = lean_array_size(x_19); +x_22 = lean_box(0); +x_23 = l_Array_forIn_x27Unsafe_loop___at_Lean_Compiler_LCNF_PassManager_run___spec__8(x_19, x_3, x_19, x_21, x_4, x_22, x_6, x_7, x_8, x_9, x_16); lean_dec(x_19); -x_22 = l_Lean_Compiler_LCNF_PassManager_run___lambda__2___closed__2; -x_23 = l_Lean_isTracingEnabledFor___at_Lean_Compiler_LCNF_UnreachableBranches_elimDead_go___spec__1(x_22, x_3, x_4, x_5, x_6, x_21); -x_24 = lean_ctor_get(x_23, 0); +if (lean_obj_tag(x_20) == 0) +{ +lean_object* x_24; uint8_t x_25; +lean_dec(x_12); +x_24 = lean_ctor_get(x_23, 1); lean_inc(x_24); -x_25 = lean_unbox(x_24); -lean_dec(x_24); +lean_dec(x_23); +x_25 = !lean_is_exclusive(x_20); if (x_25 == 0) { -uint8_t x_26; -lean_dec(x_6); -lean_dec(x_5); -lean_dec(x_4); -lean_dec(x_3); -x_26 = !lean_is_exclusive(x_23); -if (x_26 == 0) -{ -lean_object* x_27; -x_27 = lean_ctor_get(x_23, 0); -lean_dec(x_27); -lean_ctor_set(x_23, 0, x_20); -return x_23; +lean_object* x_26; lean_object* x_27; +lean_ctor_set_tag(x_20, 3); +x_26 = l_Lean_MessageData_ofFormat(x_20); +x_27 = l_Lean_throwError___at_Lean_Compiler_LCNF_PassManager_run___spec__9(x_26, x_6, x_7, x_8, x_9, x_24); +lean_dec(x_9); +lean_dec(x_8); +return x_27; } else { -lean_object* x_28; lean_object* x_29; -x_28 = lean_ctor_get(x_23, 1); +lean_object* x_28; lean_object* x_29; lean_object* x_30; lean_object* x_31; +x_28 = lean_ctor_get(x_20, 0); lean_inc(x_28); -lean_dec(x_23); -x_29 = lean_alloc_ctor(0, 2, 0); -lean_ctor_set(x_29, 0, x_20); -lean_ctor_set(x_29, 1, x_28); -return x_29; +lean_dec(x_20); +x_29 = lean_alloc_ctor(3, 1, 0); +lean_ctor_set(x_29, 0, x_28); +x_30 = l_Lean_MessageData_ofFormat(x_29); +x_31 = l_Lean_throwError___at_Lean_Compiler_LCNF_PassManager_run___spec__9(x_30, x_6, x_7, x_8, x_9, x_24); +lean_dec(x_9); +lean_dec(x_8); +return x_31; } } else { -lean_object* x_30; size_t x_31; lean_object* x_32; lean_object* x_33; -x_30 = lean_ctor_get(x_23, 1); -lean_inc(x_30); +lean_object* x_32; lean_object* x_33; lean_object* x_34; uint8_t x_35; +x_32 = lean_ctor_get(x_23, 1); +lean_inc(x_32); lean_dec(x_23); -x_31 = lean_array_size(x_20); -x_32 = lean_box(0); -x_33 = l_Array_forIn_x27Unsafe_loop___at_Lean_Compiler_LCNF_PassManager_run___spec__8(x_20, x_22, x_17, x_20, x_31, x_9, x_32, x_3, x_4, x_5, x_6, x_30); -if (lean_obj_tag(x_33) == 0) +x_33 = lean_ctor_get(x_20, 0); +lean_inc(x_33); +lean_dec(x_20); +x_34 = l_Lean_setEnv___at_Lean_Compiler_LCNF_PassManager_run___spec__10(x_33, x_6, x_7, x_8, 
x_9, x_32); +lean_dec(x_9); +lean_dec(x_8); +x_35 = !lean_is_exclusive(x_34); +if (x_35 == 0) { -uint8_t x_34; -x_34 = !lean_is_exclusive(x_33); -if (x_34 == 0) +lean_object* x_36; +x_36 = lean_ctor_get(x_34, 0); +lean_dec(x_36); +lean_ctor_set(x_34, 0, x_12); +return x_34; +} +else { -lean_object* x_35; -x_35 = lean_ctor_get(x_33, 0); -lean_dec(x_35); -lean_ctor_set(x_33, 0, x_20); -return x_33; +lean_object* x_37; lean_object* x_38; +x_37 = lean_ctor_get(x_34, 1); +lean_inc(x_37); +lean_dec(x_34); +x_38 = lean_alloc_ctor(0, 2, 0); +lean_ctor_set(x_38, 0, x_12); +lean_ctor_set(x_38, 1, x_37); +return x_38; +} +} } else { -lean_object* x_36; lean_object* x_37; -x_36 = lean_ctor_get(x_33, 1); -lean_inc(x_36); -lean_dec(x_33); -x_37 = lean_alloc_ctor(0, 2, 0); -lean_ctor_set(x_37, 0, x_20); -lean_ctor_set(x_37, 1, x_36); -return x_37; +uint8_t x_39; +lean_dec(x_9); +lean_dec(x_8); +lean_dec(x_2); +x_39 = !lean_is_exclusive(x_11); +if (x_39 == 0) +{ +return x_11; +} +else +{ +lean_object* x_40; lean_object* x_41; lean_object* x_42; +x_40 = lean_ctor_get(x_11, 0); +x_41 = lean_ctor_get(x_11, 1); +lean_inc(x_41); +lean_inc(x_40); +lean_dec(x_11); +x_42 = lean_alloc_ctor(1, 2, 0); +lean_ctor_set(x_42, 0, x_40); +lean_ctor_set(x_42, 1, x_41); +return x_42; +} +} } } +static lean_object* _init_l_Lean_Compiler_LCNF_PassManager_run___lambda__2___closed__1() { +_start: +{ +lean_object* x_1; +x_1 = l_Lean_compiler_enableNew; +return x_1; +} +} +LEAN_EXPORT lean_object* l_Lean_Compiler_LCNF_PassManager_run___lambda__2(lean_object* x_1, lean_object* x_2, size_t x_3, lean_object* x_4, lean_object* x_5, lean_object* x_6, lean_object* x_7, lean_object* x_8, lean_object* x_9, lean_object* x_10) { +_start: +{ +lean_object* x_11; lean_object* x_12; uint8_t x_13; +x_11 = lean_ctor_get(x_8, 2); +lean_inc(x_11); +x_12 = l_Lean_Compiler_LCNF_PassManager_run___lambda__2___closed__1; +x_13 = l_Lean_Option_get___at_Lean_Compiler_LCNF_toConfigOptions___spec__2(x_11, x_12); +if (x_13 == 0) +{ +lean_object* x_14; +lean_dec(x_11); +lean_dec(x_9); +lean_dec(x_8); +x_14 = lean_alloc_ctor(0, 2, 0); +lean_ctor_set(x_14, 0, x_4); +lean_ctor_set(x_14, 1, x_10); +return x_14; +} else { -uint8_t x_38; +lean_object* x_15; lean_object* x_16; +lean_dec(x_4); +x_15 = lean_box(0); +x_16 = l_Lean_Compiler_LCNF_PassManager_run___lambda__1(x_1, x_11, x_2, x_3, x_15, x_6, x_7, x_8, x_9, x_10); +return x_16; +} +} +} +static lean_object* _init_l_Lean_Compiler_LCNF_PassManager_run___lambda__3___closed__1() { +_start: +{ +lean_object* x_1; +x_1 = lean_mk_string_unchecked("result", 6, 6); +return x_1; +} +} +static lean_object* _init_l_Lean_Compiler_LCNF_PassManager_run___lambda__3___closed__2() { +_start: +{ +lean_object* x_1; lean_object* x_2; lean_object* x_3; +x_1 = l_Array_forIn_x27Unsafe_loop___at_Lean_Compiler_LCNF_checkpoint___spec__1___lambda__3___closed__1; +x_2 = l_Lean_Compiler_LCNF_PassManager_run___lambda__3___closed__1; +x_3 = l_Lean_Name_mkStr2(x_1, x_2); +return x_3; +} +} +LEAN_EXPORT lean_object* l_Lean_Compiler_LCNF_PassManager_run___lambda__3(lean_object* x_1, lean_object* x_2, lean_object* x_3, lean_object* x_4, lean_object* x_5, lean_object* x_6, lean_object* x_7, lean_object* x_8) { +_start: +{ +size_t x_9; size_t x_10; lean_object* x_11; +x_9 = lean_array_size(x_1); +x_10 = 0; +lean_inc(x_7); +lean_inc(x_6); +lean_inc(x_5); +lean_inc(x_4); +x_11 = l_Array_mapMUnsafe_map___at_Lean_Compiler_LCNF_PassManager_run___spec__1(x_9, x_10, x_1, x_4, x_5, x_6, x_7, x_8); +if (lean_obj_tag(x_11) == 0) +{ +lean_object* x_12; 
lean_object* x_13; lean_object* x_14; lean_object* x_15; lean_object* x_16; lean_object* x_17; lean_object* x_18; size_t x_19; lean_object* x_20; +x_12 = lean_ctor_get(x_11, 0); +lean_inc(x_12); +x_13 = lean_ctor_get(x_11, 1); +lean_inc(x_13); +lean_dec(x_11); +x_14 = l_Lean_Compiler_LCNF_markRecDecls(x_12); +x_15 = l_Lean_Compiler_LCNF_getPassManager___rarg(x_7, x_13); +x_16 = lean_ctor_get(x_15, 0); +lean_inc(x_16); +x_17 = lean_ctor_get(x_15, 1); +lean_inc(x_17); +lean_dec(x_15); +x_18 = lean_box(0); +x_19 = lean_array_size(x_16); +lean_inc(x_7); +lean_inc(x_6); +lean_inc(x_5); +lean_inc(x_4); +x_20 = l_Array_forIn_x27Unsafe_loop___at_Lean_Compiler_LCNF_PassManager_run___spec__6(x_18, x_16, x_16, x_19, x_10, x_14, x_4, x_5, x_6, x_7, x_17); +lean_dec(x_16); +if (lean_obj_tag(x_20) == 0) +{ +lean_object* x_21; lean_object* x_22; lean_object* x_23; lean_object* x_24; lean_object* x_25; uint8_t x_26; +x_21 = lean_ctor_get(x_20, 0); +lean_inc(x_21); +x_22 = lean_ctor_get(x_20, 1); +lean_inc(x_22); lean_dec(x_20); -x_38 = !lean_is_exclusive(x_33); -if (x_38 == 0) +x_23 = l_Lean_Compiler_LCNF_PassManager_run___lambda__3___closed__2; +x_24 = l_Lean_isTracingEnabledFor___at_Lean_Compiler_LCNF_UnreachableBranches_elimDead_go___spec__1(x_23, x_4, x_5, x_6, x_7, x_22); +x_25 = lean_ctor_get(x_24, 0); +lean_inc(x_25); +x_26 = lean_unbox(x_25); +lean_dec(x_25); +if (x_26 == 0) +{ +lean_object* x_27; lean_object* x_28; lean_object* x_29; +x_27 = lean_ctor_get(x_24, 1); +lean_inc(x_27); +lean_dec(x_24); +x_28 = lean_box(0); +x_29 = l_Lean_Compiler_LCNF_PassManager_run___lambda__2(x_21, x_18, x_10, x_2, x_28, x_4, x_5, x_6, x_7, x_27); +lean_dec(x_5); +lean_dec(x_4); +lean_dec(x_21); +return x_29; +} +else +{ +lean_object* x_30; size_t x_31; lean_object* x_32; lean_object* x_33; +x_30 = lean_ctor_get(x_24, 1); +lean_inc(x_30); +lean_dec(x_24); +x_31 = lean_array_size(x_21); +x_32 = lean_box(0); +lean_inc(x_7); +lean_inc(x_6); +lean_inc(x_5); +lean_inc(x_4); +x_33 = l_Array_forIn_x27Unsafe_loop___at_Lean_Compiler_LCNF_PassManager_run___spec__11(x_21, x_23, x_18, x_21, x_31, x_10, x_32, x_4, x_5, x_6, x_7, x_30); +if (lean_obj_tag(x_33) == 0) +{ +lean_object* x_34; lean_object* x_35; +x_34 = lean_ctor_get(x_33, 1); +lean_inc(x_34); +lean_dec(x_33); +x_35 = l_Lean_Compiler_LCNF_PassManager_run___lambda__2(x_21, x_18, x_10, x_2, x_32, x_4, x_5, x_6, x_7, x_34); +lean_dec(x_5); +lean_dec(x_4); +lean_dec(x_21); +return x_35; +} +else +{ +uint8_t x_36; +lean_dec(x_21); +lean_dec(x_7); +lean_dec(x_6); +lean_dec(x_5); +lean_dec(x_4); +lean_dec(x_2); +x_36 = !lean_is_exclusive(x_33); +if (x_36 == 0) { return x_33; } else { -lean_object* x_39; lean_object* x_40; lean_object* x_41; -x_39 = lean_ctor_get(x_33, 0); -x_40 = lean_ctor_get(x_33, 1); -lean_inc(x_40); -lean_inc(x_39); +lean_object* x_37; lean_object* x_38; lean_object* x_39; +x_37 = lean_ctor_get(x_33, 0); +x_38 = lean_ctor_get(x_33, 1); +lean_inc(x_38); +lean_inc(x_37); lean_dec(x_33); -x_41 = lean_alloc_ctor(1, 2, 0); -lean_ctor_set(x_41, 0, x_39); -lean_ctor_set(x_41, 1, x_40); -return x_41; +x_39 = lean_alloc_ctor(1, 2, 0); +lean_ctor_set(x_39, 0, x_37); +lean_ctor_set(x_39, 1, x_38); +return x_39; } } } } else { -uint8_t x_42; +uint8_t x_40; +lean_dec(x_7); lean_dec(x_6); lean_dec(x_5); lean_dec(x_4); -lean_dec(x_3); -x_42 = !lean_is_exclusive(x_19); -if (x_42 == 0) +lean_dec(x_2); +x_40 = !lean_is_exclusive(x_20); +if (x_40 == 0) { -return x_19; +return x_20; } else { -lean_object* x_43; lean_object* x_44; lean_object* x_45; -x_43 = 
lean_ctor_get(x_19, 0); -x_44 = lean_ctor_get(x_19, 1); -lean_inc(x_44); -lean_inc(x_43); -lean_dec(x_19); -x_45 = lean_alloc_ctor(1, 2, 0); -lean_ctor_set(x_45, 0, x_43); -lean_ctor_set(x_45, 1, x_44); -return x_45; +lean_object* x_41; lean_object* x_42; lean_object* x_43; +x_41 = lean_ctor_get(x_20, 0); +x_42 = lean_ctor_get(x_20, 1); +lean_inc(x_42); +lean_inc(x_41); +lean_dec(x_20); +x_43 = lean_alloc_ctor(1, 2, 0); +lean_ctor_set(x_43, 0, x_41); +lean_ctor_set(x_43, 1, x_42); +return x_43; } } } else { -uint8_t x_46; +uint8_t x_44; +lean_dec(x_7); lean_dec(x_6); lean_dec(x_5); lean_dec(x_4); -lean_dec(x_3); -x_46 = !lean_is_exclusive(x_10); -if (x_46 == 0) +lean_dec(x_2); +x_44 = !lean_is_exclusive(x_11); +if (x_44 == 0) { -return x_10; +return x_11; } else { -lean_object* x_47; lean_object* x_48; lean_object* x_49; -x_47 = lean_ctor_get(x_10, 0); -x_48 = lean_ctor_get(x_10, 1); -lean_inc(x_48); -lean_inc(x_47); -lean_dec(x_10); -x_49 = lean_alloc_ctor(1, 2, 0); -lean_ctor_set(x_49, 0, x_47); -lean_ctor_set(x_49, 1, x_48); -return x_49; +lean_object* x_45; lean_object* x_46; lean_object* x_47; +x_45 = lean_ctor_get(x_11, 0); +x_46 = lean_ctor_get(x_11, 1); +lean_inc(x_46); +lean_inc(x_45); +lean_dec(x_11); +x_47 = lean_alloc_ctor(1, 2, 0); +lean_ctor_set(x_47, 0, x_45); +lean_ctor_set(x_47, 1, x_46); +return x_47; } } } } -LEAN_EXPORT lean_object* l_Lean_Compiler_LCNF_PassManager_run___lambda__3(lean_object* x_1, lean_object* x_2, lean_object* x_3, lean_object* x_4, lean_object* x_5, lean_object* x_6, lean_object* x_7) { +LEAN_EXPORT lean_object* l_Lean_Compiler_LCNF_PassManager_run___lambda__4(lean_object* x_1, lean_object* x_2, lean_object* x_3, size_t x_4, lean_object* x_5, lean_object* x_6, lean_object* x_7, lean_object* x_8, lean_object* x_9, lean_object* x_10) { _start: { -size_t x_8; size_t x_9; lean_object* x_10; -x_8 = lean_array_size(x_1); -x_9 = 0; -lean_inc(x_6); -lean_inc(x_5); -lean_inc(x_4); -lean_inc(x_3); -x_10 = l_Array_mapMUnsafe_map___at_Lean_Compiler_LCNF_PassManager_run___spec__1(x_8, x_9, x_1, x_3, x_4, x_5, x_6, x_7); -if (lean_obj_tag(x_10) == 0) +lean_object* x_11; +lean_inc(x_9); +lean_inc(x_8); +x_11 = l_Lean_IR_toIR(x_1, x_8, x_9, x_10); +if (lean_obj_tag(x_11) == 0) { -lean_object* x_11; lean_object* x_12; lean_object* x_13; lean_object* x_14; lean_object* x_15; lean_object* x_16; lean_object* x_17; size_t x_18; lean_object* x_19; -x_11 = lean_ctor_get(x_10, 0); -lean_inc(x_11); -x_12 = lean_ctor_get(x_10, 1); +lean_object* x_12; lean_object* x_13; lean_object* x_14; lean_object* x_15; lean_object* x_16; lean_object* x_17; lean_object* x_18; lean_object* x_19; lean_object* x_20; size_t x_21; lean_object* x_22; lean_object* x_23; +x_12 = lean_ctor_get(x_11, 0); lean_inc(x_12); -lean_dec(x_10); -x_13 = l_Lean_Compiler_LCNF_markRecDecls(x_11); -x_14 = l_Lean_Compiler_LCNF_getPassManager___rarg(x_6, x_12); +x_13 = lean_ctor_get(x_11, 1); +lean_inc(x_13); +lean_dec(x_11); +x_14 = lean_st_ref_get(x_9, x_13); x_15 = lean_ctor_get(x_14, 0); lean_inc(x_15); x_16 = lean_ctor_get(x_14, 1); lean_inc(x_16); lean_dec(x_14); -x_17 = lean_box(0); -x_18 = lean_array_size(x_15); -lean_inc(x_6); -lean_inc(x_5); -lean_inc(x_4); -lean_inc(x_3); -x_19 = l_Array_forIn_x27Unsafe_loop___at_Lean_Compiler_LCNF_PassManager_run___spec__10(x_17, x_15, x_15, x_18, x_9, x_13, x_3, x_4, x_5, x_6, x_16); +x_17 = lean_ctor_get(x_15, 0); +lean_inc(x_17); lean_dec(x_15); -if (lean_obj_tag(x_19) == 0) -{ -lean_object* x_20; lean_object* x_21; lean_object* x_22; lean_object* x_23; lean_object* 
x_24; uint8_t x_25; -x_20 = lean_ctor_get(x_19, 0); +lean_inc(x_12); +x_18 = lean_ir_compile(x_17, x_2, x_12); +x_19 = lean_ctor_get(x_18, 0); +lean_inc(x_19); +x_20 = lean_ctor_get(x_18, 1); lean_inc(x_20); -x_21 = lean_ctor_get(x_19, 1); -lean_inc(x_21); +lean_dec(x_18); +x_21 = lean_array_size(x_19); +x_22 = lean_box(0); +x_23 = l_Array_forIn_x27Unsafe_loop___at_Lean_Compiler_LCNF_PassManager_run___spec__14(x_19, x_3, x_19, x_21, x_4, x_22, x_6, x_7, x_8, x_9, x_16); lean_dec(x_19); -x_22 = l_Lean_Compiler_LCNF_PassManager_run___lambda__2___closed__2; -x_23 = l_Lean_isTracingEnabledFor___at_Lean_Compiler_LCNF_UnreachableBranches_elimDead_go___spec__1(x_22, x_3, x_4, x_5, x_6, x_21); -x_24 = lean_ctor_get(x_23, 0); +if (lean_obj_tag(x_20) == 0) +{ +lean_object* x_24; uint8_t x_25; +lean_dec(x_12); +x_24 = lean_ctor_get(x_23, 1); lean_inc(x_24); -x_25 = lean_unbox(x_24); -lean_dec(x_24); +lean_dec(x_23); +x_25 = !lean_is_exclusive(x_20); if (x_25 == 0) { -uint8_t x_26; -lean_dec(x_6); -lean_dec(x_5); -lean_dec(x_4); -lean_dec(x_3); -x_26 = !lean_is_exclusive(x_23); -if (x_26 == 0) -{ -lean_object* x_27; -x_27 = lean_ctor_get(x_23, 0); -lean_dec(x_27); -lean_ctor_set(x_23, 0, x_20); -return x_23; +lean_object* x_26; lean_object* x_27; +lean_ctor_set_tag(x_20, 3); +x_26 = l_Lean_MessageData_ofFormat(x_20); +x_27 = l_Lean_throwError___at_Lean_Compiler_LCNF_PassManager_run___spec__9(x_26, x_6, x_7, x_8, x_9, x_24); +lean_dec(x_9); +lean_dec(x_8); +return x_27; } else { -lean_object* x_28; lean_object* x_29; -x_28 = lean_ctor_get(x_23, 1); +lean_object* x_28; lean_object* x_29; lean_object* x_30; lean_object* x_31; +x_28 = lean_ctor_get(x_20, 0); lean_inc(x_28); -lean_dec(x_23); -x_29 = lean_alloc_ctor(0, 2, 0); -lean_ctor_set(x_29, 0, x_20); -lean_ctor_set(x_29, 1, x_28); -return x_29; +lean_dec(x_20); +x_29 = lean_alloc_ctor(3, 1, 0); +lean_ctor_set(x_29, 0, x_28); +x_30 = l_Lean_MessageData_ofFormat(x_29); +x_31 = l_Lean_throwError___at_Lean_Compiler_LCNF_PassManager_run___spec__9(x_30, x_6, x_7, x_8, x_9, x_24); +lean_dec(x_9); +lean_dec(x_8); +return x_31; } } else { -lean_object* x_30; size_t x_31; lean_object* x_32; lean_object* x_33; -x_30 = lean_ctor_get(x_23, 1); -lean_inc(x_30); +lean_object* x_32; lean_object* x_33; lean_object* x_34; uint8_t x_35; +x_32 = lean_ctor_get(x_23, 1); +lean_inc(x_32); lean_dec(x_23); -x_31 = lean_array_size(x_20); -x_32 = lean_box(0); -x_33 = l_Array_forIn_x27Unsafe_loop___at_Lean_Compiler_LCNF_PassManager_run___spec__11(x_20, x_22, x_17, x_20, x_31, x_9, x_32, x_3, x_4, x_5, x_6, x_30); -if (lean_obj_tag(x_33) == 0) +x_33 = lean_ctor_get(x_20, 0); +lean_inc(x_33); +lean_dec(x_20); +x_34 = l_Lean_setEnv___at_Lean_Compiler_LCNF_PassManager_run___spec__10(x_33, x_6, x_7, x_8, x_9, x_32); +lean_dec(x_9); +lean_dec(x_8); +x_35 = !lean_is_exclusive(x_34); +if (x_35 == 0) { -uint8_t x_34; -x_34 = !lean_is_exclusive(x_33); -if (x_34 == 0) +lean_object* x_36; +x_36 = lean_ctor_get(x_34, 0); +lean_dec(x_36); +lean_ctor_set(x_34, 0, x_12); +return x_34; +} +else { -lean_object* x_35; -x_35 = lean_ctor_get(x_33, 0); -lean_dec(x_35); -lean_ctor_set(x_33, 0, x_20); -return x_33; +lean_object* x_37; lean_object* x_38; +x_37 = lean_ctor_get(x_34, 1); +lean_inc(x_37); +lean_dec(x_34); +x_38 = lean_alloc_ctor(0, 2, 0); +lean_ctor_set(x_38, 0, x_12); +lean_ctor_set(x_38, 1, x_37); +return x_38; +} +} } else { -lean_object* x_36; lean_object* x_37; -x_36 = lean_ctor_get(x_33, 1); -lean_inc(x_36); -lean_dec(x_33); -x_37 = lean_alloc_ctor(0, 2, 0); -lean_ctor_set(x_37, 0, 
x_20); -lean_ctor_set(x_37, 1, x_36); -return x_37; +uint8_t x_39; +lean_dec(x_9); +lean_dec(x_8); +lean_dec(x_2); +x_39 = !lean_is_exclusive(x_11); +if (x_39 == 0) +{ +return x_11; +} +else +{ +lean_object* x_40; lean_object* x_41; lean_object* x_42; +x_40 = lean_ctor_get(x_11, 0); +x_41 = lean_ctor_get(x_11, 1); +lean_inc(x_41); +lean_inc(x_40); +lean_dec(x_11); +x_42 = lean_alloc_ctor(1, 2, 0); +lean_ctor_set(x_42, 0, x_40); +lean_ctor_set(x_42, 1, x_41); +return x_42; } } +} +} +LEAN_EXPORT lean_object* l_Lean_Compiler_LCNF_PassManager_run___lambda__5(lean_object* x_1, lean_object* x_2, size_t x_3, lean_object* x_4, lean_object* x_5, lean_object* x_6, lean_object* x_7, lean_object* x_8, lean_object* x_9, lean_object* x_10) { +_start: +{ +lean_object* x_11; lean_object* x_12; uint8_t x_13; +x_11 = lean_ctor_get(x_8, 2); +lean_inc(x_11); +x_12 = l_Lean_Compiler_LCNF_PassManager_run___lambda__2___closed__1; +x_13 = l_Lean_Option_get___at_Lean_Compiler_LCNF_toConfigOptions___spec__2(x_11, x_12); +if (x_13 == 0) +{ +lean_object* x_14; +lean_dec(x_11); +lean_dec(x_9); +lean_dec(x_8); +x_14 = lean_alloc_ctor(0, 2, 0); +lean_ctor_set(x_14, 0, x_4); +lean_ctor_set(x_14, 1, x_10); +return x_14; +} else { -uint8_t x_38; +lean_object* x_15; lean_object* x_16; +lean_dec(x_4); +x_15 = lean_box(0); +x_16 = l_Lean_Compiler_LCNF_PassManager_run___lambda__4(x_1, x_11, x_2, x_3, x_15, x_6, x_7, x_8, x_9, x_10); +return x_16; +} +} +} +LEAN_EXPORT lean_object* l_Lean_Compiler_LCNF_PassManager_run___lambda__6(lean_object* x_1, lean_object* x_2, lean_object* x_3, lean_object* x_4, lean_object* x_5, lean_object* x_6, lean_object* x_7, lean_object* x_8) { +_start: +{ +size_t x_9; size_t x_10; lean_object* x_11; +x_9 = lean_array_size(x_1); +x_10 = 0; +lean_inc(x_7); +lean_inc(x_6); +lean_inc(x_5); +lean_inc(x_4); +x_11 = l_Array_mapMUnsafe_map___at_Lean_Compiler_LCNF_PassManager_run___spec__1(x_9, x_10, x_1, x_4, x_5, x_6, x_7, x_8); +if (lean_obj_tag(x_11) == 0) +{ +lean_object* x_12; lean_object* x_13; lean_object* x_14; lean_object* x_15; lean_object* x_16; lean_object* x_17; lean_object* x_18; size_t x_19; lean_object* x_20; +x_12 = lean_ctor_get(x_11, 0); +lean_inc(x_12); +x_13 = lean_ctor_get(x_11, 1); +lean_inc(x_13); +lean_dec(x_11); +x_14 = l_Lean_Compiler_LCNF_markRecDecls(x_12); +x_15 = l_Lean_Compiler_LCNF_getPassManager___rarg(x_7, x_13); +x_16 = lean_ctor_get(x_15, 0); +lean_inc(x_16); +x_17 = lean_ctor_get(x_15, 1); +lean_inc(x_17); +lean_dec(x_15); +x_18 = lean_box(0); +x_19 = lean_array_size(x_16); +lean_inc(x_7); +lean_inc(x_6); +lean_inc(x_5); +lean_inc(x_4); +x_20 = l_Array_forIn_x27Unsafe_loop___at_Lean_Compiler_LCNF_PassManager_run___spec__13(x_18, x_16, x_16, x_19, x_10, x_14, x_4, x_5, x_6, x_7, x_17); +lean_dec(x_16); +if (lean_obj_tag(x_20) == 0) +{ +lean_object* x_21; lean_object* x_22; lean_object* x_23; lean_object* x_24; lean_object* x_25; uint8_t x_26; +x_21 = lean_ctor_get(x_20, 0); +lean_inc(x_21); +x_22 = lean_ctor_get(x_20, 1); +lean_inc(x_22); lean_dec(x_20); -x_38 = !lean_is_exclusive(x_33); -if (x_38 == 0) +x_23 = l_Lean_Compiler_LCNF_PassManager_run___lambda__3___closed__2; +x_24 = l_Lean_isTracingEnabledFor___at_Lean_Compiler_LCNF_UnreachableBranches_elimDead_go___spec__1(x_23, x_4, x_5, x_6, x_7, x_22); +x_25 = lean_ctor_get(x_24, 0); +lean_inc(x_25); +x_26 = lean_unbox(x_25); +lean_dec(x_25); +if (x_26 == 0) +{ +lean_object* x_27; lean_object* x_28; lean_object* x_29; +x_27 = lean_ctor_get(x_24, 1); +lean_inc(x_27); +lean_dec(x_24); +x_28 = lean_box(0); +x_29 = 
l_Lean_Compiler_LCNF_PassManager_run___lambda__5(x_21, x_18, x_10, x_2, x_28, x_4, x_5, x_6, x_7, x_27); +lean_dec(x_5); +lean_dec(x_4); +lean_dec(x_21); +return x_29; +} +else +{ +lean_object* x_30; size_t x_31; lean_object* x_32; lean_object* x_33; +x_30 = lean_ctor_get(x_24, 1); +lean_inc(x_30); +lean_dec(x_24); +x_31 = lean_array_size(x_21); +x_32 = lean_box(0); +lean_inc(x_7); +lean_inc(x_6); +lean_inc(x_5); +lean_inc(x_4); +x_33 = l_Array_forIn_x27Unsafe_loop___at_Lean_Compiler_LCNF_PassManager_run___spec__15(x_21, x_23, x_18, x_21, x_31, x_10, x_32, x_4, x_5, x_6, x_7, x_30); +if (lean_obj_tag(x_33) == 0) +{ +lean_object* x_34; lean_object* x_35; +x_34 = lean_ctor_get(x_33, 1); +lean_inc(x_34); +lean_dec(x_33); +x_35 = l_Lean_Compiler_LCNF_PassManager_run___lambda__5(x_21, x_18, x_10, x_2, x_32, x_4, x_5, x_6, x_7, x_34); +lean_dec(x_5); +lean_dec(x_4); +lean_dec(x_21); +return x_35; +} +else +{ +uint8_t x_36; +lean_dec(x_21); +lean_dec(x_7); +lean_dec(x_6); +lean_dec(x_5); +lean_dec(x_4); +lean_dec(x_2); +x_36 = !lean_is_exclusive(x_33); +if (x_36 == 0) { return x_33; } else { -lean_object* x_39; lean_object* x_40; lean_object* x_41; -x_39 = lean_ctor_get(x_33, 0); -x_40 = lean_ctor_get(x_33, 1); -lean_inc(x_40); -lean_inc(x_39); +lean_object* x_37; lean_object* x_38; lean_object* x_39; +x_37 = lean_ctor_get(x_33, 0); +x_38 = lean_ctor_get(x_33, 1); +lean_inc(x_38); +lean_inc(x_37); lean_dec(x_33); -x_41 = lean_alloc_ctor(1, 2, 0); -lean_ctor_set(x_41, 0, x_39); -lean_ctor_set(x_41, 1, x_40); -return x_41; +x_39 = lean_alloc_ctor(1, 2, 0); +lean_ctor_set(x_39, 0, x_37); +lean_ctor_set(x_39, 1, x_38); +return x_39; } } } } else { -uint8_t x_42; +uint8_t x_40; +lean_dec(x_7); lean_dec(x_6); lean_dec(x_5); lean_dec(x_4); -lean_dec(x_3); -x_42 = !lean_is_exclusive(x_19); -if (x_42 == 0) +lean_dec(x_2); +x_40 = !lean_is_exclusive(x_20); +if (x_40 == 0) { -return x_19; +return x_20; } else { -lean_object* x_43; lean_object* x_44; lean_object* x_45; -x_43 = lean_ctor_get(x_19, 0); -x_44 = lean_ctor_get(x_19, 1); -lean_inc(x_44); -lean_inc(x_43); -lean_dec(x_19); -x_45 = lean_alloc_ctor(1, 2, 0); -lean_ctor_set(x_45, 0, x_43); -lean_ctor_set(x_45, 1, x_44); -return x_45; +lean_object* x_41; lean_object* x_42; lean_object* x_43; +x_41 = lean_ctor_get(x_20, 0); +x_42 = lean_ctor_get(x_20, 1); +lean_inc(x_42); +lean_inc(x_41); +lean_dec(x_20); +x_43 = lean_alloc_ctor(1, 2, 0); +lean_ctor_set(x_43, 0, x_41); +lean_ctor_set(x_43, 1, x_42); +return x_43; } } } else { -uint8_t x_46; +uint8_t x_44; +lean_dec(x_7); lean_dec(x_6); lean_dec(x_5); lean_dec(x_4); -lean_dec(x_3); -x_46 = !lean_is_exclusive(x_10); -if (x_46 == 0) +lean_dec(x_2); +x_44 = !lean_is_exclusive(x_11); +if (x_44 == 0) { -return x_10; +return x_11; } else { -lean_object* x_47; lean_object* x_48; lean_object* x_49; -x_47 = lean_ctor_get(x_10, 0); -x_48 = lean_ctor_get(x_10, 1); -lean_inc(x_48); -lean_inc(x_47); -lean_dec(x_10); -x_49 = lean_alloc_ctor(1, 2, 0); -lean_ctor_set(x_49, 0, x_47); -lean_ctor_set(x_49, 1, x_48); -return x_49; +lean_object* x_45; lean_object* x_46; lean_object* x_47; +x_45 = lean_ctor_get(x_11, 0); +x_46 = lean_ctor_get(x_11, 1); +lean_inc(x_46); +lean_inc(x_45); +lean_dec(x_11); +x_47 = lean_alloc_ctor(1, 2, 0); +lean_ctor_set(x_47, 0, x_45); +lean_ctor_set(x_47, 1, x_46); +return x_47; } } } @@ -7002,226 +7636,230 @@ lean_dec(x_11); lean_ctor_set(x_4, 4, x_12); if (x_9 == 0) { -lean_object* x_22; +lean_object* x_23; lean_dec(x_7); -x_22 = l_Lean_Compiler_LCNF_shouldGenerateCode___closed__9; -x_14 = 
x_22; +x_23 = l_Lean_Compiler_LCNF_shouldGenerateCode___closed__9; +x_14 = x_23; x_15 = x_6; -goto block_21; +goto block_22; } else { -uint8_t x_23; -x_23 = lean_nat_dec_le(x_7, x_7); -if (x_23 == 0) +uint8_t x_24; +x_24 = lean_nat_dec_le(x_7, x_7); +if (x_24 == 0) { -lean_object* x_24; +lean_object* x_25; lean_dec(x_7); -x_24 = l_Lean_Compiler_LCNF_shouldGenerateCode___closed__9; -x_14 = x_24; +x_25 = l_Lean_Compiler_LCNF_shouldGenerateCode___closed__9; +x_14 = x_25; x_15 = x_6; -goto block_21; +goto block_22; } else { -size_t x_25; size_t x_26; lean_object* x_27; lean_object* x_28; -x_25 = 0; -x_26 = lean_usize_of_nat(x_7); +size_t x_26; size_t x_27; lean_object* x_28; lean_object* x_29; +x_26 = 0; +x_27 = lean_usize_of_nat(x_7); lean_dec(x_7); -x_27 = l_Lean_Compiler_LCNF_shouldGenerateCode___closed__9; +x_28 = l_Lean_Compiler_LCNF_shouldGenerateCode___closed__9; lean_inc(x_5); lean_inc(x_4); -x_28 = l_Array_foldlMUnsafe_fold___at_Lean_Compiler_LCNF_PassManager_run___spec__9(x_1, x_25, x_26, x_27, x_2, x_3, x_4, x_5, x_6); -if (lean_obj_tag(x_28) == 0) +x_29 = l_Array_foldlMUnsafe_fold___at_Lean_Compiler_LCNF_PassManager_run___spec__12(x_1, x_26, x_27, x_28, x_2, x_3, x_4, x_5, x_6); +if (lean_obj_tag(x_29) == 0) { -lean_object* x_29; lean_object* x_30; -x_29 = lean_ctor_get(x_28, 0); -lean_inc(x_29); -x_30 = lean_ctor_get(x_28, 1); +lean_object* x_30; lean_object* x_31; +x_30 = lean_ctor_get(x_29, 0); lean_inc(x_30); -lean_dec(x_28); -x_14 = x_29; -x_15 = x_30; -goto block_21; +x_31 = lean_ctor_get(x_29, 1); +lean_inc(x_31); +lean_dec(x_29); +x_14 = x_30; +x_15 = x_31; +goto block_22; } else { -uint8_t x_31; +uint8_t x_32; lean_dec(x_4); lean_dec(x_5); lean_dec(x_3); lean_dec(x_2); -x_31 = !lean_is_exclusive(x_28); -if (x_31 == 0) +x_32 = !lean_is_exclusive(x_29); +if (x_32 == 0) { -return x_28; +return x_29; } else { -lean_object* x_32; lean_object* x_33; lean_object* x_34; -x_32 = lean_ctor_get(x_28, 0); -x_33 = lean_ctor_get(x_28, 1); +lean_object* x_33; lean_object* x_34; lean_object* x_35; +x_33 = lean_ctor_get(x_29, 0); +x_34 = lean_ctor_get(x_29, 1); +lean_inc(x_34); lean_inc(x_33); -lean_inc(x_32); -lean_dec(x_28); -x_34 = lean_alloc_ctor(1, 2, 0); -lean_ctor_set(x_34, 0, x_32); -lean_ctor_set(x_34, 1, x_33); -return x_34; +lean_dec(x_29); +x_35 = lean_alloc_ctor(1, 2, 0); +lean_ctor_set(x_35, 0, x_33); +lean_ctor_set(x_35, 1, x_34); +return x_35; } } } } -block_21: +block_22: { uint8_t x_16; x_16 = l_Array_isEmpty___rarg(x_14); if (x_16 == 0) { -lean_object* x_17; lean_object* x_18; -x_17 = lean_box(0); -x_18 = l_Lean_Compiler_LCNF_PassManager_run___lambda__2(x_14, x_17, x_2, x_3, x_4, x_5, x_15); -return x_18; +lean_object* x_17; lean_object* x_18; lean_object* x_19; +x_17 = l_Lean_Compiler_LCNF_shouldGenerateCode___closed__9; +x_18 = lean_box(0); +x_19 = l_Lean_Compiler_LCNF_PassManager_run___lambda__3(x_14, x_17, x_18, x_2, x_3, x_4, x_5, x_15); +return x_19; } else { -lean_object* x_19; lean_object* x_20; +lean_object* x_20; lean_object* x_21; lean_dec(x_14); lean_dec(x_4); lean_dec(x_5); lean_dec(x_3); lean_dec(x_2); -x_19 = l_Lean_Compiler_LCNF_shouldGenerateCode___closed__9; -x_20 = lean_alloc_ctor(0, 2, 0); -lean_ctor_set(x_20, 0, x_19); -lean_ctor_set(x_20, 1, x_15); -return x_20; +x_20 = l_Lean_Compiler_LCNF_shouldGenerateCode___closed__9; +x_21 = lean_alloc_ctor(0, 2, 0); +lean_ctor_set(x_21, 0, x_20); +lean_ctor_set(x_21, 1, x_15); +return x_21; } } } else { -lean_object* x_35; lean_object* x_36; +lean_object* x_36; lean_object* x_37; if (x_9 == 0) { -lean_object* 
x_43; +lean_object* x_45; lean_dec(x_7); -x_43 = l_Lean_Compiler_LCNF_shouldGenerateCode___closed__9; -x_35 = x_43; -x_36 = x_6; -goto block_42; +x_45 = l_Lean_Compiler_LCNF_shouldGenerateCode___closed__9; +x_36 = x_45; +x_37 = x_6; +goto block_44; } else { -uint8_t x_44; -x_44 = lean_nat_dec_le(x_7, x_7); -if (x_44 == 0) +uint8_t x_46; +x_46 = lean_nat_dec_le(x_7, x_7); +if (x_46 == 0) { -lean_object* x_45; +lean_object* x_47; lean_dec(x_7); -x_45 = l_Lean_Compiler_LCNF_shouldGenerateCode___closed__9; -x_35 = x_45; -x_36 = x_6; -goto block_42; +x_47 = l_Lean_Compiler_LCNF_shouldGenerateCode___closed__9; +x_36 = x_47; +x_37 = x_6; +goto block_44; } else { -size_t x_46; size_t x_47; lean_object* x_48; lean_object* x_49; -x_46 = 0; -x_47 = lean_usize_of_nat(x_7); +size_t x_48; size_t x_49; lean_object* x_50; lean_object* x_51; +x_48 = 0; +x_49 = lean_usize_of_nat(x_7); lean_dec(x_7); -x_48 = l_Lean_Compiler_LCNF_shouldGenerateCode___closed__9; +x_50 = l_Lean_Compiler_LCNF_shouldGenerateCode___closed__9; lean_inc(x_5); lean_inc(x_4); -x_49 = l_Array_foldlMUnsafe_fold___at_Lean_Compiler_LCNF_PassManager_run___spec__9(x_1, x_46, x_47, x_48, x_2, x_3, x_4, x_5, x_6); -if (lean_obj_tag(x_49) == 0) +x_51 = l_Array_foldlMUnsafe_fold___at_Lean_Compiler_LCNF_PassManager_run___spec__12(x_1, x_48, x_49, x_50, x_2, x_3, x_4, x_5, x_6); +if (lean_obj_tag(x_51) == 0) { -lean_object* x_50; lean_object* x_51; -x_50 = lean_ctor_get(x_49, 0); -lean_inc(x_50); -x_51 = lean_ctor_get(x_49, 1); -lean_inc(x_51); -lean_dec(x_49); -x_35 = x_50; -x_36 = x_51; -goto block_42; +lean_object* x_52; lean_object* x_53; +x_52 = lean_ctor_get(x_51, 0); +lean_inc(x_52); +x_53 = lean_ctor_get(x_51, 1); +lean_inc(x_53); +lean_dec(x_51); +x_36 = x_52; +x_37 = x_53; +goto block_44; } else { -uint8_t x_52; +uint8_t x_54; lean_dec(x_4); lean_dec(x_5); lean_dec(x_3); lean_dec(x_2); -x_52 = !lean_is_exclusive(x_49); -if (x_52 == 0) +x_54 = !lean_is_exclusive(x_51); +if (x_54 == 0) { -return x_49; +return x_51; } else { -lean_object* x_53; lean_object* x_54; lean_object* x_55; -x_53 = lean_ctor_get(x_49, 0); -x_54 = lean_ctor_get(x_49, 1); -lean_inc(x_54); -lean_inc(x_53); -lean_dec(x_49); -x_55 = lean_alloc_ctor(1, 2, 0); -lean_ctor_set(x_55, 0, x_53); -lean_ctor_set(x_55, 1, x_54); -return x_55; +lean_object* x_55; lean_object* x_56; lean_object* x_57; +x_55 = lean_ctor_get(x_51, 0); +x_56 = lean_ctor_get(x_51, 1); +lean_inc(x_56); +lean_inc(x_55); +lean_dec(x_51); +x_57 = lean_alloc_ctor(1, 2, 0); +lean_ctor_set(x_57, 0, x_55); +lean_ctor_set(x_57, 1, x_56); +return x_57; } } } } -block_42: +block_44: { -uint8_t x_37; -x_37 = l_Array_isEmpty___rarg(x_35); -if (x_37 == 0) +uint8_t x_38; +x_38 = l_Array_isEmpty___rarg(x_36); +if (x_38 == 0) { -lean_object* x_38; lean_object* x_39; -x_38 = lean_box(0); -x_39 = l_Lean_Compiler_LCNF_PassManager_run___lambda__3(x_35, x_38, x_2, x_3, x_4, x_5, x_36); -return x_39; +lean_object* x_39; lean_object* x_40; lean_object* x_41; +x_39 = l_Lean_Compiler_LCNF_shouldGenerateCode___closed__9; +x_40 = lean_box(0); +x_41 = l_Lean_Compiler_LCNF_PassManager_run___lambda__6(x_36, x_39, x_40, x_2, x_3, x_4, x_5, x_37); +return x_41; } else { -lean_object* x_40; lean_object* x_41; -lean_dec(x_35); +lean_object* x_42; lean_object* x_43; +lean_dec(x_36); lean_dec(x_4); lean_dec(x_5); lean_dec(x_3); lean_dec(x_2); -x_40 = l_Lean_Compiler_LCNF_shouldGenerateCode___closed__9; -x_41 = lean_alloc_ctor(0, 2, 0); -lean_ctor_set(x_41, 0, x_40); -lean_ctor_set(x_41, 1, x_36); -return x_41; +x_42 = 
l_Lean_Compiler_LCNF_shouldGenerateCode___closed__9; +x_43 = lean_alloc_ctor(0, 2, 0); +lean_ctor_set(x_43, 0, x_42); +lean_ctor_set(x_43, 1, x_37); +return x_43; } } } } else { -lean_object* x_56; lean_object* x_57; lean_object* x_58; lean_object* x_59; lean_object* x_60; lean_object* x_61; lean_object* x_62; lean_object* x_63; lean_object* x_64; lean_object* x_65; lean_object* x_66; uint8_t x_67; lean_object* x_68; uint8_t x_69; lean_object* x_70; lean_object* x_71; uint8_t x_72; -x_56 = lean_ctor_get(x_4, 0); -x_57 = lean_ctor_get(x_4, 1); -x_58 = lean_ctor_get(x_4, 2); -x_59 = lean_ctor_get(x_4, 3); -x_60 = lean_ctor_get(x_4, 4); -x_61 = lean_ctor_get(x_4, 5); -x_62 = lean_ctor_get(x_4, 6); -x_63 = lean_ctor_get(x_4, 7); -x_64 = lean_ctor_get(x_4, 8); -x_65 = lean_ctor_get(x_4, 9); -x_66 = lean_ctor_get(x_4, 10); -x_67 = lean_ctor_get_uint8(x_4, sizeof(void*)*13); -x_68 = lean_ctor_get(x_4, 11); -x_69 = lean_ctor_get_uint8(x_4, sizeof(void*)*13 + 1); -x_70 = lean_ctor_get(x_4, 12); +lean_object* x_58; lean_object* x_59; lean_object* x_60; lean_object* x_61; lean_object* x_62; lean_object* x_63; lean_object* x_64; lean_object* x_65; lean_object* x_66; lean_object* x_67; lean_object* x_68; uint8_t x_69; lean_object* x_70; uint8_t x_71; lean_object* x_72; lean_object* x_73; uint8_t x_74; +x_58 = lean_ctor_get(x_4, 0); +x_59 = lean_ctor_get(x_4, 1); +x_60 = lean_ctor_get(x_4, 2); +x_61 = lean_ctor_get(x_4, 3); +x_62 = lean_ctor_get(x_4, 4); +x_63 = lean_ctor_get(x_4, 5); +x_64 = lean_ctor_get(x_4, 6); +x_65 = lean_ctor_get(x_4, 7); +x_66 = lean_ctor_get(x_4, 8); +x_67 = lean_ctor_get(x_4, 9); +x_68 = lean_ctor_get(x_4, 10); +x_69 = lean_ctor_get_uint8(x_4, sizeof(void*)*13); +x_70 = lean_ctor_get(x_4, 11); +x_71 = lean_ctor_get_uint8(x_4, sizeof(void*)*13 + 1); +x_72 = lean_ctor_get(x_4, 12); +lean_inc(x_72); lean_inc(x_70); lean_inc(x_68); +lean_inc(x_67); lean_inc(x_66); lean_inc(x_65); lean_inc(x_64); @@ -7231,249 +7869,249 @@ lean_inc(x_61); lean_inc(x_60); lean_inc(x_59); lean_inc(x_58); -lean_inc(x_57); -lean_inc(x_56); lean_dec(x_4); -x_71 = lean_unsigned_to_nat(8192u); -x_72 = lean_nat_dec_le(x_71, x_60); -if (x_72 == 0) -{ -lean_object* x_73; lean_object* x_74; lean_object* x_75; -lean_dec(x_60); -x_73 = lean_alloc_ctor(0, 13, 2); -lean_ctor_set(x_73, 0, x_56); -lean_ctor_set(x_73, 1, x_57); -lean_ctor_set(x_73, 2, x_58); -lean_ctor_set(x_73, 3, x_59); -lean_ctor_set(x_73, 4, x_71); -lean_ctor_set(x_73, 5, x_61); -lean_ctor_set(x_73, 6, x_62); -lean_ctor_set(x_73, 7, x_63); -lean_ctor_set(x_73, 8, x_64); -lean_ctor_set(x_73, 9, x_65); -lean_ctor_set(x_73, 10, x_66); -lean_ctor_set(x_73, 11, x_68); -lean_ctor_set(x_73, 12, x_70); -lean_ctor_set_uint8(x_73, sizeof(void*)*13, x_67); -lean_ctor_set_uint8(x_73, sizeof(void*)*13 + 1, x_69); +x_73 = lean_unsigned_to_nat(8192u); +x_74 = lean_nat_dec_le(x_73, x_62); +if (x_74 == 0) +{ +lean_object* x_75; lean_object* x_76; lean_object* x_77; +lean_dec(x_62); +x_75 = lean_alloc_ctor(0, 13, 2); +lean_ctor_set(x_75, 0, x_58); +lean_ctor_set(x_75, 1, x_59); +lean_ctor_set(x_75, 2, x_60); +lean_ctor_set(x_75, 3, x_61); +lean_ctor_set(x_75, 4, x_73); +lean_ctor_set(x_75, 5, x_63); +lean_ctor_set(x_75, 6, x_64); +lean_ctor_set(x_75, 7, x_65); +lean_ctor_set(x_75, 8, x_66); +lean_ctor_set(x_75, 9, x_67); +lean_ctor_set(x_75, 10, x_68); +lean_ctor_set(x_75, 11, x_70); +lean_ctor_set(x_75, 12, x_72); +lean_ctor_set_uint8(x_75, sizeof(void*)*13, x_69); +lean_ctor_set_uint8(x_75, sizeof(void*)*13 + 1, x_71); if (x_9 == 0) { -lean_object* x_82; 
+lean_object* x_85; lean_dec(x_7); -x_82 = l_Lean_Compiler_LCNF_shouldGenerateCode___closed__9; -x_74 = x_82; -x_75 = x_6; -goto block_81; +x_85 = l_Lean_Compiler_LCNF_shouldGenerateCode___closed__9; +x_76 = x_85; +x_77 = x_6; +goto block_84; } else { -uint8_t x_83; -x_83 = lean_nat_dec_le(x_7, x_7); -if (x_83 == 0) +uint8_t x_86; +x_86 = lean_nat_dec_le(x_7, x_7); +if (x_86 == 0) { -lean_object* x_84; +lean_object* x_87; lean_dec(x_7); -x_84 = l_Lean_Compiler_LCNF_shouldGenerateCode___closed__9; -x_74 = x_84; -x_75 = x_6; -goto block_81; +x_87 = l_Lean_Compiler_LCNF_shouldGenerateCode___closed__9; +x_76 = x_87; +x_77 = x_6; +goto block_84; } else { -size_t x_85; size_t x_86; lean_object* x_87; lean_object* x_88; -x_85 = 0; -x_86 = lean_usize_of_nat(x_7); +size_t x_88; size_t x_89; lean_object* x_90; lean_object* x_91; +x_88 = 0; +x_89 = lean_usize_of_nat(x_7); lean_dec(x_7); -x_87 = l_Lean_Compiler_LCNF_shouldGenerateCode___closed__9; +x_90 = l_Lean_Compiler_LCNF_shouldGenerateCode___closed__9; lean_inc(x_5); -lean_inc(x_73); -x_88 = l_Array_foldlMUnsafe_fold___at_Lean_Compiler_LCNF_PassManager_run___spec__9(x_1, x_85, x_86, x_87, x_2, x_3, x_73, x_5, x_6); -if (lean_obj_tag(x_88) == 0) +lean_inc(x_75); +x_91 = l_Array_foldlMUnsafe_fold___at_Lean_Compiler_LCNF_PassManager_run___spec__12(x_1, x_88, x_89, x_90, x_2, x_3, x_75, x_5, x_6); +if (lean_obj_tag(x_91) == 0) { -lean_object* x_89; lean_object* x_90; -x_89 = lean_ctor_get(x_88, 0); -lean_inc(x_89); -x_90 = lean_ctor_get(x_88, 1); -lean_inc(x_90); -lean_dec(x_88); -x_74 = x_89; -x_75 = x_90; -goto block_81; +lean_object* x_92; lean_object* x_93; +x_92 = lean_ctor_get(x_91, 0); +lean_inc(x_92); +x_93 = lean_ctor_get(x_91, 1); +lean_inc(x_93); +lean_dec(x_91); +x_76 = x_92; +x_77 = x_93; +goto block_84; } else { -lean_object* x_91; lean_object* x_92; lean_object* x_93; lean_object* x_94; -lean_dec(x_73); +lean_object* x_94; lean_object* x_95; lean_object* x_96; lean_object* x_97; +lean_dec(x_75); lean_dec(x_5); lean_dec(x_3); lean_dec(x_2); -x_91 = lean_ctor_get(x_88, 0); -lean_inc(x_91); -x_92 = lean_ctor_get(x_88, 1); -lean_inc(x_92); -if (lean_is_exclusive(x_88)) { - lean_ctor_release(x_88, 0); - lean_ctor_release(x_88, 1); - x_93 = x_88; +x_94 = lean_ctor_get(x_91, 0); +lean_inc(x_94); +x_95 = lean_ctor_get(x_91, 1); +lean_inc(x_95); +if (lean_is_exclusive(x_91)) { + lean_ctor_release(x_91, 0); + lean_ctor_release(x_91, 1); + x_96 = x_91; } else { - lean_dec_ref(x_88); - x_93 = lean_box(0); + lean_dec_ref(x_91); + x_96 = lean_box(0); } -if (lean_is_scalar(x_93)) { - x_94 = lean_alloc_ctor(1, 2, 0); +if (lean_is_scalar(x_96)) { + x_97 = lean_alloc_ctor(1, 2, 0); } else { - x_94 = x_93; + x_97 = x_96; } -lean_ctor_set(x_94, 0, x_91); -lean_ctor_set(x_94, 1, x_92); -return x_94; +lean_ctor_set(x_97, 0, x_94); +lean_ctor_set(x_97, 1, x_95); +return x_97; } } } -block_81: +block_84: { -uint8_t x_76; -x_76 = l_Array_isEmpty___rarg(x_74); -if (x_76 == 0) +uint8_t x_78; +x_78 = l_Array_isEmpty___rarg(x_76); +if (x_78 == 0) { -lean_object* x_77; lean_object* x_78; -x_77 = lean_box(0); -x_78 = l_Lean_Compiler_LCNF_PassManager_run___lambda__2(x_74, x_77, x_2, x_3, x_73, x_5, x_75); -return x_78; +lean_object* x_79; lean_object* x_80; lean_object* x_81; +x_79 = l_Lean_Compiler_LCNF_shouldGenerateCode___closed__9; +x_80 = lean_box(0); +x_81 = l_Lean_Compiler_LCNF_PassManager_run___lambda__3(x_76, x_79, x_80, x_2, x_3, x_75, x_5, x_77); +return x_81; } else { -lean_object* x_79; lean_object* x_80; -lean_dec(x_74); -lean_dec(x_73); +lean_object* 
x_82; lean_object* x_83; +lean_dec(x_76); +lean_dec(x_75); lean_dec(x_5); lean_dec(x_3); lean_dec(x_2); -x_79 = l_Lean_Compiler_LCNF_shouldGenerateCode___closed__9; -x_80 = lean_alloc_ctor(0, 2, 0); -lean_ctor_set(x_80, 0, x_79); -lean_ctor_set(x_80, 1, x_75); -return x_80; -} -} -} -else -{ -lean_object* x_95; lean_object* x_96; lean_object* x_97; -x_95 = lean_alloc_ctor(0, 13, 2); -lean_ctor_set(x_95, 0, x_56); -lean_ctor_set(x_95, 1, x_57); -lean_ctor_set(x_95, 2, x_58); -lean_ctor_set(x_95, 3, x_59); -lean_ctor_set(x_95, 4, x_60); -lean_ctor_set(x_95, 5, x_61); -lean_ctor_set(x_95, 6, x_62); -lean_ctor_set(x_95, 7, x_63); -lean_ctor_set(x_95, 8, x_64); -lean_ctor_set(x_95, 9, x_65); -lean_ctor_set(x_95, 10, x_66); -lean_ctor_set(x_95, 11, x_68); -lean_ctor_set(x_95, 12, x_70); -lean_ctor_set_uint8(x_95, sizeof(void*)*13, x_67); -lean_ctor_set_uint8(x_95, sizeof(void*)*13 + 1, x_69); +x_82 = l_Lean_Compiler_LCNF_shouldGenerateCode___closed__9; +x_83 = lean_alloc_ctor(0, 2, 0); +lean_ctor_set(x_83, 0, x_82); +lean_ctor_set(x_83, 1, x_77); +return x_83; +} +} +} +else +{ +lean_object* x_98; lean_object* x_99; lean_object* x_100; +x_98 = lean_alloc_ctor(0, 13, 2); +lean_ctor_set(x_98, 0, x_58); +lean_ctor_set(x_98, 1, x_59); +lean_ctor_set(x_98, 2, x_60); +lean_ctor_set(x_98, 3, x_61); +lean_ctor_set(x_98, 4, x_62); +lean_ctor_set(x_98, 5, x_63); +lean_ctor_set(x_98, 6, x_64); +lean_ctor_set(x_98, 7, x_65); +lean_ctor_set(x_98, 8, x_66); +lean_ctor_set(x_98, 9, x_67); +lean_ctor_set(x_98, 10, x_68); +lean_ctor_set(x_98, 11, x_70); +lean_ctor_set(x_98, 12, x_72); +lean_ctor_set_uint8(x_98, sizeof(void*)*13, x_69); +lean_ctor_set_uint8(x_98, sizeof(void*)*13 + 1, x_71); if (x_9 == 0) { -lean_object* x_104; +lean_object* x_108; lean_dec(x_7); -x_104 = l_Lean_Compiler_LCNF_shouldGenerateCode___closed__9; -x_96 = x_104; -x_97 = x_6; -goto block_103; +x_108 = l_Lean_Compiler_LCNF_shouldGenerateCode___closed__9; +x_99 = x_108; +x_100 = x_6; +goto block_107; } else { -uint8_t x_105; -x_105 = lean_nat_dec_le(x_7, x_7); -if (x_105 == 0) +uint8_t x_109; +x_109 = lean_nat_dec_le(x_7, x_7); +if (x_109 == 0) { -lean_object* x_106; +lean_object* x_110; lean_dec(x_7); -x_106 = l_Lean_Compiler_LCNF_shouldGenerateCode___closed__9; -x_96 = x_106; -x_97 = x_6; -goto block_103; +x_110 = l_Lean_Compiler_LCNF_shouldGenerateCode___closed__9; +x_99 = x_110; +x_100 = x_6; +goto block_107; } else { -size_t x_107; size_t x_108; lean_object* x_109; lean_object* x_110; -x_107 = 0; -x_108 = lean_usize_of_nat(x_7); +size_t x_111; size_t x_112; lean_object* x_113; lean_object* x_114; +x_111 = 0; +x_112 = lean_usize_of_nat(x_7); lean_dec(x_7); -x_109 = l_Lean_Compiler_LCNF_shouldGenerateCode___closed__9; +x_113 = l_Lean_Compiler_LCNF_shouldGenerateCode___closed__9; lean_inc(x_5); -lean_inc(x_95); -x_110 = l_Array_foldlMUnsafe_fold___at_Lean_Compiler_LCNF_PassManager_run___spec__9(x_1, x_107, x_108, x_109, x_2, x_3, x_95, x_5, x_6); -if (lean_obj_tag(x_110) == 0) +lean_inc(x_98); +x_114 = l_Array_foldlMUnsafe_fold___at_Lean_Compiler_LCNF_PassManager_run___spec__12(x_1, x_111, x_112, x_113, x_2, x_3, x_98, x_5, x_6); +if (lean_obj_tag(x_114) == 0) { -lean_object* x_111; lean_object* x_112; -x_111 = lean_ctor_get(x_110, 0); -lean_inc(x_111); -x_112 = lean_ctor_get(x_110, 1); -lean_inc(x_112); -lean_dec(x_110); -x_96 = x_111; -x_97 = x_112; -goto block_103; +lean_object* x_115; lean_object* x_116; +x_115 = lean_ctor_get(x_114, 0); +lean_inc(x_115); +x_116 = lean_ctor_get(x_114, 1); +lean_inc(x_116); +lean_dec(x_114); +x_99 = 
x_115; +x_100 = x_116; +goto block_107; } else { -lean_object* x_113; lean_object* x_114; lean_object* x_115; lean_object* x_116; -lean_dec(x_95); +lean_object* x_117; lean_object* x_118; lean_object* x_119; lean_object* x_120; +lean_dec(x_98); lean_dec(x_5); lean_dec(x_3); lean_dec(x_2); -x_113 = lean_ctor_get(x_110, 0); -lean_inc(x_113); -x_114 = lean_ctor_get(x_110, 1); -lean_inc(x_114); -if (lean_is_exclusive(x_110)) { - lean_ctor_release(x_110, 0); - lean_ctor_release(x_110, 1); - x_115 = x_110; +x_117 = lean_ctor_get(x_114, 0); +lean_inc(x_117); +x_118 = lean_ctor_get(x_114, 1); +lean_inc(x_118); +if (lean_is_exclusive(x_114)) { + lean_ctor_release(x_114, 0); + lean_ctor_release(x_114, 1); + x_119 = x_114; } else { - lean_dec_ref(x_110); - x_115 = lean_box(0); + lean_dec_ref(x_114); + x_119 = lean_box(0); } -if (lean_is_scalar(x_115)) { - x_116 = lean_alloc_ctor(1, 2, 0); +if (lean_is_scalar(x_119)) { + x_120 = lean_alloc_ctor(1, 2, 0); } else { - x_116 = x_115; + x_120 = x_119; } -lean_ctor_set(x_116, 0, x_113); -lean_ctor_set(x_116, 1, x_114); -return x_116; +lean_ctor_set(x_120, 0, x_117); +lean_ctor_set(x_120, 1, x_118); +return x_120; } } } -block_103: +block_107: { -uint8_t x_98; -x_98 = l_Array_isEmpty___rarg(x_96); -if (x_98 == 0) +uint8_t x_101; +x_101 = l_Array_isEmpty___rarg(x_99); +if (x_101 == 0) { -lean_object* x_99; lean_object* x_100; -x_99 = lean_box(0); -x_100 = l_Lean_Compiler_LCNF_PassManager_run___lambda__3(x_96, x_99, x_2, x_3, x_95, x_5, x_97); -return x_100; +lean_object* x_102; lean_object* x_103; lean_object* x_104; +x_102 = l_Lean_Compiler_LCNF_shouldGenerateCode___closed__9; +x_103 = lean_box(0); +x_104 = l_Lean_Compiler_LCNF_PassManager_run___lambda__6(x_99, x_102, x_103, x_2, x_3, x_98, x_5, x_100); +return x_104; } else { -lean_object* x_101; lean_object* x_102; -lean_dec(x_96); -lean_dec(x_95); +lean_object* x_105; lean_object* x_106; +lean_dec(x_99); +lean_dec(x_98); lean_dec(x_5); lean_dec(x_3); lean_dec(x_2); -x_101 = l_Lean_Compiler_LCNF_shouldGenerateCode___closed__9; -x_102 = lean_alloc_ctor(0, 2, 0); -lean_ctor_set(x_102, 0, x_101); -lean_ctor_set(x_102, 1, x_97); -return x_102; +x_105 = l_Lean_Compiler_LCNF_shouldGenerateCode___closed__9; +x_106 = lean_alloc_ctor(0, 2, 0); +lean_ctor_set(x_106, 0, x_105); +lean_ctor_set(x_106, 1, x_100); +return x_106; } } } @@ -7653,7 +8291,50 @@ lean_dec(x_3); return x_8; } } -LEAN_EXPORT lean_object* l_Array_forIn_x27Unsafe_loop___at_Lean_Compiler_LCNF_PassManager_run___spec__8___boxed(lean_object* x_1, lean_object* x_2, lean_object* x_3, lean_object* x_4, lean_object* x_5, lean_object* x_6, lean_object* x_7, lean_object* x_8, lean_object* x_9, lean_object* x_10, lean_object* x_11, lean_object* x_12) { +LEAN_EXPORT lean_object* l_Array_forIn_x27Unsafe_loop___at_Lean_Compiler_LCNF_PassManager_run___spec__8___boxed(lean_object* x_1, lean_object* x_2, lean_object* x_3, lean_object* x_4, lean_object* x_5, lean_object* x_6, lean_object* x_7, lean_object* x_8, lean_object* x_9, lean_object* x_10, lean_object* x_11) { +_start: +{ +size_t x_12; size_t x_13; lean_object* x_14; +x_12 = lean_unbox_usize(x_4); +lean_dec(x_4); +x_13 = lean_unbox_usize(x_5); +lean_dec(x_5); +x_14 = l_Array_forIn_x27Unsafe_loop___at_Lean_Compiler_LCNF_PassManager_run___spec__8(x_1, x_2, x_3, x_12, x_13, x_6, x_7, x_8, x_9, x_10, x_11); +lean_dec(x_10); +lean_dec(x_9); +lean_dec(x_8); +lean_dec(x_7); +lean_dec(x_3); +lean_dec(x_2); +lean_dec(x_1); +return x_14; +} +} +LEAN_EXPORT lean_object* 
l_Lean_throwError___at_Lean_Compiler_LCNF_PassManager_run___spec__9___boxed(lean_object* x_1, lean_object* x_2, lean_object* x_3, lean_object* x_4, lean_object* x_5, lean_object* x_6) { +_start: +{ +lean_object* x_7; +x_7 = l_Lean_throwError___at_Lean_Compiler_LCNF_PassManager_run___spec__9(x_1, x_2, x_3, x_4, x_5, x_6); +lean_dec(x_5); +lean_dec(x_4); +lean_dec(x_3); +lean_dec(x_2); +return x_7; +} +} +LEAN_EXPORT lean_object* l_Lean_setEnv___at_Lean_Compiler_LCNF_PassManager_run___spec__10___boxed(lean_object* x_1, lean_object* x_2, lean_object* x_3, lean_object* x_4, lean_object* x_5, lean_object* x_6) { +_start: +{ +lean_object* x_7; +x_7 = l_Lean_setEnv___at_Lean_Compiler_LCNF_PassManager_run___spec__10(x_1, x_2, x_3, x_4, x_5, x_6); +lean_dec(x_5); +lean_dec(x_4); +lean_dec(x_3); +lean_dec(x_2); +return x_7; +} +} +LEAN_EXPORT lean_object* l_Array_forIn_x27Unsafe_loop___at_Lean_Compiler_LCNF_PassManager_run___spec__11___boxed(lean_object* x_1, lean_object* x_2, lean_object* x_3, lean_object* x_4, lean_object* x_5, lean_object* x_6, lean_object* x_7, lean_object* x_8, lean_object* x_9, lean_object* x_10, lean_object* x_11, lean_object* x_12) { _start: { size_t x_13; size_t x_14; lean_object* x_15; @@ -7661,14 +8342,14 @@ x_13 = lean_unbox_usize(x_5); lean_dec(x_5); x_14 = lean_unbox_usize(x_6); lean_dec(x_6); -x_15 = l_Array_forIn_x27Unsafe_loop___at_Lean_Compiler_LCNF_PassManager_run___spec__8(x_1, x_2, x_3, x_4, x_13, x_14, x_7, x_8, x_9, x_10, x_11, x_12); +x_15 = l_Array_forIn_x27Unsafe_loop___at_Lean_Compiler_LCNF_PassManager_run___spec__11(x_1, x_2, x_3, x_4, x_13, x_14, x_7, x_8, x_9, x_10, x_11, x_12); lean_dec(x_4); lean_dec(x_3); lean_dec(x_1); return x_15; } } -LEAN_EXPORT lean_object* l_Array_foldlMUnsafe_fold___at_Lean_Compiler_LCNF_PassManager_run___spec__9___boxed(lean_object* x_1, lean_object* x_2, lean_object* x_3, lean_object* x_4, lean_object* x_5, lean_object* x_6, lean_object* x_7, lean_object* x_8, lean_object* x_9) { +LEAN_EXPORT lean_object* l_Array_foldlMUnsafe_fold___at_Lean_Compiler_LCNF_PassManager_run___spec__12___boxed(lean_object* x_1, lean_object* x_2, lean_object* x_3, lean_object* x_4, lean_object* x_5, lean_object* x_6, lean_object* x_7, lean_object* x_8, lean_object* x_9) { _start: { size_t x_10; size_t x_11; lean_object* x_12; @@ -7676,14 +8357,14 @@ x_10 = lean_unbox_usize(x_2); lean_dec(x_2); x_11 = lean_unbox_usize(x_3); lean_dec(x_3); -x_12 = l_Array_foldlMUnsafe_fold___at_Lean_Compiler_LCNF_PassManager_run___spec__9(x_1, x_10, x_11, x_4, x_5, x_6, x_7, x_8, x_9); +x_12 = l_Array_foldlMUnsafe_fold___at_Lean_Compiler_LCNF_PassManager_run___spec__12(x_1, x_10, x_11, x_4, x_5, x_6, x_7, x_8, x_9); lean_dec(x_6); lean_dec(x_5); lean_dec(x_1); return x_12; } } -LEAN_EXPORT lean_object* l_Array_forIn_x27Unsafe_loop___at_Lean_Compiler_LCNF_PassManager_run___spec__10___boxed(lean_object* x_1, lean_object* x_2, lean_object* x_3, lean_object* x_4, lean_object* x_5, lean_object* x_6, lean_object* x_7, lean_object* x_8, lean_object* x_9, lean_object* x_10, lean_object* x_11) { +LEAN_EXPORT lean_object* l_Array_forIn_x27Unsafe_loop___at_Lean_Compiler_LCNF_PassManager_run___spec__13___boxed(lean_object* x_1, lean_object* x_2, lean_object* x_3, lean_object* x_4, lean_object* x_5, lean_object* x_6, lean_object* x_7, lean_object* x_8, lean_object* x_9, lean_object* x_10, lean_object* x_11) { _start: { size_t x_12; size_t x_13; lean_object* x_14; @@ -7691,14 +8372,33 @@ x_12 = lean_unbox_usize(x_4); lean_dec(x_4); x_13 = lean_unbox_usize(x_5); lean_dec(x_5); 
-x_14 = l_Array_forIn_x27Unsafe_loop___at_Lean_Compiler_LCNF_PassManager_run___spec__10(x_1, x_2, x_3, x_12, x_13, x_6, x_7, x_8, x_9, x_10, x_11); +x_14 = l_Array_forIn_x27Unsafe_loop___at_Lean_Compiler_LCNF_PassManager_run___spec__13(x_1, x_2, x_3, x_12, x_13, x_6, x_7, x_8, x_9, x_10, x_11); lean_dec(x_3); lean_dec(x_2); lean_dec(x_1); return x_14; } } -LEAN_EXPORT lean_object* l_Array_forIn_x27Unsafe_loop___at_Lean_Compiler_LCNF_PassManager_run___spec__11___boxed(lean_object* x_1, lean_object* x_2, lean_object* x_3, lean_object* x_4, lean_object* x_5, lean_object* x_6, lean_object* x_7, lean_object* x_8, lean_object* x_9, lean_object* x_10, lean_object* x_11, lean_object* x_12) { +LEAN_EXPORT lean_object* l_Array_forIn_x27Unsafe_loop___at_Lean_Compiler_LCNF_PassManager_run___spec__14___boxed(lean_object* x_1, lean_object* x_2, lean_object* x_3, lean_object* x_4, lean_object* x_5, lean_object* x_6, lean_object* x_7, lean_object* x_8, lean_object* x_9, lean_object* x_10, lean_object* x_11) { +_start: +{ +size_t x_12; size_t x_13; lean_object* x_14; +x_12 = lean_unbox_usize(x_4); +lean_dec(x_4); +x_13 = lean_unbox_usize(x_5); +lean_dec(x_5); +x_14 = l_Array_forIn_x27Unsafe_loop___at_Lean_Compiler_LCNF_PassManager_run___spec__14(x_1, x_2, x_3, x_12, x_13, x_6, x_7, x_8, x_9, x_10, x_11); +lean_dec(x_10); +lean_dec(x_9); +lean_dec(x_8); +lean_dec(x_7); +lean_dec(x_3); +lean_dec(x_2); +lean_dec(x_1); +return x_14; +} +} +LEAN_EXPORT lean_object* l_Array_forIn_x27Unsafe_loop___at_Lean_Compiler_LCNF_PassManager_run___spec__15___boxed(lean_object* x_1, lean_object* x_2, lean_object* x_3, lean_object* x_4, lean_object* x_5, lean_object* x_6, lean_object* x_7, lean_object* x_8, lean_object* x_9, lean_object* x_10, lean_object* x_11, lean_object* x_12) { _start: { size_t x_13; size_t x_14; lean_object* x_15; @@ -7706,42 +8406,89 @@ x_13 = lean_unbox_usize(x_5); lean_dec(x_5); x_14 = lean_unbox_usize(x_6); lean_dec(x_6); -x_15 = l_Array_forIn_x27Unsafe_loop___at_Lean_Compiler_LCNF_PassManager_run___spec__11(x_1, x_2, x_3, x_4, x_13, x_14, x_7, x_8, x_9, x_10, x_11, x_12); +x_15 = l_Array_forIn_x27Unsafe_loop___at_Lean_Compiler_LCNF_PassManager_run___spec__15(x_1, x_2, x_3, x_4, x_13, x_14, x_7, x_8, x_9, x_10, x_11, x_12); lean_dec(x_4); lean_dec(x_3); lean_dec(x_1); return x_15; } } -LEAN_EXPORT lean_object* l_Lean_Compiler_LCNF_PassManager_run___lambda__1___boxed(lean_object* x_1, lean_object* x_2, lean_object* x_3, lean_object* x_4, lean_object* x_5, lean_object* x_6, lean_object* x_7) { +LEAN_EXPORT lean_object* l_Lean_Compiler_LCNF_PassManager_run___lambda__1___boxed(lean_object* x_1, lean_object* x_2, lean_object* x_3, lean_object* x_4, lean_object* x_5, lean_object* x_6, lean_object* x_7, lean_object* x_8, lean_object* x_9, lean_object* x_10) { _start: { -lean_object* x_8; -x_8 = l_Lean_Compiler_LCNF_PassManager_run___lambda__1(x_1, x_2, x_3, x_4, x_5, x_6, x_7); +size_t x_11; lean_object* x_12; +x_11 = lean_unbox_usize(x_4); +lean_dec(x_4); +x_12 = l_Lean_Compiler_LCNF_PassManager_run___lambda__1(x_1, x_2, x_3, x_11, x_5, x_6, x_7, x_8, x_9, x_10); +lean_dec(x_7); lean_dec(x_6); lean_dec(x_5); -lean_dec(x_4); lean_dec(x_3); -lean_dec(x_2); -return x_8; +lean_dec(x_1); +return x_12; } } -LEAN_EXPORT lean_object* l_Lean_Compiler_LCNF_PassManager_run___lambda__2___boxed(lean_object* x_1, lean_object* x_2, lean_object* x_3, lean_object* x_4, lean_object* x_5, lean_object* x_6, lean_object* x_7) { +LEAN_EXPORT lean_object* l_Lean_Compiler_LCNF_PassManager_run___lambda__2___boxed(lean_object* 
x_1, lean_object* x_2, lean_object* x_3, lean_object* x_4, lean_object* x_5, lean_object* x_6, lean_object* x_7, lean_object* x_8, lean_object* x_9, lean_object* x_10) { _start: { -lean_object* x_8; -x_8 = l_Lean_Compiler_LCNF_PassManager_run___lambda__2(x_1, x_2, x_3, x_4, x_5, x_6, x_7); +size_t x_11; lean_object* x_12; +x_11 = lean_unbox_usize(x_3); +lean_dec(x_3); +x_12 = l_Lean_Compiler_LCNF_PassManager_run___lambda__2(x_1, x_2, x_11, x_4, x_5, x_6, x_7, x_8, x_9, x_10); +lean_dec(x_7); +lean_dec(x_6); +lean_dec(x_5); lean_dec(x_2); -return x_8; +lean_dec(x_1); +return x_12; } } -LEAN_EXPORT lean_object* l_Lean_Compiler_LCNF_PassManager_run___lambda__3___boxed(lean_object* x_1, lean_object* x_2, lean_object* x_3, lean_object* x_4, lean_object* x_5, lean_object* x_6, lean_object* x_7) { +LEAN_EXPORT lean_object* l_Lean_Compiler_LCNF_PassManager_run___lambda__3___boxed(lean_object* x_1, lean_object* x_2, lean_object* x_3, lean_object* x_4, lean_object* x_5, lean_object* x_6, lean_object* x_7, lean_object* x_8) { _start: { -lean_object* x_8; -x_8 = l_Lean_Compiler_LCNF_PassManager_run___lambda__3(x_1, x_2, x_3, x_4, x_5, x_6, x_7); +lean_object* x_9; +x_9 = l_Lean_Compiler_LCNF_PassManager_run___lambda__3(x_1, x_2, x_3, x_4, x_5, x_6, x_7, x_8); +lean_dec(x_3); +return x_9; +} +} +LEAN_EXPORT lean_object* l_Lean_Compiler_LCNF_PassManager_run___lambda__4___boxed(lean_object* x_1, lean_object* x_2, lean_object* x_3, lean_object* x_4, lean_object* x_5, lean_object* x_6, lean_object* x_7, lean_object* x_8, lean_object* x_9, lean_object* x_10) { +_start: +{ +size_t x_11; lean_object* x_12; +x_11 = lean_unbox_usize(x_4); +lean_dec(x_4); +x_12 = l_Lean_Compiler_LCNF_PassManager_run___lambda__4(x_1, x_2, x_3, x_11, x_5, x_6, x_7, x_8, x_9, x_10); +lean_dec(x_7); +lean_dec(x_6); +lean_dec(x_5); +lean_dec(x_3); +lean_dec(x_1); +return x_12; +} +} +LEAN_EXPORT lean_object* l_Lean_Compiler_LCNF_PassManager_run___lambda__5___boxed(lean_object* x_1, lean_object* x_2, lean_object* x_3, lean_object* x_4, lean_object* x_5, lean_object* x_6, lean_object* x_7, lean_object* x_8, lean_object* x_9, lean_object* x_10) { +_start: +{ +size_t x_11; lean_object* x_12; +x_11 = lean_unbox_usize(x_3); +lean_dec(x_3); +x_12 = l_Lean_Compiler_LCNF_PassManager_run___lambda__5(x_1, x_2, x_11, x_4, x_5, x_6, x_7, x_8, x_9, x_10); +lean_dec(x_7); +lean_dec(x_6); +lean_dec(x_5); lean_dec(x_2); -return x_8; +lean_dec(x_1); +return x_12; +} +} +LEAN_EXPORT lean_object* l_Lean_Compiler_LCNF_PassManager_run___lambda__6___boxed(lean_object* x_1, lean_object* x_2, lean_object* x_3, lean_object* x_4, lean_object* x_5, lean_object* x_6, lean_object* x_7, lean_object* x_8) { +_start: +{ +lean_object* x_9; +x_9 = l_Lean_Compiler_LCNF_PassManager_run___lambda__6(x_1, x_2, x_3, x_4, x_5, x_6, x_7, x_8); +lean_dec(x_3); +return x_9; } } LEAN_EXPORT lean_object* l_Lean_Compiler_LCNF_PassManager_run___boxed(lean_object* x_1, lean_object* x_2, lean_object* x_3, lean_object* x_4, lean_object* x_5, lean_object* x_6) { @@ -9509,7 +10256,7 @@ lean_dec(x_1); return x_7; } } -static lean_object* _init_l_Lean_Compiler_LCNF_initFn____x40_Lean_Compiler_LCNF_Main___hyg_1452____closed__1() { +static lean_object* _init_l_Lean_Compiler_LCNF_initFn____x40_Lean_Compiler_LCNF_Main___hyg_1699____closed__1() { _start: { lean_object* x_1; @@ -9517,17 +10264,17 @@ x_1 = lean_mk_string_unchecked("init", 4, 4); return x_1; } } -static lean_object* _init_l_Lean_Compiler_LCNF_initFn____x40_Lean_Compiler_LCNF_Main___hyg_1452____closed__2() { +static lean_object* 
_init_l_Lean_Compiler_LCNF_initFn____x40_Lean_Compiler_LCNF_Main___hyg_1699____closed__2() { _start: { lean_object* x_1; lean_object* x_2; lean_object* x_3; x_1 = l_Array_forIn_x27Unsafe_loop___at_Lean_Compiler_LCNF_checkpoint___spec__1___lambda__3___closed__1; -x_2 = l_Lean_Compiler_LCNF_initFn____x40_Lean_Compiler_LCNF_Main___hyg_1452____closed__1; +x_2 = l_Lean_Compiler_LCNF_initFn____x40_Lean_Compiler_LCNF_Main___hyg_1699____closed__1; x_3 = l_Lean_Name_mkStr2(x_1, x_2); return x_3; } } -static lean_object* _init_l_Lean_Compiler_LCNF_initFn____x40_Lean_Compiler_LCNF_Main___hyg_1452____closed__3() { +static lean_object* _init_l_Lean_Compiler_LCNF_initFn____x40_Lean_Compiler_LCNF_Main___hyg_1699____closed__3() { _start: { lean_object* x_1; @@ -9535,27 +10282,27 @@ x_1 = lean_mk_string_unchecked("Lean", 4, 4); return x_1; } } -static lean_object* _init_l_Lean_Compiler_LCNF_initFn____x40_Lean_Compiler_LCNF_Main___hyg_1452____closed__4() { +static lean_object* _init_l_Lean_Compiler_LCNF_initFn____x40_Lean_Compiler_LCNF_Main___hyg_1699____closed__4() { _start: { lean_object* x_1; lean_object* x_2; lean_object* x_3; x_1 = lean_box(0); -x_2 = l_Lean_Compiler_LCNF_initFn____x40_Lean_Compiler_LCNF_Main___hyg_1452____closed__3; +x_2 = l_Lean_Compiler_LCNF_initFn____x40_Lean_Compiler_LCNF_Main___hyg_1699____closed__3; x_3 = l_Lean_Name_str___override(x_1, x_2); return x_3; } } -static lean_object* _init_l_Lean_Compiler_LCNF_initFn____x40_Lean_Compiler_LCNF_Main___hyg_1452____closed__5() { +static lean_object* _init_l_Lean_Compiler_LCNF_initFn____x40_Lean_Compiler_LCNF_Main___hyg_1699____closed__5() { _start: { lean_object* x_1; lean_object* x_2; lean_object* x_3; -x_1 = l_Lean_Compiler_LCNF_initFn____x40_Lean_Compiler_LCNF_Main___hyg_1452____closed__4; +x_1 = l_Lean_Compiler_LCNF_initFn____x40_Lean_Compiler_LCNF_Main___hyg_1699____closed__4; x_2 = l_Array_forIn_x27Unsafe_loop___at_Lean_Compiler_LCNF_checkpoint___spec__1___lambda__3___closed__1; x_3 = l_Lean_Name_str___override(x_1, x_2); return x_3; } } -static lean_object* _init_l_Lean_Compiler_LCNF_initFn____x40_Lean_Compiler_LCNF_Main___hyg_1452____closed__6() { +static lean_object* _init_l_Lean_Compiler_LCNF_initFn____x40_Lean_Compiler_LCNF_Main___hyg_1699____closed__6() { _start: { lean_object* x_1; @@ -9563,17 +10310,17 @@ x_1 = lean_mk_string_unchecked("LCNF", 4, 4); return x_1; } } -static lean_object* _init_l_Lean_Compiler_LCNF_initFn____x40_Lean_Compiler_LCNF_Main___hyg_1452____closed__7() { +static lean_object* _init_l_Lean_Compiler_LCNF_initFn____x40_Lean_Compiler_LCNF_Main___hyg_1699____closed__7() { _start: { lean_object* x_1; lean_object* x_2; lean_object* x_3; -x_1 = l_Lean_Compiler_LCNF_initFn____x40_Lean_Compiler_LCNF_Main___hyg_1452____closed__5; -x_2 = l_Lean_Compiler_LCNF_initFn____x40_Lean_Compiler_LCNF_Main___hyg_1452____closed__6; +x_1 = l_Lean_Compiler_LCNF_initFn____x40_Lean_Compiler_LCNF_Main___hyg_1699____closed__5; +x_2 = l_Lean_Compiler_LCNF_initFn____x40_Lean_Compiler_LCNF_Main___hyg_1699____closed__6; x_3 = l_Lean_Name_str___override(x_1, x_2); return x_3; } } -static lean_object* _init_l_Lean_Compiler_LCNF_initFn____x40_Lean_Compiler_LCNF_Main___hyg_1452____closed__8() { +static lean_object* _init_l_Lean_Compiler_LCNF_initFn____x40_Lean_Compiler_LCNF_Main___hyg_1699____closed__8() { _start: { lean_object* x_1; @@ -9581,17 +10328,17 @@ x_1 = lean_mk_string_unchecked("initFn", 6, 6); return x_1; } } -static lean_object* _init_l_Lean_Compiler_LCNF_initFn____x40_Lean_Compiler_LCNF_Main___hyg_1452____closed__9() { 
+static lean_object* _init_l_Lean_Compiler_LCNF_initFn____x40_Lean_Compiler_LCNF_Main___hyg_1699____closed__9() { _start: { lean_object* x_1; lean_object* x_2; lean_object* x_3; -x_1 = l_Lean_Compiler_LCNF_initFn____x40_Lean_Compiler_LCNF_Main___hyg_1452____closed__7; -x_2 = l_Lean_Compiler_LCNF_initFn____x40_Lean_Compiler_LCNF_Main___hyg_1452____closed__8; +x_1 = l_Lean_Compiler_LCNF_initFn____x40_Lean_Compiler_LCNF_Main___hyg_1699____closed__7; +x_2 = l_Lean_Compiler_LCNF_initFn____x40_Lean_Compiler_LCNF_Main___hyg_1699____closed__8; x_3 = l_Lean_Name_str___override(x_1, x_2); return x_3; } } -static lean_object* _init_l_Lean_Compiler_LCNF_initFn____x40_Lean_Compiler_LCNF_Main___hyg_1452____closed__10() { +static lean_object* _init_l_Lean_Compiler_LCNF_initFn____x40_Lean_Compiler_LCNF_Main___hyg_1699____closed__10() { _start: { lean_object* x_1; @@ -9599,47 +10346,47 @@ x_1 = lean_mk_string_unchecked("_@", 2, 2); return x_1; } } -static lean_object* _init_l_Lean_Compiler_LCNF_initFn____x40_Lean_Compiler_LCNF_Main___hyg_1452____closed__11() { +static lean_object* _init_l_Lean_Compiler_LCNF_initFn____x40_Lean_Compiler_LCNF_Main___hyg_1699____closed__11() { _start: { lean_object* x_1; lean_object* x_2; lean_object* x_3; -x_1 = l_Lean_Compiler_LCNF_initFn____x40_Lean_Compiler_LCNF_Main___hyg_1452____closed__9; -x_2 = l_Lean_Compiler_LCNF_initFn____x40_Lean_Compiler_LCNF_Main___hyg_1452____closed__10; +x_1 = l_Lean_Compiler_LCNF_initFn____x40_Lean_Compiler_LCNF_Main___hyg_1699____closed__9; +x_2 = l_Lean_Compiler_LCNF_initFn____x40_Lean_Compiler_LCNF_Main___hyg_1699____closed__10; x_3 = l_Lean_Name_str___override(x_1, x_2); return x_3; } } -static lean_object* _init_l_Lean_Compiler_LCNF_initFn____x40_Lean_Compiler_LCNF_Main___hyg_1452____closed__12() { +static lean_object* _init_l_Lean_Compiler_LCNF_initFn____x40_Lean_Compiler_LCNF_Main___hyg_1699____closed__12() { _start: { lean_object* x_1; lean_object* x_2; lean_object* x_3; -x_1 = l_Lean_Compiler_LCNF_initFn____x40_Lean_Compiler_LCNF_Main___hyg_1452____closed__11; -x_2 = l_Lean_Compiler_LCNF_initFn____x40_Lean_Compiler_LCNF_Main___hyg_1452____closed__3; +x_1 = l_Lean_Compiler_LCNF_initFn____x40_Lean_Compiler_LCNF_Main___hyg_1699____closed__11; +x_2 = l_Lean_Compiler_LCNF_initFn____x40_Lean_Compiler_LCNF_Main___hyg_1699____closed__3; x_3 = l_Lean_Name_str___override(x_1, x_2); return x_3; } } -static lean_object* _init_l_Lean_Compiler_LCNF_initFn____x40_Lean_Compiler_LCNF_Main___hyg_1452____closed__13() { +static lean_object* _init_l_Lean_Compiler_LCNF_initFn____x40_Lean_Compiler_LCNF_Main___hyg_1699____closed__13() { _start: { lean_object* x_1; lean_object* x_2; lean_object* x_3; -x_1 = l_Lean_Compiler_LCNF_initFn____x40_Lean_Compiler_LCNF_Main___hyg_1452____closed__12; +x_1 = l_Lean_Compiler_LCNF_initFn____x40_Lean_Compiler_LCNF_Main___hyg_1699____closed__12; x_2 = l_Array_forIn_x27Unsafe_loop___at_Lean_Compiler_LCNF_checkpoint___spec__1___lambda__3___closed__1; x_3 = l_Lean_Name_str___override(x_1, x_2); return x_3; } } -static lean_object* _init_l_Lean_Compiler_LCNF_initFn____x40_Lean_Compiler_LCNF_Main___hyg_1452____closed__14() { +static lean_object* _init_l_Lean_Compiler_LCNF_initFn____x40_Lean_Compiler_LCNF_Main___hyg_1699____closed__14() { _start: { lean_object* x_1; lean_object* x_2; lean_object* x_3; -x_1 = l_Lean_Compiler_LCNF_initFn____x40_Lean_Compiler_LCNF_Main___hyg_1452____closed__13; -x_2 = l_Lean_Compiler_LCNF_initFn____x40_Lean_Compiler_LCNF_Main___hyg_1452____closed__6; +x_1 = 
l_Lean_Compiler_LCNF_initFn____x40_Lean_Compiler_LCNF_Main___hyg_1699____closed__13; +x_2 = l_Lean_Compiler_LCNF_initFn____x40_Lean_Compiler_LCNF_Main___hyg_1699____closed__6; x_3 = l_Lean_Name_str___override(x_1, x_2); return x_3; } } -static lean_object* _init_l_Lean_Compiler_LCNF_initFn____x40_Lean_Compiler_LCNF_Main___hyg_1452____closed__15() { +static lean_object* _init_l_Lean_Compiler_LCNF_initFn____x40_Lean_Compiler_LCNF_Main___hyg_1699____closed__15() { _start: { lean_object* x_1; @@ -9647,17 +10394,17 @@ x_1 = lean_mk_string_unchecked("Main", 4, 4); return x_1; } } -static lean_object* _init_l_Lean_Compiler_LCNF_initFn____x40_Lean_Compiler_LCNF_Main___hyg_1452____closed__16() { +static lean_object* _init_l_Lean_Compiler_LCNF_initFn____x40_Lean_Compiler_LCNF_Main___hyg_1699____closed__16() { _start: { lean_object* x_1; lean_object* x_2; lean_object* x_3; -x_1 = l_Lean_Compiler_LCNF_initFn____x40_Lean_Compiler_LCNF_Main___hyg_1452____closed__14; -x_2 = l_Lean_Compiler_LCNF_initFn____x40_Lean_Compiler_LCNF_Main___hyg_1452____closed__15; +x_1 = l_Lean_Compiler_LCNF_initFn____x40_Lean_Compiler_LCNF_Main___hyg_1699____closed__14; +x_2 = l_Lean_Compiler_LCNF_initFn____x40_Lean_Compiler_LCNF_Main___hyg_1699____closed__15; x_3 = l_Lean_Name_str___override(x_1, x_2); return x_3; } } -static lean_object* _init_l_Lean_Compiler_LCNF_initFn____x40_Lean_Compiler_LCNF_Main___hyg_1452____closed__17() { +static lean_object* _init_l_Lean_Compiler_LCNF_initFn____x40_Lean_Compiler_LCNF_Main___hyg_1699____closed__17() { _start: { lean_object* x_1; @@ -9665,27 +10412,27 @@ x_1 = lean_mk_string_unchecked("_hyg", 4, 4); return x_1; } } -static lean_object* _init_l_Lean_Compiler_LCNF_initFn____x40_Lean_Compiler_LCNF_Main___hyg_1452____closed__18() { +static lean_object* _init_l_Lean_Compiler_LCNF_initFn____x40_Lean_Compiler_LCNF_Main___hyg_1699____closed__18() { _start: { lean_object* x_1; lean_object* x_2; lean_object* x_3; -x_1 = l_Lean_Compiler_LCNF_initFn____x40_Lean_Compiler_LCNF_Main___hyg_1452____closed__16; -x_2 = l_Lean_Compiler_LCNF_initFn____x40_Lean_Compiler_LCNF_Main___hyg_1452____closed__17; +x_1 = l_Lean_Compiler_LCNF_initFn____x40_Lean_Compiler_LCNF_Main___hyg_1699____closed__16; +x_2 = l_Lean_Compiler_LCNF_initFn____x40_Lean_Compiler_LCNF_Main___hyg_1699____closed__17; x_3 = l_Lean_Name_str___override(x_1, x_2); return x_3; } } -static lean_object* _init_l_Lean_Compiler_LCNF_initFn____x40_Lean_Compiler_LCNF_Main___hyg_1452____closed__19() { +static lean_object* _init_l_Lean_Compiler_LCNF_initFn____x40_Lean_Compiler_LCNF_Main___hyg_1699____closed__19() { _start: { lean_object* x_1; lean_object* x_2; lean_object* x_3; -x_1 = l_Lean_Compiler_LCNF_initFn____x40_Lean_Compiler_LCNF_Main___hyg_1452____closed__18; -x_2 = lean_unsigned_to_nat(1452u); +x_1 = l_Lean_Compiler_LCNF_initFn____x40_Lean_Compiler_LCNF_Main___hyg_1699____closed__18; +x_2 = lean_unsigned_to_nat(1699u); x_3 = l_Lean_Name_num___override(x_1, x_2); return x_3; } } -static lean_object* _init_l_Lean_Compiler_LCNF_initFn____x40_Lean_Compiler_LCNF_Main___hyg_1452____closed__20() { +static lean_object* _init_l_Lean_Compiler_LCNF_initFn____x40_Lean_Compiler_LCNF_Main___hyg_1699____closed__20() { _start: { lean_object* x_1; @@ -9693,17 +10440,17 @@ x_1 = lean_mk_string_unchecked("test", 4, 4); return x_1; } } -static lean_object* _init_l_Lean_Compiler_LCNF_initFn____x40_Lean_Compiler_LCNF_Main___hyg_1452____closed__21() { +static lean_object* _init_l_Lean_Compiler_LCNF_initFn____x40_Lean_Compiler_LCNF_Main___hyg_1699____closed__21() { 
_start: { lean_object* x_1; lean_object* x_2; lean_object* x_3; x_1 = l_Array_forIn_x27Unsafe_loop___at_Lean_Compiler_LCNF_checkpoint___spec__1___lambda__3___closed__1; -x_2 = l_Lean_Compiler_LCNF_initFn____x40_Lean_Compiler_LCNF_Main___hyg_1452____closed__20; +x_2 = l_Lean_Compiler_LCNF_initFn____x40_Lean_Compiler_LCNF_Main___hyg_1699____closed__20; x_3 = l_Lean_Name_mkStr2(x_1, x_2); return x_3; } } -static lean_object* _init_l_Lean_Compiler_LCNF_initFn____x40_Lean_Compiler_LCNF_Main___hyg_1452____closed__22() { +static lean_object* _init_l_Lean_Compiler_LCNF_initFn____x40_Lean_Compiler_LCNF_Main___hyg_1699____closed__22() { _start: { lean_object* x_1; @@ -9711,23 +10458,23 @@ x_1 = lean_mk_string_unchecked("jp", 2, 2); return x_1; } } -static lean_object* _init_l_Lean_Compiler_LCNF_initFn____x40_Lean_Compiler_LCNF_Main___hyg_1452____closed__23() { +static lean_object* _init_l_Lean_Compiler_LCNF_initFn____x40_Lean_Compiler_LCNF_Main___hyg_1699____closed__23() { _start: { lean_object* x_1; lean_object* x_2; lean_object* x_3; x_1 = l_Array_forIn_x27Unsafe_loop___at_Lean_Compiler_LCNF_checkpoint___spec__1___lambda__3___closed__1; -x_2 = l_Lean_Compiler_LCNF_initFn____x40_Lean_Compiler_LCNF_Main___hyg_1452____closed__22; +x_2 = l_Lean_Compiler_LCNF_initFn____x40_Lean_Compiler_LCNF_Main___hyg_1699____closed__22; x_3 = l_Lean_Name_mkStr2(x_1, x_2); return x_3; } } -LEAN_EXPORT lean_object* l_Lean_Compiler_LCNF_initFn____x40_Lean_Compiler_LCNF_Main___hyg_1452_(lean_object* x_1) { +LEAN_EXPORT lean_object* l_Lean_Compiler_LCNF_initFn____x40_Lean_Compiler_LCNF_Main___hyg_1699_(lean_object* x_1) { _start: { lean_object* x_2; uint8_t x_3; lean_object* x_4; lean_object* x_5; -x_2 = l_Lean_Compiler_LCNF_initFn____x40_Lean_Compiler_LCNF_Main___hyg_1452____closed__2; +x_2 = l_Lean_Compiler_LCNF_initFn____x40_Lean_Compiler_LCNF_Main___hyg_1699____closed__2; x_3 = 1; -x_4 = l_Lean_Compiler_LCNF_initFn____x40_Lean_Compiler_LCNF_Main___hyg_1452____closed__19; +x_4 = l_Lean_Compiler_LCNF_initFn____x40_Lean_Compiler_LCNF_Main___hyg_1699____closed__19; x_5 = l_Lean_registerTraceClass(x_2, x_3, x_4, x_1); if (lean_obj_tag(x_5) == 0) { @@ -9735,7 +10482,7 @@ lean_object* x_6; lean_object* x_7; lean_object* x_8; x_6 = lean_ctor_get(x_5, 1); lean_inc(x_6); lean_dec(x_5); -x_7 = l_Lean_Compiler_LCNF_initFn____x40_Lean_Compiler_LCNF_Main___hyg_1452____closed__21; +x_7 = l_Lean_Compiler_LCNF_initFn____x40_Lean_Compiler_LCNF_Main___hyg_1699____closed__21; x_8 = l_Lean_registerTraceClass(x_7, x_3, x_4, x_6); if (lean_obj_tag(x_8) == 0) { @@ -9743,7 +10490,7 @@ lean_object* x_9; lean_object* x_10; lean_object* x_11; x_9 = lean_ctor_get(x_8, 1); lean_inc(x_9); lean_dec(x_8); -x_10 = l_Lean_Compiler_LCNF_PassManager_run___lambda__2___closed__2; +x_10 = l_Lean_Compiler_LCNF_PassManager_run___lambda__3___closed__2; x_11 = l_Lean_registerTraceClass(x_10, x_3, x_4, x_9); if (lean_obj_tag(x_11) == 0) { @@ -9751,7 +10498,7 @@ lean_object* x_12; lean_object* x_13; uint8_t x_14; lean_object* x_15; x_12 = lean_ctor_get(x_11, 1); lean_inc(x_12); lean_dec(x_11); -x_13 = l_Lean_Compiler_LCNF_initFn____x40_Lean_Compiler_LCNF_Main___hyg_1452____closed__23; +x_13 = l_Lean_Compiler_LCNF_initFn____x40_Lean_Compiler_LCNF_Main___hyg_1699____closed__23; x_14 = 0; x_15 = l_Lean_registerTraceClass(x_13, x_14, x_4, x_12); return x_15; @@ -9828,6 +10575,10 @@ return x_27; } lean_object* initialize_Lean_Compiler_Options(uint8_t builtin, lean_object*); lean_object* initialize_Lean_Compiler_ExternAttr(uint8_t builtin, lean_object*); +lean_object* 
initialize_Lean_Compiler_IR(uint8_t builtin, lean_object*); +lean_object* initialize_Lean_Compiler_IR_Basic(uint8_t builtin, lean_object*); +lean_object* initialize_Lean_Compiler_IR_Checker(uint8_t builtin, lean_object*); +lean_object* initialize_Lean_Compiler_IR_ToIR(uint8_t builtin, lean_object*); lean_object* initialize_Lean_Compiler_LCNF_PassManager(uint8_t builtin, lean_object*); lean_object* initialize_Lean_Compiler_LCNF_Passes(uint8_t builtin, lean_object*); lean_object* initialize_Lean_Compiler_LCNF_PrettyPrinter(uint8_t builtin, lean_object*); @@ -9847,6 +10598,18 @@ lean_dec_ref(res); res = initialize_Lean_Compiler_ExternAttr(builtin, lean_io_mk_world()); if (lean_io_result_is_error(res)) return res; lean_dec_ref(res); +res = initialize_Lean_Compiler_IR(builtin, lean_io_mk_world()); +if (lean_io_result_is_error(res)) return res; +lean_dec_ref(res); +res = initialize_Lean_Compiler_IR_Basic(builtin, lean_io_mk_world()); +if (lean_io_result_is_error(res)) return res; +lean_dec_ref(res); +res = initialize_Lean_Compiler_IR_Checker(builtin, lean_io_mk_world()); +if (lean_io_result_is_error(res)) return res; +lean_dec_ref(res); +res = initialize_Lean_Compiler_IR_ToIR(builtin, lean_io_mk_world()); +if (lean_io_result_is_error(res)) return res; +lean_dec_ref(res); res = initialize_Lean_Compiler_LCNF_PassManager(builtin, lean_io_mk_world()); if (lean_io_result_is_error(res)) return res; lean_dec_ref(res); @@ -9999,14 +10762,20 @@ l_Array_forIn_x27Unsafe_loop___at_Lean_Compiler_LCNF_PassManager_run___spec__8__ lean_mark_persistent(l_Array_forIn_x27Unsafe_loop___at_Lean_Compiler_LCNF_PassManager_run___spec__8___closed__1); l_Array_forIn_x27Unsafe_loop___at_Lean_Compiler_LCNF_PassManager_run___spec__8___closed__2 = _init_l_Array_forIn_x27Unsafe_loop___at_Lean_Compiler_LCNF_PassManager_run___spec__8___closed__2(); lean_mark_persistent(l_Array_forIn_x27Unsafe_loop___at_Lean_Compiler_LCNF_PassManager_run___spec__8___closed__2); -l_Array_forIn_x27Unsafe_loop___at_Lean_Compiler_LCNF_PassManager_run___spec__8___closed__3 = _init_l_Array_forIn_x27Unsafe_loop___at_Lean_Compiler_LCNF_PassManager_run___spec__8___closed__3(); -lean_mark_persistent(l_Array_forIn_x27Unsafe_loop___at_Lean_Compiler_LCNF_PassManager_run___spec__8___closed__3); -l_Array_forIn_x27Unsafe_loop___at_Lean_Compiler_LCNF_PassManager_run___spec__8___closed__4 = _init_l_Array_forIn_x27Unsafe_loop___at_Lean_Compiler_LCNF_PassManager_run___spec__8___closed__4(); -lean_mark_persistent(l_Array_forIn_x27Unsafe_loop___at_Lean_Compiler_LCNF_PassManager_run___spec__8___closed__4); +l_Array_forIn_x27Unsafe_loop___at_Lean_Compiler_LCNF_PassManager_run___spec__11___closed__1 = _init_l_Array_forIn_x27Unsafe_loop___at_Lean_Compiler_LCNF_PassManager_run___spec__11___closed__1(); +lean_mark_persistent(l_Array_forIn_x27Unsafe_loop___at_Lean_Compiler_LCNF_PassManager_run___spec__11___closed__1); +l_Array_forIn_x27Unsafe_loop___at_Lean_Compiler_LCNF_PassManager_run___spec__11___closed__2 = _init_l_Array_forIn_x27Unsafe_loop___at_Lean_Compiler_LCNF_PassManager_run___spec__11___closed__2(); +lean_mark_persistent(l_Array_forIn_x27Unsafe_loop___at_Lean_Compiler_LCNF_PassManager_run___spec__11___closed__2); +l_Array_forIn_x27Unsafe_loop___at_Lean_Compiler_LCNF_PassManager_run___spec__11___closed__3 = _init_l_Array_forIn_x27Unsafe_loop___at_Lean_Compiler_LCNF_PassManager_run___spec__11___closed__3(); +lean_mark_persistent(l_Array_forIn_x27Unsafe_loop___at_Lean_Compiler_LCNF_PassManager_run___spec__11___closed__3); 
+l_Array_forIn_x27Unsafe_loop___at_Lean_Compiler_LCNF_PassManager_run___spec__11___closed__4 = _init_l_Array_forIn_x27Unsafe_loop___at_Lean_Compiler_LCNF_PassManager_run___spec__11___closed__4(); +lean_mark_persistent(l_Array_forIn_x27Unsafe_loop___at_Lean_Compiler_LCNF_PassManager_run___spec__11___closed__4); l_Lean_Compiler_LCNF_PassManager_run___lambda__2___closed__1 = _init_l_Lean_Compiler_LCNF_PassManager_run___lambda__2___closed__1(); lean_mark_persistent(l_Lean_Compiler_LCNF_PassManager_run___lambda__2___closed__1); -l_Lean_Compiler_LCNF_PassManager_run___lambda__2___closed__2 = _init_l_Lean_Compiler_LCNF_PassManager_run___lambda__2___closed__2(); -lean_mark_persistent(l_Lean_Compiler_LCNF_PassManager_run___lambda__2___closed__2); +l_Lean_Compiler_LCNF_PassManager_run___lambda__3___closed__1 = _init_l_Lean_Compiler_LCNF_PassManager_run___lambda__3___closed__1(); +lean_mark_persistent(l_Lean_Compiler_LCNF_PassManager_run___lambda__3___closed__1); +l_Lean_Compiler_LCNF_PassManager_run___lambda__3___closed__2 = _init_l_Lean_Compiler_LCNF_PassManager_run___lambda__3___closed__2(); +lean_mark_persistent(l_Lean_Compiler_LCNF_PassManager_run___lambda__3___closed__2); l_Lean_Compiler_LCNF_compile___closed__1 = _init_l_Lean_Compiler_LCNF_compile___closed__1(); lean_mark_persistent(l_Lean_Compiler_LCNF_compile___closed__1); l_Lean_Compiler_LCNF_compile___closed__2 = _init_l_Lean_Compiler_LCNF_compile___closed__2(); @@ -10027,53 +10796,53 @@ l_Lean_Compiler_LCNF_main___lambda__1___closed__2 = _init_l_Lean_Compiler_LCNF_m lean_mark_persistent(l_Lean_Compiler_LCNF_main___lambda__1___closed__2); l_Lean_Compiler_LCNF_main___closed__1 = _init_l_Lean_Compiler_LCNF_main___closed__1(); lean_mark_persistent(l_Lean_Compiler_LCNF_main___closed__1); -l_Lean_Compiler_LCNF_initFn____x40_Lean_Compiler_LCNF_Main___hyg_1452____closed__1 = _init_l_Lean_Compiler_LCNF_initFn____x40_Lean_Compiler_LCNF_Main___hyg_1452____closed__1(); -lean_mark_persistent(l_Lean_Compiler_LCNF_initFn____x40_Lean_Compiler_LCNF_Main___hyg_1452____closed__1); -l_Lean_Compiler_LCNF_initFn____x40_Lean_Compiler_LCNF_Main___hyg_1452____closed__2 = _init_l_Lean_Compiler_LCNF_initFn____x40_Lean_Compiler_LCNF_Main___hyg_1452____closed__2(); -lean_mark_persistent(l_Lean_Compiler_LCNF_initFn____x40_Lean_Compiler_LCNF_Main___hyg_1452____closed__2); -l_Lean_Compiler_LCNF_initFn____x40_Lean_Compiler_LCNF_Main___hyg_1452____closed__3 = _init_l_Lean_Compiler_LCNF_initFn____x40_Lean_Compiler_LCNF_Main___hyg_1452____closed__3(); -lean_mark_persistent(l_Lean_Compiler_LCNF_initFn____x40_Lean_Compiler_LCNF_Main___hyg_1452____closed__3); -l_Lean_Compiler_LCNF_initFn____x40_Lean_Compiler_LCNF_Main___hyg_1452____closed__4 = _init_l_Lean_Compiler_LCNF_initFn____x40_Lean_Compiler_LCNF_Main___hyg_1452____closed__4(); -lean_mark_persistent(l_Lean_Compiler_LCNF_initFn____x40_Lean_Compiler_LCNF_Main___hyg_1452____closed__4); -l_Lean_Compiler_LCNF_initFn____x40_Lean_Compiler_LCNF_Main___hyg_1452____closed__5 = _init_l_Lean_Compiler_LCNF_initFn____x40_Lean_Compiler_LCNF_Main___hyg_1452____closed__5(); -lean_mark_persistent(l_Lean_Compiler_LCNF_initFn____x40_Lean_Compiler_LCNF_Main___hyg_1452____closed__5); -l_Lean_Compiler_LCNF_initFn____x40_Lean_Compiler_LCNF_Main___hyg_1452____closed__6 = _init_l_Lean_Compiler_LCNF_initFn____x40_Lean_Compiler_LCNF_Main___hyg_1452____closed__6(); -lean_mark_persistent(l_Lean_Compiler_LCNF_initFn____x40_Lean_Compiler_LCNF_Main___hyg_1452____closed__6); 
-l_Lean_Compiler_LCNF_initFn____x40_Lean_Compiler_LCNF_Main___hyg_1452____closed__7 = _init_l_Lean_Compiler_LCNF_initFn____x40_Lean_Compiler_LCNF_Main___hyg_1452____closed__7(); -lean_mark_persistent(l_Lean_Compiler_LCNF_initFn____x40_Lean_Compiler_LCNF_Main___hyg_1452____closed__7); -l_Lean_Compiler_LCNF_initFn____x40_Lean_Compiler_LCNF_Main___hyg_1452____closed__8 = _init_l_Lean_Compiler_LCNF_initFn____x40_Lean_Compiler_LCNF_Main___hyg_1452____closed__8(); -lean_mark_persistent(l_Lean_Compiler_LCNF_initFn____x40_Lean_Compiler_LCNF_Main___hyg_1452____closed__8); -l_Lean_Compiler_LCNF_initFn____x40_Lean_Compiler_LCNF_Main___hyg_1452____closed__9 = _init_l_Lean_Compiler_LCNF_initFn____x40_Lean_Compiler_LCNF_Main___hyg_1452____closed__9(); -lean_mark_persistent(l_Lean_Compiler_LCNF_initFn____x40_Lean_Compiler_LCNF_Main___hyg_1452____closed__9); -l_Lean_Compiler_LCNF_initFn____x40_Lean_Compiler_LCNF_Main___hyg_1452____closed__10 = _init_l_Lean_Compiler_LCNF_initFn____x40_Lean_Compiler_LCNF_Main___hyg_1452____closed__10(); -lean_mark_persistent(l_Lean_Compiler_LCNF_initFn____x40_Lean_Compiler_LCNF_Main___hyg_1452____closed__10); -l_Lean_Compiler_LCNF_initFn____x40_Lean_Compiler_LCNF_Main___hyg_1452____closed__11 = _init_l_Lean_Compiler_LCNF_initFn____x40_Lean_Compiler_LCNF_Main___hyg_1452____closed__11(); -lean_mark_persistent(l_Lean_Compiler_LCNF_initFn____x40_Lean_Compiler_LCNF_Main___hyg_1452____closed__11); -l_Lean_Compiler_LCNF_initFn____x40_Lean_Compiler_LCNF_Main___hyg_1452____closed__12 = _init_l_Lean_Compiler_LCNF_initFn____x40_Lean_Compiler_LCNF_Main___hyg_1452____closed__12(); -lean_mark_persistent(l_Lean_Compiler_LCNF_initFn____x40_Lean_Compiler_LCNF_Main___hyg_1452____closed__12); -l_Lean_Compiler_LCNF_initFn____x40_Lean_Compiler_LCNF_Main___hyg_1452____closed__13 = _init_l_Lean_Compiler_LCNF_initFn____x40_Lean_Compiler_LCNF_Main___hyg_1452____closed__13(); -lean_mark_persistent(l_Lean_Compiler_LCNF_initFn____x40_Lean_Compiler_LCNF_Main___hyg_1452____closed__13); -l_Lean_Compiler_LCNF_initFn____x40_Lean_Compiler_LCNF_Main___hyg_1452____closed__14 = _init_l_Lean_Compiler_LCNF_initFn____x40_Lean_Compiler_LCNF_Main___hyg_1452____closed__14(); -lean_mark_persistent(l_Lean_Compiler_LCNF_initFn____x40_Lean_Compiler_LCNF_Main___hyg_1452____closed__14); -l_Lean_Compiler_LCNF_initFn____x40_Lean_Compiler_LCNF_Main___hyg_1452____closed__15 = _init_l_Lean_Compiler_LCNF_initFn____x40_Lean_Compiler_LCNF_Main___hyg_1452____closed__15(); -lean_mark_persistent(l_Lean_Compiler_LCNF_initFn____x40_Lean_Compiler_LCNF_Main___hyg_1452____closed__15); -l_Lean_Compiler_LCNF_initFn____x40_Lean_Compiler_LCNF_Main___hyg_1452____closed__16 = _init_l_Lean_Compiler_LCNF_initFn____x40_Lean_Compiler_LCNF_Main___hyg_1452____closed__16(); -lean_mark_persistent(l_Lean_Compiler_LCNF_initFn____x40_Lean_Compiler_LCNF_Main___hyg_1452____closed__16); -l_Lean_Compiler_LCNF_initFn____x40_Lean_Compiler_LCNF_Main___hyg_1452____closed__17 = _init_l_Lean_Compiler_LCNF_initFn____x40_Lean_Compiler_LCNF_Main___hyg_1452____closed__17(); -lean_mark_persistent(l_Lean_Compiler_LCNF_initFn____x40_Lean_Compiler_LCNF_Main___hyg_1452____closed__17); -l_Lean_Compiler_LCNF_initFn____x40_Lean_Compiler_LCNF_Main___hyg_1452____closed__18 = _init_l_Lean_Compiler_LCNF_initFn____x40_Lean_Compiler_LCNF_Main___hyg_1452____closed__18(); -lean_mark_persistent(l_Lean_Compiler_LCNF_initFn____x40_Lean_Compiler_LCNF_Main___hyg_1452____closed__18); -l_Lean_Compiler_LCNF_initFn____x40_Lean_Compiler_LCNF_Main___hyg_1452____closed__19 = 
_init_l_Lean_Compiler_LCNF_initFn____x40_Lean_Compiler_LCNF_Main___hyg_1452____closed__19(); -lean_mark_persistent(l_Lean_Compiler_LCNF_initFn____x40_Lean_Compiler_LCNF_Main___hyg_1452____closed__19); -l_Lean_Compiler_LCNF_initFn____x40_Lean_Compiler_LCNF_Main___hyg_1452____closed__20 = _init_l_Lean_Compiler_LCNF_initFn____x40_Lean_Compiler_LCNF_Main___hyg_1452____closed__20(); -lean_mark_persistent(l_Lean_Compiler_LCNF_initFn____x40_Lean_Compiler_LCNF_Main___hyg_1452____closed__20); -l_Lean_Compiler_LCNF_initFn____x40_Lean_Compiler_LCNF_Main___hyg_1452____closed__21 = _init_l_Lean_Compiler_LCNF_initFn____x40_Lean_Compiler_LCNF_Main___hyg_1452____closed__21(); -lean_mark_persistent(l_Lean_Compiler_LCNF_initFn____x40_Lean_Compiler_LCNF_Main___hyg_1452____closed__21); -l_Lean_Compiler_LCNF_initFn____x40_Lean_Compiler_LCNF_Main___hyg_1452____closed__22 = _init_l_Lean_Compiler_LCNF_initFn____x40_Lean_Compiler_LCNF_Main___hyg_1452____closed__22(); -lean_mark_persistent(l_Lean_Compiler_LCNF_initFn____x40_Lean_Compiler_LCNF_Main___hyg_1452____closed__22); -l_Lean_Compiler_LCNF_initFn____x40_Lean_Compiler_LCNF_Main___hyg_1452____closed__23 = _init_l_Lean_Compiler_LCNF_initFn____x40_Lean_Compiler_LCNF_Main___hyg_1452____closed__23(); -lean_mark_persistent(l_Lean_Compiler_LCNF_initFn____x40_Lean_Compiler_LCNF_Main___hyg_1452____closed__23); -if (builtin) {res = l_Lean_Compiler_LCNF_initFn____x40_Lean_Compiler_LCNF_Main___hyg_1452_(lean_io_mk_world()); +l_Lean_Compiler_LCNF_initFn____x40_Lean_Compiler_LCNF_Main___hyg_1699____closed__1 = _init_l_Lean_Compiler_LCNF_initFn____x40_Lean_Compiler_LCNF_Main___hyg_1699____closed__1(); +lean_mark_persistent(l_Lean_Compiler_LCNF_initFn____x40_Lean_Compiler_LCNF_Main___hyg_1699____closed__1); +l_Lean_Compiler_LCNF_initFn____x40_Lean_Compiler_LCNF_Main___hyg_1699____closed__2 = _init_l_Lean_Compiler_LCNF_initFn____x40_Lean_Compiler_LCNF_Main___hyg_1699____closed__2(); +lean_mark_persistent(l_Lean_Compiler_LCNF_initFn____x40_Lean_Compiler_LCNF_Main___hyg_1699____closed__2); +l_Lean_Compiler_LCNF_initFn____x40_Lean_Compiler_LCNF_Main___hyg_1699____closed__3 = _init_l_Lean_Compiler_LCNF_initFn____x40_Lean_Compiler_LCNF_Main___hyg_1699____closed__3(); +lean_mark_persistent(l_Lean_Compiler_LCNF_initFn____x40_Lean_Compiler_LCNF_Main___hyg_1699____closed__3); +l_Lean_Compiler_LCNF_initFn____x40_Lean_Compiler_LCNF_Main___hyg_1699____closed__4 = _init_l_Lean_Compiler_LCNF_initFn____x40_Lean_Compiler_LCNF_Main___hyg_1699____closed__4(); +lean_mark_persistent(l_Lean_Compiler_LCNF_initFn____x40_Lean_Compiler_LCNF_Main___hyg_1699____closed__4); +l_Lean_Compiler_LCNF_initFn____x40_Lean_Compiler_LCNF_Main___hyg_1699____closed__5 = _init_l_Lean_Compiler_LCNF_initFn____x40_Lean_Compiler_LCNF_Main___hyg_1699____closed__5(); +lean_mark_persistent(l_Lean_Compiler_LCNF_initFn____x40_Lean_Compiler_LCNF_Main___hyg_1699____closed__5); +l_Lean_Compiler_LCNF_initFn____x40_Lean_Compiler_LCNF_Main___hyg_1699____closed__6 = _init_l_Lean_Compiler_LCNF_initFn____x40_Lean_Compiler_LCNF_Main___hyg_1699____closed__6(); +lean_mark_persistent(l_Lean_Compiler_LCNF_initFn____x40_Lean_Compiler_LCNF_Main___hyg_1699____closed__6); +l_Lean_Compiler_LCNF_initFn____x40_Lean_Compiler_LCNF_Main___hyg_1699____closed__7 = _init_l_Lean_Compiler_LCNF_initFn____x40_Lean_Compiler_LCNF_Main___hyg_1699____closed__7(); +lean_mark_persistent(l_Lean_Compiler_LCNF_initFn____x40_Lean_Compiler_LCNF_Main___hyg_1699____closed__7); +l_Lean_Compiler_LCNF_initFn____x40_Lean_Compiler_LCNF_Main___hyg_1699____closed__8 = 
_init_l_Lean_Compiler_LCNF_initFn____x40_Lean_Compiler_LCNF_Main___hyg_1699____closed__8(); +lean_mark_persistent(l_Lean_Compiler_LCNF_initFn____x40_Lean_Compiler_LCNF_Main___hyg_1699____closed__8); +l_Lean_Compiler_LCNF_initFn____x40_Lean_Compiler_LCNF_Main___hyg_1699____closed__9 = _init_l_Lean_Compiler_LCNF_initFn____x40_Lean_Compiler_LCNF_Main___hyg_1699____closed__9(); +lean_mark_persistent(l_Lean_Compiler_LCNF_initFn____x40_Lean_Compiler_LCNF_Main___hyg_1699____closed__9); +l_Lean_Compiler_LCNF_initFn____x40_Lean_Compiler_LCNF_Main___hyg_1699____closed__10 = _init_l_Lean_Compiler_LCNF_initFn____x40_Lean_Compiler_LCNF_Main___hyg_1699____closed__10(); +lean_mark_persistent(l_Lean_Compiler_LCNF_initFn____x40_Lean_Compiler_LCNF_Main___hyg_1699____closed__10); +l_Lean_Compiler_LCNF_initFn____x40_Lean_Compiler_LCNF_Main___hyg_1699____closed__11 = _init_l_Lean_Compiler_LCNF_initFn____x40_Lean_Compiler_LCNF_Main___hyg_1699____closed__11(); +lean_mark_persistent(l_Lean_Compiler_LCNF_initFn____x40_Lean_Compiler_LCNF_Main___hyg_1699____closed__11); +l_Lean_Compiler_LCNF_initFn____x40_Lean_Compiler_LCNF_Main___hyg_1699____closed__12 = _init_l_Lean_Compiler_LCNF_initFn____x40_Lean_Compiler_LCNF_Main___hyg_1699____closed__12(); +lean_mark_persistent(l_Lean_Compiler_LCNF_initFn____x40_Lean_Compiler_LCNF_Main___hyg_1699____closed__12); +l_Lean_Compiler_LCNF_initFn____x40_Lean_Compiler_LCNF_Main___hyg_1699____closed__13 = _init_l_Lean_Compiler_LCNF_initFn____x40_Lean_Compiler_LCNF_Main___hyg_1699____closed__13(); +lean_mark_persistent(l_Lean_Compiler_LCNF_initFn____x40_Lean_Compiler_LCNF_Main___hyg_1699____closed__13); +l_Lean_Compiler_LCNF_initFn____x40_Lean_Compiler_LCNF_Main___hyg_1699____closed__14 = _init_l_Lean_Compiler_LCNF_initFn____x40_Lean_Compiler_LCNF_Main___hyg_1699____closed__14(); +lean_mark_persistent(l_Lean_Compiler_LCNF_initFn____x40_Lean_Compiler_LCNF_Main___hyg_1699____closed__14); +l_Lean_Compiler_LCNF_initFn____x40_Lean_Compiler_LCNF_Main___hyg_1699____closed__15 = _init_l_Lean_Compiler_LCNF_initFn____x40_Lean_Compiler_LCNF_Main___hyg_1699____closed__15(); +lean_mark_persistent(l_Lean_Compiler_LCNF_initFn____x40_Lean_Compiler_LCNF_Main___hyg_1699____closed__15); +l_Lean_Compiler_LCNF_initFn____x40_Lean_Compiler_LCNF_Main___hyg_1699____closed__16 = _init_l_Lean_Compiler_LCNF_initFn____x40_Lean_Compiler_LCNF_Main___hyg_1699____closed__16(); +lean_mark_persistent(l_Lean_Compiler_LCNF_initFn____x40_Lean_Compiler_LCNF_Main___hyg_1699____closed__16); +l_Lean_Compiler_LCNF_initFn____x40_Lean_Compiler_LCNF_Main___hyg_1699____closed__17 = _init_l_Lean_Compiler_LCNF_initFn____x40_Lean_Compiler_LCNF_Main___hyg_1699____closed__17(); +lean_mark_persistent(l_Lean_Compiler_LCNF_initFn____x40_Lean_Compiler_LCNF_Main___hyg_1699____closed__17); +l_Lean_Compiler_LCNF_initFn____x40_Lean_Compiler_LCNF_Main___hyg_1699____closed__18 = _init_l_Lean_Compiler_LCNF_initFn____x40_Lean_Compiler_LCNF_Main___hyg_1699____closed__18(); +lean_mark_persistent(l_Lean_Compiler_LCNF_initFn____x40_Lean_Compiler_LCNF_Main___hyg_1699____closed__18); +l_Lean_Compiler_LCNF_initFn____x40_Lean_Compiler_LCNF_Main___hyg_1699____closed__19 = _init_l_Lean_Compiler_LCNF_initFn____x40_Lean_Compiler_LCNF_Main___hyg_1699____closed__19(); +lean_mark_persistent(l_Lean_Compiler_LCNF_initFn____x40_Lean_Compiler_LCNF_Main___hyg_1699____closed__19); +l_Lean_Compiler_LCNF_initFn____x40_Lean_Compiler_LCNF_Main___hyg_1699____closed__20 = _init_l_Lean_Compiler_LCNF_initFn____x40_Lean_Compiler_LCNF_Main___hyg_1699____closed__20(); 
+lean_mark_persistent(l_Lean_Compiler_LCNF_initFn____x40_Lean_Compiler_LCNF_Main___hyg_1699____closed__20); +l_Lean_Compiler_LCNF_initFn____x40_Lean_Compiler_LCNF_Main___hyg_1699____closed__21 = _init_l_Lean_Compiler_LCNF_initFn____x40_Lean_Compiler_LCNF_Main___hyg_1699____closed__21(); +lean_mark_persistent(l_Lean_Compiler_LCNF_initFn____x40_Lean_Compiler_LCNF_Main___hyg_1699____closed__21); +l_Lean_Compiler_LCNF_initFn____x40_Lean_Compiler_LCNF_Main___hyg_1699____closed__22 = _init_l_Lean_Compiler_LCNF_initFn____x40_Lean_Compiler_LCNF_Main___hyg_1699____closed__22(); +lean_mark_persistent(l_Lean_Compiler_LCNF_initFn____x40_Lean_Compiler_LCNF_Main___hyg_1699____closed__22); +l_Lean_Compiler_LCNF_initFn____x40_Lean_Compiler_LCNF_Main___hyg_1699____closed__23 = _init_l_Lean_Compiler_LCNF_initFn____x40_Lean_Compiler_LCNF_Main___hyg_1699____closed__23(); +lean_mark_persistent(l_Lean_Compiler_LCNF_initFn____x40_Lean_Compiler_LCNF_Main___hyg_1699____closed__23); +if (builtin) {res = l_Lean_Compiler_LCNF_initFn____x40_Lean_Compiler_LCNF_Main___hyg_1699_(lean_io_mk_world()); if (lean_io_result_is_error(res)) return res; lean_dec_ref(res); }return lean_io_result_mk_ok(lean_box(0)); diff --git a/stage0/stdlib/Lean/Compiler/LCNF/MonoTypes.c b/stage0/stdlib/Lean/Compiler/LCNF/MonoTypes.c index 70770e7c9ce3..899625fe5f2f 100644 --- a/stage0/stdlib/Lean/Compiler/LCNF/MonoTypes.c +++ b/stage0/stdlib/Lean/Compiler/LCNF/MonoTypes.c @@ -57,7 +57,6 @@ LEAN_EXPORT lean_object* l_Std_Range_forIn_x27_loop___at_Lean_Compiler_LCNF_hasT uint8_t lean_string_dec_eq(lean_object*, lean_object*); LEAN_EXPORT lean_object* l_Array_forIn_x27Unsafe_loop___at_Lean_Compiler_LCNF_toMonoType_visitApp___spec__5___boxed(lean_object*, lean_object*, lean_object*, lean_object*, lean_object*, lean_object*, lean_object*, lean_object*, lean_object*); LEAN_EXPORT lean_object* l___private_Lean_Compiler_LCNF_MonoTypes_0__Lean_Compiler_LCNF_reprTrivialStructureInfo____x40_Lean_Compiler_LCNF_MonoTypes___hyg_249_(lean_object*, lean_object*); -uint8_t l_List_elem___at_Lean_Environment_realizeConst___spec__6(lean_object*, lean_object*); static lean_object* l_Lean_Compiler_LCNF_getRelevantCtorFields___closed__4; lean_object* l_Lean_Compiler_LCNF_CacheExtension_register___at_Lean_Compiler_LCNF_initFn____x40_Lean_Compiler_LCNF_BaseTypes___hyg_3____spec__1(lean_object*, lean_object*); static lean_object* l_Lean_Compiler_LCNF_getRelevantCtorFields___lambda__1___closed__1; @@ -128,6 +127,7 @@ static lean_object* l___private_Lean_Compiler_LCNF_MonoTypes_0__Lean_Compiler_LC static lean_object* l_Lean_Compiler_LCNF_toMonoType_visitApp___closed__5; static lean_object* l_Lean_Compiler_LCNF_getRelevantCtorFields___closed__12; lean_object* lean_array_set(lean_object*, lean_object*, lean_object*); +uint8_t l_Array_contains___at_Lean_registerInternalExceptionId___spec__1(lean_object*, lean_object*); lean_object* lean_panic_fn(lean_object*, lean_object*); lean_object* lean_nat_sub(lean_object*, lean_object*); static lean_object* l_Lean_Compiler_LCNF_toMonoType_visitApp___closed__1; @@ -1734,7 +1734,7 @@ LEAN_EXPORT lean_object* l_Lean_Compiler_LCNF_hasTrivialStructure_x3f(lean_objec { lean_object* x_5; uint8_t x_6; x_5 = l_Lean_Compiler_LCNF_builtinRuntimeTypes; -x_6 = l_List_elem___at_Lean_Environment_realizeConst___spec__6(x_1, x_5); +x_6 = l_Array_contains___at_Lean_registerInternalExceptionId___spec__1(x_5, x_1); if (x_6 == 0) { lean_object* x_7; lean_object* x_8; diff --git a/stage0/stdlib/Lean/Compiler/LCNF/ToLCNF.c 
b/stage0/stdlib/Lean/Compiler/LCNF/ToLCNF.c index 1b410dd7ca1e..8ca2e68b1eba 100644 --- a/stage0/stdlib/Lean/Compiler/LCNF/ToLCNF.c +++ b/stage0/stdlib/Lean/Compiler/LCNF/ToLCNF.c @@ -142,7 +142,6 @@ static lean_object* l_panic___at_Lean_Compiler_LCNF_ToLCNF_toLCNF_visitCasesImpl uint8_t l_Lean_MapDeclarationExtension_contains___rarg(lean_object*, lean_object*, lean_object*, lean_object*); static lean_object* l_Lean_Compiler_LCNF_ToLCNF_toLCNF_visitApp___closed__11; LEAN_EXPORT lean_object* l_Array_mapFinIdxM_map___at_Lean_Compiler_LCNF_ToLCNF_toLCNF_visitCasesImplementedBy___spec__6___boxed(lean_object*, lean_object*, lean_object*, lean_object*, lean_object*, lean_object*, lean_object*, lean_object*, lean_object*, lean_object*, lean_object*, lean_object*, lean_object*, lean_object*, lean_object*); -uint8_t l_List_elem___at_Lean_Environment_realizeConst___spec__6(lean_object*, lean_object*); LEAN_EXPORT lean_object* l_Lean_Compiler_LCNF_ToLCNF_instInhabitedElement; static lean_object* l_Lean_Compiler_LCNF_ToLCNF_bindCases_go___closed__8; lean_object* l_Lean_Compiler_LCNF_LCtx_toLocalContext(lean_object*); @@ -408,6 +407,7 @@ LEAN_EXPORT lean_object* l_Array_isEqvAux___at_Lean_Compiler_LCNF_ToLCNF_toLCNF_ LEAN_EXPORT lean_object* l_Std_Range_forIn_x27_loop___at_Lean_Compiler_LCNF_ToLCNF_toLCNF_mkOverApplication___spec__1___boxed(lean_object*, lean_object*, lean_object*, lean_object*, lean_object*, lean_object*, lean_object*, lean_object*, lean_object*, lean_object*, lean_object*, lean_object*, lean_object*); lean_object* lean_array_set(lean_object*, lean_object*, lean_object*); static lean_object* l_Lean_Compiler_LCNF_ToLCNF_toLCNF_visitQuotLift___lambda__1___closed__5; +uint8_t l_Array_contains___at_Lean_registerInternalExceptionId___spec__1(lean_object*, lean_object*); uint64_t lean_uint64_xor(uint64_t, uint64_t); LEAN_EXPORT lean_object* l_Lean_Compiler_LCNF_ToLCNF_toLCNF_visitCasesImplementedBy___boxed(lean_object*, lean_object*, lean_object*, lean_object*, lean_object*, lean_object*, lean_object*, lean_object*, lean_object*); lean_object* lean_panic_fn(lean_object*, lean_object*); @@ -17417,7 +17417,7 @@ lean_object* x_9; lean_object* x_10; lean_object* x_11; uint8_t x_12; x_9 = lean_ctor_get(x_1, 0); x_10 = l_Lean_Name_getPrefix(x_9); x_11 = l_Lean_Compiler_LCNF_builtinRuntimeTypes; -x_12 = l_List_elem___at_Lean_Environment_realizeConst___spec__6(x_10, x_11); +x_12 = l_Array_contains___at_Lean_registerInternalExceptionId___spec__1(x_11, x_10); lean_dec(x_10); if (x_12 == 0) { diff --git a/stage0/stdlib/Lean/Compiler/LCNF/Util.c b/stage0/stdlib/Lean/Compiler/LCNF/Util.c index ce4a65402ca1..762bdcdcba24 100644 --- a/stage0/stdlib/Lean/Compiler/LCNF/Util.c +++ b/stage0/stdlib/Lean/Compiler/LCNF/Util.c @@ -40,7 +40,6 @@ static lean_object* l_Lean_Compiler_LCNF_builtinRuntimeTypes___closed__36; lean_object* l_Lean_stringToMessageData(lean_object*); static lean_object* l_Lean_Compiler_LCNF_builtinRuntimeTypes___closed__12; static lean_object* l_Lean_Compiler_LCNF_builtinRuntimeTypes___closed__42; -uint8_t l_List_elem___at_Lean_Environment_realizeConst___spec__6(lean_object*, lean_object*); static lean_object* l_Lean_Compiler_LCNF_builtinRuntimeTypes___closed__6; lean_object* l_Lean_Expr_appArg_x21(lean_object*); LEAN_EXPORT lean_object* l_Lean_Compiler_LCNF_isRuntimeBultinType___boxed(lean_object*); @@ -97,6 +96,7 @@ uint8_t lean_nat_dec_eq(lean_object*, lean_object*); static lean_object* l_Lean_Compiler_LCNF_builtinRuntimeTypes___closed__45; static lean_object* 
l_Lean_Compiler_LCNF_builtinRuntimeTypes___closed__34; static lean_object* l_Lean_Compiler_LCNF_builtinRuntimeTypes___closed__1; +uint8_t l_Array_contains___at_Lean_registerInternalExceptionId___spec__1(lean_object*, lean_object*); lean_object* lean_panic_fn(lean_object*, lean_object*); lean_object* l_Lean_Expr_getAppFn(lean_object*); static lean_object* l_Lean_Compiler_LCNF_builtinRuntimeTypes___closed__35; @@ -126,6 +126,7 @@ lean_object* l_Lean_InductiveVal_numCtors(lean_object*); lean_object* lean_array_uset(lean_object*, size_t, lean_object*); static lean_object* l_Lean_Compiler_LCNF_builtinRuntimeTypes___closed__33; static lean_object* l_Lean_Compiler_LCNF_builtinRuntimeTypes___closed__23; +static lean_object* l_Lean_Compiler_LCNF_builtinRuntimeTypes___closed__46; static lean_object* l_Lean_Compiler_LCNF_builtinRuntimeTypes___closed__30; static lean_object* l_Lean_Compiler_LCNF_builtinRuntimeTypes___closed__19; LEAN_EXPORT uint8_t l_Lean_Compiler_LCNF_isCompilerRelevantMData(lean_object* x_1) { @@ -1977,11 +1978,20 @@ lean_ctor_set(x_3, 1, x_2); return x_3; } } +static lean_object* _init_l_Lean_Compiler_LCNF_builtinRuntimeTypes___closed__46() { +_start: +{ +lean_object* x_1; lean_object* x_2; +x_1 = l_Lean_Compiler_LCNF_builtinRuntimeTypes___closed__45; +x_2 = lean_array_mk(x_1); +return x_2; +} +} static lean_object* _init_l_Lean_Compiler_LCNF_builtinRuntimeTypes() { _start: { lean_object* x_1; -x_1 = l_Lean_Compiler_LCNF_builtinRuntimeTypes___closed__45; +x_1 = l_Lean_Compiler_LCNF_builtinRuntimeTypes___closed__46; return x_1; } } @@ -1990,7 +2000,7 @@ LEAN_EXPORT uint8_t l_Lean_Compiler_LCNF_isRuntimeBultinType(lean_object* x_1) { { lean_object* x_2; uint8_t x_3; x_2 = l_Lean_Compiler_LCNF_builtinRuntimeTypes; -x_3 = l_List_elem___at_Lean_Environment_realizeConst___spec__6(x_1, x_2); +x_3 = l_Array_contains___at_Lean_registerInternalExceptionId___spec__1(x_2, x_1); return x_3; } } @@ -2147,6 +2157,8 @@ l_Lean_Compiler_LCNF_builtinRuntimeTypes___closed__44 = _init_l_Lean_Compiler_LC lean_mark_persistent(l_Lean_Compiler_LCNF_builtinRuntimeTypes___closed__44); l_Lean_Compiler_LCNF_builtinRuntimeTypes___closed__45 = _init_l_Lean_Compiler_LCNF_builtinRuntimeTypes___closed__45(); lean_mark_persistent(l_Lean_Compiler_LCNF_builtinRuntimeTypes___closed__45); +l_Lean_Compiler_LCNF_builtinRuntimeTypes___closed__46 = _init_l_Lean_Compiler_LCNF_builtinRuntimeTypes___closed__46(); +lean_mark_persistent(l_Lean_Compiler_LCNF_builtinRuntimeTypes___closed__46); l_Lean_Compiler_LCNF_builtinRuntimeTypes = _init_l_Lean_Compiler_LCNF_builtinRuntimeTypes(); lean_mark_persistent(l_Lean_Compiler_LCNF_builtinRuntimeTypes); return lean_io_result_mk_ok(lean_box(0)); diff --git a/stage0/stdlib/Lean/CoreM.c b/stage0/stdlib/Lean/CoreM.c index 0b205aeeb76e..9cb9e4d4f321 100644 --- a/stage0/stdlib/Lean/CoreM.c +++ b/stage0/stdlib/Lean/CoreM.c @@ -23,7 +23,6 @@ static lean_object* l___private_Lean_CoreM_0__Lean_supportedRecursors___closed__ LEAN_EXPORT lean_object* l_Lean_Core_instMonadLogCoreM___lambda__1(lean_object*, lean_object*, lean_object*); LEAN_EXPORT lean_object* l_Lean_Core_checkMaxHeartbeats___boxed(lean_object*, lean_object*, lean_object*, lean_object*); lean_object* l_Lean_Environment_enableRealizationsForConst(lean_object*, lean_object*, lean_object*, lean_object*); -static lean_object* l_Lean_initFn____x40_Lean_CoreM___hyg_6589____closed__1; static lean_object* l_Lean_Core_instMonadCoreM___closed__1; static lean_object* l_Lean_Core_instMonadCoreM___closed__5; LEAN_EXPORT lean_object* 
l___private_Lean_CoreM_0__Lean_Core_withCurrHeartbeatsImp(lean_object*); @@ -50,13 +49,12 @@ lean_object* lean_mk_empty_array_with_capacity(lean_object*); static lean_object* l_Lean_initFn____x40_Lean_CoreM___hyg_5____closed__1; LEAN_EXPORT lean_object* l___auto____x40_Lean_CoreM___hyg_4116_; size_t lean_usize_shift_right(size_t, size_t); -LEAN_EXPORT lean_object* l_Lean_compileDecls_doCompile___lambda__4___boxed(lean_object*, lean_object*, lean_object*, lean_object*, lean_object*, lean_object*, lean_object*, lean_object*); +LEAN_EXPORT lean_object* l_Lean_compileDecls_doCompile___lambda__4___boxed(lean_object*, lean_object*, lean_object*, lean_object*, lean_object*, lean_object*, lean_object*); LEAN_EXPORT lean_object* l_List_foldlM___at_Lean_compileDecls_doCompile___spec__10(lean_object*, lean_object*, lean_object*, lean_object*, lean_object*, lean_object*); LEAN_EXPORT lean_object* l_List_elem___at_Lean_catchInternalIds___spec__1___boxed(lean_object*, lean_object*); LEAN_EXPORT lean_object* l_Lean_Core_withRestoreOrSaveFull___rarg___lambda__1___boxed(lean_object*, lean_object*, lean_object*, lean_object*, lean_object*); lean_object* l_Lean_ConstantVal_instantiateTypeLevelParams(lean_object*, lean_object*); static lean_object* l_Lean_Core_initFn____x40_Lean_CoreM___hyg_263____closed__1; -LEAN_EXPORT lean_object* l_Lean_compileDecls_doCompile___lambda__5___boxed(lean_object*, lean_object*, lean_object*, lean_object*, lean_object*, lean_object*, lean_object*); static lean_object* l_Lean_ImportM_runCoreM___rarg___closed__2; LEAN_EXPORT lean_object* l_Lean_Core_getAndEmptyMessageLog(lean_object*); LEAN_EXPORT lean_object* l_Lean_mkArrow___boxed(lean_object*, lean_object*, lean_object*, lean_object*, lean_object*); @@ -72,6 +70,7 @@ uint8_t l_List_all___rarg(lean_object*, lean_object*); lean_object* l_Lean_PersistentArray_toArray___rarg(lean_object*); static lean_object* l_Lean_compileDecls___lambda__2___closed__2; lean_object* l_Lean_profileitIOUnsafe___rarg(lean_object*, lean_object*, lean_object*, lean_object*, lean_object*); +LEAN_EXPORT lean_object* l_Lean_initFn____x40_Lean_CoreM___hyg_6713_(lean_object*); static lean_object* l_Lean_Core_initFn____x40_Lean_CoreM___hyg_263____closed__15; uint8_t l_Lean_Exception_isMaxRecDepth(lean_object*); LEAN_EXPORT lean_object* l_Lean_catchInternalIds___rarg(lean_object*, lean_object*, lean_object*, lean_object*); @@ -156,7 +155,6 @@ uint8_t lean_usize_dec_eq(size_t, size_t); LEAN_EXPORT lean_object* l_Array_forIn_x27Unsafe_loop___at_Lean_Core_wrapAsyncAsSnapshot___spec__12___lambda__1___boxed(lean_object*, lean_object*, lean_object*, lean_object*, lean_object*, lean_object*, lean_object*, lean_object*, lean_object*); static lean_object* l_Lean_initFn____x40_Lean_CoreM___hyg_5____closed__3; LEAN_EXPORT lean_object* l_Lean_Core_setMessageLog___boxed(lean_object*, lean_object*, lean_object*, lean_object*); -static lean_object* l_Lean_compileDecls_doCompile___lambda__5___closed__1; static double l_Lean_withTraceNode___at_Lean_Core_wrapAsyncAsSnapshot___spec__1___lambda__2___closed__1; static lean_object* l_Lean_Core_instantiateValueLevelParams___lambda__2___closed__1; LEAN_EXPORT lean_object* l_Lean_compileDecls_doCompile___lambda__1(lean_object*, lean_object*, lean_object*, lean_object*, lean_object*); @@ -231,6 +229,7 @@ lean_object* lean_lcnf_compile_decls(lean_object*, lean_object*, lean_object*, l extern lean_object* l_Lean_trace_profiler_useHeartbeats; lean_object* l_Lean_stringToMessageData(lean_object*); static lean_object* 
l_Lean_compileDecls___closed__2; +static lean_object* l_Lean_initFn____x40_Lean_CoreM___hyg_6713____closed__5; static lean_object* l_Lean_useDiagnosticMsg___lambda__2___closed__10; static lean_object* l___auto____x40_Lean_CoreM___hyg_3975____closed__30; LEAN_EXPORT lean_object* l_Lean_Core_saveState(lean_object*); @@ -273,7 +272,8 @@ LEAN_EXPORT lean_object* l_Lean_throwKernelException___at_Lean_compileDecls_doCo lean_object* lean_string_utf8_byte_size(lean_object*); LEAN_EXPORT lean_object* l_Lean_PersistentHashMap_insertAux___at_Lean_Core_instantiateTypeLevelParams___spec__2(lean_object*, size_t, size_t, lean_object*, lean_object*); static lean_object* l_Lean_initFn____x40_Lean_CoreM___hyg_153____closed__5; -LEAN_EXPORT lean_object* l_Lean_compileDecls_doCompile___lambda__4(lean_object*, lean_object*, uint8_t, lean_object*, lean_object*, lean_object*, lean_object*, lean_object*); +static lean_object* l_Lean_initFn____x40_Lean_CoreM___hyg_6713____closed__1; +LEAN_EXPORT lean_object* l_Lean_compileDecls_doCompile___lambda__4(lean_object*, uint8_t, lean_object*, lean_object*, lean_object*, lean_object*, lean_object*); static lean_object* l_Lean_withTraceNode___at_Lean_Core_wrapAsyncAsSnapshot___spec__1___lambda__4___closed__3; uint8_t lean_string_validate_utf8(lean_object*); LEAN_EXPORT lean_object* l_Lean_addMessageContextPartial___at_Lean_Core_instAddMessageContextCoreM___spec__1___boxed(lean_object*, lean_object*, lean_object*, lean_object*); @@ -364,14 +364,12 @@ lean_object* l_IO_addHeartbeats(lean_object*, lean_object*); LEAN_EXPORT lean_object* l_Lean_traceBlock___rarg___lambda__1___boxed(lean_object*, lean_object*, lean_object*, lean_object*); static lean_object* l_Lean_Core_instInhabitedCache___closed__3; static lean_object* l_List_foldlM___at___private_Lean_CoreM_0__Lean_checkUnsupported___spec__2___rarg___lambda__2___closed__2; -static lean_object* l_Lean_initFn____x40_Lean_CoreM___hyg_6589____closed__3; lean_object* l_instDecidableEqPos___boxed(lean_object*, lean_object*); lean_object* l_Lean_Option_get___at_Lean_profiler_threshold_getSecs___spec__1(lean_object*, lean_object*); static lean_object* l___auto____x40_Lean_CoreM___hyg_3975____closed__8; static lean_object* l___private_Lean_CoreM_0__Lean_supportedRecursors___closed__29; static lean_object* l_Lean_logAt___at_Lean_Core_wrapAsyncAsSnapshot___spec__14___closed__1; static lean_object* l_Lean_compileDecls___lambda__2___closed__1; -static lean_object* l_Lean_initFn____x40_Lean_CoreM___hyg_6589____closed__5; lean_object* l_Lean_registerTraceClass(lean_object*, uint8_t, lean_object*, lean_object*); LEAN_EXPORT lean_object* l_Lean_Core_wrapAsyncAsSnapshot(lean_object*); LEAN_EXPORT uint8_t l_Lean_Core_instMonadLogCoreM___lambda__5(lean_object*); @@ -414,6 +412,7 @@ LEAN_EXPORT lean_object* l_Lean_withTraceNode___at_Lean_traceBlock___spec__2___r LEAN_EXPORT lean_object* l_Lean_compileDecls_doCompile___lambda__2___boxed(lean_object*, lean_object*, lean_object*, lean_object*, lean_object*); LEAN_EXPORT lean_object* l_Lean_Elab_async; static lean_object* l___auto____x40_Lean_CoreM___hyg_3975____closed__40; +static lean_object* l_Lean_initFn____x40_Lean_CoreM___hyg_6713____closed__6; LEAN_EXPORT lean_object* l_Lean_withTraceNode___at_Lean_compileDecls_doCompile___spec__2(lean_object*, lean_object*, lean_object*, uint8_t, lean_object*, lean_object*, lean_object*, lean_object*); LEAN_EXPORT lean_object* l_Lean_withTraceNode___at_Lean_traceBlock___spec__2___rarg___lambda__4(lean_object*, lean_object*, lean_object*, lean_object*, 
lean_object*, lean_object*, lean_object*, lean_object*, lean_object*); lean_object* lean_st_ref_get(lean_object*, lean_object*); @@ -485,7 +484,6 @@ LEAN_EXPORT lean_object* l_Lean_Core_wrapAsyncAsSnapshot___rarg___lambda__4(lean static lean_object* l___auto____x40_Lean_CoreM___hyg_3975____closed__20; static lean_object* l_Lean_Core_initFn____x40_Lean_CoreM___hyg_263____closed__12; LEAN_EXPORT lean_object* l_Lean_withTraceNode___at_Lean_compileDecls_doCompile___spec__2___lambda__1___boxed(lean_object*, lean_object*, lean_object*, lean_object*, lean_object*, lean_object*, lean_object*, lean_object*, lean_object*); -static lean_object* l_Lean_initFn____x40_Lean_CoreM___hyg_6589____closed__4; LEAN_EXPORT lean_object* l_Lean_enableRealizationsForConst(lean_object*, lean_object*, lean_object*, lean_object*); LEAN_EXPORT lean_object* l_Array_forIn_x27Unsafe_loop___at_Lean_Core_wrapAsyncAsSnapshot___spec__9___boxed(lean_object*, lean_object*, lean_object*, lean_object*, lean_object*, lean_object*, lean_object*, lean_object*, lean_object*, lean_object*, lean_object*, lean_object*, lean_object*, lean_object*); lean_object* l_Lean_addMacroScope(lean_object*, lean_object*, lean_object*); @@ -553,8 +551,10 @@ LEAN_EXPORT lean_object* l_Lean_Core_instantiateValueLevelParams___lambda__1(lea LEAN_EXPORT lean_object* l_Lean_compileDecls(lean_object*, lean_object*, uint8_t, lean_object*, lean_object*, lean_object*); lean_object* l_Lean_Environment_PromiseCheckedResult_commitChecked(lean_object*, lean_object*, lean_object*); static lean_object* l_Lean_initFn____x40_Lean_CoreM___hyg_40____closed__5; +static lean_object* l_Lean_initFn____x40_Lean_CoreM___hyg_6713____closed__3; LEAN_EXPORT lean_object* l_Lean_Core_SavedState_restore(lean_object*, lean_object*, lean_object*, lean_object*); static lean_object* l_Lean_throwMaxRecDepthAt___at_Lean_Core_withIncRecDepth___spec__1___rarg___closed__1; +static lean_object* l_Lean_initFn____x40_Lean_CoreM___hyg_6713____closed__4; LEAN_EXPORT lean_object* l_Lean_Core_CoreM_toIO(lean_object*); lean_object* lean_task_get_own(lean_object*); LEAN_EXPORT lean_object* l_Lean_Core_tryCatchRuntimeEx___rarg___lambda__1___boxed(lean_object*, lean_object*, lean_object*, lean_object*, lean_object*, lean_object*); @@ -629,7 +629,6 @@ static lean_object* l_Lean_Core_wrapAsyncAsSnapshot___rarg___lambda__4___closed_ LEAN_EXPORT lean_object* l_Lean_Core_SavedState_restore___boxed(lean_object*, lean_object*, lean_object*, lean_object*); LEAN_EXPORT lean_object* l_Lean_throwInterruptException___at_Lean_Core_checkInterrupted___spec__1___rarg(lean_object*); LEAN_EXPORT lean_object* l_Lean_initFn____x40_Lean_CoreM___hyg_40_(lean_object*); -static lean_object* l_Lean_initFn____x40_Lean_CoreM___hyg_6589____closed__6; LEAN_EXPORT lean_object* l_Lean_setEnv___at_Lean_compileDecls_doCompile___spec__12(lean_object*, lean_object*, lean_object*, lean_object*); LEAN_EXPORT lean_object* l_Lean_Core_instMonadLogCoreM___lambda__6(lean_object*, lean_object*, lean_object*, lean_object*); LEAN_EXPORT lean_object* l_Lean_Core_instMonadTraceCoreM___lambda__3___boxed(lean_object*, lean_object*, lean_object*); @@ -689,7 +688,6 @@ LEAN_EXPORT lean_object* l_Lean_Declaration_foldExprM___at___private_Lean_CoreM_ LEAN_EXPORT lean_object* l_Lean_logMessageKind___boxed(lean_object*, lean_object*, lean_object*, lean_object*); LEAN_EXPORT lean_object* l_Lean_catchInternalIds___boxed(lean_object*, lean_object*, lean_object*); LEAN_EXPORT lean_object* l_Lean_Core_instMonadQuotationCoreM___lambda__1___boxed(lean_object*, 
lean_object*, lean_object*); -LEAN_EXPORT lean_object* l_Lean_compileDecls_doCompile___lambda__5(lean_object*, uint8_t, lean_object*, lean_object*, lean_object*, lean_object*, lean_object*); LEAN_EXPORT lean_object* l_Array_foldrMUnsafe_fold___at_Lean_mkArrowN___spec__1(lean_object*, size_t, size_t, lean_object*, lean_object*, lean_object*, lean_object*); static lean_object* l_List_foldlM___at___private_Lean_CoreM_0__Lean_checkUnsupported___spec__2___rarg___lambda__2___closed__3; lean_object* l_Lean_PersistentHashMap_mkEmptyEntries(lean_object*, lean_object*); @@ -727,7 +725,6 @@ static lean_object* l___auto____x40_Lean_CoreM___hyg_3975____closed__18; static lean_object* l_Lean_traceBlock___rarg___lambda__3___closed__1; static lean_object* l___private_Lean_CoreM_0__Lean_supportedRecursors___closed__18; LEAN_EXPORT lean_object* l_Lean_Core_withRestoreOrSaveFull___rarg(lean_object*, lean_object*, lean_object*, lean_object*, lean_object*); -static lean_object* l_Lean_initFn____x40_Lean_CoreM___hyg_6589____closed__2; static lean_object* l_Lean_Core_throwMaxHeartbeat___closed__10; static lean_object* l_Lean_Core_initFn____x40_Lean_CoreM___hyg_263____closed__11; LEAN_EXPORT lean_object* l_Lean_Core_instMonadInfoTreeCoreM___lambda__2___boxed(lean_object*, lean_object*, lean_object*, lean_object*); @@ -744,6 +741,7 @@ LEAN_EXPORT uint8_t l_List_foldlM___at___private_Lean_CoreM_0__Lean_checkUnsuppo static lean_object* l___auto____x40_Lean_CoreM___hyg_3975____closed__11; lean_object* lean_nat_mul(lean_object*, lean_object*); static lean_object* l___private_Lean_CoreM_0__Lean_supportedRecursors___closed__5; +static lean_object* l_Lean_initFn____x40_Lean_CoreM___hyg_6713____closed__2; static size_t l_Lean_PersistentHashMap_insertAux___at_Lean_Core_instantiateTypeLevelParams___spec__2___closed__2; LEAN_EXPORT lean_object* l_Lean_Core_resetMessageLog(lean_object*, lean_object*, lean_object*); static lean_object* l_Lean_Core_throwMaxHeartbeat___closed__12; @@ -818,7 +816,6 @@ extern lean_object* l_Lean_trace_profiler; uint8_t l_Lean_PersistentArray_isEmpty___rarg(lean_object*); LEAN_EXPORT lean_object* l_Lean_Core_wrapAsyncAsSnapshot___rarg___lambda__2(lean_object*, lean_object*, lean_object*, lean_object*, lean_object*, lean_object*); lean_object* lean_io_error_to_string(lean_object*); -LEAN_EXPORT lean_object* l_Lean_initFn____x40_Lean_CoreM___hyg_6589_(lean_object*); LEAN_EXPORT lean_object* l_Lean_Core_withRestoreOrSaveFull___rarg___lambda__1(lean_object*, lean_object*, lean_object*, lean_object*, lean_object*); uint8_t l___private_Lean_Message_0__Lean_beqMessageSeverity____x40_Lean_Message___hyg_107_(uint8_t, uint8_t); lean_object* lean_st_ref_set(lean_object*, lean_object*, lean_object*); @@ -949,6 +946,7 @@ LEAN_EXPORT lean_object* l_Lean_Core_getMaxHeartbeats___boxed(lean_object*); static lean_object* l_Lean_useDiagnosticMsg___closed__1; static lean_object* l_Lean_addMessageContextPartial___at_Lean_Core_instAddMessageContextCoreM___spec__1___closed__1; double lean_float_sub(double, double); +static lean_object* l_Lean_compileDecls_doCompile___lambda__4___closed__2; LEAN_EXPORT lean_object* l_Lean_Core_wrapAsync(lean_object*, lean_object*); static lean_object* l_Lean_Core_instMonadLogCoreM___lambda__6___closed__1; static lean_object* l___auto____x40_Lean_CoreM___hyg_3975____closed__16; @@ -26096,6 +26094,14 @@ return x_8; static lean_object* _init_l_Lean_compileDecls_doCompile___lambda__4___closed__1() { _start: { +lean_object* x_1; +x_1 = l_Lean_compiler_enableNew; +return x_1; +} +} +static 
lean_object* _init_l_Lean_compileDecls_doCompile___lambda__4___closed__2() { +_start: +{ lean_object* x_1; lean_object* x_2; lean_object* x_3; x_1 = lean_box(0); x_2 = l_Lean_initFn____x40_Lean_CoreM___hyg_5018____closed__1; @@ -26103,132 +26109,139 @@ x_3 = l_Lean_Name_str___override(x_1, x_2); return x_3; } } -LEAN_EXPORT lean_object* l_Lean_compileDecls_doCompile___lambda__4(lean_object* x_1, lean_object* x_2, uint8_t x_3, lean_object* x_4, lean_object* x_5, lean_object* x_6, lean_object* x_7, lean_object* x_8) { +LEAN_EXPORT lean_object* l_Lean_compileDecls_doCompile___lambda__4(lean_object* x_1, uint8_t x_2, lean_object* x_3, lean_object* x_4, lean_object* x_5, lean_object* x_6, lean_object* x_7) { _start: { -lean_object* x_9; lean_object* x_10; lean_object* x_11; uint8_t x_12; lean_object* x_13; lean_object* x_14; +lean_object* x_8; lean_object* x_9; uint8_t x_10; +x_8 = lean_ctor_get(x_5, 2); +lean_inc(x_8); +x_9 = l_Lean_compileDecls_doCompile___lambda__4___closed__1; +x_10 = l_Lean_Option_get___at___private_Lean_Util_Profile_0__Lean_get__profiler___spec__1(x_8, x_9); +if (x_10 == 0) +{ +lean_object* x_11; lean_object* x_12; lean_object* x_13; uint8_t x_14; lean_object* x_15; lean_object* x_16; lean_inc(x_1); -x_9 = lean_alloc_closure((void*)(l_Lean_compileDecls_doCompile___lambda__1___boxed), 5, 1); -lean_closure_set(x_9, 0, x_1); -x_10 = lean_alloc_closure((void*)(l_Lean_compileDecls_doCompile___lambda__2___boxed), 5, 2); -lean_closure_set(x_10, 0, x_2); -lean_closure_set(x_10, 1, x_1); -x_11 = l_Lean_compileDecls_doCompile___lambda__4___closed__1; -x_12 = 1; -x_13 = l_Lean_initFn____x40_Lean_CoreM___hyg_80____closed__3; -lean_inc(x_7); +x_11 = lean_alloc_closure((void*)(l_Lean_compileDecls_doCompile___lambda__1___boxed), 5, 1); +lean_closure_set(x_11, 0, x_1); +x_12 = lean_alloc_closure((void*)(l_Lean_compileDecls_doCompile___lambda__2___boxed), 5, 2); +lean_closure_set(x_12, 0, x_8); +lean_closure_set(x_12, 1, x_1); +x_13 = l_Lean_compileDecls_doCompile___lambda__4___closed__2; +x_14 = 1; +x_15 = l_Lean_initFn____x40_Lean_CoreM___hyg_80____closed__3; lean_inc(x_6); -x_14 = l_Lean_withTraceNode___at_Lean_compileDecls_doCompile___spec__2(x_11, x_9, x_10, x_12, x_13, x_6, x_7, x_8); -if (lean_obj_tag(x_14) == 0) +lean_inc(x_5); +x_16 = l_Lean_withTraceNode___at_Lean_compileDecls_doCompile___spec__2(x_13, x_11, x_12, x_14, x_15, x_5, x_6, x_7); +if (lean_obj_tag(x_16) == 0) { -lean_object* x_15; -x_15 = lean_ctor_get(x_14, 0); -lean_inc(x_15); -if (lean_obj_tag(x_15) == 0) +lean_object* x_17; +x_17 = lean_ctor_get(x_16, 0); +lean_inc(x_17); +if (lean_obj_tag(x_17) == 0) { -lean_object* x_16; -x_16 = lean_ctor_get(x_15, 0); -lean_inc(x_16); -lean_dec(x_15); -if (lean_obj_tag(x_16) == 12) +lean_object* x_18; +x_18 = lean_ctor_get(x_17, 0); +lean_inc(x_18); +lean_dec(x_17); +if (lean_obj_tag(x_18) == 12) { -if (x_3 == 0) +if (x_2 == 0) { -uint8_t x_17; -lean_dec(x_16); -lean_dec(x_7); +uint8_t x_19; +lean_dec(x_18); lean_dec(x_6); -lean_dec(x_4); -x_17 = !lean_is_exclusive(x_14); -if (x_17 == 0) +lean_dec(x_5); +lean_dec(x_3); +x_19 = !lean_is_exclusive(x_16); +if (x_19 == 0) { -lean_object* x_18; lean_object* x_19; -x_18 = lean_ctor_get(x_14, 0); -lean_dec(x_18); -x_19 = lean_box(0); -lean_ctor_set(x_14, 0, x_19); -return x_14; +lean_object* x_20; lean_object* x_21; +x_20 = lean_ctor_get(x_16, 0); +lean_dec(x_20); +x_21 = lean_box(0); +lean_ctor_set(x_16, 0, x_21); +return x_16; } else { -lean_object* x_20; lean_object* x_21; lean_object* x_22; -x_20 = lean_ctor_get(x_14, 1); 
-lean_inc(x_20); -lean_dec(x_14); -x_21 = lean_box(0); -x_22 = lean_alloc_ctor(0, 2, 0); -lean_ctor_set(x_22, 0, x_21); -lean_ctor_set(x_22, 1, x_20); -return x_22; +lean_object* x_22; lean_object* x_23; lean_object* x_24; +x_22 = lean_ctor_get(x_16, 1); +lean_inc(x_22); +lean_dec(x_16); +x_23 = lean_box(0); +x_24 = lean_alloc_ctor(0, 2, 0); +lean_ctor_set(x_24, 0, x_23); +lean_ctor_set(x_24, 1, x_22); +return x_24; } } else { -if (lean_obj_tag(x_4) == 0) +if (lean_obj_tag(x_3) == 0) { -lean_object* x_23; lean_object* x_24; lean_object* x_25; lean_object* x_26; -x_23 = lean_ctor_get(x_14, 1); -lean_inc(x_23); -lean_dec(x_14); -x_24 = lean_ctor_get(x_16, 0); -lean_inc(x_24); +lean_object* x_25; lean_object* x_26; lean_object* x_27; lean_object* x_28; +x_25 = lean_ctor_get(x_16, 1); +lean_inc(x_25); lean_dec(x_16); -x_25 = lean_box(0); -x_26 = l_Lean_compileDecls_doCompile___lambda__3(x_24, x_25, x_6, x_7, x_23); -lean_dec(x_7); +x_26 = lean_ctor_get(x_18, 0); +lean_inc(x_26); +lean_dec(x_18); +x_27 = lean_box(0); +x_28 = l_Lean_compileDecls_doCompile___lambda__3(x_26, x_27, x_5, x_6, x_25); lean_dec(x_6); -return x_26; +lean_dec(x_5); +return x_28; } else { -lean_object* x_27; lean_object* x_28; lean_object* x_29; lean_object* x_30; -x_27 = lean_ctor_get(x_14, 1); -lean_inc(x_27); -lean_dec(x_14); -x_28 = lean_ctor_get(x_16, 0); -lean_inc(x_28); -lean_dec(x_16); -x_29 = lean_ctor_get(x_4, 0); +lean_object* x_29; lean_object* x_30; lean_object* x_31; lean_object* x_32; +x_29 = lean_ctor_get(x_16, 1); lean_inc(x_29); -lean_dec(x_4); -x_30 = l___private_Lean_CoreM_0__Lean_checkUnsupported___at_Lean_compileDecls_doCompile___spec__6(x_29, x_6, x_7, x_27); -if (lean_obj_tag(x_30) == 0) -{ -lean_object* x_31; lean_object* x_32; lean_object* x_33; -x_31 = lean_ctor_get(x_30, 0); +lean_dec(x_16); +x_30 = lean_ctor_get(x_18, 0); +lean_inc(x_30); +lean_dec(x_18); +x_31 = lean_ctor_get(x_3, 0); lean_inc(x_31); -x_32 = lean_ctor_get(x_30, 1); -lean_inc(x_32); -lean_dec(x_30); -x_33 = l_Lean_compileDecls_doCompile___lambda__3(x_28, x_31, x_6, x_7, x_32); -lean_dec(x_7); +lean_dec(x_3); +x_32 = l___private_Lean_CoreM_0__Lean_checkUnsupported___at_Lean_compileDecls_doCompile___spec__6(x_31, x_5, x_6, x_29); +if (lean_obj_tag(x_32) == 0) +{ +lean_object* x_33; lean_object* x_34; lean_object* x_35; +x_33 = lean_ctor_get(x_32, 0); +lean_inc(x_33); +x_34 = lean_ctor_get(x_32, 1); +lean_inc(x_34); +lean_dec(x_32); +x_35 = l_Lean_compileDecls_doCompile___lambda__3(x_30, x_33, x_5, x_6, x_34); lean_dec(x_6); -lean_dec(x_31); -return x_33; +lean_dec(x_5); +lean_dec(x_33); +return x_35; } else { -uint8_t x_34; -lean_dec(x_28); -lean_dec(x_7); +uint8_t x_36; +lean_dec(x_30); lean_dec(x_6); -x_34 = !lean_is_exclusive(x_30); -if (x_34 == 0) +lean_dec(x_5); +x_36 = !lean_is_exclusive(x_32); +if (x_36 == 0) { -return x_30; +return x_32; } else { -lean_object* x_35; lean_object* x_36; lean_object* x_37; -x_35 = lean_ctor_get(x_30, 0); -x_36 = lean_ctor_get(x_30, 1); -lean_inc(x_36); -lean_inc(x_35); -lean_dec(x_30); -x_37 = lean_alloc_ctor(1, 2, 0); -lean_ctor_set(x_37, 0, x_35); -lean_ctor_set(x_37, 1, x_36); -return x_37; +lean_object* x_37; lean_object* x_38; lean_object* x_39; +x_37 = lean_ctor_get(x_32, 0); +x_38 = lean_ctor_get(x_32, 1); +lean_inc(x_38); +lean_inc(x_37); +lean_dec(x_32); +x_39 = lean_alloc_ctor(1, 2, 0); +lean_ctor_set(x_39, 0, x_37); +lean_ctor_set(x_39, 1, x_38); +return x_39; } } } @@ -26236,158 +26249,168 @@ return x_37; } else { -lean_dec(x_4); -if (x_3 == 0) +lean_dec(x_3); +if (x_2 == 0) { 
-uint8_t x_38; -lean_dec(x_16); -lean_dec(x_7); +uint8_t x_40; +lean_dec(x_18); lean_dec(x_6); -x_38 = !lean_is_exclusive(x_14); -if (x_38 == 0) +lean_dec(x_5); +x_40 = !lean_is_exclusive(x_16); +if (x_40 == 0) { -lean_object* x_39; lean_object* x_40; -x_39 = lean_ctor_get(x_14, 0); -lean_dec(x_39); -x_40 = lean_box(0); -lean_ctor_set(x_14, 0, x_40); -return x_14; +lean_object* x_41; lean_object* x_42; +x_41 = lean_ctor_get(x_16, 0); +lean_dec(x_41); +x_42 = lean_box(0); +lean_ctor_set(x_16, 0, x_42); +return x_16; } else { -lean_object* x_41; lean_object* x_42; lean_object* x_43; -x_41 = lean_ctor_get(x_14, 1); -lean_inc(x_41); -lean_dec(x_14); -x_42 = lean_box(0); -x_43 = lean_alloc_ctor(0, 2, 0); -lean_ctor_set(x_43, 0, x_42); -lean_ctor_set(x_43, 1, x_41); -return x_43; +lean_object* x_43; lean_object* x_44; lean_object* x_45; +x_43 = lean_ctor_get(x_16, 1); +lean_inc(x_43); +lean_dec(x_16); +x_44 = lean_box(0); +x_45 = lean_alloc_ctor(0, 2, 0); +lean_ctor_set(x_45, 0, x_44); +lean_ctor_set(x_45, 1, x_43); +return x_45; } } else { -lean_object* x_44; lean_object* x_45; -x_44 = lean_ctor_get(x_14, 1); -lean_inc(x_44); -lean_dec(x_14); -x_45 = l_Lean_throwKernelException___at_Lean_compileDecls_doCompile___spec__4(x_16, x_6, x_7, x_44); -lean_dec(x_7); -return x_45; +lean_object* x_46; lean_object* x_47; +x_46 = lean_ctor_get(x_16, 1); +lean_inc(x_46); +lean_dec(x_16); +x_47 = l_Lean_throwKernelException___at_Lean_compileDecls_doCompile___spec__4(x_18, x_5, x_6, x_46); +lean_dec(x_6); +return x_47; } } } else { -lean_object* x_46; lean_object* x_47; lean_object* x_48; -lean_dec(x_4); -x_46 = lean_ctor_get(x_14, 1); -lean_inc(x_46); -lean_dec(x_14); -x_47 = lean_ctor_get(x_15, 0); -lean_inc(x_47); -lean_dec(x_15); -x_48 = l_Lean_setEnv___at_Lean_compileDecls_doCompile___spec__12(x_47, x_6, x_7, x_46); -lean_dec(x_7); +lean_object* x_48; lean_object* x_49; lean_object* x_50; +lean_dec(x_3); +x_48 = lean_ctor_get(x_16, 1); +lean_inc(x_48); +lean_dec(x_16); +x_49 = lean_ctor_get(x_17, 0); +lean_inc(x_49); +lean_dec(x_17); +x_50 = l_Lean_setEnv___at_Lean_compileDecls_doCompile___spec__12(x_49, x_5, x_6, x_48); lean_dec(x_6); -return x_48; +lean_dec(x_5); +return x_50; } } else { -uint8_t x_49; -lean_dec(x_7); +uint8_t x_51; lean_dec(x_6); -lean_dec(x_4); -x_49 = !lean_is_exclusive(x_14); -if (x_49 == 0) +lean_dec(x_5); +lean_dec(x_3); +x_51 = !lean_is_exclusive(x_16); +if (x_51 == 0) { -return x_14; +return x_16; } else { -lean_object* x_50; lean_object* x_51; lean_object* x_52; -x_50 = lean_ctor_get(x_14, 0); -x_51 = lean_ctor_get(x_14, 1); -lean_inc(x_51); -lean_inc(x_50); -lean_dec(x_14); -x_52 = lean_alloc_ctor(1, 2, 0); -lean_ctor_set(x_52, 0, x_50); -lean_ctor_set(x_52, 1, x_51); -return x_52; +lean_object* x_52; lean_object* x_53; lean_object* x_54; +x_52 = lean_ctor_get(x_16, 0); +x_53 = lean_ctor_get(x_16, 1); +lean_inc(x_53); +lean_inc(x_52); +lean_dec(x_16); +x_54 = lean_alloc_ctor(1, 2, 0); +lean_ctor_set(x_54, 0, x_52); +lean_ctor_set(x_54, 1, x_53); +return x_54; } } } +else +{ +lean_object* x_55; +lean_dec(x_8); +lean_dec(x_3); +x_55 = lean_lcnf_compile_decls(x_1, x_5, x_6, x_7); +if (lean_obj_tag(x_55) == 0) +{ +uint8_t x_56; +x_56 = !lean_is_exclusive(x_55); +if (x_56 == 0) +{ +return x_55; } -static lean_object* _init_l_Lean_compileDecls_doCompile___lambda__5___closed__1() { -_start: +else { -lean_object* x_1; -x_1 = l_Lean_compiler_enableNew; -return x_1; +lean_object* x_57; lean_object* x_58; lean_object* x_59; +x_57 = lean_ctor_get(x_55, 0); +x_58 = lean_ctor_get(x_55, 1); 
+lean_inc(x_58); +lean_inc(x_57); +lean_dec(x_55); +x_59 = lean_alloc_ctor(0, 2, 0); +lean_ctor_set(x_59, 0, x_57); +lean_ctor_set(x_59, 1, x_58); +return x_59; } } -LEAN_EXPORT lean_object* l_Lean_compileDecls_doCompile___lambda__5(lean_object* x_1, uint8_t x_2, lean_object* x_3, lean_object* x_4, lean_object* x_5, lean_object* x_6, lean_object* x_7) { -_start: +else { -lean_object* x_8; lean_object* x_9; uint8_t x_10; -x_8 = lean_ctor_get(x_5, 2); -lean_inc(x_8); -x_9 = l_Lean_compileDecls_doCompile___lambda__5___closed__1; -x_10 = l_Lean_Option_get___at___private_Lean_Util_Profile_0__Lean_get__profiler___spec__1(x_8, x_9); -if (x_10 == 0) +if (x_2 == 0) { -lean_object* x_11; lean_object* x_12; -x_11 = lean_box(0); -x_12 = l_Lean_compileDecls_doCompile___lambda__4(x_1, x_8, x_2, x_3, x_11, x_5, x_6, x_7); -return x_12; +uint8_t x_60; +x_60 = !lean_is_exclusive(x_55); +if (x_60 == 0) +{ +lean_object* x_61; lean_object* x_62; +x_61 = lean_ctor_get(x_55, 0); +lean_dec(x_61); +x_62 = lean_box(0); +lean_ctor_set_tag(x_55, 0); +lean_ctor_set(x_55, 0, x_62); +return x_55; } else { -lean_object* x_13; -lean_inc(x_6); -lean_inc(x_5); -lean_inc(x_1); -x_13 = lean_lcnf_compile_decls(x_1, x_5, x_6, x_7); -if (lean_obj_tag(x_13) == 0) -{ -lean_object* x_14; lean_object* x_15; lean_object* x_16; -x_14 = lean_ctor_get(x_13, 0); -lean_inc(x_14); -x_15 = lean_ctor_get(x_13, 1); -lean_inc(x_15); -lean_dec(x_13); -x_16 = l_Lean_compileDecls_doCompile___lambda__4(x_1, x_8, x_2, x_3, x_14, x_5, x_6, x_15); -lean_dec(x_14); -return x_16; +lean_object* x_63; lean_object* x_64; lean_object* x_65; +x_63 = lean_ctor_get(x_55, 1); +lean_inc(x_63); +lean_dec(x_55); +x_64 = lean_box(0); +x_65 = lean_alloc_ctor(0, 2, 0); +lean_ctor_set(x_65, 0, x_64); +lean_ctor_set(x_65, 1, x_63); +return x_65; +} } else { -uint8_t x_17; -lean_dec(x_8); -lean_dec(x_6); -lean_dec(x_5); -lean_dec(x_3); -lean_dec(x_1); -x_17 = !lean_is_exclusive(x_13); -if (x_17 == 0) +uint8_t x_66; +x_66 = !lean_is_exclusive(x_55); +if (x_66 == 0) { -return x_13; +return x_55; } else { -lean_object* x_18; lean_object* x_19; lean_object* x_20; -x_18 = lean_ctor_get(x_13, 0); -x_19 = lean_ctor_get(x_13, 1); -lean_inc(x_19); -lean_inc(x_18); -lean_dec(x_13); -x_20 = lean_alloc_ctor(1, 2, 0); -lean_ctor_set(x_20, 0, x_18); -lean_ctor_set(x_20, 1, x_19); -return x_20; +lean_object* x_67; lean_object* x_68; lean_object* x_69; +x_67 = lean_ctor_get(x_55, 0); +x_68 = lean_ctor_get(x_55, 1); +lean_inc(x_68); +lean_inc(x_67); +lean_dec(x_55); +x_69 = lean_alloc_ctor(1, 2, 0); +lean_ctor_set(x_69, 0, x_67); +lean_ctor_set(x_69, 1, x_68); +return x_69; +} } } } @@ -26428,7 +26451,7 @@ else lean_object* x_16; lean_object* x_17; lean_free_object(x_7); x_16 = lean_box(0); -x_17 = l_Lean_compileDecls_doCompile___lambda__5(x_1, x_3, x_2, x_16, x_4, x_5, x_10); +x_17 = l_Lean_compileDecls_doCompile___lambda__4(x_1, x_3, x_2, x_16, x_4, x_5, x_10); return x_17; } } @@ -26465,7 +26488,7 @@ else { lean_object* x_26; lean_object* x_27; x_26 = lean_box(0); -x_27 = l_Lean_compileDecls_doCompile___lambda__5(x_1, x_3, x_2, x_26, x_4, x_5, x_19); +x_27 = l_Lean_compileDecls_doCompile___lambda__4(x_1, x_3, x_2, x_26, x_4, x_5, x_19); return x_27; } } @@ -26685,24 +26708,13 @@ lean_dec(x_2); return x_6; } } -LEAN_EXPORT lean_object* l_Lean_compileDecls_doCompile___lambda__4___boxed(lean_object* x_1, lean_object* x_2, lean_object* x_3, lean_object* x_4, lean_object* x_5, lean_object* x_6, lean_object* x_7, lean_object* x_8) { -_start: -{ -uint8_t x_9; lean_object* x_10; -x_9 = 
lean_unbox(x_3); -lean_dec(x_3); -x_10 = l_Lean_compileDecls_doCompile___lambda__4(x_1, x_2, x_9, x_4, x_5, x_6, x_7, x_8); -lean_dec(x_5); -return x_10; -} -} -LEAN_EXPORT lean_object* l_Lean_compileDecls_doCompile___lambda__5___boxed(lean_object* x_1, lean_object* x_2, lean_object* x_3, lean_object* x_4, lean_object* x_5, lean_object* x_6, lean_object* x_7) { +LEAN_EXPORT lean_object* l_Lean_compileDecls_doCompile___lambda__4___boxed(lean_object* x_1, lean_object* x_2, lean_object* x_3, lean_object* x_4, lean_object* x_5, lean_object* x_6, lean_object* x_7) { _start: { uint8_t x_8; lean_object* x_9; x_8 = lean_unbox(x_2); lean_dec(x_2); -x_9 = l_Lean_compileDecls_doCompile___lambda__5(x_1, x_8, x_3, x_4, x_5, x_6, x_7); +x_9 = l_Lean_compileDecls_doCompile___lambda__4(x_1, x_8, x_3, x_4, x_5, x_6, x_7); lean_dec(x_4); return x_9; } @@ -29238,7 +29250,7 @@ lean_dec(x_3); return x_5; } } -static lean_object* _init_l_Lean_initFn____x40_Lean_CoreM___hyg_6589____closed__1() { +static lean_object* _init_l_Lean_initFn____x40_Lean_CoreM___hyg_6713____closed__1() { _start: { lean_object* x_1; lean_object* x_2; lean_object* x_3; @@ -29248,63 +29260,63 @@ x_3 = l_Lean_Name_str___override(x_1, x_2); return x_3; } } -static lean_object* _init_l_Lean_initFn____x40_Lean_CoreM___hyg_6589____closed__2() { +static lean_object* _init_l_Lean_initFn____x40_Lean_CoreM___hyg_6713____closed__2() { _start: { lean_object* x_1; lean_object* x_2; lean_object* x_3; -x_1 = l_Lean_initFn____x40_Lean_CoreM___hyg_6589____closed__1; +x_1 = l_Lean_initFn____x40_Lean_CoreM___hyg_6713____closed__1; x_2 = l_Lean_Core_initFn____x40_Lean_CoreM___hyg_263____closed__8; x_3 = l_Lean_Name_str___override(x_1, x_2); return x_3; } } -static lean_object* _init_l_Lean_initFn____x40_Lean_CoreM___hyg_6589____closed__3() { +static lean_object* _init_l_Lean_initFn____x40_Lean_CoreM___hyg_6713____closed__3() { _start: { lean_object* x_1; lean_object* x_2; lean_object* x_3; -x_1 = l_Lean_initFn____x40_Lean_CoreM___hyg_6589____closed__2; +x_1 = l_Lean_initFn____x40_Lean_CoreM___hyg_6713____closed__2; x_2 = l_Lean_initFn____x40_Lean_CoreM___hyg_5____closed__5; x_3 = l_Lean_Name_str___override(x_1, x_2); return x_3; } } -static lean_object* _init_l_Lean_initFn____x40_Lean_CoreM___hyg_6589____closed__4() { +static lean_object* _init_l_Lean_initFn____x40_Lean_CoreM___hyg_6713____closed__4() { _start: { lean_object* x_1; lean_object* x_2; lean_object* x_3; -x_1 = l_Lean_initFn____x40_Lean_CoreM___hyg_6589____closed__3; +x_1 = l_Lean_initFn____x40_Lean_CoreM___hyg_6713____closed__3; x_2 = l_Lean_Core_initFn____x40_Lean_CoreM___hyg_263____closed__11; x_3 = l_Lean_Name_str___override(x_1, x_2); return x_3; } } -static lean_object* _init_l_Lean_initFn____x40_Lean_CoreM___hyg_6589____closed__5() { +static lean_object* _init_l_Lean_initFn____x40_Lean_CoreM___hyg_6713____closed__5() { _start: { lean_object* x_1; lean_object* x_2; lean_object* x_3; -x_1 = l_Lean_initFn____x40_Lean_CoreM___hyg_6589____closed__4; +x_1 = l_Lean_initFn____x40_Lean_CoreM___hyg_6713____closed__4; x_2 = l_Lean_Core_initFn____x40_Lean_CoreM___hyg_263____closed__13; x_3 = l_Lean_Name_str___override(x_1, x_2); return x_3; } } -static lean_object* _init_l_Lean_initFn____x40_Lean_CoreM___hyg_6589____closed__6() { +static lean_object* _init_l_Lean_initFn____x40_Lean_CoreM___hyg_6713____closed__6() { _start: { lean_object* x_1; lean_object* x_2; lean_object* x_3; -x_1 = l_Lean_initFn____x40_Lean_CoreM___hyg_6589____closed__5; -x_2 = lean_unsigned_to_nat(6589u); +x_1 = 
l_Lean_initFn____x40_Lean_CoreM___hyg_6713____closed__5; +x_2 = lean_unsigned_to_nat(6713u); x_3 = l_Lean_Name_num___override(x_1, x_2); return x_3; } } -LEAN_EXPORT lean_object* l_Lean_initFn____x40_Lean_CoreM___hyg_6589_(lean_object* x_1) { +LEAN_EXPORT lean_object* l_Lean_initFn____x40_Lean_CoreM___hyg_6713_(lean_object* x_1) { _start: { lean_object* x_2; uint8_t x_3; lean_object* x_4; lean_object* x_5; x_2 = l_Lean_initFn____x40_Lean_CoreM___hyg_114____closed__3; x_3 = 0; -x_4 = l_Lean_initFn____x40_Lean_CoreM___hyg_6589____closed__6; +x_4 = l_Lean_initFn____x40_Lean_CoreM___hyg_6713____closed__6; x_5 = l_Lean_registerTraceClass(x_2, x_3, x_4, x_1); if (lean_obj_tag(x_5) == 0) { @@ -30024,8 +30036,8 @@ l_Lean_compileDecls_doCompile___lambda__1___closed__2 = _init_l_Lean_compileDecl lean_mark_persistent(l_Lean_compileDecls_doCompile___lambda__1___closed__2); l_Lean_compileDecls_doCompile___lambda__4___closed__1 = _init_l_Lean_compileDecls_doCompile___lambda__4___closed__1(); lean_mark_persistent(l_Lean_compileDecls_doCompile___lambda__4___closed__1); -l_Lean_compileDecls_doCompile___lambda__5___closed__1 = _init_l_Lean_compileDecls_doCompile___lambda__5___closed__1(); -lean_mark_persistent(l_Lean_compileDecls_doCompile___lambda__5___closed__1); +l_Lean_compileDecls_doCompile___lambda__4___closed__2 = _init_l_Lean_compileDecls_doCompile___lambda__4___closed__2(); +lean_mark_persistent(l_Lean_compileDecls_doCompile___lambda__4___closed__2); l_Lean_compileDecls___lambda__2___closed__1 = _init_l_Lean_compileDecls___lambda__2___closed__1(); lean_mark_persistent(l_Lean_compileDecls___lambda__2___closed__1); l_Lean_compileDecls___lambda__2___closed__2 = _init_l_Lean_compileDecls___lambda__2___closed__2(); @@ -30061,19 +30073,19 @@ l_Lean_instMonadExceptOfExceptionCoreM___closed__3 = _init_l_Lean_instMonadExcep lean_mark_persistent(l_Lean_instMonadExceptOfExceptionCoreM___closed__3); l_Lean_instMonadExceptOfExceptionCoreM = _init_l_Lean_instMonadExceptOfExceptionCoreM(); lean_mark_persistent(l_Lean_instMonadExceptOfExceptionCoreM); -l_Lean_initFn____x40_Lean_CoreM___hyg_6589____closed__1 = _init_l_Lean_initFn____x40_Lean_CoreM___hyg_6589____closed__1(); -lean_mark_persistent(l_Lean_initFn____x40_Lean_CoreM___hyg_6589____closed__1); -l_Lean_initFn____x40_Lean_CoreM___hyg_6589____closed__2 = _init_l_Lean_initFn____x40_Lean_CoreM___hyg_6589____closed__2(); -lean_mark_persistent(l_Lean_initFn____x40_Lean_CoreM___hyg_6589____closed__2); -l_Lean_initFn____x40_Lean_CoreM___hyg_6589____closed__3 = _init_l_Lean_initFn____x40_Lean_CoreM___hyg_6589____closed__3(); -lean_mark_persistent(l_Lean_initFn____x40_Lean_CoreM___hyg_6589____closed__3); -l_Lean_initFn____x40_Lean_CoreM___hyg_6589____closed__4 = _init_l_Lean_initFn____x40_Lean_CoreM___hyg_6589____closed__4(); -lean_mark_persistent(l_Lean_initFn____x40_Lean_CoreM___hyg_6589____closed__4); -l_Lean_initFn____x40_Lean_CoreM___hyg_6589____closed__5 = _init_l_Lean_initFn____x40_Lean_CoreM___hyg_6589____closed__5(); -lean_mark_persistent(l_Lean_initFn____x40_Lean_CoreM___hyg_6589____closed__5); -l_Lean_initFn____x40_Lean_CoreM___hyg_6589____closed__6 = _init_l_Lean_initFn____x40_Lean_CoreM___hyg_6589____closed__6(); -lean_mark_persistent(l_Lean_initFn____x40_Lean_CoreM___hyg_6589____closed__6); -if (builtin) {res = l_Lean_initFn____x40_Lean_CoreM___hyg_6589_(lean_io_mk_world()); +l_Lean_initFn____x40_Lean_CoreM___hyg_6713____closed__1 = _init_l_Lean_initFn____x40_Lean_CoreM___hyg_6713____closed__1(); 
+lean_mark_persistent(l_Lean_initFn____x40_Lean_CoreM___hyg_6713____closed__1); +l_Lean_initFn____x40_Lean_CoreM___hyg_6713____closed__2 = _init_l_Lean_initFn____x40_Lean_CoreM___hyg_6713____closed__2(); +lean_mark_persistent(l_Lean_initFn____x40_Lean_CoreM___hyg_6713____closed__2); +l_Lean_initFn____x40_Lean_CoreM___hyg_6713____closed__3 = _init_l_Lean_initFn____x40_Lean_CoreM___hyg_6713____closed__3(); +lean_mark_persistent(l_Lean_initFn____x40_Lean_CoreM___hyg_6713____closed__3); +l_Lean_initFn____x40_Lean_CoreM___hyg_6713____closed__4 = _init_l_Lean_initFn____x40_Lean_CoreM___hyg_6713____closed__4(); +lean_mark_persistent(l_Lean_initFn____x40_Lean_CoreM___hyg_6713____closed__4); +l_Lean_initFn____x40_Lean_CoreM___hyg_6713____closed__5 = _init_l_Lean_initFn____x40_Lean_CoreM___hyg_6713____closed__5(); +lean_mark_persistent(l_Lean_initFn____x40_Lean_CoreM___hyg_6713____closed__5); +l_Lean_initFn____x40_Lean_CoreM___hyg_6713____closed__6 = _init_l_Lean_initFn____x40_Lean_CoreM___hyg_6713____closed__6(); +lean_mark_persistent(l_Lean_initFn____x40_Lean_CoreM___hyg_6713____closed__6); +if (builtin) {res = l_Lean_initFn____x40_Lean_CoreM___hyg_6713_(lean_io_mk_world()); if (lean_io_result_is_error(res)) return res; lean_dec_ref(res); }return lean_io_result_mk_ok(lean_box(0)); diff --git a/stage0/stdlib/Lean/Data/Json/FromToJson.c b/stage0/stdlib/Lean/Data/Json/FromToJson.c index f13f0cd35f1d..6d62916b19ef 100644 --- a/stage0/stdlib/Lean/Data/Json/FromToJson.c +++ b/stage0/stdlib/Lean/Data/Json/FromToJson.c @@ -37,6 +37,7 @@ uint64_t lean_uint64_of_nat(lean_object*); static lean_object* l_Lean_instFromJsonEmpty___closed__2; LEAN_EXPORT lean_object* l_Lean_instFromJsonFloat___closed__14___boxed__const__1; LEAN_EXPORT lean_object* l_Lean_instToJsonFilePath(lean_object*); +LEAN_EXPORT lean_object* l_Lean_instToJsonNameMap___rarg(lean_object*, lean_object*); LEAN_EXPORT lean_object* l_Lean_instFromJsonString; LEAN_EXPORT lean_object* l_Lean_instFromJsonJsonNumber; lean_object* l_Lean_Name_toString(lean_object*, uint8_t, lean_object*); @@ -52,6 +53,7 @@ static lean_object* l_Lean_instFromJsonFloat___closed__8; static double l_Lean_instFromJsonFloat___closed__13; LEAN_EXPORT lean_object* l_Lean_instFromJsonOption(lean_object*); LEAN_EXPORT lean_object* l_Lean_instToJsonJsonNumber(lean_object*); +LEAN_EXPORT lean_object* l_Lean_instFromJsonNameMap___rarg(lean_object*, lean_object*); LEAN_EXPORT lean_object* l_Lean_instToJsonBool(uint8_t); LEAN_EXPORT lean_object* l_Lean_instToJsonName(lean_object*); LEAN_EXPORT lean_object* l_Lean_instFromJsonUSize___lambda__1(lean_object*, lean_object*); @@ -71,6 +73,7 @@ LEAN_EXPORT lean_object* l_Lean_instFromJsonRBMapString(lean_object*); lean_object* l_Lean_Json_getObjVal_x3f(lean_object*, lean_object*); LEAN_EXPORT lean_object* l_Lean_instToJsonUSize(size_t); static double l_Lean_instFromJsonFloat___closed__9; +LEAN_EXPORT lean_object* l_Lean_RBNode_foldM___at_Lean_instFromJsonNameMap___spec__1(lean_object*); LEAN_EXPORT lean_object* l_Lean_instToJsonName___lambda__1___boxed(lean_object*); LEAN_EXPORT lean_object* l_Lean_instToJsonArray___rarg(lean_object*, lean_object*); lean_object* l_Lean_Json_getStr_x3f(lean_object*); @@ -98,6 +101,7 @@ LEAN_EXPORT lean_object* l_Array_mapMUnsafe_map___at_Lean_instFromJsonArray___sp LEAN_EXPORT lean_object* l_Lean_instToJsonList___rarg(lean_object*, lean_object*); lean_object* l_Lean_Json_setObjVal_x21(lean_object*, lean_object*, lean_object*); lean_object* l_Lean_Json_getObjValD(lean_object*, lean_object*); +LEAN_EXPORT 
lean_object* l_Lean_RBNode_foldM___at_Lean_instFromJsonNameMap___spec__1___rarg(lean_object*, lean_object*, lean_object*); LEAN_EXPORT lean_object* l_Array_mapMUnsafe_map___at_Lean_instToJsonArray___spec__1___rarg___boxed(lean_object*, lean_object*, lean_object*, lean_object*); LEAN_EXPORT lean_object* l_Lean_instFromJsonFloat___closed__12___boxed__const__1; LEAN_EXPORT lean_object* l_Lean_Json_instToJsonStructured(lean_object*); @@ -121,6 +125,7 @@ lean_object* l_Lean_RBNode_insert___rarg(lean_object*, lean_object*, lean_object LEAN_EXPORT lean_object* l_Lean_instFromJsonInt; static lean_object* l_Lean_instFromJsonName___closed__1; LEAN_EXPORT lean_object* l_Lean_instFromJsonJson(lean_object*); +lean_object* l_Lean_RBNode_insert___at_Lean_NameMap_insert___spec__1___rarg(lean_object*, lean_object*, lean_object*); extern lean_object* l_System_Platform_numBits; static lean_object* l_Lean_instToJsonJson___closed__1; LEAN_EXPORT lean_object* l_Array_mapMUnsafe_map___at_Lean_instToJsonArray___spec__1(lean_object*); @@ -132,6 +137,7 @@ lean_object* l_Lean_Syntax_decodeNatLitVal_x3f(lean_object*); LEAN_EXPORT lean_object* l_Array_mapMUnsafe_map___at_Lean_instFromJsonList___spec__1(lean_object*); LEAN_EXPORT lean_object* l_Lean_instToJsonArray(lean_object*); LEAN_EXPORT lean_object* l_Lean_bignumFromJson_x3f(lean_object*); +static lean_object* l_Lean_instFromJsonNameMap___rarg___closed__1; LEAN_EXPORT lean_object* l_Lean_instFromJsonUInt64___lambda__1___boxed(lean_object*, lean_object*); static lean_object* l_Lean_instFromJsonFloat___closed__14; lean_object* lean_usize_to_nat(size_t); @@ -160,9 +166,11 @@ lean_object* l_id___rarg___boxed(lean_object*); LEAN_EXPORT lean_object* l_Lean_instFromJsonRBMapString___rarg(lean_object*, lean_object*, lean_object*); lean_object* l_String_toName(lean_object*); LEAN_EXPORT lean_object* l_Lean_instFromJsonList___rarg(lean_object*, lean_object*); +LEAN_EXPORT lean_object* l_Lean_instFromJsonNameMap(lean_object*); lean_object* l_Lean_Json_getNat_x3f(lean_object*); static double l_Lean_instFromJsonFloat___closed__7; lean_object* l_Lean_Name_getString_x21(lean_object*); +LEAN_EXPORT lean_object* l_Lean_RBNode_fold___at_Lean_instToJsonNameMap___spec__1___rarg(lean_object*, lean_object*, lean_object*); LEAN_EXPORT lean_object* l_Lean_bignumToJson(lean_object*); LEAN_EXPORT lean_object* l_Lean_instFromJsonList(lean_object*); lean_object* lean_array_mk(lean_object*); @@ -171,6 +179,7 @@ size_t lean_usize_add(size_t, size_t); lean_object* lean_array_uget(lean_object*, size_t); size_t lean_array_size(lean_object*); static double l_Lean_instFromJsonFloat___closed__10; +LEAN_EXPORT lean_object* l_Lean_RBNode_fold___at_Lean_instToJsonNameMap___spec__1(lean_object*); LEAN_EXPORT lean_object* l_Lean_instFromJsonProd___rarg(lean_object*, lean_object*, lean_object*); LEAN_EXPORT lean_object* l_Lean_RBNode_map___at_Lean_instToJsonRBMapString___spec__1(lean_object*); static lean_object* l_Lean_Json_instFromJsonStructured___closed__1; @@ -186,6 +195,7 @@ uint8_t lean_usize_dec_lt(size_t, size_t); static lean_object* l_Lean_Json_parseTagged___closed__4; static lean_object* l_Lean_instFromJsonFloat___closed__2; static lean_object* l_Lean_instFromJsonOption___rarg___closed__1; +lean_object* l_Lean_RBNode_insert___at_Lean_Json_mkObj___spec__1(lean_object*, lean_object*, lean_object*); lean_object* l_Lean_Json_pretty(lean_object*, lean_object*); LEAN_EXPORT lean_object* l_Lean_Json_toStructured_x3f(lean_object*); lean_object* lean_array_uset(lean_object*, size_t, lean_object*); @@ 
-193,6 +203,7 @@ LEAN_EXPORT lean_object* l_Lean_instToJsonFloat(double); static lean_object* l_Lean_instFromJsonUSize___closed__3; lean_object* l___private_Init_Data_Repr_0__Nat_reprFast(lean_object*); LEAN_EXPORT lean_object* l_Lean_instToJsonOption(lean_object*); +LEAN_EXPORT lean_object* l_Lean_instToJsonNameMap(lean_object*); lean_object* l_Lean_Json_getInt_x3f(lean_object*); LEAN_EXPORT lean_object* l_Lean_instFromJsonJson(lean_object* x_1) { _start: @@ -1698,6 +1709,462 @@ x_3 = lean_box(x_2); return x_3; } } +LEAN_EXPORT lean_object* l_Lean_RBNode_foldM___at_Lean_instFromJsonNameMap___spec__1___rarg(lean_object* x_1, lean_object* x_2, lean_object* x_3) { +_start: +{ +if (lean_obj_tag(x_3) == 0) +{ +lean_object* x_4; +lean_dec(x_1); +x_4 = lean_alloc_ctor(1, 1, 0); +lean_ctor_set(x_4, 0, x_2); +return x_4; +} +else +{ +lean_object* x_5; lean_object* x_6; lean_object* x_7; lean_object* x_8; lean_object* x_9; +x_5 = lean_ctor_get(x_3, 0); +lean_inc(x_5); +x_6 = lean_ctor_get(x_3, 1); +lean_inc(x_6); +x_7 = lean_ctor_get(x_3, 2); +lean_inc(x_7); +x_8 = lean_ctor_get(x_3, 3); +lean_inc(x_8); +lean_dec(x_3); +lean_inc(x_1); +x_9 = l_Lean_RBNode_foldM___at_Lean_instFromJsonNameMap___spec__1___rarg(x_1, x_2, x_5); +if (lean_obj_tag(x_9) == 0) +{ +uint8_t x_10; +lean_dec(x_8); +lean_dec(x_7); +lean_dec(x_6); +lean_dec(x_1); +x_10 = !lean_is_exclusive(x_9); +if (x_10 == 0) +{ +return x_9; +} +else +{ +lean_object* x_11; lean_object* x_12; +x_11 = lean_ctor_get(x_9, 0); +lean_inc(x_11); +lean_dec(x_9); +x_12 = lean_alloc_ctor(0, 1, 0); +lean_ctor_set(x_12, 0, x_11); +return x_12; +} +} +else +{ +uint8_t x_13; +x_13 = !lean_is_exclusive(x_9); +if (x_13 == 0) +{ +lean_object* x_14; lean_object* x_15; uint8_t x_16; +x_14 = lean_ctor_get(x_9, 0); +x_15 = l_Lean_instFromJsonName___closed__1; +x_16 = lean_string_dec_eq(x_6, x_15); +if (x_16 == 0) +{ +lean_object* x_17; uint8_t x_18; +lean_inc(x_6); +x_17 = l_String_toName(x_6); +x_18 = l_Lean_Name_isAnonymous(x_17); +if (x_18 == 0) +{ +lean_object* x_19; +lean_free_object(x_9); +lean_dec(x_6); +lean_inc(x_1); +x_19 = lean_apply_1(x_1, x_7); +if (lean_obj_tag(x_19) == 0) +{ +uint8_t x_20; +lean_dec(x_17); +lean_dec(x_14); +lean_dec(x_8); +lean_dec(x_1); +x_20 = !lean_is_exclusive(x_19); +if (x_20 == 0) +{ +return x_19; +} +else +{ +lean_object* x_21; lean_object* x_22; +x_21 = lean_ctor_get(x_19, 0); +lean_inc(x_21); +lean_dec(x_19); +x_22 = lean_alloc_ctor(0, 1, 0); +lean_ctor_set(x_22, 0, x_21); +return x_22; +} +} +else +{ +lean_object* x_23; lean_object* x_24; +x_23 = lean_ctor_get(x_19, 0); +lean_inc(x_23); +lean_dec(x_19); +x_24 = l_Lean_RBNode_insert___at_Lean_NameMap_insert___spec__1___rarg(x_14, x_17, x_23); +x_2 = x_24; +x_3 = x_8; +goto _start; +} +} +else +{ +lean_object* x_26; lean_object* x_27; lean_object* x_28; lean_object* x_29; +lean_dec(x_17); +lean_dec(x_14); +lean_dec(x_8); +lean_dec(x_7); +lean_dec(x_1); +x_26 = l_Lean_instFromJsonName___closed__2; +x_27 = lean_string_append(x_26, x_6); +lean_dec(x_6); +x_28 = l_Lean_instFromJsonArray___rarg___closed__2; +x_29 = lean_string_append(x_27, x_28); +lean_ctor_set_tag(x_9, 0); +lean_ctor_set(x_9, 0, x_29); +return x_9; +} +} +else +{ +lean_object* x_30; +lean_free_object(x_9); +lean_dec(x_6); +lean_inc(x_1); +x_30 = lean_apply_1(x_1, x_7); +if (lean_obj_tag(x_30) == 0) +{ +uint8_t x_31; +lean_dec(x_14); +lean_dec(x_8); +lean_dec(x_1); +x_31 = !lean_is_exclusive(x_30); +if (x_31 == 0) +{ +return x_30; +} +else +{ +lean_object* x_32; lean_object* x_33; +x_32 = lean_ctor_get(x_30, 0); 
+lean_inc(x_32); +lean_dec(x_30); +x_33 = lean_alloc_ctor(0, 1, 0); +lean_ctor_set(x_33, 0, x_32); +return x_33; +} +} +else +{ +lean_object* x_34; lean_object* x_35; lean_object* x_36; +x_34 = lean_ctor_get(x_30, 0); +lean_inc(x_34); +lean_dec(x_30); +x_35 = lean_box(0); +x_36 = l_Lean_RBNode_insert___at_Lean_NameMap_insert___spec__1___rarg(x_14, x_35, x_34); +x_2 = x_36; +x_3 = x_8; +goto _start; +} +} +} +else +{ +lean_object* x_38; lean_object* x_39; uint8_t x_40; +x_38 = lean_ctor_get(x_9, 0); +lean_inc(x_38); +lean_dec(x_9); +x_39 = l_Lean_instFromJsonName___closed__1; +x_40 = lean_string_dec_eq(x_6, x_39); +if (x_40 == 0) +{ +lean_object* x_41; uint8_t x_42; +lean_inc(x_6); +x_41 = l_String_toName(x_6); +x_42 = l_Lean_Name_isAnonymous(x_41); +if (x_42 == 0) +{ +lean_object* x_43; +lean_dec(x_6); +lean_inc(x_1); +x_43 = lean_apply_1(x_1, x_7); +if (lean_obj_tag(x_43) == 0) +{ +lean_object* x_44; lean_object* x_45; lean_object* x_46; +lean_dec(x_41); +lean_dec(x_38); +lean_dec(x_8); +lean_dec(x_1); +x_44 = lean_ctor_get(x_43, 0); +lean_inc(x_44); +if (lean_is_exclusive(x_43)) { + lean_ctor_release(x_43, 0); + x_45 = x_43; +} else { + lean_dec_ref(x_43); + x_45 = lean_box(0); +} +if (lean_is_scalar(x_45)) { + x_46 = lean_alloc_ctor(0, 1, 0); +} else { + x_46 = x_45; +} +lean_ctor_set(x_46, 0, x_44); +return x_46; +} +else +{ +lean_object* x_47; lean_object* x_48; +x_47 = lean_ctor_get(x_43, 0); +lean_inc(x_47); +lean_dec(x_43); +x_48 = l_Lean_RBNode_insert___at_Lean_NameMap_insert___spec__1___rarg(x_38, x_41, x_47); +x_2 = x_48; +x_3 = x_8; +goto _start; +} +} +else +{ +lean_object* x_50; lean_object* x_51; lean_object* x_52; lean_object* x_53; lean_object* x_54; +lean_dec(x_41); +lean_dec(x_38); +lean_dec(x_8); +lean_dec(x_7); +lean_dec(x_1); +x_50 = l_Lean_instFromJsonName___closed__2; +x_51 = lean_string_append(x_50, x_6); +lean_dec(x_6); +x_52 = l_Lean_instFromJsonArray___rarg___closed__2; +x_53 = lean_string_append(x_51, x_52); +x_54 = lean_alloc_ctor(0, 1, 0); +lean_ctor_set(x_54, 0, x_53); +return x_54; +} +} +else +{ +lean_object* x_55; +lean_dec(x_6); +lean_inc(x_1); +x_55 = lean_apply_1(x_1, x_7); +if (lean_obj_tag(x_55) == 0) +{ +lean_object* x_56; lean_object* x_57; lean_object* x_58; +lean_dec(x_38); +lean_dec(x_8); +lean_dec(x_1); +x_56 = lean_ctor_get(x_55, 0); +lean_inc(x_56); +if (lean_is_exclusive(x_55)) { + lean_ctor_release(x_55, 0); + x_57 = x_55; +} else { + lean_dec_ref(x_55); + x_57 = lean_box(0); +} +if (lean_is_scalar(x_57)) { + x_58 = lean_alloc_ctor(0, 1, 0); +} else { + x_58 = x_57; +} +lean_ctor_set(x_58, 0, x_56); +return x_58; +} +else +{ +lean_object* x_59; lean_object* x_60; lean_object* x_61; +x_59 = lean_ctor_get(x_55, 0); +lean_inc(x_59); +lean_dec(x_55); +x_60 = lean_box(0); +x_61 = l_Lean_RBNode_insert___at_Lean_NameMap_insert___spec__1___rarg(x_38, x_60, x_59); +x_2 = x_61; +x_3 = x_8; +goto _start; +} +} +} +} +} +} +} +LEAN_EXPORT lean_object* l_Lean_RBNode_foldM___at_Lean_instFromJsonNameMap___spec__1(lean_object* x_1) { +_start: +{ +lean_object* x_2; +x_2 = lean_alloc_closure((void*)(l_Lean_RBNode_foldM___at_Lean_instFromJsonNameMap___spec__1___rarg), 3, 0); +return x_2; +} +} +static lean_object* _init_l_Lean_instFromJsonNameMap___rarg___closed__1() { +_start: +{ +lean_object* x_1; +x_1 = lean_mk_string_unchecked("expected a `NameMap`, got '", 27, 27); +return x_1; +} +} +LEAN_EXPORT lean_object* l_Lean_instFromJsonNameMap___rarg(lean_object* x_1, lean_object* x_2) { +_start: +{ +switch (lean_obj_tag(x_2)) { +case 0: +{ +lean_object* x_3; 
lean_object* x_4; lean_object* x_5; lean_object* x_6; lean_object* x_7; lean_object* x_8; lean_object* x_9; +lean_dec(x_1); +x_3 = lean_unsigned_to_nat(80u); +x_4 = l_Lean_Json_pretty(x_2, x_3); +x_5 = l_Lean_instFromJsonNameMap___rarg___closed__1; +x_6 = lean_string_append(x_5, x_4); +lean_dec(x_4); +x_7 = l_Lean_instFromJsonArray___rarg___closed__2; +x_8 = lean_string_append(x_6, x_7); +x_9 = lean_alloc_ctor(0, 1, 0); +lean_ctor_set(x_9, 0, x_8); +return x_9; +} +case 1: +{ +lean_object* x_10; lean_object* x_11; lean_object* x_12; lean_object* x_13; lean_object* x_14; lean_object* x_15; lean_object* x_16; +lean_dec(x_1); +x_10 = lean_unsigned_to_nat(80u); +x_11 = l_Lean_Json_pretty(x_2, x_10); +x_12 = l_Lean_instFromJsonNameMap___rarg___closed__1; +x_13 = lean_string_append(x_12, x_11); +lean_dec(x_11); +x_14 = l_Lean_instFromJsonArray___rarg___closed__2; +x_15 = lean_string_append(x_13, x_14); +x_16 = lean_alloc_ctor(0, 1, 0); +lean_ctor_set(x_16, 0, x_15); +return x_16; +} +case 5: +{ +lean_object* x_17; lean_object* x_18; lean_object* x_19; +x_17 = lean_ctor_get(x_2, 0); +lean_inc(x_17); +lean_dec(x_2); +x_18 = lean_box(0); +x_19 = l_Lean_RBNode_foldM___at_Lean_instFromJsonNameMap___spec__1___rarg(x_1, x_18, x_17); +return x_19; +} +default: +{ +lean_object* x_20; lean_object* x_21; uint8_t x_22; +lean_dec(x_1); +x_20 = lean_unsigned_to_nat(80u); +lean_inc(x_2); +x_21 = l_Lean_Json_pretty(x_2, x_20); +x_22 = !lean_is_exclusive(x_2); +if (x_22 == 0) +{ +lean_object* x_23; lean_object* x_24; lean_object* x_25; lean_object* x_26; lean_object* x_27; +x_23 = lean_ctor_get(x_2, 0); +lean_dec(x_23); +x_24 = l_Lean_instFromJsonNameMap___rarg___closed__1; +x_25 = lean_string_append(x_24, x_21); +lean_dec(x_21); +x_26 = l_Lean_instFromJsonArray___rarg___closed__2; +x_27 = lean_string_append(x_25, x_26); +lean_ctor_set_tag(x_2, 0); +lean_ctor_set(x_2, 0, x_27); +return x_2; +} +else +{ +lean_object* x_28; lean_object* x_29; lean_object* x_30; lean_object* x_31; lean_object* x_32; +lean_dec(x_2); +x_28 = l_Lean_instFromJsonNameMap___rarg___closed__1; +x_29 = lean_string_append(x_28, x_21); +lean_dec(x_21); +x_30 = l_Lean_instFromJsonArray___rarg___closed__2; +x_31 = lean_string_append(x_29, x_30); +x_32 = lean_alloc_ctor(0, 1, 0); +lean_ctor_set(x_32, 0, x_31); +return x_32; +} +} +} +} +} +LEAN_EXPORT lean_object* l_Lean_instFromJsonNameMap(lean_object* x_1) { +_start: +{ +lean_object* x_2; +x_2 = lean_alloc_closure((void*)(l_Lean_instFromJsonNameMap___rarg), 2, 0); +return x_2; +} +} +LEAN_EXPORT lean_object* l_Lean_RBNode_fold___at_Lean_instToJsonNameMap___spec__1___rarg(lean_object* x_1, lean_object* x_2, lean_object* x_3) { +_start: +{ +if (lean_obj_tag(x_3) == 0) +{ +lean_dec(x_1); +return x_2; +} +else +{ +lean_object* x_4; lean_object* x_5; lean_object* x_6; lean_object* x_7; lean_object* x_8; uint8_t x_9; lean_object* x_10; lean_object* x_11; lean_object* x_12; lean_object* x_13; +x_4 = lean_ctor_get(x_3, 0); +lean_inc(x_4); +x_5 = lean_ctor_get(x_3, 1); +lean_inc(x_5); +x_6 = lean_ctor_get(x_3, 2); +lean_inc(x_6); +x_7 = lean_ctor_get(x_3, 3); +lean_inc(x_7); +lean_dec(x_3); +lean_inc(x_1); +x_8 = l_Lean_RBNode_fold___at_Lean_instToJsonNameMap___spec__1___rarg(x_1, x_2, x_4); +x_9 = 1; +x_10 = l_Lean_instToJsonName___closed__1; +x_11 = l_Lean_Name_toString(x_5, x_9, x_10); +lean_inc(x_1); +x_12 = lean_apply_1(x_1, x_6); +x_13 = l_Lean_RBNode_insert___at_Lean_Json_mkObj___spec__1(x_8, x_11, x_12); +x_2 = x_13; +x_3 = x_7; +goto _start; +} +} +} +LEAN_EXPORT lean_object* 
l_Lean_RBNode_fold___at_Lean_instToJsonNameMap___spec__1(lean_object* x_1) { +_start: +{ +lean_object* x_2; +x_2 = lean_alloc_closure((void*)(l_Lean_RBNode_fold___at_Lean_instToJsonNameMap___spec__1___rarg), 3, 0); +return x_2; +} +} +LEAN_EXPORT lean_object* l_Lean_instToJsonNameMap___rarg(lean_object* x_1, lean_object* x_2) { +_start: +{ +lean_object* x_3; lean_object* x_4; lean_object* x_5; +x_3 = lean_box(0); +x_4 = l_Lean_RBNode_fold___at_Lean_instToJsonNameMap___spec__1___rarg(x_1, x_3, x_2); +x_5 = lean_alloc_ctor(5, 1, 0); +lean_ctor_set(x_5, 0, x_4); +return x_5; +} +} +LEAN_EXPORT lean_object* l_Lean_instToJsonNameMap(lean_object* x_1) { +_start: +{ +lean_object* x_2; +x_2 = lean_alloc_closure((void*)(l_Lean_instToJsonNameMap___rarg), 2, 0); +return x_2; +} +} static lean_object* _init_l_Lean_bignumFromJson_x3f___closed__1() { _start: { @@ -3602,6 +4069,8 @@ l_Lean_instFromJsonName___closed__3 = _init_l_Lean_instFromJsonName___closed__3( lean_mark_persistent(l_Lean_instFromJsonName___closed__3); l_Lean_instToJsonName___closed__1 = _init_l_Lean_instToJsonName___closed__1(); lean_mark_persistent(l_Lean_instToJsonName___closed__1); +l_Lean_instFromJsonNameMap___rarg___closed__1 = _init_l_Lean_instFromJsonNameMap___rarg___closed__1(); +lean_mark_persistent(l_Lean_instFromJsonNameMap___rarg___closed__1); l_Lean_bignumFromJson_x3f___closed__1 = _init_l_Lean_bignumFromJson_x3f___closed__1(); lean_mark_persistent(l_Lean_bignumFromJson_x3f___closed__1); l_Lean_instFromJsonUSize___closed__1 = _init_l_Lean_instFromJsonUSize___closed__1(); diff --git a/stage0/stdlib/Lean/Data/Lsp/Capabilities.c b/stage0/stdlib/Lean/Data/Lsp/Capabilities.c index 2c2b43e4b3ba..c753d735c117 100644 --- a/stage0/stdlib/Lean/Data/Lsp/Capabilities.c +++ b/stage0/stdlib/Lean/Data/Lsp/Capabilities.c @@ -111,7 +111,6 @@ static lean_object* l___private_Lean_Data_Lsp_Capabilities_0__Lean_Lsp_fromJsonS static lean_object* l___private_Lean_Data_Lsp_Capabilities_0__Lean_Lsp_fromJsonServerCapabilities____x40_Lean_Data_Lsp_Capabilities___hyg_1933____closed__63; static lean_object* l___private_Lean_Data_Lsp_Capabilities_0__Lean_Lsp_fromJsonServerCapabilities____x40_Lean_Data_Lsp_Capabilities___hyg_1933____closed__5; static lean_object* l___private_Lean_Data_Lsp_Capabilities_0__Lean_Lsp_fromJsonCompletionItemCapabilities____x40_Lean_Data_Lsp_Capabilities___hyg_48____closed__5; -lean_object* l_Lean_Json_getObjValAs_x3f___at___private_Lean_Message_0__Lean_fromJsonBaseMessage____x40_Lean_Message___hyg_3128____spec__4(lean_object*, lean_object*); LEAN_EXPORT lean_object* l_Lean_Json_getObjValAs_x3f___at___private_Lean_Data_Lsp_Capabilities_0__Lean_Lsp_fromJsonClientCapabilities____x40_Lean_Data_Lsp_Capabilities___hyg_1347____spec__3___boxed(lean_object*, lean_object*); LEAN_EXPORT lean_object* l_Lean_Json_getObjValAs_x3f___at___private_Lean_Data_Lsp_Capabilities_0__Lean_Lsp_fromJsonWindowClientCapabilities____x40_Lean_Data_Lsp_Capabilities___hyg_598____spec__1(lean_object*, lean_object*); LEAN_EXPORT lean_object* l___private_Lean_Data_Lsp_Capabilities_0__Lean_Lsp_fromJsonClientCapabilities____x40_Lean_Data_Lsp_Capabilities___hyg_1347_(lean_object*); @@ -226,6 +225,7 @@ static lean_object* l___private_Lean_Data_Lsp_Capabilities_0__Lean_Lsp_fromJsonL LEAN_EXPORT lean_object* l___private_Lean_Data_Lsp_Capabilities_0__Lean_Lsp_fromJsonCompletionItemCapabilities____x40_Lean_Data_Lsp_Capabilities___hyg_48_(lean_object*); lean_object* l_Lean_Name_str___override(lean_object*, lean_object*); lean_object* 
l___private_Lean_Data_Lsp_TextSync_0__Lean_Lsp_fromJsonTextDocumentSyncOptions____x40_Lean_Data_Lsp_TextSync___hyg_1223_(lean_object*); +lean_object* l_Lean_Json_getObjValAs_x3f___at___private_Lean_Setup_0__Lean_fromJsonImport____x40_Lean_Setup___hyg_190____spec__2(lean_object*, lean_object*); static lean_object* l___private_Lean_Data_Lsp_Capabilities_0__Lean_Lsp_fromJsonCompletionItemCapabilities____x40_Lean_Data_Lsp_Capabilities___hyg_48____closed__4; static lean_object* l___private_Lean_Data_Lsp_Capabilities_0__Lean_Lsp_fromJsonWorkspaceEditClientCapabilities____x40_Lean_Data_Lsp_Capabilities___hyg_847____closed__2; LEAN_EXPORT lean_object* l_Lean_Json_opt___at___private_Lean_Data_Lsp_Capabilities_0__Lean_Lsp_toJsonTextDocumentClientCapabilities____x40_Lean_Data_Lsp_Capabilities___hyg_258____spec__3(lean_object*, lean_object*); @@ -2320,7 +2320,7 @@ LEAN_EXPORT lean_object* l___private_Lean_Data_Lsp_Capabilities_0__Lean_Lsp_from { lean_object* x_2; lean_object* x_3; x_2 = l___private_Lean_Data_Lsp_Capabilities_0__Lean_Lsp_toJsonShowDocumentClientCapabilities____x40_Lean_Data_Lsp_Capabilities___hyg_451____closed__1; -x_3 = l_Lean_Json_getObjValAs_x3f___at___private_Lean_Message_0__Lean_fromJsonBaseMessage____x40_Lean_Message___hyg_3128____spec__4(x_1, x_2); +x_3 = l_Lean_Json_getObjValAs_x3f___at___private_Lean_Setup_0__Lean_fromJsonImport____x40_Lean_Setup___hyg_190____spec__2(x_1, x_2); if (lean_obj_tag(x_3) == 0) { uint8_t x_4; @@ -8129,7 +8129,7 @@ lean_inc(x_23); lean_dec(x_14); x_24 = l___private_Lean_Data_Lsp_Capabilities_0__Lean_Lsp_toJsonServerCapabilities____x40_Lean_Data_Lsp_Capabilities___hyg_1745____closed__3; lean_inc(x_1); -x_25 = l_Lean_Json_getObjValAs_x3f___at___private_Lean_Message_0__Lean_fromJsonBaseMessage____x40_Lean_Message___hyg_3128____spec__4(x_1, x_24); +x_25 = l_Lean_Json_getObjValAs_x3f___at___private_Lean_Setup_0__Lean_fromJsonImport____x40_Lean_Setup___hyg_190____spec__2(x_1, x_24); if (lean_obj_tag(x_25) == 0) { uint8_t x_26; @@ -8169,7 +8169,7 @@ lean_inc(x_34); lean_dec(x_25); x_35 = l___private_Lean_Data_Lsp_Capabilities_0__Lean_Lsp_toJsonServerCapabilities____x40_Lean_Data_Lsp_Capabilities___hyg_1745____closed__4; lean_inc(x_1); -x_36 = l_Lean_Json_getObjValAs_x3f___at___private_Lean_Message_0__Lean_fromJsonBaseMessage____x40_Lean_Message___hyg_3128____spec__4(x_1, x_35); +x_36 = l_Lean_Json_getObjValAs_x3f___at___private_Lean_Setup_0__Lean_fromJsonImport____x40_Lean_Setup___hyg_190____spec__2(x_1, x_35); if (lean_obj_tag(x_36) == 0) { uint8_t x_37; @@ -8210,7 +8210,7 @@ lean_inc(x_45); lean_dec(x_36); x_46 = l___private_Lean_Data_Lsp_Capabilities_0__Lean_Lsp_toJsonServerCapabilities____x40_Lean_Data_Lsp_Capabilities___hyg_1745____closed__5; lean_inc(x_1); -x_47 = l_Lean_Json_getObjValAs_x3f___at___private_Lean_Message_0__Lean_fromJsonBaseMessage____x40_Lean_Message___hyg_3128____spec__4(x_1, x_46); +x_47 = l_Lean_Json_getObjValAs_x3f___at___private_Lean_Setup_0__Lean_fromJsonImport____x40_Lean_Setup___hyg_190____spec__2(x_1, x_46); if (lean_obj_tag(x_47) == 0) { uint8_t x_48; @@ -8252,7 +8252,7 @@ lean_inc(x_56); lean_dec(x_47); x_57 = l___private_Lean_Data_Lsp_Capabilities_0__Lean_Lsp_toJsonServerCapabilities____x40_Lean_Data_Lsp_Capabilities___hyg_1745____closed__6; lean_inc(x_1); -x_58 = l_Lean_Json_getObjValAs_x3f___at___private_Lean_Message_0__Lean_fromJsonBaseMessage____x40_Lean_Message___hyg_3128____spec__4(x_1, x_57); +x_58 = 
l_Lean_Json_getObjValAs_x3f___at___private_Lean_Setup_0__Lean_fromJsonImport____x40_Lean_Setup___hyg_190____spec__2(x_1, x_57); if (lean_obj_tag(x_58) == 0) { uint8_t x_59; @@ -8295,7 +8295,7 @@ lean_inc(x_67); lean_dec(x_58); x_68 = l___private_Lean_Data_Lsp_Capabilities_0__Lean_Lsp_toJsonServerCapabilities____x40_Lean_Data_Lsp_Capabilities___hyg_1745____closed__7; lean_inc(x_1); -x_69 = l_Lean_Json_getObjValAs_x3f___at___private_Lean_Message_0__Lean_fromJsonBaseMessage____x40_Lean_Message___hyg_3128____spec__4(x_1, x_68); +x_69 = l_Lean_Json_getObjValAs_x3f___at___private_Lean_Setup_0__Lean_fromJsonImport____x40_Lean_Setup___hyg_190____spec__2(x_1, x_68); if (lean_obj_tag(x_69) == 0) { uint8_t x_70; @@ -8339,7 +8339,7 @@ lean_inc(x_78); lean_dec(x_69); x_79 = l___private_Lean_Data_Lsp_Capabilities_0__Lean_Lsp_toJsonServerCapabilities____x40_Lean_Data_Lsp_Capabilities___hyg_1745____closed__8; lean_inc(x_1); -x_80 = l_Lean_Json_getObjValAs_x3f___at___private_Lean_Message_0__Lean_fromJsonBaseMessage____x40_Lean_Message___hyg_3128____spec__4(x_1, x_79); +x_80 = l_Lean_Json_getObjValAs_x3f___at___private_Lean_Setup_0__Lean_fromJsonImport____x40_Lean_Setup___hyg_190____spec__2(x_1, x_79); if (lean_obj_tag(x_80) == 0) { uint8_t x_81; @@ -8384,7 +8384,7 @@ lean_inc(x_89); lean_dec(x_80); x_90 = l___private_Lean_Data_Lsp_Capabilities_0__Lean_Lsp_toJsonServerCapabilities____x40_Lean_Data_Lsp_Capabilities___hyg_1745____closed__9; lean_inc(x_1); -x_91 = l_Lean_Json_getObjValAs_x3f___at___private_Lean_Message_0__Lean_fromJsonBaseMessage____x40_Lean_Message___hyg_3128____spec__4(x_1, x_90); +x_91 = l_Lean_Json_getObjValAs_x3f___at___private_Lean_Setup_0__Lean_fromJsonImport____x40_Lean_Setup___hyg_190____spec__2(x_1, x_90); if (lean_obj_tag(x_91) == 0) { uint8_t x_92; @@ -8430,7 +8430,7 @@ lean_inc(x_100); lean_dec(x_91); x_101 = l___private_Lean_Data_Lsp_Capabilities_0__Lean_Lsp_toJsonServerCapabilities____x40_Lean_Data_Lsp_Capabilities___hyg_1745____closed__10; lean_inc(x_1); -x_102 = l_Lean_Json_getObjValAs_x3f___at___private_Lean_Message_0__Lean_fromJsonBaseMessage____x40_Lean_Message___hyg_3128____spec__4(x_1, x_101); +x_102 = l_Lean_Json_getObjValAs_x3f___at___private_Lean_Setup_0__Lean_fromJsonImport____x40_Lean_Setup___hyg_190____spec__2(x_1, x_101); if (lean_obj_tag(x_102) == 0) { uint8_t x_103; @@ -8525,7 +8525,7 @@ lean_inc(x_122); lean_dec(x_113); x_123 = l___private_Lean_Data_Lsp_Capabilities_0__Lean_Lsp_toJsonServerCapabilities____x40_Lean_Data_Lsp_Capabilities___hyg_1745____closed__12; lean_inc(x_1); -x_124 = l_Lean_Json_getObjValAs_x3f___at___private_Lean_Message_0__Lean_fromJsonBaseMessage____x40_Lean_Message___hyg_3128____spec__4(x_1, x_123); +x_124 = l_Lean_Json_getObjValAs_x3f___at___private_Lean_Setup_0__Lean_fromJsonImport____x40_Lean_Setup___hyg_190____spec__2(x_1, x_123); if (lean_obj_tag(x_124) == 0) { uint8_t x_125; @@ -8574,7 +8574,7 @@ lean_inc(x_133); lean_dec(x_124); x_134 = l___private_Lean_Data_Lsp_Capabilities_0__Lean_Lsp_toJsonServerCapabilities____x40_Lean_Data_Lsp_Capabilities___hyg_1745____closed__13; lean_inc(x_1); -x_135 = l_Lean_Json_getObjValAs_x3f___at___private_Lean_Message_0__Lean_fromJsonBaseMessage____x40_Lean_Message___hyg_3128____spec__4(x_1, x_134); +x_135 = l_Lean_Json_getObjValAs_x3f___at___private_Lean_Setup_0__Lean_fromJsonImport____x40_Lean_Setup___hyg_190____spec__2(x_1, x_134); if (lean_obj_tag(x_135) == 0) { uint8_t x_136; diff --git a/stage0/stdlib/Lean/Data/Lsp/CodeActions.c b/stage0/stdlib/Lean/Data/Lsp/CodeActions.c index 
b964cb9507a3..61d1ee6f87f8 100644 --- a/stage0/stdlib/Lean/Data/Lsp/CodeActions.c +++ b/stage0/stdlib/Lean/Data/Lsp/CodeActions.c @@ -98,7 +98,6 @@ static lean_object* l___private_Lean_Data_Lsp_CodeActions_0__Lean_Lsp_fromJsonCo static lean_object* l___private_Lean_Data_Lsp_CodeActions_0__Lean_Lsp_fromJsonCodeActionContext____x40_Lean_Data_Lsp_CodeActions___hyg_148____closed__19; static lean_object* l_Lean_Lsp_instFromJsonCodeActionDisabled___closed__1; LEAN_EXPORT lean_object* l_Lean_Json_opt___at___private_Lean_Data_Lsp_CodeActions_0__Lean_Lsp_toJsonCodeActionContext____x40_Lean_Data_Lsp_CodeActions___hyg_293____spec__3___boxed(lean_object*, lean_object*); -lean_object* l_Lean_Json_getObjValAs_x3f___at___private_Lean_Message_0__Lean_fromJsonBaseMessage____x40_Lean_Message___hyg_3128____spec__4(lean_object*, lean_object*); static lean_object* l___private_Lean_Data_Lsp_CodeActions_0__Lean_Lsp_fromJsonCodeActionOptions____x40_Lean_Data_Lsp_CodeActions___hyg_885____closed__2; static lean_object* l___private_Lean_Data_Lsp_CodeActions_0__Lean_Lsp_fromJsonCodeActionClientCapabilities____x40_Lean_Data_Lsp_CodeActions___hyg_1932____closed__34; static lean_object* l___private_Lean_Data_Lsp_CodeActions_0__Lean_Lsp_fromJsonCodeActionOptions____x40_Lean_Data_Lsp_CodeActions___hyg_885____closed__9; @@ -211,6 +210,7 @@ static lean_object* l___private_Lean_Data_Lsp_CodeActions_0__Lean_Lsp_fromJsonCo static lean_object* l_Lean_Lsp_instToJsonCodeActionTriggerKind___closed__1; lean_object* l_Lean_Name_str___override(lean_object*, lean_object*); static lean_object* l___private_Lean_Data_Lsp_CodeActions_0__Lean_Lsp_fromJsonCodeActionParams____x40_Lean_Data_Lsp_CodeActions___hyg_390____closed__14; +lean_object* l_Lean_Json_getObjValAs_x3f___at___private_Lean_Setup_0__Lean_fromJsonImport____x40_Lean_Setup___hyg_190____spec__2(lean_object*, lean_object*); static lean_object* l_Lean_Lsp_instToJsonCodeActionClientCapabilities___closed__1; static lean_object* l___private_Lean_Data_Lsp_CodeActions_0__Lean_Lsp_fromJsonCodeActionLiteralSupport____x40_Lean_Data_Lsp_CodeActions___hyg_1753____closed__5; static lean_object* l_Lean_Lsp_instFromJsonCodeActionLiteralSupportValueSet___closed__1; @@ -2881,7 +2881,7 @@ LEAN_EXPORT lean_object* l___private_Lean_Data_Lsp_CodeActions_0__Lean_Lsp_fromJ lean_object* x_2; lean_object* x_3; x_2 = l___private_Lean_Data_Lsp_CodeActions_0__Lean_Lsp_toJsonCodeActionOptions____x40_Lean_Data_Lsp_CodeActions___hyg_839____closed__1; lean_inc(x_1); -x_3 = l_Lean_Json_getObjValAs_x3f___at___private_Lean_Message_0__Lean_fromJsonBaseMessage____x40_Lean_Message___hyg_3128____spec__4(x_1, x_2); +x_3 = l_Lean_Json_getObjValAs_x3f___at___private_Lean_Setup_0__Lean_fromJsonImport____x40_Lean_Setup___hyg_190____spec__2(x_1, x_2); if (lean_obj_tag(x_3) == 0) { uint8_t x_4; diff --git a/stage0/stdlib/Lean/Data/NameMap.c b/stage0/stdlib/Lean/Data/NameMap.c index d7dfb6c1867f..59954f434744 100644 --- a/stage0/stdlib/Lean/Data/NameMap.c +++ b/stage0/stdlib/Lean/Data/NameMap.c @@ -27,6 +27,7 @@ LEAN_EXPORT lean_object* l_Lean_RBNode_insert___at_Lean_NameMap_insert___spec__1 LEAN_EXPORT lean_object* l_Lean_NameMap_insert___rarg(lean_object*, lean_object*, lean_object*); LEAN_EXPORT lean_object* l_Lean_NameSet_contains___boxed(lean_object*, lean_object*); LEAN_EXPORT lean_object* l_Lean_NameHashSet_instInhabited; +extern lean_object* l_Lean_Name_instRepr; LEAN_EXPORT lean_object* l_Lean_NameMap_find_x3f___rarg(lean_object*, lean_object*); static lean_object* 
l_Lean_SMap_empty___at_Lean_NameSSet_empty___spec__1___closed__6; LEAN_EXPORT lean_object* l_Lean_NameSet_append___lambda__1(lean_object*, lean_object*, lean_object*); @@ -38,6 +39,7 @@ LEAN_EXPORT lean_object* l_Lean_RBNode_find___at_Lean_NameMap_contains___spec__1 uint8_t l_Lean_RBNode_isRed___rarg(lean_object*); LEAN_EXPORT lean_object* l_Lean_NameHashSet_filter(lean_object*, lean_object*); lean_object* lean_array_push(lean_object*, lean_object*); +LEAN_EXPORT lean_object* l_Lean_NameMap_instRepr___rarg(lean_object*); size_t lean_usize_mul(size_t, size_t); LEAN_EXPORT lean_object* l_Lean_PersistentHashMap_insert___at_Lean_NameSSet_insert___spec__2(lean_object*, lean_object*, lean_object*); uint8_t lean_usize_dec_eq(size_t, size_t); @@ -89,9 +91,11 @@ static size_t l_Lean_PersistentHashMap_insertAux___at_Lean_NameSSet_insert___spe static lean_object* l_Lean_SMap_empty___at_Lean_NameSSet_empty___spec__1___closed__2; LEAN_EXPORT lean_object* l_Lean_RBNode_insert___at_Lean_NameMap_insert___spec__1___rarg(lean_object*, lean_object*, lean_object*); static lean_object* l_Lean_NameMap_instForInProdName___closed__1; +lean_object* l_Lean_RBMap_instRepr___rarg___boxed(lean_object*, lean_object*, lean_object*, lean_object*); LEAN_EXPORT lean_object* l_Lean_PersistentHashMap_insertAtCollisionNodeAux___at_Lean_NameSSet_insert___spec__5(lean_object*, lean_object*, lean_object*, lean_object*); uint8_t lean_name_eq(lean_object*, lean_object*); LEAN_EXPORT lean_object* l_Std_DHashMap_Internal_AssocList_filter_go___at_Lean_NameHashSet_filter___spec__3(lean_object*, lean_object*, lean_object*); +LEAN_EXPORT lean_object* l_Lean_NameMap_instRepr(lean_object*); LEAN_EXPORT lean_object* l_Lean_PersistentHashMap_contains___at_Lean_NameSSet_contains___spec__2___boxed(lean_object*, lean_object*); LEAN_EXPORT lean_object* l_Lean_PersistentHashMap_insertAux___at_Lean_NameSSet_insert___spec__3(lean_object*, size_t, size_t, lean_object*, lean_object*); LEAN_EXPORT lean_object* l_Lean_NameMap_find_x3f___rarg___boxed(lean_object*, lean_object*); @@ -177,6 +181,25 @@ x_2 = lean_box(0); return x_2; } } +LEAN_EXPORT lean_object* l_Lean_NameMap_instRepr___rarg(lean_object* x_1) { +_start: +{ +lean_object* x_2; lean_object* x_3; +x_2 = l_Lean_Name_instRepr; +x_3 = lean_alloc_closure((void*)(l_Lean_RBMap_instRepr___rarg___boxed), 4, 2); +lean_closure_set(x_3, 0, x_2); +lean_closure_set(x_3, 1, x_1); +return x_3; +} +} +LEAN_EXPORT lean_object* l_Lean_NameMap_instRepr(lean_object* x_1) { +_start: +{ +lean_object* x_2; +x_2 = lean_alloc_closure((void*)(l_Lean_NameMap_instRepr___rarg), 1, 0); +return x_2; +} +} LEAN_EXPORT lean_object* l_Lean_NameMap_instEmptyCollection(lean_object* x_1) { _start: { diff --git a/stage0/stdlib/Lean/Elab/Frontend.c b/stage0/stdlib/Lean/Elab/Frontend.c index 043fc0634759..5a38c8b0a873 100644 --- a/stage0/stdlib/Lean/Elab/Frontend.c +++ b/stage0/stdlib/Lean/Elab/Frontend.c @@ -16,42 +16,44 @@ extern "C" { lean_object* l_Lean_Language_Lean_process(lean_object*, lean_object*, lean_object*, lean_object*); lean_object* lean_profileit(lean_object*, lean_object*, lean_object*, lean_object*); LEAN_EXPORT lean_object* l_Lean_Elab_Frontend_processCommands___boxed(lean_object*, lean_object*, lean_object*); -LEAN_EXPORT lean_object* l_Lean_Elab_runFrontend___lambda__8(lean_object*, lean_object*, lean_object*); -LEAN_EXPORT lean_object* l_Array_foldlMUnsafe_fold___at_Lean_Elab_runFrontend___spec__2(lean_object*, size_t, size_t, lean_object*); +LEAN_EXPORT lean_object* 
l_Lean_Elab_runFrontend___lambda__8(uint8_t, lean_object*, lean_object*, lean_object*, lean_object*, double, lean_object*, lean_object*, lean_object*, lean_object*, lean_object*, lean_object*, lean_object*); LEAN_EXPORT lean_object* l_Lean_Elab_Frontend_setMessages___boxed(lean_object*, lean_object*, lean_object*, lean_object*); lean_object* l_Lean_Firefox_Profile_export(lean_object*, double, lean_object*, lean_object*, lean_object*); lean_object* l_Array_foldlMUnsafe_fold___at_Lean_Elab_Command_runLintersAsync___spec__2(lean_object*, size_t, size_t, lean_object*); lean_object* l_Lean_Json_compress(lean_object*); LEAN_EXPORT lean_object* l_Lean_Elab_Frontend_getParserState(lean_object*); lean_object* lean_mk_empty_array_with_capacity(lean_object*); +LEAN_EXPORT lean_object* l_Lean_Elab_runFrontend___lambda__9(lean_object*, lean_object*, lean_object*); LEAN_EXPORT lean_object* l_Lean_Elab_IO_processCommandsIncrementally(lean_object*, lean_object*, lean_object*, lean_object*, lean_object*); LEAN_EXPORT lean_object* l_Lean_Elab_Frontend_getCommandState___rarg(lean_object*, lean_object*); LEAN_EXPORT lean_object* l_Array_mapMUnsafe_map___at_Lean_Elab_IO_processCommandsIncrementally_go___spec__1(size_t, size_t, lean_object*); -LEAN_EXPORT lean_object* l_Lean_Elab_runFrontend___lambda__2(lean_object*, lean_object*, lean_object*, lean_object*); +LEAN_EXPORT lean_object* l_Array_foldlMUnsafe_fold___at_Lean_Elab_runFrontend___spec__4(lean_object*, size_t, size_t, lean_object*); +LEAN_EXPORT lean_object* l_Lean_Elab_runFrontend___lambda__2(lean_object*, lean_object*, lean_object*, uint32_t, lean_object*, lean_object*, lean_object*, lean_object*); LEAN_EXPORT lean_object* l_Lean_Elab_Frontend_processCommand___boxed(lean_object*, lean_object*, lean_object*); lean_object* l_Lean_profileitIOUnsafe___rarg(lean_object*, lean_object*, lean_object*, lean_object*, lean_object*); -static lean_object* l_Lean_Elab_runFrontend___lambda__4___closed__2; lean_object* l_Lean_MessageData_toString(lean_object*, lean_object*); double lean_float_div(double, double); +static lean_object* l_Lean_Elab_runFrontend___lambda__9___closed__1; static lean_object* l_Lean_Elab_process___closed__1; LEAN_EXPORT lean_object* l_Array_foldlMUnsafe_fold___at_Lean_Elab_IO_processCommandsIncrementally_go___spec__3(lean_object*, size_t, size_t, lean_object*); lean_object* l_Lean_Elab_Command_elabCommandTopLevel(lean_object*, lean_object*, lean_object*, lean_object*); lean_object* l_Lean_Name_toString(lean_object*, uint8_t, lean_object*); +static lean_object* l_Lean_Elab_runFrontend___lambda__2___closed__1; lean_object* lean_array_push(lean_object*, lean_object*); LEAN_EXPORT lean_object* l_Lean_Elab_Frontend_getParserState___rarg(lean_object*, lean_object*); lean_object* l___private_Init_GetElem_0__List_get_x21Internal___rarg(lean_object*, lean_object*, lean_object*); uint8_t lean_usize_dec_eq(size_t, size_t); LEAN_EXPORT lean_object* l_Array_foldlMUnsafe_fold___at_Lean_Elab_IO_processCommandsIncrementally_go___spec__3___boxed(lean_object*, lean_object*, lean_object*, lean_object*); lean_object* l_Lean_Elab_Command_mkState(lean_object*, lean_object*, lean_object*); -LEAN_EXPORT lean_object* l_Array_mapMUnsafe_map___at_Lean_Elab_runFrontend___spec__1(size_t, size_t, lean_object*); -LEAN_EXPORT lean_object* l_Lean_Elab_runFrontend___lambda__1___boxed(lean_object*, lean_object*, lean_object*, lean_object*, lean_object*, lean_object*, lean_object*); +LEAN_EXPORT lean_object* l_Lean_Elab_runFrontend___lambda__1___boxed(lean_object*, 
lean_object*, lean_object*); LEAN_EXPORT lean_object* l_Lean_Elab_Frontend_setCommandState___boxed(lean_object*, lean_object*, lean_object*, lean_object*); +LEAN_EXPORT lean_object* l_Array_foldlMUnsafe_fold___at_Lean_Elab_runFrontend___spec__4___boxed(lean_object*, lean_object*, lean_object*, lean_object*); lean_object* l___private_Lean_Language_Lean_Types_0__Lean_Language_Lean_pushOpt___rarg(lean_object*, lean_object*); LEAN_EXPORT lean_object* l_Lean_Elab_Frontend_processCommands(lean_object*, lean_object*, lean_object*); +static lean_object* l_Lean_Elab_runFrontend___lambda__7___closed__1; LEAN_EXPORT lean_object* l_Lean_Elab_Frontend_updateCmdPos___rarg___boxed(lean_object*, lean_object*); -LEAN_EXPORT lean_object* l_Lean_Elab_runFrontend___lambda__7___boxed(lean_object*, lean_object*, lean_object*, lean_object*, lean_object*, lean_object*, lean_object*, lean_object*, lean_object*, lean_object*, lean_object*, lean_object*, lean_object*); +LEAN_EXPORT lean_object* l_Lean_Elab_runFrontend___lambda__7___boxed(lean_object*, lean_object*, lean_object*, lean_object*, lean_object*, lean_object*, lean_object*, lean_object*, lean_object*, lean_object*, lean_object*); static lean_object* l_Lean_Elab_Frontend_runCommandElabM___rarg___closed__1; -static lean_object* l_Lean_Elab_runFrontend___lambda__4___closed__1; LEAN_EXPORT lean_object* l_Lean_Elab_Frontend_updateCmdPos___boxed(lean_object*); LEAN_EXPORT lean_object* l_Array_mapMUnsafe_map___at_Lean_Elab_IO_processCommandsIncrementally_go___spec__4(size_t, size_t, lean_object*); lean_object* l_List_head_x21___rarg(lean_object*, lean_object*); @@ -61,13 +63,13 @@ static lean_object* l_Lean_Elab_runFrontend___lambda__6___closed__1; LEAN_EXPORT lean_object* l_Lean_Elab_Frontend_runCommandElabM(lean_object*); static double l_Lean_Elab_runFrontend___closed__1; LEAN_EXPORT lean_object* l_Lean_Elab_Frontend_getCommandState___rarg___boxed(lean_object*, lean_object*); -LEAN_EXPORT lean_object* l_Lean_Elab_runFrontend___lambda__3___boxed(lean_object*); -LEAN_EXPORT lean_object* l_Lean_Elab_runFrontend___boxed(lean_object*, lean_object*, lean_object*, lean_object*, lean_object*, lean_object*, lean_object*, lean_object*, lean_object*, lean_object*, lean_object*, lean_object*); +LEAN_EXPORT lean_object* l_Lean_Elab_runFrontend___lambda__3___boxed(lean_object*, lean_object*, lean_object*, lean_object*); +LEAN_EXPORT lean_object* l_Lean_Elab_runFrontend___boxed(lean_object*, lean_object*, lean_object*, lean_object*, lean_object*, lean_object*, lean_object*, lean_object*, lean_object*, lean_object*, lean_object*, lean_object*, lean_object*); LEAN_EXPORT lean_object* l_Array_mapMUnsafe_map___at_Lean_Elab_IO_processCommandsIncrementally_go___spec__1___boxed(lean_object*, lean_object*, lean_object*); lean_object* l_Lean_Server_findModuleRefs(lean_object*, lean_object*, uint8_t, uint8_t); -static lean_object* l_Lean_Elab_runFrontend___lambda__8___closed__1; static lean_object* l_Lean_Elab_runFrontend___lambda__5___closed__1; LEAN_EXPORT lean_object* l_Lean_Elab_Frontend_getCommandState___boxed(lean_object*); +LEAN_EXPORT lean_object* l_Lean_Elab_runFrontend___lambda__8___boxed(lean_object*, lean_object*, lean_object*, lean_object*, lean_object*, lean_object*, lean_object*, lean_object*, lean_object*, lean_object*, lean_object*, lean_object*, lean_object*); size_t lean_usize_of_nat(lean_object*); uint8_t l_Lean_Parser_isTerminalCommand(lean_object*); LEAN_EXPORT lean_object* 
l_Array_filterMapM___at_Lean_Elab_IO_processCommandsIncrementally_go___spec__2(lean_object*, lean_object*, lean_object*); @@ -76,15 +78,17 @@ lean_object* lean_st_ref_take(lean_object*, lean_object*); LEAN_EXPORT lean_object* l_Lean_Elab_Frontend_elabCommandAtFrontend(lean_object*, lean_object*, lean_object*, lean_object*); lean_object* l_Lean_Language_Lean_processCommands(lean_object*, lean_object*, lean_object*, lean_object*, lean_object*); static lean_object* l_Lean_Elab_runFrontend___closed__3; -LEAN_EXPORT lean_object* l_Array_foldlMUnsafe_fold___at_Lean_Elab_runFrontend___spec__2___boxed(lean_object*, lean_object*, lean_object*, lean_object*); -LEAN_EXPORT lean_object* l_Lean_Elab_runFrontend___lambda__7(uint8_t, lean_object*, lean_object*, lean_object*, lean_object*, double, lean_object*, lean_object*, lean_object*, lean_object*, lean_object*, lean_object*, lean_object*); +LEAN_EXPORT lean_object* l_Lean_Elab_runFrontend___lambda__7(lean_object*, lean_object*, lean_object*, lean_object*, double, lean_object*, lean_object*, lean_object*, lean_object*, lean_object*, lean_object*); +LEAN_EXPORT lean_object* l_Array_mapMUnsafe_map___at_Lean_Elab_runFrontend___spec__2___boxed(lean_object*, lean_object*, lean_object*); LEAN_EXPORT lean_object* l_Lean_Elab_Frontend_runCommandElabM___rarg___boxed(lean_object*, lean_object*, lean_object*, lean_object*); static lean_object* l_Lean_Elab_process___closed__2; lean_object* l_Lean_Environment_displayStats(lean_object*, lean_object*); +LEAN_EXPORT lean_object* l_Array_foldlMUnsafe_fold___at_Lean_Elab_runFrontend___spec__1(lean_object*, size_t, size_t, lean_object*, lean_object*); extern lean_object* l_Lean_Elab_async; lean_object* lean_st_ref_get(lean_object*, lean_object*); LEAN_EXPORT lean_object* l_Lean_Elab_Frontend_setParserState___boxed(lean_object*, lean_object*, lean_object*, lean_object*); -LEAN_EXPORT lean_object* l_Lean_Elab_runFrontend___lambda__6___boxed(lean_object*, lean_object*, lean_object*, lean_object*, lean_object*, lean_object*, lean_object*, lean_object*, lean_object*, lean_object*, lean_object*); +LEAN_EXPORT lean_object* l_Lean_Elab_runFrontend___lambda__6___boxed(lean_object*, lean_object*, lean_object*, lean_object*, lean_object*, lean_object*, lean_object*, lean_object*, lean_object*); +lean_object* l_Lean_LeanOptions_toOptions(lean_object*); lean_object* l_Lean_Parser_mkInputContext(lean_object*, lean_object*, uint8_t); lean_object* l___private_Lean_Util_Profiler_0__Lean_Firefox_toJsonProfile____x40_Lean_Util_Profiler___hyg_4847_(lean_object*); lean_object* lean_st_mk_ref(lean_object*, lean_object*); @@ -96,19 +100,23 @@ lean_object* l_Lean_RBNode_insert___at_Lean_NameMap_insert___spec__1___rarg(lean LEAN_EXPORT lean_object* l_Lean_Elab_Frontend_elabCommandAtFrontend___boxed(lean_object*, lean_object*, lean_object*, lean_object*); LEAN_EXPORT lean_object* l_Array_filterMapM___at_Lean_Elab_IO_processCommandsIncrementally_go___spec__2___boxed(lean_object*, lean_object*, lean_object*); LEAN_EXPORT lean_object* l_Lean_Elab_Frontend_processCommand___lambda__1(lean_object*, lean_object*, lean_object*, lean_object*, lean_object*); +uint8_t l_Lean_Elab_HeaderSyntax_isModule(lean_object*); +LEAN_EXPORT lean_object* l_Array_foldlMUnsafe_fold___at_Lean_Elab_runFrontend___spec__1___boxed(lean_object*, lean_object*, lean_object*, lean_object*, lean_object*); +lean_object* l_Lean_ModuleSetup_load(lean_object*, lean_object*); LEAN_EXPORT lean_object* 
l_Array_mapMUnsafe_map___at_Lean_Elab_IO_processCommandsIncrementally_go___spec__4___boxed(lean_object*, lean_object*, lean_object*); lean_object* l_Array_mapMUnsafe_map___at_Lean_Elab_Command_runLintersAsync___spec__1(size_t, size_t, lean_object*); lean_object* lean_task_get_own(lean_object*); +lean_object* l_Lean_Elab_HeaderSyntax_imports(lean_object*); lean_object* l_Array_append___rarg(lean_object*, lean_object*); lean_object* l_Lean_Server_ModuleRefs_toLspModuleRefs(lean_object*, lean_object*); static lean_object* l_Lean_Elab_Frontend_elabCommandAtFrontend___closed__1; double l_Float_ofScientific(lean_object*, uint8_t, lean_object*); static lean_object* l_Lean_Elab_Frontend_runCommandElabM___rarg___closed__2; -LEAN_EXPORT lean_object* lean_run_frontend(lean_object*, lean_object*, lean_object*, lean_object*, uint32_t, lean_object*, lean_object*, uint8_t, lean_object*, lean_object*, uint8_t, lean_object*); -LEAN_EXPORT lean_object* l_Array_mapMUnsafe_map___at_Lean_Elab_runFrontend___spec__1___boxed(lean_object*, lean_object*, lean_object*); +static lean_object* l_Lean_Elab_runFrontend___lambda__5___closed__2; +LEAN_EXPORT lean_object* lean_run_frontend(lean_object*, lean_object*, lean_object*, lean_object*, uint32_t, lean_object*, lean_object*, uint8_t, lean_object*, lean_object*, uint8_t, lean_object*, lean_object*); LEAN_EXPORT lean_object* l_Lean_Elab_Frontend_setMessages(lean_object*, lean_object*, lean_object*, lean_object*); extern lean_object* l_Lean_NameSet_empty; -LEAN_EXPORT uint8_t l_Lean_Elab_runFrontend___lambda__3(lean_object*); +LEAN_EXPORT lean_object* l_Lean_Elab_runFrontend___lambda__3(lean_object*, lean_object*, lean_object*, lean_object*); LEAN_EXPORT lean_object* l_Lean_Elab_Frontend_getCommandState(lean_object*); static lean_object* l_Lean_Elab_Frontend_elabCommandAtFrontend___closed__3; uint8_t lean_nat_dec_lt(lean_object*, lean_object*); @@ -119,12 +127,12 @@ lean_object* l_Lean_Language_Lean_instToSnapshotTreeCommandParsedSnapshot_go(lea lean_object* l_Lean_Language_Lean_waitForFinalCmdState_x3f(lean_object*); LEAN_EXPORT lean_object* l_Array_foldlMUnsafe_fold___at_Lean_Elab_runFrontend___spec__3(lean_object*, size_t, size_t, lean_object*); LEAN_EXPORT lean_object* l_Lean_Elab_IO_processCommandsIncrementally_go(lean_object*, lean_object*, lean_object*, lean_object*, lean_object*); -LEAN_EXPORT lean_object* l_Lean_Elab_runFrontend___lambda__6(lean_object*, lean_object*, lean_object*, lean_object*, double, lean_object*, lean_object*, lean_object*, lean_object*, lean_object*, lean_object*); +LEAN_EXPORT lean_object* l_Lean_Elab_runFrontend___lambda__6(lean_object*, lean_object*, lean_object*, lean_object*, double, lean_object*, lean_object*, lean_object*, lean_object*); LEAN_EXPORT lean_object* l_Lean_Elab_Frontend_processCommand___lambda__1___boxed(lean_object*, lean_object*, lean_object*, lean_object*, lean_object*); -LEAN_EXPORT lean_object* l_Lean_Elab_runFrontend___lambda__1(lean_object*, lean_object*, uint32_t, lean_object*, lean_object*, lean_object*, lean_object*); +LEAN_EXPORT lean_object* l_Lean_Elab_runFrontend___lambda__1(lean_object*, lean_object*, lean_object*); static lean_object* l_Lean_Elab_runFrontend___closed__2; LEAN_EXPORT lean_object* l_Lean_Elab_Frontend_processCommand(lean_object*, lean_object*, lean_object*); -LEAN_EXPORT lean_object* l_Lean_Elab_runFrontend___lambda__4___boxed(lean_object*, lean_object*, lean_object*, lean_object*, lean_object*, lean_object*, lean_object*); +LEAN_EXPORT lean_object* 
l_Lean_Elab_runFrontend___lambda__4___boxed(lean_object*); extern lean_object* l_Lean_firstFrontendMacroScope; lean_object* l_Lean_Option_setIfNotSet___at_Lean_Language_Lean_process_processHeader___spec__2(lean_object*, lean_object*, uint8_t); extern lean_object* l_Lean_Elab_Command_instInhabitedScope; @@ -135,10 +143,11 @@ LEAN_EXPORT lean_object* l_Lean_Elab_Frontend_updateCmdPos___rarg(lean_object*, size_t lean_usize_add(size_t, size_t); lean_object* l_Lean_Language_SnapshotTask_map___rarg(lean_object*, lean_object*, lean_object*, lean_object*, uint8_t); lean_object* l_Lean_MessageLog_append(lean_object*, lean_object*); -LEAN_EXPORT lean_object* l_Lean_Elab_runFrontend___lambda__4(lean_object*, lean_object*, lean_object*, lean_object*, double, lean_object*, lean_object*); +LEAN_EXPORT uint8_t l_Lean_Elab_runFrontend___lambda__4(lean_object*); LEAN_EXPORT lean_object* l_Lean_Elab_Frontend_getInputContext___boxed(lean_object*, lean_object*, lean_object*); lean_object* lean_array_uget(lean_object*, size_t); size_t lean_array_size(lean_object*); +lean_object* l_Lean_KVMap_mergeBy(lean_object*, lean_object*, lean_object*); lean_object* l_Lean_Language_SnapshotTask_get___rarg(lean_object*); lean_object* lean_st_ref_set(lean_object*, lean_object*, lean_object*); lean_object* l_Lean_writeModule(lean_object*, lean_object*, lean_object*); @@ -148,20 +157,22 @@ lean_object* lean_string_append(lean_object*, lean_object*); extern lean_object* l_Lean_trace_profiler_output; lean_object* l_Lean_Language_SnapshotTree_foldM___at_Lean_Language_SnapshotTree_runAndReport___spec__1(lean_object*, uint8_t, lean_object*, lean_object*, uint8_t, lean_object*); lean_object* lean_array_get_size(lean_object*); +lean_object* lean_load_dynlib(lean_object*, lean_object*); uint8_t lean_nat_dec_le(lean_object*, lean_object*); LEAN_EXPORT lean_object* l_Lean_Elab_Frontend_setParserState(lean_object*, lean_object*, lean_object*, lean_object*); uint8_t lean_usize_dec_lt(size_t, size_t); -LEAN_EXPORT lean_object* l_Lean_Elab_runFrontend___lambda__5___boxed(lean_object*, lean_object*, lean_object*, lean_object*, lean_object*, lean_object*, lean_object*, lean_object*, lean_object*); +LEAN_EXPORT lean_object* l_Lean_Elab_runFrontend___lambda__5___boxed(lean_object*, lean_object*, lean_object*, lean_object*, lean_object*, lean_object*, lean_object*); lean_object* l_Lean_Option_get_x3f___at_Lean_addTraceAsMessages___spec__17(lean_object*, lean_object*); static lean_object* l_Lean_Elab_Frontend_processCommand___closed__1; LEAN_EXPORT lean_object* l_Lean_Elab_Frontend_getParserState___boxed(lean_object*); lean_object* l_Lean_Parser_parseCommand(lean_object*, lean_object*, lean_object*, lean_object*); -LEAN_EXPORT lean_object* l_Lean_Elab_runFrontend___lambda__5(lean_object*, lean_object*, lean_object*, lean_object*, double, lean_object*, lean_object*, lean_object*, lean_object*); +LEAN_EXPORT lean_object* l_Lean_Elab_runFrontend___lambda__5(lean_object*, lean_object*, lean_object*, lean_object*, double, lean_object*, lean_object*); extern lean_object* l_Lean_internal_cmdlineSnapshots; lean_object* lean_array_uset(lean_object*, size_t, lean_object*); LEAN_EXPORT lean_object* l_Lean_Elab_Frontend_runCommandElabM___rarg(lean_object*, lean_object*, lean_object*, lean_object*); -LEAN_EXPORT lean_object* l_Lean_Elab_runFrontend___lambda__2___boxed(lean_object*, lean_object*, lean_object*, lean_object*); +LEAN_EXPORT lean_object* l_Lean_Elab_runFrontend___lambda__2___boxed(lean_object*, lean_object*, lean_object*, lean_object*, 
lean_object*, lean_object*, lean_object*, lean_object*); static lean_object* l_Array_filterMapM___at_Lean_Elab_IO_processCommandsIncrementally_go___spec__2___closed__1; +LEAN_EXPORT lean_object* l_Array_mapMUnsafe_map___at_Lean_Elab_runFrontend___spec__2(size_t, size_t, lean_object*); lean_object* lean_mk_empty_array_with_capacity(lean_object*); LEAN_EXPORT lean_object* l_Lean_Elab_Frontend_updateCmdPos(lean_object*); static lean_object* l_Lean_Elab_Frontend_elabCommandAtFrontend___closed__4; @@ -2842,7 +2853,66 @@ return x_41; } } } -LEAN_EXPORT lean_object* l_Array_mapMUnsafe_map___at_Lean_Elab_runFrontend___spec__1(size_t x_1, size_t x_2, lean_object* x_3) { +LEAN_EXPORT lean_object* l_Array_foldlMUnsafe_fold___at_Lean_Elab_runFrontend___spec__1(lean_object* x_1, size_t x_2, size_t x_3, lean_object* x_4, lean_object* x_5) { +_start: +{ +uint8_t x_6; +x_6 = lean_usize_dec_eq(x_2, x_3); +if (x_6 == 0) +{ +lean_object* x_7; lean_object* x_8; +lean_dec(x_4); +x_7 = lean_array_uget(x_1, x_2); +x_8 = lean_load_dynlib(x_7, x_5); +if (lean_obj_tag(x_8) == 0) +{ +lean_object* x_9; lean_object* x_10; size_t x_11; size_t x_12; +x_9 = lean_ctor_get(x_8, 0); +lean_inc(x_9); +x_10 = lean_ctor_get(x_8, 1); +lean_inc(x_10); +lean_dec(x_8); +x_11 = 1; +x_12 = lean_usize_add(x_2, x_11); +x_2 = x_12; +x_4 = x_9; +x_5 = x_10; +goto _start; +} +else +{ +uint8_t x_14; +x_14 = !lean_is_exclusive(x_8); +if (x_14 == 0) +{ +return x_8; +} +else +{ +lean_object* x_15; lean_object* x_16; lean_object* x_17; +x_15 = lean_ctor_get(x_8, 0); +x_16 = lean_ctor_get(x_8, 1); +lean_inc(x_16); +lean_inc(x_15); +lean_dec(x_8); +x_17 = lean_alloc_ctor(1, 2, 0); +lean_ctor_set(x_17, 0, x_15); +lean_ctor_set(x_17, 1, x_16); +return x_17; +} +} +} +else +{ +lean_object* x_18; +x_18 = lean_alloc_ctor(0, 2, 0); +lean_ctor_set(x_18, 0, x_4); +lean_ctor_set(x_18, 1, x_5); +return x_18; +} +} +} +LEAN_EXPORT lean_object* l_Array_mapMUnsafe_map___at_Lean_Elab_runFrontend___spec__2(size_t x_1, size_t x_2, lean_object* x_3) { _start: { uint8_t x_4; @@ -2869,7 +2939,7 @@ goto _start; } } } -LEAN_EXPORT lean_object* l_Array_foldlMUnsafe_fold___at_Lean_Elab_runFrontend___spec__2(lean_object* x_1, size_t x_2, size_t x_3, lean_object* x_4) { +LEAN_EXPORT lean_object* l_Array_foldlMUnsafe_fold___at_Lean_Elab_runFrontend___spec__3(lean_object* x_1, size_t x_2, size_t x_3, lean_object* x_4) { _start: { uint8_t x_5; @@ -2916,7 +2986,7 @@ return x_4; } } } -LEAN_EXPORT lean_object* l_Array_foldlMUnsafe_fold___at_Lean_Elab_runFrontend___spec__3(lean_object* x_1, size_t x_2, size_t x_3, lean_object* x_4) { +LEAN_EXPORT lean_object* l_Array_foldlMUnsafe_fold___at_Lean_Elab_runFrontend___spec__4(lean_object* x_1, size_t x_2, size_t x_3, lean_object* x_4) { _start: { uint8_t x_5; @@ -2940,24 +3010,219 @@ return x_4; } } } -LEAN_EXPORT lean_object* l_Lean_Elab_runFrontend___lambda__1(lean_object* x_1, lean_object* x_2, uint32_t x_3, lean_object* x_4, lean_object* x_5, lean_object* x_6, lean_object* x_7) { +LEAN_EXPORT lean_object* l_Lean_Elab_runFrontend___lambda__1(lean_object* x_1, lean_object* x_2, lean_object* x_3) { _start: { -lean_object* x_8; lean_object* x_9; lean_object* x_10; -x_8 = lean_alloc_ctor(0, 3, 4); -lean_ctor_set(x_8, 0, x_1); -lean_ctor_set(x_8, 1, x_2); -lean_ctor_set(x_8, 2, x_4); -lean_ctor_set_uint32(x_8, sizeof(void*)*3, x_3); -x_9 = lean_alloc_ctor(1, 1, 0); -lean_ctor_set(x_9, 0, x_8); -x_10 = lean_alloc_ctor(0, 2, 0); -lean_ctor_set(x_10, 0, x_9); -lean_ctor_set(x_10, 1, x_7); -return x_10; +lean_inc(x_3); +return x_3; +} 
+} +static lean_object* _init_l_Lean_Elab_runFrontend___lambda__2___closed__1() { +_start: +{ +lean_object* x_1; +x_1 = lean_alloc_closure((void*)(l_Lean_Elab_runFrontend___lambda__1___boxed), 3, 0); +return x_1; +} +} +LEAN_EXPORT lean_object* l_Lean_Elab_runFrontend___lambda__2(lean_object* x_1, lean_object* x_2, lean_object* x_3, uint32_t x_4, lean_object* x_5, lean_object* x_6, lean_object* x_7, lean_object* x_8) { +_start: +{ +if (lean_obj_tag(x_1) == 0) +{ +uint8_t x_9; lean_object* x_10; lean_object* x_11; lean_object* x_12; lean_object* x_13; lean_object* x_14; +x_9 = l_Lean_Elab_HeaderSyntax_isModule(x_6); +x_10 = l_Lean_Elab_HeaderSyntax_imports(x_6); +x_11 = lean_box(0); +x_12 = lean_alloc_ctor(0, 5, 5); +lean_ctor_set(x_12, 0, x_2); +lean_ctor_set(x_12, 1, x_10); +lean_ctor_set(x_12, 2, x_3); +lean_ctor_set(x_12, 3, x_11); +lean_ctor_set(x_12, 4, x_5); +lean_ctor_set_uint8(x_12, sizeof(void*)*5 + 4, x_9); +lean_ctor_set_uint32(x_12, sizeof(void*)*5, x_4); +x_13 = lean_alloc_ctor(1, 1, 0); +lean_ctor_set(x_13, 0, x_12); +x_14 = lean_alloc_ctor(0, 2, 0); +lean_ctor_set(x_14, 0, x_13); +lean_ctor_set(x_14, 1, x_8); +return x_14; +} +else +{ +lean_object* x_15; lean_object* x_16; lean_object* x_17; +lean_dec(x_6); +lean_dec(x_2); +x_15 = lean_ctor_get(x_1, 0); +lean_inc(x_15); +if (lean_is_exclusive(x_1)) { + lean_ctor_release(x_1, 0); + x_16 = x_1; +} else { + lean_dec_ref(x_1); + x_16 = lean_box(0); +} +x_17 = l_Lean_ModuleSetup_load(x_15, x_8); +lean_dec(x_15); +if (lean_obj_tag(x_17) == 0) +{ +lean_object* x_18; lean_object* x_19; lean_object* x_20; lean_object* x_21; lean_object* x_36; lean_object* x_37; lean_object* x_38; uint8_t x_39; +x_18 = lean_ctor_get(x_17, 0); +lean_inc(x_18); +x_19 = lean_ctor_get(x_17, 1); +lean_inc(x_19); +if (lean_is_exclusive(x_17)) { + lean_ctor_release(x_17, 0); + lean_ctor_release(x_17, 1); + x_20 = x_17; +} else { + lean_dec_ref(x_17); + x_20 = lean_box(0); +} +x_36 = lean_ctor_get(x_18, 3); +lean_inc(x_36); +x_37 = lean_array_get_size(x_36); +x_38 = lean_unsigned_to_nat(0u); +x_39 = lean_nat_dec_lt(x_38, x_37); +if (x_39 == 0) +{ +lean_dec(x_37); +lean_dec(x_36); +x_21 = x_19; +goto block_35; +} +else +{ +uint8_t x_40; +x_40 = lean_nat_dec_le(x_37, x_37); +if (x_40 == 0) +{ +lean_dec(x_37); +lean_dec(x_36); +x_21 = x_19; +goto block_35; +} +else +{ +size_t x_41; size_t x_42; lean_object* x_43; lean_object* x_44; +x_41 = 0; +x_42 = lean_usize_of_nat(x_37); +lean_dec(x_37); +x_43 = lean_box(0); +x_44 = l_Array_foldlMUnsafe_fold___at_Lean_Elab_runFrontend___spec__1(x_36, x_41, x_42, x_43, x_19); +lean_dec(x_36); +if (lean_obj_tag(x_44) == 0) +{ +lean_object* x_45; +x_45 = lean_ctor_get(x_44, 1); +lean_inc(x_45); +lean_dec(x_44); +x_21 = x_45; +goto block_35; +} +else +{ +uint8_t x_46; +lean_dec(x_20); +lean_dec(x_18); +lean_dec(x_16); +lean_dec(x_5); +lean_dec(x_3); +x_46 = !lean_is_exclusive(x_44); +if (x_46 == 0) +{ +return x_44; +} +else +{ +lean_object* x_47; lean_object* x_48; lean_object* x_49; +x_47 = lean_ctor_get(x_44, 0); +x_48 = lean_ctor_get(x_44, 1); +lean_inc(x_48); +lean_inc(x_47); +lean_dec(x_44); +x_49 = lean_alloc_ctor(1, 2, 0); +lean_ctor_set(x_49, 0, x_47); +lean_ctor_set(x_49, 1, x_48); +return x_49; +} +} +} } +block_35: +{ +lean_object* x_22; uint8_t x_23; lean_object* x_24; lean_object* x_25; lean_object* x_26; lean_object* x_27; lean_object* x_28; lean_object* x_29; lean_object* x_30; lean_object* x_31; lean_object* x_32; lean_object* x_33; lean_object* x_34; +x_22 = lean_ctor_get(x_18, 0); +lean_inc(x_22); +x_23 = 
lean_ctor_get_uint8(x_18, sizeof(void*)*6); +x_24 = lean_ctor_get(x_18, 1); +lean_inc(x_24); +x_25 = lean_ctor_get(x_18, 2); +lean_inc(x_25); +x_26 = lean_ctor_get(x_18, 4); +lean_inc(x_26); +x_27 = lean_ctor_get(x_18, 5); +lean_inc(x_27); +lean_dec(x_18); +x_28 = l_Lean_LeanOptions_toOptions(x_27); +x_29 = l_Lean_Elab_runFrontend___lambda__2___closed__1; +x_30 = l_Lean_KVMap_mergeBy(x_29, x_3, x_28); +x_31 = l_Array_append___rarg(x_5, x_26); +lean_dec(x_26); +x_32 = lean_alloc_ctor(0, 5, 5); +lean_ctor_set(x_32, 0, x_22); +lean_ctor_set(x_32, 1, x_24); +lean_ctor_set(x_32, 2, x_30); +lean_ctor_set(x_32, 3, x_25); +lean_ctor_set(x_32, 4, x_31); +lean_ctor_set_uint8(x_32, sizeof(void*)*5 + 4, x_23); +lean_ctor_set_uint32(x_32, sizeof(void*)*5, x_4); +if (lean_is_scalar(x_16)) { + x_33 = lean_alloc_ctor(1, 1, 0); +} else { + x_33 = x_16; +} +lean_ctor_set(x_33, 0, x_32); +if (lean_is_scalar(x_20)) { + x_34 = lean_alloc_ctor(0, 2, 0); +} else { + x_34 = x_20; +} +lean_ctor_set(x_34, 0, x_33); +lean_ctor_set(x_34, 1, x_21); +return x_34; +} +} +else +{ +uint8_t x_50; +lean_dec(x_16); +lean_dec(x_5); +lean_dec(x_3); +x_50 = !lean_is_exclusive(x_17); +if (x_50 == 0) +{ +return x_17; +} +else +{ +lean_object* x_51; lean_object* x_52; lean_object* x_53; +x_51 = lean_ctor_get(x_17, 0); +x_52 = lean_ctor_get(x_17, 1); +lean_inc(x_52); +lean_inc(x_51); +lean_dec(x_17); +x_53 = lean_alloc_ctor(1, 2, 0); +lean_ctor_set(x_53, 0, x_51); +lean_ctor_set(x_53, 1, x_52); +return x_53; } -LEAN_EXPORT lean_object* l_Lean_Elab_runFrontend___lambda__2(lean_object* x_1, lean_object* x_2, lean_object* x_3, lean_object* x_4) { +} +} +} +} +LEAN_EXPORT lean_object* l_Lean_Elab_runFrontend___lambda__3(lean_object* x_1, lean_object* x_2, lean_object* x_3, lean_object* x_4) { _start: { lean_object* x_5; uint8_t x_6; @@ -2988,7 +3253,7 @@ return x_11; } } } -LEAN_EXPORT uint8_t l_Lean_Elab_runFrontend___lambda__3(lean_object* x_1) { +LEAN_EXPORT uint8_t l_Lean_Elab_runFrontend___lambda__4(lean_object* x_1) { _start: { uint8_t x_2; @@ -2996,7 +3261,7 @@ x_2 = 0; return x_2; } } -static lean_object* _init_l_Lean_Elab_runFrontend___lambda__4___closed__1() { +static lean_object* _init_l_Lean_Elab_runFrontend___lambda__5___closed__1() { _start: { lean_object* x_1; @@ -3004,26 +3269,26 @@ x_1 = l_Lean_trace_profiler_output; return x_1; } } -static lean_object* _init_l_Lean_Elab_runFrontend___lambda__4___closed__2() { +static lean_object* _init_l_Lean_Elab_runFrontend___lambda__5___closed__2() { _start: { lean_object* x_1; -x_1 = lean_alloc_closure((void*)(l_Lean_Elab_runFrontend___lambda__3___boxed), 1, 0); +x_1 = lean_alloc_closure((void*)(l_Lean_Elab_runFrontend___lambda__4___boxed), 1, 0); return x_1; } } -LEAN_EXPORT lean_object* l_Lean_Elab_runFrontend___lambda__4(lean_object* x_1, lean_object* x_2, lean_object* x_3, lean_object* x_4, double x_5, lean_object* x_6, lean_object* x_7) { +LEAN_EXPORT lean_object* l_Lean_Elab_runFrontend___lambda__5(lean_object* x_1, lean_object* x_2, lean_object* x_3, lean_object* x_4, double x_5, lean_object* x_6, lean_object* x_7) { _start: { lean_object* x_8; lean_object* x_9; -x_8 = l_Lean_Elab_runFrontend___lambda__4___closed__1; +x_8 = l_Lean_Elab_runFrontend___lambda__5___closed__1; x_9 = l_Lean_Option_get_x3f___at_Lean_addTraceAsMessages___spec__17(x_3, x_8); if (lean_obj_tag(x_9) == 0) { lean_object* x_10; lean_object* x_11; lean_dec(x_4); x_10 = lean_box(0); -x_11 = l_Lean_Elab_runFrontend___lambda__2(x_1, x_2, x_10, x_7); +x_11 = l_Lean_Elab_runFrontend___lambda__3(x_1, x_2, x_10, 
x_7); return x_11; } else @@ -3036,9 +3301,9 @@ lean_inc(x_1); x_13 = l_Lean_Language_SnapshotTree_getAll(x_1); x_14 = lean_array_size(x_13); x_15 = 0; -x_16 = l_Array_mapMUnsafe_map___at_Lean_Elab_runFrontend___spec__1(x_14, x_15, x_13); +x_16 = l_Array_mapMUnsafe_map___at_Lean_Elab_runFrontend___spec__2(x_14, x_15, x_13); x_17 = 1; -x_18 = l_Lean_Elab_runFrontend___lambda__4___closed__2; +x_18 = l_Lean_Elab_runFrontend___lambda__5___closed__2; x_19 = l_Lean_Name_toString(x_4, x_17, x_18); x_20 = l_Lean_Firefox_Profile_export(x_19, x_5, x_16, x_3, x_7); lean_dec(x_16); @@ -3060,7 +3325,7 @@ lean_inc(x_26); x_27 = lean_ctor_get(x_25, 1); lean_inc(x_27); lean_dec(x_25); -x_28 = l_Lean_Elab_runFrontend___lambda__2(x_1, x_2, x_26, x_27); +x_28 = l_Lean_Elab_runFrontend___lambda__3(x_1, x_2, x_26, x_27); lean_dec(x_26); return x_28; } @@ -3091,7 +3356,7 @@ return x_32; } } } -static lean_object* _init_l_Lean_Elab_runFrontend___lambda__5___closed__1() { +static lean_object* _init_l_Lean_Elab_runFrontend___lambda__6___closed__1() { _start: { lean_object* x_1; lean_object* x_2; @@ -3100,7 +3365,7 @@ x_2 = lean_mk_empty_array_with_capacity(x_1); return x_2; } } -LEAN_EXPORT lean_object* l_Lean_Elab_runFrontend___lambda__5(lean_object* x_1, lean_object* x_2, lean_object* x_3, lean_object* x_4, double x_5, lean_object* x_6, lean_object* x_7, lean_object* x_8, lean_object* x_9) { +LEAN_EXPORT lean_object* l_Lean_Elab_runFrontend___lambda__6(lean_object* x_1, lean_object* x_2, lean_object* x_3, lean_object* x_4, double x_5, lean_object* x_6, lean_object* x_7, lean_object* x_8, lean_object* x_9) { _start: { if (lean_obj_tag(x_6) == 0) @@ -3108,7 +3373,7 @@ if (lean_obj_tag(x_6) == 0) lean_object* x_10; lean_object* x_11; lean_dec(x_7); x_10 = lean_box(0); -x_11 = l_Lean_Elab_runFrontend___lambda__4(x_1, x_2, x_3, x_4, x_5, x_10, x_9); +x_11 = l_Lean_Elab_runFrontend___lambda__5(x_1, x_2, x_3, x_4, x_5, x_10, x_9); return x_11; } else @@ -3128,7 +3393,7 @@ lean_dec(x_13); x_17 = lean_ctor_get(x_7, 2); lean_inc(x_17); lean_dec(x_7); -x_18 = l_Lean_Elab_runFrontend___lambda__5___closed__1; +x_18 = l_Lean_Elab_runFrontend___lambda__6___closed__1; x_19 = 0; x_20 = l_Lean_Server_findModuleRefs(x_17, x_18, x_19, x_19); x_21 = l_Lean_Server_ModuleRefs_toLspModuleRefs(x_20, x_9); @@ -3155,7 +3420,7 @@ lean_inc(x_29); x_30 = lean_ctor_get(x_28, 1); lean_inc(x_30); lean_dec(x_28); -x_31 = l_Lean_Elab_runFrontend___lambda__4(x_1, x_2, x_3, x_4, x_5, x_29, x_30); +x_31 = l_Lean_Elab_runFrontend___lambda__5(x_1, x_2, x_3, x_4, x_5, x_29, x_30); lean_dec(x_29); return x_31; } @@ -3197,7 +3462,7 @@ if (x_37 == 0) lean_object* x_38; uint8_t x_39; lean_object* x_40; lean_object* x_41; lean_object* x_42; lean_object* x_43; lean_object* x_44; lean_object* x_45; lean_object* x_46; lean_object* x_47; lean_object* x_48; lean_dec(x_14); lean_dec(x_13); -x_38 = l_Lean_Elab_runFrontend___lambda__5___closed__1; +x_38 = l_Lean_Elab_runFrontend___lambda__6___closed__1; x_39 = 0; x_40 = l_Lean_Server_findModuleRefs(x_36, x_38, x_39, x_39); x_41 = l_Lean_Server_ModuleRefs_toLspModuleRefs(x_40, x_9); @@ -3224,7 +3489,7 @@ lean_inc(x_49); x_50 = lean_ctor_get(x_48, 1); lean_inc(x_50); lean_dec(x_48); -x_51 = l_Lean_Elab_runFrontend___lambda__4(x_1, x_2, x_3, x_4, x_5, x_49, x_50); +x_51 = l_Lean_Elab_runFrontend___lambda__5(x_1, x_2, x_3, x_4, x_5, x_49, x_50); lean_dec(x_49); return x_51; } @@ -3260,8 +3525,8 @@ size_t x_56; size_t x_57; lean_object* x_58; lean_object* x_59; uint8_t x_60; le x_56 = 0; x_57 = lean_usize_of_nat(x_14); 
lean_dec(x_14); -x_58 = l_Lean_Elab_runFrontend___lambda__5___closed__1; -x_59 = l_Array_foldlMUnsafe_fold___at_Lean_Elab_runFrontend___spec__2(x_13, x_56, x_57, x_58); +x_58 = l_Lean_Elab_runFrontend___lambda__6___closed__1; +x_59 = l_Array_foldlMUnsafe_fold___at_Lean_Elab_runFrontend___spec__3(x_13, x_56, x_57, x_58); lean_dec(x_13); x_60 = 0; x_61 = l_Lean_Server_findModuleRefs(x_36, x_59, x_60, x_60); @@ -3290,7 +3555,7 @@ lean_inc(x_70); x_71 = lean_ctor_get(x_69, 1); lean_inc(x_71); lean_dec(x_69); -x_72 = l_Lean_Elab_runFrontend___lambda__4(x_1, x_2, x_3, x_4, x_5, x_70, x_71); +x_72 = l_Lean_Elab_runFrontend___lambda__5(x_1, x_2, x_3, x_4, x_5, x_70, x_71); lean_dec(x_70); return x_72; } @@ -3324,7 +3589,7 @@ return x_76; } } } -static lean_object* _init_l_Lean_Elab_runFrontend___lambda__6___closed__1() { +static lean_object* _init_l_Lean_Elab_runFrontend___lambda__7___closed__1() { _start: { lean_object* x_1; @@ -3332,14 +3597,14 @@ x_1 = lean_mk_string_unchecked(".olean serialization", 20, 20); return x_1; } } -LEAN_EXPORT lean_object* l_Lean_Elab_runFrontend___lambda__6(lean_object* x_1, lean_object* x_2, lean_object* x_3, lean_object* x_4, double x_5, lean_object* x_6, lean_object* x_7, lean_object* x_8, lean_object* x_9, lean_object* x_10, lean_object* x_11) { +LEAN_EXPORT lean_object* l_Lean_Elab_runFrontend___lambda__7(lean_object* x_1, lean_object* x_2, lean_object* x_3, lean_object* x_4, double x_5, lean_object* x_6, lean_object* x_7, lean_object* x_8, lean_object* x_9, lean_object* x_10, lean_object* x_11) { _start: { if (lean_obj_tag(x_8) == 0) { lean_object* x_12; lean_object* x_13; x_12 = lean_box(0); -x_13 = l_Lean_Elab_runFrontend___lambda__5(x_1, x_2, x_3, x_4, x_5, x_6, x_7, x_12, x_11); +x_13 = l_Lean_Elab_runFrontend___lambda__6(x_1, x_2, x_3, x_4, x_5, x_6, x_7, x_12, x_11); return x_13; } else @@ -3352,7 +3617,7 @@ lean_inc(x_2); x_15 = lean_alloc_closure((void*)(l_Lean_writeModule), 3, 2); lean_closure_set(x_15, 0, x_2); lean_closure_set(x_15, 1, x_14); -x_16 = l_Lean_Elab_runFrontend___lambda__6___closed__1; +x_16 = l_Lean_Elab_runFrontend___lambda__7___closed__1; x_17 = lean_box(0); x_18 = l_Lean_profileitIOUnsafe___rarg(x_16, x_9, x_15, x_17, x_11); if (lean_obj_tag(x_18) == 0) @@ -3363,7 +3628,7 @@ lean_inc(x_19); x_20 = lean_ctor_get(x_18, 1); lean_inc(x_20); lean_dec(x_18); -x_21 = l_Lean_Elab_runFrontend___lambda__5(x_1, x_2, x_3, x_4, x_5, x_6, x_7, x_19, x_20); +x_21 = l_Lean_Elab_runFrontend___lambda__6(x_1, x_2, x_3, x_4, x_5, x_6, x_7, x_19, x_20); lean_dec(x_19); return x_21; } @@ -3396,7 +3661,7 @@ return x_25; } } } -LEAN_EXPORT lean_object* l_Lean_Elab_runFrontend___lambda__7(uint8_t x_1, lean_object* x_2, lean_object* x_3, lean_object* x_4, lean_object* x_5, double x_6, lean_object* x_7, lean_object* x_8, lean_object* x_9, lean_object* x_10, lean_object* x_11, lean_object* x_12, lean_object* x_13) { +LEAN_EXPORT lean_object* l_Lean_Elab_runFrontend___lambda__8(uint8_t x_1, lean_object* x_2, lean_object* x_3, lean_object* x_4, lean_object* x_5, double x_6, lean_object* x_7, lean_object* x_8, lean_object* x_9, lean_object* x_10, lean_object* x_11, lean_object* x_12, lean_object* x_13) { _start: { if (x_1 == 0) @@ -3404,7 +3669,7 @@ if (x_1 == 0) lean_object* x_14; lean_object* x_15; lean_dec(x_11); x_14 = lean_box(0); -x_15 = l_Lean_Elab_runFrontend___lambda__6(x_2, x_3, x_4, x_5, x_6, x_7, x_8, x_9, x_10, x_14, x_13); +x_15 = l_Lean_Elab_runFrontend___lambda__7(x_2, x_3, x_4, x_5, x_6, x_7, x_8, x_9, x_10, x_14, x_13); return x_15; } else @@ 
-3422,7 +3687,7 @@ return x_16; } } } -static lean_object* _init_l_Lean_Elab_runFrontend___lambda__8___closed__1() { +static lean_object* _init_l_Lean_Elab_runFrontend___lambda__9___closed__1() { _start: { lean_object* x_1; @@ -3430,7 +3695,7 @@ x_1 = lean_alloc_closure((void*)(l_Lean_Language_Lean_instToSnapshotTreeCommandP return x_1; } } -LEAN_EXPORT lean_object* l_Lean_Elab_runFrontend___lambda__8(lean_object* x_1, lean_object* x_2, lean_object* x_3) { +LEAN_EXPORT lean_object* l_Lean_Elab_runFrontend___lambda__9(lean_object* x_1, lean_object* x_2, lean_object* x_3) { _start: { lean_object* x_4; @@ -3484,7 +3749,7 @@ x_16 = lean_ctor_get(x_15, 0); lean_inc(x_16); x_17 = lean_ctor_get(x_15, 1); lean_inc(x_17); -x_18 = l_Lean_Elab_runFrontend___lambda__8___closed__1; +x_18 = l_Lean_Elab_runFrontend___lambda__9___closed__1; x_19 = 1; x_20 = l_Lean_Language_SnapshotTask_map___rarg(x_15, x_18, x_16, x_17, x_19); lean_ctor_set(x_4, 0, x_20); @@ -3505,7 +3770,7 @@ x_24 = lean_ctor_get(x_23, 0); lean_inc(x_24); x_25 = lean_ctor_get(x_23, 1); lean_inc(x_25); -x_26 = l_Lean_Elab_runFrontend___lambda__8___closed__1; +x_26 = l_Lean_Elab_runFrontend___lambda__9___closed__1; x_27 = 1; x_28 = l_Lean_Language_SnapshotTask_map___rarg(x_23, x_26, x_24, x_25, x_27); x_29 = lean_alloc_ctor(1, 1, 0); @@ -3537,7 +3802,7 @@ x_35 = lean_ctor_get(x_34, 0); lean_inc(x_35); x_36 = lean_ctor_get(x_34, 1); lean_inc(x_36); -x_37 = l_Lean_Elab_runFrontend___lambda__8___closed__1; +x_37 = l_Lean_Elab_runFrontend___lambda__9___closed__1; x_38 = 1; x_39 = l_Lean_Language_SnapshotTask_map___rarg(x_34, x_37, x_35, x_36, x_38); if (lean_is_scalar(x_33)) { @@ -3592,259 +3857,260 @@ x_3 = l___private_Lean_Language_Lean_Types_0__Lean_Language_Lean_pushOpt___rarg( return x_3; } } -LEAN_EXPORT lean_object* lean_run_frontend(lean_object* x_1, lean_object* x_2, lean_object* x_3, lean_object* x_4, uint32_t x_5, lean_object* x_6, lean_object* x_7, uint8_t x_8, lean_object* x_9, lean_object* x_10, uint8_t x_11, lean_object* x_12) { +LEAN_EXPORT lean_object* lean_run_frontend(lean_object* x_1, lean_object* x_2, lean_object* x_3, lean_object* x_4, uint32_t x_5, lean_object* x_6, lean_object* x_7, uint8_t x_8, lean_object* x_9, lean_object* x_10, uint8_t x_11, lean_object* x_12, lean_object* x_13) { _start: { -lean_object* x_13; lean_object* x_14; lean_object* x_15; uint8_t x_16; lean_object* x_17; double x_18; double x_19; double x_20; uint8_t x_21; lean_object* x_22; lean_object* x_23; lean_object* x_24; lean_object* x_25; lean_object* x_26; lean_object* x_27; lean_object* x_28; lean_object* x_29; lean_object* x_30; uint8_t x_31; -x_13 = lean_io_mono_nanos_now(x_12); -x_14 = lean_ctor_get(x_13, 0); -lean_inc(x_14); -x_15 = lean_ctor_get(x_13, 1); +lean_object* x_14; lean_object* x_15; lean_object* x_16; uint8_t x_17; lean_object* x_18; double x_19; double x_20; double x_21; uint8_t x_22; lean_object* x_23; lean_object* x_24; lean_object* x_25; lean_object* x_26; lean_object* x_27; lean_object* x_28; lean_object* x_29; lean_object* x_30; lean_object* x_31; uint8_t x_32; +x_14 = lean_io_mono_nanos_now(x_13); +x_15 = lean_ctor_get(x_14, 0); lean_inc(x_15); -lean_dec(x_13); -x_16 = 0; -x_17 = lean_unsigned_to_nat(0u); -x_18 = l_Float_ofScientific(x_14, x_16, x_17); +x_16 = lean_ctor_get(x_14, 1); +lean_inc(x_16); lean_dec(x_14); -x_19 = l_Lean_Elab_runFrontend___closed__1; -x_20 = lean_float_div(x_18, x_19); -x_21 = 1; -x_22 = l_Lean_Parser_mkInputContext(x_1, x_3, x_21); -x_23 = l_Lean_Elab_runFrontend___closed__2; -x_24 = 
l_Lean_Option_setIfNotSet___at_Lean_Language_Lean_process_processHeader___spec__2(x_2, x_23, x_21); -x_25 = l_Lean_Elab_runFrontend___closed__3; -x_26 = l_Lean_Option_setIfNotSet___at_Lean_Language_Lean_process_processHeader___spec__2(x_24, x_25, x_21); -x_27 = lean_box_uint32(x_5); -lean_inc(x_26); +x_17 = 0; +x_18 = lean_unsigned_to_nat(0u); +x_19 = l_Float_ofScientific(x_15, x_17, x_18); +lean_dec(x_15); +x_20 = l_Lean_Elab_runFrontend___closed__1; +x_21 = lean_float_div(x_19, x_20); +x_22 = 1; +x_23 = l_Lean_Parser_mkInputContext(x_1, x_3, x_22); +x_24 = l_Lean_Elab_runFrontend___closed__2; +x_25 = l_Lean_Option_setIfNotSet___at_Lean_Language_Lean_process_processHeader___spec__2(x_2, x_24, x_22); +x_26 = l_Lean_Elab_runFrontend___closed__3; +x_27 = l_Lean_Option_setIfNotSet___at_Lean_Language_Lean_process_processHeader___spec__2(x_25, x_26, x_22); +x_28 = lean_box_uint32(x_5); +lean_inc(x_27); lean_inc(x_4); -x_28 = lean_alloc_closure((void*)(l_Lean_Elab_runFrontend___lambda__1___boxed), 7, 4); -lean_closure_set(x_28, 0, x_4); -lean_closure_set(x_28, 1, x_26); -lean_closure_set(x_28, 2, x_27); -lean_closure_set(x_28, 3, x_10); -x_29 = lean_box(0); -lean_inc(x_22); -x_30 = l_Lean_Language_Lean_process(x_28, x_29, x_22, x_15); -x_31 = !lean_is_exclusive(x_30); -if (x_31 == 0) -{ -lean_object* x_32; lean_object* x_33; lean_object* x_34; lean_object* x_35; lean_object* x_36; lean_object* x_37; uint8_t x_38; lean_object* x_39; -x_32 = lean_ctor_get(x_30, 0); -x_33 = lean_ctor_get(x_30, 1); -x_34 = lean_ctor_get(x_32, 0); -lean_inc(x_34); -x_35 = lean_ctor_get(x_32, 3); +x_29 = lean_alloc_closure((void*)(l_Lean_Elab_runFrontend___lambda__2___boxed), 8, 5); +lean_closure_set(x_29, 0, x_12); +lean_closure_set(x_29, 1, x_4); +lean_closure_set(x_29, 2, x_27); +lean_closure_set(x_29, 3, x_28); +lean_closure_set(x_29, 4, x_10); +x_30 = lean_box(0); +lean_inc(x_23); +x_31 = l_Lean_Language_Lean_process(x_29, x_30, x_23, x_16); +x_32 = !lean_is_exclusive(x_31); +if (x_32 == 0) +{ +lean_object* x_33; lean_object* x_34; lean_object* x_35; lean_object* x_36; lean_object* x_37; lean_object* x_38; uint8_t x_39; lean_object* x_40; +x_33 = lean_ctor_get(x_31, 0); +x_34 = lean_ctor_get(x_31, 1); +x_35 = lean_ctor_get(x_33, 0); lean_inc(x_35); -x_36 = lean_box(0); -x_37 = lean_array_get_size(x_9); -x_38 = lean_nat_dec_lt(x_17, x_37); -if (lean_obj_tag(x_35) == 0) -{ -lean_object* x_98; -x_98 = l_Lean_Elab_runFrontend___closed__4; -lean_ctor_set(x_30, 1, x_98); -lean_ctor_set(x_30, 0, x_34); -x_39 = x_30; -goto block_97; +x_36 = lean_ctor_get(x_33, 3); +lean_inc(x_36); +x_37 = lean_box(0); +x_38 = lean_array_get_size(x_9); +x_39 = lean_nat_dec_lt(x_18, x_38); +if (lean_obj_tag(x_36) == 0) +{ +lean_object* x_99; +x_99 = l_Lean_Elab_runFrontend___closed__4; +lean_ctor_set(x_31, 1, x_99); +lean_ctor_set(x_31, 0, x_35); +x_40 = x_31; +goto block_98; } else { -uint8_t x_99; -x_99 = !lean_is_exclusive(x_35); -if (x_99 == 0) +uint8_t x_100; +x_100 = !lean_is_exclusive(x_36); +if (x_100 == 0) { -lean_object* x_100; lean_object* x_101; lean_object* x_102; lean_object* x_103; lean_object* x_104; lean_object* x_105; lean_object* x_106; lean_object* x_107; -x_100 = lean_ctor_get(x_35, 0); -x_101 = lean_ctor_get(x_100, 1); -lean_inc(x_101); -lean_dec(x_100); -x_102 = l_Array_filterMapM___at_Lean_Elab_IO_processCommandsIncrementally_go___spec__2___closed__1; -x_103 = lean_alloc_closure((void*)(l_Lean_Elab_runFrontend___lambda__8), 3, 2); -lean_closure_set(x_103, 0, x_29); -lean_closure_set(x_103, 1, x_102); -x_104 = 
lean_ctor_get(x_101, 0); -lean_inc(x_104); -x_105 = lean_ctor_get(x_101, 1); +lean_object* x_101; lean_object* x_102; lean_object* x_103; lean_object* x_104; lean_object* x_105; lean_object* x_106; lean_object* x_107; lean_object* x_108; +x_101 = lean_ctor_get(x_36, 0); +x_102 = lean_ctor_get(x_101, 1); +lean_inc(x_102); +lean_dec(x_101); +x_103 = l_Array_filterMapM___at_Lean_Elab_IO_processCommandsIncrementally_go___spec__2___closed__1; +x_104 = lean_alloc_closure((void*)(l_Lean_Elab_runFrontend___lambda__9), 3, 2); +lean_closure_set(x_104, 0, x_30); +lean_closure_set(x_104, 1, x_103); +x_105 = lean_ctor_get(x_102, 0); lean_inc(x_105); -x_106 = l_Lean_Language_SnapshotTask_map___rarg(x_101, x_103, x_104, x_105, x_21); -lean_ctor_set(x_35, 0, x_106); -x_107 = l___private_Lean_Language_Lean_Types_0__Lean_Language_Lean_pushOpt___rarg(x_35, x_102); -lean_ctor_set(x_30, 1, x_107); -lean_ctor_set(x_30, 0, x_34); -x_39 = x_30; -goto block_97; +x_106 = lean_ctor_get(x_102, 1); +lean_inc(x_106); +x_107 = l_Lean_Language_SnapshotTask_map___rarg(x_102, x_104, x_105, x_106, x_22); +lean_ctor_set(x_36, 0, x_107); +x_108 = l___private_Lean_Language_Lean_Types_0__Lean_Language_Lean_pushOpt___rarg(x_36, x_103); +lean_ctor_set(x_31, 1, x_108); +lean_ctor_set(x_31, 0, x_35); +x_40 = x_31; +goto block_98; } else { -lean_object* x_108; lean_object* x_109; lean_object* x_110; lean_object* x_111; lean_object* x_112; lean_object* x_113; lean_object* x_114; lean_object* x_115; lean_object* x_116; -x_108 = lean_ctor_get(x_35, 0); -lean_inc(x_108); -lean_dec(x_35); -x_109 = lean_ctor_get(x_108, 1); +lean_object* x_109; lean_object* x_110; lean_object* x_111; lean_object* x_112; lean_object* x_113; lean_object* x_114; lean_object* x_115; lean_object* x_116; lean_object* x_117; +x_109 = lean_ctor_get(x_36, 0); lean_inc(x_109); -lean_dec(x_108); -x_110 = l_Array_filterMapM___at_Lean_Elab_IO_processCommandsIncrementally_go___spec__2___closed__1; -x_111 = lean_alloc_closure((void*)(l_Lean_Elab_runFrontend___lambda__8), 3, 2); -lean_closure_set(x_111, 0, x_29); -lean_closure_set(x_111, 1, x_110); -x_112 = lean_ctor_get(x_109, 0); -lean_inc(x_112); -x_113 = lean_ctor_get(x_109, 1); +lean_dec(x_36); +x_110 = lean_ctor_get(x_109, 1); +lean_inc(x_110); +lean_dec(x_109); +x_111 = l_Array_filterMapM___at_Lean_Elab_IO_processCommandsIncrementally_go___spec__2___closed__1; +x_112 = lean_alloc_closure((void*)(l_Lean_Elab_runFrontend___lambda__9), 3, 2); +lean_closure_set(x_112, 0, x_30); +lean_closure_set(x_112, 1, x_111); +x_113 = lean_ctor_get(x_110, 0); lean_inc(x_113); -x_114 = l_Lean_Language_SnapshotTask_map___rarg(x_109, x_111, x_112, x_113, x_21); -x_115 = lean_alloc_ctor(1, 1, 0); -lean_ctor_set(x_115, 0, x_114); -x_116 = l___private_Lean_Language_Lean_Types_0__Lean_Language_Lean_pushOpt___rarg(x_115, x_110); -lean_ctor_set(x_30, 1, x_116); -lean_ctor_set(x_30, 0, x_34); -x_39 = x_30; -goto block_97; +x_114 = lean_ctor_get(x_110, 1); +lean_inc(x_114); +x_115 = l_Lean_Language_SnapshotTask_map___rarg(x_110, x_112, x_113, x_114, x_22); +x_116 = lean_alloc_ctor(1, 1, 0); +lean_ctor_set(x_116, 0, x_115); +x_117 = l___private_Lean_Language_Lean_Types_0__Lean_Language_Lean_pushOpt___rarg(x_116, x_111); +lean_ctor_set(x_31, 1, x_117); +lean_ctor_set(x_31, 0, x_35); +x_40 = x_31; +goto block_98; } } -block_97: +block_98: { -lean_object* x_40; -if (x_38 == 0) +lean_object* x_41; +if (x_39 == 0) { -lean_dec(x_37); +lean_dec(x_38); lean_dec(x_9); -x_40 = x_36; -goto block_92; +x_41 = x_37; +goto block_93; } else { -uint8_t x_93; 
-x_93 = lean_nat_dec_le(x_37, x_37); -if (x_93 == 0) +uint8_t x_94; +x_94 = lean_nat_dec_le(x_38, x_38); +if (x_94 == 0) { -lean_dec(x_37); +lean_dec(x_38); lean_dec(x_9); -x_40 = x_36; -goto block_92; +x_41 = x_37; +goto block_93; } else { -size_t x_94; size_t x_95; lean_object* x_96; -x_94 = 0; -x_95 = lean_usize_of_nat(x_37); -lean_dec(x_37); -x_96 = l_Array_foldlMUnsafe_fold___at_Lean_Elab_runFrontend___spec__3(x_9, x_94, x_95, x_36); +size_t x_95; size_t x_96; lean_object* x_97; +x_95 = 0; +x_96 = lean_usize_of_nat(x_38); +lean_dec(x_38); +x_97 = l_Array_foldlMUnsafe_fold___at_Lean_Elab_runFrontend___spec__4(x_9, x_95, x_96, x_37); lean_dec(x_9); -x_40 = x_96; -goto block_92; +x_41 = x_97; +goto block_93; } } -block_92: +block_93: { -lean_object* x_41; -x_41 = l_Lean_Language_SnapshotTree_foldM___at_Lean_Language_SnapshotTree_runAndReport___spec__1(x_26, x_8, x_40, x_39, x_16, x_33); -lean_dec(x_40); -if (lean_obj_tag(x_41) == 0) +lean_object* x_42; +x_42 = l_Lean_Language_SnapshotTree_foldM___at_Lean_Language_SnapshotTree_runAndReport___spec__1(x_27, x_8, x_41, x_40, x_17, x_34); +lean_dec(x_41); +if (lean_obj_tag(x_42) == 0) { -uint8_t x_42; -x_42 = !lean_is_exclusive(x_41); -if (x_42 == 0) +uint8_t x_43; +x_43 = !lean_is_exclusive(x_42); +if (x_43 == 0) { -lean_object* x_43; lean_object* x_44; lean_object* x_45; -x_43 = lean_ctor_get(x_41, 0); -x_44 = lean_ctor_get(x_41, 1); -x_45 = l_Lean_Language_Lean_waitForFinalCmdState_x3f(x_32); -if (lean_obj_tag(x_45) == 0) +lean_object* x_44; lean_object* x_45; lean_object* x_46; +x_44 = lean_ctor_get(x_42, 0); +x_45 = lean_ctor_get(x_42, 1); +x_46 = l_Lean_Language_Lean_waitForFinalCmdState_x3f(x_33); +if (lean_obj_tag(x_46) == 0) { -lean_dec(x_43); -lean_dec(x_39); -lean_dec(x_26); -lean_dec(x_22); +lean_dec(x_44); +lean_dec(x_40); +lean_dec(x_27); +lean_dec(x_23); lean_dec(x_7); lean_dec(x_6); lean_dec(x_4); -lean_ctor_set(x_41, 0, x_29); -return x_41; +lean_ctor_set(x_42, 0, x_30); +return x_42; } else { -lean_object* x_46; lean_object* x_47; lean_object* x_48; lean_object* x_49; lean_object* x_50; -lean_free_object(x_41); -x_46 = lean_ctor_get(x_45, 0); -lean_inc(x_46); -lean_dec(x_45); +lean_object* x_47; lean_object* x_48; lean_object* x_49; lean_object* x_50; lean_object* x_51; +lean_free_object(x_42); x_47 = lean_ctor_get(x_46, 0); lean_inc(x_47); -x_48 = lean_ctor_get(x_46, 2); -lean_inc(x_48); lean_dec(x_46); -x_49 = l_Lean_Elab_Command_instInhabitedScope; -x_50 = l___private_Init_GetElem_0__List_get_x21Internal___rarg(x_49, x_48, x_17); -lean_dec(x_48); +x_48 = lean_ctor_get(x_47, 0); +lean_inc(x_48); +x_49 = lean_ctor_get(x_47, 2); +lean_inc(x_49); +lean_dec(x_47); +x_50 = l_Lean_Elab_Command_instInhabitedScope; +x_51 = l___private_Init_GetElem_0__List_get_x21Internal___rarg(x_50, x_49, x_18); +lean_dec(x_49); if (x_11 == 0) { -lean_object* x_51; lean_object* x_52; uint8_t x_53; lean_object* x_54; -x_51 = lean_ctor_get(x_50, 1); -lean_inc(x_51); -lean_dec(x_50); -x_52 = lean_box(0); -x_53 = lean_unbox(x_43); -lean_dec(x_43); -x_54 = l_Lean_Elab_runFrontend___lambda__7(x_53, x_39, x_47, x_26, x_4, x_20, x_7, x_22, x_6, x_51, x_29, x_52, x_44); +lean_object* x_52; lean_object* x_53; uint8_t x_54; lean_object* x_55; +x_52 = lean_ctor_get(x_51, 1); +lean_inc(x_52); lean_dec(x_51); +x_53 = lean_box(0); +x_54 = lean_unbox(x_44); +lean_dec(x_44); +x_55 = l_Lean_Elab_runFrontend___lambda__8(x_54, x_40, x_48, x_27, x_4, x_21, x_7, x_23, x_6, x_52, x_30, x_53, x_45); +lean_dec(x_52); lean_dec(x_7); -lean_dec(x_26); -return x_54; 
+lean_dec(x_27); +return x_55; } else { -lean_object* x_55; lean_object* x_56; -x_55 = lean_ctor_get(x_50, 1); -lean_inc(x_55); -lean_dec(x_50); -lean_inc(x_47); -x_56 = l_Lean_Environment_displayStats(x_47, x_44); -if (lean_obj_tag(x_56) == 0) +lean_object* x_56; lean_object* x_57; +x_56 = lean_ctor_get(x_51, 1); +lean_inc(x_56); +lean_dec(x_51); +lean_inc(x_48); +x_57 = l_Lean_Environment_displayStats(x_48, x_45); +if (lean_obj_tag(x_57) == 0) { -lean_object* x_57; lean_object* x_58; uint8_t x_59; lean_object* x_60; -x_57 = lean_ctor_get(x_56, 0); -lean_inc(x_57); -x_58 = lean_ctor_get(x_56, 1); +lean_object* x_58; lean_object* x_59; uint8_t x_60; lean_object* x_61; +x_58 = lean_ctor_get(x_57, 0); lean_inc(x_58); -lean_dec(x_56); -x_59 = lean_unbox(x_43); -lean_dec(x_43); -x_60 = l_Lean_Elab_runFrontend___lambda__7(x_59, x_39, x_47, x_26, x_4, x_20, x_7, x_22, x_6, x_55, x_29, x_57, x_58); +x_59 = lean_ctor_get(x_57, 1); +lean_inc(x_59); lean_dec(x_57); -lean_dec(x_55); +x_60 = lean_unbox(x_44); +lean_dec(x_44); +x_61 = l_Lean_Elab_runFrontend___lambda__8(x_60, x_40, x_48, x_27, x_4, x_21, x_7, x_23, x_6, x_56, x_30, x_58, x_59); +lean_dec(x_58); +lean_dec(x_56); lean_dec(x_7); -lean_dec(x_26); -return x_60; +lean_dec(x_27); +return x_61; } else { -uint8_t x_61; -lean_dec(x_55); -lean_dec(x_47); -lean_dec(x_43); -lean_dec(x_39); -lean_dec(x_26); -lean_dec(x_22); +uint8_t x_62; +lean_dec(x_56); +lean_dec(x_48); +lean_dec(x_44); +lean_dec(x_40); +lean_dec(x_27); +lean_dec(x_23); lean_dec(x_7); lean_dec(x_6); lean_dec(x_4); -x_61 = !lean_is_exclusive(x_56); -if (x_61 == 0) +x_62 = !lean_is_exclusive(x_57); +if (x_62 == 0) { -return x_56; +return x_57; } else { -lean_object* x_62; lean_object* x_63; lean_object* x_64; -x_62 = lean_ctor_get(x_56, 0); -x_63 = lean_ctor_get(x_56, 1); +lean_object* x_63; lean_object* x_64; lean_object* x_65; +x_63 = lean_ctor_get(x_57, 0); +x_64 = lean_ctor_get(x_57, 1); +lean_inc(x_64); lean_inc(x_63); -lean_inc(x_62); -lean_dec(x_56); -x_64 = lean_alloc_ctor(1, 2, 0); -lean_ctor_set(x_64, 0, x_62); -lean_ctor_set(x_64, 1, x_63); -return x_64; +lean_dec(x_57); +x_65 = lean_alloc_ctor(1, 2, 0); +lean_ctor_set(x_65, 0, x_63); +lean_ctor_set(x_65, 1, x_64); +return x_65; } } } @@ -3852,114 +4118,114 @@ return x_64; } else { -lean_object* x_65; lean_object* x_66; lean_object* x_67; -x_65 = lean_ctor_get(x_41, 0); -x_66 = lean_ctor_get(x_41, 1); +lean_object* x_66; lean_object* x_67; lean_object* x_68; +x_66 = lean_ctor_get(x_42, 0); +x_67 = lean_ctor_get(x_42, 1); +lean_inc(x_67); lean_inc(x_66); -lean_inc(x_65); -lean_dec(x_41); -x_67 = l_Lean_Language_Lean_waitForFinalCmdState_x3f(x_32); -if (lean_obj_tag(x_67) == 0) +lean_dec(x_42); +x_68 = l_Lean_Language_Lean_waitForFinalCmdState_x3f(x_33); +if (lean_obj_tag(x_68) == 0) { -lean_object* x_68; -lean_dec(x_65); -lean_dec(x_39); -lean_dec(x_26); -lean_dec(x_22); +lean_object* x_69; +lean_dec(x_66); +lean_dec(x_40); +lean_dec(x_27); +lean_dec(x_23); lean_dec(x_7); lean_dec(x_6); lean_dec(x_4); -x_68 = lean_alloc_ctor(0, 2, 0); -lean_ctor_set(x_68, 0, x_29); -lean_ctor_set(x_68, 1, x_66); -return x_68; +x_69 = lean_alloc_ctor(0, 2, 0); +lean_ctor_set(x_69, 0, x_30); +lean_ctor_set(x_69, 1, x_67); +return x_69; } else { -lean_object* x_69; lean_object* x_70; lean_object* x_71; lean_object* x_72; lean_object* x_73; -x_69 = lean_ctor_get(x_67, 0); -lean_inc(x_69); -lean_dec(x_67); -x_70 = lean_ctor_get(x_69, 0); +lean_object* x_70; lean_object* x_71; lean_object* x_72; lean_object* x_73; lean_object* x_74; +x_70 = 
lean_ctor_get(x_68, 0); lean_inc(x_70); -x_71 = lean_ctor_get(x_69, 2); +lean_dec(x_68); +x_71 = lean_ctor_get(x_70, 0); lean_inc(x_71); -lean_dec(x_69); -x_72 = l_Lean_Elab_Command_instInhabitedScope; -x_73 = l___private_Init_GetElem_0__List_get_x21Internal___rarg(x_72, x_71, x_17); -lean_dec(x_71); +x_72 = lean_ctor_get(x_70, 2); +lean_inc(x_72); +lean_dec(x_70); +x_73 = l_Lean_Elab_Command_instInhabitedScope; +x_74 = l___private_Init_GetElem_0__List_get_x21Internal___rarg(x_73, x_72, x_18); +lean_dec(x_72); if (x_11 == 0) { -lean_object* x_74; lean_object* x_75; uint8_t x_76; lean_object* x_77; -x_74 = lean_ctor_get(x_73, 1); -lean_inc(x_74); -lean_dec(x_73); -x_75 = lean_box(0); -x_76 = lean_unbox(x_65); -lean_dec(x_65); -x_77 = l_Lean_Elab_runFrontend___lambda__7(x_76, x_39, x_70, x_26, x_4, x_20, x_7, x_22, x_6, x_74, x_29, x_75, x_66); +lean_object* x_75; lean_object* x_76; uint8_t x_77; lean_object* x_78; +x_75 = lean_ctor_get(x_74, 1); +lean_inc(x_75); lean_dec(x_74); +x_76 = lean_box(0); +x_77 = lean_unbox(x_66); +lean_dec(x_66); +x_78 = l_Lean_Elab_runFrontend___lambda__8(x_77, x_40, x_71, x_27, x_4, x_21, x_7, x_23, x_6, x_75, x_30, x_76, x_67); +lean_dec(x_75); lean_dec(x_7); -lean_dec(x_26); -return x_77; +lean_dec(x_27); +return x_78; } else { -lean_object* x_78; lean_object* x_79; -x_78 = lean_ctor_get(x_73, 1); -lean_inc(x_78); -lean_dec(x_73); -lean_inc(x_70); -x_79 = l_Lean_Environment_displayStats(x_70, x_66); -if (lean_obj_tag(x_79) == 0) +lean_object* x_79; lean_object* x_80; +x_79 = lean_ctor_get(x_74, 1); +lean_inc(x_79); +lean_dec(x_74); +lean_inc(x_71); +x_80 = l_Lean_Environment_displayStats(x_71, x_67); +if (lean_obj_tag(x_80) == 0) { -lean_object* x_80; lean_object* x_81; uint8_t x_82; lean_object* x_83; -x_80 = lean_ctor_get(x_79, 0); -lean_inc(x_80); -x_81 = lean_ctor_get(x_79, 1); +lean_object* x_81; lean_object* x_82; uint8_t x_83; lean_object* x_84; +x_81 = lean_ctor_get(x_80, 0); lean_inc(x_81); -lean_dec(x_79); -x_82 = lean_unbox(x_65); -lean_dec(x_65); -x_83 = l_Lean_Elab_runFrontend___lambda__7(x_82, x_39, x_70, x_26, x_4, x_20, x_7, x_22, x_6, x_78, x_29, x_80, x_81); +x_82 = lean_ctor_get(x_80, 1); +lean_inc(x_82); lean_dec(x_80); -lean_dec(x_78); +x_83 = lean_unbox(x_66); +lean_dec(x_66); +x_84 = l_Lean_Elab_runFrontend___lambda__8(x_83, x_40, x_71, x_27, x_4, x_21, x_7, x_23, x_6, x_79, x_30, x_81, x_82); +lean_dec(x_81); +lean_dec(x_79); lean_dec(x_7); -lean_dec(x_26); -return x_83; +lean_dec(x_27); +return x_84; } else { -lean_object* x_84; lean_object* x_85; lean_object* x_86; lean_object* x_87; -lean_dec(x_78); -lean_dec(x_70); -lean_dec(x_65); -lean_dec(x_39); -lean_dec(x_26); -lean_dec(x_22); +lean_object* x_85; lean_object* x_86; lean_object* x_87; lean_object* x_88; +lean_dec(x_79); +lean_dec(x_71); +lean_dec(x_66); +lean_dec(x_40); +lean_dec(x_27); +lean_dec(x_23); lean_dec(x_7); lean_dec(x_6); lean_dec(x_4); -x_84 = lean_ctor_get(x_79, 0); -lean_inc(x_84); -x_85 = lean_ctor_get(x_79, 1); +x_85 = lean_ctor_get(x_80, 0); lean_inc(x_85); -if (lean_is_exclusive(x_79)) { - lean_ctor_release(x_79, 0); - lean_ctor_release(x_79, 1); - x_86 = x_79; +x_86 = lean_ctor_get(x_80, 1); +lean_inc(x_86); +if (lean_is_exclusive(x_80)) { + lean_ctor_release(x_80, 0); + lean_ctor_release(x_80, 1); + x_87 = x_80; } else { - lean_dec_ref(x_79); - x_86 = lean_box(0); + lean_dec_ref(x_80); + x_87 = lean_box(0); } -if (lean_is_scalar(x_86)) { - x_87 = lean_alloc_ctor(1, 2, 0); +if (lean_is_scalar(x_87)) { + x_88 = lean_alloc_ctor(1, 2, 0); } else { - x_87 = x_86; 
+ x_88 = x_87; } -lean_ctor_set(x_87, 0, x_84); -lean_ctor_set(x_87, 1, x_85); -return x_87; +lean_ctor_set(x_88, 0, x_85); +lean_ctor_set(x_88, 1, x_86); +return x_88; } } } @@ -3967,31 +4233,31 @@ return x_87; } else { -uint8_t x_88; -lean_dec(x_39); -lean_dec(x_32); -lean_dec(x_26); -lean_dec(x_22); +uint8_t x_89; +lean_dec(x_40); +lean_dec(x_33); +lean_dec(x_27); +lean_dec(x_23); lean_dec(x_7); lean_dec(x_6); lean_dec(x_4); -x_88 = !lean_is_exclusive(x_41); -if (x_88 == 0) +x_89 = !lean_is_exclusive(x_42); +if (x_89 == 0) { -return x_41; +return x_42; } else { -lean_object* x_89; lean_object* x_90; lean_object* x_91; -x_89 = lean_ctor_get(x_41, 0); -x_90 = lean_ctor_get(x_41, 1); +lean_object* x_90; lean_object* x_91; lean_object* x_92; +x_90 = lean_ctor_get(x_42, 0); +x_91 = lean_ctor_get(x_42, 1); +lean_inc(x_91); lean_inc(x_90); -lean_inc(x_89); -lean_dec(x_41); -x_91 = lean_alloc_ctor(1, 2, 0); -lean_ctor_set(x_91, 0, x_89); -lean_ctor_set(x_91, 1, x_90); -return x_91; +lean_dec(x_42); +x_92 = lean_alloc_ctor(1, 2, 0); +lean_ctor_set(x_92, 0, x_90); +lean_ctor_set(x_92, 1, x_91); +return x_92; } } } @@ -3999,267 +4265,280 @@ return x_91; } else { -lean_object* x_117; lean_object* x_118; lean_object* x_119; lean_object* x_120; lean_object* x_121; lean_object* x_122; uint8_t x_123; lean_object* x_124; -x_117 = lean_ctor_get(x_30, 0); -x_118 = lean_ctor_get(x_30, 1); -lean_inc(x_118); -lean_inc(x_117); -lean_dec(x_30); -x_119 = lean_ctor_get(x_117, 0); +lean_object* x_118; lean_object* x_119; lean_object* x_120; lean_object* x_121; lean_object* x_122; lean_object* x_123; uint8_t x_124; lean_object* x_125; +x_118 = lean_ctor_get(x_31, 0); +x_119 = lean_ctor_get(x_31, 1); lean_inc(x_119); -x_120 = lean_ctor_get(x_117, 3); +lean_inc(x_118); +lean_dec(x_31); +x_120 = lean_ctor_get(x_118, 0); lean_inc(x_120); -x_121 = lean_box(0); -x_122 = lean_array_get_size(x_9); -x_123 = lean_nat_dec_lt(x_17, x_122); -if (lean_obj_tag(x_120) == 0) -{ -lean_object* x_161; lean_object* x_162; -x_161 = l_Lean_Elab_runFrontend___closed__4; -x_162 = lean_alloc_ctor(0, 2, 0); -lean_ctor_set(x_162, 0, x_119); -lean_ctor_set(x_162, 1, x_161); -x_124 = x_162; -goto block_160; +x_121 = lean_ctor_get(x_118, 3); +lean_inc(x_121); +x_122 = lean_box(0); +x_123 = lean_array_get_size(x_9); +x_124 = lean_nat_dec_lt(x_18, x_123); +if (lean_obj_tag(x_121) == 0) +{ +lean_object* x_162; lean_object* x_163; +x_162 = l_Lean_Elab_runFrontend___closed__4; +x_163 = lean_alloc_ctor(0, 2, 0); +lean_ctor_set(x_163, 0, x_120); +lean_ctor_set(x_163, 1, x_162); +x_125 = x_163; +goto block_161; } else { -lean_object* x_163; lean_object* x_164; lean_object* x_165; lean_object* x_166; lean_object* x_167; lean_object* x_168; lean_object* x_169; lean_object* x_170; lean_object* x_171; lean_object* x_172; lean_object* x_173; -x_163 = lean_ctor_get(x_120, 0); -lean_inc(x_163); -if (lean_is_exclusive(x_120)) { - lean_ctor_release(x_120, 0); - x_164 = x_120; +lean_object* x_164; lean_object* x_165; lean_object* x_166; lean_object* x_167; lean_object* x_168; lean_object* x_169; lean_object* x_170; lean_object* x_171; lean_object* x_172; lean_object* x_173; lean_object* x_174; +x_164 = lean_ctor_get(x_121, 0); +lean_inc(x_164); +if (lean_is_exclusive(x_121)) { + lean_ctor_release(x_121, 0); + x_165 = x_121; } else { - lean_dec_ref(x_120); - x_164 = lean_box(0); -} -x_165 = lean_ctor_get(x_163, 1); -lean_inc(x_165); -lean_dec(x_163); -x_166 = l_Array_filterMapM___at_Lean_Elab_IO_processCommandsIncrementally_go___spec__2___closed__1; -x_167 = 
lean_alloc_closure((void*)(l_Lean_Elab_runFrontend___lambda__8), 3, 2); -lean_closure_set(x_167, 0, x_29); -lean_closure_set(x_167, 1, x_166); -x_168 = lean_ctor_get(x_165, 0); -lean_inc(x_168); -x_169 = lean_ctor_get(x_165, 1); + lean_dec_ref(x_121); + x_165 = lean_box(0); +} +x_166 = lean_ctor_get(x_164, 1); +lean_inc(x_166); +lean_dec(x_164); +x_167 = l_Array_filterMapM___at_Lean_Elab_IO_processCommandsIncrementally_go___spec__2___closed__1; +x_168 = lean_alloc_closure((void*)(l_Lean_Elab_runFrontend___lambda__9), 3, 2); +lean_closure_set(x_168, 0, x_30); +lean_closure_set(x_168, 1, x_167); +x_169 = lean_ctor_get(x_166, 0); lean_inc(x_169); -x_170 = l_Lean_Language_SnapshotTask_map___rarg(x_165, x_167, x_168, x_169, x_21); -if (lean_is_scalar(x_164)) { - x_171 = lean_alloc_ctor(1, 1, 0); +x_170 = lean_ctor_get(x_166, 1); +lean_inc(x_170); +x_171 = l_Lean_Language_SnapshotTask_map___rarg(x_166, x_168, x_169, x_170, x_22); +if (lean_is_scalar(x_165)) { + x_172 = lean_alloc_ctor(1, 1, 0); } else { - x_171 = x_164; + x_172 = x_165; } -lean_ctor_set(x_171, 0, x_170); -x_172 = l___private_Lean_Language_Lean_Types_0__Lean_Language_Lean_pushOpt___rarg(x_171, x_166); -x_173 = lean_alloc_ctor(0, 2, 0); -lean_ctor_set(x_173, 0, x_119); -lean_ctor_set(x_173, 1, x_172); -x_124 = x_173; -goto block_160; +lean_ctor_set(x_172, 0, x_171); +x_173 = l___private_Lean_Language_Lean_Types_0__Lean_Language_Lean_pushOpt___rarg(x_172, x_167); +x_174 = lean_alloc_ctor(0, 2, 0); +lean_ctor_set(x_174, 0, x_120); +lean_ctor_set(x_174, 1, x_173); +x_125 = x_174; +goto block_161; } -block_160: +block_161: { -lean_object* x_125; -if (x_123 == 0) +lean_object* x_126; +if (x_124 == 0) { -lean_dec(x_122); +lean_dec(x_123); lean_dec(x_9); -x_125 = x_121; -goto block_155; +x_126 = x_122; +goto block_156; } else { -uint8_t x_156; -x_156 = lean_nat_dec_le(x_122, x_122); -if (x_156 == 0) +uint8_t x_157; +x_157 = lean_nat_dec_le(x_123, x_123); +if (x_157 == 0) { -lean_dec(x_122); +lean_dec(x_123); lean_dec(x_9); -x_125 = x_121; -goto block_155; +x_126 = x_122; +goto block_156; } else { -size_t x_157; size_t x_158; lean_object* x_159; -x_157 = 0; -x_158 = lean_usize_of_nat(x_122); -lean_dec(x_122); -x_159 = l_Array_foldlMUnsafe_fold___at_Lean_Elab_runFrontend___spec__3(x_9, x_157, x_158, x_121); +size_t x_158; size_t x_159; lean_object* x_160; +x_158 = 0; +x_159 = lean_usize_of_nat(x_123); +lean_dec(x_123); +x_160 = l_Array_foldlMUnsafe_fold___at_Lean_Elab_runFrontend___spec__4(x_9, x_158, x_159, x_122); lean_dec(x_9); -x_125 = x_159; -goto block_155; +x_126 = x_160; +goto block_156; } } -block_155: +block_156: { -lean_object* x_126; -x_126 = l_Lean_Language_SnapshotTree_foldM___at_Lean_Language_SnapshotTree_runAndReport___spec__1(x_26, x_8, x_125, x_124, x_16, x_118); -lean_dec(x_125); -if (lean_obj_tag(x_126) == 0) +lean_object* x_127; +x_127 = l_Lean_Language_SnapshotTree_foldM___at_Lean_Language_SnapshotTree_runAndReport___spec__1(x_27, x_8, x_126, x_125, x_17, x_119); +lean_dec(x_126); +if (lean_obj_tag(x_127) == 0) { -lean_object* x_127; lean_object* x_128; lean_object* x_129; lean_object* x_130; -x_127 = lean_ctor_get(x_126, 0); -lean_inc(x_127); -x_128 = lean_ctor_get(x_126, 1); +lean_object* x_128; lean_object* x_129; lean_object* x_130; lean_object* x_131; +x_128 = lean_ctor_get(x_127, 0); lean_inc(x_128); -if (lean_is_exclusive(x_126)) { - lean_ctor_release(x_126, 0); - lean_ctor_release(x_126, 1); - x_129 = x_126; +x_129 = lean_ctor_get(x_127, 1); +lean_inc(x_129); +if (lean_is_exclusive(x_127)) { + 
lean_ctor_release(x_127, 0); + lean_ctor_release(x_127, 1); + x_130 = x_127; } else { - lean_dec_ref(x_126); - x_129 = lean_box(0); + lean_dec_ref(x_127); + x_130 = lean_box(0); } -x_130 = l_Lean_Language_Lean_waitForFinalCmdState_x3f(x_117); -if (lean_obj_tag(x_130) == 0) +x_131 = l_Lean_Language_Lean_waitForFinalCmdState_x3f(x_118); +if (lean_obj_tag(x_131) == 0) { -lean_object* x_131; -lean_dec(x_127); -lean_dec(x_124); -lean_dec(x_26); -lean_dec(x_22); +lean_object* x_132; +lean_dec(x_128); +lean_dec(x_125); +lean_dec(x_27); +lean_dec(x_23); lean_dec(x_7); lean_dec(x_6); lean_dec(x_4); -if (lean_is_scalar(x_129)) { - x_131 = lean_alloc_ctor(0, 2, 0); +if (lean_is_scalar(x_130)) { + x_132 = lean_alloc_ctor(0, 2, 0); } else { - x_131 = x_129; + x_132 = x_130; } -lean_ctor_set(x_131, 0, x_29); -lean_ctor_set(x_131, 1, x_128); -return x_131; +lean_ctor_set(x_132, 0, x_30); +lean_ctor_set(x_132, 1, x_129); +return x_132; } else { -lean_object* x_132; lean_object* x_133; lean_object* x_134; lean_object* x_135; lean_object* x_136; -lean_dec(x_129); -x_132 = lean_ctor_get(x_130, 0); -lean_inc(x_132); +lean_object* x_133; lean_object* x_134; lean_object* x_135; lean_object* x_136; lean_object* x_137; lean_dec(x_130); -x_133 = lean_ctor_get(x_132, 0); +x_133 = lean_ctor_get(x_131, 0); lean_inc(x_133); -x_134 = lean_ctor_get(x_132, 2); +lean_dec(x_131); +x_134 = lean_ctor_get(x_133, 0); lean_inc(x_134); -lean_dec(x_132); -x_135 = l_Lean_Elab_Command_instInhabitedScope; -x_136 = l___private_Init_GetElem_0__List_get_x21Internal___rarg(x_135, x_134, x_17); -lean_dec(x_134); +x_135 = lean_ctor_get(x_133, 2); +lean_inc(x_135); +lean_dec(x_133); +x_136 = l_Lean_Elab_Command_instInhabitedScope; +x_137 = l___private_Init_GetElem_0__List_get_x21Internal___rarg(x_136, x_135, x_18); +lean_dec(x_135); if (x_11 == 0) { -lean_object* x_137; lean_object* x_138; uint8_t x_139; lean_object* x_140; -x_137 = lean_ctor_get(x_136, 1); -lean_inc(x_137); -lean_dec(x_136); -x_138 = lean_box(0); -x_139 = lean_unbox(x_127); -lean_dec(x_127); -x_140 = l_Lean_Elab_runFrontend___lambda__7(x_139, x_124, x_133, x_26, x_4, x_20, x_7, x_22, x_6, x_137, x_29, x_138, x_128); +lean_object* x_138; lean_object* x_139; uint8_t x_140; lean_object* x_141; +x_138 = lean_ctor_get(x_137, 1); +lean_inc(x_138); lean_dec(x_137); +x_139 = lean_box(0); +x_140 = lean_unbox(x_128); +lean_dec(x_128); +x_141 = l_Lean_Elab_runFrontend___lambda__8(x_140, x_125, x_134, x_27, x_4, x_21, x_7, x_23, x_6, x_138, x_30, x_139, x_129); +lean_dec(x_138); lean_dec(x_7); -lean_dec(x_26); -return x_140; +lean_dec(x_27); +return x_141; } else { -lean_object* x_141; lean_object* x_142; -x_141 = lean_ctor_get(x_136, 1); -lean_inc(x_141); -lean_dec(x_136); -lean_inc(x_133); -x_142 = l_Lean_Environment_displayStats(x_133, x_128); -if (lean_obj_tag(x_142) == 0) +lean_object* x_142; lean_object* x_143; +x_142 = lean_ctor_get(x_137, 1); +lean_inc(x_142); +lean_dec(x_137); +lean_inc(x_134); +x_143 = l_Lean_Environment_displayStats(x_134, x_129); +if (lean_obj_tag(x_143) == 0) { -lean_object* x_143; lean_object* x_144; uint8_t x_145; lean_object* x_146; -x_143 = lean_ctor_get(x_142, 0); -lean_inc(x_143); -x_144 = lean_ctor_get(x_142, 1); +lean_object* x_144; lean_object* x_145; uint8_t x_146; lean_object* x_147; +x_144 = lean_ctor_get(x_143, 0); lean_inc(x_144); -lean_dec(x_142); -x_145 = lean_unbox(x_127); -lean_dec(x_127); -x_146 = l_Lean_Elab_runFrontend___lambda__7(x_145, x_124, x_133, x_26, x_4, x_20, x_7, x_22, x_6, x_141, x_29, x_143, x_144); +x_145 = 
lean_ctor_get(x_143, 1); +lean_inc(x_145); lean_dec(x_143); -lean_dec(x_141); +x_146 = lean_unbox(x_128); +lean_dec(x_128); +x_147 = l_Lean_Elab_runFrontend___lambda__8(x_146, x_125, x_134, x_27, x_4, x_21, x_7, x_23, x_6, x_142, x_30, x_144, x_145); +lean_dec(x_144); +lean_dec(x_142); lean_dec(x_7); -lean_dec(x_26); -return x_146; +lean_dec(x_27); +return x_147; } else { -lean_object* x_147; lean_object* x_148; lean_object* x_149; lean_object* x_150; -lean_dec(x_141); -lean_dec(x_133); -lean_dec(x_127); -lean_dec(x_124); -lean_dec(x_26); -lean_dec(x_22); +lean_object* x_148; lean_object* x_149; lean_object* x_150; lean_object* x_151; +lean_dec(x_142); +lean_dec(x_134); +lean_dec(x_128); +lean_dec(x_125); +lean_dec(x_27); +lean_dec(x_23); lean_dec(x_7); lean_dec(x_6); lean_dec(x_4); -x_147 = lean_ctor_get(x_142, 0); -lean_inc(x_147); -x_148 = lean_ctor_get(x_142, 1); +x_148 = lean_ctor_get(x_143, 0); lean_inc(x_148); -if (lean_is_exclusive(x_142)) { - lean_ctor_release(x_142, 0); - lean_ctor_release(x_142, 1); - x_149 = x_142; +x_149 = lean_ctor_get(x_143, 1); +lean_inc(x_149); +if (lean_is_exclusive(x_143)) { + lean_ctor_release(x_143, 0); + lean_ctor_release(x_143, 1); + x_150 = x_143; } else { - lean_dec_ref(x_142); - x_149 = lean_box(0); + lean_dec_ref(x_143); + x_150 = lean_box(0); } -if (lean_is_scalar(x_149)) { - x_150 = lean_alloc_ctor(1, 2, 0); +if (lean_is_scalar(x_150)) { + x_151 = lean_alloc_ctor(1, 2, 0); } else { - x_150 = x_149; + x_151 = x_150; } -lean_ctor_set(x_150, 0, x_147); -lean_ctor_set(x_150, 1, x_148); -return x_150; +lean_ctor_set(x_151, 0, x_148); +lean_ctor_set(x_151, 1, x_149); +return x_151; } } } } else { -lean_object* x_151; lean_object* x_152; lean_object* x_153; lean_object* x_154; -lean_dec(x_124); -lean_dec(x_117); -lean_dec(x_26); -lean_dec(x_22); +lean_object* x_152; lean_object* x_153; lean_object* x_154; lean_object* x_155; +lean_dec(x_125); +lean_dec(x_118); +lean_dec(x_27); +lean_dec(x_23); lean_dec(x_7); lean_dec(x_6); lean_dec(x_4); -x_151 = lean_ctor_get(x_126, 0); -lean_inc(x_151); -x_152 = lean_ctor_get(x_126, 1); +x_152 = lean_ctor_get(x_127, 0); lean_inc(x_152); -if (lean_is_exclusive(x_126)) { - lean_ctor_release(x_126, 0); - lean_ctor_release(x_126, 1); - x_153 = x_126; +x_153 = lean_ctor_get(x_127, 1); +lean_inc(x_153); +if (lean_is_exclusive(x_127)) { + lean_ctor_release(x_127, 0); + lean_ctor_release(x_127, 1); + x_154 = x_127; } else { - lean_dec_ref(x_126); - x_153 = lean_box(0); + lean_dec_ref(x_127); + x_154 = lean_box(0); } -if (lean_is_scalar(x_153)) { - x_154 = lean_alloc_ctor(1, 2, 0); +if (lean_is_scalar(x_154)) { + x_155 = lean_alloc_ctor(1, 2, 0); } else { - x_154 = x_153; + x_155 = x_154; } -lean_ctor_set(x_154, 0, x_151); -lean_ctor_set(x_154, 1, x_152); -return x_154; +lean_ctor_set(x_155, 0, x_152); +lean_ctor_set(x_155, 1, x_153); +return x_155; } } } } } } -LEAN_EXPORT lean_object* l_Array_mapMUnsafe_map___at_Lean_Elab_runFrontend___spec__1___boxed(lean_object* x_1, lean_object* x_2, lean_object* x_3) { +LEAN_EXPORT lean_object* l_Array_foldlMUnsafe_fold___at_Lean_Elab_runFrontend___spec__1___boxed(lean_object* x_1, lean_object* x_2, lean_object* x_3, lean_object* x_4, lean_object* x_5) { +_start: +{ +size_t x_6; size_t x_7; lean_object* x_8; +x_6 = lean_unbox_usize(x_2); +lean_dec(x_2); +x_7 = lean_unbox_usize(x_3); +lean_dec(x_3); +x_8 = l_Array_foldlMUnsafe_fold___at_Lean_Elab_runFrontend___spec__1(x_1, x_6, x_7, x_4, x_5); +lean_dec(x_1); +return x_8; +} +} +LEAN_EXPORT lean_object* 
l_Array_mapMUnsafe_map___at_Lean_Elab_runFrontend___spec__2___boxed(lean_object* x_1, lean_object* x_2, lean_object* x_3) { _start: { size_t x_4; size_t x_5; lean_object* x_6; @@ -4267,11 +4546,11 @@ x_4 = lean_unbox_usize(x_1); lean_dec(x_1); x_5 = lean_unbox_usize(x_2); lean_dec(x_2); -x_6 = l_Array_mapMUnsafe_map___at_Lean_Elab_runFrontend___spec__1(x_4, x_5, x_3); +x_6 = l_Array_mapMUnsafe_map___at_Lean_Elab_runFrontend___spec__2(x_4, x_5, x_3); return x_6; } } -LEAN_EXPORT lean_object* l_Array_foldlMUnsafe_fold___at_Lean_Elab_runFrontend___spec__2___boxed(lean_object* x_1, lean_object* x_2, lean_object* x_3, lean_object* x_4) { +LEAN_EXPORT lean_object* l_Array_foldlMUnsafe_fold___at_Lean_Elab_runFrontend___spec__3___boxed(lean_object* x_1, lean_object* x_2, lean_object* x_3, lean_object* x_4) { _start: { size_t x_5; size_t x_6; lean_object* x_7; @@ -4279,12 +4558,12 @@ x_5 = lean_unbox_usize(x_2); lean_dec(x_2); x_6 = lean_unbox_usize(x_3); lean_dec(x_3); -x_7 = l_Array_foldlMUnsafe_fold___at_Lean_Elab_runFrontend___spec__2(x_1, x_5, x_6, x_4); +x_7 = l_Array_foldlMUnsafe_fold___at_Lean_Elab_runFrontend___spec__3(x_1, x_5, x_6, x_4); lean_dec(x_1); return x_7; } } -LEAN_EXPORT lean_object* l_Array_foldlMUnsafe_fold___at_Lean_Elab_runFrontend___spec__3___boxed(lean_object* x_1, lean_object* x_2, lean_object* x_3, lean_object* x_4) { +LEAN_EXPORT lean_object* l_Array_foldlMUnsafe_fold___at_Lean_Elab_runFrontend___spec__4___boxed(lean_object* x_1, lean_object* x_2, lean_object* x_3, lean_object* x_4) { _start: { size_t x_5; size_t x_6; lean_object* x_7; @@ -4292,74 +4571,84 @@ x_5 = lean_unbox_usize(x_2); lean_dec(x_2); x_6 = lean_unbox_usize(x_3); lean_dec(x_3); -x_7 = l_Array_foldlMUnsafe_fold___at_Lean_Elab_runFrontend___spec__3(x_1, x_5, x_6, x_4); +x_7 = l_Array_foldlMUnsafe_fold___at_Lean_Elab_runFrontend___spec__4(x_1, x_5, x_6, x_4); lean_dec(x_1); return x_7; } } -LEAN_EXPORT lean_object* l_Lean_Elab_runFrontend___lambda__1___boxed(lean_object* x_1, lean_object* x_2, lean_object* x_3, lean_object* x_4, lean_object* x_5, lean_object* x_6, lean_object* x_7) { +LEAN_EXPORT lean_object* l_Lean_Elab_runFrontend___lambda__1___boxed(lean_object* x_1, lean_object* x_2, lean_object* x_3) { _start: { -uint32_t x_8; lean_object* x_9; -x_8 = lean_unbox_uint32(x_3); +lean_object* x_4; +x_4 = l_Lean_Elab_runFrontend___lambda__1(x_1, x_2, x_3); lean_dec(x_3); -x_9 = l_Lean_Elab_runFrontend___lambda__1(x_1, x_2, x_8, x_4, x_5, x_6, x_7); -lean_dec(x_6); -lean_dec(x_5); -return x_9; +lean_dec(x_2); +lean_dec(x_1); +return x_4; +} +} +LEAN_EXPORT lean_object* l_Lean_Elab_runFrontend___lambda__2___boxed(lean_object* x_1, lean_object* x_2, lean_object* x_3, lean_object* x_4, lean_object* x_5, lean_object* x_6, lean_object* x_7, lean_object* x_8) { +_start: +{ +uint32_t x_9; lean_object* x_10; +x_9 = lean_unbox_uint32(x_4); +lean_dec(x_4); +x_10 = l_Lean_Elab_runFrontend___lambda__2(x_1, x_2, x_3, x_9, x_5, x_6, x_7, x_8); +lean_dec(x_7); +return x_10; } } -LEAN_EXPORT lean_object* l_Lean_Elab_runFrontend___lambda__2___boxed(lean_object* x_1, lean_object* x_2, lean_object* x_3, lean_object* x_4) { +LEAN_EXPORT lean_object* l_Lean_Elab_runFrontend___lambda__3___boxed(lean_object* x_1, lean_object* x_2, lean_object* x_3, lean_object* x_4) { _start: { lean_object* x_5; -x_5 = l_Lean_Elab_runFrontend___lambda__2(x_1, x_2, x_3, x_4); +x_5 = l_Lean_Elab_runFrontend___lambda__3(x_1, x_2, x_3, x_4); lean_dec(x_3); return x_5; } } -LEAN_EXPORT lean_object* 
l_Lean_Elab_runFrontend___lambda__3___boxed(lean_object* x_1) { +LEAN_EXPORT lean_object* l_Lean_Elab_runFrontend___lambda__4___boxed(lean_object* x_1) { _start: { uint8_t x_2; lean_object* x_3; -x_2 = l_Lean_Elab_runFrontend___lambda__3(x_1); +x_2 = l_Lean_Elab_runFrontend___lambda__4(x_1); lean_dec(x_1); x_3 = lean_box(x_2); return x_3; } } -LEAN_EXPORT lean_object* l_Lean_Elab_runFrontend___lambda__4___boxed(lean_object* x_1, lean_object* x_2, lean_object* x_3, lean_object* x_4, lean_object* x_5, lean_object* x_6, lean_object* x_7) { +LEAN_EXPORT lean_object* l_Lean_Elab_runFrontend___lambda__5___boxed(lean_object* x_1, lean_object* x_2, lean_object* x_3, lean_object* x_4, lean_object* x_5, lean_object* x_6, lean_object* x_7) { _start: { double x_8; lean_object* x_9; x_8 = lean_unbox_float(x_5); lean_dec(x_5); -x_9 = l_Lean_Elab_runFrontend___lambda__4(x_1, x_2, x_3, x_4, x_8, x_6, x_7); +x_9 = l_Lean_Elab_runFrontend___lambda__5(x_1, x_2, x_3, x_4, x_8, x_6, x_7); lean_dec(x_6); lean_dec(x_3); return x_9; } } -LEAN_EXPORT lean_object* l_Lean_Elab_runFrontend___lambda__5___boxed(lean_object* x_1, lean_object* x_2, lean_object* x_3, lean_object* x_4, lean_object* x_5, lean_object* x_6, lean_object* x_7, lean_object* x_8, lean_object* x_9) { +LEAN_EXPORT lean_object* l_Lean_Elab_runFrontend___lambda__6___boxed(lean_object* x_1, lean_object* x_2, lean_object* x_3, lean_object* x_4, lean_object* x_5, lean_object* x_6, lean_object* x_7, lean_object* x_8, lean_object* x_9) { _start: { double x_10; lean_object* x_11; x_10 = lean_unbox_float(x_5); lean_dec(x_5); -x_11 = l_Lean_Elab_runFrontend___lambda__5(x_1, x_2, x_3, x_4, x_10, x_6, x_7, x_8, x_9); +x_11 = l_Lean_Elab_runFrontend___lambda__6(x_1, x_2, x_3, x_4, x_10, x_6, x_7, x_8, x_9); lean_dec(x_8); lean_dec(x_6); lean_dec(x_3); return x_11; } } -LEAN_EXPORT lean_object* l_Lean_Elab_runFrontend___lambda__6___boxed(lean_object* x_1, lean_object* x_2, lean_object* x_3, lean_object* x_4, lean_object* x_5, lean_object* x_6, lean_object* x_7, lean_object* x_8, lean_object* x_9, lean_object* x_10, lean_object* x_11) { +LEAN_EXPORT lean_object* l_Lean_Elab_runFrontend___lambda__7___boxed(lean_object* x_1, lean_object* x_2, lean_object* x_3, lean_object* x_4, lean_object* x_5, lean_object* x_6, lean_object* x_7, lean_object* x_8, lean_object* x_9, lean_object* x_10, lean_object* x_11) { _start: { double x_12; lean_object* x_13; x_12 = lean_unbox_float(x_5); lean_dec(x_5); -x_13 = l_Lean_Elab_runFrontend___lambda__6(x_1, x_2, x_3, x_4, x_12, x_6, x_7, x_8, x_9, x_10, x_11); +x_13 = l_Lean_Elab_runFrontend___lambda__7(x_1, x_2, x_3, x_4, x_12, x_6, x_7, x_8, x_9, x_10, x_11); lean_dec(x_10); lean_dec(x_9); lean_dec(x_6); @@ -4367,7 +4656,7 @@ lean_dec(x_3); return x_13; } } -LEAN_EXPORT lean_object* l_Lean_Elab_runFrontend___lambda__7___boxed(lean_object* x_1, lean_object* x_2, lean_object* x_3, lean_object* x_4, lean_object* x_5, lean_object* x_6, lean_object* x_7, lean_object* x_8, lean_object* x_9, lean_object* x_10, lean_object* x_11, lean_object* x_12, lean_object* x_13) { +LEAN_EXPORT lean_object* l_Lean_Elab_runFrontend___lambda__8___boxed(lean_object* x_1, lean_object* x_2, lean_object* x_3, lean_object* x_4, lean_object* x_5, lean_object* x_6, lean_object* x_7, lean_object* x_8, lean_object* x_9, lean_object* x_10, lean_object* x_11, lean_object* x_12, lean_object* x_13) { _start: { uint8_t x_14; double x_15; lean_object* x_16; @@ -4375,7 +4664,7 @@ x_14 = lean_unbox(x_1); lean_dec(x_1); x_15 = lean_unbox_float(x_6); lean_dec(x_6); -x_16 = 
l_Lean_Elab_runFrontend___lambda__7(x_14, x_2, x_3, x_4, x_5, x_15, x_7, x_8, x_9, x_10, x_11, x_12, x_13); +x_16 = l_Lean_Elab_runFrontend___lambda__8(x_14, x_2, x_3, x_4, x_5, x_15, x_7, x_8, x_9, x_10, x_11, x_12, x_13); lean_dec(x_12); lean_dec(x_10); lean_dec(x_7); @@ -4383,18 +4672,18 @@ lean_dec(x_4); return x_16; } } -LEAN_EXPORT lean_object* l_Lean_Elab_runFrontend___boxed(lean_object* x_1, lean_object* x_2, lean_object* x_3, lean_object* x_4, lean_object* x_5, lean_object* x_6, lean_object* x_7, lean_object* x_8, lean_object* x_9, lean_object* x_10, lean_object* x_11, lean_object* x_12) { +LEAN_EXPORT lean_object* l_Lean_Elab_runFrontend___boxed(lean_object* x_1, lean_object* x_2, lean_object* x_3, lean_object* x_4, lean_object* x_5, lean_object* x_6, lean_object* x_7, lean_object* x_8, lean_object* x_9, lean_object* x_10, lean_object* x_11, lean_object* x_12, lean_object* x_13) { _start: { -uint32_t x_13; uint8_t x_14; uint8_t x_15; lean_object* x_16; -x_13 = lean_unbox_uint32(x_5); +uint32_t x_14; uint8_t x_15; uint8_t x_16; lean_object* x_17; +x_14 = lean_unbox_uint32(x_5); lean_dec(x_5); -x_14 = lean_unbox(x_8); +x_15 = lean_unbox(x_8); lean_dec(x_8); -x_15 = lean_unbox(x_11); +x_16 = lean_unbox(x_11); lean_dec(x_11); -x_16 = lean_run_frontend(x_1, x_2, x_3, x_4, x_13, x_6, x_7, x_14, x_9, x_10, x_15, x_12); -return x_16; +x_17 = lean_run_frontend(x_1, x_2, x_3, x_4, x_14, x_6, x_7, x_15, x_9, x_10, x_16, x_12, x_13); +return x_17; } } lean_object* initialize_Lean_Language_Lean(uint8_t builtin, lean_object*); @@ -4438,16 +4727,18 @@ l_Lean_Elab_process___closed__1 = _init_l_Lean_Elab_process___closed__1(); lean_mark_persistent(l_Lean_Elab_process___closed__1); l_Lean_Elab_process___closed__2 = _init_l_Lean_Elab_process___closed__2(); lean_mark_persistent(l_Lean_Elab_process___closed__2); -l_Lean_Elab_runFrontend___lambda__4___closed__1 = _init_l_Lean_Elab_runFrontend___lambda__4___closed__1(); -lean_mark_persistent(l_Lean_Elab_runFrontend___lambda__4___closed__1); -l_Lean_Elab_runFrontend___lambda__4___closed__2 = _init_l_Lean_Elab_runFrontend___lambda__4___closed__2(); -lean_mark_persistent(l_Lean_Elab_runFrontend___lambda__4___closed__2); +l_Lean_Elab_runFrontend___lambda__2___closed__1 = _init_l_Lean_Elab_runFrontend___lambda__2___closed__1(); +lean_mark_persistent(l_Lean_Elab_runFrontend___lambda__2___closed__1); l_Lean_Elab_runFrontend___lambda__5___closed__1 = _init_l_Lean_Elab_runFrontend___lambda__5___closed__1(); lean_mark_persistent(l_Lean_Elab_runFrontend___lambda__5___closed__1); +l_Lean_Elab_runFrontend___lambda__5___closed__2 = _init_l_Lean_Elab_runFrontend___lambda__5___closed__2(); +lean_mark_persistent(l_Lean_Elab_runFrontend___lambda__5___closed__2); l_Lean_Elab_runFrontend___lambda__6___closed__1 = _init_l_Lean_Elab_runFrontend___lambda__6___closed__1(); lean_mark_persistent(l_Lean_Elab_runFrontend___lambda__6___closed__1); -l_Lean_Elab_runFrontend___lambda__8___closed__1 = _init_l_Lean_Elab_runFrontend___lambda__8___closed__1(); -lean_mark_persistent(l_Lean_Elab_runFrontend___lambda__8___closed__1); +l_Lean_Elab_runFrontend___lambda__7___closed__1 = _init_l_Lean_Elab_runFrontend___lambda__7___closed__1(); +lean_mark_persistent(l_Lean_Elab_runFrontend___lambda__7___closed__1); +l_Lean_Elab_runFrontend___lambda__9___closed__1 = _init_l_Lean_Elab_runFrontend___lambda__9___closed__1(); +lean_mark_persistent(l_Lean_Elab_runFrontend___lambda__9___closed__1); l_Lean_Elab_runFrontend___closed__1 = _init_l_Lean_Elab_runFrontend___closed__1(); 
l_Lean_Elab_runFrontend___closed__2 = _init_l_Lean_Elab_runFrontend___closed__2(); lean_mark_persistent(l_Lean_Elab_runFrontend___closed__2); diff --git a/stage0/stdlib/Lean/Elab/Import.c b/stage0/stdlib/Lean/Elab/Import.c index 75e003e0edd3..aba83f65e282 100644 --- a/stage0/stdlib/Lean/Elab/Import.c +++ b/stage0/stdlib/Lean/Elab/Import.c @@ -13,112 +13,182 @@ #ifdef __cplusplus extern "C" { #endif -static lean_object* l_Array_mapMUnsafe_map___at_Lean_Elab_headerToImports___spec__3___closed__1; -LEAN_EXPORT lean_object* l_Array_forIn_x27Unsafe_loop___at_Lean_Elab_processHeader___spec__1___lambda__1(uint8_t, lean_object*, lean_object*, lean_object*); LEAN_EXPORT lean_object* l_IO_println___at_Lean_Elab_printImports___spec__1(lean_object*, lean_object*); -static lean_object* l_Array_mapMUnsafe_map___at_Lean_Elab_headerToImports___spec__3___lambda__2___closed__5; -LEAN_EXPORT lean_object* l_Array_mapMUnsafe_map___at_Lean_Elab_headerToImports___spec__3___boxed(lean_object*, lean_object*, lean_object*, lean_object*, lean_object*); -static lean_object* l_Lean_Elab_headerToImports___lambda__2___closed__1; -LEAN_EXPORT lean_object* l_Array_mapMUnsafe_map___at_Lean_Elab_headerToImports___spec__3___lambda__1(lean_object*, lean_object*, lean_object*, lean_object*); -static lean_object* l_Array_mapMUnsafe_map___at_Lean_Elab_headerToImports___spec__3___lambda__2___closed__1; -static lean_object* l_Array_forIn_x27Unsafe_loop___at_Lean_Elab_processHeader___spec__1___closed__1; -static lean_object* l_Lean_Elab_headerToImports___lambda__1___closed__7; -LEAN_EXPORT lean_object* l_Array_mapMUnsafe_map___at_Lean_Elab_headerToImports___spec__3(lean_object*, lean_object*, size_t, size_t, lean_object*); +static lean_object* l_Array_mapMUnsafe_map___at_Lean_Elab_HeaderSyntax_imports___spec__3___closed__1; +LEAN_EXPORT lean_object* l_Lean_Elab_processHeaderCore(lean_object*, lean_object*, uint8_t, lean_object*, lean_object*, lean_object*, uint32_t, lean_object*, uint8_t, lean_object*, lean_object*, lean_object*); lean_object* l_Lean_FileMap_toPosition(lean_object*, lean_object*); LEAN_EXPORT lean_object* l_Lean_Elab_processHeader___boxed(lean_object*, lean_object*, lean_object*, lean_object*, lean_object*, lean_object*, lean_object*, lean_object*, lean_object*); -static lean_object* l_Lean_Elab_headerToImports___lambda__1___closed__3; lean_object* l_Lean_Syntax_getId(lean_object*); -static lean_object* l_Lean_Elab_headerToImports___lambda__1___closed__6; -LEAN_EXPORT lean_object* l_Lean_Elab_headerToImports___lambda__2___boxed(lean_object*, lean_object*, lean_object*); -static lean_object* l_panic___at_Lean_Elab_headerToImports___spec__1___closed__1; LEAN_EXPORT lean_object* lean_print_imports(lean_object*, lean_object*, lean_object*); +static lean_object* l_Array_forIn_x27Unsafe_loop___at_Lean_Elab_processHeaderCore___spec__1___lambda__1___closed__1; lean_object* l_Lean_Syntax_getArgs(lean_object*); +static lean_object* l_Array_forIn_x27Unsafe_loop___at_Lean_Elab_processHeaderCore___spec__1___lambda__1___closed__2; +static lean_object* l_Array_mapMUnsafe_map___at_Lean_Elab_HeaderSyntax_imports___spec__3___lambda__1___closed__1; lean_object* l_Lean_Syntax_getPos_x3f(lean_object*, uint8_t); -static lean_object* l_Array_mapMUnsafe_map___at_Lean_Elab_headerToImports___spec__3___lambda__1___closed__1; -static lean_object* l_Lean_Elab_headerToImports___closed__3; -static lean_object* l_Array_forIn_x27Unsafe_loop___at_Lean_Elab_processHeader___spec__1___lambda__1___closed__2; -static lean_object* 
l_Lean_Elab_headerToImports___lambda__1___closed__4; +LEAN_EXPORT lean_object* l_Array_forIn_x27Unsafe_loop___at_Lean_Elab_processHeaderCore___spec__1___boxed(lean_object*, lean_object*, lean_object*, lean_object*, lean_object*, lean_object*, lean_object*, lean_object*, lean_object*); +static lean_object* l_Lean_Elab_HeaderSyntax_imports___lambda__1___closed__4; +static lean_object* l_Array_forIn_x27Unsafe_loop___at_Lean_Elab_processHeaderCore___spec__1___closed__2; LEAN_EXPORT lean_object* l_Lean_Elab_headerToImports(lean_object*); uint8_t l_Lean_Syntax_isOfKind(lean_object*, lean_object*); +LEAN_EXPORT lean_object* l_Lean_Elab_HeaderSyntax_imports___lambda__1(lean_object*, lean_object*, lean_object*); LEAN_EXPORT lean_object* l_Lean_Elab_processHeader(lean_object*, lean_object*, lean_object*, lean_object*, uint32_t, lean_object*, uint8_t, lean_object*, lean_object*); -static lean_object* l_Lean_Elab_headerToImports___closed__2; -static lean_object* l_Array_mapMUnsafe_map___at_Lean_Elab_headerToImports___spec__3___lambda__2___closed__2; -LEAN_EXPORT lean_object* l_panic___at_Lean_Elab_headerToImports___spec__2(lean_object*); -LEAN_EXPORT lean_object* l_Array_forIn_x27Unsafe_loop___at_Lean_Elab_processHeader___spec__1___lambda__2(uint8_t, lean_object*, lean_object*, lean_object*, lean_object*); +static lean_object* l_Lean_Elab_HeaderSyntax_imports___closed__3; lean_object* lean_string_push(lean_object*, uint32_t); -static lean_object* l_Lean_Elab_processHeader___closed__2; -static lean_object* l_Lean_Elab_headerToImports___lambda__1___closed__5; -static lean_object* l_Array_mapMUnsafe_map___at_Lean_Elab_headerToImports___spec__3___lambda__2___closed__4; -LEAN_EXPORT lean_object* l_Lean_Elab_processHeader___lambda__1(lean_object*, lean_object*, lean_object*); -static lean_object* l_Array_forIn_x27Unsafe_loop___at_Lean_Elab_processHeader___spec__1___lambda__2___closed__2; +LEAN_EXPORT lean_object* l_Array_mapMUnsafe_map___at_Lean_Elab_HeaderSyntax_imports___spec__3___boxed(lean_object*, lean_object*, lean_object*, lean_object*, lean_object*); +static lean_object* l_Array_mapMUnsafe_map___at_Lean_Elab_HeaderSyntax_imports___spec__3___lambda__1___closed__4; +LEAN_EXPORT lean_object* l_Array_forIn_x27Unsafe_loop___at_Lean_Elab_processHeaderCore___spec__1___lambda__1(uint8_t, lean_object*, lean_object*, lean_object*); +LEAN_EXPORT lean_object* l_Array_forIn_x27Unsafe_loop___at_Lean_Elab_processHeaderCore___spec__1(lean_object*, uint8_t, lean_object*, lean_object*, lean_object*, size_t, size_t, lean_object*, lean_object*); +static lean_object* l_panic___at_Lean_Elab_HeaderSyntax_imports___spec__1___closed__1; +LEAN_EXPORT lean_object* l_Lean_Elab_HeaderSyntax_startPos(lean_object*); +LEAN_EXPORT lean_object* l_Array_forIn_x27Unsafe_loop___at_Lean_Elab_processHeaderCore___spec__1___lambda__2(uint8_t, lean_object*, lean_object*, lean_object*, lean_object*); +static lean_object* l_Lean_Elab_HeaderSyntax_imports___closed__4; +static lean_object* l_Lean_Elab_HeaderSyntax_imports___lambda__1___closed__6; +static lean_object* l_Lean_Elab_processHeaderCore___closed__2; lean_object* l_Lean_MessageData_ofFormat(lean_object*); LEAN_EXPORT lean_object* lean_print_import_srcs(lean_object*, lean_object*, lean_object*); -LEAN_EXPORT lean_object* l_Array_forIn_x27Unsafe_loop___at_Lean_Elab_processHeader___spec__1___boxed(lean_object*, lean_object*, lean_object*, lean_object*, lean_object*, lean_object*, lean_object*, lean_object*, lean_object*); lean_object* l_Lean_Parser_mkInputContext(lean_object*, 
lean_object*, uint8_t); -static lean_object* l_Array_forIn_x27Unsafe_loop___at_Lean_Elab_processHeader___spec__1___lambda__2___closed__1; +LEAN_EXPORT lean_object* l_panic___at_Lean_Elab_HeaderSyntax_imports___spec__2(lean_object*); +LEAN_EXPORT lean_object* l_Array_forIn_x27Unsafe_loop___at_Lean_Elab_processHeaderCore___spec__1___lambda__1___boxed(lean_object*, lean_object*, lean_object*, lean_object*); +LEAN_EXPORT lean_object* l_Lean_Elab_HeaderSyntax_imports___lambda__2___boxed(lean_object*, lean_object*, lean_object*); lean_object* l_Lean_getSrcSearchPath(lean_object*); +static lean_object* l_Lean_Elab_HeaderSyntax_imports___lambda__2___closed__2; lean_object* l_Lean_Name_getRoot(lean_object*); -LEAN_EXPORT lean_object* l_Array_forIn_x27Unsafe_loop___at_Lean_Elab_processHeader___spec__1___lambda__2___boxed(lean_object*, lean_object*, lean_object*, lean_object*, lean_object*); +static lean_object* l_Lean_Elab_HeaderSyntax_imports___closed__2; +static lean_object* l_Lean_Elab_processHeaderCore___closed__1; lean_object* l_Lean_Environment_setMainModule(lean_object*, lean_object*); LEAN_EXPORT lean_object* l_Array_forIn_x27Unsafe_loop___at_Lean_Elab_printImports___spec__2(lean_object*, lean_object*, lean_object*, size_t, size_t, lean_object*, lean_object*); lean_object* l_IO_print___at_IO_println___spec__1(lean_object*, lean_object*); -static lean_object* l_Lean_Elab_headerToImports___lambda__2___closed__2; +LEAN_EXPORT lean_object* l_Lean_Elab_HeaderSyntax_isModule___boxed(lean_object*); uint8_t lean_name_eq(lean_object*, lean_object*); lean_object* l_Lean_Name_str___override(lean_object*, lean_object*); +static lean_object* l_Lean_Elab_HeaderSyntax_imports___lambda__1___closed__8; uint8_t l_Lean_Option_get___at___private_Lean_Util_Profile_0__Lean_get__profiler___spec__1(lean_object*, lean_object*); -static lean_object* l_Array_mapMUnsafe_map___at_Lean_Elab_headerToImports___spec__3___lambda__1___closed__4; -static lean_object* l_Lean_Elab_headerToImports___closed__1; lean_object* l_Lean_Syntax_getArg(lean_object*, lean_object*); +LEAN_EXPORT lean_object* l_Lean_Elab_HeaderSyntax_imports___lambda__2(lean_object*, lean_object*, lean_object*); lean_object* l___private_Init_Util_0__mkPanicMessageWithDecl(lean_object*, lean_object*, lean_object*, lean_object*, lean_object*); uint8_t l_Lean_Syntax_matchesNull(lean_object*, lean_object*); -static lean_object* l_Array_mapMUnsafe_map___at_Lean_Elab_headerToImports___spec__3___lambda__2___closed__3; -static lean_object* l_Array_mapMUnsafe_map___at_Lean_Elab_headerToImports___spec__3___lambda__1___closed__2; -static lean_object* l_Lean_Elab_headerToImports___closed__4; +LEAN_EXPORT uint8_t l_Lean_Elab_HeaderSyntax_isModule(lean_object*); +static lean_object* l_Lean_Elab_HeaderSyntax_imports___lambda__1___closed__3; extern lean_object* l_Lean_instInhabitedImport; +static lean_object* l_Lean_Elab_HeaderSyntax_imports___lambda__1___closed__2; LEAN_EXPORT lean_object* l_Array_forIn_x27Unsafe_loop___at_Lean_Elab_printImportSrcs___spec__1___boxed(lean_object*, lean_object*, lean_object*, lean_object*, lean_object*, lean_object*, lean_object*, lean_object*); -static lean_object* l_Array_mapMUnsafe_map___at_Lean_Elab_headerToImports___spec__3___closed__2; +LEAN_EXPORT lean_object* l_Array_mapMUnsafe_map___at_Lean_Elab_HeaderSyntax_imports___spec__3(lean_object*, lean_object*, size_t, size_t, lean_object*); +LEAN_EXPORT lean_object* l_Lean_Elab_HeaderSyntax_imports(lean_object*); lean_object* l_Array_append___rarg(lean_object*, lean_object*); +static 
lean_object* l_Lean_Elab_HeaderSyntax_imports___lambda__1___closed__5; LEAN_EXPORT lean_object* l_Array_forIn_x27Unsafe_loop___at_Lean_Elab_printImports___spec__2___boxed(lean_object*, lean_object*, lean_object*, lean_object*, lean_object*, lean_object*, lean_object*); -static lean_object* l_Lean_Elab_headerToImports___lambda__1___closed__1; -static lean_object* l_Lean_Elab_processHeader___closed__1; +static lean_object* l_Lean_Elab_HeaderSyntax_imports___lambda__1___closed__7; +LEAN_EXPORT lean_object* l_Array_mapMUnsafe_map___at_Lean_Elab_HeaderSyntax_imports___spec__3___lambda__2(lean_object*, lean_object*, lean_object*); +static lean_object* l_Array_mapMUnsafe_map___at_Lean_Elab_HeaderSyntax_imports___spec__3___lambda__2___closed__2; +static lean_object* l_Array_mapMUnsafe_map___at_Lean_Elab_HeaderSyntax_imports___spec__3___lambda__1___closed__2; +static lean_object* l_Array_forIn_x27Unsafe_loop___at_Lean_Elab_processHeaderCore___spec__1___lambda__1___closed__3; +LEAN_EXPORT lean_object* l_Array_mapMUnsafe_map___at_Lean_Elab_HeaderSyntax_imports___spec__3___lambda__1(lean_object*, lean_object*, lean_object*, lean_object*); +LEAN_EXPORT lean_object* l_panic___at_Lean_Elab_HeaderSyntax_imports___spec__1(lean_object*); static lean_object* l_Lean_Elab_parseImports___closed__1; lean_object* l_Lean_findOLean(lean_object*, lean_object*); -LEAN_EXPORT lean_object* l_Array_forIn_x27Unsafe_loop___at_Lean_Elab_processHeader___spec__1(lean_object*, lean_object*, lean_object*, uint8_t, lean_object*, size_t, size_t, lean_object*, lean_object*); +static lean_object* l_Array_forIn_x27Unsafe_loop___at_Lean_Elab_processHeaderCore___spec__1___lambda__2___closed__1; LEAN_EXPORT lean_object* l_Lean_Elab_parseImports(lean_object*, lean_object*, lean_object*); -LEAN_EXPORT lean_object* l_Array_mapMUnsafe_map___at_Lean_Elab_headerToImports___spec__3___lambda__1___boxed(lean_object*, lean_object*, lean_object*, lean_object*); uint8_t l_Lean_Syntax_isNone(lean_object*); lean_object* lean_panic_fn(lean_object*, lean_object*); -static lean_object* l_Array_forIn_x27Unsafe_loop___at_Lean_Elab_processHeader___spec__1___lambda__1___closed__3; -static lean_object* l_Array_mapMUnsafe_map___at_Lean_Elab_headerToImports___spec__3___lambda__1___closed__3; +static lean_object* l_Lean_Elab_HeaderSyntax_imports___lambda__2___closed__3; +LEAN_EXPORT lean_object* l_Array_mapMUnsafe_map___at_Lean_Elab_HeaderSyntax_imports___spec__3___lambda__1___boxed(lean_object*, lean_object*, lean_object*, lean_object*); +static lean_object* l_Array_mapMUnsafe_map___at_Lean_Elab_HeaderSyntax_imports___spec__3___lambda__2___closed__4; LEAN_EXPORT lean_object* l_Array_forIn_x27Unsafe_loop___at_Lean_Elab_printImportSrcs___spec__1(lean_object*, lean_object*, lean_object*, lean_object*, size_t, size_t, lean_object*, lean_object*); +LEAN_EXPORT lean_object* l_Lean_Elab_processHeaderCore___lambda__1(lean_object*, lean_object*, lean_object*); +LEAN_EXPORT lean_object* l_Lean_Elab_processHeaderCore___boxed(lean_object*, lean_object*, lean_object*, lean_object*, lean_object*, lean_object*, lean_object*, lean_object*, lean_object*, lean_object*, lean_object*, lean_object*); lean_object* lean_array_mk(lean_object*); size_t lean_usize_add(size_t, size_t); -LEAN_EXPORT lean_object* l_panic___at_Lean_Elab_headerToImports___spec__1(lean_object*); -static lean_object* l_Lean_Elab_headerToImports___lambda__1___closed__8; +LEAN_EXPORT lean_object* l_Lean_Elab_HeaderSyntax_startPos___boxed(lean_object*); lean_object* lean_array_uget(lean_object*, size_t); 
size_t lean_array_size(lean_object*); -LEAN_EXPORT lean_object* l_Array_mapMUnsafe_map___at_Lean_Elab_headerToImports___spec__3___lambda__2___boxed(lean_object*, lean_object*, lean_object*); lean_object* l_Lean_findLean(lean_object*, lean_object*, lean_object*); -static lean_object* l_Array_forIn_x27Unsafe_loop___at_Lean_Elab_processHeader___spec__1___closed__2; -static lean_object* l_Lean_Elab_headerToImports___lambda__2___closed__3; lean_object* lean_io_error_to_string(lean_object*); lean_object* l_Lean_Name_mkStr4(lean_object*, lean_object*, lean_object*, lean_object*); +static lean_object* l_Array_mapMUnsafe_map___at_Lean_Elab_HeaderSyntax_imports___spec__3___lambda__2___closed__3; +static lean_object* l_Array_mapMUnsafe_map___at_Lean_Elab_HeaderSyntax_imports___spec__3___lambda__2___closed__5; +static lean_object* l_Array_forIn_x27Unsafe_loop___at_Lean_Elab_processHeaderCore___spec__1___lambda__2___closed__2; +static lean_object* l_Lean_Elab_HeaderSyntax_imports___lambda__2___closed__1; +LEAN_EXPORT lean_object* l_Lean_Elab_HeaderSyntax_imports___lambda__1___boxed(lean_object*, lean_object*, lean_object*); extern lean_object* l_Lean_Elab_inServer; -LEAN_EXPORT lean_object* l_Lean_Elab_headerToImports___lambda__2(lean_object*, lean_object*, lean_object*); lean_object* l_Lean_Parser_parseHeader(lean_object*, lean_object*); -LEAN_EXPORT lean_object* l_Array_mapMUnsafe_map___at_Lean_Elab_headerToImports___spec__3___lambda__2(lean_object*, lean_object*, lean_object*); uint8_t lean_usize_dec_lt(size_t, size_t); lean_object* lean_mk_empty_environment(uint32_t, lean_object*); -static lean_object* l_Lean_Elab_headerToImports___lambda__1___closed__2; -static lean_object* l_Array_forIn_x27Unsafe_loop___at_Lean_Elab_processHeader___spec__1___lambda__1___closed__1; -lean_object* l_Lean_importModules(lean_object*, lean_object*, uint32_t, lean_object*, uint8_t, uint8_t, uint8_t, lean_object*); -LEAN_EXPORT lean_object* l_Array_forIn_x27Unsafe_loop___at_Lean_Elab_processHeader___spec__1___lambda__1___boxed(lean_object*, lean_object*, lean_object*, lean_object*); +lean_object* l_Lean_importModules(lean_object*, lean_object*, uint32_t, lean_object*, uint8_t, uint8_t, uint8_t, lean_object*, lean_object*); +static lean_object* l_Array_forIn_x27Unsafe_loop___at_Lean_Elab_processHeaderCore___spec__1___closed__1; +static lean_object* l_Array_mapMUnsafe_map___at_Lean_Elab_HeaderSyntax_imports___spec__3___lambda__1___closed__3; +static lean_object* l_Array_mapMUnsafe_map___at_Lean_Elab_HeaderSyntax_imports___spec__3___lambda__2___closed__1; lean_object* lean_array_uset(lean_object*, size_t, lean_object*); -LEAN_EXPORT lean_object* l_Lean_Elab_headerToImports___lambda__1(lean_object*, lean_object*, lean_object*); +static lean_object* l_Array_mapMUnsafe_map___at_Lean_Elab_HeaderSyntax_imports___spec__3___closed__2; +LEAN_EXPORT lean_object* l_Array_mapMUnsafe_map___at_Lean_Elab_HeaderSyntax_imports___spec__3___lambda__2___boxed(lean_object*, lean_object*, lean_object*); lean_object* lean_mk_empty_array_with_capacity(lean_object*); lean_object* l_Lean_MessageLog_add(lean_object*, lean_object*); -LEAN_EXPORT lean_object* l_Lean_Elab_headerToImports___lambda__1___boxed(lean_object*, lean_object*, lean_object*); -static lean_object* _init_l_panic___at_Lean_Elab_headerToImports___spec__1___closed__1() { +static lean_object* l_Lean_Elab_HeaderSyntax_imports___lambda__1___closed__1; +static lean_object* l_Lean_Elab_HeaderSyntax_imports___closed__1; +LEAN_EXPORT lean_object* 
l_Array_forIn_x27Unsafe_loop___at_Lean_Elab_processHeaderCore___spec__1___lambda__2___boxed(lean_object*, lean_object*, lean_object*, lean_object*, lean_object*); +LEAN_EXPORT lean_object* l_Lean_Elab_HeaderSyntax_startPos(lean_object* x_1) { +_start: +{ +uint8_t x_2; lean_object* x_3; +x_2 = 0; +x_3 = l_Lean_Syntax_getPos_x3f(x_1, x_2); +if (lean_obj_tag(x_3) == 0) +{ +lean_object* x_4; +x_4 = lean_unsigned_to_nat(0u); +return x_4; +} +else +{ +lean_object* x_5; +x_5 = lean_ctor_get(x_3, 0); +lean_inc(x_5); +lean_dec(x_3); +return x_5; +} +} +} +LEAN_EXPORT lean_object* l_Lean_Elab_HeaderSyntax_startPos___boxed(lean_object* x_1) { +_start: +{ +lean_object* x_2; +x_2 = l_Lean_Elab_HeaderSyntax_startPos(x_1); +lean_dec(x_1); +return x_2; +} +} +LEAN_EXPORT uint8_t l_Lean_Elab_HeaderSyntax_isModule(lean_object* x_1) { +_start: +{ +lean_object* x_2; lean_object* x_3; uint8_t x_4; +x_2 = lean_unsigned_to_nat(0u); +x_3 = l_Lean_Syntax_getArg(x_1, x_2); +x_4 = l_Lean_Syntax_isNone(x_3); +lean_dec(x_3); +if (x_4 == 0) +{ +uint8_t x_5; +x_5 = 1; +return x_5; +} +else +{ +uint8_t x_6; +x_6 = 0; +return x_6; +} +} +} +LEAN_EXPORT lean_object* l_Lean_Elab_HeaderSyntax_isModule___boxed(lean_object* x_1) { +_start: +{ +uint8_t x_2; lean_object* x_3; +x_2 = l_Lean_Elab_HeaderSyntax_isModule(x_1); +lean_dec(x_1); +x_3 = lean_box(x_2); +return x_3; +} +} +static lean_object* _init_l_panic___at_Lean_Elab_HeaderSyntax_imports___spec__1___closed__1() { _start: { lean_object* x_1; lean_object* x_2; @@ -127,16 +197,16 @@ x_2 = lean_mk_empty_array_with_capacity(x_1); return x_2; } } -LEAN_EXPORT lean_object* l_panic___at_Lean_Elab_headerToImports___spec__1(lean_object* x_1) { +LEAN_EXPORT lean_object* l_panic___at_Lean_Elab_HeaderSyntax_imports___spec__1(lean_object* x_1) { _start: { lean_object* x_2; lean_object* x_3; -x_2 = l_panic___at_Lean_Elab_headerToImports___spec__1___closed__1; +x_2 = l_panic___at_Lean_Elab_HeaderSyntax_imports___spec__1___closed__1; x_3 = lean_panic_fn(x_2, x_1); return x_3; } } -LEAN_EXPORT lean_object* l_panic___at_Lean_Elab_headerToImports___spec__2(lean_object* x_1) { +LEAN_EXPORT lean_object* l_panic___at_Lean_Elab_HeaderSyntax_imports___spec__2(lean_object* x_1) { _start: { lean_object* x_2; lean_object* x_3; @@ -145,7 +215,7 @@ x_3 = lean_panic_fn(x_2, x_1); return x_3; } } -static lean_object* _init_l_Array_mapMUnsafe_map___at_Lean_Elab_headerToImports___spec__3___lambda__1___closed__1() { +static lean_object* _init_l_Array_mapMUnsafe_map___at_Lean_Elab_HeaderSyntax_imports___spec__3___lambda__1___closed__1() { _start: { lean_object* x_1; @@ -153,15 +223,15 @@ x_1 = lean_mk_string_unchecked("Lean.Elab.Import", 16, 16); return x_1; } } -static lean_object* _init_l_Array_mapMUnsafe_map___at_Lean_Elab_headerToImports___spec__3___lambda__1___closed__2() { +static lean_object* _init_l_Array_mapMUnsafe_map___at_Lean_Elab_HeaderSyntax_imports___spec__3___lambda__1___closed__2() { _start: { lean_object* x_1; -x_1 = lean_mk_string_unchecked("Lean.Elab.headerToImports", 25, 25); +x_1 = lean_mk_string_unchecked("Lean.Elab.HeaderSyntax.imports", 30, 30); return x_1; } } -static lean_object* _init_l_Array_mapMUnsafe_map___at_Lean_Elab_headerToImports___spec__3___lambda__1___closed__3() { +static lean_object* _init_l_Array_mapMUnsafe_map___at_Lean_Elab_HeaderSyntax_imports___spec__3___lambda__1___closed__3() { _start: { lean_object* x_1; @@ -169,20 +239,20 @@ x_1 = lean_mk_string_unchecked("unreachable code has been reached", 33, 33); return x_1; } } -static lean_object* 
_init_l_Array_mapMUnsafe_map___at_Lean_Elab_headerToImports___spec__3___lambda__1___closed__4() { +static lean_object* _init_l_Array_mapMUnsafe_map___at_Lean_Elab_HeaderSyntax_imports___spec__3___lambda__1___closed__4() { _start: { lean_object* x_1; lean_object* x_2; lean_object* x_3; lean_object* x_4; lean_object* x_5; lean_object* x_6; -x_1 = l_Array_mapMUnsafe_map___at_Lean_Elab_headerToImports___spec__3___lambda__1___closed__1; -x_2 = l_Array_mapMUnsafe_map___at_Lean_Elab_headerToImports___spec__3___lambda__1___closed__2; -x_3 = lean_unsigned_to_nat(19u); +x_1 = l_Array_mapMUnsafe_map___at_Lean_Elab_HeaderSyntax_imports___spec__3___lambda__1___closed__1; +x_2 = l_Array_mapMUnsafe_map___at_Lean_Elab_HeaderSyntax_imports___spec__3___lambda__1___closed__2; +x_3 = lean_unsigned_to_nat(27u); x_4 = lean_unsigned_to_nat(13u); -x_5 = l_Array_mapMUnsafe_map___at_Lean_Elab_headerToImports___spec__3___lambda__1___closed__3; +x_5 = l_Array_mapMUnsafe_map___at_Lean_Elab_HeaderSyntax_imports___spec__3___lambda__1___closed__3; x_6 = l___private_Init_Util_0__mkPanicMessageWithDecl(x_1, x_2, x_3, x_4, x_5); return x_6; } } -LEAN_EXPORT lean_object* l_Array_mapMUnsafe_map___at_Lean_Elab_headerToImports___spec__3___lambda__1(lean_object* x_1, lean_object* x_2, lean_object* x_3, lean_object* x_4) { +LEAN_EXPORT lean_object* l_Array_mapMUnsafe_map___at_Lean_Elab_HeaderSyntax_imports___spec__3___lambda__1(lean_object* x_1, lean_object* x_2, lean_object* x_3, lean_object* x_4) { _start: { lean_object* x_5; lean_object* x_6; lean_object* x_7; lean_object* x_8; lean_object* x_9; uint8_t x_10; @@ -196,8 +266,8 @@ if (x_10 == 0) { lean_object* x_11; lean_object* x_12; lean_dec(x_6); -x_11 = l_Array_mapMUnsafe_map___at_Lean_Elab_headerToImports___spec__3___lambda__1___closed__4; -x_12 = l_panic___at_Lean_Elab_headerToImports___spec__2(x_11); +x_11 = l_Array_mapMUnsafe_map___at_Lean_Elab_HeaderSyntax_imports___spec__3___lambda__1___closed__4; +x_12 = l_panic___at_Lean_Elab_HeaderSyntax_imports___spec__2(x_11); return x_12; } else @@ -256,7 +326,7 @@ return x_23; } } } -static lean_object* _init_l_Array_mapMUnsafe_map___at_Lean_Elab_headerToImports___spec__3___lambda__2___closed__1() { +static lean_object* _init_l_Array_mapMUnsafe_map___at_Lean_Elab_HeaderSyntax_imports___spec__3___lambda__2___closed__1() { _start: { lean_object* x_1; @@ -264,7 +334,7 @@ x_1 = lean_mk_string_unchecked("Lean", 4, 4); return x_1; } } -static lean_object* _init_l_Array_mapMUnsafe_map___at_Lean_Elab_headerToImports___spec__3___lambda__2___closed__2() { +static lean_object* _init_l_Array_mapMUnsafe_map___at_Lean_Elab_HeaderSyntax_imports___spec__3___lambda__2___closed__2() { _start: { lean_object* x_1; @@ -272,7 +342,7 @@ x_1 = lean_mk_string_unchecked("Parser", 6, 6); return x_1; } } -static lean_object* _init_l_Array_mapMUnsafe_map___at_Lean_Elab_headerToImports___spec__3___lambda__2___closed__3() { +static lean_object* _init_l_Array_mapMUnsafe_map___at_Lean_Elab_HeaderSyntax_imports___spec__3___lambda__2___closed__3() { _start: { lean_object* x_1; @@ -280,7 +350,7 @@ x_1 = lean_mk_string_unchecked("Module", 6, 6); return x_1; } } -static lean_object* _init_l_Array_mapMUnsafe_map___at_Lean_Elab_headerToImports___spec__3___lambda__2___closed__4() { +static lean_object* _init_l_Array_mapMUnsafe_map___at_Lean_Elab_HeaderSyntax_imports___spec__3___lambda__2___closed__4() { _start: { lean_object* x_1; @@ -288,19 +358,19 @@ x_1 = lean_mk_string_unchecked("all", 3, 3); return x_1; } } -static lean_object* 
_init_l_Array_mapMUnsafe_map___at_Lean_Elab_headerToImports___spec__3___lambda__2___closed__5() { +static lean_object* _init_l_Array_mapMUnsafe_map___at_Lean_Elab_HeaderSyntax_imports___spec__3___lambda__2___closed__5() { _start: { lean_object* x_1; lean_object* x_2; lean_object* x_3; lean_object* x_4; lean_object* x_5; -x_1 = l_Array_mapMUnsafe_map___at_Lean_Elab_headerToImports___spec__3___lambda__2___closed__1; -x_2 = l_Array_mapMUnsafe_map___at_Lean_Elab_headerToImports___spec__3___lambda__2___closed__2; -x_3 = l_Array_mapMUnsafe_map___at_Lean_Elab_headerToImports___spec__3___lambda__2___closed__3; -x_4 = l_Array_mapMUnsafe_map___at_Lean_Elab_headerToImports___spec__3___lambda__2___closed__4; +x_1 = l_Array_mapMUnsafe_map___at_Lean_Elab_HeaderSyntax_imports___spec__3___lambda__2___closed__1; +x_2 = l_Array_mapMUnsafe_map___at_Lean_Elab_HeaderSyntax_imports___spec__3___lambda__2___closed__2; +x_3 = l_Array_mapMUnsafe_map___at_Lean_Elab_HeaderSyntax_imports___spec__3___lambda__2___closed__3; +x_4 = l_Array_mapMUnsafe_map___at_Lean_Elab_HeaderSyntax_imports___spec__3___lambda__2___closed__4; x_5 = l_Lean_Name_mkStr4(x_1, x_2, x_3, x_4); return x_5; } } -LEAN_EXPORT lean_object* l_Array_mapMUnsafe_map___at_Lean_Elab_headerToImports___spec__3___lambda__2(lean_object* x_1, lean_object* x_2, lean_object* x_3) { +LEAN_EXPORT lean_object* l_Array_mapMUnsafe_map___at_Lean_Elab_HeaderSyntax_imports___spec__3___lambda__2(lean_object* x_1, lean_object* x_2, lean_object* x_3) { _start: { lean_object* x_4; lean_object* x_5; uint8_t x_6; @@ -317,8 +387,8 @@ if (x_8 == 0) { lean_object* x_9; lean_object* x_10; lean_dec(x_5); -x_9 = l_Array_mapMUnsafe_map___at_Lean_Elab_headerToImports___spec__3___lambda__1___closed__4; -x_10 = l_panic___at_Lean_Elab_headerToImports___spec__2(x_9); +x_9 = l_Array_mapMUnsafe_map___at_Lean_Elab_HeaderSyntax_imports___spec__3___lambda__1___closed__4; +x_10 = l_panic___at_Lean_Elab_HeaderSyntax_imports___spec__2(x_9); return x_10; } else @@ -327,15 +397,15 @@ lean_object* x_11; lean_object* x_12; lean_object* x_13; uint8_t x_14; x_11 = lean_unsigned_to_nat(0u); x_12 = l_Lean_Syntax_getArg(x_5, x_11); lean_dec(x_5); -x_13 = l_Array_mapMUnsafe_map___at_Lean_Elab_headerToImports___spec__3___lambda__2___closed__5; +x_13 = l_Array_mapMUnsafe_map___at_Lean_Elab_HeaderSyntax_imports___spec__3___lambda__2___closed__5; lean_inc(x_12); x_14 = l_Lean_Syntax_isOfKind(x_12, x_13); if (x_14 == 0) { lean_object* x_15; lean_object* x_16; lean_dec(x_12); -x_15 = l_Array_mapMUnsafe_map___at_Lean_Elab_headerToImports___spec__3___lambda__1___closed__4; -x_16 = l_panic___at_Lean_Elab_headerToImports___spec__2(x_15); +x_15 = l_Array_mapMUnsafe_map___at_Lean_Elab_HeaderSyntax_imports___spec__3___lambda__1___closed__4; +x_16 = l_panic___at_Lean_Elab_HeaderSyntax_imports___spec__2(x_15); return x_16; } else @@ -346,7 +416,7 @@ lean_dec(x_12); x_18 = lean_alloc_ctor(1, 1, 0); lean_ctor_set(x_18, 0, x_17); x_19 = lean_box(0); -x_20 = l_Array_mapMUnsafe_map___at_Lean_Elab_headerToImports___spec__3___lambda__1(x_1, x_3, x_19, x_18); +x_20 = l_Array_mapMUnsafe_map___at_Lean_Elab_HeaderSyntax_imports___spec__3___lambda__1(x_1, x_3, x_19, x_18); lean_dec(x_18); return x_20; } @@ -358,12 +428,12 @@ lean_object* x_21; lean_object* x_22; lean_object* x_23; lean_dec(x_5); x_21 = lean_box(0); x_22 = lean_box(0); -x_23 = l_Array_mapMUnsafe_map___at_Lean_Elab_headerToImports___spec__3___lambda__1(x_1, x_3, x_22, x_21); +x_23 = l_Array_mapMUnsafe_map___at_Lean_Elab_HeaderSyntax_imports___spec__3___lambda__1(x_1, 
x_3, x_22, x_21); return x_23; } } } -static lean_object* _init_l_Array_mapMUnsafe_map___at_Lean_Elab_headerToImports___spec__3___closed__1() { +static lean_object* _init_l_Array_mapMUnsafe_map___at_Lean_Elab_HeaderSyntax_imports___spec__3___closed__1() { _start: { lean_object* x_1; @@ -371,19 +441,19 @@ x_1 = lean_mk_string_unchecked("private", 7, 7); return x_1; } } -static lean_object* _init_l_Array_mapMUnsafe_map___at_Lean_Elab_headerToImports___spec__3___closed__2() { +static lean_object* _init_l_Array_mapMUnsafe_map___at_Lean_Elab_HeaderSyntax_imports___spec__3___closed__2() { _start: { lean_object* x_1; lean_object* x_2; lean_object* x_3; lean_object* x_4; lean_object* x_5; -x_1 = l_Array_mapMUnsafe_map___at_Lean_Elab_headerToImports___spec__3___lambda__2___closed__1; -x_2 = l_Array_mapMUnsafe_map___at_Lean_Elab_headerToImports___spec__3___lambda__2___closed__2; -x_3 = l_Array_mapMUnsafe_map___at_Lean_Elab_headerToImports___spec__3___lambda__2___closed__3; -x_4 = l_Array_mapMUnsafe_map___at_Lean_Elab_headerToImports___spec__3___closed__1; +x_1 = l_Array_mapMUnsafe_map___at_Lean_Elab_HeaderSyntax_imports___spec__3___lambda__2___closed__1; +x_2 = l_Array_mapMUnsafe_map___at_Lean_Elab_HeaderSyntax_imports___spec__3___lambda__2___closed__2; +x_3 = l_Array_mapMUnsafe_map___at_Lean_Elab_HeaderSyntax_imports___spec__3___lambda__2___closed__3; +x_4 = l_Array_mapMUnsafe_map___at_Lean_Elab_HeaderSyntax_imports___spec__3___closed__1; x_5 = l_Lean_Name_mkStr4(x_1, x_2, x_3, x_4); return x_5; } } -LEAN_EXPORT lean_object* l_Array_mapMUnsafe_map___at_Lean_Elab_headerToImports___spec__3(lean_object* x_1, lean_object* x_2, size_t x_3, size_t x_4, lean_object* x_5) { +LEAN_EXPORT lean_object* l_Array_mapMUnsafe_map___at_Lean_Elab_HeaderSyntax_imports___spec__3(lean_object* x_1, lean_object* x_2, size_t x_3, size_t x_4, lean_object* x_5) { _start: { uint8_t x_6; @@ -406,8 +476,8 @@ if (x_10 == 0) { lean_object* x_13; lean_object* x_14; lean_object* x_15; lean_dec(x_7); -x_13 = l_Array_mapMUnsafe_map___at_Lean_Elab_headerToImports___spec__3___lambda__1___closed__4; -x_14 = l_panic___at_Lean_Elab_headerToImports___spec__2(x_13); +x_13 = l_Array_mapMUnsafe_map___at_Lean_Elab_HeaderSyntax_imports___spec__3___lambda__1___closed__4; +x_14 = l_panic___at_Lean_Elab_HeaderSyntax_imports___spec__2(x_13); x_15 = lean_array_uset(x_9, x_4, x_14); x_4 = x_12; x_5 = x_15; @@ -429,8 +499,8 @@ if (x_20 == 0) lean_object* x_21; lean_object* x_22; lean_object* x_23; lean_dec(x_17); lean_dec(x_7); -x_21 = l_Array_mapMUnsafe_map___at_Lean_Elab_headerToImports___spec__3___lambda__1___closed__4; -x_22 = l_panic___at_Lean_Elab_headerToImports___spec__2(x_21); +x_21 = l_Array_mapMUnsafe_map___at_Lean_Elab_HeaderSyntax_imports___spec__3___lambda__1___closed__4; +x_22 = l_panic___at_Lean_Elab_HeaderSyntax_imports___spec__2(x_21); x_23 = lean_array_uset(x_9, x_4, x_22); x_4 = x_12; x_5 = x_23; @@ -441,7 +511,7 @@ else lean_object* x_25; lean_object* x_26; uint8_t x_27; x_25 = l_Lean_Syntax_getArg(x_17, x_8); lean_dec(x_17); -x_26 = l_Array_mapMUnsafe_map___at_Lean_Elab_headerToImports___spec__3___closed__2; +x_26 = l_Array_mapMUnsafe_map___at_Lean_Elab_HeaderSyntax_imports___spec__3___closed__2; lean_inc(x_25); x_27 = l_Lean_Syntax_isOfKind(x_25, x_26); if (x_27 == 0) @@ -449,8 +519,8 @@ if (x_27 == 0) lean_object* x_28; lean_object* x_29; lean_object* x_30; lean_dec(x_25); lean_dec(x_7); -x_28 = l_Array_mapMUnsafe_map___at_Lean_Elab_headerToImports___spec__3___lambda__1___closed__4; -x_29 = 
l_panic___at_Lean_Elab_headerToImports___spec__2(x_28); +x_28 = l_Array_mapMUnsafe_map___at_Lean_Elab_HeaderSyntax_imports___spec__3___lambda__1___closed__4; +x_29 = l_panic___at_Lean_Elab_HeaderSyntax_imports___spec__2(x_28); x_30 = lean_array_uset(x_9, x_4, x_29); x_4 = x_12; x_5 = x_30; @@ -464,7 +534,7 @@ lean_dec(x_25); x_33 = lean_alloc_ctor(1, 1, 0); lean_ctor_set(x_33, 0, x_32); x_34 = lean_box(0); -x_35 = l_Array_mapMUnsafe_map___at_Lean_Elab_headerToImports___spec__3___lambda__2(x_7, x_34, x_33); +x_35 = l_Array_mapMUnsafe_map___at_Lean_Elab_HeaderSyntax_imports___spec__3___lambda__2(x_7, x_34, x_33); lean_dec(x_33); lean_dec(x_7); x_36 = lean_array_uset(x_9, x_4, x_35); @@ -480,7 +550,7 @@ lean_object* x_38; lean_object* x_39; lean_object* x_40; lean_object* x_41; lean_dec(x_17); x_38 = lean_box(0); x_39 = lean_box(0); -x_40 = l_Array_mapMUnsafe_map___at_Lean_Elab_headerToImports___spec__3___lambda__2(x_7, x_39, x_38); +x_40 = l_Array_mapMUnsafe_map___at_Lean_Elab_HeaderSyntax_imports___spec__3___lambda__2(x_7, x_39, x_38); lean_dec(x_7); x_41 = lean_array_uset(x_9, x_4, x_40); x_4 = x_12; @@ -491,7 +561,7 @@ goto _start; } } } -static lean_object* _init_l_Lean_Elab_headerToImports___lambda__1___closed__1() { +static lean_object* _init_l_Lean_Elab_HeaderSyntax_imports___lambda__1___closed__1() { _start: { lean_object* x_1; @@ -499,19 +569,19 @@ x_1 = lean_mk_string_unchecked("import", 6, 6); return x_1; } } -static lean_object* _init_l_Lean_Elab_headerToImports___lambda__1___closed__2() { +static lean_object* _init_l_Lean_Elab_HeaderSyntax_imports___lambda__1___closed__2() { _start: { lean_object* x_1; lean_object* x_2; lean_object* x_3; lean_object* x_4; lean_object* x_5; -x_1 = l_Array_mapMUnsafe_map___at_Lean_Elab_headerToImports___spec__3___lambda__2___closed__1; -x_2 = l_Array_mapMUnsafe_map___at_Lean_Elab_headerToImports___spec__3___lambda__2___closed__2; -x_3 = l_Array_mapMUnsafe_map___at_Lean_Elab_headerToImports___spec__3___lambda__2___closed__3; -x_4 = l_Lean_Elab_headerToImports___lambda__1___closed__1; +x_1 = l_Array_mapMUnsafe_map___at_Lean_Elab_HeaderSyntax_imports___spec__3___lambda__2___closed__1; +x_2 = l_Array_mapMUnsafe_map___at_Lean_Elab_HeaderSyntax_imports___spec__3___lambda__2___closed__2; +x_3 = l_Array_mapMUnsafe_map___at_Lean_Elab_HeaderSyntax_imports___spec__3___lambda__2___closed__3; +x_4 = l_Lean_Elab_HeaderSyntax_imports___lambda__1___closed__1; x_5 = l_Lean_Name_mkStr4(x_1, x_2, x_3, x_4); return x_5; } } -static lean_object* _init_l_Lean_Elab_headerToImports___lambda__1___closed__3() { +static lean_object* _init_l_Lean_Elab_HeaderSyntax_imports___lambda__1___closed__3() { _start: { lean_object* x_1; @@ -519,21 +589,21 @@ x_1 = lean_mk_string_unchecked("Init", 4, 4); return x_1; } } -static lean_object* _init_l_Lean_Elab_headerToImports___lambda__1___closed__4() { +static lean_object* _init_l_Lean_Elab_HeaderSyntax_imports___lambda__1___closed__4() { _start: { lean_object* x_1; lean_object* x_2; lean_object* x_3; x_1 = lean_box(0); -x_2 = l_Lean_Elab_headerToImports___lambda__1___closed__3; +x_2 = l_Lean_Elab_HeaderSyntax_imports___lambda__1___closed__3; x_3 = l_Lean_Name_str___override(x_1, x_2); return x_3; } } -static lean_object* _init_l_Lean_Elab_headerToImports___lambda__1___closed__5() { +static lean_object* _init_l_Lean_Elab_HeaderSyntax_imports___lambda__1___closed__5() { _start: { lean_object* x_1; uint8_t x_2; uint8_t x_3; lean_object* x_4; -x_1 = l_Lean_Elab_headerToImports___lambda__1___closed__4; +x_1 = 
l_Lean_Elab_HeaderSyntax_imports___lambda__1___closed__4; x_2 = 0; x_3 = 1; x_4 = lean_alloc_ctor(0, 1, 2); @@ -543,28 +613,28 @@ lean_ctor_set_uint8(x_4, sizeof(void*)*1 + 1, x_3); return x_4; } } -static lean_object* _init_l_Lean_Elab_headerToImports___lambda__1___closed__6() { +static lean_object* _init_l_Lean_Elab_HeaderSyntax_imports___lambda__1___closed__6() { _start: { lean_object* x_1; lean_object* x_2; lean_object* x_3; x_1 = lean_box(0); -x_2 = l_Lean_Elab_headerToImports___lambda__1___closed__5; +x_2 = l_Lean_Elab_HeaderSyntax_imports___lambda__1___closed__5; x_3 = lean_alloc_ctor(1, 2, 0); lean_ctor_set(x_3, 0, x_2); lean_ctor_set(x_3, 1, x_1); return x_3; } } -static lean_object* _init_l_Lean_Elab_headerToImports___lambda__1___closed__7() { +static lean_object* _init_l_Lean_Elab_HeaderSyntax_imports___lambda__1___closed__7() { _start: { lean_object* x_1; lean_object* x_2; -x_1 = l_Lean_Elab_headerToImports___lambda__1___closed__6; +x_1 = l_Lean_Elab_HeaderSyntax_imports___lambda__1___closed__6; x_2 = lean_array_mk(x_1); return x_2; } } -static lean_object* _init_l_Lean_Elab_headerToImports___lambda__1___closed__8() { +static lean_object* _init_l_Lean_Elab_HeaderSyntax_imports___lambda__1___closed__8() { _start: { lean_object* x_1; lean_object* x_2; @@ -573,7 +643,7 @@ x_2 = lean_array_mk(x_1); return x_2; } } -LEAN_EXPORT lean_object* l_Lean_Elab_headerToImports___lambda__1(lean_object* x_1, lean_object* x_2, lean_object* x_3) { +LEAN_EXPORT lean_object* l_Lean_Elab_HeaderSyntax_imports___lambda__1(lean_object* x_1, lean_object* x_2, lean_object* x_3) { _start: { lean_object* x_4; lean_object* x_5; lean_object* x_6; lean_object* x_7; size_t x_8; size_t x_9; lean_object* x_10; lean_object* x_11; @@ -584,12 +654,12 @@ x_7 = l_Lean_Syntax_getArgs(x_5); lean_dec(x_5); x_8 = lean_array_size(x_7); x_9 = 0; -x_10 = l_Lean_Elab_headerToImports___lambda__1___closed__2; -x_11 = l_Array_mapMUnsafe_map___at_Lean_Elab_headerToImports___spec__3(x_10, x_6, x_8, x_9, x_7); +x_10 = l_Lean_Elab_HeaderSyntax_imports___lambda__1___closed__2; +x_11 = l_Array_mapMUnsafe_map___at_Lean_Elab_HeaderSyntax_imports___spec__3(x_10, x_6, x_8, x_9, x_7); if (lean_obj_tag(x_3) == 0) { lean_object* x_12; lean_object* x_13; -x_12 = l_Lean_Elab_headerToImports___lambda__1___closed__7; +x_12 = l_Lean_Elab_HeaderSyntax_imports___lambda__1___closed__7; x_13 = l_Array_append___rarg(x_12, x_11); lean_dec(x_11); return x_13; @@ -597,27 +667,27 @@ return x_13; else { lean_object* x_14; lean_object* x_15; -x_14 = l_Lean_Elab_headerToImports___lambda__1___closed__8; +x_14 = l_Lean_Elab_HeaderSyntax_imports___lambda__1___closed__8; x_15 = l_Array_append___rarg(x_14, x_11); lean_dec(x_11); return x_15; } } } -static lean_object* _init_l_Lean_Elab_headerToImports___lambda__2___closed__1() { +static lean_object* _init_l_Lean_Elab_HeaderSyntax_imports___lambda__2___closed__1() { _start: { lean_object* x_1; lean_object* x_2; lean_object* x_3; lean_object* x_4; lean_object* x_5; lean_object* x_6; -x_1 = l_Array_mapMUnsafe_map___at_Lean_Elab_headerToImports___spec__3___lambda__1___closed__1; -x_2 = l_Array_mapMUnsafe_map___at_Lean_Elab_headerToImports___spec__3___lambda__1___closed__2; -x_3 = lean_unsigned_to_nat(20u); +x_1 = l_Array_mapMUnsafe_map___at_Lean_Elab_HeaderSyntax_imports___spec__3___lambda__1___closed__1; +x_2 = l_Array_mapMUnsafe_map___at_Lean_Elab_HeaderSyntax_imports___spec__3___lambda__1___closed__2; +x_3 = lean_unsigned_to_nat(28u); x_4 = lean_unsigned_to_nat(9u); -x_5 = 
l_Array_mapMUnsafe_map___at_Lean_Elab_headerToImports___spec__3___lambda__1___closed__3; +x_5 = l_Array_mapMUnsafe_map___at_Lean_Elab_HeaderSyntax_imports___spec__3___lambda__1___closed__3; x_6 = l___private_Init_Util_0__mkPanicMessageWithDecl(x_1, x_2, x_3, x_4, x_5); return x_6; } } -static lean_object* _init_l_Lean_Elab_headerToImports___lambda__2___closed__2() { +static lean_object* _init_l_Lean_Elab_HeaderSyntax_imports___lambda__2___closed__2() { _start: { lean_object* x_1; @@ -625,19 +695,19 @@ x_1 = lean_mk_string_unchecked("prelude", 7, 7); return x_1; } } -static lean_object* _init_l_Lean_Elab_headerToImports___lambda__2___closed__3() { +static lean_object* _init_l_Lean_Elab_HeaderSyntax_imports___lambda__2___closed__3() { _start: { lean_object* x_1; lean_object* x_2; lean_object* x_3; lean_object* x_4; lean_object* x_5; -x_1 = l_Array_mapMUnsafe_map___at_Lean_Elab_headerToImports___spec__3___lambda__2___closed__1; -x_2 = l_Array_mapMUnsafe_map___at_Lean_Elab_headerToImports___spec__3___lambda__2___closed__2; -x_3 = l_Array_mapMUnsafe_map___at_Lean_Elab_headerToImports___spec__3___lambda__2___closed__3; -x_4 = l_Lean_Elab_headerToImports___lambda__2___closed__2; +x_1 = l_Array_mapMUnsafe_map___at_Lean_Elab_HeaderSyntax_imports___spec__3___lambda__2___closed__1; +x_2 = l_Array_mapMUnsafe_map___at_Lean_Elab_HeaderSyntax_imports___spec__3___lambda__2___closed__2; +x_3 = l_Array_mapMUnsafe_map___at_Lean_Elab_HeaderSyntax_imports___spec__3___lambda__2___closed__3; +x_4 = l_Lean_Elab_HeaderSyntax_imports___lambda__2___closed__2; x_5 = l_Lean_Name_mkStr4(x_1, x_2, x_3, x_4); return x_5; } } -LEAN_EXPORT lean_object* l_Lean_Elab_headerToImports___lambda__2(lean_object* x_1, lean_object* x_2, lean_object* x_3) { +LEAN_EXPORT lean_object* l_Lean_Elab_HeaderSyntax_imports___lambda__2(lean_object* x_1, lean_object* x_2, lean_object* x_3) { _start: { lean_object* x_4; lean_object* x_5; uint8_t x_6; @@ -653,8 +723,8 @@ if (x_7 == 0) { lean_object* x_8; lean_object* x_9; lean_dec(x_5); -x_8 = l_Lean_Elab_headerToImports___lambda__2___closed__1; -x_9 = l_panic___at_Lean_Elab_headerToImports___spec__1(x_8); +x_8 = l_Lean_Elab_HeaderSyntax_imports___lambda__2___closed__1; +x_9 = l_panic___at_Lean_Elab_HeaderSyntax_imports___spec__1(x_8); return x_9; } else @@ -663,15 +733,15 @@ lean_object* x_10; lean_object* x_11; lean_object* x_12; uint8_t x_13; x_10 = lean_unsigned_to_nat(0u); x_11 = l_Lean_Syntax_getArg(x_5, x_10); lean_dec(x_5); -x_12 = l_Lean_Elab_headerToImports___lambda__2___closed__3; +x_12 = l_Lean_Elab_HeaderSyntax_imports___lambda__2___closed__3; lean_inc(x_11); x_13 = l_Lean_Syntax_isOfKind(x_11, x_12); if (x_13 == 0) { lean_object* x_14; lean_object* x_15; lean_dec(x_11); -x_14 = l_Lean_Elab_headerToImports___lambda__2___closed__1; -x_15 = l_panic___at_Lean_Elab_headerToImports___spec__1(x_14); +x_14 = l_Lean_Elab_HeaderSyntax_imports___lambda__2___closed__1; +x_15 = l_panic___at_Lean_Elab_HeaderSyntax_imports___spec__1(x_14); return x_15; } else @@ -682,7 +752,7 @@ lean_dec(x_11); x_17 = lean_alloc_ctor(1, 1, 0); lean_ctor_set(x_17, 0, x_16); x_18 = lean_box(0); -x_19 = l_Lean_Elab_headerToImports___lambda__1(x_1, x_18, x_17); +x_19 = l_Lean_Elab_HeaderSyntax_imports___lambda__1(x_1, x_18, x_17); lean_dec(x_17); return x_19; } @@ -694,12 +764,12 @@ lean_object* x_20; lean_object* x_21; lean_object* x_22; lean_dec(x_5); x_20 = lean_box(0); x_21 = lean_box(0); -x_22 = l_Lean_Elab_headerToImports___lambda__1(x_1, x_21, x_20); +x_22 = l_Lean_Elab_HeaderSyntax_imports___lambda__1(x_1, 
x_21, x_20); return x_22; } } } -static lean_object* _init_l_Lean_Elab_headerToImports___closed__1() { +static lean_object* _init_l_Lean_Elab_HeaderSyntax_imports___closed__1() { _start: { lean_object* x_1; @@ -707,19 +777,19 @@ x_1 = lean_mk_string_unchecked("header", 6, 6); return x_1; } } -static lean_object* _init_l_Lean_Elab_headerToImports___closed__2() { +static lean_object* _init_l_Lean_Elab_HeaderSyntax_imports___closed__2() { _start: { lean_object* x_1; lean_object* x_2; lean_object* x_3; lean_object* x_4; lean_object* x_5; -x_1 = l_Array_mapMUnsafe_map___at_Lean_Elab_headerToImports___spec__3___lambda__2___closed__1; -x_2 = l_Array_mapMUnsafe_map___at_Lean_Elab_headerToImports___spec__3___lambda__2___closed__2; -x_3 = l_Array_mapMUnsafe_map___at_Lean_Elab_headerToImports___spec__3___lambda__2___closed__3; -x_4 = l_Lean_Elab_headerToImports___closed__1; +x_1 = l_Array_mapMUnsafe_map___at_Lean_Elab_HeaderSyntax_imports___spec__3___lambda__2___closed__1; +x_2 = l_Array_mapMUnsafe_map___at_Lean_Elab_HeaderSyntax_imports___spec__3___lambda__2___closed__2; +x_3 = l_Array_mapMUnsafe_map___at_Lean_Elab_HeaderSyntax_imports___spec__3___lambda__2___closed__3; +x_4 = l_Lean_Elab_HeaderSyntax_imports___closed__1; x_5 = l_Lean_Name_mkStr4(x_1, x_2, x_3, x_4); return x_5; } } -static lean_object* _init_l_Lean_Elab_headerToImports___closed__3() { +static lean_object* _init_l_Lean_Elab_HeaderSyntax_imports___closed__3() { _start: { lean_object* x_1; @@ -727,31 +797,31 @@ x_1 = lean_mk_string_unchecked("moduleTk", 8, 8); return x_1; } } -static lean_object* _init_l_Lean_Elab_headerToImports___closed__4() { +static lean_object* _init_l_Lean_Elab_HeaderSyntax_imports___closed__4() { _start: { lean_object* x_1; lean_object* x_2; lean_object* x_3; lean_object* x_4; lean_object* x_5; -x_1 = l_Array_mapMUnsafe_map___at_Lean_Elab_headerToImports___spec__3___lambda__2___closed__1; -x_2 = l_Array_mapMUnsafe_map___at_Lean_Elab_headerToImports___spec__3___lambda__2___closed__2; -x_3 = l_Array_mapMUnsafe_map___at_Lean_Elab_headerToImports___spec__3___lambda__2___closed__3; -x_4 = l_Lean_Elab_headerToImports___closed__3; +x_1 = l_Array_mapMUnsafe_map___at_Lean_Elab_HeaderSyntax_imports___spec__3___lambda__2___closed__1; +x_2 = l_Array_mapMUnsafe_map___at_Lean_Elab_HeaderSyntax_imports___spec__3___lambda__2___closed__2; +x_3 = l_Array_mapMUnsafe_map___at_Lean_Elab_HeaderSyntax_imports___spec__3___lambda__2___closed__3; +x_4 = l_Lean_Elab_HeaderSyntax_imports___closed__3; x_5 = l_Lean_Name_mkStr4(x_1, x_2, x_3, x_4); return x_5; } } -LEAN_EXPORT lean_object* l_Lean_Elab_headerToImports(lean_object* x_1) { +LEAN_EXPORT lean_object* l_Lean_Elab_HeaderSyntax_imports(lean_object* x_1) { _start: { lean_object* x_2; uint8_t x_3; -x_2 = l_Lean_Elab_headerToImports___closed__2; +x_2 = l_Lean_Elab_HeaderSyntax_imports___closed__2; lean_inc(x_1); x_3 = l_Lean_Syntax_isOfKind(x_1, x_2); if (x_3 == 0) { lean_object* x_4; lean_object* x_5; lean_dec(x_1); -x_4 = l_Lean_Elab_headerToImports___lambda__2___closed__1; -x_5 = l_panic___at_Lean_Elab_headerToImports___spec__1(x_4); +x_4 = l_Lean_Elab_HeaderSyntax_imports___lambda__2___closed__1; +x_5 = l_panic___at_Lean_Elab_HeaderSyntax_imports___spec__1(x_4); return x_5; } else @@ -771,8 +841,8 @@ if (x_10 == 0) lean_object* x_11; lean_object* x_12; lean_dec(x_7); lean_dec(x_1); -x_11 = l_Lean_Elab_headerToImports___lambda__2___closed__1; -x_12 = l_panic___at_Lean_Elab_headerToImports___spec__1(x_11); +x_11 = l_Lean_Elab_HeaderSyntax_imports___lambda__2___closed__1; +x_12 = 
l_panic___at_Lean_Elab_HeaderSyntax_imports___spec__1(x_11); return x_12; } else @@ -780,7 +850,7 @@ else lean_object* x_13; lean_object* x_14; uint8_t x_15; x_13 = l_Lean_Syntax_getArg(x_7, x_6); lean_dec(x_7); -x_14 = l_Lean_Elab_headerToImports___closed__4; +x_14 = l_Lean_Elab_HeaderSyntax_imports___closed__4; lean_inc(x_13); x_15 = l_Lean_Syntax_isOfKind(x_13, x_14); if (x_15 == 0) @@ -788,8 +858,8 @@ if (x_15 == 0) lean_object* x_16; lean_object* x_17; lean_dec(x_13); lean_dec(x_1); -x_16 = l_Lean_Elab_headerToImports___lambda__2___closed__1; -x_17 = l_panic___at_Lean_Elab_headerToImports___spec__1(x_16); +x_16 = l_Lean_Elab_HeaderSyntax_imports___lambda__2___closed__1; +x_17 = l_panic___at_Lean_Elab_HeaderSyntax_imports___spec__1(x_16); return x_17; } else @@ -800,7 +870,7 @@ lean_dec(x_13); x_19 = lean_alloc_ctor(1, 1, 0); lean_ctor_set(x_19, 0, x_18); x_20 = lean_box(0); -x_21 = l_Lean_Elab_headerToImports___lambda__2(x_1, x_20, x_19); +x_21 = l_Lean_Elab_HeaderSyntax_imports___lambda__2(x_1, x_20, x_19); lean_dec(x_19); lean_dec(x_1); return x_21; @@ -813,18 +883,18 @@ lean_object* x_22; lean_object* x_23; lean_object* x_24; lean_dec(x_7); x_22 = lean_box(0); x_23 = lean_box(0); -x_24 = l_Lean_Elab_headerToImports___lambda__2(x_1, x_23, x_22); +x_24 = l_Lean_Elab_HeaderSyntax_imports___lambda__2(x_1, x_23, x_22); lean_dec(x_1); return x_24; } } } } -LEAN_EXPORT lean_object* l_Array_mapMUnsafe_map___at_Lean_Elab_headerToImports___spec__3___lambda__1___boxed(lean_object* x_1, lean_object* x_2, lean_object* x_3, lean_object* x_4) { +LEAN_EXPORT lean_object* l_Array_mapMUnsafe_map___at_Lean_Elab_HeaderSyntax_imports___spec__3___lambda__1___boxed(lean_object* x_1, lean_object* x_2, lean_object* x_3, lean_object* x_4) { _start: { lean_object* x_5; -x_5 = l_Array_mapMUnsafe_map___at_Lean_Elab_headerToImports___spec__3___lambda__1(x_1, x_2, x_3, x_4); +x_5 = l_Array_mapMUnsafe_map___at_Lean_Elab_HeaderSyntax_imports___spec__3___lambda__1(x_1, x_2, x_3, x_4); lean_dec(x_4); lean_dec(x_3); lean_dec(x_2); @@ -832,18 +902,18 @@ lean_dec(x_1); return x_5; } } -LEAN_EXPORT lean_object* l_Array_mapMUnsafe_map___at_Lean_Elab_headerToImports___spec__3___lambda__2___boxed(lean_object* x_1, lean_object* x_2, lean_object* x_3) { +LEAN_EXPORT lean_object* l_Array_mapMUnsafe_map___at_Lean_Elab_HeaderSyntax_imports___spec__3___lambda__2___boxed(lean_object* x_1, lean_object* x_2, lean_object* x_3) { _start: { lean_object* x_4; -x_4 = l_Array_mapMUnsafe_map___at_Lean_Elab_headerToImports___spec__3___lambda__2(x_1, x_2, x_3); +x_4 = l_Array_mapMUnsafe_map___at_Lean_Elab_HeaderSyntax_imports___spec__3___lambda__2(x_1, x_2, x_3); lean_dec(x_3); lean_dec(x_2); lean_dec(x_1); return x_4; } } -LEAN_EXPORT lean_object* l_Array_mapMUnsafe_map___at_Lean_Elab_headerToImports___spec__3___boxed(lean_object* x_1, lean_object* x_2, lean_object* x_3, lean_object* x_4, lean_object* x_5) { +LEAN_EXPORT lean_object* l_Array_mapMUnsafe_map___at_Lean_Elab_HeaderSyntax_imports___spec__3___boxed(lean_object* x_1, lean_object* x_2, lean_object* x_3, lean_object* x_4, lean_object* x_5) { _start: { size_t x_6; size_t x_7; lean_object* x_8; @@ -851,35 +921,43 @@ x_6 = lean_unbox_usize(x_3); lean_dec(x_3); x_7 = lean_unbox_usize(x_4); lean_dec(x_4); -x_8 = l_Array_mapMUnsafe_map___at_Lean_Elab_headerToImports___spec__3(x_1, x_2, x_6, x_7, x_5); +x_8 = l_Array_mapMUnsafe_map___at_Lean_Elab_HeaderSyntax_imports___spec__3(x_1, x_2, x_6, x_7, x_5); lean_dec(x_2); lean_dec(x_1); return x_8; } } -LEAN_EXPORT lean_object* 
l_Lean_Elab_headerToImports___lambda__1___boxed(lean_object* x_1, lean_object* x_2, lean_object* x_3) { +LEAN_EXPORT lean_object* l_Lean_Elab_HeaderSyntax_imports___lambda__1___boxed(lean_object* x_1, lean_object* x_2, lean_object* x_3) { _start: { lean_object* x_4; -x_4 = l_Lean_Elab_headerToImports___lambda__1(x_1, x_2, x_3); +x_4 = l_Lean_Elab_HeaderSyntax_imports___lambda__1(x_1, x_2, x_3); lean_dec(x_3); lean_dec(x_2); lean_dec(x_1); return x_4; } } -LEAN_EXPORT lean_object* l_Lean_Elab_headerToImports___lambda__2___boxed(lean_object* x_1, lean_object* x_2, lean_object* x_3) { +LEAN_EXPORT lean_object* l_Lean_Elab_HeaderSyntax_imports___lambda__2___boxed(lean_object* x_1, lean_object* x_2, lean_object* x_3) { _start: { lean_object* x_4; -x_4 = l_Lean_Elab_headerToImports___lambda__2(x_1, x_2, x_3); +x_4 = l_Lean_Elab_HeaderSyntax_imports___lambda__2(x_1, x_2, x_3); lean_dec(x_3); lean_dec(x_2); lean_dec(x_1); return x_4; } } -static lean_object* _init_l_Array_forIn_x27Unsafe_loop___at_Lean_Elab_processHeader___spec__1___lambda__1___closed__1() { +LEAN_EXPORT lean_object* l_Lean_Elab_headerToImports(lean_object* x_1) { +_start: +{ +lean_object* x_2; +x_2 = l_Lean_Elab_HeaderSyntax_imports(x_1); +return x_2; +} +} +static lean_object* _init_l_Array_forIn_x27Unsafe_loop___at_Lean_Elab_processHeaderCore___spec__1___lambda__1___closed__1() { _start: { lean_object* x_1; @@ -887,17 +965,17 @@ x_1 = lean_mk_string_unchecked("cannot use `private import` without `module`", 4 return x_1; } } -static lean_object* _init_l_Array_forIn_x27Unsafe_loop___at_Lean_Elab_processHeader___spec__1___lambda__1___closed__2() { +static lean_object* _init_l_Array_forIn_x27Unsafe_loop___at_Lean_Elab_processHeaderCore___spec__1___lambda__1___closed__2() { _start: { lean_object* x_1; lean_object* x_2; -x_1 = l_Array_forIn_x27Unsafe_loop___at_Lean_Elab_processHeader___spec__1___lambda__1___closed__1; +x_1 = l_Array_forIn_x27Unsafe_loop___at_Lean_Elab_processHeaderCore___spec__1___lambda__1___closed__1; x_2 = lean_alloc_ctor(18, 1, 0); lean_ctor_set(x_2, 0, x_1); return x_2; } } -static lean_object* _init_l_Array_forIn_x27Unsafe_loop___at_Lean_Elab_processHeader___spec__1___lambda__1___closed__3() { +static lean_object* _init_l_Array_forIn_x27Unsafe_loop___at_Lean_Elab_processHeaderCore___spec__1___lambda__1___closed__3() { _start: { lean_object* x_1; lean_object* x_2; @@ -907,7 +985,7 @@ lean_ctor_set(x_2, 0, x_1); return x_2; } } -LEAN_EXPORT lean_object* l_Array_forIn_x27Unsafe_loop___at_Lean_Elab_processHeader___spec__1___lambda__1(uint8_t x_1, lean_object* x_2, lean_object* x_3, lean_object* x_4) { +LEAN_EXPORT lean_object* l_Array_forIn_x27Unsafe_loop___at_Lean_Elab_processHeaderCore___spec__1___lambda__1(uint8_t x_1, lean_object* x_2, lean_object* x_3, lean_object* x_4) { _start: { if (x_1 == 0) @@ -917,7 +995,7 @@ x_5 = lean_ctor_get_uint8(x_2, sizeof(void*)*1 + 1); if (x_5 == 0) { lean_object* x_6; lean_object* x_7; -x_6 = l_Array_forIn_x27Unsafe_loop___at_Lean_Elab_processHeader___spec__1___lambda__1___closed__2; +x_6 = l_Array_forIn_x27Unsafe_loop___at_Lean_Elab_processHeaderCore___spec__1___lambda__1___closed__2; x_7 = lean_alloc_ctor(1, 2, 0); lean_ctor_set(x_7, 0, x_6); lean_ctor_set(x_7, 1, x_4); @@ -926,7 +1004,7 @@ return x_7; else { lean_object* x_8; lean_object* x_9; -x_8 = l_Array_forIn_x27Unsafe_loop___at_Lean_Elab_processHeader___spec__1___lambda__1___closed__3; +x_8 = l_Array_forIn_x27Unsafe_loop___at_Lean_Elab_processHeaderCore___spec__1___lambda__1___closed__3; x_9 = lean_alloc_ctor(0, 2, 0); 
lean_ctor_set(x_9, 0, x_8); lean_ctor_set(x_9, 1, x_4); @@ -936,7 +1014,7 @@ return x_9; else { lean_object* x_10; lean_object* x_11; -x_10 = l_Array_forIn_x27Unsafe_loop___at_Lean_Elab_processHeader___spec__1___lambda__1___closed__3; +x_10 = l_Array_forIn_x27Unsafe_loop___at_Lean_Elab_processHeaderCore___spec__1___lambda__1___closed__3; x_11 = lean_alloc_ctor(0, 2, 0); lean_ctor_set(x_11, 0, x_10); lean_ctor_set(x_11, 1, x_4); @@ -944,7 +1022,7 @@ return x_11; } } } -static lean_object* _init_l_Array_forIn_x27Unsafe_loop___at_Lean_Elab_processHeader___spec__1___lambda__2___closed__1() { +static lean_object* _init_l_Array_forIn_x27Unsafe_loop___at_Lean_Elab_processHeaderCore___spec__1___lambda__2___closed__1() { _start: { lean_object* x_1; @@ -952,17 +1030,17 @@ x_1 = lean_mk_string_unchecked("cannot use `import all` across module path roots return x_1; } } -static lean_object* _init_l_Array_forIn_x27Unsafe_loop___at_Lean_Elab_processHeader___spec__1___lambda__2___closed__2() { +static lean_object* _init_l_Array_forIn_x27Unsafe_loop___at_Lean_Elab_processHeaderCore___spec__1___lambda__2___closed__2() { _start: { lean_object* x_1; lean_object* x_2; -x_1 = l_Array_forIn_x27Unsafe_loop___at_Lean_Elab_processHeader___spec__1___lambda__2___closed__1; +x_1 = l_Array_forIn_x27Unsafe_loop___at_Lean_Elab_processHeaderCore___spec__1___lambda__2___closed__1; x_2 = lean_alloc_ctor(18, 1, 0); lean_ctor_set(x_2, 0, x_1); return x_2; } } -LEAN_EXPORT lean_object* l_Array_forIn_x27Unsafe_loop___at_Lean_Elab_processHeader___spec__1___lambda__2(uint8_t x_1, lean_object* x_2, lean_object* x_3, lean_object* x_4, lean_object* x_5) { +LEAN_EXPORT lean_object* l_Array_forIn_x27Unsafe_loop___at_Lean_Elab_processHeaderCore___spec__1___lambda__2(uint8_t x_1, lean_object* x_2, lean_object* x_3, lean_object* x_4, lean_object* x_5) { _start: { uint8_t x_6; @@ -971,7 +1049,7 @@ if (x_6 == 0) { lean_object* x_7; lean_object* x_8; x_7 = lean_box(0); -x_8 = l_Array_forIn_x27Unsafe_loop___at_Lean_Elab_processHeader___spec__1___lambda__1(x_1, x_2, x_7, x_5); +x_8 = l_Array_forIn_x27Unsafe_loop___at_Lean_Elab_processHeaderCore___spec__1___lambda__1(x_1, x_2, x_7, x_5); return x_8; } else @@ -986,7 +1064,7 @@ lean_dec(x_9); if (x_12 == 0) { lean_object* x_13; lean_object* x_14; -x_13 = l_Array_forIn_x27Unsafe_loop___at_Lean_Elab_processHeader___spec__1___lambda__2___closed__2; +x_13 = l_Array_forIn_x27Unsafe_loop___at_Lean_Elab_processHeaderCore___spec__1___lambda__2___closed__2; x_14 = lean_alloc_ctor(1, 2, 0); lean_ctor_set(x_14, 0, x_13); lean_ctor_set(x_14, 1, x_5); @@ -996,13 +1074,13 @@ else { lean_object* x_15; lean_object* x_16; x_15 = lean_box(0); -x_16 = l_Array_forIn_x27Unsafe_loop___at_Lean_Elab_processHeader___spec__1___lambda__1(x_1, x_2, x_15, x_5); +x_16 = l_Array_forIn_x27Unsafe_loop___at_Lean_Elab_processHeaderCore___spec__1___lambda__1(x_1, x_2, x_15, x_5); return x_16; } } } } -static lean_object* _init_l_Array_forIn_x27Unsafe_loop___at_Lean_Elab_processHeader___spec__1___closed__1() { +static lean_object* _init_l_Array_forIn_x27Unsafe_loop___at_Lean_Elab_processHeaderCore___spec__1___closed__1() { _start: { lean_object* x_1; @@ -1010,17 +1088,17 @@ x_1 = lean_mk_string_unchecked("cannot use `import all` without `module`", 40, 4 return x_1; } } -static lean_object* _init_l_Array_forIn_x27Unsafe_loop___at_Lean_Elab_processHeader___spec__1___closed__2() { +static lean_object* _init_l_Array_forIn_x27Unsafe_loop___at_Lean_Elab_processHeaderCore___spec__1___closed__2() { _start: { lean_object* x_1; lean_object* 
x_2; -x_1 = l_Array_forIn_x27Unsafe_loop___at_Lean_Elab_processHeader___spec__1___closed__1; +x_1 = l_Array_forIn_x27Unsafe_loop___at_Lean_Elab_processHeaderCore___spec__1___closed__1; x_2 = lean_alloc_ctor(18, 1, 0); lean_ctor_set(x_2, 0, x_1); return x_2; } } -LEAN_EXPORT lean_object* l_Array_forIn_x27Unsafe_loop___at_Lean_Elab_processHeader___spec__1(lean_object* x_1, lean_object* x_2, lean_object* x_3, uint8_t x_4, lean_object* x_5, size_t x_6, size_t x_7, lean_object* x_8, lean_object* x_9) { +LEAN_EXPORT lean_object* l_Array_forIn_x27Unsafe_loop___at_Lean_Elab_processHeaderCore___spec__1(lean_object* x_1, uint8_t x_2, lean_object* x_3, lean_object* x_4, lean_object* x_5, size_t x_6, size_t x_7, lean_object* x_8, lean_object* x_9) { _start: { uint8_t x_10; @@ -1038,7 +1116,7 @@ else lean_object* x_12; lean_dec(x_8); x_12 = lean_array_uget(x_5, x_7); -if (x_4 == 0) +if (x_2 == 0) { uint8_t x_13; x_13 = lean_ctor_get_uint8(x_12, sizeof(void*)*1); @@ -1046,7 +1124,7 @@ if (x_13 == 0) { lean_object* x_14; lean_object* x_15; x_14 = lean_box(0); -x_15 = l_Array_forIn_x27Unsafe_loop___at_Lean_Elab_processHeader___spec__1___lambda__2(x_4, x_12, x_1, x_14, x_9); +x_15 = l_Array_forIn_x27Unsafe_loop___at_Lean_Elab_processHeaderCore___spec__1___lambda__2(x_2, x_12, x_3, x_14, x_9); lean_dec(x_12); if (lean_obj_tag(x_15) == 0) { @@ -1093,7 +1171,7 @@ else { lean_object* x_26; lean_object* x_27; lean_dec(x_12); -x_26 = l_Array_forIn_x27Unsafe_loop___at_Lean_Elab_processHeader___spec__1___closed__2; +x_26 = l_Array_forIn_x27Unsafe_loop___at_Lean_Elab_processHeaderCore___spec__1___closed__2; x_27 = lean_alloc_ctor(1, 2, 0); lean_ctor_set(x_27, 0, x_26); lean_ctor_set(x_27, 1, x_9); @@ -1104,7 +1182,7 @@ else { lean_object* x_28; lean_object* x_29; x_28 = lean_box(0); -x_29 = l_Array_forIn_x27Unsafe_loop___at_Lean_Elab_processHeader___spec__1___lambda__2(x_4, x_12, x_1, x_28, x_9); +x_29 = l_Array_forIn_x27Unsafe_loop___at_Lean_Elab_processHeaderCore___spec__1___lambda__2(x_2, x_12, x_3, x_28, x_9); lean_dec(x_12); if (lean_obj_tag(x_29) == 0) { @@ -1150,7 +1228,7 @@ return x_39; } } } -LEAN_EXPORT lean_object* l_Lean_Elab_processHeader___lambda__1(lean_object* x_1, lean_object* x_2, lean_object* x_3) { +LEAN_EXPORT lean_object* l_Lean_Elab_processHeaderCore___lambda__1(lean_object* x_1, lean_object* x_2, lean_object* x_3) { _start: { uint8_t x_4; @@ -1185,7 +1263,7 @@ return x_12; } } } -static lean_object* _init_l_Lean_Elab_processHeader___closed__1() { +static lean_object* _init_l_Lean_Elab_processHeaderCore___closed__1() { _start: { lean_object* x_1; @@ -1193,7 +1271,7 @@ x_1 = lean_mk_string_unchecked("", 0, 0); return x_1; } } -static lean_object* _init_l_Lean_Elab_processHeader___closed__2() { +static lean_object* _init_l_Lean_Elab_processHeaderCore___closed__2() { _start: { lean_object* x_1; @@ -1201,400 +1279,373 @@ x_1 = l_Lean_Elab_inServer; return x_1; } } -LEAN_EXPORT lean_object* l_Lean_Elab_processHeader(lean_object* x_1, lean_object* x_2, lean_object* x_3, lean_object* x_4, uint32_t x_5, lean_object* x_6, uint8_t x_7, lean_object* x_8, lean_object* x_9) { +LEAN_EXPORT lean_object* l_Lean_Elab_processHeaderCore(lean_object* x_1, lean_object* x_2, uint8_t x_3, lean_object* x_4, lean_object* x_5, lean_object* x_6, uint32_t x_7, lean_object* x_8, uint8_t x_9, lean_object* x_10, lean_object* x_11, lean_object* x_12) { _start: { -lean_object* x_10; lean_object* x_11; uint8_t x_12; lean_object* x_13; lean_object* x_14; size_t x_15; size_t x_16; lean_object* x_17; lean_object* x_18; uint8_t 
x_51; -x_10 = lean_unsigned_to_nat(0u); -x_11 = l_Lean_Syntax_getArg(x_1, x_10); -x_12 = l_Lean_Syntax_isNone(x_11); -lean_dec(x_11); -lean_inc(x_1); -x_13 = l_Lean_Elab_headerToImports(x_1); -x_14 = lean_box(0); -x_15 = lean_array_size(x_13); -x_16 = 0; -if (x_12 == 0) -{ -uint8_t x_102; -x_102 = 1; -x_51 = x_102; -goto block_101; -} -else +lean_object* x_13; size_t x_14; size_t x_15; lean_object* x_16; lean_object* x_17; lean_object* x_18; lean_object* x_19; +x_13 = lean_box(0); +x_14 = lean_array_size(x_2); +x_15 = 0; +x_16 = lean_box(0); +x_17 = l_Array_forIn_x27Unsafe_loop___at_Lean_Elab_processHeaderCore___spec__1(x_2, x_3, x_10, x_13, x_2, x_14, x_15, x_16, x_12); +if (x_3 == 0) { -uint8_t x_103; -x_103 = 0; -x_51 = x_103; -goto block_101; -} -block_50: +if (lean_obj_tag(x_17) == 0) { -uint32_t x_19; lean_object* x_20; -x_19 = 0; -x_20 = lean_mk_empty_environment(x_19, x_18); -if (lean_obj_tag(x_20) == 0) +lean_object* x_43; uint8_t x_44; uint8_t x_45; lean_object* x_46; +x_43 = lean_ctor_get(x_17, 1); +lean_inc(x_43); +lean_dec(x_17); +x_44 = 1; +x_45 = 2; +x_46 = l_Lean_importModules(x_2, x_4, x_7, x_8, x_9, x_44, x_45, x_11, x_43); +if (lean_obj_tag(x_46) == 0) { -lean_object* x_21; lean_object* x_22; uint8_t x_23; lean_object* x_24; lean_object* x_25; lean_object* x_26; lean_object* x_27; lean_object* x_28; lean_object* x_29; lean_object* x_30; -x_21 = lean_ctor_get(x_20, 0); -lean_inc(x_21); -x_22 = lean_ctor_get(x_20, 1); -lean_inc(x_22); -lean_dec(x_20); -x_23 = 0; -x_24 = l_Lean_Syntax_getPos_x3f(x_1, x_23); -lean_dec(x_1); -x_25 = lean_ctor_get(x_4, 2); -lean_inc(x_25); -x_26 = lean_ctor_get(x_4, 1); -lean_inc(x_26); -lean_dec(x_4); -x_27 = lean_box(0); -x_28 = lean_io_error_to_string(x_17); -x_29 = lean_alloc_ctor(3, 1, 0); -lean_ctor_set(x_29, 0, x_28); -x_30 = l_Lean_MessageData_ofFormat(x_29); -if (lean_obj_tag(x_24) == 0) +uint8_t x_47; +lean_dec(x_6); +x_47 = !lean_is_exclusive(x_46); +if (x_47 == 0) { -lean_object* x_31; uint8_t x_32; lean_object* x_33; lean_object* x_34; lean_object* x_35; lean_object* x_36; lean_object* x_37; -x_31 = l_Lean_FileMap_toPosition(x_25, x_10); -x_32 = 2; -x_33 = l_Lean_Elab_processHeader___closed__1; -x_34 = lean_alloc_ctor(0, 5, 3); -lean_ctor_set(x_34, 0, x_26); -lean_ctor_set(x_34, 1, x_31); -lean_ctor_set(x_34, 2, x_27); -lean_ctor_set(x_34, 3, x_33); -lean_ctor_set(x_34, 4, x_30); -lean_ctor_set_uint8(x_34, sizeof(void*)*5, x_23); -lean_ctor_set_uint8(x_34, sizeof(void*)*5 + 1, x_32); -lean_ctor_set_uint8(x_34, sizeof(void*)*5 + 2, x_23); -x_35 = l_Lean_MessageLog_add(x_34, x_3); -x_36 = lean_alloc_ctor(0, 2, 0); -lean_ctor_set(x_36, 0, x_21); -lean_ctor_set(x_36, 1, x_35); -x_37 = l_Lean_Elab_processHeader___lambda__1(x_8, x_36, x_22); -return x_37; +lean_object* x_48; lean_object* x_49; +x_48 = lean_ctor_get(x_46, 1); +lean_ctor_set(x_46, 1, x_5); +x_49 = l_Lean_Elab_processHeaderCore___lambda__1(x_10, x_46, x_48); +return x_49; } else { -lean_object* x_38; lean_object* x_39; uint8_t x_40; lean_object* x_41; lean_object* x_42; lean_object* x_43; lean_object* x_44; lean_object* x_45; -x_38 = lean_ctor_get(x_24, 0); -lean_inc(x_38); -lean_dec(x_24); -x_39 = l_Lean_FileMap_toPosition(x_25, x_38); -lean_dec(x_38); -x_40 = 2; -x_41 = l_Lean_Elab_processHeader___closed__1; -x_42 = lean_alloc_ctor(0, 5, 3); -lean_ctor_set(x_42, 0, x_26); -lean_ctor_set(x_42, 1, x_39); -lean_ctor_set(x_42, 2, x_27); -lean_ctor_set(x_42, 3, x_41); -lean_ctor_set(x_42, 4, x_30); -lean_ctor_set_uint8(x_42, sizeof(void*)*5, x_23); -lean_ctor_set_uint8(x_42, 
sizeof(void*)*5 + 1, x_40); -lean_ctor_set_uint8(x_42, sizeof(void*)*5 + 2, x_23); -x_43 = l_Lean_MessageLog_add(x_42, x_3); -x_44 = lean_alloc_ctor(0, 2, 0); -lean_ctor_set(x_44, 0, x_21); -lean_ctor_set(x_44, 1, x_43); -x_45 = l_Lean_Elab_processHeader___lambda__1(x_8, x_44, x_22); -return x_45; -} -} -else -{ -uint8_t x_46; -lean_dec(x_17); -lean_dec(x_8); -lean_dec(x_4); -lean_dec(x_3); -lean_dec(x_1); -x_46 = !lean_is_exclusive(x_20); -if (x_46 == 0) -{ -return x_20; +lean_object* x_50; lean_object* x_51; lean_object* x_52; lean_object* x_53; +x_50 = lean_ctor_get(x_46, 0); +x_51 = lean_ctor_get(x_46, 1); +lean_inc(x_51); +lean_inc(x_50); +lean_dec(x_46); +x_52 = lean_alloc_ctor(0, 2, 0); +lean_ctor_set(x_52, 0, x_50); +lean_ctor_set(x_52, 1, x_5); +x_53 = l_Lean_Elab_processHeaderCore___lambda__1(x_10, x_52, x_51); +return x_53; +} } else { -lean_object* x_47; lean_object* x_48; lean_object* x_49; -x_47 = lean_ctor_get(x_20, 0); -x_48 = lean_ctor_get(x_20, 1); -lean_inc(x_48); -lean_inc(x_47); -lean_dec(x_20); -x_49 = lean_alloc_ctor(1, 2, 0); -lean_ctor_set(x_49, 0, x_47); -lean_ctor_set(x_49, 1, x_48); -return x_49; +lean_object* x_54; lean_object* x_55; +x_54 = lean_ctor_get(x_46, 0); +lean_inc(x_54); +x_55 = lean_ctor_get(x_46, 1); +lean_inc(x_55); +lean_dec(x_46); +x_18 = x_54; +x_19 = x_55; +goto block_42; +} } +else +{ +lean_object* x_56; lean_object* x_57; +lean_dec(x_11); +lean_dec(x_8); +lean_dec(x_4); +lean_dec(x_2); +x_56 = lean_ctor_get(x_17, 0); +lean_inc(x_56); +x_57 = lean_ctor_get(x_17, 1); +lean_inc(x_57); +lean_dec(x_17); +x_18 = x_56; +x_19 = x_57; +goto block_42; } } -block_101: +else { -lean_object* x_52; lean_object* x_53; -x_52 = lean_box(0); -x_53 = l_Array_forIn_x27Unsafe_loop___at_Lean_Elab_processHeader___spec__1(x_8, x_13, x_14, x_51, x_13, x_15, x_16, x_52, x_9); -if (x_51 == 0) +lean_object* x_58; uint8_t x_59; +x_58 = l_Lean_Elab_processHeaderCore___closed__2; +x_59 = l_Lean_Option_get___at___private_Lean_Util_Profile_0__Lean_get__profiler___spec__1(x_4, x_58); +if (x_59 == 0) { -if (lean_obj_tag(x_53) == 0) +if (lean_obj_tag(x_17) == 0) { -lean_object* x_54; uint8_t x_55; uint8_t x_56; lean_object* x_57; -x_54 = lean_ctor_get(x_53, 1); -lean_inc(x_54); -lean_dec(x_53); -x_55 = 1; -x_56 = 2; -x_57 = l_Lean_importModules(x_13, x_2, x_5, x_6, x_7, x_55, x_56, x_54); -if (lean_obj_tag(x_57) == 0) +lean_object* x_60; uint8_t x_61; uint8_t x_62; lean_object* x_63; +x_60 = lean_ctor_get(x_17, 1); +lean_inc(x_60); +lean_dec(x_17); +x_61 = 1; +x_62 = 0; +x_63 = l_Lean_importModules(x_2, x_4, x_7, x_8, x_9, x_61, x_62, x_11, x_60); +if (lean_obj_tag(x_63) == 0) { -uint8_t x_58; -lean_dec(x_4); -lean_dec(x_1); -x_58 = !lean_is_exclusive(x_57); -if (x_58 == 0) +uint8_t x_64; +lean_dec(x_6); +x_64 = !lean_is_exclusive(x_63); +if (x_64 == 0) { -lean_object* x_59; lean_object* x_60; -x_59 = lean_ctor_get(x_57, 1); -lean_ctor_set(x_57, 1, x_3); -x_60 = l_Lean_Elab_processHeader___lambda__1(x_8, x_57, x_59); -return x_60; +lean_object* x_65; lean_object* x_66; +x_65 = lean_ctor_get(x_63, 1); +lean_ctor_set(x_63, 1, x_5); +x_66 = l_Lean_Elab_processHeaderCore___lambda__1(x_10, x_63, x_65); +return x_66; } else { -lean_object* x_61; lean_object* x_62; lean_object* x_63; lean_object* x_64; -x_61 = lean_ctor_get(x_57, 0); -x_62 = lean_ctor_get(x_57, 1); -lean_inc(x_62); -lean_inc(x_61); -lean_dec(x_57); -x_63 = lean_alloc_ctor(0, 2, 0); -lean_ctor_set(x_63, 0, x_61); -lean_ctor_set(x_63, 1, x_3); -x_64 = l_Lean_Elab_processHeader___lambda__1(x_8, x_63, x_62); -return 
x_64; +lean_object* x_67; lean_object* x_68; lean_object* x_69; lean_object* x_70; +x_67 = lean_ctor_get(x_63, 0); +x_68 = lean_ctor_get(x_63, 1); +lean_inc(x_68); +lean_inc(x_67); +lean_dec(x_63); +x_69 = lean_alloc_ctor(0, 2, 0); +lean_ctor_set(x_69, 0, x_67); +lean_ctor_set(x_69, 1, x_5); +x_70 = l_Lean_Elab_processHeaderCore___lambda__1(x_10, x_69, x_68); +return x_70; } } else { -lean_object* x_65; lean_object* x_66; -x_65 = lean_ctor_get(x_57, 0); -lean_inc(x_65); -x_66 = lean_ctor_get(x_57, 1); -lean_inc(x_66); -lean_dec(x_57); -x_17 = x_65; -x_18 = x_66; -goto block_50; +lean_object* x_71; lean_object* x_72; +x_71 = lean_ctor_get(x_63, 0); +lean_inc(x_71); +x_72 = lean_ctor_get(x_63, 1); +lean_inc(x_72); +lean_dec(x_63); +x_18 = x_71; +x_19 = x_72; +goto block_42; } } else { -lean_object* x_67; lean_object* x_68; -lean_dec(x_13); -lean_dec(x_6); +lean_object* x_73; lean_object* x_74; +lean_dec(x_11); +lean_dec(x_8); +lean_dec(x_4); lean_dec(x_2); -x_67 = lean_ctor_get(x_53, 0); -lean_inc(x_67); -x_68 = lean_ctor_get(x_53, 1); -lean_inc(x_68); -lean_dec(x_53); -x_17 = x_67; -x_18 = x_68; -goto block_50; +x_73 = lean_ctor_get(x_17, 0); +lean_inc(x_73); +x_74 = lean_ctor_get(x_17, 1); +lean_inc(x_74); +lean_dec(x_17); +x_18 = x_73; +x_19 = x_74; +goto block_42; } } else { -lean_object* x_69; uint8_t x_70; -x_69 = l_Lean_Elab_processHeader___closed__2; -x_70 = l_Lean_Option_get___at___private_Lean_Util_Profile_0__Lean_get__profiler___spec__1(x_2, x_69); -if (x_70 == 0) -{ -if (lean_obj_tag(x_53) == 0) -{ -lean_object* x_71; uint8_t x_72; uint8_t x_73; lean_object* x_74; -x_71 = lean_ctor_get(x_53, 1); -lean_inc(x_71); -lean_dec(x_53); -x_72 = 1; -x_73 = 0; -x_74 = l_Lean_importModules(x_13, x_2, x_5, x_6, x_7, x_72, x_73, x_71); -if (lean_obj_tag(x_74) == 0) +if (lean_obj_tag(x_17) == 0) { -uint8_t x_75; -lean_dec(x_4); -lean_dec(x_1); -x_75 = !lean_is_exclusive(x_74); -if (x_75 == 0) +lean_object* x_75; uint8_t x_76; uint8_t x_77; lean_object* x_78; +x_75 = lean_ctor_get(x_17, 1); +lean_inc(x_75); +lean_dec(x_17); +x_76 = 1; +x_77 = 1; +x_78 = l_Lean_importModules(x_2, x_4, x_7, x_8, x_9, x_76, x_77, x_11, x_75); +if (lean_obj_tag(x_78) == 0) { -lean_object* x_76; lean_object* x_77; -x_76 = lean_ctor_get(x_74, 1); -lean_ctor_set(x_74, 1, x_3); -x_77 = l_Lean_Elab_processHeader___lambda__1(x_8, x_74, x_76); -return x_77; -} -else +uint8_t x_79; +lean_dec(x_6); +x_79 = !lean_is_exclusive(x_78); +if (x_79 == 0) { -lean_object* x_78; lean_object* x_79; lean_object* x_80; lean_object* x_81; -x_78 = lean_ctor_get(x_74, 0); -x_79 = lean_ctor_get(x_74, 1); -lean_inc(x_79); -lean_inc(x_78); -lean_dec(x_74); -x_80 = lean_alloc_ctor(0, 2, 0); -lean_ctor_set(x_80, 0, x_78); -lean_ctor_set(x_80, 1, x_3); -x_81 = l_Lean_Elab_processHeader___lambda__1(x_8, x_80, x_79); +lean_object* x_80; lean_object* x_81; +x_80 = lean_ctor_get(x_78, 1); +lean_ctor_set(x_78, 1, x_5); +x_81 = l_Lean_Elab_processHeaderCore___lambda__1(x_10, x_78, x_80); return x_81; } -} else { -lean_object* x_82; lean_object* x_83; -x_82 = lean_ctor_get(x_74, 0); -lean_inc(x_82); -x_83 = lean_ctor_get(x_74, 1); +lean_object* x_82; lean_object* x_83; lean_object* x_84; lean_object* x_85; +x_82 = lean_ctor_get(x_78, 0); +x_83 = lean_ctor_get(x_78, 1); lean_inc(x_83); -lean_dec(x_74); -x_17 = x_82; -x_18 = x_83; -goto block_50; +lean_inc(x_82); +lean_dec(x_78); +x_84 = lean_alloc_ctor(0, 2, 0); +lean_ctor_set(x_84, 0, x_82); +lean_ctor_set(x_84, 1, x_5); +x_85 = l_Lean_Elab_processHeaderCore___lambda__1(x_10, x_84, x_83); +return x_85; 
} } else { -lean_object* x_84; lean_object* x_85; -lean_dec(x_13); -lean_dec(x_6); -lean_dec(x_2); -x_84 = lean_ctor_get(x_53, 0); -lean_inc(x_84); -x_85 = lean_ctor_get(x_53, 1); -lean_inc(x_85); -lean_dec(x_53); -x_17 = x_84; -x_18 = x_85; -goto block_50; +lean_object* x_86; lean_object* x_87; +x_86 = lean_ctor_get(x_78, 0); +lean_inc(x_86); +x_87 = lean_ctor_get(x_78, 1); +lean_inc(x_87); +lean_dec(x_78); +x_18 = x_86; +x_19 = x_87; +goto block_42; } } else { -if (lean_obj_tag(x_53) == 0) -{ -lean_object* x_86; uint8_t x_87; uint8_t x_88; lean_object* x_89; -x_86 = lean_ctor_get(x_53, 1); -lean_inc(x_86); -lean_dec(x_53); -x_87 = 1; -x_88 = 1; -x_89 = l_Lean_importModules(x_13, x_2, x_5, x_6, x_7, x_87, x_88, x_86); -if (lean_obj_tag(x_89) == 0) -{ -uint8_t x_90; +lean_object* x_88; lean_object* x_89; +lean_dec(x_11); +lean_dec(x_8); lean_dec(x_4); -lean_dec(x_1); -x_90 = !lean_is_exclusive(x_89); -if (x_90 == 0) -{ -lean_object* x_91; lean_object* x_92; -x_91 = lean_ctor_get(x_89, 1); -lean_ctor_set(x_89, 1, x_3); -x_92 = l_Lean_Elab_processHeader___lambda__1(x_8, x_89, x_91); -return x_92; +lean_dec(x_2); +x_88 = lean_ctor_get(x_17, 0); +lean_inc(x_88); +x_89 = lean_ctor_get(x_17, 1); +lean_inc(x_89); +lean_dec(x_17); +x_18 = x_88; +x_19 = x_89; +goto block_42; } -else -{ -lean_object* x_93; lean_object* x_94; lean_object* x_95; lean_object* x_96; -x_93 = lean_ctor_get(x_89, 0); -x_94 = lean_ctor_get(x_89, 1); -lean_inc(x_94); -lean_inc(x_93); -lean_dec(x_89); -x_95 = lean_alloc_ctor(0, 2, 0); -lean_ctor_set(x_95, 0, x_93); -lean_ctor_set(x_95, 1, x_3); -x_96 = l_Lean_Elab_processHeader___lambda__1(x_8, x_95, x_94); -return x_96; } } -else +block_42: { -lean_object* x_97; lean_object* x_98; -x_97 = lean_ctor_get(x_89, 0); -lean_inc(x_97); -x_98 = lean_ctor_get(x_89, 1); -lean_inc(x_98); -lean_dec(x_89); -x_17 = x_97; -x_18 = x_98; -goto block_50; -} +uint32_t x_20; lean_object* x_21; +x_20 = 0; +x_21 = lean_mk_empty_environment(x_20, x_19); +if (lean_obj_tag(x_21) == 0) +{ +lean_object* x_22; lean_object* x_23; lean_object* x_24; lean_object* x_25; lean_object* x_26; lean_object* x_27; lean_object* x_28; lean_object* x_29; lean_object* x_30; uint8_t x_31; uint8_t x_32; lean_object* x_33; lean_object* x_34; lean_object* x_35; lean_object* x_36; lean_object* x_37; +x_22 = lean_ctor_get(x_21, 0); +lean_inc(x_22); +x_23 = lean_ctor_get(x_21, 1); +lean_inc(x_23); +lean_dec(x_21); +x_24 = lean_ctor_get(x_6, 2); +lean_inc(x_24); +x_25 = l_Lean_FileMap_toPosition(x_24, x_1); +x_26 = lean_ctor_get(x_6, 1); +lean_inc(x_26); +lean_dec(x_6); +x_27 = lean_box(0); +x_28 = lean_io_error_to_string(x_18); +x_29 = lean_alloc_ctor(3, 1, 0); +lean_ctor_set(x_29, 0, x_28); +x_30 = l_Lean_MessageData_ofFormat(x_29); +x_31 = 0; +x_32 = 2; +x_33 = l_Lean_Elab_processHeaderCore___closed__1; +x_34 = lean_alloc_ctor(0, 5, 3); +lean_ctor_set(x_34, 0, x_26); +lean_ctor_set(x_34, 1, x_25); +lean_ctor_set(x_34, 2, x_27); +lean_ctor_set(x_34, 3, x_33); +lean_ctor_set(x_34, 4, x_30); +lean_ctor_set_uint8(x_34, sizeof(void*)*5, x_31); +lean_ctor_set_uint8(x_34, sizeof(void*)*5 + 1, x_32); +lean_ctor_set_uint8(x_34, sizeof(void*)*5 + 2, x_31); +x_35 = l_Lean_MessageLog_add(x_34, x_5); +x_36 = lean_alloc_ctor(0, 2, 0); +lean_ctor_set(x_36, 0, x_22); +lean_ctor_set(x_36, 1, x_35); +x_37 = l_Lean_Elab_processHeaderCore___lambda__1(x_10, x_36, x_23); +return x_37; } else { -lean_object* x_99; lean_object* x_100; -lean_dec(x_13); +uint8_t x_38; +lean_dec(x_18); +lean_dec(x_10); lean_dec(x_6); -lean_dec(x_2); -x_99 = 
lean_ctor_get(x_53, 0); -lean_inc(x_99); -x_100 = lean_ctor_get(x_53, 1); -lean_inc(x_100); -lean_dec(x_53); -x_17 = x_99; -x_18 = x_100; -goto block_50; +lean_dec(x_5); +x_38 = !lean_is_exclusive(x_21); +if (x_38 == 0) +{ +return x_21; } +else +{ +lean_object* x_39; lean_object* x_40; lean_object* x_41; +x_39 = lean_ctor_get(x_21, 0); +x_40 = lean_ctor_get(x_21, 1); +lean_inc(x_40); +lean_inc(x_39); +lean_dec(x_21); +x_41 = lean_alloc_ctor(1, 2, 0); +lean_ctor_set(x_41, 0, x_39); +lean_ctor_set(x_41, 1, x_40); +return x_41; } } } } } -LEAN_EXPORT lean_object* l_Array_forIn_x27Unsafe_loop___at_Lean_Elab_processHeader___spec__1___lambda__1___boxed(lean_object* x_1, lean_object* x_2, lean_object* x_3, lean_object* x_4) { +LEAN_EXPORT lean_object* l_Array_forIn_x27Unsafe_loop___at_Lean_Elab_processHeaderCore___spec__1___lambda__1___boxed(lean_object* x_1, lean_object* x_2, lean_object* x_3, lean_object* x_4) { _start: { uint8_t x_5; lean_object* x_6; x_5 = lean_unbox(x_1); lean_dec(x_1); -x_6 = l_Array_forIn_x27Unsafe_loop___at_Lean_Elab_processHeader___spec__1___lambda__1(x_5, x_2, x_3, x_4); +x_6 = l_Array_forIn_x27Unsafe_loop___at_Lean_Elab_processHeaderCore___spec__1___lambda__1(x_5, x_2, x_3, x_4); lean_dec(x_3); lean_dec(x_2); return x_6; } } -LEAN_EXPORT lean_object* l_Array_forIn_x27Unsafe_loop___at_Lean_Elab_processHeader___spec__1___lambda__2___boxed(lean_object* x_1, lean_object* x_2, lean_object* x_3, lean_object* x_4, lean_object* x_5) { +LEAN_EXPORT lean_object* l_Array_forIn_x27Unsafe_loop___at_Lean_Elab_processHeaderCore___spec__1___lambda__2___boxed(lean_object* x_1, lean_object* x_2, lean_object* x_3, lean_object* x_4, lean_object* x_5) { _start: { uint8_t x_6; lean_object* x_7; x_6 = lean_unbox(x_1); lean_dec(x_1); -x_7 = l_Array_forIn_x27Unsafe_loop___at_Lean_Elab_processHeader___spec__1___lambda__2(x_6, x_2, x_3, x_4, x_5); +x_7 = l_Array_forIn_x27Unsafe_loop___at_Lean_Elab_processHeaderCore___spec__1___lambda__2(x_6, x_2, x_3, x_4, x_5); lean_dec(x_4); lean_dec(x_3); lean_dec(x_2); return x_7; } } -LEAN_EXPORT lean_object* l_Array_forIn_x27Unsafe_loop___at_Lean_Elab_processHeader___spec__1___boxed(lean_object* x_1, lean_object* x_2, lean_object* x_3, lean_object* x_4, lean_object* x_5, lean_object* x_6, lean_object* x_7, lean_object* x_8, lean_object* x_9) { +LEAN_EXPORT lean_object* l_Array_forIn_x27Unsafe_loop___at_Lean_Elab_processHeaderCore___spec__1___boxed(lean_object* x_1, lean_object* x_2, lean_object* x_3, lean_object* x_4, lean_object* x_5, lean_object* x_6, lean_object* x_7, lean_object* x_8, lean_object* x_9) { _start: { uint8_t x_10; size_t x_11; size_t x_12; lean_object* x_13; -x_10 = lean_unbox(x_4); -lean_dec(x_4); +x_10 = lean_unbox(x_2); +lean_dec(x_2); x_11 = lean_unbox_usize(x_6); lean_dec(x_6); x_12 = lean_unbox_usize(x_7); lean_dec(x_7); -x_13 = l_Array_forIn_x27Unsafe_loop___at_Lean_Elab_processHeader___spec__1(x_1, x_2, x_3, x_10, x_5, x_11, x_12, x_8, x_9); +x_13 = l_Array_forIn_x27Unsafe_loop___at_Lean_Elab_processHeaderCore___spec__1(x_1, x_10, x_3, x_4, x_5, x_11, x_12, x_8, x_9); lean_dec(x_5); +lean_dec(x_4); lean_dec(x_3); -lean_dec(x_2); lean_dec(x_1); return x_13; } } +LEAN_EXPORT lean_object* l_Lean_Elab_processHeaderCore___boxed(lean_object* x_1, lean_object* x_2, lean_object* x_3, lean_object* x_4, lean_object* x_5, lean_object* x_6, lean_object* x_7, lean_object* x_8, lean_object* x_9, lean_object* x_10, lean_object* x_11, lean_object* x_12) { +_start: +{ +uint8_t x_13; uint32_t x_14; uint8_t x_15; lean_object* x_16; +x_13 = 
lean_unbox(x_3); +lean_dec(x_3); +x_14 = lean_unbox_uint32(x_7); +lean_dec(x_7); +x_15 = lean_unbox(x_9); +lean_dec(x_9); +x_16 = l_Lean_Elab_processHeaderCore(x_1, x_2, x_13, x_4, x_5, x_6, x_14, x_8, x_15, x_10, x_11, x_12); +lean_dec(x_1); +return x_16; +} +} +LEAN_EXPORT lean_object* l_Lean_Elab_processHeader(lean_object* x_1, lean_object* x_2, lean_object* x_3, lean_object* x_4, uint32_t x_5, lean_object* x_6, uint8_t x_7, lean_object* x_8, lean_object* x_9) { +_start: +{ +lean_object* x_10; lean_object* x_11; uint8_t x_12; lean_object* x_13; lean_object* x_14; +x_10 = l_Lean_Elab_HeaderSyntax_startPos(x_1); +lean_inc(x_1); +x_11 = l_Lean_Elab_HeaderSyntax_imports(x_1); +x_12 = l_Lean_Elab_HeaderSyntax_isModule(x_1); +lean_dec(x_1); +x_13 = lean_box(0); +x_14 = l_Lean_Elab_processHeaderCore(x_10, x_11, x_12, x_2, x_3, x_4, x_5, x_6, x_7, x_8, x_13, x_9); +lean_dec(x_10); +return x_14; +} +} LEAN_EXPORT lean_object* l_Lean_Elab_processHeader___boxed(lean_object* x_1, lean_object* x_2, lean_object* x_3, lean_object* x_4, lean_object* x_5, lean_object* x_6, lean_object* x_7, lean_object* x_8, lean_object* x_9) { _start: { @@ -1651,7 +1702,7 @@ if (x_15 == 0) { lean_object* x_16; lean_object* x_17; lean_object* x_18; lean_object* x_19; lean_object* x_20; x_16 = lean_ctor_get(x_9, 0); -x_17 = l_Lean_Elab_headerToImports(x_13); +x_17 = l_Lean_Elab_HeaderSyntax_imports(x_13); x_18 = lean_ctor_get(x_6, 2); lean_inc(x_18); lean_dec(x_6); @@ -1672,7 +1723,7 @@ x_22 = lean_ctor_get(x_9, 1); lean_inc(x_22); lean_inc(x_21); lean_dec(x_9); -x_23 = l_Lean_Elab_headerToImports(x_13); +x_23 = l_Lean_Elab_HeaderSyntax_imports(x_13); x_24 = lean_ctor_get(x_6, 2); lean_inc(x_24); lean_dec(x_6); @@ -1707,7 +1758,7 @@ if (lean_is_exclusive(x_9)) { lean_dec_ref(x_9); x_31 = lean_box(0); } -x_32 = l_Lean_Elab_headerToImports(x_28); +x_32 = l_Lean_Elab_HeaderSyntax_imports(x_28); x_33 = lean_ctor_get(x_6, 2); lean_inc(x_33); lean_dec(x_6); @@ -1758,7 +1809,7 @@ if (lean_is_exclusive(x_9)) { lean_dec_ref(x_9); x_43 = lean_box(0); } -x_44 = l_Lean_Elab_headerToImports(x_39); +x_44 = l_Lean_Elab_HeaderSyntax_imports(x_39); x_45 = lean_ctor_get(x_6, 2); lean_inc(x_45); lean_dec(x_6); @@ -1846,7 +1897,7 @@ if (x_66 == 0) { lean_object* x_67; lean_object* x_68; lean_object* x_69; lean_object* x_70; lean_object* x_71; x_67 = lean_ctor_get(x_60, 0); -x_68 = l_Lean_Elab_headerToImports(x_64); +x_68 = l_Lean_Elab_HeaderSyntax_imports(x_64); x_69 = lean_ctor_get(x_57, 2); lean_inc(x_69); lean_dec(x_57); @@ -1867,7 +1918,7 @@ x_73 = lean_ctor_get(x_60, 1); lean_inc(x_73); lean_inc(x_72); lean_dec(x_60); -x_74 = l_Lean_Elab_headerToImports(x_64); +x_74 = l_Lean_Elab_HeaderSyntax_imports(x_64); x_75 = lean_ctor_get(x_57, 2); lean_inc(x_75); lean_dec(x_57); @@ -1902,7 +1953,7 @@ if (lean_is_exclusive(x_60)) { lean_dec_ref(x_60); x_82 = lean_box(0); } -x_83 = l_Lean_Elab_headerToImports(x_79); +x_83 = l_Lean_Elab_HeaderSyntax_imports(x_79); x_84 = lean_ctor_get(x_57, 2); lean_inc(x_84); lean_dec(x_57); @@ -1953,7 +2004,7 @@ if (lean_is_exclusive(x_60)) { lean_dec_ref(x_60); x_94 = lean_box(0); } -x_95 = l_Lean_Elab_headerToImports(x_90); +x_95 = l_Lean_Elab_HeaderSyntax_imports(x_90); x_96 = lean_ctor_get(x_57, 2); lean_inc(x_96); lean_dec(x_57); @@ -2478,78 +2529,78 @@ lean_dec_ref(res); res = initialize_Lean_CoreM(builtin, lean_io_mk_world()); if (lean_io_result_is_error(res)) return res; lean_dec_ref(res); -l_panic___at_Lean_Elab_headerToImports___spec__1___closed__1 = 
_init_l_panic___at_Lean_Elab_headerToImports___spec__1___closed__1(); -lean_mark_persistent(l_panic___at_Lean_Elab_headerToImports___spec__1___closed__1); -l_Array_mapMUnsafe_map___at_Lean_Elab_headerToImports___spec__3___lambda__1___closed__1 = _init_l_Array_mapMUnsafe_map___at_Lean_Elab_headerToImports___spec__3___lambda__1___closed__1(); -lean_mark_persistent(l_Array_mapMUnsafe_map___at_Lean_Elab_headerToImports___spec__3___lambda__1___closed__1); -l_Array_mapMUnsafe_map___at_Lean_Elab_headerToImports___spec__3___lambda__1___closed__2 = _init_l_Array_mapMUnsafe_map___at_Lean_Elab_headerToImports___spec__3___lambda__1___closed__2(); -lean_mark_persistent(l_Array_mapMUnsafe_map___at_Lean_Elab_headerToImports___spec__3___lambda__1___closed__2); -l_Array_mapMUnsafe_map___at_Lean_Elab_headerToImports___spec__3___lambda__1___closed__3 = _init_l_Array_mapMUnsafe_map___at_Lean_Elab_headerToImports___spec__3___lambda__1___closed__3(); -lean_mark_persistent(l_Array_mapMUnsafe_map___at_Lean_Elab_headerToImports___spec__3___lambda__1___closed__3); -l_Array_mapMUnsafe_map___at_Lean_Elab_headerToImports___spec__3___lambda__1___closed__4 = _init_l_Array_mapMUnsafe_map___at_Lean_Elab_headerToImports___spec__3___lambda__1___closed__4(); -lean_mark_persistent(l_Array_mapMUnsafe_map___at_Lean_Elab_headerToImports___spec__3___lambda__1___closed__4); -l_Array_mapMUnsafe_map___at_Lean_Elab_headerToImports___spec__3___lambda__2___closed__1 = _init_l_Array_mapMUnsafe_map___at_Lean_Elab_headerToImports___spec__3___lambda__2___closed__1(); -lean_mark_persistent(l_Array_mapMUnsafe_map___at_Lean_Elab_headerToImports___spec__3___lambda__2___closed__1); -l_Array_mapMUnsafe_map___at_Lean_Elab_headerToImports___spec__3___lambda__2___closed__2 = _init_l_Array_mapMUnsafe_map___at_Lean_Elab_headerToImports___spec__3___lambda__2___closed__2(); -lean_mark_persistent(l_Array_mapMUnsafe_map___at_Lean_Elab_headerToImports___spec__3___lambda__2___closed__2); -l_Array_mapMUnsafe_map___at_Lean_Elab_headerToImports___spec__3___lambda__2___closed__3 = _init_l_Array_mapMUnsafe_map___at_Lean_Elab_headerToImports___spec__3___lambda__2___closed__3(); -lean_mark_persistent(l_Array_mapMUnsafe_map___at_Lean_Elab_headerToImports___spec__3___lambda__2___closed__3); -l_Array_mapMUnsafe_map___at_Lean_Elab_headerToImports___spec__3___lambda__2___closed__4 = _init_l_Array_mapMUnsafe_map___at_Lean_Elab_headerToImports___spec__3___lambda__2___closed__4(); -lean_mark_persistent(l_Array_mapMUnsafe_map___at_Lean_Elab_headerToImports___spec__3___lambda__2___closed__4); -l_Array_mapMUnsafe_map___at_Lean_Elab_headerToImports___spec__3___lambda__2___closed__5 = _init_l_Array_mapMUnsafe_map___at_Lean_Elab_headerToImports___spec__3___lambda__2___closed__5(); -lean_mark_persistent(l_Array_mapMUnsafe_map___at_Lean_Elab_headerToImports___spec__3___lambda__2___closed__5); -l_Array_mapMUnsafe_map___at_Lean_Elab_headerToImports___spec__3___closed__1 = _init_l_Array_mapMUnsafe_map___at_Lean_Elab_headerToImports___spec__3___closed__1(); -lean_mark_persistent(l_Array_mapMUnsafe_map___at_Lean_Elab_headerToImports___spec__3___closed__1); -l_Array_mapMUnsafe_map___at_Lean_Elab_headerToImports___spec__3___closed__2 = _init_l_Array_mapMUnsafe_map___at_Lean_Elab_headerToImports___spec__3___closed__2(); -lean_mark_persistent(l_Array_mapMUnsafe_map___at_Lean_Elab_headerToImports___spec__3___closed__2); -l_Lean_Elab_headerToImports___lambda__1___closed__1 = _init_l_Lean_Elab_headerToImports___lambda__1___closed__1(); 
-lean_mark_persistent(l_Lean_Elab_headerToImports___lambda__1___closed__1); -l_Lean_Elab_headerToImports___lambda__1___closed__2 = _init_l_Lean_Elab_headerToImports___lambda__1___closed__2(); -lean_mark_persistent(l_Lean_Elab_headerToImports___lambda__1___closed__2); -l_Lean_Elab_headerToImports___lambda__1___closed__3 = _init_l_Lean_Elab_headerToImports___lambda__1___closed__3(); -lean_mark_persistent(l_Lean_Elab_headerToImports___lambda__1___closed__3); -l_Lean_Elab_headerToImports___lambda__1___closed__4 = _init_l_Lean_Elab_headerToImports___lambda__1___closed__4(); -lean_mark_persistent(l_Lean_Elab_headerToImports___lambda__1___closed__4); -l_Lean_Elab_headerToImports___lambda__1___closed__5 = _init_l_Lean_Elab_headerToImports___lambda__1___closed__5(); -lean_mark_persistent(l_Lean_Elab_headerToImports___lambda__1___closed__5); -l_Lean_Elab_headerToImports___lambda__1___closed__6 = _init_l_Lean_Elab_headerToImports___lambda__1___closed__6(); -lean_mark_persistent(l_Lean_Elab_headerToImports___lambda__1___closed__6); -l_Lean_Elab_headerToImports___lambda__1___closed__7 = _init_l_Lean_Elab_headerToImports___lambda__1___closed__7(); -lean_mark_persistent(l_Lean_Elab_headerToImports___lambda__1___closed__7); -l_Lean_Elab_headerToImports___lambda__1___closed__8 = _init_l_Lean_Elab_headerToImports___lambda__1___closed__8(); -lean_mark_persistent(l_Lean_Elab_headerToImports___lambda__1___closed__8); -l_Lean_Elab_headerToImports___lambda__2___closed__1 = _init_l_Lean_Elab_headerToImports___lambda__2___closed__1(); -lean_mark_persistent(l_Lean_Elab_headerToImports___lambda__2___closed__1); -l_Lean_Elab_headerToImports___lambda__2___closed__2 = _init_l_Lean_Elab_headerToImports___lambda__2___closed__2(); -lean_mark_persistent(l_Lean_Elab_headerToImports___lambda__2___closed__2); -l_Lean_Elab_headerToImports___lambda__2___closed__3 = _init_l_Lean_Elab_headerToImports___lambda__2___closed__3(); -lean_mark_persistent(l_Lean_Elab_headerToImports___lambda__2___closed__3); -l_Lean_Elab_headerToImports___closed__1 = _init_l_Lean_Elab_headerToImports___closed__1(); -lean_mark_persistent(l_Lean_Elab_headerToImports___closed__1); -l_Lean_Elab_headerToImports___closed__2 = _init_l_Lean_Elab_headerToImports___closed__2(); -lean_mark_persistent(l_Lean_Elab_headerToImports___closed__2); -l_Lean_Elab_headerToImports___closed__3 = _init_l_Lean_Elab_headerToImports___closed__3(); -lean_mark_persistent(l_Lean_Elab_headerToImports___closed__3); -l_Lean_Elab_headerToImports___closed__4 = _init_l_Lean_Elab_headerToImports___closed__4(); -lean_mark_persistent(l_Lean_Elab_headerToImports___closed__4); -l_Array_forIn_x27Unsafe_loop___at_Lean_Elab_processHeader___spec__1___lambda__1___closed__1 = _init_l_Array_forIn_x27Unsafe_loop___at_Lean_Elab_processHeader___spec__1___lambda__1___closed__1(); -lean_mark_persistent(l_Array_forIn_x27Unsafe_loop___at_Lean_Elab_processHeader___spec__1___lambda__1___closed__1); -l_Array_forIn_x27Unsafe_loop___at_Lean_Elab_processHeader___spec__1___lambda__1___closed__2 = _init_l_Array_forIn_x27Unsafe_loop___at_Lean_Elab_processHeader___spec__1___lambda__1___closed__2(); -lean_mark_persistent(l_Array_forIn_x27Unsafe_loop___at_Lean_Elab_processHeader___spec__1___lambda__1___closed__2); -l_Array_forIn_x27Unsafe_loop___at_Lean_Elab_processHeader___spec__1___lambda__1___closed__3 = _init_l_Array_forIn_x27Unsafe_loop___at_Lean_Elab_processHeader___spec__1___lambda__1___closed__3(); 
-lean_mark_persistent(l_Array_forIn_x27Unsafe_loop___at_Lean_Elab_processHeader___spec__1___lambda__1___closed__3); -l_Array_forIn_x27Unsafe_loop___at_Lean_Elab_processHeader___spec__1___lambda__2___closed__1 = _init_l_Array_forIn_x27Unsafe_loop___at_Lean_Elab_processHeader___spec__1___lambda__2___closed__1(); -lean_mark_persistent(l_Array_forIn_x27Unsafe_loop___at_Lean_Elab_processHeader___spec__1___lambda__2___closed__1); -l_Array_forIn_x27Unsafe_loop___at_Lean_Elab_processHeader___spec__1___lambda__2___closed__2 = _init_l_Array_forIn_x27Unsafe_loop___at_Lean_Elab_processHeader___spec__1___lambda__2___closed__2(); -lean_mark_persistent(l_Array_forIn_x27Unsafe_loop___at_Lean_Elab_processHeader___spec__1___lambda__2___closed__2); -l_Array_forIn_x27Unsafe_loop___at_Lean_Elab_processHeader___spec__1___closed__1 = _init_l_Array_forIn_x27Unsafe_loop___at_Lean_Elab_processHeader___spec__1___closed__1(); -lean_mark_persistent(l_Array_forIn_x27Unsafe_loop___at_Lean_Elab_processHeader___spec__1___closed__1); -l_Array_forIn_x27Unsafe_loop___at_Lean_Elab_processHeader___spec__1___closed__2 = _init_l_Array_forIn_x27Unsafe_loop___at_Lean_Elab_processHeader___spec__1___closed__2(); -lean_mark_persistent(l_Array_forIn_x27Unsafe_loop___at_Lean_Elab_processHeader___spec__1___closed__2); -l_Lean_Elab_processHeader___closed__1 = _init_l_Lean_Elab_processHeader___closed__1(); -lean_mark_persistent(l_Lean_Elab_processHeader___closed__1); -l_Lean_Elab_processHeader___closed__2 = _init_l_Lean_Elab_processHeader___closed__2(); -lean_mark_persistent(l_Lean_Elab_processHeader___closed__2); +l_panic___at_Lean_Elab_HeaderSyntax_imports___spec__1___closed__1 = _init_l_panic___at_Lean_Elab_HeaderSyntax_imports___spec__1___closed__1(); +lean_mark_persistent(l_panic___at_Lean_Elab_HeaderSyntax_imports___spec__1___closed__1); +l_Array_mapMUnsafe_map___at_Lean_Elab_HeaderSyntax_imports___spec__3___lambda__1___closed__1 = _init_l_Array_mapMUnsafe_map___at_Lean_Elab_HeaderSyntax_imports___spec__3___lambda__1___closed__1(); +lean_mark_persistent(l_Array_mapMUnsafe_map___at_Lean_Elab_HeaderSyntax_imports___spec__3___lambda__1___closed__1); +l_Array_mapMUnsafe_map___at_Lean_Elab_HeaderSyntax_imports___spec__3___lambda__1___closed__2 = _init_l_Array_mapMUnsafe_map___at_Lean_Elab_HeaderSyntax_imports___spec__3___lambda__1___closed__2(); +lean_mark_persistent(l_Array_mapMUnsafe_map___at_Lean_Elab_HeaderSyntax_imports___spec__3___lambda__1___closed__2); +l_Array_mapMUnsafe_map___at_Lean_Elab_HeaderSyntax_imports___spec__3___lambda__1___closed__3 = _init_l_Array_mapMUnsafe_map___at_Lean_Elab_HeaderSyntax_imports___spec__3___lambda__1___closed__3(); +lean_mark_persistent(l_Array_mapMUnsafe_map___at_Lean_Elab_HeaderSyntax_imports___spec__3___lambda__1___closed__3); +l_Array_mapMUnsafe_map___at_Lean_Elab_HeaderSyntax_imports___spec__3___lambda__1___closed__4 = _init_l_Array_mapMUnsafe_map___at_Lean_Elab_HeaderSyntax_imports___spec__3___lambda__1___closed__4(); +lean_mark_persistent(l_Array_mapMUnsafe_map___at_Lean_Elab_HeaderSyntax_imports___spec__3___lambda__1___closed__4); +l_Array_mapMUnsafe_map___at_Lean_Elab_HeaderSyntax_imports___spec__3___lambda__2___closed__1 = _init_l_Array_mapMUnsafe_map___at_Lean_Elab_HeaderSyntax_imports___spec__3___lambda__2___closed__1(); +lean_mark_persistent(l_Array_mapMUnsafe_map___at_Lean_Elab_HeaderSyntax_imports___spec__3___lambda__2___closed__1); +l_Array_mapMUnsafe_map___at_Lean_Elab_HeaderSyntax_imports___spec__3___lambda__2___closed__2 = 
_init_l_Array_mapMUnsafe_map___at_Lean_Elab_HeaderSyntax_imports___spec__3___lambda__2___closed__2(); +lean_mark_persistent(l_Array_mapMUnsafe_map___at_Lean_Elab_HeaderSyntax_imports___spec__3___lambda__2___closed__2); +l_Array_mapMUnsafe_map___at_Lean_Elab_HeaderSyntax_imports___spec__3___lambda__2___closed__3 = _init_l_Array_mapMUnsafe_map___at_Lean_Elab_HeaderSyntax_imports___spec__3___lambda__2___closed__3(); +lean_mark_persistent(l_Array_mapMUnsafe_map___at_Lean_Elab_HeaderSyntax_imports___spec__3___lambda__2___closed__3); +l_Array_mapMUnsafe_map___at_Lean_Elab_HeaderSyntax_imports___spec__3___lambda__2___closed__4 = _init_l_Array_mapMUnsafe_map___at_Lean_Elab_HeaderSyntax_imports___spec__3___lambda__2___closed__4(); +lean_mark_persistent(l_Array_mapMUnsafe_map___at_Lean_Elab_HeaderSyntax_imports___spec__3___lambda__2___closed__4); +l_Array_mapMUnsafe_map___at_Lean_Elab_HeaderSyntax_imports___spec__3___lambda__2___closed__5 = _init_l_Array_mapMUnsafe_map___at_Lean_Elab_HeaderSyntax_imports___spec__3___lambda__2___closed__5(); +lean_mark_persistent(l_Array_mapMUnsafe_map___at_Lean_Elab_HeaderSyntax_imports___spec__3___lambda__2___closed__5); +l_Array_mapMUnsafe_map___at_Lean_Elab_HeaderSyntax_imports___spec__3___closed__1 = _init_l_Array_mapMUnsafe_map___at_Lean_Elab_HeaderSyntax_imports___spec__3___closed__1(); +lean_mark_persistent(l_Array_mapMUnsafe_map___at_Lean_Elab_HeaderSyntax_imports___spec__3___closed__1); +l_Array_mapMUnsafe_map___at_Lean_Elab_HeaderSyntax_imports___spec__3___closed__2 = _init_l_Array_mapMUnsafe_map___at_Lean_Elab_HeaderSyntax_imports___spec__3___closed__2(); +lean_mark_persistent(l_Array_mapMUnsafe_map___at_Lean_Elab_HeaderSyntax_imports___spec__3___closed__2); +l_Lean_Elab_HeaderSyntax_imports___lambda__1___closed__1 = _init_l_Lean_Elab_HeaderSyntax_imports___lambda__1___closed__1(); +lean_mark_persistent(l_Lean_Elab_HeaderSyntax_imports___lambda__1___closed__1); +l_Lean_Elab_HeaderSyntax_imports___lambda__1___closed__2 = _init_l_Lean_Elab_HeaderSyntax_imports___lambda__1___closed__2(); +lean_mark_persistent(l_Lean_Elab_HeaderSyntax_imports___lambda__1___closed__2); +l_Lean_Elab_HeaderSyntax_imports___lambda__1___closed__3 = _init_l_Lean_Elab_HeaderSyntax_imports___lambda__1___closed__3(); +lean_mark_persistent(l_Lean_Elab_HeaderSyntax_imports___lambda__1___closed__3); +l_Lean_Elab_HeaderSyntax_imports___lambda__1___closed__4 = _init_l_Lean_Elab_HeaderSyntax_imports___lambda__1___closed__4(); +lean_mark_persistent(l_Lean_Elab_HeaderSyntax_imports___lambda__1___closed__4); +l_Lean_Elab_HeaderSyntax_imports___lambda__1___closed__5 = _init_l_Lean_Elab_HeaderSyntax_imports___lambda__1___closed__5(); +lean_mark_persistent(l_Lean_Elab_HeaderSyntax_imports___lambda__1___closed__5); +l_Lean_Elab_HeaderSyntax_imports___lambda__1___closed__6 = _init_l_Lean_Elab_HeaderSyntax_imports___lambda__1___closed__6(); +lean_mark_persistent(l_Lean_Elab_HeaderSyntax_imports___lambda__1___closed__6); +l_Lean_Elab_HeaderSyntax_imports___lambda__1___closed__7 = _init_l_Lean_Elab_HeaderSyntax_imports___lambda__1___closed__7(); +lean_mark_persistent(l_Lean_Elab_HeaderSyntax_imports___lambda__1___closed__7); +l_Lean_Elab_HeaderSyntax_imports___lambda__1___closed__8 = _init_l_Lean_Elab_HeaderSyntax_imports___lambda__1___closed__8(); +lean_mark_persistent(l_Lean_Elab_HeaderSyntax_imports___lambda__1___closed__8); +l_Lean_Elab_HeaderSyntax_imports___lambda__2___closed__1 = _init_l_Lean_Elab_HeaderSyntax_imports___lambda__2___closed__1(); 
+lean_mark_persistent(l_Lean_Elab_HeaderSyntax_imports___lambda__2___closed__1); +l_Lean_Elab_HeaderSyntax_imports___lambda__2___closed__2 = _init_l_Lean_Elab_HeaderSyntax_imports___lambda__2___closed__2(); +lean_mark_persistent(l_Lean_Elab_HeaderSyntax_imports___lambda__2___closed__2); +l_Lean_Elab_HeaderSyntax_imports___lambda__2___closed__3 = _init_l_Lean_Elab_HeaderSyntax_imports___lambda__2___closed__3(); +lean_mark_persistent(l_Lean_Elab_HeaderSyntax_imports___lambda__2___closed__3); +l_Lean_Elab_HeaderSyntax_imports___closed__1 = _init_l_Lean_Elab_HeaderSyntax_imports___closed__1(); +lean_mark_persistent(l_Lean_Elab_HeaderSyntax_imports___closed__1); +l_Lean_Elab_HeaderSyntax_imports___closed__2 = _init_l_Lean_Elab_HeaderSyntax_imports___closed__2(); +lean_mark_persistent(l_Lean_Elab_HeaderSyntax_imports___closed__2); +l_Lean_Elab_HeaderSyntax_imports___closed__3 = _init_l_Lean_Elab_HeaderSyntax_imports___closed__3(); +lean_mark_persistent(l_Lean_Elab_HeaderSyntax_imports___closed__3); +l_Lean_Elab_HeaderSyntax_imports___closed__4 = _init_l_Lean_Elab_HeaderSyntax_imports___closed__4(); +lean_mark_persistent(l_Lean_Elab_HeaderSyntax_imports___closed__4); +l_Array_forIn_x27Unsafe_loop___at_Lean_Elab_processHeaderCore___spec__1___lambda__1___closed__1 = _init_l_Array_forIn_x27Unsafe_loop___at_Lean_Elab_processHeaderCore___spec__1___lambda__1___closed__1(); +lean_mark_persistent(l_Array_forIn_x27Unsafe_loop___at_Lean_Elab_processHeaderCore___spec__1___lambda__1___closed__1); +l_Array_forIn_x27Unsafe_loop___at_Lean_Elab_processHeaderCore___spec__1___lambda__1___closed__2 = _init_l_Array_forIn_x27Unsafe_loop___at_Lean_Elab_processHeaderCore___spec__1___lambda__1___closed__2(); +lean_mark_persistent(l_Array_forIn_x27Unsafe_loop___at_Lean_Elab_processHeaderCore___spec__1___lambda__1___closed__2); +l_Array_forIn_x27Unsafe_loop___at_Lean_Elab_processHeaderCore___spec__1___lambda__1___closed__3 = _init_l_Array_forIn_x27Unsafe_loop___at_Lean_Elab_processHeaderCore___spec__1___lambda__1___closed__3(); +lean_mark_persistent(l_Array_forIn_x27Unsafe_loop___at_Lean_Elab_processHeaderCore___spec__1___lambda__1___closed__3); +l_Array_forIn_x27Unsafe_loop___at_Lean_Elab_processHeaderCore___spec__1___lambda__2___closed__1 = _init_l_Array_forIn_x27Unsafe_loop___at_Lean_Elab_processHeaderCore___spec__1___lambda__2___closed__1(); +lean_mark_persistent(l_Array_forIn_x27Unsafe_loop___at_Lean_Elab_processHeaderCore___spec__1___lambda__2___closed__1); +l_Array_forIn_x27Unsafe_loop___at_Lean_Elab_processHeaderCore___spec__1___lambda__2___closed__2 = _init_l_Array_forIn_x27Unsafe_loop___at_Lean_Elab_processHeaderCore___spec__1___lambda__2___closed__2(); +lean_mark_persistent(l_Array_forIn_x27Unsafe_loop___at_Lean_Elab_processHeaderCore___spec__1___lambda__2___closed__2); +l_Array_forIn_x27Unsafe_loop___at_Lean_Elab_processHeaderCore___spec__1___closed__1 = _init_l_Array_forIn_x27Unsafe_loop___at_Lean_Elab_processHeaderCore___spec__1___closed__1(); +lean_mark_persistent(l_Array_forIn_x27Unsafe_loop___at_Lean_Elab_processHeaderCore___spec__1___closed__1); +l_Array_forIn_x27Unsafe_loop___at_Lean_Elab_processHeaderCore___spec__1___closed__2 = _init_l_Array_forIn_x27Unsafe_loop___at_Lean_Elab_processHeaderCore___spec__1___closed__2(); +lean_mark_persistent(l_Array_forIn_x27Unsafe_loop___at_Lean_Elab_processHeaderCore___spec__1___closed__2); +l_Lean_Elab_processHeaderCore___closed__1 = _init_l_Lean_Elab_processHeaderCore___closed__1(); +lean_mark_persistent(l_Lean_Elab_processHeaderCore___closed__1); 
+l_Lean_Elab_processHeaderCore___closed__2 = _init_l_Lean_Elab_processHeaderCore___closed__2(); +lean_mark_persistent(l_Lean_Elab_processHeaderCore___closed__2); l_Lean_Elab_parseImports___closed__1 = _init_l_Lean_Elab_parseImports___closed__1(); lean_mark_persistent(l_Lean_Elab_parseImports___closed__1); return lean_io_result_mk_ok(lean_box(0)); diff --git a/stage0/stdlib/Lean/Elab/MutualDef.c b/stage0/stdlib/Lean/Elab/MutualDef.c index feb9f4c76eed..b7705fb16052 100644 --- a/stage0/stdlib/Lean/Elab/MutualDef.c +++ b/stage0/stdlib/Lean/Elab/MutualDef.c @@ -129,7 +129,6 @@ LEAN_EXPORT lean_object* l___private_Lean_Elab_MutualDef_0__Lean_Elab_Term_Mutua double lean_float_div(double, double); LEAN_EXPORT lean_object* l___private_Lean_Elab_MutualDef_0__Lean_Elab_Term_MutualClosure_pushLocalDecl___boxed(lean_object*, lean_object*, lean_object*, lean_object*, lean_object*, lean_object*, lean_object*, lean_object*, lean_object*, lean_object*, lean_object*, lean_object*, lean_object*, lean_object*); LEAN_EXPORT lean_object* l_Lean_throwError___at___private_Lean_Elab_MutualDef_0__Lean_Elab_Term_withHeaderSecVars_collectUsed___spec__3___boxed(lean_object*, lean_object*, lean_object*, lean_object*, lean_object*, lean_object*, lean_object*); -LEAN_EXPORT lean_object* l_Lean_Elab_Command_initFn____x40_Lean_Elab_MutualDef___hyg_18177_(lean_object*); LEAN_EXPORT lean_object* l_Array_mapMUnsafe_map___at___private_Lean_Elab_MutualDef_0__Lean_Elab_Term_MutualClosure_mkFreeVarMap___spec__1___boxed(lean_object*, lean_object*, lean_object*); static lean_object* l___private_Lean_Elab_MutualDef_0__Lean_Elab_Term_expandWhereStructInst___closed__7; LEAN_EXPORT lean_object* l_Array_forIn_x27Unsafe_loop___at___private_Lean_Elab_MutualDef_0__Lean_Elab_Term_MutualClosure_mkInitialUsedFVarsMap___spec__6___at___private_Lean_Elab_MutualDef_0__Lean_Elab_Term_MutualClosure_mkInitialUsedFVarsMap___spec__7___rarg(lean_object*, lean_object*, lean_object*, lean_object*, lean_object*, lean_object*, size_t, lean_object*, lean_object*, size_t, size_t, lean_object*); @@ -147,6 +146,7 @@ LEAN_EXPORT lean_object* l___private_Lean_Elab_MutualDef_0__Lean_Elab_Term_regis static lean_object* l___private_Lean_Elab_MutualDef_0__Lean_Elab_Term_check___lambda__2___closed__4; static lean_object* l_Array_forIn_x27Unsafe_loop___at___private_Lean_Elab_MutualDef_0__Lean_Elab_Term_elabHeaders___spec__12___lambda__4___closed__6; LEAN_EXPORT lean_object* l_Lean_Elab_Term_withRestoreOrSaveFull___at___private_Lean_Elab_MutualDef_0__Lean_Elab_Term_elabHeaders___spec__3(lean_object*, lean_object*, lean_object*, lean_object*, lean_object*, lean_object*, lean_object*, lean_object*, lean_object*, lean_object*, lean_object*, lean_object*, lean_object*, lean_object*, lean_object*, lean_object*, lean_object*); +static lean_object* l_Lean_Elab_Command_initFn____x40_Lean_Elab_MutualDef___hyg_18180____closed__1; LEAN_EXPORT lean_object* l___private_Lean_Elab_MutualDef_0__Lean_Elab_Term_withUsed(lean_object*); LEAN_EXPORT lean_object* l___private_Lean_Elab_MutualDef_0__Lean_Elab_Term_MutualClosure_mkLetRecClosureFor___lambda__2(lean_object*, lean_object*, lean_object*, lean_object*, lean_object*, lean_object*, lean_object*, lean_object*, lean_object*, lean_object*, lean_object*, lean_object*, lean_object*); LEAN_EXPORT uint8_t l_Array_anyMUnsafe_any___at___private_Lean_Elab_MutualDef_0__Lean_Elab_Term_logGoalsAccomplishedSnapshotTask___spec__7(lean_object*, size_t, size_t); @@ -154,7 +154,6 @@ LEAN_EXPORT lean_object* 
l_Lean_Elab_Term_elabMutualDef_processDeriving(lean_obj lean_object* l_Lean_MessageData_joinSep(lean_object*, lean_object*); LEAN_EXPORT lean_object* l_Lean_Core_withRestoreOrSaveFull___at___private_Lean_Elab_MutualDef_0__Lean_Elab_Term_elabFunValues___spec__13___lambda__3___boxed(lean_object*, lean_object*, lean_object*, lean_object*, lean_object*, lean_object*, lean_object*, lean_object*, lean_object*, lean_object*, lean_object*, lean_object*, lean_object*, lean_object*); size_t lean_uint64_to_usize(uint64_t); -static lean_object* l_Lean_Elab_Command_initFn____x40_Lean_Elab_MutualDef___hyg_18177____closed__4; static lean_object* l_Lean_Elab_Term_initFn____x40_Lean_Elab_MutualDef___hyg_5693____closed__3; static lean_object* l___private_Lean_Elab_MutualDef_0__Lean_Elab_Term_expandWhereStructInst___lambda__1___closed__7; LEAN_EXPORT lean_object* l___private_Lean_Elab_MutualDef_0__Lean_Elab_Term_MutualClosure_mkInitialUsedFVarsMap___at___private_Lean_Elab_MutualDef_0__Lean_Elab_Term_MutualClosure_mkLetRecClosures___spec__2___boxed(lean_object*, lean_object*, lean_object*, lean_object*, lean_object*, lean_object*, lean_object*, lean_object*, lean_object*, lean_object*); @@ -176,6 +175,7 @@ lean_object* l_Lean_profileitM___at___private_Lean_Elab_Term_0__Lean_Elab_Term_a lean_object* l_Lean_Syntax_getId(lean_object*); static lean_object* l_Nat_foldM_loop___at_Lean_Elab_Term_MutualClosure_pushMain___spec__1___closed__4; lean_object* l_Lean_Elab_Term_elabTermEnsuringType(lean_object*, lean_object*, uint8_t, uint8_t, lean_object*, lean_object*, lean_object*, lean_object*, lean_object*, lean_object*, lean_object*, lean_object*); +LEAN_EXPORT lean_object* l_Lean_Elab_Command_initFn____x40_Lean_Elab_MutualDef___hyg_18180_(lean_object*); static lean_object* l_Lean_Elab_instInhabitedDefViewElabHeader___closed__7; lean_object* l_Lean_Expr_sort___override(lean_object*); LEAN_EXPORT lean_object* l_Std_DHashMap_Internal_AssocList_get_x3f___at___private_Lean_Elab_MutualDef_0__Lean_Elab_Term_checkAllDeclNamesDistinct___spec__6(lean_object*, lean_object*); @@ -194,7 +194,6 @@ static lean_object* l___private_Lean_Elab_MutualDef_0__Lean_Elab_Term_expandWher lean_object* l_Lean_LocalDecl_replaceFVarId(lean_object*, lean_object*, lean_object*); lean_object* l_Lean_Core_getMessageLog___rarg(lean_object*, lean_object*); static lean_object* l_Lean_Elab_Term_elabMutualDef_go___closed__1; -static lean_object* l_Lean_Elab_Command_initFn____x40_Lean_Elab_MutualDef___hyg_18177____closed__3; size_t lean_usize_mul(size_t, size_t); LEAN_EXPORT lean_object* l_Lean_Elab_Term_MutualClosure_pushMain___boxed(lean_object*, lean_object*, lean_object*, lean_object*, lean_object*, lean_object*, lean_object*, lean_object*, lean_object*, lean_object*, lean_object*); LEAN_EXPORT lean_object* l___private_Lean_Elab_MutualDef_0__Lean_Elab_Term_MutualClosure_mkInitialUsedFVarsMap___rarg___lambda__3(lean_object*, lean_object*, lean_object*, lean_object*, size_t, lean_object*, lean_object*, lean_object*, lean_object*); @@ -310,6 +309,7 @@ static lean_object* l_Lean_Elab_addDeclarationRangesForBuiltin___at_Lean_Elab_Te static lean_object* l_Array_mapMUnsafe_map___at___private_Lean_Elab_MutualDef_0__Lean_Elab_Term_elabFunValues___spec__18___lambda__1___closed__1; LEAN_EXPORT lean_object* l_Array_mapMUnsafe_map___at_Lean_Elab_Term_elabMutualDef___spec__5(lean_object*, lean_object*, size_t, size_t, lean_object*, lean_object*, lean_object*, lean_object*, lean_object*, lean_object*, lean_object*, lean_object*); LEAN_EXPORT lean_object* 
l_Array_forIn_x27Unsafe_loop___at_Lean_Elab_Term_elabMutualDef_finishElab___spec__4___boxed(lean_object*, lean_object*, lean_object*, lean_object*, lean_object*, lean_object*, lean_object*, lean_object*, lean_object*, lean_object*, lean_object*, lean_object*, lean_object*); +static lean_object* l_Lean_Elab_Command_initFn____x40_Lean_Elab_MutualDef___hyg_18180____closed__3; LEAN_EXPORT lean_object* l___private_Lean_Elab_MutualDef_0__Lean_Elab_Term_checkModifiers___lambda__1___boxed(lean_object*, lean_object*, lean_object*, lean_object*, lean_object*, lean_object*, lean_object*, lean_object*, lean_object*, lean_object*); LEAN_EXPORT uint8_t l___private_Lean_Elab_MutualDef_0__Lean_Elab_Term_isExample(lean_object*); LEAN_EXPORT uint8_t l_Array_anyMUnsafe_any___at___private_Lean_Elab_MutualDef_0__Lean_Elab_Term_withHeaderSecVars_collectUsed___spec__5___at___private_Lean_Elab_MutualDef_0__Lean_Elab_Term_withHeaderSecVars_collectUsed___spec__6(lean_object*, lean_object*, size_t, size_t); @@ -497,7 +497,6 @@ static lean_object* l_Lean_Elab_elabTerminationHints___at___private_Lean_Elab_Mu lean_object* l_Lean_Syntax_eqWithInfoAndTraceReuse(lean_object*, lean_object*, lean_object*); lean_object* l_Lean_LocalDecl_setUserName(lean_object*, lean_object*); LEAN_EXPORT lean_object* l_Lean_Elab_elabAttr___at_Lean_Elab_Command_elabMutualDef___spec__5___lambda__1___boxed(lean_object*); -static lean_object* l_Lean_Elab_Command_initFn____x40_Lean_Elab_MutualDef___hyg_18177____closed__2; LEAN_EXPORT lean_object* l_Array_forIn_x27Unsafe_loop___at___private_Lean_Elab_MutualDef_0__Lean_Elab_Term_MutualClosure_mkFreeVarMap___spec__5(lean_object*); LEAN_EXPORT lean_object* l_Array_mapMUnsafe_map___at_Lean_Elab_Term_elabMutualDef___spec__10(lean_object*, lean_object*, size_t, size_t, lean_object*, lean_object*, lean_object*, lean_object*, lean_object*, lean_object*, lean_object*, lean_object*); LEAN_EXPORT lean_object* l_Lean_Elab_instantiateMVarsProfiling(lean_object*, lean_object*, lean_object*, lean_object*, lean_object*, lean_object*); @@ -617,6 +616,7 @@ LEAN_EXPORT lean_object* l_Lean_Elab_addDeclarationRangesForBuiltin___at_Lean_El LEAN_EXPORT lean_object* l_Lean_Elab_addDeclarationRangesForBuiltin___at_Lean_Elab_Term_elabMutualDef_go___spec__4___boxed(lean_object*, lean_object*, lean_object*, lean_object*, lean_object*, lean_object*, lean_object*, lean_object*, lean_object*, lean_object*); static lean_object* l___private_Lean_Elab_MutualDef_0__Lean_Elab_Term_elabHeaders___closed__1; static lean_object* l___private_Lean_Elab_MutualDef_0__Lean_Elab_Term_check___lambda__3___closed__2; +static lean_object* l_Lean_Elab_Command_initFn____x40_Lean_Elab_MutualDef___hyg_18180____closed__8; LEAN_EXPORT lean_object* l_Array_mapMUnsafe_map___at_Lean_Elab_Term_elabMutualDef___spec__11(lean_object*, lean_object*, size_t, size_t, lean_object*, lean_object*, lean_object*, lean_object*, lean_object*, lean_object*, lean_object*, lean_object*); LEAN_EXPORT lean_object* l_Lean_Core_withRestoreOrSaveFull___at___private_Lean_Elab_MutualDef_0__Lean_Elab_Term_elabFunValues___spec__13___lambda__8___boxed(lean_object**); LEAN_EXPORT lean_object* l_Lean_Elab_elabTerminationHints___at___private_Lean_Elab_MutualDef_0__Lean_Elab_Term_declValToTerminationHint___spec__1___lambda__12___boxed(lean_object*, lean_object*, lean_object*, lean_object*, lean_object*, lean_object*, lean_object*, lean_object*, lean_object*, lean_object*, lean_object*, lean_object*, lean_object*, lean_object*, lean_object*, lean_object*); @@ -835,6 +835,7 @@ 
lean_object* l_Lean_LocalDecl_fvarId(lean_object*); static lean_object* l_Lean_Elab_Term_initFn____x40_Lean_Elab_MutualDef___hyg_5771____closed__4; LEAN_EXPORT lean_object* l___private_Lean_Elab_MutualDef_0__Lean_Elab_Term_MutualClosure_FixPoint_resetModified___rarg(lean_object*); static lean_object* l_Array_forIn_x27Unsafe_loop___at___private_Lean_Elab_MutualDef_0__Lean_Elab_Term_checkAllDeclNamesDistinct___spec__7___closed__1; +static lean_object* l_Lean_Elab_Command_initFn____x40_Lean_Elab_MutualDef___hyg_18180____closed__2; static lean_object* l_Array_forIn_x27Unsafe_loop___at___private_Lean_Elab_MutualDef_0__Lean_Elab_Term_elabHeaders___spec__12___lambda__4___closed__9; LEAN_EXPORT lean_object* l_Array_anyMUnsafe_any___at___private_Lean_Elab_MutualDef_0__Lean_Elab_Term_elabFunValues___spec__7___boxed(lean_object*, lean_object*, lean_object*, lean_object*, lean_object*, lean_object*, lean_object*, lean_object*, lean_object*, lean_object*, lean_object*); LEAN_EXPORT lean_object* l_Array_mapMUnsafe_map___at___private_Lean_Elab_MutualDef_0__Lean_Elab_Term_levelMVarToParamHeaders___spec__1(size_t, size_t, lean_object*, lean_object*, lean_object*, lean_object*, lean_object*, lean_object*, lean_object*, lean_object*); @@ -842,7 +843,6 @@ LEAN_EXPORT lean_object* l_Lean_addTrace___at___private_Lean_Elab_MutualDef_0__L LEAN_EXPORT lean_object* l_Lean_Meta_withAuxDecl___at___private_Lean_Elab_MutualDef_0__Lean_Elab_Term_withFunLocalDecls_loop___spec__1___rarg(lean_object*, lean_object*, lean_object*, lean_object*, lean_object*, lean_object*, lean_object*, lean_object*, lean_object*, lean_object*, lean_object*); lean_object* l_Lean_Expr_appFn_x21(lean_object*); LEAN_EXPORT lean_object* l_Array_forIn_x27Unsafe_loop___at___private_Lean_Elab_MutualDef_0__Lean_Elab_Term_MutualClosure_mkInitialUsedFVarsMap___spec__6___rarg___lambda__2___boxed(lean_object*, lean_object*, lean_object*, lean_object*, lean_object*, lean_object*, lean_object*, lean_object*, lean_object*, lean_object*, lean_object*, lean_object*, lean_object*); -static lean_object* l_Lean_Elab_Command_initFn____x40_Lean_Elab_MutualDef___hyg_18177____closed__6; LEAN_EXPORT lean_object* l_Lean_throwError___at___private_Lean_Elab_MutualDef_0__Lean_Elab_Term_declValToTerminationHint___spec__3___boxed(lean_object*, lean_object*, lean_object*, lean_object*, lean_object*, lean_object*, lean_object*, lean_object*); static lean_object* l_Lean_Core_withRestoreOrSaveFull___at___private_Lean_Elab_MutualDef_0__Lean_Elab_Term_elabFunValues___spec__13___lambda__10___closed__1; lean_object* l_Lean_Elab_Command_getScope___rarg(lean_object*, lean_object*); @@ -870,6 +870,7 @@ LEAN_EXPORT lean_object* l_Lean_throwErrorAt___at___private_Lean_Elab_MutualDef_ LEAN_EXPORT lean_object* l___private_Lean_Elab_MutualDef_0__Lean_Elab_Term_logGoalsAccomplishedSnapshotTask(lean_object*, lean_object*, lean_object*, lean_object*, lean_object*, lean_object*, lean_object*, lean_object*, lean_object*); static lean_object* l_Lean_Elab_instInhabitedDefViewElabHeader___closed__2; static lean_object* l_Lean_addDeclarationRanges___at_Lean_Elab_Term_elabMutualDef_go___spec__7___closed__1; +static lean_object* l_Lean_Elab_Command_initFn____x40_Lean_Elab_MutualDef___hyg_18180____closed__5; LEAN_EXPORT lean_object* l_Lean_Core_withRestoreOrSaveFull___at___private_Lean_Elab_MutualDef_0__Lean_Elab_Term_elabFunValues___spec__13___lambda__7(lean_object*, lean_object*, lean_object*, lean_object*, lean_object*, lean_object*, lean_object*, lean_object*, lean_object*, lean_object*, 
lean_object*, lean_object*, lean_object*, lean_object*); lean_object* l_Lean_Meta_getZetaDeltaFVarIds___rarg(lean_object*, lean_object*, lean_object*, lean_object*); LEAN_EXPORT lean_object* l___private_Lean_Elab_MutualDef_0__Lean_Elab_Term_checkAllDeclNamesDistinct___boxed(lean_object*, lean_object*, lean_object*, lean_object*, lean_object*, lean_object*, lean_object*, lean_object*); @@ -999,6 +1000,7 @@ LEAN_EXPORT lean_object* l_Array_mapMUnsafe_map___at_Lean_Elab_Term_MutualClosur LEAN_EXPORT lean_object* l_Array_forIn_x27Unsafe_loop___at___private_Lean_Elab_MutualDef_0__Lean_Elab_Term_MutualClosure_mkLetRecClosures___spec__4___boxed(lean_object*, lean_object*, lean_object*, lean_object*, lean_object*, lean_object*, lean_object*, lean_object*, lean_object*, lean_object*, lean_object*, lean_object*, lean_object*, lean_object*); LEAN_EXPORT lean_object* l_Lean_RBNode_fold___at___private_Lean_Elab_MutualDef_0__Lean_Elab_Term_MutualClosure_mkLetRecClosures___spec__10___boxed(lean_object*, lean_object*, lean_object*, lean_object*); extern lean_object* l_Lean_NameSet_empty; +static lean_object* l_Lean_Elab_Command_initFn____x40_Lean_Elab_MutualDef___hyg_18180____closed__4; LEAN_EXPORT lean_object* l_List_mapM_loop___at_Lean_Elab_Term_MutualClosure_main___spec__4___boxed(lean_object*, lean_object*, lean_object*, lean_object*, lean_object*, lean_object*, lean_object*, lean_object*, lean_object*); LEAN_EXPORT lean_object* l_Array_foldlMUnsafe_fold___at___private_Lean_Elab_MutualDef_0__Lean_Elab_Term_collectUsed___spec__2(lean_object*, size_t, size_t, lean_object*, lean_object*, lean_object*, lean_object*, lean_object*, lean_object*, lean_object*); LEAN_EXPORT lean_object* l_Array_mapMUnsafe_map___at_Lean_Elab_Term_elabMutualDef_go___spec__3___boxed(lean_object*, lean_object*, lean_object*, lean_object*, lean_object*, lean_object*, lean_object*, lean_object*, lean_object*, lean_object*); @@ -1270,7 +1272,6 @@ LEAN_EXPORT lean_object* l_Lean_Core_withRestoreOrSaveFull___at___private_Lean_E static lean_object* l_List_mapTR_loop___at___private_Lean_Elab_MutualDef_0__Lean_Elab_Term_getPendingMVarErrorMessage___spec__2___closed__2; static lean_object* l_Array_forIn_x27Unsafe_loop___at___private_Lean_Elab_MutualDef_0__Lean_Elab_Term_MutualClosure_mkInitialUsedFVarsMap___spec__4___rarg___lambda__4___closed__1; LEAN_EXPORT lean_object* l_List_foldlM___at_Lean_Elab_Term_MutualClosure_pushLetRecs___spec__1___lambda__2(lean_object*, uint8_t, lean_object*, lean_object*, lean_object*, lean_object*, lean_object*); -static lean_object* l_Lean_Elab_Command_initFn____x40_Lean_Elab_MutualDef___hyg_18177____closed__7; lean_object* l_Lean_Elab_Term_withLevelNames___rarg(lean_object*, lean_object*, lean_object*, lean_object*, lean_object*, lean_object*, lean_object*, lean_object*, lean_object*); static lean_object* l_List_mapTR_loop___at___private_Lean_Elab_MutualDef_0__Lean_Elab_Term_getPendingMVarErrorMessage___spec__1___closed__1; LEAN_EXPORT lean_object* l_Lean_Core_withRestoreOrSaveFull___at___private_Lean_Elab_MutualDef_0__Lean_Elab_Term_elabHeaders___spec__5___lambda__2___boxed(lean_object*, lean_object*, lean_object*, lean_object*, lean_object*, lean_object*, lean_object*, lean_object*, lean_object*, lean_object*, lean_object*, lean_object*, lean_object*, lean_object*, lean_object*, lean_object*); @@ -1332,7 +1333,6 @@ LEAN_EXPORT lean_object* l_Lean_Elab_Term_elabMutualDef_finishElab___lambda__1__ LEAN_EXPORT lean_object* l_Lean_Elab_Term_elabMutualDef_finishElab___lambda__3(lean_object*, lean_object*, 
size_t, size_t, lean_object*, lean_object*, lean_object*, lean_object*, lean_object*, lean_object*, lean_object*, lean_object*, lean_object*, lean_object*, lean_object*, lean_object*); static lean_object* l_Std_Range_forIn_x27_loop___at_Lean_Elab_Command_elabMutualDef___spec__10___closed__1; LEAN_EXPORT lean_object* l_Array_forIn_x27Unsafe_loop___at___private_Lean_Elab_MutualDef_0__Lean_Elab_Term_levelMVarToParamHeaders_process___spec__1___boxed(lean_object*, lean_object*, lean_object*, lean_object*, lean_object*, lean_object*, lean_object*, lean_object*, lean_object*, lean_object*, lean_object*, lean_object*, lean_object*, lean_object*); -static lean_object* l_Lean_Elab_Command_initFn____x40_Lean_Elab_MutualDef___hyg_18177____closed__8; static lean_object* l_Lean_Elab_Term_elabMutualDef_elabAsync___lambda__2___closed__2; static lean_object* l___private_Lean_Elab_MutualDef_0__Lean_Elab_Term_elabHeaders_mkTacTask___lambda__1___closed__9; static lean_object* l___private_Lean_Elab_MutualDef_0__Lean_Elab_Term_elabHeaders_mkTacTask___lambda__1___closed__7; @@ -1372,6 +1372,7 @@ LEAN_EXPORT lean_object* l___private_Lean_Elab_MutualDef_0__Lean_Elab_Term_check lean_object* lean_infer_type(lean_object*, lean_object*, lean_object*, lean_object*, lean_object*, lean_object*); static lean_object* l_Lean_Elab_elabTerminationHints___at___private_Lean_Elab_MutualDef_0__Lean_Elab_Term_declValToTerminationHint___spec__1___lambda__5___closed__5; lean_object* lean_find_expr(lean_object*, lean_object*); +static lean_object* l_Lean_Elab_Command_initFn____x40_Lean_Elab_MutualDef___hyg_18180____closed__7; uint8_t lean_nat_dec_le(lean_object*, lean_object*); LEAN_EXPORT lean_object* l_Lean_Elab_Term_elabMutualDef_elabAsync(lean_object*, lean_object*, lean_object*, lean_object*, lean_object*, lean_object*, lean_object*, lean_object*, lean_object*, lean_object*, lean_object*, lean_object*, lean_object*); LEAN_EXPORT lean_object* l___private_Lean_Elab_MutualDef_0__Lean_Elab_Term_elabHeaders(lean_object*, lean_object*, lean_object*, lean_object*, lean_object*, lean_object*, lean_object*, lean_object*, lean_object*, lean_object*, lean_object*); @@ -1425,7 +1426,6 @@ lean_object* l_Lean_Expr_mvarId_x21(lean_object*); static lean_object* l_Array_forIn_x27Unsafe_loop___at___private_Lean_Elab_MutualDef_0__Lean_Elab_Term_withHeaderSecVars_collectUsed___spec__7___lambda__1___closed__1; static double l_Lean_withTraceNode___at___private_Lean_Elab_MutualDef_0__Lean_Elab_Term_elabHeaders___spec__10___lambda__4___closed__4; static lean_object* l_Array_forIn_x27Unsafe_loop___at_Lean_Elab_Term_elabMutualDef_finishElab___spec__3___closed__3; -static lean_object* l_Lean_Elab_Command_initFn____x40_Lean_Elab_MutualDef___hyg_18177____closed__1; LEAN_EXPORT lean_object* l_Lean_throwError___at_Lean_Elab_Command_elabMutualDef___spec__9___boxed(lean_object*, lean_object*, lean_object*, lean_object*); lean_object* l_Lean_Elab_Term_mkBodyInfo(lean_object*, lean_object*); static lean_object* l_Lean_withTraceNode___at___private_Lean_Elab_MutualDef_0__Lean_Elab_Term_elabHeaders___spec__10___lambda__4___closed__2; @@ -1435,7 +1435,6 @@ LEAN_EXPORT lean_object* l_Lean_PersistentArray_anyMAux___at___private_Lean_Elab lean_object* l_Lean_Elab_fixLevelParams(lean_object*, lean_object*, lean_object*, lean_object*, lean_object*, lean_object*, lean_object*, lean_object*, lean_object*, lean_object*); lean_object* l_Lean_Elab_Term_expandWhereDecls(lean_object*, lean_object*, lean_object*, lean_object*); lean_object* lean_array_uset(lean_object*, size_t, 
lean_object*); -static lean_object* l_Lean_Elab_Command_initFn____x40_Lean_Elab_MutualDef___hyg_18177____closed__5; lean_object* l_Lean_MessageData_ofName(lean_object*); static lean_object* l_Lean_Elab_initFn____x40_Lean_Elab_MutualDef___hyg_5____closed__2; LEAN_EXPORT lean_object* l_Lean_Core_withRestoreOrSaveFull___at___private_Lean_Elab_MutualDef_0__Lean_Elab_Term_elabFunValues___spec__13___lambda__6(lean_object*, lean_object*, lean_object*, uint8_t, lean_object*, lean_object*, lean_object*, lean_object*, lean_object*, lean_object*, lean_object*, lean_object*, lean_object*, lean_object*, lean_object*, lean_object*, lean_object*, lean_object*, lean_object*, lean_object*); @@ -1450,6 +1449,7 @@ LEAN_EXPORT lean_object* l_Array_foldlMUnsafe_fold___at___private_Lean_Elab_Mutu LEAN_EXPORT uint8_t l___private_Lean_Elab_MutualDef_0__Lean_Elab_Term_typeHasRecFun___lambda__1(lean_object*, lean_object*); LEAN_EXPORT lean_object* l_Nat_foldTR_loop___at_Lean_Elab_Term_MutualClosure_insertReplacementForMainFns___spec__1(lean_object*, lean_object*, lean_object*, lean_object*, lean_object*, lean_object*, lean_object*); LEAN_EXPORT lean_object* l_Lean_throwErrorAt___at___private_Lean_Elab_MutualDef_0__Lean_Elab_Term_declValToTerminationHint___spec__4(lean_object*, lean_object*, lean_object*, lean_object*, lean_object*, lean_object*, lean_object*, lean_object*, lean_object*); +static lean_object* l_Lean_Elab_Command_initFn____x40_Lean_Elab_MutualDef___hyg_18180____closed__6; LEAN_EXPORT lean_object* l_Array_forIn_x27Unsafe_loop___at___private_Lean_Elab_MutualDef_0__Lean_Elab_Term_MutualClosure_mkInitialUsedFVarsMap___spec__4___rarg___lambda__1(lean_object*); LEAN_EXPORT lean_object* l_Lean_Elab_elabTerminationHints___at___private_Lean_Elab_MutualDef_0__Lean_Elab_Term_declValToTerminationHint___spec__1___lambda__15(lean_object*, lean_object*, lean_object*, lean_object*, lean_object*, lean_object*, lean_object*, lean_object*, lean_object*, lean_object*, lean_object*); LEAN_EXPORT lean_object* l_Array_getMax_x3f___at___private_Lean_Elab_MutualDef_0__Lean_Elab_Term_MutualClosure_pickMaxFVar_x3f___spec__1___boxed(lean_object*, lean_object*); @@ -61507,22 +61507,22 @@ lean_dec(x_10); if (x_12 == 0) { lean_object* x_13; lean_object* x_14; +lean_dec(x_7); +lean_dec(x_5); +lean_dec(x_3); +lean_dec(x_2); +lean_dec(x_1); x_13 = lean_box(0); -x_14 = l___private_Lean_Elab_MutualDef_0__Lean_Elab_Term_logGoalsAccomplishedSnapshotTask___lambda__3(x_1, x_2, x_13, x_3, x_4, x_5, x_6, x_7, x_8, x_9); +x_14 = lean_alloc_ctor(0, 2, 0); +lean_ctor_set(x_14, 0, x_13); +lean_ctor_set(x_14, 1, x_9); return x_14; } else { lean_object* x_15; lean_object* x_16; -lean_dec(x_7); -lean_dec(x_5); -lean_dec(x_3); -lean_dec(x_2); -lean_dec(x_1); x_15 = lean_box(0); -x_16 = lean_alloc_ctor(0, 2, 0); -lean_ctor_set(x_16, 0, x_15); -lean_ctor_set(x_16, 1, x_9); +x_16 = l___private_Lean_Elab_MutualDef_0__Lean_Elab_Term_logGoalsAccomplishedSnapshotTask___lambda__3(x_1, x_2, x_15, x_3, x_4, x_5, x_6, x_7, x_8, x_9); return x_16; } } @@ -65544,7 +65544,7 @@ lean_dec(x_3); return x_7; } } -static lean_object* _init_l_Lean_Elab_Command_initFn____x40_Lean_Elab_MutualDef___hyg_18177____closed__1() { +static lean_object* _init_l_Lean_Elab_Command_initFn____x40_Lean_Elab_MutualDef___hyg_18180____closed__1() { _start: { lean_object* x_1; lean_object* x_2; lean_object* x_3; @@ -65554,83 +65554,83 @@ x_3 = l_Lean_Name_str___override(x_1, x_2); return x_3; } } -static lean_object* 
_init_l_Lean_Elab_Command_initFn____x40_Lean_Elab_MutualDef___hyg_18177____closed__2() { +static lean_object* _init_l_Lean_Elab_Command_initFn____x40_Lean_Elab_MutualDef___hyg_18180____closed__2() { _start: { lean_object* x_1; lean_object* x_2; lean_object* x_3; -x_1 = l_Lean_Elab_Command_initFn____x40_Lean_Elab_MutualDef___hyg_18177____closed__1; +x_1 = l_Lean_Elab_Command_initFn____x40_Lean_Elab_MutualDef___hyg_18180____closed__1; x_2 = l_Lean_Elab_initFn____x40_Lean_Elab_MutualDef___hyg_5____closed__8; x_3 = l_Lean_Name_str___override(x_1, x_2); return x_3; } } -static lean_object* _init_l_Lean_Elab_Command_initFn____x40_Lean_Elab_MutualDef___hyg_18177____closed__3() { +static lean_object* _init_l_Lean_Elab_Command_initFn____x40_Lean_Elab_MutualDef___hyg_18180____closed__3() { _start: { lean_object* x_1; lean_object* x_2; lean_object* x_3; -x_1 = l_Lean_Elab_Command_initFn____x40_Lean_Elab_MutualDef___hyg_18177____closed__2; +x_1 = l_Lean_Elab_Command_initFn____x40_Lean_Elab_MutualDef___hyg_18180____closed__2; x_2 = l_Lean_Elab_initFn____x40_Lean_Elab_MutualDef___hyg_5____closed__10; x_3 = l_Lean_Name_str___override(x_1, x_2); return x_3; } } -static lean_object* _init_l_Lean_Elab_Command_initFn____x40_Lean_Elab_MutualDef___hyg_18177____closed__4() { +static lean_object* _init_l_Lean_Elab_Command_initFn____x40_Lean_Elab_MutualDef___hyg_18180____closed__4() { _start: { lean_object* x_1; lean_object* x_2; lean_object* x_3; -x_1 = l_Lean_Elab_Command_initFn____x40_Lean_Elab_MutualDef___hyg_18177____closed__3; +x_1 = l_Lean_Elab_Command_initFn____x40_Lean_Elab_MutualDef___hyg_18180____closed__3; x_2 = l_Lean_Elab_initFn____x40_Lean_Elab_MutualDef___hyg_5____closed__4; x_3 = l_Lean_Name_str___override(x_1, x_2); return x_3; } } -static lean_object* _init_l_Lean_Elab_Command_initFn____x40_Lean_Elab_MutualDef___hyg_18177____closed__5() { +static lean_object* _init_l_Lean_Elab_Command_initFn____x40_Lean_Elab_MutualDef___hyg_18180____closed__5() { _start: { lean_object* x_1; lean_object* x_2; lean_object* x_3; -x_1 = l_Lean_Elab_Command_initFn____x40_Lean_Elab_MutualDef___hyg_18177____closed__4; +x_1 = l_Lean_Elab_Command_initFn____x40_Lean_Elab_MutualDef___hyg_18180____closed__4; x_2 = l_Lean_Elab_initFn____x40_Lean_Elab_MutualDef___hyg_5____closed__6; x_3 = l_Lean_Name_str___override(x_1, x_2); return x_3; } } -static lean_object* _init_l_Lean_Elab_Command_initFn____x40_Lean_Elab_MutualDef___hyg_18177____closed__6() { +static lean_object* _init_l_Lean_Elab_Command_initFn____x40_Lean_Elab_MutualDef___hyg_18180____closed__6() { _start: { lean_object* x_1; lean_object* x_2; lean_object* x_3; -x_1 = l_Lean_Elab_Command_initFn____x40_Lean_Elab_MutualDef___hyg_18177____closed__5; +x_1 = l_Lean_Elab_Command_initFn____x40_Lean_Elab_MutualDef___hyg_18180____closed__5; x_2 = l_Lean_Elab_initFn____x40_Lean_Elab_MutualDef___hyg_5____closed__14; x_3 = l_Lean_Name_str___override(x_1, x_2); return x_3; } } -static lean_object* _init_l_Lean_Elab_Command_initFn____x40_Lean_Elab_MutualDef___hyg_18177____closed__7() { +static lean_object* _init_l_Lean_Elab_Command_initFn____x40_Lean_Elab_MutualDef___hyg_18180____closed__7() { _start: { lean_object* x_1; lean_object* x_2; lean_object* x_3; -x_1 = l_Lean_Elab_Command_initFn____x40_Lean_Elab_MutualDef___hyg_18177____closed__6; +x_1 = l_Lean_Elab_Command_initFn____x40_Lean_Elab_MutualDef___hyg_18180____closed__6; x_2 = l_Lean_Elab_initFn____x40_Lean_Elab_MutualDef___hyg_5____closed__16; x_3 = l_Lean_Name_str___override(x_1, x_2); return x_3; } } -static lean_object* 
_init_l_Lean_Elab_Command_initFn____x40_Lean_Elab_MutualDef___hyg_18177____closed__8() { +static lean_object* _init_l_Lean_Elab_Command_initFn____x40_Lean_Elab_MutualDef___hyg_18180____closed__8() { _start: { lean_object* x_1; lean_object* x_2; lean_object* x_3; -x_1 = l_Lean_Elab_Command_initFn____x40_Lean_Elab_MutualDef___hyg_18177____closed__7; -x_2 = lean_unsigned_to_nat(18177u); +x_1 = l_Lean_Elab_Command_initFn____x40_Lean_Elab_MutualDef___hyg_18180____closed__7; +x_2 = lean_unsigned_to_nat(18180u); x_3 = l_Lean_Name_num___override(x_1, x_2); return x_3; } } -LEAN_EXPORT lean_object* l_Lean_Elab_Command_initFn____x40_Lean_Elab_MutualDef___hyg_18177_(lean_object* x_1) { +LEAN_EXPORT lean_object* l_Lean_Elab_Command_initFn____x40_Lean_Elab_MutualDef___hyg_18180_(lean_object* x_1) { _start: { lean_object* x_2; uint8_t x_3; lean_object* x_4; lean_object* x_5; x_2 = l___private_Lean_Elab_MutualDef_0__Lean_Elab_Term_MutualClosure_mkClosureForAux___closed__2; x_3 = 0; -x_4 = l_Lean_Elab_Command_initFn____x40_Lean_Elab_MutualDef___hyg_18177____closed__8; +x_4 = l_Lean_Elab_Command_initFn____x40_Lean_Elab_MutualDef___hyg_18180____closed__8; x_5 = l_Lean_registerTraceClass(x_2, x_3, x_4, x_1); if (lean_obj_tag(x_5) == 0) { @@ -66459,23 +66459,23 @@ l_Lean_Elab_Command_elabMutualDef___closed__5 = _init_l_Lean_Elab_Command_elabMu lean_mark_persistent(l_Lean_Elab_Command_elabMutualDef___closed__5); l_Lean_Elab_Command_elabMutualDef___closed__6 = _init_l_Lean_Elab_Command_elabMutualDef___closed__6(); lean_mark_persistent(l_Lean_Elab_Command_elabMutualDef___closed__6); -l_Lean_Elab_Command_initFn____x40_Lean_Elab_MutualDef___hyg_18177____closed__1 = _init_l_Lean_Elab_Command_initFn____x40_Lean_Elab_MutualDef___hyg_18177____closed__1(); -lean_mark_persistent(l_Lean_Elab_Command_initFn____x40_Lean_Elab_MutualDef___hyg_18177____closed__1); -l_Lean_Elab_Command_initFn____x40_Lean_Elab_MutualDef___hyg_18177____closed__2 = _init_l_Lean_Elab_Command_initFn____x40_Lean_Elab_MutualDef___hyg_18177____closed__2(); -lean_mark_persistent(l_Lean_Elab_Command_initFn____x40_Lean_Elab_MutualDef___hyg_18177____closed__2); -l_Lean_Elab_Command_initFn____x40_Lean_Elab_MutualDef___hyg_18177____closed__3 = _init_l_Lean_Elab_Command_initFn____x40_Lean_Elab_MutualDef___hyg_18177____closed__3(); -lean_mark_persistent(l_Lean_Elab_Command_initFn____x40_Lean_Elab_MutualDef___hyg_18177____closed__3); -l_Lean_Elab_Command_initFn____x40_Lean_Elab_MutualDef___hyg_18177____closed__4 = _init_l_Lean_Elab_Command_initFn____x40_Lean_Elab_MutualDef___hyg_18177____closed__4(); -lean_mark_persistent(l_Lean_Elab_Command_initFn____x40_Lean_Elab_MutualDef___hyg_18177____closed__4); -l_Lean_Elab_Command_initFn____x40_Lean_Elab_MutualDef___hyg_18177____closed__5 = _init_l_Lean_Elab_Command_initFn____x40_Lean_Elab_MutualDef___hyg_18177____closed__5(); -lean_mark_persistent(l_Lean_Elab_Command_initFn____x40_Lean_Elab_MutualDef___hyg_18177____closed__5); -l_Lean_Elab_Command_initFn____x40_Lean_Elab_MutualDef___hyg_18177____closed__6 = _init_l_Lean_Elab_Command_initFn____x40_Lean_Elab_MutualDef___hyg_18177____closed__6(); -lean_mark_persistent(l_Lean_Elab_Command_initFn____x40_Lean_Elab_MutualDef___hyg_18177____closed__6); -l_Lean_Elab_Command_initFn____x40_Lean_Elab_MutualDef___hyg_18177____closed__7 = _init_l_Lean_Elab_Command_initFn____x40_Lean_Elab_MutualDef___hyg_18177____closed__7(); -lean_mark_persistent(l_Lean_Elab_Command_initFn____x40_Lean_Elab_MutualDef___hyg_18177____closed__7); 
-l_Lean_Elab_Command_initFn____x40_Lean_Elab_MutualDef___hyg_18177____closed__8 = _init_l_Lean_Elab_Command_initFn____x40_Lean_Elab_MutualDef___hyg_18177____closed__8(); -lean_mark_persistent(l_Lean_Elab_Command_initFn____x40_Lean_Elab_MutualDef___hyg_18177____closed__8); -if (builtin) {res = l_Lean_Elab_Command_initFn____x40_Lean_Elab_MutualDef___hyg_18177_(lean_io_mk_world()); +l_Lean_Elab_Command_initFn____x40_Lean_Elab_MutualDef___hyg_18180____closed__1 = _init_l_Lean_Elab_Command_initFn____x40_Lean_Elab_MutualDef___hyg_18180____closed__1(); +lean_mark_persistent(l_Lean_Elab_Command_initFn____x40_Lean_Elab_MutualDef___hyg_18180____closed__1); +l_Lean_Elab_Command_initFn____x40_Lean_Elab_MutualDef___hyg_18180____closed__2 = _init_l_Lean_Elab_Command_initFn____x40_Lean_Elab_MutualDef___hyg_18180____closed__2(); +lean_mark_persistent(l_Lean_Elab_Command_initFn____x40_Lean_Elab_MutualDef___hyg_18180____closed__2); +l_Lean_Elab_Command_initFn____x40_Lean_Elab_MutualDef___hyg_18180____closed__3 = _init_l_Lean_Elab_Command_initFn____x40_Lean_Elab_MutualDef___hyg_18180____closed__3(); +lean_mark_persistent(l_Lean_Elab_Command_initFn____x40_Lean_Elab_MutualDef___hyg_18180____closed__3); +l_Lean_Elab_Command_initFn____x40_Lean_Elab_MutualDef___hyg_18180____closed__4 = _init_l_Lean_Elab_Command_initFn____x40_Lean_Elab_MutualDef___hyg_18180____closed__4(); +lean_mark_persistent(l_Lean_Elab_Command_initFn____x40_Lean_Elab_MutualDef___hyg_18180____closed__4); +l_Lean_Elab_Command_initFn____x40_Lean_Elab_MutualDef___hyg_18180____closed__5 = _init_l_Lean_Elab_Command_initFn____x40_Lean_Elab_MutualDef___hyg_18180____closed__5(); +lean_mark_persistent(l_Lean_Elab_Command_initFn____x40_Lean_Elab_MutualDef___hyg_18180____closed__5); +l_Lean_Elab_Command_initFn____x40_Lean_Elab_MutualDef___hyg_18180____closed__6 = _init_l_Lean_Elab_Command_initFn____x40_Lean_Elab_MutualDef___hyg_18180____closed__6(); +lean_mark_persistent(l_Lean_Elab_Command_initFn____x40_Lean_Elab_MutualDef___hyg_18180____closed__6); +l_Lean_Elab_Command_initFn____x40_Lean_Elab_MutualDef___hyg_18180____closed__7 = _init_l_Lean_Elab_Command_initFn____x40_Lean_Elab_MutualDef___hyg_18180____closed__7(); +lean_mark_persistent(l_Lean_Elab_Command_initFn____x40_Lean_Elab_MutualDef___hyg_18180____closed__7); +l_Lean_Elab_Command_initFn____x40_Lean_Elab_MutualDef___hyg_18180____closed__8 = _init_l_Lean_Elab_Command_initFn____x40_Lean_Elab_MutualDef___hyg_18180____closed__8(); +lean_mark_persistent(l_Lean_Elab_Command_initFn____x40_Lean_Elab_MutualDef___hyg_18180____closed__8); +if (builtin) {res = l_Lean_Elab_Command_initFn____x40_Lean_Elab_MutualDef___hyg_18180_(lean_io_mk_world()); if (lean_io_result_is_error(res)) return res; lean_dec_ref(res); }return lean_io_result_mk_ok(lean_box(0)); diff --git a/stage0/stdlib/Lean/Elab/ParseImportsFast.c b/stage0/stdlib/Lean/Elab/ParseImportsFast.c index f2fb304468e0..1d26e460cc9b 100644 --- a/stage0/stdlib/Lean/Elab/ParseImportsFast.c +++ b/stage0/stdlib/Lean/Elab/ParseImportsFast.c @@ -23,6 +23,7 @@ LEAN_EXPORT lean_object* l_Lean_ParseImports_many___at_Lean_ParseImports_main___ LEAN_EXPORT lean_object* l_Lean_ParseImports_instInhabitedParser___boxed(lean_object*); LEAN_EXPORT lean_object* l_Lean_ParseImports_takeWhile___boxed(lean_object*, lean_object*, lean_object*); lean_object* l_Lean_Json_mkObj(lean_object*); +LEAN_EXPORT lean_object* l_Lean_instToJsonImport__1; LEAN_EXPORT lean_object* l_Lean_ParseImports_andthen(lean_object*, lean_object*, lean_object*, lean_object*); static lean_object* 
l___private_Lean_Elab_ParseImportsFast_0__Lean_toJsonImport____x40_Lean_Elab_ParseImportsFast___hyg_1380____closed__2; lean_object* l_Lean_Name_toString(lean_object*, uint8_t, lean_object*); @@ -38,6 +39,7 @@ static lean_object* l_Lean_ParseImports_keywordCore_go___at_Lean_ParseImports_ma extern uint32_t l_Lean_idBeginEscape; lean_object* l_List_flatMapTR_go___at___private_Lean_Server_Rpc_Basic_0__Lean_Lsp_toJsonRpcRef____x40_Lean_Server_Rpc_Basic___hyg_173____spec__1(lean_object*, lean_object*); LEAN_EXPORT lean_object* l_Lean_ParseImports_keywordCore_go___at_Lean_ParseImports_main___spec__1___boxed(lean_object*, lean_object*, lean_object*, lean_object*, lean_object*); +static lean_object* l_Lean_instToJsonImport__1___closed__1; static lean_object* l___private_Lean_Elab_ParseImportsFast_0__Lean_toJsonParseImportsResult____x40_Lean_Elab_ParseImportsFast___hyg_1467____closed__2; LEAN_EXPORT lean_object* l_Array_mapMUnsafe_map___at___private_Lean_Elab_ParseImportsFast_0__Lean_toJsonPrintImportResult____x40_Lean_Elab_ParseImportsFast___hyg_1625____spec__2(size_t, size_t, lean_object*); LEAN_EXPORT lean_object* l_Array_mapMUnsafe_map___at___private_Lean_Elab_ParseImportsFast_0__Lean_toJsonPrintImportsResult____x40_Lean_Elab_ParseImportsFast___hyg_1680____spec__1(size_t, size_t, lean_object*); @@ -112,7 +114,6 @@ LEAN_EXPORT lean_object* l_Lean_ParseImports_moduleIdent___lambda__1___boxed(lea lean_object* l_IO_println___at_Lean_Environment_displayStats___spec__3(lean_object*, lean_object*); LEAN_EXPORT lean_object* l_Lean_ParseImports_State_mkError(lean_object*, lean_object*); uint8_t lean_nat_dec_eq(lean_object*, lean_object*); -static lean_object* l_Lean_instToJsonImport___closed__1; static lean_object* l_Lean_ParseImports_moduleIdent_parse___closed__3; LEAN_EXPORT lean_object* l_Lean_ParseImports_keywordCore_go___at_Lean_ParseImports_main___spec__1(lean_object*, lean_object*, lean_object*, lean_object*, lean_object*); uint8_t l_Lean_isSubScriptAlnum(uint32_t); @@ -166,7 +167,6 @@ lean_object* lean_nat_add(lean_object*, lean_object*); LEAN_EXPORT lean_object* l_Lean_ParseImports_State_setPos(lean_object*, lean_object*); LEAN_EXPORT lean_object* l_Lean_ParseImports_takeWhile(lean_object*, lean_object*, lean_object*); static lean_object* l_Lean_instToJsonPrintImportsResult___closed__1; -LEAN_EXPORT lean_object* l_Lean_instToJsonImport; LEAN_EXPORT lean_object* lean_print_imports_json(lean_object*, lean_object*); lean_object* lean_array_uset(lean_object*, size_t, lean_object*); static lean_object* l_Lean_ParseImports_moduleIdent_parse___closed__2; @@ -4772,7 +4772,7 @@ x_3 = lean_box(x_2); return x_3; } } -static lean_object* _init_l_Lean_instToJsonImport___closed__1() { +static lean_object* _init_l_Lean_instToJsonImport__1___closed__1() { _start: { lean_object* x_1; @@ -4780,11 +4780,11 @@ x_1 = lean_alloc_closure((void*)(l___private_Lean_Elab_ParseImportsFast_0__Lean_ return x_1; } } -static lean_object* _init_l_Lean_instToJsonImport() { +static lean_object* _init_l_Lean_instToJsonImport__1() { _start: { lean_object* x_1; -x_1 = l_Lean_instToJsonImport___closed__1; +x_1 = l_Lean_instToJsonImport__1___closed__1; return x_1; } } @@ -5594,10 +5594,10 @@ l___private_Lean_Elab_ParseImportsFast_0__Lean_toJsonImport____x40_Lean_Elab_Par lean_mark_persistent(l___private_Lean_Elab_ParseImportsFast_0__Lean_toJsonImport____x40_Lean_Elab_ParseImportsFast___hyg_1380____closed__3); l___private_Lean_Elab_ParseImportsFast_0__Lean_toJsonImport____x40_Lean_Elab_ParseImportsFast___hyg_1380____closed__4 = 
_init_l___private_Lean_Elab_ParseImportsFast_0__Lean_toJsonImport____x40_Lean_Elab_ParseImportsFast___hyg_1380____closed__4(); lean_mark_persistent(l___private_Lean_Elab_ParseImportsFast_0__Lean_toJsonImport____x40_Lean_Elab_ParseImportsFast___hyg_1380____closed__4); -l_Lean_instToJsonImport___closed__1 = _init_l_Lean_instToJsonImport___closed__1(); -lean_mark_persistent(l_Lean_instToJsonImport___closed__1); -l_Lean_instToJsonImport = _init_l_Lean_instToJsonImport(); -lean_mark_persistent(l_Lean_instToJsonImport); +l_Lean_instToJsonImport__1___closed__1 = _init_l_Lean_instToJsonImport__1___closed__1(); +lean_mark_persistent(l_Lean_instToJsonImport__1___closed__1); +l_Lean_instToJsonImport__1 = _init_l_Lean_instToJsonImport__1(); +lean_mark_persistent(l_Lean_instToJsonImport__1); l___private_Lean_Elab_ParseImportsFast_0__Lean_toJsonParseImportsResult____x40_Lean_Elab_ParseImportsFast___hyg_1467____closed__1 = _init_l___private_Lean_Elab_ParseImportsFast_0__Lean_toJsonParseImportsResult____x40_Lean_Elab_ParseImportsFast___hyg_1467____closed__1(); lean_mark_persistent(l___private_Lean_Elab_ParseImportsFast_0__Lean_toJsonParseImportsResult____x40_Lean_Elab_ParseImportsFast___hyg_1467____closed__1); l___private_Lean_Elab_ParseImportsFast_0__Lean_toJsonParseImportsResult____x40_Lean_Elab_ParseImportsFast___hyg_1467____closed__2 = _init_l___private_Lean_Elab_ParseImportsFast_0__Lean_toJsonParseImportsResult____x40_Lean_Elab_ParseImportsFast___hyg_1467____closed__2(); diff --git a/stage0/stdlib/Lean/Elab/Tactic/Basic.c b/stage0/stdlib/Lean/Elab/Tactic/Basic.c index 10895814f3ab..6c1928a751c8 100644 --- a/stage0/stdlib/Lean/Elab/Tactic/Basic.c +++ b/stage0/stdlib/Lean/Elab/Tactic/Basic.c @@ -43,7 +43,7 @@ LEAN_EXPORT lean_object* l_Lean_Elab_Term_withoutTacticIncrementality___at_Lean_ LEAN_EXPORT lean_object* l_Lean_Elab_liftMacroM___at_Lean_Elab_Tactic_evalTactic_expandEval___spec__1___boxed(lean_object*, lean_object*, lean_object*, lean_object*, lean_object*, lean_object*, lean_object*, lean_object*, lean_object*, lean_object*); LEAN_EXPORT lean_object* l_Lean_Elab_Tactic_instMonadExceptExceptionTacticM___lambda__1(lean_object*, lean_object*, lean_object*, lean_object*, lean_object*, lean_object*, lean_object*, lean_object*, lean_object*, lean_object*, lean_object*); LEAN_EXPORT lean_object* l_Lean_addTrace___at_Lean_Elab_Tactic_evalTactic_handleEx___spec__3(lean_object*, lean_object*, lean_object*, lean_object*, lean_object*, lean_object*, lean_object*, lean_object*, lean_object*, lean_object*, lean_object*); -LEAN_EXPORT lean_object* l_Lean_Elab_admitGoal___lambda__1(lean_object*, lean_object*, lean_object*, lean_object*, lean_object*, lean_object*, lean_object*); +LEAN_EXPORT lean_object* l_Lean_Elab_admitGoal___lambda__1(lean_object*, uint8_t, lean_object*, lean_object*, lean_object*, lean_object*, lean_object*, lean_object*); LEAN_EXPORT lean_object* l_Lean_Elab_liftMacroM___at_Lean_Elab_Tactic_evalTactic_expandEval___spec__1___lambda__4___boxed(lean_object*, lean_object*, lean_object*, lean_object*, lean_object*, lean_object*); static lean_object* l_Lean_Elab_Tactic_instMonadTacticM___closed__4; LEAN_EXPORT lean_object* l___private_Lean_Elab_Tactic_Basic_0__Lean_Elab_Tactic_TacticM_runCore_x27(lean_object*); @@ -102,6 +102,7 @@ lean_object* l_Lean_PersistentArray_push___rarg(lean_object*, lean_object*); static lean_object* l_Lean_Elab_Tactic_mkTacticAttribute___closed__10; lean_object* lean_array_push(lean_object*, lean_object*); static lean_object* 
l_Lean_Elab_Tactic_evalTactic___lambda__6___closed__5; +static lean_object* l_Lean_Elab_Tactic_initFn____x40_Lean_Elab_Tactic_Basic___hyg_8856____closed__12; LEAN_EXPORT lean_object* l_List_forM___at_Lean_Elab_Tactic_evalTactic_expandEval___spec__2___boxed(lean_object*, lean_object*, lean_object*, lean_object*, lean_object*, lean_object*, lean_object*, lean_object*, lean_object*, lean_object*); lean_object* l_Lean_Core_getMessageLog___rarg(lean_object*, lean_object*); lean_object* l_Lean_Core_checkSystem(lean_object*, lean_object*, lean_object*, lean_object*); @@ -113,6 +114,7 @@ uint8_t lean_usize_dec_eq(size_t, size_t); static lean_object* l_Lean_Elab_Tactic_mkTacticAttribute___closed__3; LEAN_EXPORT lean_object* l_Lean_isTracingEnabledFor___at_Lean_Elab_Tactic_evalTactic_handleEx___spec__1___boxed(lean_object*, lean_object*, lean_object*, lean_object*, lean_object*, lean_object*, lean_object*, lean_object*, lean_object*, lean_object*); LEAN_EXPORT lean_object* l_Lean_Elab_Tactic_mkTacticInfo(lean_object*, lean_object*, lean_object*, lean_object*, lean_object*, lean_object*, lean_object*, lean_object*, lean_object*, lean_object*, lean_object*, lean_object*); +static lean_object* l_Lean_Elab_Tactic_initFn____x40_Lean_Elab_Tactic_Basic___hyg_8856____closed__8; lean_object* l_Lean_Meta_mkLabeledSorry(lean_object*, uint8_t, uint8_t, lean_object*, lean_object*, lean_object*, lean_object*, lean_object*); extern lean_object* l_Lean_Elab_Term_instMonadTermElabM; lean_object* l_Lean_Syntax_getArgs(lean_object*); @@ -121,17 +123,17 @@ LEAN_EXPORT lean_object* l_Lean_Elab_Tactic_appendGoals(lean_object*, lean_objec LEAN_EXPORT lean_object* l_Lean_Elab_Tactic_withMacroExpansion___rarg___lambda__1(lean_object*, lean_object*, lean_object*, lean_object*, lean_object*, lean_object*, lean_object*, lean_object*, lean_object*, lean_object*, lean_object*, lean_object*); lean_object* l_Lean_replaceRef(lean_object*, lean_object*); LEAN_EXPORT lean_object* l_Lean_Elab_Tactic_getMainGoal_loop(lean_object*, lean_object*, lean_object*, lean_object*, lean_object*, lean_object*, lean_object*, lean_object*, lean_object*, lean_object*); -static lean_object* l_Lean_Elab_Tactic_initFn____x40_Lean_Elab_Tactic_Basic___hyg_8853____closed__1; +static lean_object* l_Lean_Elab_Tactic_initFn____x40_Lean_Elab_Tactic_Basic___hyg_8856____closed__7; LEAN_EXPORT lean_object* l_Lean_Elab_Tactic_evalTactic_eval___lambda__1___boxed(lean_object*, lean_object*, lean_object*, lean_object*, lean_object*, lean_object*, lean_object*, lean_object*, lean_object*, lean_object*, lean_object*, lean_object*, lean_object*, lean_object*, lean_object*); LEAN_EXPORT lean_object* l_ReaderT_pure___at_Lean_Elab_Tactic_saveTacticInfoForToken___spec__1___rarg(lean_object*, lean_object*, lean_object*, lean_object*, lean_object*, lean_object*, lean_object*, lean_object*, lean_object*, lean_object*); lean_object* l_Lean_Syntax_getPos_x3f(lean_object*, uint8_t); LEAN_EXPORT lean_object* l_Lean_Elab_Tactic_withoutRecover___rarg(lean_object*, lean_object*, lean_object*, lean_object*, lean_object*, lean_object*, lean_object*, lean_object*, lean_object*, lean_object*); -LEAN_EXPORT lean_object* l_Lean_Elab_Tactic_initFn____x40_Lean_Elab_Tactic_Basic___hyg_8853_(lean_object*); LEAN_EXPORT lean_object* l_Lean_Elab_Tactic_pushGoal___boxed(lean_object*, lean_object*, lean_object*, lean_object*, lean_object*, lean_object*, lean_object*, lean_object*, lean_object*, lean_object*); static lean_object* 
l_Lean_Elab_isIncrementalElab___at_Lean_Elab_Tactic_evalTactic_eval___spec__2___closed__2; lean_object* l_Lean_Syntax_getTailPos_x3f(lean_object*, uint8_t); static lean_object* l_Lean_Elab_Tactic_instMonadExceptExceptionTacticM___closed__3; LEAN_EXPORT lean_object* l_Lean_Elab_Tactic_getMainGoal(lean_object*, lean_object*, lean_object*, lean_object*, lean_object*, lean_object*, lean_object*, lean_object*, lean_object*); +static lean_object* l_Lean_Elab_Tactic_initFn____x40_Lean_Elab_Tactic_Basic___hyg_8856____closed__2; static lean_object* l_Lean_Elab_Tactic_instMonadBacktrackSavedStateTacticM___closed__3; LEAN_EXPORT lean_object* l_Lean_Elab_Tactic_focus(lean_object*); uint8_t lean_float_decLt(double, double); @@ -143,7 +145,6 @@ LEAN_EXPORT lean_object* l_Lean_throwError___at_Lean_Elab_Tactic_throwNoGoalsToB static lean_object* l_Lean_Elab_Tactic_evalTactic_expandEval___lambda__4___closed__5; lean_object* l_Lean_MessageData_hasSyntheticSorry(lean_object*); LEAN_EXPORT lean_object* l_Lean_Elab_Tactic_evalTactic_expandEval___lambda__4___boxed(lean_object*, lean_object*, lean_object*, lean_object*, lean_object*, lean_object*, lean_object*, lean_object*, lean_object*, lean_object*, lean_object*, lean_object*, lean_object*); -static lean_object* l_Lean_Elab_Tactic_initFn____x40_Lean_Elab_Tactic_Basic___hyg_8853____closed__4; static lean_object* l_List_forIn_x27_loop___at_Lean_Elab_Tactic_evalTactic_handleEx___spec__4___closed__1; LEAN_EXPORT lean_object* l_Lean_instantiateMVars___at_Lean_Elab_Tactic_getMainTarget___spec__1___boxed(lean_object*, lean_object*, lean_object*, lean_object*, lean_object*, lean_object*, lean_object*, lean_object*, lean_object*, lean_object*); LEAN_EXPORT lean_object* l_Lean_Elab_Tactic_popMainGoal___boxed(lean_object*, lean_object*, lean_object*, lean_object*, lean_object*, lean_object*, lean_object*, lean_object*, lean_object*); @@ -169,13 +170,13 @@ LEAN_EXPORT lean_object* l_Lean_Elab_Tactic_SavedState_restore(lean_object*, uin LEAN_EXPORT lean_object* l_Lean_Elab_throwAbortTactic___at_Lean_Elab_Tactic_done___spec__1(lean_object*, lean_object*, lean_object*, lean_object*, lean_object*, lean_object*, lean_object*, lean_object*); LEAN_EXPORT lean_object* l_ReaderT_pure___at_Lean_Elab_Tactic_saveTacticInfoForToken___spec__1(lean_object*); LEAN_EXPORT lean_object* l_Lean_Elab_Tactic_replaceMainGoal___boxed(lean_object*, lean_object*, lean_object*, lean_object*, lean_object*, lean_object*, lean_object*, lean_object*, lean_object*, lean_object*); -LEAN_EXPORT lean_object* l_Lean_Elab_Tactic_initFn____x40_Lean_Elab_Tactic_Basic___hyg_8894_(lean_object*); uint8_t lean_string_dec_eq(lean_object*, lean_object*); LEAN_EXPORT lean_object* l_Lean_Elab_Tactic_tryTactic(lean_object*); lean_object* l_Lean_Exception_toMessageData(lean_object*); LEAN_EXPORT lean_object* l_Lean_Elab_Tactic_withCaseRef(lean_object*, lean_object*); static lean_object* l_Lean_Elab_Tactic_mkTacticAttribute___closed__4; LEAN_EXPORT lean_object* l_Lean_Elab_Tactic_run(lean_object*, lean_object*, lean_object*, lean_object*, lean_object*, lean_object*, lean_object*, lean_object*, lean_object*); +LEAN_EXPORT lean_object* l_Lean_Elab_admitGoal___lambda__1___boxed(lean_object*, lean_object*, lean_object*, lean_object*, lean_object*, lean_object*, lean_object*, lean_object*); static lean_object* l_Lean_Elab_Term_withoutTacticIncrementality___at_Lean_Elab_Tactic_evalTactic_eval___spec__4___closed__2; LEAN_EXPORT lean_object* l_Lean_Elab_Tactic_throwNoGoalsToBeSolved___rarg(lean_object*, lean_object*, 
lean_object*, lean_object*, lean_object*, lean_object*, lean_object*, lean_object*, lean_object*); LEAN_EXPORT lean_object* l_Lean_Elab_Tactic_getMainModule___rarg___boxed(lean_object*, lean_object*); @@ -195,6 +196,7 @@ LEAN_EXPORT lean_object* l_Lean_Elab_liftMacroM___at_Lean_Elab_Tactic_evalTactic LEAN_EXPORT lean_object* l_Lean_Elab_Tactic_pushGoals(lean_object*, lean_object*, lean_object*, lean_object*, lean_object*, lean_object*, lean_object*, lean_object*, lean_object*, lean_object*); lean_object* l_Lean_MVarId_assign___at_Lean_Meta_getLevel___spec__1(lean_object*, lean_object*, lean_object*, lean_object*, lean_object*, lean_object*, lean_object*); lean_object* l_Lean_InternalExceptionId_getName(lean_object*, lean_object*); +LEAN_EXPORT lean_object* l_Lean_Elab_Tactic_initFn____x40_Lean_Elab_Tactic_Basic___hyg_8856_(lean_object*); LEAN_EXPORT lean_object* l_Lean_Elab_logException___at_Lean_Elab_Tactic_closeUsingOrAdmit___spec__1___boxed(lean_object*, lean_object*, lean_object*, lean_object*, lean_object*, lean_object*, lean_object*, lean_object*, lean_object*, lean_object*); LEAN_EXPORT lean_object* l_Lean_Elab_Tactic_evalTactic_eval___lambda__2(lean_object*, lean_object*, lean_object*, lean_object*, lean_object*, lean_object*, lean_object*, lean_object*, lean_object*, lean_object*, lean_object*, lean_object*); LEAN_EXPORT lean_object* l_Lean_throwError___at_Lean_Elab_Tactic_instAlternativeTacticM___spec__1(lean_object*); @@ -215,12 +217,12 @@ LEAN_EXPORT lean_object* l_Lean_Elab_Tactic_mkTacticAttribute(lean_object*); LEAN_EXPORT lean_object* l_Lean_Elab_Tactic_getUnsolvedGoals(lean_object*, lean_object*, lean_object*, lean_object*, lean_object*, lean_object*, lean_object*, lean_object*, lean_object*); LEAN_EXPORT lean_object* l_Lean_Elab_Tactic_focusAndDone___rarg(lean_object*, lean_object*, lean_object*, lean_object*, lean_object*, lean_object*, lean_object*, lean_object*, lean_object*, lean_object*); static lean_object* l_Lean_Elab_Tactic_instMonadExceptExceptionTacticM___closed__1; +LEAN_EXPORT lean_object* l_Lean_Elab_admitGoal___boxed(lean_object*, lean_object*, lean_object*, lean_object*, lean_object*, lean_object*, lean_object*); LEAN_EXPORT lean_object* l_Lean_Elab_Tactic_instMonadTacticM___lambda__1___boxed(lean_object*, lean_object*, lean_object*, lean_object*, lean_object*, lean_object*, lean_object*, lean_object*, lean_object*, lean_object*, lean_object*); lean_object* lean_task_pure(lean_object*); LEAN_EXPORT lean_object* l_Lean_Elab_Tactic_pushGoal(lean_object*, lean_object*, lean_object*, lean_object*, lean_object*, lean_object*, lean_object*, lean_object*, lean_object*, lean_object*); lean_object* lean_checked_assign(lean_object*, lean_object*, lean_object*, lean_object*, lean_object*, lean_object*, lean_object*); LEAN_EXPORT lean_object* l_List_forIn_x27_loop___at_Lean_Elab_Tactic_evalTactic_eval___spec__1___boxed(lean_object*, lean_object*, lean_object*, lean_object*, lean_object*, lean_object*, lean_object*, lean_object*, lean_object*, lean_object*, lean_object*, lean_object*, lean_object*, lean_object*, lean_object*); -static lean_object* l_Lean_Elab_Tactic_initFn____x40_Lean_Elab_Tactic_Basic___hyg_8853____closed__12; LEAN_EXPORT lean_object* l_List_forM___at_Lean_Elab_Term_reportUnsolvedGoals___spec__3(lean_object*, lean_object*, lean_object*, lean_object*, lean_object*, lean_object*); static lean_object* l_Lean_Elab_Tactic_evalTactic___lambda__6___closed__8; static lean_object* 
l_Lean_withTraceNode___at_Lean_Elab_Tactic_evalTactic___spec__3___lambda__4___closed__1; @@ -229,6 +231,7 @@ lean_object* lean_st_ref_take(lean_object*, lean_object*); LEAN_EXPORT lean_object* l_Lean_Elab_Tactic_getCurrMacroScope(lean_object*, lean_object*, lean_object*, lean_object*, lean_object*, lean_object*); lean_object* l_liftExcept___at_Lean_Elab_liftMacroM___spec__1(lean_object*, lean_object*, lean_object*); LEAN_EXPORT lean_object* l_Lean_Elab_Tactic_withTacticInfoContext___rarg___lambda__1(lean_object*, lean_object*, lean_object*, lean_object*, lean_object*, lean_object*, lean_object*, lean_object*, lean_object*, lean_object*, lean_object*); +static lean_object* l_Lean_Elab_Tactic_initFn____x40_Lean_Elab_Tactic_Basic___hyg_8856____closed__4; LEAN_EXPORT lean_object* l_Lean_Elab_Tactic_evalTactic_expandEval___lambda__1(lean_object*, lean_object*, lean_object*, lean_object*, lean_object*, lean_object*, lean_object*, lean_object*, lean_object*, lean_object*, lean_object*); LEAN_EXPORT lean_object* l_Lean_throwError___at_Lean_Elab_Tactic_evalTactic_expandEval___spec__4(lean_object*, lean_object*, lean_object*, lean_object*, lean_object*, lean_object*, lean_object*, lean_object*, lean_object*, lean_object*); lean_object* l_Lean_Elab_expandMacroImpl_x3f(lean_object*, lean_object*, lean_object*, lean_object*); @@ -284,6 +287,7 @@ LEAN_EXPORT lean_object* l_Lean_Elab_Tactic_evalTactic_expandEval(lean_object*, LEAN_EXPORT lean_object* l_Lean_Elab_Tactic_adaptExpander(lean_object*, lean_object*, lean_object*, lean_object*, lean_object*, lean_object*, lean_object*, lean_object*, lean_object*, lean_object*, lean_object*); LEAN_EXPORT lean_object* l_Lean_Elab_Tactic_instMonadTacticM; LEAN_EXPORT lean_object* l_List_forIn_x27_loop___at_Lean_Elab_Tactic_evalTactic_eval___spec__1(lean_object*, lean_object*, lean_object*, lean_object*, lean_object*, lean_object*, lean_object*, lean_object*, lean_object*, lean_object*, lean_object*, lean_object*, lean_object*, lean_object*, lean_object*); +static lean_object* l_Lean_Elab_Tactic_initFn____x40_Lean_Elab_Tactic_Basic___hyg_8856____closed__13; LEAN_EXPORT lean_object* l_Lean_Elab_Tactic_withMainContext___rarg(lean_object*, lean_object*, lean_object*, lean_object*, lean_object*, lean_object*, lean_object*, lean_object*, lean_object*, lean_object*); LEAN_EXPORT lean_object* l_Lean_Elab_Tactic_instMonadBacktrackSavedStateTacticM; LEAN_EXPORT lean_object* l_Lean_Elab_Tactic_evalTactic___lambda__3(lean_object*, lean_object*, lean_object*, lean_object*, lean_object*, lean_object*, lean_object*, lean_object*, lean_object*, lean_object*, lean_object*, lean_object*, lean_object*); @@ -302,7 +306,6 @@ LEAN_EXPORT lean_object* l___private_Lean_Util_Trace_0__Lean_getResetTraces___at static lean_object* l_Lean_throwMaxRecDepthAt___at_Lean_Elab_Tactic_evalTactic_expandEval___spec__5___closed__1; LEAN_EXPORT lean_object* l_Lean_Elab_Tactic_liftMetaMAtMain___rarg___lambda__1___boxed(lean_object*, lean_object*, lean_object*, lean_object*, lean_object*, lean_object*, lean_object*, lean_object*, lean_object*, lean_object*); lean_object* lean_st_mk_ref(lean_object*, lean_object*); -static lean_object* l_Lean_Elab_Tactic_initFn____x40_Lean_Elab_Tactic_Basic___hyg_8853____closed__9; LEAN_EXPORT lean_object* l_Lean_withTraceNode___at_Lean_Elab_Tactic_evalTactic___spec__3___lambda__3(lean_object*, uint8_t, lean_object*, lean_object*, lean_object*, uint8_t, double, double, lean_object*, lean_object*, lean_object*, lean_object*, lean_object*, lean_object*, lean_object*, 
lean_object*, lean_object*, lean_object*, lean_object*); LEAN_EXPORT lean_object* l_Lean_Elab_Tactic_instAlternativeTacticM___lambda__1(lean_object*, lean_object*, lean_object*, lean_object*, lean_object*, lean_object*, lean_object*, lean_object*, lean_object*, lean_object*); lean_object* l_ReaderT_instApplicativeOfMonad___rarg(lean_object*); @@ -320,6 +323,7 @@ LEAN_EXPORT lean_object* l_Lean_Elab_Tactic_getMainDecl(lean_object*, lean_objec lean_object* l_Lean_Name_append(lean_object*, lean_object*); LEAN_EXPORT lean_object* l_Lean_Elab_Tactic_orElse(lean_object*); uint8_t l_Lean_checkTraceOption(lean_object*, lean_object*, lean_object*); +static lean_object* l_Lean_Elab_Tactic_initFn____x40_Lean_Elab_Tactic_Basic___hyg_8856____closed__1; LEAN_EXPORT lean_object* l_Lean_Elab_Tactic_evalTactic_eval(lean_object*, lean_object*, lean_object*, lean_object*, lean_object*, lean_object*, lean_object*, lean_object*, lean_object*, lean_object*, lean_object*, lean_object*, lean_object*); LEAN_EXPORT lean_object* l_Lean_Elab_Tactic_instInhabitedTacticM___rarg(lean_object*); lean_object* lean_io_mono_nanos_now(lean_object*); @@ -343,22 +347,19 @@ LEAN_EXPORT lean_object* l_List_forIn_x27_loop___at_Lean_Elab_Tactic_tagUntagged LEAN_EXPORT lean_object* l_Lean_profileitM___at_Lean_Elab_Tactic_evalTactic___spec__9(lean_object*); extern lean_object* l_Task_Priority_default; lean_object* l_Lean_Core_withFreshMacroScope___rarg(lean_object*, lean_object*, lean_object*, lean_object*); -static lean_object* l_Lean_Elab_Tactic_initFn____x40_Lean_Elab_Tactic_Basic___hyg_8853____closed__8; LEAN_EXPORT lean_object* l_Lean_withTraceNode___at_Lean_Elab_Tactic_evalTactic___spec__3___lambda__2(lean_object*, uint8_t, lean_object*, lean_object*, lean_object*, lean_object*, uint8_t, double, double, lean_object*, lean_object*, lean_object*, lean_object*, lean_object*, lean_object*, lean_object*, lean_object*, lean_object*, lean_object*); -static lean_object* l_Lean_Elab_Tactic_initFn____x40_Lean_Elab_Tactic_Basic___hyg_8853____closed__16; LEAN_EXPORT lean_object* l_Lean_Elab_Tactic_evalTactic___lambda__1___boxed(lean_object*, lean_object*, lean_object*); LEAN_EXPORT lean_object* l_Lean_Elab_Tactic_evalTactic_expandEval___lambda__6(lean_object*, lean_object*, lean_object*, lean_object*, lean_object*, lean_object*, lean_object*, lean_object*, lean_object*, lean_object*, lean_object*, lean_object*, lean_object*, lean_object*, lean_object*, lean_object*); uint8_t lean_name_eq(lean_object*, lean_object*); LEAN_EXPORT lean_object* l_Lean_Elab_getResetInfoTrees___at_Lean_Elab_Tactic_withTacticInfoContext___spec__2(lean_object*, lean_object*, lean_object*, lean_object*, lean_object*, lean_object*, lean_object*); LEAN_EXPORT lean_object* l_Lean_Elab_Tactic_pruneSolvedGoals(lean_object*, lean_object*, lean_object*, lean_object*, lean_object*, lean_object*, lean_object*, lean_object*, lean_object*); -static lean_object* l_Lean_Elab_Tactic_initFn____x40_Lean_Elab_Tactic_Basic___hyg_8853____closed__10; lean_object* l_Lean_Name_str___override(lean_object*, lean_object*); static lean_object* l_Lean_Elab_Tactic_closeMainGoal___lambda__1___closed__3; LEAN_EXPORT lean_object* l_Lean_Elab_Tactic_evalTactic_eval___lambda__1(lean_object*, lean_object*, lean_object*, lean_object*, lean_object*, lean_object*, lean_object*, lean_object*, lean_object*, lean_object*, lean_object*, lean_object*, lean_object*, lean_object*, lean_object*); extern lean_object* l_Lean_Elab_macroAttribute; extern lean_object* l_Lean_warningAsError; +static lean_object* 
l_Lean_Elab_Tactic_initFn____x40_Lean_Elab_Tactic_Basic___hyg_8856____closed__5; LEAN_EXPORT lean_object* l_Lean_Elab_Tactic_getUnsolvedGoals___boxed(lean_object*, lean_object*, lean_object*, lean_object*, lean_object*, lean_object*, lean_object*, lean_object*, lean_object*); -static lean_object* l_Lean_Elab_Tactic_initFn____x40_Lean_Elab_Tactic_Basic___hyg_8853____closed__6; static lean_object* l_Lean_Elab_Tactic_mkTacticAttribute___closed__1; static lean_object* l_Lean_Elab_Tactic_closeMainGoal___lambda__1___closed__2; uint8_t l_Lean_Option_get___at___private_Lean_Util_Profile_0__Lean_get__profiler___spec__1(lean_object*, lean_object*); @@ -400,20 +401,20 @@ static lean_object* l_Lean_Elab_Tactic_evalTactic___closed__1; LEAN_EXPORT lean_object* l___private_Lean_Util_Trace_0__Lean_getResetTraces___at_Lean_Elab_Tactic_evalTactic___spec__4___rarg___boxed(lean_object*, lean_object*); LEAN_EXPORT lean_object* l_Lean_Elab_Tactic_evalTactic_expandEval___lambda__6___boxed(lean_object*, lean_object*, lean_object*, lean_object*, lean_object*, lean_object*, lean_object*, lean_object*, lean_object*, lean_object*, lean_object*, lean_object*, lean_object*, lean_object*, lean_object*, lean_object*); uint8_t l_Lean_Environment_contains(lean_object*, lean_object*, uint8_t); -static lean_object* l_Lean_Elab_Tactic_initFn____x40_Lean_Elab_Tactic_Basic___hyg_8853____closed__14; LEAN_EXPORT lean_object* l_Lean_Elab_Tactic_evalTactic___lambda__2___boxed(lean_object*, lean_object*, lean_object*, lean_object*, lean_object*, lean_object*, lean_object*, lean_object*, lean_object*, lean_object*, lean_object*); LEAN_EXPORT lean_object* l_Lean_Elab_Tactic_throwNoGoalsToBeSolved(lean_object*); lean_object* l_Array_mapMUnsafe_map___at___private_Lean_Util_Trace_0__Lean_addTraceNode___spec__1(size_t, size_t, lean_object*); LEAN_EXPORT lean_object* l_Lean_Elab_Tactic_focusAndDone___rarg___lambda__1(lean_object*, lean_object*, lean_object*, lean_object*, lean_object*, lean_object*, lean_object*, lean_object*, lean_object*, lean_object*); +LEAN_EXPORT lean_object* l_Lean_Elab_Tactic_initFn____x40_Lean_Elab_Tactic_Basic___hyg_8897_(lean_object*); LEAN_EXPORT lean_object* l_Lean_throwErrorAt___at_Lean_Elab_Tactic_evalTactic___spec__1___boxed(lean_object*, lean_object*, lean_object*, lean_object*, lean_object*, lean_object*, lean_object*, lean_object*, lean_object*, lean_object*, lean_object*); double l_Float_ofScientific(lean_object*, uint8_t, lean_object*); LEAN_EXPORT lean_object* l_Lean_Elab_throwUnsupportedSyntax___at_Lean_Elab_Tactic_evalTactic_expandEval___spec__6___rarg(lean_object*); LEAN_EXPORT lean_object* l_Lean_Elab_Tactic_pruneSolvedGoals___boxed(lean_object*, lean_object*, lean_object*, lean_object*, lean_object*, lean_object*, lean_object*, lean_object*, lean_object*); +static lean_object* l_Lean_Elab_Tactic_initFn____x40_Lean_Elab_Tactic_Basic___hyg_8856____closed__11; lean_object* lean_name_append_index_after(lean_object*, lean_object*); LEAN_EXPORT lean_object* l_Lean_Elab_Tactic_saveState___rarg(lean_object*, lean_object*, lean_object*, lean_object*, lean_object*, lean_object*, lean_object*, lean_object*); LEAN_EXPORT lean_object* l_Lean_Elab_Tactic_getNameOfIdent_x27___boxed(lean_object*); size_t lean_usize_of_nat(lean_object*); -static lean_object* l_Lean_Elab_Tactic_initFn____x40_Lean_Elab_Tactic_Basic___hyg_8853____closed__15; LEAN_EXPORT lean_object* l_Lean_Elab_Tactic_closeUsingOrAdmit(lean_object*, lean_object*, lean_object*, lean_object*, lean_object*, lean_object*, lean_object*, lean_object*, 
lean_object*, lean_object*); LEAN_EXPORT lean_object* l___private_Lean_Util_Trace_0__Lean_getResetTraces___at_Lean_Elab_Tactic_evalTactic___spec__4___boxed(lean_object*, lean_object*, lean_object*, lean_object*, lean_object*, lean_object*, lean_object*); LEAN_EXPORT lean_object* l_Lean_withTraceNode___at_Lean_Elab_Tactic_evalTactic___spec__3___lambda__1(lean_object*, lean_object*, lean_object*, lean_object*, lean_object*, lean_object*, lean_object*, lean_object*, lean_object*, lean_object*, lean_object*, lean_object*, lean_object*, lean_object*, lean_object*); @@ -433,12 +434,14 @@ LEAN_EXPORT lean_object* l_Lean_logAt___at_Lean_Elab_Term_reportUnsolvedGoals___ static lean_object* l_Lean_logAt___at_Lean_Elab_Term_reportUnsolvedGoals___spec__2___lambda__1___closed__1; LEAN_EXPORT lean_object* l_Lean_Elab_Tactic_evalTactic___lambda__5(lean_object*, lean_object*, lean_object*, lean_object*, lean_object*, lean_object*, lean_object*, lean_object*, lean_object*); extern lean_object* l_Lean_NameSet_empty; +static lean_object* l_Lean_Elab_Tactic_initFn____x40_Lean_Elab_Tactic_Basic___hyg_8856____closed__9; static lean_object* l_Lean_Elab_Tactic_evalTactic_expandEval___lambda__3___closed__4; LEAN_EXPORT lean_object* l_Lean_Elab_Tactic_getCurrMacroScope___rarg___boxed(lean_object*, lean_object*, lean_object*); static lean_object* l_Lean_throwMaxRecDepthAt___at_Lean_Elab_Tactic_evalTactic_expandEval___spec__5___closed__4; LEAN_EXPORT lean_object* l_Lean_throwErrorAt___at_Lean_Elab_Tactic_evalTactic_expandEval___spec__3(lean_object*, lean_object*, lean_object*, lean_object*, lean_object*, lean_object*, lean_object*, lean_object*, lean_object*, lean_object*, lean_object*); static lean_object* l_Lean_Elab_Tactic_evalTactic___lambda__4___closed__3; LEAN_EXPORT lean_object* l_Lean_profileitM___at_Lean_Elab_Tactic_evalTactic___spec__9___rarg___boxed(lean_object*, lean_object*, lean_object*, lean_object*, lean_object*, lean_object*, lean_object*, lean_object*, lean_object*, lean_object*, lean_object*, lean_object*, lean_object*); +static lean_object* l_Lean_Elab_Tactic_initFn____x40_Lean_Elab_Tactic_Basic___hyg_8856____closed__16; LEAN_EXPORT lean_object* l_Lean_Elab_liftMacroM___at_Lean_Elab_Tactic_evalTactic_expandEval___spec__1___lambda__3___boxed(lean_object*, lean_object*, lean_object*, lean_object*, lean_object*, lean_object*); LEAN_EXPORT lean_object* l_Lean_Elab_Tactic_evalTacticAtRaw(lean_object*, lean_object*, lean_object*, lean_object*, lean_object*, lean_object*, lean_object*, lean_object*, lean_object*, lean_object*, lean_object*); uint8_t lean_nat_dec_eq(lean_object*, lean_object*); @@ -457,6 +460,7 @@ LEAN_EXPORT lean_object* l_ReaderT_pure___at_Lean_Elab_Tactic_saveTacticInfoForT LEAN_EXPORT lean_object* l_List_forM___at_Lean_Elab_Tactic_evalTactic_expandEval___spec__2(lean_object*, lean_object*, lean_object*, lean_object*, lean_object*, lean_object*, lean_object*, lean_object*, lean_object*, lean_object*); static lean_object* l_Lean_Elab_Tactic_closeMainGoal___lambda__1___closed__4; lean_object* l_Lean_indentExpr(lean_object*); +static lean_object* l_Lean_Elab_Tactic_initFn____x40_Lean_Elab_Tactic_Basic___hyg_8856____closed__10; LEAN_EXPORT lean_object* l_Lean_throwError___at_Lean_Elab_Tactic_evalTactic_throwExs___spec__2___boxed(lean_object*, lean_object*, lean_object*, lean_object*, lean_object*, lean_object*, lean_object*, lean_object*, lean_object*, lean_object*); LEAN_EXPORT lean_object* l_Lean_addTrace___at_Lean_Elab_Tactic_evalTactic_handleEx___spec__3___boxed(lean_object*, 
lean_object*, lean_object*, lean_object*, lean_object*, lean_object*, lean_object*, lean_object*, lean_object*, lean_object*, lean_object*); LEAN_EXPORT lean_object* l___private_Lean_Util_Trace_0__Lean_addTraceNode___at_Lean_Elab_Tactic_evalTactic___spec__5(lean_object*, lean_object*, lean_object*, lean_object*, lean_object*, lean_object*, lean_object*, lean_object*, lean_object*, lean_object*, lean_object*, lean_object*, lean_object*); @@ -472,19 +476,20 @@ uint8_t l_Lean_KVMap_getBool(lean_object*, lean_object*, uint8_t); LEAN_EXPORT lean_object* l_Lean_Elab_Term_withoutTacticIncrementality___at_Lean_Elab_Tactic_evalTactic_eval___spec__4___boxed(lean_object*, lean_object*, lean_object*, lean_object*, lean_object*, lean_object*, lean_object*, lean_object*, lean_object*, lean_object*, lean_object*); lean_object* l_Lean_addMessageContextFull___at_Lean_Meta_instAddMessageContextMetaM___spec__1(lean_object*, lean_object*, lean_object*, lean_object*, lean_object*, lean_object*); LEAN_EXPORT lean_object* l_Lean_Elab_Tactic_withMacroExpansion___rarg(lean_object*, lean_object*, lean_object*, lean_object*, lean_object*, lean_object*, lean_object*, lean_object*, lean_object*, lean_object*, lean_object*, lean_object*); -static lean_object* l_Lean_Elab_Tactic_initFn____x40_Lean_Elab_Tactic_Basic___hyg_8853____closed__3; LEAN_EXPORT lean_object* l_Lean_Elab_Tactic_appendGoals___boxed(lean_object*, lean_object*, lean_object*, lean_object*, lean_object*, lean_object*, lean_object*, lean_object*, lean_object*, lean_object*); LEAN_EXPORT lean_object* l_Lean_isTracingEnabledFor___at_Lean_Elab_Tactic_evalTactic_handleEx___spec__1(lean_object*, lean_object*, lean_object*, lean_object*, lean_object*, lean_object*, lean_object*, lean_object*, lean_object*, lean_object*); static lean_object* l_Lean_Elab_Tactic_evalTactic___lambda__4___closed__2; static lean_object* l_List_forIn_x27_loop___at_Lean_Elab_Tactic_tagUntaggedGoals___spec__2___closed__1; LEAN_EXPORT lean_object* l_Lean_Elab_Tactic_withCaseRef___rarg___lambda__1(lean_object*, lean_object*, lean_object*, lean_object*); -static lean_object* l_Lean_Elab_Tactic_initFn____x40_Lean_Elab_Tactic_Basic___hyg_8853____closed__13; lean_object* lean_task_map(lean_object*, lean_object*, lean_object*, uint8_t); lean_object* l_Lean_ResolveName_resolveGlobalName(lean_object*, lean_object*, lean_object*, lean_object*); lean_object* lean_nat_sub(lean_object*, lean_object*); +static lean_object* l_Lean_Elab_Tactic_initFn____x40_Lean_Elab_Tactic_Basic___hyg_8856____closed__3; static lean_object* l_Lean_Elab_Tactic_evalTactic___lambda__4___closed__5; LEAN_EXPORT lean_object* l_Lean_Elab_Tactic_evalTactic_handleEx___lambda__1(lean_object*, lean_object*, lean_object*, lean_object*, lean_object*, lean_object*, lean_object*, lean_object*, lean_object*, lean_object*, lean_object*, lean_object*, lean_object*, lean_object*); LEAN_EXPORT lean_object* l_Lean_Elab_Tactic_withMainContext(lean_object*); +static lean_object* l_Lean_Elab_Tactic_initFn____x40_Lean_Elab_Tactic_Basic___hyg_8856____closed__14; +static lean_object* l_Lean_Elab_Tactic_initFn____x40_Lean_Elab_Tactic_Basic___hyg_8856____closed__15; LEAN_EXPORT lean_object* l_Lean_Elab_Tactic_withRestoreOrSaveFull___rarg(lean_object*, lean_object*, lean_object*, lean_object*, lean_object*, lean_object*, lean_object*, lean_object*, lean_object*, lean_object*, lean_object*, lean_object*); LEAN_EXPORT lean_object* l_Lean_Elab_throwAbortTactic___at_Lean_Elab_Tactic_done___spec__1___boxed(lean_object*, lean_object*, lean_object*, 
lean_object*, lean_object*, lean_object*, lean_object*, lean_object*); LEAN_EXPORT lean_object* l_Lean_withTraceNode___at_Lean_Elab_Tactic_evalTactic___spec__3___lambda__1___boxed(lean_object*, lean_object*, lean_object*, lean_object*, lean_object*, lean_object*, lean_object*, lean_object*, lean_object*, lean_object*, lean_object*, lean_object*, lean_object*, lean_object*, lean_object*); @@ -495,10 +500,10 @@ LEAN_EXPORT lean_object* l_List_forIn_x27_loop___at_Lean_Elab_Tactic_evalTactic_ extern lean_object* l_Lean_Elab_incrementalAttr; static lean_object* l_Lean_Elab_Tactic_instOrElseTacticM___closed__1; static lean_object* l_Lean_Elab_Tactic_evalTactic_handleEx___closed__2; -static lean_object* l_Lean_Elab_Tactic_initFn____x40_Lean_Elab_Tactic_Basic___hyg_8853____closed__11; lean_object* l_List_reverse___rarg(lean_object*); LEAN_EXPORT uint8_t l_Lean_Elab_Term_withoutTacticIncrementality___at_Lean_Elab_Tactic_evalTactic_eval___spec__4___lambda__1(uint8_t, lean_object*); LEAN_EXPORT lean_object* l_Lean_Elab_Tactic_tryCatch___rarg(lean_object*, lean_object*, lean_object*, lean_object*, lean_object*, lean_object*, lean_object*, lean_object*, lean_object*, lean_object*, lean_object*); +static lean_object* l_Lean_Elab_Tactic_initFn____x40_Lean_Elab_Tactic_Basic___hyg_8856____closed__6; LEAN_EXPORT lean_object* l_Lean_Elab_Tactic_adaptExpander___lambda__1(lean_object*, lean_object*, lean_object*, lean_object*, lean_object*, lean_object*, lean_object*, lean_object*, lean_object*, lean_object*, lean_object*); LEAN_EXPORT lean_object* l_Lean_Elab_Tactic_mkInitialTacticInfo___boxed(lean_object*, lean_object*, lean_object*, lean_object*, lean_object*, lean_object*, lean_object*, lean_object*, lean_object*, lean_object*); lean_object* lean_array_mk(lean_object*); @@ -522,7 +527,6 @@ LEAN_EXPORT lean_object* l_Lean_Elab_Tactic_getMainGoal_loop___boxed(lean_object LEAN_EXPORT lean_object* l_Lean_Elab_Tactic_done___boxed(lean_object*, lean_object*, lean_object*, lean_object*, lean_object*, lean_object*, lean_object*, lean_object*, lean_object*); lean_object* lean_array_uget(lean_object*, size_t); size_t lean_array_size(lean_object*); -static lean_object* l_Lean_Elab_Tactic_initFn____x40_Lean_Elab_Tactic_Basic___hyg_8853____closed__7; LEAN_EXPORT lean_object* l_Lean_throwError___at_Lean_Elab_Tactic_evalTactic___spec__2(lean_object*, lean_object*, lean_object*, lean_object*, lean_object*, lean_object*, lean_object*, lean_object*, lean_object*, lean_object*); LEAN_EXPORT lean_object* l_Lean_Elab_withInfoTreeContext___at_Lean_Elab_Tactic_withMacroExpansion___spec__2(lean_object*); extern lean_object* l_Lean_trace_profiler; @@ -544,7 +548,6 @@ LEAN_EXPORT lean_object* l_Lean_throwError___at_Lean_Elab_Tactic_throwNoGoalsToB LEAN_EXPORT lean_object* l_Lean_Elab_Tactic_getMainTag___boxed(lean_object*, lean_object*, lean_object*, lean_object*, lean_object*, lean_object*, lean_object*, lean_object*, lean_object*); LEAN_EXPORT lean_object* l_List_filterAuxM___at_Lean_Elab_Tactic_pruneSolvedGoals___spec__2(lean_object*, lean_object*, lean_object*, lean_object*, lean_object*, lean_object*, lean_object*, lean_object*, lean_object*, lean_object*, lean_object*); LEAN_EXPORT lean_object* l_Lean_Elab_throwAbortTactic___at_Lean_Elab_Tactic_done___spec__1___rarg(lean_object*); -static lean_object* l_Lean_Elab_Tactic_initFn____x40_Lean_Elab_Tactic_Basic___hyg_8894____closed__1; LEAN_EXPORT lean_object* l_Lean_Elab_Tactic_evalTactic(lean_object*, lean_object*, lean_object*, lean_object*, lean_object*, lean_object*, 
lean_object*, lean_object*, lean_object*, lean_object*); LEAN_EXPORT lean_object* l_Lean_throwError___at_Lean_Elab_Tactic_evalTactic___spec__2___boxed(lean_object*, lean_object*, lean_object*, lean_object*, lean_object*, lean_object*, lean_object*, lean_object*, lean_object*, lean_object*); LEAN_EXPORT lean_object* l___private_Lean_Elab_Tactic_Basic_0__Lean_Elab_Tactic_TacticM_runCore(lean_object*); @@ -567,10 +570,9 @@ LEAN_EXPORT lean_object* l_Lean_Elab_Tactic_ensureHasNoMVars___boxed(lean_object lean_object* lean_infer_type(lean_object*, lean_object*, lean_object*, lean_object*, lean_object*, lean_object*); uint8_t lean_nat_dec_le(lean_object*, lean_object*); static lean_object* l_Lean_throwMaxRecDepthAt___at_Lean_Elab_Tactic_evalTactic_expandEval___spec__5___closed__5; -static lean_object* l_Lean_Elab_Tactic_initFn____x40_Lean_Elab_Tactic_Basic___hyg_8853____closed__2; extern lean_object* l_Lean_Elab_unsupportedSyntaxExceptionId; LEAN_EXPORT lean_object* l_Lean_throwErrorAt___at_Lean_Elab_Tactic_evalTactic_throwExs___spec__1___boxed(lean_object*, lean_object*, lean_object*, lean_object*, lean_object*, lean_object*, lean_object*, lean_object*, lean_object*, lean_object*, lean_object*); -LEAN_EXPORT lean_object* l_Lean_Elab_admitGoal(lean_object*, lean_object*, lean_object*, lean_object*, lean_object*, lean_object*); +LEAN_EXPORT lean_object* l_Lean_Elab_admitGoal(lean_object*, uint8_t, lean_object*, lean_object*, lean_object*, lean_object*, lean_object*); LEAN_EXPORT lean_object* l_Lean_withTraceNode___at_Lean_Elab_Tactic_evalTactic___spec__3(lean_object*, lean_object*, lean_object*, uint8_t, lean_object*, lean_object*, lean_object*, lean_object*, lean_object*, lean_object*, lean_object*, lean_object*, lean_object*, lean_object*); LEAN_EXPORT lean_object* l_Lean_Elab_Tactic_setGoals(lean_object*, lean_object*, lean_object*, lean_object*, lean_object*, lean_object*, lean_object*, lean_object*, lean_object*, lean_object*); static lean_object* l_Lean_Elab_Tactic_instMonadTacticM___closed__5; @@ -598,13 +600,13 @@ LEAN_EXPORT lean_object* l_Lean_Elab_Tactic_liftMetaTactic(lean_object*, lean_ob LEAN_EXPORT lean_object* l_Lean_Elab_Tactic_done(lean_object*, lean_object*, lean_object*, lean_object*, lean_object*, lean_object*, lean_object*, lean_object*, lean_object*); LEAN_EXPORT lean_object* l_Lean_Elab_Tactic_run___lambda__1(lean_object*, lean_object*, lean_object*, lean_object*, lean_object*, lean_object*, lean_object*, lean_object*, lean_object*); LEAN_EXPORT lean_object* l_Lean_Elab_getResetInfoTrees___at_Lean_Elab_Tactic_withTacticInfoContext___spec__2___boxed(lean_object*, lean_object*, lean_object*, lean_object*, lean_object*, lean_object*, lean_object*); +static lean_object* l_Lean_Elab_Tactic_initFn____x40_Lean_Elab_Tactic_Basic___hyg_8897____closed__1; static lean_object* l_Lean_logAt___at_Lean_Elab_Term_reportUnsolvedGoals___spec__2___lambda__2___closed__2; lean_object* lean_mk_empty_array_with_capacity(lean_object*); static lean_object* l_Lean_Elab_Tactic_instMonadTacticM___closed__3; LEAN_EXPORT lean_object* l_Lean_Elab_Tactic_pushGoals___boxed(lean_object*, lean_object*, lean_object*, lean_object*, lean_object*, lean_object*, lean_object*, lean_object*, lean_object*, lean_object*); LEAN_EXPORT lean_object* l_List_forIn_x27_loop___at_Lean_Elab_Tactic_tagUntaggedGoals___spec__2(lean_object*, lean_object*, lean_object*, lean_object*, lean_object*, lean_object*, lean_object*, lean_object*, lean_object*); LEAN_EXPORT lean_object* 
l_Lean_MVarId_withContext___at_Lean_Elab_Tactic_run___spec__1(lean_object*); -static lean_object* l_Lean_Elab_Tactic_initFn____x40_Lean_Elab_Tactic_Basic___hyg_8853____closed__5; lean_object* l_Lean_MessageLog_add(lean_object*, lean_object*); lean_object* l_ReaderT_instMonad___rarg(lean_object*); extern lean_object* l_Lean_Elab_abortTacticExceptionId; @@ -617,112 +619,134 @@ uint8_t l_Lean_Expr_hasExprMVar(lean_object*); static lean_object* l_Lean_Elab_goalsToMessageData___closed__1; LEAN_EXPORT lean_object* l_Lean_Elab_Tactic_tryTactic___rarg(lean_object*, lean_object*, lean_object*, lean_object*, lean_object*, lean_object*, lean_object*, lean_object*, lean_object*, lean_object*); LEAN_EXPORT lean_object* l_Lean_Elab_Tactic_withoutRecover(lean_object*); -LEAN_EXPORT lean_object* l_Lean_Elab_admitGoal___lambda__1(lean_object* x_1, lean_object* x_2, lean_object* x_3, lean_object* x_4, lean_object* x_5, lean_object* x_6, lean_object* x_7) { +LEAN_EXPORT lean_object* l_Lean_Elab_admitGoal___lambda__1(lean_object* x_1, uint8_t x_2, lean_object* x_3, lean_object* x_4, lean_object* x_5, lean_object* x_6, lean_object* x_7, lean_object* x_8) { _start: { -lean_object* x_8; +lean_object* x_9; +lean_inc(x_7); lean_inc(x_6); lean_inc(x_5); lean_inc(x_4); -lean_inc(x_3); -x_8 = lean_infer_type(x_1, x_3, x_4, x_5, x_6, x_7); -if (lean_obj_tag(x_8) == 0) +x_9 = lean_infer_type(x_1, x_4, x_5, x_6, x_7, x_8); +if (lean_obj_tag(x_9) == 0) { -lean_object* x_9; lean_object* x_10; uint8_t x_11; lean_object* x_12; -x_9 = lean_ctor_get(x_8, 0); -lean_inc(x_9); -x_10 = lean_ctor_get(x_8, 1); +lean_object* x_10; lean_object* x_11; uint8_t x_12; lean_object* x_13; +x_10 = lean_ctor_get(x_9, 0); lean_inc(x_10); -lean_dec(x_8); -x_11 = 1; +x_11 = lean_ctor_get(x_9, 1); +lean_inc(x_11); +lean_dec(x_9); +x_12 = 1; +lean_inc(x_7); lean_inc(x_6); lean_inc(x_5); lean_inc(x_4); -lean_inc(x_3); -x_12 = l_Lean_Meta_mkLabeledSorry(x_9, x_11, x_11, x_3, x_4, x_5, x_6, x_10); -if (lean_obj_tag(x_12) == 0) +x_13 = l_Lean_Meta_mkLabeledSorry(x_10, x_2, x_12, x_4, x_5, x_6, x_7, x_11); +if (lean_obj_tag(x_13) == 0) { -lean_object* x_13; lean_object* x_14; lean_object* x_15; -x_13 = lean_ctor_get(x_12, 0); -lean_inc(x_13); -x_14 = lean_ctor_get(x_12, 1); +lean_object* x_14; lean_object* x_15; lean_object* x_16; +x_14 = lean_ctor_get(x_13, 0); lean_inc(x_14); -lean_dec(x_12); -x_15 = l_Lean_MVarId_assign___at_Lean_Meta_getLevel___spec__1(x_2, x_13, x_3, x_4, x_5, x_6, x_14); +x_15 = lean_ctor_get(x_13, 1); +lean_inc(x_15); +lean_dec(x_13); +x_16 = l_Lean_MVarId_assign___at_Lean_Meta_getLevel___spec__1(x_3, x_14, x_4, x_5, x_6, x_7, x_15); +lean_dec(x_7); lean_dec(x_6); lean_dec(x_5); lean_dec(x_4); -lean_dec(x_3); -return x_15; +return x_16; } else { -uint8_t x_16; +uint8_t x_17; +lean_dec(x_7); lean_dec(x_6); lean_dec(x_5); lean_dec(x_4); lean_dec(x_3); -lean_dec(x_2); -x_16 = !lean_is_exclusive(x_12); -if (x_16 == 0) +x_17 = !lean_is_exclusive(x_13); +if (x_17 == 0) { -return x_12; +return x_13; } else { -lean_object* x_17; lean_object* x_18; lean_object* x_19; -x_17 = lean_ctor_get(x_12, 0); -x_18 = lean_ctor_get(x_12, 1); +lean_object* x_18; lean_object* x_19; lean_object* x_20; +x_18 = lean_ctor_get(x_13, 0); +x_19 = lean_ctor_get(x_13, 1); +lean_inc(x_19); lean_inc(x_18); -lean_inc(x_17); -lean_dec(x_12); -x_19 = lean_alloc_ctor(1, 2, 0); -lean_ctor_set(x_19, 0, x_17); -lean_ctor_set(x_19, 1, x_18); -return x_19; +lean_dec(x_13); +x_20 = lean_alloc_ctor(1, 2, 0); +lean_ctor_set(x_20, 0, x_18); +lean_ctor_set(x_20, 1, x_19); 
+return x_20; } } } else { -uint8_t x_20; +uint8_t x_21; +lean_dec(x_7); lean_dec(x_6); lean_dec(x_5); lean_dec(x_4); lean_dec(x_3); -lean_dec(x_2); -x_20 = !lean_is_exclusive(x_8); -if (x_20 == 0) +x_21 = !lean_is_exclusive(x_9); +if (x_21 == 0) { -return x_8; +return x_9; } else { -lean_object* x_21; lean_object* x_22; lean_object* x_23; -x_21 = lean_ctor_get(x_8, 0); -x_22 = lean_ctor_get(x_8, 1); +lean_object* x_22; lean_object* x_23; lean_object* x_24; +x_22 = lean_ctor_get(x_9, 0); +x_23 = lean_ctor_get(x_9, 1); +lean_inc(x_23); lean_inc(x_22); -lean_inc(x_21); -lean_dec(x_8); -x_23 = lean_alloc_ctor(1, 2, 0); -lean_ctor_set(x_23, 0, x_21); -lean_ctor_set(x_23, 1, x_22); -return x_23; +lean_dec(x_9); +x_24 = lean_alloc_ctor(1, 2, 0); +lean_ctor_set(x_24, 0, x_22); +lean_ctor_set(x_24, 1, x_23); +return x_24; } } } } -LEAN_EXPORT lean_object* l_Lean_Elab_admitGoal(lean_object* x_1, lean_object* x_2, lean_object* x_3, lean_object* x_4, lean_object* x_5, lean_object* x_6) { +LEAN_EXPORT lean_object* l_Lean_Elab_admitGoal(lean_object* x_1, uint8_t x_2, lean_object* x_3, lean_object* x_4, lean_object* x_5, lean_object* x_6, lean_object* x_7) { _start: { -lean_object* x_7; lean_object* x_8; lean_object* x_9; +lean_object* x_8; lean_object* x_9; lean_object* x_10; lean_object* x_11; lean_inc(x_1); -x_7 = l_Lean_Expr_mvar___override(x_1); +x_8 = l_Lean_Expr_mvar___override(x_1); +x_9 = lean_box(x_2); lean_inc(x_1); -x_8 = lean_alloc_closure((void*)(l_Lean_Elab_admitGoal___lambda__1), 7, 2); -lean_closure_set(x_8, 0, x_7); -lean_closure_set(x_8, 1, x_1); -x_9 = l_Lean_MVarId_withContext___at___private_Lean_Meta_SynthInstance_0__Lean_Meta_synthPendingImp___spec__2___rarg(x_1, x_8, x_2, x_3, x_4, x_5, x_6); +x_10 = lean_alloc_closure((void*)(l_Lean_Elab_admitGoal___lambda__1___boxed), 8, 3); +lean_closure_set(x_10, 0, x_8); +lean_closure_set(x_10, 1, x_9); +lean_closure_set(x_10, 2, x_1); +x_11 = l_Lean_MVarId_withContext___at___private_Lean_Meta_SynthInstance_0__Lean_Meta_synthPendingImp___spec__2___rarg(x_1, x_10, x_3, x_4, x_5, x_6, x_7); +return x_11; +} +} +LEAN_EXPORT lean_object* l_Lean_Elab_admitGoal___lambda__1___boxed(lean_object* x_1, lean_object* x_2, lean_object* x_3, lean_object* x_4, lean_object* x_5, lean_object* x_6, lean_object* x_7, lean_object* x_8) { +_start: +{ +uint8_t x_9; lean_object* x_10; +x_9 = lean_unbox(x_2); +lean_dec(x_2); +x_10 = l_Lean_Elab_admitGoal___lambda__1(x_1, x_9, x_3, x_4, x_5, x_6, x_7, x_8); +return x_10; +} +} +LEAN_EXPORT lean_object* l_Lean_Elab_admitGoal___boxed(lean_object* x_1, lean_object* x_2, lean_object* x_3, lean_object* x_4, lean_object* x_5, lean_object* x_6, lean_object* x_7) { +_start: +{ +uint8_t x_8; lean_object* x_9; +x_8 = lean_unbox(x_2); +lean_dec(x_2); +x_9 = l_Lean_Elab_admitGoal(x_1, x_8, x_3, x_4, x_5, x_6, x_7); return x_9; } } @@ -1839,52 +1863,53 @@ return x_8; } else { -lean_object* x_9; lean_object* x_10; lean_object* x_11; +lean_object* x_9; lean_object* x_10; uint8_t x_11; lean_object* x_12; x_9 = lean_ctor_get(x_1, 0); lean_inc(x_9); x_10 = lean_ctor_get(x_1, 1); lean_inc(x_10); lean_dec(x_1); +x_11 = 1; lean_inc(x_5); lean_inc(x_4); lean_inc(x_3); lean_inc(x_2); -x_11 = l_Lean_Elab_admitGoal(x_9, x_2, x_3, x_4, x_5, x_6); -if (lean_obj_tag(x_11) == 0) +x_12 = l_Lean_Elab_admitGoal(x_9, x_11, x_2, x_3, x_4, x_5, x_6); +if (lean_obj_tag(x_12) == 0) { -lean_object* x_12; -x_12 = lean_ctor_get(x_11, 1); -lean_inc(x_12); -lean_dec(x_11); +lean_object* x_13; +x_13 = lean_ctor_get(x_12, 1); +lean_inc(x_13); +lean_dec(x_12); 
x_1 = x_10; -x_6 = x_12; +x_6 = x_13; goto _start; } else { -uint8_t x_14; +uint8_t x_15; lean_dec(x_10); lean_dec(x_5); lean_dec(x_4); lean_dec(x_3); lean_dec(x_2); -x_14 = !lean_is_exclusive(x_11); -if (x_14 == 0) +x_15 = !lean_is_exclusive(x_12); +if (x_15 == 0) { -return x_11; +return x_12; } else { -lean_object* x_15; lean_object* x_16; lean_object* x_17; -x_15 = lean_ctor_get(x_11, 0); -x_16 = lean_ctor_get(x_11, 1); +lean_object* x_16; lean_object* x_17; lean_object* x_18; +x_16 = lean_ctor_get(x_12, 0); +x_17 = lean_ctor_get(x_12, 1); +lean_inc(x_17); lean_inc(x_16); -lean_inc(x_15); -lean_dec(x_11); -x_17 = lean_alloc_ctor(1, 2, 0); -lean_ctor_set(x_17, 0, x_15); -lean_ctor_set(x_17, 1, x_16); -return x_17; +lean_dec(x_12); +x_18 = lean_alloc_ctor(1, 2, 0); +lean_ctor_set(x_18, 0, x_16); +lean_ctor_set(x_18, 1, x_17); +return x_18; } } } @@ -19075,82 +19100,83 @@ lean_inc(x_11); x_16 = l_Lean_Elab_logException___at_Lean_Elab_Tactic_closeUsingOrAdmit___spec__1(x_2, x_1, x_3, x_4, x_5, x_6, x_7, x_11, x_12, x_13); if (lean_obj_tag(x_16) == 0) { -lean_object* x_17; lean_object* x_18; +lean_object* x_17; uint8_t x_18; lean_object* x_19; x_17 = lean_ctor_get(x_16, 1); lean_inc(x_17); lean_dec(x_16); +x_18 = 1; lean_inc(x_12); lean_inc(x_11); lean_inc(x_7); lean_inc(x_6); -x_18 = l_Lean_Elab_admitGoal(x_8, x_6, x_7, x_11, x_12, x_17); -if (lean_obj_tag(x_18) == 0) +x_19 = l_Lean_Elab_admitGoal(x_8, x_18, x_6, x_7, x_11, x_12, x_17); +if (lean_obj_tag(x_19) == 0) { -lean_object* x_19; lean_object* x_20; -x_19 = lean_ctor_get(x_18, 1); -lean_inc(x_19); -lean_dec(x_18); -x_20 = l_Lean_Elab_Tactic_setGoals(x_9, x_1, x_3, x_4, x_5, x_6, x_7, x_11, x_12, x_19); +lean_object* x_20; lean_object* x_21; +x_20 = lean_ctor_get(x_19, 1); +lean_inc(x_20); +lean_dec(x_19); +x_21 = l_Lean_Elab_Tactic_setGoals(x_9, x_1, x_3, x_4, x_5, x_6, x_7, x_11, x_12, x_20); lean_dec(x_12); lean_dec(x_11); lean_dec(x_7); lean_dec(x_6); -return x_20; +return x_21; } else { -uint8_t x_21; +uint8_t x_22; lean_dec(x_12); lean_dec(x_11); lean_dec(x_9); lean_dec(x_7); lean_dec(x_6); -x_21 = !lean_is_exclusive(x_18); -if (x_21 == 0) +x_22 = !lean_is_exclusive(x_19); +if (x_22 == 0) { -return x_18; +return x_19; } else { -lean_object* x_22; lean_object* x_23; lean_object* x_24; -x_22 = lean_ctor_get(x_18, 0); -x_23 = lean_ctor_get(x_18, 1); +lean_object* x_23; lean_object* x_24; lean_object* x_25; +x_23 = lean_ctor_get(x_19, 0); +x_24 = lean_ctor_get(x_19, 1); +lean_inc(x_24); lean_inc(x_23); -lean_inc(x_22); -lean_dec(x_18); -x_24 = lean_alloc_ctor(1, 2, 0); -lean_ctor_set(x_24, 0, x_22); -lean_ctor_set(x_24, 1, x_23); -return x_24; +lean_dec(x_19); +x_25 = lean_alloc_ctor(1, 2, 0); +lean_ctor_set(x_25, 0, x_23); +lean_ctor_set(x_25, 1, x_24); +return x_25; } } } else { -uint8_t x_25; +uint8_t x_26; lean_dec(x_12); lean_dec(x_11); lean_dec(x_9); lean_dec(x_8); lean_dec(x_7); lean_dec(x_6); -x_25 = !lean_is_exclusive(x_16); -if (x_25 == 0) +x_26 = !lean_is_exclusive(x_16); +if (x_26 == 0) { return x_16; } else { -lean_object* x_26; lean_object* x_27; lean_object* x_28; -x_26 = lean_ctor_get(x_16, 0); -x_27 = lean_ctor_get(x_16, 1); +lean_object* x_27; lean_object* x_28; lean_object* x_29; +x_27 = lean_ctor_get(x_16, 0); +x_28 = lean_ctor_get(x_16, 1); +lean_inc(x_28); lean_inc(x_27); -lean_inc(x_26); lean_dec(x_16); -x_28 = lean_alloc_ctor(1, 2, 0); -lean_ctor_set(x_28, 0, x_26); -lean_ctor_set(x_28, 1, x_27); -return x_28; +x_29 = lean_alloc_ctor(1, 2, 0); +lean_ctor_set(x_29, 0, x_27); +lean_ctor_set(x_29, 1, x_28); 
+return x_29; } } } @@ -25045,7 +25071,7 @@ lean_dec(x_1); return x_5; } } -static lean_object* _init_l_Lean_Elab_Tactic_initFn____x40_Lean_Elab_Tactic_Basic___hyg_8853____closed__1() { +static lean_object* _init_l_Lean_Elab_Tactic_initFn____x40_Lean_Elab_Tactic_Basic___hyg_8856____closed__1() { _start: { lean_object* x_1; lean_object* x_2; lean_object* x_3; @@ -25055,7 +25081,7 @@ x_3 = l_Lean_Name_mkStr2(x_1, x_2); return x_3; } } -static lean_object* _init_l_Lean_Elab_Tactic_initFn____x40_Lean_Elab_Tactic_Basic___hyg_8853____closed__2() { +static lean_object* _init_l_Lean_Elab_Tactic_initFn____x40_Lean_Elab_Tactic_Basic___hyg_8856____closed__2() { _start: { lean_object* x_1; lean_object* x_2; lean_object* x_3; @@ -25065,27 +25091,27 @@ x_3 = l_Lean_Name_str___override(x_1, x_2); return x_3; } } -static lean_object* _init_l_Lean_Elab_Tactic_initFn____x40_Lean_Elab_Tactic_Basic___hyg_8853____closed__3() { +static lean_object* _init_l_Lean_Elab_Tactic_initFn____x40_Lean_Elab_Tactic_Basic___hyg_8856____closed__3() { _start: { lean_object* x_1; lean_object* x_2; lean_object* x_3; -x_1 = l_Lean_Elab_Tactic_initFn____x40_Lean_Elab_Tactic_Basic___hyg_8853____closed__2; +x_1 = l_Lean_Elab_Tactic_initFn____x40_Lean_Elab_Tactic_Basic___hyg_8856____closed__2; x_2 = l_Lean_logAt___at_Lean_Elab_Term_reportUnsolvedGoals___spec__2___lambda__2___closed__2; x_3 = l_Lean_Name_str___override(x_1, x_2); return x_3; } } -static lean_object* _init_l_Lean_Elab_Tactic_initFn____x40_Lean_Elab_Tactic_Basic___hyg_8853____closed__4() { +static lean_object* _init_l_Lean_Elab_Tactic_initFn____x40_Lean_Elab_Tactic_Basic___hyg_8856____closed__4() { _start: { lean_object* x_1; lean_object* x_2; lean_object* x_3; -x_1 = l_Lean_Elab_Tactic_initFn____x40_Lean_Elab_Tactic_Basic___hyg_8853____closed__3; +x_1 = l_Lean_Elab_Tactic_initFn____x40_Lean_Elab_Tactic_Basic___hyg_8856____closed__3; x_2 = l_Lean_logAt___at_Lean_Elab_Term_reportUnsolvedGoals___spec__2___lambda__2___closed__3; x_3 = l_Lean_Name_str___override(x_1, x_2); return x_3; } } -static lean_object* _init_l_Lean_Elab_Tactic_initFn____x40_Lean_Elab_Tactic_Basic___hyg_8853____closed__5() { +static lean_object* _init_l_Lean_Elab_Tactic_initFn____x40_Lean_Elab_Tactic_Basic___hyg_8856____closed__5() { _start: { lean_object* x_1; @@ -25093,17 +25119,17 @@ x_1 = lean_mk_string_unchecked("initFn", 6, 6); return x_1; } } -static lean_object* _init_l_Lean_Elab_Tactic_initFn____x40_Lean_Elab_Tactic_Basic___hyg_8853____closed__6() { +static lean_object* _init_l_Lean_Elab_Tactic_initFn____x40_Lean_Elab_Tactic_Basic___hyg_8856____closed__6() { _start: { lean_object* x_1; lean_object* x_2; lean_object* x_3; -x_1 = l_Lean_Elab_Tactic_initFn____x40_Lean_Elab_Tactic_Basic___hyg_8853____closed__4; -x_2 = l_Lean_Elab_Tactic_initFn____x40_Lean_Elab_Tactic_Basic___hyg_8853____closed__5; +x_1 = l_Lean_Elab_Tactic_initFn____x40_Lean_Elab_Tactic_Basic___hyg_8856____closed__4; +x_2 = l_Lean_Elab_Tactic_initFn____x40_Lean_Elab_Tactic_Basic___hyg_8856____closed__5; x_3 = l_Lean_Name_str___override(x_1, x_2); return x_3; } } -static lean_object* _init_l_Lean_Elab_Tactic_initFn____x40_Lean_Elab_Tactic_Basic___hyg_8853____closed__7() { +static lean_object* _init_l_Lean_Elab_Tactic_initFn____x40_Lean_Elab_Tactic_Basic___hyg_8856____closed__7() { _start: { lean_object* x_1; @@ -25111,47 +25137,47 @@ x_1 = lean_mk_string_unchecked("_@", 2, 2); return x_1; } } -static lean_object* _init_l_Lean_Elab_Tactic_initFn____x40_Lean_Elab_Tactic_Basic___hyg_8853____closed__8() { +static lean_object* 
_init_l_Lean_Elab_Tactic_initFn____x40_Lean_Elab_Tactic_Basic___hyg_8856____closed__8() { _start: { lean_object* x_1; lean_object* x_2; lean_object* x_3; -x_1 = l_Lean_Elab_Tactic_initFn____x40_Lean_Elab_Tactic_Basic___hyg_8853____closed__6; -x_2 = l_Lean_Elab_Tactic_initFn____x40_Lean_Elab_Tactic_Basic___hyg_8853____closed__7; +x_1 = l_Lean_Elab_Tactic_initFn____x40_Lean_Elab_Tactic_Basic___hyg_8856____closed__6; +x_2 = l_Lean_Elab_Tactic_initFn____x40_Lean_Elab_Tactic_Basic___hyg_8856____closed__7; x_3 = l_Lean_Name_str___override(x_1, x_2); return x_3; } } -static lean_object* _init_l_Lean_Elab_Tactic_initFn____x40_Lean_Elab_Tactic_Basic___hyg_8853____closed__9() { +static lean_object* _init_l_Lean_Elab_Tactic_initFn____x40_Lean_Elab_Tactic_Basic___hyg_8856____closed__9() { _start: { lean_object* x_1; lean_object* x_2; lean_object* x_3; -x_1 = l_Lean_Elab_Tactic_initFn____x40_Lean_Elab_Tactic_Basic___hyg_8853____closed__8; +x_1 = l_Lean_Elab_Tactic_initFn____x40_Lean_Elab_Tactic_Basic___hyg_8856____closed__8; x_2 = l_Lean_Elab_Tactic_mkTacticAttribute___closed__5; x_3 = l_Lean_Name_str___override(x_1, x_2); return x_3; } } -static lean_object* _init_l_Lean_Elab_Tactic_initFn____x40_Lean_Elab_Tactic_Basic___hyg_8853____closed__10() { +static lean_object* _init_l_Lean_Elab_Tactic_initFn____x40_Lean_Elab_Tactic_Basic___hyg_8856____closed__10() { _start: { lean_object* x_1; lean_object* x_2; lean_object* x_3; -x_1 = l_Lean_Elab_Tactic_initFn____x40_Lean_Elab_Tactic_Basic___hyg_8853____closed__9; +x_1 = l_Lean_Elab_Tactic_initFn____x40_Lean_Elab_Tactic_Basic___hyg_8856____closed__9; x_2 = l_Lean_logAt___at_Lean_Elab_Term_reportUnsolvedGoals___spec__2___lambda__2___closed__2; x_3 = l_Lean_Name_str___override(x_1, x_2); return x_3; } } -static lean_object* _init_l_Lean_Elab_Tactic_initFn____x40_Lean_Elab_Tactic_Basic___hyg_8853____closed__11() { +static lean_object* _init_l_Lean_Elab_Tactic_initFn____x40_Lean_Elab_Tactic_Basic___hyg_8856____closed__11() { _start: { lean_object* x_1; lean_object* x_2; lean_object* x_3; -x_1 = l_Lean_Elab_Tactic_initFn____x40_Lean_Elab_Tactic_Basic___hyg_8853____closed__10; +x_1 = l_Lean_Elab_Tactic_initFn____x40_Lean_Elab_Tactic_Basic___hyg_8856____closed__10; x_2 = l_Lean_logAt___at_Lean_Elab_Term_reportUnsolvedGoals___spec__2___lambda__2___closed__3; x_3 = l_Lean_Name_str___override(x_1, x_2); return x_3; } } -static lean_object* _init_l_Lean_Elab_Tactic_initFn____x40_Lean_Elab_Tactic_Basic___hyg_8853____closed__12() { +static lean_object* _init_l_Lean_Elab_Tactic_initFn____x40_Lean_Elab_Tactic_Basic___hyg_8856____closed__12() { _start: { lean_object* x_1; @@ -25159,17 +25185,17 @@ x_1 = lean_mk_string_unchecked("Basic", 5, 5); return x_1; } } -static lean_object* _init_l_Lean_Elab_Tactic_initFn____x40_Lean_Elab_Tactic_Basic___hyg_8853____closed__13() { +static lean_object* _init_l_Lean_Elab_Tactic_initFn____x40_Lean_Elab_Tactic_Basic___hyg_8856____closed__13() { _start: { lean_object* x_1; lean_object* x_2; lean_object* x_3; -x_1 = l_Lean_Elab_Tactic_initFn____x40_Lean_Elab_Tactic_Basic___hyg_8853____closed__11; -x_2 = l_Lean_Elab_Tactic_initFn____x40_Lean_Elab_Tactic_Basic___hyg_8853____closed__12; +x_1 = l_Lean_Elab_Tactic_initFn____x40_Lean_Elab_Tactic_Basic___hyg_8856____closed__11; +x_2 = l_Lean_Elab_Tactic_initFn____x40_Lean_Elab_Tactic_Basic___hyg_8856____closed__12; x_3 = l_Lean_Name_str___override(x_1, x_2); return x_3; } } -static lean_object* _init_l_Lean_Elab_Tactic_initFn____x40_Lean_Elab_Tactic_Basic___hyg_8853____closed__14() { +static 
lean_object* _init_l_Lean_Elab_Tactic_initFn____x40_Lean_Elab_Tactic_Basic___hyg_8856____closed__14() { _start: { lean_object* x_1; @@ -25177,54 +25203,54 @@ x_1 = lean_mk_string_unchecked("_hyg", 4, 4); return x_1; } } -static lean_object* _init_l_Lean_Elab_Tactic_initFn____x40_Lean_Elab_Tactic_Basic___hyg_8853____closed__15() { +static lean_object* _init_l_Lean_Elab_Tactic_initFn____x40_Lean_Elab_Tactic_Basic___hyg_8856____closed__15() { _start: { lean_object* x_1; lean_object* x_2; lean_object* x_3; -x_1 = l_Lean_Elab_Tactic_initFn____x40_Lean_Elab_Tactic_Basic___hyg_8853____closed__13; -x_2 = l_Lean_Elab_Tactic_initFn____x40_Lean_Elab_Tactic_Basic___hyg_8853____closed__14; +x_1 = l_Lean_Elab_Tactic_initFn____x40_Lean_Elab_Tactic_Basic___hyg_8856____closed__13; +x_2 = l_Lean_Elab_Tactic_initFn____x40_Lean_Elab_Tactic_Basic___hyg_8856____closed__14; x_3 = l_Lean_Name_str___override(x_1, x_2); return x_3; } } -static lean_object* _init_l_Lean_Elab_Tactic_initFn____x40_Lean_Elab_Tactic_Basic___hyg_8853____closed__16() { +static lean_object* _init_l_Lean_Elab_Tactic_initFn____x40_Lean_Elab_Tactic_Basic___hyg_8856____closed__16() { _start: { lean_object* x_1; lean_object* x_2; lean_object* x_3; -x_1 = l_Lean_Elab_Tactic_initFn____x40_Lean_Elab_Tactic_Basic___hyg_8853____closed__15; -x_2 = lean_unsigned_to_nat(8853u); +x_1 = l_Lean_Elab_Tactic_initFn____x40_Lean_Elab_Tactic_Basic___hyg_8856____closed__15; +x_2 = lean_unsigned_to_nat(8856u); x_3 = l_Lean_Name_num___override(x_1, x_2); return x_3; } } -LEAN_EXPORT lean_object* l_Lean_Elab_Tactic_initFn____x40_Lean_Elab_Tactic_Basic___hyg_8853_(lean_object* x_1) { +LEAN_EXPORT lean_object* l_Lean_Elab_Tactic_initFn____x40_Lean_Elab_Tactic_Basic___hyg_8856_(lean_object* x_1) { _start: { lean_object* x_2; uint8_t x_3; lean_object* x_4; lean_object* x_5; -x_2 = l_Lean_Elab_Tactic_initFn____x40_Lean_Elab_Tactic_Basic___hyg_8853____closed__1; +x_2 = l_Lean_Elab_Tactic_initFn____x40_Lean_Elab_Tactic_Basic___hyg_8856____closed__1; x_3 = 0; -x_4 = l_Lean_Elab_Tactic_initFn____x40_Lean_Elab_Tactic_Basic___hyg_8853____closed__16; +x_4 = l_Lean_Elab_Tactic_initFn____x40_Lean_Elab_Tactic_Basic___hyg_8856____closed__16; x_5 = l_Lean_registerTraceClass(x_2, x_3, x_4, x_1); return x_5; } } -static lean_object* _init_l_Lean_Elab_Tactic_initFn____x40_Lean_Elab_Tactic_Basic___hyg_8894____closed__1() { +static lean_object* _init_l_Lean_Elab_Tactic_initFn____x40_Lean_Elab_Tactic_Basic___hyg_8897____closed__1() { _start: { lean_object* x_1; lean_object* x_2; lean_object* x_3; -x_1 = l_Lean_Elab_Tactic_initFn____x40_Lean_Elab_Tactic_Basic___hyg_8853____closed__15; -x_2 = lean_unsigned_to_nat(8894u); +x_1 = l_Lean_Elab_Tactic_initFn____x40_Lean_Elab_Tactic_Basic___hyg_8856____closed__15; +x_2 = lean_unsigned_to_nat(8897u); x_3 = l_Lean_Name_num___override(x_1, x_2); return x_3; } } -LEAN_EXPORT lean_object* l_Lean_Elab_Tactic_initFn____x40_Lean_Elab_Tactic_Basic___hyg_8894_(lean_object* x_1) { +LEAN_EXPORT lean_object* l_Lean_Elab_Tactic_initFn____x40_Lean_Elab_Tactic_Basic___hyg_8897_(lean_object* x_1) { _start: { lean_object* x_2; uint8_t x_3; lean_object* x_4; lean_object* x_5; x_2 = l_List_forIn_x27_loop___at_Lean_Elab_Tactic_evalTactic_handleEx___spec__4___closed__2; x_3 = 0; -x_4 = l_Lean_Elab_Tactic_initFn____x40_Lean_Elab_Tactic_Basic___hyg_8894____closed__1; +x_4 = l_Lean_Elab_Tactic_initFn____x40_Lean_Elab_Tactic_Basic___hyg_8897____closed__1; x_5 = l_Lean_registerTraceClass(x_2, x_3, x_4, x_1); return x_5; } @@ -25488,44 +25514,44 @@ 
l_Lean_Elab_Tactic_getNameOfIdent_x27___closed__1 = _init_l_Lean_Elab_Tactic_get lean_mark_persistent(l_Lean_Elab_Tactic_getNameOfIdent_x27___closed__1); l_Lean_Elab_Tactic_getNameOfIdent_x27___closed__2 = _init_l_Lean_Elab_Tactic_getNameOfIdent_x27___closed__2(); lean_mark_persistent(l_Lean_Elab_Tactic_getNameOfIdent_x27___closed__2); -l_Lean_Elab_Tactic_initFn____x40_Lean_Elab_Tactic_Basic___hyg_8853____closed__1 = _init_l_Lean_Elab_Tactic_initFn____x40_Lean_Elab_Tactic_Basic___hyg_8853____closed__1(); -lean_mark_persistent(l_Lean_Elab_Tactic_initFn____x40_Lean_Elab_Tactic_Basic___hyg_8853____closed__1); -l_Lean_Elab_Tactic_initFn____x40_Lean_Elab_Tactic_Basic___hyg_8853____closed__2 = _init_l_Lean_Elab_Tactic_initFn____x40_Lean_Elab_Tactic_Basic___hyg_8853____closed__2(); -lean_mark_persistent(l_Lean_Elab_Tactic_initFn____x40_Lean_Elab_Tactic_Basic___hyg_8853____closed__2); -l_Lean_Elab_Tactic_initFn____x40_Lean_Elab_Tactic_Basic___hyg_8853____closed__3 = _init_l_Lean_Elab_Tactic_initFn____x40_Lean_Elab_Tactic_Basic___hyg_8853____closed__3(); -lean_mark_persistent(l_Lean_Elab_Tactic_initFn____x40_Lean_Elab_Tactic_Basic___hyg_8853____closed__3); -l_Lean_Elab_Tactic_initFn____x40_Lean_Elab_Tactic_Basic___hyg_8853____closed__4 = _init_l_Lean_Elab_Tactic_initFn____x40_Lean_Elab_Tactic_Basic___hyg_8853____closed__4(); -lean_mark_persistent(l_Lean_Elab_Tactic_initFn____x40_Lean_Elab_Tactic_Basic___hyg_8853____closed__4); -l_Lean_Elab_Tactic_initFn____x40_Lean_Elab_Tactic_Basic___hyg_8853____closed__5 = _init_l_Lean_Elab_Tactic_initFn____x40_Lean_Elab_Tactic_Basic___hyg_8853____closed__5(); -lean_mark_persistent(l_Lean_Elab_Tactic_initFn____x40_Lean_Elab_Tactic_Basic___hyg_8853____closed__5); -l_Lean_Elab_Tactic_initFn____x40_Lean_Elab_Tactic_Basic___hyg_8853____closed__6 = _init_l_Lean_Elab_Tactic_initFn____x40_Lean_Elab_Tactic_Basic___hyg_8853____closed__6(); -lean_mark_persistent(l_Lean_Elab_Tactic_initFn____x40_Lean_Elab_Tactic_Basic___hyg_8853____closed__6); -l_Lean_Elab_Tactic_initFn____x40_Lean_Elab_Tactic_Basic___hyg_8853____closed__7 = _init_l_Lean_Elab_Tactic_initFn____x40_Lean_Elab_Tactic_Basic___hyg_8853____closed__7(); -lean_mark_persistent(l_Lean_Elab_Tactic_initFn____x40_Lean_Elab_Tactic_Basic___hyg_8853____closed__7); -l_Lean_Elab_Tactic_initFn____x40_Lean_Elab_Tactic_Basic___hyg_8853____closed__8 = _init_l_Lean_Elab_Tactic_initFn____x40_Lean_Elab_Tactic_Basic___hyg_8853____closed__8(); -lean_mark_persistent(l_Lean_Elab_Tactic_initFn____x40_Lean_Elab_Tactic_Basic___hyg_8853____closed__8); -l_Lean_Elab_Tactic_initFn____x40_Lean_Elab_Tactic_Basic___hyg_8853____closed__9 = _init_l_Lean_Elab_Tactic_initFn____x40_Lean_Elab_Tactic_Basic___hyg_8853____closed__9(); -lean_mark_persistent(l_Lean_Elab_Tactic_initFn____x40_Lean_Elab_Tactic_Basic___hyg_8853____closed__9); -l_Lean_Elab_Tactic_initFn____x40_Lean_Elab_Tactic_Basic___hyg_8853____closed__10 = _init_l_Lean_Elab_Tactic_initFn____x40_Lean_Elab_Tactic_Basic___hyg_8853____closed__10(); -lean_mark_persistent(l_Lean_Elab_Tactic_initFn____x40_Lean_Elab_Tactic_Basic___hyg_8853____closed__10); -l_Lean_Elab_Tactic_initFn____x40_Lean_Elab_Tactic_Basic___hyg_8853____closed__11 = _init_l_Lean_Elab_Tactic_initFn____x40_Lean_Elab_Tactic_Basic___hyg_8853____closed__11(); -lean_mark_persistent(l_Lean_Elab_Tactic_initFn____x40_Lean_Elab_Tactic_Basic___hyg_8853____closed__11); -l_Lean_Elab_Tactic_initFn____x40_Lean_Elab_Tactic_Basic___hyg_8853____closed__12 = _init_l_Lean_Elab_Tactic_initFn____x40_Lean_Elab_Tactic_Basic___hyg_8853____closed__12(); 
-lean_mark_persistent(l_Lean_Elab_Tactic_initFn____x40_Lean_Elab_Tactic_Basic___hyg_8853____closed__12); -l_Lean_Elab_Tactic_initFn____x40_Lean_Elab_Tactic_Basic___hyg_8853____closed__13 = _init_l_Lean_Elab_Tactic_initFn____x40_Lean_Elab_Tactic_Basic___hyg_8853____closed__13(); -lean_mark_persistent(l_Lean_Elab_Tactic_initFn____x40_Lean_Elab_Tactic_Basic___hyg_8853____closed__13); -l_Lean_Elab_Tactic_initFn____x40_Lean_Elab_Tactic_Basic___hyg_8853____closed__14 = _init_l_Lean_Elab_Tactic_initFn____x40_Lean_Elab_Tactic_Basic___hyg_8853____closed__14(); -lean_mark_persistent(l_Lean_Elab_Tactic_initFn____x40_Lean_Elab_Tactic_Basic___hyg_8853____closed__14); -l_Lean_Elab_Tactic_initFn____x40_Lean_Elab_Tactic_Basic___hyg_8853____closed__15 = _init_l_Lean_Elab_Tactic_initFn____x40_Lean_Elab_Tactic_Basic___hyg_8853____closed__15(); -lean_mark_persistent(l_Lean_Elab_Tactic_initFn____x40_Lean_Elab_Tactic_Basic___hyg_8853____closed__15); -l_Lean_Elab_Tactic_initFn____x40_Lean_Elab_Tactic_Basic___hyg_8853____closed__16 = _init_l_Lean_Elab_Tactic_initFn____x40_Lean_Elab_Tactic_Basic___hyg_8853____closed__16(); -lean_mark_persistent(l_Lean_Elab_Tactic_initFn____x40_Lean_Elab_Tactic_Basic___hyg_8853____closed__16); -if (builtin) {res = l_Lean_Elab_Tactic_initFn____x40_Lean_Elab_Tactic_Basic___hyg_8853_(lean_io_mk_world()); +l_Lean_Elab_Tactic_initFn____x40_Lean_Elab_Tactic_Basic___hyg_8856____closed__1 = _init_l_Lean_Elab_Tactic_initFn____x40_Lean_Elab_Tactic_Basic___hyg_8856____closed__1(); +lean_mark_persistent(l_Lean_Elab_Tactic_initFn____x40_Lean_Elab_Tactic_Basic___hyg_8856____closed__1); +l_Lean_Elab_Tactic_initFn____x40_Lean_Elab_Tactic_Basic___hyg_8856____closed__2 = _init_l_Lean_Elab_Tactic_initFn____x40_Lean_Elab_Tactic_Basic___hyg_8856____closed__2(); +lean_mark_persistent(l_Lean_Elab_Tactic_initFn____x40_Lean_Elab_Tactic_Basic___hyg_8856____closed__2); +l_Lean_Elab_Tactic_initFn____x40_Lean_Elab_Tactic_Basic___hyg_8856____closed__3 = _init_l_Lean_Elab_Tactic_initFn____x40_Lean_Elab_Tactic_Basic___hyg_8856____closed__3(); +lean_mark_persistent(l_Lean_Elab_Tactic_initFn____x40_Lean_Elab_Tactic_Basic___hyg_8856____closed__3); +l_Lean_Elab_Tactic_initFn____x40_Lean_Elab_Tactic_Basic___hyg_8856____closed__4 = _init_l_Lean_Elab_Tactic_initFn____x40_Lean_Elab_Tactic_Basic___hyg_8856____closed__4(); +lean_mark_persistent(l_Lean_Elab_Tactic_initFn____x40_Lean_Elab_Tactic_Basic___hyg_8856____closed__4); +l_Lean_Elab_Tactic_initFn____x40_Lean_Elab_Tactic_Basic___hyg_8856____closed__5 = _init_l_Lean_Elab_Tactic_initFn____x40_Lean_Elab_Tactic_Basic___hyg_8856____closed__5(); +lean_mark_persistent(l_Lean_Elab_Tactic_initFn____x40_Lean_Elab_Tactic_Basic___hyg_8856____closed__5); +l_Lean_Elab_Tactic_initFn____x40_Lean_Elab_Tactic_Basic___hyg_8856____closed__6 = _init_l_Lean_Elab_Tactic_initFn____x40_Lean_Elab_Tactic_Basic___hyg_8856____closed__6(); +lean_mark_persistent(l_Lean_Elab_Tactic_initFn____x40_Lean_Elab_Tactic_Basic___hyg_8856____closed__6); +l_Lean_Elab_Tactic_initFn____x40_Lean_Elab_Tactic_Basic___hyg_8856____closed__7 = _init_l_Lean_Elab_Tactic_initFn____x40_Lean_Elab_Tactic_Basic___hyg_8856____closed__7(); +lean_mark_persistent(l_Lean_Elab_Tactic_initFn____x40_Lean_Elab_Tactic_Basic___hyg_8856____closed__7); +l_Lean_Elab_Tactic_initFn____x40_Lean_Elab_Tactic_Basic___hyg_8856____closed__8 = _init_l_Lean_Elab_Tactic_initFn____x40_Lean_Elab_Tactic_Basic___hyg_8856____closed__8(); +lean_mark_persistent(l_Lean_Elab_Tactic_initFn____x40_Lean_Elab_Tactic_Basic___hyg_8856____closed__8); 
+l_Lean_Elab_Tactic_initFn____x40_Lean_Elab_Tactic_Basic___hyg_8856____closed__9 = _init_l_Lean_Elab_Tactic_initFn____x40_Lean_Elab_Tactic_Basic___hyg_8856____closed__9(); +lean_mark_persistent(l_Lean_Elab_Tactic_initFn____x40_Lean_Elab_Tactic_Basic___hyg_8856____closed__9); +l_Lean_Elab_Tactic_initFn____x40_Lean_Elab_Tactic_Basic___hyg_8856____closed__10 = _init_l_Lean_Elab_Tactic_initFn____x40_Lean_Elab_Tactic_Basic___hyg_8856____closed__10(); +lean_mark_persistent(l_Lean_Elab_Tactic_initFn____x40_Lean_Elab_Tactic_Basic___hyg_8856____closed__10); +l_Lean_Elab_Tactic_initFn____x40_Lean_Elab_Tactic_Basic___hyg_8856____closed__11 = _init_l_Lean_Elab_Tactic_initFn____x40_Lean_Elab_Tactic_Basic___hyg_8856____closed__11(); +lean_mark_persistent(l_Lean_Elab_Tactic_initFn____x40_Lean_Elab_Tactic_Basic___hyg_8856____closed__11); +l_Lean_Elab_Tactic_initFn____x40_Lean_Elab_Tactic_Basic___hyg_8856____closed__12 = _init_l_Lean_Elab_Tactic_initFn____x40_Lean_Elab_Tactic_Basic___hyg_8856____closed__12(); +lean_mark_persistent(l_Lean_Elab_Tactic_initFn____x40_Lean_Elab_Tactic_Basic___hyg_8856____closed__12); +l_Lean_Elab_Tactic_initFn____x40_Lean_Elab_Tactic_Basic___hyg_8856____closed__13 = _init_l_Lean_Elab_Tactic_initFn____x40_Lean_Elab_Tactic_Basic___hyg_8856____closed__13(); +lean_mark_persistent(l_Lean_Elab_Tactic_initFn____x40_Lean_Elab_Tactic_Basic___hyg_8856____closed__13); +l_Lean_Elab_Tactic_initFn____x40_Lean_Elab_Tactic_Basic___hyg_8856____closed__14 = _init_l_Lean_Elab_Tactic_initFn____x40_Lean_Elab_Tactic_Basic___hyg_8856____closed__14(); +lean_mark_persistent(l_Lean_Elab_Tactic_initFn____x40_Lean_Elab_Tactic_Basic___hyg_8856____closed__14); +l_Lean_Elab_Tactic_initFn____x40_Lean_Elab_Tactic_Basic___hyg_8856____closed__15 = _init_l_Lean_Elab_Tactic_initFn____x40_Lean_Elab_Tactic_Basic___hyg_8856____closed__15(); +lean_mark_persistent(l_Lean_Elab_Tactic_initFn____x40_Lean_Elab_Tactic_Basic___hyg_8856____closed__15); +l_Lean_Elab_Tactic_initFn____x40_Lean_Elab_Tactic_Basic___hyg_8856____closed__16 = _init_l_Lean_Elab_Tactic_initFn____x40_Lean_Elab_Tactic_Basic___hyg_8856____closed__16(); +lean_mark_persistent(l_Lean_Elab_Tactic_initFn____x40_Lean_Elab_Tactic_Basic___hyg_8856____closed__16); +if (builtin) {res = l_Lean_Elab_Tactic_initFn____x40_Lean_Elab_Tactic_Basic___hyg_8856_(lean_io_mk_world()); if (lean_io_result_is_error(res)) return res; lean_dec_ref(res); -}l_Lean_Elab_Tactic_initFn____x40_Lean_Elab_Tactic_Basic___hyg_8894____closed__1 = _init_l_Lean_Elab_Tactic_initFn____x40_Lean_Elab_Tactic_Basic___hyg_8894____closed__1(); -lean_mark_persistent(l_Lean_Elab_Tactic_initFn____x40_Lean_Elab_Tactic_Basic___hyg_8894____closed__1); -if (builtin) {res = l_Lean_Elab_Tactic_initFn____x40_Lean_Elab_Tactic_Basic___hyg_8894_(lean_io_mk_world()); +}l_Lean_Elab_Tactic_initFn____x40_Lean_Elab_Tactic_Basic___hyg_8897____closed__1 = _init_l_Lean_Elab_Tactic_initFn____x40_Lean_Elab_Tactic_Basic___hyg_8897____closed__1(); +lean_mark_persistent(l_Lean_Elab_Tactic_initFn____x40_Lean_Elab_Tactic_Basic___hyg_8897____closed__1); +if (builtin) {res = l_Lean_Elab_Tactic_initFn____x40_Lean_Elab_Tactic_Basic___hyg_8897_(lean_io_mk_world()); if (lean_io_result_is_error(res)) return res; lean_dec_ref(res); }return lean_io_result_mk_ok(lean_box(0)); diff --git a/stage0/stdlib/Lean/Elab/Tactic/BuiltinTactic.c b/stage0/stdlib/Lean/Elab/Tactic/BuiltinTactic.c index e4c70389f6a3..29a619b06463 100644 --- a/stage0/stdlib/Lean/Elab/Tactic/BuiltinTactic.c +++ b/stage0/stdlib/Lean/Elab/Tactic/BuiltinTactic.c @@ -1409,7 
+1409,7 @@ static lean_object* l___regBuiltin_Lean_Elab_Tactic_evalRotateRight_declRange__1 uint8_t lean_usize_dec_lt(size_t, size_t); static lean_object* l___regBuiltin_Lean_Elab_Tactic_evalTraceState__1___closed__5; LEAN_EXPORT lean_object* l_Lean_throwUnknownConstant___at_Lean_Elab_Tactic_evalOpen___spec__17(lean_object*, lean_object*, lean_object*, lean_object*, lean_object*, lean_object*, lean_object*, lean_object*, lean_object*, lean_object*, lean_object*); -lean_object* l_Lean_Elab_admitGoal(lean_object*, lean_object*, lean_object*, lean_object*, lean_object*, lean_object*); +lean_object* l_Lean_Elab_admitGoal(lean_object*, uint8_t, lean_object*, lean_object*, lean_object*, lean_object*, lean_object*); LEAN_EXPORT lean_object* l_Lean_Elab_nestedExceptionToMessageData___at_Lean_Elab_Tactic_evalOpen___spec__38(lean_object*, lean_object*, lean_object*, lean_object*, lean_object*, lean_object*, lean_object*, lean_object*, lean_object*, lean_object*, lean_object*); static lean_object* l___regBuiltin_Lean_Elab_Tactic_evalRefl_declRange__1___closed__3; static lean_object* l___regBuiltin_Lean_Elab_Tactic_evalSleep_declRange__1___closed__2; @@ -23122,7 +23122,7 @@ lean_inc(x_15); x_60 = l_Lean_Elab_logException___at_Lean_Elab_Tactic_closeUsingOrAdmit___spec__1(x_55, x_9, x_10, x_11, x_12, x_13, x_14, x_15, x_16, x_56); if (lean_obj_tag(x_60) == 0) { -lean_object* x_61; lean_object* x_62; lean_object* x_63; lean_object* x_64; uint8_t x_65; lean_object* x_66; lean_object* x_67; lean_object* x_68; lean_object* x_69; lean_object* x_70; +lean_object* x_61; lean_object* x_62; lean_object* x_63; lean_object* x_64; uint8_t x_65; lean_object* x_66; lean_object* x_67; lean_object* x_68; lean_object* x_69; uint8_t x_70; lean_object* x_71; x_61 = lean_ctor_get(x_60, 1); lean_inc(x_61); lean_dec(x_60); @@ -23141,20 +23141,20 @@ x_68 = l_Lean_Core_setMessageLog(x_63, x_15, x_16, x_67); x_69 = lean_ctor_get(x_68, 1); lean_inc(x_69); lean_dec(x_68); +x_70 = 1; lean_inc(x_16); lean_inc(x_15); lean_inc(x_14); lean_inc(x_13); -x_70 = l_Lean_Elab_admitGoal(x_20, x_13, x_14, x_15, x_16, x_69); -if (lean_obj_tag(x_70) == 0) +x_71 = l_Lean_Elab_admitGoal(x_20, x_70, x_13, x_14, x_15, x_16, x_69); +if (lean_obj_tag(x_71) == 0) { -lean_object* x_71; uint8_t x_72; -x_71 = lean_ctor_get(x_70, 1); -lean_inc(x_71); -lean_dec(x_70); -x_72 = 1; -x_30 = x_72; -x_31 = x_71; +lean_object* x_72; +x_72 = lean_ctor_get(x_71, 1); +lean_inc(x_72); +lean_dec(x_71); +x_30 = x_70; +x_31 = x_72; goto block_39; } else @@ -23172,19 +23172,19 @@ lean_dec(x_11); lean_dec(x_10); lean_dec(x_9); lean_dec(x_3); -x_73 = !lean_is_exclusive(x_70); +x_73 = !lean_is_exclusive(x_71); if (x_73 == 0) { -return x_70; +return x_71; } else { lean_object* x_74; lean_object* x_75; lean_object* x_76; -x_74 = lean_ctor_get(x_70, 0); -x_75 = lean_ctor_get(x_70, 1); +x_74 = lean_ctor_get(x_71, 0); +x_75 = lean_ctor_get(x_71, 1); lean_inc(x_75); lean_inc(x_74); -lean_dec(x_70); +lean_dec(x_71); x_76 = lean_alloc_ctor(1, 2, 0); lean_ctor_set(x_76, 0, x_74); lean_ctor_set(x_76, 1, x_75); @@ -23314,7 +23314,7 @@ lean_inc(x_15); x_87 = l_Lean_Elab_logException___at_Lean_Elab_Tactic_closeUsingOrAdmit___spec__1(x_81, x_9, x_10, x_11, x_12, x_13, x_14, x_15, x_16, x_82); if (lean_obj_tag(x_87) == 0) { -lean_object* x_88; lean_object* x_89; lean_object* x_90; lean_object* x_91; uint8_t x_92; lean_object* x_93; lean_object* x_94; lean_object* x_95; lean_object* x_96; lean_object* x_97; +lean_object* x_88; lean_object* x_89; lean_object* x_90; lean_object* x_91; 
uint8_t x_92; lean_object* x_93; lean_object* x_94; lean_object* x_95; lean_object* x_96; uint8_t x_97; lean_object* x_98; x_88 = lean_ctor_get(x_87, 1); lean_inc(x_88); lean_dec(x_87); @@ -23333,20 +23333,20 @@ x_95 = l_Lean_Core_setMessageLog(x_90, x_15, x_16, x_94); x_96 = lean_ctor_get(x_95, 1); lean_inc(x_96); lean_dec(x_95); +x_97 = 1; lean_inc(x_16); lean_inc(x_15); lean_inc(x_14); lean_inc(x_13); -x_97 = l_Lean_Elab_admitGoal(x_20, x_13, x_14, x_15, x_16, x_96); -if (lean_obj_tag(x_97) == 0) +x_98 = l_Lean_Elab_admitGoal(x_20, x_97, x_13, x_14, x_15, x_16, x_96); +if (lean_obj_tag(x_98) == 0) { -lean_object* x_98; uint8_t x_99; -x_98 = lean_ctor_get(x_97, 1); -lean_inc(x_98); -lean_dec(x_97); -x_99 = 1; -x_30 = x_99; -x_31 = x_98; +lean_object* x_99; +x_99 = lean_ctor_get(x_98, 1); +lean_inc(x_99); +lean_dec(x_98); +x_30 = x_97; +x_31 = x_99; goto block_39; } else @@ -23364,16 +23364,16 @@ lean_dec(x_11); lean_dec(x_10); lean_dec(x_9); lean_dec(x_3); -x_100 = lean_ctor_get(x_97, 0); +x_100 = lean_ctor_get(x_98, 0); lean_inc(x_100); -x_101 = lean_ctor_get(x_97, 1); +x_101 = lean_ctor_get(x_98, 1); lean_inc(x_101); -if (lean_is_exclusive(x_97)) { - lean_ctor_release(x_97, 0); - lean_ctor_release(x_97, 1); - x_102 = x_97; +if (lean_is_exclusive(x_98)) { + lean_ctor_release(x_98, 0); + lean_ctor_release(x_98, 1); + x_102 = x_98; } else { - lean_dec_ref(x_97); + lean_dec_ref(x_98); x_102 = lean_box(0); } if (lean_is_scalar(x_102)) { @@ -23678,7 +23678,7 @@ lean_inc(x_15); x_159 = l_Lean_Elab_logException___at_Lean_Elab_Tactic_closeUsingOrAdmit___spec__1(x_152, x_9, x_10, x_11, x_12, x_13, x_14, x_15, x_16, x_153); if (lean_obj_tag(x_159) == 0) { -lean_object* x_160; lean_object* x_161; lean_object* x_162; lean_object* x_163; uint8_t x_164; lean_object* x_165; lean_object* x_166; lean_object* x_167; lean_object* x_168; lean_object* x_169; +lean_object* x_160; lean_object* x_161; lean_object* x_162; lean_object* x_163; uint8_t x_164; lean_object* x_165; lean_object* x_166; lean_object* x_167; lean_object* x_168; uint8_t x_169; lean_object* x_170; x_160 = lean_ctor_get(x_159, 1); lean_inc(x_160); lean_dec(x_159); @@ -23697,20 +23697,20 @@ x_167 = l_Lean_Core_setMessageLog(x_162, x_15, x_16, x_166); x_168 = lean_ctor_get(x_167, 1); lean_inc(x_168); lean_dec(x_167); +x_169 = 1; lean_inc(x_16); lean_inc(x_15); lean_inc(x_14); lean_inc(x_13); -x_169 = l_Lean_Elab_admitGoal(x_117, x_13, x_14, x_15, x_16, x_168); -if (lean_obj_tag(x_169) == 0) +x_170 = l_Lean_Elab_admitGoal(x_117, x_169, x_13, x_14, x_15, x_16, x_168); +if (lean_obj_tag(x_170) == 0) { -lean_object* x_170; uint8_t x_171; -x_170 = lean_ctor_get(x_169, 1); -lean_inc(x_170); -lean_dec(x_169); -x_171 = 1; -x_127 = x_171; -x_128 = x_170; +lean_object* x_171; +x_171 = lean_ctor_get(x_170, 1); +lean_inc(x_171); +lean_dec(x_170); +x_127 = x_169; +x_128 = x_171; goto block_136; } else @@ -23728,16 +23728,16 @@ lean_dec(x_11); lean_dec(x_10); lean_dec(x_9); lean_dec(x_3); -x_172 = lean_ctor_get(x_169, 0); +x_172 = lean_ctor_get(x_170, 0); lean_inc(x_172); -x_173 = lean_ctor_get(x_169, 1); +x_173 = lean_ctor_get(x_170, 1); lean_inc(x_173); -if (lean_is_exclusive(x_169)) { - lean_ctor_release(x_169, 0); - lean_ctor_release(x_169, 1); - x_174 = x_169; +if (lean_is_exclusive(x_170)) { + lean_ctor_release(x_170, 0); + lean_ctor_release(x_170, 1); + x_174 = x_170; } else { - lean_dec_ref(x_169); + lean_dec_ref(x_170); x_174 = lean_box(0); } if (lean_is_scalar(x_174)) { diff --git a/stage0/stdlib/Lean/Elab/Tactic/Induction.c 
b/stage0/stdlib/Lean/Elab/Tactic/Induction.c index 5069f136ca53..5084fc79036d 100644 --- a/stage0/stdlib/Lean/Elab/Tactic/Induction.c +++ b/stage0/stdlib/Lean/Elab/Tactic/Induction.c @@ -914,7 +914,7 @@ LEAN_EXPORT lean_object* l___private_Lean_Elab_Tactic_Induction_0__Lean_Elab_Tac static lean_object* l_Lean_Expr_withAppAux___at___private_Lean_Elab_Tactic_Induction_0__Lean_Elab_Tactic_elabFunTarget___spec__5___lambda__2___closed__11; static lean_object* l___private_Lean_Elab_Tactic_Induction_0__Lean_Elab_Tactic_withAltsOfOptInductionAlts___rarg___lambda__1___closed__2; static size_t l_Array_zipWithAux___at_Lean_Elab_Tactic_ElimApp_evalAlts_goWithInfo___spec__2___lambda__1___closed__3; -lean_object* l_Lean_Elab_admitGoal(lean_object*, lean_object*, lean_object*, lean_object*, lean_object*, lean_object*); +lean_object* l_Lean_Elab_admitGoal(lean_object*, uint8_t, lean_object*, lean_object*, lean_object*, lean_object*, lean_object*); lean_object* l_Lean_Meta_Cases_unifyEqs_x3f(lean_object*, lean_object*, lean_object*, lean_object*, lean_object*, lean_object*, lean_object*, lean_object*, lean_object*); LEAN_EXPORT lean_object* l_Lean_Elab_withSaveInfoContext___at___private_Lean_Elab_Tactic_Induction_0__Lean_Elab_Tactic_ElimApp_saveAltVarsInfo___spec__2(lean_object*, lean_object*, lean_object*, lean_object*, lean_object*, lean_object*, lean_object*, lean_object*); LEAN_EXPORT lean_object* l_Lean_Elab_Tactic_ElimApp_mkElimApp_loop___lambda__2(lean_object*, lean_object*, lean_object*, lean_object*, lean_object*, lean_object*, lean_object*, lean_object*, lean_object*, lean_object*, lean_object*, lean_object*); @@ -16766,52 +16766,53 @@ return x_12; } else { -lean_object* x_13; lean_object* x_14; lean_object* x_15; +lean_object* x_13; lean_object* x_14; uint8_t x_15; lean_object* x_16; x_13 = lean_ctor_get(x_1, 0); lean_inc(x_13); x_14 = lean_ctor_get(x_1, 1); lean_inc(x_14); lean_dec(x_1); +x_15 = 1; lean_inc(x_9); lean_inc(x_8); lean_inc(x_7); lean_inc(x_6); -x_15 = l_Lean_Elab_admitGoal(x_13, x_6, x_7, x_8, x_9, x_10); -if (lean_obj_tag(x_15) == 0) +x_16 = l_Lean_Elab_admitGoal(x_13, x_15, x_6, x_7, x_8, x_9, x_10); +if (lean_obj_tag(x_16) == 0) { -lean_object* x_16; -x_16 = lean_ctor_get(x_15, 1); -lean_inc(x_16); -lean_dec(x_15); +lean_object* x_17; +x_17 = lean_ctor_get(x_16, 1); +lean_inc(x_17); +lean_dec(x_16); x_1 = x_14; -x_10 = x_16; +x_10 = x_17; goto _start; } else { -uint8_t x_18; +uint8_t x_19; lean_dec(x_14); lean_dec(x_9); lean_dec(x_8); lean_dec(x_7); lean_dec(x_6); -x_18 = !lean_is_exclusive(x_15); -if (x_18 == 0) +x_19 = !lean_is_exclusive(x_16); +if (x_19 == 0) { -return x_15; +return x_16; } else { -lean_object* x_19; lean_object* x_20; lean_object* x_21; -x_19 = lean_ctor_get(x_15, 0); -x_20 = lean_ctor_get(x_15, 1); +lean_object* x_20; lean_object* x_21; lean_object* x_22; +x_20 = lean_ctor_get(x_16, 0); +x_21 = lean_ctor_get(x_16, 1); +lean_inc(x_21); lean_inc(x_20); -lean_inc(x_19); -lean_dec(x_15); -x_21 = lean_alloc_ctor(1, 2, 0); -lean_ctor_set(x_21, 0, x_19); -lean_ctor_set(x_21, 1, x_20); -return x_21; +lean_dec(x_16); +x_22 = lean_alloc_ctor(1, 2, 0); +lean_ctor_set(x_22, 0, x_20); +lean_ctor_set(x_22, 1, x_21); +return x_22; } } } diff --git a/stage0/stdlib/Lean/Elab/Tactic/LibrarySearch.c b/stage0/stdlib/Lean/Elab/Tactic/LibrarySearch.c index 683022dfea9b..15d761649fa1 100644 --- a/stage0/stdlib/Lean/Elab/Tactic/LibrarySearch.c +++ b/stage0/stdlib/Lean/Elab/Tactic/LibrarySearch.c @@ -185,7 +185,7 @@ lean_object* 
l_Array_foldlMUnsafe_fold___at_Lean_Syntax_SepArray_getElems___spec lean_object* lean_array_get_size(lean_object*); uint8_t lean_nat_dec_le(lean_object*, lean_object*); uint8_t lean_usize_dec_lt(size_t, size_t); -lean_object* l_Lean_Elab_admitGoal(lean_object*, lean_object*, lean_object*, lean_object*, lean_object*, lean_object*); +lean_object* l_Lean_Elab_admitGoal(lean_object*, uint8_t, lean_object*, lean_object*, lean_object*, lean_object*, lean_object*); lean_object* l_Lean_MVarId_intros(lean_object*, lean_object*, lean_object*, lean_object*, lean_object*, lean_object*); static lean_object* l___regBuiltin_Lean_Elab_LibrarySearch_evalExact_declRange__1___closed__6; LEAN_EXPORT lean_object* l_Lean_Elab_LibrarySearch_exact_x3f___lambda__6(size_t, size_t, lean_object*, lean_object*, lean_object*, lean_object*, lean_object*, uint8_t, lean_object*, lean_object*, lean_object*, lean_object*, lean_object*, lean_object*, lean_object*, lean_object*, lean_object*); @@ -618,9 +618,10 @@ return x_25; LEAN_EXPORT lean_object* l_Lean_Elab_LibrarySearch_exact_x3f___lambda__4(lean_object* x_1, lean_object* x_2, lean_object* x_3, lean_object* x_4, lean_object* x_5, lean_object* x_6, lean_object* x_7, lean_object* x_8, lean_object* x_9, lean_object* x_10, lean_object* x_11) { _start: { -lean_object* x_12; -x_12 = l_Lean_Elab_admitGoal(x_1, x_7, x_8, x_9, x_10, x_11); -return x_12; +uint8_t x_12; lean_object* x_13; +x_12 = 0; +x_13 = l_Lean_Elab_admitGoal(x_1, x_12, x_7, x_8, x_9, x_10, x_11); +return x_13; } } static lean_object* _init_l_Lean_Elab_LibrarySearch_exact_x3f___lambda__5___closed__1() { @@ -700,35 +701,37 @@ lean_dec(x_24); x_26 = l_Array_isEmpty___rarg(x_2); if (x_26 == 0) { -lean_object* x_27; +uint8_t x_27; lean_object* x_28; lean_dec(x_11); lean_dec(x_10); lean_dec(x_9); lean_dec(x_8); -x_27 = l_Lean_Elab_admitGoal(x_6, x_12, x_13, x_14, x_15, x_25); -return x_27; +x_27 = 0; +x_28 = l_Lean_Elab_admitGoal(x_6, x_27, x_12, x_13, x_14, x_15, x_25); +return x_28; } else { -lean_object* x_28; uint8_t x_29; lean_object* x_30; lean_object* x_31; lean_object* x_32; -x_28 = l_Lean_Elab_LibrarySearch_exact_x3f___lambda__5___closed__5; -x_29 = 2; +lean_object* x_29; uint8_t x_30; lean_object* x_31; lean_object* x_32; uint8_t x_33; lean_object* x_34; +x_29 = l_Lean_Elab_LibrarySearch_exact_x3f___lambda__5___closed__5; +x_30 = 2; lean_inc(x_14); -x_30 = l_Lean_log___at_Lean_Elab_Tactic_closeUsingOrAdmit___spec__3(x_28, x_29, x_8, x_9, x_10, x_11, x_12, x_13, x_14, x_15, x_25); +x_31 = l_Lean_log___at_Lean_Elab_Tactic_closeUsingOrAdmit___spec__3(x_29, x_30, x_8, x_9, x_10, x_11, x_12, x_13, x_14, x_15, x_25); lean_dec(x_11); lean_dec(x_10); lean_dec(x_9); lean_dec(x_8); -x_31 = lean_ctor_get(x_30, 1); -lean_inc(x_31); -lean_dec(x_30); -x_32 = l_Lean_Elab_admitGoal(x_6, x_12, x_13, x_14, x_15, x_31); -return x_32; +x_32 = lean_ctor_get(x_31, 1); +lean_inc(x_32); +lean_dec(x_31); +x_33 = 0; +x_34 = l_Lean_Elab_admitGoal(x_6, x_33, x_12, x_13, x_14, x_15, x_32); +return x_34; } } else { -uint8_t x_33; +uint8_t x_35; lean_dec(x_15); lean_dec(x_14); lean_dec(x_13); @@ -738,23 +741,23 @@ lean_dec(x_10); lean_dec(x_9); lean_dec(x_8); lean_dec(x_6); -x_33 = !lean_is_exclusive(x_24); -if (x_33 == 0) +x_35 = !lean_is_exclusive(x_24); +if (x_35 == 0) { return x_24; } else { -lean_object* x_34; lean_object* x_35; lean_object* x_36; -x_34 = lean_ctor_get(x_24, 0); -x_35 = lean_ctor_get(x_24, 1); -lean_inc(x_35); -lean_inc(x_34); +lean_object* x_36; lean_object* x_37; lean_object* x_38; +x_36 = lean_ctor_get(x_24, 
0); +x_37 = lean_ctor_get(x_24, 1); +lean_inc(x_37); +lean_inc(x_36); lean_dec(x_24); -x_36 = lean_alloc_ctor(1, 2, 0); -lean_ctor_set(x_36, 0, x_34); -lean_ctor_set(x_36, 1, x_35); -return x_36; +x_38 = lean_alloc_ctor(1, 2, 0); +lean_ctor_set(x_38, 0, x_36); +lean_ctor_set(x_38, 1, x_37); +return x_38; } } } diff --git a/stage0/stdlib/Lean/Environment.c b/stage0/stdlib/Lean/Environment.c index e6dfe7d138a7..f8fa26e05db1 100644 --- a/stage0/stdlib/Lean/Environment.c +++ b/stage0/stdlib/Lean/Environment.c @@ -1,6 +1,6 @@ // Lean compiler output // Module: Lean.Environment -// Imports: Init.Control.StateRef Init.Data.Array.BinSearch Init.Data.Stream Init.System.Promise Lean.ImportingFlag Lean.Data.NameTrie Lean.Data.SMap Lean.Declaration Lean.LocalContext Lean.Util.Path Lean.Util.FindExpr Lean.Util.Profile Lean.Util.InstantiateLevelParams Lean.Util.FoldConsts Lean.PrivateName Lean.LoadDynlib Init.Dynamic +// Imports: Init.Control.StateRef Init.Data.Array.BinSearch Init.Data.Stream Init.System.Promise Lean.ImportingFlag Lean.Data.NameTrie Lean.Data.SMap Lean.Setup Lean.Declaration Lean.LocalContext Lean.Util.Path Lean.Util.FindExpr Lean.Util.Profile Lean.Util.InstantiateLevelParams Lean.Util.FoldConsts Lean.PrivateName Lean.LoadDynlib Init.Dynamic #include <lean/lean.h> #if defined(__clang__) #pragma clang diagnostic ignored "-Wunused-parameter" @@ -21,11 +21,10 @@ LEAN_EXPORT lean_object* l___private_Lean_Environment_0__Lean_EnvExtension_getSt LEAN_EXPORT lean_object* l_Lean_throwAlreadyImported___rarg___boxed(lean_object*, lean_object*, lean_object*, lean_object*, lean_object*); lean_object* l_Lean_Expr_const___override(lean_object*, lean_object*); LEAN_EXPORT lean_object* l_Lean_EnvExtension_AsyncMode_noConfusion___rarg(uint8_t, uint8_t, lean_object*); -LEAN_EXPORT lean_object* l_Array_forIn_x27Unsafe_loop___at_Lean_importModulesCore_go___spec__10___lambda__4___boxed(lean_object*, lean_object*, lean_object*, lean_object*, lean_object*, lean_object*, lean_object*); +LEAN_EXPORT lean_object* l_Array_forIn_x27Unsafe_loop___at_Lean_importModulesCore_go___spec__10___lambda__4___boxed(lean_object*, lean_object*, lean_object*); static lean_object* l_Lean_instBEqConstantKind___closed__1; LEAN_EXPORT lean_object* l_Lean_Environment_AddConstAsyncResult_commitConst___lambda__2(lean_object*, lean_object*, lean_object*, lean_object*, lean_object*, lean_object*, lean_object*); LEAN_EXPORT lean_object* l_Lean_instMonadEnvOfMonadLift(lean_object*, lean_object*); -static lean_object* l___private_Lean_Environment_0__Lean_reprImport____x40_Lean_Environment___hyg_237____closed__18; LEAN_EXPORT lean_object* l_Lean_instInhabitedVisibilityMap___rarg(lean_object*); lean_object* l_Lean_Name_reprPrec(lean_object*, lean_object*); LEAN_EXPORT lean_object* l_Lean_AsyncConstantInfo_isUnsafe___boxed(lean_object*); @@ -40,12 +39,11 @@ LEAN_EXPORT lean_object* l_Array_foldlMUnsafe_fold___at_Lean_Environment_display static lean_object* l___private_Lean_Environment_0__Lean_Environment_throwUnexpectedType___rarg___closed__2; LEAN_EXPORT lean_object* l_Lean_PersistentEnvExtension_findStateAsync___rarg___boxed(lean_object*, lean_object*, lean_object*, lean_object*); LEAN_EXPORT lean_object* l_panic___at_Lean_readModuleData___spec__1(lean_object*, lean_object*); -LEAN_EXPORT lean_object* l___private_Lean_Environment_0__Lean_reprConstantKind____x40_Lean_Environment___hyg_1396____boxed(lean_object*, lean_object*); +static lean_object* 
l___private_Lean_Environment_0__Lean_reprConstantKind____x40_Lean_Environment___hyg_1248____closed__24; LEAN_EXPORT lean_object* l_Lean_Environment_addConstAsync___boxed(lean_object*, lean_object*, lean_object*, lean_object*, lean_object*, lean_object*, lean_object*); LEAN_EXPORT lean_object* l_Array_forIn_x27Unsafe_loop___at___private_Lean_Environment_0__Lean_setImportedEntries___spec__3(lean_object*, lean_object*, lean_object*, lean_object*, lean_object*, lean_object*, size_t, size_t, lean_object*, lean_object*); LEAN_EXPORT lean_object* l_Std_DHashMap_Internal_Raw_u2080_expand_go___at_Lean_importModulesCore_go___spec__4(lean_object*, lean_object*, lean_object*); LEAN_EXPORT lean_object* l_Lean_instInhabitedPersistentEnvExtension___lambda__1___boxed(lean_object*, lean_object*, lean_object*); -static lean_object* l___private_Lean_Environment_0__Lean_reprConstantKind____x40_Lean_Environment___hyg_1396____closed__22; LEAN_EXPORT lean_object* l_Lean_PersistentHashMap_findAtAux___at_Lean_Kernel_Environment_find_x3f___spec__4(lean_object*, lean_object*, lean_object*, lean_object*, lean_object*); static lean_object* l_Lean_withExporting___rarg___lambda__4___closed__1; LEAN_EXPORT lean_object* l___private_Lean_Environment_0__Lean_Environment_findAsyncCore_x3f(lean_object*, lean_object*, uint8_t); @@ -56,9 +54,10 @@ LEAN_EXPORT lean_object* l_List_mapTR_loop___at_Lean_Environment_dbgFormatAsyncS LEAN_EXPORT lean_object* l_Lean_readModuleData(lean_object*, lean_object*); LEAN_EXPORT lean_object* l_Lean_mkModuleData___lambda__1(lean_object*, lean_object*, lean_object*); static lean_object* l_Lean_PersistentEnvExtensionDescr_name___autoParam___closed__5; -static lean_object* l___private_Lean_Environment_0__Lean_reprConstantKind____x40_Lean_Environment___hyg_1396____closed__24; LEAN_EXPORT lean_object* l_Lean_mkModuleData(lean_object*, uint8_t, lean_object*); LEAN_EXPORT uint8_t l_Lean_Environment_containsOnBranch(lean_object*, lean_object*); +static lean_object* l___private_Lean_Environment_0__Lean_reprConstantKind____x40_Lean_Environment___hyg_1248____closed__26; +static lean_object* l___private_Lean_Environment_0__Lean_reprConstantKind____x40_Lean_Environment___hyg_1248____closed__11; LEAN_EXPORT lean_object* l_Lean_Option_set___at_Lean_Environment_realizeConst___spec__3(lean_object*, lean_object*, uint8_t); static lean_object* l_Array_forIn_x27Unsafe_loop___at_Lean_importModules___spec__1___closed__2; LEAN_EXPORT lean_object* lean_elab_environment_of_kernel_env(lean_object*); @@ -73,9 +72,9 @@ LEAN_EXPORT lean_object* l_Lean_Environment_header___boxed(lean_object*); LEAN_EXPORT lean_object* l_Array_foldlMUnsafe_fold___at_Lean_finalizeImport___spec__18___boxed(lean_object*, lean_object*, lean_object*, lean_object*); LEAN_EXPORT lean_object* l_Lean_Environment_evalConst___boxed(lean_object*, lean_object*, lean_object*, lean_object*); size_t lean_usize_shift_right(size_t, size_t); -static lean_object* l___private_Lean_Environment_0__Lean_reprImport____x40_Lean_Environment___hyg_237____closed__19; LEAN_EXPORT uint8_t l_Lean_instDecidableEqOLeanLevel(uint8_t, uint8_t); LEAN_EXPORT lean_object* l_Lean_ImportStateM_run___rarg(lean_object*, lean_object*, lean_object*); +static lean_object* l___private_Lean_Environment_0__Lean_reprConstantKind____x40_Lean_Environment___hyg_1248____closed__30; LEAN_EXPORT lean_object* l_Lean_PersistentEnvExtension_modifyState___rarg___lambda__1(lean_object*, lean_object*); LEAN_EXPORT lean_object* l_Lean_Environment_addDeclCore(lean_object*, size_t, lean_object*, 
lean_object*, uint8_t); LEAN_EXPORT lean_object* l_Lean_finalizeImport___lambda__3(lean_object*, lean_object*, uint32_t, uint8_t, lean_object*, lean_object*, lean_object*, uint8_t, lean_object*, lean_object*, lean_object*, uint8_t, uint8_t, uint8_t, size_t, lean_object*, lean_object*, lean_object*); @@ -92,7 +91,6 @@ static lean_object* l___private_Lean_Environment_0__Lean_looksLikeOldCodegenName LEAN_EXPORT lean_object* l_Array_foldlMUnsafe_fold___at_Lean_finalizeImport___spec__2___boxed(lean_object*, lean_object*, lean_object*, lean_object*, lean_object*); LEAN_EXPORT lean_object* l_Lean_Kernel_Environment_Diagnostics_isEnabled___boxed(lean_object*); LEAN_EXPORT lean_object* l___private_Lean_Environment_0__Lean_Visibility_noConfusion___rarg___boxed(lean_object*, lean_object*, lean_object*); -static lean_object* l___private_Lean_Environment_0__Lean_reprConstantKind____x40_Lean_Environment___hyg_1396____closed__6; lean_object* lean_uint32_to_nat(uint32_t); LEAN_EXPORT lean_object* l___private_Lean_Environment_0__Lean_Environment_findAsyncCore_x3f___lambda__3___boxed(lean_object*, lean_object*, lean_object*, lean_object*); LEAN_EXPORT lean_object* l_Lean_Environment_AddConstAsyncResult_commitConst___lambda__1___boxed(lean_object*, lean_object*, lean_object*, lean_object*, lean_object*, lean_object*); @@ -106,8 +104,10 @@ static lean_object* l_Array_forIn_x27Unsafe_loop___at_Lean_importModulesCore_go_ static lean_object* l___private_Lean_Environment_0__Lean_looksLikeOldCodegenName___closed__12; static lean_object* l_Lean_readModuleData___closed__6; LEAN_EXPORT lean_object* l___private_Lean_Environment_0__Lean_AsyncConsts_findRecTask___lambda__1___boxed(lean_object*, lean_object*); -LEAN_EXPORT lean_object* l_Lean_importModules___lambda__2___boxed(lean_object*, lean_object*, lean_object*, lean_object*, lean_object*, lean_object*, lean_object*, lean_object*, lean_object*, lean_object*); +LEAN_EXPORT lean_object* l_Lean_importModules___lambda__2___boxed(lean_object*, lean_object*, lean_object*, lean_object*, lean_object*, lean_object*, lean_object*, lean_object*, lean_object*, lean_object*, lean_object*); LEAN_EXPORT lean_object* l_Lean_registerEnvExtension___rarg___lambda__1___boxed(lean_object*, lean_object*, lean_object*, lean_object*, lean_object*); +static lean_object* l___private_Lean_Environment_0__Lean_reprConstantKind____x40_Lean_Environment___hyg_1248____closed__35; +static lean_object* l_Lean_instImpl____x40_Lean_Environment___hyg_1873____closed__3; static lean_object* l_Lean_registerPersistentEnvExtensionUnsafe___rarg___closed__2; LEAN_EXPORT lean_object* l_Std_DHashMap_Internal_AssocList_get_x3f___at___private_Lean_Environment_0__Lean_setImportedEntries___spec__2___boxed(lean_object*, lean_object*); LEAN_EXPORT lean_object* l_Lean_instInhabitedModuleData; @@ -118,7 +118,8 @@ lean_object* l_Lean_profileitIOUnsafe___rarg(lean_object*, lean_object*, lean_ob static lean_object* l_Lean_instModuleIdxToString___closed__1; static lean_object* l___private_Lean_Environment_0__Lean_Environment_throwUnexpectedType___rarg___closed__3; LEAN_EXPORT lean_object* l_panic___at_Lean_Environment_replayConsts_replayKernel___spec__7(lean_object*, lean_object*); -LEAN_EXPORT lean_object* l_Lean_importModules___boxed(lean_object*, lean_object*, lean_object*, lean_object*, lean_object*, lean_object*, lean_object*, lean_object*); +LEAN_EXPORT lean_object* l_Lean_ModuleArtifacts_oleanParts(lean_object*); +LEAN_EXPORT lean_object* l_Lean_importModules___boxed(lean_object*, lean_object*, lean_object*, 
lean_object*, lean_object*, lean_object*, lean_object*, lean_object*, lean_object*); LEAN_EXPORT lean_object* l_Lean_Option_set___at_Lean_Environment_realizeConst___spec__3___boxed(lean_object*, lean_object*, lean_object*); LEAN_EXPORT lean_object* l_Array_foldlMUnsafe_fold___at_Lean_Environment_displayStats___spec__7(lean_object*, lean_object*, size_t, size_t, lean_object*, lean_object*); static lean_object* l_Lean_PersistentEnvExtensionDescr_name___autoParam___closed__12; @@ -130,17 +131,18 @@ LEAN_EXPORT lean_object* l___private_Lean_Environment_0__Lean_EnvExtension_findS LEAN_EXPORT lean_object* l_Lean_Kernel_enableDiag___boxed(lean_object*, lean_object*); LEAN_EXPORT lean_object* l_Lean_Environment_header(lean_object*); LEAN_EXPORT uint8_t l_Lean_Environment_isImportedConst(lean_object*, lean_object*); -static lean_object* l___private_Lean_Environment_0__Lean_reprConstantKind____x40_Lean_Environment___hyg_1396____closed__13; LEAN_EXPORT lean_object* l_panic___at_Lean_EnvExtension_modifyState___spec__4(lean_object*, lean_object*); uint8_t lean_usize_dec_le(size_t, size_t); lean_object* l_Lean_ConstantInfo_levelParams(lean_object*); +static lean_object* l___private_Lean_Environment_0__Lean_reprConstantKind____x40_Lean_Environment___hyg_1248____closed__20; +static lean_object* l___private_Lean_Environment_0__Lean_reprConstantKind____x40_Lean_Environment___hyg_1248____closed__34; LEAN_EXPORT lean_object* l_Lean_withoutExporting___rarg___lambda__3___boxed(lean_object*, lean_object*, lean_object*, lean_object*, lean_object*, lean_object*, lean_object*); LEAN_EXPORT lean_object* l_Lean_Environment_allImportedModuleNames___boxed(lean_object*); lean_object* lean_io_promise_new(lean_object*); LEAN_EXPORT lean_object* l_Lean_Environment_AddConstAsyncResult_commitSignature___lambda__1___boxed(lean_object*, lean_object*, lean_object*, lean_object*); LEAN_EXPORT lean_object* l___private_Lean_Environment_0__Lean_EnvExtension_findStateAsyncUnsafe_findRecExts_x3f___lambda__1(lean_object*, lean_object*, lean_object*); static lean_object* l_Lean_PersistentEnvExtensionDescr_name___autoParam___closed__20; -LEAN_EXPORT lean_object* l_Array_forIn_x27Unsafe_loop___at_Lean_importModulesCore_go___spec__10___lambda__4(lean_object*, uint8_t, uint8_t, lean_object*, lean_object*, lean_object*, lean_object*); +LEAN_EXPORT lean_object* l_Array_forIn_x27Unsafe_loop___at_Lean_importModulesCore_go___spec__10___lambda__4(lean_object*, lean_object*, lean_object*); lean_object* l_Lean_PrefixTreeNode_empty(lean_object*, lean_object*); LEAN_EXPORT lean_object* l_Array_foldlMUnsafe_fold___at_Lean_Environment_displayStats___spec__7___lambda__2(lean_object*, lean_object*, lean_object*, uint8_t, lean_object*, lean_object*); lean_object* l_Lean_mkAppB(lean_object*, lean_object*, lean_object*); @@ -153,7 +155,6 @@ lean_object* lean_add_decl(lean_object*, size_t, lean_object*, lean_object*); LEAN_EXPORT lean_object* l_Array_mapMUnsafe_map___at_Lean_mkModuleData___spec__7(size_t, size_t, lean_object*); static lean_object* l___private_Lean_Environment_0__Lean_Environment_mkFallbackConstInfo___closed__16; LEAN_EXPORT lean_object* l_Array_forIn_x27Unsafe_loop___at_Lean_finalizeImport___spec__15___boxed(lean_object*, lean_object*, lean_object*, lean_object*, lean_object*, lean_object*, lean_object*, lean_object*); -static lean_object* l___private_Lean_Environment_0__Lean_reprConstantKind____x40_Lean_Environment___hyg_1396____closed__38; LEAN_EXPORT lean_object* l___private_Lean_Environment_0__Lean_setImportedEntries(lean_object*, 
lean_object*, lean_object*, lean_object*); LEAN_EXPORT lean_object* l_List_mapTR_loop___at_Lean_Environment_realizeConst___spec__5(lean_object*, lean_object*, lean_object*); LEAN_EXPORT lean_object* l_Lean_finalizeImport_unsafe__3___boxed(lean_object*); @@ -162,8 +163,8 @@ LEAN_EXPORT lean_object* l_Lean_finalizeImport___lambda__1(lean_object*, lean_ob static lean_object* l_panic___at_Lean_importModulesCore_go___spec__1___closed__3; LEAN_EXPORT lean_object* l_Lean_withExporting___rarg___lambda__4(lean_object*, uint8_t, lean_object*, lean_object*, lean_object*, lean_object*); LEAN_EXPORT lean_object* l___private_Lean_Environment_0__Lean_subsumesInfo___boxed(lean_object*, lean_object*); -static lean_object* l___private_Lean_Environment_0__Lean_reprConstantKind____x40_Lean_Environment___hyg_1396____closed__39; LEAN_EXPORT lean_object* l_Std_Format_joinSep___at_Lean_Environment_dbgFormatAsyncState___spec__21(lean_object*, lean_object*); +static lean_object* l___private_Lean_Environment_0__Lean_AsyncConsts_add___closed__6; static lean_object* l_Lean_instModuleIdxBEq___closed__2; LEAN_EXPORT lean_object* l_Lean_Environment_addConstAsync___lambda__2(lean_object*, uint8_t, lean_object*); static lean_object* l_Lean_initFn____x40_Lean_Environment___hyg_6____closed__4; @@ -177,7 +178,7 @@ static lean_object* l_Lean_instGetElem_x3fArrayModuleIdxLtNatToNatSize___closed_ LEAN_EXPORT uint8_t l_Lean_Environment_hasUnsafe___lambda__1(lean_object*, lean_object*); LEAN_EXPORT lean_object* l_Lean_Environment_findConstVal_x3f(lean_object*, lean_object*, uint8_t); static lean_object* l___private_Lean_Environment_0__Lean_Environment_throwUnexpectedType___rarg___closed__1; -LEAN_EXPORT lean_object* l_Array_forIn_x27Unsafe_loop___at_Lean_importModulesCore_go___spec__10(lean_object*, uint8_t, lean_object*, lean_object*, size_t, size_t, lean_object*, lean_object*, lean_object*); +LEAN_EXPORT lean_object* l_Array_forIn_x27Unsafe_loop___at_Lean_importModulesCore_go___spec__10(lean_object*, uint8_t, lean_object*, lean_object*, lean_object*, size_t, size_t, lean_object*, lean_object*, lean_object*); LEAN_EXPORT lean_object* l_Lean_saveModuleDataParts___boxed(lean_object*, lean_object*, lean_object*); LEAN_EXPORT lean_object* l_panic___at_Lean_EnvExtension_modifyState___spec__2(lean_object*, lean_object*); lean_object* l_Array_findIdx_x3f_loop___rarg(lean_object*, lean_object*, lean_object*); @@ -192,12 +193,12 @@ lean_object* lean_array_push(lean_object*, lean_object*); lean_object* l_Array_toSubarray___rarg(lean_object*, lean_object*, lean_object*); LEAN_EXPORT lean_object* l_Std_DHashMap_Internal_AssocList_get_x3f___at_Lean_Environment_getModuleIdxFor_x3f___spec__1(lean_object*, lean_object*); LEAN_EXPORT lean_object* l_Lean_Environment_enableRealizationsForConst___lambda__2(lean_object*, lean_object*, lean_object*, lean_object*, lean_object*); +static lean_object* l___private_Lean_Environment_0__Lean_reprConstantKind____x40_Lean_Environment___hyg_1248____closed__19; LEAN_EXPORT lean_object* l_List_forIn_x27_loop___at_Lean_Environment_replayConsts_replayKernel___spec__9(uint8_t, lean_object*, size_t, lean_object*, lean_object*, lean_object*, lean_object*, lean_object*, lean_object*); -static lean_object* l_Array_forIn_x27Unsafe_loop___at_Lean_importModulesCore_go___spec__10___lambda__4___closed__1; static lean_object* l___private_Lean_Environment_0__Lean_looksLikeOldCodegenName___closed__10; LEAN_EXPORT uint8_t l_Lean_Environment_isRealizing(lean_object*); -static lean_object* 
l___private_Lean_Environment_0__Lean_reprConstantKind____x40_Lean_Environment___hyg_1396____closed__10; LEAN_EXPORT lean_object* l_Lean_PersistentEnvExtension_getModuleEntries(lean_object*, lean_object*, lean_object*); +LEAN_EXPORT lean_object* l___private_Lean_Environment_0__Lean_findOLeanParts___lambda__1(lean_object*, lean_object*, lean_object*); size_t lean_usize_mul(size_t, size_t); LEAN_EXPORT lean_object* l_Lean_Environment_realizeConst___lambda__1(lean_object*, lean_object*, lean_object*, lean_object*); static lean_object* l_Lean_initFn____x40_Lean_Environment___hyg_6____closed__2; @@ -205,6 +206,7 @@ LEAN_EXPORT lean_object* l_Lean_withoutExporting___rarg(lean_object*, lean_objec LEAN_EXPORT lean_object* l_MonadExcept_ofExcept___at_Lean_Environment_replayConsts_replayKernel___spec__2(lean_object*); LEAN_EXPORT lean_object* l_Lean_Environment_AddConstAsyncResult_commitCheckEnv___lambda__1(lean_object*, lean_object*, lean_object*, lean_object*); LEAN_EXPORT lean_object* l_Lean_PrefixTreeNode_findLongestPrefix_x3f___at___private_Lean_Environment_0__Lean_AsyncConsts_findPrefix_x3f___spec__1(lean_object*, lean_object*); +static lean_object* l___private_Lean_Environment_0__Lean_reprConstantKind____x40_Lean_Environment___hyg_1248____closed__41; LEAN_EXPORT lean_object* l_Array_anyMUnsafe_any___at_Lean_registerPersistentEnvExtensionUnsafe___spec__1___rarg___boxed(lean_object*, lean_object*, lean_object*, lean_object*); LEAN_EXPORT lean_object* l_Std_Range_forIn_x27_loop___at___private_Lean_Environment_0__Lean_setImportedEntries___spec__4___boxed(lean_object*, lean_object*, lean_object*, lean_object*, lean_object*, lean_object*, lean_object*, lean_object*, lean_object*); LEAN_EXPORT lean_object* l_Lean_SMap_numBuckets___at_Lean_Environment_displayStats___spec__4(lean_object*); @@ -215,31 +217,28 @@ static lean_object* l_Std_DHashMap_Internal_AssocList_get_x21___at_Lean_throwAlr LEAN_EXPORT lean_object* l_List_foldl___at_Lean_Environment_replayConsts___spec__1(uint8_t, lean_object*, lean_object*); LEAN_EXPORT lean_object* l_Lean_mkExtNameMap(lean_object*, lean_object*); LEAN_EXPORT lean_object* l_panic___at_Lean_EnvExtension_setState___spec__3(lean_object*, lean_object*); +static lean_object* l___private_Lean_Environment_0__Lean_reprConstantKind____x40_Lean_Environment___hyg_1248____closed__39; LEAN_EXPORT lean_object* l_Lean_PersistentHashMap_containsAtAux___at_Lean_Environment_addExtraName___spec__4___boxed(lean_object*, lean_object*, lean_object*, lean_object*, lean_object*); LEAN_EXPORT lean_object* l_Array_mapMUnsafe_map___at_Lean_mkModuleData___spec__1(lean_object*, uint8_t, size_t, size_t, lean_object*); uint8_t lean_usize_dec_eq(size_t, size_t); -LEAN_EXPORT lean_object* l_Lean_importModulesCore(lean_object*, uint8_t, lean_object*, lean_object*); +LEAN_EXPORT lean_object* l_Lean_importModulesCore(lean_object*, uint8_t, lean_object*, lean_object*, lean_object*); static lean_object* l_Lean_PersistentEnvExtensionDescr_name___autoParam___closed__14; static lean_object* l_Lean_Environment_realizeConst___closed__1; LEAN_EXPORT uint8_t l___private_Lean_Environment_0__Lean_subsumesInfo(lean_object*, lean_object*); -LEAN_EXPORT lean_object* l_Array_forIn_x27Unsafe_loop___at_Lean_importModulesCore_go___spec__10___lambda__1(lean_object*, uint8_t, uint8_t, lean_object*, lean_object*, lean_object*, lean_object*); +LEAN_EXPORT lean_object* l_Array_forIn_x27Unsafe_loop___at_Lean_importModulesCore_go___spec__10___lambda__1(lean_object*, uint8_t, lean_object*, lean_object*, lean_object*, 
lean_object*); LEAN_EXPORT lean_object* l_Lean_EnvExtension_modifyState_unsafe__3___rarg(lean_object*, lean_object*, lean_object*); static lean_object* l___private_Lean_Environment_0__Lean_looksLikeOldCodegenName___closed__11; LEAN_EXPORT lean_object* l_Lean_Environment_addConstAsync___lambda__6___boxed(lean_object*, lean_object*); LEAN_EXPORT lean_object* l_Std_Range_forIn_x27_loop___at___private_Lean_Environment_0__Lean_setImportedEntries___spec__4(lean_object*, lean_object*, lean_object*, lean_object*, lean_object*, lean_object*, lean_object*, lean_object*, lean_object*); LEAN_EXPORT uint8_t l_Lean_Environment_isConstructor(lean_object*, lean_object*); LEAN_EXPORT lean_object* l_Lean_PersistentHashMap_foldlMAux_traverse___at_Lean_Environment_dbgFormatAsyncState___spec__27___rarg___boxed(lean_object*, lean_object*, lean_object*, lean_object*, lean_object*, lean_object*); -static lean_object* l___private_Lean_Environment_0__Lean_reprConstantKind____x40_Lean_Environment___hyg_1396____closed__28; LEAN_EXPORT lean_object* l___private_Lean_Environment_0__Lean_VisibilityMap_get___rarg(lean_object*, lean_object*); -static lean_object* l___private_Lean_Environment_0__Lean_reprImport____x40_Lean_Environment___hyg_237____closed__3; lean_object* lean_mk_array(lean_object*, lean_object*); static lean_object* l_Lean_readModuleData___closed__1; -static lean_object* l_Lean_instImpl____x40_Lean_Environment___hyg_2021____closed__5; LEAN_EXPORT lean_object* l_Array_mapMUnsafe_map___at_Lean_mkModuleData___spec__7___boxed(lean_object*, lean_object*, lean_object*); static lean_object* l___private_Lean_Environment_0__Lean_Environment_findTaskCore___lambda__3___closed__1; static lean_object* l_Lean_Kernel_resetDiag___closed__1; LEAN_EXPORT lean_object* l_Lean_RBNode_revFold___at_Lean_Environment_dbgFormatAsyncState___spec__2(lean_object*, lean_object*); -static lean_object* l___private_Lean_Environment_0__Lean_reprConstantKind____x40_Lean_Environment___hyg_1396____closed__25; LEAN_EXPORT lean_object* l_Lean_PersistentEnvExtension_modifyState(lean_object*, lean_object*, lean_object*); LEAN_EXPORT lean_object* l_List_foldl___at_Lean_Environment_replayConsts___spec__2___boxed(lean_object*, lean_object*, lean_object*); static lean_object* l___private_Lean_Environment_0__Lean_AsyncConsts_add___closed__3; @@ -248,7 +247,6 @@ LEAN_EXPORT lean_object* l_panic___at_Lean_EnvExtension_setState___spec__4(lean_ static lean_object* l_Lean_Environment_ofKernelEnv___closed__1; LEAN_EXPORT lean_object* l_Lean_OLeanLevel_ofNat___boxed(lean_object*); LEAN_EXPORT lean_object* lean_elab_environment_to_kernel_env(lean_object*); -static lean_object* l___private_Lean_Environment_0__Lean_reprImport____x40_Lean_Environment___hyg_237____closed__7; LEAN_EXPORT lean_object* l_Lean_PersistentEnvExtension_getState(lean_object*, lean_object*, lean_object*); LEAN_EXPORT lean_object* l_Array_forIn_x27Unsafe_loop___at_Lean_Environment_replayConsts_replayKernel___spec__1(lean_object*, lean_object*, lean_object*, lean_object*, lean_object*, lean_object*, size_t, size_t, lean_object*); lean_object* lean_array_fset(lean_object*, lean_object*, lean_object*); @@ -266,6 +264,7 @@ LEAN_EXPORT lean_object* l_Lean_OLeanLevel_toCtorIdx(uint8_t); LEAN_EXPORT lean_object* lean_environment_find(lean_object*, lean_object*); static lean_object* l_Lean_registerPersistentEnvExtensionUnsafe___rarg___lambda__3___closed__1; static lean_object* l_Lean_instInhabitedEnvExtension___closed__1; +static lean_object* 
l___private_Lean_Environment_0__Lean_reprConstantKind____x40_Lean_Environment___hyg_1248____closed__50; LEAN_EXPORT lean_object* l_Lean_Environment_addConstAsync___lambda__1___boxed(lean_object*, lean_object*, lean_object*); LEAN_EXPORT lean_object* l___private_Lean_Environment_0__Lean_VisibilityMap_get(lean_object*); LEAN_EXPORT lean_object* l_Lean_EnvExtension_modifyState___rarg(lean_object*, lean_object*, lean_object*, uint8_t); @@ -278,13 +277,16 @@ static lean_object* l_Lean_PersistentEnvExtensionDescr_name___autoParam___closed static lean_object* l___private_Lean_Environment_0__Lean_EnvExtension_getStateUnsafe___rarg___closed__3; LEAN_EXPORT lean_object* l_Lean_instInhabitedAsyncConsts; static lean_object* l_Lean_Environment_AddConstAsyncResult_commitConst___closed__2; +static lean_object* l_Lean_EnvExtension_initFn____x40_Lean_Environment___hyg_6645____closed__1; LEAN_EXPORT lean_object* l_Lean_Environment_find_x3f(lean_object*, lean_object*, uint8_t); static lean_object* l_Lean_Environment_realizeConst___lambda__5___closed__3; -LEAN_EXPORT lean_object* l___private_Lean_Environment_0__Lean_beqConstantKind____x40_Lean_Environment___hyg_1378____boxed(lean_object*, lean_object*); +LEAN_EXPORT lean_object* l___private_Lean_Environment_0__Lean_beqConstantKind____x40_Lean_Environment___hyg_1230____boxed(lean_object*, lean_object*); LEAN_EXPORT lean_object* l_Lean_Environment_importEnv_x3f(lean_object*); static lean_object* l_Lean_Environment_ofKernelEnv___closed__2; +LEAN_EXPORT lean_object* l___private_Lean_Environment_0__Lean_findOLeanParts(lean_object*, lean_object*); static size_t l_Lean_PersistentHashMap_findAux___at_Lean_Kernel_Environment_find_x3f___spec__3___closed__1; lean_object* l_Std_DHashMap_Raw_Internal_numBuckets___rarg(lean_object*); +static lean_object* l_Lean_instImpl____x40_Lean_Environment___hyg_1873____closed__9; lean_object* l_Lean_RBNode_insert___at_Lean_NameSet_insert___spec__1(lean_object*, lean_object*, lean_object*); LEAN_EXPORT lean_object* l___private_Lean_Environment_0__Lean_Environment_findAsyncCore_x3f___lambda__1(lean_object*); LEAN_EXPORT lean_object* l_Lean_Environment_addConstAsync___lambda__3___boxed(lean_object*); @@ -293,7 +295,6 @@ LEAN_EXPORT lean_object* l_Lean_finalizeImport_unsafe__2(lean_object*, lean_obje LEAN_EXPORT lean_object* l_Lean_PersistentHashMap_foldlMAux_traverse___at_Lean_Environment_dbgFormatAsyncState___spec__27___rarg(lean_object*, lean_object*, lean_object*, lean_object*, lean_object*, lean_object*); static lean_object* l_Lean_instInhabitedEnvExtension___lambda__1___closed__1; extern lean_object* l_instInhabitedError; -LEAN_EXPORT lean_object* l_Array_forIn_x27Unsafe_loop___at_Lean_importModulesCore_go___spec__10___lambda__5(lean_object*, lean_object*, lean_object*); LEAN_EXPORT lean_object* l_Lean_Environment_find_x3f___boxed(lean_object*, lean_object*, lean_object*); LEAN_EXPORT lean_object* l_Lean_Environment_imports___boxed(lean_object*); static lean_object* l_Prod_repr___at_Lean_Environment_dbgFormatAsyncState___spec__14___closed__5; @@ -305,14 +306,16 @@ static lean_object* l_Lean_Environment_dbgFormatAsyncState___closed__4; LEAN_EXPORT lean_object* l_Lean_Environment_dbgFormatCheckedSyncState(lean_object*, lean_object*); LEAN_EXPORT lean_object* l___private_Lean_Environment_0__Lean_AsyncConsts_findRec_x3f___lambda__1___boxed(lean_object*, lean_object*, lean_object*); LEAN_EXPORT lean_object* l___private_Lean_Environment_0__Lean_finalizePersistentExtensions_loop___boxed(lean_object*, lean_object*, lean_object*, 
lean_object*, lean_object*); +LEAN_EXPORT lean_object* l_Lean_mkModuleData___lambda__2___boxed(lean_object*, lean_object*, lean_object*); LEAN_EXPORT lean_object* l_List_forIn_x27_loop___at_Lean_Environment_replayConsts_replayKernel___spec__9___lambda__3(lean_object*, size_t, lean_object*, lean_object*, lean_object*); LEAN_EXPORT lean_object* l_Lean_getNumBuiltinAttributes___boxed(lean_object*); lean_object* l_List_find_x3f___rarg(lean_object*, lean_object*); static lean_object* l_Lean_PersistentEnvExtensionDescr_name___autoParam___closed__9; -static lean_object* l___private_Lean_Environment_0__Lean_reprImport____x40_Lean_Environment___hyg_237____closed__22; LEAN_EXPORT uint8_t l___private_Lean_Environment_0__Lean_AsyncContext_mayContain(lean_object*, lean_object*); LEAN_EXPORT lean_object* l_Array_mapMUnsafe_map___at_Lean_finalizeImport___spec__11(size_t, size_t, lean_object*); +static lean_object* l___private_Lean_Environment_0__Lean_findOLeanParts___closed__1; LEAN_EXPORT lean_object* l_panic___at_Lean_Environment_enableRealizationsForConst___spec__1(lean_object*, lean_object*); +static lean_object* l___private_Lean_Environment_0__Lean_reprConstantKind____x40_Lean_Environment___hyg_1248____closed__16; LEAN_EXPORT lean_object* l___private_Lean_Environment_0__Lean_Environment_enterAsync(lean_object*, lean_object*); LEAN_EXPORT lean_object* l_Lean_Kernel_getDiagnostics(lean_object*); LEAN_EXPORT lean_object* l_Lean_ImportStateM_run(lean_object*); @@ -320,7 +323,8 @@ static lean_object* l_Lean_PersistentEnvExtensionDescr_name___autoParam___closed LEAN_EXPORT lean_object* l_panic___at_Lean_EnvExtension_setState___spec__2(lean_object*, lean_object*); static lean_object* l_Lean_instInhabitedAsyncConstantInfo___closed__5; LEAN_EXPORT lean_object* l_Lean_PersistentHashMap_insertAux_traverse___at_Lean_Kernel_Environment_Diagnostics_recordUnfold___spec__6___boxed(lean_object*, lean_object*, lean_object*, lean_object*, lean_object*, lean_object*); -LEAN_EXPORT lean_object* l_Lean_importModules___lambda__1(uint8_t, lean_object*, lean_object*, uint32_t, uint8_t, uint8_t, lean_object*, lean_object*); +LEAN_EXPORT lean_object* l_Lean_importModules___lambda__1(uint8_t, lean_object*, lean_object*, lean_object*, uint32_t, uint8_t, uint8_t, lean_object*, lean_object*); +static lean_object* l___private_Lean_Environment_0__Lean_reprConstantKind____x40_Lean_Environment___hyg_1248____closed__14; LEAN_EXPORT lean_object* l_Lean_EnvExtension_modifyState___rarg___lambda__1___boxed(lean_object*, lean_object*, lean_object*); LEAN_EXPORT uint8_t l___private_Lean_Environment_0__Lean_looksLikeOldCodegenName(lean_object*); LEAN_EXPORT lean_object* l_Lean_finalizeImport_unsafe__1(lean_object*, lean_object*); @@ -335,14 +339,15 @@ uint8_t l_Lean_ConstantInfo_isUnsafe(lean_object*); LEAN_EXPORT lean_object* l___private_Lean_Environment_0__Lean_Environment_findTaskCore___boxed(lean_object*, lean_object*, lean_object*); LEAN_EXPORT lean_object* l_Lean_PersistentEnvExtension_findStateAsync(lean_object*, lean_object*, lean_object*); LEAN_EXPORT lean_object* l_Std_DHashMap_Internal_Raw_u2080_expand_go___at_Lean_mkExtNameMap___spec__3(lean_object*, lean_object*, lean_object*); +static lean_object* l___private_Lean_Environment_0__Lean_reprConstantKind____x40_Lean_Environment___hyg_1248____closed__29; LEAN_EXPORT lean_object* l_Lean_OLeanLevel_noConfusion___rarg(uint8_t, uint8_t, lean_object*); LEAN_EXPORT lean_object* l_Lean_ConstantKind_noConfusion___rarg___boxed(lean_object*, lean_object*, lean_object*); +static lean_object* 
l___private_Lean_Environment_0__Lean_reprConstantKind____x40_Lean_Environment___hyg_1248____closed__44; LEAN_EXPORT lean_object* l___private_Lean_Environment_0__Lean_mkInitialExtensionStates(lean_object*); LEAN_EXPORT lean_object* l_Lean_EnvExtension_modifyState_unsafe__2(lean_object*); LEAN_EXPORT lean_object* l_Lean_Environment_addExtraName(lean_object*, lean_object*); LEAN_EXPORT lean_object* l___private_Lean_Environment_0__Lean_EnvExtension_setStateImpl___rarg(lean_object*, lean_object*, lean_object*); lean_object* l_EStateM_instMonad(lean_object*, lean_object*); -static lean_object* l___private_Lean_Environment_0__Lean_reprConstantKind____x40_Lean_Environment___hyg_1396____closed__26; static lean_object* l_Lean_PersistentEnvExtensionDescr_name___autoParam___closed__8; lean_object* l_IO_Promise_isResolved___rarg(lean_object*, lean_object*); lean_object* l___private_Lean_Data_NameTrie_0__Lean_toKey(lean_object*); @@ -366,30 +371,28 @@ LEAN_EXPORT lean_object* l_Lean_Environment_containsOnBranch___boxed(lean_object lean_object* l_System_FilePath_pathExists(lean_object*, lean_object*); LEAN_EXPORT lean_object* l_Lean_withoutExporting___rarg___lambda__2(lean_object*, lean_object*, uint8_t, lean_object*, lean_object*, lean_object*, lean_object*); uint8_t l_Lean_Name_isPrefixOf(lean_object*, lean_object*); -static lean_object* l___private_Lean_Environment_0__Lean_reprConstantKind____x40_Lean_Environment___hyg_1396____closed__8; LEAN_EXPORT uint8_t l_Lean_PersistentHashMap_containsAtAux___at_Lean_Environment_addExtraName___spec__4(lean_object*, lean_object*, lean_object*, lean_object*, lean_object*); lean_object* lean_read_module_data_parts(lean_object*, lean_object*); LEAN_EXPORT lean_object* l_Lean_instInhabitedEnvExtension(lean_object*); LEAN_EXPORT lean_object* l_Lean_withExporting___rarg___boxed(lean_object*, lean_object*, lean_object*, lean_object*, lean_object*, lean_object*); -LEAN_EXPORT uint8_t l___private_Lean_Environment_0__Lean_beqConstantKind____x40_Lean_Environment___hyg_1378_(uint8_t, uint8_t); LEAN_EXPORT lean_object* l_Lean_finalizeImport___boxed(lean_object*, lean_object*, lean_object*, lean_object*, lean_object*, lean_object*, lean_object*, lean_object*); lean_object* l_Lean_ConstantInfo_value_x21(lean_object*, uint8_t); LEAN_EXPORT lean_object* l_List_repr___at_Lean_Environment_dbgFormatAsyncState___spec__8(lean_object*, lean_object*); lean_object* lean_string_utf8_byte_size(lean_object*); -static lean_object* l_Lean_instInhabitedImport___closed__1; LEAN_EXPORT lean_object* l_Std_DHashMap_Internal_AssocList_foldlM___at_Lean_mkExtNameMap___spec__4(lean_object*, lean_object*); LEAN_EXPORT lean_object* l_Lean_Kernel_enableDiag___lambda__1(uint8_t, lean_object*); LEAN_EXPORT lean_object* l_Array_forIn_x27Unsafe_loop___at_Lean_finalizeImport___spec__16(size_t, lean_object*, lean_object*, lean_object*, lean_object*, size_t, size_t, lean_object*, lean_object*); lean_object* lean_string_push(lean_object*, uint32_t); lean_object* l_Lean_Level_ofNat(lean_object*); -static lean_object* l_Lean_instImpl____x40_Lean_Environment___hyg_2021____closed__6; LEAN_EXPORT lean_object* l_Lean_Environment_replayConsts_replayKernel___lambda__1(lean_object*, lean_object*); -LEAN_EXPORT lean_object* l_Array_forIn_x27Unsafe_loop___at_Lean_importModulesCore_go___spec__10___boxed(lean_object*, lean_object*, lean_object*, lean_object*, lean_object*, lean_object*, lean_object*, lean_object*, lean_object*); +LEAN_EXPORT lean_object* 
l_Array_forIn_x27Unsafe_loop___at_Lean_importModulesCore_go___spec__10___boxed(lean_object*, lean_object*, lean_object*, lean_object*, lean_object*, lean_object*, lean_object*, lean_object*, lean_object*, lean_object*); uint8_t l_Std_Format_isNil(lean_object*); LEAN_EXPORT lean_object* l___private_Lean_Environment_0__Lean_Kernel_Environment_isQuotInit___boxed(lean_object*); LEAN_EXPORT lean_object* l_Lean_withExporting___rarg___lambda__3(lean_object*); LEAN_EXPORT lean_object* l_Lean_RBNode_revFold___at_Lean_Environment_dbgFormatAsyncState___spec__2___boxed(lean_object*, lean_object*); +static lean_object* l___private_Lean_Environment_0__Lean_reprConstantKind____x40_Lean_Environment___hyg_1248____closed__15; LEAN_EXPORT lean_object* l_Lean_Kernel_enableDiag(lean_object*, uint8_t); +static lean_object* l___private_Lean_Environment_0__Lean_reprConstantKind____x40_Lean_Environment___hyg_1248____closed__3; static lean_object* l___private_Lean_Environment_0__Lean_EnvExtension_modifyStateImpl___rarg___closed__1; LEAN_EXPORT lean_object* l_Lean_Environment_findConstVal_x3f___lambda__1___boxed(lean_object*, lean_object*, lean_object*, lean_object*); LEAN_EXPORT lean_object* l_panic___at_Lean_Environment_replayConsts_replayKernel___spec__5(lean_object*, lean_object*); @@ -399,7 +402,6 @@ LEAN_EXPORT lean_object* l_Array_foldlMUnsafe_fold___at_Lean_finalizeImport___sp LEAN_EXPORT lean_object* lean_elab_environment_update_base_after_kernel_add(lean_object*, lean_object*, lean_object*); static lean_object* l___private_Lean_Environment_0__Lean_Environment_mkFallbackConstInfo___closed__9; LEAN_EXPORT uint8_t l_Lean_PersistentHashMap_containsAux___at_Lean_Environment_addExtraName___spec__3(lean_object*, size_t, lean_object*); -static lean_object* l___private_Lean_Environment_0__Lean_reprImport____x40_Lean_Environment___hyg_237____closed__26; extern lean_object* l_instInhabitedPUnit; LEAN_EXPORT uint8_t l_Lean_Kernel_isDiagnosticsEnabled(lean_object*); static lean_object* l_Lean_EnvExtension_modifyState___rarg___closed__3; @@ -416,6 +418,7 @@ static lean_object* l_List_repr___at_Lean_Environment_dbgFormatAsyncState___spec static lean_object* l_Array_forIn_x27Unsafe_loop___at_Lean_importModulesCore_go___spec__10___lambda__1___closed__1; lean_object* l_Lean_PrefixTreeNode_findLongestPrefix_x3f_loop___rarg(lean_object*, lean_object*, lean_object*, lean_object*); LEAN_EXPORT lean_object* l_Lean_Environment_AddConstAsyncResult_commitSignature___lambda__1(lean_object*, lean_object*, lean_object*, lean_object*); +static lean_object* l___private_Lean_Environment_0__Lean_reprConstantKind____x40_Lean_Environment___hyg_1248____closed__33; LEAN_EXPORT lean_object* l___private_Lean_Environment_0__Lean_Environment_isReservedName___boxed(lean_object*, lean_object*); LEAN_EXPORT lean_object* lean_environment_mark_quot_init(lean_object*); LEAN_EXPORT lean_object* l_List_mapTR_loop___at_Lean_Environment_dbgFormatAsyncState___spec__30(lean_object*, lean_object*); @@ -439,6 +442,7 @@ LEAN_EXPORT lean_object* l_Lean_PersistentEnvExtension_modifyState___rarg(lean_o size_t lean_usize_of_nat(lean_object*); static lean_object* l_Lean_Environment_promiseChecked___closed__1; LEAN_EXPORT lean_object* l_panic___at___private_Lean_Environment_0__Lean_EnvExtension_modifyStateImpl___spec__1(lean_object*, lean_object*); +static lean_object* l___private_Lean_Environment_0__Lean_reprConstantKind____x40_Lean_Environment___hyg_1248____closed__32; LEAN_EXPORT lean_object* 
l___private_Lean_Environment_0__Lean_AsyncConsts_findRecTask___lambda__2___boxed(lean_object*, lean_object*, lean_object*); LEAN_EXPORT lean_object* l_Lean_Environment_find_x3f___lambda__1___boxed(lean_object*, lean_object*, lean_object*, lean_object*); LEAN_EXPORT lean_object* l_Lean_Environment_addConstAsync___lambda__2___boxed(lean_object*, lean_object*, lean_object*); @@ -449,8 +453,8 @@ LEAN_EXPORT lean_object* l_Lean_PersistentHashMap_insertAux___at___private_Lean_ LEAN_EXPORT lean_object* l_Lean_Environment_unlockAsync(lean_object*); LEAN_EXPORT lean_object* l___private_Lean_Environment_0__Lean_Environment_setCheckedSync(lean_object*, lean_object*); LEAN_EXPORT lean_object* l_Lean_registerPersistentEnvExtensionUnsafe___rarg___lambda__3___boxed(lean_object*, lean_object*, lean_object*); -static lean_object* l___private_Lean_Environment_0__Lean_reprConstantKind____x40_Lean_Environment___hyg_1396____closed__32; LEAN_EXPORT lean_object* l_Lean_debug_skipKernelTC; +static lean_object* l_Lean_instImpl____x40_Lean_Environment___hyg_1873____closed__1; LEAN_EXPORT lean_object* l___private_Lean_Environment_0__Lean_EnvExtension_getStateUnsafe___rarg___boxed(lean_object*, lean_object*, lean_object*, lean_object*); static lean_object* l_Lean_PersistentEnvExtensionDescr_name___autoParam___closed__26; lean_object* l_panic___rarg(lean_object*, lean_object*); @@ -460,7 +464,6 @@ LEAN_EXPORT lean_object* l_Lean_Environment_replayConsts___boxed(lean_object*, l static lean_object* l_Lean_instInhabitedPersistentEnvExtension___closed__2; LEAN_EXPORT lean_object* l_Lean_Environment_findTask___boxed(lean_object*, lean_object*, lean_object*); static lean_object* l_Lean_getMaxHeight___closed__2; -static lean_object* l___private_Lean_Environment_0__Lean_reprConstantKind____x40_Lean_Environment___hyg_1396____closed__35; LEAN_EXPORT lean_object* l___private_Lean_Environment_0__Lean_Environment_addDeclWithoutChecking___boxed(lean_object*, lean_object*); static lean_object* l_Lean_Environment_displayStats___closed__3; LEAN_EXPORT lean_object* l_Lean_EnvExtension_modifyState_unsafe__1(lean_object*); @@ -473,19 +476,16 @@ static lean_object* l___private_Lean_Environment_0__Lean_Environment_mkFallbackC LEAN_EXPORT lean_object* l___private_Lean_Environment_0__Lean_EnvExtension_getStateUnsafe___rarg(lean_object*, lean_object*, lean_object*, uint8_t); LEAN_EXPORT lean_object* l_List_forIn_x27_loop___at_Lean_Environment_replayConsts_replayKernel___spec__9___lambda__1(size_t, lean_object*, lean_object*, lean_object*); static lean_object* l_Lean_Kernel_instInhabitedDiagnostics___closed__1; -LEAN_EXPORT lean_object* l_Lean_importModulesCore_go___boxed(lean_object*, lean_object*, lean_object*, lean_object*); +LEAN_EXPORT lean_object* l_Lean_importModulesCore_go___boxed(lean_object*, lean_object*, lean_object*, lean_object*, lean_object*); LEAN_EXPORT lean_object* l_Std_DHashMap_Internal_AssocList_get_x3f___at_Lean_importModulesCore_go___spec__7(lean_object*, lean_object*); LEAN_EXPORT lean_object* l_Lean_SMap_insert___at___private_Lean_Environment_0__Lean_Kernel_Environment_add___spec__1(lean_object*, lean_object*, lean_object*); LEAN_EXPORT lean_object* l___private_Lean_Environment_0__Lean_AsyncConsts_findRec_x3f(lean_object*, lean_object*); -static lean_object* l___private_Lean_Environment_0__Lean_reprImport____x40_Lean_Environment___hyg_237____closed__11; static lean_object* l_Lean_instInhabitedAsyncConstantInfo___closed__7; static lean_object* l_Lean_instInhabitedAsyncConstantInfo___closed__2; -LEAN_EXPORT lean_object* 
l_Lean_instCoeNameImport(lean_object*); static lean_object* l___private_Lean_Environment_0__Lean_Environment_mkFallbackConstInfo___closed__4; LEAN_EXPORT lean_object* l___private_Lean_Environment_0__Lean_ImportedModule_publicModule_x3f(lean_object*); static lean_object* l_Lean_PersistentEnvExtensionDescr_name___autoParam___closed__10; lean_object* lean_st_ref_take(lean_object*, lean_object*); -static lean_object* l___private_Lean_Environment_0__Lean_reprConstantKind____x40_Lean_Environment___hyg_1396____closed__15; static lean_object* l_Std_DHashMap_Internal_AssocList_get_x21___at_Lean_throwAlreadyImported___spec__1___closed__1; LEAN_EXPORT lean_object* l_Array_mapMUnsafe_map___at_Lean_finalizeImport___spec__3(size_t, size_t, lean_object*, lean_object*); LEAN_EXPORT lean_object* l_Lean_Environment_isImportedConst___boxed(lean_object*, lean_object*); @@ -496,13 +496,14 @@ LEAN_EXPORT lean_object* l_Array_mapMUnsafe_map___at_Lean_mkModuleData___spec__1 LEAN_EXPORT lean_object* l_Lean_PersistentHashMap_insertAux___at_Lean_Kernel_Environment_Diagnostics_recordUnfold___spec__5(lean_object*, size_t, size_t, lean_object*, lean_object*); uint8_t lean_nat_dec_eq(lean_object*, lean_object*); lean_object* lean_io_set_heartbeats(lean_object*, lean_object*); +static lean_object* l___private_Lean_Environment_0__Lean_reprConstantKind____x40_Lean_Environment___hyg_1248____closed__5; LEAN_EXPORT lean_object* l_Lean_getMaxHeight(lean_object*, lean_object*); -LEAN_EXPORT lean_object* l_Lean_instToStringImport(lean_object*); uint8_t lean_expr_eqv(lean_object*, lean_object*); LEAN_EXPORT lean_object* l___private_Lean_Environment_0__Lean_AsyncConsts_findRecTask___lambda__2(lean_object*, lean_object*, lean_object*); static lean_object* l_Lean_Environment_addConstAsync___lambda__8___closed__1; LEAN_EXPORT uint8_t l_Array_anyMUnsafe_any___at_Lean_registerPersistentEnvExtensionUnsafe___spec__1___rarg(lean_object*, lean_object*, size_t, size_t); LEAN_EXPORT lean_object* l___private_Lean_Environment_0__Lean_EnvExtension_findStateAsyncUnsafe(lean_object*); +static lean_object* l___private_Lean_Environment_0__Lean_reprConstantKind____x40_Lean_Environment___hyg_1248____closed__46; LEAN_EXPORT lean_object* l___private_Lean_Environment_0__Lean_Environment_findAsyncCore_x3f___lambda__3(uint8_t, lean_object*, lean_object*, lean_object*); LEAN_EXPORT lean_object* l_Lean_finalizeImport___lambda__2(lean_object*, lean_object*, uint8_t, lean_object*, uint8_t, lean_object*, lean_object*, lean_object*); lean_object* l_Lean_NameTrie_find_x3f___rarg(lean_object*, lean_object*); @@ -517,10 +518,8 @@ static lean_object* l_Lean_PrefixTreeNode_findLongestPrefix_x3f___at___private_L uint64_t lean_uint64_shift_right(uint64_t, uint64_t); static lean_object* l_Std_DHashMap_Internal_AssocList_get_x21___at_Lean_throwAlreadyImported___spec__1___closed__4; LEAN_EXPORT lean_object* l_Lean_instInhabitedAsyncConstantInfo; -static lean_object* l___private_Lean_Environment_0__Lean_reprImport____x40_Lean_Environment___hyg_237____closed__17; LEAN_EXPORT lean_object* l_Lean_PersistentEnvExtension_getModuleEntries___rarg(lean_object*, lean_object*, lean_object*, lean_object*, uint8_t); static lean_object* l_panic___at_Lean_Environment_enableRealizationsForConst___spec__1___closed__1; -static lean_object* l___private_Lean_Environment_0__Lean_reprImport____x40_Lean_Environment___hyg_237____closed__25; LEAN_EXPORT lean_object* l_Array_filterMapM___at_Lean_finalizeImport___spec__1(lean_object*, lean_object*, lean_object*, lean_object*); LEAN_EXPORT 
lean_object* l_Lean_PersistentHashMap_containsAux___at_Lean_Environment_addExtraName___spec__3___boxed(lean_object*, lean_object*, lean_object*); LEAN_EXPORT lean_object* l_Array_foldlMUnsafe_fold___at_Lean_Environment_freeRegions___spec__1(lean_object*, size_t, size_t, lean_object*, lean_object*); @@ -542,6 +541,7 @@ LEAN_EXPORT lean_object* l___private_Lean_Environment_0__Lean_VisibilityMap_get_ LEAN_EXPORT lean_object* l_Lean_EnvExtension_modifyState_unsafe__3___rarg___boxed(lean_object*, lean_object*, lean_object*); LEAN_EXPORT lean_object* l_List_repr___at_Lean_Environment_dbgFormatAsyncState___spec__12(lean_object*, lean_object*); LEAN_EXPORT lean_object* l_Lean_ConstantKind_toCtorIdx(uint8_t); +static lean_object* l___private_Lean_Environment_0__Lean_reprConstantKind____x40_Lean_Environment___hyg_1248____closed__47; LEAN_EXPORT lean_object* l_Lean_PersistentEnvExtension_addEntry___rarg(lean_object*, lean_object*, lean_object*); static lean_object* l___private_Lean_Environment_0__Lean_Environment_mkFallbackConstInfo___closed__13; LEAN_EXPORT lean_object* l_Array_foldlMUnsafe_fold___at_Lean_Environment_displayStats___spec__7___lambda__2___boxed(lean_object*, lean_object*, lean_object*, lean_object*, lean_object*, lean_object*); @@ -549,7 +549,6 @@ LEAN_EXPORT lean_object* l_Lean_Kernel_Environment_isDiagnosticsEnabled___boxed( LEAN_EXPORT lean_object* l_Lean_Environment_enableRealizationsForConst___lambda__3___boxed(lean_object*, lean_object*, lean_object*, lean_object*, lean_object*); LEAN_EXPORT lean_object* l_Lean_mkEmptyEnvironment___lambda__1(uint32_t, lean_object*, lean_object*); LEAN_EXPORT lean_object* l_Lean_instInhabitedVisibilityMap(lean_object*); -static lean_object* l___private_Lean_Environment_0__Lean_reprConstantKind____x40_Lean_Environment___hyg_1396____closed__4; LEAN_EXPORT lean_object* l_Lean_withExporting___rarg___lambda__5___boxed(lean_object*, lean_object*, lean_object*, lean_object*, lean_object*, lean_object*, lean_object*, lean_object*); LEAN_EXPORT lean_object* l_Array_forIn_x27Unsafe_loop___at_Lean_finalizeImport___spec__8___lambda__1(lean_object*, lean_object*, lean_object*, lean_object*, lean_object*, lean_object*, lean_object*); LEAN_EXPORT lean_object* l_Lean_withExporting___rarg___lambda__4___boxed(lean_object*, lean_object*, lean_object*, lean_object*, lean_object*, lean_object*); @@ -562,19 +561,18 @@ static lean_object* l_Option_repr___at_Lean_Environment_dbgFormatAsyncState___sp lean_object* lean_eval_const(lean_object*, lean_object*, lean_object*); LEAN_EXPORT lean_object* l_Lean_RBNode_fold___at_Lean_mkModuleData___spec__4(lean_object*, lean_object*); LEAN_EXPORT lean_object* l_Lean_Kernel_Environment_resetDiag(lean_object*); -static lean_object* l___private_Lean_Environment_0__Lean_reprConstantKind____x40_Lean_Environment___hyg_1396____closed__21; LEAN_EXPORT lean_object* l_Lean_instModuleIdxToString; LEAN_EXPORT lean_object* l_Lean_registerEnvExtension_unsafe__1___rarg___boxed(lean_object*); LEAN_EXPORT lean_object* l_Lean_Environment_realizeConst___lambda__1___boxed(lean_object*, lean_object*, lean_object*, lean_object*); LEAN_EXPORT lean_object* l_Lean_getMaxHeight___boxed__const__1; LEAN_EXPORT uint8_t l_Lean_AsyncConstantInfo_isUnsafe(lean_object*); static lean_object* l_List_repr___at_Lean_Environment_dbgFormatAsyncState___spec__8___closed__6; +static lean_object* l___private_Lean_Environment_0__Lean_reprConstantKind____x40_Lean_Environment___hyg_1248____closed__18; static lean_object* l_Lean_instInhabitedAsyncConstantInfo___closed__3; 
static lean_object* l_Lean_mkEmptyEnvironment___closed__1; LEAN_EXPORT lean_object* l_Lean_instMonadEnvOfMonadLift___rarg___lambda__1(lean_object*, lean_object*, lean_object*); -LEAN_EXPORT lean_object* l_Lean_importModules___lambda__1___boxed(lean_object*, lean_object*, lean_object*, lean_object*, lean_object*, lean_object*, lean_object*, lean_object*); +LEAN_EXPORT lean_object* l_Lean_importModules___lambda__1___boxed(lean_object*, lean_object*, lean_object*, lean_object*, lean_object*, lean_object*, lean_object*, lean_object*, lean_object*); LEAN_EXPORT lean_object* l_Lean_Environment_AddConstAsyncResult_commitConst___lambda__1(lean_object*, lean_object*, lean_object*, lean_object*, lean_object*, lean_object*); -static lean_object* l___private_Lean_Environment_0__Lean_reprImport____x40_Lean_Environment___hyg_237____closed__4; LEAN_EXPORT lean_object* l_Lean_instInhabitedPersistentEnvExtension(lean_object*, lean_object*, lean_object*, lean_object*); LEAN_EXPORT lean_object* l_Lean_Kernel_isDefEq___boxed(lean_object*, lean_object*, lean_object*, lean_object*); LEAN_EXPORT lean_object* l_Lean_Environment_displayStats(lean_object*, lean_object*); @@ -586,19 +584,24 @@ LEAN_EXPORT lean_object* l_Lean_Environment_addConstAsync___lambda__4___boxed(le static lean_object* l_Lean_PersistentEnvExtensionDescr_name___autoParam___closed__21; LEAN_EXPORT lean_object* l_Lean_Environment_AddConstAsyncResult_commitCheckEnv(lean_object*, lean_object*, lean_object*); static lean_object* l___private_Lean_Environment_0__Lean_looksLikeOldCodegenName___closed__5; +static lean_object* l___private_Lean_Environment_0__Lean_reprConstantKind____x40_Lean_Environment___hyg_1248____closed__25; +static lean_object* l___private_Lean_Environment_0__Lean_Environment_mkFallbackConstInfo___closed__17; static lean_object* l_Prod_repr___at_Lean_Environment_dbgFormatAsyncState___spec__14___closed__3; LEAN_EXPORT lean_object* l_panic___at_Lean_EnvExtension_setState___spec__1(lean_object*, lean_object*); LEAN_EXPORT lean_object* l_Lean_Environment_PromiseCheckedResult_commitChecked___boxed(lean_object*, lean_object*, lean_object*); LEAN_EXPORT lean_object* l___private_Lean_Environment_0__Lean_Environment_findTaskCore___lambda__2(lean_object*, lean_object*); static lean_object* l_Array_foldlMUnsafe_fold___at_Lean_Environment_displayStats___spec__7___closed__1; +static lean_object* l___private_Lean_Environment_0__Lean_reprConstantKind____x40_Lean_Environment___hyg_1248____closed__43; static lean_object* l_Lean_throwAlreadyImported___rarg___closed__1; LEAN_EXPORT lean_object* l_Lean_SMap_find_x3f_x27___at_Lean_Kernel_Environment_find_x3f___spec__1___boxed(lean_object*, lean_object*); static lean_object* l_Lean_Environment_PromiseCheckedResult_commitChecked___closed__2; +static lean_object* l___private_Lean_Environment_0__Lean_reprConstantKind____x40_Lean_Environment___hyg_1248____closed__38; LEAN_EXPORT lean_object* l_Lean_Environment_replayConsts_replayKernel___lambda__1___boxed(lean_object*, lean_object*); static lean_object* l_Lean_EnvExtension_modifyState___rarg___closed__4; static lean_object* l___private_Lean_Environment_0__Lean_looksLikeOldCodegenName___closed__3; LEAN_EXPORT lean_object* l_Lean_PersistentHashMap_insertAtCollisionNodeAux___at_Lean_Kernel_Environment_Diagnostics_recordUnfold___spec__7(lean_object*, lean_object*, lean_object*, lean_object*); LEAN_EXPORT lean_object* l_List_foldl___at_Lean_Environment_realizeConst___spec__1(lean_object*, lean_object*); +LEAN_EXPORT lean_object* 
l___private_Lean_Environment_0__Lean_AsyncConsts_add___lambda__1___boxed(lean_object*); LEAN_EXPORT lean_object* l_Lean_withExporting___rarg___lambda__6___boxed(lean_object*, lean_object*, lean_object*, lean_object*, lean_object*, lean_object*, lean_object*, lean_object*); lean_object* lean_st_ref_get(lean_object*, lean_object*); lean_object* l_Lean_mkPtrSet___rarg(lean_object*); @@ -609,14 +612,12 @@ LEAN_EXPORT lean_object* l_Lean_EnvExtension_modifyState___rarg___lambda__2(lean static lean_object* l_Lean_Environment_displayStats___closed__4; LEAN_EXPORT lean_object* l_List_forIn_x27_loop___at_Lean_Environment_replayConsts_replayKernel___spec__9___lambda__2(size_t, lean_object*, lean_object*, lean_object*, lean_object*, lean_object*); static lean_object* l_Array_forIn_x27Unsafe_loop___at_Lean_importModules___spec__1___closed__1; -LEAN_EXPORT lean_object* l_Lean_importModules___lambda__2(lean_object*, uint8_t, lean_object*, lean_object*, uint32_t, uint8_t, uint8_t, size_t, lean_object*, lean_object*); +LEAN_EXPORT lean_object* l_Lean_importModules___lambda__2(lean_object*, uint8_t, lean_object*, lean_object*, lean_object*, uint32_t, uint8_t, uint8_t, size_t, lean_object*, lean_object*); uint8_t l_List_isEmpty___rarg(lean_object*); static lean_object* l_List_foldl___at___private_Lean_Environment_0__Lean_Environment_updateBaseAfterKernelAdd___spec__1___closed__4; LEAN_EXPORT uint8_t l_Lean_Environment_asyncMayContain(lean_object*, lean_object*); LEAN_EXPORT lean_object* l_Array_foldlMUnsafe_fold___at_Lean_finalizeImport___spec__19(lean_object*, size_t, size_t, lean_object*); lean_object* lean_st_mk_ref(lean_object*, lean_object*); -LEAN_EXPORT lean_object* l_Lean_EnvExtension_initFn____x40_Lean_Environment___hyg_6793_(lean_object*); -static lean_object* l___private_Lean_Environment_0__Lean_reprImport____x40_Lean_Environment___hyg_237____closed__27; static lean_object* l_Lean_Environment_enableRealizationsForConst___lambda__3___closed__2; LEAN_EXPORT lean_object* l___private_Lean_Environment_0__Lean_EnvExtension_findStateAsyncUnsafe___rarg___lambda__2___boxed(lean_object*, lean_object*, lean_object*, lean_object*, lean_object*); LEAN_EXPORT lean_object* l_Lean_Environment_isSafeDefinition___boxed(lean_object*, lean_object*); @@ -627,10 +628,8 @@ static lean_object* l_Lean_instInhabitedModuleData___closed__1; LEAN_EXPORT lean_object* l___private_Lean_Environment_0__Lean_ensureExtensionsArraySize(lean_object*, lean_object*); LEAN_EXPORT lean_object* l_Lean_PersistentHashMap_contains___at_Lean_Environment_addExtraName___spec__2___boxed(lean_object*, lean_object*); LEAN_EXPORT lean_object* l_Array_foldlMUnsafe_fold___at_Lean_finalizeImport___spec__14(size_t, lean_object*, size_t, size_t, lean_object*); -static lean_object* l___private_Lean_Environment_0__Lean_reprConstantKind____x40_Lean_Environment___hyg_1396____closed__30; LEAN_EXPORT lean_object* l_Lean_Environment_replayConsts___lambda__1(lean_object*, lean_object*, uint8_t, lean_object*, lean_object*, lean_object*); LEAN_EXPORT uint8_t l_Lean_PersistentHashMap_contains___at_Lean_Environment_addExtraName___spec__2(lean_object*, lean_object*); -static lean_object* l___private_Lean_Environment_0__Lean_reprImport____x40_Lean_Environment___hyg_237____closed__6; lean_object* l_Lean_Name_num___override(lean_object*, lean_object*); LEAN_EXPORT lean_object* l_Std_DHashMap_Internal_numBuckets___at_Lean_Environment_displayStats___spec__5(lean_object*); LEAN_EXPORT lean_object* 
l_Std_DHashMap_Internal_Raw_u2080_expand___at_Lean_mkExtNameMap___spec__2(lean_object*); @@ -640,33 +639,28 @@ LEAN_EXPORT lean_object* l_Lean_withExporting___rarg___lambda__3___boxed(lean_ob LEAN_EXPORT lean_object* l_Array_mapMUnsafe_map___at_Lean_EnvExtension_mkInitialExtStates___spec__1(size_t, size_t, lean_object*, lean_object*); static lean_object* l_Lean_Environment_dbgFormatCheckedSyncState___closed__1; LEAN_EXPORT lean_object* l_Lean_withImportModules___rarg(lean_object*, lean_object*, lean_object*, uint32_t, lean_object*); -static lean_object* l___private_Lean_Environment_0__Lean_reprImport____x40_Lean_Environment___hyg_237____closed__5; LEAN_EXPORT lean_object* l_Std_DHashMap_Internal_AssocList_get_x3f___at_Lean_importModulesCore_go___spec__7___boxed(lean_object*, lean_object*); LEAN_EXPORT lean_object* l_Lean_OLeanLevel_noConfusion___rarg___boxed(lean_object*, lean_object*, lean_object*); lean_object* l_EStateM_pure___rarg(lean_object*, lean_object*); LEAN_EXPORT lean_object* l_Lean_Environment_importEnv_x3f_unsafe__1(lean_object*); -static lean_object* l___private_Lean_Environment_0__Lean_reprConstantKind____x40_Lean_Environment___hyg_1396____closed__40; LEAN_EXPORT lean_object* l___private_Lean_Environment_0__Lean_EnvExtension_getStateImpl(lean_object*); static lean_object* l_Array_forIn_x27Unsafe_loop___at_Lean_importModulesCore_go___spec__10___closed__1; LEAN_EXPORT lean_object* l_Lean_instInhabitedPersistentEnvExtensionState(lean_object*, lean_object*); static lean_object* l_Lean_Environment_AddConstAsyncResult_commitConst___lambda__2___closed__1; LEAN_EXPORT lean_object* l_Lean_Environment_addDeclCore___lambda__1___boxed(lean_object*, lean_object*, lean_object*, lean_object*, lean_object*, lean_object*); lean_object* lean_io_map_task(lean_object*, lean_object*, lean_object*, uint8_t, lean_object*); -static lean_object* l___private_Lean_Environment_0__Lean_reprConstantKind____x40_Lean_Environment___hyg_1396____closed__48; -static lean_object* l___private_Lean_Environment_0__Lean_reprConstantKind____x40_Lean_Environment___hyg_1396____closed__18; static lean_object* l_Option_repr___at_Lean_Environment_dbgFormatAsyncState___spec__17___closed__1; uint8_t l_List_beq___at___private_Lean_Declaration_0__Lean_beqConstantVal____x40_Lean_Declaration___hyg_431____spec__1(lean_object*, lean_object*); LEAN_EXPORT lean_object* l_Lean_ConstantKind_toCtorIdx___boxed(lean_object*); LEAN_EXPORT lean_object* l_Lean_PersistentHashMap_findAux___at_Lean_Kernel_Environment_find_x3f___spec__3(lean_object*, size_t, lean_object*); lean_object* l_Lean_privateToUserName(lean_object*); -LEAN_EXPORT lean_object* l_Lean_instImpl____x40_Lean_Environment___hyg_2021_; -static lean_object* l___private_Lean_Environment_0__Lean_reprConstantKind____x40_Lean_Environment___hyg_1396____closed__1; +static lean_object* l_Lean_instImpl____x40_Lean_Environment___hyg_1873____closed__5; lean_object* l_Array_get_x21Internal___boxed(lean_object*, lean_object*, lean_object*, lean_object*); LEAN_EXPORT lean_object* l_Lean_withExporting___rarg___lambda__2___boxed(lean_object*, lean_object*); LEAN_EXPORT uint8_t l_Lean_instInhabitedConstantKind; static lean_object* l_Lean_registerEnvExtension___rarg___closed__2; -static lean_object* l_Array_forIn_x27Unsafe_loop___at_Lean_importModulesCore_go___spec__10___lambda__4___closed__2; LEAN_EXPORT uint8_t lean_environment_quot_init(lean_object*); +static lean_object* l___private_Lean_Environment_0__Lean_findOLeanParts___closed__3; static lean_object* 
l_Lean_EnvExtension_modifyState___rarg___closed__6; LEAN_EXPORT uint8_t l_Std_DHashMap_Internal_AssocList_contains___at___private_Lean_Environment_0__Lean_Kernel_Environment_add___spec__6(lean_object*, lean_object*); static lean_object* l_Lean_instInhabitedAsyncConstantInfo___closed__8; @@ -679,12 +673,9 @@ lean_object* l_System_FilePath_addExtension(lean_object*, lean_object*); LEAN_EXPORT lean_object* l_List_repr___at_Lean_Environment_dbgFormatAsyncState___spec__18(lean_object*, lean_object*); LEAN_EXPORT lean_object* l___private_Lean_Environment_0__Lean_Visibility_toCtorIdx___boxed(lean_object*); static lean_object* l_Lean_instGetElem_x3fArrayModuleIdxLtNatToNatSize___closed__1; -static lean_object* l___private_Lean_Environment_0__Lean_reprImport____x40_Lean_Environment___hyg_237____closed__24; LEAN_EXPORT lean_object* l_Lean_Environment_setMainModule(lean_object*, lean_object*); LEAN_EXPORT lean_object* l_Lean_Environment_addDeclCore___lambda__1(uint8_t, lean_object*, lean_object*, size_t, lean_object*, lean_object*); -static lean_object* l_Lean_instToStringImport___closed__1; -static lean_object* l___private_Lean_Environment_0__Lean_reprConstantKind____x40_Lean_Environment___hyg_1396____closed__33; -static lean_object* l_Lean_instImpl____x40_Lean_Environment___hyg_2021____closed__1; +LEAN_EXPORT lean_object* l_Lean_PersistentHashMap_foldlM___at_Lean_mkModuleData___spec__8(lean_object*, lean_object*, lean_object*); LEAN_EXPORT lean_object* l___private_Lean_Environment_0__Lean_looksLikeOldCodegenName___boxed(lean_object*); LEAN_EXPORT lean_object* l_Lean_PersistentHashMap_foldlM___at_Lean_Environment_dbgFormatAsyncState___spec__24(lean_object*, lean_object*, lean_object*); LEAN_EXPORT lean_object* l___private_Lean_Environment_0__Lean_Environment_enterAsyncRealizing(lean_object*, lean_object*); @@ -695,7 +686,6 @@ lean_object* l_IO_print___at_IO_println___spec__1(lean_object*, lean_object*); static lean_object* l_Std_DHashMap_Internal_AssocList_get_x21___at_Lean_throwAlreadyImported___spec__1___closed__2; LEAN_EXPORT lean_object* l_Lean_PersistentEnvExtension_getModuleEntries___rarg___boxed(lean_object*, lean_object*, lean_object*, lean_object*, lean_object*); LEAN_EXPORT lean_object* l_panic___at_Lean_Environment_replayConsts_replayKernel___spec__6(lean_object*, lean_object*); -static lean_object* l___private_Lean_Environment_0__Lean_reprConstantKind____x40_Lean_Environment___hyg_1396____closed__23; LEAN_EXPORT lean_object* l_Lean_Environment_addExtraName___lambda__1(lean_object*, lean_object*); extern lean_object* l_Task_Priority_default; static lean_object* l_List_foldl___at_Lean_Environment_dbgFormatAsyncState___spec__32___closed__1; @@ -705,13 +695,12 @@ LEAN_EXPORT lean_object* l_panic___at___private_Lean_Environment_0__Lean_EnvExte static lean_object* l_Lean_instInhabitedAsyncConsts___closed__2; LEAN_EXPORT lean_object* l_Lean_withoutExporting___rarg___lambda__3(lean_object*, lean_object*, lean_object*, lean_object*, lean_object*, lean_object*, lean_object*); uint8_t lean_name_eq(lean_object*, lean_object*); -static lean_object* l___private_Lean_Environment_0__Lean_reprConstantKind____x40_Lean_Environment___hyg_1396____closed__20; LEAN_EXPORT lean_object* l_Lean_Environment_realizeConst_unsafe__1___boxed(lean_object*); LEAN_EXPORT lean_object* l_Lean_Environment_find_x3f___lambda__1(lean_object*, lean_object*, uint8_t, lean_object*); LEAN_EXPORT lean_object* l_Std_DHashMap_Internal_AssocList_replace___at_Lean_mkExtNameMap___spec__5(lean_object*, lean_object*, lean_object*); 
+static lean_object* l___private_Lean_Environment_0__Lean_reprConstantKind____x40_Lean_Environment___hyg_1248____closed__4; LEAN_EXPORT lean_object* l_Lean_RBMap_toList___at_Lean_Environment_dbgFormatAsyncState___spec__4(lean_object*); lean_object* l_Lean_Name_str___override(lean_object*, lean_object*); -LEAN_EXPORT lean_object* l_Lean_initFn____x40_Lean_Environment___hyg_8688_(lean_object*); LEAN_EXPORT lean_object* l_Std_DHashMap_Internal_AssocList_get_x3f___at___private_Lean_Environment_0__Lean_setImportedEntries___spec__2(lean_object*, lean_object*); LEAN_EXPORT uint8_t l_Lean_ConstantKind_ofConstantInfo(lean_object*); LEAN_EXPORT lean_object* l_Array_forIn_x27Unsafe_loop___at_Lean_finalizeImport___spec__9(lean_object*, lean_object*, lean_object*, lean_object*, size_t, size_t, lean_object*, lean_object*); @@ -719,24 +708,19 @@ LEAN_EXPORT lean_object* l_List_mapTR_loop___at_Lean_Environment_dbgFormatAsyncS LEAN_EXPORT lean_object* l_panic___at_Lean_EnvExtension_modifyState___spec__3(lean_object*, lean_object*); LEAN_EXPORT lean_object* l_Lean_OLeanLevel_adjustFileName___boxed(lean_object*, lean_object*); lean_object* l_instToStringNat(lean_object*); -LEAN_EXPORT lean_object* l_Lean_instToStringImport___lambda__1___boxed(lean_object*); LEAN_EXPORT lean_object* l_List_repr___at_Lean_Environment_dbgFormatAsyncState___spec__18___boxed(lean_object*, lean_object*); LEAN_EXPORT lean_object* l_Lean_Kernel_setDiagnostics___lambda__1(lean_object*, lean_object*); LEAN_EXPORT lean_object* l_Lean_AsyncConstantInfo_toConstantInfo(lean_object*); LEAN_EXPORT lean_object* l_Lean_ConstantInfo_instantiateValueLevelParams_x21(lean_object*, lean_object*); -LEAN_EXPORT uint8_t l_Lean_instToStringImport___lambda__1(lean_object*); +LEAN_EXPORT lean_object* l___private_Lean_Environment_0__Lean_findOLeanParts___lambda__1___boxed(lean_object*, lean_object*, lean_object*); LEAN_EXPORT lean_object* l___private_Lean_Environment_0__Lean_finalizePersistentExtensions_loop___lambda__1(lean_object*, lean_object*, lean_object*, lean_object*, lean_object*, lean_object*); extern lean_object* l_instMonadBaseIO; LEAN_EXPORT lean_object* l_Lean_registerPersistentEnvExtensionUnsafe___boxed(lean_object*, lean_object*, lean_object*, lean_object*); -static lean_object* l___private_Lean_Environment_0__Lean_reprConstantKind____x40_Lean_Environment___hyg_1396____closed__37; lean_object* l___private_Init_Util_0__mkPanicMessageWithDecl(lean_object*, lean_object*, lean_object*, lean_object*, lean_object*); static lean_object* l_Lean_Environment_AddConstAsyncResult_commitConst___lambda__3___closed__1; -static lean_object* l___private_Lean_Environment_0__Lean_reprConstantKind____x40_Lean_Environment___hyg_1396____closed__50; -static lean_object* l___private_Lean_Environment_0__Lean_reprImport____x40_Lean_Environment___hyg_237____closed__8; LEAN_EXPORT lean_object* l_Lean_Kernel_setDiagnostics(lean_object*, lean_object*); static lean_object* l_List_repr___at_Lean_Environment_dbgFormatAsyncState___spec__8___closed__8; lean_object* lean_update_env_attributes(lean_object*, lean_object*); -static lean_object* l___private_Lean_Environment_0__Lean_reprConstantKind____x40_Lean_Environment___hyg_1396____closed__42; LEAN_EXPORT lean_object* l___private_Lean_Environment_0__Lean_setImportedEntries_unsafe__2___lambda__1(lean_object*, lean_object*, lean_object*); LEAN_EXPORT lean_object* l___private_Lean_Environment_0__Lean_EnvExtension_setStateImpl___rarg___boxed(lean_object*, lean_object*, lean_object*); LEAN_EXPORT lean_object* 
l_Lean_withoutExporting___rarg___lambda__1(lean_object*); @@ -744,23 +728,19 @@ static lean_object* l_Lean_PersistentEnvExtensionDescr_name___autoParam___closed static lean_object* l_List_repr___at_Lean_Environment_dbgFormatAsyncState___spec__8___closed__3; static lean_object* l_Array_forIn_x27Unsafe_loop___at_Lean_importModulesCore_go___spec__8___closed__1; LEAN_EXPORT lean_object* l___private_Lean_Environment_0__Lean_setImportedEntries_unsafe__2___lambda__1___boxed(lean_object*, lean_object*, lean_object*); +static lean_object* l___private_Lean_Environment_0__Lean_reprConstantKind____x40_Lean_Environment___hyg_1248____closed__36; static lean_object* l_Array_forIn_x27Unsafe_loop___at_Lean_importModulesCore_go___spec__10___lambda__2___closed__2; LEAN_EXPORT lean_object* l_Lean_ConstantInfo_instantiateValueLevelParams_x21___boxed(lean_object*, lean_object*); LEAN_EXPORT lean_object* l_Lean_EnvExtension_ensureExtensionsArraySize_loop(lean_object*, lean_object*, lean_object*); LEAN_EXPORT lean_object* l___private_Lean_Environment_0__Lean_EnvExtension_findStateAsyncUnsafe___rarg(lean_object*, lean_object*, lean_object*, lean_object*); static lean_object* l_List_forIn_x27_loop___at_Lean_Environment_replayConsts_replayKernel___spec__9___lambda__2___closed__2; LEAN_EXPORT lean_object* l_Lean_EnvExtension_setState___rarg___lambda__1(lean_object*, lean_object*); -static lean_object* l_Lean_instImpl____x40_Lean_Environment___hyg_2021____closed__2; LEAN_EXPORT lean_object* l_Lean_PersistentEnvExtension_getModuleEntries_unsafe__1(lean_object*, lean_object*, lean_object*); -static lean_object* l___private_Lean_Environment_0__Lean_reprConstantKind____x40_Lean_Environment___hyg_1396____closed__43; +LEAN_EXPORT lean_object* l___private_Lean_Environment_0__Lean_findOLeanParts___lambda__2(lean_object*, lean_object*, lean_object*); LEAN_EXPORT lean_object* l_Lean_RBMap_toList___at_Lean_Environment_dbgFormatAsyncState___spec__4___boxed(lean_object*); LEAN_EXPORT lean_object* l_Lean_Environment_findTask(lean_object*, lean_object*, uint8_t); -static lean_object* l___private_Lean_Environment_0__Lean_reprConstantKind____x40_Lean_Environment___hyg_1396____closed__46; -static lean_object* l_Lean_instImpl____x40_Lean_Environment___hyg_2021____closed__7; -static lean_object* l___private_Lean_Environment_0__Lean_reprImport____x40_Lean_Environment___hyg_237____closed__20; static lean_object* l_Lean_readModuleData___closed__5; LEAN_EXPORT lean_object* l_Lean_Environment_getModuleIdx_x3f___lambda__1___boxed(lean_object*, lean_object*); -static lean_object* l___private_Lean_Environment_0__Lean_reprImport____x40_Lean_Environment___hyg_237____closed__23; lean_object* l_Lean_Expr_instantiateLevelParams(lean_object*, lean_object*, lean_object*); LEAN_EXPORT lean_object* l_Lean_Environment_AddConstAsyncResult_commitConst___boxed(lean_object*, lean_object*, lean_object*, lean_object*, lean_object*); LEAN_EXPORT lean_object* l_Lean_Environment_PromiseCheckedResult_commitChecked(lean_object*, lean_object*, lean_object*); @@ -773,7 +753,7 @@ LEAN_EXPORT lean_object* l_Lean_withImportModules(lean_object*); LEAN_EXPORT lean_object* l_Lean_instDecidableEqOLeanLevel___boxed(lean_object*, lean_object*); LEAN_EXPORT lean_object* l_Lean_Environment_promiseChecked___lambda__1(lean_object*, lean_object*); static lean_object* l_Lean_withoutExporting___rarg___lambda__2___closed__1; -LEAN_EXPORT lean_object* l_Lean_instInhabitedImport; +static lean_object* l_List_repr___at_Lean_Environment_dbgFormatAsyncState___spec__8___closed__11; static 
lean_object* l_Lean_Environment_addConstAsync___lambda__8___closed__2; static lean_object* l_Lean_mkEmptyEnvironment___closed__2; lean_object* lean_task_get_own(lean_object*); @@ -782,10 +762,9 @@ lean_object* l_Lean_Declaration_getTopLevelNames(lean_object*); LEAN_EXPORT lean_object* l_Std_DHashMap_Internal_AssocList_contains___at_Lean_mkExtNameMap___spec__1___boxed(lean_object*, lean_object*); LEAN_EXPORT uint8_t l_Lean_Environment_hasUnsafe(lean_object*, lean_object*); LEAN_EXPORT lean_object* l_Array_foldlMUnsafe_fold___at_Lean_Environment_freeRegions___spec__1___boxed(lean_object*, lean_object*, lean_object*, lean_object*, lean_object*); -static lean_object* l___private_Lean_Environment_0__Lean_reprImport____x40_Lean_Environment___hyg_237____closed__2; LEAN_EXPORT lean_object* l_Lean_Kernel_Environment_enableDiag___boxed(lean_object*, lean_object*); +static lean_object* l___private_Lean_Environment_0__Lean_reprConstantKind____x40_Lean_Environment___hyg_1248____closed__13; LEAN_EXPORT lean_object* l_Lean_EnvExtension_modifyState_unsafe__1___rarg___boxed(lean_object*, lean_object*, lean_object*); -LEAN_EXPORT lean_object* l___private_Lean_Environment_0__Lean_reprConstantKind____x40_Lean_Environment___hyg_1396_(uint8_t, lean_object*); LEAN_EXPORT lean_object* l_Array_filterMapM___at_Lean_finalizeImport___spec__1___boxed(lean_object*, lean_object*, lean_object*, lean_object*); static lean_object* l_Lean_Environment_displayStats___closed__6; LEAN_EXPORT lean_object* l_Lean_registerEnvExtension___rarg(lean_object*, lean_object*, uint8_t, lean_object*); @@ -793,8 +772,10 @@ static lean_object* l___private_Lean_Environment_0__Lean_EnvExtension_modifyStat static lean_object* l_Lean_PersistentHashMap_toList___at_Lean_Environment_dbgFormatAsyncState___spec__23___closed__1; LEAN_EXPORT lean_object* l_Lean_EnvExtension_setState___rarg___lambda__1___boxed(lean_object*, lean_object*); lean_object* lean_usize_to_nat(size_t); +static lean_object* l___private_Lean_Environment_0__Lean_reprConstantKind____x40_Lean_Environment___hyg_1248____closed__49; LEAN_EXPORT lean_object* l_Lean_PersistentHashMap_insertAtCollisionNodeAux___at___private_Lean_Environment_0__Lean_Kernel_Environment_add___spec__5(lean_object*, lean_object*, lean_object*, lean_object*); static lean_object* l_Lean_Environment_evalConstCheck___rarg___closed__1; +LEAN_EXPORT lean_object* l_Lean_PersistentHashMap_foldlM___at_Lean_mkModuleData___spec__8___boxed(lean_object*, lean_object*, lean_object*); uint8_t lean_is_reserved_name(lean_object*, lean_object*); static lean_object* l_Lean_PersistentEnvExtensionDescr_name___autoParam___closed__23; LEAN_EXPORT lean_object* l_Array_forIn_x27Unsafe_loop___at_Lean_importModulesCore_go___spec__8___boxed(lean_object*, lean_object*, lean_object*, lean_object*, lean_object*, lean_object*, lean_object*, lean_object*, lean_object*); @@ -808,7 +789,6 @@ LEAN_EXPORT lean_object* l_Lean_PersistentHashMap_findAtAux___at_Lean_Kernel_Env LEAN_EXPORT lean_object* l_Std_DHashMap_Internal_AssocList_foldlM___at_Lean_finalizeImport___spec__7(lean_object*, lean_object*); LEAN_EXPORT lean_object* l_Subarray_forInUnsafe_loop___at___private_Lean_Environment_0__Lean_setImportedEntries___spec__1___boxed(lean_object*, lean_object*, lean_object*, lean_object*, lean_object*, lean_object*); LEAN_EXPORT lean_object* l_Std_DHashMap_Internal_AssocList_replace___at_Lean_importModulesCore_go___spec__6(lean_object*, lean_object*, lean_object*); -LEAN_EXPORT lean_object* 
l_Array_forIn_x27Unsafe_loop___at_Lean_importModulesCore_go___spec__10___lambda__5___boxed(lean_object*, lean_object*, lean_object*); LEAN_EXPORT lean_object* l_Lean_registerPersistentEnvExtensionUnsafe___rarg___lambda__2(lean_object*, lean_object*, lean_object*, lean_object*, lean_object*); static lean_object* l_Option_repr___at_Lean_Environment_dbgFormatAsyncState___spec__17___closed__4; LEAN_EXPORT lean_object* l___private_Lean_Environment_0__Lean_EnvExtension_findStateAsyncUnsafe___rarg___boxed(lean_object*, lean_object*, lean_object*, lean_object*); @@ -816,13 +796,12 @@ static lean_object* l_List_repr___at_Lean_Environment_dbgFormatAsyncState___spec LEAN_EXPORT lean_object* l_Lean_PersistentHashMap_foldlM___at_Lean_Environment_dbgFormatAsyncState___spec__24___boxed(lean_object*, lean_object*, lean_object*); LEAN_EXPORT lean_object* l_Lean_Environment_enableRealizationsForConst___lambda__1___boxed(lean_object*, lean_object*, lean_object*, lean_object*, lean_object*); LEAN_EXPORT lean_object* l_Lean_Kernel_Environment_addDeclCore___boxed(lean_object*, lean_object*, lean_object*, lean_object*); -static lean_object* l_Lean_instImpl____x40_Lean_Environment___hyg_2021____closed__4; static lean_object* l_Lean_instInhabitedEnvExtension___lambda__1___closed__2; static lean_object* l_Lean_PersistentEnvExtensionDescr_name___autoParam___closed__15; -static lean_object* l___private_Lean_Environment_0__Lean_reprConstantKind____x40_Lean_Environment___hyg_1396____closed__2; LEAN_EXPORT lean_object* l_Lean_Environment_addConstAsync___lambda__7___boxed(lean_object*, lean_object*); LEAN_EXPORT lean_object* l_Lean_Environment_addConstAsync___lambda__8___boxed(lean_object*, lean_object*, lean_object*, lean_object*, lean_object*, lean_object*, lean_object*); uint8_t l___private_Lean_Declaration_0__Lean_beqConstantVal____x40_Lean_Declaration___hyg_431_(lean_object*, lean_object*); +static lean_object* l___private_Lean_Environment_0__Lean_reprConstantKind____x40_Lean_Environment___hyg_1248____closed__10; LEAN_EXPORT lean_object* l_Prod_repr___at_Lean_Environment_dbgFormatAsyncState___spec__14(lean_object*, lean_object*); LEAN_EXPORT lean_object* l_Lean_Environment_AddConstAsyncResult_commitConst(lean_object*, lean_object*, lean_object*, lean_object*, lean_object*); LEAN_EXPORT lean_object* l_Lean_throwAlreadyImported___rarg(lean_object*, lean_object*, lean_object*, lean_object*, lean_object*); @@ -836,11 +815,9 @@ LEAN_EXPORT lean_object* l_Lean_ConstantKind_noConfusion___rarg(uint8_t, uint8_t LEAN_EXPORT lean_object* l_Lean_EnvExtension_ensureExtensionsArraySize(lean_object*, lean_object*); LEAN_EXPORT lean_object* l_Lean_instGetElemArrayModuleIdxLtNatToNatSize(lean_object*); LEAN_EXPORT lean_object* l_List_forIn_x27_loop___at_Lean_Environment_replayConsts_replayKernel___spec__9___lambda__3___boxed(lean_object*, lean_object*, lean_object*, lean_object*, lean_object*); -static lean_object* l___private_Lean_Environment_0__Lean_reprImport____x40_Lean_Environment___hyg_237____closed__16; lean_object* l_Substring_nextn(lean_object*, lean_object*, lean_object*); static lean_object* l_List_foldl___at___private_Lean_Environment_0__Lean_Environment_updateBaseAfterKernelAdd___spec__1___closed__2; lean_object* l_Lean_withImporting___rarg(lean_object*, lean_object*); -static lean_object* l___private_Lean_Environment_0__Lean_reprConstantKind____x40_Lean_Environment___hyg_1396____closed__41; static lean_object* l_Lean_instInhabitedAsyncConstantInfo___closed__1; LEAN_EXPORT lean_object* 
l_Lean_SMap_contains___at_Lean_Environment_addExtraName___spec__1___boxed(lean_object*, lean_object*); LEAN_EXPORT lean_object* l_Lean_finalizeImport___lambda__1___boxed(lean_object*, lean_object*, lean_object*, lean_object*, lean_object*); @@ -849,7 +826,6 @@ LEAN_EXPORT lean_object* l_Prod_repr___at_Lean_Environment_dbgFormatAsyncState__ LEAN_EXPORT lean_object* l_Array_foldlMUnsafe_fold___at_Lean_Environment_displayStats___spec__7___lambda__1(lean_object*, lean_object*, lean_object*); LEAN_EXPORT lean_object* l_Lean_finalizeImport_unsafe__3(lean_object*); LEAN_EXPORT lean_object* l_List_toString___at_Lean_Environment_displayStats___spec__1(lean_object*); -static lean_object* l_Lean_instImpl____x40_Lean_Environment___hyg_2021____closed__3; LEAN_EXPORT lean_object* l_List_foldl___at_Lean_Environment_dbgFormatAsyncState___spec__22(lean_object*, lean_object*, lean_object*); LEAN_EXPORT lean_object* l_Array_foldlMUnsafe_fold___at_Lean_Environment_dbgFormatAsyncState___spec__26___rarg(lean_object*, lean_object*, size_t, size_t, lean_object*); static lean_object* l_Lean_PersistentHashMap_insertAux___at___private_Lean_Environment_0__Lean_Kernel_Environment_add___spec__3___closed__1; @@ -857,8 +833,12 @@ LEAN_EXPORT lean_object* l_Prod_repr___at_Lean_Environment_dbgFormatAsyncState__ LEAN_EXPORT lean_object* l_List_mapTR_loop___at_Lean_Environment_dbgFormatAsyncState___spec__33(lean_object*, lean_object*); static lean_object* l_Lean_EnvExtensionStateSpec___closed__1; LEAN_EXPORT lean_object* l_Array_foldlMUnsafe_fold___at_Lean_finalizeImport___spec__17___boxed(lean_object*, lean_object*, lean_object*, lean_object*); +static lean_object* l___private_Lean_Environment_0__Lean_reprConstantKind____x40_Lean_Environment___hyg_1248____closed__48; +static lean_object* l___private_Lean_Environment_0__Lean_reprConstantKind____x40_Lean_Environment___hyg_1248____closed__6; static lean_object* l___private_Lean_Environment_0__Lean_Environment_mkFallbackConstInfo___closed__1; +static lean_object* l___private_Lean_Environment_0__Lean_reprConstantKind____x40_Lean_Environment___hyg_1248____closed__28; LEAN_EXPORT uint8_t l_Lean_Environment_getModuleIdx_x3f___lambda__1(lean_object*, lean_object*); +LEAN_EXPORT lean_object* l_Lean_mkModuleData___lambda__2(lean_object*, lean_object*, lean_object*); LEAN_EXPORT lean_object* l_Lean_Kernel_instInhabitedDiagnostics; lean_object* l_Lean_ConstantInfo_toConstantVal(lean_object*); static lean_object* l_Lean_initFn____x40_Lean_Environment___hyg_6____closed__3; @@ -867,6 +847,7 @@ LEAN_EXPORT lean_object* l_Lean_withExporting___rarg___lambda__5(lean_object*, u LEAN_EXPORT lean_object* l_Lean_PersistentHashMap_toList___at_Lean_Environment_dbgFormatAsyncState___spec__23___boxed(lean_object*); LEAN_EXPORT lean_object* l_Array_foldlMUnsafe_fold___at_Lean_finalizeImport___spec__17(lean_object*, size_t, size_t, lean_object*); LEAN_EXPORT lean_object* l_Array_foldlMUnsafe_fold___at_Lean_Environment_displayStats___spec__8___boxed(lean_object*, lean_object*, lean_object*, lean_object*); +static lean_object* l___private_Lean_Environment_0__Lean_reprConstantKind____x40_Lean_Environment___hyg_1248____closed__1; LEAN_EXPORT lean_object* l_Std_DHashMap_Internal_AssocList_contains___at___private_Lean_Environment_0__Lean_Kernel_Environment_add___spec__6___boxed(lean_object*, lean_object*); LEAN_EXPORT lean_object* l_Lean_Environment_hasUnsafe___lambda__1___boxed(lean_object*, lean_object*); LEAN_EXPORT lean_object* l_Lean_finalizeImport___lambda__2___boxed(lean_object*, lean_object*, 
lean_object*, lean_object*, lean_object*, lean_object*, lean_object*, lean_object*); @@ -874,16 +855,15 @@ static lean_object* l_Lean_instInhabitedAsyncConstantInfo___closed__4; lean_object* lean_array_fget(lean_object*, lean_object*); static lean_object* l_Lean_Environment_PromiseCheckedResult_commitChecked___closed__3; LEAN_EXPORT lean_object* l_Lean_Environment_realizingStack(lean_object*); -static lean_object* l___private_Lean_Environment_0__Lean_reprConstantKind____x40_Lean_Environment___hyg_1396____closed__5; LEAN_EXPORT lean_object* l_Lean_registerEnvExtension_unsafe__1(lean_object*); LEAN_EXPORT lean_object* l___private_Lean_Environment_0__Lean_Environment_findTaskCore___lambda__3(lean_object*, lean_object*, uint8_t, lean_object*); -static lean_object* l___private_Lean_Environment_0__Lean_reprConstantKind____x40_Lean_Environment___hyg_1396____closed__27; LEAN_EXPORT lean_object* l___private_Lean_Environment_0__Lean_VisibilityMap_map(lean_object*, lean_object*); LEAN_EXPORT lean_object* l___private_Lean_Environment_0__Lean_EnvExtension_findStateAsyncUnsafe___rarg___lambda__1(lean_object*, lean_object*, lean_object*, lean_object*); LEAN_EXPORT lean_object* l_Lean_Environment_hasUnsafe___boxed(lean_object*, lean_object*); LEAN_EXPORT lean_object* l_Std_Range_forIn_x27_loop___at_Lean_finalizeImport___spec__10___boxed(lean_object*, lean_object*, lean_object*, lean_object*, lean_object*, lean_object*, lean_object*, lean_object*, lean_object*, lean_object*); LEAN_EXPORT lean_object* l_List_mapTR_loop___at_Lean_Environment_realizeConst___spec__4(lean_object*, lean_object*, lean_object*); LEAN_EXPORT lean_object* l_Option_repr___at_Lean_Environment_dbgFormatAsyncState___spec__29(lean_object*, lean_object*); +LEAN_EXPORT lean_object* l___private_Lean_Environment_0__Lean_findOLeanParts___lambda__2___boxed(lean_object*, lean_object*, lean_object*); static lean_object* l_Lean_EnvExtension_modifyState___rarg___closed__9; static lean_object* l_Lean_mkEmptyEnvironment___lambda__1___closed__4; LEAN_EXPORT lean_object* l_Lean_withExporting___rarg(lean_object*, lean_object*, lean_object*, lean_object*, lean_object*, uint8_t); @@ -896,12 +876,9 @@ LEAN_EXPORT lean_object* l_Option_repr___at_Lean_Environment_dbgFormatAsyncState static lean_object* l_Lean_PersistentEnvExtensionDescr_name___autoParam___closed__3; LEAN_EXPORT lean_object* l___private_Lean_Environment_0__Lean_Visibility_noConfusion___rarg(uint8_t, uint8_t, lean_object*); LEAN_EXPORT uint8_t l_Lean_OLeanLevel_ofNat(lean_object*); -static lean_object* l___private_Lean_Environment_0__Lean_reprConstantKind____x40_Lean_Environment___hyg_1396____closed__7; LEAN_EXPORT lean_object* l_Lean_Environment_enableRealizationsForConst___lambda__2___boxed(lean_object*, lean_object*, lean_object*, lean_object*, lean_object*); extern lean_object* l_Lean_NameSet_empty; static lean_object* l_Lean_EnvExtension_modifyState___rarg___closed__1; -static lean_object* l___private_Lean_Environment_0__Lean_reprImport____x40_Lean_Environment___hyg_237____closed__12; -static lean_object* l___private_Lean_Environment_0__Lean_reprImport____x40_Lean_Environment___hyg_237____closed__21; LEAN_EXPORT lean_object* l_Lean_Environment_addConstAsync___lambda__1(lean_object*, uint8_t, lean_object*); static lean_object* l_Array_foldlMUnsafe_fold___at_Lean_Environment_displayStats___spec__7___lambda__2___closed__1; LEAN_EXPORT lean_object* l_Lean_Environment_replayConsts(lean_object*, lean_object*, lean_object*, uint8_t, lean_object*); @@ -910,19 +887,17 @@ static lean_object* 
l_Lean_OLeanLevel_adjustFileName___closed__1; LEAN_EXPORT lean_object* l_List_forIn_x27_loop___at_Lean_Environment_replayConsts_replayKernel___spec__9___lambda__2___boxed(lean_object*, lean_object*, lean_object*, lean_object*, lean_object*, lean_object*); lean_object* lean_string_length(lean_object*); LEAN_EXPORT lean_object* l_IO_println___at_Lean_Environment_displayStats___spec__3(lean_object*, lean_object*); -static lean_object* l_Lean_instReprImport___closed__1; -static lean_object* l___private_Lean_Environment_0__Lean_reprConstantKind____x40_Lean_Environment___hyg_1396____closed__16; LEAN_EXPORT lean_object* l_Std_Range_forIn_x27_loop___at_Lean_mkExtNameMap___spec__6(lean_object*, lean_object*, lean_object*, lean_object*, lean_object*, lean_object*, lean_object*, lean_object*, lean_object*); LEAN_EXPORT lean_object* l_Array_forIn_x27Unsafe_loop___at_Lean_finalizeImport___spec__8___lambda__1___boxed(lean_object*, lean_object*, lean_object*, lean_object*, lean_object*, lean_object*, lean_object*); uint8_t lean_nat_dec_eq(lean_object*, lean_object*); LEAN_EXPORT lean_object* l_List_forIn_x27_loop___at_Lean_Environment_replayConsts_replayKernel___spec__9___boxed(lean_object*, lean_object*, lean_object*, lean_object*, lean_object*, lean_object*, lean_object*, lean_object*, lean_object*); LEAN_EXPORT lean_object* l_Lean_instInhabitedPersistentEnvExtension___lambda__1(lean_object*, lean_object*, lean_object*); -LEAN_EXPORT lean_object* l_Lean_instReprImport; LEAN_EXPORT lean_object* l_Lean_PersistentHashMap_findAux___at_Lean_Kernel_Environment_find_x3f___spec__3___boxed(lean_object*, lean_object*, lean_object*); LEAN_EXPORT lean_object* l___private_Lean_Environment_0__Lean_setImportedEntries_unsafe__1___lambda__1___boxed(lean_object*, lean_object*); lean_object* l_List_takeTR_go___rarg(lean_object*, lean_object*, lean_object*, lean_object*); LEAN_EXPORT lean_object* l___private_Lean_Environment_0__Lean_finalizePersistentExtensions_loop(lean_object*, lean_object*, lean_object*, lean_object*, lean_object*); LEAN_EXPORT lean_object* l_Array_foldlMUnsafe_fold___at_Lean_Environment_displayStats___spec__7___boxed(lean_object*, lean_object*, lean_object*, lean_object*, lean_object*, lean_object*); +static lean_object* l___private_Lean_Environment_0__Lean_reprConstantKind____x40_Lean_Environment___hyg_1248____closed__21; static lean_object* l_Lean_instGetElem_x3fArrayModuleIdxLtNatToNatSize___closed__3; static lean_object* l_Lean_Environment_enableRealizationsForConst___closed__1; LEAN_EXPORT lean_object* lake_environment_add(lean_object*, lean_object*); @@ -937,6 +912,7 @@ static lean_object* l_Lean_readModuleData___closed__4; LEAN_EXPORT lean_object* l_Lean_Environment_mainModule(lean_object*); LEAN_EXPORT lean_object* l_Lean_EnvExtension_setState___rarg(lean_object*, lean_object*, lean_object*); LEAN_EXPORT lean_object* l_Array_mapMUnsafe_map___at_Lean_mkModuleData___spec__1___lambda__1(lean_object*, lean_object*, uint8_t, uint8_t, lean_object*); +static lean_object* l___private_Lean_Environment_0__Lean_reprConstantKind____x40_Lean_Environment___hyg_1248____closed__37; static lean_object* l___private_Lean_Environment_0__Lean_EnvExtension_getStateImpl___rarg___closed__1; LEAN_EXPORT lean_object* l___private_Lean_Environment_0__Lean_EnvExtension_modifyStateImpl(lean_object*); LEAN_EXPORT lean_object* l___private_Lean_Environment_0__Lean_Environment_addDeclCheck___boxed(lean_object*, lean_object*, lean_object*, lean_object*); @@ -948,35 +924,27 @@ static lean_object* 
l_Lean_instInhabitedPersistentEnvExtension___closed__3; LEAN_EXPORT lean_object* l_Lean_Environment_enableRealizationsForConst___lambda__3(lean_object*, lean_object*, lean_object*, lean_object*, lean_object*); LEAN_EXPORT lean_object* l_Std_DHashMap_Internal_Raw_u2080_expand___at_Lean_importModulesCore_go___spec__3(lean_object*); LEAN_EXPORT lean_object* l___private_Lean_Environment_0__Lean_Environment_findTaskCore(lean_object*, lean_object*, uint8_t); +static lean_object* l___private_Lean_Environment_0__Lean_reprConstantKind____x40_Lean_Environment___hyg_1248____closed__22; static lean_object* l___private_Lean_Environment_0__Lean_EnvExtension_getStateUnsafe___rarg___closed__2; LEAN_EXPORT lean_object* l_List_mapM_loop___at_Lean_Environment_dbgFormatAsyncState___spec__6(lean_object*, lean_object*, lean_object*); -static lean_object* l___private_Lean_Environment_0__Lean_reprConstantKind____x40_Lean_Environment___hyg_1396____closed__45; -static lean_object* l_Lean_instImpl____x40_Lean_Environment___hyg_2021____closed__8; -static lean_object* l___private_Lean_Environment_0__Lean_reprConstantKind____x40_Lean_Environment___hyg_1396____closed__11; static lean_object* l_List_foldl___at___private_Lean_Environment_0__Lean_Environment_updateBaseAfterKernelAdd___spec__1___closed__3; -static lean_object* l_Lean_instImpl____x40_Lean_Environment___hyg_2021____closed__9; lean_object* l_Lean_Name_mkStr2(lean_object*, lean_object*); static lean_object* l_panic___at_Lean_readModuleData___spec__1___closed__1; LEAN_EXPORT lean_object* l_Lean_instInhabitedPersistentEnvExtension___boxed(lean_object*, lean_object*, lean_object*, lean_object*); LEAN_EXPORT lean_object* l___private_Lean_Environment_0__Lean_Environment_mkFallbackConstInfo(lean_object*, uint8_t); LEAN_EXPORT lean_object* l_Lean_Environment_contains___boxed(lean_object*, lean_object*, lean_object*); -LEAN_EXPORT lean_object* l___private_Lean_Environment_0__Lean_reprImport____x40_Lean_Environment___hyg_237_(lean_object*, lean_object*); LEAN_EXPORT lean_object* l_Lean_instInhabitedModuleIdx; LEAN_EXPORT lean_object* l_Lean_Environment_getModuleIdx_x3f(lean_object*, lean_object*); LEAN_EXPORT lean_object* l_Lean_Environment_allImportedModuleNames(lean_object*); LEAN_EXPORT lean_object* l_List_foldl___at_Lean_Environment_dbgFormatAsyncState___spec__16(lean_object*, lean_object*, lean_object*); -static lean_object* l___private_Lean_Environment_0__Lean_reprConstantKind____x40_Lean_Environment___hyg_1396____closed__34; +LEAN_EXPORT lean_object* l_Lean_EnvExtension_initFn____x40_Lean_Environment___hyg_6645_(lean_object*); lean_object* l_Lean_findOLean(lean_object*, lean_object*); -static lean_object* l___private_Lean_Environment_0__Lean_reprImport____x40_Lean_Environment___hyg_237____closed__14; LEAN_EXPORT lean_object* l_panic___at___private_Lean_Environment_0__Lean_Environment_mkFallbackConstInfo___spec__1(lean_object*); lean_object* l_Lean_PersistentHashMap_mkEmptyEntries(lean_object*, lean_object*); static lean_object* l_Lean_EnvExtension_modifyState___rarg___closed__7; LEAN_EXPORT lean_object* l_Lean_Environment_addConstAsync___lambda__7(lean_object*, lean_object*); LEAN_EXPORT lean_object* l_Lean_Environment_isConstructor___boxed(lean_object*, lean_object*); LEAN_EXPORT lean_object* l_Lean_Environment_AddConstAsyncResult_commitConst___lambda__4___boxed(lean_object*, lean_object*, lean_object*, lean_object*, lean_object*, lean_object*, lean_object*); -static lean_object* 
l___private_Lean_Environment_0__Lean_reprImport____x40_Lean_Environment___hyg_237____closed__10; -static lean_object* l___private_Lean_Environment_0__Lean_reprConstantKind____x40_Lean_Environment___hyg_1396____closed__49; -static lean_object* l___private_Lean_Environment_0__Lean_reprImport____x40_Lean_Environment___hyg_237____closed__9; LEAN_EXPORT lean_object* l_Lean_registerPersistentEnvExtensionUnsafe___rarg___lambda__1(lean_object*, lean_object*); LEAN_EXPORT lean_object* l_Std_DHashMap_Internal_Raw_u2080_expand___at_Lean_finalizeImport___spec__5(lean_object*); LEAN_EXPORT lean_object* l_Lean_Environment_realizeConst___lambda__5___boxed(lean_object*, lean_object*, lean_object*, lean_object*, lean_object*, lean_object*, lean_object*); @@ -986,7 +954,6 @@ static lean_object* l_Lean_Environment_PromiseCheckedResult_commitChecked___clos LEAN_EXPORT uint8_t l_Lean_SMap_contains___at_Lean_Environment_addExtraName___spec__1(lean_object*, lean_object*); static lean_object* l_Lean_PersistentEnvExtensionDescr_name___autoParam___closed__22; LEAN_EXPORT lean_object* l_Lean_Environment_isRealizing___boxed(lean_object*); -static lean_object* l___private_Lean_Environment_0__Lean_reprImport____x40_Lean_Environment___hyg_237____closed__1; LEAN_EXPORT lean_object* l_panic___at_Lean_Environment_replayConsts_replayKernel___spec__4(lean_object*, lean_object*); LEAN_EXPORT lean_object* l_Lean_ConstantInfo_instantiateTypeLevelParams___boxed(lean_object*, lean_object*); static lean_object* l_Lean_Environment_realizeConst___lambda__5___closed__2; @@ -996,7 +963,6 @@ static lean_object* l_List_repr___at_Lean_Environment_dbgFormatAsyncState___spec LEAN_EXPORT uint8_t l_Lean_Environment_isSafeDefinition(lean_object*, lean_object*); static lean_object* l_Lean_Environment_PromiseCheckedResult_commitChecked___closed__1; lean_object* l_Std_Format_joinSep___at_Prod_repr___spec__1(lean_object*, lean_object*); -static lean_object* l_Array_forIn_x27Unsafe_loop___at_Lean_importModulesCore_go___spec__10___lambda__4___closed__3; lean_object* lean_array_set(lean_object*, lean_object*, lean_object*); LEAN_EXPORT lean_object* l_Lean_Environment_replayConsts_replayKernel(lean_object*, lean_object*, uint8_t, lean_object*, lean_object*, lean_object*); uint64_t l_Lean_Name_hash___override(lean_object*); @@ -1004,11 +970,11 @@ LEAN_EXPORT uint8_t l_Lean_Environment_addDeclCore___lambda__2(lean_object*, lea lean_object* l_Lean_ConstantInfo_hints(lean_object*); LEAN_EXPORT lean_object* l_Lean_Environment_addConstAsync___lambda__4(lean_object*, uint8_t, lean_object*); LEAN_EXPORT lean_object* l_Lean_Environment_const2ModIdx(lean_object*); -static lean_object* l___private_Lean_Environment_0__Lean_reprImport____x40_Lean_Environment___hyg_237____closed__29; LEAN_EXPORT lean_object* l_Lean_PersistentHashMap_find_x3f___at_Lean_Kernel_Environment_Diagnostics_recordUnfold___spec__1(lean_object*, lean_object*); LEAN_EXPORT lean_object* l_Lean_Environment_findAsync_x3f___boxed(lean_object*, lean_object*, lean_object*); LEAN_EXPORT lean_object* l_Lean_instInhabitedPersistentEnvExtension___lambda__2___boxed(lean_object*); uint64_t lean_uint64_xor(uint64_t, uint64_t); +LEAN_EXPORT uint8_t l___private_Lean_Environment_0__Lean_beqConstantKind____x40_Lean_Environment___hyg_1230_(uint8_t, uint8_t); lean_object* l_Repr_addAppParen(lean_object*, lean_object*); static lean_object* l_Lean_instInhabitedAsyncConsts___closed__1; LEAN_EXPORT lean_object* l_Lean_instTypeNameAsyncConsts; @@ -1020,29 +986,23 @@ lean_object* 
l_IO_Promise_result_x21___rarg(lean_object*); LEAN_EXPORT lean_object* l_Lean_PersistentHashMap_insert___at_Lean_Kernel_Environment_Diagnostics_recordUnfold___spec__4(lean_object*, lean_object*, lean_object*); LEAN_EXPORT lean_object* l_List_mapTR_loop___at_Lean_Environment_dbgFormatAsyncState___spec__28(lean_object*, lean_object*); LEAN_EXPORT lean_object* l_Lean_instGetElem_x3fArrayModuleIdxLtNatToNatSize___lambda__1(lean_object*, lean_object*); -static lean_object* l___private_Lean_Environment_0__Lean_reprConstantKind____x40_Lean_Environment___hyg_1396____closed__47; static lean_object* l_Lean_throwAlreadyImported___rarg___closed__3; static lean_object* l_Lean_PersistentEnvExtensionDescr_name___autoParam___closed__2; LEAN_EXPORT lean_object* l_Lean_mkModuleData___boxed(lean_object*, lean_object*, lean_object*); static lean_object* l___private_Lean_Environment_0__Lean_AsyncConsts_findRecTask___lambda__1___closed__1; LEAN_EXPORT lean_object* l_Lean_instBEqConstantKind; lean_object* lean_task_map(lean_object*, lean_object*, lean_object*, uint8_t); -static lean_object* l___private_Lean_Environment_0__Lean_reprConstantKind____x40_Lean_Environment___hyg_1396____closed__14; static lean_object* l___private_Lean_Environment_0__Lean_AsyncConsts_add___closed__1; lean_object* lean_nat_sub(lean_object*, lean_object*); -static lean_object* l___private_Lean_Environment_0__Lean_reprImport____x40_Lean_Environment___hyg_237____closed__15; +LEAN_EXPORT lean_object* l___private_Lean_Environment_0__Lean_reprConstantKind____x40_Lean_Environment___hyg_1248____boxed(lean_object*, lean_object*); static lean_object* l___private_Lean_Environment_0__Lean_Environment_findAsyncCore_x3f___lambda__2___closed__2; LEAN_EXPORT lean_object* l_Lean_Environment_setMainModule___lambda__1(lean_object*, lean_object*); -static lean_object* l___private_Lean_Environment_0__Lean_reprConstantKind____x40_Lean_Environment___hyg_1396____closed__17; LEAN_EXPORT lean_object* l_Std_DHashMap_Internal_Raw_u2080_expand_go___at_Lean_finalizeImport___spec__6(lean_object*, lean_object*, lean_object*); LEAN_EXPORT lean_object* l_Lean_instInhabitedPersistentEnvExtension___lambda__2(lean_object*); -static lean_object* l___private_Lean_Environment_0__Lean_reprConstantKind____x40_Lean_Environment___hyg_1396____closed__44; LEAN_EXPORT lean_object* l_Lean_Environment_realizeConst___lambda__4(lean_object*, lean_object*, lean_object*, lean_object*, lean_object*, lean_object*, lean_object*); -static lean_object* l___private_Lean_Environment_0__Lean_reprConstantKind____x40_Lean_Environment___hyg_1396____closed__36; LEAN_EXPORT lean_object* l_Lean_PersistentHashMap_foldlM___at_Lean_mkModuleData___spec__2(lean_object*, lean_object*, lean_object*); LEAN_EXPORT lean_object* l_Std_DHashMap_Internal_Raw_u2080_expand___at___private_Lean_Environment_0__Lean_Kernel_Environment_add___spec__7(lean_object*); LEAN_EXPORT lean_object* l_Lean_mkDefinitionValInferrringUnsafe___rarg(lean_object*, lean_object*, lean_object*, lean_object*, lean_object*, lean_object*, lean_object*); -static lean_object* l___private_Lean_Environment_0__Lean_reprConstantKind____x40_Lean_Environment___hyg_1396____closed__3; static lean_object* l_Lean_instInhabitedPersistentEnvExtension___closed__5; lean_object* lean_nat_mul(lean_object*, lean_object*); LEAN_EXPORT uint8_t l_Lean_Kernel_isDefEqGuarded(lean_object*, lean_object*, lean_object*, lean_object*); @@ -1050,6 +1010,7 @@ static lean_object* l___private_Lean_Environment_0__Lean_Environment_mkFallbackC LEAN_EXPORT lean_object* 
l_Array_forIn_x27Unsafe_loop___at_Lean_finalizeImport___spec__9___boxed(lean_object*, lean_object*, lean_object*, lean_object*, lean_object*, lean_object*, lean_object*, lean_object*); LEAN_EXPORT lean_object* l_Lean_Environment_replayConsts___lambda__1___boxed(lean_object*, lean_object*, lean_object*, lean_object*, lean_object*, lean_object*); LEAN_EXPORT lean_object* l___private_Lean_Environment_0__Lean_AsyncConsts_findPrefix_x3f(lean_object*, lean_object*); +static lean_object* l___private_Lean_Environment_0__Lean_reprConstantKind____x40_Lean_Environment___hyg_1248____closed__23; LEAN_EXPORT lean_object* l___private_Lean_Environment_0__Lean_VisibilityMap_const___rarg(lean_object*); LEAN_EXPORT lean_object* l_Lean_PersistentEnvExtension_addEntry(lean_object*, lean_object*, lean_object*); LEAN_EXPORT lean_object* l_Lean_Environment_evalConstCheck___rarg(lean_object*, lean_object*, lean_object*, lean_object*); @@ -1083,10 +1044,12 @@ LEAN_EXPORT lean_object* l_Lean_Environment_realizeConst(lean_object*, lean_obje lean_object* l_Lean_Expr_FoldConstsImpl_fold_visit___rarg(lean_object*, lean_object*, lean_object*, lean_object*); static lean_object* l_Lean_PersistentEnvExtensionDescr_name___autoParam___closed__24; LEAN_EXPORT lean_object* l_Lean_persistentEnvExtensionsRef; +static lean_object* l_Lean_instImpl____x40_Lean_Environment___hyg_1873____closed__7; lean_object* lean_io_initializing(lean_object*); LEAN_EXPORT uint32_t l_Lean_getMaxHeight___lambda__1(lean_object*, lean_object*, uint32_t); static lean_object* l___private_Lean_Environment_0__Lean_Environment_mkFallbackConstInfo___closed__8; lean_object* l_List_reverse___rarg(lean_object*); +static lean_object* l___private_Lean_Environment_0__Lean_reprConstantKind____x40_Lean_Environment___hyg_1248____closed__7; LEAN_EXPORT lean_object* l___private_Lean_Environment_0__Lean_setImportedEntries_unsafe__1___lambda__1(lean_object*, lean_object*); static lean_object* l_Lean_Environment_realizeConst___lambda__5___closed__4; static lean_object* l_Lean_Environment_enableRealizationsForConst___lambda__3___closed__1; @@ -1097,7 +1060,8 @@ LEAN_EXPORT lean_object* l_Std_Range_forIn_x27_loop___at_Lean_finalizeImport___s LEAN_EXPORT lean_object* l_Lean_Environment_imports(lean_object*); static lean_object* l___private_Lean_Environment_0__Lean_Environment_mkFallbackConstInfo___closed__10; LEAN_EXPORT lean_object* l___private_Lean_Environment_0__Lean_setImportedEntries_unsafe__2(lean_object*, lean_object*, lean_object*, lean_object*, lean_object*); -LEAN_EXPORT lean_object* l_Lean_importModulesCore_go(lean_object*, uint8_t, lean_object*, lean_object*); +LEAN_EXPORT uint8_t l___private_Lean_Environment_0__Lean_AsyncConsts_add___lambda__1(lean_object*); +LEAN_EXPORT lean_object* l_Lean_importModulesCore_go(lean_object*, uint8_t, lean_object*, lean_object*, lean_object*); static lean_object* l_Lean_readModuleData___closed__2; size_t lean_usize_sub(size_t, size_t); lean_object* lean_array_mk(lean_object*); @@ -1107,13 +1071,15 @@ LEAN_EXPORT lean_object* l_Lean_instInhabitedPersistentEnvExtensionState___rarg( LEAN_EXPORT lean_object* l___private_Lean_Environment_0__Lean_ensureExtensionsArraySize___lambda__1(lean_object*, lean_object*); LEAN_EXPORT lean_object* l_Lean_EnvExtension_AsyncMode_noConfusion(lean_object*); static lean_object* l_Lean_initFn____x40_Lean_Environment___hyg_6____closed__1; +static lean_object* l_Lean_instImpl____x40_Lean_Environment___hyg_1873____closed__8; LEAN_EXPORT lean_object* l_Lean_EnvExtension_modifyState(lean_object*); 
LEAN_EXPORT lean_object* l_List_foldl___at_Lean_Environment_dbgFormatAsyncState___spec__11(lean_object*, lean_object*, lean_object*); LEAN_EXPORT lean_object* l_List_toString___at_Lean_Environment_dbgFormatAsyncState___spec__31___boxed(lean_object*); static lean_object* l_Lean_instInhabitedPersistentEnvExtension___closed__1; LEAN_EXPORT lean_object* l_Lean_Environment_findAsync_x3f(lean_object*, lean_object*, uint8_t); LEAN_EXPORT lean_object* l___private_Lean_Environment_0__Lean_Environment_findTaskCore___lambda__4___boxed(lean_object*, lean_object*, lean_object*, lean_object*); -LEAN_EXPORT lean_object* l_Lean_importModulesCore___boxed(lean_object*, lean_object*, lean_object*, lean_object*); +static lean_object* l_List_repr___at_Lean_Environment_dbgFormatAsyncState___spec__8___closed__10; +LEAN_EXPORT lean_object* l_Lean_importModulesCore___boxed(lean_object*, lean_object*, lean_object*, lean_object*, lean_object*); uint8_t l_Substring_beq(lean_object*, lean_object*); LEAN_EXPORT lean_object* l_Lean_PersistentHashMap_toList___at_Lean_Environment_dbgFormatAsyncState___spec__23(lean_object*); LEAN_EXPORT lean_object* l___private_Lean_Environment_0__Lean_VisibilityMap_map___rarg(lean_object*, lean_object*); @@ -1127,6 +1093,7 @@ LEAN_EXPORT lean_object* l_List_toString___at_Lean_Environment_dbgFormatAsyncSta LEAN_EXPORT lean_object* l_Lean_RBMap_toList___at_Lean_Environment_dbgFormatAsyncState___spec__1___boxed(lean_object*); LEAN_EXPORT lean_object* l___private_Lean_Environment_0__Lean_Visibility_noConfusion(lean_object*); static lean_object* l_Lean_EnvExtension_modifyState___rarg___closed__2; +LEAN_EXPORT lean_object* l_Lean_initFn____x40_Lean_Environment___hyg_8540_(lean_object*); static lean_object* l_Lean_PersistentEnvExtensionDescr_name___autoParam___closed__27; LEAN_EXPORT lean_object* l_Array_forIn_x27Unsafe_loop___at_Lean_finalizeImport___spec__8___boxed(lean_object*, lean_object*, lean_object*, lean_object*, lean_object*, lean_object*, lean_object*, lean_object*, lean_object*, lean_object*); LEAN_EXPORT lean_object* l_Lean_Environment_getModuleIdxFor_x3f___boxed(lean_object*, lean_object*); @@ -1139,7 +1106,6 @@ LEAN_EXPORT lean_object* l___private_Lean_Environment_0__Lean_AsyncConsts_find_x LEAN_EXPORT lean_object* l_Lean_registerEnvExtension___rarg___lambda__1(lean_object*, uint8_t, lean_object*, lean_object*, lean_object*); lean_object* lean_array_uget(lean_object*, size_t); LEAN_EXPORT lean_object* l_Lean_ConstantKind_noConfusion(lean_object*); -static lean_object* l___private_Lean_Environment_0__Lean_reprImport____x40_Lean_Environment___hyg_237____closed__13; LEAN_EXPORT lean_object* l_repr___at_Lean_Environment_dbgFormatAsyncState___spec__19(lean_object*); size_t lean_array_size(lean_object*); LEAN_EXPORT lean_object* l_panic___at_Lean_Environment_replayConsts_replayKernel___spec__3(lean_object*, lean_object*); @@ -1147,9 +1113,9 @@ LEAN_EXPORT lean_object* l_Lean_Environment_enableRealizationsForConst___lambda_ static lean_object* l_Lean_Environment_displayStats___closed__5; LEAN_EXPORT lean_object* l_List_foldl___at_Lean_Environment_replayConsts___spec__1___boxed(lean_object*, lean_object*, lean_object*); LEAN_EXPORT lean_object* l___private_Lean_Environment_0__Lean_Visibility_toCtorIdx(uint8_t); -static lean_object* l_Lean_EnvExtension_initFn____x40_Lean_Environment___hyg_6793____closed__1; LEAN_EXPORT lean_object* l_Lean_CompactedRegion_isMemoryMapped___boxed(lean_object*); LEAN_EXPORT lean_object* l_Lean_Kernel_resetDiag(lean_object*); +LEAN_EXPORT lean_object* 
l___private_Lean_Environment_0__Lean_reprConstantKind____x40_Lean_Environment___hyg_1248_(uint8_t, lean_object*); LEAN_EXPORT lean_object* l_Lean_mkModuleData___lambda__1___boxed(lean_object*, lean_object*, lean_object*); static lean_object* l___private_Lean_Environment_0__Lean_looksLikeOldCodegenName___closed__2; lean_object* l_instInhabitedOfMonad___rarg(lean_object*, lean_object*); @@ -1157,11 +1123,8 @@ static lean_object* l_Lean_instGetElem_x3fArrayModuleIdxLtNatToNatSize___closed_ LEAN_EXPORT lean_object* l_Lean_SMap_numBuckets___at_Lean_Environment_displayStats___spec__4___boxed(lean_object*); lean_object* lean_st_ref_set(lean_object*, lean_object*, lean_object*); LEAN_EXPORT lean_object* l___private_Lean_Environment_0__Lean_Environment_mkFallbackConstInfo___boxed(lean_object*, lean_object*); -static lean_object* l___private_Lean_Environment_0__Lean_reprImport____x40_Lean_Environment___hyg_237____closed__28; LEAN_EXPORT lean_object* l_Lean_writeModule(lean_object*, lean_object*, lean_object*); -LEAN_EXPORT lean_object* l_Array_forIn_x27Unsafe_loop___at_Lean_importModulesCore_go___spec__10___lambda__3(lean_object*, uint8_t, lean_object*, uint8_t, uint8_t, lean_object*, lean_object*, lean_object*, lean_object*); -static lean_object* l___private_Lean_Environment_0__Lean_reprConstantKind____x40_Lean_Environment___hyg_1396____closed__12; -static lean_object* l___private_Lean_Environment_0__Lean_reprConstantKind____x40_Lean_Environment___hyg_1396____closed__19; +LEAN_EXPORT lean_object* l_Array_forIn_x27Unsafe_loop___at_Lean_importModulesCore_go___spec__10___lambda__3(uint8_t, lean_object*, uint8_t, lean_object*, lean_object*, lean_object*, lean_object*, lean_object*); static lean_object* l_Array_foldlMUnsafe_fold___at_Lean_Environment_displayStats___spec__7___lambda__1___closed__1; LEAN_EXPORT lean_object* l_Lean_registerEnvExtension___rarg___boxed(lean_object*, lean_object*, lean_object*, lean_object*); LEAN_EXPORT lean_object* l_Lean_mkDefinitionValInferrringUnsafe___rarg___lambda__1(lean_object*, lean_object*, lean_object*, lean_object*, lean_object*, lean_object*, lean_object*); @@ -1171,18 +1134,21 @@ lean_object* l_Lean_Name_mkStr4(lean_object*, lean_object*, lean_object*, lean_o static lean_object* l_Lean_EnvExtension_modifyState___rarg___closed__11; LEAN_EXPORT lean_object* l_Lean_SMap_find_x3f_x27___at_Lean_Kernel_Environment_find_x3f___spec__1(lean_object*, lean_object*); static lean_object* l_Lean_Environment_dbgFormatAsyncState___closed__2; +static lean_object* l___private_Lean_Environment_0__Lean_reprConstantKind____x40_Lean_Environment___hyg_1248____closed__45; LEAN_EXPORT lean_object* l_Array_foldlMUnsafe_fold___at_Lean_mkModuleData___spec__6(lean_object*, lean_object*, lean_object*, size_t, size_t, lean_object*); static size_t l_Lean_PersistentHashMap_findAux___at_Lean_Kernel_Environment_find_x3f___spec__3___closed__2; LEAN_EXPORT lean_object* l_Lean_Environment_findTask___lambda__1(lean_object*, lean_object*, uint8_t, lean_object*); LEAN_EXPORT lean_object* l_Array_forIn_x27Unsafe_loop___at_Lean_finalizeImport___spec__8(lean_object*, lean_object*, lean_object*, lean_object*, lean_object*, lean_object*, size_t, size_t, lean_object*, lean_object*); LEAN_EXPORT lean_object* l_repr___at_Lean_Environment_dbgFormatAsyncState___spec__9(lean_object*); -LEAN_EXPORT lean_object* l_Array_forIn_x27Unsafe_loop___at_Lean_importModulesCore_go___spec__10___lambda__3___boxed(lean_object*, lean_object*, lean_object*, lean_object*, lean_object*, lean_object*, lean_object*, lean_object*, 
lean_object*); +LEAN_EXPORT lean_object* l_Array_forIn_x27Unsafe_loop___at_Lean_importModulesCore_go___spec__10___lambda__3___boxed(lean_object*, lean_object*, lean_object*, lean_object*, lean_object*, lean_object*, lean_object*, lean_object*); static lean_object* l_Lean_EnvExtension_modifyState___rarg___closed__10; static lean_object* l_Lean_Kernel_instInhabitedDiagnostics___closed__3; LEAN_EXPORT lean_object* l_Array_mapMUnsafe_map___at_Lean_finalizeImport___spec__11___boxed(lean_object*, lean_object*, lean_object*); LEAN_EXPORT lean_object* l_Array_foldlMUnsafe_fold___at_Lean_finalizeImport___spec__19___boxed(lean_object*, lean_object*, lean_object*, lean_object*); LEAN_EXPORT lean_object* l_Lean_Environment_replayConsts_replayKernel___boxed(lean_object*, lean_object*, lean_object*, lean_object*, lean_object*, lean_object*); +static lean_object* l___private_Lean_Environment_0__Lean_reprConstantKind____x40_Lean_Environment___hyg_1248____closed__9; LEAN_EXPORT lean_object* l_Lean_RBNode_revFold___at_Lean_Environment_dbgFormatAsyncState___spec__5(lean_object*, lean_object*); +static lean_object* l_Lean_mkModuleData___closed__2; lean_object* l_instDecidableEqNat___boxed(lean_object*, lean_object*); static lean_object* l_Lean_mkEmptyEnvironment___lambda__1___closed__1; lean_object* lean_string_append(lean_object*, lean_object*); @@ -1191,17 +1157,20 @@ static lean_object* l___private_Lean_Environment_0__Lean_looksLikeOldCodegenName LEAN_EXPORT lean_object* l_Lean_Environment_findTask___lambda__1___boxed(lean_object*, lean_object*, lean_object*, lean_object*); LEAN_EXPORT lean_object* l_panic___at_Lean_Environment_replayConsts_replayKernel___spec__8(lean_object*, lean_object*); LEAN_EXPORT lean_object* l_Lean_Environment_evalConstCheck___rarg___boxed(lean_object*, lean_object*, lean_object*, lean_object*); +static lean_object* l___private_Lean_Environment_0__Lean_reprConstantKind____x40_Lean_Environment___hyg_1248____closed__42; LEAN_EXPORT lean_object* l___private_Lean_Environment_0__Lean_EnvExtension_modifyStateImpl___rarg(lean_object*, lean_object*, lean_object*); static lean_object* l_Prod_repr___at_Lean_Environment_dbgFormatAsyncState___spec__14___closed__1; LEAN_EXPORT lean_object* l_Lean_instGetElemArrayModuleIdxLtNatToNatSize___rarg(lean_object*, lean_object*, lean_object*); +static lean_object* l___private_Lean_Environment_0__Lean_reprConstantKind____x40_Lean_Environment___hyg_1248____closed__12; LEAN_EXPORT lean_object* l_Lean_withExporting___rarg___lambda__1(uint8_t, uint8_t, lean_object*); LEAN_EXPORT lean_object* l_Lean_Environment_AddConstAsyncResult_commitConst___lambda__2___boxed(lean_object*, lean_object*, lean_object*, lean_object*, lean_object*, lean_object*, lean_object*); lean_object* lean_array_get_size(lean_object*); +static lean_object* l_Lean_instImpl____x40_Lean_Environment___hyg_1873____closed__6; LEAN_EXPORT lean_object* l_Lean_RBNode_revFold___at_Lean_Environment_dbgFormatAsyncState___spec__5___boxed(lean_object*, lean_object*); static lean_object* l_Lean_instReprConstantKind___closed__1; LEAN_EXPORT lean_object* l_Lean_Environment_dbgFormatAsyncState(lean_object*, lean_object*); LEAN_EXPORT lean_object* l_Lean_Kernel_Environment_enableDiag(lean_object*, uint8_t); -LEAN_EXPORT lean_object* l___private_Lean_Environment_0__Lean_reprImport____x40_Lean_Environment___hyg_237____boxed(lean_object*, lean_object*); +static lean_object* l___private_Lean_Environment_0__Lean_reprConstantKind____x40_Lean_Environment___hyg_1248____closed__40; LEAN_EXPORT lean_object* 
l_List_foldl___at_Lean_Environment_dbgFormatAsyncState___spec__32___boxed(lean_object*, lean_object*); LEAN_EXPORT lean_object* l___private_Lean_Environment_0__Lean_EnvExtension_invalidExtMsg; LEAN_EXPORT lean_object* l_Lean_Environment_findAsync_x3f___lambda__1___boxed(lean_object*, lean_object*, lean_object*, lean_object*); @@ -1214,9 +1183,10 @@ static lean_object* l_List_repr___at_Lean_Environment_dbgFormatAsyncState___spec LEAN_EXPORT lean_object* l_Lean_AsyncConstantInfo_toConstantVal(lean_object*); LEAN_EXPORT lean_object* l_Lean_saveModuleData___boxed(lean_object*, lean_object*, lean_object*, lean_object*); LEAN_EXPORT lean_object* l_Lean_instMonadEnvOfMonadLift___rarg(lean_object*, lean_object*); +static lean_object* l___private_Lean_Environment_0__Lean_findOLeanParts___closed__2; lean_object* lean_array_get(lean_object*, lean_object*, lean_object*); static lean_object* l_Lean_instInhabitedAsyncConstantInfo___closed__9; -LEAN_EXPORT lean_object* l_Array_forIn_x27Unsafe_loop___at_Lean_importModulesCore_go___spec__10___lambda__1___boxed(lean_object*, lean_object*, lean_object*, lean_object*, lean_object*, lean_object*, lean_object*); +LEAN_EXPORT lean_object* l_Array_forIn_x27Unsafe_loop___at_Lean_importModulesCore_go___spec__10___lambda__1___boxed(lean_object*, lean_object*, lean_object*, lean_object*, lean_object*, lean_object*); LEAN_EXPORT lean_object* l_Array_foldlMUnsafe_fold___at_Lean_importModules___spec__2___boxed(lean_object*, lean_object*, lean_object*, lean_object*, lean_object*); lean_object* lean_find_expr(lean_object*, lean_object*); uint8_t lean_nat_dec_le(lean_object*, lean_object*); @@ -1240,7 +1210,9 @@ static lean_object* l_Lean_Environment_realizeConst___lambda__5___closed__1; LEAN_EXPORT lean_object* l_Std_Range_forIn_x27_loop___at_Lean_mkExtNameMap___spec__6___boxed(lean_object*, lean_object*, lean_object*, lean_object*, lean_object*, lean_object*, lean_object*, lean_object*, lean_object*); static lean_object* l_panic___at_Lean_importModulesCore_go___spec__1___closed__1; LEAN_EXPORT lean_object* l_Std_DHashMap_Internal_AssocList_contains___at_Lean_importModulesCore_go___spec__2___boxed(lean_object*, lean_object*); +static lean_object* l_Lean_instImpl____x40_Lean_Environment___hyg_1873____closed__2; LEAN_EXPORT lean_object* l_Array_foldlMUnsafe_fold___at_Lean_finalizeImport___spec__13(uint8_t, lean_object*, size_t, size_t, lean_object*); +static lean_object* l_Lean_instImpl____x40_Lean_Environment___hyg_1873____closed__4; lean_object* lean_compacted_region_free(size_t, lean_object*); lean_object* lean_nat_add(lean_object*, lean_object*); LEAN_EXPORT lean_object* l_Lean_instGetElem_x3fArrayModuleIdxLtNatToNatSize___lambda__1___boxed(lean_object*, lean_object*); @@ -1252,7 +1224,7 @@ LEAN_EXPORT lean_object* l_Lean_Kernel_enableDiag___lambda__1___boxed(lean_objec LEAN_EXPORT lean_object* l_Array_foldlMUnsafe_fold___at_Lean_mkModuleData___spec__6___boxed(lean_object*, lean_object*, lean_object*, lean_object*, lean_object*, lean_object*); LEAN_EXPORT lean_object* lean_environment_add(lean_object*, lean_object*); LEAN_EXPORT lean_object* l_List_repr___at_Lean_Environment_dbgFormatAsyncState___spec__12___boxed(lean_object*, lean_object*); -LEAN_EXPORT lean_object* l_Lean_importModules(lean_object*, lean_object*, uint32_t, lean_object*, uint8_t, uint8_t, uint8_t, lean_object*); +LEAN_EXPORT lean_object* l_Lean_importModules(lean_object*, lean_object*, uint32_t, lean_object*, uint8_t, uint8_t, uint8_t, lean_object*, lean_object*); static lean_object* 
l_Lean_EnvExtension_modifyState___rarg___closed__5; LEAN_EXPORT lean_object* l___private_Lean_Environment_0__Lean_VisibilityMap_const(lean_object*); LEAN_EXPORT uint8_t l_Std_DHashMap_Internal_AssocList_contains___at_Lean_finalizeImport___spec__4(lean_object*, lean_object*); @@ -1262,6 +1234,8 @@ LEAN_EXPORT lean_object* l_Lean_PersistentHashMap_find_x3f___at_Lean_Kernel_Envi LEAN_EXPORT lean_object* l_Lean_RBMap_toList___at_Lean_Environment_dbgFormatAsyncState___spec__1(lean_object*); LEAN_EXPORT lean_object* l_Lean_importModules___boxed__const__1; LEAN_EXPORT lean_object* l_Array_filterMapM___at_Lean_mkModuleData___spec__5___boxed(lean_object*, lean_object*, lean_object*, lean_object*, lean_object*); +static lean_object* l___private_Lean_Environment_0__Lean_findOLeanParts___lambda__2___closed__1; +static lean_object* l___private_Lean_Environment_0__Lean_reprConstantKind____x40_Lean_Environment___hyg_1248____closed__17; static lean_object* l_Lean_PersistentEnvExtensionDescr_name___autoParam___closed__13; LEAN_EXPORT lean_object* l_Lean_Environment_setExporting(lean_object*, uint8_t); LEAN_EXPORT lean_object* l_Lean_EnvExtension_modifyState___rarg___lambda__2___boxed(lean_object*, lean_object*, lean_object*, lean_object*); @@ -1271,14 +1245,17 @@ static lean_object* l_Lean_mkEmptyEnvironment___lambda__1___closed__2; static lean_object* l_Option_repr___at_Lean_Environment_dbgFormatAsyncState___spec__17___closed__2; LEAN_EXPORT lean_object* l_Lean_PersistentHashMap_findAux___at_Lean_Kernel_Environment_Diagnostics_recordUnfold___spec__2___boxed(lean_object*, lean_object*, lean_object*); LEAN_EXPORT lean_object* l_Lean_PersistentHashMap_foldlM___at_Lean_mkModuleData___spec__2___boxed(lean_object*, lean_object*, lean_object*); +LEAN_EXPORT lean_object* l_Lean_instImpl____x40_Lean_Environment___hyg_1873_; static lean_object* l_Lean_PersistentEnvExtensionDescr_name___autoParam___closed__6; LEAN_EXPORT lean_object* lean_kernel_get_diag(lean_object*); LEAN_EXPORT lean_object* l_Lean_Environment_setMainModule_unsafe__1___boxed(lean_object*); +static lean_object* l___private_Lean_Environment_0__Lean_reprConstantKind____x40_Lean_Environment___hyg_1248____closed__27; LEAN_EXPORT lean_object* l_Subarray_forInUnsafe_loop___at___private_Lean_Environment_0__Lean_setImportedEntries___spec__1(lean_object*, lean_object*, size_t, size_t, lean_object*, lean_object*); LEAN_EXPORT lean_object* l_Lean_Environment_AddConstAsyncResult_commitConst___lambda__5(lean_object*, lean_object*, lean_object*, lean_object*, lean_object*); LEAN_EXPORT lean_object* l_Array_mapMUnsafe_map___at_Lean_finalizeImport___spec__3___boxed(lean_object*, lean_object*, lean_object*, lean_object*); LEAN_EXPORT lean_object* l_Array_forIn_x27Unsafe_loop___at_Lean_importModulesCore_go___spec__8(lean_object*, lean_object*, lean_object*, lean_object*, size_t, size_t, lean_object*, lean_object*, lean_object*); static lean_object* l_Array_forIn_x27Unsafe_loop___at_Lean_importModulesCore_go___spec__8___closed__2; +static lean_object* l___private_Lean_Environment_0__Lean_reprConstantKind____x40_Lean_Environment___hyg_1248____closed__31; static lean_object* l_Array_foldlMUnsafe_fold___at_Lean_Environment_displayStats___spec__7___lambda__1___closed__3; static lean_object* l_Lean_EnvExtension_ensureExtensionsArraySize_loop___closed__1; LEAN_EXPORT lean_object* l_Lean_Environment_addConstAsync___lambda__6(lean_object*, lean_object*); @@ -1305,6 +1282,7 @@ LEAN_EXPORT lean_object* l___private_Lean_Environment_0__Lean_EnvExtension_findS LEAN_EXPORT 
lean_object* l_Lean_withExporting(lean_object*, lean_object*); static lean_object* l_Lean_instInhabitedPersistentEnvExtension___closed__4; LEAN_EXPORT lean_object* l_Lean_PersistentEnvExtension_findStateAsync___rarg(lean_object*, lean_object*, lean_object*, lean_object*); +static lean_object* l___private_Lean_Environment_0__Lean_reprConstantKind____x40_Lean_Environment___hyg_1248____closed__2; LEAN_EXPORT lean_object* l___private_Lean_Environment_0__Lean_ImportedModule_serverData_x3f___boxed(lean_object*, lean_object*); lean_object* lean_mk_empty_array_with_capacity(lean_object*); LEAN_EXPORT lean_object* l_Lean_registerPersistentEnvExtensionUnsafe___rarg___lambda__3(lean_object*, lean_object*, lean_object*); @@ -1312,7 +1290,6 @@ LEAN_EXPORT lean_object* l_panic___at_Lean_importModulesCore_go___spec__1(lean_o LEAN_EXPORT lean_object* l_Lean_OLeanLevel_adjustFileName(lean_object*, uint8_t); LEAN_EXPORT lean_object* l___private_Lean_Environment_0__Lean_Environment_updateBaseAfterKernelAdd___lambda__1(lean_object*, lean_object*, lean_object*); static lean_object* l_Lean_registerPersistentEnvExtensionUnsafe___rarg___lambda__3___closed__2; -static lean_object* l___private_Lean_Environment_0__Lean_reprConstantKind____x40_Lean_Environment___hyg_1396____closed__29; static lean_object* l___private_Lean_Environment_0__Lean_setImportedEntries_unsafe__2___closed__1; LEAN_EXPORT lean_object* l___private_Lean_Environment_0__Lean_Environment_findTaskCore___lambda__1___boxed(lean_object*); static lean_object* l_Lean_Environment_AddConstAsyncResult_commitSignature___closed__1; @@ -1325,14 +1302,14 @@ lean_object* l_ReaderT_instMonad___rarg(lean_object*); LEAN_EXPORT lean_object* l_Lean_withImportModules___rarg___boxed(lean_object*, lean_object*, lean_object*, lean_object*, lean_object*); size_t lean_usize_land(size_t, size_t); static lean_object* l___private_Lean_Environment_0__Lean_Environment_findTaskCore___lambda__3___closed__2; -LEAN_EXPORT lean_object* l_Array_forIn_x27Unsafe_loop___at_Lean_importModulesCore_go___spec__10___lambda__2___boxed(lean_object*, lean_object*, lean_object*, lean_object*, lean_object*, lean_object*, lean_object*, lean_object*, lean_object*, lean_object*); +LEAN_EXPORT lean_object* l_Array_forIn_x27Unsafe_loop___at_Lean_importModulesCore_go___spec__10___lambda__2___boxed(lean_object*, lean_object*, lean_object*, lean_object*, lean_object*, lean_object*, lean_object*); LEAN_EXPORT lean_object* l_Lean_saveModuleData(lean_object*, lean_object*, lean_object*, lean_object*); -LEAN_EXPORT lean_object* l_Array_forIn_x27Unsafe_loop___at_Lean_importModulesCore_go___spec__10___lambda__2(uint8_t, lean_object*, uint8_t, uint8_t, lean_object*, lean_object*, lean_object*, lean_object*, lean_object*, lean_object*); +LEAN_EXPORT lean_object* l_Array_forIn_x27Unsafe_loop___at_Lean_importModulesCore_go___spec__10___lambda__2(uint8_t, lean_object*, uint8_t, lean_object*, lean_object*, lean_object*, lean_object*); LEAN_EXPORT lean_object* l_Lean_mkEmptyEnvironment___lambda__1___boxed(lean_object*, lean_object*, lean_object*); -static lean_object* l___private_Lean_Environment_0__Lean_reprConstantKind____x40_Lean_Environment___hyg_1396____closed__9; LEAN_EXPORT lean_object* l___private_Lean_Environment_0__Lean_EnvExtension_setStateImpl(lean_object*); LEAN_EXPORT lean_object* l_Lean_PersistentEnvExtension_setState___rarg(lean_object*, lean_object*, lean_object*); LEAN_EXPORT lean_object* l_Array_foldlMUnsafe_fold___at_Lean_Environment_displayStats___spec__6(lean_object*, size_t, size_t, 
lean_object*); +static lean_object* l___private_Lean_Environment_0__Lean_reprConstantKind____x40_Lean_Environment___hyg_1248____closed__8; LEAN_EXPORT lean_object* l_Lean_Environment_asyncMayContain___boxed(lean_object*, lean_object*); LEAN_EXPORT lean_object* l_Std_DHashMap_Internal_AssocList_foldlM___at___private_Lean_Environment_0__Lean_Kernel_Environment_add___spec__9(lean_object*, lean_object*); LEAN_EXPORT lean_object* l_Lean_PersistentHashMap_insert___at___private_Lean_Environment_0__Lean_Kernel_Environment_add___spec__2(lean_object*, lean_object*, lean_object*); @@ -1340,7 +1317,6 @@ LEAN_EXPORT lean_object* l_Lean_RBTree_toArray___at_Lean_mkModuleData___spec__3( static lean_object* l_Lean_Environment_realizeConst___lambda__5___closed__5; lean_object* l___private_Init_Dynamic_0__Dynamic_get_x3fImpl___rarg(lean_object*, lean_object*); LEAN_EXPORT lean_object* l_Lean_ModuleIdx_toNat___boxed(lean_object*); -static lean_object* l___private_Lean_Environment_0__Lean_reprConstantKind____x40_Lean_Environment___hyg_1396____closed__31; static lean_object* l_Lean_PersistentEnvExtensionDescr_name___autoParam___closed__19; static lean_object* l_List_repr___at_Lean_Environment_dbgFormatAsyncState___spec__8___closed__9; LEAN_EXPORT lean_object* l_Lean_PersistentHashMap_insertAux_traverse___at_Lean_Kernel_Environment_Diagnostics_recordUnfold___spec__6(size_t, lean_object*, lean_object*, lean_object*, lean_object*, lean_object*); @@ -1629,513 +1605,6 @@ lean_dec(x_1); return x_3; } } -static lean_object* _init_l___private_Lean_Environment_0__Lean_reprImport____x40_Lean_Environment___hyg_237____closed__1() { -_start: -{ -lean_object* x_1; -x_1 = lean_mk_string_unchecked("module", 6, 6); -return x_1; -} -} -static lean_object* _init_l___private_Lean_Environment_0__Lean_reprImport____x40_Lean_Environment___hyg_237____closed__2() { -_start: -{ -lean_object* x_1; lean_object* x_2; -x_1 = l___private_Lean_Environment_0__Lean_reprImport____x40_Lean_Environment___hyg_237____closed__1; -x_2 = lean_alloc_ctor(3, 1, 0); -lean_ctor_set(x_2, 0, x_1); -return x_2; -} -} -static lean_object* _init_l___private_Lean_Environment_0__Lean_reprImport____x40_Lean_Environment___hyg_237____closed__3() { -_start: -{ -lean_object* x_1; lean_object* x_2; lean_object* x_3; -x_1 = lean_box(0); -x_2 = l___private_Lean_Environment_0__Lean_reprImport____x40_Lean_Environment___hyg_237____closed__2; -x_3 = lean_alloc_ctor(5, 2, 0); -lean_ctor_set(x_3, 0, x_1); -lean_ctor_set(x_3, 1, x_2); -return x_3; -} -} -static lean_object* _init_l___private_Lean_Environment_0__Lean_reprImport____x40_Lean_Environment___hyg_237____closed__4() { -_start: -{ -lean_object* x_1; -x_1 = lean_mk_string_unchecked(" := ", 4, 4); -return x_1; -} -} -static lean_object* _init_l___private_Lean_Environment_0__Lean_reprImport____x40_Lean_Environment___hyg_237____closed__5() { -_start: -{ -lean_object* x_1; lean_object* x_2; -x_1 = l___private_Lean_Environment_0__Lean_reprImport____x40_Lean_Environment___hyg_237____closed__4; -x_2 = lean_alloc_ctor(3, 1, 0); -lean_ctor_set(x_2, 0, x_1); -return x_2; -} -} -static lean_object* _init_l___private_Lean_Environment_0__Lean_reprImport____x40_Lean_Environment___hyg_237____closed__6() { -_start: -{ -lean_object* x_1; lean_object* x_2; lean_object* x_3; -x_1 = l___private_Lean_Environment_0__Lean_reprImport____x40_Lean_Environment___hyg_237____closed__3; -x_2 = l___private_Lean_Environment_0__Lean_reprImport____x40_Lean_Environment___hyg_237____closed__5; -x_3 = lean_alloc_ctor(5, 2, 0); -lean_ctor_set(x_3, 0, x_1); 
-lean_ctor_set(x_3, 1, x_2); -return x_3; -} -} -static lean_object* _init_l___private_Lean_Environment_0__Lean_reprImport____x40_Lean_Environment___hyg_237____closed__7() { -_start: -{ -lean_object* x_1; lean_object* x_2; -x_1 = lean_unsigned_to_nat(10u); -x_2 = lean_nat_to_int(x_1); -return x_2; -} -} -static lean_object* _init_l___private_Lean_Environment_0__Lean_reprImport____x40_Lean_Environment___hyg_237____closed__8() { -_start: -{ -lean_object* x_1; -x_1 = lean_mk_string_unchecked(",", 1, 1); -return x_1; -} -} -static lean_object* _init_l___private_Lean_Environment_0__Lean_reprImport____x40_Lean_Environment___hyg_237____closed__9() { -_start: -{ -lean_object* x_1; lean_object* x_2; -x_1 = l___private_Lean_Environment_0__Lean_reprImport____x40_Lean_Environment___hyg_237____closed__8; -x_2 = lean_alloc_ctor(3, 1, 0); -lean_ctor_set(x_2, 0, x_1); -return x_2; -} -} -static lean_object* _init_l___private_Lean_Environment_0__Lean_reprImport____x40_Lean_Environment___hyg_237____closed__10() { -_start: -{ -lean_object* x_1; -x_1 = lean_mk_string_unchecked("importAll", 9, 9); -return x_1; -} -} -static lean_object* _init_l___private_Lean_Environment_0__Lean_reprImport____x40_Lean_Environment___hyg_237____closed__11() { -_start: -{ -lean_object* x_1; lean_object* x_2; -x_1 = l___private_Lean_Environment_0__Lean_reprImport____x40_Lean_Environment___hyg_237____closed__10; -x_2 = lean_alloc_ctor(3, 1, 0); -lean_ctor_set(x_2, 0, x_1); -return x_2; -} -} -static lean_object* _init_l___private_Lean_Environment_0__Lean_reprImport____x40_Lean_Environment___hyg_237____closed__12() { -_start: -{ -lean_object* x_1; lean_object* x_2; -x_1 = lean_unsigned_to_nat(13u); -x_2 = lean_nat_to_int(x_1); -return x_2; -} -} -static lean_object* _init_l___private_Lean_Environment_0__Lean_reprImport____x40_Lean_Environment___hyg_237____closed__13() { -_start: -{ -lean_object* x_1; -x_1 = lean_mk_string_unchecked("isExported", 10, 10); -return x_1; -} -} -static lean_object* _init_l___private_Lean_Environment_0__Lean_reprImport____x40_Lean_Environment___hyg_237____closed__14() { -_start: -{ -lean_object* x_1; lean_object* x_2; -x_1 = l___private_Lean_Environment_0__Lean_reprImport____x40_Lean_Environment___hyg_237____closed__13; -x_2 = lean_alloc_ctor(3, 1, 0); -lean_ctor_set(x_2, 0, x_1); -return x_2; -} -} -static lean_object* _init_l___private_Lean_Environment_0__Lean_reprImport____x40_Lean_Environment___hyg_237____closed__15() { -_start: -{ -lean_object* x_1; lean_object* x_2; -x_1 = lean_unsigned_to_nat(14u); -x_2 = lean_nat_to_int(x_1); -return x_2; -} -} -static lean_object* _init_l___private_Lean_Environment_0__Lean_reprImport____x40_Lean_Environment___hyg_237____closed__16() { -_start: -{ -lean_object* x_1; -x_1 = lean_mk_string_unchecked("{ ", 2, 2); -return x_1; -} -} -static lean_object* _init_l___private_Lean_Environment_0__Lean_reprImport____x40_Lean_Environment___hyg_237____closed__17() { -_start: -{ -lean_object* x_1; lean_object* x_2; -x_1 = l___private_Lean_Environment_0__Lean_reprImport____x40_Lean_Environment___hyg_237____closed__16; -x_2 = lean_string_length(x_1); -return x_2; -} -} -static lean_object* _init_l___private_Lean_Environment_0__Lean_reprImport____x40_Lean_Environment___hyg_237____closed__18() { -_start: -{ -lean_object* x_1; lean_object* x_2; -x_1 = l___private_Lean_Environment_0__Lean_reprImport____x40_Lean_Environment___hyg_237____closed__17; -x_2 = lean_nat_to_int(x_1); -return x_2; -} -} -static lean_object* 
_init_l___private_Lean_Environment_0__Lean_reprImport____x40_Lean_Environment___hyg_237____closed__19() { -_start: -{ -lean_object* x_1; lean_object* x_2; -x_1 = l___private_Lean_Environment_0__Lean_reprImport____x40_Lean_Environment___hyg_237____closed__16; -x_2 = lean_alloc_ctor(3, 1, 0); -lean_ctor_set(x_2, 0, x_1); -return x_2; -} -} -static lean_object* _init_l___private_Lean_Environment_0__Lean_reprImport____x40_Lean_Environment___hyg_237____closed__20() { -_start: -{ -lean_object* x_1; -x_1 = lean_mk_string_unchecked(" }", 2, 2); -return x_1; -} -} -static lean_object* _init_l___private_Lean_Environment_0__Lean_reprImport____x40_Lean_Environment___hyg_237____closed__21() { -_start: -{ -lean_object* x_1; lean_object* x_2; -x_1 = l___private_Lean_Environment_0__Lean_reprImport____x40_Lean_Environment___hyg_237____closed__20; -x_2 = lean_alloc_ctor(3, 1, 0); -lean_ctor_set(x_2, 0, x_1); -return x_2; -} -} -static lean_object* _init_l___private_Lean_Environment_0__Lean_reprImport____x40_Lean_Environment___hyg_237____closed__22() { -_start: -{ -lean_object* x_1; -x_1 = lean_mk_string_unchecked("false", 5, 5); -return x_1; -} -} -static lean_object* _init_l___private_Lean_Environment_0__Lean_reprImport____x40_Lean_Environment___hyg_237____closed__23() { -_start: -{ -lean_object* x_1; lean_object* x_2; -x_1 = l___private_Lean_Environment_0__Lean_reprImport____x40_Lean_Environment___hyg_237____closed__22; -x_2 = lean_alloc_ctor(3, 1, 0); -lean_ctor_set(x_2, 0, x_1); -return x_2; -} -} -static lean_object* _init_l___private_Lean_Environment_0__Lean_reprImport____x40_Lean_Environment___hyg_237____closed__24() { -_start: -{ -lean_object* x_1; lean_object* x_2; lean_object* x_3; -x_1 = l___private_Lean_Environment_0__Lean_reprImport____x40_Lean_Environment___hyg_237____closed__15; -x_2 = l___private_Lean_Environment_0__Lean_reprImport____x40_Lean_Environment___hyg_237____closed__23; -x_3 = lean_alloc_ctor(4, 2, 0); -lean_ctor_set(x_3, 0, x_1); -lean_ctor_set(x_3, 1, x_2); -return x_3; -} -} -static lean_object* _init_l___private_Lean_Environment_0__Lean_reprImport____x40_Lean_Environment___hyg_237____closed__25() { -_start: -{ -lean_object* x_1; uint8_t x_2; lean_object* x_3; -x_1 = l___private_Lean_Environment_0__Lean_reprImport____x40_Lean_Environment___hyg_237____closed__24; -x_2 = 0; -x_3 = lean_alloc_ctor(6, 1, 1); -lean_ctor_set(x_3, 0, x_1); -lean_ctor_set_uint8(x_3, sizeof(void*)*1, x_2); -return x_3; -} -} -static lean_object* _init_l___private_Lean_Environment_0__Lean_reprImport____x40_Lean_Environment___hyg_237____closed__26() { -_start: -{ -lean_object* x_1; -x_1 = lean_mk_string_unchecked("true", 4, 4); -return x_1; -} -} -static lean_object* _init_l___private_Lean_Environment_0__Lean_reprImport____x40_Lean_Environment___hyg_237____closed__27() { -_start: -{ -lean_object* x_1; lean_object* x_2; -x_1 = l___private_Lean_Environment_0__Lean_reprImport____x40_Lean_Environment___hyg_237____closed__26; -x_2 = lean_alloc_ctor(3, 1, 0); -lean_ctor_set(x_2, 0, x_1); -return x_2; -} -} -static lean_object* _init_l___private_Lean_Environment_0__Lean_reprImport____x40_Lean_Environment___hyg_237____closed__28() { -_start: -{ -lean_object* x_1; lean_object* x_2; lean_object* x_3; -x_1 = l___private_Lean_Environment_0__Lean_reprImport____x40_Lean_Environment___hyg_237____closed__15; -x_2 = l___private_Lean_Environment_0__Lean_reprImport____x40_Lean_Environment___hyg_237____closed__27; -x_3 = lean_alloc_ctor(4, 2, 0); -lean_ctor_set(x_3, 0, x_1); -lean_ctor_set(x_3, 1, x_2); -return x_3; -} -} 
-static lean_object* _init_l___private_Lean_Environment_0__Lean_reprImport____x40_Lean_Environment___hyg_237____closed__29() { -_start: -{ -lean_object* x_1; uint8_t x_2; lean_object* x_3; -x_1 = l___private_Lean_Environment_0__Lean_reprImport____x40_Lean_Environment___hyg_237____closed__28; -x_2 = 0; -x_3 = lean_alloc_ctor(6, 1, 1); -lean_ctor_set(x_3, 0, x_1); -lean_ctor_set_uint8(x_3, sizeof(void*)*1, x_2); -return x_3; -} -} -LEAN_EXPORT lean_object* l___private_Lean_Environment_0__Lean_reprImport____x40_Lean_Environment___hyg_237_(lean_object* x_1, lean_object* x_2) { -_start: -{ -lean_object* x_3; lean_object* x_4; lean_object* x_5; lean_object* x_6; lean_object* x_7; uint8_t x_8; lean_object* x_9; lean_object* x_10; lean_object* x_11; lean_object* x_12; lean_object* x_13; lean_object* x_14; lean_object* x_15; lean_object* x_16; lean_object* x_17; lean_object* x_18; lean_object* x_19; uint8_t x_20; uint8_t x_21; lean_object* x_22; -x_3 = lean_ctor_get(x_1, 0); -lean_inc(x_3); -x_4 = lean_unsigned_to_nat(0u); -x_5 = l_Lean_Name_reprPrec(x_3, x_4); -x_6 = l___private_Lean_Environment_0__Lean_reprImport____x40_Lean_Environment___hyg_237____closed__7; -x_7 = lean_alloc_ctor(4, 2, 0); -lean_ctor_set(x_7, 0, x_6); -lean_ctor_set(x_7, 1, x_5); -x_8 = 0; -x_9 = lean_alloc_ctor(6, 1, 1); -lean_ctor_set(x_9, 0, x_7); -lean_ctor_set_uint8(x_9, sizeof(void*)*1, x_8); -x_10 = l___private_Lean_Environment_0__Lean_reprImport____x40_Lean_Environment___hyg_237____closed__6; -x_11 = lean_alloc_ctor(5, 2, 0); -lean_ctor_set(x_11, 0, x_10); -lean_ctor_set(x_11, 1, x_9); -x_12 = l___private_Lean_Environment_0__Lean_reprImport____x40_Lean_Environment___hyg_237____closed__9; -x_13 = lean_alloc_ctor(5, 2, 0); -lean_ctor_set(x_13, 0, x_11); -lean_ctor_set(x_13, 1, x_12); -x_14 = lean_box(1); -x_15 = lean_alloc_ctor(5, 2, 0); -lean_ctor_set(x_15, 0, x_13); -lean_ctor_set(x_15, 1, x_14); -x_16 = l___private_Lean_Environment_0__Lean_reprImport____x40_Lean_Environment___hyg_237____closed__11; -x_17 = lean_alloc_ctor(5, 2, 0); -lean_ctor_set(x_17, 0, x_15); -lean_ctor_set(x_17, 1, x_16); -x_18 = l___private_Lean_Environment_0__Lean_reprImport____x40_Lean_Environment___hyg_237____closed__5; -x_19 = lean_alloc_ctor(5, 2, 0); -lean_ctor_set(x_19, 0, x_17); -lean_ctor_set(x_19, 1, x_18); -x_20 = lean_ctor_get_uint8(x_1, sizeof(void*)*1); -x_21 = lean_ctor_get_uint8(x_1, sizeof(void*)*1 + 1); -lean_dec(x_1); -if (x_20 == 0) -{ -lean_object* x_51; -x_51 = l___private_Lean_Environment_0__Lean_reprImport____x40_Lean_Environment___hyg_237____closed__23; -x_22 = x_51; -goto block_50; -} -else -{ -lean_object* x_52; -x_52 = l___private_Lean_Environment_0__Lean_reprImport____x40_Lean_Environment___hyg_237____closed__27; -x_22 = x_52; -goto block_50; -} -block_50: -{ -lean_object* x_23; lean_object* x_24; lean_object* x_25; lean_object* x_26; lean_object* x_27; lean_object* x_28; lean_object* x_29; lean_object* x_30; lean_object* x_31; -x_23 = l___private_Lean_Environment_0__Lean_reprImport____x40_Lean_Environment___hyg_237____closed__12; -x_24 = lean_alloc_ctor(4, 2, 0); -lean_ctor_set(x_24, 0, x_23); -lean_ctor_set(x_24, 1, x_22); -x_25 = lean_alloc_ctor(6, 1, 1); -lean_ctor_set(x_25, 0, x_24); -lean_ctor_set_uint8(x_25, sizeof(void*)*1, x_8); -x_26 = lean_alloc_ctor(5, 2, 0); -lean_ctor_set(x_26, 0, x_19); -lean_ctor_set(x_26, 1, x_25); -x_27 = lean_alloc_ctor(5, 2, 0); -lean_ctor_set(x_27, 0, x_26); -lean_ctor_set(x_27, 1, x_12); -x_28 = lean_alloc_ctor(5, 2, 0); -lean_ctor_set(x_28, 0, x_27); -lean_ctor_set(x_28, 1, 
x_14); -x_29 = l___private_Lean_Environment_0__Lean_reprImport____x40_Lean_Environment___hyg_237____closed__14; -x_30 = lean_alloc_ctor(5, 2, 0); -lean_ctor_set(x_30, 0, x_28); -lean_ctor_set(x_30, 1, x_29); -x_31 = lean_alloc_ctor(5, 2, 0); -lean_ctor_set(x_31, 0, x_30); -lean_ctor_set(x_31, 1, x_18); -if (x_21 == 0) -{ -lean_object* x_32; lean_object* x_33; lean_object* x_34; lean_object* x_35; lean_object* x_36; lean_object* x_37; lean_object* x_38; lean_object* x_39; lean_object* x_40; -x_32 = l___private_Lean_Environment_0__Lean_reprImport____x40_Lean_Environment___hyg_237____closed__25; -x_33 = lean_alloc_ctor(5, 2, 0); -lean_ctor_set(x_33, 0, x_31); -lean_ctor_set(x_33, 1, x_32); -x_34 = l___private_Lean_Environment_0__Lean_reprImport____x40_Lean_Environment___hyg_237____closed__19; -x_35 = lean_alloc_ctor(5, 2, 0); -lean_ctor_set(x_35, 0, x_34); -lean_ctor_set(x_35, 1, x_33); -x_36 = l___private_Lean_Environment_0__Lean_reprImport____x40_Lean_Environment___hyg_237____closed__21; -x_37 = lean_alloc_ctor(5, 2, 0); -lean_ctor_set(x_37, 0, x_35); -lean_ctor_set(x_37, 1, x_36); -x_38 = l___private_Lean_Environment_0__Lean_reprImport____x40_Lean_Environment___hyg_237____closed__18; -x_39 = lean_alloc_ctor(4, 2, 0); -lean_ctor_set(x_39, 0, x_38); -lean_ctor_set(x_39, 1, x_37); -x_40 = lean_alloc_ctor(6, 1, 1); -lean_ctor_set(x_40, 0, x_39); -lean_ctor_set_uint8(x_40, sizeof(void*)*1, x_8); -return x_40; -} -else -{ -lean_object* x_41; lean_object* x_42; lean_object* x_43; lean_object* x_44; lean_object* x_45; lean_object* x_46; lean_object* x_47; lean_object* x_48; lean_object* x_49; -x_41 = l___private_Lean_Environment_0__Lean_reprImport____x40_Lean_Environment___hyg_237____closed__29; -x_42 = lean_alloc_ctor(5, 2, 0); -lean_ctor_set(x_42, 0, x_31); -lean_ctor_set(x_42, 1, x_41); -x_43 = l___private_Lean_Environment_0__Lean_reprImport____x40_Lean_Environment___hyg_237____closed__19; -x_44 = lean_alloc_ctor(5, 2, 0); -lean_ctor_set(x_44, 0, x_43); -lean_ctor_set(x_44, 1, x_42); -x_45 = l___private_Lean_Environment_0__Lean_reprImport____x40_Lean_Environment___hyg_237____closed__21; -x_46 = lean_alloc_ctor(5, 2, 0); -lean_ctor_set(x_46, 0, x_44); -lean_ctor_set(x_46, 1, x_45); -x_47 = l___private_Lean_Environment_0__Lean_reprImport____x40_Lean_Environment___hyg_237____closed__18; -x_48 = lean_alloc_ctor(4, 2, 0); -lean_ctor_set(x_48, 0, x_47); -lean_ctor_set(x_48, 1, x_46); -x_49 = lean_alloc_ctor(6, 1, 1); -lean_ctor_set(x_49, 0, x_48); -lean_ctor_set_uint8(x_49, sizeof(void*)*1, x_8); -return x_49; -} -} -} -} -LEAN_EXPORT lean_object* l___private_Lean_Environment_0__Lean_reprImport____x40_Lean_Environment___hyg_237____boxed(lean_object* x_1, lean_object* x_2) { -_start: -{ -lean_object* x_3; -x_3 = l___private_Lean_Environment_0__Lean_reprImport____x40_Lean_Environment___hyg_237_(x_1, x_2); -lean_dec(x_2); -return x_3; -} -} -static lean_object* _init_l_Lean_instReprImport___closed__1() { -_start: -{ -lean_object* x_1; -x_1 = lean_alloc_closure((void*)(l___private_Lean_Environment_0__Lean_reprImport____x40_Lean_Environment___hyg_237____boxed), 2, 0); -return x_1; -} -} -static lean_object* _init_l_Lean_instReprImport() { -_start: -{ -lean_object* x_1; -x_1 = l_Lean_instReprImport___closed__1; -return x_1; -} -} -static lean_object* _init_l_Lean_instInhabitedImport___closed__1() { -_start: -{ -lean_object* x_1; uint8_t x_2; lean_object* x_3; -x_1 = lean_box(0); -x_2 = 0; -x_3 = lean_alloc_ctor(0, 1, 2); -lean_ctor_set(x_3, 0, x_1); -lean_ctor_set_uint8(x_3, sizeof(void*)*1, x_2); 
-lean_ctor_set_uint8(x_3, sizeof(void*)*1 + 1, x_2); -return x_3; -} -} -static lean_object* _init_l_Lean_instInhabitedImport() { -_start: -{ -lean_object* x_1; -x_1 = l_Lean_instInhabitedImport___closed__1; -return x_1; -} -} -LEAN_EXPORT lean_object* l_Lean_instCoeNameImport(lean_object* x_1) { -_start: -{ -uint8_t x_2; uint8_t x_3; lean_object* x_4; -x_2 = 0; -x_3 = 1; -x_4 = lean_alloc_ctor(0, 1, 2); -lean_ctor_set(x_4, 0, x_1); -lean_ctor_set_uint8(x_4, sizeof(void*)*1, x_2); -lean_ctor_set_uint8(x_4, sizeof(void*)*1 + 1, x_3); -return x_4; -} -} -LEAN_EXPORT uint8_t l_Lean_instToStringImport___lambda__1(lean_object* x_1) { -_start: -{ -uint8_t x_2; -x_2 = 0; -return x_2; -} -} -static lean_object* _init_l_Lean_instToStringImport___closed__1() { -_start: -{ -lean_object* x_1; -x_1 = lean_alloc_closure((void*)(l_Lean_instToStringImport___lambda__1___boxed), 1, 0); -return x_1; -} -} -LEAN_EXPORT lean_object* l_Lean_instToStringImport(lean_object* x_1) { -_start: -{ -lean_object* x_2; uint8_t x_3; lean_object* x_4; lean_object* x_5; -x_2 = lean_ctor_get(x_1, 0); -lean_inc(x_2); -lean_dec(x_1); -x_3 = 1; -x_4 = l_Lean_instToStringImport___closed__1; -x_5 = l_Lean_Name_toString(x_2, x_3, x_4); -return x_5; -} -} -LEAN_EXPORT lean_object* l_Lean_instToStringImport___lambda__1___boxed(lean_object* x_1) { -_start: -{ -uint8_t x_2; lean_object* x_3; -x_2 = l_Lean_instToStringImport___lambda__1(x_1); -lean_dec(x_1); -x_3 = lean_box(x_2); -return x_3; -} -} LEAN_EXPORT lean_object* l_Lean_CompactedRegion_isMemoryMapped___boxed(lean_object* x_1) { _start: { @@ -5049,7 +4518,7 @@ x_1 = 0; return x_1; } } -LEAN_EXPORT uint8_t l___private_Lean_Environment_0__Lean_beqConstantKind____x40_Lean_Environment___hyg_1378_(uint8_t x_1, uint8_t x_2) { +LEAN_EXPORT uint8_t l___private_Lean_Environment_0__Lean_beqConstantKind____x40_Lean_Environment___hyg_1230_(uint8_t x_1, uint8_t x_2) { _start: { lean_object* x_3; lean_object* x_4; uint8_t x_5; @@ -5061,7 +4530,7 @@ lean_dec(x_3); return x_5; } } -LEAN_EXPORT lean_object* l___private_Lean_Environment_0__Lean_beqConstantKind____x40_Lean_Environment___hyg_1378____boxed(lean_object* x_1, lean_object* x_2) { +LEAN_EXPORT lean_object* l___private_Lean_Environment_0__Lean_beqConstantKind____x40_Lean_Environment___hyg_1230____boxed(lean_object* x_1, lean_object* x_2) { _start: { uint8_t x_3; uint8_t x_4; uint8_t x_5; lean_object* x_6; @@ -5069,7 +4538,7 @@ x_3 = lean_unbox(x_1); lean_dec(x_1); x_4 = lean_unbox(x_2); lean_dec(x_2); -x_5 = l___private_Lean_Environment_0__Lean_beqConstantKind____x40_Lean_Environment___hyg_1378_(x_3, x_4); +x_5 = l___private_Lean_Environment_0__Lean_beqConstantKind____x40_Lean_Environment___hyg_1230_(x_3, x_4); x_6 = lean_box(x_5); return x_6; } @@ -5078,7 +4547,7 @@ static lean_object* _init_l_Lean_instBEqConstantKind___closed__1() { _start: { lean_object* x_1; -x_1 = lean_alloc_closure((void*)(l___private_Lean_Environment_0__Lean_beqConstantKind____x40_Lean_Environment___hyg_1378____boxed), 2, 0); +x_1 = lean_alloc_closure((void*)(l___private_Lean_Environment_0__Lean_beqConstantKind____x40_Lean_Environment___hyg_1230____boxed), 2, 0); return x_1; } } @@ -5090,7 +4559,7 @@ x_1 = l_Lean_instBEqConstantKind___closed__1; return x_1; } } -static lean_object* _init_l___private_Lean_Environment_0__Lean_reprConstantKind____x40_Lean_Environment___hyg_1396____closed__1() { +static lean_object* _init_l___private_Lean_Environment_0__Lean_reprConstantKind____x40_Lean_Environment___hyg_1248____closed__1() { _start: { lean_object* x_1; @@ -5098,17 
+4567,17 @@ x_1 = lean_mk_string_unchecked("Lean.ConstantKind.defn", 22, 22); return x_1; } } -static lean_object* _init_l___private_Lean_Environment_0__Lean_reprConstantKind____x40_Lean_Environment___hyg_1396____closed__2() { +static lean_object* _init_l___private_Lean_Environment_0__Lean_reprConstantKind____x40_Lean_Environment___hyg_1248____closed__2() { _start: { lean_object* x_1; lean_object* x_2; -x_1 = l___private_Lean_Environment_0__Lean_reprConstantKind____x40_Lean_Environment___hyg_1396____closed__1; +x_1 = l___private_Lean_Environment_0__Lean_reprConstantKind____x40_Lean_Environment___hyg_1248____closed__1; x_2 = lean_alloc_ctor(3, 1, 0); lean_ctor_set(x_2, 0, x_1); return x_2; } } -static lean_object* _init_l___private_Lean_Environment_0__Lean_reprConstantKind____x40_Lean_Environment___hyg_1396____closed__3() { +static lean_object* _init_l___private_Lean_Environment_0__Lean_reprConstantKind____x40_Lean_Environment___hyg_1248____closed__3() { _start: { lean_object* x_1; lean_object* x_2; @@ -5117,23 +4586,23 @@ x_2 = lean_nat_to_int(x_1); return x_2; } } -static lean_object* _init_l___private_Lean_Environment_0__Lean_reprConstantKind____x40_Lean_Environment___hyg_1396____closed__4() { +static lean_object* _init_l___private_Lean_Environment_0__Lean_reprConstantKind____x40_Lean_Environment___hyg_1248____closed__4() { _start: { lean_object* x_1; lean_object* x_2; lean_object* x_3; -x_1 = l___private_Lean_Environment_0__Lean_reprConstantKind____x40_Lean_Environment___hyg_1396____closed__3; -x_2 = l___private_Lean_Environment_0__Lean_reprConstantKind____x40_Lean_Environment___hyg_1396____closed__2; +x_1 = l___private_Lean_Environment_0__Lean_reprConstantKind____x40_Lean_Environment___hyg_1248____closed__3; +x_2 = l___private_Lean_Environment_0__Lean_reprConstantKind____x40_Lean_Environment___hyg_1248____closed__2; x_3 = lean_alloc_ctor(4, 2, 0); lean_ctor_set(x_3, 0, x_1); lean_ctor_set(x_3, 1, x_2); return x_3; } } -static lean_object* _init_l___private_Lean_Environment_0__Lean_reprConstantKind____x40_Lean_Environment___hyg_1396____closed__5() { +static lean_object* _init_l___private_Lean_Environment_0__Lean_reprConstantKind____x40_Lean_Environment___hyg_1248____closed__5() { _start: { lean_object* x_1; uint8_t x_2; lean_object* x_3; -x_1 = l___private_Lean_Environment_0__Lean_reprConstantKind____x40_Lean_Environment___hyg_1396____closed__4; +x_1 = l___private_Lean_Environment_0__Lean_reprConstantKind____x40_Lean_Environment___hyg_1248____closed__4; x_2 = 0; x_3 = lean_alloc_ctor(6, 1, 1); lean_ctor_set(x_3, 0, x_1); @@ -5141,7 +4610,7 @@ lean_ctor_set_uint8(x_3, sizeof(void*)*1, x_2); return x_3; } } -static lean_object* _init_l___private_Lean_Environment_0__Lean_reprConstantKind____x40_Lean_Environment___hyg_1396____closed__6() { +static lean_object* _init_l___private_Lean_Environment_0__Lean_reprConstantKind____x40_Lean_Environment___hyg_1248____closed__6() { _start: { lean_object* x_1; lean_object* x_2; @@ -5150,23 +4619,23 @@ x_2 = lean_nat_to_int(x_1); return x_2; } } -static lean_object* _init_l___private_Lean_Environment_0__Lean_reprConstantKind____x40_Lean_Environment___hyg_1396____closed__7() { +static lean_object* _init_l___private_Lean_Environment_0__Lean_reprConstantKind____x40_Lean_Environment___hyg_1248____closed__7() { _start: { lean_object* x_1; lean_object* x_2; lean_object* x_3; -x_1 = l___private_Lean_Environment_0__Lean_reprConstantKind____x40_Lean_Environment___hyg_1396____closed__6; -x_2 = 
l___private_Lean_Environment_0__Lean_reprConstantKind____x40_Lean_Environment___hyg_1396____closed__2; +x_1 = l___private_Lean_Environment_0__Lean_reprConstantKind____x40_Lean_Environment___hyg_1248____closed__6; +x_2 = l___private_Lean_Environment_0__Lean_reprConstantKind____x40_Lean_Environment___hyg_1248____closed__2; x_3 = lean_alloc_ctor(4, 2, 0); lean_ctor_set(x_3, 0, x_1); lean_ctor_set(x_3, 1, x_2); return x_3; } } -static lean_object* _init_l___private_Lean_Environment_0__Lean_reprConstantKind____x40_Lean_Environment___hyg_1396____closed__8() { +static lean_object* _init_l___private_Lean_Environment_0__Lean_reprConstantKind____x40_Lean_Environment___hyg_1248____closed__8() { _start: { lean_object* x_1; uint8_t x_2; lean_object* x_3; -x_1 = l___private_Lean_Environment_0__Lean_reprConstantKind____x40_Lean_Environment___hyg_1396____closed__7; +x_1 = l___private_Lean_Environment_0__Lean_reprConstantKind____x40_Lean_Environment___hyg_1248____closed__7; x_2 = 0; x_3 = lean_alloc_ctor(6, 1, 1); lean_ctor_set(x_3, 0, x_1); @@ -5174,7 +4643,7 @@ lean_ctor_set_uint8(x_3, sizeof(void*)*1, x_2); return x_3; } } -static lean_object* _init_l___private_Lean_Environment_0__Lean_reprConstantKind____x40_Lean_Environment___hyg_1396____closed__9() { +static lean_object* _init_l___private_Lean_Environment_0__Lean_reprConstantKind____x40_Lean_Environment___hyg_1248____closed__9() { _start: { lean_object* x_1; @@ -5182,33 +4651,33 @@ x_1 = lean_mk_string_unchecked("Lean.ConstantKind.thm", 21, 21); return x_1; } } -static lean_object* _init_l___private_Lean_Environment_0__Lean_reprConstantKind____x40_Lean_Environment___hyg_1396____closed__10() { +static lean_object* _init_l___private_Lean_Environment_0__Lean_reprConstantKind____x40_Lean_Environment___hyg_1248____closed__10() { _start: { lean_object* x_1; lean_object* x_2; -x_1 = l___private_Lean_Environment_0__Lean_reprConstantKind____x40_Lean_Environment___hyg_1396____closed__9; +x_1 = l___private_Lean_Environment_0__Lean_reprConstantKind____x40_Lean_Environment___hyg_1248____closed__9; x_2 = lean_alloc_ctor(3, 1, 0); lean_ctor_set(x_2, 0, x_1); return x_2; } } -static lean_object* _init_l___private_Lean_Environment_0__Lean_reprConstantKind____x40_Lean_Environment___hyg_1396____closed__11() { +static lean_object* _init_l___private_Lean_Environment_0__Lean_reprConstantKind____x40_Lean_Environment___hyg_1248____closed__11() { _start: { lean_object* x_1; lean_object* x_2; lean_object* x_3; -x_1 = l___private_Lean_Environment_0__Lean_reprConstantKind____x40_Lean_Environment___hyg_1396____closed__3; -x_2 = l___private_Lean_Environment_0__Lean_reprConstantKind____x40_Lean_Environment___hyg_1396____closed__10; +x_1 = l___private_Lean_Environment_0__Lean_reprConstantKind____x40_Lean_Environment___hyg_1248____closed__3; +x_2 = l___private_Lean_Environment_0__Lean_reprConstantKind____x40_Lean_Environment___hyg_1248____closed__10; x_3 = lean_alloc_ctor(4, 2, 0); lean_ctor_set(x_3, 0, x_1); lean_ctor_set(x_3, 1, x_2); return x_3; } } -static lean_object* _init_l___private_Lean_Environment_0__Lean_reprConstantKind____x40_Lean_Environment___hyg_1396____closed__12() { +static lean_object* _init_l___private_Lean_Environment_0__Lean_reprConstantKind____x40_Lean_Environment___hyg_1248____closed__12() { _start: { lean_object* x_1; uint8_t x_2; lean_object* x_3; -x_1 = l___private_Lean_Environment_0__Lean_reprConstantKind____x40_Lean_Environment___hyg_1396____closed__11; +x_1 = 
l___private_Lean_Environment_0__Lean_reprConstantKind____x40_Lean_Environment___hyg_1248____closed__11; x_2 = 0; x_3 = lean_alloc_ctor(6, 1, 1); lean_ctor_set(x_3, 0, x_1); @@ -5216,23 +4685,23 @@ lean_ctor_set_uint8(x_3, sizeof(void*)*1, x_2); return x_3; } } -static lean_object* _init_l___private_Lean_Environment_0__Lean_reprConstantKind____x40_Lean_Environment___hyg_1396____closed__13() { +static lean_object* _init_l___private_Lean_Environment_0__Lean_reprConstantKind____x40_Lean_Environment___hyg_1248____closed__13() { _start: { lean_object* x_1; lean_object* x_2; lean_object* x_3; -x_1 = l___private_Lean_Environment_0__Lean_reprConstantKind____x40_Lean_Environment___hyg_1396____closed__6; -x_2 = l___private_Lean_Environment_0__Lean_reprConstantKind____x40_Lean_Environment___hyg_1396____closed__10; +x_1 = l___private_Lean_Environment_0__Lean_reprConstantKind____x40_Lean_Environment___hyg_1248____closed__6; +x_2 = l___private_Lean_Environment_0__Lean_reprConstantKind____x40_Lean_Environment___hyg_1248____closed__10; x_3 = lean_alloc_ctor(4, 2, 0); lean_ctor_set(x_3, 0, x_1); lean_ctor_set(x_3, 1, x_2); return x_3; } } -static lean_object* _init_l___private_Lean_Environment_0__Lean_reprConstantKind____x40_Lean_Environment___hyg_1396____closed__14() { +static lean_object* _init_l___private_Lean_Environment_0__Lean_reprConstantKind____x40_Lean_Environment___hyg_1248____closed__14() { _start: { lean_object* x_1; uint8_t x_2; lean_object* x_3; -x_1 = l___private_Lean_Environment_0__Lean_reprConstantKind____x40_Lean_Environment___hyg_1396____closed__13; +x_1 = l___private_Lean_Environment_0__Lean_reprConstantKind____x40_Lean_Environment___hyg_1248____closed__13; x_2 = 0; x_3 = lean_alloc_ctor(6, 1, 1); lean_ctor_set(x_3, 0, x_1); @@ -5240,7 +4709,7 @@ lean_ctor_set_uint8(x_3, sizeof(void*)*1, x_2); return x_3; } } -static lean_object* _init_l___private_Lean_Environment_0__Lean_reprConstantKind____x40_Lean_Environment___hyg_1396____closed__15() { +static lean_object* _init_l___private_Lean_Environment_0__Lean_reprConstantKind____x40_Lean_Environment___hyg_1248____closed__15() { _start: { lean_object* x_1; @@ -5248,33 +4717,33 @@ x_1 = lean_mk_string_unchecked("Lean.ConstantKind.axiom", 23, 23); return x_1; } } -static lean_object* _init_l___private_Lean_Environment_0__Lean_reprConstantKind____x40_Lean_Environment___hyg_1396____closed__16() { +static lean_object* _init_l___private_Lean_Environment_0__Lean_reprConstantKind____x40_Lean_Environment___hyg_1248____closed__16() { _start: { lean_object* x_1; lean_object* x_2; -x_1 = l___private_Lean_Environment_0__Lean_reprConstantKind____x40_Lean_Environment___hyg_1396____closed__15; +x_1 = l___private_Lean_Environment_0__Lean_reprConstantKind____x40_Lean_Environment___hyg_1248____closed__15; x_2 = lean_alloc_ctor(3, 1, 0); lean_ctor_set(x_2, 0, x_1); return x_2; } } -static lean_object* _init_l___private_Lean_Environment_0__Lean_reprConstantKind____x40_Lean_Environment___hyg_1396____closed__17() { +static lean_object* _init_l___private_Lean_Environment_0__Lean_reprConstantKind____x40_Lean_Environment___hyg_1248____closed__17() { _start: { lean_object* x_1; lean_object* x_2; lean_object* x_3; -x_1 = l___private_Lean_Environment_0__Lean_reprConstantKind____x40_Lean_Environment___hyg_1396____closed__3; -x_2 = l___private_Lean_Environment_0__Lean_reprConstantKind____x40_Lean_Environment___hyg_1396____closed__16; +x_1 = l___private_Lean_Environment_0__Lean_reprConstantKind____x40_Lean_Environment___hyg_1248____closed__3; +x_2 = 
l___private_Lean_Environment_0__Lean_reprConstantKind____x40_Lean_Environment___hyg_1248____closed__16; x_3 = lean_alloc_ctor(4, 2, 0); lean_ctor_set(x_3, 0, x_1); lean_ctor_set(x_3, 1, x_2); return x_3; } } -static lean_object* _init_l___private_Lean_Environment_0__Lean_reprConstantKind____x40_Lean_Environment___hyg_1396____closed__18() { +static lean_object* _init_l___private_Lean_Environment_0__Lean_reprConstantKind____x40_Lean_Environment___hyg_1248____closed__18() { _start: { lean_object* x_1; uint8_t x_2; lean_object* x_3; -x_1 = l___private_Lean_Environment_0__Lean_reprConstantKind____x40_Lean_Environment___hyg_1396____closed__17; +x_1 = l___private_Lean_Environment_0__Lean_reprConstantKind____x40_Lean_Environment___hyg_1248____closed__17; x_2 = 0; x_3 = lean_alloc_ctor(6, 1, 1); lean_ctor_set(x_3, 0, x_1); @@ -5282,23 +4751,23 @@ lean_ctor_set_uint8(x_3, sizeof(void*)*1, x_2); return x_3; } } -static lean_object* _init_l___private_Lean_Environment_0__Lean_reprConstantKind____x40_Lean_Environment___hyg_1396____closed__19() { +static lean_object* _init_l___private_Lean_Environment_0__Lean_reprConstantKind____x40_Lean_Environment___hyg_1248____closed__19() { _start: { lean_object* x_1; lean_object* x_2; lean_object* x_3; -x_1 = l___private_Lean_Environment_0__Lean_reprConstantKind____x40_Lean_Environment___hyg_1396____closed__6; -x_2 = l___private_Lean_Environment_0__Lean_reprConstantKind____x40_Lean_Environment___hyg_1396____closed__16; +x_1 = l___private_Lean_Environment_0__Lean_reprConstantKind____x40_Lean_Environment___hyg_1248____closed__6; +x_2 = l___private_Lean_Environment_0__Lean_reprConstantKind____x40_Lean_Environment___hyg_1248____closed__16; x_3 = lean_alloc_ctor(4, 2, 0); lean_ctor_set(x_3, 0, x_1); lean_ctor_set(x_3, 1, x_2); return x_3; } } -static lean_object* _init_l___private_Lean_Environment_0__Lean_reprConstantKind____x40_Lean_Environment___hyg_1396____closed__20() { +static lean_object* _init_l___private_Lean_Environment_0__Lean_reprConstantKind____x40_Lean_Environment___hyg_1248____closed__20() { _start: { lean_object* x_1; uint8_t x_2; lean_object* x_3; -x_1 = l___private_Lean_Environment_0__Lean_reprConstantKind____x40_Lean_Environment___hyg_1396____closed__19; +x_1 = l___private_Lean_Environment_0__Lean_reprConstantKind____x40_Lean_Environment___hyg_1248____closed__19; x_2 = 0; x_3 = lean_alloc_ctor(6, 1, 1); lean_ctor_set(x_3, 0, x_1); @@ -5306,7 +4775,7 @@ lean_ctor_set_uint8(x_3, sizeof(void*)*1, x_2); return x_3; } } -static lean_object* _init_l___private_Lean_Environment_0__Lean_reprConstantKind____x40_Lean_Environment___hyg_1396____closed__21() { +static lean_object* _init_l___private_Lean_Environment_0__Lean_reprConstantKind____x40_Lean_Environment___hyg_1248____closed__21() { _start: { lean_object* x_1; @@ -5314,33 +4783,33 @@ x_1 = lean_mk_string_unchecked("Lean.ConstantKind.opaque", 24, 24); return x_1; } } -static lean_object* _init_l___private_Lean_Environment_0__Lean_reprConstantKind____x40_Lean_Environment___hyg_1396____closed__22() { +static lean_object* _init_l___private_Lean_Environment_0__Lean_reprConstantKind____x40_Lean_Environment___hyg_1248____closed__22() { _start: { lean_object* x_1; lean_object* x_2; -x_1 = l___private_Lean_Environment_0__Lean_reprConstantKind____x40_Lean_Environment___hyg_1396____closed__21; +x_1 = l___private_Lean_Environment_0__Lean_reprConstantKind____x40_Lean_Environment___hyg_1248____closed__21; x_2 = lean_alloc_ctor(3, 1, 0); lean_ctor_set(x_2, 0, x_1); return x_2; } } -static lean_object* 
_init_l___private_Lean_Environment_0__Lean_reprConstantKind____x40_Lean_Environment___hyg_1396____closed__23() { +static lean_object* _init_l___private_Lean_Environment_0__Lean_reprConstantKind____x40_Lean_Environment___hyg_1248____closed__23() { _start: { lean_object* x_1; lean_object* x_2; lean_object* x_3; -x_1 = l___private_Lean_Environment_0__Lean_reprConstantKind____x40_Lean_Environment___hyg_1396____closed__3; -x_2 = l___private_Lean_Environment_0__Lean_reprConstantKind____x40_Lean_Environment___hyg_1396____closed__22; +x_1 = l___private_Lean_Environment_0__Lean_reprConstantKind____x40_Lean_Environment___hyg_1248____closed__3; +x_2 = l___private_Lean_Environment_0__Lean_reprConstantKind____x40_Lean_Environment___hyg_1248____closed__22; x_3 = lean_alloc_ctor(4, 2, 0); lean_ctor_set(x_3, 0, x_1); lean_ctor_set(x_3, 1, x_2); return x_3; } } -static lean_object* _init_l___private_Lean_Environment_0__Lean_reprConstantKind____x40_Lean_Environment___hyg_1396____closed__24() { +static lean_object* _init_l___private_Lean_Environment_0__Lean_reprConstantKind____x40_Lean_Environment___hyg_1248____closed__24() { _start: { lean_object* x_1; uint8_t x_2; lean_object* x_3; -x_1 = l___private_Lean_Environment_0__Lean_reprConstantKind____x40_Lean_Environment___hyg_1396____closed__23; +x_1 = l___private_Lean_Environment_0__Lean_reprConstantKind____x40_Lean_Environment___hyg_1248____closed__23; x_2 = 0; x_3 = lean_alloc_ctor(6, 1, 1); lean_ctor_set(x_3, 0, x_1); @@ -5348,23 +4817,23 @@ lean_ctor_set_uint8(x_3, sizeof(void*)*1, x_2); return x_3; } } -static lean_object* _init_l___private_Lean_Environment_0__Lean_reprConstantKind____x40_Lean_Environment___hyg_1396____closed__25() { +static lean_object* _init_l___private_Lean_Environment_0__Lean_reprConstantKind____x40_Lean_Environment___hyg_1248____closed__25() { _start: { lean_object* x_1; lean_object* x_2; lean_object* x_3; -x_1 = l___private_Lean_Environment_0__Lean_reprConstantKind____x40_Lean_Environment___hyg_1396____closed__6; -x_2 = l___private_Lean_Environment_0__Lean_reprConstantKind____x40_Lean_Environment___hyg_1396____closed__22; +x_1 = l___private_Lean_Environment_0__Lean_reprConstantKind____x40_Lean_Environment___hyg_1248____closed__6; +x_2 = l___private_Lean_Environment_0__Lean_reprConstantKind____x40_Lean_Environment___hyg_1248____closed__22; x_3 = lean_alloc_ctor(4, 2, 0); lean_ctor_set(x_3, 0, x_1); lean_ctor_set(x_3, 1, x_2); return x_3; } } -static lean_object* _init_l___private_Lean_Environment_0__Lean_reprConstantKind____x40_Lean_Environment___hyg_1396____closed__26() { +static lean_object* _init_l___private_Lean_Environment_0__Lean_reprConstantKind____x40_Lean_Environment___hyg_1248____closed__26() { _start: { lean_object* x_1; uint8_t x_2; lean_object* x_3; -x_1 = l___private_Lean_Environment_0__Lean_reprConstantKind____x40_Lean_Environment___hyg_1396____closed__25; +x_1 = l___private_Lean_Environment_0__Lean_reprConstantKind____x40_Lean_Environment___hyg_1248____closed__25; x_2 = 0; x_3 = lean_alloc_ctor(6, 1, 1); lean_ctor_set(x_3, 0, x_1); @@ -5372,7 +4841,7 @@ lean_ctor_set_uint8(x_3, sizeof(void*)*1, x_2); return x_3; } } -static lean_object* _init_l___private_Lean_Environment_0__Lean_reprConstantKind____x40_Lean_Environment___hyg_1396____closed__27() { +static lean_object* _init_l___private_Lean_Environment_0__Lean_reprConstantKind____x40_Lean_Environment___hyg_1248____closed__27() { _start: { lean_object* x_1; @@ -5380,33 +4849,33 @@ x_1 = lean_mk_string_unchecked("Lean.ConstantKind.quot", 22, 22); return x_1; } } -static 
lean_object* _init_l___private_Lean_Environment_0__Lean_reprConstantKind____x40_Lean_Environment___hyg_1396____closed__28() { +static lean_object* _init_l___private_Lean_Environment_0__Lean_reprConstantKind____x40_Lean_Environment___hyg_1248____closed__28() { _start: { lean_object* x_1; lean_object* x_2; -x_1 = l___private_Lean_Environment_0__Lean_reprConstantKind____x40_Lean_Environment___hyg_1396____closed__27; +x_1 = l___private_Lean_Environment_0__Lean_reprConstantKind____x40_Lean_Environment___hyg_1248____closed__27; x_2 = lean_alloc_ctor(3, 1, 0); lean_ctor_set(x_2, 0, x_1); return x_2; } } -static lean_object* _init_l___private_Lean_Environment_0__Lean_reprConstantKind____x40_Lean_Environment___hyg_1396____closed__29() { +static lean_object* _init_l___private_Lean_Environment_0__Lean_reprConstantKind____x40_Lean_Environment___hyg_1248____closed__29() { _start: { lean_object* x_1; lean_object* x_2; lean_object* x_3; -x_1 = l___private_Lean_Environment_0__Lean_reprConstantKind____x40_Lean_Environment___hyg_1396____closed__3; -x_2 = l___private_Lean_Environment_0__Lean_reprConstantKind____x40_Lean_Environment___hyg_1396____closed__28; +x_1 = l___private_Lean_Environment_0__Lean_reprConstantKind____x40_Lean_Environment___hyg_1248____closed__3; +x_2 = l___private_Lean_Environment_0__Lean_reprConstantKind____x40_Lean_Environment___hyg_1248____closed__28; x_3 = lean_alloc_ctor(4, 2, 0); lean_ctor_set(x_3, 0, x_1); lean_ctor_set(x_3, 1, x_2); return x_3; } } -static lean_object* _init_l___private_Lean_Environment_0__Lean_reprConstantKind____x40_Lean_Environment___hyg_1396____closed__30() { +static lean_object* _init_l___private_Lean_Environment_0__Lean_reprConstantKind____x40_Lean_Environment___hyg_1248____closed__30() { _start: { lean_object* x_1; uint8_t x_2; lean_object* x_3; -x_1 = l___private_Lean_Environment_0__Lean_reprConstantKind____x40_Lean_Environment___hyg_1396____closed__29; +x_1 = l___private_Lean_Environment_0__Lean_reprConstantKind____x40_Lean_Environment___hyg_1248____closed__29; x_2 = 0; x_3 = lean_alloc_ctor(6, 1, 1); lean_ctor_set(x_3, 0, x_1); @@ -5414,23 +4883,23 @@ lean_ctor_set_uint8(x_3, sizeof(void*)*1, x_2); return x_3; } } -static lean_object* _init_l___private_Lean_Environment_0__Lean_reprConstantKind____x40_Lean_Environment___hyg_1396____closed__31() { +static lean_object* _init_l___private_Lean_Environment_0__Lean_reprConstantKind____x40_Lean_Environment___hyg_1248____closed__31() { _start: { lean_object* x_1; lean_object* x_2; lean_object* x_3; -x_1 = l___private_Lean_Environment_0__Lean_reprConstantKind____x40_Lean_Environment___hyg_1396____closed__6; -x_2 = l___private_Lean_Environment_0__Lean_reprConstantKind____x40_Lean_Environment___hyg_1396____closed__28; +x_1 = l___private_Lean_Environment_0__Lean_reprConstantKind____x40_Lean_Environment___hyg_1248____closed__6; +x_2 = l___private_Lean_Environment_0__Lean_reprConstantKind____x40_Lean_Environment___hyg_1248____closed__28; x_3 = lean_alloc_ctor(4, 2, 0); lean_ctor_set(x_3, 0, x_1); lean_ctor_set(x_3, 1, x_2); return x_3; } } -static lean_object* _init_l___private_Lean_Environment_0__Lean_reprConstantKind____x40_Lean_Environment___hyg_1396____closed__32() { +static lean_object* _init_l___private_Lean_Environment_0__Lean_reprConstantKind____x40_Lean_Environment___hyg_1248____closed__32() { _start: { lean_object* x_1; uint8_t x_2; lean_object* x_3; -x_1 = l___private_Lean_Environment_0__Lean_reprConstantKind____x40_Lean_Environment___hyg_1396____closed__31; +x_1 = 
l___private_Lean_Environment_0__Lean_reprConstantKind____x40_Lean_Environment___hyg_1248____closed__31; x_2 = 0; x_3 = lean_alloc_ctor(6, 1, 1); lean_ctor_set(x_3, 0, x_1); @@ -5438,7 +4907,7 @@ lean_ctor_set_uint8(x_3, sizeof(void*)*1, x_2); return x_3; } } -static lean_object* _init_l___private_Lean_Environment_0__Lean_reprConstantKind____x40_Lean_Environment___hyg_1396____closed__33() { +static lean_object* _init_l___private_Lean_Environment_0__Lean_reprConstantKind____x40_Lean_Environment___hyg_1248____closed__33() { _start: { lean_object* x_1; @@ -5446,33 +4915,33 @@ x_1 = lean_mk_string_unchecked("Lean.ConstantKind.induct", 24, 24); return x_1; } } -static lean_object* _init_l___private_Lean_Environment_0__Lean_reprConstantKind____x40_Lean_Environment___hyg_1396____closed__34() { +static lean_object* _init_l___private_Lean_Environment_0__Lean_reprConstantKind____x40_Lean_Environment___hyg_1248____closed__34() { _start: { lean_object* x_1; lean_object* x_2; -x_1 = l___private_Lean_Environment_0__Lean_reprConstantKind____x40_Lean_Environment___hyg_1396____closed__33; +x_1 = l___private_Lean_Environment_0__Lean_reprConstantKind____x40_Lean_Environment___hyg_1248____closed__33; x_2 = lean_alloc_ctor(3, 1, 0); lean_ctor_set(x_2, 0, x_1); return x_2; } } -static lean_object* _init_l___private_Lean_Environment_0__Lean_reprConstantKind____x40_Lean_Environment___hyg_1396____closed__35() { +static lean_object* _init_l___private_Lean_Environment_0__Lean_reprConstantKind____x40_Lean_Environment___hyg_1248____closed__35() { _start: { lean_object* x_1; lean_object* x_2; lean_object* x_3; -x_1 = l___private_Lean_Environment_0__Lean_reprConstantKind____x40_Lean_Environment___hyg_1396____closed__3; -x_2 = l___private_Lean_Environment_0__Lean_reprConstantKind____x40_Lean_Environment___hyg_1396____closed__34; +x_1 = l___private_Lean_Environment_0__Lean_reprConstantKind____x40_Lean_Environment___hyg_1248____closed__3; +x_2 = l___private_Lean_Environment_0__Lean_reprConstantKind____x40_Lean_Environment___hyg_1248____closed__34; x_3 = lean_alloc_ctor(4, 2, 0); lean_ctor_set(x_3, 0, x_1); lean_ctor_set(x_3, 1, x_2); return x_3; } } -static lean_object* _init_l___private_Lean_Environment_0__Lean_reprConstantKind____x40_Lean_Environment___hyg_1396____closed__36() { +static lean_object* _init_l___private_Lean_Environment_0__Lean_reprConstantKind____x40_Lean_Environment___hyg_1248____closed__36() { _start: { lean_object* x_1; uint8_t x_2; lean_object* x_3; -x_1 = l___private_Lean_Environment_0__Lean_reprConstantKind____x40_Lean_Environment___hyg_1396____closed__35; +x_1 = l___private_Lean_Environment_0__Lean_reprConstantKind____x40_Lean_Environment___hyg_1248____closed__35; x_2 = 0; x_3 = lean_alloc_ctor(6, 1, 1); lean_ctor_set(x_3, 0, x_1); @@ -5480,23 +4949,23 @@ lean_ctor_set_uint8(x_3, sizeof(void*)*1, x_2); return x_3; } } -static lean_object* _init_l___private_Lean_Environment_0__Lean_reprConstantKind____x40_Lean_Environment___hyg_1396____closed__37() { +static lean_object* _init_l___private_Lean_Environment_0__Lean_reprConstantKind____x40_Lean_Environment___hyg_1248____closed__37() { _start: { lean_object* x_1; lean_object* x_2; lean_object* x_3; -x_1 = l___private_Lean_Environment_0__Lean_reprConstantKind____x40_Lean_Environment___hyg_1396____closed__6; -x_2 = l___private_Lean_Environment_0__Lean_reprConstantKind____x40_Lean_Environment___hyg_1396____closed__34; +x_1 = l___private_Lean_Environment_0__Lean_reprConstantKind____x40_Lean_Environment___hyg_1248____closed__6; +x_2 = 
l___private_Lean_Environment_0__Lean_reprConstantKind____x40_Lean_Environment___hyg_1248____closed__34; x_3 = lean_alloc_ctor(4, 2, 0); lean_ctor_set(x_3, 0, x_1); lean_ctor_set(x_3, 1, x_2); return x_3; } } -static lean_object* _init_l___private_Lean_Environment_0__Lean_reprConstantKind____x40_Lean_Environment___hyg_1396____closed__38() { +static lean_object* _init_l___private_Lean_Environment_0__Lean_reprConstantKind____x40_Lean_Environment___hyg_1248____closed__38() { _start: { lean_object* x_1; uint8_t x_2; lean_object* x_3; -x_1 = l___private_Lean_Environment_0__Lean_reprConstantKind____x40_Lean_Environment___hyg_1396____closed__37; +x_1 = l___private_Lean_Environment_0__Lean_reprConstantKind____x40_Lean_Environment___hyg_1248____closed__37; x_2 = 0; x_3 = lean_alloc_ctor(6, 1, 1); lean_ctor_set(x_3, 0, x_1); @@ -5504,7 +4973,7 @@ lean_ctor_set_uint8(x_3, sizeof(void*)*1, x_2); return x_3; } } -static lean_object* _init_l___private_Lean_Environment_0__Lean_reprConstantKind____x40_Lean_Environment___hyg_1396____closed__39() { +static lean_object* _init_l___private_Lean_Environment_0__Lean_reprConstantKind____x40_Lean_Environment___hyg_1248____closed__39() { _start: { lean_object* x_1; @@ -5512,33 +4981,33 @@ x_1 = lean_mk_string_unchecked("Lean.ConstantKind.ctor", 22, 22); return x_1; } } -static lean_object* _init_l___private_Lean_Environment_0__Lean_reprConstantKind____x40_Lean_Environment___hyg_1396____closed__40() { +static lean_object* _init_l___private_Lean_Environment_0__Lean_reprConstantKind____x40_Lean_Environment___hyg_1248____closed__40() { _start: { lean_object* x_1; lean_object* x_2; -x_1 = l___private_Lean_Environment_0__Lean_reprConstantKind____x40_Lean_Environment___hyg_1396____closed__39; +x_1 = l___private_Lean_Environment_0__Lean_reprConstantKind____x40_Lean_Environment___hyg_1248____closed__39; x_2 = lean_alloc_ctor(3, 1, 0); lean_ctor_set(x_2, 0, x_1); return x_2; } } -static lean_object* _init_l___private_Lean_Environment_0__Lean_reprConstantKind____x40_Lean_Environment___hyg_1396____closed__41() { +static lean_object* _init_l___private_Lean_Environment_0__Lean_reprConstantKind____x40_Lean_Environment___hyg_1248____closed__41() { _start: { lean_object* x_1; lean_object* x_2; lean_object* x_3; -x_1 = l___private_Lean_Environment_0__Lean_reprConstantKind____x40_Lean_Environment___hyg_1396____closed__3; -x_2 = l___private_Lean_Environment_0__Lean_reprConstantKind____x40_Lean_Environment___hyg_1396____closed__40; +x_1 = l___private_Lean_Environment_0__Lean_reprConstantKind____x40_Lean_Environment___hyg_1248____closed__3; +x_2 = l___private_Lean_Environment_0__Lean_reprConstantKind____x40_Lean_Environment___hyg_1248____closed__40; x_3 = lean_alloc_ctor(4, 2, 0); lean_ctor_set(x_3, 0, x_1); lean_ctor_set(x_3, 1, x_2); return x_3; } } -static lean_object* _init_l___private_Lean_Environment_0__Lean_reprConstantKind____x40_Lean_Environment___hyg_1396____closed__42() { +static lean_object* _init_l___private_Lean_Environment_0__Lean_reprConstantKind____x40_Lean_Environment___hyg_1248____closed__42() { _start: { lean_object* x_1; uint8_t x_2; lean_object* x_3; -x_1 = l___private_Lean_Environment_0__Lean_reprConstantKind____x40_Lean_Environment___hyg_1396____closed__41; +x_1 = l___private_Lean_Environment_0__Lean_reprConstantKind____x40_Lean_Environment___hyg_1248____closed__41; x_2 = 0; x_3 = lean_alloc_ctor(6, 1, 1); lean_ctor_set(x_3, 0, x_1); @@ -5546,23 +5015,23 @@ lean_ctor_set_uint8(x_3, sizeof(void*)*1, x_2); return x_3; } } -static lean_object* 
_init_l___private_Lean_Environment_0__Lean_reprConstantKind____x40_Lean_Environment___hyg_1396____closed__43() { +static lean_object* _init_l___private_Lean_Environment_0__Lean_reprConstantKind____x40_Lean_Environment___hyg_1248____closed__43() { _start: { lean_object* x_1; lean_object* x_2; lean_object* x_3; -x_1 = l___private_Lean_Environment_0__Lean_reprConstantKind____x40_Lean_Environment___hyg_1396____closed__6; -x_2 = l___private_Lean_Environment_0__Lean_reprConstantKind____x40_Lean_Environment___hyg_1396____closed__40; +x_1 = l___private_Lean_Environment_0__Lean_reprConstantKind____x40_Lean_Environment___hyg_1248____closed__6; +x_2 = l___private_Lean_Environment_0__Lean_reprConstantKind____x40_Lean_Environment___hyg_1248____closed__40; x_3 = lean_alloc_ctor(4, 2, 0); lean_ctor_set(x_3, 0, x_1); lean_ctor_set(x_3, 1, x_2); return x_3; } } -static lean_object* _init_l___private_Lean_Environment_0__Lean_reprConstantKind____x40_Lean_Environment___hyg_1396____closed__44() { +static lean_object* _init_l___private_Lean_Environment_0__Lean_reprConstantKind____x40_Lean_Environment___hyg_1248____closed__44() { _start: { lean_object* x_1; uint8_t x_2; lean_object* x_3; -x_1 = l___private_Lean_Environment_0__Lean_reprConstantKind____x40_Lean_Environment___hyg_1396____closed__43; +x_1 = l___private_Lean_Environment_0__Lean_reprConstantKind____x40_Lean_Environment___hyg_1248____closed__43; x_2 = 0; x_3 = lean_alloc_ctor(6, 1, 1); lean_ctor_set(x_3, 0, x_1); @@ -5570,7 +5039,7 @@ lean_ctor_set_uint8(x_3, sizeof(void*)*1, x_2); return x_3; } } -static lean_object* _init_l___private_Lean_Environment_0__Lean_reprConstantKind____x40_Lean_Environment___hyg_1396____closed__45() { +static lean_object* _init_l___private_Lean_Environment_0__Lean_reprConstantKind____x40_Lean_Environment___hyg_1248____closed__45() { _start: { lean_object* x_1; @@ -5578,33 +5047,33 @@ x_1 = lean_mk_string_unchecked("Lean.ConstantKind.recursor", 26, 26); return x_1; } } -static lean_object* _init_l___private_Lean_Environment_0__Lean_reprConstantKind____x40_Lean_Environment___hyg_1396____closed__46() { +static lean_object* _init_l___private_Lean_Environment_0__Lean_reprConstantKind____x40_Lean_Environment___hyg_1248____closed__46() { _start: { lean_object* x_1; lean_object* x_2; -x_1 = l___private_Lean_Environment_0__Lean_reprConstantKind____x40_Lean_Environment___hyg_1396____closed__45; +x_1 = l___private_Lean_Environment_0__Lean_reprConstantKind____x40_Lean_Environment___hyg_1248____closed__45; x_2 = lean_alloc_ctor(3, 1, 0); lean_ctor_set(x_2, 0, x_1); return x_2; } } -static lean_object* _init_l___private_Lean_Environment_0__Lean_reprConstantKind____x40_Lean_Environment___hyg_1396____closed__47() { +static lean_object* _init_l___private_Lean_Environment_0__Lean_reprConstantKind____x40_Lean_Environment___hyg_1248____closed__47() { _start: { lean_object* x_1; lean_object* x_2; lean_object* x_3; -x_1 = l___private_Lean_Environment_0__Lean_reprConstantKind____x40_Lean_Environment___hyg_1396____closed__3; -x_2 = l___private_Lean_Environment_0__Lean_reprConstantKind____x40_Lean_Environment___hyg_1396____closed__46; +x_1 = l___private_Lean_Environment_0__Lean_reprConstantKind____x40_Lean_Environment___hyg_1248____closed__3; +x_2 = l___private_Lean_Environment_0__Lean_reprConstantKind____x40_Lean_Environment___hyg_1248____closed__46; x_3 = lean_alloc_ctor(4, 2, 0); lean_ctor_set(x_3, 0, x_1); lean_ctor_set(x_3, 1, x_2); return x_3; } } -static lean_object* 
_init_l___private_Lean_Environment_0__Lean_reprConstantKind____x40_Lean_Environment___hyg_1396____closed__48() { +static lean_object* _init_l___private_Lean_Environment_0__Lean_reprConstantKind____x40_Lean_Environment___hyg_1248____closed__48() { _start: { lean_object* x_1; uint8_t x_2; lean_object* x_3; -x_1 = l___private_Lean_Environment_0__Lean_reprConstantKind____x40_Lean_Environment___hyg_1396____closed__47; +x_1 = l___private_Lean_Environment_0__Lean_reprConstantKind____x40_Lean_Environment___hyg_1248____closed__47; x_2 = 0; x_3 = lean_alloc_ctor(6, 1, 1); lean_ctor_set(x_3, 0, x_1); @@ -5612,23 +5081,23 @@ lean_ctor_set_uint8(x_3, sizeof(void*)*1, x_2); return x_3; } } -static lean_object* _init_l___private_Lean_Environment_0__Lean_reprConstantKind____x40_Lean_Environment___hyg_1396____closed__49() { +static lean_object* _init_l___private_Lean_Environment_0__Lean_reprConstantKind____x40_Lean_Environment___hyg_1248____closed__49() { _start: { lean_object* x_1; lean_object* x_2; lean_object* x_3; -x_1 = l___private_Lean_Environment_0__Lean_reprConstantKind____x40_Lean_Environment___hyg_1396____closed__6; -x_2 = l___private_Lean_Environment_0__Lean_reprConstantKind____x40_Lean_Environment___hyg_1396____closed__46; +x_1 = l___private_Lean_Environment_0__Lean_reprConstantKind____x40_Lean_Environment___hyg_1248____closed__6; +x_2 = l___private_Lean_Environment_0__Lean_reprConstantKind____x40_Lean_Environment___hyg_1248____closed__46; x_3 = lean_alloc_ctor(4, 2, 0); lean_ctor_set(x_3, 0, x_1); lean_ctor_set(x_3, 1, x_2); return x_3; } } -static lean_object* _init_l___private_Lean_Environment_0__Lean_reprConstantKind____x40_Lean_Environment___hyg_1396____closed__50() { +static lean_object* _init_l___private_Lean_Environment_0__Lean_reprConstantKind____x40_Lean_Environment___hyg_1248____closed__50() { _start: { lean_object* x_1; uint8_t x_2; lean_object* x_3; -x_1 = l___private_Lean_Environment_0__Lean_reprConstantKind____x40_Lean_Environment___hyg_1396____closed__49; +x_1 = l___private_Lean_Environment_0__Lean_reprConstantKind____x40_Lean_Environment___hyg_1248____closed__49; x_2 = 0; x_3 = lean_alloc_ctor(6, 1, 1); lean_ctor_set(x_3, 0, x_1); @@ -5636,7 +5105,7 @@ lean_ctor_set_uint8(x_3, sizeof(void*)*1, x_2); return x_3; } } -LEAN_EXPORT lean_object* l___private_Lean_Environment_0__Lean_reprConstantKind____x40_Lean_Environment___hyg_1396_(uint8_t x_1, lean_object* x_2) { +LEAN_EXPORT lean_object* l___private_Lean_Environment_0__Lean_reprConstantKind____x40_Lean_Environment___hyg_1248_(uint8_t x_1, lean_object* x_2) { _start: { switch (x_1) { @@ -5648,14 +5117,14 @@ x_4 = lean_nat_dec_le(x_3, x_2); if (x_4 == 0) { lean_object* x_5; lean_object* x_6; -x_5 = l___private_Lean_Environment_0__Lean_reprConstantKind____x40_Lean_Environment___hyg_1396____closed__5; +x_5 = l___private_Lean_Environment_0__Lean_reprConstantKind____x40_Lean_Environment___hyg_1248____closed__5; x_6 = l_Repr_addAppParen(x_5, x_2); return x_6; } else { lean_object* x_7; lean_object* x_8; -x_7 = l___private_Lean_Environment_0__Lean_reprConstantKind____x40_Lean_Environment___hyg_1396____closed__8; +x_7 = l___private_Lean_Environment_0__Lean_reprConstantKind____x40_Lean_Environment___hyg_1248____closed__8; x_8 = l_Repr_addAppParen(x_7, x_2); return x_8; } @@ -5668,14 +5137,14 @@ x_10 = lean_nat_dec_le(x_9, x_2); if (x_10 == 0) { lean_object* x_11; lean_object* x_12; -x_11 = l___private_Lean_Environment_0__Lean_reprConstantKind____x40_Lean_Environment___hyg_1396____closed__12; +x_11 = 
l___private_Lean_Environment_0__Lean_reprConstantKind____x40_Lean_Environment___hyg_1248____closed__12; x_12 = l_Repr_addAppParen(x_11, x_2); return x_12; } else { lean_object* x_13; lean_object* x_14; -x_13 = l___private_Lean_Environment_0__Lean_reprConstantKind____x40_Lean_Environment___hyg_1396____closed__14; +x_13 = l___private_Lean_Environment_0__Lean_reprConstantKind____x40_Lean_Environment___hyg_1248____closed__14; x_14 = l_Repr_addAppParen(x_13, x_2); return x_14; } @@ -5688,14 +5157,14 @@ x_16 = lean_nat_dec_le(x_15, x_2); if (x_16 == 0) { lean_object* x_17; lean_object* x_18; -x_17 = l___private_Lean_Environment_0__Lean_reprConstantKind____x40_Lean_Environment___hyg_1396____closed__18; +x_17 = l___private_Lean_Environment_0__Lean_reprConstantKind____x40_Lean_Environment___hyg_1248____closed__18; x_18 = l_Repr_addAppParen(x_17, x_2); return x_18; } else { lean_object* x_19; lean_object* x_20; -x_19 = l___private_Lean_Environment_0__Lean_reprConstantKind____x40_Lean_Environment___hyg_1396____closed__20; +x_19 = l___private_Lean_Environment_0__Lean_reprConstantKind____x40_Lean_Environment___hyg_1248____closed__20; x_20 = l_Repr_addAppParen(x_19, x_2); return x_20; } @@ -5708,14 +5177,14 @@ x_22 = lean_nat_dec_le(x_21, x_2); if (x_22 == 0) { lean_object* x_23; lean_object* x_24; -x_23 = l___private_Lean_Environment_0__Lean_reprConstantKind____x40_Lean_Environment___hyg_1396____closed__24; +x_23 = l___private_Lean_Environment_0__Lean_reprConstantKind____x40_Lean_Environment___hyg_1248____closed__24; x_24 = l_Repr_addAppParen(x_23, x_2); return x_24; } else { lean_object* x_25; lean_object* x_26; -x_25 = l___private_Lean_Environment_0__Lean_reprConstantKind____x40_Lean_Environment___hyg_1396____closed__26; +x_25 = l___private_Lean_Environment_0__Lean_reprConstantKind____x40_Lean_Environment___hyg_1248____closed__26; x_26 = l_Repr_addAppParen(x_25, x_2); return x_26; } @@ -5728,14 +5197,14 @@ x_28 = lean_nat_dec_le(x_27, x_2); if (x_28 == 0) { lean_object* x_29; lean_object* x_30; -x_29 = l___private_Lean_Environment_0__Lean_reprConstantKind____x40_Lean_Environment___hyg_1396____closed__30; +x_29 = l___private_Lean_Environment_0__Lean_reprConstantKind____x40_Lean_Environment___hyg_1248____closed__30; x_30 = l_Repr_addAppParen(x_29, x_2); return x_30; } else { lean_object* x_31; lean_object* x_32; -x_31 = l___private_Lean_Environment_0__Lean_reprConstantKind____x40_Lean_Environment___hyg_1396____closed__32; +x_31 = l___private_Lean_Environment_0__Lean_reprConstantKind____x40_Lean_Environment___hyg_1248____closed__32; x_32 = l_Repr_addAppParen(x_31, x_2); return x_32; } @@ -5748,14 +5217,14 @@ x_34 = lean_nat_dec_le(x_33, x_2); if (x_34 == 0) { lean_object* x_35; lean_object* x_36; -x_35 = l___private_Lean_Environment_0__Lean_reprConstantKind____x40_Lean_Environment___hyg_1396____closed__36; +x_35 = l___private_Lean_Environment_0__Lean_reprConstantKind____x40_Lean_Environment___hyg_1248____closed__36; x_36 = l_Repr_addAppParen(x_35, x_2); return x_36; } else { lean_object* x_37; lean_object* x_38; -x_37 = l___private_Lean_Environment_0__Lean_reprConstantKind____x40_Lean_Environment___hyg_1396____closed__38; +x_37 = l___private_Lean_Environment_0__Lean_reprConstantKind____x40_Lean_Environment___hyg_1248____closed__38; x_38 = l_Repr_addAppParen(x_37, x_2); return x_38; } @@ -5768,14 +5237,14 @@ x_40 = lean_nat_dec_le(x_39, x_2); if (x_40 == 0) { lean_object* x_41; lean_object* x_42; -x_41 = 
l___private_Lean_Environment_0__Lean_reprConstantKind____x40_Lean_Environment___hyg_1396____closed__42; +x_41 = l___private_Lean_Environment_0__Lean_reprConstantKind____x40_Lean_Environment___hyg_1248____closed__42; x_42 = l_Repr_addAppParen(x_41, x_2); return x_42; } else { lean_object* x_43; lean_object* x_44; -x_43 = l___private_Lean_Environment_0__Lean_reprConstantKind____x40_Lean_Environment___hyg_1396____closed__44; +x_43 = l___private_Lean_Environment_0__Lean_reprConstantKind____x40_Lean_Environment___hyg_1248____closed__44; x_44 = l_Repr_addAppParen(x_43, x_2); return x_44; } @@ -5788,14 +5257,14 @@ x_46 = lean_nat_dec_le(x_45, x_2); if (x_46 == 0) { lean_object* x_47; lean_object* x_48; -x_47 = l___private_Lean_Environment_0__Lean_reprConstantKind____x40_Lean_Environment___hyg_1396____closed__48; +x_47 = l___private_Lean_Environment_0__Lean_reprConstantKind____x40_Lean_Environment___hyg_1248____closed__48; x_48 = l_Repr_addAppParen(x_47, x_2); return x_48; } else { lean_object* x_49; lean_object* x_50; -x_49 = l___private_Lean_Environment_0__Lean_reprConstantKind____x40_Lean_Environment___hyg_1396____closed__50; +x_49 = l___private_Lean_Environment_0__Lean_reprConstantKind____x40_Lean_Environment___hyg_1248____closed__50; x_50 = l_Repr_addAppParen(x_49, x_2); return x_50; } @@ -5803,13 +5272,13 @@ return x_50; } } } -LEAN_EXPORT lean_object* l___private_Lean_Environment_0__Lean_reprConstantKind____x40_Lean_Environment___hyg_1396____boxed(lean_object* x_1, lean_object* x_2) { +LEAN_EXPORT lean_object* l___private_Lean_Environment_0__Lean_reprConstantKind____x40_Lean_Environment___hyg_1248____boxed(lean_object* x_1, lean_object* x_2) { _start: { uint8_t x_3; lean_object* x_4; x_3 = lean_unbox(x_1); lean_dec(x_1); -x_4 = l___private_Lean_Environment_0__Lean_reprConstantKind____x40_Lean_Environment___hyg_1396_(x_3, x_2); +x_4 = l___private_Lean_Environment_0__Lean_reprConstantKind____x40_Lean_Environment___hyg_1248_(x_3, x_2); lean_dec(x_2); return x_4; } @@ -5818,7 +5287,7 @@ static lean_object* _init_l_Lean_instReprConstantKind___closed__1() { _start: { lean_object* x_1; -x_1 = lean_alloc_closure((void*)(l___private_Lean_Environment_0__Lean_reprConstantKind____x40_Lean_Environment___hyg_1396____boxed), 2, 0); +x_1 = lean_alloc_closure((void*)(l___private_Lean_Environment_0__Lean_reprConstantKind____x40_Lean_Environment___hyg_1248____boxed), 2, 0); return x_1; } } @@ -6127,7 +5596,7 @@ x_1 = l_Lean_instInhabitedAsyncConsts___closed__2; return x_1; } } -static lean_object* _init_l_Lean_instImpl____x40_Lean_Environment___hyg_2021____closed__1() { +static lean_object* _init_l_Lean_instImpl____x40_Lean_Environment___hyg_1873____closed__1() { _start: { lean_object* x_1; @@ -6135,27 +5604,27 @@ x_1 = lean_mk_string_unchecked("_private", 8, 8); return x_1; } } -static lean_object* _init_l_Lean_instImpl____x40_Lean_Environment___hyg_2021____closed__2() { +static lean_object* _init_l_Lean_instImpl____x40_Lean_Environment___hyg_1873____closed__2() { _start: { lean_object* x_1; lean_object* x_2; lean_object* x_3; x_1 = lean_box(0); -x_2 = l_Lean_instImpl____x40_Lean_Environment___hyg_2021____closed__1; +x_2 = l_Lean_instImpl____x40_Lean_Environment___hyg_1873____closed__1; x_3 = l_Lean_Name_str___override(x_1, x_2); return x_3; } } -static lean_object* _init_l_Lean_instImpl____x40_Lean_Environment___hyg_2021____closed__3() { +static lean_object* _init_l_Lean_instImpl____x40_Lean_Environment___hyg_1873____closed__3() { _start: { lean_object* x_1; lean_object* x_2; lean_object* x_3; -x_1 = 
l_Lean_instImpl____x40_Lean_Environment___hyg_2021____closed__2; +x_1 = l_Lean_instImpl____x40_Lean_Environment___hyg_1873____closed__2; x_2 = l_Lean_initFn____x40_Lean_Environment___hyg_6____closed__6; x_3 = l_Lean_Name_str___override(x_1, x_2); return x_3; } } -static lean_object* _init_l_Lean_instImpl____x40_Lean_Environment___hyg_2021____closed__4() { +static lean_object* _init_l_Lean_instImpl____x40_Lean_Environment___hyg_1873____closed__4() { _start: { lean_object* x_1; @@ -6163,37 +5632,37 @@ x_1 = lean_mk_string_unchecked("Environment", 11, 11); return x_1; } } -static lean_object* _init_l_Lean_instImpl____x40_Lean_Environment___hyg_2021____closed__5() { +static lean_object* _init_l_Lean_instImpl____x40_Lean_Environment___hyg_1873____closed__5() { _start: { lean_object* x_1; lean_object* x_2; lean_object* x_3; -x_1 = l_Lean_instImpl____x40_Lean_Environment___hyg_2021____closed__3; -x_2 = l_Lean_instImpl____x40_Lean_Environment___hyg_2021____closed__4; +x_1 = l_Lean_instImpl____x40_Lean_Environment___hyg_1873____closed__3; +x_2 = l_Lean_instImpl____x40_Lean_Environment___hyg_1873____closed__4; x_3 = l_Lean_Name_str___override(x_1, x_2); return x_3; } } -static lean_object* _init_l_Lean_instImpl____x40_Lean_Environment___hyg_2021____closed__6() { +static lean_object* _init_l_Lean_instImpl____x40_Lean_Environment___hyg_1873____closed__6() { _start: { lean_object* x_1; lean_object* x_2; lean_object* x_3; -x_1 = l_Lean_instImpl____x40_Lean_Environment___hyg_2021____closed__5; +x_1 = l_Lean_instImpl____x40_Lean_Environment___hyg_1873____closed__5; x_2 = lean_unsigned_to_nat(0u); x_3 = l_Lean_Name_num___override(x_1, x_2); return x_3; } } -static lean_object* _init_l_Lean_instImpl____x40_Lean_Environment___hyg_2021____closed__7() { +static lean_object* _init_l_Lean_instImpl____x40_Lean_Environment___hyg_1873____closed__7() { _start: { lean_object* x_1; lean_object* x_2; lean_object* x_3; -x_1 = l_Lean_instImpl____x40_Lean_Environment___hyg_2021____closed__6; +x_1 = l_Lean_instImpl____x40_Lean_Environment___hyg_1873____closed__6; x_2 = l_Lean_initFn____x40_Lean_Environment___hyg_6____closed__6; x_3 = l_Lean_Name_str___override(x_1, x_2); return x_3; } } -static lean_object* _init_l_Lean_instImpl____x40_Lean_Environment___hyg_2021____closed__8() { +static lean_object* _init_l_Lean_instImpl____x40_Lean_Environment___hyg_1873____closed__8() { _start: { lean_object* x_1; @@ -6201,21 +5670,21 @@ x_1 = lean_mk_string_unchecked("AsyncConsts", 11, 11); return x_1; } } -static lean_object* _init_l_Lean_instImpl____x40_Lean_Environment___hyg_2021____closed__9() { +static lean_object* _init_l_Lean_instImpl____x40_Lean_Environment___hyg_1873____closed__9() { _start: { lean_object* x_1; lean_object* x_2; lean_object* x_3; -x_1 = l_Lean_instImpl____x40_Lean_Environment___hyg_2021____closed__7; -x_2 = l_Lean_instImpl____x40_Lean_Environment___hyg_2021____closed__8; +x_1 = l_Lean_instImpl____x40_Lean_Environment___hyg_1873____closed__7; +x_2 = l_Lean_instImpl____x40_Lean_Environment___hyg_1873____closed__8; x_3 = l_Lean_Name_str___override(x_1, x_2); return x_3; } } -static lean_object* _init_l_Lean_instImpl____x40_Lean_Environment___hyg_2021_() { +static lean_object* _init_l_Lean_instImpl____x40_Lean_Environment___hyg_1873_() { _start: { lean_object* x_1; -x_1 = l_Lean_instImpl____x40_Lean_Environment___hyg_2021____closed__9; +x_1 = l_Lean_instImpl____x40_Lean_Environment___hyg_1873____closed__9; return x_1; } } @@ -6223,7 +5692,7 @@ static lean_object* _init_l_Lean_instTypeNameAsyncConsts() { _start: { 
lean_object* x_1; -x_1 = l_Lean_instImpl____x40_Lean_Environment___hyg_2021_; +x_1 = l_Lean_instImpl____x40_Lean_Environment___hyg_1873_; return x_1; } } @@ -6235,11 +5704,19 @@ x_3 = lean_panic_fn(x_1, x_2); return x_3; } } +LEAN_EXPORT uint8_t l___private_Lean_Environment_0__Lean_AsyncConsts_add___lambda__1(lean_object* x_1) { +_start: +{ +uint8_t x_2; +x_2 = 0; +return x_2; +} +} static lean_object* _init_l___private_Lean_Environment_0__Lean_AsyncConsts_add___closed__1() { _start: { lean_object* x_1; -x_1 = lean_mk_string_unchecked("duplicate normalized declaration name ", 38, 38); +x_1 = lean_alloc_closure((void*)(l___private_Lean_Environment_0__Lean_AsyncConsts_add___lambda__1___boxed), 1, 0); return x_1; } } @@ -6247,7 +5724,7 @@ static lean_object* _init_l___private_Lean_Environment_0__Lean_AsyncConsts_add__ _start: { lean_object* x_1; -x_1 = lean_mk_string_unchecked(" vs. ", 5, 5); +x_1 = lean_mk_string_unchecked("duplicate normalized declaration name ", 38, 38); return x_1; } } @@ -6255,7 +5732,7 @@ static lean_object* _init_l___private_Lean_Environment_0__Lean_AsyncConsts_add__ _start: { lean_object* x_1; -x_1 = lean_mk_string_unchecked("", 0, 0); +x_1 = lean_mk_string_unchecked(" vs. ", 5, 5); return x_1; } } @@ -6263,7 +5740,7 @@ static lean_object* _init_l___private_Lean_Environment_0__Lean_AsyncConsts_add__ _start: { lean_object* x_1; -x_1 = lean_mk_string_unchecked("Lean.Environment", 16, 16); +x_1 = lean_mk_string_unchecked("", 0, 0); return x_1; } } @@ -6271,6 +5748,14 @@ static lean_object* _init_l___private_Lean_Environment_0__Lean_AsyncConsts_add__ _start: { lean_object* x_1; +x_1 = lean_mk_string_unchecked("Lean.Environment", 16, 16); +return x_1; +} +} +static lean_object* _init_l___private_Lean_Environment_0__Lean_AsyncConsts_add___closed__6() { +_start: +{ +lean_object* x_1; x_1 = lean_mk_string_unchecked("_private.Lean.Environment.0.Lean.AsyncConsts.add", 48, 48); return x_1; } @@ -6364,12 +5849,12 @@ x_27 = lean_ctor_get(x_10, 0); lean_inc(x_27); lean_dec(x_10); x_28 = 1; -x_29 = l_Lean_instToStringImport___closed__1; +x_29 = l___private_Lean_Environment_0__Lean_AsyncConsts_add___closed__1; x_30 = l_Lean_Name_toString(x_4, x_28, x_29); -x_31 = l___private_Lean_Environment_0__Lean_AsyncConsts_add___closed__1; +x_31 = l___private_Lean_Environment_0__Lean_AsyncConsts_add___closed__2; x_32 = lean_string_append(x_31, x_30); lean_dec(x_30); -x_33 = l___private_Lean_Environment_0__Lean_AsyncConsts_add___closed__2; +x_33 = l___private_Lean_Environment_0__Lean_AsyncConsts_add___closed__3; x_34 = lean_string_append(x_32, x_33); x_35 = lean_ctor_get(x_27, 0); lean_inc(x_35); @@ -6380,11 +5865,11 @@ lean_dec(x_35); x_37 = l_Lean_Name_toString(x_36, x_28, x_29); x_38 = lean_string_append(x_34, x_37); lean_dec(x_37); -x_39 = l___private_Lean_Environment_0__Lean_AsyncConsts_add___closed__3; +x_39 = l___private_Lean_Environment_0__Lean_AsyncConsts_add___closed__4; x_40 = lean_string_append(x_38, x_39); -x_41 = l___private_Lean_Environment_0__Lean_AsyncConsts_add___closed__4; -x_42 = l___private_Lean_Environment_0__Lean_AsyncConsts_add___closed__5; -x_43 = lean_unsigned_to_nat(443u); +x_41 = l___private_Lean_Environment_0__Lean_AsyncConsts_add___closed__5; +x_42 = l___private_Lean_Environment_0__Lean_AsyncConsts_add___closed__6; +x_43 = lean_unsigned_to_nat(432u); x_44 = lean_unsigned_to_nat(4u); x_45 = l___private_Init_Util_0__mkPanicMessageWithDecl(x_41, x_42, x_43, x_44, x_40); lean_dec(x_40); @@ -6393,6 +5878,16 @@ return x_46; } } } +LEAN_EXPORT lean_object* 
l___private_Lean_Environment_0__Lean_AsyncConsts_add___lambda__1___boxed(lean_object* x_1) { +_start: +{ +uint8_t x_2; lean_object* x_3; +x_2 = l___private_Lean_Environment_0__Lean_AsyncConsts_add___lambda__1(x_1); +lean_dec(x_1); +x_3 = lean_box(x_2); +return x_3; +} +} LEAN_EXPORT lean_object* l___private_Lean_Environment_0__Lean_AsyncConsts_find_x3f(lean_object* x_1, lean_object* x_2) { _start: { @@ -7534,7 +7029,7 @@ if (x_16 == 0) lean_object* x_17; uint8_t x_18; lean_object* x_19; lean_object* x_20; lean_object* x_21; lean_object* x_22; lean_object* x_23; lean_object* x_24; lean_object* x_25; lean_object* x_26; lean_object* x_27; lean_object* x_28; lean_object* x_29; x_17 = lean_ctor_get(x_13, 0); x_18 = 1; -x_19 = l_Lean_instToStringImport___closed__1; +x_19 = l___private_Lean_Environment_0__Lean_AsyncConsts_add___closed__1; x_20 = l_Lean_Name_toString(x_17, x_18, x_19); x_21 = l_Lean_Environment_addDeclCore___closed__1; x_22 = lean_string_append(x_21, x_20); @@ -7547,7 +7042,7 @@ lean_dec(x_10); x_26 = l_Lean_Name_toString(x_25, x_18, x_19); x_27 = lean_string_append(x_24, x_26); lean_dec(x_26); -x_28 = l___private_Lean_Environment_0__Lean_AsyncConsts_add___closed__3; +x_28 = l___private_Lean_Environment_0__Lean_AsyncConsts_add___closed__4; x_29 = lean_string_append(x_27, x_28); lean_ctor_set_tag(x_13, 12); lean_ctor_set(x_13, 0, x_29); @@ -7562,7 +7057,7 @@ x_30 = lean_ctor_get(x_13, 0); lean_inc(x_30); lean_dec(x_13); x_31 = 1; -x_32 = l_Lean_instToStringImport___closed__1; +x_32 = l___private_Lean_Environment_0__Lean_AsyncConsts_add___closed__1; x_33 = l_Lean_Name_toString(x_30, x_31, x_32); x_34 = l_Lean_Environment_addDeclCore___closed__1; x_35 = lean_string_append(x_34, x_33); @@ -7575,7 +7070,7 @@ lean_dec(x_10); x_39 = l_Lean_Name_toString(x_38, x_31, x_32); x_40 = lean_string_append(x_37, x_39); lean_dec(x_39); -x_41 = l___private_Lean_Environment_0__Lean_AsyncConsts_add___closed__3; +x_41 = l___private_Lean_Environment_0__Lean_AsyncConsts_add___closed__4; x_42 = lean_string_append(x_40, x_41); x_43 = lean_alloc_ctor(12, 1, 0); lean_ctor_set(x_43, 0, x_42); @@ -7621,7 +7116,7 @@ if (lean_is_exclusive(x_47)) { x_51 = lean_box(0); } x_52 = 1; -x_53 = l_Lean_instToStringImport___closed__1; +x_53 = l___private_Lean_Environment_0__Lean_AsyncConsts_add___closed__1; x_54 = l_Lean_Name_toString(x_50, x_52, x_53); x_55 = l_Lean_Environment_addDeclCore___closed__1; x_56 = lean_string_append(x_55, x_54); @@ -7634,7 +7129,7 @@ lean_dec(x_44); x_60 = l_Lean_Name_toString(x_59, x_52, x_53); x_61 = lean_string_append(x_58, x_60); lean_dec(x_60); -x_62 = l___private_Lean_Environment_0__Lean_AsyncConsts_add___closed__3; +x_62 = l___private_Lean_Environment_0__Lean_AsyncConsts_add___closed__4; x_63 = lean_string_append(x_61, x_62); if (lean_is_scalar(x_51)) { x_64 = lean_alloc_ctor(12, 1, 0); @@ -9400,9 +8895,9 @@ if (x_10 == 0) uint8_t x_11; lean_object* x_12; lean_object* x_13; lean_object* x_14; lean_object* x_15; lean_object* x_16; lean_object* x_17; lean_object* x_18; lean_object* x_19; lean_object* x_20; lean_object* x_21; lean_object* x_22; lean_object* x_23; lean_object* x_24; lean_object* x_25; lean_object* x_26; lean_object* x_27; lean_dec(x_2); x_11 = 1; -x_12 = l_Lean_instToStringImport___closed__1; +x_12 = l___private_Lean_Environment_0__Lean_AsyncConsts_add___closed__1; x_13 = l_Lean_Name_toString(x_3, x_11, x_12); -x_14 = l___private_Lean_Environment_0__Lean_AsyncConsts_add___closed__3; +x_14 = l___private_Lean_Environment_0__Lean_AsyncConsts_add___closed__4; x_15 = 
lean_string_append(x_14, x_13); lean_dec(x_13); x_16 = l_Lean_Environment_enableRealizationsForConst___lambda__3___closed__1; @@ -9414,9 +8909,9 @@ x_19 = l_Lean_Name_toString(x_18, x_11, x_12); x_20 = lean_string_append(x_17, x_19); lean_dec(x_19); x_21 = lean_string_append(x_20, x_14); -x_22 = l___private_Lean_Environment_0__Lean_AsyncConsts_add___closed__4; +x_22 = l___private_Lean_Environment_0__Lean_AsyncConsts_add___closed__5; x_23 = l_Lean_Environment_enableRealizationsForConst___lambda__3___closed__2; -x_24 = lean_unsigned_to_nat(815u); +x_24 = lean_unsigned_to_nat(804u); x_25 = lean_unsigned_to_nat(6u); x_26 = l___private_Init_Util_0__mkPanicMessageWithDecl(x_22, x_23, x_24, x_25, x_21); lean_dec(x_21); @@ -9509,16 +9004,16 @@ if (lean_obj_tag(x_6) == 0) uint8_t x_7; lean_object* x_8; lean_object* x_9; lean_object* x_10; lean_object* x_11; lean_object* x_12; lean_object* x_13; lean_object* x_14; lean_object* x_15; lean_object* x_16; lean_object* x_17; lean_object* x_18; lean_object* x_19; lean_dec(x_2); x_7 = 1; -x_8 = l_Lean_instToStringImport___closed__1; +x_8 = l___private_Lean_Environment_0__Lean_AsyncConsts_add___closed__1; x_9 = l_Lean_Name_toString(x_3, x_7, x_8); x_10 = l_Lean_Environment_enableRealizationsForConst___closed__1; x_11 = lean_string_append(x_10, x_9); lean_dec(x_9); x_12 = l_Lean_Environment_enableRealizationsForConst___closed__2; x_13 = lean_string_append(x_11, x_12); -x_14 = l___private_Lean_Environment_0__Lean_AsyncConsts_add___closed__4; +x_14 = l___private_Lean_Environment_0__Lean_AsyncConsts_add___closed__5; x_15 = l_Lean_Environment_enableRealizationsForConst___lambda__3___closed__2; -x_16 = lean_unsigned_to_nat(811u); +x_16 = lean_unsigned_to_nat(800u); x_17 = lean_unsigned_to_nat(4u); x_18 = l___private_Init_Util_0__mkPanicMessageWithDecl(x_14, x_15, x_16, x_17, x_13); lean_dec(x_13); @@ -10155,375 +9650,88 @@ return x_2; static lean_object* _init_l_List_repr___at_Lean_Environment_dbgFormatAsyncState___spec__8___closed__3() { _start: { -lean_object* x_1; lean_object* x_2; lean_object* x_3; -x_1 = l___private_Lean_Environment_0__Lean_reprImport____x40_Lean_Environment___hyg_237____closed__9; -x_2 = lean_box(1); -x_3 = lean_alloc_ctor(5, 2, 0); -lean_ctor_set(x_3, 0, x_1); -lean_ctor_set(x_3, 1, x_2); -return x_3; -} -} -static lean_object* _init_l_List_repr___at_Lean_Environment_dbgFormatAsyncState___spec__8___closed__4() { -_start: -{ -lean_object* x_1; -x_1 = lean_mk_string_unchecked("[", 1, 1); -return x_1; -} -} -static lean_object* _init_l_List_repr___at_Lean_Environment_dbgFormatAsyncState___spec__8___closed__5() { -_start: -{ -lean_object* x_1; lean_object* x_2; -x_1 = l_List_repr___at_Lean_Environment_dbgFormatAsyncState___spec__8___closed__4; -x_2 = lean_string_length(x_1); -return x_2; -} -} -static lean_object* _init_l_List_repr___at_Lean_Environment_dbgFormatAsyncState___spec__8___closed__6() { -_start: -{ -lean_object* x_1; lean_object* x_2; -x_1 = l_List_repr___at_Lean_Environment_dbgFormatAsyncState___spec__8___closed__5; -x_2 = lean_nat_to_int(x_1); -return x_2; -} -} -static lean_object* _init_l_List_repr___at_Lean_Environment_dbgFormatAsyncState___spec__8___closed__7() { -_start: -{ -lean_object* x_1; lean_object* x_2; -x_1 = l_List_repr___at_Lean_Environment_dbgFormatAsyncState___spec__8___closed__4; -x_2 = lean_alloc_ctor(3, 1, 0); -lean_ctor_set(x_2, 0, x_1); -return x_2; -} -} -static lean_object* _init_l_List_repr___at_Lean_Environment_dbgFormatAsyncState___spec__8___closed__8() { -_start: -{ lean_object* x_1; -x_1 = 
lean_mk_string_unchecked("]", 1, 1); +x_1 = lean_mk_string_unchecked(",", 1, 1); return x_1; } } -static lean_object* _init_l_List_repr___at_Lean_Environment_dbgFormatAsyncState___spec__8___closed__9() { +static lean_object* _init_l_List_repr___at_Lean_Environment_dbgFormatAsyncState___spec__8___closed__4() { _start: { lean_object* x_1; lean_object* x_2; -x_1 = l_List_repr___at_Lean_Environment_dbgFormatAsyncState___spec__8___closed__8; +x_1 = l_List_repr___at_Lean_Environment_dbgFormatAsyncState___spec__8___closed__3; x_2 = lean_alloc_ctor(3, 1, 0); lean_ctor_set(x_2, 0, x_1); return x_2; } } -LEAN_EXPORT lean_object* l_List_repr___at_Lean_Environment_dbgFormatAsyncState___spec__8(lean_object* x_1, lean_object* x_2) { +static lean_object* _init_l_List_repr___at_Lean_Environment_dbgFormatAsyncState___spec__8___closed__5() { _start: { -if (lean_obj_tag(x_1) == 0) -{ -lean_object* x_3; -x_3 = l_List_repr___at_Lean_Environment_dbgFormatAsyncState___spec__8___closed__2; +lean_object* x_1; lean_object* x_2; lean_object* x_3; +x_1 = l_List_repr___at_Lean_Environment_dbgFormatAsyncState___spec__8___closed__4; +x_2 = lean_box(1); +x_3 = lean_alloc_ctor(5, 2, 0); +lean_ctor_set(x_3, 0, x_1); +lean_ctor_set(x_3, 1, x_2); return x_3; } -else -{ -lean_object* x_4; lean_object* x_5; uint8_t x_6; -x_4 = l_List_repr___at_Lean_Environment_dbgFormatAsyncState___spec__8___closed__3; -lean_inc(x_1); -x_5 = l_Std_Format_joinSep___at_Lean_Environment_dbgFormatAsyncState___spec__10(x_1, x_4); -x_6 = !lean_is_exclusive(x_1); -if (x_6 == 0) -{ -lean_object* x_7; lean_object* x_8; lean_object* x_9; lean_object* x_10; lean_object* x_11; lean_object* x_12; lean_object* x_13; uint8_t x_14; lean_object* x_15; -x_7 = lean_ctor_get(x_1, 1); -lean_dec(x_7); -x_8 = lean_ctor_get(x_1, 0); -lean_dec(x_8); -x_9 = l_List_repr___at_Lean_Environment_dbgFormatAsyncState___spec__8___closed__7; -lean_ctor_set_tag(x_1, 5); -lean_ctor_set(x_1, 1, x_5); -lean_ctor_set(x_1, 0, x_9); -x_10 = l_List_repr___at_Lean_Environment_dbgFormatAsyncState___spec__8___closed__9; -x_11 = lean_alloc_ctor(5, 2, 0); -lean_ctor_set(x_11, 0, x_1); -lean_ctor_set(x_11, 1, x_10); -x_12 = l_List_repr___at_Lean_Environment_dbgFormatAsyncState___spec__8___closed__6; -x_13 = lean_alloc_ctor(4, 2, 0); -lean_ctor_set(x_13, 0, x_12); -lean_ctor_set(x_13, 1, x_11); -x_14 = 0; -x_15 = lean_alloc_ctor(6, 1, 1); -lean_ctor_set(x_15, 0, x_13); -lean_ctor_set_uint8(x_15, sizeof(void*)*1, x_14); -return x_15; -} -else -{ -lean_object* x_16; lean_object* x_17; lean_object* x_18; lean_object* x_19; lean_object* x_20; lean_object* x_21; uint8_t x_22; lean_object* x_23; -lean_dec(x_1); -x_16 = l_List_repr___at_Lean_Environment_dbgFormatAsyncState___spec__8___closed__7; -x_17 = lean_alloc_ctor(5, 2, 0); -lean_ctor_set(x_17, 0, x_16); -lean_ctor_set(x_17, 1, x_5); -x_18 = l_List_repr___at_Lean_Environment_dbgFormatAsyncState___spec__8___closed__9; -x_19 = lean_alloc_ctor(5, 2, 0); -lean_ctor_set(x_19, 0, x_17); -lean_ctor_set(x_19, 1, x_18); -x_20 = l_List_repr___at_Lean_Environment_dbgFormatAsyncState___spec__8___closed__6; -x_21 = lean_alloc_ctor(4, 2, 0); -lean_ctor_set(x_21, 0, x_20); -lean_ctor_set(x_21, 1, x_19); -x_22 = 0; -x_23 = lean_alloc_ctor(6, 1, 1); -lean_ctor_set(x_23, 0, x_21); -lean_ctor_set_uint8(x_23, sizeof(void*)*1, x_22); -return x_23; -} -} -} } -static lean_object* _init_l_Prod_repr___at_Lean_Environment_dbgFormatAsyncState___spec__14___closed__1() { +static lean_object* 
_init_l_List_repr___at_Lean_Environment_dbgFormatAsyncState___spec__8___closed__6() { _start: { lean_object* x_1; -x_1 = lean_mk_string_unchecked("(", 1, 1); +x_1 = lean_mk_string_unchecked("[", 1, 1); return x_1; } } -static lean_object* _init_l_Prod_repr___at_Lean_Environment_dbgFormatAsyncState___spec__14___closed__2() { +static lean_object* _init_l_List_repr___at_Lean_Environment_dbgFormatAsyncState___spec__8___closed__7() { _start: { lean_object* x_1; lean_object* x_2; -x_1 = l_Prod_repr___at_Lean_Environment_dbgFormatAsyncState___spec__14___closed__1; +x_1 = l_List_repr___at_Lean_Environment_dbgFormatAsyncState___spec__8___closed__6; x_2 = lean_string_length(x_1); return x_2; } } -static lean_object* _init_l_Prod_repr___at_Lean_Environment_dbgFormatAsyncState___spec__14___closed__3() { +static lean_object* _init_l_List_repr___at_Lean_Environment_dbgFormatAsyncState___spec__8___closed__8() { _start: { lean_object* x_1; lean_object* x_2; -x_1 = l_Prod_repr___at_Lean_Environment_dbgFormatAsyncState___spec__14___closed__2; +x_1 = l_List_repr___at_Lean_Environment_dbgFormatAsyncState___spec__8___closed__7; x_2 = lean_nat_to_int(x_1); return x_2; } } -static lean_object* _init_l_Prod_repr___at_Lean_Environment_dbgFormatAsyncState___spec__14___closed__4() { +static lean_object* _init_l_List_repr___at_Lean_Environment_dbgFormatAsyncState___spec__8___closed__9() { _start: { lean_object* x_1; lean_object* x_2; -x_1 = l_Prod_repr___at_Lean_Environment_dbgFormatAsyncState___spec__14___closed__1; +x_1 = l_List_repr___at_Lean_Environment_dbgFormatAsyncState___spec__8___closed__6; x_2 = lean_alloc_ctor(3, 1, 0); lean_ctor_set(x_2, 0, x_1); return x_2; } } -static lean_object* _init_l_Prod_repr___at_Lean_Environment_dbgFormatAsyncState___spec__14___closed__5() { +static lean_object* _init_l_List_repr___at_Lean_Environment_dbgFormatAsyncState___spec__8___closed__10() { _start: { lean_object* x_1; -x_1 = lean_mk_string_unchecked(")", 1, 1); +x_1 = lean_mk_string_unchecked("]", 1, 1); return x_1; } } -static lean_object* _init_l_Prod_repr___at_Lean_Environment_dbgFormatAsyncState___spec__14___closed__6() { +static lean_object* _init_l_List_repr___at_Lean_Environment_dbgFormatAsyncState___spec__8___closed__11() { _start: { lean_object* x_1; lean_object* x_2; -x_1 = l_Prod_repr___at_Lean_Environment_dbgFormatAsyncState___spec__14___closed__5; +x_1 = l_List_repr___at_Lean_Environment_dbgFormatAsyncState___spec__8___closed__10; x_2 = lean_alloc_ctor(3, 1, 0); lean_ctor_set(x_2, 0, x_1); return x_2; } } -LEAN_EXPORT lean_object* l_Prod_repr___at_Lean_Environment_dbgFormatAsyncState___spec__14(lean_object* x_1, lean_object* x_2) { -_start: -{ -uint8_t x_3; -x_3 = !lean_is_exclusive(x_1); -if (x_3 == 0) -{ -lean_object* x_4; lean_object* x_5; lean_object* x_6; lean_object* x_7; lean_object* x_8; lean_object* x_9; lean_object* x_10; lean_object* x_11; lean_object* x_12; lean_object* x_13; lean_object* x_14; lean_object* x_15; lean_object* x_16; lean_object* x_17; lean_object* x_18; lean_object* x_19; uint8_t x_20; lean_object* x_21; -x_4 = lean_ctor_get(x_1, 0); -x_5 = lean_ctor_get(x_1, 1); -x_6 = lean_unsigned_to_nat(0u); -x_7 = l_Lean_Name_reprPrec(x_4, x_6); -x_8 = lean_box(0); -lean_ctor_set_tag(x_1, 1); -lean_ctor_set(x_1, 1, x_8); -lean_ctor_set(x_1, 0, x_7); -x_9 = l_List_repr___at_Lean_Environment_dbgFormatAsyncState___spec__8(x_5, x_6); -x_10 = lean_alloc_ctor(1, 2, 0); -lean_ctor_set(x_10, 0, x_9); -lean_ctor_set(x_10, 1, x_1); -x_11 = l_List_reverse___rarg(x_10); -x_12 = 
l_List_repr___at_Lean_Environment_dbgFormatAsyncState___spec__8___closed__3; -x_13 = l_Std_Format_joinSep___at_Prod_repr___spec__1(x_11, x_12); -x_14 = l_Prod_repr___at_Lean_Environment_dbgFormatAsyncState___spec__14___closed__4; -x_15 = lean_alloc_ctor(5, 2, 0); -lean_ctor_set(x_15, 0, x_14); -lean_ctor_set(x_15, 1, x_13); -x_16 = l_Prod_repr___at_Lean_Environment_dbgFormatAsyncState___spec__14___closed__6; -x_17 = lean_alloc_ctor(5, 2, 0); -lean_ctor_set(x_17, 0, x_15); -lean_ctor_set(x_17, 1, x_16); -x_18 = l_Prod_repr___at_Lean_Environment_dbgFormatAsyncState___spec__14___closed__3; -x_19 = lean_alloc_ctor(4, 2, 0); -lean_ctor_set(x_19, 0, x_18); -lean_ctor_set(x_19, 1, x_17); -x_20 = 0; -x_21 = lean_alloc_ctor(6, 1, 1); -lean_ctor_set(x_21, 0, x_19); -lean_ctor_set_uint8(x_21, sizeof(void*)*1, x_20); -return x_21; -} -else -{ -lean_object* x_22; lean_object* x_23; lean_object* x_24; lean_object* x_25; lean_object* x_26; lean_object* x_27; lean_object* x_28; lean_object* x_29; lean_object* x_30; lean_object* x_31; lean_object* x_32; lean_object* x_33; lean_object* x_34; lean_object* x_35; lean_object* x_36; lean_object* x_37; lean_object* x_38; uint8_t x_39; lean_object* x_40; -x_22 = lean_ctor_get(x_1, 0); -x_23 = lean_ctor_get(x_1, 1); -lean_inc(x_23); -lean_inc(x_22); -lean_dec(x_1); -x_24 = lean_unsigned_to_nat(0u); -x_25 = l_Lean_Name_reprPrec(x_22, x_24); -x_26 = lean_box(0); -x_27 = lean_alloc_ctor(1, 2, 0); -lean_ctor_set(x_27, 0, x_25); -lean_ctor_set(x_27, 1, x_26); -x_28 = l_List_repr___at_Lean_Environment_dbgFormatAsyncState___spec__8(x_23, x_24); -x_29 = lean_alloc_ctor(1, 2, 0); -lean_ctor_set(x_29, 0, x_28); -lean_ctor_set(x_29, 1, x_27); -x_30 = l_List_reverse___rarg(x_29); -x_31 = l_List_repr___at_Lean_Environment_dbgFormatAsyncState___spec__8___closed__3; -x_32 = l_Std_Format_joinSep___at_Prod_repr___spec__1(x_30, x_31); -x_33 = l_Prod_repr___at_Lean_Environment_dbgFormatAsyncState___spec__14___closed__4; -x_34 = lean_alloc_ctor(5, 2, 0); -lean_ctor_set(x_34, 0, x_33); -lean_ctor_set(x_34, 1, x_32); -x_35 = l_Prod_repr___at_Lean_Environment_dbgFormatAsyncState___spec__14___closed__6; -x_36 = lean_alloc_ctor(5, 2, 0); -lean_ctor_set(x_36, 0, x_34); -lean_ctor_set(x_36, 1, x_35); -x_37 = l_Prod_repr___at_Lean_Environment_dbgFormatAsyncState___spec__14___closed__3; -x_38 = lean_alloc_ctor(4, 2, 0); -lean_ctor_set(x_38, 0, x_37); -lean_ctor_set(x_38, 1, x_36); -x_39 = 0; -x_40 = lean_alloc_ctor(6, 1, 1); -lean_ctor_set(x_40, 0, x_38); -lean_ctor_set_uint8(x_40, sizeof(void*)*1, x_39); -return x_40; -} -} -} -LEAN_EXPORT lean_object* l_repr___at_Lean_Environment_dbgFormatAsyncState___spec__13(lean_object* x_1) { -_start: -{ -lean_object* x_2; lean_object* x_3; -x_2 = lean_unsigned_to_nat(0u); -x_3 = l_Prod_repr___at_Lean_Environment_dbgFormatAsyncState___spec__14(x_1, x_2); -return x_3; -} -} -LEAN_EXPORT lean_object* l_List_foldl___at_Lean_Environment_dbgFormatAsyncState___spec__16(lean_object* x_1, lean_object* x_2, lean_object* x_3) { -_start: -{ -if (lean_obj_tag(x_3) == 0) -{ -lean_dec(x_1); -return x_2; -} -else -{ -uint8_t x_4; -x_4 = !lean_is_exclusive(x_3); -if (x_4 == 0) -{ -lean_object* x_5; lean_object* x_6; lean_object* x_7; lean_object* x_8; lean_object* x_9; -x_5 = lean_ctor_get(x_3, 0); -x_6 = lean_ctor_get(x_3, 1); -lean_inc(x_1); -lean_ctor_set_tag(x_3, 5); -lean_ctor_set(x_3, 1, x_1); -lean_ctor_set(x_3, 0, x_2); -x_7 = lean_unsigned_to_nat(0u); -x_8 = l_Prod_repr___at_Lean_Environment_dbgFormatAsyncState___spec__14(x_5, x_7); -x_9 = lean_alloc_ctor(5, 
2, 0); -lean_ctor_set(x_9, 0, x_3); -lean_ctor_set(x_9, 1, x_8); -x_2 = x_9; -x_3 = x_6; -goto _start; -} -else -{ -lean_object* x_11; lean_object* x_12; lean_object* x_13; lean_object* x_14; lean_object* x_15; lean_object* x_16; -x_11 = lean_ctor_get(x_3, 0); -x_12 = lean_ctor_get(x_3, 1); -lean_inc(x_12); -lean_inc(x_11); -lean_dec(x_3); -lean_inc(x_1); -x_13 = lean_alloc_ctor(5, 2, 0); -lean_ctor_set(x_13, 0, x_2); -lean_ctor_set(x_13, 1, x_1); -x_14 = lean_unsigned_to_nat(0u); -x_15 = l_Prod_repr___at_Lean_Environment_dbgFormatAsyncState___spec__14(x_11, x_14); -x_16 = lean_alloc_ctor(5, 2, 0); -lean_ctor_set(x_16, 0, x_13); -lean_ctor_set(x_16, 1, x_15); -x_2 = x_16; -x_3 = x_12; -goto _start; -} -} -} -} -LEAN_EXPORT lean_object* l_Std_Format_joinSep___at_Lean_Environment_dbgFormatAsyncState___spec__15(lean_object* x_1, lean_object* x_2) { -_start: -{ -if (lean_obj_tag(x_1) == 0) -{ -lean_object* x_3; -lean_dec(x_2); -x_3 = lean_box(0); -return x_3; -} -else -{ -lean_object* x_4; -x_4 = lean_ctor_get(x_1, 1); -lean_inc(x_4); -if (lean_obj_tag(x_4) == 0) -{ -lean_object* x_5; lean_object* x_6; lean_object* x_7; -lean_dec(x_2); -x_5 = lean_ctor_get(x_1, 0); -lean_inc(x_5); -lean_dec(x_1); -x_6 = lean_unsigned_to_nat(0u); -x_7 = l_Prod_repr___at_Lean_Environment_dbgFormatAsyncState___spec__14(x_5, x_6); -return x_7; -} -else -{ -lean_object* x_8; lean_object* x_9; lean_object* x_10; lean_object* x_11; -x_8 = lean_ctor_get(x_1, 0); -lean_inc(x_8); -lean_dec(x_1); -x_9 = lean_unsigned_to_nat(0u); -x_10 = l_Prod_repr___at_Lean_Environment_dbgFormatAsyncState___spec__14(x_8, x_9); -x_11 = l_List_foldl___at_Lean_Environment_dbgFormatAsyncState___spec__16(x_2, x_10, x_4); -return x_11; -} -} -} -} -LEAN_EXPORT lean_object* l_List_repr___at_Lean_Environment_dbgFormatAsyncState___spec__12(lean_object* x_1, lean_object* x_2) { +LEAN_EXPORT lean_object* l_List_repr___at_Lean_Environment_dbgFormatAsyncState___spec__8(lean_object* x_1, lean_object* x_2) { _start: { if (lean_obj_tag(x_1) == 0) @@ -10535,9 +9743,9 @@ return x_3; else { lean_object* x_4; lean_object* x_5; uint8_t x_6; -x_4 = l_List_repr___at_Lean_Environment_dbgFormatAsyncState___spec__8___closed__3; +x_4 = l_List_repr___at_Lean_Environment_dbgFormatAsyncState___spec__8___closed__5; lean_inc(x_1); -x_5 = l_Std_Format_joinSep___at_Lean_Environment_dbgFormatAsyncState___spec__15(x_1, x_4); +x_5 = l_Std_Format_joinSep___at_Lean_Environment_dbgFormatAsyncState___spec__10(x_1, x_4); x_6 = !lean_is_exclusive(x_1); if (x_6 == 0) { @@ -10546,15 +9754,15 @@ x_7 = lean_ctor_get(x_1, 1); lean_dec(x_7); x_8 = lean_ctor_get(x_1, 0); lean_dec(x_8); -x_9 = l_List_repr___at_Lean_Environment_dbgFormatAsyncState___spec__8___closed__7; +x_9 = l_List_repr___at_Lean_Environment_dbgFormatAsyncState___spec__8___closed__9; lean_ctor_set_tag(x_1, 5); lean_ctor_set(x_1, 1, x_5); lean_ctor_set(x_1, 0, x_9); -x_10 = l_List_repr___at_Lean_Environment_dbgFormatAsyncState___spec__8___closed__9; +x_10 = l_List_repr___at_Lean_Environment_dbgFormatAsyncState___spec__8___closed__11; x_11 = lean_alloc_ctor(5, 2, 0); lean_ctor_set(x_11, 0, x_1); lean_ctor_set(x_11, 1, x_10); -x_12 = l_List_repr___at_Lean_Environment_dbgFormatAsyncState___spec__8___closed__6; +x_12 = l_List_repr___at_Lean_Environment_dbgFormatAsyncState___spec__8___closed__8; x_13 = lean_alloc_ctor(4, 2, 0); lean_ctor_set(x_13, 0, x_12); lean_ctor_set(x_13, 1, x_11); @@ -10568,15 +9776,320 @@ else { lean_object* x_16; lean_object* x_17; lean_object* x_18; lean_object* x_19; lean_object* x_20; lean_object* 
x_21; uint8_t x_22; lean_object* x_23; lean_dec(x_1); -x_16 = l_List_repr___at_Lean_Environment_dbgFormatAsyncState___spec__8___closed__7; +x_16 = l_List_repr___at_Lean_Environment_dbgFormatAsyncState___spec__8___closed__9; x_17 = lean_alloc_ctor(5, 2, 0); lean_ctor_set(x_17, 0, x_16); lean_ctor_set(x_17, 1, x_5); -x_18 = l_List_repr___at_Lean_Environment_dbgFormatAsyncState___spec__8___closed__9; +x_18 = l_List_repr___at_Lean_Environment_dbgFormatAsyncState___spec__8___closed__11; x_19 = lean_alloc_ctor(5, 2, 0); lean_ctor_set(x_19, 0, x_17); lean_ctor_set(x_19, 1, x_18); -x_20 = l_List_repr___at_Lean_Environment_dbgFormatAsyncState___spec__8___closed__6; +x_20 = l_List_repr___at_Lean_Environment_dbgFormatAsyncState___spec__8___closed__8; +x_21 = lean_alloc_ctor(4, 2, 0); +lean_ctor_set(x_21, 0, x_20); +lean_ctor_set(x_21, 1, x_19); +x_22 = 0; +x_23 = lean_alloc_ctor(6, 1, 1); +lean_ctor_set(x_23, 0, x_21); +lean_ctor_set_uint8(x_23, sizeof(void*)*1, x_22); +return x_23; +} +} +} +} +static lean_object* _init_l_Prod_repr___at_Lean_Environment_dbgFormatAsyncState___spec__14___closed__1() { +_start: +{ +lean_object* x_1; +x_1 = lean_mk_string_unchecked("(", 1, 1); +return x_1; +} +} +static lean_object* _init_l_Prod_repr___at_Lean_Environment_dbgFormatAsyncState___spec__14___closed__2() { +_start: +{ +lean_object* x_1; lean_object* x_2; +x_1 = l_Prod_repr___at_Lean_Environment_dbgFormatAsyncState___spec__14___closed__1; +x_2 = lean_string_length(x_1); +return x_2; +} +} +static lean_object* _init_l_Prod_repr___at_Lean_Environment_dbgFormatAsyncState___spec__14___closed__3() { +_start: +{ +lean_object* x_1; lean_object* x_2; +x_1 = l_Prod_repr___at_Lean_Environment_dbgFormatAsyncState___spec__14___closed__2; +x_2 = lean_nat_to_int(x_1); +return x_2; +} +} +static lean_object* _init_l_Prod_repr___at_Lean_Environment_dbgFormatAsyncState___spec__14___closed__4() { +_start: +{ +lean_object* x_1; lean_object* x_2; +x_1 = l_Prod_repr___at_Lean_Environment_dbgFormatAsyncState___spec__14___closed__1; +x_2 = lean_alloc_ctor(3, 1, 0); +lean_ctor_set(x_2, 0, x_1); +return x_2; +} +} +static lean_object* _init_l_Prod_repr___at_Lean_Environment_dbgFormatAsyncState___spec__14___closed__5() { +_start: +{ +lean_object* x_1; +x_1 = lean_mk_string_unchecked(")", 1, 1); +return x_1; +} +} +static lean_object* _init_l_Prod_repr___at_Lean_Environment_dbgFormatAsyncState___spec__14___closed__6() { +_start: +{ +lean_object* x_1; lean_object* x_2; +x_1 = l_Prod_repr___at_Lean_Environment_dbgFormatAsyncState___spec__14___closed__5; +x_2 = lean_alloc_ctor(3, 1, 0); +lean_ctor_set(x_2, 0, x_1); +return x_2; +} +} +LEAN_EXPORT lean_object* l_Prod_repr___at_Lean_Environment_dbgFormatAsyncState___spec__14(lean_object* x_1, lean_object* x_2) { +_start: +{ +uint8_t x_3; +x_3 = !lean_is_exclusive(x_1); +if (x_3 == 0) +{ +lean_object* x_4; lean_object* x_5; lean_object* x_6; lean_object* x_7; lean_object* x_8; lean_object* x_9; lean_object* x_10; lean_object* x_11; lean_object* x_12; lean_object* x_13; lean_object* x_14; lean_object* x_15; lean_object* x_16; lean_object* x_17; lean_object* x_18; lean_object* x_19; uint8_t x_20; lean_object* x_21; +x_4 = lean_ctor_get(x_1, 0); +x_5 = lean_ctor_get(x_1, 1); +x_6 = lean_unsigned_to_nat(0u); +x_7 = l_Lean_Name_reprPrec(x_4, x_6); +x_8 = lean_box(0); +lean_ctor_set_tag(x_1, 1); +lean_ctor_set(x_1, 1, x_8); +lean_ctor_set(x_1, 0, x_7); +x_9 = l_List_repr___at_Lean_Environment_dbgFormatAsyncState___spec__8(x_5, x_6); +x_10 = lean_alloc_ctor(1, 2, 0); +lean_ctor_set(x_10, 0, x_9); 
+lean_ctor_set(x_10, 1, x_1); +x_11 = l_List_reverse___rarg(x_10); +x_12 = l_List_repr___at_Lean_Environment_dbgFormatAsyncState___spec__8___closed__5; +x_13 = l_Std_Format_joinSep___at_Prod_repr___spec__1(x_11, x_12); +x_14 = l_Prod_repr___at_Lean_Environment_dbgFormatAsyncState___spec__14___closed__4; +x_15 = lean_alloc_ctor(5, 2, 0); +lean_ctor_set(x_15, 0, x_14); +lean_ctor_set(x_15, 1, x_13); +x_16 = l_Prod_repr___at_Lean_Environment_dbgFormatAsyncState___spec__14___closed__6; +x_17 = lean_alloc_ctor(5, 2, 0); +lean_ctor_set(x_17, 0, x_15); +lean_ctor_set(x_17, 1, x_16); +x_18 = l_Prod_repr___at_Lean_Environment_dbgFormatAsyncState___spec__14___closed__3; +x_19 = lean_alloc_ctor(4, 2, 0); +lean_ctor_set(x_19, 0, x_18); +lean_ctor_set(x_19, 1, x_17); +x_20 = 0; +x_21 = lean_alloc_ctor(6, 1, 1); +lean_ctor_set(x_21, 0, x_19); +lean_ctor_set_uint8(x_21, sizeof(void*)*1, x_20); +return x_21; +} +else +{ +lean_object* x_22; lean_object* x_23; lean_object* x_24; lean_object* x_25; lean_object* x_26; lean_object* x_27; lean_object* x_28; lean_object* x_29; lean_object* x_30; lean_object* x_31; lean_object* x_32; lean_object* x_33; lean_object* x_34; lean_object* x_35; lean_object* x_36; lean_object* x_37; lean_object* x_38; uint8_t x_39; lean_object* x_40; +x_22 = lean_ctor_get(x_1, 0); +x_23 = lean_ctor_get(x_1, 1); +lean_inc(x_23); +lean_inc(x_22); +lean_dec(x_1); +x_24 = lean_unsigned_to_nat(0u); +x_25 = l_Lean_Name_reprPrec(x_22, x_24); +x_26 = lean_box(0); +x_27 = lean_alloc_ctor(1, 2, 0); +lean_ctor_set(x_27, 0, x_25); +lean_ctor_set(x_27, 1, x_26); +x_28 = l_List_repr___at_Lean_Environment_dbgFormatAsyncState___spec__8(x_23, x_24); +x_29 = lean_alloc_ctor(1, 2, 0); +lean_ctor_set(x_29, 0, x_28); +lean_ctor_set(x_29, 1, x_27); +x_30 = l_List_reverse___rarg(x_29); +x_31 = l_List_repr___at_Lean_Environment_dbgFormatAsyncState___spec__8___closed__5; +x_32 = l_Std_Format_joinSep___at_Prod_repr___spec__1(x_30, x_31); +x_33 = l_Prod_repr___at_Lean_Environment_dbgFormatAsyncState___spec__14___closed__4; +x_34 = lean_alloc_ctor(5, 2, 0); +lean_ctor_set(x_34, 0, x_33); +lean_ctor_set(x_34, 1, x_32); +x_35 = l_Prod_repr___at_Lean_Environment_dbgFormatAsyncState___spec__14___closed__6; +x_36 = lean_alloc_ctor(5, 2, 0); +lean_ctor_set(x_36, 0, x_34); +lean_ctor_set(x_36, 1, x_35); +x_37 = l_Prod_repr___at_Lean_Environment_dbgFormatAsyncState___spec__14___closed__3; +x_38 = lean_alloc_ctor(4, 2, 0); +lean_ctor_set(x_38, 0, x_37); +lean_ctor_set(x_38, 1, x_36); +x_39 = 0; +x_40 = lean_alloc_ctor(6, 1, 1); +lean_ctor_set(x_40, 0, x_38); +lean_ctor_set_uint8(x_40, sizeof(void*)*1, x_39); +return x_40; +} +} +} +LEAN_EXPORT lean_object* l_repr___at_Lean_Environment_dbgFormatAsyncState___spec__13(lean_object* x_1) { +_start: +{ +lean_object* x_2; lean_object* x_3; +x_2 = lean_unsigned_to_nat(0u); +x_3 = l_Prod_repr___at_Lean_Environment_dbgFormatAsyncState___spec__14(x_1, x_2); +return x_3; +} +} +LEAN_EXPORT lean_object* l_List_foldl___at_Lean_Environment_dbgFormatAsyncState___spec__16(lean_object* x_1, lean_object* x_2, lean_object* x_3) { +_start: +{ +if (lean_obj_tag(x_3) == 0) +{ +lean_dec(x_1); +return x_2; +} +else +{ +uint8_t x_4; +x_4 = !lean_is_exclusive(x_3); +if (x_4 == 0) +{ +lean_object* x_5; lean_object* x_6; lean_object* x_7; lean_object* x_8; lean_object* x_9; +x_5 = lean_ctor_get(x_3, 0); +x_6 = lean_ctor_get(x_3, 1); +lean_inc(x_1); +lean_ctor_set_tag(x_3, 5); +lean_ctor_set(x_3, 1, x_1); +lean_ctor_set(x_3, 0, x_2); +x_7 = lean_unsigned_to_nat(0u); +x_8 = 
l_Prod_repr___at_Lean_Environment_dbgFormatAsyncState___spec__14(x_5, x_7); +x_9 = lean_alloc_ctor(5, 2, 0); +lean_ctor_set(x_9, 0, x_3); +lean_ctor_set(x_9, 1, x_8); +x_2 = x_9; +x_3 = x_6; +goto _start; +} +else +{ +lean_object* x_11; lean_object* x_12; lean_object* x_13; lean_object* x_14; lean_object* x_15; lean_object* x_16; +x_11 = lean_ctor_get(x_3, 0); +x_12 = lean_ctor_get(x_3, 1); +lean_inc(x_12); +lean_inc(x_11); +lean_dec(x_3); +lean_inc(x_1); +x_13 = lean_alloc_ctor(5, 2, 0); +lean_ctor_set(x_13, 0, x_2); +lean_ctor_set(x_13, 1, x_1); +x_14 = lean_unsigned_to_nat(0u); +x_15 = l_Prod_repr___at_Lean_Environment_dbgFormatAsyncState___spec__14(x_11, x_14); +x_16 = lean_alloc_ctor(5, 2, 0); +lean_ctor_set(x_16, 0, x_13); +lean_ctor_set(x_16, 1, x_15); +x_2 = x_16; +x_3 = x_12; +goto _start; +} +} +} +} +LEAN_EXPORT lean_object* l_Std_Format_joinSep___at_Lean_Environment_dbgFormatAsyncState___spec__15(lean_object* x_1, lean_object* x_2) { +_start: +{ +if (lean_obj_tag(x_1) == 0) +{ +lean_object* x_3; +lean_dec(x_2); +x_3 = lean_box(0); +return x_3; +} +else +{ +lean_object* x_4; +x_4 = lean_ctor_get(x_1, 1); +lean_inc(x_4); +if (lean_obj_tag(x_4) == 0) +{ +lean_object* x_5; lean_object* x_6; lean_object* x_7; +lean_dec(x_2); +x_5 = lean_ctor_get(x_1, 0); +lean_inc(x_5); +lean_dec(x_1); +x_6 = lean_unsigned_to_nat(0u); +x_7 = l_Prod_repr___at_Lean_Environment_dbgFormatAsyncState___spec__14(x_5, x_6); +return x_7; +} +else +{ +lean_object* x_8; lean_object* x_9; lean_object* x_10; lean_object* x_11; +x_8 = lean_ctor_get(x_1, 0); +lean_inc(x_8); +lean_dec(x_1); +x_9 = lean_unsigned_to_nat(0u); +x_10 = l_Prod_repr___at_Lean_Environment_dbgFormatAsyncState___spec__14(x_8, x_9); +x_11 = l_List_foldl___at_Lean_Environment_dbgFormatAsyncState___spec__16(x_2, x_10, x_4); +return x_11; +} +} +} +} +LEAN_EXPORT lean_object* l_List_repr___at_Lean_Environment_dbgFormatAsyncState___spec__12(lean_object* x_1, lean_object* x_2) { +_start: +{ +if (lean_obj_tag(x_1) == 0) +{ +lean_object* x_3; +x_3 = l_List_repr___at_Lean_Environment_dbgFormatAsyncState___spec__8___closed__2; +return x_3; +} +else +{ +lean_object* x_4; lean_object* x_5; uint8_t x_6; +x_4 = l_List_repr___at_Lean_Environment_dbgFormatAsyncState___spec__8___closed__5; +lean_inc(x_1); +x_5 = l_Std_Format_joinSep___at_Lean_Environment_dbgFormatAsyncState___spec__15(x_1, x_4); +x_6 = !lean_is_exclusive(x_1); +if (x_6 == 0) +{ +lean_object* x_7; lean_object* x_8; lean_object* x_9; lean_object* x_10; lean_object* x_11; lean_object* x_12; lean_object* x_13; uint8_t x_14; lean_object* x_15; +x_7 = lean_ctor_get(x_1, 1); +lean_dec(x_7); +x_8 = lean_ctor_get(x_1, 0); +lean_dec(x_8); +x_9 = l_List_repr___at_Lean_Environment_dbgFormatAsyncState___spec__8___closed__9; +lean_ctor_set_tag(x_1, 5); +lean_ctor_set(x_1, 1, x_5); +lean_ctor_set(x_1, 0, x_9); +x_10 = l_List_repr___at_Lean_Environment_dbgFormatAsyncState___spec__8___closed__11; +x_11 = lean_alloc_ctor(5, 2, 0); +lean_ctor_set(x_11, 0, x_1); +lean_ctor_set(x_11, 1, x_10); +x_12 = l_List_repr___at_Lean_Environment_dbgFormatAsyncState___spec__8___closed__8; +x_13 = lean_alloc_ctor(4, 2, 0); +lean_ctor_set(x_13, 0, x_12); +lean_ctor_set(x_13, 1, x_11); +x_14 = 0; +x_15 = lean_alloc_ctor(6, 1, 1); +lean_ctor_set(x_15, 0, x_13); +lean_ctor_set_uint8(x_15, sizeof(void*)*1, x_14); +return x_15; +} +else +{ +lean_object* x_16; lean_object* x_17; lean_object* x_18; lean_object* x_19; lean_object* x_20; lean_object* x_21; uint8_t x_22; lean_object* x_23; +lean_dec(x_1); +x_16 = 
l_List_repr___at_Lean_Environment_dbgFormatAsyncState___spec__8___closed__9; +x_17 = lean_alloc_ctor(5, 2, 0); +lean_ctor_set(x_17, 0, x_16); +lean_ctor_set(x_17, 1, x_5); +x_18 = l_List_repr___at_Lean_Environment_dbgFormatAsyncState___spec__8___closed__11; +x_19 = lean_alloc_ctor(5, 2, 0); +lean_ctor_set(x_19, 0, x_17); +lean_ctor_set(x_19, 1, x_18); +x_20 = l_List_repr___at_Lean_Environment_dbgFormatAsyncState___spec__8___closed__8; x_21 = lean_alloc_ctor(4, 2, 0); lean_ctor_set(x_21, 0, x_20); lean_ctor_set(x_21, 1, x_19); @@ -10613,7 +10126,7 @@ x_11 = lean_alloc_ctor(1, 2, 0); lean_ctor_set(x_11, 0, x_10); lean_ctor_set(x_11, 1, x_1); x_12 = l_List_reverse___rarg(x_11); -x_13 = l_List_repr___at_Lean_Environment_dbgFormatAsyncState___spec__8___closed__3; +x_13 = l_List_repr___at_Lean_Environment_dbgFormatAsyncState___spec__8___closed__5; x_14 = l_Std_Format_joinSep___at_Prod_repr___spec__1(x_12, x_13); x_15 = l_Prod_repr___at_Lean_Environment_dbgFormatAsyncState___spec__14___closed__4; x_16 = lean_alloc_ctor(5, 2, 0); @@ -10655,7 +10168,7 @@ x_31 = lean_alloc_ctor(1, 2, 0); lean_ctor_set(x_31, 0, x_30); lean_ctor_set(x_31, 1, x_28); x_32 = l_List_reverse___rarg(x_31); -x_33 = l_List_repr___at_Lean_Environment_dbgFormatAsyncState___spec__8___closed__3; +x_33 = l_List_repr___at_Lean_Environment_dbgFormatAsyncState___spec__8___closed__5; x_34 = l_Std_Format_joinSep___at_Prod_repr___spec__1(x_32, x_33); x_35 = l_Prod_repr___at_Lean_Environment_dbgFormatAsyncState___spec__14___closed__4; x_36 = lean_alloc_ctor(5, 2, 0); @@ -10792,7 +10305,7 @@ return x_3; else { lean_object* x_4; lean_object* x_5; uint8_t x_6; -x_4 = l_List_repr___at_Lean_Environment_dbgFormatAsyncState___spec__8___closed__3; +x_4 = l_List_repr___at_Lean_Environment_dbgFormatAsyncState___spec__8___closed__5; lean_inc(x_1); x_5 = l_Std_Format_joinSep___at_Lean_Environment_dbgFormatAsyncState___spec__21(x_1, x_4); x_6 = !lean_is_exclusive(x_1); @@ -10803,15 +10316,15 @@ x_7 = lean_ctor_get(x_1, 1); lean_dec(x_7); x_8 = lean_ctor_get(x_1, 0); lean_dec(x_8); -x_9 = l_List_repr___at_Lean_Environment_dbgFormatAsyncState___spec__8___closed__7; +x_9 = l_List_repr___at_Lean_Environment_dbgFormatAsyncState___spec__8___closed__9; lean_ctor_set_tag(x_1, 5); lean_ctor_set(x_1, 1, x_5); lean_ctor_set(x_1, 0, x_9); -x_10 = l_List_repr___at_Lean_Environment_dbgFormatAsyncState___spec__8___closed__9; +x_10 = l_List_repr___at_Lean_Environment_dbgFormatAsyncState___spec__8___closed__11; x_11 = lean_alloc_ctor(5, 2, 0); lean_ctor_set(x_11, 0, x_1); lean_ctor_set(x_11, 1, x_10); -x_12 = l_List_repr___at_Lean_Environment_dbgFormatAsyncState___spec__8___closed__6; +x_12 = l_List_repr___at_Lean_Environment_dbgFormatAsyncState___spec__8___closed__8; x_13 = lean_alloc_ctor(4, 2, 0); lean_ctor_set(x_13, 0, x_12); lean_ctor_set(x_13, 1, x_11); @@ -10825,15 +10338,15 @@ else { lean_object* x_16; lean_object* x_17; lean_object* x_18; lean_object* x_19; lean_object* x_20; lean_object* x_21; uint8_t x_22; lean_object* x_23; lean_dec(x_1); -x_16 = l_List_repr___at_Lean_Environment_dbgFormatAsyncState___spec__8___closed__7; +x_16 = l_List_repr___at_Lean_Environment_dbgFormatAsyncState___spec__8___closed__9; x_17 = lean_alloc_ctor(5, 2, 0); lean_ctor_set(x_17, 0, x_16); lean_ctor_set(x_17, 1, x_5); -x_18 = l_List_repr___at_Lean_Environment_dbgFormatAsyncState___spec__8___closed__9; +x_18 = l_List_repr___at_Lean_Environment_dbgFormatAsyncState___spec__8___closed__11; x_19 = lean_alloc_ctor(5, 2, 0); lean_ctor_set(x_19, 0, x_17); lean_ctor_set(x_19, 1, x_18); 
-x_20 = l_List_repr___at_Lean_Environment_dbgFormatAsyncState___spec__8___closed__6; +x_20 = l_List_repr___at_Lean_Environment_dbgFormatAsyncState___spec__8___closed__8; x_21 = lean_alloc_ctor(4, 2, 0); lean_ctor_set(x_21, 0, x_20); lean_ctor_set(x_21, 1, x_19); @@ -11205,7 +10718,7 @@ x_8 = lean_ctor_get(x_7, 0); lean_inc(x_8); lean_dec(x_7); x_9 = 1; -x_10 = l_Lean_instToStringImport___closed__1; +x_10 = l___private_Lean_Environment_0__Lean_AsyncConsts_add___closed__1; x_11 = l_Lean_Name_toString(x_8, x_9, x_10); lean_ctor_set(x_1, 1, x_2); lean_ctor_set(x_1, 0, x_11); @@ -11232,7 +10745,7 @@ x_16 = lean_ctor_get(x_15, 0); lean_inc(x_16); lean_dec(x_15); x_17 = 1; -x_18 = l_Lean_instToStringImport___closed__1; +x_18 = l___private_Lean_Environment_0__Lean_AsyncConsts_add___closed__1; x_19 = l_Lean_Name_toString(x_16, x_17, x_18); x_20 = lean_alloc_ctor(1, 2, 0); lean_ctor_set(x_20, 0, x_19); @@ -11290,9 +10803,9 @@ if (lean_obj_tag(x_3) == 0) { lean_object* x_4; lean_object* x_5; lean_object* x_6; lean_object* x_7; lean_object* x_8; x_4 = lean_ctor_get(x_1, 0); -x_5 = l_List_repr___at_Lean_Environment_dbgFormatAsyncState___spec__8___closed__4; +x_5 = l_List_repr___at_Lean_Environment_dbgFormatAsyncState___spec__8___closed__6; x_6 = lean_string_append(x_5, x_4); -x_7 = l_List_repr___at_Lean_Environment_dbgFormatAsyncState___spec__8___closed__8; +x_7 = l_List_repr___at_Lean_Environment_dbgFormatAsyncState___spec__8___closed__10; x_8 = lean_string_append(x_6, x_7); return x_8; } @@ -11300,7 +10813,7 @@ else { lean_object* x_9; lean_object* x_10; lean_object* x_11; lean_object* x_12; uint32_t x_13; lean_object* x_14; x_9 = lean_ctor_get(x_1, 0); -x_10 = l_List_repr___at_Lean_Environment_dbgFormatAsyncState___spec__8___closed__4; +x_10 = l_List_repr___at_Lean_Environment_dbgFormatAsyncState___spec__8___closed__6; x_11 = lean_string_append(x_10, x_9); x_12 = l_List_foldl___at_Lean_Environment_dbgFormatAsyncState___spec__32(x_11, x_3); x_13 = 93; @@ -11685,7 +11198,7 @@ x_47 = l_Lean_Environment_dbgFormatAsyncState___closed__5; x_48 = lean_string_append(x_46, x_47); x_49 = lean_string_append(x_48, x_32); lean_dec(x_32); -x_50 = l___private_Lean_Environment_0__Lean_AsyncConsts_add___closed__3; +x_50 = l___private_Lean_Environment_0__Lean_AsyncConsts_add___closed__4; x_51 = lean_string_append(x_49, x_50); if (lean_is_scalar(x_9)) { x_52 = lean_alloc_ctor(0, 2, 0); @@ -11729,7 +11242,7 @@ x_69 = l_Lean_Environment_dbgFormatAsyncState___closed__5; x_70 = lean_string_append(x_68, x_69); x_71 = lean_string_append(x_70, x_32); lean_dec(x_32); -x_72 = l___private_Lean_Environment_0__Lean_AsyncConsts_add___closed__3; +x_72 = l___private_Lean_Environment_0__Lean_AsyncConsts_add___closed__4; x_73 = lean_string_append(x_71, x_72); if (lean_is_scalar(x_9)) { x_74 = lean_alloc_ctor(0, 2, 0); @@ -11772,7 +11285,7 @@ x_91 = l_Lean_Environment_dbgFormatAsyncState___closed__5; x_92 = lean_string_append(x_90, x_91); x_93 = lean_string_append(x_92, x_32); lean_dec(x_32); -x_94 = l___private_Lean_Environment_0__Lean_AsyncConsts_add___closed__3; +x_94 = l___private_Lean_Environment_0__Lean_AsyncConsts_add___closed__4; x_95 = lean_string_append(x_93, x_94); if (lean_is_scalar(x_9)) { x_96 = lean_alloc_ctor(0, 2, 0); @@ -12011,7 +11524,7 @@ x_13 = lean_format_pretty(x_11, x_12, x_10, x_10); x_14 = l_Lean_Environment_dbgFormatCheckedSyncState___closed__1; x_15 = lean_string_append(x_14, x_13); lean_dec(x_13); -x_16 = l___private_Lean_Environment_0__Lean_AsyncConsts_add___closed__3; +x_16 = 
l___private_Lean_Environment_0__Lean_AsyncConsts_add___closed__4; x_17 = lean_string_append(x_15, x_16); x_18 = lean_alloc_ctor(0, 2, 0); lean_ctor_set(x_18, 0, x_17); @@ -12428,9 +11941,9 @@ static lean_object* _init_l_Lean_Environment_PromiseCheckedResult_commitChecked_ _start: { lean_object* x_1; lean_object* x_2; lean_object* x_3; lean_object* x_4; lean_object* x_5; lean_object* x_6; -x_1 = l___private_Lean_Environment_0__Lean_AsyncConsts_add___closed__4; +x_1 = l___private_Lean_Environment_0__Lean_AsyncConsts_add___closed__5; x_2 = l_Lean_Environment_PromiseCheckedResult_commitChecked___closed__4; -x_3 = lean_unsigned_to_nat(893u); +x_3 = lean_unsigned_to_nat(882u); x_4 = lean_unsigned_to_nat(2u); x_5 = l_Lean_Environment_PromiseCheckedResult_commitChecked___closed__3; x_6 = l___private_Init_Util_0__mkPanicMessageWithDecl(x_1, x_2, x_3, x_4, x_5); @@ -12558,35 +12071,43 @@ return x_1; static lean_object* _init_l___private_Lean_Environment_0__Lean_Environment_mkFallbackConstInfo___closed__9() { _start: { +lean_object* x_1; +x_1 = lean_mk_string_unchecked("true", 4, 4); +return x_1; +} +} +static lean_object* _init_l___private_Lean_Environment_0__Lean_Environment_mkFallbackConstInfo___closed__10() { +_start: +{ lean_object* x_1; lean_object* x_2; lean_object* x_3; x_1 = l___private_Lean_Environment_0__Lean_Environment_mkFallbackConstInfo___closed__8; -x_2 = l___private_Lean_Environment_0__Lean_reprImport____x40_Lean_Environment___hyg_237____closed__26; +x_2 = l___private_Lean_Environment_0__Lean_Environment_mkFallbackConstInfo___closed__9; x_3 = l_Lean_Name_mkStr2(x_1, x_2); return x_3; } } -static lean_object* _init_l___private_Lean_Environment_0__Lean_Environment_mkFallbackConstInfo___closed__10() { +static lean_object* _init_l___private_Lean_Environment_0__Lean_Environment_mkFallbackConstInfo___closed__11() { _start: { lean_object* x_1; lean_object* x_2; lean_object* x_3; x_1 = lean_box(0); -x_2 = l___private_Lean_Environment_0__Lean_Environment_mkFallbackConstInfo___closed__9; +x_2 = l___private_Lean_Environment_0__Lean_Environment_mkFallbackConstInfo___closed__10; x_3 = l_Lean_Expr_const___override(x_2, x_1); return x_3; } } -static lean_object* _init_l___private_Lean_Environment_0__Lean_Environment_mkFallbackConstInfo___closed__11() { +static lean_object* _init_l___private_Lean_Environment_0__Lean_Environment_mkFallbackConstInfo___closed__12() { _start: { lean_object* x_1; lean_object* x_2; lean_object* x_3; lean_object* x_4; x_1 = l___private_Lean_Environment_0__Lean_Environment_mkFallbackConstInfo___closed__5; x_2 = l___private_Lean_Environment_0__Lean_Environment_mkFallbackConstInfo___closed__7; -x_3 = l___private_Lean_Environment_0__Lean_Environment_mkFallbackConstInfo___closed__10; +x_3 = l___private_Lean_Environment_0__Lean_Environment_mkFallbackConstInfo___closed__11; x_4 = l_Lean_mkAppB(x_1, x_2, x_3); return x_4; } } -static lean_object* _init_l___private_Lean_Environment_0__Lean_Environment_mkFallbackConstInfo___closed__12() { +static lean_object* _init_l___private_Lean_Environment_0__Lean_Environment_mkFallbackConstInfo___closed__13() { _start: { lean_object* x_1; lean_object* x_2; lean_object* x_3; @@ -12598,28 +12119,28 @@ lean_ctor_set(x_3, 1, x_1); return x_3; } } -static lean_object* _init_l___private_Lean_Environment_0__Lean_Environment_mkFallbackConstInfo___closed__13() { +static lean_object* _init_l___private_Lean_Environment_0__Lean_Environment_mkFallbackConstInfo___closed__14() { _start: { lean_object* x_1; lean_object* x_2; lean_object* x_3; x_1 = 
l___private_Lean_Environment_0__Lean_Environment_mkFallbackConstInfo___closed__2; -x_2 = l___private_Lean_Environment_0__Lean_Environment_mkFallbackConstInfo___closed__12; +x_2 = l___private_Lean_Environment_0__Lean_Environment_mkFallbackConstInfo___closed__13; x_3 = l_Lean_Expr_const___override(x_1, x_2); return x_3; } } -static lean_object* _init_l___private_Lean_Environment_0__Lean_Environment_mkFallbackConstInfo___closed__14() { +static lean_object* _init_l___private_Lean_Environment_0__Lean_Environment_mkFallbackConstInfo___closed__15() { _start: { lean_object* x_1; lean_object* x_2; lean_object* x_3; lean_object* x_4; -x_1 = l___private_Lean_Environment_0__Lean_Environment_mkFallbackConstInfo___closed__13; -x_2 = l___private_Lean_Environment_0__Lean_Environment_mkFallbackConstInfo___closed__11; -x_3 = l___private_Lean_Environment_0__Lean_Environment_mkFallbackConstInfo___closed__10; +x_1 = l___private_Lean_Environment_0__Lean_Environment_mkFallbackConstInfo___closed__14; +x_2 = l___private_Lean_Environment_0__Lean_Environment_mkFallbackConstInfo___closed__12; +x_3 = l___private_Lean_Environment_0__Lean_Environment_mkFallbackConstInfo___closed__11; x_4 = l_Lean_mkAppB(x_1, x_2, x_3); return x_4; } } -static lean_object* _init_l___private_Lean_Environment_0__Lean_Environment_mkFallbackConstInfo___closed__15() { +static lean_object* _init_l___private_Lean_Environment_0__Lean_Environment_mkFallbackConstInfo___closed__16() { _start: { lean_object* x_1; @@ -12627,7 +12148,7 @@ x_1 = lean_mk_string_unchecked("unsupported constant kind ", 26, 26); return x_1; } } -static lean_object* _init_l___private_Lean_Environment_0__Lean_Environment_mkFallbackConstInfo___closed__16() { +static lean_object* _init_l___private_Lean_Environment_0__Lean_Environment_mkFallbackConstInfo___closed__17() { _start: { lean_object* x_1; @@ -12640,7 +12161,7 @@ LEAN_EXPORT lean_object* l___private_Lean_Environment_0__Lean_Environment_mkFall { lean_object* x_3; lean_object* x_4; lean_object* x_5; lean_object* x_6; x_3 = lean_box(0); -x_4 = l___private_Lean_Environment_0__Lean_Environment_mkFallbackConstInfo___closed__11; +x_4 = l___private_Lean_Environment_0__Lean_Environment_mkFallbackConstInfo___closed__12; lean_inc(x_1); x_5 = lean_alloc_ctor(0, 3, 0); lean_ctor_set(x_5, 0, x_1); @@ -12654,7 +12175,7 @@ lean_object* x_7; lean_object* x_8; lean_object* x_9; uint8_t x_10; lean_object* x_7 = lean_alloc_ctor(1, 2, 0); lean_ctor_set(x_7, 0, x_1); lean_ctor_set(x_7, 1, x_3); -x_8 = l___private_Lean_Environment_0__Lean_Environment_mkFallbackConstInfo___closed__14; +x_8 = l___private_Lean_Environment_0__Lean_Environment_mkFallbackConstInfo___closed__15; x_9 = lean_box(1); x_10 = 1; x_11 = lean_alloc_ctor(0, 4, 1); @@ -12673,7 +12194,7 @@ lean_object* x_13; lean_object* x_14; lean_object* x_15; lean_object* x_16; x_13 = lean_alloc_ctor(1, 2, 0); lean_ctor_set(x_13, 0, x_1); lean_ctor_set(x_13, 1, x_3); -x_14 = l___private_Lean_Environment_0__Lean_Environment_mkFallbackConstInfo___closed__14; +x_14 = l___private_Lean_Environment_0__Lean_Environment_mkFallbackConstInfo___closed__15; x_15 = lean_alloc_ctor(0, 3, 0); lean_ctor_set(x_15, 0, x_5); lean_ctor_set(x_15, 1, x_14); @@ -12701,17 +12222,17 @@ lean_dec(x_6); lean_dec(x_5); lean_dec(x_1); x_20 = lean_unsigned_to_nat(0u); -x_21 = l___private_Lean_Environment_0__Lean_reprConstantKind____x40_Lean_Environment___hyg_1396_(x_2, x_20); +x_21 = l___private_Lean_Environment_0__Lean_reprConstantKind____x40_Lean_Environment___hyg_1248_(x_2, x_20); x_22 = l_Std_Format_defWidth; x_23 
= lean_format_pretty(x_21, x_22, x_20, x_20); -x_24 = l___private_Lean_Environment_0__Lean_Environment_mkFallbackConstInfo___closed__15; +x_24 = l___private_Lean_Environment_0__Lean_Environment_mkFallbackConstInfo___closed__16; x_25 = lean_string_append(x_24, x_23); lean_dec(x_23); -x_26 = l___private_Lean_Environment_0__Lean_AsyncConsts_add___closed__3; +x_26 = l___private_Lean_Environment_0__Lean_AsyncConsts_add___closed__4; x_27 = lean_string_append(x_25, x_26); -x_28 = l___private_Lean_Environment_0__Lean_AsyncConsts_add___closed__4; -x_29 = l___private_Lean_Environment_0__Lean_Environment_mkFallbackConstInfo___closed__16; -x_30 = lean_unsigned_to_nat(948u); +x_28 = l___private_Lean_Environment_0__Lean_AsyncConsts_add___closed__5; +x_29 = l___private_Lean_Environment_0__Lean_Environment_mkFallbackConstInfo___closed__17; +x_30 = lean_unsigned_to_nat(937u); x_31 = lean_unsigned_to_nat(11u); x_32 = l___private_Init_Util_0__mkPanicMessageWithDecl(x_28, x_29, x_30, x_31, x_27); lean_dec(x_27); @@ -13626,7 +13147,7 @@ if (x_15 == 0) uint8_t x_16; lean_object* x_17; lean_object* x_18; lean_object* x_19; lean_object* x_20; lean_object* x_21; lean_object* x_22; lean_object* x_23; lean_object* x_24; lean_object* x_25; lean_object* x_26; lean_object* x_27; lean_object* x_28; lean_dec(x_1); x_16 = 1; -x_17 = l_Lean_instToStringImport___closed__1; +x_17 = l___private_Lean_Environment_0__Lean_AsyncConsts_add___closed__1; x_18 = l_Lean_Name_toString(x_2, x_16, x_17); x_19 = l_Lean_Environment_addDeclCore___closed__1; x_20 = lean_string_append(x_19, x_18); @@ -13639,7 +13160,7 @@ lean_dec(x_14); x_24 = l_Lean_Name_toString(x_23, x_16, x_17); x_25 = lean_string_append(x_22, x_24); lean_dec(x_24); -x_26 = l___private_Lean_Environment_0__Lean_AsyncConsts_add___closed__3; +x_26 = l___private_Lean_Environment_0__Lean_AsyncConsts_add___closed__4; x_27 = lean_string_append(x_25, x_26); lean_ctor_set_tag(x_10, 18); lean_ctor_set(x_10, 0, x_27); @@ -13671,7 +13192,7 @@ if (x_32 == 0) uint8_t x_33; lean_object* x_34; lean_object* x_35; lean_object* x_36; lean_object* x_37; lean_object* x_38; lean_object* x_39; lean_object* x_40; lean_object* x_41; lean_object* x_42; lean_object* x_43; lean_object* x_44; lean_object* x_45; lean_object* x_46; lean_dec(x_1); x_33 = 1; -x_34 = l_Lean_instToStringImport___closed__1; +x_34 = l___private_Lean_Environment_0__Lean_AsyncConsts_add___closed__1; x_35 = l_Lean_Name_toString(x_2, x_33, x_34); x_36 = l_Lean_Environment_addDeclCore___closed__1; x_37 = lean_string_append(x_36, x_35); @@ -13684,7 +13205,7 @@ lean_dec(x_31); x_41 = l_Lean_Name_toString(x_40, x_33, x_34); x_42 = lean_string_append(x_39, x_41); lean_dec(x_41); -x_43 = l___private_Lean_Environment_0__Lean_AsyncConsts_add___closed__3; +x_43 = l___private_Lean_Environment_0__Lean_AsyncConsts_add___closed__4; x_44 = lean_string_append(x_42, x_43); x_45 = lean_alloc_ctor(18, 1, 0); lean_ctor_set(x_45, 0, x_44); @@ -13864,7 +13385,7 @@ uint8_t x_7; lean_object* x_8; lean_object* x_9; lean_object* x_10; lean_object* lean_dec(x_2); lean_dec(x_1); x_7 = 1; -x_8 = l_Lean_instToStringImport___closed__1; +x_8 = l___private_Lean_Environment_0__Lean_AsyncConsts_add___closed__1; x_9 = l_Lean_Name_toString(x_4, x_7, x_8); x_10 = l_Lean_Environment_AddConstAsyncResult_commitSignature___closed__1; x_11 = lean_string_append(x_10, x_9); @@ -13874,7 +13395,7 @@ x_13 = lean_string_append(x_11, x_12); x_14 = l_Lean_Name_toString(x_5, x_7, x_8); x_15 = lean_string_append(x_13, x_14); lean_dec(x_14); -x_16 = 
l___private_Lean_Environment_0__Lean_AsyncConsts_add___closed__3; +x_16 = l___private_Lean_Environment_0__Lean_AsyncConsts_add___closed__4; x_17 = lean_string_append(x_15, x_16); x_18 = lean_alloc_ctor(18, 1, 0); lean_ctor_set(x_18, 0, x_17); @@ -13923,7 +13444,7 @@ lean_dec(x_2); x_5 = l_List_foldl___at_Lean_Environment_dbgFormatAsyncState___spec__32___closed__1; x_6 = lean_string_append(x_1, x_5); x_7 = 1; -x_8 = l_Lean_instToStringImport___closed__1; +x_8 = l___private_Lean_Environment_0__Lean_AsyncConsts_add___closed__1; x_9 = l_Lean_Name_toString(x_3, x_7, x_8); x_10 = lean_string_append(x_6, x_9); lean_dec(x_9); @@ -13954,12 +13475,12 @@ x_4 = lean_ctor_get(x_1, 0); lean_inc(x_4); lean_dec(x_1); x_5 = 1; -x_6 = l_Lean_instToStringImport___closed__1; +x_6 = l___private_Lean_Environment_0__Lean_AsyncConsts_add___closed__1; x_7 = l_Lean_Name_toString(x_4, x_5, x_6); -x_8 = l_List_repr___at_Lean_Environment_dbgFormatAsyncState___spec__8___closed__4; +x_8 = l_List_repr___at_Lean_Environment_dbgFormatAsyncState___spec__8___closed__6; x_9 = lean_string_append(x_8, x_7); lean_dec(x_7); -x_10 = l_List_repr___at_Lean_Environment_dbgFormatAsyncState___spec__8___closed__8; +x_10 = l_List_repr___at_Lean_Environment_dbgFormatAsyncState___spec__8___closed__10; x_11 = lean_string_append(x_9, x_10); return x_11; } @@ -13970,9 +13491,9 @@ x_12 = lean_ctor_get(x_1, 0); lean_inc(x_12); lean_dec(x_1); x_13 = 1; -x_14 = l_Lean_instToStringImport___closed__1; +x_14 = l___private_Lean_Environment_0__Lean_AsyncConsts_add___closed__1; x_15 = l_Lean_Name_toString(x_12, x_13, x_14); -x_16 = l_List_repr___at_Lean_Environment_dbgFormatAsyncState___spec__8___closed__4; +x_16 = l_List_repr___at_Lean_Environment_dbgFormatAsyncState___spec__8___closed__6; x_17 = lean_string_append(x_16, x_15); lean_dec(x_15); x_18 = l_List_foldl___at_Lean_Environment_AddConstAsyncResult_commitConst___spec__2(x_17, x_3); @@ -14206,7 +13727,7 @@ x_16 = lean_string_append(x_14, x_15); x_17 = lean_expr_dbg_to_string(x_9); x_18 = lean_string_append(x_16, x_17); lean_dec(x_17); -x_19 = l___private_Lean_Environment_0__Lean_AsyncConsts_add___closed__3; +x_19 = l___private_Lean_Environment_0__Lean_AsyncConsts_add___closed__4; x_20 = lean_string_append(x_18, x_19); x_21 = lean_alloc_ctor(18, 1, 0); lean_ctor_set(x_21, 0, x_20); @@ -14262,7 +13783,7 @@ x_18 = lean_string_append(x_16, x_17); x_19 = l_List_toString___at_Lean_Environment_AddConstAsyncResult_commitConst___spec__1(x_11); x_20 = lean_string_append(x_18, x_19); lean_dec(x_19); -x_21 = l___private_Lean_Environment_0__Lean_AsyncConsts_add___closed__3; +x_21 = l___private_Lean_Environment_0__Lean_AsyncConsts_add___closed__4; x_22 = lean_string_append(x_20, x_21); x_23 = lean_alloc_ctor(18, 1, 0); lean_ctor_set(x_23, 0, x_22); @@ -14311,7 +13832,7 @@ x_10 = lean_ctor_get(x_7, 0); lean_dec(x_10); x_11 = l_Lean_ConstantKind_ofConstantInfo(x_4); x_12 = lean_ctor_get_uint8(x_1, sizeof(void*)*7); -x_13 = l___private_Lean_Environment_0__Lean_beqConstantKind____x40_Lean_Environment___hyg_1378_(x_12, x_11); +x_13 = l___private_Lean_Environment_0__Lean_beqConstantKind____x40_Lean_Environment___hyg_1230_(x_12, x_11); if (x_13 == 0) { lean_object* x_14; lean_object* x_15; lean_object* x_16; lean_object* x_17; lean_object* x_18; lean_object* x_19; lean_object* x_20; lean_object* x_21; lean_object* x_22; lean_object* x_23; lean_object* x_24; lean_object* x_25; lean_object* x_26; lean_object* x_27; @@ -14320,7 +13841,7 @@ lean_dec(x_4); lean_dec(x_2); lean_dec(x_1); x_14 = lean_unsigned_to_nat(0u); 
-x_15 = l___private_Lean_Environment_0__Lean_reprConstantKind____x40_Lean_Environment___hyg_1396_(x_11, x_14); +x_15 = l___private_Lean_Environment_0__Lean_reprConstantKind____x40_Lean_Environment___hyg_1248_(x_11, x_14); x_16 = l_Std_Format_defWidth; x_17 = lean_format_pretty(x_15, x_16, x_14, x_14); x_18 = l_Lean_Environment_AddConstAsyncResult_commitConst___lambda__5___closed__1; @@ -14328,11 +13849,11 @@ x_19 = lean_string_append(x_18, x_17); lean_dec(x_17); x_20 = l_Lean_Environment_AddConstAsyncResult_commitSignature___closed__2; x_21 = lean_string_append(x_19, x_20); -x_22 = l___private_Lean_Environment_0__Lean_reprConstantKind____x40_Lean_Environment___hyg_1396_(x_12, x_14); +x_22 = l___private_Lean_Environment_0__Lean_reprConstantKind____x40_Lean_Environment___hyg_1248_(x_12, x_14); x_23 = lean_format_pretty(x_22, x_16, x_14, x_14); x_24 = lean_string_append(x_21, x_23); lean_dec(x_23); -x_25 = l___private_Lean_Environment_0__Lean_AsyncConsts_add___closed__3; +x_25 = l___private_Lean_Environment_0__Lean_AsyncConsts_add___closed__4; x_26 = lean_string_append(x_24, x_25); x_27 = lean_alloc_ctor(18, 1, 0); lean_ctor_set(x_27, 0, x_26); @@ -14358,7 +13879,7 @@ lean_inc(x_30); lean_dec(x_7); x_31 = l_Lean_ConstantKind_ofConstantInfo(x_4); x_32 = lean_ctor_get_uint8(x_1, sizeof(void*)*7); -x_33 = l___private_Lean_Environment_0__Lean_beqConstantKind____x40_Lean_Environment___hyg_1378_(x_32, x_31); +x_33 = l___private_Lean_Environment_0__Lean_beqConstantKind____x40_Lean_Environment___hyg_1230_(x_32, x_31); if (x_33 == 0) { lean_object* x_34; lean_object* x_35; lean_object* x_36; lean_object* x_37; lean_object* x_38; lean_object* x_39; lean_object* x_40; lean_object* x_41; lean_object* x_42; lean_object* x_43; lean_object* x_44; lean_object* x_45; lean_object* x_46; lean_object* x_47; lean_object* x_48; @@ -14367,7 +13888,7 @@ lean_dec(x_4); lean_dec(x_2); lean_dec(x_1); x_34 = lean_unsigned_to_nat(0u); -x_35 = l___private_Lean_Environment_0__Lean_reprConstantKind____x40_Lean_Environment___hyg_1396_(x_31, x_34); +x_35 = l___private_Lean_Environment_0__Lean_reprConstantKind____x40_Lean_Environment___hyg_1248_(x_31, x_34); x_36 = l_Std_Format_defWidth; x_37 = lean_format_pretty(x_35, x_36, x_34, x_34); x_38 = l_Lean_Environment_AddConstAsyncResult_commitConst___lambda__5___closed__1; @@ -14375,11 +13896,11 @@ x_39 = lean_string_append(x_38, x_37); lean_dec(x_37); x_40 = l_Lean_Environment_AddConstAsyncResult_commitSignature___closed__2; x_41 = lean_string_append(x_39, x_40); -x_42 = l___private_Lean_Environment_0__Lean_reprConstantKind____x40_Lean_Environment___hyg_1396_(x_32, x_34); +x_42 = l___private_Lean_Environment_0__Lean_reprConstantKind____x40_Lean_Environment___hyg_1248_(x_32, x_34); x_43 = lean_format_pretty(x_42, x_36, x_34, x_34); x_44 = lean_string_append(x_41, x_43); lean_dec(x_43); -x_45 = l___private_Lean_Environment_0__Lean_AsyncConsts_add___closed__3; +x_45 = l___private_Lean_Environment_0__Lean_AsyncConsts_add___closed__4; x_46 = lean_string_append(x_44, x_45); x_47 = lean_alloc_ctor(18, 1, 0); lean_ctor_set(x_47, 0, x_46); @@ -14461,7 +13982,7 @@ uint8_t x_10; lean_object* x_11; lean_object* x_12; lean_object* x_13; lean_obje lean_dec(x_2); lean_dec(x_1); x_10 = 1; -x_11 = l_Lean_instToStringImport___closed__1; +x_11 = l___private_Lean_Environment_0__Lean_AsyncConsts_add___closed__1; x_12 = l_Lean_Name_toString(x_8, x_10, x_11); x_13 = l_Lean_Environment_AddConstAsyncResult_commitConst___closed__1; x_14 = lean_string_append(x_13, x_12); @@ -15379,7 +14900,7 @@ 
lean_dec(x_4); x_7 = lean_ctor_get_uint8(x_6, sizeof(void*)*3); lean_dec(x_6); x_8 = 6; -x_9 = l___private_Lean_Environment_0__Lean_beqConstantKind____x40_Lean_Environment___hyg_1378_(x_7, x_8); +x_9 = l___private_Lean_Environment_0__Lean_beqConstantKind____x40_Lean_Environment___hyg_1230_(x_7, x_8); return x_9; } } @@ -15706,7 +15227,7 @@ x_2 = l_Lean_instInhabitedEnvExtension___closed__2; return x_2; } } -static lean_object* _init_l_Lean_EnvExtension_initFn____x40_Lean_Environment___hyg_6793____closed__1() { +static lean_object* _init_l_Lean_EnvExtension_initFn____x40_Lean_Environment___hyg_6645____closed__1() { _start: { lean_object* x_1; lean_object* x_2; @@ -15715,11 +15236,11 @@ x_2 = lean_array_mk(x_1); return x_2; } } -LEAN_EXPORT lean_object* l_Lean_EnvExtension_initFn____x40_Lean_Environment___hyg_6793_(lean_object* x_1) { +LEAN_EXPORT lean_object* l_Lean_EnvExtension_initFn____x40_Lean_Environment___hyg_6645_(lean_object* x_1) { _start: { lean_object* x_2; lean_object* x_3; uint8_t x_4; -x_2 = l_Lean_EnvExtension_initFn____x40_Lean_Environment___hyg_6793____closed__1; +x_2 = l_Lean_EnvExtension_initFn____x40_Lean_Environment___hyg_6645____closed__1; x_3 = lean_st_mk_ref(x_2, x_1); x_4 = !lean_is_exclusive(x_3); if (x_4 == 0) @@ -15946,9 +15467,9 @@ static lean_object* _init_l___private_Lean_Environment_0__Lean_EnvExtension_setS _start: { lean_object* x_1; lean_object* x_2; lean_object* x_3; lean_object* x_4; lean_object* x_5; lean_object* x_6; -x_1 = l___private_Lean_Environment_0__Lean_AsyncConsts_add___closed__4; +x_1 = l___private_Lean_Environment_0__Lean_AsyncConsts_add___closed__5; x_2 = l___private_Lean_Environment_0__Lean_EnvExtension_setStateImpl___rarg___closed__1; -x_3 = lean_unsigned_to_nat(1260u); +x_3 = lean_unsigned_to_nat(1249u); x_4 = lean_unsigned_to_nat(4u); x_5 = l___private_Lean_Environment_0__Lean_EnvExtension_invalidExtMsg; x_6 = l___private_Init_Util_0__mkPanicMessageWithDecl(x_1, x_2, x_3, x_4, x_5); @@ -16016,9 +15537,9 @@ static lean_object* _init_l___private_Lean_Environment_0__Lean_EnvExtension_modi _start: { lean_object* x_1; lean_object* x_2; lean_object* x_3; lean_object* x_4; lean_object* x_5; lean_object* x_6; -x_1 = l___private_Lean_Environment_0__Lean_AsyncConsts_add___closed__4; +x_1 = l___private_Lean_Environment_0__Lean_AsyncConsts_add___closed__5; x_2 = l___private_Lean_Environment_0__Lean_EnvExtension_modifyStateImpl___rarg___closed__1; -x_3 = lean_unsigned_to_nat(1271u); +x_3 = lean_unsigned_to_nat(1260u); x_4 = lean_unsigned_to_nat(4u); x_5 = l___private_Lean_Environment_0__Lean_EnvExtension_invalidExtMsg; x_6 = l___private_Init_Util_0__mkPanicMessageWithDecl(x_1, x_2, x_3, x_4, x_5); @@ -16082,9 +15603,9 @@ static lean_object* _init_l___private_Lean_Environment_0__Lean_EnvExtension_getS _start: { lean_object* x_1; lean_object* x_2; lean_object* x_3; lean_object* x_4; lean_object* x_5; lean_object* x_6; -x_1 = l___private_Lean_Environment_0__Lean_AsyncConsts_add___closed__4; +x_1 = l___private_Lean_Environment_0__Lean_AsyncConsts_add___closed__5; x_2 = l___private_Lean_Environment_0__Lean_EnvExtension_getStateImpl___rarg___closed__1; -x_3 = lean_unsigned_to_nat(1277u); +x_3 = lean_unsigned_to_nat(1266u); x_4 = lean_unsigned_to_nat(4u); x_5 = l___private_Lean_Environment_0__Lean_EnvExtension_invalidExtMsg; x_6 = l___private_Init_Util_0__mkPanicMessageWithDecl(x_1, x_2, x_3, x_4, x_5); @@ -16688,16 +16209,16 @@ x_13 = lean_ctor_get(x_10, 0); lean_inc(x_13); lean_dec(x_10); x_14 = 1; -x_15 = l_Lean_instToStringImport___closed__1; +x_15 = 
l___private_Lean_Environment_0__Lean_AsyncConsts_add___closed__1; x_16 = l_Lean_Name_toString(x_13, x_14, x_15); x_17 = l_Lean_EnvExtension_modifyState___rarg___closed__1; x_18 = lean_string_append(x_17, x_16); lean_dec(x_16); x_19 = l_Lean_EnvExtension_modifyState___rarg___closed__2; x_20 = lean_string_append(x_18, x_19); -x_21 = l___private_Lean_Environment_0__Lean_AsyncConsts_add___closed__4; +x_21 = l___private_Lean_Environment_0__Lean_AsyncConsts_add___closed__5; x_22 = l_Lean_EnvExtension_modifyState___rarg___closed__3; -x_23 = lean_unsigned_to_nat(1306u); +x_23 = lean_unsigned_to_nat(1295u); x_24 = lean_unsigned_to_nat(15u); x_25 = l___private_Init_Util_0__mkPanicMessageWithDecl(x_21, x_22, x_23, x_24, x_20); lean_dec(x_20); @@ -16971,7 +16492,7 @@ x_91 = lean_ctor_get(x_89, 0); lean_inc(x_91); lean_dec(x_89); x_92 = 1; -x_93 = l_Lean_instToStringImport___closed__1; +x_93 = l___private_Lean_Environment_0__Lean_AsyncConsts_add___closed__1; x_94 = l_Lean_Name_toString(x_91, x_92, x_93); if (x_90 == 0) { @@ -16981,9 +16502,9 @@ x_96 = lean_string_append(x_95, x_94); lean_dec(x_94); x_97 = l_Lean_EnvExtension_modifyState___rarg___closed__2; x_98 = lean_string_append(x_96, x_97); -x_99 = l___private_Lean_Environment_0__Lean_AsyncConsts_add___closed__4; +x_99 = l___private_Lean_Environment_0__Lean_AsyncConsts_add___closed__5; x_100 = l_Lean_EnvExtension_modifyState___rarg___closed__3; -x_101 = lean_unsigned_to_nat(1298u); +x_101 = lean_unsigned_to_nat(1287u); x_102 = lean_unsigned_to_nat(13u); x_103 = l___private_Init_Util_0__mkPanicMessageWithDecl(x_99, x_100, x_101, x_102, x_98); lean_dec(x_98); @@ -16998,9 +16519,9 @@ x_106 = lean_string_append(x_105, x_94); lean_dec(x_94); x_107 = l_Lean_EnvExtension_modifyState___rarg___closed__2; x_108 = lean_string_append(x_106, x_107); -x_109 = l___private_Lean_Environment_0__Lean_AsyncConsts_add___closed__4; +x_109 = l___private_Lean_Environment_0__Lean_AsyncConsts_add___closed__5; x_110 = l_Lean_EnvExtension_modifyState___rarg___closed__3; -x_111 = lean_unsigned_to_nat(1298u); +x_111 = lean_unsigned_to_nat(1287u); x_112 = lean_unsigned_to_nat(13u); x_113 = l___private_Init_Util_0__mkPanicMessageWithDecl(x_109, x_110, x_111, x_112, x_108); lean_dec(x_108); @@ -17051,16 +16572,16 @@ x_123 = lean_ctor_get(x_120, 0); lean_inc(x_123); lean_dec(x_120); x_124 = 1; -x_125 = l_Lean_instToStringImport___closed__1; +x_125 = l___private_Lean_Environment_0__Lean_AsyncConsts_add___closed__1; x_126 = l_Lean_Name_toString(x_123, x_124, x_125); x_127 = l_Lean_EnvExtension_modifyState___rarg___closed__1; x_128 = lean_string_append(x_127, x_126); lean_dec(x_126); x_129 = l_Lean_EnvExtension_modifyState___rarg___closed__2; x_130 = lean_string_append(x_128, x_129); -x_131 = l___private_Lean_Environment_0__Lean_AsyncConsts_add___closed__4; +x_131 = l___private_Lean_Environment_0__Lean_AsyncConsts_add___closed__5; x_132 = l_Lean_EnvExtension_modifyState___rarg___closed__3; -x_133 = lean_unsigned_to_nat(1306u); +x_133 = lean_unsigned_to_nat(1295u); x_134 = lean_unsigned_to_nat(15u); x_135 = l___private_Init_Util_0__mkPanicMessageWithDecl(x_131, x_132, x_133, x_134, x_130); lean_dec(x_130); @@ -17218,16 +16739,16 @@ x_14 = lean_ctor_get(x_11, 0); lean_inc(x_14); lean_dec(x_11); x_15 = 1; -x_16 = l_Lean_instToStringImport___closed__1; +x_16 = l___private_Lean_Environment_0__Lean_AsyncConsts_add___closed__1; x_17 = l_Lean_Name_toString(x_14, x_15, x_16); x_18 = l_Lean_EnvExtension_modifyState___rarg___closed__1; x_19 = lean_string_append(x_18, x_17); lean_dec(x_17); x_20 
= l_Lean_EnvExtension_modifyState___rarg___closed__2; x_21 = lean_string_append(x_19, x_20); -x_22 = l___private_Lean_Environment_0__Lean_AsyncConsts_add___closed__4; +x_22 = l___private_Lean_Environment_0__Lean_AsyncConsts_add___closed__5; x_23 = l_Lean_EnvExtension_modifyState___rarg___closed__3; -x_24 = lean_unsigned_to_nat(1306u); +x_24 = lean_unsigned_to_nat(1295u); x_25 = lean_unsigned_to_nat(15u); x_26 = l___private_Init_Util_0__mkPanicMessageWithDecl(x_22, x_23, x_24, x_25, x_21); lean_dec(x_21); @@ -17501,7 +17022,7 @@ x_92 = lean_ctor_get(x_90, 0); lean_inc(x_92); lean_dec(x_90); x_93 = 1; -x_94 = l_Lean_instToStringImport___closed__1; +x_94 = l___private_Lean_Environment_0__Lean_AsyncConsts_add___closed__1; x_95 = l_Lean_Name_toString(x_92, x_93, x_94); if (x_91 == 0) { @@ -17511,9 +17032,9 @@ x_97 = lean_string_append(x_96, x_95); lean_dec(x_95); x_98 = l_Lean_EnvExtension_modifyState___rarg___closed__2; x_99 = lean_string_append(x_97, x_98); -x_100 = l___private_Lean_Environment_0__Lean_AsyncConsts_add___closed__4; +x_100 = l___private_Lean_Environment_0__Lean_AsyncConsts_add___closed__5; x_101 = l_Lean_EnvExtension_modifyState___rarg___closed__3; -x_102 = lean_unsigned_to_nat(1298u); +x_102 = lean_unsigned_to_nat(1287u); x_103 = lean_unsigned_to_nat(13u); x_104 = l___private_Init_Util_0__mkPanicMessageWithDecl(x_100, x_101, x_102, x_103, x_99); lean_dec(x_99); @@ -17528,9 +17049,9 @@ x_107 = lean_string_append(x_106, x_95); lean_dec(x_95); x_108 = l_Lean_EnvExtension_modifyState___rarg___closed__2; x_109 = lean_string_append(x_107, x_108); -x_110 = l___private_Lean_Environment_0__Lean_AsyncConsts_add___closed__4; +x_110 = l___private_Lean_Environment_0__Lean_AsyncConsts_add___closed__5; x_111 = l_Lean_EnvExtension_modifyState___rarg___closed__3; -x_112 = lean_unsigned_to_nat(1298u); +x_112 = lean_unsigned_to_nat(1287u); x_113 = lean_unsigned_to_nat(13u); x_114 = l___private_Init_Util_0__mkPanicMessageWithDecl(x_110, x_111, x_112, x_113, x_109); lean_dec(x_109); @@ -17581,16 +17102,16 @@ x_124 = lean_ctor_get(x_121, 0); lean_inc(x_124); lean_dec(x_121); x_125 = 1; -x_126 = l_Lean_instToStringImport___closed__1; +x_126 = l___private_Lean_Environment_0__Lean_AsyncConsts_add___closed__1; x_127 = l_Lean_Name_toString(x_124, x_125, x_126); x_128 = l_Lean_EnvExtension_modifyState___rarg___closed__1; x_129 = lean_string_append(x_128, x_127); lean_dec(x_127); x_130 = l_Lean_EnvExtension_modifyState___rarg___closed__2; x_131 = lean_string_append(x_129, x_130); -x_132 = l___private_Lean_Environment_0__Lean_AsyncConsts_add___closed__4; +x_132 = l___private_Lean_Environment_0__Lean_AsyncConsts_add___closed__5; x_133 = l_Lean_EnvExtension_modifyState___rarg___closed__3; -x_134 = lean_unsigned_to_nat(1306u); +x_134 = lean_unsigned_to_nat(1295u); x_135 = lean_unsigned_to_nat(15u); x_136 = l___private_Init_Util_0__mkPanicMessageWithDecl(x_132, x_133, x_134, x_135, x_131); lean_dec(x_131); @@ -17649,9 +17170,9 @@ static lean_object* _init_l___private_Lean_Environment_0__Lean_EnvExtension_getS _start: { lean_object* x_1; lean_object* x_2; lean_object* x_3; lean_object* x_4; lean_object* x_5; lean_object* x_6; -x_1 = l___private_Lean_Environment_0__Lean_AsyncConsts_add___closed__4; +x_1 = l___private_Lean_Environment_0__Lean_AsyncConsts_add___closed__5; x_2 = l___private_Lean_Environment_0__Lean_EnvExtension_getStateUnsafe___rarg___closed__1; -x_3 = lean_unsigned_to_nat(1324u); +x_3 = lean_unsigned_to_nat(1313u); x_4 = lean_unsigned_to_nat(17u); x_5 = 
l___private_Lean_Environment_0__Lean_EnvExtension_getStateUnsafe___rarg___closed__2; x_6 = l___private_Init_Util_0__mkPanicMessageWithDecl(x_1, x_2, x_3, x_4, x_5); @@ -18384,7 +17905,7 @@ lean_object* x_6; lean_object* x_7; uint8_t x_8; lean_object* x_9; lean_object* x_6 = lean_ctor_get(x_4, 0); x_7 = lean_box(0); x_8 = 0; -x_9 = l_Lean_EnvExtension_initFn____x40_Lean_Environment___hyg_6793____closed__1; +x_9 = l_Lean_EnvExtension_initFn____x40_Lean_Environment___hyg_6645____closed__1; x_10 = lean_alloc_ctor(0, 5, 5); lean_ctor_set(x_10, 0, x_7); lean_ctor_set(x_10, 1, x_9); @@ -18438,7 +17959,7 @@ lean_inc(x_23); lean_dec(x_4); x_25 = lean_box(0); x_26 = 0; -x_27 = l_Lean_EnvExtension_initFn____x40_Lean_Environment___hyg_6793____closed__1; +x_27 = l_Lean_EnvExtension_initFn____x40_Lean_Environment___hyg_6645____closed__1; x_28 = lean_alloc_ctor(0, 5, 5); lean_ctor_set(x_28, 0, x_25); lean_ctor_set(x_28, 1, x_27); @@ -18600,7 +18121,7 @@ LEAN_EXPORT lean_object* l_Lean_instInhabitedPersistentEnvExtensionState___rarg( _start: { lean_object* x_2; lean_object* x_3; -x_2 = l_Lean_EnvExtension_initFn____x40_Lean_Environment___hyg_6793____closed__1; +x_2 = l_Lean_EnvExtension_initFn____x40_Lean_Environment___hyg_6645____closed__1; x_3 = lean_alloc_ctor(0, 2, 0); lean_ctor_set(x_3, 0, x_2); lean_ctor_set(x_3, 1, x_1); @@ -18630,7 +18151,7 @@ LEAN_EXPORT lean_object* l_Lean_instInhabitedPersistentEnvExtension___lambda__2( _start: { lean_object* x_2; -x_2 = l_Lean_EnvExtension_initFn____x40_Lean_Environment___hyg_6793____closed__1; +x_2 = l_Lean_EnvExtension_initFn____x40_Lean_Environment___hyg_6645____closed__1; return x_2; } } @@ -19041,11 +18562,11 @@ lean_dec(x_2); return x_5; } } -LEAN_EXPORT lean_object* l_Lean_initFn____x40_Lean_Environment___hyg_8688_(lean_object* x_1) { +LEAN_EXPORT lean_object* l_Lean_initFn____x40_Lean_Environment___hyg_8540_(lean_object* x_1) { _start: { lean_object* x_2; lean_object* x_3; uint8_t x_4; -x_2 = l_Lean_EnvExtension_initFn____x40_Lean_Environment___hyg_6793____closed__1; +x_2 = l_Lean_EnvExtension_initFn____x40_Lean_Environment___hyg_6645____closed__1; x_3 = lean_st_mk_ref(x_2, x_1); x_4 = !lean_is_exclusive(x_3); if (x_4 == 0) @@ -19409,7 +18930,7 @@ LEAN_EXPORT lean_object* l_Lean_registerPersistentEnvExtensionUnsafe___rarg___la _start: { lean_object* x_3; lean_object* x_4; lean_object* x_5; -x_3 = l_Lean_EnvExtension_initFn____x40_Lean_Environment___hyg_6793____closed__1; +x_3 = l_Lean_EnvExtension_initFn____x40_Lean_Environment___hyg_6645____closed__1; x_4 = lean_alloc_ctor(0, 2, 0); lean_ctor_set(x_4, 0, x_3); lean_ctor_set(x_4, 1, x_1); @@ -19825,7 +19346,7 @@ x_18 = lean_ctor_get(x_1, 0); lean_inc(x_18); lean_dec(x_1); x_19 = 1; -x_20 = l_Lean_instToStringImport___closed__1; +x_20 = l___private_Lean_Environment_0__Lean_AsyncConsts_add___closed__1; x_21 = l_Lean_Name_toString(x_18, x_19, x_20); x_22 = l_Lean_registerPersistentEnvExtensionUnsafe___rarg___closed__1; x_23 = lean_string_append(x_22, x_21); @@ -19882,7 +19403,7 @@ x_39 = lean_ctor_get(x_1, 0); lean_inc(x_39); lean_dec(x_1); x_40 = 1; -x_41 = l_Lean_instToStringImport___closed__1; +x_41 = l___private_Lean_Environment_0__Lean_AsyncConsts_add___closed__1; x_42 = l_Lean_Name_toString(x_39, x_40, x_41); x_43 = l_Lean_registerPersistentEnvExtensionUnsafe___rarg___closed__1; x_44 = lean_string_append(x_43, x_42); @@ -20034,9 +19555,9 @@ static lean_object* _init_l_Lean_readModuleData___closed__4() { _start: { lean_object* x_1; lean_object* x_2; lean_object* x_3; lean_object* x_4; lean_object* 
x_5; lean_object* x_6; -x_1 = l___private_Lean_Environment_0__Lean_AsyncConsts_add___closed__4; +x_1 = l___private_Lean_Environment_0__Lean_AsyncConsts_add___closed__5; x_2 = l_Lean_readModuleData___closed__3; -x_3 = lean_unsigned_to_nat(1609u); +x_3 = lean_unsigned_to_nat(1598u); x_4 = lean_unsigned_to_nat(2u); x_5 = l_Lean_readModuleData___closed__2; x_6 = l___private_Init_Util_0__mkPanicMessageWithDecl(x_1, x_2, x_3, x_4, x_5); @@ -20055,9 +19576,9 @@ static lean_object* _init_l_Lean_readModuleData___closed__6() { _start: { lean_object* x_1; lean_object* x_2; lean_object* x_3; lean_object* x_4; lean_object* x_5; lean_object* x_6; -x_1 = l___private_Lean_Environment_0__Lean_AsyncConsts_add___closed__4; +x_1 = l___private_Lean_Environment_0__Lean_AsyncConsts_add___closed__5; x_2 = l_Lean_readModuleData___closed__3; -x_3 = lean_unsigned_to_nat(1610u); +x_3 = lean_unsigned_to_nat(1599u); x_4 = lean_unsigned_to_nat(31u); x_5 = l_Lean_readModuleData___closed__5; x_6 = l___private_Init_Util_0__mkPanicMessageWithDecl(x_1, x_2, x_3, x_4, x_5); @@ -20832,7 +20353,7 @@ LEAN_EXPORT lean_object* l_Lean_RBTree_toArray___at_Lean_mkModuleData___spec__3( _start: { lean_object* x_2; lean_object* x_3; -x_2 = l_Lean_EnvExtension_initFn____x40_Lean_Environment___hyg_6793____closed__1; +x_2 = l_Lean_EnvExtension_initFn____x40_Lean_Environment___hyg_6645____closed__1; x_3 = l_Lean_RBNode_fold___at_Lean_mkModuleData___spec__4(x_2, x_1); return x_3; } @@ -20916,7 +20437,7 @@ if (x_6 == 0) lean_object* x_7; lean_dec(x_2); lean_dec(x_1); -x_7 = l_Lean_EnvExtension_initFn____x40_Lean_Environment___hyg_6793____closed__1; +x_7 = l_Lean_EnvExtension_initFn____x40_Lean_Environment___hyg_6645____closed__1; return x_7; } else @@ -20930,7 +20451,7 @@ if (x_9 == 0) lean_object* x_10; lean_dec(x_2); lean_dec(x_1); -x_10 = l_Lean_EnvExtension_initFn____x40_Lean_Environment___hyg_6793____closed__1; +x_10 = l_Lean_EnvExtension_initFn____x40_Lean_Environment___hyg_6645____closed__1; return x_10; } else @@ -20938,7 +20459,7 @@ else size_t x_11; size_t x_12; lean_object* x_13; lean_object* x_14; x_11 = lean_usize_of_nat(x_4); x_12 = lean_usize_of_nat(x_5); -x_13 = l_Lean_EnvExtension_initFn____x40_Lean_Environment___hyg_6793____closed__1; +x_13 = l_Lean_EnvExtension_initFn____x40_Lean_Environment___hyg_6645____closed__1; x_14 = l_Array_foldlMUnsafe_fold___at_Lean_mkModuleData___spec__6(x_1, x_2, x_3, x_11, x_12, x_13); return x_14; } @@ -20971,6 +20492,14 @@ goto _start; } } } +LEAN_EXPORT lean_object* l_Lean_PersistentHashMap_foldlM___at_Lean_mkModuleData___spec__8(lean_object* x_1, lean_object* x_2, lean_object* x_3) { +_start: +{ +lean_object* x_4; +x_4 = l_Lean_PersistentHashMap_foldlMAux___at_Lean_Environment_dbgFormatAsyncState___spec__25___rarg(x_2, x_1, x_3); +return x_4; +} +} LEAN_EXPORT lean_object* l_Lean_mkModuleData___lambda__1(lean_object* x_1, lean_object* x_2, lean_object* x_3) { _start: { @@ -20979,6 +20508,14 @@ x_4 = lean_array_push(x_1, x_2); return x_4; } } +LEAN_EXPORT lean_object* l_Lean_mkModuleData___lambda__2(lean_object* x_1, lean_object* x_2, lean_object* x_3) { +_start: +{ +lean_object* x_4; +x_4 = lean_array_push(x_1, x_3); +return x_4; +} +} static lean_object* _init_l_Lean_mkModuleData___closed__1() { _start: { @@ -20987,10 +20524,18 @@ x_1 = lean_alloc_closure((void*)(l_Lean_mkModuleData___lambda__1___boxed), 3, 0) return x_1; } } +static lean_object* _init_l_Lean_mkModuleData___closed__2() { +_start: +{ +lean_object* x_1; +x_1 = 
lean_alloc_closure((void*)(l_Lean_mkModuleData___lambda__2___boxed), 3, 0); +return x_1; +} +} LEAN_EXPORT lean_object* l_Lean_mkModuleData(lean_object* x_1, uint8_t x_2, lean_object* x_3) { _start: { -lean_object* x_4; lean_object* x_5; lean_object* x_6; lean_object* x_7; lean_object* x_8; size_t x_9; size_t x_10; lean_object* x_11; lean_object* x_12; uint8_t x_13; uint8_t x_14; lean_object* x_15; lean_object* x_16; lean_object* x_17; lean_object* x_18; lean_object* x_19; lean_object* x_20; lean_object* x_21; lean_object* x_22; lean_object* x_23; lean_object* x_24; uint8_t x_25; +lean_object* x_4; lean_object* x_5; lean_object* x_6; lean_object* x_7; lean_object* x_8; size_t x_9; size_t x_10; lean_object* x_11; lean_object* x_12; uint8_t x_13; uint8_t x_14; lean_object* x_15; lean_object* x_16; lean_object* x_17; lean_object* x_18; lean_object* x_19; lean_object* x_20; lean_object* x_21; lean_object* x_22; lean_object* x_23; uint8_t x_24; x_4 = l_Lean_registerPersistentEnvExtensionUnsafe___rarg___lambda__3___closed__2; x_5 = lean_st_ref_get(x_4, x_3); x_6 = lean_ctor_get(x_5, 0); @@ -21019,56 +20564,57 @@ x_16 = lean_ctor_get(x_15, 1); lean_inc(x_16); lean_dec(x_15); x_17 = l_Lean_mkModuleData___closed__1; -x_18 = l_Lean_EnvExtension_initFn____x40_Lean_Environment___hyg_6793____closed__1; +x_18 = l_Lean_EnvExtension_initFn____x40_Lean_Environment___hyg_6645____closed__1; x_19 = l_Lean_PersistentHashMap_foldlMAux___at_Lean_Environment_dbgFormatAsyncState___spec__25___rarg(x_17, x_16, x_18); -lean_dec(x_16); -x_20 = lean_array_get_size(x_19); -x_21 = lean_ctor_get(x_1, 2); -lean_inc(x_21); -x_22 = lean_task_get_own(x_21); -x_23 = lean_ctor_get(x_22, 4); -lean_inc(x_23); -lean_dec(x_22); -x_24 = l_Lean_RBTree_toArray___at_Lean_mkModuleData___spec__3(x_23); +x_20 = lean_ctor_get(x_1, 2); +lean_inc(x_20); +x_21 = lean_task_get_own(x_20); +x_22 = lean_ctor_get(x_21, 4); +lean_inc(x_22); +lean_dec(x_21); +x_23 = l_Lean_RBTree_toArray___at_Lean_mkModuleData___spec__3(x_22); if (x_14 == 0) { -uint8_t x_37; -x_37 = 1; -x_25 = x_37; -goto block_36; +uint8_t x_45; +x_45 = 1; +x_24 = x_45; +goto block_44; } else { -uint8_t x_38; -x_38 = 0; -x_25 = x_38; -goto block_36; +uint8_t x_46; +x_46 = 0; +x_24 = x_46; +goto block_44; } -block_36: +block_44: { -lean_object* x_26; lean_object* x_27; lean_object* x_28; size_t x_29; lean_object* x_30; lean_object* x_31; uint8_t x_32; lean_object* x_33; lean_object* x_34; lean_object* x_35; -x_26 = l_Lean_Environment_setExporting(x_1, x_25); -x_27 = lean_unsigned_to_nat(0u); -lean_inc(x_26); -x_28 = l_Array_filterMapM___at_Lean_mkModuleData___spec__5(x_12, x_26, x_19, x_27, x_20); -lean_dec(x_20); -lean_dec(x_19); -x_29 = lean_array_size(x_28); +lean_object* x_25; lean_object* x_26; +x_25 = l_Lean_Environment_setExporting(x_1, x_24); +x_26 = l_Lean_Environment_header(x_25); +if (x_14 == 0) +{ +uint8_t x_27; lean_object* x_28; lean_object* x_29; lean_object* x_30; lean_object* x_31; size_t x_32; lean_object* x_33; lean_object* x_34; lean_object* x_35; +lean_dec(x_16); +x_27 = lean_ctor_get_uint8(x_26, sizeof(void*)*5 + 4); +x_28 = lean_ctor_get(x_26, 1); lean_inc(x_28); -x_30 = l_Array_mapMUnsafe_map___at_Lean_mkModuleData___spec__7(x_29, x_10, x_28); -x_31 = l_Lean_Environment_header(x_26); lean_dec(x_26); -x_32 = lean_ctor_get_uint8(x_31, sizeof(void*)*5 + 4); -x_33 = lean_ctor_get(x_31, 1); -lean_inc(x_33); -lean_dec(x_31); +x_29 = lean_array_get_size(x_19); +x_30 = lean_unsigned_to_nat(0u); +x_31 = l_Array_filterMapM___at_Lean_mkModuleData___spec__5(x_12, x_25, 
x_19, x_30, x_29); +lean_dec(x_29); +lean_dec(x_19); +x_32 = lean_array_size(x_31); +lean_inc(x_31); +x_33 = l_Array_mapMUnsafe_map___at_Lean_mkModuleData___spec__7(x_32, x_10, x_31); x_34 = lean_alloc_ctor(0, 5, 1); -lean_ctor_set(x_34, 0, x_33); -lean_ctor_set(x_34, 1, x_30); -lean_ctor_set(x_34, 2, x_28); -lean_ctor_set(x_34, 3, x_24); +lean_ctor_set(x_34, 0, x_28); +lean_ctor_set(x_34, 1, x_33); +lean_ctor_set(x_34, 2, x_31); +lean_ctor_set(x_34, 3, x_23); lean_ctor_set(x_34, 4, x_11); -lean_ctor_set_uint8(x_34, sizeof(void*)*5, x_32); +lean_ctor_set_uint8(x_34, sizeof(void*)*5, x_27); if (lean_is_scalar(x_8)) { x_35 = lean_alloc_ctor(0, 2, 0); } else { @@ -21078,6 +20624,39 @@ lean_ctor_set(x_35, 0, x_34); lean_ctor_set(x_35, 1, x_7); return x_35; } +else +{ +uint8_t x_36; lean_object* x_37; lean_object* x_38; lean_object* x_39; size_t x_40; lean_object* x_41; lean_object* x_42; lean_object* x_43; +lean_dec(x_25); +lean_dec(x_19); +lean_dec(x_12); +x_36 = lean_ctor_get_uint8(x_26, sizeof(void*)*5 + 4); +x_37 = lean_ctor_get(x_26, 1); +lean_inc(x_37); +lean_dec(x_26); +x_38 = l_Lean_mkModuleData___closed__2; +x_39 = l_Lean_PersistentHashMap_foldlMAux___at_Lean_Environment_dbgFormatAsyncState___spec__25___rarg(x_38, x_16, x_18); +lean_dec(x_16); +x_40 = lean_array_size(x_39); +lean_inc(x_39); +x_41 = l_Array_mapMUnsafe_map___at_Lean_mkModuleData___spec__7(x_40, x_10, x_39); +x_42 = lean_alloc_ctor(0, 5, 1); +lean_ctor_set(x_42, 0, x_37); +lean_ctor_set(x_42, 1, x_41); +lean_ctor_set(x_42, 2, x_39); +lean_ctor_set(x_42, 3, x_23); +lean_ctor_set(x_42, 4, x_11); +lean_ctor_set_uint8(x_42, sizeof(void*)*5, x_36); +if (lean_is_scalar(x_8)) { + x_43 = lean_alloc_ctor(0, 2, 0); +} else { + x_43 = x_8; +} +lean_ctor_set(x_43, 0, x_42); +lean_ctor_set(x_43, 1, x_7); +return x_43; +} +} } } LEAN_EXPORT lean_object* l_Array_mapMUnsafe_map___at_Lean_mkModuleData___spec__1___lambda__1___boxed(lean_object* x_1, lean_object* x_2, lean_object* x_3, lean_object* x_4, lean_object* x_5) { @@ -21152,6 +20731,15 @@ x_6 = l_Array_mapMUnsafe_map___at_Lean_mkModuleData___spec__7(x_4, x_5, x_3); return x_6; } } +LEAN_EXPORT lean_object* l_Lean_PersistentHashMap_foldlM___at_Lean_mkModuleData___spec__8___boxed(lean_object* x_1, lean_object* x_2, lean_object* x_3) { +_start: +{ +lean_object* x_4; +x_4 = l_Lean_PersistentHashMap_foldlM___at_Lean_mkModuleData___spec__8(x_1, x_2, x_3); +lean_dec(x_1); +return x_4; +} +} LEAN_EXPORT lean_object* l_Lean_mkModuleData___lambda__1___boxed(lean_object* x_1, lean_object* x_2, lean_object* x_3) { _start: { @@ -21161,6 +20749,15 @@ lean_dec(x_3); return x_4; } } +LEAN_EXPORT lean_object* l_Lean_mkModuleData___lambda__2___boxed(lean_object* x_1, lean_object* x_2, lean_object* x_3) { +_start: +{ +lean_object* x_4; +x_4 = l_Lean_mkModuleData___lambda__2(x_1, x_2, x_3); +lean_dec(x_2); +return x_4; +} +} LEAN_EXPORT lean_object* l_Lean_mkModuleData___boxed(lean_object* x_1, lean_object* x_2, lean_object* x_3) { _start: { @@ -21924,7 +21521,7 @@ LEAN_EXPORT lean_object* l___private_Lean_Environment_0__Lean_setImportedEntries { lean_object* x_3; lean_object* x_4; lean_object* x_5; uint8_t x_6; x_3 = lean_array_get_size(x_1); -x_4 = l_Lean_EnvExtension_initFn____x40_Lean_Environment___hyg_6793____closed__1; +x_4 = l_Lean_EnvExtension_initFn____x40_Lean_Environment___hyg_6645____closed__1; x_5 = lean_mk_array(x_3, x_4); x_6 = !lean_is_exclusive(x_2); if (x_6 == 0) @@ -24542,7 +24139,7 @@ x_6 = lean_ctor_get(x_1, 1); x_7 = l_Lean_instInhabitedName; x_8 = lean_array_get(x_7, x_6, x_3); 
x_9 = 1; -x_10 = l_Lean_instToStringImport___closed__1; +x_10 = l___private_Lean_Environment_0__Lean_AsyncConsts_add___closed__1; x_11 = l_Lean_Name_toString(x_8, x_9, x_10); x_12 = l_Lean_throwAlreadyImported___rarg___closed__1; x_13 = lean_string_append(x_12, x_11); @@ -24587,7 +24184,7 @@ lean_dec(x_38); x_40 = l_Lean_Name_toString(x_39, x_9, x_10); x_41 = lean_string_append(x_19, x_40); lean_dec(x_40); -x_42 = l___private_Lean_Environment_0__Lean_AsyncConsts_add___closed__3; +x_42 = l___private_Lean_Environment_0__Lean_AsyncConsts_add___closed__4; x_43 = lean_string_append(x_41, x_42); x_44 = lean_alloc_ctor(18, 1, 0); lean_ctor_set(x_44, 0, x_43); @@ -24627,7 +24224,7 @@ lean_dec(x_61); x_63 = l_Lean_Name_toString(x_62, x_9, x_10); x_64 = lean_string_append(x_19, x_63); lean_dec(x_63); -x_65 = l___private_Lean_Environment_0__Lean_AsyncConsts_add___closed__3; +x_65 = l___private_Lean_Environment_0__Lean_AsyncConsts_add___closed__4; x_66 = lean_string_append(x_64, x_65); x_67 = lean_alloc_ctor(18, 1, 0); lean_ctor_set(x_67, 0, x_66); @@ -24820,6 +24417,296 @@ x_2 = lean_alloc_closure((void*)(l_Lean_ImportStateM_run___rarg), 3, 0); return x_2; } } +LEAN_EXPORT lean_object* l_Lean_ModuleArtifacts_oleanParts(lean_object* x_1) { +_start: +{ +lean_object* x_2; +x_2 = lean_ctor_get(x_1, 1); +lean_inc(x_2); +if (lean_obj_tag(x_2) == 0) +{ +lean_object* x_3; +lean_dec(x_1); +x_3 = l_Lean_EnvExtension_initFn____x40_Lean_Environment___hyg_6645____closed__1; +return x_3; +} +else +{ +lean_object* x_4; lean_object* x_5; lean_object* x_6; lean_object* x_7; +x_4 = lean_ctor_get(x_2, 0); +lean_inc(x_4); +lean_dec(x_2); +x_5 = l_Lean_EnvExtension_initFn____x40_Lean_Environment___hyg_6645____closed__1; +x_6 = lean_array_push(x_5, x_4); +x_7 = lean_ctor_get(x_1, 2); +lean_inc(x_7); +if (lean_obj_tag(x_7) == 0) +{ +lean_dec(x_1); +return x_6; +} +else +{ +lean_object* x_8; lean_object* x_9; lean_object* x_10; +x_8 = lean_ctor_get(x_7, 0); +lean_inc(x_8); +lean_dec(x_7); +x_9 = lean_array_push(x_6, x_8); +x_10 = lean_ctor_get(x_1, 3); +lean_inc(x_10); +lean_dec(x_1); +if (lean_obj_tag(x_10) == 0) +{ +return x_9; +} +else +{ +lean_object* x_11; lean_object* x_12; +x_11 = lean_ctor_get(x_10, 0); +lean_inc(x_11); +lean_dec(x_10); +x_12 = lean_array_push(x_9, x_11); +return x_12; +} +} +} +} +} +LEAN_EXPORT lean_object* l___private_Lean_Environment_0__Lean_findOLeanParts___lambda__1(lean_object* x_1, lean_object* x_2, lean_object* x_3) { +_start: +{ +lean_object* x_4; +x_4 = lean_alloc_ctor(0, 2, 0); +lean_ctor_set(x_4, 0, x_1); +lean_ctor_set(x_4, 1, x_3); +return x_4; +} +} +static lean_object* _init_l___private_Lean_Environment_0__Lean_findOLeanParts___lambda__2___closed__1() { +_start: +{ +lean_object* x_1; +x_1 = lean_alloc_closure((void*)(l___private_Lean_Environment_0__Lean_findOLeanParts___lambda__1___boxed), 3, 0); +return x_1; +} +} +LEAN_EXPORT lean_object* l___private_Lean_Environment_0__Lean_findOLeanParts___lambda__2(lean_object* x_1, lean_object* x_2, lean_object* x_3) { +_start: +{ +lean_object* x_4; lean_object* x_5; lean_object* x_6; uint8_t x_7; lean_object* x_8; lean_object* x_9; lean_object* x_10; lean_object* x_11; lean_object* x_12; uint8_t x_13; +x_4 = lean_box(0); +lean_inc(x_1); +x_5 = lean_alloc_ctor(1, 2, 0); +lean_ctor_set(x_5, 0, x_1); +lean_ctor_set(x_5, 1, x_4); +x_6 = lean_array_mk(x_5); +x_7 = 1; +lean_inc(x_1); +x_8 = l_Lean_OLeanLevel_adjustFileName(x_1, x_7); +x_9 = l_System_FilePath_pathExists(x_8, x_3); +x_10 = lean_ctor_get(x_9, 0); +lean_inc(x_10); +x_11 = 
lean_ctor_get(x_9, 1); +lean_inc(x_11); +lean_dec(x_9); +x_12 = l___private_Lean_Environment_0__Lean_findOLeanParts___lambda__2___closed__1; +x_13 = lean_unbox(x_10); +lean_dec(x_10); +if (x_13 == 0) +{ +lean_object* x_14; lean_object* x_15; +lean_dec(x_8); +lean_dec(x_1); +x_14 = lean_box(0); +x_15 = lean_apply_3(x_12, x_6, x_14, x_11); +return x_15; +} +else +{ +lean_object* x_16; uint8_t x_17; lean_object* x_18; lean_object* x_19; lean_object* x_20; uint8_t x_21; +x_16 = lean_array_push(x_6, x_8); +x_17 = 2; +x_18 = l_Lean_OLeanLevel_adjustFileName(x_1, x_17); +x_19 = l_System_FilePath_pathExists(x_18, x_11); +x_20 = lean_ctor_get(x_19, 0); +lean_inc(x_20); +x_21 = lean_unbox(x_20); +lean_dec(x_20); +if (x_21 == 0) +{ +lean_object* x_22; lean_object* x_23; lean_object* x_24; +lean_dec(x_18); +x_22 = lean_ctor_get(x_19, 1); +lean_inc(x_22); +lean_dec(x_19); +x_23 = lean_box(0); +x_24 = lean_apply_3(x_12, x_16, x_23, x_22); +return x_24; +} +else +{ +lean_object* x_25; lean_object* x_26; lean_object* x_27; lean_object* x_28; +x_25 = lean_ctor_get(x_19, 1); +lean_inc(x_25); +lean_dec(x_19); +x_26 = lean_array_push(x_16, x_18); +x_27 = lean_box(0); +x_28 = lean_apply_3(x_12, x_26, x_27, x_25); +return x_28; +} +} +} +} +static lean_object* _init_l___private_Lean_Environment_0__Lean_findOLeanParts___closed__1() { +_start: +{ +lean_object* x_1; +x_1 = lean_mk_string_unchecked("object file '", 13, 13); +return x_1; +} +} +static lean_object* _init_l___private_Lean_Environment_0__Lean_findOLeanParts___closed__2() { +_start: +{ +lean_object* x_1; +x_1 = lean_mk_string_unchecked("' of module ", 12, 12); +return x_1; +} +} +static lean_object* _init_l___private_Lean_Environment_0__Lean_findOLeanParts___closed__3() { +_start: +{ +lean_object* x_1; +x_1 = lean_mk_string_unchecked(" does not exist", 15, 15); +return x_1; +} +} +LEAN_EXPORT lean_object* l___private_Lean_Environment_0__Lean_findOLeanParts(lean_object* x_1, lean_object* x_2) { +_start: +{ +lean_object* x_3; +x_3 = l_Lean_findOLean(x_1, x_2); +if (lean_obj_tag(x_3) == 0) +{ +lean_object* x_4; lean_object* x_5; lean_object* x_6; lean_object* x_7; uint8_t x_8; +x_4 = lean_ctor_get(x_3, 0); +lean_inc(x_4); +x_5 = lean_ctor_get(x_3, 1); +lean_inc(x_5); +lean_dec(x_3); +x_6 = l_System_FilePath_pathExists(x_4, x_5); +x_7 = lean_ctor_get(x_6, 0); +lean_inc(x_7); +x_8 = lean_unbox(x_7); +lean_dec(x_7); +if (x_8 == 0) +{ +uint8_t x_9; +x_9 = !lean_is_exclusive(x_6); +if (x_9 == 0) +{ +lean_object* x_10; lean_object* x_11; lean_object* x_12; lean_object* x_13; lean_object* x_14; uint8_t x_15; lean_object* x_16; lean_object* x_17; lean_object* x_18; lean_object* x_19; lean_object* x_20; lean_object* x_21; +x_10 = lean_ctor_get(x_6, 0); +lean_dec(x_10); +x_11 = l___private_Lean_Environment_0__Lean_findOLeanParts___closed__1; +x_12 = lean_string_append(x_11, x_4); +lean_dec(x_4); +x_13 = l___private_Lean_Environment_0__Lean_findOLeanParts___closed__2; +x_14 = lean_string_append(x_12, x_13); +x_15 = 1; +x_16 = l___private_Lean_Environment_0__Lean_AsyncConsts_add___closed__1; +x_17 = l_Lean_Name_toString(x_1, x_15, x_16); +x_18 = lean_string_append(x_14, x_17); +lean_dec(x_17); +x_19 = l___private_Lean_Environment_0__Lean_findOLeanParts___closed__3; +x_20 = lean_string_append(x_18, x_19); +x_21 = lean_alloc_ctor(18, 1, 0); +lean_ctor_set(x_21, 0, x_20); +lean_ctor_set_tag(x_6, 1); +lean_ctor_set(x_6, 0, x_21); +return x_6; +} +else +{ +lean_object* x_22; lean_object* x_23; lean_object* x_24; lean_object* x_25; lean_object* x_26; uint8_t x_27; 
lean_object* x_28; lean_object* x_29; lean_object* x_30; lean_object* x_31; lean_object* x_32; lean_object* x_33; lean_object* x_34; +x_22 = lean_ctor_get(x_6, 1); +lean_inc(x_22); +lean_dec(x_6); +x_23 = l___private_Lean_Environment_0__Lean_findOLeanParts___closed__1; +x_24 = lean_string_append(x_23, x_4); +lean_dec(x_4); +x_25 = l___private_Lean_Environment_0__Lean_findOLeanParts___closed__2; +x_26 = lean_string_append(x_24, x_25); +x_27 = 1; +x_28 = l___private_Lean_Environment_0__Lean_AsyncConsts_add___closed__1; +x_29 = l_Lean_Name_toString(x_1, x_27, x_28); +x_30 = lean_string_append(x_26, x_29); +lean_dec(x_29); +x_31 = l___private_Lean_Environment_0__Lean_findOLeanParts___closed__3; +x_32 = lean_string_append(x_30, x_31); +x_33 = lean_alloc_ctor(18, 1, 0); +lean_ctor_set(x_33, 0, x_32); +x_34 = lean_alloc_ctor(1, 2, 0); +lean_ctor_set(x_34, 0, x_33); +lean_ctor_set(x_34, 1, x_22); +return x_34; +} +} +else +{ +lean_object* x_35; lean_object* x_36; lean_object* x_37; +lean_dec(x_1); +x_35 = lean_ctor_get(x_6, 1); +lean_inc(x_35); +lean_dec(x_6); +x_36 = lean_box(0); +x_37 = l___private_Lean_Environment_0__Lean_findOLeanParts___lambda__2(x_4, x_36, x_35); +return x_37; +} +} +else +{ +uint8_t x_38; +lean_dec(x_1); +x_38 = !lean_is_exclusive(x_3); +if (x_38 == 0) +{ +return x_3; +} +else +{ +lean_object* x_39; lean_object* x_40; lean_object* x_41; +x_39 = lean_ctor_get(x_3, 0); +x_40 = lean_ctor_get(x_3, 1); +lean_inc(x_40); +lean_inc(x_39); +lean_dec(x_3); +x_41 = lean_alloc_ctor(1, 2, 0); +lean_ctor_set(x_41, 0, x_39); +lean_ctor_set(x_41, 1, x_40); +return x_41; +} +} +} +} +LEAN_EXPORT lean_object* l___private_Lean_Environment_0__Lean_findOLeanParts___lambda__1___boxed(lean_object* x_1, lean_object* x_2, lean_object* x_3) { +_start: +{ +lean_object* x_4; +x_4 = l___private_Lean_Environment_0__Lean_findOLeanParts___lambda__1(x_1, x_2, x_3); +lean_dec(x_2); +return x_4; +} +} +LEAN_EXPORT lean_object* l___private_Lean_Environment_0__Lean_findOLeanParts___lambda__2___boxed(lean_object* x_1, lean_object* x_2, lean_object* x_3) { +_start: +{ +lean_object* x_4; +x_4 = l___private_Lean_Environment_0__Lean_findOLeanParts___lambda__2(x_1, x_2, x_3); +lean_dec(x_2); +return x_4; +} +} static lean_object* _init_l_panic___at_Lean_importModulesCore_go___spec__1___closed__1() { _start: { @@ -25227,79 +25114,85 @@ x_46 = lean_ctor_get_uint8(x_45, sizeof(void*)*5); lean_dec(x_45); if (x_46 == 0) { -uint8_t x_47; lean_object* x_48; lean_object* x_49; lean_object* x_50; lean_object* x_51; lean_object* x_52; lean_object* x_53; lean_object* x_54; lean_object* x_55; lean_object* x_56; lean_object* x_57; +uint8_t x_47; lean_object* x_48; lean_object* x_49; lean_object* x_50; lean_object* x_51; lean_object* x_52; lean_object* x_53; lean_object* x_54; lean_object* x_55; lean_object* x_56; lean_object* x_57; lean_object* x_58; x_47 = 1; -x_48 = l_Lean_instToStringImport___closed__1; +x_48 = l___private_Lean_Environment_0__Lean_AsyncConsts_add___closed__1; x_49 = l_Lean_Name_toString(x_19, x_47, x_48); x_50 = l_Array_forIn_x27Unsafe_loop___at_Lean_importModulesCore_go___spec__8___closed__1; x_51 = lean_string_append(x_50, x_49); lean_dec(x_49); x_52 = l_Array_forIn_x27Unsafe_loop___at_Lean_importModulesCore_go___spec__8___closed__2; x_53 = lean_string_append(x_51, x_52); -x_54 = l_Lean_Name_toString(x_2, x_47, x_48); -x_55 = lean_string_append(x_53, x_54); -lean_dec(x_54); -x_56 = l___private_Lean_Environment_0__Lean_AsyncConsts_add___closed__3; -x_57 = lean_string_append(x_55, x_56); +x_54 = 
lean_ctor_get(x_2, 0); +lean_inc(x_54); +lean_dec(x_2); +x_55 = l_Lean_Name_toString(x_54, x_47, x_48); +x_56 = lean_string_append(x_53, x_55); +lean_dec(x_55); +x_57 = l___private_Lean_Environment_0__Lean_AsyncConsts_add___closed__4; +x_58 = lean_string_append(x_56, x_57); lean_ctor_set_tag(x_40, 18); -lean_ctor_set(x_40, 0, x_57); +lean_ctor_set(x_40, 0, x_58); lean_ctor_set_tag(x_13, 1); lean_ctor_set(x_13, 0, x_40); return x_13; } else { -size_t x_58; lean_object* x_59; +size_t x_59; lean_object* x_60; lean_free_object(x_40); lean_dec(x_19); lean_free_object(x_13); -x_58 = lean_usize_add(x_6, x_31); -x_59 = lean_box(0); -x_6 = x_58; -x_7 = x_59; +x_59 = lean_usize_add(x_6, x_31); +x_60 = lean_box(0); +x_6 = x_59; +x_7 = x_60; x_9 = x_17; goto _start; } } else { -lean_object* x_61; uint8_t x_62; -x_61 = lean_ctor_get(x_40, 0); -lean_inc(x_61); +lean_object* x_62; uint8_t x_63; +x_62 = lean_ctor_get(x_40, 0); +lean_inc(x_62); lean_dec(x_40); -x_62 = lean_ctor_get_uint8(x_61, sizeof(void*)*5); -lean_dec(x_61); -if (x_62 == 0) +x_63 = lean_ctor_get_uint8(x_62, sizeof(void*)*5); +lean_dec(x_62); +if (x_63 == 0) { -uint8_t x_63; lean_object* x_64; lean_object* x_65; lean_object* x_66; lean_object* x_67; lean_object* x_68; lean_object* x_69; lean_object* x_70; lean_object* x_71; lean_object* x_72; lean_object* x_73; lean_object* x_74; -x_63 = 1; -x_64 = l_Lean_instToStringImport___closed__1; -x_65 = l_Lean_Name_toString(x_19, x_63, x_64); -x_66 = l_Array_forIn_x27Unsafe_loop___at_Lean_importModulesCore_go___spec__8___closed__1; -x_67 = lean_string_append(x_66, x_65); -lean_dec(x_65); -x_68 = l_Array_forIn_x27Unsafe_loop___at_Lean_importModulesCore_go___spec__8___closed__2; -x_69 = lean_string_append(x_67, x_68); -x_70 = l_Lean_Name_toString(x_2, x_63, x_64); -x_71 = lean_string_append(x_69, x_70); -lean_dec(x_70); -x_72 = l___private_Lean_Environment_0__Lean_AsyncConsts_add___closed__3; -x_73 = lean_string_append(x_71, x_72); -x_74 = lean_alloc_ctor(18, 1, 0); -lean_ctor_set(x_74, 0, x_73); +uint8_t x_64; lean_object* x_65; lean_object* x_66; lean_object* x_67; lean_object* x_68; lean_object* x_69; lean_object* x_70; lean_object* x_71; lean_object* x_72; lean_object* x_73; lean_object* x_74; lean_object* x_75; lean_object* x_76; +x_64 = 1; +x_65 = l___private_Lean_Environment_0__Lean_AsyncConsts_add___closed__1; +x_66 = l_Lean_Name_toString(x_19, x_64, x_65); +x_67 = l_Array_forIn_x27Unsafe_loop___at_Lean_importModulesCore_go___spec__8___closed__1; +x_68 = lean_string_append(x_67, x_66); +lean_dec(x_66); +x_69 = l_Array_forIn_x27Unsafe_loop___at_Lean_importModulesCore_go___spec__8___closed__2; +x_70 = lean_string_append(x_68, x_69); +x_71 = lean_ctor_get(x_2, 0); +lean_inc(x_71); +lean_dec(x_2); +x_72 = l_Lean_Name_toString(x_71, x_64, x_65); +x_73 = lean_string_append(x_70, x_72); +lean_dec(x_72); +x_74 = l___private_Lean_Environment_0__Lean_AsyncConsts_add___closed__4; +x_75 = lean_string_append(x_73, x_74); +x_76 = lean_alloc_ctor(18, 1, 0); +lean_ctor_set(x_76, 0, x_75); lean_ctor_set_tag(x_13, 1); -lean_ctor_set(x_13, 0, x_74); +lean_ctor_set(x_13, 0, x_76); return x_13; } else { -size_t x_75; lean_object* x_76; +size_t x_77; lean_object* x_78; lean_dec(x_19); lean_free_object(x_13); -x_75 = lean_usize_add(x_6, x_31); -x_76 = lean_box(0); -x_6 = x_75; -x_7 = x_76; +x_77 = lean_usize_add(x_6, x_31); +x_78 = lean_box(0); +x_6 = x_77; +x_7 = x_78; x_9 = x_17; goto _start; } @@ -25309,116 +25202,119 @@ goto _start; } else { -lean_object* x_78; lean_object* x_79; lean_object* x_80; 
lean_object* x_81; uint64_t x_82; uint64_t x_83; uint64_t x_84; uint64_t x_85; uint64_t x_86; uint64_t x_87; uint64_t x_88; size_t x_89; size_t x_90; size_t x_91; size_t x_92; size_t x_93; lean_object* x_94; lean_object* x_95; -x_78 = lean_ctor_get(x_13, 1); -lean_inc(x_78); +lean_object* x_80; lean_object* x_81; lean_object* x_82; lean_object* x_83; uint64_t x_84; uint64_t x_85; uint64_t x_86; uint64_t x_87; uint64_t x_88; uint64_t x_89; uint64_t x_90; size_t x_91; size_t x_92; size_t x_93; size_t x_94; size_t x_95; lean_object* x_96; lean_object* x_97; +x_80 = lean_ctor_get(x_13, 1); +lean_inc(x_80); lean_dec(x_13); -x_79 = lean_ctor_get(x_12, 0); -lean_inc(x_79); +x_81 = lean_ctor_get(x_12, 0); +lean_inc(x_81); lean_dec(x_12); -x_80 = lean_ctor_get(x_15, 1); -lean_inc(x_80); +x_82 = lean_ctor_get(x_15, 1); +lean_inc(x_82); lean_dec(x_15); -x_81 = lean_array_get_size(x_80); -x_82 = l_Lean_Name_hash___override(x_79); -x_83 = 32; -x_84 = lean_uint64_shift_right(x_82, x_83); -x_85 = lean_uint64_xor(x_82, x_84); -x_86 = 16; -x_87 = lean_uint64_shift_right(x_85, x_86); -x_88 = lean_uint64_xor(x_85, x_87); -x_89 = lean_uint64_to_usize(x_88); -x_90 = lean_usize_of_nat(x_81); -lean_dec(x_81); -x_91 = 1; -x_92 = lean_usize_sub(x_90, x_91); -x_93 = lean_usize_land(x_89, x_92); -x_94 = lean_array_uget(x_80, x_93); -lean_dec(x_80); -x_95 = l_Std_DHashMap_Internal_AssocList_get_x3f___at_Lean_importModulesCore_go___spec__7(x_79, x_94); -lean_dec(x_94); -if (lean_obj_tag(x_95) == 0) +x_83 = lean_array_get_size(x_82); +x_84 = l_Lean_Name_hash___override(x_81); +x_85 = 32; +x_86 = lean_uint64_shift_right(x_84, x_85); +x_87 = lean_uint64_xor(x_84, x_86); +x_88 = 16; +x_89 = lean_uint64_shift_right(x_87, x_88); +x_90 = lean_uint64_xor(x_87, x_89); +x_91 = lean_uint64_to_usize(x_90); +x_92 = lean_usize_of_nat(x_83); +lean_dec(x_83); +x_93 = 1; +x_94 = lean_usize_sub(x_92, x_93); +x_95 = lean_usize_land(x_91, x_94); +x_96 = lean_array_uget(x_82, x_95); +lean_dec(x_82); +x_97 = l_Std_DHashMap_Internal_AssocList_get_x3f___at_Lean_importModulesCore_go___spec__7(x_81, x_96); +lean_dec(x_96); +if (lean_obj_tag(x_97) == 0) { -size_t x_96; lean_object* x_97; -lean_dec(x_79); -x_96 = lean_usize_add(x_6, x_91); -x_97 = lean_box(0); -x_6 = x_96; -x_7 = x_97; -x_9 = x_78; +size_t x_98; lean_object* x_99; +lean_dec(x_81); +x_98 = lean_usize_add(x_6, x_93); +x_99 = lean_box(0); +x_6 = x_98; +x_7 = x_99; +x_9 = x_80; goto _start; } else { -lean_object* x_99; lean_object* x_100; -x_99 = lean_ctor_get(x_95, 0); -lean_inc(x_99); -lean_dec(x_95); -x_100 = l___private_Lean_Environment_0__Lean_ImportedModule_mainModule_x3f(x_99); -lean_dec(x_99); -if (lean_obj_tag(x_100) == 0) +lean_object* x_101; lean_object* x_102; +x_101 = lean_ctor_get(x_97, 0); +lean_inc(x_101); +lean_dec(x_97); +x_102 = l___private_Lean_Environment_0__Lean_ImportedModule_mainModule_x3f(x_101); +lean_dec(x_101); +if (lean_obj_tag(x_102) == 0) { -size_t x_101; lean_object* x_102; -lean_dec(x_79); -x_101 = lean_usize_add(x_6, x_91); -x_102 = lean_box(0); -x_6 = x_101; -x_7 = x_102; -x_9 = x_78; +size_t x_103; lean_object* x_104; +lean_dec(x_81); +x_103 = lean_usize_add(x_6, x_93); +x_104 = lean_box(0); +x_6 = x_103; +x_7 = x_104; +x_9 = x_80; goto _start; } else { -lean_object* x_104; lean_object* x_105; uint8_t x_106; -x_104 = lean_ctor_get(x_100, 0); -lean_inc(x_104); -if (lean_is_exclusive(x_100)) { - lean_ctor_release(x_100, 0); - x_105 = x_100; +lean_object* x_106; lean_object* x_107; uint8_t x_108; +x_106 = lean_ctor_get(x_102, 0); +lean_inc(x_106); 
+if (lean_is_exclusive(x_102)) { + lean_ctor_release(x_102, 0); + x_107 = x_102; } else { - lean_dec_ref(x_100); - x_105 = lean_box(0); + lean_dec_ref(x_102); + x_107 = lean_box(0); } -x_106 = lean_ctor_get_uint8(x_104, sizeof(void*)*5); -lean_dec(x_104); -if (x_106 == 0) -{ -uint8_t x_107; lean_object* x_108; lean_object* x_109; lean_object* x_110; lean_object* x_111; lean_object* x_112; lean_object* x_113; lean_object* x_114; lean_object* x_115; lean_object* x_116; lean_object* x_117; lean_object* x_118; lean_object* x_119; -x_107 = 1; -x_108 = l_Lean_instToStringImport___closed__1; -x_109 = l_Lean_Name_toString(x_79, x_107, x_108); -x_110 = l_Array_forIn_x27Unsafe_loop___at_Lean_importModulesCore_go___spec__8___closed__1; -x_111 = lean_string_append(x_110, x_109); -lean_dec(x_109); -x_112 = l_Array_forIn_x27Unsafe_loop___at_Lean_importModulesCore_go___spec__8___closed__2; -x_113 = lean_string_append(x_111, x_112); -x_114 = l_Lean_Name_toString(x_2, x_107, x_108); +x_108 = lean_ctor_get_uint8(x_106, sizeof(void*)*5); +lean_dec(x_106); +if (x_108 == 0) +{ +uint8_t x_109; lean_object* x_110; lean_object* x_111; lean_object* x_112; lean_object* x_113; lean_object* x_114; lean_object* x_115; lean_object* x_116; lean_object* x_117; lean_object* x_118; lean_object* x_119; lean_object* x_120; lean_object* x_121; lean_object* x_122; +x_109 = 1; +x_110 = l___private_Lean_Environment_0__Lean_AsyncConsts_add___closed__1; +x_111 = l_Lean_Name_toString(x_81, x_109, x_110); +x_112 = l_Array_forIn_x27Unsafe_loop___at_Lean_importModulesCore_go___spec__8___closed__1; +x_113 = lean_string_append(x_112, x_111); +lean_dec(x_111); +x_114 = l_Array_forIn_x27Unsafe_loop___at_Lean_importModulesCore_go___spec__8___closed__2; x_115 = lean_string_append(x_113, x_114); -lean_dec(x_114); -x_116 = l___private_Lean_Environment_0__Lean_AsyncConsts_add___closed__3; -x_117 = lean_string_append(x_115, x_116); -if (lean_is_scalar(x_105)) { - x_118 = lean_alloc_ctor(18, 1, 0); -} else { - x_118 = x_105; - lean_ctor_set_tag(x_118, 18); -} -lean_ctor_set(x_118, 0, x_117); -x_119 = lean_alloc_ctor(1, 2, 0); -lean_ctor_set(x_119, 0, x_118); -lean_ctor_set(x_119, 1, x_78); -return x_119; +x_116 = lean_ctor_get(x_2, 0); +lean_inc(x_116); +lean_dec(x_2); +x_117 = l_Lean_Name_toString(x_116, x_109, x_110); +x_118 = lean_string_append(x_115, x_117); +lean_dec(x_117); +x_119 = l___private_Lean_Environment_0__Lean_AsyncConsts_add___closed__4; +x_120 = lean_string_append(x_118, x_119); +if (lean_is_scalar(x_107)) { + x_121 = lean_alloc_ctor(18, 1, 0); +} else { + x_121 = x_107; + lean_ctor_set_tag(x_121, 18); +} +lean_ctor_set(x_121, 0, x_120); +x_122 = lean_alloc_ctor(1, 2, 0); +lean_ctor_set(x_122, 0, x_121); +lean_ctor_set(x_122, 1, x_80); +return x_122; } else { -size_t x_120; lean_object* x_121; -lean_dec(x_105); -lean_dec(x_79); -x_120 = lean_usize_add(x_6, x_91); -x_121 = lean_box(0); -x_6 = x_120; -x_7 = x_121; -x_9 = x_78; +size_t x_123; lean_object* x_124; +lean_dec(x_107); +lean_dec(x_81); +x_123 = lean_usize_add(x_6, x_93); +x_124 = lean_box(0); +x_6 = x_123; +x_7 = x_124; +x_9 = x_80; goto _start; } } @@ -25481,39 +25377,41 @@ lean_ctor_set(x_2, 0, x_1); return x_2; } } -LEAN_EXPORT lean_object* l_Array_forIn_x27Unsafe_loop___at_Lean_importModulesCore_go___spec__10___lambda__1(lean_object* x_1, uint8_t x_2, uint8_t x_3, lean_object* x_4, lean_object* x_5, lean_object* x_6, lean_object* x_7) { +LEAN_EXPORT lean_object* l_Array_forIn_x27Unsafe_loop___at_Lean_importModulesCore_go___spec__10___lambda__1(lean_object* x_1, 
uint8_t x_2, lean_object* x_3, lean_object* x_4, lean_object* x_5, lean_object* x_6) { _start: { -lean_object* x_8; uint8_t x_9; -x_8 = lean_st_ref_take(x_6, x_7); -x_9 = !lean_is_exclusive(x_8); -if (x_9 == 0) +lean_object* x_7; uint8_t x_8; +x_7 = lean_st_ref_take(x_5, x_6); +x_8 = !lean_is_exclusive(x_7); +if (x_8 == 0) { -lean_object* x_10; lean_object* x_11; lean_object* x_12; lean_object* x_13; lean_object* x_14; lean_object* x_15; uint8_t x_16; -x_10 = lean_ctor_get(x_8, 0); -x_11 = lean_ctor_get(x_8, 1); -x_12 = lean_ctor_get(x_10, 0); -lean_inc(x_12); -lean_inc(x_1); -x_13 = lean_alloc_ctor(0, 1, 2); -lean_ctor_set(x_13, 0, x_1); -lean_ctor_set_uint8(x_13, sizeof(void*)*1, x_2); -lean_ctor_set_uint8(x_13, sizeof(void*)*1 + 1, x_3); -lean_ctor_set(x_8, 1, x_4); -lean_ctor_set(x_8, 0, x_13); -x_14 = lean_ctor_get(x_10, 1); +lean_object* x_9; lean_object* x_10; lean_object* x_11; uint8_t x_12; +x_9 = lean_ctor_get(x_7, 0); +x_10 = lean_ctor_get(x_7, 1); +x_11 = lean_ctor_get(x_9, 0); +lean_inc(x_11); +x_12 = !lean_is_exclusive(x_1); +if (x_12 == 0) +{ +lean_object* x_13; lean_object* x_14; lean_object* x_15; uint8_t x_16; +x_13 = lean_ctor_get(x_1, 0); +lean_inc(x_13); +lean_ctor_set_uint8(x_1, sizeof(void*)*1, x_2); +lean_ctor_set(x_7, 1, x_3); +lean_ctor_set(x_7, 0, x_1); +x_14 = lean_ctor_get(x_9, 1); lean_inc(x_14); -lean_dec(x_10); -lean_inc(x_1); -x_15 = lean_array_push(x_14, x_1); -x_16 = !lean_is_exclusive(x_12); +lean_dec(x_9); +lean_inc(x_13); +x_15 = lean_array_push(x_14, x_13); +x_16 = !lean_is_exclusive(x_11); if (x_16 == 0) { lean_object* x_17; lean_object* x_18; lean_object* x_19; uint64_t x_20; uint64_t x_21; uint64_t x_22; uint64_t x_23; uint64_t x_24; uint64_t x_25; uint64_t x_26; size_t x_27; size_t x_28; size_t x_29; size_t x_30; size_t x_31; lean_object* x_32; uint8_t x_33; -x_17 = lean_ctor_get(x_12, 0); -x_18 = lean_ctor_get(x_12, 1); +x_17 = lean_ctor_get(x_11, 0); +x_18 = lean_ctor_get(x_11, 1); x_19 = lean_array_get_size(x_18); -x_20 = l_Lean_Name_hash___override(x_1); +x_20 = l_Lean_Name_hash___override(x_13); x_21 = 32; x_22 = lean_uint64_shift_right(x_20, x_21); x_23 = lean_uint64_xor(x_20, x_22); @@ -25527,7 +25425,7 @@ x_29 = 1; x_30 = lean_usize_sub(x_28, x_29); x_31 = lean_usize_land(x_27, x_30); x_32 = lean_array_uget(x_18, x_31); -x_33 = l_Std_DHashMap_Internal_AssocList_contains___at_Lean_importModulesCore_go___spec__2(x_1, x_32); +x_33 = l_Std_DHashMap_Internal_AssocList_contains___at_Lean_importModulesCore_go___spec__2(x_13, x_32); if (x_33 == 0) { lean_object* x_34; lean_object* x_35; lean_object* x_36; lean_object* x_37; lean_object* x_38; lean_object* x_39; lean_object* x_40; lean_object* x_41; lean_object* x_42; uint8_t x_43; @@ -25535,8 +25433,8 @@ x_34 = lean_unsigned_to_nat(1u); x_35 = lean_nat_add(x_17, x_34); lean_dec(x_17); x_36 = lean_alloc_ctor(1, 3, 0); -lean_ctor_set(x_36, 0, x_1); -lean_ctor_set(x_36, 1, x_8); +lean_ctor_set(x_36, 0, x_13); +lean_ctor_set(x_36, 1, x_7); lean_ctor_set(x_36, 2, x_32); x_37 = lean_array_uset(x_18, x_31, x_36); x_38 = lean_unsigned_to_nat(4u); @@ -25552,12 +25450,12 @@ if (x_43 == 0) { lean_object* x_44; lean_object* x_45; lean_object* x_46; uint8_t x_47; x_44 = l_Std_DHashMap_Internal_Raw_u2080_expand___at_Lean_importModulesCore_go___spec__3(x_37); -lean_ctor_set(x_12, 1, x_44); -lean_ctor_set(x_12, 0, x_35); +lean_ctor_set(x_11, 1, x_44); +lean_ctor_set(x_11, 0, x_35); x_45 = lean_alloc_ctor(0, 2, 0); -lean_ctor_set(x_45, 0, x_12); +lean_ctor_set(x_45, 0, x_11); lean_ctor_set(x_45, 1, x_15); -x_46 = 
lean_st_ref_set(x_6, x_45, x_11); +x_46 = lean_st_ref_set(x_5, x_45, x_10); x_47 = !lean_is_exclusive(x_46); if (x_47 == 0) { @@ -25584,12 +25482,12 @@ return x_52; else { lean_object* x_53; lean_object* x_54; uint8_t x_55; -lean_ctor_set(x_12, 1, x_37); -lean_ctor_set(x_12, 0, x_35); +lean_ctor_set(x_11, 1, x_37); +lean_ctor_set(x_11, 0, x_35); x_53 = lean_alloc_ctor(0, 2, 0); -lean_ctor_set(x_53, 0, x_12); +lean_ctor_set(x_53, 0, x_11); lean_ctor_set(x_53, 1, x_15); -x_54 = lean_st_ref_set(x_6, x_53, x_11); +x_54 = lean_st_ref_set(x_5, x_53, x_10); x_55 = !lean_is_exclusive(x_54); if (x_55 == 0) { @@ -25619,13 +25517,13 @@ else lean_object* x_61; lean_object* x_62; lean_object* x_63; lean_object* x_64; lean_object* x_65; lean_object* x_66; uint8_t x_67; x_61 = lean_box(0); x_62 = lean_array_uset(x_18, x_31, x_61); -x_63 = l_Std_DHashMap_Internal_AssocList_replace___at_Lean_importModulesCore_go___spec__6(x_1, x_8, x_32); +x_63 = l_Std_DHashMap_Internal_AssocList_replace___at_Lean_importModulesCore_go___spec__6(x_13, x_7, x_32); x_64 = lean_array_uset(x_62, x_31, x_63); -lean_ctor_set(x_12, 1, x_64); +lean_ctor_set(x_11, 1, x_64); x_65 = lean_alloc_ctor(0, 2, 0); -lean_ctor_set(x_65, 0, x_12); +lean_ctor_set(x_65, 0, x_11); lean_ctor_set(x_65, 1, x_15); -x_66 = lean_st_ref_set(x_6, x_65, x_11); +x_66 = lean_st_ref_set(x_5, x_65, x_10); x_67 = !lean_is_exclusive(x_66); if (x_67 == 0) { @@ -25653,13 +25551,13 @@ return x_72; else { lean_object* x_73; lean_object* x_74; lean_object* x_75; uint64_t x_76; uint64_t x_77; uint64_t x_78; uint64_t x_79; uint64_t x_80; uint64_t x_81; uint64_t x_82; size_t x_83; size_t x_84; size_t x_85; size_t x_86; size_t x_87; lean_object* x_88; uint8_t x_89; -x_73 = lean_ctor_get(x_12, 0); -x_74 = lean_ctor_get(x_12, 1); +x_73 = lean_ctor_get(x_11, 0); +x_74 = lean_ctor_get(x_11, 1); lean_inc(x_74); lean_inc(x_73); -lean_dec(x_12); +lean_dec(x_11); x_75 = lean_array_get_size(x_74); -x_76 = l_Lean_Name_hash___override(x_1); +x_76 = l_Lean_Name_hash___override(x_13); x_77 = 32; x_78 = lean_uint64_shift_right(x_76, x_77); x_79 = lean_uint64_xor(x_76, x_78); @@ -25673,7 +25571,7 @@ x_85 = 1; x_86 = lean_usize_sub(x_84, x_85); x_87 = lean_usize_land(x_83, x_86); x_88 = lean_array_uget(x_74, x_87); -x_89 = l_Std_DHashMap_Internal_AssocList_contains___at_Lean_importModulesCore_go___spec__2(x_1, x_88); +x_89 = l_Std_DHashMap_Internal_AssocList_contains___at_Lean_importModulesCore_go___spec__2(x_13, x_88); if (x_89 == 0) { lean_object* x_90; lean_object* x_91; lean_object* x_92; lean_object* x_93; lean_object* x_94; lean_object* x_95; lean_object* x_96; lean_object* x_97; lean_object* x_98; uint8_t x_99; @@ -25681,8 +25579,8 @@ x_90 = lean_unsigned_to_nat(1u); x_91 = lean_nat_add(x_73, x_90); lean_dec(x_73); x_92 = lean_alloc_ctor(1, 3, 0); -lean_ctor_set(x_92, 0, x_1); -lean_ctor_set(x_92, 1, x_8); +lean_ctor_set(x_92, 0, x_13); +lean_ctor_set(x_92, 1, x_7); lean_ctor_set(x_92, 2, x_88); x_93 = lean_array_uset(x_74, x_87, x_92); x_94 = lean_unsigned_to_nat(4u); @@ -25704,7 +25602,7 @@ lean_ctor_set(x_101, 1, x_100); x_102 = lean_alloc_ctor(0, 2, 0); lean_ctor_set(x_102, 0, x_101); lean_ctor_set(x_102, 1, x_15); -x_103 = lean_st_ref_set(x_6, x_102, x_11); +x_103 = lean_st_ref_set(x_5, x_102, x_10); x_104 = lean_ctor_get(x_103, 1); lean_inc(x_104); if (lean_is_exclusive(x_103)) { @@ -25734,7 +25632,7 @@ lean_ctor_set(x_108, 1, x_93); x_109 = lean_alloc_ctor(0, 2, 0); lean_ctor_set(x_109, 0, x_108); lean_ctor_set(x_109, 1, x_15); -x_110 = lean_st_ref_set(x_6, x_109, x_11); 
+x_110 = lean_st_ref_set(x_5, x_109, x_10); x_111 = lean_ctor_get(x_110, 1); lean_inc(x_111); if (lean_is_exclusive(x_110)) { @@ -25761,7 +25659,7 @@ else lean_object* x_115; lean_object* x_116; lean_object* x_117; lean_object* x_118; lean_object* x_119; lean_object* x_120; lean_object* x_121; lean_object* x_122; lean_object* x_123; lean_object* x_124; lean_object* x_125; x_115 = lean_box(0); x_116 = lean_array_uset(x_74, x_87, x_115); -x_117 = l_Std_DHashMap_Internal_AssocList_replace___at_Lean_importModulesCore_go___spec__6(x_1, x_8, x_88); +x_117 = l_Std_DHashMap_Internal_AssocList_replace___at_Lean_importModulesCore_go___spec__6(x_13, x_7, x_88); x_118 = lean_array_uset(x_116, x_87, x_117); x_119 = lean_alloc_ctor(0, 2, 0); lean_ctor_set(x_119, 0, x_73); @@ -25769,7 +25667,7 @@ lean_ctor_set(x_119, 1, x_118); x_120 = lean_alloc_ctor(0, 2, 0); lean_ctor_set(x_120, 0, x_119); lean_ctor_set(x_120, 1, x_15); -x_121 = lean_st_ref_set(x_6, x_120, x_11); +x_121 = lean_st_ref_set(x_5, x_120, x_10); x_122 = lean_ctor_get(x_121, 1); lean_inc(x_122); if (lean_is_exclusive(x_121)) { @@ -25794,182 +25692,373 @@ return x_125; } else { -lean_object* x_126; lean_object* x_127; lean_object* x_128; lean_object* x_129; lean_object* x_130; lean_object* x_131; lean_object* x_132; lean_object* x_133; lean_object* x_134; lean_object* x_135; lean_object* x_136; uint64_t x_137; uint64_t x_138; uint64_t x_139; uint64_t x_140; uint64_t x_141; uint64_t x_142; uint64_t x_143; size_t x_144; size_t x_145; size_t x_146; size_t x_147; size_t x_148; lean_object* x_149; uint8_t x_150; -x_126 = lean_ctor_get(x_8, 0); -x_127 = lean_ctor_get(x_8, 1); -lean_inc(x_127); +lean_object* x_126; uint8_t x_127; lean_object* x_128; lean_object* x_129; lean_object* x_130; lean_object* x_131; lean_object* x_132; lean_object* x_133; lean_object* x_134; uint64_t x_135; uint64_t x_136; uint64_t x_137; uint64_t x_138; uint64_t x_139; uint64_t x_140; uint64_t x_141; size_t x_142; size_t x_143; size_t x_144; size_t x_145; size_t x_146; lean_object* x_147; uint8_t x_148; +x_126 = lean_ctor_get(x_1, 0); +x_127 = lean_ctor_get_uint8(x_1, sizeof(void*)*1 + 1); lean_inc(x_126); -lean_dec(x_8); -x_128 = lean_ctor_get(x_126, 0); -lean_inc(x_128); -lean_inc(x_1); -x_129 = lean_alloc_ctor(0, 1, 2); -lean_ctor_set(x_129, 0, x_1); -lean_ctor_set_uint8(x_129, sizeof(void*)*1, x_2); -lean_ctor_set_uint8(x_129, sizeof(void*)*1 + 1, x_3); -x_130 = lean_alloc_ctor(0, 2, 0); -lean_ctor_set(x_130, 0, x_129); -lean_ctor_set(x_130, 1, x_4); -x_131 = lean_ctor_get(x_126, 1); +lean_dec(x_1); +lean_inc(x_126); +x_128 = lean_alloc_ctor(0, 1, 2); +lean_ctor_set(x_128, 0, x_126); +lean_ctor_set_uint8(x_128, sizeof(void*)*1, x_2); +lean_ctor_set_uint8(x_128, sizeof(void*)*1 + 1, x_127); +lean_ctor_set(x_7, 1, x_3); +lean_ctor_set(x_7, 0, x_128); +x_129 = lean_ctor_get(x_9, 1); +lean_inc(x_129); +lean_dec(x_9); +lean_inc(x_126); +x_130 = lean_array_push(x_129, x_126); +x_131 = lean_ctor_get(x_11, 0); lean_inc(x_131); -lean_dec(x_126); -lean_inc(x_1); -x_132 = lean_array_push(x_131, x_1); -x_133 = lean_ctor_get(x_128, 0); -lean_inc(x_133); -x_134 = lean_ctor_get(x_128, 1); -lean_inc(x_134); -if (lean_is_exclusive(x_128)) { - lean_ctor_release(x_128, 0); - lean_ctor_release(x_128, 1); - x_135 = x_128; +x_132 = lean_ctor_get(x_11, 1); +lean_inc(x_132); +if (lean_is_exclusive(x_11)) { + lean_ctor_release(x_11, 0); + lean_ctor_release(x_11, 1); + x_133 = x_11; +} else { + lean_dec_ref(x_11); + x_133 = lean_box(0); +} +x_134 = lean_array_get_size(x_132); +x_135 = 
l_Lean_Name_hash___override(x_126); +x_136 = 32; +x_137 = lean_uint64_shift_right(x_135, x_136); +x_138 = lean_uint64_xor(x_135, x_137); +x_139 = 16; +x_140 = lean_uint64_shift_right(x_138, x_139); +x_141 = lean_uint64_xor(x_138, x_140); +x_142 = lean_uint64_to_usize(x_141); +x_143 = lean_usize_of_nat(x_134); +lean_dec(x_134); +x_144 = 1; +x_145 = lean_usize_sub(x_143, x_144); +x_146 = lean_usize_land(x_142, x_145); +x_147 = lean_array_uget(x_132, x_146); +x_148 = l_Std_DHashMap_Internal_AssocList_contains___at_Lean_importModulesCore_go___spec__2(x_126, x_147); +if (x_148 == 0) +{ +lean_object* x_149; lean_object* x_150; lean_object* x_151; lean_object* x_152; lean_object* x_153; lean_object* x_154; lean_object* x_155; lean_object* x_156; lean_object* x_157; uint8_t x_158; +x_149 = lean_unsigned_to_nat(1u); +x_150 = lean_nat_add(x_131, x_149); +lean_dec(x_131); +x_151 = lean_alloc_ctor(1, 3, 0); +lean_ctor_set(x_151, 0, x_126); +lean_ctor_set(x_151, 1, x_7); +lean_ctor_set(x_151, 2, x_147); +x_152 = lean_array_uset(x_132, x_146, x_151); +x_153 = lean_unsigned_to_nat(4u); +x_154 = lean_nat_mul(x_150, x_153); +x_155 = lean_unsigned_to_nat(3u); +x_156 = lean_nat_div(x_154, x_155); +lean_dec(x_154); +x_157 = lean_array_get_size(x_152); +x_158 = lean_nat_dec_le(x_156, x_157); +lean_dec(x_157); +lean_dec(x_156); +if (x_158 == 0) +{ +lean_object* x_159; lean_object* x_160; lean_object* x_161; lean_object* x_162; lean_object* x_163; lean_object* x_164; lean_object* x_165; lean_object* x_166; +x_159 = l_Std_DHashMap_Internal_Raw_u2080_expand___at_Lean_importModulesCore_go___spec__3(x_152); +if (lean_is_scalar(x_133)) { + x_160 = lean_alloc_ctor(0, 2, 0); } else { - lean_dec_ref(x_128); - x_135 = lean_box(0); + x_160 = x_133; } -x_136 = lean_array_get_size(x_134); -x_137 = l_Lean_Name_hash___override(x_1); -x_138 = 32; -x_139 = lean_uint64_shift_right(x_137, x_138); -x_140 = lean_uint64_xor(x_137, x_139); -x_141 = 16; -x_142 = lean_uint64_shift_right(x_140, x_141); -x_143 = lean_uint64_xor(x_140, x_142); -x_144 = lean_uint64_to_usize(x_143); -x_145 = lean_usize_of_nat(x_136); -lean_dec(x_136); -x_146 = 1; -x_147 = lean_usize_sub(x_145, x_146); -x_148 = lean_usize_land(x_144, x_147); -x_149 = lean_array_uget(x_134, x_148); -x_150 = l_Std_DHashMap_Internal_AssocList_contains___at_Lean_importModulesCore_go___spec__2(x_1, x_149); -if (x_150 == 0) +lean_ctor_set(x_160, 0, x_150); +lean_ctor_set(x_160, 1, x_159); +x_161 = lean_alloc_ctor(0, 2, 0); +lean_ctor_set(x_161, 0, x_160); +lean_ctor_set(x_161, 1, x_130); +x_162 = lean_st_ref_set(x_5, x_161, x_10); +x_163 = lean_ctor_get(x_162, 1); +lean_inc(x_163); +if (lean_is_exclusive(x_162)) { + lean_ctor_release(x_162, 0); + lean_ctor_release(x_162, 1); + x_164 = x_162; +} else { + lean_dec_ref(x_162); + x_164 = lean_box(0); +} +x_165 = l_Array_forIn_x27Unsafe_loop___at_Lean_importModulesCore_go___spec__10___lambda__1___closed__1; +if (lean_is_scalar(x_164)) { + x_166 = lean_alloc_ctor(0, 2, 0); +} else { + x_166 = x_164; +} +lean_ctor_set(x_166, 0, x_165); +lean_ctor_set(x_166, 1, x_163); +return x_166; +} +else { -lean_object* x_151; lean_object* x_152; lean_object* x_153; lean_object* x_154; lean_object* x_155; lean_object* x_156; lean_object* x_157; lean_object* x_158; lean_object* x_159; uint8_t x_160; -x_151 = lean_unsigned_to_nat(1u); -x_152 = lean_nat_add(x_133, x_151); -lean_dec(x_133); -x_153 = lean_alloc_ctor(1, 3, 0); -lean_ctor_set(x_153, 0, x_1); -lean_ctor_set(x_153, 1, x_130); -lean_ctor_set(x_153, 2, x_149); -x_154 = lean_array_uset(x_134, 
x_148, x_153); -x_155 = lean_unsigned_to_nat(4u); -x_156 = lean_nat_mul(x_152, x_155); -x_157 = lean_unsigned_to_nat(3u); -x_158 = lean_nat_div(x_156, x_157); -lean_dec(x_156); -x_159 = lean_array_get_size(x_154); -x_160 = lean_nat_dec_le(x_158, x_159); -lean_dec(x_159); -lean_dec(x_158); -if (x_160 == 0) +lean_object* x_167; lean_object* x_168; lean_object* x_169; lean_object* x_170; lean_object* x_171; lean_object* x_172; lean_object* x_173; +if (lean_is_scalar(x_133)) { + x_167 = lean_alloc_ctor(0, 2, 0); +} else { + x_167 = x_133; +} +lean_ctor_set(x_167, 0, x_150); +lean_ctor_set(x_167, 1, x_152); +x_168 = lean_alloc_ctor(0, 2, 0); +lean_ctor_set(x_168, 0, x_167); +lean_ctor_set(x_168, 1, x_130); +x_169 = lean_st_ref_set(x_5, x_168, x_10); +x_170 = lean_ctor_get(x_169, 1); +lean_inc(x_170); +if (lean_is_exclusive(x_169)) { + lean_ctor_release(x_169, 0); + lean_ctor_release(x_169, 1); + x_171 = x_169; +} else { + lean_dec_ref(x_169); + x_171 = lean_box(0); +} +x_172 = l_Array_forIn_x27Unsafe_loop___at_Lean_importModulesCore_go___spec__10___lambda__1___closed__1; +if (lean_is_scalar(x_171)) { + x_173 = lean_alloc_ctor(0, 2, 0); +} else { + x_173 = x_171; +} +lean_ctor_set(x_173, 0, x_172); +lean_ctor_set(x_173, 1, x_170); +return x_173; +} +} +else { -lean_object* x_161; lean_object* x_162; lean_object* x_163; lean_object* x_164; lean_object* x_165; lean_object* x_166; lean_object* x_167; lean_object* x_168; -x_161 = l_Std_DHashMap_Internal_Raw_u2080_expand___at_Lean_importModulesCore_go___spec__3(x_154); -if (lean_is_scalar(x_135)) { - x_162 = lean_alloc_ctor(0, 2, 0); +lean_object* x_174; lean_object* x_175; lean_object* x_176; lean_object* x_177; lean_object* x_178; lean_object* x_179; lean_object* x_180; lean_object* x_181; lean_object* x_182; lean_object* x_183; lean_object* x_184; +x_174 = lean_box(0); +x_175 = lean_array_uset(x_132, x_146, x_174); +x_176 = l_Std_DHashMap_Internal_AssocList_replace___at_Lean_importModulesCore_go___spec__6(x_126, x_7, x_147); +x_177 = lean_array_uset(x_175, x_146, x_176); +if (lean_is_scalar(x_133)) { + x_178 = lean_alloc_ctor(0, 2, 0); } else { - x_162 = x_135; + x_178 = x_133; } -lean_ctor_set(x_162, 0, x_152); -lean_ctor_set(x_162, 1, x_161); -x_163 = lean_alloc_ctor(0, 2, 0); -lean_ctor_set(x_163, 0, x_162); -lean_ctor_set(x_163, 1, x_132); -x_164 = lean_st_ref_set(x_6, x_163, x_127); -x_165 = lean_ctor_get(x_164, 1); -lean_inc(x_165); -if (lean_is_exclusive(x_164)) { - lean_ctor_release(x_164, 0); - lean_ctor_release(x_164, 1); - x_166 = x_164; +lean_ctor_set(x_178, 0, x_131); +lean_ctor_set(x_178, 1, x_177); +x_179 = lean_alloc_ctor(0, 2, 0); +lean_ctor_set(x_179, 0, x_178); +lean_ctor_set(x_179, 1, x_130); +x_180 = lean_st_ref_set(x_5, x_179, x_10); +x_181 = lean_ctor_get(x_180, 1); +lean_inc(x_181); +if (lean_is_exclusive(x_180)) { + lean_ctor_release(x_180, 0); + lean_ctor_release(x_180, 1); + x_182 = x_180; } else { - lean_dec_ref(x_164); - x_166 = lean_box(0); + lean_dec_ref(x_180); + x_182 = lean_box(0); } -x_167 = l_Array_forIn_x27Unsafe_loop___at_Lean_importModulesCore_go___spec__10___lambda__1___closed__1; -if (lean_is_scalar(x_166)) { - x_168 = lean_alloc_ctor(0, 2, 0); +x_183 = l_Array_forIn_x27Unsafe_loop___at_Lean_importModulesCore_go___spec__10___lambda__1___closed__1; +if (lean_is_scalar(x_182)) { + x_184 = lean_alloc_ctor(0, 2, 0); } else { - x_168 = x_166; + x_184 = x_182; +} +lean_ctor_set(x_184, 0, x_183); +lean_ctor_set(x_184, 1, x_181); +return x_184; +} } -lean_ctor_set(x_168, 0, x_167); -lean_ctor_set(x_168, 1, x_165); 
-return x_168; } else { -lean_object* x_169; lean_object* x_170; lean_object* x_171; lean_object* x_172; lean_object* x_173; lean_object* x_174; lean_object* x_175; -if (lean_is_scalar(x_135)) { - x_169 = lean_alloc_ctor(0, 2, 0); -} else { - x_169 = x_135; -} -lean_ctor_set(x_169, 0, x_152); -lean_ctor_set(x_169, 1, x_154); -x_170 = lean_alloc_ctor(0, 2, 0); -lean_ctor_set(x_170, 0, x_169); -lean_ctor_set(x_170, 1, x_132); -x_171 = lean_st_ref_set(x_6, x_170, x_127); -x_172 = lean_ctor_get(x_171, 1); -lean_inc(x_172); -if (lean_is_exclusive(x_171)) { - lean_ctor_release(x_171, 0); - lean_ctor_release(x_171, 1); - x_173 = x_171; +lean_object* x_185; lean_object* x_186; lean_object* x_187; lean_object* x_188; uint8_t x_189; lean_object* x_190; lean_object* x_191; lean_object* x_192; lean_object* x_193; lean_object* x_194; lean_object* x_195; lean_object* x_196; lean_object* x_197; lean_object* x_198; uint64_t x_199; uint64_t x_200; uint64_t x_201; uint64_t x_202; uint64_t x_203; uint64_t x_204; uint64_t x_205; size_t x_206; size_t x_207; size_t x_208; size_t x_209; size_t x_210; lean_object* x_211; uint8_t x_212; +x_185 = lean_ctor_get(x_7, 0); +x_186 = lean_ctor_get(x_7, 1); +lean_inc(x_186); +lean_inc(x_185); +lean_dec(x_7); +x_187 = lean_ctor_get(x_185, 0); +lean_inc(x_187); +x_188 = lean_ctor_get(x_1, 0); +lean_inc(x_188); +x_189 = lean_ctor_get_uint8(x_1, sizeof(void*)*1 + 1); +if (lean_is_exclusive(x_1)) { + lean_ctor_release(x_1, 0); + x_190 = x_1; } else { - lean_dec_ref(x_171); - x_173 = lean_box(0); + lean_dec_ref(x_1); + x_190 = lean_box(0); +} +lean_inc(x_188); +if (lean_is_scalar(x_190)) { + x_191 = lean_alloc_ctor(0, 1, 2); +} else { + x_191 = x_190; +} +lean_ctor_set(x_191, 0, x_188); +lean_ctor_set_uint8(x_191, sizeof(void*)*1, x_2); +lean_ctor_set_uint8(x_191, sizeof(void*)*1 + 1, x_189); +x_192 = lean_alloc_ctor(0, 2, 0); +lean_ctor_set(x_192, 0, x_191); +lean_ctor_set(x_192, 1, x_3); +x_193 = lean_ctor_get(x_185, 1); +lean_inc(x_193); +lean_dec(x_185); +lean_inc(x_188); +x_194 = lean_array_push(x_193, x_188); +x_195 = lean_ctor_get(x_187, 0); +lean_inc(x_195); +x_196 = lean_ctor_get(x_187, 1); +lean_inc(x_196); +if (lean_is_exclusive(x_187)) { + lean_ctor_release(x_187, 0); + lean_ctor_release(x_187, 1); + x_197 = x_187; +} else { + lean_dec_ref(x_187); + x_197 = lean_box(0); +} +x_198 = lean_array_get_size(x_196); +x_199 = l_Lean_Name_hash___override(x_188); +x_200 = 32; +x_201 = lean_uint64_shift_right(x_199, x_200); +x_202 = lean_uint64_xor(x_199, x_201); +x_203 = 16; +x_204 = lean_uint64_shift_right(x_202, x_203); +x_205 = lean_uint64_xor(x_202, x_204); +x_206 = lean_uint64_to_usize(x_205); +x_207 = lean_usize_of_nat(x_198); +lean_dec(x_198); +x_208 = 1; +x_209 = lean_usize_sub(x_207, x_208); +x_210 = lean_usize_land(x_206, x_209); +x_211 = lean_array_uget(x_196, x_210); +x_212 = l_Std_DHashMap_Internal_AssocList_contains___at_Lean_importModulesCore_go___spec__2(x_188, x_211); +if (x_212 == 0) +{ +lean_object* x_213; lean_object* x_214; lean_object* x_215; lean_object* x_216; lean_object* x_217; lean_object* x_218; lean_object* x_219; lean_object* x_220; lean_object* x_221; uint8_t x_222; +x_213 = lean_unsigned_to_nat(1u); +x_214 = lean_nat_add(x_195, x_213); +lean_dec(x_195); +x_215 = lean_alloc_ctor(1, 3, 0); +lean_ctor_set(x_215, 0, x_188); +lean_ctor_set(x_215, 1, x_192); +lean_ctor_set(x_215, 2, x_211); +x_216 = lean_array_uset(x_196, x_210, x_215); +x_217 = lean_unsigned_to_nat(4u); +x_218 = lean_nat_mul(x_214, x_217); +x_219 = lean_unsigned_to_nat(3u); +x_220 = 
lean_nat_div(x_218, x_219); +lean_dec(x_218); +x_221 = lean_array_get_size(x_216); +x_222 = lean_nat_dec_le(x_220, x_221); +lean_dec(x_221); +lean_dec(x_220); +if (x_222 == 0) +{ +lean_object* x_223; lean_object* x_224; lean_object* x_225; lean_object* x_226; lean_object* x_227; lean_object* x_228; lean_object* x_229; lean_object* x_230; +x_223 = l_Std_DHashMap_Internal_Raw_u2080_expand___at_Lean_importModulesCore_go___spec__3(x_216); +if (lean_is_scalar(x_197)) { + x_224 = lean_alloc_ctor(0, 2, 0); +} else { + x_224 = x_197; } -x_174 = l_Array_forIn_x27Unsafe_loop___at_Lean_importModulesCore_go___spec__10___lambda__1___closed__1; -if (lean_is_scalar(x_173)) { - x_175 = lean_alloc_ctor(0, 2, 0); +lean_ctor_set(x_224, 0, x_214); +lean_ctor_set(x_224, 1, x_223); +x_225 = lean_alloc_ctor(0, 2, 0); +lean_ctor_set(x_225, 0, x_224); +lean_ctor_set(x_225, 1, x_194); +x_226 = lean_st_ref_set(x_5, x_225, x_186); +x_227 = lean_ctor_get(x_226, 1); +lean_inc(x_227); +if (lean_is_exclusive(x_226)) { + lean_ctor_release(x_226, 0); + lean_ctor_release(x_226, 1); + x_228 = x_226; } else { - x_175 = x_173; + lean_dec_ref(x_226); + x_228 = lean_box(0); } -lean_ctor_set(x_175, 0, x_174); -lean_ctor_set(x_175, 1, x_172); -return x_175; +x_229 = l_Array_forIn_x27Unsafe_loop___at_Lean_importModulesCore_go___spec__10___lambda__1___closed__1; +if (lean_is_scalar(x_228)) { + x_230 = lean_alloc_ctor(0, 2, 0); +} else { + x_230 = x_228; } +lean_ctor_set(x_230, 0, x_229); +lean_ctor_set(x_230, 1, x_227); +return x_230; } else { -lean_object* x_176; lean_object* x_177; lean_object* x_178; lean_object* x_179; lean_object* x_180; lean_object* x_181; lean_object* x_182; lean_object* x_183; lean_object* x_184; lean_object* x_185; lean_object* x_186; -x_176 = lean_box(0); -x_177 = lean_array_uset(x_134, x_148, x_176); -x_178 = l_Std_DHashMap_Internal_AssocList_replace___at_Lean_importModulesCore_go___spec__6(x_1, x_130, x_149); -x_179 = lean_array_uset(x_177, x_148, x_178); -if (lean_is_scalar(x_135)) { - x_180 = lean_alloc_ctor(0, 2, 0); +lean_object* x_231; lean_object* x_232; lean_object* x_233; lean_object* x_234; lean_object* x_235; lean_object* x_236; lean_object* x_237; +if (lean_is_scalar(x_197)) { + x_231 = lean_alloc_ctor(0, 2, 0); } else { - x_180 = x_135; + x_231 = x_197; } -lean_ctor_set(x_180, 0, x_133); -lean_ctor_set(x_180, 1, x_179); -x_181 = lean_alloc_ctor(0, 2, 0); -lean_ctor_set(x_181, 0, x_180); -lean_ctor_set(x_181, 1, x_132); -x_182 = lean_st_ref_set(x_6, x_181, x_127); -x_183 = lean_ctor_get(x_182, 1); -lean_inc(x_183); -if (lean_is_exclusive(x_182)) { - lean_ctor_release(x_182, 0); - lean_ctor_release(x_182, 1); - x_184 = x_182; +lean_ctor_set(x_231, 0, x_214); +lean_ctor_set(x_231, 1, x_216); +x_232 = lean_alloc_ctor(0, 2, 0); +lean_ctor_set(x_232, 0, x_231); +lean_ctor_set(x_232, 1, x_194); +x_233 = lean_st_ref_set(x_5, x_232, x_186); +x_234 = lean_ctor_get(x_233, 1); +lean_inc(x_234); +if (lean_is_exclusive(x_233)) { + lean_ctor_release(x_233, 0); + lean_ctor_release(x_233, 1); + x_235 = x_233; +} else { + lean_dec_ref(x_233); + x_235 = lean_box(0); +} +x_236 = l_Array_forIn_x27Unsafe_loop___at_Lean_importModulesCore_go___spec__10___lambda__1___closed__1; +if (lean_is_scalar(x_235)) { + x_237 = lean_alloc_ctor(0, 2, 0); +} else { + x_237 = x_235; +} +lean_ctor_set(x_237, 0, x_236); +lean_ctor_set(x_237, 1, x_234); +return x_237; +} +} +else +{ +lean_object* x_238; lean_object* x_239; lean_object* x_240; lean_object* x_241; lean_object* x_242; lean_object* x_243; lean_object* x_244; lean_object* 
x_245; lean_object* x_246; lean_object* x_247; lean_object* x_248; +x_238 = lean_box(0); +x_239 = lean_array_uset(x_196, x_210, x_238); +x_240 = l_Std_DHashMap_Internal_AssocList_replace___at_Lean_importModulesCore_go___spec__6(x_188, x_192, x_211); +x_241 = lean_array_uset(x_239, x_210, x_240); +if (lean_is_scalar(x_197)) { + x_242 = lean_alloc_ctor(0, 2, 0); } else { - lean_dec_ref(x_182); - x_184 = lean_box(0); + x_242 = x_197; } -x_185 = l_Array_forIn_x27Unsafe_loop___at_Lean_importModulesCore_go___spec__10___lambda__1___closed__1; -if (lean_is_scalar(x_184)) { - x_186 = lean_alloc_ctor(0, 2, 0); +lean_ctor_set(x_242, 0, x_195); +lean_ctor_set(x_242, 1, x_241); +x_243 = lean_alloc_ctor(0, 2, 0); +lean_ctor_set(x_243, 0, x_242); +lean_ctor_set(x_243, 1, x_194); +x_244 = lean_st_ref_set(x_5, x_243, x_186); +x_245 = lean_ctor_get(x_244, 1); +lean_inc(x_245); +if (lean_is_exclusive(x_244)) { + lean_ctor_release(x_244, 0); + lean_ctor_release(x_244, 1); + x_246 = x_244; } else { - x_186 = x_184; + lean_dec_ref(x_244); + x_246 = lean_box(0); +} +x_247 = l_Array_forIn_x27Unsafe_loop___at_Lean_importModulesCore_go___spec__10___lambda__1___closed__1; +if (lean_is_scalar(x_246)) { + x_248 = lean_alloc_ctor(0, 2, 0); +} else { + x_248 = x_246; } -lean_ctor_set(x_186, 0, x_185); -lean_ctor_set(x_186, 1, x_183); -return x_186; +lean_ctor_set(x_248, 0, x_247); +lean_ctor_set(x_248, 1, x_245); +return x_248; } } } @@ -25986,306 +26075,310 @@ static lean_object* _init_l_Array_forIn_x27Unsafe_loop___at_Lean_importModulesCo _start: { lean_object* x_1; lean_object* x_2; lean_object* x_3; lean_object* x_4; lean_object* x_5; lean_object* x_6; -x_1 = l___private_Lean_Environment_0__Lean_AsyncConsts_add___closed__4; +x_1 = l___private_Lean_Environment_0__Lean_AsyncConsts_add___closed__5; x_2 = l_Array_forIn_x27Unsafe_loop___at_Lean_importModulesCore_go___spec__10___lambda__2___closed__1; -x_3 = lean_unsigned_to_nat(1829u); +x_3 = lean_unsigned_to_nat(1845u); x_4 = lean_unsigned_to_nat(41u); x_5 = l_Lean_readModuleData___closed__5; x_6 = l___private_Init_Util_0__mkPanicMessageWithDecl(x_1, x_2, x_3, x_4, x_5); return x_6; } } -LEAN_EXPORT lean_object* l_Array_forIn_x27Unsafe_loop___at_Lean_importModulesCore_go___spec__10___lambda__2(uint8_t x_1, lean_object* x_2, uint8_t x_3, uint8_t x_4, lean_object* x_5, lean_object* x_6, lean_object* x_7, lean_object* x_8, lean_object* x_9, lean_object* x_10) { +LEAN_EXPORT lean_object* l_Array_forIn_x27Unsafe_loop___at_Lean_importModulesCore_go___spec__10___lambda__2(uint8_t x_1, lean_object* x_2, uint8_t x_3, lean_object* x_4, lean_object* x_5, lean_object* x_6, lean_object* x_7) { _start: { -lean_object* x_11; -x_11 = lean_read_module_data_parts(x_7, x_10); -if (lean_obj_tag(x_11) == 0) +lean_object* x_8; +x_8 = lean_read_module_data_parts(x_5, x_7); +if (lean_obj_tag(x_8) == 0) { -lean_object* x_12; lean_object* x_13; lean_object* x_14; lean_object* x_15; uint8_t x_16; -x_12 = lean_ctor_get(x_11, 0); -lean_inc(x_12); -x_13 = lean_ctor_get(x_11, 1); -lean_inc(x_13); +lean_object* x_9; lean_object* x_10; lean_object* x_11; lean_object* x_12; uint8_t x_13; +x_9 = lean_ctor_get(x_8, 0); +lean_inc(x_9); +x_10 = lean_ctor_get(x_8, 1); +lean_inc(x_10); +lean_dec(x_8); +x_11 = lean_array_get_size(x_9); +x_12 = lean_unsigned_to_nat(0u); +x_13 = lean_nat_dec_lt(x_12, x_11); lean_dec(x_11); -x_14 = lean_array_get_size(x_12); -x_15 = lean_unsigned_to_nat(0u); -x_16 = lean_nat_dec_lt(x_15, x_14); -lean_dec(x_14); -if (x_16 == 0) +if (x_13 == 0) { -lean_object* x_17; lean_object* 
x_18; -lean_dec(x_12); -lean_dec(x_6); +lean_object* x_14; lean_object* x_15; +lean_dec(x_9); lean_dec(x_2); -x_17 = l_Array_forIn_x27Unsafe_loop___at_Lean_importModulesCore_go___spec__10___lambda__2___closed__2; -x_18 = l_panic___at_Lean_importModulesCore_go___spec__1(x_17, x_9, x_13); -if (lean_obj_tag(x_18) == 0) +x_14 = l_Array_forIn_x27Unsafe_loop___at_Lean_importModulesCore_go___spec__10___lambda__2___closed__2; +x_15 = l_panic___at_Lean_importModulesCore_go___spec__1(x_14, x_6, x_10); +if (lean_obj_tag(x_15) == 0) { -uint8_t x_19; -x_19 = !lean_is_exclusive(x_18); -if (x_19 == 0) +uint8_t x_16; +x_16 = !lean_is_exclusive(x_15); +if (x_16 == 0) { -lean_object* x_20; lean_object* x_21; -x_20 = lean_ctor_get(x_18, 0); -lean_dec(x_20); -x_21 = l_Array_forIn_x27Unsafe_loop___at_Lean_importModulesCore_go___spec__10___lambda__1___closed__1; -lean_ctor_set(x_18, 0, x_21); -return x_18; +lean_object* x_17; lean_object* x_18; +x_17 = lean_ctor_get(x_15, 0); +lean_dec(x_17); +x_18 = l_Array_forIn_x27Unsafe_loop___at_Lean_importModulesCore_go___spec__10___lambda__1___closed__1; +lean_ctor_set(x_15, 0, x_18); +return x_15; } else { -lean_object* x_22; lean_object* x_23; lean_object* x_24; -x_22 = lean_ctor_get(x_18, 1); -lean_inc(x_22); -lean_dec(x_18); -x_23 = l_Array_forIn_x27Unsafe_loop___at_Lean_importModulesCore_go___spec__10___lambda__1___closed__1; -x_24 = lean_alloc_ctor(0, 2, 0); -lean_ctor_set(x_24, 0, x_23); -lean_ctor_set(x_24, 1, x_22); -return x_24; +lean_object* x_19; lean_object* x_20; lean_object* x_21; +x_19 = lean_ctor_get(x_15, 1); +lean_inc(x_19); +lean_dec(x_15); +x_20 = l_Array_forIn_x27Unsafe_loop___at_Lean_importModulesCore_go___spec__10___lambda__1___closed__1; +x_21 = lean_alloc_ctor(0, 2, 0); +lean_ctor_set(x_21, 0, x_20); +lean_ctor_set(x_21, 1, x_19); +return x_21; } } else { -uint8_t x_25; -x_25 = !lean_is_exclusive(x_18); -if (x_25 == 0) +uint8_t x_22; +x_22 = !lean_is_exclusive(x_15); +if (x_22 == 0) { -return x_18; +return x_15; } else { -lean_object* x_26; lean_object* x_27; lean_object* x_28; -x_26 = lean_ctor_get(x_18, 0); -x_27 = lean_ctor_get(x_18, 1); -lean_inc(x_27); -lean_inc(x_26); -lean_dec(x_18); -x_28 = lean_alloc_ctor(1, 2, 0); -lean_ctor_set(x_28, 0, x_26); -lean_ctor_set(x_28, 1, x_27); -return x_28; +lean_object* x_23; lean_object* x_24; lean_object* x_25; +x_23 = lean_ctor_get(x_15, 0); +x_24 = lean_ctor_get(x_15, 1); +lean_inc(x_24); +lean_inc(x_23); +lean_dec(x_15); +x_25 = lean_alloc_ctor(1, 2, 0); +lean_ctor_set(x_25, 0, x_23); +lean_ctor_set(x_25, 1, x_24); +return x_25; } } } else { -lean_object* x_29; lean_object* x_30; lean_object* x_31; lean_object* x_69; lean_object* x_70; lean_object* x_71; uint8_t x_72; -x_29 = lean_array_fget(x_12, x_15); -x_30 = lean_ctor_get(x_29, 0); -lean_inc(x_30); -lean_dec(x_29); -x_69 = lean_ctor_get(x_30, 0); -lean_inc(x_69); -x_70 = lean_array_get_size(x_69); -x_71 = lean_array_mk(x_6); -x_72 = lean_nat_dec_lt(x_15, x_70); -if (x_72 == 0) +lean_object* x_26; lean_object* x_27; lean_object* x_28; lean_object* x_29; uint8_t x_30; lean_object* x_31; lean_object* x_32; +x_26 = lean_array_fget(x_9, x_12); +x_27 = lean_ctor_get(x_26, 0); +lean_inc(x_27); +lean_dec(x_26); +x_28 = lean_ctor_get(x_27, 0); +lean_inc(x_28); +x_29 = lean_array_get_size(x_28); +x_30 = lean_nat_dec_lt(x_12, x_29); +x_31 = lean_box(0); +if (x_30 == 0) { -lean_dec(x_70); -lean_dec(x_69); -x_31 = x_71; -goto block_68; +lean_object* x_70; +lean_dec(x_29); +lean_dec(x_28); +x_70 = 
l_Lean_EnvExtension_initFn____x40_Lean_Environment___hyg_6645____closed__1; +x_32 = x_70; +goto block_69; } else { -uint8_t x_73; -x_73 = lean_nat_dec_le(x_70, x_70); -if (x_73 == 0) +uint8_t x_71; +x_71 = lean_nat_dec_le(x_29, x_29); +if (x_71 == 0) { -lean_dec(x_70); -lean_dec(x_69); -x_31 = x_71; -goto block_68; +lean_object* x_72; +lean_dec(x_29); +lean_dec(x_28); +x_72 = l_Lean_EnvExtension_initFn____x40_Lean_Environment___hyg_6645____closed__1; +x_32 = x_72; +goto block_69; } else { -size_t x_74; size_t x_75; lean_object* x_76; -x_74 = 0; -x_75 = lean_usize_of_nat(x_70); -lean_dec(x_70); -x_76 = l_Array_foldlMUnsafe_fold___at_Lean_importModulesCore_go___spec__9(x_3, x_69, x_74, x_75, x_71); -lean_dec(x_69); -x_31 = x_76; -goto block_68; +size_t x_73; size_t x_74; lean_object* x_75; lean_object* x_76; +x_73 = 0; +x_74 = lean_usize_of_nat(x_29); +lean_dec(x_29); +x_75 = l_Lean_EnvExtension_initFn____x40_Lean_Environment___hyg_6645____closed__1; +x_76 = l_Array_foldlMUnsafe_fold___at_Lean_importModulesCore_go___spec__9(x_3, x_28, x_73, x_74, x_75); +lean_dec(x_28); +x_32 = x_76; +goto block_69; } } -block_68: +block_69: { if (x_1 == 0) { -uint8_t x_32; -x_32 = lean_ctor_get_uint8(x_30, sizeof(void*)*5); -lean_dec(x_30); -if (x_32 == 0) +uint8_t x_33; +x_33 = lean_ctor_get_uint8(x_27, sizeof(void*)*5); +lean_dec(x_27); +if (x_33 == 0) { -uint8_t x_33; lean_object* x_34; -x_33 = 1; -lean_inc(x_9); -x_34 = l_Lean_importModulesCore_go(x_31, x_33, x_9, x_13); -lean_dec(x_31); -if (lean_obj_tag(x_34) == 0) +uint8_t x_34; lean_object* x_35; +x_34 = 1; +lean_inc(x_6); +x_35 = l_Lean_importModulesCore_go(x_32, x_34, x_31, x_6, x_10); +lean_dec(x_32); +if (lean_obj_tag(x_35) == 0) { -lean_object* x_35; lean_object* x_36; lean_object* x_37; -x_35 = lean_ctor_get(x_34, 1); -lean_inc(x_35); -lean_dec(x_34); -x_36 = lean_box(0); -x_37 = l_Array_forIn_x27Unsafe_loop___at_Lean_importModulesCore_go___spec__10___lambda__1(x_2, x_3, x_4, x_12, x_36, x_9, x_35); -lean_dec(x_9); -return x_37; +lean_object* x_36; lean_object* x_37; lean_object* x_38; +x_36 = lean_ctor_get(x_35, 1); +lean_inc(x_36); +lean_dec(x_35); +x_37 = lean_box(0); +x_38 = l_Array_forIn_x27Unsafe_loop___at_Lean_importModulesCore_go___spec__10___lambda__1(x_2, x_3, x_9, x_37, x_6, x_36); +lean_dec(x_6); +return x_38; } else { -uint8_t x_38; -lean_dec(x_12); +uint8_t x_39; lean_dec(x_9); +lean_dec(x_6); lean_dec(x_2); -x_38 = !lean_is_exclusive(x_34); -if (x_38 == 0) +x_39 = !lean_is_exclusive(x_35); +if (x_39 == 0) { -return x_34; +return x_35; } else { -lean_object* x_39; lean_object* x_40; lean_object* x_41; -x_39 = lean_ctor_get(x_34, 0); -x_40 = lean_ctor_get(x_34, 1); +lean_object* x_40; lean_object* x_41; lean_object* x_42; +x_40 = lean_ctor_get(x_35, 0); +x_41 = lean_ctor_get(x_35, 1); +lean_inc(x_41); lean_inc(x_40); -lean_inc(x_39); -lean_dec(x_34); -x_41 = lean_alloc_ctor(1, 2, 0); -lean_ctor_set(x_41, 0, x_39); -lean_ctor_set(x_41, 1, x_40); -return x_41; +lean_dec(x_35); +x_42 = lean_alloc_ctor(1, 2, 0); +lean_ctor_set(x_42, 0, x_40); +lean_ctor_set(x_42, 1, x_41); +return x_42; } } } else { -uint8_t x_42; lean_object* x_43; -x_42 = 0; -lean_inc(x_9); -x_43 = l_Lean_importModulesCore_go(x_31, x_42, x_9, x_13); -if (lean_obj_tag(x_43) == 0) +uint8_t x_43; lean_object* x_44; +x_43 = 0; +lean_inc(x_6); +x_44 = l_Lean_importModulesCore_go(x_32, x_43, x_31, x_6, x_10); +if (lean_obj_tag(x_44) == 0) { -lean_object* x_44; size_t x_45; size_t x_46; lean_object* x_47; lean_object* x_48; -x_44 = lean_ctor_get(x_43, 1); -lean_inc(x_44); 
-lean_dec(x_43); -x_45 = lean_array_size(x_31); -x_46 = 0; -x_47 = lean_box(0); +lean_object* x_45; size_t x_46; size_t x_47; lean_object* x_48; lean_object* x_49; +x_45 = lean_ctor_get(x_44, 1); +lean_inc(x_45); +lean_dec(x_44); +x_46 = lean_array_size(x_32); +x_47 = 0; +x_48 = lean_box(0); lean_inc(x_2); -x_48 = l_Array_forIn_x27Unsafe_loop___at_Lean_importModulesCore_go___spec__8(x_5, x_2, x_31, x_31, x_45, x_46, x_47, x_9, x_44); -lean_dec(x_31); -if (lean_obj_tag(x_48) == 0) +x_49 = l_Array_forIn_x27Unsafe_loop___at_Lean_importModulesCore_go___spec__8(x_4, x_2, x_32, x_32, x_46, x_47, x_48, x_6, x_45); +lean_dec(x_32); +if (lean_obj_tag(x_49) == 0) { -lean_object* x_49; lean_object* x_50; -x_49 = lean_ctor_get(x_48, 1); -lean_inc(x_49); -lean_dec(x_48); -x_50 = l_Array_forIn_x27Unsafe_loop___at_Lean_importModulesCore_go___spec__10___lambda__1(x_2, x_3, x_4, x_12, x_47, x_9, x_49); -lean_dec(x_9); -return x_50; +lean_object* x_50; lean_object* x_51; +x_50 = lean_ctor_get(x_49, 1); +lean_inc(x_50); +lean_dec(x_49); +x_51 = l_Array_forIn_x27Unsafe_loop___at_Lean_importModulesCore_go___spec__10___lambda__1(x_2, x_3, x_9, x_48, x_6, x_50); +lean_dec(x_6); +return x_51; } else { -uint8_t x_51; -lean_dec(x_12); +uint8_t x_52; lean_dec(x_9); +lean_dec(x_6); lean_dec(x_2); -x_51 = !lean_is_exclusive(x_48); -if (x_51 == 0) +x_52 = !lean_is_exclusive(x_49); +if (x_52 == 0) { -return x_48; +return x_49; } else { -lean_object* x_52; lean_object* x_53; lean_object* x_54; -x_52 = lean_ctor_get(x_48, 0); -x_53 = lean_ctor_get(x_48, 1); +lean_object* x_53; lean_object* x_54; lean_object* x_55; +x_53 = lean_ctor_get(x_49, 0); +x_54 = lean_ctor_get(x_49, 1); +lean_inc(x_54); lean_inc(x_53); -lean_inc(x_52); -lean_dec(x_48); -x_54 = lean_alloc_ctor(1, 2, 0); -lean_ctor_set(x_54, 0, x_52); -lean_ctor_set(x_54, 1, x_53); -return x_54; +lean_dec(x_49); +x_55 = lean_alloc_ctor(1, 2, 0); +lean_ctor_set(x_55, 0, x_53); +lean_ctor_set(x_55, 1, x_54); +return x_55; } } } else { -uint8_t x_55; -lean_dec(x_31); -lean_dec(x_12); +uint8_t x_56; +lean_dec(x_32); lean_dec(x_9); +lean_dec(x_6); lean_dec(x_2); -x_55 = !lean_is_exclusive(x_43); -if (x_55 == 0) +x_56 = !lean_is_exclusive(x_44); +if (x_56 == 0) { -return x_43; +return x_44; } else { -lean_object* x_56; lean_object* x_57; lean_object* x_58; -x_56 = lean_ctor_get(x_43, 0); -x_57 = lean_ctor_get(x_43, 1); +lean_object* x_57; lean_object* x_58; lean_object* x_59; +x_57 = lean_ctor_get(x_44, 0); +x_58 = lean_ctor_get(x_44, 1); +lean_inc(x_58); lean_inc(x_57); -lean_inc(x_56); -lean_dec(x_43); -x_58 = lean_alloc_ctor(1, 2, 0); -lean_ctor_set(x_58, 0, x_56); -lean_ctor_set(x_58, 1, x_57); -return x_58; +lean_dec(x_44); +x_59 = lean_alloc_ctor(1, 2, 0); +lean_ctor_set(x_59, 0, x_57); +lean_ctor_set(x_59, 1, x_58); +return x_59; } } } } else { -uint8_t x_59; lean_object* x_60; -lean_dec(x_30); -x_59 = 1; -lean_inc(x_9); -x_60 = l_Lean_importModulesCore_go(x_31, x_59, x_9, x_13); -lean_dec(x_31); -if (lean_obj_tag(x_60) == 0) +uint8_t x_60; lean_object* x_61; +lean_dec(x_27); +x_60 = 1; +lean_inc(x_6); +x_61 = l_Lean_importModulesCore_go(x_32, x_60, x_31, x_6, x_10); +lean_dec(x_32); +if (lean_obj_tag(x_61) == 0) { -lean_object* x_61; lean_object* x_62; lean_object* x_63; -x_61 = lean_ctor_get(x_60, 1); -lean_inc(x_61); -lean_dec(x_60); -x_62 = lean_box(0); -x_63 = l_Array_forIn_x27Unsafe_loop___at_Lean_importModulesCore_go___spec__10___lambda__1(x_2, x_3, x_4, x_12, x_62, x_9, x_61); -lean_dec(x_9); -return x_63; +lean_object* x_62; lean_object* x_63; lean_object* 
x_64; +x_62 = lean_ctor_get(x_61, 1); +lean_inc(x_62); +lean_dec(x_61); +x_63 = lean_box(0); +x_64 = l_Array_forIn_x27Unsafe_loop___at_Lean_importModulesCore_go___spec__10___lambda__1(x_2, x_3, x_9, x_63, x_6, x_62); +lean_dec(x_6); +return x_64; } else { -uint8_t x_64; -lean_dec(x_12); +uint8_t x_65; lean_dec(x_9); +lean_dec(x_6); lean_dec(x_2); -x_64 = !lean_is_exclusive(x_60); -if (x_64 == 0) +x_65 = !lean_is_exclusive(x_61); +if (x_65 == 0) { -return x_60; +return x_61; } else { -lean_object* x_65; lean_object* x_66; lean_object* x_67; -x_65 = lean_ctor_get(x_60, 0); -x_66 = lean_ctor_get(x_60, 1); +lean_object* x_66; lean_object* x_67; lean_object* x_68; +x_66 = lean_ctor_get(x_61, 0); +x_67 = lean_ctor_get(x_61, 1); +lean_inc(x_67); lean_inc(x_66); -lean_inc(x_65); -lean_dec(x_60); -x_67 = lean_alloc_ctor(1, 2, 0); -lean_ctor_set(x_67, 0, x_65); -lean_ctor_set(x_67, 1, x_66); -return x_67; +lean_dec(x_61); +x_68 = lean_alloc_ctor(1, 2, 0); +lean_ctor_set(x_68, 0, x_66); +lean_ctor_set(x_68, 1, x_67); +return x_68; } } } @@ -26295,22 +26388,21 @@ return x_67; else { uint8_t x_77; -lean_dec(x_9); lean_dec(x_6); lean_dec(x_2); -x_77 = !lean_is_exclusive(x_11); +x_77 = !lean_is_exclusive(x_8); if (x_77 == 0) { -return x_11; +return x_8; } else { lean_object* x_78; lean_object* x_79; lean_object* x_80; -x_78 = lean_ctor_get(x_11, 0); -x_79 = lean_ctor_get(x_11, 1); +x_78 = lean_ctor_get(x_8, 0); +x_79 = lean_ctor_get(x_8, 1); lean_inc(x_79); lean_inc(x_78); -lean_dec(x_11); +lean_dec(x_8); x_80 = lean_alloc_ctor(1, 2, 0); lean_ctor_set(x_80, 0, x_78); lean_ctor_set(x_80, 1, x_79); @@ -26319,216 +26411,116 @@ return x_80; } } } -LEAN_EXPORT lean_object* l_Array_forIn_x27Unsafe_loop___at_Lean_importModulesCore_go___spec__10___lambda__3(lean_object* x_1, uint8_t x_2, lean_object* x_3, uint8_t x_4, uint8_t x_5, lean_object* x_6, lean_object* x_7, lean_object* x_8, lean_object* x_9) { +LEAN_EXPORT lean_object* l_Array_forIn_x27Unsafe_loop___at_Lean_importModulesCore_go___spec__10___lambda__3(uint8_t x_1, lean_object* x_2, uint8_t x_3, lean_object* x_4, lean_object* x_5, lean_object* x_6, lean_object* x_7, lean_object* x_8) { _start: { -lean_object* x_10; lean_object* x_11; lean_object* x_12; uint8_t x_13; lean_object* x_14; lean_object* x_15; lean_object* x_16; uint8_t x_17; -x_10 = lean_box(0); -lean_inc(x_1); -x_11 = lean_alloc_ctor(1, 2, 0); -lean_ctor_set(x_11, 0, x_1); -lean_ctor_set(x_11, 1, x_10); -x_12 = lean_array_mk(x_11); -x_13 = 1; -lean_inc(x_1); -x_14 = l_Lean_OLeanLevel_adjustFileName(x_1, x_13); -x_15 = l_System_FilePath_pathExists(x_14, x_9); -x_16 = lean_ctor_get(x_15, 0); -lean_inc(x_16); -x_17 = lean_unbox(x_16); -lean_dec(x_16); -if (x_17 == 0) +lean_object* x_9; lean_object* x_10; +x_9 = lean_ctor_get(x_2, 0); +lean_inc(x_9); +x_10 = l_Lean_RBNode_find___at_Lean_NameMap_find_x3f___spec__1___rarg(x_5, x_9); +if (lean_obj_tag(x_10) == 0) { -lean_object* x_18; lean_object* x_19; lean_object* x_20; -lean_dec(x_14); -lean_dec(x_1); -x_18 = lean_ctor_get(x_15, 1); -lean_inc(x_18); -lean_dec(x_15); -x_19 = lean_box(0); -x_20 = l_Array_forIn_x27Unsafe_loop___at_Lean_importModulesCore_go___spec__10___lambda__2(x_2, x_3, x_4, x_5, x_6, x_10, x_12, x_19, x_8, x_18); +lean_object* x_11; +x_11 = l___private_Lean_Environment_0__Lean_findOLeanParts(x_9, x_8); +if (lean_obj_tag(x_11) == 0) +{ +lean_object* x_12; lean_object* x_13; lean_object* x_14; +x_12 = lean_ctor_get(x_11, 0); +lean_inc(x_12); +x_13 = lean_ctor_get(x_11, 1); +lean_inc(x_13); +lean_dec(x_11); +x_14 = 
l_Array_forIn_x27Unsafe_loop___at_Lean_importModulesCore_go___spec__10___lambda__2(x_1, x_2, x_3, x_4, x_12, x_7, x_13); lean_dec(x_12); -return x_20; +return x_14; } else { -lean_object* x_21; lean_object* x_22; uint8_t x_23; lean_object* x_24; lean_object* x_25; lean_object* x_26; uint8_t x_27; -x_21 = lean_ctor_get(x_15, 1); -lean_inc(x_21); -lean_dec(x_15); -x_22 = lean_array_push(x_12, x_14); -x_23 = 2; -x_24 = l_Lean_OLeanLevel_adjustFileName(x_1, x_23); -x_25 = l_System_FilePath_pathExists(x_24, x_21); -x_26 = lean_ctor_get(x_25, 0); -lean_inc(x_26); -x_27 = lean_unbox(x_26); -lean_dec(x_26); -if (x_27 == 0) +uint8_t x_15; +lean_dec(x_7); +lean_dec(x_2); +x_15 = !lean_is_exclusive(x_11); +if (x_15 == 0) { -lean_object* x_28; lean_object* x_29; lean_object* x_30; -lean_dec(x_24); -x_28 = lean_ctor_get(x_25, 1); -lean_inc(x_28); -lean_dec(x_25); -x_29 = lean_box(0); -x_30 = l_Array_forIn_x27Unsafe_loop___at_Lean_importModulesCore_go___spec__10___lambda__2(x_2, x_3, x_4, x_5, x_6, x_10, x_22, x_29, x_8, x_28); -lean_dec(x_22); -return x_30; +return x_11; } else { -lean_object* x_31; lean_object* x_32; lean_object* x_33; lean_object* x_34; -x_31 = lean_ctor_get(x_25, 1); -lean_inc(x_31); -lean_dec(x_25); -x_32 = lean_array_push(x_22, x_24); -x_33 = lean_box(0); -x_34 = l_Array_forIn_x27Unsafe_loop___at_Lean_importModulesCore_go___spec__10___lambda__2(x_2, x_3, x_4, x_5, x_6, x_10, x_32, x_33, x_8, x_31); -lean_dec(x_32); -return x_34; -} -} -} -} -static lean_object* _init_l_Array_forIn_x27Unsafe_loop___at_Lean_importModulesCore_go___spec__10___lambda__4___closed__1() { -_start: -{ -lean_object* x_1; -x_1 = lean_mk_string_unchecked("object file '", 13, 13); -return x_1; -} -} -static lean_object* _init_l_Array_forIn_x27Unsafe_loop___at_Lean_importModulesCore_go___spec__10___lambda__4___closed__2() { -_start: -{ -lean_object* x_1; -x_1 = lean_mk_string_unchecked("' of module ", 12, 12); -return x_1; -} +lean_object* x_16; lean_object* x_17; lean_object* x_18; +x_16 = lean_ctor_get(x_11, 0); +x_17 = lean_ctor_get(x_11, 1); +lean_inc(x_17); +lean_inc(x_16); +lean_dec(x_11); +x_18 = lean_alloc_ctor(1, 2, 0); +lean_ctor_set(x_18, 0, x_16); +lean_ctor_set(x_18, 1, x_17); +return x_18; } -static lean_object* _init_l_Array_forIn_x27Unsafe_loop___at_Lean_importModulesCore_go___spec__10___lambda__4___closed__3() { -_start: -{ -lean_object* x_1; -x_1 = lean_mk_string_unchecked(" does not exist", 15, 15); -return x_1; } } -LEAN_EXPORT lean_object* l_Array_forIn_x27Unsafe_loop___at_Lean_importModulesCore_go___spec__10___lambda__4(lean_object* x_1, uint8_t x_2, uint8_t x_3, lean_object* x_4, lean_object* x_5, lean_object* x_6, lean_object* x_7) { -_start: -{ -lean_object* x_8; uint8_t x_9; lean_object* x_10; -x_8 = lean_ctor_get(x_1, 0); -lean_inc(x_8); -x_9 = lean_ctor_get_uint8(x_1, sizeof(void*)*1 + 1); -lean_dec(x_1); -x_10 = l_Lean_findOLean(x_8, x_7); -if (lean_obj_tag(x_10) == 0) +else { -lean_object* x_11; lean_object* x_12; lean_object* x_13; lean_object* x_14; uint8_t x_15; -x_11 = lean_ctor_get(x_10, 0); -lean_inc(x_11); -x_12 = lean_ctor_get(x_10, 1); -lean_inc(x_12); +lean_object* x_19; lean_object* x_20; uint8_t x_21; +x_19 = lean_ctor_get(x_10, 0); +lean_inc(x_19); lean_dec(x_10); -x_13 = l_System_FilePath_pathExists(x_11, x_12); -x_14 = lean_ctor_get(x_13, 0); -lean_inc(x_14); -x_15 = lean_unbox(x_14); -lean_dec(x_14); -if (x_15 == 0) -{ -uint8_t x_16; -lean_dec(x_6); -x_16 = !lean_is_exclusive(x_13); -if (x_16 == 0) +x_20 = l_Lean_ModuleArtifacts_oleanParts(x_19); +x_21 = 
l_Array_isEmpty___rarg(x_20); +if (x_21 == 0) { -lean_object* x_17; lean_object* x_18; lean_object* x_19; lean_object* x_20; lean_object* x_21; uint8_t x_22; lean_object* x_23; lean_object* x_24; lean_object* x_25; lean_object* x_26; lean_object* x_27; lean_object* x_28; -x_17 = lean_ctor_get(x_13, 0); -lean_dec(x_17); -x_18 = l_Array_forIn_x27Unsafe_loop___at_Lean_importModulesCore_go___spec__10___lambda__4___closed__1; -x_19 = lean_string_append(x_18, x_11); -lean_dec(x_11); -x_20 = l_Array_forIn_x27Unsafe_loop___at_Lean_importModulesCore_go___spec__10___lambda__4___closed__2; -x_21 = lean_string_append(x_19, x_20); -x_22 = 1; -x_23 = l_Lean_instToStringImport___closed__1; -x_24 = l_Lean_Name_toString(x_8, x_22, x_23); -x_25 = lean_string_append(x_21, x_24); -lean_dec(x_24); -x_26 = l_Array_forIn_x27Unsafe_loop___at_Lean_importModulesCore_go___spec__10___lambda__4___closed__3; -x_27 = lean_string_append(x_25, x_26); -x_28 = lean_alloc_ctor(18, 1, 0); -lean_ctor_set(x_28, 0, x_27); -lean_ctor_set_tag(x_13, 1); -lean_ctor_set(x_13, 0, x_28); -return x_13; +lean_object* x_22; +lean_dec(x_9); +x_22 = l_Array_forIn_x27Unsafe_loop___at_Lean_importModulesCore_go___spec__10___lambda__2(x_1, x_2, x_3, x_4, x_20, x_7, x_8); +lean_dec(x_20); +return x_22; } else { -lean_object* x_29; lean_object* x_30; lean_object* x_31; lean_object* x_32; lean_object* x_33; uint8_t x_34; lean_object* x_35; lean_object* x_36; lean_object* x_37; lean_object* x_38; lean_object* x_39; lean_object* x_40; lean_object* x_41; -x_29 = lean_ctor_get(x_13, 1); -lean_inc(x_29); -lean_dec(x_13); -x_30 = l_Array_forIn_x27Unsafe_loop___at_Lean_importModulesCore_go___spec__10___lambda__4___closed__1; -x_31 = lean_string_append(x_30, x_11); -lean_dec(x_11); -x_32 = l_Array_forIn_x27Unsafe_loop___at_Lean_importModulesCore_go___spec__10___lambda__4___closed__2; -x_33 = lean_string_append(x_31, x_32); -x_34 = 1; -x_35 = l_Lean_instToStringImport___closed__1; -x_36 = l_Lean_Name_toString(x_8, x_34, x_35); -x_37 = lean_string_append(x_33, x_36); -lean_dec(x_36); -x_38 = l_Array_forIn_x27Unsafe_loop___at_Lean_importModulesCore_go___spec__10___lambda__4___closed__3; -x_39 = lean_string_append(x_37, x_38); -x_40 = lean_alloc_ctor(18, 1, 0); -lean_ctor_set(x_40, 0, x_39); -x_41 = lean_alloc_ctor(1, 2, 0); -lean_ctor_set(x_41, 0, x_40); -lean_ctor_set(x_41, 1, x_29); -return x_41; -} -} -else +lean_object* x_23; +lean_dec(x_20); +x_23 = l___private_Lean_Environment_0__Lean_findOLeanParts(x_9, x_8); +if (lean_obj_tag(x_23) == 0) { -lean_object* x_42; lean_object* x_43; lean_object* x_44; -x_42 = lean_ctor_get(x_13, 1); -lean_inc(x_42); -lean_dec(x_13); -x_43 = lean_box(0); -x_44 = l_Array_forIn_x27Unsafe_loop___at_Lean_importModulesCore_go___spec__10___lambda__3(x_11, x_2, x_8, x_3, x_9, x_4, x_43, x_6, x_42); -return x_44; -} +lean_object* x_24; lean_object* x_25; lean_object* x_26; +x_24 = lean_ctor_get(x_23, 0); +lean_inc(x_24); +x_25 = lean_ctor_get(x_23, 1); +lean_inc(x_25); +lean_dec(x_23); +x_26 = l_Array_forIn_x27Unsafe_loop___at_Lean_importModulesCore_go___spec__10___lambda__2(x_1, x_2, x_3, x_4, x_24, x_7, x_25); +lean_dec(x_24); +return x_26; } else { -uint8_t x_45; -lean_dec(x_8); -lean_dec(x_6); -x_45 = !lean_is_exclusive(x_10); -if (x_45 == 0) +uint8_t x_27; +lean_dec(x_7); +lean_dec(x_2); +x_27 = !lean_is_exclusive(x_23); +if (x_27 == 0) { -return x_10; +return x_23; } else { -lean_object* x_46; lean_object* x_47; lean_object* x_48; -x_46 = lean_ctor_get(x_10, 0); -x_47 = lean_ctor_get(x_10, 1); -lean_inc(x_47); 
-lean_inc(x_46); -lean_dec(x_10); -x_48 = lean_alloc_ctor(1, 2, 0); -lean_ctor_set(x_48, 0, x_46); -lean_ctor_set(x_48, 1, x_47); -return x_48; +lean_object* x_28; lean_object* x_29; lean_object* x_30; +x_28 = lean_ctor_get(x_23, 0); +x_29 = lean_ctor_get(x_23, 1); +lean_inc(x_29); +lean_inc(x_28); +lean_dec(x_23); +x_30 = lean_alloc_ctor(1, 2, 0); +lean_ctor_set(x_30, 0, x_28); +lean_ctor_set(x_30, 1, x_29); +return x_30; +} +} } } } } -LEAN_EXPORT lean_object* l_Array_forIn_x27Unsafe_loop___at_Lean_importModulesCore_go___spec__10___lambda__5(lean_object* x_1, lean_object* x_2, lean_object* x_3) { +LEAN_EXPORT lean_object* l_Array_forIn_x27Unsafe_loop___at_Lean_importModulesCore_go___spec__10___lambda__4(lean_object* x_1, lean_object* x_2, lean_object* x_3) { _start: { lean_object* x_4; lean_object* x_5; @@ -26543,812 +26535,813 @@ static lean_object* _init_l_Array_forIn_x27Unsafe_loop___at_Lean_importModulesCo _start: { lean_object* x_1; -x_1 = lean_alloc_closure((void*)(l_Array_forIn_x27Unsafe_loop___at_Lean_importModulesCore_go___spec__10___lambda__5___boxed), 3, 0); +x_1 = lean_alloc_closure((void*)(l_Array_forIn_x27Unsafe_loop___at_Lean_importModulesCore_go___spec__10___lambda__4___boxed), 3, 0); return x_1; } } -LEAN_EXPORT lean_object* l_Array_forIn_x27Unsafe_loop___at_Lean_importModulesCore_go___spec__10(lean_object* x_1, uint8_t x_2, lean_object* x_3, lean_object* x_4, size_t x_5, size_t x_6, lean_object* x_7, lean_object* x_8, lean_object* x_9) { +LEAN_EXPORT lean_object* l_Array_forIn_x27Unsafe_loop___at_Lean_importModulesCore_go___spec__10(lean_object* x_1, uint8_t x_2, lean_object* x_3, lean_object* x_4, lean_object* x_5, size_t x_6, size_t x_7, lean_object* x_8, lean_object* x_9, lean_object* x_10) { _start: { -lean_object* x_10; uint8_t x_28; -x_28 = lean_usize_dec_lt(x_6, x_5); -if (x_28 == 0) +lean_object* x_11; uint8_t x_29; +x_29 = lean_usize_dec_lt(x_7, x_6); +if (x_29 == 0) { -lean_object* x_29; -lean_dec(x_8); -x_29 = lean_alloc_ctor(0, 2, 0); -lean_ctor_set(x_29, 0, x_7); -lean_ctor_set(x_29, 1, x_9); -return x_29; +lean_object* x_30; +lean_dec(x_9); +x_30 = lean_alloc_ctor(0, 2, 0); +lean_ctor_set(x_30, 0, x_8); +lean_ctor_set(x_30, 1, x_10); +return x_30; } else { -lean_object* x_30; lean_object* x_31; uint8_t x_32; -lean_dec(x_7); -x_30 = lean_array_uget(x_4, x_6); -x_31 = lean_st_ref_get(x_8, x_9); +lean_object* x_31; lean_object* x_32; uint8_t x_33; +lean_dec(x_8); +x_31 = lean_array_uget(x_5, x_7); +x_32 = lean_st_ref_get(x_9, x_10); if (x_2 == 0) { -uint8_t x_284; -x_284 = lean_ctor_get_uint8(x_30, sizeof(void*)*1); -x_32 = x_284; -goto block_283; +uint8_t x_286; +x_286 = lean_ctor_get_uint8(x_31, sizeof(void*)*1); +x_33 = x_286; +goto block_285; } else { -uint8_t x_285; -x_285 = 1; -x_32 = x_285; -goto block_283; +uint8_t x_287; +x_287 = 1; +x_33 = x_287; +goto block_285; } -block_283: +block_285: { -lean_object* x_33; lean_object* x_34; lean_object* x_35; lean_object* x_36; lean_object* x_37; lean_object* x_38; uint64_t x_39; uint64_t x_40; uint64_t x_41; uint64_t x_42; uint64_t x_43; uint64_t x_44; uint64_t x_45; size_t x_46; size_t x_47; size_t x_48; size_t x_49; size_t x_50; lean_object* x_51; lean_object* x_52; -x_33 = lean_ctor_get(x_31, 0); -lean_inc(x_33); -x_34 = lean_ctor_get(x_33, 0); +lean_object* x_34; lean_object* x_35; lean_object* x_36; lean_object* x_37; lean_object* x_38; lean_object* x_39; uint64_t x_40; uint64_t x_41; uint64_t x_42; uint64_t x_43; uint64_t x_44; uint64_t x_45; uint64_t x_46; size_t x_47; size_t x_48; size_t x_49; size_t 
x_50; size_t x_51; lean_object* x_52; lean_object* x_53; +x_34 = lean_ctor_get(x_32, 0); lean_inc(x_34); -lean_dec(x_33); -x_35 = lean_ctor_get(x_31, 1); +x_35 = lean_ctor_get(x_34, 0); lean_inc(x_35); -lean_dec(x_31); -x_36 = lean_ctor_get(x_30, 0); +lean_dec(x_34); +x_36 = lean_ctor_get(x_32, 1); lean_inc(x_36); -x_37 = lean_ctor_get(x_34, 1); +lean_dec(x_32); +x_37 = lean_ctor_get(x_31, 0); lean_inc(x_37); -lean_dec(x_34); -x_38 = lean_array_get_size(x_37); -x_39 = l_Lean_Name_hash___override(x_36); -x_40 = 32; -x_41 = lean_uint64_shift_right(x_39, x_40); -x_42 = lean_uint64_xor(x_39, x_41); -x_43 = 16; -x_44 = lean_uint64_shift_right(x_42, x_43); -x_45 = lean_uint64_xor(x_42, x_44); -x_46 = lean_uint64_to_usize(x_45); -x_47 = lean_usize_of_nat(x_38); +x_38 = lean_ctor_get(x_35, 1); +lean_inc(x_38); +lean_dec(x_35); +x_39 = lean_array_get_size(x_38); +x_40 = l_Lean_Name_hash___override(x_37); +x_41 = 32; +x_42 = lean_uint64_shift_right(x_40, x_41); +x_43 = lean_uint64_xor(x_40, x_42); +x_44 = 16; +x_45 = lean_uint64_shift_right(x_43, x_44); +x_46 = lean_uint64_xor(x_43, x_45); +x_47 = lean_uint64_to_usize(x_46); +x_48 = lean_usize_of_nat(x_39); +lean_dec(x_39); +x_49 = 1; +x_50 = lean_usize_sub(x_48, x_49); +x_51 = lean_usize_land(x_47, x_50); +x_52 = lean_array_uget(x_38, x_51); lean_dec(x_38); -x_48 = 1; -x_49 = lean_usize_sub(x_47, x_48); -x_50 = lean_usize_land(x_46, x_49); -x_51 = lean_array_uget(x_37, x_50); -lean_dec(x_37); -x_52 = l_Std_DHashMap_Internal_AssocList_get_x3f___at_Lean_importModulesCore_go___spec__7(x_36, x_51); -lean_dec(x_51); -if (lean_obj_tag(x_52) == 0) +x_53 = l_Std_DHashMap_Internal_AssocList_get_x3f___at_Lean_importModulesCore_go___spec__7(x_37, x_52); +lean_dec(x_52); +if (lean_obj_tag(x_53) == 0) { -lean_object* x_53; lean_object* x_54; -lean_dec(x_36); -x_53 = lean_box(0); -lean_inc(x_8); -x_54 = l_Array_forIn_x27Unsafe_loop___at_Lean_importModulesCore_go___spec__10___lambda__4(x_30, x_2, x_32, x_3, x_53, x_8, x_35); -x_10 = x_54; -goto block_27; +lean_object* x_54; lean_object* x_55; +lean_dec(x_37); +x_54 = lean_box(0); +lean_inc(x_9); +x_55 = l_Array_forIn_x27Unsafe_loop___at_Lean_importModulesCore_go___spec__10___lambda__3(x_2, x_31, x_33, x_4, x_3, x_54, x_9, x_36); +x_11 = x_55; +goto block_28; } else { -lean_object* x_55; lean_object* x_56; lean_object* x_57; -lean_dec(x_30); -x_55 = lean_ctor_get(x_52, 0); -lean_inc(x_55); -lean_dec(x_52); -x_56 = l_Array_forIn_x27Unsafe_loop___at_Lean_importModulesCore_go___spec__10___closed__1; -if (x_32 == 0) +lean_object* x_56; lean_object* x_57; lean_object* x_58; +lean_dec(x_31); +x_56 = lean_ctor_get(x_53, 0); +lean_inc(x_56); +lean_dec(x_53); +x_57 = l_Array_forIn_x27Unsafe_loop___at_Lean_importModulesCore_go___spec__10___closed__1; +if (x_33 == 0) { -lean_object* x_75; lean_object* x_76; -lean_dec(x_55); -lean_dec(x_36); -x_75 = lean_box(0); -lean_inc(x_8); -x_76 = lean_apply_3(x_56, x_75, x_8, x_35); -x_10 = x_76; -goto block_27; +lean_object* x_77; lean_object* x_78; +lean_dec(x_56); +lean_dec(x_37); +x_77 = lean_box(0); +lean_inc(x_9); +x_78 = lean_apply_3(x_57, x_77, x_9, x_36); +x_11 = x_78; +goto block_28; } else { -lean_object* x_77; uint8_t x_78; -x_77 = lean_ctor_get(x_55, 0); -lean_inc(x_77); -x_78 = lean_ctor_get_uint8(x_77, sizeof(void*)*1); -if (x_78 == 0) -{ lean_object* x_79; uint8_t x_80; -x_79 = lean_ctor_get(x_55, 1); +x_79 = lean_ctor_get(x_56, 0); lean_inc(x_79); -x_80 = !lean_is_exclusive(x_77); +x_80 = lean_ctor_get_uint8(x_79, sizeof(void*)*1); if (x_80 == 0) { lean_object* x_81; 
uint8_t x_82; -x_81 = lean_st_ref_take(x_8, x_35); -x_82 = !lean_is_exclusive(x_81); +x_81 = lean_ctor_get(x_56, 1); +lean_inc(x_81); +x_82 = !lean_is_exclusive(x_79); if (x_82 == 0) { lean_object* x_83; uint8_t x_84; -x_83 = lean_ctor_get(x_81, 0); +x_83 = lean_st_ref_take(x_9, x_36); x_84 = !lean_is_exclusive(x_83); if (x_84 == 0) { -lean_object* x_85; lean_object* x_86; uint8_t x_87; uint8_t x_88; -x_85 = lean_ctor_get(x_81, 1); -x_86 = lean_ctor_get(x_83, 0); -x_87 = 1; -lean_ctor_set_uint8(x_77, sizeof(void*)*1, x_87); -lean_ctor_set(x_81, 1, x_79); -lean_ctor_set(x_81, 0, x_77); -x_88 = !lean_is_exclusive(x_86); -if (x_88 == 0) -{ -lean_object* x_89; lean_object* x_90; lean_object* x_91; size_t x_92; size_t x_93; size_t x_94; lean_object* x_95; uint8_t x_96; -x_89 = lean_ctor_get(x_86, 0); -x_90 = lean_ctor_get(x_86, 1); -x_91 = lean_array_get_size(x_90); -x_92 = lean_usize_of_nat(x_91); -lean_dec(x_91); -x_93 = lean_usize_sub(x_92, x_48); -x_94 = lean_usize_land(x_46, x_93); -x_95 = lean_array_uget(x_90, x_94); -x_96 = l_Std_DHashMap_Internal_AssocList_contains___at_Lean_importModulesCore_go___spec__2(x_36, x_95); -if (x_96 == 0) +lean_object* x_85; uint8_t x_86; +x_85 = lean_ctor_get(x_83, 0); +x_86 = !lean_is_exclusive(x_85); +if (x_86 == 0) { -lean_object* x_97; lean_object* x_98; lean_object* x_99; lean_object* x_100; lean_object* x_101; lean_object* x_102; lean_object* x_103; lean_object* x_104; lean_object* x_105; uint8_t x_106; -x_97 = lean_unsigned_to_nat(1u); -x_98 = lean_nat_add(x_89, x_97); -lean_dec(x_89); -x_99 = lean_alloc_ctor(1, 3, 0); -lean_ctor_set(x_99, 0, x_36); -lean_ctor_set(x_99, 1, x_81); -lean_ctor_set(x_99, 2, x_95); -x_100 = lean_array_uset(x_90, x_94, x_99); -x_101 = lean_unsigned_to_nat(4u); -x_102 = lean_nat_mul(x_98, x_101); -x_103 = lean_unsigned_to_nat(3u); -x_104 = lean_nat_div(x_102, x_103); -lean_dec(x_102); -x_105 = lean_array_get_size(x_100); -x_106 = lean_nat_dec_le(x_104, x_105); -lean_dec(x_105); +lean_object* x_87; lean_object* x_88; uint8_t x_89; uint8_t x_90; +x_87 = lean_ctor_get(x_83, 1); +x_88 = lean_ctor_get(x_85, 0); +x_89 = 1; +lean_ctor_set_uint8(x_79, sizeof(void*)*1, x_89); +lean_ctor_set(x_83, 1, x_81); +lean_ctor_set(x_83, 0, x_79); +x_90 = !lean_is_exclusive(x_88); +if (x_90 == 0) +{ +lean_object* x_91; lean_object* x_92; lean_object* x_93; size_t x_94; size_t x_95; size_t x_96; lean_object* x_97; uint8_t x_98; +x_91 = lean_ctor_get(x_88, 0); +x_92 = lean_ctor_get(x_88, 1); +x_93 = lean_array_get_size(x_92); +x_94 = lean_usize_of_nat(x_93); +lean_dec(x_93); +x_95 = lean_usize_sub(x_94, x_49); +x_96 = lean_usize_land(x_47, x_95); +x_97 = lean_array_uget(x_92, x_96); +x_98 = l_Std_DHashMap_Internal_AssocList_contains___at_Lean_importModulesCore_go___spec__2(x_37, x_97); +if (x_98 == 0) +{ +lean_object* x_99; lean_object* x_100; lean_object* x_101; lean_object* x_102; lean_object* x_103; lean_object* x_104; lean_object* x_105; lean_object* x_106; lean_object* x_107; uint8_t x_108; +x_99 = lean_unsigned_to_nat(1u); +x_100 = lean_nat_add(x_91, x_99); +lean_dec(x_91); +x_101 = lean_alloc_ctor(1, 3, 0); +lean_ctor_set(x_101, 0, x_37); +lean_ctor_set(x_101, 1, x_83); +lean_ctor_set(x_101, 2, x_97); +x_102 = lean_array_uset(x_92, x_96, x_101); +x_103 = lean_unsigned_to_nat(4u); +x_104 = lean_nat_mul(x_100, x_103); +x_105 = lean_unsigned_to_nat(3u); +x_106 = lean_nat_div(x_104, x_105); lean_dec(x_104); -if (x_106 == 0) -{ -lean_object* x_107; lean_object* x_108; lean_object* x_109; -x_107 = 
l_Std_DHashMap_Internal_Raw_u2080_expand___at_Lean_importModulesCore_go___spec__3(x_100); -lean_ctor_set(x_86, 1, x_107); -lean_ctor_set(x_86, 0, x_98); -x_108 = lean_st_ref_set(x_8, x_83, x_85); -x_109 = lean_ctor_get(x_108, 1); -lean_inc(x_109); -lean_dec(x_108); -x_57 = x_109; -goto block_74; -} -else +x_107 = lean_array_get_size(x_102); +x_108 = lean_nat_dec_le(x_106, x_107); +lean_dec(x_107); +lean_dec(x_106); +if (x_108 == 0) { -lean_object* x_110; lean_object* x_111; -lean_ctor_set(x_86, 1, x_100); -lean_ctor_set(x_86, 0, x_98); -x_110 = lean_st_ref_set(x_8, x_83, x_85); +lean_object* x_109; lean_object* x_110; lean_object* x_111; +x_109 = l_Std_DHashMap_Internal_Raw_u2080_expand___at_Lean_importModulesCore_go___spec__3(x_102); +lean_ctor_set(x_88, 1, x_109); +lean_ctor_set(x_88, 0, x_100); +x_110 = lean_st_ref_set(x_9, x_85, x_87); x_111 = lean_ctor_get(x_110, 1); lean_inc(x_111); lean_dec(x_110); -x_57 = x_111; -goto block_74; -} +x_58 = x_111; +goto block_76; } else { -lean_object* x_112; lean_object* x_113; lean_object* x_114; lean_object* x_115; lean_object* x_116; lean_object* x_117; -x_112 = lean_box(0); -x_113 = lean_array_uset(x_90, x_94, x_112); -x_114 = l_Std_DHashMap_Internal_AssocList_replace___at_Lean_importModulesCore_go___spec__6(x_36, x_81, x_95); -x_115 = lean_array_uset(x_113, x_94, x_114); -lean_ctor_set(x_86, 1, x_115); -x_116 = lean_st_ref_set(x_8, x_83, x_85); -x_117 = lean_ctor_get(x_116, 1); -lean_inc(x_117); -lean_dec(x_116); -x_57 = x_117; -goto block_74; +lean_object* x_112; lean_object* x_113; +lean_ctor_set(x_88, 1, x_102); +lean_ctor_set(x_88, 0, x_100); +x_112 = lean_st_ref_set(x_9, x_85, x_87); +x_113 = lean_ctor_get(x_112, 1); +lean_inc(x_113); +lean_dec(x_112); +x_58 = x_113; +goto block_76; } } else { -lean_object* x_118; lean_object* x_119; lean_object* x_120; size_t x_121; size_t x_122; size_t x_123; lean_object* x_124; uint8_t x_125; -x_118 = lean_ctor_get(x_86, 0); -x_119 = lean_ctor_get(x_86, 1); +lean_object* x_114; lean_object* x_115; lean_object* x_116; lean_object* x_117; lean_object* x_118; lean_object* x_119; +x_114 = lean_box(0); +x_115 = lean_array_uset(x_92, x_96, x_114); +x_116 = l_Std_DHashMap_Internal_AssocList_replace___at_Lean_importModulesCore_go___spec__6(x_37, x_83, x_97); +x_117 = lean_array_uset(x_115, x_96, x_116); +lean_ctor_set(x_88, 1, x_117); +x_118 = lean_st_ref_set(x_9, x_85, x_87); +x_119 = lean_ctor_get(x_118, 1); lean_inc(x_119); -lean_inc(x_118); -lean_dec(x_86); -x_120 = lean_array_get_size(x_119); -x_121 = lean_usize_of_nat(x_120); -lean_dec(x_120); -x_122 = lean_usize_sub(x_121, x_48); -x_123 = lean_usize_land(x_46, x_122); -x_124 = lean_array_uget(x_119, x_123); -x_125 = l_Std_DHashMap_Internal_AssocList_contains___at_Lean_importModulesCore_go___spec__2(x_36, x_124); -if (x_125 == 0) -{ -lean_object* x_126; lean_object* x_127; lean_object* x_128; lean_object* x_129; lean_object* x_130; lean_object* x_131; lean_object* x_132; lean_object* x_133; lean_object* x_134; uint8_t x_135; -x_126 = lean_unsigned_to_nat(1u); -x_127 = lean_nat_add(x_118, x_126); lean_dec(x_118); -x_128 = lean_alloc_ctor(1, 3, 0); -lean_ctor_set(x_128, 0, x_36); -lean_ctor_set(x_128, 1, x_81); -lean_ctor_set(x_128, 2, x_124); -x_129 = lean_array_uset(x_119, x_123, x_128); -x_130 = lean_unsigned_to_nat(4u); -x_131 = lean_nat_mul(x_127, x_130); -x_132 = lean_unsigned_to_nat(3u); -x_133 = lean_nat_div(x_131, x_132); -lean_dec(x_131); -x_134 = lean_array_get_size(x_129); -x_135 = lean_nat_dec_le(x_133, x_134); -lean_dec(x_134); 
-lean_dec(x_133); -if (x_135 == 0) +x_58 = x_119; +goto block_76; +} +} +else { -lean_object* x_136; lean_object* x_137; lean_object* x_138; lean_object* x_139; -x_136 = l_Std_DHashMap_Internal_Raw_u2080_expand___at_Lean_importModulesCore_go___spec__3(x_129); -x_137 = lean_alloc_ctor(0, 2, 0); -lean_ctor_set(x_137, 0, x_127); -lean_ctor_set(x_137, 1, x_136); -lean_ctor_set(x_83, 0, x_137); -x_138 = lean_st_ref_set(x_8, x_83, x_85); -x_139 = lean_ctor_get(x_138, 1); -lean_inc(x_139); -lean_dec(x_138); -x_57 = x_139; -goto block_74; +lean_object* x_120; lean_object* x_121; lean_object* x_122; size_t x_123; size_t x_124; size_t x_125; lean_object* x_126; uint8_t x_127; +x_120 = lean_ctor_get(x_88, 0); +x_121 = lean_ctor_get(x_88, 1); +lean_inc(x_121); +lean_inc(x_120); +lean_dec(x_88); +x_122 = lean_array_get_size(x_121); +x_123 = lean_usize_of_nat(x_122); +lean_dec(x_122); +x_124 = lean_usize_sub(x_123, x_49); +x_125 = lean_usize_land(x_47, x_124); +x_126 = lean_array_uget(x_121, x_125); +x_127 = l_Std_DHashMap_Internal_AssocList_contains___at_Lean_importModulesCore_go___spec__2(x_37, x_126); +if (x_127 == 0) +{ +lean_object* x_128; lean_object* x_129; lean_object* x_130; lean_object* x_131; lean_object* x_132; lean_object* x_133; lean_object* x_134; lean_object* x_135; lean_object* x_136; uint8_t x_137; +x_128 = lean_unsigned_to_nat(1u); +x_129 = lean_nat_add(x_120, x_128); +lean_dec(x_120); +x_130 = lean_alloc_ctor(1, 3, 0); +lean_ctor_set(x_130, 0, x_37); +lean_ctor_set(x_130, 1, x_83); +lean_ctor_set(x_130, 2, x_126); +x_131 = lean_array_uset(x_121, x_125, x_130); +x_132 = lean_unsigned_to_nat(4u); +x_133 = lean_nat_mul(x_129, x_132); +x_134 = lean_unsigned_to_nat(3u); +x_135 = lean_nat_div(x_133, x_134); +lean_dec(x_133); +x_136 = lean_array_get_size(x_131); +x_137 = lean_nat_dec_le(x_135, x_136); +lean_dec(x_136); +lean_dec(x_135); +if (x_137 == 0) +{ +lean_object* x_138; lean_object* x_139; lean_object* x_140; lean_object* x_141; +x_138 = l_Std_DHashMap_Internal_Raw_u2080_expand___at_Lean_importModulesCore_go___spec__3(x_131); +x_139 = lean_alloc_ctor(0, 2, 0); +lean_ctor_set(x_139, 0, x_129); +lean_ctor_set(x_139, 1, x_138); +lean_ctor_set(x_85, 0, x_139); +x_140 = lean_st_ref_set(x_9, x_85, x_87); +x_141 = lean_ctor_get(x_140, 1); +lean_inc(x_141); +lean_dec(x_140); +x_58 = x_141; +goto block_76; } else { -lean_object* x_140; lean_object* x_141; lean_object* x_142; -x_140 = lean_alloc_ctor(0, 2, 0); -lean_ctor_set(x_140, 0, x_127); -lean_ctor_set(x_140, 1, x_129); -lean_ctor_set(x_83, 0, x_140); -x_141 = lean_st_ref_set(x_8, x_83, x_85); -x_142 = lean_ctor_get(x_141, 1); -lean_inc(x_142); -lean_dec(x_141); -x_57 = x_142; -goto block_74; +lean_object* x_142; lean_object* x_143; lean_object* x_144; +x_142 = lean_alloc_ctor(0, 2, 0); +lean_ctor_set(x_142, 0, x_129); +lean_ctor_set(x_142, 1, x_131); +lean_ctor_set(x_85, 0, x_142); +x_143 = lean_st_ref_set(x_9, x_85, x_87); +x_144 = lean_ctor_get(x_143, 1); +lean_inc(x_144); +lean_dec(x_143); +x_58 = x_144; +goto block_76; } } else { -lean_object* x_143; lean_object* x_144; lean_object* x_145; lean_object* x_146; lean_object* x_147; lean_object* x_148; lean_object* x_149; -x_143 = lean_box(0); -x_144 = lean_array_uset(x_119, x_123, x_143); -x_145 = l_Std_DHashMap_Internal_AssocList_replace___at_Lean_importModulesCore_go___spec__6(x_36, x_81, x_124); -x_146 = lean_array_uset(x_144, x_123, x_145); -x_147 = lean_alloc_ctor(0, 2, 0); -lean_ctor_set(x_147, 0, x_118); -lean_ctor_set(x_147, 1, x_146); -lean_ctor_set(x_83, 0, x_147); -x_148 = 
lean_st_ref_set(x_8, x_83, x_85); -x_149 = lean_ctor_get(x_148, 1); -lean_inc(x_149); -lean_dec(x_148); -x_57 = x_149; -goto block_74; +lean_object* x_145; lean_object* x_146; lean_object* x_147; lean_object* x_148; lean_object* x_149; lean_object* x_150; lean_object* x_151; +x_145 = lean_box(0); +x_146 = lean_array_uset(x_121, x_125, x_145); +x_147 = l_Std_DHashMap_Internal_AssocList_replace___at_Lean_importModulesCore_go___spec__6(x_37, x_83, x_126); +x_148 = lean_array_uset(x_146, x_125, x_147); +x_149 = lean_alloc_ctor(0, 2, 0); +lean_ctor_set(x_149, 0, x_120); +lean_ctor_set(x_149, 1, x_148); +lean_ctor_set(x_85, 0, x_149); +x_150 = lean_st_ref_set(x_9, x_85, x_87); +x_151 = lean_ctor_get(x_150, 1); +lean_inc(x_151); +lean_dec(x_150); +x_58 = x_151; +goto block_76; } } } else { -lean_object* x_150; lean_object* x_151; lean_object* x_152; uint8_t x_153; lean_object* x_154; lean_object* x_155; lean_object* x_156; lean_object* x_157; size_t x_158; size_t x_159; size_t x_160; lean_object* x_161; uint8_t x_162; -x_150 = lean_ctor_get(x_81, 1); -x_151 = lean_ctor_get(x_83, 0); +lean_object* x_152; lean_object* x_153; lean_object* x_154; uint8_t x_155; lean_object* x_156; lean_object* x_157; lean_object* x_158; lean_object* x_159; size_t x_160; size_t x_161; size_t x_162; lean_object* x_163; uint8_t x_164; x_152 = lean_ctor_get(x_83, 1); -lean_inc(x_152); -lean_inc(x_151); -lean_dec(x_83); -x_153 = 1; -lean_ctor_set_uint8(x_77, sizeof(void*)*1, x_153); -lean_ctor_set(x_81, 1, x_79); -lean_ctor_set(x_81, 0, x_77); -x_154 = lean_ctor_get(x_151, 0); +x_153 = lean_ctor_get(x_85, 0); +x_154 = lean_ctor_get(x_85, 1); lean_inc(x_154); -x_155 = lean_ctor_get(x_151, 1); -lean_inc(x_155); -if (lean_is_exclusive(x_151)) { - lean_ctor_release(x_151, 0); - lean_ctor_release(x_151, 1); - x_156 = x_151; +lean_inc(x_153); +lean_dec(x_85); +x_155 = 1; +lean_ctor_set_uint8(x_79, sizeof(void*)*1, x_155); +lean_ctor_set(x_83, 1, x_81); +lean_ctor_set(x_83, 0, x_79); +x_156 = lean_ctor_get(x_153, 0); +lean_inc(x_156); +x_157 = lean_ctor_get(x_153, 1); +lean_inc(x_157); +if (lean_is_exclusive(x_153)) { + lean_ctor_release(x_153, 0); + lean_ctor_release(x_153, 1); + x_158 = x_153; } else { - lean_dec_ref(x_151); - x_156 = lean_box(0); + lean_dec_ref(x_153); + x_158 = lean_box(0); } -x_157 = lean_array_get_size(x_155); -x_158 = lean_usize_of_nat(x_157); -lean_dec(x_157); -x_159 = lean_usize_sub(x_158, x_48); -x_160 = lean_usize_land(x_46, x_159); -x_161 = lean_array_uget(x_155, x_160); -x_162 = l_Std_DHashMap_Internal_AssocList_contains___at_Lean_importModulesCore_go___spec__2(x_36, x_161); -if (x_162 == 0) -{ -lean_object* x_163; lean_object* x_164; lean_object* x_165; lean_object* x_166; lean_object* x_167; lean_object* x_168; lean_object* x_169; lean_object* x_170; lean_object* x_171; uint8_t x_172; -x_163 = lean_unsigned_to_nat(1u); -x_164 = lean_nat_add(x_154, x_163); -lean_dec(x_154); -x_165 = lean_alloc_ctor(1, 3, 0); -lean_ctor_set(x_165, 0, x_36); -lean_ctor_set(x_165, 1, x_81); -lean_ctor_set(x_165, 2, x_161); -x_166 = lean_array_uset(x_155, x_160, x_165); -x_167 = lean_unsigned_to_nat(4u); -x_168 = lean_nat_mul(x_164, x_167); -x_169 = lean_unsigned_to_nat(3u); -x_170 = lean_nat_div(x_168, x_169); -lean_dec(x_168); -x_171 = lean_array_get_size(x_166); -x_172 = lean_nat_dec_le(x_170, x_171); -lean_dec(x_171); +x_159 = lean_array_get_size(x_157); +x_160 = lean_usize_of_nat(x_159); +lean_dec(x_159); +x_161 = lean_usize_sub(x_160, x_49); +x_162 = lean_usize_land(x_47, x_161); +x_163 = lean_array_uget(x_157, 
x_162); +x_164 = l_Std_DHashMap_Internal_AssocList_contains___at_Lean_importModulesCore_go___spec__2(x_37, x_163); +if (x_164 == 0) +{ +lean_object* x_165; lean_object* x_166; lean_object* x_167; lean_object* x_168; lean_object* x_169; lean_object* x_170; lean_object* x_171; lean_object* x_172; lean_object* x_173; uint8_t x_174; +x_165 = lean_unsigned_to_nat(1u); +x_166 = lean_nat_add(x_156, x_165); +lean_dec(x_156); +x_167 = lean_alloc_ctor(1, 3, 0); +lean_ctor_set(x_167, 0, x_37); +lean_ctor_set(x_167, 1, x_83); +lean_ctor_set(x_167, 2, x_163); +x_168 = lean_array_uset(x_157, x_162, x_167); +x_169 = lean_unsigned_to_nat(4u); +x_170 = lean_nat_mul(x_166, x_169); +x_171 = lean_unsigned_to_nat(3u); +x_172 = lean_nat_div(x_170, x_171); lean_dec(x_170); -if (x_172 == 0) +x_173 = lean_array_get_size(x_168); +x_174 = lean_nat_dec_le(x_172, x_173); +lean_dec(x_173); +lean_dec(x_172); +if (x_174 == 0) { -lean_object* x_173; lean_object* x_174; lean_object* x_175; lean_object* x_176; lean_object* x_177; -x_173 = l_Std_DHashMap_Internal_Raw_u2080_expand___at_Lean_importModulesCore_go___spec__3(x_166); -if (lean_is_scalar(x_156)) { - x_174 = lean_alloc_ctor(0, 2, 0); +lean_object* x_175; lean_object* x_176; lean_object* x_177; lean_object* x_178; lean_object* x_179; +x_175 = l_Std_DHashMap_Internal_Raw_u2080_expand___at_Lean_importModulesCore_go___spec__3(x_168); +if (lean_is_scalar(x_158)) { + x_176 = lean_alloc_ctor(0, 2, 0); } else { - x_174 = x_156; + x_176 = x_158; } -lean_ctor_set(x_174, 0, x_164); -lean_ctor_set(x_174, 1, x_173); -x_175 = lean_alloc_ctor(0, 2, 0); -lean_ctor_set(x_175, 0, x_174); -lean_ctor_set(x_175, 1, x_152); -x_176 = lean_st_ref_set(x_8, x_175, x_150); -x_177 = lean_ctor_get(x_176, 1); -lean_inc(x_177); -lean_dec(x_176); -x_57 = x_177; -goto block_74; +lean_ctor_set(x_176, 0, x_166); +lean_ctor_set(x_176, 1, x_175); +x_177 = lean_alloc_ctor(0, 2, 0); +lean_ctor_set(x_177, 0, x_176); +lean_ctor_set(x_177, 1, x_154); +x_178 = lean_st_ref_set(x_9, x_177, x_152); +x_179 = lean_ctor_get(x_178, 1); +lean_inc(x_179); +lean_dec(x_178); +x_58 = x_179; +goto block_76; } else { -lean_object* x_178; lean_object* x_179; lean_object* x_180; lean_object* x_181; -if (lean_is_scalar(x_156)) { - x_178 = lean_alloc_ctor(0, 2, 0); +lean_object* x_180; lean_object* x_181; lean_object* x_182; lean_object* x_183; +if (lean_is_scalar(x_158)) { + x_180 = lean_alloc_ctor(0, 2, 0); } else { - x_178 = x_156; + x_180 = x_158; } -lean_ctor_set(x_178, 0, x_164); -lean_ctor_set(x_178, 1, x_166); -x_179 = lean_alloc_ctor(0, 2, 0); -lean_ctor_set(x_179, 0, x_178); -lean_ctor_set(x_179, 1, x_152); -x_180 = lean_st_ref_set(x_8, x_179, x_150); -x_181 = lean_ctor_get(x_180, 1); -lean_inc(x_181); -lean_dec(x_180); -x_57 = x_181; -goto block_74; +lean_ctor_set(x_180, 0, x_166); +lean_ctor_set(x_180, 1, x_168); +x_181 = lean_alloc_ctor(0, 2, 0); +lean_ctor_set(x_181, 0, x_180); +lean_ctor_set(x_181, 1, x_154); +x_182 = lean_st_ref_set(x_9, x_181, x_152); +x_183 = lean_ctor_get(x_182, 1); +lean_inc(x_183); +lean_dec(x_182); +x_58 = x_183; +goto block_76; } } else { -lean_object* x_182; lean_object* x_183; lean_object* x_184; lean_object* x_185; lean_object* x_186; lean_object* x_187; lean_object* x_188; lean_object* x_189; -x_182 = lean_box(0); -x_183 = lean_array_uset(x_155, x_160, x_182); -x_184 = l_Std_DHashMap_Internal_AssocList_replace___at_Lean_importModulesCore_go___spec__6(x_36, x_81, x_161); -x_185 = lean_array_uset(x_183, x_160, x_184); -if (lean_is_scalar(x_156)) { - x_186 = lean_alloc_ctor(0, 2, 0); 
+lean_object* x_184; lean_object* x_185; lean_object* x_186; lean_object* x_187; lean_object* x_188; lean_object* x_189; lean_object* x_190; lean_object* x_191; +x_184 = lean_box(0); +x_185 = lean_array_uset(x_157, x_162, x_184); +x_186 = l_Std_DHashMap_Internal_AssocList_replace___at_Lean_importModulesCore_go___spec__6(x_37, x_83, x_163); +x_187 = lean_array_uset(x_185, x_162, x_186); +if (lean_is_scalar(x_158)) { + x_188 = lean_alloc_ctor(0, 2, 0); } else { - x_186 = x_156; + x_188 = x_158; } -lean_ctor_set(x_186, 0, x_154); -lean_ctor_set(x_186, 1, x_185); -x_187 = lean_alloc_ctor(0, 2, 0); -lean_ctor_set(x_187, 0, x_186); -lean_ctor_set(x_187, 1, x_152); -x_188 = lean_st_ref_set(x_8, x_187, x_150); -x_189 = lean_ctor_get(x_188, 1); -lean_inc(x_189); -lean_dec(x_188); -x_57 = x_189; -goto block_74; +lean_ctor_set(x_188, 0, x_156); +lean_ctor_set(x_188, 1, x_187); +x_189 = lean_alloc_ctor(0, 2, 0); +lean_ctor_set(x_189, 0, x_188); +lean_ctor_set(x_189, 1, x_154); +x_190 = lean_st_ref_set(x_9, x_189, x_152); +x_191 = lean_ctor_get(x_190, 1); +lean_inc(x_191); +lean_dec(x_190); +x_58 = x_191; +goto block_76; } } } else { -lean_object* x_190; lean_object* x_191; lean_object* x_192; lean_object* x_193; lean_object* x_194; uint8_t x_195; lean_object* x_196; lean_object* x_197; lean_object* x_198; lean_object* x_199; lean_object* x_200; size_t x_201; size_t x_202; size_t x_203; lean_object* x_204; uint8_t x_205; -x_190 = lean_ctor_get(x_81, 0); -x_191 = lean_ctor_get(x_81, 1); -lean_inc(x_191); -lean_inc(x_190); -lean_dec(x_81); -x_192 = lean_ctor_get(x_190, 0); -lean_inc(x_192); -x_193 = lean_ctor_get(x_190, 1); +lean_object* x_192; lean_object* x_193; lean_object* x_194; lean_object* x_195; lean_object* x_196; uint8_t x_197; lean_object* x_198; lean_object* x_199; lean_object* x_200; lean_object* x_201; lean_object* x_202; size_t x_203; size_t x_204; size_t x_205; lean_object* x_206; uint8_t x_207; +x_192 = lean_ctor_get(x_83, 0); +x_193 = lean_ctor_get(x_83, 1); lean_inc(x_193); -if (lean_is_exclusive(x_190)) { - lean_ctor_release(x_190, 0); - lean_ctor_release(x_190, 1); - x_194 = x_190; -} else { - lean_dec_ref(x_190); - x_194 = lean_box(0); -} -x_195 = 1; -lean_ctor_set_uint8(x_77, sizeof(void*)*1, x_195); -x_196 = lean_alloc_ctor(0, 2, 0); -lean_ctor_set(x_196, 0, x_77); -lean_ctor_set(x_196, 1, x_79); -x_197 = lean_ctor_get(x_192, 0); -lean_inc(x_197); -x_198 = lean_ctor_get(x_192, 1); -lean_inc(x_198); +lean_inc(x_192); +lean_dec(x_83); +x_194 = lean_ctor_get(x_192, 0); +lean_inc(x_194); +x_195 = lean_ctor_get(x_192, 1); +lean_inc(x_195); if (lean_is_exclusive(x_192)) { lean_ctor_release(x_192, 0); lean_ctor_release(x_192, 1); - x_199 = x_192; + x_196 = x_192; } else { lean_dec_ref(x_192); - x_199 = lean_box(0); + x_196 = lean_box(0); } -x_200 = lean_array_get_size(x_198); -x_201 = lean_usize_of_nat(x_200); -lean_dec(x_200); -x_202 = lean_usize_sub(x_201, x_48); -x_203 = lean_usize_land(x_46, x_202); -x_204 = lean_array_uget(x_198, x_203); -x_205 = l_Std_DHashMap_Internal_AssocList_contains___at_Lean_importModulesCore_go___spec__2(x_36, x_204); -if (x_205 == 0) -{ -lean_object* x_206; lean_object* x_207; lean_object* x_208; lean_object* x_209; lean_object* x_210; lean_object* x_211; lean_object* x_212; lean_object* x_213; lean_object* x_214; uint8_t x_215; -x_206 = lean_unsigned_to_nat(1u); -x_207 = lean_nat_add(x_197, x_206); -lean_dec(x_197); -x_208 = lean_alloc_ctor(1, 3, 0); -lean_ctor_set(x_208, 0, x_36); -lean_ctor_set(x_208, 1, x_196); -lean_ctor_set(x_208, 2, x_204); -x_209 = 
lean_array_uset(x_198, x_203, x_208); -x_210 = lean_unsigned_to_nat(4u); -x_211 = lean_nat_mul(x_207, x_210); -x_212 = lean_unsigned_to_nat(3u); -x_213 = lean_nat_div(x_211, x_212); -lean_dec(x_211); -x_214 = lean_array_get_size(x_209); -x_215 = lean_nat_dec_le(x_213, x_214); -lean_dec(x_214); +x_197 = 1; +lean_ctor_set_uint8(x_79, sizeof(void*)*1, x_197); +x_198 = lean_alloc_ctor(0, 2, 0); +lean_ctor_set(x_198, 0, x_79); +lean_ctor_set(x_198, 1, x_81); +x_199 = lean_ctor_get(x_194, 0); +lean_inc(x_199); +x_200 = lean_ctor_get(x_194, 1); +lean_inc(x_200); +if (lean_is_exclusive(x_194)) { + lean_ctor_release(x_194, 0); + lean_ctor_release(x_194, 1); + x_201 = x_194; +} else { + lean_dec_ref(x_194); + x_201 = lean_box(0); +} +x_202 = lean_array_get_size(x_200); +x_203 = lean_usize_of_nat(x_202); +lean_dec(x_202); +x_204 = lean_usize_sub(x_203, x_49); +x_205 = lean_usize_land(x_47, x_204); +x_206 = lean_array_uget(x_200, x_205); +x_207 = l_Std_DHashMap_Internal_AssocList_contains___at_Lean_importModulesCore_go___spec__2(x_37, x_206); +if (x_207 == 0) +{ +lean_object* x_208; lean_object* x_209; lean_object* x_210; lean_object* x_211; lean_object* x_212; lean_object* x_213; lean_object* x_214; lean_object* x_215; lean_object* x_216; uint8_t x_217; +x_208 = lean_unsigned_to_nat(1u); +x_209 = lean_nat_add(x_199, x_208); +lean_dec(x_199); +x_210 = lean_alloc_ctor(1, 3, 0); +lean_ctor_set(x_210, 0, x_37); +lean_ctor_set(x_210, 1, x_198); +lean_ctor_set(x_210, 2, x_206); +x_211 = lean_array_uset(x_200, x_205, x_210); +x_212 = lean_unsigned_to_nat(4u); +x_213 = lean_nat_mul(x_209, x_212); +x_214 = lean_unsigned_to_nat(3u); +x_215 = lean_nat_div(x_213, x_214); lean_dec(x_213); -if (x_215 == 0) +x_216 = lean_array_get_size(x_211); +x_217 = lean_nat_dec_le(x_215, x_216); +lean_dec(x_216); +lean_dec(x_215); +if (x_217 == 0) { -lean_object* x_216; lean_object* x_217; lean_object* x_218; lean_object* x_219; lean_object* x_220; -x_216 = l_Std_DHashMap_Internal_Raw_u2080_expand___at_Lean_importModulesCore_go___spec__3(x_209); -if (lean_is_scalar(x_199)) { - x_217 = lean_alloc_ctor(0, 2, 0); +lean_object* x_218; lean_object* x_219; lean_object* x_220; lean_object* x_221; lean_object* x_222; +x_218 = l_Std_DHashMap_Internal_Raw_u2080_expand___at_Lean_importModulesCore_go___spec__3(x_211); +if (lean_is_scalar(x_201)) { + x_219 = lean_alloc_ctor(0, 2, 0); } else { - x_217 = x_199; + x_219 = x_201; } -lean_ctor_set(x_217, 0, x_207); -lean_ctor_set(x_217, 1, x_216); -if (lean_is_scalar(x_194)) { - x_218 = lean_alloc_ctor(0, 2, 0); +lean_ctor_set(x_219, 0, x_209); +lean_ctor_set(x_219, 1, x_218); +if (lean_is_scalar(x_196)) { + x_220 = lean_alloc_ctor(0, 2, 0); } else { - x_218 = x_194; + x_220 = x_196; } -lean_ctor_set(x_218, 0, x_217); -lean_ctor_set(x_218, 1, x_193); -x_219 = lean_st_ref_set(x_8, x_218, x_191); -x_220 = lean_ctor_get(x_219, 1); -lean_inc(x_220); -lean_dec(x_219); -x_57 = x_220; -goto block_74; +lean_ctor_set(x_220, 0, x_219); +lean_ctor_set(x_220, 1, x_195); +x_221 = lean_st_ref_set(x_9, x_220, x_193); +x_222 = lean_ctor_get(x_221, 1); +lean_inc(x_222); +lean_dec(x_221); +x_58 = x_222; +goto block_76; } else { -lean_object* x_221; lean_object* x_222; lean_object* x_223; lean_object* x_224; -if (lean_is_scalar(x_199)) { - x_221 = lean_alloc_ctor(0, 2, 0); +lean_object* x_223; lean_object* x_224; lean_object* x_225; lean_object* x_226; +if (lean_is_scalar(x_201)) { + x_223 = lean_alloc_ctor(0, 2, 0); } else { - x_221 = x_199; + x_223 = x_201; } -lean_ctor_set(x_221, 0, x_207); 
-lean_ctor_set(x_221, 1, x_209); -if (lean_is_scalar(x_194)) { - x_222 = lean_alloc_ctor(0, 2, 0); +lean_ctor_set(x_223, 0, x_209); +lean_ctor_set(x_223, 1, x_211); +if (lean_is_scalar(x_196)) { + x_224 = lean_alloc_ctor(0, 2, 0); } else { - x_222 = x_194; + x_224 = x_196; } -lean_ctor_set(x_222, 0, x_221); -lean_ctor_set(x_222, 1, x_193); -x_223 = lean_st_ref_set(x_8, x_222, x_191); -x_224 = lean_ctor_get(x_223, 1); -lean_inc(x_224); -lean_dec(x_223); -x_57 = x_224; -goto block_74; +lean_ctor_set(x_224, 0, x_223); +lean_ctor_set(x_224, 1, x_195); +x_225 = lean_st_ref_set(x_9, x_224, x_193); +x_226 = lean_ctor_get(x_225, 1); +lean_inc(x_226); +lean_dec(x_225); +x_58 = x_226; +goto block_76; } } else { -lean_object* x_225; lean_object* x_226; lean_object* x_227; lean_object* x_228; lean_object* x_229; lean_object* x_230; lean_object* x_231; lean_object* x_232; -x_225 = lean_box(0); -x_226 = lean_array_uset(x_198, x_203, x_225); -x_227 = l_Std_DHashMap_Internal_AssocList_replace___at_Lean_importModulesCore_go___spec__6(x_36, x_196, x_204); -x_228 = lean_array_uset(x_226, x_203, x_227); -if (lean_is_scalar(x_199)) { - x_229 = lean_alloc_ctor(0, 2, 0); +lean_object* x_227; lean_object* x_228; lean_object* x_229; lean_object* x_230; lean_object* x_231; lean_object* x_232; lean_object* x_233; lean_object* x_234; +x_227 = lean_box(0); +x_228 = lean_array_uset(x_200, x_205, x_227); +x_229 = l_Std_DHashMap_Internal_AssocList_replace___at_Lean_importModulesCore_go___spec__6(x_37, x_198, x_206); +x_230 = lean_array_uset(x_228, x_205, x_229); +if (lean_is_scalar(x_201)) { + x_231 = lean_alloc_ctor(0, 2, 0); } else { - x_229 = x_199; + x_231 = x_201; } -lean_ctor_set(x_229, 0, x_197); -lean_ctor_set(x_229, 1, x_228); -if (lean_is_scalar(x_194)) { - x_230 = lean_alloc_ctor(0, 2, 0); +lean_ctor_set(x_231, 0, x_199); +lean_ctor_set(x_231, 1, x_230); +if (lean_is_scalar(x_196)) { + x_232 = lean_alloc_ctor(0, 2, 0); } else { - x_230 = x_194; + x_232 = x_196; } -lean_ctor_set(x_230, 0, x_229); -lean_ctor_set(x_230, 1, x_193); -x_231 = lean_st_ref_set(x_8, x_230, x_191); -x_232 = lean_ctor_get(x_231, 1); -lean_inc(x_232); -lean_dec(x_231); -x_57 = x_232; -goto block_74; +lean_ctor_set(x_232, 0, x_231); +lean_ctor_set(x_232, 1, x_195); +x_233 = lean_st_ref_set(x_9, x_232, x_193); +x_234 = lean_ctor_get(x_233, 1); +lean_inc(x_234); +lean_dec(x_233); +x_58 = x_234; +goto block_76; } } } else { -lean_object* x_233; uint8_t x_234; lean_object* x_235; lean_object* x_236; lean_object* x_237; lean_object* x_238; lean_object* x_239; lean_object* x_240; lean_object* x_241; uint8_t x_242; lean_object* x_243; lean_object* x_244; lean_object* x_245; lean_object* x_246; lean_object* x_247; lean_object* x_248; size_t x_249; size_t x_250; size_t x_251; lean_object* x_252; uint8_t x_253; -x_233 = lean_ctor_get(x_77, 0); -x_234 = lean_ctor_get_uint8(x_77, sizeof(void*)*1 + 1); -lean_inc(x_233); -lean_dec(x_77); -x_235 = lean_st_ref_take(x_8, x_35); -x_236 = lean_ctor_get(x_235, 0); -lean_inc(x_236); -x_237 = lean_ctor_get(x_235, 1); -lean_inc(x_237); -if (lean_is_exclusive(x_235)) { - lean_ctor_release(x_235, 0); - lean_ctor_release(x_235, 1); - x_238 = x_235; -} else { - lean_dec_ref(x_235); - x_238 = lean_box(0); -} -x_239 = lean_ctor_get(x_236, 0); +lean_object* x_235; uint8_t x_236; lean_object* x_237; lean_object* x_238; lean_object* x_239; lean_object* x_240; lean_object* x_241; lean_object* x_242; lean_object* x_243; uint8_t x_244; lean_object* x_245; lean_object* x_246; lean_object* x_247; lean_object* x_248; 
lean_object* x_249; lean_object* x_250; size_t x_251; size_t x_252; size_t x_253; lean_object* x_254; uint8_t x_255; +x_235 = lean_ctor_get(x_79, 0); +x_236 = lean_ctor_get_uint8(x_79, sizeof(void*)*1 + 1); +lean_inc(x_235); +lean_dec(x_79); +x_237 = lean_st_ref_take(x_9, x_36); +x_238 = lean_ctor_get(x_237, 0); +lean_inc(x_238); +x_239 = lean_ctor_get(x_237, 1); lean_inc(x_239); -x_240 = lean_ctor_get(x_236, 1); -lean_inc(x_240); -if (lean_is_exclusive(x_236)) { - lean_ctor_release(x_236, 0); - lean_ctor_release(x_236, 1); - x_241 = x_236; -} else { - lean_dec_ref(x_236); - x_241 = lean_box(0); -} -x_242 = 1; -x_243 = lean_alloc_ctor(0, 1, 2); -lean_ctor_set(x_243, 0, x_233); -lean_ctor_set_uint8(x_243, sizeof(void*)*1, x_242); -lean_ctor_set_uint8(x_243, sizeof(void*)*1 + 1, x_234); -if (lean_is_scalar(x_238)) { - x_244 = lean_alloc_ctor(0, 2, 0); -} else { - x_244 = x_238; -} -lean_ctor_set(x_244, 0, x_243); -lean_ctor_set(x_244, 1, x_79); -x_245 = lean_ctor_get(x_239, 0); -lean_inc(x_245); -x_246 = lean_ctor_get(x_239, 1); -lean_inc(x_246); -if (lean_is_exclusive(x_239)) { - lean_ctor_release(x_239, 0); - lean_ctor_release(x_239, 1); - x_247 = x_239; -} else { - lean_dec_ref(x_239); - x_247 = lean_box(0); -} -x_248 = lean_array_get_size(x_246); -x_249 = lean_usize_of_nat(x_248); -lean_dec(x_248); -x_250 = lean_usize_sub(x_249, x_48); -x_251 = lean_usize_land(x_46, x_250); -x_252 = lean_array_uget(x_246, x_251); -x_253 = l_Std_DHashMap_Internal_AssocList_contains___at_Lean_importModulesCore_go___spec__2(x_36, x_252); -if (x_253 == 0) -{ -lean_object* x_254; lean_object* x_255; lean_object* x_256; lean_object* x_257; lean_object* x_258; lean_object* x_259; lean_object* x_260; lean_object* x_261; lean_object* x_262; uint8_t x_263; -x_254 = lean_unsigned_to_nat(1u); -x_255 = lean_nat_add(x_245, x_254); -lean_dec(x_245); -x_256 = lean_alloc_ctor(1, 3, 0); -lean_ctor_set(x_256, 0, x_36); -lean_ctor_set(x_256, 1, x_244); -lean_ctor_set(x_256, 2, x_252); -x_257 = lean_array_uset(x_246, x_251, x_256); -x_258 = lean_unsigned_to_nat(4u); -x_259 = lean_nat_mul(x_255, x_258); -x_260 = lean_unsigned_to_nat(3u); -x_261 = lean_nat_div(x_259, x_260); -lean_dec(x_259); -x_262 = lean_array_get_size(x_257); -x_263 = lean_nat_dec_le(x_261, x_262); -lean_dec(x_262); +if (lean_is_exclusive(x_237)) { + lean_ctor_release(x_237, 0); + lean_ctor_release(x_237, 1); + x_240 = x_237; +} else { + lean_dec_ref(x_237); + x_240 = lean_box(0); +} +x_241 = lean_ctor_get(x_238, 0); +lean_inc(x_241); +x_242 = lean_ctor_get(x_238, 1); +lean_inc(x_242); +if (lean_is_exclusive(x_238)) { + lean_ctor_release(x_238, 0); + lean_ctor_release(x_238, 1); + x_243 = x_238; +} else { + lean_dec_ref(x_238); + x_243 = lean_box(0); +} +x_244 = 1; +x_245 = lean_alloc_ctor(0, 1, 2); +lean_ctor_set(x_245, 0, x_235); +lean_ctor_set_uint8(x_245, sizeof(void*)*1, x_244); +lean_ctor_set_uint8(x_245, sizeof(void*)*1 + 1, x_236); +if (lean_is_scalar(x_240)) { + x_246 = lean_alloc_ctor(0, 2, 0); +} else { + x_246 = x_240; +} +lean_ctor_set(x_246, 0, x_245); +lean_ctor_set(x_246, 1, x_81); +x_247 = lean_ctor_get(x_241, 0); +lean_inc(x_247); +x_248 = lean_ctor_get(x_241, 1); +lean_inc(x_248); +if (lean_is_exclusive(x_241)) { + lean_ctor_release(x_241, 0); + lean_ctor_release(x_241, 1); + x_249 = x_241; +} else { + lean_dec_ref(x_241); + x_249 = lean_box(0); +} +x_250 = lean_array_get_size(x_248); +x_251 = lean_usize_of_nat(x_250); +lean_dec(x_250); +x_252 = lean_usize_sub(x_251, x_49); +x_253 = lean_usize_land(x_47, x_252); +x_254 = 
lean_array_uget(x_248, x_253); +x_255 = l_Std_DHashMap_Internal_AssocList_contains___at_Lean_importModulesCore_go___spec__2(x_37, x_254); +if (x_255 == 0) +{ +lean_object* x_256; lean_object* x_257; lean_object* x_258; lean_object* x_259; lean_object* x_260; lean_object* x_261; lean_object* x_262; lean_object* x_263; lean_object* x_264; uint8_t x_265; +x_256 = lean_unsigned_to_nat(1u); +x_257 = lean_nat_add(x_247, x_256); +lean_dec(x_247); +x_258 = lean_alloc_ctor(1, 3, 0); +lean_ctor_set(x_258, 0, x_37); +lean_ctor_set(x_258, 1, x_246); +lean_ctor_set(x_258, 2, x_254); +x_259 = lean_array_uset(x_248, x_253, x_258); +x_260 = lean_unsigned_to_nat(4u); +x_261 = lean_nat_mul(x_257, x_260); +x_262 = lean_unsigned_to_nat(3u); +x_263 = lean_nat_div(x_261, x_262); lean_dec(x_261); -if (x_263 == 0) +x_264 = lean_array_get_size(x_259); +x_265 = lean_nat_dec_le(x_263, x_264); +lean_dec(x_264); +lean_dec(x_263); +if (x_265 == 0) { -lean_object* x_264; lean_object* x_265; lean_object* x_266; lean_object* x_267; lean_object* x_268; -x_264 = l_Std_DHashMap_Internal_Raw_u2080_expand___at_Lean_importModulesCore_go___spec__3(x_257); -if (lean_is_scalar(x_247)) { - x_265 = lean_alloc_ctor(0, 2, 0); +lean_object* x_266; lean_object* x_267; lean_object* x_268; lean_object* x_269; lean_object* x_270; +x_266 = l_Std_DHashMap_Internal_Raw_u2080_expand___at_Lean_importModulesCore_go___spec__3(x_259); +if (lean_is_scalar(x_249)) { + x_267 = lean_alloc_ctor(0, 2, 0); } else { - x_265 = x_247; + x_267 = x_249; } -lean_ctor_set(x_265, 0, x_255); -lean_ctor_set(x_265, 1, x_264); -if (lean_is_scalar(x_241)) { - x_266 = lean_alloc_ctor(0, 2, 0); +lean_ctor_set(x_267, 0, x_257); +lean_ctor_set(x_267, 1, x_266); +if (lean_is_scalar(x_243)) { + x_268 = lean_alloc_ctor(0, 2, 0); } else { - x_266 = x_241; + x_268 = x_243; } -lean_ctor_set(x_266, 0, x_265); -lean_ctor_set(x_266, 1, x_240); -x_267 = lean_st_ref_set(x_8, x_266, x_237); -x_268 = lean_ctor_get(x_267, 1); -lean_inc(x_268); -lean_dec(x_267); -x_57 = x_268; -goto block_74; +lean_ctor_set(x_268, 0, x_267); +lean_ctor_set(x_268, 1, x_242); +x_269 = lean_st_ref_set(x_9, x_268, x_239); +x_270 = lean_ctor_get(x_269, 1); +lean_inc(x_270); +lean_dec(x_269); +x_58 = x_270; +goto block_76; } else { -lean_object* x_269; lean_object* x_270; lean_object* x_271; lean_object* x_272; -if (lean_is_scalar(x_247)) { - x_269 = lean_alloc_ctor(0, 2, 0); +lean_object* x_271; lean_object* x_272; lean_object* x_273; lean_object* x_274; +if (lean_is_scalar(x_249)) { + x_271 = lean_alloc_ctor(0, 2, 0); } else { - x_269 = x_247; + x_271 = x_249; } -lean_ctor_set(x_269, 0, x_255); -lean_ctor_set(x_269, 1, x_257); -if (lean_is_scalar(x_241)) { - x_270 = lean_alloc_ctor(0, 2, 0); +lean_ctor_set(x_271, 0, x_257); +lean_ctor_set(x_271, 1, x_259); +if (lean_is_scalar(x_243)) { + x_272 = lean_alloc_ctor(0, 2, 0); } else { - x_270 = x_241; + x_272 = x_243; } -lean_ctor_set(x_270, 0, x_269); -lean_ctor_set(x_270, 1, x_240); -x_271 = lean_st_ref_set(x_8, x_270, x_237); -x_272 = lean_ctor_get(x_271, 1); -lean_inc(x_272); -lean_dec(x_271); -x_57 = x_272; -goto block_74; +lean_ctor_set(x_272, 0, x_271); +lean_ctor_set(x_272, 1, x_242); +x_273 = lean_st_ref_set(x_9, x_272, x_239); +x_274 = lean_ctor_get(x_273, 1); +lean_inc(x_274); +lean_dec(x_273); +x_58 = x_274; +goto block_76; } } else { -lean_object* x_273; lean_object* x_274; lean_object* x_275; lean_object* x_276; lean_object* x_277; lean_object* x_278; lean_object* x_279; lean_object* x_280; -x_273 = lean_box(0); -x_274 = lean_array_uset(x_246, 
x_251, x_273); -x_275 = l_Std_DHashMap_Internal_AssocList_replace___at_Lean_importModulesCore_go___spec__6(x_36, x_244, x_252); -x_276 = lean_array_uset(x_274, x_251, x_275); -if (lean_is_scalar(x_247)) { - x_277 = lean_alloc_ctor(0, 2, 0); +lean_object* x_275; lean_object* x_276; lean_object* x_277; lean_object* x_278; lean_object* x_279; lean_object* x_280; lean_object* x_281; lean_object* x_282; +x_275 = lean_box(0); +x_276 = lean_array_uset(x_248, x_253, x_275); +x_277 = l_Std_DHashMap_Internal_AssocList_replace___at_Lean_importModulesCore_go___spec__6(x_37, x_246, x_254); +x_278 = lean_array_uset(x_276, x_253, x_277); +if (lean_is_scalar(x_249)) { + x_279 = lean_alloc_ctor(0, 2, 0); } else { - x_277 = x_247; + x_279 = x_249; } -lean_ctor_set(x_277, 0, x_245); -lean_ctor_set(x_277, 1, x_276); -if (lean_is_scalar(x_241)) { - x_278 = lean_alloc_ctor(0, 2, 0); +lean_ctor_set(x_279, 0, x_247); +lean_ctor_set(x_279, 1, x_278); +if (lean_is_scalar(x_243)) { + x_280 = lean_alloc_ctor(0, 2, 0); } else { - x_278 = x_241; + x_280 = x_243; } -lean_ctor_set(x_278, 0, x_277); -lean_ctor_set(x_278, 1, x_240); -x_279 = lean_st_ref_set(x_8, x_278, x_237); -x_280 = lean_ctor_get(x_279, 1); -lean_inc(x_280); -lean_dec(x_279); -x_57 = x_280; -goto block_74; +lean_ctor_set(x_280, 0, x_279); +lean_ctor_set(x_280, 1, x_242); +x_281 = lean_st_ref_set(x_9, x_280, x_239); +x_282 = lean_ctor_get(x_281, 1); +lean_inc(x_282); +lean_dec(x_281); +x_58 = x_282; +goto block_76; } } } else { -lean_object* x_281; lean_object* x_282; -lean_dec(x_77); -lean_dec(x_55); -lean_dec(x_36); -x_281 = lean_box(0); -lean_inc(x_8); -x_282 = lean_apply_3(x_56, x_281, x_8, x_35); -x_10 = x_282; -goto block_27; +lean_object* x_283; lean_object* x_284; +lean_dec(x_79); +lean_dec(x_56); +lean_dec(x_37); +x_283 = lean_box(0); +lean_inc(x_9); +x_284 = lean_apply_3(x_57, x_283, x_9, x_36); +x_11 = x_284; +goto block_28; } } -block_74: +block_76: { if (x_2 == 0) { -lean_object* x_58; lean_object* x_59; -lean_dec(x_55); -x_58 = lean_box(0); -lean_inc(x_8); -x_59 = lean_apply_3(x_56, x_58, x_8, x_57); -x_10 = x_59; -goto block_27; +lean_object* x_59; lean_object* x_60; +lean_dec(x_56); +x_59 = lean_box(0); +lean_inc(x_9); +x_60 = lean_apply_3(x_57, x_59, x_9, x_58); +x_11 = x_60; +goto block_28; } else { -lean_object* x_60; -x_60 = l___private_Lean_Environment_0__Lean_ImportedModule_mainModule_x3f(x_55); -lean_dec(x_55); -if (lean_obj_tag(x_60) == 0) +lean_object* x_61; +x_61 = l___private_Lean_Environment_0__Lean_ImportedModule_mainModule_x3f(x_56); +lean_dec(x_56); +if (lean_obj_tag(x_61) == 0) { -lean_object* x_61; lean_object* x_62; -x_61 = lean_box(0); -lean_inc(x_8); -x_62 = lean_apply_3(x_56, x_61, x_8, x_57); -x_10 = x_62; -goto block_27; +lean_object* x_62; lean_object* x_63; +x_62 = lean_box(0); +lean_inc(x_9); +x_63 = lean_apply_3(x_57, x_62, x_9, x_58); +x_11 = x_63; +goto block_28; } else { -lean_object* x_63; lean_object* x_64; uint8_t x_65; lean_object* x_66; -x_63 = lean_ctor_get(x_60, 0); -lean_inc(x_63); -lean_dec(x_60); -x_64 = lean_ctor_get(x_63, 0); +lean_object* x_64; lean_object* x_65; lean_object* x_66; uint8_t x_67; lean_object* x_68; +x_64 = lean_ctor_get(x_61, 0); lean_inc(x_64); -lean_dec(x_63); -x_65 = 1; -lean_inc(x_8); -x_66 = l_Lean_importModulesCore_go(x_64, x_65, x_8, x_57); +lean_dec(x_61); +x_65 = lean_ctor_get(x_64, 0); +lean_inc(x_65); lean_dec(x_64); -if (lean_obj_tag(x_66) == 0) +x_66 = lean_box(0); +x_67 = 1; +lean_inc(x_9); +x_68 = l_Lean_importModulesCore_go(x_65, x_67, x_66, x_9, x_58); 
+lean_dec(x_65); +if (lean_obj_tag(x_68) == 0) { -lean_object* x_67; lean_object* x_68; lean_object* x_69; -x_67 = lean_ctor_get(x_66, 0); -lean_inc(x_67); -x_68 = lean_ctor_get(x_66, 1); -lean_inc(x_68); -lean_dec(x_66); -lean_inc(x_8); -x_69 = lean_apply_3(x_56, x_67, x_8, x_68); -x_10 = x_69; -goto block_27; +lean_object* x_69; lean_object* x_70; lean_object* x_71; +x_69 = lean_ctor_get(x_68, 0); +lean_inc(x_69); +x_70 = lean_ctor_get(x_68, 1); +lean_inc(x_70); +lean_dec(x_68); +lean_inc(x_9); +x_71 = lean_apply_3(x_57, x_69, x_9, x_70); +x_11 = x_71; +goto block_28; } else { -uint8_t x_70; -x_70 = !lean_is_exclusive(x_66); -if (x_70 == 0) +uint8_t x_72; +x_72 = !lean_is_exclusive(x_68); +if (x_72 == 0) { -x_10 = x_66; -goto block_27; +x_11 = x_68; +goto block_28; } else { -lean_object* x_71; lean_object* x_72; lean_object* x_73; -x_71 = lean_ctor_get(x_66, 0); -x_72 = lean_ctor_get(x_66, 1); -lean_inc(x_72); -lean_inc(x_71); -lean_dec(x_66); -x_73 = lean_alloc_ctor(1, 2, 0); -lean_ctor_set(x_73, 0, x_71); -lean_ctor_set(x_73, 1, x_72); -x_10 = x_73; -goto block_27; +lean_object* x_73; lean_object* x_74; lean_object* x_75; +x_73 = lean_ctor_get(x_68, 0); +x_74 = lean_ctor_get(x_68, 1); +lean_inc(x_74); +lean_inc(x_73); +lean_dec(x_68); +x_75 = lean_alloc_ctor(1, 2, 0); +lean_ctor_set(x_75, 0, x_73); +lean_ctor_set(x_75, 1, x_74); +x_11 = x_75; +goto block_28; } } } @@ -27357,150 +27350,150 @@ goto block_27; } } } -block_27: -{ -if (lean_obj_tag(x_10) == 0) +block_28: { -lean_object* x_11; -x_11 = lean_ctor_get(x_10, 0); -lean_inc(x_11); if (lean_obj_tag(x_11) == 0) { -uint8_t x_12; -lean_dec(x_8); -x_12 = !lean_is_exclusive(x_10); -if (x_12 == 0) +lean_object* x_12; +x_12 = lean_ctor_get(x_11, 0); +lean_inc(x_12); +if (lean_obj_tag(x_12) == 0) { -lean_object* x_13; lean_object* x_14; -x_13 = lean_ctor_get(x_10, 0); -lean_dec(x_13); +uint8_t x_13; +lean_dec(x_9); +x_13 = !lean_is_exclusive(x_11); +if (x_13 == 0) +{ +lean_object* x_14; lean_object* x_15; x_14 = lean_ctor_get(x_11, 0); -lean_inc(x_14); -lean_dec(x_11); -lean_ctor_set(x_10, 0, x_14); -return x_10; +lean_dec(x_14); +x_15 = lean_ctor_get(x_12, 0); +lean_inc(x_15); +lean_dec(x_12); +lean_ctor_set(x_11, 0, x_15); +return x_11; } else { -lean_object* x_15; lean_object* x_16; lean_object* x_17; -x_15 = lean_ctor_get(x_10, 1); -lean_inc(x_15); -lean_dec(x_10); -x_16 = lean_ctor_get(x_11, 0); +lean_object* x_16; lean_object* x_17; lean_object* x_18; +x_16 = lean_ctor_get(x_11, 1); lean_inc(x_16); lean_dec(x_11); -x_17 = lean_alloc_ctor(0, 2, 0); -lean_ctor_set(x_17, 0, x_16); -lean_ctor_set(x_17, 1, x_15); -return x_17; +x_17 = lean_ctor_get(x_12, 0); +lean_inc(x_17); +lean_dec(x_12); +x_18 = lean_alloc_ctor(0, 2, 0); +lean_ctor_set(x_18, 0, x_17); +lean_ctor_set(x_18, 1, x_16); +return x_18; } } else { -lean_object* x_18; lean_object* x_19; size_t x_20; size_t x_21; -x_18 = lean_ctor_get(x_10, 1); -lean_inc(x_18); -lean_dec(x_10); -x_19 = lean_ctor_get(x_11, 0); +lean_object* x_19; lean_object* x_20; size_t x_21; size_t x_22; +x_19 = lean_ctor_get(x_11, 1); lean_inc(x_19); lean_dec(x_11); -x_20 = 1; -x_21 = lean_usize_add(x_6, x_20); -x_6 = x_21; -x_7 = x_19; -x_9 = x_18; +x_20 = lean_ctor_get(x_12, 0); +lean_inc(x_20); +lean_dec(x_12); +x_21 = 1; +x_22 = lean_usize_add(x_7, x_21); +x_7 = x_22; +x_8 = x_20; +x_10 = x_19; goto _start; } } else { -uint8_t x_23; -lean_dec(x_8); -x_23 = !lean_is_exclusive(x_10); -if (x_23 == 0) +uint8_t x_24; +lean_dec(x_9); +x_24 = !lean_is_exclusive(x_11); +if (x_24 == 0) { -return x_10; +return 
x_11; } else { -lean_object* x_24; lean_object* x_25; lean_object* x_26; -x_24 = lean_ctor_get(x_10, 0); -x_25 = lean_ctor_get(x_10, 1); +lean_object* x_25; lean_object* x_26; lean_object* x_27; +x_25 = lean_ctor_get(x_11, 0); +x_26 = lean_ctor_get(x_11, 1); +lean_inc(x_26); lean_inc(x_25); -lean_inc(x_24); -lean_dec(x_10); -x_26 = lean_alloc_ctor(1, 2, 0); -lean_ctor_set(x_26, 0, x_24); -lean_ctor_set(x_26, 1, x_25); -return x_26; +lean_dec(x_11); +x_27 = lean_alloc_ctor(1, 2, 0); +lean_ctor_set(x_27, 0, x_25); +lean_ctor_set(x_27, 1, x_26); +return x_27; } } } } } -LEAN_EXPORT lean_object* l_Lean_importModulesCore_go(lean_object* x_1, uint8_t x_2, lean_object* x_3, lean_object* x_4) { +LEAN_EXPORT lean_object* l_Lean_importModulesCore_go(lean_object* x_1, uint8_t x_2, lean_object* x_3, lean_object* x_4, lean_object* x_5) { _start: { -lean_object* x_5; size_t x_6; size_t x_7; lean_object* x_8; lean_object* x_9; -x_5 = lean_box(0); -x_6 = lean_array_size(x_1); -x_7 = 0; -x_8 = lean_box(0); -x_9 = l_Array_forIn_x27Unsafe_loop___at_Lean_importModulesCore_go___spec__10(x_1, x_2, x_5, x_1, x_6, x_7, x_8, x_3, x_4); -if (lean_obj_tag(x_9) == 0) +lean_object* x_6; size_t x_7; size_t x_8; lean_object* x_9; lean_object* x_10; +x_6 = lean_box(0); +x_7 = lean_array_size(x_1); +x_8 = 0; +x_9 = lean_box(0); +x_10 = l_Array_forIn_x27Unsafe_loop___at_Lean_importModulesCore_go___spec__10(x_1, x_2, x_3, x_6, x_1, x_7, x_8, x_9, x_4, x_5); +if (lean_obj_tag(x_10) == 0) { -uint8_t x_10; -x_10 = !lean_is_exclusive(x_9); -if (x_10 == 0) +uint8_t x_11; +x_11 = !lean_is_exclusive(x_10); +if (x_11 == 0) { -lean_object* x_11; -x_11 = lean_ctor_get(x_9, 0); -lean_dec(x_11); -lean_ctor_set(x_9, 0, x_8); -return x_9; +lean_object* x_12; +x_12 = lean_ctor_get(x_10, 0); +lean_dec(x_12); +lean_ctor_set(x_10, 0, x_9); +return x_10; } else { -lean_object* x_12; lean_object* x_13; -x_12 = lean_ctor_get(x_9, 1); -lean_inc(x_12); -lean_dec(x_9); -x_13 = lean_alloc_ctor(0, 2, 0); -lean_ctor_set(x_13, 0, x_8); -lean_ctor_set(x_13, 1, x_12); -return x_13; +lean_object* x_13; lean_object* x_14; +x_13 = lean_ctor_get(x_10, 1); +lean_inc(x_13); +lean_dec(x_10); +x_14 = lean_alloc_ctor(0, 2, 0); +lean_ctor_set(x_14, 0, x_9); +lean_ctor_set(x_14, 1, x_13); +return x_14; } } else { -uint8_t x_14; -x_14 = !lean_is_exclusive(x_9); -if (x_14 == 0) +uint8_t x_15; +x_15 = !lean_is_exclusive(x_10); +if (x_15 == 0) { -return x_9; +return x_10; } else { -lean_object* x_15; lean_object* x_16; lean_object* x_17; -x_15 = lean_ctor_get(x_9, 0); -x_16 = lean_ctor_get(x_9, 1); +lean_object* x_16; lean_object* x_17; lean_object* x_18; +x_16 = lean_ctor_get(x_10, 0); +x_17 = lean_ctor_get(x_10, 1); +lean_inc(x_17); lean_inc(x_16); -lean_inc(x_15); -lean_dec(x_9); -x_17 = lean_alloc_ctor(1, 2, 0); -lean_ctor_set(x_17, 0, x_15); -lean_ctor_set(x_17, 1, x_16); -return x_17; +lean_dec(x_10); +x_18 = lean_alloc_ctor(1, 2, 0); +lean_ctor_set(x_18, 0, x_16); +lean_ctor_set(x_18, 1, x_17); +return x_18; } } } } -LEAN_EXPORT lean_object* l_Lean_importModulesCore(lean_object* x_1, uint8_t x_2, lean_object* x_3, lean_object* x_4) { +LEAN_EXPORT lean_object* l_Lean_importModulesCore(lean_object* x_1, uint8_t x_2, lean_object* x_3, lean_object* x_4, lean_object* x_5) { _start: { -lean_object* x_5; -x_5 = l_Lean_importModulesCore_go(x_1, x_2, x_3, x_4); -return x_5; +lean_object* x_6; +x_6 = l_Lean_importModulesCore_go(x_1, x_2, x_3, x_4, x_5); +return x_6; } } LEAN_EXPORT lean_object* 
l_Std_DHashMap_Internal_AssocList_contains___at_Lean_importModulesCore_go___spec__2___boxed(lean_object* x_1, lean_object* x_2) { @@ -27555,114 +27548,97 @@ lean_dec(x_2); return x_9; } } -LEAN_EXPORT lean_object* l_Array_forIn_x27Unsafe_loop___at_Lean_importModulesCore_go___spec__10___lambda__1___boxed(lean_object* x_1, lean_object* x_2, lean_object* x_3, lean_object* x_4, lean_object* x_5, lean_object* x_6, lean_object* x_7) { +LEAN_EXPORT lean_object* l_Array_forIn_x27Unsafe_loop___at_Lean_importModulesCore_go___spec__10___lambda__1___boxed(lean_object* x_1, lean_object* x_2, lean_object* x_3, lean_object* x_4, lean_object* x_5, lean_object* x_6) { _start: { -uint8_t x_8; uint8_t x_9; lean_object* x_10; -x_8 = lean_unbox(x_2); +uint8_t x_7; lean_object* x_8; +x_7 = lean_unbox(x_2); lean_dec(x_2); -x_9 = lean_unbox(x_3); -lean_dec(x_3); -x_10 = l_Array_forIn_x27Unsafe_loop___at_Lean_importModulesCore_go___spec__10___lambda__1(x_1, x_8, x_9, x_4, x_5, x_6, x_7); -lean_dec(x_6); +x_8 = l_Array_forIn_x27Unsafe_loop___at_Lean_importModulesCore_go___spec__10___lambda__1(x_1, x_7, x_3, x_4, x_5, x_6); lean_dec(x_5); -return x_10; +lean_dec(x_4); +return x_8; } } -LEAN_EXPORT lean_object* l_Array_forIn_x27Unsafe_loop___at_Lean_importModulesCore_go___spec__10___lambda__2___boxed(lean_object* x_1, lean_object* x_2, lean_object* x_3, lean_object* x_4, lean_object* x_5, lean_object* x_6, lean_object* x_7, lean_object* x_8, lean_object* x_9, lean_object* x_10) { +LEAN_EXPORT lean_object* l_Array_forIn_x27Unsafe_loop___at_Lean_importModulesCore_go___spec__10___lambda__2___boxed(lean_object* x_1, lean_object* x_2, lean_object* x_3, lean_object* x_4, lean_object* x_5, lean_object* x_6, lean_object* x_7) { _start: { -uint8_t x_11; uint8_t x_12; uint8_t x_13; lean_object* x_14; -x_11 = lean_unbox(x_1); +uint8_t x_8; uint8_t x_9; lean_object* x_10; +x_8 = lean_unbox(x_1); lean_dec(x_1); -x_12 = lean_unbox(x_3); +x_9 = lean_unbox(x_3); lean_dec(x_3); -x_13 = lean_unbox(x_4); -lean_dec(x_4); -x_14 = l_Array_forIn_x27Unsafe_loop___at_Lean_importModulesCore_go___spec__10___lambda__2(x_11, x_2, x_12, x_13, x_5, x_6, x_7, x_8, x_9, x_10); -lean_dec(x_8); -lean_dec(x_7); +x_10 = l_Array_forIn_x27Unsafe_loop___at_Lean_importModulesCore_go___spec__10___lambda__2(x_8, x_2, x_9, x_4, x_5, x_6, x_7); lean_dec(x_5); -return x_14; -} -} -LEAN_EXPORT lean_object* l_Array_forIn_x27Unsafe_loop___at_Lean_importModulesCore_go___spec__10___lambda__3___boxed(lean_object* x_1, lean_object* x_2, lean_object* x_3, lean_object* x_4, lean_object* x_5, lean_object* x_6, lean_object* x_7, lean_object* x_8, lean_object* x_9) { -_start: -{ -uint8_t x_10; uint8_t x_11; uint8_t x_12; lean_object* x_13; -x_10 = lean_unbox(x_2); -lean_dec(x_2); -x_11 = lean_unbox(x_4); lean_dec(x_4); -x_12 = lean_unbox(x_5); -lean_dec(x_5); -x_13 = l_Array_forIn_x27Unsafe_loop___at_Lean_importModulesCore_go___spec__10___lambda__3(x_1, x_10, x_3, x_11, x_12, x_6, x_7, x_8, x_9); -lean_dec(x_7); -lean_dec(x_6); -return x_13; +return x_10; } } -LEAN_EXPORT lean_object* l_Array_forIn_x27Unsafe_loop___at_Lean_importModulesCore_go___spec__10___lambda__4___boxed(lean_object* x_1, lean_object* x_2, lean_object* x_3, lean_object* x_4, lean_object* x_5, lean_object* x_6, lean_object* x_7) { +LEAN_EXPORT lean_object* l_Array_forIn_x27Unsafe_loop___at_Lean_importModulesCore_go___spec__10___lambda__3___boxed(lean_object* x_1, lean_object* x_2, lean_object* x_3, lean_object* x_4, lean_object* x_5, lean_object* x_6, lean_object* x_7, lean_object* x_8) { _start: { 
-uint8_t x_8; uint8_t x_9; lean_object* x_10; -x_8 = lean_unbox(x_2); -lean_dec(x_2); -x_9 = lean_unbox(x_3); +uint8_t x_9; uint8_t x_10; lean_object* x_11; +x_9 = lean_unbox(x_1); +lean_dec(x_1); +x_10 = lean_unbox(x_3); lean_dec(x_3); -x_10 = l_Array_forIn_x27Unsafe_loop___at_Lean_importModulesCore_go___spec__10___lambda__4(x_1, x_8, x_9, x_4, x_5, x_6, x_7); +x_11 = l_Array_forIn_x27Unsafe_loop___at_Lean_importModulesCore_go___spec__10___lambda__3(x_9, x_2, x_10, x_4, x_5, x_6, x_7, x_8); +lean_dec(x_6); lean_dec(x_5); lean_dec(x_4); -return x_10; +return x_11; } } -LEAN_EXPORT lean_object* l_Array_forIn_x27Unsafe_loop___at_Lean_importModulesCore_go___spec__10___lambda__5___boxed(lean_object* x_1, lean_object* x_2, lean_object* x_3) { +LEAN_EXPORT lean_object* l_Array_forIn_x27Unsafe_loop___at_Lean_importModulesCore_go___spec__10___lambda__4___boxed(lean_object* x_1, lean_object* x_2, lean_object* x_3) { _start: { lean_object* x_4; -x_4 = l_Array_forIn_x27Unsafe_loop___at_Lean_importModulesCore_go___spec__10___lambda__5(x_1, x_2, x_3); +x_4 = l_Array_forIn_x27Unsafe_loop___at_Lean_importModulesCore_go___spec__10___lambda__4(x_1, x_2, x_3); lean_dec(x_2); lean_dec(x_1); return x_4; } } -LEAN_EXPORT lean_object* l_Array_forIn_x27Unsafe_loop___at_Lean_importModulesCore_go___spec__10___boxed(lean_object* x_1, lean_object* x_2, lean_object* x_3, lean_object* x_4, lean_object* x_5, lean_object* x_6, lean_object* x_7, lean_object* x_8, lean_object* x_9) { +LEAN_EXPORT lean_object* l_Array_forIn_x27Unsafe_loop___at_Lean_importModulesCore_go___spec__10___boxed(lean_object* x_1, lean_object* x_2, lean_object* x_3, lean_object* x_4, lean_object* x_5, lean_object* x_6, lean_object* x_7, lean_object* x_8, lean_object* x_9, lean_object* x_10) { _start: { -uint8_t x_10; size_t x_11; size_t x_12; lean_object* x_13; -x_10 = lean_unbox(x_2); +uint8_t x_11; size_t x_12; size_t x_13; lean_object* x_14; +x_11 = lean_unbox(x_2); lean_dec(x_2); -x_11 = lean_unbox_usize(x_5); -lean_dec(x_5); x_12 = lean_unbox_usize(x_6); lean_dec(x_6); -x_13 = l_Array_forIn_x27Unsafe_loop___at_Lean_importModulesCore_go___spec__10(x_1, x_10, x_3, x_4, x_11, x_12, x_7, x_8, x_9); +x_13 = lean_unbox_usize(x_7); +lean_dec(x_7); +x_14 = l_Array_forIn_x27Unsafe_loop___at_Lean_importModulesCore_go___spec__10(x_1, x_11, x_3, x_4, x_5, x_12, x_13, x_8, x_9, x_10); +lean_dec(x_5); lean_dec(x_4); lean_dec(x_3); lean_dec(x_1); -return x_13; +return x_14; } } -LEAN_EXPORT lean_object* l_Lean_importModulesCore_go___boxed(lean_object* x_1, lean_object* x_2, lean_object* x_3, lean_object* x_4) { +LEAN_EXPORT lean_object* l_Lean_importModulesCore_go___boxed(lean_object* x_1, lean_object* x_2, lean_object* x_3, lean_object* x_4, lean_object* x_5) { _start: { -uint8_t x_5; lean_object* x_6; -x_5 = lean_unbox(x_2); +uint8_t x_6; lean_object* x_7; +x_6 = lean_unbox(x_2); lean_dec(x_2); -x_6 = l_Lean_importModulesCore_go(x_1, x_5, x_3, x_4); +x_7 = l_Lean_importModulesCore_go(x_1, x_6, x_3, x_4, x_5); +lean_dec(x_3); lean_dec(x_1); -return x_6; +return x_7; } } -LEAN_EXPORT lean_object* l_Lean_importModulesCore___boxed(lean_object* x_1, lean_object* x_2, lean_object* x_3, lean_object* x_4) { +LEAN_EXPORT lean_object* l_Lean_importModulesCore___boxed(lean_object* x_1, lean_object* x_2, lean_object* x_3, lean_object* x_4, lean_object* x_5) { _start: { -uint8_t x_5; lean_object* x_6; -x_5 = lean_unbox(x_2); +uint8_t x_6; lean_object* x_7; +x_6 = lean_unbox(x_2); lean_dec(x_2); -x_6 = l_Lean_importModulesCore(x_1, x_5, x_3, x_4); +x_7 = 
l_Lean_importModulesCore(x_1, x_6, x_3, x_4, x_5); +lean_dec(x_3); lean_dec(x_1); -return x_6; +return x_7; } } LEAN_EXPORT uint8_t l___private_Lean_Environment_0__Lean_subsumesInfo(lean_object* x_1, lean_object* x_2) { @@ -27927,7 +27903,7 @@ x_5 = lean_nat_dec_lt(x_3, x_4); if (x_5 == 0) { lean_object* x_6; -x_6 = l_Lean_EnvExtension_initFn____x40_Lean_Environment___hyg_6793____closed__1; +x_6 = l_Lean_EnvExtension_initFn____x40_Lean_Environment___hyg_6645____closed__1; return x_6; } else @@ -27939,7 +27915,7 @@ lean_dec(x_7); if (x_8 == 0) { lean_object* x_9; -x_9 = l_Lean_EnvExtension_initFn____x40_Lean_Environment___hyg_6793____closed__1; +x_9 = l_Lean_EnvExtension_initFn____x40_Lean_Environment___hyg_6645____closed__1; return x_9; } else @@ -27947,7 +27923,7 @@ else size_t x_10; size_t x_11; lean_object* x_12; lean_object* x_13; x_10 = lean_usize_of_nat(x_3); x_11 = lean_usize_of_nat(x_4); -x_12 = l_Lean_EnvExtension_initFn____x40_Lean_Environment___hyg_6793____closed__1; +x_12 = l_Lean_EnvExtension_initFn____x40_Lean_Environment___hyg_6645____closed__1; x_13 = l_Array_foldlMUnsafe_fold___at_Lean_finalizeImport___spec__2(x_1, x_2, x_10, x_11, x_12); return x_13; } @@ -27993,12 +27969,12 @@ x_12 = lean_ctor_get(x_11, 0); lean_inc(x_12); lean_dec(x_11); x_13 = 1; -x_14 = l_Lean_instToStringImport___closed__1; +x_14 = l___private_Lean_Environment_0__Lean_AsyncConsts_add___closed__1; x_15 = l_Lean_Name_toString(x_12, x_13, x_14); x_16 = l_Array_mapMUnsafe_map___at_Lean_finalizeImport___spec__3___closed__1; x_17 = lean_string_append(x_16, x_15); lean_dec(x_15); -x_18 = l___private_Lean_Environment_0__Lean_AsyncConsts_add___closed__3; +x_18 = l___private_Lean_Environment_0__Lean_AsyncConsts_add___closed__4; x_19 = lean_string_append(x_17, x_18); x_20 = lean_alloc_ctor(18, 1, 0); lean_ctor_set(x_20, 0, x_19); @@ -30539,7 +30515,7 @@ x_5 = lean_nat_dec_lt(x_3, x_4); if (x_5 == 0) { lean_object* x_6; -x_6 = l_Lean_EnvExtension_initFn____x40_Lean_Environment___hyg_6793____closed__1; +x_6 = l_Lean_EnvExtension_initFn____x40_Lean_Environment___hyg_6645____closed__1; return x_6; } else @@ -30551,7 +30527,7 @@ lean_dec(x_7); if (x_8 == 0) { lean_object* x_9; -x_9 = l_Lean_EnvExtension_initFn____x40_Lean_Environment___hyg_6793____closed__1; +x_9 = l_Lean_EnvExtension_initFn____x40_Lean_Environment___hyg_6645____closed__1; return x_9; } else @@ -30559,7 +30535,7 @@ else size_t x_10; size_t x_11; lean_object* x_12; lean_object* x_13; x_10 = lean_usize_of_nat(x_3); x_11 = lean_usize_of_nat(x_4); -x_12 = l_Lean_EnvExtension_initFn____x40_Lean_Environment___hyg_6793____closed__1; +x_12 = l_Lean_EnvExtension_initFn____x40_Lean_Environment___hyg_6645____closed__1; x_13 = l_Array_foldlMUnsafe_fold___at_Lean_finalizeImport___spec__13(x_1, x_2, x_10, x_11, x_12); return x_13; } @@ -31794,7 +31770,7 @@ lean_ctor_set(x_26, 0, x_16); lean_ctor_set(x_26, 1, x_23); lean_ctor_set_uint8(x_26, sizeof(void*)*2, x_22); x_27 = lean_box(0); -x_28 = l_Lean_EnvExtension_initFn____x40_Lean_Environment___hyg_6793____closed__1; +x_28 = l_Lean_EnvExtension_initFn____x40_Lean_Environment___hyg_6645____closed__1; lean_inc(x_6); lean_inc(x_5); lean_inc(x_2); @@ -32437,7 +32413,7 @@ x_59 = lean_box(0); if (x_24 == 0) { lean_object* x_68; -x_68 = l_Lean_EnvExtension_initFn____x40_Lean_Environment___hyg_6793____closed__1; +x_68 = l_Lean_EnvExtension_initFn____x40_Lean_Environment___hyg_6645____closed__1; x_60 = x_68; goto block_67; } @@ -32448,7 +32424,7 @@ x_69 = lean_nat_dec_le(x_23, x_23); if (x_69 == 0) { lean_object* x_70; 
-x_70 = l_Lean_EnvExtension_initFn____x40_Lean_Environment___hyg_6793____closed__1; +x_70 = l_Lean_EnvExtension_initFn____x40_Lean_Environment___hyg_6645____closed__1; x_60 = x_70; goto block_67; } @@ -32456,7 +32432,7 @@ else { size_t x_71; lean_object* x_72; lean_object* x_73; x_71 = lean_usize_of_nat(x_23); -x_72 = l_Lean_EnvExtension_initFn____x40_Lean_Environment___hyg_6793____closed__1; +x_72 = l_Lean_EnvExtension_initFn____x40_Lean_Environment___hyg_6645____closed__1; x_73 = l_Array_foldlMUnsafe_fold___at_Lean_finalizeImport___spec__17(x_14, x_16, x_71, x_72); x_60 = x_73; goto block_67; @@ -32950,67 +32926,67 @@ static lean_object* _init_l_Lean_importModules___lambda__1___closed__1() { { lean_object* x_1; lean_object* x_2; lean_object* x_3; x_1 = l_Lean_mkEmptyEnvironment___lambda__1___closed__3; -x_2 = l_Lean_EnvExtension_initFn____x40_Lean_Environment___hyg_6793____closed__1; +x_2 = l_Lean_EnvExtension_initFn____x40_Lean_Environment___hyg_6645____closed__1; x_3 = lean_alloc_ctor(0, 2, 0); lean_ctor_set(x_3, 0, x_1); lean_ctor_set(x_3, 1, x_2); return x_3; } } -LEAN_EXPORT lean_object* l_Lean_importModules___lambda__1(uint8_t x_1, lean_object* x_2, lean_object* x_3, uint32_t x_4, uint8_t x_5, uint8_t x_6, lean_object* x_7, lean_object* x_8) { +LEAN_EXPORT lean_object* l_Lean_importModules___lambda__1(uint8_t x_1, lean_object* x_2, lean_object* x_3, lean_object* x_4, uint32_t x_5, uint8_t x_6, uint8_t x_7, lean_object* x_8, lean_object* x_9) { _start: { -uint8_t x_9; uint8_t x_10; lean_object* x_11; lean_object* x_12; lean_object* x_13; lean_object* x_14; lean_object* x_15; -x_9 = 2; -x_10 = l_Lean_instDecidableEqOLeanLevel(x_1, x_9); -x_11 = l_Lean_importModules___lambda__1___closed__1; -x_12 = lean_st_mk_ref(x_11, x_8); -x_13 = lean_ctor_get(x_12, 0); -lean_inc(x_13); -x_14 = lean_ctor_get(x_12, 1); +uint8_t x_10; uint8_t x_11; lean_object* x_12; lean_object* x_13; lean_object* x_14; lean_object* x_15; lean_object* x_16; +x_10 = 2; +x_11 = l_Lean_instDecidableEqOLeanLevel(x_1, x_10); +x_12 = l_Lean_importModules___lambda__1___closed__1; +x_13 = lean_st_mk_ref(x_12, x_9); +x_14 = lean_ctor_get(x_13, 0); lean_inc(x_14); -lean_dec(x_12); -lean_inc(x_13); -x_15 = l_Lean_importModulesCore_go(x_2, x_10, x_13, x_14); -if (lean_obj_tag(x_15) == 0) -{ -lean_object* x_16; lean_object* x_17; lean_object* x_18; lean_object* x_19; lean_object* x_20; -x_16 = lean_ctor_get(x_15, 1); -lean_inc(x_16); -lean_dec(x_15); -x_17 = lean_st_ref_get(x_13, x_16); +x_15 = lean_ctor_get(x_13, 1); +lean_inc(x_15); lean_dec(x_13); -x_18 = lean_ctor_get(x_17, 0); -lean_inc(x_18); -x_19 = lean_ctor_get(x_17, 1); +lean_inc(x_14); +x_16 = l_Lean_importModulesCore_go(x_2, x_11, x_3, x_14, x_15); +if (lean_obj_tag(x_16) == 0) +{ +lean_object* x_17; lean_object* x_18; lean_object* x_19; lean_object* x_20; lean_object* x_21; +x_17 = lean_ctor_get(x_16, 1); +lean_inc(x_17); +lean_dec(x_16); +x_18 = lean_st_ref_get(x_14, x_17); +lean_dec(x_14); +x_19 = lean_ctor_get(x_18, 0); lean_inc(x_19); -lean_dec(x_17); -x_20 = l_Lean_finalizeImport(x_18, x_2, x_3, x_4, x_5, x_6, x_1, x_19); -return x_20; +x_20 = lean_ctor_get(x_18, 1); +lean_inc(x_20); +lean_dec(x_18); +x_21 = l_Lean_finalizeImport(x_19, x_2, x_4, x_5, x_6, x_7, x_1, x_20); +return x_21; } else { -uint8_t x_21; -lean_dec(x_13); -lean_dec(x_3); +uint8_t x_22; +lean_dec(x_14); +lean_dec(x_4); lean_dec(x_2); -x_21 = !lean_is_exclusive(x_15); -if (x_21 == 0) +x_22 = !lean_is_exclusive(x_16); +if (x_22 == 0) { -return x_15; +return x_16; } else { -lean_object* x_22; 
lean_object* x_23; lean_object* x_24; -x_22 = lean_ctor_get(x_15, 0); -x_23 = lean_ctor_get(x_15, 1); +lean_object* x_23; lean_object* x_24; lean_object* x_25; +x_23 = lean_ctor_get(x_16, 0); +x_24 = lean_ctor_get(x_16, 1); +lean_inc(x_24); lean_inc(x_23); -lean_inc(x_22); -lean_dec(x_15); -x_24 = lean_alloc_ctor(1, 2, 0); -lean_ctor_set(x_24, 0, x_22); -lean_ctor_set(x_24, 1, x_23); -return x_24; +lean_dec(x_16); +x_25 = lean_alloc_ctor(1, 2, 0); +lean_ctor_set(x_25, 0, x_23); +lean_ctor_set(x_25, 1, x_24); +return x_25; } } } @@ -33025,70 +33001,71 @@ lean_closure_set(x_2, 0, x_1); return x_2; } } -LEAN_EXPORT lean_object* l_Lean_importModules___lambda__2(lean_object* x_1, uint8_t x_2, lean_object* x_3, lean_object* x_4, uint32_t x_5, uint8_t x_6, uint8_t x_7, size_t x_8, lean_object* x_9, lean_object* x_10) { +LEAN_EXPORT lean_object* l_Lean_importModules___lambda__2(lean_object* x_1, uint8_t x_2, lean_object* x_3, lean_object* x_4, lean_object* x_5, uint32_t x_6, uint8_t x_7, uint8_t x_8, size_t x_9, lean_object* x_10, lean_object* x_11) { _start: { -lean_object* x_11; lean_object* x_12; uint8_t x_13; lean_object* x_14; lean_object* x_15; lean_object* x_16; lean_object* x_17; lean_object* x_18; -x_11 = lean_array_get_size(x_1); -x_12 = lean_unsigned_to_nat(0u); -x_13 = lean_nat_dec_lt(x_12, x_11); -x_14 = lean_box(x_2); -x_15 = lean_box_uint32(x_5); -x_16 = lean_box(x_6); +lean_object* x_12; lean_object* x_13; uint8_t x_14; lean_object* x_15; lean_object* x_16; lean_object* x_17; lean_object* x_18; lean_object* x_19; +x_12 = lean_array_get_size(x_1); +x_13 = lean_unsigned_to_nat(0u); +x_14 = lean_nat_dec_lt(x_13, x_12); +x_15 = lean_box(x_2); +x_16 = lean_box_uint32(x_6); x_17 = lean_box(x_7); -x_18 = lean_alloc_closure((void*)(l_Lean_importModules___lambda__1___boxed), 8, 6); -lean_closure_set(x_18, 0, x_14); -lean_closure_set(x_18, 1, x_3); -lean_closure_set(x_18, 2, x_4); -lean_closure_set(x_18, 3, x_15); -lean_closure_set(x_18, 4, x_16); -lean_closure_set(x_18, 5, x_17); -if (x_13 == 0) +x_18 = lean_box(x_8); +x_19 = lean_alloc_closure((void*)(l_Lean_importModules___lambda__1___boxed), 9, 7); +lean_closure_set(x_19, 0, x_15); +lean_closure_set(x_19, 1, x_3); +lean_closure_set(x_19, 2, x_4); +lean_closure_set(x_19, 3, x_5); +lean_closure_set(x_19, 4, x_16); +lean_closure_set(x_19, 5, x_17); +lean_closure_set(x_19, 6, x_18); +if (x_14 == 0) { -lean_object* x_19; lean_object* x_20; lean_object* x_21; -lean_dec(x_11); +lean_object* x_20; lean_object* x_21; lean_object* x_22; +lean_dec(x_12); lean_dec(x_1); -x_19 = l_Lean_importModules___lambda__2___closed__1; -x_20 = lean_alloc_closure((void*)(l_EStateM_bind___rarg), 3, 2); -lean_closure_set(x_20, 0, x_19); -lean_closure_set(x_20, 1, x_18); -x_21 = l_Lean_withImporting___rarg(x_20, x_10); -return x_21; +x_20 = l_Lean_importModules___lambda__2___closed__1; +x_21 = lean_alloc_closure((void*)(l_EStateM_bind___rarg), 3, 2); +lean_closure_set(x_21, 0, x_20); +lean_closure_set(x_21, 1, x_19); +x_22 = l_Lean_withImporting___rarg(x_21, x_11); +return x_22; } else { -uint8_t x_22; -x_22 = lean_nat_dec_le(x_11, x_11); -if (x_22 == 0) +uint8_t x_23; +x_23 = lean_nat_dec_le(x_12, x_12); +if (x_23 == 0) { -lean_object* x_23; lean_object* x_24; lean_object* x_25; -lean_dec(x_11); +lean_object* x_24; lean_object* x_25; lean_object* x_26; +lean_dec(x_12); lean_dec(x_1); -x_23 = l_Lean_importModules___lambda__2___closed__1; -x_24 = lean_alloc_closure((void*)(l_EStateM_bind___rarg), 3, 2); -lean_closure_set(x_24, 0, x_23); -lean_closure_set(x_24, 1, 
x_18); -x_25 = l_Lean_withImporting___rarg(x_24, x_10); -return x_25; +x_24 = l_Lean_importModules___lambda__2___closed__1; +x_25 = lean_alloc_closure((void*)(l_EStateM_bind___rarg), 3, 2); +lean_closure_set(x_25, 0, x_24); +lean_closure_set(x_25, 1, x_19); +x_26 = l_Lean_withImporting___rarg(x_25, x_11); +return x_26; } else { -size_t x_26; lean_object* x_27; lean_object* x_28; lean_object* x_29; lean_object* x_30; lean_object* x_31; lean_object* x_32; -x_26 = lean_usize_of_nat(x_11); -lean_dec(x_11); -x_27 = lean_box(0); -x_28 = lean_box_usize(x_8); -x_29 = lean_box_usize(x_26); -x_30 = lean_alloc_closure((void*)(l_Array_foldlMUnsafe_fold___at_Lean_importModules___spec__2___boxed), 5, 4); -lean_closure_set(x_30, 0, x_1); -lean_closure_set(x_30, 1, x_28); -lean_closure_set(x_30, 2, x_29); -lean_closure_set(x_30, 3, x_27); -x_31 = lean_alloc_closure((void*)(l_EStateM_bind___rarg), 3, 2); -lean_closure_set(x_31, 0, x_30); -lean_closure_set(x_31, 1, x_18); -x_32 = l_Lean_withImporting___rarg(x_31, x_10); -return x_32; +size_t x_27; lean_object* x_28; lean_object* x_29; lean_object* x_30; lean_object* x_31; lean_object* x_32; lean_object* x_33; +x_27 = lean_usize_of_nat(x_12); +lean_dec(x_12); +x_28 = lean_box(0); +x_29 = lean_box_usize(x_9); +x_30 = lean_box_usize(x_27); +x_31 = lean_alloc_closure((void*)(l_Array_foldlMUnsafe_fold___at_Lean_importModules___spec__2___boxed), 5, 4); +lean_closure_set(x_31, 0, x_1); +lean_closure_set(x_31, 1, x_29); +lean_closure_set(x_31, 2, x_30); +lean_closure_set(x_31, 3, x_28); +x_32 = lean_alloc_closure((void*)(l_EStateM_bind___rarg), 3, 2); +lean_closure_set(x_32, 0, x_31); +lean_closure_set(x_32, 1, x_19); +x_33 = l_Lean_withImporting___rarg(x_32, x_11); +return x_33; } } } @@ -33110,46 +33087,47 @@ x_2 = lean_box_usize(x_1); return x_2; } } -LEAN_EXPORT lean_object* l_Lean_importModules(lean_object* x_1, lean_object* x_2, uint32_t x_3, lean_object* x_4, uint8_t x_5, uint8_t x_6, uint8_t x_7, lean_object* x_8) { +LEAN_EXPORT lean_object* l_Lean_importModules(lean_object* x_1, lean_object* x_2, uint32_t x_3, lean_object* x_4, uint8_t x_5, uint8_t x_6, uint8_t x_7, lean_object* x_8, lean_object* x_9) { _start: { -lean_object* x_9; size_t x_10; lean_object* x_11; lean_object* x_12; lean_object* x_13; lean_object* x_14; lean_object* x_15; lean_object* x_16; lean_object* x_17; lean_object* x_18; lean_object* x_19; lean_object* x_20; lean_object* x_21; lean_object* x_22; lean_object* x_23; lean_object* x_24; -x_9 = lean_box(0); -x_10 = lean_array_size(x_1); -x_11 = lean_box(0); -x_12 = lean_box_usize(x_10); -x_13 = l_Lean_importModules___boxed__const__1; +lean_object* x_10; size_t x_11; lean_object* x_12; lean_object* x_13; lean_object* x_14; lean_object* x_15; lean_object* x_16; lean_object* x_17; lean_object* x_18; lean_object* x_19; lean_object* x_20; lean_object* x_21; lean_object* x_22; lean_object* x_23; lean_object* x_24; lean_object* x_25; +x_10 = lean_box(0); +x_11 = lean_array_size(x_1); +x_12 = lean_box(0); +x_13 = lean_box_usize(x_11); +x_14 = l_Lean_importModules___boxed__const__1; lean_inc_n(x_1, 2); -x_14 = lean_alloc_closure((void*)(l_Array_forIn_x27Unsafe_loop___at_Lean_importModules___spec__1___boxed), 7, 6); -lean_closure_set(x_14, 0, x_1); -lean_closure_set(x_14, 1, x_9); -lean_closure_set(x_14, 2, x_1); -lean_closure_set(x_14, 3, x_12); -lean_closure_set(x_14, 4, x_13); -lean_closure_set(x_14, 5, x_11); -x_15 = lean_box(x_7); -x_16 = lean_box_uint32(x_3); -x_17 = lean_box(x_5); -x_18 = lean_box(x_6); -x_19 = 
l_Lean_importModules___boxed__const__1; +x_15 = lean_alloc_closure((void*)(l_Array_forIn_x27Unsafe_loop___at_Lean_importModules___spec__1___boxed), 7, 6); +lean_closure_set(x_15, 0, x_1); +lean_closure_set(x_15, 1, x_10); +lean_closure_set(x_15, 2, x_1); +lean_closure_set(x_15, 3, x_13); +lean_closure_set(x_15, 4, x_14); +lean_closure_set(x_15, 5, x_12); +x_16 = lean_box(x_7); +x_17 = lean_box_uint32(x_3); +x_18 = lean_box(x_5); +x_19 = lean_box(x_6); +x_20 = l_Lean_importModules___boxed__const__1; lean_inc(x_2); -x_20 = lean_alloc_closure((void*)(l_Lean_importModules___lambda__2___boxed), 10, 8); -lean_closure_set(x_20, 0, x_4); -lean_closure_set(x_20, 1, x_15); -lean_closure_set(x_20, 2, x_1); -lean_closure_set(x_20, 3, x_2); -lean_closure_set(x_20, 4, x_16); -lean_closure_set(x_20, 5, x_17); -lean_closure_set(x_20, 6, x_18); -lean_closure_set(x_20, 7, x_19); -x_21 = lean_alloc_closure((void*)(l_EStateM_bind___rarg), 3, 2); -lean_closure_set(x_21, 0, x_14); -lean_closure_set(x_21, 1, x_20); -x_22 = l_Lean_importModules___closed__1; -x_23 = lean_box(0); -x_24 = l_Lean_profileitIOUnsafe___rarg(x_22, x_2, x_21, x_23, x_8); +x_21 = lean_alloc_closure((void*)(l_Lean_importModules___lambda__2___boxed), 11, 9); +lean_closure_set(x_21, 0, x_4); +lean_closure_set(x_21, 1, x_16); +lean_closure_set(x_21, 2, x_1); +lean_closure_set(x_21, 3, x_8); +lean_closure_set(x_21, 4, x_2); +lean_closure_set(x_21, 5, x_17); +lean_closure_set(x_21, 6, x_18); +lean_closure_set(x_21, 7, x_19); +lean_closure_set(x_21, 8, x_20); +x_22 = lean_alloc_closure((void*)(l_EStateM_bind___rarg), 3, 2); +lean_closure_set(x_22, 0, x_15); +lean_closure_set(x_22, 1, x_21); +x_23 = l_Lean_importModules___closed__1; +x_24 = lean_box(0); +x_25 = l_Lean_profileitIOUnsafe___rarg(x_23, x_2, x_22, x_24, x_9); lean_dec(x_2); -return x_24; +return x_25; } } LEAN_EXPORT lean_object* l_Array_forIn_x27Unsafe_loop___at_Lean_importModules___spec__1___boxed(lean_object* x_1, lean_object* x_2, lean_object* x_3, lean_object* x_4, lean_object* x_5, lean_object* x_6, lean_object* x_7) { @@ -33180,213 +33158,215 @@ lean_dec(x_1); return x_8; } } -LEAN_EXPORT lean_object* l_Lean_importModules___lambda__1___boxed(lean_object* x_1, lean_object* x_2, lean_object* x_3, lean_object* x_4, lean_object* x_5, lean_object* x_6, lean_object* x_7, lean_object* x_8) { +LEAN_EXPORT lean_object* l_Lean_importModules___lambda__1___boxed(lean_object* x_1, lean_object* x_2, lean_object* x_3, lean_object* x_4, lean_object* x_5, lean_object* x_6, lean_object* x_7, lean_object* x_8, lean_object* x_9) { _start: { -uint8_t x_9; uint32_t x_10; uint8_t x_11; uint8_t x_12; lean_object* x_13; -x_9 = lean_unbox(x_1); +uint8_t x_10; uint32_t x_11; uint8_t x_12; uint8_t x_13; lean_object* x_14; +x_10 = lean_unbox(x_1); lean_dec(x_1); -x_10 = lean_unbox_uint32(x_4); -lean_dec(x_4); -x_11 = lean_unbox(x_5); +x_11 = lean_unbox_uint32(x_5); lean_dec(x_5); x_12 = lean_unbox(x_6); lean_dec(x_6); -x_13 = l_Lean_importModules___lambda__1(x_9, x_2, x_3, x_10, x_11, x_12, x_7, x_8); +x_13 = lean_unbox(x_7); lean_dec(x_7); -return x_13; +x_14 = l_Lean_importModules___lambda__1(x_10, x_2, x_3, x_4, x_11, x_12, x_13, x_8, x_9); +lean_dec(x_8); +lean_dec(x_3); +return x_14; } } -LEAN_EXPORT lean_object* l_Lean_importModules___lambda__2___boxed(lean_object* x_1, lean_object* x_2, lean_object* x_3, lean_object* x_4, lean_object* x_5, lean_object* x_6, lean_object* x_7, lean_object* x_8, lean_object* x_9, lean_object* x_10) { +LEAN_EXPORT lean_object* 
l_Lean_importModules___lambda__2___boxed(lean_object* x_1, lean_object* x_2, lean_object* x_3, lean_object* x_4, lean_object* x_5, lean_object* x_6, lean_object* x_7, lean_object* x_8, lean_object* x_9, lean_object* x_10, lean_object* x_11) { _start: { -uint8_t x_11; uint32_t x_12; uint8_t x_13; uint8_t x_14; size_t x_15; lean_object* x_16; -x_11 = lean_unbox(x_2); +uint8_t x_12; uint32_t x_13; uint8_t x_14; uint8_t x_15; size_t x_16; lean_object* x_17; +x_12 = lean_unbox(x_2); lean_dec(x_2); -x_12 = lean_unbox_uint32(x_5); -lean_dec(x_5); -x_13 = lean_unbox(x_6); +x_13 = lean_unbox_uint32(x_6); lean_dec(x_6); x_14 = lean_unbox(x_7); lean_dec(x_7); -x_15 = lean_unbox_usize(x_8); +x_15 = lean_unbox(x_8); lean_dec(x_8); -x_16 = l_Lean_importModules___lambda__2(x_1, x_11, x_3, x_4, x_12, x_13, x_14, x_15, x_9, x_10); +x_16 = lean_unbox_usize(x_9); lean_dec(x_9); -return x_16; +x_17 = l_Lean_importModules___lambda__2(x_1, x_12, x_3, x_4, x_5, x_13, x_14, x_15, x_16, x_10, x_11); +lean_dec(x_10); +return x_17; } } -LEAN_EXPORT lean_object* l_Lean_importModules___boxed(lean_object* x_1, lean_object* x_2, lean_object* x_3, lean_object* x_4, lean_object* x_5, lean_object* x_6, lean_object* x_7, lean_object* x_8) { +LEAN_EXPORT lean_object* l_Lean_importModules___boxed(lean_object* x_1, lean_object* x_2, lean_object* x_3, lean_object* x_4, lean_object* x_5, lean_object* x_6, lean_object* x_7, lean_object* x_8, lean_object* x_9) { _start: { -uint32_t x_9; uint8_t x_10; uint8_t x_11; uint8_t x_12; lean_object* x_13; -x_9 = lean_unbox_uint32(x_3); +uint32_t x_10; uint8_t x_11; uint8_t x_12; uint8_t x_13; lean_object* x_14; +x_10 = lean_unbox_uint32(x_3); lean_dec(x_3); -x_10 = lean_unbox(x_5); +x_11 = lean_unbox(x_5); lean_dec(x_5); -x_11 = lean_unbox(x_6); +x_12 = lean_unbox(x_6); lean_dec(x_6); -x_12 = lean_unbox(x_7); +x_13 = lean_unbox(x_7); lean_dec(x_7); -x_13 = l_Lean_importModules(x_1, x_2, x_9, x_4, x_10, x_11, x_12, x_8); -return x_13; +x_14 = l_Lean_importModules(x_1, x_2, x_10, x_4, x_11, x_12, x_13, x_8, x_9); +return x_14; } } LEAN_EXPORT lean_object* l_Lean_withImportModules___rarg(lean_object* x_1, lean_object* x_2, lean_object* x_3, uint32_t x_4, lean_object* x_5) { _start: { -lean_object* x_6; uint8_t x_7; uint8_t x_8; lean_object* x_9; -x_6 = l_Lean_EnvExtension_initFn____x40_Lean_Environment___hyg_6793____closed__1; -x_7 = 0; -x_8 = 2; -x_9 = l_Lean_importModules(x_1, x_2, x_4, x_6, x_7, x_7, x_8, x_5); -if (lean_obj_tag(x_9) == 0) +lean_object* x_6; lean_object* x_7; uint8_t x_8; uint8_t x_9; lean_object* x_10; +x_6 = lean_box(0); +x_7 = l_Lean_EnvExtension_initFn____x40_Lean_Environment___hyg_6645____closed__1; +x_8 = 0; +x_9 = 2; +x_10 = l_Lean_importModules(x_1, x_2, x_4, x_7, x_8, x_8, x_9, x_6, x_5); +if (lean_obj_tag(x_10) == 0) { -lean_object* x_10; lean_object* x_11; lean_object* x_12; -x_10 = lean_ctor_get(x_9, 0); -lean_inc(x_10); -x_11 = lean_ctor_get(x_9, 1); +lean_object* x_11; lean_object* x_12; lean_object* x_13; +x_11 = lean_ctor_get(x_10, 0); lean_inc(x_11); -lean_dec(x_9); -lean_inc(x_10); -x_12 = lean_apply_2(x_3, x_10, x_11); -if (lean_obj_tag(x_12) == 0) +x_12 = lean_ctor_get(x_10, 1); +lean_inc(x_12); +lean_dec(x_10); +lean_inc(x_11); +x_13 = lean_apply_2(x_3, x_11, x_12); +if (lean_obj_tag(x_13) == 0) { -lean_object* x_13; lean_object* x_14; lean_object* x_15; -x_13 = lean_ctor_get(x_12, 0); -lean_inc(x_13); -x_14 = lean_ctor_get(x_12, 1); +lean_object* x_14; lean_object* x_15; lean_object* x_16; +x_14 = lean_ctor_get(x_13, 0); lean_inc(x_14); 
-lean_dec(x_12); -x_15 = lean_environment_free_regions(x_10, x_14); -if (lean_obj_tag(x_15) == 0) +x_15 = lean_ctor_get(x_13, 1); +lean_inc(x_15); +lean_dec(x_13); +x_16 = lean_environment_free_regions(x_11, x_15); +if (lean_obj_tag(x_16) == 0) { -uint8_t x_16; -x_16 = !lean_is_exclusive(x_15); -if (x_16 == 0) +uint8_t x_17; +x_17 = !lean_is_exclusive(x_16); +if (x_17 == 0) { -lean_object* x_17; -x_17 = lean_ctor_get(x_15, 0); -lean_dec(x_17); -lean_ctor_set(x_15, 0, x_13); -return x_15; +lean_object* x_18; +x_18 = lean_ctor_get(x_16, 0); +lean_dec(x_18); +lean_ctor_set(x_16, 0, x_14); +return x_16; } else { -lean_object* x_18; lean_object* x_19; -x_18 = lean_ctor_get(x_15, 1); -lean_inc(x_18); -lean_dec(x_15); -x_19 = lean_alloc_ctor(0, 2, 0); -lean_ctor_set(x_19, 0, x_13); -lean_ctor_set(x_19, 1, x_18); -return x_19; +lean_object* x_19; lean_object* x_20; +x_19 = lean_ctor_get(x_16, 1); +lean_inc(x_19); +lean_dec(x_16); +x_20 = lean_alloc_ctor(0, 2, 0); +lean_ctor_set(x_20, 0, x_14); +lean_ctor_set(x_20, 1, x_19); +return x_20; } } else { -uint8_t x_20; -lean_dec(x_13); -x_20 = !lean_is_exclusive(x_15); -if (x_20 == 0) +uint8_t x_21; +lean_dec(x_14); +x_21 = !lean_is_exclusive(x_16); +if (x_21 == 0) { -return x_15; +return x_16; } else { -lean_object* x_21; lean_object* x_22; lean_object* x_23; -x_21 = lean_ctor_get(x_15, 0); -x_22 = lean_ctor_get(x_15, 1); +lean_object* x_22; lean_object* x_23; lean_object* x_24; +x_22 = lean_ctor_get(x_16, 0); +x_23 = lean_ctor_get(x_16, 1); +lean_inc(x_23); lean_inc(x_22); -lean_inc(x_21); -lean_dec(x_15); -x_23 = lean_alloc_ctor(1, 2, 0); -lean_ctor_set(x_23, 0, x_21); -lean_ctor_set(x_23, 1, x_22); -return x_23; +lean_dec(x_16); +x_24 = lean_alloc_ctor(1, 2, 0); +lean_ctor_set(x_24, 0, x_22); +lean_ctor_set(x_24, 1, x_23); +return x_24; } } } else { -lean_object* x_24; lean_object* x_25; lean_object* x_26; -x_24 = lean_ctor_get(x_12, 0); -lean_inc(x_24); -x_25 = lean_ctor_get(x_12, 1); +lean_object* x_25; lean_object* x_26; lean_object* x_27; +x_25 = lean_ctor_get(x_13, 0); lean_inc(x_25); -lean_dec(x_12); -x_26 = lean_environment_free_regions(x_10, x_25); -if (lean_obj_tag(x_26) == 0) +x_26 = lean_ctor_get(x_13, 1); +lean_inc(x_26); +lean_dec(x_13); +x_27 = lean_environment_free_regions(x_11, x_26); +if (lean_obj_tag(x_27) == 0) { -uint8_t x_27; -x_27 = !lean_is_exclusive(x_26); -if (x_27 == 0) +uint8_t x_28; +x_28 = !lean_is_exclusive(x_27); +if (x_28 == 0) { -lean_object* x_28; -x_28 = lean_ctor_get(x_26, 0); -lean_dec(x_28); -lean_ctor_set_tag(x_26, 1); -lean_ctor_set(x_26, 0, x_24); -return x_26; +lean_object* x_29; +x_29 = lean_ctor_get(x_27, 0); +lean_dec(x_29); +lean_ctor_set_tag(x_27, 1); +lean_ctor_set(x_27, 0, x_25); +return x_27; } else { -lean_object* x_29; lean_object* x_30; -x_29 = lean_ctor_get(x_26, 1); -lean_inc(x_29); -lean_dec(x_26); -x_30 = lean_alloc_ctor(1, 2, 0); -lean_ctor_set(x_30, 0, x_24); -lean_ctor_set(x_30, 1, x_29); -return x_30; +lean_object* x_30; lean_object* x_31; +x_30 = lean_ctor_get(x_27, 1); +lean_inc(x_30); +lean_dec(x_27); +x_31 = lean_alloc_ctor(1, 2, 0); +lean_ctor_set(x_31, 0, x_25); +lean_ctor_set(x_31, 1, x_30); +return x_31; } } else { -uint8_t x_31; -lean_dec(x_24); -x_31 = !lean_is_exclusive(x_26); -if (x_31 == 0) +uint8_t x_32; +lean_dec(x_25); +x_32 = !lean_is_exclusive(x_27); +if (x_32 == 0) { -return x_26; +return x_27; } else { -lean_object* x_32; lean_object* x_33; lean_object* x_34; -x_32 = lean_ctor_get(x_26, 0); -x_33 = lean_ctor_get(x_26, 1); +lean_object* x_33; lean_object* x_34; 
lean_object* x_35; +x_33 = lean_ctor_get(x_27, 0); +x_34 = lean_ctor_get(x_27, 1); +lean_inc(x_34); lean_inc(x_33); -lean_inc(x_32); -lean_dec(x_26); -x_34 = lean_alloc_ctor(1, 2, 0); -lean_ctor_set(x_34, 0, x_32); -lean_ctor_set(x_34, 1, x_33); -return x_34; +lean_dec(x_27); +x_35 = lean_alloc_ctor(1, 2, 0); +lean_ctor_set(x_35, 0, x_33); +lean_ctor_set(x_35, 1, x_34); +return x_35; } } } } else { -uint8_t x_35; +uint8_t x_36; lean_dec(x_3); -x_35 = !lean_is_exclusive(x_9); -if (x_35 == 0) +x_36 = !lean_is_exclusive(x_10); +if (x_36 == 0) { -return x_9; +return x_10; } else { -lean_object* x_36; lean_object* x_37; lean_object* x_38; -x_36 = lean_ctor_get(x_9, 0); -x_37 = lean_ctor_get(x_9, 1); +lean_object* x_37; lean_object* x_38; lean_object* x_39; +x_37 = lean_ctor_get(x_10, 0); +x_38 = lean_ctor_get(x_10, 1); +lean_inc(x_38); lean_inc(x_37); -lean_inc(x_36); -lean_dec(x_9); -x_38 = lean_alloc_ctor(1, 2, 0); -lean_ctor_set(x_38, 0, x_36); -lean_ctor_set(x_38, 1, x_37); -return x_38; +lean_dec(x_10); +x_39 = lean_alloc_ctor(1, 2, 0); +lean_ctor_set(x_39, 0, x_37); +lean_ctor_set(x_39, 1, x_38); +return x_39; } } } @@ -33710,7 +33690,7 @@ x_7 = lean_ctor_get(x_3, 0); lean_inc(x_7); lean_dec(x_3); x_8 = 1; -x_9 = l_Lean_instToStringImport___closed__1; +x_9 = l___private_Lean_Environment_0__Lean_AsyncConsts_add___closed__1; x_10 = l_Lean_Name_toString(x_7, x_8, x_9); x_11 = lean_string_append(x_6, x_10); lean_dec(x_10); @@ -33744,12 +33724,12 @@ x_5 = lean_ctor_get(x_4, 0); lean_inc(x_5); lean_dec(x_4); x_6 = 1; -x_7 = l_Lean_instToStringImport___closed__1; +x_7 = l___private_Lean_Environment_0__Lean_AsyncConsts_add___closed__1; x_8 = l_Lean_Name_toString(x_5, x_6, x_7); -x_9 = l_List_repr___at_Lean_Environment_dbgFormatAsyncState___spec__8___closed__4; +x_9 = l_List_repr___at_Lean_Environment_dbgFormatAsyncState___spec__8___closed__6; x_10 = lean_string_append(x_9, x_8); lean_dec(x_8); -x_11 = l_List_repr___at_Lean_Environment_dbgFormatAsyncState___spec__8___closed__8; +x_11 = l_List_repr___at_Lean_Environment_dbgFormatAsyncState___spec__8___closed__10; x_12 = lean_string_append(x_10, x_11); return x_12; } @@ -33763,9 +33743,9 @@ x_14 = lean_ctor_get(x_13, 0); lean_inc(x_14); lean_dec(x_13); x_15 = 1; -x_16 = l_Lean_instToStringImport___closed__1; +x_16 = l___private_Lean_Environment_0__Lean_AsyncConsts_add___closed__1; x_17 = l_Lean_Name_toString(x_14, x_15, x_16); -x_18 = l_List_repr___at_Lean_Environment_dbgFormatAsyncState___spec__8___closed__4; +x_18 = l_List_repr___at_Lean_Environment_dbgFormatAsyncState___spec__8___closed__6; x_19 = lean_string_append(x_18, x_17); lean_dec(x_17); x_20 = l_List_foldl___at_Lean_Environment_displayStats___spec__2(x_19, x_3); @@ -33925,7 +33905,7 @@ x_12 = l_Std_Format_isNil(x_11); if (x_12 == 0) { lean_object* x_13; lean_object* x_14; lean_object* x_15; lean_object* x_16; lean_object* x_17; lean_object* x_18; lean_object* x_19; lean_object* x_20; -x_13 = l___private_Lean_Environment_0__Lean_reprConstantKind____x40_Lean_Environment___hyg_1396____closed__3; +x_13 = l___private_Lean_Environment_0__Lean_reprConstantKind____x40_Lean_Environment___hyg_1248____closed__3; x_14 = lean_alloc_ctor(4, 2, 0); lean_ctor_set(x_14, 0, x_13); lean_ctor_set(x_14, 1, x_11); @@ -34005,7 +33985,7 @@ x_8 = lean_array_uget(x_2, x_3); x_9 = lean_ctor_get(x_8, 1); lean_inc(x_9); x_10 = 1; -x_11 = l_Lean_instToStringImport___closed__1; +x_11 = l___private_Lean_Environment_0__Lean_AsyncConsts_add___closed__1; x_12 = l_Lean_Name_toString(x_9, x_10, x_11); x_13 = 
l_Array_foldlMUnsafe_fold___at_Lean_Environment_displayStats___spec__7___closed__1; x_14 = lean_string_append(x_13, x_12); @@ -34299,7 +34279,7 @@ if (x_97 == 0) lean_object* x_98; lean_dec(x_18); lean_dec(x_17); -x_98 = l_Lean_EnvExtension_initFn____x40_Lean_Environment___hyg_6793____closed__1; +x_98 = l_Lean_EnvExtension_initFn____x40_Lean_Environment___hyg_6645____closed__1; x_24 = x_98; goto block_95; } @@ -34312,7 +34292,7 @@ if (x_99 == 0) lean_object* x_100; lean_dec(x_18); lean_dec(x_17); -x_100 = l_Lean_EnvExtension_initFn____x40_Lean_Environment___hyg_6793____closed__1; +x_100 = l_Lean_EnvExtension_initFn____x40_Lean_Environment___hyg_6645____closed__1; x_24 = x_100; goto block_95; } @@ -34322,7 +34302,7 @@ size_t x_101; size_t x_102; lean_object* x_103; lean_object* x_104; x_101 = 0; x_102 = lean_usize_of_nat(x_18); lean_dec(x_18); -x_103 = l_Lean_EnvExtension_initFn____x40_Lean_Environment___hyg_6793____closed__1; +x_103 = l_Lean_EnvExtension_initFn____x40_Lean_Environment___hyg_6645____closed__1; x_104 = l_Array_foldlMUnsafe_fold___at_Lean_Environment_displayStats___spec__8(x_17, x_101, x_102, x_103); lean_dec(x_17); x_24 = x_104; @@ -34768,7 +34748,7 @@ LEAN_EXPORT lean_object* l___private_Lean_Environment_0__Lean_Environment_throwU { uint8_t x_3; lean_object* x_4; lean_object* x_5; lean_object* x_6; lean_object* x_7; lean_object* x_8; lean_object* x_9; lean_object* x_10; lean_object* x_11; lean_object* x_12; lean_object* x_13; lean_object* x_14; x_3 = 1; -x_4 = l_Lean_instToStringImport___closed__1; +x_4 = l___private_Lean_Environment_0__Lean_AsyncConsts_add___closed__1; x_5 = l_Lean_Name_toString(x_2, x_3, x_4); x_6 = l___private_Lean_Environment_0__Lean_Environment_throwUnexpectedType___rarg___closed__1; x_7 = lean_string_append(x_6, x_5); @@ -35112,16 +35092,16 @@ x_9 = lean_ctor_get(x_4, 0); lean_inc(x_9); lean_dec(x_4); x_10 = 1; -x_11 = l_Lean_instToStringImport___closed__1; +x_11 = l___private_Lean_Environment_0__Lean_AsyncConsts_add___closed__1; x_12 = l_Lean_Name_toString(x_9, x_10, x_11); -x_13 = l___private_Lean_Environment_0__Lean_AsyncConsts_add___closed__3; +x_13 = l___private_Lean_Environment_0__Lean_AsyncConsts_add___closed__4; x_14 = lean_string_append(x_13, x_12); lean_dec(x_12); x_15 = l_List_forIn_x27_loop___at_Lean_Environment_replayConsts_replayKernel___spec__9___lambda__2___closed__1; x_16 = lean_string_append(x_14, x_15); -x_17 = l___private_Lean_Environment_0__Lean_AsyncConsts_add___closed__4; +x_17 = l___private_Lean_Environment_0__Lean_AsyncConsts_add___closed__5; x_18 = l_List_forIn_x27_loop___at_Lean_Environment_replayConsts_replayKernel___spec__9___lambda__2___closed__2; -x_19 = lean_unsigned_to_nat(2142u); +x_19 = lean_unsigned_to_nat(2159u); x_20 = lean_unsigned_to_nat(17u); x_21 = l___private_Init_Util_0__mkPanicMessageWithDecl(x_17, x_18, x_19, x_20, x_16); lean_dec(x_16); @@ -35146,16 +35126,16 @@ x_26 = lean_ctor_get(x_4, 0); lean_inc(x_26); lean_dec(x_4); x_27 = 1; -x_28 = l_Lean_instToStringImport___closed__1; +x_28 = l___private_Lean_Environment_0__Lean_AsyncConsts_add___closed__1; x_29 = l_Lean_Name_toString(x_26, x_27, x_28); -x_30 = l___private_Lean_Environment_0__Lean_AsyncConsts_add___closed__3; +x_30 = l___private_Lean_Environment_0__Lean_AsyncConsts_add___closed__4; x_31 = lean_string_append(x_30, x_29); lean_dec(x_29); x_32 = l_List_forIn_x27_loop___at_Lean_Environment_replayConsts_replayKernel___spec__9___lambda__2___closed__1; x_33 = lean_string_append(x_31, x_32); -x_34 = 
l___private_Lean_Environment_0__Lean_AsyncConsts_add___closed__4; +x_34 = l___private_Lean_Environment_0__Lean_AsyncConsts_add___closed__5; x_35 = l_List_forIn_x27_loop___at_Lean_Environment_replayConsts_replayKernel___spec__9___lambda__2___closed__2; -x_36 = lean_unsigned_to_nat(2142u); +x_36 = lean_unsigned_to_nat(2159u); x_37 = lean_unsigned_to_nat(17u); x_38 = l___private_Init_Util_0__mkPanicMessageWithDecl(x_34, x_35, x_36, x_37, x_33); lean_dec(x_33); @@ -35237,16 +35217,16 @@ x_56 = lean_ctor_get(x_4, 0); lean_inc(x_56); lean_dec(x_4); x_57 = 1; -x_58 = l_Lean_instToStringImport___closed__1; +x_58 = l___private_Lean_Environment_0__Lean_AsyncConsts_add___closed__1; x_59 = l_Lean_Name_toString(x_56, x_57, x_58); -x_60 = l___private_Lean_Environment_0__Lean_AsyncConsts_add___closed__3; +x_60 = l___private_Lean_Environment_0__Lean_AsyncConsts_add___closed__4; x_61 = lean_string_append(x_60, x_59); lean_dec(x_59); x_62 = l_List_forIn_x27_loop___at_Lean_Environment_replayConsts_replayKernel___spec__9___lambda__2___closed__1; x_63 = lean_string_append(x_61, x_62); -x_64 = l___private_Lean_Environment_0__Lean_AsyncConsts_add___closed__4; +x_64 = l___private_Lean_Environment_0__Lean_AsyncConsts_add___closed__5; x_65 = l_List_forIn_x27_loop___at_Lean_Environment_replayConsts_replayKernel___spec__9___lambda__2___closed__2; -x_66 = lean_unsigned_to_nat(2142u); +x_66 = lean_unsigned_to_nat(2159u); x_67 = lean_unsigned_to_nat(17u); x_68 = l___private_Init_Util_0__mkPanicMessageWithDecl(x_64, x_65, x_66, x_67, x_63); lean_dec(x_63); @@ -35271,16 +35251,16 @@ x_73 = lean_ctor_get(x_4, 0); lean_inc(x_73); lean_dec(x_4); x_74 = 1; -x_75 = l_Lean_instToStringImport___closed__1; +x_75 = l___private_Lean_Environment_0__Lean_AsyncConsts_add___closed__1; x_76 = l_Lean_Name_toString(x_73, x_74, x_75); -x_77 = l___private_Lean_Environment_0__Lean_AsyncConsts_add___closed__3; +x_77 = l___private_Lean_Environment_0__Lean_AsyncConsts_add___closed__4; x_78 = lean_string_append(x_77, x_76); lean_dec(x_76); x_79 = l_List_forIn_x27_loop___at_Lean_Environment_replayConsts_replayKernel___spec__9___lambda__2___closed__1; x_80 = lean_string_append(x_78, x_79); -x_81 = l___private_Lean_Environment_0__Lean_AsyncConsts_add___closed__4; +x_81 = l___private_Lean_Environment_0__Lean_AsyncConsts_add___closed__5; x_82 = l_List_forIn_x27_loop___at_Lean_Environment_replayConsts_replayKernel___spec__9___lambda__2___closed__2; -x_83 = lean_unsigned_to_nat(2142u); +x_83 = lean_unsigned_to_nat(2159u); x_84 = lean_unsigned_to_nat(17u); x_85 = l___private_Init_Util_0__mkPanicMessageWithDecl(x_81, x_82, x_83, x_84, x_80); lean_dec(x_80); @@ -35312,16 +35292,16 @@ x_93 = lean_ctor_get(x_4, 0); lean_inc(x_93); lean_dec(x_4); x_94 = 1; -x_95 = l_Lean_instToStringImport___closed__1; +x_95 = l___private_Lean_Environment_0__Lean_AsyncConsts_add___closed__1; x_96 = l_Lean_Name_toString(x_93, x_94, x_95); -x_97 = l___private_Lean_Environment_0__Lean_AsyncConsts_add___closed__3; +x_97 = l___private_Lean_Environment_0__Lean_AsyncConsts_add___closed__4; x_98 = lean_string_append(x_97, x_96); lean_dec(x_96); x_99 = l_List_forIn_x27_loop___at_Lean_Environment_replayConsts_replayKernel___spec__9___lambda__2___closed__1; x_100 = lean_string_append(x_98, x_99); -x_101 = l___private_Lean_Environment_0__Lean_AsyncConsts_add___closed__4; +x_101 = l___private_Lean_Environment_0__Lean_AsyncConsts_add___closed__5; x_102 = l_List_forIn_x27_loop___at_Lean_Environment_replayConsts_replayKernel___spec__9___lambda__2___closed__2; -x_103 = 
lean_unsigned_to_nat(2142u); +x_103 = lean_unsigned_to_nat(2159u); x_104 = lean_unsigned_to_nat(17u); x_105 = l___private_Init_Util_0__mkPanicMessageWithDecl(x_101, x_102, x_103, x_104, x_100); lean_dec(x_100); @@ -35346,16 +35326,16 @@ x_110 = lean_ctor_get(x_4, 0); lean_inc(x_110); lean_dec(x_4); x_111 = 1; -x_112 = l_Lean_instToStringImport___closed__1; +x_112 = l___private_Lean_Environment_0__Lean_AsyncConsts_add___closed__1; x_113 = l_Lean_Name_toString(x_110, x_111, x_112); -x_114 = l___private_Lean_Environment_0__Lean_AsyncConsts_add___closed__3; +x_114 = l___private_Lean_Environment_0__Lean_AsyncConsts_add___closed__4; x_115 = lean_string_append(x_114, x_113); lean_dec(x_113); x_116 = l_List_forIn_x27_loop___at_Lean_Environment_replayConsts_replayKernel___spec__9___lambda__2___closed__1; x_117 = lean_string_append(x_115, x_116); -x_118 = l___private_Lean_Environment_0__Lean_AsyncConsts_add___closed__4; +x_118 = l___private_Lean_Environment_0__Lean_AsyncConsts_add___closed__5; x_119 = l_List_forIn_x27_loop___at_Lean_Environment_replayConsts_replayKernel___spec__9___lambda__2___closed__2; -x_120 = lean_unsigned_to_nat(2142u); +x_120 = lean_unsigned_to_nat(2159u); x_121 = lean_unsigned_to_nat(17u); x_122 = l___private_Init_Util_0__mkPanicMessageWithDecl(x_118, x_119, x_120, x_121, x_117); lean_dec(x_117); @@ -35387,16 +35367,16 @@ x_130 = lean_ctor_get(x_4, 0); lean_inc(x_130); lean_dec(x_4); x_131 = 1; -x_132 = l_Lean_instToStringImport___closed__1; +x_132 = l___private_Lean_Environment_0__Lean_AsyncConsts_add___closed__1; x_133 = l_Lean_Name_toString(x_130, x_131, x_132); -x_134 = l___private_Lean_Environment_0__Lean_AsyncConsts_add___closed__3; +x_134 = l___private_Lean_Environment_0__Lean_AsyncConsts_add___closed__4; x_135 = lean_string_append(x_134, x_133); lean_dec(x_133); x_136 = l_List_forIn_x27_loop___at_Lean_Environment_replayConsts_replayKernel___spec__9___lambda__2___closed__1; x_137 = lean_string_append(x_135, x_136); -x_138 = l___private_Lean_Environment_0__Lean_AsyncConsts_add___closed__4; +x_138 = l___private_Lean_Environment_0__Lean_AsyncConsts_add___closed__5; x_139 = l_List_forIn_x27_loop___at_Lean_Environment_replayConsts_replayKernel___spec__9___lambda__2___closed__2; -x_140 = lean_unsigned_to_nat(2142u); +x_140 = lean_unsigned_to_nat(2159u); x_141 = lean_unsigned_to_nat(17u); x_142 = l___private_Init_Util_0__mkPanicMessageWithDecl(x_138, x_139, x_140, x_141, x_137); lean_dec(x_137); @@ -35421,16 +35401,16 @@ x_147 = lean_ctor_get(x_4, 0); lean_inc(x_147); lean_dec(x_4); x_148 = 1; -x_149 = l_Lean_instToStringImport___closed__1; +x_149 = l___private_Lean_Environment_0__Lean_AsyncConsts_add___closed__1; x_150 = l_Lean_Name_toString(x_147, x_148, x_149); -x_151 = l___private_Lean_Environment_0__Lean_AsyncConsts_add___closed__3; +x_151 = l___private_Lean_Environment_0__Lean_AsyncConsts_add___closed__4; x_152 = lean_string_append(x_151, x_150); lean_dec(x_150); x_153 = l_List_forIn_x27_loop___at_Lean_Environment_replayConsts_replayKernel___spec__9___lambda__2___closed__1; x_154 = lean_string_append(x_152, x_153); -x_155 = l___private_Lean_Environment_0__Lean_AsyncConsts_add___closed__4; +x_155 = l___private_Lean_Environment_0__Lean_AsyncConsts_add___closed__5; x_156 = l_List_forIn_x27_loop___at_Lean_Environment_replayConsts_replayKernel___spec__9___lambda__2___closed__2; -x_157 = lean_unsigned_to_nat(2142u); +x_157 = lean_unsigned_to_nat(2159u); x_158 = lean_unsigned_to_nat(17u); x_159 = l___private_Init_Util_0__mkPanicMessageWithDecl(x_155, x_156, x_157, x_158, 
x_154); lean_dec(x_154); @@ -35462,16 +35442,16 @@ x_167 = lean_ctor_get(x_4, 0); lean_inc(x_167); lean_dec(x_4); x_168 = 1; -x_169 = l_Lean_instToStringImport___closed__1; +x_169 = l___private_Lean_Environment_0__Lean_AsyncConsts_add___closed__1; x_170 = l_Lean_Name_toString(x_167, x_168, x_169); -x_171 = l___private_Lean_Environment_0__Lean_AsyncConsts_add___closed__3; +x_171 = l___private_Lean_Environment_0__Lean_AsyncConsts_add___closed__4; x_172 = lean_string_append(x_171, x_170); lean_dec(x_170); x_173 = l_List_forIn_x27_loop___at_Lean_Environment_replayConsts_replayKernel___spec__9___lambda__2___closed__1; x_174 = lean_string_append(x_172, x_173); -x_175 = l___private_Lean_Environment_0__Lean_AsyncConsts_add___closed__4; +x_175 = l___private_Lean_Environment_0__Lean_AsyncConsts_add___closed__5; x_176 = l_List_forIn_x27_loop___at_Lean_Environment_replayConsts_replayKernel___spec__9___lambda__2___closed__2; -x_177 = lean_unsigned_to_nat(2142u); +x_177 = lean_unsigned_to_nat(2159u); x_178 = lean_unsigned_to_nat(17u); x_179 = l___private_Init_Util_0__mkPanicMessageWithDecl(x_175, x_176, x_177, x_178, x_174); lean_dec(x_174); @@ -35496,16 +35476,16 @@ x_184 = lean_ctor_get(x_4, 0); lean_inc(x_184); lean_dec(x_4); x_185 = 1; -x_186 = l_Lean_instToStringImport___closed__1; +x_186 = l___private_Lean_Environment_0__Lean_AsyncConsts_add___closed__1; x_187 = l_Lean_Name_toString(x_184, x_185, x_186); -x_188 = l___private_Lean_Environment_0__Lean_AsyncConsts_add___closed__3; +x_188 = l___private_Lean_Environment_0__Lean_AsyncConsts_add___closed__4; x_189 = lean_string_append(x_188, x_187); lean_dec(x_187); x_190 = l_List_forIn_x27_loop___at_Lean_Environment_replayConsts_replayKernel___spec__9___lambda__2___closed__1; x_191 = lean_string_append(x_189, x_190); -x_192 = l___private_Lean_Environment_0__Lean_AsyncConsts_add___closed__4; +x_192 = l___private_Lean_Environment_0__Lean_AsyncConsts_add___closed__5; x_193 = l_List_forIn_x27_loop___at_Lean_Environment_replayConsts_replayKernel___spec__9___lambda__2___closed__2; -x_194 = lean_unsigned_to_nat(2142u); +x_194 = lean_unsigned_to_nat(2159u); x_195 = lean_unsigned_to_nat(17u); x_196 = l___private_Init_Util_0__mkPanicMessageWithDecl(x_192, x_193, x_194, x_195, x_191); lean_dec(x_191); @@ -35537,16 +35517,16 @@ x_204 = lean_ctor_get(x_4, 0); lean_inc(x_204); lean_dec(x_4); x_205 = 1; -x_206 = l_Lean_instToStringImport___closed__1; +x_206 = l___private_Lean_Environment_0__Lean_AsyncConsts_add___closed__1; x_207 = l_Lean_Name_toString(x_204, x_205, x_206); -x_208 = l___private_Lean_Environment_0__Lean_AsyncConsts_add___closed__3; +x_208 = l___private_Lean_Environment_0__Lean_AsyncConsts_add___closed__4; x_209 = lean_string_append(x_208, x_207); lean_dec(x_207); x_210 = l_List_forIn_x27_loop___at_Lean_Environment_replayConsts_replayKernel___spec__9___lambda__2___closed__1; x_211 = lean_string_append(x_209, x_210); -x_212 = l___private_Lean_Environment_0__Lean_AsyncConsts_add___closed__4; +x_212 = l___private_Lean_Environment_0__Lean_AsyncConsts_add___closed__5; x_213 = l_List_forIn_x27_loop___at_Lean_Environment_replayConsts_replayKernel___spec__9___lambda__2___closed__2; -x_214 = lean_unsigned_to_nat(2142u); +x_214 = lean_unsigned_to_nat(2159u); x_215 = lean_unsigned_to_nat(17u); x_216 = l___private_Init_Util_0__mkPanicMessageWithDecl(x_212, x_213, x_214, x_215, x_211); lean_dec(x_211); @@ -35571,16 +35551,16 @@ x_221 = lean_ctor_get(x_4, 0); lean_inc(x_221); lean_dec(x_4); x_222 = 1; -x_223 = l_Lean_instToStringImport___closed__1; +x_223 = 
l___private_Lean_Environment_0__Lean_AsyncConsts_add___closed__1; x_224 = l_Lean_Name_toString(x_221, x_222, x_223); -x_225 = l___private_Lean_Environment_0__Lean_AsyncConsts_add___closed__3; +x_225 = l___private_Lean_Environment_0__Lean_AsyncConsts_add___closed__4; x_226 = lean_string_append(x_225, x_224); lean_dec(x_224); x_227 = l_List_forIn_x27_loop___at_Lean_Environment_replayConsts_replayKernel___spec__9___lambda__2___closed__1; x_228 = lean_string_append(x_226, x_227); -x_229 = l___private_Lean_Environment_0__Lean_AsyncConsts_add___closed__4; +x_229 = l___private_Lean_Environment_0__Lean_AsyncConsts_add___closed__5; x_230 = l_List_forIn_x27_loop___at_Lean_Environment_replayConsts_replayKernel___spec__9___lambda__2___closed__2; -x_231 = lean_unsigned_to_nat(2142u); +x_231 = lean_unsigned_to_nat(2159u); x_232 = lean_unsigned_to_nat(17u); x_233 = l___private_Init_Util_0__mkPanicMessageWithDecl(x_229, x_230, x_231, x_232, x_228); lean_dec(x_228); @@ -36323,7 +36303,7 @@ lean_dec(x_8); x_13 = lean_ctor_get(x_7, 1); lean_inc(x_13); lean_dec(x_7); -x_14 = l_Lean_EnvExtension_initFn____x40_Lean_Environment___hyg_6793____closed__1; +x_14 = l_Lean_EnvExtension_initFn____x40_Lean_Environment___hyg_6645____closed__1; lean_inc(x_13); x_15 = l_List_takeTR_go___rarg(x_13, x_13, x_12, x_14); lean_dec(x_13); @@ -36620,7 +36600,7 @@ uint8_t x_7; lean_object* x_8; lean_object* x_9; lean_object* x_10; lean_object* lean_dec(x_3); lean_dec(x_1); x_7 = 1; -x_8 = l_Lean_instToStringImport___closed__1; +x_8 = l___private_Lean_Environment_0__Lean_AsyncConsts_add___closed__1; x_9 = l_Lean_Name_toString(x_4, x_7, x_8); x_10 = l_Lean_Environment_evalConstCheck___rarg___closed__1; x_11 = lean_string_append(x_10, x_9); @@ -37824,7 +37804,7 @@ x_36 = lean_ctor_get(x_31, 1); lean_inc(x_36); lean_dec(x_31); x_37 = lean_box(0); -x_38 = l_Lean_EnvExtension_initFn____x40_Lean_Environment___hyg_6793____closed__1; +x_38 = l_Lean_EnvExtension_initFn____x40_Lean_Environment___hyg_6645____closed__1; lean_inc(x_36); x_39 = l_List_takeTR_go___rarg(x_36, x_36, x_35, x_38); lean_dec(x_36); @@ -38025,7 +38005,7 @@ x_106 = lean_ctor_get(x_101, 1); lean_inc(x_106); lean_dec(x_101); x_107 = lean_box(0); -x_108 = l_Lean_EnvExtension_initFn____x40_Lean_Environment___hyg_6793____closed__1; +x_108 = l_Lean_EnvExtension_initFn____x40_Lean_Environment___hyg_6645____closed__1; lean_inc(x_106); x_109 = l_List_takeTR_go___rarg(x_106, x_106, x_105, x_108); lean_dec(x_106); @@ -38266,7 +38246,7 @@ lean_dec(x_5); lean_dec(x_2); lean_dec(x_1); x_30 = 1; -x_31 = l_Lean_instToStringImport___closed__1; +x_31 = l___private_Lean_Environment_0__Lean_AsyncConsts_add___closed__1; x_32 = l_Lean_Name_toString(x_4, x_30, x_31); x_33 = l_Lean_Environment_realizeConst___lambda__5___closed__1; x_34 = lean_string_append(x_33, x_32); @@ -38365,7 +38345,7 @@ lean_dec(x_5); lean_dec(x_2); lean_dec(x_1); x_66 = 1; -x_67 = l_Lean_instToStringImport___closed__1; +x_67 = l___private_Lean_Environment_0__Lean_AsyncConsts_add___closed__1; x_68 = l_Lean_Name_toString(x_4, x_66, x_67); x_69 = l_Lean_Environment_realizeConst___lambda__5___closed__1; x_70 = lean_string_append(x_69, x_68); @@ -38490,7 +38470,7 @@ lean_dec(x_4); lean_dec(x_2); lean_dec(x_1); x_21 = 1; -x_22 = l_Lean_instToStringImport___closed__1; +x_22 = l___private_Lean_Environment_0__Lean_AsyncConsts_add___closed__1; x_23 = l_Lean_Name_toString(x_3, x_21, x_22); x_24 = l_Lean_Environment_realizeConst___closed__1; x_25 = lean_string_append(x_24, x_23); @@ -38533,7 +38513,7 @@ lean_dec(x_4); lean_dec(x_2); 
lean_dec(x_1); x_35 = 1; -x_36 = l_Lean_instToStringImport___closed__1; +x_36 = l___private_Lean_Environment_0__Lean_AsyncConsts_add___closed__1; x_37 = l_Lean_Name_toString(x_3, x_35, x_36); x_38 = l_Lean_Environment_realizeConst___closed__1; x_39 = lean_string_append(x_38, x_37); @@ -38586,7 +38566,7 @@ lean_dec(x_4); lean_dec(x_2); lean_dec(x_1); x_51 = 1; -x_52 = l_Lean_instToStringImport___closed__1; +x_52 = l___private_Lean_Environment_0__Lean_AsyncConsts_add___closed__1; x_53 = l_Lean_Name_toString(x_3, x_51, x_52); x_54 = l_Lean_Environment_realizeConst___closed__1; x_55 = lean_string_append(x_54, x_53); @@ -39296,6 +39276,7 @@ lean_object* initialize_Init_System_Promise(uint8_t builtin, lean_object*); lean_object* initialize_Lean_ImportingFlag(uint8_t builtin, lean_object*); lean_object* initialize_Lean_Data_NameTrie(uint8_t builtin, lean_object*); lean_object* initialize_Lean_Data_SMap(uint8_t builtin, lean_object*); +lean_object* initialize_Lean_Setup(uint8_t builtin, lean_object*); lean_object* initialize_Lean_Declaration(uint8_t builtin, lean_object*); lean_object* initialize_Lean_LocalContext(uint8_t builtin, lean_object*); lean_object* initialize_Lean_Util_Path(uint8_t builtin, lean_object*); @@ -39332,6 +39313,9 @@ lean_dec_ref(res); res = initialize_Lean_Data_SMap(builtin, lean_io_mk_world()); if (lean_io_result_is_error(res)) return res; lean_dec_ref(res); +res = initialize_Lean_Setup(builtin, lean_io_mk_world()); +if (lean_io_result_is_error(res)) return res; +lean_dec_ref(res); res = initialize_Lean_Declaration(builtin, lean_io_mk_world()); if (lean_io_result_is_error(res)) return res; lean_dec_ref(res); @@ -39407,74 +39391,6 @@ l_Lean_instGetElem_x3fArrayModuleIdxLtNatToNatSize___closed__3 = _init_l_Lean_in lean_mark_persistent(l_Lean_instGetElem_x3fArrayModuleIdxLtNatToNatSize___closed__3); l_Lean_instGetElem_x3fArrayModuleIdxLtNatToNatSize___closed__4 = _init_l_Lean_instGetElem_x3fArrayModuleIdxLtNatToNatSize___closed__4(); lean_mark_persistent(l_Lean_instGetElem_x3fArrayModuleIdxLtNatToNatSize___closed__4); -l___private_Lean_Environment_0__Lean_reprImport____x40_Lean_Environment___hyg_237____closed__1 = _init_l___private_Lean_Environment_0__Lean_reprImport____x40_Lean_Environment___hyg_237____closed__1(); -lean_mark_persistent(l___private_Lean_Environment_0__Lean_reprImport____x40_Lean_Environment___hyg_237____closed__1); -l___private_Lean_Environment_0__Lean_reprImport____x40_Lean_Environment___hyg_237____closed__2 = _init_l___private_Lean_Environment_0__Lean_reprImport____x40_Lean_Environment___hyg_237____closed__2(); -lean_mark_persistent(l___private_Lean_Environment_0__Lean_reprImport____x40_Lean_Environment___hyg_237____closed__2); -l___private_Lean_Environment_0__Lean_reprImport____x40_Lean_Environment___hyg_237____closed__3 = _init_l___private_Lean_Environment_0__Lean_reprImport____x40_Lean_Environment___hyg_237____closed__3(); -lean_mark_persistent(l___private_Lean_Environment_0__Lean_reprImport____x40_Lean_Environment___hyg_237____closed__3); -l___private_Lean_Environment_0__Lean_reprImport____x40_Lean_Environment___hyg_237____closed__4 = _init_l___private_Lean_Environment_0__Lean_reprImport____x40_Lean_Environment___hyg_237____closed__4(); -lean_mark_persistent(l___private_Lean_Environment_0__Lean_reprImport____x40_Lean_Environment___hyg_237____closed__4); -l___private_Lean_Environment_0__Lean_reprImport____x40_Lean_Environment___hyg_237____closed__5 = _init_l___private_Lean_Environment_0__Lean_reprImport____x40_Lean_Environment___hyg_237____closed__5(); 
-lean_mark_persistent(l___private_Lean_Environment_0__Lean_reprImport____x40_Lean_Environment___hyg_237____closed__5); -l___private_Lean_Environment_0__Lean_reprImport____x40_Lean_Environment___hyg_237____closed__6 = _init_l___private_Lean_Environment_0__Lean_reprImport____x40_Lean_Environment___hyg_237____closed__6(); -lean_mark_persistent(l___private_Lean_Environment_0__Lean_reprImport____x40_Lean_Environment___hyg_237____closed__6); -l___private_Lean_Environment_0__Lean_reprImport____x40_Lean_Environment___hyg_237____closed__7 = _init_l___private_Lean_Environment_0__Lean_reprImport____x40_Lean_Environment___hyg_237____closed__7(); -lean_mark_persistent(l___private_Lean_Environment_0__Lean_reprImport____x40_Lean_Environment___hyg_237____closed__7); -l___private_Lean_Environment_0__Lean_reprImport____x40_Lean_Environment___hyg_237____closed__8 = _init_l___private_Lean_Environment_0__Lean_reprImport____x40_Lean_Environment___hyg_237____closed__8(); -lean_mark_persistent(l___private_Lean_Environment_0__Lean_reprImport____x40_Lean_Environment___hyg_237____closed__8); -l___private_Lean_Environment_0__Lean_reprImport____x40_Lean_Environment___hyg_237____closed__9 = _init_l___private_Lean_Environment_0__Lean_reprImport____x40_Lean_Environment___hyg_237____closed__9(); -lean_mark_persistent(l___private_Lean_Environment_0__Lean_reprImport____x40_Lean_Environment___hyg_237____closed__9); -l___private_Lean_Environment_0__Lean_reprImport____x40_Lean_Environment___hyg_237____closed__10 = _init_l___private_Lean_Environment_0__Lean_reprImport____x40_Lean_Environment___hyg_237____closed__10(); -lean_mark_persistent(l___private_Lean_Environment_0__Lean_reprImport____x40_Lean_Environment___hyg_237____closed__10); -l___private_Lean_Environment_0__Lean_reprImport____x40_Lean_Environment___hyg_237____closed__11 = _init_l___private_Lean_Environment_0__Lean_reprImport____x40_Lean_Environment___hyg_237____closed__11(); -lean_mark_persistent(l___private_Lean_Environment_0__Lean_reprImport____x40_Lean_Environment___hyg_237____closed__11); -l___private_Lean_Environment_0__Lean_reprImport____x40_Lean_Environment___hyg_237____closed__12 = _init_l___private_Lean_Environment_0__Lean_reprImport____x40_Lean_Environment___hyg_237____closed__12(); -lean_mark_persistent(l___private_Lean_Environment_0__Lean_reprImport____x40_Lean_Environment___hyg_237____closed__12); -l___private_Lean_Environment_0__Lean_reprImport____x40_Lean_Environment___hyg_237____closed__13 = _init_l___private_Lean_Environment_0__Lean_reprImport____x40_Lean_Environment___hyg_237____closed__13(); -lean_mark_persistent(l___private_Lean_Environment_0__Lean_reprImport____x40_Lean_Environment___hyg_237____closed__13); -l___private_Lean_Environment_0__Lean_reprImport____x40_Lean_Environment___hyg_237____closed__14 = _init_l___private_Lean_Environment_0__Lean_reprImport____x40_Lean_Environment___hyg_237____closed__14(); -lean_mark_persistent(l___private_Lean_Environment_0__Lean_reprImport____x40_Lean_Environment___hyg_237____closed__14); -l___private_Lean_Environment_0__Lean_reprImport____x40_Lean_Environment___hyg_237____closed__15 = _init_l___private_Lean_Environment_0__Lean_reprImport____x40_Lean_Environment___hyg_237____closed__15(); -lean_mark_persistent(l___private_Lean_Environment_0__Lean_reprImport____x40_Lean_Environment___hyg_237____closed__15); -l___private_Lean_Environment_0__Lean_reprImport____x40_Lean_Environment___hyg_237____closed__16 = _init_l___private_Lean_Environment_0__Lean_reprImport____x40_Lean_Environment___hyg_237____closed__16(); 
-lean_mark_persistent(l___private_Lean_Environment_0__Lean_reprImport____x40_Lean_Environment___hyg_237____closed__16); -l___private_Lean_Environment_0__Lean_reprImport____x40_Lean_Environment___hyg_237____closed__17 = _init_l___private_Lean_Environment_0__Lean_reprImport____x40_Lean_Environment___hyg_237____closed__17(); -lean_mark_persistent(l___private_Lean_Environment_0__Lean_reprImport____x40_Lean_Environment___hyg_237____closed__17); -l___private_Lean_Environment_0__Lean_reprImport____x40_Lean_Environment___hyg_237____closed__18 = _init_l___private_Lean_Environment_0__Lean_reprImport____x40_Lean_Environment___hyg_237____closed__18(); -lean_mark_persistent(l___private_Lean_Environment_0__Lean_reprImport____x40_Lean_Environment___hyg_237____closed__18); -l___private_Lean_Environment_0__Lean_reprImport____x40_Lean_Environment___hyg_237____closed__19 = _init_l___private_Lean_Environment_0__Lean_reprImport____x40_Lean_Environment___hyg_237____closed__19(); -lean_mark_persistent(l___private_Lean_Environment_0__Lean_reprImport____x40_Lean_Environment___hyg_237____closed__19); -l___private_Lean_Environment_0__Lean_reprImport____x40_Lean_Environment___hyg_237____closed__20 = _init_l___private_Lean_Environment_0__Lean_reprImport____x40_Lean_Environment___hyg_237____closed__20(); -lean_mark_persistent(l___private_Lean_Environment_0__Lean_reprImport____x40_Lean_Environment___hyg_237____closed__20); -l___private_Lean_Environment_0__Lean_reprImport____x40_Lean_Environment___hyg_237____closed__21 = _init_l___private_Lean_Environment_0__Lean_reprImport____x40_Lean_Environment___hyg_237____closed__21(); -lean_mark_persistent(l___private_Lean_Environment_0__Lean_reprImport____x40_Lean_Environment___hyg_237____closed__21); -l___private_Lean_Environment_0__Lean_reprImport____x40_Lean_Environment___hyg_237____closed__22 = _init_l___private_Lean_Environment_0__Lean_reprImport____x40_Lean_Environment___hyg_237____closed__22(); -lean_mark_persistent(l___private_Lean_Environment_0__Lean_reprImport____x40_Lean_Environment___hyg_237____closed__22); -l___private_Lean_Environment_0__Lean_reprImport____x40_Lean_Environment___hyg_237____closed__23 = _init_l___private_Lean_Environment_0__Lean_reprImport____x40_Lean_Environment___hyg_237____closed__23(); -lean_mark_persistent(l___private_Lean_Environment_0__Lean_reprImport____x40_Lean_Environment___hyg_237____closed__23); -l___private_Lean_Environment_0__Lean_reprImport____x40_Lean_Environment___hyg_237____closed__24 = _init_l___private_Lean_Environment_0__Lean_reprImport____x40_Lean_Environment___hyg_237____closed__24(); -lean_mark_persistent(l___private_Lean_Environment_0__Lean_reprImport____x40_Lean_Environment___hyg_237____closed__24); -l___private_Lean_Environment_0__Lean_reprImport____x40_Lean_Environment___hyg_237____closed__25 = _init_l___private_Lean_Environment_0__Lean_reprImport____x40_Lean_Environment___hyg_237____closed__25(); -lean_mark_persistent(l___private_Lean_Environment_0__Lean_reprImport____x40_Lean_Environment___hyg_237____closed__25); -l___private_Lean_Environment_0__Lean_reprImport____x40_Lean_Environment___hyg_237____closed__26 = _init_l___private_Lean_Environment_0__Lean_reprImport____x40_Lean_Environment___hyg_237____closed__26(); -lean_mark_persistent(l___private_Lean_Environment_0__Lean_reprImport____x40_Lean_Environment___hyg_237____closed__26); -l___private_Lean_Environment_0__Lean_reprImport____x40_Lean_Environment___hyg_237____closed__27 = 
_init_l___private_Lean_Environment_0__Lean_reprImport____x40_Lean_Environment___hyg_237____closed__27(); -lean_mark_persistent(l___private_Lean_Environment_0__Lean_reprImport____x40_Lean_Environment___hyg_237____closed__27); -l___private_Lean_Environment_0__Lean_reprImport____x40_Lean_Environment___hyg_237____closed__28 = _init_l___private_Lean_Environment_0__Lean_reprImport____x40_Lean_Environment___hyg_237____closed__28(); -lean_mark_persistent(l___private_Lean_Environment_0__Lean_reprImport____x40_Lean_Environment___hyg_237____closed__28); -l___private_Lean_Environment_0__Lean_reprImport____x40_Lean_Environment___hyg_237____closed__29 = _init_l___private_Lean_Environment_0__Lean_reprImport____x40_Lean_Environment___hyg_237____closed__29(); -lean_mark_persistent(l___private_Lean_Environment_0__Lean_reprImport____x40_Lean_Environment___hyg_237____closed__29); -l_Lean_instReprImport___closed__1 = _init_l_Lean_instReprImport___closed__1(); -lean_mark_persistent(l_Lean_instReprImport___closed__1); -l_Lean_instReprImport = _init_l_Lean_instReprImport(); -lean_mark_persistent(l_Lean_instReprImport); -l_Lean_instInhabitedImport___closed__1 = _init_l_Lean_instInhabitedImport___closed__1(); -lean_mark_persistent(l_Lean_instInhabitedImport___closed__1); -l_Lean_instInhabitedImport = _init_l_Lean_instInhabitedImport(); -lean_mark_persistent(l_Lean_instInhabitedImport); -l_Lean_instToStringImport___closed__1 = _init_l_Lean_instToStringImport___closed__1(); -lean_mark_persistent(l_Lean_instToStringImport___closed__1); l_Lean_EnvExtensionEntrySpec = _init_l_Lean_EnvExtensionEntrySpec(); l_Lean_instInhabitedModuleData___closed__1 = _init_l_Lean_instInhabitedModuleData___closed__1(); lean_mark_persistent(l_Lean_instInhabitedModuleData___closed__1); @@ -39501,106 +39417,106 @@ l_Lean_instBEqConstantKind___closed__1 = _init_l_Lean_instBEqConstantKind___clos lean_mark_persistent(l_Lean_instBEqConstantKind___closed__1); l_Lean_instBEqConstantKind = _init_l_Lean_instBEqConstantKind(); lean_mark_persistent(l_Lean_instBEqConstantKind); -l___private_Lean_Environment_0__Lean_reprConstantKind____x40_Lean_Environment___hyg_1396____closed__1 = _init_l___private_Lean_Environment_0__Lean_reprConstantKind____x40_Lean_Environment___hyg_1396____closed__1(); -lean_mark_persistent(l___private_Lean_Environment_0__Lean_reprConstantKind____x40_Lean_Environment___hyg_1396____closed__1); -l___private_Lean_Environment_0__Lean_reprConstantKind____x40_Lean_Environment___hyg_1396____closed__2 = _init_l___private_Lean_Environment_0__Lean_reprConstantKind____x40_Lean_Environment___hyg_1396____closed__2(); -lean_mark_persistent(l___private_Lean_Environment_0__Lean_reprConstantKind____x40_Lean_Environment___hyg_1396____closed__2); -l___private_Lean_Environment_0__Lean_reprConstantKind____x40_Lean_Environment___hyg_1396____closed__3 = _init_l___private_Lean_Environment_0__Lean_reprConstantKind____x40_Lean_Environment___hyg_1396____closed__3(); -lean_mark_persistent(l___private_Lean_Environment_0__Lean_reprConstantKind____x40_Lean_Environment___hyg_1396____closed__3); -l___private_Lean_Environment_0__Lean_reprConstantKind____x40_Lean_Environment___hyg_1396____closed__4 = _init_l___private_Lean_Environment_0__Lean_reprConstantKind____x40_Lean_Environment___hyg_1396____closed__4(); -lean_mark_persistent(l___private_Lean_Environment_0__Lean_reprConstantKind____x40_Lean_Environment___hyg_1396____closed__4); -l___private_Lean_Environment_0__Lean_reprConstantKind____x40_Lean_Environment___hyg_1396____closed__5 = 
_init_l___private_Lean_Environment_0__Lean_reprConstantKind____x40_Lean_Environment___hyg_1396____closed__5(); -lean_mark_persistent(l___private_Lean_Environment_0__Lean_reprConstantKind____x40_Lean_Environment___hyg_1396____closed__5); -l___private_Lean_Environment_0__Lean_reprConstantKind____x40_Lean_Environment___hyg_1396____closed__6 = _init_l___private_Lean_Environment_0__Lean_reprConstantKind____x40_Lean_Environment___hyg_1396____closed__6(); -lean_mark_persistent(l___private_Lean_Environment_0__Lean_reprConstantKind____x40_Lean_Environment___hyg_1396____closed__6); -l___private_Lean_Environment_0__Lean_reprConstantKind____x40_Lean_Environment___hyg_1396____closed__7 = _init_l___private_Lean_Environment_0__Lean_reprConstantKind____x40_Lean_Environment___hyg_1396____closed__7(); -lean_mark_persistent(l___private_Lean_Environment_0__Lean_reprConstantKind____x40_Lean_Environment___hyg_1396____closed__7); -l___private_Lean_Environment_0__Lean_reprConstantKind____x40_Lean_Environment___hyg_1396____closed__8 = _init_l___private_Lean_Environment_0__Lean_reprConstantKind____x40_Lean_Environment___hyg_1396____closed__8(); -lean_mark_persistent(l___private_Lean_Environment_0__Lean_reprConstantKind____x40_Lean_Environment___hyg_1396____closed__8); -l___private_Lean_Environment_0__Lean_reprConstantKind____x40_Lean_Environment___hyg_1396____closed__9 = _init_l___private_Lean_Environment_0__Lean_reprConstantKind____x40_Lean_Environment___hyg_1396____closed__9(); -lean_mark_persistent(l___private_Lean_Environment_0__Lean_reprConstantKind____x40_Lean_Environment___hyg_1396____closed__9); -l___private_Lean_Environment_0__Lean_reprConstantKind____x40_Lean_Environment___hyg_1396____closed__10 = _init_l___private_Lean_Environment_0__Lean_reprConstantKind____x40_Lean_Environment___hyg_1396____closed__10(); -lean_mark_persistent(l___private_Lean_Environment_0__Lean_reprConstantKind____x40_Lean_Environment___hyg_1396____closed__10); -l___private_Lean_Environment_0__Lean_reprConstantKind____x40_Lean_Environment___hyg_1396____closed__11 = _init_l___private_Lean_Environment_0__Lean_reprConstantKind____x40_Lean_Environment___hyg_1396____closed__11(); -lean_mark_persistent(l___private_Lean_Environment_0__Lean_reprConstantKind____x40_Lean_Environment___hyg_1396____closed__11); -l___private_Lean_Environment_0__Lean_reprConstantKind____x40_Lean_Environment___hyg_1396____closed__12 = _init_l___private_Lean_Environment_0__Lean_reprConstantKind____x40_Lean_Environment___hyg_1396____closed__12(); -lean_mark_persistent(l___private_Lean_Environment_0__Lean_reprConstantKind____x40_Lean_Environment___hyg_1396____closed__12); -l___private_Lean_Environment_0__Lean_reprConstantKind____x40_Lean_Environment___hyg_1396____closed__13 = _init_l___private_Lean_Environment_0__Lean_reprConstantKind____x40_Lean_Environment___hyg_1396____closed__13(); -lean_mark_persistent(l___private_Lean_Environment_0__Lean_reprConstantKind____x40_Lean_Environment___hyg_1396____closed__13); -l___private_Lean_Environment_0__Lean_reprConstantKind____x40_Lean_Environment___hyg_1396____closed__14 = _init_l___private_Lean_Environment_0__Lean_reprConstantKind____x40_Lean_Environment___hyg_1396____closed__14(); -lean_mark_persistent(l___private_Lean_Environment_0__Lean_reprConstantKind____x40_Lean_Environment___hyg_1396____closed__14); -l___private_Lean_Environment_0__Lean_reprConstantKind____x40_Lean_Environment___hyg_1396____closed__15 = _init_l___private_Lean_Environment_0__Lean_reprConstantKind____x40_Lean_Environment___hyg_1396____closed__15(); 
-lean_mark_persistent(l___private_Lean_Environment_0__Lean_reprConstantKind____x40_Lean_Environment___hyg_1396____closed__15); -l___private_Lean_Environment_0__Lean_reprConstantKind____x40_Lean_Environment___hyg_1396____closed__16 = _init_l___private_Lean_Environment_0__Lean_reprConstantKind____x40_Lean_Environment___hyg_1396____closed__16(); -lean_mark_persistent(l___private_Lean_Environment_0__Lean_reprConstantKind____x40_Lean_Environment___hyg_1396____closed__16); -l___private_Lean_Environment_0__Lean_reprConstantKind____x40_Lean_Environment___hyg_1396____closed__17 = _init_l___private_Lean_Environment_0__Lean_reprConstantKind____x40_Lean_Environment___hyg_1396____closed__17(); -lean_mark_persistent(l___private_Lean_Environment_0__Lean_reprConstantKind____x40_Lean_Environment___hyg_1396____closed__17); -l___private_Lean_Environment_0__Lean_reprConstantKind____x40_Lean_Environment___hyg_1396____closed__18 = _init_l___private_Lean_Environment_0__Lean_reprConstantKind____x40_Lean_Environment___hyg_1396____closed__18(); -lean_mark_persistent(l___private_Lean_Environment_0__Lean_reprConstantKind____x40_Lean_Environment___hyg_1396____closed__18); -l___private_Lean_Environment_0__Lean_reprConstantKind____x40_Lean_Environment___hyg_1396____closed__19 = _init_l___private_Lean_Environment_0__Lean_reprConstantKind____x40_Lean_Environment___hyg_1396____closed__19(); -lean_mark_persistent(l___private_Lean_Environment_0__Lean_reprConstantKind____x40_Lean_Environment___hyg_1396____closed__19); -l___private_Lean_Environment_0__Lean_reprConstantKind____x40_Lean_Environment___hyg_1396____closed__20 = _init_l___private_Lean_Environment_0__Lean_reprConstantKind____x40_Lean_Environment___hyg_1396____closed__20(); -lean_mark_persistent(l___private_Lean_Environment_0__Lean_reprConstantKind____x40_Lean_Environment___hyg_1396____closed__20); -l___private_Lean_Environment_0__Lean_reprConstantKind____x40_Lean_Environment___hyg_1396____closed__21 = _init_l___private_Lean_Environment_0__Lean_reprConstantKind____x40_Lean_Environment___hyg_1396____closed__21(); -lean_mark_persistent(l___private_Lean_Environment_0__Lean_reprConstantKind____x40_Lean_Environment___hyg_1396____closed__21); -l___private_Lean_Environment_0__Lean_reprConstantKind____x40_Lean_Environment___hyg_1396____closed__22 = _init_l___private_Lean_Environment_0__Lean_reprConstantKind____x40_Lean_Environment___hyg_1396____closed__22(); -lean_mark_persistent(l___private_Lean_Environment_0__Lean_reprConstantKind____x40_Lean_Environment___hyg_1396____closed__22); -l___private_Lean_Environment_0__Lean_reprConstantKind____x40_Lean_Environment___hyg_1396____closed__23 = _init_l___private_Lean_Environment_0__Lean_reprConstantKind____x40_Lean_Environment___hyg_1396____closed__23(); -lean_mark_persistent(l___private_Lean_Environment_0__Lean_reprConstantKind____x40_Lean_Environment___hyg_1396____closed__23); -l___private_Lean_Environment_0__Lean_reprConstantKind____x40_Lean_Environment___hyg_1396____closed__24 = _init_l___private_Lean_Environment_0__Lean_reprConstantKind____x40_Lean_Environment___hyg_1396____closed__24(); -lean_mark_persistent(l___private_Lean_Environment_0__Lean_reprConstantKind____x40_Lean_Environment___hyg_1396____closed__24); -l___private_Lean_Environment_0__Lean_reprConstantKind____x40_Lean_Environment___hyg_1396____closed__25 = _init_l___private_Lean_Environment_0__Lean_reprConstantKind____x40_Lean_Environment___hyg_1396____closed__25(); 
-lean_mark_persistent(l___private_Lean_Environment_0__Lean_reprConstantKind____x40_Lean_Environment___hyg_1396____closed__25); -l___private_Lean_Environment_0__Lean_reprConstantKind____x40_Lean_Environment___hyg_1396____closed__26 = _init_l___private_Lean_Environment_0__Lean_reprConstantKind____x40_Lean_Environment___hyg_1396____closed__26(); -lean_mark_persistent(l___private_Lean_Environment_0__Lean_reprConstantKind____x40_Lean_Environment___hyg_1396____closed__26); -l___private_Lean_Environment_0__Lean_reprConstantKind____x40_Lean_Environment___hyg_1396____closed__27 = _init_l___private_Lean_Environment_0__Lean_reprConstantKind____x40_Lean_Environment___hyg_1396____closed__27(); -lean_mark_persistent(l___private_Lean_Environment_0__Lean_reprConstantKind____x40_Lean_Environment___hyg_1396____closed__27); -l___private_Lean_Environment_0__Lean_reprConstantKind____x40_Lean_Environment___hyg_1396____closed__28 = _init_l___private_Lean_Environment_0__Lean_reprConstantKind____x40_Lean_Environment___hyg_1396____closed__28(); -lean_mark_persistent(l___private_Lean_Environment_0__Lean_reprConstantKind____x40_Lean_Environment___hyg_1396____closed__28); -l___private_Lean_Environment_0__Lean_reprConstantKind____x40_Lean_Environment___hyg_1396____closed__29 = _init_l___private_Lean_Environment_0__Lean_reprConstantKind____x40_Lean_Environment___hyg_1396____closed__29(); -lean_mark_persistent(l___private_Lean_Environment_0__Lean_reprConstantKind____x40_Lean_Environment___hyg_1396____closed__29); -l___private_Lean_Environment_0__Lean_reprConstantKind____x40_Lean_Environment___hyg_1396____closed__30 = _init_l___private_Lean_Environment_0__Lean_reprConstantKind____x40_Lean_Environment___hyg_1396____closed__30(); -lean_mark_persistent(l___private_Lean_Environment_0__Lean_reprConstantKind____x40_Lean_Environment___hyg_1396____closed__30); -l___private_Lean_Environment_0__Lean_reprConstantKind____x40_Lean_Environment___hyg_1396____closed__31 = _init_l___private_Lean_Environment_0__Lean_reprConstantKind____x40_Lean_Environment___hyg_1396____closed__31(); -lean_mark_persistent(l___private_Lean_Environment_0__Lean_reprConstantKind____x40_Lean_Environment___hyg_1396____closed__31); -l___private_Lean_Environment_0__Lean_reprConstantKind____x40_Lean_Environment___hyg_1396____closed__32 = _init_l___private_Lean_Environment_0__Lean_reprConstantKind____x40_Lean_Environment___hyg_1396____closed__32(); -lean_mark_persistent(l___private_Lean_Environment_0__Lean_reprConstantKind____x40_Lean_Environment___hyg_1396____closed__32); -l___private_Lean_Environment_0__Lean_reprConstantKind____x40_Lean_Environment___hyg_1396____closed__33 = _init_l___private_Lean_Environment_0__Lean_reprConstantKind____x40_Lean_Environment___hyg_1396____closed__33(); -lean_mark_persistent(l___private_Lean_Environment_0__Lean_reprConstantKind____x40_Lean_Environment___hyg_1396____closed__33); -l___private_Lean_Environment_0__Lean_reprConstantKind____x40_Lean_Environment___hyg_1396____closed__34 = _init_l___private_Lean_Environment_0__Lean_reprConstantKind____x40_Lean_Environment___hyg_1396____closed__34(); -lean_mark_persistent(l___private_Lean_Environment_0__Lean_reprConstantKind____x40_Lean_Environment___hyg_1396____closed__34); -l___private_Lean_Environment_0__Lean_reprConstantKind____x40_Lean_Environment___hyg_1396____closed__35 = _init_l___private_Lean_Environment_0__Lean_reprConstantKind____x40_Lean_Environment___hyg_1396____closed__35(); 
-lean_mark_persistent(l___private_Lean_Environment_0__Lean_reprConstantKind____x40_Lean_Environment___hyg_1396____closed__35); -l___private_Lean_Environment_0__Lean_reprConstantKind____x40_Lean_Environment___hyg_1396____closed__36 = _init_l___private_Lean_Environment_0__Lean_reprConstantKind____x40_Lean_Environment___hyg_1396____closed__36(); -lean_mark_persistent(l___private_Lean_Environment_0__Lean_reprConstantKind____x40_Lean_Environment___hyg_1396____closed__36); -l___private_Lean_Environment_0__Lean_reprConstantKind____x40_Lean_Environment___hyg_1396____closed__37 = _init_l___private_Lean_Environment_0__Lean_reprConstantKind____x40_Lean_Environment___hyg_1396____closed__37(); -lean_mark_persistent(l___private_Lean_Environment_0__Lean_reprConstantKind____x40_Lean_Environment___hyg_1396____closed__37); -l___private_Lean_Environment_0__Lean_reprConstantKind____x40_Lean_Environment___hyg_1396____closed__38 = _init_l___private_Lean_Environment_0__Lean_reprConstantKind____x40_Lean_Environment___hyg_1396____closed__38(); -lean_mark_persistent(l___private_Lean_Environment_0__Lean_reprConstantKind____x40_Lean_Environment___hyg_1396____closed__38); -l___private_Lean_Environment_0__Lean_reprConstantKind____x40_Lean_Environment___hyg_1396____closed__39 = _init_l___private_Lean_Environment_0__Lean_reprConstantKind____x40_Lean_Environment___hyg_1396____closed__39(); -lean_mark_persistent(l___private_Lean_Environment_0__Lean_reprConstantKind____x40_Lean_Environment___hyg_1396____closed__39); -l___private_Lean_Environment_0__Lean_reprConstantKind____x40_Lean_Environment___hyg_1396____closed__40 = _init_l___private_Lean_Environment_0__Lean_reprConstantKind____x40_Lean_Environment___hyg_1396____closed__40(); -lean_mark_persistent(l___private_Lean_Environment_0__Lean_reprConstantKind____x40_Lean_Environment___hyg_1396____closed__40); -l___private_Lean_Environment_0__Lean_reprConstantKind____x40_Lean_Environment___hyg_1396____closed__41 = _init_l___private_Lean_Environment_0__Lean_reprConstantKind____x40_Lean_Environment___hyg_1396____closed__41(); -lean_mark_persistent(l___private_Lean_Environment_0__Lean_reprConstantKind____x40_Lean_Environment___hyg_1396____closed__41); -l___private_Lean_Environment_0__Lean_reprConstantKind____x40_Lean_Environment___hyg_1396____closed__42 = _init_l___private_Lean_Environment_0__Lean_reprConstantKind____x40_Lean_Environment___hyg_1396____closed__42(); -lean_mark_persistent(l___private_Lean_Environment_0__Lean_reprConstantKind____x40_Lean_Environment___hyg_1396____closed__42); -l___private_Lean_Environment_0__Lean_reprConstantKind____x40_Lean_Environment___hyg_1396____closed__43 = _init_l___private_Lean_Environment_0__Lean_reprConstantKind____x40_Lean_Environment___hyg_1396____closed__43(); -lean_mark_persistent(l___private_Lean_Environment_0__Lean_reprConstantKind____x40_Lean_Environment___hyg_1396____closed__43); -l___private_Lean_Environment_0__Lean_reprConstantKind____x40_Lean_Environment___hyg_1396____closed__44 = _init_l___private_Lean_Environment_0__Lean_reprConstantKind____x40_Lean_Environment___hyg_1396____closed__44(); -lean_mark_persistent(l___private_Lean_Environment_0__Lean_reprConstantKind____x40_Lean_Environment___hyg_1396____closed__44); -l___private_Lean_Environment_0__Lean_reprConstantKind____x40_Lean_Environment___hyg_1396____closed__45 = _init_l___private_Lean_Environment_0__Lean_reprConstantKind____x40_Lean_Environment___hyg_1396____closed__45(); 
-lean_mark_persistent(l___private_Lean_Environment_0__Lean_reprConstantKind____x40_Lean_Environment___hyg_1396____closed__45); -l___private_Lean_Environment_0__Lean_reprConstantKind____x40_Lean_Environment___hyg_1396____closed__46 = _init_l___private_Lean_Environment_0__Lean_reprConstantKind____x40_Lean_Environment___hyg_1396____closed__46(); -lean_mark_persistent(l___private_Lean_Environment_0__Lean_reprConstantKind____x40_Lean_Environment___hyg_1396____closed__46); -l___private_Lean_Environment_0__Lean_reprConstantKind____x40_Lean_Environment___hyg_1396____closed__47 = _init_l___private_Lean_Environment_0__Lean_reprConstantKind____x40_Lean_Environment___hyg_1396____closed__47(); -lean_mark_persistent(l___private_Lean_Environment_0__Lean_reprConstantKind____x40_Lean_Environment___hyg_1396____closed__47); -l___private_Lean_Environment_0__Lean_reprConstantKind____x40_Lean_Environment___hyg_1396____closed__48 = _init_l___private_Lean_Environment_0__Lean_reprConstantKind____x40_Lean_Environment___hyg_1396____closed__48(); -lean_mark_persistent(l___private_Lean_Environment_0__Lean_reprConstantKind____x40_Lean_Environment___hyg_1396____closed__48); -l___private_Lean_Environment_0__Lean_reprConstantKind____x40_Lean_Environment___hyg_1396____closed__49 = _init_l___private_Lean_Environment_0__Lean_reprConstantKind____x40_Lean_Environment___hyg_1396____closed__49(); -lean_mark_persistent(l___private_Lean_Environment_0__Lean_reprConstantKind____x40_Lean_Environment___hyg_1396____closed__49); -l___private_Lean_Environment_0__Lean_reprConstantKind____x40_Lean_Environment___hyg_1396____closed__50 = _init_l___private_Lean_Environment_0__Lean_reprConstantKind____x40_Lean_Environment___hyg_1396____closed__50(); -lean_mark_persistent(l___private_Lean_Environment_0__Lean_reprConstantKind____x40_Lean_Environment___hyg_1396____closed__50); +l___private_Lean_Environment_0__Lean_reprConstantKind____x40_Lean_Environment___hyg_1248____closed__1 = _init_l___private_Lean_Environment_0__Lean_reprConstantKind____x40_Lean_Environment___hyg_1248____closed__1(); +lean_mark_persistent(l___private_Lean_Environment_0__Lean_reprConstantKind____x40_Lean_Environment___hyg_1248____closed__1); +l___private_Lean_Environment_0__Lean_reprConstantKind____x40_Lean_Environment___hyg_1248____closed__2 = _init_l___private_Lean_Environment_0__Lean_reprConstantKind____x40_Lean_Environment___hyg_1248____closed__2(); +lean_mark_persistent(l___private_Lean_Environment_0__Lean_reprConstantKind____x40_Lean_Environment___hyg_1248____closed__2); +l___private_Lean_Environment_0__Lean_reprConstantKind____x40_Lean_Environment___hyg_1248____closed__3 = _init_l___private_Lean_Environment_0__Lean_reprConstantKind____x40_Lean_Environment___hyg_1248____closed__3(); +lean_mark_persistent(l___private_Lean_Environment_0__Lean_reprConstantKind____x40_Lean_Environment___hyg_1248____closed__3); +l___private_Lean_Environment_0__Lean_reprConstantKind____x40_Lean_Environment___hyg_1248____closed__4 = _init_l___private_Lean_Environment_0__Lean_reprConstantKind____x40_Lean_Environment___hyg_1248____closed__4(); +lean_mark_persistent(l___private_Lean_Environment_0__Lean_reprConstantKind____x40_Lean_Environment___hyg_1248____closed__4); +l___private_Lean_Environment_0__Lean_reprConstantKind____x40_Lean_Environment___hyg_1248____closed__5 = _init_l___private_Lean_Environment_0__Lean_reprConstantKind____x40_Lean_Environment___hyg_1248____closed__5(); 
+lean_mark_persistent(l___private_Lean_Environment_0__Lean_reprConstantKind____x40_Lean_Environment___hyg_1248____closed__5); +l___private_Lean_Environment_0__Lean_reprConstantKind____x40_Lean_Environment___hyg_1248____closed__6 = _init_l___private_Lean_Environment_0__Lean_reprConstantKind____x40_Lean_Environment___hyg_1248____closed__6(); +lean_mark_persistent(l___private_Lean_Environment_0__Lean_reprConstantKind____x40_Lean_Environment___hyg_1248____closed__6); +l___private_Lean_Environment_0__Lean_reprConstantKind____x40_Lean_Environment___hyg_1248____closed__7 = _init_l___private_Lean_Environment_0__Lean_reprConstantKind____x40_Lean_Environment___hyg_1248____closed__7(); +lean_mark_persistent(l___private_Lean_Environment_0__Lean_reprConstantKind____x40_Lean_Environment___hyg_1248____closed__7); +l___private_Lean_Environment_0__Lean_reprConstantKind____x40_Lean_Environment___hyg_1248____closed__8 = _init_l___private_Lean_Environment_0__Lean_reprConstantKind____x40_Lean_Environment___hyg_1248____closed__8(); +lean_mark_persistent(l___private_Lean_Environment_0__Lean_reprConstantKind____x40_Lean_Environment___hyg_1248____closed__8); +l___private_Lean_Environment_0__Lean_reprConstantKind____x40_Lean_Environment___hyg_1248____closed__9 = _init_l___private_Lean_Environment_0__Lean_reprConstantKind____x40_Lean_Environment___hyg_1248____closed__9(); +lean_mark_persistent(l___private_Lean_Environment_0__Lean_reprConstantKind____x40_Lean_Environment___hyg_1248____closed__9); +l___private_Lean_Environment_0__Lean_reprConstantKind____x40_Lean_Environment___hyg_1248____closed__10 = _init_l___private_Lean_Environment_0__Lean_reprConstantKind____x40_Lean_Environment___hyg_1248____closed__10(); +lean_mark_persistent(l___private_Lean_Environment_0__Lean_reprConstantKind____x40_Lean_Environment___hyg_1248____closed__10); +l___private_Lean_Environment_0__Lean_reprConstantKind____x40_Lean_Environment___hyg_1248____closed__11 = _init_l___private_Lean_Environment_0__Lean_reprConstantKind____x40_Lean_Environment___hyg_1248____closed__11(); +lean_mark_persistent(l___private_Lean_Environment_0__Lean_reprConstantKind____x40_Lean_Environment___hyg_1248____closed__11); +l___private_Lean_Environment_0__Lean_reprConstantKind____x40_Lean_Environment___hyg_1248____closed__12 = _init_l___private_Lean_Environment_0__Lean_reprConstantKind____x40_Lean_Environment___hyg_1248____closed__12(); +lean_mark_persistent(l___private_Lean_Environment_0__Lean_reprConstantKind____x40_Lean_Environment___hyg_1248____closed__12); +l___private_Lean_Environment_0__Lean_reprConstantKind____x40_Lean_Environment___hyg_1248____closed__13 = _init_l___private_Lean_Environment_0__Lean_reprConstantKind____x40_Lean_Environment___hyg_1248____closed__13(); +lean_mark_persistent(l___private_Lean_Environment_0__Lean_reprConstantKind____x40_Lean_Environment___hyg_1248____closed__13); +l___private_Lean_Environment_0__Lean_reprConstantKind____x40_Lean_Environment___hyg_1248____closed__14 = _init_l___private_Lean_Environment_0__Lean_reprConstantKind____x40_Lean_Environment___hyg_1248____closed__14(); +lean_mark_persistent(l___private_Lean_Environment_0__Lean_reprConstantKind____x40_Lean_Environment___hyg_1248____closed__14); +l___private_Lean_Environment_0__Lean_reprConstantKind____x40_Lean_Environment___hyg_1248____closed__15 = _init_l___private_Lean_Environment_0__Lean_reprConstantKind____x40_Lean_Environment___hyg_1248____closed__15(); 
+lean_mark_persistent(l___private_Lean_Environment_0__Lean_reprConstantKind____x40_Lean_Environment___hyg_1248____closed__15); +l___private_Lean_Environment_0__Lean_reprConstantKind____x40_Lean_Environment___hyg_1248____closed__16 = _init_l___private_Lean_Environment_0__Lean_reprConstantKind____x40_Lean_Environment___hyg_1248____closed__16(); +lean_mark_persistent(l___private_Lean_Environment_0__Lean_reprConstantKind____x40_Lean_Environment___hyg_1248____closed__16); +l___private_Lean_Environment_0__Lean_reprConstantKind____x40_Lean_Environment___hyg_1248____closed__17 = _init_l___private_Lean_Environment_0__Lean_reprConstantKind____x40_Lean_Environment___hyg_1248____closed__17(); +lean_mark_persistent(l___private_Lean_Environment_0__Lean_reprConstantKind____x40_Lean_Environment___hyg_1248____closed__17); +l___private_Lean_Environment_0__Lean_reprConstantKind____x40_Lean_Environment___hyg_1248____closed__18 = _init_l___private_Lean_Environment_0__Lean_reprConstantKind____x40_Lean_Environment___hyg_1248____closed__18(); +lean_mark_persistent(l___private_Lean_Environment_0__Lean_reprConstantKind____x40_Lean_Environment___hyg_1248____closed__18); +l___private_Lean_Environment_0__Lean_reprConstantKind____x40_Lean_Environment___hyg_1248____closed__19 = _init_l___private_Lean_Environment_0__Lean_reprConstantKind____x40_Lean_Environment___hyg_1248____closed__19(); +lean_mark_persistent(l___private_Lean_Environment_0__Lean_reprConstantKind____x40_Lean_Environment___hyg_1248____closed__19); +l___private_Lean_Environment_0__Lean_reprConstantKind____x40_Lean_Environment___hyg_1248____closed__20 = _init_l___private_Lean_Environment_0__Lean_reprConstantKind____x40_Lean_Environment___hyg_1248____closed__20(); +lean_mark_persistent(l___private_Lean_Environment_0__Lean_reprConstantKind____x40_Lean_Environment___hyg_1248____closed__20); +l___private_Lean_Environment_0__Lean_reprConstantKind____x40_Lean_Environment___hyg_1248____closed__21 = _init_l___private_Lean_Environment_0__Lean_reprConstantKind____x40_Lean_Environment___hyg_1248____closed__21(); +lean_mark_persistent(l___private_Lean_Environment_0__Lean_reprConstantKind____x40_Lean_Environment___hyg_1248____closed__21); +l___private_Lean_Environment_0__Lean_reprConstantKind____x40_Lean_Environment___hyg_1248____closed__22 = _init_l___private_Lean_Environment_0__Lean_reprConstantKind____x40_Lean_Environment___hyg_1248____closed__22(); +lean_mark_persistent(l___private_Lean_Environment_0__Lean_reprConstantKind____x40_Lean_Environment___hyg_1248____closed__22); +l___private_Lean_Environment_0__Lean_reprConstantKind____x40_Lean_Environment___hyg_1248____closed__23 = _init_l___private_Lean_Environment_0__Lean_reprConstantKind____x40_Lean_Environment___hyg_1248____closed__23(); +lean_mark_persistent(l___private_Lean_Environment_0__Lean_reprConstantKind____x40_Lean_Environment___hyg_1248____closed__23); +l___private_Lean_Environment_0__Lean_reprConstantKind____x40_Lean_Environment___hyg_1248____closed__24 = _init_l___private_Lean_Environment_0__Lean_reprConstantKind____x40_Lean_Environment___hyg_1248____closed__24(); +lean_mark_persistent(l___private_Lean_Environment_0__Lean_reprConstantKind____x40_Lean_Environment___hyg_1248____closed__24); +l___private_Lean_Environment_0__Lean_reprConstantKind____x40_Lean_Environment___hyg_1248____closed__25 = _init_l___private_Lean_Environment_0__Lean_reprConstantKind____x40_Lean_Environment___hyg_1248____closed__25(); 
+lean_mark_persistent(l___private_Lean_Environment_0__Lean_reprConstantKind____x40_Lean_Environment___hyg_1248____closed__25); +l___private_Lean_Environment_0__Lean_reprConstantKind____x40_Lean_Environment___hyg_1248____closed__26 = _init_l___private_Lean_Environment_0__Lean_reprConstantKind____x40_Lean_Environment___hyg_1248____closed__26(); +lean_mark_persistent(l___private_Lean_Environment_0__Lean_reprConstantKind____x40_Lean_Environment___hyg_1248____closed__26); +l___private_Lean_Environment_0__Lean_reprConstantKind____x40_Lean_Environment___hyg_1248____closed__27 = _init_l___private_Lean_Environment_0__Lean_reprConstantKind____x40_Lean_Environment___hyg_1248____closed__27(); +lean_mark_persistent(l___private_Lean_Environment_0__Lean_reprConstantKind____x40_Lean_Environment___hyg_1248____closed__27); +l___private_Lean_Environment_0__Lean_reprConstantKind____x40_Lean_Environment___hyg_1248____closed__28 = _init_l___private_Lean_Environment_0__Lean_reprConstantKind____x40_Lean_Environment___hyg_1248____closed__28(); +lean_mark_persistent(l___private_Lean_Environment_0__Lean_reprConstantKind____x40_Lean_Environment___hyg_1248____closed__28); +l___private_Lean_Environment_0__Lean_reprConstantKind____x40_Lean_Environment___hyg_1248____closed__29 = _init_l___private_Lean_Environment_0__Lean_reprConstantKind____x40_Lean_Environment___hyg_1248____closed__29(); +lean_mark_persistent(l___private_Lean_Environment_0__Lean_reprConstantKind____x40_Lean_Environment___hyg_1248____closed__29); +l___private_Lean_Environment_0__Lean_reprConstantKind____x40_Lean_Environment___hyg_1248____closed__30 = _init_l___private_Lean_Environment_0__Lean_reprConstantKind____x40_Lean_Environment___hyg_1248____closed__30(); +lean_mark_persistent(l___private_Lean_Environment_0__Lean_reprConstantKind____x40_Lean_Environment___hyg_1248____closed__30); +l___private_Lean_Environment_0__Lean_reprConstantKind____x40_Lean_Environment___hyg_1248____closed__31 = _init_l___private_Lean_Environment_0__Lean_reprConstantKind____x40_Lean_Environment___hyg_1248____closed__31(); +lean_mark_persistent(l___private_Lean_Environment_0__Lean_reprConstantKind____x40_Lean_Environment___hyg_1248____closed__31); +l___private_Lean_Environment_0__Lean_reprConstantKind____x40_Lean_Environment___hyg_1248____closed__32 = _init_l___private_Lean_Environment_0__Lean_reprConstantKind____x40_Lean_Environment___hyg_1248____closed__32(); +lean_mark_persistent(l___private_Lean_Environment_0__Lean_reprConstantKind____x40_Lean_Environment___hyg_1248____closed__32); +l___private_Lean_Environment_0__Lean_reprConstantKind____x40_Lean_Environment___hyg_1248____closed__33 = _init_l___private_Lean_Environment_0__Lean_reprConstantKind____x40_Lean_Environment___hyg_1248____closed__33(); +lean_mark_persistent(l___private_Lean_Environment_0__Lean_reprConstantKind____x40_Lean_Environment___hyg_1248____closed__33); +l___private_Lean_Environment_0__Lean_reprConstantKind____x40_Lean_Environment___hyg_1248____closed__34 = _init_l___private_Lean_Environment_0__Lean_reprConstantKind____x40_Lean_Environment___hyg_1248____closed__34(); +lean_mark_persistent(l___private_Lean_Environment_0__Lean_reprConstantKind____x40_Lean_Environment___hyg_1248____closed__34); +l___private_Lean_Environment_0__Lean_reprConstantKind____x40_Lean_Environment___hyg_1248____closed__35 = _init_l___private_Lean_Environment_0__Lean_reprConstantKind____x40_Lean_Environment___hyg_1248____closed__35(); 
+lean_mark_persistent(l___private_Lean_Environment_0__Lean_reprConstantKind____x40_Lean_Environment___hyg_1248____closed__35); +l___private_Lean_Environment_0__Lean_reprConstantKind____x40_Lean_Environment___hyg_1248____closed__36 = _init_l___private_Lean_Environment_0__Lean_reprConstantKind____x40_Lean_Environment___hyg_1248____closed__36(); +lean_mark_persistent(l___private_Lean_Environment_0__Lean_reprConstantKind____x40_Lean_Environment___hyg_1248____closed__36); +l___private_Lean_Environment_0__Lean_reprConstantKind____x40_Lean_Environment___hyg_1248____closed__37 = _init_l___private_Lean_Environment_0__Lean_reprConstantKind____x40_Lean_Environment___hyg_1248____closed__37(); +lean_mark_persistent(l___private_Lean_Environment_0__Lean_reprConstantKind____x40_Lean_Environment___hyg_1248____closed__37); +l___private_Lean_Environment_0__Lean_reprConstantKind____x40_Lean_Environment___hyg_1248____closed__38 = _init_l___private_Lean_Environment_0__Lean_reprConstantKind____x40_Lean_Environment___hyg_1248____closed__38(); +lean_mark_persistent(l___private_Lean_Environment_0__Lean_reprConstantKind____x40_Lean_Environment___hyg_1248____closed__38); +l___private_Lean_Environment_0__Lean_reprConstantKind____x40_Lean_Environment___hyg_1248____closed__39 = _init_l___private_Lean_Environment_0__Lean_reprConstantKind____x40_Lean_Environment___hyg_1248____closed__39(); +lean_mark_persistent(l___private_Lean_Environment_0__Lean_reprConstantKind____x40_Lean_Environment___hyg_1248____closed__39); +l___private_Lean_Environment_0__Lean_reprConstantKind____x40_Lean_Environment___hyg_1248____closed__40 = _init_l___private_Lean_Environment_0__Lean_reprConstantKind____x40_Lean_Environment___hyg_1248____closed__40(); +lean_mark_persistent(l___private_Lean_Environment_0__Lean_reprConstantKind____x40_Lean_Environment___hyg_1248____closed__40); +l___private_Lean_Environment_0__Lean_reprConstantKind____x40_Lean_Environment___hyg_1248____closed__41 = _init_l___private_Lean_Environment_0__Lean_reprConstantKind____x40_Lean_Environment___hyg_1248____closed__41(); +lean_mark_persistent(l___private_Lean_Environment_0__Lean_reprConstantKind____x40_Lean_Environment___hyg_1248____closed__41); +l___private_Lean_Environment_0__Lean_reprConstantKind____x40_Lean_Environment___hyg_1248____closed__42 = _init_l___private_Lean_Environment_0__Lean_reprConstantKind____x40_Lean_Environment___hyg_1248____closed__42(); +lean_mark_persistent(l___private_Lean_Environment_0__Lean_reprConstantKind____x40_Lean_Environment___hyg_1248____closed__42); +l___private_Lean_Environment_0__Lean_reprConstantKind____x40_Lean_Environment___hyg_1248____closed__43 = _init_l___private_Lean_Environment_0__Lean_reprConstantKind____x40_Lean_Environment___hyg_1248____closed__43(); +lean_mark_persistent(l___private_Lean_Environment_0__Lean_reprConstantKind____x40_Lean_Environment___hyg_1248____closed__43); +l___private_Lean_Environment_0__Lean_reprConstantKind____x40_Lean_Environment___hyg_1248____closed__44 = _init_l___private_Lean_Environment_0__Lean_reprConstantKind____x40_Lean_Environment___hyg_1248____closed__44(); +lean_mark_persistent(l___private_Lean_Environment_0__Lean_reprConstantKind____x40_Lean_Environment___hyg_1248____closed__44); +l___private_Lean_Environment_0__Lean_reprConstantKind____x40_Lean_Environment___hyg_1248____closed__45 = _init_l___private_Lean_Environment_0__Lean_reprConstantKind____x40_Lean_Environment___hyg_1248____closed__45(); 
+lean_mark_persistent(l___private_Lean_Environment_0__Lean_reprConstantKind____x40_Lean_Environment___hyg_1248____closed__45); +l___private_Lean_Environment_0__Lean_reprConstantKind____x40_Lean_Environment___hyg_1248____closed__46 = _init_l___private_Lean_Environment_0__Lean_reprConstantKind____x40_Lean_Environment___hyg_1248____closed__46(); +lean_mark_persistent(l___private_Lean_Environment_0__Lean_reprConstantKind____x40_Lean_Environment___hyg_1248____closed__46); +l___private_Lean_Environment_0__Lean_reprConstantKind____x40_Lean_Environment___hyg_1248____closed__47 = _init_l___private_Lean_Environment_0__Lean_reprConstantKind____x40_Lean_Environment___hyg_1248____closed__47(); +lean_mark_persistent(l___private_Lean_Environment_0__Lean_reprConstantKind____x40_Lean_Environment___hyg_1248____closed__47); +l___private_Lean_Environment_0__Lean_reprConstantKind____x40_Lean_Environment___hyg_1248____closed__48 = _init_l___private_Lean_Environment_0__Lean_reprConstantKind____x40_Lean_Environment___hyg_1248____closed__48(); +lean_mark_persistent(l___private_Lean_Environment_0__Lean_reprConstantKind____x40_Lean_Environment___hyg_1248____closed__48); +l___private_Lean_Environment_0__Lean_reprConstantKind____x40_Lean_Environment___hyg_1248____closed__49 = _init_l___private_Lean_Environment_0__Lean_reprConstantKind____x40_Lean_Environment___hyg_1248____closed__49(); +lean_mark_persistent(l___private_Lean_Environment_0__Lean_reprConstantKind____x40_Lean_Environment___hyg_1248____closed__49); +l___private_Lean_Environment_0__Lean_reprConstantKind____x40_Lean_Environment___hyg_1248____closed__50 = _init_l___private_Lean_Environment_0__Lean_reprConstantKind____x40_Lean_Environment___hyg_1248____closed__50(); +lean_mark_persistent(l___private_Lean_Environment_0__Lean_reprConstantKind____x40_Lean_Environment___hyg_1248____closed__50); l_Lean_instReprConstantKind___closed__1 = _init_l_Lean_instReprConstantKind___closed__1(); lean_mark_persistent(l_Lean_instReprConstantKind___closed__1); l_Lean_instReprConstantKind = _init_l_Lean_instReprConstantKind(); @@ -39631,26 +39547,26 @@ l_Lean_instInhabitedAsyncConsts___closed__2 = _init_l_Lean_instInhabitedAsyncCon lean_mark_persistent(l_Lean_instInhabitedAsyncConsts___closed__2); l_Lean_instInhabitedAsyncConsts = _init_l_Lean_instInhabitedAsyncConsts(); lean_mark_persistent(l_Lean_instInhabitedAsyncConsts); -l_Lean_instImpl____x40_Lean_Environment___hyg_2021____closed__1 = _init_l_Lean_instImpl____x40_Lean_Environment___hyg_2021____closed__1(); -lean_mark_persistent(l_Lean_instImpl____x40_Lean_Environment___hyg_2021____closed__1); -l_Lean_instImpl____x40_Lean_Environment___hyg_2021____closed__2 = _init_l_Lean_instImpl____x40_Lean_Environment___hyg_2021____closed__2(); -lean_mark_persistent(l_Lean_instImpl____x40_Lean_Environment___hyg_2021____closed__2); -l_Lean_instImpl____x40_Lean_Environment___hyg_2021____closed__3 = _init_l_Lean_instImpl____x40_Lean_Environment___hyg_2021____closed__3(); -lean_mark_persistent(l_Lean_instImpl____x40_Lean_Environment___hyg_2021____closed__3); -l_Lean_instImpl____x40_Lean_Environment___hyg_2021____closed__4 = _init_l_Lean_instImpl____x40_Lean_Environment___hyg_2021____closed__4(); -lean_mark_persistent(l_Lean_instImpl____x40_Lean_Environment___hyg_2021____closed__4); -l_Lean_instImpl____x40_Lean_Environment___hyg_2021____closed__5 = _init_l_Lean_instImpl____x40_Lean_Environment___hyg_2021____closed__5(); -lean_mark_persistent(l_Lean_instImpl____x40_Lean_Environment___hyg_2021____closed__5); 
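/* Illustrative sketch (not part of the generated diff): the long runs of renumbered
 * `_closed__N` globals above and below all follow the one pattern the Lean compiler
 * emits for module initialization — each closed constant is built exactly once by its
 * `_init_*` thunk and then passed to `lean_mark_persistent` so the reference-counting
 * runtime never frees it. A minimal hand-written version of that pattern follows;
 * `l_Example_hello` and `initialize_Example` are hypothetical names, and only the
 * runtime calls (`lean_mk_string_unchecked`, `lean_mark_persistent`,
 * `lean_io_result_mk_ok`) are taken from the surrounding generated code. */
#include <lean/lean.h>
static lean_object* l_Example_hello;                 /* hypothetical closed constant */
static lean_object* _init_l_Example_hello(void) {
    /* build the value once; here, an interned string literal */
    return lean_mk_string_unchecked("hello", 5, 5);
}
lean_object* initialize_Example(uint8_t builtin, lean_object* w) {
    l_Example_hello = _init_l_Example_hello();       /* run the init thunk exactly once */
    lean_mark_persistent(l_Example_hello);           /* exempt it from reference counting */
    return lean_io_result_mk_ok(lean_box(0));        /* report successful initialization */
}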
-l_Lean_instImpl____x40_Lean_Environment___hyg_2021____closed__6 = _init_l_Lean_instImpl____x40_Lean_Environment___hyg_2021____closed__6(); -lean_mark_persistent(l_Lean_instImpl____x40_Lean_Environment___hyg_2021____closed__6); -l_Lean_instImpl____x40_Lean_Environment___hyg_2021____closed__7 = _init_l_Lean_instImpl____x40_Lean_Environment___hyg_2021____closed__7(); -lean_mark_persistent(l_Lean_instImpl____x40_Lean_Environment___hyg_2021____closed__7); -l_Lean_instImpl____x40_Lean_Environment___hyg_2021____closed__8 = _init_l_Lean_instImpl____x40_Lean_Environment___hyg_2021____closed__8(); -lean_mark_persistent(l_Lean_instImpl____x40_Lean_Environment___hyg_2021____closed__8); -l_Lean_instImpl____x40_Lean_Environment___hyg_2021____closed__9 = _init_l_Lean_instImpl____x40_Lean_Environment___hyg_2021____closed__9(); -lean_mark_persistent(l_Lean_instImpl____x40_Lean_Environment___hyg_2021____closed__9); -l_Lean_instImpl____x40_Lean_Environment___hyg_2021_ = _init_l_Lean_instImpl____x40_Lean_Environment___hyg_2021_(); -lean_mark_persistent(l_Lean_instImpl____x40_Lean_Environment___hyg_2021_); +l_Lean_instImpl____x40_Lean_Environment___hyg_1873____closed__1 = _init_l_Lean_instImpl____x40_Lean_Environment___hyg_1873____closed__1(); +lean_mark_persistent(l_Lean_instImpl____x40_Lean_Environment___hyg_1873____closed__1); +l_Lean_instImpl____x40_Lean_Environment___hyg_1873____closed__2 = _init_l_Lean_instImpl____x40_Lean_Environment___hyg_1873____closed__2(); +lean_mark_persistent(l_Lean_instImpl____x40_Lean_Environment___hyg_1873____closed__2); +l_Lean_instImpl____x40_Lean_Environment___hyg_1873____closed__3 = _init_l_Lean_instImpl____x40_Lean_Environment___hyg_1873____closed__3(); +lean_mark_persistent(l_Lean_instImpl____x40_Lean_Environment___hyg_1873____closed__3); +l_Lean_instImpl____x40_Lean_Environment___hyg_1873____closed__4 = _init_l_Lean_instImpl____x40_Lean_Environment___hyg_1873____closed__4(); +lean_mark_persistent(l_Lean_instImpl____x40_Lean_Environment___hyg_1873____closed__4); +l_Lean_instImpl____x40_Lean_Environment___hyg_1873____closed__5 = _init_l_Lean_instImpl____x40_Lean_Environment___hyg_1873____closed__5(); +lean_mark_persistent(l_Lean_instImpl____x40_Lean_Environment___hyg_1873____closed__5); +l_Lean_instImpl____x40_Lean_Environment___hyg_1873____closed__6 = _init_l_Lean_instImpl____x40_Lean_Environment___hyg_1873____closed__6(); +lean_mark_persistent(l_Lean_instImpl____x40_Lean_Environment___hyg_1873____closed__6); +l_Lean_instImpl____x40_Lean_Environment___hyg_1873____closed__7 = _init_l_Lean_instImpl____x40_Lean_Environment___hyg_1873____closed__7(); +lean_mark_persistent(l_Lean_instImpl____x40_Lean_Environment___hyg_1873____closed__7); +l_Lean_instImpl____x40_Lean_Environment___hyg_1873____closed__8 = _init_l_Lean_instImpl____x40_Lean_Environment___hyg_1873____closed__8(); +lean_mark_persistent(l_Lean_instImpl____x40_Lean_Environment___hyg_1873____closed__8); +l_Lean_instImpl____x40_Lean_Environment___hyg_1873____closed__9 = _init_l_Lean_instImpl____x40_Lean_Environment___hyg_1873____closed__9(); +lean_mark_persistent(l_Lean_instImpl____x40_Lean_Environment___hyg_1873____closed__9); +l_Lean_instImpl____x40_Lean_Environment___hyg_1873_ = _init_l_Lean_instImpl____x40_Lean_Environment___hyg_1873_(); +lean_mark_persistent(l_Lean_instImpl____x40_Lean_Environment___hyg_1873_); l_Lean_instTypeNameAsyncConsts = _init_l_Lean_instTypeNameAsyncConsts(); lean_mark_persistent(l_Lean_instTypeNameAsyncConsts); l___private_Lean_Environment_0__Lean_AsyncConsts_add___closed__1 = 
_init_l___private_Lean_Environment_0__Lean_AsyncConsts_add___closed__1(); @@ -39663,6 +39579,8 @@ l___private_Lean_Environment_0__Lean_AsyncConsts_add___closed__4 = _init_l___pri lean_mark_persistent(l___private_Lean_Environment_0__Lean_AsyncConsts_add___closed__4); l___private_Lean_Environment_0__Lean_AsyncConsts_add___closed__5 = _init_l___private_Lean_Environment_0__Lean_AsyncConsts_add___closed__5(); lean_mark_persistent(l___private_Lean_Environment_0__Lean_AsyncConsts_add___closed__5); +l___private_Lean_Environment_0__Lean_AsyncConsts_add___closed__6 = _init_l___private_Lean_Environment_0__Lean_AsyncConsts_add___closed__6(); +lean_mark_persistent(l___private_Lean_Environment_0__Lean_AsyncConsts_add___closed__6); l_Lean_PrefixTreeNode_findLongestPrefix_x3f___at___private_Lean_Environment_0__Lean_AsyncConsts_findPrefix_x3f___spec__1___closed__1 = _init_l_Lean_PrefixTreeNode_findLongestPrefix_x3f___at___private_Lean_Environment_0__Lean_AsyncConsts_findPrefix_x3f___spec__1___closed__1(); lean_mark_persistent(l_Lean_PrefixTreeNode_findLongestPrefix_x3f___at___private_Lean_Environment_0__Lean_AsyncConsts_findPrefix_x3f___spec__1___closed__1); l___private_Lean_Environment_0__Lean_AsyncConsts_findRecTask___lambda__1___closed__1 = _init_l___private_Lean_Environment_0__Lean_AsyncConsts_findRecTask___lambda__1___closed__1(); @@ -39715,6 +39633,10 @@ l_List_repr___at_Lean_Environment_dbgFormatAsyncState___spec__8___closed__8 = _i lean_mark_persistent(l_List_repr___at_Lean_Environment_dbgFormatAsyncState___spec__8___closed__8); l_List_repr___at_Lean_Environment_dbgFormatAsyncState___spec__8___closed__9 = _init_l_List_repr___at_Lean_Environment_dbgFormatAsyncState___spec__8___closed__9(); lean_mark_persistent(l_List_repr___at_Lean_Environment_dbgFormatAsyncState___spec__8___closed__9); +l_List_repr___at_Lean_Environment_dbgFormatAsyncState___spec__8___closed__10 = _init_l_List_repr___at_Lean_Environment_dbgFormatAsyncState___spec__8___closed__10(); +lean_mark_persistent(l_List_repr___at_Lean_Environment_dbgFormatAsyncState___spec__8___closed__10); +l_List_repr___at_Lean_Environment_dbgFormatAsyncState___spec__8___closed__11 = _init_l_List_repr___at_Lean_Environment_dbgFormatAsyncState___spec__8___closed__11(); +lean_mark_persistent(l_List_repr___at_Lean_Environment_dbgFormatAsyncState___spec__8___closed__11); l_Prod_repr___at_Lean_Environment_dbgFormatAsyncState___spec__14___closed__1 = _init_l_Prod_repr___at_Lean_Environment_dbgFormatAsyncState___spec__14___closed__1(); lean_mark_persistent(l_Prod_repr___at_Lean_Environment_dbgFormatAsyncState___spec__14___closed__1); l_Prod_repr___at_Lean_Environment_dbgFormatAsyncState___spec__14___closed__2 = _init_l_Prod_repr___at_Lean_Environment_dbgFormatAsyncState___spec__14___closed__2(); @@ -39797,6 +39719,8 @@ l___private_Lean_Environment_0__Lean_Environment_mkFallbackConstInfo___closed__1 lean_mark_persistent(l___private_Lean_Environment_0__Lean_Environment_mkFallbackConstInfo___closed__15); l___private_Lean_Environment_0__Lean_Environment_mkFallbackConstInfo___closed__16 = _init_l___private_Lean_Environment_0__Lean_Environment_mkFallbackConstInfo___closed__16(); lean_mark_persistent(l___private_Lean_Environment_0__Lean_Environment_mkFallbackConstInfo___closed__16); +l___private_Lean_Environment_0__Lean_Environment_mkFallbackConstInfo___closed__17 = _init_l___private_Lean_Environment_0__Lean_Environment_mkFallbackConstInfo___closed__17(); +lean_mark_persistent(l___private_Lean_Environment_0__Lean_Environment_mkFallbackConstInfo___closed__17); 
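/* Illustrative sketch (not part of the generated diff): the `initFn` renames in the
 * hunks just below (hyg_6793 -> hyg_6645, hyg_8688 -> hyg_8540) all feed the same
 * builtin registration idiom — the generated IO initializer is run against a fresh IO
 * world, errors are propagated, and the unwrapped value is kept alive for the lifetime
 * of the process. A minimal version of that idiom is sketched here, assuming the usual
 * Lean runtime API; `my_extensions_initFn`, `g_my_extensions_ref`, and
 * `initialize_MyModule` are hypothetical names. */
#include <lean/lean.h>
extern lean_object* my_extensions_initFn(lean_object* world); /* hypothetical generated IO action */
static lean_object* g_my_extensions_ref;                      /* hypothetical global kept by the module */
lean_object* initialize_MyModule(uint8_t builtin, lean_object* w) {
    if (builtin) {
        lean_object* res = my_extensions_initFn(lean_io_mk_world()); /* run the IO initializer */
        if (lean_io_result_is_error(res)) return res;                /* propagate failures unchanged */
        g_my_extensions_ref = lean_io_result_get_value(res);         /* unwrap the value produced by the action */
        lean_mark_persistent(g_my_extensions_ref);                   /* keep it alive for the whole process */
        lean_dec_ref(res);                                           /* drop the IO result wrapper */
    }
    return lean_io_result_mk_ok(lean_box(0));
}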
l_Lean_Environment_addConstAsync___lambda__8___closed__1 = _init_l_Lean_Environment_addConstAsync___lambda__8___closed__1(); lean_mark_persistent(l_Lean_Environment_addConstAsync___lambda__8___closed__1); l_Lean_Environment_addConstAsync___lambda__8___closed__2 = _init_l_Lean_Environment_addConstAsync___lambda__8___closed__2(); @@ -39828,9 +39752,9 @@ l_Lean_instInhabitedEnvExtension___closed__1 = _init_l_Lean_instInhabitedEnvExte lean_mark_persistent(l_Lean_instInhabitedEnvExtension___closed__1); l_Lean_instInhabitedEnvExtension___closed__2 = _init_l_Lean_instInhabitedEnvExtension___closed__2(); lean_mark_persistent(l_Lean_instInhabitedEnvExtension___closed__2); -l_Lean_EnvExtension_initFn____x40_Lean_Environment___hyg_6793____closed__1 = _init_l_Lean_EnvExtension_initFn____x40_Lean_Environment___hyg_6793____closed__1(); -lean_mark_persistent(l_Lean_EnvExtension_initFn____x40_Lean_Environment___hyg_6793____closed__1); -if (builtin) {res = l_Lean_EnvExtension_initFn____x40_Lean_Environment___hyg_6793_(lean_io_mk_world()); +l_Lean_EnvExtension_initFn____x40_Lean_Environment___hyg_6645____closed__1 = _init_l_Lean_EnvExtension_initFn____x40_Lean_Environment___hyg_6645____closed__1(); +lean_mark_persistent(l_Lean_EnvExtension_initFn____x40_Lean_Environment___hyg_6645____closed__1); +if (builtin) {res = l_Lean_EnvExtension_initFn____x40_Lean_Environment___hyg_6645_(lean_io_mk_world()); if (lean_io_result_is_error(res)) return res; l___private_Lean_Environment_0__Lean_EnvExtension_envExtensionsRef = lean_io_result_get_value(res); lean_mark_persistent(l___private_Lean_Environment_0__Lean_EnvExtension_envExtensionsRef); @@ -39907,7 +39831,7 @@ l_Lean_instInhabitedPersistentEnvExtension___closed__4 = _init_l_Lean_instInhabi lean_mark_persistent(l_Lean_instInhabitedPersistentEnvExtension___closed__4); l_Lean_instInhabitedPersistentEnvExtension___closed__5 = _init_l_Lean_instInhabitedPersistentEnvExtension___closed__5(); lean_mark_persistent(l_Lean_instInhabitedPersistentEnvExtension___closed__5); -if (builtin) {res = l_Lean_initFn____x40_Lean_Environment___hyg_8688_(lean_io_mk_world()); +if (builtin) {res = l_Lean_initFn____x40_Lean_Environment___hyg_8540_(lean_io_mk_world()); if (lean_io_result_is_error(res)) return res; l_Lean_persistentEnvExtensionsRef = lean_io_result_get_value(res); lean_mark_persistent(l_Lean_persistentEnvExtensionsRef); @@ -40020,6 +39944,8 @@ l___private_Lean_Environment_0__Lean_looksLikeOldCodegenName___closed__12 = _ini lean_mark_persistent(l___private_Lean_Environment_0__Lean_looksLikeOldCodegenName___closed__12); l_Lean_mkModuleData___closed__1 = _init_l_Lean_mkModuleData___closed__1(); lean_mark_persistent(l_Lean_mkModuleData___closed__1); +l_Lean_mkModuleData___closed__2 = _init_l_Lean_mkModuleData___closed__2(); +lean_mark_persistent(l_Lean_mkModuleData___closed__2); l___private_Lean_Environment_0__Lean_setImportedEntries_unsafe__2___closed__1 = _init_l___private_Lean_Environment_0__Lean_setImportedEntries_unsafe__2___closed__1(); lean_mark_persistent(l___private_Lean_Environment_0__Lean_setImportedEntries_unsafe__2___closed__1); l___private_Lean_Environment_0__Lean_finalizePersistentExtensions_loop___closed__1 = _init_l___private_Lean_Environment_0__Lean_finalizePersistentExtensions_loop___closed__1(); @@ -40038,6 +39964,14 @@ l_Lean_throwAlreadyImported___rarg___closed__2 = _init_l_Lean_throwAlreadyImport lean_mark_persistent(l_Lean_throwAlreadyImported___rarg___closed__2); l_Lean_throwAlreadyImported___rarg___closed__3 = 
_init_l_Lean_throwAlreadyImported___rarg___closed__3(); lean_mark_persistent(l_Lean_throwAlreadyImported___rarg___closed__3); +l___private_Lean_Environment_0__Lean_findOLeanParts___lambda__2___closed__1 = _init_l___private_Lean_Environment_0__Lean_findOLeanParts___lambda__2___closed__1(); +lean_mark_persistent(l___private_Lean_Environment_0__Lean_findOLeanParts___lambda__2___closed__1); +l___private_Lean_Environment_0__Lean_findOLeanParts___closed__1 = _init_l___private_Lean_Environment_0__Lean_findOLeanParts___closed__1(); +lean_mark_persistent(l___private_Lean_Environment_0__Lean_findOLeanParts___closed__1); +l___private_Lean_Environment_0__Lean_findOLeanParts___closed__2 = _init_l___private_Lean_Environment_0__Lean_findOLeanParts___closed__2(); +lean_mark_persistent(l___private_Lean_Environment_0__Lean_findOLeanParts___closed__2); +l___private_Lean_Environment_0__Lean_findOLeanParts___closed__3 = _init_l___private_Lean_Environment_0__Lean_findOLeanParts___closed__3(); +lean_mark_persistent(l___private_Lean_Environment_0__Lean_findOLeanParts___closed__3); l_panic___at_Lean_importModulesCore_go___spec__1___closed__1 = _init_l_panic___at_Lean_importModulesCore_go___spec__1___closed__1(); lean_mark_persistent(l_panic___at_Lean_importModulesCore_go___spec__1___closed__1); l_panic___at_Lean_importModulesCore_go___spec__1___closed__2 = _init_l_panic___at_Lean_importModulesCore_go___spec__1___closed__2(); @@ -40054,12 +39988,6 @@ l_Array_forIn_x27Unsafe_loop___at_Lean_importModulesCore_go___spec__10___lambda_ lean_mark_persistent(l_Array_forIn_x27Unsafe_loop___at_Lean_importModulesCore_go___spec__10___lambda__2___closed__1); l_Array_forIn_x27Unsafe_loop___at_Lean_importModulesCore_go___spec__10___lambda__2___closed__2 = _init_l_Array_forIn_x27Unsafe_loop___at_Lean_importModulesCore_go___spec__10___lambda__2___closed__2(); lean_mark_persistent(l_Array_forIn_x27Unsafe_loop___at_Lean_importModulesCore_go___spec__10___lambda__2___closed__2); -l_Array_forIn_x27Unsafe_loop___at_Lean_importModulesCore_go___spec__10___lambda__4___closed__1 = _init_l_Array_forIn_x27Unsafe_loop___at_Lean_importModulesCore_go___spec__10___lambda__4___closed__1(); -lean_mark_persistent(l_Array_forIn_x27Unsafe_loop___at_Lean_importModulesCore_go___spec__10___lambda__4___closed__1); -l_Array_forIn_x27Unsafe_loop___at_Lean_importModulesCore_go___spec__10___lambda__4___closed__2 = _init_l_Array_forIn_x27Unsafe_loop___at_Lean_importModulesCore_go___spec__10___lambda__4___closed__2(); -lean_mark_persistent(l_Array_forIn_x27Unsafe_loop___at_Lean_importModulesCore_go___spec__10___lambda__4___closed__2); -l_Array_forIn_x27Unsafe_loop___at_Lean_importModulesCore_go___spec__10___lambda__4___closed__3 = _init_l_Array_forIn_x27Unsafe_loop___at_Lean_importModulesCore_go___spec__10___lambda__4___closed__3(); -lean_mark_persistent(l_Array_forIn_x27Unsafe_loop___at_Lean_importModulesCore_go___spec__10___lambda__4___closed__3); l_Array_forIn_x27Unsafe_loop___at_Lean_importModulesCore_go___spec__10___closed__1 = _init_l_Array_forIn_x27Unsafe_loop___at_Lean_importModulesCore_go___spec__10___closed__1(); lean_mark_persistent(l_Array_forIn_x27Unsafe_loop___at_Lean_importModulesCore_go___spec__10___closed__1); l_Array_mapMUnsafe_map___at_Lean_finalizeImport___spec__3___closed__1 = _init_l_Array_mapMUnsafe_map___at_Lean_finalizeImport___spec__3___closed__1(); diff --git a/stage0/stdlib/Lean/Language/Lean.c b/stage0/stdlib/Lean/Language/Lean.c index 5a4ae11df2bb..862c735c77d3 100644 --- a/stage0/stdlib/Lean/Language/Lean.c +++ 
b/stage0/stdlib/Lean/Language/Lean.c @@ -19,14 +19,14 @@ static lean_object* l_Lean_Language_Lean_process_parseCmd___lambda__3___closed__ lean_object* lean_profileit(lean_object*, lean_object*, lean_object*, lean_object*); static lean_object* l_Lean_Language_Lean_process_parseHeader___lambda__5___closed__1; LEAN_EXPORT lean_object* l_Lean_Language_Lean_process_processHeader___lambda__3___boxed(lean_object*, lean_object*, lean_object*, lean_object*, lean_object*, lean_object*, lean_object*, lean_object*, lean_object*, lean_object*, lean_object*, lean_object*, lean_object*); -static lean_object* l_Lean_Language_Lean_initFn____x40_Lean_Language_Lean___hyg_1166____closed__7; +static lean_object* l_Lean_Language_Lean_initFn____x40_Lean_Language_Lean___hyg_1200____closed__2; lean_object* l_Lean_Core_getMaxHeartbeats(lean_object*); lean_object* l_Lean_Option_set___at_Lean_Environment_realizeConst___spec__3(lean_object*, lean_object*, uint8_t); -LEAN_EXPORT lean_object* l_Lean_Language_Lean_process_processHeader___lambda__5(lean_object*, lean_object*, lean_object*, double, lean_object*, lean_object*, lean_object*, lean_object*, double, lean_object*, lean_object*, lean_object*, lean_object*); +LEAN_EXPORT lean_object* l_Lean_Language_Lean_process_processHeader___lambda__5(lean_object*, lean_object*, lean_object*, lean_object*, double, lean_object*, lean_object*, lean_object*, double, lean_object*, lean_object*, lean_object*, lean_object*); lean_object* lean_mk_empty_array_with_capacity(lean_object*); static lean_object* l_Lean_Language_Lean_process_parseHeader___lambda__3___closed__3; +lean_object* l_Lean_Elab_processHeaderCore(lean_object*, lean_object*, uint8_t, lean_object*, lean_object*, lean_object*, uint32_t, lean_object*, uint8_t, lean_object*, lean_object*, lean_object*); LEAN_EXPORT lean_object* l_Lean_Language_Lean_process_parseCmd___lambda__18___boxed(lean_object*, lean_object*, lean_object*, lean_object*, lean_object*); -static lean_object* l_Lean_Language_Lean_initFn____x40_Lean_Language_Lean___hyg_1166____closed__1; static lean_object* l_Lean_Language_Lean_process_doElab___lambda__3___closed__1; lean_object* l_Lean_Language_Lean_instToSnapshotTreeHeaderProcessedSnapshot(lean_object*); LEAN_EXPORT lean_object* l_Lean_PersistentArray_forIn___at_Lean_Language_Lean_process_parseCmd___spec__6___boxed(lean_object*, lean_object*, lean_object*, lean_object*, lean_object*, lean_object*); @@ -106,7 +106,6 @@ LEAN_EXPORT lean_object* l_Lean_PersistentArray_forIn___at_Lean_Language_Lean_pr static lean_object* l_Lean_Language_Lean_process_parseCmd___lambda__8___closed__3; uint8_t l_Lean_Syntax_isOfKind(lean_object*, lean_object*); lean_object* l_Lean_Name_mkStr5(lean_object*, lean_object*, lean_object*, lean_object*, lean_object*); -lean_object* l_Lean_Elab_processHeader(lean_object*, lean_object*, lean_object*, lean_object*, uint32_t, lean_object*, uint8_t, lean_object*, lean_object*); static lean_object* l_Lean_Language_Lean_process_parseCmd___lambda__8___closed__7; LEAN_EXPORT lean_object* l_Lean_Language_Lean_process_parseCmd___lambda__19(lean_object*, lean_object*, lean_object*, uint8_t, lean_object*, lean_object*, lean_object*, lean_object*); static lean_object* l_Lean_Language_Lean_process_processHeader___lambda__3___closed__2; @@ -133,6 +132,7 @@ static lean_object* l___private_Lean_Language_Lean_0__Lean_Language_Lean_withHea static lean_object* l_Lean_Language_Lean_process_processHeader___lambda__4___closed__5; LEAN_EXPORT lean_object* 
l___private_Lean_Language_Lean_0__Lean_Language_Lean_getNiceCommandStartPos_x3f(lean_object*); static lean_object* l_Lean_Language_Lean_process_doElab___lambda__3___closed__3; +static lean_object* l_Lean_Language_Lean_initFn____x40_Lean_Language_Lean___hyg_1200____closed__5; static lean_object* l_Lean_Language_Lean_process_processHeader___lambda__7___closed__2; lean_object* l_Lean_Name_mkStr3(lean_object*, lean_object*, lean_object*); extern lean_object* l_Lean_Language_Lean_instToSnapshotTreeCommandParsedSnapshot; @@ -156,7 +156,6 @@ static lean_object* l_Lean_Language_Lean_process_parseCmd___lambda__16___closed_ lean_object* lean_st_ref_take(lean_object*, lean_object*); LEAN_EXPORT lean_object* l_Array_forIn_x27Unsafe_loop___at_Lean_Language_Lean_process_parseCmd___spec__10___boxed(lean_object*, lean_object*, lean_object*, lean_object*, lean_object*, lean_object*, lean_object*, lean_object*, lean_object*, lean_object*, lean_object*); LEAN_EXPORT lean_object* l_Lean_Language_Lean_processCommands(lean_object*, lean_object*, lean_object*, lean_object*, lean_object*); -static lean_object* l_Lean_Language_Lean_initFn____x40_Lean_Language_Lean___hyg_1166____closed__2; static lean_object* l_Lean_Language_Lean_process_parseCmd___lambda__16___closed__1; static lean_object* l___private_Lean_Language_Lean_0__Lean_Language_Lean_withHeaderExceptions___rarg___closed__5; static lean_object* l_Lean_Language_Lean_process_parseCmd___lambda__16___closed__17; @@ -170,18 +169,22 @@ LEAN_EXPORT lean_object* l_Lean_Language_Lean_process_parseCmd___lambda__21___bo LEAN_EXPORT lean_object* l_Lean_Language_Lean_process_processHeader___lambda__3(lean_object*, lean_object*, lean_object*, lean_object*, lean_object*, lean_object*, lean_object*, lean_object*, lean_object*, lean_object*, lean_object*, lean_object*, lean_object*); lean_object* l_List_toPArray_x27___rarg(lean_object*); static lean_object* l_Lean_Language_Lean_process_parseCmd___lambda__16___closed__13; +static lean_object* l_Lean_Language_Lean_initFn____x40_Lean_Language_Lean___hyg_1200____closed__1; lean_object* l_Lean_Option_get___at_Lean_profiler_threshold_getSecs___spec__1(lean_object*, lean_object*); LEAN_EXPORT lean_object* l_Lean_Language_Lean_process_parseCmd___lambda__15(uint8_t, lean_object*, lean_object*, lean_object*, lean_object*, lean_object*, lean_object*, lean_object*, lean_object*, lean_object*, lean_object*, lean_object*, lean_object*, lean_object*); +lean_object* l_Lean_Elab_HeaderSyntax_startPos(lean_object*); static lean_object* l_Lean_Language_Lean_process_parseCmd___lambda__10___closed__3; static lean_object* l_Lean_Language_Lean_process_processHeader___lambda__6___closed__2; static lean_object* l_Lean_Language_Lean_process_parseCmd___lambda__16___closed__21; LEAN_EXPORT lean_object* l_Lean_PersistentArray_forInAux___at_Lean_Language_Lean_process_parseCmd___spec__2(lean_object*, lean_object*, lean_object*, lean_object*, lean_object*, lean_object*, lean_object*); static lean_object* l_Lean_Language_Lean_process_parseCmd___lambda__8___closed__5; +static lean_object* l_Lean_Language_Lean_initFn____x40_Lean_Language_Lean___hyg_1200____closed__6; static lean_object* l_Lean_Language_Lean_process_processHeader___lambda__6___closed__1; LEAN_EXPORT lean_object* l_Lean_Language_Lean_instMonadLiftLeanProcessingMLeanProcessingTIO(lean_object*); static lean_object* l_Lean_Language_Lean_process_doElab___closed__3; static lean_object* l_Lean_Language_Lean_process_processHeader___lambda__6___closed__3; static lean_object* 
l___private_Lean_Language_Lean_0__Lean_Language_Lean_getNiceCommandStartPos_x3f___closed__2; +LEAN_EXPORT lean_object* l_Lean_Language_Lean_initFn____x40_Lean_Language_Lean___hyg_1200_(lean_object*); LEAN_EXPORT lean_object* l_Array_forIn_x27Unsafe_loop___at_Lean_Language_Lean_process_parseCmd___spec__3(lean_object*, lean_object*, lean_object*, lean_object*, lean_object*, lean_object*, lean_object*, lean_object*, size_t, size_t, lean_object*, lean_object*); static lean_object* l_List_mapTR_loop___at_Lean_Language_Lean_process_processHeader___spec__1___closed__2; lean_object* l_Lean_MessageData_ofFormat(lean_object*); @@ -224,8 +227,6 @@ lean_object* lean_io_mono_nanos_now(lean_object*); static lean_object* l_Lean_Language_Lean_process_parseCmd___lambda__8___closed__4; lean_object* l_panic___at_String_fromUTF8_x21___spec__1(lean_object*); static lean_object* l___private_Lean_Language_Lean_0__Lean_Language_Lean_withHeaderExceptions___rarg___closed__2; -LEAN_EXPORT lean_object* l_Lean_Language_Lean_initFn____x40_Lean_Language_Lean___hyg_1166_(lean_object*); -static lean_object* l_Lean_Language_Lean_initFn____x40_Lean_Language_Lean___hyg_1166____closed__3; lean_object* lean_thunk_get_own(lean_object*); static lean_object* l_Lean_Language_Lean_process_parseCmd___lambda__16___closed__7; lean_object* l_Lean_Language_diagnosticsOfHeaderError(lean_object*, lean_object*, lean_object*); @@ -254,6 +255,7 @@ LEAN_EXPORT lean_object* l_Lean_Language_Lean_process_processHeader___lambda__8_ lean_object* l___private_Init_Util_0__mkPanicMessageWithDecl(lean_object*, lean_object*, lean_object*, lean_object*, lean_object*); LEAN_EXPORT lean_object* l_Lean_Language_Lean_process_parseCmd___lambda__15___boxed(lean_object*, lean_object*, lean_object*, lean_object*, lean_object*, lean_object*, lean_object*, lean_object*, lean_object*, lean_object*, lean_object*, lean_object*, lean_object*, lean_object*); static lean_object* l___private_Lean_Language_Lean_0__Lean_Language_Lean_withHeaderExceptions___rarg___closed__15; +static lean_object* l_Lean_Language_Lean_initFn____x40_Lean_Language_Lean___hyg_1200____closed__4; extern lean_object* l_Lean_diagnostics; LEAN_EXPORT lean_object* l_Lean_Language_Lean_process_parseCmd___lambda__4(lean_object*, lean_object*, lean_object*, lean_object*, lean_object*, lean_object*, lean_object*, lean_object*, lean_object*, lean_object*, lean_object*, lean_object*, lean_object*); lean_object* lean_get_set_stderr(lean_object*, lean_object*); @@ -280,11 +282,10 @@ static lean_object* l_List_forIn_x27_loop___at_Lean_Language_Lean_reparseOptions double l_Float_ofScientific(lean_object*, uint8_t, lean_object*); LEAN_EXPORT lean_object* l_Lean_Language_Lean_process_parseCmd___lambda__3___boxed(lean_object*, lean_object*, lean_object*, lean_object*, lean_object*, lean_object*, lean_object*, lean_object*, lean_object*); LEAN_EXPORT lean_object* l_Lean_Language_Lean_process_parseCmd___lambda__12(lean_object*, lean_object*, lean_object*, lean_object*, lean_object*, lean_object*, lean_object*, lean_object*, lean_object*, lean_object*, lean_object*, lean_object*, lean_object*, lean_object*, lean_object*, lean_object*, lean_object*, lean_object*, lean_object*, lean_object*, lean_object*); -static lean_object* l_Lean_Language_Lean_initFn____x40_Lean_Language_Lean___hyg_1166____closed__6; size_t lean_usize_of_nat(lean_object*); LEAN_EXPORT lean_object* l_Lean_Language_Lean_process_doElab___lambda__2___boxed(lean_object*, lean_object*, lean_object*, lean_object*, lean_object*); static lean_object* 
l_Lean_Language_Lean_process_parseCmd___lambda__8___closed__8; -LEAN_EXPORT lean_object* l_Lean_Language_Lean_process_processHeader___lambda__6(lean_object*, lean_object*, lean_object*, double, lean_object*, lean_object*, lean_object*, lean_object*, double, lean_object*, lean_object*, lean_object*, lean_object*); +LEAN_EXPORT lean_object* l_Lean_Language_Lean_process_processHeader___lambda__6(lean_object*, lean_object*, lean_object*, lean_object*, double, lean_object*, lean_object*, lean_object*, double, lean_object*, lean_object*, lean_object*, lean_object*); static lean_object* l_Lean_Language_Lean_process_parseCmd___lambda__16___closed__19; LEAN_EXPORT lean_object* l_Lean_PersistentArray_forInAux___at_Lean_Language_Lean_process_parseCmd___spec__2___lambda__1(lean_object*, lean_object*, lean_object*); LEAN_EXPORT lean_object* l_Lean_Language_Lean_process_parseCmd(lean_object*, lean_object*, lean_object*, lean_object*, uint8_t, lean_object*, lean_object*, lean_object*); @@ -334,7 +335,6 @@ static lean_object* l_Lean_Language_Lean_process_doElab___closed__2; lean_object* lean_task_map(lean_object*, lean_object*, lean_object*, uint8_t); static lean_object* l___private_Lean_Language_Lean_0__Lean_Language_Lean_withHeaderExceptions___rarg___closed__11; LEAN_EXPORT lean_object* l_Lean_Language_Lean_process_parseCmd___lambda__2___boxed(lean_object*, lean_object*, lean_object*, lean_object*, lean_object*, lean_object*, lean_object*); -static lean_object* l_Lean_Language_Lean_initFn____x40_Lean_Language_Lean___hyg_1166____closed__5; lean_object* lean_io_bind_task(lean_object*, lean_object*, lean_object*, uint8_t, lean_object*); lean_object* lean_get_set_stdin(lean_object*, lean_object*); LEAN_EXPORT lean_object* l_Lean_Language_Lean_process_parseHeader___lambda__2___boxed(lean_object*, lean_object*, lean_object*); @@ -356,6 +356,7 @@ LEAN_EXPORT lean_object* l_Lean_Option_setIfNotSet___at_Lean_Language_Lean_proce extern lean_object* l_Lean_Core_stderrAsMessages; extern lean_object* l_Lean_Elab_Command_instInhabitedScope; lean_object* lean_array_mk(lean_object*); +static lean_object* l_Lean_Language_Lean_initFn____x40_Lean_Language_Lean___hyg_1200____closed__3; static lean_object* l_Lean_Language_Lean_process_doElab___lambda__3___closed__4; lean_object* l_Lean_PersistentArray_get_x21___rarg(lean_object*, lean_object*, lean_object*); static lean_object* l_Lean_Language_Lean_process_parseCmd___lambda__16___closed__4; @@ -381,6 +382,7 @@ static lean_object* l_IO_FS_withIsolatedStreams___at_Lean_Language_Lean_process_ lean_object* l_Lean_Name_mkStr4(lean_object*, lean_object*, lean_object*, lean_object*); lean_object* l_Lean_Syntax_unsetTrailing(lean_object*); LEAN_EXPORT lean_object* l_Lean_Language_Lean_process_parseHeader___lambda__3(lean_object*, lean_object*, lean_object*, lean_object*, lean_object*, lean_object*, lean_object*, lean_object*, lean_object*); +static lean_object* l_Lean_Language_Lean_initFn____x40_Lean_Language_Lean___hyg_1200____closed__7; static lean_object* l_Lean_Language_Lean_process_parseCmd___lambda__10___closed__1; lean_object* lean_string_append(lean_object*, lean_object*); LEAN_EXPORT lean_object* l_Array_forIn_x27Unsafe_loop___at_Lean_Language_Lean_process_parseCmd___spec__3___boxed(lean_object*, lean_object*, lean_object*, lean_object*, lean_object*, lean_object*, lean_object*, lean_object*, lean_object*, lean_object*, lean_object*, lean_object*); @@ -417,7 +419,6 @@ static lean_object* l_Lean_Language_Lean_process_parseCmd___lambda__16___closed_ LEAN_EXPORT lean_object* 
l_Lean_Language_Lean_process_parseCmd___lambda__20(lean_object*, lean_object*, lean_object*, uint8_t, lean_object*, lean_object*, lean_object*, lean_object*); LEAN_EXPORT lean_object* l_Lean_Language_Lean_process_processHeader___lambda__2(lean_object*, lean_object*, lean_object*, lean_object*); LEAN_EXPORT lean_object* l___private_Lean_Language_Lean_0__Lean_Language_Lean_withHeaderExceptions(lean_object*); -static lean_object* l_Lean_Language_Lean_initFn____x40_Lean_Language_Lean___hyg_1166____closed__4; static lean_object* l_Lean_Language_Lean_process_processHeader___lambda__7___closed__3; static lean_object* l___private_Lean_Language_Lean_0__Lean_Language_Lean_withHeaderExceptions___rarg___closed__8; lean_object* lean_mk_empty_array_with_capacity(lean_object*); @@ -2156,7 +2157,7 @@ lean_dec(x_1); return x_3; } } -static lean_object* _init_l_Lean_Language_Lean_initFn____x40_Lean_Language_Lean___hyg_1166____closed__1() { +static lean_object* _init_l_Lean_Language_Lean_initFn____x40_Lean_Language_Lean___hyg_1200____closed__1() { _start: { lean_object* x_1; @@ -2164,7 +2165,7 @@ x_1 = lean_mk_string_unchecked("experimental", 12, 12); return x_1; } } -static lean_object* _init_l_Lean_Language_Lean_initFn____x40_Lean_Language_Lean___hyg_1166____closed__2() { +static lean_object* _init_l_Lean_Language_Lean_initFn____x40_Lean_Language_Lean___hyg_1200____closed__2() { _start: { lean_object* x_1; @@ -2172,17 +2173,17 @@ x_1 = lean_mk_string_unchecked("module", 6, 6); return x_1; } } -static lean_object* _init_l_Lean_Language_Lean_initFn____x40_Lean_Language_Lean___hyg_1166____closed__3() { +static lean_object* _init_l_Lean_Language_Lean_initFn____x40_Lean_Language_Lean___hyg_1200____closed__3() { _start: { lean_object* x_1; lean_object* x_2; lean_object* x_3; -x_1 = l_Lean_Language_Lean_initFn____x40_Lean_Language_Lean___hyg_1166____closed__1; -x_2 = l_Lean_Language_Lean_initFn____x40_Lean_Language_Lean___hyg_1166____closed__2; +x_1 = l_Lean_Language_Lean_initFn____x40_Lean_Language_Lean___hyg_1200____closed__1; +x_2 = l_Lean_Language_Lean_initFn____x40_Lean_Language_Lean___hyg_1200____closed__2; x_3 = l_Lean_Name_mkStr2(x_1, x_2); return x_3; } } -static lean_object* _init_l_Lean_Language_Lean_initFn____x40_Lean_Language_Lean___hyg_1166____closed__4() { +static lean_object* _init_l_Lean_Language_Lean_initFn____x40_Lean_Language_Lean___hyg_1200____closed__4() { _start: { lean_object* x_1; @@ -2190,7 +2191,7 @@ x_1 = lean_mk_string_unchecked("", 0, 0); return x_1; } } -static lean_object* _init_l_Lean_Language_Lean_initFn____x40_Lean_Language_Lean___hyg_1166____closed__5() { +static lean_object* _init_l_Lean_Language_Lean_initFn____x40_Lean_Language_Lean___hyg_1200____closed__5() { _start: { lean_object* x_1; @@ -2198,13 +2199,13 @@ x_1 = lean_mk_string_unchecked("Allow use of module system (experimental)", 41, return x_1; } } -static lean_object* _init_l_Lean_Language_Lean_initFn____x40_Lean_Language_Lean___hyg_1166____closed__6() { +static lean_object* _init_l_Lean_Language_Lean_initFn____x40_Lean_Language_Lean___hyg_1200____closed__6() { _start: { uint8_t x_1; lean_object* x_2; lean_object* x_3; lean_object* x_4; lean_object* x_5; x_1 = 0; -x_2 = l_Lean_Language_Lean_initFn____x40_Lean_Language_Lean___hyg_1166____closed__4; -x_3 = l_Lean_Language_Lean_initFn____x40_Lean_Language_Lean___hyg_1166____closed__5; +x_2 = l_Lean_Language_Lean_initFn____x40_Lean_Language_Lean___hyg_1200____closed__4; +x_3 = l_Lean_Language_Lean_initFn____x40_Lean_Language_Lean___hyg_1200____closed__5; x_4 = 
lean_box(x_1); x_5 = lean_alloc_ctor(0, 3, 0); lean_ctor_set(x_5, 0, x_4); @@ -2213,25 +2214,25 @@ lean_ctor_set(x_5, 2, x_3); return x_5; } } -static lean_object* _init_l_Lean_Language_Lean_initFn____x40_Lean_Language_Lean___hyg_1166____closed__7() { +static lean_object* _init_l_Lean_Language_Lean_initFn____x40_Lean_Language_Lean___hyg_1200____closed__7() { _start: { lean_object* x_1; lean_object* x_2; lean_object* x_3; lean_object* x_4; lean_object* x_5; x_1 = l___private_Lean_Language_Lean_0__Lean_Language_Lean_withHeaderExceptions___rarg___closed__3; x_2 = l___private_Lean_Language_Lean_0__Lean_Language_Lean_withHeaderExceptions___rarg___closed__5; -x_3 = l_Lean_Language_Lean_initFn____x40_Lean_Language_Lean___hyg_1166____closed__1; -x_4 = l_Lean_Language_Lean_initFn____x40_Lean_Language_Lean___hyg_1166____closed__2; +x_3 = l_Lean_Language_Lean_initFn____x40_Lean_Language_Lean___hyg_1200____closed__1; +x_4 = l_Lean_Language_Lean_initFn____x40_Lean_Language_Lean___hyg_1200____closed__2; x_5 = l_Lean_Name_mkStr5(x_1, x_2, x_1, x_3, x_4); return x_5; } } -LEAN_EXPORT lean_object* l_Lean_Language_Lean_initFn____x40_Lean_Language_Lean___hyg_1166_(lean_object* x_1) { +LEAN_EXPORT lean_object* l_Lean_Language_Lean_initFn____x40_Lean_Language_Lean___hyg_1200_(lean_object* x_1) { _start: { lean_object* x_2; lean_object* x_3; lean_object* x_4; lean_object* x_5; -x_2 = l_Lean_Language_Lean_initFn____x40_Lean_Language_Lean___hyg_1166____closed__3; -x_3 = l_Lean_Language_Lean_initFn____x40_Lean_Language_Lean___hyg_1166____closed__6; -x_4 = l_Lean_Language_Lean_initFn____x40_Lean_Language_Lean___hyg_1166____closed__7; +x_2 = l_Lean_Language_Lean_initFn____x40_Lean_Language_Lean___hyg_1200____closed__3; +x_3 = l_Lean_Language_Lean_initFn____x40_Lean_Language_Lean___hyg_1200____closed__6; +x_4 = l_Lean_Language_Lean_initFn____x40_Lean_Language_Lean___hyg_1200____closed__7; x_5 = l_Lean_Option_register___at_Lean_Elab_initFn____x40_Lean_Elab_AutoBound___hyg_6____spec__1(x_2, x_3, x_4, x_1); return x_5; } @@ -3494,7 +3495,7 @@ x_51 = lean_alloc_ctor(3, 1, 0); lean_ctor_set(x_51, 0, x_42); x_52 = l_Lean_MessageData_ofFormat(x_51); x_53 = 0; -x_54 = l_Lean_Language_Lean_initFn____x40_Lean_Language_Lean___hyg_1166____closed__4; +x_54 = l_Lean_Language_Lean_initFn____x40_Lean_Language_Lean___hyg_1200____closed__4; x_55 = lean_alloc_ctor(0, 5, 3); lean_ctor_set(x_55, 0, x_23); lean_ctor_set(x_55, 1, x_49); @@ -3755,7 +3756,7 @@ x_121 = lean_alloc_ctor(3, 1, 0); lean_ctor_set(x_121, 0, x_112); x_122 = l_Lean_MessageData_ofFormat(x_121); x_123 = 0; -x_124 = l_Lean_Language_Lean_initFn____x40_Lean_Language_Lean___hyg_1166____closed__4; +x_124 = l_Lean_Language_Lean_initFn____x40_Lean_Language_Lean___hyg_1200____closed__4; x_125 = lean_alloc_ctor(0, 5, 3); lean_ctor_set(x_125, 0, x_93); lean_ctor_set(x_125, 1, x_119); @@ -4101,7 +4102,7 @@ lean_inc(x_18); lean_dec(x_14); x_19 = 0; x_20 = 0; -x_21 = l_Lean_Language_Lean_initFn____x40_Lean_Language_Lean___hyg_1166____closed__4; +x_21 = l_Lean_Language_Lean_initFn____x40_Lean_Language_Lean___hyg_1200____closed__4; lean_inc(x_2); x_22 = lean_alloc_ctor(0, 5, 3); lean_ctor_set(x_22, 0, x_2); @@ -4295,7 +4296,7 @@ lean_inc(x_18); lean_dec(x_14); x_19 = 0; x_20 = 0; -x_21 = l_Lean_Language_Lean_initFn____x40_Lean_Language_Lean___hyg_1166____closed__4; +x_21 = l_Lean_Language_Lean_initFn____x40_Lean_Language_Lean___hyg_1200____closed__4; lean_inc(x_2); x_22 = lean_alloc_ctor(0, 5, 3); lean_ctor_set(x_22, 0, x_2); @@ -4645,7 +4646,7 @@ lean_inc(x_18); lean_dec(x_14); x_19 
= 0; x_20 = 0; -x_21 = l_Lean_Language_Lean_initFn____x40_Lean_Language_Lean___hyg_1166____closed__4; +x_21 = l_Lean_Language_Lean_initFn____x40_Lean_Language_Lean___hyg_1200____closed__4; lean_inc(x_2); x_22 = lean_alloc_ctor(0, 5, 3); lean_ctor_set(x_22, 0, x_2); @@ -4827,7 +4828,7 @@ lean_inc(x_18); lean_dec(x_14); x_19 = 0; x_20 = 0; -x_21 = l_Lean_Language_Lean_initFn____x40_Lean_Language_Lean___hyg_1166____closed__4; +x_21 = l_Lean_Language_Lean_initFn____x40_Lean_Language_Lean___hyg_1200____closed__4; lean_inc(x_2); x_22 = lean_alloc_ctor(0, 5, 3); lean_ctor_set(x_22, 0, x_2); @@ -5182,7 +5183,7 @@ lean_object* x_1; double x_2; uint8_t x_3; lean_object* x_4; lean_object* x_5; x_1 = l_Lean_Language_Lean_process_parseCmd___lambda__3___closed__4; x_2 = l_Lean_Language_Lean_process_parseCmd___lambda__3___closed__7; x_3 = 1; -x_4 = l_Lean_Language_Lean_initFn____x40_Lean_Language_Lean___hyg_1166____closed__4; +x_4 = l_Lean_Language_Lean_initFn____x40_Lean_Language_Lean___hyg_1200____closed__4; x_5 = lean_alloc_ctor(0, 2, 17); lean_ctor_set(x_5, 0, x_1); lean_ctor_set(x_5, 1, x_4); @@ -5362,7 +5363,7 @@ lean_dec(x_39); x_42 = l_Lean_FileMap_toPosition(x_41, x_7); x_43 = 0; x_44 = 0; -x_45 = l_Lean_Language_Lean_initFn____x40_Lean_Language_Lean___hyg_1166____closed__4; +x_45 = l_Lean_Language_Lean_initFn____x40_Lean_Language_Lean___hyg_1200____closed__4; x_46 = lean_alloc_ctor(0, 5, 3); lean_ctor_set(x_46, 0, x_40); lean_ctor_set(x_46, 1, x_42); @@ -5413,7 +5414,7 @@ lean_dec(x_62); x_65 = l_Lean_FileMap_toPosition(x_64, x_7); x_66 = 0; x_67 = 0; -x_68 = l_Lean_Language_Lean_initFn____x40_Lean_Language_Lean___hyg_1166____closed__4; +x_68 = l_Lean_Language_Lean_initFn____x40_Lean_Language_Lean___hyg_1200____closed__4; x_69 = lean_alloc_ctor(0, 5, 3); lean_ctor_set(x_69, 0, x_63); lean_ctor_set(x_69, 1, x_65); @@ -6850,7 +6851,7 @@ static lean_object* _init_l_Lean_Language_Lean_process_parseCmd___lambda__10___c { lean_object* x_1; lean_object* x_2; lean_object* x_3; lean_object* x_4; uint8_t x_5; lean_object* x_6; x_1 = lean_box(0); -x_2 = l_Lean_Language_Lean_initFn____x40_Lean_Language_Lean___hyg_1166____closed__4; +x_2 = l_Lean_Language_Lean_initFn____x40_Lean_Language_Lean___hyg_1200____closed__4; x_3 = lean_box(0); x_4 = l_Lean_Language_Lean_process_doElab___closed__1; x_5 = 0; @@ -9810,7 +9811,7 @@ static lean_object* _init_l_Lean_Language_Lean_process_parseCmd___lambda__16___c { lean_object* x_1; lean_object* x_2; lean_object* x_3; lean_object* x_4; uint8_t x_5; lean_object* x_6; x_1 = lean_box(0); -x_2 = l_Lean_Language_Lean_initFn____x40_Lean_Language_Lean___hyg_1166____closed__4; +x_2 = l_Lean_Language_Lean_initFn____x40_Lean_Language_Lean___hyg_1200____closed__4; x_3 = l_Lean_Language_Lean_process_parseCmd___lambda__16___closed__15; x_4 = l_Lean_Language_Lean_process_parseCmd___lambda__16___closed__17; x_5 = 0; @@ -15971,7 +15972,7 @@ lean_dec(x_17); x_21 = 0; x_22 = l_Lean_Language_Lean_process_processHeader___lambda__4___closed__4; x_23 = 1; -x_24 = l_Lean_Language_Lean_initFn____x40_Lean_Language_Lean___hyg_1166____closed__4; +x_24 = l_Lean_Language_Lean_initFn____x40_Lean_Language_Lean___hyg_1200____closed__4; x_25 = lean_alloc_ctor(0, 2, 17); lean_ctor_set(x_25, 0, x_22); lean_ctor_set(x_25, 1, x_24); @@ -16004,233 +16005,246 @@ return x_37; } } } -LEAN_EXPORT lean_object* l_Lean_Language_Lean_process_processHeader___lambda__5(lean_object* x_1, lean_object* x_2, lean_object* x_3, double x_4, lean_object* x_5, lean_object* x_6, lean_object* x_7, lean_object* x_8, double x_9, 
lean_object* x_10, lean_object* x_11, lean_object* x_12, lean_object* x_13) { +LEAN_EXPORT lean_object* l_Lean_Language_Lean_process_processHeader___lambda__5(lean_object* x_1, lean_object* x_2, lean_object* x_3, lean_object* x_4, double x_5, lean_object* x_6, lean_object* x_7, lean_object* x_8, double x_9, lean_object* x_10, lean_object* x_11, lean_object* x_12, lean_object* x_13) { _start: { -uint32_t x_14; lean_object* x_15; lean_object* x_16; lean_object* x_17; uint8_t x_18; lean_object* x_19; -x_14 = lean_ctor_get_uint32(x_1, sizeof(void*)*3); -x_15 = lean_ctor_get(x_1, 2); +lean_object* x_14; lean_object* x_15; uint8_t x_16; uint32_t x_17; lean_object* x_18; lean_object* x_19; lean_object* x_20; lean_object* x_21; uint8_t x_22; lean_object* x_23; +x_14 = l_Lean_Elab_HeaderSyntax_startPos(x_1); +x_15 = lean_ctor_get(x_2, 1); lean_inc(x_15); -x_16 = lean_ctor_get(x_1, 0); -lean_inc(x_16); -lean_dec(x_1); -x_17 = l_Lean_MessageLog_empty; -x_18 = 1; -lean_inc(x_3); -lean_inc(x_10); -lean_inc(x_2); -x_19 = l_Lean_Elab_processHeader(x_2, x_10, x_17, x_3, x_14, x_15, x_18, x_16, x_13); -if (lean_obj_tag(x_19) == 0) -{ -lean_object* x_20; lean_object* x_21; lean_object* x_22; lean_object* x_23; lean_object* x_24; uint8_t x_25; -x_20 = lean_ctor_get(x_19, 0); +x_16 = lean_ctor_get_uint8(x_2, sizeof(void*)*5 + 4); +x_17 = lean_ctor_get_uint32(x_2, sizeof(void*)*5); +x_18 = lean_ctor_get(x_2, 4); +lean_inc(x_18); +x_19 = lean_ctor_get(x_2, 0); +lean_inc(x_19); +x_20 = lean_ctor_get(x_2, 3); lean_inc(x_20); -x_21 = lean_ctor_get(x_19, 1); -lean_inc(x_21); -lean_dec(x_19); -x_22 = lean_ctor_get(x_20, 0); -lean_inc(x_22); -x_23 = lean_ctor_get(x_20, 1); -lean_inc(x_23); -lean_dec(x_20); -x_24 = lean_io_mono_nanos_now(x_21); -x_25 = !lean_is_exclusive(x_24); -if (x_25 == 0) +lean_dec(x_2); +x_21 = l_Lean_MessageLog_empty; +x_22 = 1; +lean_inc(x_4); +lean_inc(x_3); +x_23 = l_Lean_Elab_processHeaderCore(x_14, x_15, x_16, x_3, x_21, x_4, x_17, x_18, x_22, x_19, x_20, x_13); +lean_dec(x_14); +if (lean_obj_tag(x_23) == 0) { -lean_object* x_26; lean_object* x_27; uint8_t x_28; lean_object* x_29; double x_30; double x_31; lean_object* x_32; uint8_t x_33; +lean_object* x_24; lean_object* x_25; lean_object* x_26; lean_object* x_27; lean_object* x_28; uint8_t x_29; +x_24 = lean_ctor_get(x_23, 0); +lean_inc(x_24); +x_25 = lean_ctor_get(x_23, 1); +lean_inc(x_25); +lean_dec(x_23); x_26 = lean_ctor_get(x_24, 0); +lean_inc(x_26); x_27 = lean_ctor_get(x_24, 1); -x_28 = 0; -x_29 = lean_unsigned_to_nat(0u); -x_30 = l_Float_ofScientific(x_26, x_28, x_29); -lean_dec(x_26); -x_31 = lean_float_div(x_30, x_4); -lean_inc(x_23); -x_32 = l_Lean_Language_Snapshot_Diagnostics_ofMessageLog(x_23, x_27); -x_33 = !lean_is_exclusive(x_32); -if (x_33 == 0) +lean_inc(x_27); +lean_dec(x_24); +x_28 = lean_io_mono_nanos_now(x_25); +x_29 = !lean_is_exclusive(x_28); +if (x_29 == 0) { -lean_object* x_34; lean_object* x_35; uint8_t x_36; -x_34 = lean_ctor_get(x_32, 0); -x_35 = lean_ctor_get(x_32, 1); -x_36 = l_Lean_MessageLog_hasErrors(x_23); -if (x_36 == 0) +lean_object* x_30; lean_object* x_31; uint8_t x_32; lean_object* x_33; double x_34; double x_35; lean_object* x_36; uint8_t x_37; +x_30 = lean_ctor_get(x_28, 0); +x_31 = lean_ctor_get(x_28, 1); +x_32 = 0; +x_33 = lean_unsigned_to_nat(0u); +x_34 = l_Float_ofScientific(x_30, x_32, x_33); +lean_dec(x_30); +x_35 = lean_float_div(x_34, x_5); +lean_inc(x_27); +x_36 = l_Lean_Language_Snapshot_Diagnostics_ofMessageLog(x_27, x_31); +x_37 = !lean_is_exclusive(x_36); +if (x_37 == 0) { 
-lean_object* x_37; lean_object* x_38; -lean_free_object(x_32); -lean_free_object(x_24); -x_37 = lean_box(0); -x_38 = l_Lean_Language_Lean_process_processHeader___lambda__4(x_22, x_23, x_3, x_2, x_5, x_6, x_7, x_34, x_8, x_9, x_31, x_10, x_37, x_12, x_35); +lean_object* x_38; lean_object* x_39; uint8_t x_40; +x_38 = lean_ctor_get(x_36, 0); +x_39 = lean_ctor_get(x_36, 1); +x_40 = l_Lean_MessageLog_hasErrors(x_27); +if (x_40 == 0) +{ +lean_object* x_41; lean_object* x_42; +lean_free_object(x_36); +lean_free_object(x_28); +x_41 = lean_box(0); +x_42 = l_Lean_Language_Lean_process_processHeader___lambda__4(x_26, x_27, x_4, x_1, x_6, x_7, x_8, x_38, x_3, x_9, x_35, x_10, x_41, x_12, x_39); lean_dec(x_3); -return x_38; +lean_dec(x_4); +return x_42; } else { -lean_object* x_39; lean_object* x_40; lean_object* x_41; -lean_dec(x_23); -lean_dec(x_22); +lean_object* x_43; lean_object* x_44; lean_object* x_45; +lean_dec(x_27); +lean_dec(x_26); lean_dec(x_10); +lean_dec(x_8); lean_dec(x_7); -lean_dec(x_6); +lean_dec(x_4); lean_dec(x_3); -lean_dec(x_2); -x_39 = l_Lean_Language_Lean_process_processHeader___lambda__3___closed__9; -x_40 = l___private_Lean_Language_Lean_0__Lean_Language_Lean_withHeaderExceptions___rarg___closed__19; -lean_inc(x_5); -x_41 = lean_alloc_ctor(0, 4, 1); -lean_ctor_set(x_41, 0, x_39); -lean_ctor_set(x_41, 1, x_34); -lean_ctor_set(x_41, 2, x_5); -lean_ctor_set(x_41, 3, x_40); -lean_ctor_set_uint8(x_41, sizeof(void*)*4, x_18); -lean_ctor_set(x_24, 1, x_5); -lean_ctor_set(x_24, 0, x_41); -lean_ctor_set(x_32, 0, x_24); -return x_32; +lean_dec(x_1); +x_43 = l_Lean_Language_Lean_process_processHeader___lambda__3___closed__9; +x_44 = l___private_Lean_Language_Lean_0__Lean_Language_Lean_withHeaderExceptions___rarg___closed__19; +lean_inc(x_6); +x_45 = lean_alloc_ctor(0, 4, 1); +lean_ctor_set(x_45, 0, x_43); +lean_ctor_set(x_45, 1, x_38); +lean_ctor_set(x_45, 2, x_6); +lean_ctor_set(x_45, 3, x_44); +lean_ctor_set_uint8(x_45, sizeof(void*)*4, x_22); +lean_ctor_set(x_28, 1, x_6); +lean_ctor_set(x_28, 0, x_45); +lean_ctor_set(x_36, 0, x_28); +return x_36; } } else { -lean_object* x_42; lean_object* x_43; uint8_t x_44; -x_42 = lean_ctor_get(x_32, 0); -x_43 = lean_ctor_get(x_32, 1); -lean_inc(x_43); -lean_inc(x_42); -lean_dec(x_32); -x_44 = l_Lean_MessageLog_hasErrors(x_23); -if (x_44 == 0) +lean_object* x_46; lean_object* x_47; uint8_t x_48; +x_46 = lean_ctor_get(x_36, 0); +x_47 = lean_ctor_get(x_36, 1); +lean_inc(x_47); +lean_inc(x_46); +lean_dec(x_36); +x_48 = l_Lean_MessageLog_hasErrors(x_27); +if (x_48 == 0) { -lean_object* x_45; lean_object* x_46; -lean_free_object(x_24); -x_45 = lean_box(0); -x_46 = l_Lean_Language_Lean_process_processHeader___lambda__4(x_22, x_23, x_3, x_2, x_5, x_6, x_7, x_42, x_8, x_9, x_31, x_10, x_45, x_12, x_43); +lean_object* x_49; lean_object* x_50; +lean_free_object(x_28); +x_49 = lean_box(0); +x_50 = l_Lean_Language_Lean_process_processHeader___lambda__4(x_26, x_27, x_4, x_1, x_6, x_7, x_8, x_46, x_3, x_9, x_35, x_10, x_49, x_12, x_47); lean_dec(x_3); -return x_46; +lean_dec(x_4); +return x_50; } else { -lean_object* x_47; lean_object* x_48; lean_object* x_49; lean_object* x_50; -lean_dec(x_23); -lean_dec(x_22); +lean_object* x_51; lean_object* x_52; lean_object* x_53; lean_object* x_54; +lean_dec(x_27); +lean_dec(x_26); lean_dec(x_10); +lean_dec(x_8); lean_dec(x_7); -lean_dec(x_6); +lean_dec(x_4); lean_dec(x_3); -lean_dec(x_2); -x_47 = l_Lean_Language_Lean_process_processHeader___lambda__3___closed__9; -x_48 = 
l___private_Lean_Language_Lean_0__Lean_Language_Lean_withHeaderExceptions___rarg___closed__19; -lean_inc(x_5); -x_49 = lean_alloc_ctor(0, 4, 1); -lean_ctor_set(x_49, 0, x_47); -lean_ctor_set(x_49, 1, x_42); -lean_ctor_set(x_49, 2, x_5); -lean_ctor_set(x_49, 3, x_48); -lean_ctor_set_uint8(x_49, sizeof(void*)*4, x_18); -lean_ctor_set(x_24, 1, x_5); -lean_ctor_set(x_24, 0, x_49); -x_50 = lean_alloc_ctor(0, 2, 0); -lean_ctor_set(x_50, 0, x_24); -lean_ctor_set(x_50, 1, x_43); -return x_50; +lean_dec(x_1); +x_51 = l_Lean_Language_Lean_process_processHeader___lambda__3___closed__9; +x_52 = l___private_Lean_Language_Lean_0__Lean_Language_Lean_withHeaderExceptions___rarg___closed__19; +lean_inc(x_6); +x_53 = lean_alloc_ctor(0, 4, 1); +lean_ctor_set(x_53, 0, x_51); +lean_ctor_set(x_53, 1, x_46); +lean_ctor_set(x_53, 2, x_6); +lean_ctor_set(x_53, 3, x_52); +lean_ctor_set_uint8(x_53, sizeof(void*)*4, x_22); +lean_ctor_set(x_28, 1, x_6); +lean_ctor_set(x_28, 0, x_53); +x_54 = lean_alloc_ctor(0, 2, 0); +lean_ctor_set(x_54, 0, x_28); +lean_ctor_set(x_54, 1, x_47); +return x_54; } } } else { -lean_object* x_51; lean_object* x_52; uint8_t x_53; lean_object* x_54; double x_55; double x_56; lean_object* x_57; lean_object* x_58; lean_object* x_59; lean_object* x_60; uint8_t x_61; -x_51 = lean_ctor_get(x_24, 0); -x_52 = lean_ctor_get(x_24, 1); -lean_inc(x_52); -lean_inc(x_51); -lean_dec(x_24); -x_53 = 0; -x_54 = lean_unsigned_to_nat(0u); -x_55 = l_Float_ofScientific(x_51, x_53, x_54); -lean_dec(x_51); -x_56 = lean_float_div(x_55, x_4); -lean_inc(x_23); -x_57 = l_Lean_Language_Snapshot_Diagnostics_ofMessageLog(x_23, x_52); -x_58 = lean_ctor_get(x_57, 0); -lean_inc(x_58); -x_59 = lean_ctor_get(x_57, 1); -lean_inc(x_59); -if (lean_is_exclusive(x_57)) { - lean_ctor_release(x_57, 0); - lean_ctor_release(x_57, 1); - x_60 = x_57; +lean_object* x_55; lean_object* x_56; uint8_t x_57; lean_object* x_58; double x_59; double x_60; lean_object* x_61; lean_object* x_62; lean_object* x_63; lean_object* x_64; uint8_t x_65; +x_55 = lean_ctor_get(x_28, 0); +x_56 = lean_ctor_get(x_28, 1); +lean_inc(x_56); +lean_inc(x_55); +lean_dec(x_28); +x_57 = 0; +x_58 = lean_unsigned_to_nat(0u); +x_59 = l_Float_ofScientific(x_55, x_57, x_58); +lean_dec(x_55); +x_60 = lean_float_div(x_59, x_5); +lean_inc(x_27); +x_61 = l_Lean_Language_Snapshot_Diagnostics_ofMessageLog(x_27, x_56); +x_62 = lean_ctor_get(x_61, 0); +lean_inc(x_62); +x_63 = lean_ctor_get(x_61, 1); +lean_inc(x_63); +if (lean_is_exclusive(x_61)) { + lean_ctor_release(x_61, 0); + lean_ctor_release(x_61, 1); + x_64 = x_61; } else { - lean_dec_ref(x_57); - x_60 = lean_box(0); + lean_dec_ref(x_61); + x_64 = lean_box(0); } -x_61 = l_Lean_MessageLog_hasErrors(x_23); -if (x_61 == 0) +x_65 = l_Lean_MessageLog_hasErrors(x_27); +if (x_65 == 0) { -lean_object* x_62; lean_object* x_63; -lean_dec(x_60); -x_62 = lean_box(0); -x_63 = l_Lean_Language_Lean_process_processHeader___lambda__4(x_22, x_23, x_3, x_2, x_5, x_6, x_7, x_58, x_8, x_9, x_56, x_10, x_62, x_12, x_59); +lean_object* x_66; lean_object* x_67; +lean_dec(x_64); +x_66 = lean_box(0); +x_67 = l_Lean_Language_Lean_process_processHeader___lambda__4(x_26, x_27, x_4, x_1, x_6, x_7, x_8, x_62, x_3, x_9, x_60, x_10, x_66, x_12, x_63); lean_dec(x_3); -return x_63; +lean_dec(x_4); +return x_67; } else { -lean_object* x_64; lean_object* x_65; lean_object* x_66; lean_object* x_67; lean_object* x_68; -lean_dec(x_23); -lean_dec(x_22); +lean_object* x_68; lean_object* x_69; lean_object* x_70; lean_object* x_71; lean_object* x_72; +lean_dec(x_27); 
+lean_dec(x_26); lean_dec(x_10); +lean_dec(x_8); lean_dec(x_7); -lean_dec(x_6); +lean_dec(x_4); lean_dec(x_3); -lean_dec(x_2); -x_64 = l_Lean_Language_Lean_process_processHeader___lambda__3___closed__9; -x_65 = l___private_Lean_Language_Lean_0__Lean_Language_Lean_withHeaderExceptions___rarg___closed__19; -lean_inc(x_5); -x_66 = lean_alloc_ctor(0, 4, 1); -lean_ctor_set(x_66, 0, x_64); -lean_ctor_set(x_66, 1, x_58); -lean_ctor_set(x_66, 2, x_5); -lean_ctor_set(x_66, 3, x_65); -lean_ctor_set_uint8(x_66, sizeof(void*)*4, x_18); -x_67 = lean_alloc_ctor(0, 2, 0); -lean_ctor_set(x_67, 0, x_66); -lean_ctor_set(x_67, 1, x_5); -if (lean_is_scalar(x_60)) { - x_68 = lean_alloc_ctor(0, 2, 0); +lean_dec(x_1); +x_68 = l_Lean_Language_Lean_process_processHeader___lambda__3___closed__9; +x_69 = l___private_Lean_Language_Lean_0__Lean_Language_Lean_withHeaderExceptions___rarg___closed__19; +lean_inc(x_6); +x_70 = lean_alloc_ctor(0, 4, 1); +lean_ctor_set(x_70, 0, x_68); +lean_ctor_set(x_70, 1, x_62); +lean_ctor_set(x_70, 2, x_6); +lean_ctor_set(x_70, 3, x_69); +lean_ctor_set_uint8(x_70, sizeof(void*)*4, x_22); +x_71 = lean_alloc_ctor(0, 2, 0); +lean_ctor_set(x_71, 0, x_70); +lean_ctor_set(x_71, 1, x_6); +if (lean_is_scalar(x_64)) { + x_72 = lean_alloc_ctor(0, 2, 0); } else { - x_68 = x_60; + x_72 = x_64; } -lean_ctor_set(x_68, 0, x_67); -lean_ctor_set(x_68, 1, x_59); -return x_68; +lean_ctor_set(x_72, 0, x_71); +lean_ctor_set(x_72, 1, x_63); +return x_72; } } } else { -uint8_t x_69; +uint8_t x_73; lean_dec(x_10); +lean_dec(x_8); lean_dec(x_7); lean_dec(x_6); -lean_dec(x_5); +lean_dec(x_4); lean_dec(x_3); -lean_dec(x_2); -x_69 = !lean_is_exclusive(x_19); -if (x_69 == 0) +lean_dec(x_1); +x_73 = !lean_is_exclusive(x_23); +if (x_73 == 0) { -return x_19; +return x_23; } else { -lean_object* x_70; lean_object* x_71; lean_object* x_72; -x_70 = lean_ctor_get(x_19, 0); -x_71 = lean_ctor_get(x_19, 1); -lean_inc(x_71); -lean_inc(x_70); -lean_dec(x_19); -x_72 = lean_alloc_ctor(1, 2, 0); -lean_ctor_set(x_72, 0, x_70); -lean_ctor_set(x_72, 1, x_71); -return x_72; +lean_object* x_74; lean_object* x_75; lean_object* x_76; +x_74 = lean_ctor_get(x_23, 0); +x_75 = lean_ctor_get(x_23, 1); +lean_inc(x_75); +lean_inc(x_74); +lean_dec(x_23); +x_76 = lean_alloc_ctor(1, 2, 0); +lean_ctor_set(x_76, 0, x_74); +lean_ctor_set(x_76, 1, x_75); +return x_76; } } } @@ -16261,12 +16275,12 @@ lean_ctor_set(x_2, 0, x_1); return x_2; } } -LEAN_EXPORT lean_object* l_Lean_Language_Lean_process_processHeader___lambda__6(lean_object* x_1, lean_object* x_2, lean_object* x_3, double x_4, lean_object* x_5, lean_object* x_6, lean_object* x_7, lean_object* x_8, double x_9, lean_object* x_10, lean_object* x_11, lean_object* x_12, lean_object* x_13) { +LEAN_EXPORT lean_object* l_Lean_Language_Lean_process_processHeader___lambda__6(lean_object* x_1, lean_object* x_2, lean_object* x_3, lean_object* x_4, double x_5, lean_object* x_6, lean_object* x_7, lean_object* x_8, double x_9, lean_object* x_10, lean_object* x_11, lean_object* x_12, lean_object* x_13) { _start: { lean_object* x_14; lean_object* x_15; uint8_t x_16; x_14 = lean_unsigned_to_nat(0u); -x_15 = l_Lean_Syntax_getArg(x_2, x_14); +x_15 = l_Lean_Syntax_getArg(x_1, x_14); x_16 = l_Lean_Syntax_isNone(x_15); lean_dec(x_15); if (x_16 == 0) @@ -16278,9 +16292,10 @@ if (x_18 == 0) { lean_object* x_19; lean_object* x_20; lean_dec(x_10); +lean_dec(x_8); lean_dec(x_7); lean_dec(x_6); -lean_dec(x_5); +lean_dec(x_4); lean_dec(x_3); lean_dec(x_2); lean_dec(x_1); @@ -16352,7 +16367,7 @@ x_14 = 
l_Float_ofScientific(x_10, x_12, x_13); lean_dec(x_10); x_15 = l_Lean_Language_Lean_process_processHeader___lambda__7___closed__1; x_16 = lean_float_div(x_14, x_15); -x_17 = lean_ctor_get(x_6, 1); +x_17 = lean_ctor_get(x_6, 2); lean_inc(x_17); x_18 = lean_ctor_get(x_6, 0); lean_inc(x_18); @@ -16364,8 +16379,7 @@ if (x_20 == 0) lean_object* x_21; lean_object* x_22; x_21 = lean_box(0); lean_inc(x_17); -x_22 = l_Lean_Language_Lean_process_processHeader___lambda__6(x_6, x_1, x_2, x_15, x_3, x_4, x_5, x_17, x_16, x_17, x_21, x_7, x_11); -lean_dec(x_17); +x_22 = l_Lean_Language_Lean_process_processHeader___lambda__6(x_1, x_6, x_17, x_2, x_15, x_3, x_4, x_5, x_16, x_17, x_21, x_7, x_11); return x_22; } else @@ -16376,8 +16390,7 @@ x_24 = 1; lean_inc(x_17); x_25 = l_Lean_Option_setIfNotSet___at_Lean_Language_Lean_process_processHeader___spec__2(x_17, x_23, x_24); x_26 = lean_box(0); -x_27 = l_Lean_Language_Lean_process_processHeader___lambda__6(x_6, x_1, x_2, x_15, x_3, x_4, x_5, x_17, x_16, x_25, x_26, x_7, x_11); -lean_dec(x_17); +x_27 = l_Lean_Language_Lean_process_processHeader___lambda__6(x_1, x_6, x_17, x_2, x_15, x_3, x_4, x_5, x_16, x_25, x_26, x_7, x_11); return x_27; } } @@ -16497,14 +16510,13 @@ LEAN_EXPORT lean_object* l_Lean_Language_Lean_process_processHeader___lambda__5_ _start: { double x_14; double x_15; lean_object* x_16; -x_14 = lean_unbox_float(x_4); -lean_dec(x_4); +x_14 = lean_unbox_float(x_5); +lean_dec(x_5); x_15 = lean_unbox_float(x_9); lean_dec(x_9); -x_16 = l_Lean_Language_Lean_process_processHeader___lambda__5(x_1, x_2, x_3, x_14, x_5, x_6, x_7, x_8, x_15, x_10, x_11, x_12, x_13); +x_16 = l_Lean_Language_Lean_process_processHeader___lambda__5(x_1, x_2, x_3, x_4, x_14, x_6, x_7, x_8, x_15, x_10, x_11, x_12, x_13); lean_dec(x_12); lean_dec(x_11); -lean_dec(x_8); return x_16; } } @@ -16512,14 +16524,13 @@ LEAN_EXPORT lean_object* l_Lean_Language_Lean_process_processHeader___lambda__6_ _start: { double x_14; double x_15; lean_object* x_16; -x_14 = lean_unbox_float(x_4); -lean_dec(x_4); +x_14 = lean_unbox_float(x_5); +lean_dec(x_5); x_15 = lean_unbox_float(x_9); lean_dec(x_9); -x_16 = l_Lean_Language_Lean_process_processHeader___lambda__6(x_1, x_2, x_3, x_14, x_5, x_6, x_7, x_8, x_15, x_10, x_11, x_12, x_13); +x_16 = l_Lean_Language_Lean_process_processHeader___lambda__6(x_1, x_2, x_3, x_4, x_14, x_6, x_7, x_8, x_15, x_10, x_11, x_12, x_13); lean_dec(x_12); lean_dec(x_11); -lean_dec(x_8); return x_16; } } @@ -20233,21 +20244,21 @@ l___private_Lean_Language_Lean_0__Lean_Language_Lean_getNiceCommandStartPos_x3f_ lean_mark_persistent(l___private_Lean_Language_Lean_0__Lean_Language_Lean_getNiceCommandStartPos_x3f___closed__4); l___private_Lean_Language_Lean_0__Lean_Language_Lean_getNiceCommandStartPos_x3f___closed__5 = _init_l___private_Lean_Language_Lean_0__Lean_Language_Lean_getNiceCommandStartPos_x3f___closed__5(); lean_mark_persistent(l___private_Lean_Language_Lean_0__Lean_Language_Lean_getNiceCommandStartPos_x3f___closed__5); -l_Lean_Language_Lean_initFn____x40_Lean_Language_Lean___hyg_1166____closed__1 = _init_l_Lean_Language_Lean_initFn____x40_Lean_Language_Lean___hyg_1166____closed__1(); -lean_mark_persistent(l_Lean_Language_Lean_initFn____x40_Lean_Language_Lean___hyg_1166____closed__1); -l_Lean_Language_Lean_initFn____x40_Lean_Language_Lean___hyg_1166____closed__2 = _init_l_Lean_Language_Lean_initFn____x40_Lean_Language_Lean___hyg_1166____closed__2(); -lean_mark_persistent(l_Lean_Language_Lean_initFn____x40_Lean_Language_Lean___hyg_1166____closed__2); 
-l_Lean_Language_Lean_initFn____x40_Lean_Language_Lean___hyg_1166____closed__3 = _init_l_Lean_Language_Lean_initFn____x40_Lean_Language_Lean___hyg_1166____closed__3(); -lean_mark_persistent(l_Lean_Language_Lean_initFn____x40_Lean_Language_Lean___hyg_1166____closed__3); -l_Lean_Language_Lean_initFn____x40_Lean_Language_Lean___hyg_1166____closed__4 = _init_l_Lean_Language_Lean_initFn____x40_Lean_Language_Lean___hyg_1166____closed__4(); -lean_mark_persistent(l_Lean_Language_Lean_initFn____x40_Lean_Language_Lean___hyg_1166____closed__4); -l_Lean_Language_Lean_initFn____x40_Lean_Language_Lean___hyg_1166____closed__5 = _init_l_Lean_Language_Lean_initFn____x40_Lean_Language_Lean___hyg_1166____closed__5(); -lean_mark_persistent(l_Lean_Language_Lean_initFn____x40_Lean_Language_Lean___hyg_1166____closed__5); -l_Lean_Language_Lean_initFn____x40_Lean_Language_Lean___hyg_1166____closed__6 = _init_l_Lean_Language_Lean_initFn____x40_Lean_Language_Lean___hyg_1166____closed__6(); -lean_mark_persistent(l_Lean_Language_Lean_initFn____x40_Lean_Language_Lean___hyg_1166____closed__6); -l_Lean_Language_Lean_initFn____x40_Lean_Language_Lean___hyg_1166____closed__7 = _init_l_Lean_Language_Lean_initFn____x40_Lean_Language_Lean___hyg_1166____closed__7(); -lean_mark_persistent(l_Lean_Language_Lean_initFn____x40_Lean_Language_Lean___hyg_1166____closed__7); -if (builtin) {res = l_Lean_Language_Lean_initFn____x40_Lean_Language_Lean___hyg_1166_(lean_io_mk_world()); +l_Lean_Language_Lean_initFn____x40_Lean_Language_Lean___hyg_1200____closed__1 = _init_l_Lean_Language_Lean_initFn____x40_Lean_Language_Lean___hyg_1200____closed__1(); +lean_mark_persistent(l_Lean_Language_Lean_initFn____x40_Lean_Language_Lean___hyg_1200____closed__1); +l_Lean_Language_Lean_initFn____x40_Lean_Language_Lean___hyg_1200____closed__2 = _init_l_Lean_Language_Lean_initFn____x40_Lean_Language_Lean___hyg_1200____closed__2(); +lean_mark_persistent(l_Lean_Language_Lean_initFn____x40_Lean_Language_Lean___hyg_1200____closed__2); +l_Lean_Language_Lean_initFn____x40_Lean_Language_Lean___hyg_1200____closed__3 = _init_l_Lean_Language_Lean_initFn____x40_Lean_Language_Lean___hyg_1200____closed__3(); +lean_mark_persistent(l_Lean_Language_Lean_initFn____x40_Lean_Language_Lean___hyg_1200____closed__3); +l_Lean_Language_Lean_initFn____x40_Lean_Language_Lean___hyg_1200____closed__4 = _init_l_Lean_Language_Lean_initFn____x40_Lean_Language_Lean___hyg_1200____closed__4(); +lean_mark_persistent(l_Lean_Language_Lean_initFn____x40_Lean_Language_Lean___hyg_1200____closed__4); +l_Lean_Language_Lean_initFn____x40_Lean_Language_Lean___hyg_1200____closed__5 = _init_l_Lean_Language_Lean_initFn____x40_Lean_Language_Lean___hyg_1200____closed__5(); +lean_mark_persistent(l_Lean_Language_Lean_initFn____x40_Lean_Language_Lean___hyg_1200____closed__5); +l_Lean_Language_Lean_initFn____x40_Lean_Language_Lean___hyg_1200____closed__6 = _init_l_Lean_Language_Lean_initFn____x40_Lean_Language_Lean___hyg_1200____closed__6(); +lean_mark_persistent(l_Lean_Language_Lean_initFn____x40_Lean_Language_Lean___hyg_1200____closed__6); +l_Lean_Language_Lean_initFn____x40_Lean_Language_Lean___hyg_1200____closed__7 = _init_l_Lean_Language_Lean_initFn____x40_Lean_Language_Lean___hyg_1200____closed__7(); +lean_mark_persistent(l_Lean_Language_Lean_initFn____x40_Lean_Language_Lean___hyg_1200____closed__7); +if (builtin) {res = l_Lean_Language_Lean_initFn____x40_Lean_Language_Lean___hyg_1200_(lean_io_mk_world()); if (lean_io_result_is_error(res)) return res; l_Lean_Language_Lean_experimental_module = 
lean_io_result_get_value(res); lean_mark_persistent(l_Lean_Language_Lean_experimental_module); diff --git a/stage0/stdlib/Lean/Message.c b/stage0/stdlib/Lean/Message.c index 2a8c659750ec..55123fd600db 100644 --- a/stage0/stdlib/Lean/Message.c +++ b/stage0/stdlib/Lean/Message.c @@ -77,7 +77,6 @@ static lean_object* l_Lean_Kernel_Exception_toMessageData___closed__32; LEAN_EXPORT lean_object* l_Lean_PersistentArray_mapM___at_Lean_MessageLog_errorsToInfos___spec__1(lean_object*); LEAN_EXPORT lean_object* l_Lean_MessageData_ofSyntax___elambda__2(lean_object*, lean_object*, lean_object*); LEAN_EXPORT lean_object* l_Lean_MessageData_joinSep(lean_object*, lean_object*); -static lean_object* l_Lean_Json_getObjValAs_x3f___at___private_Lean_Message_0__Lean_fromJsonSerialMessage____x40_Lean_Message___hyg_3661____spec__1___closed__3; static lean_object* l_Lean_MessageData_formatAux___closed__1; static lean_object* l_Lean_MessageData_formatAux___lambda__2___closed__4; lean_object* l_Lean_Name_toString(lean_object*, uint8_t, lean_object*); @@ -191,7 +190,6 @@ LEAN_EXPORT lean_object* l_Lean_instToMessageDataOfToFormat___rarg(lean_object*) lean_object* lean_string_utf8_byte_size(lean_object*); static lean_object* l___private_Lean_Message_0__Lean_fromJsonMessageSeverity____x40_Lean_Message___hyg_164____lambda__1___closed__2; static lean_object* l___private_Lean_Message_0__Lean_fromJsonMessageSeverity____x40_Lean_Message___hyg_164____lambda__2___closed__3; -lean_object* l_Lean_Json_getBool_x3f(lean_object*); static lean_object* l_Lean_aquote___closed__5; static lean_object* l_Lean_MessageData_instCoeString___closed__3; LEAN_EXPORT lean_object* l_Lean_addMessageContextFull(lean_object*); @@ -230,7 +228,6 @@ size_t lean_usize_of_nat(lean_object*); static lean_object* l_Lean_MessageData_instCoeArrayExpr___closed__2; static lean_object* l___private_Lean_Message_0__Lean_toJsonBaseMessage____x40_Lean_Message___hyg_2974____rarg___closed__8; LEAN_EXPORT lean_object* l_Lean_instToMessageDataExpr; -LEAN_EXPORT lean_object* l_Lean_Json_getObjValAs_x3f___at___private_Lean_Message_0__Lean_fromJsonBaseMessage____x40_Lean_Message___hyg_3128____spec__5(lean_object*, lean_object*); LEAN_EXPORT lean_object* l_Array_anyMUnsafe_any___at_Lean_MessageData_hasSyntheticSorry_visit___spec__1___boxed(lean_object*, lean_object*, lean_object*, lean_object*); static lean_object* l_Lean_MessageData_ofList___closed__6; LEAN_EXPORT lean_object* l_Lean_MessageLog_errorsToInfos(lean_object*); @@ -292,7 +289,6 @@ static lean_object* l___private_Lean_Message_0__Lean_toJsonMessageSeverity____x4 static lean_object* l_Lean___aux__Lean__Message______macroRules__Lean__termM_x21____1___closed__11; LEAN_EXPORT lean_object* l_Array_mapMUnsafe_map___at_Lean_MessageLog_errorsToInfos___spec__4(size_t, size_t, lean_object*); LEAN_EXPORT lean_object* l_Lean_MessageData_ofExpr___elambda__1___boxed(lean_object*, lean_object*); -LEAN_EXPORT lean_object* l_Lean_Json_getObjValAs_x3f___at___private_Lean_Message_0__Lean_fromJsonSerialMessage____x40_Lean_Message___hyg_3661____spec__1___lambda__1___boxed(lean_object*, lean_object*); static lean_object* l_Lean_MessageData_initFn____x40_Lean_Message___hyg_1428____closed__3; LEAN_EXPORT lean_object* l_Lean_instBEqMessageSeverity; LEAN_EXPORT lean_object* l_Lean_Message_kind(lean_object*); @@ -301,13 +297,11 @@ LEAN_EXPORT lean_object* l_Lean_instTypeNameMessageData; LEAN_EXPORT lean_object* l_Lean_MessageData_formatAux___lambda__4(lean_object*, lean_object*, lean_object*, lean_object*, lean_object*, lean_object*, 
lean_object*, lean_object*, lean_object*); lean_object* lean_array_pop(lean_object*); static lean_object* l_Lean_Kernel_Exception_toMessageData___closed__38; -static lean_object* l_Lean_Json_getObjValAs_x3f___at___private_Lean_Message_0__Lean_fromJsonSerialMessage____x40_Lean_Message___hyg_3661____spec__1___closed__1; LEAN_EXPORT lean_object* l_Lean_MessageSeverity_noConfusion___rarg(uint8_t, uint8_t, lean_object*); LEAN_EXPORT lean_object* l_Array_mapMUnsafe_map___at_Lean_MessageLog_errorsToInfos___spec__3(size_t, size_t, lean_object*); LEAN_EXPORT lean_object* l_Lean_instInhabitedMessageLog; LEAN_EXPORT lean_object* l_Lean_instToMessageDataString; static lean_object* l___private_Lean_Message_0__Lean_fromJsonSerialMessage____x40_Lean_Message___hyg_3661____closed__22; -static lean_object* l_Lean_Json_getObjValAs_x3f___at___private_Lean_Message_0__Lean_fromJsonSerialMessage____x40_Lean_Message___hyg_3661____spec__1___closed__2; lean_object* lean_array_to_list(lean_object*); LEAN_EXPORT lean_object* l_Array_mapMUnsafe_map___at_Lean_MessageLog_errorsToInfos___spec__3___boxed(lean_object*, lean_object*, lean_object*); static lean_object* l___private_Lean_Data_PersistentArray_0__Lean_PersistentArray_foldlFromMAux___at_Lean_MessageLog_getInfoMessages___spec__2___closed__1; @@ -336,7 +330,6 @@ LEAN_EXPORT lean_object* l_Array_anyMUnsafe_any___at_Lean_MessageLog_hasErrors__ static lean_object* l___private_Lean_Message_0__Lean_fromJsonBaseMessage____x40_Lean_Message___hyg_3128____rarg___closed__2; static lean_object* l___private_Lean_Message_0__Lean_fromJsonSerialMessage____x40_Lean_Message___hyg_3661____closed__6; static lean_object* l___private_Lean_Message_0__Lean_fromJsonMessageSeverity____x40_Lean_Message___hyg_164____lambda__3___closed__1; -LEAN_EXPORT lean_object* l_Lean_Json_getObjValAs_x3f___at___private_Lean_Message_0__Lean_fromJsonBaseMessage____x40_Lean_Message___hyg_3128____spec__5___boxed(lean_object*, lean_object*); LEAN_EXPORT uint8_t l_Lean_PersistentArray_anyMAux___at_Lean_MessageLog_hasErrors___spec__2(lean_object*); LEAN_EXPORT lean_object* l_Lean_instFromJsonBaseMessage(lean_object*); lean_object* l_Lean_ppConstNameWithInfos(lean_object*, lean_object*, lean_object*); @@ -356,6 +349,7 @@ LEAN_EXPORT lean_object* l_Lean_MessageData_ofConstName___elambda__2___boxed(lea lean_object* l_Lean_Name_str___override(lean_object*, lean_object*); static lean_object* l___private_Lean_Message_0__Lean_fromJsonSerialMessage____x40_Lean_Message___hyg_3661____closed__11; static lean_object* l___private_Lean_Message_0__Lean_fromJsonBaseMessage____x40_Lean_Message___hyg_3128____rarg___closed__8; +lean_object* l_Lean_Json_getObjValAs_x3f___at___private_Lean_Setup_0__Lean_fromJsonImport____x40_Lean_Setup___hyg_190____spec__2(lean_object*, lean_object*); static lean_object* l___private_Lean_Message_0__Lean_toJsonBaseMessage____x40_Lean_Message___hyg_2974____rarg___closed__10; LEAN_EXPORT lean_object* l_Array_mapMUnsafe_map___at_Lean_MessageData_formatAux___spec__3(lean_object*, lean_object*, size_t, size_t, lean_object*, lean_object*); LEAN_EXPORT lean_object* l_Lean_instToJsonSerialMessage; @@ -502,7 +496,6 @@ LEAN_EXPORT lean_object* l___private_Lean_Message_0__Lean_toJsonBaseMessage____x uint8_t lean_float_beq(double, double); lean_object* l_Lean_Level_format(lean_object*, uint8_t); LEAN_EXPORT lean_object* l_Lean_instToJsonMessageSeverity; -lean_object* l_String_toName(lean_object*); LEAN_EXPORT lean_object* 
l___private_Lean_Data_PersistentArray_0__Lean_PersistentArray_foldlMAux___at_Lean_MessageLog_getInfoMessages___spec__3(lean_object*, lean_object*); LEAN_EXPORT lean_object* l_Array_foldlMUnsafe_fold___at_Lean_MessageLog_getInfoMessages___spec__5(lean_object*, size_t, size_t, lean_object*); static lean_object* l_Lean_aquote___closed__2; @@ -513,7 +506,6 @@ lean_object* l_Except_orElseLazy___rarg(lean_object*, lean_object*); static lean_object* l_Lean_MessageData_instCoeArrayExpr___closed__1; lean_object* lean_nat_sub(lean_object*, lean_object*); LEAN_EXPORT lean_object* l_Array_mapMUnsafe_map___at_Lean_MessageData_formatAux___spec__1___boxed(lean_object*, lean_object*, lean_object*, lean_object*, lean_object*, lean_object*); -LEAN_EXPORT lean_object* l_Lean_Json_getObjValAs_x3f___at___private_Lean_Message_0__Lean_fromJsonSerialMessage____x40_Lean_Message___hyg_3661____spec__1___lambda__1(lean_object*, lean_object*); static lean_object* l___private_Lean_Message_0__Lean_fromJsonBaseMessage____x40_Lean_Message___hyg_3128____rarg___closed__7; static lean_object* l_Lean_instInhabitedMessageLog___closed__2; LEAN_EXPORT lean_object* l_Lean_addMessageContextPartial(lean_object*); @@ -526,6 +518,7 @@ static lean_object* l_Lean_termM_x21_____closed__9; static lean_object* l_Lean_instFromJsonSerialMessage___closed__1; LEAN_EXPORT lean_object* l_Array_mapMUnsafe_map___at_Lean_MessageLog_errorsToInfos___spec__4___boxed(lean_object*, lean_object*, lean_object*); LEAN_EXPORT lean_object* l_Lean_toMessageList(lean_object*); +lean_object* l_Lean_Json_getObjValAs_x3f___at___private_Lean_Setup_0__Lean_fromJsonImport____x40_Lean_Setup___hyg_190____spec__1(lean_object*, lean_object*); static lean_object* l___private_Lean_Message_0__Lean_toJsonMessageSeverity____x40_Lean_Message___hyg_125____closed__1; static lean_object* l_Lean_instInhabitedMessageLog___closed__5; LEAN_EXPORT lean_object* l_Array_foldlMUnsafe_fold___at_Lean_MessageLog_getInfoMessages___spec__4(lean_object*, size_t, size_t, lean_object*); @@ -592,7 +585,6 @@ static lean_object* l_Lean_Kernel_Exception_toMessageData___closed__20; LEAN_EXPORT lean_object* l___private_Lean_Message_0__Lean_toJsonSerialMessage____x40_Lean_Message___hyg_3511_(lean_object*); LEAN_EXPORT lean_object* l_Lean_instInhabitedBaseMessage(lean_object*); lean_object* l_Lean_ppGoal(lean_object*, lean_object*, lean_object*); -LEAN_EXPORT lean_object* l_Lean_Json_getObjValAs_x3f___at___private_Lean_Message_0__Lean_fromJsonSerialMessage____x40_Lean_Message___hyg_3661____spec__1___boxed(lean_object*, lean_object*); lean_object* lean_string_append(lean_object*, lean_object*); static lean_object* l_Lean_termM_x21_____closed__6; static lean_object* l_Lean_MessageData_ofSyntax___closed__1; @@ -609,7 +601,6 @@ static lean_object* l_Lean_Kernel_Exception_toMessageData___closed__5; LEAN_EXPORT lean_object* l_Lean_MessageData_instCoeExpr; LEAN_EXPORT lean_object* l_Lean_instToMessageDataSubarray___rarg(lean_object*, lean_object*); LEAN_EXPORT lean_object* l_Lean_Message_toString___boxed(lean_object*, lean_object*, lean_object*); -LEAN_EXPORT lean_object* l_Lean_Json_getObjValAs_x3f___at___private_Lean_Message_0__Lean_fromJsonSerialMessage____x40_Lean_Message___hyg_3661____spec__1(lean_object*, lean_object*); lean_object* lean_array_get(lean_object*, lean_object*, lean_object*); LEAN_EXPORT lean_object* l___private_Lean_Message_0__Lean_fromJsonSerialMessage____x40_Lean_Message___hyg_3661_(lean_object*); uint8_t lean_nat_dec_le(lean_object*, lean_object*); @@ -622,10 +613,12 @@ LEAN_EXPORT 
lean_object* l_Lean_MessageData_kind___boxed(lean_object*); LEAN_EXPORT lean_object* l_Array_mapMUnsafe_map___at_Lean_MessageData_formatAux___spec__3___boxed(lean_object*, lean_object*, lean_object*, lean_object*, lean_object*, lean_object*); LEAN_EXPORT lean_object* l_Lean_instToJsonBaseMessage(lean_object*); LEAN_EXPORT lean_object* l_Lean_addMessageContextPartial___rarg___lambda__1(lean_object*, lean_object*, lean_object*, lean_object*); +LEAN_EXPORT lean_object* l_Lean_toTraceElem___rarg(lean_object*, lean_object*, lean_object*); lean_object* lean_expr_dbg_to_string(lean_object*); static lean_object* l_Lean_mkErrorStringWithPos___closed__1; LEAN_EXPORT lean_object* l_Lean_MessageData_instCoeName; lean_object* lean_nat_add(lean_object*, lean_object*); +LEAN_EXPORT lean_object* l_Lean_toTraceElem(lean_object*); LEAN_EXPORT lean_object* l___private_Lean_Message_0__Lean_toJsonMessageSeverity____x40_Lean_Message___hyg_125_(uint8_t); LEAN_EXPORT lean_object* l_Lean_instToMessageDataList___rarg(lean_object*, lean_object*); LEAN_EXPORT lean_object* l_Lean_MessageData_instCoeArrayExpr___boxed(lean_object*); @@ -635,7 +628,6 @@ static lean_object* l_Lean_MessageData_instCoeString___closed__1; LEAN_EXPORT lean_object* l_Lean_SerialMessage_instToString(lean_object*); static lean_object* l___private_Lean_Message_0__Lean_fromJsonBaseMessage____x40_Lean_Message___hyg_3128____rarg___closed__32; static lean_object* l_Lean_termM_x21_____closed__14; -lean_object* l_Lean_Json_pretty(lean_object*, lean_object*); static lean_object* l___private_Lean_Message_0__Lean_fromJsonBaseMessage____x40_Lean_Message___hyg_3128____rarg___closed__31; LEAN_EXPORT lean_object* l_Lean_instFromJsonBaseMessage___rarg(lean_object*); static lean_object* l_Lean_instToMessageDataOptionExpr___closed__2; @@ -6705,16 +6697,6 @@ LEAN_EXPORT lean_object* l_Lean_Json_getObjValAs_x3f___at___private_Lean_Message { lean_object* x_3; lean_object* x_4; x_3 = l_Lean_Json_getObjValD(x_1, x_2); -x_4 = l_Lean_Json_getBool_x3f(x_3); -lean_dec(x_3); -return x_4; -} -} -LEAN_EXPORT lean_object* l_Lean_Json_getObjValAs_x3f___at___private_Lean_Message_0__Lean_fromJsonBaseMessage____x40_Lean_Message___hyg_3128____spec__5(lean_object* x_1, lean_object* x_2) { -_start: -{ -lean_object* x_3; lean_object* x_4; -x_3 = l_Lean_Json_getObjValD(x_1, x_2); x_4 = l___private_Lean_Message_0__Lean_fromJsonMessageSeverity____x40_Lean_Message___hyg_164_(x_3); return x_4; } @@ -7220,7 +7202,7 @@ lean_inc(x_35); lean_dec(x_26); x_36 = l___private_Lean_Message_0__Lean_toJsonBaseMessage____x40_Lean_Message___hyg_2974____rarg___closed__3; lean_inc(x_2); -x_37 = l_Lean_Json_getObjValAs_x3f___at___private_Lean_Message_0__Lean_fromJsonBaseMessage____x40_Lean_Message___hyg_3128____spec__4(x_2, x_36); +x_37 = l_Lean_Json_getObjValAs_x3f___at___private_Lean_Setup_0__Lean_fromJsonImport____x40_Lean_Setup___hyg_190____spec__2(x_2, x_36); if (lean_obj_tag(x_37) == 0) { uint8_t x_38; @@ -7262,7 +7244,7 @@ lean_inc(x_46); lean_dec(x_37); x_47 = l___private_Lean_Message_0__Lean_toJsonBaseMessage____x40_Lean_Message___hyg_2974____rarg___closed__4; lean_inc(x_2); -x_48 = l_Lean_Json_getObjValAs_x3f___at___private_Lean_Message_0__Lean_fromJsonBaseMessage____x40_Lean_Message___hyg_3128____spec__5(x_2, x_47); +x_48 = l_Lean_Json_getObjValAs_x3f___at___private_Lean_Message_0__Lean_fromJsonBaseMessage____x40_Lean_Message___hyg_3128____spec__4(x_2, x_47); if (lean_obj_tag(x_48) == 0) { uint8_t x_49; @@ -7305,7 +7287,7 @@ lean_inc(x_57); lean_dec(x_48); x_58 = 
l___private_Lean_Message_0__Lean_toJsonBaseMessage____x40_Lean_Message___hyg_2974____rarg___closed__5; lean_inc(x_2); -x_59 = l_Lean_Json_getObjValAs_x3f___at___private_Lean_Message_0__Lean_fromJsonBaseMessage____x40_Lean_Message___hyg_3128____spec__4(x_2, x_58); +x_59 = l_Lean_Json_getObjValAs_x3f___at___private_Lean_Setup_0__Lean_fromJsonImport____x40_Lean_Setup___hyg_190____spec__2(x_2, x_58); if (lean_obj_tag(x_59) == 0) { uint8_t x_60; @@ -7534,15 +7516,6 @@ lean_dec(x_2); return x_3; } } -LEAN_EXPORT lean_object* l_Lean_Json_getObjValAs_x3f___at___private_Lean_Message_0__Lean_fromJsonBaseMessage____x40_Lean_Message___hyg_3128____spec__5___boxed(lean_object* x_1, lean_object* x_2) { -_start: -{ -lean_object* x_3; -x_3 = l_Lean_Json_getObjValAs_x3f___at___private_Lean_Message_0__Lean_fromJsonBaseMessage____x40_Lean_Message___hyg_3128____spec__5(x_1, x_2); -lean_dec(x_2); -return x_3; -} -} LEAN_EXPORT lean_object* l_Lean_instFromJsonBaseMessage___rarg(lean_object* x_1) { _start: { @@ -7747,164 +7720,6 @@ x_1 = l_Lean_instToJsonSerialMessage___closed__1; return x_1; } } -LEAN_EXPORT lean_object* l_Lean_Json_getObjValAs_x3f___at___private_Lean_Message_0__Lean_fromJsonSerialMessage____x40_Lean_Message___hyg_3661____spec__1___lambda__1(lean_object* x_1, lean_object* x_2) { -_start: -{ -lean_object* x_3; -x_3 = lean_alloc_ctor(1, 1, 0); -lean_ctor_set(x_3, 0, x_1); -return x_3; -} -} -static lean_object* _init_l_Lean_Json_getObjValAs_x3f___at___private_Lean_Message_0__Lean_fromJsonSerialMessage____x40_Lean_Message___hyg_3661____spec__1___closed__1() { -_start: -{ -lean_object* x_1; -x_1 = lean_mk_string_unchecked("[anonymous]", 11, 11); -return x_1; -} -} -static lean_object* _init_l_Lean_Json_getObjValAs_x3f___at___private_Lean_Message_0__Lean_fromJsonSerialMessage____x40_Lean_Message___hyg_3661____spec__1___closed__2() { -_start: -{ -lean_object* x_1; -x_1 = lean_mk_string_unchecked("expected a `Name`, got '", 24, 24); -return x_1; -} -} -static lean_object* _init_l_Lean_Json_getObjValAs_x3f___at___private_Lean_Message_0__Lean_fromJsonSerialMessage____x40_Lean_Message___hyg_3661____spec__1___closed__3() { -_start: -{ -lean_object* x_1; lean_object* x_2; -x_1 = lean_box(0); -x_2 = lean_alloc_ctor(1, 1, 0); -lean_ctor_set(x_2, 0, x_1); -return x_2; -} -} -LEAN_EXPORT lean_object* l_Lean_Json_getObjValAs_x3f___at___private_Lean_Message_0__Lean_fromJsonSerialMessage____x40_Lean_Message___hyg_3661____spec__1(lean_object* x_1, lean_object* x_2) { -_start: -{ -lean_object* x_3; lean_object* x_4; -x_3 = l_Lean_Json_getObjValD(x_1, x_2); -lean_inc(x_3); -x_4 = l_Lean_Json_getStr_x3f(x_3); -if (lean_obj_tag(x_4) == 0) -{ -uint8_t x_5; -lean_dec(x_3); -x_5 = !lean_is_exclusive(x_4); -if (x_5 == 0) -{ -return x_4; -} -else -{ -lean_object* x_6; lean_object* x_7; -x_6 = lean_ctor_get(x_4, 0); -lean_inc(x_6); -lean_dec(x_4); -x_7 = lean_alloc_ctor(0, 1, 0); -lean_ctor_set(x_7, 0, x_6); -return x_7; -} -} -else -{ -uint8_t x_8; -x_8 = !lean_is_exclusive(x_4); -if (x_8 == 0) -{ -lean_object* x_9; lean_object* x_10; uint8_t x_11; -x_9 = lean_ctor_get(x_4, 0); -x_10 = l_Lean_Json_getObjValAs_x3f___at___private_Lean_Message_0__Lean_fromJsonSerialMessage____x40_Lean_Message___hyg_3661____spec__1___closed__1; -x_11 = lean_string_dec_eq(x_9, x_10); -if (x_11 == 0) -{ -lean_object* x_12; uint8_t x_13; -x_12 = l_String_toName(x_9); -x_13 = l_Lean_Name_isAnonymous(x_12); -if (x_13 == 0) -{ -lean_dec(x_3); -lean_ctor_set(x_4, 0, x_12); -return x_4; -} -else -{ -lean_object* x_14; lean_object* x_15; lean_object* 
x_16; lean_object* x_17; lean_object* x_18; lean_object* x_19; -lean_dec(x_12); -x_14 = lean_unsigned_to_nat(80u); -x_15 = l_Lean_Json_pretty(x_3, x_14); -x_16 = l_Lean_Json_getObjValAs_x3f___at___private_Lean_Message_0__Lean_fromJsonSerialMessage____x40_Lean_Message___hyg_3661____spec__1___closed__2; -x_17 = lean_string_append(x_16, x_15); -lean_dec(x_15); -x_18 = l_List_mapTR_loop___at_Lean_MessageData_orList___spec__1___closed__1; -x_19 = lean_string_append(x_17, x_18); -lean_ctor_set_tag(x_4, 0); -lean_ctor_set(x_4, 0, x_19); -return x_4; -} -} -else -{ -lean_object* x_20; -lean_free_object(x_4); -lean_dec(x_9); -lean_dec(x_3); -x_20 = l_Lean_Json_getObjValAs_x3f___at___private_Lean_Message_0__Lean_fromJsonSerialMessage____x40_Lean_Message___hyg_3661____spec__1___closed__3; -return x_20; -} -} -else -{ -lean_object* x_21; lean_object* x_22; uint8_t x_23; -x_21 = lean_ctor_get(x_4, 0); -lean_inc(x_21); -lean_dec(x_4); -x_22 = l_Lean_Json_getObjValAs_x3f___at___private_Lean_Message_0__Lean_fromJsonSerialMessage____x40_Lean_Message___hyg_3661____spec__1___closed__1; -x_23 = lean_string_dec_eq(x_21, x_22); -if (x_23 == 0) -{ -lean_object* x_24; uint8_t x_25; -x_24 = l_String_toName(x_21); -x_25 = l_Lean_Name_isAnonymous(x_24); -if (x_25 == 0) -{ -lean_object* x_26; -lean_dec(x_3); -x_26 = lean_alloc_ctor(1, 1, 0); -lean_ctor_set(x_26, 0, x_24); -return x_26; -} -else -{ -lean_object* x_27; lean_object* x_28; lean_object* x_29; lean_object* x_30; lean_object* x_31; lean_object* x_32; lean_object* x_33; -lean_dec(x_24); -x_27 = lean_unsigned_to_nat(80u); -x_28 = l_Lean_Json_pretty(x_3, x_27); -x_29 = l_Lean_Json_getObjValAs_x3f___at___private_Lean_Message_0__Lean_fromJsonSerialMessage____x40_Lean_Message___hyg_3661____spec__1___closed__2; -x_30 = lean_string_append(x_29, x_28); -lean_dec(x_28); -x_31 = l_List_mapTR_loop___at_Lean_MessageData_orList___spec__1___closed__1; -x_32 = lean_string_append(x_30, x_31); -x_33 = lean_alloc_ctor(0, 1, 0); -lean_ctor_set(x_33, 0, x_32); -return x_33; -} -} -else -{ -lean_object* x_34; -lean_dec(x_21); -lean_dec(x_3); -x_34 = l_Lean_Json_getObjValAs_x3f___at___private_Lean_Message_0__Lean_fromJsonSerialMessage____x40_Lean_Message___hyg_3661____spec__1___closed__3; -return x_34; -} -} -} -} -} static lean_object* _init_l___private_Lean_Message_0__Lean_fromJsonSerialMessage____x40_Lean_Message___hyg_3661____closed__1() { _start: { @@ -8268,7 +8083,7 @@ lean_inc(x_34); lean_dec(x_25); x_35 = l___private_Lean_Message_0__Lean_toJsonBaseMessage____x40_Lean_Message___hyg_2974____rarg___closed__3; lean_inc(x_1); -x_36 = l_Lean_Json_getObjValAs_x3f___at___private_Lean_Message_0__Lean_fromJsonBaseMessage____x40_Lean_Message___hyg_3128____spec__4(x_1, x_35); +x_36 = l_Lean_Json_getObjValAs_x3f___at___private_Lean_Setup_0__Lean_fromJsonImport____x40_Lean_Setup___hyg_190____spec__2(x_1, x_35); if (lean_obj_tag(x_36) == 0) { uint8_t x_37; @@ -8309,7 +8124,7 @@ lean_inc(x_45); lean_dec(x_36); x_46 = l___private_Lean_Message_0__Lean_toJsonBaseMessage____x40_Lean_Message___hyg_2974____rarg___closed__4; lean_inc(x_1); -x_47 = l_Lean_Json_getObjValAs_x3f___at___private_Lean_Message_0__Lean_fromJsonBaseMessage____x40_Lean_Message___hyg_3128____spec__5(x_1, x_46); +x_47 = l_Lean_Json_getObjValAs_x3f___at___private_Lean_Message_0__Lean_fromJsonBaseMessage____x40_Lean_Message___hyg_3128____spec__4(x_1, x_46); if (lean_obj_tag(x_47) == 0) { uint8_t x_48; @@ -8351,7 +8166,7 @@ lean_inc(x_56); lean_dec(x_47); x_57 = 
l___private_Lean_Message_0__Lean_toJsonBaseMessage____x40_Lean_Message___hyg_2974____rarg___closed__5; lean_inc(x_1); -x_58 = l_Lean_Json_getObjValAs_x3f___at___private_Lean_Message_0__Lean_fromJsonBaseMessage____x40_Lean_Message___hyg_3128____spec__4(x_1, x_57); +x_58 = l_Lean_Json_getObjValAs_x3f___at___private_Lean_Setup_0__Lean_fromJsonImport____x40_Lean_Setup___hyg_190____spec__2(x_1, x_57); if (lean_obj_tag(x_58) == 0) { uint8_t x_59; @@ -8482,7 +8297,7 @@ x_89 = lean_ctor_get(x_80, 0); lean_inc(x_89); lean_dec(x_80); x_90 = l___private_Lean_Message_0__Lean_toJsonSerialMessage____x40_Lean_Message___hyg_3511____closed__1; -x_91 = l_Lean_Json_getObjValAs_x3f___at___private_Lean_Message_0__Lean_fromJsonSerialMessage____x40_Lean_Message___hyg_3661____spec__1(x_1, x_90); +x_91 = l_Lean_Json_getObjValAs_x3f___at___private_Lean_Setup_0__Lean_fromJsonImport____x40_Lean_Setup___hyg_190____spec__1(x_1, x_90); if (lean_obj_tag(x_91) == 0) { uint8_t x_92; @@ -8587,24 +8402,6 @@ return x_113; } } } -LEAN_EXPORT lean_object* l_Lean_Json_getObjValAs_x3f___at___private_Lean_Message_0__Lean_fromJsonSerialMessage____x40_Lean_Message___hyg_3661____spec__1___lambda__1___boxed(lean_object* x_1, lean_object* x_2) { -_start: -{ -lean_object* x_3; -x_3 = l_Lean_Json_getObjValAs_x3f___at___private_Lean_Message_0__Lean_fromJsonSerialMessage____x40_Lean_Message___hyg_3661____spec__1___lambda__1(x_1, x_2); -lean_dec(x_2); -return x_3; -} -} -LEAN_EXPORT lean_object* l_Lean_Json_getObjValAs_x3f___at___private_Lean_Message_0__Lean_fromJsonSerialMessage____x40_Lean_Message___hyg_3661____spec__1___boxed(lean_object* x_1, lean_object* x_2) { -_start: -{ -lean_object* x_3; -x_3 = l_Lean_Json_getObjValAs_x3f___at___private_Lean_Message_0__Lean_fromJsonSerialMessage____x40_Lean_Message___hyg_3661____spec__1(x_1, x_2); -lean_dec(x_2); -return x_3; -} -} static lean_object* _init_l_Lean_instFromJsonSerialMessage___closed__1() { _start: { @@ -13456,6 +13253,36 @@ return x_196; } } } +LEAN_EXPORT lean_object* l_Lean_toTraceElem___rarg(lean_object* x_1, lean_object* x_2, lean_object* x_3) { +_start: +{ +double x_4; uint8_t x_5; lean_object* x_6; lean_object* x_7; lean_object* x_8; lean_object* x_9; lean_object* x_10; +x_4 = l_Lean_MessageData_formatAux___lambda__3___closed__5; +x_5 = 1; +x_6 = l_Lean_mkErrorStringWithPos___closed__1; +x_7 = lean_alloc_ctor(0, 2, 17); +lean_ctor_set(x_7, 0, x_3); +lean_ctor_set(x_7, 1, x_6); +lean_ctor_set_float(x_7, sizeof(void*)*2, x_4); +lean_ctor_set_float(x_7, sizeof(void*)*2 + 8, x_4); +lean_ctor_set_uint8(x_7, sizeof(void*)*2 + 16, x_5); +x_8 = lean_apply_1(x_1, x_2); +x_9 = l___private_Lean_Message_0__Lean_fromJsonMessageSeverity____x40_Lean_Message___hyg_164____closed__1; +x_10 = lean_alloc_ctor(9, 3, 0); +lean_ctor_set(x_10, 0, x_7); +lean_ctor_set(x_10, 1, x_8); +lean_ctor_set(x_10, 2, x_9); +return x_10; +} +} +LEAN_EXPORT lean_object* l_Lean_toTraceElem(lean_object* x_1) { +_start: +{ +lean_object* x_2; +x_2 = lean_alloc_closure((void*)(l_Lean_toTraceElem___rarg), 3, 0); +return x_2; +} +} lean_object* initialize_Lean_Data_Position(uint8_t builtin, lean_object*); lean_object* initialize_Lean_Data_OpenDecl(uint8_t builtin, lean_object*); lean_object* initialize_Lean_MetavarContext(uint8_t builtin, lean_object*); @@ -13838,12 +13665,6 @@ l_Lean_instToJsonSerialMessage___closed__1 = _init_l_Lean_instToJsonSerialMessag lean_mark_persistent(l_Lean_instToJsonSerialMessage___closed__1); l_Lean_instToJsonSerialMessage = _init_l_Lean_instToJsonSerialMessage(); 
lean_mark_persistent(l_Lean_instToJsonSerialMessage); -l_Lean_Json_getObjValAs_x3f___at___private_Lean_Message_0__Lean_fromJsonSerialMessage____x40_Lean_Message___hyg_3661____spec__1___closed__1 = _init_l_Lean_Json_getObjValAs_x3f___at___private_Lean_Message_0__Lean_fromJsonSerialMessage____x40_Lean_Message___hyg_3661____spec__1___closed__1(); -lean_mark_persistent(l_Lean_Json_getObjValAs_x3f___at___private_Lean_Message_0__Lean_fromJsonSerialMessage____x40_Lean_Message___hyg_3661____spec__1___closed__1); -l_Lean_Json_getObjValAs_x3f___at___private_Lean_Message_0__Lean_fromJsonSerialMessage____x40_Lean_Message___hyg_3661____spec__1___closed__2 = _init_l_Lean_Json_getObjValAs_x3f___at___private_Lean_Message_0__Lean_fromJsonSerialMessage____x40_Lean_Message___hyg_3661____spec__1___closed__2(); -lean_mark_persistent(l_Lean_Json_getObjValAs_x3f___at___private_Lean_Message_0__Lean_fromJsonSerialMessage____x40_Lean_Message___hyg_3661____spec__1___closed__2); -l_Lean_Json_getObjValAs_x3f___at___private_Lean_Message_0__Lean_fromJsonSerialMessage____x40_Lean_Message___hyg_3661____spec__1___closed__3 = _init_l_Lean_Json_getObjValAs_x3f___at___private_Lean_Message_0__Lean_fromJsonSerialMessage____x40_Lean_Message___hyg_3661____spec__1___closed__3(); -lean_mark_persistent(l_Lean_Json_getObjValAs_x3f___at___private_Lean_Message_0__Lean_fromJsonSerialMessage____x40_Lean_Message___hyg_3661____spec__1___closed__3); l___private_Lean_Message_0__Lean_fromJsonSerialMessage____x40_Lean_Message___hyg_3661____closed__1 = _init_l___private_Lean_Message_0__Lean_fromJsonSerialMessage____x40_Lean_Message___hyg_3661____closed__1(); lean_mark_persistent(l___private_Lean_Message_0__Lean_fromJsonSerialMessage____x40_Lean_Message___hyg_3661____closed__1); l___private_Lean_Message_0__Lean_fromJsonSerialMessage____x40_Lean_Message___hyg_3661____closed__2 = _init_l___private_Lean_Message_0__Lean_fromJsonSerialMessage____x40_Lean_Message___hyg_3661____closed__2(); diff --git a/stage0/stdlib/Lean/Meta/Tactic/FunInd.c b/stage0/stdlib/Lean/Meta/Tactic/FunInd.c index 0480eb9f4267..afd992fe139e 100644 --- a/stage0/stdlib/Lean/Meta/Tactic/FunInd.c +++ b/stage0/stdlib/Lean/Meta/Tactic/FunInd.c @@ -22,7 +22,6 @@ static lean_object* l_Array_forIn_x27Unsafe_loop___at_Lean_Tactic_FunInd_deriveI LEAN_EXPORT lean_object* l_Lean_Tactic_FunInd_deriveCases___lambda__7___boxed(lean_object*, lean_object*, lean_object*, lean_object*, lean_object*, lean_object*, lean_object*, lean_object*, lean_object*); LEAN_EXPORT lean_object* l_Lean_Tactic_FunInd_lambdaTelescope1___at_Lean_Tactic_FunInd_buildInductionBody___spec__33___rarg___lambda__2___boxed(lean_object*, lean_object*, lean_object*, lean_object*, lean_object*, lean_object*, lean_object*, lean_object*, lean_object*, lean_object*, lean_object*); lean_object* l_Lean_Expr_const___override(lean_object*, lean_object*); -LEAN_EXPORT lean_object* l_Lean_Tactic_FunInd_initFn____x40_Lean_Meta_Tactic_FunInd___hyg_22849_(lean_object*); lean_object* l_Lean_Expr_fvarId_x3f(lean_object*); LEAN_EXPORT lean_object* l_Lean_throwError___at_Lean_Tactic_FunInd_rwFun___spec__1(lean_object*, lean_object*, lean_object*, lean_object*, lean_object*, lean_object*); static lean_object* l_Lean_Tactic_FunInd_abstractIndependentMVars___lambda__1___closed__1; @@ -76,6 +75,7 @@ static lean_object* l_Lean_getConstInfo___at_Lean_Tactic_FunInd_foldAndCollect__ LEAN_EXPORT lean_object* l_Lean_Meta_lambdaBoundedTelescope___at_Lean_Tactic_FunInd_lambdaTelescope1___spec__1(lean_object*); LEAN_EXPORT lean_object* 
l_Lean_Tactic_FunInd_deriveUnaryInduction_doRealize___lambda__16(lean_object*, lean_object*, uint8_t, lean_object*, lean_object*, lean_object*, lean_object*, lean_object*, lean_object*, lean_object*, lean_object*, lean_object*, lean_object*, lean_object*, lean_object*, lean_object*, lean_object*); LEAN_EXPORT lean_object* l_Lean_Tactic_FunInd_buildInductionBody___lambda__29(lean_object*, lean_object*, lean_object*, lean_object*, lean_object*, lean_object*, lean_object*, lean_object*, lean_object*, lean_object*, lean_object*, lean_object*, lean_object*, lean_object*); +LEAN_EXPORT lean_object* l_Lean_Tactic_FunInd_buildInductionBody___lambda__40___boxed(lean_object*, lean_object*, lean_object*, lean_object*, lean_object*, lean_object*, lean_object*, lean_object*, lean_object*); LEAN_EXPORT lean_object* l_Array_mapMUnsafe_map___at_Lean_Tactic_FunInd_deriveUnaryInduction_doRealize___spec__5(size_t, size_t, lean_object*); static lean_object* l_Lean_Tactic_FunInd_foldAndCollect___lambda__24___closed__3; extern lean_object* l_Lean_Elab_WF_instInhabitedEqnInfo; @@ -94,6 +94,7 @@ LEAN_EXPORT lean_object* l_Array_forIn_x27Unsafe_loop___at_Lean_Tactic_FunInd_bu LEAN_EXPORT lean_object* l_Lean_Meta_withLetDecl___at_Lean_Tactic_FunInd_buildInductionBody___spec__10___rarg___boxed(lean_object*, lean_object*, lean_object*, lean_object*, lean_object*, lean_object*, lean_object*, lean_object*, lean_object*, lean_object*, lean_object*, lean_object*); LEAN_EXPORT lean_object* l_Lean_Tactic_FunInd_inProdLambdaLastArg___lambda__2(lean_object*, lean_object*, lean_object*, lean_object*, lean_object*, lean_object*, lean_object*, lean_object*); LEAN_EXPORT lean_object* l_Lean_throwError___at_Lean_Tactic_FunInd_deriveInductionStructural_doRealize___spec__2(lean_object*, lean_object*, lean_object*, lean_object*, lean_object*, lean_object*); +lean_object* l_Lean_Meta_mkNoConfusion(lean_object*, lean_object*, lean_object*, lean_object*, lean_object*, lean_object*, lean_object*); static lean_object* l_Lean_Tactic_FunInd_buildInductionBody___lambda__26___closed__2; LEAN_EXPORT lean_object* l_Lean_Tactic_FunInd_deriveInductionStructural_doRealize___lambda__3(lean_object*, lean_object*, lean_object*, lean_object*, lean_object*, lean_object*, lean_object*, lean_object*); LEAN_EXPORT lean_object* l_Lean_isTracingEnabledFor___at_Lean_Tactic_FunInd_buildInductionCase___spec__3___boxed(lean_object*, lean_object*, lean_object*, lean_object*, lean_object*, lean_object*, lean_object*, lean_object*); @@ -106,6 +107,7 @@ static lean_object* l_Lean_Tactic_FunInd_deriveUnaryInduction_doRealize___lambda LEAN_EXPORT lean_object* l_Lean_Meta_lambdaBoundedTelescope___at_Lean_Tactic_FunInd_lambdaTelescope1___spec__1___rarg___lambda__1(lean_object*, lean_object*, lean_object*, lean_object*, lean_object*, lean_object*, lean_object*, lean_object*, lean_object*); LEAN_EXPORT lean_object* l_Lean_Meta_matchMatcherApp_x3f___at_Lean_Tactic_FunInd_foldAndCollect___spec__1___lambda__2(lean_object*, lean_object*, lean_object*, lean_object*, lean_object*, lean_object*, lean_object*, lean_object*, lean_object*, lean_object*, lean_object*); lean_object* l_Lean_PersistentArray_toArray___rarg(lean_object*); +LEAN_EXPORT lean_object* l_Lean_Tactic_FunInd_buildInductionBody___lambda__41(lean_object*, lean_object*, lean_object*, lean_object*, lean_object*, lean_object*, lean_object*, lean_object*, lean_object*, lean_object*, lean_object*, lean_object*, lean_object*, lean_object*, lean_object*); LEAN_EXPORT lean_object* 
l_Lean_Tactic_FunInd_foldAndCollect___lambda__3___boxed(lean_object*, lean_object*, lean_object*, lean_object*, lean_object*, lean_object*, lean_object*, lean_object*); LEAN_EXPORT lean_object* l_Lean_Meta_matchMatcherApp_x3f___at_Lean_Tactic_FunInd_foldAndCollect___spec__1___lambda__2___boxed(lean_object*, lean_object*, lean_object*, lean_object*, lean_object*, lean_object*, lean_object*, lean_object*, lean_object*, lean_object*, lean_object*); LEAN_EXPORT lean_object* l_Lean_Expr_withAppAux___at_Lean_Tactic_FunInd_unpackMutualInduction_doRealize___spec__1___lambda__1___boxed(lean_object*, lean_object*); @@ -113,7 +115,6 @@ LEAN_EXPORT lean_object* l_Lean_Tactic_FunInd_deriveUnaryInduction_doRealize___l LEAN_EXPORT lean_object* l_Lean_Tactic_FunInd_deriveUnaryInduction_doRealize___lambda__19___boxed(lean_object*, lean_object*, lean_object*, lean_object*, lean_object*, lean_object*, lean_object*, lean_object*, lean_object*, lean_object*, lean_object*, lean_object*); LEAN_EXPORT lean_object* l_Array_mapMUnsafe_map___at_Lean_Tactic_FunInd_deriveInductionStructural_doRealize___spec__14(lean_object*, lean_object*, size_t, size_t, lean_object*); LEAN_EXPORT lean_object* l_Lean_withTraceNode___at_Lean_Tactic_FunInd_buildInductionCase___spec__12___lambda__1___boxed(lean_object*, lean_object*, lean_object*, lean_object*, lean_object*, lean_object*, lean_object*, lean_object*, lean_object*, lean_object*, lean_object*, lean_object*, lean_object*); -static lean_object* l_initFn____x40_Lean_Meta_Tactic_FunInd___hyg_23129____closed__12; static lean_object* l_Lean_Tactic_FunInd_rwIfWith___lambda__1___closed__1; lean_object* l_Lean_ConstantInfo_type(lean_object*); lean_object* lean_whnf(lean_object*, lean_object*, lean_object*, lean_object*, lean_object*, lean_object*); @@ -126,6 +127,7 @@ LEAN_EXPORT lean_object* l_Lean_logAt___at_Lean_Tactic_FunInd_buildInductionBody LEAN_EXPORT lean_object* l_Array_forIn_x27Unsafe_loop___at_Lean_Tactic_FunInd_cleanupAfter_cleanupAfter_x3f___spec__3(lean_object*, lean_object*, uint8_t, lean_object*, lean_object*, lean_object*, lean_object*, lean_object*, lean_object*, size_t, size_t, lean_object*, lean_object*, lean_object*, lean_object*, lean_object*, lean_object*); LEAN_EXPORT lean_object* l_Lean_Tactic_FunInd_deriveCases___lambda__5(lean_object*, lean_object*, lean_object*, lean_object*, lean_object*, lean_object*, lean_object*, lean_object*, lean_object*, lean_object*, lean_object*); static lean_object* l_Lean_Tactic_FunInd_rwIfWith___lambda__2___closed__1; +static lean_object* l_Lean_Tactic_FunInd_initFn____x40_Lean_Meta_Tactic_FunInd___hyg_23481____lambda__2___closed__7; static lean_object* l_Lean_Tactic_FunInd_deriveInductionStructural_doRealize___lambda__12___closed__2; lean_object* l_Lean_Meta_getFunIndInfoForInduct_x3f(lean_object*, lean_object*, lean_object*, lean_object*); lean_object* l_Lean_ConstantInfo_levelParams(lean_object*); @@ -138,6 +140,7 @@ LEAN_EXPORT lean_object* l_Lean_Tactic_FunInd_deduplicateIHs___boxed(lean_object LEAN_EXPORT lean_object* l_Lean_Meta_matchMatcherApp_x3f___at_Lean_Tactic_FunInd_foldAndCollect___spec__1___lambda__4___boxed(lean_object*, lean_object*, lean_object*, lean_object*, lean_object*, lean_object*, lean_object*, lean_object*, lean_object*, lean_object*, lean_object*); LEAN_EXPORT lean_object* l___private_Lean_Meta_Tactic_FunInd_0__Lean_Tactic_FunInd_elimTypeAnnotations___lambda__1___boxed(lean_object*, lean_object*, lean_object*, lean_object*); LEAN_EXPORT lean_object* 
l_Lean_Meta_withLocalDecl___at_Lean_Tactic_FunInd_foldAndCollect___spec__10___rarg(lean_object*, uint8_t, lean_object*, lean_object*, uint8_t, lean_object*, lean_object*, lean_object*, lean_object*, lean_object*, lean_object*); +LEAN_EXPORT lean_object* l_Lean_Tactic_FunInd_initFn____x40_Lean_Meta_Tactic_FunInd___hyg_23481____lambda__2___boxed(lean_object*, lean_object*, lean_object*, lean_object*, lean_object*); uint8_t l_Lean_Exception_isInterrupt(lean_object*); static lean_object* l_Lean_Tactic_FunInd_foldAndCollect___lambda__1___closed__2; LEAN_EXPORT lean_object* l_Lean_Meta_MatcherApp_transform___at_Lean_Tactic_FunInd_buildInductionBody___spec__12___lambda__3(lean_object*, lean_object*, lean_object*, lean_object*, size_t, lean_object*, lean_object*, lean_object*, lean_object*, lean_object*, lean_object*, lean_object*, lean_object*, lean_object*, lean_object*, lean_object*, lean_object*, lean_object*, lean_object*, lean_object*, lean_object*, lean_object*); @@ -160,12 +163,12 @@ lean_object* l_Lean_FileMap_toPosition(lean_object*, lean_object*); LEAN_EXPORT lean_object* l_Lean_Tactic_FunInd_rwMatcher(lean_object*, lean_object*, lean_object*, lean_object*, lean_object*, lean_object*); LEAN_EXPORT lean_object* l_StateT_pure___at_Lean_Tactic_FunInd_buildInductionBody___spec__11(lean_object*); LEAN_EXPORT lean_object* l_Lean_Tactic_FunInd_deriveCases___lambda__6(lean_object*, lean_object*, uint8_t, lean_object*, lean_object*, lean_object*, lean_object*, lean_object*, lean_object*, lean_object*); -static lean_object* l_initFn____x40_Lean_Meta_Tactic_FunInd___hyg_23129____closed__10; uint64_t lean_uint64_lor(uint64_t, uint64_t); LEAN_EXPORT lean_object* l_Lean_Tactic_FunInd_buildInductionBody___lambda__2(lean_object*, lean_object*, lean_object*, lean_object*, lean_object*, lean_object*, lean_object*, lean_object*, lean_object*, lean_object*); LEAN_EXPORT lean_object* l_Array_mapFinIdxM_map___at_Lean_Tactic_FunInd_deriveInductionStructural_doRealize___spec__9___lambda__1(lean_object*, lean_object*, lean_object*, lean_object*, lean_object*, lean_object*, lean_object*); LEAN_EXPORT lean_object* l_panic___at_Lean_Tactic_FunInd_buildInductionBody___spec__7(lean_object*, lean_object*, lean_object*, lean_object*, lean_object*, lean_object*, lean_object*, lean_object*); LEAN_EXPORT lean_object* l_panic___at_Lean_Tactic_FunInd_deriveInductionStructural_doRealize___spec__3(lean_object*, lean_object*, lean_object*, lean_object*, lean_object*, lean_object*); +static lean_object* l_Lean_Tactic_FunInd_initFn____x40_Lean_Meta_Tactic_FunInd___hyg_23481____closed__1; uint8_t l_Lean_Expr_isLet(lean_object*); lean_object* l_Lean_Elab_Structural_RecArgInfo_pickIndicesMajor(lean_object*, lean_object*); static lean_object* l_Lean_Tactic_FunInd_foldAndCollect___lambda__10___closed__3; @@ -173,6 +176,7 @@ uint8_t l_Lean_Expr_isAppOfArity(lean_object*, lean_object*, lean_object*); LEAN_EXPORT lean_object* l_Array_forIn_x27Unsafe_loop___at_Lean_Tactic_FunInd_buildInductionBody___spec__22___lambda__3___boxed(lean_object*, lean_object*, lean_object*, lean_object*, lean_object*, lean_object*, lean_object*, lean_object*, lean_object*, lean_object*, lean_object*, lean_object*, lean_object*); static lean_object* l_Lean_Tactic_FunInd_deriveUnaryInduction_doRealize___lambda__3___closed__2; lean_object* l_Lean_Meta_MatcherApp_withUserNames___at_Lean_Meta_MatcherApp_inferMatchType___spec__10___rarg(lean_object*, lean_object*, lean_object*, lean_object*, lean_object*, lean_object*, lean_object*, lean_object*); +LEAN_EXPORT 
lean_object* l_Lean_Tactic_FunInd_buildInductionBody___lambda__41___boxed(lean_object*, lean_object*, lean_object*, lean_object*, lean_object*, lean_object*, lean_object*, lean_object*, lean_object*, lean_object*, lean_object*, lean_object*, lean_object*, lean_object*, lean_object*); LEAN_EXPORT lean_object* l_Lean_Tactic_FunInd_buildInductionBody___lambda__5(lean_object*, lean_object*, lean_object*, lean_object*, lean_object*, lean_object*, lean_object*, lean_object*, lean_object*, lean_object*, lean_object*, lean_object*, lean_object*, lean_object*); static lean_object* l_Lean_Meta_MatcherApp_transform___at_Lean_Tactic_FunInd_buildInductionBody___spec__12___lambda__6___closed__4; uint8_t l_Lean_Expr_isOptParam(lean_object*); @@ -204,6 +208,7 @@ LEAN_EXPORT lean_object* l_Lean_Tactic_FunInd_foldAndCollect___lambda__26(lean_o lean_object* l_Lean_MessageData_ofList(lean_object*); LEAN_EXPORT lean_object* l_Lean_Tactic_FunInd_deriveInductionStructural_doRealize___lambda__10___boxed(lean_object**); LEAN_EXPORT lean_object* l_Lean_Tactic_FunInd_buildInductionBody___lambda__23(lean_object*, lean_object*, lean_object*, lean_object*, lean_object*, lean_object*, lean_object*, lean_object*, lean_object*, lean_object*, lean_object*, lean_object*, lean_object*, lean_object*, lean_object*); +static lean_object* l_Lean_Tactic_FunInd_initFn____x40_Lean_Meta_Tactic_FunInd___hyg_23481____lambda__2___closed__6; lean_object* l_Lean_PersistentArray_push___rarg(lean_object*, lean_object*); lean_object* lean_array_push(lean_object*, lean_object*); lean_object* l_Array_toSubarray___rarg(lean_object*, lean_object*, lean_object*); @@ -222,6 +227,7 @@ LEAN_EXPORT lean_object* l_Lean_Tactic_FunInd_deriveInductionStructural_doRealiz static lean_object* l_Lean_Tactic_FunInd_cleanPackedArgs___lambda__3___closed__2; LEAN_EXPORT lean_object* l_Lean_Tactic_FunInd_M_localMapM(lean_object*); LEAN_EXPORT lean_object* l_Lean_PersistentArray_forInAux___at_Lean_Tactic_FunInd_cleanupAfter_allHeqToEq___spec__2___lambda__1(lean_object*, lean_object*, lean_object*, lean_object*, lean_object*, lean_object*, lean_object*); +static lean_object* l_Lean_Tactic_FunInd_buildInductionBody___lambda__41___closed__3; LEAN_EXPORT lean_object* l_Lean_Meta_matchMatcherApp_x3f___at_Lean_Tactic_FunInd_buildInductionBody___spec__3___lambda__1___boxed(lean_object*, lean_object*, lean_object*, lean_object*, lean_object*, lean_object*, lean_object*, lean_object*); uint8_t lean_usize_dec_eq(size_t, size_t); LEAN_EXPORT lean_object* l_Lean_Tactic_FunInd_unpackMutualInduction_doRealize(uint8_t, lean_object*, lean_object*, lean_object*, lean_object*, lean_object*, lean_object*, lean_object*); @@ -232,7 +238,6 @@ static lean_object* l_Lean_Tactic_FunInd_unpackMutualInduction_doRealize___lambd static lean_object* l_Lean_Tactic_FunInd_setNaryFunIndInfo___closed__6; LEAN_EXPORT lean_object* l_Lean_Tactic_FunInd_foldAndCollect___lambda__5(lean_object*, lean_object*, lean_object*, lean_object*, lean_object*, lean_object*, lean_object*, lean_object*); static lean_object* l_Lean_Tactic_FunInd_deriveCases___closed__1; -LEAN_EXPORT lean_object* l_Lean_Tactic_FunInd_initFn____x40_Lean_Meta_Tactic_FunInd___hyg_22849____lambda__2(lean_object*, lean_object*, lean_object*, lean_object*, lean_object*); static lean_object* l_Lean_Tactic_FunInd_buildInductionCase___lambda__3___closed__1; lean_object* l_Lean_Meta_withLocalDecl___at_Lean_Meta_Simp_Arith_withAbstractAtoms_go___spec__1___rarg(lean_object*, uint8_t, lean_object*, lean_object*, uint8_t, lean_object*, 
lean_object*, lean_object*, lean_object*, lean_object*); LEAN_EXPORT lean_object* l_Lean_Tactic_FunInd_M2_run___rarg(lean_object*, lean_object*, lean_object*, lean_object*, lean_object*, lean_object*); @@ -253,7 +258,6 @@ static lean_object* l_Lean_Tactic_FunInd_deriveInductionStructural_doRealize___l static lean_object* l_Lean_Tactic_FunInd_M_run___rarg___closed__1; lean_object* l_Lean_replaceRef(lean_object*, lean_object*); LEAN_EXPORT lean_object* l_Array_forIn_x27Unsafe_loop___at_Lean_Tactic_FunInd_buildInductionBody___spec__27(lean_object*, lean_object*, lean_object*, lean_object*, lean_object*, lean_object*, size_t, size_t, lean_object*, lean_object*, lean_object*, lean_object*, lean_object*, lean_object*, lean_object*, lean_object*); -LEAN_EXPORT lean_object* l_Lean_Tactic_FunInd_initFn____x40_Lean_Meta_Tactic_FunInd___hyg_22849____lambda__3(lean_object*, lean_object*, lean_object*, lean_object*); lean_object* lean_mk_array(lean_object*, lean_object*); LEAN_EXPORT lean_object* l_Lean_throwError___at_Lean_Tactic_FunInd_deriveInductionStructural_doRealize___spec__2___boxed(lean_object*, lean_object*, lean_object*, lean_object*, lean_object*, lean_object*); LEAN_EXPORT lean_object* l_Lean_Tactic_FunInd_rwLetWith(lean_object*, lean_object*, lean_object*, lean_object*, lean_object*, lean_object*, lean_object*); @@ -290,13 +294,11 @@ static lean_object* l_panic___at_Lean_Tactic_FunInd_buildInductionBody___spec__3 LEAN_EXPORT lean_object* l_Lean_Tactic_FunInd_cleanPackedArgs___lambda__7(lean_object*, lean_object*, lean_object*, lean_object*, lean_object*, lean_object*); static lean_object* l_Lean_Tactic_FunInd_deriveCases___lambda__11___closed__2; LEAN_EXPORT lean_object* l_Lean_Tactic_FunInd_buildInductionBody___lambda__28(lean_object*, lean_object*, lean_object*, lean_object*, lean_object*, lean_object*, lean_object*, lean_object*, lean_object*, lean_object*, lean_object*); -static lean_object* l_initFn____x40_Lean_Meta_Tactic_FunInd___hyg_23129____closed__11; static lean_object* l_Lean_Tactic_FunInd_deriveCases___lambda__3___closed__1; LEAN_EXPORT lean_object* l_Lean_Meta_matchMatcherApp_x3f___at_Lean_Tactic_FunInd_buildInductionBody___spec__3(lean_object*, uint8_t, lean_object*, lean_object*, lean_object*, lean_object*, lean_object*, lean_object*, lean_object*); static lean_object* l_Lean_Tactic_FunInd_abstractIndependentMVars___closed__3; static double l_Lean_withTraceNode___at_Lean_Tactic_FunInd_foldAndCollect___spec__17___lambda__2___closed__1; static lean_object* l_Lean_Tactic_FunInd_deriveUnaryInduction_doRealize___lambda__20___closed__3; -LEAN_EXPORT lean_object* l_Lean_Tactic_FunInd_initFn____x40_Lean_Meta_Tactic_FunInd___hyg_22849____lambda__2___boxed(lean_object*, lean_object*, lean_object*, lean_object*, lean_object*); lean_object* l_Lean_Expr_fvarId_x21(lean_object*); static lean_object* l_Lean_Tactic_FunInd_deriveInductionStructural_doRealize___lambda__17___closed__3; static lean_object* l_Lean_Tactic_FunInd_buildInductionBody___lambda__26___closed__1; @@ -308,10 +310,12 @@ uint8_t lean_float_decLt(double, double); LEAN_EXPORT lean_object* l_Lean_PersistentArray_forInAux___at_Lean_Tactic_FunInd_cleanupAfter_cleanupAfter_x3f___spec__2___boxed(lean_object*, lean_object*, lean_object*, lean_object*, lean_object*, lean_object*, lean_object*, lean_object*, lean_object*, lean_object*, lean_object*, lean_object*); LEAN_EXPORT lean_object* l_Lean_Tactic_FunInd_withLetDecls___rarg(lean_object*, lean_object*, lean_object*, lean_object*, lean_object*, lean_object*, lean_object*, 
lean_object*, lean_object*); LEAN_EXPORT lean_object* l_Lean_Tactic_FunInd_abstractIndependentMVars___lambda__1___boxed(lean_object*); +LEAN_EXPORT lean_object* l_Lean_Tactic_FunInd_buildInductionBody___lambda__39(lean_object*, lean_object*, lean_object*, lean_object*, lean_object*, lean_object*, lean_object*, lean_object*, lean_object*); LEAN_EXPORT lean_object* l_Lean_Tactic_FunInd_isFunInductName___lambda__3___boxed(lean_object*, lean_object*, lean_object*); static lean_object* l_Lean_Tactic_FunInd_buildInductionBody___lambda__13___closed__3; LEAN_EXPORT lean_object* l_Lean_Tactic_FunInd_cleanPackedArgs___lambda__3___boxed(lean_object*, lean_object*, lean_object*, lean_object*, lean_object*, lean_object*, lean_object*, lean_object*, lean_object*, lean_object*); LEAN_EXPORT lean_object* l_Lean_mkConstWithLevelParams___at_Lean_Tactic_FunInd_deriveCases___spec__1(lean_object*, lean_object*, lean_object*, lean_object*, lean_object*, lean_object*, lean_object*, lean_object*); +LEAN_EXPORT lean_object* l_Lean_Tactic_FunInd_buildInductionBody___lambda__38(lean_object*, lean_object*, lean_object*, lean_object*, lean_object*, lean_object*, lean_object*, lean_object*, lean_object*, lean_object*); static lean_object* l_Lean_Tactic_FunInd_deriveInductionStructural_doRealize___lambda__18___closed__4; static lean_object* l_Array_forIn_x27Unsafe_loop___at_Lean_Tactic_FunInd_deriveInductionStructural_doRealize___spec__19___lambda__3___closed__3; LEAN_EXPORT lean_object* l_Lean_Tactic_FunInd_deriveUnaryInduction_doRealize___lambda__11___boxed(lean_object*, lean_object*, lean_object*, lean_object*, lean_object*, lean_object*, lean_object*, lean_object*, lean_object*, lean_object*, lean_object*, lean_object*, lean_object*, lean_object*, lean_object*, lean_object*); @@ -322,7 +326,7 @@ static lean_object* l_Lean_Tactic_FunInd_deriveCases___lambda__9___closed__2; LEAN_EXPORT lean_object* l_Lean_Tactic_FunInd_deriveInduction___lambda__2(lean_object*); static lean_object* l_Lean_withTraceNode___at_Lean_Tactic_FunInd_foldAndCollect___spec__17___lambda__4___closed__3; LEAN_EXPORT lean_object* l_Lean_Tactic_FunInd_foldAndCollect___lambda__11(lean_object*, lean_object*, lean_object*, lean_object*, lean_object*, lean_object*, lean_object*, lean_object*, lean_object*); -static lean_object* l_Lean_Tactic_FunInd_initFn____x40_Lean_Meta_Tactic_FunInd___hyg_22849____lambda__2___closed__11; +static lean_object* l_Lean_Tactic_FunInd_buildInductionBody___lambda__42___closed__1; static lean_object* l_Lean_Tactic_FunInd_deriveInductionStructural_doRealize___lambda__17___closed__2; LEAN_EXPORT lean_object* l_Array_forIn_x27Unsafe_loop___at_Lean_Tactic_FunInd_deriveInductionStructural_doRealize___spec__16___lambda__2___boxed(lean_object*, lean_object*, lean_object*, lean_object*, lean_object*, lean_object*, lean_object*, lean_object*, lean_object*, lean_object*, lean_object*, lean_object*, lean_object*, lean_object*, lean_object*, lean_object*); lean_object* l_Lean_Meta_forallTelescopeReducing___at_Lean_Meta_getParamNames___spec__2___rarg(lean_object*, lean_object*, uint8_t, lean_object*, lean_object*, lean_object*, lean_object*, lean_object*); @@ -427,6 +431,7 @@ static lean_object* l_Lean_Tactic_FunInd_deriveUnaryInduction_doRealize___closed LEAN_EXPORT lean_object* l_Lean_Tactic_FunInd_cleanPackedArgs___lambda__1___boxed(lean_object*, lean_object*, lean_object*, lean_object*, lean_object*, lean_object*, lean_object*); LEAN_EXPORT lean_object* l_Lean_Tactic_FunInd_foldAndCollect___lambda__24(lean_object*, lean_object*, 
lean_object*, lean_object*, lean_object*, lean_object*, lean_object*, lean_object*, lean_object*); LEAN_EXPORT lean_object* l_Lean_Tactic_FunInd_unpackMutualInduction_doRealize___lambda__2(lean_object*, lean_object*, lean_object*, lean_object*, lean_object*, lean_object*, lean_object*, lean_object*, lean_object*, lean_object*, lean_object*, lean_object*); +LEAN_EXPORT lean_object* l_Lean_Tactic_FunInd_initFn____x40_Lean_Meta_Tactic_FunInd___hyg_23481____lambda__3(lean_object*, lean_object*, lean_object*, lean_object*); LEAN_EXPORT lean_object* l_Lean_Tactic_FunInd_cleanPackedArgs___lambda__4___boxed(lean_object*, lean_object*, lean_object*, lean_object*, lean_object*, lean_object*, lean_object*, lean_object*); extern lean_object* l_Lean_Elab_Structural_instInhabitedRecArgInfo; LEAN_EXPORT lean_object* l_Lean_Tactic_FunInd_foldAndCollect___lambda__18___boxed(lean_object*, lean_object*, lean_object*, lean_object*, lean_object*, lean_object*, lean_object*, lean_object*, lean_object*); @@ -471,9 +476,11 @@ static lean_object* l_Lean_Tactic_FunInd_deriveUnaryInduction_doRealize___lambda LEAN_EXPORT lean_object* l_Array_forIn_x27Unsafe_loop___at_Lean_Tactic_FunInd_cleanupAfter_cleanupAfter_x3f___spec__4___lambda__1(lean_object*, lean_object*, lean_object*, lean_object*, lean_object*, lean_object*, lean_object*, lean_object*, lean_object*, lean_object*); static lean_object* l_Lean_Tactic_FunInd_rwIfWith___lambda__1___closed__2; LEAN_EXPORT lean_object* l_StateT_lift___at_Lean_Tactic_FunInd_buildInductionCase___spec__2(lean_object*); +static lean_object* l_Lean_Tactic_FunInd_initFn____x40_Lean_Meta_Tactic_FunInd___hyg_23481____lambda__2___closed__5; LEAN_EXPORT lean_object* l_Lean_Loop_forIn_loop___at_Lean_Tactic_FunInd_rwMatcher___spec__1(lean_object*, lean_object*, lean_object*, lean_object*, lean_object*, lean_object*); static lean_object* l_Array_mapMUnsafe_map___at_Lean_Tactic_FunInd_deriveInductionStructural_doRealize___spec__7___closed__1; LEAN_EXPORT lean_object* l_Lean_Tactic_FunInd_cleanPackedArgs___lambda__5(lean_object*, lean_object*, lean_object*, lean_object*, lean_object*, lean_object*, lean_object*, lean_object*); +static lean_object* l_Lean_Tactic_FunInd_buildInductionBody___lambda__42___closed__2; static lean_object* l_Lean_Tactic_FunInd_unpackMutualInduction_doRealize___lambda__7___closed__4; uint8_t l_Lean_Expr_hasMVar(lean_object*); lean_object* l_Lean_Elab_Structural_IndGroupInst_brecOn(lean_object*, uint8_t, lean_object*, lean_object*); @@ -488,6 +495,7 @@ static lean_object* l_Lean_Tactic_FunInd_cleanPackedArgs___lambda__5___closed__3 LEAN_EXPORT lean_object* l_Lean_Meta_forallTelescope___at_Lean_Tactic_FunInd_deriveUnaryInduction_doRealize___spec__2___rarg(lean_object*, lean_object*, uint8_t, lean_object*, lean_object*, lean_object*, lean_object*, lean_object*, lean_object*, lean_object*); LEAN_EXPORT lean_object* l_Lean_addTrace___at_Lean_Tactic_FunInd_buildInductionCase___spec__5(lean_object*, lean_object*, lean_object*, lean_object*, lean_object*, lean_object*, lean_object*, lean_object*, lean_object*); LEAN_EXPORT lean_object* l_Lean_Tactic_FunInd_rwIfWith___lambda__1(lean_object*, lean_object*, lean_object*, lean_object*, lean_object*, lean_object*, lean_object*, lean_object*, lean_object*, lean_object*, lean_object*, lean_object*, lean_object*, lean_object*); +static lean_object* l_Lean_Tactic_FunInd_initFn____x40_Lean_Meta_Tactic_FunInd___hyg_23481____lambda__2___closed__8; LEAN_EXPORT lean_object* 
l_Lean_Meta_MatcherApp_transform___at_Lean_Tactic_FunInd_buildInductionBody___spec__12___lambda__1___boxed(lean_object*, lean_object*, lean_object*, lean_object*, lean_object*, lean_object*, lean_object*, lean_object*, lean_object*, lean_object*, lean_object*, lean_object*, lean_object*, lean_object*, lean_object*); LEAN_EXPORT lean_object* l_Lean_Tactic_FunInd_deriveInductionStructural_doRealize___lambda__4(lean_object*, lean_object*, lean_object*, lean_object*, size_t, lean_object*, lean_object*, lean_object*, lean_object*, lean_object*, lean_object*, lean_object*, lean_object*, lean_object*, lean_object*, lean_object*, lean_object*, lean_object*, lean_object*, lean_object*); LEAN_EXPORT lean_object* l_Lean_Meta_withLocalDecls_loop___at_Lean_Tactic_FunInd_abstractIndependentMVars___spec__10___rarg___lambda__1(lean_object*); @@ -497,7 +505,6 @@ LEAN_EXPORT lean_object* l_Array_mapMUnsafe_map___at_Lean_Tactic_FunInd_buildInd static lean_object* l_Lean_Tactic_FunInd_deriveInductionStructural_doRealize___lambda__8___closed__1; LEAN_EXPORT lean_object* l_Lean_Tactic_FunInd_deriveInductionStructural_doRealize___boxed(lean_object*, lean_object*, lean_object*, lean_object*, lean_object*, lean_object*, lean_object*, lean_object*, lean_object*); LEAN_EXPORT lean_object* l_Lean_Meta_withLocalDecl___at_Lean_Tactic_FunInd_buildInductionBody___spec__1(lean_object*); -static lean_object* l_initFn____x40_Lean_Meta_Tactic_FunInd___hyg_23129____closed__5; static lean_object* l_Lean_Meta_MatcherApp_transform___at_Lean_Tactic_FunInd_buildInductionBody___spec__12___lambda__5___closed__2; LEAN_EXPORT lean_object* l_Lean_Meta_lambdaTelescope___at_Lean_Tactic_FunInd_buildInductionBody___spec__17___rarg___boxed(lean_object*, lean_object*, lean_object*, lean_object*, lean_object*, lean_object*, lean_object*, lean_object*, lean_object*, lean_object*); size_t lean_ptr_addr(lean_object*); @@ -518,6 +525,7 @@ LEAN_EXPORT lean_object* l_Array_mapMUnsafe_map___at_Lean_Tactic_FunInd_deriveCa LEAN_EXPORT lean_object* l_Lean_Tactic_FunInd_inProdLambdaLastArg___lambda__3___boxed(lean_object*, lean_object*, lean_object*, lean_object*, lean_object*, lean_object*, lean_object*, lean_object*); static lean_object* l_Lean_Tactic_FunInd_isFunInductName___closed__2; LEAN_EXPORT lean_object* l_Lean_Meta_matchMatcherApp_x3f___at_Lean_Tactic_FunInd_foldAndCollect___spec__1___boxed(lean_object*, lean_object*, lean_object*, lean_object*, lean_object*, lean_object*, lean_object*, lean_object*); +uint8_t l_Lean_TagDeclarationExtension_isTagged(lean_object*, lean_object*, lean_object*); static lean_object* l_Lean_Tactic_FunInd_buildInductionBody___lambda__4___closed__2; LEAN_EXPORT lean_object* l_Lean_Tactic_FunInd_buildInductionBody___lambda__24(lean_object*, lean_object*, lean_object*, lean_object*, lean_object*, lean_object*, lean_object*, lean_object*, lean_object*, lean_object*, lean_object*, lean_object*, lean_object*, lean_object*, lean_object*); LEAN_EXPORT lean_object* l_Lean_Tactic_FunInd_isFunInductName(lean_object*, lean_object*); @@ -525,6 +533,7 @@ static lean_object* l_Std_Range_forIn_x27_loop___at_Lean_Tactic_FunInd_setNaryFu LEAN_EXPORT lean_object* l_Array_forIn_x27Unsafe_loop___at_Lean_Tactic_FunInd_deriveInductionStructural_doRealize___spec__16___at_Lean_Tactic_FunInd_deriveInductionStructural_doRealize___spec__17___lambda__1(lean_object*, lean_object*, lean_object*, lean_object*, lean_object*, uint8_t, lean_object*, lean_object*, lean_object*); LEAN_EXPORT lean_object* 
l_Array_mapMUnsafe_map___at_Lean_Tactic_FunInd_buildInductionBody___spec__28(lean_object*, size_t, size_t, lean_object*); LEAN_EXPORT lean_object* l_Lean_Meta_lambdaTelescope___at_Lean_Tactic_FunInd_buildInductionBody___spec__17___rarg___lambda__1(lean_object*, lean_object*, lean_object*, lean_object*, lean_object*, lean_object*, lean_object*, lean_object*, lean_object*, lean_object*); +lean_object* l_Lean_Expr_getNumHeadForalls(lean_object*); LEAN_EXPORT lean_object* l_Lean_throwError___at_Lean_Tactic_FunInd_buildInductionBody___spec__16(lean_object*, lean_object*, lean_object*, lean_object*, lean_object*, lean_object*, lean_object*, lean_object*); static lean_object* l_Lean_Elab_Structural_Positions_mapMwith___at_Lean_Tactic_FunInd_deriveInductionStructural_doRealize___spec__12___closed__1; LEAN_EXPORT lean_object* l_Lean_Tactic_FunInd_lambdaTelescope1___at_Lean_Tactic_FunInd_buildInductionBody___spec__33___rarg___lambda__2(lean_object*, lean_object*, lean_object*, lean_object*, lean_object*, lean_object*, lean_object*, lean_object*, lean_object*, lean_object*, lean_object*); @@ -536,6 +545,7 @@ static lean_object* l_Lean_Meta_MatcherApp_transform___at_Lean_Tactic_FunInd_bui LEAN_EXPORT lean_object* l_Lean_Meta_withLetDecl___at_Lean_Tactic_FunInd_buildInductionBody___spec__10(lean_object*); static lean_object* l_Array_forIn_x27Unsafe_loop___at_Lean_Tactic_FunInd_deriveInductionStructural_doRealize___spec__19___lambda__3___closed__7; LEAN_EXPORT lean_object* l_Lean_Tactic_FunInd_buildInductionBody___lambda__1(lean_object*, lean_object*, lean_object*, lean_object*, lean_object*, lean_object*, lean_object*, lean_object*, lean_object*, lean_object*, lean_object*, lean_object*); +LEAN_EXPORT lean_object* l_initFn____x40_Lean_Meta_Tactic_FunInd___hyg_23761_(lean_object*); LEAN_EXPORT lean_object* l_Lean_Tactic_FunInd_withRewrittenMotive(lean_object*, lean_object*, lean_object*, lean_object*, lean_object*, lean_object*, lean_object*, lean_object*, lean_object*, lean_object*); LEAN_EXPORT lean_object* l_Lean_Tactic_FunInd_abstractIndependentMVars___lambda__3___boxed(lean_object*, lean_object*, lean_object*, lean_object*, lean_object*, lean_object*, lean_object*, lean_object*, lean_object*); LEAN_EXPORT lean_object* l_Lean_Meta_withLocalDecls___at_Lean_Tactic_FunInd_abstractIndependentMVars___spec__9(lean_object*); @@ -547,7 +557,6 @@ LEAN_EXPORT lean_object* l_Lean_Tactic_FunInd_foldAndCollect___lambda__8___boxed LEAN_EXPORT lean_object* l_Lean_Tactic_FunInd_buildInductionCase___lambda__1(lean_object*, lean_object*, lean_object*, lean_object*, lean_object*, lean_object*, lean_object*, lean_object*, lean_object*); static lean_object* l_Lean_Tactic_FunInd_deriveUnaryInduction_doRealize___lambda__17___closed__3; lean_object* l_Lean_throwError___at_Lean_Expr_abstractRangeM___spec__1(lean_object*, lean_object*, lean_object*, lean_object*, lean_object*, lean_object*); -static lean_object* l_initFn____x40_Lean_Meta_Tactic_FunInd___hyg_23129____closed__9; lean_object* l_Lean_Meta_getFunInductName(lean_object*, uint8_t); LEAN_EXPORT lean_object* l_Lean_Meta_MatcherApp_transform___at_Lean_Tactic_FunInd_buildInductionBody___spec__12___boxed(lean_object*, lean_object*, lean_object*, lean_object*, lean_object*, lean_object*, lean_object*, lean_object*, lean_object*, lean_object*, lean_object*, lean_object*, lean_object*, lean_object*); LEAN_EXPORT lean_object* l_Array_isEqvAux___at_Lean_Tactic_FunInd_deriveInductionStructural_doRealize___spec__8___boxed(lean_object*, lean_object*, lean_object*, lean_object*, 
lean_object*, lean_object*, lean_object*); @@ -555,6 +564,7 @@ static lean_object* l_Lean_Elab_Structural_Positions_mapMwith___at_Lean_Tactic_F uint8_t l_Array_contains___at_Lean_Meta_setMVarUserNamesAt___spec__1(lean_object*, lean_object*); static lean_object* l_Lean_Tactic_FunInd_rwMatcher___closed__4; lean_object* lean_st_ref_take(lean_object*, lean_object*); +lean_object* l_Lean_Expr_getRevArg_x21(lean_object*, lean_object*); LEAN_EXPORT lean_object* l_Lean_Tactic_FunInd_foldAndCollect___lambda__2(lean_object*, lean_object*, lean_object*, lean_object*, lean_object*, lean_object*, lean_object*); LEAN_EXPORT lean_object* l_Lean_Tactic_FunInd_withLetDecls_go___rarg___lambda__1___boxed(lean_object*, lean_object*, lean_object*, lean_object*, lean_object*, lean_object*, lean_object*, lean_object*, lean_object*, lean_object*, lean_object*, lean_object*); lean_object* l_Lean_Meta_Simp_Result_mkEqMPR(lean_object*, lean_object*, lean_object*, lean_object*, lean_object*, lean_object*, lean_object*); @@ -576,8 +586,9 @@ lean_object* l_Lean_Meta_Match_Extension_getMatcherInfo_x3f(lean_object*, lean_o static lean_object* l_Lean_Tactic_FunInd_buildInductionBody___lambda__37___closed__1; LEAN_EXPORT lean_object* l_Lean_Tactic_FunInd_foldAndCollect___lambda__23___boxed(lean_object*, lean_object*, lean_object*, lean_object*, lean_object*, lean_object*, lean_object*, lean_object*, lean_object*); static lean_object* l_Lean_Tactic_FunInd_buildInductionBody___lambda__13___closed__1; -LEAN_EXPORT lean_object* l_Lean_Tactic_FunInd_initFn____x40_Lean_Meta_Tactic_FunInd___hyg_22849____lambda__1___boxed(lean_object*, lean_object*, lean_object*, lean_object*); lean_object* l_Array_zip___rarg(lean_object*, lean_object*); +static lean_object* l_Lean_Tactic_FunInd_initFn____x40_Lean_Meta_Tactic_FunInd___hyg_23481____lambda__2___closed__11; +static lean_object* l_initFn____x40_Lean_Meta_Tactic_FunInd___hyg_23761____closed__10; static lean_object* l_Array_forIn_x27Unsafe_loop___at_Lean_Tactic_FunInd_deriveInductionStructural_doRealize___spec__19___lambda__3___closed__2; LEAN_EXPORT lean_object* l_Lean_Tactic_FunInd_deriveUnaryInduction_doRealize___lambda__12(lean_object*, lean_object*, lean_object*, lean_object*, lean_object*, lean_object*, lean_object*, lean_object*, uint8_t, lean_object*, lean_object*, lean_object*, lean_object*, lean_object*, lean_object*, lean_object*); lean_object* l_Lean_Meta_forallBoundedTelescope___at_Lean_Meta_arrowDomainsN___spec__6___rarg(lean_object*, lean_object*, lean_object*, uint8_t, lean_object*, lean_object*, lean_object*, lean_object*, lean_object*); @@ -587,11 +598,13 @@ LEAN_EXPORT lean_object* l_Lean_Tactic_FunInd_lambdaTelescope1(lean_object*); LEAN_EXPORT lean_object* l_Lean_Tactic_FunInd_inProdLambdaLastArg___lambda__1___boxed(lean_object*, lean_object*, lean_object*, lean_object*, lean_object*, lean_object*, lean_object*, lean_object*, lean_object*); LEAN_EXPORT lean_object* l_Array_mapMUnsafe_map___at_Lean_Tactic_FunInd_deriveInductionStructural_doRealize___spec__7___boxed(lean_object*, lean_object*, lean_object*, lean_object*, lean_object*, lean_object*); uint64_t lean_uint64_shift_right(uint64_t, uint64_t); +static lean_object* l_initFn____x40_Lean_Meta_Tactic_FunInd___hyg_23761____closed__1; LEAN_EXPORT uint8_t l_Array_anyMUnsafe_any___at_Lean_Tactic_FunInd_foldAndCollect___spec__12(lean_object*, lean_object*, size_t, size_t); LEAN_EXPORT lean_object* l_Lean_Tactic_FunInd_M_eval(lean_object*); LEAN_EXPORT lean_object* 
l_Array_foldlMUnsafe_fold___at_Lean_Tactic_FunInd_buildInductionCase___spec__10___boxed(lean_object*, lean_object*, lean_object*, lean_object*, lean_object*); LEAN_EXPORT lean_object* l_Array_foldlMUnsafe_fold___at_Lean_Tactic_FunInd_deriveInductionStructural_doRealize___spec__23___boxed(lean_object*, lean_object*, lean_object*, lean_object*, lean_object*); LEAN_EXPORT lean_object* l_Lean_Meta_forallBoundedTelescope___at_Lean_Tactic_FunInd_buildInductionBody___spec__21___rarg___boxed(lean_object*, lean_object*, lean_object*, lean_object*, lean_object*, lean_object*, lean_object*, lean_object*, lean_object*, lean_object*, lean_object*); +LEAN_EXPORT lean_object* l_Lean_Tactic_FunInd_buildInductionBody___lambda__37___boxed(lean_object*, lean_object*, lean_object*, lean_object*, lean_object*, lean_object*, lean_object*, lean_object*, lean_object*, lean_object*, lean_object*, lean_object*, lean_object*, lean_object*, lean_object*); LEAN_EXPORT lean_object* l_Array_mapMUnsafe_map___at_Lean_Tactic_FunInd_deriveUnaryInduction_doRealize___spec__4___boxed(lean_object*, lean_object*, lean_object*, lean_object*); static lean_object* l_Lean_Tactic_FunInd_deriveUnaryInduction_doRealize___lambda__14___closed__1; LEAN_EXPORT lean_object* l_Lean_Meta_forallBoundedTelescope___at_Lean_Tactic_FunInd_buildInductionBody___spec__21(lean_object*); @@ -618,8 +631,8 @@ static lean_object* l_Lean_logAt___at_Lean_Tactic_FunInd_buildInductionBody___sp static lean_object* l_Lean_Tactic_FunInd_foldAndCollect___lambda__13___closed__2; lean_object* l_Lean_registerTraceClass(lean_object*, uint8_t, lean_object*, lean_object*); LEAN_EXPORT lean_object* l_Lean_Tactic_FunInd_buildInductionBody___lambda__34(lean_object*, lean_object*, lean_object*, lean_object*, lean_object*, lean_object*, lean_object*, lean_object*, lean_object*); +static lean_object* l_Lean_Tactic_FunInd_initFn____x40_Lean_Meta_Tactic_FunInd___hyg_23481____closed__2; lean_object* l_Lean_Meta_kabstract(lean_object*, lean_object*, lean_object*, lean_object*, lean_object*, lean_object*, lean_object*, lean_object*); -static lean_object* l_initFn____x40_Lean_Meta_Tactic_FunInd___hyg_23129____closed__2; LEAN_EXPORT lean_object* l_Lean_Tactic_FunInd_buildInductionCase___lambda__2___boxed(lean_object*, lean_object*, lean_object*, lean_object*, lean_object*, lean_object*, lean_object*, lean_object*, lean_object*, lean_object*, lean_object*); lean_object* l_Lean_Meta_mkHEqRefl(lean_object*, lean_object*, lean_object*, lean_object*, lean_object*, lean_object*); static lean_object* l_Lean_Tactic_FunInd_deriveInductionStructural_doRealize___lambda__18___closed__2; @@ -628,6 +641,7 @@ lean_object* l_Array_foldlMUnsafe_fold___at_Lean_Meta_withErasedFVars___spec__3( LEAN_EXPORT lean_object* l_Lean_Tactic_FunInd_deriveCases___lambda__9___boxed(lean_object*, lean_object*, lean_object*, lean_object*, lean_object*, lean_object*, lean_object*, lean_object*, lean_object*, lean_object*); lean_object* l___private_Lean_Meta_Basic_0__Lean_Meta_withLocalDeclImp___rarg(lean_object*, uint8_t, lean_object*, lean_object*, uint8_t, lean_object*, lean_object*, lean_object*, lean_object*, lean_object*); static lean_object* l_Array_forIn_x27Unsafe_loop___at_Lean_Tactic_FunInd_deriveInductionStructural_doRealize___spec__16___lambda__3___closed__3; +static lean_object* l_Lean_Tactic_FunInd_initFn____x40_Lean_Meta_Tactic_FunInd___hyg_23481____lambda__2___closed__1; LEAN_EXPORT lean_object* l_Array_mapMUnsafe_map___at_Lean_Tactic_FunInd_abstractIndependentMVars___spec__1___boxed(lean_object*, 
lean_object*, lean_object*, lean_object*, lean_object*, lean_object*, lean_object*, lean_object*, lean_object*); static lean_object* l_Lean_Tactic_FunInd_rwIfWith___closed__1; static lean_object* l_List_forIn_x27_loop___at_Lean_Tactic_FunInd_foldAndCollect___spec__6___closed__1; @@ -639,7 +653,6 @@ static lean_object* l_Lean_Tactic_FunInd_cleanPackedArgs___lambda__3___closed__1 LEAN_EXPORT lean_object* l_Lean_Tactic_FunInd_deriveInductionStructural_doRealize___lambda__16(lean_object*, lean_object*, lean_object*, lean_object*, lean_object*, lean_object*, lean_object*, lean_object*, lean_object*, lean_object*, lean_object*, size_t, uint8_t, lean_object*, lean_object*, lean_object*, lean_object*, lean_object*, lean_object*, lean_object*, lean_object*); static lean_object* l_Lean_Tactic_FunInd_deriveUnaryInduction_doRealize___lambda__8___closed__3; LEAN_EXPORT lean_object* l_Lean_Tactic_FunInd_M_branch___rarg(lean_object*, lean_object*, lean_object*, lean_object*, lean_object*, lean_object*, lean_object*); -static lean_object* l_Lean_Tactic_FunInd_buildInductionBody___lambda__37___closed__2; LEAN_EXPORT lean_object* l_Lean_Tactic_FunInd_buildInductionBody___lambda__27(lean_object*, lean_object*, lean_object*, lean_object*, lean_object*, lean_object*, lean_object*, lean_object*, lean_object*, lean_object*, lean_object*); LEAN_EXPORT lean_object* l_Lean_Meta_lambdaBoundedTelescope___at_Lean_Tactic_FunInd_buildInductionBody___spec__34(lean_object*); LEAN_EXPORT lean_object* l_Lean_Tactic_FunInd_deriveCases___lambda__8___boxed(lean_object*, lean_object*, lean_object*, lean_object*, lean_object*, lean_object*, lean_object*, lean_object*, lean_object*, lean_object*); @@ -649,7 +662,6 @@ static lean_object* l_Lean_Tactic_FunInd_foldAndCollect___lambda__1___closed__3; LEAN_EXPORT lean_object* l_Lean_Tactic_FunInd_M_tell___boxed(lean_object*, lean_object*, lean_object*, lean_object*, lean_object*, lean_object*, lean_object*); LEAN_EXPORT lean_object* l_Array_forIn_x27Unsafe_loop___at_Lean_Tactic_FunInd_deriveInductionStructural_doRealize___spec__16(lean_object*, lean_object*, lean_object*, lean_object*, lean_object*, lean_object*, size_t, size_t, lean_object*, lean_object*, lean_object*, lean_object*, lean_object*, lean_object*, lean_object*, lean_object*); lean_object* l_Array_idxOf_x3f___at_Lean_Meta_getElimExprInfo___spec__1(lean_object*, lean_object*); -static lean_object* l_initFn____x40_Lean_Meta_Tactic_FunInd___hyg_23129____closed__3; LEAN_EXPORT lean_object* l_Lean_Meta_withLetDecl___at_Lean_Tactic_FunInd_buildInductionBody___spec__10___rarg(lean_object*, lean_object*, lean_object*, lean_object*, uint8_t, lean_object*, lean_object*, lean_object*, lean_object*, lean_object*, lean_object*, lean_object*); LEAN_EXPORT lean_object* l_Lean_Elab_FixedParamPerm_forallTelescope___at_Lean_Tactic_FunInd_deriveInductionStructural_doRealize___spec__25(lean_object*, lean_object*, lean_object*, lean_object*, lean_object*, lean_object*, lean_object*, lean_object*); LEAN_EXPORT lean_object* l_Lean_MVarId_assign___at_Lean_Tactic_FunInd_abstractIndependentMVars___spec__3___boxed(lean_object*, lean_object*, lean_object*, lean_object*, lean_object*, lean_object*, lean_object*); @@ -663,11 +675,14 @@ LEAN_EXPORT lean_object* l_Array_mapMUnsafe_map___at_Lean_Tactic_FunInd_abstract lean_object* l_Lean_Meta_check(lean_object*, lean_object*, lean_object*, lean_object*, lean_object*, lean_object*); LEAN_EXPORT lean_object* l_Lean_Tactic_FunInd_rwIfWith___lambda__5___boxed(lean_object*, lean_object*, lean_object*, 
lean_object*, lean_object*, lean_object*, lean_object*); lean_object* l_Lean_Meta_Match_MatcherInfo_numAlts(lean_object*); +static lean_object* l_Lean_Tactic_FunInd_initFn____x40_Lean_Meta_Tactic_FunInd___hyg_23481____lambda__2___closed__10; +static lean_object* l_Lean_Tactic_FunInd_initFn____x40_Lean_Meta_Tactic_FunInd___hyg_23481____closed__3; LEAN_EXPORT lean_object* l_Lean_throwError___at_Lean_Tactic_FunInd_buildInductionBody___spec__29(lean_object*, lean_object*, lean_object*, lean_object*, lean_object*, lean_object*, lean_object*, lean_object*); static lean_object* l_Lean_Tactic_FunInd_deriveCases___lambda__12___closed__2; LEAN_EXPORT lean_object* l_List_forIn_x27_loop___at_Lean_Tactic_FunInd_foldAndCollect___spec__7___boxed(lean_object*, lean_object*, lean_object*, lean_object*, lean_object*, lean_object*, lean_object*, lean_object*, lean_object*, lean_object*, lean_object*, lean_object*); LEAN_EXPORT lean_object* l_Lean_Tactic_FunInd_M_branch(lean_object*); LEAN_EXPORT lean_object* l_Lean_Tactic_FunInd_lambdaTelescope1___at_Lean_Tactic_FunInd_foldAndCollect___spec__15___rarg___lambda__2___boxed(lean_object*, lean_object*, lean_object*, lean_object*, lean_object*, lean_object*, lean_object*, lean_object*, lean_object*); +LEAN_EXPORT lean_object* l_Lean_Tactic_FunInd_buildInductionBody___lambda__40(lean_object*, lean_object*, lean_object*, lean_object*, lean_object*, lean_object*, lean_object*, lean_object*, lean_object*); LEAN_EXPORT lean_object* l_Lean_Tactic_FunInd_foldAndCollect___lambda__10(lean_object*, lean_object*, lean_object*, lean_object*, lean_object*, lean_object*, lean_object*, lean_object*, lean_object*, lean_object*, lean_object*); LEAN_EXPORT lean_object* l_Lean_Tactic_FunInd_deriveCases___lambda__1(uint8_t, lean_object*, lean_object*, lean_object*, lean_object*, lean_object*, lean_object*, lean_object*, lean_object*); static lean_object* l_List_forIn_x27_loop___at_Lean_Tactic_FunInd_foldAndCollect___spec__6___closed__4; @@ -684,11 +699,11 @@ LEAN_EXPORT lean_object* l_Lean_Tactic_FunInd_foldAndCollect___lambda__21___boxe static lean_object* l_Lean_withTraceNode___at_Lean_Tactic_FunInd_foldAndCollect___spec__17___lambda__3___closed__2; static lean_object* l_Lean_Meta_MatcherApp_transform___at_Lean_Tactic_FunInd_buildInductionBody___spec__12___closed__4; static lean_object* l_Lean_Tactic_FunInd_deriveCases___lambda__3___closed__4; -static lean_object* l_initFn____x40_Lean_Meta_Tactic_FunInd___hyg_23129____closed__1; LEAN_EXPORT lean_object* l_Lean_PersistentArray_forIn___at_Lean_Tactic_FunInd_cleanupAfter_cleanupAfter_x3f___spec__1(lean_object*, lean_object*, uint8_t, lean_object*, lean_object*, lean_object*, lean_object*, lean_object*, lean_object*, lean_object*, lean_object*); extern uint8_t l_Lean_instInhabitedBinderInfo; LEAN_EXPORT lean_object* l_Lean_Tactic_FunInd_deriveUnaryInduction_doRealize___lambda__3(lean_object*, lean_object*, lean_object*, lean_object*, lean_object*, lean_object*, lean_object*, lean_object*, lean_object*, lean_object*); static lean_object* l_Array_forIn_x27Unsafe_loop___at_Lean_Tactic_FunInd_buildInductionBody___spec__27___lambda__2___closed__2; +lean_object* l_Lean_Meta_mkAbsurd(lean_object*, lean_object*, lean_object*, lean_object*, lean_object*, lean_object*, lean_object*, lean_object*); LEAN_EXPORT lean_object* l_Lean_Tactic_FunInd_setNaryFunIndInfo___boxed(lean_object*, lean_object*, lean_object*, lean_object*, lean_object*, lean_object*, lean_object*, lean_object*, lean_object*); static lean_object* 
l_Std_Range_forIn_x27_loop___at_Lean_Tactic_FunInd_setNaryFunIndInfo___spec__2___closed__2; LEAN_EXPORT lean_object* l_Lean_Tactic_FunInd_foldAndCollect___lambda__1(lean_object*, lean_object*, lean_object*, lean_object*, lean_object*, lean_object*, lean_object*, lean_object*, lean_object*, lean_object*); @@ -698,13 +713,13 @@ static lean_object* l_Lean_Tactic_FunInd_deriveInduction___lambda__1___closed__1 static lean_object* l_Array_forIn_x27Unsafe_loop___at_Lean_Tactic_FunInd_deriveInductionStructural_doRealize___spec__19___lambda__3___closed__4; static lean_object* l_Lean_Tactic_FunInd_deduplicateIHs___closed__1; lean_object* lean_st_ref_get(lean_object*, lean_object*); -static lean_object* l_initFn____x40_Lean_Meta_Tactic_FunInd___hyg_23129____closed__4; LEAN_EXPORT lean_object* l_Lean_Tactic_FunInd_M_exec___rarg(lean_object*, lean_object*, lean_object*, lean_object*, lean_object*, lean_object*); static lean_object* l_Lean_Tactic_FunInd_isFunInductName___lambda__2___closed__2; static lean_object* l_Lean_Tactic_FunInd_foldAndCollect___lambda__17___closed__2; static lean_object* l_Lean_Meta_matchMatcherApp_x3f___at_Lean_Tactic_FunInd_foldAndCollect___spec__1___lambda__2___closed__1; lean_object* lean_array_pop(lean_object*); static lean_object* l_Lean_Meta_withLocalDecls_loop___at_Lean_Tactic_FunInd_abstractIndependentMVars___spec__10___rarg___closed__2; +static lean_object* l_initFn____x40_Lean_Meta_Tactic_FunInd___hyg_23761____closed__8; LEAN_EXPORT lean_object* l___private_Lean_Meta_Match_MatcherApp_Transform_0__Lean_Meta_MatcherApp_forallAltTelescope_x27___at_Lean_Tactic_FunInd_buildInductionBody___spec__26___rarg___lambda__1(lean_object*, lean_object*, lean_object*, lean_object*, lean_object*, lean_object*, lean_object*, lean_object*, lean_object*, lean_object*, lean_object*, lean_object*, lean_object*); static lean_object* l_Lean_Tactic_FunInd_deriveInductionStructural_doRealize___lambda__16___closed__1; LEAN_EXPORT lean_object* l_Array_forIn_x27Unsafe_loop___at_Lean_Tactic_FunInd_deriveInductionStructural_doRealize___spec__16___lambda__1___boxed(lean_object*, lean_object*, lean_object*, lean_object*, lean_object*, lean_object*, lean_object*, lean_object*, lean_object*, lean_object*, lean_object*, lean_object*); @@ -717,10 +732,10 @@ LEAN_EXPORT lean_object* l_Array_forIn_x27Unsafe_loop___at_Lean_Tactic_FunInd_as static lean_object* l_Lean_Tactic_FunInd_buildInductionBody___lambda__14___closed__1; static lean_object* l_Lean_Tactic_FunInd_deriveInductionStructural_doRealize___lambda__13___closed__2; static lean_object* l_Lean_Tactic_FunInd_buildInductionBody___lambda__36___closed__2; -static lean_object* l_initFn____x40_Lean_Meta_Tactic_FunInd___hyg_23129____closed__7; lean_object* lean_st_mk_ref(lean_object*, lean_object*); lean_object* l_Lean_MVarId_tryClearMany_x27(lean_object*, lean_object*, lean_object*, lean_object*, lean_object*, lean_object*, lean_object*); static lean_object* l_Lean_Tactic_FunInd_withLetDecls___rarg___closed__2; +static lean_object* l_Lean_Tactic_FunInd_buildInductionBody___lambda__41___closed__2; LEAN_EXPORT lean_object* l___private_Lean_Util_Trace_0__Lean_addTraceNode___at_Lean_Tactic_FunInd_foldAndCollect___spec__20___boxed(lean_object*, lean_object*, lean_object*, lean_object*, lean_object*, lean_object*, lean_object*, lean_object*, lean_object*, lean_object*); lean_object* lean_array_to_list(lean_object*); lean_object* l_Lean_Meta_mkEq(lean_object*, lean_object*, lean_object*, lean_object*, lean_object*, lean_object*, lean_object*); @@ -743,7 +758,6 @@ 
LEAN_EXPORT lean_object* l_Lean_Tactic_FunInd_deriveInductionStructural_doRealiz uint8_t l_Lean_checkTraceOption(lean_object*, lean_object*, lean_object*); LEAN_EXPORT lean_object* l_Lean_Tactic_FunInd_cleanupAfter_allHeqToEq___lambda__1___boxed(lean_object*, lean_object*, lean_object*, lean_object*, lean_object*, lean_object*, lean_object*); LEAN_EXPORT lean_object* l_Array_forIn_x27Unsafe_loop___at_Lean_Tactic_FunInd_buildInductionBody___spec__22___lambda__1(size_t, lean_object*, lean_object*, lean_object*, lean_object*, lean_object*, lean_object*, lean_object*, lean_object*, lean_object*); -static lean_object* l_Lean_Tactic_FunInd_initFn____x40_Lean_Meta_Tactic_FunInd___hyg_22849____lambda__2___closed__8; LEAN_EXPORT lean_object* l_Std_Range_forIn_x27_loop___at_Lean_Tactic_FunInd_deriveInductionStructural_doRealize___spec__27___boxed(lean_object*, lean_object*, lean_object*, lean_object*, lean_object*, lean_object*, lean_object*, lean_object*, lean_object*, lean_object*, lean_object*, lean_object*, lean_object*); static lean_object* l_Lean_Tactic_FunInd_deriveInductionStructural_doRealize___lambda__11___closed__1; static lean_object* l_Lean_Tactic_FunInd_isFunInductName___closed__3; @@ -753,7 +767,6 @@ static lean_object* l_List_forIn_x27_loop___at_Lean_Tactic_FunInd_foldAndCollect LEAN_EXPORT lean_object* l_Lean_Tactic_FunInd_deriveInductionStructural_doRealize___lambda__5___boxed(lean_object*, lean_object*, lean_object*, lean_object*, lean_object*, lean_object*, lean_object*, lean_object*, lean_object*); LEAN_EXPORT lean_object* l_Lean_Tactic_FunInd_foldAndCollect___lambda__22(lean_object*, lean_object*, lean_object*, lean_object*, lean_object*, lean_object*, lean_object*, lean_object*); LEAN_EXPORT lean_object* l_Lean_Tactic_FunInd_unpackMutualInduction___boxed(lean_object*, lean_object*, lean_object*, lean_object*, lean_object*, lean_object*, lean_object*); -static uint64_t l_Lean_Tactic_FunInd_initFn____x40_Lean_Meta_Tactic_FunInd___hyg_22849____lambda__2___closed__3; static lean_object* l_Lean_Tactic_FunInd_deriveInductionStructural_doRealize___lambda__13___closed__3; static lean_object* l_Lean_Tactic_FunInd_buildInductionBody___lambda__12___closed__2; static lean_object* l_Lean_Elab_Structural_Positions_mapMwith___at_Lean_Tactic_FunInd_deriveInductionStructural_doRealize___spec__12___closed__5; @@ -775,6 +788,7 @@ lean_object* l_Lean_Elab_FixedParamPerm_instantiateForall(lean_object*, lean_obj LEAN_EXPORT lean_object* l_panic___at_Lean_Tactic_FunInd_foldAndCollect___spec__5(lean_object*, lean_object*, lean_object*, lean_object*, lean_object*, lean_object*, lean_object*); LEAN_EXPORT lean_object* l_Lean_Tactic_FunInd_foldAndCollect___lambda__15(lean_object*, lean_object*, lean_object*, lean_object*, lean_object*, lean_object*, lean_object*, lean_object*); static lean_object* l_Lean_Tactic_FunInd_foldAndCollect___lambda__12___closed__3; +lean_object* l_Lean_Expr_constName_x21(lean_object*); static lean_object* l___private_Lean_Util_Trace_0__Lean_getResetTraces___at_Lean_Tactic_FunInd_foldAndCollect___spec__19___closed__3; extern lean_object* l_Lean_instInhabitedExpr; static lean_object* l_Lean_logAt___at_Lean_Tactic_FunInd_buildInductionBody___spec__25___closed__2; @@ -783,7 +797,9 @@ LEAN_EXPORT lean_object* l_Array_mapMUnsafe_map___at_Lean_Tactic_FunInd_buildInd LEAN_EXPORT lean_object* l_Lean_Tactic_FunInd_foldAndCollect___lambda__28___boxed(lean_object*, lean_object*, lean_object*, lean_object*, lean_object*, lean_object*, lean_object*, lean_object*, lean_object*, lean_object*, 
lean_object*); LEAN_EXPORT lean_object* l_Lean_Tactic_FunInd_withLetDecls_go(lean_object*); LEAN_EXPORT lean_object* l_Lean_Tactic_FunInd_deriveInductionStructural_doRealize___lambda__3___boxed(lean_object*, lean_object*, lean_object*, lean_object*, lean_object*, lean_object*, lean_object*, lean_object*); +static lean_object* l_initFn____x40_Lean_Meta_Tactic_FunInd___hyg_23761____closed__3; LEAN_EXPORT lean_object* l_Lean_Tactic_FunInd_M_exec(lean_object*); +static lean_object* l_initFn____x40_Lean_Meta_Tactic_FunInd___hyg_23761____closed__12; LEAN_EXPORT lean_object* l_Lean_Tactic_FunInd_deduplicateIHs(lean_object*, lean_object*, lean_object*, lean_object*, lean_object*, lean_object*); static lean_object* l_Lean_Tactic_FunInd_cleanPackedArgs___closed__1; static lean_object* l_Lean_logAt___at_Lean_Tactic_FunInd_buildInductionBody___spec__25___lambda__2___closed__3; @@ -795,6 +811,7 @@ lean_object* l_Lean_FVarId_getType(lean_object*, lean_object*, lean_object*, lea static lean_object* l_Lean_Tactic_FunInd_deriveUnaryInduction_doRealize___lambda__20___closed__5; LEAN_EXPORT lean_object* l_Array_forIn_x27Unsafe_loop___at_Lean_Tactic_FunInd_deriveInductionStructural_doRealize___spec__11___lambda__2___boxed(lean_object*, lean_object*, lean_object*, lean_object*, lean_object*, lean_object*, lean_object*, lean_object*, lean_object*, lean_object*, lean_object*, lean_object*, lean_object*, lean_object*, lean_object*); static lean_object* l_Lean_Meta_withLocalDecls_loop___at_Lean_Tactic_FunInd_abstractIndependentMVars___spec__10___rarg___closed__3; +static lean_object* l_initFn____x40_Lean_Meta_Tactic_FunInd___hyg_23761____closed__4; LEAN_EXPORT lean_object* l_MonadExcept_ofExcept___at_Lean_Tactic_FunInd_buildInductionCase___spec__15___boxed(lean_object*, lean_object*, lean_object*, lean_object*, lean_object*, lean_object*, lean_object*, lean_object*); lean_object* l_panic___at_Lean_Expr_appFn_x21___spec__1(lean_object*); LEAN_EXPORT lean_object* l_Lean_throwError___at_Lean_Tactic_FunInd_deriveUnaryInduction_doRealize___spec__3(lean_object*, lean_object*, lean_object*, lean_object*, lean_object*, lean_object*); @@ -805,6 +822,9 @@ LEAN_EXPORT lean_object* l_Lean_Tactic_FunInd_lambdaTelescope1___at_Lean_Tactic_ LEAN_EXPORT lean_object* l_Lean_Tactic_FunInd_cleanupAfter_cleanupAfter_x3f(lean_object*, lean_object*, uint8_t, lean_object*, lean_object*, lean_object*, lean_object*, lean_object*); LEAN_EXPORT lean_object* l_Lean_Meta_forallTelescope___at_Lean_Tactic_FunInd_deriveUnaryInduction_doRealize___spec__2___rarg___boxed(lean_object*, lean_object*, lean_object*, lean_object*, lean_object*, lean_object*, lean_object*, lean_object*, lean_object*, lean_object*); LEAN_EXPORT lean_object* l_Lean_Tactic_FunInd_foldAndCollect___lambda__16___boxed(lean_object*, lean_object*, lean_object*, lean_object*, lean_object*, lean_object*, lean_object*); +static lean_object* l_initFn____x40_Lean_Meta_Tactic_FunInd___hyg_23761____closed__9; +LEAN_EXPORT lean_object* l_Lean_Tactic_FunInd_initFn____x40_Lean_Meta_Tactic_FunInd___hyg_23481____lambda__2(lean_object*, lean_object*, lean_object*, lean_object*, lean_object*); +LEAN_EXPORT lean_object* l_Lean_Tactic_FunInd_initFn____x40_Lean_Meta_Tactic_FunInd___hyg_23481____lambda__1(lean_object*, lean_object*, lean_object*, lean_object*); static lean_object* l_Lean_Tactic_FunInd_foldAndCollect___lambda__1___closed__4; LEAN_EXPORT lean_object* l_Lean_Meta_MatcherApp_transform___at_Lean_Tactic_FunInd_buildInductionBody___spec__12___lambda__6(lean_object*, lean_object*, size_t, 
lean_object*, lean_object*, lean_object*, lean_object*, lean_object*, lean_object*, uint8_t, uint8_t, lean_object*, lean_object*, lean_object*, lean_object*, lean_object*, lean_object*, lean_object*, lean_object*, lean_object*); LEAN_EXPORT lean_object* l_Array_mapMUnsafe_map___at_Lean_Tactic_FunInd_abstractIndependentMVars___spec__8___boxed(lean_object*, lean_object*, lean_object*); @@ -834,7 +854,6 @@ extern lean_object* l_Lean_warningAsError; LEAN_EXPORT lean_object* l_Lean_Tactic_FunInd_deriveInductionStructural_doRealize___lambda__13___boxed(lean_object**); LEAN_EXPORT lean_object* l_Lean_Tactic_FunInd_buildInductionBody___lambda__18(lean_object*, lean_object*, lean_object*, lean_object*, lean_object*, lean_object*, lean_object*, lean_object*, lean_object*, lean_object*, lean_object*, lean_object*, lean_object*, lean_object*, lean_object*); LEAN_EXPORT lean_object* l_Lean_withTraceNode___at_Lean_Tactic_FunInd_buildInductionCase___spec__12___lambda__1(lean_object*, lean_object*, lean_object*, lean_object*, lean_object*, lean_object*, lean_object*, lean_object*, lean_object*, lean_object*, lean_object*, lean_object*, lean_object*); -static lean_object* l_Lean_Tactic_FunInd_initFn____x40_Lean_Meta_Tactic_FunInd___hyg_22849____lambda__2___closed__2; LEAN_EXPORT lean_object* l_Array_forIn_x27Unsafe_loop___at_Lean_Tactic_FunInd_buildInductionBody___spec__22___lambda__2(lean_object*, lean_object*, lean_object*, lean_object*, lean_object*, lean_object*, lean_object*, lean_object*, lean_object*, lean_object*, lean_object*, lean_object*); LEAN_EXPORT lean_object* l_Lean_Tactic_FunInd_cleanPackedArgs___lambda__4(lean_object*, lean_object*, lean_object*, lean_object*, lean_object*, lean_object*, lean_object*, lean_object*); LEAN_EXPORT lean_object* l_Lean_Tactic_FunInd_buildInductionBody___lambda__8(lean_object*, lean_object*, lean_object*, lean_object*, lean_object*, lean_object*, lean_object*, lean_object*, lean_object*, lean_object*, lean_object*, lean_object*, lean_object*, lean_object*, lean_object*, lean_object*, lean_object*, lean_object*); @@ -858,14 +877,12 @@ LEAN_EXPORT lean_object* l_Lean_Tactic_FunInd_unpackMutualInduction_doRealize___ lean_object* l_Lean_Meta_PProdN_packLambdas(lean_object*, lean_object*, lean_object*, lean_object*, lean_object*, lean_object*, lean_object*); lean_object* l_Lean_mkNot(lean_object*); LEAN_EXPORT lean_object* l_Lean_Tactic_FunInd_deriveInductionStructural_doRealize___lambda__6(lean_object*, lean_object*, lean_object*, lean_object*, size_t, lean_object*, lean_object*, lean_object*, lean_object*, lean_object*, lean_object*, lean_object*, lean_object*, lean_object*, lean_object*, lean_object*, lean_object*, lean_object*, lean_object*, lean_object*, lean_object*, lean_object*, lean_object*, lean_object*, lean_object*); -LEAN_EXPORT lean_object* l_initFn____x40_Lean_Meta_Tactic_FunInd___hyg_23129_(lean_object*); uint8_t l_Lean_LocalDecl_isLet(lean_object*); static lean_object* l_Lean_Tactic_FunInd_buildInductionBody___lambda__4___closed__3; -static lean_object* l_Lean_Tactic_FunInd_initFn____x40_Lean_Meta_Tactic_FunInd___hyg_22849____lambda__2___closed__4; +static lean_object* l_Lean_Tactic_FunInd_initFn____x40_Lean_Meta_Tactic_FunInd___hyg_23481____lambda__2___closed__2; static lean_object* l_Lean_Expr_withAppAux___at_Lean_Tactic_FunInd_unpackMutualInduction_doRealize___spec__1___lambda__3___closed__1; static lean_object* l_Lean_Tactic_FunInd_inProdLambdaLastArg___closed__2; static lean_object* 
l_Lean_Meta_MatcherApp_transform___at_Lean_Tactic_FunInd_buildInductionBody___spec__12___lambda__5___closed__1; -static lean_object* l_initFn____x40_Lean_Meta_Tactic_FunInd___hyg_23129____closed__8; LEAN_EXPORT lean_object* l_Array_forIn_x27Unsafe_loop___at_Lean_Tactic_FunInd_deriveInductionStructural_doRealize___spec__16___lambda__4(lean_object*, lean_object*, lean_object*, lean_object*, lean_object*, lean_object*, lean_object*, lean_object*, lean_object*, lean_object*, lean_object*, lean_object*, lean_object*, lean_object*, lean_object*); LEAN_EXPORT lean_object* l_Lean_Meta_MatcherApp_transform___at_Lean_Tactic_FunInd_buildInductionBody___spec__12___lambda__4___boxed(lean_object**); LEAN_EXPORT lean_object* l_Array_mapMUnsafe_map___at_Lean_Tactic_FunInd_deriveInductionStructural_doRealize___spec__1(size_t, size_t, lean_object*, lean_object*, lean_object*, lean_object*, lean_object*, lean_object*); @@ -888,9 +905,11 @@ lean_object* l_Lean_Expr_appFn_x21(lean_object*); LEAN_EXPORT lean_object* l_Array_mapMUnsafe_map___at_Lean_Tactic_FunInd_deriveInductionStructural_doRealize___spec__7(lean_object*, lean_object*, lean_object*, size_t, size_t, lean_object*); LEAN_EXPORT lean_object* l_Lean_Meta_withLCtx___at_Lean_Tactic_FunInd_buildInductionCase___spec__8___rarg(lean_object*, lean_object*, lean_object*, lean_object*, lean_object*, lean_object*, lean_object*, lean_object*, lean_object*, lean_object*); LEAN_EXPORT lean_object* l_panic___at_Lean_Tactic_FunInd_withLetDecls___spec__1___rarg(lean_object*, lean_object*, lean_object*, lean_object*, lean_object*, lean_object*); +static lean_object* l_Lean_Tactic_FunInd_buildInductionBody___lambda__41___closed__4; LEAN_EXPORT lean_object* l_Lean_throwError___at_Lean_Tactic_FunInd_buildInductionBody___spec__6(lean_object*, lean_object*, lean_object*, lean_object*, lean_object*, lean_object*, lean_object*, lean_object*); static uint64_t l_Lean_Tactic_FunInd_foldAndCollect___lambda__10___closed__1; LEAN_EXPORT lean_object* l_Array_forIn_x27Unsafe_loop___at_Lean_Tactic_FunInd_cleanupAfter_allHeqToEq___spec__4(lean_object*, lean_object*, lean_object*, lean_object*, lean_object*, size_t, size_t, lean_object*, lean_object*, lean_object*, lean_object*, lean_object*, lean_object*); +static lean_object* l_initFn____x40_Lean_Meta_Tactic_FunInd___hyg_23761____closed__2; static lean_object* l_Lean_Tactic_FunInd_deriveCases___lambda__10___closed__3; static lean_object* l___private_Lean_Meta_Tactic_FunInd_0__Lean_Tactic_FunInd_elimTypeAnnotations___lambda__1___closed__1; extern lean_object* l_Lean_Meta_instMonadMetaM; @@ -917,6 +936,7 @@ LEAN_EXPORT lean_object* l_Lean_Meta_lambdaTelescope___at_Lean_Tactic_FunInd_bui static lean_object* l_Lean_Expr_withAppAux___at_Lean_Tactic_FunInd_unpackMutualInduction_doRealize___spec__1___lambda__3___closed__2; static lean_object* l_Lean_Tactic_FunInd_deriveInductionStructural_doRealize___lambda__18___closed__1; LEAN_EXPORT lean_object* l_Lean_Tactic_FunInd_abstractIndependentMVars___lambda__1(lean_object*); +lean_object* l_Lean_Meta_mkFalseElim(lean_object*, lean_object*, lean_object*, lean_object*, lean_object*, lean_object*, lean_object*); static lean_object* l_Lean_Tactic_FunInd_deriveUnaryInduction_doRealize___lambda__13___closed__2; LEAN_EXPORT lean_object* l_Array_forIn_x27Unsafe_loop___at_Lean_Tactic_FunInd_projectMutualInduct___spec__1___lambda__1(lean_object*, lean_object*, lean_object*, lean_object*, lean_object*, lean_object*, lean_object*, lean_object*, lean_object*, lean_object*, lean_object*); static lean_object* 
l_Lean_Elab_Structural_Positions_groupAndSort___at_Lean_Tactic_FunInd_deriveInductionStructural_doRealize___spec__5___closed__4; @@ -993,6 +1013,7 @@ static lean_object* l_Lean_Tactic_FunInd_foldAndCollect___lambda__24___closed__1 static lean_object* l_Lean_Tactic_FunInd_buildInductionBody___lambda__26___closed__4; LEAN_EXPORT lean_object* l_Lean_Tactic_FunInd_buildInductionBody___lambda__8___boxed(lean_object**); LEAN_EXPORT lean_object* l_Lean_Meta_MatcherApp_transform___at_Lean_Tactic_FunInd_buildInductionBody___spec__12___lambda__3___boxed(lean_object**); +static lean_object* l_Lean_Tactic_FunInd_initFn____x40_Lean_Meta_Tactic_FunInd___hyg_23481____lambda__3___closed__3; static lean_object* l_Lean_Tactic_FunInd_deriveUnaryInduction_doRealize___lambda__17___closed__2; LEAN_EXPORT lean_object* l_Array_forIn_x27Unsafe_loop___at_Lean_Tactic_FunInd_deriveInductionStructural_doRealize___spec__16___lambda__2(lean_object*, lean_object*, lean_object*, lean_object*, lean_object*, lean_object*, lean_object*, lean_object*, lean_object*, lean_object*, lean_object*, lean_object*, lean_object*, lean_object*, lean_object*, lean_object*); LEAN_EXPORT lean_object* l_Lean_Tactic_FunInd_buildInductionCase___lambda__1___boxed(lean_object*, lean_object*, lean_object*, lean_object*, lean_object*, lean_object*, lean_object*, lean_object*, lean_object*); @@ -1022,7 +1043,7 @@ static lean_object* l_Lean_Tactic_FunInd_foldAndCollect___lambda__9___closed__7; LEAN_EXPORT lean_object* l_Lean_Tactic_FunInd_lambdaTelescope1___rarg___lambda__1(lean_object*, lean_object*, lean_object*, lean_object*); lean_object* l_Lean_Meta_lambdaBoundedTelescope___at_Lean_Elab_TerminationMeasure_delab___spec__1___rarg(lean_object*, lean_object*, lean_object*, uint8_t, lean_object*, lean_object*, lean_object*, lean_object*, lean_object*); static lean_object* l_Lean_Tactic_FunInd_buildInductionBody___lambda__4___closed__4; -static lean_object* l_Lean_Tactic_FunInd_initFn____x40_Lean_Meta_Tactic_FunInd___hyg_22849____lambda__3___closed__3; +static lean_object* l_Lean_Tactic_FunInd_initFn____x40_Lean_Meta_Tactic_FunInd___hyg_23481____lambda__2___closed__9; LEAN_EXPORT lean_object* l_Lean_Tactic_FunInd_deriveInduction(uint8_t, lean_object*, lean_object*, lean_object*, lean_object*, lean_object*, lean_object*); LEAN_EXPORT lean_object* l_Lean_Tactic_FunInd_withRewrittenMotiveArg(lean_object*, lean_object*, lean_object*, lean_object*, lean_object*, lean_object*, lean_object*, lean_object*, lean_object*, lean_object*); lean_object* l_Lean_Meta_heqToEq(lean_object*, lean_object*, uint8_t, lean_object*, lean_object*, lean_object*, lean_object*, lean_object*); @@ -1030,6 +1051,7 @@ static lean_object* l_Lean_Tactic_FunInd_lambdaTelescope1___rarg___lambda__2___c LEAN_EXPORT lean_object* l_Array_mapMUnsafe_map___at_Lean_Tactic_FunInd_M_localMapM___spec__2(lean_object*, size_t, size_t, lean_object*, lean_object*, lean_object*, lean_object*, lean_object*, lean_object*); LEAN_EXPORT lean_object* l_MonadExcept_ofExcept___at_Lean_Tactic_FunInd_foldAndCollect___spec__21(lean_object*, lean_object*, lean_object*, lean_object*, lean_object*, lean_object*, lean_object*); uint8_t l_Lean_Name_hasMacroScopes(lean_object*); +static lean_object* l_initFn____x40_Lean_Meta_Tactic_FunInd___hyg_23761____closed__6; lean_object* lean_array_fget(lean_object*, lean_object*); static lean_object* l_Lean_Tactic_FunInd_lambdaTelescope1___rarg___lambda__2___closed__1; LEAN_EXPORT lean_object* l_Lean_Tactic_FunInd_foldAndCollect___lambda__12(lean_object*, lean_object*, 
lean_object*, lean_object*, lean_object*, lean_object*, lean_object*, lean_object*, lean_object*); @@ -1051,7 +1073,6 @@ LEAN_EXPORT lean_object* l_Lean_Tactic_FunInd_cleanPackedArgs___lambda__3(lean_o LEAN_EXPORT lean_object* l_Lean_Tactic_FunInd_unpackMutualInduction_doRealize___lambda__3(lean_object*, lean_object*, lean_object*, lean_object*, lean_object*, lean_object*, lean_object*, lean_object*, lean_object*, lean_object*, lean_object*); lean_object* l_Lean_throwError___rarg(lean_object*, lean_object*, lean_object*); LEAN_EXPORT lean_object* l_StateT_bind___at_Lean_Tactic_FunInd_buildInductionCase___spec__6___rarg(lean_object*, lean_object*, lean_object*, lean_object*, lean_object*, lean_object*, lean_object*, lean_object*, lean_object*); -static lean_object* l_Lean_Tactic_FunInd_initFn____x40_Lean_Meta_Tactic_FunInd___hyg_22849____closed__1; LEAN_EXPORT lean_object* l_Lean_Meta_withLocalDecl___at_Lean_Tactic_FunInd_foldAndCollect___spec__10___rarg___lambda__1(lean_object*, lean_object*, lean_object*, lean_object*, lean_object*, lean_object*, lean_object*, lean_object*); LEAN_EXPORT lean_object* l_Array_forIn_x27Unsafe_loop___at_Lean_Tactic_FunInd_buildInductionBody___spec__22___lambda__4(lean_object*, size_t, lean_object*, lean_object*, lean_object*, lean_object*, lean_object*, lean_object*, lean_object*, lean_object*, lean_object*, lean_object*, lean_object*); static lean_object* l_Lean_Tactic_FunInd_buildInductionBody___lambda__29___closed__1; @@ -1064,7 +1085,6 @@ lean_object* l_Lean_Expr_app___override(lean_object*, lean_object*); LEAN_EXPORT lean_object* l_Lean_Tactic_FunInd_buildInductionBody___lambda__9(lean_object*, lean_object*, lean_object*, lean_object*, lean_object*, lean_object*, lean_object*, lean_object*, lean_object*); lean_object* lean_string_length(lean_object*); LEAN_EXPORT lean_object* l_Lean_Tactic_FunInd_rwFun(lean_object*, lean_object*, lean_object*, lean_object*, lean_object*, lean_object*, lean_object*); -static lean_object* l_Lean_Tactic_FunInd_initFn____x40_Lean_Meta_Tactic_FunInd___hyg_22849____lambda__2___closed__5; LEAN_EXPORT lean_object* l_Lean_Tactic_FunInd_buildInductionBody___lambda__35(lean_object*, lean_object*, lean_object*, lean_object*, lean_object*, lean_object*, lean_object*, lean_object*, lean_object*, lean_object*, lean_object*, lean_object*, lean_object*, lean_object*, lean_object*); LEAN_EXPORT lean_object* l_Lean_Tactic_FunInd_mkLambdaFVarsMasked(lean_object*, lean_object*, lean_object*, lean_object*, lean_object*, lean_object*, lean_object*); static lean_object* l_Lean_Tactic_FunInd_deriveUnaryInduction_doRealize___lambda__15___closed__2; @@ -1100,7 +1120,6 @@ LEAN_EXPORT lean_object* l_Lean_Tactic_FunInd_deriveUnaryInduction_doRealize___l static lean_object* l_Lean_Tactic_FunInd_buildInductionBody___lambda__1___closed__2; static lean_object* l_Lean_Tactic_FunInd_rwIfWith___closed__2; LEAN_EXPORT lean_object* l_Lean_Tactic_FunInd_rwIfWith___lambda__5(lean_object*, lean_object*, lean_object*, lean_object*, lean_object*, lean_object*, lean_object*); -static lean_object* l_Lean_Tactic_FunInd_initFn____x40_Lean_Meta_Tactic_FunInd___hyg_22849____lambda__3___closed__2; LEAN_EXPORT lean_object* l_Lean_Tactic_FunInd_unpackMutualInduction_doRealize___lambda__1(uint8_t, lean_object*, lean_object*, lean_object*, lean_object*, lean_object*, lean_object*, lean_object*, lean_object*); LEAN_EXPORT lean_object* l_Lean_throwError___at_Lean_Tactic_FunInd_buildInductionBody___spec__2(lean_object*, lean_object*, lean_object*, lean_object*, lean_object*, 
lean_object*, lean_object*, lean_object*); LEAN_EXPORT lean_object* l_Array_forIn_x27Unsafe_loop___at_Lean_Tactic_FunInd_buildInductionBody___spec__27___boxed(lean_object*, lean_object*, lean_object*, lean_object*, lean_object*, lean_object*, lean_object*, lean_object*, lean_object*, lean_object*, lean_object*, lean_object*, lean_object*, lean_object*, lean_object*, lean_object*); @@ -1141,7 +1160,6 @@ LEAN_EXPORT lean_object* l_Lean_Meta_MatcherApp_transform___at_Lean_Tactic_FunIn static lean_object* l_Lean_Tactic_FunInd_deriveUnaryInduction_doRealize___lambda__20___closed__6; static lean_object* l_Lean_Tactic_FunInd_buildInductionBody___lambda__1___closed__6; LEAN_EXPORT lean_object* l___private_Lean_Meta_Match_MatcherApp_Transform_0__Lean_Meta_MatcherApp_forallAltTelescope_x27___at_Lean_Tactic_FunInd_buildInductionBody___spec__26(lean_object*); -static lean_object* l_Lean_Tactic_FunInd_initFn____x40_Lean_Meta_Tactic_FunInd___hyg_22849____lambda__2___closed__10; lean_object* l_Lean_Meta_unfoldDefinition_x3f(lean_object*, uint8_t, lean_object*, lean_object*, lean_object*, lean_object*, lean_object*); static lean_object* l_Lean_Tactic_FunInd_unpackMutualInduction_doRealize___lambda__7___closed__3; LEAN_EXPORT lean_object* l_Lean_throwError___at_Lean_Tactic_FunInd_deriveCases___spec__3___boxed(lean_object*, lean_object*, lean_object*, lean_object*, lean_object*, lean_object*, lean_object*, lean_object*); @@ -1152,6 +1170,7 @@ static lean_object* l_Lean_Tactic_FunInd_deriveCases___lambda__3___closed__3; LEAN_EXPORT lean_object* l_Std_Range_forIn_x27_loop___at_Lean_Tactic_FunInd_deriveInductionStructural_doRealize___spec__27(lean_object*, lean_object*, lean_object*, lean_object*, lean_object*, lean_object*, lean_object*, lean_object*, lean_object*, lean_object*, lean_object*, lean_object*, lean_object*); LEAN_EXPORT lean_object* l_Lean_Meta_withErasedFVars___at_Lean_Tactic_FunInd_deriveInductionStructural_doRealize___spec__20(lean_object*, lean_object*, lean_object*, lean_object*, lean_object*, lean_object*, lean_object*); lean_object* l_Lean_FVarId_getUserName(lean_object*, lean_object*, lean_object*, lean_object*, lean_object*, lean_object*); +lean_object* l_Lean_Meta_matchConstructorApp_x3f(lean_object*, lean_object*, lean_object*, lean_object*, lean_object*, lean_object*); static lean_object* l_Lean_Tactic_FunInd_rwIfWith___closed__3; LEAN_EXPORT lean_object* l_Array_forIn_x27Unsafe_loop___at_Lean_Tactic_FunInd_maskArray___spec__1___rarg___boxed(lean_object*, lean_object*, lean_object*, lean_object*, lean_object*, lean_object*); uint8_t l_Lean_Expr_isConstOf(lean_object*, lean_object*); @@ -1189,6 +1208,7 @@ lean_object* lean_panic_fn(lean_object*, lean_object*); LEAN_EXPORT lean_object* l_Lean_Tactic_FunInd_deriveUnaryInduction_doRealize___lambda__18(lean_object*, lean_object*, lean_object*, uint8_t, lean_object*, lean_object*, lean_object*, lean_object*, lean_object*, lean_object*, lean_object*); LEAN_EXPORT lean_object* l_Lean_Tactic_FunInd_isFunInductName___lambda__1___boxed(lean_object*); LEAN_EXPORT lean_object* l_Lean_Tactic_FunInd_isFunCasesName___boxed(lean_object*, lean_object*); +static lean_object* l_initFn____x40_Lean_Meta_Tactic_FunInd___hyg_23761____closed__5; LEAN_EXPORT lean_object* l_Array_forIn_x27Unsafe_loop___at_Lean_Tactic_FunInd_assertIHs___spec__1(lean_object*, lean_object*, lean_object*, size_t, size_t, lean_object*, lean_object*, lean_object*, lean_object*, lean_object*, lean_object*); LEAN_EXPORT lean_object* 
l_Lean_Tactic_FunInd_deriveInduction___lambda__1(lean_object*, uint8_t, lean_object*, lean_object*, lean_object*, lean_object*, lean_object*); LEAN_EXPORT lean_object* l_Lean_Tactic_FunInd_buildInductionBody___lambda__12(lean_object*, lean_object*, lean_object*, lean_object*, lean_object*, lean_object*, lean_object*, lean_object*); @@ -1211,7 +1231,9 @@ LEAN_EXPORT lean_object* l_Lean_Meta_withLocalDecls_loop___at_Lean_Tactic_FunInd static lean_object* l_Lean_Tactic_FunInd_buildInductionBody___lambda__1___closed__3; static lean_object* l_Lean_Tactic_FunInd_deriveInductionStructural_doRealize___lambda__14___closed__1; LEAN_EXPORT lean_object* l_Lean_Tactic_FunInd_deriveInductionStructural_doRealize___lambda__17___boxed(lean_object**); +static lean_object* l_initFn____x40_Lean_Meta_Tactic_FunInd___hyg_23761____closed__7; static lean_object* l_panic___at_Lean_Tactic_FunInd_buildInductionBody___spec__7___closed__1; +static lean_object* l_initFn____x40_Lean_Meta_Tactic_FunInd___hyg_23761____closed__11; LEAN_EXPORT lean_object* l_Lean_Elab_Structural_Positions_mapMwith___at_Lean_Tactic_FunInd_deriveInductionStructural_doRealize___spec__12(lean_object*, lean_object*, lean_object*, lean_object*, lean_object*, lean_object*, lean_object*, lean_object*, lean_object*, lean_object*); static lean_object* l_Lean_Expr_withAppAux___at_Lean_Tactic_FunInd_unpackMutualInduction_doRealize___spec__1___lambda__2___closed__1; LEAN_EXPORT lean_object* l_Lean_Expr_withAppAux___at_Lean_Tactic_FunInd_unpackMutualInduction_doRealize___spec__1(uint8_t, lean_object*, lean_object*, lean_object*, lean_object*, lean_object*, lean_object*, lean_object*, lean_object*, lean_object*, lean_object*, lean_object*); @@ -1219,6 +1241,7 @@ static lean_object* l_Lean_Tactic_FunInd_foldAndCollect___lambda__28___closed__2 LEAN_EXPORT lean_object* l_Lean_Tactic_FunInd_deriveInductionStructural_doRealize___lambda__5(lean_object*, lean_object*, lean_object*, lean_object*, lean_object*, lean_object*, lean_object*, lean_object*, lean_object*); lean_object* l_Lean_Meta_Simp_mkCongr(lean_object*, lean_object*, lean_object*, lean_object*, lean_object*, lean_object*, lean_object*); static lean_object* l_Lean_Tactic_FunInd_M_branch___rarg___closed__1; +LEAN_EXPORT lean_object* l_Lean_Tactic_FunInd_initFn____x40_Lean_Meta_Tactic_FunInd___hyg_23481____lambda__1___boxed(lean_object*, lean_object*, lean_object*, lean_object*); lean_object* l_Lean_Meta_PProdN_mk(lean_object*, lean_object*, lean_object*, lean_object*, lean_object*, lean_object*, lean_object*); LEAN_EXPORT lean_object* l_Lean_Tactic_FunInd_deriveInductionStructural_doRealize___lambda__12(lean_object*, lean_object*, lean_object*, size_t, uint8_t, lean_object*, lean_object*, lean_object*, lean_object*, lean_object*, lean_object*, lean_object*, lean_object*, lean_object*, lean_object*, lean_object*, lean_object*, lean_object*, lean_object*, lean_object*, lean_object*, lean_object*, lean_object*, lean_object*, lean_object*, lean_object*, lean_object*, lean_object*); LEAN_EXPORT lean_object* l_Array_foldlMUnsafe_fold___at_Lean_Tactic_FunInd_deriveInductionStructural_doRealize___spec__6(lean_object*, lean_object*, lean_object*, lean_object*, lean_object*, size_t, size_t, lean_object*); @@ -1226,11 +1249,11 @@ static lean_object* l_Lean_Tactic_FunInd_foldAndCollect___lambda__9___closed__4; LEAN_EXPORT lean_object* l_Lean_Expr_withAppAux___at_Lean_Tactic_FunInd_rwFun___spec__3___lambda__1(lean_object*); LEAN_EXPORT lean_object* 
l_Array_mapMUnsafe_map___at_Lean_Tactic_FunInd_deriveInductionStructural_doRealize___spec__24(size_t, size_t, lean_object*, lean_object*, lean_object*, lean_object*, lean_object*, lean_object*); static lean_object* l_Lean_Expr_withAppAux___at_Lean_Tactic_FunInd_unpackMutualInduction_doRealize___spec__1___lambda__2___closed__4; -static lean_object* l_Lean_Tactic_FunInd_initFn____x40_Lean_Meta_Tactic_FunInd___hyg_22849____lambda__2___closed__7; LEAN_EXPORT lean_object* l_Array_foldlMUnsafe_fold___at_Lean_Tactic_FunInd_buildInductionCase___spec__11(lean_object*, lean_object*, size_t, size_t, lean_object*); LEAN_EXPORT lean_object* l_Lean_Elab_Structural_Positions_groupAndSort___at_Lean_Tactic_FunInd_deriveInductionStructural_doRealize___spec__5(lean_object*, lean_object*, lean_object*, lean_object*); LEAN_EXPORT lean_object* l_Lean_Tactic_FunInd_cleanPackedArgs___lambda__6(lean_object*, lean_object*, lean_object*, lean_object*, lean_object*, lean_object*, lean_object*); LEAN_EXPORT lean_object* l_Lean_withTraceNode___at_Lean_Tactic_FunInd_buildInductionCase___spec__12(lean_object*, lean_object*, lean_object*, uint8_t, lean_object*, lean_object*, lean_object*, lean_object*, lean_object*, lean_object*, lean_object*, lean_object*); +static uint64_t l_Lean_Tactic_FunInd_initFn____x40_Lean_Meta_Tactic_FunInd___hyg_23481____lambda__2___closed__3; LEAN_EXPORT lean_object* l_Lean_getConstInfo___at_Lean_Tactic_FunInd_buildInductionBody___spec__5___boxed(lean_object*, lean_object*, lean_object*, lean_object*, lean_object*, lean_object*, lean_object*, lean_object*); LEAN_EXPORT lean_object* l_Lean_Tactic_FunInd_foldAndCollect___lambda__19(lean_object*, lean_object*, lean_object*, lean_object*, lean_object*, lean_object*, lean_object*, lean_object*); uint64_t lean_uint64_shift_left(uint64_t, uint64_t); @@ -1247,6 +1270,8 @@ static lean_object* l_Lean_Tactic_FunInd_buildInductionBody___lambda__1___closed LEAN_EXPORT lean_object* l_Lean_Tactic_FunInd_lambdaTelescope1___at_Lean_Tactic_FunInd_buildInductionBody___spec__33___rarg___lambda__1___boxed(lean_object*, lean_object*, lean_object*, lean_object*, lean_object*, lean_object*, lean_object*, lean_object*, lean_object*, lean_object*, lean_object*); static lean_object* l_Lean_Tactic_FunInd_deriveInductionStructural_doRealize___lambda__8___closed__2; LEAN_EXPORT lean_object* l_Lean_Tactic_FunInd_deriveInductionStructural_doRealize___lambda__18(lean_object*, lean_object*, lean_object*, lean_object*, lean_object*, lean_object*, lean_object*, size_t, uint8_t, lean_object*, lean_object*, lean_object*, lean_object*, lean_object*, lean_object*, lean_object*, lean_object*); +static lean_object* l_Lean_Tactic_FunInd_initFn____x40_Lean_Meta_Tactic_FunInd___hyg_23481____lambda__3___closed__4; +static lean_object* l_Lean_Tactic_FunInd_initFn____x40_Lean_Meta_Tactic_FunInd___hyg_23481____lambda__2___closed__4; LEAN_EXPORT lean_object* l_Lean_Meta_matchMatcherApp_x3f___at_Lean_Tactic_FunInd_foldAndCollect___spec__1___lambda__1___boxed(lean_object*, lean_object*, lean_object*, lean_object*, lean_object*, lean_object*, lean_object*); lean_object* l_Lean_Meta_instantiateMVarsIfMVarApp(lean_object*, lean_object*, lean_object*, lean_object*, lean_object*, lean_object*); lean_object* l_Array_back_x21___rarg(lean_object*, lean_object*); @@ -1256,13 +1281,13 @@ LEAN_EXPORT lean_object* l_Array_mapFinIdxM_map___at_Lean_Tactic_FunInd_deriveIn LEAN_EXPORT lean_object* l_Lean_Tactic_FunInd_deriveUnaryInduction_doRealize___lambda__8(lean_object*, lean_object*, lean_object*, 
lean_object*, lean_object*, lean_object*, lean_object*, lean_object*, lean_object*, lean_object*, lean_object*, lean_object*, lean_object*, lean_object*, lean_object*, lean_object*); lean_object* l_Lean_PersistentHashMap_mkEmptyEntriesArray(lean_object*, lean_object*); lean_object* l_Array_ofSubarray___rarg(lean_object*); +static lean_object* l_Lean_Tactic_FunInd_initFn____x40_Lean_Meta_Tactic_FunInd___hyg_23481____lambda__3___closed__1; static lean_object* l_Lean_Expr_withAppAux___at_Lean_Tactic_FunInd_unpackMutualInduction_doRealize___spec__1___closed__1; static lean_object* l_Array_forIn_x27Unsafe_loop___at_Lean_Tactic_FunInd_buildInductionBody___spec__27___lambda__2___closed__1; LEAN_EXPORT lean_object* l_Lean_Tactic_FunInd_deriveUnaryInduction_doRealize___lambda__5(lean_object*, lean_object*, lean_object*); LEAN_EXPORT lean_object* l_Lean_Tactic_FunInd_withLetDecls_go___rarg___lambda__1(lean_object*, lean_object*, lean_object*, lean_object*, lean_object*, lean_object*, lean_object*, lean_object*, lean_object*, lean_object*, lean_object*, lean_object*); LEAN_EXPORT lean_object* l_Lean_Tactic_FunInd_isFunCasesName___lambda__1___boxed(lean_object*, lean_object*, lean_object*); LEAN_EXPORT lean_object* l_Lean_withTraceNode___at_Lean_Tactic_FunInd_foldAndCollect___spec__17___lambda__2___boxed(lean_object*, lean_object*, lean_object*, lean_object*, lean_object*, lean_object*, lean_object*, lean_object*, lean_object*, lean_object*, lean_object*, lean_object*, lean_object*, lean_object*, lean_object*, lean_object*); -static lean_object* l_Lean_Tactic_FunInd_initFn____x40_Lean_Meta_Tactic_FunInd___hyg_22849____lambda__3___closed__1; LEAN_EXPORT lean_object* l_Lean_Meta_matchMatcherApp_x3f___at_Lean_Tactic_FunInd_buildInductionBody___spec__3___boxed(lean_object*, lean_object*, lean_object*, lean_object*, lean_object*, lean_object*, lean_object*, lean_object*, lean_object*); LEAN_EXPORT lean_object* l_Array_mapMUnsafe_map___at_Lean_Tactic_FunInd_deriveInductionStructural_doRealize___spec__14___boxed(lean_object*, lean_object*, lean_object*, lean_object*, lean_object*); LEAN_EXPORT lean_object* l_Array_mapMUnsafe___at_Lean_Tactic_FunInd_M_localMapM___spec__1(lean_object*, lean_object*, lean_object*, lean_object*, lean_object*, lean_object*, lean_object*); @@ -1292,10 +1317,10 @@ static lean_object* l_Lean_Tactic_FunInd_buildInductionBody___lambda__4___closed static lean_object* l_Lean_getConstInfo___at_Lean_Tactic_FunInd_foldAndCollect___spec__3___closed__4; lean_object* l_Lean_instantiateMVarsCore(lean_object*, lean_object*); LEAN_EXPORT lean_object* l_Lean_Tactic_FunInd_isFunInductName___lambda__2(lean_object*, lean_object*, lean_object*); +extern lean_object* l_Lean_noConfusionExt; LEAN_EXPORT lean_object* l_Lean_Meta_getMatcherInfo_x3f___at_Lean_Tactic_FunInd_foldAndCollect___spec__2___boxed(lean_object*, lean_object*, lean_object*, lean_object*, lean_object*, lean_object*, lean_object*); static lean_object* l_Lean_Tactic_FunInd_cleanPackedArgs___lambda__5___closed__2; LEAN_EXPORT lean_object* l_Array_forIn_x27Unsafe_loop___at_Lean_Tactic_FunInd_deriveInductionStructural_doRealize___spec__16___lambda__1(lean_object*, lean_object*, lean_object*, lean_object*, lean_object*, lean_object*, lean_object*, lean_object*, lean_object*, lean_object*, lean_object*, lean_object*); -static lean_object* l_initFn____x40_Lean_Meta_Tactic_FunInd___hyg_23129____closed__6; lean_object* l_Lean_getConstInfoDefn___at___private_Lean_Elab_PreDefinition_Eqns_0__Lean_Elab_Eqns_unfoldThmType___spec__1(lean_object*, 
lean_object*, lean_object*, lean_object*, lean_object*, lean_object*); LEAN_EXPORT lean_object* l_Array_forIn_x27Unsafe_loop___at_Lean_Tactic_FunInd_cleanupAfter_allHeqToEq___spec__5(lean_object*, lean_object*, lean_object*, lean_object*, lean_object*, size_t, size_t, lean_object*, lean_object*, lean_object*, lean_object*, lean_object*, lean_object*); lean_object* l_Lean_Meta_PProdN_stripProjs(lean_object*); @@ -1309,7 +1334,6 @@ LEAN_EXPORT lean_object* l_Lean_Tactic_FunInd_deriveUnaryInduction_doRealize___l LEAN_EXPORT lean_object* l_Array_foldlMUnsafe_fold___at_Lean_Tactic_FunInd_deriveInductionStructural_doRealize___spec__6___boxed(lean_object*, lean_object*, lean_object*, lean_object*, lean_object*, lean_object*, lean_object*, lean_object*); static lean_object* l_Lean_Elab_Structural_Positions_mapMwith___at_Lean_Tactic_FunInd_deriveInductionStructural_doRealize___spec__12___closed__4; LEAN_EXPORT lean_object* l_Lean_withTraceNode___at_Lean_Tactic_FunInd_buildInductionCase___spec__12___lambda__2(lean_object*, uint8_t, lean_object*, lean_object*, lean_object*, lean_object*, uint8_t, double, double, lean_object*, lean_object*, lean_object*, lean_object*, lean_object*, lean_object*, lean_object*, lean_object*); -static lean_object* l_Lean_Tactic_FunInd_initFn____x40_Lean_Meta_Tactic_FunInd___hyg_22849____lambda__3___closed__4; static lean_object* l_Lean_Tactic_FunInd_foldAndCollect___lambda__13___closed__1; static lean_object* l_Lean_Tactic_FunInd_isFunInductName___closed__4; static lean_object* l_Lean_Elab_Structural_Positions_groupAndSort___at_Lean_Tactic_FunInd_deriveInductionStructural_doRealize___spec__5___closed__2; @@ -1319,7 +1343,6 @@ LEAN_EXPORT lean_object* l_Lean_Tactic_FunInd_buildInductionBody___lambda__9___b LEAN_EXPORT lean_object* l_Lean_Tactic_FunInd_abstractIndependentMVars___lambda__4(lean_object*, lean_object*, lean_object*, lean_object*, lean_object*, lean_object*, lean_object*, lean_object*, lean_object*, lean_object*); LEAN_EXPORT lean_object* l_Lean_PersistentArray_forInAux___at_Lean_Tactic_FunInd_cleanupAfter_allHeqToEq___spec__2___lambda__1___boxed(lean_object*, lean_object*, lean_object*, lean_object*, lean_object*, lean_object*, lean_object*); LEAN_EXPORT lean_object* l_Lean_Tactic_FunInd_deriveUnaryInduction_doRealize___lambda__11(lean_object*, lean_object*, lean_object*, lean_object*, lean_object*, lean_object*, lean_object*, lean_object*, lean_object*, lean_object*, lean_object*, lean_object*, lean_object*, lean_object*, lean_object*, lean_object*); -static lean_object* l_Lean_Tactic_FunInd_initFn____x40_Lean_Meta_Tactic_FunInd___hyg_22849____closed__3; LEAN_EXPORT lean_object* l_Lean_Tactic_FunInd_maskArray___rarg___boxed(lean_object*, lean_object*); extern lean_object* l_Lean_instInhabitedName; LEAN_EXPORT lean_object* l_Lean_Meta_MatcherApp_transform___at_Lean_Tactic_FunInd_buildInductionBody___spec__12___lambda__6___boxed(lean_object**); @@ -1327,7 +1350,7 @@ LEAN_EXPORT lean_object* l_StateT_pure___at_Lean_Tactic_FunInd_foldAndCollect___ LEAN_EXPORT lean_object* l_Array_forIn_x27Unsafe_loop___at_Lean_Tactic_FunInd_deriveInductionStructural_doRealize___spec__18___boxed(lean_object*, lean_object*, lean_object*, lean_object*, lean_object*, lean_object*, lean_object*, lean_object*); LEAN_EXPORT lean_object* l_Array_forIn_x27Unsafe_loop___at_Lean_Tactic_FunInd_projectMutualInduct___spec__1___lambda__2(lean_object*, lean_object*, lean_object*, lean_object*, lean_object*, lean_object*, lean_object*, lean_object*, lean_object*, lean_object*); lean_object* 
lean_array_uget(lean_object*, size_t); -LEAN_EXPORT lean_object* l_Lean_Tactic_FunInd_initFn____x40_Lean_Meta_Tactic_FunInd___hyg_22849____lambda__1(lean_object*, lean_object*, lean_object*, lean_object*); +static lean_object* l_Lean_Tactic_FunInd_buildInductionBody___lambda__41___closed__1; lean_object* l_Lean_Expr_fvar___override(lean_object*); size_t lean_array_size(lean_object*); static lean_object* l_Lean_Meta_MatcherApp_transform___at_Lean_Tactic_FunInd_buildInductionBody___spec__12___closed__3; @@ -1388,7 +1411,6 @@ lean_object* l_Lean_Meta_ArgsPacker_unpack(lean_object*, lean_object*); lean_object* l_Lean_isTracingEnabledFor___at_Lean_Meta_processPostponed_loop___spec__1(lean_object*, lean_object*, lean_object*, lean_object*, lean_object*, lean_object*); LEAN_EXPORT lean_object* l_Lean_Tactic_FunInd_cleanPackedArgs___lambda__2___boxed(lean_object*, lean_object*, lean_object*, lean_object*, lean_object*, lean_object*, lean_object*, lean_object*); LEAN_EXPORT lean_object* l_Lean_Meta_matchMatcherApp_x3f___at_Lean_Tactic_FunInd_foldAndCollect___spec__1___lambda__1(lean_object*, lean_object*, lean_object*, lean_object*, lean_object*, lean_object*, lean_object*); -static lean_object* l_Lean_Tactic_FunInd_initFn____x40_Lean_Meta_Tactic_FunInd___hyg_22849____closed__2; static lean_object* l_Lean_Expr_withAppAux___at_Lean_Tactic_FunInd_rwFun___spec__3___closed__2; LEAN_EXPORT lean_object* l_Lean_Tactic_FunInd_buildInductionBody___lambda__13___boxed(lean_object**); uint8_t l_Array_contains___at_Lean_Meta_addImplicitTargets_collect___spec__1(lean_object*, lean_object*); @@ -1406,6 +1428,7 @@ uint8_t l_Lean_isAuxRecursorWithSuffix(lean_object*, lean_object*, lean_object*) lean_object* lean_array_get_size(lean_object*); LEAN_EXPORT lean_object* l_Lean_Tactic_FunInd_foldAndCollect___lambda__4(lean_object*, lean_object*, lean_object*, lean_object*, lean_object*, lean_object*, lean_object*, lean_object*, lean_object*, lean_object*, lean_object*); LEAN_EXPORT lean_object* l_Array_mapMUnsafe_map___at_Lean_Tactic_FunInd_deriveInductionStructural_doRealize___spec__10___lambda__1___boxed(lean_object*, lean_object*, lean_object*, lean_object*, lean_object*, lean_object*, lean_object*); +static lean_object* l_Lean_Tactic_FunInd_initFn____x40_Lean_Meta_Tactic_FunInd___hyg_23481____lambda__3___closed__2; LEAN_EXPORT lean_object* l_Lean_throwError___at_Lean_Tactic_FunInd_buildInductionBody___spec__2___boxed(lean_object*, lean_object*, lean_object*, lean_object*, lean_object*, lean_object*, lean_object*, lean_object*); static lean_object* l_Lean_Tactic_FunInd_deriveCases___lambda__9___closed__1; LEAN_EXPORT lean_object* l_Lean_Tactic_FunInd_buildInductionBody___lambda__14___boxed(lean_object*, lean_object*, lean_object*, lean_object*, lean_object*, lean_object*, lean_object*, lean_object*, lean_object*); @@ -1421,6 +1444,7 @@ static lean_object* l_Lean_Tactic_FunInd_buildInductionBody___lambda__14___close LEAN_EXPORT lean_object* l_Array_forIn_x27Unsafe_loop___at_Lean_Tactic_FunInd_deriveInductionStructural_doRealize___spec__19(size_t, lean_object*, lean_object*, lean_object*, lean_object*, lean_object*, lean_object*, lean_object*, lean_object*, lean_object*, lean_object*, lean_object*, lean_object*, size_t, size_t, lean_object*, lean_object*, lean_object*, lean_object*, lean_object*, lean_object*); LEAN_EXPORT lean_object* l_Lean_logAt___at_Lean_Tactic_FunInd_buildInductionBody___spec__25(lean_object*, lean_object*, uint8_t, uint8_t, lean_object*, lean_object*, lean_object*, lean_object*, lean_object*, 
lean_object*, lean_object*); LEAN_EXPORT lean_object* l___private_Lean_Meta_Tactic_FunInd_0__Lean_Tactic_FunInd_elimTypeAnnotations(lean_object*, lean_object*, lean_object*, lean_object*); +static lean_object* l_Lean_Tactic_FunInd_buildInductionBody___lambda__41___closed__5; LEAN_EXPORT lean_object* l_List_forIn_x27_loop___at_Lean_Tactic_FunInd_buildInductionBody___spec__9___boxed(lean_object*, lean_object*, lean_object*, lean_object*, lean_object*, lean_object*, lean_object*, lean_object*, lean_object*, lean_object*, lean_object*, lean_object*, lean_object*); lean_object* lean_array_get(lean_object*, lean_object*, lean_object*); lean_object* lean_infer_type(lean_object*, lean_object*, lean_object*, lean_object*, lean_object*, lean_object*); @@ -1452,6 +1476,8 @@ lean_object* l_Lean_Meta_mkLambdaFVars(lean_object*, lean_object*, uint8_t, uint LEAN_EXPORT lean_object* l_Lean_Tactic_FunInd_buildInductionBody___lambda__10(lean_object*, lean_object*, lean_object*, lean_object*, lean_object*, lean_object*, lean_object*, lean_object*, lean_object*, lean_object*, lean_object*, lean_object*, lean_object*, lean_object*, lean_object*); LEAN_EXPORT lean_object* l_Lean_Tactic_FunInd_deriveUnaryInduction_doRealize___lambda__7(lean_object*, lean_object*, lean_object*, lean_object*, lean_object*, lean_object*, lean_object*, lean_object*, lean_object*, lean_object*, lean_object*, lean_object*); uint64_t l_Lean_Meta_TransparencyMode_toUInt64(uint8_t); +LEAN_EXPORT lean_object* l_Lean_Tactic_FunInd_initFn____x40_Lean_Meta_Tactic_FunInd___hyg_23481_(lean_object*); +LEAN_EXPORT lean_object* l_Lean_Tactic_FunInd_buildInductionBody___lambda__42(lean_object*, lean_object*, lean_object*, lean_object*, lean_object*, lean_object*, lean_object*, lean_object*, lean_object*, lean_object*, lean_object*, lean_object*, lean_object*, lean_object*, lean_object*); lean_object* lean_nat_add(lean_object*, lean_object*); LEAN_EXPORT lean_object* l_Array_foldlMUnsafe_fold___at_Lean_Tactic_FunInd_buildInductionCase___spec__9(lean_object*, lean_object*, size_t, size_t, lean_object*); LEAN_EXPORT lean_object* l_Lean_Tactic_FunInd_lambdaTelescope1___at_Lean_Tactic_FunInd_buildInductionBody___spec__33___rarg(lean_object*, lean_object*, lean_object*, lean_object*, lean_object*, lean_object*, lean_object*, lean_object*, lean_object*); @@ -1459,6 +1485,7 @@ LEAN_EXPORT lean_object* l_Lean_Tactic_FunInd_deriveInductionStructural_doRealiz LEAN_EXPORT lean_object* l_Lean_logAt___at_Lean_Tactic_FunInd_buildInductionBody___spec__25___boxed(lean_object*, lean_object*, lean_object*, lean_object*, lean_object*, lean_object*, lean_object*, lean_object*, lean_object*, lean_object*, lean_object*); LEAN_EXPORT lean_object* l_Lean_Meta_withLocalDecl___at_Lean_Tactic_FunInd_buildInductionBody___spec__1___rarg___boxed(lean_object*, lean_object*, lean_object*, lean_object*, lean_object*, lean_object*, lean_object*, lean_object*, lean_object*, lean_object*, lean_object*, lean_object*); LEAN_EXPORT lean_object* l_Lean_Tactic_FunInd_buildInductionBody___lambda__26___boxed(lean_object*, lean_object*, lean_object*, lean_object*, lean_object*, lean_object*, lean_object*, lean_object*, lean_object*, lean_object*, lean_object*, lean_object*, lean_object*, lean_object*, lean_object*); +uint8_t l_Lean_Expr_isConst(lean_object*); LEAN_EXPORT lean_object* l___private_Lean_Util_Trace_0__Lean_getResetTraces___at_Lean_Tactic_FunInd_foldAndCollect___spec__19___boxed(lean_object*, lean_object*, lean_object*, lean_object*, lean_object*, lean_object*); uint8_t 
l_Lean_Exception_isRuntime(lean_object*); LEAN_EXPORT lean_object* l_Lean_Meta_matchMatcherApp_x3f___at_Lean_Tactic_FunInd_buildInductionBody___spec__3___lambda__2(lean_object*, lean_object*, lean_object*, lean_object*, lean_object*, lean_object*, lean_object*, lean_object*, lean_object*, lean_object*, lean_object*, lean_object*); @@ -1483,9 +1510,7 @@ uint8_t l_Lean_Expr_isFVar(lean_object*); LEAN_EXPORT lean_object* l_Lean_Tactic_FunInd_M_ask(lean_object*, lean_object*, lean_object*, lean_object*, lean_object*, lean_object*); uint8_t l_Lean_Expr_isForall(lean_object*); LEAN_EXPORT lean_object* l_Array_mapFinIdxM_map___at_Lean_Tactic_FunInd_deriveInductionStructural_doRealize___spec__9___lambda__2(uint8_t, lean_object*, lean_object*, lean_object*, lean_object*, lean_object*, lean_object*, lean_object*); -static lean_object* l_Lean_Tactic_FunInd_initFn____x40_Lean_Meta_Tactic_FunInd___hyg_22849____lambda__2___closed__6; lean_object* l_Lean_Expr_mvarId_x21(lean_object*); -static lean_object* l_Lean_Tactic_FunInd_initFn____x40_Lean_Meta_Tactic_FunInd___hyg_22849____lambda__2___closed__9; lean_object* l_Lean_InductiveVal_numCtors(lean_object*); static lean_object* l_Lean_Tactic_FunInd_setNaryFunIndInfo___closed__9; static lean_object* l_Lean_Tactic_FunInd_deriveInductionStructural_doRealize___lambda__11___closed__2; @@ -1512,7 +1537,6 @@ lean_object* lean_expr_instantiate1(lean_object*, lean_object*); lean_object* l___private_Init_Data_Repr_0__Nat_reprFast(lean_object*); static lean_object* l_Array_forIn_x27Unsafe_loop___at_Lean_Tactic_FunInd_deriveInductionStructural_doRealize___spec__19___lambda__3___closed__1; LEAN_EXPORT lean_object* l_Lean_Tactic_FunInd_foldAndCollect___lambda__13___boxed(lean_object*, lean_object*, lean_object*, lean_object*, lean_object*, lean_object*, lean_object*, lean_object*, lean_object*, lean_object*); -static lean_object* l_Lean_Tactic_FunInd_initFn____x40_Lean_Meta_Tactic_FunInd___hyg_22849____lambda__2___closed__1; LEAN_EXPORT lean_object* l_Lean_Meta_lambdaBoundedTelescope___at_Lean_Tactic_FunInd_buildInductionBody___spec__34___rarg___boxed(lean_object*, lean_object*, lean_object*, lean_object*, lean_object*, lean_object*, lean_object*, lean_object*, lean_object*, lean_object*, lean_object*); lean_object* l___private_Lean_Meta_Basic_0__Lean_Meta_instantiateForallAux(lean_object*, lean_object*, lean_object*, lean_object*, lean_object*, lean_object*, lean_object*, lean_object*); LEAN_EXPORT lean_object* l_Lean_withTraceNode___at_Lean_Tactic_FunInd_buildInductionCase___spec__12___lambda__3(lean_object*, uint8_t, lean_object*, lean_object*, lean_object*, uint8_t, double, double, lean_object*, lean_object*, lean_object*, lean_object*, lean_object*, lean_object*, lean_object*, lean_object*, lean_object*); @@ -1536,6 +1560,7 @@ lean_object* l_Lean_addDecl(lean_object*, lean_object*, lean_object*, lean_objec LEAN_EXPORT lean_object* l_Array_foldlMUnsafe_fold___at_Lean_Tactic_FunInd_deriveInductionStructural_doRealize___spec__22(lean_object*, lean_object*, size_t, size_t, lean_object*); lean_object* l_Lean_MessageLog_add(lean_object*, lean_object*); static lean_object* l_Lean_Tactic_FunInd_withLetDecls___rarg___closed__1; +lean_object* l_Lean_Meta_matchEq_x3f(lean_object*, lean_object*, lean_object*, lean_object*, lean_object*, lean_object*); LEAN_EXPORT lean_object* l_Nat_foldRevM_loop___at_Lean_Tactic_FunInd_foldAndCollect___spec__14(lean_object*, lean_object*, lean_object*, lean_object*, lean_object*, lean_object*, lean_object*, lean_object*, lean_object*, 
lean_object*, lean_object*, lean_object*); static double l_Lean_withTraceNode___at_Lean_Tactic_FunInd_foldAndCollect___spec__17___lambda__4___closed__5; static lean_object* l_Array_forIn_x27Unsafe_loop___at_Lean_Tactic_FunInd_deriveInductionStructural_doRealize___spec__16___lambda__5___closed__3; @@ -58098,7 +58123,7 @@ static lean_object* _init_l_Lean_Tactic_FunInd_buildInductionBody___lambda__31__ lean_object* x_1; lean_object* x_2; lean_object* x_3; lean_object* x_4; lean_object* x_5; lean_object* x_6; x_1 = l_Lean_Tactic_FunInd_foldAndCollect___lambda__9___closed__1; x_2 = l_Lean_Tactic_FunInd_buildInductionBody___lambda__31___closed__1; -x_3 = lean_unsigned_to_nat(752u); +x_3 = lean_unsigned_to_nat(773u); x_4 = lean_unsigned_to_nat(40u); x_5 = l_List_forIn_x27_loop___at_Lean_Tactic_FunInd_foldAndCollect___spec__6___closed__3; x_6 = l___private_Init_Util_0__mkPanicMessageWithDecl(x_1, x_2, x_3, x_4, x_5); @@ -59048,21 +59073,1350 @@ static lean_object* _init_l_Lean_Tactic_FunInd_buildInductionBody___lambda__37__ _start: { lean_object* x_1; +x_1 = l_Lean_noConfusionExt; +return x_1; +} +} +LEAN_EXPORT lean_object* l_Lean_Tactic_FunInd_buildInductionBody___lambda__37(lean_object* x_1, lean_object* x_2, lean_object* x_3, lean_object* x_4, lean_object* x_5, lean_object* x_6, lean_object* x_7, lean_object* x_8, lean_object* x_9, lean_object* x_10, lean_object* x_11, lean_object* x_12, lean_object* x_13, lean_object* x_14, lean_object* x_15) { +_start: +{ +lean_object* x_16; lean_object* x_17; lean_object* x_18; lean_object* x_19; uint8_t x_20; +x_16 = lean_st_ref_get(x_14, x_15); +x_17 = lean_ctor_get(x_16, 0); +lean_inc(x_17); +x_18 = lean_ctor_get(x_16, 1); +lean_inc(x_18); +lean_dec(x_16); +x_19 = lean_ctor_get(x_17, 0); +lean_inc(x_19); +lean_dec(x_17); +x_20 = l_Lean_Expr_isApp(x_2); +if (x_20 == 0) +{ +lean_object* x_21; lean_object* x_22; +lean_dec(x_19); +x_21 = lean_box(0); +x_22 = l_Lean_Tactic_FunInd_buildInductionBody___lambda__36(x_1, x_2, x_3, x_4, x_5, x_6, x_7, x_21, x_9, x_10, x_11, x_12, x_13, x_14, x_18); +return x_22; +} +else +{ +lean_object* x_23; uint8_t x_24; +x_23 = l_Lean_Expr_getAppFn(x_2); +x_24 = l_Lean_Expr_isConst(x_23); +if (x_24 == 0) +{ +lean_object* x_25; lean_object* x_26; +lean_dec(x_23); +lean_dec(x_19); +x_25 = lean_box(0); +x_26 = l_Lean_Tactic_FunInd_buildInductionBody___lambda__36(x_1, x_2, x_3, x_4, x_5, x_6, x_7, x_25, x_9, x_10, x_11, x_12, x_13, x_14, x_18); +return x_26; +} +else +{ +lean_object* x_27; lean_object* x_28; uint8_t x_29; +x_27 = l_Lean_Expr_constName_x21(x_23); +x_28 = l_Lean_Tactic_FunInd_buildInductionBody___lambda__37___closed__1; +x_29 = l_Lean_TagDeclarationExtension_isTagged(x_28, x_19, x_27); +if (x_29 == 0) +{ +lean_object* x_30; lean_object* x_31; +lean_dec(x_23); +x_30 = lean_box(0); +x_31 = l_Lean_Tactic_FunInd_buildInductionBody___lambda__36(x_1, x_2, x_3, x_4, x_5, x_6, x_7, x_30, x_9, x_10, x_11, x_12, x_13, x_14, x_18); +return x_31; +} +else +{ +lean_object* x_32; +lean_inc(x_14); +lean_inc(x_13); +lean_inc(x_12); +lean_inc(x_11); +x_32 = lean_infer_type(x_23, x_11, x_12, x_13, x_14, x_18); +if (lean_obj_tag(x_32) == 0) +{ +lean_object* x_33; lean_object* x_34; lean_object* x_35; lean_object* x_36; lean_object* x_37; lean_object* x_38; lean_object* x_39; lean_object* x_40; lean_object* x_41; lean_object* x_42; lean_object* x_43; +x_33 = lean_ctor_get(x_32, 0); +lean_inc(x_33); +x_34 = lean_ctor_get(x_32, 1); +lean_inc(x_34); +lean_dec(x_32); +x_35 = l_Lean_Expr_getNumHeadForalls(x_33); +lean_dec(x_33); +x_36 = 
lean_unsigned_to_nat(1u); +x_37 = lean_nat_sub(x_35, x_36); +lean_dec(x_35); +x_38 = lean_unsigned_to_nat(0u); +x_39 = l___private_Lean_Expr_0__Lean_Expr_getAppNumArgsAux(x_2, x_38); +x_40 = lean_nat_sub(x_39, x_37); +lean_dec(x_37); +lean_dec(x_39); +x_41 = lean_nat_sub(x_40, x_36); +lean_dec(x_40); +x_42 = l_Lean_Expr_getRevArg_x21(x_2, x_41); +lean_inc(x_14); +lean_inc(x_13); +lean_inc(x_12); +lean_inc(x_11); +lean_inc(x_42); +x_43 = lean_infer_type(x_42, x_11, x_12, x_13, x_14, x_34); +if (lean_obj_tag(x_43) == 0) +{ +lean_object* x_44; lean_object* x_45; lean_object* x_46; +x_44 = lean_ctor_get(x_43, 0); +lean_inc(x_44); +x_45 = lean_ctor_get(x_43, 1); +lean_inc(x_45); +lean_dec(x_43); +lean_inc(x_14); +lean_inc(x_13); +lean_inc(x_12); +lean_inc(x_11); +x_46 = l_Lean_Meta_matchEq_x3f(x_44, x_11, x_12, x_13, x_14, x_45); +if (lean_obj_tag(x_46) == 0) +{ +lean_object* x_47; +x_47 = lean_ctor_get(x_46, 0); +lean_inc(x_47); +if (lean_obj_tag(x_47) == 0) +{ +lean_object* x_48; lean_object* x_49; lean_object* x_50; +lean_dec(x_42); +x_48 = lean_ctor_get(x_46, 1); +lean_inc(x_48); +lean_dec(x_46); +x_49 = lean_box(0); +x_50 = l_Lean_Tactic_FunInd_buildInductionBody___lambda__36(x_1, x_2, x_3, x_4, x_5, x_6, x_7, x_49, x_9, x_10, x_11, x_12, x_13, x_14, x_48); +return x_50; +} +else +{ +lean_object* x_51; uint8_t x_52; +x_51 = lean_ctor_get(x_47, 0); +lean_inc(x_51); +lean_dec(x_47); +x_52 = !lean_is_exclusive(x_51); +if (x_52 == 0) +{ +lean_object* x_53; lean_object* x_54; lean_object* x_55; uint8_t x_56; +x_53 = lean_ctor_get(x_51, 1); +x_54 = lean_ctor_get(x_51, 0); +lean_dec(x_54); +x_55 = lean_ctor_get(x_46, 1); +lean_inc(x_55); +lean_dec(x_46); +x_56 = !lean_is_exclusive(x_53); +if (x_56 == 0) +{ +lean_object* x_57; lean_object* x_58; lean_object* x_59; +x_57 = lean_ctor_get(x_53, 0); +x_58 = lean_ctor_get(x_53, 1); +lean_inc(x_14); +lean_inc(x_13); +lean_inc(x_12); +lean_inc(x_11); +x_59 = l_Lean_Meta_matchConstructorApp_x3f(x_57, x_11, x_12, x_13, x_14, x_55); +if (lean_obj_tag(x_59) == 0) +{ +lean_object* x_60; +x_60 = lean_ctor_get(x_59, 0); +lean_inc(x_60); +if (lean_obj_tag(x_60) == 0) +{ +lean_object* x_61; lean_object* x_62; lean_object* x_63; +lean_free_object(x_53); +lean_dec(x_58); +lean_free_object(x_51); +lean_dec(x_42); +x_61 = lean_ctor_get(x_59, 1); +lean_inc(x_61); +lean_dec(x_59); +x_62 = lean_box(0); +x_63 = l_Lean_Tactic_FunInd_buildInductionBody___lambda__36(x_1, x_2, x_3, x_4, x_5, x_6, x_7, x_62, x_9, x_10, x_11, x_12, x_13, x_14, x_61); +return x_63; +} +else +{ +lean_object* x_64; lean_object* x_65; lean_object* x_66; +x_64 = lean_ctor_get(x_59, 1); +lean_inc(x_64); +lean_dec(x_59); +x_65 = lean_ctor_get(x_60, 0); +lean_inc(x_65); +lean_dec(x_60); +lean_inc(x_14); +lean_inc(x_13); +lean_inc(x_12); +lean_inc(x_11); +x_66 = l_Lean_Meta_matchConstructorApp_x3f(x_58, x_11, x_12, x_13, x_14, x_64); +if (lean_obj_tag(x_66) == 0) +{ +lean_object* x_67; +x_67 = lean_ctor_get(x_66, 0); +lean_inc(x_67); +if (lean_obj_tag(x_67) == 0) +{ +lean_object* x_68; lean_object* x_69; lean_object* x_70; +lean_dec(x_65); +lean_free_object(x_53); +lean_free_object(x_51); +lean_dec(x_42); +x_68 = lean_ctor_get(x_66, 1); +lean_inc(x_68); +lean_dec(x_66); +x_69 = lean_box(0); +x_70 = l_Lean_Tactic_FunInd_buildInductionBody___lambda__36(x_1, x_2, x_3, x_4, x_5, x_6, x_7, x_69, x_9, x_10, x_11, x_12, x_13, x_14, x_68); +return x_70; +} +else +{ +lean_object* x_71; lean_object* x_72; lean_object* x_73; lean_object* x_74; lean_object* x_75; lean_object* x_76; uint8_t x_77; +x_71 = 
lean_ctor_get(x_66, 1); +lean_inc(x_71); +lean_dec(x_66); +x_72 = lean_ctor_get(x_67, 0); +lean_inc(x_72); +lean_dec(x_67); +x_73 = lean_ctor_get(x_65, 0); +lean_inc(x_73); +lean_dec(x_65); +x_74 = lean_ctor_get(x_73, 0); +lean_inc(x_74); +lean_dec(x_73); +x_75 = lean_ctor_get(x_72, 0); +lean_inc(x_75); +lean_dec(x_72); +x_76 = lean_ctor_get(x_75, 0); +lean_inc(x_76); +lean_dec(x_75); +x_77 = lean_name_eq(x_74, x_76); +lean_dec(x_76); +lean_dec(x_74); +if (x_77 == 0) +{ +lean_object* x_78; +lean_dec(x_7); +lean_dec(x_6); +lean_dec(x_5); +lean_dec(x_4); +lean_dec(x_3); +lean_dec(x_2); +x_78 = l_Lean_Meta_mkNoConfusion(x_1, x_42, x_11, x_12, x_13, x_14, x_71); +if (lean_obj_tag(x_78) == 0) +{ +uint8_t x_79; +x_79 = !lean_is_exclusive(x_78); +if (x_79 == 0) +{ +lean_object* x_80; +x_80 = lean_ctor_get(x_78, 0); +lean_ctor_set(x_53, 1, x_9); +lean_ctor_set(x_53, 0, x_80); +lean_ctor_set(x_51, 1, x_10); +lean_ctor_set(x_51, 0, x_53); +lean_ctor_set(x_78, 0, x_51); +return x_78; +} +else +{ +lean_object* x_81; lean_object* x_82; lean_object* x_83; +x_81 = lean_ctor_get(x_78, 0); +x_82 = lean_ctor_get(x_78, 1); +lean_inc(x_82); +lean_inc(x_81); +lean_dec(x_78); +lean_ctor_set(x_53, 1, x_9); +lean_ctor_set(x_53, 0, x_81); +lean_ctor_set(x_51, 1, x_10); +lean_ctor_set(x_51, 0, x_53); +x_83 = lean_alloc_ctor(0, 2, 0); +lean_ctor_set(x_83, 0, x_51); +lean_ctor_set(x_83, 1, x_82); +return x_83; +} +} +else +{ +uint8_t x_84; +lean_free_object(x_53); +lean_free_object(x_51); +lean_dec(x_10); +lean_dec(x_9); +x_84 = !lean_is_exclusive(x_78); +if (x_84 == 0) +{ +return x_78; +} +else +{ +lean_object* x_85; lean_object* x_86; lean_object* x_87; +x_85 = lean_ctor_get(x_78, 0); +x_86 = lean_ctor_get(x_78, 1); +lean_inc(x_86); +lean_inc(x_85); +lean_dec(x_78); +x_87 = lean_alloc_ctor(1, 2, 0); +lean_ctor_set(x_87, 0, x_85); +lean_ctor_set(x_87, 1, x_86); +return x_87; +} +} +} +else +{ +lean_object* x_88; lean_object* x_89; +lean_free_object(x_53); +lean_free_object(x_51); +lean_dec(x_42); +x_88 = lean_box(0); +x_89 = l_Lean_Tactic_FunInd_buildInductionBody___lambda__36(x_1, x_2, x_3, x_4, x_5, x_6, x_7, x_88, x_9, x_10, x_11, x_12, x_13, x_14, x_71); +return x_89; +} +} +} +else +{ +uint8_t x_90; +lean_dec(x_65); +lean_free_object(x_53); +lean_free_object(x_51); +lean_dec(x_42); +lean_dec(x_14); +lean_dec(x_13); +lean_dec(x_12); +lean_dec(x_11); +lean_dec(x_10); +lean_dec(x_9); +lean_dec(x_7); +lean_dec(x_6); +lean_dec(x_5); +lean_dec(x_4); +lean_dec(x_3); +lean_dec(x_2); +lean_dec(x_1); +x_90 = !lean_is_exclusive(x_66); +if (x_90 == 0) +{ +return x_66; +} +else +{ +lean_object* x_91; lean_object* x_92; lean_object* x_93; +x_91 = lean_ctor_get(x_66, 0); +x_92 = lean_ctor_get(x_66, 1); +lean_inc(x_92); +lean_inc(x_91); +lean_dec(x_66); +x_93 = lean_alloc_ctor(1, 2, 0); +lean_ctor_set(x_93, 0, x_91); +lean_ctor_set(x_93, 1, x_92); +return x_93; +} +} +} +} +else +{ +uint8_t x_94; +lean_free_object(x_53); +lean_dec(x_58); +lean_free_object(x_51); +lean_dec(x_42); +lean_dec(x_14); +lean_dec(x_13); +lean_dec(x_12); +lean_dec(x_11); +lean_dec(x_10); +lean_dec(x_9); +lean_dec(x_7); +lean_dec(x_6); +lean_dec(x_5); +lean_dec(x_4); +lean_dec(x_3); +lean_dec(x_2); +lean_dec(x_1); +x_94 = !lean_is_exclusive(x_59); +if (x_94 == 0) +{ +return x_59; +} +else +{ +lean_object* x_95; lean_object* x_96; lean_object* x_97; +x_95 = lean_ctor_get(x_59, 0); +x_96 = lean_ctor_get(x_59, 1); +lean_inc(x_96); +lean_inc(x_95); +lean_dec(x_59); +x_97 = lean_alloc_ctor(1, 2, 0); +lean_ctor_set(x_97, 0, x_95); +lean_ctor_set(x_97, 1, 
x_96); +return x_97; +} +} +} +else +{ +lean_object* x_98; lean_object* x_99; lean_object* x_100; +x_98 = lean_ctor_get(x_53, 0); +x_99 = lean_ctor_get(x_53, 1); +lean_inc(x_99); +lean_inc(x_98); +lean_dec(x_53); +lean_inc(x_14); +lean_inc(x_13); +lean_inc(x_12); +lean_inc(x_11); +x_100 = l_Lean_Meta_matchConstructorApp_x3f(x_98, x_11, x_12, x_13, x_14, x_55); +if (lean_obj_tag(x_100) == 0) +{ +lean_object* x_101; +x_101 = lean_ctor_get(x_100, 0); +lean_inc(x_101); +if (lean_obj_tag(x_101) == 0) +{ +lean_object* x_102; lean_object* x_103; lean_object* x_104; +lean_dec(x_99); +lean_free_object(x_51); +lean_dec(x_42); +x_102 = lean_ctor_get(x_100, 1); +lean_inc(x_102); +lean_dec(x_100); +x_103 = lean_box(0); +x_104 = l_Lean_Tactic_FunInd_buildInductionBody___lambda__36(x_1, x_2, x_3, x_4, x_5, x_6, x_7, x_103, x_9, x_10, x_11, x_12, x_13, x_14, x_102); +return x_104; +} +else +{ +lean_object* x_105; lean_object* x_106; lean_object* x_107; +x_105 = lean_ctor_get(x_100, 1); +lean_inc(x_105); +lean_dec(x_100); +x_106 = lean_ctor_get(x_101, 0); +lean_inc(x_106); +lean_dec(x_101); +lean_inc(x_14); +lean_inc(x_13); +lean_inc(x_12); +lean_inc(x_11); +x_107 = l_Lean_Meta_matchConstructorApp_x3f(x_99, x_11, x_12, x_13, x_14, x_105); +if (lean_obj_tag(x_107) == 0) +{ +lean_object* x_108; +x_108 = lean_ctor_get(x_107, 0); +lean_inc(x_108); +if (lean_obj_tag(x_108) == 0) +{ +lean_object* x_109; lean_object* x_110; lean_object* x_111; +lean_dec(x_106); +lean_free_object(x_51); +lean_dec(x_42); +x_109 = lean_ctor_get(x_107, 1); +lean_inc(x_109); +lean_dec(x_107); +x_110 = lean_box(0); +x_111 = l_Lean_Tactic_FunInd_buildInductionBody___lambda__36(x_1, x_2, x_3, x_4, x_5, x_6, x_7, x_110, x_9, x_10, x_11, x_12, x_13, x_14, x_109); +return x_111; +} +else +{ +lean_object* x_112; lean_object* x_113; lean_object* x_114; lean_object* x_115; lean_object* x_116; lean_object* x_117; uint8_t x_118; +x_112 = lean_ctor_get(x_107, 1); +lean_inc(x_112); +lean_dec(x_107); +x_113 = lean_ctor_get(x_108, 0); +lean_inc(x_113); +lean_dec(x_108); +x_114 = lean_ctor_get(x_106, 0); +lean_inc(x_114); +lean_dec(x_106); +x_115 = lean_ctor_get(x_114, 0); +lean_inc(x_115); +lean_dec(x_114); +x_116 = lean_ctor_get(x_113, 0); +lean_inc(x_116); +lean_dec(x_113); +x_117 = lean_ctor_get(x_116, 0); +lean_inc(x_117); +lean_dec(x_116); +x_118 = lean_name_eq(x_115, x_117); +lean_dec(x_117); +lean_dec(x_115); +if (x_118 == 0) +{ +lean_object* x_119; +lean_dec(x_7); +lean_dec(x_6); +lean_dec(x_5); +lean_dec(x_4); +lean_dec(x_3); +lean_dec(x_2); +x_119 = l_Lean_Meta_mkNoConfusion(x_1, x_42, x_11, x_12, x_13, x_14, x_112); +if (lean_obj_tag(x_119) == 0) +{ +lean_object* x_120; lean_object* x_121; lean_object* x_122; lean_object* x_123; lean_object* x_124; +x_120 = lean_ctor_get(x_119, 0); +lean_inc(x_120); +x_121 = lean_ctor_get(x_119, 1); +lean_inc(x_121); +if (lean_is_exclusive(x_119)) { + lean_ctor_release(x_119, 0); + lean_ctor_release(x_119, 1); + x_122 = x_119; +} else { + lean_dec_ref(x_119); + x_122 = lean_box(0); +} +x_123 = lean_alloc_ctor(0, 2, 0); +lean_ctor_set(x_123, 0, x_120); +lean_ctor_set(x_123, 1, x_9); +lean_ctor_set(x_51, 1, x_10); +lean_ctor_set(x_51, 0, x_123); +if (lean_is_scalar(x_122)) { + x_124 = lean_alloc_ctor(0, 2, 0); +} else { + x_124 = x_122; +} +lean_ctor_set(x_124, 0, x_51); +lean_ctor_set(x_124, 1, x_121); +return x_124; +} +else +{ +lean_object* x_125; lean_object* x_126; lean_object* x_127; lean_object* x_128; +lean_free_object(x_51); +lean_dec(x_10); +lean_dec(x_9); +x_125 = lean_ctor_get(x_119, 0); 
+lean_inc(x_125); +x_126 = lean_ctor_get(x_119, 1); +lean_inc(x_126); +if (lean_is_exclusive(x_119)) { + lean_ctor_release(x_119, 0); + lean_ctor_release(x_119, 1); + x_127 = x_119; +} else { + lean_dec_ref(x_119); + x_127 = lean_box(0); +} +if (lean_is_scalar(x_127)) { + x_128 = lean_alloc_ctor(1, 2, 0); +} else { + x_128 = x_127; +} +lean_ctor_set(x_128, 0, x_125); +lean_ctor_set(x_128, 1, x_126); +return x_128; +} +} +else +{ +lean_object* x_129; lean_object* x_130; +lean_free_object(x_51); +lean_dec(x_42); +x_129 = lean_box(0); +x_130 = l_Lean_Tactic_FunInd_buildInductionBody___lambda__36(x_1, x_2, x_3, x_4, x_5, x_6, x_7, x_129, x_9, x_10, x_11, x_12, x_13, x_14, x_112); +return x_130; +} +} +} +else +{ +lean_object* x_131; lean_object* x_132; lean_object* x_133; lean_object* x_134; +lean_dec(x_106); +lean_free_object(x_51); +lean_dec(x_42); +lean_dec(x_14); +lean_dec(x_13); +lean_dec(x_12); +lean_dec(x_11); +lean_dec(x_10); +lean_dec(x_9); +lean_dec(x_7); +lean_dec(x_6); +lean_dec(x_5); +lean_dec(x_4); +lean_dec(x_3); +lean_dec(x_2); +lean_dec(x_1); +x_131 = lean_ctor_get(x_107, 0); +lean_inc(x_131); +x_132 = lean_ctor_get(x_107, 1); +lean_inc(x_132); +if (lean_is_exclusive(x_107)) { + lean_ctor_release(x_107, 0); + lean_ctor_release(x_107, 1); + x_133 = x_107; +} else { + lean_dec_ref(x_107); + x_133 = lean_box(0); +} +if (lean_is_scalar(x_133)) { + x_134 = lean_alloc_ctor(1, 2, 0); +} else { + x_134 = x_133; +} +lean_ctor_set(x_134, 0, x_131); +lean_ctor_set(x_134, 1, x_132); +return x_134; +} +} +} +else +{ +lean_object* x_135; lean_object* x_136; lean_object* x_137; lean_object* x_138; +lean_dec(x_99); +lean_free_object(x_51); +lean_dec(x_42); +lean_dec(x_14); +lean_dec(x_13); +lean_dec(x_12); +lean_dec(x_11); +lean_dec(x_10); +lean_dec(x_9); +lean_dec(x_7); +lean_dec(x_6); +lean_dec(x_5); +lean_dec(x_4); +lean_dec(x_3); +lean_dec(x_2); +lean_dec(x_1); +x_135 = lean_ctor_get(x_100, 0); +lean_inc(x_135); +x_136 = lean_ctor_get(x_100, 1); +lean_inc(x_136); +if (lean_is_exclusive(x_100)) { + lean_ctor_release(x_100, 0); + lean_ctor_release(x_100, 1); + x_137 = x_100; +} else { + lean_dec_ref(x_100); + x_137 = lean_box(0); +} +if (lean_is_scalar(x_137)) { + x_138 = lean_alloc_ctor(1, 2, 0); +} else { + x_138 = x_137; +} +lean_ctor_set(x_138, 0, x_135); +lean_ctor_set(x_138, 1, x_136); +return x_138; +} +} +} +else +{ +lean_object* x_139; lean_object* x_140; lean_object* x_141; lean_object* x_142; lean_object* x_143; lean_object* x_144; +x_139 = lean_ctor_get(x_51, 1); +lean_inc(x_139); +lean_dec(x_51); +x_140 = lean_ctor_get(x_46, 1); +lean_inc(x_140); +lean_dec(x_46); +x_141 = lean_ctor_get(x_139, 0); +lean_inc(x_141); +x_142 = lean_ctor_get(x_139, 1); +lean_inc(x_142); +if (lean_is_exclusive(x_139)) { + lean_ctor_release(x_139, 0); + lean_ctor_release(x_139, 1); + x_143 = x_139; +} else { + lean_dec_ref(x_139); + x_143 = lean_box(0); +} +lean_inc(x_14); +lean_inc(x_13); +lean_inc(x_12); +lean_inc(x_11); +x_144 = l_Lean_Meta_matchConstructorApp_x3f(x_141, x_11, x_12, x_13, x_14, x_140); +if (lean_obj_tag(x_144) == 0) +{ +lean_object* x_145; +x_145 = lean_ctor_get(x_144, 0); +lean_inc(x_145); +if (lean_obj_tag(x_145) == 0) +{ +lean_object* x_146; lean_object* x_147; lean_object* x_148; +lean_dec(x_143); +lean_dec(x_142); +lean_dec(x_42); +x_146 = lean_ctor_get(x_144, 1); +lean_inc(x_146); +lean_dec(x_144); +x_147 = lean_box(0); +x_148 = l_Lean_Tactic_FunInd_buildInductionBody___lambda__36(x_1, x_2, x_3, x_4, x_5, x_6, x_7, x_147, x_9, x_10, x_11, x_12, x_13, x_14, x_146); +return 
x_148; +} +else +{ +lean_object* x_149; lean_object* x_150; lean_object* x_151; +x_149 = lean_ctor_get(x_144, 1); +lean_inc(x_149); +lean_dec(x_144); +x_150 = lean_ctor_get(x_145, 0); +lean_inc(x_150); +lean_dec(x_145); +lean_inc(x_14); +lean_inc(x_13); +lean_inc(x_12); +lean_inc(x_11); +x_151 = l_Lean_Meta_matchConstructorApp_x3f(x_142, x_11, x_12, x_13, x_14, x_149); +if (lean_obj_tag(x_151) == 0) +{ +lean_object* x_152; +x_152 = lean_ctor_get(x_151, 0); +lean_inc(x_152); +if (lean_obj_tag(x_152) == 0) +{ +lean_object* x_153; lean_object* x_154; lean_object* x_155; +lean_dec(x_150); +lean_dec(x_143); +lean_dec(x_42); +x_153 = lean_ctor_get(x_151, 1); +lean_inc(x_153); +lean_dec(x_151); +x_154 = lean_box(0); +x_155 = l_Lean_Tactic_FunInd_buildInductionBody___lambda__36(x_1, x_2, x_3, x_4, x_5, x_6, x_7, x_154, x_9, x_10, x_11, x_12, x_13, x_14, x_153); +return x_155; +} +else +{ +lean_object* x_156; lean_object* x_157; lean_object* x_158; lean_object* x_159; lean_object* x_160; lean_object* x_161; uint8_t x_162; +x_156 = lean_ctor_get(x_151, 1); +lean_inc(x_156); +lean_dec(x_151); +x_157 = lean_ctor_get(x_152, 0); +lean_inc(x_157); +lean_dec(x_152); +x_158 = lean_ctor_get(x_150, 0); +lean_inc(x_158); +lean_dec(x_150); +x_159 = lean_ctor_get(x_158, 0); +lean_inc(x_159); +lean_dec(x_158); +x_160 = lean_ctor_get(x_157, 0); +lean_inc(x_160); +lean_dec(x_157); +x_161 = lean_ctor_get(x_160, 0); +lean_inc(x_161); +lean_dec(x_160); +x_162 = lean_name_eq(x_159, x_161); +lean_dec(x_161); +lean_dec(x_159); +if (x_162 == 0) +{ +lean_object* x_163; +lean_dec(x_7); +lean_dec(x_6); +lean_dec(x_5); +lean_dec(x_4); +lean_dec(x_3); +lean_dec(x_2); +x_163 = l_Lean_Meta_mkNoConfusion(x_1, x_42, x_11, x_12, x_13, x_14, x_156); +if (lean_obj_tag(x_163) == 0) +{ +lean_object* x_164; lean_object* x_165; lean_object* x_166; lean_object* x_167; lean_object* x_168; lean_object* x_169; +x_164 = lean_ctor_get(x_163, 0); +lean_inc(x_164); +x_165 = lean_ctor_get(x_163, 1); +lean_inc(x_165); +if (lean_is_exclusive(x_163)) { + lean_ctor_release(x_163, 0); + lean_ctor_release(x_163, 1); + x_166 = x_163; +} else { + lean_dec_ref(x_163); + x_166 = lean_box(0); +} +if (lean_is_scalar(x_143)) { + x_167 = lean_alloc_ctor(0, 2, 0); +} else { + x_167 = x_143; +} +lean_ctor_set(x_167, 0, x_164); +lean_ctor_set(x_167, 1, x_9); +x_168 = lean_alloc_ctor(0, 2, 0); +lean_ctor_set(x_168, 0, x_167); +lean_ctor_set(x_168, 1, x_10); +if (lean_is_scalar(x_166)) { + x_169 = lean_alloc_ctor(0, 2, 0); +} else { + x_169 = x_166; +} +lean_ctor_set(x_169, 0, x_168); +lean_ctor_set(x_169, 1, x_165); +return x_169; +} +else +{ +lean_object* x_170; lean_object* x_171; lean_object* x_172; lean_object* x_173; +lean_dec(x_143); +lean_dec(x_10); +lean_dec(x_9); +x_170 = lean_ctor_get(x_163, 0); +lean_inc(x_170); +x_171 = lean_ctor_get(x_163, 1); +lean_inc(x_171); +if (lean_is_exclusive(x_163)) { + lean_ctor_release(x_163, 0); + lean_ctor_release(x_163, 1); + x_172 = x_163; +} else { + lean_dec_ref(x_163); + x_172 = lean_box(0); +} +if (lean_is_scalar(x_172)) { + x_173 = lean_alloc_ctor(1, 2, 0); +} else { + x_173 = x_172; +} +lean_ctor_set(x_173, 0, x_170); +lean_ctor_set(x_173, 1, x_171); +return x_173; +} +} +else +{ +lean_object* x_174; lean_object* x_175; +lean_dec(x_143); +lean_dec(x_42); +x_174 = lean_box(0); +x_175 = l_Lean_Tactic_FunInd_buildInductionBody___lambda__36(x_1, x_2, x_3, x_4, x_5, x_6, x_7, x_174, x_9, x_10, x_11, x_12, x_13, x_14, x_156); +return x_175; +} +} +} +else +{ +lean_object* x_176; lean_object* x_177; lean_object* x_178; 
lean_object* x_179; +lean_dec(x_150); +lean_dec(x_143); +lean_dec(x_42); +lean_dec(x_14); +lean_dec(x_13); +lean_dec(x_12); +lean_dec(x_11); +lean_dec(x_10); +lean_dec(x_9); +lean_dec(x_7); +lean_dec(x_6); +lean_dec(x_5); +lean_dec(x_4); +lean_dec(x_3); +lean_dec(x_2); +lean_dec(x_1); +x_176 = lean_ctor_get(x_151, 0); +lean_inc(x_176); +x_177 = lean_ctor_get(x_151, 1); +lean_inc(x_177); +if (lean_is_exclusive(x_151)) { + lean_ctor_release(x_151, 0); + lean_ctor_release(x_151, 1); + x_178 = x_151; +} else { + lean_dec_ref(x_151); + x_178 = lean_box(0); +} +if (lean_is_scalar(x_178)) { + x_179 = lean_alloc_ctor(1, 2, 0); +} else { + x_179 = x_178; +} +lean_ctor_set(x_179, 0, x_176); +lean_ctor_set(x_179, 1, x_177); +return x_179; +} +} +} +else +{ +lean_object* x_180; lean_object* x_181; lean_object* x_182; lean_object* x_183; +lean_dec(x_143); +lean_dec(x_142); +lean_dec(x_42); +lean_dec(x_14); +lean_dec(x_13); +lean_dec(x_12); +lean_dec(x_11); +lean_dec(x_10); +lean_dec(x_9); +lean_dec(x_7); +lean_dec(x_6); +lean_dec(x_5); +lean_dec(x_4); +lean_dec(x_3); +lean_dec(x_2); +lean_dec(x_1); +x_180 = lean_ctor_get(x_144, 0); +lean_inc(x_180); +x_181 = lean_ctor_get(x_144, 1); +lean_inc(x_181); +if (lean_is_exclusive(x_144)) { + lean_ctor_release(x_144, 0); + lean_ctor_release(x_144, 1); + x_182 = x_144; +} else { + lean_dec_ref(x_144); + x_182 = lean_box(0); +} +if (lean_is_scalar(x_182)) { + x_183 = lean_alloc_ctor(1, 2, 0); +} else { + x_183 = x_182; +} +lean_ctor_set(x_183, 0, x_180); +lean_ctor_set(x_183, 1, x_181); +return x_183; +} +} +} +} +else +{ +uint8_t x_184; +lean_dec(x_42); +lean_dec(x_14); +lean_dec(x_13); +lean_dec(x_12); +lean_dec(x_11); +lean_dec(x_10); +lean_dec(x_9); +lean_dec(x_7); +lean_dec(x_6); +lean_dec(x_5); +lean_dec(x_4); +lean_dec(x_3); +lean_dec(x_2); +lean_dec(x_1); +x_184 = !lean_is_exclusive(x_46); +if (x_184 == 0) +{ +return x_46; +} +else +{ +lean_object* x_185; lean_object* x_186; lean_object* x_187; +x_185 = lean_ctor_get(x_46, 0); +x_186 = lean_ctor_get(x_46, 1); +lean_inc(x_186); +lean_inc(x_185); +lean_dec(x_46); +x_187 = lean_alloc_ctor(1, 2, 0); +lean_ctor_set(x_187, 0, x_185); +lean_ctor_set(x_187, 1, x_186); +return x_187; +} +} +} +else +{ +uint8_t x_188; +lean_dec(x_42); +lean_dec(x_14); +lean_dec(x_13); +lean_dec(x_12); +lean_dec(x_11); +lean_dec(x_10); +lean_dec(x_9); +lean_dec(x_7); +lean_dec(x_6); +lean_dec(x_5); +lean_dec(x_4); +lean_dec(x_3); +lean_dec(x_2); +lean_dec(x_1); +x_188 = !lean_is_exclusive(x_43); +if (x_188 == 0) +{ +return x_43; +} +else +{ +lean_object* x_189; lean_object* x_190; lean_object* x_191; +x_189 = lean_ctor_get(x_43, 0); +x_190 = lean_ctor_get(x_43, 1); +lean_inc(x_190); +lean_inc(x_189); +lean_dec(x_43); +x_191 = lean_alloc_ctor(1, 2, 0); +lean_ctor_set(x_191, 0, x_189); +lean_ctor_set(x_191, 1, x_190); +return x_191; +} +} +} +else +{ +uint8_t x_192; +lean_dec(x_14); +lean_dec(x_13); +lean_dec(x_12); +lean_dec(x_11); +lean_dec(x_10); +lean_dec(x_9); +lean_dec(x_7); +lean_dec(x_6); +lean_dec(x_5); +lean_dec(x_4); +lean_dec(x_3); +lean_dec(x_2); +lean_dec(x_1); +x_192 = !lean_is_exclusive(x_32); +if (x_192 == 0) +{ +return x_32; +} +else +{ +lean_object* x_193; lean_object* x_194; lean_object* x_195; +x_193 = lean_ctor_get(x_32, 0); +x_194 = lean_ctor_get(x_32, 1); +lean_inc(x_194); +lean_inc(x_193); +lean_dec(x_32); +x_195 = lean_alloc_ctor(1, 2, 0); +lean_ctor_set(x_195, 0, x_193); +lean_ctor_set(x_195, 1, x_194); +return x_195; +} +} +} +} +} +} +} +LEAN_EXPORT lean_object* 
l_Lean_Tactic_FunInd_buildInductionBody___lambda__38(lean_object* x_1, lean_object* x_2, lean_object* x_3, lean_object* x_4, lean_object* x_5, lean_object* x_6, lean_object* x_7, lean_object* x_8, lean_object* x_9, lean_object* x_10) { +_start: +{ +lean_object* x_11; +x_11 = l_Lean_Meta_mkAbsurd(x_1, x_2, x_3, x_6, x_7, x_8, x_9, x_10); +if (lean_obj_tag(x_11) == 0) +{ +uint8_t x_12; +x_12 = !lean_is_exclusive(x_11); +if (x_12 == 0) +{ +lean_object* x_13; lean_object* x_14; lean_object* x_15; +x_13 = lean_ctor_get(x_11, 0); +x_14 = lean_alloc_ctor(0, 2, 0); +lean_ctor_set(x_14, 0, x_13); +lean_ctor_set(x_14, 1, x_4); +x_15 = lean_alloc_ctor(0, 2, 0); +lean_ctor_set(x_15, 0, x_14); +lean_ctor_set(x_15, 1, x_5); +lean_ctor_set(x_11, 0, x_15); +return x_11; +} +else +{ +lean_object* x_16; lean_object* x_17; lean_object* x_18; lean_object* x_19; lean_object* x_20; +x_16 = lean_ctor_get(x_11, 0); +x_17 = lean_ctor_get(x_11, 1); +lean_inc(x_17); +lean_inc(x_16); +lean_dec(x_11); +x_18 = lean_alloc_ctor(0, 2, 0); +lean_ctor_set(x_18, 0, x_16); +lean_ctor_set(x_18, 1, x_4); +x_19 = lean_alloc_ctor(0, 2, 0); +lean_ctor_set(x_19, 0, x_18); +lean_ctor_set(x_19, 1, x_5); +x_20 = lean_alloc_ctor(0, 2, 0); +lean_ctor_set(x_20, 0, x_19); +lean_ctor_set(x_20, 1, x_17); +return x_20; +} +} +else +{ +uint8_t x_21; +lean_dec(x_5); +lean_dec(x_4); +x_21 = !lean_is_exclusive(x_11); +if (x_21 == 0) +{ +return x_11; +} +else +{ +lean_object* x_22; lean_object* x_23; lean_object* x_24; +x_22 = lean_ctor_get(x_11, 0); +x_23 = lean_ctor_get(x_11, 1); +lean_inc(x_23); +lean_inc(x_22); +lean_dec(x_11); +x_24 = lean_alloc_ctor(1, 2, 0); +lean_ctor_set(x_24, 0, x_22); +lean_ctor_set(x_24, 1, x_23); +return x_24; +} +} +} +} +LEAN_EXPORT lean_object* l_Lean_Tactic_FunInd_buildInductionBody___lambda__39(lean_object* x_1, lean_object* x_2, lean_object* x_3, lean_object* x_4, lean_object* x_5, lean_object* x_6, lean_object* x_7, lean_object* x_8, lean_object* x_9) { +_start: +{ +lean_object* x_10; +x_10 = l_Lean_Meta_mkFalseElim(x_1, x_2, x_5, x_6, x_7, x_8, x_9); +if (lean_obj_tag(x_10) == 0) +{ +uint8_t x_11; +x_11 = !lean_is_exclusive(x_10); +if (x_11 == 0) +{ +lean_object* x_12; lean_object* x_13; lean_object* x_14; +x_12 = lean_ctor_get(x_10, 0); +x_13 = lean_alloc_ctor(0, 2, 0); +lean_ctor_set(x_13, 0, x_12); +lean_ctor_set(x_13, 1, x_3); +x_14 = lean_alloc_ctor(0, 2, 0); +lean_ctor_set(x_14, 0, x_13); +lean_ctor_set(x_14, 1, x_4); +lean_ctor_set(x_10, 0, x_14); +return x_10; +} +else +{ +lean_object* x_15; lean_object* x_16; lean_object* x_17; lean_object* x_18; lean_object* x_19; +x_15 = lean_ctor_get(x_10, 0); +x_16 = lean_ctor_get(x_10, 1); +lean_inc(x_16); +lean_inc(x_15); +lean_dec(x_10); +x_17 = lean_alloc_ctor(0, 2, 0); +lean_ctor_set(x_17, 0, x_15); +lean_ctor_set(x_17, 1, x_3); +x_18 = lean_alloc_ctor(0, 2, 0); +lean_ctor_set(x_18, 0, x_17); +lean_ctor_set(x_18, 1, x_4); +x_19 = lean_alloc_ctor(0, 2, 0); +lean_ctor_set(x_19, 0, x_18); +lean_ctor_set(x_19, 1, x_16); +return x_19; +} +} +else +{ +uint8_t x_20; +lean_dec(x_4); +lean_dec(x_3); +x_20 = !lean_is_exclusive(x_10); +if (x_20 == 0) +{ +return x_10; +} +else +{ +lean_object* x_21; lean_object* x_22; lean_object* x_23; +x_21 = lean_ctor_get(x_10, 0); +x_22 = lean_ctor_get(x_10, 1); +lean_inc(x_22); +lean_inc(x_21); +lean_dec(x_10); +x_23 = lean_alloc_ctor(1, 2, 0); +lean_ctor_set(x_23, 0, x_21); +lean_ctor_set(x_23, 1, x_22); +return x_23; +} +} +} +} +LEAN_EXPORT lean_object* l_Lean_Tactic_FunInd_buildInductionBody___lambda__40(lean_object* x_1, 
lean_object* x_2, lean_object* x_3, lean_object* x_4, lean_object* x_5, lean_object* x_6, lean_object* x_7, lean_object* x_8, lean_object* x_9) { +_start: +{ +lean_object* x_10; lean_object* x_11; +x_10 = lean_box(0); +x_11 = lean_apply_8(x_1, x_10, x_3, x_4, x_5, x_6, x_7, x_8, x_9); +return x_11; +} +} +static lean_object* _init_l_Lean_Tactic_FunInd_buildInductionBody___lambda__41___closed__1() { +_start: +{ +lean_object* x_1; +x_1 = lean_mk_string_unchecked("False", 5, 5); +return x_1; +} +} +static lean_object* _init_l_Lean_Tactic_FunInd_buildInductionBody___lambda__41___closed__2() { +_start: +{ +lean_object* x_1; +x_1 = lean_mk_string_unchecked("elim", 4, 4); +return x_1; +} +} +static lean_object* _init_l_Lean_Tactic_FunInd_buildInductionBody___lambda__41___closed__3() { +_start: +{ +lean_object* x_1; lean_object* x_2; lean_object* x_3; +x_1 = l_Lean_Tactic_FunInd_buildInductionBody___lambda__41___closed__1; +x_2 = l_Lean_Tactic_FunInd_buildInductionBody___lambda__41___closed__2; +x_3 = l_Lean_Name_mkStr2(x_1, x_2); +return x_3; +} +} +static lean_object* _init_l_Lean_Tactic_FunInd_buildInductionBody___lambda__41___closed__4() { +_start: +{ +lean_object* x_1; +x_1 = lean_mk_string_unchecked("absurd", 6, 6); +return x_1; +} +} +static lean_object* _init_l_Lean_Tactic_FunInd_buildInductionBody___lambda__41___closed__5() { +_start: +{ +lean_object* x_1; lean_object* x_2; lean_object* x_3; +x_1 = lean_box(0); +x_2 = l_Lean_Tactic_FunInd_buildInductionBody___lambda__41___closed__4; +x_3 = l_Lean_Name_str___override(x_1, x_2); +return x_3; +} +} +LEAN_EXPORT lean_object* l_Lean_Tactic_FunInd_buildInductionBody___lambda__41(lean_object* x_1, lean_object* x_2, lean_object* x_3, lean_object* x_4, lean_object* x_5, lean_object* x_6, lean_object* x_7, lean_object* x_8, lean_object* x_9, lean_object* x_10, lean_object* x_11, lean_object* x_12, lean_object* x_13, lean_object* x_14, lean_object* x_15) { +_start: +{ +lean_object* x_16; lean_object* x_17; lean_object* x_18; lean_object* x_19; uint8_t x_20; +lean_inc(x_2); +x_16 = l_Lean_Meta_instantiateMVarsIfMVarApp(x_2, x_11, x_12, x_13, x_14, x_15); +x_17 = lean_ctor_get(x_16, 0); +lean_inc(x_17); +x_18 = lean_ctor_get(x_16, 1); +lean_inc(x_18); +lean_dec(x_16); +x_19 = l_Lean_Expr_cleanupAnnotations(x_17); +x_20 = l_Lean_Expr_isApp(x_19); +if (x_20 == 0) +{ +lean_object* x_21; lean_object* x_22; +lean_dec(x_19); +x_21 = lean_box(0); +x_22 = l_Lean_Tactic_FunInd_buildInductionBody___lambda__37(x_1, x_2, x_3, x_4, x_5, x_6, x_7, x_21, x_9, x_10, x_11, x_12, x_13, x_14, x_18); +return x_22; +} +else +{ +lean_object* x_23; lean_object* x_24; uint8_t x_25; +x_23 = l_Lean_Expr_appArg(x_19, lean_box(0)); +x_24 = l_Lean_Expr_appFnCleanup(x_19, lean_box(0)); +x_25 = l_Lean_Expr_isApp(x_24); +if (x_25 == 0) +{ +lean_object* x_26; lean_object* x_27; +lean_dec(x_24); +lean_dec(x_23); +x_26 = lean_box(0); +x_27 = l_Lean_Tactic_FunInd_buildInductionBody___lambda__37(x_1, x_2, x_3, x_4, x_5, x_6, x_7, x_26, x_9, x_10, x_11, x_12, x_13, x_14, x_18); +return x_27; +} +else +{ +lean_object* x_28; lean_object* x_29; lean_object* x_30; uint8_t x_31; +x_28 = l_Lean_Expr_appArg(x_24, lean_box(0)); +x_29 = l_Lean_Expr_appFnCleanup(x_24, lean_box(0)); +x_30 = l_Lean_Tactic_FunInd_buildInductionBody___lambda__41___closed__3; +x_31 = l_Lean_Expr_isConstOf(x_29, x_30); +if (x_31 == 0) +{ +uint8_t x_32; +x_32 = l_Lean_Expr_isApp(x_29); +if (x_32 == 0) +{ +lean_object* x_33; lean_object* x_34; +lean_dec(x_29); +lean_dec(x_28); +lean_dec(x_23); +x_33 = lean_box(0); +x_34 = 
l_Lean_Tactic_FunInd_buildInductionBody___lambda__37(x_1, x_2, x_3, x_4, x_5, x_6, x_7, x_33, x_9, x_10, x_11, x_12, x_13, x_14, x_18); +return x_34; +} +else +{ +lean_object* x_35; uint8_t x_36; +x_35 = l_Lean_Expr_appFnCleanup(x_29, lean_box(0)); +x_36 = l_Lean_Expr_isApp(x_35); +if (x_36 == 0) +{ +lean_object* x_37; lean_object* x_38; +lean_dec(x_35); +lean_dec(x_28); +lean_dec(x_23); +x_37 = lean_box(0); +x_38 = l_Lean_Tactic_FunInd_buildInductionBody___lambda__37(x_1, x_2, x_3, x_4, x_5, x_6, x_7, x_37, x_9, x_10, x_11, x_12, x_13, x_14, x_18); +return x_38; +} +else +{ +lean_object* x_39; lean_object* x_40; uint8_t x_41; +x_39 = l_Lean_Expr_appFnCleanup(x_35, lean_box(0)); +x_40 = l_Lean_Tactic_FunInd_buildInductionBody___lambda__41___closed__5; +x_41 = l_Lean_Expr_isConstOf(x_39, x_40); +lean_dec(x_39); +if (x_41 == 0) +{ +lean_object* x_42; lean_object* x_43; +lean_dec(x_28); +lean_dec(x_23); +x_42 = lean_box(0); +x_43 = l_Lean_Tactic_FunInd_buildInductionBody___lambda__37(x_1, x_2, x_3, x_4, x_5, x_6, x_7, x_42, x_9, x_10, x_11, x_12, x_13, x_14, x_18); +return x_43; +} +else +{ +lean_object* x_44; +lean_dec(x_7); +lean_dec(x_6); +lean_dec(x_5); +lean_dec(x_4); +lean_dec(x_3); +lean_dec(x_2); +x_44 = l_Lean_Tactic_FunInd_buildInductionBody___lambda__38(x_1, x_28, x_23, x_9, x_10, x_11, x_12, x_13, x_14, x_18); +return x_44; +} +} +} +} +else +{ +lean_object* x_45; +lean_dec(x_29); +lean_dec(x_28); +lean_dec(x_7); +lean_dec(x_6); +lean_dec(x_5); +lean_dec(x_4); +lean_dec(x_3); +lean_dec(x_2); +x_45 = l_Lean_Tactic_FunInd_buildInductionBody___lambda__39(x_1, x_23, x_9, x_10, x_11, x_12, x_13, x_14, x_18); +return x_45; +} +} +} +} +} +static lean_object* _init_l_Lean_Tactic_FunInd_buildInductionBody___lambda__42___closed__1() { +_start: +{ +lean_object* x_1; x_1 = lean_mk_string_unchecked("cond", 4, 4); return x_1; } } -static lean_object* _init_l_Lean_Tactic_FunInd_buildInductionBody___lambda__37___closed__2() { +static lean_object* _init_l_Lean_Tactic_FunInd_buildInductionBody___lambda__42___closed__2() { _start: { lean_object* x_1; lean_object* x_2; lean_object* x_3; x_1 = lean_box(0); -x_2 = l_Lean_Tactic_FunInd_buildInductionBody___lambda__37___closed__1; +x_2 = l_Lean_Tactic_FunInd_buildInductionBody___lambda__42___closed__1; x_3 = l_Lean_Name_str___override(x_1, x_2); return x_3; } } -LEAN_EXPORT lean_object* l_Lean_Tactic_FunInd_buildInductionBody___lambda__37(lean_object* x_1, lean_object* x_2, lean_object* x_3, lean_object* x_4, lean_object* x_5, lean_object* x_6, lean_object* x_7, lean_object* x_8, lean_object* x_9, lean_object* x_10, lean_object* x_11, lean_object* x_12, lean_object* x_13, lean_object* x_14, lean_object* x_15) { +LEAN_EXPORT lean_object* l_Lean_Tactic_FunInd_buildInductionBody___lambda__42(lean_object* x_1, lean_object* x_2, lean_object* x_3, lean_object* x_4, lean_object* x_5, lean_object* x_6, lean_object* x_7, lean_object* x_8, lean_object* x_9, lean_object* x_10, lean_object* x_11, lean_object* x_12, lean_object* x_13, lean_object* x_14, lean_object* x_15) { _start: { lean_object* x_16; uint8_t x_17; @@ -59073,7 +60427,7 @@ if (x_17 == 0) lean_object* x_18; lean_object* x_19; lean_dec(x_16); x_18 = lean_box(0); -x_19 = l_Lean_Tactic_FunInd_buildInductionBody___lambda__36(x_6, x_7, x_4, x_5, x_1, x_2, x_3, x_18, x_9, x_10, x_11, x_12, x_13, x_14, x_15); +x_19 = l_Lean_Tactic_FunInd_buildInductionBody___lambda__41(x_6, x_7, x_4, x_5, x_1, x_2, x_3, x_18, x_9, x_10, x_11, x_12, x_13, x_14, x_15); return x_19; } else @@ -59088,7 +60442,7 @@ lean_object* 
x_23; lean_object* x_24; lean_dec(x_21); lean_dec(x_20); x_23 = lean_box(0); -x_24 = l_Lean_Tactic_FunInd_buildInductionBody___lambda__36(x_6, x_7, x_4, x_5, x_1, x_2, x_3, x_23, x_9, x_10, x_11, x_12, x_13, x_14, x_15); +x_24 = l_Lean_Tactic_FunInd_buildInductionBody___lambda__41(x_6, x_7, x_4, x_5, x_1, x_2, x_3, x_23, x_9, x_10, x_11, x_12, x_13, x_14, x_15); return x_24; } else @@ -59104,7 +60458,7 @@ lean_dec(x_26); lean_dec(x_25); lean_dec(x_20); x_28 = lean_box(0); -x_29 = l_Lean_Tactic_FunInd_buildInductionBody___lambda__36(x_6, x_7, x_4, x_5, x_1, x_2, x_3, x_28, x_9, x_10, x_11, x_12, x_13, x_14, x_15); +x_29 = l_Lean_Tactic_FunInd_buildInductionBody___lambda__41(x_6, x_7, x_4, x_5, x_1, x_2, x_3, x_28, x_9, x_10, x_11, x_12, x_13, x_14, x_15); return x_29; } else @@ -59121,7 +60475,7 @@ lean_dec(x_30); lean_dec(x_25); lean_dec(x_20); x_33 = lean_box(0); -x_34 = l_Lean_Tactic_FunInd_buildInductionBody___lambda__36(x_6, x_7, x_4, x_5, x_1, x_2, x_3, x_33, x_9, x_10, x_11, x_12, x_13, x_14, x_15); +x_34 = l_Lean_Tactic_FunInd_buildInductionBody___lambda__41(x_6, x_7, x_4, x_5, x_1, x_2, x_3, x_33, x_9, x_10, x_11, x_12, x_13, x_14, x_15); return x_34; } else @@ -59129,7 +60483,7 @@ else lean_object* x_35; lean_object* x_36; lean_object* x_37; uint8_t x_38; x_35 = l_Lean_Expr_appArg(x_31, lean_box(0)); x_36 = l_Lean_Expr_appFnCleanup(x_31, lean_box(0)); -x_37 = l_Lean_Tactic_FunInd_buildInductionBody___lambda__37___closed__2; +x_37 = l_Lean_Tactic_FunInd_buildInductionBody___lambda__42___closed__2; x_38 = l_Lean_Expr_isConstOf(x_36, x_37); if (x_38 == 0) { @@ -59144,7 +60498,7 @@ lean_dec(x_30); lean_dec(x_25); lean_dec(x_20); x_40 = lean_box(0); -x_41 = l_Lean_Tactic_FunInd_buildInductionBody___lambda__36(x_6, x_7, x_4, x_5, x_1, x_2, x_3, x_40, x_9, x_10, x_11, x_12, x_13, x_14, x_15); +x_41 = l_Lean_Tactic_FunInd_buildInductionBody___lambda__41(x_6, x_7, x_4, x_5, x_1, x_2, x_3, x_40, x_9, x_10, x_11, x_12, x_13, x_14, x_15); return x_41; } else @@ -59169,7 +60523,7 @@ lean_dec(x_30); lean_dec(x_25); lean_dec(x_20); x_48 = lean_box(0); -x_49 = l_Lean_Tactic_FunInd_buildInductionBody___lambda__36(x_6, x_7, x_4, x_5, x_1, x_2, x_3, x_48, x_9, x_10, x_11, x_12, x_13, x_14, x_15); +x_49 = l_Lean_Tactic_FunInd_buildInductionBody___lambda__41(x_6, x_7, x_4, x_5, x_1, x_2, x_3, x_48, x_9, x_10, x_11, x_12, x_13, x_14, x_15); return x_49; } else @@ -59227,7 +60581,7 @@ x_17 = lean_alloc_closure((void*)(l_StateT_lift___at_Lean_Tactic_FunInd_buildInd lean_closure_set(x_17, 0, x_16); x_18 = lean_alloc_closure((void*)(l_StateT_lift___at_Lean_Tactic_FunInd_buildInductionCase___spec__1___rarg), 8, 1); lean_closure_set(x_18, 0, x_17); -x_19 = lean_alloc_closure((void*)(l_Lean_Tactic_FunInd_buildInductionBody___lambda__37), 15, 7); +x_19 = lean_alloc_closure((void*)(l_Lean_Tactic_FunInd_buildInductionBody___lambda__42), 15, 7); lean_closure_set(x_19, 0, x_4); lean_closure_set(x_19, 1, x_5); lean_closure_set(x_19, 2, x_6); @@ -60341,6 +61695,33 @@ lean_dec(x_8); return x_16; } } +LEAN_EXPORT lean_object* l_Lean_Tactic_FunInd_buildInductionBody___lambda__37___boxed(lean_object* x_1, lean_object* x_2, lean_object* x_3, lean_object* x_4, lean_object* x_5, lean_object* x_6, lean_object* x_7, lean_object* x_8, lean_object* x_9, lean_object* x_10, lean_object* x_11, lean_object* x_12, lean_object* x_13, lean_object* x_14, lean_object* x_15) { +_start: +{ +lean_object* x_16; +x_16 = l_Lean_Tactic_FunInd_buildInductionBody___lambda__37(x_1, x_2, x_3, x_4, x_5, x_6, x_7, x_8, x_9, x_10, x_11, x_12, x_13, 
x_14, x_15); +lean_dec(x_8); +return x_16; +} +} +LEAN_EXPORT lean_object* l_Lean_Tactic_FunInd_buildInductionBody___lambda__40___boxed(lean_object* x_1, lean_object* x_2, lean_object* x_3, lean_object* x_4, lean_object* x_5, lean_object* x_6, lean_object* x_7, lean_object* x_8, lean_object* x_9) { +_start: +{ +lean_object* x_10; +x_10 = l_Lean_Tactic_FunInd_buildInductionBody___lambda__40(x_1, x_2, x_3, x_4, x_5, x_6, x_7, x_8, x_9); +lean_dec(x_2); +return x_10; +} +} +LEAN_EXPORT lean_object* l_Lean_Tactic_FunInd_buildInductionBody___lambda__41___boxed(lean_object* x_1, lean_object* x_2, lean_object* x_3, lean_object* x_4, lean_object* x_5, lean_object* x_6, lean_object* x_7, lean_object* x_8, lean_object* x_9, lean_object* x_10, lean_object* x_11, lean_object* x_12, lean_object* x_13, lean_object* x_14, lean_object* x_15) { +_start: +{ +lean_object* x_16; +x_16 = l_Lean_Tactic_FunInd_buildInductionBody___lambda__41(x_1, x_2, x_3, x_4, x_5, x_6, x_7, x_8, x_9, x_10, x_11, x_12, x_13, x_14, x_15); +lean_dec(x_8); +return x_16; +} +} LEAN_EXPORT lean_object* l_Array_mapMUnsafe_map___at_Lean_Tactic_FunInd_abstractIndependentMVars___spec__1___lambda__1(lean_object* x_1, lean_object* x_2, lean_object* x_3, lean_object* x_4, lean_object* x_5, lean_object* x_6, lean_object* x_7) { _start: { @@ -66380,7 +67761,7 @@ static lean_object* _init_l_Std_Range_forIn_x27_loop___at_Lean_Tactic_FunInd_set lean_object* x_1; lean_object* x_2; lean_object* x_3; lean_object* x_4; lean_object* x_5; lean_object* x_6; x_1 = l_Lean_Tactic_FunInd_foldAndCollect___lambda__9___closed__1; x_2 = l_Std_Range_forIn_x27_loop___at_Lean_Tactic_FunInd_setNaryFunIndInfo___spec__2___closed__3; -x_3 = lean_unsigned_to_nat(988u); +x_3 = lean_unsigned_to_nat(1009u); x_4 = lean_unsigned_to_nat(8u); x_5 = l_Std_Range_forIn_x27_loop___at_Lean_Tactic_FunInd_setNaryFunIndInfo___spec__2___closed__2; x_6 = l___private_Init_Util_0__mkPanicMessageWithDecl(x_1, x_2, x_3, x_4, x_5); @@ -66751,7 +68132,7 @@ static lean_object* _init_l_Lean_Tactic_FunInd_setNaryFunIndInfo___closed__3() { lean_object* x_1; lean_object* x_2; lean_object* x_3; lean_object* x_4; lean_object* x_5; lean_object* x_6; x_1 = l_Lean_Tactic_FunInd_foldAndCollect___lambda__9___closed__1; x_2 = l_Std_Range_forIn_x27_loop___at_Lean_Tactic_FunInd_setNaryFunIndInfo___spec__2___closed__3; -x_3 = lean_unsigned_to_nat(978u); +x_3 = lean_unsigned_to_nat(999u); x_4 = lean_unsigned_to_nat(2u); x_5 = l_Lean_Tactic_FunInd_setNaryFunIndInfo___closed__2; x_6 = l___private_Init_Util_0__mkPanicMessageWithDecl(x_1, x_2, x_3, x_4, x_5); @@ -66825,7 +68206,7 @@ static lean_object* _init_l_Lean_Tactic_FunInd_setNaryFunIndInfo___closed__11() lean_object* x_1; lean_object* x_2; lean_object* x_3; lean_object* x_4; lean_object* x_5; lean_object* x_6; x_1 = l_Lean_Tactic_FunInd_foldAndCollect___lambda__9___closed__1; x_2 = l_Std_Range_forIn_x27_loop___at_Lean_Tactic_FunInd_setNaryFunIndInfo___spec__2___closed__3; -x_3 = lean_unsigned_to_nat(993u); +x_3 = lean_unsigned_to_nat(1014u); x_4 = lean_unsigned_to_nat(4u); x_5 = l_Lean_Tactic_FunInd_setNaryFunIndInfo___closed__10; x_6 = l___private_Init_Util_0__mkPanicMessageWithDecl(x_1, x_2, x_3, x_4, x_5); @@ -67598,7 +68979,7 @@ static lean_object* _init_l_Lean_Tactic_FunInd_cleanPackedArgs___lambda__5___clo lean_object* x_1; lean_object* x_2; lean_object* x_3; lean_object* x_4; lean_object* x_5; lean_object* x_6; x_1 = l_Lean_Tactic_FunInd_foldAndCollect___lambda__9___closed__1; x_2 = l_Lean_Tactic_FunInd_cleanPackedArgs___lambda__5___closed__2; 
-x_3 = lean_unsigned_to_nat(1020u); +x_3 = lean_unsigned_to_nat(1041u); x_4 = lean_unsigned_to_nat(50u); x_5 = l_List_forIn_x27_loop___at_Lean_Tactic_FunInd_foldAndCollect___spec__6___closed__3; x_6 = l___private_Init_Util_0__mkPanicMessageWithDecl(x_1, x_2, x_3, x_4, x_5); @@ -71820,7 +73201,7 @@ static lean_object* _init_l_Lean_Tactic_FunInd_withLetDecls___rarg___closed__4() lean_object* x_1; lean_object* x_2; lean_object* x_3; lean_object* x_4; lean_object* x_5; lean_object* x_6; x_1 = l_Lean_Tactic_FunInd_foldAndCollect___lambda__9___closed__1; x_2 = l_Lean_Tactic_FunInd_withLetDecls___rarg___closed__3; -x_3 = lean_unsigned_to_nat(1110u); +x_3 = lean_unsigned_to_nat(1131u); x_4 = lean_unsigned_to_nat(2u); x_5 = l_Lean_Tactic_FunInd_withLetDecls___rarg___closed__2; x_6 = l___private_Init_Util_0__mkPanicMessageWithDecl(x_1, x_2, x_3, x_4, x_5); @@ -79041,7 +80422,7 @@ static lean_object* _init_l_Array_forIn_x27Unsafe_loop___at_Lean_Tactic_FunInd_d lean_object* x_1; lean_object* x_2; lean_object* x_3; lean_object* x_4; lean_object* x_5; lean_object* x_6; x_1 = l_Lean_Tactic_FunInd_foldAndCollect___lambda__9___closed__1; x_2 = l_Array_forIn_x27Unsafe_loop___at_Lean_Tactic_FunInd_deriveInductionStructural_doRealize___spec__19___lambda__3___closed__1; -x_3 = lean_unsigned_to_nat(1302u); +x_3 = lean_unsigned_to_nat(1323u); x_4 = lean_unsigned_to_nat(73u); x_5 = l_Array_forIn_x27Unsafe_loop___at_Lean_Tactic_FunInd_deriveInductionStructural_doRealize___spec__19___lambda__3___closed__2; x_6 = l___private_Init_Util_0__mkPanicMessageWithDecl(x_1, x_2, x_3, x_4, x_5); @@ -79054,7 +80435,7 @@ static lean_object* _init_l_Array_forIn_x27Unsafe_loop___at_Lean_Tactic_FunInd_d lean_object* x_1; lean_object* x_2; lean_object* x_3; lean_object* x_4; lean_object* x_5; lean_object* x_6; x_1 = l_Lean_Tactic_FunInd_foldAndCollect___lambda__9___closed__1; x_2 = l_Array_forIn_x27Unsafe_loop___at_Lean_Tactic_FunInd_deriveInductionStructural_doRealize___spec__19___lambda__3___closed__1; -x_3 = lean_unsigned_to_nat(1303u); +x_3 = lean_unsigned_to_nat(1324u); x_4 = lean_unsigned_to_nat(67u); x_5 = l_Array_forIn_x27Unsafe_loop___at_Lean_Tactic_FunInd_deriveInductionStructural_doRealize___spec__19___lambda__3___closed__2; x_6 = l___private_Init_Util_0__mkPanicMessageWithDecl(x_1, x_2, x_3, x_4, x_5); @@ -79067,7 +80448,7 @@ static lean_object* _init_l_Array_forIn_x27Unsafe_loop___at_Lean_Tactic_FunInd_d lean_object* x_1; lean_object* x_2; lean_object* x_3; lean_object* x_4; lean_object* x_5; lean_object* x_6; x_1 = l_Lean_Tactic_FunInd_foldAndCollect___lambda__9___closed__1; x_2 = l_Array_forIn_x27Unsafe_loop___at_Lean_Tactic_FunInd_deriveInductionStructural_doRealize___spec__19___lambda__3___closed__1; -x_3 = lean_unsigned_to_nat(1304u); +x_3 = lean_unsigned_to_nat(1325u); x_4 = lean_unsigned_to_nat(62u); x_5 = l_Array_forIn_x27Unsafe_loop___at_Lean_Tactic_FunInd_deriveInductionStructural_doRealize___spec__19___lambda__3___closed__2; x_6 = l___private_Init_Util_0__mkPanicMessageWithDecl(x_1, x_2, x_3, x_4, x_5); @@ -83900,7 +85281,7 @@ static lean_object* _init_l_Lean_Tactic_FunInd_deriveInductionStructural_doReali lean_object* x_1; lean_object* x_2; lean_object* x_3; lean_object* x_4; lean_object* x_5; lean_object* x_6; x_1 = l_Lean_Tactic_FunInd_foldAndCollect___lambda__9___closed__1; x_2 = l_Array_forIn_x27Unsafe_loop___at_Lean_Tactic_FunInd_deriveInductionStructural_doRealize___spec__19___lambda__3___closed__1; -x_3 = lean_unsigned_to_nat(1206u); +x_3 = lean_unsigned_to_nat(1227u); x_4 = lean_unsigned_to_nat(6u); 
x_5 = l_Lean_Tactic_FunInd_deriveInductionStructural_doRealize___lambda__12___closed__2; x_6 = l___private_Init_Util_0__mkPanicMessageWithDecl(x_1, x_2, x_3, x_4, x_5); @@ -84807,7 +86188,7 @@ static lean_object* _init_l_Lean_Tactic_FunInd_deriveInductionStructural_doReali lean_object* x_1; lean_object* x_2; lean_object* x_3; lean_object* x_4; lean_object* x_5; lean_object* x_6; x_1 = l_Lean_Tactic_FunInd_foldAndCollect___lambda__9___closed__1; x_2 = l_Array_forIn_x27Unsafe_loop___at_Lean_Tactic_FunInd_deriveInductionStructural_doRealize___spec__19___lambda__3___closed__1; -x_3 = lean_unsigned_to_nat(1157u); +x_3 = lean_unsigned_to_nat(1178u); x_4 = lean_unsigned_to_nat(41u); x_5 = l_List_forIn_x27_loop___at_Lean_Tactic_FunInd_foldAndCollect___spec__6___closed__3; x_6 = l___private_Init_Util_0__mkPanicMessageWithDecl(x_1, x_2, x_3, x_4, x_5); @@ -85706,7 +87087,7 @@ static lean_object* _init_l_Lean_Tactic_FunInd_deriveInductionStructural_doReali lean_object* x_1; lean_object* x_2; lean_object* x_3; lean_object* x_4; lean_object* x_5; lean_object* x_6; x_1 = l_Lean_Tactic_FunInd_foldAndCollect___lambda__9___closed__1; x_2 = l_Array_forIn_x27Unsafe_loop___at_Lean_Tactic_FunInd_deriveInductionStructural_doRealize___spec__19___lambda__3___closed__1; -x_3 = lean_unsigned_to_nat(1135u); +x_3 = lean_unsigned_to_nat(1156u); x_4 = lean_unsigned_to_nat(2u); x_5 = l_Lean_Tactic_FunInd_deriveInductionStructural_doRealize___closed__2; x_6 = l___private_Init_Util_0__mkPanicMessageWithDecl(x_1, x_2, x_3, x_4, x_5); @@ -91433,7 +92814,7 @@ x_4 = lean_box(x_3); return x_4; } } -LEAN_EXPORT lean_object* l_Lean_Tactic_FunInd_initFn____x40_Lean_Meta_Tactic_FunInd___hyg_22849____lambda__1(lean_object* x_1, lean_object* x_2, lean_object* x_3, lean_object* x_4) { +LEAN_EXPORT lean_object* l_Lean_Tactic_FunInd_initFn____x40_Lean_Meta_Tactic_FunInd___hyg_23481____lambda__1(lean_object* x_1, lean_object* x_2, lean_object* x_3, lean_object* x_4) { _start: { uint8_t x_5; lean_object* x_6; lean_object* x_7; @@ -91445,15 +92826,15 @@ lean_ctor_set(x_7, 1, x_4); return x_7; } } -static lean_object* _init_l_Lean_Tactic_FunInd_initFn____x40_Lean_Meta_Tactic_FunInd___hyg_22849____lambda__2___closed__1() { +static lean_object* _init_l_Lean_Tactic_FunInd_initFn____x40_Lean_Meta_Tactic_FunInd___hyg_23481____lambda__2___closed__1() { _start: { lean_object* x_1; -x_1 = lean_alloc_closure((void*)(l_Lean_Tactic_FunInd_initFn____x40_Lean_Meta_Tactic_FunInd___hyg_22849____lambda__1___boxed), 4, 0); +x_1 = lean_alloc_closure((void*)(l_Lean_Tactic_FunInd_initFn____x40_Lean_Meta_Tactic_FunInd___hyg_23481____lambda__1___boxed), 4, 0); return x_1; } } -static lean_object* _init_l_Lean_Tactic_FunInd_initFn____x40_Lean_Meta_Tactic_FunInd___hyg_22849____lambda__2___closed__2() { +static lean_object* _init_l_Lean_Tactic_FunInd_initFn____x40_Lean_Meta_Tactic_FunInd___hyg_23481____lambda__2___closed__2() { _start: { uint8_t x_1; uint8_t x_2; uint8_t x_3; uint8_t x_4; uint8_t x_5; lean_object* x_6; @@ -91484,16 +92865,16 @@ lean_ctor_set_uint8(x_6, 17, x_2); return x_6; } } -static uint64_t _init_l_Lean_Tactic_FunInd_initFn____x40_Lean_Meta_Tactic_FunInd___hyg_22849____lambda__2___closed__3() { +static uint64_t _init_l_Lean_Tactic_FunInd_initFn____x40_Lean_Meta_Tactic_FunInd___hyg_23481____lambda__2___closed__3() { _start: { lean_object* x_1; uint64_t x_2; -x_1 = l_Lean_Tactic_FunInd_initFn____x40_Lean_Meta_Tactic_FunInd___hyg_22849____lambda__2___closed__2; +x_1 = 
l_Lean_Tactic_FunInd_initFn____x40_Lean_Meta_Tactic_FunInd___hyg_23481____lambda__2___closed__2; x_2 = l___private_Lean_Meta_Basic_0__Lean_Meta_Config_toKey(x_1); return x_2; } } -static lean_object* _init_l_Lean_Tactic_FunInd_initFn____x40_Lean_Meta_Tactic_FunInd___hyg_22849____lambda__2___closed__4() { +static lean_object* _init_l_Lean_Tactic_FunInd_initFn____x40_Lean_Meta_Tactic_FunInd___hyg_23481____lambda__2___closed__4() { _start: { lean_object* x_1; @@ -91501,22 +92882,22 @@ x_1 = l_Lean_PersistentHashMap_mkEmptyEntriesArray(lean_box(0), lean_box(0)); return x_1; } } -static lean_object* _init_l_Lean_Tactic_FunInd_initFn____x40_Lean_Meta_Tactic_FunInd___hyg_22849____lambda__2___closed__5() { +static lean_object* _init_l_Lean_Tactic_FunInd_initFn____x40_Lean_Meta_Tactic_FunInd___hyg_23481____lambda__2___closed__5() { _start: { lean_object* x_1; lean_object* x_2; -x_1 = l_Lean_Tactic_FunInd_initFn____x40_Lean_Meta_Tactic_FunInd___hyg_22849____lambda__2___closed__4; +x_1 = l_Lean_Tactic_FunInd_initFn____x40_Lean_Meta_Tactic_FunInd___hyg_23481____lambda__2___closed__4; x_2 = lean_alloc_ctor(0, 1, 0); lean_ctor_set(x_2, 0, x_1); return x_2; } } -static lean_object* _init_l_Lean_Tactic_FunInd_initFn____x40_Lean_Meta_Tactic_FunInd___hyg_22849____lambda__2___closed__6() { +static lean_object* _init_l_Lean_Tactic_FunInd_initFn____x40_Lean_Meta_Tactic_FunInd___hyg_23481____lambda__2___closed__6() { _start: { lean_object* x_1; lean_object* x_2; lean_object* x_3; lean_object* x_4; x_1 = lean_box(0); -x_2 = l_Lean_Tactic_FunInd_initFn____x40_Lean_Meta_Tactic_FunInd___hyg_22849____lambda__2___closed__5; +x_2 = l_Lean_Tactic_FunInd_initFn____x40_Lean_Meta_Tactic_FunInd___hyg_23481____lambda__2___closed__5; x_3 = l___private_Lean_Util_Trace_0__Lean_getResetTraces___at_Lean_Tactic_FunInd_foldAndCollect___spec__19___closed__3; x_4 = lean_alloc_ctor(0, 3, 0); lean_ctor_set(x_4, 0, x_2); @@ -91525,16 +92906,16 @@ lean_ctor_set(x_4, 2, x_1); return x_4; } } -static lean_object* _init_l_Lean_Tactic_FunInd_initFn____x40_Lean_Meta_Tactic_FunInd___hyg_22849____lambda__2___closed__7() { +static lean_object* _init_l_Lean_Tactic_FunInd_initFn____x40_Lean_Meta_Tactic_FunInd___hyg_23481____lambda__2___closed__7() { _start: { lean_object* x_1; lean_object* x_2; lean_object* x_3; uint64_t x_4; uint8_t x_5; lean_object* x_6; lean_object* x_7; lean_object* x_8; lean_object* x_9; x_1 = lean_box(0); x_2 = lean_box(0); -x_3 = l_Lean_Tactic_FunInd_initFn____x40_Lean_Meta_Tactic_FunInd___hyg_22849____lambda__2___closed__2; -x_4 = l_Lean_Tactic_FunInd_initFn____x40_Lean_Meta_Tactic_FunInd___hyg_22849____lambda__2___closed__3; +x_3 = l_Lean_Tactic_FunInd_initFn____x40_Lean_Meta_Tactic_FunInd___hyg_23481____lambda__2___closed__2; +x_4 = l_Lean_Tactic_FunInd_initFn____x40_Lean_Meta_Tactic_FunInd___hyg_23481____lambda__2___closed__3; x_5 = 0; -x_6 = l_Lean_Tactic_FunInd_initFn____x40_Lean_Meta_Tactic_FunInd___hyg_22849____lambda__2___closed__6; +x_6 = l_Lean_Tactic_FunInd_initFn____x40_Lean_Meta_Tactic_FunInd___hyg_23481____lambda__2___closed__6; x_7 = l_Lean_Tactic_FunInd_M_run___rarg___closed__1; x_8 = lean_unsigned_to_nat(0u); x_9 = lean_alloc_ctor(0, 7, 11); @@ -91552,12 +92933,12 @@ lean_ctor_set_uint8(x_9, sizeof(void*)*7 + 10, x_5); return x_9; } } -static lean_object* _init_l_Lean_Tactic_FunInd_initFn____x40_Lean_Meta_Tactic_FunInd___hyg_22849____lambda__2___closed__8() { +static lean_object* _init_l_Lean_Tactic_FunInd_initFn____x40_Lean_Meta_Tactic_FunInd___hyg_23481____lambda__2___closed__8() { _start: { 
lean_object* x_1; lean_object* x_2; lean_object* x_3; x_1 = lean_unsigned_to_nat(0u); -x_2 = l_Lean_Tactic_FunInd_initFn____x40_Lean_Meta_Tactic_FunInd___hyg_22849____lambda__2___closed__5; +x_2 = l_Lean_Tactic_FunInd_initFn____x40_Lean_Meta_Tactic_FunInd___hyg_23481____lambda__2___closed__5; x_3 = lean_alloc_ctor(0, 9, 0); lean_ctor_set(x_3, 0, x_1); lean_ctor_set(x_3, 1, x_1); @@ -91571,11 +92952,11 @@ lean_ctor_set(x_3, 8, x_2); return x_3; } } -static lean_object* _init_l_Lean_Tactic_FunInd_initFn____x40_Lean_Meta_Tactic_FunInd___hyg_22849____lambda__2___closed__9() { +static lean_object* _init_l_Lean_Tactic_FunInd_initFn____x40_Lean_Meta_Tactic_FunInd___hyg_23481____lambda__2___closed__9() { _start: { lean_object* x_1; lean_object* x_2; -x_1 = l_Lean_Tactic_FunInd_initFn____x40_Lean_Meta_Tactic_FunInd___hyg_22849____lambda__2___closed__5; +x_1 = l_Lean_Tactic_FunInd_initFn____x40_Lean_Meta_Tactic_FunInd___hyg_23481____lambda__2___closed__5; x_2 = lean_alloc_ctor(0, 6, 0); lean_ctor_set(x_2, 0, x_1); lean_ctor_set(x_2, 1, x_1); @@ -91586,11 +92967,11 @@ lean_ctor_set(x_2, 5, x_1); return x_2; } } -static lean_object* _init_l_Lean_Tactic_FunInd_initFn____x40_Lean_Meta_Tactic_FunInd___hyg_22849____lambda__2___closed__10() { +static lean_object* _init_l_Lean_Tactic_FunInd_initFn____x40_Lean_Meta_Tactic_FunInd___hyg_23481____lambda__2___closed__10() { _start: { lean_object* x_1; lean_object* x_2; -x_1 = l_Lean_Tactic_FunInd_initFn____x40_Lean_Meta_Tactic_FunInd___hyg_22849____lambda__2___closed__5; +x_1 = l_Lean_Tactic_FunInd_initFn____x40_Lean_Meta_Tactic_FunInd___hyg_23481____lambda__2___closed__5; x_2 = lean_alloc_ctor(0, 4, 0); lean_ctor_set(x_2, 0, x_1); lean_ctor_set(x_2, 1, x_1); @@ -91599,15 +92980,15 @@ lean_ctor_set(x_2, 3, x_1); return x_2; } } -static lean_object* _init_l_Lean_Tactic_FunInd_initFn____x40_Lean_Meta_Tactic_FunInd___hyg_22849____lambda__2___closed__11() { +static lean_object* _init_l_Lean_Tactic_FunInd_initFn____x40_Lean_Meta_Tactic_FunInd___hyg_23481____lambda__2___closed__11() { _start: { lean_object* x_1; lean_object* x_2; lean_object* x_3; lean_object* x_4; lean_object* x_5; lean_object* x_6; x_1 = lean_box(0); -x_2 = l_Lean_Tactic_FunInd_initFn____x40_Lean_Meta_Tactic_FunInd___hyg_22849____lambda__2___closed__8; -x_3 = l_Lean_Tactic_FunInd_initFn____x40_Lean_Meta_Tactic_FunInd___hyg_22849____lambda__2___closed__9; +x_2 = l_Lean_Tactic_FunInd_initFn____x40_Lean_Meta_Tactic_FunInd___hyg_23481____lambda__2___closed__8; +x_3 = l_Lean_Tactic_FunInd_initFn____x40_Lean_Meta_Tactic_FunInd___hyg_23481____lambda__2___closed__9; x_4 = l___private_Lean_Util_Trace_0__Lean_getResetTraces___at_Lean_Tactic_FunInd_foldAndCollect___spec__19___closed__3; -x_5 = l_Lean_Tactic_FunInd_initFn____x40_Lean_Meta_Tactic_FunInd___hyg_22849____lambda__2___closed__10; +x_5 = l_Lean_Tactic_FunInd_initFn____x40_Lean_Meta_Tactic_FunInd___hyg_23481____lambda__2___closed__10; x_6 = lean_alloc_ctor(0, 5, 0); lean_ctor_set(x_6, 0, x_2); lean_ctor_set(x_6, 1, x_3); @@ -91617,7 +92998,7 @@ lean_ctor_set(x_6, 4, x_5); return x_6; } } -LEAN_EXPORT lean_object* l_Lean_Tactic_FunInd_initFn____x40_Lean_Meta_Tactic_FunInd___hyg_22849____lambda__2(lean_object* x_1, lean_object* x_2, lean_object* x_3, lean_object* x_4, lean_object* x_5) { +LEAN_EXPORT lean_object* l_Lean_Tactic_FunInd_initFn____x40_Lean_Meta_Tactic_FunInd___hyg_23481____lambda__2(lean_object* x_1, lean_object* x_2, lean_object* x_3, lean_object* x_4, lean_object* x_5) { _start: { lean_object* x_6; uint8_t x_7; @@ -91631,7 +93012,7 @@ x_9 
= lean_ctor_get(x_6, 1); x_10 = lean_ctor_get(x_8, 0); lean_inc(x_10); lean_dec(x_8); -x_11 = l_Lean_Tactic_FunInd_initFn____x40_Lean_Meta_Tactic_FunInd___hyg_22849____lambda__2___closed__1; +x_11 = l_Lean_Tactic_FunInd_initFn____x40_Lean_Meta_Tactic_FunInd___hyg_23481____lambda__2___closed__1; lean_inc(x_1); x_12 = l_Lean_Tactic_FunInd_isFunCasesName(x_10, x_1); if (x_12 == 0) @@ -91657,14 +93038,14 @@ lean_dec(x_1); x_17 = l_Lean_Tactic_FunInd_isFunCasesName___closed__2; x_18 = lean_string_dec_eq(x_16, x_17); lean_dec(x_16); -x_19 = l_Lean_Tactic_FunInd_initFn____x40_Lean_Meta_Tactic_FunInd___hyg_22849____lambda__2___closed__11; +x_19 = l_Lean_Tactic_FunInd_initFn____x40_Lean_Meta_Tactic_FunInd___hyg_23481____lambda__2___closed__11; x_20 = lean_st_mk_ref(x_19, x_9); x_21 = lean_ctor_get(x_20, 0); lean_inc(x_21); x_22 = lean_ctor_get(x_20, 1); lean_inc(x_22); lean_dec(x_20); -x_23 = l_Lean_Tactic_FunInd_initFn____x40_Lean_Meta_Tactic_FunInd___hyg_22849____lambda__2___closed__7; +x_23 = l_Lean_Tactic_FunInd_initFn____x40_Lean_Meta_Tactic_FunInd___hyg_23481____lambda__2___closed__7; lean_inc(x_21); x_24 = l_Lean_Tactic_FunInd_deriveCases(x_18, x_15, x_23, x_21, x_3, x_4, x_22); if (lean_obj_tag(x_24) == 0) @@ -91748,7 +93129,7 @@ lean_dec(x_6); x_43 = lean_ctor_get(x_41, 0); lean_inc(x_43); lean_dec(x_41); -x_44 = l_Lean_Tactic_FunInd_initFn____x40_Lean_Meta_Tactic_FunInd___hyg_22849____lambda__2___closed__1; +x_44 = l_Lean_Tactic_FunInd_initFn____x40_Lean_Meta_Tactic_FunInd___hyg_23481____lambda__2___closed__1; lean_inc(x_1); x_45 = l_Lean_Tactic_FunInd_isFunCasesName(x_43, x_1); if (x_45 == 0) @@ -91772,14 +93153,14 @@ lean_dec(x_1); x_50 = l_Lean_Tactic_FunInd_isFunCasesName___closed__2; x_51 = lean_string_dec_eq(x_49, x_50); lean_dec(x_49); -x_52 = l_Lean_Tactic_FunInd_initFn____x40_Lean_Meta_Tactic_FunInd___hyg_22849____lambda__2___closed__11; +x_52 = l_Lean_Tactic_FunInd_initFn____x40_Lean_Meta_Tactic_FunInd___hyg_23481____lambda__2___closed__11; x_53 = lean_st_mk_ref(x_52, x_42); x_54 = lean_ctor_get(x_53, 0); lean_inc(x_54); x_55 = lean_ctor_get(x_53, 1); lean_inc(x_55); lean_dec(x_53); -x_56 = l_Lean_Tactic_FunInd_initFn____x40_Lean_Meta_Tactic_FunInd___hyg_22849____lambda__2___closed__7; +x_56 = l_Lean_Tactic_FunInd_initFn____x40_Lean_Meta_Tactic_FunInd___hyg_23481____lambda__2___closed__7; lean_inc(x_54); x_57 = l_Lean_Tactic_FunInd_deriveCases(x_51, x_48, x_56, x_54, x_3, x_4, x_55); if (lean_obj_tag(x_57) == 0) @@ -91854,7 +93235,7 @@ return x_71; } } } -static lean_object* _init_l_Lean_Tactic_FunInd_initFn____x40_Lean_Meta_Tactic_FunInd___hyg_22849____lambda__3___closed__1() { +static lean_object* _init_l_Lean_Tactic_FunInd_initFn____x40_Lean_Meta_Tactic_FunInd___hyg_23481____lambda__3___closed__1() { _start: { lean_object* x_1; @@ -91862,31 +93243,31 @@ x_1 = lean_mk_string_unchecked("_unfolding", 10, 10); return x_1; } } -static lean_object* _init_l_Lean_Tactic_FunInd_initFn____x40_Lean_Meta_Tactic_FunInd___hyg_22849____lambda__3___closed__2() { +static lean_object* _init_l_Lean_Tactic_FunInd_initFn____x40_Lean_Meta_Tactic_FunInd___hyg_23481____lambda__3___closed__2() { _start: { lean_object* x_1; lean_object* x_2; -x_1 = l_Lean_Tactic_FunInd_initFn____x40_Lean_Meta_Tactic_FunInd___hyg_22849____lambda__3___closed__1; +x_1 = l_Lean_Tactic_FunInd_initFn____x40_Lean_Meta_Tactic_FunInd___hyg_23481____lambda__3___closed__1; x_2 = lean_string_length(x_1); return x_2; } } -static lean_object* 
_init_l_Lean_Tactic_FunInd_initFn____x40_Lean_Meta_Tactic_FunInd___hyg_22849____lambda__3___closed__3() { +static lean_object* _init_l_Lean_Tactic_FunInd_initFn____x40_Lean_Meta_Tactic_FunInd___hyg_23481____lambda__3___closed__3() { _start: { lean_object* x_1; lean_object* x_2; -x_1 = l_Lean_Tactic_FunInd_initFn____x40_Lean_Meta_Tactic_FunInd___hyg_22849____lambda__3___closed__1; +x_1 = l_Lean_Tactic_FunInd_initFn____x40_Lean_Meta_Tactic_FunInd___hyg_23481____lambda__3___closed__1; x_2 = lean_string_utf8_byte_size(x_1); return x_2; } } -static lean_object* _init_l_Lean_Tactic_FunInd_initFn____x40_Lean_Meta_Tactic_FunInd___hyg_22849____lambda__3___closed__4() { +static lean_object* _init_l_Lean_Tactic_FunInd_initFn____x40_Lean_Meta_Tactic_FunInd___hyg_23481____lambda__3___closed__4() { _start: { lean_object* x_1; lean_object* x_2; lean_object* x_3; lean_object* x_4; -x_1 = l_Lean_Tactic_FunInd_initFn____x40_Lean_Meta_Tactic_FunInd___hyg_22849____lambda__3___closed__1; +x_1 = l_Lean_Tactic_FunInd_initFn____x40_Lean_Meta_Tactic_FunInd___hyg_23481____lambda__3___closed__1; x_2 = lean_unsigned_to_nat(0u); -x_3 = l_Lean_Tactic_FunInd_initFn____x40_Lean_Meta_Tactic_FunInd___hyg_22849____lambda__3___closed__3; +x_3 = l_Lean_Tactic_FunInd_initFn____x40_Lean_Meta_Tactic_FunInd___hyg_23481____lambda__3___closed__3; x_4 = lean_alloc_ctor(0, 3, 0); lean_ctor_set(x_4, 0, x_1); lean_ctor_set(x_4, 1, x_2); @@ -91894,7 +93275,7 @@ lean_ctor_set(x_4, 2, x_3); return x_4; } } -LEAN_EXPORT lean_object* l_Lean_Tactic_FunInd_initFn____x40_Lean_Meta_Tactic_FunInd___hyg_22849____lambda__3(lean_object* x_1, lean_object* x_2, lean_object* x_3, lean_object* x_4) { +LEAN_EXPORT lean_object* l_Lean_Tactic_FunInd_initFn____x40_Lean_Meta_Tactic_FunInd___hyg_23481____lambda__3(lean_object* x_1, lean_object* x_2, lean_object* x_3, lean_object* x_4) { _start: { lean_object* x_5; uint8_t x_6; @@ -91917,7 +93298,7 @@ if (x_11 == 0) lean_object* x_12; lean_object* x_13; lean_free_object(x_5); x_12 = lean_box(0); -x_13 = l_Lean_Tactic_FunInd_initFn____x40_Lean_Meta_Tactic_FunInd___hyg_22849____lambda__2(x_1, x_12, x_2, x_3, x_8); +x_13 = l_Lean_Tactic_FunInd_initFn____x40_Lean_Meta_Tactic_FunInd___hyg_23481____lambda__2(x_1, x_12, x_2, x_3, x_8); return x_13; } else @@ -91940,7 +93321,7 @@ lean_ctor_set(x_18, 0, x_15); lean_ctor_set(x_18, 1, x_17); lean_ctor_set(x_18, 2, x_16); x_19 = lean_nat_sub(x_16, x_17); -x_20 = l_Lean_Tactic_FunInd_initFn____x40_Lean_Meta_Tactic_FunInd___hyg_22849____lambda__3___closed__2; +x_20 = l_Lean_Tactic_FunInd_initFn____x40_Lean_Meta_Tactic_FunInd___hyg_23481____lambda__3___closed__2; x_21 = l_Substring_prevn(x_18, x_20, x_19); lean_dec(x_18); x_22 = lean_nat_add(x_17, x_21); @@ -91949,16 +93330,16 @@ x_23 = lean_alloc_ctor(0, 3, 0); lean_ctor_set(x_23, 0, x_15); lean_ctor_set(x_23, 1, x_22); lean_ctor_set(x_23, 2, x_16); -x_24 = l_Lean_Tactic_FunInd_initFn____x40_Lean_Meta_Tactic_FunInd___hyg_22849____lambda__3___closed__4; +x_24 = l_Lean_Tactic_FunInd_initFn____x40_Lean_Meta_Tactic_FunInd___hyg_23481____lambda__3___closed__4; x_25 = l_Substring_beq(x_23, x_24); -x_26 = l_Lean_Tactic_FunInd_initFn____x40_Lean_Meta_Tactic_FunInd___hyg_22849____lambda__2___closed__11; +x_26 = l_Lean_Tactic_FunInd_initFn____x40_Lean_Meta_Tactic_FunInd___hyg_23481____lambda__2___closed__11; x_27 = lean_st_mk_ref(x_26, x_8); x_28 = lean_ctor_get(x_27, 0); lean_inc(x_28); x_29 = lean_ctor_get(x_27, 1); lean_inc(x_29); lean_dec(x_27); -x_30 = 
l_Lean_Tactic_FunInd_initFn____x40_Lean_Meta_Tactic_FunInd___hyg_22849____lambda__2___closed__7; +x_30 = l_Lean_Tactic_FunInd_initFn____x40_Lean_Meta_Tactic_FunInd___hyg_23481____lambda__2___closed__7; lean_inc(x_28); x_31 = l_Lean_Tactic_FunInd_deriveInduction(x_25, x_14, x_30, x_28, x_2, x_3, x_29); if (lean_obj_tag(x_31) == 0) @@ -92050,7 +93431,7 @@ if (x_52 == 0) { lean_object* x_53; lean_object* x_54; x_53 = lean_box(0); -x_54 = l_Lean_Tactic_FunInd_initFn____x40_Lean_Meta_Tactic_FunInd___hyg_22849____lambda__2(x_1, x_53, x_2, x_3, x_49); +x_54 = l_Lean_Tactic_FunInd_initFn____x40_Lean_Meta_Tactic_FunInd___hyg_23481____lambda__2(x_1, x_53, x_2, x_3, x_49); return x_54; } else @@ -92072,7 +93453,7 @@ lean_ctor_set(x_59, 0, x_56); lean_ctor_set(x_59, 1, x_58); lean_ctor_set(x_59, 2, x_57); x_60 = lean_nat_sub(x_57, x_58); -x_61 = l_Lean_Tactic_FunInd_initFn____x40_Lean_Meta_Tactic_FunInd___hyg_22849____lambda__3___closed__2; +x_61 = l_Lean_Tactic_FunInd_initFn____x40_Lean_Meta_Tactic_FunInd___hyg_23481____lambda__3___closed__2; x_62 = l_Substring_prevn(x_59, x_61, x_60); lean_dec(x_59); x_63 = lean_nat_add(x_58, x_62); @@ -92081,16 +93462,16 @@ x_64 = lean_alloc_ctor(0, 3, 0); lean_ctor_set(x_64, 0, x_56); lean_ctor_set(x_64, 1, x_63); lean_ctor_set(x_64, 2, x_57); -x_65 = l_Lean_Tactic_FunInd_initFn____x40_Lean_Meta_Tactic_FunInd___hyg_22849____lambda__3___closed__4; +x_65 = l_Lean_Tactic_FunInd_initFn____x40_Lean_Meta_Tactic_FunInd___hyg_23481____lambda__3___closed__4; x_66 = l_Substring_beq(x_64, x_65); -x_67 = l_Lean_Tactic_FunInd_initFn____x40_Lean_Meta_Tactic_FunInd___hyg_22849____lambda__2___closed__11; +x_67 = l_Lean_Tactic_FunInd_initFn____x40_Lean_Meta_Tactic_FunInd___hyg_23481____lambda__2___closed__11; x_68 = lean_st_mk_ref(x_67, x_49); x_69 = lean_ctor_get(x_68, 0); lean_inc(x_69); x_70 = lean_ctor_get(x_68, 1); lean_inc(x_70); lean_dec(x_68); -x_71 = l_Lean_Tactic_FunInd_initFn____x40_Lean_Meta_Tactic_FunInd___hyg_22849____lambda__2___closed__7; +x_71 = l_Lean_Tactic_FunInd_initFn____x40_Lean_Meta_Tactic_FunInd___hyg_23481____lambda__2___closed__7; lean_inc(x_69); x_72 = l_Lean_Tactic_FunInd_deriveInduction(x_66, x_55, x_71, x_69, x_2, x_3, x_70); if (lean_obj_tag(x_72) == 0) @@ -92165,7 +93546,7 @@ return x_86; } } } -static lean_object* _init_l_Lean_Tactic_FunInd_initFn____x40_Lean_Meta_Tactic_FunInd___hyg_22849____closed__1() { +static lean_object* _init_l_Lean_Tactic_FunInd_initFn____x40_Lean_Meta_Tactic_FunInd___hyg_23481____closed__1() { _start: { lean_object* x_1; @@ -92173,7 +93554,7 @@ x_1 = lean_alloc_closure((void*)(l_Lean_Tactic_FunInd_isFunInductName), 2, 0); return x_1; } } -static lean_object* _init_l_Lean_Tactic_FunInd_initFn____x40_Lean_Meta_Tactic_FunInd___hyg_22849____closed__2() { +static lean_object* _init_l_Lean_Tactic_FunInd_initFn____x40_Lean_Meta_Tactic_FunInd___hyg_23481____closed__2() { _start: { lean_object* x_1; @@ -92181,19 +93562,19 @@ x_1 = lean_alloc_closure((void*)(l_Lean_Tactic_FunInd_isFunCasesName___boxed), 2 return x_1; } } -static lean_object* _init_l_Lean_Tactic_FunInd_initFn____x40_Lean_Meta_Tactic_FunInd___hyg_22849____closed__3() { +static lean_object* _init_l_Lean_Tactic_FunInd_initFn____x40_Lean_Meta_Tactic_FunInd___hyg_23481____closed__3() { _start: { lean_object* x_1; -x_1 = lean_alloc_closure((void*)(l_Lean_Tactic_FunInd_initFn____x40_Lean_Meta_Tactic_FunInd___hyg_22849____lambda__3), 4, 0); +x_1 = lean_alloc_closure((void*)(l_Lean_Tactic_FunInd_initFn____x40_Lean_Meta_Tactic_FunInd___hyg_23481____lambda__3), 4, 0); return 
x_1; } } -LEAN_EXPORT lean_object* l_Lean_Tactic_FunInd_initFn____x40_Lean_Meta_Tactic_FunInd___hyg_22849_(lean_object* x_1) { +LEAN_EXPORT lean_object* l_Lean_Tactic_FunInd_initFn____x40_Lean_Meta_Tactic_FunInd___hyg_23481_(lean_object* x_1) { _start: { lean_object* x_2; lean_object* x_3; -x_2 = l_Lean_Tactic_FunInd_initFn____x40_Lean_Meta_Tactic_FunInd___hyg_22849____closed__1; +x_2 = l_Lean_Tactic_FunInd_initFn____x40_Lean_Meta_Tactic_FunInd___hyg_23481____closed__1; x_3 = l_Lean_registerReservedNamePredicate(x_2, x_1); if (lean_obj_tag(x_3) == 0) { @@ -92201,7 +93582,7 @@ lean_object* x_4; lean_object* x_5; lean_object* x_6; x_4 = lean_ctor_get(x_3, 1); lean_inc(x_4); lean_dec(x_3); -x_5 = l_Lean_Tactic_FunInd_initFn____x40_Lean_Meta_Tactic_FunInd___hyg_22849____closed__2; +x_5 = l_Lean_Tactic_FunInd_initFn____x40_Lean_Meta_Tactic_FunInd___hyg_23481____closed__2; x_6 = l_Lean_registerReservedNamePredicate(x_5, x_4); if (lean_obj_tag(x_6) == 0) { @@ -92209,7 +93590,7 @@ lean_object* x_7; lean_object* x_8; lean_object* x_9; x_7 = lean_ctor_get(x_6, 1); lean_inc(x_7); lean_dec(x_6); -x_8 = l_Lean_Tactic_FunInd_initFn____x40_Lean_Meta_Tactic_FunInd___hyg_22849____closed__3; +x_8 = l_Lean_Tactic_FunInd_initFn____x40_Lean_Meta_Tactic_FunInd___hyg_23481____closed__3; x_9 = l_Lean_registerReservedNameAction(x_8, x_7); return x_9; } @@ -92260,27 +93641,27 @@ return x_17; } } } -LEAN_EXPORT lean_object* l_Lean_Tactic_FunInd_initFn____x40_Lean_Meta_Tactic_FunInd___hyg_22849____lambda__1___boxed(lean_object* x_1, lean_object* x_2, lean_object* x_3, lean_object* x_4) { +LEAN_EXPORT lean_object* l_Lean_Tactic_FunInd_initFn____x40_Lean_Meta_Tactic_FunInd___hyg_23481____lambda__1___boxed(lean_object* x_1, lean_object* x_2, lean_object* x_3, lean_object* x_4) { _start: { lean_object* x_5; -x_5 = l_Lean_Tactic_FunInd_initFn____x40_Lean_Meta_Tactic_FunInd___hyg_22849____lambda__1(x_1, x_2, x_3, x_4); +x_5 = l_Lean_Tactic_FunInd_initFn____x40_Lean_Meta_Tactic_FunInd___hyg_23481____lambda__1(x_1, x_2, x_3, x_4); lean_dec(x_3); lean_dec(x_2); lean_dec(x_1); return x_5; } } -LEAN_EXPORT lean_object* l_Lean_Tactic_FunInd_initFn____x40_Lean_Meta_Tactic_FunInd___hyg_22849____lambda__2___boxed(lean_object* x_1, lean_object* x_2, lean_object* x_3, lean_object* x_4, lean_object* x_5) { +LEAN_EXPORT lean_object* l_Lean_Tactic_FunInd_initFn____x40_Lean_Meta_Tactic_FunInd___hyg_23481____lambda__2___boxed(lean_object* x_1, lean_object* x_2, lean_object* x_3, lean_object* x_4, lean_object* x_5) { _start: { lean_object* x_6; -x_6 = l_Lean_Tactic_FunInd_initFn____x40_Lean_Meta_Tactic_FunInd___hyg_22849____lambda__2(x_1, x_2, x_3, x_4, x_5); +x_6 = l_Lean_Tactic_FunInd_initFn____x40_Lean_Meta_Tactic_FunInd___hyg_23481____lambda__2(x_1, x_2, x_3, x_4, x_5); lean_dec(x_2); return x_6; } } -static lean_object* _init_l_initFn____x40_Lean_Meta_Tactic_FunInd___hyg_23129____closed__1() { +static lean_object* _init_l_initFn____x40_Lean_Meta_Tactic_FunInd___hyg_23761____closed__1() { _start: { lean_object* x_1; @@ -92288,17 +93669,17 @@ x_1 = lean_mk_string_unchecked("initFn", 6, 6); return x_1; } } -static lean_object* _init_l_initFn____x40_Lean_Meta_Tactic_FunInd___hyg_23129____closed__2() { +static lean_object* _init_l_initFn____x40_Lean_Meta_Tactic_FunInd___hyg_23761____closed__2() { _start: { lean_object* x_1; lean_object* x_2; lean_object* x_3; x_1 = lean_box(0); -x_2 = l_initFn____x40_Lean_Meta_Tactic_FunInd___hyg_23129____closed__1; +x_2 = l_initFn____x40_Lean_Meta_Tactic_FunInd___hyg_23761____closed__1; x_3 = 
l_Lean_Name_str___override(x_1, x_2); return x_3; } } -static lean_object* _init_l_initFn____x40_Lean_Meta_Tactic_FunInd___hyg_23129____closed__3() { +static lean_object* _init_l_initFn____x40_Lean_Meta_Tactic_FunInd___hyg_23761____closed__3() { _start: { lean_object* x_1; @@ -92306,17 +93687,17 @@ x_1 = lean_mk_string_unchecked("_@", 2, 2); return x_1; } } -static lean_object* _init_l_initFn____x40_Lean_Meta_Tactic_FunInd___hyg_23129____closed__4() { +static lean_object* _init_l_initFn____x40_Lean_Meta_Tactic_FunInd___hyg_23761____closed__4() { _start: { lean_object* x_1; lean_object* x_2; lean_object* x_3; -x_1 = l_initFn____x40_Lean_Meta_Tactic_FunInd___hyg_23129____closed__2; -x_2 = l_initFn____x40_Lean_Meta_Tactic_FunInd___hyg_23129____closed__3; +x_1 = l_initFn____x40_Lean_Meta_Tactic_FunInd___hyg_23761____closed__2; +x_2 = l_initFn____x40_Lean_Meta_Tactic_FunInd___hyg_23761____closed__3; x_3 = l_Lean_Name_str___override(x_1, x_2); return x_3; } } -static lean_object* _init_l_initFn____x40_Lean_Meta_Tactic_FunInd___hyg_23129____closed__5() { +static lean_object* _init_l_initFn____x40_Lean_Meta_Tactic_FunInd___hyg_23761____closed__5() { _start: { lean_object* x_1; @@ -92324,47 +93705,47 @@ x_1 = lean_mk_string_unchecked("Lean", 4, 4); return x_1; } } -static lean_object* _init_l_initFn____x40_Lean_Meta_Tactic_FunInd___hyg_23129____closed__6() { +static lean_object* _init_l_initFn____x40_Lean_Meta_Tactic_FunInd___hyg_23761____closed__6() { _start: { lean_object* x_1; lean_object* x_2; lean_object* x_3; -x_1 = l_initFn____x40_Lean_Meta_Tactic_FunInd___hyg_23129____closed__4; -x_2 = l_initFn____x40_Lean_Meta_Tactic_FunInd___hyg_23129____closed__5; +x_1 = l_initFn____x40_Lean_Meta_Tactic_FunInd___hyg_23761____closed__4; +x_2 = l_initFn____x40_Lean_Meta_Tactic_FunInd___hyg_23761____closed__5; x_3 = l_Lean_Name_str___override(x_1, x_2); return x_3; } } -static lean_object* _init_l_initFn____x40_Lean_Meta_Tactic_FunInd___hyg_23129____closed__7() { +static lean_object* _init_l_initFn____x40_Lean_Meta_Tactic_FunInd___hyg_23761____closed__7() { _start: { lean_object* x_1; lean_object* x_2; lean_object* x_3; -x_1 = l_initFn____x40_Lean_Meta_Tactic_FunInd___hyg_23129____closed__6; +x_1 = l_initFn____x40_Lean_Meta_Tactic_FunInd___hyg_23761____closed__6; x_2 = l_Lean_Tactic_FunInd_foldAndCollect___lambda__28___closed__1; x_3 = l_Lean_Name_str___override(x_1, x_2); return x_3; } } -static lean_object* _init_l_initFn____x40_Lean_Meta_Tactic_FunInd___hyg_23129____closed__8() { +static lean_object* _init_l_initFn____x40_Lean_Meta_Tactic_FunInd___hyg_23761____closed__8() { _start: { lean_object* x_1; lean_object* x_2; lean_object* x_3; -x_1 = l_initFn____x40_Lean_Meta_Tactic_FunInd___hyg_23129____closed__7; +x_1 = l_initFn____x40_Lean_Meta_Tactic_FunInd___hyg_23761____closed__7; x_2 = l_Lean_logAt___at_Lean_Tactic_FunInd_buildInductionBody___spec__25___lambda__2___closed__3; x_3 = l_Lean_Name_str___override(x_1, x_2); return x_3; } } -static lean_object* _init_l_initFn____x40_Lean_Meta_Tactic_FunInd___hyg_23129____closed__9() { +static lean_object* _init_l_initFn____x40_Lean_Meta_Tactic_FunInd___hyg_23761____closed__9() { _start: { lean_object* x_1; lean_object* x_2; lean_object* x_3; -x_1 = l_initFn____x40_Lean_Meta_Tactic_FunInd___hyg_23129____closed__8; +x_1 = l_initFn____x40_Lean_Meta_Tactic_FunInd___hyg_23761____closed__8; x_2 = l_Lean_Tactic_FunInd_foldAndCollect___lambda__28___closed__2; x_3 = l_Lean_Name_str___override(x_1, x_2); return x_3; } } -static lean_object* 
_init_l_initFn____x40_Lean_Meta_Tactic_FunInd___hyg_23129____closed__10() { +static lean_object* _init_l_initFn____x40_Lean_Meta_Tactic_FunInd___hyg_23761____closed__10() { _start: { lean_object* x_1; @@ -92372,33 +93753,33 @@ x_1 = lean_mk_string_unchecked("_hyg", 4, 4); return x_1; } } -static lean_object* _init_l_initFn____x40_Lean_Meta_Tactic_FunInd___hyg_23129____closed__11() { +static lean_object* _init_l_initFn____x40_Lean_Meta_Tactic_FunInd___hyg_23761____closed__11() { _start: { lean_object* x_1; lean_object* x_2; lean_object* x_3; -x_1 = l_initFn____x40_Lean_Meta_Tactic_FunInd___hyg_23129____closed__9; -x_2 = l_initFn____x40_Lean_Meta_Tactic_FunInd___hyg_23129____closed__10; +x_1 = l_initFn____x40_Lean_Meta_Tactic_FunInd___hyg_23761____closed__9; +x_2 = l_initFn____x40_Lean_Meta_Tactic_FunInd___hyg_23761____closed__10; x_3 = l_Lean_Name_str___override(x_1, x_2); return x_3; } } -static lean_object* _init_l_initFn____x40_Lean_Meta_Tactic_FunInd___hyg_23129____closed__12() { +static lean_object* _init_l_initFn____x40_Lean_Meta_Tactic_FunInd___hyg_23761____closed__12() { _start: { lean_object* x_1; lean_object* x_2; lean_object* x_3; -x_1 = l_initFn____x40_Lean_Meta_Tactic_FunInd___hyg_23129____closed__11; -x_2 = lean_unsigned_to_nat(23129u); +x_1 = l_initFn____x40_Lean_Meta_Tactic_FunInd___hyg_23761____closed__11; +x_2 = lean_unsigned_to_nat(23761u); x_3 = l_Lean_Name_num___override(x_1, x_2); return x_3; } } -LEAN_EXPORT lean_object* l_initFn____x40_Lean_Meta_Tactic_FunInd___hyg_23129_(lean_object* x_1) { +LEAN_EXPORT lean_object* l_initFn____x40_Lean_Meta_Tactic_FunInd___hyg_23761_(lean_object* x_1) { _start: { lean_object* x_2; uint8_t x_3; lean_object* x_4; lean_object* x_5; x_2 = l_Lean_Tactic_FunInd_foldAndCollect___lambda__28___closed__3; x_3 = 0; -x_4 = l_initFn____x40_Lean_Meta_Tactic_FunInd___hyg_23129____closed__12; +x_4 = l_initFn____x40_Lean_Meta_Tactic_FunInd___hyg_23761____closed__12; x_5 = l_Lean_registerTraceClass(x_2, x_3, x_4, x_1); return x_5; } @@ -92804,8 +94185,20 @@ l_Lean_Tactic_FunInd_buildInductionBody___lambda__36___closed__2 = _init_l_Lean_ lean_mark_persistent(l_Lean_Tactic_FunInd_buildInductionBody___lambda__36___closed__2); l_Lean_Tactic_FunInd_buildInductionBody___lambda__37___closed__1 = _init_l_Lean_Tactic_FunInd_buildInductionBody___lambda__37___closed__1(); lean_mark_persistent(l_Lean_Tactic_FunInd_buildInductionBody___lambda__37___closed__1); -l_Lean_Tactic_FunInd_buildInductionBody___lambda__37___closed__2 = _init_l_Lean_Tactic_FunInd_buildInductionBody___lambda__37___closed__2(); -lean_mark_persistent(l_Lean_Tactic_FunInd_buildInductionBody___lambda__37___closed__2); +l_Lean_Tactic_FunInd_buildInductionBody___lambda__41___closed__1 = _init_l_Lean_Tactic_FunInd_buildInductionBody___lambda__41___closed__1(); +lean_mark_persistent(l_Lean_Tactic_FunInd_buildInductionBody___lambda__41___closed__1); +l_Lean_Tactic_FunInd_buildInductionBody___lambda__41___closed__2 = _init_l_Lean_Tactic_FunInd_buildInductionBody___lambda__41___closed__2(); +lean_mark_persistent(l_Lean_Tactic_FunInd_buildInductionBody___lambda__41___closed__2); +l_Lean_Tactic_FunInd_buildInductionBody___lambda__41___closed__3 = _init_l_Lean_Tactic_FunInd_buildInductionBody___lambda__41___closed__3(); +lean_mark_persistent(l_Lean_Tactic_FunInd_buildInductionBody___lambda__41___closed__3); +l_Lean_Tactic_FunInd_buildInductionBody___lambda__41___closed__4 = _init_l_Lean_Tactic_FunInd_buildInductionBody___lambda__41___closed__4(); 
+lean_mark_persistent(l_Lean_Tactic_FunInd_buildInductionBody___lambda__41___closed__4); +l_Lean_Tactic_FunInd_buildInductionBody___lambda__41___closed__5 = _init_l_Lean_Tactic_FunInd_buildInductionBody___lambda__41___closed__5(); +lean_mark_persistent(l_Lean_Tactic_FunInd_buildInductionBody___lambda__41___closed__5); +l_Lean_Tactic_FunInd_buildInductionBody___lambda__42___closed__1 = _init_l_Lean_Tactic_FunInd_buildInductionBody___lambda__42___closed__1(); +lean_mark_persistent(l_Lean_Tactic_FunInd_buildInductionBody___lambda__42___closed__1); +l_Lean_Tactic_FunInd_buildInductionBody___lambda__42___closed__2 = _init_l_Lean_Tactic_FunInd_buildInductionBody___lambda__42___closed__2(); +lean_mark_persistent(l_Lean_Tactic_FunInd_buildInductionBody___lambda__42___closed__2); l_Lean_Meta_withLocalDecls_loop___at_Lean_Tactic_FunInd_abstractIndependentMVars___spec__10___rarg___lambda__1___closed__1 = _init_l_Lean_Meta_withLocalDecls_loop___at_Lean_Tactic_FunInd_abstractIndependentMVars___spec__10___rarg___lambda__1___closed__1(); lean_mark_persistent(l_Lean_Meta_withLocalDecls_loop___at_Lean_Tactic_FunInd_abstractIndependentMVars___spec__10___rarg___lambda__1___closed__1); l_Lean_Meta_withLocalDecls_loop___at_Lean_Tactic_FunInd_abstractIndependentMVars___spec__10___rarg___closed__1 = _init_l_Lean_Meta_withLocalDecls_loop___at_Lean_Tactic_FunInd_abstractIndependentMVars___spec__10___rarg___closed__1(); @@ -93216,69 +94609,69 @@ l_Lean_Tactic_FunInd_isFunCasesName___closed__1 = _init_l_Lean_Tactic_FunInd_isF lean_mark_persistent(l_Lean_Tactic_FunInd_isFunCasesName___closed__1); l_Lean_Tactic_FunInd_isFunCasesName___closed__2 = _init_l_Lean_Tactic_FunInd_isFunCasesName___closed__2(); lean_mark_persistent(l_Lean_Tactic_FunInd_isFunCasesName___closed__2); -l_Lean_Tactic_FunInd_initFn____x40_Lean_Meta_Tactic_FunInd___hyg_22849____lambda__2___closed__1 = _init_l_Lean_Tactic_FunInd_initFn____x40_Lean_Meta_Tactic_FunInd___hyg_22849____lambda__2___closed__1(); -lean_mark_persistent(l_Lean_Tactic_FunInd_initFn____x40_Lean_Meta_Tactic_FunInd___hyg_22849____lambda__2___closed__1); -l_Lean_Tactic_FunInd_initFn____x40_Lean_Meta_Tactic_FunInd___hyg_22849____lambda__2___closed__2 = _init_l_Lean_Tactic_FunInd_initFn____x40_Lean_Meta_Tactic_FunInd___hyg_22849____lambda__2___closed__2(); -lean_mark_persistent(l_Lean_Tactic_FunInd_initFn____x40_Lean_Meta_Tactic_FunInd___hyg_22849____lambda__2___closed__2); -l_Lean_Tactic_FunInd_initFn____x40_Lean_Meta_Tactic_FunInd___hyg_22849____lambda__2___closed__3 = _init_l_Lean_Tactic_FunInd_initFn____x40_Lean_Meta_Tactic_FunInd___hyg_22849____lambda__2___closed__3(); -l_Lean_Tactic_FunInd_initFn____x40_Lean_Meta_Tactic_FunInd___hyg_22849____lambda__2___closed__4 = _init_l_Lean_Tactic_FunInd_initFn____x40_Lean_Meta_Tactic_FunInd___hyg_22849____lambda__2___closed__4(); -lean_mark_persistent(l_Lean_Tactic_FunInd_initFn____x40_Lean_Meta_Tactic_FunInd___hyg_22849____lambda__2___closed__4); -l_Lean_Tactic_FunInd_initFn____x40_Lean_Meta_Tactic_FunInd___hyg_22849____lambda__2___closed__5 = _init_l_Lean_Tactic_FunInd_initFn____x40_Lean_Meta_Tactic_FunInd___hyg_22849____lambda__2___closed__5(); -lean_mark_persistent(l_Lean_Tactic_FunInd_initFn____x40_Lean_Meta_Tactic_FunInd___hyg_22849____lambda__2___closed__5); -l_Lean_Tactic_FunInd_initFn____x40_Lean_Meta_Tactic_FunInd___hyg_22849____lambda__2___closed__6 = _init_l_Lean_Tactic_FunInd_initFn____x40_Lean_Meta_Tactic_FunInd___hyg_22849____lambda__2___closed__6(); 
-lean_mark_persistent(l_Lean_Tactic_FunInd_initFn____x40_Lean_Meta_Tactic_FunInd___hyg_22849____lambda__2___closed__6); -l_Lean_Tactic_FunInd_initFn____x40_Lean_Meta_Tactic_FunInd___hyg_22849____lambda__2___closed__7 = _init_l_Lean_Tactic_FunInd_initFn____x40_Lean_Meta_Tactic_FunInd___hyg_22849____lambda__2___closed__7(); -lean_mark_persistent(l_Lean_Tactic_FunInd_initFn____x40_Lean_Meta_Tactic_FunInd___hyg_22849____lambda__2___closed__7); -l_Lean_Tactic_FunInd_initFn____x40_Lean_Meta_Tactic_FunInd___hyg_22849____lambda__2___closed__8 = _init_l_Lean_Tactic_FunInd_initFn____x40_Lean_Meta_Tactic_FunInd___hyg_22849____lambda__2___closed__8(); -lean_mark_persistent(l_Lean_Tactic_FunInd_initFn____x40_Lean_Meta_Tactic_FunInd___hyg_22849____lambda__2___closed__8); -l_Lean_Tactic_FunInd_initFn____x40_Lean_Meta_Tactic_FunInd___hyg_22849____lambda__2___closed__9 = _init_l_Lean_Tactic_FunInd_initFn____x40_Lean_Meta_Tactic_FunInd___hyg_22849____lambda__2___closed__9(); -lean_mark_persistent(l_Lean_Tactic_FunInd_initFn____x40_Lean_Meta_Tactic_FunInd___hyg_22849____lambda__2___closed__9); -l_Lean_Tactic_FunInd_initFn____x40_Lean_Meta_Tactic_FunInd___hyg_22849____lambda__2___closed__10 = _init_l_Lean_Tactic_FunInd_initFn____x40_Lean_Meta_Tactic_FunInd___hyg_22849____lambda__2___closed__10(); -lean_mark_persistent(l_Lean_Tactic_FunInd_initFn____x40_Lean_Meta_Tactic_FunInd___hyg_22849____lambda__2___closed__10); -l_Lean_Tactic_FunInd_initFn____x40_Lean_Meta_Tactic_FunInd___hyg_22849____lambda__2___closed__11 = _init_l_Lean_Tactic_FunInd_initFn____x40_Lean_Meta_Tactic_FunInd___hyg_22849____lambda__2___closed__11(); -lean_mark_persistent(l_Lean_Tactic_FunInd_initFn____x40_Lean_Meta_Tactic_FunInd___hyg_22849____lambda__2___closed__11); -l_Lean_Tactic_FunInd_initFn____x40_Lean_Meta_Tactic_FunInd___hyg_22849____lambda__3___closed__1 = _init_l_Lean_Tactic_FunInd_initFn____x40_Lean_Meta_Tactic_FunInd___hyg_22849____lambda__3___closed__1(); -lean_mark_persistent(l_Lean_Tactic_FunInd_initFn____x40_Lean_Meta_Tactic_FunInd___hyg_22849____lambda__3___closed__1); -l_Lean_Tactic_FunInd_initFn____x40_Lean_Meta_Tactic_FunInd___hyg_22849____lambda__3___closed__2 = _init_l_Lean_Tactic_FunInd_initFn____x40_Lean_Meta_Tactic_FunInd___hyg_22849____lambda__3___closed__2(); -lean_mark_persistent(l_Lean_Tactic_FunInd_initFn____x40_Lean_Meta_Tactic_FunInd___hyg_22849____lambda__3___closed__2); -l_Lean_Tactic_FunInd_initFn____x40_Lean_Meta_Tactic_FunInd___hyg_22849____lambda__3___closed__3 = _init_l_Lean_Tactic_FunInd_initFn____x40_Lean_Meta_Tactic_FunInd___hyg_22849____lambda__3___closed__3(); -lean_mark_persistent(l_Lean_Tactic_FunInd_initFn____x40_Lean_Meta_Tactic_FunInd___hyg_22849____lambda__3___closed__3); -l_Lean_Tactic_FunInd_initFn____x40_Lean_Meta_Tactic_FunInd___hyg_22849____lambda__3___closed__4 = _init_l_Lean_Tactic_FunInd_initFn____x40_Lean_Meta_Tactic_FunInd___hyg_22849____lambda__3___closed__4(); -lean_mark_persistent(l_Lean_Tactic_FunInd_initFn____x40_Lean_Meta_Tactic_FunInd___hyg_22849____lambda__3___closed__4); -l_Lean_Tactic_FunInd_initFn____x40_Lean_Meta_Tactic_FunInd___hyg_22849____closed__1 = _init_l_Lean_Tactic_FunInd_initFn____x40_Lean_Meta_Tactic_FunInd___hyg_22849____closed__1(); -lean_mark_persistent(l_Lean_Tactic_FunInd_initFn____x40_Lean_Meta_Tactic_FunInd___hyg_22849____closed__1); -l_Lean_Tactic_FunInd_initFn____x40_Lean_Meta_Tactic_FunInd___hyg_22849____closed__2 = _init_l_Lean_Tactic_FunInd_initFn____x40_Lean_Meta_Tactic_FunInd___hyg_22849____closed__2(); 
-lean_mark_persistent(l_Lean_Tactic_FunInd_initFn____x40_Lean_Meta_Tactic_FunInd___hyg_22849____closed__2); -l_Lean_Tactic_FunInd_initFn____x40_Lean_Meta_Tactic_FunInd___hyg_22849____closed__3 = _init_l_Lean_Tactic_FunInd_initFn____x40_Lean_Meta_Tactic_FunInd___hyg_22849____closed__3(); -lean_mark_persistent(l_Lean_Tactic_FunInd_initFn____x40_Lean_Meta_Tactic_FunInd___hyg_22849____closed__3); -if (builtin) {res = l_Lean_Tactic_FunInd_initFn____x40_Lean_Meta_Tactic_FunInd___hyg_22849_(lean_io_mk_world()); +l_Lean_Tactic_FunInd_initFn____x40_Lean_Meta_Tactic_FunInd___hyg_23481____lambda__2___closed__1 = _init_l_Lean_Tactic_FunInd_initFn____x40_Lean_Meta_Tactic_FunInd___hyg_23481____lambda__2___closed__1(); +lean_mark_persistent(l_Lean_Tactic_FunInd_initFn____x40_Lean_Meta_Tactic_FunInd___hyg_23481____lambda__2___closed__1); +l_Lean_Tactic_FunInd_initFn____x40_Lean_Meta_Tactic_FunInd___hyg_23481____lambda__2___closed__2 = _init_l_Lean_Tactic_FunInd_initFn____x40_Lean_Meta_Tactic_FunInd___hyg_23481____lambda__2___closed__2(); +lean_mark_persistent(l_Lean_Tactic_FunInd_initFn____x40_Lean_Meta_Tactic_FunInd___hyg_23481____lambda__2___closed__2); +l_Lean_Tactic_FunInd_initFn____x40_Lean_Meta_Tactic_FunInd___hyg_23481____lambda__2___closed__3 = _init_l_Lean_Tactic_FunInd_initFn____x40_Lean_Meta_Tactic_FunInd___hyg_23481____lambda__2___closed__3(); +l_Lean_Tactic_FunInd_initFn____x40_Lean_Meta_Tactic_FunInd___hyg_23481____lambda__2___closed__4 = _init_l_Lean_Tactic_FunInd_initFn____x40_Lean_Meta_Tactic_FunInd___hyg_23481____lambda__2___closed__4(); +lean_mark_persistent(l_Lean_Tactic_FunInd_initFn____x40_Lean_Meta_Tactic_FunInd___hyg_23481____lambda__2___closed__4); +l_Lean_Tactic_FunInd_initFn____x40_Lean_Meta_Tactic_FunInd___hyg_23481____lambda__2___closed__5 = _init_l_Lean_Tactic_FunInd_initFn____x40_Lean_Meta_Tactic_FunInd___hyg_23481____lambda__2___closed__5(); +lean_mark_persistent(l_Lean_Tactic_FunInd_initFn____x40_Lean_Meta_Tactic_FunInd___hyg_23481____lambda__2___closed__5); +l_Lean_Tactic_FunInd_initFn____x40_Lean_Meta_Tactic_FunInd___hyg_23481____lambda__2___closed__6 = _init_l_Lean_Tactic_FunInd_initFn____x40_Lean_Meta_Tactic_FunInd___hyg_23481____lambda__2___closed__6(); +lean_mark_persistent(l_Lean_Tactic_FunInd_initFn____x40_Lean_Meta_Tactic_FunInd___hyg_23481____lambda__2___closed__6); +l_Lean_Tactic_FunInd_initFn____x40_Lean_Meta_Tactic_FunInd___hyg_23481____lambda__2___closed__7 = _init_l_Lean_Tactic_FunInd_initFn____x40_Lean_Meta_Tactic_FunInd___hyg_23481____lambda__2___closed__7(); +lean_mark_persistent(l_Lean_Tactic_FunInd_initFn____x40_Lean_Meta_Tactic_FunInd___hyg_23481____lambda__2___closed__7); +l_Lean_Tactic_FunInd_initFn____x40_Lean_Meta_Tactic_FunInd___hyg_23481____lambda__2___closed__8 = _init_l_Lean_Tactic_FunInd_initFn____x40_Lean_Meta_Tactic_FunInd___hyg_23481____lambda__2___closed__8(); +lean_mark_persistent(l_Lean_Tactic_FunInd_initFn____x40_Lean_Meta_Tactic_FunInd___hyg_23481____lambda__2___closed__8); +l_Lean_Tactic_FunInd_initFn____x40_Lean_Meta_Tactic_FunInd___hyg_23481____lambda__2___closed__9 = _init_l_Lean_Tactic_FunInd_initFn____x40_Lean_Meta_Tactic_FunInd___hyg_23481____lambda__2___closed__9(); +lean_mark_persistent(l_Lean_Tactic_FunInd_initFn____x40_Lean_Meta_Tactic_FunInd___hyg_23481____lambda__2___closed__9); +l_Lean_Tactic_FunInd_initFn____x40_Lean_Meta_Tactic_FunInd___hyg_23481____lambda__2___closed__10 = _init_l_Lean_Tactic_FunInd_initFn____x40_Lean_Meta_Tactic_FunInd___hyg_23481____lambda__2___closed__10(); 
+lean_mark_persistent(l_Lean_Tactic_FunInd_initFn____x40_Lean_Meta_Tactic_FunInd___hyg_23481____lambda__2___closed__10); +l_Lean_Tactic_FunInd_initFn____x40_Lean_Meta_Tactic_FunInd___hyg_23481____lambda__2___closed__11 = _init_l_Lean_Tactic_FunInd_initFn____x40_Lean_Meta_Tactic_FunInd___hyg_23481____lambda__2___closed__11(); +lean_mark_persistent(l_Lean_Tactic_FunInd_initFn____x40_Lean_Meta_Tactic_FunInd___hyg_23481____lambda__2___closed__11); +l_Lean_Tactic_FunInd_initFn____x40_Lean_Meta_Tactic_FunInd___hyg_23481____lambda__3___closed__1 = _init_l_Lean_Tactic_FunInd_initFn____x40_Lean_Meta_Tactic_FunInd___hyg_23481____lambda__3___closed__1(); +lean_mark_persistent(l_Lean_Tactic_FunInd_initFn____x40_Lean_Meta_Tactic_FunInd___hyg_23481____lambda__3___closed__1); +l_Lean_Tactic_FunInd_initFn____x40_Lean_Meta_Tactic_FunInd___hyg_23481____lambda__3___closed__2 = _init_l_Lean_Tactic_FunInd_initFn____x40_Lean_Meta_Tactic_FunInd___hyg_23481____lambda__3___closed__2(); +lean_mark_persistent(l_Lean_Tactic_FunInd_initFn____x40_Lean_Meta_Tactic_FunInd___hyg_23481____lambda__3___closed__2); +l_Lean_Tactic_FunInd_initFn____x40_Lean_Meta_Tactic_FunInd___hyg_23481____lambda__3___closed__3 = _init_l_Lean_Tactic_FunInd_initFn____x40_Lean_Meta_Tactic_FunInd___hyg_23481____lambda__3___closed__3(); +lean_mark_persistent(l_Lean_Tactic_FunInd_initFn____x40_Lean_Meta_Tactic_FunInd___hyg_23481____lambda__3___closed__3); +l_Lean_Tactic_FunInd_initFn____x40_Lean_Meta_Tactic_FunInd___hyg_23481____lambda__3___closed__4 = _init_l_Lean_Tactic_FunInd_initFn____x40_Lean_Meta_Tactic_FunInd___hyg_23481____lambda__3___closed__4(); +lean_mark_persistent(l_Lean_Tactic_FunInd_initFn____x40_Lean_Meta_Tactic_FunInd___hyg_23481____lambda__3___closed__4); +l_Lean_Tactic_FunInd_initFn____x40_Lean_Meta_Tactic_FunInd___hyg_23481____closed__1 = _init_l_Lean_Tactic_FunInd_initFn____x40_Lean_Meta_Tactic_FunInd___hyg_23481____closed__1(); +lean_mark_persistent(l_Lean_Tactic_FunInd_initFn____x40_Lean_Meta_Tactic_FunInd___hyg_23481____closed__1); +l_Lean_Tactic_FunInd_initFn____x40_Lean_Meta_Tactic_FunInd___hyg_23481____closed__2 = _init_l_Lean_Tactic_FunInd_initFn____x40_Lean_Meta_Tactic_FunInd___hyg_23481____closed__2(); +lean_mark_persistent(l_Lean_Tactic_FunInd_initFn____x40_Lean_Meta_Tactic_FunInd___hyg_23481____closed__2); +l_Lean_Tactic_FunInd_initFn____x40_Lean_Meta_Tactic_FunInd___hyg_23481____closed__3 = _init_l_Lean_Tactic_FunInd_initFn____x40_Lean_Meta_Tactic_FunInd___hyg_23481____closed__3(); +lean_mark_persistent(l_Lean_Tactic_FunInd_initFn____x40_Lean_Meta_Tactic_FunInd___hyg_23481____closed__3); +if (builtin) {res = l_Lean_Tactic_FunInd_initFn____x40_Lean_Meta_Tactic_FunInd___hyg_23481_(lean_io_mk_world()); if (lean_io_result_is_error(res)) return res; lean_dec_ref(res); -}l_initFn____x40_Lean_Meta_Tactic_FunInd___hyg_23129____closed__1 = _init_l_initFn____x40_Lean_Meta_Tactic_FunInd___hyg_23129____closed__1(); -lean_mark_persistent(l_initFn____x40_Lean_Meta_Tactic_FunInd___hyg_23129____closed__1); -l_initFn____x40_Lean_Meta_Tactic_FunInd___hyg_23129____closed__2 = _init_l_initFn____x40_Lean_Meta_Tactic_FunInd___hyg_23129____closed__2(); -lean_mark_persistent(l_initFn____x40_Lean_Meta_Tactic_FunInd___hyg_23129____closed__2); -l_initFn____x40_Lean_Meta_Tactic_FunInd___hyg_23129____closed__3 = _init_l_initFn____x40_Lean_Meta_Tactic_FunInd___hyg_23129____closed__3(); -lean_mark_persistent(l_initFn____x40_Lean_Meta_Tactic_FunInd___hyg_23129____closed__3); -l_initFn____x40_Lean_Meta_Tactic_FunInd___hyg_23129____closed__4 = 
_init_l_initFn____x40_Lean_Meta_Tactic_FunInd___hyg_23129____closed__4(); -lean_mark_persistent(l_initFn____x40_Lean_Meta_Tactic_FunInd___hyg_23129____closed__4); -l_initFn____x40_Lean_Meta_Tactic_FunInd___hyg_23129____closed__5 = _init_l_initFn____x40_Lean_Meta_Tactic_FunInd___hyg_23129____closed__5(); -lean_mark_persistent(l_initFn____x40_Lean_Meta_Tactic_FunInd___hyg_23129____closed__5); -l_initFn____x40_Lean_Meta_Tactic_FunInd___hyg_23129____closed__6 = _init_l_initFn____x40_Lean_Meta_Tactic_FunInd___hyg_23129____closed__6(); -lean_mark_persistent(l_initFn____x40_Lean_Meta_Tactic_FunInd___hyg_23129____closed__6); -l_initFn____x40_Lean_Meta_Tactic_FunInd___hyg_23129____closed__7 = _init_l_initFn____x40_Lean_Meta_Tactic_FunInd___hyg_23129____closed__7(); -lean_mark_persistent(l_initFn____x40_Lean_Meta_Tactic_FunInd___hyg_23129____closed__7); -l_initFn____x40_Lean_Meta_Tactic_FunInd___hyg_23129____closed__8 = _init_l_initFn____x40_Lean_Meta_Tactic_FunInd___hyg_23129____closed__8(); -lean_mark_persistent(l_initFn____x40_Lean_Meta_Tactic_FunInd___hyg_23129____closed__8); -l_initFn____x40_Lean_Meta_Tactic_FunInd___hyg_23129____closed__9 = _init_l_initFn____x40_Lean_Meta_Tactic_FunInd___hyg_23129____closed__9(); -lean_mark_persistent(l_initFn____x40_Lean_Meta_Tactic_FunInd___hyg_23129____closed__9); -l_initFn____x40_Lean_Meta_Tactic_FunInd___hyg_23129____closed__10 = _init_l_initFn____x40_Lean_Meta_Tactic_FunInd___hyg_23129____closed__10(); -lean_mark_persistent(l_initFn____x40_Lean_Meta_Tactic_FunInd___hyg_23129____closed__10); -l_initFn____x40_Lean_Meta_Tactic_FunInd___hyg_23129____closed__11 = _init_l_initFn____x40_Lean_Meta_Tactic_FunInd___hyg_23129____closed__11(); -lean_mark_persistent(l_initFn____x40_Lean_Meta_Tactic_FunInd___hyg_23129____closed__11); -l_initFn____x40_Lean_Meta_Tactic_FunInd___hyg_23129____closed__12 = _init_l_initFn____x40_Lean_Meta_Tactic_FunInd___hyg_23129____closed__12(); -lean_mark_persistent(l_initFn____x40_Lean_Meta_Tactic_FunInd___hyg_23129____closed__12); -if (builtin) {res = l_initFn____x40_Lean_Meta_Tactic_FunInd___hyg_23129_(lean_io_mk_world()); +}l_initFn____x40_Lean_Meta_Tactic_FunInd___hyg_23761____closed__1 = _init_l_initFn____x40_Lean_Meta_Tactic_FunInd___hyg_23761____closed__1(); +lean_mark_persistent(l_initFn____x40_Lean_Meta_Tactic_FunInd___hyg_23761____closed__1); +l_initFn____x40_Lean_Meta_Tactic_FunInd___hyg_23761____closed__2 = _init_l_initFn____x40_Lean_Meta_Tactic_FunInd___hyg_23761____closed__2(); +lean_mark_persistent(l_initFn____x40_Lean_Meta_Tactic_FunInd___hyg_23761____closed__2); +l_initFn____x40_Lean_Meta_Tactic_FunInd___hyg_23761____closed__3 = _init_l_initFn____x40_Lean_Meta_Tactic_FunInd___hyg_23761____closed__3(); +lean_mark_persistent(l_initFn____x40_Lean_Meta_Tactic_FunInd___hyg_23761____closed__3); +l_initFn____x40_Lean_Meta_Tactic_FunInd___hyg_23761____closed__4 = _init_l_initFn____x40_Lean_Meta_Tactic_FunInd___hyg_23761____closed__4(); +lean_mark_persistent(l_initFn____x40_Lean_Meta_Tactic_FunInd___hyg_23761____closed__4); +l_initFn____x40_Lean_Meta_Tactic_FunInd___hyg_23761____closed__5 = _init_l_initFn____x40_Lean_Meta_Tactic_FunInd___hyg_23761____closed__5(); +lean_mark_persistent(l_initFn____x40_Lean_Meta_Tactic_FunInd___hyg_23761____closed__5); +l_initFn____x40_Lean_Meta_Tactic_FunInd___hyg_23761____closed__6 = _init_l_initFn____x40_Lean_Meta_Tactic_FunInd___hyg_23761____closed__6(); +lean_mark_persistent(l_initFn____x40_Lean_Meta_Tactic_FunInd___hyg_23761____closed__6); 
+l_initFn____x40_Lean_Meta_Tactic_FunInd___hyg_23761____closed__7 = _init_l_initFn____x40_Lean_Meta_Tactic_FunInd___hyg_23761____closed__7(); +lean_mark_persistent(l_initFn____x40_Lean_Meta_Tactic_FunInd___hyg_23761____closed__7); +l_initFn____x40_Lean_Meta_Tactic_FunInd___hyg_23761____closed__8 = _init_l_initFn____x40_Lean_Meta_Tactic_FunInd___hyg_23761____closed__8(); +lean_mark_persistent(l_initFn____x40_Lean_Meta_Tactic_FunInd___hyg_23761____closed__8); +l_initFn____x40_Lean_Meta_Tactic_FunInd___hyg_23761____closed__9 = _init_l_initFn____x40_Lean_Meta_Tactic_FunInd___hyg_23761____closed__9(); +lean_mark_persistent(l_initFn____x40_Lean_Meta_Tactic_FunInd___hyg_23761____closed__9); +l_initFn____x40_Lean_Meta_Tactic_FunInd___hyg_23761____closed__10 = _init_l_initFn____x40_Lean_Meta_Tactic_FunInd___hyg_23761____closed__10(); +lean_mark_persistent(l_initFn____x40_Lean_Meta_Tactic_FunInd___hyg_23761____closed__10); +l_initFn____x40_Lean_Meta_Tactic_FunInd___hyg_23761____closed__11 = _init_l_initFn____x40_Lean_Meta_Tactic_FunInd___hyg_23761____closed__11(); +lean_mark_persistent(l_initFn____x40_Lean_Meta_Tactic_FunInd___hyg_23761____closed__11); +l_initFn____x40_Lean_Meta_Tactic_FunInd___hyg_23761____closed__12 = _init_l_initFn____x40_Lean_Meta_Tactic_FunInd___hyg_23761____closed__12(); +lean_mark_persistent(l_initFn____x40_Lean_Meta_Tactic_FunInd___hyg_23761____closed__12); +if (builtin) {res = l_initFn____x40_Lean_Meta_Tactic_FunInd___hyg_23761_(lean_io_mk_world()); if (lean_io_result_is_error(res)) return res; lean_dec_ref(res); }return lean_io_result_mk_ok(lean_box(0)); diff --git a/stage0/stdlib/Lean/Meta/Tactic/Grind/Arith/CommRing.c b/stage0/stdlib/Lean/Meta/Tactic/Grind/Arith/CommRing.c index 48d70ebd31c5..30cd0cae1b88 100644 --- a/stage0/stdlib/Lean/Meta/Tactic/Grind/Arith/CommRing.c +++ b/stage0/stdlib/Lean/Meta/Tactic/Grind/Arith/CommRing.c @@ -1,6 +1,6 @@ // Lean compiler output // Module: Lean.Meta.Tactic.Grind.Arith.CommRing -// Imports: Lean.Util.Trace Lean.Meta.Tactic.Grind.Arith.CommRing.Poly Lean.Meta.Tactic.Grind.Arith.CommRing.Types Lean.Meta.Tactic.Grind.Arith.CommRing.RingId Lean.Meta.Tactic.Grind.Arith.CommRing.Internalize Lean.Meta.Tactic.Grind.Arith.CommRing.ToExpr Lean.Meta.Tactic.Grind.Arith.CommRing.Var Lean.Meta.Tactic.Grind.Arith.CommRing.Reify Lean.Meta.Tactic.Grind.Arith.CommRing.EqCnstr Lean.Meta.Tactic.Grind.Arith.CommRing.Proof Lean.Meta.Tactic.Grind.Arith.CommRing.DenoteExpr Lean.Meta.Tactic.Grind.Arith.CommRing.Inv +// Imports: Lean.Util.Trace Lean.Meta.Tactic.Grind.Arith.CommRing.Poly Lean.Meta.Tactic.Grind.Arith.CommRing.Types Lean.Meta.Tactic.Grind.Arith.CommRing.RingId Lean.Meta.Tactic.Grind.Arith.CommRing.Internalize Lean.Meta.Tactic.Grind.Arith.CommRing.ToExpr Lean.Meta.Tactic.Grind.Arith.CommRing.Var Lean.Meta.Tactic.Grind.Arith.CommRing.Reify Lean.Meta.Tactic.Grind.Arith.CommRing.EqCnstr Lean.Meta.Tactic.Grind.Arith.CommRing.Proof Lean.Meta.Tactic.Grind.Arith.CommRing.DenoteExpr Lean.Meta.Tactic.Grind.Arith.CommRing.Inv Lean.Meta.Tactic.Grind.Arith.CommRing.PP #include <lean/lean.h> #if defined(__clang__) #pragma clang diagnostic ignored "-Wunused-parameter" @@ -13,47 +13,51 @@ #ifdef __cplusplus extern "C" { #endif -static lean_object* l_Lean_initFn____x40_Lean_Meta_Tactic_Grind_Arith_CommRing___hyg_459____closed__2; +LEAN_EXPORT lean_object* l_Lean_initFn____x40_Lean_Meta_Tactic_Grind_Arith_CommRing___hyg_542_(lean_object*); +static lean_object* l_Lean_initFn____x40_Lean_Meta_Tactic_Grind_Arith_CommRing___hyg_583____closed__1; static lean_object* 
l_Lean_initFn____x40_Lean_Meta_Tactic_Grind_Arith_CommRing___hyg_210____closed__2; +static lean_object* l_Lean_initFn____x40_Lean_Meta_Tactic_Grind_Arith_CommRing___hyg_501____closed__2; static lean_object* l_Lean_initFn____x40_Lean_Meta_Tactic_Grind_Arith_CommRing___hyg_3____closed__15; -static lean_object* l_Lean_initFn____x40_Lean_Meta_Tactic_Grind_Arith_CommRing___hyg_459____closed__3; +static lean_object* l_Lean_initFn____x40_Lean_Meta_Tactic_Grind_Arith_CommRing___hyg_419____closed__3; +static lean_object* l_Lean_initFn____x40_Lean_Meta_Tactic_Grind_Arith_CommRing___hyg_378____closed__1; static lean_object* l_Lean_initFn____x40_Lean_Meta_Tactic_Grind_Arith_CommRing___hyg_3____closed__18; static lean_object* l_Lean_initFn____x40_Lean_Meta_Tactic_Grind_Arith_CommRing___hyg_3____closed__9; +static lean_object* l_Lean_initFn____x40_Lean_Meta_Tactic_Grind_Arith_CommRing___hyg_624____closed__1; +static lean_object* l_Lean_initFn____x40_Lean_Meta_Tactic_Grind_Arith_CommRing___hyg_583____closed__3; static lean_object* l_Lean_initFn____x40_Lean_Meta_Tactic_Grind_Arith_CommRing___hyg_3____closed__1; static lean_object* l_Lean_initFn____x40_Lean_Meta_Tactic_Grind_Arith_CommRing___hyg_44____closed__3; -static lean_object* l_Lean_initFn____x40_Lean_Meta_Tactic_Grind_Arith_CommRing___hyg_418____closed__2; LEAN_EXPORT lean_object* l_Lean_initFn____x40_Lean_Meta_Tactic_Grind_Arith_CommRing___hyg_252_(lean_object*); static lean_object* l_Lean_initFn____x40_Lean_Meta_Tactic_Grind_Arith_CommRing___hyg_294____closed__3; LEAN_EXPORT lean_object* l_Lean_initFn____x40_Lean_Meta_Tactic_Grind_Arith_CommRing___hyg_336_(lean_object*); static lean_object* l_Lean_initFn____x40_Lean_Meta_Tactic_Grind_Arith_CommRing___hyg_210____closed__1; -static lean_object* l_Lean_initFn____x40_Lean_Meta_Tactic_Grind_Arith_CommRing___hyg_459____closed__1; +static lean_object* l_Lean_initFn____x40_Lean_Meta_Tactic_Grind_Arith_CommRing___hyg_501____closed__1; lean_object* l_Lean_Name_mkStr3(lean_object*, lean_object*, lean_object*); +static lean_object* l_Lean_initFn____x40_Lean_Meta_Tactic_Grind_Arith_CommRing___hyg_583____closed__2; static lean_object* l_Lean_initFn____x40_Lean_Meta_Tactic_Grind_Arith_CommRing___hyg_336____closed__3; static lean_object* l_Lean_initFn____x40_Lean_Meta_Tactic_Grind_Arith_CommRing___hyg_85____closed__1; static lean_object* l_Lean_initFn____x40_Lean_Meta_Tactic_Grind_Arith_CommRing___hyg_85____closed__2; static lean_object* l_Lean_initFn____x40_Lean_Meta_Tactic_Grind_Arith_CommRing___hyg_168____closed__1; +static lean_object* l_Lean_initFn____x40_Lean_Meta_Tactic_Grind_Arith_CommRing___hyg_542____closed__1; static lean_object* l_Lean_initFn____x40_Lean_Meta_Tactic_Grind_Arith_CommRing___hyg_3____closed__23; -static lean_object* l_Lean_initFn____x40_Lean_Meta_Tactic_Grind_Arith_CommRing___hyg_500____closed__1; +LEAN_EXPORT lean_object* l_Lean_initFn____x40_Lean_Meta_Tactic_Grind_Arith_CommRing___hyg_419_(lean_object*); +LEAN_EXPORT lean_object* l_Lean_initFn____x40_Lean_Meta_Tactic_Grind_Arith_CommRing___hyg_460_(lean_object*); static lean_object* l_Lean_initFn____x40_Lean_Meta_Tactic_Grind_Arith_CommRing___hyg_252____closed__1; -static lean_object* l_Lean_initFn____x40_Lean_Meta_Tactic_Grind_Arith_CommRing___hyg_377____closed__2; static lean_object* l_Lean_initFn____x40_Lean_Meta_Tactic_Grind_Arith_CommRing___hyg_3____closed__17; LEAN_EXPORT lean_object* l_Lean_initFn____x40_Lean_Meta_Tactic_Grind_Arith_CommRing___hyg_44_(lean_object*); -static lean_object* 
l_Lean_initFn____x40_Lean_Meta_Tactic_Grind_Arith_CommRing___hyg_500____closed__3; lean_object* l_Lean_registerTraceClass(lean_object*, uint8_t, lean_object*, lean_object*); static lean_object* l_Lean_initFn____x40_Lean_Meta_Tactic_Grind_Arith_CommRing___hyg_336____closed__1; -static lean_object* l_Lean_initFn____x40_Lean_Meta_Tactic_Grind_Arith_CommRing___hyg_500____closed__2; LEAN_EXPORT lean_object* l_Lean_initFn____x40_Lean_Meta_Tactic_Grind_Arith_CommRing___hyg_126_(lean_object*); -LEAN_EXPORT lean_object* l_Lean_initFn____x40_Lean_Meta_Tactic_Grind_Arith_CommRing___hyg_377_(lean_object*); static lean_object* l_Lean_initFn____x40_Lean_Meta_Tactic_Grind_Arith_CommRing___hyg_3____closed__12; static lean_object* l_Lean_initFn____x40_Lean_Meta_Tactic_Grind_Arith_CommRing___hyg_3____closed__14; +static lean_object* l_Lean_initFn____x40_Lean_Meta_Tactic_Grind_Arith_CommRing___hyg_378____closed__3; lean_object* l_Lean_Name_num___override(lean_object*, lean_object*); -static lean_object* l_Lean_initFn____x40_Lean_Meta_Tactic_Grind_Arith_CommRing___hyg_582____closed__1; static lean_object* l_Lean_initFn____x40_Lean_Meta_Tactic_Grind_Arith_CommRing___hyg_126____closed__2; -LEAN_EXPORT lean_object* l_Lean_initFn____x40_Lean_Meta_Tactic_Grind_Arith_CommRing___hyg_582_(lean_object*); +LEAN_EXPORT lean_object* l_Lean_initFn____x40_Lean_Meta_Tactic_Grind_Arith_CommRing___hyg_501_(lean_object*); static lean_object* l_Lean_initFn____x40_Lean_Meta_Tactic_Grind_Arith_CommRing___hyg_44____closed__2; static lean_object* l_Lean_initFn____x40_Lean_Meta_Tactic_Grind_Arith_CommRing___hyg_3____closed__20; static lean_object* l_Lean_initFn____x40_Lean_Meta_Tactic_Grind_Arith_CommRing___hyg_252____closed__2; static lean_object* l_Lean_initFn____x40_Lean_Meta_Tactic_Grind_Arith_CommRing___hyg_3____closed__2; +LEAN_EXPORT lean_object* l_Lean_initFn____x40_Lean_Meta_Tactic_Grind_Arith_CommRing___hyg_624_(lean_object*); lean_object* l_Lean_Name_str___override(lean_object*, lean_object*); LEAN_EXPORT lean_object* l_Lean_initFn____x40_Lean_Meta_Tactic_Grind_Arith_CommRing___hyg_85_(lean_object*); static lean_object* l_Lean_initFn____x40_Lean_Meta_Tactic_Grind_Arith_CommRing___hyg_3____closed__6; @@ -61,41 +65,45 @@ static lean_object* l_Lean_initFn____x40_Lean_Meta_Tactic_Grind_Arith_CommRing__ static lean_object* l_Lean_initFn____x40_Lean_Meta_Tactic_Grind_Arith_CommRing___hyg_3____closed__5; static lean_object* l_Lean_initFn____x40_Lean_Meta_Tactic_Grind_Arith_CommRing___hyg_44____closed__1; static lean_object* l_Lean_initFn____x40_Lean_Meta_Tactic_Grind_Arith_CommRing___hyg_3____closed__4; +static lean_object* l_Lean_initFn____x40_Lean_Meta_Tactic_Grind_Arith_CommRing___hyg_665____closed__1; static lean_object* l_Lean_initFn____x40_Lean_Meta_Tactic_Grind_Arith_CommRing___hyg_3____closed__16; static lean_object* l_Lean_initFn____x40_Lean_Meta_Tactic_Grind_Arith_CommRing___hyg_294____closed__1; +LEAN_EXPORT lean_object* l_Lean_initFn____x40_Lean_Meta_Tactic_Grind_Arith_CommRing___hyg_583_(lean_object*); +static lean_object* l_Lean_initFn____x40_Lean_Meta_Tactic_Grind_Arith_CommRing___hyg_419____closed__2; static lean_object* l_Lean_initFn____x40_Lean_Meta_Tactic_Grind_Arith_CommRing___hyg_3____closed__7; +static lean_object* l_Lean_initFn____x40_Lean_Meta_Tactic_Grind_Arith_CommRing___hyg_665____closed__2; LEAN_EXPORT lean_object* l_Lean_initFn____x40_Lean_Meta_Tactic_Grind_Arith_CommRing___hyg_294_(lean_object*); +static lean_object* l_Lean_initFn____x40_Lean_Meta_Tactic_Grind_Arith_CommRing___hyg_624____closed__2; 
static lean_object* l_Lean_initFn____x40_Lean_Meta_Tactic_Grind_Arith_CommRing___hyg_294____closed__2; static lean_object* l_Lean_initFn____x40_Lean_Meta_Tactic_Grind_Arith_CommRing___hyg_85____closed__3; -LEAN_EXPORT lean_object* l_Lean_initFn____x40_Lean_Meta_Tactic_Grind_Arith_CommRing___hyg_500_(lean_object*); -static lean_object* l_Lean_initFn____x40_Lean_Meta_Tactic_Grind_Arith_CommRing___hyg_418____closed__3; +static lean_object* l_Lean_initFn____x40_Lean_Meta_Tactic_Grind_Arith_CommRing___hyg_460____closed__2; static lean_object* l_Lean_initFn____x40_Lean_Meta_Tactic_Grind_Arith_CommRing___hyg_3____closed__3; static lean_object* l_Lean_initFn____x40_Lean_Meta_Tactic_Grind_Arith_CommRing___hyg_3____closed__10; static lean_object* l_Lean_initFn____x40_Lean_Meta_Tactic_Grind_Arith_CommRing___hyg_126____closed__3; -LEAN_EXPORT lean_object* l_Lean_initFn____x40_Lean_Meta_Tactic_Grind_Arith_CommRing___hyg_459_(lean_object*); -static lean_object* l_Lean_initFn____x40_Lean_Meta_Tactic_Grind_Arith_CommRing___hyg_377____closed__3; LEAN_EXPORT lean_object* l_Lean_initFn____x40_Lean_Meta_Tactic_Grind_Arith_CommRing___hyg_210_(lean_object*); lean_object* l_Lean_Name_mkStr2(lean_object*, lean_object*); static lean_object* l_Lean_initFn____x40_Lean_Meta_Tactic_Grind_Arith_CommRing___hyg_3____closed__21; +static lean_object* l_Lean_initFn____x40_Lean_Meta_Tactic_Grind_Arith_CommRing___hyg_460____closed__3; +static lean_object* l_Lean_initFn____x40_Lean_Meta_Tactic_Grind_Arith_CommRing___hyg_501____closed__3; +static lean_object* l_Lean_initFn____x40_Lean_Meta_Tactic_Grind_Arith_CommRing___hyg_378____closed__2; static lean_object* l_Lean_initFn____x40_Lean_Meta_Tactic_Grind_Arith_CommRing___hyg_336____closed__2; -static lean_object* l_Lean_initFn____x40_Lean_Meta_Tactic_Grind_Arith_CommRing___hyg_541____closed__1; -static lean_object* l_Lean_initFn____x40_Lean_Meta_Tactic_Grind_Arith_CommRing___hyg_377____closed__1; static lean_object* l_Lean_initFn____x40_Lean_Meta_Tactic_Grind_Arith_CommRing___hyg_168____closed__3; LEAN_EXPORT lean_object* l_Lean_initFn____x40_Lean_Meta_Tactic_Grind_Arith_CommRing___hyg_168_(lean_object*); -LEAN_EXPORT lean_object* l_Lean_initFn____x40_Lean_Meta_Tactic_Grind_Arith_CommRing___hyg_541_(lean_object*); +static lean_object* l_Lean_initFn____x40_Lean_Meta_Tactic_Grind_Arith_CommRing___hyg_665____closed__3; +static lean_object* l_Lean_initFn____x40_Lean_Meta_Tactic_Grind_Arith_CommRing___hyg_419____closed__1; +static lean_object* l_Lean_initFn____x40_Lean_Meta_Tactic_Grind_Arith_CommRing___hyg_542____closed__3; lean_object* l_Lean_Name_mkStr4(lean_object*, lean_object*, lean_object*, lean_object*); static lean_object* l_Lean_initFn____x40_Lean_Meta_Tactic_Grind_Arith_CommRing___hyg_210____closed__3; +LEAN_EXPORT lean_object* l_Lean_initFn____x40_Lean_Meta_Tactic_Grind_Arith_CommRing___hyg_665_(lean_object*); static lean_object* l_Lean_initFn____x40_Lean_Meta_Tactic_Grind_Arith_CommRing___hyg_126____closed__1; -static lean_object* l_Lean_initFn____x40_Lean_Meta_Tactic_Grind_Arith_CommRing___hyg_418____closed__1; static lean_object* l_Lean_initFn____x40_Lean_Meta_Tactic_Grind_Arith_CommRing___hyg_3____closed__13; static lean_object* l_Lean_initFn____x40_Lean_Meta_Tactic_Grind_Arith_CommRing___hyg_168____closed__2; static lean_object* l_Lean_initFn____x40_Lean_Meta_Tactic_Grind_Arith_CommRing___hyg_3____closed__19; -static lean_object* l_Lean_initFn____x40_Lean_Meta_Tactic_Grind_Arith_CommRing___hyg_582____closed__2; -static lean_object* 
l_Lean_initFn____x40_Lean_Meta_Tactic_Grind_Arith_CommRing___hyg_541____closed__3; +static lean_object* l_Lean_initFn____x40_Lean_Meta_Tactic_Grind_Arith_CommRing___hyg_460____closed__1; +static lean_object* l_Lean_initFn____x40_Lean_Meta_Tactic_Grind_Arith_CommRing___hyg_542____closed__2; static lean_object* l_Lean_initFn____x40_Lean_Meta_Tactic_Grind_Arith_CommRing___hyg_252____closed__3; LEAN_EXPORT lean_object* l_Lean_initFn____x40_Lean_Meta_Tactic_Grind_Arith_CommRing___hyg_3_(lean_object*); -static lean_object* l_Lean_initFn____x40_Lean_Meta_Tactic_Grind_Arith_CommRing___hyg_541____closed__2; -LEAN_EXPORT lean_object* l_Lean_initFn____x40_Lean_Meta_Tactic_Grind_Arith_CommRing___hyg_418_(lean_object*); +LEAN_EXPORT lean_object* l_Lean_initFn____x40_Lean_Meta_Tactic_Grind_Arith_CommRing___hyg_378_(lean_object*); static lean_object* l_Lean_initFn____x40_Lean_Meta_Tactic_Grind_Arith_CommRing___hyg_3____closed__11; static lean_object* l_Lean_initFn____x40_Lean_Meta_Tactic_Grind_Arith_CommRing___hyg_3____closed__22; static lean_object* _init_l_Lean_initFn____x40_Lean_Meta_Tactic_Grind_Arith_CommRing___hyg_3____closed__1() { @@ -565,7 +573,7 @@ static lean_object* _init_l_Lean_initFn____x40_Lean_Meta_Tactic_Grind_Arith_Comm _start: { lean_object* x_1; -x_1 = lean_mk_string_unchecked("discard", 7, 7); +x_1 = lean_mk_string_unchecked("store", 5, 5); return x_1; } } @@ -606,19 +614,20 @@ static lean_object* _init_l_Lean_initFn____x40_Lean_Meta_Tactic_Grind_Arith_Comm _start: { lean_object* x_1; -x_1 = lean_mk_string_unchecked("simp", 4, 4); +x_1 = lean_mk_string_unchecked("discard", 7, 7); return x_1; } } static lean_object* _init_l_Lean_initFn____x40_Lean_Meta_Tactic_Grind_Arith_CommRing___hyg_336____closed__2() { _start: { -lean_object* x_1; lean_object* x_2; lean_object* x_3; lean_object* x_4; +lean_object* x_1; lean_object* x_2; lean_object* x_3; lean_object* x_4; lean_object* x_5; x_1 = l_Lean_initFn____x40_Lean_Meta_Tactic_Grind_Arith_CommRing___hyg_3____closed__1; x_2 = l_Lean_initFn____x40_Lean_Meta_Tactic_Grind_Arith_CommRing___hyg_3____closed__2; -x_3 = l_Lean_initFn____x40_Lean_Meta_Tactic_Grind_Arith_CommRing___hyg_336____closed__1; -x_4 = l_Lean_Name_mkStr3(x_1, x_2, x_3); -return x_4; +x_3 = l_Lean_initFn____x40_Lean_Meta_Tactic_Grind_Arith_CommRing___hyg_85____closed__1; +x_4 = l_Lean_initFn____x40_Lean_Meta_Tactic_Grind_Arith_CommRing___hyg_336____closed__1; +x_5 = l_Lean_Name_mkStr4(x_1, x_2, x_3, x_4); +return x_5; } } static lean_object* _init_l_Lean_initFn____x40_Lean_Meta_Tactic_Grind_Arith_CommRing___hyg_336____closed__3() { @@ -636,13 +645,53 @@ LEAN_EXPORT lean_object* l_Lean_initFn____x40_Lean_Meta_Tactic_Grind_Arith_CommR { lean_object* x_2; uint8_t x_3; lean_object* x_4; lean_object* x_5; x_2 = l_Lean_initFn____x40_Lean_Meta_Tactic_Grind_Arith_CommRing___hyg_336____closed__2; -x_3 = 0; +x_3 = 1; x_4 = l_Lean_initFn____x40_Lean_Meta_Tactic_Grind_Arith_CommRing___hyg_336____closed__3; x_5 = l_Lean_registerTraceClass(x_2, x_3, x_4, x_1); return x_5; } } -static lean_object* _init_l_Lean_initFn____x40_Lean_Meta_Tactic_Grind_Arith_CommRing___hyg_377____closed__1() { +static lean_object* _init_l_Lean_initFn____x40_Lean_Meta_Tactic_Grind_Arith_CommRing___hyg_378____closed__1() { +_start: +{ +lean_object* x_1; +x_1 = lean_mk_string_unchecked("simp", 4, 4); +return x_1; +} +} +static lean_object* _init_l_Lean_initFn____x40_Lean_Meta_Tactic_Grind_Arith_CommRing___hyg_378____closed__2() { +_start: +{ +lean_object* x_1; lean_object* x_2; lean_object* x_3; lean_object* x_4; +x_1 
= l_Lean_initFn____x40_Lean_Meta_Tactic_Grind_Arith_CommRing___hyg_3____closed__1; +x_2 = l_Lean_initFn____x40_Lean_Meta_Tactic_Grind_Arith_CommRing___hyg_3____closed__2; +x_3 = l_Lean_initFn____x40_Lean_Meta_Tactic_Grind_Arith_CommRing___hyg_378____closed__1; +x_4 = l_Lean_Name_mkStr3(x_1, x_2, x_3); +return x_4; +} +} +static lean_object* _init_l_Lean_initFn____x40_Lean_Meta_Tactic_Grind_Arith_CommRing___hyg_378____closed__3() { +_start: +{ +lean_object* x_1; lean_object* x_2; lean_object* x_3; +x_1 = l_Lean_initFn____x40_Lean_Meta_Tactic_Grind_Arith_CommRing___hyg_3____closed__22; +x_2 = lean_unsigned_to_nat(378u); +x_3 = l_Lean_Name_num___override(x_1, x_2); +return x_3; +} +} +LEAN_EXPORT lean_object* l_Lean_initFn____x40_Lean_Meta_Tactic_Grind_Arith_CommRing___hyg_378_(lean_object* x_1) { +_start: +{ +lean_object* x_2; uint8_t x_3; lean_object* x_4; lean_object* x_5; +x_2 = l_Lean_initFn____x40_Lean_Meta_Tactic_Grind_Arith_CommRing___hyg_378____closed__2; +x_3 = 0; +x_4 = l_Lean_initFn____x40_Lean_Meta_Tactic_Grind_Arith_CommRing___hyg_378____closed__3; +x_5 = l_Lean_registerTraceClass(x_2, x_3, x_4, x_1); +return x_5; +} +} +static lean_object* _init_l_Lean_initFn____x40_Lean_Meta_Tactic_Grind_Arith_CommRing___hyg_419____closed__1() { _start: { lean_object* x_1; @@ -650,39 +699,39 @@ x_1 = lean_mk_string_unchecked("superpose", 9, 9); return x_1; } } -static lean_object* _init_l_Lean_initFn____x40_Lean_Meta_Tactic_Grind_Arith_CommRing___hyg_377____closed__2() { +static lean_object* _init_l_Lean_initFn____x40_Lean_Meta_Tactic_Grind_Arith_CommRing___hyg_419____closed__2() { _start: { lean_object* x_1; lean_object* x_2; lean_object* x_3; lean_object* x_4; x_1 = l_Lean_initFn____x40_Lean_Meta_Tactic_Grind_Arith_CommRing___hyg_3____closed__1; x_2 = l_Lean_initFn____x40_Lean_Meta_Tactic_Grind_Arith_CommRing___hyg_3____closed__2; -x_3 = l_Lean_initFn____x40_Lean_Meta_Tactic_Grind_Arith_CommRing___hyg_377____closed__1; +x_3 = l_Lean_initFn____x40_Lean_Meta_Tactic_Grind_Arith_CommRing___hyg_419____closed__1; x_4 = l_Lean_Name_mkStr3(x_1, x_2, x_3); return x_4; } } -static lean_object* _init_l_Lean_initFn____x40_Lean_Meta_Tactic_Grind_Arith_CommRing___hyg_377____closed__3() { +static lean_object* _init_l_Lean_initFn____x40_Lean_Meta_Tactic_Grind_Arith_CommRing___hyg_419____closed__3() { _start: { lean_object* x_1; lean_object* x_2; lean_object* x_3; x_1 = l_Lean_initFn____x40_Lean_Meta_Tactic_Grind_Arith_CommRing___hyg_3____closed__22; -x_2 = lean_unsigned_to_nat(377u); +x_2 = lean_unsigned_to_nat(419u); x_3 = l_Lean_Name_num___override(x_1, x_2); return x_3; } } -LEAN_EXPORT lean_object* l_Lean_initFn____x40_Lean_Meta_Tactic_Grind_Arith_CommRing___hyg_377_(lean_object* x_1) { +LEAN_EXPORT lean_object* l_Lean_initFn____x40_Lean_Meta_Tactic_Grind_Arith_CommRing___hyg_419_(lean_object* x_1) { _start: { lean_object* x_2; uint8_t x_3; lean_object* x_4; lean_object* x_5; -x_2 = l_Lean_initFn____x40_Lean_Meta_Tactic_Grind_Arith_CommRing___hyg_377____closed__2; +x_2 = l_Lean_initFn____x40_Lean_Meta_Tactic_Grind_Arith_CommRing___hyg_419____closed__2; x_3 = 0; -x_4 = l_Lean_initFn____x40_Lean_Meta_Tactic_Grind_Arith_CommRing___hyg_377____closed__3; +x_4 = l_Lean_initFn____x40_Lean_Meta_Tactic_Grind_Arith_CommRing___hyg_419____closed__3; x_5 = l_Lean_registerTraceClass(x_2, x_3, x_4, x_1); return x_5; } } -static lean_object* _init_l_Lean_initFn____x40_Lean_Meta_Tactic_Grind_Arith_CommRing___hyg_418____closed__1() { +static lean_object* 
_init_l_Lean_initFn____x40_Lean_Meta_Tactic_Grind_Arith_CommRing___hyg_460____closed__1() { _start: { lean_object* x_1; @@ -690,39 +739,39 @@ x_1 = lean_mk_string_unchecked("impEq", 5, 5); return x_1; } } -static lean_object* _init_l_Lean_initFn____x40_Lean_Meta_Tactic_Grind_Arith_CommRing___hyg_418____closed__2() { +static lean_object* _init_l_Lean_initFn____x40_Lean_Meta_Tactic_Grind_Arith_CommRing___hyg_460____closed__2() { _start: { lean_object* x_1; lean_object* x_2; lean_object* x_3; lean_object* x_4; x_1 = l_Lean_initFn____x40_Lean_Meta_Tactic_Grind_Arith_CommRing___hyg_3____closed__1; x_2 = l_Lean_initFn____x40_Lean_Meta_Tactic_Grind_Arith_CommRing___hyg_3____closed__2; -x_3 = l_Lean_initFn____x40_Lean_Meta_Tactic_Grind_Arith_CommRing___hyg_418____closed__1; +x_3 = l_Lean_initFn____x40_Lean_Meta_Tactic_Grind_Arith_CommRing___hyg_460____closed__1; x_4 = l_Lean_Name_mkStr3(x_1, x_2, x_3); return x_4; } } -static lean_object* _init_l_Lean_initFn____x40_Lean_Meta_Tactic_Grind_Arith_CommRing___hyg_418____closed__3() { +static lean_object* _init_l_Lean_initFn____x40_Lean_Meta_Tactic_Grind_Arith_CommRing___hyg_460____closed__3() { _start: { lean_object* x_1; lean_object* x_2; lean_object* x_3; x_1 = l_Lean_initFn____x40_Lean_Meta_Tactic_Grind_Arith_CommRing___hyg_3____closed__22; -x_2 = lean_unsigned_to_nat(418u); +x_2 = lean_unsigned_to_nat(460u); x_3 = l_Lean_Name_num___override(x_1, x_2); return x_3; } } -LEAN_EXPORT lean_object* l_Lean_initFn____x40_Lean_Meta_Tactic_Grind_Arith_CommRing___hyg_418_(lean_object* x_1) { +LEAN_EXPORT lean_object* l_Lean_initFn____x40_Lean_Meta_Tactic_Grind_Arith_CommRing___hyg_460_(lean_object* x_1) { _start: { lean_object* x_2; uint8_t x_3; lean_object* x_4; lean_object* x_5; -x_2 = l_Lean_initFn____x40_Lean_Meta_Tactic_Grind_Arith_CommRing___hyg_418____closed__2; +x_2 = l_Lean_initFn____x40_Lean_Meta_Tactic_Grind_Arith_CommRing___hyg_460____closed__2; x_3 = 0; -x_4 = l_Lean_initFn____x40_Lean_Meta_Tactic_Grind_Arith_CommRing___hyg_418____closed__3; +x_4 = l_Lean_initFn____x40_Lean_Meta_Tactic_Grind_Arith_CommRing___hyg_460____closed__3; x_5 = l_Lean_registerTraceClass(x_2, x_3, x_4, x_1); return x_5; } } -static lean_object* _init_l_Lean_initFn____x40_Lean_Meta_Tactic_Grind_Arith_CommRing___hyg_459____closed__1() { +static lean_object* _init_l_Lean_initFn____x40_Lean_Meta_Tactic_Grind_Arith_CommRing___hyg_501____closed__1() { _start: { lean_object* x_1; @@ -730,40 +779,40 @@ x_1 = lean_mk_string_unchecked("debug", 5, 5); return x_1; } } -static lean_object* _init_l_Lean_initFn____x40_Lean_Meta_Tactic_Grind_Arith_CommRing___hyg_459____closed__2() { +static lean_object* _init_l_Lean_initFn____x40_Lean_Meta_Tactic_Grind_Arith_CommRing___hyg_501____closed__2() { _start: { lean_object* x_1; lean_object* x_2; lean_object* x_3; lean_object* x_4; lean_object* x_5; x_1 = l_Lean_initFn____x40_Lean_Meta_Tactic_Grind_Arith_CommRing___hyg_3____closed__1; -x_2 = l_Lean_initFn____x40_Lean_Meta_Tactic_Grind_Arith_CommRing___hyg_459____closed__1; +x_2 = l_Lean_initFn____x40_Lean_Meta_Tactic_Grind_Arith_CommRing___hyg_501____closed__1; x_3 = l_Lean_initFn____x40_Lean_Meta_Tactic_Grind_Arith_CommRing___hyg_3____closed__2; -x_4 = l_Lean_initFn____x40_Lean_Meta_Tactic_Grind_Arith_CommRing___hyg_336____closed__1; +x_4 = l_Lean_initFn____x40_Lean_Meta_Tactic_Grind_Arith_CommRing___hyg_378____closed__1; x_5 = l_Lean_Name_mkStr4(x_1, x_2, x_3, x_4); return x_5; } } -static lean_object* _init_l_Lean_initFn____x40_Lean_Meta_Tactic_Grind_Arith_CommRing___hyg_459____closed__3() { 
+static lean_object* _init_l_Lean_initFn____x40_Lean_Meta_Tactic_Grind_Arith_CommRing___hyg_501____closed__3() { _start: { lean_object* x_1; lean_object* x_2; lean_object* x_3; x_1 = l_Lean_initFn____x40_Lean_Meta_Tactic_Grind_Arith_CommRing___hyg_3____closed__22; -x_2 = lean_unsigned_to_nat(459u); +x_2 = lean_unsigned_to_nat(501u); x_3 = l_Lean_Name_num___override(x_1, x_2); return x_3; } } -LEAN_EXPORT lean_object* l_Lean_initFn____x40_Lean_Meta_Tactic_Grind_Arith_CommRing___hyg_459_(lean_object* x_1) { +LEAN_EXPORT lean_object* l_Lean_initFn____x40_Lean_Meta_Tactic_Grind_Arith_CommRing___hyg_501_(lean_object* x_1) { _start: { lean_object* x_2; uint8_t x_3; lean_object* x_4; lean_object* x_5; -x_2 = l_Lean_initFn____x40_Lean_Meta_Tactic_Grind_Arith_CommRing___hyg_459____closed__2; +x_2 = l_Lean_initFn____x40_Lean_Meta_Tactic_Grind_Arith_CommRing___hyg_501____closed__2; x_3 = 0; -x_4 = l_Lean_initFn____x40_Lean_Meta_Tactic_Grind_Arith_CommRing___hyg_459____closed__3; +x_4 = l_Lean_initFn____x40_Lean_Meta_Tactic_Grind_Arith_CommRing___hyg_501____closed__3; x_5 = l_Lean_registerTraceClass(x_2, x_3, x_4, x_1); return x_5; } } -static lean_object* _init_l_Lean_initFn____x40_Lean_Meta_Tactic_Grind_Arith_CommRing___hyg_500____closed__1() { +static lean_object* _init_l_Lean_initFn____x40_Lean_Meta_Tactic_Grind_Arith_CommRing___hyg_542____closed__1() { _start: { lean_object* x_1; @@ -771,40 +820,40 @@ x_1 = lean_mk_string_unchecked("proof", 5, 5); return x_1; } } -static lean_object* _init_l_Lean_initFn____x40_Lean_Meta_Tactic_Grind_Arith_CommRing___hyg_500____closed__2() { +static lean_object* _init_l_Lean_initFn____x40_Lean_Meta_Tactic_Grind_Arith_CommRing___hyg_542____closed__2() { _start: { lean_object* x_1; lean_object* x_2; lean_object* x_3; lean_object* x_4; lean_object* x_5; x_1 = l_Lean_initFn____x40_Lean_Meta_Tactic_Grind_Arith_CommRing___hyg_3____closed__1; -x_2 = l_Lean_initFn____x40_Lean_Meta_Tactic_Grind_Arith_CommRing___hyg_459____closed__1; +x_2 = l_Lean_initFn____x40_Lean_Meta_Tactic_Grind_Arith_CommRing___hyg_501____closed__1; x_3 = l_Lean_initFn____x40_Lean_Meta_Tactic_Grind_Arith_CommRing___hyg_3____closed__2; -x_4 = l_Lean_initFn____x40_Lean_Meta_Tactic_Grind_Arith_CommRing___hyg_500____closed__1; +x_4 = l_Lean_initFn____x40_Lean_Meta_Tactic_Grind_Arith_CommRing___hyg_542____closed__1; x_5 = l_Lean_Name_mkStr4(x_1, x_2, x_3, x_4); return x_5; } } -static lean_object* _init_l_Lean_initFn____x40_Lean_Meta_Tactic_Grind_Arith_CommRing___hyg_500____closed__3() { +static lean_object* _init_l_Lean_initFn____x40_Lean_Meta_Tactic_Grind_Arith_CommRing___hyg_542____closed__3() { _start: { lean_object* x_1; lean_object* x_2; lean_object* x_3; x_1 = l_Lean_initFn____x40_Lean_Meta_Tactic_Grind_Arith_CommRing___hyg_3____closed__22; -x_2 = lean_unsigned_to_nat(500u); +x_2 = lean_unsigned_to_nat(542u); x_3 = l_Lean_Name_num___override(x_1, x_2); return x_3; } } -LEAN_EXPORT lean_object* l_Lean_initFn____x40_Lean_Meta_Tactic_Grind_Arith_CommRing___hyg_500_(lean_object* x_1) { +LEAN_EXPORT lean_object* l_Lean_initFn____x40_Lean_Meta_Tactic_Grind_Arith_CommRing___hyg_542_(lean_object* x_1) { _start: { lean_object* x_2; uint8_t x_3; lean_object* x_4; lean_object* x_5; -x_2 = l_Lean_initFn____x40_Lean_Meta_Tactic_Grind_Arith_CommRing___hyg_500____closed__2; +x_2 = l_Lean_initFn____x40_Lean_Meta_Tactic_Grind_Arith_CommRing___hyg_542____closed__2; x_3 = 0; -x_4 = l_Lean_initFn____x40_Lean_Meta_Tactic_Grind_Arith_CommRing___hyg_500____closed__3; +x_4 = 
l_Lean_initFn____x40_Lean_Meta_Tactic_Grind_Arith_CommRing___hyg_542____closed__3; x_5 = l_Lean_registerTraceClass(x_2, x_3, x_4, x_1); return x_5; } } -static lean_object* _init_l_Lean_initFn____x40_Lean_Meta_Tactic_Grind_Arith_CommRing___hyg_541____closed__1() { +static lean_object* _init_l_Lean_initFn____x40_Lean_Meta_Tactic_Grind_Arith_CommRing___hyg_583____closed__1() { _start: { lean_object* x_1; @@ -812,68 +861,109 @@ x_1 = lean_mk_string_unchecked("check", 5, 5); return x_1; } } -static lean_object* _init_l_Lean_initFn____x40_Lean_Meta_Tactic_Grind_Arith_CommRing___hyg_541____closed__2() { +static lean_object* _init_l_Lean_initFn____x40_Lean_Meta_Tactic_Grind_Arith_CommRing___hyg_583____closed__2() { _start: { lean_object* x_1; lean_object* x_2; lean_object* x_3; lean_object* x_4; lean_object* x_5; x_1 = l_Lean_initFn____x40_Lean_Meta_Tactic_Grind_Arith_CommRing___hyg_3____closed__1; -x_2 = l_Lean_initFn____x40_Lean_Meta_Tactic_Grind_Arith_CommRing___hyg_459____closed__1; +x_2 = l_Lean_initFn____x40_Lean_Meta_Tactic_Grind_Arith_CommRing___hyg_501____closed__1; x_3 = l_Lean_initFn____x40_Lean_Meta_Tactic_Grind_Arith_CommRing___hyg_3____closed__2; -x_4 = l_Lean_initFn____x40_Lean_Meta_Tactic_Grind_Arith_CommRing___hyg_541____closed__1; +x_4 = l_Lean_initFn____x40_Lean_Meta_Tactic_Grind_Arith_CommRing___hyg_583____closed__1; x_5 = l_Lean_Name_mkStr4(x_1, x_2, x_3, x_4); return x_5; } } -static lean_object* _init_l_Lean_initFn____x40_Lean_Meta_Tactic_Grind_Arith_CommRing___hyg_541____closed__3() { +static lean_object* _init_l_Lean_initFn____x40_Lean_Meta_Tactic_Grind_Arith_CommRing___hyg_583____closed__3() { _start: { lean_object* x_1; lean_object* x_2; lean_object* x_3; x_1 = l_Lean_initFn____x40_Lean_Meta_Tactic_Grind_Arith_CommRing___hyg_3____closed__22; -x_2 = lean_unsigned_to_nat(541u); +x_2 = lean_unsigned_to_nat(583u); x_3 = l_Lean_Name_num___override(x_1, x_2); return x_3; } } -LEAN_EXPORT lean_object* l_Lean_initFn____x40_Lean_Meta_Tactic_Grind_Arith_CommRing___hyg_541_(lean_object* x_1) { +LEAN_EXPORT lean_object* l_Lean_initFn____x40_Lean_Meta_Tactic_Grind_Arith_CommRing___hyg_583_(lean_object* x_1) { _start: { lean_object* x_2; uint8_t x_3; lean_object* x_4; lean_object* x_5; -x_2 = l_Lean_initFn____x40_Lean_Meta_Tactic_Grind_Arith_CommRing___hyg_541____closed__2; +x_2 = l_Lean_initFn____x40_Lean_Meta_Tactic_Grind_Arith_CommRing___hyg_583____closed__2; x_3 = 0; -x_4 = l_Lean_initFn____x40_Lean_Meta_Tactic_Grind_Arith_CommRing___hyg_541____closed__3; +x_4 = l_Lean_initFn____x40_Lean_Meta_Tactic_Grind_Arith_CommRing___hyg_583____closed__3; x_5 = l_Lean_registerTraceClass(x_2, x_3, x_4, x_1); return x_5; } } -static lean_object* _init_l_Lean_initFn____x40_Lean_Meta_Tactic_Grind_Arith_CommRing___hyg_582____closed__1() { +static lean_object* _init_l_Lean_initFn____x40_Lean_Meta_Tactic_Grind_Arith_CommRing___hyg_624____closed__1() { _start: { lean_object* x_1; lean_object* x_2; lean_object* x_3; lean_object* x_4; lean_object* x_5; x_1 = l_Lean_initFn____x40_Lean_Meta_Tactic_Grind_Arith_CommRing___hyg_3____closed__1; -x_2 = l_Lean_initFn____x40_Lean_Meta_Tactic_Grind_Arith_CommRing___hyg_459____closed__1; +x_2 = l_Lean_initFn____x40_Lean_Meta_Tactic_Grind_Arith_CommRing___hyg_501____closed__1; x_3 = l_Lean_initFn____x40_Lean_Meta_Tactic_Grind_Arith_CommRing___hyg_3____closed__2; -x_4 = l_Lean_initFn____x40_Lean_Meta_Tactic_Grind_Arith_CommRing___hyg_418____closed__1; +x_4 = l_Lean_initFn____x40_Lean_Meta_Tactic_Grind_Arith_CommRing___hyg_460____closed__1; x_5 = 
l_Lean_Name_mkStr4(x_1, x_2, x_3, x_4); return x_5; } } -static lean_object* _init_l_Lean_initFn____x40_Lean_Meta_Tactic_Grind_Arith_CommRing___hyg_582____closed__2() { +static lean_object* _init_l_Lean_initFn____x40_Lean_Meta_Tactic_Grind_Arith_CommRing___hyg_624____closed__2() { _start: { lean_object* x_1; lean_object* x_2; lean_object* x_3; x_1 = l_Lean_initFn____x40_Lean_Meta_Tactic_Grind_Arith_CommRing___hyg_3____closed__22; -x_2 = lean_unsigned_to_nat(582u); +x_2 = lean_unsigned_to_nat(624u); x_3 = l_Lean_Name_num___override(x_1, x_2); return x_3; } } -LEAN_EXPORT lean_object* l_Lean_initFn____x40_Lean_Meta_Tactic_Grind_Arith_CommRing___hyg_582_(lean_object* x_1) { +LEAN_EXPORT lean_object* l_Lean_initFn____x40_Lean_Meta_Tactic_Grind_Arith_CommRing___hyg_624_(lean_object* x_1) { _start: { lean_object* x_2; uint8_t x_3; lean_object* x_4; lean_object* x_5; -x_2 = l_Lean_initFn____x40_Lean_Meta_Tactic_Grind_Arith_CommRing___hyg_582____closed__1; +x_2 = l_Lean_initFn____x40_Lean_Meta_Tactic_Grind_Arith_CommRing___hyg_624____closed__1; x_3 = 0; -x_4 = l_Lean_initFn____x40_Lean_Meta_Tactic_Grind_Arith_CommRing___hyg_582____closed__2; +x_4 = l_Lean_initFn____x40_Lean_Meta_Tactic_Grind_Arith_CommRing___hyg_624____closed__2; +x_5 = l_Lean_registerTraceClass(x_2, x_3, x_4, x_1); +return x_5; +} +} +static lean_object* _init_l_Lean_initFn____x40_Lean_Meta_Tactic_Grind_Arith_CommRing___hyg_665____closed__1() { +_start: +{ +lean_object* x_1; +x_1 = lean_mk_string_unchecked("simpBasis", 9, 9); +return x_1; +} +} +static lean_object* _init_l_Lean_initFn____x40_Lean_Meta_Tactic_Grind_Arith_CommRing___hyg_665____closed__2() { +_start: +{ +lean_object* x_1; lean_object* x_2; lean_object* x_3; lean_object* x_4; lean_object* x_5; +x_1 = l_Lean_initFn____x40_Lean_Meta_Tactic_Grind_Arith_CommRing___hyg_3____closed__1; +x_2 = l_Lean_initFn____x40_Lean_Meta_Tactic_Grind_Arith_CommRing___hyg_501____closed__1; +x_3 = l_Lean_initFn____x40_Lean_Meta_Tactic_Grind_Arith_CommRing___hyg_3____closed__2; +x_4 = l_Lean_initFn____x40_Lean_Meta_Tactic_Grind_Arith_CommRing___hyg_665____closed__1; +x_5 = l_Lean_Name_mkStr4(x_1, x_2, x_3, x_4); +return x_5; +} +} +static lean_object* _init_l_Lean_initFn____x40_Lean_Meta_Tactic_Grind_Arith_CommRing___hyg_665____closed__3() { +_start: +{ +lean_object* x_1; lean_object* x_2; lean_object* x_3; +x_1 = l_Lean_initFn____x40_Lean_Meta_Tactic_Grind_Arith_CommRing___hyg_3____closed__22; +x_2 = lean_unsigned_to_nat(665u); +x_3 = l_Lean_Name_num___override(x_1, x_2); +return x_3; +} +} +LEAN_EXPORT lean_object* l_Lean_initFn____x40_Lean_Meta_Tactic_Grind_Arith_CommRing___hyg_665_(lean_object* x_1) { +_start: +{ +lean_object* x_2; uint8_t x_3; lean_object* x_4; lean_object* x_5; +x_2 = l_Lean_initFn____x40_Lean_Meta_Tactic_Grind_Arith_CommRing___hyg_665____closed__2; +x_3 = 0; +x_4 = l_Lean_initFn____x40_Lean_Meta_Tactic_Grind_Arith_CommRing___hyg_665____closed__3; x_5 = l_Lean_registerTraceClass(x_2, x_3, x_4, x_1); return x_5; } @@ -890,6 +980,7 @@ lean_object* initialize_Lean_Meta_Tactic_Grind_Arith_CommRing_EqCnstr(uint8_t bu lean_object* initialize_Lean_Meta_Tactic_Grind_Arith_CommRing_Proof(uint8_t builtin, lean_object*); lean_object* initialize_Lean_Meta_Tactic_Grind_Arith_CommRing_DenoteExpr(uint8_t builtin, lean_object*); lean_object* initialize_Lean_Meta_Tactic_Grind_Arith_CommRing_Inv(uint8_t builtin, lean_object*); +lean_object* initialize_Lean_Meta_Tactic_Grind_Arith_CommRing_PP(uint8_t builtin, lean_object*); static bool _G_initialized = false; LEAN_EXPORT lean_object* 
initialize_Lean_Meta_Tactic_Grind_Arith_CommRing(uint8_t builtin, lean_object* w) { lean_object * res; @@ -931,6 +1022,9 @@ lean_dec_ref(res); res = initialize_Lean_Meta_Tactic_Grind_Arith_CommRing_Inv(builtin, lean_io_mk_world()); if (lean_io_result_is_error(res)) return res; lean_dec_ref(res); +res = initialize_Lean_Meta_Tactic_Grind_Arith_CommRing_PP(builtin, lean_io_mk_world()); +if (lean_io_result_is_error(res)) return res; +lean_dec_ref(res); l_Lean_initFn____x40_Lean_Meta_Tactic_Grind_Arith_CommRing___hyg_3____closed__1 = _init_l_Lean_initFn____x40_Lean_Meta_Tactic_Grind_Arith_CommRing___hyg_3____closed__1(); lean_mark_persistent(l_Lean_initFn____x40_Lean_Meta_Tactic_Grind_Arith_CommRing___hyg_3____closed__1); l_Lean_initFn____x40_Lean_Meta_Tactic_Grind_Arith_CommRing___hyg_3____closed__2 = _init_l_Lean_initFn____x40_Lean_Meta_Tactic_Grind_Arith_CommRing___hyg_3____closed__2(); @@ -1052,56 +1146,74 @@ lean_mark_persistent(l_Lean_initFn____x40_Lean_Meta_Tactic_Grind_Arith_CommRing_ if (builtin) {res = l_Lean_initFn____x40_Lean_Meta_Tactic_Grind_Arith_CommRing___hyg_336_(lean_io_mk_world()); if (lean_io_result_is_error(res)) return res; lean_dec_ref(res); -}l_Lean_initFn____x40_Lean_Meta_Tactic_Grind_Arith_CommRing___hyg_377____closed__1 = _init_l_Lean_initFn____x40_Lean_Meta_Tactic_Grind_Arith_CommRing___hyg_377____closed__1(); -lean_mark_persistent(l_Lean_initFn____x40_Lean_Meta_Tactic_Grind_Arith_CommRing___hyg_377____closed__1); -l_Lean_initFn____x40_Lean_Meta_Tactic_Grind_Arith_CommRing___hyg_377____closed__2 = _init_l_Lean_initFn____x40_Lean_Meta_Tactic_Grind_Arith_CommRing___hyg_377____closed__2(); -lean_mark_persistent(l_Lean_initFn____x40_Lean_Meta_Tactic_Grind_Arith_CommRing___hyg_377____closed__2); -l_Lean_initFn____x40_Lean_Meta_Tactic_Grind_Arith_CommRing___hyg_377____closed__3 = _init_l_Lean_initFn____x40_Lean_Meta_Tactic_Grind_Arith_CommRing___hyg_377____closed__3(); -lean_mark_persistent(l_Lean_initFn____x40_Lean_Meta_Tactic_Grind_Arith_CommRing___hyg_377____closed__3); -if (builtin) {res = l_Lean_initFn____x40_Lean_Meta_Tactic_Grind_Arith_CommRing___hyg_377_(lean_io_mk_world()); +}l_Lean_initFn____x40_Lean_Meta_Tactic_Grind_Arith_CommRing___hyg_378____closed__1 = _init_l_Lean_initFn____x40_Lean_Meta_Tactic_Grind_Arith_CommRing___hyg_378____closed__1(); +lean_mark_persistent(l_Lean_initFn____x40_Lean_Meta_Tactic_Grind_Arith_CommRing___hyg_378____closed__1); +l_Lean_initFn____x40_Lean_Meta_Tactic_Grind_Arith_CommRing___hyg_378____closed__2 = _init_l_Lean_initFn____x40_Lean_Meta_Tactic_Grind_Arith_CommRing___hyg_378____closed__2(); +lean_mark_persistent(l_Lean_initFn____x40_Lean_Meta_Tactic_Grind_Arith_CommRing___hyg_378____closed__2); +l_Lean_initFn____x40_Lean_Meta_Tactic_Grind_Arith_CommRing___hyg_378____closed__3 = _init_l_Lean_initFn____x40_Lean_Meta_Tactic_Grind_Arith_CommRing___hyg_378____closed__3(); +lean_mark_persistent(l_Lean_initFn____x40_Lean_Meta_Tactic_Grind_Arith_CommRing___hyg_378____closed__3); +if (builtin) {res = l_Lean_initFn____x40_Lean_Meta_Tactic_Grind_Arith_CommRing___hyg_378_(lean_io_mk_world()); +if (lean_io_result_is_error(res)) return res; +lean_dec_ref(res); +}l_Lean_initFn____x40_Lean_Meta_Tactic_Grind_Arith_CommRing___hyg_419____closed__1 = _init_l_Lean_initFn____x40_Lean_Meta_Tactic_Grind_Arith_CommRing___hyg_419____closed__1(); +lean_mark_persistent(l_Lean_initFn____x40_Lean_Meta_Tactic_Grind_Arith_CommRing___hyg_419____closed__1); +l_Lean_initFn____x40_Lean_Meta_Tactic_Grind_Arith_CommRing___hyg_419____closed__2 = 
_init_l_Lean_initFn____x40_Lean_Meta_Tactic_Grind_Arith_CommRing___hyg_419____closed__2(); +lean_mark_persistent(l_Lean_initFn____x40_Lean_Meta_Tactic_Grind_Arith_CommRing___hyg_419____closed__2); +l_Lean_initFn____x40_Lean_Meta_Tactic_Grind_Arith_CommRing___hyg_419____closed__3 = _init_l_Lean_initFn____x40_Lean_Meta_Tactic_Grind_Arith_CommRing___hyg_419____closed__3(); +lean_mark_persistent(l_Lean_initFn____x40_Lean_Meta_Tactic_Grind_Arith_CommRing___hyg_419____closed__3); +if (builtin) {res = l_Lean_initFn____x40_Lean_Meta_Tactic_Grind_Arith_CommRing___hyg_419_(lean_io_mk_world()); +if (lean_io_result_is_error(res)) return res; +lean_dec_ref(res); +}l_Lean_initFn____x40_Lean_Meta_Tactic_Grind_Arith_CommRing___hyg_460____closed__1 = _init_l_Lean_initFn____x40_Lean_Meta_Tactic_Grind_Arith_CommRing___hyg_460____closed__1(); +lean_mark_persistent(l_Lean_initFn____x40_Lean_Meta_Tactic_Grind_Arith_CommRing___hyg_460____closed__1); +l_Lean_initFn____x40_Lean_Meta_Tactic_Grind_Arith_CommRing___hyg_460____closed__2 = _init_l_Lean_initFn____x40_Lean_Meta_Tactic_Grind_Arith_CommRing___hyg_460____closed__2(); +lean_mark_persistent(l_Lean_initFn____x40_Lean_Meta_Tactic_Grind_Arith_CommRing___hyg_460____closed__2); +l_Lean_initFn____x40_Lean_Meta_Tactic_Grind_Arith_CommRing___hyg_460____closed__3 = _init_l_Lean_initFn____x40_Lean_Meta_Tactic_Grind_Arith_CommRing___hyg_460____closed__3(); +lean_mark_persistent(l_Lean_initFn____x40_Lean_Meta_Tactic_Grind_Arith_CommRing___hyg_460____closed__3); +if (builtin) {res = l_Lean_initFn____x40_Lean_Meta_Tactic_Grind_Arith_CommRing___hyg_460_(lean_io_mk_world()); if (lean_io_result_is_error(res)) return res; lean_dec_ref(res); -}l_Lean_initFn____x40_Lean_Meta_Tactic_Grind_Arith_CommRing___hyg_418____closed__1 = _init_l_Lean_initFn____x40_Lean_Meta_Tactic_Grind_Arith_CommRing___hyg_418____closed__1(); -lean_mark_persistent(l_Lean_initFn____x40_Lean_Meta_Tactic_Grind_Arith_CommRing___hyg_418____closed__1); -l_Lean_initFn____x40_Lean_Meta_Tactic_Grind_Arith_CommRing___hyg_418____closed__2 = _init_l_Lean_initFn____x40_Lean_Meta_Tactic_Grind_Arith_CommRing___hyg_418____closed__2(); -lean_mark_persistent(l_Lean_initFn____x40_Lean_Meta_Tactic_Grind_Arith_CommRing___hyg_418____closed__2); -l_Lean_initFn____x40_Lean_Meta_Tactic_Grind_Arith_CommRing___hyg_418____closed__3 = _init_l_Lean_initFn____x40_Lean_Meta_Tactic_Grind_Arith_CommRing___hyg_418____closed__3(); -lean_mark_persistent(l_Lean_initFn____x40_Lean_Meta_Tactic_Grind_Arith_CommRing___hyg_418____closed__3); -if (builtin) {res = l_Lean_initFn____x40_Lean_Meta_Tactic_Grind_Arith_CommRing___hyg_418_(lean_io_mk_world()); +}l_Lean_initFn____x40_Lean_Meta_Tactic_Grind_Arith_CommRing___hyg_501____closed__1 = _init_l_Lean_initFn____x40_Lean_Meta_Tactic_Grind_Arith_CommRing___hyg_501____closed__1(); +lean_mark_persistent(l_Lean_initFn____x40_Lean_Meta_Tactic_Grind_Arith_CommRing___hyg_501____closed__1); +l_Lean_initFn____x40_Lean_Meta_Tactic_Grind_Arith_CommRing___hyg_501____closed__2 = _init_l_Lean_initFn____x40_Lean_Meta_Tactic_Grind_Arith_CommRing___hyg_501____closed__2(); +lean_mark_persistent(l_Lean_initFn____x40_Lean_Meta_Tactic_Grind_Arith_CommRing___hyg_501____closed__2); +l_Lean_initFn____x40_Lean_Meta_Tactic_Grind_Arith_CommRing___hyg_501____closed__3 = _init_l_Lean_initFn____x40_Lean_Meta_Tactic_Grind_Arith_CommRing___hyg_501____closed__3(); +lean_mark_persistent(l_Lean_initFn____x40_Lean_Meta_Tactic_Grind_Arith_CommRing___hyg_501____closed__3); +if (builtin) {res = 
l_Lean_initFn____x40_Lean_Meta_Tactic_Grind_Arith_CommRing___hyg_501_(lean_io_mk_world()); if (lean_io_result_is_error(res)) return res; lean_dec_ref(res); -}l_Lean_initFn____x40_Lean_Meta_Tactic_Grind_Arith_CommRing___hyg_459____closed__1 = _init_l_Lean_initFn____x40_Lean_Meta_Tactic_Grind_Arith_CommRing___hyg_459____closed__1(); -lean_mark_persistent(l_Lean_initFn____x40_Lean_Meta_Tactic_Grind_Arith_CommRing___hyg_459____closed__1); -l_Lean_initFn____x40_Lean_Meta_Tactic_Grind_Arith_CommRing___hyg_459____closed__2 = _init_l_Lean_initFn____x40_Lean_Meta_Tactic_Grind_Arith_CommRing___hyg_459____closed__2(); -lean_mark_persistent(l_Lean_initFn____x40_Lean_Meta_Tactic_Grind_Arith_CommRing___hyg_459____closed__2); -l_Lean_initFn____x40_Lean_Meta_Tactic_Grind_Arith_CommRing___hyg_459____closed__3 = _init_l_Lean_initFn____x40_Lean_Meta_Tactic_Grind_Arith_CommRing___hyg_459____closed__3(); -lean_mark_persistent(l_Lean_initFn____x40_Lean_Meta_Tactic_Grind_Arith_CommRing___hyg_459____closed__3); -if (builtin) {res = l_Lean_initFn____x40_Lean_Meta_Tactic_Grind_Arith_CommRing___hyg_459_(lean_io_mk_world()); +}l_Lean_initFn____x40_Lean_Meta_Tactic_Grind_Arith_CommRing___hyg_542____closed__1 = _init_l_Lean_initFn____x40_Lean_Meta_Tactic_Grind_Arith_CommRing___hyg_542____closed__1(); +lean_mark_persistent(l_Lean_initFn____x40_Lean_Meta_Tactic_Grind_Arith_CommRing___hyg_542____closed__1); +l_Lean_initFn____x40_Lean_Meta_Tactic_Grind_Arith_CommRing___hyg_542____closed__2 = _init_l_Lean_initFn____x40_Lean_Meta_Tactic_Grind_Arith_CommRing___hyg_542____closed__2(); +lean_mark_persistent(l_Lean_initFn____x40_Lean_Meta_Tactic_Grind_Arith_CommRing___hyg_542____closed__2); +l_Lean_initFn____x40_Lean_Meta_Tactic_Grind_Arith_CommRing___hyg_542____closed__3 = _init_l_Lean_initFn____x40_Lean_Meta_Tactic_Grind_Arith_CommRing___hyg_542____closed__3(); +lean_mark_persistent(l_Lean_initFn____x40_Lean_Meta_Tactic_Grind_Arith_CommRing___hyg_542____closed__3); +if (builtin) {res = l_Lean_initFn____x40_Lean_Meta_Tactic_Grind_Arith_CommRing___hyg_542_(lean_io_mk_world()); if (lean_io_result_is_error(res)) return res; lean_dec_ref(res); -}l_Lean_initFn____x40_Lean_Meta_Tactic_Grind_Arith_CommRing___hyg_500____closed__1 = _init_l_Lean_initFn____x40_Lean_Meta_Tactic_Grind_Arith_CommRing___hyg_500____closed__1(); -lean_mark_persistent(l_Lean_initFn____x40_Lean_Meta_Tactic_Grind_Arith_CommRing___hyg_500____closed__1); -l_Lean_initFn____x40_Lean_Meta_Tactic_Grind_Arith_CommRing___hyg_500____closed__2 = _init_l_Lean_initFn____x40_Lean_Meta_Tactic_Grind_Arith_CommRing___hyg_500____closed__2(); -lean_mark_persistent(l_Lean_initFn____x40_Lean_Meta_Tactic_Grind_Arith_CommRing___hyg_500____closed__2); -l_Lean_initFn____x40_Lean_Meta_Tactic_Grind_Arith_CommRing___hyg_500____closed__3 = _init_l_Lean_initFn____x40_Lean_Meta_Tactic_Grind_Arith_CommRing___hyg_500____closed__3(); -lean_mark_persistent(l_Lean_initFn____x40_Lean_Meta_Tactic_Grind_Arith_CommRing___hyg_500____closed__3); -if (builtin) {res = l_Lean_initFn____x40_Lean_Meta_Tactic_Grind_Arith_CommRing___hyg_500_(lean_io_mk_world()); +}l_Lean_initFn____x40_Lean_Meta_Tactic_Grind_Arith_CommRing___hyg_583____closed__1 = _init_l_Lean_initFn____x40_Lean_Meta_Tactic_Grind_Arith_CommRing___hyg_583____closed__1(); +lean_mark_persistent(l_Lean_initFn____x40_Lean_Meta_Tactic_Grind_Arith_CommRing___hyg_583____closed__1); +l_Lean_initFn____x40_Lean_Meta_Tactic_Grind_Arith_CommRing___hyg_583____closed__2 = 
_init_l_Lean_initFn____x40_Lean_Meta_Tactic_Grind_Arith_CommRing___hyg_583____closed__2(); +lean_mark_persistent(l_Lean_initFn____x40_Lean_Meta_Tactic_Grind_Arith_CommRing___hyg_583____closed__2); +l_Lean_initFn____x40_Lean_Meta_Tactic_Grind_Arith_CommRing___hyg_583____closed__3 = _init_l_Lean_initFn____x40_Lean_Meta_Tactic_Grind_Arith_CommRing___hyg_583____closed__3(); +lean_mark_persistent(l_Lean_initFn____x40_Lean_Meta_Tactic_Grind_Arith_CommRing___hyg_583____closed__3); +if (builtin) {res = l_Lean_initFn____x40_Lean_Meta_Tactic_Grind_Arith_CommRing___hyg_583_(lean_io_mk_world()); if (lean_io_result_is_error(res)) return res; lean_dec_ref(res); -}l_Lean_initFn____x40_Lean_Meta_Tactic_Grind_Arith_CommRing___hyg_541____closed__1 = _init_l_Lean_initFn____x40_Lean_Meta_Tactic_Grind_Arith_CommRing___hyg_541____closed__1(); -lean_mark_persistent(l_Lean_initFn____x40_Lean_Meta_Tactic_Grind_Arith_CommRing___hyg_541____closed__1); -l_Lean_initFn____x40_Lean_Meta_Tactic_Grind_Arith_CommRing___hyg_541____closed__2 = _init_l_Lean_initFn____x40_Lean_Meta_Tactic_Grind_Arith_CommRing___hyg_541____closed__2(); -lean_mark_persistent(l_Lean_initFn____x40_Lean_Meta_Tactic_Grind_Arith_CommRing___hyg_541____closed__2); -l_Lean_initFn____x40_Lean_Meta_Tactic_Grind_Arith_CommRing___hyg_541____closed__3 = _init_l_Lean_initFn____x40_Lean_Meta_Tactic_Grind_Arith_CommRing___hyg_541____closed__3(); -lean_mark_persistent(l_Lean_initFn____x40_Lean_Meta_Tactic_Grind_Arith_CommRing___hyg_541____closed__3); -if (builtin) {res = l_Lean_initFn____x40_Lean_Meta_Tactic_Grind_Arith_CommRing___hyg_541_(lean_io_mk_world()); +}l_Lean_initFn____x40_Lean_Meta_Tactic_Grind_Arith_CommRing___hyg_624____closed__1 = _init_l_Lean_initFn____x40_Lean_Meta_Tactic_Grind_Arith_CommRing___hyg_624____closed__1(); +lean_mark_persistent(l_Lean_initFn____x40_Lean_Meta_Tactic_Grind_Arith_CommRing___hyg_624____closed__1); +l_Lean_initFn____x40_Lean_Meta_Tactic_Grind_Arith_CommRing___hyg_624____closed__2 = _init_l_Lean_initFn____x40_Lean_Meta_Tactic_Grind_Arith_CommRing___hyg_624____closed__2(); +lean_mark_persistent(l_Lean_initFn____x40_Lean_Meta_Tactic_Grind_Arith_CommRing___hyg_624____closed__2); +if (builtin) {res = l_Lean_initFn____x40_Lean_Meta_Tactic_Grind_Arith_CommRing___hyg_624_(lean_io_mk_world()); if (lean_io_result_is_error(res)) return res; lean_dec_ref(res); -}l_Lean_initFn____x40_Lean_Meta_Tactic_Grind_Arith_CommRing___hyg_582____closed__1 = _init_l_Lean_initFn____x40_Lean_Meta_Tactic_Grind_Arith_CommRing___hyg_582____closed__1(); -lean_mark_persistent(l_Lean_initFn____x40_Lean_Meta_Tactic_Grind_Arith_CommRing___hyg_582____closed__1); -l_Lean_initFn____x40_Lean_Meta_Tactic_Grind_Arith_CommRing___hyg_582____closed__2 = _init_l_Lean_initFn____x40_Lean_Meta_Tactic_Grind_Arith_CommRing___hyg_582____closed__2(); -lean_mark_persistent(l_Lean_initFn____x40_Lean_Meta_Tactic_Grind_Arith_CommRing___hyg_582____closed__2); -if (builtin) {res = l_Lean_initFn____x40_Lean_Meta_Tactic_Grind_Arith_CommRing___hyg_582_(lean_io_mk_world()); +}l_Lean_initFn____x40_Lean_Meta_Tactic_Grind_Arith_CommRing___hyg_665____closed__1 = _init_l_Lean_initFn____x40_Lean_Meta_Tactic_Grind_Arith_CommRing___hyg_665____closed__1(); +lean_mark_persistent(l_Lean_initFn____x40_Lean_Meta_Tactic_Grind_Arith_CommRing___hyg_665____closed__1); +l_Lean_initFn____x40_Lean_Meta_Tactic_Grind_Arith_CommRing___hyg_665____closed__2 = _init_l_Lean_initFn____x40_Lean_Meta_Tactic_Grind_Arith_CommRing___hyg_665____closed__2(); 
+lean_mark_persistent(l_Lean_initFn____x40_Lean_Meta_Tactic_Grind_Arith_CommRing___hyg_665____closed__2); +l_Lean_initFn____x40_Lean_Meta_Tactic_Grind_Arith_CommRing___hyg_665____closed__3 = _init_l_Lean_initFn____x40_Lean_Meta_Tactic_Grind_Arith_CommRing___hyg_665____closed__3(); +lean_mark_persistent(l_Lean_initFn____x40_Lean_Meta_Tactic_Grind_Arith_CommRing___hyg_665____closed__3); +if (builtin) {res = l_Lean_initFn____x40_Lean_Meta_Tactic_Grind_Arith_CommRing___hyg_665_(lean_io_mk_world()); if (lean_io_result_is_error(res)) return res; lean_dec_ref(res); }return lean_io_result_mk_ok(lean_box(0)); diff --git a/stage0/stdlib/Lean/Meta/Tactic/Grind/Arith/CommRing/DenoteExpr.c b/stage0/stdlib/Lean/Meta/Tactic/Grind/Arith/CommRing/DenoteExpr.c index 50c46dabeff1..d85bf6a0c120 100644 --- a/stage0/stdlib/Lean/Meta/Tactic/Grind/Arith/CommRing/DenoteExpr.c +++ b/stage0/stdlib/Lean/Meta/Tactic/Grind/Arith/CommRing/DenoteExpr.c @@ -13,61 +13,98 @@ #ifdef __cplusplus extern "C" { #endif +LEAN_EXPORT lean_object* l_Lean_Grind_CommRing_Power_denoteExpr___rarg___lambda__1(lean_object*, lean_object*, lean_object*, lean_object*); lean_object* l_Lean_Expr_const___override(lean_object*, lean_object*); -LEAN_EXPORT lean_object* l_Lean_Grind_CommRing_Expr_denoteExpr_go(lean_object*, lean_object*, lean_object*, lean_object*, lean_object*, lean_object*, lean_object*, lean_object*, lean_object*, lean_object*, lean_object*); +LEAN_EXPORT lean_object* l_Lean_Grind_CommRing_Expr_denoteExpr_go(lean_object*); lean_object* l_Lean_mkNatLit(lean_object*); -static lean_object* l___private_Lean_Meta_Tactic_Grind_Arith_CommRing_DenoteExpr_0__Lean_Meta_Grind_Arith_CommRing_denoteNum___closed__7; -static lean_object* l___private_Lean_Meta_Tactic_Grind_Arith_CommRing_DenoteExpr_0__Lean_Meta_Grind_Arith_CommRing_denoteNum___closed__2; -LEAN_EXPORT lean_object* l_Lean_Grind_CommRing_Mon_denoteExpr___boxed(lean_object*, lean_object*, lean_object*, lean_object*, lean_object*, lean_object*, lean_object*, lean_object*, lean_object*, lean_object*, lean_object*); -LEAN_EXPORT lean_object* l_Lean_Meta_Grind_Arith_CommRing_EqCnstr_denoteExpr(lean_object*, lean_object*, lean_object*, lean_object*, lean_object*, lean_object*, lean_object*, lean_object*, lean_object*, lean_object*, lean_object*); -lean_object* l_Lean_Meta_Grind_Arith_CommRing_getRing(lean_object*, lean_object*, lean_object*, lean_object*, lean_object*, lean_object*, lean_object*, lean_object*, lean_object*, lean_object*); -static lean_object* l___private_Lean_Meta_Tactic_Grind_Arith_CommRing_DenoteExpr_0__Lean_Meta_Grind_Arith_CommRing_denoteNum___closed__1; -LEAN_EXPORT lean_object* l_Lean_Grind_CommRing_Expr_denoteExpr_go___boxed(lean_object*, lean_object*, lean_object*, lean_object*, lean_object*, lean_object*, lean_object*, lean_object*, lean_object*, lean_object*, lean_object*); +LEAN_EXPORT lean_object* l_Lean_Meta_Grind_Arith_CommRing_PolyDerivation_denoteExpr___rarg___boxed(lean_object*, lean_object*, lean_object*); +LEAN_EXPORT lean_object* l_Lean_Meta_Grind_Arith_CommRing_EqCnstr_denoteExpr___rarg(lean_object*, lean_object*, lean_object*); +static lean_object* l___private_Lean_Meta_Tactic_Grind_Arith_CommRing_DenoteExpr_0__Lean_Meta_Grind_Arith_CommRing_denoteNum___rarg___lambda__1___closed__6; +LEAN_EXPORT lean_object* l_Lean_Grind_CommRing_Expr_denoteExpr___rarg(lean_object*, lean_object*, lean_object*); +LEAN_EXPORT lean_object* l_Lean_Meta_Grind_Arith_CommRing_EqCnstr_denoteExpr(lean_object*); +LEAN_EXPORT lean_object* 
l_Lean_Meta_Grind_Arith_CommRing_DiseqCnstr_denoteExpr___rarg___boxed(lean_object*, lean_object*, lean_object*); lean_object* l_Lean_mkAppB(lean_object*, lean_object*, lean_object*); -LEAN_EXPORT lean_object* l_Lean_Grind_CommRing_Power_denoteExpr___boxed(lean_object*, lean_object*, lean_object*, lean_object*, lean_object*, lean_object*, lean_object*, lean_object*, lean_object*, lean_object*, lean_object*); -static lean_object* l_Lean_Grind_CommRing_Mon_denoteExpr___closed__1; -LEAN_EXPORT lean_object* l_Lean_Grind_CommRing_Poly_denoteExpr_denoteTerm(lean_object*, lean_object*, lean_object*, lean_object*, lean_object*, lean_object*, lean_object*, lean_object*, lean_object*, lean_object*, lean_object*, lean_object*); -LEAN_EXPORT lean_object* l___private_Lean_Meta_Tactic_Grind_Arith_CommRing_DenoteExpr_0__Lean_Meta_Grind_Arith_CommRing_denoteNum(lean_object*, lean_object*, lean_object*, lean_object*, lean_object*, lean_object*, lean_object*, lean_object*, lean_object*, lean_object*, lean_object*); -LEAN_EXPORT lean_object* l_Lean_Meta_Grind_Arith_CommRing_EqCnstr_denoteExpr___boxed(lean_object*, lean_object*, lean_object*, lean_object*, lean_object*, lean_object*, lean_object*, lean_object*, lean_object*, lean_object*, lean_object*); -static lean_object* l___private_Lean_Meta_Tactic_Grind_Arith_CommRing_DenoteExpr_0__Lean_Meta_Grind_Arith_CommRing_denoteNum___closed__5; -LEAN_EXPORT lean_object* l_Lean_Grind_CommRing_Expr_denoteExpr(lean_object*, lean_object*, lean_object*, lean_object*, lean_object*, lean_object*, lean_object*, lean_object*, lean_object*, lean_object*, lean_object*); -static lean_object* l___private_Lean_Meta_Tactic_Grind_Arith_CommRing_DenoteExpr_0__Lean_Meta_Grind_Arith_CommRing_denoteNum___closed__3; -static lean_object* l___private_Lean_Meta_Tactic_Grind_Arith_CommRing_DenoteExpr_0__Lean_Meta_Grind_Arith_CommRing_denoteNum___closed__4; +LEAN_EXPORT lean_object* l_Lean_Grind_CommRing_Power_denoteExpr___rarg___lambda__2(lean_object*, lean_object*, lean_object*, lean_object*, lean_object*); +static lean_object* l___private_Lean_Meta_Tactic_Grind_Arith_CommRing_DenoteExpr_0__Lean_Meta_Grind_Arith_CommRing_denoteNum___rarg___lambda__1___closed__2; +LEAN_EXPORT lean_object* l_Lean_Grind_CommRing_Expr_denoteExpr_go___rarg___lambda__4(lean_object*, lean_object*, lean_object*, lean_object*, lean_object*, lean_object*); +LEAN_EXPORT lean_object* l_Lean_Grind_CommRing_Poly_denoteExpr_denoteTerm(lean_object*); +lean_object* l_Lean_Level_succ___override(lean_object*); +LEAN_EXPORT lean_object* l___private_Lean_Meta_Tactic_Grind_Arith_CommRing_DenoteExpr_0__Lean_Meta_Grind_Arith_CommRing_denoteNum(lean_object*); +static lean_object* l___private_Lean_Meta_Tactic_Grind_Arith_CommRing_DenoteExpr_0__Lean_Meta_Grind_Arith_CommRing_denoteNum___rarg___lambda__1___closed__3; +LEAN_EXPORT lean_object* l_Lean_Grind_CommRing_Poly_denoteExpr_go___rarg___lambda__3(lean_object*, lean_object*, lean_object*, lean_object*, lean_object*, lean_object*); +LEAN_EXPORT lean_object* l_Lean_Grind_CommRing_Poly_denoteExpr_go___rarg___lambda__4(lean_object*, lean_object*, lean_object*, lean_object*, lean_object*, lean_object*, lean_object*, lean_object*); +LEAN_EXPORT lean_object* l_Lean_Grind_CommRing_Expr_denoteExpr(lean_object*); +LEAN_EXPORT lean_object* l_Lean_Grind_CommRing_Expr_denoteExpr_go___rarg___lambda__2(lean_object*, lean_object*, lean_object*); +LEAN_EXPORT lean_object* l_Lean_Meta_Grind_Arith_CommRing_PolyDerivation_denoteExpr___rarg(lean_object*, lean_object*, lean_object*); +LEAN_EXPORT 
lean_object* l_Lean_Grind_CommRing_Expr_denoteExpr_go___rarg___lambda__3(lean_object*, lean_object*, lean_object*, lean_object*, lean_object*); +static lean_object* l___private_Lean_Meta_Tactic_Grind_Arith_CommRing_DenoteExpr_0__Lean_Meta_Grind_Arith_CommRing_denoteNum___rarg___lambda__1___closed__1; +LEAN_EXPORT lean_object* l_Lean_Meta_Grind_Arith_CommRing_DiseqCnstr_denoteExpr___rarg___lambda__2(lean_object*, lean_object*, lean_object*, lean_object*, lean_object*); +static lean_object* l___private_Lean_Meta_Tactic_Grind_Arith_CommRing_DenoteExpr_0__Lean_Meta_Grind_Arith_CommRing_denoteNum___rarg___lambda__1___closed__7; lean_object* lean_nat_to_int(lean_object*); +LEAN_EXPORT lean_object* l_Lean_Grind_CommRing_Expr_denoteExpr_go___rarg___lambda__8(lean_object*, lean_object*, lean_object*, lean_object*, lean_object*, lean_object*); +LEAN_EXPORT lean_object* l_Lean_Grind_CommRing_Poly_denoteExpr_go___rarg(lean_object*, lean_object*, lean_object*, lean_object*); +static lean_object* l___private_Lean_Meta_Tactic_Grind_Arith_CommRing_DenoteExpr_0__Lean_Meta_Grind_Arith_CommRing_mkEq___rarg___lambda__1___closed__1; +LEAN_EXPORT lean_object* l___private_Lean_Meta_Tactic_Grind_Arith_CommRing_DenoteExpr_0__Lean_Meta_Grind_Arith_CommRing_denoteNum___rarg___lambda__1___boxed(lean_object*, lean_object*, lean_object*); +static lean_object* l_Lean_Grind_CommRing_Mon_denoteExpr___rarg___closed__1; lean_object* l_outOfBounds___rarg(lean_object*); -LEAN_EXPORT lean_object* l_Lean_Grind_CommRing_Mon_denoteExpr(lean_object*, lean_object*, lean_object*, lean_object*, lean_object*, lean_object*, lean_object*, lean_object*, lean_object*, lean_object*, lean_object*); -LEAN_EXPORT lean_object* l_Lean_Grind_CommRing_Mon_denoteExpr_go___boxed(lean_object*, lean_object*, lean_object*, lean_object*, lean_object*, lean_object*, lean_object*, lean_object*, lean_object*, lean_object*, lean_object*, lean_object*); -static lean_object* l___private_Lean_Meta_Tactic_Grind_Arith_CommRing_DenoteExpr_0__Lean_Meta_Grind_Arith_CommRing_denoteNum___closed__6; -lean_object* l_Lean_Meta_mkEq(lean_object*, lean_object*, lean_object*, lean_object*, lean_object*, lean_object*, lean_object*); -LEAN_EXPORT lean_object* l_Lean_Meta_Grind_Arith_CommRing_PolyDerivation_denoteExpr(lean_object*, lean_object*, lean_object*, lean_object*, lean_object*, lean_object*, lean_object*, lean_object*, lean_object*, lean_object*, lean_object*); -LEAN_EXPORT lean_object* l_Lean_Grind_CommRing_Power_denoteExpr(lean_object*, lean_object*, lean_object*, lean_object*, lean_object*, lean_object*, lean_object*, lean_object*, lean_object*, lean_object*, lean_object*); +LEAN_EXPORT lean_object* l_Lean_Grind_CommRing_Mon_denoteExpr(lean_object*); +LEAN_EXPORT lean_object* l_Lean_Meta_Grind_Arith_CommRing_DiseqCnstr_denoteExpr___rarg___lambda__1(lean_object*, lean_object*); +LEAN_EXPORT lean_object* l_Lean_Grind_CommRing_Expr_denoteExpr_go___rarg(lean_object*, lean_object*, lean_object*); +static lean_object* l___private_Lean_Meta_Tactic_Grind_Arith_CommRing_DenoteExpr_0__Lean_Meta_Grind_Arith_CommRing_denoteNum___rarg___lambda__1___closed__8; +LEAN_EXPORT lean_object* l_Lean_Meta_Grind_Arith_CommRing_PolyDerivation_denoteExpr(lean_object*); +LEAN_EXPORT lean_object* l___private_Lean_Meta_Tactic_Grind_Arith_CommRing_DenoteExpr_0__Lean_Meta_Grind_Arith_CommRing_mkEq___rarg(lean_object*, lean_object*, lean_object*, lean_object*); +LEAN_EXPORT lean_object* l_Lean_Grind_CommRing_Power_denoteExpr(lean_object*); extern lean_object* l_Lean_instInhabitedExpr; 
-LEAN_EXPORT lean_object* l___private_Lean_Meta_Tactic_Grind_Arith_CommRing_DenoteExpr_0__Lean_Meta_Grind_Arith_CommRing_denoteNum___boxed(lean_object*, lean_object*, lean_object*, lean_object*, lean_object*, lean_object*, lean_object*, lean_object*, lean_object*, lean_object*, lean_object*); +LEAN_EXPORT lean_object* l_Lean_Grind_CommRing_Poly_denoteExpr_denoteTerm___rarg___lambda__1(lean_object*, lean_object*, lean_object*, lean_object*); +LEAN_EXPORT lean_object* l___private_Lean_Meta_Tactic_Grind_Arith_CommRing_DenoteExpr_0__Lean_Meta_Grind_Arith_CommRing_denoteNum___rarg(lean_object*, lean_object*, lean_object*); +static lean_object* l___private_Lean_Meta_Tactic_Grind_Arith_CommRing_DenoteExpr_0__Lean_Meta_Grind_Arith_CommRing_denoteNum___rarg___lambda__1___closed__5; +lean_object* l_Lean_Name_str___override(lean_object*, lean_object*); lean_object* l_Lean_mkNot(lean_object*); -LEAN_EXPORT lean_object* l_Lean_Meta_Grind_Arith_CommRing_PolyDerivation_denoteExpr___boxed(lean_object*, lean_object*, lean_object*, lean_object*, lean_object*, lean_object*, lean_object*, lean_object*, lean_object*, lean_object*, lean_object*); -LEAN_EXPORT lean_object* l_Lean_Grind_CommRing_Expr_denoteExpr___boxed(lean_object*, lean_object*, lean_object*, lean_object*, lean_object*, lean_object*, lean_object*, lean_object*, lean_object*, lean_object*, lean_object*); -LEAN_EXPORT lean_object* l_Lean_Meta_Grind_Arith_CommRing_DiseqCnstr_denoteExpr___boxed(lean_object*, lean_object*, lean_object*, lean_object*, lean_object*, lean_object*, lean_object*, lean_object*, lean_object*, lean_object*, lean_object*); -LEAN_EXPORT lean_object* l_Lean_Grind_CommRing_Mon_denoteExpr_go(lean_object*, lean_object*, lean_object*, lean_object*, lean_object*, lean_object*, lean_object*, lean_object*, lean_object*, lean_object*, lean_object*, lean_object*); +LEAN_EXPORT lean_object* l_Lean_Grind_CommRing_Expr_denoteExpr_go___rarg___lambda__9(lean_object*, lean_object*, lean_object*, lean_object*, lean_object*, lean_object*); +LEAN_EXPORT lean_object* l_Lean_Grind_CommRing_Mon_denoteExpr_go___rarg___lambda__2(lean_object*, lean_object*, lean_object*, lean_object*, lean_object*, lean_object*, lean_object*); +LEAN_EXPORT lean_object* l_Lean_Meta_Grind_Arith_CommRing_EqCnstr_denoteExpr___rarg___lambda__1(lean_object*, lean_object*, lean_object*, lean_object*); +LEAN_EXPORT lean_object* l_Lean_Meta_Grind_Arith_CommRing_DiseqCnstr_denoteExpr___rarg(lean_object*, lean_object*, lean_object*); +LEAN_EXPORT lean_object* l_Lean_Grind_CommRing_Expr_denoteExpr_go___rarg___lambda__10(lean_object*, lean_object*, lean_object*, lean_object*, lean_object*, lean_object*); +LEAN_EXPORT lean_object* l_Lean_Grind_CommRing_Mon_denoteExpr_go___rarg___lambda__1(lean_object*, lean_object*, lean_object*, lean_object*, lean_object*, lean_object*); +LEAN_EXPORT lean_object* l_Lean_Grind_CommRing_Poly_denoteExpr_denoteTerm___rarg___lambda__3(lean_object*, lean_object*, lean_object*, lean_object*, lean_object*, lean_object*); +LEAN_EXPORT lean_object* l_Lean_Grind_CommRing_Expr_denoteExpr_go___rarg___lambda__1(lean_object*, lean_object*, lean_object*); +LEAN_EXPORT lean_object* l_Lean_Grind_CommRing_Poly_denoteExpr___rarg(lean_object*, lean_object*, lean_object*); +LEAN_EXPORT lean_object* l_Lean_Grind_CommRing_Mon_denoteExpr_go(lean_object*); lean_object* lean_nat_abs(lean_object*); +LEAN_EXPORT lean_object* l___private_Lean_Meta_Tactic_Grind_Arith_CommRing_DenoteExpr_0__Lean_Meta_Grind_Arith_CommRing_denoteNum___rarg___lambda__1(lean_object*, lean_object*, 
lean_object*); +LEAN_EXPORT lean_object* l_Lean_Grind_CommRing_Power_denoteExpr___rarg(lean_object*, lean_object*, lean_object*); lean_object* l_Lean_Expr_app___override(lean_object*, lean_object*); +LEAN_EXPORT lean_object* l___private_Lean_Meta_Tactic_Grind_Arith_CommRing_DenoteExpr_0__Lean_Meta_Grind_Arith_CommRing_mkEq___rarg___lambda__1(lean_object*, lean_object*, lean_object*, lean_object*); uint8_t lean_nat_dec_eq(lean_object*, lean_object*); lean_object* l_Lean_mkApp3(lean_object*, lean_object*, lean_object*, lean_object*); uint8_t lean_nat_dec_lt(lean_object*, lean_object*); -LEAN_EXPORT lean_object* l_Lean_Grind_CommRing_Poly_denoteExpr(lean_object*, lean_object*, lean_object*, lean_object*, lean_object*, lean_object*, lean_object*, lean_object*, lean_object*, lean_object*, lean_object*); +LEAN_EXPORT lean_object* l_Lean_Grind_CommRing_Poly_denoteExpr(lean_object*); lean_object* l_Lean_mkRawNatLit(lean_object*); -static lean_object* l___private_Lean_Meta_Tactic_Grind_Arith_CommRing_DenoteExpr_0__Lean_Meta_Grind_Arith_CommRing_denoteNum___closed__8; lean_object* l_Lean_Name_mkStr2(lean_object*, lean_object*); +LEAN_EXPORT lean_object* l_Lean_Grind_CommRing_Poly_denoteExpr_go___rarg___lambda__2(lean_object*, lean_object*, lean_object*, lean_object*, lean_object*, lean_object*); +LEAN_EXPORT lean_object* l_Lean_Grind_CommRing_Poly_denoteExpr_go___rarg___lambda__1(lean_object*, lean_object*, lean_object*, lean_object*); +LEAN_EXPORT lean_object* l_Lean_Grind_CommRing_Expr_denoteExpr_go___rarg___lambda__5(lean_object*, lean_object*, lean_object*, lean_object*, lean_object*, lean_object*); +LEAN_EXPORT lean_object* l___private_Lean_Meta_Tactic_Grind_Arith_CommRing_DenoteExpr_0__Lean_Meta_Grind_Arith_CommRing_mkEq(lean_object*); +LEAN_EXPORT lean_object* l_Lean_Grind_CommRing_Poly_denoteExpr_denoteTerm___rarg(lean_object*, lean_object*, lean_object*, lean_object*); uint8_t lean_int_dec_lt(lean_object*, lean_object*); -LEAN_EXPORT lean_object* l_Lean_Grind_CommRing_Poly_denoteExpr_go___boxed(lean_object*, lean_object*, lean_object*, lean_object*, lean_object*, lean_object*, lean_object*, lean_object*, lean_object*, lean_object*, lean_object*, lean_object*); +static lean_object* l___private_Lean_Meta_Tactic_Grind_Arith_CommRing_DenoteExpr_0__Lean_Meta_Grind_Arith_CommRing_denoteNum___rarg___lambda__1___closed__4; lean_object* l_Lean_PersistentArray_get_x21___rarg(lean_object*, lean_object*, lean_object*); -LEAN_EXPORT lean_object* l_Lean_Meta_Grind_Arith_CommRing_DiseqCnstr_denoteExpr(lean_object*, lean_object*, lean_object*, lean_object*, lean_object*, lean_object*, lean_object*, lean_object*, lean_object*, lean_object*, lean_object*); +LEAN_EXPORT lean_object* l_Lean_Grind_CommRing_Expr_denoteExpr_go___rarg___lambda__1___boxed(lean_object*, lean_object*, lean_object*); +LEAN_EXPORT lean_object* l_Lean_Grind_CommRing_Expr_denoteExpr_go___rarg___lambda__7(lean_object*, lean_object*, lean_object*, lean_object*, lean_object*, lean_object*); +LEAN_EXPORT lean_object* l_Lean_Grind_CommRing_Mon_denoteExpr___rarg(lean_object*, lean_object*, lean_object*); +LEAN_EXPORT lean_object* l_Lean_Meta_Grind_Arith_CommRing_DiseqCnstr_denoteExpr(lean_object*); lean_object* l_Lean_Name_mkStr4(lean_object*, lean_object*, lean_object*, lean_object*); uint8_t lean_int_dec_eq(lean_object*, lean_object*); lean_object* l_Lean_Meta_Grind_Arith_CommRing_PolyDerivation_p(lean_object*); -LEAN_EXPORT lean_object* l_Lean_Grind_CommRing_Poly_denoteExpr___boxed(lean_object*, lean_object*, lean_object*, lean_object*, 
lean_object*, lean_object*, lean_object*, lean_object*, lean_object*, lean_object*, lean_object*); -LEAN_EXPORT lean_object* l_Lean_Grind_CommRing_Poly_denoteExpr_go(lean_object*, lean_object*, lean_object*, lean_object*, lean_object*, lean_object*, lean_object*, lean_object*, lean_object*, lean_object*, lean_object*, lean_object*); -LEAN_EXPORT lean_object* l_Lean_Grind_CommRing_Poly_denoteExpr_denoteTerm___boxed(lean_object*, lean_object*, lean_object*, lean_object*, lean_object*, lean_object*, lean_object*, lean_object*, lean_object*, lean_object*, lean_object*, lean_object*); -static lean_object* _init_l___private_Lean_Meta_Tactic_Grind_Arith_CommRing_DenoteExpr_0__Lean_Meta_Grind_Arith_CommRing_denoteNum___closed__1() { +LEAN_EXPORT lean_object* l_Lean_Grind_CommRing_Poly_denoteExpr_denoteTerm___rarg___lambda__2(lean_object*, lean_object*, lean_object*, lean_object*, lean_object*, lean_object*); +LEAN_EXPORT lean_object* l_Lean_Grind_CommRing_Expr_denoteExpr_go___rarg___lambda__11(lean_object*, lean_object*, lean_object*, lean_object*); +LEAN_EXPORT lean_object* l_Lean_Grind_CommRing_Expr_denoteExpr_go___rarg___lambda__12(lean_object*, lean_object*, lean_object*, lean_object*, lean_object*, lean_object*); +LEAN_EXPORT lean_object* l_Lean_Grind_CommRing_Expr_denoteExpr_go___rarg___lambda__6(lean_object*, lean_object*, lean_object*, lean_object*); +LEAN_EXPORT lean_object* l_Lean_Grind_CommRing_Poly_denoteExpr_go(lean_object*); +static lean_object* l___private_Lean_Meta_Tactic_Grind_Arith_CommRing_DenoteExpr_0__Lean_Meta_Grind_Arith_CommRing_mkEq___rarg___lambda__1___closed__2; +LEAN_EXPORT lean_object* l_Lean_Meta_Grind_Arith_CommRing_DiseqCnstr_denoteExpr___rarg___lambda__3(lean_object*, lean_object*, lean_object*, lean_object*); +LEAN_EXPORT lean_object* l_Lean_Grind_CommRing_Mon_denoteExpr_go___rarg(lean_object*, lean_object*, lean_object*, lean_object*); +static lean_object* _init_l___private_Lean_Meta_Tactic_Grind_Arith_CommRing_DenoteExpr_0__Lean_Meta_Grind_Arith_CommRing_denoteNum___rarg___lambda__1___closed__1() { _start: { lean_object* x_1; @@ -75,7 +112,7 @@ x_1 = lean_mk_string_unchecked("Lean", 4, 4); return x_1; } } -static lean_object* _init_l___private_Lean_Meta_Tactic_Grind_Arith_CommRing_DenoteExpr_0__Lean_Meta_Grind_Arith_CommRing_denoteNum___closed__2() { +static lean_object* _init_l___private_Lean_Meta_Tactic_Grind_Arith_CommRing_DenoteExpr_0__Lean_Meta_Grind_Arith_CommRing_denoteNum___rarg___lambda__1___closed__2() { _start: { lean_object* x_1; @@ -83,7 +120,7 @@ x_1 = lean_mk_string_unchecked("Grind", 5, 5); return x_1; } } -static lean_object* _init_l___private_Lean_Meta_Tactic_Grind_Arith_CommRing_DenoteExpr_0__Lean_Meta_Grind_Arith_CommRing_denoteNum___closed__3() { +static lean_object* _init_l___private_Lean_Meta_Tactic_Grind_Arith_CommRing_DenoteExpr_0__Lean_Meta_Grind_Arith_CommRing_denoteNum___rarg___lambda__1___closed__3() { _start: { lean_object* x_1; @@ -91,7 +128,7 @@ x_1 = lean_mk_string_unchecked("CommRing", 8, 8); return x_1; } } -static lean_object* _init_l___private_Lean_Meta_Tactic_Grind_Arith_CommRing_DenoteExpr_0__Lean_Meta_Grind_Arith_CommRing_denoteNum___closed__4() { +static lean_object* _init_l___private_Lean_Meta_Tactic_Grind_Arith_CommRing_DenoteExpr_0__Lean_Meta_Grind_Arith_CommRing_denoteNum___rarg___lambda__1___closed__4() { _start: { lean_object* x_1; @@ -99,19 +136,19 @@ x_1 = lean_mk_string_unchecked("ofNat", 5, 5); return x_1; } } -static lean_object* 
_init_l___private_Lean_Meta_Tactic_Grind_Arith_CommRing_DenoteExpr_0__Lean_Meta_Grind_Arith_CommRing_denoteNum___closed__5() { +static lean_object* _init_l___private_Lean_Meta_Tactic_Grind_Arith_CommRing_DenoteExpr_0__Lean_Meta_Grind_Arith_CommRing_denoteNum___rarg___lambda__1___closed__5() { _start: { lean_object* x_1; lean_object* x_2; lean_object* x_3; lean_object* x_4; lean_object* x_5; -x_1 = l___private_Lean_Meta_Tactic_Grind_Arith_CommRing_DenoteExpr_0__Lean_Meta_Grind_Arith_CommRing_denoteNum___closed__1; -x_2 = l___private_Lean_Meta_Tactic_Grind_Arith_CommRing_DenoteExpr_0__Lean_Meta_Grind_Arith_CommRing_denoteNum___closed__2; -x_3 = l___private_Lean_Meta_Tactic_Grind_Arith_CommRing_DenoteExpr_0__Lean_Meta_Grind_Arith_CommRing_denoteNum___closed__3; -x_4 = l___private_Lean_Meta_Tactic_Grind_Arith_CommRing_DenoteExpr_0__Lean_Meta_Grind_Arith_CommRing_denoteNum___closed__4; +x_1 = l___private_Lean_Meta_Tactic_Grind_Arith_CommRing_DenoteExpr_0__Lean_Meta_Grind_Arith_CommRing_denoteNum___rarg___lambda__1___closed__1; +x_2 = l___private_Lean_Meta_Tactic_Grind_Arith_CommRing_DenoteExpr_0__Lean_Meta_Grind_Arith_CommRing_denoteNum___rarg___lambda__1___closed__2; +x_3 = l___private_Lean_Meta_Tactic_Grind_Arith_CommRing_DenoteExpr_0__Lean_Meta_Grind_Arith_CommRing_denoteNum___rarg___lambda__1___closed__3; +x_4 = l___private_Lean_Meta_Tactic_Grind_Arith_CommRing_DenoteExpr_0__Lean_Meta_Grind_Arith_CommRing_denoteNum___rarg___lambda__1___closed__4; x_5 = l_Lean_Name_mkStr4(x_1, x_2, x_3, x_4); return x_5; } } -static lean_object* _init_l___private_Lean_Meta_Tactic_Grind_Arith_CommRing_DenoteExpr_0__Lean_Meta_Grind_Arith_CommRing_denoteNum___closed__6() { +static lean_object* _init_l___private_Lean_Meta_Tactic_Grind_Arith_CommRing_DenoteExpr_0__Lean_Meta_Grind_Arith_CommRing_denoteNum___rarg___lambda__1___closed__6() { _start: { lean_object* x_1; @@ -119,17 +156,17 @@ x_1 = lean_mk_string_unchecked("OfNat", 5, 5); return x_1; } } -static lean_object* _init_l___private_Lean_Meta_Tactic_Grind_Arith_CommRing_DenoteExpr_0__Lean_Meta_Grind_Arith_CommRing_denoteNum___closed__7() { +static lean_object* _init_l___private_Lean_Meta_Tactic_Grind_Arith_CommRing_DenoteExpr_0__Lean_Meta_Grind_Arith_CommRing_denoteNum___rarg___lambda__1___closed__7() { _start: { lean_object* x_1; lean_object* x_2; lean_object* x_3; -x_1 = l___private_Lean_Meta_Tactic_Grind_Arith_CommRing_DenoteExpr_0__Lean_Meta_Grind_Arith_CommRing_denoteNum___closed__6; -x_2 = l___private_Lean_Meta_Tactic_Grind_Arith_CommRing_DenoteExpr_0__Lean_Meta_Grind_Arith_CommRing_denoteNum___closed__4; +x_1 = l___private_Lean_Meta_Tactic_Grind_Arith_CommRing_DenoteExpr_0__Lean_Meta_Grind_Arith_CommRing_denoteNum___rarg___lambda__1___closed__6; +x_2 = l___private_Lean_Meta_Tactic_Grind_Arith_CommRing_DenoteExpr_0__Lean_Meta_Grind_Arith_CommRing_denoteNum___rarg___lambda__1___closed__4; x_3 = l_Lean_Name_mkStr2(x_1, x_2); return x_3; } } -static lean_object* _init_l___private_Lean_Meta_Tactic_Grind_Arith_CommRing_DenoteExpr_0__Lean_Meta_Grind_Arith_CommRing_denoteNum___closed__8() { +static lean_object* _init_l___private_Lean_Meta_Tactic_Grind_Arith_CommRing_DenoteExpr_0__Lean_Meta_Grind_Arith_CommRing_denoteNum___rarg___lambda__1___closed__8() { _start: { lean_object* x_1; lean_object* x_2; @@ -138,2265 +175,1216 @@ x_2 = lean_nat_to_int(x_1); return x_2; } } -LEAN_EXPORT lean_object* l___private_Lean_Meta_Tactic_Grind_Arith_CommRing_DenoteExpr_0__Lean_Meta_Grind_Arith_CommRing_denoteNum(lean_object* x_1, lean_object* x_2, lean_object* x_3, 
lean_object* x_4, lean_object* x_5, lean_object* x_6, lean_object* x_7, lean_object* x_8, lean_object* x_9, lean_object* x_10, lean_object* x_11) { +LEAN_EXPORT lean_object* l___private_Lean_Meta_Tactic_Grind_Arith_CommRing_DenoteExpr_0__Lean_Meta_Grind_Arith_CommRing_denoteNum___rarg___lambda__1(lean_object* x_1, lean_object* x_2, lean_object* x_3) { _start: { -lean_object* x_12; -x_12 = l_Lean_Meta_Grind_Arith_CommRing_getRing(x_2, x_3, x_4, x_5, x_6, x_7, x_8, x_9, x_10, x_11); -if (lean_obj_tag(x_12) == 0) -{ -uint8_t x_13; -x_13 = !lean_is_exclusive(x_12); -if (x_13 == 0) +lean_object* x_4; lean_object* x_5; lean_object* x_6; lean_object* x_7; lean_object* x_8; lean_object* x_9; lean_object* x_10; lean_object* x_11; lean_object* x_12; lean_object* x_13; lean_object* x_14; lean_object* x_15; lean_object* x_16; lean_object* x_17; uint8_t x_18; +x_4 = lean_nat_abs(x_1); +x_5 = l_Lean_mkRawNatLit(x_4); +x_6 = lean_ctor_get(x_3, 2); +lean_inc(x_6); +x_7 = lean_box(0); +x_8 = lean_alloc_ctor(1, 2, 0); +lean_ctor_set(x_8, 0, x_6); +lean_ctor_set(x_8, 1, x_7); +x_9 = l___private_Lean_Meta_Tactic_Grind_Arith_CommRing_DenoteExpr_0__Lean_Meta_Grind_Arith_CommRing_denoteNum___rarg___lambda__1___closed__5; +lean_inc(x_8); +x_10 = l_Lean_Expr_const___override(x_9, x_8); +x_11 = lean_ctor_get(x_3, 1); +lean_inc(x_11); +x_12 = lean_ctor_get(x_3, 3); +lean_inc(x_12); +lean_inc(x_5); +lean_inc(x_11); +x_13 = l_Lean_mkApp3(x_10, x_11, x_12, x_5); +x_14 = l___private_Lean_Meta_Tactic_Grind_Arith_CommRing_DenoteExpr_0__Lean_Meta_Grind_Arith_CommRing_denoteNum___rarg___lambda__1___closed__7; +x_15 = l_Lean_Expr_const___override(x_14, x_8); +x_16 = l_Lean_mkApp3(x_15, x_11, x_5, x_13); +x_17 = l___private_Lean_Meta_Tactic_Grind_Arith_CommRing_DenoteExpr_0__Lean_Meta_Grind_Arith_CommRing_denoteNum___rarg___lambda__1___closed__8; +x_18 = lean_int_dec_lt(x_1, x_17); +if (x_18 == 0) { -lean_object* x_14; lean_object* x_15; lean_object* x_16; lean_object* x_17; lean_object* x_18; lean_object* x_19; lean_object* x_20; lean_object* x_21; lean_object* x_22; lean_object* x_23; lean_object* x_24; lean_object* x_25; lean_object* x_26; lean_object* x_27; lean_object* x_28; uint8_t x_29; -x_14 = lean_ctor_get(x_12, 0); -x_15 = lean_nat_abs(x_1); -x_16 = l_Lean_mkRawNatLit(x_15); -x_17 = lean_ctor_get(x_14, 2); -lean_inc(x_17); -x_18 = lean_box(0); -x_19 = lean_alloc_ctor(1, 2, 0); -lean_ctor_set(x_19, 0, x_17); -lean_ctor_set(x_19, 1, x_18); -x_20 = l___private_Lean_Meta_Tactic_Grind_Arith_CommRing_DenoteExpr_0__Lean_Meta_Grind_Arith_CommRing_denoteNum___closed__5; +lean_object* x_19; lean_object* x_20; lean_object* x_21; +lean_dec(x_3); +x_19 = lean_ctor_get(x_2, 0); lean_inc(x_19); -x_21 = l_Lean_Expr_const___override(x_20, x_19); -x_22 = lean_ctor_get(x_14, 1); -lean_inc(x_22); -x_23 = lean_ctor_get(x_14, 3); -lean_inc(x_23); -lean_inc(x_16); -lean_inc(x_22); -x_24 = l_Lean_mkApp3(x_21, x_22, x_23, x_16); -x_25 = l___private_Lean_Meta_Tactic_Grind_Arith_CommRing_DenoteExpr_0__Lean_Meta_Grind_Arith_CommRing_denoteNum___closed__7; -x_26 = l_Lean_Expr_const___override(x_25, x_19); -x_27 = l_Lean_mkApp3(x_26, x_22, x_16, x_24); -x_28 = l___private_Lean_Meta_Tactic_Grind_Arith_CommRing_DenoteExpr_0__Lean_Meta_Grind_Arith_CommRing_denoteNum___closed__8; -x_29 = lean_int_dec_lt(x_1, x_28); -if (x_29 == 0) -{ -lean_dec(x_14); -lean_ctor_set(x_12, 0, x_27); -return x_12; +lean_dec(x_2); +x_20 = lean_ctor_get(x_19, 1); +lean_inc(x_20); +lean_dec(x_19); +x_21 = lean_apply_2(x_20, lean_box(0), x_16); +return x_21; } else { 
-lean_object* x_30; lean_object* x_31; -x_30 = lean_ctor_get(x_14, 9); -lean_inc(x_30); -lean_dec(x_14); -x_31 = l_Lean_Expr_app___override(x_30, x_27); -lean_ctor_set(x_12, 0, x_31); -return x_12; +lean_object* x_22; lean_object* x_23; lean_object* x_24; lean_object* x_25; lean_object* x_26; +x_22 = lean_ctor_get(x_2, 0); +lean_inc(x_22); +lean_dec(x_2); +x_23 = lean_ctor_get(x_22, 1); +lean_inc(x_23); +lean_dec(x_22); +x_24 = lean_ctor_get(x_3, 9); +lean_inc(x_24); +lean_dec(x_3); +x_25 = l_Lean_Expr_app___override(x_24, x_16); +x_26 = lean_apply_2(x_23, lean_box(0), x_25); +return x_26; } } -else -{ -lean_object* x_32; lean_object* x_33; lean_object* x_34; lean_object* x_35; lean_object* x_36; lean_object* x_37; lean_object* x_38; lean_object* x_39; lean_object* x_40; lean_object* x_41; lean_object* x_42; lean_object* x_43; lean_object* x_44; lean_object* x_45; lean_object* x_46; lean_object* x_47; uint8_t x_48; -x_32 = lean_ctor_get(x_12, 0); -x_33 = lean_ctor_get(x_12, 1); -lean_inc(x_33); -lean_inc(x_32); -lean_dec(x_12); -x_34 = lean_nat_abs(x_1); -x_35 = l_Lean_mkRawNatLit(x_34); -x_36 = lean_ctor_get(x_32, 2); -lean_inc(x_36); -x_37 = lean_box(0); -x_38 = lean_alloc_ctor(1, 2, 0); -lean_ctor_set(x_38, 0, x_36); -lean_ctor_set(x_38, 1, x_37); -x_39 = l___private_Lean_Meta_Tactic_Grind_Arith_CommRing_DenoteExpr_0__Lean_Meta_Grind_Arith_CommRing_denoteNum___closed__5; -lean_inc(x_38); -x_40 = l_Lean_Expr_const___override(x_39, x_38); -x_41 = lean_ctor_get(x_32, 1); -lean_inc(x_41); -x_42 = lean_ctor_get(x_32, 3); -lean_inc(x_42); -lean_inc(x_35); -lean_inc(x_41); -x_43 = l_Lean_mkApp3(x_40, x_41, x_42, x_35); -x_44 = l___private_Lean_Meta_Tactic_Grind_Arith_CommRing_DenoteExpr_0__Lean_Meta_Grind_Arith_CommRing_denoteNum___closed__7; -x_45 = l_Lean_Expr_const___override(x_44, x_38); -x_46 = l_Lean_mkApp3(x_45, x_41, x_35, x_43); -x_47 = l___private_Lean_Meta_Tactic_Grind_Arith_CommRing_DenoteExpr_0__Lean_Meta_Grind_Arith_CommRing_denoteNum___closed__8; -x_48 = lean_int_dec_lt(x_1, x_47); -if (x_48 == 0) -{ -lean_object* x_49; -lean_dec(x_32); -x_49 = lean_alloc_ctor(0, 2, 0); -lean_ctor_set(x_49, 0, x_46); -lean_ctor_set(x_49, 1, x_33); -return x_49; } -else +LEAN_EXPORT lean_object* l___private_Lean_Meta_Tactic_Grind_Arith_CommRing_DenoteExpr_0__Lean_Meta_Grind_Arith_CommRing_denoteNum___rarg(lean_object* x_1, lean_object* x_2, lean_object* x_3) { +_start: { -lean_object* x_50; lean_object* x_51; lean_object* x_52; -x_50 = lean_ctor_get(x_32, 9); -lean_inc(x_50); -lean_dec(x_32); -x_51 = l_Lean_Expr_app___override(x_50, x_46); -x_52 = lean_alloc_ctor(0, 2, 0); -lean_ctor_set(x_52, 0, x_51); -lean_ctor_set(x_52, 1, x_33); -return x_52; -} +lean_object* x_4; lean_object* x_5; lean_object* x_6; +x_4 = lean_ctor_get(x_1, 1); +lean_inc(x_4); +x_5 = lean_alloc_closure((void*)(l___private_Lean_Meta_Tactic_Grind_Arith_CommRing_DenoteExpr_0__Lean_Meta_Grind_Arith_CommRing_denoteNum___rarg___lambda__1___boxed), 3, 2); +lean_closure_set(x_5, 0, x_3); +lean_closure_set(x_5, 1, x_1); +x_6 = lean_apply_4(x_4, lean_box(0), lean_box(0), x_2, x_5); +return x_6; } } -else -{ -uint8_t x_53; -x_53 = !lean_is_exclusive(x_12); -if (x_53 == 0) -{ -return x_12; -} -else +LEAN_EXPORT lean_object* l___private_Lean_Meta_Tactic_Grind_Arith_CommRing_DenoteExpr_0__Lean_Meta_Grind_Arith_CommRing_denoteNum(lean_object* x_1) { +_start: { -lean_object* x_54; lean_object* x_55; lean_object* x_56; -x_54 = lean_ctor_get(x_12, 0); -x_55 = lean_ctor_get(x_12, 1); -lean_inc(x_55); -lean_inc(x_54); -lean_dec(x_12); -x_56 = 
lean_alloc_ctor(1, 2, 0); -lean_ctor_set(x_56, 0, x_54); -lean_ctor_set(x_56, 1, x_55); -return x_56; +lean_object* x_2; +x_2 = lean_alloc_closure((void*)(l___private_Lean_Meta_Tactic_Grind_Arith_CommRing_DenoteExpr_0__Lean_Meta_Grind_Arith_CommRing_denoteNum___rarg), 3, 0); +return x_2; } } +LEAN_EXPORT lean_object* l___private_Lean_Meta_Tactic_Grind_Arith_CommRing_DenoteExpr_0__Lean_Meta_Grind_Arith_CommRing_denoteNum___rarg___lambda__1___boxed(lean_object* x_1, lean_object* x_2, lean_object* x_3) { +_start: +{ +lean_object* x_4; +x_4 = l___private_Lean_Meta_Tactic_Grind_Arith_CommRing_DenoteExpr_0__Lean_Meta_Grind_Arith_CommRing_denoteNum___rarg___lambda__1(x_1, x_2, x_3); +lean_dec(x_1); +return x_4; } } -LEAN_EXPORT lean_object* l___private_Lean_Meta_Tactic_Grind_Arith_CommRing_DenoteExpr_0__Lean_Meta_Grind_Arith_CommRing_denoteNum___boxed(lean_object* x_1, lean_object* x_2, lean_object* x_3, lean_object* x_4, lean_object* x_5, lean_object* x_6, lean_object* x_7, lean_object* x_8, lean_object* x_9, lean_object* x_10, lean_object* x_11) { +LEAN_EXPORT lean_object* l_Lean_Grind_CommRing_Power_denoteExpr___rarg___lambda__1(lean_object* x_1, lean_object* x_2, lean_object* x_3, lean_object* x_4) { _start: { -lean_object* x_12; -x_12 = l___private_Lean_Meta_Tactic_Grind_Arith_CommRing_DenoteExpr_0__Lean_Meta_Grind_Arith_CommRing_denoteNum(x_1, x_2, x_3, x_4, x_5, x_6, x_7, x_8, x_9, x_10, x_11); -lean_dec(x_10); -lean_dec(x_9); -lean_dec(x_8); -lean_dec(x_7); -lean_dec(x_6); +lean_object* x_5; lean_object* x_6; lean_object* x_7; lean_object* x_8; lean_object* x_9; lean_object* x_10; +x_5 = lean_ctor_get(x_1, 0); +lean_inc(x_5); +lean_dec(x_1); +x_6 = lean_ctor_get(x_5, 1); +lean_inc(x_6); lean_dec(x_5); +x_7 = lean_ctor_get(x_4, 10); +lean_inc(x_7); lean_dec(x_4); -lean_dec(x_3); -lean_dec(x_2); -lean_dec(x_1); -return x_12; +x_8 = l_Lean_mkNatLit(x_2); +x_9 = l_Lean_mkAppB(x_7, x_3, x_8); +x_10 = lean_apply_2(x_6, lean_box(0), x_9); +return x_10; } } -LEAN_EXPORT lean_object* l_Lean_Grind_CommRing_Power_denoteExpr(lean_object* x_1, lean_object* x_2, lean_object* x_3, lean_object* x_4, lean_object* x_5, lean_object* x_6, lean_object* x_7, lean_object* x_8, lean_object* x_9, lean_object* x_10, lean_object* x_11) { +LEAN_EXPORT lean_object* l_Lean_Grind_CommRing_Power_denoteExpr___rarg___lambda__2(lean_object* x_1, lean_object* x_2, lean_object* x_3, lean_object* x_4, lean_object* x_5) { _start: { -lean_object* x_12; -x_12 = l_Lean_Meta_Grind_Arith_CommRing_getRing(x_2, x_3, x_4, x_5, x_6, x_7, x_8, x_9, x_10, x_11); -if (lean_obj_tag(x_12) == 0) -{ -uint8_t x_13; -x_13 = !lean_is_exclusive(x_12); -if (x_13 == 0) -{ -lean_object* x_14; lean_object* x_15; lean_object* x_16; lean_object* x_17; lean_object* x_18; uint8_t x_19; lean_object* x_20; lean_object* x_21; uint8_t x_22; -x_14 = lean_ctor_get(x_12, 0); -x_15 = lean_ctor_get(x_12, 1); -x_16 = lean_ctor_get(x_14, 13); -lean_inc(x_16); -lean_dec(x_14); -x_17 = lean_ctor_get(x_1, 0); -lean_inc(x_17); -x_18 = lean_ctor_get(x_16, 2); -lean_inc(x_18); -x_19 = lean_nat_dec_lt(x_17, x_18); -lean_dec(x_18); -x_20 = lean_ctor_get(x_1, 1); -lean_inc(x_20); +lean_object* x_6; lean_object* x_7; lean_object* x_8; uint8_t x_9; lean_object* x_10; lean_object* x_11; uint8_t x_12; +x_6 = lean_ctor_get(x_5, 13); +lean_inc(x_6); +lean_dec(x_5); +x_7 = lean_ctor_get(x_1, 0); +lean_inc(x_7); +x_8 = lean_ctor_get(x_6, 2); +lean_inc(x_8); +x_9 = lean_nat_dec_lt(x_7, x_8); +lean_dec(x_8); +x_10 = lean_ctor_get(x_1, 1); +lean_inc(x_10); lean_dec(x_1); -x_21 = 
lean_unsigned_to_nat(1u); -x_22 = lean_nat_dec_eq(x_20, x_21); -if (x_19 == 0) +x_11 = lean_unsigned_to_nat(1u); +x_12 = lean_nat_dec_eq(x_10, x_11); +if (x_9 == 0) { -lean_object* x_23; lean_object* x_24; -lean_dec(x_17); -lean_dec(x_16); -x_23 = l_Lean_instInhabitedExpr; -x_24 = l_outOfBounds___rarg(x_23); -if (x_22 == 0) -{ -lean_object* x_25; -lean_free_object(x_12); -x_25 = l_Lean_Meta_Grind_Arith_CommRing_getRing(x_2, x_3, x_4, x_5, x_6, x_7, x_8, x_9, x_10, x_15); -if (lean_obj_tag(x_25) == 0) -{ -uint8_t x_26; -x_26 = !lean_is_exclusive(x_25); -if (x_26 == 0) -{ -lean_object* x_27; lean_object* x_28; lean_object* x_29; lean_object* x_30; -x_27 = lean_ctor_get(x_25, 0); -x_28 = lean_ctor_get(x_27, 10); -lean_inc(x_28); -lean_dec(x_27); -x_29 = l_Lean_mkNatLit(x_20); -x_30 = l_Lean_mkAppB(x_28, x_24, x_29); -lean_ctor_set(x_25, 0, x_30); -return x_25; +lean_object* x_13; lean_object* x_14; +lean_dec(x_7); +lean_dec(x_6); +x_13 = l_Lean_instInhabitedExpr; +x_14 = l_outOfBounds___rarg(x_13); +if (x_12 == 0) +{ +lean_object* x_15; lean_object* x_16; +x_15 = lean_alloc_closure((void*)(l_Lean_Grind_CommRing_Power_denoteExpr___rarg___lambda__1), 4, 3); +lean_closure_set(x_15, 0, x_2); +lean_closure_set(x_15, 1, x_10); +lean_closure_set(x_15, 2, x_14); +x_16 = lean_apply_4(x_3, lean_box(0), lean_box(0), x_4, x_15); +return x_16; } else { -lean_object* x_31; lean_object* x_32; lean_object* x_33; lean_object* x_34; lean_object* x_35; lean_object* x_36; -x_31 = lean_ctor_get(x_25, 0); -x_32 = lean_ctor_get(x_25, 1); -lean_inc(x_32); -lean_inc(x_31); -lean_dec(x_25); -x_33 = lean_ctor_get(x_31, 10); -lean_inc(x_33); -lean_dec(x_31); -x_34 = l_Lean_mkNatLit(x_20); -x_35 = l_Lean_mkAppB(x_33, x_24, x_34); -x_36 = lean_alloc_ctor(0, 2, 0); -lean_ctor_set(x_36, 0, x_35); -lean_ctor_set(x_36, 1, x_32); -return x_36; +lean_object* x_17; lean_object* x_18; lean_object* x_19; +lean_dec(x_10); +lean_dec(x_4); +lean_dec(x_3); +x_17 = lean_ctor_get(x_2, 0); +lean_inc(x_17); +lean_dec(x_2); +x_18 = lean_ctor_get(x_17, 1); +lean_inc(x_18); +lean_dec(x_17); +x_19 = lean_apply_2(x_18, lean_box(0), x_14); +return x_19; } } else { -uint8_t x_37; -lean_dec(x_24); -lean_dec(x_20); -x_37 = !lean_is_exclusive(x_25); -if (x_37 == 0) +lean_object* x_20; lean_object* x_21; +x_20 = l_Lean_instInhabitedExpr; +x_21 = l_Lean_PersistentArray_get_x21___rarg(x_20, x_6, x_7); +lean_dec(x_7); +if (x_12 == 0) { -return x_25; +lean_object* x_22; lean_object* x_23; +x_22 = lean_alloc_closure((void*)(l_Lean_Grind_CommRing_Power_denoteExpr___rarg___lambda__1), 4, 3); +lean_closure_set(x_22, 0, x_2); +lean_closure_set(x_22, 1, x_10); +lean_closure_set(x_22, 2, x_21); +x_23 = lean_apply_4(x_3, lean_box(0), lean_box(0), x_4, x_22); +return x_23; } else { -lean_object* x_38; lean_object* x_39; lean_object* x_40; -x_38 = lean_ctor_get(x_25, 0); -x_39 = lean_ctor_get(x_25, 1); -lean_inc(x_39); -lean_inc(x_38); -lean_dec(x_25); -x_40 = lean_alloc_ctor(1, 2, 0); -lean_ctor_set(x_40, 0, x_38); -lean_ctor_set(x_40, 1, x_39); -return x_40; -} +lean_object* x_24; lean_object* x_25; lean_object* x_26; +lean_dec(x_10); +lean_dec(x_4); +lean_dec(x_3); +x_24 = lean_ctor_get(x_2, 0); +lean_inc(x_24); +lean_dec(x_2); +x_25 = lean_ctor_get(x_24, 1); +lean_inc(x_25); +lean_dec(x_24); +x_26 = lean_apply_2(x_25, lean_box(0), x_21); +return x_26; } } -else -{ -lean_dec(x_20); -lean_ctor_set(x_12, 0, x_24); -return x_12; } } -else +LEAN_EXPORT lean_object* l_Lean_Grind_CommRing_Power_denoteExpr___rarg(lean_object* x_1, lean_object* x_2, lean_object* x_3) 
{ +_start: { -lean_object* x_41; lean_object* x_42; -x_41 = l_Lean_instInhabitedExpr; -x_42 = l_Lean_PersistentArray_get_x21___rarg(x_41, x_16, x_17); -lean_dec(x_17); -if (x_22 == 0) -{ -lean_object* x_43; -lean_free_object(x_12); -x_43 = l_Lean_Meta_Grind_Arith_CommRing_getRing(x_2, x_3, x_4, x_5, x_6, x_7, x_8, x_9, x_10, x_15); -if (lean_obj_tag(x_43) == 0) -{ -uint8_t x_44; -x_44 = !lean_is_exclusive(x_43); -if (x_44 == 0) -{ -lean_object* x_45; lean_object* x_46; lean_object* x_47; lean_object* x_48; -x_45 = lean_ctor_get(x_43, 0); -x_46 = lean_ctor_get(x_45, 10); -lean_inc(x_46); -lean_dec(x_45); -x_47 = l_Lean_mkNatLit(x_20); -x_48 = l_Lean_mkAppB(x_46, x_42, x_47); -lean_ctor_set(x_43, 0, x_48); -return x_43; +lean_object* x_4; lean_object* x_5; lean_object* x_6; +x_4 = lean_ctor_get(x_1, 1); +lean_inc(x_4); +lean_inc(x_2); +lean_inc(x_4); +x_5 = lean_alloc_closure((void*)(l_Lean_Grind_CommRing_Power_denoteExpr___rarg___lambda__2), 5, 4); +lean_closure_set(x_5, 0, x_3); +lean_closure_set(x_5, 1, x_1); +lean_closure_set(x_5, 2, x_4); +lean_closure_set(x_5, 3, x_2); +x_6 = lean_apply_4(x_4, lean_box(0), lean_box(0), x_2, x_5); +return x_6; +} +} +LEAN_EXPORT lean_object* l_Lean_Grind_CommRing_Power_denoteExpr(lean_object* x_1) { +_start: +{ +lean_object* x_2; +x_2 = lean_alloc_closure((void*)(l_Lean_Grind_CommRing_Power_denoteExpr___rarg), 3, 0); +return x_2; } -else +} +LEAN_EXPORT lean_object* l_Lean_Grind_CommRing_Mon_denoteExpr_go___rarg___lambda__1(lean_object* x_1, lean_object* x_2, lean_object* x_3, lean_object* x_4, lean_object* x_5, lean_object* x_6) { +_start: { -lean_object* x_49; lean_object* x_50; lean_object* x_51; lean_object* x_52; lean_object* x_53; lean_object* x_54; -x_49 = lean_ctor_get(x_43, 0); -x_50 = lean_ctor_get(x_43, 1); -lean_inc(x_50); -lean_inc(x_49); -lean_dec(x_43); -x_51 = lean_ctor_get(x_49, 10); -lean_inc(x_51); -lean_dec(x_49); -x_52 = l_Lean_mkNatLit(x_20); -x_53 = l_Lean_mkAppB(x_51, x_42, x_52); -x_54 = lean_alloc_ctor(0, 2, 0); -lean_ctor_set(x_54, 0, x_53); -lean_ctor_set(x_54, 1, x_50); -return x_54; +lean_object* x_7; lean_object* x_8; lean_object* x_9; +x_7 = lean_ctor_get(x_1, 7); +lean_inc(x_7); +lean_dec(x_1); +x_8 = l_Lean_mkAppB(x_7, x_2, x_6); +x_9 = l_Lean_Grind_CommRing_Mon_denoteExpr_go___rarg(x_3, x_4, x_5, x_8); +return x_9; } } -else +LEAN_EXPORT lean_object* l_Lean_Grind_CommRing_Mon_denoteExpr_go___rarg___lambda__2(lean_object* x_1, lean_object* x_2, lean_object* x_3, lean_object* x_4, lean_object* x_5, lean_object* x_6, lean_object* x_7) { +_start: +{ +lean_object* x_8; lean_object* x_9; lean_object* x_10; +lean_inc(x_2); +lean_inc(x_1); +x_8 = l_Lean_Grind_CommRing_Power_denoteExpr___rarg(x_1, x_2, x_3); +x_9 = lean_alloc_closure((void*)(l_Lean_Grind_CommRing_Mon_denoteExpr_go___rarg___lambda__1), 6, 5); +lean_closure_set(x_9, 0, x_7); +lean_closure_set(x_9, 1, x_4); +lean_closure_set(x_9, 2, x_1); +lean_closure_set(x_9, 3, x_2); +lean_closure_set(x_9, 4, x_5); +x_10 = lean_apply_4(x_6, lean_box(0), lean_box(0), x_8, x_9); +return x_10; +} +} +LEAN_EXPORT lean_object* l_Lean_Grind_CommRing_Mon_denoteExpr_go___rarg(lean_object* x_1, lean_object* x_2, lean_object* x_3, lean_object* x_4) { +_start: { -uint8_t x_55; -lean_dec(x_42); -lean_dec(x_20); -x_55 = !lean_is_exclusive(x_43); -if (x_55 == 0) +if (lean_obj_tag(x_3) == 0) { -return x_43; +lean_object* x_5; lean_object* x_6; lean_object* x_7; +lean_dec(x_2); +x_5 = lean_ctor_get(x_1, 0); +lean_inc(x_5); +lean_dec(x_1); +x_6 = lean_ctor_get(x_5, 1); +lean_inc(x_6); 
+lean_dec(x_5); +x_7 = lean_apply_2(x_6, lean_box(0), x_4); +return x_7; } else { -lean_object* x_56; lean_object* x_57; lean_object* x_58; -x_56 = lean_ctor_get(x_43, 0); -x_57 = lean_ctor_get(x_43, 1); -lean_inc(x_57); -lean_inc(x_56); -lean_dec(x_43); -x_58 = lean_alloc_ctor(1, 2, 0); -lean_ctor_set(x_58, 0, x_56); -lean_ctor_set(x_58, 1, x_57); -return x_58; +lean_object* x_8; lean_object* x_9; lean_object* x_10; lean_object* x_11; lean_object* x_12; +x_8 = lean_ctor_get(x_3, 0); +lean_inc(x_8); +x_9 = lean_ctor_get(x_3, 1); +lean_inc(x_9); +lean_dec(x_3); +x_10 = lean_ctor_get(x_1, 1); +lean_inc(x_10); +lean_inc(x_10); +lean_inc(x_2); +x_11 = lean_alloc_closure((void*)(l_Lean_Grind_CommRing_Mon_denoteExpr_go___rarg___lambda__2), 7, 6); +lean_closure_set(x_11, 0, x_1); +lean_closure_set(x_11, 1, x_2); +lean_closure_set(x_11, 2, x_8); +lean_closure_set(x_11, 3, x_4); +lean_closure_set(x_11, 4, x_9); +lean_closure_set(x_11, 5, x_10); +x_12 = lean_apply_4(x_10, lean_box(0), lean_box(0), x_2, x_11); +return x_12; } } } -else +LEAN_EXPORT lean_object* l_Lean_Grind_CommRing_Mon_denoteExpr_go(lean_object* x_1) { +_start: { -lean_dec(x_20); -lean_ctor_set(x_12, 0, x_42); -return x_12; -} +lean_object* x_2; +x_2 = lean_alloc_closure((void*)(l_Lean_Grind_CommRing_Mon_denoteExpr_go___rarg), 4, 0); +return x_2; } } -else +static lean_object* _init_l_Lean_Grind_CommRing_Mon_denoteExpr___rarg___closed__1() { +_start: { -lean_object* x_59; lean_object* x_60; lean_object* x_61; lean_object* x_62; lean_object* x_63; uint8_t x_64; lean_object* x_65; lean_object* x_66; uint8_t x_67; -x_59 = lean_ctor_get(x_12, 0); -x_60 = lean_ctor_get(x_12, 1); -lean_inc(x_60); -lean_inc(x_59); -lean_dec(x_12); -x_61 = lean_ctor_get(x_59, 13); -lean_inc(x_61); -lean_dec(x_59); -x_62 = lean_ctor_get(x_1, 0); -lean_inc(x_62); -x_63 = lean_ctor_get(x_61, 2); -lean_inc(x_63); -x_64 = lean_nat_dec_lt(x_62, x_63); -lean_dec(x_63); -x_65 = lean_ctor_get(x_1, 1); -lean_inc(x_65); -lean_dec(x_1); -x_66 = lean_unsigned_to_nat(1u); -x_67 = lean_nat_dec_eq(x_65, x_66); -if (x_64 == 0) -{ -lean_object* x_68; lean_object* x_69; -lean_dec(x_62); -lean_dec(x_61); -x_68 = l_Lean_instInhabitedExpr; -x_69 = l_outOfBounds___rarg(x_68); -if (x_67 == 0) -{ -lean_object* x_70; -x_70 = l_Lean_Meta_Grind_Arith_CommRing_getRing(x_2, x_3, x_4, x_5, x_6, x_7, x_8, x_9, x_10, x_60); -if (lean_obj_tag(x_70) == 0) -{ -lean_object* x_71; lean_object* x_72; lean_object* x_73; lean_object* x_74; lean_object* x_75; lean_object* x_76; lean_object* x_77; -x_71 = lean_ctor_get(x_70, 0); -lean_inc(x_71); -x_72 = lean_ctor_get(x_70, 1); -lean_inc(x_72); -if (lean_is_exclusive(x_70)) { - lean_ctor_release(x_70, 0); - lean_ctor_release(x_70, 1); - x_73 = x_70; -} else { - lean_dec_ref(x_70); - x_73 = lean_box(0); -} -x_74 = lean_ctor_get(x_71, 10); -lean_inc(x_74); -lean_dec(x_71); -x_75 = l_Lean_mkNatLit(x_65); -x_76 = l_Lean_mkAppB(x_74, x_69, x_75); -if (lean_is_scalar(x_73)) { - x_77 = lean_alloc_ctor(0, 2, 0); -} else { - x_77 = x_73; -} -lean_ctor_set(x_77, 0, x_76); -lean_ctor_set(x_77, 1, x_72); -return x_77; +lean_object* x_1; lean_object* x_2; +x_1 = lean_unsigned_to_nat(1u); +x_2 = lean_nat_to_int(x_1); +return x_2; } -else -{ -lean_object* x_78; lean_object* x_79; lean_object* x_80; lean_object* x_81; -lean_dec(x_69); -lean_dec(x_65); -x_78 = lean_ctor_get(x_70, 0); -lean_inc(x_78); -x_79 = lean_ctor_get(x_70, 1); -lean_inc(x_79); -if (lean_is_exclusive(x_70)) { - lean_ctor_release(x_70, 0); - lean_ctor_release(x_70, 1); - x_80 = x_70; -} else { - 
lean_dec_ref(x_70); - x_80 = lean_box(0); -} -if (lean_is_scalar(x_80)) { - x_81 = lean_alloc_ctor(1, 2, 0); -} else { - x_81 = x_80; -} -lean_ctor_set(x_81, 0, x_78); -lean_ctor_set(x_81, 1, x_79); -return x_81; } +LEAN_EXPORT lean_object* l_Lean_Grind_CommRing_Mon_denoteExpr___rarg(lean_object* x_1, lean_object* x_2, lean_object* x_3) { +_start: +{ +if (lean_obj_tag(x_3) == 0) +{ +lean_object* x_4; lean_object* x_5; +x_4 = l_Lean_Grind_CommRing_Mon_denoteExpr___rarg___closed__1; +x_5 = l___private_Lean_Meta_Tactic_Grind_Arith_CommRing_DenoteExpr_0__Lean_Meta_Grind_Arith_CommRing_denoteNum___rarg(x_1, x_2, x_4); +return x_5; } else { -lean_object* x_82; -lean_dec(x_65); -x_82 = lean_alloc_ctor(0, 2, 0); -lean_ctor_set(x_82, 0, x_69); -lean_ctor_set(x_82, 1, x_60); -return x_82; +lean_object* x_6; lean_object* x_7; lean_object* x_8; lean_object* x_9; lean_object* x_10; lean_object* x_11; +x_6 = lean_ctor_get(x_3, 0); +lean_inc(x_6); +x_7 = lean_ctor_get(x_3, 1); +lean_inc(x_7); +lean_dec(x_3); +x_8 = lean_ctor_get(x_1, 1); +lean_inc(x_8); +lean_inc(x_2); +lean_inc(x_1); +x_9 = l_Lean_Grind_CommRing_Power_denoteExpr___rarg(x_1, x_2, x_6); +x_10 = lean_alloc_closure((void*)(l_Lean_Grind_CommRing_Mon_denoteExpr_go___rarg), 4, 3); +lean_closure_set(x_10, 0, x_1); +lean_closure_set(x_10, 1, x_2); +lean_closure_set(x_10, 2, x_7); +x_11 = lean_apply_4(x_8, lean_box(0), lean_box(0), x_9, x_10); +return x_11; } } -else -{ -lean_object* x_83; lean_object* x_84; -x_83 = l_Lean_instInhabitedExpr; -x_84 = l_Lean_PersistentArray_get_x21___rarg(x_83, x_61, x_62); -lean_dec(x_62); -if (x_67 == 0) -{ -lean_object* x_85; -x_85 = l_Lean_Meta_Grind_Arith_CommRing_getRing(x_2, x_3, x_4, x_5, x_6, x_7, x_8, x_9, x_10, x_60); -if (lean_obj_tag(x_85) == 0) -{ -lean_object* x_86; lean_object* x_87; lean_object* x_88; lean_object* x_89; lean_object* x_90; lean_object* x_91; lean_object* x_92; -x_86 = lean_ctor_get(x_85, 0); -lean_inc(x_86); -x_87 = lean_ctor_get(x_85, 1); -lean_inc(x_87); -if (lean_is_exclusive(x_85)) { - lean_ctor_release(x_85, 0); - lean_ctor_release(x_85, 1); - x_88 = x_85; -} else { - lean_dec_ref(x_85); - x_88 = lean_box(0); -} -x_89 = lean_ctor_get(x_86, 10); -lean_inc(x_89); -lean_dec(x_86); -x_90 = l_Lean_mkNatLit(x_65); -x_91 = l_Lean_mkAppB(x_89, x_84, x_90); -if (lean_is_scalar(x_88)) { - x_92 = lean_alloc_ctor(0, 2, 0); -} else { - x_92 = x_88; -} -lean_ctor_set(x_92, 0, x_91); -lean_ctor_set(x_92, 1, x_87); -return x_92; } -else +LEAN_EXPORT lean_object* l_Lean_Grind_CommRing_Mon_denoteExpr(lean_object* x_1) { +_start: { -lean_object* x_93; lean_object* x_94; lean_object* x_95; lean_object* x_96; -lean_dec(x_84); -lean_dec(x_65); -x_93 = lean_ctor_get(x_85, 0); -lean_inc(x_93); -x_94 = lean_ctor_get(x_85, 1); -lean_inc(x_94); -if (lean_is_exclusive(x_85)) { - lean_ctor_release(x_85, 0); - lean_ctor_release(x_85, 1); - x_95 = x_85; -} else { - lean_dec_ref(x_85); - x_95 = lean_box(0); -} -if (lean_is_scalar(x_95)) { - x_96 = lean_alloc_ctor(1, 2, 0); -} else { - x_96 = x_95; -} -lean_ctor_set(x_96, 0, x_93); -lean_ctor_set(x_96, 1, x_94); -return x_96; +lean_object* x_2; +x_2 = lean_alloc_closure((void*)(l_Lean_Grind_CommRing_Mon_denoteExpr___rarg), 3, 0); +return x_2; } } -else +LEAN_EXPORT lean_object* l_Lean_Grind_CommRing_Poly_denoteExpr_denoteTerm___rarg___lambda__1(lean_object* x_1, lean_object* x_2, lean_object* x_3, lean_object* x_4) { +_start: { -lean_object* x_97; -lean_dec(x_65); -x_97 = lean_alloc_ctor(0, 2, 0); -lean_ctor_set(x_97, 0, x_84); -lean_ctor_set(x_97, 1, x_60); 
-return x_97; +lean_object* x_5; lean_object* x_6; lean_object* x_7; lean_object* x_8; lean_object* x_9; +x_5 = lean_ctor_get(x_1, 0); +lean_inc(x_5); +lean_dec(x_1); +x_6 = lean_ctor_get(x_5, 1); +lean_inc(x_6); +lean_dec(x_5); +x_7 = lean_ctor_get(x_2, 7); +lean_inc(x_7); +lean_dec(x_2); +x_8 = l_Lean_mkAppB(x_7, x_3, x_4); +x_9 = lean_apply_2(x_6, lean_box(0), x_8); +return x_9; } } +LEAN_EXPORT lean_object* l_Lean_Grind_CommRing_Poly_denoteExpr_denoteTerm___rarg___lambda__2(lean_object* x_1, lean_object* x_2, lean_object* x_3, lean_object* x_4, lean_object* x_5, lean_object* x_6) { +_start: +{ +lean_object* x_7; lean_object* x_8; lean_object* x_9; +lean_inc(x_1); +x_7 = l_Lean_Grind_CommRing_Mon_denoteExpr___rarg(x_1, x_2, x_3); +x_8 = lean_alloc_closure((void*)(l_Lean_Grind_CommRing_Poly_denoteExpr_denoteTerm___rarg___lambda__1), 4, 3); +lean_closure_set(x_8, 0, x_1); +lean_closure_set(x_8, 1, x_4); +lean_closure_set(x_8, 2, x_6); +x_9 = lean_apply_4(x_5, lean_box(0), lean_box(0), x_7, x_8); +return x_9; } } -else +LEAN_EXPORT lean_object* l_Lean_Grind_CommRing_Poly_denoteExpr_denoteTerm___rarg___lambda__3(lean_object* x_1, lean_object* x_2, lean_object* x_3, lean_object* x_4, lean_object* x_5, lean_object* x_6) { +_start: { -uint8_t x_98; -lean_dec(x_1); -x_98 = !lean_is_exclusive(x_12); -if (x_98 == 0) +lean_object* x_7; lean_object* x_8; lean_object* x_9; +lean_inc(x_2); +lean_inc(x_1); +x_7 = l___private_Lean_Meta_Tactic_Grind_Arith_CommRing_DenoteExpr_0__Lean_Meta_Grind_Arith_CommRing_denoteNum___rarg(x_1, x_2, x_3); +lean_inc(x_5); +x_8 = lean_alloc_closure((void*)(l_Lean_Grind_CommRing_Poly_denoteExpr_denoteTerm___rarg___lambda__2), 6, 5); +lean_closure_set(x_8, 0, x_1); +lean_closure_set(x_8, 1, x_2); +lean_closure_set(x_8, 2, x_4); +lean_closure_set(x_8, 3, x_6); +lean_closure_set(x_8, 4, x_5); +x_9 = lean_apply_4(x_5, lean_box(0), lean_box(0), x_7, x_8); +return x_9; +} +} +LEAN_EXPORT lean_object* l_Lean_Grind_CommRing_Poly_denoteExpr_denoteTerm___rarg(lean_object* x_1, lean_object* x_2, lean_object* x_3, lean_object* x_4) { +_start: { -return x_12; +lean_object* x_5; uint8_t x_6; +x_5 = l_Lean_Grind_CommRing_Mon_denoteExpr___rarg___closed__1; +x_6 = lean_int_dec_eq(x_3, x_5); +if (x_6 == 0) +{ +lean_object* x_7; lean_object* x_8; lean_object* x_9; +x_7 = lean_ctor_get(x_1, 1); +lean_inc(x_7); +lean_inc(x_7); +lean_inc(x_2); +x_8 = lean_alloc_closure((void*)(l_Lean_Grind_CommRing_Poly_denoteExpr_denoteTerm___rarg___lambda__3), 6, 5); +lean_closure_set(x_8, 0, x_1); +lean_closure_set(x_8, 1, x_2); +lean_closure_set(x_8, 2, x_3); +lean_closure_set(x_8, 3, x_4); +lean_closure_set(x_8, 4, x_7); +x_9 = lean_apply_4(x_7, lean_box(0), lean_box(0), x_2, x_8); +return x_9; } else { -lean_object* x_99; lean_object* x_100; lean_object* x_101; -x_99 = lean_ctor_get(x_12, 0); -x_100 = lean_ctor_get(x_12, 1); -lean_inc(x_100); -lean_inc(x_99); -lean_dec(x_12); -x_101 = lean_alloc_ctor(1, 2, 0); -lean_ctor_set(x_101, 0, x_99); -lean_ctor_set(x_101, 1, x_100); -return x_101; +lean_object* x_10; +lean_dec(x_3); +x_10 = l_Lean_Grind_CommRing_Mon_denoteExpr___rarg(x_1, x_2, x_4); +return x_10; } } } +LEAN_EXPORT lean_object* l_Lean_Grind_CommRing_Poly_denoteExpr_denoteTerm(lean_object* x_1) { +_start: +{ +lean_object* x_2; +x_2 = lean_alloc_closure((void*)(l_Lean_Grind_CommRing_Poly_denoteExpr_denoteTerm___rarg), 4, 0); +return x_2; +} } -LEAN_EXPORT lean_object* l_Lean_Grind_CommRing_Power_denoteExpr___boxed(lean_object* x_1, lean_object* x_2, lean_object* x_3, lean_object* x_4, lean_object* 
x_5, lean_object* x_6, lean_object* x_7, lean_object* x_8, lean_object* x_9, lean_object* x_10, lean_object* x_11) { +LEAN_EXPORT lean_object* l_Lean_Grind_CommRing_Poly_denoteExpr_go___rarg___lambda__1(lean_object* x_1, lean_object* x_2, lean_object* x_3, lean_object* x_4) { _start: { -lean_object* x_12; -x_12 = l_Lean_Grind_CommRing_Power_denoteExpr(x_1, x_2, x_3, x_4, x_5, x_6, x_7, x_8, x_9, x_10, x_11); -lean_dec(x_10); -lean_dec(x_9); -lean_dec(x_8); -lean_dec(x_7); -lean_dec(x_6); +lean_object* x_5; lean_object* x_6; lean_object* x_7; lean_object* x_8; lean_object* x_9; +x_5 = lean_ctor_get(x_1, 0); +lean_inc(x_5); +lean_dec(x_1); +x_6 = lean_ctor_get(x_5, 1); +lean_inc(x_6); lean_dec(x_5); -lean_dec(x_4); -lean_dec(x_3); +x_7 = lean_ctor_get(x_2, 6); +lean_inc(x_7); lean_dec(x_2); -return x_12; +x_8 = l_Lean_mkAppB(x_7, x_3, x_4); +x_9 = lean_apply_2(x_6, lean_box(0), x_8); +return x_9; +} +} +LEAN_EXPORT lean_object* l_Lean_Grind_CommRing_Poly_denoteExpr_go___rarg___lambda__2(lean_object* x_1, lean_object* x_2, lean_object* x_3, lean_object* x_4, lean_object* x_5, lean_object* x_6) { +_start: +{ +lean_object* x_7; lean_object* x_8; lean_object* x_9; +lean_inc(x_1); +x_7 = l___private_Lean_Meta_Tactic_Grind_Arith_CommRing_DenoteExpr_0__Lean_Meta_Grind_Arith_CommRing_denoteNum___rarg(x_1, x_2, x_3); +x_8 = lean_alloc_closure((void*)(l_Lean_Grind_CommRing_Poly_denoteExpr_go___rarg___lambda__1), 4, 3); +lean_closure_set(x_8, 0, x_1); +lean_closure_set(x_8, 1, x_6); +lean_closure_set(x_8, 2, x_4); +x_9 = lean_apply_4(x_5, lean_box(0), lean_box(0), x_7, x_8); +return x_9; +} +} +LEAN_EXPORT lean_object* l_Lean_Grind_CommRing_Poly_denoteExpr_go___rarg___lambda__3(lean_object* x_1, lean_object* x_2, lean_object* x_3, lean_object* x_4, lean_object* x_5, lean_object* x_6) { +_start: +{ +lean_object* x_7; lean_object* x_8; lean_object* x_9; +x_7 = lean_ctor_get(x_1, 6); +lean_inc(x_7); +lean_dec(x_1); +x_8 = l_Lean_mkAppB(x_7, x_2, x_6); +x_9 = l_Lean_Grind_CommRing_Poly_denoteExpr_go___rarg(x_3, x_4, x_5, x_8); +return x_9; } } -LEAN_EXPORT lean_object* l_Lean_Grind_CommRing_Mon_denoteExpr_go(lean_object* x_1, lean_object* x_2, lean_object* x_3, lean_object* x_4, lean_object* x_5, lean_object* x_6, lean_object* x_7, lean_object* x_8, lean_object* x_9, lean_object* x_10, lean_object* x_11, lean_object* x_12) { +LEAN_EXPORT lean_object* l_Lean_Grind_CommRing_Poly_denoteExpr_go___rarg___lambda__4(lean_object* x_1, lean_object* x_2, lean_object* x_3, lean_object* x_4, lean_object* x_5, lean_object* x_6, lean_object* x_7, lean_object* x_8) { +_start: +{ +lean_object* x_9; lean_object* x_10; lean_object* x_11; +lean_inc(x_2); +lean_inc(x_1); +x_9 = l_Lean_Grind_CommRing_Poly_denoteExpr_denoteTerm___rarg(x_1, x_2, x_3, x_4); +x_10 = lean_alloc_closure((void*)(l_Lean_Grind_CommRing_Poly_denoteExpr_go___rarg___lambda__3), 6, 5); +lean_closure_set(x_10, 0, x_8); +lean_closure_set(x_10, 1, x_5); +lean_closure_set(x_10, 2, x_1); +lean_closure_set(x_10, 3, x_2); +lean_closure_set(x_10, 4, x_6); +x_11 = lean_apply_4(x_7, lean_box(0), lean_box(0), x_9, x_10); +return x_11; +} +} +LEAN_EXPORT lean_object* l_Lean_Grind_CommRing_Poly_denoteExpr_go___rarg(lean_object* x_1, lean_object* x_2, lean_object* x_3, lean_object* x_4) { _start: { -if (lean_obj_tag(x_1) == 0) +if (lean_obj_tag(x_3) == 0) { -lean_object* x_13; -x_13 = lean_alloc_ctor(0, 2, 0); -lean_ctor_set(x_13, 0, x_2); -lean_ctor_set(x_13, 1, x_12); +lean_object* x_5; lean_object* x_6; uint8_t x_7; +x_5 = lean_ctor_get(x_3, 0); +lean_inc(x_5); 
+lean_dec(x_3); +x_6 = l___private_Lean_Meta_Tactic_Grind_Arith_CommRing_DenoteExpr_0__Lean_Meta_Grind_Arith_CommRing_denoteNum___rarg___lambda__1___closed__8; +x_7 = lean_int_dec_eq(x_5, x_6); +if (x_7 == 0) +{ +lean_object* x_8; lean_object* x_9; lean_object* x_10; +x_8 = lean_ctor_get(x_1, 1); +lean_inc(x_8); +lean_inc(x_8); +lean_inc(x_2); +x_9 = lean_alloc_closure((void*)(l_Lean_Grind_CommRing_Poly_denoteExpr_go___rarg___lambda__2), 6, 5); +lean_closure_set(x_9, 0, x_1); +lean_closure_set(x_9, 1, x_2); +lean_closure_set(x_9, 2, x_5); +lean_closure_set(x_9, 3, x_4); +lean_closure_set(x_9, 4, x_8); +x_10 = lean_apply_4(x_8, lean_box(0), lean_box(0), x_2, x_9); +return x_10; +} +else +{ +lean_object* x_11; lean_object* x_12; lean_object* x_13; +lean_dec(x_5); +lean_dec(x_2); +x_11 = lean_ctor_get(x_1, 0); +lean_inc(x_11); +lean_dec(x_1); +x_12 = lean_ctor_get(x_11, 1); +lean_inc(x_12); +lean_dec(x_11); +x_13 = lean_apply_2(x_12, lean_box(0), x_4); return x_13; } +} else { -lean_object* x_14; lean_object* x_15; lean_object* x_16; -x_14 = lean_ctor_get(x_1, 0); +lean_object* x_14; lean_object* x_15; lean_object* x_16; lean_object* x_17; lean_object* x_18; lean_object* x_19; +x_14 = lean_ctor_get(x_3, 0); lean_inc(x_14); -x_15 = lean_ctor_get(x_1, 1); +x_15 = lean_ctor_get(x_3, 1); lean_inc(x_15); -lean_dec(x_1); -x_16 = l_Lean_Meta_Grind_Arith_CommRing_getRing(x_3, x_4, x_5, x_6, x_7, x_8, x_9, x_10, x_11, x_12); -if (lean_obj_tag(x_16) == 0) -{ -lean_object* x_17; lean_object* x_18; lean_object* x_19; -x_17 = lean_ctor_get(x_16, 0); +x_16 = lean_ctor_get(x_3, 2); +lean_inc(x_16); +lean_dec(x_3); +x_17 = lean_ctor_get(x_1, 1); lean_inc(x_17); -x_18 = lean_ctor_get(x_16, 1); -lean_inc(x_18); -lean_dec(x_16); -x_19 = l_Lean_Grind_CommRing_Power_denoteExpr(x_14, x_3, x_4, x_5, x_6, x_7, x_8, x_9, x_10, x_11, x_18); -if (lean_obj_tag(x_19) == 0) +lean_inc(x_17); +lean_inc(x_2); +x_18 = lean_alloc_closure((void*)(l_Lean_Grind_CommRing_Poly_denoteExpr_go___rarg___lambda__4), 8, 7); +lean_closure_set(x_18, 0, x_1); +lean_closure_set(x_18, 1, x_2); +lean_closure_set(x_18, 2, x_14); +lean_closure_set(x_18, 3, x_15); +lean_closure_set(x_18, 4, x_4); +lean_closure_set(x_18, 5, x_16); +lean_closure_set(x_18, 6, x_17); +x_19 = lean_apply_4(x_17, lean_box(0), lean_box(0), x_2, x_18); +return x_19; +} +} +} +LEAN_EXPORT lean_object* l_Lean_Grind_CommRing_Poly_denoteExpr_go(lean_object* x_1) { +_start: { -lean_object* x_20; lean_object* x_21; lean_object* x_22; lean_object* x_23; -x_20 = lean_ctor_get(x_19, 0); -lean_inc(x_20); -x_21 = lean_ctor_get(x_19, 1); -lean_inc(x_21); -lean_dec(x_19); -x_22 = lean_ctor_get(x_17, 7); -lean_inc(x_22); -lean_dec(x_17); -x_23 = l_Lean_mkAppB(x_22, x_2, x_20); -x_1 = x_15; -x_2 = x_23; -x_12 = x_21; -goto _start; +lean_object* x_2; +x_2 = lean_alloc_closure((void*)(l_Lean_Grind_CommRing_Poly_denoteExpr_go___rarg), 4, 0); +return x_2; } -else +} +LEAN_EXPORT lean_object* l_Lean_Grind_CommRing_Poly_denoteExpr___rarg(lean_object* x_1, lean_object* x_2, lean_object* x_3) { +_start: { -uint8_t x_25; -lean_dec(x_17); -lean_dec(x_15); -lean_dec(x_2); -x_25 = !lean_is_exclusive(x_19); -if (x_25 == 0) +if (lean_obj_tag(x_3) == 0) { -return x_19; +lean_object* x_4; lean_object* x_5; +x_4 = lean_ctor_get(x_3, 0); +lean_inc(x_4); +lean_dec(x_3); +x_5 = l___private_Lean_Meta_Tactic_Grind_Arith_CommRing_DenoteExpr_0__Lean_Meta_Grind_Arith_CommRing_denoteNum___rarg(x_1, x_2, x_4); +return x_5; } else { -lean_object* x_26; lean_object* x_27; lean_object* x_28; -x_26 = 
lean_ctor_get(x_19, 0); -x_27 = lean_ctor_get(x_19, 1); -lean_inc(x_27); -lean_inc(x_26); -lean_dec(x_19); -x_28 = lean_alloc_ctor(1, 2, 0); -lean_ctor_set(x_28, 0, x_26); -lean_ctor_set(x_28, 1, x_27); -return x_28; +lean_object* x_6; lean_object* x_7; lean_object* x_8; lean_object* x_9; lean_object* x_10; lean_object* x_11; lean_object* x_12; +x_6 = lean_ctor_get(x_3, 0); +lean_inc(x_6); +x_7 = lean_ctor_get(x_3, 1); +lean_inc(x_7); +x_8 = lean_ctor_get(x_3, 2); +lean_inc(x_8); +lean_dec(x_3); +x_9 = lean_ctor_get(x_1, 1); +lean_inc(x_9); +lean_inc(x_2); +lean_inc(x_1); +x_10 = l_Lean_Grind_CommRing_Poly_denoteExpr_denoteTerm___rarg(x_1, x_2, x_6, x_7); +x_11 = lean_alloc_closure((void*)(l_Lean_Grind_CommRing_Poly_denoteExpr_go___rarg), 4, 3); +lean_closure_set(x_11, 0, x_1); +lean_closure_set(x_11, 1, x_2); +lean_closure_set(x_11, 2, x_8); +x_12 = lean_apply_4(x_9, lean_box(0), lean_box(0), x_10, x_11); +return x_12; } } } -else +LEAN_EXPORT lean_object* l_Lean_Grind_CommRing_Poly_denoteExpr(lean_object* x_1) { +_start: { -uint8_t x_29; -lean_dec(x_15); -lean_dec(x_14); -lean_dec(x_2); -x_29 = !lean_is_exclusive(x_16); -if (x_29 == 0) +lean_object* x_2; +x_2 = lean_alloc_closure((void*)(l_Lean_Grind_CommRing_Poly_denoteExpr___rarg), 3, 0); +return x_2; +} +} +LEAN_EXPORT lean_object* l_Lean_Grind_CommRing_Expr_denoteExpr_go___rarg___lambda__1(lean_object* x_1, lean_object* x_2, lean_object* x_3) { +_start: { -return x_16; +lean_object* x_4; lean_object* x_5; lean_object* x_6; lean_object* x_7; uint8_t x_8; +x_4 = lean_ctor_get(x_1, 0); +lean_inc(x_4); +lean_dec(x_1); +x_5 = lean_ctor_get(x_4, 1); +lean_inc(x_5); +lean_dec(x_4); +x_6 = lean_ctor_get(x_3, 13); +lean_inc(x_6); +lean_dec(x_3); +x_7 = lean_ctor_get(x_6, 2); +lean_inc(x_7); +x_8 = lean_nat_dec_lt(x_2, x_7); +lean_dec(x_7); +if (x_8 == 0) +{ +lean_object* x_9; lean_object* x_10; lean_object* x_11; +lean_dec(x_6); +x_9 = l_Lean_instInhabitedExpr; +x_10 = l_outOfBounds___rarg(x_9); +x_11 = lean_apply_2(x_5, lean_box(0), x_10); +return x_11; } else { -lean_object* x_30; lean_object* x_31; lean_object* x_32; -x_30 = lean_ctor_get(x_16, 0); -x_31 = lean_ctor_get(x_16, 1); -lean_inc(x_31); -lean_inc(x_30); -lean_dec(x_16); -x_32 = lean_alloc_ctor(1, 2, 0); -lean_ctor_set(x_32, 0, x_30); -lean_ctor_set(x_32, 1, x_31); -return x_32; -} +lean_object* x_12; lean_object* x_13; lean_object* x_14; +x_12 = l_Lean_instInhabitedExpr; +x_13 = l_Lean_PersistentArray_get_x21___rarg(x_12, x_6, x_2); +x_14 = lean_apply_2(x_5, lean_box(0), x_13); +return x_14; } } } -} -LEAN_EXPORT lean_object* l_Lean_Grind_CommRing_Mon_denoteExpr_go___boxed(lean_object* x_1, lean_object* x_2, lean_object* x_3, lean_object* x_4, lean_object* x_5, lean_object* x_6, lean_object* x_7, lean_object* x_8, lean_object* x_9, lean_object* x_10, lean_object* x_11, lean_object* x_12) { +LEAN_EXPORT lean_object* l_Lean_Grind_CommRing_Expr_denoteExpr_go___rarg___lambda__2(lean_object* x_1, lean_object* x_2, lean_object* x_3) { _start: { -lean_object* x_13; -x_13 = l_Lean_Grind_CommRing_Mon_denoteExpr_go(x_1, x_2, x_3, x_4, x_5, x_6, x_7, x_8, x_9, x_10, x_11, x_12); -lean_dec(x_11); -lean_dec(x_10); -lean_dec(x_9); -lean_dec(x_8); -lean_dec(x_7); -lean_dec(x_6); -lean_dec(x_5); +lean_object* x_4; lean_object* x_5; lean_object* x_6; lean_object* x_7; lean_object* x_8; +x_4 = lean_ctor_get(x_1, 0); +lean_inc(x_4); +lean_dec(x_1); +x_5 = lean_ctor_get(x_4, 1); +lean_inc(x_5); lean_dec(x_4); -lean_dec(x_3); -return x_13; +x_6 = lean_ctor_get(x_2, 9); +lean_inc(x_6); +lean_dec(x_2); 
+x_7 = l_Lean_Expr_app___override(x_6, x_3); +x_8 = lean_apply_2(x_5, lean_box(0), x_7); +return x_8; } } -static lean_object* _init_l_Lean_Grind_CommRing_Mon_denoteExpr___closed__1() { +LEAN_EXPORT lean_object* l_Lean_Grind_CommRing_Expr_denoteExpr_go___rarg___lambda__3(lean_object* x_1, lean_object* x_2, lean_object* x_3, lean_object* x_4, lean_object* x_5) { _start: { -lean_object* x_1; lean_object* x_2; -x_1 = lean_unsigned_to_nat(1u); -x_2 = lean_nat_to_int(x_1); -return x_2; +lean_object* x_6; lean_object* x_7; lean_object* x_8; +lean_inc(x_1); +x_6 = l_Lean_Grind_CommRing_Expr_denoteExpr_go___rarg(x_1, x_2, x_3); +x_7 = lean_alloc_closure((void*)(l_Lean_Grind_CommRing_Expr_denoteExpr_go___rarg___lambda__2), 3, 2); +lean_closure_set(x_7, 0, x_1); +lean_closure_set(x_7, 1, x_5); +x_8 = lean_apply_4(x_4, lean_box(0), lean_box(0), x_6, x_7); +return x_8; } } -LEAN_EXPORT lean_object* l_Lean_Grind_CommRing_Mon_denoteExpr(lean_object* x_1, lean_object* x_2, lean_object* x_3, lean_object* x_4, lean_object* x_5, lean_object* x_6, lean_object* x_7, lean_object* x_8, lean_object* x_9, lean_object* x_10, lean_object* x_11) { +LEAN_EXPORT lean_object* l_Lean_Grind_CommRing_Expr_denoteExpr_go___rarg___lambda__4(lean_object* x_1, lean_object* x_2, lean_object* x_3, lean_object* x_4, lean_object* x_5, lean_object* x_6) { _start: { -if (lean_obj_tag(x_1) == 0) -{ -lean_object* x_12; lean_object* x_13; -x_12 = l_Lean_Grind_CommRing_Mon_denoteExpr___closed__1; -x_13 = l___private_Lean_Meta_Tactic_Grind_Arith_CommRing_DenoteExpr_0__Lean_Meta_Grind_Arith_CommRing_denoteNum(x_12, x_2, x_3, x_4, x_5, x_6, x_7, x_8, x_9, x_10, x_11); -return x_13; +lean_object* x_7; lean_object* x_8; lean_object* x_9; +lean_inc(x_1); +x_7 = l_Lean_Grind_CommRing_Expr_denoteExpr_go___rarg(x_1, x_2, x_3); +x_8 = lean_alloc_closure((void*)(l_Lean_Grind_CommRing_Poly_denoteExpr_go___rarg___lambda__1), 4, 3); +lean_closure_set(x_8, 0, x_1); +lean_closure_set(x_8, 1, x_4); +lean_closure_set(x_8, 2, x_6); +x_9 = lean_apply_4(x_5, lean_box(0), lean_box(0), x_7, x_8); +return x_9; } -else +} +LEAN_EXPORT lean_object* l_Lean_Grind_CommRing_Expr_denoteExpr_go___rarg___lambda__5(lean_object* x_1, lean_object* x_2, lean_object* x_3, lean_object* x_4, lean_object* x_5, lean_object* x_6) { +_start: { -lean_object* x_14; lean_object* x_15; lean_object* x_16; -x_14 = lean_ctor_get(x_1, 0); -lean_inc(x_14); -x_15 = lean_ctor_get(x_1, 1); -lean_inc(x_15); +lean_object* x_7; lean_object* x_8; lean_object* x_9; +lean_inc(x_2); +lean_inc(x_1); +x_7 = l_Lean_Grind_CommRing_Expr_denoteExpr_go___rarg(x_1, x_2, x_3); +lean_inc(x_5); +x_8 = lean_alloc_closure((void*)(l_Lean_Grind_CommRing_Expr_denoteExpr_go___rarg___lambda__4), 6, 5); +lean_closure_set(x_8, 0, x_1); +lean_closure_set(x_8, 1, x_2); +lean_closure_set(x_8, 2, x_4); +lean_closure_set(x_8, 3, x_6); +lean_closure_set(x_8, 4, x_5); +x_9 = lean_apply_4(x_5, lean_box(0), lean_box(0), x_7, x_8); +return x_9; +} +} +LEAN_EXPORT lean_object* l_Lean_Grind_CommRing_Expr_denoteExpr_go___rarg___lambda__6(lean_object* x_1, lean_object* x_2, lean_object* x_3, lean_object* x_4) { +_start: +{ +lean_object* x_5; lean_object* x_6; lean_object* x_7; lean_object* x_8; lean_object* x_9; +x_5 = lean_ctor_get(x_1, 0); +lean_inc(x_5); lean_dec(x_1); -x_16 = l_Lean_Grind_CommRing_Power_denoteExpr(x_14, x_2, x_3, x_4, x_5, x_6, x_7, x_8, x_9, x_10, x_11); -if (lean_obj_tag(x_16) == 0) +x_6 = lean_ctor_get(x_5, 1); +lean_inc(x_6); +lean_dec(x_5); +x_7 = lean_ctor_get(x_2, 8); +lean_inc(x_7); +lean_dec(x_2); +x_8 
= l_Lean_mkAppB(x_7, x_3, x_4); +x_9 = lean_apply_2(x_6, lean_box(0), x_8); +return x_9; +} +} +LEAN_EXPORT lean_object* l_Lean_Grind_CommRing_Expr_denoteExpr_go___rarg___lambda__7(lean_object* x_1, lean_object* x_2, lean_object* x_3, lean_object* x_4, lean_object* x_5, lean_object* x_6) { +_start: { -lean_object* x_17; lean_object* x_18; lean_object* x_19; -x_17 = lean_ctor_get(x_16, 0); -lean_inc(x_17); -x_18 = lean_ctor_get(x_16, 1); -lean_inc(x_18); -lean_dec(x_16); -x_19 = l_Lean_Grind_CommRing_Mon_denoteExpr_go(x_15, x_17, x_2, x_3, x_4, x_5, x_6, x_7, x_8, x_9, x_10, x_18); -return x_19; +lean_object* x_7; lean_object* x_8; lean_object* x_9; +lean_inc(x_1); +x_7 = l_Lean_Grind_CommRing_Expr_denoteExpr_go___rarg(x_1, x_2, x_3); +x_8 = lean_alloc_closure((void*)(l_Lean_Grind_CommRing_Expr_denoteExpr_go___rarg___lambda__6), 4, 3); +lean_closure_set(x_8, 0, x_1); +lean_closure_set(x_8, 1, x_4); +lean_closure_set(x_8, 2, x_6); +x_9 = lean_apply_4(x_5, lean_box(0), lean_box(0), x_7, x_8); +return x_9; } -else +} +LEAN_EXPORT lean_object* l_Lean_Grind_CommRing_Expr_denoteExpr_go___rarg___lambda__8(lean_object* x_1, lean_object* x_2, lean_object* x_3, lean_object* x_4, lean_object* x_5, lean_object* x_6) { +_start: { -uint8_t x_20; -lean_dec(x_15); -x_20 = !lean_is_exclusive(x_16); -if (x_20 == 0) +lean_object* x_7; lean_object* x_8; lean_object* x_9; +lean_inc(x_2); +lean_inc(x_1); +x_7 = l_Lean_Grind_CommRing_Expr_denoteExpr_go___rarg(x_1, x_2, x_3); +lean_inc(x_5); +x_8 = lean_alloc_closure((void*)(l_Lean_Grind_CommRing_Expr_denoteExpr_go___rarg___lambda__7), 6, 5); +lean_closure_set(x_8, 0, x_1); +lean_closure_set(x_8, 1, x_2); +lean_closure_set(x_8, 2, x_4); +lean_closure_set(x_8, 3, x_6); +lean_closure_set(x_8, 4, x_5); +x_9 = lean_apply_4(x_5, lean_box(0), lean_box(0), x_7, x_8); +return x_9; +} +} +LEAN_EXPORT lean_object* l_Lean_Grind_CommRing_Expr_denoteExpr_go___rarg___lambda__9(lean_object* x_1, lean_object* x_2, lean_object* x_3, lean_object* x_4, lean_object* x_5, lean_object* x_6) { +_start: { -return x_16; +lean_object* x_7; lean_object* x_8; lean_object* x_9; +lean_inc(x_1); +x_7 = l_Lean_Grind_CommRing_Expr_denoteExpr_go___rarg(x_1, x_2, x_3); +x_8 = lean_alloc_closure((void*)(l_Lean_Grind_CommRing_Poly_denoteExpr_denoteTerm___rarg___lambda__1), 4, 3); +lean_closure_set(x_8, 0, x_1); +lean_closure_set(x_8, 1, x_4); +lean_closure_set(x_8, 2, x_6); +x_9 = lean_apply_4(x_5, lean_box(0), lean_box(0), x_7, x_8); +return x_9; } -else -{ -lean_object* x_21; lean_object* x_22; lean_object* x_23; -x_21 = lean_ctor_get(x_16, 0); -x_22 = lean_ctor_get(x_16, 1); -lean_inc(x_22); -lean_inc(x_21); -lean_dec(x_16); -x_23 = lean_alloc_ctor(1, 2, 0); -lean_ctor_set(x_23, 0, x_21); -lean_ctor_set(x_23, 1, x_22); -return x_23; -} -} -} -} -} -LEAN_EXPORT lean_object* l_Lean_Grind_CommRing_Mon_denoteExpr___boxed(lean_object* x_1, lean_object* x_2, lean_object* x_3, lean_object* x_4, lean_object* x_5, lean_object* x_6, lean_object* x_7, lean_object* x_8, lean_object* x_9, lean_object* x_10, lean_object* x_11) { -_start: -{ -lean_object* x_12; -x_12 = l_Lean_Grind_CommRing_Mon_denoteExpr(x_1, x_2, x_3, x_4, x_5, x_6, x_7, x_8, x_9, x_10, x_11); -lean_dec(x_10); -lean_dec(x_9); -lean_dec(x_8); -lean_dec(x_7); -lean_dec(x_6); -lean_dec(x_5); -lean_dec(x_4); -lean_dec(x_3); -lean_dec(x_2); -return x_12; -} -} -LEAN_EXPORT lean_object* l_Lean_Grind_CommRing_Poly_denoteExpr_denoteTerm(lean_object* x_1, lean_object* x_2, lean_object* x_3, lean_object* x_4, lean_object* x_5, lean_object* x_6, 
lean_object* x_7, lean_object* x_8, lean_object* x_9, lean_object* x_10, lean_object* x_11, lean_object* x_12) { -_start: -{ -lean_object* x_13; uint8_t x_14; -x_13 = l_Lean_Grind_CommRing_Mon_denoteExpr___closed__1; -x_14 = lean_int_dec_eq(x_1, x_13); -if (x_14 == 0) -{ -lean_object* x_15; -x_15 = l_Lean_Meta_Grind_Arith_CommRing_getRing(x_3, x_4, x_5, x_6, x_7, x_8, x_9, x_10, x_11, x_12); -if (lean_obj_tag(x_15) == 0) -{ -lean_object* x_16; lean_object* x_17; lean_object* x_18; -x_16 = lean_ctor_get(x_15, 0); -lean_inc(x_16); -x_17 = lean_ctor_get(x_15, 1); -lean_inc(x_17); -lean_dec(x_15); -x_18 = l___private_Lean_Meta_Tactic_Grind_Arith_CommRing_DenoteExpr_0__Lean_Meta_Grind_Arith_CommRing_denoteNum(x_1, x_3, x_4, x_5, x_6, x_7, x_8, x_9, x_10, x_11, x_17); -if (lean_obj_tag(x_18) == 0) -{ -lean_object* x_19; lean_object* x_20; lean_object* x_21; -x_19 = lean_ctor_get(x_18, 0); -lean_inc(x_19); -x_20 = lean_ctor_get(x_18, 1); -lean_inc(x_20); -lean_dec(x_18); -x_21 = l_Lean_Grind_CommRing_Mon_denoteExpr(x_2, x_3, x_4, x_5, x_6, x_7, x_8, x_9, x_10, x_11, x_20); -if (lean_obj_tag(x_21) == 0) -{ -uint8_t x_22; -x_22 = !lean_is_exclusive(x_21); -if (x_22 == 0) -{ -lean_object* x_23; lean_object* x_24; lean_object* x_25; -x_23 = lean_ctor_get(x_21, 0); -x_24 = lean_ctor_get(x_16, 7); -lean_inc(x_24); -lean_dec(x_16); -x_25 = l_Lean_mkAppB(x_24, x_19, x_23); -lean_ctor_set(x_21, 0, x_25); -return x_21; -} -else -{ -lean_object* x_26; lean_object* x_27; lean_object* x_28; lean_object* x_29; lean_object* x_30; -x_26 = lean_ctor_get(x_21, 0); -x_27 = lean_ctor_get(x_21, 1); -lean_inc(x_27); -lean_inc(x_26); -lean_dec(x_21); -x_28 = lean_ctor_get(x_16, 7); -lean_inc(x_28); -lean_dec(x_16); -x_29 = l_Lean_mkAppB(x_28, x_19, x_26); -x_30 = lean_alloc_ctor(0, 2, 0); -lean_ctor_set(x_30, 0, x_29); -lean_ctor_set(x_30, 1, x_27); -return x_30; -} -} -else -{ -uint8_t x_31; -lean_dec(x_19); -lean_dec(x_16); -x_31 = !lean_is_exclusive(x_21); -if (x_31 == 0) -{ -return x_21; -} -else -{ -lean_object* x_32; lean_object* x_33; lean_object* x_34; -x_32 = lean_ctor_get(x_21, 0); -x_33 = lean_ctor_get(x_21, 1); -lean_inc(x_33); -lean_inc(x_32); -lean_dec(x_21); -x_34 = lean_alloc_ctor(1, 2, 0); -lean_ctor_set(x_34, 0, x_32); -lean_ctor_set(x_34, 1, x_33); -return x_34; -} -} -} -else -{ -uint8_t x_35; -lean_dec(x_16); -lean_dec(x_2); -x_35 = !lean_is_exclusive(x_18); -if (x_35 == 0) -{ -return x_18; -} -else -{ -lean_object* x_36; lean_object* x_37; lean_object* x_38; -x_36 = lean_ctor_get(x_18, 0); -x_37 = lean_ctor_get(x_18, 1); -lean_inc(x_37); -lean_inc(x_36); -lean_dec(x_18); -x_38 = lean_alloc_ctor(1, 2, 0); -lean_ctor_set(x_38, 0, x_36); -lean_ctor_set(x_38, 1, x_37); -return x_38; -} -} -} -else -{ -uint8_t x_39; -lean_dec(x_2); -x_39 = !lean_is_exclusive(x_15); -if (x_39 == 0) -{ -return x_15; -} -else -{ -lean_object* x_40; lean_object* x_41; lean_object* x_42; -x_40 = lean_ctor_get(x_15, 0); -x_41 = lean_ctor_get(x_15, 1); -lean_inc(x_41); -lean_inc(x_40); -lean_dec(x_15); -x_42 = lean_alloc_ctor(1, 2, 0); -lean_ctor_set(x_42, 0, x_40); -lean_ctor_set(x_42, 1, x_41); -return x_42; -} -} -} -else -{ -lean_object* x_43; -x_43 = l_Lean_Grind_CommRing_Mon_denoteExpr(x_2, x_3, x_4, x_5, x_6, x_7, x_8, x_9, x_10, x_11, x_12); -return x_43; -} -} -} -LEAN_EXPORT lean_object* l_Lean_Grind_CommRing_Poly_denoteExpr_denoteTerm___boxed(lean_object* x_1, lean_object* x_2, lean_object* x_3, lean_object* x_4, lean_object* x_5, lean_object* x_6, lean_object* x_7, lean_object* x_8, lean_object* x_9, lean_object* 
x_10, lean_object* x_11, lean_object* x_12) { -_start: -{ -lean_object* x_13; -x_13 = l_Lean_Grind_CommRing_Poly_denoteExpr_denoteTerm(x_1, x_2, x_3, x_4, x_5, x_6, x_7, x_8, x_9, x_10, x_11, x_12); -lean_dec(x_11); -lean_dec(x_10); -lean_dec(x_9); -lean_dec(x_8); -lean_dec(x_7); -lean_dec(x_6); -lean_dec(x_5); -lean_dec(x_4); -lean_dec(x_3); -lean_dec(x_1); -return x_13; -} -} -LEAN_EXPORT lean_object* l_Lean_Grind_CommRing_Poly_denoteExpr_go(lean_object* x_1, lean_object* x_2, lean_object* x_3, lean_object* x_4, lean_object* x_5, lean_object* x_6, lean_object* x_7, lean_object* x_8, lean_object* x_9, lean_object* x_10, lean_object* x_11, lean_object* x_12) { -_start: -{ -if (lean_obj_tag(x_1) == 0) -{ -lean_object* x_13; lean_object* x_14; uint8_t x_15; -x_13 = lean_ctor_get(x_1, 0); -lean_inc(x_13); -lean_dec(x_1); -x_14 = l___private_Lean_Meta_Tactic_Grind_Arith_CommRing_DenoteExpr_0__Lean_Meta_Grind_Arith_CommRing_denoteNum___closed__8; -x_15 = lean_int_dec_eq(x_13, x_14); -if (x_15 == 0) -{ -lean_object* x_16; -x_16 = l_Lean_Meta_Grind_Arith_CommRing_getRing(x_3, x_4, x_5, x_6, x_7, x_8, x_9, x_10, x_11, x_12); -if (lean_obj_tag(x_16) == 0) -{ -lean_object* x_17; lean_object* x_18; lean_object* x_19; -x_17 = lean_ctor_get(x_16, 0); -lean_inc(x_17); -x_18 = lean_ctor_get(x_16, 1); -lean_inc(x_18); -lean_dec(x_16); -x_19 = l___private_Lean_Meta_Tactic_Grind_Arith_CommRing_DenoteExpr_0__Lean_Meta_Grind_Arith_CommRing_denoteNum(x_13, x_3, x_4, x_5, x_6, x_7, x_8, x_9, x_10, x_11, x_18); -lean_dec(x_13); -if (lean_obj_tag(x_19) == 0) -{ -uint8_t x_20; -x_20 = !lean_is_exclusive(x_19); -if (x_20 == 0) -{ -lean_object* x_21; lean_object* x_22; lean_object* x_23; -x_21 = lean_ctor_get(x_19, 0); -x_22 = lean_ctor_get(x_17, 6); -lean_inc(x_22); -lean_dec(x_17); -x_23 = l_Lean_mkAppB(x_22, x_2, x_21); -lean_ctor_set(x_19, 0, x_23); -return x_19; -} -else -{ -lean_object* x_24; lean_object* x_25; lean_object* x_26; lean_object* x_27; lean_object* x_28; -x_24 = lean_ctor_get(x_19, 0); -x_25 = lean_ctor_get(x_19, 1); -lean_inc(x_25); -lean_inc(x_24); -lean_dec(x_19); -x_26 = lean_ctor_get(x_17, 6); -lean_inc(x_26); -lean_dec(x_17); -x_27 = l_Lean_mkAppB(x_26, x_2, x_24); -x_28 = lean_alloc_ctor(0, 2, 0); -lean_ctor_set(x_28, 0, x_27); -lean_ctor_set(x_28, 1, x_25); -return x_28; -} -} -else -{ -uint8_t x_29; -lean_dec(x_17); -lean_dec(x_2); -x_29 = !lean_is_exclusive(x_19); -if (x_29 == 0) -{ -return x_19; -} -else -{ -lean_object* x_30; lean_object* x_31; lean_object* x_32; -x_30 = lean_ctor_get(x_19, 0); -x_31 = lean_ctor_get(x_19, 1); -lean_inc(x_31); -lean_inc(x_30); -lean_dec(x_19); -x_32 = lean_alloc_ctor(1, 2, 0); -lean_ctor_set(x_32, 0, x_30); -lean_ctor_set(x_32, 1, x_31); -return x_32; -} -} -} -else -{ -uint8_t x_33; -lean_dec(x_13); -lean_dec(x_2); -x_33 = !lean_is_exclusive(x_16); -if (x_33 == 0) -{ -return x_16; -} -else -{ -lean_object* x_34; lean_object* x_35; lean_object* x_36; -x_34 = lean_ctor_get(x_16, 0); -x_35 = lean_ctor_get(x_16, 1); -lean_inc(x_35); -lean_inc(x_34); -lean_dec(x_16); -x_36 = lean_alloc_ctor(1, 2, 0); -lean_ctor_set(x_36, 0, x_34); -lean_ctor_set(x_36, 1, x_35); -return x_36; -} -} -} -else -{ -lean_object* x_37; -lean_dec(x_13); -x_37 = lean_alloc_ctor(0, 2, 0); -lean_ctor_set(x_37, 0, x_2); -lean_ctor_set(x_37, 1, x_12); -return x_37; -} -} -else -{ -lean_object* x_38; lean_object* x_39; lean_object* x_40; lean_object* x_41; -x_38 = lean_ctor_get(x_1, 0); -lean_inc(x_38); -x_39 = lean_ctor_get(x_1, 1); -lean_inc(x_39); -x_40 = lean_ctor_get(x_1, 2); 
-lean_inc(x_40); -lean_dec(x_1); -x_41 = l_Lean_Meta_Grind_Arith_CommRing_getRing(x_3, x_4, x_5, x_6, x_7, x_8, x_9, x_10, x_11, x_12); -if (lean_obj_tag(x_41) == 0) -{ -lean_object* x_42; lean_object* x_43; lean_object* x_44; -x_42 = lean_ctor_get(x_41, 0); -lean_inc(x_42); -x_43 = lean_ctor_get(x_41, 1); -lean_inc(x_43); -lean_dec(x_41); -x_44 = l_Lean_Grind_CommRing_Poly_denoteExpr_denoteTerm(x_38, x_39, x_3, x_4, x_5, x_6, x_7, x_8, x_9, x_10, x_11, x_43); -lean_dec(x_38); -if (lean_obj_tag(x_44) == 0) -{ -lean_object* x_45; lean_object* x_46; lean_object* x_47; lean_object* x_48; -x_45 = lean_ctor_get(x_44, 0); -lean_inc(x_45); -x_46 = lean_ctor_get(x_44, 1); -lean_inc(x_46); -lean_dec(x_44); -x_47 = lean_ctor_get(x_42, 6); -lean_inc(x_47); -lean_dec(x_42); -x_48 = l_Lean_mkAppB(x_47, x_2, x_45); -x_1 = x_40; -x_2 = x_48; -x_12 = x_46; -goto _start; -} -else -{ -uint8_t x_50; -lean_dec(x_42); -lean_dec(x_40); -lean_dec(x_2); -x_50 = !lean_is_exclusive(x_44); -if (x_50 == 0) -{ -return x_44; -} -else -{ -lean_object* x_51; lean_object* x_52; lean_object* x_53; -x_51 = lean_ctor_get(x_44, 0); -x_52 = lean_ctor_get(x_44, 1); -lean_inc(x_52); -lean_inc(x_51); -lean_dec(x_44); -x_53 = lean_alloc_ctor(1, 2, 0); -lean_ctor_set(x_53, 0, x_51); -lean_ctor_set(x_53, 1, x_52); -return x_53; -} -} -} -else -{ -uint8_t x_54; -lean_dec(x_40); -lean_dec(x_39); -lean_dec(x_38); -lean_dec(x_2); -x_54 = !lean_is_exclusive(x_41); -if (x_54 == 0) -{ -return x_41; -} -else -{ -lean_object* x_55; lean_object* x_56; lean_object* x_57; -x_55 = lean_ctor_get(x_41, 0); -x_56 = lean_ctor_get(x_41, 1); -lean_inc(x_56); -lean_inc(x_55); -lean_dec(x_41); -x_57 = lean_alloc_ctor(1, 2, 0); -lean_ctor_set(x_57, 0, x_55); -lean_ctor_set(x_57, 1, x_56); -return x_57; -} -} -} -} -} -LEAN_EXPORT lean_object* l_Lean_Grind_CommRing_Poly_denoteExpr_go___boxed(lean_object* x_1, lean_object* x_2, lean_object* x_3, lean_object* x_4, lean_object* x_5, lean_object* x_6, lean_object* x_7, lean_object* x_8, lean_object* x_9, lean_object* x_10, lean_object* x_11, lean_object* x_12) { -_start: -{ -lean_object* x_13; -x_13 = l_Lean_Grind_CommRing_Poly_denoteExpr_go(x_1, x_2, x_3, x_4, x_5, x_6, x_7, x_8, x_9, x_10, x_11, x_12); -lean_dec(x_11); -lean_dec(x_10); -lean_dec(x_9); -lean_dec(x_8); -lean_dec(x_7); -lean_dec(x_6); -lean_dec(x_5); -lean_dec(x_4); -lean_dec(x_3); -return x_13; -} -} -LEAN_EXPORT lean_object* l_Lean_Grind_CommRing_Poly_denoteExpr(lean_object* x_1, lean_object* x_2, lean_object* x_3, lean_object* x_4, lean_object* x_5, lean_object* x_6, lean_object* x_7, lean_object* x_8, lean_object* x_9, lean_object* x_10, lean_object* x_11) { -_start: -{ -if (lean_obj_tag(x_1) == 0) -{ -lean_object* x_12; lean_object* x_13; -x_12 = lean_ctor_get(x_1, 0); -lean_inc(x_12); -lean_dec(x_1); -x_13 = l___private_Lean_Meta_Tactic_Grind_Arith_CommRing_DenoteExpr_0__Lean_Meta_Grind_Arith_CommRing_denoteNum(x_12, x_2, x_3, x_4, x_5, x_6, x_7, x_8, x_9, x_10, x_11); -lean_dec(x_12); -return x_13; -} -else -{ -lean_object* x_14; lean_object* x_15; lean_object* x_16; lean_object* x_17; -x_14 = lean_ctor_get(x_1, 0); -lean_inc(x_14); -x_15 = lean_ctor_get(x_1, 1); -lean_inc(x_15); -x_16 = lean_ctor_get(x_1, 2); -lean_inc(x_16); -lean_dec(x_1); -x_17 = l_Lean_Grind_CommRing_Poly_denoteExpr_denoteTerm(x_14, x_15, x_2, x_3, x_4, x_5, x_6, x_7, x_8, x_9, x_10, x_11); -lean_dec(x_14); -if (lean_obj_tag(x_17) == 0) -{ -lean_object* x_18; lean_object* x_19; lean_object* x_20; -x_18 = lean_ctor_get(x_17, 0); -lean_inc(x_18); -x_19 = 
lean_ctor_get(x_17, 1); -lean_inc(x_19); -lean_dec(x_17); -x_20 = l_Lean_Grind_CommRing_Poly_denoteExpr_go(x_16, x_18, x_2, x_3, x_4, x_5, x_6, x_7, x_8, x_9, x_10, x_19); -return x_20; -} -else -{ -uint8_t x_21; -lean_dec(x_16); -x_21 = !lean_is_exclusive(x_17); -if (x_21 == 0) -{ -return x_17; -} -else -{ -lean_object* x_22; lean_object* x_23; lean_object* x_24; -x_22 = lean_ctor_get(x_17, 0); -x_23 = lean_ctor_get(x_17, 1); -lean_inc(x_23); -lean_inc(x_22); -lean_dec(x_17); -x_24 = lean_alloc_ctor(1, 2, 0); -lean_ctor_set(x_24, 0, x_22); -lean_ctor_set(x_24, 1, x_23); -return x_24; -} -} -} -} -} -LEAN_EXPORT lean_object* l_Lean_Grind_CommRing_Poly_denoteExpr___boxed(lean_object* x_1, lean_object* x_2, lean_object* x_3, lean_object* x_4, lean_object* x_5, lean_object* x_6, lean_object* x_7, lean_object* x_8, lean_object* x_9, lean_object* x_10, lean_object* x_11) { -_start: -{ -lean_object* x_12; -x_12 = l_Lean_Grind_CommRing_Poly_denoteExpr(x_1, x_2, x_3, x_4, x_5, x_6, x_7, x_8, x_9, x_10, x_11); -lean_dec(x_10); -lean_dec(x_9); -lean_dec(x_8); -lean_dec(x_7); -lean_dec(x_6); -lean_dec(x_5); -lean_dec(x_4); -lean_dec(x_3); -lean_dec(x_2); -return x_12; -} -} -LEAN_EXPORT lean_object* l_Lean_Grind_CommRing_Expr_denoteExpr_go(lean_object* x_1, lean_object* x_2, lean_object* x_3, lean_object* x_4, lean_object* x_5, lean_object* x_6, lean_object* x_7, lean_object* x_8, lean_object* x_9, lean_object* x_10, lean_object* x_11) { -_start: -{ -switch (lean_obj_tag(x_1)) { -case 0: -{ -lean_object* x_12; lean_object* x_13; -x_12 = lean_ctor_get(x_1, 0); -lean_inc(x_12); -lean_dec(x_1); -x_13 = l___private_Lean_Meta_Tactic_Grind_Arith_CommRing_DenoteExpr_0__Lean_Meta_Grind_Arith_CommRing_denoteNum(x_12, x_2, x_3, x_4, x_5, x_6, x_7, x_8, x_9, x_10, x_11); -lean_dec(x_12); -return x_13; -} -case 1: -{ -lean_object* x_14; lean_object* x_15; -x_14 = lean_ctor_get(x_1, 0); -lean_inc(x_14); -lean_dec(x_1); -x_15 = l_Lean_Meta_Grind_Arith_CommRing_getRing(x_2, x_3, x_4, x_5, x_6, x_7, x_8, x_9, x_10, x_11); -if (lean_obj_tag(x_15) == 0) -{ -uint8_t x_16; -x_16 = !lean_is_exclusive(x_15); -if (x_16 == 0) -{ -lean_object* x_17; lean_object* x_18; lean_object* x_19; uint8_t x_20; -x_17 = lean_ctor_get(x_15, 0); -x_18 = lean_ctor_get(x_17, 13); -lean_inc(x_18); -lean_dec(x_17); -x_19 = lean_ctor_get(x_18, 2); -lean_inc(x_19); -x_20 = lean_nat_dec_lt(x_14, x_19); -lean_dec(x_19); -if (x_20 == 0) -{ -lean_object* x_21; lean_object* x_22; -lean_dec(x_18); -lean_dec(x_14); -x_21 = l_Lean_instInhabitedExpr; -x_22 = l_outOfBounds___rarg(x_21); -lean_ctor_set(x_15, 0, x_22); -return x_15; -} -else -{ -lean_object* x_23; lean_object* x_24; -x_23 = l_Lean_instInhabitedExpr; -x_24 = l_Lean_PersistentArray_get_x21___rarg(x_23, x_18, x_14); -lean_dec(x_14); -lean_ctor_set(x_15, 0, x_24); -return x_15; -} -} -else -{ -lean_object* x_25; lean_object* x_26; lean_object* x_27; lean_object* x_28; uint8_t x_29; -x_25 = lean_ctor_get(x_15, 0); -x_26 = lean_ctor_get(x_15, 1); -lean_inc(x_26); -lean_inc(x_25); -lean_dec(x_15); -x_27 = lean_ctor_get(x_25, 13); -lean_inc(x_27); -lean_dec(x_25); -x_28 = lean_ctor_get(x_27, 2); -lean_inc(x_28); -x_29 = lean_nat_dec_lt(x_14, x_28); -lean_dec(x_28); -if (x_29 == 0) -{ -lean_object* x_30; lean_object* x_31; lean_object* x_32; -lean_dec(x_27); -lean_dec(x_14); -x_30 = l_Lean_instInhabitedExpr; -x_31 = l_outOfBounds___rarg(x_30); -x_32 = lean_alloc_ctor(0, 2, 0); -lean_ctor_set(x_32, 0, x_31); -lean_ctor_set(x_32, 1, x_26); -return x_32; -} -else -{ -lean_object* x_33; lean_object* 
x_34; lean_object* x_35; -x_33 = l_Lean_instInhabitedExpr; -x_34 = l_Lean_PersistentArray_get_x21___rarg(x_33, x_27, x_14); -lean_dec(x_14); -x_35 = lean_alloc_ctor(0, 2, 0); -lean_ctor_set(x_35, 0, x_34); -lean_ctor_set(x_35, 1, x_26); -return x_35; -} -} -} -else -{ -uint8_t x_36; -lean_dec(x_14); -x_36 = !lean_is_exclusive(x_15); -if (x_36 == 0) -{ -return x_15; -} -else -{ -lean_object* x_37; lean_object* x_38; lean_object* x_39; -x_37 = lean_ctor_get(x_15, 0); -x_38 = lean_ctor_get(x_15, 1); -lean_inc(x_38); -lean_inc(x_37); -lean_dec(x_15); -x_39 = lean_alloc_ctor(1, 2, 0); -lean_ctor_set(x_39, 0, x_37); -lean_ctor_set(x_39, 1, x_38); -return x_39; -} -} -} -case 2: -{ -lean_object* x_40; lean_object* x_41; -x_40 = lean_ctor_get(x_1, 0); -lean_inc(x_40); -lean_dec(x_1); -x_41 = l_Lean_Meta_Grind_Arith_CommRing_getRing(x_2, x_3, x_4, x_5, x_6, x_7, x_8, x_9, x_10, x_11); -if (lean_obj_tag(x_41) == 0) -{ -lean_object* x_42; lean_object* x_43; lean_object* x_44; -x_42 = lean_ctor_get(x_41, 0); -lean_inc(x_42); -x_43 = lean_ctor_get(x_41, 1); -lean_inc(x_43); -lean_dec(x_41); -x_44 = l_Lean_Grind_CommRing_Expr_denoteExpr_go(x_40, x_2, x_3, x_4, x_5, x_6, x_7, x_8, x_9, x_10, x_43); -if (lean_obj_tag(x_44) == 0) -{ -uint8_t x_45; -x_45 = !lean_is_exclusive(x_44); -if (x_45 == 0) -{ -lean_object* x_46; lean_object* x_47; lean_object* x_48; -x_46 = lean_ctor_get(x_44, 0); -x_47 = lean_ctor_get(x_42, 9); -lean_inc(x_47); -lean_dec(x_42); -x_48 = l_Lean_Expr_app___override(x_47, x_46); -lean_ctor_set(x_44, 0, x_48); -return x_44; -} -else -{ -lean_object* x_49; lean_object* x_50; lean_object* x_51; lean_object* x_52; lean_object* x_53; -x_49 = lean_ctor_get(x_44, 0); -x_50 = lean_ctor_get(x_44, 1); -lean_inc(x_50); -lean_inc(x_49); -lean_dec(x_44); -x_51 = lean_ctor_get(x_42, 9); -lean_inc(x_51); -lean_dec(x_42); -x_52 = l_Lean_Expr_app___override(x_51, x_49); -x_53 = lean_alloc_ctor(0, 2, 0); -lean_ctor_set(x_53, 0, x_52); -lean_ctor_set(x_53, 1, x_50); -return x_53; -} -} -else -{ -uint8_t x_54; -lean_dec(x_42); -x_54 = !lean_is_exclusive(x_44); -if (x_54 == 0) -{ -return x_44; -} -else -{ -lean_object* x_55; lean_object* x_56; lean_object* x_57; -x_55 = lean_ctor_get(x_44, 0); -x_56 = lean_ctor_get(x_44, 1); -lean_inc(x_56); -lean_inc(x_55); -lean_dec(x_44); -x_57 = lean_alloc_ctor(1, 2, 0); -lean_ctor_set(x_57, 0, x_55); -lean_ctor_set(x_57, 1, x_56); -return x_57; -} -} -} -else -{ -uint8_t x_58; -lean_dec(x_40); -x_58 = !lean_is_exclusive(x_41); -if (x_58 == 0) -{ -return x_41; -} -else -{ -lean_object* x_59; lean_object* x_60; lean_object* x_61; -x_59 = lean_ctor_get(x_41, 0); -x_60 = lean_ctor_get(x_41, 1); -lean_inc(x_60); -lean_inc(x_59); -lean_dec(x_41); -x_61 = lean_alloc_ctor(1, 2, 0); -lean_ctor_set(x_61, 0, x_59); -lean_ctor_set(x_61, 1, x_60); -return x_61; -} -} -} -case 3: -{ -lean_object* x_62; lean_object* x_63; lean_object* x_64; -x_62 = lean_ctor_get(x_1, 0); -lean_inc(x_62); -x_63 = lean_ctor_get(x_1, 1); -lean_inc(x_63); -lean_dec(x_1); -x_64 = l_Lean_Meta_Grind_Arith_CommRing_getRing(x_2, x_3, x_4, x_5, x_6, x_7, x_8, x_9, x_10, x_11); -if (lean_obj_tag(x_64) == 0) -{ -lean_object* x_65; lean_object* x_66; lean_object* x_67; -x_65 = lean_ctor_get(x_64, 0); -lean_inc(x_65); -x_66 = lean_ctor_get(x_64, 1); -lean_inc(x_66); -lean_dec(x_64); -x_67 = l_Lean_Grind_CommRing_Expr_denoteExpr_go(x_62, x_2, x_3, x_4, x_5, x_6, x_7, x_8, x_9, x_10, x_66); -if (lean_obj_tag(x_67) == 0) -{ -lean_object* x_68; lean_object* x_69; lean_object* x_70; -x_68 = lean_ctor_get(x_67, 0); 
-lean_inc(x_68); -x_69 = lean_ctor_get(x_67, 1); -lean_inc(x_69); -lean_dec(x_67); -x_70 = l_Lean_Grind_CommRing_Expr_denoteExpr_go(x_63, x_2, x_3, x_4, x_5, x_6, x_7, x_8, x_9, x_10, x_69); -if (lean_obj_tag(x_70) == 0) -{ -uint8_t x_71; -x_71 = !lean_is_exclusive(x_70); -if (x_71 == 0) -{ -lean_object* x_72; lean_object* x_73; lean_object* x_74; -x_72 = lean_ctor_get(x_70, 0); -x_73 = lean_ctor_get(x_65, 6); -lean_inc(x_73); -lean_dec(x_65); -x_74 = l_Lean_mkAppB(x_73, x_68, x_72); -lean_ctor_set(x_70, 0, x_74); -return x_70; -} -else -{ -lean_object* x_75; lean_object* x_76; lean_object* x_77; lean_object* x_78; lean_object* x_79; -x_75 = lean_ctor_get(x_70, 0); -x_76 = lean_ctor_get(x_70, 1); -lean_inc(x_76); -lean_inc(x_75); -lean_dec(x_70); -x_77 = lean_ctor_get(x_65, 6); -lean_inc(x_77); -lean_dec(x_65); -x_78 = l_Lean_mkAppB(x_77, x_68, x_75); -x_79 = lean_alloc_ctor(0, 2, 0); -lean_ctor_set(x_79, 0, x_78); -lean_ctor_set(x_79, 1, x_76); -return x_79; -} -} -else -{ -uint8_t x_80; -lean_dec(x_68); -lean_dec(x_65); -x_80 = !lean_is_exclusive(x_70); -if (x_80 == 0) -{ -return x_70; -} -else -{ -lean_object* x_81; lean_object* x_82; lean_object* x_83; -x_81 = lean_ctor_get(x_70, 0); -x_82 = lean_ctor_get(x_70, 1); -lean_inc(x_82); -lean_inc(x_81); -lean_dec(x_70); -x_83 = lean_alloc_ctor(1, 2, 0); -lean_ctor_set(x_83, 0, x_81); -lean_ctor_set(x_83, 1, x_82); -return x_83; -} -} -} -else -{ -uint8_t x_84; -lean_dec(x_65); -lean_dec(x_63); -x_84 = !lean_is_exclusive(x_67); -if (x_84 == 0) -{ -return x_67; -} -else -{ -lean_object* x_85; lean_object* x_86; lean_object* x_87; -x_85 = lean_ctor_get(x_67, 0); -x_86 = lean_ctor_get(x_67, 1); -lean_inc(x_86); -lean_inc(x_85); -lean_dec(x_67); -x_87 = lean_alloc_ctor(1, 2, 0); -lean_ctor_set(x_87, 0, x_85); -lean_ctor_set(x_87, 1, x_86); -return x_87; -} -} -} -else -{ -uint8_t x_88; -lean_dec(x_63); -lean_dec(x_62); -x_88 = !lean_is_exclusive(x_64); -if (x_88 == 0) -{ -return x_64; } -else +LEAN_EXPORT lean_object* l_Lean_Grind_CommRing_Expr_denoteExpr_go___rarg___lambda__10(lean_object* x_1, lean_object* x_2, lean_object* x_3, lean_object* x_4, lean_object* x_5, lean_object* x_6) { +_start: { -lean_object* x_89; lean_object* x_90; lean_object* x_91; -x_89 = lean_ctor_get(x_64, 0); -x_90 = lean_ctor_get(x_64, 1); -lean_inc(x_90); -lean_inc(x_89); -lean_dec(x_64); -x_91 = lean_alloc_ctor(1, 2, 0); -lean_ctor_set(x_91, 0, x_89); -lean_ctor_set(x_91, 1, x_90); -return x_91; -} -} -} -case 4: +lean_object* x_7; lean_object* x_8; lean_object* x_9; +lean_inc(x_2); +lean_inc(x_1); +x_7 = l_Lean_Grind_CommRing_Expr_denoteExpr_go___rarg(x_1, x_2, x_3); +lean_inc(x_5); +x_8 = lean_alloc_closure((void*)(l_Lean_Grind_CommRing_Expr_denoteExpr_go___rarg___lambda__9), 6, 5); +lean_closure_set(x_8, 0, x_1); +lean_closure_set(x_8, 1, x_2); +lean_closure_set(x_8, 2, x_4); +lean_closure_set(x_8, 3, x_6); +lean_closure_set(x_8, 4, x_5); +x_9 = lean_apply_4(x_5, lean_box(0), lean_box(0), x_7, x_8); +return x_9; +} +} +LEAN_EXPORT lean_object* l_Lean_Grind_CommRing_Expr_denoteExpr_go___rarg___lambda__11(lean_object* x_1, lean_object* x_2, lean_object* x_3, lean_object* x_4) { +_start: { -lean_object* x_92; lean_object* x_93; lean_object* x_94; -x_92 = lean_ctor_get(x_1, 0); -lean_inc(x_92); -x_93 = lean_ctor_get(x_1, 1); -lean_inc(x_93); +lean_object* x_5; lean_object* x_6; lean_object* x_7; lean_object* x_8; lean_object* x_9; lean_object* x_10; +x_5 = lean_ctor_get(x_1, 0); +lean_inc(x_5); lean_dec(x_1); -x_94 = l_Lean_Meta_Grind_Arith_CommRing_getRing(x_2, x_3, 
x_4, x_5, x_6, x_7, x_8, x_9, x_10, x_11); -if (lean_obj_tag(x_94) == 0) -{ -lean_object* x_95; lean_object* x_96; lean_object* x_97; -x_95 = lean_ctor_get(x_94, 0); -lean_inc(x_95); -x_96 = lean_ctor_get(x_94, 1); -lean_inc(x_96); -lean_dec(x_94); -x_97 = l_Lean_Grind_CommRing_Expr_denoteExpr_go(x_92, x_2, x_3, x_4, x_5, x_6, x_7, x_8, x_9, x_10, x_96); -if (lean_obj_tag(x_97) == 0) -{ -lean_object* x_98; lean_object* x_99; lean_object* x_100; -x_98 = lean_ctor_get(x_97, 0); -lean_inc(x_98); -x_99 = lean_ctor_get(x_97, 1); -lean_inc(x_99); -lean_dec(x_97); -x_100 = l_Lean_Grind_CommRing_Expr_denoteExpr_go(x_93, x_2, x_3, x_4, x_5, x_6, x_7, x_8, x_9, x_10, x_99); -if (lean_obj_tag(x_100) == 0) -{ -uint8_t x_101; -x_101 = !lean_is_exclusive(x_100); -if (x_101 == 0) -{ -lean_object* x_102; lean_object* x_103; lean_object* x_104; -x_102 = lean_ctor_get(x_100, 0); -x_103 = lean_ctor_get(x_95, 8); -lean_inc(x_103); -lean_dec(x_95); -x_104 = l_Lean_mkAppB(x_103, x_98, x_102); -lean_ctor_set(x_100, 0, x_104); -return x_100; -} -else -{ -lean_object* x_105; lean_object* x_106; lean_object* x_107; lean_object* x_108; lean_object* x_109; -x_105 = lean_ctor_get(x_100, 0); -x_106 = lean_ctor_get(x_100, 1); -lean_inc(x_106); -lean_inc(x_105); -lean_dec(x_100); -x_107 = lean_ctor_get(x_95, 8); -lean_inc(x_107); -lean_dec(x_95); -x_108 = l_Lean_mkAppB(x_107, x_98, x_105); -x_109 = lean_alloc_ctor(0, 2, 0); -lean_ctor_set(x_109, 0, x_108); -lean_ctor_set(x_109, 1, x_106); -return x_109; -} -} -else -{ -uint8_t x_110; -lean_dec(x_98); -lean_dec(x_95); -x_110 = !lean_is_exclusive(x_100); -if (x_110 == 0) -{ -return x_100; -} -else -{ -lean_object* x_111; lean_object* x_112; lean_object* x_113; -x_111 = lean_ctor_get(x_100, 0); -x_112 = lean_ctor_get(x_100, 1); -lean_inc(x_112); -lean_inc(x_111); -lean_dec(x_100); -x_113 = lean_alloc_ctor(1, 2, 0); -lean_ctor_set(x_113, 0, x_111); -lean_ctor_set(x_113, 1, x_112); -return x_113; -} -} +x_6 = lean_ctor_get(x_5, 1); +lean_inc(x_6); +lean_dec(x_5); +x_7 = lean_ctor_get(x_2, 10); +lean_inc(x_7); +lean_dec(x_2); +x_8 = l_Lean_mkNatLit(x_3); +x_9 = l_Lean_mkAppB(x_7, x_4, x_8); +x_10 = lean_apply_2(x_6, lean_box(0), x_9); +return x_10; } -else -{ -uint8_t x_114; -lean_dec(x_95); -lean_dec(x_93); -x_114 = !lean_is_exclusive(x_97); -if (x_114 == 0) -{ -return x_97; } -else +LEAN_EXPORT lean_object* l_Lean_Grind_CommRing_Expr_denoteExpr_go___rarg___lambda__12(lean_object* x_1, lean_object* x_2, lean_object* x_3, lean_object* x_4, lean_object* x_5, lean_object* x_6) { +_start: { -lean_object* x_115; lean_object* x_116; lean_object* x_117; -x_115 = lean_ctor_get(x_97, 0); -x_116 = lean_ctor_get(x_97, 1); -lean_inc(x_116); -lean_inc(x_115); -lean_dec(x_97); -x_117 = lean_alloc_ctor(1, 2, 0); -lean_ctor_set(x_117, 0, x_115); -lean_ctor_set(x_117, 1, x_116); -return x_117; +lean_object* x_7; lean_object* x_8; lean_object* x_9; +lean_inc(x_1); +x_7 = l_Lean_Grind_CommRing_Expr_denoteExpr_go___rarg(x_1, x_2, x_3); +x_8 = lean_alloc_closure((void*)(l_Lean_Grind_CommRing_Expr_denoteExpr_go___rarg___lambda__11), 4, 3); +lean_closure_set(x_8, 0, x_1); +lean_closure_set(x_8, 1, x_6); +lean_closure_set(x_8, 2, x_4); +x_9 = lean_apply_4(x_5, lean_box(0), lean_box(0), x_7, x_8); +return x_9; } } -} -else -{ -uint8_t x_118; -lean_dec(x_93); -lean_dec(x_92); -x_118 = !lean_is_exclusive(x_94); -if (x_118 == 0) +LEAN_EXPORT lean_object* l_Lean_Grind_CommRing_Expr_denoteExpr_go___rarg(lean_object* x_1, lean_object* x_2, lean_object* x_3) { +_start: { -return x_94; -} -else +switch 
(lean_obj_tag(x_3)) { +case 0: { -lean_object* x_119; lean_object* x_120; lean_object* x_121; -x_119 = lean_ctor_get(x_94, 0); -x_120 = lean_ctor_get(x_94, 1); -lean_inc(x_120); -lean_inc(x_119); -lean_dec(x_94); -x_121 = lean_alloc_ctor(1, 2, 0); -lean_ctor_set(x_121, 0, x_119); -lean_ctor_set(x_121, 1, x_120); -return x_121; -} -} +lean_object* x_4; lean_object* x_5; +x_4 = lean_ctor_get(x_3, 0); +lean_inc(x_4); +lean_dec(x_3); +x_5 = l___private_Lean_Meta_Tactic_Grind_Arith_CommRing_DenoteExpr_0__Lean_Meta_Grind_Arith_CommRing_denoteNum___rarg(x_1, x_2, x_4); +return x_5; } -case 5: +case 1: { -lean_object* x_122; lean_object* x_123; lean_object* x_124; -x_122 = lean_ctor_get(x_1, 0); -lean_inc(x_122); -x_123 = lean_ctor_get(x_1, 1); -lean_inc(x_123); -lean_dec(x_1); -x_124 = l_Lean_Meta_Grind_Arith_CommRing_getRing(x_2, x_3, x_4, x_5, x_6, x_7, x_8, x_9, x_10, x_11); -if (lean_obj_tag(x_124) == 0) -{ -lean_object* x_125; lean_object* x_126; lean_object* x_127; -x_125 = lean_ctor_get(x_124, 0); -lean_inc(x_125); -x_126 = lean_ctor_get(x_124, 1); -lean_inc(x_126); -lean_dec(x_124); -x_127 = l_Lean_Grind_CommRing_Expr_denoteExpr_go(x_122, x_2, x_3, x_4, x_5, x_6, x_7, x_8, x_9, x_10, x_126); -if (lean_obj_tag(x_127) == 0) -{ -lean_object* x_128; lean_object* x_129; lean_object* x_130; -x_128 = lean_ctor_get(x_127, 0); -lean_inc(x_128); -x_129 = lean_ctor_get(x_127, 1); -lean_inc(x_129); -lean_dec(x_127); -x_130 = l_Lean_Grind_CommRing_Expr_denoteExpr_go(x_123, x_2, x_3, x_4, x_5, x_6, x_7, x_8, x_9, x_10, x_129); -if (lean_obj_tag(x_130) == 0) -{ -uint8_t x_131; -x_131 = !lean_is_exclusive(x_130); -if (x_131 == 0) -{ -lean_object* x_132; lean_object* x_133; lean_object* x_134; -x_132 = lean_ctor_get(x_130, 0); -x_133 = lean_ctor_get(x_125, 7); -lean_inc(x_133); -lean_dec(x_125); -x_134 = l_Lean_mkAppB(x_133, x_128, x_132); -lean_ctor_set(x_130, 0, x_134); -return x_130; +lean_object* x_6; lean_object* x_7; lean_object* x_8; lean_object* x_9; +x_6 = lean_ctor_get(x_3, 0); +lean_inc(x_6); +lean_dec(x_3); +x_7 = lean_ctor_get(x_1, 1); +lean_inc(x_7); +x_8 = lean_alloc_closure((void*)(l_Lean_Grind_CommRing_Expr_denoteExpr_go___rarg___lambda__1___boxed), 3, 2); +lean_closure_set(x_8, 0, x_1); +lean_closure_set(x_8, 1, x_6); +x_9 = lean_apply_4(x_7, lean_box(0), lean_box(0), x_2, x_8); +return x_9; } -else +case 2: { -lean_object* x_135; lean_object* x_136; lean_object* x_137; lean_object* x_138; lean_object* x_139; -x_135 = lean_ctor_get(x_130, 0); -x_136 = lean_ctor_get(x_130, 1); -lean_inc(x_136); -lean_inc(x_135); -lean_dec(x_130); -x_137 = lean_ctor_get(x_125, 7); -lean_inc(x_137); -lean_dec(x_125); -x_138 = l_Lean_mkAppB(x_137, x_128, x_135); -x_139 = lean_alloc_ctor(0, 2, 0); -lean_ctor_set(x_139, 0, x_138); -lean_ctor_set(x_139, 1, x_136); -return x_139; -} +lean_object* x_10; lean_object* x_11; lean_object* x_12; lean_object* x_13; +x_10 = lean_ctor_get(x_3, 0); +lean_inc(x_10); +lean_dec(x_3); +x_11 = lean_ctor_get(x_1, 1); +lean_inc(x_11); +lean_inc(x_11); +lean_inc(x_2); +x_12 = lean_alloc_closure((void*)(l_Lean_Grind_CommRing_Expr_denoteExpr_go___rarg___lambda__3), 5, 4); +lean_closure_set(x_12, 0, x_1); +lean_closure_set(x_12, 1, x_2); +lean_closure_set(x_12, 2, x_10); +lean_closure_set(x_12, 3, x_11); +x_13 = lean_apply_4(x_11, lean_box(0), lean_box(0), x_2, x_12); +return x_13; } -else -{ -uint8_t x_140; -lean_dec(x_128); -lean_dec(x_125); -x_140 = !lean_is_exclusive(x_130); -if (x_140 == 0) +case 3: { -return x_130; +lean_object* x_14; lean_object* x_15; lean_object* x_16; 
lean_object* x_17; lean_object* x_18; +x_14 = lean_ctor_get(x_3, 0); +lean_inc(x_14); +x_15 = lean_ctor_get(x_3, 1); +lean_inc(x_15); +lean_dec(x_3); +x_16 = lean_ctor_get(x_1, 1); +lean_inc(x_16); +lean_inc(x_16); +lean_inc(x_2); +x_17 = lean_alloc_closure((void*)(l_Lean_Grind_CommRing_Expr_denoteExpr_go___rarg___lambda__5), 6, 5); +lean_closure_set(x_17, 0, x_1); +lean_closure_set(x_17, 1, x_2); +lean_closure_set(x_17, 2, x_14); +lean_closure_set(x_17, 3, x_15); +lean_closure_set(x_17, 4, x_16); +x_18 = lean_apply_4(x_16, lean_box(0), lean_box(0), x_2, x_17); +return x_18; } -else +case 4: { -lean_object* x_141; lean_object* x_142; lean_object* x_143; -x_141 = lean_ctor_get(x_130, 0); -x_142 = lean_ctor_get(x_130, 1); -lean_inc(x_142); -lean_inc(x_141); -lean_dec(x_130); -x_143 = lean_alloc_ctor(1, 2, 0); -lean_ctor_set(x_143, 0, x_141); -lean_ctor_set(x_143, 1, x_142); -return x_143; -} -} +lean_object* x_19; lean_object* x_20; lean_object* x_21; lean_object* x_22; lean_object* x_23; +x_19 = lean_ctor_get(x_3, 0); +lean_inc(x_19); +x_20 = lean_ctor_get(x_3, 1); +lean_inc(x_20); +lean_dec(x_3); +x_21 = lean_ctor_get(x_1, 1); +lean_inc(x_21); +lean_inc(x_21); +lean_inc(x_2); +x_22 = lean_alloc_closure((void*)(l_Lean_Grind_CommRing_Expr_denoteExpr_go___rarg___lambda__8), 6, 5); +lean_closure_set(x_22, 0, x_1); +lean_closure_set(x_22, 1, x_2); +lean_closure_set(x_22, 2, x_19); +lean_closure_set(x_22, 3, x_20); +lean_closure_set(x_22, 4, x_21); +x_23 = lean_apply_4(x_21, lean_box(0), lean_box(0), x_2, x_22); +return x_23; } -else -{ -uint8_t x_144; -lean_dec(x_125); -lean_dec(x_123); -x_144 = !lean_is_exclusive(x_127); -if (x_144 == 0) +case 5: { -return x_127; +lean_object* x_24; lean_object* x_25; lean_object* x_26; lean_object* x_27; lean_object* x_28; +x_24 = lean_ctor_get(x_3, 0); +lean_inc(x_24); +x_25 = lean_ctor_get(x_3, 1); +lean_inc(x_25); +lean_dec(x_3); +x_26 = lean_ctor_get(x_1, 1); +lean_inc(x_26); +lean_inc(x_26); +lean_inc(x_2); +x_27 = lean_alloc_closure((void*)(l_Lean_Grind_CommRing_Expr_denoteExpr_go___rarg___lambda__10), 6, 5); +lean_closure_set(x_27, 0, x_1); +lean_closure_set(x_27, 1, x_2); +lean_closure_set(x_27, 2, x_24); +lean_closure_set(x_27, 3, x_25); +lean_closure_set(x_27, 4, x_26); +x_28 = lean_apply_4(x_26, lean_box(0), lean_box(0), x_2, x_27); +return x_28; } -else +default: { -lean_object* x_145; lean_object* x_146; lean_object* x_147; -x_145 = lean_ctor_get(x_127, 0); -x_146 = lean_ctor_get(x_127, 1); -lean_inc(x_146); -lean_inc(x_145); -lean_dec(x_127); -x_147 = lean_alloc_ctor(1, 2, 0); -lean_ctor_set(x_147, 0, x_145); -lean_ctor_set(x_147, 1, x_146); -return x_147; +lean_object* x_29; lean_object* x_30; lean_object* x_31; lean_object* x_32; lean_object* x_33; +x_29 = lean_ctor_get(x_3, 0); +lean_inc(x_29); +x_30 = lean_ctor_get(x_3, 1); +lean_inc(x_30); +lean_dec(x_3); +x_31 = lean_ctor_get(x_1, 1); +lean_inc(x_31); +lean_inc(x_31); +lean_inc(x_2); +x_32 = lean_alloc_closure((void*)(l_Lean_Grind_CommRing_Expr_denoteExpr_go___rarg___lambda__12), 6, 5); +lean_closure_set(x_32, 0, x_1); +lean_closure_set(x_32, 1, x_2); +lean_closure_set(x_32, 2, x_29); +lean_closure_set(x_32, 3, x_30); +lean_closure_set(x_32, 4, x_31); +x_33 = lean_apply_4(x_31, lean_box(0), lean_box(0), x_2, x_32); +return x_33; } } } -else -{ -uint8_t x_148; -lean_dec(x_123); -lean_dec(x_122); -x_148 = !lean_is_exclusive(x_124); -if (x_148 == 0) -{ -return x_124; } -else +LEAN_EXPORT lean_object* l_Lean_Grind_CommRing_Expr_denoteExpr_go(lean_object* x_1) { +_start: { -lean_object* x_149; 
lean_object* x_150; lean_object* x_151; -x_149 = lean_ctor_get(x_124, 0); -x_150 = lean_ctor_get(x_124, 1); -lean_inc(x_150); -lean_inc(x_149); -lean_dec(x_124); -x_151 = lean_alloc_ctor(1, 2, 0); -lean_ctor_set(x_151, 0, x_149); -lean_ctor_set(x_151, 1, x_150); -return x_151; -} -} +lean_object* x_2; +x_2 = lean_alloc_closure((void*)(l_Lean_Grind_CommRing_Expr_denoteExpr_go___rarg), 3, 0); +return x_2; } -default: -{ -lean_object* x_152; lean_object* x_153; lean_object* x_154; -x_152 = lean_ctor_get(x_1, 0); -lean_inc(x_152); -x_153 = lean_ctor_get(x_1, 1); -lean_inc(x_153); -lean_dec(x_1); -x_154 = l_Lean_Meta_Grind_Arith_CommRing_getRing(x_2, x_3, x_4, x_5, x_6, x_7, x_8, x_9, x_10, x_11); -if (lean_obj_tag(x_154) == 0) -{ -lean_object* x_155; lean_object* x_156; lean_object* x_157; -x_155 = lean_ctor_get(x_154, 0); -lean_inc(x_155); -x_156 = lean_ctor_get(x_154, 1); -lean_inc(x_156); -lean_dec(x_154); -x_157 = l_Lean_Grind_CommRing_Expr_denoteExpr_go(x_152, x_2, x_3, x_4, x_5, x_6, x_7, x_8, x_9, x_10, x_156); -if (lean_obj_tag(x_157) == 0) -{ -uint8_t x_158; -x_158 = !lean_is_exclusive(x_157); -if (x_158 == 0) -{ -lean_object* x_159; lean_object* x_160; lean_object* x_161; lean_object* x_162; -x_159 = lean_ctor_get(x_157, 0); -x_160 = lean_ctor_get(x_155, 10); -lean_inc(x_160); -lean_dec(x_155); -x_161 = l_Lean_mkNatLit(x_153); -x_162 = l_Lean_mkAppB(x_160, x_159, x_161); -lean_ctor_set(x_157, 0, x_162); -return x_157; } -else +LEAN_EXPORT lean_object* l_Lean_Grind_CommRing_Expr_denoteExpr_go___rarg___lambda__1___boxed(lean_object* x_1, lean_object* x_2, lean_object* x_3) { +_start: { -lean_object* x_163; lean_object* x_164; lean_object* x_165; lean_object* x_166; lean_object* x_167; lean_object* x_168; -x_163 = lean_ctor_get(x_157, 0); -x_164 = lean_ctor_get(x_157, 1); -lean_inc(x_164); -lean_inc(x_163); -lean_dec(x_157); -x_165 = lean_ctor_get(x_155, 10); -lean_inc(x_165); -lean_dec(x_155); -x_166 = l_Lean_mkNatLit(x_153); -x_167 = l_Lean_mkAppB(x_165, x_163, x_166); -x_168 = lean_alloc_ctor(0, 2, 0); -lean_ctor_set(x_168, 0, x_167); -lean_ctor_set(x_168, 1, x_164); -return x_168; -} +lean_object* x_4; +x_4 = l_Lean_Grind_CommRing_Expr_denoteExpr_go___rarg___lambda__1(x_1, x_2, x_3); +lean_dec(x_2); +return x_4; } -else -{ -uint8_t x_169; -lean_dec(x_155); -lean_dec(x_153); -x_169 = !lean_is_exclusive(x_157); -if (x_169 == 0) -{ -return x_157; } -else +LEAN_EXPORT lean_object* l_Lean_Grind_CommRing_Expr_denoteExpr___rarg(lean_object* x_1, lean_object* x_2, lean_object* x_3) { +_start: { -lean_object* x_170; lean_object* x_171; lean_object* x_172; -x_170 = lean_ctor_get(x_157, 0); -x_171 = lean_ctor_get(x_157, 1); -lean_inc(x_171); -lean_inc(x_170); -lean_dec(x_157); -x_172 = lean_alloc_ctor(1, 2, 0); -lean_ctor_set(x_172, 0, x_170); -lean_ctor_set(x_172, 1, x_171); -return x_172; -} -} +lean_object* x_4; +x_4 = l_Lean_Grind_CommRing_Expr_denoteExpr_go___rarg(x_1, x_2, x_3); +return x_4; } -else -{ -uint8_t x_173; -lean_dec(x_153); -lean_dec(x_152); -x_173 = !lean_is_exclusive(x_154); -if (x_173 == 0) -{ -return x_154; } -else +LEAN_EXPORT lean_object* l_Lean_Grind_CommRing_Expr_denoteExpr(lean_object* x_1) { +_start: { -lean_object* x_174; lean_object* x_175; lean_object* x_176; -x_174 = lean_ctor_get(x_154, 0); -x_175 = lean_ctor_get(x_154, 1); -lean_inc(x_175); -lean_inc(x_174); -lean_dec(x_154); -x_176 = lean_alloc_ctor(1, 2, 0); -lean_ctor_set(x_176, 0, x_174); -lean_ctor_set(x_176, 1, x_175); -return x_176; -} -} -} -} +lean_object* x_2; +x_2 = 
lean_alloc_closure((void*)(l_Lean_Grind_CommRing_Expr_denoteExpr___rarg), 3, 0); +return x_2; } } -LEAN_EXPORT lean_object* l_Lean_Grind_CommRing_Expr_denoteExpr_go___boxed(lean_object* x_1, lean_object* x_2, lean_object* x_3, lean_object* x_4, lean_object* x_5, lean_object* x_6, lean_object* x_7, lean_object* x_8, lean_object* x_9, lean_object* x_10, lean_object* x_11) { +static lean_object* _init_l___private_Lean_Meta_Tactic_Grind_Arith_CommRing_DenoteExpr_0__Lean_Meta_Grind_Arith_CommRing_mkEq___rarg___lambda__1___closed__1() { _start: { -lean_object* x_12; -x_12 = l_Lean_Grind_CommRing_Expr_denoteExpr_go(x_1, x_2, x_3, x_4, x_5, x_6, x_7, x_8, x_9, x_10, x_11); -lean_dec(x_10); -lean_dec(x_9); -lean_dec(x_8); -lean_dec(x_7); -lean_dec(x_6); -lean_dec(x_5); -lean_dec(x_4); -lean_dec(x_3); -lean_dec(x_2); -return x_12; +lean_object* x_1; +x_1 = lean_mk_string_unchecked("Eq", 2, 2); +return x_1; } } -LEAN_EXPORT lean_object* l_Lean_Grind_CommRing_Expr_denoteExpr(lean_object* x_1, lean_object* x_2, lean_object* x_3, lean_object* x_4, lean_object* x_5, lean_object* x_6, lean_object* x_7, lean_object* x_8, lean_object* x_9, lean_object* x_10, lean_object* x_11) { +static lean_object* _init_l___private_Lean_Meta_Tactic_Grind_Arith_CommRing_DenoteExpr_0__Lean_Meta_Grind_Arith_CommRing_mkEq___rarg___lambda__1___closed__2() { _start: { -lean_object* x_12; -x_12 = l_Lean_Grind_CommRing_Expr_denoteExpr_go(x_1, x_2, x_3, x_4, x_5, x_6, x_7, x_8, x_9, x_10, x_11); -return x_12; +lean_object* x_1; lean_object* x_2; lean_object* x_3; +x_1 = lean_box(0); +x_2 = l___private_Lean_Meta_Tactic_Grind_Arith_CommRing_DenoteExpr_0__Lean_Meta_Grind_Arith_CommRing_mkEq___rarg___lambda__1___closed__1; +x_3 = l_Lean_Name_str___override(x_1, x_2); +return x_3; } } -LEAN_EXPORT lean_object* l_Lean_Grind_CommRing_Expr_denoteExpr___boxed(lean_object* x_1, lean_object* x_2, lean_object* x_3, lean_object* x_4, lean_object* x_5, lean_object* x_6, lean_object* x_7, lean_object* x_8, lean_object* x_9, lean_object* x_10, lean_object* x_11) { +LEAN_EXPORT lean_object* l___private_Lean_Meta_Tactic_Grind_Arith_CommRing_DenoteExpr_0__Lean_Meta_Grind_Arith_CommRing_mkEq___rarg___lambda__1(lean_object* x_1, lean_object* x_2, lean_object* x_3, lean_object* x_4) { _start: { -lean_object* x_12; -x_12 = l_Lean_Grind_CommRing_Expr_denoteExpr(x_1, x_2, x_3, x_4, x_5, x_6, x_7, x_8, x_9, x_10, x_11); -lean_dec(x_10); -lean_dec(x_9); -lean_dec(x_8); -lean_dec(x_7); -lean_dec(x_6); +lean_object* x_5; lean_object* x_6; lean_object* x_7; lean_object* x_8; lean_object* x_9; lean_object* x_10; lean_object* x_11; lean_object* x_12; lean_object* x_13; lean_object* x_14; lean_object* x_15; +x_5 = lean_ctor_get(x_1, 0); +lean_inc(x_5); +lean_dec(x_1); +x_6 = lean_ctor_get(x_5, 1); +lean_inc(x_6); lean_dec(x_5); +x_7 = lean_ctor_get(x_4, 2); +lean_inc(x_7); +x_8 = l_Lean_Level_succ___override(x_7); +x_9 = lean_box(0); +x_10 = lean_alloc_ctor(1, 2, 0); +lean_ctor_set(x_10, 0, x_8); +lean_ctor_set(x_10, 1, x_9); +x_11 = l___private_Lean_Meta_Tactic_Grind_Arith_CommRing_DenoteExpr_0__Lean_Meta_Grind_Arith_CommRing_mkEq___rarg___lambda__1___closed__2; +x_12 = l_Lean_Expr_const___override(x_11, x_10); +x_13 = lean_ctor_get(x_4, 1); +lean_inc(x_13); lean_dec(x_4); -lean_dec(x_3); -lean_dec(x_2); -return x_12; +x_14 = l_Lean_mkApp3(x_12, x_13, x_2, x_3); +x_15 = lean_apply_2(x_6, lean_box(0), x_14); +return x_15; } } -LEAN_EXPORT lean_object* l_Lean_Meta_Grind_Arith_CommRing_EqCnstr_denoteExpr(lean_object* x_1, lean_object* x_2, lean_object* x_3, 
lean_object* x_4, lean_object* x_5, lean_object* x_6, lean_object* x_7, lean_object* x_8, lean_object* x_9, lean_object* x_10, lean_object* x_11) { +LEAN_EXPORT lean_object* l___private_Lean_Meta_Tactic_Grind_Arith_CommRing_DenoteExpr_0__Lean_Meta_Grind_Arith_CommRing_mkEq___rarg(lean_object* x_1, lean_object* x_2, lean_object* x_3, lean_object* x_4) { _start: { -lean_object* x_12; lean_object* x_13; -x_12 = lean_ctor_get(x_1, 0); -lean_inc(x_12); -lean_dec(x_1); -x_13 = l_Lean_Grind_CommRing_Poly_denoteExpr(x_12, x_2, x_3, x_4, x_5, x_6, x_7, x_8, x_9, x_10, x_11); -if (lean_obj_tag(x_13) == 0) -{ -lean_object* x_14; lean_object* x_15; lean_object* x_16; lean_object* x_17; -x_14 = lean_ctor_get(x_13, 0); -lean_inc(x_14); -x_15 = lean_ctor_get(x_13, 1); -lean_inc(x_15); -lean_dec(x_13); -x_16 = l___private_Lean_Meta_Tactic_Grind_Arith_CommRing_DenoteExpr_0__Lean_Meta_Grind_Arith_CommRing_denoteNum___closed__8; -x_17 = l___private_Lean_Meta_Tactic_Grind_Arith_CommRing_DenoteExpr_0__Lean_Meta_Grind_Arith_CommRing_denoteNum(x_16, x_2, x_3, x_4, x_5, x_6, x_7, x_8, x_9, x_10, x_15); -if (lean_obj_tag(x_17) == 0) -{ -lean_object* x_18; lean_object* x_19; lean_object* x_20; -x_18 = lean_ctor_get(x_17, 0); -lean_inc(x_18); -x_19 = lean_ctor_get(x_17, 1); -lean_inc(x_19); -lean_dec(x_17); -x_20 = l_Lean_Meta_mkEq(x_14, x_18, x_7, x_8, x_9, x_10, x_19); -return x_20; +lean_object* x_5; lean_object* x_6; lean_object* x_7; +x_5 = lean_ctor_get(x_1, 1); +lean_inc(x_5); +x_6 = lean_alloc_closure((void*)(l___private_Lean_Meta_Tactic_Grind_Arith_CommRing_DenoteExpr_0__Lean_Meta_Grind_Arith_CommRing_mkEq___rarg___lambda__1), 4, 3); +lean_closure_set(x_6, 0, x_1); +lean_closure_set(x_6, 1, x_3); +lean_closure_set(x_6, 2, x_4); +x_7 = lean_apply_4(x_5, lean_box(0), lean_box(0), x_2, x_6); +return x_7; } -else -{ -uint8_t x_21; -lean_dec(x_14); -lean_dec(x_10); -lean_dec(x_9); -lean_dec(x_8); -lean_dec(x_7); -x_21 = !lean_is_exclusive(x_17); -if (x_21 == 0) -{ -return x_17; } -else +LEAN_EXPORT lean_object* l___private_Lean_Meta_Tactic_Grind_Arith_CommRing_DenoteExpr_0__Lean_Meta_Grind_Arith_CommRing_mkEq(lean_object* x_1) { +_start: { -lean_object* x_22; lean_object* x_23; lean_object* x_24; -x_22 = lean_ctor_get(x_17, 0); -x_23 = lean_ctor_get(x_17, 1); -lean_inc(x_23); -lean_inc(x_22); -lean_dec(x_17); -x_24 = lean_alloc_ctor(1, 2, 0); -lean_ctor_set(x_24, 0, x_22); -lean_ctor_set(x_24, 1, x_23); -return x_24; -} -} +lean_object* x_2; +x_2 = lean_alloc_closure((void*)(l___private_Lean_Meta_Tactic_Grind_Arith_CommRing_DenoteExpr_0__Lean_Meta_Grind_Arith_CommRing_mkEq___rarg), 4, 0); +return x_2; } -else -{ -uint8_t x_25; -lean_dec(x_10); -lean_dec(x_9); -lean_dec(x_8); -lean_dec(x_7); -x_25 = !lean_is_exclusive(x_13); -if (x_25 == 0) -{ -return x_13; } -else +LEAN_EXPORT lean_object* l_Lean_Meta_Grind_Arith_CommRing_EqCnstr_denoteExpr___rarg___lambda__1(lean_object* x_1, lean_object* x_2, lean_object* x_3, lean_object* x_4) { +_start: { -lean_object* x_26; lean_object* x_27; lean_object* x_28; -x_26 = lean_ctor_get(x_13, 0); -x_27 = lean_ctor_get(x_13, 1); -lean_inc(x_27); -lean_inc(x_26); -lean_dec(x_13); -x_28 = lean_alloc_ctor(1, 2, 0); -lean_ctor_set(x_28, 0, x_26); -lean_ctor_set(x_28, 1, x_27); -return x_28; +lean_object* x_5; lean_object* x_6; lean_object* x_7; lean_object* x_8; +x_5 = l___private_Lean_Meta_Tactic_Grind_Arith_CommRing_DenoteExpr_0__Lean_Meta_Grind_Arith_CommRing_denoteNum___rarg___lambda__1___closed__8; +lean_inc(x_2); +lean_inc(x_1); +x_6 = 
l___private_Lean_Meta_Tactic_Grind_Arith_CommRing_DenoteExpr_0__Lean_Meta_Grind_Arith_CommRing_denoteNum___rarg(x_1, x_2, x_5); +x_7 = lean_alloc_closure((void*)(l___private_Lean_Meta_Tactic_Grind_Arith_CommRing_DenoteExpr_0__Lean_Meta_Grind_Arith_CommRing_mkEq___rarg), 4, 3); +lean_closure_set(x_7, 0, x_1); +lean_closure_set(x_7, 1, x_2); +lean_closure_set(x_7, 2, x_4); +x_8 = lean_apply_4(x_3, lean_box(0), lean_box(0), x_6, x_7); +return x_8; } } -} -} -LEAN_EXPORT lean_object* l_Lean_Meta_Grind_Arith_CommRing_EqCnstr_denoteExpr___boxed(lean_object* x_1, lean_object* x_2, lean_object* x_3, lean_object* x_4, lean_object* x_5, lean_object* x_6, lean_object* x_7, lean_object* x_8, lean_object* x_9, lean_object* x_10, lean_object* x_11) { +LEAN_EXPORT lean_object* l_Lean_Meta_Grind_Arith_CommRing_EqCnstr_denoteExpr___rarg(lean_object* x_1, lean_object* x_2, lean_object* x_3) { _start: { -lean_object* x_12; -x_12 = l_Lean_Meta_Grind_Arith_CommRing_EqCnstr_denoteExpr(x_1, x_2, x_3, x_4, x_5, x_6, x_7, x_8, x_9, x_10, x_11); -lean_dec(x_6); -lean_dec(x_5); -lean_dec(x_4); +lean_object* x_4; lean_object* x_5; lean_object* x_6; lean_object* x_7; lean_object* x_8; +x_4 = lean_ctor_get(x_1, 1); +lean_inc(x_4); +x_5 = lean_ctor_get(x_3, 0); +lean_inc(x_5); lean_dec(x_3); -lean_dec(x_2); -return x_12; -} -} -LEAN_EXPORT lean_object* l_Lean_Meta_Grind_Arith_CommRing_PolyDerivation_denoteExpr(lean_object* x_1, lean_object* x_2, lean_object* x_3, lean_object* x_4, lean_object* x_5, lean_object* x_6, lean_object* x_7, lean_object* x_8, lean_object* x_9, lean_object* x_10, lean_object* x_11) { +lean_inc(x_2); +lean_inc(x_1); +x_6 = l_Lean_Grind_CommRing_Poly_denoteExpr___rarg(x_1, x_2, x_5); +lean_inc(x_4); +x_7 = lean_alloc_closure((void*)(l_Lean_Meta_Grind_Arith_CommRing_EqCnstr_denoteExpr___rarg___lambda__1), 4, 3); +lean_closure_set(x_7, 0, x_1); +lean_closure_set(x_7, 1, x_2); +lean_closure_set(x_7, 2, x_4); +x_8 = lean_apply_4(x_4, lean_box(0), lean_box(0), x_6, x_7); +return x_8; +} +} +LEAN_EXPORT lean_object* l_Lean_Meta_Grind_Arith_CommRing_EqCnstr_denoteExpr(lean_object* x_1) { _start: { -lean_object* x_12; lean_object* x_13; -x_12 = l_Lean_Meta_Grind_Arith_CommRing_PolyDerivation_p(x_1); -x_13 = l_Lean_Grind_CommRing_Poly_denoteExpr(x_12, x_2, x_3, x_4, x_5, x_6, x_7, x_8, x_9, x_10, x_11); -return x_13; +lean_object* x_2; +x_2 = lean_alloc_closure((void*)(l_Lean_Meta_Grind_Arith_CommRing_EqCnstr_denoteExpr___rarg), 3, 0); +return x_2; } } -LEAN_EXPORT lean_object* l_Lean_Meta_Grind_Arith_CommRing_PolyDerivation_denoteExpr___boxed(lean_object* x_1, lean_object* x_2, lean_object* x_3, lean_object* x_4, lean_object* x_5, lean_object* x_6, lean_object* x_7, lean_object* x_8, lean_object* x_9, lean_object* x_10, lean_object* x_11) { +LEAN_EXPORT lean_object* l_Lean_Meta_Grind_Arith_CommRing_PolyDerivation_denoteExpr___rarg(lean_object* x_1, lean_object* x_2, lean_object* x_3) { _start: { -lean_object* x_12; -x_12 = l_Lean_Meta_Grind_Arith_CommRing_PolyDerivation_denoteExpr(x_1, x_2, x_3, x_4, x_5, x_6, x_7, x_8, x_9, x_10, x_11); -lean_dec(x_10); -lean_dec(x_9); -lean_dec(x_8); -lean_dec(x_7); -lean_dec(x_6); -lean_dec(x_5); -lean_dec(x_4); -lean_dec(x_3); -lean_dec(x_2); -lean_dec(x_1); -return x_12; +lean_object* x_4; lean_object* x_5; +x_4 = l_Lean_Meta_Grind_Arith_CommRing_PolyDerivation_p(x_3); +x_5 = l_Lean_Grind_CommRing_Poly_denoteExpr___rarg(x_1, x_2, x_4); +return x_5; } } -LEAN_EXPORT lean_object* l_Lean_Meta_Grind_Arith_CommRing_DiseqCnstr_denoteExpr(lean_object* x_1, lean_object* x_2, 
lean_object* x_3, lean_object* x_4, lean_object* x_5, lean_object* x_6, lean_object* x_7, lean_object* x_8, lean_object* x_9, lean_object* x_10, lean_object* x_11) { +LEAN_EXPORT lean_object* l_Lean_Meta_Grind_Arith_CommRing_PolyDerivation_denoteExpr(lean_object* x_1) { _start: { -lean_object* x_12; lean_object* x_13; -x_12 = lean_ctor_get(x_1, 4); -x_13 = l_Lean_Meta_Grind_Arith_CommRing_PolyDerivation_denoteExpr(x_12, x_2, x_3, x_4, x_5, x_6, x_7, x_8, x_9, x_10, x_11); -if (lean_obj_tag(x_13) == 0) -{ -lean_object* x_14; lean_object* x_15; lean_object* x_16; lean_object* x_17; -x_14 = lean_ctor_get(x_13, 0); -lean_inc(x_14); -x_15 = lean_ctor_get(x_13, 1); -lean_inc(x_15); -lean_dec(x_13); -x_16 = l___private_Lean_Meta_Tactic_Grind_Arith_CommRing_DenoteExpr_0__Lean_Meta_Grind_Arith_CommRing_denoteNum___closed__8; -x_17 = l___private_Lean_Meta_Tactic_Grind_Arith_CommRing_DenoteExpr_0__Lean_Meta_Grind_Arith_CommRing_denoteNum(x_16, x_2, x_3, x_4, x_5, x_6, x_7, x_8, x_9, x_10, x_15); -if (lean_obj_tag(x_17) == 0) -{ -lean_object* x_18; lean_object* x_19; lean_object* x_20; -x_18 = lean_ctor_get(x_17, 0); -lean_inc(x_18); -x_19 = lean_ctor_get(x_17, 1); -lean_inc(x_19); -lean_dec(x_17); -x_20 = l_Lean_Meta_mkEq(x_14, x_18, x_7, x_8, x_9, x_10, x_19); -if (lean_obj_tag(x_20) == 0) -{ -uint8_t x_21; -x_21 = !lean_is_exclusive(x_20); -if (x_21 == 0) -{ -lean_object* x_22; lean_object* x_23; -x_22 = lean_ctor_get(x_20, 0); -x_23 = l_Lean_mkNot(x_22); -lean_ctor_set(x_20, 0, x_23); -return x_20; -} -else -{ -lean_object* x_24; lean_object* x_25; lean_object* x_26; lean_object* x_27; -x_24 = lean_ctor_get(x_20, 0); -x_25 = lean_ctor_get(x_20, 1); -lean_inc(x_25); -lean_inc(x_24); -lean_dec(x_20); -x_26 = l_Lean_mkNot(x_24); -x_27 = lean_alloc_ctor(0, 2, 0); -lean_ctor_set(x_27, 0, x_26); -lean_ctor_set(x_27, 1, x_25); -return x_27; -} +lean_object* x_2; +x_2 = lean_alloc_closure((void*)(l_Lean_Meta_Grind_Arith_CommRing_PolyDerivation_denoteExpr___rarg___boxed), 3, 0); +return x_2; } -else -{ -uint8_t x_28; -x_28 = !lean_is_exclusive(x_20); -if (x_28 == 0) -{ -return x_20; } -else +LEAN_EXPORT lean_object* l_Lean_Meta_Grind_Arith_CommRing_PolyDerivation_denoteExpr___rarg___boxed(lean_object* x_1, lean_object* x_2, lean_object* x_3) { +_start: { -lean_object* x_29; lean_object* x_30; lean_object* x_31; -x_29 = lean_ctor_get(x_20, 0); -x_30 = lean_ctor_get(x_20, 1); -lean_inc(x_30); -lean_inc(x_29); -lean_dec(x_20); -x_31 = lean_alloc_ctor(1, 2, 0); -lean_ctor_set(x_31, 0, x_29); -lean_ctor_set(x_31, 1, x_30); -return x_31; -} +lean_object* x_4; +x_4 = l_Lean_Meta_Grind_Arith_CommRing_PolyDerivation_denoteExpr___rarg(x_1, x_2, x_3); +lean_dec(x_3); +return x_4; } } -else -{ -uint8_t x_32; -lean_dec(x_14); -lean_dec(x_10); -lean_dec(x_9); -lean_dec(x_8); -lean_dec(x_7); -x_32 = !lean_is_exclusive(x_17); -if (x_32 == 0) +LEAN_EXPORT lean_object* l_Lean_Meta_Grind_Arith_CommRing_DiseqCnstr_denoteExpr___rarg___lambda__1(lean_object* x_1, lean_object* x_2) { +_start: { -return x_17; +lean_object* x_3; lean_object* x_4; lean_object* x_5; lean_object* x_6; +x_3 = lean_ctor_get(x_1, 0); +lean_inc(x_3); +lean_dec(x_1); +x_4 = lean_ctor_get(x_3, 1); +lean_inc(x_4); +lean_dec(x_3); +x_5 = l_Lean_mkNot(x_2); +x_6 = lean_apply_2(x_4, lean_box(0), x_5); +return x_6; } -else -{ -lean_object* x_33; lean_object* x_34; lean_object* x_35; -x_33 = lean_ctor_get(x_17, 0); -x_34 = lean_ctor_get(x_17, 1); -lean_inc(x_34); -lean_inc(x_33); -lean_dec(x_17); -x_35 = lean_alloc_ctor(1, 2, 0); -lean_ctor_set(x_35, 0, x_33); 
-lean_ctor_set(x_35, 1, x_34); -return x_35; } +LEAN_EXPORT lean_object* l_Lean_Meta_Grind_Arith_CommRing_DiseqCnstr_denoteExpr___rarg___lambda__2(lean_object* x_1, lean_object* x_2, lean_object* x_3, lean_object* x_4, lean_object* x_5) { +_start: +{ +lean_object* x_6; lean_object* x_7; lean_object* x_8; +lean_inc(x_1); +x_6 = l___private_Lean_Meta_Tactic_Grind_Arith_CommRing_DenoteExpr_0__Lean_Meta_Grind_Arith_CommRing_mkEq___rarg(x_1, x_2, x_3, x_5); +x_7 = lean_alloc_closure((void*)(l_Lean_Meta_Grind_Arith_CommRing_DiseqCnstr_denoteExpr___rarg___lambda__1), 2, 1); +lean_closure_set(x_7, 0, x_1); +x_8 = lean_apply_4(x_4, lean_box(0), lean_box(0), x_6, x_7); +return x_8; } } -else +LEAN_EXPORT lean_object* l_Lean_Meta_Grind_Arith_CommRing_DiseqCnstr_denoteExpr___rarg___lambda__3(lean_object* x_1, lean_object* x_2, lean_object* x_3, lean_object* x_4) { +_start: { -uint8_t x_36; -lean_dec(x_10); -lean_dec(x_9); -lean_dec(x_8); -lean_dec(x_7); -x_36 = !lean_is_exclusive(x_13); -if (x_36 == 0) +lean_object* x_5; lean_object* x_6; lean_object* x_7; lean_object* x_8; +x_5 = l___private_Lean_Meta_Tactic_Grind_Arith_CommRing_DenoteExpr_0__Lean_Meta_Grind_Arith_CommRing_denoteNum___rarg___lambda__1___closed__8; +lean_inc(x_2); +lean_inc(x_1); +x_6 = l___private_Lean_Meta_Tactic_Grind_Arith_CommRing_DenoteExpr_0__Lean_Meta_Grind_Arith_CommRing_denoteNum___rarg(x_1, x_2, x_5); +lean_inc(x_3); +x_7 = lean_alloc_closure((void*)(l_Lean_Meta_Grind_Arith_CommRing_DiseqCnstr_denoteExpr___rarg___lambda__2), 5, 4); +lean_closure_set(x_7, 0, x_1); +lean_closure_set(x_7, 1, x_2); +lean_closure_set(x_7, 2, x_4); +lean_closure_set(x_7, 3, x_3); +x_8 = lean_apply_4(x_3, lean_box(0), lean_box(0), x_6, x_7); +return x_8; +} +} +LEAN_EXPORT lean_object* l_Lean_Meta_Grind_Arith_CommRing_DiseqCnstr_denoteExpr___rarg(lean_object* x_1, lean_object* x_2, lean_object* x_3) { +_start: { -return x_13; -} -else +lean_object* x_4; lean_object* x_5; lean_object* x_6; lean_object* x_7; lean_object* x_8; +x_4 = lean_ctor_get(x_1, 1); +lean_inc(x_4); +x_5 = lean_ctor_get(x_3, 4); +lean_inc(x_2); +lean_inc(x_1); +x_6 = l_Lean_Meta_Grind_Arith_CommRing_PolyDerivation_denoteExpr___rarg(x_1, x_2, x_5); +lean_inc(x_4); +x_7 = lean_alloc_closure((void*)(l_Lean_Meta_Grind_Arith_CommRing_DiseqCnstr_denoteExpr___rarg___lambda__3), 4, 3); +lean_closure_set(x_7, 0, x_1); +lean_closure_set(x_7, 1, x_2); +lean_closure_set(x_7, 2, x_4); +x_8 = lean_apply_4(x_4, lean_box(0), lean_box(0), x_6, x_7); +return x_8; +} +} +LEAN_EXPORT lean_object* l_Lean_Meta_Grind_Arith_CommRing_DiseqCnstr_denoteExpr(lean_object* x_1) { +_start: { -lean_object* x_37; lean_object* x_38; lean_object* x_39; -x_37 = lean_ctor_get(x_13, 0); -x_38 = lean_ctor_get(x_13, 1); -lean_inc(x_38); -lean_inc(x_37); -lean_dec(x_13); -x_39 = lean_alloc_ctor(1, 2, 0); -lean_ctor_set(x_39, 0, x_37); -lean_ctor_set(x_39, 1, x_38); -return x_39; -} -} +lean_object* x_2; +x_2 = lean_alloc_closure((void*)(l_Lean_Meta_Grind_Arith_CommRing_DiseqCnstr_denoteExpr___rarg___boxed), 3, 0); +return x_2; } } -LEAN_EXPORT lean_object* l_Lean_Meta_Grind_Arith_CommRing_DiseqCnstr_denoteExpr___boxed(lean_object* x_1, lean_object* x_2, lean_object* x_3, lean_object* x_4, lean_object* x_5, lean_object* x_6, lean_object* x_7, lean_object* x_8, lean_object* x_9, lean_object* x_10, lean_object* x_11) { +LEAN_EXPORT lean_object* l_Lean_Meta_Grind_Arith_CommRing_DiseqCnstr_denoteExpr___rarg___boxed(lean_object* x_1, lean_object* x_2, lean_object* x_3) { _start: { -lean_object* x_12; -x_12 = 
l_Lean_Meta_Grind_Arith_CommRing_DiseqCnstr_denoteExpr(x_1, x_2, x_3, x_4, x_5, x_6, x_7, x_8, x_9, x_10, x_11); -lean_dec(x_6); -lean_dec(x_5); -lean_dec(x_4); +lean_object* x_4; +x_4 = l_Lean_Meta_Grind_Arith_CommRing_DiseqCnstr_denoteExpr___rarg(x_1, x_2, x_3); lean_dec(x_3); -lean_dec(x_2); -lean_dec(x_1); -return x_12; +return x_4; } } lean_object* initialize_Lean_Meta_Tactic_Grind_Arith_CommRing_Util(uint8_t builtin, lean_object*); @@ -2412,24 +1400,28 @@ lean_dec_ref(res); res = initialize_Lean_Meta_Tactic_Grind_Arith_CommRing_Var(builtin, lean_io_mk_world()); if (lean_io_result_is_error(res)) return res; lean_dec_ref(res); -l___private_Lean_Meta_Tactic_Grind_Arith_CommRing_DenoteExpr_0__Lean_Meta_Grind_Arith_CommRing_denoteNum___closed__1 = _init_l___private_Lean_Meta_Tactic_Grind_Arith_CommRing_DenoteExpr_0__Lean_Meta_Grind_Arith_CommRing_denoteNum___closed__1(); -lean_mark_persistent(l___private_Lean_Meta_Tactic_Grind_Arith_CommRing_DenoteExpr_0__Lean_Meta_Grind_Arith_CommRing_denoteNum___closed__1); -l___private_Lean_Meta_Tactic_Grind_Arith_CommRing_DenoteExpr_0__Lean_Meta_Grind_Arith_CommRing_denoteNum___closed__2 = _init_l___private_Lean_Meta_Tactic_Grind_Arith_CommRing_DenoteExpr_0__Lean_Meta_Grind_Arith_CommRing_denoteNum___closed__2(); -lean_mark_persistent(l___private_Lean_Meta_Tactic_Grind_Arith_CommRing_DenoteExpr_0__Lean_Meta_Grind_Arith_CommRing_denoteNum___closed__2); -l___private_Lean_Meta_Tactic_Grind_Arith_CommRing_DenoteExpr_0__Lean_Meta_Grind_Arith_CommRing_denoteNum___closed__3 = _init_l___private_Lean_Meta_Tactic_Grind_Arith_CommRing_DenoteExpr_0__Lean_Meta_Grind_Arith_CommRing_denoteNum___closed__3(); -lean_mark_persistent(l___private_Lean_Meta_Tactic_Grind_Arith_CommRing_DenoteExpr_0__Lean_Meta_Grind_Arith_CommRing_denoteNum___closed__3); -l___private_Lean_Meta_Tactic_Grind_Arith_CommRing_DenoteExpr_0__Lean_Meta_Grind_Arith_CommRing_denoteNum___closed__4 = _init_l___private_Lean_Meta_Tactic_Grind_Arith_CommRing_DenoteExpr_0__Lean_Meta_Grind_Arith_CommRing_denoteNum___closed__4(); -lean_mark_persistent(l___private_Lean_Meta_Tactic_Grind_Arith_CommRing_DenoteExpr_0__Lean_Meta_Grind_Arith_CommRing_denoteNum___closed__4); -l___private_Lean_Meta_Tactic_Grind_Arith_CommRing_DenoteExpr_0__Lean_Meta_Grind_Arith_CommRing_denoteNum___closed__5 = _init_l___private_Lean_Meta_Tactic_Grind_Arith_CommRing_DenoteExpr_0__Lean_Meta_Grind_Arith_CommRing_denoteNum___closed__5(); -lean_mark_persistent(l___private_Lean_Meta_Tactic_Grind_Arith_CommRing_DenoteExpr_0__Lean_Meta_Grind_Arith_CommRing_denoteNum___closed__5); -l___private_Lean_Meta_Tactic_Grind_Arith_CommRing_DenoteExpr_0__Lean_Meta_Grind_Arith_CommRing_denoteNum___closed__6 = _init_l___private_Lean_Meta_Tactic_Grind_Arith_CommRing_DenoteExpr_0__Lean_Meta_Grind_Arith_CommRing_denoteNum___closed__6(); -lean_mark_persistent(l___private_Lean_Meta_Tactic_Grind_Arith_CommRing_DenoteExpr_0__Lean_Meta_Grind_Arith_CommRing_denoteNum___closed__6); -l___private_Lean_Meta_Tactic_Grind_Arith_CommRing_DenoteExpr_0__Lean_Meta_Grind_Arith_CommRing_denoteNum___closed__7 = _init_l___private_Lean_Meta_Tactic_Grind_Arith_CommRing_DenoteExpr_0__Lean_Meta_Grind_Arith_CommRing_denoteNum___closed__7(); -lean_mark_persistent(l___private_Lean_Meta_Tactic_Grind_Arith_CommRing_DenoteExpr_0__Lean_Meta_Grind_Arith_CommRing_denoteNum___closed__7); -l___private_Lean_Meta_Tactic_Grind_Arith_CommRing_DenoteExpr_0__Lean_Meta_Grind_Arith_CommRing_denoteNum___closed__8 = 
_init_l___private_Lean_Meta_Tactic_Grind_Arith_CommRing_DenoteExpr_0__Lean_Meta_Grind_Arith_CommRing_denoteNum___closed__8(); -lean_mark_persistent(l___private_Lean_Meta_Tactic_Grind_Arith_CommRing_DenoteExpr_0__Lean_Meta_Grind_Arith_CommRing_denoteNum___closed__8); -l_Lean_Grind_CommRing_Mon_denoteExpr___closed__1 = _init_l_Lean_Grind_CommRing_Mon_denoteExpr___closed__1(); -lean_mark_persistent(l_Lean_Grind_CommRing_Mon_denoteExpr___closed__1); +l___private_Lean_Meta_Tactic_Grind_Arith_CommRing_DenoteExpr_0__Lean_Meta_Grind_Arith_CommRing_denoteNum___rarg___lambda__1___closed__1 = _init_l___private_Lean_Meta_Tactic_Grind_Arith_CommRing_DenoteExpr_0__Lean_Meta_Grind_Arith_CommRing_denoteNum___rarg___lambda__1___closed__1(); +lean_mark_persistent(l___private_Lean_Meta_Tactic_Grind_Arith_CommRing_DenoteExpr_0__Lean_Meta_Grind_Arith_CommRing_denoteNum___rarg___lambda__1___closed__1); +l___private_Lean_Meta_Tactic_Grind_Arith_CommRing_DenoteExpr_0__Lean_Meta_Grind_Arith_CommRing_denoteNum___rarg___lambda__1___closed__2 = _init_l___private_Lean_Meta_Tactic_Grind_Arith_CommRing_DenoteExpr_0__Lean_Meta_Grind_Arith_CommRing_denoteNum___rarg___lambda__1___closed__2(); +lean_mark_persistent(l___private_Lean_Meta_Tactic_Grind_Arith_CommRing_DenoteExpr_0__Lean_Meta_Grind_Arith_CommRing_denoteNum___rarg___lambda__1___closed__2); +l___private_Lean_Meta_Tactic_Grind_Arith_CommRing_DenoteExpr_0__Lean_Meta_Grind_Arith_CommRing_denoteNum___rarg___lambda__1___closed__3 = _init_l___private_Lean_Meta_Tactic_Grind_Arith_CommRing_DenoteExpr_0__Lean_Meta_Grind_Arith_CommRing_denoteNum___rarg___lambda__1___closed__3(); +lean_mark_persistent(l___private_Lean_Meta_Tactic_Grind_Arith_CommRing_DenoteExpr_0__Lean_Meta_Grind_Arith_CommRing_denoteNum___rarg___lambda__1___closed__3); +l___private_Lean_Meta_Tactic_Grind_Arith_CommRing_DenoteExpr_0__Lean_Meta_Grind_Arith_CommRing_denoteNum___rarg___lambda__1___closed__4 = _init_l___private_Lean_Meta_Tactic_Grind_Arith_CommRing_DenoteExpr_0__Lean_Meta_Grind_Arith_CommRing_denoteNum___rarg___lambda__1___closed__4(); +lean_mark_persistent(l___private_Lean_Meta_Tactic_Grind_Arith_CommRing_DenoteExpr_0__Lean_Meta_Grind_Arith_CommRing_denoteNum___rarg___lambda__1___closed__4); +l___private_Lean_Meta_Tactic_Grind_Arith_CommRing_DenoteExpr_0__Lean_Meta_Grind_Arith_CommRing_denoteNum___rarg___lambda__1___closed__5 = _init_l___private_Lean_Meta_Tactic_Grind_Arith_CommRing_DenoteExpr_0__Lean_Meta_Grind_Arith_CommRing_denoteNum___rarg___lambda__1___closed__5(); +lean_mark_persistent(l___private_Lean_Meta_Tactic_Grind_Arith_CommRing_DenoteExpr_0__Lean_Meta_Grind_Arith_CommRing_denoteNum___rarg___lambda__1___closed__5); +l___private_Lean_Meta_Tactic_Grind_Arith_CommRing_DenoteExpr_0__Lean_Meta_Grind_Arith_CommRing_denoteNum___rarg___lambda__1___closed__6 = _init_l___private_Lean_Meta_Tactic_Grind_Arith_CommRing_DenoteExpr_0__Lean_Meta_Grind_Arith_CommRing_denoteNum___rarg___lambda__1___closed__6(); +lean_mark_persistent(l___private_Lean_Meta_Tactic_Grind_Arith_CommRing_DenoteExpr_0__Lean_Meta_Grind_Arith_CommRing_denoteNum___rarg___lambda__1___closed__6); +l___private_Lean_Meta_Tactic_Grind_Arith_CommRing_DenoteExpr_0__Lean_Meta_Grind_Arith_CommRing_denoteNum___rarg___lambda__1___closed__7 = _init_l___private_Lean_Meta_Tactic_Grind_Arith_CommRing_DenoteExpr_0__Lean_Meta_Grind_Arith_CommRing_denoteNum___rarg___lambda__1___closed__7(); 
+lean_mark_persistent(l___private_Lean_Meta_Tactic_Grind_Arith_CommRing_DenoteExpr_0__Lean_Meta_Grind_Arith_CommRing_denoteNum___rarg___lambda__1___closed__7); +l___private_Lean_Meta_Tactic_Grind_Arith_CommRing_DenoteExpr_0__Lean_Meta_Grind_Arith_CommRing_denoteNum___rarg___lambda__1___closed__8 = _init_l___private_Lean_Meta_Tactic_Grind_Arith_CommRing_DenoteExpr_0__Lean_Meta_Grind_Arith_CommRing_denoteNum___rarg___lambda__1___closed__8(); +lean_mark_persistent(l___private_Lean_Meta_Tactic_Grind_Arith_CommRing_DenoteExpr_0__Lean_Meta_Grind_Arith_CommRing_denoteNum___rarg___lambda__1___closed__8); +l_Lean_Grind_CommRing_Mon_denoteExpr___rarg___closed__1 = _init_l_Lean_Grind_CommRing_Mon_denoteExpr___rarg___closed__1(); +lean_mark_persistent(l_Lean_Grind_CommRing_Mon_denoteExpr___rarg___closed__1); +l___private_Lean_Meta_Tactic_Grind_Arith_CommRing_DenoteExpr_0__Lean_Meta_Grind_Arith_CommRing_mkEq___rarg___lambda__1___closed__1 = _init_l___private_Lean_Meta_Tactic_Grind_Arith_CommRing_DenoteExpr_0__Lean_Meta_Grind_Arith_CommRing_mkEq___rarg___lambda__1___closed__1(); +lean_mark_persistent(l___private_Lean_Meta_Tactic_Grind_Arith_CommRing_DenoteExpr_0__Lean_Meta_Grind_Arith_CommRing_mkEq___rarg___lambda__1___closed__1); +l___private_Lean_Meta_Tactic_Grind_Arith_CommRing_DenoteExpr_0__Lean_Meta_Grind_Arith_CommRing_mkEq___rarg___lambda__1___closed__2 = _init_l___private_Lean_Meta_Tactic_Grind_Arith_CommRing_DenoteExpr_0__Lean_Meta_Grind_Arith_CommRing_mkEq___rarg___lambda__1___closed__2(); +lean_mark_persistent(l___private_Lean_Meta_Tactic_Grind_Arith_CommRing_DenoteExpr_0__Lean_Meta_Grind_Arith_CommRing_mkEq___rarg___lambda__1___closed__2); return lean_io_result_mk_ok(lean_box(0)); } #ifdef __cplusplus diff --git a/stage0/stdlib/Lean/Meta/Tactic/Grind/Arith/CommRing/EqCnstr.c b/stage0/stdlib/Lean/Meta/Tactic/Grind/Arith/CommRing/EqCnstr.c index 2d6c57c5efd8..c91bdf06ffa8 100644 --- a/stage0/stdlib/Lean/Meta/Tactic/Grind/Arith/CommRing/EqCnstr.c +++ b/stage0/stdlib/Lean/Meta/Tactic/Grind/Arith/CommRing/EqCnstr.c @@ -33,9 +33,12 @@ LEAN_EXPORT lean_object* l_Lean_Meta_Grind_Arith_CommRing_saveDiseq___boxed(lean lean_object* l_Lean_Meta_Grind_Arith_CommRing_noZeroDivisors(lean_object*, lean_object*, lean_object*, lean_object*, lean_object*, lean_object*, lean_object*, lean_object*, lean_object*, lean_object*); LEAN_EXPORT lean_object* l_Lean_Meta_Grind_Arith_CommRing_PolyDerivation_simplifyWith___lambda__1(lean_object*, lean_object*, lean_object*, lean_object*, lean_object*, lean_object*, lean_object*, lean_object*, lean_object*, lean_object*, lean_object*, lean_object*, lean_object*, lean_object*); LEAN_EXPORT lean_object* l___private_Lean_Meta_Tactic_Grind_Arith_CommRing_EqCnstr_0__Lean_Meta_Grind_Arith_CommRing_propagateEqs___lambda__3(lean_object*, lean_object*, lean_object*, lean_object*, lean_object*, lean_object*, lean_object*, lean_object*, lean_object*, lean_object*, lean_object*, lean_object*, lean_object*); +LEAN_EXPORT lean_object* l_Lean_Meta_Grind_Arith_CommRing_EqCnstr_simplifyWithCore(lean_object*, lean_object*, lean_object*, lean_object*, lean_object*, lean_object*, lean_object*, lean_object*, lean_object*, lean_object*, lean_object*, lean_object*); LEAN_EXPORT lean_object* l_Lean_PersistentArray_modifyAux___at_Lean_Meta_Grind_Arith_CommRing_addToBasisCore___spec__2(lean_object*, lean_object*, lean_object*, size_t, size_t); lean_object* lean_mk_empty_array_with_capacity(lean_object*); +LEAN_EXPORT lean_object* 
l_Lean_Meta_Grind_Arith_CommRing_EqCnstr_simplifyBasis_go___boxed(lean_object*, lean_object*, lean_object*, lean_object*, lean_object*, lean_object*, lean_object*, lean_object*, lean_object*, lean_object*, lean_object*, lean_object*, lean_object*); size_t lean_usize_shift_right(size_t, size_t); +LEAN_EXPORT lean_object* l_List_forIn_x27_loop___at_Lean_Meta_Grind_Arith_CommRing_EqCnstr_simplifyBasis_goVar___spec__1___lambda__2___boxed(lean_object*, lean_object*, lean_object*, lean_object*, lean_object*, lean_object*, lean_object*, lean_object*, lean_object*, lean_object*, lean_object*, lean_object*, lean_object*, lean_object*, lean_object*); LEAN_EXPORT lean_object* l_Std_DHashMap_Internal_AssocList_get_x3f___at___private_Lean_Meta_Tactic_Grind_Arith_CommRing_EqCnstr_0__Lean_Meta_Grind_Arith_CommRing_propagateEqs_process___spec__7___boxed(lean_object*, lean_object*); lean_object* l_Lean_Grind_CommRing_Poly_lm(lean_object*); LEAN_EXPORT lean_object* l_Lean_Meta_Grind_Arith_CommRing_addNewDiseq___lambda__1(lean_object*, lean_object*, lean_object*, lean_object*, lean_object*, lean_object*, lean_object*, lean_object*, lean_object*, lean_object*, lean_object*, lean_object*); @@ -44,20 +47,18 @@ static lean_object* l_Array_forIn_x27Unsafe_loop___at___private_Lean_Meta_Tactic static lean_object* l___private_Lean_Meta_Tactic_Grind_Arith_CommRing_EqCnstr_0__Lean_Meta_Grind_Arith_CommRing_checkDiseqs___closed__3; lean_object* l_Lean_Meta_Grind_Arith_CommRing_PolyDerivation_getMultiplier(lean_object*); LEAN_EXPORT lean_object* l_Lean_Grind_CommRing_Mon_findSimp_x3f_go___lambda__1(lean_object*, lean_object*, uint8_t, uint8_t, lean_object*, lean_object*, lean_object*, lean_object*, lean_object*, lean_object*, lean_object*, lean_object*, lean_object*, lean_object*, lean_object*, lean_object*); -LEAN_EXPORT lean_object* l_List_forIn_x27_loop___at_Lean_Meta_Grind_Arith_CommRing_EqCnstr_simplifyBasis___spec__1___boxed(lean_object**); LEAN_EXPORT lean_object* l_Lean_Loop_forIn_loop___at_Lean_Meta_Grind_Arith_CommRing_PolyDerivation_simplify___spec__1(lean_object*, lean_object*, lean_object*, lean_object*, lean_object*, lean_object*, lean_object*, lean_object*, lean_object*, lean_object*, lean_object*, lean_object*); LEAN_EXPORT lean_object* l_Lean_Meta_Grind_Arith_CommRing_check(lean_object*, lean_object*, lean_object*, lean_object*, lean_object*, lean_object*, lean_object*, lean_object*, lean_object*); static lean_object* l_Lean_Meta_Grind_Arith_CommRing_checkRing___lambda__2___closed__1; LEAN_EXPORT lean_object* l_Lean_Meta_Grind_Arith_CommRing_mkEqCnstr___boxed(lean_object*, lean_object*, lean_object*, lean_object*, lean_object*, lean_object*, lean_object*, lean_object*, lean_object*, lean_object*, lean_object*, lean_object*); +static lean_object* l_List_forIn_x27_loop___at_Lean_Meta_Grind_Arith_CommRing_EqCnstr_simplifyBasis_goVar___spec__1___closed__3; LEAN_EXPORT lean_object* l___private_Lean_Meta_Tactic_Grind_Arith_CommRing_EqCnstr_0__Lean_Meta_Grind_Arith_CommRing_toRingExpr_x3f___lambda__1___boxed(lean_object*, lean_object*, lean_object*, lean_object*, lean_object*, lean_object*, lean_object*, lean_object*, lean_object*, lean_object*, lean_object*); LEAN_EXPORT lean_object* l_Lean_PersistentHashMap_foldlMAux_traverse___at___private_Lean_Meta_Tactic_Grind_Arith_CommRing_EqCnstr_0__Lean_Meta_Grind_Arith_CommRing_propagateEqs___spec__10___rarg___boxed(lean_object*, lean_object*, lean_object*, lean_object*, lean_object*, lean_object*, lean_object*, lean_object*, lean_object*, lean_object*, lean_object*, 
lean_object*, lean_object*, lean_object*, lean_object*, lean_object*); -lean_object* l_Lean_Meta_Grind_Arith_CommRing_EqCnstr_denoteExpr(lean_object*, lean_object*, lean_object*, lean_object*, lean_object*, lean_object*, lean_object*, lean_object*, lean_object*, lean_object*, lean_object*); LEAN_EXPORT lean_object* l_Lean_Meta_Grind_Arith_CommRing_EqCnstr_toMonic___lambda__3(lean_object*, lean_object*, lean_object*, lean_object*, lean_object*, lean_object*, lean_object*, lean_object*, lean_object*, lean_object*, lean_object*, lean_object*, lean_object*, lean_object*); LEAN_EXPORT lean_object* l_Lean_Meta_Grind_Arith_CommRing_saveDiseq___lambda__1(lean_object*, lean_object*, lean_object*, lean_object*, lean_object*, lean_object*, lean_object*, lean_object*, lean_object*, lean_object*, lean_object*, lean_object*); LEAN_EXPORT lean_object* l_Lean_Meta_Grind_Arith_CommRing_EqCnstr_superposeWith(lean_object*, lean_object*, lean_object*, lean_object*, lean_object*, lean_object*, lean_object*, lean_object*, lean_object*, lean_object*, lean_object*); static lean_object* l_Lean_Meta_Grind_Arith_CommRing_EqCnstr_addToBasisAfterSimp___closed__2; LEAN_EXPORT lean_object* l_Lean_Meta_Grind_Arith_CommRing_EqCnstr_addToQueue___lambda__2(lean_object*, lean_object*, lean_object*, lean_object*, lean_object*, lean_object*, lean_object*, lean_object*, lean_object*, lean_object*, lean_object*, lean_object*); -lean_object* l_Lean_Meta_Grind_Arith_CommRing_getRing(lean_object*, lean_object*, lean_object*, lean_object*, lean_object*, lean_object*, lean_object*, lean_object*, lean_object*, lean_object*); LEAN_EXPORT lean_object* l_Lean_Loop_forIn_loop___at_Lean_Meta_Grind_Arith_CommRing_PolyDerivation_simplify___spec__1___lambda__2(lean_object*, lean_object*, lean_object*, lean_object*, lean_object*, lean_object*, lean_object*, lean_object*, lean_object*, lean_object*, lean_object*, lean_object*, lean_object*); lean_object* l_Lean_indentD(lean_object*); static lean_object* l_Lean_Loop_forIn_loop___at_Lean_Meta_Grind_Arith_CommRing_PolyDerivation_simplify___spec__1___lambda__2___closed__1; @@ -73,6 +74,7 @@ LEAN_EXPORT lean_object* l_Lean_PersistentArray_forIn___at___private_Lean_Meta_T size_t lean_uint64_to_usize(uint64_t); LEAN_EXPORT lean_object* l___private_Lean_Meta_Tactic_Grind_Arith_CommRing_EqCnstr_0__Lean_Meta_Grind_Arith_CommRing_propagateEqs_process___lambda__3(lean_object*, lean_object*, lean_object*, lean_object*, lean_object*, lean_object*, lean_object*, lean_object*, lean_object*, lean_object*, lean_object*, lean_object*, lean_object*, lean_object*, lean_object*, lean_object*); lean_object* l_Lean_Meta_Grind_Arith_CommRing_getNext_x3f(lean_object*, lean_object*, lean_object*, lean_object*, lean_object*, lean_object*, lean_object*, lean_object*, lean_object*, lean_object*); +LEAN_EXPORT lean_object* l_Lean_Meta_Grind_Arith_CommRing_EqCnstr_simplifyWithCore___lambda__1(lean_object*, lean_object*, lean_object*, lean_object*, lean_object*, lean_object*, lean_object*, lean_object*, lean_object*, lean_object*, lean_object*, lean_object*); LEAN_EXPORT lean_object* l_Lean_Meta_Grind_Arith_CommRing_EqCnstr_toMonic___lambda__3___boxed(lean_object*, lean_object*, lean_object*, lean_object*, lean_object*, lean_object*, lean_object*, lean_object*, lean_object*, lean_object*, lean_object*, lean_object*, lean_object*, lean_object*); lean_object* l_Lean_Meta_Grind_reportIssue(lean_object*, lean_object*, lean_object*, lean_object*, lean_object*, lean_object*, lean_object*, lean_object*, lean_object*); LEAN_EXPORT 
lean_object* l_Lean_Loop_forIn_loop___at_Lean_Meta_Grind_Arith_CommRing_checkRing___spec__1___lambda__2___boxed(lean_object*, lean_object*, lean_object*, lean_object*, lean_object*, lean_object*, lean_object*, lean_object*, lean_object*, lean_object*, lean_object*, lean_object*, lean_object*); @@ -98,6 +100,7 @@ LEAN_EXPORT lean_object* l_Lean_Loop_forIn_loop___at_Lean_Meta_Grind_Arith_CommR static lean_object* l_Lean_Meta_Grind_Arith_CommRing_saveDiseq___closed__1; static lean_object* l_Lean_Meta_Grind_Arith_CommRing_EqCnstr_addToQueue___lambda__2___closed__1; LEAN_EXPORT lean_object* l___private_Lean_Meta_Tactic_Grind_Arith_CommRing_EqCnstr_0__Lean_Meta_Grind_Arith_CommRing_propagateEqs_process___lambda__4(lean_object*, lean_object*, lean_object*, lean_object*, lean_object*, lean_object*, lean_object*, lean_object*, lean_object*, lean_object*, lean_object*, lean_object*, lean_object*, lean_object*, lean_object*, lean_object*); +LEAN_EXPORT lean_object* l_List_forIn_x27_loop___at_Lean_Meta_Grind_Arith_CommRing_EqCnstr_simplifyBasis_goVar___spec__1___lambda__2(lean_object*, lean_object*, lean_object*, lean_object*, lean_object*, lean_object*, lean_object*, lean_object*, lean_object*, lean_object*, lean_object*, lean_object*, lean_object*, lean_object*, lean_object*); lean_object* l_Lean_Grind_CommRing_Poly_mulConstC(lean_object*, lean_object*, lean_object*); LEAN_EXPORT lean_object* l_Lean_Grind_CommRing_Mon_findSimp_x3f(lean_object*, lean_object*, lean_object*, lean_object*, lean_object*, lean_object*, lean_object*, lean_object*, lean_object*, lean_object*, lean_object*, lean_object*); lean_object* l_Nat_nextPowerOfTwo_go(lean_object*, lean_object*, lean_object*); @@ -107,6 +110,7 @@ LEAN_EXPORT lean_object* l_Lean_Meta_Grind_Arith_CommRing_EqCnstr_checkConstant_ LEAN_EXPORT lean_object* l_Lean_RBNode_insert___at_Lean_Meta_Grind_Arith_CommRing_EqCnstr_addToQueue___spec__1(lean_object*, lean_object*, lean_object*); LEAN_EXPORT lean_object* l___private_Lean_Meta_Tactic_Grind_Arith_CommRing_EqCnstr_0__Lean_Meta_Grind_Arith_CommRing_propagateEqs_process___lambda__4___boxed(lean_object*, lean_object*, lean_object*, lean_object*, lean_object*, lean_object*, lean_object*, lean_object*, lean_object*, lean_object*, lean_object*, lean_object*, lean_object*, lean_object*, lean_object*, lean_object*); LEAN_EXPORT lean_object* l_Lean_Meta_Grind_Arith_CommRing_check___lambda__2___boxed(lean_object*, lean_object*, lean_object*, lean_object*, lean_object*, lean_object*, lean_object*, lean_object*, lean_object*, lean_object*); +static lean_object* l_List_forIn_x27_loop___at_Lean_Meta_Grind_Arith_CommRing_EqCnstr_simplifyBasis_goVar___spec__1___closed__4; lean_object* l_Lean_Meta_Grind_Arith_CommRing_checkMaxSteps(lean_object*, lean_object*, lean_object*, lean_object*, lean_object*, lean_object*, lean_object*, lean_object*, lean_object*); LEAN_EXPORT lean_object* l_Lean_Meta_Grind_Arith_CommRing_EqCnstr_superposeWith_go___boxed(lean_object*, lean_object*, lean_object*, lean_object*, lean_object*, lean_object*, lean_object*, lean_object*, lean_object*, lean_object*, lean_object*, lean_object*); LEAN_EXPORT lean_object* l___private_Lean_Meta_Tactic_Grind_Arith_CommRing_EqCnstr_0__Lean_Meta_Grind_Arith_CommRing_toRingExpr_x3f___lambda__1(lean_object*, lean_object*, lean_object*, lean_object*, lean_object*, lean_object*, lean_object*, lean_object*, lean_object*, lean_object*, lean_object*); @@ -125,6 +129,7 @@ LEAN_EXPORT lean_object* l_Lean_Meta_Grind_Arith_CommRing_EqCnstr_toMonic(lean_o LEAN_EXPORT 
lean_object* l___private_Lean_Meta_Tactic_Grind_Arith_CommRing_EqCnstr_0__Lean_Meta_Grind_Arith_CommRing_inSameRing_x3f___lambda__1(lean_object*, lean_object*, lean_object*, lean_object*, lean_object*, lean_object*, lean_object*, lean_object*, lean_object*, lean_object*, lean_object*); LEAN_EXPORT lean_object* l_Lean_PersistentArray_forInAux___at___private_Lean_Meta_Tactic_Grind_Arith_CommRing_EqCnstr_0__Lean_Meta_Grind_Arith_CommRing_checkDiseqs___spec__2___lambda__1(lean_object*, lean_object*, lean_object*, lean_object*, lean_object*, lean_object*, lean_object*, lean_object*, lean_object*, lean_object*, lean_object*, lean_object*); static lean_object* l_Lean_Meta_Grind_Arith_CommRing_checkRing___closed__1; +LEAN_EXPORT lean_object* l_Lean_Meta_Grind_Arith_CommRing_EqCnstr_simplifyWithExhaustively(lean_object*, lean_object*, lean_object*, lean_object*, lean_object*, lean_object*, lean_object*, lean_object*, lean_object*, lean_object*, lean_object*, lean_object*); LEAN_EXPORT lean_object* l_Lean_Meta_Grind_Arith_CommRing_checkRing___lambda__1___boxed(lean_object*, lean_object*, lean_object*, lean_object*, lean_object*, lean_object*, lean_object*, lean_object*, lean_object*, lean_object*, lean_object*); LEAN_EXPORT lean_object* l_Array_forIn_x27Unsafe_loop___at___private_Lean_Meta_Tactic_Grind_Arith_CommRing_EqCnstr_0__Lean_Meta_Grind_Arith_CommRing_propagateEqs___spec__3___boxed(lean_object**); LEAN_EXPORT lean_object* l___private_Lean_Meta_Tactic_Grind_Arith_CommRing_EqCnstr_0__Lean_Meta_Grind_Arith_CommRing_inSameRing_x3f(lean_object*, lean_object*, lean_object*, lean_object*, lean_object*, lean_object*, lean_object*, lean_object*, lean_object*, lean_object*, lean_object*); @@ -144,7 +149,7 @@ LEAN_EXPORT lean_object* l_Lean_PersistentHashMap_findAux___at___private_Lean_Me LEAN_EXPORT lean_object* l_Lean_Meta_Grind_Arith_CommRing_addToBasisCore(lean_object*, lean_object*, lean_object*, lean_object*, lean_object*, lean_object*, lean_object*, lean_object*, lean_object*, lean_object*, lean_object*); LEAN_EXPORT lean_object* l_Lean_PersistentArray_forIn___at___private_Lean_Meta_Tactic_Grind_Arith_CommRing_EqCnstr_0__Lean_Meta_Grind_Arith_CommRing_propagateEqs___spec__1(lean_object*, lean_object*, lean_object*, lean_object*, lean_object*, lean_object*, lean_object*, lean_object*, lean_object*, lean_object*, lean_object*, lean_object*, lean_object*); LEAN_EXPORT lean_object* l_Lean_Meta_Grind_Arith_CommRing_EqCnstr_toMonic___lambda__1___boxed(lean_object*, lean_object*, lean_object*, lean_object*, lean_object*, lean_object*, lean_object*, lean_object*, lean_object*, lean_object*, lean_object*, lean_object*, lean_object*, lean_object*); -static lean_object* l_List_forIn_x27_loop___at_Lean_Meta_Grind_Arith_CommRing_EqCnstr_simplifyBasis___spec__1___closed__1; +LEAN_EXPORT lean_object* l_Lean_Meta_Grind_Arith_CommRing_EqCnstr_simplifyWithCore___lambda__1___boxed(lean_object*, lean_object*, lean_object*, lean_object*, lean_object*, lean_object*, lean_object*, lean_object*, lean_object*, lean_object*, lean_object*, lean_object*); static lean_object* l___private_Lean_Meta_Tactic_Grind_Arith_CommRing_EqCnstr_0__Lean_Meta_Grind_Arith_CommRing_propagateEqs_process___lambda__4___closed__1; lean_object* lean_st_ref_take(lean_object*, lean_object*); LEAN_EXPORT lean_object* l_Lean_Meta_Grind_Arith_CommRing_check___lambda__2(lean_object*, lean_object*, lean_object*, lean_object*, lean_object*, lean_object*, lean_object*, lean_object*, lean_object*, lean_object*); @@ -157,7 +162,8 @@ uint64_t 
lean_uint64_shift_right(uint64_t, uint64_t); LEAN_EXPORT lean_object* l_Lean_Meta_Grind_Arith_CommRing_EqCnstr_addToQueue___boxed(lean_object*, lean_object*, lean_object*, lean_object*, lean_object*, lean_object*, lean_object*, lean_object*, lean_object*, lean_object*, lean_object*); lean_object* l_Lean_RBNode_setBlack___rarg(lean_object*); lean_object* lean_nat_to_int(lean_object*); -LEAN_EXPORT lean_object* l_Lean_Meta_Grind_Arith_CommRing_EqCnstr_simplifyBasis___lambda__1___boxed(lean_object*, lean_object*, lean_object*, lean_object*, lean_object*, lean_object*, lean_object*, lean_object*, lean_object*, lean_object*, lean_object*, lean_object*, lean_object*, lean_object*, lean_object*); +LEAN_EXPORT lean_object* l_Lean_Meta_Grind_Arith_CommRing_EqCnstr_simplifyBasis___lambda__1___boxed(lean_object*, lean_object*, lean_object*, lean_object*, lean_object*, lean_object*, lean_object*, lean_object*, lean_object*, lean_object*, lean_object*, lean_object*); +lean_object* l_Lean_Meta_Grind_Arith_CommRing_nonzeroChar_x3f___at_Lean_Grind_CommRing_Expr_toPolyM___spec__1(lean_object*, lean_object*, lean_object*, lean_object*, lean_object*, lean_object*, lean_object*, lean_object*, lean_object*, lean_object*); lean_object* lean_nat_div(lean_object*, lean_object*); uint8_t l_Lean_Grind_CommRing_Mon_grevlex(lean_object*, lean_object*); static lean_object* l_Lean_Loop_forIn_loop___at_Lean_Meta_Grind_Arith_CommRing_PolyDerivation_simplify___spec__1___lambda__2___closed__2; @@ -175,7 +181,6 @@ LEAN_EXPORT lean_object* l_Array_forIn_x27Unsafe_loop___at___private_Lean_Meta_T lean_object* l_outOfBounds___rarg(lean_object*); uint64_t l___private_Init_Grind_CommRing_Poly_0__Lean_Grind_CommRing_hashPoly____x40_Init_Grind_CommRing_Poly___hyg_3673_(lean_object*); static lean_object* l_Lean_Meta_Grind_Arith_CommRing_EqCnstr_checkConstant___closed__3; -lean_object* l_Lean_Meta_Grind_Arith_CommRing_nonzeroChar_x3f(lean_object*, lean_object*, lean_object*, lean_object*, lean_object*, lean_object*, lean_object*, lean_object*, lean_object*, lean_object*); static lean_object* l_Lean_Meta_Grind_Arith_CommRing_EqCnstr_addToBasisAfterSimp___closed__1; LEAN_EXPORT lean_object* l_Array_forIn_x27Unsafe_loop___at___private_Lean_Meta_Tactic_Grind_Arith_CommRing_EqCnstr_0__Lean_Meta_Grind_Arith_CommRing_propagateEqs___spec__3(lean_object*, lean_object*, lean_object*, lean_object*, lean_object*, lean_object*, size_t, size_t, lean_object*, lean_object*, lean_object*, lean_object*, lean_object*, lean_object*, lean_object*, lean_object*, lean_object*, lean_object*, lean_object*); LEAN_EXPORT lean_object* l_Array_forIn_x27Unsafe_loop___at___private_Lean_Meta_Tactic_Grind_Arith_CommRing_EqCnstr_0__Lean_Meta_Grind_Arith_CommRing_checkDiseqs___spec__3(lean_object*, lean_object*, lean_object*, lean_object*, lean_object*, lean_object*, size_t, size_t, lean_object*, lean_object*, lean_object*, lean_object*, lean_object*, lean_object*, lean_object*, lean_object*, lean_object*, lean_object*, lean_object*); @@ -190,18 +195,19 @@ uint8_t l_List_isEmpty___rarg(lean_object*); LEAN_EXPORT lean_object* l_Array_forIn_x27Unsafe_loop___at___private_Lean_Meta_Tactic_Grind_Arith_CommRing_EqCnstr_0__Lean_Meta_Grind_Arith_CommRing_checkDiseqs___spec__3___boxed(lean_object**); static lean_object* l_Array_forIn_x27Unsafe_loop___at___private_Lean_Meta_Tactic_Grind_Arith_CommRing_EqCnstr_0__Lean_Meta_Grind_Arith_CommRing_propagateEqs___spec__4___lambda__1___closed__3; lean_object* l_Lean_Meta_mkEq(lean_object*, lean_object*, lean_object*, lean_object*, 
lean_object*, lean_object*, lean_object*); +LEAN_EXPORT lean_object* l_List_forIn_x27_loop___at_Lean_Meta_Grind_Arith_CommRing_EqCnstr_simplifyBasis_goVar___spec__1___lambda__1(lean_object*, lean_object*, lean_object*, lean_object*, lean_object*, lean_object*, lean_object*, lean_object*, lean_object*, lean_object*, lean_object*, lean_object*); lean_object* l_Lean_addTrace___at_Lean_Meta_Grind_Arith_CommRing_Null_setEqUnsat___spec__3(lean_object*, lean_object*, lean_object*, lean_object*, lean_object*, lean_object*, lean_object*, lean_object*, lean_object*, lean_object*, lean_object*, lean_object*); static lean_object* l___private_Lean_Meta_Tactic_Grind_Arith_CommRing_EqCnstr_0__Lean_Meta_Grind_Arith_CommRing_propagateEqs_process___lambda__2___closed__1; LEAN_EXPORT lean_object* l_Array_forIn_x27Unsafe_loop___at___private_Lean_Meta_Tactic_Grind_Arith_CommRing_EqCnstr_0__Lean_Meta_Grind_Arith_CommRing_propagateEqs___spec__4___lambda__1(lean_object*, lean_object*, lean_object*, lean_object*, lean_object*, lean_object*, lean_object*, lean_object*, lean_object*, lean_object*, lean_object*, lean_object*, lean_object*, lean_object*); static lean_object* l_List_forIn_x27_loop___at_Lean_Meta_Grind_Arith_CommRing_EqCnstr_superposeWith_go___spec__1___closed__7; LEAN_EXPORT lean_object* l_Lean_PersistentHashMap_foldlMAux_traverse___at___private_Lean_Meta_Tactic_Grind_Arith_CommRing_EqCnstr_0__Lean_Meta_Grind_Arith_CommRing_propagateEqs___spec__10___rarg(lean_object*, lean_object*, lean_object*, lean_object*, lean_object*, lean_object*, lean_object*, lean_object*, lean_object*, lean_object*, lean_object*, lean_object*, lean_object*, lean_object*, lean_object*, lean_object*); lean_object* l_Lean_Meta_Grind_Arith_CommRing_isQueueEmpty(lean_object*, lean_object*, lean_object*, lean_object*, lean_object*, lean_object*, lean_object*, lean_object*, lean_object*, lean_object*); -lean_object* l_Lean_Meta_Grind_Arith_CommRing_PolyDerivation_denoteExpr(lean_object*, lean_object*, lean_object*, lean_object*, lean_object*, lean_object*, lean_object*, lean_object*, lean_object*, lean_object*, lean_object*); static lean_object* l___private_Lean_Meta_Tactic_Grind_Arith_CommRing_EqCnstr_0__Lean_Meta_Grind_Arith_CommRing_toRingExpr_x3f___closed__5; static lean_object* l___private_Lean_Meta_Tactic_Grind_Arith_CommRing_EqCnstr_0__Lean_Meta_Grind_Arith_CommRing_checkDiseqs___closed__1; LEAN_EXPORT lean_object* l_Array_forIn_x27Unsafe_loop___at___private_Lean_Meta_Tactic_Grind_Arith_CommRing_EqCnstr_0__Lean_Meta_Grind_Arith_CommRing_checkDiseqs___spec__4___boxed(lean_object**); LEAN_EXPORT lean_object* l_Lean_PersistentArray_forInAux___at___private_Lean_Meta_Tactic_Grind_Arith_CommRing_EqCnstr_0__Lean_Meta_Grind_Arith_CommRing_checkDiseqs___spec__2___lambda__1___boxed(lean_object*, lean_object*, lean_object*, lean_object*, lean_object*, lean_object*, lean_object*, lean_object*, lean_object*, lean_object*, lean_object*, lean_object*); LEAN_EXPORT lean_object* l_Lean_Meta_Grind_Arith_CommRing_addToBasisCore___boxed(lean_object*, lean_object*, lean_object*, lean_object*, lean_object*, lean_object*, lean_object*, lean_object*, lean_object*, lean_object*, lean_object*); +static lean_object* l_List_forIn_x27_loop___at_Lean_Meta_Grind_Arith_CommRing_EqCnstr_simplifyBasis_goVar___spec__1___closed__2; LEAN_EXPORT lean_object* l_Lean_PersistentHashMap_forIn___at___private_Lean_Meta_Tactic_Grind_Arith_CommRing_EqCnstr_0__Lean_Meta_Grind_Arith_CommRing_propagateEqs___spec__6(lean_object*, lean_object*, lean_object*, lean_object*, 
lean_object*, lean_object*, lean_object*, lean_object*, lean_object*, lean_object*, lean_object*, lean_object*, lean_object*); LEAN_EXPORT lean_object* l_Std_DHashMap_Internal_AssocList_replace___at___private_Lean_Meta_Tactic_Grind_Arith_CommRing_EqCnstr_0__Lean_Meta_Grind_Arith_CommRing_propagateEqs_process___spec__6(lean_object*, lean_object*, lean_object*); LEAN_EXPORT lean_object* l_Lean_PersistentHashMap_foldlMAux___at___private_Lean_Meta_Tactic_Grind_Arith_CommRing_EqCnstr_0__Lean_Meta_Grind_Arith_CommRing_propagateEqs___spec__8(lean_object*, lean_object*, lean_object*); @@ -213,12 +219,14 @@ static lean_object* l_Array_forIn_x27Unsafe_loop___at___private_Lean_Meta_Tactic lean_object* l_Lean_isTracingEnabledFor___at_Lean_Meta_Grind_Arith_CommRing_Null_setEqUnsat___spec__1(lean_object*, lean_object*, lean_object*, lean_object*, lean_object*, lean_object*, lean_object*, lean_object*, lean_object*, lean_object*, lean_object*); LEAN_EXPORT lean_object* l_Lean_Meta_Grind_Arith_CommRing_checkRing___lambda__2(lean_object*, lean_object*, lean_object*, lean_object*, lean_object*, lean_object*, lean_object*, lean_object*, lean_object*, lean_object*, lean_object*, lean_object*); lean_object* l_Lean_Meta_Grind_Arith_CommRing_incSteps(lean_object*, lean_object*, lean_object*, lean_object*, lean_object*, lean_object*, lean_object*, lean_object*, lean_object*); +LEAN_EXPORT lean_object* l_List_forIn_x27_loop___at_Lean_Meta_Grind_Arith_CommRing_EqCnstr_simplifyBasis_goVar___spec__1___lambda__1___boxed(lean_object*, lean_object*, lean_object*, lean_object*, lean_object*, lean_object*, lean_object*, lean_object*, lean_object*, lean_object*, lean_object*, lean_object*); static lean_object* l_Array_forIn_x27Unsafe_loop___at___private_Lean_Meta_Tactic_Grind_Arith_CommRing_EqCnstr_0__Lean_Meta_Grind_Arith_CommRing_checkDiseqs___spec__4___closed__1; static lean_object* l___private_Lean_Meta_Tactic_Grind_Arith_CommRing_EqCnstr_0__Lean_Meta_Grind_Arith_CommRing_propagateEqs_process___lambda__4___closed__5; static lean_object* l___private_Lean_Meta_Tactic_Grind_Arith_CommRing_EqCnstr_0__Lean_Meta_Grind_Arith_CommRing_propagateEqs_process___lambda__2___closed__5; LEAN_EXPORT lean_object* l_Lean_Meta_Grind_Arith_CommRing_addNewDiseq(lean_object*, lean_object*, lean_object*, lean_object*, lean_object*, lean_object*, lean_object*, lean_object*, lean_object*, lean_object*, lean_object*); uint8_t l_Lean_Meta_Grind_isSameExpr_unsafe__1(lean_object*, lean_object*); LEAN_EXPORT lean_object* l___private_Lean_Meta_Tactic_Grind_Arith_CommRing_EqCnstr_0__Lean_Meta_Grind_Arith_CommRing_checkDiseqs(lean_object*, lean_object*, lean_object*, lean_object*, lean_object*, lean_object*, lean_object*, lean_object*, lean_object*, lean_object*); +lean_object* l_Lean_Grind_CommRing_Poly_denoteExpr___at_Lean_Meta_Grind_Arith_CommRing_getPolyConst___spec__1(lean_object*, lean_object*, lean_object*, lean_object*, lean_object*, lean_object*, lean_object*, lean_object*, lean_object*, lean_object*, lean_object*); LEAN_EXPORT lean_object* l_List_forIn_x27_loop___at_Lean_Grind_CommRing_Mon_findSimp_x3f_go___spec__1___boxed(lean_object**); static lean_object* l_Lean_Loop_forIn_loop___at_Lean_Meta_Grind_Arith_CommRing_PolyDerivation_simplify___spec__1___lambda__2___closed__4; lean_object* l___private_Init_Util_0__mkPanicMessageWithDecl(lean_object*, lean_object*, lean_object*, lean_object*, lean_object*); @@ -246,12 +254,16 @@ LEAN_EXPORT lean_object* l___private_Lean_Meta_Tactic_Grind_Arith_CommRing_EqCns LEAN_EXPORT lean_object* 
l_List_forIn_x27_loop___at_Lean_Grind_CommRing_Mon_findSimp_x3f_go___spec__1(lean_object*, lean_object*, uint8_t, uint8_t, lean_object*, lean_object*, lean_object*, lean_object*, lean_object*, lean_object*, lean_object*, lean_object*, lean_object*, lean_object*, lean_object*, lean_object*, lean_object*, lean_object*, lean_object*, lean_object*, lean_object*); lean_object* l_Lean_Grind_CommRing_Poly_mulConst(lean_object*, lean_object*); lean_object* l_Lean_Grind_CommRing_Poly_degree(lean_object*); +lean_object* l_Lean_Meta_Grind_Arith_CommRing_PolyDerivation_denoteExpr___at_Lean_Meta_Grind_Arith_CommRing_Null_setDiseqUnsat___spec__3(lean_object*, lean_object*, lean_object*, lean_object*, lean_object*, lean_object*, lean_object*, lean_object*, lean_object*, lean_object*, lean_object*); lean_object* lean_usize_to_nat(size_t); lean_object* l_Lean_MessageData_ofExpr(lean_object*); LEAN_EXPORT lean_object* l_Lean_Meta_Grind_Arith_CommRing_EqCnstr_superposeWith___lambda__1(lean_object*, lean_object*, lean_object*, lean_object*, lean_object*, lean_object*, lean_object*, lean_object*, lean_object*, lean_object*, lean_object*, lean_object*); lean_object* l_Lean_Meta_Grind_Arith_CommRing_EqCnstr_setUnsat(lean_object*, lean_object*, lean_object*, lean_object*, lean_object*, lean_object*, lean_object*, lean_object*, lean_object*, lean_object*, lean_object*); static lean_object* l___private_Lean_Meta_Tactic_Grind_Arith_CommRing_EqCnstr_0__Lean_Meta_Grind_Arith_CommRing_toRingExpr_x3f___closed__4; +static lean_object* l_List_forIn_x27_loop___at_Lean_Meta_Grind_Arith_CommRing_EqCnstr_superposeWith_go___spec__1___lambda__1___closed__1; +LEAN_EXPORT lean_object* l_List_forIn_x27_loop___at_Lean_Meta_Grind_Arith_CommRing_EqCnstr_simplifyBasis_goVar___spec__1___boxed(lean_object**); static lean_object* l_Lean_Loop_forIn_loop___at_Lean_Meta_Grind_Arith_CommRing_checkRing___spec__1___lambda__1___closed__2; +LEAN_EXPORT lean_object* l_Lean_Meta_Grind_Arith_CommRing_EqCnstr_simplifyBasis_goVar___lambda__1___boxed(lean_object*, lean_object*, lean_object*, lean_object*, lean_object*, lean_object*, lean_object*, lean_object*, lean_object*, lean_object*, lean_object*, lean_object*, lean_object*, lean_object*, lean_object*); lean_object* l_Lean_Grind_CommRing_Poly_spolM(lean_object*, lean_object*, lean_object*, lean_object*, lean_object*, lean_object*, lean_object*, lean_object*, lean_object*, lean_object*, lean_object*, lean_object*); LEAN_EXPORT lean_object* l_Lean_PersistentHashMap_foldlM___at___private_Lean_Meta_Tactic_Grind_Arith_CommRing_EqCnstr_0__Lean_Meta_Grind_Arith_CommRing_propagateEqs___spec__7(lean_object*, lean_object*, lean_object*, lean_object*, lean_object*, lean_object*, lean_object*, lean_object*, lean_object*, lean_object*, lean_object*, lean_object*, lean_object*); LEAN_EXPORT lean_object* l_Lean_Meta_Grind_Arith_CommRing_EqCnstr_addToQueue___lambda__2___boxed(lean_object*, lean_object*, lean_object*, lean_object*, lean_object*, lean_object*, lean_object*, lean_object*, lean_object*, lean_object*, lean_object*, lean_object*); @@ -261,6 +273,7 @@ LEAN_EXPORT lean_object* l___private_Lean_Meta_Tactic_Grind_Arith_CommRing_EqCns static lean_object* l_Lean_Meta_Grind_Arith_CommRing_EqCnstr_checkConstant___closed__4; LEAN_EXPORT lean_object* lean_process_ring_eq(lean_object*, lean_object*, lean_object*, lean_object*, lean_object*, lean_object*, lean_object*, lean_object*, lean_object*, lean_object*, lean_object*); LEAN_EXPORT lean_object* 
l_Lean_Meta_Grind_Arith_CommRing_EqCnstr_addToBasisAfterSimp(lean_object*, lean_object*, lean_object*, lean_object*, lean_object*, lean_object*, lean_object*, lean_object*, lean_object*, lean_object*, lean_object*); +lean_object* l_Lean_Meta_Grind_Arith_CommRing_DiseqCnstr_denoteExpr___at_Lean_Meta_Grind_Arith_CommRing_Null_setDiseqUnsat___spec__2(lean_object*, lean_object*, lean_object*, lean_object*, lean_object*, lean_object*, lean_object*, lean_object*, lean_object*, lean_object*, lean_object*); LEAN_EXPORT lean_object* l_Array_foldlMUnsafe_fold___at___private_Lean_Meta_Tactic_Grind_Arith_CommRing_EqCnstr_0__Lean_Meta_Grind_Arith_CommRing_propagateEqs___spec__9___rarg(lean_object*, lean_object*, size_t, size_t, lean_object*, lean_object*, lean_object*, lean_object*, lean_object*, lean_object*, lean_object*, lean_object*, lean_object*, lean_object*, lean_object*); lean_object* lean_array_fget(lean_object*, lean_object*); lean_object* lean_nat_abs(lean_object*); @@ -277,22 +290,24 @@ LEAN_EXPORT lean_object* l_Lean_Meta_Grind_Arith_CommRing_addNewDiseq___lambda__ uint8_t lean_nat_dec_eq(lean_object*, lean_object*); uint8_t lean_nat_dec_lt(lean_object*, lean_object*); LEAN_EXPORT lean_object* l_Lean_Meta_Grind_Arith_CommRing_check___lambda__1___boxed(lean_object*, lean_object*, lean_object*, lean_object*, lean_object*, lean_object*, lean_object*, lean_object*, lean_object*, lean_object*, lean_object*); -lean_object* l_Lean_Grind_CommRing_Poly_denoteExpr(lean_object*, lean_object*, lean_object*, lean_object*, lean_object*, lean_object*, lean_object*, lean_object*, lean_object*, lean_object*, lean_object*); LEAN_EXPORT lean_object* l_Array_forIn_x27Unsafe_loop___at___private_Lean_Meta_Tactic_Grind_Arith_CommRing_EqCnstr_0__Lean_Meta_Grind_Arith_CommRing_checkDiseqs___spec__4(lean_object*, lean_object*, lean_object*, lean_object*, lean_object*, size_t, size_t, lean_object*, lean_object*, lean_object*, lean_object*, lean_object*, lean_object*, lean_object*, lean_object*, lean_object*, lean_object*, lean_object*); static lean_object* l_Lean_Meta_Grind_Arith_CommRing_PolyDerivation_simplifyWith___closed__4; LEAN_EXPORT lean_object* l_Lean_PersistentArray_modify___at_Lean_Meta_Grind_Arith_CommRing_addToBasisCore___spec__1___boxed(lean_object*, lean_object*, lean_object*, lean_object*); LEAN_EXPORT lean_object* l_Lean_PersistentArray_modify___at_Lean_Meta_Grind_Arith_CommRing_addToBasisCore___spec__1(lean_object*, lean_object*, lean_object*, lean_object*); LEAN_EXPORT lean_object* l_Lean_Grind_CommRing_Poly_findSimp_x3f(lean_object*, lean_object*, lean_object*, lean_object*, lean_object*, lean_object*, lean_object*, lean_object*, lean_object*, lean_object*, lean_object*); lean_object* l_Lean_indentExpr(lean_object*); +LEAN_EXPORT lean_object* l_Lean_Meta_Grind_Arith_CommRing_EqCnstr_simplifyBasis_go(lean_object*, lean_object*, lean_object*, lean_object*, lean_object*, lean_object*, lean_object*, lean_object*, lean_object*, lean_object*, lean_object*, lean_object*, lean_object*); LEAN_EXPORT lean_object* l_Lean_Meta_Grind_Arith_CommRing_PolyDerivation_simplifyWith___lambda__1___boxed(lean_object*, lean_object*, lean_object*, lean_object*, lean_object*, lean_object*, lean_object*, lean_object*, lean_object*, lean_object*, lean_object*, lean_object*, lean_object*, lean_object*); static lean_object* l___private_Lean_Meta_Tactic_Grind_Arith_CommRing_EqCnstr_0__Lean_Meta_Grind_Arith_CommRing_needCheck___closed__1; LEAN_EXPORT lean_object* 
l_Lean_Loop_forIn_loop___at_Lean_Meta_Grind_Arith_CommRing_PolyDerivation_simplify___spec__1___lambda__1___boxed(lean_object*, lean_object*, lean_object*, lean_object*, lean_object*, lean_object*, lean_object*, lean_object*, lean_object*, lean_object*, lean_object*, lean_object*); LEAN_EXPORT lean_object* l_Lean_Meta_Grind_Arith_CommRing_EqCnstr_superposeWith_go(lean_object*, lean_object*, lean_object*, lean_object*, lean_object*, lean_object*, lean_object*, lean_object*, lean_object*, lean_object*, lean_object*, lean_object*); LEAN_EXPORT lean_object* l___private_Lean_Meta_Tactic_Grind_Arith_CommRing_EqCnstr_0__Lean_Meta_Grind_Arith_CommRing_propagateEqs___lambda__1___boxed(lean_object*, lean_object*, lean_object*, lean_object*, lean_object*, lean_object*, lean_object*, lean_object*, lean_object*, lean_object*, lean_object*, lean_object*, lean_object*, lean_object*, lean_object*); static lean_object* l_Lean_Loop_forIn_loop___at_Lean_Meta_Grind_Arith_CommRing_checkRing___spec__1___lambda__1___closed__3; +static lean_object* l_List_forIn_x27_loop___at_Lean_Meta_Grind_Arith_CommRing_EqCnstr_simplifyBasis_goVar___spec__1___lambda__2___closed__2; LEAN_EXPORT lean_object* l_Lean_PersistentArray_modifyAux___at_Lean_Meta_Grind_Arith_CommRing_addToBasisCore___spec__2___boxed(lean_object*, lean_object*, lean_object*, lean_object*, lean_object*); uint8_t l_Lean_Meta_Grind_Arith_CommRing_EqCnstr_compare(lean_object*, lean_object*); LEAN_EXPORT lean_object* l_Lean_Loop_forIn_loop___at_Lean_Meta_Grind_Arith_CommRing_EqCnstr_simplify___spec__1___lambda__1(lean_object*, lean_object*, lean_object*, lean_object*, lean_object*, lean_object*, lean_object*, lean_object*, lean_object*, lean_object*, lean_object*, lean_object*, lean_object*); +LEAN_EXPORT lean_object* l_Lean_Meta_Grind_Arith_CommRing_EqCnstr_simplifyBasis_goVar___lambda__1(lean_object*, lean_object*, lean_object*, lean_object*, lean_object*, lean_object*, lean_object*, lean_object*, lean_object*, lean_object*, lean_object*, lean_object*, lean_object*, lean_object*, lean_object*); LEAN_EXPORT lean_object* l_Lean_Meta_Grind_Arith_CommRing_check___lambda__1(lean_object*, lean_object*, lean_object*, lean_object*, lean_object*, lean_object*, lean_object*, lean_object*, lean_object*, lean_object*, lean_object*); uint64_t lean_uint64_xor(uint64_t, uint64_t); LEAN_EXPORT lean_object* l_Lean_Meta_Grind_Arith_CommRing_processNewDiseqImpl___lambda__1___boxed(lean_object*, lean_object*, lean_object*, lean_object*, lean_object*, lean_object*, lean_object*, lean_object*, lean_object*, lean_object*, lean_object*, lean_object*, lean_object*); @@ -301,11 +316,15 @@ static lean_object* l_Lean_Meta_Grind_Arith_CommRing_EqCnstr_checkConstant___clo static lean_object* l___private_Lean_Meta_Tactic_Grind_Arith_CommRing_EqCnstr_0__Lean_Meta_Grind_Arith_CommRing_propagateEqs_process___closed__1; lean_object* l_Lean_Meta_Grind_isEqv(lean_object*, lean_object*, lean_object*, lean_object*, lean_object*, lean_object*, lean_object*, lean_object*, lean_object*, lean_object*, lean_object*); uint8_t lean_int_dec_lt(lean_object*, lean_object*); +static lean_object* l_Lean_Meta_Grind_Arith_CommRing_EqCnstr_simplifyBasis___closed__2; +LEAN_EXPORT lean_object* l_List_forIn_x27_loop___at_Lean_Meta_Grind_Arith_CommRing_EqCnstr_simplifyBasis_goVar___spec__1(lean_object*, lean_object*, lean_object*, lean_object*, lean_object*, lean_object*, lean_object*, lean_object*, lean_object*, lean_object*, lean_object*, lean_object*, lean_object*, lean_object*, lean_object*, lean_object*, 
lean_object*, lean_object*); lean_object* lean_nat_sub(lean_object*, lean_object*); static lean_object* l_Lean_Meta_Grind_Arith_CommRing_processNewEqImpl___lambda__2___closed__1; +static lean_object* l_Lean_Meta_Grind_Arith_CommRing_EqCnstr_simplifyBasis___closed__1; LEAN_EXPORT lean_object* l_Lean_Meta_Grind_Arith_CommRing_EqCnstr_addToQueue(lean_object*, lean_object*, lean_object*, lean_object*, lean_object*, lean_object*, lean_object*, lean_object*, lean_object*, lean_object*, lean_object*); lean_object* lean_nat_mul(lean_object*, lean_object*); lean_object* l_Lean_Grind_CommRing_Poly_lc(lean_object*); +LEAN_EXPORT lean_object* l_Lean_Meta_Grind_Arith_CommRing_EqCnstr_simplifyBasis_goVar(lean_object*, lean_object*, lean_object*, lean_object*, lean_object*, lean_object*, lean_object*, lean_object*, lean_object*, lean_object*, lean_object*, lean_object*, lean_object*); LEAN_EXPORT lean_object* l_Lean_Loop_forIn_loop___at_Lean_Meta_Grind_Arith_CommRing_PolyDerivation_simplify___spec__1___lambda__2___boxed(lean_object*, lean_object*, lean_object*, lean_object*, lean_object*, lean_object*, lean_object*, lean_object*, lean_object*, lean_object*, lean_object*, lean_object*, lean_object*); LEAN_EXPORT lean_object* l_Lean_Meta_Grind_Arith_CommRing_checkRing___lambda__2___boxed(lean_object*, lean_object*, lean_object*, lean_object*, lean_object*, lean_object*, lean_object*, lean_object*, lean_object*, lean_object*, lean_object*, lean_object*); LEAN_EXPORT lean_object* l_Std_DHashMap_Internal_Raw_u2080_expand___at___private_Lean_Meta_Tactic_Grind_Arith_CommRing_EqCnstr_0__Lean_Meta_Grind_Arith_CommRing_propagateEqs_process___spec__2(lean_object*); @@ -314,7 +333,6 @@ LEAN_EXPORT lean_object* l_Lean_PersistentArray_forInAux___at___private_Lean_Met static lean_object* l_List_forIn_x27_loop___at_Lean_Meta_Grind_Arith_CommRing_EqCnstr_superposeWith_go___spec__1___closed__5; static lean_object* l_Lean_Meta_Grind_Arith_CommRing_check___closed__1; LEAN_EXPORT lean_object* l_Lean_Meta_Grind_Arith_CommRing_EqCnstr_addToBasisAfterSimp___lambda__1___boxed(lean_object*, lean_object*, lean_object*, lean_object*, lean_object*, lean_object*, lean_object*, lean_object*, lean_object*, lean_object*, lean_object*, lean_object*); -LEAN_EXPORT lean_object* l_List_forIn_x27_loop___at_Lean_Meta_Grind_Arith_CommRing_EqCnstr_simplifyBasis___spec__1(lean_object*, lean_object*, lean_object*, lean_object*, lean_object*, lean_object*, lean_object*, lean_object*, lean_object*, lean_object*, lean_object*, lean_object*, lean_object*, lean_object*, lean_object*, lean_object*, lean_object*, lean_object*); static lean_object* l_List_forIn_x27_loop___at_Lean_Meta_Grind_Arith_CommRing_EqCnstr_superposeWith_go___spec__1___closed__8; static lean_object* l_Lean_Meta_Grind_Arith_CommRing_checkRing___lambda__3___closed__2; LEAN_EXPORT lean_object* l_Lean_Meta_Grind_Arith_CommRing_EqCnstr_addToBasis(lean_object*, lean_object*, lean_object*, lean_object*, lean_object*, lean_object*, lean_object*, lean_object*, lean_object*, lean_object*, lean_object*); @@ -325,6 +343,7 @@ size_t lean_usize_sub(size_t, size_t); LEAN_EXPORT lean_object* l_Lean_Meta_Grind_Arith_CommRing_PolyDerivation_simplifyWith(lean_object*, lean_object*, lean_object*, lean_object*, lean_object*, lean_object*, lean_object*, lean_object*, lean_object*, lean_object*, lean_object*, lean_object*); lean_object* l_Lean_PersistentArray_get_x21___rarg(lean_object*, lean_object*, lean_object*); lean_object* l_Lean_Meta_Grind_Arith_CommRing_get_x27(lean_object*, lean_object*, 
lean_object*, lean_object*, lean_object*, lean_object*, lean_object*, lean_object*, lean_object*); +LEAN_EXPORT lean_object* l_Lean_Meta_Grind_Arith_CommRing_EqCnstr_simplifyBasis_goVar___boxed(lean_object*, lean_object*, lean_object*, lean_object*, lean_object*, lean_object*, lean_object*, lean_object*, lean_object*, lean_object*, lean_object*, lean_object*, lean_object*); LEAN_EXPORT lean_object* l_Lean_Meta_Grind_Arith_CommRing_saveDiseq___lambda__1___boxed(lean_object*, lean_object*, lean_object*, lean_object*, lean_object*, lean_object*, lean_object*, lean_object*, lean_object*, lean_object*, lean_object*, lean_object*); uint8_t l_Lean_Grind_CommRing_Poly_divides(lean_object*, lean_object*); LEAN_EXPORT lean_object* l_List_forIn_x27_loop___at_Lean_Meta_Grind_Arith_CommRing_EqCnstr_superposeWith_go___spec__1___lambda__1___boxed(lean_object*, lean_object*, lean_object*, lean_object*, lean_object*, lean_object*, lean_object*, lean_object*, lean_object*, lean_object*, lean_object*, lean_object*, lean_object*, lean_object*); @@ -332,6 +351,7 @@ static lean_object* l_Lean_Meta_Grind_Arith_CommRing_PolyDerivation_simplifyWith static lean_object* l_Array_forIn_x27Unsafe_loop___at___private_Lean_Meta_Tactic_Grind_Arith_CommRing_EqCnstr_0__Lean_Meta_Grind_Arith_CommRing_checkDiseqs___spec__4___closed__3; uint64_t l_Lean_Meta_Grind_instHashableENodeKey_unsafe__1(lean_object*); size_t lean_usize_add(size_t, size_t); +static lean_object* l_List_forIn_x27_loop___at_Lean_Meta_Grind_Arith_CommRing_EqCnstr_simplifyBasis_goVar___spec__1___closed__1; LEAN_EXPORT lean_object* l___private_Lean_Meta_Tactic_Grind_Arith_CommRing_EqCnstr_0__Lean_Meta_Grind_Arith_CommRing_propagateEqs___lambda__3___boxed(lean_object*, lean_object*, lean_object*, lean_object*, lean_object*, lean_object*, lean_object*, lean_object*, lean_object*, lean_object*, lean_object*, lean_object*, lean_object*); static lean_object* l___private_Lean_Meta_Tactic_Grind_Arith_CommRing_EqCnstr_0__Lean_Meta_Grind_Arith_CommRing_propagateEqs_process___lambda__2___closed__2; LEAN_EXPORT lean_object* l_Lean_Meta_Grind_Arith_CommRing_EqCnstr_checkConstant(lean_object*, lean_object*, lean_object*, lean_object*, lean_object*, lean_object*, lean_object*, lean_object*, lean_object*, lean_object*, lean_object*); @@ -342,14 +362,15 @@ LEAN_EXPORT lean_object* l_Lean_Meta_Grind_Arith_CommRing_EqCnstr_addToBasisAfte static lean_object* l___private_Lean_Meta_Tactic_Grind_Arith_CommRing_EqCnstr_0__Lean_Meta_Grind_Arith_CommRing_propagateEqs___lambda__4___closed__4; LEAN_EXPORT lean_object* l_Array_forIn_x27Unsafe_loop___at___private_Lean_Meta_Tactic_Grind_Arith_CommRing_EqCnstr_0__Lean_Meta_Grind_Arith_CommRing_propagateEqs___spec__4___lambda__1___boxed(lean_object*, lean_object*, lean_object*, lean_object*, lean_object*, lean_object*, lean_object*, lean_object*, lean_object*, lean_object*, lean_object*, lean_object*, lean_object*, lean_object*); LEAN_EXPORT lean_object* l_Lean_Loop_forIn_loop___at_Lean_Meta_Grind_Arith_CommRing_checkRing___spec__1___lambda__1___boxed(lean_object*, lean_object*, lean_object*, lean_object*, lean_object*, lean_object*, lean_object*, lean_object*, lean_object*, lean_object*, lean_object*, lean_object*); -lean_object* l_Lean_Meta_Grind_Arith_CommRing_DiseqCnstr_denoteExpr(lean_object*, lean_object*, lean_object*, lean_object*, lean_object*, lean_object*, lean_object*, lean_object*, lean_object*, lean_object*, lean_object*); static lean_object* 
l_List_forIn_x27_loop___at_Lean_Meta_Grind_Arith_CommRing_EqCnstr_superposeWith_go___spec__1___closed__3; +lean_object* l_Lean_Meta_Grind_Arith_CommRing_RingM_getRing(lean_object*, lean_object*, lean_object*, lean_object*, lean_object*, lean_object*, lean_object*, lean_object*, lean_object*, lean_object*); lean_object* lean_st_ref_set(lean_object*, lean_object*, lean_object*); uint8_t l___private_Init_Grind_CommRing_Poly_0__Lean_Grind_CommRing_beqPoly____x40_Init_Grind_CommRing_Poly___hyg_3550_(lean_object*, lean_object*); size_t lean_usize_shift_left(size_t, size_t); lean_object* l_Lean_Name_mkStr4(lean_object*, lean_object*, lean_object*, lean_object*); uint8_t lean_int_dec_eq(lean_object*, lean_object*); lean_object* l_Lean_PersistentHashMap_find_x3f___at_Lean_Meta_Grind_Arith_CommRing_mkVar___spec__5(lean_object*, lean_object*); +static lean_object* l_List_forIn_x27_loop___at_Lean_Meta_Grind_Arith_CommRing_EqCnstr_simplifyBasis_goVar___spec__1___lambda__2___closed__1; static lean_object* l_Lean_Meta_Grind_Arith_CommRing_checkRing___lambda__3___closed__1; lean_object* l_Lean_Meta_Grind_Arith_CommRing_PolyDerivation_p(lean_object*); lean_object* lean_string_append(lean_object*, lean_object*); @@ -381,12 +402,13 @@ LEAN_EXPORT lean_object* l_Lean_Loop_forIn_loop___at_Lean_Meta_Grind_Arith_CommR lean_object* l_Lean_Meta_Grind_isInconsistent(lean_object*, lean_object*, lean_object*, lean_object*, lean_object*, lean_object*, lean_object*, lean_object*, lean_object*); LEAN_EXPORT lean_object* l_Lean_PersistentHashMap_find_x3f___at___private_Lean_Meta_Tactic_Grind_Arith_CommRing_EqCnstr_0__Lean_Meta_Grind_Arith_CommRing_toRingExpr_x3f___spec__1___boxed(lean_object*, lean_object*); static lean_object* l___private_Lean_Meta_Tactic_Grind_Arith_CommRing_EqCnstr_0__Lean_Meta_Grind_Arith_CommRing_propagateEqs___lambda__4___closed__3; +lean_object* l_Lean_Meta_Grind_Arith_CommRing_EqCnstr_denoteExpr___at_Lean_Meta_Grind_Arith_CommRing_Null_setEqUnsat___spec__4(lean_object*, lean_object*, lean_object*, lean_object*, lean_object*, lean_object*, lean_object*, lean_object*, lean_object*, lean_object*, lean_object*); static lean_object* l_Lean_Meta_Grind_Arith_CommRing_EqCnstr_checkConstant___closed__7; LEAN_EXPORT lean_object* l___private_Lean_Meta_Tactic_Grind_Arith_CommRing_EqCnstr_0__Lean_Meta_Grind_Arith_CommRing_propagateEqs(lean_object*, lean_object*, lean_object*, lean_object*, lean_object*, lean_object*, lean_object*, lean_object*, lean_object*, lean_object*); static lean_object* l_Lean_Meta_Grind_Arith_CommRing_EqCnstr_toMonic___lambda__1___closed__1; static lean_object* l___private_Lean_Meta_Tactic_Grind_Arith_CommRing_EqCnstr_0__Lean_Meta_Grind_Arith_CommRing_propagateEqs___closed__1; size_t lean_usize_land(size_t, size_t); -LEAN_EXPORT lean_object* l_Lean_Meta_Grind_Arith_CommRing_EqCnstr_simplifyBasis___lambda__1(lean_object*, lean_object*, lean_object*, lean_object*, lean_object*, lean_object*, lean_object*, lean_object*, lean_object*, lean_object*, lean_object*, lean_object*, lean_object*, lean_object*, lean_object*); +LEAN_EXPORT lean_object* l_Lean_Meta_Grind_Arith_CommRing_EqCnstr_simplifyBasis___lambda__1(lean_object*, lean_object*, lean_object*, lean_object*, lean_object*, lean_object*, lean_object*, lean_object*, lean_object*, lean_object*, lean_object*, lean_object*); uint8_t l_Lean_Grind_CommRing_Mon_divides(lean_object*, lean_object*); static lean_object* l_List_forIn_x27_loop___at_Lean_Meta_Grind_Arith_CommRing_EqCnstr_superposeWith_go___spec__1___closed__2; LEAN_EXPORT 
lean_object* l___private_Lean_Meta_Tactic_Grind_Arith_CommRing_EqCnstr_0__Lean_Meta_Grind_Arith_CommRing_propagateEqs___lambda__4___boxed(lean_object*, lean_object*, lean_object*, lean_object*, lean_object*, lean_object*, lean_object*, lean_object*, lean_object*, lean_object*, lean_object*); @@ -578,7 +600,7 @@ LEAN_EXPORT lean_object* l_Lean_Meta_Grind_Arith_CommRing_mkEqCnstr(lean_object* _start: { lean_object* x_13; -x_13 = l_Lean_Meta_Grind_Arith_CommRing_getRing(x_3, x_4, x_5, x_6, x_7, x_8, x_9, x_10, x_11, x_12); +x_13 = l_Lean_Meta_Grind_Arith_CommRing_RingM_getRing(x_3, x_4, x_5, x_6, x_7, x_8, x_9, x_10, x_11, x_12); if (lean_obj_tag(x_13) == 0) { lean_object* x_14; lean_object* x_15; lean_object* x_16; lean_object* x_17; lean_object* x_18; lean_object* x_19; lean_object* x_20; lean_object* x_21; lean_object* x_22; lean_object* x_23; uint8_t x_24; @@ -1831,7 +1853,7 @@ LEAN_EXPORT lean_object* l___private_Lean_Meta_Tactic_Grind_Arith_CommRing_EqCns _start: { lean_object* x_12; -x_12 = l_Lean_Meta_Grind_Arith_CommRing_getRing(x_2, x_3, x_4, x_5, x_6, x_7, x_8, x_9, x_10, x_11); +x_12 = l_Lean_Meta_Grind_Arith_CommRing_RingM_getRing(x_2, x_3, x_4, x_5, x_6, x_7, x_8, x_9, x_10, x_11); if (lean_obj_tag(x_12) == 0) { uint8_t x_13; @@ -2628,7 +2650,7 @@ else lean_object* x_18; lean_object* x_19; lean_object* x_20; x_18 = lean_ctor_get(x_5, 0); x_19 = lean_ctor_get(x_5, 1); -x_20 = l_Lean_Meta_Grind_Arith_CommRing_getRing(x_6, x_7, x_8, x_9, x_10, x_11, x_12, x_13, x_14, x_15); +x_20 = l_Lean_Meta_Grind_Arith_CommRing_RingM_getRing(x_6, x_7, x_8, x_9, x_10, x_11, x_12, x_13, x_14, x_15); if (lean_obj_tag(x_20) == 0) { lean_object* x_21; lean_object* x_22; lean_object* x_23; lean_object* x_24; lean_object* x_25; lean_object* x_26; lean_object* x_27; uint8_t x_28; lean_object* x_29; @@ -3120,7 +3142,7 @@ lean_inc(x_6); lean_inc(x_5); lean_inc(x_4); lean_inc(x_3); -x_13 = l_Lean_Meta_Grind_Arith_CommRing_nonzeroChar_x3f(x_3, x_4, x_5, x_6, x_7, x_8, x_9, x_10, x_11, x_12); +x_13 = l_Lean_Meta_Grind_Arith_CommRing_nonzeroChar_x3f___at_Lean_Grind_CommRing_Expr_toPolyM___spec__1(x_3, x_4, x_5, x_6, x_7, x_8, x_9, x_10, x_11, x_12); if (lean_obj_tag(x_13) == 0) { uint8_t x_14; @@ -3210,7 +3232,7 @@ lean_inc(x_36); lean_dec(x_35); x_37 = lean_ctor_get(x_20, 0); lean_inc(x_37); -x_38 = l_Lean_Grind_CommRing_Poly_denoteExpr(x_37, x_3, x_4, x_5, x_6, x_7, x_8, x_9, x_10, x_11, x_36); +x_38 = l_Lean_Grind_CommRing_Poly_denoteExpr___at_Lean_Meta_Grind_Arith_CommRing_getPolyConst___spec__1(x_37, x_3, x_4, x_5, x_6, x_7, x_8, x_9, x_10, x_11, x_36); if (lean_obj_tag(x_38) == 0) { lean_object* x_39; lean_object* x_40; lean_object* x_41; lean_object* x_42; lean_object* x_43; lean_object* x_44; lean_object* x_45; lean_object* x_46; @@ -3336,7 +3358,7 @@ lean_inc(x_57); lean_dec(x_56); x_58 = lean_ctor_get(x_20, 0); lean_inc(x_58); -x_59 = l_Lean_Grind_CommRing_Poly_denoteExpr(x_58, x_3, x_4, x_5, x_6, x_7, x_8, x_9, x_10, x_11, x_57); +x_59 = l_Lean_Grind_CommRing_Poly_denoteExpr___at_Lean_Meta_Grind_Arith_CommRing_getPolyConst___spec__1(x_58, x_3, x_4, x_5, x_6, x_7, x_8, x_9, x_10, x_11, x_57); if (lean_obj_tag(x_59) == 0) { lean_object* x_60; lean_object* x_61; lean_object* x_62; lean_object* x_63; lean_object* x_64; lean_object* x_65; lean_object* x_66; lean_object* x_67; lean_object* x_68; @@ -3505,7 +3527,7 @@ lean_inc(x_88); lean_dec(x_87); x_89 = lean_ctor_get(x_20, 0); lean_inc(x_89); -x_90 = l_Lean_Grind_CommRing_Poly_denoteExpr(x_89, x_3, x_4, x_5, x_6, x_7, x_8, x_9, x_10, x_11, x_88); +x_90 
= l_Lean_Grind_CommRing_Poly_denoteExpr___at_Lean_Meta_Grind_Arith_CommRing_getPolyConst___spec__1(x_89, x_3, x_4, x_5, x_6, x_7, x_8, x_9, x_10, x_11, x_88); if (lean_obj_tag(x_90) == 0) { lean_object* x_91; lean_object* x_92; lean_object* x_93; lean_object* x_94; lean_object* x_95; lean_object* x_96; lean_object* x_97; lean_object* x_98; lean_object* x_99; lean_object* x_100; @@ -3722,7 +3744,7 @@ lean_inc(x_129); lean_dec(x_128); x_130 = lean_ctor_get(x_115, 0); lean_inc(x_130); -x_131 = l_Lean_Grind_CommRing_Poly_denoteExpr(x_130, x_3, x_4, x_5, x_6, x_7, x_8, x_9, x_10, x_11, x_129); +x_131 = l_Lean_Grind_CommRing_Poly_denoteExpr___at_Lean_Meta_Grind_Arith_CommRing_getPolyConst___spec__1(x_130, x_3, x_4, x_5, x_6, x_7, x_8, x_9, x_10, x_11, x_129); if (lean_obj_tag(x_131) == 0) { lean_object* x_132; lean_object* x_133; lean_object* x_134; lean_object* x_135; lean_object* x_136; lean_object* x_137; lean_object* x_138; lean_object* x_139; lean_object* x_140; lean_object* x_141; @@ -4019,7 +4041,7 @@ lean_object* x_29; lean_object* x_30; x_29 = lean_ctor_get(x_28, 1); lean_inc(x_29); lean_dec(x_28); -x_30 = l_Lean_Meta_Grind_Arith_CommRing_PolyDerivation_denoteExpr(x_2, x_4, x_5, x_6, x_7, x_8, x_9, x_10, x_11, x_12, x_29); +x_30 = l_Lean_Meta_Grind_Arith_CommRing_PolyDerivation_denoteExpr___at_Lean_Meta_Grind_Arith_CommRing_Null_setDiseqUnsat___spec__3(x_2, x_4, x_5, x_6, x_7, x_8, x_9, x_10, x_11, x_12, x_29); if (lean_obj_tag(x_30) == 0) { lean_object* x_31; lean_object* x_32; lean_object* x_33; lean_object* x_34; lean_object* x_35; lean_object* x_36; lean_object* x_37; lean_object* x_38; lean_object* x_39; lean_object* x_40; lean_object* x_41; @@ -4138,7 +4160,7 @@ lean_object* x_52; lean_object* x_53; x_52 = lean_ctor_get(x_51, 1); lean_inc(x_52); lean_dec(x_51); -x_53 = l_Lean_Meta_Grind_Arith_CommRing_PolyDerivation_denoteExpr(x_2, x_4, x_5, x_6, x_7, x_8, x_9, x_10, x_11, x_12, x_52); +x_53 = l_Lean_Meta_Grind_Arith_CommRing_PolyDerivation_denoteExpr___at_Lean_Meta_Grind_Arith_CommRing_Null_setDiseqUnsat___spec__3(x_2, x_4, x_5, x_6, x_7, x_8, x_9, x_10, x_11, x_12, x_52); if (lean_obj_tag(x_53) == 0) { lean_object* x_54; lean_object* x_55; lean_object* x_56; lean_object* x_57; lean_object* x_58; lean_object* x_59; lean_object* x_60; lean_object* x_61; lean_object* x_62; lean_object* x_63; lean_object* x_64; lean_object* x_65; @@ -4903,7 +4925,19 @@ lean_dec(x_2); return x_13; } } -LEAN_EXPORT lean_object* l_Lean_Meta_Grind_Arith_CommRing_EqCnstr_simplifyWith(lean_object* x_1, lean_object* x_2, lean_object* x_3, lean_object* x_4, lean_object* x_5, lean_object* x_6, lean_object* x_7, lean_object* x_8, lean_object* x_9, lean_object* x_10, lean_object* x_11, lean_object* x_12) { +LEAN_EXPORT lean_object* l_Lean_Meta_Grind_Arith_CommRing_EqCnstr_simplifyWithCore___lambda__1(lean_object* x_1, lean_object* x_2, lean_object* x_3, lean_object* x_4, lean_object* x_5, lean_object* x_6, lean_object* x_7, lean_object* x_8, lean_object* x_9, lean_object* x_10, lean_object* x_11, lean_object* x_12) { +_start: +{ +lean_object* x_13; lean_object* x_14; +x_13 = lean_alloc_ctor(1, 1, 0); +lean_ctor_set(x_13, 0, x_1); +x_14 = lean_alloc_ctor(0, 2, 0); +lean_ctor_set(x_14, 0, x_13); +lean_ctor_set(x_14, 1, x_12); +return x_14; +} +} +LEAN_EXPORT lean_object* l_Lean_Meta_Grind_Arith_CommRing_EqCnstr_simplifyWithCore(lean_object* x_1, lean_object* x_2, lean_object* x_3, lean_object* x_4, lean_object* x_5, lean_object* x_6, lean_object* x_7, lean_object* x_8, lean_object* x_9, lean_object* x_10, 
lean_object* x_11, lean_object* x_12) { _start: { lean_object* x_13; @@ -4916,7 +4950,7 @@ lean_inc(x_6); lean_inc(x_5); lean_inc(x_4); lean_inc(x_3); -x_13 = l_Lean_Meta_Grind_Arith_CommRing_nonzeroChar_x3f(x_3, x_4, x_5, x_6, x_7, x_8, x_9, x_10, x_11, x_12); +x_13 = l_Lean_Meta_Grind_Arith_CommRing_nonzeroChar_x3f___at_Lean_Grind_CommRing_Expr_toPolyM___spec__1(x_3, x_4, x_5, x_6, x_7, x_8, x_9, x_10, x_11, x_12); if (lean_obj_tag(x_13) == 0) { uint8_t x_14; @@ -4937,6 +4971,7 @@ lean_inc(x_20); x_21 = l_Lean_Grind_CommRing_Poly_simp_x3f(x_17, x_20, x_15); if (lean_obj_tag(x_21) == 0) { +lean_object* x_22; lean_dec(x_19); lean_dec(x_18); lean_dec(x_11); @@ -4949,53 +4984,60 @@ lean_dec(x_5); lean_dec(x_4); lean_dec(x_3); lean_dec(x_2); -lean_ctor_set(x_13, 0, x_1); +lean_dec(x_1); +x_22 = lean_box(0); +lean_ctor_set(x_13, 0, x_22); return x_13; } else { -lean_object* x_22; uint8_t x_23; +lean_object* x_23; uint8_t x_24; lean_free_object(x_13); -x_22 = lean_ctor_get(x_21, 0); -lean_inc(x_22); +x_23 = lean_ctor_get(x_21, 0); +lean_inc(x_23); lean_dec(x_21); -x_23 = !lean_is_exclusive(x_22); -if (x_23 == 0) +x_24 = !lean_is_exclusive(x_23); +if (x_24 == 0) { -lean_object* x_24; lean_object* x_25; lean_object* x_26; lean_object* x_27; lean_object* x_28; lean_object* x_29; uint8_t x_30; -x_24 = lean_ctor_get(x_22, 0); -x_25 = lean_ctor_get(x_22, 1); -x_26 = lean_ctor_get(x_22, 2); -x_27 = lean_ctor_get(x_22, 3); -x_28 = lean_alloc_ctor(2, 5, 0); -lean_ctor_set(x_28, 0, x_25); -lean_ctor_set(x_28, 1, x_1); -lean_ctor_set(x_28, 2, x_26); -lean_ctor_set(x_28, 3, x_27); -lean_ctor_set(x_28, 4, x_2); -lean_inc(x_24); -lean_ctor_set(x_22, 3, x_19); -lean_ctor_set(x_22, 2, x_18); -lean_ctor_set(x_22, 1, x_28); -x_29 = l_Lean_Meta_Grind_Arith_CommRing_incSteps(x_4, x_5, x_6, x_7, x_8, x_9, x_10, x_11, x_16); -x_30 = !lean_is_exclusive(x_29); -if (x_30 == 0) +lean_object* x_25; lean_object* x_26; lean_object* x_27; lean_object* x_28; lean_object* x_29; lean_object* x_30; uint8_t x_31; +x_25 = lean_ctor_get(x_23, 0); +x_26 = lean_ctor_get(x_23, 1); +x_27 = lean_ctor_get(x_23, 2); +x_28 = lean_ctor_get(x_23, 3); +x_29 = lean_alloc_ctor(2, 5, 0); +lean_ctor_set(x_29, 0, x_26); +lean_ctor_set(x_29, 1, x_1); +lean_ctor_set(x_29, 2, x_27); +lean_ctor_set(x_29, 3, x_28); +lean_ctor_set(x_29, 4, x_2); +lean_inc(x_25); +lean_ctor_set(x_23, 3, x_19); +lean_ctor_set(x_23, 2, x_18); +lean_ctor_set(x_23, 1, x_29); +x_30 = l_Lean_Meta_Grind_Arith_CommRing_incSteps(x_4, x_5, x_6, x_7, x_8, x_9, x_10, x_11, x_16); +x_31 = !lean_is_exclusive(x_30); +if (x_31 == 0) { -lean_object* x_31; lean_object* x_32; lean_object* x_33; lean_object* x_34; lean_object* x_35; uint8_t x_36; -x_31 = lean_ctor_get(x_29, 1); -x_32 = lean_ctor_get(x_29, 0); -lean_dec(x_32); -x_33 = l_Lean_Meta_Grind_Arith_CommRing_PolyDerivation_simplifyWith___closed__4; -x_34 = l_Lean_isTracingEnabledFor___at_Lean_Meta_Grind_Arith_CommRing_Null_setEqUnsat___spec__1(x_33, x_3, x_4, x_5, x_6, x_7, x_8, x_9, x_10, x_11, x_31); -x_35 = lean_ctor_get(x_34, 0); -lean_inc(x_35); -x_36 = lean_unbox(x_35); -lean_dec(x_35); -if (x_36 == 0) +lean_object* x_32; lean_object* x_33; lean_object* x_34; lean_object* x_35; lean_object* x_36; uint8_t x_37; +x_32 = lean_ctor_get(x_30, 1); +x_33 = lean_ctor_get(x_30, 0); +lean_dec(x_33); +x_34 = l_Lean_Meta_Grind_Arith_CommRing_PolyDerivation_simplifyWith___closed__4; +x_35 = l_Lean_isTracingEnabledFor___at_Lean_Meta_Grind_Arith_CommRing_Null_setEqUnsat___spec__1(x_34, x_3, x_4, x_5, x_6, x_7, x_8, x_9, x_10, x_11, x_32); 
+x_36 = lean_ctor_get(x_35, 0); +lean_inc(x_36); +x_37 = lean_unbox(x_36); +lean_dec(x_36); +if (x_37 == 0) { -uint8_t x_37; -lean_free_object(x_29); -lean_dec(x_24); +lean_object* x_38; lean_object* x_39; lean_object* x_40; +lean_free_object(x_30); +lean_dec(x_25); +x_38 = lean_ctor_get(x_35, 1); +lean_inc(x_38); +lean_dec(x_35); +x_39 = lean_box(0); +x_40 = l_Lean_Meta_Grind_Arith_CommRing_EqCnstr_simplifyWithCore___lambda__1(x_23, x_39, x_3, x_4, x_5, x_6, x_7, x_8, x_9, x_10, x_11, x_38); lean_dec(x_11); lean_dec(x_10); lean_dec(x_9); @@ -5005,58 +5047,49 @@ lean_dec(x_6); lean_dec(x_5); lean_dec(x_4); lean_dec(x_3); -x_37 = !lean_is_exclusive(x_34); -if (x_37 == 0) -{ -lean_object* x_38; -x_38 = lean_ctor_get(x_34, 0); -lean_dec(x_38); -lean_ctor_set(x_34, 0, x_22); -return x_34; -} -else -{ -lean_object* x_39; lean_object* x_40; -x_39 = lean_ctor_get(x_34, 1); -lean_inc(x_39); -lean_dec(x_34); -x_40 = lean_alloc_ctor(0, 2, 0); -lean_ctor_set(x_40, 0, x_22); -lean_ctor_set(x_40, 1, x_39); return x_40; } -} else { -lean_object* x_41; lean_object* x_42; -x_41 = lean_ctor_get(x_34, 1); -lean_inc(x_41); -lean_dec(x_34); -x_42 = l_Lean_Meta_Grind_updateLastTag(x_4, x_5, x_6, x_7, x_8, x_9, x_10, x_11, x_41); -if (lean_obj_tag(x_42) == 0) +uint8_t x_41; +x_41 = !lean_is_exclusive(x_35); +if (x_41 == 0) { -lean_object* x_43; lean_object* x_44; -x_43 = lean_ctor_get(x_42, 1); -lean_inc(x_43); -lean_dec(x_42); -x_44 = l_Lean_Grind_CommRing_Poly_denoteExpr(x_24, x_3, x_4, x_5, x_6, x_7, x_8, x_9, x_10, x_11, x_43); +lean_object* x_42; lean_object* x_43; lean_object* x_44; +x_42 = lean_ctor_get(x_35, 1); +x_43 = lean_ctor_get(x_35, 0); +lean_dec(x_43); +x_44 = l_Lean_Meta_Grind_updateLastTag(x_4, x_5, x_6, x_7, x_8, x_9, x_10, x_11, x_42); if (lean_obj_tag(x_44) == 0) { -lean_object* x_45; lean_object* x_46; lean_object* x_47; lean_object* x_48; lean_object* x_49; lean_object* x_50; uint8_t x_51; -x_45 = lean_ctor_get(x_44, 0); +lean_object* x_45; lean_object* x_46; +x_45 = lean_ctor_get(x_44, 1); lean_inc(x_45); -x_46 = lean_ctor_get(x_44, 1); -lean_inc(x_46); lean_dec(x_44); -x_47 = l_Lean_MessageData_ofExpr(x_45); -x_48 = l___private_Lean_Meta_Tactic_Grind_Arith_CommRing_EqCnstr_0__Lean_Meta_Grind_Arith_CommRing_toRingExpr_x3f___closed__5; -lean_ctor_set_tag(x_29, 7); -lean_ctor_set(x_29, 1, x_47); -lean_ctor_set(x_29, 0, x_48); -x_49 = lean_alloc_ctor(7, 2, 0); -lean_ctor_set(x_49, 0, x_29); -lean_ctor_set(x_49, 1, x_48); -x_50 = l_Lean_addTrace___at_Lean_Meta_Grind_Arith_CommRing_Null_setEqUnsat___spec__3(x_33, x_49, x_3, x_4, x_5, x_6, x_7, x_8, x_9, x_10, x_11, x_46); +x_46 = l_Lean_Grind_CommRing_Poly_denoteExpr___at_Lean_Meta_Grind_Arith_CommRing_getPolyConst___spec__1(x_25, x_3, x_4, x_5, x_6, x_7, x_8, x_9, x_10, x_11, x_45); +if (lean_obj_tag(x_46) == 0) +{ +lean_object* x_47; lean_object* x_48; lean_object* x_49; lean_object* x_50; lean_object* x_51; lean_object* x_52; lean_object* x_53; lean_object* x_54; +x_47 = lean_ctor_get(x_46, 0); +lean_inc(x_47); +x_48 = lean_ctor_get(x_46, 1); +lean_inc(x_48); +lean_dec(x_46); +x_49 = l_Lean_MessageData_ofExpr(x_47); +x_50 = l___private_Lean_Meta_Tactic_Grind_Arith_CommRing_EqCnstr_0__Lean_Meta_Grind_Arith_CommRing_toRingExpr_x3f___closed__5; +lean_ctor_set_tag(x_35, 7); +lean_ctor_set(x_35, 1, x_49); +lean_ctor_set(x_35, 0, x_50); +lean_ctor_set_tag(x_30, 7); +lean_ctor_set(x_30, 1, x_50); +lean_ctor_set(x_30, 0, x_35); +x_51 = l_Lean_addTrace___at_Lean_Meta_Grind_Arith_CommRing_Null_setEqUnsat___spec__3(x_34, x_30, x_3, x_4, x_5, x_6, 
x_7, x_8, x_9, x_10, x_11, x_48); +x_52 = lean_ctor_get(x_51, 0); +lean_inc(x_52); +x_53 = lean_ctor_get(x_51, 1); +lean_inc(x_53); +lean_dec(x_51); +x_54 = l_Lean_Meta_Grind_Arith_CommRing_EqCnstr_simplifyWithCore___lambda__1(x_23, x_52, x_3, x_4, x_5, x_6, x_7, x_8, x_9, x_10, x_11, x_53); lean_dec(x_11); lean_dec(x_10); lean_dec(x_9); @@ -5066,32 +5099,15 @@ lean_dec(x_6); lean_dec(x_5); lean_dec(x_4); lean_dec(x_3); -x_51 = !lean_is_exclusive(x_50); -if (x_51 == 0) -{ -lean_object* x_52; -x_52 = lean_ctor_get(x_50, 0); lean_dec(x_52); -lean_ctor_set(x_50, 0, x_22); -return x_50; -} -else -{ -lean_object* x_53; lean_object* x_54; -x_53 = lean_ctor_get(x_50, 1); -lean_inc(x_53); -lean_dec(x_50); -x_54 = lean_alloc_ctor(0, 2, 0); -lean_ctor_set(x_54, 0, x_22); -lean_ctor_set(x_54, 1, x_53); return x_54; } -} else { uint8_t x_55; -lean_free_object(x_29); -lean_dec(x_22); +lean_free_object(x_35); +lean_free_object(x_30); +lean_dec(x_23); lean_dec(x_11); lean_dec(x_10); lean_dec(x_9); @@ -5101,19 +5117,19 @@ lean_dec(x_6); lean_dec(x_5); lean_dec(x_4); lean_dec(x_3); -x_55 = !lean_is_exclusive(x_44); +x_55 = !lean_is_exclusive(x_46); if (x_55 == 0) { -return x_44; +return x_46; } else { lean_object* x_56; lean_object* x_57; lean_object* x_58; -x_56 = lean_ctor_get(x_44, 0); -x_57 = lean_ctor_get(x_44, 1); +x_56 = lean_ctor_get(x_46, 0); +x_57 = lean_ctor_get(x_46, 1); lean_inc(x_57); lean_inc(x_56); -lean_dec(x_44); +lean_dec(x_46); x_58 = lean_alloc_ctor(1, 2, 0); lean_ctor_set(x_58, 0, x_56); lean_ctor_set(x_58, 1, x_57); @@ -5124,9 +5140,10 @@ return x_58; else { uint8_t x_59; -lean_free_object(x_29); -lean_dec(x_22); -lean_dec(x_24); +lean_free_object(x_35); +lean_free_object(x_30); +lean_dec(x_23); +lean_dec(x_25); lean_dec(x_11); lean_dec(x_10); lean_dec(x_9); @@ -5136,19 +5153,19 @@ lean_dec(x_6); lean_dec(x_5); lean_dec(x_4); lean_dec(x_3); -x_59 = !lean_is_exclusive(x_42); +x_59 = !lean_is_exclusive(x_44); if (x_59 == 0) { -return x_42; +return x_44; } else { lean_object* x_60; lean_object* x_61; lean_object* x_62; -x_60 = lean_ctor_get(x_42, 0); -x_61 = lean_ctor_get(x_42, 1); +x_60 = lean_ctor_get(x_44, 0); +x_61 = lean_ctor_get(x_44, 1); lean_inc(x_61); lean_inc(x_60); -lean_dec(x_42); +lean_dec(x_44); x_62 = lean_alloc_ctor(1, 2, 0); lean_ctor_set(x_62, 0, x_60); lean_ctor_set(x_62, 1, x_61); @@ -5156,82 +5173,43 @@ return x_62; } } } -} else { -lean_object* x_63; lean_object* x_64; lean_object* x_65; lean_object* x_66; uint8_t x_67; -x_63 = lean_ctor_get(x_29, 1); +lean_object* x_63; lean_object* x_64; +x_63 = lean_ctor_get(x_35, 1); lean_inc(x_63); -lean_dec(x_29); -x_64 = l_Lean_Meta_Grind_Arith_CommRing_PolyDerivation_simplifyWith___closed__4; -x_65 = l_Lean_isTracingEnabledFor___at_Lean_Meta_Grind_Arith_CommRing_Null_setEqUnsat___spec__1(x_64, x_3, x_4, x_5, x_6, x_7, x_8, x_9, x_10, x_11, x_63); -x_66 = lean_ctor_get(x_65, 0); -lean_inc(x_66); -x_67 = lean_unbox(x_66); -lean_dec(x_66); -if (x_67 == 0) -{ -lean_object* x_68; lean_object* x_69; lean_object* x_70; -lean_dec(x_24); -lean_dec(x_11); -lean_dec(x_10); -lean_dec(x_9); -lean_dec(x_8); -lean_dec(x_7); -lean_dec(x_6); -lean_dec(x_5); -lean_dec(x_4); -lean_dec(x_3); -x_68 = lean_ctor_get(x_65, 1); -lean_inc(x_68); -if (lean_is_exclusive(x_65)) { - lean_ctor_release(x_65, 0); - lean_ctor_release(x_65, 1); - x_69 = x_65; -} else { - lean_dec_ref(x_65); - x_69 = lean_box(0); -} -if (lean_is_scalar(x_69)) { - x_70 = lean_alloc_ctor(0, 2, 0); -} else { - x_70 = x_69; -} -lean_ctor_set(x_70, 0, x_22); -lean_ctor_set(x_70, 
1, x_68); -return x_70; -} -else +lean_dec(x_35); +x_64 = l_Lean_Meta_Grind_updateLastTag(x_4, x_5, x_6, x_7, x_8, x_9, x_10, x_11, x_63); +if (lean_obj_tag(x_64) == 0) { -lean_object* x_71; lean_object* x_72; -x_71 = lean_ctor_get(x_65, 1); -lean_inc(x_71); -lean_dec(x_65); -x_72 = l_Lean_Meta_Grind_updateLastTag(x_4, x_5, x_6, x_7, x_8, x_9, x_10, x_11, x_71); -if (lean_obj_tag(x_72) == 0) +lean_object* x_65; lean_object* x_66; +x_65 = lean_ctor_get(x_64, 1); +lean_inc(x_65); +lean_dec(x_64); +x_66 = l_Lean_Grind_CommRing_Poly_denoteExpr___at_Lean_Meta_Grind_Arith_CommRing_getPolyConst___spec__1(x_25, x_3, x_4, x_5, x_6, x_7, x_8, x_9, x_10, x_11, x_65); +if (lean_obj_tag(x_66) == 0) { -lean_object* x_73; lean_object* x_74; -x_73 = lean_ctor_get(x_72, 1); +lean_object* x_67; lean_object* x_68; lean_object* x_69; lean_object* x_70; lean_object* x_71; lean_object* x_72; lean_object* x_73; lean_object* x_74; lean_object* x_75; +x_67 = lean_ctor_get(x_66, 0); +lean_inc(x_67); +x_68 = lean_ctor_get(x_66, 1); +lean_inc(x_68); +lean_dec(x_66); +x_69 = l_Lean_MessageData_ofExpr(x_67); +x_70 = l___private_Lean_Meta_Tactic_Grind_Arith_CommRing_EqCnstr_0__Lean_Meta_Grind_Arith_CommRing_toRingExpr_x3f___closed__5; +x_71 = lean_alloc_ctor(7, 2, 0); +lean_ctor_set(x_71, 0, x_70); +lean_ctor_set(x_71, 1, x_69); +lean_ctor_set_tag(x_30, 7); +lean_ctor_set(x_30, 1, x_70); +lean_ctor_set(x_30, 0, x_71); +x_72 = l_Lean_addTrace___at_Lean_Meta_Grind_Arith_CommRing_Null_setEqUnsat___spec__3(x_34, x_30, x_3, x_4, x_5, x_6, x_7, x_8, x_9, x_10, x_11, x_68); +x_73 = lean_ctor_get(x_72, 0); lean_inc(x_73); +x_74 = lean_ctor_get(x_72, 1); +lean_inc(x_74); lean_dec(x_72); -x_74 = l_Lean_Grind_CommRing_Poly_denoteExpr(x_24, x_3, x_4, x_5, x_6, x_7, x_8, x_9, x_10, x_11, x_73); -if (lean_obj_tag(x_74) == 0) -{ -lean_object* x_75; lean_object* x_76; lean_object* x_77; lean_object* x_78; lean_object* x_79; lean_object* x_80; lean_object* x_81; lean_object* x_82; lean_object* x_83; lean_object* x_84; -x_75 = lean_ctor_get(x_74, 0); -lean_inc(x_75); -x_76 = lean_ctor_get(x_74, 1); -lean_inc(x_76); -lean_dec(x_74); -x_77 = l_Lean_MessageData_ofExpr(x_75); -x_78 = l___private_Lean_Meta_Tactic_Grind_Arith_CommRing_EqCnstr_0__Lean_Meta_Grind_Arith_CommRing_toRingExpr_x3f___closed__5; -x_79 = lean_alloc_ctor(7, 2, 0); -lean_ctor_set(x_79, 0, x_78); -lean_ctor_set(x_79, 1, x_77); -x_80 = lean_alloc_ctor(7, 2, 0); -lean_ctor_set(x_80, 0, x_79); -lean_ctor_set(x_80, 1, x_78); -x_81 = l_Lean_addTrace___at_Lean_Meta_Grind_Arith_CommRing_Null_setEqUnsat___spec__3(x_64, x_80, x_3, x_4, x_5, x_6, x_7, x_8, x_9, x_10, x_11, x_76); +x_75 = l_Lean_Meta_Grind_Arith_CommRing_EqCnstr_simplifyWithCore___lambda__1(x_23, x_73, x_3, x_4, x_5, x_6, x_7, x_8, x_9, x_10, x_11, x_74); lean_dec(x_11); lean_dec(x_10); lean_dec(x_9); @@ -5241,29 +5219,14 @@ lean_dec(x_6); lean_dec(x_5); lean_dec(x_4); lean_dec(x_3); -x_82 = lean_ctor_get(x_81, 1); -lean_inc(x_82); -if (lean_is_exclusive(x_81)) { - lean_ctor_release(x_81, 0); - lean_ctor_release(x_81, 1); - x_83 = x_81; -} else { - lean_dec_ref(x_81); - x_83 = lean_box(0); -} -if (lean_is_scalar(x_83)) { - x_84 = lean_alloc_ctor(0, 2, 0); -} else { - x_84 = x_83; -} -lean_ctor_set(x_84, 0, x_22); -lean_ctor_set(x_84, 1, x_82); -return x_84; +lean_dec(x_73); +return x_75; } else { -lean_object* x_85; lean_object* x_86; lean_object* x_87; lean_object* x_88; -lean_dec(x_22); +lean_object* x_76; lean_object* x_77; lean_object* x_78; lean_object* x_79; +lean_free_object(x_30); +lean_dec(x_23); 
lean_dec(x_11); lean_dec(x_10); lean_dec(x_9); @@ -5273,33 +5236,34 @@ lean_dec(x_6); lean_dec(x_5); lean_dec(x_4); lean_dec(x_3); -x_85 = lean_ctor_get(x_74, 0); -lean_inc(x_85); -x_86 = lean_ctor_get(x_74, 1); -lean_inc(x_86); -if (lean_is_exclusive(x_74)) { - lean_ctor_release(x_74, 0); - lean_ctor_release(x_74, 1); - x_87 = x_74; +x_76 = lean_ctor_get(x_66, 0); +lean_inc(x_76); +x_77 = lean_ctor_get(x_66, 1); +lean_inc(x_77); +if (lean_is_exclusive(x_66)) { + lean_ctor_release(x_66, 0); + lean_ctor_release(x_66, 1); + x_78 = x_66; } else { - lean_dec_ref(x_74); - x_87 = lean_box(0); + lean_dec_ref(x_66); + x_78 = lean_box(0); } -if (lean_is_scalar(x_87)) { - x_88 = lean_alloc_ctor(1, 2, 0); +if (lean_is_scalar(x_78)) { + x_79 = lean_alloc_ctor(1, 2, 0); } else { - x_88 = x_87; + x_79 = x_78; } -lean_ctor_set(x_88, 0, x_85); -lean_ctor_set(x_88, 1, x_86); -return x_88; +lean_ctor_set(x_79, 0, x_76); +lean_ctor_set(x_79, 1, x_77); +return x_79; } } else { -lean_object* x_89; lean_object* x_90; lean_object* x_91; lean_object* x_92; -lean_dec(x_22); -lean_dec(x_24); +lean_object* x_80; lean_object* x_81; lean_object* x_82; lean_object* x_83; +lean_free_object(x_30); +lean_dec(x_23); +lean_dec(x_25); lean_dec(x_11); lean_dec(x_10); lean_dec(x_9); @@ -5309,76 +5273,51 @@ lean_dec(x_6); lean_dec(x_5); lean_dec(x_4); lean_dec(x_3); -x_89 = lean_ctor_get(x_72, 0); -lean_inc(x_89); -x_90 = lean_ctor_get(x_72, 1); -lean_inc(x_90); -if (lean_is_exclusive(x_72)) { - lean_ctor_release(x_72, 0); - lean_ctor_release(x_72, 1); - x_91 = x_72; +x_80 = lean_ctor_get(x_64, 0); +lean_inc(x_80); +x_81 = lean_ctor_get(x_64, 1); +lean_inc(x_81); +if (lean_is_exclusive(x_64)) { + lean_ctor_release(x_64, 0); + lean_ctor_release(x_64, 1); + x_82 = x_64; } else { - lean_dec_ref(x_72); - x_91 = lean_box(0); + lean_dec_ref(x_64); + x_82 = lean_box(0); } -if (lean_is_scalar(x_91)) { - x_92 = lean_alloc_ctor(1, 2, 0); +if (lean_is_scalar(x_82)) { + x_83 = lean_alloc_ctor(1, 2, 0); } else { - x_92 = x_91; + x_83 = x_82; } -lean_ctor_set(x_92, 0, x_89); -lean_ctor_set(x_92, 1, x_90); -return x_92; +lean_ctor_set(x_83, 0, x_80); +lean_ctor_set(x_83, 1, x_81); +return x_83; } } } } else { -lean_object* x_93; lean_object* x_94; lean_object* x_95; lean_object* x_96; lean_object* x_97; lean_object* x_98; lean_object* x_99; lean_object* x_100; lean_object* x_101; lean_object* x_102; lean_object* x_103; lean_object* x_104; uint8_t x_105; -x_93 = lean_ctor_get(x_22, 0); -x_94 = lean_ctor_get(x_22, 1); -x_95 = lean_ctor_get(x_22, 2); -x_96 = lean_ctor_get(x_22, 3); -lean_inc(x_96); -lean_inc(x_95); -lean_inc(x_94); -lean_inc(x_93); -lean_dec(x_22); -x_97 = lean_alloc_ctor(2, 5, 0); -lean_ctor_set(x_97, 0, x_94); -lean_ctor_set(x_97, 1, x_1); -lean_ctor_set(x_97, 2, x_95); -lean_ctor_set(x_97, 3, x_96); -lean_ctor_set(x_97, 4, x_2); -lean_inc(x_93); -x_98 = lean_alloc_ctor(0, 4, 0); -lean_ctor_set(x_98, 0, x_93); -lean_ctor_set(x_98, 1, x_97); -lean_ctor_set(x_98, 2, x_18); -lean_ctor_set(x_98, 3, x_19); -x_99 = l_Lean_Meta_Grind_Arith_CommRing_incSteps(x_4, x_5, x_6, x_7, x_8, x_9, x_10, x_11, x_16); -x_100 = lean_ctor_get(x_99, 1); -lean_inc(x_100); -if (lean_is_exclusive(x_99)) { - lean_ctor_release(x_99, 0); - lean_ctor_release(x_99, 1); - x_101 = x_99; -} else { - lean_dec_ref(x_99); - x_101 = lean_box(0); -} -x_102 = l_Lean_Meta_Grind_Arith_CommRing_PolyDerivation_simplifyWith___closed__4; -x_103 = l_Lean_isTracingEnabledFor___at_Lean_Meta_Grind_Arith_CommRing_Null_setEqUnsat___spec__1(x_102, x_3, x_4, x_5, x_6, x_7, x_8, 
x_9, x_10, x_11, x_100); -x_104 = lean_ctor_get(x_103, 0); -lean_inc(x_104); -x_105 = lean_unbox(x_104); -lean_dec(x_104); -if (x_105 == 0) +lean_object* x_84; lean_object* x_85; lean_object* x_86; lean_object* x_87; uint8_t x_88; +x_84 = lean_ctor_get(x_30, 1); +lean_inc(x_84); +lean_dec(x_30); +x_85 = l_Lean_Meta_Grind_Arith_CommRing_PolyDerivation_simplifyWith___closed__4; +x_86 = l_Lean_isTracingEnabledFor___at_Lean_Meta_Grind_Arith_CommRing_Null_setEqUnsat___spec__1(x_85, x_3, x_4, x_5, x_6, x_7, x_8, x_9, x_10, x_11, x_84); +x_87 = lean_ctor_get(x_86, 0); +lean_inc(x_87); +x_88 = lean_unbox(x_87); +lean_dec(x_87); +if (x_88 == 0) { -lean_object* x_106; lean_object* x_107; lean_object* x_108; -lean_dec(x_101); -lean_dec(x_93); +lean_object* x_89; lean_object* x_90; lean_object* x_91; +lean_dec(x_25); +x_89 = lean_ctor_get(x_86, 1); +lean_inc(x_89); +lean_dec(x_86); +x_90 = lean_box(0); +x_91 = l_Lean_Meta_Grind_Arith_CommRing_EqCnstr_simplifyWithCore___lambda__1(x_23, x_90, x_3, x_4, x_5, x_6, x_7, x_8, x_9, x_10, x_11, x_89); lean_dec(x_11); lean_dec(x_10); lean_dec(x_9); @@ -5388,61 +5327,57 @@ lean_dec(x_6); lean_dec(x_5); lean_dec(x_4); lean_dec(x_3); -x_106 = lean_ctor_get(x_103, 1); -lean_inc(x_106); -if (lean_is_exclusive(x_103)) { - lean_ctor_release(x_103, 0); - lean_ctor_release(x_103, 1); - x_107 = x_103; -} else { - lean_dec_ref(x_103); - x_107 = lean_box(0); -} -if (lean_is_scalar(x_107)) { - x_108 = lean_alloc_ctor(0, 2, 0); -} else { - x_108 = x_107; -} -lean_ctor_set(x_108, 0, x_98); -lean_ctor_set(x_108, 1, x_106); -return x_108; +return x_91; } else { -lean_object* x_109; lean_object* x_110; -x_109 = lean_ctor_get(x_103, 1); -lean_inc(x_109); -lean_dec(x_103); -x_110 = l_Lean_Meta_Grind_updateLastTag(x_4, x_5, x_6, x_7, x_8, x_9, x_10, x_11, x_109); -if (lean_obj_tag(x_110) == 0) +lean_object* x_92; lean_object* x_93; lean_object* x_94; +x_92 = lean_ctor_get(x_86, 1); +lean_inc(x_92); +if (lean_is_exclusive(x_86)) { + lean_ctor_release(x_86, 0); + lean_ctor_release(x_86, 1); + x_93 = x_86; +} else { + lean_dec_ref(x_86); + x_93 = lean_box(0); +} +x_94 = l_Lean_Meta_Grind_updateLastTag(x_4, x_5, x_6, x_7, x_8, x_9, x_10, x_11, x_92); +if (lean_obj_tag(x_94) == 0) { -lean_object* x_111; lean_object* x_112; -x_111 = lean_ctor_get(x_110, 1); -lean_inc(x_111); -lean_dec(x_110); -x_112 = l_Lean_Grind_CommRing_Poly_denoteExpr(x_93, x_3, x_4, x_5, x_6, x_7, x_8, x_9, x_10, x_11, x_111); -if (lean_obj_tag(x_112) == 0) +lean_object* x_95; lean_object* x_96; +x_95 = lean_ctor_get(x_94, 1); +lean_inc(x_95); +lean_dec(x_94); +x_96 = l_Lean_Grind_CommRing_Poly_denoteExpr___at_Lean_Meta_Grind_Arith_CommRing_getPolyConst___spec__1(x_25, x_3, x_4, x_5, x_6, x_7, x_8, x_9, x_10, x_11, x_95); +if (lean_obj_tag(x_96) == 0) { -lean_object* x_113; lean_object* x_114; lean_object* x_115; lean_object* x_116; lean_object* x_117; lean_object* x_118; lean_object* x_119; lean_object* x_120; lean_object* x_121; lean_object* x_122; -x_113 = lean_ctor_get(x_112, 0); -lean_inc(x_113); -x_114 = lean_ctor_get(x_112, 1); -lean_inc(x_114); -lean_dec(x_112); -x_115 = l_Lean_MessageData_ofExpr(x_113); -x_116 = l___private_Lean_Meta_Tactic_Grind_Arith_CommRing_EqCnstr_0__Lean_Meta_Grind_Arith_CommRing_toRingExpr_x3f___closed__5; -if (lean_is_scalar(x_101)) { - x_117 = lean_alloc_ctor(7, 2, 0); +lean_object* x_97; lean_object* x_98; lean_object* x_99; lean_object* x_100; lean_object* x_101; lean_object* x_102; lean_object* x_103; lean_object* x_104; lean_object* x_105; lean_object* x_106; +x_97 = 
lean_ctor_get(x_96, 0); +lean_inc(x_97); +x_98 = lean_ctor_get(x_96, 1); +lean_inc(x_98); +lean_dec(x_96); +x_99 = l_Lean_MessageData_ofExpr(x_97); +x_100 = l___private_Lean_Meta_Tactic_Grind_Arith_CommRing_EqCnstr_0__Lean_Meta_Grind_Arith_CommRing_toRingExpr_x3f___closed__5; +if (lean_is_scalar(x_93)) { + x_101 = lean_alloc_ctor(7, 2, 0); } else { - x_117 = x_101; - lean_ctor_set_tag(x_117, 7); + x_101 = x_93; + lean_ctor_set_tag(x_101, 7); } -lean_ctor_set(x_117, 0, x_116); -lean_ctor_set(x_117, 1, x_115); -x_118 = lean_alloc_ctor(7, 2, 0); -lean_ctor_set(x_118, 0, x_117); -lean_ctor_set(x_118, 1, x_116); -x_119 = l_Lean_addTrace___at_Lean_Meta_Grind_Arith_CommRing_Null_setEqUnsat___spec__3(x_102, x_118, x_3, x_4, x_5, x_6, x_7, x_8, x_9, x_10, x_11, x_114); +lean_ctor_set(x_101, 0, x_100); +lean_ctor_set(x_101, 1, x_99); +x_102 = lean_alloc_ctor(7, 2, 0); +lean_ctor_set(x_102, 0, x_101); +lean_ctor_set(x_102, 1, x_100); +x_103 = l_Lean_addTrace___at_Lean_Meta_Grind_Arith_CommRing_Null_setEqUnsat___spec__3(x_85, x_102, x_3, x_4, x_5, x_6, x_7, x_8, x_9, x_10, x_11, x_98); +x_104 = lean_ctor_get(x_103, 0); +lean_inc(x_104); +x_105 = lean_ctor_get(x_103, 1); +lean_inc(x_105); +lean_dec(x_103); +x_106 = l_Lean_Meta_Grind_Arith_CommRing_EqCnstr_simplifyWithCore___lambda__1(x_23, x_104, x_3, x_4, x_5, x_6, x_7, x_8, x_9, x_10, x_11, x_105); lean_dec(x_11); lean_dec(x_10); lean_dec(x_9); @@ -5452,30 +5387,14 @@ lean_dec(x_6); lean_dec(x_5); lean_dec(x_4); lean_dec(x_3); -x_120 = lean_ctor_get(x_119, 1); -lean_inc(x_120); -if (lean_is_exclusive(x_119)) { - lean_ctor_release(x_119, 0); - lean_ctor_release(x_119, 1); - x_121 = x_119; -} else { - lean_dec_ref(x_119); - x_121 = lean_box(0); -} -if (lean_is_scalar(x_121)) { - x_122 = lean_alloc_ctor(0, 2, 0); -} else { - x_122 = x_121; -} -lean_ctor_set(x_122, 0, x_98); -lean_ctor_set(x_122, 1, x_120); -return x_122; +lean_dec(x_104); +return x_106; } else { -lean_object* x_123; lean_object* x_124; lean_object* x_125; lean_object* x_126; -lean_dec(x_101); -lean_dec(x_98); +lean_object* x_107; lean_object* x_108; lean_object* x_109; lean_object* x_110; +lean_dec(x_93); +lean_dec(x_23); lean_dec(x_11); lean_dec(x_10); lean_dec(x_9); @@ -5485,34 +5404,34 @@ lean_dec(x_6); lean_dec(x_5); lean_dec(x_4); lean_dec(x_3); -x_123 = lean_ctor_get(x_112, 0); -lean_inc(x_123); -x_124 = lean_ctor_get(x_112, 1); -lean_inc(x_124); -if (lean_is_exclusive(x_112)) { - lean_ctor_release(x_112, 0); - lean_ctor_release(x_112, 1); - x_125 = x_112; +x_107 = lean_ctor_get(x_96, 0); +lean_inc(x_107); +x_108 = lean_ctor_get(x_96, 1); +lean_inc(x_108); +if (lean_is_exclusive(x_96)) { + lean_ctor_release(x_96, 0); + lean_ctor_release(x_96, 1); + x_109 = x_96; } else { - lean_dec_ref(x_112); - x_125 = lean_box(0); + lean_dec_ref(x_96); + x_109 = lean_box(0); } -if (lean_is_scalar(x_125)) { - x_126 = lean_alloc_ctor(1, 2, 0); +if (lean_is_scalar(x_109)) { + x_110 = lean_alloc_ctor(1, 2, 0); } else { - x_126 = x_125; + x_110 = x_109; } -lean_ctor_set(x_126, 0, x_123); -lean_ctor_set(x_126, 1, x_124); -return x_126; +lean_ctor_set(x_110, 0, x_107); +lean_ctor_set(x_110, 1, x_108); +return x_110; } } else { -lean_object* x_127; lean_object* x_128; lean_object* x_129; lean_object* x_130; -lean_dec(x_101); -lean_dec(x_98); +lean_object* x_111; lean_object* x_112; lean_object* x_113; lean_object* x_114; lean_dec(x_93); +lean_dec(x_23); +lean_dec(x_25); lean_dec(x_11); lean_dec(x_10); lean_dec(x_9); @@ -5522,53 +5441,81 @@ lean_dec(x_6); lean_dec(x_5); lean_dec(x_4); lean_dec(x_3); 
-x_127 = lean_ctor_get(x_110, 0); -lean_inc(x_127); -x_128 = lean_ctor_get(x_110, 1); -lean_inc(x_128); -if (lean_is_exclusive(x_110)) { - lean_ctor_release(x_110, 0); - lean_ctor_release(x_110, 1); - x_129 = x_110; +x_111 = lean_ctor_get(x_94, 0); +lean_inc(x_111); +x_112 = lean_ctor_get(x_94, 1); +lean_inc(x_112); +if (lean_is_exclusive(x_94)) { + lean_ctor_release(x_94, 0); + lean_ctor_release(x_94, 1); + x_113 = x_94; } else { - lean_dec_ref(x_110); - x_129 = lean_box(0); + lean_dec_ref(x_94); + x_113 = lean_box(0); } -if (lean_is_scalar(x_129)) { - x_130 = lean_alloc_ctor(1, 2, 0); +if (lean_is_scalar(x_113)) { + x_114 = lean_alloc_ctor(1, 2, 0); } else { - x_130 = x_129; -} -lean_ctor_set(x_130, 0, x_127); -lean_ctor_set(x_130, 1, x_128); -return x_130; + x_114 = x_113; } +lean_ctor_set(x_114, 0, x_111); +lean_ctor_set(x_114, 1, x_112); +return x_114; } } } } else { -lean_object* x_131; lean_object* x_132; lean_object* x_133; lean_object* x_134; lean_object* x_135; lean_object* x_136; lean_object* x_137; -x_131 = lean_ctor_get(x_13, 0); -x_132 = lean_ctor_get(x_13, 1); -lean_inc(x_132); -lean_inc(x_131); -lean_dec(x_13); -x_133 = lean_ctor_get(x_1, 0); -lean_inc(x_133); -x_134 = lean_ctor_get(x_1, 2); -lean_inc(x_134); -x_135 = lean_ctor_get(x_1, 3); -lean_inc(x_135); -x_136 = lean_ctor_get(x_2, 0); -lean_inc(x_136); -x_137 = l_Lean_Grind_CommRing_Poly_simp_x3f(x_133, x_136, x_131); -if (lean_obj_tag(x_137) == 0) +lean_object* x_115; lean_object* x_116; lean_object* x_117; lean_object* x_118; lean_object* x_119; lean_object* x_120; lean_object* x_121; lean_object* x_122; lean_object* x_123; lean_object* x_124; lean_object* x_125; lean_object* x_126; uint8_t x_127; +x_115 = lean_ctor_get(x_23, 0); +x_116 = lean_ctor_get(x_23, 1); +x_117 = lean_ctor_get(x_23, 2); +x_118 = lean_ctor_get(x_23, 3); +lean_inc(x_118); +lean_inc(x_117); +lean_inc(x_116); +lean_inc(x_115); +lean_dec(x_23); +x_119 = lean_alloc_ctor(2, 5, 0); +lean_ctor_set(x_119, 0, x_116); +lean_ctor_set(x_119, 1, x_1); +lean_ctor_set(x_119, 2, x_117); +lean_ctor_set(x_119, 3, x_118); +lean_ctor_set(x_119, 4, x_2); +lean_inc(x_115); +x_120 = lean_alloc_ctor(0, 4, 0); +lean_ctor_set(x_120, 0, x_115); +lean_ctor_set(x_120, 1, x_119); +lean_ctor_set(x_120, 2, x_18); +lean_ctor_set(x_120, 3, x_19); +x_121 = l_Lean_Meta_Grind_Arith_CommRing_incSteps(x_4, x_5, x_6, x_7, x_8, x_9, x_10, x_11, x_16); +x_122 = lean_ctor_get(x_121, 1); +lean_inc(x_122); +if (lean_is_exclusive(x_121)) { + lean_ctor_release(x_121, 0); + lean_ctor_release(x_121, 1); + x_123 = x_121; +} else { + lean_dec_ref(x_121); + x_123 = lean_box(0); +} +x_124 = l_Lean_Meta_Grind_Arith_CommRing_PolyDerivation_simplifyWith___closed__4; +x_125 = l_Lean_isTracingEnabledFor___at_Lean_Meta_Grind_Arith_CommRing_Null_setEqUnsat___spec__1(x_124, x_3, x_4, x_5, x_6, x_7, x_8, x_9, x_10, x_11, x_122); +x_126 = lean_ctor_get(x_125, 0); +lean_inc(x_126); +x_127 = lean_unbox(x_126); +lean_dec(x_126); +if (x_127 == 0) { -lean_object* x_138; -lean_dec(x_135); -lean_dec(x_134); +lean_object* x_128; lean_object* x_129; lean_object* x_130; +lean_dec(x_123); +lean_dec(x_115); +x_128 = lean_ctor_get(x_125, 1); +lean_inc(x_128); +lean_dec(x_125); +x_129 = lean_box(0); +x_130 = l_Lean_Meta_Grind_Arith_CommRing_EqCnstr_simplifyWithCore___lambda__1(x_120, x_129, x_3, x_4, x_5, x_6, x_7, x_8, x_9, x_10, x_11, x_128); lean_dec(x_11); lean_dec(x_10); lean_dec(x_9); @@ -5578,74 +5525,62 @@ lean_dec(x_6); lean_dec(x_5); lean_dec(x_4); lean_dec(x_3); -lean_dec(x_2); -x_138 = lean_alloc_ctor(0, 2, 
0); -lean_ctor_set(x_138, 0, x_1); -lean_ctor_set(x_138, 1, x_132); -return x_138; +return x_130; } else { -lean_object* x_139; lean_object* x_140; lean_object* x_141; lean_object* x_142; lean_object* x_143; lean_object* x_144; lean_object* x_145; lean_object* x_146; lean_object* x_147; lean_object* x_148; lean_object* x_149; lean_object* x_150; lean_object* x_151; lean_object* x_152; uint8_t x_153; -x_139 = lean_ctor_get(x_137, 0); -lean_inc(x_139); -lean_dec(x_137); -x_140 = lean_ctor_get(x_139, 0); -lean_inc(x_140); -x_141 = lean_ctor_get(x_139, 1); -lean_inc(x_141); -x_142 = lean_ctor_get(x_139, 2); -lean_inc(x_142); -x_143 = lean_ctor_get(x_139, 3); -lean_inc(x_143); -if (lean_is_exclusive(x_139)) { - lean_ctor_release(x_139, 0); - lean_ctor_release(x_139, 1); - lean_ctor_release(x_139, 2); - lean_ctor_release(x_139, 3); - x_144 = x_139; -} else { - lean_dec_ref(x_139); - x_144 = lean_box(0); +lean_object* x_131; lean_object* x_132; lean_object* x_133; +x_131 = lean_ctor_get(x_125, 1); +lean_inc(x_131); +if (lean_is_exclusive(x_125)) { + lean_ctor_release(x_125, 0); + lean_ctor_release(x_125, 1); + x_132 = x_125; +} else { + lean_dec_ref(x_125); + x_132 = lean_box(0); } -x_145 = lean_alloc_ctor(2, 5, 0); -lean_ctor_set(x_145, 0, x_141); -lean_ctor_set(x_145, 1, x_1); -lean_ctor_set(x_145, 2, x_142); -lean_ctor_set(x_145, 3, x_143); -lean_ctor_set(x_145, 4, x_2); -lean_inc(x_140); -if (lean_is_scalar(x_144)) { - x_146 = lean_alloc_ctor(0, 4, 0); +x_133 = l_Lean_Meta_Grind_updateLastTag(x_4, x_5, x_6, x_7, x_8, x_9, x_10, x_11, x_131); +if (lean_obj_tag(x_133) == 0) +{ +lean_object* x_134; lean_object* x_135; +x_134 = lean_ctor_get(x_133, 1); +lean_inc(x_134); +lean_dec(x_133); +x_135 = l_Lean_Grind_CommRing_Poly_denoteExpr___at_Lean_Meta_Grind_Arith_CommRing_getPolyConst___spec__1(x_115, x_3, x_4, x_5, x_6, x_7, x_8, x_9, x_10, x_11, x_134); +if (lean_obj_tag(x_135) == 0) +{ +lean_object* x_136; lean_object* x_137; lean_object* x_138; lean_object* x_139; lean_object* x_140; lean_object* x_141; lean_object* x_142; lean_object* x_143; lean_object* x_144; lean_object* x_145; +x_136 = lean_ctor_get(x_135, 0); +lean_inc(x_136); +x_137 = lean_ctor_get(x_135, 1); +lean_inc(x_137); +lean_dec(x_135); +x_138 = l_Lean_MessageData_ofExpr(x_136); +x_139 = l___private_Lean_Meta_Tactic_Grind_Arith_CommRing_EqCnstr_0__Lean_Meta_Grind_Arith_CommRing_toRingExpr_x3f___closed__5; +if (lean_is_scalar(x_132)) { + x_140 = lean_alloc_ctor(7, 2, 0); } else { - x_146 = x_144; + x_140 = x_132; + lean_ctor_set_tag(x_140, 7); } -lean_ctor_set(x_146, 0, x_140); -lean_ctor_set(x_146, 1, x_145); -lean_ctor_set(x_146, 2, x_134); -lean_ctor_set(x_146, 3, x_135); -x_147 = l_Lean_Meta_Grind_Arith_CommRing_incSteps(x_4, x_5, x_6, x_7, x_8, x_9, x_10, x_11, x_132); -x_148 = lean_ctor_get(x_147, 1); -lean_inc(x_148); -if (lean_is_exclusive(x_147)) { - lean_ctor_release(x_147, 0); - lean_ctor_release(x_147, 1); - x_149 = x_147; +lean_ctor_set(x_140, 0, x_139); +lean_ctor_set(x_140, 1, x_138); +if (lean_is_scalar(x_123)) { + x_141 = lean_alloc_ctor(7, 2, 0); } else { - lean_dec_ref(x_147); - x_149 = lean_box(0); + x_141 = x_123; + lean_ctor_set_tag(x_141, 7); } -x_150 = l_Lean_Meta_Grind_Arith_CommRing_PolyDerivation_simplifyWith___closed__4; -x_151 = l_Lean_isTracingEnabledFor___at_Lean_Meta_Grind_Arith_CommRing_Null_setEqUnsat___spec__1(x_150, x_3, x_4, x_5, x_6, x_7, x_8, x_9, x_10, x_11, x_148); -x_152 = lean_ctor_get(x_151, 0); -lean_inc(x_152); -x_153 = lean_unbox(x_152); -lean_dec(x_152); -if (x_153 == 0) -{ 
-lean_object* x_154; lean_object* x_155; lean_object* x_156; -lean_dec(x_149); -lean_dec(x_140); +lean_ctor_set(x_141, 0, x_140); +lean_ctor_set(x_141, 1, x_139); +x_142 = l_Lean_addTrace___at_Lean_Meta_Grind_Arith_CommRing_Null_setEqUnsat___spec__3(x_124, x_141, x_3, x_4, x_5, x_6, x_7, x_8, x_9, x_10, x_11, x_137); +x_143 = lean_ctor_get(x_142, 0); +lean_inc(x_143); +x_144 = lean_ctor_get(x_142, 1); +lean_inc(x_144); +lean_dec(x_142); +x_145 = l_Lean_Meta_Grind_Arith_CommRing_EqCnstr_simplifyWithCore___lambda__1(x_120, x_143, x_3, x_4, x_5, x_6, x_7, x_8, x_9, x_10, x_11, x_144); lean_dec(x_11); lean_dec(x_10); lean_dec(x_9); @@ -5655,61 +5590,15 @@ lean_dec(x_6); lean_dec(x_5); lean_dec(x_4); lean_dec(x_3); -x_154 = lean_ctor_get(x_151, 1); -lean_inc(x_154); -if (lean_is_exclusive(x_151)) { - lean_ctor_release(x_151, 0); - lean_ctor_release(x_151, 1); - x_155 = x_151; -} else { - lean_dec_ref(x_151); - x_155 = lean_box(0); -} -if (lean_is_scalar(x_155)) { - x_156 = lean_alloc_ctor(0, 2, 0); -} else { - x_156 = x_155; -} -lean_ctor_set(x_156, 0, x_146); -lean_ctor_set(x_156, 1, x_154); -return x_156; +lean_dec(x_143); +return x_145; } else { -lean_object* x_157; lean_object* x_158; -x_157 = lean_ctor_get(x_151, 1); -lean_inc(x_157); -lean_dec(x_151); -x_158 = l_Lean_Meta_Grind_updateLastTag(x_4, x_5, x_6, x_7, x_8, x_9, x_10, x_11, x_157); -if (lean_obj_tag(x_158) == 0) -{ -lean_object* x_159; lean_object* x_160; -x_159 = lean_ctor_get(x_158, 1); -lean_inc(x_159); -lean_dec(x_158); -x_160 = l_Lean_Grind_CommRing_Poly_denoteExpr(x_140, x_3, x_4, x_5, x_6, x_7, x_8, x_9, x_10, x_11, x_159); -if (lean_obj_tag(x_160) == 0) -{ -lean_object* x_161; lean_object* x_162; lean_object* x_163; lean_object* x_164; lean_object* x_165; lean_object* x_166; lean_object* x_167; lean_object* x_168; lean_object* x_169; lean_object* x_170; -x_161 = lean_ctor_get(x_160, 0); -lean_inc(x_161); -x_162 = lean_ctor_get(x_160, 1); -lean_inc(x_162); -lean_dec(x_160); -x_163 = l_Lean_MessageData_ofExpr(x_161); -x_164 = l___private_Lean_Meta_Tactic_Grind_Arith_CommRing_EqCnstr_0__Lean_Meta_Grind_Arith_CommRing_toRingExpr_x3f___closed__5; -if (lean_is_scalar(x_149)) { - x_165 = lean_alloc_ctor(7, 2, 0); -} else { - x_165 = x_149; - lean_ctor_set_tag(x_165, 7); -} -lean_ctor_set(x_165, 0, x_164); -lean_ctor_set(x_165, 1, x_163); -x_166 = lean_alloc_ctor(7, 2, 0); -lean_ctor_set(x_166, 0, x_165); -lean_ctor_set(x_166, 1, x_164); -x_167 = l_Lean_addTrace___at_Lean_Meta_Grind_Arith_CommRing_Null_setEqUnsat___spec__3(x_150, x_166, x_3, x_4, x_5, x_6, x_7, x_8, x_9, x_10, x_11, x_162); +lean_object* x_146; lean_object* x_147; lean_object* x_148; lean_object* x_149; +lean_dec(x_132); +lean_dec(x_123); +lean_dec(x_120); lean_dec(x_11); lean_dec(x_10); lean_dec(x_9); @@ -5719,30 +5608,35 @@ lean_dec(x_6); lean_dec(x_5); lean_dec(x_4); lean_dec(x_3); -x_168 = lean_ctor_get(x_167, 1); -lean_inc(x_168); -if (lean_is_exclusive(x_167)) { - lean_ctor_release(x_167, 0); - lean_ctor_release(x_167, 1); - x_169 = x_167; +x_146 = lean_ctor_get(x_135, 0); +lean_inc(x_146); +x_147 = lean_ctor_get(x_135, 1); +lean_inc(x_147); +if (lean_is_exclusive(x_135)) { + lean_ctor_release(x_135, 0); + lean_ctor_release(x_135, 1); + x_148 = x_135; } else { - lean_dec_ref(x_167); - x_169 = lean_box(0); + lean_dec_ref(x_135); + x_148 = lean_box(0); } -if (lean_is_scalar(x_169)) { - x_170 = lean_alloc_ctor(0, 2, 0); +if (lean_is_scalar(x_148)) { + x_149 = lean_alloc_ctor(1, 2, 0); } else { - x_170 = x_169; + x_149 = x_148; +} +lean_ctor_set(x_149, 0, 
x_146); +lean_ctor_set(x_149, 1, x_147); +return x_149; } -lean_ctor_set(x_170, 0, x_146); -lean_ctor_set(x_170, 1, x_168); -return x_170; } else { -lean_object* x_171; lean_object* x_172; lean_object* x_173; lean_object* x_174; -lean_dec(x_149); -lean_dec(x_146); +lean_object* x_150; lean_object* x_151; lean_object* x_152; lean_object* x_153; +lean_dec(x_132); +lean_dec(x_123); +lean_dec(x_120); +lean_dec(x_115); lean_dec(x_11); lean_dec(x_10); lean_dec(x_9); @@ -5752,34 +5646,53 @@ lean_dec(x_6); lean_dec(x_5); lean_dec(x_4); lean_dec(x_3); -x_171 = lean_ctor_get(x_160, 0); -lean_inc(x_171); -x_172 = lean_ctor_get(x_160, 1); -lean_inc(x_172); -if (lean_is_exclusive(x_160)) { - lean_ctor_release(x_160, 0); - lean_ctor_release(x_160, 1); - x_173 = x_160; +x_150 = lean_ctor_get(x_133, 0); +lean_inc(x_150); +x_151 = lean_ctor_get(x_133, 1); +lean_inc(x_151); +if (lean_is_exclusive(x_133)) { + lean_ctor_release(x_133, 0); + lean_ctor_release(x_133, 1); + x_152 = x_133; } else { - lean_dec_ref(x_160); - x_173 = lean_box(0); + lean_dec_ref(x_133); + x_152 = lean_box(0); } -if (lean_is_scalar(x_173)) { - x_174 = lean_alloc_ctor(1, 2, 0); +if (lean_is_scalar(x_152)) { + x_153 = lean_alloc_ctor(1, 2, 0); } else { - x_174 = x_173; + x_153 = x_152; +} +lean_ctor_set(x_153, 0, x_150); +lean_ctor_set(x_153, 1, x_151); +return x_153; +} +} } -lean_ctor_set(x_174, 0, x_171); -lean_ctor_set(x_174, 1, x_172); -return x_174; } } else { -lean_object* x_175; lean_object* x_176; lean_object* x_177; lean_object* x_178; -lean_dec(x_149); -lean_dec(x_146); -lean_dec(x_140); +lean_object* x_154; lean_object* x_155; lean_object* x_156; lean_object* x_157; lean_object* x_158; lean_object* x_159; lean_object* x_160; +x_154 = lean_ctor_get(x_13, 0); +x_155 = lean_ctor_get(x_13, 1); +lean_inc(x_155); +lean_inc(x_154); +lean_dec(x_13); +x_156 = lean_ctor_get(x_1, 0); +lean_inc(x_156); +x_157 = lean_ctor_get(x_1, 2); +lean_inc(x_157); +x_158 = lean_ctor_get(x_1, 3); +lean_inc(x_158); +x_159 = lean_ctor_get(x_2, 0); +lean_inc(x_159); +x_160 = l_Lean_Grind_CommRing_Poly_simp_x3f(x_156, x_159, x_154); +if (lean_obj_tag(x_160) == 0) +{ +lean_object* x_161; lean_object* x_162; +lean_dec(x_158); +lean_dec(x_157); lean_dec(x_11); lean_dec(x_10); lean_dec(x_9); @@ -5789,34 +5702,81 @@ lean_dec(x_6); lean_dec(x_5); lean_dec(x_4); lean_dec(x_3); -x_175 = lean_ctor_get(x_158, 0); -lean_inc(x_175); -x_176 = lean_ctor_get(x_158, 1); -lean_inc(x_176); -if (lean_is_exclusive(x_158)) { - lean_ctor_release(x_158, 0); - lean_ctor_release(x_158, 1); - x_177 = x_158; -} else { - lean_dec_ref(x_158); - x_177 = lean_box(0); -} -if (lean_is_scalar(x_177)) { - x_178 = lean_alloc_ctor(1, 2, 0); -} else { - x_178 = x_177; -} -lean_ctor_set(x_178, 0, x_175); -lean_ctor_set(x_178, 1, x_176); -return x_178; -} -} +lean_dec(x_2); +lean_dec(x_1); +x_161 = lean_box(0); +x_162 = lean_alloc_ctor(0, 2, 0); +lean_ctor_set(x_162, 0, x_161); +lean_ctor_set(x_162, 1, x_155); +return x_162; } +else +{ +lean_object* x_163; lean_object* x_164; lean_object* x_165; lean_object* x_166; lean_object* x_167; lean_object* x_168; lean_object* x_169; lean_object* x_170; lean_object* x_171; lean_object* x_172; lean_object* x_173; lean_object* x_174; lean_object* x_175; lean_object* x_176; uint8_t x_177; +x_163 = lean_ctor_get(x_160, 0); +lean_inc(x_163); +lean_dec(x_160); +x_164 = lean_ctor_get(x_163, 0); +lean_inc(x_164); +x_165 = lean_ctor_get(x_163, 1); +lean_inc(x_165); +x_166 = lean_ctor_get(x_163, 2); +lean_inc(x_166); +x_167 = lean_ctor_get(x_163, 3); 
+lean_inc(x_167); +if (lean_is_exclusive(x_163)) { + lean_ctor_release(x_163, 0); + lean_ctor_release(x_163, 1); + lean_ctor_release(x_163, 2); + lean_ctor_release(x_163, 3); + x_168 = x_163; +} else { + lean_dec_ref(x_163); + x_168 = lean_box(0); +} +x_169 = lean_alloc_ctor(2, 5, 0); +lean_ctor_set(x_169, 0, x_165); +lean_ctor_set(x_169, 1, x_1); +lean_ctor_set(x_169, 2, x_166); +lean_ctor_set(x_169, 3, x_167); +lean_ctor_set(x_169, 4, x_2); +lean_inc(x_164); +if (lean_is_scalar(x_168)) { + x_170 = lean_alloc_ctor(0, 4, 0); +} else { + x_170 = x_168; } +lean_ctor_set(x_170, 0, x_164); +lean_ctor_set(x_170, 1, x_169); +lean_ctor_set(x_170, 2, x_157); +lean_ctor_set(x_170, 3, x_158); +x_171 = l_Lean_Meta_Grind_Arith_CommRing_incSteps(x_4, x_5, x_6, x_7, x_8, x_9, x_10, x_11, x_155); +x_172 = lean_ctor_get(x_171, 1); +lean_inc(x_172); +if (lean_is_exclusive(x_171)) { + lean_ctor_release(x_171, 0); + lean_ctor_release(x_171, 1); + x_173 = x_171; +} else { + lean_dec_ref(x_171); + x_173 = lean_box(0); } -else +x_174 = l_Lean_Meta_Grind_Arith_CommRing_PolyDerivation_simplifyWith___closed__4; +x_175 = l_Lean_isTracingEnabledFor___at_Lean_Meta_Grind_Arith_CommRing_Null_setEqUnsat___spec__1(x_174, x_3, x_4, x_5, x_6, x_7, x_8, x_9, x_10, x_11, x_172); +x_176 = lean_ctor_get(x_175, 0); +lean_inc(x_176); +x_177 = lean_unbox(x_176); +lean_dec(x_176); +if (x_177 == 0) { -uint8_t x_179; +lean_object* x_178; lean_object* x_179; lean_object* x_180; +lean_dec(x_173); +lean_dec(x_164); +x_178 = lean_ctor_get(x_175, 1); +lean_inc(x_178); +lean_dec(x_175); +x_179 = lean_box(0); +x_180 = l_Lean_Meta_Grind_Arith_CommRing_EqCnstr_simplifyWithCore___lambda__1(x_170, x_179, x_3, x_4, x_5, x_6, x_7, x_8, x_9, x_10, x_11, x_178); lean_dec(x_11); lean_dec(x_10); lean_dec(x_9); @@ -5826,63 +5786,62 @@ lean_dec(x_6); lean_dec(x_5); lean_dec(x_4); lean_dec(x_3); -lean_dec(x_2); -lean_dec(x_1); -x_179 = !lean_is_exclusive(x_13); -if (x_179 == 0) -{ -return x_13; +return x_180; } else { -lean_object* x_180; lean_object* x_181; lean_object* x_182; -x_180 = lean_ctor_get(x_13, 0); -x_181 = lean_ctor_get(x_13, 1); +lean_object* x_181; lean_object* x_182; lean_object* x_183; +x_181 = lean_ctor_get(x_175, 1); lean_inc(x_181); -lean_inc(x_180); -lean_dec(x_13); -x_182 = lean_alloc_ctor(1, 2, 0); -lean_ctor_set(x_182, 0, x_180); -lean_ctor_set(x_182, 1, x_181); -return x_182; -} -} -} +if (lean_is_exclusive(x_175)) { + lean_ctor_release(x_175, 0); + lean_ctor_release(x_175, 1); + x_182 = x_175; +} else { + lean_dec_ref(x_175); + x_182 = lean_box(0); } -LEAN_EXPORT lean_object* l_Lean_Loop_forIn_loop___at_Lean_Meta_Grind_Arith_CommRing_EqCnstr_simplify___spec__1___lambda__1(lean_object* x_1, lean_object* x_2, lean_object* x_3, lean_object* x_4, lean_object* x_5, lean_object* x_6, lean_object* x_7, lean_object* x_8, lean_object* x_9, lean_object* x_10, lean_object* x_11, lean_object* x_12, lean_object* x_13) { -_start: -{ -lean_object* x_14; lean_object* x_15; -x_14 = lean_ctor_get(x_2, 0); -lean_inc(x_14); -x_15 = l_Lean_Grind_CommRing_Poly_findSimp_x3f(x_14, x_4, x_5, x_6, x_7, x_8, x_9, x_10, x_11, x_12, x_13); -if (lean_obj_tag(x_15) == 0) -{ -lean_object* x_16; -x_16 = lean_ctor_get(x_15, 0); -lean_inc(x_16); -if (lean_obj_tag(x_16) == 0) +x_183 = l_Lean_Meta_Grind_updateLastTag(x_4, x_5, x_6, x_7, x_8, x_9, x_10, x_11, x_181); +if (lean_obj_tag(x_183) == 0) { -lean_object* x_17; lean_object* x_18; lean_object* x_19; lean_object* x_20; uint8_t x_21; -lean_dec(x_1); -x_17 = lean_ctor_get(x_15, 1); -lean_inc(x_17); 
-lean_dec(x_15); -x_18 = l_Lean_Loop_forIn_loop___at_Lean_Meta_Grind_Arith_CommRing_PolyDerivation_simplify___spec__1___lambda__2___closed__2; -x_19 = l_Lean_isTracingEnabledFor___at_Lean_Meta_Grind_Arith_CommRing_Null_setEqUnsat___spec__1(x_18, x_4, x_5, x_6, x_7, x_8, x_9, x_10, x_11, x_12, x_17); -x_20 = lean_ctor_get(x_19, 0); -lean_inc(x_20); -x_21 = lean_unbox(x_20); -lean_dec(x_20); -if (x_21 == 0) +lean_object* x_184; lean_object* x_185; +x_184 = lean_ctor_get(x_183, 1); +lean_inc(x_184); +lean_dec(x_183); +x_185 = l_Lean_Grind_CommRing_Poly_denoteExpr___at_Lean_Meta_Grind_Arith_CommRing_getPolyConst___spec__1(x_164, x_3, x_4, x_5, x_6, x_7, x_8, x_9, x_10, x_11, x_184); +if (lean_obj_tag(x_185) == 0) { -lean_object* x_22; lean_object* x_23; lean_object* x_24; -x_22 = lean_ctor_get(x_19, 1); -lean_inc(x_22); -lean_dec(x_19); -x_23 = lean_box(0); -x_24 = l_Lean_Loop_forIn_loop___at_Lean_Meta_Grind_Arith_CommRing_PolyDerivation_simplify___spec__1___lambda__1(x_2, x_23, x_4, x_5, x_6, x_7, x_8, x_9, x_10, x_11, x_12, x_22); -lean_dec(x_12); +lean_object* x_186; lean_object* x_187; lean_object* x_188; lean_object* x_189; lean_object* x_190; lean_object* x_191; lean_object* x_192; lean_object* x_193; lean_object* x_194; lean_object* x_195; +x_186 = lean_ctor_get(x_185, 0); +lean_inc(x_186); +x_187 = lean_ctor_get(x_185, 1); +lean_inc(x_187); +lean_dec(x_185); +x_188 = l_Lean_MessageData_ofExpr(x_186); +x_189 = l___private_Lean_Meta_Tactic_Grind_Arith_CommRing_EqCnstr_0__Lean_Meta_Grind_Arith_CommRing_toRingExpr_x3f___closed__5; +if (lean_is_scalar(x_182)) { + x_190 = lean_alloc_ctor(7, 2, 0); +} else { + x_190 = x_182; + lean_ctor_set_tag(x_190, 7); +} +lean_ctor_set(x_190, 0, x_189); +lean_ctor_set(x_190, 1, x_188); +if (lean_is_scalar(x_173)) { + x_191 = lean_alloc_ctor(7, 2, 0); +} else { + x_191 = x_173; + lean_ctor_set_tag(x_191, 7); +} +lean_ctor_set(x_191, 0, x_190); +lean_ctor_set(x_191, 1, x_189); +x_192 = l_Lean_addTrace___at_Lean_Meta_Grind_Arith_CommRing_Null_setEqUnsat___spec__3(x_174, x_191, x_3, x_4, x_5, x_6, x_7, x_8, x_9, x_10, x_11, x_187); +x_193 = lean_ctor_get(x_192, 0); +lean_inc(x_193); +x_194 = lean_ctor_get(x_192, 1); +lean_inc(x_194); +lean_dec(x_192); +x_195 = l_Lean_Meta_Grind_Arith_CommRing_EqCnstr_simplifyWithCore___lambda__1(x_170, x_193, x_3, x_4, x_5, x_6, x_7, x_8, x_9, x_10, x_11, x_194); lean_dec(x_11); lean_dec(x_10); lean_dec(x_9); @@ -5891,57 +5850,16 @@ lean_dec(x_7); lean_dec(x_6); lean_dec(x_5); lean_dec(x_4); -return x_24; +lean_dec(x_3); +lean_dec(x_193); +return x_195; } else { -uint8_t x_25; -x_25 = !lean_is_exclusive(x_19); -if (x_25 == 0) -{ -lean_object* x_26; lean_object* x_27; lean_object* x_28; -x_26 = lean_ctor_get(x_19, 1); -x_27 = lean_ctor_get(x_19, 0); -lean_dec(x_27); -x_28 = l_Lean_Meta_Grind_updateLastTag(x_5, x_6, x_7, x_8, x_9, x_10, x_11, x_12, x_26); -if (lean_obj_tag(x_28) == 0) -{ -lean_object* x_29; lean_object* x_30; -x_29 = lean_ctor_get(x_28, 1); -lean_inc(x_29); -lean_dec(x_28); -lean_inc(x_12); -lean_inc(x_11); -lean_inc(x_10); -lean_inc(x_9); -lean_inc(x_2); -x_30 = l_Lean_Meta_Grind_Arith_CommRing_EqCnstr_denoteExpr(x_2, x_4, x_5, x_6, x_7, x_8, x_9, x_10, x_11, x_12, x_29); -if (lean_obj_tag(x_30) == 0) -{ -lean_object* x_31; lean_object* x_32; lean_object* x_33; lean_object* x_34; lean_object* x_35; lean_object* x_36; lean_object* x_37; lean_object* x_38; lean_object* x_39; lean_object* x_40; lean_object* x_41; -x_31 = lean_ctor_get(x_30, 0); -lean_inc(x_31); -x_32 = lean_ctor_get(x_30, 1); -lean_inc(x_32); 
-lean_dec(x_30); -x_33 = l_Lean_MessageData_ofExpr(x_31); -x_34 = l_Lean_indentD(x_33); -x_35 = l_Lean_Loop_forIn_loop___at_Lean_Meta_Grind_Arith_CommRing_PolyDerivation_simplify___spec__1___lambda__2___closed__4; -lean_ctor_set_tag(x_19, 7); -lean_ctor_set(x_19, 1, x_34); -lean_ctor_set(x_19, 0, x_35); -x_36 = l___private_Lean_Meta_Tactic_Grind_Arith_CommRing_EqCnstr_0__Lean_Meta_Grind_Arith_CommRing_toRingExpr_x3f___closed__5; -x_37 = lean_alloc_ctor(7, 2, 0); -lean_ctor_set(x_37, 0, x_19); -lean_ctor_set(x_37, 1, x_36); -x_38 = l_Lean_addTrace___at_Lean_Meta_Grind_Arith_CommRing_Null_setEqUnsat___spec__3(x_18, x_37, x_4, x_5, x_6, x_7, x_8, x_9, x_10, x_11, x_12, x_32); -x_39 = lean_ctor_get(x_38, 0); -lean_inc(x_39); -x_40 = lean_ctor_get(x_38, 1); -lean_inc(x_40); -lean_dec(x_38); -x_41 = l_Lean_Loop_forIn_loop___at_Lean_Meta_Grind_Arith_CommRing_PolyDerivation_simplify___spec__1___lambda__1(x_2, x_39, x_4, x_5, x_6, x_7, x_8, x_9, x_10, x_11, x_12, x_40); -lean_dec(x_12); +lean_object* x_196; lean_object* x_197; lean_object* x_198; lean_object* x_199; +lean_dec(x_182); +lean_dec(x_173); +lean_dec(x_170); lean_dec(x_11); lean_dec(x_10); lean_dec(x_9); @@ -5950,14 +5868,36 @@ lean_dec(x_7); lean_dec(x_6); lean_dec(x_5); lean_dec(x_4); -lean_dec(x_39); -return x_41; +lean_dec(x_3); +x_196 = lean_ctor_get(x_185, 0); +lean_inc(x_196); +x_197 = lean_ctor_get(x_185, 1); +lean_inc(x_197); +if (lean_is_exclusive(x_185)) { + lean_ctor_release(x_185, 0); + lean_ctor_release(x_185, 1); + x_198 = x_185; +} else { + lean_dec_ref(x_185); + x_198 = lean_box(0); +} +if (lean_is_scalar(x_198)) { + x_199 = lean_alloc_ctor(1, 2, 0); +} else { + x_199 = x_198; +} +lean_ctor_set(x_199, 0, x_196); +lean_ctor_set(x_199, 1, x_197); +return x_199; +} } else { -uint8_t x_42; -lean_free_object(x_19); -lean_dec(x_12); +lean_object* x_200; lean_object* x_201; lean_object* x_202; lean_object* x_203; +lean_dec(x_182); +lean_dec(x_173); +lean_dec(x_170); +lean_dec(x_164); lean_dec(x_11); lean_dec(x_10); lean_dec(x_9); @@ -5966,32 +5906,35 @@ lean_dec(x_7); lean_dec(x_6); lean_dec(x_5); lean_dec(x_4); -lean_dec(x_2); -x_42 = !lean_is_exclusive(x_30); -if (x_42 == 0) -{ -return x_30; +lean_dec(x_3); +x_200 = lean_ctor_get(x_183, 0); +lean_inc(x_200); +x_201 = lean_ctor_get(x_183, 1); +lean_inc(x_201); +if (lean_is_exclusive(x_183)) { + lean_ctor_release(x_183, 0); + lean_ctor_release(x_183, 1); + x_202 = x_183; +} else { + lean_dec_ref(x_183); + x_202 = lean_box(0); +} +if (lean_is_scalar(x_202)) { + x_203 = lean_alloc_ctor(1, 2, 0); +} else { + x_203 = x_202; +} +lean_ctor_set(x_203, 0, x_200); +lean_ctor_set(x_203, 1, x_201); +return x_203; +} } -else -{ -lean_object* x_43; lean_object* x_44; lean_object* x_45; -x_43 = lean_ctor_get(x_30, 0); -x_44 = lean_ctor_get(x_30, 1); -lean_inc(x_44); -lean_inc(x_43); -lean_dec(x_30); -x_45 = lean_alloc_ctor(1, 2, 0); -lean_ctor_set(x_45, 0, x_43); -lean_ctor_set(x_45, 1, x_44); -return x_45; } } } else { -uint8_t x_46; -lean_free_object(x_19); -lean_dec(x_12); +uint8_t x_204; lean_dec(x_11); lean_dec(x_10); lean_dec(x_9); @@ -6000,72 +5943,35 @@ lean_dec(x_7); lean_dec(x_6); lean_dec(x_5); lean_dec(x_4); +lean_dec(x_3); lean_dec(x_2); -x_46 = !lean_is_exclusive(x_28); -if (x_46 == 0) +lean_dec(x_1); +x_204 = !lean_is_exclusive(x_13); +if (x_204 == 0) { -return x_28; +return x_13; } else { -lean_object* x_47; lean_object* x_48; lean_object* x_49; -x_47 = lean_ctor_get(x_28, 0); -x_48 = lean_ctor_get(x_28, 1); -lean_inc(x_48); -lean_inc(x_47); -lean_dec(x_28); -x_49 = 
lean_alloc_ctor(1, 2, 0); -lean_ctor_set(x_49, 0, x_47); -lean_ctor_set(x_49, 1, x_48); -return x_49; +lean_object* x_205; lean_object* x_206; lean_object* x_207; +x_205 = lean_ctor_get(x_13, 0); +x_206 = lean_ctor_get(x_13, 1); +lean_inc(x_206); +lean_inc(x_205); +lean_dec(x_13); +x_207 = lean_alloc_ctor(1, 2, 0); +lean_ctor_set(x_207, 0, x_205); +lean_ctor_set(x_207, 1, x_206); +return x_207; } } } -else -{ -lean_object* x_50; lean_object* x_51; -x_50 = lean_ctor_get(x_19, 1); -lean_inc(x_50); -lean_dec(x_19); -x_51 = l_Lean_Meta_Grind_updateLastTag(x_5, x_6, x_7, x_8, x_9, x_10, x_11, x_12, x_50); -if (lean_obj_tag(x_51) == 0) -{ -lean_object* x_52; lean_object* x_53; -x_52 = lean_ctor_get(x_51, 1); -lean_inc(x_52); -lean_dec(x_51); -lean_inc(x_12); -lean_inc(x_11); -lean_inc(x_10); -lean_inc(x_9); -lean_inc(x_2); -x_53 = l_Lean_Meta_Grind_Arith_CommRing_EqCnstr_denoteExpr(x_2, x_4, x_5, x_6, x_7, x_8, x_9, x_10, x_11, x_12, x_52); -if (lean_obj_tag(x_53) == 0) +} +LEAN_EXPORT lean_object* l_Lean_Meta_Grind_Arith_CommRing_EqCnstr_simplifyWithCore___lambda__1___boxed(lean_object* x_1, lean_object* x_2, lean_object* x_3, lean_object* x_4, lean_object* x_5, lean_object* x_6, lean_object* x_7, lean_object* x_8, lean_object* x_9, lean_object* x_10, lean_object* x_11, lean_object* x_12) { +_start: { -lean_object* x_54; lean_object* x_55; lean_object* x_56; lean_object* x_57; lean_object* x_58; lean_object* x_59; lean_object* x_60; lean_object* x_61; lean_object* x_62; lean_object* x_63; lean_object* x_64; lean_object* x_65; -x_54 = lean_ctor_get(x_53, 0); -lean_inc(x_54); -x_55 = lean_ctor_get(x_53, 1); -lean_inc(x_55); -lean_dec(x_53); -x_56 = l_Lean_MessageData_ofExpr(x_54); -x_57 = l_Lean_indentD(x_56); -x_58 = l_Lean_Loop_forIn_loop___at_Lean_Meta_Grind_Arith_CommRing_PolyDerivation_simplify___spec__1___lambda__2___closed__4; -x_59 = lean_alloc_ctor(7, 2, 0); -lean_ctor_set(x_59, 0, x_58); -lean_ctor_set(x_59, 1, x_57); -x_60 = l___private_Lean_Meta_Tactic_Grind_Arith_CommRing_EqCnstr_0__Lean_Meta_Grind_Arith_CommRing_toRingExpr_x3f___closed__5; -x_61 = lean_alloc_ctor(7, 2, 0); -lean_ctor_set(x_61, 0, x_59); -lean_ctor_set(x_61, 1, x_60); -x_62 = l_Lean_addTrace___at_Lean_Meta_Grind_Arith_CommRing_Null_setEqUnsat___spec__3(x_18, x_61, x_4, x_5, x_6, x_7, x_8, x_9, x_10, x_11, x_12, x_55); -x_63 = lean_ctor_get(x_62, 0); -lean_inc(x_63); -x_64 = lean_ctor_get(x_62, 1); -lean_inc(x_64); -lean_dec(x_62); -x_65 = l_Lean_Loop_forIn_loop___at_Lean_Meta_Grind_Arith_CommRing_PolyDerivation_simplify___spec__1___lambda__1(x_2, x_63, x_4, x_5, x_6, x_7, x_8, x_9, x_10, x_11, x_12, x_64); -lean_dec(x_12); +lean_object* x_13; +x_13 = l_Lean_Meta_Grind_Arith_CommRing_EqCnstr_simplifyWithCore___lambda__1(x_1, x_2, x_3, x_4, x_5, x_6, x_7, x_8, x_9, x_10, x_11, x_12); lean_dec(x_11); lean_dec(x_10); lean_dec(x_9); @@ -6074,219 +5980,127 @@ lean_dec(x_7); lean_dec(x_6); lean_dec(x_5); lean_dec(x_4); -lean_dec(x_63); -return x_65; +lean_dec(x_3); +lean_dec(x_2); +return x_13; } -else +} +LEAN_EXPORT lean_object* l_Lean_Meta_Grind_Arith_CommRing_EqCnstr_simplifyWith(lean_object* x_1, lean_object* x_2, lean_object* x_3, lean_object* x_4, lean_object* x_5, lean_object* x_6, lean_object* x_7, lean_object* x_8, lean_object* x_9, lean_object* x_10, lean_object* x_11, lean_object* x_12) { +_start: { -lean_object* x_66; lean_object* x_67; lean_object* x_68; lean_object* x_69; -lean_dec(x_12); -lean_dec(x_11); -lean_dec(x_10); -lean_dec(x_9); -lean_dec(x_8); -lean_dec(x_7); -lean_dec(x_6); -lean_dec(x_5); 
-lean_dec(x_4); -lean_dec(x_2); -x_66 = lean_ctor_get(x_53, 0); -lean_inc(x_66); -x_67 = lean_ctor_get(x_53, 1); -lean_inc(x_67); -if (lean_is_exclusive(x_53)) { - lean_ctor_release(x_53, 0); - lean_ctor_release(x_53, 1); - x_68 = x_53; -} else { - lean_dec_ref(x_53); - x_68 = lean_box(0); -} -if (lean_is_scalar(x_68)) { - x_69 = lean_alloc_ctor(1, 2, 0); -} else { - x_69 = x_68; -} -lean_ctor_set(x_69, 0, x_66); -lean_ctor_set(x_69, 1, x_67); -return x_69; -} -} -else -{ -lean_object* x_70; lean_object* x_71; lean_object* x_72; lean_object* x_73; -lean_dec(x_12); -lean_dec(x_11); -lean_dec(x_10); -lean_dec(x_9); -lean_dec(x_8); -lean_dec(x_7); -lean_dec(x_6); -lean_dec(x_5); -lean_dec(x_4); -lean_dec(x_2); -x_70 = lean_ctor_get(x_51, 0); -lean_inc(x_70); -x_71 = lean_ctor_get(x_51, 1); -lean_inc(x_71); -if (lean_is_exclusive(x_51)) { - lean_ctor_release(x_51, 0); - lean_ctor_release(x_51, 1); - x_72 = x_51; -} else { - lean_dec_ref(x_51); - x_72 = lean_box(0); -} -if (lean_is_scalar(x_72)) { - x_73 = lean_alloc_ctor(1, 2, 0); -} else { - x_73 = x_72; -} -lean_ctor_set(x_73, 0, x_70); -lean_ctor_set(x_73, 1, x_71); -return x_73; -} -} -} -} -else -{ -lean_object* x_74; uint8_t x_75; -x_74 = lean_ctor_get(x_15, 1); -lean_inc(x_74); -lean_dec(x_15); -x_75 = !lean_is_exclusive(x_16); -if (x_75 == 0) +lean_object* x_13; +lean_inc(x_1); +x_13 = l_Lean_Meta_Grind_Arith_CommRing_EqCnstr_simplifyWithCore(x_1, x_2, x_3, x_4, x_5, x_6, x_7, x_8, x_9, x_10, x_11, x_12); +if (lean_obj_tag(x_13) == 0) { -lean_object* x_76; lean_object* x_77; -x_76 = lean_ctor_get(x_16, 0); -x_77 = l_Lean_Meta_Grind_Arith_CommRing_EqCnstr_simplifyWith(x_2, x_76, x_4, x_5, x_6, x_7, x_8, x_9, x_10, x_11, x_12, x_74); -if (lean_obj_tag(x_77) == 0) +lean_object* x_14; +x_14 = lean_ctor_get(x_13, 0); +lean_inc(x_14); +if (lean_obj_tag(x_14) == 0) { -uint8_t x_78; -x_78 = !lean_is_exclusive(x_77); -if (x_78 == 0) +uint8_t x_15; +x_15 = !lean_is_exclusive(x_13); +if (x_15 == 0) { -lean_object* x_79; lean_object* x_80; -x_79 = lean_ctor_get(x_77, 0); -x_80 = lean_alloc_ctor(0, 2, 0); -lean_ctor_set(x_80, 0, x_1); -lean_ctor_set(x_80, 1, x_79); -lean_ctor_set(x_16, 0, x_80); -lean_ctor_set(x_77, 0, x_16); -return x_77; +lean_object* x_16; +x_16 = lean_ctor_get(x_13, 0); +lean_dec(x_16); +lean_ctor_set(x_13, 0, x_1); +return x_13; } else { -lean_object* x_81; lean_object* x_82; lean_object* x_83; lean_object* x_84; -x_81 = lean_ctor_get(x_77, 0); -x_82 = lean_ctor_get(x_77, 1); -lean_inc(x_82); -lean_inc(x_81); -lean_dec(x_77); -x_83 = lean_alloc_ctor(0, 2, 0); -lean_ctor_set(x_83, 0, x_1); -lean_ctor_set(x_83, 1, x_81); -lean_ctor_set(x_16, 0, x_83); -x_84 = lean_alloc_ctor(0, 2, 0); -lean_ctor_set(x_84, 0, x_16); -lean_ctor_set(x_84, 1, x_82); -return x_84; +lean_object* x_17; lean_object* x_18; +x_17 = lean_ctor_get(x_13, 1); +lean_inc(x_17); +lean_dec(x_13); +x_18 = lean_alloc_ctor(0, 2, 0); +lean_ctor_set(x_18, 0, x_1); +lean_ctor_set(x_18, 1, x_17); +return x_18; } } else { -uint8_t x_85; -lean_free_object(x_16); +uint8_t x_19; lean_dec(x_1); -x_85 = !lean_is_exclusive(x_77); -if (x_85 == 0) +x_19 = !lean_is_exclusive(x_13); +if (x_19 == 0) { -return x_77; +lean_object* x_20; lean_object* x_21; +x_20 = lean_ctor_get(x_13, 0); +lean_dec(x_20); +x_21 = lean_ctor_get(x_14, 0); +lean_inc(x_21); +lean_dec(x_14); +lean_ctor_set(x_13, 0, x_21); +return x_13; } else { -lean_object* x_86; lean_object* x_87; lean_object* x_88; -x_86 = lean_ctor_get(x_77, 0); -x_87 = lean_ctor_get(x_77, 1); -lean_inc(x_87); -lean_inc(x_86); 
-lean_dec(x_77); -x_88 = lean_alloc_ctor(1, 2, 0); -lean_ctor_set(x_88, 0, x_86); -lean_ctor_set(x_88, 1, x_87); -return x_88; +lean_object* x_22; lean_object* x_23; lean_object* x_24; +x_22 = lean_ctor_get(x_13, 1); +lean_inc(x_22); +lean_dec(x_13); +x_23 = lean_ctor_get(x_14, 0); +lean_inc(x_23); +lean_dec(x_14); +x_24 = lean_alloc_ctor(0, 2, 0); +lean_ctor_set(x_24, 0, x_23); +lean_ctor_set(x_24, 1, x_22); +return x_24; } } } else { -lean_object* x_89; lean_object* x_90; -x_89 = lean_ctor_get(x_16, 0); -lean_inc(x_89); -lean_dec(x_16); -x_90 = l_Lean_Meta_Grind_Arith_CommRing_EqCnstr_simplifyWith(x_2, x_89, x_4, x_5, x_6, x_7, x_8, x_9, x_10, x_11, x_12, x_74); -if (lean_obj_tag(x_90) == 0) +uint8_t x_25; +lean_dec(x_1); +x_25 = !lean_is_exclusive(x_13); +if (x_25 == 0) { -lean_object* x_91; lean_object* x_92; lean_object* x_93; lean_object* x_94; lean_object* x_95; lean_object* x_96; -x_91 = lean_ctor_get(x_90, 0); -lean_inc(x_91); -x_92 = lean_ctor_get(x_90, 1); -lean_inc(x_92); -if (lean_is_exclusive(x_90)) { - lean_ctor_release(x_90, 0); - lean_ctor_release(x_90, 1); - x_93 = x_90; -} else { - lean_dec_ref(x_90); - x_93 = lean_box(0); -} -x_94 = lean_alloc_ctor(0, 2, 0); -lean_ctor_set(x_94, 0, x_1); -lean_ctor_set(x_94, 1, x_91); -x_95 = lean_alloc_ctor(1, 1, 0); -lean_ctor_set(x_95, 0, x_94); -if (lean_is_scalar(x_93)) { - x_96 = lean_alloc_ctor(0, 2, 0); -} else { - x_96 = x_93; -} -lean_ctor_set(x_96, 0, x_95); -lean_ctor_set(x_96, 1, x_92); -return x_96; +return x_13; } else { -lean_object* x_97; lean_object* x_98; lean_object* x_99; lean_object* x_100; -lean_dec(x_1); -x_97 = lean_ctor_get(x_90, 0); -lean_inc(x_97); -x_98 = lean_ctor_get(x_90, 1); -lean_inc(x_98); -if (lean_is_exclusive(x_90)) { - lean_ctor_release(x_90, 0); - lean_ctor_release(x_90, 1); - x_99 = x_90; -} else { - lean_dec_ref(x_90); - x_99 = lean_box(0); -} -if (lean_is_scalar(x_99)) { - x_100 = lean_alloc_ctor(1, 2, 0); -} else { - x_100 = x_99; -} -lean_ctor_set(x_100, 0, x_97); -lean_ctor_set(x_100, 1, x_98); -return x_100; +lean_object* x_26; lean_object* x_27; lean_object* x_28; +x_26 = lean_ctor_get(x_13, 0); +x_27 = lean_ctor_get(x_13, 1); +lean_inc(x_27); +lean_inc(x_26); +lean_dec(x_13); +x_28 = lean_alloc_ctor(1, 2, 0); +lean_ctor_set(x_28, 0, x_26); +lean_ctor_set(x_28, 1, x_27); +return x_28; } } } } -else +LEAN_EXPORT lean_object* l_Lean_Meta_Grind_Arith_CommRing_EqCnstr_simplifyWithExhaustively(lean_object* x_1, lean_object* x_2, lean_object* x_3, lean_object* x_4, lean_object* x_5, lean_object* x_6, lean_object* x_7, lean_object* x_8, lean_object* x_9, lean_object* x_10, lean_object* x_11, lean_object* x_12) { +_start: { -uint8_t x_101; -lean_dec(x_12); +lean_object* x_13; +lean_inc(x_11); +lean_inc(x_10); +lean_inc(x_9); +lean_inc(x_8); +lean_inc(x_7); +lean_inc(x_6); +lean_inc(x_5); +lean_inc(x_4); +lean_inc(x_3); +lean_inc(x_2); +lean_inc(x_1); +x_13 = l_Lean_Meta_Grind_Arith_CommRing_EqCnstr_simplifyWithCore(x_1, x_2, x_3, x_4, x_5, x_6, x_7, x_8, x_9, x_10, x_11, x_12); +if (lean_obj_tag(x_13) == 0) +{ +lean_object* x_14; +x_14 = lean_ctor_get(x_13, 0); +lean_inc(x_14); +if (lean_obj_tag(x_14) == 0) +{ +uint8_t x_15; lean_dec(x_11); lean_dec(x_10); lean_dec(x_9); @@ -6295,72 +6109,47 @@ lean_dec(x_7); lean_dec(x_6); lean_dec(x_5); lean_dec(x_4); +lean_dec(x_3); lean_dec(x_2); -lean_dec(x_1); -x_101 = !lean_is_exclusive(x_15); -if (x_101 == 0) -{ -return x_15; -} -else +x_15 = !lean_is_exclusive(x_13); +if (x_15 == 0) { -lean_object* x_102; lean_object* x_103; lean_object* x_104; -x_102 = 
lean_ctor_get(x_15, 0); -x_103 = lean_ctor_get(x_15, 1); -lean_inc(x_103); -lean_inc(x_102); -lean_dec(x_15); -x_104 = lean_alloc_ctor(1, 2, 0); -lean_ctor_set(x_104, 0, x_102); -lean_ctor_set(x_104, 1, x_103); -return x_104; -} -} -} +lean_object* x_16; +x_16 = lean_ctor_get(x_13, 0); +lean_dec(x_16); +lean_ctor_set(x_13, 0, x_1); +return x_13; } -LEAN_EXPORT lean_object* l_Lean_Loop_forIn_loop___at_Lean_Meta_Grind_Arith_CommRing_EqCnstr_simplify___spec__1(lean_object* x_1, lean_object* x_2, lean_object* x_3, lean_object* x_4, lean_object* x_5, lean_object* x_6, lean_object* x_7, lean_object* x_8, lean_object* x_9, lean_object* x_10, lean_object* x_11, lean_object* x_12) { -_start: -{ -uint8_t x_13; -x_13 = !lean_is_exclusive(x_2); -if (x_13 == 0) +else { -lean_object* x_14; lean_object* x_15; lean_object* x_16; lean_object* x_17; uint8_t x_18; -x_14 = lean_ctor_get(x_2, 1); -x_15 = lean_ctor_get(x_2, 0); -lean_dec(x_15); -x_16 = l_Lean_Meta_Grind_Arith_CommRing_checkMaxSteps(x_4, x_5, x_6, x_7, x_8, x_9, x_10, x_11, x_12); -x_17 = lean_ctor_get(x_16, 0); +lean_object* x_17; lean_object* x_18; +x_17 = lean_ctor_get(x_13, 1); lean_inc(x_17); -x_18 = lean_unbox(x_17); -lean_dec(x_17); -if (x_18 == 0) +lean_dec(x_13); +x_18 = lean_alloc_ctor(0, 2, 0); +lean_ctor_set(x_18, 0, x_1); +lean_ctor_set(x_18, 1, x_17); +return x_18; +} +} +else { -lean_object* x_19; lean_object* x_20; lean_object* x_21; -lean_free_object(x_2); -x_19 = lean_ctor_get(x_16, 1); +lean_object* x_19; lean_object* x_20; +lean_dec(x_1); +x_19 = lean_ctor_get(x_13, 1); lean_inc(x_19); -lean_dec(x_16); -x_20 = lean_box(0); -lean_inc(x_11); -lean_inc(x_10); -lean_inc(x_9); -lean_inc(x_8); -lean_inc(x_7); -lean_inc(x_6); -lean_inc(x_5); -lean_inc(x_4); -lean_inc(x_3); -lean_inc(x_1); -x_21 = l_Lean_Loop_forIn_loop___at_Lean_Meta_Grind_Arith_CommRing_EqCnstr_simplify___spec__1___lambda__1(x_1, x_14, x_20, x_3, x_4, x_5, x_6, x_7, x_8, x_9, x_10, x_11, x_19); -if (lean_obj_tag(x_21) == 0) -{ -lean_object* x_22; -x_22 = lean_ctor_get(x_21, 0); -lean_inc(x_22); -if (lean_obj_tag(x_22) == 0) +lean_dec(x_13); +x_20 = lean_ctor_get(x_14, 0); +lean_inc(x_20); +lean_dec(x_14); +x_1 = x_20; +x_12 = x_19; +goto _start; +} +} +else { -uint8_t x_23; +uint8_t x_22; lean_dec(x_11); lean_dec(x_10); lean_dec(x_9); @@ -6370,51 +6159,134 @@ lean_dec(x_6); lean_dec(x_5); lean_dec(x_4); lean_dec(x_3); +lean_dec(x_2); lean_dec(x_1); -x_23 = !lean_is_exclusive(x_21); -if (x_23 == 0) +x_22 = !lean_is_exclusive(x_13); +if (x_22 == 0) { -lean_object* x_24; lean_object* x_25; -x_24 = lean_ctor_get(x_21, 0); -lean_dec(x_24); -x_25 = lean_ctor_get(x_22, 0); -lean_inc(x_25); -lean_dec(x_22); -lean_ctor_set(x_21, 0, x_25); -return x_21; +return x_13; } else { -lean_object* x_26; lean_object* x_27; lean_object* x_28; -x_26 = lean_ctor_get(x_21, 1); -lean_inc(x_26); -lean_dec(x_21); -x_27 = lean_ctor_get(x_22, 0); -lean_inc(x_27); -lean_dec(x_22); -x_28 = lean_alloc_ctor(0, 2, 0); -lean_ctor_set(x_28, 0, x_27); -lean_ctor_set(x_28, 1, x_26); -return x_28; +lean_object* x_23; lean_object* x_24; lean_object* x_25; +x_23 = lean_ctor_get(x_13, 0); +x_24 = lean_ctor_get(x_13, 1); +lean_inc(x_24); +lean_inc(x_23); +lean_dec(x_13); +x_25 = lean_alloc_ctor(1, 2, 0); +lean_ctor_set(x_25, 0, x_23); +lean_ctor_set(x_25, 1, x_24); +return x_25; +} +} } } +LEAN_EXPORT lean_object* l_Lean_Loop_forIn_loop___at_Lean_Meta_Grind_Arith_CommRing_EqCnstr_simplify___spec__1___lambda__1(lean_object* x_1, lean_object* x_2, lean_object* x_3, lean_object* x_4, lean_object* x_5, 
lean_object* x_6, lean_object* x_7, lean_object* x_8, lean_object* x_9, lean_object* x_10, lean_object* x_11, lean_object* x_12, lean_object* x_13) { +_start: +{ +lean_object* x_14; lean_object* x_15; +x_14 = lean_ctor_get(x_2, 0); +lean_inc(x_14); +x_15 = l_Lean_Grind_CommRing_Poly_findSimp_x3f(x_14, x_4, x_5, x_6, x_7, x_8, x_9, x_10, x_11, x_12, x_13); +if (lean_obj_tag(x_15) == 0) +{ +lean_object* x_16; +x_16 = lean_ctor_get(x_15, 0); +lean_inc(x_16); +if (lean_obj_tag(x_16) == 0) +{ +lean_object* x_17; lean_object* x_18; lean_object* x_19; lean_object* x_20; uint8_t x_21; +lean_dec(x_1); +x_17 = lean_ctor_get(x_15, 1); +lean_inc(x_17); +lean_dec(x_15); +x_18 = l_Lean_Loop_forIn_loop___at_Lean_Meta_Grind_Arith_CommRing_PolyDerivation_simplify___spec__1___lambda__2___closed__2; +x_19 = l_Lean_isTracingEnabledFor___at_Lean_Meta_Grind_Arith_CommRing_Null_setEqUnsat___spec__1(x_18, x_4, x_5, x_6, x_7, x_8, x_9, x_10, x_11, x_12, x_17); +x_20 = lean_ctor_get(x_19, 0); +lean_inc(x_20); +x_21 = lean_unbox(x_20); +lean_dec(x_20); +if (x_21 == 0) +{ +lean_object* x_22; lean_object* x_23; lean_object* x_24; +x_22 = lean_ctor_get(x_19, 1); +lean_inc(x_22); +lean_dec(x_19); +x_23 = lean_box(0); +x_24 = l_Lean_Loop_forIn_loop___at_Lean_Meta_Grind_Arith_CommRing_PolyDerivation_simplify___spec__1___lambda__1(x_2, x_23, x_4, x_5, x_6, x_7, x_8, x_9, x_10, x_11, x_12, x_22); +lean_dec(x_12); +lean_dec(x_11); +lean_dec(x_10); +lean_dec(x_9); +lean_dec(x_8); +lean_dec(x_7); +lean_dec(x_6); +lean_dec(x_5); +lean_dec(x_4); +return x_24; +} else { +uint8_t x_25; +x_25 = !lean_is_exclusive(x_19); +if (x_25 == 0) +{ +lean_object* x_26; lean_object* x_27; lean_object* x_28; +x_26 = lean_ctor_get(x_19, 1); +x_27 = lean_ctor_get(x_19, 0); +lean_dec(x_27); +x_28 = l_Lean_Meta_Grind_updateLastTag(x_5, x_6, x_7, x_8, x_9, x_10, x_11, x_12, x_26); +if (lean_obj_tag(x_28) == 0) +{ lean_object* x_29; lean_object* x_30; -x_29 = lean_ctor_get(x_21, 1); +x_29 = lean_ctor_get(x_28, 1); lean_inc(x_29); -lean_dec(x_21); -x_30 = lean_ctor_get(x_22, 0); -lean_inc(x_30); -lean_dec(x_22); -x_2 = x_30; -x_12 = x_29; -goto _start; -} +lean_dec(x_28); +lean_inc(x_2); +x_30 = l_Lean_Meta_Grind_Arith_CommRing_EqCnstr_denoteExpr___at_Lean_Meta_Grind_Arith_CommRing_Null_setEqUnsat___spec__4(x_2, x_4, x_5, x_6, x_7, x_8, x_9, x_10, x_11, x_12, x_29); +if (lean_obj_tag(x_30) == 0) +{ +lean_object* x_31; lean_object* x_32; lean_object* x_33; lean_object* x_34; lean_object* x_35; lean_object* x_36; lean_object* x_37; lean_object* x_38; lean_object* x_39; lean_object* x_40; lean_object* x_41; +x_31 = lean_ctor_get(x_30, 0); +lean_inc(x_31); +x_32 = lean_ctor_get(x_30, 1); +lean_inc(x_32); +lean_dec(x_30); +x_33 = l_Lean_MessageData_ofExpr(x_31); +x_34 = l_Lean_indentD(x_33); +x_35 = l_Lean_Loop_forIn_loop___at_Lean_Meta_Grind_Arith_CommRing_PolyDerivation_simplify___spec__1___lambda__2___closed__4; +lean_ctor_set_tag(x_19, 7); +lean_ctor_set(x_19, 1, x_34); +lean_ctor_set(x_19, 0, x_35); +x_36 = l___private_Lean_Meta_Tactic_Grind_Arith_CommRing_EqCnstr_0__Lean_Meta_Grind_Arith_CommRing_toRingExpr_x3f___closed__5; +x_37 = lean_alloc_ctor(7, 2, 0); +lean_ctor_set(x_37, 0, x_19); +lean_ctor_set(x_37, 1, x_36); +x_38 = l_Lean_addTrace___at_Lean_Meta_Grind_Arith_CommRing_Null_setEqUnsat___spec__3(x_18, x_37, x_4, x_5, x_6, x_7, x_8, x_9, x_10, x_11, x_12, x_32); +x_39 = lean_ctor_get(x_38, 0); +lean_inc(x_39); +x_40 = lean_ctor_get(x_38, 1); +lean_inc(x_40); +lean_dec(x_38); +x_41 = 
l_Lean_Loop_forIn_loop___at_Lean_Meta_Grind_Arith_CommRing_PolyDerivation_simplify___spec__1___lambda__1(x_2, x_39, x_4, x_5, x_6, x_7, x_8, x_9, x_10, x_11, x_12, x_40); +lean_dec(x_12); +lean_dec(x_11); +lean_dec(x_10); +lean_dec(x_9); +lean_dec(x_8); +lean_dec(x_7); +lean_dec(x_6); +lean_dec(x_5); +lean_dec(x_4); +lean_dec(x_39); +return x_41; } else { -uint8_t x_32; +uint8_t x_42; +lean_free_object(x_19); +lean_dec(x_12); lean_dec(x_11); lean_dec(x_10); lean_dec(x_9); @@ -6423,31 +6295,32 @@ lean_dec(x_7); lean_dec(x_6); lean_dec(x_5); lean_dec(x_4); -lean_dec(x_3); -lean_dec(x_1); -x_32 = !lean_is_exclusive(x_21); -if (x_32 == 0) +lean_dec(x_2); +x_42 = !lean_is_exclusive(x_30); +if (x_42 == 0) { -return x_21; +return x_30; } else { -lean_object* x_33; lean_object* x_34; lean_object* x_35; -x_33 = lean_ctor_get(x_21, 0); -x_34 = lean_ctor_get(x_21, 1); -lean_inc(x_34); -lean_inc(x_33); -lean_dec(x_21); -x_35 = lean_alloc_ctor(1, 2, 0); -lean_ctor_set(x_35, 0, x_33); -lean_ctor_set(x_35, 1, x_34); -return x_35; +lean_object* x_43; lean_object* x_44; lean_object* x_45; +x_43 = lean_ctor_get(x_30, 0); +x_44 = lean_ctor_get(x_30, 1); +lean_inc(x_44); +lean_inc(x_43); +lean_dec(x_30); +x_45 = lean_alloc_ctor(1, 2, 0); +lean_ctor_set(x_45, 0, x_43); +lean_ctor_set(x_45, 1, x_44); +return x_45; } } } else { -uint8_t x_36; +uint8_t x_46; +lean_free_object(x_19); +lean_dec(x_12); lean_dec(x_11); lean_dec(x_10); lean_dec(x_9); @@ -6456,75 +6329,68 @@ lean_dec(x_7); lean_dec(x_6); lean_dec(x_5); lean_dec(x_4); -lean_dec(x_3); -lean_dec(x_1); -x_36 = !lean_is_exclusive(x_16); -if (x_36 == 0) +lean_dec(x_2); +x_46 = !lean_is_exclusive(x_28); +if (x_46 == 0) { -lean_object* x_37; lean_object* x_38; -x_37 = lean_ctor_get(x_16, 0); -lean_dec(x_37); -lean_inc(x_14); -x_38 = lean_alloc_ctor(1, 1, 0); -lean_ctor_set(x_38, 0, x_14); -lean_ctor_set(x_2, 0, x_38); -lean_ctor_set(x_16, 0, x_2); -return x_16; +return x_28; } else { -lean_object* x_39; lean_object* x_40; lean_object* x_41; -x_39 = lean_ctor_get(x_16, 1); -lean_inc(x_39); -lean_dec(x_16); -lean_inc(x_14); -x_40 = lean_alloc_ctor(1, 1, 0); -lean_ctor_set(x_40, 0, x_14); -lean_ctor_set(x_2, 0, x_40); -x_41 = lean_alloc_ctor(0, 2, 0); -lean_ctor_set(x_41, 0, x_2); -lean_ctor_set(x_41, 1, x_39); -return x_41; +lean_object* x_47; lean_object* x_48; lean_object* x_49; +x_47 = lean_ctor_get(x_28, 0); +x_48 = lean_ctor_get(x_28, 1); +lean_inc(x_48); +lean_inc(x_47); +lean_dec(x_28); +x_49 = lean_alloc_ctor(1, 2, 0); +lean_ctor_set(x_49, 0, x_47); +lean_ctor_set(x_49, 1, x_48); +return x_49; } } } else { -lean_object* x_42; lean_object* x_43; lean_object* x_44; uint8_t x_45; -x_42 = lean_ctor_get(x_2, 1); -lean_inc(x_42); -lean_dec(x_2); -x_43 = l_Lean_Meta_Grind_Arith_CommRing_checkMaxSteps(x_4, x_5, x_6, x_7, x_8, x_9, x_10, x_11, x_12); -x_44 = lean_ctor_get(x_43, 0); -lean_inc(x_44); -x_45 = lean_unbox(x_44); -lean_dec(x_44); -if (x_45 == 0) -{ -lean_object* x_46; lean_object* x_47; lean_object* x_48; -x_46 = lean_ctor_get(x_43, 1); -lean_inc(x_46); -lean_dec(x_43); -x_47 = lean_box(0); -lean_inc(x_11); -lean_inc(x_10); -lean_inc(x_9); -lean_inc(x_8); -lean_inc(x_7); -lean_inc(x_6); -lean_inc(x_5); -lean_inc(x_4); -lean_inc(x_3); -lean_inc(x_1); -x_48 = l_Lean_Loop_forIn_loop___at_Lean_Meta_Grind_Arith_CommRing_EqCnstr_simplify___spec__1___lambda__1(x_1, x_42, x_47, x_3, x_4, x_5, x_6, x_7, x_8, x_9, x_10, x_11, x_46); -if (lean_obj_tag(x_48) == 0) +lean_object* x_50; lean_object* x_51; +x_50 = lean_ctor_get(x_19, 1); +lean_inc(x_50); 
+lean_dec(x_19); +x_51 = l_Lean_Meta_Grind_updateLastTag(x_5, x_6, x_7, x_8, x_9, x_10, x_11, x_12, x_50); +if (lean_obj_tag(x_51) == 0) { -lean_object* x_49; -x_49 = lean_ctor_get(x_48, 0); -lean_inc(x_49); -if (lean_obj_tag(x_49) == 0) +lean_object* x_52; lean_object* x_53; +x_52 = lean_ctor_get(x_51, 1); +lean_inc(x_52); +lean_dec(x_51); +lean_inc(x_2); +x_53 = l_Lean_Meta_Grind_Arith_CommRing_EqCnstr_denoteExpr___at_Lean_Meta_Grind_Arith_CommRing_Null_setEqUnsat___spec__4(x_2, x_4, x_5, x_6, x_7, x_8, x_9, x_10, x_11, x_12, x_52); +if (lean_obj_tag(x_53) == 0) { -lean_object* x_50; lean_object* x_51; lean_object* x_52; lean_object* x_53; +lean_object* x_54; lean_object* x_55; lean_object* x_56; lean_object* x_57; lean_object* x_58; lean_object* x_59; lean_object* x_60; lean_object* x_61; lean_object* x_62; lean_object* x_63; lean_object* x_64; lean_object* x_65; +x_54 = lean_ctor_get(x_53, 0); +lean_inc(x_54); +x_55 = lean_ctor_get(x_53, 1); +lean_inc(x_55); +lean_dec(x_53); +x_56 = l_Lean_MessageData_ofExpr(x_54); +x_57 = l_Lean_indentD(x_56); +x_58 = l_Lean_Loop_forIn_loop___at_Lean_Meta_Grind_Arith_CommRing_PolyDerivation_simplify___spec__1___lambda__2___closed__4; +x_59 = lean_alloc_ctor(7, 2, 0); +lean_ctor_set(x_59, 0, x_58); +lean_ctor_set(x_59, 1, x_57); +x_60 = l___private_Lean_Meta_Tactic_Grind_Arith_CommRing_EqCnstr_0__Lean_Meta_Grind_Arith_CommRing_toRingExpr_x3f___closed__5; +x_61 = lean_alloc_ctor(7, 2, 0); +lean_ctor_set(x_61, 0, x_59); +lean_ctor_set(x_61, 1, x_60); +x_62 = l_Lean_addTrace___at_Lean_Meta_Grind_Arith_CommRing_Null_setEqUnsat___spec__3(x_18, x_61, x_4, x_5, x_6, x_7, x_8, x_9, x_10, x_11, x_12, x_55); +x_63 = lean_ctor_get(x_62, 0); +lean_inc(x_63); +x_64 = lean_ctor_get(x_62, 1); +lean_inc(x_64); +lean_dec(x_62); +x_65 = l_Lean_Loop_forIn_loop___at_Lean_Meta_Grind_Arith_CommRing_PolyDerivation_simplify___spec__1___lambda__1(x_2, x_63, x_4, x_5, x_6, x_7, x_8, x_9, x_10, x_11, x_12, x_64); +lean_dec(x_12); lean_dec(x_11); lean_dec(x_10); lean_dec(x_9); @@ -6533,47 +6399,13 @@ lean_dec(x_7); lean_dec(x_6); lean_dec(x_5); lean_dec(x_4); -lean_dec(x_3); -lean_dec(x_1); -x_50 = lean_ctor_get(x_48, 1); -lean_inc(x_50); -if (lean_is_exclusive(x_48)) { - lean_ctor_release(x_48, 0); - lean_ctor_release(x_48, 1); - x_51 = x_48; -} else { - lean_dec_ref(x_48); - x_51 = lean_box(0); -} -x_52 = lean_ctor_get(x_49, 0); -lean_inc(x_52); -lean_dec(x_49); -if (lean_is_scalar(x_51)) { - x_53 = lean_alloc_ctor(0, 2, 0); -} else { - x_53 = x_51; -} -lean_ctor_set(x_53, 0, x_52); -lean_ctor_set(x_53, 1, x_50); -return x_53; -} -else -{ -lean_object* x_54; lean_object* x_55; -x_54 = lean_ctor_get(x_48, 1); -lean_inc(x_54); -lean_dec(x_48); -x_55 = lean_ctor_get(x_49, 0); -lean_inc(x_55); -lean_dec(x_49); -x_2 = x_55; -x_12 = x_54; -goto _start; -} +lean_dec(x_63); +return x_65; } else { -lean_object* x_57; lean_object* x_58; lean_object* x_59; lean_object* x_60; +lean_object* x_66; lean_object* x_67; lean_object* x_68; lean_object* x_69; +lean_dec(x_12); lean_dec(x_11); lean_dec(x_10); lean_dec(x_9); @@ -6582,33 +6414,33 @@ lean_dec(x_7); lean_dec(x_6); lean_dec(x_5); lean_dec(x_4); -lean_dec(x_3); -lean_dec(x_1); -x_57 = lean_ctor_get(x_48, 0); -lean_inc(x_57); -x_58 = lean_ctor_get(x_48, 1); -lean_inc(x_58); -if (lean_is_exclusive(x_48)) { - lean_ctor_release(x_48, 0); - lean_ctor_release(x_48, 1); - x_59 = x_48; +lean_dec(x_2); +x_66 = lean_ctor_get(x_53, 0); +lean_inc(x_66); +x_67 = lean_ctor_get(x_53, 1); +lean_inc(x_67); +if (lean_is_exclusive(x_53)) { + 
lean_ctor_release(x_53, 0); + lean_ctor_release(x_53, 1); + x_68 = x_53; } else { - lean_dec_ref(x_48); - x_59 = lean_box(0); + lean_dec_ref(x_53); + x_68 = lean_box(0); } -if (lean_is_scalar(x_59)) { - x_60 = lean_alloc_ctor(1, 2, 0); +if (lean_is_scalar(x_68)) { + x_69 = lean_alloc_ctor(1, 2, 0); } else { - x_60 = x_59; + x_69 = x_68; } -lean_ctor_set(x_60, 0, x_57); -lean_ctor_set(x_60, 1, x_58); -return x_60; +lean_ctor_set(x_69, 0, x_66); +lean_ctor_set(x_69, 1, x_67); +return x_69; } } else { -lean_object* x_61; lean_object* x_62; lean_object* x_63; lean_object* x_64; lean_object* x_65; +lean_object* x_70; lean_object* x_71; lean_object* x_72; lean_object* x_73; +lean_dec(x_12); lean_dec(x_11); lean_dec(x_10); lean_dec(x_9); @@ -6617,325 +6449,298 @@ lean_dec(x_7); lean_dec(x_6); lean_dec(x_5); lean_dec(x_4); -lean_dec(x_3); -lean_dec(x_1); -x_61 = lean_ctor_get(x_43, 1); -lean_inc(x_61); -if (lean_is_exclusive(x_43)) { - lean_ctor_release(x_43, 0); - lean_ctor_release(x_43, 1); - x_62 = x_43; +lean_dec(x_2); +x_70 = lean_ctor_get(x_51, 0); +lean_inc(x_70); +x_71 = lean_ctor_get(x_51, 1); +lean_inc(x_71); +if (lean_is_exclusive(x_51)) { + lean_ctor_release(x_51, 0); + lean_ctor_release(x_51, 1); + x_72 = x_51; } else { - lean_dec_ref(x_43); - x_62 = lean_box(0); + lean_dec_ref(x_51); + x_72 = lean_box(0); } -lean_inc(x_42); -x_63 = lean_alloc_ctor(1, 1, 0); -lean_ctor_set(x_63, 0, x_42); -x_64 = lean_alloc_ctor(0, 2, 0); -lean_ctor_set(x_64, 0, x_63); -lean_ctor_set(x_64, 1, x_42); -if (lean_is_scalar(x_62)) { - x_65 = lean_alloc_ctor(0, 2, 0); +if (lean_is_scalar(x_72)) { + x_73 = lean_alloc_ctor(1, 2, 0); } else { - x_65 = x_62; + x_73 = x_72; } -lean_ctor_set(x_65, 0, x_64); -lean_ctor_set(x_65, 1, x_61); -return x_65; +lean_ctor_set(x_73, 0, x_70); +lean_ctor_set(x_73, 1, x_71); +return x_73; } } } } -LEAN_EXPORT lean_object* l_Lean_Meta_Grind_Arith_CommRing_EqCnstr_simplify(lean_object* x_1, lean_object* x_2, lean_object* x_3, lean_object* x_4, lean_object* x_5, lean_object* x_6, lean_object* x_7, lean_object* x_8, lean_object* x_9, lean_object* x_10, lean_object* x_11) { -_start: +else { -lean_object* x_12; lean_object* x_13; lean_object* x_14; -x_12 = lean_box(0); -x_13 = lean_alloc_ctor(0, 2, 0); -lean_ctor_set(x_13, 0, x_12); -lean_ctor_set(x_13, 1, x_1); -x_14 = l_Lean_Loop_forIn_loop___at_Lean_Meta_Grind_Arith_CommRing_EqCnstr_simplify___spec__1(x_12, x_13, x_2, x_3, x_4, x_5, x_6, x_7, x_8, x_9, x_10, x_11); -if (lean_obj_tag(x_14) == 0) +lean_object* x_74; uint8_t x_75; +x_74 = lean_ctor_get(x_15, 1); +lean_inc(x_74); +lean_dec(x_15); +x_75 = !lean_is_exclusive(x_16); +if (x_75 == 0) { -lean_object* x_15; lean_object* x_16; -x_15 = lean_ctor_get(x_14, 0); -lean_inc(x_15); -x_16 = lean_ctor_get(x_15, 0); -lean_inc(x_16); -if (lean_obj_tag(x_16) == 0) +lean_object* x_76; lean_object* x_77; +x_76 = lean_ctor_get(x_16, 0); +x_77 = l_Lean_Meta_Grind_Arith_CommRing_EqCnstr_simplifyWith(x_2, x_76, x_4, x_5, x_6, x_7, x_8, x_9, x_10, x_11, x_12, x_74); +if (lean_obj_tag(x_77) == 0) { -uint8_t x_17; -x_17 = !lean_is_exclusive(x_14); -if (x_17 == 0) +uint8_t x_78; +x_78 = !lean_is_exclusive(x_77); +if (x_78 == 0) { -lean_object* x_18; lean_object* x_19; -x_18 = lean_ctor_get(x_14, 0); -lean_dec(x_18); -x_19 = lean_ctor_get(x_15, 1); -lean_inc(x_19); -lean_dec(x_15); -lean_ctor_set(x_14, 0, x_19); -return x_14; +lean_object* x_79; lean_object* x_80; +x_79 = lean_ctor_get(x_77, 0); +x_80 = lean_alloc_ctor(0, 2, 0); +lean_ctor_set(x_80, 0, x_1); +lean_ctor_set(x_80, 1, x_79); 
+lean_ctor_set(x_16, 0, x_80); +lean_ctor_set(x_77, 0, x_16); +return x_77; } else { -lean_object* x_20; lean_object* x_21; lean_object* x_22; -x_20 = lean_ctor_get(x_14, 1); -lean_inc(x_20); -lean_dec(x_14); -x_21 = lean_ctor_get(x_15, 1); -lean_inc(x_21); -lean_dec(x_15); -x_22 = lean_alloc_ctor(0, 2, 0); -lean_ctor_set(x_22, 0, x_21); -lean_ctor_set(x_22, 1, x_20); -return x_22; +lean_object* x_81; lean_object* x_82; lean_object* x_83; lean_object* x_84; +x_81 = lean_ctor_get(x_77, 0); +x_82 = lean_ctor_get(x_77, 1); +lean_inc(x_82); +lean_inc(x_81); +lean_dec(x_77); +x_83 = lean_alloc_ctor(0, 2, 0); +lean_ctor_set(x_83, 0, x_1); +lean_ctor_set(x_83, 1, x_81); +lean_ctor_set(x_16, 0, x_83); +x_84 = lean_alloc_ctor(0, 2, 0); +lean_ctor_set(x_84, 0, x_16); +lean_ctor_set(x_84, 1, x_82); +return x_84; } } else { -uint8_t x_23; -lean_dec(x_15); -x_23 = !lean_is_exclusive(x_14); -if (x_23 == 0) +uint8_t x_85; +lean_free_object(x_16); +lean_dec(x_1); +x_85 = !lean_is_exclusive(x_77); +if (x_85 == 0) { -lean_object* x_24; lean_object* x_25; -x_24 = lean_ctor_get(x_14, 0); -lean_dec(x_24); -x_25 = lean_ctor_get(x_16, 0); -lean_inc(x_25); -lean_dec(x_16); -lean_ctor_set(x_14, 0, x_25); -return x_14; +return x_77; } else { -lean_object* x_26; lean_object* x_27; lean_object* x_28; -x_26 = lean_ctor_get(x_14, 1); -lean_inc(x_26); -lean_dec(x_14); -x_27 = lean_ctor_get(x_16, 0); -lean_inc(x_27); -lean_dec(x_16); -x_28 = lean_alloc_ctor(0, 2, 0); -lean_ctor_set(x_28, 0, x_27); -lean_ctor_set(x_28, 1, x_26); -return x_28; -} +lean_object* x_86; lean_object* x_87; lean_object* x_88; +x_86 = lean_ctor_get(x_77, 0); +x_87 = lean_ctor_get(x_77, 1); +lean_inc(x_87); +lean_inc(x_86); +lean_dec(x_77); +x_88 = lean_alloc_ctor(1, 2, 0); +lean_ctor_set(x_88, 0, x_86); +lean_ctor_set(x_88, 1, x_87); +return x_88; } } -else -{ -uint8_t x_29; -x_29 = !lean_is_exclusive(x_14); -if (x_29 == 0) -{ -return x_14; } else { -lean_object* x_30; lean_object* x_31; lean_object* x_32; -x_30 = lean_ctor_get(x_14, 0); -x_31 = lean_ctor_get(x_14, 1); -lean_inc(x_31); -lean_inc(x_30); -lean_dec(x_14); -x_32 = lean_alloc_ctor(1, 2, 0); -lean_ctor_set(x_32, 0, x_30); -lean_ctor_set(x_32, 1, x_31); -return x_32; -} -} -} -} -LEAN_EXPORT lean_object* l_Lean_Loop_forIn_loop___at_Lean_Meta_Grind_Arith_CommRing_EqCnstr_simplify___spec__1___lambda__1___boxed(lean_object* x_1, lean_object* x_2, lean_object* x_3, lean_object* x_4, lean_object* x_5, lean_object* x_6, lean_object* x_7, lean_object* x_8, lean_object* x_9, lean_object* x_10, lean_object* x_11, lean_object* x_12, lean_object* x_13) { -_start: +lean_object* x_89; lean_object* x_90; +x_89 = lean_ctor_get(x_16, 0); +lean_inc(x_89); +lean_dec(x_16); +x_90 = l_Lean_Meta_Grind_Arith_CommRing_EqCnstr_simplifyWith(x_2, x_89, x_4, x_5, x_6, x_7, x_8, x_9, x_10, x_11, x_12, x_74); +if (lean_obj_tag(x_90) == 0) { -lean_object* x_14; -x_14 = l_Lean_Loop_forIn_loop___at_Lean_Meta_Grind_Arith_CommRing_EqCnstr_simplify___spec__1___lambda__1(x_1, x_2, x_3, x_4, x_5, x_6, x_7, x_8, x_9, x_10, x_11, x_12, x_13); -lean_dec(x_3); -return x_14; -} +lean_object* x_91; lean_object* x_92; lean_object* x_93; lean_object* x_94; lean_object* x_95; lean_object* x_96; +x_91 = lean_ctor_get(x_90, 0); +lean_inc(x_91); +x_92 = lean_ctor_get(x_90, 1); +lean_inc(x_92); +if (lean_is_exclusive(x_90)) { + lean_ctor_release(x_90, 0); + lean_ctor_release(x_90, 1); + x_93 = x_90; +} else { + lean_dec_ref(x_90); + x_93 = lean_box(0); } -LEAN_EXPORT lean_object* 
l_Lean_Meta_Grind_Arith_CommRing_EqCnstr_checkConstant___lambda__1(lean_object* x_1, lean_object* x_2, lean_object* x_3, lean_object* x_4, lean_object* x_5, lean_object* x_6, lean_object* x_7, lean_object* x_8, lean_object* x_9, lean_object* x_10, lean_object* x_11) { -_start: -{ -uint8_t x_12; lean_object* x_13; lean_object* x_14; -x_12 = 1; -x_13 = lean_box(x_12); -x_14 = lean_alloc_ctor(0, 2, 0); -lean_ctor_set(x_14, 0, x_13); -lean_ctor_set(x_14, 1, x_11); -return x_14; +x_94 = lean_alloc_ctor(0, 2, 0); +lean_ctor_set(x_94, 0, x_1); +lean_ctor_set(x_94, 1, x_91); +x_95 = lean_alloc_ctor(1, 1, 0); +lean_ctor_set(x_95, 0, x_94); +if (lean_is_scalar(x_93)) { + x_96 = lean_alloc_ctor(0, 2, 0); +} else { + x_96 = x_93; } +lean_ctor_set(x_96, 0, x_95); +lean_ctor_set(x_96, 1, x_92); +return x_96; } -static lean_object* _init_l_Lean_Meta_Grind_Arith_CommRing_EqCnstr_checkConstant___closed__1() { -_start: +else { -lean_object* x_1; -x_1 = lean_alloc_closure((void*)(l_Lean_Meta_Grind_Arith_CommRing_EqCnstr_checkConstant___lambda__1___boxed), 11, 0); -return x_1; +lean_object* x_97; lean_object* x_98; lean_object* x_99; lean_object* x_100; +lean_dec(x_1); +x_97 = lean_ctor_get(x_90, 0); +lean_inc(x_97); +x_98 = lean_ctor_get(x_90, 1); +lean_inc(x_98); +if (lean_is_exclusive(x_90)) { + lean_ctor_release(x_90, 0); + lean_ctor_release(x_90, 1); + x_99 = x_90; +} else { + lean_dec_ref(x_90); + x_99 = lean_box(0); } +if (lean_is_scalar(x_99)) { + x_100 = lean_alloc_ctor(1, 2, 0); +} else { + x_100 = x_99; } -static lean_object* _init_l_Lean_Meta_Grind_Arith_CommRing_EqCnstr_checkConstant___closed__2() { -_start: -{ -lean_object* x_1; lean_object* x_2; -x_1 = lean_unsigned_to_nat(0u); -x_2 = lean_nat_to_int(x_1); -return x_2; +lean_ctor_set(x_100, 0, x_97); +lean_ctor_set(x_100, 1, x_98); +return x_100; } } -static lean_object* _init_l_Lean_Meta_Grind_Arith_CommRing_EqCnstr_checkConstant___closed__3() { -_start: -{ -lean_object* x_1; -x_1 = lean_mk_string_unchecked("assert", 6, 6); -return x_1; } } -static lean_object* _init_l_Lean_Meta_Grind_Arith_CommRing_EqCnstr_checkConstant___closed__4() { -_start: +else { -lean_object* x_1; -x_1 = lean_mk_string_unchecked("discard", 7, 7); -return x_1; -} -} -static lean_object* _init_l_Lean_Meta_Grind_Arith_CommRing_EqCnstr_checkConstant___closed__5() { -_start: +uint8_t x_101; +lean_dec(x_12); +lean_dec(x_11); +lean_dec(x_10); +lean_dec(x_9); +lean_dec(x_8); +lean_dec(x_7); +lean_dec(x_6); +lean_dec(x_5); +lean_dec(x_4); +lean_dec(x_2); +lean_dec(x_1); +x_101 = !lean_is_exclusive(x_15); +if (x_101 == 0) { -lean_object* x_1; lean_object* x_2; lean_object* x_3; lean_object* x_4; lean_object* x_5; -x_1 = l_Lean_Meta_Grind_Arith_CommRing_PolyDerivation_simplifyWith___closed__1; -x_2 = l_Lean_Meta_Grind_Arith_CommRing_PolyDerivation_simplifyWith___closed__2; -x_3 = l_Lean_Meta_Grind_Arith_CommRing_EqCnstr_checkConstant___closed__3; -x_4 = l_Lean_Meta_Grind_Arith_CommRing_EqCnstr_checkConstant___closed__4; -x_5 = l_Lean_Name_mkStr4(x_1, x_2, x_3, x_4); -return x_5; -} +return x_15; } -static lean_object* _init_l_Lean_Meta_Grind_Arith_CommRing_EqCnstr_checkConstant___closed__6() { -_start: +else { -lean_object* x_1; -x_1 = lean_mk_string_unchecked("trivial", 7, 7); -return x_1; +lean_object* x_102; lean_object* x_103; lean_object* x_104; +x_102 = lean_ctor_get(x_15, 0); +x_103 = lean_ctor_get(x_15, 1); +lean_inc(x_103); +lean_inc(x_102); +lean_dec(x_15); +x_104 = lean_alloc_ctor(1, 2, 0); +lean_ctor_set(x_104, 0, x_102); +lean_ctor_set(x_104, 1, x_103); +return 
x_104; } } -static lean_object* _init_l_Lean_Meta_Grind_Arith_CommRing_EqCnstr_checkConstant___closed__7() { -_start: -{ -lean_object* x_1; lean_object* x_2; lean_object* x_3; lean_object* x_4; lean_object* x_5; -x_1 = l_Lean_Meta_Grind_Arith_CommRing_PolyDerivation_simplifyWith___closed__1; -x_2 = l_Lean_Meta_Grind_Arith_CommRing_PolyDerivation_simplifyWith___closed__2; -x_3 = l_Lean_Meta_Grind_Arith_CommRing_EqCnstr_checkConstant___closed__3; -x_4 = l_Lean_Meta_Grind_Arith_CommRing_EqCnstr_checkConstant___closed__6; -x_5 = l_Lean_Name_mkStr4(x_1, x_2, x_3, x_4); -return x_5; } } -LEAN_EXPORT lean_object* l_Lean_Meta_Grind_Arith_CommRing_EqCnstr_checkConstant(lean_object* x_1, lean_object* x_2, lean_object* x_3, lean_object* x_4, lean_object* x_5, lean_object* x_6, lean_object* x_7, lean_object* x_8, lean_object* x_9, lean_object* x_10, lean_object* x_11) { +LEAN_EXPORT lean_object* l_Lean_Loop_forIn_loop___at_Lean_Meta_Grind_Arith_CommRing_EqCnstr_simplify___spec__1(lean_object* x_1, lean_object* x_2, lean_object* x_3, lean_object* x_4, lean_object* x_5, lean_object* x_6, lean_object* x_7, lean_object* x_8, lean_object* x_9, lean_object* x_10, lean_object* x_11, lean_object* x_12) { _start: { -lean_object* x_12; -x_12 = lean_ctor_get(x_1, 0); -lean_inc(x_12); -if (lean_obj_tag(x_12) == 0) +uint8_t x_13; +x_13 = !lean_is_exclusive(x_2); +if (x_13 == 0) { -lean_object* x_13; lean_object* x_14; lean_object* x_15; uint8_t x_16; -x_13 = lean_ctor_get(x_12, 0); -lean_inc(x_13); -lean_dec(x_12); -x_14 = l_Lean_Meta_Grind_Arith_CommRing_EqCnstr_checkConstant___closed__1; -x_15 = l_Lean_Meta_Grind_Arith_CommRing_EqCnstr_checkConstant___closed__2; -x_16 = lean_int_dec_eq(x_13, x_15); -lean_dec(x_13); -if (x_16 == 0) -{ -lean_object* x_17; -x_17 = l_Lean_Meta_Grind_Arith_CommRing_hasChar(x_2, x_3, x_4, x_5, x_6, x_7, x_8, x_9, x_10, x_11); -if (lean_obj_tag(x_17) == 0) +lean_object* x_14; lean_object* x_15; lean_object* x_16; lean_object* x_17; uint8_t x_18; +x_14 = lean_ctor_get(x_2, 1); +x_15 = lean_ctor_get(x_2, 0); +lean_dec(x_15); +x_16 = l_Lean_Meta_Grind_Arith_CommRing_checkMaxSteps(x_4, x_5, x_6, x_7, x_8, x_9, x_10, x_11, x_12); +x_17 = lean_ctor_get(x_16, 0); +lean_inc(x_17); +x_18 = lean_unbox(x_17); +lean_dec(x_17); +if (x_18 == 0) { -lean_object* x_18; uint8_t x_19; -x_18 = lean_ctor_get(x_17, 0); -lean_inc(x_18); -x_19 = lean_unbox(x_18); -lean_dec(x_18); -if (x_19 == 0) +lean_object* x_19; lean_object* x_20; lean_object* x_21; +lean_free_object(x_2); +x_19 = lean_ctor_get(x_16, 1); +lean_inc(x_19); +lean_dec(x_16); +x_20 = lean_box(0); +lean_inc(x_11); +lean_inc(x_10); +lean_inc(x_9); +lean_inc(x_8); +lean_inc(x_7); +lean_inc(x_6); +lean_inc(x_5); +lean_inc(x_4); +lean_inc(x_3); +lean_inc(x_1); +x_21 = l_Lean_Loop_forIn_loop___at_Lean_Meta_Grind_Arith_CommRing_EqCnstr_simplify___spec__1___lambda__1(x_1, x_14, x_20, x_3, x_4, x_5, x_6, x_7, x_8, x_9, x_10, x_11, x_19); +if (lean_obj_tag(x_21) == 0) { -lean_object* x_20; lean_object* x_21; lean_object* x_22; lean_object* x_23; uint8_t x_24; -x_20 = lean_ctor_get(x_17, 1); -lean_inc(x_20); -lean_dec(x_17); -x_21 = l_Lean_Meta_Grind_Arith_CommRing_EqCnstr_checkConstant___closed__5; -x_22 = l_Lean_isTracingEnabledFor___at_Lean_Meta_Grind_Arith_CommRing_Null_setEqUnsat___spec__1(x_21, x_2, x_3, x_4, x_5, x_6, x_7, x_8, x_9, x_10, x_20); -x_23 = lean_ctor_get(x_22, 0); -lean_inc(x_23); -x_24 = lean_unbox(x_23); -lean_dec(x_23); -if (x_24 == 0) +lean_object* x_22; +x_22 = lean_ctor_get(x_21, 0); +lean_inc(x_22); +if (lean_obj_tag(x_22) == 0) 
{ -lean_object* x_25; lean_object* x_26; lean_object* x_27; +uint8_t x_23; +lean_dec(x_11); +lean_dec(x_10); +lean_dec(x_9); +lean_dec(x_8); +lean_dec(x_7); +lean_dec(x_6); +lean_dec(x_5); +lean_dec(x_4); +lean_dec(x_3); lean_dec(x_1); -x_25 = lean_ctor_get(x_22, 1); +x_23 = !lean_is_exclusive(x_21); +if (x_23 == 0) +{ +lean_object* x_24; lean_object* x_25; +x_24 = lean_ctor_get(x_21, 0); +lean_dec(x_24); +x_25 = lean_ctor_get(x_22, 0); lean_inc(x_25); lean_dec(x_22); -x_26 = lean_box(0); -x_27 = lean_apply_11(x_14, x_26, x_2, x_3, x_4, x_5, x_6, x_7, x_8, x_9, x_10, x_25); -return x_27; +lean_ctor_set(x_21, 0, x_25); +return x_21; } else { -uint8_t x_28; -x_28 = !lean_is_exclusive(x_22); -if (x_28 == 0) +lean_object* x_26; lean_object* x_27; lean_object* x_28; +x_26 = lean_ctor_get(x_21, 1); +lean_inc(x_26); +lean_dec(x_21); +x_27 = lean_ctor_get(x_22, 0); +lean_inc(x_27); +lean_dec(x_22); +x_28 = lean_alloc_ctor(0, 2, 0); +lean_ctor_set(x_28, 0, x_27); +lean_ctor_set(x_28, 1, x_26); +return x_28; +} +} +else { -lean_object* x_29; lean_object* x_30; lean_object* x_31; -x_29 = lean_ctor_get(x_22, 1); +lean_object* x_29; lean_object* x_30; +x_29 = lean_ctor_get(x_21, 1); +lean_inc(x_29); +lean_dec(x_21); x_30 = lean_ctor_get(x_22, 0); -lean_dec(x_30); -x_31 = l_Lean_Meta_Grind_updateLastTag(x_3, x_4, x_5, x_6, x_7, x_8, x_9, x_10, x_29); -if (lean_obj_tag(x_31) == 0) -{ -lean_object* x_32; lean_object* x_33; -x_32 = lean_ctor_get(x_31, 1); -lean_inc(x_32); -lean_dec(x_31); -lean_inc(x_10); -lean_inc(x_9); -lean_inc(x_8); -lean_inc(x_7); -x_33 = l_Lean_Meta_Grind_Arith_CommRing_EqCnstr_denoteExpr(x_1, x_2, x_3, x_4, x_5, x_6, x_7, x_8, x_9, x_10, x_32); -if (lean_obj_tag(x_33) == 0) -{ -lean_object* x_34; lean_object* x_35; lean_object* x_36; lean_object* x_37; lean_object* x_38; lean_object* x_39; lean_object* x_40; lean_object* x_41; lean_object* x_42; -x_34 = lean_ctor_get(x_33, 0); -lean_inc(x_34); -x_35 = lean_ctor_get(x_33, 1); -lean_inc(x_35); -lean_dec(x_33); -x_36 = l_Lean_MessageData_ofExpr(x_34); -x_37 = l___private_Lean_Meta_Tactic_Grind_Arith_CommRing_EqCnstr_0__Lean_Meta_Grind_Arith_CommRing_toRingExpr_x3f___closed__5; -lean_ctor_set_tag(x_22, 7); -lean_ctor_set(x_22, 1, x_36); -lean_ctor_set(x_22, 0, x_37); -x_38 = lean_alloc_ctor(7, 2, 0); -lean_ctor_set(x_38, 0, x_22); -lean_ctor_set(x_38, 1, x_37); -x_39 = l_Lean_addTrace___at_Lean_Meta_Grind_Arith_CommRing_Null_setEqUnsat___spec__3(x_21, x_38, x_2, x_3, x_4, x_5, x_6, x_7, x_8, x_9, x_10, x_35); -x_40 = lean_ctor_get(x_39, 0); -lean_inc(x_40); -x_41 = lean_ctor_get(x_39, 1); -lean_inc(x_41); -lean_dec(x_39); -x_42 = lean_apply_11(x_14, x_40, x_2, x_3, x_4, x_5, x_6, x_7, x_8, x_9, x_10, x_41); -return x_42; +lean_inc(x_30); +lean_dec(x_22); +x_2 = x_30; +x_12 = x_29; +goto _start; +} } else { -uint8_t x_43; -lean_free_object(x_22); +uint8_t x_32; +lean_dec(x_11); lean_dec(x_10); lean_dec(x_9); lean_dec(x_8); @@ -6944,31 +6749,31 @@ lean_dec(x_6); lean_dec(x_5); lean_dec(x_4); lean_dec(x_3); -lean_dec(x_2); -x_43 = !lean_is_exclusive(x_33); -if (x_43 == 0) +lean_dec(x_1); +x_32 = !lean_is_exclusive(x_21); +if (x_32 == 0) { -return x_33; +return x_21; } else { -lean_object* x_44; lean_object* x_45; lean_object* x_46; -x_44 = lean_ctor_get(x_33, 0); -x_45 = lean_ctor_get(x_33, 1); -lean_inc(x_45); -lean_inc(x_44); -lean_dec(x_33); -x_46 = lean_alloc_ctor(1, 2, 0); -lean_ctor_set(x_46, 0, x_44); -lean_ctor_set(x_46, 1, x_45); -return x_46; +lean_object* x_33; lean_object* x_34; lean_object* x_35; +x_33 = lean_ctor_get(x_21, 
0); +x_34 = lean_ctor_get(x_21, 1); +lean_inc(x_34); +lean_inc(x_33); +lean_dec(x_21); +x_35 = lean_alloc_ctor(1, 2, 0); +lean_ctor_set(x_35, 0, x_33); +lean_ctor_set(x_35, 1, x_34); +return x_35; } } } else { -uint8_t x_47; -lean_free_object(x_22); +uint8_t x_36; +lean_dec(x_11); lean_dec(x_10); lean_dec(x_9); lean_dec(x_8); @@ -6977,74 +6782,75 @@ lean_dec(x_6); lean_dec(x_5); lean_dec(x_4); lean_dec(x_3); -lean_dec(x_2); lean_dec(x_1); -x_47 = !lean_is_exclusive(x_31); -if (x_47 == 0) +x_36 = !lean_is_exclusive(x_16); +if (x_36 == 0) { -return x_31; +lean_object* x_37; lean_object* x_38; +x_37 = lean_ctor_get(x_16, 0); +lean_dec(x_37); +lean_inc(x_14); +x_38 = lean_alloc_ctor(1, 1, 0); +lean_ctor_set(x_38, 0, x_14); +lean_ctor_set(x_2, 0, x_38); +lean_ctor_set(x_16, 0, x_2); +return x_16; } else { -lean_object* x_48; lean_object* x_49; lean_object* x_50; -x_48 = lean_ctor_get(x_31, 0); -x_49 = lean_ctor_get(x_31, 1); -lean_inc(x_49); -lean_inc(x_48); -lean_dec(x_31); -x_50 = lean_alloc_ctor(1, 2, 0); -lean_ctor_set(x_50, 0, x_48); -lean_ctor_set(x_50, 1, x_49); -return x_50; +lean_object* x_39; lean_object* x_40; lean_object* x_41; +x_39 = lean_ctor_get(x_16, 1); +lean_inc(x_39); +lean_dec(x_16); +lean_inc(x_14); +x_40 = lean_alloc_ctor(1, 1, 0); +lean_ctor_set(x_40, 0, x_14); +lean_ctor_set(x_2, 0, x_40); +x_41 = lean_alloc_ctor(0, 2, 0); +lean_ctor_set(x_41, 0, x_2); +lean_ctor_set(x_41, 1, x_39); +return x_41; } } } else { -lean_object* x_51; lean_object* x_52; -x_51 = lean_ctor_get(x_22, 1); -lean_inc(x_51); -lean_dec(x_22); -x_52 = l_Lean_Meta_Grind_updateLastTag(x_3, x_4, x_5, x_6, x_7, x_8, x_9, x_10, x_51); -if (lean_obj_tag(x_52) == 0) +lean_object* x_42; lean_object* x_43; lean_object* x_44; uint8_t x_45; +x_42 = lean_ctor_get(x_2, 1); +lean_inc(x_42); +lean_dec(x_2); +x_43 = l_Lean_Meta_Grind_Arith_CommRing_checkMaxSteps(x_4, x_5, x_6, x_7, x_8, x_9, x_10, x_11, x_12); +x_44 = lean_ctor_get(x_43, 0); +lean_inc(x_44); +x_45 = lean_unbox(x_44); +lean_dec(x_44); +if (x_45 == 0) { -lean_object* x_53; lean_object* x_54; -x_53 = lean_ctor_get(x_52, 1); -lean_inc(x_53); -lean_dec(x_52); +lean_object* x_46; lean_object* x_47; lean_object* x_48; +x_46 = lean_ctor_get(x_43, 1); +lean_inc(x_46); +lean_dec(x_43); +x_47 = lean_box(0); +lean_inc(x_11); lean_inc(x_10); lean_inc(x_9); lean_inc(x_8); lean_inc(x_7); -x_54 = l_Lean_Meta_Grind_Arith_CommRing_EqCnstr_denoteExpr(x_1, x_2, x_3, x_4, x_5, x_6, x_7, x_8, x_9, x_10, x_53); -if (lean_obj_tag(x_54) == 0) -{ -lean_object* x_55; lean_object* x_56; lean_object* x_57; lean_object* x_58; lean_object* x_59; lean_object* x_60; lean_object* x_61; lean_object* x_62; lean_object* x_63; lean_object* x_64; -x_55 = lean_ctor_get(x_54, 0); -lean_inc(x_55); -x_56 = lean_ctor_get(x_54, 1); -lean_inc(x_56); -lean_dec(x_54); -x_57 = l_Lean_MessageData_ofExpr(x_55); -x_58 = l___private_Lean_Meta_Tactic_Grind_Arith_CommRing_EqCnstr_0__Lean_Meta_Grind_Arith_CommRing_toRingExpr_x3f___closed__5; -x_59 = lean_alloc_ctor(7, 2, 0); -lean_ctor_set(x_59, 0, x_58); -lean_ctor_set(x_59, 1, x_57); -x_60 = lean_alloc_ctor(7, 2, 0); -lean_ctor_set(x_60, 0, x_59); -lean_ctor_set(x_60, 1, x_58); -x_61 = l_Lean_addTrace___at_Lean_Meta_Grind_Arith_CommRing_Null_setEqUnsat___spec__3(x_21, x_60, x_2, x_3, x_4, x_5, x_6, x_7, x_8, x_9, x_10, x_56); -x_62 = lean_ctor_get(x_61, 0); -lean_inc(x_62); -x_63 = lean_ctor_get(x_61, 1); -lean_inc(x_63); -lean_dec(x_61); -x_64 = lean_apply_11(x_14, x_62, x_2, x_3, x_4, x_5, x_6, x_7, x_8, x_9, x_10, x_63); -return x_64; -} -else 
+lean_inc(x_6); +lean_inc(x_5); +lean_inc(x_4); +lean_inc(x_3); +lean_inc(x_1); +x_48 = l_Lean_Loop_forIn_loop___at_Lean_Meta_Grind_Arith_CommRing_EqCnstr_simplify___spec__1___lambda__1(x_1, x_42, x_47, x_3, x_4, x_5, x_6, x_7, x_8, x_9, x_10, x_11, x_46); +if (lean_obj_tag(x_48) == 0) { -lean_object* x_65; lean_object* x_66; lean_object* x_67; lean_object* x_68; +lean_object* x_49; +x_49 = lean_ctor_get(x_48, 0); +lean_inc(x_49); +if (lean_obj_tag(x_49) == 0) +{ +lean_object* x_50; lean_object* x_51; lean_object* x_52; lean_object* x_53; +lean_dec(x_11); lean_dec(x_10); lean_dec(x_9); lean_dec(x_8); @@ -7053,32 +6859,47 @@ lean_dec(x_6); lean_dec(x_5); lean_dec(x_4); lean_dec(x_3); -lean_dec(x_2); -x_65 = lean_ctor_get(x_54, 0); -lean_inc(x_65); -x_66 = lean_ctor_get(x_54, 1); -lean_inc(x_66); -if (lean_is_exclusive(x_54)) { - lean_ctor_release(x_54, 0); - lean_ctor_release(x_54, 1); - x_67 = x_54; +lean_dec(x_1); +x_50 = lean_ctor_get(x_48, 1); +lean_inc(x_50); +if (lean_is_exclusive(x_48)) { + lean_ctor_release(x_48, 0); + lean_ctor_release(x_48, 1); + x_51 = x_48; } else { - lean_dec_ref(x_54); - x_67 = lean_box(0); + lean_dec_ref(x_48); + x_51 = lean_box(0); } -if (lean_is_scalar(x_67)) { - x_68 = lean_alloc_ctor(1, 2, 0); +x_52 = lean_ctor_get(x_49, 0); +lean_inc(x_52); +lean_dec(x_49); +if (lean_is_scalar(x_51)) { + x_53 = lean_alloc_ctor(0, 2, 0); } else { - x_68 = x_67; + x_53 = x_51; } -lean_ctor_set(x_68, 0, x_65); -lean_ctor_set(x_68, 1, x_66); -return x_68; +lean_ctor_set(x_53, 0, x_52); +lean_ctor_set(x_53, 1, x_50); +return x_53; +} +else +{ +lean_object* x_54; lean_object* x_55; +x_54 = lean_ctor_get(x_48, 1); +lean_inc(x_54); +lean_dec(x_48); +x_55 = lean_ctor_get(x_49, 0); +lean_inc(x_55); +lean_dec(x_49); +x_2 = x_55; +x_12 = x_54; +goto _start; } } else { -lean_object* x_69; lean_object* x_70; lean_object* x_71; lean_object* x_72; +lean_object* x_57; lean_object* x_58; lean_object* x_59; lean_object* x_60; +lean_dec(x_11); lean_dec(x_10); lean_dec(x_9); lean_dec(x_8); @@ -7087,62 +6908,33 @@ lean_dec(x_6); lean_dec(x_5); lean_dec(x_4); lean_dec(x_3); -lean_dec(x_2); lean_dec(x_1); -x_69 = lean_ctor_get(x_52, 0); -lean_inc(x_69); -x_70 = lean_ctor_get(x_52, 1); -lean_inc(x_70); -if (lean_is_exclusive(x_52)) { - lean_ctor_release(x_52, 0); - lean_ctor_release(x_52, 1); - x_71 = x_52; +x_57 = lean_ctor_get(x_48, 0); +lean_inc(x_57); +x_58 = lean_ctor_get(x_48, 1); +lean_inc(x_58); +if (lean_is_exclusive(x_48)) { + lean_ctor_release(x_48, 0); + lean_ctor_release(x_48, 1); + x_59 = x_48; } else { - lean_dec_ref(x_52); - x_71 = lean_box(0); + lean_dec_ref(x_48); + x_59 = lean_box(0); } -if (lean_is_scalar(x_71)) { - x_72 = lean_alloc_ctor(1, 2, 0); +if (lean_is_scalar(x_59)) { + x_60 = lean_alloc_ctor(1, 2, 0); } else { - x_72 = x_71; -} -lean_ctor_set(x_72, 0, x_69); -lean_ctor_set(x_72, 1, x_70); -return x_72; -} -} + x_60 = x_59; } +lean_ctor_set(x_60, 0, x_57); +lean_ctor_set(x_60, 1, x_58); +return x_60; } -else -{ -lean_object* x_73; lean_object* x_74; -x_73 = lean_ctor_get(x_17, 1); -lean_inc(x_73); -lean_dec(x_17); -lean_inc(x_10); -lean_inc(x_9); -lean_inc(x_8); -lean_inc(x_7); -lean_inc(x_6); -lean_inc(x_5); -lean_inc(x_4); -lean_inc(x_3); -lean_inc(x_2); -x_74 = l_Lean_Meta_Grind_Arith_CommRing_EqCnstr_setUnsat(x_1, x_2, x_3, x_4, x_5, x_6, x_7, x_8, x_9, x_10, x_73); -if (lean_obj_tag(x_74) == 0) -{ -lean_object* x_75; lean_object* x_76; lean_object* x_77; -x_75 = lean_ctor_get(x_74, 0); -lean_inc(x_75); -x_76 = lean_ctor_get(x_74, 1); -lean_inc(x_76); 
-lean_dec(x_74); -x_77 = lean_apply_11(x_14, x_75, x_2, x_3, x_4, x_5, x_6, x_7, x_8, x_9, x_10, x_76); -return x_77; } else { -uint8_t x_78; +lean_object* x_61; lean_object* x_62; lean_object* x_63; lean_object* x_64; lean_object* x_65; +lean_dec(x_11); lean_dec(x_10); lean_dec(x_9); lean_dec(x_8); @@ -7151,465 +6943,320 @@ lean_dec(x_6); lean_dec(x_5); lean_dec(x_4); lean_dec(x_3); -lean_dec(x_2); -x_78 = !lean_is_exclusive(x_74); -if (x_78 == 0) -{ -return x_74; +lean_dec(x_1); +x_61 = lean_ctor_get(x_43, 1); +lean_inc(x_61); +if (lean_is_exclusive(x_43)) { + lean_ctor_release(x_43, 0); + lean_ctor_release(x_43, 1); + x_62 = x_43; +} else { + lean_dec_ref(x_43); + x_62 = lean_box(0); } -else -{ -lean_object* x_79; lean_object* x_80; lean_object* x_81; -x_79 = lean_ctor_get(x_74, 0); -x_80 = lean_ctor_get(x_74, 1); -lean_inc(x_80); -lean_inc(x_79); -lean_dec(x_74); -x_81 = lean_alloc_ctor(1, 2, 0); -lean_ctor_set(x_81, 0, x_79); -lean_ctor_set(x_81, 1, x_80); -return x_81; +lean_inc(x_42); +x_63 = lean_alloc_ctor(1, 1, 0); +lean_ctor_set(x_63, 0, x_42); +x_64 = lean_alloc_ctor(0, 2, 0); +lean_ctor_set(x_64, 0, x_63); +lean_ctor_set(x_64, 1, x_42); +if (lean_is_scalar(x_62)) { + x_65 = lean_alloc_ctor(0, 2, 0); +} else { + x_65 = x_62; +} +lean_ctor_set(x_65, 0, x_64); +lean_ctor_set(x_65, 1, x_61); +return x_65; } } } } -else +LEAN_EXPORT lean_object* l_Lean_Meta_Grind_Arith_CommRing_EqCnstr_simplify(lean_object* x_1, lean_object* x_2, lean_object* x_3, lean_object* x_4, lean_object* x_5, lean_object* x_6, lean_object* x_7, lean_object* x_8, lean_object* x_9, lean_object* x_10, lean_object* x_11) { +_start: { -uint8_t x_82; -lean_dec(x_10); -lean_dec(x_9); -lean_dec(x_8); -lean_dec(x_7); -lean_dec(x_6); -lean_dec(x_5); -lean_dec(x_4); -lean_dec(x_3); -lean_dec(x_2); -lean_dec(x_1); -x_82 = !lean_is_exclusive(x_17); -if (x_82 == 0) +lean_object* x_12; lean_object* x_13; lean_object* x_14; +x_12 = lean_box(0); +x_13 = lean_alloc_ctor(0, 2, 0); +lean_ctor_set(x_13, 0, x_12); +lean_ctor_set(x_13, 1, x_1); +x_14 = l_Lean_Loop_forIn_loop___at_Lean_Meta_Grind_Arith_CommRing_EqCnstr_simplify___spec__1(x_12, x_13, x_2, x_3, x_4, x_5, x_6, x_7, x_8, x_9, x_10, x_11); +if (lean_obj_tag(x_14) == 0) { -return x_17; +lean_object* x_15; lean_object* x_16; +x_15 = lean_ctor_get(x_14, 0); +lean_inc(x_15); +x_16 = lean_ctor_get(x_15, 0); +lean_inc(x_16); +if (lean_obj_tag(x_16) == 0) +{ +uint8_t x_17; +x_17 = !lean_is_exclusive(x_14); +if (x_17 == 0) +{ +lean_object* x_18; lean_object* x_19; +x_18 = lean_ctor_get(x_14, 0); +lean_dec(x_18); +x_19 = lean_ctor_get(x_15, 1); +lean_inc(x_19); +lean_dec(x_15); +lean_ctor_set(x_14, 0, x_19); +return x_14; } else { -lean_object* x_83; lean_object* x_84; lean_object* x_85; -x_83 = lean_ctor_get(x_17, 0); -x_84 = lean_ctor_get(x_17, 1); -lean_inc(x_84); -lean_inc(x_83); -lean_dec(x_17); -x_85 = lean_alloc_ctor(1, 2, 0); -lean_ctor_set(x_85, 0, x_83); -lean_ctor_set(x_85, 1, x_84); -return x_85; -} +lean_object* x_20; lean_object* x_21; lean_object* x_22; +x_20 = lean_ctor_get(x_14, 1); +lean_inc(x_20); +lean_dec(x_14); +x_21 = lean_ctor_get(x_15, 1); +lean_inc(x_21); +lean_dec(x_15); +x_22 = lean_alloc_ctor(0, 2, 0); +lean_ctor_set(x_22, 0, x_21); +lean_ctor_set(x_22, 1, x_20); +return x_22; } } else { -lean_object* x_86; lean_object* x_87; lean_object* x_88; uint8_t x_89; -x_86 = l_Lean_Meta_Grind_Arith_CommRing_EqCnstr_checkConstant___closed__7; -x_87 = l_Lean_isTracingEnabledFor___at_Lean_Meta_Grind_Arith_CommRing_Null_setEqUnsat___spec__1(x_86, x_2, x_3, x_4, 
x_5, x_6, x_7, x_8, x_9, x_10, x_11); -x_88 = lean_ctor_get(x_87, 0); -lean_inc(x_88); -x_89 = lean_unbox(x_88); -lean_dec(x_88); -if (x_89 == 0) +uint8_t x_23; +lean_dec(x_15); +x_23 = !lean_is_exclusive(x_14); +if (x_23 == 0) { -lean_object* x_90; lean_object* x_91; lean_object* x_92; -lean_dec(x_1); -x_90 = lean_ctor_get(x_87, 1); -lean_inc(x_90); -lean_dec(x_87); -x_91 = lean_box(0); -x_92 = lean_apply_11(x_14, x_91, x_2, x_3, x_4, x_5, x_6, x_7, x_8, x_9, x_10, x_90); -return x_92; +lean_object* x_24; lean_object* x_25; +x_24 = lean_ctor_get(x_14, 0); +lean_dec(x_24); +x_25 = lean_ctor_get(x_16, 0); +lean_inc(x_25); +lean_dec(x_16); +lean_ctor_set(x_14, 0, x_25); +return x_14; } else { -uint8_t x_93; -x_93 = !lean_is_exclusive(x_87); -if (x_93 == 0) -{ -lean_object* x_94; lean_object* x_95; lean_object* x_96; -x_94 = lean_ctor_get(x_87, 1); -x_95 = lean_ctor_get(x_87, 0); -lean_dec(x_95); -x_96 = l_Lean_Meta_Grind_updateLastTag(x_3, x_4, x_5, x_6, x_7, x_8, x_9, x_10, x_94); -if (lean_obj_tag(x_96) == 0) -{ -lean_object* x_97; lean_object* x_98; -x_97 = lean_ctor_get(x_96, 1); -lean_inc(x_97); -lean_dec(x_96); -lean_inc(x_10); -lean_inc(x_9); -lean_inc(x_8); -lean_inc(x_7); -x_98 = l_Lean_Meta_Grind_Arith_CommRing_EqCnstr_denoteExpr(x_1, x_2, x_3, x_4, x_5, x_6, x_7, x_8, x_9, x_10, x_97); -if (lean_obj_tag(x_98) == 0) -{ -lean_object* x_99; lean_object* x_100; lean_object* x_101; lean_object* x_102; lean_object* x_103; lean_object* x_104; lean_object* x_105; lean_object* x_106; lean_object* x_107; -x_99 = lean_ctor_get(x_98, 0); -lean_inc(x_99); -x_100 = lean_ctor_get(x_98, 1); -lean_inc(x_100); -lean_dec(x_98); -x_101 = l_Lean_MessageData_ofExpr(x_99); -x_102 = l___private_Lean_Meta_Tactic_Grind_Arith_CommRing_EqCnstr_0__Lean_Meta_Grind_Arith_CommRing_toRingExpr_x3f___closed__5; -lean_ctor_set_tag(x_87, 7); -lean_ctor_set(x_87, 1, x_101); -lean_ctor_set(x_87, 0, x_102); -x_103 = lean_alloc_ctor(7, 2, 0); -lean_ctor_set(x_103, 0, x_87); -lean_ctor_set(x_103, 1, x_102); -x_104 = l_Lean_addTrace___at_Lean_Meta_Grind_Arith_CommRing_Null_setEqUnsat___spec__3(x_86, x_103, x_2, x_3, x_4, x_5, x_6, x_7, x_8, x_9, x_10, x_100); -x_105 = lean_ctor_get(x_104, 0); -lean_inc(x_105); -x_106 = lean_ctor_get(x_104, 1); -lean_inc(x_106); -lean_dec(x_104); -x_107 = lean_apply_11(x_14, x_105, x_2, x_3, x_4, x_5, x_6, x_7, x_8, x_9, x_10, x_106); -return x_107; +lean_object* x_26; lean_object* x_27; lean_object* x_28; +x_26 = lean_ctor_get(x_14, 1); +lean_inc(x_26); +lean_dec(x_14); +x_27 = lean_ctor_get(x_16, 0); +lean_inc(x_27); +lean_dec(x_16); +x_28 = lean_alloc_ctor(0, 2, 0); +lean_ctor_set(x_28, 0, x_27); +lean_ctor_set(x_28, 1, x_26); +return x_28; +} +} } else { -uint8_t x_108; -lean_free_object(x_87); -lean_dec(x_10); -lean_dec(x_9); -lean_dec(x_8); -lean_dec(x_7); -lean_dec(x_6); -lean_dec(x_5); -lean_dec(x_4); -lean_dec(x_3); -lean_dec(x_2); -x_108 = !lean_is_exclusive(x_98); -if (x_108 == 0) +uint8_t x_29; +x_29 = !lean_is_exclusive(x_14); +if (x_29 == 0) { -return x_98; +return x_14; } else { -lean_object* x_109; lean_object* x_110; lean_object* x_111; -x_109 = lean_ctor_get(x_98, 0); -x_110 = lean_ctor_get(x_98, 1); -lean_inc(x_110); -lean_inc(x_109); -lean_dec(x_98); -x_111 = lean_alloc_ctor(1, 2, 0); -lean_ctor_set(x_111, 0, x_109); -lean_ctor_set(x_111, 1, x_110); -return x_111; +lean_object* x_30; lean_object* x_31; lean_object* x_32; +x_30 = lean_ctor_get(x_14, 0); +x_31 = lean_ctor_get(x_14, 1); +lean_inc(x_31); +lean_inc(x_30); +lean_dec(x_14); +x_32 = lean_alloc_ctor(1, 2, 0); 
+lean_ctor_set(x_32, 0, x_30); +lean_ctor_set(x_32, 1, x_31); +return x_32; } } } -else +} +LEAN_EXPORT lean_object* l_Lean_Loop_forIn_loop___at_Lean_Meta_Grind_Arith_CommRing_EqCnstr_simplify___spec__1___lambda__1___boxed(lean_object* x_1, lean_object* x_2, lean_object* x_3, lean_object* x_4, lean_object* x_5, lean_object* x_6, lean_object* x_7, lean_object* x_8, lean_object* x_9, lean_object* x_10, lean_object* x_11, lean_object* x_12, lean_object* x_13) { +_start: { -uint8_t x_112; -lean_free_object(x_87); -lean_dec(x_10); -lean_dec(x_9); -lean_dec(x_8); -lean_dec(x_7); -lean_dec(x_6); -lean_dec(x_5); -lean_dec(x_4); +lean_object* x_14; +x_14 = l_Lean_Loop_forIn_loop___at_Lean_Meta_Grind_Arith_CommRing_EqCnstr_simplify___spec__1___lambda__1(x_1, x_2, x_3, x_4, x_5, x_6, x_7, x_8, x_9, x_10, x_11, x_12, x_13); lean_dec(x_3); -lean_dec(x_2); -lean_dec(x_1); -x_112 = !lean_is_exclusive(x_96); -if (x_112 == 0) -{ -return x_96; +return x_14; } -else -{ -lean_object* x_113; lean_object* x_114; lean_object* x_115; -x_113 = lean_ctor_get(x_96, 0); -x_114 = lean_ctor_get(x_96, 1); -lean_inc(x_114); -lean_inc(x_113); -lean_dec(x_96); -x_115 = lean_alloc_ctor(1, 2, 0); -lean_ctor_set(x_115, 0, x_113); -lean_ctor_set(x_115, 1, x_114); -return x_115; } +LEAN_EXPORT lean_object* l_Lean_Meta_Grind_Arith_CommRing_EqCnstr_checkConstant___lambda__1(lean_object* x_1, lean_object* x_2, lean_object* x_3, lean_object* x_4, lean_object* x_5, lean_object* x_6, lean_object* x_7, lean_object* x_8, lean_object* x_9, lean_object* x_10, lean_object* x_11) { +_start: +{ +uint8_t x_12; lean_object* x_13; lean_object* x_14; +x_12 = 1; +x_13 = lean_box(x_12); +x_14 = lean_alloc_ctor(0, 2, 0); +lean_ctor_set(x_14, 0, x_13); +lean_ctor_set(x_14, 1, x_11); +return x_14; } } -else -{ -lean_object* x_116; lean_object* x_117; -x_116 = lean_ctor_get(x_87, 1); -lean_inc(x_116); -lean_dec(x_87); -x_117 = l_Lean_Meta_Grind_updateLastTag(x_3, x_4, x_5, x_6, x_7, x_8, x_9, x_10, x_116); -if (lean_obj_tag(x_117) == 0) -{ -lean_object* x_118; lean_object* x_119; -x_118 = lean_ctor_get(x_117, 1); -lean_inc(x_118); -lean_dec(x_117); -lean_inc(x_10); -lean_inc(x_9); -lean_inc(x_8); -lean_inc(x_7); -x_119 = l_Lean_Meta_Grind_Arith_CommRing_EqCnstr_denoteExpr(x_1, x_2, x_3, x_4, x_5, x_6, x_7, x_8, x_9, x_10, x_118); -if (lean_obj_tag(x_119) == 0) +static lean_object* _init_l_Lean_Meta_Grind_Arith_CommRing_EqCnstr_checkConstant___closed__1() { +_start: { -lean_object* x_120; lean_object* x_121; lean_object* x_122; lean_object* x_123; lean_object* x_124; lean_object* x_125; lean_object* x_126; lean_object* x_127; lean_object* x_128; lean_object* x_129; -x_120 = lean_ctor_get(x_119, 0); -lean_inc(x_120); -x_121 = lean_ctor_get(x_119, 1); -lean_inc(x_121); -lean_dec(x_119); -x_122 = l_Lean_MessageData_ofExpr(x_120); -x_123 = l___private_Lean_Meta_Tactic_Grind_Arith_CommRing_EqCnstr_0__Lean_Meta_Grind_Arith_CommRing_toRingExpr_x3f___closed__5; -x_124 = lean_alloc_ctor(7, 2, 0); -lean_ctor_set(x_124, 0, x_123); -lean_ctor_set(x_124, 1, x_122); -x_125 = lean_alloc_ctor(7, 2, 0); -lean_ctor_set(x_125, 0, x_124); -lean_ctor_set(x_125, 1, x_123); -x_126 = l_Lean_addTrace___at_Lean_Meta_Grind_Arith_CommRing_Null_setEqUnsat___spec__3(x_86, x_125, x_2, x_3, x_4, x_5, x_6, x_7, x_8, x_9, x_10, x_121); -x_127 = lean_ctor_get(x_126, 0); -lean_inc(x_127); -x_128 = lean_ctor_get(x_126, 1); -lean_inc(x_128); -lean_dec(x_126); -x_129 = lean_apply_11(x_14, x_127, x_2, x_3, x_4, x_5, x_6, x_7, x_8, x_9, x_10, x_128); -return x_129; +lean_object* x_1; +x_1 = 
lean_alloc_closure((void*)(l_Lean_Meta_Grind_Arith_CommRing_EqCnstr_checkConstant___lambda__1___boxed), 11, 0); +return x_1; } -else +} +static lean_object* _init_l_Lean_Meta_Grind_Arith_CommRing_EqCnstr_checkConstant___closed__2() { +_start: { -lean_object* x_130; lean_object* x_131; lean_object* x_132; lean_object* x_133; -lean_dec(x_10); -lean_dec(x_9); -lean_dec(x_8); -lean_dec(x_7); -lean_dec(x_6); -lean_dec(x_5); -lean_dec(x_4); -lean_dec(x_3); -lean_dec(x_2); -x_130 = lean_ctor_get(x_119, 0); -lean_inc(x_130); -x_131 = lean_ctor_get(x_119, 1); -lean_inc(x_131); -if (lean_is_exclusive(x_119)) { - lean_ctor_release(x_119, 0); - lean_ctor_release(x_119, 1); - x_132 = x_119; -} else { - lean_dec_ref(x_119); - x_132 = lean_box(0); +lean_object* x_1; lean_object* x_2; +x_1 = lean_unsigned_to_nat(0u); +x_2 = lean_nat_to_int(x_1); +return x_2; } -if (lean_is_scalar(x_132)) { - x_133 = lean_alloc_ctor(1, 2, 0); -} else { - x_133 = x_132; } -lean_ctor_set(x_133, 0, x_130); -lean_ctor_set(x_133, 1, x_131); -return x_133; +static lean_object* _init_l_Lean_Meta_Grind_Arith_CommRing_EqCnstr_checkConstant___closed__3() { +_start: +{ +lean_object* x_1; +x_1 = lean_mk_string_unchecked("assert", 6, 6); +return x_1; } } -else +static lean_object* _init_l_Lean_Meta_Grind_Arith_CommRing_EqCnstr_checkConstant___closed__4() { +_start: { -lean_object* x_134; lean_object* x_135; lean_object* x_136; lean_object* x_137; -lean_dec(x_10); -lean_dec(x_9); -lean_dec(x_8); -lean_dec(x_7); -lean_dec(x_6); -lean_dec(x_5); -lean_dec(x_4); -lean_dec(x_3); -lean_dec(x_2); -lean_dec(x_1); -x_134 = lean_ctor_get(x_117, 0); -lean_inc(x_134); -x_135 = lean_ctor_get(x_117, 1); -lean_inc(x_135); -if (lean_is_exclusive(x_117)) { - lean_ctor_release(x_117, 0); - lean_ctor_release(x_117, 1); - x_136 = x_117; -} else { - lean_dec_ref(x_117); - x_136 = lean_box(0); -} -if (lean_is_scalar(x_136)) { - x_137 = lean_alloc_ctor(1, 2, 0); -} else { - x_137 = x_136; -} -lean_ctor_set(x_137, 0, x_134); -lean_ctor_set(x_137, 1, x_135); -return x_137; -} +lean_object* x_1; +x_1 = lean_mk_string_unchecked("discard", 7, 7); +return x_1; } } +static lean_object* _init_l_Lean_Meta_Grind_Arith_CommRing_EqCnstr_checkConstant___closed__5() { +_start: +{ +lean_object* x_1; lean_object* x_2; lean_object* x_3; lean_object* x_4; lean_object* x_5; +x_1 = l_Lean_Meta_Grind_Arith_CommRing_PolyDerivation_simplifyWith___closed__1; +x_2 = l_Lean_Meta_Grind_Arith_CommRing_PolyDerivation_simplifyWith___closed__2; +x_3 = l_Lean_Meta_Grind_Arith_CommRing_EqCnstr_checkConstant___closed__3; +x_4 = l_Lean_Meta_Grind_Arith_CommRing_EqCnstr_checkConstant___closed__4; +x_5 = l_Lean_Name_mkStr4(x_1, x_2, x_3, x_4); +return x_5; } } -else +static lean_object* _init_l_Lean_Meta_Grind_Arith_CommRing_EqCnstr_checkConstant___closed__6() { +_start: { -uint8_t x_138; lean_object* x_139; lean_object* x_140; -lean_dec(x_12); -lean_dec(x_10); -lean_dec(x_9); -lean_dec(x_8); -lean_dec(x_7); -lean_dec(x_6); -lean_dec(x_5); -lean_dec(x_4); -lean_dec(x_3); -lean_dec(x_2); -lean_dec(x_1); -x_138 = 0; -x_139 = lean_box(x_138); -x_140 = lean_alloc_ctor(0, 2, 0); -lean_ctor_set(x_140, 0, x_139); -lean_ctor_set(x_140, 1, x_11); -return x_140; -} +lean_object* x_1; +x_1 = lean_mk_string_unchecked("trivial", 7, 7); +return x_1; } } -LEAN_EXPORT lean_object* l_Lean_Meta_Grind_Arith_CommRing_EqCnstr_checkConstant___lambda__1___boxed(lean_object* x_1, lean_object* x_2, lean_object* x_3, lean_object* x_4, lean_object* x_5, lean_object* x_6, lean_object* x_7, lean_object* x_8, lean_object* 
x_9, lean_object* x_10, lean_object* x_11) { +static lean_object* _init_l_Lean_Meta_Grind_Arith_CommRing_EqCnstr_checkConstant___closed__7() { _start: { -lean_object* x_12; -x_12 = l_Lean_Meta_Grind_Arith_CommRing_EqCnstr_checkConstant___lambda__1(x_1, x_2, x_3, x_4, x_5, x_6, x_7, x_8, x_9, x_10, x_11); -lean_dec(x_10); -lean_dec(x_9); -lean_dec(x_8); -lean_dec(x_7); -lean_dec(x_6); -lean_dec(x_5); -lean_dec(x_4); -lean_dec(x_3); -lean_dec(x_2); -lean_dec(x_1); -return x_12; +lean_object* x_1; lean_object* x_2; lean_object* x_3; lean_object* x_4; lean_object* x_5; +x_1 = l_Lean_Meta_Grind_Arith_CommRing_PolyDerivation_simplifyWith___closed__1; +x_2 = l_Lean_Meta_Grind_Arith_CommRing_PolyDerivation_simplifyWith___closed__2; +x_3 = l_Lean_Meta_Grind_Arith_CommRing_EqCnstr_checkConstant___closed__3; +x_4 = l_Lean_Meta_Grind_Arith_CommRing_EqCnstr_checkConstant___closed__6; +x_5 = l_Lean_Name_mkStr4(x_1, x_2, x_3, x_4); +return x_5; } } -LEAN_EXPORT lean_object* l_Lean_Meta_Grind_Arith_CommRing_EqCnstr_simplifyAndCheck(lean_object* x_1, lean_object* x_2, lean_object* x_3, lean_object* x_4, lean_object* x_5, lean_object* x_6, lean_object* x_7, lean_object* x_8, lean_object* x_9, lean_object* x_10, lean_object* x_11) { +LEAN_EXPORT lean_object* l_Lean_Meta_Grind_Arith_CommRing_EqCnstr_checkConstant(lean_object* x_1, lean_object* x_2, lean_object* x_3, lean_object* x_4, lean_object* x_5, lean_object* x_6, lean_object* x_7, lean_object* x_8, lean_object* x_9, lean_object* x_10, lean_object* x_11) { _start: { lean_object* x_12; -lean_inc(x_10); -lean_inc(x_9); -lean_inc(x_8); -lean_inc(x_7); -lean_inc(x_6); -lean_inc(x_5); -lean_inc(x_4); -lean_inc(x_3); -lean_inc(x_2); -x_12 = l_Lean_Meta_Grind_Arith_CommRing_EqCnstr_simplify(x_1, x_2, x_3, x_4, x_5, x_6, x_7, x_8, x_9, x_10, x_11); +x_12 = lean_ctor_get(x_1, 0); +lean_inc(x_12); if (lean_obj_tag(x_12) == 0) { -lean_object* x_13; lean_object* x_14; lean_object* x_15; +lean_object* x_13; lean_object* x_14; lean_object* x_15; uint8_t x_16; x_13 = lean_ctor_get(x_12, 0); lean_inc(x_13); -x_14 = lean_ctor_get(x_12, 1); -lean_inc(x_14); lean_dec(x_12); -lean_inc(x_13); -x_15 = l_Lean_Meta_Grind_Arith_CommRing_EqCnstr_checkConstant(x_13, x_2, x_3, x_4, x_5, x_6, x_7, x_8, x_9, x_10, x_14); -if (lean_obj_tag(x_15) == 0) -{ -lean_object* x_16; uint8_t x_17; -x_16 = lean_ctor_get(x_15, 0); -lean_inc(x_16); -x_17 = lean_unbox(x_16); -lean_dec(x_16); -if (x_17 == 0) -{ -uint8_t x_18; -x_18 = !lean_is_exclusive(x_15); -if (x_18 == 0) +x_14 = l_Lean_Meta_Grind_Arith_CommRing_EqCnstr_checkConstant___closed__1; +x_15 = l_Lean_Meta_Grind_Arith_CommRing_EqCnstr_checkConstant___closed__2; +x_16 = lean_int_dec_eq(x_13, x_15); +lean_dec(x_13); +if (x_16 == 0) { -lean_object* x_19; lean_object* x_20; -x_19 = lean_ctor_get(x_15, 0); -lean_dec(x_19); -x_20 = lean_alloc_ctor(1, 1, 0); -lean_ctor_set(x_20, 0, x_13); -lean_ctor_set(x_15, 0, x_20); -return x_15; -} -else +lean_object* x_17; +x_17 = l_Lean_Meta_Grind_Arith_CommRing_hasChar(x_2, x_3, x_4, x_5, x_6, x_7, x_8, x_9, x_10, x_11); +if (lean_obj_tag(x_17) == 0) { -lean_object* x_21; lean_object* x_22; lean_object* x_23; -x_21 = lean_ctor_get(x_15, 1); -lean_inc(x_21); -lean_dec(x_15); -x_22 = lean_alloc_ctor(1, 1, 0); -lean_ctor_set(x_22, 0, x_13); -x_23 = lean_alloc_ctor(0, 2, 0); -lean_ctor_set(x_23, 0, x_22); -lean_ctor_set(x_23, 1, x_21); -return x_23; -} -} -else +lean_object* x_18; uint8_t x_19; +x_18 = lean_ctor_get(x_17, 0); +lean_inc(x_18); +x_19 = lean_unbox(x_18); +lean_dec(x_18); +if (x_19 == 0) { 
-uint8_t x_24; -lean_dec(x_13); -x_24 = !lean_is_exclusive(x_15); +lean_object* x_20; lean_object* x_21; lean_object* x_22; lean_object* x_23; uint8_t x_24; +x_20 = lean_ctor_get(x_17, 1); +lean_inc(x_20); +lean_dec(x_17); +x_21 = l_Lean_Meta_Grind_Arith_CommRing_EqCnstr_checkConstant___closed__5; +x_22 = l_Lean_isTracingEnabledFor___at_Lean_Meta_Grind_Arith_CommRing_Null_setEqUnsat___spec__1(x_21, x_2, x_3, x_4, x_5, x_6, x_7, x_8, x_9, x_10, x_20); +x_23 = lean_ctor_get(x_22, 0); +lean_inc(x_23); +x_24 = lean_unbox(x_23); +lean_dec(x_23); if (x_24 == 0) { -lean_object* x_25; lean_object* x_26; -x_25 = lean_ctor_get(x_15, 0); -lean_dec(x_25); +lean_object* x_25; lean_object* x_26; lean_object* x_27; +lean_dec(x_1); +x_25 = lean_ctor_get(x_22, 1); +lean_inc(x_25); +lean_dec(x_22); x_26 = lean_box(0); -lean_ctor_set(x_15, 0, x_26); -return x_15; -} -else -{ -lean_object* x_27; lean_object* x_28; lean_object* x_29; -x_27 = lean_ctor_get(x_15, 1); -lean_inc(x_27); -lean_dec(x_15); -x_28 = lean_box(0); -x_29 = lean_alloc_ctor(0, 2, 0); -lean_ctor_set(x_29, 0, x_28); -lean_ctor_set(x_29, 1, x_27); -return x_29; -} -} +x_27 = lean_apply_11(x_14, x_26, x_2, x_3, x_4, x_5, x_6, x_7, x_8, x_9, x_10, x_25); +return x_27; } else { -uint8_t x_30; -lean_dec(x_13); -x_30 = !lean_is_exclusive(x_15); -if (x_30 == 0) +uint8_t x_28; +x_28 = !lean_is_exclusive(x_22); +if (x_28 == 0) { -return x_15; -} -else +lean_object* x_29; lean_object* x_30; lean_object* x_31; +x_29 = lean_ctor_get(x_22, 1); +x_30 = lean_ctor_get(x_22, 0); +lean_dec(x_30); +x_31 = l_Lean_Meta_Grind_updateLastTag(x_3, x_4, x_5, x_6, x_7, x_8, x_9, x_10, x_29); +if (lean_obj_tag(x_31) == 0) { -lean_object* x_31; lean_object* x_32; lean_object* x_33; -x_31 = lean_ctor_get(x_15, 0); -x_32 = lean_ctor_get(x_15, 1); +lean_object* x_32; lean_object* x_33; +x_32 = lean_ctor_get(x_31, 1); lean_inc(x_32); -lean_inc(x_31); -lean_dec(x_15); -x_33 = lean_alloc_ctor(1, 2, 0); -lean_ctor_set(x_33, 0, x_31); -lean_ctor_set(x_33, 1, x_32); -return x_33; -} -} +lean_dec(x_31); +x_33 = l_Lean_Meta_Grind_Arith_CommRing_EqCnstr_denoteExpr___at_Lean_Meta_Grind_Arith_CommRing_Null_setEqUnsat___spec__4(x_1, x_2, x_3, x_4, x_5, x_6, x_7, x_8, x_9, x_10, x_32); +if (lean_obj_tag(x_33) == 0) +{ +lean_object* x_34; lean_object* x_35; lean_object* x_36; lean_object* x_37; lean_object* x_38; lean_object* x_39; lean_object* x_40; lean_object* x_41; lean_object* x_42; +x_34 = lean_ctor_get(x_33, 0); +lean_inc(x_34); +x_35 = lean_ctor_get(x_33, 1); +lean_inc(x_35); +lean_dec(x_33); +x_36 = l_Lean_MessageData_ofExpr(x_34); +x_37 = l___private_Lean_Meta_Tactic_Grind_Arith_CommRing_EqCnstr_0__Lean_Meta_Grind_Arith_CommRing_toRingExpr_x3f___closed__5; +lean_ctor_set_tag(x_22, 7); +lean_ctor_set(x_22, 1, x_36); +lean_ctor_set(x_22, 0, x_37); +x_38 = lean_alloc_ctor(7, 2, 0); +lean_ctor_set(x_38, 0, x_22); +lean_ctor_set(x_38, 1, x_37); +x_39 = l_Lean_addTrace___at_Lean_Meta_Grind_Arith_CommRing_Null_setEqUnsat___spec__3(x_21, x_38, x_2, x_3, x_4, x_5, x_6, x_7, x_8, x_9, x_10, x_35); +x_40 = lean_ctor_get(x_39, 0); +lean_inc(x_40); +x_41 = lean_ctor_get(x_39, 1); +lean_inc(x_41); +lean_dec(x_39); +x_42 = lean_apply_11(x_14, x_40, x_2, x_3, x_4, x_5, x_6, x_7, x_8, x_9, x_10, x_41); +return x_42; } else { -uint8_t x_34; +uint8_t x_43; +lean_free_object(x_22); lean_dec(x_10); lean_dec(x_9); lean_dec(x_8); @@ -7619,4260 +7266,3663 @@ lean_dec(x_5); lean_dec(x_4); lean_dec(x_3); lean_dec(x_2); -x_34 = !lean_is_exclusive(x_12); -if (x_34 == 0) +x_43 = !lean_is_exclusive(x_33); 
+if (x_43 == 0) { -return x_12; +return x_33; } else { -lean_object* x_35; lean_object* x_36; lean_object* x_37; -x_35 = lean_ctor_get(x_12, 0); -x_36 = lean_ctor_get(x_12, 1); -lean_inc(x_36); -lean_inc(x_35); -lean_dec(x_12); -x_37 = lean_alloc_ctor(1, 2, 0); -lean_ctor_set(x_37, 0, x_35); -lean_ctor_set(x_37, 1, x_36); -return x_37; -} -} +lean_object* x_44; lean_object* x_45; lean_object* x_46; +x_44 = lean_ctor_get(x_33, 0); +x_45 = lean_ctor_get(x_33, 1); +lean_inc(x_45); +lean_inc(x_44); +lean_dec(x_33); +x_46 = lean_alloc_ctor(1, 2, 0); +lean_ctor_set(x_46, 0, x_44); +lean_ctor_set(x_46, 1, x_45); +return x_46; } } -LEAN_EXPORT lean_object* l___private_Lean_Meta_Tactic_Grind_Arith_CommRing_EqCnstr_0__Lean_Meta_Grind_Arith_CommRing_addSorted(lean_object* x_1, lean_object* x_2) { -_start: -{ -if (lean_obj_tag(x_2) == 0) -{ -lean_object* x_3; lean_object* x_4; -x_3 = lean_box(0); -x_4 = lean_alloc_ctor(1, 2, 0); -lean_ctor_set(x_4, 0, x_1); -lean_ctor_set(x_4, 1, x_3); -return x_4; } else { -uint8_t x_5; -x_5 = !lean_is_exclusive(x_2); -if (x_5 == 0) -{ -lean_object* x_6; lean_object* x_7; lean_object* x_8; lean_object* x_9; lean_object* x_10; lean_object* x_11; uint8_t x_12; uint8_t x_13; uint8_t x_14; -x_6 = lean_ctor_get(x_2, 0); -x_7 = lean_ctor_get(x_2, 1); -x_8 = lean_ctor_get(x_1, 0); -lean_inc(x_8); -x_9 = l_Lean_Grind_CommRing_Poly_lm(x_8); -lean_dec(x_8); -x_10 = lean_ctor_get(x_6, 0); -lean_inc(x_10); -x_11 = l_Lean_Grind_CommRing_Poly_lm(x_10); +uint8_t x_47; +lean_free_object(x_22); lean_dec(x_10); -x_12 = l_Lean_Grind_CommRing_Mon_grevlex(x_9, x_11); lean_dec(x_9); -x_13 = 2; -x_14 = l_instDecidableEqOrdering(x_12, x_13); -if (x_14 == 0) +lean_dec(x_8); +lean_dec(x_7); +lean_dec(x_6); +lean_dec(x_5); +lean_dec(x_4); +lean_dec(x_3); +lean_dec(x_2); +lean_dec(x_1); +x_47 = !lean_is_exclusive(x_31); +if (x_47 == 0) { -lean_object* x_15; -x_15 = l___private_Lean_Meta_Tactic_Grind_Arith_CommRing_EqCnstr_0__Lean_Meta_Grind_Arith_CommRing_addSorted(x_1, x_7); -lean_ctor_set(x_2, 1, x_15); -return x_2; +return x_31; } else { -lean_object* x_16; -x_16 = lean_alloc_ctor(1, 2, 0); -lean_ctor_set(x_16, 0, x_1); -lean_ctor_set(x_16, 1, x_2); -return x_16; +lean_object* x_48; lean_object* x_49; lean_object* x_50; +x_48 = lean_ctor_get(x_31, 0); +x_49 = lean_ctor_get(x_31, 1); +lean_inc(x_49); +lean_inc(x_48); +lean_dec(x_31); +x_50 = lean_alloc_ctor(1, 2, 0); +lean_ctor_set(x_50, 0, x_48); +lean_ctor_set(x_50, 1, x_49); +return x_50; +} } } else { -lean_object* x_17; lean_object* x_18; lean_object* x_19; lean_object* x_20; lean_object* x_21; lean_object* x_22; uint8_t x_23; uint8_t x_24; uint8_t x_25; -x_17 = lean_ctor_get(x_2, 0); -x_18 = lean_ctor_get(x_2, 1); -lean_inc(x_18); -lean_inc(x_17); -lean_dec(x_2); -x_19 = lean_ctor_get(x_1, 0); -lean_inc(x_19); -x_20 = l_Lean_Grind_CommRing_Poly_lm(x_19); -lean_dec(x_19); -x_21 = lean_ctor_get(x_17, 0); -lean_inc(x_21); -x_22 = l_Lean_Grind_CommRing_Poly_lm(x_21); -lean_dec(x_21); -x_23 = l_Lean_Grind_CommRing_Mon_grevlex(x_20, x_22); -lean_dec(x_20); -x_24 = 2; -x_25 = l_instDecidableEqOrdering(x_23, x_24); -if (x_25 == 0) +lean_object* x_51; lean_object* x_52; +x_51 = lean_ctor_get(x_22, 1); +lean_inc(x_51); +lean_dec(x_22); +x_52 = l_Lean_Meta_Grind_updateLastTag(x_3, x_4, x_5, x_6, x_7, x_8, x_9, x_10, x_51); +if (lean_obj_tag(x_52) == 0) { -lean_object* x_26; lean_object* x_27; -x_26 = l___private_Lean_Meta_Tactic_Grind_Arith_CommRing_EqCnstr_0__Lean_Meta_Grind_Arith_CommRing_addSorted(x_1, x_18); -x_27 = lean_alloc_ctor(1, 2, 0); 
-lean_ctor_set(x_27, 0, x_17); -lean_ctor_set(x_27, 1, x_26); -return x_27; +lean_object* x_53; lean_object* x_54; +x_53 = lean_ctor_get(x_52, 1); +lean_inc(x_53); +lean_dec(x_52); +x_54 = l_Lean_Meta_Grind_Arith_CommRing_EqCnstr_denoteExpr___at_Lean_Meta_Grind_Arith_CommRing_Null_setEqUnsat___spec__4(x_1, x_2, x_3, x_4, x_5, x_6, x_7, x_8, x_9, x_10, x_53); +if (lean_obj_tag(x_54) == 0) +{ +lean_object* x_55; lean_object* x_56; lean_object* x_57; lean_object* x_58; lean_object* x_59; lean_object* x_60; lean_object* x_61; lean_object* x_62; lean_object* x_63; lean_object* x_64; +x_55 = lean_ctor_get(x_54, 0); +lean_inc(x_55); +x_56 = lean_ctor_get(x_54, 1); +lean_inc(x_56); +lean_dec(x_54); +x_57 = l_Lean_MessageData_ofExpr(x_55); +x_58 = l___private_Lean_Meta_Tactic_Grind_Arith_CommRing_EqCnstr_0__Lean_Meta_Grind_Arith_CommRing_toRingExpr_x3f___closed__5; +x_59 = lean_alloc_ctor(7, 2, 0); +lean_ctor_set(x_59, 0, x_58); +lean_ctor_set(x_59, 1, x_57); +x_60 = lean_alloc_ctor(7, 2, 0); +lean_ctor_set(x_60, 0, x_59); +lean_ctor_set(x_60, 1, x_58); +x_61 = l_Lean_addTrace___at_Lean_Meta_Grind_Arith_CommRing_Null_setEqUnsat___spec__3(x_21, x_60, x_2, x_3, x_4, x_5, x_6, x_7, x_8, x_9, x_10, x_56); +x_62 = lean_ctor_get(x_61, 0); +lean_inc(x_62); +x_63 = lean_ctor_get(x_61, 1); +lean_inc(x_63); +lean_dec(x_61); +x_64 = lean_apply_11(x_14, x_62, x_2, x_3, x_4, x_5, x_6, x_7, x_8, x_9, x_10, x_63); +return x_64; } else { -lean_object* x_28; lean_object* x_29; -x_28 = lean_alloc_ctor(1, 2, 0); -lean_ctor_set(x_28, 0, x_17); -lean_ctor_set(x_28, 1, x_18); -x_29 = lean_alloc_ctor(1, 2, 0); -lean_ctor_set(x_29, 0, x_1); -lean_ctor_set(x_29, 1, x_28); -return x_29; -} +lean_object* x_65; lean_object* x_66; lean_object* x_67; lean_object* x_68; +lean_dec(x_10); +lean_dec(x_9); +lean_dec(x_8); +lean_dec(x_7); +lean_dec(x_6); +lean_dec(x_5); +lean_dec(x_4); +lean_dec(x_3); +lean_dec(x_2); +x_65 = lean_ctor_get(x_54, 0); +lean_inc(x_65); +x_66 = lean_ctor_get(x_54, 1); +lean_inc(x_66); +if (lean_is_exclusive(x_54)) { + lean_ctor_release(x_54, 0); + lean_ctor_release(x_54, 1); + x_67 = x_54; +} else { + lean_dec_ref(x_54); + x_67 = lean_box(0); } +if (lean_is_scalar(x_67)) { + x_68 = lean_alloc_ctor(1, 2, 0); +} else { + x_68 = x_67; } +lean_ctor_set(x_68, 0, x_65); +lean_ctor_set(x_68, 1, x_66); +return x_68; } } -LEAN_EXPORT lean_object* l_Lean_PersistentArray_modifyAux___at_Lean_Meta_Grind_Arith_CommRing_addToBasisCore___spec__2(lean_object* x_1, lean_object* x_2, lean_object* x_3, size_t x_4, size_t x_5) { -_start: -{ -if (lean_obj_tag(x_3) == 0) -{ -uint8_t x_6; -x_6 = !lean_is_exclusive(x_3); -if (x_6 == 0) -{ -lean_object* x_7; size_t x_8; size_t x_9; size_t x_10; size_t x_11; size_t x_12; size_t x_13; size_t x_14; lean_object* x_15; lean_object* x_16; uint8_t x_17; -x_7 = lean_ctor_get(x_3, 0); -x_8 = lean_usize_shift_right(x_4, x_5); -x_9 = 1; -x_10 = lean_usize_shift_left(x_9, x_5); -x_11 = lean_usize_sub(x_10, x_9); -x_12 = lean_usize_land(x_4, x_11); -x_13 = 5; -x_14 = lean_usize_sub(x_5, x_13); -x_15 = lean_usize_to_nat(x_8); -x_16 = lean_array_get_size(x_7); -x_17 = lean_nat_dec_lt(x_15, x_16); -lean_dec(x_16); -if (x_17 == 0) +else { -lean_dec(x_15); +lean_object* x_69; lean_object* x_70; lean_object* x_71; lean_object* x_72; +lean_dec(x_10); +lean_dec(x_9); +lean_dec(x_8); +lean_dec(x_7); +lean_dec(x_6); +lean_dec(x_5); +lean_dec(x_4); +lean_dec(x_3); +lean_dec(x_2); lean_dec(x_1); -return x_3; +x_69 = lean_ctor_get(x_52, 0); +lean_inc(x_69); +x_70 = lean_ctor_get(x_52, 1); +lean_inc(x_70); 
+if (lean_is_exclusive(x_52)) { + lean_ctor_release(x_52, 0); + lean_ctor_release(x_52, 1); + x_71 = x_52; +} else { + lean_dec_ref(x_52); + x_71 = lean_box(0); +} +if (lean_is_scalar(x_71)) { + x_72 = lean_alloc_ctor(1, 2, 0); +} else { + x_72 = x_71; +} +lean_ctor_set(x_72, 0, x_69); +lean_ctor_set(x_72, 1, x_70); +return x_72; +} } -else -{ -lean_object* x_18; lean_object* x_19; lean_object* x_20; lean_object* x_21; lean_object* x_22; -x_18 = lean_array_fget(x_7, x_15); -x_19 = lean_box(0); -x_20 = lean_array_fset(x_7, x_15, x_19); -x_21 = l_Lean_PersistentArray_modifyAux___at_Lean_Meta_Grind_Arith_CommRing_addToBasisCore___spec__2(x_1, x_2, x_18, x_12, x_14); -x_22 = lean_array_fset(x_20, x_15, x_21); -lean_dec(x_15); -lean_ctor_set(x_3, 0, x_22); -return x_3; } } else { -lean_object* x_23; size_t x_24; size_t x_25; size_t x_26; size_t x_27; size_t x_28; size_t x_29; size_t x_30; lean_object* x_31; lean_object* x_32; uint8_t x_33; -x_23 = lean_ctor_get(x_3, 0); -lean_inc(x_23); +lean_object* x_73; lean_object* x_74; +x_73 = lean_ctor_get(x_17, 1); +lean_inc(x_73); +lean_dec(x_17); +lean_inc(x_10); +lean_inc(x_9); +lean_inc(x_8); +lean_inc(x_7); +lean_inc(x_6); +lean_inc(x_5); +lean_inc(x_4); +lean_inc(x_3); +lean_inc(x_2); +x_74 = l_Lean_Meta_Grind_Arith_CommRing_EqCnstr_setUnsat(x_1, x_2, x_3, x_4, x_5, x_6, x_7, x_8, x_9, x_10, x_73); +if (lean_obj_tag(x_74) == 0) +{ +lean_object* x_75; lean_object* x_76; lean_object* x_77; +x_75 = lean_ctor_get(x_74, 0); +lean_inc(x_75); +x_76 = lean_ctor_get(x_74, 1); +lean_inc(x_76); +lean_dec(x_74); +x_77 = lean_apply_11(x_14, x_75, x_2, x_3, x_4, x_5, x_6, x_7, x_8, x_9, x_10, x_76); +return x_77; +} +else +{ +uint8_t x_78; +lean_dec(x_10); +lean_dec(x_9); +lean_dec(x_8); +lean_dec(x_7); +lean_dec(x_6); +lean_dec(x_5); +lean_dec(x_4); lean_dec(x_3); -x_24 = lean_usize_shift_right(x_4, x_5); -x_25 = 1; -x_26 = lean_usize_shift_left(x_25, x_5); -x_27 = lean_usize_sub(x_26, x_25); -x_28 = lean_usize_land(x_4, x_27); -x_29 = 5; -x_30 = lean_usize_sub(x_5, x_29); -x_31 = lean_usize_to_nat(x_24); -x_32 = lean_array_get_size(x_23); -x_33 = lean_nat_dec_lt(x_31, x_32); -lean_dec(x_32); -if (x_33 == 0) +lean_dec(x_2); +x_78 = !lean_is_exclusive(x_74); +if (x_78 == 0) { -lean_object* x_34; -lean_dec(x_31); -lean_dec(x_1); -x_34 = lean_alloc_ctor(0, 1, 0); -lean_ctor_set(x_34, 0, x_23); -return x_34; +return x_74; } else { -lean_object* x_35; lean_object* x_36; lean_object* x_37; lean_object* x_38; lean_object* x_39; lean_object* x_40; -x_35 = lean_array_fget(x_23, x_31); -x_36 = lean_box(0); -x_37 = lean_array_fset(x_23, x_31, x_36); -x_38 = l_Lean_PersistentArray_modifyAux___at_Lean_Meta_Grind_Arith_CommRing_addToBasisCore___spec__2(x_1, x_2, x_35, x_28, x_30); -x_39 = lean_array_fset(x_37, x_31, x_38); -lean_dec(x_31); -x_40 = lean_alloc_ctor(0, 1, 0); -lean_ctor_set(x_40, 0, x_39); -return x_40; +lean_object* x_79; lean_object* x_80; lean_object* x_81; +x_79 = lean_ctor_get(x_74, 0); +x_80 = lean_ctor_get(x_74, 1); +lean_inc(x_80); +lean_inc(x_79); +lean_dec(x_74); +x_81 = lean_alloc_ctor(1, 2, 0); +lean_ctor_set(x_81, 0, x_79); +lean_ctor_set(x_81, 1, x_80); +return x_81; +} } } } else { -uint8_t x_41; -x_41 = !lean_is_exclusive(x_3); -if (x_41 == 0) -{ -lean_object* x_42; lean_object* x_43; lean_object* x_44; uint8_t x_45; -x_42 = lean_ctor_get(x_3, 0); -x_43 = lean_usize_to_nat(x_4); -x_44 = lean_array_get_size(x_42); -x_45 = lean_nat_dec_lt(x_43, x_44); -lean_dec(x_44); -if (x_45 == 0) -{ -lean_dec(x_43); +uint8_t x_82; +lean_dec(x_10); 
+lean_dec(x_9); +lean_dec(x_8); +lean_dec(x_7); +lean_dec(x_6); +lean_dec(x_5); +lean_dec(x_4); +lean_dec(x_3); +lean_dec(x_2); lean_dec(x_1); -return x_3; +x_82 = !lean_is_exclusive(x_17); +if (x_82 == 0) +{ +return x_17; } else { -lean_object* x_46; lean_object* x_47; lean_object* x_48; lean_object* x_49; lean_object* x_50; -x_46 = lean_array_fget(x_42, x_43); -x_47 = lean_box(0); -x_48 = lean_array_fset(x_42, x_43, x_47); -x_49 = l___private_Lean_Meta_Tactic_Grind_Arith_CommRing_EqCnstr_0__Lean_Meta_Grind_Arith_CommRing_addSorted(x_1, x_46); -x_50 = lean_array_fset(x_48, x_43, x_49); -lean_dec(x_43); -lean_ctor_set(x_3, 0, x_50); -return x_3; +lean_object* x_83; lean_object* x_84; lean_object* x_85; +x_83 = lean_ctor_get(x_17, 0); +x_84 = lean_ctor_get(x_17, 1); +lean_inc(x_84); +lean_inc(x_83); +lean_dec(x_17); +x_85 = lean_alloc_ctor(1, 2, 0); +lean_ctor_set(x_85, 0, x_83); +lean_ctor_set(x_85, 1, x_84); +return x_85; +} } } else { -lean_object* x_51; lean_object* x_52; lean_object* x_53; uint8_t x_54; -x_51 = lean_ctor_get(x_3, 0); -lean_inc(x_51); -lean_dec(x_3); -x_52 = lean_usize_to_nat(x_4); -x_53 = lean_array_get_size(x_51); -x_54 = lean_nat_dec_lt(x_52, x_53); -lean_dec(x_53); -if (x_54 == 0) +lean_object* x_86; lean_object* x_87; lean_object* x_88; uint8_t x_89; +x_86 = l_Lean_Meta_Grind_Arith_CommRing_EqCnstr_checkConstant___closed__7; +x_87 = l_Lean_isTracingEnabledFor___at_Lean_Meta_Grind_Arith_CommRing_Null_setEqUnsat___spec__1(x_86, x_2, x_3, x_4, x_5, x_6, x_7, x_8, x_9, x_10, x_11); +x_88 = lean_ctor_get(x_87, 0); +lean_inc(x_88); +x_89 = lean_unbox(x_88); +lean_dec(x_88); +if (x_89 == 0) { -lean_object* x_55; -lean_dec(x_52); +lean_object* x_90; lean_object* x_91; lean_object* x_92; lean_dec(x_1); -x_55 = lean_alloc_ctor(1, 1, 0); -lean_ctor_set(x_55, 0, x_51); -return x_55; +x_90 = lean_ctor_get(x_87, 1); +lean_inc(x_90); +lean_dec(x_87); +x_91 = lean_box(0); +x_92 = lean_apply_11(x_14, x_91, x_2, x_3, x_4, x_5, x_6, x_7, x_8, x_9, x_10, x_90); +return x_92; } else { -lean_object* x_56; lean_object* x_57; lean_object* x_58; lean_object* x_59; lean_object* x_60; lean_object* x_61; -x_56 = lean_array_fget(x_51, x_52); -x_57 = lean_box(0); -x_58 = lean_array_fset(x_51, x_52, x_57); -x_59 = l___private_Lean_Meta_Tactic_Grind_Arith_CommRing_EqCnstr_0__Lean_Meta_Grind_Arith_CommRing_addSorted(x_1, x_56); -x_60 = lean_array_fset(x_58, x_52, x_59); -lean_dec(x_52); -x_61 = lean_alloc_ctor(1, 1, 0); -lean_ctor_set(x_61, 0, x_60); -return x_61; -} -} -} -} -} -LEAN_EXPORT lean_object* l_Lean_PersistentArray_modify___at_Lean_Meta_Grind_Arith_CommRing_addToBasisCore___spec__1(lean_object* x_1, lean_object* x_2, lean_object* x_3, lean_object* x_4) { -_start: +uint8_t x_93; +x_93 = !lean_is_exclusive(x_87); +if (x_93 == 0) { -uint8_t x_5; -x_5 = !lean_is_exclusive(x_3); -if (x_5 == 0) +lean_object* x_94; lean_object* x_95; lean_object* x_96; +x_94 = lean_ctor_get(x_87, 1); +x_95 = lean_ctor_get(x_87, 0); +lean_dec(x_95); +x_96 = l_Lean_Meta_Grind_updateLastTag(x_3, x_4, x_5, x_6, x_7, x_8, x_9, x_10, x_94); +if (lean_obj_tag(x_96) == 0) { -lean_object* x_6; lean_object* x_7; size_t x_8; lean_object* x_9; uint8_t x_10; -x_6 = lean_ctor_get(x_3, 0); -x_7 = lean_ctor_get(x_3, 1); -x_8 = lean_ctor_get_usize(x_3, 4); -x_9 = lean_ctor_get(x_3, 3); -x_10 = lean_nat_dec_le(x_9, x_4); -if (x_10 == 0) +lean_object* x_97; lean_object* x_98; +x_97 = lean_ctor_get(x_96, 1); +lean_inc(x_97); +lean_dec(x_96); +x_98 = 
l_Lean_Meta_Grind_Arith_CommRing_EqCnstr_denoteExpr___at_Lean_Meta_Grind_Arith_CommRing_Null_setEqUnsat___spec__4(x_1, x_2, x_3, x_4, x_5, x_6, x_7, x_8, x_9, x_10, x_97); +if (lean_obj_tag(x_98) == 0) { -size_t x_11; lean_object* x_12; -x_11 = lean_usize_of_nat(x_4); -x_12 = l_Lean_PersistentArray_modifyAux___at_Lean_Meta_Grind_Arith_CommRing_addToBasisCore___spec__2(x_1, x_2, x_6, x_11, x_8); -lean_ctor_set(x_3, 0, x_12); -return x_3; +lean_object* x_99; lean_object* x_100; lean_object* x_101; lean_object* x_102; lean_object* x_103; lean_object* x_104; lean_object* x_105; lean_object* x_106; lean_object* x_107; +x_99 = lean_ctor_get(x_98, 0); +lean_inc(x_99); +x_100 = lean_ctor_get(x_98, 1); +lean_inc(x_100); +lean_dec(x_98); +x_101 = l_Lean_MessageData_ofExpr(x_99); +x_102 = l___private_Lean_Meta_Tactic_Grind_Arith_CommRing_EqCnstr_0__Lean_Meta_Grind_Arith_CommRing_toRingExpr_x3f___closed__5; +lean_ctor_set_tag(x_87, 7); +lean_ctor_set(x_87, 1, x_101); +lean_ctor_set(x_87, 0, x_102); +x_103 = lean_alloc_ctor(7, 2, 0); +lean_ctor_set(x_103, 0, x_87); +lean_ctor_set(x_103, 1, x_102); +x_104 = l_Lean_addTrace___at_Lean_Meta_Grind_Arith_CommRing_Null_setEqUnsat___spec__3(x_86, x_103, x_2, x_3, x_4, x_5, x_6, x_7, x_8, x_9, x_10, x_100); +x_105 = lean_ctor_get(x_104, 0); +lean_inc(x_105); +x_106 = lean_ctor_get(x_104, 1); +lean_inc(x_106); +lean_dec(x_104); +x_107 = lean_apply_11(x_14, x_105, x_2, x_3, x_4, x_5, x_6, x_7, x_8, x_9, x_10, x_106); +return x_107; } else { -lean_object* x_13; lean_object* x_14; uint8_t x_15; -x_13 = lean_nat_sub(x_4, x_9); -x_14 = lean_array_get_size(x_7); -x_15 = lean_nat_dec_lt(x_13, x_14); -lean_dec(x_14); -if (x_15 == 0) +uint8_t x_108; +lean_free_object(x_87); +lean_dec(x_10); +lean_dec(x_9); +lean_dec(x_8); +lean_dec(x_7); +lean_dec(x_6); +lean_dec(x_5); +lean_dec(x_4); +lean_dec(x_3); +lean_dec(x_2); +x_108 = !lean_is_exclusive(x_98); +if (x_108 == 0) { -lean_dec(x_13); -lean_dec(x_1); -return x_3; +return x_98; } else { -lean_object* x_16; lean_object* x_17; lean_object* x_18; lean_object* x_19; lean_object* x_20; -x_16 = lean_array_fget(x_7, x_13); -x_17 = lean_box(0); -x_18 = lean_array_fset(x_7, x_13, x_17); -x_19 = l___private_Lean_Meta_Tactic_Grind_Arith_CommRing_EqCnstr_0__Lean_Meta_Grind_Arith_CommRing_addSorted(x_1, x_16); -x_20 = lean_array_fset(x_18, x_13, x_19); -lean_dec(x_13); -lean_ctor_set(x_3, 1, x_20); -return x_3; +lean_object* x_109; lean_object* x_110; lean_object* x_111; +x_109 = lean_ctor_get(x_98, 0); +x_110 = lean_ctor_get(x_98, 1); +lean_inc(x_110); +lean_inc(x_109); +lean_dec(x_98); +x_111 = lean_alloc_ctor(1, 2, 0); +lean_ctor_set(x_111, 0, x_109); +lean_ctor_set(x_111, 1, x_110); +return x_111; } } } else { -lean_object* x_21; lean_object* x_22; lean_object* x_23; size_t x_24; lean_object* x_25; uint8_t x_26; -x_21 = lean_ctor_get(x_3, 0); -x_22 = lean_ctor_get(x_3, 1); -x_23 = lean_ctor_get(x_3, 2); -x_24 = lean_ctor_get_usize(x_3, 4); -x_25 = lean_ctor_get(x_3, 3); -lean_inc(x_25); -lean_inc(x_23); -lean_inc(x_22); -lean_inc(x_21); +uint8_t x_112; +lean_free_object(x_87); +lean_dec(x_10); +lean_dec(x_9); +lean_dec(x_8); +lean_dec(x_7); +lean_dec(x_6); +lean_dec(x_5); +lean_dec(x_4); lean_dec(x_3); -x_26 = lean_nat_dec_le(x_25, x_4); -if (x_26 == 0) +lean_dec(x_2); +lean_dec(x_1); +x_112 = !lean_is_exclusive(x_96); +if (x_112 == 0) { -size_t x_27; lean_object* x_28; lean_object* x_29; -x_27 = lean_usize_of_nat(x_4); -x_28 = l_Lean_PersistentArray_modifyAux___at_Lean_Meta_Grind_Arith_CommRing_addToBasisCore___spec__2(x_1, x_2, 
x_21, x_27, x_24); -x_29 = lean_alloc_ctor(0, 4, sizeof(size_t)*1); -lean_ctor_set(x_29, 0, x_28); -lean_ctor_set(x_29, 1, x_22); -lean_ctor_set(x_29, 2, x_23); -lean_ctor_set(x_29, 3, x_25); -lean_ctor_set_usize(x_29, 4, x_24); -return x_29; +return x_96; } else { -lean_object* x_30; lean_object* x_31; uint8_t x_32; -x_30 = lean_nat_sub(x_4, x_25); -x_31 = lean_array_get_size(x_22); -x_32 = lean_nat_dec_lt(x_30, x_31); -lean_dec(x_31); -if (x_32 == 0) -{ -lean_object* x_33; -lean_dec(x_30); -lean_dec(x_1); -x_33 = lean_alloc_ctor(0, 4, sizeof(size_t)*1); -lean_ctor_set(x_33, 0, x_21); -lean_ctor_set(x_33, 1, x_22); -lean_ctor_set(x_33, 2, x_23); -lean_ctor_set(x_33, 3, x_25); -lean_ctor_set_usize(x_33, 4, x_24); -return x_33; +lean_object* x_113; lean_object* x_114; lean_object* x_115; +x_113 = lean_ctor_get(x_96, 0); +x_114 = lean_ctor_get(x_96, 1); +lean_inc(x_114); +lean_inc(x_113); +lean_dec(x_96); +x_115 = lean_alloc_ctor(1, 2, 0); +lean_ctor_set(x_115, 0, x_113); +lean_ctor_set(x_115, 1, x_114); +return x_115; +} +} } else { -lean_object* x_34; lean_object* x_35; lean_object* x_36; lean_object* x_37; lean_object* x_38; lean_object* x_39; -x_34 = lean_array_fget(x_22, x_30); -x_35 = lean_box(0); -x_36 = lean_array_fset(x_22, x_30, x_35); -x_37 = l___private_Lean_Meta_Tactic_Grind_Arith_CommRing_EqCnstr_0__Lean_Meta_Grind_Arith_CommRing_addSorted(x_1, x_34); -x_38 = lean_array_fset(x_36, x_30, x_37); -lean_dec(x_30); -x_39 = lean_alloc_ctor(0, 4, sizeof(size_t)*1); -lean_ctor_set(x_39, 0, x_21); -lean_ctor_set(x_39, 1, x_38); -lean_ctor_set(x_39, 2, x_23); -lean_ctor_set(x_39, 3, x_25); -lean_ctor_set_usize(x_39, 4, x_24); -return x_39; +lean_object* x_116; lean_object* x_117; +x_116 = lean_ctor_get(x_87, 1); +lean_inc(x_116); +lean_dec(x_87); +x_117 = l_Lean_Meta_Grind_updateLastTag(x_3, x_4, x_5, x_6, x_7, x_8, x_9, x_10, x_116); +if (lean_obj_tag(x_117) == 0) +{ +lean_object* x_118; lean_object* x_119; +x_118 = lean_ctor_get(x_117, 1); +lean_inc(x_118); +lean_dec(x_117); +x_119 = l_Lean_Meta_Grind_Arith_CommRing_EqCnstr_denoteExpr___at_Lean_Meta_Grind_Arith_CommRing_Null_setEqUnsat___spec__4(x_1, x_2, x_3, x_4, x_5, x_6, x_7, x_8, x_9, x_10, x_118); +if (lean_obj_tag(x_119) == 0) +{ +lean_object* x_120; lean_object* x_121; lean_object* x_122; lean_object* x_123; lean_object* x_124; lean_object* x_125; lean_object* x_126; lean_object* x_127; lean_object* x_128; lean_object* x_129; +x_120 = lean_ctor_get(x_119, 0); +lean_inc(x_120); +x_121 = lean_ctor_get(x_119, 1); +lean_inc(x_121); +lean_dec(x_119); +x_122 = l_Lean_MessageData_ofExpr(x_120); +x_123 = l___private_Lean_Meta_Tactic_Grind_Arith_CommRing_EqCnstr_0__Lean_Meta_Grind_Arith_CommRing_toRingExpr_x3f___closed__5; +x_124 = lean_alloc_ctor(7, 2, 0); +lean_ctor_set(x_124, 0, x_123); +lean_ctor_set(x_124, 1, x_122); +x_125 = lean_alloc_ctor(7, 2, 0); +lean_ctor_set(x_125, 0, x_124); +lean_ctor_set(x_125, 1, x_123); +x_126 = l_Lean_addTrace___at_Lean_Meta_Grind_Arith_CommRing_Null_setEqUnsat___spec__3(x_86, x_125, x_2, x_3, x_4, x_5, x_6, x_7, x_8, x_9, x_10, x_121); +x_127 = lean_ctor_get(x_126, 0); +lean_inc(x_127); +x_128 = lean_ctor_get(x_126, 1); +lean_inc(x_128); +lean_dec(x_126); +x_129 = lean_apply_11(x_14, x_127, x_2, x_3, x_4, x_5, x_6, x_7, x_8, x_9, x_10, x_128); +return x_129; } +else +{ +lean_object* x_130; lean_object* x_131; lean_object* x_132; lean_object* x_133; +lean_dec(x_10); +lean_dec(x_9); +lean_dec(x_8); +lean_dec(x_7); +lean_dec(x_6); +lean_dec(x_5); +lean_dec(x_4); +lean_dec(x_3); +lean_dec(x_2); +x_130 = 
lean_ctor_get(x_119, 0); +lean_inc(x_130); +x_131 = lean_ctor_get(x_119, 1); +lean_inc(x_131); +if (lean_is_exclusive(x_119)) { + lean_ctor_release(x_119, 0); + lean_ctor_release(x_119, 1); + x_132 = x_119; +} else { + lean_dec_ref(x_119); + x_132 = lean_box(0); } +if (lean_is_scalar(x_132)) { + x_133 = lean_alloc_ctor(1, 2, 0); +} else { + x_133 = x_132; } +lean_ctor_set(x_133, 0, x_130); +lean_ctor_set(x_133, 1, x_131); +return x_133; } } -LEAN_EXPORT lean_object* l_Lean_Meta_Grind_Arith_CommRing_addToBasisCore(lean_object* x_1, lean_object* x_2, lean_object* x_3, lean_object* x_4, lean_object* x_5, lean_object* x_6, lean_object* x_7, lean_object* x_8, lean_object* x_9, lean_object* x_10, lean_object* x_11) { -_start: -{ -lean_object* x_12; -x_12 = lean_ctor_get(x_1, 0); -lean_inc(x_12); -if (lean_obj_tag(x_12) == 0) +else { -lean_object* x_13; lean_object* x_14; -lean_dec(x_12); +lean_object* x_134; lean_object* x_135; lean_object* x_136; lean_object* x_137; +lean_dec(x_10); +lean_dec(x_9); +lean_dec(x_8); +lean_dec(x_7); +lean_dec(x_6); +lean_dec(x_5); +lean_dec(x_4); +lean_dec(x_3); +lean_dec(x_2); lean_dec(x_1); -x_13 = lean_box(0); -x_14 = lean_alloc_ctor(0, 2, 0); -lean_ctor_set(x_14, 0, x_13); -lean_ctor_set(x_14, 1, x_11); -return x_14; +x_134 = lean_ctor_get(x_117, 0); +lean_inc(x_134); +x_135 = lean_ctor_get(x_117, 1); +lean_inc(x_135); +if (lean_is_exclusive(x_117)) { + lean_ctor_release(x_117, 0); + lean_ctor_release(x_117, 1); + x_136 = x_117; +} else { + lean_dec_ref(x_117); + x_136 = lean_box(0); +} +if (lean_is_scalar(x_136)) { + x_137 = lean_alloc_ctor(1, 2, 0); +} else { + x_137 = x_136; +} +lean_ctor_set(x_137, 0, x_134); +lean_ctor_set(x_137, 1, x_135); +return x_137; +} +} +} +} } else { -lean_object* x_15; -x_15 = lean_ctor_get(x_12, 1); -lean_inc(x_15); +uint8_t x_138; lean_object* x_139; lean_object* x_140; lean_dec(x_12); -if (lean_obj_tag(x_15) == 0) +lean_dec(x_10); +lean_dec(x_9); +lean_dec(x_8); +lean_dec(x_7); +lean_dec(x_6); +lean_dec(x_5); +lean_dec(x_4); +lean_dec(x_3); +lean_dec(x_2); +lean_dec(x_1); +x_138 = 0; +x_139 = lean_box(x_138); +x_140 = lean_alloc_ctor(0, 2, 0); +lean_ctor_set(x_140, 0, x_139); +lean_ctor_set(x_140, 1, x_11); +return x_140; +} +} +} +LEAN_EXPORT lean_object* l_Lean_Meta_Grind_Arith_CommRing_EqCnstr_checkConstant___lambda__1___boxed(lean_object* x_1, lean_object* x_2, lean_object* x_3, lean_object* x_4, lean_object* x_5, lean_object* x_6, lean_object* x_7, lean_object* x_8, lean_object* x_9, lean_object* x_10, lean_object* x_11) { +_start: { -lean_object* x_16; lean_object* x_17; +lean_object* x_12; +x_12 = l_Lean_Meta_Grind_Arith_CommRing_EqCnstr_checkConstant___lambda__1(x_1, x_2, x_3, x_4, x_5, x_6, x_7, x_8, x_9, x_10, x_11); +lean_dec(x_10); +lean_dec(x_9); +lean_dec(x_8); +lean_dec(x_7); +lean_dec(x_6); +lean_dec(x_5); +lean_dec(x_4); +lean_dec(x_3); +lean_dec(x_2); lean_dec(x_1); -x_16 = lean_box(0); -x_17 = lean_alloc_ctor(0, 2, 0); -lean_ctor_set(x_17, 0, x_16); -lean_ctor_set(x_17, 1, x_11); -return x_17; +return x_12; } -else +} +LEAN_EXPORT lean_object* l_Lean_Meta_Grind_Arith_CommRing_EqCnstr_simplifyAndCheck(lean_object* x_1, lean_object* x_2, lean_object* x_3, lean_object* x_4, lean_object* x_5, lean_object* x_6, lean_object* x_7, lean_object* x_8, lean_object* x_9, lean_object* x_10, lean_object* x_11) { +_start: { -lean_object* x_18; lean_object* x_19; lean_object* x_20; lean_object* x_21; lean_object* x_22; lean_object* x_23; lean_object* x_24; uint8_t x_25; -x_18 = lean_ctor_get(x_15, 0); -lean_inc(x_18); 
-lean_dec(x_15); -x_19 = lean_ctor_get(x_2, 0); -x_20 = lean_st_ref_take(x_3, x_11); -x_21 = lean_ctor_get(x_20, 0); +lean_object* x_12; +lean_inc(x_10); +lean_inc(x_9); +lean_inc(x_8); +lean_inc(x_7); +lean_inc(x_6); +lean_inc(x_5); +lean_inc(x_4); +lean_inc(x_3); +lean_inc(x_2); +x_12 = l_Lean_Meta_Grind_Arith_CommRing_EqCnstr_simplify(x_1, x_2, x_3, x_4, x_5, x_6, x_7, x_8, x_9, x_10, x_11); +if (lean_obj_tag(x_12) == 0) +{ +lean_object* x_13; lean_object* x_14; lean_object* x_15; +x_13 = lean_ctor_get(x_12, 0); +lean_inc(x_13); +x_14 = lean_ctor_get(x_12, 1); +lean_inc(x_14); +lean_dec(x_12); +lean_inc(x_13); +x_15 = l_Lean_Meta_Grind_Arith_CommRing_EqCnstr_checkConstant(x_13, x_2, x_3, x_4, x_5, x_6, x_7, x_8, x_9, x_10, x_14); +if (lean_obj_tag(x_15) == 0) +{ +lean_object* x_16; uint8_t x_17; +x_16 = lean_ctor_get(x_15, 0); +lean_inc(x_16); +x_17 = lean_unbox(x_16); +lean_dec(x_16); +if (x_17 == 0) +{ +uint8_t x_18; +x_18 = !lean_is_exclusive(x_15); +if (x_18 == 0) +{ +lean_object* x_19; lean_object* x_20; +x_19 = lean_ctor_get(x_15, 0); +lean_dec(x_19); +x_20 = lean_alloc_ctor(1, 1, 0); +lean_ctor_set(x_20, 0, x_13); +lean_ctor_set(x_15, 0, x_20); +return x_15; +} +else +{ +lean_object* x_21; lean_object* x_22; lean_object* x_23; +x_21 = lean_ctor_get(x_15, 1); lean_inc(x_21); -x_22 = lean_ctor_get(x_21, 14); -lean_inc(x_22); -x_23 = lean_ctor_get(x_22, 2); -lean_inc(x_23); -x_24 = lean_ctor_get(x_20, 1); -lean_inc(x_24); -lean_dec(x_20); -x_25 = !lean_is_exclusive(x_21); -if (x_25 == 0) +lean_dec(x_15); +x_22 = lean_alloc_ctor(1, 1, 0); +lean_ctor_set(x_22, 0, x_13); +x_23 = lean_alloc_ctor(0, 2, 0); +lean_ctor_set(x_23, 0, x_22); +lean_ctor_set(x_23, 1, x_21); +return x_23; +} +} +else { -lean_object* x_26; uint8_t x_27; -x_26 = lean_ctor_get(x_21, 14); -lean_dec(x_26); -x_27 = !lean_is_exclusive(x_22); -if (x_27 == 0) +uint8_t x_24; +lean_dec(x_13); +x_24 = !lean_is_exclusive(x_15); +if (x_24 == 0) { -lean_object* x_28; uint8_t x_29; -x_28 = lean_ctor_get(x_22, 2); -lean_dec(x_28); -x_29 = !lean_is_exclusive(x_23); -if (x_29 == 0) +lean_object* x_25; lean_object* x_26; +x_25 = lean_ctor_get(x_15, 0); +lean_dec(x_25); +x_26 = lean_box(0); +lean_ctor_set(x_15, 0, x_26); +return x_15; +} +else { -lean_object* x_30; lean_object* x_31; uint8_t x_32; -x_30 = lean_ctor_get(x_23, 0); -x_31 = lean_array_get_size(x_30); -x_32 = lean_nat_dec_lt(x_19, x_31); -lean_dec(x_31); -if (x_32 == 0) +lean_object* x_27; lean_object* x_28; lean_object* x_29; +x_27 = lean_ctor_get(x_15, 1); +lean_inc(x_27); +lean_dec(x_15); +x_28 = lean_box(0); +x_29 = lean_alloc_ctor(0, 2, 0); +lean_ctor_set(x_29, 0, x_28); +lean_ctor_set(x_29, 1, x_27); +return x_29; +} +} +} +else { -lean_object* x_33; uint8_t x_34; -lean_dec(x_18); -lean_dec(x_1); -x_33 = lean_st_ref_set(x_3, x_21, x_24); -x_34 = !lean_is_exclusive(x_33); -if (x_34 == 0) +uint8_t x_30; +lean_dec(x_13); +x_30 = !lean_is_exclusive(x_15); +if (x_30 == 0) { -lean_object* x_35; lean_object* x_36; -x_35 = lean_ctor_get(x_33, 0); -lean_dec(x_35); -x_36 = lean_box(0); -lean_ctor_set(x_33, 0, x_36); +return x_15; +} +else +{ +lean_object* x_31; lean_object* x_32; lean_object* x_33; +x_31 = lean_ctor_get(x_15, 0); +x_32 = lean_ctor_get(x_15, 1); +lean_inc(x_32); +lean_inc(x_31); +lean_dec(x_15); +x_33 = lean_alloc_ctor(1, 2, 0); +lean_ctor_set(x_33, 0, x_31); +lean_ctor_set(x_33, 1, x_32); return x_33; } +} +} else { -lean_object* x_37; lean_object* x_38; lean_object* x_39; -x_37 = lean_ctor_get(x_33, 1); -lean_inc(x_37); -lean_dec(x_33); -x_38 = lean_box(0); 
-x_39 = lean_alloc_ctor(0, 2, 0); -lean_ctor_set(x_39, 0, x_38); -lean_ctor_set(x_39, 1, x_37); -return x_39; +uint8_t x_34; +lean_dec(x_10); +lean_dec(x_9); +lean_dec(x_8); +lean_dec(x_7); +lean_dec(x_6); +lean_dec(x_5); +lean_dec(x_4); +lean_dec(x_3); +lean_dec(x_2); +x_34 = !lean_is_exclusive(x_12); +if (x_34 == 0) +{ +return x_12; +} +else +{ +lean_object* x_35; lean_object* x_36; lean_object* x_37; +x_35 = lean_ctor_get(x_12, 0); +x_36 = lean_ctor_get(x_12, 1); +lean_inc(x_36); +lean_inc(x_35); +lean_dec(x_12); +x_37 = lean_alloc_ctor(1, 2, 0); +lean_ctor_set(x_37, 0, x_35); +lean_ctor_set(x_37, 1, x_36); +return x_37; +} +} } } +LEAN_EXPORT lean_object* l___private_Lean_Meta_Tactic_Grind_Arith_CommRing_EqCnstr_0__Lean_Meta_Grind_Arith_CommRing_addSorted(lean_object* x_1, lean_object* x_2) { +_start: +{ +if (lean_obj_tag(x_2) == 0) +{ +lean_object* x_3; lean_object* x_4; +x_3 = lean_box(0); +x_4 = lean_alloc_ctor(1, 2, 0); +lean_ctor_set(x_4, 0, x_1); +lean_ctor_set(x_4, 1, x_3); +return x_4; +} else { -lean_object* x_40; lean_object* x_41; lean_object* x_42; uint8_t x_43; -x_40 = lean_array_fget(x_30, x_19); -x_41 = lean_box(0); -x_42 = lean_array_fset(x_30, x_19, x_41); -x_43 = !lean_is_exclusive(x_40); -if (x_43 == 0) +uint8_t x_5; +x_5 = !lean_is_exclusive(x_2); +if (x_5 == 0) { -lean_object* x_44; lean_object* x_45; lean_object* x_46; lean_object* x_47; uint8_t x_48; lean_object* x_49; lean_object* x_50; uint8_t x_51; -x_44 = lean_ctor_get(x_40, 19); -x_45 = lean_box(0); -x_46 = lean_ctor_get(x_18, 0); -lean_inc(x_46); -lean_dec(x_18); -x_47 = l_Lean_PersistentArray_modify___at_Lean_Meta_Grind_Arith_CommRing_addToBasisCore___spec__1(x_1, x_45, x_44, x_46); -lean_dec(x_46); -x_48 = 1; -lean_ctor_set(x_40, 19, x_47); -lean_ctor_set_uint8(x_40, sizeof(void*)*21, x_48); -x_49 = lean_array_fset(x_42, x_19, x_40); -lean_ctor_set(x_23, 0, x_49); -x_50 = lean_st_ref_set(x_3, x_21, x_24); -x_51 = !lean_is_exclusive(x_50); -if (x_51 == 0) +lean_object* x_6; lean_object* x_7; lean_object* x_8; lean_object* x_9; lean_object* x_10; lean_object* x_11; uint8_t x_12; uint8_t x_13; uint8_t x_14; +x_6 = lean_ctor_get(x_2, 0); +x_7 = lean_ctor_get(x_2, 1); +x_8 = lean_ctor_get(x_1, 0); +lean_inc(x_8); +x_9 = l_Lean_Grind_CommRing_Poly_lm(x_8); +lean_dec(x_8); +x_10 = lean_ctor_get(x_6, 0); +lean_inc(x_10); +x_11 = l_Lean_Grind_CommRing_Poly_lm(x_10); +lean_dec(x_10); +x_12 = l_Lean_Grind_CommRing_Mon_grevlex(x_9, x_11); +lean_dec(x_9); +x_13 = 2; +x_14 = l_instDecidableEqOrdering(x_12, x_13); +if (x_14 == 0) { -lean_object* x_52; -x_52 = lean_ctor_get(x_50, 0); -lean_dec(x_52); -lean_ctor_set(x_50, 0, x_41); -return x_50; +lean_object* x_15; +x_15 = l___private_Lean_Meta_Tactic_Grind_Arith_CommRing_EqCnstr_0__Lean_Meta_Grind_Arith_CommRing_addSorted(x_1, x_7); +lean_ctor_set(x_2, 1, x_15); +return x_2; } else { -lean_object* x_53; lean_object* x_54; -x_53 = lean_ctor_get(x_50, 1); -lean_inc(x_53); -lean_dec(x_50); -x_54 = lean_alloc_ctor(0, 2, 0); -lean_ctor_set(x_54, 0, x_41); -lean_ctor_set(x_54, 1, x_53); -return x_54; +lean_object* x_16; +x_16 = lean_alloc_ctor(1, 2, 0); +lean_ctor_set(x_16, 0, x_1); +lean_ctor_set(x_16, 1, x_2); +return x_16; } } else { -lean_object* x_55; lean_object* x_56; lean_object* x_57; lean_object* x_58; lean_object* x_59; lean_object* x_60; lean_object* x_61; lean_object* x_62; lean_object* x_63; lean_object* x_64; lean_object* x_65; lean_object* x_66; lean_object* x_67; lean_object* x_68; lean_object* x_69; lean_object* x_70; lean_object* x_71; lean_object* x_72; 
lean_object* x_73; lean_object* x_74; lean_object* x_75; lean_object* x_76; lean_object* x_77; lean_object* x_78; uint8_t x_79; lean_object* x_80; lean_object* x_81; lean_object* x_82; lean_object* x_83; lean_object* x_84; lean_object* x_85; -x_55 = lean_ctor_get(x_40, 0); -x_56 = lean_ctor_get(x_40, 1); -x_57 = lean_ctor_get(x_40, 2); -x_58 = lean_ctor_get(x_40, 3); -x_59 = lean_ctor_get(x_40, 4); -x_60 = lean_ctor_get(x_40, 5); -x_61 = lean_ctor_get(x_40, 6); -x_62 = lean_ctor_get(x_40, 7); -x_63 = lean_ctor_get(x_40, 8); -x_64 = lean_ctor_get(x_40, 9); -x_65 = lean_ctor_get(x_40, 10); -x_66 = lean_ctor_get(x_40, 11); -x_67 = lean_ctor_get(x_40, 12); -x_68 = lean_ctor_get(x_40, 13); -x_69 = lean_ctor_get(x_40, 14); -x_70 = lean_ctor_get(x_40, 15); -x_71 = lean_ctor_get(x_40, 16); -x_72 = lean_ctor_get(x_40, 17); -x_73 = lean_ctor_get(x_40, 18); -x_74 = lean_ctor_get(x_40, 19); -x_75 = lean_ctor_get(x_40, 20); -lean_inc(x_75); -lean_inc(x_74); -lean_inc(x_73); -lean_inc(x_72); -lean_inc(x_71); -lean_inc(x_70); -lean_inc(x_69); -lean_inc(x_68); -lean_inc(x_67); -lean_inc(x_66); -lean_inc(x_65); -lean_inc(x_64); -lean_inc(x_63); -lean_inc(x_62); -lean_inc(x_61); -lean_inc(x_60); -lean_inc(x_59); -lean_inc(x_58); -lean_inc(x_57); -lean_inc(x_56); -lean_inc(x_55); -lean_dec(x_40); -x_76 = lean_box(0); -x_77 = lean_ctor_get(x_18, 0); -lean_inc(x_77); -lean_dec(x_18); -x_78 = l_Lean_PersistentArray_modify___at_Lean_Meta_Grind_Arith_CommRing_addToBasisCore___spec__1(x_1, x_76, x_74, x_77); -lean_dec(x_77); -x_79 = 1; -x_80 = lean_alloc_ctor(0, 21, 1); -lean_ctor_set(x_80, 0, x_55); -lean_ctor_set(x_80, 1, x_56); -lean_ctor_set(x_80, 2, x_57); -lean_ctor_set(x_80, 3, x_58); -lean_ctor_set(x_80, 4, x_59); -lean_ctor_set(x_80, 5, x_60); -lean_ctor_set(x_80, 6, x_61); -lean_ctor_set(x_80, 7, x_62); -lean_ctor_set(x_80, 8, x_63); -lean_ctor_set(x_80, 9, x_64); -lean_ctor_set(x_80, 10, x_65); -lean_ctor_set(x_80, 11, x_66); -lean_ctor_set(x_80, 12, x_67); -lean_ctor_set(x_80, 13, x_68); -lean_ctor_set(x_80, 14, x_69); -lean_ctor_set(x_80, 15, x_70); -lean_ctor_set(x_80, 16, x_71); -lean_ctor_set(x_80, 17, x_72); -lean_ctor_set(x_80, 18, x_73); -lean_ctor_set(x_80, 19, x_78); -lean_ctor_set(x_80, 20, x_75); -lean_ctor_set_uint8(x_80, sizeof(void*)*21, x_79); -x_81 = lean_array_fset(x_42, x_19, x_80); -lean_ctor_set(x_23, 0, x_81); -x_82 = lean_st_ref_set(x_3, x_21, x_24); -x_83 = lean_ctor_get(x_82, 1); -lean_inc(x_83); -if (lean_is_exclusive(x_82)) { - lean_ctor_release(x_82, 0); - lean_ctor_release(x_82, 1); - x_84 = x_82; -} else { - lean_dec_ref(x_82); - x_84 = lean_box(0); +lean_object* x_17; lean_object* x_18; lean_object* x_19; lean_object* x_20; lean_object* x_21; lean_object* x_22; uint8_t x_23; uint8_t x_24; uint8_t x_25; +x_17 = lean_ctor_get(x_2, 0); +x_18 = lean_ctor_get(x_2, 1); +lean_inc(x_18); +lean_inc(x_17); +lean_dec(x_2); +x_19 = lean_ctor_get(x_1, 0); +lean_inc(x_19); +x_20 = l_Lean_Grind_CommRing_Poly_lm(x_19); +lean_dec(x_19); +x_21 = lean_ctor_get(x_17, 0); +lean_inc(x_21); +x_22 = l_Lean_Grind_CommRing_Poly_lm(x_21); +lean_dec(x_21); +x_23 = l_Lean_Grind_CommRing_Mon_grevlex(x_20, x_22); +lean_dec(x_20); +x_24 = 2; +x_25 = l_instDecidableEqOrdering(x_23, x_24); +if (x_25 == 0) +{ +lean_object* x_26; lean_object* x_27; +x_26 = l___private_Lean_Meta_Tactic_Grind_Arith_CommRing_EqCnstr_0__Lean_Meta_Grind_Arith_CommRing_addSorted(x_1, x_18); +x_27 = lean_alloc_ctor(1, 2, 0); +lean_ctor_set(x_27, 0, x_17); +lean_ctor_set(x_27, 1, x_26); +return x_27; } -if (lean_is_scalar(x_84)) { - 
x_85 = lean_alloc_ctor(0, 2, 0); -} else { - x_85 = x_84; +else +{ +lean_object* x_28; lean_object* x_29; +x_28 = lean_alloc_ctor(1, 2, 0); +lean_ctor_set(x_28, 0, x_17); +lean_ctor_set(x_28, 1, x_18); +x_29 = lean_alloc_ctor(1, 2, 0); +lean_ctor_set(x_29, 0, x_1); +lean_ctor_set(x_29, 1, x_28); +return x_29; } -lean_ctor_set(x_85, 0, x_41); -lean_ctor_set(x_85, 1, x_83); -return x_85; } } } +} +LEAN_EXPORT lean_object* l_Lean_PersistentArray_modifyAux___at_Lean_Meta_Grind_Arith_CommRing_addToBasisCore___spec__2(lean_object* x_1, lean_object* x_2, lean_object* x_3, size_t x_4, size_t x_5) { +_start: +{ +if (lean_obj_tag(x_3) == 0) +{ +uint8_t x_6; +x_6 = !lean_is_exclusive(x_3); +if (x_6 == 0) +{ +lean_object* x_7; size_t x_8; size_t x_9; size_t x_10; size_t x_11; size_t x_12; size_t x_13; size_t x_14; lean_object* x_15; lean_object* x_16; uint8_t x_17; +x_7 = lean_ctor_get(x_3, 0); +x_8 = lean_usize_shift_right(x_4, x_5); +x_9 = 1; +x_10 = lean_usize_shift_left(x_9, x_5); +x_11 = lean_usize_sub(x_10, x_9); +x_12 = lean_usize_land(x_4, x_11); +x_13 = 5; +x_14 = lean_usize_sub(x_5, x_13); +x_15 = lean_usize_to_nat(x_8); +x_16 = lean_array_get_size(x_7); +x_17 = lean_nat_dec_lt(x_15, x_16); +lean_dec(x_16); +if (x_17 == 0) +{ +lean_dec(x_15); +lean_dec(x_1); +return x_3; +} +else +{ +lean_object* x_18; lean_object* x_19; lean_object* x_20; lean_object* x_21; lean_object* x_22; +x_18 = lean_array_fget(x_7, x_15); +x_19 = lean_box(0); +x_20 = lean_array_fset(x_7, x_15, x_19); +x_21 = l_Lean_PersistentArray_modifyAux___at_Lean_Meta_Grind_Arith_CommRing_addToBasisCore___spec__2(x_1, x_2, x_18, x_12, x_14); +x_22 = lean_array_fset(x_20, x_15, x_21); +lean_dec(x_15); +lean_ctor_set(x_3, 0, x_22); +return x_3; +} +} else { -lean_object* x_86; lean_object* x_87; lean_object* x_88; lean_object* x_89; lean_object* x_90; uint8_t x_91; -x_86 = lean_ctor_get(x_23, 0); -x_87 = lean_ctor_get(x_23, 1); -x_88 = lean_ctor_get(x_23, 2); -x_89 = lean_ctor_get(x_23, 3); -lean_inc(x_89); -lean_inc(x_88); -lean_inc(x_87); -lean_inc(x_86); -lean_dec(x_23); -x_90 = lean_array_get_size(x_86); -x_91 = lean_nat_dec_lt(x_19, x_90); -lean_dec(x_90); -if (x_91 == 0) +lean_object* x_23; size_t x_24; size_t x_25; size_t x_26; size_t x_27; size_t x_28; size_t x_29; size_t x_30; lean_object* x_31; lean_object* x_32; uint8_t x_33; +x_23 = lean_ctor_get(x_3, 0); +lean_inc(x_23); +lean_dec(x_3); +x_24 = lean_usize_shift_right(x_4, x_5); +x_25 = 1; +x_26 = lean_usize_shift_left(x_25, x_5); +x_27 = lean_usize_sub(x_26, x_25); +x_28 = lean_usize_land(x_4, x_27); +x_29 = 5; +x_30 = lean_usize_sub(x_5, x_29); +x_31 = lean_usize_to_nat(x_24); +x_32 = lean_array_get_size(x_23); +x_33 = lean_nat_dec_lt(x_31, x_32); +lean_dec(x_32); +if (x_33 == 0) { -lean_object* x_92; lean_object* x_93; lean_object* x_94; lean_object* x_95; lean_object* x_96; lean_object* x_97; -lean_dec(x_18); +lean_object* x_34; +lean_dec(x_31); lean_dec(x_1); -x_92 = lean_alloc_ctor(0, 4, 0); -lean_ctor_set(x_92, 0, x_86); -lean_ctor_set(x_92, 1, x_87); -lean_ctor_set(x_92, 2, x_88); -lean_ctor_set(x_92, 3, x_89); -lean_ctor_set(x_22, 2, x_92); -x_93 = lean_st_ref_set(x_3, x_21, x_24); -x_94 = lean_ctor_get(x_93, 1); -lean_inc(x_94); -if (lean_is_exclusive(x_93)) { - lean_ctor_release(x_93, 0); - lean_ctor_release(x_93, 1); - x_95 = x_93; -} else { - lean_dec_ref(x_93); - x_95 = lean_box(0); +x_34 = lean_alloc_ctor(0, 1, 0); +lean_ctor_set(x_34, 0, x_23); +return x_34; +} +else +{ +lean_object* x_35; lean_object* x_36; lean_object* x_37; lean_object* x_38; 
lean_object* x_39; lean_object* x_40; +x_35 = lean_array_fget(x_23, x_31); +x_36 = lean_box(0); +x_37 = lean_array_fset(x_23, x_31, x_36); +x_38 = l_Lean_PersistentArray_modifyAux___at_Lean_Meta_Grind_Arith_CommRing_addToBasisCore___spec__2(x_1, x_2, x_35, x_28, x_30); +x_39 = lean_array_fset(x_37, x_31, x_38); +lean_dec(x_31); +x_40 = lean_alloc_ctor(0, 1, 0); +lean_ctor_set(x_40, 0, x_39); +return x_40; } -x_96 = lean_box(0); -if (lean_is_scalar(x_95)) { - x_97 = lean_alloc_ctor(0, 2, 0); -} else { - x_97 = x_95; } -lean_ctor_set(x_97, 0, x_96); -lean_ctor_set(x_97, 1, x_94); -return x_97; } else { -lean_object* x_98; lean_object* x_99; lean_object* x_100; lean_object* x_101; lean_object* x_102; lean_object* x_103; lean_object* x_104; lean_object* x_105; lean_object* x_106; lean_object* x_107; lean_object* x_108; lean_object* x_109; lean_object* x_110; lean_object* x_111; lean_object* x_112; lean_object* x_113; lean_object* x_114; lean_object* x_115; lean_object* x_116; lean_object* x_117; lean_object* x_118; lean_object* x_119; lean_object* x_120; lean_object* x_121; lean_object* x_122; lean_object* x_123; lean_object* x_124; lean_object* x_125; uint8_t x_126; lean_object* x_127; lean_object* x_128; lean_object* x_129; lean_object* x_130; lean_object* x_131; lean_object* x_132; lean_object* x_133; -x_98 = lean_array_fget(x_86, x_19); -x_99 = lean_box(0); -x_100 = lean_array_fset(x_86, x_19, x_99); -x_101 = lean_ctor_get(x_98, 0); -lean_inc(x_101); -x_102 = lean_ctor_get(x_98, 1); -lean_inc(x_102); -x_103 = lean_ctor_get(x_98, 2); -lean_inc(x_103); -x_104 = lean_ctor_get(x_98, 3); -lean_inc(x_104); -x_105 = lean_ctor_get(x_98, 4); -lean_inc(x_105); -x_106 = lean_ctor_get(x_98, 5); -lean_inc(x_106); -x_107 = lean_ctor_get(x_98, 6); -lean_inc(x_107); -x_108 = lean_ctor_get(x_98, 7); -lean_inc(x_108); -x_109 = lean_ctor_get(x_98, 8); -lean_inc(x_109); -x_110 = lean_ctor_get(x_98, 9); -lean_inc(x_110); -x_111 = lean_ctor_get(x_98, 10); -lean_inc(x_111); -x_112 = lean_ctor_get(x_98, 11); -lean_inc(x_112); -x_113 = lean_ctor_get(x_98, 12); -lean_inc(x_113); -x_114 = lean_ctor_get(x_98, 13); -lean_inc(x_114); -x_115 = lean_ctor_get(x_98, 14); -lean_inc(x_115); -x_116 = lean_ctor_get(x_98, 15); -lean_inc(x_116); -x_117 = lean_ctor_get(x_98, 16); -lean_inc(x_117); -x_118 = lean_ctor_get(x_98, 17); -lean_inc(x_118); -x_119 = lean_ctor_get(x_98, 18); -lean_inc(x_119); -x_120 = lean_ctor_get(x_98, 19); -lean_inc(x_120); -x_121 = lean_ctor_get(x_98, 20); -lean_inc(x_121); -if (lean_is_exclusive(x_98)) { - lean_ctor_release(x_98, 0); - lean_ctor_release(x_98, 1); - lean_ctor_release(x_98, 2); - lean_ctor_release(x_98, 3); - lean_ctor_release(x_98, 4); - lean_ctor_release(x_98, 5); - lean_ctor_release(x_98, 6); - lean_ctor_release(x_98, 7); - lean_ctor_release(x_98, 8); - lean_ctor_release(x_98, 9); - lean_ctor_release(x_98, 10); - lean_ctor_release(x_98, 11); - lean_ctor_release(x_98, 12); - lean_ctor_release(x_98, 13); - lean_ctor_release(x_98, 14); - lean_ctor_release(x_98, 15); - lean_ctor_release(x_98, 16); - lean_ctor_release(x_98, 17); - lean_ctor_release(x_98, 18); - lean_ctor_release(x_98, 19); - lean_ctor_release(x_98, 20); - x_122 = x_98; -} else { - lean_dec_ref(x_98); - x_122 = lean_box(0); -} -x_123 = lean_box(0); -x_124 = lean_ctor_get(x_18, 0); -lean_inc(x_124); -lean_dec(x_18); -x_125 = l_Lean_PersistentArray_modify___at_Lean_Meta_Grind_Arith_CommRing_addToBasisCore___spec__1(x_1, x_123, x_120, x_124); -lean_dec(x_124); -x_126 = 1; -if (lean_is_scalar(x_122)) { - x_127 = 
lean_alloc_ctor(0, 21, 1); -} else { - x_127 = x_122; -} -lean_ctor_set(x_127, 0, x_101); -lean_ctor_set(x_127, 1, x_102); -lean_ctor_set(x_127, 2, x_103); -lean_ctor_set(x_127, 3, x_104); -lean_ctor_set(x_127, 4, x_105); -lean_ctor_set(x_127, 5, x_106); -lean_ctor_set(x_127, 6, x_107); -lean_ctor_set(x_127, 7, x_108); -lean_ctor_set(x_127, 8, x_109); -lean_ctor_set(x_127, 9, x_110); -lean_ctor_set(x_127, 10, x_111); -lean_ctor_set(x_127, 11, x_112); -lean_ctor_set(x_127, 12, x_113); -lean_ctor_set(x_127, 13, x_114); -lean_ctor_set(x_127, 14, x_115); -lean_ctor_set(x_127, 15, x_116); -lean_ctor_set(x_127, 16, x_117); -lean_ctor_set(x_127, 17, x_118); -lean_ctor_set(x_127, 18, x_119); -lean_ctor_set(x_127, 19, x_125); -lean_ctor_set(x_127, 20, x_121); -lean_ctor_set_uint8(x_127, sizeof(void*)*21, x_126); -x_128 = lean_array_fset(x_100, x_19, x_127); -x_129 = lean_alloc_ctor(0, 4, 0); -lean_ctor_set(x_129, 0, x_128); -lean_ctor_set(x_129, 1, x_87); -lean_ctor_set(x_129, 2, x_88); -lean_ctor_set(x_129, 3, x_89); -lean_ctor_set(x_22, 2, x_129); -x_130 = lean_st_ref_set(x_3, x_21, x_24); -x_131 = lean_ctor_get(x_130, 1); -lean_inc(x_131); -if (lean_is_exclusive(x_130)) { - lean_ctor_release(x_130, 0); - lean_ctor_release(x_130, 1); - x_132 = x_130; -} else { - lean_dec_ref(x_130); - x_132 = lean_box(0); -} -if (lean_is_scalar(x_132)) { - x_133 = lean_alloc_ctor(0, 2, 0); -} else { - x_133 = x_132; -} -lean_ctor_set(x_133, 0, x_99); -lean_ctor_set(x_133, 1, x_131); -return x_133; +uint8_t x_41; +x_41 = !lean_is_exclusive(x_3); +if (x_41 == 0) +{ +lean_object* x_42; lean_object* x_43; lean_object* x_44; uint8_t x_45; +x_42 = lean_ctor_get(x_3, 0); +x_43 = lean_usize_to_nat(x_4); +x_44 = lean_array_get_size(x_42); +x_45 = lean_nat_dec_lt(x_43, x_44); +lean_dec(x_44); +if (x_45 == 0) +{ +lean_dec(x_43); +lean_dec(x_1); +return x_3; } +else +{ +lean_object* x_46; lean_object* x_47; lean_object* x_48; lean_object* x_49; lean_object* x_50; +x_46 = lean_array_fget(x_42, x_43); +x_47 = lean_box(0); +x_48 = lean_array_fset(x_42, x_43, x_47); +x_49 = l___private_Lean_Meta_Tactic_Grind_Arith_CommRing_EqCnstr_0__Lean_Meta_Grind_Arith_CommRing_addSorted(x_1, x_46); +x_50 = lean_array_fset(x_48, x_43, x_49); +lean_dec(x_43); +lean_ctor_set(x_3, 0, x_50); +return x_3; } } else { -lean_object* x_134; lean_object* x_135; lean_object* x_136; lean_object* x_137; lean_object* x_138; lean_object* x_139; lean_object* x_140; lean_object* x_141; uint8_t x_142; -x_134 = lean_ctor_get(x_22, 0); -x_135 = lean_ctor_get(x_22, 1); -lean_inc(x_135); -lean_inc(x_134); -lean_dec(x_22); -x_136 = lean_ctor_get(x_23, 0); -lean_inc(x_136); -x_137 = lean_ctor_get(x_23, 1); -lean_inc(x_137); -x_138 = lean_ctor_get(x_23, 2); -lean_inc(x_138); -x_139 = lean_ctor_get(x_23, 3); -lean_inc(x_139); -if (lean_is_exclusive(x_23)) { - lean_ctor_release(x_23, 0); - lean_ctor_release(x_23, 1); - lean_ctor_release(x_23, 2); - lean_ctor_release(x_23, 3); - x_140 = x_23; -} else { - lean_dec_ref(x_23); - x_140 = lean_box(0); -} -x_141 = lean_array_get_size(x_136); -x_142 = lean_nat_dec_lt(x_19, x_141); -lean_dec(x_141); -if (x_142 == 0) +lean_object* x_51; lean_object* x_52; lean_object* x_53; uint8_t x_54; +x_51 = lean_ctor_get(x_3, 0); +lean_inc(x_51); +lean_dec(x_3); +x_52 = lean_usize_to_nat(x_4); +x_53 = lean_array_get_size(x_51); +x_54 = lean_nat_dec_lt(x_52, x_53); +lean_dec(x_53); +if (x_54 == 0) { -lean_object* x_143; lean_object* x_144; lean_object* x_145; lean_object* x_146; lean_object* x_147; lean_object* x_148; lean_object* x_149; 
-lean_dec(x_18); +lean_object* x_55; +lean_dec(x_52); lean_dec(x_1); -if (lean_is_scalar(x_140)) { - x_143 = lean_alloc_ctor(0, 4, 0); -} else { - x_143 = x_140; +x_55 = lean_alloc_ctor(1, 1, 0); +lean_ctor_set(x_55, 0, x_51); +return x_55; } -lean_ctor_set(x_143, 0, x_136); -lean_ctor_set(x_143, 1, x_137); -lean_ctor_set(x_143, 2, x_138); -lean_ctor_set(x_143, 3, x_139); -x_144 = lean_alloc_ctor(0, 3, 0); -lean_ctor_set(x_144, 0, x_134); -lean_ctor_set(x_144, 1, x_135); -lean_ctor_set(x_144, 2, x_143); -lean_ctor_set(x_21, 14, x_144); -x_145 = lean_st_ref_set(x_3, x_21, x_24); -x_146 = lean_ctor_get(x_145, 1); -lean_inc(x_146); -if (lean_is_exclusive(x_145)) { - lean_ctor_release(x_145, 0); - lean_ctor_release(x_145, 1); - x_147 = x_145; -} else { - lean_dec_ref(x_145); - x_147 = lean_box(0); +else +{ +lean_object* x_56; lean_object* x_57; lean_object* x_58; lean_object* x_59; lean_object* x_60; lean_object* x_61; +x_56 = lean_array_fget(x_51, x_52); +x_57 = lean_box(0); +x_58 = lean_array_fset(x_51, x_52, x_57); +x_59 = l___private_Lean_Meta_Tactic_Grind_Arith_CommRing_EqCnstr_0__Lean_Meta_Grind_Arith_CommRing_addSorted(x_1, x_56); +x_60 = lean_array_fset(x_58, x_52, x_59); +lean_dec(x_52); +x_61 = lean_alloc_ctor(1, 1, 0); +lean_ctor_set(x_61, 0, x_60); +return x_61; } -x_148 = lean_box(0); -if (lean_is_scalar(x_147)) { - x_149 = lean_alloc_ctor(0, 2, 0); -} else { - x_149 = x_147; } -lean_ctor_set(x_149, 0, x_148); -lean_ctor_set(x_149, 1, x_146); -return x_149; +} +} +} +LEAN_EXPORT lean_object* l_Lean_PersistentArray_modify___at_Lean_Meta_Grind_Arith_CommRing_addToBasisCore___spec__1(lean_object* x_1, lean_object* x_2, lean_object* x_3, lean_object* x_4) { +_start: +{ +uint8_t x_5; +x_5 = !lean_is_exclusive(x_3); +if (x_5 == 0) +{ +lean_object* x_6; lean_object* x_7; size_t x_8; lean_object* x_9; uint8_t x_10; +x_6 = lean_ctor_get(x_3, 0); +x_7 = lean_ctor_get(x_3, 1); +x_8 = lean_ctor_get_usize(x_3, 4); +x_9 = lean_ctor_get(x_3, 3); +x_10 = lean_nat_dec_le(x_9, x_4); +if (x_10 == 0) +{ +size_t x_11; lean_object* x_12; +x_11 = lean_usize_of_nat(x_4); +x_12 = l_Lean_PersistentArray_modifyAux___at_Lean_Meta_Grind_Arith_CommRing_addToBasisCore___spec__2(x_1, x_2, x_6, x_11, x_8); +lean_ctor_set(x_3, 0, x_12); +return x_3; } else { -lean_object* x_150; lean_object* x_151; lean_object* x_152; lean_object* x_153; lean_object* x_154; lean_object* x_155; lean_object* x_156; lean_object* x_157; lean_object* x_158; lean_object* x_159; lean_object* x_160; lean_object* x_161; lean_object* x_162; lean_object* x_163; lean_object* x_164; lean_object* x_165; lean_object* x_166; lean_object* x_167; lean_object* x_168; lean_object* x_169; lean_object* x_170; lean_object* x_171; lean_object* x_172; lean_object* x_173; lean_object* x_174; lean_object* x_175; lean_object* x_176; lean_object* x_177; uint8_t x_178; lean_object* x_179; lean_object* x_180; lean_object* x_181; lean_object* x_182; lean_object* x_183; lean_object* x_184; lean_object* x_185; lean_object* x_186; -x_150 = lean_array_fget(x_136, x_19); -x_151 = lean_box(0); -x_152 = lean_array_fset(x_136, x_19, x_151); -x_153 = lean_ctor_get(x_150, 0); -lean_inc(x_153); -x_154 = lean_ctor_get(x_150, 1); -lean_inc(x_154); -x_155 = lean_ctor_get(x_150, 2); -lean_inc(x_155); -x_156 = lean_ctor_get(x_150, 3); -lean_inc(x_156); -x_157 = lean_ctor_get(x_150, 4); -lean_inc(x_157); -x_158 = lean_ctor_get(x_150, 5); -lean_inc(x_158); -x_159 = lean_ctor_get(x_150, 6); -lean_inc(x_159); -x_160 = lean_ctor_get(x_150, 7); -lean_inc(x_160); -x_161 = 
lean_ctor_get(x_150, 8); -lean_inc(x_161); -x_162 = lean_ctor_get(x_150, 9); -lean_inc(x_162); -x_163 = lean_ctor_get(x_150, 10); -lean_inc(x_163); -x_164 = lean_ctor_get(x_150, 11); -lean_inc(x_164); -x_165 = lean_ctor_get(x_150, 12); -lean_inc(x_165); -x_166 = lean_ctor_get(x_150, 13); -lean_inc(x_166); -x_167 = lean_ctor_get(x_150, 14); -lean_inc(x_167); -x_168 = lean_ctor_get(x_150, 15); -lean_inc(x_168); -x_169 = lean_ctor_get(x_150, 16); -lean_inc(x_169); -x_170 = lean_ctor_get(x_150, 17); -lean_inc(x_170); -x_171 = lean_ctor_get(x_150, 18); -lean_inc(x_171); -x_172 = lean_ctor_get(x_150, 19); -lean_inc(x_172); -x_173 = lean_ctor_get(x_150, 20); -lean_inc(x_173); -if (lean_is_exclusive(x_150)) { - lean_ctor_release(x_150, 0); - lean_ctor_release(x_150, 1); - lean_ctor_release(x_150, 2); - lean_ctor_release(x_150, 3); - lean_ctor_release(x_150, 4); - lean_ctor_release(x_150, 5); - lean_ctor_release(x_150, 6); - lean_ctor_release(x_150, 7); - lean_ctor_release(x_150, 8); - lean_ctor_release(x_150, 9); - lean_ctor_release(x_150, 10); - lean_ctor_release(x_150, 11); - lean_ctor_release(x_150, 12); - lean_ctor_release(x_150, 13); - lean_ctor_release(x_150, 14); - lean_ctor_release(x_150, 15); - lean_ctor_release(x_150, 16); - lean_ctor_release(x_150, 17); - lean_ctor_release(x_150, 18); - lean_ctor_release(x_150, 19); - lean_ctor_release(x_150, 20); - x_174 = x_150; -} else { - lean_dec_ref(x_150); - x_174 = lean_box(0); +lean_object* x_13; lean_object* x_14; uint8_t x_15; +x_13 = lean_nat_sub(x_4, x_9); +x_14 = lean_array_get_size(x_7); +x_15 = lean_nat_dec_lt(x_13, x_14); +lean_dec(x_14); +if (x_15 == 0) +{ +lean_dec(x_13); +lean_dec(x_1); +return x_3; } -x_175 = lean_box(0); -x_176 = lean_ctor_get(x_18, 0); -lean_inc(x_176); -lean_dec(x_18); -x_177 = l_Lean_PersistentArray_modify___at_Lean_Meta_Grind_Arith_CommRing_addToBasisCore___spec__1(x_1, x_175, x_172, x_176); -lean_dec(x_176); -x_178 = 1; -if (lean_is_scalar(x_174)) { - x_179 = lean_alloc_ctor(0, 21, 1); -} else { - x_179 = x_174; +else +{ +lean_object* x_16; lean_object* x_17; lean_object* x_18; lean_object* x_19; lean_object* x_20; +x_16 = lean_array_fget(x_7, x_13); +x_17 = lean_box(0); +x_18 = lean_array_fset(x_7, x_13, x_17); +x_19 = l___private_Lean_Meta_Tactic_Grind_Arith_CommRing_EqCnstr_0__Lean_Meta_Grind_Arith_CommRing_addSorted(x_1, x_16); +x_20 = lean_array_fset(x_18, x_13, x_19); +lean_dec(x_13); +lean_ctor_set(x_3, 1, x_20); +return x_3; } -lean_ctor_set(x_179, 0, x_153); -lean_ctor_set(x_179, 1, x_154); -lean_ctor_set(x_179, 2, x_155); -lean_ctor_set(x_179, 3, x_156); -lean_ctor_set(x_179, 4, x_157); -lean_ctor_set(x_179, 5, x_158); -lean_ctor_set(x_179, 6, x_159); -lean_ctor_set(x_179, 7, x_160); -lean_ctor_set(x_179, 8, x_161); -lean_ctor_set(x_179, 9, x_162); -lean_ctor_set(x_179, 10, x_163); -lean_ctor_set(x_179, 11, x_164); -lean_ctor_set(x_179, 12, x_165); -lean_ctor_set(x_179, 13, x_166); -lean_ctor_set(x_179, 14, x_167); -lean_ctor_set(x_179, 15, x_168); -lean_ctor_set(x_179, 16, x_169); -lean_ctor_set(x_179, 17, x_170); -lean_ctor_set(x_179, 18, x_171); -lean_ctor_set(x_179, 19, x_177); -lean_ctor_set(x_179, 20, x_173); -lean_ctor_set_uint8(x_179, sizeof(void*)*21, x_178); -x_180 = lean_array_fset(x_152, x_19, x_179); -if (lean_is_scalar(x_140)) { - x_181 = lean_alloc_ctor(0, 4, 0); -} else { - x_181 = x_140; } -lean_ctor_set(x_181, 0, x_180); -lean_ctor_set(x_181, 1, x_137); -lean_ctor_set(x_181, 2, x_138); -lean_ctor_set(x_181, 3, x_139); -x_182 = lean_alloc_ctor(0, 3, 0); -lean_ctor_set(x_182, 0, 
x_134); -lean_ctor_set(x_182, 1, x_135); -lean_ctor_set(x_182, 2, x_181); -lean_ctor_set(x_21, 14, x_182); -x_183 = lean_st_ref_set(x_3, x_21, x_24); -x_184 = lean_ctor_get(x_183, 1); -lean_inc(x_184); -if (lean_is_exclusive(x_183)) { - lean_ctor_release(x_183, 0); - lean_ctor_release(x_183, 1); - x_185 = x_183; -} else { - lean_dec_ref(x_183); - x_185 = lean_box(0); } -if (lean_is_scalar(x_185)) { - x_186 = lean_alloc_ctor(0, 2, 0); -} else { - x_186 = x_185; +else +{ +lean_object* x_21; lean_object* x_22; lean_object* x_23; size_t x_24; lean_object* x_25; uint8_t x_26; +x_21 = lean_ctor_get(x_3, 0); +x_22 = lean_ctor_get(x_3, 1); +x_23 = lean_ctor_get(x_3, 2); +x_24 = lean_ctor_get_usize(x_3, 4); +x_25 = lean_ctor_get(x_3, 3); +lean_inc(x_25); +lean_inc(x_23); +lean_inc(x_22); +lean_inc(x_21); +lean_dec(x_3); +x_26 = lean_nat_dec_le(x_25, x_4); +if (x_26 == 0) +{ +size_t x_27; lean_object* x_28; lean_object* x_29; +x_27 = lean_usize_of_nat(x_4); +x_28 = l_Lean_PersistentArray_modifyAux___at_Lean_Meta_Grind_Arith_CommRing_addToBasisCore___spec__2(x_1, x_2, x_21, x_27, x_24); +x_29 = lean_alloc_ctor(0, 4, sizeof(size_t)*1); +lean_ctor_set(x_29, 0, x_28); +lean_ctor_set(x_29, 1, x_22); +lean_ctor_set(x_29, 2, x_23); +lean_ctor_set(x_29, 3, x_25); +lean_ctor_set_usize(x_29, 4, x_24); +return x_29; } -lean_ctor_set(x_186, 0, x_151); -lean_ctor_set(x_186, 1, x_184); -return x_186; +else +{ +lean_object* x_30; lean_object* x_31; uint8_t x_32; +x_30 = lean_nat_sub(x_4, x_25); +x_31 = lean_array_get_size(x_22); +x_32 = lean_nat_dec_lt(x_30, x_31); +lean_dec(x_31); +if (x_32 == 0) +{ +lean_object* x_33; +lean_dec(x_30); +lean_dec(x_1); +x_33 = lean_alloc_ctor(0, 4, sizeof(size_t)*1); +lean_ctor_set(x_33, 0, x_21); +lean_ctor_set(x_33, 1, x_22); +lean_ctor_set(x_33, 2, x_23); +lean_ctor_set(x_33, 3, x_25); +lean_ctor_set_usize(x_33, 4, x_24); +return x_33; +} +else +{ +lean_object* x_34; lean_object* x_35; lean_object* x_36; lean_object* x_37; lean_object* x_38; lean_object* x_39; +x_34 = lean_array_fget(x_22, x_30); +x_35 = lean_box(0); +x_36 = lean_array_fset(x_22, x_30, x_35); +x_37 = l___private_Lean_Meta_Tactic_Grind_Arith_CommRing_EqCnstr_0__Lean_Meta_Grind_Arith_CommRing_addSorted(x_1, x_34); +x_38 = lean_array_fset(x_36, x_30, x_37); +lean_dec(x_30); +x_39 = lean_alloc_ctor(0, 4, sizeof(size_t)*1); +lean_ctor_set(x_39, 0, x_21); +lean_ctor_set(x_39, 1, x_38); +lean_ctor_set(x_39, 2, x_23); +lean_ctor_set(x_39, 3, x_25); +lean_ctor_set_usize(x_39, 4, x_24); +return x_39; } } } +} +} +LEAN_EXPORT lean_object* l_Lean_Meta_Grind_Arith_CommRing_addToBasisCore(lean_object* x_1, lean_object* x_2, lean_object* x_3, lean_object* x_4, lean_object* x_5, lean_object* x_6, lean_object* x_7, lean_object* x_8, lean_object* x_9, lean_object* x_10, lean_object* x_11) { +_start: +{ +lean_object* x_12; +x_12 = lean_ctor_get(x_1, 0); +lean_inc(x_12); +if (lean_obj_tag(x_12) == 0) +{ +lean_object* x_13; lean_object* x_14; +lean_dec(x_12); +lean_dec(x_1); +x_13 = lean_box(0); +x_14 = lean_alloc_ctor(0, 2, 0); +lean_ctor_set(x_14, 0, x_13); +lean_ctor_set(x_14, 1, x_11); +return x_14; +} else { -lean_object* x_187; lean_object* x_188; lean_object* x_189; lean_object* x_190; lean_object* x_191; lean_object* x_192; lean_object* x_193; lean_object* x_194; uint8_t x_195; lean_object* x_196; lean_object* x_197; lean_object* x_198; lean_object* x_199; lean_object* x_200; lean_object* x_201; lean_object* x_202; lean_object* x_203; lean_object* x_204; lean_object* x_205; lean_object* x_206; lean_object* x_207; 
lean_object* x_208; lean_object* x_209; lean_object* x_210; lean_object* x_211; uint8_t x_212; -x_187 = lean_ctor_get(x_21, 0); -x_188 = lean_ctor_get(x_21, 1); -x_189 = lean_ctor_get(x_21, 2); -x_190 = lean_ctor_get(x_21, 3); -x_191 = lean_ctor_get(x_21, 4); -x_192 = lean_ctor_get(x_21, 5); -x_193 = lean_ctor_get(x_21, 6); -x_194 = lean_ctor_get(x_21, 7); -x_195 = lean_ctor_get_uint8(x_21, sizeof(void*)*16); -x_196 = lean_ctor_get(x_21, 8); -x_197 = lean_ctor_get(x_21, 9); -x_198 = lean_ctor_get(x_21, 10); -x_199 = lean_ctor_get(x_21, 11); -x_200 = lean_ctor_get(x_21, 12); -x_201 = lean_ctor_get(x_21, 13); -x_202 = lean_ctor_get(x_21, 15); -lean_inc(x_202); -lean_inc(x_201); -lean_inc(x_200); -lean_inc(x_199); -lean_inc(x_198); -lean_inc(x_197); -lean_inc(x_196); -lean_inc(x_194); -lean_inc(x_193); -lean_inc(x_192); -lean_inc(x_191); -lean_inc(x_190); -lean_inc(x_189); -lean_inc(x_188); -lean_inc(x_187); -lean_dec(x_21); -x_203 = lean_ctor_get(x_22, 0); -lean_inc(x_203); -x_204 = lean_ctor_get(x_22, 1); -lean_inc(x_204); -if (lean_is_exclusive(x_22)) { - lean_ctor_release(x_22, 0); - lean_ctor_release(x_22, 1); - lean_ctor_release(x_22, 2); - x_205 = x_22; -} else { - lean_dec_ref(x_22); - x_205 = lean_box(0); -} -x_206 = lean_ctor_get(x_23, 0); -lean_inc(x_206); -x_207 = lean_ctor_get(x_23, 1); -lean_inc(x_207); -x_208 = lean_ctor_get(x_23, 2); -lean_inc(x_208); -x_209 = lean_ctor_get(x_23, 3); -lean_inc(x_209); -if (lean_is_exclusive(x_23)) { - lean_ctor_release(x_23, 0); - lean_ctor_release(x_23, 1); - lean_ctor_release(x_23, 2); - lean_ctor_release(x_23, 3); - x_210 = x_23; -} else { - lean_dec_ref(x_23); - x_210 = lean_box(0); +lean_object* x_15; +x_15 = lean_ctor_get(x_12, 1); +lean_inc(x_15); +lean_dec(x_12); +if (lean_obj_tag(x_15) == 0) +{ +lean_object* x_16; lean_object* x_17; +lean_dec(x_1); +x_16 = lean_box(0); +x_17 = lean_alloc_ctor(0, 2, 0); +lean_ctor_set(x_17, 0, x_16); +lean_ctor_set(x_17, 1, x_11); +return x_17; } -x_211 = lean_array_get_size(x_206); -x_212 = lean_nat_dec_lt(x_19, x_211); -lean_dec(x_211); -if (x_212 == 0) +else { -lean_object* x_213; lean_object* x_214; lean_object* x_215; lean_object* x_216; lean_object* x_217; lean_object* x_218; lean_object* x_219; lean_object* x_220; +lean_object* x_18; lean_object* x_19; lean_object* x_20; lean_object* x_21; lean_object* x_22; lean_object* x_23; lean_object* x_24; uint8_t x_25; +x_18 = lean_ctor_get(x_15, 0); +lean_inc(x_18); +lean_dec(x_15); +x_19 = lean_ctor_get(x_2, 0); +x_20 = lean_st_ref_take(x_3, x_11); +x_21 = lean_ctor_get(x_20, 0); +lean_inc(x_21); +x_22 = lean_ctor_get(x_21, 14); +lean_inc(x_22); +x_23 = lean_ctor_get(x_22, 2); +lean_inc(x_23); +x_24 = lean_ctor_get(x_20, 1); +lean_inc(x_24); +lean_dec(x_20); +x_25 = !lean_is_exclusive(x_21); +if (x_25 == 0) +{ +lean_object* x_26; uint8_t x_27; +x_26 = lean_ctor_get(x_21, 14); +lean_dec(x_26); +x_27 = !lean_is_exclusive(x_22); +if (x_27 == 0) +{ +lean_object* x_28; uint8_t x_29; +x_28 = lean_ctor_get(x_22, 2); +lean_dec(x_28); +x_29 = !lean_is_exclusive(x_23); +if (x_29 == 0) +{ +lean_object* x_30; lean_object* x_31; uint8_t x_32; +x_30 = lean_ctor_get(x_23, 0); +x_31 = lean_array_get_size(x_30); +x_32 = lean_nat_dec_lt(x_19, x_31); +lean_dec(x_31); +if (x_32 == 0) +{ +lean_object* x_33; uint8_t x_34; lean_dec(x_18); lean_dec(x_1); -if (lean_is_scalar(x_210)) { - x_213 = lean_alloc_ctor(0, 4, 0); -} else { - x_213 = x_210; +x_33 = lean_st_ref_set(x_3, x_21, x_24); +x_34 = !lean_is_exclusive(x_33); +if (x_34 == 0) +{ +lean_object* x_35; lean_object* x_36; 
+x_35 = lean_ctor_get(x_33, 0); +lean_dec(x_35); +x_36 = lean_box(0); +lean_ctor_set(x_33, 0, x_36); +return x_33; } -lean_ctor_set(x_213, 0, x_206); -lean_ctor_set(x_213, 1, x_207); -lean_ctor_set(x_213, 2, x_208); -lean_ctor_set(x_213, 3, x_209); -if (lean_is_scalar(x_205)) { - x_214 = lean_alloc_ctor(0, 3, 0); -} else { - x_214 = x_205; +else +{ +lean_object* x_37; lean_object* x_38; lean_object* x_39; +x_37 = lean_ctor_get(x_33, 1); +lean_inc(x_37); +lean_dec(x_33); +x_38 = lean_box(0); +x_39 = lean_alloc_ctor(0, 2, 0); +lean_ctor_set(x_39, 0, x_38); +lean_ctor_set(x_39, 1, x_37); +return x_39; } -lean_ctor_set(x_214, 0, x_203); -lean_ctor_set(x_214, 1, x_204); -lean_ctor_set(x_214, 2, x_213); -x_215 = lean_alloc_ctor(0, 16, 1); -lean_ctor_set(x_215, 0, x_187); -lean_ctor_set(x_215, 1, x_188); -lean_ctor_set(x_215, 2, x_189); -lean_ctor_set(x_215, 3, x_190); -lean_ctor_set(x_215, 4, x_191); -lean_ctor_set(x_215, 5, x_192); -lean_ctor_set(x_215, 6, x_193); -lean_ctor_set(x_215, 7, x_194); -lean_ctor_set(x_215, 8, x_196); -lean_ctor_set(x_215, 9, x_197); -lean_ctor_set(x_215, 10, x_198); -lean_ctor_set(x_215, 11, x_199); -lean_ctor_set(x_215, 12, x_200); -lean_ctor_set(x_215, 13, x_201); -lean_ctor_set(x_215, 14, x_214); -lean_ctor_set(x_215, 15, x_202); -lean_ctor_set_uint8(x_215, sizeof(void*)*16, x_195); -x_216 = lean_st_ref_set(x_3, x_215, x_24); -x_217 = lean_ctor_get(x_216, 1); -lean_inc(x_217); -if (lean_is_exclusive(x_216)) { - lean_ctor_release(x_216, 0); - lean_ctor_release(x_216, 1); - x_218 = x_216; -} else { - lean_dec_ref(x_216); - x_218 = lean_box(0); } -x_219 = lean_box(0); -if (lean_is_scalar(x_218)) { - x_220 = lean_alloc_ctor(0, 2, 0); -} else { - x_220 = x_218; +else +{ +lean_object* x_40; lean_object* x_41; lean_object* x_42; uint8_t x_43; +x_40 = lean_array_fget(x_30, x_19); +x_41 = lean_box(0); +x_42 = lean_array_fset(x_30, x_19, x_41); +x_43 = !lean_is_exclusive(x_40); +if (x_43 == 0) +{ +lean_object* x_44; lean_object* x_45; lean_object* x_46; lean_object* x_47; uint8_t x_48; lean_object* x_49; lean_object* x_50; uint8_t x_51; +x_44 = lean_ctor_get(x_40, 19); +x_45 = lean_box(0); +x_46 = lean_ctor_get(x_18, 0); +lean_inc(x_46); +lean_dec(x_18); +x_47 = l_Lean_PersistentArray_modify___at_Lean_Meta_Grind_Arith_CommRing_addToBasisCore___spec__1(x_1, x_45, x_44, x_46); +lean_dec(x_46); +x_48 = 1; +lean_ctor_set(x_40, 19, x_47); +lean_ctor_set_uint8(x_40, sizeof(void*)*21, x_48); +x_49 = lean_array_fset(x_42, x_19, x_40); +lean_ctor_set(x_23, 0, x_49); +x_50 = lean_st_ref_set(x_3, x_21, x_24); +x_51 = !lean_is_exclusive(x_50); +if (x_51 == 0) +{ +lean_object* x_52; +x_52 = lean_ctor_get(x_50, 0); +lean_dec(x_52); +lean_ctor_set(x_50, 0, x_41); +return x_50; +} +else +{ +lean_object* x_53; lean_object* x_54; +x_53 = lean_ctor_get(x_50, 1); +lean_inc(x_53); +lean_dec(x_50); +x_54 = lean_alloc_ctor(0, 2, 0); +lean_ctor_set(x_54, 0, x_41); +lean_ctor_set(x_54, 1, x_53); +return x_54; } -lean_ctor_set(x_220, 0, x_219); -lean_ctor_set(x_220, 1, x_217); -return x_220; } else { -lean_object* x_221; lean_object* x_222; lean_object* x_223; lean_object* x_224; lean_object* x_225; lean_object* x_226; lean_object* x_227; lean_object* x_228; lean_object* x_229; lean_object* x_230; lean_object* x_231; lean_object* x_232; lean_object* x_233; lean_object* x_234; lean_object* x_235; lean_object* x_236; lean_object* x_237; lean_object* x_238; lean_object* x_239; lean_object* x_240; lean_object* x_241; lean_object* x_242; lean_object* x_243; lean_object* x_244; lean_object* x_245; 
lean_object* x_246; lean_object* x_247; lean_object* x_248; uint8_t x_249; lean_object* x_250; lean_object* x_251; lean_object* x_252; lean_object* x_253; lean_object* x_254; lean_object* x_255; lean_object* x_256; lean_object* x_257; lean_object* x_258; -x_221 = lean_array_fget(x_206, x_19); -x_222 = lean_box(0); -x_223 = lean_array_fset(x_206, x_19, x_222); -x_224 = lean_ctor_get(x_221, 0); -lean_inc(x_224); -x_225 = lean_ctor_get(x_221, 1); -lean_inc(x_225); -x_226 = lean_ctor_get(x_221, 2); -lean_inc(x_226); -x_227 = lean_ctor_get(x_221, 3); -lean_inc(x_227); -x_228 = lean_ctor_get(x_221, 4); -lean_inc(x_228); -x_229 = lean_ctor_get(x_221, 5); -lean_inc(x_229); -x_230 = lean_ctor_get(x_221, 6); -lean_inc(x_230); -x_231 = lean_ctor_get(x_221, 7); -lean_inc(x_231); -x_232 = lean_ctor_get(x_221, 8); -lean_inc(x_232); -x_233 = lean_ctor_get(x_221, 9); -lean_inc(x_233); -x_234 = lean_ctor_get(x_221, 10); -lean_inc(x_234); -x_235 = lean_ctor_get(x_221, 11); -lean_inc(x_235); -x_236 = lean_ctor_get(x_221, 12); -lean_inc(x_236); -x_237 = lean_ctor_get(x_221, 13); -lean_inc(x_237); -x_238 = lean_ctor_get(x_221, 14); -lean_inc(x_238); -x_239 = lean_ctor_get(x_221, 15); -lean_inc(x_239); -x_240 = lean_ctor_get(x_221, 16); -lean_inc(x_240); -x_241 = lean_ctor_get(x_221, 17); -lean_inc(x_241); -x_242 = lean_ctor_get(x_221, 18); -lean_inc(x_242); -x_243 = lean_ctor_get(x_221, 19); -lean_inc(x_243); -x_244 = lean_ctor_get(x_221, 20); -lean_inc(x_244); -if (lean_is_exclusive(x_221)) { - lean_ctor_release(x_221, 0); - lean_ctor_release(x_221, 1); - lean_ctor_release(x_221, 2); - lean_ctor_release(x_221, 3); - lean_ctor_release(x_221, 4); - lean_ctor_release(x_221, 5); - lean_ctor_release(x_221, 6); - lean_ctor_release(x_221, 7); - lean_ctor_release(x_221, 8); - lean_ctor_release(x_221, 9); - lean_ctor_release(x_221, 10); - lean_ctor_release(x_221, 11); - lean_ctor_release(x_221, 12); - lean_ctor_release(x_221, 13); - lean_ctor_release(x_221, 14); - lean_ctor_release(x_221, 15); - lean_ctor_release(x_221, 16); - lean_ctor_release(x_221, 17); - lean_ctor_release(x_221, 18); - lean_ctor_release(x_221, 19); - lean_ctor_release(x_221, 20); - x_245 = x_221; -} else { - lean_dec_ref(x_221); - x_245 = lean_box(0); -} -x_246 = lean_box(0); -x_247 = lean_ctor_get(x_18, 0); -lean_inc(x_247); +lean_object* x_55; lean_object* x_56; lean_object* x_57; lean_object* x_58; lean_object* x_59; lean_object* x_60; lean_object* x_61; lean_object* x_62; lean_object* x_63; lean_object* x_64; lean_object* x_65; lean_object* x_66; lean_object* x_67; lean_object* x_68; lean_object* x_69; lean_object* x_70; lean_object* x_71; lean_object* x_72; lean_object* x_73; lean_object* x_74; lean_object* x_75; lean_object* x_76; lean_object* x_77; lean_object* x_78; uint8_t x_79; lean_object* x_80; lean_object* x_81; lean_object* x_82; lean_object* x_83; lean_object* x_84; lean_object* x_85; +x_55 = lean_ctor_get(x_40, 0); +x_56 = lean_ctor_get(x_40, 1); +x_57 = lean_ctor_get(x_40, 2); +x_58 = lean_ctor_get(x_40, 3); +x_59 = lean_ctor_get(x_40, 4); +x_60 = lean_ctor_get(x_40, 5); +x_61 = lean_ctor_get(x_40, 6); +x_62 = lean_ctor_get(x_40, 7); +x_63 = lean_ctor_get(x_40, 8); +x_64 = lean_ctor_get(x_40, 9); +x_65 = lean_ctor_get(x_40, 10); +x_66 = lean_ctor_get(x_40, 11); +x_67 = lean_ctor_get(x_40, 12); +x_68 = lean_ctor_get(x_40, 13); +x_69 = lean_ctor_get(x_40, 14); +x_70 = lean_ctor_get(x_40, 15); +x_71 = lean_ctor_get(x_40, 16); +x_72 = lean_ctor_get(x_40, 17); +x_73 = lean_ctor_get(x_40, 18); +x_74 = lean_ctor_get(x_40, 19); +x_75 = 
lean_ctor_get(x_40, 20); +lean_inc(x_75); +lean_inc(x_74); +lean_inc(x_73); +lean_inc(x_72); +lean_inc(x_71); +lean_inc(x_70); +lean_inc(x_69); +lean_inc(x_68); +lean_inc(x_67); +lean_inc(x_66); +lean_inc(x_65); +lean_inc(x_64); +lean_inc(x_63); +lean_inc(x_62); +lean_inc(x_61); +lean_inc(x_60); +lean_inc(x_59); +lean_inc(x_58); +lean_inc(x_57); +lean_inc(x_56); +lean_inc(x_55); +lean_dec(x_40); +x_76 = lean_box(0); +x_77 = lean_ctor_get(x_18, 0); +lean_inc(x_77); lean_dec(x_18); -x_248 = l_Lean_PersistentArray_modify___at_Lean_Meta_Grind_Arith_CommRing_addToBasisCore___spec__1(x_1, x_246, x_243, x_247); -lean_dec(x_247); -x_249 = 1; -if (lean_is_scalar(x_245)) { - x_250 = lean_alloc_ctor(0, 21, 1); -} else { - x_250 = x_245; -} -lean_ctor_set(x_250, 0, x_224); -lean_ctor_set(x_250, 1, x_225); -lean_ctor_set(x_250, 2, x_226); -lean_ctor_set(x_250, 3, x_227); -lean_ctor_set(x_250, 4, x_228); -lean_ctor_set(x_250, 5, x_229); -lean_ctor_set(x_250, 6, x_230); -lean_ctor_set(x_250, 7, x_231); -lean_ctor_set(x_250, 8, x_232); -lean_ctor_set(x_250, 9, x_233); -lean_ctor_set(x_250, 10, x_234); -lean_ctor_set(x_250, 11, x_235); -lean_ctor_set(x_250, 12, x_236); -lean_ctor_set(x_250, 13, x_237); -lean_ctor_set(x_250, 14, x_238); -lean_ctor_set(x_250, 15, x_239); -lean_ctor_set(x_250, 16, x_240); -lean_ctor_set(x_250, 17, x_241); -lean_ctor_set(x_250, 18, x_242); -lean_ctor_set(x_250, 19, x_248); -lean_ctor_set(x_250, 20, x_244); -lean_ctor_set_uint8(x_250, sizeof(void*)*21, x_249); -x_251 = lean_array_fset(x_223, x_19, x_250); -if (lean_is_scalar(x_210)) { - x_252 = lean_alloc_ctor(0, 4, 0); -} else { - x_252 = x_210; -} -lean_ctor_set(x_252, 0, x_251); -lean_ctor_set(x_252, 1, x_207); -lean_ctor_set(x_252, 2, x_208); -lean_ctor_set(x_252, 3, x_209); -if (lean_is_scalar(x_205)) { - x_253 = lean_alloc_ctor(0, 3, 0); -} else { - x_253 = x_205; -} -lean_ctor_set(x_253, 0, x_203); -lean_ctor_set(x_253, 1, x_204); -lean_ctor_set(x_253, 2, x_252); -x_254 = lean_alloc_ctor(0, 16, 1); -lean_ctor_set(x_254, 0, x_187); -lean_ctor_set(x_254, 1, x_188); -lean_ctor_set(x_254, 2, x_189); -lean_ctor_set(x_254, 3, x_190); -lean_ctor_set(x_254, 4, x_191); -lean_ctor_set(x_254, 5, x_192); -lean_ctor_set(x_254, 6, x_193); -lean_ctor_set(x_254, 7, x_194); -lean_ctor_set(x_254, 8, x_196); -lean_ctor_set(x_254, 9, x_197); -lean_ctor_set(x_254, 10, x_198); -lean_ctor_set(x_254, 11, x_199); -lean_ctor_set(x_254, 12, x_200); -lean_ctor_set(x_254, 13, x_201); -lean_ctor_set(x_254, 14, x_253); -lean_ctor_set(x_254, 15, x_202); -lean_ctor_set_uint8(x_254, sizeof(void*)*16, x_195); -x_255 = lean_st_ref_set(x_3, x_254, x_24); -x_256 = lean_ctor_get(x_255, 1); -lean_inc(x_256); -if (lean_is_exclusive(x_255)) { - lean_ctor_release(x_255, 0); - lean_ctor_release(x_255, 1); - x_257 = x_255; +x_78 = l_Lean_PersistentArray_modify___at_Lean_Meta_Grind_Arith_CommRing_addToBasisCore___spec__1(x_1, x_76, x_74, x_77); +lean_dec(x_77); +x_79 = 1; +x_80 = lean_alloc_ctor(0, 21, 1); +lean_ctor_set(x_80, 0, x_55); +lean_ctor_set(x_80, 1, x_56); +lean_ctor_set(x_80, 2, x_57); +lean_ctor_set(x_80, 3, x_58); +lean_ctor_set(x_80, 4, x_59); +lean_ctor_set(x_80, 5, x_60); +lean_ctor_set(x_80, 6, x_61); +lean_ctor_set(x_80, 7, x_62); +lean_ctor_set(x_80, 8, x_63); +lean_ctor_set(x_80, 9, x_64); +lean_ctor_set(x_80, 10, x_65); +lean_ctor_set(x_80, 11, x_66); +lean_ctor_set(x_80, 12, x_67); +lean_ctor_set(x_80, 13, x_68); +lean_ctor_set(x_80, 14, x_69); +lean_ctor_set(x_80, 15, x_70); +lean_ctor_set(x_80, 16, x_71); +lean_ctor_set(x_80, 17, x_72); 
+lean_ctor_set(x_80, 18, x_73); +lean_ctor_set(x_80, 19, x_78); +lean_ctor_set(x_80, 20, x_75); +lean_ctor_set_uint8(x_80, sizeof(void*)*21, x_79); +x_81 = lean_array_fset(x_42, x_19, x_80); +lean_ctor_set(x_23, 0, x_81); +x_82 = lean_st_ref_set(x_3, x_21, x_24); +x_83 = lean_ctor_get(x_82, 1); +lean_inc(x_83); +if (lean_is_exclusive(x_82)) { + lean_ctor_release(x_82, 0); + lean_ctor_release(x_82, 1); + x_84 = x_82; } else { - lean_dec_ref(x_255); - x_257 = lean_box(0); + lean_dec_ref(x_82); + x_84 = lean_box(0); } -if (lean_is_scalar(x_257)) { - x_258 = lean_alloc_ctor(0, 2, 0); +if (lean_is_scalar(x_84)) { + x_85 = lean_alloc_ctor(0, 2, 0); } else { - x_258 = x_257; + x_85 = x_84; } -lean_ctor_set(x_258, 0, x_222); -lean_ctor_set(x_258, 1, x_256); -return x_258; +lean_ctor_set(x_85, 0, x_41); +lean_ctor_set(x_85, 1, x_83); +return x_85; } } } +else +{ +lean_object* x_86; lean_object* x_87; lean_object* x_88; lean_object* x_89; lean_object* x_90; uint8_t x_91; +x_86 = lean_ctor_get(x_23, 0); +x_87 = lean_ctor_get(x_23, 1); +x_88 = lean_ctor_get(x_23, 2); +x_89 = lean_ctor_get(x_23, 3); +lean_inc(x_89); +lean_inc(x_88); +lean_inc(x_87); +lean_inc(x_86); +lean_dec(x_23); +x_90 = lean_array_get_size(x_86); +x_91 = lean_nat_dec_lt(x_19, x_90); +lean_dec(x_90); +if (x_91 == 0) +{ +lean_object* x_92; lean_object* x_93; lean_object* x_94; lean_object* x_95; lean_object* x_96; lean_object* x_97; +lean_dec(x_18); +lean_dec(x_1); +x_92 = lean_alloc_ctor(0, 4, 0); +lean_ctor_set(x_92, 0, x_86); +lean_ctor_set(x_92, 1, x_87); +lean_ctor_set(x_92, 2, x_88); +lean_ctor_set(x_92, 3, x_89); +lean_ctor_set(x_22, 2, x_92); +x_93 = lean_st_ref_set(x_3, x_21, x_24); +x_94 = lean_ctor_get(x_93, 1); +lean_inc(x_94); +if (lean_is_exclusive(x_93)) { + lean_ctor_release(x_93, 0); + lean_ctor_release(x_93, 1); + x_95 = x_93; +} else { + lean_dec_ref(x_93); + x_95 = lean_box(0); } +x_96 = lean_box(0); +if (lean_is_scalar(x_95)) { + x_97 = lean_alloc_ctor(0, 2, 0); +} else { + x_97 = x_95; } +lean_ctor_set(x_97, 0, x_96); +lean_ctor_set(x_97, 1, x_94); +return x_97; } -LEAN_EXPORT lean_object* l_Lean_PersistentArray_modifyAux___at_Lean_Meta_Grind_Arith_CommRing_addToBasisCore___spec__2___boxed(lean_object* x_1, lean_object* x_2, lean_object* x_3, lean_object* x_4, lean_object* x_5) { -_start: +else { -size_t x_6; size_t x_7; lean_object* x_8; -x_6 = lean_unbox_usize(x_4); -lean_dec(x_4); -x_7 = lean_unbox_usize(x_5); -lean_dec(x_5); -x_8 = l_Lean_PersistentArray_modifyAux___at_Lean_Meta_Grind_Arith_CommRing_addToBasisCore___spec__2(x_1, x_2, x_3, x_6, x_7); -lean_dec(x_2); -return x_8; -} +lean_object* x_98; lean_object* x_99; lean_object* x_100; lean_object* x_101; lean_object* x_102; lean_object* x_103; lean_object* x_104; lean_object* x_105; lean_object* x_106; lean_object* x_107; lean_object* x_108; lean_object* x_109; lean_object* x_110; lean_object* x_111; lean_object* x_112; lean_object* x_113; lean_object* x_114; lean_object* x_115; lean_object* x_116; lean_object* x_117; lean_object* x_118; lean_object* x_119; lean_object* x_120; lean_object* x_121; lean_object* x_122; lean_object* x_123; lean_object* x_124; lean_object* x_125; uint8_t x_126; lean_object* x_127; lean_object* x_128; lean_object* x_129; lean_object* x_130; lean_object* x_131; lean_object* x_132; lean_object* x_133; +x_98 = lean_array_fget(x_86, x_19); +x_99 = lean_box(0); +x_100 = lean_array_fset(x_86, x_19, x_99); +x_101 = lean_ctor_get(x_98, 0); +lean_inc(x_101); +x_102 = lean_ctor_get(x_98, 1); +lean_inc(x_102); +x_103 = 
lean_ctor_get(x_98, 2); +lean_inc(x_103); +x_104 = lean_ctor_get(x_98, 3); +lean_inc(x_104); +x_105 = lean_ctor_get(x_98, 4); +lean_inc(x_105); +x_106 = lean_ctor_get(x_98, 5); +lean_inc(x_106); +x_107 = lean_ctor_get(x_98, 6); +lean_inc(x_107); +x_108 = lean_ctor_get(x_98, 7); +lean_inc(x_108); +x_109 = lean_ctor_get(x_98, 8); +lean_inc(x_109); +x_110 = lean_ctor_get(x_98, 9); +lean_inc(x_110); +x_111 = lean_ctor_get(x_98, 10); +lean_inc(x_111); +x_112 = lean_ctor_get(x_98, 11); +lean_inc(x_112); +x_113 = lean_ctor_get(x_98, 12); +lean_inc(x_113); +x_114 = lean_ctor_get(x_98, 13); +lean_inc(x_114); +x_115 = lean_ctor_get(x_98, 14); +lean_inc(x_115); +x_116 = lean_ctor_get(x_98, 15); +lean_inc(x_116); +x_117 = lean_ctor_get(x_98, 16); +lean_inc(x_117); +x_118 = lean_ctor_get(x_98, 17); +lean_inc(x_118); +x_119 = lean_ctor_get(x_98, 18); +lean_inc(x_119); +x_120 = lean_ctor_get(x_98, 19); +lean_inc(x_120); +x_121 = lean_ctor_get(x_98, 20); +lean_inc(x_121); +if (lean_is_exclusive(x_98)) { + lean_ctor_release(x_98, 0); + lean_ctor_release(x_98, 1); + lean_ctor_release(x_98, 2); + lean_ctor_release(x_98, 3); + lean_ctor_release(x_98, 4); + lean_ctor_release(x_98, 5); + lean_ctor_release(x_98, 6); + lean_ctor_release(x_98, 7); + lean_ctor_release(x_98, 8); + lean_ctor_release(x_98, 9); + lean_ctor_release(x_98, 10); + lean_ctor_release(x_98, 11); + lean_ctor_release(x_98, 12); + lean_ctor_release(x_98, 13); + lean_ctor_release(x_98, 14); + lean_ctor_release(x_98, 15); + lean_ctor_release(x_98, 16); + lean_ctor_release(x_98, 17); + lean_ctor_release(x_98, 18); + lean_ctor_release(x_98, 19); + lean_ctor_release(x_98, 20); + x_122 = x_98; +} else { + lean_dec_ref(x_98); + x_122 = lean_box(0); } -LEAN_EXPORT lean_object* l_Lean_PersistentArray_modify___at_Lean_Meta_Grind_Arith_CommRing_addToBasisCore___spec__1___boxed(lean_object* x_1, lean_object* x_2, lean_object* x_3, lean_object* x_4) { -_start: -{ -lean_object* x_5; -x_5 = l_Lean_PersistentArray_modify___at_Lean_Meta_Grind_Arith_CommRing_addToBasisCore___spec__1(x_1, x_2, x_3, x_4); -lean_dec(x_4); -lean_dec(x_2); -return x_5; +x_123 = lean_box(0); +x_124 = lean_ctor_get(x_18, 0); +lean_inc(x_124); +lean_dec(x_18); +x_125 = l_Lean_PersistentArray_modify___at_Lean_Meta_Grind_Arith_CommRing_addToBasisCore___spec__1(x_1, x_123, x_120, x_124); +lean_dec(x_124); +x_126 = 1; +if (lean_is_scalar(x_122)) { + x_127 = lean_alloc_ctor(0, 21, 1); +} else { + x_127 = x_122; } +lean_ctor_set(x_127, 0, x_101); +lean_ctor_set(x_127, 1, x_102); +lean_ctor_set(x_127, 2, x_103); +lean_ctor_set(x_127, 3, x_104); +lean_ctor_set(x_127, 4, x_105); +lean_ctor_set(x_127, 5, x_106); +lean_ctor_set(x_127, 6, x_107); +lean_ctor_set(x_127, 7, x_108); +lean_ctor_set(x_127, 8, x_109); +lean_ctor_set(x_127, 9, x_110); +lean_ctor_set(x_127, 10, x_111); +lean_ctor_set(x_127, 11, x_112); +lean_ctor_set(x_127, 12, x_113); +lean_ctor_set(x_127, 13, x_114); +lean_ctor_set(x_127, 14, x_115); +lean_ctor_set(x_127, 15, x_116); +lean_ctor_set(x_127, 16, x_117); +lean_ctor_set(x_127, 17, x_118); +lean_ctor_set(x_127, 18, x_119); +lean_ctor_set(x_127, 19, x_125); +lean_ctor_set(x_127, 20, x_121); +lean_ctor_set_uint8(x_127, sizeof(void*)*21, x_126); +x_128 = lean_array_fset(x_100, x_19, x_127); +x_129 = lean_alloc_ctor(0, 4, 0); +lean_ctor_set(x_129, 0, x_128); +lean_ctor_set(x_129, 1, x_87); +lean_ctor_set(x_129, 2, x_88); +lean_ctor_set(x_129, 3, x_89); +lean_ctor_set(x_22, 2, x_129); +x_130 = lean_st_ref_set(x_3, x_21, x_24); +x_131 = lean_ctor_get(x_130, 1); +lean_inc(x_131); +if 
(lean_is_exclusive(x_130)) { + lean_ctor_release(x_130, 0); + lean_ctor_release(x_130, 1); + x_132 = x_130; +} else { + lean_dec_ref(x_130); + x_132 = lean_box(0); } -LEAN_EXPORT lean_object* l_Lean_Meta_Grind_Arith_CommRing_addToBasisCore___boxed(lean_object* x_1, lean_object* x_2, lean_object* x_3, lean_object* x_4, lean_object* x_5, lean_object* x_6, lean_object* x_7, lean_object* x_8, lean_object* x_9, lean_object* x_10, lean_object* x_11) { -_start: -{ -lean_object* x_12; -x_12 = l_Lean_Meta_Grind_Arith_CommRing_addToBasisCore(x_1, x_2, x_3, x_4, x_5, x_6, x_7, x_8, x_9, x_10, x_11); -lean_dec(x_10); -lean_dec(x_9); -lean_dec(x_8); -lean_dec(x_7); -lean_dec(x_6); -lean_dec(x_5); -lean_dec(x_4); -lean_dec(x_3); -lean_dec(x_2); -return x_12; +if (lean_is_scalar(x_132)) { + x_133 = lean_alloc_ctor(0, 2, 0); +} else { + x_133 = x_132; } +lean_ctor_set(x_133, 0, x_99); +lean_ctor_set(x_133, 1, x_131); +return x_133; } -static lean_object* _init_l_List_forIn_x27_loop___at_Lean_Meta_Grind_Arith_CommRing_EqCnstr_simplifyBasis___spec__1___closed__1() { -_start: -{ -lean_object* x_1; lean_object* x_2; -x_1 = lean_box(0); -x_2 = lean_alloc_ctor(1, 1, 0); -lean_ctor_set(x_2, 0, x_1); -return x_2; } } -LEAN_EXPORT lean_object* l_List_forIn_x27_loop___at_Lean_Meta_Grind_Arith_CommRing_EqCnstr_simplifyBasis___spec__1(lean_object* x_1, lean_object* x_2, lean_object* x_3, lean_object* x_4, lean_object* x_5, lean_object* x_6, lean_object* x_7, lean_object* x_8, lean_object* x_9, lean_object* x_10, lean_object* x_11, lean_object* x_12, lean_object* x_13, lean_object* x_14, lean_object* x_15, lean_object* x_16, lean_object* x_17, lean_object* x_18) { -_start: +else { -if (lean_obj_tag(x_6) == 0) +lean_object* x_134; lean_object* x_135; lean_object* x_136; lean_object* x_137; lean_object* x_138; lean_object* x_139; lean_object* x_140; lean_object* x_141; uint8_t x_142; +x_134 = lean_ctor_get(x_22, 0); +x_135 = lean_ctor_get(x_22, 1); +lean_inc(x_135); +lean_inc(x_134); +lean_dec(x_22); +x_136 = lean_ctor_get(x_23, 0); +lean_inc(x_136); +x_137 = lean_ctor_get(x_23, 1); +lean_inc(x_137); +x_138 = lean_ctor_get(x_23, 2); +lean_inc(x_138); +x_139 = lean_ctor_get(x_23, 3); +lean_inc(x_139); +if (lean_is_exclusive(x_23)) { + lean_ctor_release(x_23, 0); + lean_ctor_release(x_23, 1); + lean_ctor_release(x_23, 2); + lean_ctor_release(x_23, 3); + x_140 = x_23; +} else { + lean_dec_ref(x_23); + x_140 = lean_box(0); +} +x_141 = lean_array_get_size(x_136); +x_142 = lean_nat_dec_lt(x_19, x_141); +lean_dec(x_141); +if (x_142 == 0) { -lean_object* x_19; -lean_dec(x_17); -lean_dec(x_16); -lean_dec(x_15); -lean_dec(x_14); -lean_dec(x_13); -lean_dec(x_12); -lean_dec(x_11); -lean_dec(x_10); -lean_dec(x_9); -lean_dec(x_2); +lean_object* x_143; lean_object* x_144; lean_object* x_145; lean_object* x_146; lean_object* x_147; lean_object* x_148; lean_object* x_149; +lean_dec(x_18); lean_dec(x_1); -x_19 = lean_alloc_ctor(0, 2, 0); -lean_ctor_set(x_19, 0, x_7); -lean_ctor_set(x_19, 1, x_18); -return x_19; +if (lean_is_scalar(x_140)) { + x_143 = lean_alloc_ctor(0, 4, 0); +} else { + x_143 = x_140; +} +lean_ctor_set(x_143, 0, x_136); +lean_ctor_set(x_143, 1, x_137); +lean_ctor_set(x_143, 2, x_138); +lean_ctor_set(x_143, 3, x_139); +x_144 = lean_alloc_ctor(0, 3, 0); +lean_ctor_set(x_144, 0, x_134); +lean_ctor_set(x_144, 1, x_135); +lean_ctor_set(x_144, 2, x_143); +lean_ctor_set(x_21, 14, x_144); +x_145 = lean_st_ref_set(x_3, x_21, x_24); +x_146 = lean_ctor_get(x_145, 1); +lean_inc(x_146); +if (lean_is_exclusive(x_145)) { + 
lean_ctor_release(x_145, 0); + lean_ctor_release(x_145, 1); + x_147 = x_145; +} else { + lean_dec_ref(x_145); + x_147 = lean_box(0); +} +x_148 = lean_box(0); +if (lean_is_scalar(x_147)) { + x_149 = lean_alloc_ctor(0, 2, 0); +} else { + x_149 = x_147; +} +lean_ctor_set(x_149, 0, x_148); +lean_ctor_set(x_149, 1, x_146); +return x_149; } else { -lean_object* x_20; lean_object* x_21; lean_object* x_22; lean_object* x_23; lean_object* x_27; -lean_dec(x_7); -x_20 = lean_ctor_get(x_6, 0); -lean_inc(x_20); -x_21 = lean_ctor_get(x_6, 1); -lean_inc(x_21); -lean_dec(x_6); -x_27 = lean_ctor_get(x_20, 0); -lean_inc(x_27); -if (lean_obj_tag(x_27) == 0) -{ -lean_object* x_28; -lean_dec(x_27); -lean_dec(x_20); -x_28 = l_List_forIn_x27_loop___at_Lean_Meta_Grind_Arith_CommRing_EqCnstr_simplifyBasis___spec__1___closed__1; -x_22 = x_28; -x_23 = x_18; -goto block_26; -} -else -{ -lean_object* x_29; uint8_t x_30; -x_29 = lean_ctor_get(x_27, 1); -lean_inc(x_29); -lean_dec(x_27); -lean_inc(x_2); -x_30 = l_Lean_Grind_CommRing_Mon_divides(x_2, x_29); -if (x_30 == 0) -{ -lean_object* x_31; lean_object* x_32; lean_object* x_33; -x_31 = l_Lean_Meta_Grind_Arith_CommRing_addToBasisCore(x_20, x_9, x_10, x_11, x_12, x_13, x_14, x_15, x_16, x_17, x_18); -x_32 = lean_ctor_get(x_31, 1); -lean_inc(x_32); -lean_dec(x_31); -x_33 = l_List_forIn_x27_loop___at_Lean_Meta_Grind_Arith_CommRing_EqCnstr_simplifyBasis___spec__1___closed__1; -x_22 = x_33; -x_23 = x_32; -goto block_26; -} -else -{ -lean_object* x_34; -lean_inc(x_17); -lean_inc(x_16); -lean_inc(x_15); -lean_inc(x_14); -lean_inc(x_13); -lean_inc(x_12); -lean_inc(x_11); -lean_inc(x_10); -lean_inc(x_9); -lean_inc(x_1); -x_34 = l_Lean_Meta_Grind_Arith_CommRing_EqCnstr_simplifyWith(x_20, x_1, x_9, x_10, x_11, x_12, x_13, x_14, x_15, x_16, x_17, x_18); -if (lean_obj_tag(x_34) == 0) -{ -lean_object* x_35; lean_object* x_36; lean_object* x_37; -x_35 = lean_ctor_get(x_34, 0); -lean_inc(x_35); -x_36 = lean_ctor_get(x_34, 1); -lean_inc(x_36); -lean_dec(x_34); -lean_inc(x_17); -lean_inc(x_16); -lean_inc(x_15); -lean_inc(x_14); -lean_inc(x_13); -lean_inc(x_12); -lean_inc(x_11); -lean_inc(x_10); -lean_inc(x_9); -lean_inc(x_35); -x_37 = l_Lean_Meta_Grind_Arith_CommRing_EqCnstr_checkConstant(x_35, x_9, x_10, x_11, x_12, x_13, x_14, x_15, x_16, x_17, x_36); -if (lean_obj_tag(x_37) == 0) -{ -lean_object* x_38; uint8_t x_39; -x_38 = lean_ctor_get(x_37, 0); -lean_inc(x_38); -x_39 = lean_unbox(x_38); -lean_dec(x_38); -if (x_39 == 0) -{ -lean_object* x_40; lean_object* x_41; lean_object* x_42; lean_object* x_43; -x_40 = lean_ctor_get(x_37, 1); -lean_inc(x_40); -lean_dec(x_37); -x_41 = l_Lean_Meta_Grind_Arith_CommRing_addToBasisCore(x_35, x_9, x_10, x_11, x_12, x_13, x_14, x_15, x_16, x_17, x_40); -x_42 = lean_ctor_get(x_41, 1); -lean_inc(x_42); -lean_dec(x_41); -x_43 = l_List_forIn_x27_loop___at_Lean_Meta_Grind_Arith_CommRing_EqCnstr_simplifyBasis___spec__1___closed__1; -x_22 = x_43; -x_23 = x_42; -goto block_26; +lean_object* x_150; lean_object* x_151; lean_object* x_152; lean_object* x_153; lean_object* x_154; lean_object* x_155; lean_object* x_156; lean_object* x_157; lean_object* x_158; lean_object* x_159; lean_object* x_160; lean_object* x_161; lean_object* x_162; lean_object* x_163; lean_object* x_164; lean_object* x_165; lean_object* x_166; lean_object* x_167; lean_object* x_168; lean_object* x_169; lean_object* x_170; lean_object* x_171; lean_object* x_172; lean_object* x_173; lean_object* x_174; lean_object* x_175; lean_object* x_176; lean_object* x_177; uint8_t x_178; lean_object* 
x_179; lean_object* x_180; lean_object* x_181; lean_object* x_182; lean_object* x_183; lean_object* x_184; lean_object* x_185; lean_object* x_186; +x_150 = lean_array_fget(x_136, x_19); +x_151 = lean_box(0); +x_152 = lean_array_fset(x_136, x_19, x_151); +x_153 = lean_ctor_get(x_150, 0); +lean_inc(x_153); +x_154 = lean_ctor_get(x_150, 1); +lean_inc(x_154); +x_155 = lean_ctor_get(x_150, 2); +lean_inc(x_155); +x_156 = lean_ctor_get(x_150, 3); +lean_inc(x_156); +x_157 = lean_ctor_get(x_150, 4); +lean_inc(x_157); +x_158 = lean_ctor_get(x_150, 5); +lean_inc(x_158); +x_159 = lean_ctor_get(x_150, 6); +lean_inc(x_159); +x_160 = lean_ctor_get(x_150, 7); +lean_inc(x_160); +x_161 = lean_ctor_get(x_150, 8); +lean_inc(x_161); +x_162 = lean_ctor_get(x_150, 9); +lean_inc(x_162); +x_163 = lean_ctor_get(x_150, 10); +lean_inc(x_163); +x_164 = lean_ctor_get(x_150, 11); +lean_inc(x_164); +x_165 = lean_ctor_get(x_150, 12); +lean_inc(x_165); +x_166 = lean_ctor_get(x_150, 13); +lean_inc(x_166); +x_167 = lean_ctor_get(x_150, 14); +lean_inc(x_167); +x_168 = lean_ctor_get(x_150, 15); +lean_inc(x_168); +x_169 = lean_ctor_get(x_150, 16); +lean_inc(x_169); +x_170 = lean_ctor_get(x_150, 17); +lean_inc(x_170); +x_171 = lean_ctor_get(x_150, 18); +lean_inc(x_171); +x_172 = lean_ctor_get(x_150, 19); +lean_inc(x_172); +x_173 = lean_ctor_get(x_150, 20); +lean_inc(x_173); +if (lean_is_exclusive(x_150)) { + lean_ctor_release(x_150, 0); + lean_ctor_release(x_150, 1); + lean_ctor_release(x_150, 2); + lean_ctor_release(x_150, 3); + lean_ctor_release(x_150, 4); + lean_ctor_release(x_150, 5); + lean_ctor_release(x_150, 6); + lean_ctor_release(x_150, 7); + lean_ctor_release(x_150, 8); + lean_ctor_release(x_150, 9); + lean_ctor_release(x_150, 10); + lean_ctor_release(x_150, 11); + lean_ctor_release(x_150, 12); + lean_ctor_release(x_150, 13); + lean_ctor_release(x_150, 14); + lean_ctor_release(x_150, 15); + lean_ctor_release(x_150, 16); + lean_ctor_release(x_150, 17); + lean_ctor_release(x_150, 18); + lean_ctor_release(x_150, 19); + lean_ctor_release(x_150, 20); + x_174 = x_150; +} else { + lean_dec_ref(x_150); + x_174 = lean_box(0); } -else -{ -lean_object* x_44; lean_object* x_45; -lean_dec(x_35); -x_44 = lean_ctor_get(x_37, 1); -lean_inc(x_44); -lean_dec(x_37); -x_45 = l_List_forIn_x27_loop___at_Lean_Meta_Grind_Arith_CommRing_EqCnstr_simplifyBasis___spec__1___closed__1; -x_22 = x_45; -x_23 = x_44; -goto block_26; +x_175 = lean_box(0); +x_176 = lean_ctor_get(x_18, 0); +lean_inc(x_176); +lean_dec(x_18); +x_177 = l_Lean_PersistentArray_modify___at_Lean_Meta_Grind_Arith_CommRing_addToBasisCore___spec__1(x_1, x_175, x_172, x_176); +lean_dec(x_176); +x_178 = 1; +if (lean_is_scalar(x_174)) { + x_179 = lean_alloc_ctor(0, 21, 1); +} else { + x_179 = x_174; } +lean_ctor_set(x_179, 0, x_153); +lean_ctor_set(x_179, 1, x_154); +lean_ctor_set(x_179, 2, x_155); +lean_ctor_set(x_179, 3, x_156); +lean_ctor_set(x_179, 4, x_157); +lean_ctor_set(x_179, 5, x_158); +lean_ctor_set(x_179, 6, x_159); +lean_ctor_set(x_179, 7, x_160); +lean_ctor_set(x_179, 8, x_161); +lean_ctor_set(x_179, 9, x_162); +lean_ctor_set(x_179, 10, x_163); +lean_ctor_set(x_179, 11, x_164); +lean_ctor_set(x_179, 12, x_165); +lean_ctor_set(x_179, 13, x_166); +lean_ctor_set(x_179, 14, x_167); +lean_ctor_set(x_179, 15, x_168); +lean_ctor_set(x_179, 16, x_169); +lean_ctor_set(x_179, 17, x_170); +lean_ctor_set(x_179, 18, x_171); +lean_ctor_set(x_179, 19, x_177); +lean_ctor_set(x_179, 20, x_173); +lean_ctor_set_uint8(x_179, sizeof(void*)*21, x_178); +x_180 = lean_array_fset(x_152, x_19, 
x_179); +if (lean_is_scalar(x_140)) { + x_181 = lean_alloc_ctor(0, 4, 0); +} else { + x_181 = x_140; } -else -{ -uint8_t x_46; -lean_dec(x_35); -lean_dec(x_21); -lean_dec(x_17); -lean_dec(x_16); -lean_dec(x_15); -lean_dec(x_14); -lean_dec(x_13); -lean_dec(x_12); -lean_dec(x_11); -lean_dec(x_10); -lean_dec(x_9); -lean_dec(x_2); -lean_dec(x_1); -x_46 = !lean_is_exclusive(x_37); -if (x_46 == 0) -{ -return x_37; +lean_ctor_set(x_181, 0, x_180); +lean_ctor_set(x_181, 1, x_137); +lean_ctor_set(x_181, 2, x_138); +lean_ctor_set(x_181, 3, x_139); +x_182 = lean_alloc_ctor(0, 3, 0); +lean_ctor_set(x_182, 0, x_134); +lean_ctor_set(x_182, 1, x_135); +lean_ctor_set(x_182, 2, x_181); +lean_ctor_set(x_21, 14, x_182); +x_183 = lean_st_ref_set(x_3, x_21, x_24); +x_184 = lean_ctor_get(x_183, 1); +lean_inc(x_184); +if (lean_is_exclusive(x_183)) { + lean_ctor_release(x_183, 0); + lean_ctor_release(x_183, 1); + x_185 = x_183; +} else { + lean_dec_ref(x_183); + x_185 = lean_box(0); } -else -{ -lean_object* x_47; lean_object* x_48; lean_object* x_49; -x_47 = lean_ctor_get(x_37, 0); -x_48 = lean_ctor_get(x_37, 1); -lean_inc(x_48); -lean_inc(x_47); -lean_dec(x_37); -x_49 = lean_alloc_ctor(1, 2, 0); -lean_ctor_set(x_49, 0, x_47); -lean_ctor_set(x_49, 1, x_48); -return x_49; +if (lean_is_scalar(x_185)) { + x_186 = lean_alloc_ctor(0, 2, 0); +} else { + x_186 = x_185; } +lean_ctor_set(x_186, 0, x_151); +lean_ctor_set(x_186, 1, x_184); +return x_186; } } -else -{ -uint8_t x_50; -lean_dec(x_21); -lean_dec(x_17); -lean_dec(x_16); -lean_dec(x_15); -lean_dec(x_14); -lean_dec(x_13); -lean_dec(x_12); -lean_dec(x_11); -lean_dec(x_10); -lean_dec(x_9); -lean_dec(x_2); -lean_dec(x_1); -x_50 = !lean_is_exclusive(x_34); -if (x_50 == 0) -{ -return x_34; } else { -lean_object* x_51; lean_object* x_52; lean_object* x_53; -x_51 = lean_ctor_get(x_34, 0); -x_52 = lean_ctor_get(x_34, 1); -lean_inc(x_52); -lean_inc(x_51); -lean_dec(x_34); -x_53 = lean_alloc_ctor(1, 2, 0); -lean_ctor_set(x_53, 0, x_51); -lean_ctor_set(x_53, 1, x_52); -return x_53; -} -} +lean_object* x_187; lean_object* x_188; lean_object* x_189; lean_object* x_190; lean_object* x_191; lean_object* x_192; lean_object* x_193; lean_object* x_194; uint8_t x_195; lean_object* x_196; lean_object* x_197; lean_object* x_198; lean_object* x_199; lean_object* x_200; lean_object* x_201; lean_object* x_202; lean_object* x_203; lean_object* x_204; lean_object* x_205; lean_object* x_206; lean_object* x_207; lean_object* x_208; lean_object* x_209; lean_object* x_210; lean_object* x_211; uint8_t x_212; +x_187 = lean_ctor_get(x_21, 0); +x_188 = lean_ctor_get(x_21, 1); +x_189 = lean_ctor_get(x_21, 2); +x_190 = lean_ctor_get(x_21, 3); +x_191 = lean_ctor_get(x_21, 4); +x_192 = lean_ctor_get(x_21, 5); +x_193 = lean_ctor_get(x_21, 6); +x_194 = lean_ctor_get(x_21, 7); +x_195 = lean_ctor_get_uint8(x_21, sizeof(void*)*16); +x_196 = lean_ctor_get(x_21, 8); +x_197 = lean_ctor_get(x_21, 9); +x_198 = lean_ctor_get(x_21, 10); +x_199 = lean_ctor_get(x_21, 11); +x_200 = lean_ctor_get(x_21, 12); +x_201 = lean_ctor_get(x_21, 13); +x_202 = lean_ctor_get(x_21, 15); +lean_inc(x_202); +lean_inc(x_201); +lean_inc(x_200); +lean_inc(x_199); +lean_inc(x_198); +lean_inc(x_197); +lean_inc(x_196); +lean_inc(x_194); +lean_inc(x_193); +lean_inc(x_192); +lean_inc(x_191); +lean_inc(x_190); +lean_inc(x_189); +lean_inc(x_188); +lean_inc(x_187); +lean_dec(x_21); +x_203 = lean_ctor_get(x_22, 0); +lean_inc(x_203); +x_204 = lean_ctor_get(x_22, 1); +lean_inc(x_204); +if (lean_is_exclusive(x_22)) { + lean_ctor_release(x_22, 0); 
+ lean_ctor_release(x_22, 1); + lean_ctor_release(x_22, 2); + x_205 = x_22; +} else { + lean_dec_ref(x_22); + x_205 = lean_box(0); } +x_206 = lean_ctor_get(x_23, 0); +lean_inc(x_206); +x_207 = lean_ctor_get(x_23, 1); +lean_inc(x_207); +x_208 = lean_ctor_get(x_23, 2); +lean_inc(x_208); +x_209 = lean_ctor_get(x_23, 3); +lean_inc(x_209); +if (lean_is_exclusive(x_23)) { + lean_ctor_release(x_23, 0); + lean_ctor_release(x_23, 1); + lean_ctor_release(x_23, 2); + lean_ctor_release(x_23, 3); + x_210 = x_23; +} else { + lean_dec_ref(x_23); + x_210 = lean_box(0); } -block_26: +x_211 = lean_array_get_size(x_206); +x_212 = lean_nat_dec_lt(x_19, x_211); +lean_dec(x_211); +if (x_212 == 0) { -lean_object* x_24; -x_24 = lean_ctor_get(x_22, 0); -lean_inc(x_24); -lean_dec(x_22); -x_6 = x_21; -x_7 = x_24; -x_8 = lean_box(0); -x_18 = x_23; -goto _start; -} +lean_object* x_213; lean_object* x_214; lean_object* x_215; lean_object* x_216; lean_object* x_217; lean_object* x_218; lean_object* x_219; lean_object* x_220; +lean_dec(x_18); +lean_dec(x_1); +if (lean_is_scalar(x_210)) { + x_213 = lean_alloc_ctor(0, 4, 0); +} else { + x_213 = x_210; } +lean_ctor_set(x_213, 0, x_206); +lean_ctor_set(x_213, 1, x_207); +lean_ctor_set(x_213, 2, x_208); +lean_ctor_set(x_213, 3, x_209); +if (lean_is_scalar(x_205)) { + x_214 = lean_alloc_ctor(0, 3, 0); +} else { + x_214 = x_205; } +lean_ctor_set(x_214, 0, x_203); +lean_ctor_set(x_214, 1, x_204); +lean_ctor_set(x_214, 2, x_213); +x_215 = lean_alloc_ctor(0, 16, 1); +lean_ctor_set(x_215, 0, x_187); +lean_ctor_set(x_215, 1, x_188); +lean_ctor_set(x_215, 2, x_189); +lean_ctor_set(x_215, 3, x_190); +lean_ctor_set(x_215, 4, x_191); +lean_ctor_set(x_215, 5, x_192); +lean_ctor_set(x_215, 6, x_193); +lean_ctor_set(x_215, 7, x_194); +lean_ctor_set(x_215, 8, x_196); +lean_ctor_set(x_215, 9, x_197); +lean_ctor_set(x_215, 10, x_198); +lean_ctor_set(x_215, 11, x_199); +lean_ctor_set(x_215, 12, x_200); +lean_ctor_set(x_215, 13, x_201); +lean_ctor_set(x_215, 14, x_214); +lean_ctor_set(x_215, 15, x_202); +lean_ctor_set_uint8(x_215, sizeof(void*)*16, x_195); +x_216 = lean_st_ref_set(x_3, x_215, x_24); +x_217 = lean_ctor_get(x_216, 1); +lean_inc(x_217); +if (lean_is_exclusive(x_216)) { + lean_ctor_release(x_216, 0); + lean_ctor_release(x_216, 1); + x_218 = x_216; +} else { + lean_dec_ref(x_216); + x_218 = lean_box(0); } -LEAN_EXPORT lean_object* l_Lean_Meta_Grind_Arith_CommRing_EqCnstr_simplifyBasis___lambda__1(lean_object* x_1, lean_object* x_2, lean_object* x_3, lean_object* x_4, lean_object* x_5, lean_object* x_6, lean_object* x_7, lean_object* x_8, lean_object* x_9, lean_object* x_10, lean_object* x_11, lean_object* x_12, lean_object* x_13, lean_object* x_14, lean_object* x_15) { -_start: -{ -lean_object* x_16; lean_object* x_29; lean_object* x_30; lean_object* x_31; lean_object* x_32; lean_object* x_33; lean_object* x_34; uint8_t x_35; -x_29 = lean_ctor_get(x_6, 0); -lean_inc(x_29); -x_30 = lean_st_ref_take(x_7, x_15); -x_31 = lean_ctor_get(x_30, 0); -lean_inc(x_31); -x_32 = lean_ctor_get(x_31, 14); -lean_inc(x_32); -x_33 = lean_ctor_get(x_32, 2); -lean_inc(x_33); -x_34 = lean_ctor_get(x_30, 1); -lean_inc(x_34); -lean_dec(x_30); -x_35 = !lean_is_exclusive(x_31); -if (x_35 == 0) -{ -lean_object* x_36; uint8_t x_37; -x_36 = lean_ctor_get(x_31, 14); -lean_dec(x_36); -x_37 = !lean_is_exclusive(x_32); -if (x_37 == 0) -{ -lean_object* x_38; uint8_t x_39; -x_38 = lean_ctor_get(x_32, 2); -lean_dec(x_38); -x_39 = !lean_is_exclusive(x_33); -if (x_39 == 0) -{ -lean_object* x_40; lean_object* x_41; 
uint8_t x_42; -x_40 = lean_ctor_get(x_33, 0); -x_41 = lean_array_get_size(x_40); -x_42 = lean_nat_dec_lt(x_29, x_41); -lean_dec(x_41); -if (x_42 == 0) -{ -lean_object* x_43; lean_object* x_44; -lean_dec(x_29); -x_43 = lean_st_ref_set(x_7, x_31, x_34); -x_44 = lean_ctor_get(x_43, 1); -lean_inc(x_44); -lean_dec(x_43); -x_16 = x_44; -goto block_28; +x_219 = lean_box(0); +if (lean_is_scalar(x_218)) { + x_220 = lean_alloc_ctor(0, 2, 0); +} else { + x_220 = x_218; } -else -{ -lean_object* x_45; lean_object* x_46; lean_object* x_47; uint8_t x_48; -x_45 = lean_array_fget(x_40, x_29); -x_46 = lean_box(0); -x_47 = lean_array_fset(x_40, x_29, x_46); -x_48 = !lean_is_exclusive(x_45); -if (x_48 == 0) -{ -lean_object* x_49; lean_object* x_50; lean_object* x_51; lean_object* x_52; lean_object* x_53; lean_object* x_54; -x_49 = lean_ctor_get(x_45, 19); -x_50 = lean_box(0); -x_51 = l_Lean_PersistentArray_set___rarg(x_49, x_4, x_50); -lean_ctor_set(x_45, 19, x_51); -x_52 = lean_array_fset(x_47, x_29, x_45); -lean_dec(x_29); -lean_ctor_set(x_33, 0, x_52); -x_53 = lean_st_ref_set(x_7, x_31, x_34); -x_54 = lean_ctor_get(x_53, 1); -lean_inc(x_54); -lean_dec(x_53); -x_16 = x_54; -goto block_28; +lean_ctor_set(x_220, 0, x_219); +lean_ctor_set(x_220, 1, x_217); +return x_220; } else { -lean_object* x_55; lean_object* x_56; lean_object* x_57; lean_object* x_58; lean_object* x_59; lean_object* x_60; lean_object* x_61; lean_object* x_62; lean_object* x_63; lean_object* x_64; lean_object* x_65; lean_object* x_66; lean_object* x_67; lean_object* x_68; lean_object* x_69; lean_object* x_70; lean_object* x_71; lean_object* x_72; lean_object* x_73; lean_object* x_74; lean_object* x_75; uint8_t x_76; lean_object* x_77; lean_object* x_78; lean_object* x_79; lean_object* x_80; lean_object* x_81; lean_object* x_82; -x_55 = lean_ctor_get(x_45, 0); -x_56 = lean_ctor_get(x_45, 1); -x_57 = lean_ctor_get(x_45, 2); -x_58 = lean_ctor_get(x_45, 3); -x_59 = lean_ctor_get(x_45, 4); -x_60 = lean_ctor_get(x_45, 5); -x_61 = lean_ctor_get(x_45, 6); -x_62 = lean_ctor_get(x_45, 7); -x_63 = lean_ctor_get(x_45, 8); -x_64 = lean_ctor_get(x_45, 9); -x_65 = lean_ctor_get(x_45, 10); -x_66 = lean_ctor_get(x_45, 11); -x_67 = lean_ctor_get(x_45, 12); -x_68 = lean_ctor_get(x_45, 13); -x_69 = lean_ctor_get(x_45, 14); -x_70 = lean_ctor_get(x_45, 15); -x_71 = lean_ctor_get(x_45, 16); -x_72 = lean_ctor_get(x_45, 17); -x_73 = lean_ctor_get(x_45, 18); -x_74 = lean_ctor_get(x_45, 19); -x_75 = lean_ctor_get(x_45, 20); -x_76 = lean_ctor_get_uint8(x_45, sizeof(void*)*21); -lean_inc(x_75); -lean_inc(x_74); -lean_inc(x_73); -lean_inc(x_72); -lean_inc(x_71); -lean_inc(x_70); -lean_inc(x_69); -lean_inc(x_68); -lean_inc(x_67); -lean_inc(x_66); -lean_inc(x_65); -lean_inc(x_64); -lean_inc(x_63); -lean_inc(x_62); -lean_inc(x_61); -lean_inc(x_60); -lean_inc(x_59); -lean_inc(x_58); -lean_inc(x_57); -lean_inc(x_56); -lean_inc(x_55); -lean_dec(x_45); -x_77 = lean_box(0); -x_78 = l_Lean_PersistentArray_set___rarg(x_74, x_4, x_77); -x_79 = lean_alloc_ctor(0, 21, 1); -lean_ctor_set(x_79, 0, x_55); -lean_ctor_set(x_79, 1, x_56); -lean_ctor_set(x_79, 2, x_57); -lean_ctor_set(x_79, 3, x_58); -lean_ctor_set(x_79, 4, x_59); -lean_ctor_set(x_79, 5, x_60); -lean_ctor_set(x_79, 6, x_61); -lean_ctor_set(x_79, 7, x_62); -lean_ctor_set(x_79, 8, x_63); -lean_ctor_set(x_79, 9, x_64); -lean_ctor_set(x_79, 10, x_65); -lean_ctor_set(x_79, 11, x_66); -lean_ctor_set(x_79, 12, x_67); -lean_ctor_set(x_79, 13, x_68); -lean_ctor_set(x_79, 14, x_69); -lean_ctor_set(x_79, 15, x_70); 
-lean_ctor_set(x_79, 16, x_71); -lean_ctor_set(x_79, 17, x_72); -lean_ctor_set(x_79, 18, x_73); -lean_ctor_set(x_79, 19, x_78); -lean_ctor_set(x_79, 20, x_75); -lean_ctor_set_uint8(x_79, sizeof(void*)*21, x_76); -x_80 = lean_array_fset(x_47, x_29, x_79); -lean_dec(x_29); -lean_ctor_set(x_33, 0, x_80); -x_81 = lean_st_ref_set(x_7, x_31, x_34); -x_82 = lean_ctor_get(x_81, 1); -lean_inc(x_82); -lean_dec(x_81); -x_16 = x_82; -goto block_28; +lean_object* x_221; lean_object* x_222; lean_object* x_223; lean_object* x_224; lean_object* x_225; lean_object* x_226; lean_object* x_227; lean_object* x_228; lean_object* x_229; lean_object* x_230; lean_object* x_231; lean_object* x_232; lean_object* x_233; lean_object* x_234; lean_object* x_235; lean_object* x_236; lean_object* x_237; lean_object* x_238; lean_object* x_239; lean_object* x_240; lean_object* x_241; lean_object* x_242; lean_object* x_243; lean_object* x_244; lean_object* x_245; lean_object* x_246; lean_object* x_247; lean_object* x_248; uint8_t x_249; lean_object* x_250; lean_object* x_251; lean_object* x_252; lean_object* x_253; lean_object* x_254; lean_object* x_255; lean_object* x_256; lean_object* x_257; lean_object* x_258; +x_221 = lean_array_fget(x_206, x_19); +x_222 = lean_box(0); +x_223 = lean_array_fset(x_206, x_19, x_222); +x_224 = lean_ctor_get(x_221, 0); +lean_inc(x_224); +x_225 = lean_ctor_get(x_221, 1); +lean_inc(x_225); +x_226 = lean_ctor_get(x_221, 2); +lean_inc(x_226); +x_227 = lean_ctor_get(x_221, 3); +lean_inc(x_227); +x_228 = lean_ctor_get(x_221, 4); +lean_inc(x_228); +x_229 = lean_ctor_get(x_221, 5); +lean_inc(x_229); +x_230 = lean_ctor_get(x_221, 6); +lean_inc(x_230); +x_231 = lean_ctor_get(x_221, 7); +lean_inc(x_231); +x_232 = lean_ctor_get(x_221, 8); +lean_inc(x_232); +x_233 = lean_ctor_get(x_221, 9); +lean_inc(x_233); +x_234 = lean_ctor_get(x_221, 10); +lean_inc(x_234); +x_235 = lean_ctor_get(x_221, 11); +lean_inc(x_235); +x_236 = lean_ctor_get(x_221, 12); +lean_inc(x_236); +x_237 = lean_ctor_get(x_221, 13); +lean_inc(x_237); +x_238 = lean_ctor_get(x_221, 14); +lean_inc(x_238); +x_239 = lean_ctor_get(x_221, 15); +lean_inc(x_239); +x_240 = lean_ctor_get(x_221, 16); +lean_inc(x_240); +x_241 = lean_ctor_get(x_221, 17); +lean_inc(x_241); +x_242 = lean_ctor_get(x_221, 18); +lean_inc(x_242); +x_243 = lean_ctor_get(x_221, 19); +lean_inc(x_243); +x_244 = lean_ctor_get(x_221, 20); +lean_inc(x_244); +if (lean_is_exclusive(x_221)) { + lean_ctor_release(x_221, 0); + lean_ctor_release(x_221, 1); + lean_ctor_release(x_221, 2); + lean_ctor_release(x_221, 3); + lean_ctor_release(x_221, 4); + lean_ctor_release(x_221, 5); + lean_ctor_release(x_221, 6); + lean_ctor_release(x_221, 7); + lean_ctor_release(x_221, 8); + lean_ctor_release(x_221, 9); + lean_ctor_release(x_221, 10); + lean_ctor_release(x_221, 11); + lean_ctor_release(x_221, 12); + lean_ctor_release(x_221, 13); + lean_ctor_release(x_221, 14); + lean_ctor_release(x_221, 15); + lean_ctor_release(x_221, 16); + lean_ctor_release(x_221, 17); + lean_ctor_release(x_221, 18); + lean_ctor_release(x_221, 19); + lean_ctor_release(x_221, 20); + x_245 = x_221; +} else { + lean_dec_ref(x_221); + x_245 = lean_box(0); +} +x_246 = lean_box(0); +x_247 = lean_ctor_get(x_18, 0); +lean_inc(x_247); +lean_dec(x_18); +x_248 = l_Lean_PersistentArray_modify___at_Lean_Meta_Grind_Arith_CommRing_addToBasisCore___spec__1(x_1, x_246, x_243, x_247); +lean_dec(x_247); +x_249 = 1; +if (lean_is_scalar(x_245)) { + x_250 = lean_alloc_ctor(0, 21, 1); +} else { + x_250 = x_245; +} +lean_ctor_set(x_250, 0, x_224); 
+lean_ctor_set(x_250, 1, x_225); +lean_ctor_set(x_250, 2, x_226); +lean_ctor_set(x_250, 3, x_227); +lean_ctor_set(x_250, 4, x_228); +lean_ctor_set(x_250, 5, x_229); +lean_ctor_set(x_250, 6, x_230); +lean_ctor_set(x_250, 7, x_231); +lean_ctor_set(x_250, 8, x_232); +lean_ctor_set(x_250, 9, x_233); +lean_ctor_set(x_250, 10, x_234); +lean_ctor_set(x_250, 11, x_235); +lean_ctor_set(x_250, 12, x_236); +lean_ctor_set(x_250, 13, x_237); +lean_ctor_set(x_250, 14, x_238); +lean_ctor_set(x_250, 15, x_239); +lean_ctor_set(x_250, 16, x_240); +lean_ctor_set(x_250, 17, x_241); +lean_ctor_set(x_250, 18, x_242); +lean_ctor_set(x_250, 19, x_248); +lean_ctor_set(x_250, 20, x_244); +lean_ctor_set_uint8(x_250, sizeof(void*)*21, x_249); +x_251 = lean_array_fset(x_223, x_19, x_250); +if (lean_is_scalar(x_210)) { + x_252 = lean_alloc_ctor(0, 4, 0); +} else { + x_252 = x_210; +} +lean_ctor_set(x_252, 0, x_251); +lean_ctor_set(x_252, 1, x_207); +lean_ctor_set(x_252, 2, x_208); +lean_ctor_set(x_252, 3, x_209); +if (lean_is_scalar(x_205)) { + x_253 = lean_alloc_ctor(0, 3, 0); +} else { + x_253 = x_205; +} +lean_ctor_set(x_253, 0, x_203); +lean_ctor_set(x_253, 1, x_204); +lean_ctor_set(x_253, 2, x_252); +x_254 = lean_alloc_ctor(0, 16, 1); +lean_ctor_set(x_254, 0, x_187); +lean_ctor_set(x_254, 1, x_188); +lean_ctor_set(x_254, 2, x_189); +lean_ctor_set(x_254, 3, x_190); +lean_ctor_set(x_254, 4, x_191); +lean_ctor_set(x_254, 5, x_192); +lean_ctor_set(x_254, 6, x_193); +lean_ctor_set(x_254, 7, x_194); +lean_ctor_set(x_254, 8, x_196); +lean_ctor_set(x_254, 9, x_197); +lean_ctor_set(x_254, 10, x_198); +lean_ctor_set(x_254, 11, x_199); +lean_ctor_set(x_254, 12, x_200); +lean_ctor_set(x_254, 13, x_201); +lean_ctor_set(x_254, 14, x_253); +lean_ctor_set(x_254, 15, x_202); +lean_ctor_set_uint8(x_254, sizeof(void*)*16, x_195); +x_255 = lean_st_ref_set(x_3, x_254, x_24); +x_256 = lean_ctor_get(x_255, 1); +lean_inc(x_256); +if (lean_is_exclusive(x_255)) { + lean_ctor_release(x_255, 0); + lean_ctor_release(x_255, 1); + x_257 = x_255; +} else { + lean_dec_ref(x_255); + x_257 = lean_box(0); +} +if (lean_is_scalar(x_257)) { + x_258 = lean_alloc_ctor(0, 2, 0); +} else { + x_258 = x_257; +} +lean_ctor_set(x_258, 0, x_222); +lean_ctor_set(x_258, 1, x_256); +return x_258; } } } -else +} +} +} +LEAN_EXPORT lean_object* l_Lean_PersistentArray_modifyAux___at_Lean_Meta_Grind_Arith_CommRing_addToBasisCore___spec__2___boxed(lean_object* x_1, lean_object* x_2, lean_object* x_3, lean_object* x_4, lean_object* x_5) { +_start: { -lean_object* x_83; lean_object* x_84; lean_object* x_85; lean_object* x_86; lean_object* x_87; uint8_t x_88; -x_83 = lean_ctor_get(x_33, 0); -x_84 = lean_ctor_get(x_33, 1); -x_85 = lean_ctor_get(x_33, 2); -x_86 = lean_ctor_get(x_33, 3); -lean_inc(x_86); -lean_inc(x_85); -lean_inc(x_84); -lean_inc(x_83); -lean_dec(x_33); -x_87 = lean_array_get_size(x_83); -x_88 = lean_nat_dec_lt(x_29, x_87); -lean_dec(x_87); -if (x_88 == 0) +size_t x_6; size_t x_7; lean_object* x_8; +x_6 = lean_unbox_usize(x_4); +lean_dec(x_4); +x_7 = lean_unbox_usize(x_5); +lean_dec(x_5); +x_8 = l_Lean_PersistentArray_modifyAux___at_Lean_Meta_Grind_Arith_CommRing_addToBasisCore___spec__2(x_1, x_2, x_3, x_6, x_7); +lean_dec(x_2); +return x_8; +} +} +LEAN_EXPORT lean_object* l_Lean_PersistentArray_modify___at_Lean_Meta_Grind_Arith_CommRing_addToBasisCore___spec__1___boxed(lean_object* x_1, lean_object* x_2, lean_object* x_3, lean_object* x_4) { +_start: { -lean_object* x_89; lean_object* x_90; lean_object* x_91; -lean_dec(x_29); -x_89 = lean_alloc_ctor(0, 4, 
0); -lean_ctor_set(x_89, 0, x_83); -lean_ctor_set(x_89, 1, x_84); -lean_ctor_set(x_89, 2, x_85); -lean_ctor_set(x_89, 3, x_86); -lean_ctor_set(x_32, 2, x_89); -x_90 = lean_st_ref_set(x_7, x_31, x_34); -x_91 = lean_ctor_get(x_90, 1); -lean_inc(x_91); -lean_dec(x_90); -x_16 = x_91; -goto block_28; +lean_object* x_5; +x_5 = l_Lean_PersistentArray_modify___at_Lean_Meta_Grind_Arith_CommRing_addToBasisCore___spec__1(x_1, x_2, x_3, x_4); +lean_dec(x_4); +lean_dec(x_2); +return x_5; } -else +} +LEAN_EXPORT lean_object* l_Lean_Meta_Grind_Arith_CommRing_addToBasisCore___boxed(lean_object* x_1, lean_object* x_2, lean_object* x_3, lean_object* x_4, lean_object* x_5, lean_object* x_6, lean_object* x_7, lean_object* x_8, lean_object* x_9, lean_object* x_10, lean_object* x_11) { +_start: { -lean_object* x_92; lean_object* x_93; lean_object* x_94; lean_object* x_95; lean_object* x_96; lean_object* x_97; lean_object* x_98; lean_object* x_99; lean_object* x_100; lean_object* x_101; lean_object* x_102; lean_object* x_103; lean_object* x_104; lean_object* x_105; lean_object* x_106; lean_object* x_107; lean_object* x_108; lean_object* x_109; lean_object* x_110; lean_object* x_111; lean_object* x_112; lean_object* x_113; lean_object* x_114; lean_object* x_115; uint8_t x_116; lean_object* x_117; lean_object* x_118; lean_object* x_119; lean_object* x_120; lean_object* x_121; lean_object* x_122; lean_object* x_123; lean_object* x_124; -x_92 = lean_array_fget(x_83, x_29); -x_93 = lean_box(0); -x_94 = lean_array_fset(x_83, x_29, x_93); -x_95 = lean_ctor_get(x_92, 0); -lean_inc(x_95); -x_96 = lean_ctor_get(x_92, 1); -lean_inc(x_96); -x_97 = lean_ctor_get(x_92, 2); -lean_inc(x_97); -x_98 = lean_ctor_get(x_92, 3); -lean_inc(x_98); -x_99 = lean_ctor_get(x_92, 4); -lean_inc(x_99); -x_100 = lean_ctor_get(x_92, 5); -lean_inc(x_100); -x_101 = lean_ctor_get(x_92, 6); -lean_inc(x_101); -x_102 = lean_ctor_get(x_92, 7); -lean_inc(x_102); -x_103 = lean_ctor_get(x_92, 8); -lean_inc(x_103); -x_104 = lean_ctor_get(x_92, 9); -lean_inc(x_104); -x_105 = lean_ctor_get(x_92, 10); -lean_inc(x_105); -x_106 = lean_ctor_get(x_92, 11); -lean_inc(x_106); -x_107 = lean_ctor_get(x_92, 12); -lean_inc(x_107); -x_108 = lean_ctor_get(x_92, 13); -lean_inc(x_108); -x_109 = lean_ctor_get(x_92, 14); -lean_inc(x_109); -x_110 = lean_ctor_get(x_92, 15); -lean_inc(x_110); -x_111 = lean_ctor_get(x_92, 16); -lean_inc(x_111); -x_112 = lean_ctor_get(x_92, 17); -lean_inc(x_112); -x_113 = lean_ctor_get(x_92, 18); -lean_inc(x_113); -x_114 = lean_ctor_get(x_92, 19); -lean_inc(x_114); -x_115 = lean_ctor_get(x_92, 20); -lean_inc(x_115); -x_116 = lean_ctor_get_uint8(x_92, sizeof(void*)*21); -if (lean_is_exclusive(x_92)) { - lean_ctor_release(x_92, 0); - lean_ctor_release(x_92, 1); - lean_ctor_release(x_92, 2); - lean_ctor_release(x_92, 3); - lean_ctor_release(x_92, 4); - lean_ctor_release(x_92, 5); - lean_ctor_release(x_92, 6); - lean_ctor_release(x_92, 7); - lean_ctor_release(x_92, 8); - lean_ctor_release(x_92, 9); - lean_ctor_release(x_92, 10); - lean_ctor_release(x_92, 11); - lean_ctor_release(x_92, 12); - lean_ctor_release(x_92, 13); - lean_ctor_release(x_92, 14); - lean_ctor_release(x_92, 15); - lean_ctor_release(x_92, 16); - lean_ctor_release(x_92, 17); - lean_ctor_release(x_92, 18); - lean_ctor_release(x_92, 19); - lean_ctor_release(x_92, 20); - x_117 = x_92; -} else { - lean_dec_ref(x_92); - x_117 = lean_box(0); -} -x_118 = lean_box(0); -x_119 = l_Lean_PersistentArray_set___rarg(x_114, x_4, x_118); -if (lean_is_scalar(x_117)) { - x_120 = lean_alloc_ctor(0, 
21, 1); -} else { - x_120 = x_117; -} -lean_ctor_set(x_120, 0, x_95); -lean_ctor_set(x_120, 1, x_96); -lean_ctor_set(x_120, 2, x_97); -lean_ctor_set(x_120, 3, x_98); -lean_ctor_set(x_120, 4, x_99); -lean_ctor_set(x_120, 5, x_100); -lean_ctor_set(x_120, 6, x_101); -lean_ctor_set(x_120, 7, x_102); -lean_ctor_set(x_120, 8, x_103); -lean_ctor_set(x_120, 9, x_104); -lean_ctor_set(x_120, 10, x_105); -lean_ctor_set(x_120, 11, x_106); -lean_ctor_set(x_120, 12, x_107); -lean_ctor_set(x_120, 13, x_108); -lean_ctor_set(x_120, 14, x_109); -lean_ctor_set(x_120, 15, x_110); -lean_ctor_set(x_120, 16, x_111); -lean_ctor_set(x_120, 17, x_112); -lean_ctor_set(x_120, 18, x_113); -lean_ctor_set(x_120, 19, x_119); -lean_ctor_set(x_120, 20, x_115); -lean_ctor_set_uint8(x_120, sizeof(void*)*21, x_116); -x_121 = lean_array_fset(x_94, x_29, x_120); -lean_dec(x_29); -x_122 = lean_alloc_ctor(0, 4, 0); -lean_ctor_set(x_122, 0, x_121); -lean_ctor_set(x_122, 1, x_84); -lean_ctor_set(x_122, 2, x_85); -lean_ctor_set(x_122, 3, x_86); -lean_ctor_set(x_32, 2, x_122); -x_123 = lean_st_ref_set(x_7, x_31, x_34); -x_124 = lean_ctor_get(x_123, 1); -lean_inc(x_124); -lean_dec(x_123); -x_16 = x_124; -goto block_28; +lean_object* x_12; +x_12 = l_Lean_Meta_Grind_Arith_CommRing_addToBasisCore(x_1, x_2, x_3, x_4, x_5, x_6, x_7, x_8, x_9, x_10, x_11); +lean_dec(x_10); +lean_dec(x_9); +lean_dec(x_8); +lean_dec(x_7); +lean_dec(x_6); +lean_dec(x_5); +lean_dec(x_4); +lean_dec(x_3); +lean_dec(x_2); +return x_12; } } +LEAN_EXPORT lean_object* l_Lean_RBNode_ins___at_Lean_Meta_Grind_Arith_CommRing_EqCnstr_addToQueue___spec__2(lean_object* x_1, lean_object* x_2, lean_object* x_3) { +_start: +{ +if (lean_obj_tag(x_1) == 0) +{ +lean_object* x_4; uint8_t x_5; lean_object* x_6; +x_4 = lean_box(0); +x_5 = 0; +x_6 = lean_alloc_ctor(1, 4, 1); +lean_ctor_set(x_6, 0, x_4); +lean_ctor_set(x_6, 1, x_2); +lean_ctor_set(x_6, 2, x_3); +lean_ctor_set(x_6, 3, x_4); +lean_ctor_set_uint8(x_6, sizeof(void*)*4, x_5); +return x_6; } else { -lean_object* x_125; lean_object* x_126; lean_object* x_127; lean_object* x_128; lean_object* x_129; lean_object* x_130; lean_object* x_131; lean_object* x_132; uint8_t x_133; -x_125 = lean_ctor_get(x_32, 0); -x_126 = lean_ctor_get(x_32, 1); -lean_inc(x_126); -lean_inc(x_125); -lean_dec(x_32); -x_127 = lean_ctor_get(x_33, 0); -lean_inc(x_127); -x_128 = lean_ctor_get(x_33, 1); -lean_inc(x_128); -x_129 = lean_ctor_get(x_33, 2); -lean_inc(x_129); -x_130 = lean_ctor_get(x_33, 3); -lean_inc(x_130); -if (lean_is_exclusive(x_33)) { - lean_ctor_release(x_33, 0); - lean_ctor_release(x_33, 1); - lean_ctor_release(x_33, 2); - lean_ctor_release(x_33, 3); - x_131 = x_33; -} else { - lean_dec_ref(x_33); - x_131 = lean_box(0); +uint8_t x_7; +x_7 = lean_ctor_get_uint8(x_1, sizeof(void*)*4); +if (x_7 == 0) +{ +uint8_t x_8; +x_8 = !lean_is_exclusive(x_1); +if (x_8 == 0) +{ +lean_object* x_9; lean_object* x_10; lean_object* x_11; lean_object* x_12; uint8_t x_13; +x_9 = lean_ctor_get(x_1, 0); +x_10 = lean_ctor_get(x_1, 1); +x_11 = lean_ctor_get(x_1, 2); +x_12 = lean_ctor_get(x_1, 3); +x_13 = l_Lean_Meta_Grind_Arith_CommRing_EqCnstr_compare(x_2, x_10); +switch (x_13) { +case 0: +{ +lean_object* x_14; uint8_t x_15; +x_14 = l_Lean_RBNode_ins___at_Lean_Meta_Grind_Arith_CommRing_EqCnstr_addToQueue___spec__2(x_9, x_2, x_3); +x_15 = 0; +lean_ctor_set(x_1, 0, x_14); +lean_ctor_set_uint8(x_1, sizeof(void*)*4, x_15); +return x_1; } -x_132 = lean_array_get_size(x_127); -x_133 = lean_nat_dec_lt(x_29, x_132); -lean_dec(x_132); -if (x_133 == 0) +case 1: { 
-lean_object* x_134; lean_object* x_135; lean_object* x_136; lean_object* x_137; -lean_dec(x_29); -if (lean_is_scalar(x_131)) { - x_134 = lean_alloc_ctor(0, 4, 0); -} else { - x_134 = x_131; +uint8_t x_16; +lean_dec(x_11); +lean_dec(x_10); +x_16 = 0; +lean_ctor_set(x_1, 2, x_3); +lean_ctor_set(x_1, 1, x_2); +lean_ctor_set_uint8(x_1, sizeof(void*)*4, x_16); +return x_1; +} +default: +{ +lean_object* x_17; uint8_t x_18; +x_17 = l_Lean_RBNode_ins___at_Lean_Meta_Grind_Arith_CommRing_EqCnstr_addToQueue___spec__2(x_12, x_2, x_3); +x_18 = 0; +lean_ctor_set(x_1, 3, x_17); +lean_ctor_set_uint8(x_1, sizeof(void*)*4, x_18); +return x_1; +} } -lean_ctor_set(x_134, 0, x_127); -lean_ctor_set(x_134, 1, x_128); -lean_ctor_set(x_134, 2, x_129); -lean_ctor_set(x_134, 3, x_130); -x_135 = lean_alloc_ctor(0, 3, 0); -lean_ctor_set(x_135, 0, x_125); -lean_ctor_set(x_135, 1, x_126); -lean_ctor_set(x_135, 2, x_134); -lean_ctor_set(x_31, 14, x_135); -x_136 = lean_st_ref_set(x_7, x_31, x_34); -x_137 = lean_ctor_get(x_136, 1); -lean_inc(x_137); -lean_dec(x_136); -x_16 = x_137; -goto block_28; } else { -lean_object* x_138; lean_object* x_139; lean_object* x_140; lean_object* x_141; lean_object* x_142; lean_object* x_143; lean_object* x_144; lean_object* x_145; lean_object* x_146; lean_object* x_147; lean_object* x_148; lean_object* x_149; lean_object* x_150; lean_object* x_151; lean_object* x_152; lean_object* x_153; lean_object* x_154; lean_object* x_155; lean_object* x_156; lean_object* x_157; lean_object* x_158; lean_object* x_159; lean_object* x_160; lean_object* x_161; uint8_t x_162; lean_object* x_163; lean_object* x_164; lean_object* x_165; lean_object* x_166; lean_object* x_167; lean_object* x_168; lean_object* x_169; lean_object* x_170; lean_object* x_171; -x_138 = lean_array_fget(x_127, x_29); -x_139 = lean_box(0); -x_140 = lean_array_fset(x_127, x_29, x_139); -x_141 = lean_ctor_get(x_138, 0); -lean_inc(x_141); -x_142 = lean_ctor_get(x_138, 1); -lean_inc(x_142); -x_143 = lean_ctor_get(x_138, 2); -lean_inc(x_143); -x_144 = lean_ctor_get(x_138, 3); -lean_inc(x_144); -x_145 = lean_ctor_get(x_138, 4); -lean_inc(x_145); -x_146 = lean_ctor_get(x_138, 5); -lean_inc(x_146); -x_147 = lean_ctor_get(x_138, 6); -lean_inc(x_147); -x_148 = lean_ctor_get(x_138, 7); -lean_inc(x_148); -x_149 = lean_ctor_get(x_138, 8); -lean_inc(x_149); -x_150 = lean_ctor_get(x_138, 9); -lean_inc(x_150); -x_151 = lean_ctor_get(x_138, 10); -lean_inc(x_151); -x_152 = lean_ctor_get(x_138, 11); -lean_inc(x_152); -x_153 = lean_ctor_get(x_138, 12); -lean_inc(x_153); -x_154 = lean_ctor_get(x_138, 13); -lean_inc(x_154); -x_155 = lean_ctor_get(x_138, 14); -lean_inc(x_155); -x_156 = lean_ctor_get(x_138, 15); -lean_inc(x_156); -x_157 = lean_ctor_get(x_138, 16); -lean_inc(x_157); -x_158 = lean_ctor_get(x_138, 17); -lean_inc(x_158); -x_159 = lean_ctor_get(x_138, 18); -lean_inc(x_159); -x_160 = lean_ctor_get(x_138, 19); -lean_inc(x_160); -x_161 = lean_ctor_get(x_138, 20); -lean_inc(x_161); -x_162 = lean_ctor_get_uint8(x_138, sizeof(void*)*21); -if (lean_is_exclusive(x_138)) { - lean_ctor_release(x_138, 0); - lean_ctor_release(x_138, 1); - lean_ctor_release(x_138, 2); - lean_ctor_release(x_138, 3); - lean_ctor_release(x_138, 4); - lean_ctor_release(x_138, 5); - lean_ctor_release(x_138, 6); - lean_ctor_release(x_138, 7); - lean_ctor_release(x_138, 8); - lean_ctor_release(x_138, 9); - lean_ctor_release(x_138, 10); - lean_ctor_release(x_138, 11); - lean_ctor_release(x_138, 12); - lean_ctor_release(x_138, 13); - lean_ctor_release(x_138, 14); - 
lean_ctor_release(x_138, 15); - lean_ctor_release(x_138, 16); - lean_ctor_release(x_138, 17); - lean_ctor_release(x_138, 18); - lean_ctor_release(x_138, 19); - lean_ctor_release(x_138, 20); - x_163 = x_138; -} else { - lean_dec_ref(x_138); - x_163 = lean_box(0); +lean_object* x_19; lean_object* x_20; lean_object* x_21; lean_object* x_22; uint8_t x_23; +x_19 = lean_ctor_get(x_1, 0); +x_20 = lean_ctor_get(x_1, 1); +x_21 = lean_ctor_get(x_1, 2); +x_22 = lean_ctor_get(x_1, 3); +lean_inc(x_22); +lean_inc(x_21); +lean_inc(x_20); +lean_inc(x_19); +lean_dec(x_1); +x_23 = l_Lean_Meta_Grind_Arith_CommRing_EqCnstr_compare(x_2, x_20); +switch (x_23) { +case 0: +{ +lean_object* x_24; uint8_t x_25; lean_object* x_26; +x_24 = l_Lean_RBNode_ins___at_Lean_Meta_Grind_Arith_CommRing_EqCnstr_addToQueue___spec__2(x_19, x_2, x_3); +x_25 = 0; +x_26 = lean_alloc_ctor(1, 4, 1); +lean_ctor_set(x_26, 0, x_24); +lean_ctor_set(x_26, 1, x_20); +lean_ctor_set(x_26, 2, x_21); +lean_ctor_set(x_26, 3, x_22); +lean_ctor_set_uint8(x_26, sizeof(void*)*4, x_25); +return x_26; } -x_164 = lean_box(0); -x_165 = l_Lean_PersistentArray_set___rarg(x_160, x_4, x_164); -if (lean_is_scalar(x_163)) { - x_166 = lean_alloc_ctor(0, 21, 1); -} else { - x_166 = x_163; +case 1: +{ +uint8_t x_27; lean_object* x_28; +lean_dec(x_21); +lean_dec(x_20); +x_27 = 0; +x_28 = lean_alloc_ctor(1, 4, 1); +lean_ctor_set(x_28, 0, x_19); +lean_ctor_set(x_28, 1, x_2); +lean_ctor_set(x_28, 2, x_3); +lean_ctor_set(x_28, 3, x_22); +lean_ctor_set_uint8(x_28, sizeof(void*)*4, x_27); +return x_28; } -lean_ctor_set(x_166, 0, x_141); -lean_ctor_set(x_166, 1, x_142); -lean_ctor_set(x_166, 2, x_143); -lean_ctor_set(x_166, 3, x_144); -lean_ctor_set(x_166, 4, x_145); -lean_ctor_set(x_166, 5, x_146); -lean_ctor_set(x_166, 6, x_147); -lean_ctor_set(x_166, 7, x_148); -lean_ctor_set(x_166, 8, x_149); -lean_ctor_set(x_166, 9, x_150); -lean_ctor_set(x_166, 10, x_151); -lean_ctor_set(x_166, 11, x_152); -lean_ctor_set(x_166, 12, x_153); -lean_ctor_set(x_166, 13, x_154); -lean_ctor_set(x_166, 14, x_155); -lean_ctor_set(x_166, 15, x_156); -lean_ctor_set(x_166, 16, x_157); -lean_ctor_set(x_166, 17, x_158); -lean_ctor_set(x_166, 18, x_159); -lean_ctor_set(x_166, 19, x_165); -lean_ctor_set(x_166, 20, x_161); -lean_ctor_set_uint8(x_166, sizeof(void*)*21, x_162); -x_167 = lean_array_fset(x_140, x_29, x_166); -lean_dec(x_29); -if (lean_is_scalar(x_131)) { - x_168 = lean_alloc_ctor(0, 4, 0); -} else { - x_168 = x_131; +default: +{ +lean_object* x_29; uint8_t x_30; lean_object* x_31; +x_29 = l_Lean_RBNode_ins___at_Lean_Meta_Grind_Arith_CommRing_EqCnstr_addToQueue___spec__2(x_22, x_2, x_3); +x_30 = 0; +x_31 = lean_alloc_ctor(1, 4, 1); +lean_ctor_set(x_31, 0, x_19); +lean_ctor_set(x_31, 1, x_20); +lean_ctor_set(x_31, 2, x_21); +lean_ctor_set(x_31, 3, x_29); +lean_ctor_set_uint8(x_31, sizeof(void*)*4, x_30); +return x_31; } -lean_ctor_set(x_168, 0, x_167); -lean_ctor_set(x_168, 1, x_128); -lean_ctor_set(x_168, 2, x_129); -lean_ctor_set(x_168, 3, x_130); -x_169 = lean_alloc_ctor(0, 3, 0); -lean_ctor_set(x_169, 0, x_125); -lean_ctor_set(x_169, 1, x_126); -lean_ctor_set(x_169, 2, x_168); -lean_ctor_set(x_31, 14, x_169); -x_170 = lean_st_ref_set(x_7, x_31, x_34); -x_171 = lean_ctor_get(x_170, 1); -lean_inc(x_171); -lean_dec(x_170); -x_16 = x_171; -goto block_28; } } } else { -lean_object* x_172; lean_object* x_173; lean_object* x_174; lean_object* x_175; lean_object* x_176; lean_object* x_177; lean_object* x_178; lean_object* x_179; uint8_t x_180; lean_object* x_181; lean_object* x_182; 
lean_object* x_183; lean_object* x_184; lean_object* x_185; lean_object* x_186; lean_object* x_187; lean_object* x_188; lean_object* x_189; lean_object* x_190; lean_object* x_191; lean_object* x_192; lean_object* x_193; lean_object* x_194; lean_object* x_195; lean_object* x_196; uint8_t x_197; -x_172 = lean_ctor_get(x_31, 0); -x_173 = lean_ctor_get(x_31, 1); -x_174 = lean_ctor_get(x_31, 2); -x_175 = lean_ctor_get(x_31, 3); -x_176 = lean_ctor_get(x_31, 4); -x_177 = lean_ctor_get(x_31, 5); -x_178 = lean_ctor_get(x_31, 6); -x_179 = lean_ctor_get(x_31, 7); -x_180 = lean_ctor_get_uint8(x_31, sizeof(void*)*16); -x_181 = lean_ctor_get(x_31, 8); -x_182 = lean_ctor_get(x_31, 9); -x_183 = lean_ctor_get(x_31, 10); -x_184 = lean_ctor_get(x_31, 11); -x_185 = lean_ctor_get(x_31, 12); -x_186 = lean_ctor_get(x_31, 13); -x_187 = lean_ctor_get(x_31, 15); -lean_inc(x_187); -lean_inc(x_186); -lean_inc(x_185); -lean_inc(x_184); -lean_inc(x_183); -lean_inc(x_182); -lean_inc(x_181); -lean_inc(x_179); -lean_inc(x_178); -lean_inc(x_177); -lean_inc(x_176); -lean_inc(x_175); -lean_inc(x_174); -lean_inc(x_173); -lean_inc(x_172); -lean_dec(x_31); -x_188 = lean_ctor_get(x_32, 0); -lean_inc(x_188); -x_189 = lean_ctor_get(x_32, 1); -lean_inc(x_189); -if (lean_is_exclusive(x_32)) { - lean_ctor_release(x_32, 0); - lean_ctor_release(x_32, 1); - lean_ctor_release(x_32, 2); - x_190 = x_32; -} else { - lean_dec_ref(x_32); - x_190 = lean_box(0); -} -x_191 = lean_ctor_get(x_33, 0); -lean_inc(x_191); -x_192 = lean_ctor_get(x_33, 1); -lean_inc(x_192); -x_193 = lean_ctor_get(x_33, 2); -lean_inc(x_193); -x_194 = lean_ctor_get(x_33, 3); -lean_inc(x_194); -if (lean_is_exclusive(x_33)) { - lean_ctor_release(x_33, 0); - lean_ctor_release(x_33, 1); - lean_ctor_release(x_33, 2); - lean_ctor_release(x_33, 3); - x_195 = x_33; -} else { - lean_dec_ref(x_33); - x_195 = lean_box(0); -} -x_196 = lean_array_get_size(x_191); -x_197 = lean_nat_dec_lt(x_29, x_196); -lean_dec(x_196); -if (x_197 == 0) +uint8_t x_32; +x_32 = !lean_is_exclusive(x_1); +if (x_32 == 0) { -lean_object* x_198; lean_object* x_199; lean_object* x_200; lean_object* x_201; lean_object* x_202; -lean_dec(x_29); -if (lean_is_scalar(x_195)) { - x_198 = lean_alloc_ctor(0, 4, 0); -} else { - x_198 = x_195; -} -lean_ctor_set(x_198, 0, x_191); -lean_ctor_set(x_198, 1, x_192); -lean_ctor_set(x_198, 2, x_193); -lean_ctor_set(x_198, 3, x_194); -if (lean_is_scalar(x_190)) { - x_199 = lean_alloc_ctor(0, 3, 0); -} else { - x_199 = x_190; -} -lean_ctor_set(x_199, 0, x_188); -lean_ctor_set(x_199, 1, x_189); -lean_ctor_set(x_199, 2, x_198); -x_200 = lean_alloc_ctor(0, 16, 1); -lean_ctor_set(x_200, 0, x_172); -lean_ctor_set(x_200, 1, x_173); -lean_ctor_set(x_200, 2, x_174); -lean_ctor_set(x_200, 3, x_175); -lean_ctor_set(x_200, 4, x_176); -lean_ctor_set(x_200, 5, x_177); -lean_ctor_set(x_200, 6, x_178); -lean_ctor_set(x_200, 7, x_179); -lean_ctor_set(x_200, 8, x_181); -lean_ctor_set(x_200, 9, x_182); -lean_ctor_set(x_200, 10, x_183); -lean_ctor_set(x_200, 11, x_184); -lean_ctor_set(x_200, 12, x_185); -lean_ctor_set(x_200, 13, x_186); -lean_ctor_set(x_200, 14, x_199); -lean_ctor_set(x_200, 15, x_187); -lean_ctor_set_uint8(x_200, sizeof(void*)*16, x_180); -x_201 = lean_st_ref_set(x_7, x_200, x_34); -x_202 = lean_ctor_get(x_201, 1); -lean_inc(x_202); -lean_dec(x_201); -x_16 = x_202; -goto block_28; -} -else +lean_object* x_33; lean_object* x_34; lean_object* x_35; lean_object* x_36; uint8_t x_37; +x_33 = lean_ctor_get(x_1, 0); +x_34 = lean_ctor_get(x_1, 1); +x_35 = lean_ctor_get(x_1, 2); +x_36 = 
lean_ctor_get(x_1, 3); +x_37 = l_Lean_Meta_Grind_Arith_CommRing_EqCnstr_compare(x_2, x_34); +switch (x_37) { +case 0: { -lean_object* x_203; lean_object* x_204; lean_object* x_205; lean_object* x_206; lean_object* x_207; lean_object* x_208; lean_object* x_209; lean_object* x_210; lean_object* x_211; lean_object* x_212; lean_object* x_213; lean_object* x_214; lean_object* x_215; lean_object* x_216; lean_object* x_217; lean_object* x_218; lean_object* x_219; lean_object* x_220; lean_object* x_221; lean_object* x_222; lean_object* x_223; lean_object* x_224; lean_object* x_225; lean_object* x_226; uint8_t x_227; lean_object* x_228; lean_object* x_229; lean_object* x_230; lean_object* x_231; lean_object* x_232; lean_object* x_233; lean_object* x_234; lean_object* x_235; lean_object* x_236; lean_object* x_237; -x_203 = lean_array_fget(x_191, x_29); -x_204 = lean_box(0); -x_205 = lean_array_fset(x_191, x_29, x_204); -x_206 = lean_ctor_get(x_203, 0); -lean_inc(x_206); -x_207 = lean_ctor_get(x_203, 1); -lean_inc(x_207); -x_208 = lean_ctor_get(x_203, 2); -lean_inc(x_208); -x_209 = lean_ctor_get(x_203, 3); -lean_inc(x_209); -x_210 = lean_ctor_get(x_203, 4); -lean_inc(x_210); -x_211 = lean_ctor_get(x_203, 5); -lean_inc(x_211); -x_212 = lean_ctor_get(x_203, 6); -lean_inc(x_212); -x_213 = lean_ctor_get(x_203, 7); -lean_inc(x_213); -x_214 = lean_ctor_get(x_203, 8); -lean_inc(x_214); -x_215 = lean_ctor_get(x_203, 9); -lean_inc(x_215); -x_216 = lean_ctor_get(x_203, 10); -lean_inc(x_216); -x_217 = lean_ctor_get(x_203, 11); -lean_inc(x_217); -x_218 = lean_ctor_get(x_203, 12); -lean_inc(x_218); -x_219 = lean_ctor_get(x_203, 13); -lean_inc(x_219); -x_220 = lean_ctor_get(x_203, 14); -lean_inc(x_220); -x_221 = lean_ctor_get(x_203, 15); -lean_inc(x_221); -x_222 = lean_ctor_get(x_203, 16); -lean_inc(x_222); -x_223 = lean_ctor_get(x_203, 17); -lean_inc(x_223); -x_224 = lean_ctor_get(x_203, 18); -lean_inc(x_224); -x_225 = lean_ctor_get(x_203, 19); -lean_inc(x_225); -x_226 = lean_ctor_get(x_203, 20); -lean_inc(x_226); -x_227 = lean_ctor_get_uint8(x_203, sizeof(void*)*21); -if (lean_is_exclusive(x_203)) { - lean_ctor_release(x_203, 0); - lean_ctor_release(x_203, 1); - lean_ctor_release(x_203, 2); - lean_ctor_release(x_203, 3); - lean_ctor_release(x_203, 4); - lean_ctor_release(x_203, 5); - lean_ctor_release(x_203, 6); - lean_ctor_release(x_203, 7); - lean_ctor_release(x_203, 8); - lean_ctor_release(x_203, 9); - lean_ctor_release(x_203, 10); - lean_ctor_release(x_203, 11); - lean_ctor_release(x_203, 12); - lean_ctor_release(x_203, 13); - lean_ctor_release(x_203, 14); - lean_ctor_release(x_203, 15); - lean_ctor_release(x_203, 16); - lean_ctor_release(x_203, 17); - lean_ctor_release(x_203, 18); - lean_ctor_release(x_203, 19); - lean_ctor_release(x_203, 20); - x_228 = x_203; -} else { - lean_dec_ref(x_203); - x_228 = lean_box(0); -} -x_229 = lean_box(0); -x_230 = l_Lean_PersistentArray_set___rarg(x_225, x_4, x_229); -if (lean_is_scalar(x_228)) { - x_231 = lean_alloc_ctor(0, 21, 1); -} else { - x_231 = x_228; -} -lean_ctor_set(x_231, 0, x_206); -lean_ctor_set(x_231, 1, x_207); -lean_ctor_set(x_231, 2, x_208); -lean_ctor_set(x_231, 3, x_209); -lean_ctor_set(x_231, 4, x_210); -lean_ctor_set(x_231, 5, x_211); -lean_ctor_set(x_231, 6, x_212); -lean_ctor_set(x_231, 7, x_213); -lean_ctor_set(x_231, 8, x_214); -lean_ctor_set(x_231, 9, x_215); -lean_ctor_set(x_231, 10, x_216); -lean_ctor_set(x_231, 11, x_217); -lean_ctor_set(x_231, 12, x_218); -lean_ctor_set(x_231, 13, x_219); -lean_ctor_set(x_231, 14, x_220); 
-lean_ctor_set(x_231, 15, x_221); -lean_ctor_set(x_231, 16, x_222); -lean_ctor_set(x_231, 17, x_223); -lean_ctor_set(x_231, 18, x_224); -lean_ctor_set(x_231, 19, x_230); -lean_ctor_set(x_231, 20, x_226); -lean_ctor_set_uint8(x_231, sizeof(void*)*21, x_227); -x_232 = lean_array_fset(x_205, x_29, x_231); -lean_dec(x_29); -if (lean_is_scalar(x_195)) { - x_233 = lean_alloc_ctor(0, 4, 0); -} else { - x_233 = x_195; -} -lean_ctor_set(x_233, 0, x_232); -lean_ctor_set(x_233, 1, x_192); -lean_ctor_set(x_233, 2, x_193); -lean_ctor_set(x_233, 3, x_194); -if (lean_is_scalar(x_190)) { - x_234 = lean_alloc_ctor(0, 3, 0); -} else { - x_234 = x_190; -} -lean_ctor_set(x_234, 0, x_188); -lean_ctor_set(x_234, 1, x_189); -lean_ctor_set(x_234, 2, x_233); -x_235 = lean_alloc_ctor(0, 16, 1); -lean_ctor_set(x_235, 0, x_172); -lean_ctor_set(x_235, 1, x_173); -lean_ctor_set(x_235, 2, x_174); -lean_ctor_set(x_235, 3, x_175); -lean_ctor_set(x_235, 4, x_176); -lean_ctor_set(x_235, 5, x_177); -lean_ctor_set(x_235, 6, x_178); -lean_ctor_set(x_235, 7, x_179); -lean_ctor_set(x_235, 8, x_181); -lean_ctor_set(x_235, 9, x_182); -lean_ctor_set(x_235, 10, x_183); -lean_ctor_set(x_235, 11, x_184); -lean_ctor_set(x_235, 12, x_185); -lean_ctor_set(x_235, 13, x_186); -lean_ctor_set(x_235, 14, x_234); -lean_ctor_set(x_235, 15, x_187); -lean_ctor_set_uint8(x_235, sizeof(void*)*16, x_180); -x_236 = lean_st_ref_set(x_7, x_235, x_34); -x_237 = lean_ctor_get(x_236, 1); -lean_inc(x_237); -lean_dec(x_236); -x_16 = x_237; -goto block_28; -} -} -block_28: +lean_object* x_38; uint8_t x_39; +x_38 = l_Lean_RBNode_ins___at_Lean_Meta_Grind_Arith_CommRing_EqCnstr_addToQueue___spec__2(x_33, x_2, x_3); +x_39 = lean_ctor_get_uint8(x_38, sizeof(void*)*4); +if (x_39 == 0) { -lean_object* x_17; lean_object* x_18; lean_object* x_19; -x_17 = lean_box(0); -x_18 = lean_box(0); -lean_inc(x_3); -x_19 = l_List_forIn_x27_loop___at_Lean_Meta_Grind_Arith_CommRing_EqCnstr_simplifyBasis___spec__1(x_1, x_2, x_3, x_17, x_3, x_3, x_18, lean_box(0), x_6, x_7, x_8, x_9, x_10, x_11, x_12, x_13, x_14, x_16); -lean_dec(x_3); -if (lean_obj_tag(x_19) == 0) +lean_object* x_40; +x_40 = lean_ctor_get(x_38, 0); +lean_inc(x_40); +if (lean_obj_tag(x_40) == 0) { -uint8_t x_20; -x_20 = !lean_is_exclusive(x_19); -if (x_20 == 0) +lean_object* x_41; +x_41 = lean_ctor_get(x_38, 3); +lean_inc(x_41); +if (lean_obj_tag(x_41) == 0) { -lean_object* x_21; -x_21 = lean_ctor_get(x_19, 0); -lean_dec(x_21); -lean_ctor_set(x_19, 0, x_18); -return x_19; +uint8_t x_42; +x_42 = !lean_is_exclusive(x_38); +if (x_42 == 0) +{ +lean_object* x_43; lean_object* x_44; uint8_t x_45; +x_43 = lean_ctor_get(x_38, 3); +lean_dec(x_43); +x_44 = lean_ctor_get(x_38, 0); +lean_dec(x_44); +lean_ctor_set(x_38, 0, x_41); +x_45 = 1; +lean_ctor_set(x_1, 0, x_38); +lean_ctor_set_uint8(x_1, sizeof(void*)*4, x_45); +return x_1; } else { -lean_object* x_22; lean_object* x_23; -x_22 = lean_ctor_get(x_19, 1); -lean_inc(x_22); -lean_dec(x_19); -x_23 = lean_alloc_ctor(0, 2, 0); -lean_ctor_set(x_23, 0, x_18); -lean_ctor_set(x_23, 1, x_22); -return x_23; +lean_object* x_46; lean_object* x_47; lean_object* x_48; uint8_t x_49; +x_46 = lean_ctor_get(x_38, 1); +x_47 = lean_ctor_get(x_38, 2); +lean_inc(x_47); +lean_inc(x_46); +lean_dec(x_38); +x_48 = lean_alloc_ctor(1, 4, 1); +lean_ctor_set(x_48, 0, x_41); +lean_ctor_set(x_48, 1, x_46); +lean_ctor_set(x_48, 2, x_47); +lean_ctor_set(x_48, 3, x_41); +lean_ctor_set_uint8(x_48, sizeof(void*)*4, x_39); +x_49 = 1; +lean_ctor_set(x_1, 0, x_48); +lean_ctor_set_uint8(x_1, sizeof(void*)*4, x_49); 
+return x_1; } } else { -uint8_t x_24; -x_24 = !lean_is_exclusive(x_19); -if (x_24 == 0) +uint8_t x_50; +x_50 = lean_ctor_get_uint8(x_41, sizeof(void*)*4); +if (x_50 == 0) { -return x_19; +uint8_t x_51; +x_51 = !lean_is_exclusive(x_38); +if (x_51 == 0) +{ +lean_object* x_52; lean_object* x_53; lean_object* x_54; lean_object* x_55; uint8_t x_56; +x_52 = lean_ctor_get(x_38, 1); +x_53 = lean_ctor_get(x_38, 2); +x_54 = lean_ctor_get(x_38, 3); +lean_dec(x_54); +x_55 = lean_ctor_get(x_38, 0); +lean_dec(x_55); +x_56 = !lean_is_exclusive(x_41); +if (x_56 == 0) +{ +lean_object* x_57; lean_object* x_58; lean_object* x_59; lean_object* x_60; uint8_t x_61; uint8_t x_62; +x_57 = lean_ctor_get(x_41, 0); +x_58 = lean_ctor_get(x_41, 1); +x_59 = lean_ctor_get(x_41, 2); +x_60 = lean_ctor_get(x_41, 3); +x_61 = 1; +lean_ctor_set(x_41, 3, x_57); +lean_ctor_set(x_41, 2, x_53); +lean_ctor_set(x_41, 1, x_52); +lean_ctor_set(x_41, 0, x_40); +lean_ctor_set_uint8(x_41, sizeof(void*)*4, x_61); +lean_ctor_set(x_38, 3, x_36); +lean_ctor_set(x_38, 2, x_35); +lean_ctor_set(x_38, 1, x_34); +lean_ctor_set(x_38, 0, x_60); +lean_ctor_set_uint8(x_38, sizeof(void*)*4, x_61); +x_62 = 0; +lean_ctor_set(x_1, 3, x_38); +lean_ctor_set(x_1, 2, x_59); +lean_ctor_set(x_1, 1, x_58); +lean_ctor_set(x_1, 0, x_41); +lean_ctor_set_uint8(x_1, sizeof(void*)*4, x_62); +return x_1; } else { -lean_object* x_25; lean_object* x_26; lean_object* x_27; -x_25 = lean_ctor_get(x_19, 0); -x_26 = lean_ctor_get(x_19, 1); -lean_inc(x_26); -lean_inc(x_25); -lean_dec(x_19); -x_27 = lean_alloc_ctor(1, 2, 0); -lean_ctor_set(x_27, 0, x_25); -lean_ctor_set(x_27, 1, x_26); -return x_27; -} -} -} -} +lean_object* x_63; lean_object* x_64; lean_object* x_65; lean_object* x_66; uint8_t x_67; lean_object* x_68; uint8_t x_69; +x_63 = lean_ctor_get(x_41, 0); +x_64 = lean_ctor_get(x_41, 1); +x_65 = lean_ctor_get(x_41, 2); +x_66 = lean_ctor_get(x_41, 3); +lean_inc(x_66); +lean_inc(x_65); +lean_inc(x_64); +lean_inc(x_63); +lean_dec(x_41); +x_67 = 1; +x_68 = lean_alloc_ctor(1, 4, 1); +lean_ctor_set(x_68, 0, x_40); +lean_ctor_set(x_68, 1, x_52); +lean_ctor_set(x_68, 2, x_53); +lean_ctor_set(x_68, 3, x_63); +lean_ctor_set_uint8(x_68, sizeof(void*)*4, x_67); +lean_ctor_set(x_38, 3, x_36); +lean_ctor_set(x_38, 2, x_35); +lean_ctor_set(x_38, 1, x_34); +lean_ctor_set(x_38, 0, x_66); +lean_ctor_set_uint8(x_38, sizeof(void*)*4, x_67); +x_69 = 0; +lean_ctor_set(x_1, 3, x_38); +lean_ctor_set(x_1, 2, x_65); +lean_ctor_set(x_1, 1, x_64); +lean_ctor_set(x_1, 0, x_68); +lean_ctor_set_uint8(x_1, sizeof(void*)*4, x_69); +return x_1; } -LEAN_EXPORT lean_object* l_Lean_Meta_Grind_Arith_CommRing_EqCnstr_simplifyBasis(lean_object* x_1, lean_object* x_2, lean_object* x_3, lean_object* x_4, lean_object* x_5, lean_object* x_6, lean_object* x_7, lean_object* x_8, lean_object* x_9, lean_object* x_10, lean_object* x_11) { -_start: -{ -lean_object* x_12; -x_12 = lean_ctor_get(x_1, 0); -lean_inc(x_12); -if (lean_obj_tag(x_12) == 0) -{ -lean_object* x_13; lean_object* x_14; -lean_dec(x_12); -lean_dec(x_10); -lean_dec(x_9); -lean_dec(x_8); -lean_dec(x_7); -lean_dec(x_6); -lean_dec(x_5); -lean_dec(x_4); -lean_dec(x_3); -lean_dec(x_2); -lean_dec(x_1); -x_13 = lean_box(0); -x_14 = lean_alloc_ctor(0, 2, 0); -lean_ctor_set(x_14, 0, x_13); -lean_ctor_set(x_14, 1, x_11); -return x_14; } else { -lean_object* x_15; -x_15 = lean_ctor_get(x_12, 1); -lean_inc(x_15); -lean_dec(x_12); -if (lean_obj_tag(x_15) == 0) -{ -lean_object* x_16; lean_object* x_17; -lean_dec(x_10); -lean_dec(x_9); -lean_dec(x_8); 
-lean_dec(x_7); -lean_dec(x_6); -lean_dec(x_5); -lean_dec(x_4); -lean_dec(x_3); -lean_dec(x_2); -lean_dec(x_1); -x_16 = lean_box(0); -x_17 = lean_alloc_ctor(0, 2, 0); -lean_ctor_set(x_17, 0, x_16); -lean_ctor_set(x_17, 1, x_11); -return x_17; +lean_object* x_70; lean_object* x_71; lean_object* x_72; lean_object* x_73; lean_object* x_74; lean_object* x_75; lean_object* x_76; uint8_t x_77; lean_object* x_78; lean_object* x_79; uint8_t x_80; +x_70 = lean_ctor_get(x_38, 1); +x_71 = lean_ctor_get(x_38, 2); +lean_inc(x_71); +lean_inc(x_70); +lean_dec(x_38); +x_72 = lean_ctor_get(x_41, 0); +lean_inc(x_72); +x_73 = lean_ctor_get(x_41, 1); +lean_inc(x_73); +x_74 = lean_ctor_get(x_41, 2); +lean_inc(x_74); +x_75 = lean_ctor_get(x_41, 3); +lean_inc(x_75); +if (lean_is_exclusive(x_41)) { + lean_ctor_release(x_41, 0); + lean_ctor_release(x_41, 1); + lean_ctor_release(x_41, 2); + lean_ctor_release(x_41, 3); + x_76 = x_41; +} else { + lean_dec_ref(x_41); + x_76 = lean_box(0); } -else -{ -lean_object* x_18; lean_object* x_19; lean_object* x_20; -x_18 = lean_ctor_get(x_15, 0); -lean_inc(x_18); -x_19 = lean_ctor_get(x_18, 0); -lean_inc(x_19); -lean_dec(x_18); -x_20 = l_Lean_Meta_Grind_Arith_CommRing_getRing(x_2, x_3, x_4, x_5, x_6, x_7, x_8, x_9, x_10, x_11); -if (lean_obj_tag(x_20) == 0) -{ -lean_object* x_21; lean_object* x_22; lean_object* x_23; lean_object* x_24; lean_object* x_31; lean_object* x_32; lean_object* x_33; uint8_t x_34; -x_21 = lean_ctor_get(x_20, 0); -lean_inc(x_21); -x_22 = lean_ctor_get(x_20, 1); -lean_inc(x_22); -if (lean_is_exclusive(x_20)) { - lean_ctor_release(x_20, 0); - lean_ctor_release(x_20, 1); - x_23 = x_20; +x_77 = 1; +if (lean_is_scalar(x_76)) { + x_78 = lean_alloc_ctor(1, 4, 1); } else { - lean_dec_ref(x_20); - x_23 = lean_box(0); + x_78 = x_76; } -x_31 = lean_box(0); -x_32 = lean_ctor_get(x_21, 19); -lean_inc(x_32); -lean_dec(x_21); -x_33 = lean_ctor_get(x_32, 2); -lean_inc(x_33); -x_34 = lean_nat_dec_lt(x_19, x_33); -lean_dec(x_33); -if (x_34 == 0) -{ -lean_object* x_35; -lean_dec(x_32); -x_35 = l_outOfBounds___rarg(x_31); -x_24 = x_35; -goto block_30; +lean_ctor_set(x_78, 0, x_40); +lean_ctor_set(x_78, 1, x_70); +lean_ctor_set(x_78, 2, x_71); +lean_ctor_set(x_78, 3, x_72); +lean_ctor_set_uint8(x_78, sizeof(void*)*4, x_77); +x_79 = lean_alloc_ctor(1, 4, 1); +lean_ctor_set(x_79, 0, x_75); +lean_ctor_set(x_79, 1, x_34); +lean_ctor_set(x_79, 2, x_35); +lean_ctor_set(x_79, 3, x_36); +lean_ctor_set_uint8(x_79, sizeof(void*)*4, x_77); +x_80 = 0; +lean_ctor_set(x_1, 3, x_79); +lean_ctor_set(x_1, 2, x_74); +lean_ctor_set(x_1, 1, x_73); +lean_ctor_set(x_1, 0, x_78); +lean_ctor_set_uint8(x_1, sizeof(void*)*4, x_80); +return x_1; } -else -{ -lean_object* x_36; -x_36 = l_Lean_PersistentArray_get_x21___rarg(x_31, x_32, x_19); -x_24 = x_36; -goto block_30; } -block_30: +else { -uint8_t x_25; -x_25 = l_List_isEmpty___rarg(x_24); -if (x_25 == 0) +uint8_t x_81; +lean_free_object(x_1); +x_81 = !lean_is_exclusive(x_41); +if (x_81 == 0) { -lean_object* x_26; lean_object* x_27; -lean_dec(x_23); -x_26 = lean_box(0); -x_27 = l_Lean_Meta_Grind_Arith_CommRing_EqCnstr_simplifyBasis___lambda__1(x_1, x_15, x_24, x_19, x_26, x_2, x_3, x_4, x_5, x_6, x_7, x_8, x_9, x_10, x_22); -lean_dec(x_19); -return x_27; +lean_object* x_82; lean_object* x_83; lean_object* x_84; lean_object* x_85; uint8_t x_86; +x_82 = lean_ctor_get(x_41, 3); +lean_dec(x_82); +x_83 = lean_ctor_get(x_41, 2); +lean_dec(x_83); +x_84 = lean_ctor_get(x_41, 1); +lean_dec(x_84); +x_85 = lean_ctor_get(x_41, 0); +lean_dec(x_85); +x_86 = 1; 
+lean_ctor_set(x_41, 3, x_36); +lean_ctor_set(x_41, 2, x_35); +lean_ctor_set(x_41, 1, x_34); +lean_ctor_set(x_41, 0, x_38); +lean_ctor_set_uint8(x_41, sizeof(void*)*4, x_86); +return x_41; } else { -lean_object* x_28; lean_object* x_29; -lean_dec(x_24); -lean_dec(x_19); -lean_dec(x_15); -lean_dec(x_10); -lean_dec(x_9); -lean_dec(x_8); -lean_dec(x_7); -lean_dec(x_6); -lean_dec(x_5); -lean_dec(x_4); -lean_dec(x_3); -lean_dec(x_2); -lean_dec(x_1); -x_28 = lean_box(0); -if (lean_is_scalar(x_23)) { - x_29 = lean_alloc_ctor(0, 2, 0); -} else { - x_29 = x_23; +uint8_t x_87; lean_object* x_88; +lean_dec(x_41); +x_87 = 1; +x_88 = lean_alloc_ctor(1, 4, 1); +lean_ctor_set(x_88, 0, x_38); +lean_ctor_set(x_88, 1, x_34); +lean_ctor_set(x_88, 2, x_35); +lean_ctor_set(x_88, 3, x_36); +lean_ctor_set_uint8(x_88, sizeof(void*)*4, x_87); +return x_88; } -lean_ctor_set(x_29, 0, x_28); -lean_ctor_set(x_29, 1, x_22); -return x_29; } } } else { -uint8_t x_37; -lean_dec(x_19); -lean_dec(x_15); -lean_dec(x_10); -lean_dec(x_9); -lean_dec(x_8); -lean_dec(x_7); -lean_dec(x_6); -lean_dec(x_5); -lean_dec(x_4); -lean_dec(x_3); -lean_dec(x_2); -lean_dec(x_1); -x_37 = !lean_is_exclusive(x_20); -if (x_37 == 0) +uint8_t x_89; +x_89 = lean_ctor_get_uint8(x_40, sizeof(void*)*4); +if (x_89 == 0) { -return x_20; -} -else +uint8_t x_90; +x_90 = !lean_is_exclusive(x_38); +if (x_90 == 0) { -lean_object* x_38; lean_object* x_39; lean_object* x_40; -x_38 = lean_ctor_get(x_20, 0); -x_39 = lean_ctor_get(x_20, 1); -lean_inc(x_39); -lean_inc(x_38); -lean_dec(x_20); -x_40 = lean_alloc_ctor(1, 2, 0); -lean_ctor_set(x_40, 0, x_38); -lean_ctor_set(x_40, 1, x_39); -return x_40; -} -} -} -} -} +lean_object* x_91; lean_object* x_92; lean_object* x_93; lean_object* x_94; uint8_t x_95; +x_91 = lean_ctor_get(x_38, 1); +x_92 = lean_ctor_get(x_38, 2); +x_93 = lean_ctor_get(x_38, 3); +x_94 = lean_ctor_get(x_38, 0); +lean_dec(x_94); +x_95 = !lean_is_exclusive(x_40); +if (x_95 == 0) +{ +uint8_t x_96; uint8_t x_97; +x_96 = 1; +lean_ctor_set_uint8(x_40, sizeof(void*)*4, x_96); +lean_ctor_set(x_38, 3, x_36); +lean_ctor_set(x_38, 2, x_35); +lean_ctor_set(x_38, 1, x_34); +lean_ctor_set(x_38, 0, x_93); +lean_ctor_set_uint8(x_38, sizeof(void*)*4, x_96); +x_97 = 0; +lean_ctor_set(x_1, 3, x_38); +lean_ctor_set(x_1, 2, x_92); +lean_ctor_set(x_1, 1, x_91); +lean_ctor_set(x_1, 0, x_40); +lean_ctor_set_uint8(x_1, sizeof(void*)*4, x_97); +return x_1; } -LEAN_EXPORT lean_object* l_List_forIn_x27_loop___at_Lean_Meta_Grind_Arith_CommRing_EqCnstr_simplifyBasis___spec__1___boxed(lean_object** _args) { -lean_object* x_1 = _args[0]; -lean_object* x_2 = _args[1]; -lean_object* x_3 = _args[2]; -lean_object* x_4 = _args[3]; -lean_object* x_5 = _args[4]; -lean_object* x_6 = _args[5]; -lean_object* x_7 = _args[6]; -lean_object* x_8 = _args[7]; -lean_object* x_9 = _args[8]; -lean_object* x_10 = _args[9]; -lean_object* x_11 = _args[10]; -lean_object* x_12 = _args[11]; -lean_object* x_13 = _args[12]; -lean_object* x_14 = _args[13]; -lean_object* x_15 = _args[14]; -lean_object* x_16 = _args[15]; -lean_object* x_17 = _args[16]; -lean_object* x_18 = _args[17]; -_start: +else { -lean_object* x_19; -x_19 = l_List_forIn_x27_loop___at_Lean_Meta_Grind_Arith_CommRing_EqCnstr_simplifyBasis___spec__1(x_1, x_2, x_3, x_4, x_5, x_6, x_7, x_8, x_9, x_10, x_11, x_12, x_13, x_14, x_15, x_16, x_17, x_18); -lean_dec(x_5); -lean_dec(x_4); -lean_dec(x_3); -return x_19; +lean_object* x_98; lean_object* x_99; lean_object* x_100; lean_object* x_101; uint8_t x_102; lean_object* x_103; uint8_t x_104; +x_98 
= lean_ctor_get(x_40, 0); +x_99 = lean_ctor_get(x_40, 1); +x_100 = lean_ctor_get(x_40, 2); +x_101 = lean_ctor_get(x_40, 3); +lean_inc(x_101); +lean_inc(x_100); +lean_inc(x_99); +lean_inc(x_98); +lean_dec(x_40); +x_102 = 1; +x_103 = lean_alloc_ctor(1, 4, 1); +lean_ctor_set(x_103, 0, x_98); +lean_ctor_set(x_103, 1, x_99); +lean_ctor_set(x_103, 2, x_100); +lean_ctor_set(x_103, 3, x_101); +lean_ctor_set_uint8(x_103, sizeof(void*)*4, x_102); +lean_ctor_set(x_38, 3, x_36); +lean_ctor_set(x_38, 2, x_35); +lean_ctor_set(x_38, 1, x_34); +lean_ctor_set(x_38, 0, x_93); +lean_ctor_set_uint8(x_38, sizeof(void*)*4, x_102); +x_104 = 0; +lean_ctor_set(x_1, 3, x_38); +lean_ctor_set(x_1, 2, x_92); +lean_ctor_set(x_1, 1, x_91); +lean_ctor_set(x_1, 0, x_103); +lean_ctor_set_uint8(x_1, sizeof(void*)*4, x_104); +return x_1; } } -LEAN_EXPORT lean_object* l_Lean_Meta_Grind_Arith_CommRing_EqCnstr_simplifyBasis___lambda__1___boxed(lean_object* x_1, lean_object* x_2, lean_object* x_3, lean_object* x_4, lean_object* x_5, lean_object* x_6, lean_object* x_7, lean_object* x_8, lean_object* x_9, lean_object* x_10, lean_object* x_11, lean_object* x_12, lean_object* x_13, lean_object* x_14, lean_object* x_15) { -_start: +else { -lean_object* x_16; -x_16 = l_Lean_Meta_Grind_Arith_CommRing_EqCnstr_simplifyBasis___lambda__1(x_1, x_2, x_3, x_4, x_5, x_6, x_7, x_8, x_9, x_10, x_11, x_12, x_13, x_14, x_15); -lean_dec(x_5); -lean_dec(x_4); -return x_16; +lean_object* x_105; lean_object* x_106; lean_object* x_107; lean_object* x_108; lean_object* x_109; lean_object* x_110; lean_object* x_111; lean_object* x_112; uint8_t x_113; lean_object* x_114; lean_object* x_115; uint8_t x_116; +x_105 = lean_ctor_get(x_38, 1); +x_106 = lean_ctor_get(x_38, 2); +x_107 = lean_ctor_get(x_38, 3); +lean_inc(x_107); +lean_inc(x_106); +lean_inc(x_105); +lean_dec(x_38); +x_108 = lean_ctor_get(x_40, 0); +lean_inc(x_108); +x_109 = lean_ctor_get(x_40, 1); +lean_inc(x_109); +x_110 = lean_ctor_get(x_40, 2); +lean_inc(x_110); +x_111 = lean_ctor_get(x_40, 3); +lean_inc(x_111); +if (lean_is_exclusive(x_40)) { + lean_ctor_release(x_40, 0); + lean_ctor_release(x_40, 1); + lean_ctor_release(x_40, 2); + lean_ctor_release(x_40, 3); + x_112 = x_40; +} else { + lean_dec_ref(x_40); + x_112 = lean_box(0); } +x_113 = 1; +if (lean_is_scalar(x_112)) { + x_114 = lean_alloc_ctor(1, 4, 1); +} else { + x_114 = x_112; +} +lean_ctor_set(x_114, 0, x_108); +lean_ctor_set(x_114, 1, x_109); +lean_ctor_set(x_114, 2, x_110); +lean_ctor_set(x_114, 3, x_111); +lean_ctor_set_uint8(x_114, sizeof(void*)*4, x_113); +x_115 = lean_alloc_ctor(1, 4, 1); +lean_ctor_set(x_115, 0, x_107); +lean_ctor_set(x_115, 1, x_34); +lean_ctor_set(x_115, 2, x_35); +lean_ctor_set(x_115, 3, x_36); +lean_ctor_set_uint8(x_115, sizeof(void*)*4, x_113); +x_116 = 0; +lean_ctor_set(x_1, 3, x_115); +lean_ctor_set(x_1, 2, x_106); +lean_ctor_set(x_1, 1, x_105); +lean_ctor_set(x_1, 0, x_114); +lean_ctor_set_uint8(x_1, sizeof(void*)*4, x_116); +return x_1; } -LEAN_EXPORT lean_object* l_Lean_RBNode_ins___at_Lean_Meta_Grind_Arith_CommRing_EqCnstr_addToQueue___spec__2(lean_object* x_1, lean_object* x_2, lean_object* x_3) { -_start: -{ -if (lean_obj_tag(x_1) == 0) -{ -lean_object* x_4; uint8_t x_5; lean_object* x_6; -x_4 = lean_box(0); -x_5 = 0; -x_6 = lean_alloc_ctor(1, 4, 1); -lean_ctor_set(x_6, 0, x_4); -lean_ctor_set(x_6, 1, x_2); -lean_ctor_set(x_6, 2, x_3); -lean_ctor_set(x_6, 3, x_4); -lean_ctor_set_uint8(x_6, sizeof(void*)*4, x_5); -return x_6; } else { -uint8_t x_7; -x_7 = lean_ctor_get_uint8(x_1, sizeof(void*)*4); 
-if (x_7 == 0) -{ -uint8_t x_8; -x_8 = !lean_is_exclusive(x_1); -if (x_8 == 0) -{ -lean_object* x_9; lean_object* x_10; lean_object* x_11; lean_object* x_12; uint8_t x_13; -x_9 = lean_ctor_get(x_1, 0); -x_10 = lean_ctor_get(x_1, 1); -x_11 = lean_ctor_get(x_1, 2); -x_12 = lean_ctor_get(x_1, 3); -x_13 = l_Lean_Meta_Grind_Arith_CommRing_EqCnstr_compare(x_2, x_10); -switch (x_13) { -case 0: +lean_object* x_117; +x_117 = lean_ctor_get(x_38, 3); +lean_inc(x_117); +if (lean_obj_tag(x_117) == 0) { -lean_object* x_14; uint8_t x_15; -x_14 = l_Lean_RBNode_ins___at_Lean_Meta_Grind_Arith_CommRing_EqCnstr_addToQueue___spec__2(x_9, x_2, x_3); -x_15 = 0; -lean_ctor_set(x_1, 0, x_14); -lean_ctor_set_uint8(x_1, sizeof(void*)*4, x_15); -return x_1; -} -case 1: +uint8_t x_118; +lean_free_object(x_1); +x_118 = !lean_is_exclusive(x_40); +if (x_118 == 0) { -uint8_t x_16; -lean_dec(x_11); -lean_dec(x_10); -x_16 = 0; -lean_ctor_set(x_1, 2, x_3); -lean_ctor_set(x_1, 1, x_2); -lean_ctor_set_uint8(x_1, sizeof(void*)*4, x_16); -return x_1; +lean_object* x_119; lean_object* x_120; lean_object* x_121; lean_object* x_122; uint8_t x_123; +x_119 = lean_ctor_get(x_40, 3); +lean_dec(x_119); +x_120 = lean_ctor_get(x_40, 2); +lean_dec(x_120); +x_121 = lean_ctor_get(x_40, 1); +lean_dec(x_121); +x_122 = lean_ctor_get(x_40, 0); +lean_dec(x_122); +x_123 = 1; +lean_ctor_set(x_40, 3, x_36); +lean_ctor_set(x_40, 2, x_35); +lean_ctor_set(x_40, 1, x_34); +lean_ctor_set(x_40, 0, x_38); +lean_ctor_set_uint8(x_40, sizeof(void*)*4, x_123); +return x_40; } -default: +else { -lean_object* x_17; uint8_t x_18; -x_17 = l_Lean_RBNode_ins___at_Lean_Meta_Grind_Arith_CommRing_EqCnstr_addToQueue___spec__2(x_12, x_2, x_3); -x_18 = 0; -lean_ctor_set(x_1, 3, x_17); -lean_ctor_set_uint8(x_1, sizeof(void*)*4, x_18); -return x_1; -} +uint8_t x_124; lean_object* x_125; +lean_dec(x_40); +x_124 = 1; +x_125 = lean_alloc_ctor(1, 4, 1); +lean_ctor_set(x_125, 0, x_38); +lean_ctor_set(x_125, 1, x_34); +lean_ctor_set(x_125, 2, x_35); +lean_ctor_set(x_125, 3, x_36); +lean_ctor_set_uint8(x_125, sizeof(void*)*4, x_124); +return x_125; } } else { -lean_object* x_19; lean_object* x_20; lean_object* x_21; lean_object* x_22; uint8_t x_23; -x_19 = lean_ctor_get(x_1, 0); -x_20 = lean_ctor_get(x_1, 1); -x_21 = lean_ctor_get(x_1, 2); -x_22 = lean_ctor_get(x_1, 3); -lean_inc(x_22); -lean_inc(x_21); -lean_inc(x_20); -lean_inc(x_19); -lean_dec(x_1); -x_23 = l_Lean_Meta_Grind_Arith_CommRing_EqCnstr_compare(x_2, x_20); -switch (x_23) { -case 0: +uint8_t x_126; +x_126 = lean_ctor_get_uint8(x_117, sizeof(void*)*4); +if (x_126 == 0) { -lean_object* x_24; uint8_t x_25; lean_object* x_26; -x_24 = l_Lean_RBNode_ins___at_Lean_Meta_Grind_Arith_CommRing_EqCnstr_addToQueue___spec__2(x_19, x_2, x_3); -x_25 = 0; -x_26 = lean_alloc_ctor(1, 4, 1); -lean_ctor_set(x_26, 0, x_24); -lean_ctor_set(x_26, 1, x_20); -lean_ctor_set(x_26, 2, x_21); -lean_ctor_set(x_26, 3, x_22); -lean_ctor_set_uint8(x_26, sizeof(void*)*4, x_25); -return x_26; +uint8_t x_127; +lean_free_object(x_1); +x_127 = !lean_is_exclusive(x_38); +if (x_127 == 0) +{ +lean_object* x_128; lean_object* x_129; lean_object* x_130; lean_object* x_131; uint8_t x_132; +x_128 = lean_ctor_get(x_38, 1); +x_129 = lean_ctor_get(x_38, 2); +x_130 = lean_ctor_get(x_38, 3); +lean_dec(x_130); +x_131 = lean_ctor_get(x_38, 0); +lean_dec(x_131); +x_132 = !lean_is_exclusive(x_117); +if (x_132 == 0) +{ +lean_object* x_133; lean_object* x_134; lean_object* x_135; lean_object* x_136; uint8_t x_137; uint8_t x_138; +x_133 = lean_ctor_get(x_117, 0); +x_134 = 
lean_ctor_get(x_117, 1); +x_135 = lean_ctor_get(x_117, 2); +x_136 = lean_ctor_get(x_117, 3); +x_137 = 1; +lean_inc(x_40); +lean_ctor_set(x_117, 3, x_133); +lean_ctor_set(x_117, 2, x_129); +lean_ctor_set(x_117, 1, x_128); +lean_ctor_set(x_117, 0, x_40); +x_138 = !lean_is_exclusive(x_40); +if (x_138 == 0) +{ +lean_object* x_139; lean_object* x_140; lean_object* x_141; lean_object* x_142; uint8_t x_143; +x_139 = lean_ctor_get(x_40, 3); +lean_dec(x_139); +x_140 = lean_ctor_get(x_40, 2); +lean_dec(x_140); +x_141 = lean_ctor_get(x_40, 1); +lean_dec(x_141); +x_142 = lean_ctor_get(x_40, 0); +lean_dec(x_142); +lean_ctor_set_uint8(x_117, sizeof(void*)*4, x_137); +lean_ctor_set(x_40, 3, x_36); +lean_ctor_set(x_40, 2, x_35); +lean_ctor_set(x_40, 1, x_34); +lean_ctor_set(x_40, 0, x_136); +lean_ctor_set_uint8(x_40, sizeof(void*)*4, x_137); +x_143 = 0; +lean_ctor_set(x_38, 3, x_40); +lean_ctor_set(x_38, 2, x_135); +lean_ctor_set(x_38, 1, x_134); +lean_ctor_set(x_38, 0, x_117); +lean_ctor_set_uint8(x_38, sizeof(void*)*4, x_143); +return x_38; } -case 1: +else { -uint8_t x_27; lean_object* x_28; -lean_dec(x_21); -lean_dec(x_20); -x_27 = 0; -x_28 = lean_alloc_ctor(1, 4, 1); -lean_ctor_set(x_28, 0, x_19); -lean_ctor_set(x_28, 1, x_2); -lean_ctor_set(x_28, 2, x_3); -lean_ctor_set(x_28, 3, x_22); -lean_ctor_set_uint8(x_28, sizeof(void*)*4, x_27); -return x_28; +lean_object* x_144; uint8_t x_145; +lean_dec(x_40); +lean_ctor_set_uint8(x_117, sizeof(void*)*4, x_137); +x_144 = lean_alloc_ctor(1, 4, 1); +lean_ctor_set(x_144, 0, x_136); +lean_ctor_set(x_144, 1, x_34); +lean_ctor_set(x_144, 2, x_35); +lean_ctor_set(x_144, 3, x_36); +lean_ctor_set_uint8(x_144, sizeof(void*)*4, x_137); +x_145 = 0; +lean_ctor_set(x_38, 3, x_144); +lean_ctor_set(x_38, 2, x_135); +lean_ctor_set(x_38, 1, x_134); +lean_ctor_set(x_38, 0, x_117); +lean_ctor_set_uint8(x_38, sizeof(void*)*4, x_145); +return x_38; } -default: +} +else { -lean_object* x_29; uint8_t x_30; lean_object* x_31; -x_29 = l_Lean_RBNode_ins___at_Lean_Meta_Grind_Arith_CommRing_EqCnstr_addToQueue___spec__2(x_22, x_2, x_3); -x_30 = 0; -x_31 = lean_alloc_ctor(1, 4, 1); -lean_ctor_set(x_31, 0, x_19); -lean_ctor_set(x_31, 1, x_20); -lean_ctor_set(x_31, 2, x_21); -lean_ctor_set(x_31, 3, x_29); -lean_ctor_set_uint8(x_31, sizeof(void*)*4, x_30); -return x_31; +lean_object* x_146; lean_object* x_147; lean_object* x_148; lean_object* x_149; uint8_t x_150; lean_object* x_151; lean_object* x_152; lean_object* x_153; uint8_t x_154; +x_146 = lean_ctor_get(x_117, 0); +x_147 = lean_ctor_get(x_117, 1); +x_148 = lean_ctor_get(x_117, 2); +x_149 = lean_ctor_get(x_117, 3); +lean_inc(x_149); +lean_inc(x_148); +lean_inc(x_147); +lean_inc(x_146); +lean_dec(x_117); +x_150 = 1; +lean_inc(x_40); +x_151 = lean_alloc_ctor(1, 4, 1); +lean_ctor_set(x_151, 0, x_40); +lean_ctor_set(x_151, 1, x_128); +lean_ctor_set(x_151, 2, x_129); +lean_ctor_set(x_151, 3, x_146); +if (lean_is_exclusive(x_40)) { + lean_ctor_release(x_40, 0); + lean_ctor_release(x_40, 1); + lean_ctor_release(x_40, 2); + lean_ctor_release(x_40, 3); + x_152 = x_40; +} else { + lean_dec_ref(x_40); + x_152 = lean_box(0); } +lean_ctor_set_uint8(x_151, sizeof(void*)*4, x_150); +if (lean_is_scalar(x_152)) { + x_153 = lean_alloc_ctor(1, 4, 1); +} else { + x_153 = x_152; } +lean_ctor_set(x_153, 0, x_149); +lean_ctor_set(x_153, 1, x_34); +lean_ctor_set(x_153, 2, x_35); +lean_ctor_set(x_153, 3, x_36); +lean_ctor_set_uint8(x_153, sizeof(void*)*4, x_150); +x_154 = 0; +lean_ctor_set(x_38, 3, x_153); +lean_ctor_set(x_38, 2, x_148); +lean_ctor_set(x_38, 
1, x_147); +lean_ctor_set(x_38, 0, x_151); +lean_ctor_set_uint8(x_38, sizeof(void*)*4, x_154); +return x_38; } } else { -uint8_t x_32; -x_32 = !lean_is_exclusive(x_1); -if (x_32 == 0) -{ -lean_object* x_33; lean_object* x_34; lean_object* x_35; lean_object* x_36; uint8_t x_37; -x_33 = lean_ctor_get(x_1, 0); -x_34 = lean_ctor_get(x_1, 1); -x_35 = lean_ctor_get(x_1, 2); -x_36 = lean_ctor_get(x_1, 3); -x_37 = l_Lean_Meta_Grind_Arith_CommRing_EqCnstr_compare(x_2, x_34); -switch (x_37) { -case 0: -{ -lean_object* x_38; uint8_t x_39; -x_38 = l_Lean_RBNode_ins___at_Lean_Meta_Grind_Arith_CommRing_EqCnstr_addToQueue___spec__2(x_33, x_2, x_3); -x_39 = lean_ctor_get_uint8(x_38, sizeof(void*)*4); -if (x_39 == 0) -{ -lean_object* x_40; -x_40 = lean_ctor_get(x_38, 0); +lean_object* x_155; lean_object* x_156; lean_object* x_157; lean_object* x_158; lean_object* x_159; lean_object* x_160; lean_object* x_161; uint8_t x_162; lean_object* x_163; lean_object* x_164; lean_object* x_165; uint8_t x_166; lean_object* x_167; +x_155 = lean_ctor_get(x_38, 1); +x_156 = lean_ctor_get(x_38, 2); +lean_inc(x_156); +lean_inc(x_155); +lean_dec(x_38); +x_157 = lean_ctor_get(x_117, 0); +lean_inc(x_157); +x_158 = lean_ctor_get(x_117, 1); +lean_inc(x_158); +x_159 = lean_ctor_get(x_117, 2); +lean_inc(x_159); +x_160 = lean_ctor_get(x_117, 3); +lean_inc(x_160); +if (lean_is_exclusive(x_117)) { + lean_ctor_release(x_117, 0); + lean_ctor_release(x_117, 1); + lean_ctor_release(x_117, 2); + lean_ctor_release(x_117, 3); + x_161 = x_117; +} else { + lean_dec_ref(x_117); + x_161 = lean_box(0); +} +x_162 = 1; lean_inc(x_40); -if (lean_obj_tag(x_40) == 0) +if (lean_is_scalar(x_161)) { + x_163 = lean_alloc_ctor(1, 4, 1); +} else { + x_163 = x_161; +} +lean_ctor_set(x_163, 0, x_40); +lean_ctor_set(x_163, 1, x_155); +lean_ctor_set(x_163, 2, x_156); +lean_ctor_set(x_163, 3, x_157); +if (lean_is_exclusive(x_40)) { + lean_ctor_release(x_40, 0); + lean_ctor_release(x_40, 1); + lean_ctor_release(x_40, 2); + lean_ctor_release(x_40, 3); + x_164 = x_40; +} else { + lean_dec_ref(x_40); + x_164 = lean_box(0); +} +lean_ctor_set_uint8(x_163, sizeof(void*)*4, x_162); +if (lean_is_scalar(x_164)) { + x_165 = lean_alloc_ctor(1, 4, 1); +} else { + x_165 = x_164; +} +lean_ctor_set(x_165, 0, x_160); +lean_ctor_set(x_165, 1, x_34); +lean_ctor_set(x_165, 2, x_35); +lean_ctor_set(x_165, 3, x_36); +lean_ctor_set_uint8(x_165, sizeof(void*)*4, x_162); +x_166 = 0; +x_167 = lean_alloc_ctor(1, 4, 1); +lean_ctor_set(x_167, 0, x_163); +lean_ctor_set(x_167, 1, x_158); +lean_ctor_set(x_167, 2, x_159); +lean_ctor_set(x_167, 3, x_165); +lean_ctor_set_uint8(x_167, sizeof(void*)*4, x_166); +return x_167; +} +} +else { -lean_object* x_41; -x_41 = lean_ctor_get(x_38, 3); -lean_inc(x_41); -if (lean_obj_tag(x_41) == 0) +uint8_t x_168; +x_168 = !lean_is_exclusive(x_38); +if (x_168 == 0) { -uint8_t x_42; -x_42 = !lean_is_exclusive(x_38); -if (x_42 == 0) +lean_object* x_169; lean_object* x_170; uint8_t x_171; +x_169 = lean_ctor_get(x_38, 3); +lean_dec(x_169); +x_170 = lean_ctor_get(x_38, 0); +lean_dec(x_170); +x_171 = !lean_is_exclusive(x_40); +if (x_171 == 0) { -lean_object* x_43; lean_object* x_44; uint8_t x_45; -x_43 = lean_ctor_get(x_38, 3); -lean_dec(x_43); -x_44 = lean_ctor_get(x_38, 0); -lean_dec(x_44); -lean_ctor_set(x_38, 0, x_41); -x_45 = 1; +uint8_t x_172; +lean_ctor_set_uint8(x_40, sizeof(void*)*4, x_126); +x_172 = 1; lean_ctor_set(x_1, 0, x_38); -lean_ctor_set_uint8(x_1, sizeof(void*)*4, x_45); +lean_ctor_set_uint8(x_1, sizeof(void*)*4, x_172); return x_1; } else { 
-lean_object* x_46; lean_object* x_47; lean_object* x_48; uint8_t x_49; -x_46 = lean_ctor_get(x_38, 1); -x_47 = lean_ctor_get(x_38, 2); -lean_inc(x_47); -lean_inc(x_46); -lean_dec(x_38); -x_48 = lean_alloc_ctor(1, 4, 1); -lean_ctor_set(x_48, 0, x_41); -lean_ctor_set(x_48, 1, x_46); -lean_ctor_set(x_48, 2, x_47); -lean_ctor_set(x_48, 3, x_41); -lean_ctor_set_uint8(x_48, sizeof(void*)*4, x_39); -x_49 = 1; -lean_ctor_set(x_1, 0, x_48); -lean_ctor_set_uint8(x_1, sizeof(void*)*4, x_49); -return x_1; -} -} -else -{ -uint8_t x_50; -x_50 = lean_ctor_get_uint8(x_41, sizeof(void*)*4); -if (x_50 == 0) -{ -uint8_t x_51; -x_51 = !lean_is_exclusive(x_38); -if (x_51 == 0) -{ -lean_object* x_52; lean_object* x_53; lean_object* x_54; lean_object* x_55; uint8_t x_56; -x_52 = lean_ctor_get(x_38, 1); -x_53 = lean_ctor_get(x_38, 2); -x_54 = lean_ctor_get(x_38, 3); -lean_dec(x_54); -x_55 = lean_ctor_get(x_38, 0); -lean_dec(x_55); -x_56 = !lean_is_exclusive(x_41); -if (x_56 == 0) -{ -lean_object* x_57; lean_object* x_58; lean_object* x_59; lean_object* x_60; uint8_t x_61; uint8_t x_62; -x_57 = lean_ctor_get(x_41, 0); -x_58 = lean_ctor_get(x_41, 1); -x_59 = lean_ctor_get(x_41, 2); -x_60 = lean_ctor_get(x_41, 3); -x_61 = 1; -lean_ctor_set(x_41, 3, x_57); -lean_ctor_set(x_41, 2, x_53); -lean_ctor_set(x_41, 1, x_52); -lean_ctor_set(x_41, 0, x_40); -lean_ctor_set_uint8(x_41, sizeof(void*)*4, x_61); -lean_ctor_set(x_38, 3, x_36); -lean_ctor_set(x_38, 2, x_35); -lean_ctor_set(x_38, 1, x_34); -lean_ctor_set(x_38, 0, x_60); -lean_ctor_set_uint8(x_38, sizeof(void*)*4, x_61); -x_62 = 0; -lean_ctor_set(x_1, 3, x_38); -lean_ctor_set(x_1, 2, x_59); -lean_ctor_set(x_1, 1, x_58); -lean_ctor_set(x_1, 0, x_41); -lean_ctor_set_uint8(x_1, sizeof(void*)*4, x_62); -return x_1; -} -else -{ -lean_object* x_63; lean_object* x_64; lean_object* x_65; lean_object* x_66; uint8_t x_67; lean_object* x_68; uint8_t x_69; -x_63 = lean_ctor_get(x_41, 0); -x_64 = lean_ctor_get(x_41, 1); -x_65 = lean_ctor_get(x_41, 2); -x_66 = lean_ctor_get(x_41, 3); -lean_inc(x_66); -lean_inc(x_65); -lean_inc(x_64); -lean_inc(x_63); -lean_dec(x_41); -x_67 = 1; -x_68 = lean_alloc_ctor(1, 4, 1); -lean_ctor_set(x_68, 0, x_40); -lean_ctor_set(x_68, 1, x_52); -lean_ctor_set(x_68, 2, x_53); -lean_ctor_set(x_68, 3, x_63); -lean_ctor_set_uint8(x_68, sizeof(void*)*4, x_67); -lean_ctor_set(x_38, 3, x_36); -lean_ctor_set(x_38, 2, x_35); -lean_ctor_set(x_38, 1, x_34); -lean_ctor_set(x_38, 0, x_66); -lean_ctor_set_uint8(x_38, sizeof(void*)*4, x_67); -x_69 = 0; -lean_ctor_set(x_1, 3, x_38); -lean_ctor_set(x_1, 2, x_65); -lean_ctor_set(x_1, 1, x_64); -lean_ctor_set(x_1, 0, x_68); -lean_ctor_set_uint8(x_1, sizeof(void*)*4, x_69); +lean_object* x_173; lean_object* x_174; lean_object* x_175; lean_object* x_176; lean_object* x_177; uint8_t x_178; +x_173 = lean_ctor_get(x_40, 0); +x_174 = lean_ctor_get(x_40, 1); +x_175 = lean_ctor_get(x_40, 2); +x_176 = lean_ctor_get(x_40, 3); +lean_inc(x_176); +lean_inc(x_175); +lean_inc(x_174); +lean_inc(x_173); +lean_dec(x_40); +x_177 = lean_alloc_ctor(1, 4, 1); +lean_ctor_set(x_177, 0, x_173); +lean_ctor_set(x_177, 1, x_174); +lean_ctor_set(x_177, 2, x_175); +lean_ctor_set(x_177, 3, x_176); +lean_ctor_set_uint8(x_177, sizeof(void*)*4, x_126); +lean_ctor_set(x_38, 0, x_177); +x_178 = 1; +lean_ctor_set(x_1, 0, x_38); +lean_ctor_set_uint8(x_1, sizeof(void*)*4, x_178); return x_1; } } else { -lean_object* x_70; lean_object* x_71; lean_object* x_72; lean_object* x_73; lean_object* x_74; lean_object* x_75; lean_object* x_76; uint8_t x_77; lean_object* 
x_78; lean_object* x_79; uint8_t x_80; -x_70 = lean_ctor_get(x_38, 1); -x_71 = lean_ctor_get(x_38, 2); -lean_inc(x_71); -lean_inc(x_70); +lean_object* x_179; lean_object* x_180; lean_object* x_181; lean_object* x_182; lean_object* x_183; lean_object* x_184; lean_object* x_185; lean_object* x_186; lean_object* x_187; uint8_t x_188; +x_179 = lean_ctor_get(x_38, 1); +x_180 = lean_ctor_get(x_38, 2); +lean_inc(x_180); +lean_inc(x_179); lean_dec(x_38); -x_72 = lean_ctor_get(x_41, 0); -lean_inc(x_72); -x_73 = lean_ctor_get(x_41, 1); -lean_inc(x_73); -x_74 = lean_ctor_get(x_41, 2); -lean_inc(x_74); -x_75 = lean_ctor_get(x_41, 3); -lean_inc(x_75); -if (lean_is_exclusive(x_41)) { - lean_ctor_release(x_41, 0); - lean_ctor_release(x_41, 1); - lean_ctor_release(x_41, 2); - lean_ctor_release(x_41, 3); - x_76 = x_41; +x_181 = lean_ctor_get(x_40, 0); +lean_inc(x_181); +x_182 = lean_ctor_get(x_40, 1); +lean_inc(x_182); +x_183 = lean_ctor_get(x_40, 2); +lean_inc(x_183); +x_184 = lean_ctor_get(x_40, 3); +lean_inc(x_184); +if (lean_is_exclusive(x_40)) { + lean_ctor_release(x_40, 0); + lean_ctor_release(x_40, 1); + lean_ctor_release(x_40, 2); + lean_ctor_release(x_40, 3); + x_185 = x_40; } else { - lean_dec_ref(x_41); - x_76 = lean_box(0); + lean_dec_ref(x_40); + x_185 = lean_box(0); } -x_77 = 1; -if (lean_is_scalar(x_76)) { - x_78 = lean_alloc_ctor(1, 4, 1); +if (lean_is_scalar(x_185)) { + x_186 = lean_alloc_ctor(1, 4, 1); } else { - x_78 = x_76; + x_186 = x_185; } -lean_ctor_set(x_78, 0, x_40); -lean_ctor_set(x_78, 1, x_70); -lean_ctor_set(x_78, 2, x_71); -lean_ctor_set(x_78, 3, x_72); -lean_ctor_set_uint8(x_78, sizeof(void*)*4, x_77); -x_79 = lean_alloc_ctor(1, 4, 1); -lean_ctor_set(x_79, 0, x_75); -lean_ctor_set(x_79, 1, x_34); -lean_ctor_set(x_79, 2, x_35); -lean_ctor_set(x_79, 3, x_36); -lean_ctor_set_uint8(x_79, sizeof(void*)*4, x_77); -x_80 = 0; -lean_ctor_set(x_1, 3, x_79); -lean_ctor_set(x_1, 2, x_74); -lean_ctor_set(x_1, 1, x_73); -lean_ctor_set(x_1, 0, x_78); -lean_ctor_set_uint8(x_1, sizeof(void*)*4, x_80); +lean_ctor_set(x_186, 0, x_181); +lean_ctor_set(x_186, 1, x_182); +lean_ctor_set(x_186, 2, x_183); +lean_ctor_set(x_186, 3, x_184); +lean_ctor_set_uint8(x_186, sizeof(void*)*4, x_126); +x_187 = lean_alloc_ctor(1, 4, 1); +lean_ctor_set(x_187, 0, x_186); +lean_ctor_set(x_187, 1, x_179); +lean_ctor_set(x_187, 2, x_180); +lean_ctor_set(x_187, 3, x_117); +lean_ctor_set_uint8(x_187, sizeof(void*)*4, x_39); +x_188 = 1; +lean_ctor_set(x_1, 0, x_187); +lean_ctor_set_uint8(x_1, sizeof(void*)*4, x_188); return x_1; } } -else -{ -uint8_t x_81; -lean_free_object(x_1); -x_81 = !lean_is_exclusive(x_41); -if (x_81 == 0) -{ -lean_object* x_82; lean_object* x_83; lean_object* x_84; lean_object* x_85; uint8_t x_86; -x_82 = lean_ctor_get(x_41, 3); -lean_dec(x_82); -x_83 = lean_ctor_get(x_41, 2); -lean_dec(x_83); -x_84 = lean_ctor_get(x_41, 1); -lean_dec(x_84); -x_85 = lean_ctor_get(x_41, 0); -lean_dec(x_85); -x_86 = 1; -lean_ctor_set(x_41, 3, x_36); -lean_ctor_set(x_41, 2, x_35); -lean_ctor_set(x_41, 1, x_34); -lean_ctor_set(x_41, 0, x_38); -lean_ctor_set_uint8(x_41, sizeof(void*)*4, x_86); -return x_41; +} +} +} } else { -uint8_t x_87; lean_object* x_88; -lean_dec(x_41); -x_87 = 1; -x_88 = lean_alloc_ctor(1, 4, 1); -lean_ctor_set(x_88, 0, x_38); -lean_ctor_set(x_88, 1, x_34); -lean_ctor_set(x_88, 2, x_35); -lean_ctor_set(x_88, 3, x_36); -lean_ctor_set_uint8(x_88, sizeof(void*)*4, x_87); -return x_88; -} +uint8_t x_189; +x_189 = 1; +lean_ctor_set(x_1, 0, x_38); +lean_ctor_set_uint8(x_1, sizeof(void*)*4, x_189); 
+return x_1; } } +case 1: +{ +uint8_t x_190; +lean_dec(x_35); +lean_dec(x_34); +x_190 = 1; +lean_ctor_set(x_1, 2, x_3); +lean_ctor_set(x_1, 1, x_2); +lean_ctor_set_uint8(x_1, sizeof(void*)*4, x_190); +return x_1; } -else +default: { -uint8_t x_89; -x_89 = lean_ctor_get_uint8(x_40, sizeof(void*)*4); -if (x_89 == 0) +lean_object* x_191; uint8_t x_192; +x_191 = l_Lean_RBNode_ins___at_Lean_Meta_Grind_Arith_CommRing_EqCnstr_addToQueue___spec__2(x_36, x_2, x_3); +x_192 = lean_ctor_get_uint8(x_191, sizeof(void*)*4); +if (x_192 == 0) { -uint8_t x_90; -x_90 = !lean_is_exclusive(x_38); -if (x_90 == 0) +lean_object* x_193; +x_193 = lean_ctor_get(x_191, 0); +lean_inc(x_193); +if (lean_obj_tag(x_193) == 0) { -lean_object* x_91; lean_object* x_92; lean_object* x_93; lean_object* x_94; uint8_t x_95; -x_91 = lean_ctor_get(x_38, 1); -x_92 = lean_ctor_get(x_38, 2); -x_93 = lean_ctor_get(x_38, 3); -x_94 = lean_ctor_get(x_38, 0); -lean_dec(x_94); -x_95 = !lean_is_exclusive(x_40); -if (x_95 == 0) +lean_object* x_194; +x_194 = lean_ctor_get(x_191, 3); +lean_inc(x_194); +if (lean_obj_tag(x_194) == 0) { -uint8_t x_96; uint8_t x_97; -x_96 = 1; -lean_ctor_set_uint8(x_40, sizeof(void*)*4, x_96); -lean_ctor_set(x_38, 3, x_36); -lean_ctor_set(x_38, 2, x_35); -lean_ctor_set(x_38, 1, x_34); -lean_ctor_set(x_38, 0, x_93); -lean_ctor_set_uint8(x_38, sizeof(void*)*4, x_96); -x_97 = 0; -lean_ctor_set(x_1, 3, x_38); -lean_ctor_set(x_1, 2, x_92); -lean_ctor_set(x_1, 1, x_91); -lean_ctor_set(x_1, 0, x_40); -lean_ctor_set_uint8(x_1, sizeof(void*)*4, x_97); -return x_1; -} -else +uint8_t x_195; +x_195 = !lean_is_exclusive(x_191); +if (x_195 == 0) { -lean_object* x_98; lean_object* x_99; lean_object* x_100; lean_object* x_101; uint8_t x_102; lean_object* x_103; uint8_t x_104; -x_98 = lean_ctor_get(x_40, 0); -x_99 = lean_ctor_get(x_40, 1); -x_100 = lean_ctor_get(x_40, 2); -x_101 = lean_ctor_get(x_40, 3); -lean_inc(x_101); -lean_inc(x_100); -lean_inc(x_99); -lean_inc(x_98); -lean_dec(x_40); -x_102 = 1; -x_103 = lean_alloc_ctor(1, 4, 1); -lean_ctor_set(x_103, 0, x_98); -lean_ctor_set(x_103, 1, x_99); -lean_ctor_set(x_103, 2, x_100); -lean_ctor_set(x_103, 3, x_101); -lean_ctor_set_uint8(x_103, sizeof(void*)*4, x_102); -lean_ctor_set(x_38, 3, x_36); -lean_ctor_set(x_38, 2, x_35); -lean_ctor_set(x_38, 1, x_34); -lean_ctor_set(x_38, 0, x_93); -lean_ctor_set_uint8(x_38, sizeof(void*)*4, x_102); -x_104 = 0; -lean_ctor_set(x_1, 3, x_38); -lean_ctor_set(x_1, 2, x_92); -lean_ctor_set(x_1, 1, x_91); -lean_ctor_set(x_1, 0, x_103); -lean_ctor_set_uint8(x_1, sizeof(void*)*4, x_104); +lean_object* x_196; lean_object* x_197; uint8_t x_198; +x_196 = lean_ctor_get(x_191, 3); +lean_dec(x_196); +x_197 = lean_ctor_get(x_191, 0); +lean_dec(x_197); +lean_ctor_set(x_191, 0, x_194); +x_198 = 1; +lean_ctor_set(x_1, 3, x_191); +lean_ctor_set_uint8(x_1, sizeof(void*)*4, x_198); return x_1; } -} else { -lean_object* x_105; lean_object* x_106; lean_object* x_107; lean_object* x_108; lean_object* x_109; lean_object* x_110; lean_object* x_111; lean_object* x_112; uint8_t x_113; lean_object* x_114; lean_object* x_115; uint8_t x_116; -x_105 = lean_ctor_get(x_38, 1); -x_106 = lean_ctor_get(x_38, 2); -x_107 = lean_ctor_get(x_38, 3); -lean_inc(x_107); -lean_inc(x_106); -lean_inc(x_105); -lean_dec(x_38); -x_108 = lean_ctor_get(x_40, 0); -lean_inc(x_108); -x_109 = lean_ctor_get(x_40, 1); -lean_inc(x_109); -x_110 = lean_ctor_get(x_40, 2); -lean_inc(x_110); -x_111 = lean_ctor_get(x_40, 3); -lean_inc(x_111); -if (lean_is_exclusive(x_40)) { - lean_ctor_release(x_40, 0); - 
lean_ctor_release(x_40, 1); - lean_ctor_release(x_40, 2); - lean_ctor_release(x_40, 3); - x_112 = x_40; -} else { - lean_dec_ref(x_40); - x_112 = lean_box(0); -} -x_113 = 1; -if (lean_is_scalar(x_112)) { - x_114 = lean_alloc_ctor(1, 4, 1); -} else { - x_114 = x_112; -} -lean_ctor_set(x_114, 0, x_108); -lean_ctor_set(x_114, 1, x_109); -lean_ctor_set(x_114, 2, x_110); -lean_ctor_set(x_114, 3, x_111); -lean_ctor_set_uint8(x_114, sizeof(void*)*4, x_113); -x_115 = lean_alloc_ctor(1, 4, 1); -lean_ctor_set(x_115, 0, x_107); -lean_ctor_set(x_115, 1, x_34); -lean_ctor_set(x_115, 2, x_35); -lean_ctor_set(x_115, 3, x_36); -lean_ctor_set_uint8(x_115, sizeof(void*)*4, x_113); -x_116 = 0; -lean_ctor_set(x_1, 3, x_115); -lean_ctor_set(x_1, 2, x_106); -lean_ctor_set(x_1, 1, x_105); -lean_ctor_set(x_1, 0, x_114); -lean_ctor_set_uint8(x_1, sizeof(void*)*4, x_116); +lean_object* x_199; lean_object* x_200; lean_object* x_201; uint8_t x_202; +x_199 = lean_ctor_get(x_191, 1); +x_200 = lean_ctor_get(x_191, 2); +lean_inc(x_200); +lean_inc(x_199); +lean_dec(x_191); +x_201 = lean_alloc_ctor(1, 4, 1); +lean_ctor_set(x_201, 0, x_194); +lean_ctor_set(x_201, 1, x_199); +lean_ctor_set(x_201, 2, x_200); +lean_ctor_set(x_201, 3, x_194); +lean_ctor_set_uint8(x_201, sizeof(void*)*4, x_192); +x_202 = 1; +lean_ctor_set(x_1, 3, x_201); +lean_ctor_set_uint8(x_1, sizeof(void*)*4, x_202); return x_1; } } else { -lean_object* x_117; -x_117 = lean_ctor_get(x_38, 3); -lean_inc(x_117); -if (lean_obj_tag(x_117) == 0) +uint8_t x_203; +x_203 = lean_ctor_get_uint8(x_194, sizeof(void*)*4); +if (x_203 == 0) { -uint8_t x_118; -lean_free_object(x_1); -x_118 = !lean_is_exclusive(x_40); -if (x_118 == 0) +uint8_t x_204; +x_204 = !lean_is_exclusive(x_191); +if (x_204 == 0) { -lean_object* x_119; lean_object* x_120; lean_object* x_121; lean_object* x_122; uint8_t x_123; -x_119 = lean_ctor_get(x_40, 3); -lean_dec(x_119); -x_120 = lean_ctor_get(x_40, 2); -lean_dec(x_120); -x_121 = lean_ctor_get(x_40, 1); -lean_dec(x_121); -x_122 = lean_ctor_get(x_40, 0); -lean_dec(x_122); -x_123 = 1; -lean_ctor_set(x_40, 3, x_36); -lean_ctor_set(x_40, 2, x_35); -lean_ctor_set(x_40, 1, x_34); -lean_ctor_set(x_40, 0, x_38); -lean_ctor_set_uint8(x_40, sizeof(void*)*4, x_123); -return x_40; +lean_object* x_205; lean_object* x_206; lean_object* x_207; lean_object* x_208; uint8_t x_209; +x_205 = lean_ctor_get(x_191, 1); +x_206 = lean_ctor_get(x_191, 2); +x_207 = lean_ctor_get(x_191, 3); +lean_dec(x_207); +x_208 = lean_ctor_get(x_191, 0); +lean_dec(x_208); +x_209 = !lean_is_exclusive(x_194); +if (x_209 == 0) +{ +lean_object* x_210; lean_object* x_211; lean_object* x_212; lean_object* x_213; uint8_t x_214; uint8_t x_215; +x_210 = lean_ctor_get(x_194, 0); +x_211 = lean_ctor_get(x_194, 1); +x_212 = lean_ctor_get(x_194, 2); +x_213 = lean_ctor_get(x_194, 3); +x_214 = 1; +lean_ctor_set(x_194, 3, x_193); +lean_ctor_set(x_194, 2, x_35); +lean_ctor_set(x_194, 1, x_34); +lean_ctor_set(x_194, 0, x_33); +lean_ctor_set_uint8(x_194, sizeof(void*)*4, x_214); +lean_ctor_set(x_191, 3, x_213); +lean_ctor_set(x_191, 2, x_212); +lean_ctor_set(x_191, 1, x_211); +lean_ctor_set(x_191, 0, x_210); +lean_ctor_set_uint8(x_191, sizeof(void*)*4, x_214); +x_215 = 0; +lean_ctor_set(x_1, 3, x_191); +lean_ctor_set(x_1, 2, x_206); +lean_ctor_set(x_1, 1, x_205); +lean_ctor_set(x_1, 0, x_194); +lean_ctor_set_uint8(x_1, sizeof(void*)*4, x_215); +return x_1; } else { -uint8_t x_124; lean_object* x_125; -lean_dec(x_40); -x_124 = 1; -x_125 = lean_alloc_ctor(1, 4, 1); -lean_ctor_set(x_125, 0, x_38); 
-lean_ctor_set(x_125, 1, x_34); -lean_ctor_set(x_125, 2, x_35); -lean_ctor_set(x_125, 3, x_36); -lean_ctor_set_uint8(x_125, sizeof(void*)*4, x_124); -return x_125; +lean_object* x_216; lean_object* x_217; lean_object* x_218; lean_object* x_219; uint8_t x_220; lean_object* x_221; uint8_t x_222; +x_216 = lean_ctor_get(x_194, 0); +x_217 = lean_ctor_get(x_194, 1); +x_218 = lean_ctor_get(x_194, 2); +x_219 = lean_ctor_get(x_194, 3); +lean_inc(x_219); +lean_inc(x_218); +lean_inc(x_217); +lean_inc(x_216); +lean_dec(x_194); +x_220 = 1; +x_221 = lean_alloc_ctor(1, 4, 1); +lean_ctor_set(x_221, 0, x_33); +lean_ctor_set(x_221, 1, x_34); +lean_ctor_set(x_221, 2, x_35); +lean_ctor_set(x_221, 3, x_193); +lean_ctor_set_uint8(x_221, sizeof(void*)*4, x_220); +lean_ctor_set(x_191, 3, x_219); +lean_ctor_set(x_191, 2, x_218); +lean_ctor_set(x_191, 1, x_217); +lean_ctor_set(x_191, 0, x_216); +lean_ctor_set_uint8(x_191, sizeof(void*)*4, x_220); +x_222 = 0; +lean_ctor_set(x_1, 3, x_191); +lean_ctor_set(x_1, 2, x_206); +lean_ctor_set(x_1, 1, x_205); +lean_ctor_set(x_1, 0, x_221); +lean_ctor_set_uint8(x_1, sizeof(void*)*4, x_222); +return x_1; } } else { -uint8_t x_126; -x_126 = lean_ctor_get_uint8(x_117, sizeof(void*)*4); -if (x_126 == 0) -{ -uint8_t x_127; -lean_free_object(x_1); -x_127 = !lean_is_exclusive(x_38); -if (x_127 == 0) -{ -lean_object* x_128; lean_object* x_129; lean_object* x_130; lean_object* x_131; uint8_t x_132; -x_128 = lean_ctor_get(x_38, 1); -x_129 = lean_ctor_get(x_38, 2); -x_130 = lean_ctor_get(x_38, 3); -lean_dec(x_130); -x_131 = lean_ctor_get(x_38, 0); -lean_dec(x_131); -x_132 = !lean_is_exclusive(x_117); -if (x_132 == 0) -{ -lean_object* x_133; lean_object* x_134; lean_object* x_135; lean_object* x_136; uint8_t x_137; uint8_t x_138; -x_133 = lean_ctor_get(x_117, 0); -x_134 = lean_ctor_get(x_117, 1); -x_135 = lean_ctor_get(x_117, 2); -x_136 = lean_ctor_get(x_117, 3); -x_137 = 1; -lean_inc(x_40); -lean_ctor_set(x_117, 3, x_133); -lean_ctor_set(x_117, 2, x_129); -lean_ctor_set(x_117, 1, x_128); -lean_ctor_set(x_117, 0, x_40); -x_138 = !lean_is_exclusive(x_40); -if (x_138 == 0) -{ -lean_object* x_139; lean_object* x_140; lean_object* x_141; lean_object* x_142; uint8_t x_143; -x_139 = lean_ctor_get(x_40, 3); -lean_dec(x_139); -x_140 = lean_ctor_get(x_40, 2); -lean_dec(x_140); -x_141 = lean_ctor_get(x_40, 1); -lean_dec(x_141); -x_142 = lean_ctor_get(x_40, 0); -lean_dec(x_142); -lean_ctor_set_uint8(x_117, sizeof(void*)*4, x_137); -lean_ctor_set(x_40, 3, x_36); -lean_ctor_set(x_40, 2, x_35); -lean_ctor_set(x_40, 1, x_34); -lean_ctor_set(x_40, 0, x_136); -lean_ctor_set_uint8(x_40, sizeof(void*)*4, x_137); -x_143 = 0; -lean_ctor_set(x_38, 3, x_40); -lean_ctor_set(x_38, 2, x_135); -lean_ctor_set(x_38, 1, x_134); -lean_ctor_set(x_38, 0, x_117); -lean_ctor_set_uint8(x_38, sizeof(void*)*4, x_143); -return x_38; -} -else -{ -lean_object* x_144; uint8_t x_145; -lean_dec(x_40); -lean_ctor_set_uint8(x_117, sizeof(void*)*4, x_137); -x_144 = lean_alloc_ctor(1, 4, 1); -lean_ctor_set(x_144, 0, x_136); -lean_ctor_set(x_144, 1, x_34); -lean_ctor_set(x_144, 2, x_35); -lean_ctor_set(x_144, 3, x_36); -lean_ctor_set_uint8(x_144, sizeof(void*)*4, x_137); -x_145 = 0; -lean_ctor_set(x_38, 3, x_144); -lean_ctor_set(x_38, 2, x_135); -lean_ctor_set(x_38, 1, x_134); -lean_ctor_set(x_38, 0, x_117); -lean_ctor_set_uint8(x_38, sizeof(void*)*4, x_145); -return x_38; -} -} -else -{ -lean_object* x_146; lean_object* x_147; lean_object* x_148; lean_object* x_149; uint8_t x_150; lean_object* x_151; lean_object* x_152; lean_object* 
x_153; uint8_t x_154; -x_146 = lean_ctor_get(x_117, 0); -x_147 = lean_ctor_get(x_117, 1); -x_148 = lean_ctor_get(x_117, 2); -x_149 = lean_ctor_get(x_117, 3); -lean_inc(x_149); -lean_inc(x_148); -lean_inc(x_147); -lean_inc(x_146); -lean_dec(x_117); -x_150 = 1; -lean_inc(x_40); -x_151 = lean_alloc_ctor(1, 4, 1); -lean_ctor_set(x_151, 0, x_40); -lean_ctor_set(x_151, 1, x_128); -lean_ctor_set(x_151, 2, x_129); -lean_ctor_set(x_151, 3, x_146); -if (lean_is_exclusive(x_40)) { - lean_ctor_release(x_40, 0); - lean_ctor_release(x_40, 1); - lean_ctor_release(x_40, 2); - lean_ctor_release(x_40, 3); - x_152 = x_40; +lean_object* x_223; lean_object* x_224; lean_object* x_225; lean_object* x_226; lean_object* x_227; lean_object* x_228; lean_object* x_229; uint8_t x_230; lean_object* x_231; lean_object* x_232; uint8_t x_233; +x_223 = lean_ctor_get(x_191, 1); +x_224 = lean_ctor_get(x_191, 2); +lean_inc(x_224); +lean_inc(x_223); +lean_dec(x_191); +x_225 = lean_ctor_get(x_194, 0); +lean_inc(x_225); +x_226 = lean_ctor_get(x_194, 1); +lean_inc(x_226); +x_227 = lean_ctor_get(x_194, 2); +lean_inc(x_227); +x_228 = lean_ctor_get(x_194, 3); +lean_inc(x_228); +if (lean_is_exclusive(x_194)) { + lean_ctor_release(x_194, 0); + lean_ctor_release(x_194, 1); + lean_ctor_release(x_194, 2); + lean_ctor_release(x_194, 3); + x_229 = x_194; } else { - lean_dec_ref(x_40); - x_152 = lean_box(0); + lean_dec_ref(x_194); + x_229 = lean_box(0); } -lean_ctor_set_uint8(x_151, sizeof(void*)*4, x_150); -if (lean_is_scalar(x_152)) { - x_153 = lean_alloc_ctor(1, 4, 1); +x_230 = 1; +if (lean_is_scalar(x_229)) { + x_231 = lean_alloc_ctor(1, 4, 1); } else { - x_153 = x_152; + x_231 = x_229; } -lean_ctor_set(x_153, 0, x_149); -lean_ctor_set(x_153, 1, x_34); -lean_ctor_set(x_153, 2, x_35); -lean_ctor_set(x_153, 3, x_36); -lean_ctor_set_uint8(x_153, sizeof(void*)*4, x_150); -x_154 = 0; -lean_ctor_set(x_38, 3, x_153); -lean_ctor_set(x_38, 2, x_148); -lean_ctor_set(x_38, 1, x_147); -lean_ctor_set(x_38, 0, x_151); -lean_ctor_set_uint8(x_38, sizeof(void*)*4, x_154); -return x_38; +lean_ctor_set(x_231, 0, x_33); +lean_ctor_set(x_231, 1, x_34); +lean_ctor_set(x_231, 2, x_35); +lean_ctor_set(x_231, 3, x_193); +lean_ctor_set_uint8(x_231, sizeof(void*)*4, x_230); +x_232 = lean_alloc_ctor(1, 4, 1); +lean_ctor_set(x_232, 0, x_225); +lean_ctor_set(x_232, 1, x_226); +lean_ctor_set(x_232, 2, x_227); +lean_ctor_set(x_232, 3, x_228); +lean_ctor_set_uint8(x_232, sizeof(void*)*4, x_230); +x_233 = 0; +lean_ctor_set(x_1, 3, x_232); +lean_ctor_set(x_1, 2, x_224); +lean_ctor_set(x_1, 1, x_223); +lean_ctor_set(x_1, 0, x_231); +lean_ctor_set_uint8(x_1, sizeof(void*)*4, x_233); +return x_1; } } else { -lean_object* x_155; lean_object* x_156; lean_object* x_157; lean_object* x_158; lean_object* x_159; lean_object* x_160; lean_object* x_161; uint8_t x_162; lean_object* x_163; lean_object* x_164; lean_object* x_165; uint8_t x_166; lean_object* x_167; -x_155 = lean_ctor_get(x_38, 1); -x_156 = lean_ctor_get(x_38, 2); -lean_inc(x_156); -lean_inc(x_155); -lean_dec(x_38); -x_157 = lean_ctor_get(x_117, 0); -lean_inc(x_157); -x_158 = lean_ctor_get(x_117, 1); -lean_inc(x_158); -x_159 = lean_ctor_get(x_117, 2); -lean_inc(x_159); -x_160 = lean_ctor_get(x_117, 3); -lean_inc(x_160); -if (lean_is_exclusive(x_117)) { - lean_ctor_release(x_117, 0); - lean_ctor_release(x_117, 1); - lean_ctor_release(x_117, 2); - lean_ctor_release(x_117, 3); - x_161 = x_117; -} else { - lean_dec_ref(x_117); - x_161 = lean_box(0); -} -x_162 = 1; -lean_inc(x_40); -if (lean_is_scalar(x_161)) { - x_163 = 
lean_alloc_ctor(1, 4, 1); -} else { - x_163 = x_161; +uint8_t x_234; +lean_free_object(x_1); +x_234 = !lean_is_exclusive(x_194); +if (x_234 == 0) +{ +lean_object* x_235; lean_object* x_236; lean_object* x_237; lean_object* x_238; uint8_t x_239; +x_235 = lean_ctor_get(x_194, 3); +lean_dec(x_235); +x_236 = lean_ctor_get(x_194, 2); +lean_dec(x_236); +x_237 = lean_ctor_get(x_194, 1); +lean_dec(x_237); +x_238 = lean_ctor_get(x_194, 0); +lean_dec(x_238); +x_239 = 1; +lean_ctor_set(x_194, 3, x_191); +lean_ctor_set(x_194, 2, x_35); +lean_ctor_set(x_194, 1, x_34); +lean_ctor_set(x_194, 0, x_33); +lean_ctor_set_uint8(x_194, sizeof(void*)*4, x_239); +return x_194; } -lean_ctor_set(x_163, 0, x_40); -lean_ctor_set(x_163, 1, x_155); -lean_ctor_set(x_163, 2, x_156); -lean_ctor_set(x_163, 3, x_157); -if (lean_is_exclusive(x_40)) { - lean_ctor_release(x_40, 0); - lean_ctor_release(x_40, 1); - lean_ctor_release(x_40, 2); - lean_ctor_release(x_40, 3); - x_164 = x_40; -} else { - lean_dec_ref(x_40); - x_164 = lean_box(0); +else +{ +uint8_t x_240; lean_object* x_241; +lean_dec(x_194); +x_240 = 1; +x_241 = lean_alloc_ctor(1, 4, 1); +lean_ctor_set(x_241, 0, x_33); +lean_ctor_set(x_241, 1, x_34); +lean_ctor_set(x_241, 2, x_35); +lean_ctor_set(x_241, 3, x_191); +lean_ctor_set_uint8(x_241, sizeof(void*)*4, x_240); +return x_241; } -lean_ctor_set_uint8(x_163, sizeof(void*)*4, x_162); -if (lean_is_scalar(x_164)) { - x_165 = lean_alloc_ctor(1, 4, 1); -} else { - x_165 = x_164; } -lean_ctor_set(x_165, 0, x_160); -lean_ctor_set(x_165, 1, x_34); -lean_ctor_set(x_165, 2, x_35); -lean_ctor_set(x_165, 3, x_36); -lean_ctor_set_uint8(x_165, sizeof(void*)*4, x_162); -x_166 = 0; -x_167 = lean_alloc_ctor(1, 4, 1); -lean_ctor_set(x_167, 0, x_163); -lean_ctor_set(x_167, 1, x_158); -lean_ctor_set(x_167, 2, x_159); -lean_ctor_set(x_167, 3, x_165); -lean_ctor_set_uint8(x_167, sizeof(void*)*4, x_166); -return x_167; } } else { -uint8_t x_168; -x_168 = !lean_is_exclusive(x_38); -if (x_168 == 0) +uint8_t x_242; +x_242 = lean_ctor_get_uint8(x_193, sizeof(void*)*4); +if (x_242 == 0) { -lean_object* x_169; lean_object* x_170; uint8_t x_171; -x_169 = lean_ctor_get(x_38, 3); -lean_dec(x_169); -x_170 = lean_ctor_get(x_38, 0); -lean_dec(x_170); -x_171 = !lean_is_exclusive(x_40); -if (x_171 == 0) +uint8_t x_243; +x_243 = !lean_is_exclusive(x_191); +if (x_243 == 0) { -uint8_t x_172; -lean_ctor_set_uint8(x_40, sizeof(void*)*4, x_126); -x_172 = 1; -lean_ctor_set(x_1, 0, x_38); -lean_ctor_set_uint8(x_1, sizeof(void*)*4, x_172); +lean_object* x_244; uint8_t x_245; +x_244 = lean_ctor_get(x_191, 0); +lean_dec(x_244); +x_245 = !lean_is_exclusive(x_193); +if (x_245 == 0) +{ +lean_object* x_246; lean_object* x_247; lean_object* x_248; lean_object* x_249; uint8_t x_250; uint8_t x_251; +x_246 = lean_ctor_get(x_193, 0); +x_247 = lean_ctor_get(x_193, 1); +x_248 = lean_ctor_get(x_193, 2); +x_249 = lean_ctor_get(x_193, 3); +x_250 = 1; +lean_ctor_set(x_193, 3, x_246); +lean_ctor_set(x_193, 2, x_35); +lean_ctor_set(x_193, 1, x_34); +lean_ctor_set(x_193, 0, x_33); +lean_ctor_set_uint8(x_193, sizeof(void*)*4, x_250); +lean_ctor_set(x_191, 0, x_249); +lean_ctor_set_uint8(x_191, sizeof(void*)*4, x_250); +x_251 = 0; +lean_ctor_set(x_1, 3, x_191); +lean_ctor_set(x_1, 2, x_248); +lean_ctor_set(x_1, 1, x_247); +lean_ctor_set(x_1, 0, x_193); +lean_ctor_set_uint8(x_1, sizeof(void*)*4, x_251); return x_1; } else { -lean_object* x_173; lean_object* x_174; lean_object* x_175; lean_object* x_176; lean_object* x_177; uint8_t x_178; -x_173 = lean_ctor_get(x_40, 0); -x_174 = 
lean_ctor_get(x_40, 1); -x_175 = lean_ctor_get(x_40, 2); -x_176 = lean_ctor_get(x_40, 3); -lean_inc(x_176); -lean_inc(x_175); -lean_inc(x_174); -lean_inc(x_173); -lean_dec(x_40); -x_177 = lean_alloc_ctor(1, 4, 1); -lean_ctor_set(x_177, 0, x_173); -lean_ctor_set(x_177, 1, x_174); -lean_ctor_set(x_177, 2, x_175); -lean_ctor_set(x_177, 3, x_176); -lean_ctor_set_uint8(x_177, sizeof(void*)*4, x_126); -lean_ctor_set(x_38, 0, x_177); -x_178 = 1; -lean_ctor_set(x_1, 0, x_38); -lean_ctor_set_uint8(x_1, sizeof(void*)*4, x_178); +lean_object* x_252; lean_object* x_253; lean_object* x_254; lean_object* x_255; uint8_t x_256; lean_object* x_257; uint8_t x_258; +x_252 = lean_ctor_get(x_193, 0); +x_253 = lean_ctor_get(x_193, 1); +x_254 = lean_ctor_get(x_193, 2); +x_255 = lean_ctor_get(x_193, 3); +lean_inc(x_255); +lean_inc(x_254); +lean_inc(x_253); +lean_inc(x_252); +lean_dec(x_193); +x_256 = 1; +x_257 = lean_alloc_ctor(1, 4, 1); +lean_ctor_set(x_257, 0, x_33); +lean_ctor_set(x_257, 1, x_34); +lean_ctor_set(x_257, 2, x_35); +lean_ctor_set(x_257, 3, x_252); +lean_ctor_set_uint8(x_257, sizeof(void*)*4, x_256); +lean_ctor_set(x_191, 0, x_255); +lean_ctor_set_uint8(x_191, sizeof(void*)*4, x_256); +x_258 = 0; +lean_ctor_set(x_1, 3, x_191); +lean_ctor_set(x_1, 2, x_254); +lean_ctor_set(x_1, 1, x_253); +lean_ctor_set(x_1, 0, x_257); +lean_ctor_set_uint8(x_1, sizeof(void*)*4, x_258); return x_1; } } else { -lean_object* x_179; lean_object* x_180; lean_object* x_181; lean_object* x_182; lean_object* x_183; lean_object* x_184; lean_object* x_185; lean_object* x_186; lean_object* x_187; uint8_t x_188; -x_179 = lean_ctor_get(x_38, 1); -x_180 = lean_ctor_get(x_38, 2); -lean_inc(x_180); -lean_inc(x_179); -lean_dec(x_38); -x_181 = lean_ctor_get(x_40, 0); -lean_inc(x_181); -x_182 = lean_ctor_get(x_40, 1); -lean_inc(x_182); -x_183 = lean_ctor_get(x_40, 2); -lean_inc(x_183); -x_184 = lean_ctor_get(x_40, 3); -lean_inc(x_184); -if (lean_is_exclusive(x_40)) { - lean_ctor_release(x_40, 0); - lean_ctor_release(x_40, 1); - lean_ctor_release(x_40, 2); - lean_ctor_release(x_40, 3); - x_185 = x_40; +lean_object* x_259; lean_object* x_260; lean_object* x_261; lean_object* x_262; lean_object* x_263; lean_object* x_264; lean_object* x_265; lean_object* x_266; uint8_t x_267; lean_object* x_268; lean_object* x_269; uint8_t x_270; +x_259 = lean_ctor_get(x_191, 1); +x_260 = lean_ctor_get(x_191, 2); +x_261 = lean_ctor_get(x_191, 3); +lean_inc(x_261); +lean_inc(x_260); +lean_inc(x_259); +lean_dec(x_191); +x_262 = lean_ctor_get(x_193, 0); +lean_inc(x_262); +x_263 = lean_ctor_get(x_193, 1); +lean_inc(x_263); +x_264 = lean_ctor_get(x_193, 2); +lean_inc(x_264); +x_265 = lean_ctor_get(x_193, 3); +lean_inc(x_265); +if (lean_is_exclusive(x_193)) { + lean_ctor_release(x_193, 0); + lean_ctor_release(x_193, 1); + lean_ctor_release(x_193, 2); + lean_ctor_release(x_193, 3); + x_266 = x_193; } else { - lean_dec_ref(x_40); - x_185 = lean_box(0); + lean_dec_ref(x_193); + x_266 = lean_box(0); } -if (lean_is_scalar(x_185)) { - x_186 = lean_alloc_ctor(1, 4, 1); +x_267 = 1; +if (lean_is_scalar(x_266)) { + x_268 = lean_alloc_ctor(1, 4, 1); } else { - x_186 = x_185; + x_268 = x_266; } -lean_ctor_set(x_186, 0, x_181); -lean_ctor_set(x_186, 1, x_182); -lean_ctor_set(x_186, 2, x_183); -lean_ctor_set(x_186, 3, x_184); -lean_ctor_set_uint8(x_186, sizeof(void*)*4, x_126); -x_187 = lean_alloc_ctor(1, 4, 1); -lean_ctor_set(x_187, 0, x_186); -lean_ctor_set(x_187, 1, x_179); -lean_ctor_set(x_187, 2, x_180); -lean_ctor_set(x_187, 3, x_117); -lean_ctor_set_uint8(x_187, 
sizeof(void*)*4, x_39); -x_188 = 1; -lean_ctor_set(x_1, 0, x_187); -lean_ctor_set_uint8(x_1, sizeof(void*)*4, x_188); +lean_ctor_set(x_268, 0, x_33); +lean_ctor_set(x_268, 1, x_34); +lean_ctor_set(x_268, 2, x_35); +lean_ctor_set(x_268, 3, x_262); +lean_ctor_set_uint8(x_268, sizeof(void*)*4, x_267); +x_269 = lean_alloc_ctor(1, 4, 1); +lean_ctor_set(x_269, 0, x_265); +lean_ctor_set(x_269, 1, x_259); +lean_ctor_set(x_269, 2, x_260); +lean_ctor_set(x_269, 3, x_261); +lean_ctor_set_uint8(x_269, sizeof(void*)*4, x_267); +x_270 = 0; +lean_ctor_set(x_1, 3, x_269); +lean_ctor_set(x_1, 2, x_264); +lean_ctor_set(x_1, 1, x_263); +lean_ctor_set(x_1, 0, x_268); +lean_ctor_set_uint8(x_1, sizeof(void*)*4, x_270); return x_1; } } -} -} -} +else +{ +lean_object* x_271; +x_271 = lean_ctor_get(x_191, 3); +lean_inc(x_271); +if (lean_obj_tag(x_271) == 0) +{ +uint8_t x_272; +lean_free_object(x_1); +x_272 = !lean_is_exclusive(x_193); +if (x_272 == 0) +{ +lean_object* x_273; lean_object* x_274; lean_object* x_275; lean_object* x_276; uint8_t x_277; +x_273 = lean_ctor_get(x_193, 3); +lean_dec(x_273); +x_274 = lean_ctor_get(x_193, 2); +lean_dec(x_274); +x_275 = lean_ctor_get(x_193, 1); +lean_dec(x_275); +x_276 = lean_ctor_get(x_193, 0); +lean_dec(x_276); +x_277 = 1; +lean_ctor_set(x_193, 3, x_191); +lean_ctor_set(x_193, 2, x_35); +lean_ctor_set(x_193, 1, x_34); +lean_ctor_set(x_193, 0, x_33); +lean_ctor_set_uint8(x_193, sizeof(void*)*4, x_277); +return x_193; } else { -uint8_t x_189; -x_189 = 1; -lean_ctor_set(x_1, 0, x_38); -lean_ctor_set_uint8(x_1, sizeof(void*)*4, x_189); -return x_1; +uint8_t x_278; lean_object* x_279; +lean_dec(x_193); +x_278 = 1; +x_279 = lean_alloc_ctor(1, 4, 1); +lean_ctor_set(x_279, 0, x_33); +lean_ctor_set(x_279, 1, x_34); +lean_ctor_set(x_279, 2, x_35); +lean_ctor_set(x_279, 3, x_191); +lean_ctor_set_uint8(x_279, sizeof(void*)*4, x_278); +return x_279; } } -case 1: +else { -uint8_t x_190; -lean_dec(x_35); -lean_dec(x_34); -x_190 = 1; -lean_ctor_set(x_1, 2, x_3); -lean_ctor_set(x_1, 1, x_2); -lean_ctor_set_uint8(x_1, sizeof(void*)*4, x_190); -return x_1; -} -default: +uint8_t x_280; +x_280 = lean_ctor_get_uint8(x_271, sizeof(void*)*4); +if (x_280 == 0) { -lean_object* x_191; uint8_t x_192; -x_191 = l_Lean_RBNode_ins___at_Lean_Meta_Grind_Arith_CommRing_EqCnstr_addToQueue___spec__2(x_36, x_2, x_3); -x_192 = lean_ctor_get_uint8(x_191, sizeof(void*)*4); -if (x_192 == 0) +uint8_t x_281; +lean_free_object(x_1); +x_281 = !lean_is_exclusive(x_191); +if (x_281 == 0) { -lean_object* x_193; -x_193 = lean_ctor_get(x_191, 0); -lean_inc(x_193); -if (lean_obj_tag(x_193) == 0) +lean_object* x_282; lean_object* x_283; uint8_t x_284; +x_282 = lean_ctor_get(x_191, 3); +lean_dec(x_282); +x_283 = lean_ctor_get(x_191, 0); +lean_dec(x_283); +x_284 = !lean_is_exclusive(x_271); +if (x_284 == 0) { -lean_object* x_194; -x_194 = lean_ctor_get(x_191, 3); -lean_inc(x_194); -if (lean_obj_tag(x_194) == 0) +lean_object* x_285; lean_object* x_286; lean_object* x_287; lean_object* x_288; uint8_t x_289; uint8_t x_290; +x_285 = lean_ctor_get(x_271, 0); +x_286 = lean_ctor_get(x_271, 1); +x_287 = lean_ctor_get(x_271, 2); +x_288 = lean_ctor_get(x_271, 3); +x_289 = 1; +lean_inc(x_193); +lean_ctor_set(x_271, 3, x_193); +lean_ctor_set(x_271, 2, x_35); +lean_ctor_set(x_271, 1, x_34); +lean_ctor_set(x_271, 0, x_33); +x_290 = !lean_is_exclusive(x_193); +if (x_290 == 0) { -uint8_t x_195; -x_195 = !lean_is_exclusive(x_191); -if (x_195 == 0) -{ -lean_object* x_196; lean_object* x_197; uint8_t x_198; -x_196 = lean_ctor_get(x_191, 3); 
-lean_dec(x_196); -x_197 = lean_ctor_get(x_191, 0); -lean_dec(x_197); -lean_ctor_set(x_191, 0, x_194); -x_198 = 1; -lean_ctor_set(x_1, 3, x_191); -lean_ctor_set_uint8(x_1, sizeof(void*)*4, x_198); -return x_1; +lean_object* x_291; lean_object* x_292; lean_object* x_293; lean_object* x_294; uint8_t x_295; +x_291 = lean_ctor_get(x_193, 3); +lean_dec(x_291); +x_292 = lean_ctor_get(x_193, 2); +lean_dec(x_292); +x_293 = lean_ctor_get(x_193, 1); +lean_dec(x_293); +x_294 = lean_ctor_get(x_193, 0); +lean_dec(x_294); +lean_ctor_set_uint8(x_271, sizeof(void*)*4, x_289); +lean_ctor_set(x_193, 3, x_288); +lean_ctor_set(x_193, 2, x_287); +lean_ctor_set(x_193, 1, x_286); +lean_ctor_set(x_193, 0, x_285); +lean_ctor_set_uint8(x_193, sizeof(void*)*4, x_289); +x_295 = 0; +lean_ctor_set(x_191, 3, x_193); +lean_ctor_set(x_191, 0, x_271); +lean_ctor_set_uint8(x_191, sizeof(void*)*4, x_295); +return x_191; } else { -lean_object* x_199; lean_object* x_200; lean_object* x_201; uint8_t x_202; -x_199 = lean_ctor_get(x_191, 1); -x_200 = lean_ctor_get(x_191, 2); -lean_inc(x_200); -lean_inc(x_199); -lean_dec(x_191); -x_201 = lean_alloc_ctor(1, 4, 1); -lean_ctor_set(x_201, 0, x_194); -lean_ctor_set(x_201, 1, x_199); -lean_ctor_set(x_201, 2, x_200); -lean_ctor_set(x_201, 3, x_194); -lean_ctor_set_uint8(x_201, sizeof(void*)*4, x_192); -x_202 = 1; -lean_ctor_set(x_1, 3, x_201); -lean_ctor_set_uint8(x_1, sizeof(void*)*4, x_202); -return x_1; +lean_object* x_296; uint8_t x_297; +lean_dec(x_193); +lean_ctor_set_uint8(x_271, sizeof(void*)*4, x_289); +x_296 = lean_alloc_ctor(1, 4, 1); +lean_ctor_set(x_296, 0, x_285); +lean_ctor_set(x_296, 1, x_286); +lean_ctor_set(x_296, 2, x_287); +lean_ctor_set(x_296, 3, x_288); +lean_ctor_set_uint8(x_296, sizeof(void*)*4, x_289); +x_297 = 0; +lean_ctor_set(x_191, 3, x_296); +lean_ctor_set(x_191, 0, x_271); +lean_ctor_set_uint8(x_191, sizeof(void*)*4, x_297); +return x_191; } } else { -uint8_t x_203; -x_203 = lean_ctor_get_uint8(x_194, sizeof(void*)*4); -if (x_203 == 0) -{ -uint8_t x_204; -x_204 = !lean_is_exclusive(x_191); -if (x_204 == 0) -{ -lean_object* x_205; lean_object* x_206; lean_object* x_207; lean_object* x_208; uint8_t x_209; -x_205 = lean_ctor_get(x_191, 1); -x_206 = lean_ctor_get(x_191, 2); -x_207 = lean_ctor_get(x_191, 3); -lean_dec(x_207); -x_208 = lean_ctor_get(x_191, 0); -lean_dec(x_208); -x_209 = !lean_is_exclusive(x_194); -if (x_209 == 0) -{ -lean_object* x_210; lean_object* x_211; lean_object* x_212; lean_object* x_213; uint8_t x_214; uint8_t x_215; -x_210 = lean_ctor_get(x_194, 0); -x_211 = lean_ctor_get(x_194, 1); -x_212 = lean_ctor_get(x_194, 2); -x_213 = lean_ctor_get(x_194, 3); -x_214 = 1; -lean_ctor_set(x_194, 3, x_193); -lean_ctor_set(x_194, 2, x_35); -lean_ctor_set(x_194, 1, x_34); -lean_ctor_set(x_194, 0, x_33); -lean_ctor_set_uint8(x_194, sizeof(void*)*4, x_214); -lean_ctor_set(x_191, 3, x_213); -lean_ctor_set(x_191, 2, x_212); -lean_ctor_set(x_191, 1, x_211); -lean_ctor_set(x_191, 0, x_210); -lean_ctor_set_uint8(x_191, sizeof(void*)*4, x_214); -x_215 = 0; -lean_ctor_set(x_1, 3, x_191); -lean_ctor_set(x_1, 2, x_206); -lean_ctor_set(x_1, 1, x_205); -lean_ctor_set(x_1, 0, x_194); -lean_ctor_set_uint8(x_1, sizeof(void*)*4, x_215); -return x_1; +lean_object* x_298; lean_object* x_299; lean_object* x_300; lean_object* x_301; uint8_t x_302; lean_object* x_303; lean_object* x_304; lean_object* x_305; uint8_t x_306; +x_298 = lean_ctor_get(x_271, 0); +x_299 = lean_ctor_get(x_271, 1); +x_300 = lean_ctor_get(x_271, 2); +x_301 = lean_ctor_get(x_271, 3); +lean_inc(x_301); 
+lean_inc(x_300); +lean_inc(x_299); +lean_inc(x_298); +lean_dec(x_271); +x_302 = 1; +lean_inc(x_193); +x_303 = lean_alloc_ctor(1, 4, 1); +lean_ctor_set(x_303, 0, x_33); +lean_ctor_set(x_303, 1, x_34); +lean_ctor_set(x_303, 2, x_35); +lean_ctor_set(x_303, 3, x_193); +if (lean_is_exclusive(x_193)) { + lean_ctor_release(x_193, 0); + lean_ctor_release(x_193, 1); + lean_ctor_release(x_193, 2); + lean_ctor_release(x_193, 3); + x_304 = x_193; +} else { + lean_dec_ref(x_193); + x_304 = lean_box(0); } -else -{ -lean_object* x_216; lean_object* x_217; lean_object* x_218; lean_object* x_219; uint8_t x_220; lean_object* x_221; uint8_t x_222; -x_216 = lean_ctor_get(x_194, 0); -x_217 = lean_ctor_get(x_194, 1); -x_218 = lean_ctor_get(x_194, 2); -x_219 = lean_ctor_get(x_194, 3); -lean_inc(x_219); -lean_inc(x_218); -lean_inc(x_217); -lean_inc(x_216); -lean_dec(x_194); -x_220 = 1; -x_221 = lean_alloc_ctor(1, 4, 1); -lean_ctor_set(x_221, 0, x_33); -lean_ctor_set(x_221, 1, x_34); -lean_ctor_set(x_221, 2, x_35); -lean_ctor_set(x_221, 3, x_193); -lean_ctor_set_uint8(x_221, sizeof(void*)*4, x_220); -lean_ctor_set(x_191, 3, x_219); -lean_ctor_set(x_191, 2, x_218); -lean_ctor_set(x_191, 1, x_217); -lean_ctor_set(x_191, 0, x_216); -lean_ctor_set_uint8(x_191, sizeof(void*)*4, x_220); -x_222 = 0; -lean_ctor_set(x_1, 3, x_191); -lean_ctor_set(x_1, 2, x_206); -lean_ctor_set(x_1, 1, x_205); -lean_ctor_set(x_1, 0, x_221); -lean_ctor_set_uint8(x_1, sizeof(void*)*4, x_222); -return x_1; +lean_ctor_set_uint8(x_303, sizeof(void*)*4, x_302); +if (lean_is_scalar(x_304)) { + x_305 = lean_alloc_ctor(1, 4, 1); +} else { + x_305 = x_304; +} +lean_ctor_set(x_305, 0, x_298); +lean_ctor_set(x_305, 1, x_299); +lean_ctor_set(x_305, 2, x_300); +lean_ctor_set(x_305, 3, x_301); +lean_ctor_set_uint8(x_305, sizeof(void*)*4, x_302); +x_306 = 0; +lean_ctor_set(x_191, 3, x_305); +lean_ctor_set(x_191, 0, x_303); +lean_ctor_set_uint8(x_191, sizeof(void*)*4, x_306); +return x_191; } } else { -lean_object* x_223; lean_object* x_224; lean_object* x_225; lean_object* x_226; lean_object* x_227; lean_object* x_228; lean_object* x_229; uint8_t x_230; lean_object* x_231; lean_object* x_232; uint8_t x_233; -x_223 = lean_ctor_get(x_191, 1); -x_224 = lean_ctor_get(x_191, 2); -lean_inc(x_224); -lean_inc(x_223); +lean_object* x_307; lean_object* x_308; lean_object* x_309; lean_object* x_310; lean_object* x_311; lean_object* x_312; lean_object* x_313; uint8_t x_314; lean_object* x_315; lean_object* x_316; lean_object* x_317; uint8_t x_318; lean_object* x_319; +x_307 = lean_ctor_get(x_191, 1); +x_308 = lean_ctor_get(x_191, 2); +lean_inc(x_308); +lean_inc(x_307); lean_dec(x_191); -x_225 = lean_ctor_get(x_194, 0); -lean_inc(x_225); -x_226 = lean_ctor_get(x_194, 1); -lean_inc(x_226); -x_227 = lean_ctor_get(x_194, 2); -lean_inc(x_227); -x_228 = lean_ctor_get(x_194, 3); -lean_inc(x_228); -if (lean_is_exclusive(x_194)) { - lean_ctor_release(x_194, 0); - lean_ctor_release(x_194, 1); - lean_ctor_release(x_194, 2); - lean_ctor_release(x_194, 3); - x_229 = x_194; +x_309 = lean_ctor_get(x_271, 0); +lean_inc(x_309); +x_310 = lean_ctor_get(x_271, 1); +lean_inc(x_310); +x_311 = lean_ctor_get(x_271, 2); +lean_inc(x_311); +x_312 = lean_ctor_get(x_271, 3); +lean_inc(x_312); +if (lean_is_exclusive(x_271)) { + lean_ctor_release(x_271, 0); + lean_ctor_release(x_271, 1); + lean_ctor_release(x_271, 2); + lean_ctor_release(x_271, 3); + x_313 = x_271; } else { - lean_dec_ref(x_194); - x_229 = lean_box(0); + lean_dec_ref(x_271); + x_313 = lean_box(0); } -x_230 = 1; -if 
(lean_is_scalar(x_229)) { - x_231 = lean_alloc_ctor(1, 4, 1); +x_314 = 1; +lean_inc(x_193); +if (lean_is_scalar(x_313)) { + x_315 = lean_alloc_ctor(1, 4, 1); } else { - x_231 = x_229; -} -lean_ctor_set(x_231, 0, x_33); -lean_ctor_set(x_231, 1, x_34); -lean_ctor_set(x_231, 2, x_35); -lean_ctor_set(x_231, 3, x_193); -lean_ctor_set_uint8(x_231, sizeof(void*)*4, x_230); -x_232 = lean_alloc_ctor(1, 4, 1); -lean_ctor_set(x_232, 0, x_225); -lean_ctor_set(x_232, 1, x_226); -lean_ctor_set(x_232, 2, x_227); -lean_ctor_set(x_232, 3, x_228); -lean_ctor_set_uint8(x_232, sizeof(void*)*4, x_230); -x_233 = 0; -lean_ctor_set(x_1, 3, x_232); -lean_ctor_set(x_1, 2, x_224); -lean_ctor_set(x_1, 1, x_223); -lean_ctor_set(x_1, 0, x_231); -lean_ctor_set_uint8(x_1, sizeof(void*)*4, x_233); -return x_1; -} + x_315 = x_313; } -else -{ -uint8_t x_234; -lean_free_object(x_1); -x_234 = !lean_is_exclusive(x_194); -if (x_234 == 0) -{ -lean_object* x_235; lean_object* x_236; lean_object* x_237; lean_object* x_238; uint8_t x_239; -x_235 = lean_ctor_get(x_194, 3); -lean_dec(x_235); -x_236 = lean_ctor_get(x_194, 2); -lean_dec(x_236); -x_237 = lean_ctor_get(x_194, 1); -lean_dec(x_237); -x_238 = lean_ctor_get(x_194, 0); -lean_dec(x_238); -x_239 = 1; -lean_ctor_set(x_194, 3, x_191); -lean_ctor_set(x_194, 2, x_35); -lean_ctor_set(x_194, 1, x_34); -lean_ctor_set(x_194, 0, x_33); -lean_ctor_set_uint8(x_194, sizeof(void*)*4, x_239); -return x_194; -} -else -{ -uint8_t x_240; lean_object* x_241; -lean_dec(x_194); -x_240 = 1; -x_241 = lean_alloc_ctor(1, 4, 1); -lean_ctor_set(x_241, 0, x_33); -lean_ctor_set(x_241, 1, x_34); -lean_ctor_set(x_241, 2, x_35); -lean_ctor_set(x_241, 3, x_191); -lean_ctor_set_uint8(x_241, sizeof(void*)*4, x_240); -return x_241; +lean_ctor_set(x_315, 0, x_33); +lean_ctor_set(x_315, 1, x_34); +lean_ctor_set(x_315, 2, x_35); +lean_ctor_set(x_315, 3, x_193); +if (lean_is_exclusive(x_193)) { + lean_ctor_release(x_193, 0); + lean_ctor_release(x_193, 1); + lean_ctor_release(x_193, 2); + lean_ctor_release(x_193, 3); + x_316 = x_193; +} else { + lean_dec_ref(x_193); + x_316 = lean_box(0); } +lean_ctor_set_uint8(x_315, sizeof(void*)*4, x_314); +if (lean_is_scalar(x_316)) { + x_317 = lean_alloc_ctor(1, 4, 1); +} else { + x_317 = x_316; } +lean_ctor_set(x_317, 0, x_309); +lean_ctor_set(x_317, 1, x_310); +lean_ctor_set(x_317, 2, x_311); +lean_ctor_set(x_317, 3, x_312); +lean_ctor_set_uint8(x_317, sizeof(void*)*4, x_314); +x_318 = 0; +x_319 = lean_alloc_ctor(1, 4, 1); +lean_ctor_set(x_319, 0, x_315); +lean_ctor_set(x_319, 1, x_307); +lean_ctor_set(x_319, 2, x_308); +lean_ctor_set(x_319, 3, x_317); +lean_ctor_set_uint8(x_319, sizeof(void*)*4, x_318); +return x_319; } } else { -uint8_t x_242; -x_242 = lean_ctor_get_uint8(x_193, sizeof(void*)*4); -if (x_242 == 0) -{ -uint8_t x_243; -x_243 = !lean_is_exclusive(x_191); -if (x_243 == 0) +uint8_t x_320; +x_320 = !lean_is_exclusive(x_191); +if (x_320 == 0) { -lean_object* x_244; uint8_t x_245; -x_244 = lean_ctor_get(x_191, 0); -lean_dec(x_244); -x_245 = !lean_is_exclusive(x_193); -if (x_245 == 0) +lean_object* x_321; lean_object* x_322; uint8_t x_323; +x_321 = lean_ctor_get(x_191, 3); +lean_dec(x_321); +x_322 = lean_ctor_get(x_191, 0); +lean_dec(x_322); +x_323 = !lean_is_exclusive(x_193); +if (x_323 == 0) { -lean_object* x_246; lean_object* x_247; lean_object* x_248; lean_object* x_249; uint8_t x_250; uint8_t x_251; -x_246 = lean_ctor_get(x_193, 0); -x_247 = lean_ctor_get(x_193, 1); -x_248 = lean_ctor_get(x_193, 2); -x_249 = lean_ctor_get(x_193, 3); -x_250 = 1; 
-lean_ctor_set(x_193, 3, x_246); -lean_ctor_set(x_193, 2, x_35); -lean_ctor_set(x_193, 1, x_34); -lean_ctor_set(x_193, 0, x_33); -lean_ctor_set_uint8(x_193, sizeof(void*)*4, x_250); -lean_ctor_set(x_191, 0, x_249); -lean_ctor_set_uint8(x_191, sizeof(void*)*4, x_250); -x_251 = 0; +uint8_t x_324; +lean_ctor_set_uint8(x_193, sizeof(void*)*4, x_280); +x_324 = 1; lean_ctor_set(x_1, 3, x_191); -lean_ctor_set(x_1, 2, x_248); -lean_ctor_set(x_1, 1, x_247); -lean_ctor_set(x_1, 0, x_193); -lean_ctor_set_uint8(x_1, sizeof(void*)*4, x_251); +lean_ctor_set_uint8(x_1, sizeof(void*)*4, x_324); return x_1; } else { -lean_object* x_252; lean_object* x_253; lean_object* x_254; lean_object* x_255; uint8_t x_256; lean_object* x_257; uint8_t x_258; -x_252 = lean_ctor_get(x_193, 0); -x_253 = lean_ctor_get(x_193, 1); -x_254 = lean_ctor_get(x_193, 2); -x_255 = lean_ctor_get(x_193, 3); -lean_inc(x_255); -lean_inc(x_254); -lean_inc(x_253); -lean_inc(x_252); +lean_object* x_325; lean_object* x_326; lean_object* x_327; lean_object* x_328; lean_object* x_329; uint8_t x_330; +x_325 = lean_ctor_get(x_193, 0); +x_326 = lean_ctor_get(x_193, 1); +x_327 = lean_ctor_get(x_193, 2); +x_328 = lean_ctor_get(x_193, 3); +lean_inc(x_328); +lean_inc(x_327); +lean_inc(x_326); +lean_inc(x_325); lean_dec(x_193); -x_256 = 1; -x_257 = lean_alloc_ctor(1, 4, 1); -lean_ctor_set(x_257, 0, x_33); -lean_ctor_set(x_257, 1, x_34); -lean_ctor_set(x_257, 2, x_35); -lean_ctor_set(x_257, 3, x_252); -lean_ctor_set_uint8(x_257, sizeof(void*)*4, x_256); -lean_ctor_set(x_191, 0, x_255); -lean_ctor_set_uint8(x_191, sizeof(void*)*4, x_256); -x_258 = 0; +x_329 = lean_alloc_ctor(1, 4, 1); +lean_ctor_set(x_329, 0, x_325); +lean_ctor_set(x_329, 1, x_326); +lean_ctor_set(x_329, 2, x_327); +lean_ctor_set(x_329, 3, x_328); +lean_ctor_set_uint8(x_329, sizeof(void*)*4, x_280); +lean_ctor_set(x_191, 0, x_329); +x_330 = 1; lean_ctor_set(x_1, 3, x_191); -lean_ctor_set(x_1, 2, x_254); -lean_ctor_set(x_1, 1, x_253); -lean_ctor_set(x_1, 0, x_257); -lean_ctor_set_uint8(x_1, sizeof(void*)*4, x_258); +lean_ctor_set_uint8(x_1, sizeof(void*)*4, x_330); return x_1; } } else { -lean_object* x_259; lean_object* x_260; lean_object* x_261; lean_object* x_262; lean_object* x_263; lean_object* x_264; lean_object* x_265; lean_object* x_266; uint8_t x_267; lean_object* x_268; lean_object* x_269; uint8_t x_270; -x_259 = lean_ctor_get(x_191, 1); -x_260 = lean_ctor_get(x_191, 2); -x_261 = lean_ctor_get(x_191, 3); -lean_inc(x_261); -lean_inc(x_260); -lean_inc(x_259); +lean_object* x_331; lean_object* x_332; lean_object* x_333; lean_object* x_334; lean_object* x_335; lean_object* x_336; lean_object* x_337; lean_object* x_338; lean_object* x_339; uint8_t x_340; +x_331 = lean_ctor_get(x_191, 1); +x_332 = lean_ctor_get(x_191, 2); +lean_inc(x_332); +lean_inc(x_331); lean_dec(x_191); -x_262 = lean_ctor_get(x_193, 0); -lean_inc(x_262); -x_263 = lean_ctor_get(x_193, 1); -lean_inc(x_263); -x_264 = lean_ctor_get(x_193, 2); -lean_inc(x_264); -x_265 = lean_ctor_get(x_193, 3); -lean_inc(x_265); +x_333 = lean_ctor_get(x_193, 0); +lean_inc(x_333); +x_334 = lean_ctor_get(x_193, 1); +lean_inc(x_334); +x_335 = lean_ctor_get(x_193, 2); +lean_inc(x_335); +x_336 = lean_ctor_get(x_193, 3); +lean_inc(x_336); if (lean_is_exclusive(x_193)) { lean_ctor_release(x_193, 0); lean_ctor_release(x_193, 1); lean_ctor_release(x_193, 2); lean_ctor_release(x_193, 3); - x_266 = x_193; + x_337 = x_193; } else { lean_dec_ref(x_193); - x_266 = lean_box(0); + x_337 = lean_box(0); } -x_267 = 1; -if (lean_is_scalar(x_266)) { - 
x_268 = lean_alloc_ctor(1, 4, 1); +if (lean_is_scalar(x_337)) { + x_338 = lean_alloc_ctor(1, 4, 1); } else { - x_268 = x_266; + x_338 = x_337; } -lean_ctor_set(x_268, 0, x_33); -lean_ctor_set(x_268, 1, x_34); -lean_ctor_set(x_268, 2, x_35); -lean_ctor_set(x_268, 3, x_262); -lean_ctor_set_uint8(x_268, sizeof(void*)*4, x_267); -x_269 = lean_alloc_ctor(1, 4, 1); -lean_ctor_set(x_269, 0, x_265); -lean_ctor_set(x_269, 1, x_259); -lean_ctor_set(x_269, 2, x_260); -lean_ctor_set(x_269, 3, x_261); -lean_ctor_set_uint8(x_269, sizeof(void*)*4, x_267); -x_270 = 0; -lean_ctor_set(x_1, 3, x_269); -lean_ctor_set(x_1, 2, x_264); -lean_ctor_set(x_1, 1, x_263); -lean_ctor_set(x_1, 0, x_268); -lean_ctor_set_uint8(x_1, sizeof(void*)*4, x_270); +lean_ctor_set(x_338, 0, x_333); +lean_ctor_set(x_338, 1, x_334); +lean_ctor_set(x_338, 2, x_335); +lean_ctor_set(x_338, 3, x_336); +lean_ctor_set_uint8(x_338, sizeof(void*)*4, x_280); +x_339 = lean_alloc_ctor(1, 4, 1); +lean_ctor_set(x_339, 0, x_338); +lean_ctor_set(x_339, 1, x_331); +lean_ctor_set(x_339, 2, x_332); +lean_ctor_set(x_339, 3, x_271); +lean_ctor_set_uint8(x_339, sizeof(void*)*4, x_192); +x_340 = 1; +lean_ctor_set(x_1, 3, x_339); +lean_ctor_set_uint8(x_1, sizeof(void*)*4, x_340); return x_1; } } -else -{ -lean_object* x_271; -x_271 = lean_ctor_get(x_191, 3); -lean_inc(x_271); -if (lean_obj_tag(x_271) == 0) -{ -uint8_t x_272; -lean_free_object(x_1); -x_272 = !lean_is_exclusive(x_193); -if (x_272 == 0) -{ -lean_object* x_273; lean_object* x_274; lean_object* x_275; lean_object* x_276; uint8_t x_277; -x_273 = lean_ctor_get(x_193, 3); -lean_dec(x_273); -x_274 = lean_ctor_get(x_193, 2); -lean_dec(x_274); -x_275 = lean_ctor_get(x_193, 1); -lean_dec(x_275); -x_276 = lean_ctor_get(x_193, 0); -lean_dec(x_276); -x_277 = 1; -lean_ctor_set(x_193, 3, x_191); -lean_ctor_set(x_193, 2, x_35); -lean_ctor_set(x_193, 1, x_34); -lean_ctor_set(x_193, 0, x_33); -lean_ctor_set_uint8(x_193, sizeof(void*)*4, x_277); -return x_193; } -else -{ -uint8_t x_278; lean_object* x_279; -lean_dec(x_193); -x_278 = 1; -x_279 = lean_alloc_ctor(1, 4, 1); -lean_ctor_set(x_279, 0, x_33); -lean_ctor_set(x_279, 1, x_34); -lean_ctor_set(x_279, 2, x_35); -lean_ctor_set(x_279, 3, x_191); -lean_ctor_set_uint8(x_279, sizeof(void*)*4, x_278); -return x_279; +} } } else { -uint8_t x_280; -x_280 = lean_ctor_get_uint8(x_271, sizeof(void*)*4); -if (x_280 == 0) -{ -uint8_t x_281; -lean_free_object(x_1); -x_281 = !lean_is_exclusive(x_191); -if (x_281 == 0) -{ -lean_object* x_282; lean_object* x_283; uint8_t x_284; -x_282 = lean_ctor_get(x_191, 3); -lean_dec(x_282); -x_283 = lean_ctor_get(x_191, 0); -lean_dec(x_283); -x_284 = !lean_is_exclusive(x_271); -if (x_284 == 0) -{ -lean_object* x_285; lean_object* x_286; lean_object* x_287; lean_object* x_288; uint8_t x_289; uint8_t x_290; -x_285 = lean_ctor_get(x_271, 0); -x_286 = lean_ctor_get(x_271, 1); -x_287 = lean_ctor_get(x_271, 2); -x_288 = lean_ctor_get(x_271, 3); -x_289 = 1; -lean_inc(x_193); -lean_ctor_set(x_271, 3, x_193); -lean_ctor_set(x_271, 2, x_35); -lean_ctor_set(x_271, 1, x_34); -lean_ctor_set(x_271, 0, x_33); -x_290 = !lean_is_exclusive(x_193); -if (x_290 == 0) -{ -lean_object* x_291; lean_object* x_292; lean_object* x_293; lean_object* x_294; uint8_t x_295; -x_291 = lean_ctor_get(x_193, 3); -lean_dec(x_291); -x_292 = lean_ctor_get(x_193, 2); -lean_dec(x_292); -x_293 = lean_ctor_get(x_193, 1); -lean_dec(x_293); -x_294 = lean_ctor_get(x_193, 0); -lean_dec(x_294); -lean_ctor_set_uint8(x_271, sizeof(void*)*4, x_289); -lean_ctor_set(x_193, 3, x_288); 
-lean_ctor_set(x_193, 2, x_287); -lean_ctor_set(x_193, 1, x_286); -lean_ctor_set(x_193, 0, x_285); -lean_ctor_set_uint8(x_193, sizeof(void*)*4, x_289); -x_295 = 0; -lean_ctor_set(x_191, 3, x_193); -lean_ctor_set(x_191, 0, x_271); -lean_ctor_set_uint8(x_191, sizeof(void*)*4, x_295); -return x_191; +uint8_t x_341; +x_341 = 1; +lean_ctor_set(x_1, 3, x_191); +lean_ctor_set_uint8(x_1, sizeof(void*)*4, x_341); +return x_1; +} } -else -{ -lean_object* x_296; uint8_t x_297; -lean_dec(x_193); -lean_ctor_set_uint8(x_271, sizeof(void*)*4, x_289); -x_296 = lean_alloc_ctor(1, 4, 1); -lean_ctor_set(x_296, 0, x_285); -lean_ctor_set(x_296, 1, x_286); -lean_ctor_set(x_296, 2, x_287); -lean_ctor_set(x_296, 3, x_288); -lean_ctor_set_uint8(x_296, sizeof(void*)*4, x_289); -x_297 = 0; -lean_ctor_set(x_191, 3, x_296); -lean_ctor_set(x_191, 0, x_271); -lean_ctor_set_uint8(x_191, sizeof(void*)*4, x_297); -return x_191; } } else { -lean_object* x_298; lean_object* x_299; lean_object* x_300; lean_object* x_301; uint8_t x_302; lean_object* x_303; lean_object* x_304; lean_object* x_305; uint8_t x_306; -x_298 = lean_ctor_get(x_271, 0); -x_299 = lean_ctor_get(x_271, 1); -x_300 = lean_ctor_get(x_271, 2); -x_301 = lean_ctor_get(x_271, 3); -lean_inc(x_301); -lean_inc(x_300); -lean_inc(x_299); -lean_inc(x_298); -lean_dec(x_271); -x_302 = 1; -lean_inc(x_193); -x_303 = lean_alloc_ctor(1, 4, 1); -lean_ctor_set(x_303, 0, x_33); -lean_ctor_set(x_303, 1, x_34); -lean_ctor_set(x_303, 2, x_35); -lean_ctor_set(x_303, 3, x_193); -if (lean_is_exclusive(x_193)) { - lean_ctor_release(x_193, 0); - lean_ctor_release(x_193, 1); - lean_ctor_release(x_193, 2); - lean_ctor_release(x_193, 3); - x_304 = x_193; +lean_object* x_342; lean_object* x_343; lean_object* x_344; lean_object* x_345; uint8_t x_346; +x_342 = lean_ctor_get(x_1, 0); +x_343 = lean_ctor_get(x_1, 1); +x_344 = lean_ctor_get(x_1, 2); +x_345 = lean_ctor_get(x_1, 3); +lean_inc(x_345); +lean_inc(x_344); +lean_inc(x_343); +lean_inc(x_342); +lean_dec(x_1); +x_346 = l_Lean_Meta_Grind_Arith_CommRing_EqCnstr_compare(x_2, x_343); +switch (x_346) { +case 0: +{ +lean_object* x_347; uint8_t x_348; +x_347 = l_Lean_RBNode_ins___at_Lean_Meta_Grind_Arith_CommRing_EqCnstr_addToQueue___spec__2(x_342, x_2, x_3); +x_348 = lean_ctor_get_uint8(x_347, sizeof(void*)*4); +if (x_348 == 0) +{ +lean_object* x_349; +x_349 = lean_ctor_get(x_347, 0); +lean_inc(x_349); +if (lean_obj_tag(x_349) == 0) +{ +lean_object* x_350; +x_350 = lean_ctor_get(x_347, 3); +lean_inc(x_350); +if (lean_obj_tag(x_350) == 0) +{ +lean_object* x_351; lean_object* x_352; lean_object* x_353; lean_object* x_354; uint8_t x_355; lean_object* x_356; +x_351 = lean_ctor_get(x_347, 1); +lean_inc(x_351); +x_352 = lean_ctor_get(x_347, 2); +lean_inc(x_352); +if (lean_is_exclusive(x_347)) { + lean_ctor_release(x_347, 0); + lean_ctor_release(x_347, 1); + lean_ctor_release(x_347, 2); + lean_ctor_release(x_347, 3); + x_353 = x_347; } else { - lean_dec_ref(x_193); - x_304 = lean_box(0); + lean_dec_ref(x_347); + x_353 = lean_box(0); } -lean_ctor_set_uint8(x_303, sizeof(void*)*4, x_302); -if (lean_is_scalar(x_304)) { - x_305 = lean_alloc_ctor(1, 4, 1); +if (lean_is_scalar(x_353)) { + x_354 = lean_alloc_ctor(1, 4, 1); } else { - x_305 = x_304; -} -lean_ctor_set(x_305, 0, x_298); -lean_ctor_set(x_305, 1, x_299); -lean_ctor_set(x_305, 2, x_300); -lean_ctor_set(x_305, 3, x_301); -lean_ctor_set_uint8(x_305, sizeof(void*)*4, x_302); -x_306 = 0; -lean_ctor_set(x_191, 3, x_305); -lean_ctor_set(x_191, 0, x_303); -lean_ctor_set_uint8(x_191, sizeof(void*)*4, 
x_306); -return x_191; + x_354 = x_353; } +lean_ctor_set(x_354, 0, x_350); +lean_ctor_set(x_354, 1, x_351); +lean_ctor_set(x_354, 2, x_352); +lean_ctor_set(x_354, 3, x_350); +lean_ctor_set_uint8(x_354, sizeof(void*)*4, x_348); +x_355 = 1; +x_356 = lean_alloc_ctor(1, 4, 1); +lean_ctor_set(x_356, 0, x_354); +lean_ctor_set(x_356, 1, x_343); +lean_ctor_set(x_356, 2, x_344); +lean_ctor_set(x_356, 3, x_345); +lean_ctor_set_uint8(x_356, sizeof(void*)*4, x_355); +return x_356; } else { -lean_object* x_307; lean_object* x_308; lean_object* x_309; lean_object* x_310; lean_object* x_311; lean_object* x_312; lean_object* x_313; uint8_t x_314; lean_object* x_315; lean_object* x_316; lean_object* x_317; uint8_t x_318; lean_object* x_319; -x_307 = lean_ctor_get(x_191, 1); -x_308 = lean_ctor_get(x_191, 2); -lean_inc(x_308); -lean_inc(x_307); -lean_dec(x_191); -x_309 = lean_ctor_get(x_271, 0); -lean_inc(x_309); -x_310 = lean_ctor_get(x_271, 1); -lean_inc(x_310); -x_311 = lean_ctor_get(x_271, 2); -lean_inc(x_311); -x_312 = lean_ctor_get(x_271, 3); -lean_inc(x_312); -if (lean_is_exclusive(x_271)) { - lean_ctor_release(x_271, 0); - lean_ctor_release(x_271, 1); - lean_ctor_release(x_271, 2); - lean_ctor_release(x_271, 3); - x_313 = x_271; +uint8_t x_357; +x_357 = lean_ctor_get_uint8(x_350, sizeof(void*)*4); +if (x_357 == 0) +{ +lean_object* x_358; lean_object* x_359; lean_object* x_360; lean_object* x_361; lean_object* x_362; lean_object* x_363; lean_object* x_364; lean_object* x_365; uint8_t x_366; lean_object* x_367; lean_object* x_368; uint8_t x_369; lean_object* x_370; +x_358 = lean_ctor_get(x_347, 1); +lean_inc(x_358); +x_359 = lean_ctor_get(x_347, 2); +lean_inc(x_359); +if (lean_is_exclusive(x_347)) { + lean_ctor_release(x_347, 0); + lean_ctor_release(x_347, 1); + lean_ctor_release(x_347, 2); + lean_ctor_release(x_347, 3); + x_360 = x_347; } else { - lean_dec_ref(x_271); - x_313 = lean_box(0); + lean_dec_ref(x_347); + x_360 = lean_box(0); } -x_314 = 1; -lean_inc(x_193); -if (lean_is_scalar(x_313)) { - x_315 = lean_alloc_ctor(1, 4, 1); +x_361 = lean_ctor_get(x_350, 0); +lean_inc(x_361); +x_362 = lean_ctor_get(x_350, 1); +lean_inc(x_362); +x_363 = lean_ctor_get(x_350, 2); +lean_inc(x_363); +x_364 = lean_ctor_get(x_350, 3); +lean_inc(x_364); +if (lean_is_exclusive(x_350)) { + lean_ctor_release(x_350, 0); + lean_ctor_release(x_350, 1); + lean_ctor_release(x_350, 2); + lean_ctor_release(x_350, 3); + x_365 = x_350; } else { - x_315 = x_313; + lean_dec_ref(x_350); + x_365 = lean_box(0); } -lean_ctor_set(x_315, 0, x_33); -lean_ctor_set(x_315, 1, x_34); -lean_ctor_set(x_315, 2, x_35); -lean_ctor_set(x_315, 3, x_193); -if (lean_is_exclusive(x_193)) { - lean_ctor_release(x_193, 0); - lean_ctor_release(x_193, 1); - lean_ctor_release(x_193, 2); - lean_ctor_release(x_193, 3); - x_316 = x_193; +x_366 = 1; +if (lean_is_scalar(x_365)) { + x_367 = lean_alloc_ctor(1, 4, 1); } else { - lean_dec_ref(x_193); - x_316 = lean_box(0); + x_367 = x_365; } -lean_ctor_set_uint8(x_315, sizeof(void*)*4, x_314); -if (lean_is_scalar(x_316)) { - x_317 = lean_alloc_ctor(1, 4, 1); +lean_ctor_set(x_367, 0, x_349); +lean_ctor_set(x_367, 1, x_358); +lean_ctor_set(x_367, 2, x_359); +lean_ctor_set(x_367, 3, x_361); +lean_ctor_set_uint8(x_367, sizeof(void*)*4, x_366); +if (lean_is_scalar(x_360)) { + x_368 = lean_alloc_ctor(1, 4, 1); } else { - x_317 = x_316; -} -lean_ctor_set(x_317, 0, x_309); -lean_ctor_set(x_317, 1, x_310); -lean_ctor_set(x_317, 2, x_311); -lean_ctor_set(x_317, 3, x_312); -lean_ctor_set_uint8(x_317, sizeof(void*)*4, x_314); 
-x_318 = 0; -x_319 = lean_alloc_ctor(1, 4, 1); -lean_ctor_set(x_319, 0, x_315); -lean_ctor_set(x_319, 1, x_307); -lean_ctor_set(x_319, 2, x_308); -lean_ctor_set(x_319, 3, x_317); -lean_ctor_set_uint8(x_319, sizeof(void*)*4, x_318); -return x_319; -} -} -else -{ -uint8_t x_320; -x_320 = !lean_is_exclusive(x_191); -if (x_320 == 0) -{ -lean_object* x_321; lean_object* x_322; uint8_t x_323; -x_321 = lean_ctor_get(x_191, 3); -lean_dec(x_321); -x_322 = lean_ctor_get(x_191, 0); -lean_dec(x_322); -x_323 = !lean_is_exclusive(x_193); -if (x_323 == 0) -{ -uint8_t x_324; -lean_ctor_set_uint8(x_193, sizeof(void*)*4, x_280); -x_324 = 1; -lean_ctor_set(x_1, 3, x_191); -lean_ctor_set_uint8(x_1, sizeof(void*)*4, x_324); -return x_1; -} -else -{ -lean_object* x_325; lean_object* x_326; lean_object* x_327; lean_object* x_328; lean_object* x_329; uint8_t x_330; -x_325 = lean_ctor_get(x_193, 0); -x_326 = lean_ctor_get(x_193, 1); -x_327 = lean_ctor_get(x_193, 2); -x_328 = lean_ctor_get(x_193, 3); -lean_inc(x_328); -lean_inc(x_327); -lean_inc(x_326); -lean_inc(x_325); -lean_dec(x_193); -x_329 = lean_alloc_ctor(1, 4, 1); -lean_ctor_set(x_329, 0, x_325); -lean_ctor_set(x_329, 1, x_326); -lean_ctor_set(x_329, 2, x_327); -lean_ctor_set(x_329, 3, x_328); -lean_ctor_set_uint8(x_329, sizeof(void*)*4, x_280); -lean_ctor_set(x_191, 0, x_329); -x_330 = 1; -lean_ctor_set(x_1, 3, x_191); -lean_ctor_set_uint8(x_1, sizeof(void*)*4, x_330); -return x_1; -} -} -else -{ -lean_object* x_331; lean_object* x_332; lean_object* x_333; lean_object* x_334; lean_object* x_335; lean_object* x_336; lean_object* x_337; lean_object* x_338; lean_object* x_339; uint8_t x_340; -x_331 = lean_ctor_get(x_191, 1); -x_332 = lean_ctor_get(x_191, 2); -lean_inc(x_332); -lean_inc(x_331); -lean_dec(x_191); -x_333 = lean_ctor_get(x_193, 0); -lean_inc(x_333); -x_334 = lean_ctor_get(x_193, 1); -lean_inc(x_334); -x_335 = lean_ctor_get(x_193, 2); -lean_inc(x_335); -x_336 = lean_ctor_get(x_193, 3); -lean_inc(x_336); -if (lean_is_exclusive(x_193)) { - lean_ctor_release(x_193, 0); - lean_ctor_release(x_193, 1); - lean_ctor_release(x_193, 2); - lean_ctor_release(x_193, 3); - x_337 = x_193; -} else { - lean_dec_ref(x_193); - x_337 = lean_box(0); -} -if (lean_is_scalar(x_337)) { - x_338 = lean_alloc_ctor(1, 4, 1); -} else { - x_338 = x_337; -} -lean_ctor_set(x_338, 0, x_333); -lean_ctor_set(x_338, 1, x_334); -lean_ctor_set(x_338, 2, x_335); -lean_ctor_set(x_338, 3, x_336); -lean_ctor_set_uint8(x_338, sizeof(void*)*4, x_280); -x_339 = lean_alloc_ctor(1, 4, 1); -lean_ctor_set(x_339, 0, x_338); -lean_ctor_set(x_339, 1, x_331); -lean_ctor_set(x_339, 2, x_332); -lean_ctor_set(x_339, 3, x_271); -lean_ctor_set_uint8(x_339, sizeof(void*)*4, x_192); -x_340 = 1; -lean_ctor_set(x_1, 3, x_339); -lean_ctor_set_uint8(x_1, sizeof(void*)*4, x_340); -return x_1; -} -} -} -} -} -} -else -{ -uint8_t x_341; -x_341 = 1; -lean_ctor_set(x_1, 3, x_191); -lean_ctor_set_uint8(x_1, sizeof(void*)*4, x_341); -return x_1; -} -} -} -} -else -{ -lean_object* x_342; lean_object* x_343; lean_object* x_344; lean_object* x_345; uint8_t x_346; -x_342 = lean_ctor_get(x_1, 0); -x_343 = lean_ctor_get(x_1, 1); -x_344 = lean_ctor_get(x_1, 2); -x_345 = lean_ctor_get(x_1, 3); -lean_inc(x_345); -lean_inc(x_344); -lean_inc(x_343); -lean_inc(x_342); -lean_dec(x_1); -x_346 = l_Lean_Meta_Grind_Arith_CommRing_EqCnstr_compare(x_2, x_343); -switch (x_346) { -case 0: -{ -lean_object* x_347; uint8_t x_348; -x_347 = l_Lean_RBNode_ins___at_Lean_Meta_Grind_Arith_CommRing_EqCnstr_addToQueue___spec__2(x_342, x_2, x_3); 
-x_348 = lean_ctor_get_uint8(x_347, sizeof(void*)*4); -if (x_348 == 0) -{ -lean_object* x_349; -x_349 = lean_ctor_get(x_347, 0); -lean_inc(x_349); -if (lean_obj_tag(x_349) == 0) -{ -lean_object* x_350; -x_350 = lean_ctor_get(x_347, 3); -lean_inc(x_350); -if (lean_obj_tag(x_350) == 0) -{ -lean_object* x_351; lean_object* x_352; lean_object* x_353; lean_object* x_354; uint8_t x_355; lean_object* x_356; -x_351 = lean_ctor_get(x_347, 1); -lean_inc(x_351); -x_352 = lean_ctor_get(x_347, 2); -lean_inc(x_352); -if (lean_is_exclusive(x_347)) { - lean_ctor_release(x_347, 0); - lean_ctor_release(x_347, 1); - lean_ctor_release(x_347, 2); - lean_ctor_release(x_347, 3); - x_353 = x_347; -} else { - lean_dec_ref(x_347); - x_353 = lean_box(0); -} -if (lean_is_scalar(x_353)) { - x_354 = lean_alloc_ctor(1, 4, 1); -} else { - x_354 = x_353; -} -lean_ctor_set(x_354, 0, x_350); -lean_ctor_set(x_354, 1, x_351); -lean_ctor_set(x_354, 2, x_352); -lean_ctor_set(x_354, 3, x_350); -lean_ctor_set_uint8(x_354, sizeof(void*)*4, x_348); -x_355 = 1; -x_356 = lean_alloc_ctor(1, 4, 1); -lean_ctor_set(x_356, 0, x_354); -lean_ctor_set(x_356, 1, x_343); -lean_ctor_set(x_356, 2, x_344); -lean_ctor_set(x_356, 3, x_345); -lean_ctor_set_uint8(x_356, sizeof(void*)*4, x_355); -return x_356; -} -else -{ -uint8_t x_357; -x_357 = lean_ctor_get_uint8(x_350, sizeof(void*)*4); -if (x_357 == 0) -{ -lean_object* x_358; lean_object* x_359; lean_object* x_360; lean_object* x_361; lean_object* x_362; lean_object* x_363; lean_object* x_364; lean_object* x_365; uint8_t x_366; lean_object* x_367; lean_object* x_368; uint8_t x_369; lean_object* x_370; -x_358 = lean_ctor_get(x_347, 1); -lean_inc(x_358); -x_359 = lean_ctor_get(x_347, 2); -lean_inc(x_359); -if (lean_is_exclusive(x_347)) { - lean_ctor_release(x_347, 0); - lean_ctor_release(x_347, 1); - lean_ctor_release(x_347, 2); - lean_ctor_release(x_347, 3); - x_360 = x_347; -} else { - lean_dec_ref(x_347); - x_360 = lean_box(0); -} -x_361 = lean_ctor_get(x_350, 0); -lean_inc(x_361); -x_362 = lean_ctor_get(x_350, 1); -lean_inc(x_362); -x_363 = lean_ctor_get(x_350, 2); -lean_inc(x_363); -x_364 = lean_ctor_get(x_350, 3); -lean_inc(x_364); -if (lean_is_exclusive(x_350)) { - lean_ctor_release(x_350, 0); - lean_ctor_release(x_350, 1); - lean_ctor_release(x_350, 2); - lean_ctor_release(x_350, 3); - x_365 = x_350; -} else { - lean_dec_ref(x_350); - x_365 = lean_box(0); -} -x_366 = 1; -if (lean_is_scalar(x_365)) { - x_367 = lean_alloc_ctor(1, 4, 1); -} else { - x_367 = x_365; -} -lean_ctor_set(x_367, 0, x_349); -lean_ctor_set(x_367, 1, x_358); -lean_ctor_set(x_367, 2, x_359); -lean_ctor_set(x_367, 3, x_361); -lean_ctor_set_uint8(x_367, sizeof(void*)*4, x_366); -if (lean_is_scalar(x_360)) { - x_368 = lean_alloc_ctor(1, 4, 1); -} else { - x_368 = x_360; + x_368 = x_360; } lean_ctor_set(x_368, 0, x_364); lean_ctor_set(x_368, 1, x_343); @@ -13501,656 +12551,2791 @@ if (lean_is_exclusive(x_238)) { lean_dec_ref(x_238); x_240 = lean_box(0); } -if (lean_is_scalar(x_240)) { - x_241 = lean_alloc_ctor(0, 2, 0); -} else { - x_241 = x_240; +if (lean_is_scalar(x_240)) { + x_241 = lean_alloc_ctor(0, 2, 0); +} else { + x_241 = x_240; +} +lean_ctor_set(x_241, 0, x_207); +lean_ctor_set(x_241, 1, x_239); +return x_241; +} +} +} +} +static lean_object* _init_l_Lean_Meta_Grind_Arith_CommRing_EqCnstr_addToQueue___lambda__2___closed__1() { +_start: +{ +lean_object* x_1; +x_1 = lean_mk_string_unchecked("queue", 5, 5); +return x_1; +} +} +static lean_object* 
_init_l_Lean_Meta_Grind_Arith_CommRing_EqCnstr_addToQueue___lambda__2___closed__2() { +_start: +{ +lean_object* x_1; lean_object* x_2; lean_object* x_3; lean_object* x_4; lean_object* x_5; +x_1 = l_Lean_Meta_Grind_Arith_CommRing_PolyDerivation_simplifyWith___closed__1; +x_2 = l_Lean_Meta_Grind_Arith_CommRing_PolyDerivation_simplifyWith___closed__2; +x_3 = l_Lean_Meta_Grind_Arith_CommRing_EqCnstr_checkConstant___closed__3; +x_4 = l_Lean_Meta_Grind_Arith_CommRing_EqCnstr_addToQueue___lambda__2___closed__1; +x_5 = l_Lean_Name_mkStr4(x_1, x_2, x_3, x_4); +return x_5; +} +} +LEAN_EXPORT lean_object* l_Lean_Meta_Grind_Arith_CommRing_EqCnstr_addToQueue___lambda__2(lean_object* x_1, lean_object* x_2, lean_object* x_3, lean_object* x_4, lean_object* x_5, lean_object* x_6, lean_object* x_7, lean_object* x_8, lean_object* x_9, lean_object* x_10, lean_object* x_11, lean_object* x_12) { +_start: +{ +lean_object* x_13; lean_object* x_14; lean_object* x_15; uint8_t x_16; +x_13 = l_Lean_Meta_Grind_Arith_CommRing_EqCnstr_addToQueue___lambda__2___closed__2; +x_14 = l_Lean_isTracingEnabledFor___at_Lean_Meta_Grind_Arith_CommRing_Null_setEqUnsat___spec__1(x_13, x_3, x_4, x_5, x_6, x_7, x_8, x_9, x_10, x_11, x_12); +x_15 = lean_ctor_get(x_14, 0); +lean_inc(x_15); +x_16 = lean_unbox(x_15); +lean_dec(x_15); +if (x_16 == 0) +{ +lean_object* x_17; lean_object* x_18; lean_object* x_19; +x_17 = lean_ctor_get(x_14, 1); +lean_inc(x_17); +lean_dec(x_14); +x_18 = lean_box(0); +x_19 = l_Lean_Meta_Grind_Arith_CommRing_EqCnstr_addToQueue___lambda__1(x_1, x_18, x_3, x_4, x_5, x_6, x_7, x_8, x_9, x_10, x_11, x_17); +return x_19; +} +else +{ +uint8_t x_20; +x_20 = !lean_is_exclusive(x_14); +if (x_20 == 0) +{ +lean_object* x_21; lean_object* x_22; lean_object* x_23; +x_21 = lean_ctor_get(x_14, 1); +x_22 = lean_ctor_get(x_14, 0); +lean_dec(x_22); +x_23 = l_Lean_Meta_Grind_updateLastTag(x_4, x_5, x_6, x_7, x_8, x_9, x_10, x_11, x_21); +if (lean_obj_tag(x_23) == 0) +{ +lean_object* x_24; lean_object* x_25; +x_24 = lean_ctor_get(x_23, 1); +lean_inc(x_24); +lean_dec(x_23); +lean_inc(x_1); +x_25 = l_Lean_Meta_Grind_Arith_CommRing_EqCnstr_denoteExpr___at_Lean_Meta_Grind_Arith_CommRing_Null_setEqUnsat___spec__4(x_1, x_3, x_4, x_5, x_6, x_7, x_8, x_9, x_10, x_11, x_24); +if (lean_obj_tag(x_25) == 0) +{ +lean_object* x_26; lean_object* x_27; lean_object* x_28; lean_object* x_29; lean_object* x_30; lean_object* x_31; lean_object* x_32; lean_object* x_33; lean_object* x_34; +x_26 = lean_ctor_get(x_25, 0); +lean_inc(x_26); +x_27 = lean_ctor_get(x_25, 1); +lean_inc(x_27); +lean_dec(x_25); +x_28 = l_Lean_MessageData_ofExpr(x_26); +x_29 = l___private_Lean_Meta_Tactic_Grind_Arith_CommRing_EqCnstr_0__Lean_Meta_Grind_Arith_CommRing_toRingExpr_x3f___closed__5; +lean_ctor_set_tag(x_14, 7); +lean_ctor_set(x_14, 1, x_28); +lean_ctor_set(x_14, 0, x_29); +x_30 = lean_alloc_ctor(7, 2, 0); +lean_ctor_set(x_30, 0, x_14); +lean_ctor_set(x_30, 1, x_29); +x_31 = l_Lean_addTrace___at_Lean_Meta_Grind_Arith_CommRing_Null_setEqUnsat___spec__3(x_13, x_30, x_3, x_4, x_5, x_6, x_7, x_8, x_9, x_10, x_11, x_27); +x_32 = lean_ctor_get(x_31, 0); +lean_inc(x_32); +x_33 = lean_ctor_get(x_31, 1); +lean_inc(x_33); +lean_dec(x_31); +x_34 = l_Lean_Meta_Grind_Arith_CommRing_EqCnstr_addToQueue___lambda__1(x_1, x_32, x_3, x_4, x_5, x_6, x_7, x_8, x_9, x_10, x_11, x_33); +lean_dec(x_32); +return x_34; +} +else +{ +uint8_t x_35; +lean_free_object(x_14); +lean_dec(x_1); +x_35 = !lean_is_exclusive(x_25); +if (x_35 == 0) +{ +return x_25; +} +else +{ +lean_object* x_36; lean_object* 
x_37; lean_object* x_38; +x_36 = lean_ctor_get(x_25, 0); +x_37 = lean_ctor_get(x_25, 1); +lean_inc(x_37); +lean_inc(x_36); +lean_dec(x_25); +x_38 = lean_alloc_ctor(1, 2, 0); +lean_ctor_set(x_38, 0, x_36); +lean_ctor_set(x_38, 1, x_37); +return x_38; +} +} +} +else +{ +uint8_t x_39; +lean_free_object(x_14); +lean_dec(x_1); +x_39 = !lean_is_exclusive(x_23); +if (x_39 == 0) +{ +return x_23; +} +else +{ +lean_object* x_40; lean_object* x_41; lean_object* x_42; +x_40 = lean_ctor_get(x_23, 0); +x_41 = lean_ctor_get(x_23, 1); +lean_inc(x_41); +lean_inc(x_40); +lean_dec(x_23); +x_42 = lean_alloc_ctor(1, 2, 0); +lean_ctor_set(x_42, 0, x_40); +lean_ctor_set(x_42, 1, x_41); +return x_42; +} +} +} +else +{ +lean_object* x_43; lean_object* x_44; +x_43 = lean_ctor_get(x_14, 1); +lean_inc(x_43); +lean_dec(x_14); +x_44 = l_Lean_Meta_Grind_updateLastTag(x_4, x_5, x_6, x_7, x_8, x_9, x_10, x_11, x_43); +if (lean_obj_tag(x_44) == 0) +{ +lean_object* x_45; lean_object* x_46; +x_45 = lean_ctor_get(x_44, 1); +lean_inc(x_45); +lean_dec(x_44); +lean_inc(x_1); +x_46 = l_Lean_Meta_Grind_Arith_CommRing_EqCnstr_denoteExpr___at_Lean_Meta_Grind_Arith_CommRing_Null_setEqUnsat___spec__4(x_1, x_3, x_4, x_5, x_6, x_7, x_8, x_9, x_10, x_11, x_45); +if (lean_obj_tag(x_46) == 0) +{ +lean_object* x_47; lean_object* x_48; lean_object* x_49; lean_object* x_50; lean_object* x_51; lean_object* x_52; lean_object* x_53; lean_object* x_54; lean_object* x_55; lean_object* x_56; +x_47 = lean_ctor_get(x_46, 0); +lean_inc(x_47); +x_48 = lean_ctor_get(x_46, 1); +lean_inc(x_48); +lean_dec(x_46); +x_49 = l_Lean_MessageData_ofExpr(x_47); +x_50 = l___private_Lean_Meta_Tactic_Grind_Arith_CommRing_EqCnstr_0__Lean_Meta_Grind_Arith_CommRing_toRingExpr_x3f___closed__5; +x_51 = lean_alloc_ctor(7, 2, 0); +lean_ctor_set(x_51, 0, x_50); +lean_ctor_set(x_51, 1, x_49); +x_52 = lean_alloc_ctor(7, 2, 0); +lean_ctor_set(x_52, 0, x_51); +lean_ctor_set(x_52, 1, x_50); +x_53 = l_Lean_addTrace___at_Lean_Meta_Grind_Arith_CommRing_Null_setEqUnsat___spec__3(x_13, x_52, x_3, x_4, x_5, x_6, x_7, x_8, x_9, x_10, x_11, x_48); +x_54 = lean_ctor_get(x_53, 0); +lean_inc(x_54); +x_55 = lean_ctor_get(x_53, 1); +lean_inc(x_55); +lean_dec(x_53); +x_56 = l_Lean_Meta_Grind_Arith_CommRing_EqCnstr_addToQueue___lambda__1(x_1, x_54, x_3, x_4, x_5, x_6, x_7, x_8, x_9, x_10, x_11, x_55); +lean_dec(x_54); +return x_56; +} +else +{ +lean_object* x_57; lean_object* x_58; lean_object* x_59; lean_object* x_60; +lean_dec(x_1); +x_57 = lean_ctor_get(x_46, 0); +lean_inc(x_57); +x_58 = lean_ctor_get(x_46, 1); +lean_inc(x_58); +if (lean_is_exclusive(x_46)) { + lean_ctor_release(x_46, 0); + lean_ctor_release(x_46, 1); + x_59 = x_46; +} else { + lean_dec_ref(x_46); + x_59 = lean_box(0); +} +if (lean_is_scalar(x_59)) { + x_60 = lean_alloc_ctor(1, 2, 0); +} else { + x_60 = x_59; +} +lean_ctor_set(x_60, 0, x_57); +lean_ctor_set(x_60, 1, x_58); +return x_60; +} +} +else +{ +lean_object* x_61; lean_object* x_62; lean_object* x_63; lean_object* x_64; +lean_dec(x_1); +x_61 = lean_ctor_get(x_44, 0); +lean_inc(x_61); +x_62 = lean_ctor_get(x_44, 1); +lean_inc(x_62); +if (lean_is_exclusive(x_44)) { + lean_ctor_release(x_44, 0); + lean_ctor_release(x_44, 1); + x_63 = x_44; +} else { + lean_dec_ref(x_44); + x_63 = lean_box(0); +} +if (lean_is_scalar(x_63)) { + x_64 = lean_alloc_ctor(1, 2, 0); +} else { + x_64 = x_63; +} +lean_ctor_set(x_64, 0, x_61); +lean_ctor_set(x_64, 1, x_62); +return x_64; +} +} +} +} +} +LEAN_EXPORT lean_object* l_Lean_Meta_Grind_Arith_CommRing_EqCnstr_addToQueue(lean_object* x_1, 
lean_object* x_2, lean_object* x_3, lean_object* x_4, lean_object* x_5, lean_object* x_6, lean_object* x_7, lean_object* x_8, lean_object* x_9, lean_object* x_10, lean_object* x_11) { +_start: +{ +lean_object* x_12; lean_object* x_13; uint8_t x_14; +x_12 = l_Lean_Meta_Grind_Arith_CommRing_checkMaxSteps(x_3, x_4, x_5, x_6, x_7, x_8, x_9, x_10, x_11); +x_13 = lean_ctor_get(x_12, 0); +lean_inc(x_13); +x_14 = lean_unbox(x_13); +lean_dec(x_13); +if (x_14 == 0) +{ +lean_object* x_15; lean_object* x_16; lean_object* x_17; +x_15 = lean_ctor_get(x_12, 1); +lean_inc(x_15); +lean_dec(x_12); +x_16 = lean_box(0); +x_17 = l_Lean_Meta_Grind_Arith_CommRing_EqCnstr_addToQueue___lambda__2(x_1, x_16, x_2, x_3, x_4, x_5, x_6, x_7, x_8, x_9, x_10, x_15); +return x_17; +} +else +{ +uint8_t x_18; +lean_dec(x_1); +x_18 = !lean_is_exclusive(x_12); +if (x_18 == 0) +{ +lean_object* x_19; lean_object* x_20; +x_19 = lean_ctor_get(x_12, 0); +lean_dec(x_19); +x_20 = lean_box(0); +lean_ctor_set(x_12, 0, x_20); +return x_12; +} +else +{ +lean_object* x_21; lean_object* x_22; lean_object* x_23; +x_21 = lean_ctor_get(x_12, 1); +lean_inc(x_21); +lean_dec(x_12); +x_22 = lean_box(0); +x_23 = lean_alloc_ctor(0, 2, 0); +lean_ctor_set(x_23, 0, x_22); +lean_ctor_set(x_23, 1, x_21); +return x_23; +} +} +} +} +LEAN_EXPORT lean_object* l_Lean_Meta_Grind_Arith_CommRing_EqCnstr_addToQueue___lambda__1___boxed(lean_object* x_1, lean_object* x_2, lean_object* x_3, lean_object* x_4, lean_object* x_5, lean_object* x_6, lean_object* x_7, lean_object* x_8, lean_object* x_9, lean_object* x_10, lean_object* x_11, lean_object* x_12) { +_start: +{ +lean_object* x_13; +x_13 = l_Lean_Meta_Grind_Arith_CommRing_EqCnstr_addToQueue___lambda__1(x_1, x_2, x_3, x_4, x_5, x_6, x_7, x_8, x_9, x_10, x_11, x_12); +lean_dec(x_11); +lean_dec(x_10); +lean_dec(x_9); +lean_dec(x_8); +lean_dec(x_7); +lean_dec(x_6); +lean_dec(x_5); +lean_dec(x_4); +lean_dec(x_3); +lean_dec(x_2); +return x_13; +} +} +LEAN_EXPORT lean_object* l_Lean_Meta_Grind_Arith_CommRing_EqCnstr_addToQueue___lambda__2___boxed(lean_object* x_1, lean_object* x_2, lean_object* x_3, lean_object* x_4, lean_object* x_5, lean_object* x_6, lean_object* x_7, lean_object* x_8, lean_object* x_9, lean_object* x_10, lean_object* x_11, lean_object* x_12) { +_start: +{ +lean_object* x_13; +x_13 = l_Lean_Meta_Grind_Arith_CommRing_EqCnstr_addToQueue___lambda__2(x_1, x_2, x_3, x_4, x_5, x_6, x_7, x_8, x_9, x_10, x_11, x_12); +lean_dec(x_11); +lean_dec(x_10); +lean_dec(x_9); +lean_dec(x_8); +lean_dec(x_7); +lean_dec(x_6); +lean_dec(x_5); +lean_dec(x_4); +lean_dec(x_3); +lean_dec(x_2); +return x_13; +} +} +LEAN_EXPORT lean_object* l_Lean_Meta_Grind_Arith_CommRing_EqCnstr_addToQueue___boxed(lean_object* x_1, lean_object* x_2, lean_object* x_3, lean_object* x_4, lean_object* x_5, lean_object* x_6, lean_object* x_7, lean_object* x_8, lean_object* x_9, lean_object* x_10, lean_object* x_11) { +_start: +{ +lean_object* x_12; +x_12 = l_Lean_Meta_Grind_Arith_CommRing_EqCnstr_addToQueue(x_1, x_2, x_3, x_4, x_5, x_6, x_7, x_8, x_9, x_10, x_11); +lean_dec(x_10); +lean_dec(x_9); +lean_dec(x_8); +lean_dec(x_7); +lean_dec(x_6); +lean_dec(x_5); +lean_dec(x_4); +lean_dec(x_3); +lean_dec(x_2); +return x_12; +} +} +static lean_object* _init_l_List_forIn_x27_loop___at_Lean_Meta_Grind_Arith_CommRing_EqCnstr_superposeWith_go___spec__1___lambda__1___closed__1() { +_start: +{ +lean_object* x_1; lean_object* x_2; +x_1 = lean_box(0); +x_2 = lean_alloc_ctor(1, 1, 0); +lean_ctor_set(x_2, 0, x_1); +return x_2; +} +} +LEAN_EXPORT lean_object* 
l_List_forIn_x27_loop___at_Lean_Meta_Grind_Arith_CommRing_EqCnstr_superposeWith_go___spec__1___lambda__1(lean_object* x_1, lean_object* x_2, lean_object* x_3, lean_object* x_4, lean_object* x_5, lean_object* x_6, lean_object* x_7, lean_object* x_8, lean_object* x_9, lean_object* x_10, lean_object* x_11, lean_object* x_12, lean_object* x_13, lean_object* x_14) { +_start: +{ +lean_object* x_15; lean_object* x_16; lean_object* x_17; lean_object* x_18; lean_object* x_19; lean_object* x_20; lean_object* x_21; +x_15 = lean_ctor_get(x_1, 0); +lean_inc(x_15); +x_16 = lean_ctor_get(x_1, 1); +lean_inc(x_16); +x_17 = lean_ctor_get(x_1, 2); +lean_inc(x_17); +x_18 = lean_ctor_get(x_1, 3); +lean_inc(x_18); +x_19 = lean_ctor_get(x_1, 4); +lean_inc(x_19); +lean_dec(x_1); +x_20 = lean_alloc_ctor(1, 6, 0); +lean_ctor_set(x_20, 0, x_16); +lean_ctor_set(x_20, 1, x_17); +lean_ctor_set(x_20, 2, x_2); +lean_ctor_set(x_20, 3, x_18); +lean_ctor_set(x_20, 4, x_19); +lean_ctor_set(x_20, 5, x_3); +x_21 = l_Lean_Meta_Grind_Arith_CommRing_mkEqCnstr(x_15, x_20, x_5, x_6, x_7, x_8, x_9, x_10, x_11, x_12, x_13, x_14); +if (lean_obj_tag(x_21) == 0) +{ +lean_object* x_22; lean_object* x_23; lean_object* x_24; +x_22 = lean_ctor_get(x_21, 0); +lean_inc(x_22); +x_23 = lean_ctor_get(x_21, 1); +lean_inc(x_23); +lean_dec(x_21); +x_24 = l_Lean_Meta_Grind_Arith_CommRing_EqCnstr_addToQueue(x_22, x_5, x_6, x_7, x_8, x_9, x_10, x_11, x_12, x_13, x_23); +if (lean_obj_tag(x_24) == 0) +{ +uint8_t x_25; +x_25 = !lean_is_exclusive(x_24); +if (x_25 == 0) +{ +lean_object* x_26; lean_object* x_27; +x_26 = lean_ctor_get(x_24, 0); +lean_dec(x_26); +x_27 = l_List_forIn_x27_loop___at_Lean_Meta_Grind_Arith_CommRing_EqCnstr_superposeWith_go___spec__1___lambda__1___closed__1; +lean_ctor_set(x_24, 0, x_27); +return x_24; +} +else +{ +lean_object* x_28; lean_object* x_29; lean_object* x_30; +x_28 = lean_ctor_get(x_24, 1); +lean_inc(x_28); +lean_dec(x_24); +x_29 = l_List_forIn_x27_loop___at_Lean_Meta_Grind_Arith_CommRing_EqCnstr_superposeWith_go___spec__1___lambda__1___closed__1; +x_30 = lean_alloc_ctor(0, 2, 0); +lean_ctor_set(x_30, 0, x_29); +lean_ctor_set(x_30, 1, x_28); +return x_30; +} +} +else +{ +uint8_t x_31; +x_31 = !lean_is_exclusive(x_24); +if (x_31 == 0) +{ +return x_24; +} +else +{ +lean_object* x_32; lean_object* x_33; lean_object* x_34; +x_32 = lean_ctor_get(x_24, 0); +x_33 = lean_ctor_get(x_24, 1); +lean_inc(x_33); +lean_inc(x_32); +lean_dec(x_24); +x_34 = lean_alloc_ctor(1, 2, 0); +lean_ctor_set(x_34, 0, x_32); +lean_ctor_set(x_34, 1, x_33); +return x_34; +} +} +} +else +{ +uint8_t x_35; +x_35 = !lean_is_exclusive(x_21); +if (x_35 == 0) +{ +return x_21; +} +else +{ +lean_object* x_36; lean_object* x_37; lean_object* x_38; +x_36 = lean_ctor_get(x_21, 0); +x_37 = lean_ctor_get(x_21, 1); +lean_inc(x_37); +lean_inc(x_36); +lean_dec(x_21); +x_38 = lean_alloc_ctor(1, 2, 0); +lean_ctor_set(x_38, 0, x_36); +lean_ctor_set(x_38, 1, x_37); +return x_38; +} +} +} +} +static lean_object* _init_l_List_forIn_x27_loop___at_Lean_Meta_Grind_Arith_CommRing_EqCnstr_superposeWith_go___spec__1___closed__1() { +_start: +{ +lean_object* x_1; +x_1 = lean_mk_string_unchecked("superpose", 9, 9); +return x_1; +} +} +static lean_object* _init_l_List_forIn_x27_loop___at_Lean_Meta_Grind_Arith_CommRing_EqCnstr_superposeWith_go___spec__1___closed__2() { +_start: +{ +lean_object* x_1; lean_object* x_2; lean_object* x_3; lean_object* x_4; +x_1 = l_Lean_Meta_Grind_Arith_CommRing_PolyDerivation_simplifyWith___closed__1; +x_2 = 
l_Lean_Meta_Grind_Arith_CommRing_PolyDerivation_simplifyWith___closed__2; +x_3 = l_List_forIn_x27_loop___at_Lean_Meta_Grind_Arith_CommRing_EqCnstr_superposeWith_go___spec__1___closed__1; +x_4 = l_Lean_Name_mkStr3(x_1, x_2, x_3); +return x_4; +} +} +static lean_object* _init_l_List_forIn_x27_loop___at_Lean_Meta_Grind_Arith_CommRing_EqCnstr_superposeWith_go___spec__1___closed__3() { +_start: +{ +lean_object* x_1; +x_1 = lean_mk_string_unchecked("\nwith: ", 7, 7); +return x_1; +} +} +static lean_object* _init_l_List_forIn_x27_loop___at_Lean_Meta_Grind_Arith_CommRing_EqCnstr_superposeWith_go___spec__1___closed__4() { +_start: +{ +lean_object* x_1; lean_object* x_2; +x_1 = l_List_forIn_x27_loop___at_Lean_Meta_Grind_Arith_CommRing_EqCnstr_superposeWith_go___spec__1___closed__3; +x_2 = l_Lean_stringToMessageData(x_1); +return x_2; +} +} +static lean_object* _init_l_List_forIn_x27_loop___at_Lean_Meta_Grind_Arith_CommRing_EqCnstr_superposeWith_go___spec__1___closed__5() { +_start: +{ +lean_object* x_1; +x_1 = lean_mk_string_unchecked("\nresult: ", 9, 9); +return x_1; +} +} +static lean_object* _init_l_List_forIn_x27_loop___at_Lean_Meta_Grind_Arith_CommRing_EqCnstr_superposeWith_go___spec__1___closed__6() { +_start: +{ +lean_object* x_1; lean_object* x_2; +x_1 = l_List_forIn_x27_loop___at_Lean_Meta_Grind_Arith_CommRing_EqCnstr_superposeWith_go___spec__1___closed__5; +x_2 = l_Lean_stringToMessageData(x_1); +return x_2; +} +} +static lean_object* _init_l_List_forIn_x27_loop___at_Lean_Meta_Grind_Arith_CommRing_EqCnstr_superposeWith_go___spec__1___closed__7() { +_start: +{ +lean_object* x_1; +x_1 = lean_mk_string_unchecked(" = 0", 4, 4); +return x_1; +} +} +static lean_object* _init_l_List_forIn_x27_loop___at_Lean_Meta_Grind_Arith_CommRing_EqCnstr_superposeWith_go___spec__1___closed__8() { +_start: +{ +lean_object* x_1; lean_object* x_2; +x_1 = l_List_forIn_x27_loop___at_Lean_Meta_Grind_Arith_CommRing_EqCnstr_superposeWith_go___spec__1___closed__7; +x_2 = l_Lean_stringToMessageData(x_1); +return x_2; +} +} +LEAN_EXPORT lean_object* l_List_forIn_x27_loop___at_Lean_Meta_Grind_Arith_CommRing_EqCnstr_superposeWith_go___spec__1(lean_object* x_1, lean_object* x_2, lean_object* x_3, lean_object* x_4, lean_object* x_5, lean_object* x_6, lean_object* x_7, lean_object* x_8, lean_object* x_9, lean_object* x_10, lean_object* x_11, lean_object* x_12, lean_object* x_13, lean_object* x_14, lean_object* x_15, lean_object* x_16, lean_object* x_17) { +_start: +{ +if (lean_obj_tag(x_5) == 0) +{ +lean_object* x_18; +lean_dec(x_16); +lean_dec(x_15); +lean_dec(x_14); +lean_dec(x_13); +lean_dec(x_12); +lean_dec(x_11); +lean_dec(x_10); +lean_dec(x_9); +lean_dec(x_8); +lean_dec(x_1); +x_18 = lean_alloc_ctor(0, 2, 0); +lean_ctor_set(x_18, 0, x_6); +lean_ctor_set(x_18, 1, x_17); +return x_18; +} +else +{ +lean_object* x_19; lean_object* x_20; lean_object* x_21; lean_object* x_22; lean_object* x_23; lean_object* x_29; lean_object* x_30; lean_object* x_31; +lean_dec(x_6); +x_19 = lean_ctor_get(x_5, 0); +lean_inc(x_19); +x_20 = lean_ctor_get(x_5, 1); +lean_inc(x_20); +if (lean_is_exclusive(x_5)) { + lean_ctor_release(x_5, 0); + lean_ctor_release(x_5, 1); + x_21 = x_5; +} else { + lean_dec_ref(x_5); + x_21 = lean_box(0); +} +x_29 = lean_ctor_get(x_1, 0); +lean_inc(x_29); +x_30 = lean_ctor_get(x_19, 0); +lean_inc(x_30); +lean_inc(x_16); +lean_inc(x_15); +lean_inc(x_14); +lean_inc(x_13); +lean_inc(x_12); +lean_inc(x_11); +lean_inc(x_10); +lean_inc(x_9); +lean_inc(x_8); +x_31 = l_Lean_Grind_CommRing_Poly_spolM(x_29, x_30, x_8, x_9, 
x_10, x_11, x_12, x_13, x_14, x_15, x_16, x_17); +if (lean_obj_tag(x_31) == 0) +{ +lean_object* x_32; lean_object* x_33; lean_object* x_34; lean_object* x_35; lean_object* x_36; uint8_t x_37; +x_32 = lean_ctor_get(x_31, 0); +lean_inc(x_32); +x_33 = lean_ctor_get(x_31, 1); +lean_inc(x_33); +lean_dec(x_31); +x_34 = l_List_forIn_x27_loop___at_Lean_Meta_Grind_Arith_CommRing_EqCnstr_superposeWith_go___spec__1___closed__2; +x_35 = l_Lean_isTracingEnabledFor___at_Lean_Meta_Grind_Arith_CommRing_Null_setEqUnsat___spec__1(x_34, x_8, x_9, x_10, x_11, x_12, x_13, x_14, x_15, x_16, x_33); +x_36 = lean_ctor_get(x_35, 0); +lean_inc(x_36); +x_37 = lean_unbox(x_36); +lean_dec(x_36); +if (x_37 == 0) +{ +lean_object* x_38; lean_object* x_39; lean_object* x_40; +x_38 = lean_ctor_get(x_35, 1); +lean_inc(x_38); +lean_dec(x_35); +x_39 = lean_box(0); +lean_inc(x_1); +x_40 = l_List_forIn_x27_loop___at_Lean_Meta_Grind_Arith_CommRing_EqCnstr_superposeWith_go___spec__1___lambda__1(x_32, x_1, x_19, x_39, x_8, x_9, x_10, x_11, x_12, x_13, x_14, x_15, x_16, x_38); +if (lean_obj_tag(x_40) == 0) +{ +lean_object* x_41; lean_object* x_42; +x_41 = lean_ctor_get(x_40, 0); +lean_inc(x_41); +x_42 = lean_ctor_get(x_40, 1); +lean_inc(x_42); +lean_dec(x_40); +x_22 = x_41; +x_23 = x_42; +goto block_28; +} +else +{ +uint8_t x_43; +lean_dec(x_21); +lean_dec(x_20); +lean_dec(x_16); +lean_dec(x_15); +lean_dec(x_14); +lean_dec(x_13); +lean_dec(x_12); +lean_dec(x_11); +lean_dec(x_10); +lean_dec(x_9); +lean_dec(x_8); +lean_dec(x_1); +x_43 = !lean_is_exclusive(x_40); +if (x_43 == 0) +{ +return x_40; +} +else +{ +lean_object* x_44; lean_object* x_45; lean_object* x_46; +x_44 = lean_ctor_get(x_40, 0); +x_45 = lean_ctor_get(x_40, 1); +lean_inc(x_45); +lean_inc(x_44); +lean_dec(x_40); +x_46 = lean_alloc_ctor(1, 2, 0); +lean_ctor_set(x_46, 0, x_44); +lean_ctor_set(x_46, 1, x_45); +return x_46; +} +} +} +else +{ +uint8_t x_47; +x_47 = !lean_is_exclusive(x_35); +if (x_47 == 0) +{ +lean_object* x_48; lean_object* x_49; lean_object* x_50; +x_48 = lean_ctor_get(x_35, 1); +x_49 = lean_ctor_get(x_35, 0); +lean_dec(x_49); +x_50 = l_Lean_Meta_Grind_updateLastTag(x_9, x_10, x_11, x_12, x_13, x_14, x_15, x_16, x_48); +if (lean_obj_tag(x_50) == 0) +{ +lean_object* x_51; lean_object* x_52; +x_51 = lean_ctor_get(x_50, 1); +lean_inc(x_51); +lean_dec(x_50); +lean_inc(x_1); +x_52 = l_Lean_Meta_Grind_Arith_CommRing_EqCnstr_denoteExpr___at_Lean_Meta_Grind_Arith_CommRing_Null_setEqUnsat___spec__4(x_1, x_8, x_9, x_10, x_11, x_12, x_13, x_14, x_15, x_16, x_51); +if (lean_obj_tag(x_52) == 0) +{ +lean_object* x_53; lean_object* x_54; lean_object* x_55; +x_53 = lean_ctor_get(x_52, 0); +lean_inc(x_53); +x_54 = lean_ctor_get(x_52, 1); +lean_inc(x_54); +lean_dec(x_52); +lean_inc(x_19); +x_55 = l_Lean_Meta_Grind_Arith_CommRing_EqCnstr_denoteExpr___at_Lean_Meta_Grind_Arith_CommRing_Null_setEqUnsat___spec__4(x_19, x_8, x_9, x_10, x_11, x_12, x_13, x_14, x_15, x_16, x_54); +if (lean_obj_tag(x_55) == 0) +{ +lean_object* x_56; lean_object* x_57; lean_object* x_58; lean_object* x_59; +x_56 = lean_ctor_get(x_55, 0); +lean_inc(x_56); +x_57 = lean_ctor_get(x_55, 1); +lean_inc(x_57); +lean_dec(x_55); +x_58 = lean_ctor_get(x_32, 0); +lean_inc(x_58); +x_59 = l_Lean_Grind_CommRing_Poly_denoteExpr___at_Lean_Meta_Grind_Arith_CommRing_getPolyConst___spec__1(x_58, x_8, x_9, x_10, x_11, x_12, x_13, x_14, x_15, x_16, x_57); +if (lean_obj_tag(x_59) == 0) +{ +lean_object* x_60; lean_object* x_61; lean_object* x_62; lean_object* x_63; lean_object* x_64; lean_object* x_65; lean_object* x_66; 
lean_object* x_67; lean_object* x_68; lean_object* x_69; lean_object* x_70; lean_object* x_71; lean_object* x_72; lean_object* x_73; lean_object* x_74; lean_object* x_75; lean_object* x_76; lean_object* x_77; +x_60 = lean_ctor_get(x_59, 0); +lean_inc(x_60); +x_61 = lean_ctor_get(x_59, 1); +lean_inc(x_61); +lean_dec(x_59); +x_62 = l_Lean_MessageData_ofExpr(x_53); +x_63 = l___private_Lean_Meta_Tactic_Grind_Arith_CommRing_EqCnstr_0__Lean_Meta_Grind_Arith_CommRing_toRingExpr_x3f___closed__5; +lean_ctor_set_tag(x_35, 7); +lean_ctor_set(x_35, 1, x_62); +lean_ctor_set(x_35, 0, x_63); +x_64 = l_List_forIn_x27_loop___at_Lean_Meta_Grind_Arith_CommRing_EqCnstr_superposeWith_go___spec__1___closed__4; +x_65 = lean_alloc_ctor(7, 2, 0); +lean_ctor_set(x_65, 0, x_35); +lean_ctor_set(x_65, 1, x_64); +x_66 = l_Lean_MessageData_ofExpr(x_56); +x_67 = lean_alloc_ctor(7, 2, 0); +lean_ctor_set(x_67, 0, x_65); +lean_ctor_set(x_67, 1, x_66); +x_68 = l_List_forIn_x27_loop___at_Lean_Meta_Grind_Arith_CommRing_EqCnstr_superposeWith_go___spec__1___closed__6; +x_69 = lean_alloc_ctor(7, 2, 0); +lean_ctor_set(x_69, 0, x_67); +lean_ctor_set(x_69, 1, x_68); +x_70 = l_Lean_MessageData_ofExpr(x_60); +x_71 = lean_alloc_ctor(7, 2, 0); +lean_ctor_set(x_71, 0, x_69); +lean_ctor_set(x_71, 1, x_70); +x_72 = l_List_forIn_x27_loop___at_Lean_Meta_Grind_Arith_CommRing_EqCnstr_superposeWith_go___spec__1___closed__8; +x_73 = lean_alloc_ctor(7, 2, 0); +lean_ctor_set(x_73, 0, x_71); +lean_ctor_set(x_73, 1, x_72); +x_74 = l_Lean_addTrace___at_Lean_Meta_Grind_Arith_CommRing_Null_setEqUnsat___spec__3(x_34, x_73, x_8, x_9, x_10, x_11, x_12, x_13, x_14, x_15, x_16, x_61); +x_75 = lean_ctor_get(x_74, 0); +lean_inc(x_75); +x_76 = lean_ctor_get(x_74, 1); +lean_inc(x_76); +lean_dec(x_74); +lean_inc(x_1); +x_77 = l_List_forIn_x27_loop___at_Lean_Meta_Grind_Arith_CommRing_EqCnstr_superposeWith_go___spec__1___lambda__1(x_32, x_1, x_19, x_75, x_8, x_9, x_10, x_11, x_12, x_13, x_14, x_15, x_16, x_76); +lean_dec(x_75); +if (lean_obj_tag(x_77) == 0) +{ +lean_object* x_78; lean_object* x_79; +x_78 = lean_ctor_get(x_77, 0); +lean_inc(x_78); +x_79 = lean_ctor_get(x_77, 1); +lean_inc(x_79); +lean_dec(x_77); +x_22 = x_78; +x_23 = x_79; +goto block_28; +} +else +{ +uint8_t x_80; +lean_dec(x_21); +lean_dec(x_20); +lean_dec(x_16); +lean_dec(x_15); +lean_dec(x_14); +lean_dec(x_13); +lean_dec(x_12); +lean_dec(x_11); +lean_dec(x_10); +lean_dec(x_9); +lean_dec(x_8); +lean_dec(x_1); +x_80 = !lean_is_exclusive(x_77); +if (x_80 == 0) +{ +return x_77; +} +else +{ +lean_object* x_81; lean_object* x_82; lean_object* x_83; +x_81 = lean_ctor_get(x_77, 0); +x_82 = lean_ctor_get(x_77, 1); +lean_inc(x_82); +lean_inc(x_81); +lean_dec(x_77); +x_83 = lean_alloc_ctor(1, 2, 0); +lean_ctor_set(x_83, 0, x_81); +lean_ctor_set(x_83, 1, x_82); +return x_83; +} +} +} +else +{ +uint8_t x_84; +lean_dec(x_56); +lean_dec(x_53); +lean_free_object(x_35); +lean_dec(x_32); +lean_dec(x_21); +lean_dec(x_20); +lean_dec(x_19); +lean_dec(x_16); +lean_dec(x_15); +lean_dec(x_14); +lean_dec(x_13); +lean_dec(x_12); +lean_dec(x_11); +lean_dec(x_10); +lean_dec(x_9); +lean_dec(x_8); +lean_dec(x_1); +x_84 = !lean_is_exclusive(x_59); +if (x_84 == 0) +{ +return x_59; +} +else +{ +lean_object* x_85; lean_object* x_86; lean_object* x_87; +x_85 = lean_ctor_get(x_59, 0); +x_86 = lean_ctor_get(x_59, 1); +lean_inc(x_86); +lean_inc(x_85); +lean_dec(x_59); +x_87 = lean_alloc_ctor(1, 2, 0); +lean_ctor_set(x_87, 0, x_85); +lean_ctor_set(x_87, 1, x_86); +return x_87; +} +} +} +else +{ +uint8_t x_88; +lean_dec(x_53); 
+lean_free_object(x_35); +lean_dec(x_32); +lean_dec(x_21); +lean_dec(x_20); +lean_dec(x_19); +lean_dec(x_16); +lean_dec(x_15); +lean_dec(x_14); +lean_dec(x_13); +lean_dec(x_12); +lean_dec(x_11); +lean_dec(x_10); +lean_dec(x_9); +lean_dec(x_8); +lean_dec(x_1); +x_88 = !lean_is_exclusive(x_55); +if (x_88 == 0) +{ +return x_55; +} +else +{ +lean_object* x_89; lean_object* x_90; lean_object* x_91; +x_89 = lean_ctor_get(x_55, 0); +x_90 = lean_ctor_get(x_55, 1); +lean_inc(x_90); +lean_inc(x_89); +lean_dec(x_55); +x_91 = lean_alloc_ctor(1, 2, 0); +lean_ctor_set(x_91, 0, x_89); +lean_ctor_set(x_91, 1, x_90); +return x_91; +} +} +} +else +{ +uint8_t x_92; +lean_free_object(x_35); +lean_dec(x_32); +lean_dec(x_21); +lean_dec(x_20); +lean_dec(x_19); +lean_dec(x_16); +lean_dec(x_15); +lean_dec(x_14); +lean_dec(x_13); +lean_dec(x_12); +lean_dec(x_11); +lean_dec(x_10); +lean_dec(x_9); +lean_dec(x_8); +lean_dec(x_1); +x_92 = !lean_is_exclusive(x_52); +if (x_92 == 0) +{ +return x_52; +} +else +{ +lean_object* x_93; lean_object* x_94; lean_object* x_95; +x_93 = lean_ctor_get(x_52, 0); +x_94 = lean_ctor_get(x_52, 1); +lean_inc(x_94); +lean_inc(x_93); +lean_dec(x_52); +x_95 = lean_alloc_ctor(1, 2, 0); +lean_ctor_set(x_95, 0, x_93); +lean_ctor_set(x_95, 1, x_94); +return x_95; +} +} +} +else +{ +uint8_t x_96; +lean_free_object(x_35); +lean_dec(x_32); +lean_dec(x_21); +lean_dec(x_20); +lean_dec(x_19); +lean_dec(x_16); +lean_dec(x_15); +lean_dec(x_14); +lean_dec(x_13); +lean_dec(x_12); +lean_dec(x_11); +lean_dec(x_10); +lean_dec(x_9); +lean_dec(x_8); +lean_dec(x_1); +x_96 = !lean_is_exclusive(x_50); +if (x_96 == 0) +{ +return x_50; +} +else +{ +lean_object* x_97; lean_object* x_98; lean_object* x_99; +x_97 = lean_ctor_get(x_50, 0); +x_98 = lean_ctor_get(x_50, 1); +lean_inc(x_98); +lean_inc(x_97); +lean_dec(x_50); +x_99 = lean_alloc_ctor(1, 2, 0); +lean_ctor_set(x_99, 0, x_97); +lean_ctor_set(x_99, 1, x_98); +return x_99; +} +} +} +else +{ +lean_object* x_100; lean_object* x_101; +x_100 = lean_ctor_get(x_35, 1); +lean_inc(x_100); +lean_dec(x_35); +x_101 = l_Lean_Meta_Grind_updateLastTag(x_9, x_10, x_11, x_12, x_13, x_14, x_15, x_16, x_100); +if (lean_obj_tag(x_101) == 0) +{ +lean_object* x_102; lean_object* x_103; +x_102 = lean_ctor_get(x_101, 1); +lean_inc(x_102); +lean_dec(x_101); +lean_inc(x_1); +x_103 = l_Lean_Meta_Grind_Arith_CommRing_EqCnstr_denoteExpr___at_Lean_Meta_Grind_Arith_CommRing_Null_setEqUnsat___spec__4(x_1, x_8, x_9, x_10, x_11, x_12, x_13, x_14, x_15, x_16, x_102); +if (lean_obj_tag(x_103) == 0) +{ +lean_object* x_104; lean_object* x_105; lean_object* x_106; +x_104 = lean_ctor_get(x_103, 0); +lean_inc(x_104); +x_105 = lean_ctor_get(x_103, 1); +lean_inc(x_105); +lean_dec(x_103); +lean_inc(x_19); +x_106 = l_Lean_Meta_Grind_Arith_CommRing_EqCnstr_denoteExpr___at_Lean_Meta_Grind_Arith_CommRing_Null_setEqUnsat___spec__4(x_19, x_8, x_9, x_10, x_11, x_12, x_13, x_14, x_15, x_16, x_105); +if (lean_obj_tag(x_106) == 0) +{ +lean_object* x_107; lean_object* x_108; lean_object* x_109; lean_object* x_110; +x_107 = lean_ctor_get(x_106, 0); +lean_inc(x_107); +x_108 = lean_ctor_get(x_106, 1); +lean_inc(x_108); +lean_dec(x_106); +x_109 = lean_ctor_get(x_32, 0); +lean_inc(x_109); +x_110 = l_Lean_Grind_CommRing_Poly_denoteExpr___at_Lean_Meta_Grind_Arith_CommRing_getPolyConst___spec__1(x_109, x_8, x_9, x_10, x_11, x_12, x_13, x_14, x_15, x_16, x_108); +if (lean_obj_tag(x_110) == 0) +{ +lean_object* x_111; lean_object* x_112; lean_object* x_113; lean_object* x_114; lean_object* x_115; lean_object* x_116; 
lean_object* x_117; lean_object* x_118; lean_object* x_119; lean_object* x_120; lean_object* x_121; lean_object* x_122; lean_object* x_123; lean_object* x_124; lean_object* x_125; lean_object* x_126; lean_object* x_127; lean_object* x_128; lean_object* x_129; +x_111 = lean_ctor_get(x_110, 0); +lean_inc(x_111); +x_112 = lean_ctor_get(x_110, 1); +lean_inc(x_112); +lean_dec(x_110); +x_113 = l_Lean_MessageData_ofExpr(x_104); +x_114 = l___private_Lean_Meta_Tactic_Grind_Arith_CommRing_EqCnstr_0__Lean_Meta_Grind_Arith_CommRing_toRingExpr_x3f___closed__5; +x_115 = lean_alloc_ctor(7, 2, 0); +lean_ctor_set(x_115, 0, x_114); +lean_ctor_set(x_115, 1, x_113); +x_116 = l_List_forIn_x27_loop___at_Lean_Meta_Grind_Arith_CommRing_EqCnstr_superposeWith_go___spec__1___closed__4; +x_117 = lean_alloc_ctor(7, 2, 0); +lean_ctor_set(x_117, 0, x_115); +lean_ctor_set(x_117, 1, x_116); +x_118 = l_Lean_MessageData_ofExpr(x_107); +x_119 = lean_alloc_ctor(7, 2, 0); +lean_ctor_set(x_119, 0, x_117); +lean_ctor_set(x_119, 1, x_118); +x_120 = l_List_forIn_x27_loop___at_Lean_Meta_Grind_Arith_CommRing_EqCnstr_superposeWith_go___spec__1___closed__6; +x_121 = lean_alloc_ctor(7, 2, 0); +lean_ctor_set(x_121, 0, x_119); +lean_ctor_set(x_121, 1, x_120); +x_122 = l_Lean_MessageData_ofExpr(x_111); +x_123 = lean_alloc_ctor(7, 2, 0); +lean_ctor_set(x_123, 0, x_121); +lean_ctor_set(x_123, 1, x_122); +x_124 = l_List_forIn_x27_loop___at_Lean_Meta_Grind_Arith_CommRing_EqCnstr_superposeWith_go___spec__1___closed__8; +x_125 = lean_alloc_ctor(7, 2, 0); +lean_ctor_set(x_125, 0, x_123); +lean_ctor_set(x_125, 1, x_124); +x_126 = l_Lean_addTrace___at_Lean_Meta_Grind_Arith_CommRing_Null_setEqUnsat___spec__3(x_34, x_125, x_8, x_9, x_10, x_11, x_12, x_13, x_14, x_15, x_16, x_112); +x_127 = lean_ctor_get(x_126, 0); +lean_inc(x_127); +x_128 = lean_ctor_get(x_126, 1); +lean_inc(x_128); +lean_dec(x_126); +lean_inc(x_1); +x_129 = l_List_forIn_x27_loop___at_Lean_Meta_Grind_Arith_CommRing_EqCnstr_superposeWith_go___spec__1___lambda__1(x_32, x_1, x_19, x_127, x_8, x_9, x_10, x_11, x_12, x_13, x_14, x_15, x_16, x_128); +lean_dec(x_127); +if (lean_obj_tag(x_129) == 0) +{ +lean_object* x_130; lean_object* x_131; +x_130 = lean_ctor_get(x_129, 0); +lean_inc(x_130); +x_131 = lean_ctor_get(x_129, 1); +lean_inc(x_131); +lean_dec(x_129); +x_22 = x_130; +x_23 = x_131; +goto block_28; +} +else +{ +lean_object* x_132; lean_object* x_133; lean_object* x_134; lean_object* x_135; +lean_dec(x_21); +lean_dec(x_20); +lean_dec(x_16); +lean_dec(x_15); +lean_dec(x_14); +lean_dec(x_13); +lean_dec(x_12); +lean_dec(x_11); +lean_dec(x_10); +lean_dec(x_9); +lean_dec(x_8); +lean_dec(x_1); +x_132 = lean_ctor_get(x_129, 0); +lean_inc(x_132); +x_133 = lean_ctor_get(x_129, 1); +lean_inc(x_133); +if (lean_is_exclusive(x_129)) { + lean_ctor_release(x_129, 0); + lean_ctor_release(x_129, 1); + x_134 = x_129; +} else { + lean_dec_ref(x_129); + x_134 = lean_box(0); +} +if (lean_is_scalar(x_134)) { + x_135 = lean_alloc_ctor(1, 2, 0); +} else { + x_135 = x_134; +} +lean_ctor_set(x_135, 0, x_132); +lean_ctor_set(x_135, 1, x_133); +return x_135; +} +} +else +{ +lean_object* x_136; lean_object* x_137; lean_object* x_138; lean_object* x_139; +lean_dec(x_107); +lean_dec(x_104); +lean_dec(x_32); +lean_dec(x_21); +lean_dec(x_20); +lean_dec(x_19); +lean_dec(x_16); +lean_dec(x_15); +lean_dec(x_14); +lean_dec(x_13); +lean_dec(x_12); +lean_dec(x_11); +lean_dec(x_10); +lean_dec(x_9); +lean_dec(x_8); +lean_dec(x_1); +x_136 = lean_ctor_get(x_110, 0); +lean_inc(x_136); +x_137 = lean_ctor_get(x_110, 1); 
+lean_inc(x_137); +if (lean_is_exclusive(x_110)) { + lean_ctor_release(x_110, 0); + lean_ctor_release(x_110, 1); + x_138 = x_110; +} else { + lean_dec_ref(x_110); + x_138 = lean_box(0); +} +if (lean_is_scalar(x_138)) { + x_139 = lean_alloc_ctor(1, 2, 0); +} else { + x_139 = x_138; +} +lean_ctor_set(x_139, 0, x_136); +lean_ctor_set(x_139, 1, x_137); +return x_139; +} +} +else +{ +lean_object* x_140; lean_object* x_141; lean_object* x_142; lean_object* x_143; +lean_dec(x_104); +lean_dec(x_32); +lean_dec(x_21); +lean_dec(x_20); +lean_dec(x_19); +lean_dec(x_16); +lean_dec(x_15); +lean_dec(x_14); +lean_dec(x_13); +lean_dec(x_12); +lean_dec(x_11); +lean_dec(x_10); +lean_dec(x_9); +lean_dec(x_8); +lean_dec(x_1); +x_140 = lean_ctor_get(x_106, 0); +lean_inc(x_140); +x_141 = lean_ctor_get(x_106, 1); +lean_inc(x_141); +if (lean_is_exclusive(x_106)) { + lean_ctor_release(x_106, 0); + lean_ctor_release(x_106, 1); + x_142 = x_106; +} else { + lean_dec_ref(x_106); + x_142 = lean_box(0); +} +if (lean_is_scalar(x_142)) { + x_143 = lean_alloc_ctor(1, 2, 0); +} else { + x_143 = x_142; +} +lean_ctor_set(x_143, 0, x_140); +lean_ctor_set(x_143, 1, x_141); +return x_143; +} +} +else +{ +lean_object* x_144; lean_object* x_145; lean_object* x_146; lean_object* x_147; +lean_dec(x_32); +lean_dec(x_21); +lean_dec(x_20); +lean_dec(x_19); +lean_dec(x_16); +lean_dec(x_15); +lean_dec(x_14); +lean_dec(x_13); +lean_dec(x_12); +lean_dec(x_11); +lean_dec(x_10); +lean_dec(x_9); +lean_dec(x_8); +lean_dec(x_1); +x_144 = lean_ctor_get(x_103, 0); +lean_inc(x_144); +x_145 = lean_ctor_get(x_103, 1); +lean_inc(x_145); +if (lean_is_exclusive(x_103)) { + lean_ctor_release(x_103, 0); + lean_ctor_release(x_103, 1); + x_146 = x_103; +} else { + lean_dec_ref(x_103); + x_146 = lean_box(0); +} +if (lean_is_scalar(x_146)) { + x_147 = lean_alloc_ctor(1, 2, 0); +} else { + x_147 = x_146; +} +lean_ctor_set(x_147, 0, x_144); +lean_ctor_set(x_147, 1, x_145); +return x_147; +} +} +else +{ +lean_object* x_148; lean_object* x_149; lean_object* x_150; lean_object* x_151; +lean_dec(x_32); +lean_dec(x_21); +lean_dec(x_20); +lean_dec(x_19); +lean_dec(x_16); +lean_dec(x_15); +lean_dec(x_14); +lean_dec(x_13); +lean_dec(x_12); +lean_dec(x_11); +lean_dec(x_10); +lean_dec(x_9); +lean_dec(x_8); +lean_dec(x_1); +x_148 = lean_ctor_get(x_101, 0); +lean_inc(x_148); +x_149 = lean_ctor_get(x_101, 1); +lean_inc(x_149); +if (lean_is_exclusive(x_101)) { + lean_ctor_release(x_101, 0); + lean_ctor_release(x_101, 1); + x_150 = x_101; +} else { + lean_dec_ref(x_101); + x_150 = lean_box(0); +} +if (lean_is_scalar(x_150)) { + x_151 = lean_alloc_ctor(1, 2, 0); +} else { + x_151 = x_150; +} +lean_ctor_set(x_151, 0, x_148); +lean_ctor_set(x_151, 1, x_149); +return x_151; +} +} +} +} +else +{ +uint8_t x_152; +lean_dec(x_21); +lean_dec(x_20); +lean_dec(x_19); +lean_dec(x_16); +lean_dec(x_15); +lean_dec(x_14); +lean_dec(x_13); +lean_dec(x_12); +lean_dec(x_11); +lean_dec(x_10); +lean_dec(x_9); +lean_dec(x_8); +lean_dec(x_1); +x_152 = !lean_is_exclusive(x_31); +if (x_152 == 0) +{ +return x_31; +} +else +{ +lean_object* x_153; lean_object* x_154; lean_object* x_155; +x_153 = lean_ctor_get(x_31, 0); +x_154 = lean_ctor_get(x_31, 1); +lean_inc(x_154); +lean_inc(x_153); +lean_dec(x_31); +x_155 = lean_alloc_ctor(1, 2, 0); +lean_ctor_set(x_155, 0, x_153); +lean_ctor_set(x_155, 1, x_154); +return x_155; +} +} +block_28: +{ +if (lean_obj_tag(x_22) == 0) +{ +lean_object* x_24; lean_object* x_25; +lean_dec(x_20); +lean_dec(x_16); +lean_dec(x_15); +lean_dec(x_14); +lean_dec(x_13); 
+lean_dec(x_12); +lean_dec(x_11); +lean_dec(x_10); +lean_dec(x_9); +lean_dec(x_8); +lean_dec(x_1); +x_24 = lean_ctor_get(x_22, 0); +lean_inc(x_24); +lean_dec(x_22); +if (lean_is_scalar(x_21)) { + x_25 = lean_alloc_ctor(0, 2, 0); +} else { + x_25 = x_21; + lean_ctor_set_tag(x_25, 0); +} +lean_ctor_set(x_25, 0, x_24); +lean_ctor_set(x_25, 1, x_23); +return x_25; +} +else +{ +lean_object* x_26; +lean_dec(x_21); +x_26 = lean_ctor_get(x_22, 0); +lean_inc(x_26); +lean_dec(x_22); +x_5 = x_20; +x_6 = x_26; +x_7 = lean_box(0); +x_17 = x_23; +goto _start; +} +} +} +} +} +LEAN_EXPORT lean_object* l_Lean_Meta_Grind_Arith_CommRing_EqCnstr_superposeWith_go(lean_object* x_1, lean_object* x_2, lean_object* x_3, lean_object* x_4, lean_object* x_5, lean_object* x_6, lean_object* x_7, lean_object* x_8, lean_object* x_9, lean_object* x_10, lean_object* x_11, lean_object* x_12) { +_start: +{ +if (lean_obj_tag(x_2) == 0) +{ +lean_object* x_13; lean_object* x_14; +lean_dec(x_11); +lean_dec(x_10); +lean_dec(x_9); +lean_dec(x_8); +lean_dec(x_7); +lean_dec(x_6); +lean_dec(x_5); +lean_dec(x_4); +lean_dec(x_3); +lean_dec(x_1); +x_13 = lean_box(0); +x_14 = lean_alloc_ctor(0, 2, 0); +lean_ctor_set(x_14, 0, x_13); +lean_ctor_set(x_14, 1, x_12); +return x_14; +} +else +{ +lean_object* x_15; lean_object* x_16; lean_object* x_17; lean_object* x_18; +x_15 = lean_ctor_get(x_2, 0); +x_16 = lean_ctor_get(x_2, 1); +x_17 = lean_ctor_get(x_15, 0); +x_18 = l_Lean_Meta_Grind_Arith_CommRing_RingM_getRing(x_3, x_4, x_5, x_6, x_7, x_8, x_9, x_10, x_11, x_12); +if (lean_obj_tag(x_18) == 0) +{ +lean_object* x_19; lean_object* x_20; lean_object* x_21; lean_object* x_22; lean_object* x_23; uint8_t x_24; lean_object* x_25; lean_object* x_26; +x_19 = lean_ctor_get(x_18, 0); +lean_inc(x_19); +x_20 = lean_ctor_get(x_18, 1); +lean_inc(x_20); +lean_dec(x_18); +x_21 = lean_box(0); +x_22 = lean_ctor_get(x_19, 19); +lean_inc(x_22); +lean_dec(x_19); +x_23 = lean_ctor_get(x_22, 2); +lean_inc(x_23); +x_24 = lean_nat_dec_lt(x_17, x_23); +lean_dec(x_23); +x_25 = lean_box(0); +if (x_24 == 0) +{ +lean_object* x_36; +lean_dec(x_22); +x_36 = l_outOfBounds___rarg(x_21); +x_26 = x_36; +goto block_35; +} +else +{ +lean_object* x_37; +x_37 = l_Lean_PersistentArray_get_x21___rarg(x_21, x_22, x_17); +x_26 = x_37; +goto block_35; +} +block_35: +{ +lean_object* x_27; lean_object* x_28; +x_27 = lean_box(0); +lean_inc(x_11); +lean_inc(x_10); +lean_inc(x_9); +lean_inc(x_8); +lean_inc(x_7); +lean_inc(x_6); +lean_inc(x_5); +lean_inc(x_4); +lean_inc(x_3); +lean_inc(x_26); +lean_inc(x_1); +x_28 = l_List_forIn_x27_loop___at_Lean_Meta_Grind_Arith_CommRing_EqCnstr_superposeWith_go___spec__1(x_1, x_25, x_26, x_26, x_26, x_27, lean_box(0), x_3, x_4, x_5, x_6, x_7, x_8, x_9, x_10, x_11, x_20); +lean_dec(x_26); +if (lean_obj_tag(x_28) == 0) +{ +lean_object* x_29; +x_29 = lean_ctor_get(x_28, 1); +lean_inc(x_29); +lean_dec(x_28); +x_2 = x_16; +x_12 = x_29; +goto _start; +} +else +{ +uint8_t x_31; +lean_dec(x_11); +lean_dec(x_10); +lean_dec(x_9); +lean_dec(x_8); +lean_dec(x_7); +lean_dec(x_6); +lean_dec(x_5); +lean_dec(x_4); +lean_dec(x_3); +lean_dec(x_1); +x_31 = !lean_is_exclusive(x_28); +if (x_31 == 0) +{ +return x_28; +} +else +{ +lean_object* x_32; lean_object* x_33; lean_object* x_34; +x_32 = lean_ctor_get(x_28, 0); +x_33 = lean_ctor_get(x_28, 1); +lean_inc(x_33); +lean_inc(x_32); +lean_dec(x_28); +x_34 = lean_alloc_ctor(1, 2, 0); +lean_ctor_set(x_34, 0, x_32); +lean_ctor_set(x_34, 1, x_33); +return x_34; +} +} +} +} +else +{ +uint8_t x_38; +lean_dec(x_11); +lean_dec(x_10); 
+lean_dec(x_9); +lean_dec(x_8); +lean_dec(x_7); +lean_dec(x_6); +lean_dec(x_5); +lean_dec(x_4); +lean_dec(x_3); +lean_dec(x_1); +x_38 = !lean_is_exclusive(x_18); +if (x_38 == 0) +{ +return x_18; +} +else +{ +lean_object* x_39; lean_object* x_40; lean_object* x_41; +x_39 = lean_ctor_get(x_18, 0); +x_40 = lean_ctor_get(x_18, 1); +lean_inc(x_40); +lean_inc(x_39); +lean_dec(x_18); +x_41 = lean_alloc_ctor(1, 2, 0); +lean_ctor_set(x_41, 0, x_39); +lean_ctor_set(x_41, 1, x_40); +return x_41; +} +} +} +} +} +LEAN_EXPORT lean_object* l_List_forIn_x27_loop___at_Lean_Meta_Grind_Arith_CommRing_EqCnstr_superposeWith_go___spec__1___lambda__1___boxed(lean_object* x_1, lean_object* x_2, lean_object* x_3, lean_object* x_4, lean_object* x_5, lean_object* x_6, lean_object* x_7, lean_object* x_8, lean_object* x_9, lean_object* x_10, lean_object* x_11, lean_object* x_12, lean_object* x_13, lean_object* x_14) { +_start: +{ +lean_object* x_15; +x_15 = l_List_forIn_x27_loop___at_Lean_Meta_Grind_Arith_CommRing_EqCnstr_superposeWith_go___spec__1___lambda__1(x_1, x_2, x_3, x_4, x_5, x_6, x_7, x_8, x_9, x_10, x_11, x_12, x_13, x_14); +lean_dec(x_13); +lean_dec(x_12); +lean_dec(x_11); +lean_dec(x_10); +lean_dec(x_9); +lean_dec(x_8); +lean_dec(x_7); +lean_dec(x_6); +lean_dec(x_5); +lean_dec(x_4); +return x_15; +} +} +LEAN_EXPORT lean_object* l_List_forIn_x27_loop___at_Lean_Meta_Grind_Arith_CommRing_EqCnstr_superposeWith_go___spec__1___boxed(lean_object** _args) { +lean_object* x_1 = _args[0]; +lean_object* x_2 = _args[1]; +lean_object* x_3 = _args[2]; +lean_object* x_4 = _args[3]; +lean_object* x_5 = _args[4]; +lean_object* x_6 = _args[5]; +lean_object* x_7 = _args[6]; +lean_object* x_8 = _args[7]; +lean_object* x_9 = _args[8]; +lean_object* x_10 = _args[9]; +lean_object* x_11 = _args[10]; +lean_object* x_12 = _args[11]; +lean_object* x_13 = _args[12]; +lean_object* x_14 = _args[13]; +lean_object* x_15 = _args[14]; +lean_object* x_16 = _args[15]; +lean_object* x_17 = _args[16]; +_start: +{ +lean_object* x_18; +x_18 = l_List_forIn_x27_loop___at_Lean_Meta_Grind_Arith_CommRing_EqCnstr_superposeWith_go___spec__1(x_1, x_2, x_3, x_4, x_5, x_6, x_7, x_8, x_9, x_10, x_11, x_12, x_13, x_14, x_15, x_16, x_17); +lean_dec(x_4); +lean_dec(x_3); +lean_dec(x_2); +return x_18; +} +} +LEAN_EXPORT lean_object* l_Lean_Meta_Grind_Arith_CommRing_EqCnstr_superposeWith_go___boxed(lean_object* x_1, lean_object* x_2, lean_object* x_3, lean_object* x_4, lean_object* x_5, lean_object* x_6, lean_object* x_7, lean_object* x_8, lean_object* x_9, lean_object* x_10, lean_object* x_11, lean_object* x_12) { +_start: +{ +lean_object* x_13; +x_13 = l_Lean_Meta_Grind_Arith_CommRing_EqCnstr_superposeWith_go(x_1, x_2, x_3, x_4, x_5, x_6, x_7, x_8, x_9, x_10, x_11, x_12); +lean_dec(x_2); +return x_13; +} +} +LEAN_EXPORT lean_object* l_Lean_Meta_Grind_Arith_CommRing_EqCnstr_superposeWith___lambda__1(lean_object* x_1, lean_object* x_2, lean_object* x_3, lean_object* x_4, lean_object* x_5, lean_object* x_6, lean_object* x_7, lean_object* x_8, lean_object* x_9, lean_object* x_10, lean_object* x_11, lean_object* x_12) { +_start: +{ +lean_object* x_13; +x_13 = lean_ctor_get(x_1, 0); +lean_inc(x_13); +if (lean_obj_tag(x_13) == 0) +{ +lean_object* x_14; lean_object* x_15; +lean_dec(x_13); +lean_dec(x_11); +lean_dec(x_10); +lean_dec(x_9); +lean_dec(x_8); +lean_dec(x_7); +lean_dec(x_6); +lean_dec(x_5); +lean_dec(x_4); +lean_dec(x_3); +lean_dec(x_1); +x_14 = lean_box(0); +x_15 = lean_alloc_ctor(0, 2, 0); +lean_ctor_set(x_15, 0, x_14); +lean_ctor_set(x_15, 1, x_12); 
+return x_15; +} +else +{ +lean_object* x_16; lean_object* x_17; +x_16 = lean_ctor_get(x_13, 1); +lean_inc(x_16); +lean_dec(x_13); +x_17 = l_Lean_Meta_Grind_Arith_CommRing_EqCnstr_superposeWith_go(x_1, x_16, x_3, x_4, x_5, x_6, x_7, x_8, x_9, x_10, x_11, x_12); +lean_dec(x_16); +return x_17; +} +} +} +LEAN_EXPORT lean_object* l_Lean_Meta_Grind_Arith_CommRing_EqCnstr_superposeWith(lean_object* x_1, lean_object* x_2, lean_object* x_3, lean_object* x_4, lean_object* x_5, lean_object* x_6, lean_object* x_7, lean_object* x_8, lean_object* x_9, lean_object* x_10, lean_object* x_11) { +_start: +{ +lean_object* x_12; lean_object* x_13; uint8_t x_14; +x_12 = l_Lean_Meta_Grind_Arith_CommRing_checkMaxSteps(x_3, x_4, x_5, x_6, x_7, x_8, x_9, x_10, x_11); +x_13 = lean_ctor_get(x_12, 0); +lean_inc(x_13); +x_14 = lean_unbox(x_13); +lean_dec(x_13); +if (x_14 == 0) +{ +lean_object* x_15; lean_object* x_16; lean_object* x_17; +x_15 = lean_ctor_get(x_12, 1); +lean_inc(x_15); +lean_dec(x_12); +x_16 = lean_box(0); +x_17 = l_Lean_Meta_Grind_Arith_CommRing_EqCnstr_superposeWith___lambda__1(x_1, x_16, x_2, x_3, x_4, x_5, x_6, x_7, x_8, x_9, x_10, x_15); +return x_17; +} +else +{ +uint8_t x_18; +lean_dec(x_10); +lean_dec(x_9); +lean_dec(x_8); +lean_dec(x_7); +lean_dec(x_6); +lean_dec(x_5); +lean_dec(x_4); +lean_dec(x_3); +lean_dec(x_2); +lean_dec(x_1); +x_18 = !lean_is_exclusive(x_12); +if (x_18 == 0) +{ +lean_object* x_19; lean_object* x_20; +x_19 = lean_ctor_get(x_12, 0); +lean_dec(x_19); +x_20 = lean_box(0); +lean_ctor_set(x_12, 0, x_20); +return x_12; +} +else +{ +lean_object* x_21; lean_object* x_22; lean_object* x_23; +x_21 = lean_ctor_get(x_12, 1); +lean_inc(x_21); +lean_dec(x_12); +x_22 = lean_box(0); +x_23 = lean_alloc_ctor(0, 2, 0); +lean_ctor_set(x_23, 0, x_22); +lean_ctor_set(x_23, 1, x_21); +return x_23; +} +} +} +} +LEAN_EXPORT lean_object* l_Lean_Meta_Grind_Arith_CommRing_EqCnstr_superposeWith___lambda__1___boxed(lean_object* x_1, lean_object* x_2, lean_object* x_3, lean_object* x_4, lean_object* x_5, lean_object* x_6, lean_object* x_7, lean_object* x_8, lean_object* x_9, lean_object* x_10, lean_object* x_11, lean_object* x_12) { +_start: +{ +lean_object* x_13; +x_13 = l_Lean_Meta_Grind_Arith_CommRing_EqCnstr_superposeWith___lambda__1(x_1, x_2, x_3, x_4, x_5, x_6, x_7, x_8, x_9, x_10, x_11, x_12); +lean_dec(x_2); +return x_13; +} +} +static lean_object* _init_l_Lean_Meta_Grind_Arith_CommRing_EqCnstr_toMonic___lambda__1___closed__1() { +_start: +{ +lean_object* x_1; lean_object* x_2; +x_1 = lean_unsigned_to_nat(1u); +x_2 = lean_nat_to_int(x_1); +return x_2; +} +} +static lean_object* _init_l_Lean_Meta_Grind_Arith_CommRing_EqCnstr_toMonic___lambda__1___closed__2() { +_start: +{ +lean_object* x_1; lean_object* x_2; +x_1 = l_Lean_Meta_Grind_Arith_CommRing_EqCnstr_toMonic___lambda__1___closed__1; +x_2 = lean_int_neg(x_1); +return x_2; +} +} +LEAN_EXPORT lean_object* l_Lean_Meta_Grind_Arith_CommRing_EqCnstr_toMonic___lambda__1(lean_object* x_1, lean_object* x_2, lean_object* x_3, lean_object* x_4, lean_object* x_5, lean_object* x_6, lean_object* x_7, lean_object* x_8, lean_object* x_9, lean_object* x_10, lean_object* x_11, lean_object* x_12, lean_object* x_13, lean_object* x_14) { +_start: +{ +lean_object* x_15; uint8_t x_16; +x_15 = l_Lean_Meta_Grind_Arith_CommRing_EqCnstr_checkConstant___closed__2; +x_16 = lean_int_dec_lt(x_2, x_15); +if (x_16 == 0) +{ +lean_object* x_17; +lean_dec(x_3); +x_17 = lean_alloc_ctor(0, 2, 0); +lean_ctor_set(x_17, 0, x_1); +lean_ctor_set(x_17, 1, x_14); +return x_17; +} +else 
+{ +lean_object* x_18; lean_object* x_19; lean_object* x_20; lean_object* x_21; lean_object* x_22; lean_object* x_23; lean_object* x_24; +x_18 = l_Lean_Meta_Grind_Arith_CommRing_EqCnstr_toMonic___lambda__1___closed__2; +x_19 = l_Lean_Grind_CommRing_Poly_mulConst(x_18, x_3); +lean_inc(x_1); +x_20 = lean_alloc_ctor(3, 2, 0); +lean_ctor_set(x_20, 0, x_18); +lean_ctor_set(x_20, 1, x_1); +x_21 = lean_ctor_get(x_1, 2); +lean_inc(x_21); +x_22 = lean_ctor_get(x_1, 3); +lean_inc(x_22); +lean_dec(x_1); +x_23 = lean_alloc_ctor(0, 4, 0); +lean_ctor_set(x_23, 0, x_19); +lean_ctor_set(x_23, 1, x_20); +lean_ctor_set(x_23, 2, x_21); +lean_ctor_set(x_23, 3, x_22); +x_24 = lean_alloc_ctor(0, 2, 0); +lean_ctor_set(x_24, 0, x_23); +lean_ctor_set(x_24, 1, x_14); +return x_24; +} +} +} +LEAN_EXPORT lean_object* l_Lean_Meta_Grind_Arith_CommRing_EqCnstr_toMonic___lambda__2(lean_object* x_1, lean_object* x_2, lean_object* x_3, lean_object* x_4, lean_object* x_5, lean_object* x_6, lean_object* x_7, lean_object* x_8, lean_object* x_9, lean_object* x_10, lean_object* x_11, lean_object* x_12, lean_object* x_13, lean_object* x_14) { +_start: +{ +lean_object* x_15; +x_15 = l_Lean_Meta_Grind_Arith_CommRing_noZeroDivisors(x_5, x_6, x_7, x_8, x_9, x_10, x_11, x_12, x_13, x_14); +if (lean_obj_tag(x_15) == 0) +{ +lean_object* x_16; uint8_t x_17; +x_16 = lean_ctor_get(x_15, 0); +lean_inc(x_16); +x_17 = lean_unbox(x_16); +lean_dec(x_16); +if (x_17 == 0) +{ +lean_object* x_18; lean_object* x_19; lean_object* x_20; +x_18 = lean_ctor_get(x_15, 1); +lean_inc(x_18); +lean_dec(x_15); +x_19 = lean_box(0); +x_20 = l_Lean_Meta_Grind_Arith_CommRing_EqCnstr_toMonic___lambda__1(x_1, x_2, x_3, x_19, x_5, x_6, x_7, x_8, x_9, x_10, x_11, x_12, x_13, x_18); +return x_20; +} +else +{ +uint8_t x_21; +x_21 = !lean_is_exclusive(x_15); +if (x_21 == 0) +{ +lean_object* x_22; lean_object* x_23; lean_object* x_24; lean_object* x_25; lean_object* x_26; uint8_t x_27; +x_22 = lean_ctor_get(x_15, 1); +x_23 = lean_ctor_get(x_15, 0); +lean_dec(x_23); +x_24 = l_Lean_Grind_CommRing_Poly_gcdCoeffs(x_3); +x_25 = lean_nat_to_int(x_24); +x_26 = l_Lean_Meta_Grind_Arith_CommRing_EqCnstr_toMonic___lambda__1___closed__1; +x_27 = lean_int_dec_eq(x_25, x_26); +if (x_27 == 0) +{ +lean_object* x_28; uint8_t x_29; +x_28 = l_Lean_Meta_Grind_Arith_CommRing_EqCnstr_checkConstant___closed__2; +x_29 = lean_int_dec_lt(x_2, x_28); +if (x_29 == 0) +{ +lean_object* x_30; lean_object* x_31; lean_object* x_32; lean_object* x_33; lean_object* x_34; +x_30 = lean_ctor_get(x_1, 2); +lean_inc(x_30); +x_31 = lean_ctor_get(x_1, 3); +lean_inc(x_31); +x_32 = l_Lean_Grind_CommRing_Poly_divConst(x_3, x_25); +x_33 = lean_alloc_ctor(4, 2, 0); +lean_ctor_set(x_33, 0, x_25); +lean_ctor_set(x_33, 1, x_1); +x_34 = lean_alloc_ctor(0, 4, 0); +lean_ctor_set(x_34, 0, x_32); +lean_ctor_set(x_34, 1, x_33); +lean_ctor_set(x_34, 2, x_30); +lean_ctor_set(x_34, 3, x_31); +lean_ctor_set(x_15, 0, x_34); +return x_15; +} +else +{ +lean_object* x_35; lean_object* x_36; lean_object* x_37; lean_object* x_38; lean_object* x_39; lean_object* x_40; +x_35 = lean_ctor_get(x_1, 2); +lean_inc(x_35); +x_36 = lean_ctor_get(x_1, 3); +lean_inc(x_36); +x_37 = lean_int_neg(x_25); +lean_dec(x_25); +x_38 = l_Lean_Grind_CommRing_Poly_divConst(x_3, x_37); +x_39 = lean_alloc_ctor(4, 2, 0); +lean_ctor_set(x_39, 0, x_37); +lean_ctor_set(x_39, 1, x_1); +x_40 = lean_alloc_ctor(0, 4, 0); +lean_ctor_set(x_40, 0, x_38); +lean_ctor_set(x_40, 1, x_39); +lean_ctor_set(x_40, 2, x_35); +lean_ctor_set(x_40, 3, x_36); +lean_ctor_set(x_15, 0, 
x_40); +return x_15; +} +} +else +{ +lean_object* x_41; lean_object* x_42; +lean_dec(x_25); +lean_free_object(x_15); +x_41 = lean_box(0); +x_42 = l_Lean_Meta_Grind_Arith_CommRing_EqCnstr_toMonic___lambda__1(x_1, x_2, x_3, x_41, x_5, x_6, x_7, x_8, x_9, x_10, x_11, x_12, x_13, x_22); +return x_42; +} +} +else +{ +lean_object* x_43; lean_object* x_44; lean_object* x_45; lean_object* x_46; uint8_t x_47; +x_43 = lean_ctor_get(x_15, 1); +lean_inc(x_43); +lean_dec(x_15); +x_44 = l_Lean_Grind_CommRing_Poly_gcdCoeffs(x_3); +x_45 = lean_nat_to_int(x_44); +x_46 = l_Lean_Meta_Grind_Arith_CommRing_EqCnstr_toMonic___lambda__1___closed__1; +x_47 = lean_int_dec_eq(x_45, x_46); +if (x_47 == 0) +{ +lean_object* x_48; uint8_t x_49; +x_48 = l_Lean_Meta_Grind_Arith_CommRing_EqCnstr_checkConstant___closed__2; +x_49 = lean_int_dec_lt(x_2, x_48); +if (x_49 == 0) +{ +lean_object* x_50; lean_object* x_51; lean_object* x_52; lean_object* x_53; lean_object* x_54; lean_object* x_55; +x_50 = lean_ctor_get(x_1, 2); +lean_inc(x_50); +x_51 = lean_ctor_get(x_1, 3); +lean_inc(x_51); +x_52 = l_Lean_Grind_CommRing_Poly_divConst(x_3, x_45); +x_53 = lean_alloc_ctor(4, 2, 0); +lean_ctor_set(x_53, 0, x_45); +lean_ctor_set(x_53, 1, x_1); +x_54 = lean_alloc_ctor(0, 4, 0); +lean_ctor_set(x_54, 0, x_52); +lean_ctor_set(x_54, 1, x_53); +lean_ctor_set(x_54, 2, x_50); +lean_ctor_set(x_54, 3, x_51); +x_55 = lean_alloc_ctor(0, 2, 0); +lean_ctor_set(x_55, 0, x_54); +lean_ctor_set(x_55, 1, x_43); +return x_55; +} +else +{ +lean_object* x_56; lean_object* x_57; lean_object* x_58; lean_object* x_59; lean_object* x_60; lean_object* x_61; lean_object* x_62; +x_56 = lean_ctor_get(x_1, 2); +lean_inc(x_56); +x_57 = lean_ctor_get(x_1, 3); +lean_inc(x_57); +x_58 = lean_int_neg(x_45); +lean_dec(x_45); +x_59 = l_Lean_Grind_CommRing_Poly_divConst(x_3, x_58); +x_60 = lean_alloc_ctor(4, 2, 0); +lean_ctor_set(x_60, 0, x_58); +lean_ctor_set(x_60, 1, x_1); +x_61 = lean_alloc_ctor(0, 4, 0); +lean_ctor_set(x_61, 0, x_59); +lean_ctor_set(x_61, 1, x_60); +lean_ctor_set(x_61, 2, x_56); +lean_ctor_set(x_61, 3, x_57); +x_62 = lean_alloc_ctor(0, 2, 0); +lean_ctor_set(x_62, 0, x_61); +lean_ctor_set(x_62, 1, x_43); +return x_62; +} +} +else +{ +lean_object* x_63; lean_object* x_64; +lean_dec(x_45); +x_63 = lean_box(0); +x_64 = l_Lean_Meta_Grind_Arith_CommRing_EqCnstr_toMonic___lambda__1(x_1, x_2, x_3, x_63, x_5, x_6, x_7, x_8, x_9, x_10, x_11, x_12, x_13, x_43); +return x_64; +} +} +} +} +else +{ +uint8_t x_65; +lean_dec(x_3); +lean_dec(x_1); +x_65 = !lean_is_exclusive(x_15); +if (x_65 == 0) +{ +return x_15; +} +else +{ +lean_object* x_66; lean_object* x_67; lean_object* x_68; +x_66 = lean_ctor_get(x_15, 0); +x_67 = lean_ctor_get(x_15, 1); +lean_inc(x_67); +lean_inc(x_66); +lean_dec(x_15); +x_68 = lean_alloc_ctor(1, 2, 0); +lean_ctor_set(x_68, 0, x_66); +lean_ctor_set(x_68, 1, x_67); +return x_68; +} +} +} +} +LEAN_EXPORT lean_object* l_Lean_Meta_Grind_Arith_CommRing_EqCnstr_toMonic___lambda__3(lean_object* x_1, lean_object* x_2, lean_object* x_3, lean_object* x_4, lean_object* x_5, lean_object* x_6, lean_object* x_7, lean_object* x_8, lean_object* x_9, lean_object* x_10, lean_object* x_11, lean_object* x_12, lean_object* x_13, lean_object* x_14) { +_start: +{ +lean_object* x_15; +lean_inc(x_13); +lean_inc(x_12); +lean_inc(x_11); +lean_inc(x_10); +lean_inc(x_9); +lean_inc(x_8); +lean_inc(x_7); +lean_inc(x_6); +lean_inc(x_5); +x_15 = l_Lean_Meta_Grind_Arith_CommRing_nonzeroChar_x3f___at_Lean_Grind_CommRing_Expr_toPolyM___spec__1(x_5, x_6, x_7, x_8, x_9, x_10, x_11, 
x_12, x_13, x_14); +if (lean_obj_tag(x_15) == 0) +{ +lean_object* x_16; +x_16 = lean_ctor_get(x_15, 0); +lean_inc(x_16); +if (lean_obj_tag(x_16) == 0) +{ +lean_object* x_17; lean_object* x_18; lean_object* x_19; +x_17 = lean_ctor_get(x_15, 1); +lean_inc(x_17); +lean_dec(x_15); +x_18 = lean_box(0); +x_19 = l_Lean_Meta_Grind_Arith_CommRing_EqCnstr_toMonic___lambda__2(x_1, x_2, x_3, x_18, x_5, x_6, x_7, x_8, x_9, x_10, x_11, x_12, x_13, x_17); +lean_dec(x_13); +lean_dec(x_12); +lean_dec(x_11); +lean_dec(x_10); +lean_dec(x_9); +lean_dec(x_8); +lean_dec(x_7); +lean_dec(x_6); +lean_dec(x_5); +return x_19; +} +else +{ +uint8_t x_20; +x_20 = !lean_is_exclusive(x_15); +if (x_20 == 0) +{ +lean_object* x_21; lean_object* x_22; lean_object* x_23; lean_object* x_24; lean_object* x_25; lean_object* x_26; lean_object* x_27; uint8_t x_28; +x_21 = lean_ctor_get(x_15, 1); +x_22 = lean_ctor_get(x_15, 0); +lean_dec(x_22); +x_23 = lean_ctor_get(x_16, 0); +lean_inc(x_23); +lean_dec(x_16); +lean_inc(x_23); +x_24 = lean_nat_to_int(x_23); +x_25 = l_Lean_Meta_Grind_Arith_gcdExt(x_2, x_24); +x_26 = lean_ctor_get(x_25, 1); +lean_inc(x_26); +x_27 = lean_ctor_get(x_25, 0); +lean_inc(x_27); +lean_dec(x_25); +x_28 = !lean_is_exclusive(x_26); +if (x_28 == 0) +{ +lean_object* x_29; lean_object* x_30; lean_object* x_31; uint8_t x_32; +x_29 = lean_ctor_get(x_26, 0); +x_30 = lean_ctor_get(x_26, 1); +lean_dec(x_30); +x_31 = l_Lean_Meta_Grind_Arith_CommRing_EqCnstr_toMonic___lambda__1___closed__1; +x_32 = lean_int_dec_eq(x_27, x_31); +lean_dec(x_27); +if (x_32 == 0) +{ +lean_object* x_33; lean_object* x_34; +lean_free_object(x_26); +lean_dec(x_29); +lean_dec(x_24); +lean_dec(x_23); +lean_free_object(x_15); +x_33 = lean_box(0); +x_34 = l_Lean_Meta_Grind_Arith_CommRing_EqCnstr_toMonic___lambda__2(x_1, x_2, x_3, x_33, x_5, x_6, x_7, x_8, x_9, x_10, x_11, x_12, x_13, x_21); +lean_dec(x_13); +lean_dec(x_12); +lean_dec(x_11); +lean_dec(x_10); +lean_dec(x_9); +lean_dec(x_8); +lean_dec(x_7); +lean_dec(x_6); +lean_dec(x_5); +return x_34; +} +else +{ +lean_object* x_35; uint8_t x_36; +lean_dec(x_13); +lean_dec(x_12); +lean_dec(x_11); +lean_dec(x_10); +lean_dec(x_9); +lean_dec(x_8); +lean_dec(x_7); +lean_dec(x_6); +lean_dec(x_5); +x_35 = l_Lean_Meta_Grind_Arith_CommRing_EqCnstr_checkConstant___closed__2; +x_36 = lean_int_dec_lt(x_29, x_35); +if (x_36 == 0) +{ +lean_object* x_37; lean_object* x_38; lean_object* x_39; lean_object* x_40; +lean_dec(x_24); +x_37 = lean_ctor_get(x_1, 2); +lean_inc(x_37); +x_38 = lean_ctor_get(x_1, 3); +lean_inc(x_38); +x_39 = l_Lean_Grind_CommRing_Poly_mulConstC(x_29, x_3, x_23); +lean_ctor_set_tag(x_26, 3); +lean_ctor_set(x_26, 1, x_1); +x_40 = lean_alloc_ctor(0, 4, 0); +lean_ctor_set(x_40, 0, x_39); +lean_ctor_set(x_40, 1, x_26); +lean_ctor_set(x_40, 2, x_37); +lean_ctor_set(x_40, 3, x_38); +lean_ctor_set(x_15, 0, x_40); +return x_15; +} +else +{ +lean_object* x_41; lean_object* x_42; lean_object* x_43; lean_object* x_44; lean_object* x_45; +x_41 = lean_ctor_get(x_1, 2); +lean_inc(x_41); +x_42 = lean_ctor_get(x_1, 3); +lean_inc(x_42); +x_43 = lean_int_emod(x_29, x_24); +lean_dec(x_24); +lean_dec(x_29); +x_44 = l_Lean_Grind_CommRing_Poly_mulConstC(x_43, x_3, x_23); +lean_ctor_set_tag(x_26, 3); +lean_ctor_set(x_26, 1, x_1); +lean_ctor_set(x_26, 0, x_43); +x_45 = lean_alloc_ctor(0, 4, 0); +lean_ctor_set(x_45, 0, x_44); +lean_ctor_set(x_45, 1, x_26); +lean_ctor_set(x_45, 2, x_41); +lean_ctor_set(x_45, 3, x_42); +lean_ctor_set(x_15, 0, x_45); +return x_15; } -lean_ctor_set(x_241, 0, x_207); -lean_ctor_set(x_241, 
1, x_239); -return x_241; } } +else +{ +lean_object* x_46; lean_object* x_47; uint8_t x_48; +x_46 = lean_ctor_get(x_26, 0); +lean_inc(x_46); +lean_dec(x_26); +x_47 = l_Lean_Meta_Grind_Arith_CommRing_EqCnstr_toMonic___lambda__1___closed__1; +x_48 = lean_int_dec_eq(x_27, x_47); +lean_dec(x_27); +if (x_48 == 0) +{ +lean_object* x_49; lean_object* x_50; +lean_dec(x_46); +lean_dec(x_24); +lean_dec(x_23); +lean_free_object(x_15); +x_49 = lean_box(0); +x_50 = l_Lean_Meta_Grind_Arith_CommRing_EqCnstr_toMonic___lambda__2(x_1, x_2, x_3, x_49, x_5, x_6, x_7, x_8, x_9, x_10, x_11, x_12, x_13, x_21); +lean_dec(x_13); +lean_dec(x_12); +lean_dec(x_11); +lean_dec(x_10); +lean_dec(x_9); +lean_dec(x_8); +lean_dec(x_7); +lean_dec(x_6); +lean_dec(x_5); +return x_50; } +else +{ +lean_object* x_51; uint8_t x_52; +lean_dec(x_13); +lean_dec(x_12); +lean_dec(x_11); +lean_dec(x_10); +lean_dec(x_9); +lean_dec(x_8); +lean_dec(x_7); +lean_dec(x_6); +lean_dec(x_5); +x_51 = l_Lean_Meta_Grind_Arith_CommRing_EqCnstr_checkConstant___closed__2; +x_52 = lean_int_dec_lt(x_46, x_51); +if (x_52 == 0) +{ +lean_object* x_53; lean_object* x_54; lean_object* x_55; lean_object* x_56; lean_object* x_57; +lean_dec(x_24); +x_53 = lean_ctor_get(x_1, 2); +lean_inc(x_53); +x_54 = lean_ctor_get(x_1, 3); +lean_inc(x_54); +x_55 = l_Lean_Grind_CommRing_Poly_mulConstC(x_46, x_3, x_23); +x_56 = lean_alloc_ctor(3, 2, 0); +lean_ctor_set(x_56, 0, x_46); +lean_ctor_set(x_56, 1, x_1); +x_57 = lean_alloc_ctor(0, 4, 0); +lean_ctor_set(x_57, 0, x_55); +lean_ctor_set(x_57, 1, x_56); +lean_ctor_set(x_57, 2, x_53); +lean_ctor_set(x_57, 3, x_54); +lean_ctor_set(x_15, 0, x_57); +return x_15; } -static lean_object* _init_l_Lean_Meta_Grind_Arith_CommRing_EqCnstr_addToQueue___lambda__2___closed__1() { -_start: +else { -lean_object* x_1; -x_1 = lean_mk_string_unchecked("queue", 5, 5); -return x_1; +lean_object* x_58; lean_object* x_59; lean_object* x_60; lean_object* x_61; lean_object* x_62; lean_object* x_63; +x_58 = lean_ctor_get(x_1, 2); +lean_inc(x_58); +x_59 = lean_ctor_get(x_1, 3); +lean_inc(x_59); +x_60 = lean_int_emod(x_46, x_24); +lean_dec(x_24); +lean_dec(x_46); +x_61 = l_Lean_Grind_CommRing_Poly_mulConstC(x_60, x_3, x_23); +x_62 = lean_alloc_ctor(3, 2, 0); +lean_ctor_set(x_62, 0, x_60); +lean_ctor_set(x_62, 1, x_1); +x_63 = lean_alloc_ctor(0, 4, 0); +lean_ctor_set(x_63, 0, x_61); +lean_ctor_set(x_63, 1, x_62); +lean_ctor_set(x_63, 2, x_58); +lean_ctor_set(x_63, 3, x_59); +lean_ctor_set(x_15, 0, x_63); +return x_15; } } -static lean_object* _init_l_Lean_Meta_Grind_Arith_CommRing_EqCnstr_addToQueue___lambda__2___closed__2() { -_start: -{ -lean_object* x_1; lean_object* x_2; lean_object* x_3; lean_object* x_4; lean_object* x_5; -x_1 = l_Lean_Meta_Grind_Arith_CommRing_PolyDerivation_simplifyWith___closed__1; -x_2 = l_Lean_Meta_Grind_Arith_CommRing_PolyDerivation_simplifyWith___closed__2; -x_3 = l_Lean_Meta_Grind_Arith_CommRing_EqCnstr_checkConstant___closed__3; -x_4 = l_Lean_Meta_Grind_Arith_CommRing_EqCnstr_addToQueue___lambda__2___closed__1; -x_5 = l_Lean_Name_mkStr4(x_1, x_2, x_3, x_4); -return x_5; } } -LEAN_EXPORT lean_object* l_Lean_Meta_Grind_Arith_CommRing_EqCnstr_addToQueue___lambda__2(lean_object* x_1, lean_object* x_2, lean_object* x_3, lean_object* x_4, lean_object* x_5, lean_object* x_6, lean_object* x_7, lean_object* x_8, lean_object* x_9, lean_object* x_10, lean_object* x_11, lean_object* x_12) { -_start: +else { -lean_object* x_13; lean_object* x_14; lean_object* x_15; uint8_t x_16; -x_13 = 
l_Lean_Meta_Grind_Arith_CommRing_EqCnstr_addToQueue___lambda__2___closed__2; -x_14 = l_Lean_isTracingEnabledFor___at_Lean_Meta_Grind_Arith_CommRing_Null_setEqUnsat___spec__1(x_13, x_3, x_4, x_5, x_6, x_7, x_8, x_9, x_10, x_11, x_12); -x_15 = lean_ctor_get(x_14, 0); -lean_inc(x_15); -x_16 = lean_unbox(x_15); +lean_object* x_64; lean_object* x_65; lean_object* x_66; lean_object* x_67; lean_object* x_68; lean_object* x_69; lean_object* x_70; lean_object* x_71; lean_object* x_72; uint8_t x_73; +x_64 = lean_ctor_get(x_15, 1); +lean_inc(x_64); lean_dec(x_15); -if (x_16 == 0) +x_65 = lean_ctor_get(x_16, 0); +lean_inc(x_65); +lean_dec(x_16); +lean_inc(x_65); +x_66 = lean_nat_to_int(x_65); +x_67 = l_Lean_Meta_Grind_Arith_gcdExt(x_2, x_66); +x_68 = lean_ctor_get(x_67, 1); +lean_inc(x_68); +x_69 = lean_ctor_get(x_67, 0); +lean_inc(x_69); +lean_dec(x_67); +x_70 = lean_ctor_get(x_68, 0); +lean_inc(x_70); +if (lean_is_exclusive(x_68)) { + lean_ctor_release(x_68, 0); + lean_ctor_release(x_68, 1); + x_71 = x_68; +} else { + lean_dec_ref(x_68); + x_71 = lean_box(0); +} +x_72 = l_Lean_Meta_Grind_Arith_CommRing_EqCnstr_toMonic___lambda__1___closed__1; +x_73 = lean_int_dec_eq(x_69, x_72); +lean_dec(x_69); +if (x_73 == 0) { -lean_object* x_17; lean_object* x_18; lean_object* x_19; -x_17 = lean_ctor_get(x_14, 1); -lean_inc(x_17); -lean_dec(x_14); -x_18 = lean_box(0); -x_19 = l_Lean_Meta_Grind_Arith_CommRing_EqCnstr_addToQueue___lambda__1(x_1, x_18, x_3, x_4, x_5, x_6, x_7, x_8, x_9, x_10, x_11, x_17); +lean_object* x_74; lean_object* x_75; +lean_dec(x_71); +lean_dec(x_70); +lean_dec(x_66); +lean_dec(x_65); +x_74 = lean_box(0); +x_75 = l_Lean_Meta_Grind_Arith_CommRing_EqCnstr_toMonic___lambda__2(x_1, x_2, x_3, x_74, x_5, x_6, x_7, x_8, x_9, x_10, x_11, x_12, x_13, x_64); +lean_dec(x_13); +lean_dec(x_12); lean_dec(x_11); lean_dec(x_10); lean_dec(x_9); lean_dec(x_8); -return x_19; +lean_dec(x_7); +lean_dec(x_6); +lean_dec(x_5); +return x_75; } else { -uint8_t x_20; -x_20 = !lean_is_exclusive(x_14); -if (x_20 == 0) -{ -lean_object* x_21; lean_object* x_22; lean_object* x_23; -x_21 = lean_ctor_get(x_14, 1); -x_22 = lean_ctor_get(x_14, 0); -lean_dec(x_22); -x_23 = l_Lean_Meta_Grind_updateLastTag(x_4, x_5, x_6, x_7, x_8, x_9, x_10, x_11, x_21); -if (lean_obj_tag(x_23) == 0) -{ -lean_object* x_24; lean_object* x_25; -x_24 = lean_ctor_get(x_23, 1); -lean_inc(x_24); -lean_dec(x_23); -lean_inc(x_11); -lean_inc(x_10); -lean_inc(x_9); -lean_inc(x_8); -lean_inc(x_1); -x_25 = l_Lean_Meta_Grind_Arith_CommRing_EqCnstr_denoteExpr(x_1, x_3, x_4, x_5, x_6, x_7, x_8, x_9, x_10, x_11, x_24); -if (lean_obj_tag(x_25) == 0) -{ -lean_object* x_26; lean_object* x_27; lean_object* x_28; lean_object* x_29; lean_object* x_30; lean_object* x_31; lean_object* x_32; lean_object* x_33; lean_object* x_34; -x_26 = lean_ctor_get(x_25, 0); -lean_inc(x_26); -x_27 = lean_ctor_get(x_25, 1); -lean_inc(x_27); -lean_dec(x_25); -x_28 = l_Lean_MessageData_ofExpr(x_26); -x_29 = l___private_Lean_Meta_Tactic_Grind_Arith_CommRing_EqCnstr_0__Lean_Meta_Grind_Arith_CommRing_toRingExpr_x3f___closed__5; -lean_ctor_set_tag(x_14, 7); -lean_ctor_set(x_14, 1, x_28); -lean_ctor_set(x_14, 0, x_29); -x_30 = lean_alloc_ctor(7, 2, 0); -lean_ctor_set(x_30, 0, x_14); -lean_ctor_set(x_30, 1, x_29); -x_31 = l_Lean_addTrace___at_Lean_Meta_Grind_Arith_CommRing_Null_setEqUnsat___spec__3(x_13, x_30, x_3, x_4, x_5, x_6, x_7, x_8, x_9, x_10, x_11, x_27); -x_32 = lean_ctor_get(x_31, 0); -lean_inc(x_32); -x_33 = lean_ctor_get(x_31, 1); -lean_inc(x_33); -lean_dec(x_31); -x_34 = 
l_Lean_Meta_Grind_Arith_CommRing_EqCnstr_addToQueue___lambda__1(x_1, x_32, x_3, x_4, x_5, x_6, x_7, x_8, x_9, x_10, x_11, x_33); +lean_object* x_76; uint8_t x_77; +lean_dec(x_13); +lean_dec(x_12); lean_dec(x_11); lean_dec(x_10); lean_dec(x_9); lean_dec(x_8); -lean_dec(x_32); -return x_34; +lean_dec(x_7); +lean_dec(x_6); +lean_dec(x_5); +x_76 = l_Lean_Meta_Grind_Arith_CommRing_EqCnstr_checkConstant___closed__2; +x_77 = lean_int_dec_lt(x_70, x_76); +if (x_77 == 0) +{ +lean_object* x_78; lean_object* x_79; lean_object* x_80; lean_object* x_81; lean_object* x_82; lean_object* x_83; +lean_dec(x_66); +x_78 = lean_ctor_get(x_1, 2); +lean_inc(x_78); +x_79 = lean_ctor_get(x_1, 3); +lean_inc(x_79); +x_80 = l_Lean_Grind_CommRing_Poly_mulConstC(x_70, x_3, x_65); +if (lean_is_scalar(x_71)) { + x_81 = lean_alloc_ctor(3, 2, 0); +} else { + x_81 = x_71; + lean_ctor_set_tag(x_81, 3); +} +lean_ctor_set(x_81, 0, x_70); +lean_ctor_set(x_81, 1, x_1); +x_82 = lean_alloc_ctor(0, 4, 0); +lean_ctor_set(x_82, 0, x_80); +lean_ctor_set(x_82, 1, x_81); +lean_ctor_set(x_82, 2, x_78); +lean_ctor_set(x_82, 3, x_79); +x_83 = lean_alloc_ctor(0, 2, 0); +lean_ctor_set(x_83, 0, x_82); +lean_ctor_set(x_83, 1, x_64); +return x_83; } else { -uint8_t x_35; -lean_free_object(x_14); -lean_dec(x_11); -lean_dec(x_10); -lean_dec(x_9); -lean_dec(x_8); -lean_dec(x_1); -x_35 = !lean_is_exclusive(x_25); -if (x_35 == 0) -{ -return x_25; +lean_object* x_84; lean_object* x_85; lean_object* x_86; lean_object* x_87; lean_object* x_88; lean_object* x_89; lean_object* x_90; +x_84 = lean_ctor_get(x_1, 2); +lean_inc(x_84); +x_85 = lean_ctor_get(x_1, 3); +lean_inc(x_85); +x_86 = lean_int_emod(x_70, x_66); +lean_dec(x_66); +lean_dec(x_70); +x_87 = l_Lean_Grind_CommRing_Poly_mulConstC(x_86, x_3, x_65); +if (lean_is_scalar(x_71)) { + x_88 = lean_alloc_ctor(3, 2, 0); +} else { + x_88 = x_71; + lean_ctor_set_tag(x_88, 3); +} +lean_ctor_set(x_88, 0, x_86); +lean_ctor_set(x_88, 1, x_1); +x_89 = lean_alloc_ctor(0, 4, 0); +lean_ctor_set(x_89, 0, x_87); +lean_ctor_set(x_89, 1, x_88); +lean_ctor_set(x_89, 2, x_84); +lean_ctor_set(x_89, 3, x_85); +x_90 = lean_alloc_ctor(0, 2, 0); +lean_ctor_set(x_90, 0, x_89); +lean_ctor_set(x_90, 1, x_64); +return x_90; +} } -else -{ -lean_object* x_36; lean_object* x_37; lean_object* x_38; -x_36 = lean_ctor_get(x_25, 0); -x_37 = lean_ctor_get(x_25, 1); -lean_inc(x_37); -lean_inc(x_36); -lean_dec(x_25); -x_38 = lean_alloc_ctor(1, 2, 0); -lean_ctor_set(x_38, 0, x_36); -lean_ctor_set(x_38, 1, x_37); -return x_38; } } } else { -uint8_t x_39; -lean_free_object(x_14); +uint8_t x_91; +lean_dec(x_13); +lean_dec(x_12); lean_dec(x_11); lean_dec(x_10); lean_dec(x_9); lean_dec(x_8); +lean_dec(x_7); +lean_dec(x_6); +lean_dec(x_5); +lean_dec(x_3); lean_dec(x_1); -x_39 = !lean_is_exclusive(x_23); -if (x_39 == 0) +x_91 = !lean_is_exclusive(x_15); +if (x_91 == 0) { -return x_23; +return x_15; } else { -lean_object* x_40; lean_object* x_41; lean_object* x_42; -x_40 = lean_ctor_get(x_23, 0); -x_41 = lean_ctor_get(x_23, 1); -lean_inc(x_41); -lean_inc(x_40); -lean_dec(x_23); -x_42 = lean_alloc_ctor(1, 2, 0); -lean_ctor_set(x_42, 0, x_40); -lean_ctor_set(x_42, 1, x_41); -return x_42; +lean_object* x_92; lean_object* x_93; lean_object* x_94; +x_92 = lean_ctor_get(x_15, 0); +x_93 = lean_ctor_get(x_15, 1); +lean_inc(x_93); +lean_inc(x_92); +lean_dec(x_15); +x_94 = lean_alloc_ctor(1, 2, 0); +lean_ctor_set(x_94, 0, x_92); +lean_ctor_set(x_94, 1, x_93); +return x_94; } } } -else +} +LEAN_EXPORT lean_object* 
l_Lean_Meta_Grind_Arith_CommRing_EqCnstr_toMonic(lean_object* x_1, lean_object* x_2, lean_object* x_3, lean_object* x_4, lean_object* x_5, lean_object* x_6, lean_object* x_7, lean_object* x_8, lean_object* x_9, lean_object* x_10, lean_object* x_11) { +_start: { -lean_object* x_43; lean_object* x_44; -x_43 = lean_ctor_get(x_14, 1); -lean_inc(x_43); -lean_dec(x_14); -x_44 = l_Lean_Meta_Grind_updateLastTag(x_4, x_5, x_6, x_7, x_8, x_9, x_10, x_11, x_43); -if (lean_obj_tag(x_44) == 0) +lean_object* x_12; lean_object* x_13; lean_object* x_14; uint8_t x_15; +x_12 = lean_ctor_get(x_1, 0); +lean_inc(x_12); +x_13 = l_Lean_Grind_CommRing_Poly_lc(x_12); +x_14 = l_Lean_Meta_Grind_Arith_CommRing_EqCnstr_toMonic___lambda__1___closed__1; +x_15 = lean_int_dec_eq(x_13, x_14); +if (x_15 == 0) { -lean_object* x_45; lean_object* x_46; -x_45 = lean_ctor_get(x_44, 1); -lean_inc(x_45); -lean_dec(x_44); -lean_inc(x_11); -lean_inc(x_10); -lean_inc(x_9); -lean_inc(x_8); -lean_inc(x_1); -x_46 = l_Lean_Meta_Grind_Arith_CommRing_EqCnstr_denoteExpr(x_1, x_3, x_4, x_5, x_6, x_7, x_8, x_9, x_10, x_11, x_45); -if (lean_obj_tag(x_46) == 0) +lean_object* x_16; lean_object* x_17; +x_16 = lean_box(0); +x_17 = l_Lean_Meta_Grind_Arith_CommRing_EqCnstr_toMonic___lambda__3(x_1, x_13, x_12, x_16, x_2, x_3, x_4, x_5, x_6, x_7, x_8, x_9, x_10, x_11); +lean_dec(x_13); +return x_17; +} +else { -lean_object* x_47; lean_object* x_48; lean_object* x_49; lean_object* x_50; lean_object* x_51; lean_object* x_52; lean_object* x_53; lean_object* x_54; lean_object* x_55; lean_object* x_56; -x_47 = lean_ctor_get(x_46, 0); -lean_inc(x_47); -x_48 = lean_ctor_get(x_46, 1); -lean_inc(x_48); -lean_dec(x_46); -x_49 = l_Lean_MessageData_ofExpr(x_47); -x_50 = l___private_Lean_Meta_Tactic_Grind_Arith_CommRing_EqCnstr_0__Lean_Meta_Grind_Arith_CommRing_toRingExpr_x3f___closed__5; -x_51 = lean_alloc_ctor(7, 2, 0); -lean_ctor_set(x_51, 0, x_50); -lean_ctor_set(x_51, 1, x_49); -x_52 = lean_alloc_ctor(7, 2, 0); -lean_ctor_set(x_52, 0, x_51); -lean_ctor_set(x_52, 1, x_50); -x_53 = l_Lean_addTrace___at_Lean_Meta_Grind_Arith_CommRing_Null_setEqUnsat___spec__3(x_13, x_52, x_3, x_4, x_5, x_6, x_7, x_8, x_9, x_10, x_11, x_48); -x_54 = lean_ctor_get(x_53, 0); -lean_inc(x_54); -x_55 = lean_ctor_get(x_53, 1); -lean_inc(x_55); -lean_dec(x_53); -x_56 = l_Lean_Meta_Grind_Arith_CommRing_EqCnstr_addToQueue___lambda__1(x_1, x_54, x_3, x_4, x_5, x_6, x_7, x_8, x_9, x_10, x_11, x_55); -lean_dec(x_11); +lean_object* x_18; +lean_dec(x_13); +lean_dec(x_12); lean_dec(x_10); lean_dec(x_9); lean_dec(x_8); -lean_dec(x_54); -return x_56; +lean_dec(x_7); +lean_dec(x_6); +lean_dec(x_5); +lean_dec(x_4); +lean_dec(x_3); +lean_dec(x_2); +x_18 = lean_alloc_ctor(0, 2, 0); +lean_ctor_set(x_18, 0, x_1); +lean_ctor_set(x_18, 1, x_11); +return x_18; } -else +} +} +LEAN_EXPORT lean_object* l_Lean_Meta_Grind_Arith_CommRing_EqCnstr_toMonic___lambda__1___boxed(lean_object* x_1, lean_object* x_2, lean_object* x_3, lean_object* x_4, lean_object* x_5, lean_object* x_6, lean_object* x_7, lean_object* x_8, lean_object* x_9, lean_object* x_10, lean_object* x_11, lean_object* x_12, lean_object* x_13, lean_object* x_14) { +_start: { -lean_object* x_57; lean_object* x_58; lean_object* x_59; lean_object* x_60; +lean_object* x_15; +x_15 = l_Lean_Meta_Grind_Arith_CommRing_EqCnstr_toMonic___lambda__1(x_1, x_2, x_3, x_4, x_5, x_6, x_7, x_8, x_9, x_10, x_11, x_12, x_13, x_14); +lean_dec(x_13); +lean_dec(x_12); lean_dec(x_11); lean_dec(x_10); lean_dec(x_9); lean_dec(x_8); -lean_dec(x_1); -x_57 = 
lean_ctor_get(x_46, 0); -lean_inc(x_57); -x_58 = lean_ctor_get(x_46, 1); -lean_inc(x_58); -if (lean_is_exclusive(x_46)) { - lean_ctor_release(x_46, 0); - lean_ctor_release(x_46, 1); - x_59 = x_46; -} else { - lean_dec_ref(x_46); - x_59 = lean_box(0); -} -if (lean_is_scalar(x_59)) { - x_60 = lean_alloc_ctor(1, 2, 0); -} else { - x_60 = x_59; -} -lean_ctor_set(x_60, 0, x_57); -lean_ctor_set(x_60, 1, x_58); -return x_60; +lean_dec(x_7); +lean_dec(x_6); +lean_dec(x_5); +lean_dec(x_4); +lean_dec(x_2); +return x_15; } } -else +LEAN_EXPORT lean_object* l_Lean_Meta_Grind_Arith_CommRing_EqCnstr_toMonic___lambda__2___boxed(lean_object* x_1, lean_object* x_2, lean_object* x_3, lean_object* x_4, lean_object* x_5, lean_object* x_6, lean_object* x_7, lean_object* x_8, lean_object* x_9, lean_object* x_10, lean_object* x_11, lean_object* x_12, lean_object* x_13, lean_object* x_14) { +_start: { -lean_object* x_61; lean_object* x_62; lean_object* x_63; lean_object* x_64; +lean_object* x_15; +x_15 = l_Lean_Meta_Grind_Arith_CommRing_EqCnstr_toMonic___lambda__2(x_1, x_2, x_3, x_4, x_5, x_6, x_7, x_8, x_9, x_10, x_11, x_12, x_13, x_14); +lean_dec(x_13); +lean_dec(x_12); lean_dec(x_11); lean_dec(x_10); lean_dec(x_9); lean_dec(x_8); -lean_dec(x_1); -x_61 = lean_ctor_get(x_44, 0); -lean_inc(x_61); -x_62 = lean_ctor_get(x_44, 1); -lean_inc(x_62); -if (lean_is_exclusive(x_44)) { - lean_ctor_release(x_44, 0); - lean_ctor_release(x_44, 1); - x_63 = x_44; -} else { - lean_dec_ref(x_44); - x_63 = lean_box(0); -} -if (lean_is_scalar(x_63)) { - x_64 = lean_alloc_ctor(1, 2, 0); -} else { - x_64 = x_63; -} -lean_ctor_set(x_64, 0, x_61); -lean_ctor_set(x_64, 1, x_62); -return x_64; -} +lean_dec(x_7); +lean_dec(x_6); +lean_dec(x_5); +lean_dec(x_4); +lean_dec(x_2); +return x_15; } } +LEAN_EXPORT lean_object* l_Lean_Meta_Grind_Arith_CommRing_EqCnstr_toMonic___lambda__3___boxed(lean_object* x_1, lean_object* x_2, lean_object* x_3, lean_object* x_4, lean_object* x_5, lean_object* x_6, lean_object* x_7, lean_object* x_8, lean_object* x_9, lean_object* x_10, lean_object* x_11, lean_object* x_12, lean_object* x_13, lean_object* x_14) { +_start: +{ +lean_object* x_15; +x_15 = l_Lean_Meta_Grind_Arith_CommRing_EqCnstr_toMonic___lambda__3(x_1, x_2, x_3, x_4, x_5, x_6, x_7, x_8, x_9, x_10, x_11, x_12, x_13, x_14); +lean_dec(x_4); +lean_dec(x_2); +return x_15; } } -LEAN_EXPORT lean_object* l_Lean_Meta_Grind_Arith_CommRing_EqCnstr_addToQueue(lean_object* x_1, lean_object* x_2, lean_object* x_3, lean_object* x_4, lean_object* x_5, lean_object* x_6, lean_object* x_7, lean_object* x_8, lean_object* x_9, lean_object* x_10, lean_object* x_11) { +LEAN_EXPORT lean_object* l_List_forIn_x27_loop___at_Lean_Meta_Grind_Arith_CommRing_EqCnstr_simplifyBasis_goVar___spec__1___lambda__1(lean_object* x_1, lean_object* x_2, lean_object* x_3, lean_object* x_4, lean_object* x_5, lean_object* x_6, lean_object* x_7, lean_object* x_8, lean_object* x_9, lean_object* x_10, lean_object* x_11, lean_object* x_12) { _start: { -lean_object* x_12; lean_object* x_13; uint8_t x_14; -x_12 = l_Lean_Meta_Grind_Arith_CommRing_checkMaxSteps(x_3, x_4, x_5, x_6, x_7, x_8, x_9, x_10, x_11); -x_13 = lean_ctor_get(x_12, 0); -lean_inc(x_13); -x_14 = lean_unbox(x_13); -lean_dec(x_13); +lean_object* x_13; +x_13 = l_Lean_Meta_Grind_Arith_CommRing_EqCnstr_addToQueue(x_1, x_3, x_4, x_5, x_6, x_7, x_8, x_9, x_10, x_11, x_12); +if (lean_obj_tag(x_13) == 0) +{ +uint8_t x_14; +x_14 = !lean_is_exclusive(x_13); if (x_14 == 0) { -lean_object* x_15; lean_object* x_16; lean_object* x_17; -x_15 = 
lean_ctor_get(x_12, 1); -lean_inc(x_15); -lean_dec(x_12); -x_16 = lean_box(0); -x_17 = l_Lean_Meta_Grind_Arith_CommRing_EqCnstr_addToQueue___lambda__2(x_1, x_16, x_2, x_3, x_4, x_5, x_6, x_7, x_8, x_9, x_10, x_15); -return x_17; +lean_object* x_15; lean_object* x_16; +x_15 = lean_ctor_get(x_13, 0); +lean_dec(x_15); +x_16 = l_List_forIn_x27_loop___at_Lean_Meta_Grind_Arith_CommRing_EqCnstr_superposeWith_go___spec__1___lambda__1___closed__1; +lean_ctor_set(x_13, 0, x_16); +return x_13; +} +else +{ +lean_object* x_17; lean_object* x_18; lean_object* x_19; +x_17 = lean_ctor_get(x_13, 1); +lean_inc(x_17); +lean_dec(x_13); +x_18 = l_List_forIn_x27_loop___at_Lean_Meta_Grind_Arith_CommRing_EqCnstr_superposeWith_go___spec__1___lambda__1___closed__1; +x_19 = lean_alloc_ctor(0, 2, 0); +lean_ctor_set(x_19, 0, x_18); +lean_ctor_set(x_19, 1, x_17); +return x_19; +} } else { -uint8_t x_18; -lean_dec(x_10); -lean_dec(x_9); -lean_dec(x_8); -lean_dec(x_7); -lean_dec(x_1); -x_18 = !lean_is_exclusive(x_12); -if (x_18 == 0) +uint8_t x_20; +x_20 = !lean_is_exclusive(x_13); +if (x_20 == 0) { -lean_object* x_19; lean_object* x_20; -x_19 = lean_ctor_get(x_12, 0); -lean_dec(x_19); -x_20 = lean_box(0); -lean_ctor_set(x_12, 0, x_20); -return x_12; +return x_13; } else { lean_object* x_21; lean_object* x_22; lean_object* x_23; -x_21 = lean_ctor_get(x_12, 1); +x_21 = lean_ctor_get(x_13, 0); +x_22 = lean_ctor_get(x_13, 1); +lean_inc(x_22); lean_inc(x_21); -lean_dec(x_12); -x_22 = lean_box(0); -x_23 = lean_alloc_ctor(0, 2, 0); -lean_ctor_set(x_23, 0, x_22); -lean_ctor_set(x_23, 1, x_21); +lean_dec(x_13); +x_23 = lean_alloc_ctor(1, 2, 0); +lean_ctor_set(x_23, 0, x_21); +lean_ctor_set(x_23, 1, x_22); return x_23; } } } } -LEAN_EXPORT lean_object* l_Lean_Meta_Grind_Arith_CommRing_EqCnstr_addToQueue___lambda__1___boxed(lean_object* x_1, lean_object* x_2, lean_object* x_3, lean_object* x_4, lean_object* x_5, lean_object* x_6, lean_object* x_7, lean_object* x_8, lean_object* x_9, lean_object* x_10, lean_object* x_11, lean_object* x_12) { +static lean_object* _init_l_List_forIn_x27_loop___at_Lean_Meta_Grind_Arith_CommRing_EqCnstr_simplifyBasis_goVar___spec__1___lambda__2___closed__1() { _start: { -lean_object* x_13; -x_13 = l_Lean_Meta_Grind_Arith_CommRing_EqCnstr_addToQueue___lambda__1(x_1, x_2, x_3, x_4, x_5, x_6, x_7, x_8, x_9, x_10, x_11, x_12); +lean_object* x_1; +x_1 = lean_mk_string_unchecked("simplified: ", 12, 12); +return x_1; +} +} +static lean_object* _init_l_List_forIn_x27_loop___at_Lean_Meta_Grind_Arith_CommRing_EqCnstr_simplifyBasis_goVar___spec__1___lambda__2___closed__2() { +_start: +{ +lean_object* x_1; lean_object* x_2; +x_1 = l_List_forIn_x27_loop___at_Lean_Meta_Grind_Arith_CommRing_EqCnstr_simplifyBasis_goVar___spec__1___lambda__2___closed__1; +x_2 = l_Lean_stringToMessageData(x_1); +return x_2; +} +} +LEAN_EXPORT lean_object* l_List_forIn_x27_loop___at_Lean_Meta_Grind_Arith_CommRing_EqCnstr_simplifyBasis_goVar___spec__1___lambda__2(lean_object* x_1, lean_object* x_2, lean_object* x_3, lean_object* x_4, lean_object* x_5, lean_object* x_6, lean_object* x_7, lean_object* x_8, lean_object* x_9, lean_object* x_10, lean_object* x_11, lean_object* x_12, lean_object* x_13, lean_object* x_14, lean_object* x_15) { +_start: +{ +lean_object* x_16; +x_16 = lean_ctor_get(x_1, 0); +lean_inc(x_16); +if (lean_obj_tag(x_16) == 0) +{ +lean_object* x_17; lean_object* x_18; +lean_dec(x_16); +lean_dec(x_14); +lean_dec(x_13); +lean_dec(x_12); lean_dec(x_11); lean_dec(x_10); lean_dec(x_9); lean_dec(x_8); lean_dec(x_7); 
lean_dec(x_6); -lean_dec(x_5); lean_dec(x_4); lean_dec(x_3); lean_dec(x_2); -return x_13; -} +lean_dec(x_1); +x_17 = l_List_forIn_x27_loop___at_Lean_Meta_Grind_Arith_CommRing_EqCnstr_superposeWith_go___spec__1___lambda__1___closed__1; +x_18 = lean_alloc_ctor(0, 2, 0); +lean_ctor_set(x_18, 0, x_17); +lean_ctor_set(x_18, 1, x_15); +return x_18; } -LEAN_EXPORT lean_object* l_Lean_Meta_Grind_Arith_CommRing_EqCnstr_addToQueue___lambda__2___boxed(lean_object* x_1, lean_object* x_2, lean_object* x_3, lean_object* x_4, lean_object* x_5, lean_object* x_6, lean_object* x_7, lean_object* x_8, lean_object* x_9, lean_object* x_10, lean_object* x_11, lean_object* x_12) { -_start: +else { -lean_object* x_13; -x_13 = l_Lean_Meta_Grind_Arith_CommRing_EqCnstr_addToQueue___lambda__2(x_1, x_2, x_3, x_4, x_5, x_6, x_7, x_8, x_9, x_10, x_11, x_12); -lean_dec(x_7); -lean_dec(x_6); -lean_dec(x_5); +lean_object* x_19; uint8_t x_20; +x_19 = lean_ctor_get(x_16, 1); +lean_inc(x_19); +lean_dec(x_16); +x_20 = l_Lean_Grind_CommRing_Mon_divides(x_2, x_19); +if (x_20 == 0) +{ +lean_object* x_21; uint8_t x_22; lean_dec(x_4); lean_dec(x_3); -lean_dec(x_2); -return x_13; -} +x_21 = l_Lean_Meta_Grind_Arith_CommRing_addToBasisCore(x_1, x_6, x_7, x_8, x_9, x_10, x_11, x_12, x_13, x_14, x_15); +lean_dec(x_14); +lean_dec(x_13); +lean_dec(x_12); +lean_dec(x_11); +lean_dec(x_10); +lean_dec(x_9); +lean_dec(x_8); +lean_dec(x_7); +lean_dec(x_6); +x_22 = !lean_is_exclusive(x_21); +if (x_22 == 0) +{ +lean_object* x_23; lean_object* x_24; +x_23 = lean_ctor_get(x_21, 0); +lean_dec(x_23); +x_24 = l_List_forIn_x27_loop___at_Lean_Meta_Grind_Arith_CommRing_EqCnstr_superposeWith_go___spec__1___lambda__1___closed__1; +lean_ctor_set(x_21, 0, x_24); +return x_21; } -LEAN_EXPORT lean_object* l_Lean_Meta_Grind_Arith_CommRing_EqCnstr_addToQueue___boxed(lean_object* x_1, lean_object* x_2, lean_object* x_3, lean_object* x_4, lean_object* x_5, lean_object* x_6, lean_object* x_7, lean_object* x_8, lean_object* x_9, lean_object* x_10, lean_object* x_11) { -_start: +else { -lean_object* x_12; -x_12 = l_Lean_Meta_Grind_Arith_CommRing_EqCnstr_addToQueue(x_1, x_2, x_3, x_4, x_5, x_6, x_7, x_8, x_9, x_10, x_11); -lean_dec(x_6); -lean_dec(x_5); -lean_dec(x_4); -lean_dec(x_3); -lean_dec(x_2); -return x_12; +lean_object* x_25; lean_object* x_26; lean_object* x_27; +x_25 = lean_ctor_get(x_21, 1); +lean_inc(x_25); +lean_dec(x_21); +x_26 = l_List_forIn_x27_loop___at_Lean_Meta_Grind_Arith_CommRing_EqCnstr_superposeWith_go___spec__1___lambda__1___closed__1; +x_27 = lean_alloc_ctor(0, 2, 0); +lean_ctor_set(x_27, 0, x_26); +lean_ctor_set(x_27, 1, x_25); +return x_27; } } -LEAN_EXPORT lean_object* l_List_forIn_x27_loop___at_Lean_Meta_Grind_Arith_CommRing_EqCnstr_superposeWith_go___spec__1___lambda__1(lean_object* x_1, lean_object* x_2, lean_object* x_3, lean_object* x_4, lean_object* x_5, lean_object* x_6, lean_object* x_7, lean_object* x_8, lean_object* x_9, lean_object* x_10, lean_object* x_11, lean_object* x_12, lean_object* x_13, lean_object* x_14) { -_start: -{ -lean_object* x_15; lean_object* x_16; lean_object* x_17; lean_object* x_18; lean_object* x_19; lean_object* x_20; lean_object* x_21; -x_15 = lean_ctor_get(x_1, 0); -lean_inc(x_15); -x_16 = lean_ctor_get(x_1, 1); -lean_inc(x_16); -x_17 = lean_ctor_get(x_1, 2); -lean_inc(x_17); -x_18 = lean_ctor_get(x_1, 3); -lean_inc(x_18); -x_19 = lean_ctor_get(x_1, 4); -lean_inc(x_19); -lean_dec(x_1); -x_20 = lean_alloc_ctor(1, 6, 0); -lean_ctor_set(x_20, 0, x_16); -lean_ctor_set(x_20, 1, x_17); -lean_ctor_set(x_20, 2, 
x_2); -lean_ctor_set(x_20, 3, x_18); -lean_ctor_set(x_20, 4, x_19); -lean_ctor_set(x_20, 5, x_3); -x_21 = l_Lean_Meta_Grind_Arith_CommRing_mkEqCnstr(x_15, x_20, x_5, x_6, x_7, x_8, x_9, x_10, x_11, x_12, x_13, x_14); -if (lean_obj_tag(x_21) == 0) +else { -lean_object* x_22; lean_object* x_23; lean_object* x_24; -x_22 = lean_ctor_get(x_21, 0); -lean_inc(x_22); -x_23 = lean_ctor_get(x_21, 1); -lean_inc(x_23); -lean_dec(x_21); -x_24 = l_Lean_Meta_Grind_Arith_CommRing_EqCnstr_addToQueue(x_22, x_5, x_6, x_7, x_8, x_9, x_10, x_11, x_12, x_13, x_23); -if (lean_obj_tag(x_24) == 0) +lean_object* x_28; +lean_inc(x_14); +lean_inc(x_13); +lean_inc(x_12); +lean_inc(x_11); +lean_inc(x_10); +lean_inc(x_9); +lean_inc(x_8); +lean_inc(x_7); +lean_inc(x_6); +x_28 = l_Lean_Meta_Grind_Arith_CommRing_EqCnstr_simplifyWithExhaustively(x_1, x_3, x_6, x_7, x_8, x_9, x_10, x_11, x_12, x_13, x_14, x_15); +if (lean_obj_tag(x_28) == 0) { -uint8_t x_25; -x_25 = !lean_is_exclusive(x_24); -if (x_25 == 0) +lean_object* x_29; lean_object* x_30; lean_object* x_31; lean_object* x_32; uint8_t x_33; +x_29 = lean_ctor_get(x_28, 0); +lean_inc(x_29); +x_30 = lean_ctor_get(x_28, 1); +lean_inc(x_30); +lean_dec(x_28); +lean_inc(x_4); +x_31 = l_Lean_isTracingEnabledFor___at_Lean_Meta_Grind_Arith_CommRing_Null_setEqUnsat___spec__1(x_4, x_6, x_7, x_8, x_9, x_10, x_11, x_12, x_13, x_14, x_30); +x_32 = lean_ctor_get(x_31, 0); +lean_inc(x_32); +x_33 = lean_unbox(x_32); +lean_dec(x_32); +if (x_33 == 0) { -lean_object* x_26; lean_object* x_27; -x_26 = lean_ctor_get(x_24, 0); -lean_dec(x_26); -x_27 = l_List_forIn_x27_loop___at_Lean_Meta_Grind_Arith_CommRing_EqCnstr_simplifyBasis___spec__1___closed__1; -lean_ctor_set(x_24, 0, x_27); -return x_24; +lean_object* x_34; lean_object* x_35; lean_object* x_36; +lean_dec(x_4); +x_34 = lean_ctor_get(x_31, 1); +lean_inc(x_34); +lean_dec(x_31); +x_35 = lean_box(0); +x_36 = l_List_forIn_x27_loop___at_Lean_Meta_Grind_Arith_CommRing_EqCnstr_simplifyBasis_goVar___spec__1___lambda__1(x_29, x_35, x_6, x_7, x_8, x_9, x_10, x_11, x_12, x_13, x_14, x_34); +lean_dec(x_14); +lean_dec(x_13); +lean_dec(x_12); +lean_dec(x_11); +lean_dec(x_10); +lean_dec(x_9); +lean_dec(x_8); +lean_dec(x_7); +lean_dec(x_6); +return x_36; } else { -lean_object* x_28; lean_object* x_29; lean_object* x_30; -x_28 = lean_ctor_get(x_24, 1); -lean_inc(x_28); -lean_dec(x_24); -x_29 = l_List_forIn_x27_loop___at_Lean_Meta_Grind_Arith_CommRing_EqCnstr_simplifyBasis___spec__1___closed__1; -x_30 = lean_alloc_ctor(0, 2, 0); -lean_ctor_set(x_30, 0, x_29); -lean_ctor_set(x_30, 1, x_28); -return x_30; -} +uint8_t x_37; +x_37 = !lean_is_exclusive(x_31); +if (x_37 == 0) +{ +lean_object* x_38; lean_object* x_39; lean_object* x_40; +x_38 = lean_ctor_get(x_31, 1); +x_39 = lean_ctor_get(x_31, 0); +lean_dec(x_39); +lean_inc(x_29); +x_40 = l_Lean_Meta_Grind_Arith_CommRing_EqCnstr_denoteExpr___at_Lean_Meta_Grind_Arith_CommRing_Null_setEqUnsat___spec__4(x_29, x_6, x_7, x_8, x_9, x_10, x_11, x_12, x_13, x_14, x_38); +if (lean_obj_tag(x_40) == 0) +{ +lean_object* x_41; lean_object* x_42; lean_object* x_43; lean_object* x_44; lean_object* x_45; lean_object* x_46; lean_object* x_47; lean_object* x_48; lean_object* x_49; lean_object* x_50; +x_41 = lean_ctor_get(x_40, 0); +lean_inc(x_41); +x_42 = lean_ctor_get(x_40, 1); +lean_inc(x_42); +lean_dec(x_40); +x_43 = l_Lean_MessageData_ofExpr(x_41); +x_44 = l_List_forIn_x27_loop___at_Lean_Meta_Grind_Arith_CommRing_EqCnstr_simplifyBasis_goVar___spec__1___lambda__2___closed__2; +lean_ctor_set_tag(x_31, 7); 
+lean_ctor_set(x_31, 1, x_43); +lean_ctor_set(x_31, 0, x_44); +x_45 = l___private_Lean_Meta_Tactic_Grind_Arith_CommRing_EqCnstr_0__Lean_Meta_Grind_Arith_CommRing_toRingExpr_x3f___closed__5; +x_46 = lean_alloc_ctor(7, 2, 0); +lean_ctor_set(x_46, 0, x_31); +lean_ctor_set(x_46, 1, x_45); +x_47 = l_Lean_addTrace___at_Lean_Meta_Grind_Arith_CommRing_Null_setEqUnsat___spec__3(x_4, x_46, x_6, x_7, x_8, x_9, x_10, x_11, x_12, x_13, x_14, x_42); +x_48 = lean_ctor_get(x_47, 0); +lean_inc(x_48); +x_49 = lean_ctor_get(x_47, 1); +lean_inc(x_49); +lean_dec(x_47); +x_50 = l_List_forIn_x27_loop___at_Lean_Meta_Grind_Arith_CommRing_EqCnstr_simplifyBasis_goVar___spec__1___lambda__1(x_29, x_48, x_6, x_7, x_8, x_9, x_10, x_11, x_12, x_13, x_14, x_49); +lean_dec(x_14); +lean_dec(x_13); +lean_dec(x_12); +lean_dec(x_11); +lean_dec(x_10); +lean_dec(x_9); +lean_dec(x_8); +lean_dec(x_7); +lean_dec(x_6); +lean_dec(x_48); +return x_50; } else { -uint8_t x_31; -x_31 = !lean_is_exclusive(x_24); -if (x_31 == 0) +uint8_t x_51; +lean_free_object(x_31); +lean_dec(x_29); +lean_dec(x_14); +lean_dec(x_13); +lean_dec(x_12); +lean_dec(x_11); +lean_dec(x_10); +lean_dec(x_9); +lean_dec(x_8); +lean_dec(x_7); +lean_dec(x_6); +lean_dec(x_4); +x_51 = !lean_is_exclusive(x_40); +if (x_51 == 0) { -return x_24; +return x_40; } else { -lean_object* x_32; lean_object* x_33; lean_object* x_34; -x_32 = lean_ctor_get(x_24, 0); -x_33 = lean_ctor_get(x_24, 1); -lean_inc(x_33); -lean_inc(x_32); -lean_dec(x_24); -x_34 = lean_alloc_ctor(1, 2, 0); -lean_ctor_set(x_34, 0, x_32); -lean_ctor_set(x_34, 1, x_33); -return x_34; +lean_object* x_52; lean_object* x_53; lean_object* x_54; +x_52 = lean_ctor_get(x_40, 0); +x_53 = lean_ctor_get(x_40, 1); +lean_inc(x_53); +lean_inc(x_52); +lean_dec(x_40); +x_54 = lean_alloc_ctor(1, 2, 0); +lean_ctor_set(x_54, 0, x_52); +lean_ctor_set(x_54, 1, x_53); +return x_54; } } } else { -uint8_t x_35; +lean_object* x_55; lean_object* x_56; +x_55 = lean_ctor_get(x_31, 1); +lean_inc(x_55); +lean_dec(x_31); +lean_inc(x_29); +x_56 = l_Lean_Meta_Grind_Arith_CommRing_EqCnstr_denoteExpr___at_Lean_Meta_Grind_Arith_CommRing_Null_setEqUnsat___spec__4(x_29, x_6, x_7, x_8, x_9, x_10, x_11, x_12, x_13, x_14, x_55); +if (lean_obj_tag(x_56) == 0) +{ +lean_object* x_57; lean_object* x_58; lean_object* x_59; lean_object* x_60; lean_object* x_61; lean_object* x_62; lean_object* x_63; lean_object* x_64; lean_object* x_65; lean_object* x_66; lean_object* x_67; +x_57 = lean_ctor_get(x_56, 0); +lean_inc(x_57); +x_58 = lean_ctor_get(x_56, 1); +lean_inc(x_58); +lean_dec(x_56); +x_59 = l_Lean_MessageData_ofExpr(x_57); +x_60 = l_List_forIn_x27_loop___at_Lean_Meta_Grind_Arith_CommRing_EqCnstr_simplifyBasis_goVar___spec__1___lambda__2___closed__2; +x_61 = lean_alloc_ctor(7, 2, 0); +lean_ctor_set(x_61, 0, x_60); +lean_ctor_set(x_61, 1, x_59); +x_62 = l___private_Lean_Meta_Tactic_Grind_Arith_CommRing_EqCnstr_0__Lean_Meta_Grind_Arith_CommRing_toRingExpr_x3f___closed__5; +x_63 = lean_alloc_ctor(7, 2, 0); +lean_ctor_set(x_63, 0, x_61); +lean_ctor_set(x_63, 1, x_62); +x_64 = l_Lean_addTrace___at_Lean_Meta_Grind_Arith_CommRing_Null_setEqUnsat___spec__3(x_4, x_63, x_6, x_7, x_8, x_9, x_10, x_11, x_12, x_13, x_14, x_58); +x_65 = lean_ctor_get(x_64, 0); +lean_inc(x_65); +x_66 = lean_ctor_get(x_64, 1); +lean_inc(x_66); +lean_dec(x_64); +x_67 = l_List_forIn_x27_loop___at_Lean_Meta_Grind_Arith_CommRing_EqCnstr_simplifyBasis_goVar___spec__1___lambda__1(x_29, x_65, x_6, x_7, x_8, x_9, x_10, x_11, x_12, x_13, x_14, x_66); +lean_dec(x_14); lean_dec(x_13); 
lean_dec(x_12); lean_dec(x_11); lean_dec(x_10); -x_35 = !lean_is_exclusive(x_21); -if (x_35 == 0) -{ -return x_21; +lean_dec(x_9); +lean_dec(x_8); +lean_dec(x_7); +lean_dec(x_6); +lean_dec(x_65); +return x_67; } else { -lean_object* x_36; lean_object* x_37; lean_object* x_38; -x_36 = lean_ctor_get(x_21, 0); -x_37 = lean_ctor_get(x_21, 1); -lean_inc(x_37); -lean_inc(x_36); -lean_dec(x_21); -x_38 = lean_alloc_ctor(1, 2, 0); -lean_ctor_set(x_38, 0, x_36); -lean_ctor_set(x_38, 1, x_37); -return x_38; +lean_object* x_68; lean_object* x_69; lean_object* x_70; lean_object* x_71; +lean_dec(x_29); +lean_dec(x_14); +lean_dec(x_13); +lean_dec(x_12); +lean_dec(x_11); +lean_dec(x_10); +lean_dec(x_9); +lean_dec(x_8); +lean_dec(x_7); +lean_dec(x_6); +lean_dec(x_4); +x_68 = lean_ctor_get(x_56, 0); +lean_inc(x_68); +x_69 = lean_ctor_get(x_56, 1); +lean_inc(x_69); +if (lean_is_exclusive(x_56)) { + lean_ctor_release(x_56, 0); + lean_ctor_release(x_56, 1); + x_70 = x_56; +} else { + lean_dec_ref(x_56); + x_70 = lean_box(0); } +if (lean_is_scalar(x_70)) { + x_71 = lean_alloc_ctor(1, 2, 0); +} else { + x_71 = x_70; } +lean_ctor_set(x_71, 0, x_68); +lean_ctor_set(x_71, 1, x_69); +return x_71; } } -static lean_object* _init_l_List_forIn_x27_loop___at_Lean_Meta_Grind_Arith_CommRing_EqCnstr_superposeWith_go___spec__1___closed__1() { -_start: -{ -lean_object* x_1; -x_1 = lean_mk_string_unchecked("superpose", 9, 9); -return x_1; } } -static lean_object* _init_l_List_forIn_x27_loop___at_Lean_Meta_Grind_Arith_CommRing_EqCnstr_superposeWith_go___spec__1___closed__2() { -_start: +else { -lean_object* x_1; lean_object* x_2; lean_object* x_3; lean_object* x_4; -x_1 = l_Lean_Meta_Grind_Arith_CommRing_PolyDerivation_simplifyWith___closed__1; -x_2 = l_Lean_Meta_Grind_Arith_CommRing_PolyDerivation_simplifyWith___closed__2; -x_3 = l_List_forIn_x27_loop___at_Lean_Meta_Grind_Arith_CommRing_EqCnstr_superposeWith_go___spec__1___closed__1; -x_4 = l_Lean_Name_mkStr3(x_1, x_2, x_3); -return x_4; -} +uint8_t x_72; +lean_dec(x_14); +lean_dec(x_13); +lean_dec(x_12); +lean_dec(x_11); +lean_dec(x_10); +lean_dec(x_9); +lean_dec(x_8); +lean_dec(x_7); +lean_dec(x_6); +lean_dec(x_4); +x_72 = !lean_is_exclusive(x_28); +if (x_72 == 0) +{ +return x_28; } -static lean_object* _init_l_List_forIn_x27_loop___at_Lean_Meta_Grind_Arith_CommRing_EqCnstr_superposeWith_go___spec__1___closed__3() { -_start: +else { -lean_object* x_1; -x_1 = lean_mk_string_unchecked("\nwith: ", 7, 7); -return x_1; +lean_object* x_73; lean_object* x_74; lean_object* x_75; +x_73 = lean_ctor_get(x_28, 0); +x_74 = lean_ctor_get(x_28, 1); +lean_inc(x_74); +lean_inc(x_73); +lean_dec(x_28); +x_75 = lean_alloc_ctor(1, 2, 0); +lean_ctor_set(x_75, 0, x_73); +lean_ctor_set(x_75, 1, x_74); +return x_75; } } -static lean_object* _init_l_List_forIn_x27_loop___at_Lean_Meta_Grind_Arith_CommRing_EqCnstr_superposeWith_go___spec__1___closed__4() { -_start: -{ -lean_object* x_1; lean_object* x_2; -x_1 = l_List_forIn_x27_loop___at_Lean_Meta_Grind_Arith_CommRing_EqCnstr_superposeWith_go___spec__1___closed__3; -x_2 = l_Lean_stringToMessageData(x_1); -return x_2; } } -static lean_object* _init_l_List_forIn_x27_loop___at_Lean_Meta_Grind_Arith_CommRing_EqCnstr_superposeWith_go___spec__1___closed__5() { +} +} +static lean_object* _init_l_List_forIn_x27_loop___at_Lean_Meta_Grind_Arith_CommRing_EqCnstr_simplifyBasis_goVar___spec__1___closed__1() { _start: { lean_object* x_1; -x_1 = lean_mk_string_unchecked("\nresult: ", 9, 9); +x_1 = lean_mk_string_unchecked("simpBasis", 9, 9); return x_1; } } -static 
lean_object* _init_l_List_forIn_x27_loop___at_Lean_Meta_Grind_Arith_CommRing_EqCnstr_superposeWith_go___spec__1___closed__6() { +static lean_object* _init_l_List_forIn_x27_loop___at_Lean_Meta_Grind_Arith_CommRing_EqCnstr_simplifyBasis_goVar___spec__1___closed__2() { _start: { -lean_object* x_1; lean_object* x_2; -x_1 = l_List_forIn_x27_loop___at_Lean_Meta_Grind_Arith_CommRing_EqCnstr_superposeWith_go___spec__1___closed__5; -x_2 = l_Lean_stringToMessageData(x_1); -return x_2; +lean_object* x_1; lean_object* x_2; lean_object* x_3; lean_object* x_4; lean_object* x_5; +x_1 = l_Lean_Meta_Grind_Arith_CommRing_PolyDerivation_simplifyWith___closed__1; +x_2 = l_Lean_Loop_forIn_loop___at_Lean_Meta_Grind_Arith_CommRing_PolyDerivation_simplify___spec__1___lambda__2___closed__1; +x_3 = l_Lean_Meta_Grind_Arith_CommRing_PolyDerivation_simplifyWith___closed__2; +x_4 = l_List_forIn_x27_loop___at_Lean_Meta_Grind_Arith_CommRing_EqCnstr_simplifyBasis_goVar___spec__1___closed__1; +x_5 = l_Lean_Name_mkStr4(x_1, x_2, x_3, x_4); +return x_5; } } -static lean_object* _init_l_List_forIn_x27_loop___at_Lean_Meta_Grind_Arith_CommRing_EqCnstr_superposeWith_go___spec__1___closed__7() { +static lean_object* _init_l_List_forIn_x27_loop___at_Lean_Meta_Grind_Arith_CommRing_EqCnstr_simplifyBasis_goVar___spec__1___closed__3() { _start: { lean_object* x_1; -x_1 = lean_mk_string_unchecked(" = 0", 4, 4); +x_1 = lean_mk_string_unchecked("target: ", 8, 8); return x_1; } } -static lean_object* _init_l_List_forIn_x27_loop___at_Lean_Meta_Grind_Arith_CommRing_EqCnstr_superposeWith_go___spec__1___closed__8() { +static lean_object* _init_l_List_forIn_x27_loop___at_Lean_Meta_Grind_Arith_CommRing_EqCnstr_simplifyBasis_goVar___spec__1___closed__4() { _start: { lean_object* x_1; lean_object* x_2; -x_1 = l_List_forIn_x27_loop___at_Lean_Meta_Grind_Arith_CommRing_EqCnstr_superposeWith_go___spec__1___closed__7; -x_2 = l_Lean_stringToMessageData(x_1); -return x_2; -} -} -LEAN_EXPORT lean_object* l_List_forIn_x27_loop___at_Lean_Meta_Grind_Arith_CommRing_EqCnstr_superposeWith_go___spec__1(lean_object* x_1, lean_object* x_2, lean_object* x_3, lean_object* x_4, lean_object* x_5, lean_object* x_6, lean_object* x_7, lean_object* x_8, lean_object* x_9, lean_object* x_10, lean_object* x_11, lean_object* x_12, lean_object* x_13, lean_object* x_14, lean_object* x_15, lean_object* x_16, lean_object* x_17) { -_start: -{ -if (lean_obj_tag(x_5) == 0) -{ -lean_object* x_18; -lean_dec(x_16); -lean_dec(x_15); -lean_dec(x_14); -lean_dec(x_13); -lean_dec(x_12); -lean_dec(x_11); -lean_dec(x_10); -lean_dec(x_9); -lean_dec(x_8); -lean_dec(x_1); -x_18 = lean_alloc_ctor(0, 2, 0); -lean_ctor_set(x_18, 0, x_6); -lean_ctor_set(x_18, 1, x_17); -return x_18; -} -else -{ -lean_object* x_19; lean_object* x_20; lean_object* x_21; lean_object* x_22; lean_object* x_23; lean_object* x_29; lean_object* x_30; lean_object* x_31; -lean_dec(x_6); -x_19 = lean_ctor_get(x_5, 0); -lean_inc(x_19); -x_20 = lean_ctor_get(x_5, 1); -lean_inc(x_20); -if (lean_is_exclusive(x_5)) { - lean_ctor_release(x_5, 0); - lean_ctor_release(x_5, 1); - x_21 = x_5; -} else { - lean_dec_ref(x_5); - x_21 = lean_box(0); -} -x_29 = lean_ctor_get(x_1, 0); -lean_inc(x_29); -x_30 = lean_ctor_get(x_19, 0); -lean_inc(x_30); -lean_inc(x_16); -lean_inc(x_15); -lean_inc(x_14); -lean_inc(x_13); -lean_inc(x_12); -lean_inc(x_11); -lean_inc(x_10); -lean_inc(x_9); -lean_inc(x_8); -x_31 = l_Lean_Grind_CommRing_Poly_spolM(x_29, x_30, x_8, x_9, x_10, x_11, x_12, x_13, x_14, x_15, x_16, x_17); -if (lean_obj_tag(x_31) == 0) -{ 
-lean_object* x_32; lean_object* x_33; lean_object* x_34; lean_object* x_35; lean_object* x_36; uint8_t x_37; -x_32 = lean_ctor_get(x_31, 0); -lean_inc(x_32); -x_33 = lean_ctor_get(x_31, 1); -lean_inc(x_33); -lean_dec(x_31); -x_34 = l_List_forIn_x27_loop___at_Lean_Meta_Grind_Arith_CommRing_EqCnstr_superposeWith_go___spec__1___closed__2; -x_35 = l_Lean_isTracingEnabledFor___at_Lean_Meta_Grind_Arith_CommRing_Null_setEqUnsat___spec__1(x_34, x_8, x_9, x_10, x_11, x_12, x_13, x_14, x_15, x_16, x_33); -x_36 = lean_ctor_get(x_35, 0); -lean_inc(x_36); -x_37 = lean_unbox(x_36); -lean_dec(x_36); -if (x_37 == 0) -{ -lean_object* x_38; lean_object* x_39; lean_object* x_40; -x_38 = lean_ctor_get(x_35, 1); -lean_inc(x_38); -lean_dec(x_35); -x_39 = lean_box(0); -lean_inc(x_16); -lean_inc(x_15); -lean_inc(x_14); -lean_inc(x_13); -lean_inc(x_1); -x_40 = l_List_forIn_x27_loop___at_Lean_Meta_Grind_Arith_CommRing_EqCnstr_superposeWith_go___spec__1___lambda__1(x_32, x_1, x_19, x_39, x_8, x_9, x_10, x_11, x_12, x_13, x_14, x_15, x_16, x_38); -if (lean_obj_tag(x_40) == 0) -{ -lean_object* x_41; lean_object* x_42; -x_41 = lean_ctor_get(x_40, 0); -lean_inc(x_41); -x_42 = lean_ctor_get(x_40, 1); -lean_inc(x_42); -lean_dec(x_40); -x_22 = x_41; -x_23 = x_42; -goto block_28; +x_1 = l_List_forIn_x27_loop___at_Lean_Meta_Grind_Arith_CommRing_EqCnstr_simplifyBasis_goVar___spec__1___closed__3; +x_2 = l_Lean_stringToMessageData(x_1); +return x_2; } -else +} +LEAN_EXPORT lean_object* l_List_forIn_x27_loop___at_Lean_Meta_Grind_Arith_CommRing_EqCnstr_simplifyBasis_goVar___spec__1(lean_object* x_1, lean_object* x_2, lean_object* x_3, lean_object* x_4, lean_object* x_5, lean_object* x_6, lean_object* x_7, lean_object* x_8, lean_object* x_9, lean_object* x_10, lean_object* x_11, lean_object* x_12, lean_object* x_13, lean_object* x_14, lean_object* x_15, lean_object* x_16, lean_object* x_17, lean_object* x_18) { +_start: { -uint8_t x_43; -lean_dec(x_21); -lean_dec(x_20); +if (lean_obj_tag(x_6) == 0) +{ +lean_object* x_19; +lean_dec(x_17); lean_dec(x_16); lean_dec(x_15); lean_dec(x_14); @@ -14159,139 +15344,59 @@ lean_dec(x_12); lean_dec(x_11); lean_dec(x_10); lean_dec(x_9); -lean_dec(x_8); +lean_dec(x_2); lean_dec(x_1); -x_43 = !lean_is_exclusive(x_40); -if (x_43 == 0) -{ -return x_40; -} -else -{ -lean_object* x_44; lean_object* x_45; lean_object* x_46; -x_44 = lean_ctor_get(x_40, 0); -x_45 = lean_ctor_get(x_40, 1); -lean_inc(x_45); -lean_inc(x_44); -lean_dec(x_40); -x_46 = lean_alloc_ctor(1, 2, 0); -lean_ctor_set(x_46, 0, x_44); -lean_ctor_set(x_46, 1, x_45); -return x_46; -} -} +x_19 = lean_alloc_ctor(0, 2, 0); +lean_ctor_set(x_19, 0, x_7); +lean_ctor_set(x_19, 1, x_18); +return x_19; } else { -uint8_t x_47; -x_47 = !lean_is_exclusive(x_35); -if (x_47 == 0) -{ -lean_object* x_48; lean_object* x_49; lean_object* x_50; -x_48 = lean_ctor_get(x_35, 1); -x_49 = lean_ctor_get(x_35, 0); -lean_dec(x_49); -x_50 = l_Lean_Meta_Grind_updateLastTag(x_9, x_10, x_11, x_12, x_13, x_14, x_15, x_16, x_48); -if (lean_obj_tag(x_50) == 0) -{ -lean_object* x_51; lean_object* x_52; -x_51 = lean_ctor_get(x_50, 1); -lean_inc(x_51); -lean_dec(x_50); -lean_inc(x_16); -lean_inc(x_15); -lean_inc(x_14); -lean_inc(x_13); -lean_inc(x_1); -x_52 = l_Lean_Meta_Grind_Arith_CommRing_EqCnstr_denoteExpr(x_1, x_8, x_9, x_10, x_11, x_12, x_13, x_14, x_15, x_16, x_51); -if (lean_obj_tag(x_52) == 0) -{ -lean_object* x_53; lean_object* x_54; lean_object* x_55; -x_53 = lean_ctor_get(x_52, 0); -lean_inc(x_53); -x_54 = lean_ctor_get(x_52, 1); -lean_inc(x_54); 
-lean_dec(x_52); -lean_inc(x_16); -lean_inc(x_15); -lean_inc(x_14); -lean_inc(x_13); -lean_inc(x_19); -x_55 = l_Lean_Meta_Grind_Arith_CommRing_EqCnstr_denoteExpr(x_19, x_8, x_9, x_10, x_11, x_12, x_13, x_14, x_15, x_16, x_54); -if (lean_obj_tag(x_55) == 0) +uint8_t x_20; +lean_dec(x_7); +x_20 = !lean_is_exclusive(x_6); +if (x_20 == 0) { -lean_object* x_56; lean_object* x_57; lean_object* x_58; lean_object* x_59; -x_56 = lean_ctor_get(x_55, 0); -lean_inc(x_56); -x_57 = lean_ctor_get(x_55, 1); -lean_inc(x_57); -lean_dec(x_55); -x_58 = lean_ctor_get(x_32, 0); -lean_inc(x_58); -x_59 = l_Lean_Grind_CommRing_Poly_denoteExpr(x_58, x_8, x_9, x_10, x_11, x_12, x_13, x_14, x_15, x_16, x_57); -if (lean_obj_tag(x_59) == 0) +lean_object* x_21; lean_object* x_22; lean_object* x_23; lean_object* x_24; lean_object* x_25; uint8_t x_26; +x_21 = lean_ctor_get(x_6, 0); +x_22 = lean_ctor_get(x_6, 1); +x_23 = l_List_forIn_x27_loop___at_Lean_Meta_Grind_Arith_CommRing_EqCnstr_simplifyBasis_goVar___spec__1___closed__2; +x_24 = l_Lean_isTracingEnabledFor___at_Lean_Meta_Grind_Arith_CommRing_Null_setEqUnsat___spec__1(x_23, x_9, x_10, x_11, x_12, x_13, x_14, x_15, x_16, x_17, x_18); +x_25 = lean_ctor_get(x_24, 0); +lean_inc(x_25); +x_26 = lean_unbox(x_25); +lean_dec(x_25); +if (x_26 == 0) { -lean_object* x_60; lean_object* x_61; lean_object* x_62; lean_object* x_63; lean_object* x_64; lean_object* x_65; lean_object* x_66; lean_object* x_67; lean_object* x_68; lean_object* x_69; lean_object* x_70; lean_object* x_71; lean_object* x_72; lean_object* x_73; lean_object* x_74; lean_object* x_75; lean_object* x_76; lean_object* x_77; -x_60 = lean_ctor_get(x_59, 0); -lean_inc(x_60); -x_61 = lean_ctor_get(x_59, 1); -lean_inc(x_61); -lean_dec(x_59); -x_62 = l_Lean_MessageData_ofExpr(x_53); -x_63 = l___private_Lean_Meta_Tactic_Grind_Arith_CommRing_EqCnstr_0__Lean_Meta_Grind_Arith_CommRing_toRingExpr_x3f___closed__5; -lean_ctor_set_tag(x_35, 7); -lean_ctor_set(x_35, 1, x_62); -lean_ctor_set(x_35, 0, x_63); -x_64 = l_List_forIn_x27_loop___at_Lean_Meta_Grind_Arith_CommRing_EqCnstr_superposeWith_go___spec__1___closed__4; -x_65 = lean_alloc_ctor(7, 2, 0); -lean_ctor_set(x_65, 0, x_35); -lean_ctor_set(x_65, 1, x_64); -x_66 = l_Lean_MessageData_ofExpr(x_56); -x_67 = lean_alloc_ctor(7, 2, 0); -lean_ctor_set(x_67, 0, x_65); -lean_ctor_set(x_67, 1, x_66); -x_68 = l_List_forIn_x27_loop___at_Lean_Meta_Grind_Arith_CommRing_EqCnstr_superposeWith_go___spec__1___closed__6; -x_69 = lean_alloc_ctor(7, 2, 0); -lean_ctor_set(x_69, 0, x_67); -lean_ctor_set(x_69, 1, x_68); -x_70 = l_Lean_MessageData_ofExpr(x_60); -x_71 = lean_alloc_ctor(7, 2, 0); -lean_ctor_set(x_71, 0, x_69); -lean_ctor_set(x_71, 1, x_70); -x_72 = l_List_forIn_x27_loop___at_Lean_Meta_Grind_Arith_CommRing_EqCnstr_superposeWith_go___spec__1___closed__8; -x_73 = lean_alloc_ctor(7, 2, 0); -lean_ctor_set(x_73, 0, x_71); -lean_ctor_set(x_73, 1, x_72); -x_74 = l_Lean_addTrace___at_Lean_Meta_Grind_Arith_CommRing_Null_setEqUnsat___spec__3(x_34, x_73, x_8, x_9, x_10, x_11, x_12, x_13, x_14, x_15, x_16, x_61); -x_75 = lean_ctor_get(x_74, 0); -lean_inc(x_75); -x_76 = lean_ctor_get(x_74, 1); -lean_inc(x_76); -lean_dec(x_74); +lean_object* x_27; lean_object* x_28; lean_object* x_29; +lean_free_object(x_6); +x_27 = lean_ctor_get(x_24, 1); +lean_inc(x_27); +lean_dec(x_24); +x_28 = lean_box(0); +lean_inc(x_17); lean_inc(x_16); lean_inc(x_15); lean_inc(x_14); lean_inc(x_13); +lean_inc(x_12); +lean_inc(x_11); +lean_inc(x_10); +lean_inc(x_9); lean_inc(x_1); -x_77 = 
l_List_forIn_x27_loop___at_Lean_Meta_Grind_Arith_CommRing_EqCnstr_superposeWith_go___spec__1___lambda__1(x_32, x_1, x_19, x_75, x_8, x_9, x_10, x_11, x_12, x_13, x_14, x_15, x_16, x_76); -lean_dec(x_75); -if (lean_obj_tag(x_77) == 0) +lean_inc(x_2); +x_29 = l_List_forIn_x27_loop___at_Lean_Meta_Grind_Arith_CommRing_EqCnstr_simplifyBasis_goVar___spec__1___lambda__2(x_21, x_2, x_1, x_23, x_28, x_9, x_10, x_11, x_12, x_13, x_14, x_15, x_16, x_17, x_27); +if (lean_obj_tag(x_29) == 0) { -lean_object* x_78; lean_object* x_79; -x_78 = lean_ctor_get(x_77, 0); -lean_inc(x_78); -x_79 = lean_ctor_get(x_77, 1); -lean_inc(x_79); -lean_dec(x_77); -x_22 = x_78; -x_23 = x_79; -goto block_28; -} -else +lean_object* x_30; +x_30 = lean_ctor_get(x_29, 0); +lean_inc(x_30); +if (lean_obj_tag(x_30) == 0) { -uint8_t x_80; -lean_dec(x_21); -lean_dec(x_20); +uint8_t x_31; +lean_dec(x_22); +lean_dec(x_17); lean_dec(x_16); lean_dec(x_15); lean_dec(x_14); @@ -14300,38 +15405,56 @@ lean_dec(x_12); lean_dec(x_11); lean_dec(x_10); lean_dec(x_9); -lean_dec(x_8); +lean_dec(x_2); lean_dec(x_1); -x_80 = !lean_is_exclusive(x_77); -if (x_80 == 0) +x_31 = !lean_is_exclusive(x_29); +if (x_31 == 0) { -return x_77; +lean_object* x_32; lean_object* x_33; +x_32 = lean_ctor_get(x_29, 0); +lean_dec(x_32); +x_33 = lean_ctor_get(x_30, 0); +lean_inc(x_33); +lean_dec(x_30); +lean_ctor_set(x_29, 0, x_33); +return x_29; } else { -lean_object* x_81; lean_object* x_82; lean_object* x_83; -x_81 = lean_ctor_get(x_77, 0); -x_82 = lean_ctor_get(x_77, 1); -lean_inc(x_82); -lean_inc(x_81); -lean_dec(x_77); -x_83 = lean_alloc_ctor(1, 2, 0); -lean_ctor_set(x_83, 0, x_81); -lean_ctor_set(x_83, 1, x_82); -return x_83; +lean_object* x_34; lean_object* x_35; lean_object* x_36; +x_34 = lean_ctor_get(x_29, 1); +lean_inc(x_34); +lean_dec(x_29); +x_35 = lean_ctor_get(x_30, 0); +lean_inc(x_35); +lean_dec(x_30); +x_36 = lean_alloc_ctor(0, 2, 0); +lean_ctor_set(x_36, 0, x_35); +lean_ctor_set(x_36, 1, x_34); +return x_36; +} } +else +{ +lean_object* x_37; lean_object* x_38; +x_37 = lean_ctor_get(x_29, 1); +lean_inc(x_37); +lean_dec(x_29); +x_38 = lean_ctor_get(x_30, 0); +lean_inc(x_38); +lean_dec(x_30); +x_6 = x_22; +x_7 = x_38; +x_8 = lean_box(0); +x_18 = x_37; +goto _start; } } else { -uint8_t x_84; -lean_dec(x_56); -lean_dec(x_53); -lean_free_object(x_35); -lean_dec(x_32); -lean_dec(x_21); -lean_dec(x_20); -lean_dec(x_19); +uint8_t x_40; +lean_dec(x_22); +lean_dec(x_17); lean_dec(x_16); lean_dec(x_15); lean_dec(x_14); @@ -14340,37 +15463,86 @@ lean_dec(x_12); lean_dec(x_11); lean_dec(x_10); lean_dec(x_9); -lean_dec(x_8); +lean_dec(x_2); lean_dec(x_1); -x_84 = !lean_is_exclusive(x_59); -if (x_84 == 0) +x_40 = !lean_is_exclusive(x_29); +if (x_40 == 0) { -return x_59; +return x_29; +} +else +{ +lean_object* x_41; lean_object* x_42; lean_object* x_43; +x_41 = lean_ctor_get(x_29, 0); +x_42 = lean_ctor_get(x_29, 1); +lean_inc(x_42); +lean_inc(x_41); +lean_dec(x_29); +x_43 = lean_alloc_ctor(1, 2, 0); +lean_ctor_set(x_43, 0, x_41); +lean_ctor_set(x_43, 1, x_42); +return x_43; +} +} } else { -lean_object* x_85; lean_object* x_86; lean_object* x_87; -x_85 = lean_ctor_get(x_59, 0); -x_86 = lean_ctor_get(x_59, 1); -lean_inc(x_86); -lean_inc(x_85); -lean_dec(x_59); -x_87 = lean_alloc_ctor(1, 2, 0); -lean_ctor_set(x_87, 0, x_85); -lean_ctor_set(x_87, 1, x_86); -return x_87; -} -} -} -else +uint8_t x_44; +x_44 = !lean_is_exclusive(x_24); +if (x_44 == 0) +{ +lean_object* x_45; lean_object* x_46; lean_object* x_47; +x_45 = lean_ctor_get(x_24, 1); +x_46 = lean_ctor_get(x_24, 
0); +lean_dec(x_46); +lean_inc(x_21); +x_47 = l_Lean_Meta_Grind_Arith_CommRing_EqCnstr_denoteExpr___at_Lean_Meta_Grind_Arith_CommRing_Null_setEqUnsat___spec__4(x_21, x_9, x_10, x_11, x_12, x_13, x_14, x_15, x_16, x_17, x_45); +if (lean_obj_tag(x_47) == 0) +{ +lean_object* x_48; lean_object* x_49; lean_object* x_50; lean_object* x_51; lean_object* x_52; lean_object* x_53; lean_object* x_54; lean_object* x_55; lean_object* x_56; +x_48 = lean_ctor_get(x_47, 0); +lean_inc(x_48); +x_49 = lean_ctor_get(x_47, 1); +lean_inc(x_49); +lean_dec(x_47); +x_50 = l_Lean_MessageData_ofExpr(x_48); +x_51 = l_List_forIn_x27_loop___at_Lean_Meta_Grind_Arith_CommRing_EqCnstr_simplifyBasis_goVar___spec__1___closed__4; +lean_ctor_set_tag(x_24, 7); +lean_ctor_set(x_24, 1, x_50); +lean_ctor_set(x_24, 0, x_51); +x_52 = l___private_Lean_Meta_Tactic_Grind_Arith_CommRing_EqCnstr_0__Lean_Meta_Grind_Arith_CommRing_toRingExpr_x3f___closed__5; +lean_ctor_set_tag(x_6, 7); +lean_ctor_set(x_6, 1, x_52); +lean_ctor_set(x_6, 0, x_24); +x_53 = l_Lean_addTrace___at_Lean_Meta_Grind_Arith_CommRing_Null_setEqUnsat___spec__3(x_23, x_6, x_9, x_10, x_11, x_12, x_13, x_14, x_15, x_16, x_17, x_49); +x_54 = lean_ctor_get(x_53, 0); +lean_inc(x_54); +x_55 = lean_ctor_get(x_53, 1); +lean_inc(x_55); +lean_dec(x_53); +lean_inc(x_17); +lean_inc(x_16); +lean_inc(x_15); +lean_inc(x_14); +lean_inc(x_13); +lean_inc(x_12); +lean_inc(x_11); +lean_inc(x_10); +lean_inc(x_9); +lean_inc(x_1); +lean_inc(x_2); +x_56 = l_List_forIn_x27_loop___at_Lean_Meta_Grind_Arith_CommRing_EqCnstr_simplifyBasis_goVar___spec__1___lambda__2(x_21, x_2, x_1, x_23, x_54, x_9, x_10, x_11, x_12, x_13, x_14, x_15, x_16, x_17, x_55); +lean_dec(x_54); +if (lean_obj_tag(x_56) == 0) +{ +lean_object* x_57; +x_57 = lean_ctor_get(x_56, 0); +lean_inc(x_57); +if (lean_obj_tag(x_57) == 0) { -uint8_t x_88; -lean_dec(x_53); -lean_free_object(x_35); -lean_dec(x_32); -lean_dec(x_21); -lean_dec(x_20); -lean_dec(x_19); +uint8_t x_58; +lean_dec(x_22); +lean_dec(x_17); lean_dec(x_16); lean_dec(x_15); lean_dec(x_14); @@ -14379,36 +15551,56 @@ lean_dec(x_12); lean_dec(x_11); lean_dec(x_10); lean_dec(x_9); -lean_dec(x_8); +lean_dec(x_2); lean_dec(x_1); -x_88 = !lean_is_exclusive(x_55); -if (x_88 == 0) +x_58 = !lean_is_exclusive(x_56); +if (x_58 == 0) { -return x_55; +lean_object* x_59; lean_object* x_60; +x_59 = lean_ctor_get(x_56, 0); +lean_dec(x_59); +x_60 = lean_ctor_get(x_57, 0); +lean_inc(x_60); +lean_dec(x_57); +lean_ctor_set(x_56, 0, x_60); +return x_56; } else { -lean_object* x_89; lean_object* x_90; lean_object* x_91; -x_89 = lean_ctor_get(x_55, 0); -x_90 = lean_ctor_get(x_55, 1); -lean_inc(x_90); -lean_inc(x_89); -lean_dec(x_55); -x_91 = lean_alloc_ctor(1, 2, 0); -lean_ctor_set(x_91, 0, x_89); -lean_ctor_set(x_91, 1, x_90); -return x_91; +lean_object* x_61; lean_object* x_62; lean_object* x_63; +x_61 = lean_ctor_get(x_56, 1); +lean_inc(x_61); +lean_dec(x_56); +x_62 = lean_ctor_get(x_57, 0); +lean_inc(x_62); +lean_dec(x_57); +x_63 = lean_alloc_ctor(0, 2, 0); +lean_ctor_set(x_63, 0, x_62); +lean_ctor_set(x_63, 1, x_61); +return x_63; +} } +else +{ +lean_object* x_64; lean_object* x_65; +x_64 = lean_ctor_get(x_56, 1); +lean_inc(x_64); +lean_dec(x_56); +x_65 = lean_ctor_get(x_57, 0); +lean_inc(x_65); +lean_dec(x_57); +x_6 = x_22; +x_7 = x_65; +x_8 = lean_box(0); +x_18 = x_64; +goto _start; } } else { -uint8_t x_92; -lean_free_object(x_35); -lean_dec(x_32); -lean_dec(x_21); -lean_dec(x_20); -lean_dec(x_19); +uint8_t x_67; +lean_dec(x_22); +lean_dec(x_17); lean_dec(x_16); lean_dec(x_15); 
lean_dec(x_14); @@ -14417,36 +15609,36 @@ lean_dec(x_12); lean_dec(x_11); lean_dec(x_10); lean_dec(x_9); -lean_dec(x_8); +lean_dec(x_2); lean_dec(x_1); -x_92 = !lean_is_exclusive(x_52); -if (x_92 == 0) +x_67 = !lean_is_exclusive(x_56); +if (x_67 == 0) { -return x_52; +return x_56; } else { -lean_object* x_93; lean_object* x_94; lean_object* x_95; -x_93 = lean_ctor_get(x_52, 0); -x_94 = lean_ctor_get(x_52, 1); -lean_inc(x_94); -lean_inc(x_93); -lean_dec(x_52); -x_95 = lean_alloc_ctor(1, 2, 0); -lean_ctor_set(x_95, 0, x_93); -lean_ctor_set(x_95, 1, x_94); -return x_95; +lean_object* x_68; lean_object* x_69; lean_object* x_70; +x_68 = lean_ctor_get(x_56, 0); +x_69 = lean_ctor_get(x_56, 1); +lean_inc(x_69); +lean_inc(x_68); +lean_dec(x_56); +x_70 = lean_alloc_ctor(1, 2, 0); +lean_ctor_set(x_70, 0, x_68); +lean_ctor_set(x_70, 1, x_69); +return x_70; } } } else { -uint8_t x_96; -lean_free_object(x_35); -lean_dec(x_32); +uint8_t x_71; +lean_free_object(x_24); +lean_free_object(x_6); +lean_dec(x_22); lean_dec(x_21); -lean_dec(x_20); -lean_dec(x_19); +lean_dec(x_17); lean_dec(x_16); lean_dec(x_15); lean_dec(x_14); @@ -14455,135 +15647,82 @@ lean_dec(x_12); lean_dec(x_11); lean_dec(x_10); lean_dec(x_9); -lean_dec(x_8); +lean_dec(x_2); lean_dec(x_1); -x_96 = !lean_is_exclusive(x_50); -if (x_96 == 0) +x_71 = !lean_is_exclusive(x_47); +if (x_71 == 0) { -return x_50; +return x_47; } else { -lean_object* x_97; lean_object* x_98; lean_object* x_99; -x_97 = lean_ctor_get(x_50, 0); -x_98 = lean_ctor_get(x_50, 1); -lean_inc(x_98); -lean_inc(x_97); -lean_dec(x_50); -x_99 = lean_alloc_ctor(1, 2, 0); -lean_ctor_set(x_99, 0, x_97); -lean_ctor_set(x_99, 1, x_98); -return x_99; +lean_object* x_72; lean_object* x_73; lean_object* x_74; +x_72 = lean_ctor_get(x_47, 0); +x_73 = lean_ctor_get(x_47, 1); +lean_inc(x_73); +lean_inc(x_72); +lean_dec(x_47); +x_74 = lean_alloc_ctor(1, 2, 0); +lean_ctor_set(x_74, 0, x_72); +lean_ctor_set(x_74, 1, x_73); +return x_74; } } } else { -lean_object* x_100; lean_object* x_101; -x_100 = lean_ctor_get(x_35, 1); -lean_inc(x_100); -lean_dec(x_35); -x_101 = l_Lean_Meta_Grind_updateLastTag(x_9, x_10, x_11, x_12, x_13, x_14, x_15, x_16, x_100); -if (lean_obj_tag(x_101) == 0) -{ -lean_object* x_102; lean_object* x_103; -x_102 = lean_ctor_get(x_101, 1); -lean_inc(x_102); -lean_dec(x_101); -lean_inc(x_16); -lean_inc(x_15); -lean_inc(x_14); -lean_inc(x_13); -lean_inc(x_1); -x_103 = l_Lean_Meta_Grind_Arith_CommRing_EqCnstr_denoteExpr(x_1, x_8, x_9, x_10, x_11, x_12, x_13, x_14, x_15, x_16, x_102); -if (lean_obj_tag(x_103) == 0) -{ -lean_object* x_104; lean_object* x_105; lean_object* x_106; -x_104 = lean_ctor_get(x_103, 0); -lean_inc(x_104); -x_105 = lean_ctor_get(x_103, 1); -lean_inc(x_105); -lean_dec(x_103); -lean_inc(x_16); -lean_inc(x_15); -lean_inc(x_14); -lean_inc(x_13); -lean_inc(x_19); -x_106 = l_Lean_Meta_Grind_Arith_CommRing_EqCnstr_denoteExpr(x_19, x_8, x_9, x_10, x_11, x_12, x_13, x_14, x_15, x_16, x_105); -if (lean_obj_tag(x_106) == 0) -{ -lean_object* x_107; lean_object* x_108; lean_object* x_109; lean_object* x_110; -x_107 = lean_ctor_get(x_106, 0); -lean_inc(x_107); -x_108 = lean_ctor_get(x_106, 1); -lean_inc(x_108); -lean_dec(x_106); -x_109 = lean_ctor_get(x_32, 0); -lean_inc(x_109); -x_110 = l_Lean_Grind_CommRing_Poly_denoteExpr(x_109, x_8, x_9, x_10, x_11, x_12, x_13, x_14, x_15, x_16, x_108); -if (lean_obj_tag(x_110) == 0) +lean_object* x_75; lean_object* x_76; +x_75 = lean_ctor_get(x_24, 1); +lean_inc(x_75); +lean_dec(x_24); +lean_inc(x_21); +x_76 = 
l_Lean_Meta_Grind_Arith_CommRing_EqCnstr_denoteExpr___at_Lean_Meta_Grind_Arith_CommRing_Null_setEqUnsat___spec__4(x_21, x_9, x_10, x_11, x_12, x_13, x_14, x_15, x_16, x_17, x_75); +if (lean_obj_tag(x_76) == 0) { -lean_object* x_111; lean_object* x_112; lean_object* x_113; lean_object* x_114; lean_object* x_115; lean_object* x_116; lean_object* x_117; lean_object* x_118; lean_object* x_119; lean_object* x_120; lean_object* x_121; lean_object* x_122; lean_object* x_123; lean_object* x_124; lean_object* x_125; lean_object* x_126; lean_object* x_127; lean_object* x_128; lean_object* x_129; -x_111 = lean_ctor_get(x_110, 0); -lean_inc(x_111); -x_112 = lean_ctor_get(x_110, 1); -lean_inc(x_112); -lean_dec(x_110); -x_113 = l_Lean_MessageData_ofExpr(x_104); -x_114 = l___private_Lean_Meta_Tactic_Grind_Arith_CommRing_EqCnstr_0__Lean_Meta_Grind_Arith_CommRing_toRingExpr_x3f___closed__5; -x_115 = lean_alloc_ctor(7, 2, 0); -lean_ctor_set(x_115, 0, x_114); -lean_ctor_set(x_115, 1, x_113); -x_116 = l_List_forIn_x27_loop___at_Lean_Meta_Grind_Arith_CommRing_EqCnstr_superposeWith_go___spec__1___closed__4; -x_117 = lean_alloc_ctor(7, 2, 0); -lean_ctor_set(x_117, 0, x_115); -lean_ctor_set(x_117, 1, x_116); -x_118 = l_Lean_MessageData_ofExpr(x_107); -x_119 = lean_alloc_ctor(7, 2, 0); -lean_ctor_set(x_119, 0, x_117); -lean_ctor_set(x_119, 1, x_118); -x_120 = l_List_forIn_x27_loop___at_Lean_Meta_Grind_Arith_CommRing_EqCnstr_superposeWith_go___spec__1___closed__6; -x_121 = lean_alloc_ctor(7, 2, 0); -lean_ctor_set(x_121, 0, x_119); -lean_ctor_set(x_121, 1, x_120); -x_122 = l_Lean_MessageData_ofExpr(x_111); -x_123 = lean_alloc_ctor(7, 2, 0); -lean_ctor_set(x_123, 0, x_121); -lean_ctor_set(x_123, 1, x_122); -x_124 = l_List_forIn_x27_loop___at_Lean_Meta_Grind_Arith_CommRing_EqCnstr_superposeWith_go___spec__1___closed__8; -x_125 = lean_alloc_ctor(7, 2, 0); -lean_ctor_set(x_125, 0, x_123); -lean_ctor_set(x_125, 1, x_124); -x_126 = l_Lean_addTrace___at_Lean_Meta_Grind_Arith_CommRing_Null_setEqUnsat___spec__3(x_34, x_125, x_8, x_9, x_10, x_11, x_12, x_13, x_14, x_15, x_16, x_112); -x_127 = lean_ctor_get(x_126, 0); -lean_inc(x_127); -x_128 = lean_ctor_get(x_126, 1); -lean_inc(x_128); -lean_dec(x_126); +lean_object* x_77; lean_object* x_78; lean_object* x_79; lean_object* x_80; lean_object* x_81; lean_object* x_82; lean_object* x_83; lean_object* x_84; lean_object* x_85; lean_object* x_86; +x_77 = lean_ctor_get(x_76, 0); +lean_inc(x_77); +x_78 = lean_ctor_get(x_76, 1); +lean_inc(x_78); +lean_dec(x_76); +x_79 = l_Lean_MessageData_ofExpr(x_77); +x_80 = l_List_forIn_x27_loop___at_Lean_Meta_Grind_Arith_CommRing_EqCnstr_simplifyBasis_goVar___spec__1___closed__4; +x_81 = lean_alloc_ctor(7, 2, 0); +lean_ctor_set(x_81, 0, x_80); +lean_ctor_set(x_81, 1, x_79); +x_82 = l___private_Lean_Meta_Tactic_Grind_Arith_CommRing_EqCnstr_0__Lean_Meta_Grind_Arith_CommRing_toRingExpr_x3f___closed__5; +lean_ctor_set_tag(x_6, 7); +lean_ctor_set(x_6, 1, x_82); +lean_ctor_set(x_6, 0, x_81); +x_83 = l_Lean_addTrace___at_Lean_Meta_Grind_Arith_CommRing_Null_setEqUnsat___spec__3(x_23, x_6, x_9, x_10, x_11, x_12, x_13, x_14, x_15, x_16, x_17, x_78); +x_84 = lean_ctor_get(x_83, 0); +lean_inc(x_84); +x_85 = lean_ctor_get(x_83, 1); +lean_inc(x_85); +lean_dec(x_83); +lean_inc(x_17); lean_inc(x_16); lean_inc(x_15); lean_inc(x_14); lean_inc(x_13); +lean_inc(x_12); +lean_inc(x_11); +lean_inc(x_10); +lean_inc(x_9); lean_inc(x_1); -x_129 = l_List_forIn_x27_loop___at_Lean_Meta_Grind_Arith_CommRing_EqCnstr_superposeWith_go___spec__1___lambda__1(x_32, x_1, x_19, x_127, 
x_8, x_9, x_10, x_11, x_12, x_13, x_14, x_15, x_16, x_128); -lean_dec(x_127); -if (lean_obj_tag(x_129) == 0) +lean_inc(x_2); +x_86 = l_List_forIn_x27_loop___at_Lean_Meta_Grind_Arith_CommRing_EqCnstr_simplifyBasis_goVar___spec__1___lambda__2(x_21, x_2, x_1, x_23, x_84, x_9, x_10, x_11, x_12, x_13, x_14, x_15, x_16, x_17, x_85); +lean_dec(x_84); +if (lean_obj_tag(x_86) == 0) { -lean_object* x_130; lean_object* x_131; -x_130 = lean_ctor_get(x_129, 0); -lean_inc(x_130); -x_131 = lean_ctor_get(x_129, 1); -lean_inc(x_131); -lean_dec(x_129); -x_22 = x_130; -x_23 = x_131; -goto block_28; -} -else +lean_object* x_87; +x_87 = lean_ctor_get(x_86, 0); +lean_inc(x_87); +if (lean_obj_tag(x_87) == 0) { -lean_object* x_132; lean_object* x_133; lean_object* x_134; lean_object* x_135; -lean_dec(x_21); -lean_dec(x_20); +lean_object* x_88; lean_object* x_89; lean_object* x_90; lean_object* x_91; +lean_dec(x_22); +lean_dec(x_17); lean_dec(x_16); lean_dec(x_15); lean_dec(x_14); @@ -14592,39 +15731,51 @@ lean_dec(x_12); lean_dec(x_11); lean_dec(x_10); lean_dec(x_9); -lean_dec(x_8); +lean_dec(x_2); lean_dec(x_1); -x_132 = lean_ctor_get(x_129, 0); -lean_inc(x_132); -x_133 = lean_ctor_get(x_129, 1); -lean_inc(x_133); -if (lean_is_exclusive(x_129)) { - lean_ctor_release(x_129, 0); - lean_ctor_release(x_129, 1); - x_134 = x_129; +x_88 = lean_ctor_get(x_86, 1); +lean_inc(x_88); +if (lean_is_exclusive(x_86)) { + lean_ctor_release(x_86, 0); + lean_ctor_release(x_86, 1); + x_89 = x_86; } else { - lean_dec_ref(x_129); - x_134 = lean_box(0); + lean_dec_ref(x_86); + x_89 = lean_box(0); } -if (lean_is_scalar(x_134)) { - x_135 = lean_alloc_ctor(1, 2, 0); +x_90 = lean_ctor_get(x_87, 0); +lean_inc(x_90); +lean_dec(x_87); +if (lean_is_scalar(x_89)) { + x_91 = lean_alloc_ctor(0, 2, 0); } else { - x_135 = x_134; + x_91 = x_89; } -lean_ctor_set(x_135, 0, x_132); -lean_ctor_set(x_135, 1, x_133); -return x_135; +lean_ctor_set(x_91, 0, x_90); +lean_ctor_set(x_91, 1, x_88); +return x_91; +} +else +{ +lean_object* x_92; lean_object* x_93; +x_92 = lean_ctor_get(x_86, 1); +lean_inc(x_92); +lean_dec(x_86); +x_93 = lean_ctor_get(x_87, 0); +lean_inc(x_93); +lean_dec(x_87); +x_6 = x_22; +x_7 = x_93; +x_8 = lean_box(0); +x_18 = x_92; +goto _start; } } else { -lean_object* x_136; lean_object* x_137; lean_object* x_138; lean_object* x_139; -lean_dec(x_107); -lean_dec(x_104); -lean_dec(x_32); -lean_dec(x_21); -lean_dec(x_20); -lean_dec(x_19); +lean_object* x_95; lean_object* x_96; lean_object* x_97; lean_object* x_98; +lean_dec(x_22); +lean_dec(x_17); lean_dec(x_16); lean_dec(x_15); lean_dec(x_14); @@ -14633,38 +15784,37 @@ lean_dec(x_12); lean_dec(x_11); lean_dec(x_10); lean_dec(x_9); -lean_dec(x_8); +lean_dec(x_2); lean_dec(x_1); -x_136 = lean_ctor_get(x_110, 0); -lean_inc(x_136); -x_137 = lean_ctor_get(x_110, 1); -lean_inc(x_137); -if (lean_is_exclusive(x_110)) { - lean_ctor_release(x_110, 0); - lean_ctor_release(x_110, 1); - x_138 = x_110; +x_95 = lean_ctor_get(x_86, 0); +lean_inc(x_95); +x_96 = lean_ctor_get(x_86, 1); +lean_inc(x_96); +if (lean_is_exclusive(x_86)) { + lean_ctor_release(x_86, 0); + lean_ctor_release(x_86, 1); + x_97 = x_86; } else { - lean_dec_ref(x_110); - x_138 = lean_box(0); + lean_dec_ref(x_86); + x_97 = lean_box(0); } -if (lean_is_scalar(x_138)) { - x_139 = lean_alloc_ctor(1, 2, 0); +if (lean_is_scalar(x_97)) { + x_98 = lean_alloc_ctor(1, 2, 0); } else { - x_139 = x_138; + x_98 = x_97; } -lean_ctor_set(x_139, 0, x_136); -lean_ctor_set(x_139, 1, x_137); -return x_139; +lean_ctor_set(x_98, 0, x_95); +lean_ctor_set(x_98, 1, 
x_96); +return x_98; } } else { -lean_object* x_140; lean_object* x_141; lean_object* x_142; lean_object* x_143; -lean_dec(x_104); -lean_dec(x_32); +lean_object* x_99; lean_object* x_100; lean_object* x_101; lean_object* x_102; +lean_free_object(x_6); +lean_dec(x_22); lean_dec(x_21); -lean_dec(x_20); -lean_dec(x_19); +lean_dec(x_17); lean_dec(x_16); lean_dec(x_15); lean_dec(x_14); @@ -14673,37 +15823,75 @@ lean_dec(x_12); lean_dec(x_11); lean_dec(x_10); lean_dec(x_9); -lean_dec(x_8); +lean_dec(x_2); lean_dec(x_1); -x_140 = lean_ctor_get(x_106, 0); -lean_inc(x_140); -x_141 = lean_ctor_get(x_106, 1); -lean_inc(x_141); -if (lean_is_exclusive(x_106)) { - lean_ctor_release(x_106, 0); - lean_ctor_release(x_106, 1); - x_142 = x_106; +x_99 = lean_ctor_get(x_76, 0); +lean_inc(x_99); +x_100 = lean_ctor_get(x_76, 1); +lean_inc(x_100); +if (lean_is_exclusive(x_76)) { + lean_ctor_release(x_76, 0); + lean_ctor_release(x_76, 1); + x_101 = x_76; } else { - lean_dec_ref(x_106); - x_142 = lean_box(0); + lean_dec_ref(x_76); + x_101 = lean_box(0); } -if (lean_is_scalar(x_142)) { - x_143 = lean_alloc_ctor(1, 2, 0); +if (lean_is_scalar(x_101)) { + x_102 = lean_alloc_ctor(1, 2, 0); } else { - x_143 = x_142; + x_102 = x_101; +} +lean_ctor_set(x_102, 0, x_99); +lean_ctor_set(x_102, 1, x_100); +return x_102; +} } -lean_ctor_set(x_143, 0, x_140); -lean_ctor_set(x_143, 1, x_141); -return x_143; } } else { -lean_object* x_144; lean_object* x_145; lean_object* x_146; lean_object* x_147; -lean_dec(x_32); -lean_dec(x_21); -lean_dec(x_20); -lean_dec(x_19); +lean_object* x_103; lean_object* x_104; lean_object* x_105; lean_object* x_106; lean_object* x_107; uint8_t x_108; +x_103 = lean_ctor_get(x_6, 0); +x_104 = lean_ctor_get(x_6, 1); +lean_inc(x_104); +lean_inc(x_103); +lean_dec(x_6); +x_105 = l_List_forIn_x27_loop___at_Lean_Meta_Grind_Arith_CommRing_EqCnstr_simplifyBasis_goVar___spec__1___closed__2; +x_106 = l_Lean_isTracingEnabledFor___at_Lean_Meta_Grind_Arith_CommRing_Null_setEqUnsat___spec__1(x_105, x_9, x_10, x_11, x_12, x_13, x_14, x_15, x_16, x_17, x_18); +x_107 = lean_ctor_get(x_106, 0); +lean_inc(x_107); +x_108 = lean_unbox(x_107); +lean_dec(x_107); +if (x_108 == 0) +{ +lean_object* x_109; lean_object* x_110; lean_object* x_111; +x_109 = lean_ctor_get(x_106, 1); +lean_inc(x_109); +lean_dec(x_106); +x_110 = lean_box(0); +lean_inc(x_17); +lean_inc(x_16); +lean_inc(x_15); +lean_inc(x_14); +lean_inc(x_13); +lean_inc(x_12); +lean_inc(x_11); +lean_inc(x_10); +lean_inc(x_9); +lean_inc(x_1); +lean_inc(x_2); +x_111 = l_List_forIn_x27_loop___at_Lean_Meta_Grind_Arith_CommRing_EqCnstr_simplifyBasis_goVar___spec__1___lambda__2(x_103, x_2, x_1, x_105, x_110, x_9, x_10, x_11, x_12, x_13, x_14, x_15, x_16, x_17, x_109); +if (lean_obj_tag(x_111) == 0) +{ +lean_object* x_112; +x_112 = lean_ctor_get(x_111, 0); +lean_inc(x_112); +if (lean_obj_tag(x_112) == 0) +{ +lean_object* x_113; lean_object* x_114; lean_object* x_115; lean_object* x_116; +lean_dec(x_104); +lean_dec(x_17); lean_dec(x_16); lean_dec(x_15); lean_dec(x_14); @@ -14712,37 +15900,51 @@ lean_dec(x_12); lean_dec(x_11); lean_dec(x_10); lean_dec(x_9); -lean_dec(x_8); +lean_dec(x_2); lean_dec(x_1); -x_144 = lean_ctor_get(x_103, 0); -lean_inc(x_144); -x_145 = lean_ctor_get(x_103, 1); -lean_inc(x_145); -if (lean_is_exclusive(x_103)) { - lean_ctor_release(x_103, 0); - lean_ctor_release(x_103, 1); - x_146 = x_103; +x_113 = lean_ctor_get(x_111, 1); +lean_inc(x_113); +if (lean_is_exclusive(x_111)) { + lean_ctor_release(x_111, 0); + lean_ctor_release(x_111, 1); + x_114 = x_111; } 
else { - lean_dec_ref(x_103); - x_146 = lean_box(0); + lean_dec_ref(x_111); + x_114 = lean_box(0); } -if (lean_is_scalar(x_146)) { - x_147 = lean_alloc_ctor(1, 2, 0); +x_115 = lean_ctor_get(x_112, 0); +lean_inc(x_115); +lean_dec(x_112); +if (lean_is_scalar(x_114)) { + x_116 = lean_alloc_ctor(0, 2, 0); } else { - x_147 = x_146; + x_116 = x_114; } -lean_ctor_set(x_147, 0, x_144); -lean_ctor_set(x_147, 1, x_145); -return x_147; +lean_ctor_set(x_116, 0, x_115); +lean_ctor_set(x_116, 1, x_113); +return x_116; +} +else +{ +lean_object* x_117; lean_object* x_118; +x_117 = lean_ctor_get(x_111, 1); +lean_inc(x_117); +lean_dec(x_111); +x_118 = lean_ctor_get(x_112, 0); +lean_inc(x_118); +lean_dec(x_112); +x_6 = x_104; +x_7 = x_118; +x_8 = lean_box(0); +x_18 = x_117; +goto _start; } } else { -lean_object* x_148; lean_object* x_149; lean_object* x_150; lean_object* x_151; -lean_dec(x_32); -lean_dec(x_21); -lean_dec(x_20); -lean_dec(x_19); +lean_object* x_120; lean_object* x_121; lean_object* x_122; lean_object* x_123; +lean_dec(x_104); +lean_dec(x_17); lean_dec(x_16); lean_dec(x_15); lean_dec(x_14); @@ -14751,38 +15953,149 @@ lean_dec(x_12); lean_dec(x_11); lean_dec(x_10); lean_dec(x_9); -lean_dec(x_8); +lean_dec(x_2); lean_dec(x_1); -x_148 = lean_ctor_get(x_101, 0); -lean_inc(x_148); -x_149 = lean_ctor_get(x_101, 1); -lean_inc(x_149); -if (lean_is_exclusive(x_101)) { - lean_ctor_release(x_101, 0); - lean_ctor_release(x_101, 1); - x_150 = x_101; +x_120 = lean_ctor_get(x_111, 0); +lean_inc(x_120); +x_121 = lean_ctor_get(x_111, 1); +lean_inc(x_121); +if (lean_is_exclusive(x_111)) { + lean_ctor_release(x_111, 0); + lean_ctor_release(x_111, 1); + x_122 = x_111; +} else { + lean_dec_ref(x_111); + x_122 = lean_box(0); +} +if (lean_is_scalar(x_122)) { + x_123 = lean_alloc_ctor(1, 2, 0); +} else { + x_123 = x_122; +} +lean_ctor_set(x_123, 0, x_120); +lean_ctor_set(x_123, 1, x_121); +return x_123; +} +} +else +{ +lean_object* x_124; lean_object* x_125; lean_object* x_126; +x_124 = lean_ctor_get(x_106, 1); +lean_inc(x_124); +if (lean_is_exclusive(x_106)) { + lean_ctor_release(x_106, 0); + lean_ctor_release(x_106, 1); + x_125 = x_106; +} else { + lean_dec_ref(x_106); + x_125 = lean_box(0); +} +lean_inc(x_103); +x_126 = l_Lean_Meta_Grind_Arith_CommRing_EqCnstr_denoteExpr___at_Lean_Meta_Grind_Arith_CommRing_Null_setEqUnsat___spec__4(x_103, x_9, x_10, x_11, x_12, x_13, x_14, x_15, x_16, x_17, x_124); +if (lean_obj_tag(x_126) == 0) +{ +lean_object* x_127; lean_object* x_128; lean_object* x_129; lean_object* x_130; lean_object* x_131; lean_object* x_132; lean_object* x_133; lean_object* x_134; lean_object* x_135; lean_object* x_136; lean_object* x_137; +x_127 = lean_ctor_get(x_126, 0); +lean_inc(x_127); +x_128 = lean_ctor_get(x_126, 1); +lean_inc(x_128); +lean_dec(x_126); +x_129 = l_Lean_MessageData_ofExpr(x_127); +x_130 = l_List_forIn_x27_loop___at_Lean_Meta_Grind_Arith_CommRing_EqCnstr_simplifyBasis_goVar___spec__1___closed__4; +if (lean_is_scalar(x_125)) { + x_131 = lean_alloc_ctor(7, 2, 0); +} else { + x_131 = x_125; + lean_ctor_set_tag(x_131, 7); +} +lean_ctor_set(x_131, 0, x_130); +lean_ctor_set(x_131, 1, x_129); +x_132 = l___private_Lean_Meta_Tactic_Grind_Arith_CommRing_EqCnstr_0__Lean_Meta_Grind_Arith_CommRing_toRingExpr_x3f___closed__5; +x_133 = lean_alloc_ctor(7, 2, 0); +lean_ctor_set(x_133, 0, x_131); +lean_ctor_set(x_133, 1, x_132); +x_134 = l_Lean_addTrace___at_Lean_Meta_Grind_Arith_CommRing_Null_setEqUnsat___spec__3(x_105, x_133, x_9, x_10, x_11, x_12, x_13, x_14, x_15, x_16, x_17, x_128); +x_135 = 
lean_ctor_get(x_134, 0); +lean_inc(x_135); +x_136 = lean_ctor_get(x_134, 1); +lean_inc(x_136); +lean_dec(x_134); +lean_inc(x_17); +lean_inc(x_16); +lean_inc(x_15); +lean_inc(x_14); +lean_inc(x_13); +lean_inc(x_12); +lean_inc(x_11); +lean_inc(x_10); +lean_inc(x_9); +lean_inc(x_1); +lean_inc(x_2); +x_137 = l_List_forIn_x27_loop___at_Lean_Meta_Grind_Arith_CommRing_EqCnstr_simplifyBasis_goVar___spec__1___lambda__2(x_103, x_2, x_1, x_105, x_135, x_9, x_10, x_11, x_12, x_13, x_14, x_15, x_16, x_17, x_136); +lean_dec(x_135); +if (lean_obj_tag(x_137) == 0) +{ +lean_object* x_138; +x_138 = lean_ctor_get(x_137, 0); +lean_inc(x_138); +if (lean_obj_tag(x_138) == 0) +{ +lean_object* x_139; lean_object* x_140; lean_object* x_141; lean_object* x_142; +lean_dec(x_104); +lean_dec(x_17); +lean_dec(x_16); +lean_dec(x_15); +lean_dec(x_14); +lean_dec(x_13); +lean_dec(x_12); +lean_dec(x_11); +lean_dec(x_10); +lean_dec(x_9); +lean_dec(x_2); +lean_dec(x_1); +x_139 = lean_ctor_get(x_137, 1); +lean_inc(x_139); +if (lean_is_exclusive(x_137)) { + lean_ctor_release(x_137, 0); + lean_ctor_release(x_137, 1); + x_140 = x_137; } else { - lean_dec_ref(x_101); - x_150 = lean_box(0); + lean_dec_ref(x_137); + x_140 = lean_box(0); } -if (lean_is_scalar(x_150)) { - x_151 = lean_alloc_ctor(1, 2, 0); +x_141 = lean_ctor_get(x_138, 0); +lean_inc(x_141); +lean_dec(x_138); +if (lean_is_scalar(x_140)) { + x_142 = lean_alloc_ctor(0, 2, 0); } else { - x_151 = x_150; -} -lean_ctor_set(x_151, 0, x_148); -lean_ctor_set(x_151, 1, x_149); -return x_151; + x_142 = x_140; } +lean_ctor_set(x_142, 0, x_141); +lean_ctor_set(x_142, 1, x_139); +return x_142; } +else +{ +lean_object* x_143; lean_object* x_144; +x_143 = lean_ctor_get(x_137, 1); +lean_inc(x_143); +lean_dec(x_137); +x_144 = lean_ctor_get(x_138, 0); +lean_inc(x_144); +lean_dec(x_138); +x_6 = x_104; +x_7 = x_144; +x_8 = lean_box(0); +x_18 = x_143; +goto _start; } } else { -uint8_t x_152; -lean_dec(x_21); -lean_dec(x_20); -lean_dec(x_19); +lean_object* x_146; lean_object* x_147; lean_object* x_148; lean_object* x_149; +lean_dec(x_104); +lean_dec(x_17); lean_dec(x_16); lean_dec(x_15); lean_dec(x_14); @@ -14791,33 +16104,37 @@ lean_dec(x_12); lean_dec(x_11); lean_dec(x_10); lean_dec(x_9); -lean_dec(x_8); +lean_dec(x_2); lean_dec(x_1); -x_152 = !lean_is_exclusive(x_31); -if (x_152 == 0) -{ -return x_31; +x_146 = lean_ctor_get(x_137, 0); +lean_inc(x_146); +x_147 = lean_ctor_get(x_137, 1); +lean_inc(x_147); +if (lean_is_exclusive(x_137)) { + lean_ctor_release(x_137, 0); + lean_ctor_release(x_137, 1); + x_148 = x_137; +} else { + lean_dec_ref(x_137); + x_148 = lean_box(0); } -else -{ -lean_object* x_153; lean_object* x_154; lean_object* x_155; -x_153 = lean_ctor_get(x_31, 0); -x_154 = lean_ctor_get(x_31, 1); -lean_inc(x_154); -lean_inc(x_153); -lean_dec(x_31); -x_155 = lean_alloc_ctor(1, 2, 0); -lean_ctor_set(x_155, 0, x_153); -lean_ctor_set(x_155, 1, x_154); -return x_155; +if (lean_is_scalar(x_148)) { + x_149 = lean_alloc_ctor(1, 2, 0); +} else { + x_149 = x_148; +} +lean_ctor_set(x_149, 0, x_146); +lean_ctor_set(x_149, 1, x_147); +return x_149; } } -block_28: -{ -if (lean_obj_tag(x_22) == 0) +else { -lean_object* x_24; lean_object* x_25; -lean_dec(x_20); +lean_object* x_150; lean_object* x_151; lean_object* x_152; lean_object* x_153; +lean_dec(x_125); +lean_dec(x_104); +lean_dec(x_103); +lean_dec(x_17); lean_dec(x_16); lean_dec(x_15); lean_dec(x_14); @@ -14826,692 +16143,908 @@ lean_dec(x_12); lean_dec(x_11); lean_dec(x_10); lean_dec(x_9); -lean_dec(x_8); +lean_dec(x_2); lean_dec(x_1); -x_24 
= lean_ctor_get(x_22, 0); -lean_inc(x_24); -lean_dec(x_22); -if (lean_is_scalar(x_21)) { - x_25 = lean_alloc_ctor(0, 2, 0); +x_150 = lean_ctor_get(x_126, 0); +lean_inc(x_150); +x_151 = lean_ctor_get(x_126, 1); +lean_inc(x_151); +if (lean_is_exclusive(x_126)) { + lean_ctor_release(x_126, 0); + lean_ctor_release(x_126, 1); + x_152 = x_126; } else { - x_25 = x_21; - lean_ctor_set_tag(x_25, 0); -} -lean_ctor_set(x_25, 0, x_24); -lean_ctor_set(x_25, 1, x_23); -return x_25; -} -else -{ -lean_object* x_26; -lean_dec(x_21); -x_26 = lean_ctor_get(x_22, 0); -lean_inc(x_26); -lean_dec(x_22); -x_5 = x_20; -x_6 = x_26; -x_7 = lean_box(0); -x_17 = x_23; -goto _start; + lean_dec_ref(x_126); + x_152 = lean_box(0); } +if (lean_is_scalar(x_152)) { + x_153 = lean_alloc_ctor(1, 2, 0); +} else { + x_153 = x_152; } +lean_ctor_set(x_153, 0, x_150); +lean_ctor_set(x_153, 1, x_151); +return x_153; } } } -LEAN_EXPORT lean_object* l_Lean_Meta_Grind_Arith_CommRing_EqCnstr_superposeWith_go(lean_object* x_1, lean_object* x_2, lean_object* x_3, lean_object* x_4, lean_object* x_5, lean_object* x_6, lean_object* x_7, lean_object* x_8, lean_object* x_9, lean_object* x_10, lean_object* x_11, lean_object* x_12) { -_start: -{ -if (lean_obj_tag(x_2) == 0) -{ -lean_object* x_13; lean_object* x_14; -lean_dec(x_11); -lean_dec(x_10); -lean_dec(x_9); -lean_dec(x_8); -lean_dec(x_7); -lean_dec(x_6); -lean_dec(x_5); -lean_dec(x_4); -lean_dec(x_3); -lean_dec(x_1); -x_13 = lean_box(0); -x_14 = lean_alloc_ctor(0, 2, 0); -lean_ctor_set(x_14, 0, x_13); -lean_ctor_set(x_14, 1, x_12); -return x_14; } -else -{ -lean_object* x_15; lean_object* x_16; lean_object* x_17; lean_object* x_18; -x_15 = lean_ctor_get(x_2, 0); -x_16 = lean_ctor_get(x_2, 1); -x_17 = lean_ctor_get(x_15, 0); -x_18 = l_Lean_Meta_Grind_Arith_CommRing_getRing(x_3, x_4, x_5, x_6, x_7, x_8, x_9, x_10, x_11, x_12); -if (lean_obj_tag(x_18) == 0) -{ -lean_object* x_19; lean_object* x_20; lean_object* x_21; lean_object* x_22; lean_object* x_23; uint8_t x_24; lean_object* x_25; lean_object* x_26; -x_19 = lean_ctor_get(x_18, 0); -lean_inc(x_19); -x_20 = lean_ctor_get(x_18, 1); -lean_inc(x_20); -lean_dec(x_18); -x_21 = lean_box(0); -x_22 = lean_ctor_get(x_19, 19); -lean_inc(x_22); -lean_dec(x_19); -x_23 = lean_ctor_get(x_22, 2); -lean_inc(x_23); -x_24 = lean_nat_dec_lt(x_17, x_23); -lean_dec(x_23); -x_25 = lean_box(0); -if (x_24 == 0) -{ -lean_object* x_36; -lean_dec(x_22); -x_36 = l_outOfBounds___rarg(x_21); -x_26 = x_36; -goto block_35; } -else -{ -lean_object* x_37; -x_37 = l_Lean_PersistentArray_get_x21___rarg(x_21, x_22, x_17); -x_26 = x_37; -goto block_35; } -block_35: -{ -lean_object* x_27; lean_object* x_28; -x_27 = lean_box(0); -lean_inc(x_11); -lean_inc(x_10); -lean_inc(x_9); -lean_inc(x_8); -lean_inc(x_7); -lean_inc(x_6); -lean_inc(x_5); -lean_inc(x_4); -lean_inc(x_3); -lean_inc(x_26); -lean_inc(x_1); -x_28 = l_List_forIn_x27_loop___at_Lean_Meta_Grind_Arith_CommRing_EqCnstr_superposeWith_go___spec__1(x_1, x_25, x_26, x_26, x_26, x_27, lean_box(0), x_3, x_4, x_5, x_6, x_7, x_8, x_9, x_10, x_11, x_20); -lean_dec(x_26); -if (lean_obj_tag(x_28) == 0) +LEAN_EXPORT lean_object* l_Lean_Meta_Grind_Arith_CommRing_EqCnstr_simplifyBasis_goVar___lambda__1(lean_object* x_1, lean_object* x_2, lean_object* x_3, lean_object* x_4, lean_object* x_5, lean_object* x_6, lean_object* x_7, lean_object* x_8, lean_object* x_9, lean_object* x_10, lean_object* x_11, lean_object* x_12, lean_object* x_13, lean_object* x_14, lean_object* x_15) { +_start: { -lean_object* x_29; -x_29 = lean_ctor_get(x_28, 
1); +lean_object* x_16; lean_object* x_29; lean_object* x_30; lean_object* x_31; lean_object* x_32; lean_object* x_33; lean_object* x_34; uint8_t x_35; +x_29 = lean_ctor_get(x_6, 0); lean_inc(x_29); -lean_dec(x_28); -x_2 = x_16; -x_12 = x_29; -goto _start; -} -else -{ -uint8_t x_31; -lean_dec(x_11); -lean_dec(x_10); -lean_dec(x_9); -lean_dec(x_8); -lean_dec(x_7); -lean_dec(x_6); -lean_dec(x_5); -lean_dec(x_4); -lean_dec(x_3); -lean_dec(x_1); -x_31 = !lean_is_exclusive(x_28); -if (x_31 == 0) +x_30 = lean_st_ref_take(x_7, x_15); +x_31 = lean_ctor_get(x_30, 0); +lean_inc(x_31); +x_32 = lean_ctor_get(x_31, 14); +lean_inc(x_32); +x_33 = lean_ctor_get(x_32, 2); +lean_inc(x_33); +x_34 = lean_ctor_get(x_30, 1); +lean_inc(x_34); +lean_dec(x_30); +x_35 = !lean_is_exclusive(x_31); +if (x_35 == 0) { -return x_28; -} -else +lean_object* x_36; uint8_t x_37; +x_36 = lean_ctor_get(x_31, 14); +lean_dec(x_36); +x_37 = !lean_is_exclusive(x_32); +if (x_37 == 0) { -lean_object* x_32; lean_object* x_33; lean_object* x_34; -x_32 = lean_ctor_get(x_28, 0); -x_33 = lean_ctor_get(x_28, 1); -lean_inc(x_33); -lean_inc(x_32); -lean_dec(x_28); -x_34 = lean_alloc_ctor(1, 2, 0); -lean_ctor_set(x_34, 0, x_32); -lean_ctor_set(x_34, 1, x_33); -return x_34; -} -} -} +lean_object* x_38; uint8_t x_39; +x_38 = lean_ctor_get(x_32, 2); +lean_dec(x_38); +x_39 = !lean_is_exclusive(x_33); +if (x_39 == 0) +{ +lean_object* x_40; lean_object* x_41; uint8_t x_42; +x_40 = lean_ctor_get(x_33, 0); +x_41 = lean_array_get_size(x_40); +x_42 = lean_nat_dec_lt(x_29, x_41); +lean_dec(x_41); +if (x_42 == 0) +{ +lean_object* x_43; lean_object* x_44; +lean_dec(x_29); +x_43 = lean_st_ref_set(x_7, x_31, x_34); +x_44 = lean_ctor_get(x_43, 1); +lean_inc(x_44); +lean_dec(x_43); +x_16 = x_44; +goto block_28; } else { -uint8_t x_38; -lean_dec(x_11); -lean_dec(x_10); -lean_dec(x_9); -lean_dec(x_8); -lean_dec(x_7); -lean_dec(x_6); -lean_dec(x_5); -lean_dec(x_4); -lean_dec(x_3); -lean_dec(x_1); -x_38 = !lean_is_exclusive(x_18); -if (x_38 == 0) +lean_object* x_45; lean_object* x_46; lean_object* x_47; uint8_t x_48; +x_45 = lean_array_fget(x_40, x_29); +x_46 = lean_box(0); +x_47 = lean_array_fset(x_40, x_29, x_46); +x_48 = !lean_is_exclusive(x_45); +if (x_48 == 0) { -return x_18; +lean_object* x_49; lean_object* x_50; lean_object* x_51; lean_object* x_52; lean_object* x_53; lean_object* x_54; +x_49 = lean_ctor_get(x_45, 19); +x_50 = lean_box(0); +x_51 = l_Lean_PersistentArray_set___rarg(x_49, x_4, x_50); +lean_ctor_set(x_45, 19, x_51); +x_52 = lean_array_fset(x_47, x_29, x_45); +lean_dec(x_29); +lean_ctor_set(x_33, 0, x_52); +x_53 = lean_st_ref_set(x_7, x_31, x_34); +x_54 = lean_ctor_get(x_53, 1); +lean_inc(x_54); +lean_dec(x_53); +x_16 = x_54; +goto block_28; } else { -lean_object* x_39; lean_object* x_40; lean_object* x_41; -x_39 = lean_ctor_get(x_18, 0); -x_40 = lean_ctor_get(x_18, 1); -lean_inc(x_40); -lean_inc(x_39); -lean_dec(x_18); -x_41 = lean_alloc_ctor(1, 2, 0); -lean_ctor_set(x_41, 0, x_39); -lean_ctor_set(x_41, 1, x_40); -return x_41; -} -} +lean_object* x_55; lean_object* x_56; lean_object* x_57; lean_object* x_58; lean_object* x_59; lean_object* x_60; lean_object* x_61; lean_object* x_62; lean_object* x_63; lean_object* x_64; lean_object* x_65; lean_object* x_66; lean_object* x_67; lean_object* x_68; lean_object* x_69; lean_object* x_70; lean_object* x_71; lean_object* x_72; lean_object* x_73; lean_object* x_74; lean_object* x_75; uint8_t x_76; lean_object* x_77; lean_object* x_78; lean_object* x_79; lean_object* x_80; lean_object* x_81; 
lean_object* x_82; +x_55 = lean_ctor_get(x_45, 0); +x_56 = lean_ctor_get(x_45, 1); +x_57 = lean_ctor_get(x_45, 2); +x_58 = lean_ctor_get(x_45, 3); +x_59 = lean_ctor_get(x_45, 4); +x_60 = lean_ctor_get(x_45, 5); +x_61 = lean_ctor_get(x_45, 6); +x_62 = lean_ctor_get(x_45, 7); +x_63 = lean_ctor_get(x_45, 8); +x_64 = lean_ctor_get(x_45, 9); +x_65 = lean_ctor_get(x_45, 10); +x_66 = lean_ctor_get(x_45, 11); +x_67 = lean_ctor_get(x_45, 12); +x_68 = lean_ctor_get(x_45, 13); +x_69 = lean_ctor_get(x_45, 14); +x_70 = lean_ctor_get(x_45, 15); +x_71 = lean_ctor_get(x_45, 16); +x_72 = lean_ctor_get(x_45, 17); +x_73 = lean_ctor_get(x_45, 18); +x_74 = lean_ctor_get(x_45, 19); +x_75 = lean_ctor_get(x_45, 20); +x_76 = lean_ctor_get_uint8(x_45, sizeof(void*)*21); +lean_inc(x_75); +lean_inc(x_74); +lean_inc(x_73); +lean_inc(x_72); +lean_inc(x_71); +lean_inc(x_70); +lean_inc(x_69); +lean_inc(x_68); +lean_inc(x_67); +lean_inc(x_66); +lean_inc(x_65); +lean_inc(x_64); +lean_inc(x_63); +lean_inc(x_62); +lean_inc(x_61); +lean_inc(x_60); +lean_inc(x_59); +lean_inc(x_58); +lean_inc(x_57); +lean_inc(x_56); +lean_inc(x_55); +lean_dec(x_45); +x_77 = lean_box(0); +x_78 = l_Lean_PersistentArray_set___rarg(x_74, x_4, x_77); +x_79 = lean_alloc_ctor(0, 21, 1); +lean_ctor_set(x_79, 0, x_55); +lean_ctor_set(x_79, 1, x_56); +lean_ctor_set(x_79, 2, x_57); +lean_ctor_set(x_79, 3, x_58); +lean_ctor_set(x_79, 4, x_59); +lean_ctor_set(x_79, 5, x_60); +lean_ctor_set(x_79, 6, x_61); +lean_ctor_set(x_79, 7, x_62); +lean_ctor_set(x_79, 8, x_63); +lean_ctor_set(x_79, 9, x_64); +lean_ctor_set(x_79, 10, x_65); +lean_ctor_set(x_79, 11, x_66); +lean_ctor_set(x_79, 12, x_67); +lean_ctor_set(x_79, 13, x_68); +lean_ctor_set(x_79, 14, x_69); +lean_ctor_set(x_79, 15, x_70); +lean_ctor_set(x_79, 16, x_71); +lean_ctor_set(x_79, 17, x_72); +lean_ctor_set(x_79, 18, x_73); +lean_ctor_set(x_79, 19, x_78); +lean_ctor_set(x_79, 20, x_75); +lean_ctor_set_uint8(x_79, sizeof(void*)*21, x_76); +x_80 = lean_array_fset(x_47, x_29, x_79); +lean_dec(x_29); +lean_ctor_set(x_33, 0, x_80); +x_81 = lean_st_ref_set(x_7, x_31, x_34); +x_82 = lean_ctor_get(x_81, 1); +lean_inc(x_82); +lean_dec(x_81); +x_16 = x_82; +goto block_28; } } } -LEAN_EXPORT lean_object* l_List_forIn_x27_loop___at_Lean_Meta_Grind_Arith_CommRing_EqCnstr_superposeWith_go___spec__1___lambda__1___boxed(lean_object* x_1, lean_object* x_2, lean_object* x_3, lean_object* x_4, lean_object* x_5, lean_object* x_6, lean_object* x_7, lean_object* x_8, lean_object* x_9, lean_object* x_10, lean_object* x_11, lean_object* x_12, lean_object* x_13, lean_object* x_14) { -_start: +else { -lean_object* x_15; -x_15 = l_List_forIn_x27_loop___at_Lean_Meta_Grind_Arith_CommRing_EqCnstr_superposeWith_go___spec__1___lambda__1(x_1, x_2, x_3, x_4, x_5, x_6, x_7, x_8, x_9, x_10, x_11, x_12, x_13, x_14); -lean_dec(x_9); -lean_dec(x_8); -lean_dec(x_7); -lean_dec(x_6); -lean_dec(x_5); -lean_dec(x_4); -return x_15; -} +lean_object* x_83; lean_object* x_84; lean_object* x_85; lean_object* x_86; lean_object* x_87; uint8_t x_88; +x_83 = lean_ctor_get(x_33, 0); +x_84 = lean_ctor_get(x_33, 1); +x_85 = lean_ctor_get(x_33, 2); +x_86 = lean_ctor_get(x_33, 3); +lean_inc(x_86); +lean_inc(x_85); +lean_inc(x_84); +lean_inc(x_83); +lean_dec(x_33); +x_87 = lean_array_get_size(x_83); +x_88 = lean_nat_dec_lt(x_29, x_87); +lean_dec(x_87); +if (x_88 == 0) +{ +lean_object* x_89; lean_object* x_90; lean_object* x_91; +lean_dec(x_29); +x_89 = lean_alloc_ctor(0, 4, 0); +lean_ctor_set(x_89, 0, x_83); +lean_ctor_set(x_89, 1, x_84); +lean_ctor_set(x_89, 
2, x_85); +lean_ctor_set(x_89, 3, x_86); +lean_ctor_set(x_32, 2, x_89); +x_90 = lean_st_ref_set(x_7, x_31, x_34); +x_91 = lean_ctor_get(x_90, 1); +lean_inc(x_91); +lean_dec(x_90); +x_16 = x_91; +goto block_28; } -LEAN_EXPORT lean_object* l_List_forIn_x27_loop___at_Lean_Meta_Grind_Arith_CommRing_EqCnstr_superposeWith_go___spec__1___boxed(lean_object** _args) { -lean_object* x_1 = _args[0]; -lean_object* x_2 = _args[1]; -lean_object* x_3 = _args[2]; -lean_object* x_4 = _args[3]; -lean_object* x_5 = _args[4]; -lean_object* x_6 = _args[5]; -lean_object* x_7 = _args[6]; -lean_object* x_8 = _args[7]; -lean_object* x_9 = _args[8]; -lean_object* x_10 = _args[9]; -lean_object* x_11 = _args[10]; -lean_object* x_12 = _args[11]; -lean_object* x_13 = _args[12]; -lean_object* x_14 = _args[13]; -lean_object* x_15 = _args[14]; -lean_object* x_16 = _args[15]; -lean_object* x_17 = _args[16]; -_start: +else { -lean_object* x_18; -x_18 = l_List_forIn_x27_loop___at_Lean_Meta_Grind_Arith_CommRing_EqCnstr_superposeWith_go___spec__1(x_1, x_2, x_3, x_4, x_5, x_6, x_7, x_8, x_9, x_10, x_11, x_12, x_13, x_14, x_15, x_16, x_17); -lean_dec(x_4); -lean_dec(x_3); -lean_dec(x_2); -return x_18; +lean_object* x_92; lean_object* x_93; lean_object* x_94; lean_object* x_95; lean_object* x_96; lean_object* x_97; lean_object* x_98; lean_object* x_99; lean_object* x_100; lean_object* x_101; lean_object* x_102; lean_object* x_103; lean_object* x_104; lean_object* x_105; lean_object* x_106; lean_object* x_107; lean_object* x_108; lean_object* x_109; lean_object* x_110; lean_object* x_111; lean_object* x_112; lean_object* x_113; lean_object* x_114; lean_object* x_115; uint8_t x_116; lean_object* x_117; lean_object* x_118; lean_object* x_119; lean_object* x_120; lean_object* x_121; lean_object* x_122; lean_object* x_123; lean_object* x_124; +x_92 = lean_array_fget(x_83, x_29); +x_93 = lean_box(0); +x_94 = lean_array_fset(x_83, x_29, x_93); +x_95 = lean_ctor_get(x_92, 0); +lean_inc(x_95); +x_96 = lean_ctor_get(x_92, 1); +lean_inc(x_96); +x_97 = lean_ctor_get(x_92, 2); +lean_inc(x_97); +x_98 = lean_ctor_get(x_92, 3); +lean_inc(x_98); +x_99 = lean_ctor_get(x_92, 4); +lean_inc(x_99); +x_100 = lean_ctor_get(x_92, 5); +lean_inc(x_100); +x_101 = lean_ctor_get(x_92, 6); +lean_inc(x_101); +x_102 = lean_ctor_get(x_92, 7); +lean_inc(x_102); +x_103 = lean_ctor_get(x_92, 8); +lean_inc(x_103); +x_104 = lean_ctor_get(x_92, 9); +lean_inc(x_104); +x_105 = lean_ctor_get(x_92, 10); +lean_inc(x_105); +x_106 = lean_ctor_get(x_92, 11); +lean_inc(x_106); +x_107 = lean_ctor_get(x_92, 12); +lean_inc(x_107); +x_108 = lean_ctor_get(x_92, 13); +lean_inc(x_108); +x_109 = lean_ctor_get(x_92, 14); +lean_inc(x_109); +x_110 = lean_ctor_get(x_92, 15); +lean_inc(x_110); +x_111 = lean_ctor_get(x_92, 16); +lean_inc(x_111); +x_112 = lean_ctor_get(x_92, 17); +lean_inc(x_112); +x_113 = lean_ctor_get(x_92, 18); +lean_inc(x_113); +x_114 = lean_ctor_get(x_92, 19); +lean_inc(x_114); +x_115 = lean_ctor_get(x_92, 20); +lean_inc(x_115); +x_116 = lean_ctor_get_uint8(x_92, sizeof(void*)*21); +if (lean_is_exclusive(x_92)) { + lean_ctor_release(x_92, 0); + lean_ctor_release(x_92, 1); + lean_ctor_release(x_92, 2); + lean_ctor_release(x_92, 3); + lean_ctor_release(x_92, 4); + lean_ctor_release(x_92, 5); + lean_ctor_release(x_92, 6); + lean_ctor_release(x_92, 7); + lean_ctor_release(x_92, 8); + lean_ctor_release(x_92, 9); + lean_ctor_release(x_92, 10); + lean_ctor_release(x_92, 11); + lean_ctor_release(x_92, 12); + lean_ctor_release(x_92, 13); + lean_ctor_release(x_92, 14); + 
lean_ctor_release(x_92, 15); + lean_ctor_release(x_92, 16); + lean_ctor_release(x_92, 17); + lean_ctor_release(x_92, 18); + lean_ctor_release(x_92, 19); + lean_ctor_release(x_92, 20); + x_117 = x_92; +} else { + lean_dec_ref(x_92); + x_117 = lean_box(0); } +x_118 = lean_box(0); +x_119 = l_Lean_PersistentArray_set___rarg(x_114, x_4, x_118); +if (lean_is_scalar(x_117)) { + x_120 = lean_alloc_ctor(0, 21, 1); +} else { + x_120 = x_117; } -LEAN_EXPORT lean_object* l_Lean_Meta_Grind_Arith_CommRing_EqCnstr_superposeWith_go___boxed(lean_object* x_1, lean_object* x_2, lean_object* x_3, lean_object* x_4, lean_object* x_5, lean_object* x_6, lean_object* x_7, lean_object* x_8, lean_object* x_9, lean_object* x_10, lean_object* x_11, lean_object* x_12) { -_start: -{ -lean_object* x_13; -x_13 = l_Lean_Meta_Grind_Arith_CommRing_EqCnstr_superposeWith_go(x_1, x_2, x_3, x_4, x_5, x_6, x_7, x_8, x_9, x_10, x_11, x_12); -lean_dec(x_2); -return x_13; +lean_ctor_set(x_120, 0, x_95); +lean_ctor_set(x_120, 1, x_96); +lean_ctor_set(x_120, 2, x_97); +lean_ctor_set(x_120, 3, x_98); +lean_ctor_set(x_120, 4, x_99); +lean_ctor_set(x_120, 5, x_100); +lean_ctor_set(x_120, 6, x_101); +lean_ctor_set(x_120, 7, x_102); +lean_ctor_set(x_120, 8, x_103); +lean_ctor_set(x_120, 9, x_104); +lean_ctor_set(x_120, 10, x_105); +lean_ctor_set(x_120, 11, x_106); +lean_ctor_set(x_120, 12, x_107); +lean_ctor_set(x_120, 13, x_108); +lean_ctor_set(x_120, 14, x_109); +lean_ctor_set(x_120, 15, x_110); +lean_ctor_set(x_120, 16, x_111); +lean_ctor_set(x_120, 17, x_112); +lean_ctor_set(x_120, 18, x_113); +lean_ctor_set(x_120, 19, x_119); +lean_ctor_set(x_120, 20, x_115); +lean_ctor_set_uint8(x_120, sizeof(void*)*21, x_116); +x_121 = lean_array_fset(x_94, x_29, x_120); +lean_dec(x_29); +x_122 = lean_alloc_ctor(0, 4, 0); +lean_ctor_set(x_122, 0, x_121); +lean_ctor_set(x_122, 1, x_84); +lean_ctor_set(x_122, 2, x_85); +lean_ctor_set(x_122, 3, x_86); +lean_ctor_set(x_32, 2, x_122); +x_123 = lean_st_ref_set(x_7, x_31, x_34); +x_124 = lean_ctor_get(x_123, 1); +lean_inc(x_124); +lean_dec(x_123); +x_16 = x_124; +goto block_28; } } -LEAN_EXPORT lean_object* l_Lean_Meta_Grind_Arith_CommRing_EqCnstr_superposeWith___lambda__1(lean_object* x_1, lean_object* x_2, lean_object* x_3, lean_object* x_4, lean_object* x_5, lean_object* x_6, lean_object* x_7, lean_object* x_8, lean_object* x_9, lean_object* x_10, lean_object* x_11, lean_object* x_12) { -_start: -{ -lean_object* x_13; -x_13 = lean_ctor_get(x_1, 0); -lean_inc(x_13); -if (lean_obj_tag(x_13) == 0) -{ -lean_object* x_14; lean_object* x_15; -lean_dec(x_13); -lean_dec(x_11); -lean_dec(x_10); -lean_dec(x_9); -lean_dec(x_8); -lean_dec(x_7); -lean_dec(x_6); -lean_dec(x_5); -lean_dec(x_4); -lean_dec(x_3); -lean_dec(x_1); -x_14 = lean_box(0); -x_15 = lean_alloc_ctor(0, 2, 0); -lean_ctor_set(x_15, 0, x_14); -lean_ctor_set(x_15, 1, x_12); -return x_15; } else { -lean_object* x_16; lean_object* x_17; -x_16 = lean_ctor_get(x_13, 1); -lean_inc(x_16); -lean_dec(x_13); -x_17 = l_Lean_Meta_Grind_Arith_CommRing_EqCnstr_superposeWith_go(x_1, x_16, x_3, x_4, x_5, x_6, x_7, x_8, x_9, x_10, x_11, x_12); -lean_dec(x_16); -return x_17; -} -} +lean_object* x_125; lean_object* x_126; lean_object* x_127; lean_object* x_128; lean_object* x_129; lean_object* x_130; lean_object* x_131; lean_object* x_132; uint8_t x_133; +x_125 = lean_ctor_get(x_32, 0); +x_126 = lean_ctor_get(x_32, 1); +lean_inc(x_126); +lean_inc(x_125); +lean_dec(x_32); +x_127 = lean_ctor_get(x_33, 0); +lean_inc(x_127); +x_128 = lean_ctor_get(x_33, 1); 
+lean_inc(x_128); +x_129 = lean_ctor_get(x_33, 2); +lean_inc(x_129); +x_130 = lean_ctor_get(x_33, 3); +lean_inc(x_130); +if (lean_is_exclusive(x_33)) { + lean_ctor_release(x_33, 0); + lean_ctor_release(x_33, 1); + lean_ctor_release(x_33, 2); + lean_ctor_release(x_33, 3); + x_131 = x_33; +} else { + lean_dec_ref(x_33); + x_131 = lean_box(0); } -LEAN_EXPORT lean_object* l_Lean_Meta_Grind_Arith_CommRing_EqCnstr_superposeWith(lean_object* x_1, lean_object* x_2, lean_object* x_3, lean_object* x_4, lean_object* x_5, lean_object* x_6, lean_object* x_7, lean_object* x_8, lean_object* x_9, lean_object* x_10, lean_object* x_11) { -_start: -{ -lean_object* x_12; lean_object* x_13; uint8_t x_14; -x_12 = l_Lean_Meta_Grind_Arith_CommRing_checkMaxSteps(x_3, x_4, x_5, x_6, x_7, x_8, x_9, x_10, x_11); -x_13 = lean_ctor_get(x_12, 0); -lean_inc(x_13); -x_14 = lean_unbox(x_13); -lean_dec(x_13); -if (x_14 == 0) +x_132 = lean_array_get_size(x_127); +x_133 = lean_nat_dec_lt(x_29, x_132); +lean_dec(x_132); +if (x_133 == 0) { -lean_object* x_15; lean_object* x_16; lean_object* x_17; -x_15 = lean_ctor_get(x_12, 1); -lean_inc(x_15); -lean_dec(x_12); -x_16 = lean_box(0); -x_17 = l_Lean_Meta_Grind_Arith_CommRing_EqCnstr_superposeWith___lambda__1(x_1, x_16, x_2, x_3, x_4, x_5, x_6, x_7, x_8, x_9, x_10, x_15); -return x_17; +lean_object* x_134; lean_object* x_135; lean_object* x_136; lean_object* x_137; +lean_dec(x_29); +if (lean_is_scalar(x_131)) { + x_134 = lean_alloc_ctor(0, 4, 0); +} else { + x_134 = x_131; } -else -{ -uint8_t x_18; -lean_dec(x_10); -lean_dec(x_9); -lean_dec(x_8); -lean_dec(x_7); -lean_dec(x_6); -lean_dec(x_5); -lean_dec(x_4); -lean_dec(x_3); -lean_dec(x_2); -lean_dec(x_1); -x_18 = !lean_is_exclusive(x_12); -if (x_18 == 0) -{ -lean_object* x_19; lean_object* x_20; -x_19 = lean_ctor_get(x_12, 0); -lean_dec(x_19); -x_20 = lean_box(0); -lean_ctor_set(x_12, 0, x_20); -return x_12; +lean_ctor_set(x_134, 0, x_127); +lean_ctor_set(x_134, 1, x_128); +lean_ctor_set(x_134, 2, x_129); +lean_ctor_set(x_134, 3, x_130); +x_135 = lean_alloc_ctor(0, 3, 0); +lean_ctor_set(x_135, 0, x_125); +lean_ctor_set(x_135, 1, x_126); +lean_ctor_set(x_135, 2, x_134); +lean_ctor_set(x_31, 14, x_135); +x_136 = lean_st_ref_set(x_7, x_31, x_34); +x_137 = lean_ctor_get(x_136, 1); +lean_inc(x_137); +lean_dec(x_136); +x_16 = x_137; +goto block_28; } else { -lean_object* x_21; lean_object* x_22; lean_object* x_23; -x_21 = lean_ctor_get(x_12, 1); -lean_inc(x_21); -lean_dec(x_12); -x_22 = lean_box(0); -x_23 = lean_alloc_ctor(0, 2, 0); -lean_ctor_set(x_23, 0, x_22); -lean_ctor_set(x_23, 1, x_21); -return x_23; -} -} -} -} -LEAN_EXPORT lean_object* l_Lean_Meta_Grind_Arith_CommRing_EqCnstr_superposeWith___lambda__1___boxed(lean_object* x_1, lean_object* x_2, lean_object* x_3, lean_object* x_4, lean_object* x_5, lean_object* x_6, lean_object* x_7, lean_object* x_8, lean_object* x_9, lean_object* x_10, lean_object* x_11, lean_object* x_12) { -_start: -{ -lean_object* x_13; -x_13 = l_Lean_Meta_Grind_Arith_CommRing_EqCnstr_superposeWith___lambda__1(x_1, x_2, x_3, x_4, x_5, x_6, x_7, x_8, x_9, x_10, x_11, x_12); -lean_dec(x_2); -return x_13; -} +lean_object* x_138; lean_object* x_139; lean_object* x_140; lean_object* x_141; lean_object* x_142; lean_object* x_143; lean_object* x_144; lean_object* x_145; lean_object* x_146; lean_object* x_147; lean_object* x_148; lean_object* x_149; lean_object* x_150; lean_object* x_151; lean_object* x_152; lean_object* x_153; lean_object* x_154; lean_object* x_155; lean_object* x_156; lean_object* x_157; 
lean_object* x_158; lean_object* x_159; lean_object* x_160; lean_object* x_161; uint8_t x_162; lean_object* x_163; lean_object* x_164; lean_object* x_165; lean_object* x_166; lean_object* x_167; lean_object* x_168; lean_object* x_169; lean_object* x_170; lean_object* x_171; +x_138 = lean_array_fget(x_127, x_29); +x_139 = lean_box(0); +x_140 = lean_array_fset(x_127, x_29, x_139); +x_141 = lean_ctor_get(x_138, 0); +lean_inc(x_141); +x_142 = lean_ctor_get(x_138, 1); +lean_inc(x_142); +x_143 = lean_ctor_get(x_138, 2); +lean_inc(x_143); +x_144 = lean_ctor_get(x_138, 3); +lean_inc(x_144); +x_145 = lean_ctor_get(x_138, 4); +lean_inc(x_145); +x_146 = lean_ctor_get(x_138, 5); +lean_inc(x_146); +x_147 = lean_ctor_get(x_138, 6); +lean_inc(x_147); +x_148 = lean_ctor_get(x_138, 7); +lean_inc(x_148); +x_149 = lean_ctor_get(x_138, 8); +lean_inc(x_149); +x_150 = lean_ctor_get(x_138, 9); +lean_inc(x_150); +x_151 = lean_ctor_get(x_138, 10); +lean_inc(x_151); +x_152 = lean_ctor_get(x_138, 11); +lean_inc(x_152); +x_153 = lean_ctor_get(x_138, 12); +lean_inc(x_153); +x_154 = lean_ctor_get(x_138, 13); +lean_inc(x_154); +x_155 = lean_ctor_get(x_138, 14); +lean_inc(x_155); +x_156 = lean_ctor_get(x_138, 15); +lean_inc(x_156); +x_157 = lean_ctor_get(x_138, 16); +lean_inc(x_157); +x_158 = lean_ctor_get(x_138, 17); +lean_inc(x_158); +x_159 = lean_ctor_get(x_138, 18); +lean_inc(x_159); +x_160 = lean_ctor_get(x_138, 19); +lean_inc(x_160); +x_161 = lean_ctor_get(x_138, 20); +lean_inc(x_161); +x_162 = lean_ctor_get_uint8(x_138, sizeof(void*)*21); +if (lean_is_exclusive(x_138)) { + lean_ctor_release(x_138, 0); + lean_ctor_release(x_138, 1); + lean_ctor_release(x_138, 2); + lean_ctor_release(x_138, 3); + lean_ctor_release(x_138, 4); + lean_ctor_release(x_138, 5); + lean_ctor_release(x_138, 6); + lean_ctor_release(x_138, 7); + lean_ctor_release(x_138, 8); + lean_ctor_release(x_138, 9); + lean_ctor_release(x_138, 10); + lean_ctor_release(x_138, 11); + lean_ctor_release(x_138, 12); + lean_ctor_release(x_138, 13); + lean_ctor_release(x_138, 14); + lean_ctor_release(x_138, 15); + lean_ctor_release(x_138, 16); + lean_ctor_release(x_138, 17); + lean_ctor_release(x_138, 18); + lean_ctor_release(x_138, 19); + lean_ctor_release(x_138, 20); + x_163 = x_138; +} else { + lean_dec_ref(x_138); + x_163 = lean_box(0); } -static lean_object* _init_l_Lean_Meta_Grind_Arith_CommRing_EqCnstr_toMonic___lambda__1___closed__1() { -_start: -{ -lean_object* x_1; lean_object* x_2; -x_1 = lean_unsigned_to_nat(1u); -x_2 = lean_nat_to_int(x_1); -return x_2; +x_164 = lean_box(0); +x_165 = l_Lean_PersistentArray_set___rarg(x_160, x_4, x_164); +if (lean_is_scalar(x_163)) { + x_166 = lean_alloc_ctor(0, 21, 1); +} else { + x_166 = x_163; } +lean_ctor_set(x_166, 0, x_141); +lean_ctor_set(x_166, 1, x_142); +lean_ctor_set(x_166, 2, x_143); +lean_ctor_set(x_166, 3, x_144); +lean_ctor_set(x_166, 4, x_145); +lean_ctor_set(x_166, 5, x_146); +lean_ctor_set(x_166, 6, x_147); +lean_ctor_set(x_166, 7, x_148); +lean_ctor_set(x_166, 8, x_149); +lean_ctor_set(x_166, 9, x_150); +lean_ctor_set(x_166, 10, x_151); +lean_ctor_set(x_166, 11, x_152); +lean_ctor_set(x_166, 12, x_153); +lean_ctor_set(x_166, 13, x_154); +lean_ctor_set(x_166, 14, x_155); +lean_ctor_set(x_166, 15, x_156); +lean_ctor_set(x_166, 16, x_157); +lean_ctor_set(x_166, 17, x_158); +lean_ctor_set(x_166, 18, x_159); +lean_ctor_set(x_166, 19, x_165); +lean_ctor_set(x_166, 20, x_161); +lean_ctor_set_uint8(x_166, sizeof(void*)*21, x_162); +x_167 = lean_array_fset(x_140, x_29, x_166); +lean_dec(x_29); +if 
(lean_is_scalar(x_131)) { + x_168 = lean_alloc_ctor(0, 4, 0); +} else { + x_168 = x_131; } -static lean_object* _init_l_Lean_Meta_Grind_Arith_CommRing_EqCnstr_toMonic___lambda__1___closed__2() { -_start: -{ -lean_object* x_1; lean_object* x_2; -x_1 = l_Lean_Meta_Grind_Arith_CommRing_EqCnstr_toMonic___lambda__1___closed__1; -x_2 = lean_int_neg(x_1); -return x_2; +lean_ctor_set(x_168, 0, x_167); +lean_ctor_set(x_168, 1, x_128); +lean_ctor_set(x_168, 2, x_129); +lean_ctor_set(x_168, 3, x_130); +x_169 = lean_alloc_ctor(0, 3, 0); +lean_ctor_set(x_169, 0, x_125); +lean_ctor_set(x_169, 1, x_126); +lean_ctor_set(x_169, 2, x_168); +lean_ctor_set(x_31, 14, x_169); +x_170 = lean_st_ref_set(x_7, x_31, x_34); +x_171 = lean_ctor_get(x_170, 1); +lean_inc(x_171); +lean_dec(x_170); +x_16 = x_171; +goto block_28; } } -LEAN_EXPORT lean_object* l_Lean_Meta_Grind_Arith_CommRing_EqCnstr_toMonic___lambda__1(lean_object* x_1, lean_object* x_2, lean_object* x_3, lean_object* x_4, lean_object* x_5, lean_object* x_6, lean_object* x_7, lean_object* x_8, lean_object* x_9, lean_object* x_10, lean_object* x_11, lean_object* x_12, lean_object* x_13, lean_object* x_14) { -_start: -{ -lean_object* x_15; uint8_t x_16; -x_15 = l_Lean_Meta_Grind_Arith_CommRing_EqCnstr_checkConstant___closed__2; -x_16 = lean_int_dec_lt(x_2, x_15); -if (x_16 == 0) -{ -lean_object* x_17; -lean_dec(x_3); -x_17 = lean_alloc_ctor(0, 2, 0); -lean_ctor_set(x_17, 0, x_1); -lean_ctor_set(x_17, 1, x_14); -return x_17; } else { -lean_object* x_18; lean_object* x_19; lean_object* x_20; lean_object* x_21; lean_object* x_22; lean_object* x_23; lean_object* x_24; -x_18 = l_Lean_Meta_Grind_Arith_CommRing_EqCnstr_toMonic___lambda__1___closed__2; -x_19 = l_Lean_Grind_CommRing_Poly_mulConst(x_18, x_3); -lean_inc(x_1); -x_20 = lean_alloc_ctor(3, 2, 0); -lean_ctor_set(x_20, 0, x_18); -lean_ctor_set(x_20, 1, x_1); -x_21 = lean_ctor_get(x_1, 2); -lean_inc(x_21); -x_22 = lean_ctor_get(x_1, 3); -lean_inc(x_22); -lean_dec(x_1); -x_23 = lean_alloc_ctor(0, 4, 0); -lean_ctor_set(x_23, 0, x_19); -lean_ctor_set(x_23, 1, x_20); -lean_ctor_set(x_23, 2, x_21); -lean_ctor_set(x_23, 3, x_22); -x_24 = lean_alloc_ctor(0, 2, 0); -lean_ctor_set(x_24, 0, x_23); -lean_ctor_set(x_24, 1, x_14); -return x_24; -} -} -} -LEAN_EXPORT lean_object* l_Lean_Meta_Grind_Arith_CommRing_EqCnstr_toMonic___lambda__2(lean_object* x_1, lean_object* x_2, lean_object* x_3, lean_object* x_4, lean_object* x_5, lean_object* x_6, lean_object* x_7, lean_object* x_8, lean_object* x_9, lean_object* x_10, lean_object* x_11, lean_object* x_12, lean_object* x_13, lean_object* x_14) { -_start: -{ -lean_object* x_15; -x_15 = l_Lean_Meta_Grind_Arith_CommRing_noZeroDivisors(x_5, x_6, x_7, x_8, x_9, x_10, x_11, x_12, x_13, x_14); -if (lean_obj_tag(x_15) == 0) -{ -lean_object* x_16; uint8_t x_17; -x_16 = lean_ctor_get(x_15, 0); -lean_inc(x_16); -x_17 = lean_unbox(x_16); -lean_dec(x_16); -if (x_17 == 0) -{ -lean_object* x_18; lean_object* x_19; lean_object* x_20; -x_18 = lean_ctor_get(x_15, 1); -lean_inc(x_18); -lean_dec(x_15); -x_19 = lean_box(0); -x_20 = l_Lean_Meta_Grind_Arith_CommRing_EqCnstr_toMonic___lambda__1(x_1, x_2, x_3, x_19, x_5, x_6, x_7, x_8, x_9, x_10, x_11, x_12, x_13, x_18); -return x_20; +lean_object* x_172; lean_object* x_173; lean_object* x_174; lean_object* x_175; lean_object* x_176; lean_object* x_177; lean_object* x_178; lean_object* x_179; uint8_t x_180; lean_object* x_181; lean_object* x_182; lean_object* x_183; lean_object* x_184; lean_object* x_185; lean_object* x_186; lean_object* x_187; 
lean_object* x_188; lean_object* x_189; lean_object* x_190; lean_object* x_191; lean_object* x_192; lean_object* x_193; lean_object* x_194; lean_object* x_195; lean_object* x_196; uint8_t x_197; +x_172 = lean_ctor_get(x_31, 0); +x_173 = lean_ctor_get(x_31, 1); +x_174 = lean_ctor_get(x_31, 2); +x_175 = lean_ctor_get(x_31, 3); +x_176 = lean_ctor_get(x_31, 4); +x_177 = lean_ctor_get(x_31, 5); +x_178 = lean_ctor_get(x_31, 6); +x_179 = lean_ctor_get(x_31, 7); +x_180 = lean_ctor_get_uint8(x_31, sizeof(void*)*16); +x_181 = lean_ctor_get(x_31, 8); +x_182 = lean_ctor_get(x_31, 9); +x_183 = lean_ctor_get(x_31, 10); +x_184 = lean_ctor_get(x_31, 11); +x_185 = lean_ctor_get(x_31, 12); +x_186 = lean_ctor_get(x_31, 13); +x_187 = lean_ctor_get(x_31, 15); +lean_inc(x_187); +lean_inc(x_186); +lean_inc(x_185); +lean_inc(x_184); +lean_inc(x_183); +lean_inc(x_182); +lean_inc(x_181); +lean_inc(x_179); +lean_inc(x_178); +lean_inc(x_177); +lean_inc(x_176); +lean_inc(x_175); +lean_inc(x_174); +lean_inc(x_173); +lean_inc(x_172); +lean_dec(x_31); +x_188 = lean_ctor_get(x_32, 0); +lean_inc(x_188); +x_189 = lean_ctor_get(x_32, 1); +lean_inc(x_189); +if (lean_is_exclusive(x_32)) { + lean_ctor_release(x_32, 0); + lean_ctor_release(x_32, 1); + lean_ctor_release(x_32, 2); + x_190 = x_32; +} else { + lean_dec_ref(x_32); + x_190 = lean_box(0); } -else -{ -uint8_t x_21; -x_21 = !lean_is_exclusive(x_15); -if (x_21 == 0) -{ -lean_object* x_22; lean_object* x_23; lean_object* x_24; lean_object* x_25; lean_object* x_26; uint8_t x_27; -x_22 = lean_ctor_get(x_15, 1); -x_23 = lean_ctor_get(x_15, 0); -lean_dec(x_23); -x_24 = l_Lean_Grind_CommRing_Poly_gcdCoeffs(x_3); -x_25 = lean_nat_to_int(x_24); -x_26 = l_Lean_Meta_Grind_Arith_CommRing_EqCnstr_toMonic___lambda__1___closed__1; -x_27 = lean_int_dec_eq(x_25, x_26); -if (x_27 == 0) -{ -lean_object* x_28; uint8_t x_29; -x_28 = l_Lean_Meta_Grind_Arith_CommRing_EqCnstr_checkConstant___closed__2; -x_29 = lean_int_dec_lt(x_2, x_28); -if (x_29 == 0) -{ -lean_object* x_30; lean_object* x_31; lean_object* x_32; lean_object* x_33; lean_object* x_34; -x_30 = lean_ctor_get(x_1, 2); -lean_inc(x_30); -x_31 = lean_ctor_get(x_1, 3); -lean_inc(x_31); -x_32 = l_Lean_Grind_CommRing_Poly_divConst(x_3, x_25); -x_33 = lean_alloc_ctor(4, 2, 0); -lean_ctor_set(x_33, 0, x_25); -lean_ctor_set(x_33, 1, x_1); -x_34 = lean_alloc_ctor(0, 4, 0); -lean_ctor_set(x_34, 0, x_32); -lean_ctor_set(x_34, 1, x_33); -lean_ctor_set(x_34, 2, x_30); -lean_ctor_set(x_34, 3, x_31); -lean_ctor_set(x_15, 0, x_34); -return x_15; +x_191 = lean_ctor_get(x_33, 0); +lean_inc(x_191); +x_192 = lean_ctor_get(x_33, 1); +lean_inc(x_192); +x_193 = lean_ctor_get(x_33, 2); +lean_inc(x_193); +x_194 = lean_ctor_get(x_33, 3); +lean_inc(x_194); +if (lean_is_exclusive(x_33)) { + lean_ctor_release(x_33, 0); + lean_ctor_release(x_33, 1); + lean_ctor_release(x_33, 2); + lean_ctor_release(x_33, 3); + x_195 = x_33; +} else { + lean_dec_ref(x_33); + x_195 = lean_box(0); } -else +x_196 = lean_array_get_size(x_191); +x_197 = lean_nat_dec_lt(x_29, x_196); +lean_dec(x_196); +if (x_197 == 0) { -lean_object* x_35; lean_object* x_36; lean_object* x_37; lean_object* x_38; lean_object* x_39; lean_object* x_40; -x_35 = lean_ctor_get(x_1, 2); -lean_inc(x_35); -x_36 = lean_ctor_get(x_1, 3); -lean_inc(x_36); -x_37 = lean_int_neg(x_25); -lean_dec(x_25); -x_38 = l_Lean_Grind_CommRing_Poly_divConst(x_3, x_37); -x_39 = lean_alloc_ctor(4, 2, 0); -lean_ctor_set(x_39, 0, x_37); -lean_ctor_set(x_39, 1, x_1); -x_40 = lean_alloc_ctor(0, 4, 0); -lean_ctor_set(x_40, 0, x_38); 
-lean_ctor_set(x_40, 1, x_39); -lean_ctor_set(x_40, 2, x_35); -lean_ctor_set(x_40, 3, x_36); -lean_ctor_set(x_15, 0, x_40); -return x_15; +lean_object* x_198; lean_object* x_199; lean_object* x_200; lean_object* x_201; lean_object* x_202; +lean_dec(x_29); +if (lean_is_scalar(x_195)) { + x_198 = lean_alloc_ctor(0, 4, 0); +} else { + x_198 = x_195; +} +lean_ctor_set(x_198, 0, x_191); +lean_ctor_set(x_198, 1, x_192); +lean_ctor_set(x_198, 2, x_193); +lean_ctor_set(x_198, 3, x_194); +if (lean_is_scalar(x_190)) { + x_199 = lean_alloc_ctor(0, 3, 0); +} else { + x_199 = x_190; } +lean_ctor_set(x_199, 0, x_188); +lean_ctor_set(x_199, 1, x_189); +lean_ctor_set(x_199, 2, x_198); +x_200 = lean_alloc_ctor(0, 16, 1); +lean_ctor_set(x_200, 0, x_172); +lean_ctor_set(x_200, 1, x_173); +lean_ctor_set(x_200, 2, x_174); +lean_ctor_set(x_200, 3, x_175); +lean_ctor_set(x_200, 4, x_176); +lean_ctor_set(x_200, 5, x_177); +lean_ctor_set(x_200, 6, x_178); +lean_ctor_set(x_200, 7, x_179); +lean_ctor_set(x_200, 8, x_181); +lean_ctor_set(x_200, 9, x_182); +lean_ctor_set(x_200, 10, x_183); +lean_ctor_set(x_200, 11, x_184); +lean_ctor_set(x_200, 12, x_185); +lean_ctor_set(x_200, 13, x_186); +lean_ctor_set(x_200, 14, x_199); +lean_ctor_set(x_200, 15, x_187); +lean_ctor_set_uint8(x_200, sizeof(void*)*16, x_180); +x_201 = lean_st_ref_set(x_7, x_200, x_34); +x_202 = lean_ctor_get(x_201, 1); +lean_inc(x_202); +lean_dec(x_201); +x_16 = x_202; +goto block_28; } else { -lean_object* x_41; lean_object* x_42; -lean_dec(x_25); -lean_free_object(x_15); -x_41 = lean_box(0); -x_42 = l_Lean_Meta_Grind_Arith_CommRing_EqCnstr_toMonic___lambda__1(x_1, x_2, x_3, x_41, x_5, x_6, x_7, x_8, x_9, x_10, x_11, x_12, x_13, x_22); -return x_42; +lean_object* x_203; lean_object* x_204; lean_object* x_205; lean_object* x_206; lean_object* x_207; lean_object* x_208; lean_object* x_209; lean_object* x_210; lean_object* x_211; lean_object* x_212; lean_object* x_213; lean_object* x_214; lean_object* x_215; lean_object* x_216; lean_object* x_217; lean_object* x_218; lean_object* x_219; lean_object* x_220; lean_object* x_221; lean_object* x_222; lean_object* x_223; lean_object* x_224; lean_object* x_225; lean_object* x_226; uint8_t x_227; lean_object* x_228; lean_object* x_229; lean_object* x_230; lean_object* x_231; lean_object* x_232; lean_object* x_233; lean_object* x_234; lean_object* x_235; lean_object* x_236; lean_object* x_237; +x_203 = lean_array_fget(x_191, x_29); +x_204 = lean_box(0); +x_205 = lean_array_fset(x_191, x_29, x_204); +x_206 = lean_ctor_get(x_203, 0); +lean_inc(x_206); +x_207 = lean_ctor_get(x_203, 1); +lean_inc(x_207); +x_208 = lean_ctor_get(x_203, 2); +lean_inc(x_208); +x_209 = lean_ctor_get(x_203, 3); +lean_inc(x_209); +x_210 = lean_ctor_get(x_203, 4); +lean_inc(x_210); +x_211 = lean_ctor_get(x_203, 5); +lean_inc(x_211); +x_212 = lean_ctor_get(x_203, 6); +lean_inc(x_212); +x_213 = lean_ctor_get(x_203, 7); +lean_inc(x_213); +x_214 = lean_ctor_get(x_203, 8); +lean_inc(x_214); +x_215 = lean_ctor_get(x_203, 9); +lean_inc(x_215); +x_216 = lean_ctor_get(x_203, 10); +lean_inc(x_216); +x_217 = lean_ctor_get(x_203, 11); +lean_inc(x_217); +x_218 = lean_ctor_get(x_203, 12); +lean_inc(x_218); +x_219 = lean_ctor_get(x_203, 13); +lean_inc(x_219); +x_220 = lean_ctor_get(x_203, 14); +lean_inc(x_220); +x_221 = lean_ctor_get(x_203, 15); +lean_inc(x_221); +x_222 = lean_ctor_get(x_203, 16); +lean_inc(x_222); +x_223 = lean_ctor_get(x_203, 17); +lean_inc(x_223); +x_224 = lean_ctor_get(x_203, 18); +lean_inc(x_224); +x_225 = lean_ctor_get(x_203, 19); 
+lean_inc(x_225); +x_226 = lean_ctor_get(x_203, 20); +lean_inc(x_226); +x_227 = lean_ctor_get_uint8(x_203, sizeof(void*)*21); +if (lean_is_exclusive(x_203)) { + lean_ctor_release(x_203, 0); + lean_ctor_release(x_203, 1); + lean_ctor_release(x_203, 2); + lean_ctor_release(x_203, 3); + lean_ctor_release(x_203, 4); + lean_ctor_release(x_203, 5); + lean_ctor_release(x_203, 6); + lean_ctor_release(x_203, 7); + lean_ctor_release(x_203, 8); + lean_ctor_release(x_203, 9); + lean_ctor_release(x_203, 10); + lean_ctor_release(x_203, 11); + lean_ctor_release(x_203, 12); + lean_ctor_release(x_203, 13); + lean_ctor_release(x_203, 14); + lean_ctor_release(x_203, 15); + lean_ctor_release(x_203, 16); + lean_ctor_release(x_203, 17); + lean_ctor_release(x_203, 18); + lean_ctor_release(x_203, 19); + lean_ctor_release(x_203, 20); + x_228 = x_203; +} else { + lean_dec_ref(x_203); + x_228 = lean_box(0); } +x_229 = lean_box(0); +x_230 = l_Lean_PersistentArray_set___rarg(x_225, x_4, x_229); +if (lean_is_scalar(x_228)) { + x_231 = lean_alloc_ctor(0, 21, 1); +} else { + x_231 = x_228; } -else -{ -lean_object* x_43; lean_object* x_44; lean_object* x_45; lean_object* x_46; uint8_t x_47; -x_43 = lean_ctor_get(x_15, 1); -lean_inc(x_43); -lean_dec(x_15); -x_44 = l_Lean_Grind_CommRing_Poly_gcdCoeffs(x_3); -x_45 = lean_nat_to_int(x_44); -x_46 = l_Lean_Meta_Grind_Arith_CommRing_EqCnstr_toMonic___lambda__1___closed__1; -x_47 = lean_int_dec_eq(x_45, x_46); -if (x_47 == 0) -{ -lean_object* x_48; uint8_t x_49; -x_48 = l_Lean_Meta_Grind_Arith_CommRing_EqCnstr_checkConstant___closed__2; -x_49 = lean_int_dec_lt(x_2, x_48); -if (x_49 == 0) -{ -lean_object* x_50; lean_object* x_51; lean_object* x_52; lean_object* x_53; lean_object* x_54; lean_object* x_55; -x_50 = lean_ctor_get(x_1, 2); -lean_inc(x_50); -x_51 = lean_ctor_get(x_1, 3); -lean_inc(x_51); -x_52 = l_Lean_Grind_CommRing_Poly_divConst(x_3, x_45); -x_53 = lean_alloc_ctor(4, 2, 0); -lean_ctor_set(x_53, 0, x_45); -lean_ctor_set(x_53, 1, x_1); -x_54 = lean_alloc_ctor(0, 4, 0); -lean_ctor_set(x_54, 0, x_52); -lean_ctor_set(x_54, 1, x_53); -lean_ctor_set(x_54, 2, x_50); -lean_ctor_set(x_54, 3, x_51); -x_55 = lean_alloc_ctor(0, 2, 0); -lean_ctor_set(x_55, 0, x_54); -lean_ctor_set(x_55, 1, x_43); -return x_55; +lean_ctor_set(x_231, 0, x_206); +lean_ctor_set(x_231, 1, x_207); +lean_ctor_set(x_231, 2, x_208); +lean_ctor_set(x_231, 3, x_209); +lean_ctor_set(x_231, 4, x_210); +lean_ctor_set(x_231, 5, x_211); +lean_ctor_set(x_231, 6, x_212); +lean_ctor_set(x_231, 7, x_213); +lean_ctor_set(x_231, 8, x_214); +lean_ctor_set(x_231, 9, x_215); +lean_ctor_set(x_231, 10, x_216); +lean_ctor_set(x_231, 11, x_217); +lean_ctor_set(x_231, 12, x_218); +lean_ctor_set(x_231, 13, x_219); +lean_ctor_set(x_231, 14, x_220); +lean_ctor_set(x_231, 15, x_221); +lean_ctor_set(x_231, 16, x_222); +lean_ctor_set(x_231, 17, x_223); +lean_ctor_set(x_231, 18, x_224); +lean_ctor_set(x_231, 19, x_230); +lean_ctor_set(x_231, 20, x_226); +lean_ctor_set_uint8(x_231, sizeof(void*)*21, x_227); +x_232 = lean_array_fset(x_205, x_29, x_231); +lean_dec(x_29); +if (lean_is_scalar(x_195)) { + x_233 = lean_alloc_ctor(0, 4, 0); +} else { + x_233 = x_195; } -else -{ -lean_object* x_56; lean_object* x_57; lean_object* x_58; lean_object* x_59; lean_object* x_60; lean_object* x_61; lean_object* x_62; -x_56 = lean_ctor_get(x_1, 2); -lean_inc(x_56); -x_57 = lean_ctor_get(x_1, 3); -lean_inc(x_57); -x_58 = lean_int_neg(x_45); -lean_dec(x_45); -x_59 = l_Lean_Grind_CommRing_Poly_divConst(x_3, x_58); -x_60 = lean_alloc_ctor(4, 2, 0); 
-lean_ctor_set(x_60, 0, x_58); -lean_ctor_set(x_60, 1, x_1); -x_61 = lean_alloc_ctor(0, 4, 0); -lean_ctor_set(x_61, 0, x_59); -lean_ctor_set(x_61, 1, x_60); -lean_ctor_set(x_61, 2, x_56); -lean_ctor_set(x_61, 3, x_57); -x_62 = lean_alloc_ctor(0, 2, 0); -lean_ctor_set(x_62, 0, x_61); -lean_ctor_set(x_62, 1, x_43); -return x_62; +lean_ctor_set(x_233, 0, x_232); +lean_ctor_set(x_233, 1, x_192); +lean_ctor_set(x_233, 2, x_193); +lean_ctor_set(x_233, 3, x_194); +if (lean_is_scalar(x_190)) { + x_234 = lean_alloc_ctor(0, 3, 0); +} else { + x_234 = x_190; +} +lean_ctor_set(x_234, 0, x_188); +lean_ctor_set(x_234, 1, x_189); +lean_ctor_set(x_234, 2, x_233); +x_235 = lean_alloc_ctor(0, 16, 1); +lean_ctor_set(x_235, 0, x_172); +lean_ctor_set(x_235, 1, x_173); +lean_ctor_set(x_235, 2, x_174); +lean_ctor_set(x_235, 3, x_175); +lean_ctor_set(x_235, 4, x_176); +lean_ctor_set(x_235, 5, x_177); +lean_ctor_set(x_235, 6, x_178); +lean_ctor_set(x_235, 7, x_179); +lean_ctor_set(x_235, 8, x_181); +lean_ctor_set(x_235, 9, x_182); +lean_ctor_set(x_235, 10, x_183); +lean_ctor_set(x_235, 11, x_184); +lean_ctor_set(x_235, 12, x_185); +lean_ctor_set(x_235, 13, x_186); +lean_ctor_set(x_235, 14, x_234); +lean_ctor_set(x_235, 15, x_187); +lean_ctor_set_uint8(x_235, sizeof(void*)*16, x_180); +x_236 = lean_st_ref_set(x_7, x_235, x_34); +x_237 = lean_ctor_get(x_236, 1); +lean_inc(x_237); +lean_dec(x_236); +x_16 = x_237; +goto block_28; } } -else +block_28: { -lean_object* x_63; lean_object* x_64; -lean_dec(x_45); -x_63 = lean_box(0); -x_64 = l_Lean_Meta_Grind_Arith_CommRing_EqCnstr_toMonic___lambda__1(x_1, x_2, x_3, x_63, x_5, x_6, x_7, x_8, x_9, x_10, x_11, x_12, x_13, x_43); -return x_64; -} +lean_object* x_17; lean_object* x_18; lean_object* x_19; +x_17 = lean_box(0); +x_18 = lean_box(0); +lean_inc(x_3); +x_19 = l_List_forIn_x27_loop___at_Lean_Meta_Grind_Arith_CommRing_EqCnstr_simplifyBasis_goVar___spec__1(x_1, x_2, x_3, x_17, x_3, x_3, x_18, lean_box(0), x_6, x_7, x_8, x_9, x_10, x_11, x_12, x_13, x_14, x_16); +lean_dec(x_3); +if (lean_obj_tag(x_19) == 0) +{ +uint8_t x_20; +x_20 = !lean_is_exclusive(x_19); +if (x_20 == 0) +{ +lean_object* x_21; +x_21 = lean_ctor_get(x_19, 0); +lean_dec(x_21); +lean_ctor_set(x_19, 0, x_18); +return x_19; } +else +{ +lean_object* x_22; lean_object* x_23; +x_22 = lean_ctor_get(x_19, 1); +lean_inc(x_22); +lean_dec(x_19); +x_23 = lean_alloc_ctor(0, 2, 0); +lean_ctor_set(x_23, 0, x_18); +lean_ctor_set(x_23, 1, x_22); +return x_23; } } else { -uint8_t x_65; -lean_dec(x_3); -lean_dec(x_1); -x_65 = !lean_is_exclusive(x_15); -if (x_65 == 0) +uint8_t x_24; +x_24 = !lean_is_exclusive(x_19); +if (x_24 == 0) { -return x_15; +return x_19; } else { -lean_object* x_66; lean_object* x_67; lean_object* x_68; -x_66 = lean_ctor_get(x_15, 0); -x_67 = lean_ctor_get(x_15, 1); -lean_inc(x_67); -lean_inc(x_66); -lean_dec(x_15); -x_68 = lean_alloc_ctor(1, 2, 0); -lean_ctor_set(x_68, 0, x_66); -lean_ctor_set(x_68, 1, x_67); -return x_68; +lean_object* x_25; lean_object* x_26; lean_object* x_27; +x_25 = lean_ctor_get(x_19, 0); +x_26 = lean_ctor_get(x_19, 1); +lean_inc(x_26); +lean_inc(x_25); +lean_dec(x_19); +x_27 = lean_alloc_ctor(1, 2, 0); +lean_ctor_set(x_27, 0, x_25); +lean_ctor_set(x_27, 1, x_26); +return x_27; } } } } -LEAN_EXPORT lean_object* l_Lean_Meta_Grind_Arith_CommRing_EqCnstr_toMonic___lambda__3(lean_object* x_1, lean_object* x_2, lean_object* x_3, lean_object* x_4, lean_object* x_5, lean_object* x_6, lean_object* x_7, lean_object* x_8, lean_object* x_9, lean_object* x_10, lean_object* x_11, 
lean_object* x_12, lean_object* x_13, lean_object* x_14) { +} +LEAN_EXPORT lean_object* l_Lean_Meta_Grind_Arith_CommRing_EqCnstr_simplifyBasis_goVar(lean_object* x_1, lean_object* x_2, lean_object* x_3, lean_object* x_4, lean_object* x_5, lean_object* x_6, lean_object* x_7, lean_object* x_8, lean_object* x_9, lean_object* x_10, lean_object* x_11, lean_object* x_12, lean_object* x_13) { _start: { -lean_object* x_15; -lean_inc(x_13); -lean_inc(x_12); -lean_inc(x_11); -lean_inc(x_10); -lean_inc(x_9); -lean_inc(x_8); -lean_inc(x_7); -lean_inc(x_6); -lean_inc(x_5); -x_15 = l_Lean_Meta_Grind_Arith_CommRing_nonzeroChar_x3f(x_5, x_6, x_7, x_8, x_9, x_10, x_11, x_12, x_13, x_14); -if (lean_obj_tag(x_15) == 0) +lean_object* x_14; +x_14 = l_Lean_Meta_Grind_Arith_CommRing_RingM_getRing(x_4, x_5, x_6, x_7, x_8, x_9, x_10, x_11, x_12, x_13); +if (lean_obj_tag(x_14) == 0) { -lean_object* x_16; -x_16 = lean_ctor_get(x_15, 0); +lean_object* x_15; lean_object* x_16; lean_object* x_17; lean_object* x_18; lean_object* x_25; lean_object* x_26; lean_object* x_27; uint8_t x_28; +x_15 = lean_ctor_get(x_14, 0); +lean_inc(x_15); +x_16 = lean_ctor_get(x_14, 1); lean_inc(x_16); -if (lean_obj_tag(x_16) == 0) -{ -lean_object* x_17; lean_object* x_18; lean_object* x_19; -x_17 = lean_ctor_get(x_15, 1); -lean_inc(x_17); -lean_dec(x_15); -x_18 = lean_box(0); -x_19 = l_Lean_Meta_Grind_Arith_CommRing_EqCnstr_toMonic___lambda__2(x_1, x_2, x_3, x_18, x_5, x_6, x_7, x_8, x_9, x_10, x_11, x_12, x_13, x_17); -lean_dec(x_13); -lean_dec(x_12); -lean_dec(x_11); -lean_dec(x_10); -lean_dec(x_9); -lean_dec(x_8); -lean_dec(x_7); -lean_dec(x_6); -lean_dec(x_5); -return x_19; +if (lean_is_exclusive(x_14)) { + lean_ctor_release(x_14, 0); + lean_ctor_release(x_14, 1); + x_17 = x_14; +} else { + lean_dec_ref(x_14); + x_17 = lean_box(0); } -else -{ -uint8_t x_20; -x_20 = !lean_is_exclusive(x_15); -if (x_20 == 0) -{ -lean_object* x_21; lean_object* x_22; lean_object* x_23; lean_object* x_24; lean_object* x_25; lean_object* x_26; lean_object* x_27; uint8_t x_28; -x_21 = lean_ctor_get(x_15, 1); -x_22 = lean_ctor_get(x_15, 0); -lean_dec(x_22); -x_23 = lean_ctor_get(x_16, 0); -lean_inc(x_23); -lean_dec(x_16); -lean_inc(x_23); -x_24 = lean_nat_to_int(x_23); -x_25 = l_Lean_Meta_Grind_Arith_gcdExt(x_2, x_24); -x_26 = lean_ctor_get(x_25, 1); +x_25 = lean_box(0); +x_26 = lean_ctor_get(x_15, 19); lean_inc(x_26); -x_27 = lean_ctor_get(x_25, 0); +lean_dec(x_15); +x_27 = lean_ctor_get(x_26, 2); lean_inc(x_27); -lean_dec(x_25); -x_28 = !lean_is_exclusive(x_26); +x_28 = lean_nat_dec_lt(x_3, x_27); +lean_dec(x_27); if (x_28 == 0) { -lean_object* x_29; lean_object* x_30; lean_object* x_31; uint8_t x_32; -x_29 = lean_ctor_get(x_26, 0); -x_30 = lean_ctor_get(x_26, 1); -lean_dec(x_30); -x_31 = l_Lean_Meta_Grind_Arith_CommRing_EqCnstr_toMonic___lambda__1___closed__1; -x_32 = lean_int_dec_eq(x_27, x_31); -lean_dec(x_27); -if (x_32 == 0) +lean_object* x_29; +lean_dec(x_26); +x_29 = l_outOfBounds___rarg(x_25); +x_18 = x_29; +goto block_24; +} +else { -lean_object* x_33; lean_object* x_34; -lean_free_object(x_26); -lean_dec(x_29); -lean_dec(x_24); -lean_dec(x_23); -lean_free_object(x_15); -x_33 = lean_box(0); -x_34 = l_Lean_Meta_Grind_Arith_CommRing_EqCnstr_toMonic___lambda__2(x_1, x_2, x_3, x_33, x_5, x_6, x_7, x_8, x_9, x_10, x_11, x_12, x_13, x_21); -lean_dec(x_13); -lean_dec(x_12); -lean_dec(x_11); -lean_dec(x_10); -lean_dec(x_9); -lean_dec(x_8); -lean_dec(x_7); -lean_dec(x_6); -lean_dec(x_5); -return x_34; +lean_object* x_30; +x_30 = 
l_Lean_PersistentArray_get_x21___rarg(x_25, x_26, x_3); +x_18 = x_30; +goto block_24; +} +block_24: +{ +uint8_t x_19; +x_19 = l_List_isEmpty___rarg(x_18); +if (x_19 == 0) +{ +lean_object* x_20; lean_object* x_21; +lean_dec(x_17); +x_20 = lean_box(0); +x_21 = l_Lean_Meta_Grind_Arith_CommRing_EqCnstr_simplifyBasis_goVar___lambda__1(x_1, x_2, x_18, x_3, x_20, x_4, x_5, x_6, x_7, x_8, x_9, x_10, x_11, x_12, x_16); +return x_21; } else { -lean_object* x_35; uint8_t x_36; -lean_dec(x_13); +lean_object* x_22; lean_object* x_23; +lean_dec(x_18); lean_dec(x_12); lean_dec(x_11); lean_dec(x_10); @@ -15520,70 +17053,24 @@ lean_dec(x_8); lean_dec(x_7); lean_dec(x_6); lean_dec(x_5); -x_35 = l_Lean_Meta_Grind_Arith_CommRing_EqCnstr_checkConstant___closed__2; -x_36 = lean_int_dec_lt(x_29, x_35); -if (x_36 == 0) -{ -lean_object* x_37; lean_object* x_38; lean_object* x_39; lean_object* x_40; -lean_dec(x_24); -x_37 = lean_ctor_get(x_1, 2); -lean_inc(x_37); -x_38 = lean_ctor_get(x_1, 3); -lean_inc(x_38); -x_39 = l_Lean_Grind_CommRing_Poly_mulConstC(x_29, x_3, x_23); -lean_ctor_set_tag(x_26, 3); -lean_ctor_set(x_26, 1, x_1); -x_40 = lean_alloc_ctor(0, 4, 0); -lean_ctor_set(x_40, 0, x_39); -lean_ctor_set(x_40, 1, x_26); -lean_ctor_set(x_40, 2, x_37); -lean_ctor_set(x_40, 3, x_38); -lean_ctor_set(x_15, 0, x_40); -return x_15; +lean_dec(x_4); +lean_dec(x_2); +lean_dec(x_1); +x_22 = lean_box(0); +if (lean_is_scalar(x_17)) { + x_23 = lean_alloc_ctor(0, 2, 0); +} else { + x_23 = x_17; } -else -{ -lean_object* x_41; lean_object* x_42; lean_object* x_43; lean_object* x_44; lean_object* x_45; -x_41 = lean_ctor_get(x_1, 2); -lean_inc(x_41); -x_42 = lean_ctor_get(x_1, 3); -lean_inc(x_42); -x_43 = lean_int_emod(x_29, x_24); -lean_dec(x_24); -lean_dec(x_29); -x_44 = l_Lean_Grind_CommRing_Poly_mulConstC(x_43, x_3, x_23); -lean_ctor_set_tag(x_26, 3); -lean_ctor_set(x_26, 1, x_1); -lean_ctor_set(x_26, 0, x_43); -x_45 = lean_alloc_ctor(0, 4, 0); -lean_ctor_set(x_45, 0, x_44); -lean_ctor_set(x_45, 1, x_26); -lean_ctor_set(x_45, 2, x_41); -lean_ctor_set(x_45, 3, x_42); -lean_ctor_set(x_15, 0, x_45); -return x_15; +lean_ctor_set(x_23, 0, x_22); +lean_ctor_set(x_23, 1, x_16); +return x_23; } } } else { -lean_object* x_46; lean_object* x_47; uint8_t x_48; -x_46 = lean_ctor_get(x_26, 0); -lean_inc(x_46); -lean_dec(x_26); -x_47 = l_Lean_Meta_Grind_Arith_CommRing_EqCnstr_toMonic___lambda__1___closed__1; -x_48 = lean_int_dec_eq(x_27, x_47); -lean_dec(x_27); -if (x_48 == 0) -{ -lean_object* x_49; lean_object* x_50; -lean_dec(x_46); -lean_dec(x_24); -lean_dec(x_23); -lean_free_object(x_15); -x_49 = lean_box(0); -x_50 = l_Lean_Meta_Grind_Arith_CommRing_EqCnstr_toMonic___lambda__2(x_1, x_2, x_3, x_49, x_5, x_6, x_7, x_8, x_9, x_10, x_11, x_12, x_13, x_21); -lean_dec(x_13); +uint8_t x_31; lean_dec(x_12); lean_dec(x_11); lean_dec(x_10); @@ -15592,13 +17079,35 @@ lean_dec(x_8); lean_dec(x_7); lean_dec(x_6); lean_dec(x_5); -return x_50; +lean_dec(x_4); +lean_dec(x_2); +lean_dec(x_1); +x_31 = !lean_is_exclusive(x_14); +if (x_31 == 0) +{ +return x_14; } else { -lean_object* x_51; uint8_t x_52; -lean_dec(x_13); -lean_dec(x_12); +lean_object* x_32; lean_object* x_33; lean_object* x_34; +x_32 = lean_ctor_get(x_14, 0); +x_33 = lean_ctor_get(x_14, 1); +lean_inc(x_33); +lean_inc(x_32); +lean_dec(x_14); +x_34 = lean_alloc_ctor(1, 2, 0); +lean_ctor_set(x_34, 0, x_32); +lean_ctor_set(x_34, 1, x_33); +return x_34; +} +} +} +} +LEAN_EXPORT lean_object* 
l_List_forIn_x27_loop___at_Lean_Meta_Grind_Arith_CommRing_EqCnstr_simplifyBasis_goVar___spec__1___lambda__1___boxed(lean_object* x_1, lean_object* x_2, lean_object* x_3, lean_object* x_4, lean_object* x_5, lean_object* x_6, lean_object* x_7, lean_object* x_8, lean_object* x_9, lean_object* x_10, lean_object* x_11, lean_object* x_12) { +_start: +{ +lean_object* x_13; +x_13 = l_List_forIn_x27_loop___at_Lean_Meta_Grind_Arith_CommRing_EqCnstr_simplifyBasis_goVar___spec__1___lambda__1(x_1, x_2, x_3, x_4, x_5, x_6, x_7, x_8, x_9, x_10, x_11, x_12); lean_dec(x_11); lean_dec(x_10); lean_dec(x_9); @@ -15606,93 +17115,75 @@ lean_dec(x_8); lean_dec(x_7); lean_dec(x_6); lean_dec(x_5); -x_51 = l_Lean_Meta_Grind_Arith_CommRing_EqCnstr_checkConstant___closed__2; -x_52 = lean_int_dec_lt(x_46, x_51); -if (x_52 == 0) +lean_dec(x_4); +lean_dec(x_3); +lean_dec(x_2); +return x_13; +} +} +LEAN_EXPORT lean_object* l_List_forIn_x27_loop___at_Lean_Meta_Grind_Arith_CommRing_EqCnstr_simplifyBasis_goVar___spec__1___lambda__2___boxed(lean_object* x_1, lean_object* x_2, lean_object* x_3, lean_object* x_4, lean_object* x_5, lean_object* x_6, lean_object* x_7, lean_object* x_8, lean_object* x_9, lean_object* x_10, lean_object* x_11, lean_object* x_12, lean_object* x_13, lean_object* x_14, lean_object* x_15) { +_start: { -lean_object* x_53; lean_object* x_54; lean_object* x_55; lean_object* x_56; lean_object* x_57; -lean_dec(x_24); -x_53 = lean_ctor_get(x_1, 2); -lean_inc(x_53); -x_54 = lean_ctor_get(x_1, 3); -lean_inc(x_54); -x_55 = l_Lean_Grind_CommRing_Poly_mulConstC(x_46, x_3, x_23); -x_56 = lean_alloc_ctor(3, 2, 0); -lean_ctor_set(x_56, 0, x_46); -lean_ctor_set(x_56, 1, x_1); -x_57 = lean_alloc_ctor(0, 4, 0); -lean_ctor_set(x_57, 0, x_55); -lean_ctor_set(x_57, 1, x_56); -lean_ctor_set(x_57, 2, x_53); -lean_ctor_set(x_57, 3, x_54); -lean_ctor_set(x_15, 0, x_57); -return x_15; +lean_object* x_16; +x_16 = l_List_forIn_x27_loop___at_Lean_Meta_Grind_Arith_CommRing_EqCnstr_simplifyBasis_goVar___spec__1___lambda__2(x_1, x_2, x_3, x_4, x_5, x_6, x_7, x_8, x_9, x_10, x_11, x_12, x_13, x_14, x_15); +lean_dec(x_5); +return x_16; } -else +} +LEAN_EXPORT lean_object* l_List_forIn_x27_loop___at_Lean_Meta_Grind_Arith_CommRing_EqCnstr_simplifyBasis_goVar___spec__1___boxed(lean_object** _args) { +lean_object* x_1 = _args[0]; +lean_object* x_2 = _args[1]; +lean_object* x_3 = _args[2]; +lean_object* x_4 = _args[3]; +lean_object* x_5 = _args[4]; +lean_object* x_6 = _args[5]; +lean_object* x_7 = _args[6]; +lean_object* x_8 = _args[7]; +lean_object* x_9 = _args[8]; +lean_object* x_10 = _args[9]; +lean_object* x_11 = _args[10]; +lean_object* x_12 = _args[11]; +lean_object* x_13 = _args[12]; +lean_object* x_14 = _args[13]; +lean_object* x_15 = _args[14]; +lean_object* x_16 = _args[15]; +lean_object* x_17 = _args[16]; +lean_object* x_18 = _args[17]; +_start: { -lean_object* x_58; lean_object* x_59; lean_object* x_60; lean_object* x_61; lean_object* x_62; lean_object* x_63; -x_58 = lean_ctor_get(x_1, 2); -lean_inc(x_58); -x_59 = lean_ctor_get(x_1, 3); -lean_inc(x_59); -x_60 = lean_int_emod(x_46, x_24); -lean_dec(x_24); -lean_dec(x_46); -x_61 = l_Lean_Grind_CommRing_Poly_mulConstC(x_60, x_3, x_23); -x_62 = lean_alloc_ctor(3, 2, 0); -lean_ctor_set(x_62, 0, x_60); -lean_ctor_set(x_62, 1, x_1); -x_63 = lean_alloc_ctor(0, 4, 0); -lean_ctor_set(x_63, 0, x_61); -lean_ctor_set(x_63, 1, x_62); -lean_ctor_set(x_63, 2, x_58); -lean_ctor_set(x_63, 3, x_59); -lean_ctor_set(x_15, 0, x_63); -return x_15; +lean_object* x_19; +x_19 = 
l_List_forIn_x27_loop___at_Lean_Meta_Grind_Arith_CommRing_EqCnstr_simplifyBasis_goVar___spec__1(x_1, x_2, x_3, x_4, x_5, x_6, x_7, x_8, x_9, x_10, x_11, x_12, x_13, x_14, x_15, x_16, x_17, x_18); +lean_dec(x_5); +lean_dec(x_4); +lean_dec(x_3); +return x_19; } } +LEAN_EXPORT lean_object* l_Lean_Meta_Grind_Arith_CommRing_EqCnstr_simplifyBasis_goVar___lambda__1___boxed(lean_object* x_1, lean_object* x_2, lean_object* x_3, lean_object* x_4, lean_object* x_5, lean_object* x_6, lean_object* x_7, lean_object* x_8, lean_object* x_9, lean_object* x_10, lean_object* x_11, lean_object* x_12, lean_object* x_13, lean_object* x_14, lean_object* x_15) { +_start: +{ +lean_object* x_16; +x_16 = l_Lean_Meta_Grind_Arith_CommRing_EqCnstr_simplifyBasis_goVar___lambda__1(x_1, x_2, x_3, x_4, x_5, x_6, x_7, x_8, x_9, x_10, x_11, x_12, x_13, x_14, x_15); +lean_dec(x_5); +lean_dec(x_4); +return x_16; } } -else +LEAN_EXPORT lean_object* l_Lean_Meta_Grind_Arith_CommRing_EqCnstr_simplifyBasis_goVar___boxed(lean_object* x_1, lean_object* x_2, lean_object* x_3, lean_object* x_4, lean_object* x_5, lean_object* x_6, lean_object* x_7, lean_object* x_8, lean_object* x_9, lean_object* x_10, lean_object* x_11, lean_object* x_12, lean_object* x_13) { +_start: { -lean_object* x_64; lean_object* x_65; lean_object* x_66; lean_object* x_67; lean_object* x_68; lean_object* x_69; lean_object* x_70; lean_object* x_71; lean_object* x_72; uint8_t x_73; -x_64 = lean_ctor_get(x_15, 1); -lean_inc(x_64); -lean_dec(x_15); -x_65 = lean_ctor_get(x_16, 0); -lean_inc(x_65); -lean_dec(x_16); -lean_inc(x_65); -x_66 = lean_nat_to_int(x_65); -x_67 = l_Lean_Meta_Grind_Arith_gcdExt(x_2, x_66); -x_68 = lean_ctor_get(x_67, 1); -lean_inc(x_68); -x_69 = lean_ctor_get(x_67, 0); -lean_inc(x_69); -lean_dec(x_67); -x_70 = lean_ctor_get(x_68, 0); -lean_inc(x_70); -if (lean_is_exclusive(x_68)) { - lean_ctor_release(x_68, 0); - lean_ctor_release(x_68, 1); - x_71 = x_68; -} else { - lean_dec_ref(x_68); - x_71 = lean_box(0); +lean_object* x_14; +x_14 = l_Lean_Meta_Grind_Arith_CommRing_EqCnstr_simplifyBasis_goVar(x_1, x_2, x_3, x_4, x_5, x_6, x_7, x_8, x_9, x_10, x_11, x_12, x_13); +lean_dec(x_3); +return x_14; } -x_72 = l_Lean_Meta_Grind_Arith_CommRing_EqCnstr_toMonic___lambda__1___closed__1; -x_73 = lean_int_dec_eq(x_69, x_72); -lean_dec(x_69); -if (x_73 == 0) +} +LEAN_EXPORT lean_object* l_Lean_Meta_Grind_Arith_CommRing_EqCnstr_simplifyBasis_go(lean_object* x_1, lean_object* x_2, lean_object* x_3, lean_object* x_4, lean_object* x_5, lean_object* x_6, lean_object* x_7, lean_object* x_8, lean_object* x_9, lean_object* x_10, lean_object* x_11, lean_object* x_12, lean_object* x_13) { +_start: { -lean_object* x_74; lean_object* x_75; -lean_dec(x_71); -lean_dec(x_70); -lean_dec(x_66); -lean_dec(x_65); -x_74 = lean_box(0); -x_75 = l_Lean_Meta_Grind_Arith_CommRing_EqCnstr_toMonic___lambda__2(x_1, x_2, x_3, x_74, x_5, x_6, x_7, x_8, x_9, x_10, x_11, x_12, x_13, x_64); -lean_dec(x_13); +if (lean_obj_tag(x_3) == 0) +{ +lean_object* x_14; lean_object* x_15; lean_dec(x_12); lean_dec(x_11); lean_dec(x_10); @@ -15701,12 +17192,46 @@ lean_dec(x_8); lean_dec(x_7); lean_dec(x_6); lean_dec(x_5); -return x_75; +lean_dec(x_4); +lean_dec(x_2); +lean_dec(x_1); +x_14 = lean_box(0); +x_15 = lean_alloc_ctor(0, 2, 0); +lean_ctor_set(x_15, 0, x_14); +lean_ctor_set(x_15, 1, x_13); +return x_15; } else { -lean_object* x_76; uint8_t x_77; -lean_dec(x_13); +lean_object* x_16; lean_object* x_17; lean_object* x_18; lean_object* x_19; +x_16 = lean_ctor_get(x_3, 0); +x_17 = lean_ctor_get(x_3, 1); 
+x_18 = lean_ctor_get(x_16, 0); +lean_inc(x_12); +lean_inc(x_11); +lean_inc(x_10); +lean_inc(x_9); +lean_inc(x_8); +lean_inc(x_7); +lean_inc(x_6); +lean_inc(x_5); +lean_inc(x_4); +lean_inc(x_2); +lean_inc(x_1); +x_19 = l_Lean_Meta_Grind_Arith_CommRing_EqCnstr_simplifyBasis_goVar(x_1, x_2, x_18, x_4, x_5, x_6, x_7, x_8, x_9, x_10, x_11, x_12, x_13); +if (lean_obj_tag(x_19) == 0) +{ +lean_object* x_20; +x_20 = lean_ctor_get(x_19, 1); +lean_inc(x_20); +lean_dec(x_19); +x_3 = x_17; +x_13 = x_20; +goto _start; +} +else +{ +uint8_t x_22; lean_dec(x_12); lean_dec(x_11); lean_dec(x_10); @@ -15715,73 +17240,50 @@ lean_dec(x_8); lean_dec(x_7); lean_dec(x_6); lean_dec(x_5); -x_76 = l_Lean_Meta_Grind_Arith_CommRing_EqCnstr_checkConstant___closed__2; -x_77 = lean_int_dec_lt(x_70, x_76); -if (x_77 == 0) +lean_dec(x_4); +lean_dec(x_2); +lean_dec(x_1); +x_22 = !lean_is_exclusive(x_19); +if (x_22 == 0) { -lean_object* x_78; lean_object* x_79; lean_object* x_80; lean_object* x_81; lean_object* x_82; lean_object* x_83; -lean_dec(x_66); -x_78 = lean_ctor_get(x_1, 2); -lean_inc(x_78); -x_79 = lean_ctor_get(x_1, 3); -lean_inc(x_79); -x_80 = l_Lean_Grind_CommRing_Poly_mulConstC(x_70, x_3, x_65); -if (lean_is_scalar(x_71)) { - x_81 = lean_alloc_ctor(3, 2, 0); -} else { - x_81 = x_71; - lean_ctor_set_tag(x_81, 3); -} -lean_ctor_set(x_81, 0, x_70); -lean_ctor_set(x_81, 1, x_1); -x_82 = lean_alloc_ctor(0, 4, 0); -lean_ctor_set(x_82, 0, x_80); -lean_ctor_set(x_82, 1, x_81); -lean_ctor_set(x_82, 2, x_78); -lean_ctor_set(x_82, 3, x_79); -x_83 = lean_alloc_ctor(0, 2, 0); -lean_ctor_set(x_83, 0, x_82); -lean_ctor_set(x_83, 1, x_64); -return x_83; +return x_19; } else { -lean_object* x_84; lean_object* x_85; lean_object* x_86; lean_object* x_87; lean_object* x_88; lean_object* x_89; lean_object* x_90; -x_84 = lean_ctor_get(x_1, 2); -lean_inc(x_84); -x_85 = lean_ctor_get(x_1, 3); -lean_inc(x_85); -x_86 = lean_int_emod(x_70, x_66); -lean_dec(x_66); -lean_dec(x_70); -x_87 = l_Lean_Grind_CommRing_Poly_mulConstC(x_86, x_3, x_65); -if (lean_is_scalar(x_71)) { - x_88 = lean_alloc_ctor(3, 2, 0); -} else { - x_88 = x_71; - lean_ctor_set_tag(x_88, 3); +lean_object* x_23; lean_object* x_24; lean_object* x_25; +x_23 = lean_ctor_get(x_19, 0); +x_24 = lean_ctor_get(x_19, 1); +lean_inc(x_24); +lean_inc(x_23); +lean_dec(x_19); +x_25 = lean_alloc_ctor(1, 2, 0); +lean_ctor_set(x_25, 0, x_23); +lean_ctor_set(x_25, 1, x_24); +return x_25; +} } -lean_ctor_set(x_88, 0, x_86); -lean_ctor_set(x_88, 1, x_1); -x_89 = lean_alloc_ctor(0, 4, 0); -lean_ctor_set(x_89, 0, x_87); -lean_ctor_set(x_89, 1, x_88); -lean_ctor_set(x_89, 2, x_84); -lean_ctor_set(x_89, 3, x_85); -x_90 = lean_alloc_ctor(0, 2, 0); -lean_ctor_set(x_90, 0, x_89); -lean_ctor_set(x_90, 1, x_64); -return x_90; } } } +LEAN_EXPORT lean_object* l_Lean_Meta_Grind_Arith_CommRing_EqCnstr_simplifyBasis_go___boxed(lean_object* x_1, lean_object* x_2, lean_object* x_3, lean_object* x_4, lean_object* x_5, lean_object* x_6, lean_object* x_7, lean_object* x_8, lean_object* x_9, lean_object* x_10, lean_object* x_11, lean_object* x_12, lean_object* x_13) { +_start: +{ +lean_object* x_14; +x_14 = l_Lean_Meta_Grind_Arith_CommRing_EqCnstr_simplifyBasis_go(x_1, x_2, x_3, x_4, x_5, x_6, x_7, x_8, x_9, x_10, x_11, x_12, x_13); +lean_dec(x_3); +return x_14; } } -else +LEAN_EXPORT lean_object* l_Lean_Meta_Grind_Arith_CommRing_EqCnstr_simplifyBasis___lambda__1(lean_object* x_1, lean_object* x_2, lean_object* x_3, lean_object* x_4, lean_object* x_5, lean_object* x_6, lean_object* x_7, lean_object* x_8, 
lean_object* x_9, lean_object* x_10, lean_object* x_11, lean_object* x_12) { +_start: +{ +lean_object* x_13; +x_13 = lean_ctor_get(x_1, 0); +lean_inc(x_13); +if (lean_obj_tag(x_13) == 0) { -uint8_t x_91; +lean_object* x_14; lean_object* x_15; lean_dec(x_13); -lean_dec(x_12); lean_dec(x_11); lean_dec(x_10); lean_dec(x_9); @@ -15789,51 +17291,108 @@ lean_dec(x_8); lean_dec(x_7); lean_dec(x_6); lean_dec(x_5); +lean_dec(x_4); lean_dec(x_3); lean_dec(x_1); -x_91 = !lean_is_exclusive(x_15); -if (x_91 == 0) -{ +x_14 = lean_box(0); +x_15 = lean_alloc_ctor(0, 2, 0); +lean_ctor_set(x_15, 0, x_14); +lean_ctor_set(x_15, 1, x_12); return x_15; } else { -lean_object* x_92; lean_object* x_93; lean_object* x_94; -x_92 = lean_ctor_get(x_15, 0); -x_93 = lean_ctor_get(x_15, 1); -lean_inc(x_93); -lean_inc(x_92); -lean_dec(x_15); -x_94 = lean_alloc_ctor(1, 2, 0); -lean_ctor_set(x_94, 0, x_92); -lean_ctor_set(x_94, 1, x_93); -return x_94; +lean_object* x_16; lean_object* x_17; +x_16 = lean_ctor_get(x_13, 1); +lean_inc(x_16); +lean_dec(x_13); +lean_inc(x_16); +x_17 = l_Lean_Meta_Grind_Arith_CommRing_EqCnstr_simplifyBasis_go(x_1, x_16, x_16, x_3, x_4, x_5, x_6, x_7, x_8, x_9, x_10, x_11, x_12); +lean_dec(x_16); +return x_17; } } } +static lean_object* _init_l_Lean_Meta_Grind_Arith_CommRing_EqCnstr_simplifyBasis___closed__1() { +_start: +{ +lean_object* x_1; +x_1 = lean_mk_string_unchecked("using: ", 7, 7); +return x_1; } -LEAN_EXPORT lean_object* l_Lean_Meta_Grind_Arith_CommRing_EqCnstr_toMonic(lean_object* x_1, lean_object* x_2, lean_object* x_3, lean_object* x_4, lean_object* x_5, lean_object* x_6, lean_object* x_7, lean_object* x_8, lean_object* x_9, lean_object* x_10, lean_object* x_11) { +} +static lean_object* _init_l_Lean_Meta_Grind_Arith_CommRing_EqCnstr_simplifyBasis___closed__2() { +_start: +{ +lean_object* x_1; lean_object* x_2; +x_1 = l_Lean_Meta_Grind_Arith_CommRing_EqCnstr_simplifyBasis___closed__1; +x_2 = l_Lean_stringToMessageData(x_1); +return x_2; +} +} +LEAN_EXPORT lean_object* l_Lean_Meta_Grind_Arith_CommRing_EqCnstr_simplifyBasis(lean_object* x_1, lean_object* x_2, lean_object* x_3, lean_object* x_4, lean_object* x_5, lean_object* x_6, lean_object* x_7, lean_object* x_8, lean_object* x_9, lean_object* x_10, lean_object* x_11) { _start: { lean_object* x_12; lean_object* x_13; lean_object* x_14; uint8_t x_15; -x_12 = lean_ctor_get(x_1, 0); -lean_inc(x_12); -x_13 = l_Lean_Grind_CommRing_Poly_lc(x_12); -x_14 = l_Lean_Meta_Grind_Arith_CommRing_EqCnstr_toMonic___lambda__1___closed__1; -x_15 = lean_int_dec_eq(x_13, x_14); +x_12 = l_List_forIn_x27_loop___at_Lean_Meta_Grind_Arith_CommRing_EqCnstr_simplifyBasis_goVar___spec__1___closed__2; +x_13 = l_Lean_isTracingEnabledFor___at_Lean_Meta_Grind_Arith_CommRing_Null_setEqUnsat___spec__1(x_12, x_2, x_3, x_4, x_5, x_6, x_7, x_8, x_9, x_10, x_11); +x_14 = lean_ctor_get(x_13, 0); +lean_inc(x_14); +x_15 = lean_unbox(x_14); +lean_dec(x_14); if (x_15 == 0) { -lean_object* x_16; lean_object* x_17; -x_16 = lean_box(0); -x_17 = l_Lean_Meta_Grind_Arith_CommRing_EqCnstr_toMonic___lambda__3(x_1, x_13, x_12, x_16, x_2, x_3, x_4, x_5, x_6, x_7, x_8, x_9, x_10, x_11); +lean_object* x_16; lean_object* x_17; lean_object* x_18; +x_16 = lean_ctor_get(x_13, 1); +lean_inc(x_16); lean_dec(x_13); -return x_17; +x_17 = lean_box(0); +x_18 = l_Lean_Meta_Grind_Arith_CommRing_EqCnstr_simplifyBasis___lambda__1(x_1, x_17, x_2, x_3, x_4, x_5, x_6, x_7, x_8, x_9, x_10, x_16); +return x_18; } else { -lean_object* x_18; -lean_dec(x_13); -lean_dec(x_12); +uint8_t x_19; +x_19 = 
!lean_is_exclusive(x_13); +if (x_19 == 0) +{ +lean_object* x_20; lean_object* x_21; lean_object* x_22; +x_20 = lean_ctor_get(x_13, 1); +x_21 = lean_ctor_get(x_13, 0); +lean_dec(x_21); +lean_inc(x_1); +x_22 = l_Lean_Meta_Grind_Arith_CommRing_EqCnstr_denoteExpr___at_Lean_Meta_Grind_Arith_CommRing_Null_setEqUnsat___spec__4(x_1, x_2, x_3, x_4, x_5, x_6, x_7, x_8, x_9, x_10, x_20); +if (lean_obj_tag(x_22) == 0) +{ +lean_object* x_23; lean_object* x_24; lean_object* x_25; lean_object* x_26; lean_object* x_27; lean_object* x_28; lean_object* x_29; lean_object* x_30; lean_object* x_31; lean_object* x_32; +x_23 = lean_ctor_get(x_22, 0); +lean_inc(x_23); +x_24 = lean_ctor_get(x_22, 1); +lean_inc(x_24); +lean_dec(x_22); +x_25 = l_Lean_MessageData_ofExpr(x_23); +x_26 = l_Lean_Meta_Grind_Arith_CommRing_EqCnstr_simplifyBasis___closed__2; +lean_ctor_set_tag(x_13, 7); +lean_ctor_set(x_13, 1, x_25); +lean_ctor_set(x_13, 0, x_26); +x_27 = l___private_Lean_Meta_Tactic_Grind_Arith_CommRing_EqCnstr_0__Lean_Meta_Grind_Arith_CommRing_toRingExpr_x3f___closed__5; +x_28 = lean_alloc_ctor(7, 2, 0); +lean_ctor_set(x_28, 0, x_13); +lean_ctor_set(x_28, 1, x_27); +x_29 = l_Lean_addTrace___at_Lean_Meta_Grind_Arith_CommRing_Null_setEqUnsat___spec__3(x_12, x_28, x_2, x_3, x_4, x_5, x_6, x_7, x_8, x_9, x_10, x_24); +x_30 = lean_ctor_get(x_29, 0); +lean_inc(x_30); +x_31 = lean_ctor_get(x_29, 1); +lean_inc(x_31); +lean_dec(x_29); +x_32 = l_Lean_Meta_Grind_Arith_CommRing_EqCnstr_simplifyBasis___lambda__1(x_1, x_30, x_2, x_3, x_4, x_5, x_6, x_7, x_8, x_9, x_10, x_31); +lean_dec(x_30); +return x_32; +} +else +{ +uint8_t x_33; +lean_free_object(x_13); lean_dec(x_10); lean_dec(x_9); lean_dec(x_8); @@ -15843,40 +17402,65 @@ lean_dec(x_5); lean_dec(x_4); lean_dec(x_3); lean_dec(x_2); -x_18 = lean_alloc_ctor(0, 2, 0); -lean_ctor_set(x_18, 0, x_1); -lean_ctor_set(x_18, 1, x_11); -return x_18; +lean_dec(x_1); +x_33 = !lean_is_exclusive(x_22); +if (x_33 == 0) +{ +return x_22; } +else +{ +lean_object* x_34; lean_object* x_35; lean_object* x_36; +x_34 = lean_ctor_get(x_22, 0); +x_35 = lean_ctor_get(x_22, 1); +lean_inc(x_35); +lean_inc(x_34); +lean_dec(x_22); +x_36 = lean_alloc_ctor(1, 2, 0); +lean_ctor_set(x_36, 0, x_34); +lean_ctor_set(x_36, 1, x_35); +return x_36; } } -LEAN_EXPORT lean_object* l_Lean_Meta_Grind_Arith_CommRing_EqCnstr_toMonic___lambda__1___boxed(lean_object* x_1, lean_object* x_2, lean_object* x_3, lean_object* x_4, lean_object* x_5, lean_object* x_6, lean_object* x_7, lean_object* x_8, lean_object* x_9, lean_object* x_10, lean_object* x_11, lean_object* x_12, lean_object* x_13, lean_object* x_14) { -_start: +} +else { -lean_object* x_15; -x_15 = l_Lean_Meta_Grind_Arith_CommRing_EqCnstr_toMonic___lambda__1(x_1, x_2, x_3, x_4, x_5, x_6, x_7, x_8, x_9, x_10, x_11, x_12, x_13, x_14); +lean_object* x_37; lean_object* x_38; +x_37 = lean_ctor_get(x_13, 1); +lean_inc(x_37); lean_dec(x_13); -lean_dec(x_12); -lean_dec(x_11); -lean_dec(x_10); -lean_dec(x_9); -lean_dec(x_8); -lean_dec(x_7); -lean_dec(x_6); -lean_dec(x_5); -lean_dec(x_4); -lean_dec(x_2); -return x_15; -} +lean_inc(x_1); +x_38 = l_Lean_Meta_Grind_Arith_CommRing_EqCnstr_denoteExpr___at_Lean_Meta_Grind_Arith_CommRing_Null_setEqUnsat___spec__4(x_1, x_2, x_3, x_4, x_5, x_6, x_7, x_8, x_9, x_10, x_37); +if (lean_obj_tag(x_38) == 0) +{ +lean_object* x_39; lean_object* x_40; lean_object* x_41; lean_object* x_42; lean_object* x_43; lean_object* x_44; lean_object* x_45; lean_object* x_46; lean_object* x_47; lean_object* x_48; lean_object* x_49; +x_39 = lean_ctor_get(x_38, 0); 
+lean_inc(x_39); +x_40 = lean_ctor_get(x_38, 1); +lean_inc(x_40); +lean_dec(x_38); +x_41 = l_Lean_MessageData_ofExpr(x_39); +x_42 = l_Lean_Meta_Grind_Arith_CommRing_EqCnstr_simplifyBasis___closed__2; +x_43 = lean_alloc_ctor(7, 2, 0); +lean_ctor_set(x_43, 0, x_42); +lean_ctor_set(x_43, 1, x_41); +x_44 = l___private_Lean_Meta_Tactic_Grind_Arith_CommRing_EqCnstr_0__Lean_Meta_Grind_Arith_CommRing_toRingExpr_x3f___closed__5; +x_45 = lean_alloc_ctor(7, 2, 0); +lean_ctor_set(x_45, 0, x_43); +lean_ctor_set(x_45, 1, x_44); +x_46 = l_Lean_addTrace___at_Lean_Meta_Grind_Arith_CommRing_Null_setEqUnsat___spec__3(x_12, x_45, x_2, x_3, x_4, x_5, x_6, x_7, x_8, x_9, x_10, x_40); +x_47 = lean_ctor_get(x_46, 0); +lean_inc(x_47); +x_48 = lean_ctor_get(x_46, 1); +lean_inc(x_48); +lean_dec(x_46); +x_49 = l_Lean_Meta_Grind_Arith_CommRing_EqCnstr_simplifyBasis___lambda__1(x_1, x_47, x_2, x_3, x_4, x_5, x_6, x_7, x_8, x_9, x_10, x_48); +lean_dec(x_47); +return x_49; } -LEAN_EXPORT lean_object* l_Lean_Meta_Grind_Arith_CommRing_EqCnstr_toMonic___lambda__2___boxed(lean_object* x_1, lean_object* x_2, lean_object* x_3, lean_object* x_4, lean_object* x_5, lean_object* x_6, lean_object* x_7, lean_object* x_8, lean_object* x_9, lean_object* x_10, lean_object* x_11, lean_object* x_12, lean_object* x_13, lean_object* x_14) { -_start: +else { -lean_object* x_15; -x_15 = l_Lean_Meta_Grind_Arith_CommRing_EqCnstr_toMonic___lambda__2(x_1, x_2, x_3, x_4, x_5, x_6, x_7, x_8, x_9, x_10, x_11, x_12, x_13, x_14); -lean_dec(x_13); -lean_dec(x_12); -lean_dec(x_11); +lean_object* x_50; lean_object* x_51; lean_object* x_52; lean_object* x_53; lean_dec(x_10); lean_dec(x_9); lean_dec(x_8); @@ -15884,18 +17468,41 @@ lean_dec(x_7); lean_dec(x_6); lean_dec(x_5); lean_dec(x_4); +lean_dec(x_3); lean_dec(x_2); -return x_15; +lean_dec(x_1); +x_50 = lean_ctor_get(x_38, 0); +lean_inc(x_50); +x_51 = lean_ctor_get(x_38, 1); +lean_inc(x_51); +if (lean_is_exclusive(x_38)) { + lean_ctor_release(x_38, 0); + lean_ctor_release(x_38, 1); + x_52 = x_38; +} else { + lean_dec_ref(x_38); + x_52 = lean_box(0); } +if (lean_is_scalar(x_52)) { + x_53 = lean_alloc_ctor(1, 2, 0); +} else { + x_53 = x_52; } -LEAN_EXPORT lean_object* l_Lean_Meta_Grind_Arith_CommRing_EqCnstr_toMonic___lambda__3___boxed(lean_object* x_1, lean_object* x_2, lean_object* x_3, lean_object* x_4, lean_object* x_5, lean_object* x_6, lean_object* x_7, lean_object* x_8, lean_object* x_9, lean_object* x_10, lean_object* x_11, lean_object* x_12, lean_object* x_13, lean_object* x_14) { +lean_ctor_set(x_53, 0, x_50); +lean_ctor_set(x_53, 1, x_51); +return x_53; +} +} +} +} +} +LEAN_EXPORT lean_object* l_Lean_Meta_Grind_Arith_CommRing_EqCnstr_simplifyBasis___lambda__1___boxed(lean_object* x_1, lean_object* x_2, lean_object* x_3, lean_object* x_4, lean_object* x_5, lean_object* x_6, lean_object* x_7, lean_object* x_8, lean_object* x_9, lean_object* x_10, lean_object* x_11, lean_object* x_12) { _start: { -lean_object* x_15; -x_15 = l_Lean_Meta_Grind_Arith_CommRing_EqCnstr_toMonic___lambda__3(x_1, x_2, x_3, x_4, x_5, x_6, x_7, x_8, x_9, x_10, x_11, x_12, x_13, x_14); -lean_dec(x_4); +lean_object* x_13; +x_13 = l_Lean_Meta_Grind_Arith_CommRing_EqCnstr_simplifyBasis___lambda__1(x_1, x_2, x_3, x_4, x_5, x_6, x_7, x_8, x_9, x_10, x_11, x_12); lean_dec(x_2); -return x_15; +return x_13; } } LEAN_EXPORT lean_object* l_Lean_Meta_Grind_Arith_CommRing_EqCnstr_addToBasisAfterSimp___lambda__1(lean_object* x_1, lean_object* x_2, lean_object* x_3, lean_object* x_4, lean_object* x_5, lean_object* x_6, lean_object* x_7, 
lean_object* x_8, lean_object* x_9, lean_object* x_10, lean_object* x_11, lean_object* x_12) { @@ -16023,12 +17630,8 @@ lean_object* x_29; lean_object* x_30; x_29 = lean_ctor_get(x_28, 1); lean_inc(x_29); lean_dec(x_28); -lean_inc(x_10); -lean_inc(x_9); -lean_inc(x_8); -lean_inc(x_7); lean_inc(x_13); -x_30 = l_Lean_Meta_Grind_Arith_CommRing_EqCnstr_denoteExpr(x_13, x_2, x_3, x_4, x_5, x_6, x_7, x_8, x_9, x_10, x_29); +x_30 = l_Lean_Meta_Grind_Arith_CommRing_EqCnstr_denoteExpr___at_Lean_Meta_Grind_Arith_CommRing_Null_setEqUnsat___spec__4(x_13, x_2, x_3, x_4, x_5, x_6, x_7, x_8, x_9, x_10, x_29); if (lean_obj_tag(x_30) == 0) { lean_object* x_31; lean_object* x_32; lean_object* x_33; lean_object* x_34; lean_object* x_35; lean_object* x_36; lean_object* x_37; lean_object* x_38; @@ -16142,12 +17745,8 @@ lean_object* x_49; lean_object* x_50; x_49 = lean_ctor_get(x_48, 1); lean_inc(x_49); lean_dec(x_48); -lean_inc(x_10); -lean_inc(x_9); -lean_inc(x_8); -lean_inc(x_7); lean_inc(x_13); -x_50 = l_Lean_Meta_Grind_Arith_CommRing_EqCnstr_denoteExpr(x_13, x_2, x_3, x_4, x_5, x_6, x_7, x_8, x_9, x_10, x_49); +x_50 = l_Lean_Meta_Grind_Arith_CommRing_EqCnstr_denoteExpr___at_Lean_Meta_Grind_Arith_CommRing_Null_setEqUnsat___spec__4(x_13, x_2, x_3, x_4, x_5, x_6, x_7, x_8, x_9, x_10, x_49); if (lean_obj_tag(x_50) == 0) { lean_object* x_51; lean_object* x_52; lean_object* x_53; lean_object* x_54; lean_object* x_55; lean_object* x_56; lean_object* x_57; lean_object* x_58; lean_object* x_59; @@ -16543,6 +18142,10 @@ if (x_25 == 0) { lean_object* x_26; x_26 = l_Lean_Meta_Grind_Arith_CommRing_EqCnstr_addToQueue(x_21, x_2, x_3, x_4, x_5, x_6, x_7, x_8, x_9, x_10, x_20); +lean_dec(x_10); +lean_dec(x_9); +lean_dec(x_8); +lean_dec(x_7); lean_dec(x_6); lean_dec(x_5); lean_dec(x_4); @@ -16668,11 +18271,7 @@ lean_object* x_31; lean_object* x_32; x_31 = lean_ctor_get(x_30, 1); lean_inc(x_31); lean_dec(x_30); -lean_inc(x_10); -lean_inc(x_9); -lean_inc(x_8); -lean_inc(x_7); -x_32 = l_Lean_Meta_Grind_Arith_CommRing_DiseqCnstr_denoteExpr(x_1, x_2, x_3, x_4, x_5, x_6, x_7, x_8, x_9, x_10, x_31); +x_32 = l_Lean_Meta_Grind_Arith_CommRing_DiseqCnstr_denoteExpr___at_Lean_Meta_Grind_Arith_CommRing_Null_setDiseqUnsat___spec__2(x_1, x_2, x_3, x_4, x_5, x_6, x_7, x_8, x_9, x_10, x_31); lean_dec(x_1); if (lean_obj_tag(x_32) == 0) { @@ -18023,10 +19622,6 @@ lean_inc(x_16); lean_dec(x_13); x_17 = lean_box(0); x_18 = l_Lean_Meta_Grind_Arith_CommRing_saveDiseq___lambda__1(x_1, x_17, x_2, x_3, x_4, x_5, x_6, x_7, x_8, x_9, x_10, x_16); -lean_dec(x_10); -lean_dec(x_9); -lean_dec(x_8); -lean_dec(x_7); return x_18; } else @@ -18046,11 +19641,7 @@ lean_object* x_23; lean_object* x_24; x_23 = lean_ctor_get(x_22, 1); lean_inc(x_23); lean_dec(x_22); -lean_inc(x_10); -lean_inc(x_9); -lean_inc(x_8); -lean_inc(x_7); -x_24 = l_Lean_Meta_Grind_Arith_CommRing_DiseqCnstr_denoteExpr(x_1, x_2, x_3, x_4, x_5, x_6, x_7, x_8, x_9, x_10, x_23); +x_24 = l_Lean_Meta_Grind_Arith_CommRing_DiseqCnstr_denoteExpr___at_Lean_Meta_Grind_Arith_CommRing_Null_setDiseqUnsat___spec__2(x_1, x_2, x_3, x_4, x_5, x_6, x_7, x_8, x_9, x_10, x_23); if (lean_obj_tag(x_24) == 0) { lean_object* x_25; lean_object* x_26; lean_object* x_27; lean_object* x_28; lean_object* x_29; lean_object* x_30; lean_object* x_31; lean_object* x_32; lean_object* x_33; @@ -18074,10 +19665,6 @@ x_32 = lean_ctor_get(x_30, 1); lean_inc(x_32); lean_dec(x_30); x_33 = l_Lean_Meta_Grind_Arith_CommRing_saveDiseq___lambda__1(x_1, x_31, x_2, x_3, x_4, x_5, x_6, x_7, x_8, x_9, x_10, x_32); -lean_dec(x_10); 
-lean_dec(x_9); -lean_dec(x_8); -lean_dec(x_7); lean_dec(x_31); return x_33; } @@ -18085,10 +19672,6 @@ else { uint8_t x_34; lean_free_object(x_13); -lean_dec(x_10); -lean_dec(x_9); -lean_dec(x_8); -lean_dec(x_7); lean_dec(x_1); x_34 = !lean_is_exclusive(x_24); if (x_34 == 0) @@ -18114,10 +19697,6 @@ else { uint8_t x_38; lean_free_object(x_13); -lean_dec(x_10); -lean_dec(x_9); -lean_dec(x_8); -lean_dec(x_7); lean_dec(x_1); x_38 = !lean_is_exclusive(x_22); if (x_38 == 0) @@ -18152,11 +19731,7 @@ lean_object* x_44; lean_object* x_45; x_44 = lean_ctor_get(x_43, 1); lean_inc(x_44); lean_dec(x_43); -lean_inc(x_10); -lean_inc(x_9); -lean_inc(x_8); -lean_inc(x_7); -x_45 = l_Lean_Meta_Grind_Arith_CommRing_DiseqCnstr_denoteExpr(x_1, x_2, x_3, x_4, x_5, x_6, x_7, x_8, x_9, x_10, x_44); +x_45 = l_Lean_Meta_Grind_Arith_CommRing_DiseqCnstr_denoteExpr___at_Lean_Meta_Grind_Arith_CommRing_Null_setDiseqUnsat___spec__2(x_1, x_2, x_3, x_4, x_5, x_6, x_7, x_8, x_9, x_10, x_44); if (lean_obj_tag(x_45) == 0) { lean_object* x_46; lean_object* x_47; lean_object* x_48; lean_object* x_49; lean_object* x_50; lean_object* x_51; lean_object* x_52; lean_object* x_53; lean_object* x_54; lean_object* x_55; @@ -18180,20 +19755,12 @@ x_54 = lean_ctor_get(x_52, 1); lean_inc(x_54); lean_dec(x_52); x_55 = l_Lean_Meta_Grind_Arith_CommRing_saveDiseq___lambda__1(x_1, x_53, x_2, x_3, x_4, x_5, x_6, x_7, x_8, x_9, x_10, x_54); -lean_dec(x_10); -lean_dec(x_9); -lean_dec(x_8); -lean_dec(x_7); lean_dec(x_53); return x_55; } else { lean_object* x_56; lean_object* x_57; lean_object* x_58; lean_object* x_59; -lean_dec(x_10); -lean_dec(x_9); -lean_dec(x_8); -lean_dec(x_7); lean_dec(x_1); x_56 = lean_ctor_get(x_45, 0); lean_inc(x_56); @@ -18220,10 +19787,6 @@ return x_59; else { lean_object* x_60; lean_object* x_61; lean_object* x_62; lean_object* x_63; -lean_dec(x_10); -lean_dec(x_9); -lean_dec(x_8); -lean_dec(x_7); lean_dec(x_1); x_60 = lean_ctor_get(x_43, 0); lean_inc(x_60); @@ -18273,6 +19836,10 @@ LEAN_EXPORT lean_object* l_Lean_Meta_Grind_Arith_CommRing_saveDiseq___boxed(lean { lean_object* x_12; x_12 = l_Lean_Meta_Grind_Arith_CommRing_saveDiseq(x_1, x_2, x_3, x_4, x_5, x_6, x_7, x_8, x_9, x_10, x_11); +lean_dec(x_10); +lean_dec(x_9); +lean_dec(x_8); +lean_dec(x_7); lean_dec(x_6); lean_dec(x_5); lean_dec(x_4); @@ -18336,6 +19903,10 @@ x_18 = lean_ctor_get(x_15, 1); lean_inc(x_18); lean_dec(x_15); x_19 = l_Lean_Meta_Grind_Arith_CommRing_saveDiseq(x_13, x_2, x_3, x_4, x_5, x_6, x_7, x_8, x_9, x_10, x_18); +lean_dec(x_10); +lean_dec(x_9); +lean_dec(x_8); +lean_dec(x_7); lean_dec(x_6); lean_dec(x_5); lean_dec(x_4); @@ -18451,6 +20022,10 @@ LEAN_EXPORT lean_object* l_Lean_Meta_Grind_Arith_CommRing_addNewDiseq___lambda__ { lean_object* x_13; x_13 = l_Lean_Meta_Grind_Arith_CommRing_addNewDiseq___lambda__1(x_1, x_2, x_3, x_4, x_5, x_6, x_7, x_8, x_9, x_10, x_11, x_12); +lean_dec(x_11); +lean_dec(x_10); +lean_dec(x_9); +lean_dec(x_8); lean_dec(x_7); lean_dec(x_6); lean_dec(x_5); @@ -19827,7 +21402,7 @@ LEAN_EXPORT lean_object* l___private_Lean_Meta_Tactic_Grind_Arith_CommRing_EqCns _start: { lean_object* x_12; -x_12 = l_Lean_Meta_Grind_Arith_CommRing_getRing(x_2, x_3, x_4, x_5, x_6, x_7, x_8, x_9, x_10, x_11); +x_12 = l_Lean_Meta_Grind_Arith_CommRing_RingM_getRing(x_2, x_3, x_4, x_5, x_6, x_7, x_8, x_9, x_10, x_11); if (lean_obj_tag(x_12) == 0) { uint8_t x_13; @@ -21194,7 +22769,7 @@ LEAN_EXPORT lean_object* l___private_Lean_Meta_Tactic_Grind_Arith_CommRing_EqCns _start: { lean_object* x_11; -x_11 = 
l_Lean_Meta_Grind_Arith_CommRing_getRing(x_1, x_2, x_3, x_4, x_5, x_6, x_7, x_8, x_9, x_10); +x_11 = l_Lean_Meta_Grind_Arith_CommRing_RingM_getRing(x_1, x_2, x_3, x_4, x_5, x_6, x_7, x_8, x_9, x_10); if (lean_obj_tag(x_11) == 0) { lean_object* x_12; lean_object* x_13; lean_object* x_14; lean_object* x_15; lean_object* x_37; lean_object* x_38; lean_object* x_39; lean_object* x_40; lean_object* x_41; lean_object* x_42; uint8_t x_43; @@ -22889,7 +24464,7 @@ lean_inc(x_33); x_34 = lean_ctor_get(x_32, 1); lean_inc(x_34); lean_dec(x_32); -x_35 = l_Lean_Grind_CommRing_Poly_denoteExpr(x_7, x_10, x_11, x_12, x_13, x_14, x_15, x_16, x_17, x_18, x_34); +x_35 = l_Lean_Grind_CommRing_Poly_denoteExpr___at_Lean_Meta_Grind_Arith_CommRing_getPolyConst___spec__1(x_7, x_10, x_11, x_12, x_13, x_14, x_15, x_16, x_17, x_18, x_34); if (lean_obj_tag(x_35) == 0) { lean_object* x_36; lean_object* x_37; lean_object* x_38; lean_object* x_39; lean_object* x_40; lean_object* x_41; lean_object* x_42; lean_object* x_43; uint8_t x_44; @@ -23127,7 +24702,7 @@ lean_inc(x_90); x_91 = lean_ctor_get(x_89, 1); lean_inc(x_91); lean_dec(x_89); -x_92 = l_Lean_Grind_CommRing_Poly_denoteExpr(x_7, x_10, x_11, x_12, x_13, x_14, x_15, x_16, x_17, x_18, x_91); +x_92 = l_Lean_Grind_CommRing_Poly_denoteExpr___at_Lean_Meta_Grind_Arith_CommRing_getPolyConst___spec__1(x_7, x_10, x_11, x_12, x_13, x_14, x_15, x_16, x_17, x_18, x_91); if (lean_obj_tag(x_92) == 0) { lean_object* x_93; lean_object* x_94; lean_object* x_95; lean_object* x_96; lean_object* x_97; lean_object* x_98; lean_object* x_99; lean_object* x_100; lean_object* x_101; uint8_t x_102; @@ -25120,7 +26695,7 @@ x_33 = lean_ctor_get(x_32, 1); lean_inc(x_33); lean_dec(x_32); x_34 = l_Lean_Meta_Grind_Arith_CommRing_PolyDerivation_p(x_19); -x_35 = l_Lean_Grind_CommRing_Poly_denoteExpr(x_34, x_4, x_5, x_6, x_7, x_8, x_9, x_10, x_11, x_12, x_33); +x_35 = l_Lean_Grind_CommRing_Poly_denoteExpr___at_Lean_Meta_Grind_Arith_CommRing_getPolyConst___spec__1(x_34, x_4, x_5, x_6, x_7, x_8, x_9, x_10, x_11, x_12, x_33); if (lean_obj_tag(x_35) == 0) { lean_object* x_36; lean_object* x_37; lean_object* x_38; lean_object* x_39; lean_object* x_40; lean_object* x_41; lean_object* x_42; lean_object* x_43; uint8_t x_44; @@ -25303,7 +26878,7 @@ x_84 = lean_ctor_get(x_83, 1); lean_inc(x_84); lean_dec(x_83); x_85 = l_Lean_Meta_Grind_Arith_CommRing_PolyDerivation_p(x_19); -x_86 = l_Lean_Grind_CommRing_Poly_denoteExpr(x_85, x_4, x_5, x_6, x_7, x_8, x_9, x_10, x_11, x_12, x_84); +x_86 = l_Lean_Grind_CommRing_Poly_denoteExpr___at_Lean_Meta_Grind_Arith_CommRing_getPolyConst___spec__1(x_85, x_4, x_5, x_6, x_7, x_8, x_9, x_10, x_11, x_12, x_84); if (lean_obj_tag(x_86) == 0) { lean_object* x_87; lean_object* x_88; lean_object* x_89; lean_object* x_90; lean_object* x_91; lean_object* x_92; lean_object* x_93; lean_object* x_94; lean_object* x_95; uint8_t x_96; @@ -25956,7 +27531,7 @@ static lean_object* _init_l_Array_forIn_x27Unsafe_loop___at___private_Lean_Meta_ lean_object* x_1; lean_object* x_2; lean_object* x_3; lean_object* x_4; lean_object* x_5; lean_object* x_6; x_1 = l_Array_forIn_x27Unsafe_loop___at___private_Lean_Meta_Tactic_Grind_Arith_CommRing_EqCnstr_0__Lean_Meta_Grind_Arith_CommRing_propagateEqs___spec__4___lambda__1___closed__1; x_2 = l_Array_forIn_x27Unsafe_loop___at___private_Lean_Meta_Tactic_Grind_Arith_CommRing_EqCnstr_0__Lean_Meta_Grind_Arith_CommRing_propagateEqs___spec__4___lambda__1___closed__2; -x_3 = lean_unsigned_to_nat(318u); +x_3 = lean_unsigned_to_nat(335u); x_4 = 
lean_unsigned_to_nat(34u); x_5 = l_Array_forIn_x27Unsafe_loop___at___private_Lean_Meta_Tactic_Grind_Arith_CommRing_EqCnstr_0__Lean_Meta_Grind_Arith_CommRing_propagateEqs___spec__4___lambda__1___closed__3; x_6 = l___private_Init_Util_0__mkPanicMessageWithDecl(x_1, x_2, x_3, x_4, x_5); @@ -28694,7 +30269,7 @@ LEAN_EXPORT lean_object* l___private_Lean_Meta_Tactic_Grind_Arith_CommRing_EqCns _start: { lean_object* x_14; -x_14 = l_Lean_Meta_Grind_Arith_CommRing_getRing(x_4, x_5, x_6, x_7, x_8, x_9, x_10, x_11, x_12, x_13); +x_14 = l_Lean_Meta_Grind_Arith_CommRing_RingM_getRing(x_4, x_5, x_6, x_7, x_8, x_9, x_10, x_11, x_12, x_13); if (lean_obj_tag(x_14) == 0) { lean_object* x_15; lean_object* x_16; lean_object* x_17; lean_object* x_18; lean_object* x_19; lean_object* x_20; @@ -28884,7 +30459,7 @@ LEAN_EXPORT lean_object* l___private_Lean_Meta_Tactic_Grind_Arith_CommRing_EqCns _start: { lean_object* x_12; -x_12 = l_Lean_Meta_Grind_Arith_CommRing_getRing(x_2, x_3, x_4, x_5, x_6, x_7, x_8, x_9, x_10, x_11); +x_12 = l_Lean_Meta_Grind_Arith_CommRing_RingM_getRing(x_2, x_3, x_4, x_5, x_6, x_7, x_8, x_9, x_10, x_11); if (lean_obj_tag(x_12) == 0) { lean_object* x_13; lean_object* x_14; lean_object* x_15; lean_object* x_16; lean_object* x_17; lean_object* x_18; @@ -29621,12 +31196,8 @@ lean_object* x_46; lean_object* x_47; x_46 = lean_ctor_get(x_45, 1); lean_inc(x_46); lean_dec(x_45); -lean_inc(x_12); -lean_inc(x_11); -lean_inc(x_10); -lean_inc(x_9); lean_inc(x_29); -x_47 = l_Lean_Meta_Grind_Arith_CommRing_EqCnstr_denoteExpr(x_29, x_4, x_5, x_6, x_7, x_8, x_9, x_10, x_11, x_12, x_46); +x_47 = l_Lean_Meta_Grind_Arith_CommRing_EqCnstr_denoteExpr___at_Lean_Meta_Grind_Arith_CommRing_Null_setEqUnsat___spec__4(x_29, x_4, x_5, x_6, x_7, x_8, x_9, x_10, x_11, x_12, x_46); if (lean_obj_tag(x_47) == 0) { lean_object* x_48; lean_object* x_49; lean_object* x_50; lean_object* x_51; lean_object* x_52; lean_object* x_53; lean_object* x_54; lean_object* x_55; lean_object* x_56; @@ -29793,12 +31364,8 @@ lean_object* x_73; lean_object* x_74; x_73 = lean_ctor_get(x_72, 1); lean_inc(x_73); lean_dec(x_72); -lean_inc(x_12); -lean_inc(x_11); -lean_inc(x_10); -lean_inc(x_9); lean_inc(x_29); -x_74 = l_Lean_Meta_Grind_Arith_CommRing_EqCnstr_denoteExpr(x_29, x_4, x_5, x_6, x_7, x_8, x_9, x_10, x_11, x_12, x_73); +x_74 = l_Lean_Meta_Grind_Arith_CommRing_EqCnstr_denoteExpr___at_Lean_Meta_Grind_Arith_CommRing_Null_setEqUnsat___spec__4(x_29, x_4, x_5, x_6, x_7, x_8, x_9, x_10, x_11, x_12, x_73); if (lean_obj_tag(x_74) == 0) { lean_object* x_75; lean_object* x_76; lean_object* x_77; lean_object* x_78; lean_object* x_79; lean_object* x_80; lean_object* x_81; lean_object* x_82; lean_object* x_83; lean_object* x_84; @@ -31220,7 +32787,7 @@ lean_object* x_23; lean_object* x_24; x_23 = lean_ctor_get(x_22, 1); lean_inc(x_23); lean_dec(x_22); -x_24 = l_Lean_Meta_Grind_Arith_CommRing_getRing(x_2, x_3, x_4, x_5, x_6, x_7, x_8, x_9, x_10, x_23); +x_24 = l_Lean_Meta_Grind_Arith_CommRing_RingM_getRing(x_2, x_3, x_4, x_5, x_6, x_7, x_8, x_9, x_10, x_23); if (lean_obj_tag(x_24) == 0) { lean_object* x_25; lean_object* x_26; lean_object* x_27; lean_object* x_28; lean_object* x_29; lean_object* x_30; lean_object* x_31; lean_object* x_32; lean_object* x_33; lean_object* x_34; @@ -31329,7 +32896,7 @@ lean_object* x_45; lean_object* x_46; x_45 = lean_ctor_get(x_44, 1); lean_inc(x_45); lean_dec(x_44); -x_46 = l_Lean_Meta_Grind_Arith_CommRing_getRing(x_2, x_3, x_4, x_5, x_6, x_7, x_8, x_9, x_10, x_45); +x_46 = 
l_Lean_Meta_Grind_Arith_CommRing_RingM_getRing(x_2, x_3, x_4, x_5, x_6, x_7, x_8, x_9, x_10, x_45); if (lean_obj_tag(x_46) == 0) { lean_object* x_47; lean_object* x_48; lean_object* x_49; lean_object* x_50; lean_object* x_51; lean_object* x_52; lean_object* x_53; lean_object* x_54; lean_object* x_55; lean_object* x_56; lean_object* x_57; @@ -32409,12 +33976,12 @@ l_Lean_Meta_Grind_Arith_CommRing_EqCnstr_checkConstant___closed__6 = _init_l_Lea lean_mark_persistent(l_Lean_Meta_Grind_Arith_CommRing_EqCnstr_checkConstant___closed__6); l_Lean_Meta_Grind_Arith_CommRing_EqCnstr_checkConstant___closed__7 = _init_l_Lean_Meta_Grind_Arith_CommRing_EqCnstr_checkConstant___closed__7(); lean_mark_persistent(l_Lean_Meta_Grind_Arith_CommRing_EqCnstr_checkConstant___closed__7); -l_List_forIn_x27_loop___at_Lean_Meta_Grind_Arith_CommRing_EqCnstr_simplifyBasis___spec__1___closed__1 = _init_l_List_forIn_x27_loop___at_Lean_Meta_Grind_Arith_CommRing_EqCnstr_simplifyBasis___spec__1___closed__1(); -lean_mark_persistent(l_List_forIn_x27_loop___at_Lean_Meta_Grind_Arith_CommRing_EqCnstr_simplifyBasis___spec__1___closed__1); l_Lean_Meta_Grind_Arith_CommRing_EqCnstr_addToQueue___lambda__2___closed__1 = _init_l_Lean_Meta_Grind_Arith_CommRing_EqCnstr_addToQueue___lambda__2___closed__1(); lean_mark_persistent(l_Lean_Meta_Grind_Arith_CommRing_EqCnstr_addToQueue___lambda__2___closed__1); l_Lean_Meta_Grind_Arith_CommRing_EqCnstr_addToQueue___lambda__2___closed__2 = _init_l_Lean_Meta_Grind_Arith_CommRing_EqCnstr_addToQueue___lambda__2___closed__2(); lean_mark_persistent(l_Lean_Meta_Grind_Arith_CommRing_EqCnstr_addToQueue___lambda__2___closed__2); +l_List_forIn_x27_loop___at_Lean_Meta_Grind_Arith_CommRing_EqCnstr_superposeWith_go___spec__1___lambda__1___closed__1 = _init_l_List_forIn_x27_loop___at_Lean_Meta_Grind_Arith_CommRing_EqCnstr_superposeWith_go___spec__1___lambda__1___closed__1(); +lean_mark_persistent(l_List_forIn_x27_loop___at_Lean_Meta_Grind_Arith_CommRing_EqCnstr_superposeWith_go___spec__1___lambda__1___closed__1); l_List_forIn_x27_loop___at_Lean_Meta_Grind_Arith_CommRing_EqCnstr_superposeWith_go___spec__1___closed__1 = _init_l_List_forIn_x27_loop___at_Lean_Meta_Grind_Arith_CommRing_EqCnstr_superposeWith_go___spec__1___closed__1(); lean_mark_persistent(l_List_forIn_x27_loop___at_Lean_Meta_Grind_Arith_CommRing_EqCnstr_superposeWith_go___spec__1___closed__1); l_List_forIn_x27_loop___at_Lean_Meta_Grind_Arith_CommRing_EqCnstr_superposeWith_go___spec__1___closed__2 = _init_l_List_forIn_x27_loop___at_Lean_Meta_Grind_Arith_CommRing_EqCnstr_superposeWith_go___spec__1___closed__2(); @@ -32435,6 +34002,22 @@ l_Lean_Meta_Grind_Arith_CommRing_EqCnstr_toMonic___lambda__1___closed__1 = _init lean_mark_persistent(l_Lean_Meta_Grind_Arith_CommRing_EqCnstr_toMonic___lambda__1___closed__1); l_Lean_Meta_Grind_Arith_CommRing_EqCnstr_toMonic___lambda__1___closed__2 = _init_l_Lean_Meta_Grind_Arith_CommRing_EqCnstr_toMonic___lambda__1___closed__2(); lean_mark_persistent(l_Lean_Meta_Grind_Arith_CommRing_EqCnstr_toMonic___lambda__1___closed__2); +l_List_forIn_x27_loop___at_Lean_Meta_Grind_Arith_CommRing_EqCnstr_simplifyBasis_goVar___spec__1___lambda__2___closed__1 = _init_l_List_forIn_x27_loop___at_Lean_Meta_Grind_Arith_CommRing_EqCnstr_simplifyBasis_goVar___spec__1___lambda__2___closed__1(); +lean_mark_persistent(l_List_forIn_x27_loop___at_Lean_Meta_Grind_Arith_CommRing_EqCnstr_simplifyBasis_goVar___spec__1___lambda__2___closed__1); 
+l_List_forIn_x27_loop___at_Lean_Meta_Grind_Arith_CommRing_EqCnstr_simplifyBasis_goVar___spec__1___lambda__2___closed__2 = _init_l_List_forIn_x27_loop___at_Lean_Meta_Grind_Arith_CommRing_EqCnstr_simplifyBasis_goVar___spec__1___lambda__2___closed__2(); +lean_mark_persistent(l_List_forIn_x27_loop___at_Lean_Meta_Grind_Arith_CommRing_EqCnstr_simplifyBasis_goVar___spec__1___lambda__2___closed__2); +l_List_forIn_x27_loop___at_Lean_Meta_Grind_Arith_CommRing_EqCnstr_simplifyBasis_goVar___spec__1___closed__1 = _init_l_List_forIn_x27_loop___at_Lean_Meta_Grind_Arith_CommRing_EqCnstr_simplifyBasis_goVar___spec__1___closed__1(); +lean_mark_persistent(l_List_forIn_x27_loop___at_Lean_Meta_Grind_Arith_CommRing_EqCnstr_simplifyBasis_goVar___spec__1___closed__1); +l_List_forIn_x27_loop___at_Lean_Meta_Grind_Arith_CommRing_EqCnstr_simplifyBasis_goVar___spec__1___closed__2 = _init_l_List_forIn_x27_loop___at_Lean_Meta_Grind_Arith_CommRing_EqCnstr_simplifyBasis_goVar___spec__1___closed__2(); +lean_mark_persistent(l_List_forIn_x27_loop___at_Lean_Meta_Grind_Arith_CommRing_EqCnstr_simplifyBasis_goVar___spec__1___closed__2); +l_List_forIn_x27_loop___at_Lean_Meta_Grind_Arith_CommRing_EqCnstr_simplifyBasis_goVar___spec__1___closed__3 = _init_l_List_forIn_x27_loop___at_Lean_Meta_Grind_Arith_CommRing_EqCnstr_simplifyBasis_goVar___spec__1___closed__3(); +lean_mark_persistent(l_List_forIn_x27_loop___at_Lean_Meta_Grind_Arith_CommRing_EqCnstr_simplifyBasis_goVar___spec__1___closed__3); +l_List_forIn_x27_loop___at_Lean_Meta_Grind_Arith_CommRing_EqCnstr_simplifyBasis_goVar___spec__1___closed__4 = _init_l_List_forIn_x27_loop___at_Lean_Meta_Grind_Arith_CommRing_EqCnstr_simplifyBasis_goVar___spec__1___closed__4(); +lean_mark_persistent(l_List_forIn_x27_loop___at_Lean_Meta_Grind_Arith_CommRing_EqCnstr_simplifyBasis_goVar___spec__1___closed__4); +l_Lean_Meta_Grind_Arith_CommRing_EqCnstr_simplifyBasis___closed__1 = _init_l_Lean_Meta_Grind_Arith_CommRing_EqCnstr_simplifyBasis___closed__1(); +lean_mark_persistent(l_Lean_Meta_Grind_Arith_CommRing_EqCnstr_simplifyBasis___closed__1); +l_Lean_Meta_Grind_Arith_CommRing_EqCnstr_simplifyBasis___closed__2 = _init_l_Lean_Meta_Grind_Arith_CommRing_EqCnstr_simplifyBasis___closed__2(); +lean_mark_persistent(l_Lean_Meta_Grind_Arith_CommRing_EqCnstr_simplifyBasis___closed__2); l_Lean_Meta_Grind_Arith_CommRing_EqCnstr_addToBasisAfterSimp___closed__1 = _init_l_Lean_Meta_Grind_Arith_CommRing_EqCnstr_addToBasisAfterSimp___closed__1(); lean_mark_persistent(l_Lean_Meta_Grind_Arith_CommRing_EqCnstr_addToBasisAfterSimp___closed__1); l_Lean_Meta_Grind_Arith_CommRing_EqCnstr_addToBasisAfterSimp___closed__2 = _init_l_Lean_Meta_Grind_Arith_CommRing_EqCnstr_addToBasisAfterSimp___closed__2(); diff --git a/stage0/stdlib/Lean/Meta/Tactic/Grind/Arith/CommRing/Inv.c b/stage0/stdlib/Lean/Meta/Tactic/Grind/Arith/CommRing/Inv.c index 5b11b76d155c..03004c67ee04 100644 --- a/stage0/stdlib/Lean/Meta/Tactic/Grind/Arith/CommRing/Inv.c +++ b/stage0/stdlib/Lean/Meta/Tactic/Grind/Arith/CommRing/Inv.c @@ -21,7 +21,6 @@ LEAN_EXPORT lean_object* l_panic___at___private_Lean_Meta_Tactic_Grind_Arith_Com static lean_object* l_Std_Range_forIn_x27_loop___at_Lean_Meta_Grind_Arith_CommRing_checkInvariants___spec__1___closed__1; LEAN_EXPORT lean_object* l_panic___at___private_Lean_Meta_Tactic_Grind_Arith_CommRing_Inv_0__Lean_Meta_Grind_Arith_CommRing_checkVars___spec__8(lean_object*, lean_object*, lean_object*, lean_object*, lean_object*, lean_object*, lean_object*, lean_object*, lean_object*, lean_object*, lean_object*); LEAN_EXPORT 
lean_object* l_Lean_PersistentHashMap_foldlMAux_traverse___at___private_Lean_Meta_Tactic_Grind_Arith_CommRing_Inv_0__Lean_Meta_Grind_Arith_CommRing_checkVars___spec__7___rarg___boxed(lean_object*, lean_object*, lean_object*, lean_object*, lean_object*, lean_object*, lean_object*, lean_object*, lean_object*, lean_object*, lean_object*, lean_object*, lean_object*, lean_object*, lean_object*, lean_object*); -lean_object* l_Lean_Meta_Grind_Arith_CommRing_getRing(lean_object*, lean_object*, lean_object*, lean_object*, lean_object*, lean_object*, lean_object*, lean_object*, lean_object*, lean_object*); LEAN_EXPORT lean_object* l_Lean_Meta_Grind_Arith_CommRing_checkInvariants___lambda__1(lean_object*, lean_object*, lean_object*, lean_object*, lean_object*, lean_object*, lean_object*, lean_object*, lean_object*, lean_object*); LEAN_EXPORT lean_object* l_Array_forIn_x27Unsafe_loop___at___private_Lean_Meta_Tactic_Grind_Arith_CommRing_Inv_0__Lean_Meta_Grind_Arith_CommRing_checkDiseqs___spec__5(lean_object*, lean_object*, lean_object*, lean_object*, size_t, size_t, lean_object*, lean_object*, lean_object*, lean_object*, lean_object*, lean_object*, lean_object*, lean_object*, lean_object*, lean_object*, lean_object*); static lean_object* l_List_forIn_x27_loop___at___private_Lean_Meta_Tactic_Grind_Arith_CommRing_Inv_0__Lean_Meta_Grind_Arith_CommRing_checkBasis___spec__3___closed__6; @@ -134,6 +133,7 @@ lean_object* lean_array_uget(lean_object*, size_t); size_t lean_array_size(lean_object*); static lean_object* l_List_forIn_x27_loop___at___private_Lean_Meta_Tactic_Grind_Arith_CommRing_Inv_0__Lean_Meta_Grind_Arith_CommRing_checkBasis___spec__3___closed__1; lean_object* l_instInhabitedOfMonad___rarg(lean_object*, lean_object*); +lean_object* l_Lean_Meta_Grind_Arith_CommRing_RingM_getRing(lean_object*, lean_object*, lean_object*, lean_object*, lean_object*, lean_object*, lean_object*, lean_object*, lean_object*, lean_object*); lean_object* l_Lean_Meta_Grind_Arith_CommRing_PolyDerivation_p(lean_object*); lean_object* lean_string_append(lean_object*, lean_object*); LEAN_EXPORT lean_object* l_Array_foldlMUnsafe_fold___at___private_Lean_Meta_Tactic_Grind_Arith_CommRing_Inv_0__Lean_Meta_Grind_Arith_CommRing_checkVars___spec__6___rarg(lean_object*, lean_object*, size_t, size_t, lean_object*, lean_object*, lean_object*, lean_object*, lean_object*, lean_object*, lean_object*, lean_object*, lean_object*, lean_object*, lean_object*); @@ -1380,7 +1380,7 @@ LEAN_EXPORT lean_object* l___private_Lean_Meta_Tactic_Grind_Arith_CommRing_Inv_0 _start: { lean_object* x_11; -x_11 = l_Lean_Meta_Grind_Arith_CommRing_getRing(x_1, x_2, x_3, x_4, x_5, x_6, x_7, x_8, x_9, x_10); +x_11 = l_Lean_Meta_Grind_Arith_CommRing_RingM_getRing(x_1, x_2, x_3, x_4, x_5, x_6, x_7, x_8, x_9, x_10); if (lean_obj_tag(x_11) == 0) { lean_object* x_12; lean_object* x_13; lean_object* x_14; lean_object* x_15; lean_object* x_16; lean_object* x_17; @@ -3226,7 +3226,7 @@ LEAN_EXPORT lean_object* l___private_Lean_Meta_Tactic_Grind_Arith_CommRing_Inv_0 _start: { lean_object* x_11; -x_11 = l_Lean_Meta_Grind_Arith_CommRing_getRing(x_1, x_2, x_3, x_4, x_5, x_6, x_7, x_8, x_9, x_10); +x_11 = l_Lean_Meta_Grind_Arith_CommRing_RingM_getRing(x_1, x_2, x_3, x_4, x_5, x_6, x_7, x_8, x_9, x_10); if (lean_obj_tag(x_11) == 0) { lean_object* x_12; lean_object* x_13; lean_object* x_14; lean_object* x_15; lean_object* x_16; @@ -3692,7 +3692,7 @@ LEAN_EXPORT lean_object* l___private_Lean_Meta_Tactic_Grind_Arith_CommRing_Inv_0 _start: { lean_object* x_11; -x_11 = 
l_Lean_Meta_Grind_Arith_CommRing_getRing(x_1, x_2, x_3, x_4, x_5, x_6, x_7, x_8, x_9, x_10); +x_11 = l_Lean_Meta_Grind_Arith_CommRing_RingM_getRing(x_1, x_2, x_3, x_4, x_5, x_6, x_7, x_8, x_9, x_10); if (lean_obj_tag(x_11) == 0) { lean_object* x_12; lean_object* x_13; lean_object* x_14; lean_object* x_15; lean_object* x_16; @@ -4705,7 +4705,7 @@ LEAN_EXPORT lean_object* l___private_Lean_Meta_Tactic_Grind_Arith_CommRing_Inv_0 _start: { lean_object* x_11; -x_11 = l_Lean_Meta_Grind_Arith_CommRing_getRing(x_1, x_2, x_3, x_4, x_5, x_6, x_7, x_8, x_9, x_10); +x_11 = l_Lean_Meta_Grind_Arith_CommRing_RingM_getRing(x_1, x_2, x_3, x_4, x_5, x_6, x_7, x_8, x_9, x_10); if (lean_obj_tag(x_11) == 0) { lean_object* x_12; lean_object* x_13; lean_object* x_14; lean_object* x_15; lean_object* x_16; diff --git a/stage0/stdlib/Lean/Meta/Tactic/Grind/Arith/CommRing/PP.c b/stage0/stdlib/Lean/Meta/Tactic/Grind/Arith/CommRing/PP.c new file mode 100644 index 000000000000..0f40e1ed68cd --- /dev/null +++ b/stage0/stdlib/Lean/Meta/Tactic/Grind/Arith/CommRing/PP.c @@ -0,0 +1,3153 @@ +// Lean compiler output +// Module: Lean.Meta.Tactic.Grind.Arith.CommRing.PP +// Imports: Lean.Meta.Tactic.Grind.Arith.CommRing.DenoteExpr +#include <lean/lean.h> +#if defined(__clang__) +#pragma clang diagnostic ignored "-Wunused-parameter" +#pragma clang diagnostic ignored "-Wunused-label" +#elif defined(__GNUC__) && !defined(__CLANG__) +#pragma GCC diagnostic ignored "-Wunused-parameter" +#pragma GCC diagnostic ignored "-Wunused-label" +#pragma GCC diagnostic ignored "-Wunused-but-set-variable" +#endif +#ifdef __cplusplus +extern "C" { +#endif +LEAN_EXPORT lean_object* l_Lean_Meta_Grind_Arith_CommRing_ppBasis_x3f___lambda__1(lean_object*); +lean_object* l_Lean_Expr_const___override(lean_object*, lean_object*); +LEAN_EXPORT lean_object* l_Array_forIn_x27Unsafe_loop___at_Lean_Meta_Grind_Arith_CommRing_ppBasis_x3f___spec__16(lean_object*, lean_object*, lean_object*, lean_object*, size_t, size_t, lean_object*, lean_object*, lean_object*, lean_object*, lean_object*, lean_object*, lean_object*); +lean_object* l_Lean_mkNatLit(lean_object*); +static lean_object* l_Lean_Meta_Grind_Arith_CommRing_ppBasis_x3f___closed__1; +static lean_object* l___private_Lean_Meta_Tactic_Grind_Arith_CommRing_DenoteExpr_0__Lean_Meta_Grind_Arith_CommRing_mkEq___at_Lean_Meta_Grind_Arith_CommRing_ppBasis_x3f___spec__9___closed__1; +LEAN_EXPORT lean_object* l_Lean_PersistentArray_forIn___at_Lean_Meta_Grind_Arith_CommRing_ppBasis_x3f___spec__12(lean_object*, lean_object*, lean_object*, lean_object*, lean_object*, lean_object*, lean_object*, lean_object*); +static lean_object* l_Lean_Meta_Grind_Arith_CommRing_ppRing_x3f___lambda__1___closed__1; +static lean_object* l___private_Lean_Meta_Tactic_Grind_Arith_CommRing_DenoteExpr_0__Lean_Meta_Grind_Arith_CommRing_denoteNum___at_Lean_Meta_Grind_Arith_CommRing_ppBasis_x3f___spec__3___closed__6; +static lean_object* l_Lean_Meta_Grind_Arith_CommRing_ppBasis_x3f___closed__3; +static lean_object* l___private_Lean_Meta_Tactic_Grind_Arith_CommRing_DenoteExpr_0__Lean_Meta_Grind_Arith_CommRing_mkEq___at_Lean_Meta_Grind_Arith_CommRing_ppBasis_x3f___spec__9___closed__2; +lean_object* l_Lean_mkAppB(lean_object*, lean_object*, lean_object*); +LEAN_EXPORT lean_object* l_Lean_Meta_Grind_Arith_CommRing_ppBasis_x3f(lean_object*, lean_object*, lean_object*, lean_object*, lean_object*, lean_object*); +static lean_object* l_List_forIn_x27_loop___at_Lean_Meta_Grind_Arith_CommRing_ppBasis_x3f___spec__11___closed__2; +lean_object* 
l_Lean_Level_succ___override(lean_object*); +LEAN_EXPORT lean_object* l_Lean_Grind_CommRing_Poly_denoteExpr___at_Lean_Meta_Grind_Arith_CommRing_ppBasis_x3f___spec__2___boxed(lean_object*, lean_object*, lean_object*, lean_object*, lean_object*, lean_object*, lean_object*); +lean_object* lean_array_push(lean_object*, lean_object*); +static lean_object* l_Lean_Meta_Grind_Arith_CommRing_ppDiseqs_x3f___closed__4; +LEAN_EXPORT lean_object* l_Lean_Grind_CommRing_Power_denoteExpr___at_Lean_Meta_Grind_Arith_CommRing_ppBasis_x3f___spec__6(lean_object*, lean_object*, lean_object*, lean_object*, lean_object*, lean_object*, lean_object*); +static lean_object* l_Lean_Meta_Grind_Arith_CommRing_ppDiseqs_x3f___lambda__1___closed__1; +static lean_object* l_Lean_Meta_Grind_Arith_CommRing_ppBasis_x3f___closed__4; +LEAN_EXPORT lean_object* l_Lean_Grind_CommRing_Poly_denoteExpr___at_Lean_Meta_Grind_Arith_CommRing_ppBasis_x3f___spec__2(lean_object*, lean_object*, lean_object*, lean_object*, lean_object*, lean_object*, lean_object*); +LEAN_EXPORT lean_object* l_Array_forIn_x27Unsafe_loop___at_Lean_Meta_Grind_Arith_CommRing_ppDiseqs_x3f___spec__5(lean_object*, lean_object*, lean_object*, lean_object*, lean_object*, size_t, size_t, lean_object*, lean_object*, lean_object*, lean_object*, lean_object*, lean_object*, lean_object*); +LEAN_EXPORT lean_object* l_Array_forIn_x27Unsafe_loop___at_Lean_Meta_Grind_Arith_CommRing_ppBasis_x3f___spec__14___boxed(lean_object*, lean_object*, lean_object*, lean_object*, lean_object*, lean_object*, lean_object*, lean_object*, lean_object*, lean_object*, lean_object*, lean_object*, lean_object*, lean_object*); +LEAN_EXPORT lean_object* l_Lean_Meta_Grind_Arith_CommRing_DiseqCnstr_denoteExpr___at_Lean_Meta_Grind_Arith_CommRing_ppDiseqs_x3f___spec__1___boxed(lean_object*, lean_object*, lean_object*, lean_object*, lean_object*, lean_object*, lean_object*); +LEAN_EXPORT lean_object* l_Lean_Meta_Grind_Arith_CommRing_ppRing_x3f___boxed(lean_object*, lean_object*, lean_object*, lean_object*, lean_object*, lean_object*); +LEAN_EXPORT lean_object* l_Lean_Meta_Grind_Arith_CommRing_DiseqCnstr_denoteExpr___at_Lean_Meta_Grind_Arith_CommRing_ppDiseqs_x3f___spec__1(lean_object*, lean_object*, lean_object*, lean_object*, lean_object*, lean_object*, lean_object*); +static lean_object* l___private_Lean_Meta_Tactic_Grind_Arith_CommRing_DenoteExpr_0__Lean_Meta_Grind_Arith_CommRing_denoteNum___at_Lean_Meta_Grind_Arith_CommRing_ppBasis_x3f___spec__3___closed__1; +LEAN_EXPORT lean_object* l_Lean_Meta_Grind_Arith_CommRing_pp_x3f(lean_object*, lean_object*, lean_object*, lean_object*, lean_object*, lean_object*); +static lean_object* l_Lean_Meta_Grind_Arith_CommRing_pp_x3f___closed__2; +LEAN_EXPORT lean_object* l_Array_forIn_x27Unsafe_loop___at_Lean_Meta_Grind_Arith_CommRing_ppDiseqs_x3f___spec__5___boxed(lean_object*, lean_object*, lean_object*, lean_object*, lean_object*, lean_object*, lean_object*, lean_object*, lean_object*, lean_object*, lean_object*, lean_object*, lean_object*, lean_object*); +LEAN_EXPORT lean_object* l_Lean_PersistentArray_forInAux___at_Lean_Meta_Grind_Arith_CommRing_ppBasis_x3f___spec__13___lambda__1___boxed(lean_object*, lean_object*, lean_object*, lean_object*, lean_object*, lean_object*, lean_object*, lean_object*); +LEAN_EXPORT lean_object* l_Array_forIn_x27Unsafe_loop___at_Lean_Meta_Grind_Arith_CommRing_pp_x3f___spec__1(lean_object*, lean_object*, lean_object*, size_t, size_t, lean_object*, lean_object*, lean_object*, lean_object*, lean_object*, lean_object*); +lean_object* 
l_Lean_stringToMessageData(lean_object*); +LEAN_EXPORT lean_object* l_Lean_Meta_Grind_Arith_CommRing_ppDiseqs_x3f(lean_object*, lean_object*, lean_object*, lean_object*, lean_object*, lean_object*); +static lean_object* l_Lean_Meta_Grind_Arith_CommRing_pp_x3f___closed__3; +LEAN_EXPORT lean_object* l_Lean_Meta_Grind_Arith_CommRing_ppRing_x3f(lean_object*, lean_object*, lean_object*, lean_object*, lean_object*, lean_object*); +static lean_object* l___private_Lean_Meta_Tactic_Grind_Arith_CommRing_PP_0__Lean_Meta_Grind_Arith_CommRing_toOption___closed__2; +static lean_object* l_Lean_Grind_CommRing_Mon_denoteExpr___at_Lean_Meta_Grind_Arith_CommRing_ppBasis_x3f___spec__5___closed__1; +static lean_object* l___private_Lean_Meta_Tactic_Grind_Arith_CommRing_DenoteExpr_0__Lean_Meta_Grind_Arith_CommRing_denoteNum___at_Lean_Meta_Grind_Arith_CommRing_ppBasis_x3f___spec__3___closed__7; +LEAN_EXPORT lean_object* l_Lean_Meta_Grind_Arith_CommRing_PolyDerivation_denoteExpr___at_Lean_Meta_Grind_Arith_CommRing_ppDiseqs_x3f___spec__2(lean_object*, lean_object*, lean_object*, lean_object*, lean_object*, lean_object*, lean_object*); +LEAN_EXPORT lean_object* l_Lean_Meta_Grind_Arith_CommRing_instMonadGetRingReaderTRingMetaM; +LEAN_EXPORT lean_object* l_ReaderT_read___at_Lean_Meta_Grind_Arith_CommRing_instMonadGetRingReaderTRingMetaM___spec__1(lean_object*, lean_object*, lean_object*, lean_object*, lean_object*, lean_object*); +static lean_object* l_Lean_Meta_Grind_Arith_CommRing_ppDiseqs_x3f___closed__2; +LEAN_EXPORT lean_object* l_Array_forIn_x27Unsafe_loop___at_Lean_Meta_Grind_Arith_CommRing_pp_x3f___spec__1___boxed(lean_object*, lean_object*, lean_object*, lean_object*, lean_object*, lean_object*, lean_object*, lean_object*, lean_object*, lean_object*, lean_object*); +LEAN_EXPORT lean_object* l_Lean_Grind_CommRing_Poly_denoteExpr_denoteTerm___at_Lean_Meta_Grind_Arith_CommRing_ppBasis_x3f___spec__4___boxed(lean_object*, lean_object*, lean_object*, lean_object*, lean_object*, lean_object*, lean_object*, lean_object*); +LEAN_EXPORT lean_object* l_Lean_Meta_Grind_Arith_CommRing_ppRing_x3f___lambda__1___boxed(lean_object*, lean_object*); +static lean_object* l_Lean_Meta_Grind_Arith_CommRing_pp_x3f___closed__1; +static lean_object* l___private_Lean_Meta_Tactic_Grind_Arith_CommRing_DenoteExpr_0__Lean_Meta_Grind_Arith_CommRing_denoteNum___at_Lean_Meta_Grind_Arith_CommRing_ppBasis_x3f___spec__3___closed__2; +LEAN_EXPORT lean_object* l_Lean_PersistentArray_forIn___at_Lean_Meta_Grind_Arith_CommRing_ppBasis_x3f___spec__12___lambda__1___boxed(lean_object*, lean_object*, lean_object*, lean_object*, lean_object*, lean_object*, lean_object*, lean_object*); +lean_object* lean_nat_to_int(lean_object*); +LEAN_EXPORT lean_object* l_Lean_PersistentArray_forIn___at_Lean_Meta_Grind_Arith_CommRing_ppBasis_x3f___spec__12___boxed(lean_object*, lean_object*, lean_object*, lean_object*, lean_object*, lean_object*, lean_object*, lean_object*); +static lean_object* l___private_Lean_Meta_Tactic_Grind_Arith_CommRing_DenoteExpr_0__Lean_Meta_Grind_Arith_CommRing_denoteNum___at_Lean_Meta_Grind_Arith_CommRing_ppBasis_x3f___spec__3___closed__3; +static lean_object* l_Lean_Meta_Grind_Arith_CommRing_ppDiseqs_x3f___closed__1; +lean_object* l_Lean_MessageData_ofFormat(lean_object*); +static lean_object* l_Lean_toTraceElem___at_Lean_Meta_Grind_Arith_CommRing_ppBasis_x3f___spec__10___closed__1; +LEAN_EXPORT lean_object* l_Lean_PersistentArray_forIn___at_Lean_Meta_Grind_Arith_CommRing_ppBasis_x3f___spec__12___lambda__1(lean_object*, lean_object*, 
lean_object*, lean_object*, lean_object*, lean_object*, lean_object*, lean_object*); +lean_object* l_outOfBounds___rarg(lean_object*); +LEAN_EXPORT lean_object* l_Array_forIn_x27Unsafe_loop___at_Lean_Meta_Grind_Arith_CommRing_ppBasis_x3f___spec__16___boxed(lean_object*, lean_object*, lean_object*, lean_object*, lean_object*, lean_object*, lean_object*, lean_object*, lean_object*, lean_object*, lean_object*, lean_object*, lean_object*); +LEAN_EXPORT lean_object* l_Lean_Grind_CommRing_Poly_denoteExpr_denoteTerm___at_Lean_Meta_Grind_Arith_CommRing_ppBasis_x3f___spec__4(lean_object*, lean_object*, lean_object*, lean_object*, lean_object*, lean_object*, lean_object*, lean_object*); +LEAN_EXPORT lean_object* l___private_Lean_Meta_Tactic_Grind_Arith_CommRing_DenoteExpr_0__Lean_Meta_Grind_Arith_CommRing_mkEq___at_Lean_Meta_Grind_Arith_CommRing_ppBasis_x3f___spec__9(lean_object*, lean_object*, lean_object*, lean_object*, lean_object*, lean_object*, lean_object*, lean_object*); +extern lean_object* l_Lean_instInhabitedExpr; +LEAN_EXPORT lean_object* l_Lean_Grind_CommRing_Poly_denoteExpr_go___at_Lean_Meta_Grind_Arith_CommRing_ppBasis_x3f___spec__8___boxed(lean_object*, lean_object*, lean_object*, lean_object*, lean_object*, lean_object*, lean_object*, lean_object*); +LEAN_EXPORT lean_object* l_Lean_Meta_Grind_Arith_CommRing_pp_x3f___boxed(lean_object*, lean_object*, lean_object*, lean_object*, lean_object*, lean_object*); +static lean_object* l_Lean_Meta_Grind_Arith_CommRing_ppBasis_x3f___lambda__1___closed__1; +lean_object* lean_thunk_get_own(lean_object*); +static lean_object* l_Lean_Meta_Grind_Arith_CommRing_ppRing_x3f___closed__2; +LEAN_EXPORT lean_object* l_List_forIn_x27_loop___at_Lean_Meta_Grind_Arith_CommRing_ppBasis_x3f___spec__11(lean_object*, lean_object*, lean_object*, lean_object*, lean_object*, lean_object*, lean_object*, lean_object*, lean_object*, lean_object*, lean_object*, lean_object*); +LEAN_EXPORT lean_object* l_Lean_Grind_CommRing_Mon_denoteExpr___at_Lean_Meta_Grind_Arith_CommRing_ppBasis_x3f___spec__5___boxed(lean_object*, lean_object*, lean_object*, lean_object*, lean_object*, lean_object*, lean_object*); +lean_object* l_Lean_Name_str___override(lean_object*, lean_object*); +LEAN_EXPORT lean_object* l_ReaderT_read___at_Lean_Meta_Grind_Arith_CommRing_instMonadGetRingReaderTRingMetaM___spec__1___boxed(lean_object*, lean_object*, lean_object*, lean_object*, lean_object*, lean_object*); +lean_object* l_Lean_mkNot(lean_object*); +static lean_object* l_Lean_Meta_Grind_Arith_CommRing_ppRing_x3f___closed__1; +static lean_object* l_Lean_Meta_Grind_Arith_CommRing_ppBasis_x3f___closed__2; +static lean_object* l_Lean_Meta_Grind_Arith_CommRing_ppDiseqs_x3f___lambda__1___closed__2; +static lean_object* l_Lean_Meta_Grind_Arith_CommRing_ppRing_x3f___lambda__1___closed__4; +static lean_object* l_Lean_Meta_Grind_Arith_CommRing_ppBasis_x3f___lambda__1___closed__3; +LEAN_EXPORT lean_object* l_Lean_PersistentArray_forInAux___at_Lean_Meta_Grind_Arith_CommRing_ppBasis_x3f___spec__13(lean_object*, lean_object*, lean_object*, lean_object*, lean_object*, lean_object*, lean_object*, lean_object*, lean_object*); +lean_object* lean_mk_thunk(lean_object*); +lean_object* l_Lean_MessageData_ofExpr(lean_object*); +LEAN_EXPORT lean_object* l_Array_forIn_x27Unsafe_loop___at_Lean_Meta_Grind_Arith_CommRing_ppBasis_x3f___spec__14(lean_object*, lean_object*, lean_object*, lean_object*, lean_object*, size_t, size_t, lean_object*, lean_object*, lean_object*, lean_object*, lean_object*, lean_object*, lean_object*); 
+LEAN_EXPORT lean_object* l_Lean_Meta_Grind_Arith_CommRing_ppBasis_x3f___lambda__1___boxed(lean_object*); +double l_Float_ofScientific(lean_object*, uint8_t, lean_object*); +LEAN_EXPORT lean_object* l_Lean_Meta_Grind_Arith_CommRing_ppDiseqs_x3f___boxed(lean_object*, lean_object*, lean_object*, lean_object*, lean_object*, lean_object*); +LEAN_EXPORT lean_object* l_Array_forIn_x27Unsafe_loop___at_Lean_Meta_Grind_Arith_CommRing_ppBasis_x3f___spec__15(lean_object*, lean_object*, lean_object*, lean_object*, size_t, size_t, lean_object*, lean_object*, lean_object*, lean_object*, lean_object*, lean_object*, lean_object*); +LEAN_EXPORT lean_object* l_Lean_PersistentArray_forInAux___at_Lean_Meta_Grind_Arith_CommRing_ppDiseqs_x3f___spec__4(lean_object*, lean_object*, lean_object*, lean_object*, lean_object*, lean_object*, lean_object*, lean_object*, lean_object*); +lean_object* lean_array_fget(lean_object*, lean_object*); +lean_object* lean_nat_abs(lean_object*); +LEAN_EXPORT lean_object* l_Lean_toTraceElem___at_Lean_Meta_Grind_Arith_CommRing_ppBasis_x3f___spec__10(lean_object*, lean_object*); +static lean_object* l_Lean_Meta_Grind_Arith_CommRing_ppDiseqs_x3f___closed__3; +LEAN_EXPORT lean_object* l_Array_forIn_x27Unsafe_loop___at_Lean_Meta_Grind_Arith_CommRing_ppDiseqs_x3f___spec__6___boxed(lean_object*, lean_object*, lean_object*, lean_object*, lean_object*, lean_object*, lean_object*, lean_object*, lean_object*, lean_object*, lean_object*, lean_object*, lean_object*); +LEAN_EXPORT lean_object* l_Lean_Meta_Grind_Arith_CommRing_EqCnstr_denoteExpr___at_Lean_Meta_Grind_Arith_CommRing_ppBasis_x3f___spec__1___boxed(lean_object*, lean_object*, lean_object*, lean_object*, lean_object*, lean_object*, lean_object*); +LEAN_EXPORT lean_object* l___private_Lean_Meta_Tactic_Grind_Arith_CommRing_PP_0__Lean_Meta_Grind_Arith_CommRing_toOption___boxed(lean_object*, lean_object*, lean_object*); +static lean_object* l_Lean_Meta_Grind_Arith_CommRing_ppDiseqs_x3f___lambda__1___closed__3; +lean_object* l_Lean_Expr_app___override(lean_object*, lean_object*); +LEAN_EXPORT lean_object* l_Lean_Grind_CommRing_Mon_denoteExpr_go___at_Lean_Meta_Grind_Arith_CommRing_ppBasis_x3f___spec__7___boxed(lean_object*, lean_object*, lean_object*, lean_object*, lean_object*, lean_object*, lean_object*, lean_object*); +uint8_t lean_nat_dec_eq(lean_object*, lean_object*); +lean_object* l_Lean_mkApp3(lean_object*, lean_object*, lean_object*, lean_object*); +static lean_object* l_Lean_Meta_Grind_Arith_CommRing_instMonadGetRingReaderTRingMetaM___closed__1; +uint8_t lean_nat_dec_lt(lean_object*, lean_object*); +lean_object* l_Lean_mkRawNatLit(lean_object*); +LEAN_EXPORT lean_object* l_Lean_Meta_Grind_Arith_CommRing_EqCnstr_denoteExpr___at_Lean_Meta_Grind_Arith_CommRing_ppBasis_x3f___spec__1(lean_object*, lean_object*, lean_object*, lean_object*, lean_object*, lean_object*, lean_object*); +lean_object* l_Lean_Name_mkStr2(lean_object*, lean_object*); +LEAN_EXPORT lean_object* l___private_Lean_Meta_Tactic_Grind_Arith_CommRing_PP_0__Lean_Meta_Grind_Arith_CommRing_push(lean_object*, lean_object*); +LEAN_EXPORT lean_object* l_Lean_Meta_Grind_Arith_CommRing_ppBasis_x3f___boxed(lean_object*, lean_object*, lean_object*, lean_object*, lean_object*, lean_object*); +LEAN_EXPORT lean_object* l_Lean_Meta_Grind_Arith_CommRing_PolyDerivation_denoteExpr___at_Lean_Meta_Grind_Arith_CommRing_ppDiseqs_x3f___spec__2___boxed(lean_object*, lean_object*, lean_object*, lean_object*, lean_object*, lean_object*, lean_object*); +static lean_object* 
l_Lean_Meta_Grind_Arith_CommRing_pp_x3f___closed__4; +uint8_t lean_int_dec_lt(lean_object*, lean_object*); +static lean_object* l___private_Lean_Meta_Tactic_Grind_Arith_CommRing_DenoteExpr_0__Lean_Meta_Grind_Arith_CommRing_denoteNum___at_Lean_Meta_Grind_Arith_CommRing_ppBasis_x3f___spec__3___closed__8; +static lean_object* l___private_Lean_Meta_Tactic_Grind_Arith_CommRing_DenoteExpr_0__Lean_Meta_Grind_Arith_CommRing_denoteNum___at_Lean_Meta_Grind_Arith_CommRing_ppBasis_x3f___spec__3___closed__4; +LEAN_EXPORT lean_object* l_Lean_Meta_Grind_Arith_CommRing_ppDiseqs_x3f___lambda__1___boxed(lean_object*); +LEAN_EXPORT lean_object* l_Lean_Grind_CommRing_Mon_denoteExpr_go___at_Lean_Meta_Grind_Arith_CommRing_ppBasis_x3f___spec__7(lean_object*, lean_object*, lean_object*, lean_object*, lean_object*, lean_object*, lean_object*, lean_object*); +LEAN_EXPORT lean_object* l_Lean_PersistentArray_forIn___at_Lean_Meta_Grind_Arith_CommRing_ppDiseqs_x3f___spec__3(lean_object*, lean_object*, lean_object*, lean_object*, lean_object*, lean_object*, lean_object*, lean_object*); +LEAN_EXPORT lean_object* l_Lean_Grind_CommRing_Mon_denoteExpr___at_Lean_Meta_Grind_Arith_CommRing_ppBasis_x3f___spec__5(lean_object*, lean_object*, lean_object*, lean_object*, lean_object*, lean_object*, lean_object*); +LEAN_EXPORT lean_object* l_Array_forIn_x27Unsafe_loop___at_Lean_Meta_Grind_Arith_CommRing_ppDiseqs_x3f___spec__7___boxed(lean_object*, lean_object*, lean_object*, lean_object*, lean_object*, lean_object*, lean_object*, lean_object*, lean_object*, lean_object*, lean_object*, lean_object*, lean_object*); +LEAN_EXPORT lean_object* l_List_forIn_x27_loop___at_Lean_Meta_Grind_Arith_CommRing_ppBasis_x3f___spec__11___boxed(lean_object*, lean_object*, lean_object*, lean_object*, lean_object*, lean_object*, lean_object*, lean_object*, lean_object*, lean_object*, lean_object*, lean_object*); +lean_object* lean_array_mk(lean_object*); +lean_object* l_Lean_PersistentArray_get_x21___rarg(lean_object*, lean_object*, lean_object*); +LEAN_EXPORT lean_object* l_Lean_PersistentArray_forInAux___at_Lean_Meta_Grind_Arith_CommRing_ppDiseqs_x3f___spec__4___boxed(lean_object*, lean_object*, lean_object*, lean_object*, lean_object*, lean_object*, lean_object*, lean_object*, lean_object*); +size_t lean_usize_add(size_t, size_t); +LEAN_EXPORT lean_object* l_Lean_Meta_Grind_Arith_CommRing_ppRing_x3f___lambda__1(lean_object*, lean_object*); +static lean_object* l_Lean_Meta_Grind_Arith_CommRing_ppRing_x3f___lambda__1___closed__3; +lean_object* lean_array_uget(lean_object*, size_t); +size_t lean_array_size(lean_object*); +LEAN_EXPORT lean_object* l_Lean_Grind_CommRing_Power_denoteExpr___at_Lean_Meta_Grind_Arith_CommRing_ppBasis_x3f___spec__6___boxed(lean_object*, lean_object*, lean_object*, lean_object*, lean_object*, lean_object*, lean_object*); +static double l___private_Lean_Meta_Tactic_Grind_Arith_CommRing_PP_0__Lean_Meta_Grind_Arith_CommRing_toOption___closed__1; +lean_object* l_Lean_Name_mkStr4(lean_object*, lean_object*, lean_object*, lean_object*); +static lean_object* l___private_Lean_Meta_Tactic_Grind_Arith_CommRing_DenoteExpr_0__Lean_Meta_Grind_Arith_CommRing_denoteNum___at_Lean_Meta_Grind_Arith_CommRing_ppBasis_x3f___spec__3___closed__5; +uint8_t lean_int_dec_eq(lean_object*, lean_object*); +LEAN_EXPORT lean_object* l___private_Lean_Meta_Tactic_Grind_Arith_CommRing_DenoteExpr_0__Lean_Meta_Grind_Arith_CommRing_denoteNum___at_Lean_Meta_Grind_Arith_CommRing_ppBasis_x3f___spec__3___boxed(lean_object*, lean_object*, lean_object*, lean_object*, 
lean_object*, lean_object*, lean_object*); +lean_object* l_Lean_Meta_Grind_Arith_CommRing_PolyDerivation_p(lean_object*); +lean_object* lean_array_get_size(lean_object*); +static lean_object* l_Lean_Meta_Grind_Arith_CommRing_ppBasis_x3f___lambda__1___closed__2; +LEAN_EXPORT lean_object* l_Array_forIn_x27Unsafe_loop___at_Lean_Meta_Grind_Arith_CommRing_ppDiseqs_x3f___spec__6(lean_object*, lean_object*, lean_object*, lean_object*, size_t, size_t, lean_object*, lean_object*, lean_object*, lean_object*, lean_object*, lean_object*, lean_object*); +LEAN_EXPORT lean_object* l_Lean_Meta_Grind_Arith_CommRing_ppDiseqs_x3f___lambda__1(lean_object*); +LEAN_EXPORT lean_object* l___private_Lean_Meta_Tactic_Grind_Arith_CommRing_PP_0__Lean_Meta_Grind_Arith_CommRing_toOption(lean_object*, lean_object*, lean_object*); +uint8_t lean_usize_dec_lt(size_t, size_t); +LEAN_EXPORT lean_object* l_Lean_PersistentArray_forIn___at_Lean_Meta_Grind_Arith_CommRing_ppDiseqs_x3f___spec__3___boxed(lean_object*, lean_object*, lean_object*, lean_object*, lean_object*, lean_object*, lean_object*, lean_object*); +LEAN_EXPORT lean_object* l_Array_forIn_x27Unsafe_loop___at_Lean_Meta_Grind_Arith_CommRing_ppDiseqs_x3f___spec__7(lean_object*, lean_object*, lean_object*, lean_object*, size_t, size_t, lean_object*, lean_object*, lean_object*, lean_object*, lean_object*, lean_object*, lean_object*); +static lean_object* l_List_forIn_x27_loop___at_Lean_Meta_Grind_Arith_CommRing_ppBasis_x3f___spec__11___closed__1; +static lean_object* l_Lean_Meta_Grind_Arith_CommRing_ppRing_x3f___lambda__1___closed__2; +LEAN_EXPORT lean_object* l_Lean_PersistentArray_forInAux___at_Lean_Meta_Grind_Arith_CommRing_ppBasis_x3f___spec__13___boxed(lean_object*, lean_object*, lean_object*, lean_object*, lean_object*, lean_object*, lean_object*, lean_object*, lean_object*); +LEAN_EXPORT lean_object* l_Array_forIn_x27Unsafe_loop___at_Lean_Meta_Grind_Arith_CommRing_ppBasis_x3f___spec__15___boxed(lean_object*, lean_object*, lean_object*, lean_object*, lean_object*, lean_object*, lean_object*, lean_object*, lean_object*, lean_object*, lean_object*, lean_object*, lean_object*); +LEAN_EXPORT lean_object* l___private_Lean_Meta_Tactic_Grind_Arith_CommRing_DenoteExpr_0__Lean_Meta_Grind_Arith_CommRing_mkEq___at_Lean_Meta_Grind_Arith_CommRing_ppBasis_x3f___spec__9___boxed(lean_object*, lean_object*, lean_object*, lean_object*, lean_object*, lean_object*, lean_object*, lean_object*); +LEAN_EXPORT lean_object* l_Lean_Grind_CommRing_Poly_denoteExpr_go___at_Lean_Meta_Grind_Arith_CommRing_ppBasis_x3f___spec__8(lean_object*, lean_object*, lean_object*, lean_object*, lean_object*, lean_object*, lean_object*, lean_object*); +LEAN_EXPORT lean_object* l___private_Lean_Meta_Tactic_Grind_Arith_CommRing_DenoteExpr_0__Lean_Meta_Grind_Arith_CommRing_denoteNum___at_Lean_Meta_Grind_Arith_CommRing_ppBasis_x3f___spec__3(lean_object*, lean_object*, lean_object*, lean_object*, lean_object*, lean_object*, lean_object*); +uint8_t l_Array_isEmpty___rarg(lean_object*); +LEAN_EXPORT lean_object* l_Lean_PersistentArray_forInAux___at_Lean_Meta_Grind_Arith_CommRing_ppBasis_x3f___spec__13___lambda__1(lean_object*, lean_object*, lean_object*, lean_object*, lean_object*, lean_object*, lean_object*, lean_object*); +LEAN_EXPORT lean_object* l_ReaderT_read___at_Lean_Meta_Grind_Arith_CommRing_instMonadGetRingReaderTRingMetaM___spec__1(lean_object* x_1, lean_object* x_2, lean_object* x_3, lean_object* x_4, lean_object* x_5, lean_object* x_6) { +_start: +{ +lean_object* x_7; +x_7 = lean_alloc_ctor(0, 2, 0); 
+lean_ctor_set(x_7, 0, x_1); +lean_ctor_set(x_7, 1, x_6); +return x_7; +} +} +static lean_object* _init_l_Lean_Meta_Grind_Arith_CommRing_instMonadGetRingReaderTRingMetaM___closed__1() { +_start: +{ +lean_object* x_1; +x_1 = lean_alloc_closure((void*)(l_ReaderT_read___at_Lean_Meta_Grind_Arith_CommRing_instMonadGetRingReaderTRingMetaM___spec__1___boxed), 6, 0); +return x_1; +} +} +static lean_object* _init_l_Lean_Meta_Grind_Arith_CommRing_instMonadGetRingReaderTRingMetaM() { +_start: +{ +lean_object* x_1; +x_1 = l_Lean_Meta_Grind_Arith_CommRing_instMonadGetRingReaderTRingMetaM___closed__1; +return x_1; +} +} +LEAN_EXPORT lean_object* l_ReaderT_read___at_Lean_Meta_Grind_Arith_CommRing_instMonadGetRingReaderTRingMetaM___spec__1___boxed(lean_object* x_1, lean_object* x_2, lean_object* x_3, lean_object* x_4, lean_object* x_5, lean_object* x_6) { +_start: +{ +lean_object* x_7; +x_7 = l_ReaderT_read___at_Lean_Meta_Grind_Arith_CommRing_instMonadGetRingReaderTRingMetaM___spec__1(x_1, x_2, x_3, x_4, x_5, x_6); +lean_dec(x_5); +lean_dec(x_4); +lean_dec(x_3); +lean_dec(x_2); +return x_7; +} +} +static double _init_l___private_Lean_Meta_Tactic_Grind_Arith_CommRing_PP_0__Lean_Meta_Grind_Arith_CommRing_toOption___closed__1() { +_start: +{ +lean_object* x_1; uint8_t x_2; double x_3; +x_1 = lean_unsigned_to_nat(0u); +x_2 = 0; +x_3 = l_Float_ofScientific(x_1, x_2, x_1); +return x_3; +} +} +static lean_object* _init_l___private_Lean_Meta_Tactic_Grind_Arith_CommRing_PP_0__Lean_Meta_Grind_Arith_CommRing_toOption___closed__2() { +_start: +{ +lean_object* x_1; +x_1 = lean_mk_string_unchecked("", 0, 0); +return x_1; +} +} +LEAN_EXPORT lean_object* l___private_Lean_Meta_Tactic_Grind_Arith_CommRing_PP_0__Lean_Meta_Grind_Arith_CommRing_toOption(lean_object* x_1, lean_object* x_2, lean_object* x_3) { +_start: +{ +uint8_t x_4; +x_4 = l_Array_isEmpty___rarg(x_3); +if (x_4 == 0) +{ +double x_5; uint8_t x_6; lean_object* x_7; lean_object* x_8; lean_object* x_9; lean_object* x_10; lean_object* x_11; +x_5 = l___private_Lean_Meta_Tactic_Grind_Arith_CommRing_PP_0__Lean_Meta_Grind_Arith_CommRing_toOption___closed__1; +x_6 = 1; +x_7 = l___private_Lean_Meta_Tactic_Grind_Arith_CommRing_PP_0__Lean_Meta_Grind_Arith_CommRing_toOption___closed__2; +x_8 = lean_alloc_ctor(0, 2, 17); +lean_ctor_set(x_8, 0, x_1); +lean_ctor_set(x_8, 1, x_7); +lean_ctor_set_float(x_8, sizeof(void*)*2, x_5); +lean_ctor_set_float(x_8, sizeof(void*)*2 + 8, x_5); +lean_ctor_set_uint8(x_8, sizeof(void*)*2 + 16, x_6); +x_9 = lean_thunk_get_own(x_2); +x_10 = lean_alloc_ctor(9, 3, 0); +lean_ctor_set(x_10, 0, x_8); +lean_ctor_set(x_10, 1, x_9); +lean_ctor_set(x_10, 2, x_3); +x_11 = lean_alloc_ctor(1, 1, 0); +lean_ctor_set(x_11, 0, x_10); +return x_11; +} +else +{ +lean_object* x_12; +lean_dec(x_3); +lean_dec(x_1); +x_12 = lean_box(0); +return x_12; +} +} +} +LEAN_EXPORT lean_object* l___private_Lean_Meta_Tactic_Grind_Arith_CommRing_PP_0__Lean_Meta_Grind_Arith_CommRing_toOption___boxed(lean_object* x_1, lean_object* x_2, lean_object* x_3) { +_start: +{ +lean_object* x_4; +x_4 = l___private_Lean_Meta_Tactic_Grind_Arith_CommRing_PP_0__Lean_Meta_Grind_Arith_CommRing_toOption(x_1, x_2, x_3); +lean_dec(x_2); +return x_4; +} +} +LEAN_EXPORT lean_object* l___private_Lean_Meta_Tactic_Grind_Arith_CommRing_PP_0__Lean_Meta_Grind_Arith_CommRing_push(lean_object* x_1, lean_object* x_2) { +_start: +{ +if (lean_obj_tag(x_2) == 0) +{ +return x_1; +} +else +{ +lean_object* x_3; lean_object* x_4; +x_3 = lean_ctor_get(x_2, 0); +lean_inc(x_3); +lean_dec(x_2); +x_4 = 
lean_array_push(x_1, x_3); +return x_4; +} +} +} +static lean_object* _init_l___private_Lean_Meta_Tactic_Grind_Arith_CommRing_DenoteExpr_0__Lean_Meta_Grind_Arith_CommRing_denoteNum___at_Lean_Meta_Grind_Arith_CommRing_ppBasis_x3f___spec__3___closed__1() { +_start: +{ +lean_object* x_1; +x_1 = lean_mk_string_unchecked("Lean", 4, 4); +return x_1; +} +} +static lean_object* _init_l___private_Lean_Meta_Tactic_Grind_Arith_CommRing_DenoteExpr_0__Lean_Meta_Grind_Arith_CommRing_denoteNum___at_Lean_Meta_Grind_Arith_CommRing_ppBasis_x3f___spec__3___closed__2() { +_start: +{ +lean_object* x_1; +x_1 = lean_mk_string_unchecked("Grind", 5, 5); +return x_1; +} +} +static lean_object* _init_l___private_Lean_Meta_Tactic_Grind_Arith_CommRing_DenoteExpr_0__Lean_Meta_Grind_Arith_CommRing_denoteNum___at_Lean_Meta_Grind_Arith_CommRing_ppBasis_x3f___spec__3___closed__3() { +_start: +{ +lean_object* x_1; +x_1 = lean_mk_string_unchecked("CommRing", 8, 8); +return x_1; +} +} +static lean_object* _init_l___private_Lean_Meta_Tactic_Grind_Arith_CommRing_DenoteExpr_0__Lean_Meta_Grind_Arith_CommRing_denoteNum___at_Lean_Meta_Grind_Arith_CommRing_ppBasis_x3f___spec__3___closed__4() { +_start: +{ +lean_object* x_1; +x_1 = lean_mk_string_unchecked("ofNat", 5, 5); +return x_1; +} +} +static lean_object* _init_l___private_Lean_Meta_Tactic_Grind_Arith_CommRing_DenoteExpr_0__Lean_Meta_Grind_Arith_CommRing_denoteNum___at_Lean_Meta_Grind_Arith_CommRing_ppBasis_x3f___spec__3___closed__5() { +_start: +{ +lean_object* x_1; lean_object* x_2; lean_object* x_3; lean_object* x_4; lean_object* x_5; +x_1 = l___private_Lean_Meta_Tactic_Grind_Arith_CommRing_DenoteExpr_0__Lean_Meta_Grind_Arith_CommRing_denoteNum___at_Lean_Meta_Grind_Arith_CommRing_ppBasis_x3f___spec__3___closed__1; +x_2 = l___private_Lean_Meta_Tactic_Grind_Arith_CommRing_DenoteExpr_0__Lean_Meta_Grind_Arith_CommRing_denoteNum___at_Lean_Meta_Grind_Arith_CommRing_ppBasis_x3f___spec__3___closed__2; +x_3 = l___private_Lean_Meta_Tactic_Grind_Arith_CommRing_DenoteExpr_0__Lean_Meta_Grind_Arith_CommRing_denoteNum___at_Lean_Meta_Grind_Arith_CommRing_ppBasis_x3f___spec__3___closed__3; +x_4 = l___private_Lean_Meta_Tactic_Grind_Arith_CommRing_DenoteExpr_0__Lean_Meta_Grind_Arith_CommRing_denoteNum___at_Lean_Meta_Grind_Arith_CommRing_ppBasis_x3f___spec__3___closed__4; +x_5 = l_Lean_Name_mkStr4(x_1, x_2, x_3, x_4); +return x_5; +} +} +static lean_object* _init_l___private_Lean_Meta_Tactic_Grind_Arith_CommRing_DenoteExpr_0__Lean_Meta_Grind_Arith_CommRing_denoteNum___at_Lean_Meta_Grind_Arith_CommRing_ppBasis_x3f___spec__3___closed__6() { +_start: +{ +lean_object* x_1; +x_1 = lean_mk_string_unchecked("OfNat", 5, 5); +return x_1; +} +} +static lean_object* _init_l___private_Lean_Meta_Tactic_Grind_Arith_CommRing_DenoteExpr_0__Lean_Meta_Grind_Arith_CommRing_denoteNum___at_Lean_Meta_Grind_Arith_CommRing_ppBasis_x3f___spec__3___closed__7() { +_start: +{ +lean_object* x_1; lean_object* x_2; lean_object* x_3; +x_1 = l___private_Lean_Meta_Tactic_Grind_Arith_CommRing_DenoteExpr_0__Lean_Meta_Grind_Arith_CommRing_denoteNum___at_Lean_Meta_Grind_Arith_CommRing_ppBasis_x3f___spec__3___closed__6; +x_2 = l___private_Lean_Meta_Tactic_Grind_Arith_CommRing_DenoteExpr_0__Lean_Meta_Grind_Arith_CommRing_denoteNum___at_Lean_Meta_Grind_Arith_CommRing_ppBasis_x3f___spec__3___closed__4; +x_3 = l_Lean_Name_mkStr2(x_1, x_2); +return x_3; +} +} +static lean_object* 
_init_l___private_Lean_Meta_Tactic_Grind_Arith_CommRing_DenoteExpr_0__Lean_Meta_Grind_Arith_CommRing_denoteNum___at_Lean_Meta_Grind_Arith_CommRing_ppBasis_x3f___spec__3___closed__8() { +_start: +{ +lean_object* x_1; lean_object* x_2; +x_1 = lean_unsigned_to_nat(0u); +x_2 = lean_nat_to_int(x_1); +return x_2; +} +} +LEAN_EXPORT lean_object* l___private_Lean_Meta_Tactic_Grind_Arith_CommRing_DenoteExpr_0__Lean_Meta_Grind_Arith_CommRing_denoteNum___at_Lean_Meta_Grind_Arith_CommRing_ppBasis_x3f___spec__3(lean_object* x_1, lean_object* x_2, lean_object* x_3, lean_object* x_4, lean_object* x_5, lean_object* x_6, lean_object* x_7) { +_start: +{ +lean_object* x_8; lean_object* x_9; lean_object* x_10; lean_object* x_11; lean_object* x_12; lean_object* x_13; lean_object* x_14; lean_object* x_15; lean_object* x_16; lean_object* x_17; lean_object* x_18; lean_object* x_19; lean_object* x_20; lean_object* x_21; uint8_t x_22; +x_8 = lean_nat_abs(x_1); +x_9 = l_Lean_mkRawNatLit(x_8); +x_10 = lean_ctor_get(x_2, 2); +lean_inc(x_10); +x_11 = lean_box(0); +x_12 = lean_alloc_ctor(1, 2, 0); +lean_ctor_set(x_12, 0, x_10); +lean_ctor_set(x_12, 1, x_11); +x_13 = l___private_Lean_Meta_Tactic_Grind_Arith_CommRing_DenoteExpr_0__Lean_Meta_Grind_Arith_CommRing_denoteNum___at_Lean_Meta_Grind_Arith_CommRing_ppBasis_x3f___spec__3___closed__5; +lean_inc(x_12); +x_14 = l_Lean_Expr_const___override(x_13, x_12); +x_15 = lean_ctor_get(x_2, 1); +lean_inc(x_15); +x_16 = lean_ctor_get(x_2, 3); +lean_inc(x_16); +lean_inc(x_9); +lean_inc(x_15); +x_17 = l_Lean_mkApp3(x_14, x_15, x_16, x_9); +x_18 = l___private_Lean_Meta_Tactic_Grind_Arith_CommRing_DenoteExpr_0__Lean_Meta_Grind_Arith_CommRing_denoteNum___at_Lean_Meta_Grind_Arith_CommRing_ppBasis_x3f___spec__3___closed__7; +x_19 = l_Lean_Expr_const___override(x_18, x_12); +x_20 = l_Lean_mkApp3(x_19, x_15, x_9, x_17); +x_21 = l___private_Lean_Meta_Tactic_Grind_Arith_CommRing_DenoteExpr_0__Lean_Meta_Grind_Arith_CommRing_denoteNum___at_Lean_Meta_Grind_Arith_CommRing_ppBasis_x3f___spec__3___closed__8; +x_22 = lean_int_dec_lt(x_1, x_21); +if (x_22 == 0) +{ +lean_object* x_23; +lean_dec(x_2); +x_23 = lean_alloc_ctor(0, 2, 0); +lean_ctor_set(x_23, 0, x_20); +lean_ctor_set(x_23, 1, x_7); +return x_23; +} +else +{ +lean_object* x_24; lean_object* x_25; lean_object* x_26; +x_24 = lean_ctor_get(x_2, 9); +lean_inc(x_24); +lean_dec(x_2); +x_25 = l_Lean_Expr_app___override(x_24, x_20); +x_26 = lean_alloc_ctor(0, 2, 0); +lean_ctor_set(x_26, 0, x_25); +lean_ctor_set(x_26, 1, x_7); +return x_26; +} +} +} +LEAN_EXPORT lean_object* l_Lean_Grind_CommRing_Power_denoteExpr___at_Lean_Meta_Grind_Arith_CommRing_ppBasis_x3f___spec__6(lean_object* x_1, lean_object* x_2, lean_object* x_3, lean_object* x_4, lean_object* x_5, lean_object* x_6, lean_object* x_7) { +_start: +{ +lean_object* x_8; lean_object* x_9; lean_object* x_10; uint8_t x_11; lean_object* x_12; lean_object* x_13; uint8_t x_14; +x_8 = lean_ctor_get(x_2, 13); +lean_inc(x_8); +x_9 = lean_ctor_get(x_1, 0); +lean_inc(x_9); +x_10 = lean_ctor_get(x_8, 2); +lean_inc(x_10); +x_11 = lean_nat_dec_lt(x_9, x_10); +lean_dec(x_10); +x_12 = lean_ctor_get(x_1, 1); +lean_inc(x_12); +lean_dec(x_1); +x_13 = lean_unsigned_to_nat(1u); +x_14 = lean_nat_dec_eq(x_12, x_13); +if (x_11 == 0) +{ +lean_object* x_15; lean_object* x_16; +lean_dec(x_9); +lean_dec(x_8); +x_15 = l_Lean_instInhabitedExpr; +x_16 = l_outOfBounds___rarg(x_15); +if (x_14 == 0) +{ +lean_object* x_17; lean_object* x_18; lean_object* x_19; lean_object* x_20; +x_17 = lean_ctor_get(x_2, 10); +lean_inc(x_17); 
+lean_dec(x_2); +x_18 = l_Lean_mkNatLit(x_12); +x_19 = l_Lean_mkAppB(x_17, x_16, x_18); +x_20 = lean_alloc_ctor(0, 2, 0); +lean_ctor_set(x_20, 0, x_19); +lean_ctor_set(x_20, 1, x_7); +return x_20; +} +else +{ +lean_object* x_21; +lean_dec(x_12); +lean_dec(x_2); +x_21 = lean_alloc_ctor(0, 2, 0); +lean_ctor_set(x_21, 0, x_16); +lean_ctor_set(x_21, 1, x_7); +return x_21; +} +} +else +{ +lean_object* x_22; lean_object* x_23; +x_22 = l_Lean_instInhabitedExpr; +x_23 = l_Lean_PersistentArray_get_x21___rarg(x_22, x_8, x_9); +lean_dec(x_9); +if (x_14 == 0) +{ +lean_object* x_24; lean_object* x_25; lean_object* x_26; lean_object* x_27; +x_24 = lean_ctor_get(x_2, 10); +lean_inc(x_24); +lean_dec(x_2); +x_25 = l_Lean_mkNatLit(x_12); +x_26 = l_Lean_mkAppB(x_24, x_23, x_25); +x_27 = lean_alloc_ctor(0, 2, 0); +lean_ctor_set(x_27, 0, x_26); +lean_ctor_set(x_27, 1, x_7); +return x_27; +} +else +{ +lean_object* x_28; +lean_dec(x_12); +lean_dec(x_2); +x_28 = lean_alloc_ctor(0, 2, 0); +lean_ctor_set(x_28, 0, x_23); +lean_ctor_set(x_28, 1, x_7); +return x_28; +} +} +} +} +LEAN_EXPORT lean_object* l_Lean_Grind_CommRing_Mon_denoteExpr_go___at_Lean_Meta_Grind_Arith_CommRing_ppBasis_x3f___spec__7(lean_object* x_1, lean_object* x_2, lean_object* x_3, lean_object* x_4, lean_object* x_5, lean_object* x_6, lean_object* x_7, lean_object* x_8) { +_start: +{ +if (lean_obj_tag(x_1) == 0) +{ +lean_object* x_9; +lean_dec(x_3); +x_9 = lean_alloc_ctor(0, 2, 0); +lean_ctor_set(x_9, 0, x_2); +lean_ctor_set(x_9, 1, x_8); +return x_9; +} +else +{ +lean_object* x_10; lean_object* x_11; lean_object* x_12; lean_object* x_13; lean_object* x_14; lean_object* x_15; lean_object* x_16; +x_10 = lean_ctor_get(x_1, 0); +lean_inc(x_10); +x_11 = lean_ctor_get(x_1, 1); +lean_inc(x_11); +lean_dec(x_1); +lean_inc(x_3); +x_12 = l_Lean_Grind_CommRing_Power_denoteExpr___at_Lean_Meta_Grind_Arith_CommRing_ppBasis_x3f___spec__6(x_10, x_3, x_4, x_5, x_6, x_7, x_8); +x_13 = lean_ctor_get(x_12, 0); +lean_inc(x_13); +x_14 = lean_ctor_get(x_12, 1); +lean_inc(x_14); +lean_dec(x_12); +x_15 = lean_ctor_get(x_3, 7); +lean_inc(x_15); +x_16 = l_Lean_mkAppB(x_15, x_2, x_13); +x_1 = x_11; +x_2 = x_16; +x_8 = x_14; +goto _start; +} +} +} +static lean_object* _init_l_Lean_Grind_CommRing_Mon_denoteExpr___at_Lean_Meta_Grind_Arith_CommRing_ppBasis_x3f___spec__5___closed__1() { +_start: +{ +lean_object* x_1; lean_object* x_2; +x_1 = lean_unsigned_to_nat(1u); +x_2 = lean_nat_to_int(x_1); +return x_2; +} +} +LEAN_EXPORT lean_object* l_Lean_Grind_CommRing_Mon_denoteExpr___at_Lean_Meta_Grind_Arith_CommRing_ppBasis_x3f___spec__5(lean_object* x_1, lean_object* x_2, lean_object* x_3, lean_object* x_4, lean_object* x_5, lean_object* x_6, lean_object* x_7) { +_start: +{ +if (lean_obj_tag(x_1) == 0) +{ +lean_object* x_8; lean_object* x_9; +x_8 = l_Lean_Grind_CommRing_Mon_denoteExpr___at_Lean_Meta_Grind_Arith_CommRing_ppBasis_x3f___spec__5___closed__1; +x_9 = l___private_Lean_Meta_Tactic_Grind_Arith_CommRing_DenoteExpr_0__Lean_Meta_Grind_Arith_CommRing_denoteNum___at_Lean_Meta_Grind_Arith_CommRing_ppBasis_x3f___spec__3(x_8, x_2, x_3, x_4, x_5, x_6, x_7); +return x_9; +} +else +{ +lean_object* x_10; lean_object* x_11; lean_object* x_12; lean_object* x_13; lean_object* x_14; lean_object* x_15; +x_10 = lean_ctor_get(x_1, 0); +lean_inc(x_10); +x_11 = lean_ctor_get(x_1, 1); +lean_inc(x_11); +lean_dec(x_1); +lean_inc(x_2); +x_12 = l_Lean_Grind_CommRing_Power_denoteExpr___at_Lean_Meta_Grind_Arith_CommRing_ppBasis_x3f___spec__6(x_10, x_2, x_3, x_4, x_5, x_6, x_7); +x_13 = 
lean_ctor_get(x_12, 0); +lean_inc(x_13); +x_14 = lean_ctor_get(x_12, 1); +lean_inc(x_14); +lean_dec(x_12); +x_15 = l_Lean_Grind_CommRing_Mon_denoteExpr_go___at_Lean_Meta_Grind_Arith_CommRing_ppBasis_x3f___spec__7(x_11, x_13, x_2, x_3, x_4, x_5, x_6, x_14); +return x_15; +} +} +} +LEAN_EXPORT lean_object* l_Lean_Grind_CommRing_Poly_denoteExpr_denoteTerm___at_Lean_Meta_Grind_Arith_CommRing_ppBasis_x3f___spec__4(lean_object* x_1, lean_object* x_2, lean_object* x_3, lean_object* x_4, lean_object* x_5, lean_object* x_6, lean_object* x_7, lean_object* x_8) { +_start: +{ +lean_object* x_9; uint8_t x_10; +x_9 = l_Lean_Grind_CommRing_Mon_denoteExpr___at_Lean_Meta_Grind_Arith_CommRing_ppBasis_x3f___spec__5___closed__1; +x_10 = lean_int_dec_eq(x_1, x_9); +if (x_10 == 0) +{ +lean_object* x_11; lean_object* x_12; lean_object* x_13; lean_object* x_14; uint8_t x_15; +lean_inc(x_3); +x_11 = l___private_Lean_Meta_Tactic_Grind_Arith_CommRing_DenoteExpr_0__Lean_Meta_Grind_Arith_CommRing_denoteNum___at_Lean_Meta_Grind_Arith_CommRing_ppBasis_x3f___spec__3(x_1, x_3, x_4, x_5, x_6, x_7, x_8); +x_12 = lean_ctor_get(x_11, 0); +lean_inc(x_12); +x_13 = lean_ctor_get(x_11, 1); +lean_inc(x_13); +lean_dec(x_11); +lean_inc(x_3); +x_14 = l_Lean_Grind_CommRing_Mon_denoteExpr___at_Lean_Meta_Grind_Arith_CommRing_ppBasis_x3f___spec__5(x_2, x_3, x_4, x_5, x_6, x_7, x_13); +x_15 = !lean_is_exclusive(x_14); +if (x_15 == 0) +{ +lean_object* x_16; lean_object* x_17; lean_object* x_18; +x_16 = lean_ctor_get(x_14, 0); +x_17 = lean_ctor_get(x_3, 7); +lean_inc(x_17); +lean_dec(x_3); +x_18 = l_Lean_mkAppB(x_17, x_12, x_16); +lean_ctor_set(x_14, 0, x_18); +return x_14; +} +else +{ +lean_object* x_19; lean_object* x_20; lean_object* x_21; lean_object* x_22; lean_object* x_23; +x_19 = lean_ctor_get(x_14, 0); +x_20 = lean_ctor_get(x_14, 1); +lean_inc(x_20); +lean_inc(x_19); +lean_dec(x_14); +x_21 = lean_ctor_get(x_3, 7); +lean_inc(x_21); +lean_dec(x_3); +x_22 = l_Lean_mkAppB(x_21, x_12, x_19); +x_23 = lean_alloc_ctor(0, 2, 0); +lean_ctor_set(x_23, 0, x_22); +lean_ctor_set(x_23, 1, x_20); +return x_23; +} +} +else +{ +lean_object* x_24; +x_24 = l_Lean_Grind_CommRing_Mon_denoteExpr___at_Lean_Meta_Grind_Arith_CommRing_ppBasis_x3f___spec__5(x_2, x_3, x_4, x_5, x_6, x_7, x_8); +return x_24; +} +} +} +LEAN_EXPORT lean_object* l_Lean_Grind_CommRing_Poly_denoteExpr_go___at_Lean_Meta_Grind_Arith_CommRing_ppBasis_x3f___spec__8(lean_object* x_1, lean_object* x_2, lean_object* x_3, lean_object* x_4, lean_object* x_5, lean_object* x_6, lean_object* x_7, lean_object* x_8) { +_start: +{ +if (lean_obj_tag(x_1) == 0) +{ +lean_object* x_9; lean_object* x_10; uint8_t x_11; +x_9 = lean_ctor_get(x_1, 0); +lean_inc(x_9); +lean_dec(x_1); +x_10 = l___private_Lean_Meta_Tactic_Grind_Arith_CommRing_DenoteExpr_0__Lean_Meta_Grind_Arith_CommRing_denoteNum___at_Lean_Meta_Grind_Arith_CommRing_ppBasis_x3f___spec__3___closed__8; +x_11 = lean_int_dec_eq(x_9, x_10); +if (x_11 == 0) +{ +lean_object* x_12; uint8_t x_13; +lean_inc(x_3); +x_12 = l___private_Lean_Meta_Tactic_Grind_Arith_CommRing_DenoteExpr_0__Lean_Meta_Grind_Arith_CommRing_denoteNum___at_Lean_Meta_Grind_Arith_CommRing_ppBasis_x3f___spec__3(x_9, x_3, x_4, x_5, x_6, x_7, x_8); +lean_dec(x_9); +x_13 = !lean_is_exclusive(x_12); +if (x_13 == 0) +{ +lean_object* x_14; lean_object* x_15; lean_object* x_16; +x_14 = lean_ctor_get(x_12, 0); +x_15 = lean_ctor_get(x_3, 6); +lean_inc(x_15); +lean_dec(x_3); +x_16 = l_Lean_mkAppB(x_15, x_2, x_14); +lean_ctor_set(x_12, 0, x_16); +return x_12; +} +else +{ +lean_object* x_17; 
lean_object* x_18; lean_object* x_19; lean_object* x_20; lean_object* x_21; +x_17 = lean_ctor_get(x_12, 0); +x_18 = lean_ctor_get(x_12, 1); +lean_inc(x_18); +lean_inc(x_17); +lean_dec(x_12); +x_19 = lean_ctor_get(x_3, 6); +lean_inc(x_19); +lean_dec(x_3); +x_20 = l_Lean_mkAppB(x_19, x_2, x_17); +x_21 = lean_alloc_ctor(0, 2, 0); +lean_ctor_set(x_21, 0, x_20); +lean_ctor_set(x_21, 1, x_18); +return x_21; +} +} +else +{ +lean_object* x_22; +lean_dec(x_9); +lean_dec(x_3); +x_22 = lean_alloc_ctor(0, 2, 0); +lean_ctor_set(x_22, 0, x_2); +lean_ctor_set(x_22, 1, x_8); +return x_22; +} +} +else +{ +lean_object* x_23; lean_object* x_24; lean_object* x_25; lean_object* x_26; lean_object* x_27; lean_object* x_28; lean_object* x_29; lean_object* x_30; +x_23 = lean_ctor_get(x_1, 0); +lean_inc(x_23); +x_24 = lean_ctor_get(x_1, 1); +lean_inc(x_24); +x_25 = lean_ctor_get(x_1, 2); +lean_inc(x_25); +lean_dec(x_1); +lean_inc(x_3); +x_26 = l_Lean_Grind_CommRing_Poly_denoteExpr_denoteTerm___at_Lean_Meta_Grind_Arith_CommRing_ppBasis_x3f___spec__4(x_23, x_24, x_3, x_4, x_5, x_6, x_7, x_8); +lean_dec(x_23); +x_27 = lean_ctor_get(x_26, 0); +lean_inc(x_27); +x_28 = lean_ctor_get(x_26, 1); +lean_inc(x_28); +lean_dec(x_26); +x_29 = lean_ctor_get(x_3, 6); +lean_inc(x_29); +x_30 = l_Lean_mkAppB(x_29, x_2, x_27); +x_1 = x_25; +x_2 = x_30; +x_8 = x_28; +goto _start; +} +} +} +LEAN_EXPORT lean_object* l_Lean_Grind_CommRing_Poly_denoteExpr___at_Lean_Meta_Grind_Arith_CommRing_ppBasis_x3f___spec__2(lean_object* x_1, lean_object* x_2, lean_object* x_3, lean_object* x_4, lean_object* x_5, lean_object* x_6, lean_object* x_7) { +_start: +{ +if (lean_obj_tag(x_1) == 0) +{ +lean_object* x_8; lean_object* x_9; +x_8 = lean_ctor_get(x_1, 0); +lean_inc(x_8); +lean_dec(x_1); +x_9 = l___private_Lean_Meta_Tactic_Grind_Arith_CommRing_DenoteExpr_0__Lean_Meta_Grind_Arith_CommRing_denoteNum___at_Lean_Meta_Grind_Arith_CommRing_ppBasis_x3f___spec__3(x_8, x_2, x_3, x_4, x_5, x_6, x_7); +lean_dec(x_8); +return x_9; +} +else +{ +lean_object* x_10; lean_object* x_11; lean_object* x_12; lean_object* x_13; lean_object* x_14; lean_object* x_15; lean_object* x_16; +x_10 = lean_ctor_get(x_1, 0); +lean_inc(x_10); +x_11 = lean_ctor_get(x_1, 1); +lean_inc(x_11); +x_12 = lean_ctor_get(x_1, 2); +lean_inc(x_12); +lean_dec(x_1); +lean_inc(x_2); +x_13 = l_Lean_Grind_CommRing_Poly_denoteExpr_denoteTerm___at_Lean_Meta_Grind_Arith_CommRing_ppBasis_x3f___spec__4(x_10, x_11, x_2, x_3, x_4, x_5, x_6, x_7); +lean_dec(x_10); +x_14 = lean_ctor_get(x_13, 0); +lean_inc(x_14); +x_15 = lean_ctor_get(x_13, 1); +lean_inc(x_15); +lean_dec(x_13); +x_16 = l_Lean_Grind_CommRing_Poly_denoteExpr_go___at_Lean_Meta_Grind_Arith_CommRing_ppBasis_x3f___spec__8(x_12, x_14, x_2, x_3, x_4, x_5, x_6, x_15); +return x_16; +} +} +} +static lean_object* _init_l___private_Lean_Meta_Tactic_Grind_Arith_CommRing_DenoteExpr_0__Lean_Meta_Grind_Arith_CommRing_mkEq___at_Lean_Meta_Grind_Arith_CommRing_ppBasis_x3f___spec__9___closed__1() { +_start: +{ +lean_object* x_1; +x_1 = lean_mk_string_unchecked("Eq", 2, 2); +return x_1; +} +} +static lean_object* _init_l___private_Lean_Meta_Tactic_Grind_Arith_CommRing_DenoteExpr_0__Lean_Meta_Grind_Arith_CommRing_mkEq___at_Lean_Meta_Grind_Arith_CommRing_ppBasis_x3f___spec__9___closed__2() { +_start: +{ +lean_object* x_1; lean_object* x_2; lean_object* x_3; +x_1 = lean_box(0); +x_2 = l___private_Lean_Meta_Tactic_Grind_Arith_CommRing_DenoteExpr_0__Lean_Meta_Grind_Arith_CommRing_mkEq___at_Lean_Meta_Grind_Arith_CommRing_ppBasis_x3f___spec__9___closed__1; +x_3 = 
l_Lean_Name_str___override(x_1, x_2); +return x_3; +} +} +LEAN_EXPORT lean_object* l___private_Lean_Meta_Tactic_Grind_Arith_CommRing_DenoteExpr_0__Lean_Meta_Grind_Arith_CommRing_mkEq___at_Lean_Meta_Grind_Arith_CommRing_ppBasis_x3f___spec__9(lean_object* x_1, lean_object* x_2, lean_object* x_3, lean_object* x_4, lean_object* x_5, lean_object* x_6, lean_object* x_7, lean_object* x_8) { +_start: +{ +lean_object* x_9; lean_object* x_10; lean_object* x_11; lean_object* x_12; lean_object* x_13; lean_object* x_14; lean_object* x_15; lean_object* x_16; lean_object* x_17; +x_9 = lean_ctor_get(x_3, 2); +lean_inc(x_9); +x_10 = l_Lean_Level_succ___override(x_9); +x_11 = lean_box(0); +x_12 = lean_alloc_ctor(1, 2, 0); +lean_ctor_set(x_12, 0, x_10); +lean_ctor_set(x_12, 1, x_11); +x_13 = l___private_Lean_Meta_Tactic_Grind_Arith_CommRing_DenoteExpr_0__Lean_Meta_Grind_Arith_CommRing_mkEq___at_Lean_Meta_Grind_Arith_CommRing_ppBasis_x3f___spec__9___closed__2; +x_14 = l_Lean_Expr_const___override(x_13, x_12); +x_15 = lean_ctor_get(x_3, 1); +lean_inc(x_15); +lean_dec(x_3); +x_16 = l_Lean_mkApp3(x_14, x_15, x_1, x_2); +x_17 = lean_alloc_ctor(0, 2, 0); +lean_ctor_set(x_17, 0, x_16); +lean_ctor_set(x_17, 1, x_8); +return x_17; +} +} +LEAN_EXPORT lean_object* l_Lean_Meta_Grind_Arith_CommRing_EqCnstr_denoteExpr___at_Lean_Meta_Grind_Arith_CommRing_ppBasis_x3f___spec__1(lean_object* x_1, lean_object* x_2, lean_object* x_3, lean_object* x_4, lean_object* x_5, lean_object* x_6, lean_object* x_7) { +_start: +{ +lean_object* x_8; lean_object* x_9; lean_object* x_10; lean_object* x_11; lean_object* x_12; lean_object* x_13; lean_object* x_14; lean_object* x_15; lean_object* x_16; +x_8 = lean_ctor_get(x_1, 0); +lean_inc(x_8); +lean_dec(x_1); +lean_inc(x_2); +x_9 = l_Lean_Grind_CommRing_Poly_denoteExpr___at_Lean_Meta_Grind_Arith_CommRing_ppBasis_x3f___spec__2(x_8, x_2, x_3, x_4, x_5, x_6, x_7); +x_10 = lean_ctor_get(x_9, 0); +lean_inc(x_10); +x_11 = lean_ctor_get(x_9, 1); +lean_inc(x_11); +lean_dec(x_9); +x_12 = l___private_Lean_Meta_Tactic_Grind_Arith_CommRing_DenoteExpr_0__Lean_Meta_Grind_Arith_CommRing_denoteNum___at_Lean_Meta_Grind_Arith_CommRing_ppBasis_x3f___spec__3___closed__8; +lean_inc(x_2); +x_13 = l___private_Lean_Meta_Tactic_Grind_Arith_CommRing_DenoteExpr_0__Lean_Meta_Grind_Arith_CommRing_denoteNum___at_Lean_Meta_Grind_Arith_CommRing_ppBasis_x3f___spec__3(x_12, x_2, x_3, x_4, x_5, x_6, x_11); +x_14 = lean_ctor_get(x_13, 0); +lean_inc(x_14); +x_15 = lean_ctor_get(x_13, 1); +lean_inc(x_15); +lean_dec(x_13); +x_16 = l___private_Lean_Meta_Tactic_Grind_Arith_CommRing_DenoteExpr_0__Lean_Meta_Grind_Arith_CommRing_mkEq___at_Lean_Meta_Grind_Arith_CommRing_ppBasis_x3f___spec__9(x_10, x_14, x_2, x_3, x_4, x_5, x_6, x_15); +return x_16; +} +} +static lean_object* _init_l_Lean_toTraceElem___at_Lean_Meta_Grind_Arith_CommRing_ppBasis_x3f___spec__10___closed__1() { +_start: +{ +lean_object* x_1; lean_object* x_2; +x_1 = lean_box(0); +x_2 = lean_array_mk(x_1); +return x_2; +} +} +LEAN_EXPORT lean_object* l_Lean_toTraceElem___at_Lean_Meta_Grind_Arith_CommRing_ppBasis_x3f___spec__10(lean_object* x_1, lean_object* x_2) { +_start: +{ +double x_3; uint8_t x_4; lean_object* x_5; lean_object* x_6; lean_object* x_7; lean_object* x_8; lean_object* x_9; +x_3 = l___private_Lean_Meta_Tactic_Grind_Arith_CommRing_PP_0__Lean_Meta_Grind_Arith_CommRing_toOption___closed__1; +x_4 = 1; +x_5 = l___private_Lean_Meta_Tactic_Grind_Arith_CommRing_PP_0__Lean_Meta_Grind_Arith_CommRing_toOption___closed__2; +x_6 = lean_alloc_ctor(0, 2, 17); 
+lean_ctor_set(x_6, 0, x_2); +lean_ctor_set(x_6, 1, x_5); +lean_ctor_set_float(x_6, sizeof(void*)*2, x_3); +lean_ctor_set_float(x_6, sizeof(void*)*2 + 8, x_3); +lean_ctor_set_uint8(x_6, sizeof(void*)*2 + 16, x_4); +x_7 = l_Lean_MessageData_ofExpr(x_1); +x_8 = l_Lean_toTraceElem___at_Lean_Meta_Grind_Arith_CommRing_ppBasis_x3f___spec__10___closed__1; +x_9 = lean_alloc_ctor(9, 3, 0); +lean_ctor_set(x_9, 0, x_6); +lean_ctor_set(x_9, 1, x_7); +lean_ctor_set(x_9, 2, x_8); +return x_9; +} +} +static lean_object* _init_l_List_forIn_x27_loop___at_Lean_Meta_Grind_Arith_CommRing_ppBasis_x3f___spec__11___closed__1() { +_start: +{ +lean_object* x_1; +x_1 = lean_mk_string_unchecked("_", 1, 1); +return x_1; +} +} +static lean_object* _init_l_List_forIn_x27_loop___at_Lean_Meta_Grind_Arith_CommRing_ppBasis_x3f___spec__11___closed__2() { +_start: +{ +lean_object* x_1; lean_object* x_2; lean_object* x_3; +x_1 = lean_box(0); +x_2 = l_List_forIn_x27_loop___at_Lean_Meta_Grind_Arith_CommRing_ppBasis_x3f___spec__11___closed__1; +x_3 = l_Lean_Name_str___override(x_1, x_2); +return x_3; +} +} +LEAN_EXPORT lean_object* l_List_forIn_x27_loop___at_Lean_Meta_Grind_Arith_CommRing_ppBasis_x3f___spec__11(lean_object* x_1, lean_object* x_2, lean_object* x_3, lean_object* x_4, lean_object* x_5, lean_object* x_6, lean_object* x_7, lean_object* x_8, lean_object* x_9, lean_object* x_10, lean_object* x_11, lean_object* x_12) { +_start: +{ +if (lean_obj_tag(x_4) == 0) +{ +lean_object* x_13; +lean_dec(x_7); +x_13 = lean_alloc_ctor(0, 2, 0); +lean_ctor_set(x_13, 0, x_5); +lean_ctor_set(x_13, 1, x_12); +return x_13; +} +else +{ +lean_object* x_14; lean_object* x_15; lean_object* x_16; lean_object* x_17; lean_object* x_18; lean_object* x_19; lean_object* x_20; lean_object* x_21; +x_14 = lean_ctor_get(x_4, 0); +lean_inc(x_14); +x_15 = lean_ctor_get(x_4, 1); +lean_inc(x_15); +lean_dec(x_4); +lean_inc(x_7); +x_16 = l_Lean_Meta_Grind_Arith_CommRing_EqCnstr_denoteExpr___at_Lean_Meta_Grind_Arith_CommRing_ppBasis_x3f___spec__1(x_14, x_7, x_8, x_9, x_10, x_11, x_12); +x_17 = lean_ctor_get(x_16, 0); +lean_inc(x_17); +x_18 = lean_ctor_get(x_16, 1); +lean_inc(x_18); +lean_dec(x_16); +x_19 = l_List_forIn_x27_loop___at_Lean_Meta_Grind_Arith_CommRing_ppBasis_x3f___spec__11___closed__2; +x_20 = l_Lean_toTraceElem___at_Lean_Meta_Grind_Arith_CommRing_ppBasis_x3f___spec__10(x_17, x_19); +x_21 = lean_array_push(x_5, x_20); +x_4 = x_15; +x_5 = x_21; +x_6 = lean_box(0); +x_12 = x_18; +goto _start; +} +} +} +LEAN_EXPORT lean_object* l_Array_forIn_x27Unsafe_loop___at_Lean_Meta_Grind_Arith_CommRing_ppBasis_x3f___spec__14(lean_object* x_1, lean_object* x_2, lean_object* x_3, lean_object* x_4, lean_object* x_5, size_t x_6, size_t x_7, lean_object* x_8, lean_object* x_9, lean_object* x_10, lean_object* x_11, lean_object* x_12, lean_object* x_13, lean_object* x_14) { +_start: +{ +uint8_t x_15; +x_15 = lean_usize_dec_lt(x_7, x_6); +if (x_15 == 0) +{ +lean_object* x_16; +lean_dec(x_9); +lean_dec(x_4); +x_16 = lean_alloc_ctor(0, 2, 0); +lean_ctor_set(x_16, 0, x_8); +lean_ctor_set(x_16, 1, x_14); +return x_16; +} +else +{ +lean_object* x_17; uint8_t x_18; +x_17 = lean_array_uget(x_5, x_7); +x_18 = !lean_is_exclusive(x_8); +if (x_18 == 0) +{ +lean_object* x_19; lean_object* x_20; lean_object* x_21; lean_object* x_22; +x_19 = lean_ctor_get(x_8, 1); +x_20 = lean_ctor_get(x_8, 0); +lean_dec(x_20); +lean_inc(x_9); +lean_inc(x_19); +x_21 = l_Lean_PersistentArray_forInAux___at_Lean_Meta_Grind_Arith_CommRing_ppBasis_x3f___spec__13(x_1, x_17, x_19, x_9, x_10, x_11, x_12, 
x_13, x_14); +lean_dec(x_17); +x_22 = lean_ctor_get(x_21, 0); +lean_inc(x_22); +if (lean_obj_tag(x_22) == 0) +{ +uint8_t x_23; +lean_dec(x_9); +lean_dec(x_4); +x_23 = !lean_is_exclusive(x_21); +if (x_23 == 0) +{ +lean_object* x_24; lean_object* x_25; +x_24 = lean_ctor_get(x_21, 0); +lean_dec(x_24); +x_25 = lean_alloc_ctor(1, 1, 0); +lean_ctor_set(x_25, 0, x_22); +lean_ctor_set(x_8, 0, x_25); +lean_ctor_set(x_21, 0, x_8); +return x_21; +} +else +{ +lean_object* x_26; lean_object* x_27; lean_object* x_28; +x_26 = lean_ctor_get(x_21, 1); +lean_inc(x_26); +lean_dec(x_21); +x_27 = lean_alloc_ctor(1, 1, 0); +lean_ctor_set(x_27, 0, x_22); +lean_ctor_set(x_8, 0, x_27); +x_28 = lean_alloc_ctor(0, 2, 0); +lean_ctor_set(x_28, 0, x_8); +lean_ctor_set(x_28, 1, x_26); +return x_28; +} +} +else +{ +lean_object* x_29; lean_object* x_30; size_t x_31; size_t x_32; +lean_dec(x_19); +x_29 = lean_ctor_get(x_21, 1); +lean_inc(x_29); +lean_dec(x_21); +x_30 = lean_ctor_get(x_22, 0); +lean_inc(x_30); +lean_dec(x_22); +lean_inc(x_4); +lean_ctor_set(x_8, 1, x_30); +lean_ctor_set(x_8, 0, x_4); +x_31 = 1; +x_32 = lean_usize_add(x_7, x_31); +x_7 = x_32; +x_14 = x_29; +goto _start; +} +} +else +{ +lean_object* x_34; lean_object* x_35; lean_object* x_36; +x_34 = lean_ctor_get(x_8, 1); +lean_inc(x_34); +lean_dec(x_8); +lean_inc(x_9); +lean_inc(x_34); +x_35 = l_Lean_PersistentArray_forInAux___at_Lean_Meta_Grind_Arith_CommRing_ppBasis_x3f___spec__13(x_1, x_17, x_34, x_9, x_10, x_11, x_12, x_13, x_14); +lean_dec(x_17); +x_36 = lean_ctor_get(x_35, 0); +lean_inc(x_36); +if (lean_obj_tag(x_36) == 0) +{ +lean_object* x_37; lean_object* x_38; lean_object* x_39; lean_object* x_40; lean_object* x_41; +lean_dec(x_9); +lean_dec(x_4); +x_37 = lean_ctor_get(x_35, 1); +lean_inc(x_37); +if (lean_is_exclusive(x_35)) { + lean_ctor_release(x_35, 0); + lean_ctor_release(x_35, 1); + x_38 = x_35; +} else { + lean_dec_ref(x_35); + x_38 = lean_box(0); +} +x_39 = lean_alloc_ctor(1, 1, 0); +lean_ctor_set(x_39, 0, x_36); +x_40 = lean_alloc_ctor(0, 2, 0); +lean_ctor_set(x_40, 0, x_39); +lean_ctor_set(x_40, 1, x_34); +if (lean_is_scalar(x_38)) { + x_41 = lean_alloc_ctor(0, 2, 0); +} else { + x_41 = x_38; +} +lean_ctor_set(x_41, 0, x_40); +lean_ctor_set(x_41, 1, x_37); +return x_41; +} +else +{ +lean_object* x_42; lean_object* x_43; lean_object* x_44; size_t x_45; size_t x_46; +lean_dec(x_34); +x_42 = lean_ctor_get(x_35, 1); +lean_inc(x_42); +lean_dec(x_35); +x_43 = lean_ctor_get(x_36, 0); +lean_inc(x_43); +lean_dec(x_36); +lean_inc(x_4); +x_44 = lean_alloc_ctor(0, 2, 0); +lean_ctor_set(x_44, 0, x_4); +lean_ctor_set(x_44, 1, x_43); +x_45 = 1; +x_46 = lean_usize_add(x_7, x_45); +x_7 = x_46; +x_8 = x_44; +x_14 = x_42; +goto _start; +} +} +} +} +} +LEAN_EXPORT lean_object* l_Array_forIn_x27Unsafe_loop___at_Lean_Meta_Grind_Arith_CommRing_ppBasis_x3f___spec__15(lean_object* x_1, lean_object* x_2, lean_object* x_3, lean_object* x_4, size_t x_5, size_t x_6, lean_object* x_7, lean_object* x_8, lean_object* x_9, lean_object* x_10, lean_object* x_11, lean_object* x_12, lean_object* x_13) { +_start: +{ +uint8_t x_14; +x_14 = lean_usize_dec_lt(x_6, x_5); +if (x_14 == 0) +{ +lean_object* x_15; +lean_dec(x_8); +lean_dec(x_3); +x_15 = lean_alloc_ctor(0, 2, 0); +lean_ctor_set(x_15, 0, x_7); +lean_ctor_set(x_15, 1, x_13); +return x_15; +} +else +{ +lean_object* x_16; lean_object* x_17; lean_object* x_18; lean_object* x_19; uint8_t x_20; +x_16 = lean_array_uget(x_4, x_6); +x_17 = lean_ctor_get(x_7, 1); +lean_inc(x_17); +lean_dec(x_7); +x_18 = lean_box(0); 
+lean_inc(x_8); +lean_inc(x_16); +x_19 = l_List_forIn_x27_loop___at_Lean_Meta_Grind_Arith_CommRing_ppBasis_x3f___spec__11(x_16, x_18, x_16, x_16, x_17, lean_box(0), x_8, x_9, x_10, x_11, x_12, x_13); +lean_dec(x_16); +x_20 = !lean_is_exclusive(x_19); +if (x_20 == 0) +{ +lean_object* x_21; lean_object* x_22; size_t x_23; size_t x_24; +x_21 = lean_ctor_get(x_19, 0); +x_22 = lean_ctor_get(x_19, 1); +lean_inc(x_3); +lean_ctor_set(x_19, 1, x_21); +lean_ctor_set(x_19, 0, x_3); +x_23 = 1; +x_24 = lean_usize_add(x_6, x_23); +x_6 = x_24; +x_7 = x_19; +x_13 = x_22; +goto _start; +} +else +{ +lean_object* x_26; lean_object* x_27; lean_object* x_28; size_t x_29; size_t x_30; +x_26 = lean_ctor_get(x_19, 0); +x_27 = lean_ctor_get(x_19, 1); +lean_inc(x_27); +lean_inc(x_26); +lean_dec(x_19); +lean_inc(x_3); +x_28 = lean_alloc_ctor(0, 2, 0); +lean_ctor_set(x_28, 0, x_3); +lean_ctor_set(x_28, 1, x_26); +x_29 = 1; +x_30 = lean_usize_add(x_6, x_29); +x_6 = x_30; +x_7 = x_28; +x_13 = x_27; +goto _start; +} +} +} +} +LEAN_EXPORT lean_object* l_Lean_PersistentArray_forInAux___at_Lean_Meta_Grind_Arith_CommRing_ppBasis_x3f___spec__13___lambda__1(lean_object* x_1, lean_object* x_2, lean_object* x_3, lean_object* x_4, lean_object* x_5, lean_object* x_6, lean_object* x_7, lean_object* x_8) { +_start: +{ +lean_object* x_9; lean_object* x_10; +x_9 = lean_alloc_ctor(1, 1, 0); +lean_ctor_set(x_9, 0, x_1); +x_10 = lean_alloc_ctor(0, 2, 0); +lean_ctor_set(x_10, 0, x_9); +lean_ctor_set(x_10, 1, x_8); +return x_10; +} +} +LEAN_EXPORT lean_object* l_Lean_PersistentArray_forInAux___at_Lean_Meta_Grind_Arith_CommRing_ppBasis_x3f___spec__13(lean_object* x_1, lean_object* x_2, lean_object* x_3, lean_object* x_4, lean_object* x_5, lean_object* x_6, lean_object* x_7, lean_object* x_8, lean_object* x_9) { +_start: +{ +if (lean_obj_tag(x_2) == 0) +{ +lean_object* x_10; lean_object* x_11; lean_object* x_12; lean_object* x_13; size_t x_14; size_t x_15; lean_object* x_16; lean_object* x_17; lean_object* x_18; +x_10 = lean_ctor_get(x_2, 0); +x_11 = lean_box(0); +x_12 = lean_box(0); +x_13 = lean_alloc_ctor(0, 2, 0); +lean_ctor_set(x_13, 0, x_12); +lean_ctor_set(x_13, 1, x_3); +x_14 = lean_array_size(x_10); +x_15 = 0; +lean_inc(x_4); +x_16 = l_Array_forIn_x27Unsafe_loop___at_Lean_Meta_Grind_Arith_CommRing_ppBasis_x3f___spec__14(x_1, x_10, x_11, x_12, x_10, x_14, x_15, x_13, x_4, x_5, x_6, x_7, x_8, x_9); +x_17 = lean_ctor_get(x_16, 0); +lean_inc(x_17); +x_18 = lean_ctor_get(x_17, 0); +lean_inc(x_18); +if (lean_obj_tag(x_18) == 0) +{ +lean_object* x_19; lean_object* x_20; lean_object* x_21; lean_object* x_22; +x_19 = lean_ctor_get(x_16, 1); +lean_inc(x_19); +lean_dec(x_16); +x_20 = lean_ctor_get(x_17, 1); +lean_inc(x_20); +lean_dec(x_17); +x_21 = lean_box(0); +x_22 = l_Lean_PersistentArray_forInAux___at_Lean_Meta_Grind_Arith_CommRing_ppBasis_x3f___spec__13___lambda__1(x_20, x_21, x_4, x_5, x_6, x_7, x_8, x_19); +lean_dec(x_4); +return x_22; +} +else +{ +uint8_t x_23; +lean_dec(x_17); +lean_dec(x_4); +x_23 = !lean_is_exclusive(x_16); +if (x_23 == 0) +{ +lean_object* x_24; lean_object* x_25; +x_24 = lean_ctor_get(x_16, 0); +lean_dec(x_24); +x_25 = lean_ctor_get(x_18, 0); +lean_inc(x_25); +lean_dec(x_18); +lean_ctor_set(x_16, 0, x_25); +return x_16; +} +else +{ +lean_object* x_26; lean_object* x_27; lean_object* x_28; +x_26 = lean_ctor_get(x_16, 1); +lean_inc(x_26); +lean_dec(x_16); +x_27 = lean_ctor_get(x_18, 0); +lean_inc(x_27); +lean_dec(x_18); +x_28 = lean_alloc_ctor(0, 2, 0); +lean_ctor_set(x_28, 0, x_27); +lean_ctor_set(x_28, 1, x_26); 
+return x_28; +} +} +} +else +{ +lean_object* x_29; lean_object* x_30; lean_object* x_31; lean_object* x_32; size_t x_33; size_t x_34; lean_object* x_35; lean_object* x_36; lean_object* x_37; +x_29 = lean_ctor_get(x_2, 0); +x_30 = lean_box(0); +x_31 = lean_box(0); +x_32 = lean_alloc_ctor(0, 2, 0); +lean_ctor_set(x_32, 0, x_31); +lean_ctor_set(x_32, 1, x_3); +x_33 = lean_array_size(x_29); +x_34 = 0; +lean_inc(x_4); +x_35 = l_Array_forIn_x27Unsafe_loop___at_Lean_Meta_Grind_Arith_CommRing_ppBasis_x3f___spec__15(x_29, x_30, x_31, x_29, x_33, x_34, x_32, x_4, x_5, x_6, x_7, x_8, x_9); +x_36 = lean_ctor_get(x_35, 0); +lean_inc(x_36); +x_37 = lean_ctor_get(x_36, 0); +lean_inc(x_37); +if (lean_obj_tag(x_37) == 0) +{ +lean_object* x_38; lean_object* x_39; lean_object* x_40; lean_object* x_41; +x_38 = lean_ctor_get(x_35, 1); +lean_inc(x_38); +lean_dec(x_35); +x_39 = lean_ctor_get(x_36, 1); +lean_inc(x_39); +lean_dec(x_36); +x_40 = lean_box(0); +x_41 = l_Lean_PersistentArray_forInAux___at_Lean_Meta_Grind_Arith_CommRing_ppBasis_x3f___spec__13___lambda__1(x_39, x_40, x_4, x_5, x_6, x_7, x_8, x_38); +lean_dec(x_4); +return x_41; +} +else +{ +uint8_t x_42; +lean_dec(x_36); +lean_dec(x_4); +x_42 = !lean_is_exclusive(x_35); +if (x_42 == 0) +{ +lean_object* x_43; lean_object* x_44; +x_43 = lean_ctor_get(x_35, 0); +lean_dec(x_43); +x_44 = lean_ctor_get(x_37, 0); +lean_inc(x_44); +lean_dec(x_37); +lean_ctor_set(x_35, 0, x_44); +return x_35; +} +else +{ +lean_object* x_45; lean_object* x_46; lean_object* x_47; +x_45 = lean_ctor_get(x_35, 1); +lean_inc(x_45); +lean_dec(x_35); +x_46 = lean_ctor_get(x_37, 0); +lean_inc(x_46); +lean_dec(x_37); +x_47 = lean_alloc_ctor(0, 2, 0); +lean_ctor_set(x_47, 0, x_46); +lean_ctor_set(x_47, 1, x_45); +return x_47; +} +} +} +} +} +LEAN_EXPORT lean_object* l_Array_forIn_x27Unsafe_loop___at_Lean_Meta_Grind_Arith_CommRing_ppBasis_x3f___spec__16(lean_object* x_1, lean_object* x_2, lean_object* x_3, lean_object* x_4, size_t x_5, size_t x_6, lean_object* x_7, lean_object* x_8, lean_object* x_9, lean_object* x_10, lean_object* x_11, lean_object* x_12, lean_object* x_13) { +_start: +{ +uint8_t x_14; +x_14 = lean_usize_dec_lt(x_6, x_5); +if (x_14 == 0) +{ +lean_object* x_15; +lean_dec(x_8); +lean_dec(x_3); +x_15 = lean_alloc_ctor(0, 2, 0); +lean_ctor_set(x_15, 0, x_7); +lean_ctor_set(x_15, 1, x_13); +return x_15; +} +else +{ +lean_object* x_16; lean_object* x_17; lean_object* x_18; lean_object* x_19; uint8_t x_20; +x_16 = lean_array_uget(x_4, x_6); +x_17 = lean_ctor_get(x_7, 1); +lean_inc(x_17); +lean_dec(x_7); +x_18 = lean_box(0); +lean_inc(x_8); +lean_inc(x_16); +x_19 = l_List_forIn_x27_loop___at_Lean_Meta_Grind_Arith_CommRing_ppBasis_x3f___spec__11(x_16, x_18, x_16, x_16, x_17, lean_box(0), x_8, x_9, x_10, x_11, x_12, x_13); +lean_dec(x_16); +x_20 = !lean_is_exclusive(x_19); +if (x_20 == 0) +{ +lean_object* x_21; lean_object* x_22; size_t x_23; size_t x_24; +x_21 = lean_ctor_get(x_19, 0); +x_22 = lean_ctor_get(x_19, 1); +lean_inc(x_3); +lean_ctor_set(x_19, 1, x_21); +lean_ctor_set(x_19, 0, x_3); +x_23 = 1; +x_24 = lean_usize_add(x_6, x_23); +x_6 = x_24; +x_7 = x_19; +x_13 = x_22; +goto _start; +} +else +{ +lean_object* x_26; lean_object* x_27; lean_object* x_28; size_t x_29; size_t x_30; +x_26 = lean_ctor_get(x_19, 0); +x_27 = lean_ctor_get(x_19, 1); +lean_inc(x_27); +lean_inc(x_26); +lean_dec(x_19); +lean_inc(x_3); +x_28 = lean_alloc_ctor(0, 2, 0); +lean_ctor_set(x_28, 0, x_3); +lean_ctor_set(x_28, 1, x_26); +x_29 = 1; +x_30 = lean_usize_add(x_6, x_29); +x_6 = x_30; +x_7 = x_28; 
+x_13 = x_27; +goto _start; +} +} +} +} +LEAN_EXPORT lean_object* l_Lean_PersistentArray_forIn___at_Lean_Meta_Grind_Arith_CommRing_ppBasis_x3f___spec__12___lambda__1(lean_object* x_1, lean_object* x_2, lean_object* x_3, lean_object* x_4, lean_object* x_5, lean_object* x_6, lean_object* x_7, lean_object* x_8) { +_start: +{ +lean_object* x_9; +x_9 = lean_alloc_ctor(0, 2, 0); +lean_ctor_set(x_9, 0, x_1); +lean_ctor_set(x_9, 1, x_8); +return x_9; +} +} +LEAN_EXPORT lean_object* l_Lean_PersistentArray_forIn___at_Lean_Meta_Grind_Arith_CommRing_ppBasis_x3f___spec__12(lean_object* x_1, lean_object* x_2, lean_object* x_3, lean_object* x_4, lean_object* x_5, lean_object* x_6, lean_object* x_7, lean_object* x_8) { +_start: +{ +lean_object* x_9; lean_object* x_10; lean_object* x_11; +x_9 = lean_ctor_get(x_1, 0); +lean_inc(x_3); +lean_inc(x_2); +x_10 = l_Lean_PersistentArray_forInAux___at_Lean_Meta_Grind_Arith_CommRing_ppBasis_x3f___spec__13(x_2, x_9, x_2, x_3, x_4, x_5, x_6, x_7, x_8); +lean_dec(x_2); +x_11 = lean_ctor_get(x_10, 0); +lean_inc(x_11); +if (lean_obj_tag(x_11) == 0) +{ +uint8_t x_12; +lean_dec(x_3); +x_12 = !lean_is_exclusive(x_10); +if (x_12 == 0) +{ +lean_object* x_13; lean_object* x_14; +x_13 = lean_ctor_get(x_10, 0); +lean_dec(x_13); +x_14 = lean_ctor_get(x_11, 0); +lean_inc(x_14); +lean_dec(x_11); +lean_ctor_set(x_10, 0, x_14); +return x_10; +} +else +{ +lean_object* x_15; lean_object* x_16; lean_object* x_17; +x_15 = lean_ctor_get(x_10, 1); +lean_inc(x_15); +lean_dec(x_10); +x_16 = lean_ctor_get(x_11, 0); +lean_inc(x_16); +lean_dec(x_11); +x_17 = lean_alloc_ctor(0, 2, 0); +lean_ctor_set(x_17, 0, x_16); +lean_ctor_set(x_17, 1, x_15); +return x_17; +} +} +else +{ +lean_object* x_18; lean_object* x_19; lean_object* x_20; lean_object* x_21; lean_object* x_22; lean_object* x_23; size_t x_24; size_t x_25; lean_object* x_26; lean_object* x_27; lean_object* x_28; +x_18 = lean_ctor_get(x_10, 1); +lean_inc(x_18); +lean_dec(x_10); +x_19 = lean_ctor_get(x_11, 0); +lean_inc(x_19); +lean_dec(x_11); +x_20 = lean_box(0); +x_21 = lean_ctor_get(x_1, 1); +x_22 = lean_box(0); +x_23 = lean_alloc_ctor(0, 2, 0); +lean_ctor_set(x_23, 0, x_22); +lean_ctor_set(x_23, 1, x_19); +x_24 = lean_array_size(x_21); +x_25 = 0; +x_26 = l_Array_forIn_x27Unsafe_loop___at_Lean_Meta_Grind_Arith_CommRing_ppBasis_x3f___spec__16(x_20, x_21, x_22, x_21, x_24, x_25, x_23, x_3, x_4, x_5, x_6, x_7, x_18); +x_27 = lean_ctor_get(x_26, 0); +lean_inc(x_27); +x_28 = lean_ctor_get(x_27, 0); +lean_inc(x_28); +if (lean_obj_tag(x_28) == 0) +{ +uint8_t x_29; +x_29 = !lean_is_exclusive(x_26); +if (x_29 == 0) +{ +lean_object* x_30; lean_object* x_31; +x_30 = lean_ctor_get(x_26, 0); +lean_dec(x_30); +x_31 = lean_ctor_get(x_27, 1); +lean_inc(x_31); +lean_dec(x_27); +lean_ctor_set(x_26, 0, x_31); +return x_26; +} +else +{ +lean_object* x_32; lean_object* x_33; lean_object* x_34; +x_32 = lean_ctor_get(x_26, 1); +lean_inc(x_32); +lean_dec(x_26); +x_33 = lean_ctor_get(x_27, 1); +lean_inc(x_33); +lean_dec(x_27); +x_34 = lean_alloc_ctor(0, 2, 0); +lean_ctor_set(x_34, 0, x_33); +lean_ctor_set(x_34, 1, x_32); +return x_34; +} +} +else +{ +uint8_t x_35; +lean_dec(x_27); +x_35 = !lean_is_exclusive(x_26); +if (x_35 == 0) +{ +lean_object* x_36; lean_object* x_37; +x_36 = lean_ctor_get(x_26, 0); +lean_dec(x_36); +x_37 = lean_ctor_get(x_28, 0); +lean_inc(x_37); +lean_dec(x_28); +lean_ctor_set(x_26, 0, x_37); +return x_26; +} +else +{ +lean_object* x_38; lean_object* x_39; lean_object* x_40; +x_38 = lean_ctor_get(x_26, 1); +lean_inc(x_38); +lean_dec(x_26); 
+x_39 = lean_ctor_get(x_28, 0); +lean_inc(x_39); +lean_dec(x_28); +x_40 = lean_alloc_ctor(0, 2, 0); +lean_ctor_set(x_40, 0, x_39); +lean_ctor_set(x_40, 1, x_38); +return x_40; +} +} +} +} +} +static lean_object* _init_l_Lean_Meta_Grind_Arith_CommRing_ppBasis_x3f___lambda__1___closed__1() { +_start: +{ +lean_object* x_1; +x_1 = lean_mk_string_unchecked("Basis", 5, 5); +return x_1; +} +} +static lean_object* _init_l_Lean_Meta_Grind_Arith_CommRing_ppBasis_x3f___lambda__1___closed__2() { +_start: +{ +lean_object* x_1; lean_object* x_2; +x_1 = l_Lean_Meta_Grind_Arith_CommRing_ppBasis_x3f___lambda__1___closed__1; +x_2 = lean_alloc_ctor(3, 1, 0); +lean_ctor_set(x_2, 0, x_1); +return x_2; +} +} +static lean_object* _init_l_Lean_Meta_Grind_Arith_CommRing_ppBasis_x3f___lambda__1___closed__3() { +_start: +{ +lean_object* x_1; lean_object* x_2; +x_1 = l_Lean_Meta_Grind_Arith_CommRing_ppBasis_x3f___lambda__1___closed__2; +x_2 = l_Lean_MessageData_ofFormat(x_1); +return x_2; +} +} +LEAN_EXPORT lean_object* l_Lean_Meta_Grind_Arith_CommRing_ppBasis_x3f___lambda__1(lean_object* x_1) { +_start: +{ +lean_object* x_2; +x_2 = l_Lean_Meta_Grind_Arith_CommRing_ppBasis_x3f___lambda__1___closed__3; +return x_2; +} +} +static lean_object* _init_l_Lean_Meta_Grind_Arith_CommRing_ppBasis_x3f___closed__1() { +_start: +{ +lean_object* x_1; +x_1 = lean_mk_string_unchecked("basis", 5, 5); +return x_1; +} +} +static lean_object* _init_l_Lean_Meta_Grind_Arith_CommRing_ppBasis_x3f___closed__2() { +_start: +{ +lean_object* x_1; lean_object* x_2; lean_object* x_3; +x_1 = lean_box(0); +x_2 = l_Lean_Meta_Grind_Arith_CommRing_ppBasis_x3f___closed__1; +x_3 = l_Lean_Name_str___override(x_1, x_2); +return x_3; +} +} +static lean_object* _init_l_Lean_Meta_Grind_Arith_CommRing_ppBasis_x3f___closed__3() { +_start: +{ +lean_object* x_1; +x_1 = lean_alloc_closure((void*)(l_Lean_Meta_Grind_Arith_CommRing_ppBasis_x3f___lambda__1___boxed), 1, 0); +return x_1; +} +} +static lean_object* _init_l_Lean_Meta_Grind_Arith_CommRing_ppBasis_x3f___closed__4() { +_start: +{ +lean_object* x_1; lean_object* x_2; +x_1 = l_Lean_Meta_Grind_Arith_CommRing_ppBasis_x3f___closed__3; +x_2 = lean_mk_thunk(x_1); +return x_2; +} +} +LEAN_EXPORT lean_object* l_Lean_Meta_Grind_Arith_CommRing_ppBasis_x3f(lean_object* x_1, lean_object* x_2, lean_object* x_3, lean_object* x_4, lean_object* x_5, lean_object* x_6) { +_start: +{ +lean_object* x_7; lean_object* x_8; lean_object* x_9; uint8_t x_10; +x_7 = lean_ctor_get(x_1, 19); +lean_inc(x_7); +x_8 = l_Lean_toTraceElem___at_Lean_Meta_Grind_Arith_CommRing_ppBasis_x3f___spec__10___closed__1; +x_9 = l_Lean_PersistentArray_forIn___at_Lean_Meta_Grind_Arith_CommRing_ppBasis_x3f___spec__12(x_7, x_8, x_1, x_2, x_3, x_4, x_5, x_6); +lean_dec(x_7); +x_10 = !lean_is_exclusive(x_9); +if (x_10 == 0) +{ +lean_object* x_11; lean_object* x_12; lean_object* x_13; lean_object* x_14; +x_11 = lean_ctor_get(x_9, 0); +x_12 = l_Lean_Meta_Grind_Arith_CommRing_ppBasis_x3f___closed__2; +x_13 = l_Lean_Meta_Grind_Arith_CommRing_ppBasis_x3f___closed__4; +x_14 = l___private_Lean_Meta_Tactic_Grind_Arith_CommRing_PP_0__Lean_Meta_Grind_Arith_CommRing_toOption(x_12, x_13, x_11); +lean_ctor_set(x_9, 0, x_14); +return x_9; +} +else +{ +lean_object* x_15; lean_object* x_16; lean_object* x_17; lean_object* x_18; lean_object* x_19; lean_object* x_20; +x_15 = lean_ctor_get(x_9, 0); +x_16 = lean_ctor_get(x_9, 1); +lean_inc(x_16); +lean_inc(x_15); +lean_dec(x_9); +x_17 = l_Lean_Meta_Grind_Arith_CommRing_ppBasis_x3f___closed__2; +x_18 = 
l_Lean_Meta_Grind_Arith_CommRing_ppBasis_x3f___closed__4; +x_19 = l___private_Lean_Meta_Tactic_Grind_Arith_CommRing_PP_0__Lean_Meta_Grind_Arith_CommRing_toOption(x_17, x_18, x_15); +x_20 = lean_alloc_ctor(0, 2, 0); +lean_ctor_set(x_20, 0, x_19); +lean_ctor_set(x_20, 1, x_16); +return x_20; +} +} +} +LEAN_EXPORT lean_object* l___private_Lean_Meta_Tactic_Grind_Arith_CommRing_DenoteExpr_0__Lean_Meta_Grind_Arith_CommRing_denoteNum___at_Lean_Meta_Grind_Arith_CommRing_ppBasis_x3f___spec__3___boxed(lean_object* x_1, lean_object* x_2, lean_object* x_3, lean_object* x_4, lean_object* x_5, lean_object* x_6, lean_object* x_7) { +_start: +{ +lean_object* x_8; +x_8 = l___private_Lean_Meta_Tactic_Grind_Arith_CommRing_DenoteExpr_0__Lean_Meta_Grind_Arith_CommRing_denoteNum___at_Lean_Meta_Grind_Arith_CommRing_ppBasis_x3f___spec__3(x_1, x_2, x_3, x_4, x_5, x_6, x_7); +lean_dec(x_6); +lean_dec(x_5); +lean_dec(x_4); +lean_dec(x_3); +lean_dec(x_1); +return x_8; +} +} +LEAN_EXPORT lean_object* l_Lean_Grind_CommRing_Power_denoteExpr___at_Lean_Meta_Grind_Arith_CommRing_ppBasis_x3f___spec__6___boxed(lean_object* x_1, lean_object* x_2, lean_object* x_3, lean_object* x_4, lean_object* x_5, lean_object* x_6, lean_object* x_7) { +_start: +{ +lean_object* x_8; +x_8 = l_Lean_Grind_CommRing_Power_denoteExpr___at_Lean_Meta_Grind_Arith_CommRing_ppBasis_x3f___spec__6(x_1, x_2, x_3, x_4, x_5, x_6, x_7); +lean_dec(x_6); +lean_dec(x_5); +lean_dec(x_4); +lean_dec(x_3); +return x_8; +} +} +LEAN_EXPORT lean_object* l_Lean_Grind_CommRing_Mon_denoteExpr_go___at_Lean_Meta_Grind_Arith_CommRing_ppBasis_x3f___spec__7___boxed(lean_object* x_1, lean_object* x_2, lean_object* x_3, lean_object* x_4, lean_object* x_5, lean_object* x_6, lean_object* x_7, lean_object* x_8) { +_start: +{ +lean_object* x_9; +x_9 = l_Lean_Grind_CommRing_Mon_denoteExpr_go___at_Lean_Meta_Grind_Arith_CommRing_ppBasis_x3f___spec__7(x_1, x_2, x_3, x_4, x_5, x_6, x_7, x_8); +lean_dec(x_7); +lean_dec(x_6); +lean_dec(x_5); +lean_dec(x_4); +return x_9; +} +} +LEAN_EXPORT lean_object* l_Lean_Grind_CommRing_Mon_denoteExpr___at_Lean_Meta_Grind_Arith_CommRing_ppBasis_x3f___spec__5___boxed(lean_object* x_1, lean_object* x_2, lean_object* x_3, lean_object* x_4, lean_object* x_5, lean_object* x_6, lean_object* x_7) { +_start: +{ +lean_object* x_8; +x_8 = l_Lean_Grind_CommRing_Mon_denoteExpr___at_Lean_Meta_Grind_Arith_CommRing_ppBasis_x3f___spec__5(x_1, x_2, x_3, x_4, x_5, x_6, x_7); +lean_dec(x_6); +lean_dec(x_5); +lean_dec(x_4); +lean_dec(x_3); +return x_8; +} +} +LEAN_EXPORT lean_object* l_Lean_Grind_CommRing_Poly_denoteExpr_denoteTerm___at_Lean_Meta_Grind_Arith_CommRing_ppBasis_x3f___spec__4___boxed(lean_object* x_1, lean_object* x_2, lean_object* x_3, lean_object* x_4, lean_object* x_5, lean_object* x_6, lean_object* x_7, lean_object* x_8) { +_start: +{ +lean_object* x_9; +x_9 = l_Lean_Grind_CommRing_Poly_denoteExpr_denoteTerm___at_Lean_Meta_Grind_Arith_CommRing_ppBasis_x3f___spec__4(x_1, x_2, x_3, x_4, x_5, x_6, x_7, x_8); +lean_dec(x_7); +lean_dec(x_6); +lean_dec(x_5); +lean_dec(x_4); +lean_dec(x_1); +return x_9; +} +} +LEAN_EXPORT lean_object* l_Lean_Grind_CommRing_Poly_denoteExpr_go___at_Lean_Meta_Grind_Arith_CommRing_ppBasis_x3f___spec__8___boxed(lean_object* x_1, lean_object* x_2, lean_object* x_3, lean_object* x_4, lean_object* x_5, lean_object* x_6, lean_object* x_7, lean_object* x_8) { +_start: +{ +lean_object* x_9; +x_9 = l_Lean_Grind_CommRing_Poly_denoteExpr_go___at_Lean_Meta_Grind_Arith_CommRing_ppBasis_x3f___spec__8(x_1, x_2, x_3, x_4, x_5, x_6, x_7, x_8); 
+lean_dec(x_7); +lean_dec(x_6); +lean_dec(x_5); +lean_dec(x_4); +return x_9; +} +} +LEAN_EXPORT lean_object* l_Lean_Grind_CommRing_Poly_denoteExpr___at_Lean_Meta_Grind_Arith_CommRing_ppBasis_x3f___spec__2___boxed(lean_object* x_1, lean_object* x_2, lean_object* x_3, lean_object* x_4, lean_object* x_5, lean_object* x_6, lean_object* x_7) { +_start: +{ +lean_object* x_8; +x_8 = l_Lean_Grind_CommRing_Poly_denoteExpr___at_Lean_Meta_Grind_Arith_CommRing_ppBasis_x3f___spec__2(x_1, x_2, x_3, x_4, x_5, x_6, x_7); +lean_dec(x_6); +lean_dec(x_5); +lean_dec(x_4); +lean_dec(x_3); +return x_8; +} +} +LEAN_EXPORT lean_object* l___private_Lean_Meta_Tactic_Grind_Arith_CommRing_DenoteExpr_0__Lean_Meta_Grind_Arith_CommRing_mkEq___at_Lean_Meta_Grind_Arith_CommRing_ppBasis_x3f___spec__9___boxed(lean_object* x_1, lean_object* x_2, lean_object* x_3, lean_object* x_4, lean_object* x_5, lean_object* x_6, lean_object* x_7, lean_object* x_8) { +_start: +{ +lean_object* x_9; +x_9 = l___private_Lean_Meta_Tactic_Grind_Arith_CommRing_DenoteExpr_0__Lean_Meta_Grind_Arith_CommRing_mkEq___at_Lean_Meta_Grind_Arith_CommRing_ppBasis_x3f___spec__9(x_1, x_2, x_3, x_4, x_5, x_6, x_7, x_8); +lean_dec(x_7); +lean_dec(x_6); +lean_dec(x_5); +lean_dec(x_4); +return x_9; +} +} +LEAN_EXPORT lean_object* l_Lean_Meta_Grind_Arith_CommRing_EqCnstr_denoteExpr___at_Lean_Meta_Grind_Arith_CommRing_ppBasis_x3f___spec__1___boxed(lean_object* x_1, lean_object* x_2, lean_object* x_3, lean_object* x_4, lean_object* x_5, lean_object* x_6, lean_object* x_7) { +_start: +{ +lean_object* x_8; +x_8 = l_Lean_Meta_Grind_Arith_CommRing_EqCnstr_denoteExpr___at_Lean_Meta_Grind_Arith_CommRing_ppBasis_x3f___spec__1(x_1, x_2, x_3, x_4, x_5, x_6, x_7); +lean_dec(x_6); +lean_dec(x_5); +lean_dec(x_4); +lean_dec(x_3); +return x_8; +} +} +LEAN_EXPORT lean_object* l_List_forIn_x27_loop___at_Lean_Meta_Grind_Arith_CommRing_ppBasis_x3f___spec__11___boxed(lean_object* x_1, lean_object* x_2, lean_object* x_3, lean_object* x_4, lean_object* x_5, lean_object* x_6, lean_object* x_7, lean_object* x_8, lean_object* x_9, lean_object* x_10, lean_object* x_11, lean_object* x_12) { +_start: +{ +lean_object* x_13; +x_13 = l_List_forIn_x27_loop___at_Lean_Meta_Grind_Arith_CommRing_ppBasis_x3f___spec__11(x_1, x_2, x_3, x_4, x_5, x_6, x_7, x_8, x_9, x_10, x_11, x_12); +lean_dec(x_11); +lean_dec(x_10); +lean_dec(x_9); +lean_dec(x_8); +lean_dec(x_3); +lean_dec(x_2); +lean_dec(x_1); +return x_13; +} +} +LEAN_EXPORT lean_object* l_Array_forIn_x27Unsafe_loop___at_Lean_Meta_Grind_Arith_CommRing_ppBasis_x3f___spec__14___boxed(lean_object* x_1, lean_object* x_2, lean_object* x_3, lean_object* x_4, lean_object* x_5, lean_object* x_6, lean_object* x_7, lean_object* x_8, lean_object* x_9, lean_object* x_10, lean_object* x_11, lean_object* x_12, lean_object* x_13, lean_object* x_14) { +_start: +{ +size_t x_15; size_t x_16; lean_object* x_17; +x_15 = lean_unbox_usize(x_6); +lean_dec(x_6); +x_16 = lean_unbox_usize(x_7); +lean_dec(x_7); +x_17 = l_Array_forIn_x27Unsafe_loop___at_Lean_Meta_Grind_Arith_CommRing_ppBasis_x3f___spec__14(x_1, x_2, x_3, x_4, x_5, x_15, x_16, x_8, x_9, x_10, x_11, x_12, x_13, x_14); +lean_dec(x_13); +lean_dec(x_12); +lean_dec(x_11); +lean_dec(x_10); +lean_dec(x_5); +lean_dec(x_3); +lean_dec(x_2); +lean_dec(x_1); +return x_17; +} +} +LEAN_EXPORT lean_object* l_Array_forIn_x27Unsafe_loop___at_Lean_Meta_Grind_Arith_CommRing_ppBasis_x3f___spec__15___boxed(lean_object* x_1, lean_object* x_2, lean_object* x_3, lean_object* x_4, lean_object* x_5, lean_object* x_6, lean_object* x_7, 
lean_object* x_8, lean_object* x_9, lean_object* x_10, lean_object* x_11, lean_object* x_12, lean_object* x_13) { +_start: +{ +size_t x_14; size_t x_15; lean_object* x_16; +x_14 = lean_unbox_usize(x_5); +lean_dec(x_5); +x_15 = lean_unbox_usize(x_6); +lean_dec(x_6); +x_16 = l_Array_forIn_x27Unsafe_loop___at_Lean_Meta_Grind_Arith_CommRing_ppBasis_x3f___spec__15(x_1, x_2, x_3, x_4, x_14, x_15, x_7, x_8, x_9, x_10, x_11, x_12, x_13); +lean_dec(x_12); +lean_dec(x_11); +lean_dec(x_10); +lean_dec(x_9); +lean_dec(x_4); +lean_dec(x_2); +lean_dec(x_1); +return x_16; +} +} +LEAN_EXPORT lean_object* l_Lean_PersistentArray_forInAux___at_Lean_Meta_Grind_Arith_CommRing_ppBasis_x3f___spec__13___lambda__1___boxed(lean_object* x_1, lean_object* x_2, lean_object* x_3, lean_object* x_4, lean_object* x_5, lean_object* x_6, lean_object* x_7, lean_object* x_8) { +_start: +{ +lean_object* x_9; +x_9 = l_Lean_PersistentArray_forInAux___at_Lean_Meta_Grind_Arith_CommRing_ppBasis_x3f___spec__13___lambda__1(x_1, x_2, x_3, x_4, x_5, x_6, x_7, x_8); +lean_dec(x_7); +lean_dec(x_6); +lean_dec(x_5); +lean_dec(x_4); +lean_dec(x_3); +lean_dec(x_2); +return x_9; +} +} +LEAN_EXPORT lean_object* l_Lean_PersistentArray_forInAux___at_Lean_Meta_Grind_Arith_CommRing_ppBasis_x3f___spec__13___boxed(lean_object* x_1, lean_object* x_2, lean_object* x_3, lean_object* x_4, lean_object* x_5, lean_object* x_6, lean_object* x_7, lean_object* x_8, lean_object* x_9) { +_start: +{ +lean_object* x_10; +x_10 = l_Lean_PersistentArray_forInAux___at_Lean_Meta_Grind_Arith_CommRing_ppBasis_x3f___spec__13(x_1, x_2, x_3, x_4, x_5, x_6, x_7, x_8, x_9); +lean_dec(x_8); +lean_dec(x_7); +lean_dec(x_6); +lean_dec(x_5); +lean_dec(x_2); +lean_dec(x_1); +return x_10; +} +} +LEAN_EXPORT lean_object* l_Array_forIn_x27Unsafe_loop___at_Lean_Meta_Grind_Arith_CommRing_ppBasis_x3f___spec__16___boxed(lean_object* x_1, lean_object* x_2, lean_object* x_3, lean_object* x_4, lean_object* x_5, lean_object* x_6, lean_object* x_7, lean_object* x_8, lean_object* x_9, lean_object* x_10, lean_object* x_11, lean_object* x_12, lean_object* x_13) { +_start: +{ +size_t x_14; size_t x_15; lean_object* x_16; +x_14 = lean_unbox_usize(x_5); +lean_dec(x_5); +x_15 = lean_unbox_usize(x_6); +lean_dec(x_6); +x_16 = l_Array_forIn_x27Unsafe_loop___at_Lean_Meta_Grind_Arith_CommRing_ppBasis_x3f___spec__16(x_1, x_2, x_3, x_4, x_14, x_15, x_7, x_8, x_9, x_10, x_11, x_12, x_13); +lean_dec(x_12); +lean_dec(x_11); +lean_dec(x_10); +lean_dec(x_9); +lean_dec(x_4); +lean_dec(x_2); +lean_dec(x_1); +return x_16; +} +} +LEAN_EXPORT lean_object* l_Lean_PersistentArray_forIn___at_Lean_Meta_Grind_Arith_CommRing_ppBasis_x3f___spec__12___lambda__1___boxed(lean_object* x_1, lean_object* x_2, lean_object* x_3, lean_object* x_4, lean_object* x_5, lean_object* x_6, lean_object* x_7, lean_object* x_8) { +_start: +{ +lean_object* x_9; +x_9 = l_Lean_PersistentArray_forIn___at_Lean_Meta_Grind_Arith_CommRing_ppBasis_x3f___spec__12___lambda__1(x_1, x_2, x_3, x_4, x_5, x_6, x_7, x_8); +lean_dec(x_7); +lean_dec(x_6); +lean_dec(x_5); +lean_dec(x_4); +lean_dec(x_3); +lean_dec(x_2); +return x_9; +} +} +LEAN_EXPORT lean_object* l_Lean_PersistentArray_forIn___at_Lean_Meta_Grind_Arith_CommRing_ppBasis_x3f___spec__12___boxed(lean_object* x_1, lean_object* x_2, lean_object* x_3, lean_object* x_4, lean_object* x_5, lean_object* x_6, lean_object* x_7, lean_object* x_8) { +_start: +{ +lean_object* x_9; +x_9 = l_Lean_PersistentArray_forIn___at_Lean_Meta_Grind_Arith_CommRing_ppBasis_x3f___spec__12(x_1, x_2, x_3, x_4, x_5, x_6, x_7, 
x_8); +lean_dec(x_7); +lean_dec(x_6); +lean_dec(x_5); +lean_dec(x_4); +lean_dec(x_1); +return x_9; +} +} +LEAN_EXPORT lean_object* l_Lean_Meta_Grind_Arith_CommRing_ppBasis_x3f___lambda__1___boxed(lean_object* x_1) { +_start: +{ +lean_object* x_2; +x_2 = l_Lean_Meta_Grind_Arith_CommRing_ppBasis_x3f___lambda__1(x_1); +lean_dec(x_1); +return x_2; +} +} +LEAN_EXPORT lean_object* l_Lean_Meta_Grind_Arith_CommRing_ppBasis_x3f___boxed(lean_object* x_1, lean_object* x_2, lean_object* x_3, lean_object* x_4, lean_object* x_5, lean_object* x_6) { +_start: +{ +lean_object* x_7; +x_7 = l_Lean_Meta_Grind_Arith_CommRing_ppBasis_x3f(x_1, x_2, x_3, x_4, x_5, x_6); +lean_dec(x_5); +lean_dec(x_4); +lean_dec(x_3); +lean_dec(x_2); +return x_7; +} +} +LEAN_EXPORT lean_object* l_Lean_Meta_Grind_Arith_CommRing_PolyDerivation_denoteExpr___at_Lean_Meta_Grind_Arith_CommRing_ppDiseqs_x3f___spec__2(lean_object* x_1, lean_object* x_2, lean_object* x_3, lean_object* x_4, lean_object* x_5, lean_object* x_6, lean_object* x_7) { +_start: +{ +lean_object* x_8; lean_object* x_9; +x_8 = l_Lean_Meta_Grind_Arith_CommRing_PolyDerivation_p(x_1); +x_9 = l_Lean_Grind_CommRing_Poly_denoteExpr___at_Lean_Meta_Grind_Arith_CommRing_ppBasis_x3f___spec__2(x_8, x_2, x_3, x_4, x_5, x_6, x_7); +return x_9; +} +} +LEAN_EXPORT lean_object* l_Lean_Meta_Grind_Arith_CommRing_DiseqCnstr_denoteExpr___at_Lean_Meta_Grind_Arith_CommRing_ppDiseqs_x3f___spec__1(lean_object* x_1, lean_object* x_2, lean_object* x_3, lean_object* x_4, lean_object* x_5, lean_object* x_6, lean_object* x_7) { +_start: +{ +lean_object* x_8; lean_object* x_9; lean_object* x_10; lean_object* x_11; lean_object* x_12; lean_object* x_13; lean_object* x_14; lean_object* x_15; lean_object* x_16; uint8_t x_17; +x_8 = lean_ctor_get(x_1, 4); +lean_inc(x_2); +x_9 = l_Lean_Meta_Grind_Arith_CommRing_PolyDerivation_denoteExpr___at_Lean_Meta_Grind_Arith_CommRing_ppDiseqs_x3f___spec__2(x_8, x_2, x_3, x_4, x_5, x_6, x_7); +x_10 = lean_ctor_get(x_9, 0); +lean_inc(x_10); +x_11 = lean_ctor_get(x_9, 1); +lean_inc(x_11); +lean_dec(x_9); +x_12 = l___private_Lean_Meta_Tactic_Grind_Arith_CommRing_DenoteExpr_0__Lean_Meta_Grind_Arith_CommRing_denoteNum___at_Lean_Meta_Grind_Arith_CommRing_ppBasis_x3f___spec__3___closed__8; +lean_inc(x_2); +x_13 = l___private_Lean_Meta_Tactic_Grind_Arith_CommRing_DenoteExpr_0__Lean_Meta_Grind_Arith_CommRing_denoteNum___at_Lean_Meta_Grind_Arith_CommRing_ppBasis_x3f___spec__3(x_12, x_2, x_3, x_4, x_5, x_6, x_11); +x_14 = lean_ctor_get(x_13, 0); +lean_inc(x_14); +x_15 = lean_ctor_get(x_13, 1); +lean_inc(x_15); +lean_dec(x_13); +x_16 = l___private_Lean_Meta_Tactic_Grind_Arith_CommRing_DenoteExpr_0__Lean_Meta_Grind_Arith_CommRing_mkEq___at_Lean_Meta_Grind_Arith_CommRing_ppBasis_x3f___spec__9(x_10, x_14, x_2, x_3, x_4, x_5, x_6, x_15); +x_17 = !lean_is_exclusive(x_16); +if (x_17 == 0) +{ +lean_object* x_18; lean_object* x_19; +x_18 = lean_ctor_get(x_16, 0); +x_19 = l_Lean_mkNot(x_18); +lean_ctor_set(x_16, 0, x_19); +return x_16; +} +else +{ +lean_object* x_20; lean_object* x_21; lean_object* x_22; lean_object* x_23; +x_20 = lean_ctor_get(x_16, 0); +x_21 = lean_ctor_get(x_16, 1); +lean_inc(x_21); +lean_inc(x_20); +lean_dec(x_16); +x_22 = l_Lean_mkNot(x_20); +x_23 = lean_alloc_ctor(0, 2, 0); +lean_ctor_set(x_23, 0, x_22); +lean_ctor_set(x_23, 1, x_21); +return x_23; +} +} +} +LEAN_EXPORT lean_object* l_Array_forIn_x27Unsafe_loop___at_Lean_Meta_Grind_Arith_CommRing_ppDiseqs_x3f___spec__5(lean_object* x_1, lean_object* x_2, lean_object* x_3, lean_object* x_4, lean_object* x_5, 
size_t x_6, size_t x_7, lean_object* x_8, lean_object* x_9, lean_object* x_10, lean_object* x_11, lean_object* x_12, lean_object* x_13, lean_object* x_14) { +_start: +{ +uint8_t x_15; +x_15 = lean_usize_dec_lt(x_7, x_6); +if (x_15 == 0) +{ +lean_object* x_16; +lean_dec(x_9); +lean_dec(x_4); +x_16 = lean_alloc_ctor(0, 2, 0); +lean_ctor_set(x_16, 0, x_8); +lean_ctor_set(x_16, 1, x_14); +return x_16; +} +else +{ +lean_object* x_17; uint8_t x_18; +x_17 = lean_array_uget(x_5, x_7); +x_18 = !lean_is_exclusive(x_8); +if (x_18 == 0) +{ +lean_object* x_19; lean_object* x_20; lean_object* x_21; lean_object* x_22; +x_19 = lean_ctor_get(x_8, 1); +x_20 = lean_ctor_get(x_8, 0); +lean_dec(x_20); +lean_inc(x_9); +lean_inc(x_19); +x_21 = l_Lean_PersistentArray_forInAux___at_Lean_Meta_Grind_Arith_CommRing_ppDiseqs_x3f___spec__4(x_1, x_17, x_19, x_9, x_10, x_11, x_12, x_13, x_14); +lean_dec(x_17); +x_22 = lean_ctor_get(x_21, 0); +lean_inc(x_22); +if (lean_obj_tag(x_22) == 0) +{ +uint8_t x_23; +lean_dec(x_9); +lean_dec(x_4); +x_23 = !lean_is_exclusive(x_21); +if (x_23 == 0) +{ +lean_object* x_24; lean_object* x_25; +x_24 = lean_ctor_get(x_21, 0); +lean_dec(x_24); +x_25 = lean_alloc_ctor(1, 1, 0); +lean_ctor_set(x_25, 0, x_22); +lean_ctor_set(x_8, 0, x_25); +lean_ctor_set(x_21, 0, x_8); +return x_21; +} +else +{ +lean_object* x_26; lean_object* x_27; lean_object* x_28; +x_26 = lean_ctor_get(x_21, 1); +lean_inc(x_26); +lean_dec(x_21); +x_27 = lean_alloc_ctor(1, 1, 0); +lean_ctor_set(x_27, 0, x_22); +lean_ctor_set(x_8, 0, x_27); +x_28 = lean_alloc_ctor(0, 2, 0); +lean_ctor_set(x_28, 0, x_8); +lean_ctor_set(x_28, 1, x_26); +return x_28; +} +} +else +{ +lean_object* x_29; lean_object* x_30; size_t x_31; size_t x_32; +lean_dec(x_19); +x_29 = lean_ctor_get(x_21, 1); +lean_inc(x_29); +lean_dec(x_21); +x_30 = lean_ctor_get(x_22, 0); +lean_inc(x_30); +lean_dec(x_22); +lean_inc(x_4); +lean_ctor_set(x_8, 1, x_30); +lean_ctor_set(x_8, 0, x_4); +x_31 = 1; +x_32 = lean_usize_add(x_7, x_31); +x_7 = x_32; +x_14 = x_29; +goto _start; +} +} +else +{ +lean_object* x_34; lean_object* x_35; lean_object* x_36; +x_34 = lean_ctor_get(x_8, 1); +lean_inc(x_34); +lean_dec(x_8); +lean_inc(x_9); +lean_inc(x_34); +x_35 = l_Lean_PersistentArray_forInAux___at_Lean_Meta_Grind_Arith_CommRing_ppDiseqs_x3f___spec__4(x_1, x_17, x_34, x_9, x_10, x_11, x_12, x_13, x_14); +lean_dec(x_17); +x_36 = lean_ctor_get(x_35, 0); +lean_inc(x_36); +if (lean_obj_tag(x_36) == 0) +{ +lean_object* x_37; lean_object* x_38; lean_object* x_39; lean_object* x_40; lean_object* x_41; +lean_dec(x_9); +lean_dec(x_4); +x_37 = lean_ctor_get(x_35, 1); +lean_inc(x_37); +if (lean_is_exclusive(x_35)) { + lean_ctor_release(x_35, 0); + lean_ctor_release(x_35, 1); + x_38 = x_35; +} else { + lean_dec_ref(x_35); + x_38 = lean_box(0); +} +x_39 = lean_alloc_ctor(1, 1, 0); +lean_ctor_set(x_39, 0, x_36); +x_40 = lean_alloc_ctor(0, 2, 0); +lean_ctor_set(x_40, 0, x_39); +lean_ctor_set(x_40, 1, x_34); +if (lean_is_scalar(x_38)) { + x_41 = lean_alloc_ctor(0, 2, 0); +} else { + x_41 = x_38; +} +lean_ctor_set(x_41, 0, x_40); +lean_ctor_set(x_41, 1, x_37); +return x_41; +} +else +{ +lean_object* x_42; lean_object* x_43; lean_object* x_44; size_t x_45; size_t x_46; +lean_dec(x_34); +x_42 = lean_ctor_get(x_35, 1); +lean_inc(x_42); +lean_dec(x_35); +x_43 = lean_ctor_get(x_36, 0); +lean_inc(x_43); +lean_dec(x_36); +lean_inc(x_4); +x_44 = lean_alloc_ctor(0, 2, 0); +lean_ctor_set(x_44, 0, x_4); +lean_ctor_set(x_44, 1, x_43); +x_45 = 1; +x_46 = lean_usize_add(x_7, x_45); +x_7 = x_46; +x_8 = x_44; 
+x_14 = x_42; +goto _start; +} +} +} +} +} +LEAN_EXPORT lean_object* l_Array_forIn_x27Unsafe_loop___at_Lean_Meta_Grind_Arith_CommRing_ppDiseqs_x3f___spec__6(lean_object* x_1, lean_object* x_2, lean_object* x_3, lean_object* x_4, size_t x_5, size_t x_6, lean_object* x_7, lean_object* x_8, lean_object* x_9, lean_object* x_10, lean_object* x_11, lean_object* x_12, lean_object* x_13) { +_start: +{ +uint8_t x_14; +x_14 = lean_usize_dec_lt(x_6, x_5); +if (x_14 == 0) +{ +lean_object* x_15; +lean_dec(x_8); +lean_dec(x_3); +x_15 = lean_alloc_ctor(0, 2, 0); +lean_ctor_set(x_15, 0, x_7); +lean_ctor_set(x_15, 1, x_13); +return x_15; +} +else +{ +lean_object* x_16; lean_object* x_17; lean_object* x_18; uint8_t x_19; +x_16 = lean_array_uget(x_4, x_6); +x_17 = lean_ctor_get(x_7, 1); +lean_inc(x_17); +lean_dec(x_7); +lean_inc(x_8); +x_18 = l_Lean_Meta_Grind_Arith_CommRing_DiseqCnstr_denoteExpr___at_Lean_Meta_Grind_Arith_CommRing_ppDiseqs_x3f___spec__1(x_16, x_8, x_9, x_10, x_11, x_12, x_13); +lean_dec(x_16); +x_19 = !lean_is_exclusive(x_18); +if (x_19 == 0) +{ +lean_object* x_20; lean_object* x_21; lean_object* x_22; lean_object* x_23; lean_object* x_24; size_t x_25; size_t x_26; +x_20 = lean_ctor_get(x_18, 0); +x_21 = lean_ctor_get(x_18, 1); +x_22 = l_List_forIn_x27_loop___at_Lean_Meta_Grind_Arith_CommRing_ppBasis_x3f___spec__11___closed__2; +x_23 = l_Lean_toTraceElem___at_Lean_Meta_Grind_Arith_CommRing_ppBasis_x3f___spec__10(x_20, x_22); +x_24 = lean_array_push(x_17, x_23); +lean_inc(x_3); +lean_ctor_set(x_18, 1, x_24); +lean_ctor_set(x_18, 0, x_3); +x_25 = 1; +x_26 = lean_usize_add(x_6, x_25); +x_6 = x_26; +x_7 = x_18; +x_13 = x_21; +goto _start; +} +else +{ +lean_object* x_28; lean_object* x_29; lean_object* x_30; lean_object* x_31; lean_object* x_32; lean_object* x_33; size_t x_34; size_t x_35; +x_28 = lean_ctor_get(x_18, 0); +x_29 = lean_ctor_get(x_18, 1); +lean_inc(x_29); +lean_inc(x_28); +lean_dec(x_18); +x_30 = l_List_forIn_x27_loop___at_Lean_Meta_Grind_Arith_CommRing_ppBasis_x3f___spec__11___closed__2; +x_31 = l_Lean_toTraceElem___at_Lean_Meta_Grind_Arith_CommRing_ppBasis_x3f___spec__10(x_28, x_30); +x_32 = lean_array_push(x_17, x_31); +lean_inc(x_3); +x_33 = lean_alloc_ctor(0, 2, 0); +lean_ctor_set(x_33, 0, x_3); +lean_ctor_set(x_33, 1, x_32); +x_34 = 1; +x_35 = lean_usize_add(x_6, x_34); +x_6 = x_35; +x_7 = x_33; +x_13 = x_29; +goto _start; +} +} +} +} +LEAN_EXPORT lean_object* l_Lean_PersistentArray_forInAux___at_Lean_Meta_Grind_Arith_CommRing_ppDiseqs_x3f___spec__4(lean_object* x_1, lean_object* x_2, lean_object* x_3, lean_object* x_4, lean_object* x_5, lean_object* x_6, lean_object* x_7, lean_object* x_8, lean_object* x_9) { +_start: +{ +if (lean_obj_tag(x_2) == 0) +{ +lean_object* x_10; lean_object* x_11; lean_object* x_12; lean_object* x_13; size_t x_14; size_t x_15; lean_object* x_16; lean_object* x_17; lean_object* x_18; +x_10 = lean_ctor_get(x_2, 0); +x_11 = lean_box(0); +x_12 = lean_box(0); +x_13 = lean_alloc_ctor(0, 2, 0); +lean_ctor_set(x_13, 0, x_12); +lean_ctor_set(x_13, 1, x_3); +x_14 = lean_array_size(x_10); +x_15 = 0; +lean_inc(x_4); +x_16 = l_Array_forIn_x27Unsafe_loop___at_Lean_Meta_Grind_Arith_CommRing_ppDiseqs_x3f___spec__5(x_1, x_10, x_11, x_12, x_10, x_14, x_15, x_13, x_4, x_5, x_6, x_7, x_8, x_9); +x_17 = lean_ctor_get(x_16, 0); +lean_inc(x_17); +x_18 = lean_ctor_get(x_17, 0); +lean_inc(x_18); +if (lean_obj_tag(x_18) == 0) +{ +lean_object* x_19; lean_object* x_20; lean_object* x_21; lean_object* x_22; +x_19 = lean_ctor_get(x_16, 1); +lean_inc(x_19); +lean_dec(x_16); 
+x_20 = lean_ctor_get(x_17, 1); +lean_inc(x_20); +lean_dec(x_17); +x_21 = lean_box(0); +x_22 = l_Lean_PersistentArray_forInAux___at_Lean_Meta_Grind_Arith_CommRing_ppBasis_x3f___spec__13___lambda__1(x_20, x_21, x_4, x_5, x_6, x_7, x_8, x_19); +lean_dec(x_4); +return x_22; +} +else +{ +uint8_t x_23; +lean_dec(x_17); +lean_dec(x_4); +x_23 = !lean_is_exclusive(x_16); +if (x_23 == 0) +{ +lean_object* x_24; lean_object* x_25; +x_24 = lean_ctor_get(x_16, 0); +lean_dec(x_24); +x_25 = lean_ctor_get(x_18, 0); +lean_inc(x_25); +lean_dec(x_18); +lean_ctor_set(x_16, 0, x_25); +return x_16; +} +else +{ +lean_object* x_26; lean_object* x_27; lean_object* x_28; +x_26 = lean_ctor_get(x_16, 1); +lean_inc(x_26); +lean_dec(x_16); +x_27 = lean_ctor_get(x_18, 0); +lean_inc(x_27); +lean_dec(x_18); +x_28 = lean_alloc_ctor(0, 2, 0); +lean_ctor_set(x_28, 0, x_27); +lean_ctor_set(x_28, 1, x_26); +return x_28; +} +} +} +else +{ +lean_object* x_29; lean_object* x_30; lean_object* x_31; lean_object* x_32; size_t x_33; size_t x_34; lean_object* x_35; lean_object* x_36; lean_object* x_37; +x_29 = lean_ctor_get(x_2, 0); +x_30 = lean_box(0); +x_31 = lean_box(0); +x_32 = lean_alloc_ctor(0, 2, 0); +lean_ctor_set(x_32, 0, x_31); +lean_ctor_set(x_32, 1, x_3); +x_33 = lean_array_size(x_29); +x_34 = 0; +lean_inc(x_4); +x_35 = l_Array_forIn_x27Unsafe_loop___at_Lean_Meta_Grind_Arith_CommRing_ppDiseqs_x3f___spec__6(x_29, x_30, x_31, x_29, x_33, x_34, x_32, x_4, x_5, x_6, x_7, x_8, x_9); +x_36 = lean_ctor_get(x_35, 0); +lean_inc(x_36); +x_37 = lean_ctor_get(x_36, 0); +lean_inc(x_37); +if (lean_obj_tag(x_37) == 0) +{ +lean_object* x_38; lean_object* x_39; lean_object* x_40; lean_object* x_41; +x_38 = lean_ctor_get(x_35, 1); +lean_inc(x_38); +lean_dec(x_35); +x_39 = lean_ctor_get(x_36, 1); +lean_inc(x_39); +lean_dec(x_36); +x_40 = lean_box(0); +x_41 = l_Lean_PersistentArray_forInAux___at_Lean_Meta_Grind_Arith_CommRing_ppBasis_x3f___spec__13___lambda__1(x_39, x_40, x_4, x_5, x_6, x_7, x_8, x_38); +lean_dec(x_4); +return x_41; +} +else +{ +uint8_t x_42; +lean_dec(x_36); +lean_dec(x_4); +x_42 = !lean_is_exclusive(x_35); +if (x_42 == 0) +{ +lean_object* x_43; lean_object* x_44; +x_43 = lean_ctor_get(x_35, 0); +lean_dec(x_43); +x_44 = lean_ctor_get(x_37, 0); +lean_inc(x_44); +lean_dec(x_37); +lean_ctor_set(x_35, 0, x_44); +return x_35; +} +else +{ +lean_object* x_45; lean_object* x_46; lean_object* x_47; +x_45 = lean_ctor_get(x_35, 1); +lean_inc(x_45); +lean_dec(x_35); +x_46 = lean_ctor_get(x_37, 0); +lean_inc(x_46); +lean_dec(x_37); +x_47 = lean_alloc_ctor(0, 2, 0); +lean_ctor_set(x_47, 0, x_46); +lean_ctor_set(x_47, 1, x_45); +return x_47; +} +} +} +} +} +LEAN_EXPORT lean_object* l_Array_forIn_x27Unsafe_loop___at_Lean_Meta_Grind_Arith_CommRing_ppDiseqs_x3f___spec__7(lean_object* x_1, lean_object* x_2, lean_object* x_3, lean_object* x_4, size_t x_5, size_t x_6, lean_object* x_7, lean_object* x_8, lean_object* x_9, lean_object* x_10, lean_object* x_11, lean_object* x_12, lean_object* x_13) { +_start: +{ +uint8_t x_14; +x_14 = lean_usize_dec_lt(x_6, x_5); +if (x_14 == 0) +{ +lean_object* x_15; +lean_dec(x_8); +lean_dec(x_3); +x_15 = lean_alloc_ctor(0, 2, 0); +lean_ctor_set(x_15, 0, x_7); +lean_ctor_set(x_15, 1, x_13); +return x_15; +} +else +{ +lean_object* x_16; lean_object* x_17; lean_object* x_18; uint8_t x_19; +x_16 = lean_array_uget(x_4, x_6); +x_17 = lean_ctor_get(x_7, 1); +lean_inc(x_17); +lean_dec(x_7); +lean_inc(x_8); +x_18 = 
l_Lean_Meta_Grind_Arith_CommRing_DiseqCnstr_denoteExpr___at_Lean_Meta_Grind_Arith_CommRing_ppDiseqs_x3f___spec__1(x_16, x_8, x_9, x_10, x_11, x_12, x_13); +lean_dec(x_16); +x_19 = !lean_is_exclusive(x_18); +if (x_19 == 0) +{ +lean_object* x_20; lean_object* x_21; lean_object* x_22; lean_object* x_23; lean_object* x_24; size_t x_25; size_t x_26; +x_20 = lean_ctor_get(x_18, 0); +x_21 = lean_ctor_get(x_18, 1); +x_22 = l_List_forIn_x27_loop___at_Lean_Meta_Grind_Arith_CommRing_ppBasis_x3f___spec__11___closed__2; +x_23 = l_Lean_toTraceElem___at_Lean_Meta_Grind_Arith_CommRing_ppBasis_x3f___spec__10(x_20, x_22); +x_24 = lean_array_push(x_17, x_23); +lean_inc(x_3); +lean_ctor_set(x_18, 1, x_24); +lean_ctor_set(x_18, 0, x_3); +x_25 = 1; +x_26 = lean_usize_add(x_6, x_25); +x_6 = x_26; +x_7 = x_18; +x_13 = x_21; +goto _start; +} +else +{ +lean_object* x_28; lean_object* x_29; lean_object* x_30; lean_object* x_31; lean_object* x_32; lean_object* x_33; size_t x_34; size_t x_35; +x_28 = lean_ctor_get(x_18, 0); +x_29 = lean_ctor_get(x_18, 1); +lean_inc(x_29); +lean_inc(x_28); +lean_dec(x_18); +x_30 = l_List_forIn_x27_loop___at_Lean_Meta_Grind_Arith_CommRing_ppBasis_x3f___spec__11___closed__2; +x_31 = l_Lean_toTraceElem___at_Lean_Meta_Grind_Arith_CommRing_ppBasis_x3f___spec__10(x_28, x_30); +x_32 = lean_array_push(x_17, x_31); +lean_inc(x_3); +x_33 = lean_alloc_ctor(0, 2, 0); +lean_ctor_set(x_33, 0, x_3); +lean_ctor_set(x_33, 1, x_32); +x_34 = 1; +x_35 = lean_usize_add(x_6, x_34); +x_6 = x_35; +x_7 = x_33; +x_13 = x_29; +goto _start; +} +} +} +} +LEAN_EXPORT lean_object* l_Lean_PersistentArray_forIn___at_Lean_Meta_Grind_Arith_CommRing_ppDiseqs_x3f___spec__3(lean_object* x_1, lean_object* x_2, lean_object* x_3, lean_object* x_4, lean_object* x_5, lean_object* x_6, lean_object* x_7, lean_object* x_8) { +_start: +{ +lean_object* x_9; lean_object* x_10; lean_object* x_11; +x_9 = lean_ctor_get(x_1, 0); +lean_inc(x_3); +lean_inc(x_2); +x_10 = l_Lean_PersistentArray_forInAux___at_Lean_Meta_Grind_Arith_CommRing_ppDiseqs_x3f___spec__4(x_2, x_9, x_2, x_3, x_4, x_5, x_6, x_7, x_8); +lean_dec(x_2); +x_11 = lean_ctor_get(x_10, 0); +lean_inc(x_11); +if (lean_obj_tag(x_11) == 0) +{ +uint8_t x_12; +lean_dec(x_3); +x_12 = !lean_is_exclusive(x_10); +if (x_12 == 0) +{ +lean_object* x_13; lean_object* x_14; +x_13 = lean_ctor_get(x_10, 0); +lean_dec(x_13); +x_14 = lean_ctor_get(x_11, 0); +lean_inc(x_14); +lean_dec(x_11); +lean_ctor_set(x_10, 0, x_14); +return x_10; +} +else +{ +lean_object* x_15; lean_object* x_16; lean_object* x_17; +x_15 = lean_ctor_get(x_10, 1); +lean_inc(x_15); +lean_dec(x_10); +x_16 = lean_ctor_get(x_11, 0); +lean_inc(x_16); +lean_dec(x_11); +x_17 = lean_alloc_ctor(0, 2, 0); +lean_ctor_set(x_17, 0, x_16); +lean_ctor_set(x_17, 1, x_15); +return x_17; +} +} +else +{ +lean_object* x_18; lean_object* x_19; lean_object* x_20; lean_object* x_21; lean_object* x_22; lean_object* x_23; size_t x_24; size_t x_25; lean_object* x_26; lean_object* x_27; lean_object* x_28; +x_18 = lean_ctor_get(x_10, 1); +lean_inc(x_18); +lean_dec(x_10); +x_19 = lean_ctor_get(x_11, 0); +lean_inc(x_19); +lean_dec(x_11); +x_20 = lean_box(0); +x_21 = lean_ctor_get(x_1, 1); +x_22 = lean_box(0); +x_23 = lean_alloc_ctor(0, 2, 0); +lean_ctor_set(x_23, 0, x_22); +lean_ctor_set(x_23, 1, x_19); +x_24 = lean_array_size(x_21); +x_25 = 0; +x_26 = l_Array_forIn_x27Unsafe_loop___at_Lean_Meta_Grind_Arith_CommRing_ppDiseqs_x3f___spec__7(x_20, x_21, x_22, x_21, x_24, x_25, x_23, x_3, x_4, x_5, x_6, x_7, x_18); +x_27 = lean_ctor_get(x_26, 0); 
+lean_inc(x_27); +x_28 = lean_ctor_get(x_27, 0); +lean_inc(x_28); +if (lean_obj_tag(x_28) == 0) +{ +uint8_t x_29; +x_29 = !lean_is_exclusive(x_26); +if (x_29 == 0) +{ +lean_object* x_30; lean_object* x_31; +x_30 = lean_ctor_get(x_26, 0); +lean_dec(x_30); +x_31 = lean_ctor_get(x_27, 1); +lean_inc(x_31); +lean_dec(x_27); +lean_ctor_set(x_26, 0, x_31); +return x_26; +} +else +{ +lean_object* x_32; lean_object* x_33; lean_object* x_34; +x_32 = lean_ctor_get(x_26, 1); +lean_inc(x_32); +lean_dec(x_26); +x_33 = lean_ctor_get(x_27, 1); +lean_inc(x_33); +lean_dec(x_27); +x_34 = lean_alloc_ctor(0, 2, 0); +lean_ctor_set(x_34, 0, x_33); +lean_ctor_set(x_34, 1, x_32); +return x_34; +} +} +else +{ +uint8_t x_35; +lean_dec(x_27); +x_35 = !lean_is_exclusive(x_26); +if (x_35 == 0) +{ +lean_object* x_36; lean_object* x_37; +x_36 = lean_ctor_get(x_26, 0); +lean_dec(x_36); +x_37 = lean_ctor_get(x_28, 0); +lean_inc(x_37); +lean_dec(x_28); +lean_ctor_set(x_26, 0, x_37); +return x_26; +} +else +{ +lean_object* x_38; lean_object* x_39; lean_object* x_40; +x_38 = lean_ctor_get(x_26, 1); +lean_inc(x_38); +lean_dec(x_26); +x_39 = lean_ctor_get(x_28, 0); +lean_inc(x_39); +lean_dec(x_28); +x_40 = lean_alloc_ctor(0, 2, 0); +lean_ctor_set(x_40, 0, x_39); +lean_ctor_set(x_40, 1, x_38); +return x_40; +} +} +} +} +} +static lean_object* _init_l_Lean_Meta_Grind_Arith_CommRing_ppDiseqs_x3f___lambda__1___closed__1() { +_start: +{ +lean_object* x_1; +x_1 = lean_mk_string_unchecked("Disequalities", 13, 13); +return x_1; +} +} +static lean_object* _init_l_Lean_Meta_Grind_Arith_CommRing_ppDiseqs_x3f___lambda__1___closed__2() { +_start: +{ +lean_object* x_1; lean_object* x_2; +x_1 = l_Lean_Meta_Grind_Arith_CommRing_ppDiseqs_x3f___lambda__1___closed__1; +x_2 = lean_alloc_ctor(3, 1, 0); +lean_ctor_set(x_2, 0, x_1); +return x_2; +} +} +static lean_object* _init_l_Lean_Meta_Grind_Arith_CommRing_ppDiseqs_x3f___lambda__1___closed__3() { +_start: +{ +lean_object* x_1; lean_object* x_2; +x_1 = l_Lean_Meta_Grind_Arith_CommRing_ppDiseqs_x3f___lambda__1___closed__2; +x_2 = l_Lean_MessageData_ofFormat(x_1); +return x_2; +} +} +LEAN_EXPORT lean_object* l_Lean_Meta_Grind_Arith_CommRing_ppDiseqs_x3f___lambda__1(lean_object* x_1) { +_start: +{ +lean_object* x_2; +x_2 = l_Lean_Meta_Grind_Arith_CommRing_ppDiseqs_x3f___lambda__1___closed__3; +return x_2; +} +} +static lean_object* _init_l_Lean_Meta_Grind_Arith_CommRing_ppDiseqs_x3f___closed__1() { +_start: +{ +lean_object* x_1; +x_1 = lean_mk_string_unchecked("diseqs", 6, 6); +return x_1; +} +} +static lean_object* _init_l_Lean_Meta_Grind_Arith_CommRing_ppDiseqs_x3f___closed__2() { +_start: +{ +lean_object* x_1; lean_object* x_2; lean_object* x_3; +x_1 = lean_box(0); +x_2 = l_Lean_Meta_Grind_Arith_CommRing_ppDiseqs_x3f___closed__1; +x_3 = l_Lean_Name_str___override(x_1, x_2); +return x_3; +} +} +static lean_object* _init_l_Lean_Meta_Grind_Arith_CommRing_ppDiseqs_x3f___closed__3() { +_start: +{ +lean_object* x_1; +x_1 = lean_alloc_closure((void*)(l_Lean_Meta_Grind_Arith_CommRing_ppDiseqs_x3f___lambda__1___boxed), 1, 0); +return x_1; +} +} +static lean_object* _init_l_Lean_Meta_Grind_Arith_CommRing_ppDiseqs_x3f___closed__4() { +_start: +{ +lean_object* x_1; lean_object* x_2; +x_1 = l_Lean_Meta_Grind_Arith_CommRing_ppDiseqs_x3f___closed__3; +x_2 = lean_mk_thunk(x_1); +return x_2; +} +} +LEAN_EXPORT lean_object* l_Lean_Meta_Grind_Arith_CommRing_ppDiseqs_x3f(lean_object* x_1, lean_object* x_2, lean_object* x_3, lean_object* x_4, lean_object* x_5, lean_object* x_6) { +_start: +{ +lean_object* x_7; 
lean_object* x_8; lean_object* x_9; uint8_t x_10; +x_7 = lean_ctor_get(x_1, 20); +lean_inc(x_7); +x_8 = l_Lean_toTraceElem___at_Lean_Meta_Grind_Arith_CommRing_ppBasis_x3f___spec__10___closed__1; +x_9 = l_Lean_PersistentArray_forIn___at_Lean_Meta_Grind_Arith_CommRing_ppDiseqs_x3f___spec__3(x_7, x_8, x_1, x_2, x_3, x_4, x_5, x_6); +lean_dec(x_7); +x_10 = !lean_is_exclusive(x_9); +if (x_10 == 0) +{ +lean_object* x_11; lean_object* x_12; lean_object* x_13; lean_object* x_14; +x_11 = lean_ctor_get(x_9, 0); +x_12 = l_Lean_Meta_Grind_Arith_CommRing_ppDiseqs_x3f___closed__2; +x_13 = l_Lean_Meta_Grind_Arith_CommRing_ppDiseqs_x3f___closed__4; +x_14 = l___private_Lean_Meta_Tactic_Grind_Arith_CommRing_PP_0__Lean_Meta_Grind_Arith_CommRing_toOption(x_12, x_13, x_11); +lean_ctor_set(x_9, 0, x_14); +return x_9; +} +else +{ +lean_object* x_15; lean_object* x_16; lean_object* x_17; lean_object* x_18; lean_object* x_19; lean_object* x_20; +x_15 = lean_ctor_get(x_9, 0); +x_16 = lean_ctor_get(x_9, 1); +lean_inc(x_16); +lean_inc(x_15); +lean_dec(x_9); +x_17 = l_Lean_Meta_Grind_Arith_CommRing_ppDiseqs_x3f___closed__2; +x_18 = l_Lean_Meta_Grind_Arith_CommRing_ppDiseqs_x3f___closed__4; +x_19 = l___private_Lean_Meta_Tactic_Grind_Arith_CommRing_PP_0__Lean_Meta_Grind_Arith_CommRing_toOption(x_17, x_18, x_15); +x_20 = lean_alloc_ctor(0, 2, 0); +lean_ctor_set(x_20, 0, x_19); +lean_ctor_set(x_20, 1, x_16); +return x_20; +} +} +} +LEAN_EXPORT lean_object* l_Lean_Meta_Grind_Arith_CommRing_PolyDerivation_denoteExpr___at_Lean_Meta_Grind_Arith_CommRing_ppDiseqs_x3f___spec__2___boxed(lean_object* x_1, lean_object* x_2, lean_object* x_3, lean_object* x_4, lean_object* x_5, lean_object* x_6, lean_object* x_7) { +_start: +{ +lean_object* x_8; +x_8 = l_Lean_Meta_Grind_Arith_CommRing_PolyDerivation_denoteExpr___at_Lean_Meta_Grind_Arith_CommRing_ppDiseqs_x3f___spec__2(x_1, x_2, x_3, x_4, x_5, x_6, x_7); +lean_dec(x_6); +lean_dec(x_5); +lean_dec(x_4); +lean_dec(x_3); +lean_dec(x_1); +return x_8; +} +} +LEAN_EXPORT lean_object* l_Lean_Meta_Grind_Arith_CommRing_DiseqCnstr_denoteExpr___at_Lean_Meta_Grind_Arith_CommRing_ppDiseqs_x3f___spec__1___boxed(lean_object* x_1, lean_object* x_2, lean_object* x_3, lean_object* x_4, lean_object* x_5, lean_object* x_6, lean_object* x_7) { +_start: +{ +lean_object* x_8; +x_8 = l_Lean_Meta_Grind_Arith_CommRing_DiseqCnstr_denoteExpr___at_Lean_Meta_Grind_Arith_CommRing_ppDiseqs_x3f___spec__1(x_1, x_2, x_3, x_4, x_5, x_6, x_7); +lean_dec(x_6); +lean_dec(x_5); +lean_dec(x_4); +lean_dec(x_3); +lean_dec(x_1); +return x_8; +} +} +LEAN_EXPORT lean_object* l_Array_forIn_x27Unsafe_loop___at_Lean_Meta_Grind_Arith_CommRing_ppDiseqs_x3f___spec__5___boxed(lean_object* x_1, lean_object* x_2, lean_object* x_3, lean_object* x_4, lean_object* x_5, lean_object* x_6, lean_object* x_7, lean_object* x_8, lean_object* x_9, lean_object* x_10, lean_object* x_11, lean_object* x_12, lean_object* x_13, lean_object* x_14) { +_start: +{ +size_t x_15; size_t x_16; lean_object* x_17; +x_15 = lean_unbox_usize(x_6); +lean_dec(x_6); +x_16 = lean_unbox_usize(x_7); +lean_dec(x_7); +x_17 = l_Array_forIn_x27Unsafe_loop___at_Lean_Meta_Grind_Arith_CommRing_ppDiseqs_x3f___spec__5(x_1, x_2, x_3, x_4, x_5, x_15, x_16, x_8, x_9, x_10, x_11, x_12, x_13, x_14); +lean_dec(x_13); +lean_dec(x_12); +lean_dec(x_11); +lean_dec(x_10); +lean_dec(x_5); +lean_dec(x_3); +lean_dec(x_2); +lean_dec(x_1); +return x_17; +} +} +LEAN_EXPORT lean_object* l_Array_forIn_x27Unsafe_loop___at_Lean_Meta_Grind_Arith_CommRing_ppDiseqs_x3f___spec__6___boxed(lean_object* x_1, 
lean_object* x_2, lean_object* x_3, lean_object* x_4, lean_object* x_5, lean_object* x_6, lean_object* x_7, lean_object* x_8, lean_object* x_9, lean_object* x_10, lean_object* x_11, lean_object* x_12, lean_object* x_13) { +_start: +{ +size_t x_14; size_t x_15; lean_object* x_16; +x_14 = lean_unbox_usize(x_5); +lean_dec(x_5); +x_15 = lean_unbox_usize(x_6); +lean_dec(x_6); +x_16 = l_Array_forIn_x27Unsafe_loop___at_Lean_Meta_Grind_Arith_CommRing_ppDiseqs_x3f___spec__6(x_1, x_2, x_3, x_4, x_14, x_15, x_7, x_8, x_9, x_10, x_11, x_12, x_13); +lean_dec(x_12); +lean_dec(x_11); +lean_dec(x_10); +lean_dec(x_9); +lean_dec(x_4); +lean_dec(x_2); +lean_dec(x_1); +return x_16; +} +} +LEAN_EXPORT lean_object* l_Lean_PersistentArray_forInAux___at_Lean_Meta_Grind_Arith_CommRing_ppDiseqs_x3f___spec__4___boxed(lean_object* x_1, lean_object* x_2, lean_object* x_3, lean_object* x_4, lean_object* x_5, lean_object* x_6, lean_object* x_7, lean_object* x_8, lean_object* x_9) { +_start: +{ +lean_object* x_10; +x_10 = l_Lean_PersistentArray_forInAux___at_Lean_Meta_Grind_Arith_CommRing_ppDiseqs_x3f___spec__4(x_1, x_2, x_3, x_4, x_5, x_6, x_7, x_8, x_9); +lean_dec(x_8); +lean_dec(x_7); +lean_dec(x_6); +lean_dec(x_5); +lean_dec(x_2); +lean_dec(x_1); +return x_10; +} +} +LEAN_EXPORT lean_object* l_Array_forIn_x27Unsafe_loop___at_Lean_Meta_Grind_Arith_CommRing_ppDiseqs_x3f___spec__7___boxed(lean_object* x_1, lean_object* x_2, lean_object* x_3, lean_object* x_4, lean_object* x_5, lean_object* x_6, lean_object* x_7, lean_object* x_8, lean_object* x_9, lean_object* x_10, lean_object* x_11, lean_object* x_12, lean_object* x_13) { +_start: +{ +size_t x_14; size_t x_15; lean_object* x_16; +x_14 = lean_unbox_usize(x_5); +lean_dec(x_5); +x_15 = lean_unbox_usize(x_6); +lean_dec(x_6); +x_16 = l_Array_forIn_x27Unsafe_loop___at_Lean_Meta_Grind_Arith_CommRing_ppDiseqs_x3f___spec__7(x_1, x_2, x_3, x_4, x_14, x_15, x_7, x_8, x_9, x_10, x_11, x_12, x_13); +lean_dec(x_12); +lean_dec(x_11); +lean_dec(x_10); +lean_dec(x_9); +lean_dec(x_4); +lean_dec(x_2); +lean_dec(x_1); +return x_16; +} +} +LEAN_EXPORT lean_object* l_Lean_PersistentArray_forIn___at_Lean_Meta_Grind_Arith_CommRing_ppDiseqs_x3f___spec__3___boxed(lean_object* x_1, lean_object* x_2, lean_object* x_3, lean_object* x_4, lean_object* x_5, lean_object* x_6, lean_object* x_7, lean_object* x_8) { +_start: +{ +lean_object* x_9; +x_9 = l_Lean_PersistentArray_forIn___at_Lean_Meta_Grind_Arith_CommRing_ppDiseqs_x3f___spec__3(x_1, x_2, x_3, x_4, x_5, x_6, x_7, x_8); +lean_dec(x_7); +lean_dec(x_6); +lean_dec(x_5); +lean_dec(x_4); +lean_dec(x_1); +return x_9; +} +} +LEAN_EXPORT lean_object* l_Lean_Meta_Grind_Arith_CommRing_ppDiseqs_x3f___lambda__1___boxed(lean_object* x_1) { +_start: +{ +lean_object* x_2; +x_2 = l_Lean_Meta_Grind_Arith_CommRing_ppDiseqs_x3f___lambda__1(x_1); +lean_dec(x_1); +return x_2; +} +} +LEAN_EXPORT lean_object* l_Lean_Meta_Grind_Arith_CommRing_ppDiseqs_x3f___boxed(lean_object* x_1, lean_object* x_2, lean_object* x_3, lean_object* x_4, lean_object* x_5, lean_object* x_6) { +_start: +{ +lean_object* x_7; +x_7 = l_Lean_Meta_Grind_Arith_CommRing_ppDiseqs_x3f(x_1, x_2, x_3, x_4, x_5, x_6); +lean_dec(x_5); +lean_dec(x_4); +lean_dec(x_3); +lean_dec(x_2); +return x_7; +} +} +static lean_object* _init_l_Lean_Meta_Grind_Arith_CommRing_ppRing_x3f___lambda__1___closed__1() { +_start: +{ +lean_object* x_1; +x_1 = lean_mk_string_unchecked("Ring `", 6, 6); +return x_1; +} +} +static lean_object* _init_l_Lean_Meta_Grind_Arith_CommRing_ppRing_x3f___lambda__1___closed__2() { +_start: 
+{ +lean_object* x_1; lean_object* x_2; +x_1 = l_Lean_Meta_Grind_Arith_CommRing_ppRing_x3f___lambda__1___closed__1; +x_2 = l_Lean_stringToMessageData(x_1); +return x_2; +} +} +static lean_object* _init_l_Lean_Meta_Grind_Arith_CommRing_ppRing_x3f___lambda__1___closed__3() { +_start: +{ +lean_object* x_1; +x_1 = lean_mk_string_unchecked("`", 1, 1); +return x_1; +} +} +static lean_object* _init_l_Lean_Meta_Grind_Arith_CommRing_ppRing_x3f___lambda__1___closed__4() { +_start: +{ +lean_object* x_1; lean_object* x_2; +x_1 = l_Lean_Meta_Grind_Arith_CommRing_ppRing_x3f___lambda__1___closed__3; +x_2 = l_Lean_stringToMessageData(x_1); +return x_2; +} +} +LEAN_EXPORT lean_object* l_Lean_Meta_Grind_Arith_CommRing_ppRing_x3f___lambda__1(lean_object* x_1, lean_object* x_2) { +_start: +{ +lean_object* x_3; lean_object* x_4; lean_object* x_5; lean_object* x_6; lean_object* x_7; lean_object* x_8; +x_3 = lean_ctor_get(x_1, 1); +lean_inc(x_3); +lean_dec(x_1); +x_4 = l_Lean_MessageData_ofExpr(x_3); +x_5 = l_Lean_Meta_Grind_Arith_CommRing_ppRing_x3f___lambda__1___closed__2; +x_6 = lean_alloc_ctor(7, 2, 0); +lean_ctor_set(x_6, 0, x_5); +lean_ctor_set(x_6, 1, x_4); +x_7 = l_Lean_Meta_Grind_Arith_CommRing_ppRing_x3f___lambda__1___closed__4; +x_8 = lean_alloc_ctor(7, 2, 0); +lean_ctor_set(x_8, 0, x_6); +lean_ctor_set(x_8, 1, x_7); +return x_8; +} +} +static lean_object* _init_l_Lean_Meta_Grind_Arith_CommRing_ppRing_x3f___closed__1() { +_start: +{ +lean_object* x_1; +x_1 = lean_mk_string_unchecked("ring", 4, 4); +return x_1; +} +} +static lean_object* _init_l_Lean_Meta_Grind_Arith_CommRing_ppRing_x3f___closed__2() { +_start: +{ +lean_object* x_1; lean_object* x_2; lean_object* x_3; +x_1 = lean_box(0); +x_2 = l_Lean_Meta_Grind_Arith_CommRing_ppRing_x3f___closed__1; +x_3 = l_Lean_Name_str___override(x_1, x_2); +return x_3; +} +} +LEAN_EXPORT lean_object* l_Lean_Meta_Grind_Arith_CommRing_ppRing_x3f(lean_object* x_1, lean_object* x_2, lean_object* x_3, lean_object* x_4, lean_object* x_5, lean_object* x_6) { +_start: +{ +lean_object* x_7; lean_object* x_8; lean_object* x_9; lean_object* x_10; lean_object* x_11; lean_object* x_12; uint8_t x_13; +lean_inc(x_1); +x_7 = l_Lean_Meta_Grind_Arith_CommRing_ppBasis_x3f(x_1, x_2, x_3, x_4, x_5, x_6); +x_8 = lean_ctor_get(x_7, 0); +lean_inc(x_8); +x_9 = lean_ctor_get(x_7, 1); +lean_inc(x_9); +lean_dec(x_7); +x_10 = l_Lean_toTraceElem___at_Lean_Meta_Grind_Arith_CommRing_ppBasis_x3f___spec__10___closed__1; +x_11 = l___private_Lean_Meta_Tactic_Grind_Arith_CommRing_PP_0__Lean_Meta_Grind_Arith_CommRing_push(x_10, x_8); +lean_inc(x_1); +x_12 = l_Lean_Meta_Grind_Arith_CommRing_ppDiseqs_x3f(x_1, x_2, x_3, x_4, x_5, x_9); +x_13 = !lean_is_exclusive(x_12); +if (x_13 == 0) +{ +lean_object* x_14; lean_object* x_15; lean_object* x_16; lean_object* x_17; lean_object* x_18; lean_object* x_19; +x_14 = lean_ctor_get(x_12, 0); +x_15 = l___private_Lean_Meta_Tactic_Grind_Arith_CommRing_PP_0__Lean_Meta_Grind_Arith_CommRing_push(x_11, x_14); +x_16 = lean_alloc_closure((void*)(l_Lean_Meta_Grind_Arith_CommRing_ppRing_x3f___lambda__1___boxed), 2, 1); +lean_closure_set(x_16, 0, x_1); +x_17 = lean_mk_thunk(x_16); +x_18 = l_Lean_Meta_Grind_Arith_CommRing_ppRing_x3f___closed__2; +x_19 = l___private_Lean_Meta_Tactic_Grind_Arith_CommRing_PP_0__Lean_Meta_Grind_Arith_CommRing_toOption(x_18, x_17, x_15); +lean_dec(x_17); +lean_ctor_set(x_12, 0, x_19); +return x_12; +} +else +{ +lean_object* x_20; lean_object* x_21; lean_object* x_22; lean_object* x_23; lean_object* x_24; lean_object* x_25; lean_object* x_26; 
lean_object* x_27; +x_20 = lean_ctor_get(x_12, 0); +x_21 = lean_ctor_get(x_12, 1); +lean_inc(x_21); +lean_inc(x_20); +lean_dec(x_12); +x_22 = l___private_Lean_Meta_Tactic_Grind_Arith_CommRing_PP_0__Lean_Meta_Grind_Arith_CommRing_push(x_11, x_20); +x_23 = lean_alloc_closure((void*)(l_Lean_Meta_Grind_Arith_CommRing_ppRing_x3f___lambda__1___boxed), 2, 1); +lean_closure_set(x_23, 0, x_1); +x_24 = lean_mk_thunk(x_23); +x_25 = l_Lean_Meta_Grind_Arith_CommRing_ppRing_x3f___closed__2; +x_26 = l___private_Lean_Meta_Tactic_Grind_Arith_CommRing_PP_0__Lean_Meta_Grind_Arith_CommRing_toOption(x_25, x_24, x_22); +lean_dec(x_24); +x_27 = lean_alloc_ctor(0, 2, 0); +lean_ctor_set(x_27, 0, x_26); +lean_ctor_set(x_27, 1, x_21); +return x_27; +} +} +} +LEAN_EXPORT lean_object* l_Lean_Meta_Grind_Arith_CommRing_ppRing_x3f___lambda__1___boxed(lean_object* x_1, lean_object* x_2) { +_start: +{ +lean_object* x_3; +x_3 = l_Lean_Meta_Grind_Arith_CommRing_ppRing_x3f___lambda__1(x_1, x_2); +lean_dec(x_2); +return x_3; +} +} +LEAN_EXPORT lean_object* l_Lean_Meta_Grind_Arith_CommRing_ppRing_x3f___boxed(lean_object* x_1, lean_object* x_2, lean_object* x_3, lean_object* x_4, lean_object* x_5, lean_object* x_6) { +_start: +{ +lean_object* x_7; +x_7 = l_Lean_Meta_Grind_Arith_CommRing_ppRing_x3f(x_1, x_2, x_3, x_4, x_5, x_6); +lean_dec(x_5); +lean_dec(x_4); +lean_dec(x_3); +lean_dec(x_2); +return x_7; +} +} +LEAN_EXPORT lean_object* l_Array_forIn_x27Unsafe_loop___at_Lean_Meta_Grind_Arith_CommRing_pp_x3f___spec__1(lean_object* x_1, lean_object* x_2, lean_object* x_3, size_t x_4, size_t x_5, lean_object* x_6, lean_object* x_7, lean_object* x_8, lean_object* x_9, lean_object* x_10, lean_object* x_11) { +_start: +{ +uint8_t x_12; +x_12 = lean_usize_dec_lt(x_5, x_4); +if (x_12 == 0) +{ +lean_object* x_13; +x_13 = lean_alloc_ctor(0, 2, 0); +lean_ctor_set(x_13, 0, x_6); +lean_ctor_set(x_13, 1, x_11); +return x_13; +} +else +{ +lean_object* x_14; lean_object* x_15; lean_object* x_16; +x_14 = lean_array_uget(x_3, x_5); +x_15 = l_Lean_Meta_Grind_Arith_CommRing_ppRing_x3f(x_14, x_7, x_8, x_9, x_10, x_11); +x_16 = lean_ctor_get(x_15, 0); +lean_inc(x_16); +if (lean_obj_tag(x_16) == 0) +{ +lean_object* x_17; size_t x_18; size_t x_19; +x_17 = lean_ctor_get(x_15, 1); +lean_inc(x_17); +lean_dec(x_15); +x_18 = 1; +x_19 = lean_usize_add(x_5, x_18); +x_5 = x_19; +x_11 = x_17; +goto _start; +} +else +{ +lean_object* x_21; lean_object* x_22; lean_object* x_23; size_t x_24; size_t x_25; +x_21 = lean_ctor_get(x_15, 1); +lean_inc(x_21); +lean_dec(x_15); +x_22 = lean_ctor_get(x_16, 0); +lean_inc(x_22); +lean_dec(x_16); +x_23 = lean_array_push(x_6, x_22); +x_24 = 1; +x_25 = lean_usize_add(x_5, x_24); +x_5 = x_25; +x_6 = x_23; +x_11 = x_21; +goto _start; +} +} +} +} +static lean_object* _init_l_Lean_Meta_Grind_Arith_CommRing_pp_x3f___closed__1() { +_start: +{ +lean_object* x_1; double x_2; uint8_t x_3; lean_object* x_4; lean_object* x_5; +x_1 = l_Lean_Meta_Grind_Arith_CommRing_ppRing_x3f___closed__2; +x_2 = l___private_Lean_Meta_Tactic_Grind_Arith_CommRing_PP_0__Lean_Meta_Grind_Arith_CommRing_toOption___closed__1; +x_3 = 1; +x_4 = l___private_Lean_Meta_Tactic_Grind_Arith_CommRing_PP_0__Lean_Meta_Grind_Arith_CommRing_toOption___closed__2; +x_5 = lean_alloc_ctor(0, 2, 17); +lean_ctor_set(x_5, 0, x_1); +lean_ctor_set(x_5, 1, x_4); +lean_ctor_set_float(x_5, sizeof(void*)*2, x_2); +lean_ctor_set_float(x_5, sizeof(void*)*2 + 8, x_2); +lean_ctor_set_uint8(x_5, sizeof(void*)*2 + 16, x_3); +return x_5; +} +} +static lean_object* 
_init_l_Lean_Meta_Grind_Arith_CommRing_pp_x3f___closed__2() { +_start: +{ +lean_object* x_1; +x_1 = lean_mk_string_unchecked("Rings", 5, 5); +return x_1; +} +} +static lean_object* _init_l_Lean_Meta_Grind_Arith_CommRing_pp_x3f___closed__3() { +_start: +{ +lean_object* x_1; lean_object* x_2; +x_1 = l_Lean_Meta_Grind_Arith_CommRing_pp_x3f___closed__2; +x_2 = lean_alloc_ctor(3, 1, 0); +lean_ctor_set(x_2, 0, x_1); +return x_2; +} +} +static lean_object* _init_l_Lean_Meta_Grind_Arith_CommRing_pp_x3f___closed__4() { +_start: +{ +lean_object* x_1; lean_object* x_2; +x_1 = l_Lean_Meta_Grind_Arith_CommRing_pp_x3f___closed__3; +x_2 = l_Lean_MessageData_ofFormat(x_1); +return x_2; +} +} +LEAN_EXPORT lean_object* l_Lean_Meta_Grind_Arith_CommRing_pp_x3f(lean_object* x_1, lean_object* x_2, lean_object* x_3, lean_object* x_4, lean_object* x_5, lean_object* x_6) { +_start: +{ +lean_object* x_7; lean_object* x_8; lean_object* x_9; lean_object* x_10; size_t x_11; size_t x_12; lean_object* x_13; lean_object* x_14; uint8_t x_15; +x_7 = lean_box(0); +x_8 = lean_ctor_get(x_1, 14); +x_9 = lean_ctor_get(x_8, 2); +x_10 = lean_ctor_get(x_9, 0); +x_11 = lean_array_size(x_10); +x_12 = 0; +x_13 = l_Lean_toTraceElem___at_Lean_Meta_Grind_Arith_CommRing_ppBasis_x3f___spec__10___closed__1; +x_14 = l_Array_forIn_x27Unsafe_loop___at_Lean_Meta_Grind_Arith_CommRing_pp_x3f___spec__1(x_7, x_10, x_10, x_11, x_12, x_13, x_2, x_3, x_4, x_5, x_6); +x_15 = !lean_is_exclusive(x_14); +if (x_15 == 0) +{ +lean_object* x_16; uint8_t x_17; +x_16 = lean_ctor_get(x_14, 0); +x_17 = l_Array_isEmpty___rarg(x_16); +if (x_17 == 0) +{ +lean_object* x_18; lean_object* x_19; uint8_t x_20; +x_18 = lean_array_get_size(x_16); +x_19 = lean_unsigned_to_nat(1u); +x_20 = lean_nat_dec_eq(x_18, x_19); +lean_dec(x_18); +if (x_20 == 0) +{ +lean_object* x_21; lean_object* x_22; lean_object* x_23; lean_object* x_24; +x_21 = l_Lean_Meta_Grind_Arith_CommRing_pp_x3f___closed__1; +x_22 = l_Lean_Meta_Grind_Arith_CommRing_pp_x3f___closed__4; +x_23 = lean_alloc_ctor(9, 3, 0); +lean_ctor_set(x_23, 0, x_21); +lean_ctor_set(x_23, 1, x_22); +lean_ctor_set(x_23, 2, x_16); +x_24 = lean_alloc_ctor(1, 1, 0); +lean_ctor_set(x_24, 0, x_23); +lean_ctor_set(x_14, 0, x_24); +return x_14; +} +else +{ +lean_object* x_25; lean_object* x_26; lean_object* x_27; +x_25 = lean_unsigned_to_nat(0u); +x_26 = lean_array_fget(x_16, x_25); +lean_dec(x_16); +x_27 = lean_alloc_ctor(1, 1, 0); +lean_ctor_set(x_27, 0, x_26); +lean_ctor_set(x_14, 0, x_27); +return x_14; +} +} +else +{ +lean_object* x_28; +lean_dec(x_16); +x_28 = lean_box(0); +lean_ctor_set(x_14, 0, x_28); +return x_14; +} +} +else +{ +lean_object* x_29; lean_object* x_30; uint8_t x_31; +x_29 = lean_ctor_get(x_14, 0); +x_30 = lean_ctor_get(x_14, 1); +lean_inc(x_30); +lean_inc(x_29); +lean_dec(x_14); +x_31 = l_Array_isEmpty___rarg(x_29); +if (x_31 == 0) +{ +lean_object* x_32; lean_object* x_33; uint8_t x_34; +x_32 = lean_array_get_size(x_29); +x_33 = lean_unsigned_to_nat(1u); +x_34 = lean_nat_dec_eq(x_32, x_33); +lean_dec(x_32); +if (x_34 == 0) +{ +lean_object* x_35; lean_object* x_36; lean_object* x_37; lean_object* x_38; lean_object* x_39; +x_35 = l_Lean_Meta_Grind_Arith_CommRing_pp_x3f___closed__1; +x_36 = l_Lean_Meta_Grind_Arith_CommRing_pp_x3f___closed__4; +x_37 = lean_alloc_ctor(9, 3, 0); +lean_ctor_set(x_37, 0, x_35); +lean_ctor_set(x_37, 1, x_36); +lean_ctor_set(x_37, 2, x_29); +x_38 = lean_alloc_ctor(1, 1, 0); +lean_ctor_set(x_38, 0, x_37); +x_39 = lean_alloc_ctor(0, 2, 0); +lean_ctor_set(x_39, 0, x_38); +lean_ctor_set(x_39, 
1, x_30); +return x_39; +} +else +{ +lean_object* x_40; lean_object* x_41; lean_object* x_42; lean_object* x_43; +x_40 = lean_unsigned_to_nat(0u); +x_41 = lean_array_fget(x_29, x_40); +lean_dec(x_29); +x_42 = lean_alloc_ctor(1, 1, 0); +lean_ctor_set(x_42, 0, x_41); +x_43 = lean_alloc_ctor(0, 2, 0); +lean_ctor_set(x_43, 0, x_42); +lean_ctor_set(x_43, 1, x_30); +return x_43; +} +} +else +{ +lean_object* x_44; lean_object* x_45; +lean_dec(x_29); +x_44 = lean_box(0); +x_45 = lean_alloc_ctor(0, 2, 0); +lean_ctor_set(x_45, 0, x_44); +lean_ctor_set(x_45, 1, x_30); +return x_45; +} +} +} +} +LEAN_EXPORT lean_object* l_Array_forIn_x27Unsafe_loop___at_Lean_Meta_Grind_Arith_CommRing_pp_x3f___spec__1___boxed(lean_object* x_1, lean_object* x_2, lean_object* x_3, lean_object* x_4, lean_object* x_5, lean_object* x_6, lean_object* x_7, lean_object* x_8, lean_object* x_9, lean_object* x_10, lean_object* x_11) { +_start: +{ +size_t x_12; size_t x_13; lean_object* x_14; +x_12 = lean_unbox_usize(x_4); +lean_dec(x_4); +x_13 = lean_unbox_usize(x_5); +lean_dec(x_5); +x_14 = l_Array_forIn_x27Unsafe_loop___at_Lean_Meta_Grind_Arith_CommRing_pp_x3f___spec__1(x_1, x_2, x_3, x_12, x_13, x_6, x_7, x_8, x_9, x_10, x_11); +lean_dec(x_10); +lean_dec(x_9); +lean_dec(x_8); +lean_dec(x_7); +lean_dec(x_3); +lean_dec(x_2); +lean_dec(x_1); +return x_14; +} +} +LEAN_EXPORT lean_object* l_Lean_Meta_Grind_Arith_CommRing_pp_x3f___boxed(lean_object* x_1, lean_object* x_2, lean_object* x_3, lean_object* x_4, lean_object* x_5, lean_object* x_6) { +_start: +{ +lean_object* x_7; +x_7 = l_Lean_Meta_Grind_Arith_CommRing_pp_x3f(x_1, x_2, x_3, x_4, x_5, x_6); +lean_dec(x_5); +lean_dec(x_4); +lean_dec(x_3); +lean_dec(x_2); +lean_dec(x_1); +return x_7; +} +} +lean_object* initialize_Lean_Meta_Tactic_Grind_Arith_CommRing_DenoteExpr(uint8_t builtin, lean_object*); +static bool _G_initialized = false; +LEAN_EXPORT lean_object* initialize_Lean_Meta_Tactic_Grind_Arith_CommRing_PP(uint8_t builtin, lean_object* w) { +lean_object * res; +if (_G_initialized) return lean_io_result_mk_ok(lean_box(0)); +_G_initialized = true; +res = initialize_Lean_Meta_Tactic_Grind_Arith_CommRing_DenoteExpr(builtin, lean_io_mk_world()); +if (lean_io_result_is_error(res)) return res; +lean_dec_ref(res); +l_Lean_Meta_Grind_Arith_CommRing_instMonadGetRingReaderTRingMetaM___closed__1 = _init_l_Lean_Meta_Grind_Arith_CommRing_instMonadGetRingReaderTRingMetaM___closed__1(); +lean_mark_persistent(l_Lean_Meta_Grind_Arith_CommRing_instMonadGetRingReaderTRingMetaM___closed__1); +l_Lean_Meta_Grind_Arith_CommRing_instMonadGetRingReaderTRingMetaM = _init_l_Lean_Meta_Grind_Arith_CommRing_instMonadGetRingReaderTRingMetaM(); +lean_mark_persistent(l_Lean_Meta_Grind_Arith_CommRing_instMonadGetRingReaderTRingMetaM); +l___private_Lean_Meta_Tactic_Grind_Arith_CommRing_PP_0__Lean_Meta_Grind_Arith_CommRing_toOption___closed__1 = _init_l___private_Lean_Meta_Tactic_Grind_Arith_CommRing_PP_0__Lean_Meta_Grind_Arith_CommRing_toOption___closed__1(); +l___private_Lean_Meta_Tactic_Grind_Arith_CommRing_PP_0__Lean_Meta_Grind_Arith_CommRing_toOption___closed__2 = _init_l___private_Lean_Meta_Tactic_Grind_Arith_CommRing_PP_0__Lean_Meta_Grind_Arith_CommRing_toOption___closed__2(); +lean_mark_persistent(l___private_Lean_Meta_Tactic_Grind_Arith_CommRing_PP_0__Lean_Meta_Grind_Arith_CommRing_toOption___closed__2); +l___private_Lean_Meta_Tactic_Grind_Arith_CommRing_DenoteExpr_0__Lean_Meta_Grind_Arith_CommRing_denoteNum___at_Lean_Meta_Grind_Arith_CommRing_ppBasis_x3f___spec__3___closed__1 = 
_init_l___private_Lean_Meta_Tactic_Grind_Arith_CommRing_DenoteExpr_0__Lean_Meta_Grind_Arith_CommRing_denoteNum___at_Lean_Meta_Grind_Arith_CommRing_ppBasis_x3f___spec__3___closed__1(); +lean_mark_persistent(l___private_Lean_Meta_Tactic_Grind_Arith_CommRing_DenoteExpr_0__Lean_Meta_Grind_Arith_CommRing_denoteNum___at_Lean_Meta_Grind_Arith_CommRing_ppBasis_x3f___spec__3___closed__1); +l___private_Lean_Meta_Tactic_Grind_Arith_CommRing_DenoteExpr_0__Lean_Meta_Grind_Arith_CommRing_denoteNum___at_Lean_Meta_Grind_Arith_CommRing_ppBasis_x3f___spec__3___closed__2 = _init_l___private_Lean_Meta_Tactic_Grind_Arith_CommRing_DenoteExpr_0__Lean_Meta_Grind_Arith_CommRing_denoteNum___at_Lean_Meta_Grind_Arith_CommRing_ppBasis_x3f___spec__3___closed__2(); +lean_mark_persistent(l___private_Lean_Meta_Tactic_Grind_Arith_CommRing_DenoteExpr_0__Lean_Meta_Grind_Arith_CommRing_denoteNum___at_Lean_Meta_Grind_Arith_CommRing_ppBasis_x3f___spec__3___closed__2); +l___private_Lean_Meta_Tactic_Grind_Arith_CommRing_DenoteExpr_0__Lean_Meta_Grind_Arith_CommRing_denoteNum___at_Lean_Meta_Grind_Arith_CommRing_ppBasis_x3f___spec__3___closed__3 = _init_l___private_Lean_Meta_Tactic_Grind_Arith_CommRing_DenoteExpr_0__Lean_Meta_Grind_Arith_CommRing_denoteNum___at_Lean_Meta_Grind_Arith_CommRing_ppBasis_x3f___spec__3___closed__3(); +lean_mark_persistent(l___private_Lean_Meta_Tactic_Grind_Arith_CommRing_DenoteExpr_0__Lean_Meta_Grind_Arith_CommRing_denoteNum___at_Lean_Meta_Grind_Arith_CommRing_ppBasis_x3f___spec__3___closed__3); +l___private_Lean_Meta_Tactic_Grind_Arith_CommRing_DenoteExpr_0__Lean_Meta_Grind_Arith_CommRing_denoteNum___at_Lean_Meta_Grind_Arith_CommRing_ppBasis_x3f___spec__3___closed__4 = _init_l___private_Lean_Meta_Tactic_Grind_Arith_CommRing_DenoteExpr_0__Lean_Meta_Grind_Arith_CommRing_denoteNum___at_Lean_Meta_Grind_Arith_CommRing_ppBasis_x3f___spec__3___closed__4(); +lean_mark_persistent(l___private_Lean_Meta_Tactic_Grind_Arith_CommRing_DenoteExpr_0__Lean_Meta_Grind_Arith_CommRing_denoteNum___at_Lean_Meta_Grind_Arith_CommRing_ppBasis_x3f___spec__3___closed__4); +l___private_Lean_Meta_Tactic_Grind_Arith_CommRing_DenoteExpr_0__Lean_Meta_Grind_Arith_CommRing_denoteNum___at_Lean_Meta_Grind_Arith_CommRing_ppBasis_x3f___spec__3___closed__5 = _init_l___private_Lean_Meta_Tactic_Grind_Arith_CommRing_DenoteExpr_0__Lean_Meta_Grind_Arith_CommRing_denoteNum___at_Lean_Meta_Grind_Arith_CommRing_ppBasis_x3f___spec__3___closed__5(); +lean_mark_persistent(l___private_Lean_Meta_Tactic_Grind_Arith_CommRing_DenoteExpr_0__Lean_Meta_Grind_Arith_CommRing_denoteNum___at_Lean_Meta_Grind_Arith_CommRing_ppBasis_x3f___spec__3___closed__5); +l___private_Lean_Meta_Tactic_Grind_Arith_CommRing_DenoteExpr_0__Lean_Meta_Grind_Arith_CommRing_denoteNum___at_Lean_Meta_Grind_Arith_CommRing_ppBasis_x3f___spec__3___closed__6 = _init_l___private_Lean_Meta_Tactic_Grind_Arith_CommRing_DenoteExpr_0__Lean_Meta_Grind_Arith_CommRing_denoteNum___at_Lean_Meta_Grind_Arith_CommRing_ppBasis_x3f___spec__3___closed__6(); +lean_mark_persistent(l___private_Lean_Meta_Tactic_Grind_Arith_CommRing_DenoteExpr_0__Lean_Meta_Grind_Arith_CommRing_denoteNum___at_Lean_Meta_Grind_Arith_CommRing_ppBasis_x3f___spec__3___closed__6); +l___private_Lean_Meta_Tactic_Grind_Arith_CommRing_DenoteExpr_0__Lean_Meta_Grind_Arith_CommRing_denoteNum___at_Lean_Meta_Grind_Arith_CommRing_ppBasis_x3f___spec__3___closed__7 = _init_l___private_Lean_Meta_Tactic_Grind_Arith_CommRing_DenoteExpr_0__Lean_Meta_Grind_Arith_CommRing_denoteNum___at_Lean_Meta_Grind_Arith_CommRing_ppBasis_x3f___spec__3___closed__7(); 
+lean_mark_persistent(l___private_Lean_Meta_Tactic_Grind_Arith_CommRing_DenoteExpr_0__Lean_Meta_Grind_Arith_CommRing_denoteNum___at_Lean_Meta_Grind_Arith_CommRing_ppBasis_x3f___spec__3___closed__7); +l___private_Lean_Meta_Tactic_Grind_Arith_CommRing_DenoteExpr_0__Lean_Meta_Grind_Arith_CommRing_denoteNum___at_Lean_Meta_Grind_Arith_CommRing_ppBasis_x3f___spec__3___closed__8 = _init_l___private_Lean_Meta_Tactic_Grind_Arith_CommRing_DenoteExpr_0__Lean_Meta_Grind_Arith_CommRing_denoteNum___at_Lean_Meta_Grind_Arith_CommRing_ppBasis_x3f___spec__3___closed__8(); +lean_mark_persistent(l___private_Lean_Meta_Tactic_Grind_Arith_CommRing_DenoteExpr_0__Lean_Meta_Grind_Arith_CommRing_denoteNum___at_Lean_Meta_Grind_Arith_CommRing_ppBasis_x3f___spec__3___closed__8); +l_Lean_Grind_CommRing_Mon_denoteExpr___at_Lean_Meta_Grind_Arith_CommRing_ppBasis_x3f___spec__5___closed__1 = _init_l_Lean_Grind_CommRing_Mon_denoteExpr___at_Lean_Meta_Grind_Arith_CommRing_ppBasis_x3f___spec__5___closed__1(); +lean_mark_persistent(l_Lean_Grind_CommRing_Mon_denoteExpr___at_Lean_Meta_Grind_Arith_CommRing_ppBasis_x3f___spec__5___closed__1); +l___private_Lean_Meta_Tactic_Grind_Arith_CommRing_DenoteExpr_0__Lean_Meta_Grind_Arith_CommRing_mkEq___at_Lean_Meta_Grind_Arith_CommRing_ppBasis_x3f___spec__9___closed__1 = _init_l___private_Lean_Meta_Tactic_Grind_Arith_CommRing_DenoteExpr_0__Lean_Meta_Grind_Arith_CommRing_mkEq___at_Lean_Meta_Grind_Arith_CommRing_ppBasis_x3f___spec__9___closed__1(); +lean_mark_persistent(l___private_Lean_Meta_Tactic_Grind_Arith_CommRing_DenoteExpr_0__Lean_Meta_Grind_Arith_CommRing_mkEq___at_Lean_Meta_Grind_Arith_CommRing_ppBasis_x3f___spec__9___closed__1); +l___private_Lean_Meta_Tactic_Grind_Arith_CommRing_DenoteExpr_0__Lean_Meta_Grind_Arith_CommRing_mkEq___at_Lean_Meta_Grind_Arith_CommRing_ppBasis_x3f___spec__9___closed__2 = _init_l___private_Lean_Meta_Tactic_Grind_Arith_CommRing_DenoteExpr_0__Lean_Meta_Grind_Arith_CommRing_mkEq___at_Lean_Meta_Grind_Arith_CommRing_ppBasis_x3f___spec__9___closed__2(); +lean_mark_persistent(l___private_Lean_Meta_Tactic_Grind_Arith_CommRing_DenoteExpr_0__Lean_Meta_Grind_Arith_CommRing_mkEq___at_Lean_Meta_Grind_Arith_CommRing_ppBasis_x3f___spec__9___closed__2); +l_Lean_toTraceElem___at_Lean_Meta_Grind_Arith_CommRing_ppBasis_x3f___spec__10___closed__1 = _init_l_Lean_toTraceElem___at_Lean_Meta_Grind_Arith_CommRing_ppBasis_x3f___spec__10___closed__1(); +lean_mark_persistent(l_Lean_toTraceElem___at_Lean_Meta_Grind_Arith_CommRing_ppBasis_x3f___spec__10___closed__1); +l_List_forIn_x27_loop___at_Lean_Meta_Grind_Arith_CommRing_ppBasis_x3f___spec__11___closed__1 = _init_l_List_forIn_x27_loop___at_Lean_Meta_Grind_Arith_CommRing_ppBasis_x3f___spec__11___closed__1(); +lean_mark_persistent(l_List_forIn_x27_loop___at_Lean_Meta_Grind_Arith_CommRing_ppBasis_x3f___spec__11___closed__1); +l_List_forIn_x27_loop___at_Lean_Meta_Grind_Arith_CommRing_ppBasis_x3f___spec__11___closed__2 = _init_l_List_forIn_x27_loop___at_Lean_Meta_Grind_Arith_CommRing_ppBasis_x3f___spec__11___closed__2(); +lean_mark_persistent(l_List_forIn_x27_loop___at_Lean_Meta_Grind_Arith_CommRing_ppBasis_x3f___spec__11___closed__2); +l_Lean_Meta_Grind_Arith_CommRing_ppBasis_x3f___lambda__1___closed__1 = _init_l_Lean_Meta_Grind_Arith_CommRing_ppBasis_x3f___lambda__1___closed__1(); +lean_mark_persistent(l_Lean_Meta_Grind_Arith_CommRing_ppBasis_x3f___lambda__1___closed__1); +l_Lean_Meta_Grind_Arith_CommRing_ppBasis_x3f___lambda__1___closed__2 = _init_l_Lean_Meta_Grind_Arith_CommRing_ppBasis_x3f___lambda__1___closed__2(); 
+lean_mark_persistent(l_Lean_Meta_Grind_Arith_CommRing_ppBasis_x3f___lambda__1___closed__2); +l_Lean_Meta_Grind_Arith_CommRing_ppBasis_x3f___lambda__1___closed__3 = _init_l_Lean_Meta_Grind_Arith_CommRing_ppBasis_x3f___lambda__1___closed__3(); +lean_mark_persistent(l_Lean_Meta_Grind_Arith_CommRing_ppBasis_x3f___lambda__1___closed__3); +l_Lean_Meta_Grind_Arith_CommRing_ppBasis_x3f___closed__1 = _init_l_Lean_Meta_Grind_Arith_CommRing_ppBasis_x3f___closed__1(); +lean_mark_persistent(l_Lean_Meta_Grind_Arith_CommRing_ppBasis_x3f___closed__1); +l_Lean_Meta_Grind_Arith_CommRing_ppBasis_x3f___closed__2 = _init_l_Lean_Meta_Grind_Arith_CommRing_ppBasis_x3f___closed__2(); +lean_mark_persistent(l_Lean_Meta_Grind_Arith_CommRing_ppBasis_x3f___closed__2); +l_Lean_Meta_Grind_Arith_CommRing_ppBasis_x3f___closed__3 = _init_l_Lean_Meta_Grind_Arith_CommRing_ppBasis_x3f___closed__3(); +lean_mark_persistent(l_Lean_Meta_Grind_Arith_CommRing_ppBasis_x3f___closed__3); +l_Lean_Meta_Grind_Arith_CommRing_ppBasis_x3f___closed__4 = _init_l_Lean_Meta_Grind_Arith_CommRing_ppBasis_x3f___closed__4(); +lean_mark_persistent(l_Lean_Meta_Grind_Arith_CommRing_ppBasis_x3f___closed__4); +l_Lean_Meta_Grind_Arith_CommRing_ppDiseqs_x3f___lambda__1___closed__1 = _init_l_Lean_Meta_Grind_Arith_CommRing_ppDiseqs_x3f___lambda__1___closed__1(); +lean_mark_persistent(l_Lean_Meta_Grind_Arith_CommRing_ppDiseqs_x3f___lambda__1___closed__1); +l_Lean_Meta_Grind_Arith_CommRing_ppDiseqs_x3f___lambda__1___closed__2 = _init_l_Lean_Meta_Grind_Arith_CommRing_ppDiseqs_x3f___lambda__1___closed__2(); +lean_mark_persistent(l_Lean_Meta_Grind_Arith_CommRing_ppDiseqs_x3f___lambda__1___closed__2); +l_Lean_Meta_Grind_Arith_CommRing_ppDiseqs_x3f___lambda__1___closed__3 = _init_l_Lean_Meta_Grind_Arith_CommRing_ppDiseqs_x3f___lambda__1___closed__3(); +lean_mark_persistent(l_Lean_Meta_Grind_Arith_CommRing_ppDiseqs_x3f___lambda__1___closed__3); +l_Lean_Meta_Grind_Arith_CommRing_ppDiseqs_x3f___closed__1 = _init_l_Lean_Meta_Grind_Arith_CommRing_ppDiseqs_x3f___closed__1(); +lean_mark_persistent(l_Lean_Meta_Grind_Arith_CommRing_ppDiseqs_x3f___closed__1); +l_Lean_Meta_Grind_Arith_CommRing_ppDiseqs_x3f___closed__2 = _init_l_Lean_Meta_Grind_Arith_CommRing_ppDiseqs_x3f___closed__2(); +lean_mark_persistent(l_Lean_Meta_Grind_Arith_CommRing_ppDiseqs_x3f___closed__2); +l_Lean_Meta_Grind_Arith_CommRing_ppDiseqs_x3f___closed__3 = _init_l_Lean_Meta_Grind_Arith_CommRing_ppDiseqs_x3f___closed__3(); +lean_mark_persistent(l_Lean_Meta_Grind_Arith_CommRing_ppDiseqs_x3f___closed__3); +l_Lean_Meta_Grind_Arith_CommRing_ppDiseqs_x3f___closed__4 = _init_l_Lean_Meta_Grind_Arith_CommRing_ppDiseqs_x3f___closed__4(); +lean_mark_persistent(l_Lean_Meta_Grind_Arith_CommRing_ppDiseqs_x3f___closed__4); +l_Lean_Meta_Grind_Arith_CommRing_ppRing_x3f___lambda__1___closed__1 = _init_l_Lean_Meta_Grind_Arith_CommRing_ppRing_x3f___lambda__1___closed__1(); +lean_mark_persistent(l_Lean_Meta_Grind_Arith_CommRing_ppRing_x3f___lambda__1___closed__1); +l_Lean_Meta_Grind_Arith_CommRing_ppRing_x3f___lambda__1___closed__2 = _init_l_Lean_Meta_Grind_Arith_CommRing_ppRing_x3f___lambda__1___closed__2(); +lean_mark_persistent(l_Lean_Meta_Grind_Arith_CommRing_ppRing_x3f___lambda__1___closed__2); +l_Lean_Meta_Grind_Arith_CommRing_ppRing_x3f___lambda__1___closed__3 = _init_l_Lean_Meta_Grind_Arith_CommRing_ppRing_x3f___lambda__1___closed__3(); +lean_mark_persistent(l_Lean_Meta_Grind_Arith_CommRing_ppRing_x3f___lambda__1___closed__3); +l_Lean_Meta_Grind_Arith_CommRing_ppRing_x3f___lambda__1___closed__4 = 
_init_l_Lean_Meta_Grind_Arith_CommRing_ppRing_x3f___lambda__1___closed__4(); +lean_mark_persistent(l_Lean_Meta_Grind_Arith_CommRing_ppRing_x3f___lambda__1___closed__4); +l_Lean_Meta_Grind_Arith_CommRing_ppRing_x3f___closed__1 = _init_l_Lean_Meta_Grind_Arith_CommRing_ppRing_x3f___closed__1(); +lean_mark_persistent(l_Lean_Meta_Grind_Arith_CommRing_ppRing_x3f___closed__1); +l_Lean_Meta_Grind_Arith_CommRing_ppRing_x3f___closed__2 = _init_l_Lean_Meta_Grind_Arith_CommRing_ppRing_x3f___closed__2(); +lean_mark_persistent(l_Lean_Meta_Grind_Arith_CommRing_ppRing_x3f___closed__2); +l_Lean_Meta_Grind_Arith_CommRing_pp_x3f___closed__1 = _init_l_Lean_Meta_Grind_Arith_CommRing_pp_x3f___closed__1(); +lean_mark_persistent(l_Lean_Meta_Grind_Arith_CommRing_pp_x3f___closed__1); +l_Lean_Meta_Grind_Arith_CommRing_pp_x3f___closed__2 = _init_l_Lean_Meta_Grind_Arith_CommRing_pp_x3f___closed__2(); +lean_mark_persistent(l_Lean_Meta_Grind_Arith_CommRing_pp_x3f___closed__2); +l_Lean_Meta_Grind_Arith_CommRing_pp_x3f___closed__3 = _init_l_Lean_Meta_Grind_Arith_CommRing_pp_x3f___closed__3(); +lean_mark_persistent(l_Lean_Meta_Grind_Arith_CommRing_pp_x3f___closed__3); +l_Lean_Meta_Grind_Arith_CommRing_pp_x3f___closed__4 = _init_l_Lean_Meta_Grind_Arith_CommRing_pp_x3f___closed__4(); +lean_mark_persistent(l_Lean_Meta_Grind_Arith_CommRing_pp_x3f___closed__4); +return lean_io_result_mk_ok(lean_box(0)); +} +#ifdef __cplusplus +} +#endif diff --git a/stage0/stdlib/Lean/Meta/Tactic/Grind/Arith/CommRing/Proof.c b/stage0/stdlib/Lean/Meta/Tactic/Grind/Arith/CommRing/Proof.c index ea6b682b7a73..107e6904a017 100644 --- a/stage0/stdlib/Lean/Meta/Tactic/Grind/Arith/CommRing/Proof.c +++ b/stage0/stdlib/Lean/Meta/Tactic/Grind/Arith/CommRing/Proof.c @@ -36,6 +36,7 @@ lean_object* l_Lean_RArray_toExpr___rarg(lean_object*, lean_object*, lean_object static lean_object* l_Lean_Meta_Grind_Arith_CommRing_PolyDerivation_toPreNullCert___closed__3; static lean_object* l_Lean_Meta_Grind_Arith_CommRing_PolyDerivation_toPreNullCert___closed__1; static lean_object* l_Lean_Meta_Grind_Arith_CommRing_EqCnstr_toExprProof___closed__8; +LEAN_EXPORT lean_object* l_Lean_Grind_CommRing_Mon_denoteExpr_go___at_Lean_Meta_Grind_Arith_CommRing_getPolyConst___spec__6___boxed(lean_object*, lean_object*, lean_object*, lean_object*, lean_object*, lean_object*, lean_object*, lean_object*, lean_object*, lean_object*, lean_object*, lean_object*); LEAN_EXPORT lean_object* l___private_Lean_Meta_Tactic_Grind_Arith_CommRing_Proof_0__Lean_Meta_Grind_Arith_CommRing_Stepwise_caching_unsafe__1(lean_object*); LEAN_EXPORT lean_object* l___private_Lean_Meta_Tactic_Grind_Arith_CommRing_Proof_0__Lean_Meta_Grind_Arith_CommRing_Null_caching(lean_object*); LEAN_EXPORT lean_object* l_Std_DHashMap_Internal_Raw_u2080_expand___at_Lean_Meta_Grind_Arith_CommRing_Stepwise_mkPolyDecl___spec__4(lean_object*); @@ -50,23 +51,21 @@ LEAN_EXPORT lean_object* l_Array_forIn_x27Unsafe_loop___at___private_Lean_Meta_T static lean_object* l_Lean_Meta_Grind_Arith_CommRing_Null_setDiseqUnsat___lambda__1___closed__9; LEAN_EXPORT lean_object* l_Lean_Meta_Grind_Arith_CommRing_PolyDerivation_getMultiplier_go___boxed(lean_object*, lean_object*); LEAN_EXPORT lean_object* l_Lean_Meta_Grind_Arith_CommRing_PolyDerivation_getMultiplier(lean_object*); -static lean_object* l_Lean_Meta_Grind_Arith_CommRing_Null_setEqUnsat___lambda__1___closed__22; static lean_object* l_panic___at___private_Lean_Meta_Tactic_Grind_Arith_CommRing_Proof_0__Lean_Meta_Grind_Arith_CommRing_Stepwise_mkImpEqExprProof___spec__1___closed__6; 
-LEAN_EXPORT lean_object* l_Lean_Meta_Grind_Arith_CommRing_EqCnstr_toExprProof___boxed(lean_object*, lean_object*, lean_object*, lean_object*, lean_object*, lean_object*, lean_object*, lean_object*, lean_object*, lean_object*, lean_object*, lean_object*, lean_object*); static lean_object* l___private_Lean_Meta_Tactic_Grind_Arith_CommRing_Proof_0__Lean_Meta_Grind_Arith_CommRing_Stepwise_withProofContext_go___closed__7; LEAN_EXPORT lean_object* l___private_Lean_Meta_Tactic_Grind_Arith_CommRing_Proof_0__Lean_Meta_Grind_Arith_CommRing_Stepwise_derivToExprProof(lean_object*, lean_object*, lean_object*, lean_object*, lean_object*, lean_object*, lean_object*, lean_object*, lean_object*, lean_object*, lean_object*, lean_object*, lean_object*); LEAN_EXPORT lean_object* l_Std_DHashMap_Internal_AssocList_get_x3f___at___private_Lean_Meta_Tactic_Grind_Arith_CommRing_Proof_0__Lean_Meta_Grind_Arith_CommRing_Stepwise_caching___spec__1___boxed(lean_object*, lean_object*); lean_object* l_Lean_Meta_Grind_Arith_CommRing_getCharInst(lean_object*, lean_object*, lean_object*, lean_object*, lean_object*, lean_object*, lean_object*, lean_object*, lean_object*, lean_object*); +LEAN_EXPORT lean_object* l_Lean_Meta_Grind_Arith_CommRing_EqCnstr_denoteExpr___at_Lean_Meta_Grind_Arith_CommRing_Null_setEqUnsat___spec__4___boxed(lean_object*, lean_object*, lean_object*, lean_object*, lean_object*, lean_object*, lean_object*, lean_object*, lean_object*, lean_object*, lean_object*); LEAN_EXPORT lean_object* l_Lean_isTracingEnabledFor___at_Lean_Meta_Grind_Arith_CommRing_PolyDerivation_toPreNullCert___spec__1___boxed(lean_object*, lean_object*, lean_object*, lean_object*, lean_object*, lean_object*, lean_object*, lean_object*, lean_object*, lean_object*, lean_object*, lean_object*); -lean_object* l_Lean_Meta_Grind_Arith_CommRing_EqCnstr_denoteExpr(lean_object*, lean_object*, lean_object*, lean_object*, lean_object*, lean_object*, lean_object*, lean_object*, lean_object*, lean_object*, lean_object*); LEAN_EXPORT lean_object* l_Std_DHashMap_Internal_Raw_u2080_expand_go___at_Lean_Meta_Grind_Arith_CommRing_Stepwise_mkMonDecl___spec__3(lean_object*, lean_object*, lean_object*); static lean_object* l_Lean_Meta_Grind_Arith_CommRing_Null_propagateEq___closed__8; +static lean_object* l___private_Lean_Meta_Tactic_Grind_Arith_CommRing_DenoteExpr_0__Lean_Meta_Grind_Arith_CommRing_denoteNum___at_Lean_Meta_Grind_Arith_CommRing_getPolyConst___spec__2___closed__6; LEAN_EXPORT lean_object* l_Lean_Meta_Grind_Arith_CommRing_Null_PreNullCert_zero; static lean_object* l_Lean_Meta_Grind_Arith_CommRing_Null_setEqUnsat___lambda__1___closed__18; static lean_object* l___private_Lean_Meta_Tactic_Grind_Arith_CommRing_Proof_0__Lean_Meta_Grind_Arith_CommRing_Stepwise_mkImpEqExprProof___closed__12; static lean_object* l___private_Lean_Meta_Tactic_Grind_Arith_CommRing_Proof_0__Lean_Meta_Grind_Arith_CommRing_Stepwise_derivToExprProof___closed__4; lean_object* l_Lean_mkApp9(lean_object*, lean_object*, lean_object*, lean_object*, lean_object*, lean_object*, lean_object*, lean_object*, lean_object*, lean_object*); -lean_object* l_Lean_Meta_Grind_Arith_CommRing_getRing(lean_object*, lean_object*, lean_object*, lean_object*, lean_object*, lean_object*, lean_object*, lean_object*, lean_object*, lean_object*); LEAN_EXPORT lean_object* l_Std_DHashMap_Internal_AssocList_get_x3f___at_Lean_Meta_Grind_Arith_CommRing_Stepwise_mkPolyDecl___spec__8(lean_object*, lean_object*); static lean_object* l_Lean_Meta_Grind_Arith_CommRing_Null_PreNullCert_zero___closed__1; 
LEAN_EXPORT lean_object* l_Lean_Meta_Grind_Arith_CommRing_PolyDerivation_getMultiplier___boxed(lean_object*); @@ -84,7 +83,6 @@ static lean_object* l___private_Lean_Meta_Tactic_Grind_Arith_CommRing_Proof_0__L size_t lean_uint64_to_usize(uint64_t); LEAN_EXPORT lean_object* l_panic___at___private_Lean_Meta_Tactic_Grind_Arith_CommRing_Proof_0__Lean_Meta_Grind_Arith_CommRing_Stepwise_mkImpEqExprProof___spec__1(lean_object*, lean_object*, lean_object*, lean_object*, lean_object*, lean_object*, lean_object*, lean_object*, lean_object*, lean_object*, lean_object*, lean_object*, lean_object*); LEAN_EXPORT lean_object* l_Lean_Meta_Grind_Arith_CommRing_Stepwise_mkMonDecl(lean_object*, lean_object*, lean_object*, lean_object*, lean_object*, lean_object*, lean_object*, lean_object*, lean_object*, lean_object*, lean_object*, lean_object*, lean_object*); -static lean_object* l_Lean_Meta_Grind_Arith_CommRing_Null_instInhabitedPreNullCert___closed__3; LEAN_EXPORT lean_object* l_Std_DHashMap_Internal_AssocList_contains___at_Lean_Meta_Grind_Arith_CommRing_Stepwise_mkExprDecl___spec__1___boxed(lean_object*, lean_object*); LEAN_EXPORT lean_object* l_Array_mapMUnsafe_map___at___private_Lean_Meta_Tactic_Grind_Arith_CommRing_Proof_0__Lean_Meta_Grind_Arith_CommRing_Stepwise_withProofContext_go___spec__8(size_t, size_t, lean_object*); lean_object* l_Lean_Level_succ___override(lean_object*); @@ -109,6 +107,8 @@ lean_object* lean_mk_array(lean_object*, lean_object*); static lean_object* l_Lean_Meta_Grind_Arith_CommRing_throwNoNatZeroDivisors___rarg___closed__3; lean_object* l_instInhabitedReaderT___rarg___boxed(lean_object*, lean_object*); static lean_object* l_Lean_Meta_Grind_Arith_CommRing_Null_instInhabitedPreNullCert___closed__2; +LEAN_EXPORT lean_object* l_Lean_Grind_CommRing_Mon_denoteExpr_go___at_Lean_Meta_Grind_Arith_CommRing_getPolyConst___spec__6(lean_object*, lean_object*, lean_object*, lean_object*, lean_object*, lean_object*, lean_object*, lean_object*, lean_object*, lean_object*, lean_object*, lean_object*); +LEAN_EXPORT lean_object* l_Lean_Meta_Grind_Arith_CommRing_nonzeroCharInst_x3f___at_Lean_Meta_Grind_Arith_CommRing_Null_setDiseqUnsat___spec__1___lambda__1___boxed(lean_object*, lean_object*, lean_object*, lean_object*, lean_object*, lean_object*, lean_object*, lean_object*, lean_object*, lean_object*, lean_object*); lean_object* lean_array_fset(lean_object*, lean_object*, lean_object*); LEAN_EXPORT lean_object* l_Array_mapMUnsafe_map___at_Lean_Meta_Grind_Arith_CommRing_Null_PreNullCert_mul___spec__1___boxed(lean_object*, lean_object*, lean_object*, lean_object*, lean_object*, lean_object*, lean_object*, lean_object*, lean_object*, lean_object*, lean_object*, lean_object*, lean_object*, lean_object*); static lean_object* l_Lean_Meta_Grind_Arith_CommRing_PolyDerivation_toPreNullCert___closed__6; @@ -125,11 +125,10 @@ static lean_object* l_Lean_Meta_Grind_Arith_CommRing_PolyDerivation_toPreNullCer static lean_object* l_Lean_Meta_Grind_Arith_CommRing_Null_setDiseqUnsat___lambda__1___closed__1; LEAN_EXPORT lean_object* l_Std_DHashMap_Internal_AssocList_get_x3f___at_Lean_Meta_Grind_Arith_CommRing_Stepwise_mkExprDecl___spec__6(lean_object*, lean_object*); static lean_object* l___private_Lean_Meta_Tactic_Grind_Arith_CommRing_Proof_0__Lean_Meta_Grind_Arith_CommRing_Stepwise_derivToExprProof___closed__3; -LEAN_EXPORT lean_object* l___private_Lean_Meta_Tactic_Grind_Arith_CommRing_Proof_0__Lean_Meta_Grind_Arith_CommRing_Stepwise_derivToExprProof___boxed(lean_object*, lean_object*, lean_object*, lean_object*, 
lean_object*, lean_object*, lean_object*, lean_object*, lean_object*, lean_object*, lean_object*, lean_object*, lean_object*); static lean_object* l___private_Lean_Meta_Tactic_Grind_Arith_CommRing_Proof_0__Lean_Meta_Grind_Arith_CommRing_Stepwise_withProofContext___closed__2; lean_object* l_Lean_mkApp10(lean_object*, lean_object*, lean_object*, lean_object*, lean_object*, lean_object*, lean_object*, lean_object*, lean_object*, lean_object*, lean_object*); -static lean_object* l_Lean_Meta_Grind_Arith_CommRing_Null_setEqUnsat___lambda__1___closed__20; LEAN_EXPORT lean_object* l_Lean_Meta_withLetDecl___at___private_Lean_Meta_Tactic_Grind_Arith_CommRing_Proof_0__Lean_Meta_Grind_Arith_CommRing_Stepwise_withProofContext___spec__1(lean_object*); +static lean_object* l___private_Lean_Meta_Tactic_Grind_Arith_CommRing_DenoteExpr_0__Lean_Meta_Grind_Arith_CommRing_denoteNum___at_Lean_Meta_Grind_Arith_CommRing_getPolyConst___spec__2___closed__2; LEAN_EXPORT lean_object* l_Std_DHashMap_Internal_AssocList_replace___at___private_Lean_Meta_Tactic_Grind_Arith_CommRing_Proof_0__Lean_Meta_Grind_Arith_CommRing_Stepwise_caching___spec__7___boxed(lean_object*, lean_object*, lean_object*); static lean_object* l_Lean_Meta_Grind_Arith_CommRing_Null_setDiseqUnsat___lambda__1___closed__3; LEAN_EXPORT lean_object* l_Lean_mkFreshId___at_Lean_Meta_Grind_Arith_CommRing_Stepwise_mkPolyDecl___spec__2(lean_object*, lean_object*, lean_object*, lean_object*, lean_object*, lean_object*, lean_object*, lean_object*, lean_object*, lean_object*); @@ -146,6 +145,7 @@ LEAN_EXPORT lean_object* l_Lean_Meta_Grind_Arith_CommRing_Null_setDiseqUnsat___l static lean_object* l_Lean_Meta_Grind_Arith_CommRing_Null_setEqUnsat___lambda__1___closed__10; lean_object* l_Lean_mkApp4(lean_object*, lean_object*, lean_object*, lean_object*, lean_object*); static lean_object* l___private_Lean_Meta_Tactic_Grind_Arith_CommRing_Proof_0__Lean_Meta_Grind_Arith_CommRing_Stepwise_mkImpEqExprProof___closed__1; +LEAN_EXPORT lean_object* l_Lean_Grind_CommRing_Poly_denoteExpr_go___at_Lean_Meta_Grind_Arith_CommRing_getPolyConst___spec__7___boxed(lean_object*, lean_object*, lean_object*, lean_object*, lean_object*, lean_object*, lean_object*, lean_object*, lean_object*, lean_object*, lean_object*, lean_object*); LEAN_EXPORT lean_object* l_Lean_Meta_Grind_Arith_CommRing_DiseqCnstr_mkNullCertExt(lean_object*, lean_object*, lean_object*, lean_object*, lean_object*, lean_object*, lean_object*, lean_object*, lean_object*, lean_object*, lean_object*); LEAN_EXPORT lean_object* l___private_Lean_Meta_Tactic_Grind_Arith_CommRing_Proof_0__Lean_Meta_Grind_Arith_CommRing_Stepwise_mkStepBasicPrefix(lean_object*, lean_object*, lean_object*, lean_object*, lean_object*, lean_object*, lean_object*, lean_object*, lean_object*, lean_object*, lean_object*, lean_object*, lean_object*); LEAN_EXPORT lean_object* l_Lean_Meta_Grind_Arith_CommRing_Null_NullCertExt_applyEqs_go(lean_object*, lean_object*, lean_object*); @@ -158,12 +158,14 @@ LEAN_EXPORT lean_object* l_Lean_Meta_Grind_Arith_CommRing_Null_NullCertExt_toNul LEAN_EXPORT lean_object* l_Lean_throwError___at_Lean_Meta_Grind_Arith_CommRing_throwNoNatZeroDivisors___spec__1(lean_object*); lean_object* l_Lean_Level_ofNat(lean_object*); LEAN_EXPORT lean_object* l_Std_DHashMap_Internal_AssocList_foldlM___at_Lean_Meta_Grind_Arith_CommRing_Stepwise_mkMonDecl___spec__4(lean_object*, lean_object*); +LEAN_EXPORT lean_object* l_Lean_Grind_CommRing_Poly_denoteExpr_denoteTerm___at_Lean_Meta_Grind_Arith_CommRing_getPolyConst___spec__3(lean_object*, 
lean_object*, lean_object*, lean_object*, lean_object*, lean_object*, lean_object*, lean_object*, lean_object*, lean_object*, lean_object*, lean_object*); static lean_object* l___private_Lean_Meta_Tactic_Grind_Arith_CommRing_Proof_0__Lean_Meta_Grind_Arith_CommRing_Stepwise_derivToExprProof___closed__7; static lean_object* l_Lean_Meta_Grind_Arith_CommRing_Null_setDiseqUnsat___lambda__1___closed__10; LEAN_EXPORT lean_object* l_Lean_Meta_Grind_Arith_CommRing_PolyDerivation_mkNullCertExt(lean_object*, lean_object*, lean_object*, lean_object*, lean_object*, lean_object*, lean_object*, lean_object*, lean_object*, lean_object*, lean_object*); LEAN_EXPORT lean_object* l_Lean_Meta_Grind_Arith_CommRing_Null_setEqUnsat___lambda__2(lean_object*, lean_object*, lean_object*, lean_object*, lean_object*, lean_object*, lean_object*, lean_object*, lean_object*, lean_object*, lean_object*, lean_object*, lean_object*); static lean_object* l___private_Lean_Meta_Tactic_Grind_Arith_CommRing_Proof_0__Lean_Meta_Grind_Arith_CommRing_Stepwise_derivToExprProof___closed__9; LEAN_EXPORT lean_object* l_Array_foldlMUnsafe_fold___at___private_Lean_Meta_Tactic_Grind_Arith_CommRing_Proof_0__Lean_Meta_Grind_Arith_CommRing_Stepwise_withProofContext_go___spec__15___boxed(lean_object*, lean_object*, lean_object*, lean_object*); +static lean_object* l___private_Lean_Meta_Tactic_Grind_Arith_CommRing_DenoteExpr_0__Lean_Meta_Grind_Arith_CommRing_denoteNum___at_Lean_Meta_Grind_Arith_CommRing_getPolyConst___spec__2___closed__7; static lean_object* l___private_Lean_Meta_Tactic_Grind_Arith_CommRing_Proof_0__Lean_Meta_Grind_Arith_CommRing_Stepwise_mkImpEqExprProof___closed__8; LEAN_EXPORT lean_object* l_Lean_Meta_Grind_Arith_CommRing_Stepwise_propagateEq(lean_object*, lean_object*, lean_object*, lean_object*, lean_object*, lean_object*, lean_object*, lean_object*, lean_object*, lean_object*, lean_object*, lean_object*, lean_object*, lean_object*, lean_object*); lean_object* l_Lean_mkApp6(lean_object*, lean_object*, lean_object*, lean_object*, lean_object*, lean_object*, lean_object*); @@ -178,6 +180,7 @@ static lean_object* l___private_Lean_Meta_Tactic_Grind_Arith_CommRing_Proof_0__L static lean_object* l_Lean_Meta_Grind_Arith_CommRing_Null_setEqUnsat___closed__3; LEAN_EXPORT lean_object* l_Lean_Meta_Grind_Arith_CommRing_toContextExpr(lean_object*, lean_object*, lean_object*, lean_object*, lean_object*, lean_object*, lean_object*, lean_object*, lean_object*, lean_object*); lean_object* lean_expr_abstract(lean_object*, lean_object*); +LEAN_EXPORT lean_object* l_Lean_Grind_CommRing_Mon_denoteExpr___at_Lean_Meta_Grind_Arith_CommRing_getPolyConst___spec__4___boxed(lean_object*, lean_object*, lean_object*, lean_object*, lean_object*, lean_object*, lean_object*, lean_object*, lean_object*, lean_object*, lean_object*); static lean_object* l___private_Lean_Meta_Tactic_Grind_Arith_CommRing_Proof_0__Lean_Meta_Grind_Arith_CommRing_Stepwise_withProofContext_go___closed__11; LEAN_EXPORT lean_object* l_Lean_isTracingEnabledFor___at_Lean_Meta_Grind_Arith_CommRing_PolyDerivation_toPreNullCert___spec__1(lean_object*, lean_object*, lean_object*, lean_object*, lean_object*, lean_object*, lean_object*, lean_object*, lean_object*, lean_object*, lean_object*, lean_object*); LEAN_EXPORT lean_object* l_Std_DHashMap_Internal_Raw_u2080_expand_go___at___private_Lean_Meta_Tactic_Grind_Arith_CommRing_Proof_0__Lean_Meta_Grind_Arith_CommRing_Null_caching___spec__4(lean_object*, lean_object*, lean_object*); @@ -193,6 +196,7 @@ lean_object* 
l___private_Lean_Meta_Basic_0__Lean_Meta_withLetDeclImp___rarg(lean LEAN_EXPORT lean_object* l___private_Lean_Meta_Tactic_Grind_Arith_CommRing_Proof_0__Lean_Meta_Grind_Arith_CommRing_Stepwise_withProofContext(lean_object*, lean_object*, lean_object*, lean_object*, lean_object*, lean_object*, lean_object*, lean_object*, lean_object*, lean_object*, lean_object*); LEAN_EXPORT lean_object* l_Std_DHashMap_Internal_Raw_u2080_expand___at___private_Lean_Meta_Tactic_Grind_Arith_CommRing_Proof_0__Lean_Meta_Grind_Arith_CommRing_Stepwise_caching___spec__3(lean_object*); LEAN_EXPORT lean_object* l_Array_foldlMUnsafe_fold___at___private_Lean_Meta_Tactic_Grind_Arith_CommRing_Proof_0__Lean_Meta_Grind_Arith_CommRing_Stepwise_withProofContext_go___spec__5___boxed(lean_object*, lean_object*, lean_object*, lean_object*); +LEAN_EXPORT lean_object* l___private_Lean_Meta_Tactic_Grind_Arith_CommRing_DenoteExpr_0__Lean_Meta_Grind_Arith_CommRing_denoteNum___at_Lean_Meta_Grind_Arith_CommRing_getPolyConst___spec__2___boxed(lean_object*, lean_object*, lean_object*, lean_object*, lean_object*, lean_object*, lean_object*, lean_object*, lean_object*, lean_object*, lean_object*); LEAN_EXPORT lean_object* l_Array_forIn_x27Unsafe_loop___at_Lean_Meta_Grind_Arith_CommRing_Null_NullCertExt_toPoly___spec__1___boxed(lean_object*, lean_object*, lean_object*, lean_object*, lean_object*, lean_object*, lean_object*, lean_object*, lean_object*, lean_object*, lean_object*, lean_object*, lean_object*, lean_object*, lean_object*, lean_object*); lean_object* l_Lean_Grind_CommRing_Poly_mulM(lean_object*, lean_object*, lean_object*, lean_object*, lean_object*, lean_object*, lean_object*, lean_object*, lean_object*, lean_object*, lean_object*, lean_object*); LEAN_EXPORT lean_object* l_Lean_Meta_Grind_Arith_CommRing_Stepwise_setEqUnsat(lean_object*, lean_object*, lean_object*, lean_object*, lean_object*, lean_object*, lean_object*, lean_object*, lean_object*, lean_object*, lean_object*); @@ -212,13 +216,19 @@ LEAN_EXPORT lean_object* l_Lean_Meta_Grind_Arith_CommRing_Stepwise_mkExprDecl(le uint64_t lean_usize_to_uint64(size_t); lean_object* lean_nat_to_int(lean_object*); static lean_object* l_Lean_Meta_Grind_Arith_CommRing_Null_propagateEq___closed__7; +static lean_object* l___private_Lean_Meta_Tactic_Grind_Arith_CommRing_DenoteExpr_0__Lean_Meta_Grind_Arith_CommRing_denoteNum___at_Lean_Meta_Grind_Arith_CommRing_getPolyConst___spec__2___closed__3; LEAN_EXPORT lean_object* l_Lean_Meta_Grind_Arith_CommRing_PolyDerivation_toPreNullCert___lambda__1___boxed(lean_object**); +lean_object* l_Lean_Meta_Grind_Arith_CommRing_nonzeroChar_x3f___at_Lean_Grind_CommRing_Expr_toPolyM___spec__1(lean_object*, lean_object*, lean_object*, lean_object*, lean_object*, lean_object*, lean_object*, lean_object*, lean_object*, lean_object*); +static lean_object* l___private_Lean_Meta_Tactic_Grind_Arith_CommRing_DenoteExpr_0__Lean_Meta_Grind_Arith_CommRing_mkEq___at_Lean_Meta_Grind_Arith_CommRing_Null_setEqUnsat___spec__5___closed__2; static lean_object* l_Lean_Meta_Grind_Arith_CommRing_EqCnstr_toExprProof___closed__13; lean_object* lean_nat_div(lean_object*, lean_object*); +LEAN_EXPORT lean_object* l_Lean_Meta_Grind_Arith_CommRing_nonzeroCharInst_x3f___at___private_Lean_Meta_Tactic_Grind_Arith_CommRing_Proof_0__Lean_Meta_Grind_Arith_CommRing_Stepwise_mkStepPrefix___spec__1___lambda__1(lean_object*, lean_object*, lean_object*, lean_object*, lean_object*, lean_object*, lean_object*, lean_object*, lean_object*, lean_object*, lean_object*, lean_object*, lean_object*); 
uint64_t l___private_Init_Grind_CommRing_Poly_0__Lean_Grind_CommRing_hashMon____x40_Init_Grind_CommRing_Poly___hyg_1349_(lean_object*); LEAN_EXPORT lean_object* l_Lean_Meta_Grind_Arith_CommRing_Null_PreNullCert_div___boxed(lean_object*, lean_object*, lean_object*, lean_object*, lean_object*, lean_object*, lean_object*, lean_object*, lean_object*, lean_object*, lean_object*, lean_object*); lean_object* l_Lean_Meta_Grind_closeGoal(lean_object*, lean_object*, lean_object*, lean_object*, lean_object*, lean_object*, lean_object*, lean_object*, lean_object*, lean_object*); +static lean_object* l_Lean_Meta_Grind_Arith_CommRing_nonzeroCharInst_x3f___at___private_Lean_Meta_Tactic_Grind_Arith_CommRing_Proof_0__Lean_Meta_Grind_Arith_CommRing_Stepwise_mkStepPrefix___spec__1___closed__1; static lean_object* l_Lean_Meta_Grind_Arith_CommRing_Null_setDiseqUnsat___lambda__1___closed__4; +LEAN_EXPORT lean_object* l___private_Lean_Meta_Tactic_Grind_Arith_CommRing_DenoteExpr_0__Lean_Meta_Grind_Arith_CommRing_mkEq___at_Lean_Meta_Grind_Arith_CommRing_Null_setEqUnsat___spec__5___boxed(lean_object*, lean_object*, lean_object*, lean_object*, lean_object*, lean_object*, lean_object*, lean_object*, lean_object*, lean_object*, lean_object*, lean_object*); static lean_object* l___private_Lean_Meta_Tactic_Grind_Arith_CommRing_Proof_0__Lean_Meta_Grind_Arith_CommRing_Stepwise_withProofContext___lambda__1___closed__1; static lean_object* l___private_Lean_Meta_Tactic_Grind_Arith_CommRing_Proof_0__Lean_Meta_Grind_Arith_CommRing_Stepwise_withProofContext_go___closed__12; lean_object* l_Lean_MessageData_ofFormat(lean_object*); @@ -229,9 +239,9 @@ static lean_object* l_Lean_Meta_Grind_Arith_CommRing_throwNoNatZeroDivisors___ra static lean_object* l_Lean_Meta_Grind_Arith_CommRing_EqCnstr_toExprProof___closed__15; LEAN_EXPORT lean_object* l_Lean_Meta_Grind_Arith_CommRing_EqCnstr_toExprProof(lean_object*, lean_object*, lean_object*, lean_object*, lean_object*, lean_object*, lean_object*, lean_object*, lean_object*, lean_object*, lean_object*, lean_object*, lean_object*); LEAN_EXPORT lean_object* l_Std_DHashMap_Internal_Raw_u2080_expand_go___at___private_Lean_Meta_Tactic_Grind_Arith_CommRing_Proof_0__Lean_Meta_Grind_Arith_CommRing_Stepwise_caching___spec__4(lean_object*, lean_object*, lean_object*); +lean_object* l_outOfBounds___rarg(lean_object*); LEAN_EXPORT lean_object* l_Lean_Meta_Grind_Arith_CommRing_Null_NullCertExt_toNullCert_go(lean_object*, lean_object*, lean_object*, lean_object*); uint64_t l___private_Init_Grind_CommRing_Poly_0__Lean_Grind_CommRing_hashPoly____x40_Init_Grind_CommRing_Poly___hyg_3673_(lean_object*); -lean_object* l_Lean_Meta_Grind_Arith_CommRing_nonzeroChar_x3f(lean_object*, lean_object*, lean_object*, lean_object*, lean_object*, lean_object*, lean_object*, lean_object*, lean_object*, lean_object*); lean_object* lean_st_ref_get(lean_object*, lean_object*); lean_object* l_Lean_Grind_CommRing_Poly_mulConst_x27(lean_object*, lean_object*, lean_object*); LEAN_EXPORT lean_object* l_Std_DHashMap_Internal_AssocList_foldlM___at___private_Lean_Meta_Tactic_Grind_Arith_CommRing_Proof_0__Lean_Meta_Grind_Arith_CommRing_Stepwise_withProofContext_go___spec__2___boxed(lean_object*, lean_object*); @@ -247,6 +257,7 @@ lean_object* l_Lean_Name_num___override(lean_object*, lean_object*); uint8_t l_Lean_checkTraceOption(lean_object*, lean_object*, lean_object*); LEAN_EXPORT lean_object* 
l_Array_foldlMUnsafe_fold___at___private_Lean_Meta_Tactic_Grind_Arith_CommRing_Proof_0__Lean_Meta_Grind_Arith_CommRing_Stepwise_withProofContext_go___spec__5(lean_object*, size_t, size_t, lean_object*); LEAN_EXPORT lean_object* l_Std_DHashMap_Internal_AssocList_contains___at_Lean_Meta_Grind_Arith_CommRing_Stepwise_mkPolyDecl___spec__3___boxed(lean_object*, lean_object*); +LEAN_EXPORT lean_object* l_Lean_Meta_Grind_Arith_CommRing_PolyDerivation_denoteExpr___at_Lean_Meta_Grind_Arith_CommRing_Null_setDiseqUnsat___spec__3___boxed(lean_object*, lean_object*, lean_object*, lean_object*, lean_object*, lean_object*, lean_object*, lean_object*, lean_object*, lean_object*, lean_object*); extern lean_object* l_Lean_levelZero; LEAN_EXPORT lean_object* l___private_Lean_Meta_Tactic_Grind_Arith_CommRing_Proof_0__Lean_Meta_Grind_Arith_CommRing_Stepwise_getContext___boxed(lean_object*, lean_object*, lean_object*, lean_object*, lean_object*, lean_object*, lean_object*, lean_object*, lean_object*, lean_object*, lean_object*, lean_object*); static lean_object* l_Lean_Meta_Grind_Arith_CommRing_Null_setEqUnsat___lambda__2___closed__3; @@ -261,12 +272,14 @@ LEAN_EXPORT lean_object* l_Std_DHashMap_Internal_AssocList_foldlM___at___private LEAN_EXPORT lean_object* l_Std_DHashMap_Internal_AssocList_replace___at___private_Lean_Meta_Tactic_Grind_Arith_CommRing_Proof_0__Lean_Meta_Grind_Arith_CommRing_Stepwise_caching___spec__7(uint64_t, lean_object*, lean_object*); static lean_object* l_Lean_Meta_Grind_Arith_CommRing_Null_propagateEq___closed__1; LEAN_EXPORT lean_object* l_Lean_Meta_Grind_Arith_CommRing_Stepwise_mkExprDecl___lambda__1___boxed(lean_object*, lean_object*, lean_object*, lean_object*, lean_object*, lean_object*, lean_object*, lean_object*, lean_object*, lean_object*, lean_object*, lean_object*, lean_object*, lean_object*); +LEAN_EXPORT lean_object* l_Lean_Meta_Grind_Arith_CommRing_nonzeroCharInst_x3f___at_Lean_Meta_Grind_Arith_CommRing_Null_setDiseqUnsat___spec__1___lambda__1(lean_object*, lean_object*, lean_object*, lean_object*, lean_object*, lean_object*, lean_object*, lean_object*, lean_object*, lean_object*, lean_object*); static lean_object* l___private_Lean_Meta_Tactic_Grind_Arith_CommRing_Proof_0__Lean_Meta_Grind_Arith_CommRing_Stepwise_derivToExprProof___closed__5; LEAN_EXPORT uint8_t l_Std_DHashMap_Internal_AssocList_contains___at___private_Lean_Meta_Tactic_Grind_Arith_CommRing_Proof_0__Lean_Meta_Grind_Arith_CommRing_Null_caching___spec__2(uint64_t, lean_object*); LEAN_EXPORT lean_object* l_Lean_isTracingEnabledFor___at_Lean_Meta_Grind_Arith_CommRing_Null_setEqUnsat___spec__1(lean_object*, lean_object*, lean_object*, lean_object*, lean_object*, lean_object*, lean_object*, lean_object*, lean_object*, lean_object*, lean_object*); static lean_object* l_Lean_Meta_Grind_Arith_CommRing_EqCnstr_toExprProof___closed__19; static lean_object* l_panic___at___private_Lean_Meta_Tactic_Grind_Arith_CommRing_Proof_0__Lean_Meta_Grind_Arith_CommRing_Stepwise_mkImpEqExprProof___spec__1___closed__5; LEAN_EXPORT lean_object* l_Array_foldlMUnsafe_fold___at___private_Lean_Meta_Tactic_Grind_Arith_CommRing_Proof_0__Lean_Meta_Grind_Arith_CommRing_Stepwise_withProofContext_go___spec__10(lean_object*, size_t, size_t, lean_object*); +static lean_object* l___private_Lean_Meta_Tactic_Grind_Arith_CommRing_DenoteExpr_0__Lean_Meta_Grind_Arith_CommRing_denoteNum___at_Lean_Meta_Grind_Arith_CommRing_getPolyConst___spec__2___closed__5; LEAN_EXPORT lean_object* 
l_Lean_Meta_Grind_Arith_CommRing_PolyDerivation_toPreNullCert(lean_object*, lean_object*, lean_object*, lean_object*, lean_object*, lean_object*, lean_object*, lean_object*, lean_object*, lean_object*, lean_object*, lean_object*); lean_object* l_Lean_Expr_letE___override(lean_object*, lean_object*, lean_object*, lean_object*, uint8_t); LEAN_EXPORT lean_object* l_Std_DHashMap_Internal_Raw_u2080_expand___at___private_Lean_Meta_Tactic_Grind_Arith_CommRing_Proof_0__Lean_Meta_Grind_Arith_CommRing_Null_caching___spec__3(lean_object*); @@ -278,22 +291,27 @@ LEAN_EXPORT lean_object* l_Std_DHashMap_Internal_AssocList_replace___at___privat static lean_object* l_Lean_Meta_Grind_Arith_CommRing_Null_PreNullCert_zero___closed__2; static lean_object* l_Lean_Meta_Grind_Arith_CommRing_Null_propagateEq___closed__6; static lean_object* l_Lean_Meta_Grind_Arith_CommRing_Null_setEqUnsat___lambda__1___closed__19; +static lean_object* l___private_Lean_Meta_Tactic_Grind_Arith_CommRing_DenoteExpr_0__Lean_Meta_Grind_Arith_CommRing_denoteNum___at_Lean_Meta_Grind_Arith_CommRing_getPolyConst___spec__2___closed__4; static lean_object* l___private_Lean_Meta_Tactic_Grind_Arith_CommRing_Proof_0__Lean_Meta_Grind_Arith_CommRing_Stepwise_derivToExprProof___closed__8; static lean_object* l___private_Lean_Meta_Tactic_Grind_Arith_CommRing_Proof_0__Lean_Meta_Grind_Arith_CommRing_Stepwise_mkImpEqExprProof___closed__2; static lean_object* l_Lean_Meta_Grind_Arith_CommRing_EqCnstr_toExprProof___closed__17; +LEAN_EXPORT lean_object* l___private_Lean_Meta_Tactic_Grind_Arith_CommRing_DenoteExpr_0__Lean_Meta_Grind_Arith_CommRing_mkEq___at_Lean_Meta_Grind_Arith_CommRing_Null_setEqUnsat___spec__5(lean_object*, lean_object*, lean_object*, lean_object*, lean_object*, lean_object*, lean_object*, lean_object*, lean_object*, lean_object*, lean_object*, lean_object*); static lean_object* l___private_Lean_Meta_Tactic_Grind_Arith_CommRing_Proof_0__Lean_Meta_Grind_Arith_CommRing_Stepwise_derivToExprProof___closed__6; +LEAN_EXPORT lean_object* l_Lean_Grind_CommRing_Poly_denoteExpr___at_Lean_Meta_Grind_Arith_CommRing_getPolyConst___spec__1(lean_object*, lean_object*, lean_object*, lean_object*, lean_object*, lean_object*, lean_object*, lean_object*, lean_object*, lean_object*, lean_object*); LEAN_EXPORT lean_object* l_Lean_Meta_Grind_Arith_CommRing_Stepwise_mkMonDecl___boxed(lean_object*, lean_object*, lean_object*, lean_object*, lean_object*, lean_object*, lean_object*, lean_object*, lean_object*, lean_object*, lean_object*, lean_object*, lean_object*); LEAN_EXPORT lean_object* l___private_Lean_Meta_Tactic_Grind_Arith_CommRing_Proof_0__Lean_Meta_Grind_Arith_CommRing_Stepwise_withProofContext___lambda__1(lean_object*, lean_object*, lean_object*, lean_object*, lean_object*, lean_object*, lean_object*, lean_object*, lean_object*, lean_object*, lean_object*, lean_object*, lean_object*); lean_object* l___private_Init_Util_0__mkPanicMessageWithDecl(lean_object*, lean_object*, lean_object*, lean_object*, lean_object*); LEAN_EXPORT lean_object* l_Lean_Meta_Grind_Arith_CommRing_Null_setEqUnsat___lambda__1___boxed(lean_object*, lean_object*, lean_object*, lean_object*, lean_object*, lean_object*, lean_object*, lean_object*, lean_object*, lean_object*, lean_object*, lean_object*, lean_object*); static lean_object* l_Lean_Meta_Grind_Arith_CommRing_Null_setDiseqUnsat___lambda__1___closed__2; static lean_object* l_Lean_Meta_Grind_Arith_CommRing_EqCnstr_toExprProof___closed__21; +lean_object* l_Lean_mkNot(lean_object*); static lean_object* 
l_Lean_Meta_Grind_Arith_CommRing_Null_PreNullCert_unit___closed__1; LEAN_EXPORT lean_object* l_Std_Range_forIn_x27_loop___at_Lean_Meta_Grind_Arith_CommRing_Null_PreNullCert_combine___spec__1___boxed(lean_object**); lean_object* l_Lean_Meta_mkExpectedTypeHintCore(lean_object*, lean_object*, lean_object*); static lean_object* l___private_Lean_Meta_Tactic_Grind_Arith_CommRing_Proof_0__Lean_Meta_Grind_Arith_CommRing_Stepwise_withProofContext_go___closed__4; LEAN_EXPORT lean_object* l_Lean_mkFreshFVarId___at_Lean_Meta_Grind_Arith_CommRing_Stepwise_mkPolyDecl___spec__1___boxed(lean_object*, lean_object*, lean_object*, lean_object*, lean_object*, lean_object*, lean_object*, lean_object*, lean_object*, lean_object*, lean_object*, lean_object*); lean_object* l_Lean_Grind_CommRing_Poly_mulMonM(lean_object*, lean_object*, lean_object*, lean_object*, lean_object*, lean_object*, lean_object*, lean_object*, lean_object*, lean_object*, lean_object*, lean_object*, lean_object*); +LEAN_EXPORT lean_object* l___private_Lean_Meta_Tactic_Grind_Arith_CommRing_DenoteExpr_0__Lean_Meta_Grind_Arith_CommRing_denoteNum___at_Lean_Meta_Grind_Arith_CommRing_getPolyConst___spec__2(lean_object*, lean_object*, lean_object*, lean_object*, lean_object*, lean_object*, lean_object*, lean_object*, lean_object*, lean_object*, lean_object*); static lean_object* l_Lean_Meta_Grind_Arith_CommRing_Null_setDiseqUnsat___lambda__2___closed__2; lean_object* l_Lean_Grind_CommRing_Expr_toPolyM(lean_object*, lean_object*, lean_object*, lean_object*, lean_object*, lean_object*, lean_object*, lean_object*, lean_object*, lean_object*, lean_object*); lean_object* l_Lean_Meta_Grind_updateLastTag(lean_object*, lean_object*, lean_object*, lean_object*, lean_object*, lean_object*, lean_object*, lean_object*, lean_object*); @@ -302,15 +320,17 @@ static lean_object* l_Lean_Meta_Grind_Arith_CommRing_EqCnstr_toExprProof___close extern lean_object* l_Lean_Meta_instMonadMetaM; LEAN_EXPORT lean_object* l_Std_DHashMap_Internal_AssocList_get_x3f___at___private_Lean_Meta_Tactic_Grind_Arith_CommRing_Proof_0__Lean_Meta_Grind_Arith_CommRing_Null_caching___spec__1(uint64_t, lean_object*); LEAN_EXPORT lean_object* l___private_Lean_Meta_Tactic_Grind_Arith_CommRing_Proof_0__Lean_Meta_Grind_Arith_CommRing_Stepwise_mkImpEqExprProof___lambda__1(lean_object*, lean_object*, lean_object*, lean_object*, lean_object*, lean_object*, lean_object*, lean_object*, lean_object*, lean_object*, lean_object*, lean_object*, lean_object*, lean_object*, lean_object*, lean_object*, lean_object*, lean_object*); -LEAN_EXPORT lean_object* l_Lean_throwError___at_Lean_Meta_Grind_Arith_CommRing_getPolyConst___spec__1___boxed(lean_object*, lean_object*, lean_object*, lean_object*, lean_object*, lean_object*, lean_object*, lean_object*, lean_object*, lean_object*, lean_object*); LEAN_EXPORT lean_object* l_Std_DHashMap_Internal_Raw_u2080_expand_go___at_Lean_Meta_Grind_Arith_CommRing_Stepwise_mkExprDecl___spec__3(lean_object*, lean_object*, lean_object*); +LEAN_EXPORT lean_object* l_Lean_throwError___at_Lean_Meta_Grind_Arith_CommRing_getPolyConst___spec__8(lean_object*, lean_object*, lean_object*, lean_object*, lean_object*, lean_object*, lean_object*, lean_object*, lean_object*, lean_object*, lean_object*); LEAN_EXPORT uint8_t l_Std_DHashMap_Internal_AssocList_contains___at_Lean_Meta_Grind_Arith_CommRing_Stepwise_mkExprDecl___spec__1(lean_object*, lean_object*); static lean_object* l_Lean_Meta_Grind_Arith_CommRing_EqCnstr_toExprProof___closed__18; LEAN_EXPORT lean_object* 
l___private_Lean_Meta_Tactic_Grind_Arith_CommRing_Proof_0__Lean_Meta_Grind_Arith_CommRing_Stepwise_mkStepBasicPrefix___boxed(lean_object*, lean_object*, lean_object*, lean_object*, lean_object*, lean_object*, lean_object*, lean_object*, lean_object*, lean_object*, lean_object*, lean_object*, lean_object*); +LEAN_EXPORT lean_object* l_Lean_Meta_Grind_Arith_CommRing_PolyDerivation_denoteExpr___at_Lean_Meta_Grind_Arith_CommRing_Null_setDiseqUnsat___spec__3(lean_object*, lean_object*, lean_object*, lean_object*, lean_object*, lean_object*, lean_object*, lean_object*, lean_object*, lean_object*, lean_object*); static lean_object* l_Lean_Meta_Grind_Arith_CommRing_PolyDerivation_toPreNullCert___closed__2; static lean_object* l_Lean_Meta_Grind_Arith_CommRing_Null_propagateEq___closed__10; static lean_object* l___private_Lean_Meta_Tactic_Grind_Arith_CommRing_Proof_0__Lean_Meta_Grind_Arith_CommRing_Stepwise_mkImpEqExprProof___closed__4; LEAN_EXPORT lean_object* l_Lean_Meta_Grind_Arith_CommRing_Null_PreNullCert_div(lean_object*, lean_object*, lean_object*, lean_object*, lean_object*, lean_object*, lean_object*, lean_object*, lean_object*, lean_object*, lean_object*, lean_object*); +static lean_object* l_Lean_Meta_Grind_Arith_CommRing_nonzeroCharInst_x3f___at_Lean_Meta_Grind_Arith_CommRing_Null_setDiseqUnsat___spec__1___closed__1; lean_object* l_Lean_MessageData_ofExpr(lean_object*); LEAN_EXPORT lean_object* l___private_Lean_Meta_Tactic_Grind_Arith_CommRing_Proof_0__Lean_Meta_Grind_Arith_CommRing_Stepwise_derivToExprProof___lambda__1(lean_object*, lean_object*, lean_object*, lean_object*, lean_object*, lean_object*, lean_object*, lean_object*, lean_object*, lean_object*, lean_object*, lean_object*, lean_object*, lean_object*, lean_object*, lean_object*, lean_object*, lean_object*, lean_object*, lean_object*, lean_object*, lean_object*, lean_object*); static lean_object* l___private_Lean_Meta_Tactic_Grind_Arith_CommRing_Proof_0__Lean_Meta_Grind_Arith_CommRing_Stepwise_mkImpEqExprProof___closed__13; @@ -318,7 +338,6 @@ LEAN_EXPORT lean_object* l_Lean_Meta_Grind_Arith_CommRing_EqCnstr_setUnsat(lean_ static lean_object* l___private_Lean_Meta_Tactic_Grind_Arith_CommRing_Proof_0__Lean_Meta_Grind_Arith_CommRing_Stepwise_withProofContext___closed__1; static lean_object* l_Lean_Meta_Grind_Arith_CommRing_EqCnstr_toExprProof___closed__9; static lean_object* l_Lean_Meta_Grind_Arith_CommRing_Stepwise_setEqUnsat___closed__3; -static lean_object* l_Lean_Meta_Grind_Arith_CommRing_Null_setEqUnsat___lambda__1___closed__21; LEAN_EXPORT lean_object* l_Lean_Meta_Grind_Arith_CommRing_throwNoNatZeroDivisors___rarg(lean_object*, lean_object*, lean_object*, lean_object*, lean_object*, lean_object*, lean_object*, lean_object*, lean_object*, lean_object*); double l_Float_ofScientific(lean_object*, uint8_t, lean_object*); static lean_object* l_Lean_Meta_Grind_Arith_CommRing_Null_setEqUnsat___lambda__1___closed__11; @@ -332,10 +351,10 @@ static lean_object* l_Lean_Meta_Grind_Arith_CommRing_toContextExpr___closed__2; LEAN_EXPORT lean_object* l_Lean_Meta_Grind_Arith_CommRing_Null_setDiseqUnsat___lambda__1___boxed(lean_object*, lean_object*, lean_object*, lean_object*, lean_object*, lean_object*, lean_object*, lean_object*, lean_object*, lean_object*, lean_object*, lean_object*, lean_object*); LEAN_EXPORT lean_object* l___private_Lean_Meta_Tactic_Grind_Arith_CommRing_Proof_0__Lean_Meta_Grind_Arith_CommRing_Stepwise_mkImpEqExprProof___lambda__1___boxed(lean_object**); static lean_object* 
l_Lean_Meta_Grind_Arith_CommRing_EqCnstr_toExprProof___closed__5; -LEAN_EXPORT lean_object* l___private_Lean_Meta_Tactic_Grind_Arith_CommRing_Proof_0__Lean_Meta_Grind_Arith_CommRing_Stepwise_mkStepPrefix___boxed(lean_object*, lean_object*, lean_object*, lean_object*, lean_object*, lean_object*, lean_object*, lean_object*, lean_object*, lean_object*, lean_object*, lean_object*, lean_object*, lean_object*); lean_object* l_Lean_Grind_CommRing_Poly_mulConstM(lean_object*, lean_object*, lean_object*, lean_object*, lean_object*, lean_object*, lean_object*, lean_object*, lean_object*, lean_object*, lean_object*, lean_object*); lean_object* lean_grind_mk_eq_proof(lean_object*, lean_object*, lean_object*, lean_object*, lean_object*, lean_object*, lean_object*, lean_object*, lean_object*, lean_object*, lean_object*); static lean_object* l_Lean_Meta_Grind_Arith_CommRing_Null_setEqUnsat___lambda__1___closed__6; +LEAN_EXPORT lean_object* l_Lean_Meta_Grind_Arith_CommRing_DiseqCnstr_denoteExpr___at_Lean_Meta_Grind_Arith_CommRing_Null_setDiseqUnsat___spec__2(lean_object*, lean_object*, lean_object*, lean_object*, lean_object*, lean_object*, lean_object*, lean_object*, lean_object*, lean_object*, lean_object*); static lean_object* l_panic___at___private_Lean_Meta_Tactic_Grind_Arith_CommRing_Proof_0__Lean_Meta_Grind_Arith_CommRing_Stepwise_mkImpEqExprProof___spec__1___closed__4; LEAN_EXPORT uint8_t l_Std_DHashMap_Internal_AssocList_contains___at_Lean_Meta_Grind_Arith_CommRing_Stepwise_mkPolyDecl___spec__3(lean_object*, lean_object*); LEAN_EXPORT lean_object* l_Std_DHashMap_Internal_AssocList_foldlM___at___private_Lean_Meta_Tactic_Grind_Arith_CommRing_Proof_0__Lean_Meta_Grind_Arith_CommRing_Null_caching___spec__5___at___private_Lean_Meta_Tactic_Grind_Arith_CommRing_Proof_0__Lean_Meta_Grind_Arith_CommRing_Null_caching___spec__6(lean_object*, lean_object*); @@ -347,6 +366,7 @@ LEAN_EXPORT lean_object* l___private_Lean_Meta_Tactic_Grind_Arith_CommRing_Proof LEAN_EXPORT lean_object* l_Lean_Meta_Grind_Arith_mkLetOfMap___at___private_Lean_Meta_Tactic_Grind_Arith_CommRing_Proof_0__Lean_Meta_Grind_Arith_CommRing_Stepwise_withProofContext_go___spec__6___boxed(lean_object*, lean_object*, lean_object*, lean_object*, lean_object*, lean_object*, lean_object*, lean_object*, lean_object*, lean_object*, lean_object*, lean_object*, lean_object*, lean_object*); LEAN_EXPORT lean_object* l_Lean_Meta_Grind_Arith_CommRing_Stepwise_setEqUnsat___lambda__2(lean_object*, lean_object*, lean_object*, lean_object*, lean_object*, lean_object*, lean_object*, lean_object*, lean_object*, lean_object*, lean_object*, lean_object*, lean_object*, lean_object*, lean_object*); static lean_object* l_Lean_Meta_Grind_Arith_CommRing_EqCnstr_toExprProof___closed__2; +LEAN_EXPORT lean_object* l_Lean_Grind_CommRing_Poly_denoteExpr_denoteTerm___at_Lean_Meta_Grind_Arith_CommRing_getPolyConst___spec__3___boxed(lean_object*, lean_object*, lean_object*, lean_object*, lean_object*, lean_object*, lean_object*, lean_object*, lean_object*, lean_object*, lean_object*, lean_object*); lean_object* lean_int_mul(lean_object*, lean_object*); LEAN_EXPORT lean_object* l_Array_mapMUnsafe_map___at___private_Lean_Meta_Tactic_Grind_Arith_CommRing_Proof_0__Lean_Meta_Grind_Arith_CommRing_Stepwise_withProofContext_go___spec__13___boxed(lean_object*, lean_object*, lean_object*); lean_object* l_Lean_Meta_Grind_Arith_CommRing_noZeroDivisorsInst_x3f(lean_object*, lean_object*, lean_object*, lean_object*, lean_object*, lean_object*, lean_object*, lean_object*, lean_object*, 
lean_object*); @@ -354,26 +374,30 @@ static lean_object* l___private_Lean_Meta_Tactic_Grind_Arith_CommRing_Proof_0__L lean_object* l_Lean_Expr_app___override(lean_object*, lean_object*); static lean_object* l_panic___at___private_Lean_Meta_Tactic_Grind_Arith_CommRing_Proof_0__Lean_Meta_Grind_Arith_CommRing_Stepwise_mkImpEqExprProof___spec__1___closed__7; static lean_object* l_Lean_Meta_Grind_Arith_CommRing_PolyDerivation_toPreNullCert___closed__10; +LEAN_EXPORT lean_object* l_Lean_Grind_CommRing_Poly_denoteExpr_go___at_Lean_Meta_Grind_Arith_CommRing_getPolyConst___spec__7(lean_object*, lean_object*, lean_object*, lean_object*, lean_object*, lean_object*, lean_object*, lean_object*, lean_object*, lean_object*, lean_object*, lean_object*); LEAN_EXPORT lean_object* l_Std_DHashMap_Internal_AssocList_replace___at___private_Lean_Meta_Tactic_Grind_Arith_CommRing_Proof_0__Lean_Meta_Grind_Arith_CommRing_Null_caching___spec__7___boxed(lean_object*, lean_object*, lean_object*); uint8_t lean_nat_dec_eq(lean_object*, lean_object*); lean_object* l_Lean_mkApp3(lean_object*, lean_object*, lean_object*, lean_object*); LEAN_EXPORT lean_object* l_Lean_Meta_Grind_Arith_CommRing_getPolyConst___boxed(lean_object*, lean_object*, lean_object*, lean_object*, lean_object*, lean_object*, lean_object*, lean_object*, lean_object*, lean_object*, lean_object*); LEAN_EXPORT lean_object* l_Lean_Meta_Grind_Arith_CommRing_throwNoNatZeroDivisors___rarg___boxed(lean_object*, lean_object*, lean_object*, lean_object*, lean_object*, lean_object*, lean_object*, lean_object*, lean_object*, lean_object*); +LEAN_EXPORT lean_object* l_Lean_Grind_CommRing_Mon_denoteExpr___at_Lean_Meta_Grind_Arith_CommRing_getPolyConst___spec__4(lean_object*, lean_object*, lean_object*, lean_object*, lean_object*, lean_object*, lean_object*, lean_object*, lean_object*, lean_object*, lean_object*); static lean_object* l___private_Lean_Meta_Tactic_Grind_Arith_CommRing_Proof_0__Lean_Meta_Grind_Arith_CommRing_Stepwise_mkImpEqExprProof___closed__6; static lean_object* l_Lean_Meta_Grind_Arith_CommRing_Null_propagateEq___closed__3; LEAN_EXPORT lean_object* l_Lean_Meta_Grind_Arith_CommRing_Stepwise_mkPolyDecl(lean_object*, lean_object*, lean_object*, lean_object*, lean_object*, lean_object*, lean_object*, lean_object*, lean_object*, lean_object*, lean_object*, lean_object*, lean_object*); lean_object* l_Lean_Meta_Grind_mkDiseqProof(lean_object*, lean_object*, lean_object*, lean_object*, lean_object*, lean_object*, lean_object*, lean_object*, lean_object*, lean_object*, lean_object*); uint8_t lean_nat_dec_lt(lean_object*, lean_object*); static lean_object* l_Lean_Meta_Grind_Arith_CommRing_Null_setEqUnsat___lambda__1___closed__3; +LEAN_EXPORT lean_object* l_Lean_Grind_CommRing_Poly_denoteExpr___at_Lean_Meta_Grind_Arith_CommRing_getPolyConst___spec__1___boxed(lean_object*, lean_object*, lean_object*, lean_object*, lean_object*, lean_object*, lean_object*, lean_object*, lean_object*, lean_object*, lean_object*); LEAN_EXPORT lean_object* l_Lean_Meta_withLetDecl___at___private_Lean_Meta_Tactic_Grind_Arith_CommRing_Proof_0__Lean_Meta_Grind_Arith_CommRing_Stepwise_withProofContext___spec__1___rarg(lean_object*, lean_object*, lean_object*, lean_object*, uint8_t, lean_object*, lean_object*, lean_object*, lean_object*, lean_object*, lean_object*, lean_object*, lean_object*, lean_object*, lean_object*); -lean_object* l_Lean_Grind_CommRing_Poly_denoteExpr(lean_object*, lean_object*, lean_object*, lean_object*, lean_object*, lean_object*, lean_object*, lean_object*, 
lean_object*, lean_object*, lean_object*); uint8_t l___private_Init_Grind_CommRing_Poly_0__Lean_Grind_CommRing_beqMon____x40_Init_Grind_CommRing_Poly___hyg_1147_(lean_object*, lean_object*); +lean_object* l_Lean_mkRawNatLit(lean_object*); static lean_object* l___private_Lean_Meta_Tactic_Grind_Arith_CommRing_Proof_0__Lean_Meta_Grind_Arith_CommRing_Stepwise_withProofContext_go___closed__3; lean_object* l_Lean_Meta_Grind_Arith_CommRing_ofPoly(lean_object*); LEAN_EXPORT lean_object* l_Lean_Meta_Grind_Arith_CommRing_Null_NullCertExt_applyEqs___boxed(lean_object*, lean_object*); static lean_object* l_Lean_Meta_Grind_Arith_CommRing_PolyDerivation_toPreNullCert___closed__7; lean_object* l_id___rarg___boxed(lean_object*); static lean_object* l_Lean_Meta_Grind_Arith_CommRing_Null_setEqUnsat___lambda__1___closed__8; +LEAN_EXPORT lean_object* l_Lean_Meta_Grind_Arith_CommRing_DiseqCnstr_denoteExpr___at_Lean_Meta_Grind_Arith_CommRing_Null_setDiseqUnsat___spec__2___boxed(lean_object*, lean_object*, lean_object*, lean_object*, lean_object*, lean_object*, lean_object*, lean_object*, lean_object*, lean_object*, lean_object*); lean_object* l_Lean_Name_mkStr2(lean_object*, lean_object*); static lean_object* l___private_Lean_Meta_Tactic_Grind_Arith_CommRing_Proof_0__Lean_Meta_Grind_Arith_CommRing_Stepwise_withProofContext_go___closed__10; LEAN_EXPORT lean_object* l___private_Lean_Meta_Tactic_Grind_Arith_CommRing_Proof_0__Lean_Meta_Grind_Arith_CommRing_Stepwise_caching(lean_object*); @@ -390,11 +414,13 @@ LEAN_EXPORT lean_object* l_Array_forIn_x27Unsafe_loop___at___private_Lean_Meta_T static lean_object* l_panic___at___private_Lean_Meta_Tactic_Grind_Arith_CommRing_Proof_0__Lean_Meta_Grind_Arith_CommRing_Stepwise_mkImpEqExprProof___spec__1___closed__8; LEAN_EXPORT lean_object* l_Lean_Meta_Grind_Arith_CommRing_Stepwise_setEqUnsat___lambda__1___boxed(lean_object*, lean_object*, lean_object*, lean_object*, lean_object*, lean_object*, lean_object*, lean_object*, lean_object*, lean_object*, lean_object*, lean_object*, lean_object*, lean_object*, lean_object*, lean_object*); LEAN_EXPORT lean_object* l_Std_DHashMap_Internal_AssocList_get_x3f___at_Lean_Meta_Grind_Arith_CommRing_Stepwise_mkPolyDecl___spec__8___boxed(lean_object*, lean_object*); +LEAN_EXPORT lean_object* l_Lean_Meta_Grind_Arith_CommRing_nonzeroCharInst_x3f___at___private_Lean_Meta_Tactic_Grind_Arith_CommRing_Proof_0__Lean_Meta_Grind_Arith_CommRing_Stepwise_mkStepPrefix___spec__1___lambda__1___boxed(lean_object*, lean_object*, lean_object*, lean_object*, lean_object*, lean_object*, lean_object*, lean_object*, lean_object*, lean_object*, lean_object*, lean_object*, lean_object*); +LEAN_EXPORT lean_object* l_Lean_Grind_CommRing_Power_denoteExpr___at_Lean_Meta_Grind_Arith_CommRing_getPolyConst___spec__5(lean_object*, lean_object*, lean_object*, lean_object*, lean_object*, lean_object*, lean_object*, lean_object*, lean_object*, lean_object*, lean_object*); static lean_object* l___private_Lean_Meta_Tactic_Grind_Arith_CommRing_Proof_0__Lean_Meta_Grind_Arith_CommRing_Stepwise_withProofContext_go___closed__1; LEAN_EXPORT lean_object* l_Std_DHashMap_Internal_AssocList_foldlM___at___private_Lean_Meta_Tactic_Grind_Arith_CommRing_Proof_0__Lean_Meta_Grind_Arith_CommRing_Stepwise_withProofContext_go___spec__2(lean_object*, lean_object*); lean_object* lean_array_set(lean_object*, lean_object*, lean_object*); LEAN_EXPORT lean_object* l_Lean_Meta_Grind_Arith_CommRing_Null_NullCertExt_check___boxed(lean_object*, lean_object*, lean_object*, lean_object*, lean_object*, 
lean_object*, lean_object*, lean_object*, lean_object*, lean_object*, lean_object*, lean_object*); -lean_object* l_Lean_Meta_Grind_Arith_CommRing_nonzeroCharInst_x3f(lean_object*, lean_object*, lean_object*, lean_object*, lean_object*, lean_object*, lean_object*, lean_object*, lean_object*, lean_object*); +LEAN_EXPORT lean_object* l_Lean_throwError___at_Lean_Meta_Grind_Arith_CommRing_getPolyConst___spec__8___boxed(lean_object*, lean_object*, lean_object*, lean_object*, lean_object*, lean_object*, lean_object*, lean_object*, lean_object*, lean_object*, lean_object*); lean_object* l_Lean_addMessageContextFull___at_Lean_Meta_instAddMessageContextMetaM___spec__1(lean_object*, lean_object*, lean_object*, lean_object*, lean_object*, lean_object*); LEAN_EXPORT lean_object* l_Lean_Meta_Grind_Arith_CommRing_Null_PreNullCert_mul(lean_object*, lean_object*, lean_object*, lean_object*, lean_object*, lean_object*, lean_object*, lean_object*, lean_object*, lean_object*, lean_object*, lean_object*); uint64_t lean_uint64_xor(uint64_t, uint64_t); @@ -419,16 +445,19 @@ LEAN_EXPORT lean_object* l_Lean_Meta_withLetDecl___at___private_Lean_Meta_Tactic LEAN_EXPORT lean_object* l_Lean_Meta_Grind_Arith_CommRing_Null_PreNullCert_unit___boxed(lean_object*, lean_object*); static lean_object* l_Lean_Meta_Grind_Arith_CommRing_Stepwise_setEqUnsat___closed__2; static lean_object* l_Lean_Meta_Grind_Arith_CommRing_EqCnstr_mkNullCertExt___closed__4; +LEAN_EXPORT lean_object* l_Lean_Meta_Grind_Arith_CommRing_nonzeroCharInst_x3f___at___private_Lean_Meta_Tactic_Grind_Arith_CommRing_Proof_0__Lean_Meta_Grind_Arith_CommRing_Stepwise_mkStepPrefix___spec__1(lean_object*, lean_object*, lean_object*, lean_object*, lean_object*, lean_object*, lean_object*, lean_object*, lean_object*, lean_object*, lean_object*, lean_object*); static lean_object* l_Lean_Meta_Grind_Arith_CommRing_Null_setEqUnsat___closed__2; LEAN_EXPORT lean_object* l_Std_DHashMap_Internal_AssocList_replace___at_Lean_Meta_Grind_Arith_CommRing_Stepwise_mkExprDecl___spec__5(lean_object*, lean_object*, lean_object*); LEAN_EXPORT lean_object* l_Std_DHashMap_Internal_AssocList_foldlM___at_Lean_Meta_Grind_Arith_CommRing_Stepwise_mkPolyDecl___spec__6(lean_object*, lean_object*); LEAN_EXPORT lean_object* l_Std_DHashMap_Internal_AssocList_foldlM___at___private_Lean_Meta_Tactic_Grind_Arith_CommRing_Proof_0__Lean_Meta_Grind_Arith_CommRing_Stepwise_withProofContext_go___spec__7(lean_object*, lean_object*); +static lean_object* l___private_Lean_Meta_Tactic_Grind_Arith_CommRing_DenoteExpr_0__Lean_Meta_Grind_Arith_CommRing_mkEq___at_Lean_Meta_Grind_Arith_CommRing_Null_setEqUnsat___spec__5___closed__1; static lean_object* l_Lean_Meta_Grind_Arith_CommRing_EqCnstr_mkNullCertExt___closed__3; LEAN_EXPORT lean_object* l_Std_DHashMap_Internal_AssocList_get_x3f___at___private_Lean_Meta_Tactic_Grind_Arith_CommRing_Proof_0__Lean_Meta_Grind_Arith_CommRing_Null_caching___spec__1___boxed(lean_object*, lean_object*); uint8_t lean_uint64_dec_eq(uint64_t, uint64_t); LEAN_EXPORT lean_object* l_Std_DHashMap_Internal_AssocList_replace___at_Lean_Meta_Grind_Arith_CommRing_Stepwise_mkMonDecl___spec__5(lean_object*, lean_object*, lean_object*); size_t lean_usize_sub(size_t, size_t); lean_object* lean_array_mk(lean_object*); +lean_object* l_Lean_PersistentArray_get_x21___rarg(lean_object*, lean_object*, lean_object*); static lean_object* l_Lean_Meta_Grind_Arith_CommRing_Null_instInhabitedPreNullCert___closed__1; LEAN_EXPORT lean_object* 
l_Array_forIn_x27Unsafe_loop___at___private_Lean_Meta_Tactic_Grind_Arith_CommRing_Proof_0__Lean_Meta_Grind_Arith_CommRing_Stepwise_withProofContext_go___spec__4(lean_object*, lean_object*, lean_object*, lean_object*, lean_object*, lean_object*, size_t, size_t, lean_object*, lean_object*, lean_object*, lean_object*, lean_object*, lean_object*, lean_object*, lean_object*, lean_object*, lean_object*); LEAN_EXPORT lean_object* l_Lean_Meta_Grind_Arith_CommRing_Stepwise_setDiseqUnsat(lean_object*, lean_object*, lean_object*, lean_object*, lean_object*, lean_object*, lean_object*, lean_object*, lean_object*, lean_object*, lean_object*); @@ -437,8 +466,8 @@ LEAN_EXPORT lean_object* l_Array_forIn_x27Unsafe_loop___at___private_Lean_Meta_T LEAN_EXPORT lean_object* l_Lean_Meta_Grind_Arith_CommRing_Null_NullCertExt_toNullCert_go___boxed(lean_object*, lean_object*, lean_object*, lean_object*); LEAN_EXPORT lean_object* l_Std_Range_forIn_x27_loop___at_Lean_Meta_Grind_Arith_CommRing_Null_PreNullCert_combine___spec__1(lean_object*, lean_object*, lean_object*, lean_object*, lean_object*, lean_object*, lean_object*, lean_object*, lean_object*, lean_object*, lean_object*, lean_object*, lean_object*, lean_object*, lean_object*, lean_object*, lean_object*, lean_object*, lean_object*, lean_object*, lean_object*, lean_object*, lean_object*, lean_object*, lean_object*, lean_object*); LEAN_EXPORT lean_object* l_Array_forIn_x27Unsafe_loop___at_Lean_Meta_Grind_Arith_CommRing_Null_NullCertExt_toPoly___spec__1(lean_object*, lean_object*, lean_object*, size_t, size_t, lean_object*, lean_object*, lean_object*, lean_object*, lean_object*, lean_object*, lean_object*, lean_object*, lean_object*, lean_object*, lean_object*); -static lean_object* l_Lean_Meta_Grind_Arith_CommRing_Null_propagateEq___lambda__1___closed__2; size_t lean_usize_add(size_t, size_t); +LEAN_EXPORT lean_object* l_Lean_Grind_CommRing_Power_denoteExpr___at_Lean_Meta_Grind_Arith_CommRing_getPolyConst___spec__5___boxed(lean_object*, lean_object*, lean_object*, lean_object*, lean_object*, lean_object*, lean_object*, lean_object*, lean_object*, lean_object*, lean_object*); lean_object* lean_array_uget(lean_object*, size_t); LEAN_EXPORT lean_object* l_Std_DHashMap_Internal_AssocList_foldlM___at_Lean_Meta_Grind_Arith_CommRing_Stepwise_mkExprDecl___spec__4(lean_object*, lean_object*); LEAN_EXPORT lean_object* l_Lean_Meta_Grind_Arith_CommRing_EqCnstr_toPreNullCert(lean_object*, lean_object*, lean_object*, lean_object*, lean_object*, lean_object*, lean_object*, lean_object*, lean_object*, lean_object*, lean_object*, lean_object*); @@ -449,9 +478,10 @@ lean_object* l_Lean_instToExprInt_mkNat(lean_object*); static lean_object* l_Lean_Meta_Grind_Arith_CommRing_Null_setEqUnsat___lambda__1___closed__14; LEAN_EXPORT lean_object* l_Std_DHashMap_Internal_AssocList_foldlM___at___private_Lean_Meta_Tactic_Grind_Arith_CommRing_Proof_0__Lean_Meta_Grind_Arith_CommRing_Stepwise_withProofContext_go___spec__12(lean_object*, lean_object*); static lean_object* l_Lean_Meta_Grind_Arith_CommRing_EqCnstr_toExprProof___closed__10; -lean_object* l_Lean_Meta_Grind_Arith_CommRing_DiseqCnstr_denoteExpr(lean_object*, lean_object*, lean_object*, lean_object*, lean_object*, lean_object*, lean_object*, lean_object*, lean_object*, lean_object*, lean_object*); lean_object* l_instInhabitedOfMonad___rarg(lean_object*, lean_object*); +lean_object* l_Lean_Meta_Grind_Arith_CommRing_RingM_getRing(lean_object*, lean_object*, lean_object*, lean_object*, lean_object*, lean_object*, lean_object*, 
lean_object*, lean_object*, lean_object*); lean_object* lean_st_ref_set(lean_object*, lean_object*, lean_object*); +static lean_object* l___private_Lean_Meta_Tactic_Grind_Arith_CommRing_DenoteExpr_0__Lean_Meta_Grind_Arith_CommRing_denoteNum___at_Lean_Meta_Grind_Arith_CommRing_getPolyConst___spec__2___closed__8; uint8_t l___private_Init_Grind_CommRing_Poly_0__Lean_Grind_CommRing_beqPoly____x40_Init_Grind_CommRing_Poly___hyg_3550_(lean_object*, lean_object*); static lean_object* l_Lean_Meta_Grind_Arith_CommRing_toContextExpr___closed__1; lean_object* l_Lean_Name_mkStr4(lean_object*, lean_object*, lean_object*, lean_object*); @@ -463,7 +493,6 @@ static lean_object* l_Lean_Meta_Grind_Arith_CommRing_Null_setEqUnsat___lambda__1 LEAN_EXPORT lean_object* l_Lean_Meta_Grind_Arith_CommRing_PolyDerivation_getMultiplier_go(lean_object*, lean_object*); LEAN_EXPORT lean_object* l_Lean_Meta_Grind_Arith_CommRing_throwNoNatZeroDivisors(lean_object*); static lean_object* l_Lean_Meta_Grind_Arith_CommRing_EqCnstr_toExprProof___closed__7; -LEAN_EXPORT lean_object* l_Lean_throwError___at_Lean_Meta_Grind_Arith_CommRing_getPolyConst___spec__1(lean_object*, lean_object*, lean_object*, lean_object*, lean_object*, lean_object*, lean_object*, lean_object*, lean_object*, lean_object*, lean_object*); lean_object* l_Lean_Meta_Grind_Arith_CommRing_PolyDerivation_p(lean_object*); lean_object* lean_string_append(lean_object*, lean_object*); LEAN_EXPORT lean_object* l_Lean_Meta_Grind_Arith_CommRing_Stepwise_mkPolyDecl___boxed(lean_object*, lean_object*, lean_object*, lean_object*, lean_object*, lean_object*, lean_object*, lean_object*, lean_object*, lean_object*, lean_object*, lean_object*, lean_object*); @@ -476,6 +505,7 @@ lean_object* lean_int_ediv(lean_object*, lean_object*); static lean_object* l___private_Lean_Meta_Tactic_Grind_Arith_CommRing_Proof_0__Lean_Meta_Grind_Arith_CommRing_Stepwise_withProofContext___closed__3; LEAN_EXPORT lean_object* l_Lean_Meta_Grind_Arith_CommRing_Null_setDiseqUnsat___lambda__1(lean_object*, lean_object*, lean_object*, lean_object*, lean_object*, lean_object*, lean_object*, lean_object*, lean_object*, lean_object*, lean_object*, lean_object*, lean_object*); lean_object* lean_int_neg(lean_object*); +LEAN_EXPORT lean_object* l_Lean_Meta_Grind_Arith_CommRing_nonzeroCharInst_x3f___at_Lean_Meta_Grind_Arith_CommRing_Null_setDiseqUnsat___spec__1(lean_object*, lean_object*, lean_object*, lean_object*, lean_object*, lean_object*, lean_object*, lean_object*, lean_object*, lean_object*); static lean_object* l___private_Lean_Meta_Tactic_Grind_Arith_CommRing_Proof_0__Lean_Meta_Grind_Arith_CommRing_Stepwise_withProofContext_go___closed__15; uint8_t lean_nat_dec_le(lean_object*, lean_object*); uint8_t lean_usize_dec_lt(size_t, size_t); @@ -492,7 +522,6 @@ LEAN_EXPORT lean_object* l_Lean_Meta_Grind_Arith_CommRing_Null_propagateEq(lean_ LEAN_EXPORT uint64_t l___private_Lean_Meta_Tactic_Grind_Arith_CommRing_Proof_0__Lean_Meta_Grind_Arith_CommRing_Null_caching_unsafe__1___rarg(lean_object*); static lean_object* l_Lean_Meta_Grind_Arith_CommRing_Null_propagateEq___closed__9; LEAN_EXPORT lean_object* l_Lean_Meta_Grind_Arith_mkLetOfMap___at___private_Lean_Meta_Tactic_Grind_Arith_CommRing_Proof_0__Lean_Meta_Grind_Arith_CommRing_Stepwise_withProofContext_go___spec__6(lean_object*, lean_object*, lean_object*, lean_object*, lean_object*, lean_object*, lean_object*, lean_object*, lean_object*, lean_object*, lean_object*, lean_object*, lean_object*, lean_object*); -static lean_object* 
l_Lean_Meta_Grind_Arith_CommRing_Null_PreNullCert_zero___closed__3; LEAN_EXPORT lean_object* l_Std_DHashMap_Internal_Raw_u2080_expand___at_Lean_Meta_Grind_Arith_CommRing_Stepwise_mkExprDecl___spec__2(lean_object*); LEAN_EXPORT lean_object* l_Lean_Meta_Grind_Arith_CommRing_Null_PreNullCert_combine(lean_object*, lean_object*, lean_object*, lean_object*, lean_object*, lean_object*, lean_object*, lean_object*, lean_object*, lean_object*, lean_object*, lean_object*, lean_object*, lean_object*, lean_object*, lean_object*); static lean_object* l_Lean_Meta_Grind_Arith_CommRing_Null_setEqUnsat___lambda__2___closed__4; @@ -502,10 +531,11 @@ lean_object* l_Lean_Meta_Grind_getConfig___rarg(lean_object*, lean_object*, lean lean_object* lean_array_uset(lean_object*, size_t, lean_object*); lean_object* l___private_Init_Data_Repr_0__Nat_reprFast(lean_object*); LEAN_EXPORT lean_object* l_Std_DHashMap_Internal_AssocList_foldlM___at___private_Lean_Meta_Tactic_Grind_Arith_CommRing_Proof_0__Lean_Meta_Grind_Arith_CommRing_Stepwise_withProofContext_go___spec__12___boxed(lean_object*, lean_object*); +static lean_object* l_Lean_Grind_CommRing_Mon_denoteExpr___at_Lean_Meta_Grind_Arith_CommRing_getPolyConst___spec__4___closed__1; LEAN_EXPORT lean_object* l_Lean_Meta_Grind_Arith_CommRing_Null_NullCertExt_applyEqs_go___boxed(lean_object*, lean_object*, lean_object*); -static lean_object* l_Lean_Meta_Grind_Arith_CommRing_Null_propagateEq___lambda__1___closed__1; lean_object* lean_mk_empty_array_with_capacity(lean_object*); LEAN_EXPORT lean_object* l_Lean_throwError___at_Lean_Meta_Grind_Arith_CommRing_throwNoNatZeroDivisors___spec__1___rarg(lean_object*, lean_object*, lean_object*, lean_object*, lean_object*, lean_object*, lean_object*, lean_object*, lean_object*, lean_object*, lean_object*); +LEAN_EXPORT lean_object* l_Lean_Meta_Grind_Arith_CommRing_EqCnstr_denoteExpr___at_Lean_Meta_Grind_Arith_CommRing_Null_setEqUnsat___spec__4(lean_object*, lean_object*, lean_object*, lean_object*, lean_object*, lean_object*, lean_object*, lean_object*, lean_object*, lean_object*, lean_object*); static lean_object* l___private_Lean_Meta_Tactic_Grind_Arith_CommRing_Proof_0__Lean_Meta_Grind_Arith_CommRing_Stepwise_withProofContext_go___closed__17; lean_object* l_ReaderT_instMonad___rarg(lean_object*); size_t lean_usize_land(size_t, size_t); @@ -516,6 +546,7 @@ static lean_object* l___private_Lean_Meta_Tactic_Grind_Arith_CommRing_Proof_0__L lean_object* l_Lean_RArray_ofFn_go___rarg(lean_object*, lean_object*, lean_object*, lean_object*, lean_object*, lean_object*); LEAN_EXPORT lean_object* l_Lean_Meta_Grind_Arith_CommRing_Stepwise_mkMonDecl___lambda__1(lean_object*, lean_object*, lean_object*, lean_object*, lean_object*, lean_object*, lean_object*, lean_object*, lean_object*, lean_object*, lean_object*, lean_object*, lean_object*, lean_object*); static lean_object* l_Lean_Meta_Grind_Arith_CommRing_Null_setEqUnsat___lambda__1___closed__12; +static lean_object* l___private_Lean_Meta_Tactic_Grind_Arith_CommRing_DenoteExpr_0__Lean_Meta_Grind_Arith_CommRing_denoteNum___at_Lean_Meta_Grind_Arith_CommRing_getPolyConst___spec__2___closed__1; LEAN_EXPORT lean_object* l_Lean_throwError___at_Lean_Meta_Grind_Arith_CommRing_Null_setEqUnsat___spec__2(lean_object*, lean_object*, lean_object*, lean_object*, lean_object*, lean_object*, lean_object*, lean_object*, lean_object*, lean_object*, lean_object*); lean_object* l_Lean_Meta_Grind_Arith_CommRing_ofRingExpr(lean_object*); static lean_object* 
l___private_Lean_Meta_Tactic_Grind_Arith_CommRing_Proof_0__Lean_Meta_Grind_Arith_CommRing_Stepwise_mkImpEqExprProof___closed__7; @@ -540,7 +571,7 @@ LEAN_EXPORT lean_object* l_Lean_Meta_Grind_Arith_CommRing_toContextExpr(lean_obj _start: { lean_object* x_11; -x_11 = l_Lean_Meta_Grind_Arith_CommRing_getRing(x_1, x_2, x_3, x_4, x_5, x_6, x_7, x_8, x_9, x_10); +x_11 = l_Lean_Meta_Grind_Arith_CommRing_RingM_getRing(x_1, x_2, x_3, x_4, x_5, x_6, x_7, x_8, x_9, x_10); if (lean_obj_tag(x_11) == 0) { lean_object* x_12; lean_object* x_13; lean_object* x_14; lean_object* x_15; lean_object* x_16; uint8_t x_17; @@ -715,7 +746,7 @@ LEAN_EXPORT lean_object* l_Lean_Meta_Grind_Arith_CommRing_throwNoNatZeroDivisors _start: { lean_object* x_11; -x_11 = l_Lean_Meta_Grind_Arith_CommRing_getRing(x_1, x_2, x_3, x_4, x_5, x_6, x_7, x_8, x_9, x_10); +x_11 = l_Lean_Meta_Grind_Arith_CommRing_RingM_getRing(x_1, x_2, x_3, x_4, x_5, x_6, x_7, x_8, x_9, x_10); if (lean_obj_tag(x_11) == 0) { lean_object* x_12; lean_object* x_13; lean_object* x_14; lean_object* x_15; lean_object* x_16; lean_object* x_17; lean_object* x_18; lean_object* x_19; lean_object* x_20; @@ -805,168 +836,69 @@ lean_dec(x_1); return x_11; } } -LEAN_EXPORT lean_object* l_Lean_throwError___at_Lean_Meta_Grind_Arith_CommRing_getPolyConst___spec__1(lean_object* x_1, lean_object* x_2, lean_object* x_3, lean_object* x_4, lean_object* x_5, lean_object* x_6, lean_object* x_7, lean_object* x_8, lean_object* x_9, lean_object* x_10, lean_object* x_11) { +static lean_object* _init_l___private_Lean_Meta_Tactic_Grind_Arith_CommRing_DenoteExpr_0__Lean_Meta_Grind_Arith_CommRing_denoteNum___at_Lean_Meta_Grind_Arith_CommRing_getPolyConst___spec__2___closed__1() { _start: { -lean_object* x_12; lean_object* x_13; uint8_t x_14; -x_12 = lean_ctor_get(x_9, 5); -x_13 = l_Lean_addMessageContextFull___at_Lean_Meta_instAddMessageContextMetaM___spec__1(x_1, x_7, x_8, x_9, x_10, x_11); -x_14 = !lean_is_exclusive(x_13); -if (x_14 == 0) -{ -lean_object* x_15; lean_object* x_16; -x_15 = lean_ctor_get(x_13, 0); -lean_inc(x_12); -x_16 = lean_alloc_ctor(0, 2, 0); -lean_ctor_set(x_16, 0, x_12); -lean_ctor_set(x_16, 1, x_15); -lean_ctor_set_tag(x_13, 1); -lean_ctor_set(x_13, 0, x_16); -return x_13; -} -else -{ -lean_object* x_17; lean_object* x_18; lean_object* x_19; lean_object* x_20; -x_17 = lean_ctor_get(x_13, 0); -x_18 = lean_ctor_get(x_13, 1); -lean_inc(x_18); -lean_inc(x_17); -lean_dec(x_13); -lean_inc(x_12); -x_19 = lean_alloc_ctor(0, 2, 0); -lean_ctor_set(x_19, 0, x_12); -lean_ctor_set(x_19, 1, x_17); -x_20 = lean_alloc_ctor(1, 2, 0); -lean_ctor_set(x_20, 0, x_19); -lean_ctor_set(x_20, 1, x_18); -return x_20; -} +lean_object* x_1; +x_1 = lean_mk_string_unchecked("Lean", 4, 4); +return x_1; } } -static lean_object* _init_l_Lean_Meta_Grind_Arith_CommRing_getPolyConst___closed__1() { +static lean_object* _init_l___private_Lean_Meta_Tactic_Grind_Arith_CommRing_DenoteExpr_0__Lean_Meta_Grind_Arith_CommRing_denoteNum___at_Lean_Meta_Grind_Arith_CommRing_getPolyConst___spec__2___closed__2() { _start: { lean_object* x_1; -x_1 = lean_mk_string_unchecked("`grind` internal error, constant polynomial expected ", 53, 53); +x_1 = lean_mk_string_unchecked("Grind", 5, 5); return x_1; } } -static lean_object* _init_l_Lean_Meta_Grind_Arith_CommRing_getPolyConst___closed__2() { +static lean_object* _init_l___private_Lean_Meta_Tactic_Grind_Arith_CommRing_DenoteExpr_0__Lean_Meta_Grind_Arith_CommRing_denoteNum___at_Lean_Meta_Grind_Arith_CommRing_getPolyConst___spec__2___closed__3() { _start: { 
-lean_object* x_1; lean_object* x_2; -x_1 = l_Lean_Meta_Grind_Arith_CommRing_getPolyConst___closed__1; -x_2 = l_Lean_stringToMessageData(x_1); -return x_2; +lean_object* x_1; +x_1 = lean_mk_string_unchecked("CommRing", 8, 8); +return x_1; } } -LEAN_EXPORT lean_object* l_Lean_Meta_Grind_Arith_CommRing_getPolyConst(lean_object* x_1, lean_object* x_2, lean_object* x_3, lean_object* x_4, lean_object* x_5, lean_object* x_6, lean_object* x_7, lean_object* x_8, lean_object* x_9, lean_object* x_10, lean_object* x_11) { +static lean_object* _init_l___private_Lean_Meta_Tactic_Grind_Arith_CommRing_DenoteExpr_0__Lean_Meta_Grind_Arith_CommRing_denoteNum___at_Lean_Meta_Grind_Arith_CommRing_getPolyConst___spec__2___closed__4() { _start: { -if (lean_obj_tag(x_1) == 0) -{ -lean_object* x_12; lean_object* x_13; -x_12 = lean_ctor_get(x_1, 0); -lean_inc(x_12); -lean_dec(x_1); -x_13 = lean_alloc_ctor(0, 2, 0); -lean_ctor_set(x_13, 0, x_12); -lean_ctor_set(x_13, 1, x_11); -return x_13; -} -else -{ -lean_object* x_14; -x_14 = l_Lean_Grind_CommRing_Poly_denoteExpr(x_1, x_2, x_3, x_4, x_5, x_6, x_7, x_8, x_9, x_10, x_11); -if (lean_obj_tag(x_14) == 0) -{ -lean_object* x_15; lean_object* x_16; lean_object* x_17; lean_object* x_18; lean_object* x_19; lean_object* x_20; lean_object* x_21; lean_object* x_22; -x_15 = lean_ctor_get(x_14, 0); -lean_inc(x_15); -x_16 = lean_ctor_get(x_14, 1); -lean_inc(x_16); -lean_dec(x_14); -x_17 = l_Lean_indentExpr(x_15); -x_18 = l_Lean_Meta_Grind_Arith_CommRing_getPolyConst___closed__2; -x_19 = lean_alloc_ctor(7, 2, 0); -lean_ctor_set(x_19, 0, x_18); -lean_ctor_set(x_19, 1, x_17); -x_20 = l_Lean_Meta_Grind_Arith_CommRing_throwNoNatZeroDivisors___rarg___closed__4; -x_21 = lean_alloc_ctor(7, 2, 0); -lean_ctor_set(x_21, 0, x_19); -lean_ctor_set(x_21, 1, x_20); -x_22 = l_Lean_throwError___at_Lean_Meta_Grind_Arith_CommRing_getPolyConst___spec__1(x_21, x_2, x_3, x_4, x_5, x_6, x_7, x_8, x_9, x_10, x_16); -return x_22; -} -else -{ -uint8_t x_23; -x_23 = !lean_is_exclusive(x_14); -if (x_23 == 0) -{ -return x_14; -} -else -{ -lean_object* x_24; lean_object* x_25; lean_object* x_26; -x_24 = lean_ctor_get(x_14, 0); -x_25 = lean_ctor_get(x_14, 1); -lean_inc(x_25); -lean_inc(x_24); -lean_dec(x_14); -x_26 = lean_alloc_ctor(1, 2, 0); -lean_ctor_set(x_26, 0, x_24); -lean_ctor_set(x_26, 1, x_25); -return x_26; -} -} -} +lean_object* x_1; +x_1 = lean_mk_string_unchecked("ofNat", 5, 5); +return x_1; } } -LEAN_EXPORT lean_object* l_Lean_throwError___at_Lean_Meta_Grind_Arith_CommRing_getPolyConst___spec__1___boxed(lean_object* x_1, lean_object* x_2, lean_object* x_3, lean_object* x_4, lean_object* x_5, lean_object* x_6, lean_object* x_7, lean_object* x_8, lean_object* x_9, lean_object* x_10, lean_object* x_11) { +static lean_object* _init_l___private_Lean_Meta_Tactic_Grind_Arith_CommRing_DenoteExpr_0__Lean_Meta_Grind_Arith_CommRing_denoteNum___at_Lean_Meta_Grind_Arith_CommRing_getPolyConst___spec__2___closed__5() { _start: { -lean_object* x_12; -x_12 = l_Lean_throwError___at_Lean_Meta_Grind_Arith_CommRing_getPolyConst___spec__1(x_1, x_2, x_3, x_4, x_5, x_6, x_7, x_8, x_9, x_10, x_11); -lean_dec(x_10); -lean_dec(x_9); -lean_dec(x_8); -lean_dec(x_7); -lean_dec(x_6); -lean_dec(x_5); -lean_dec(x_4); -lean_dec(x_3); -lean_dec(x_2); -return x_12; +lean_object* x_1; lean_object* x_2; lean_object* x_3; lean_object* x_4; lean_object* x_5; +x_1 = 
l___private_Lean_Meta_Tactic_Grind_Arith_CommRing_DenoteExpr_0__Lean_Meta_Grind_Arith_CommRing_denoteNum___at_Lean_Meta_Grind_Arith_CommRing_getPolyConst___spec__2___closed__1; +x_2 = l___private_Lean_Meta_Tactic_Grind_Arith_CommRing_DenoteExpr_0__Lean_Meta_Grind_Arith_CommRing_denoteNum___at_Lean_Meta_Grind_Arith_CommRing_getPolyConst___spec__2___closed__2; +x_3 = l___private_Lean_Meta_Tactic_Grind_Arith_CommRing_DenoteExpr_0__Lean_Meta_Grind_Arith_CommRing_denoteNum___at_Lean_Meta_Grind_Arith_CommRing_getPolyConst___spec__2___closed__3; +x_4 = l___private_Lean_Meta_Tactic_Grind_Arith_CommRing_DenoteExpr_0__Lean_Meta_Grind_Arith_CommRing_denoteNum___at_Lean_Meta_Grind_Arith_CommRing_getPolyConst___spec__2___closed__4; +x_5 = l_Lean_Name_mkStr4(x_1, x_2, x_3, x_4); +return x_5; } } -LEAN_EXPORT lean_object* l_Lean_Meta_Grind_Arith_CommRing_getPolyConst___boxed(lean_object* x_1, lean_object* x_2, lean_object* x_3, lean_object* x_4, lean_object* x_5, lean_object* x_6, lean_object* x_7, lean_object* x_8, lean_object* x_9, lean_object* x_10, lean_object* x_11) { +static lean_object* _init_l___private_Lean_Meta_Tactic_Grind_Arith_CommRing_DenoteExpr_0__Lean_Meta_Grind_Arith_CommRing_denoteNum___at_Lean_Meta_Grind_Arith_CommRing_getPolyConst___spec__2___closed__6() { _start: { -lean_object* x_12; -x_12 = l_Lean_Meta_Grind_Arith_CommRing_getPolyConst(x_1, x_2, x_3, x_4, x_5, x_6, x_7, x_8, x_9, x_10, x_11); -lean_dec(x_10); -lean_dec(x_9); -lean_dec(x_8); -lean_dec(x_7); -lean_dec(x_6); -lean_dec(x_5); -lean_dec(x_4); -lean_dec(x_3); -lean_dec(x_2); -return x_12; +lean_object* x_1; +x_1 = lean_mk_string_unchecked("OfNat", 5, 5); +return x_1; } } -static lean_object* _init_l_Lean_Meta_Grind_Arith_CommRing_Null_instInhabitedPreNullCert___closed__1() { +static lean_object* _init_l___private_Lean_Meta_Tactic_Grind_Arith_CommRing_DenoteExpr_0__Lean_Meta_Grind_Arith_CommRing_denoteNum___at_Lean_Meta_Grind_Arith_CommRing_getPolyConst___spec__2___closed__7() { _start: { -lean_object* x_1; lean_object* x_2; -x_1 = lean_unsigned_to_nat(0u); -x_2 = lean_mk_empty_array_with_capacity(x_1); -return x_2; +lean_object* x_1; lean_object* x_2; lean_object* x_3; +x_1 = l___private_Lean_Meta_Tactic_Grind_Arith_CommRing_DenoteExpr_0__Lean_Meta_Grind_Arith_CommRing_denoteNum___at_Lean_Meta_Grind_Arith_CommRing_getPolyConst___spec__2___closed__6; +x_2 = l___private_Lean_Meta_Tactic_Grind_Arith_CommRing_DenoteExpr_0__Lean_Meta_Grind_Arith_CommRing_denoteNum___at_Lean_Meta_Grind_Arith_CommRing_getPolyConst___spec__2___closed__4; +x_3 = l_Lean_Name_mkStr2(x_1, x_2); +return x_3; } } -static lean_object* _init_l_Lean_Meta_Grind_Arith_CommRing_Null_instInhabitedPreNullCert___closed__2() { +static lean_object* _init_l___private_Lean_Meta_Tactic_Grind_Arith_CommRing_DenoteExpr_0__Lean_Meta_Grind_Arith_CommRing_denoteNum___at_Lean_Meta_Grind_Arith_CommRing_getPolyConst___spec__2___closed__8() { _start: { lean_object* x_1; lean_object* x_2; @@ -975,1870 +907,1523 @@ x_2 = lean_nat_to_int(x_1); return x_2; } } -static lean_object* _init_l_Lean_Meta_Grind_Arith_CommRing_Null_instInhabitedPreNullCert___closed__3() { +LEAN_EXPORT lean_object* l___private_Lean_Meta_Tactic_Grind_Arith_CommRing_DenoteExpr_0__Lean_Meta_Grind_Arith_CommRing_denoteNum___at_Lean_Meta_Grind_Arith_CommRing_getPolyConst___spec__2(lean_object* x_1, lean_object* x_2, lean_object* x_3, lean_object* x_4, lean_object* x_5, lean_object* x_6, lean_object* x_7, lean_object* x_8, lean_object* x_9, lean_object* x_10, lean_object* x_11) { _start: { -lean_object* x_1; 
lean_object* x_2; lean_object* x_3; -x_1 = l_Lean_Meta_Grind_Arith_CommRing_Null_instInhabitedPreNullCert___closed__1; -x_2 = l_Lean_Meta_Grind_Arith_CommRing_Null_instInhabitedPreNullCert___closed__2; -x_3 = lean_alloc_ctor(0, 2, 0); -lean_ctor_set(x_3, 0, x_1); -lean_ctor_set(x_3, 1, x_2); -return x_3; -} +lean_object* x_12; +x_12 = l_Lean_Meta_Grind_Arith_CommRing_RingM_getRing(x_2, x_3, x_4, x_5, x_6, x_7, x_8, x_9, x_10, x_11); +if (lean_obj_tag(x_12) == 0) +{ +uint8_t x_13; +x_13 = !lean_is_exclusive(x_12); +if (x_13 == 0) +{ +lean_object* x_14; lean_object* x_15; lean_object* x_16; lean_object* x_17; lean_object* x_18; lean_object* x_19; lean_object* x_20; lean_object* x_21; lean_object* x_22; lean_object* x_23; lean_object* x_24; lean_object* x_25; lean_object* x_26; lean_object* x_27; lean_object* x_28; uint8_t x_29; +x_14 = lean_ctor_get(x_12, 0); +x_15 = lean_nat_abs(x_1); +x_16 = l_Lean_mkRawNatLit(x_15); +x_17 = lean_ctor_get(x_14, 2); +lean_inc(x_17); +x_18 = lean_box(0); +x_19 = lean_alloc_ctor(1, 2, 0); +lean_ctor_set(x_19, 0, x_17); +lean_ctor_set(x_19, 1, x_18); +x_20 = l___private_Lean_Meta_Tactic_Grind_Arith_CommRing_DenoteExpr_0__Lean_Meta_Grind_Arith_CommRing_denoteNum___at_Lean_Meta_Grind_Arith_CommRing_getPolyConst___spec__2___closed__5; +lean_inc(x_19); +x_21 = l_Lean_Expr_const___override(x_20, x_19); +x_22 = lean_ctor_get(x_14, 1); +lean_inc(x_22); +x_23 = lean_ctor_get(x_14, 3); +lean_inc(x_23); +lean_inc(x_16); +lean_inc(x_22); +x_24 = l_Lean_mkApp3(x_21, x_22, x_23, x_16); +x_25 = l___private_Lean_Meta_Tactic_Grind_Arith_CommRing_DenoteExpr_0__Lean_Meta_Grind_Arith_CommRing_denoteNum___at_Lean_Meta_Grind_Arith_CommRing_getPolyConst___spec__2___closed__7; +x_26 = l_Lean_Expr_const___override(x_25, x_19); +x_27 = l_Lean_mkApp3(x_26, x_22, x_16, x_24); +x_28 = l___private_Lean_Meta_Tactic_Grind_Arith_CommRing_DenoteExpr_0__Lean_Meta_Grind_Arith_CommRing_denoteNum___at_Lean_Meta_Grind_Arith_CommRing_getPolyConst___spec__2___closed__8; +x_29 = lean_int_dec_lt(x_1, x_28); +if (x_29 == 0) +{ +lean_dec(x_14); +lean_ctor_set(x_12, 0, x_27); +return x_12; } -static lean_object* _init_l_Lean_Meta_Grind_Arith_CommRing_Null_instInhabitedPreNullCert() { -_start: +else { -lean_object* x_1; -x_1 = l_Lean_Meta_Grind_Arith_CommRing_Null_instInhabitedPreNullCert___closed__3; -return x_1; +lean_object* x_30; lean_object* x_31; +x_30 = lean_ctor_get(x_14, 9); +lean_inc(x_30); +lean_dec(x_14); +x_31 = l_Lean_Expr_app___override(x_30, x_27); +lean_ctor_set(x_12, 0, x_31); +return x_12; } } -static lean_object* _init_l_Lean_Meta_Grind_Arith_CommRing_Null_PreNullCert_zero___closed__1() { -_start: +else { -lean_object* x_1; lean_object* x_2; -x_1 = lean_box(0); -x_2 = lean_array_mk(x_1); -return x_2; -} +lean_object* x_32; lean_object* x_33; lean_object* x_34; lean_object* x_35; lean_object* x_36; lean_object* x_37; lean_object* x_38; lean_object* x_39; lean_object* x_40; lean_object* x_41; lean_object* x_42; lean_object* x_43; lean_object* x_44; lean_object* x_45; lean_object* x_46; lean_object* x_47; uint8_t x_48; +x_32 = lean_ctor_get(x_12, 0); +x_33 = lean_ctor_get(x_12, 1); +lean_inc(x_33); +lean_inc(x_32); +lean_dec(x_12); +x_34 = lean_nat_abs(x_1); +x_35 = l_Lean_mkRawNatLit(x_34); +x_36 = lean_ctor_get(x_32, 2); +lean_inc(x_36); +x_37 = lean_box(0); +x_38 = lean_alloc_ctor(1, 2, 0); +lean_ctor_set(x_38, 0, x_36); +lean_ctor_set(x_38, 1, x_37); +x_39 = 
l___private_Lean_Meta_Tactic_Grind_Arith_CommRing_DenoteExpr_0__Lean_Meta_Grind_Arith_CommRing_denoteNum___at_Lean_Meta_Grind_Arith_CommRing_getPolyConst___spec__2___closed__5; +lean_inc(x_38); +x_40 = l_Lean_Expr_const___override(x_39, x_38); +x_41 = lean_ctor_get(x_32, 1); +lean_inc(x_41); +x_42 = lean_ctor_get(x_32, 3); +lean_inc(x_42); +lean_inc(x_35); +lean_inc(x_41); +x_43 = l_Lean_mkApp3(x_40, x_41, x_42, x_35); +x_44 = l___private_Lean_Meta_Tactic_Grind_Arith_CommRing_DenoteExpr_0__Lean_Meta_Grind_Arith_CommRing_denoteNum___at_Lean_Meta_Grind_Arith_CommRing_getPolyConst___spec__2___closed__7; +x_45 = l_Lean_Expr_const___override(x_44, x_38); +x_46 = l_Lean_mkApp3(x_45, x_41, x_35, x_43); +x_47 = l___private_Lean_Meta_Tactic_Grind_Arith_CommRing_DenoteExpr_0__Lean_Meta_Grind_Arith_CommRing_denoteNum___at_Lean_Meta_Grind_Arith_CommRing_getPolyConst___spec__2___closed__8; +x_48 = lean_int_dec_lt(x_1, x_47); +if (x_48 == 0) +{ +lean_object* x_49; +lean_dec(x_32); +x_49 = lean_alloc_ctor(0, 2, 0); +lean_ctor_set(x_49, 0, x_46); +lean_ctor_set(x_49, 1, x_33); +return x_49; } -static lean_object* _init_l_Lean_Meta_Grind_Arith_CommRing_Null_PreNullCert_zero___closed__2() { -_start: +else { -lean_object* x_1; lean_object* x_2; -x_1 = lean_unsigned_to_nat(1u); -x_2 = lean_nat_to_int(x_1); -return x_2; +lean_object* x_50; lean_object* x_51; lean_object* x_52; +x_50 = lean_ctor_get(x_32, 9); +lean_inc(x_50); +lean_dec(x_32); +x_51 = l_Lean_Expr_app___override(x_50, x_46); +x_52 = lean_alloc_ctor(0, 2, 0); +lean_ctor_set(x_52, 0, x_51); +lean_ctor_set(x_52, 1, x_33); +return x_52; } } -static lean_object* _init_l_Lean_Meta_Grind_Arith_CommRing_Null_PreNullCert_zero___closed__3() { -_start: -{ -lean_object* x_1; lean_object* x_2; lean_object* x_3; -x_1 = l_Lean_Meta_Grind_Arith_CommRing_Null_PreNullCert_zero___closed__1; -x_2 = l_Lean_Meta_Grind_Arith_CommRing_Null_PreNullCert_zero___closed__2; -x_3 = lean_alloc_ctor(0, 2, 0); -lean_ctor_set(x_3, 0, x_1); -lean_ctor_set(x_3, 1, x_2); -return x_3; } +else +{ +uint8_t x_53; +x_53 = !lean_is_exclusive(x_12); +if (x_53 == 0) +{ +return x_12; } -static lean_object* _init_l_Lean_Meta_Grind_Arith_CommRing_Null_PreNullCert_zero() { -_start: +else { -lean_object* x_1; -x_1 = l_Lean_Meta_Grind_Arith_CommRing_Null_PreNullCert_zero___closed__3; -return x_1; +lean_object* x_54; lean_object* x_55; lean_object* x_56; +x_54 = lean_ctor_get(x_12, 0); +x_55 = lean_ctor_get(x_12, 1); +lean_inc(x_55); +lean_inc(x_54); +lean_dec(x_12); +x_56 = lean_alloc_ctor(1, 2, 0); +lean_ctor_set(x_56, 0, x_54); +lean_ctor_set(x_56, 1, x_55); +return x_56; } } -static lean_object* _init_l_Lean_Meta_Grind_Arith_CommRing_Null_PreNullCert_unit___closed__1() { -_start: -{ -lean_object* x_1; lean_object* x_2; -x_1 = l_Lean_Meta_Grind_Arith_CommRing_Null_instInhabitedPreNullCert___closed__2; -x_2 = lean_alloc_ctor(0, 1, 0); -lean_ctor_set(x_2, 0, x_1); -return x_2; } } -static lean_object* _init_l_Lean_Meta_Grind_Arith_CommRing_Null_PreNullCert_unit___closed__2() { +LEAN_EXPORT lean_object* l_Lean_Grind_CommRing_Power_denoteExpr___at_Lean_Meta_Grind_Arith_CommRing_getPolyConst___spec__5(lean_object* x_1, lean_object* x_2, lean_object* x_3, lean_object* x_4, lean_object* x_5, lean_object* x_6, lean_object* x_7, lean_object* x_8, lean_object* x_9, lean_object* x_10, lean_object* x_11) { _start: { -lean_object* x_1; lean_object* x_2; -x_1 = l_Lean_Meta_Grind_Arith_CommRing_Null_PreNullCert_zero___closed__2; -x_2 = lean_alloc_ctor(0, 1, 0); -lean_ctor_set(x_2, 0, x_1); -return x_2; -} -} 
-LEAN_EXPORT lean_object* l_Lean_Meta_Grind_Arith_CommRing_Null_PreNullCert_unit(lean_object* x_1, lean_object* x_2) { -_start: -{ -lean_object* x_3; lean_object* x_4; lean_object* x_5; lean_object* x_6; lean_object* x_7; lean_object* x_8; -x_3 = l_Lean_Meta_Grind_Arith_CommRing_Null_PreNullCert_unit___closed__1; -x_4 = lean_mk_array(x_2, x_3); -x_5 = l_Lean_Meta_Grind_Arith_CommRing_Null_PreNullCert_unit___closed__2; -x_6 = lean_array_set(x_4, x_1, x_5); -x_7 = l_Lean_Meta_Grind_Arith_CommRing_Null_PreNullCert_zero___closed__2; -x_8 = lean_alloc_ctor(0, 2, 0); -lean_ctor_set(x_8, 0, x_6); -lean_ctor_set(x_8, 1, x_7); -return x_8; -} -} -LEAN_EXPORT lean_object* l_Lean_Meta_Grind_Arith_CommRing_Null_PreNullCert_unit___boxed(lean_object* x_1, lean_object* x_2) { -_start: -{ -lean_object* x_3; -x_3 = l_Lean_Meta_Grind_Arith_CommRing_Null_PreNullCert_unit(x_1, x_2); -lean_dec(x_1); -return x_3; -} -} -LEAN_EXPORT lean_object* l_Lean_Meta_Grind_Arith_CommRing_Null_PreNullCert_div(lean_object* x_1, lean_object* x_2, lean_object* x_3, lean_object* x_4, lean_object* x_5, lean_object* x_6, lean_object* x_7, lean_object* x_8, lean_object* x_9, lean_object* x_10, lean_object* x_11, lean_object* x_12) { -_start: +lean_object* x_12; +x_12 = l_Lean_Meta_Grind_Arith_CommRing_RingM_getRing(x_2, x_3, x_4, x_5, x_6, x_7, x_8, x_9, x_10, x_11); +if (lean_obj_tag(x_12) == 0) { uint8_t x_13; -x_13 = !lean_is_exclusive(x_1); +x_13 = !lean_is_exclusive(x_12); if (x_13 == 0) { -lean_object* x_14; lean_object* x_15; lean_object* x_16; -x_14 = lean_ctor_get(x_1, 1); -x_15 = lean_int_mul(x_14, x_2); +lean_object* x_14; lean_object* x_15; lean_object* x_16; lean_object* x_17; lean_object* x_18; uint8_t x_19; lean_object* x_20; lean_object* x_21; uint8_t x_22; +x_14 = lean_ctor_get(x_12, 0); +x_15 = lean_ctor_get(x_12, 1); +x_16 = lean_ctor_get(x_14, 13); +lean_inc(x_16); lean_dec(x_14); -lean_ctor_set(x_1, 1, x_15); -x_16 = lean_alloc_ctor(0, 2, 0); -lean_ctor_set(x_16, 0, x_1); -lean_ctor_set(x_16, 1, x_12); -return x_16; -} -else -{ -lean_object* x_17; lean_object* x_18; lean_object* x_19; lean_object* x_20; lean_object* x_21; x_17 = lean_ctor_get(x_1, 0); -x_18 = lean_ctor_get(x_1, 1); -lean_inc(x_18); lean_inc(x_17); -lean_dec(x_1); -x_19 = lean_int_mul(x_18, x_2); +x_18 = lean_ctor_get(x_16, 2); +lean_inc(x_18); +x_19 = lean_nat_dec_lt(x_17, x_18); lean_dec(x_18); -x_20 = lean_alloc_ctor(0, 2, 0); -lean_ctor_set(x_20, 0, x_17); -lean_ctor_set(x_20, 1, x_19); -x_21 = lean_alloc_ctor(0, 2, 0); -lean_ctor_set(x_21, 0, x_20); -lean_ctor_set(x_21, 1, x_12); -return x_21; -} -} -} -LEAN_EXPORT lean_object* l_Lean_Meta_Grind_Arith_CommRing_Null_PreNullCert_div___boxed(lean_object* x_1, lean_object* x_2, lean_object* x_3, lean_object* x_4, lean_object* x_5, lean_object* x_6, lean_object* x_7, lean_object* x_8, lean_object* x_9, lean_object* x_10, lean_object* x_11, lean_object* x_12) { -_start: -{ -lean_object* x_13; -x_13 = l_Lean_Meta_Grind_Arith_CommRing_Null_PreNullCert_div(x_1, x_2, x_3, x_4, x_5, x_6, x_7, x_8, x_9, x_10, x_11, x_12); -lean_dec(x_11); -lean_dec(x_10); -lean_dec(x_9); -lean_dec(x_8); -lean_dec(x_7); -lean_dec(x_6); -lean_dec(x_5); -lean_dec(x_4); -lean_dec(x_3); -lean_dec(x_2); -return x_13; -} -} -LEAN_EXPORT lean_object* l_Array_mapMUnsafe_map___at_Lean_Meta_Grind_Arith_CommRing_Null_PreNullCert_mul___spec__1(lean_object* x_1, size_t x_2, size_t x_3, lean_object* x_4, lean_object* x_5, lean_object* x_6, lean_object* x_7, lean_object* x_8, lean_object* x_9, lean_object* x_10, lean_object* x_11, 
lean_object* x_12, lean_object* x_13, lean_object* x_14) { -_start: -{ -uint8_t x_15; -x_15 = lean_usize_dec_lt(x_3, x_2); -if (x_15 == 0) -{ -lean_object* x_16; -lean_dec(x_13); -lean_dec(x_12); -lean_dec(x_11); -lean_dec(x_10); -lean_dec(x_9); -lean_dec(x_8); -lean_dec(x_7); -lean_dec(x_6); -lean_dec(x_5); -x_16 = lean_alloc_ctor(0, 2, 0); -lean_ctor_set(x_16, 0, x_4); -lean_ctor_set(x_16, 1, x_14); -return x_16; -} -else -{ -lean_object* x_17; lean_object* x_18; lean_object* x_19; lean_object* x_20; -x_17 = lean_array_uget(x_4, x_3); -x_18 = lean_unsigned_to_nat(0u); -x_19 = lean_array_uset(x_4, x_3, x_18); -lean_inc(x_13); -lean_inc(x_12); -lean_inc(x_11); -lean_inc(x_10); -lean_inc(x_9); -lean_inc(x_8); -lean_inc(x_7); -lean_inc(x_6); -lean_inc(x_5); -x_20 = l_Lean_Grind_CommRing_Poly_mulConstM(x_17, x_1, x_5, x_6, x_7, x_8, x_9, x_10, x_11, x_12, x_13, x_14); -if (lean_obj_tag(x_20) == 0) -{ -lean_object* x_21; lean_object* x_22; size_t x_23; size_t x_24; lean_object* x_25; -x_21 = lean_ctor_get(x_20, 0); -lean_inc(x_21); -x_22 = lean_ctor_get(x_20, 1); -lean_inc(x_22); -lean_dec(x_20); -x_23 = 1; -x_24 = lean_usize_add(x_3, x_23); -x_25 = lean_array_uset(x_19, x_3, x_21); -x_3 = x_24; -x_4 = x_25; -x_14 = x_22; -goto _start; -} -else -{ -uint8_t x_27; -lean_dec(x_19); -lean_dec(x_13); -lean_dec(x_12); -lean_dec(x_11); -lean_dec(x_10); -lean_dec(x_9); -lean_dec(x_8); -lean_dec(x_7); -lean_dec(x_6); -lean_dec(x_5); -x_27 = !lean_is_exclusive(x_20); -if (x_27 == 0) -{ -return x_20; -} -else -{ -lean_object* x_28; lean_object* x_29; lean_object* x_30; -x_28 = lean_ctor_get(x_20, 0); -x_29 = lean_ctor_get(x_20, 1); -lean_inc(x_29); -lean_inc(x_28); -lean_dec(x_20); -x_30 = lean_alloc_ctor(1, 2, 0); -lean_ctor_set(x_30, 0, x_28); -lean_ctor_set(x_30, 1, x_29); -return x_30; -} -} -} -} -} -LEAN_EXPORT lean_object* l_Lean_Meta_Grind_Arith_CommRing_Null_PreNullCert_mul(lean_object* x_1, lean_object* x_2, lean_object* x_3, lean_object* x_4, lean_object* x_5, lean_object* x_6, lean_object* x_7, lean_object* x_8, lean_object* x_9, lean_object* x_10, lean_object* x_11, lean_object* x_12) { -_start: -{ -lean_object* x_13; uint8_t x_14; -x_13 = l_Lean_Meta_Grind_Arith_CommRing_Null_PreNullCert_zero___closed__2; -x_14 = lean_int_dec_eq(x_2, x_13); -if (x_14 == 0) -{ -uint8_t x_15; -x_15 = !lean_is_exclusive(x_1); -if (x_15 == 0) +x_20 = lean_ctor_get(x_1, 1); +lean_inc(x_20); +lean_dec(x_1); +x_21 = lean_unsigned_to_nat(1u); +x_22 = lean_nat_dec_eq(x_20, x_21); +if (x_19 == 0) { -lean_object* x_16; lean_object* x_17; lean_object* x_18; lean_object* x_19; lean_object* x_20; lean_object* x_21; uint8_t x_22; -x_16 = lean_ctor_get(x_1, 0); -x_17 = lean_ctor_get(x_1, 1); -x_18 = l_Int_gcd(x_2, x_17); -x_19 = lean_nat_to_int(x_18); -x_20 = lean_int_ediv(x_2, x_19); -x_21 = lean_int_ediv(x_17, x_19); -lean_dec(x_19); +lean_object* x_23; lean_object* x_24; lean_dec(x_17); -x_22 = lean_int_dec_eq(x_20, x_13); +lean_dec(x_16); +x_23 = l_Lean_instInhabitedExpr; +x_24 = l_outOfBounds___rarg(x_23); if (x_22 == 0) { -size_t x_23; size_t x_24; lean_object* x_25; -x_23 = lean_array_size(x_16); -x_24 = 0; -x_25 = l_Array_mapMUnsafe_map___at_Lean_Meta_Grind_Arith_CommRing_Null_PreNullCert_mul___spec__1(x_20, x_23, x_24, x_16, x_3, x_4, x_5, x_6, x_7, x_8, x_9, x_10, x_11, x_12); -lean_dec(x_20); +lean_object* x_25; +lean_free_object(x_12); +x_25 = l_Lean_Meta_Grind_Arith_CommRing_RingM_getRing(x_2, x_3, x_4, x_5, x_6, x_7, x_8, x_9, x_10, x_15); if (lean_obj_tag(x_25) == 0) { uint8_t x_26; x_26 = 
!lean_is_exclusive(x_25); if (x_26 == 0) { -lean_object* x_27; +lean_object* x_27; lean_object* x_28; lean_object* x_29; lean_object* x_30; x_27 = lean_ctor_get(x_25, 0); -lean_ctor_set(x_1, 1, x_21); -lean_ctor_set(x_1, 0, x_27); -lean_ctor_set(x_25, 0, x_1); +x_28 = lean_ctor_get(x_27, 10); +lean_inc(x_28); +lean_dec(x_27); +x_29 = l_Lean_mkNatLit(x_20); +x_30 = l_Lean_mkAppB(x_28, x_24, x_29); +lean_ctor_set(x_25, 0, x_30); return x_25; } else { -lean_object* x_28; lean_object* x_29; lean_object* x_30; -x_28 = lean_ctor_get(x_25, 0); -x_29 = lean_ctor_get(x_25, 1); -lean_inc(x_29); -lean_inc(x_28); +lean_object* x_31; lean_object* x_32; lean_object* x_33; lean_object* x_34; lean_object* x_35; lean_object* x_36; +x_31 = lean_ctor_get(x_25, 0); +x_32 = lean_ctor_get(x_25, 1); +lean_inc(x_32); +lean_inc(x_31); lean_dec(x_25); -lean_ctor_set(x_1, 1, x_21); -lean_ctor_set(x_1, 0, x_28); -x_30 = lean_alloc_ctor(0, 2, 0); -lean_ctor_set(x_30, 0, x_1); -lean_ctor_set(x_30, 1, x_29); -return x_30; +x_33 = lean_ctor_get(x_31, 10); +lean_inc(x_33); +lean_dec(x_31); +x_34 = l_Lean_mkNatLit(x_20); +x_35 = l_Lean_mkAppB(x_33, x_24, x_34); +x_36 = lean_alloc_ctor(0, 2, 0); +lean_ctor_set(x_36, 0, x_35); +lean_ctor_set(x_36, 1, x_32); +return x_36; } } else { -uint8_t x_31; -lean_dec(x_21); -lean_free_object(x_1); -x_31 = !lean_is_exclusive(x_25); -if (x_31 == 0) +uint8_t x_37; +lean_dec(x_24); +lean_dec(x_20); +x_37 = !lean_is_exclusive(x_25); +if (x_37 == 0) { return x_25; } else { -lean_object* x_32; lean_object* x_33; lean_object* x_34; -x_32 = lean_ctor_get(x_25, 0); -x_33 = lean_ctor_get(x_25, 1); -lean_inc(x_33); -lean_inc(x_32); +lean_object* x_38; lean_object* x_39; lean_object* x_40; +x_38 = lean_ctor_get(x_25, 0); +x_39 = lean_ctor_get(x_25, 1); +lean_inc(x_39); +lean_inc(x_38); lean_dec(x_25); -x_34 = lean_alloc_ctor(1, 2, 0); -lean_ctor_set(x_34, 0, x_32); -lean_ctor_set(x_34, 1, x_33); -return x_34; +x_40 = lean_alloc_ctor(1, 2, 0); +lean_ctor_set(x_40, 0, x_38); +lean_ctor_set(x_40, 1, x_39); +return x_40; } } } else { -lean_object* x_35; lean_dec(x_20); -lean_dec(x_11); -lean_dec(x_10); -lean_dec(x_9); -lean_dec(x_8); -lean_dec(x_7); -lean_dec(x_6); -lean_dec(x_5); -lean_dec(x_4); -lean_dec(x_3); -lean_ctor_set(x_1, 1, x_21); -x_35 = lean_alloc_ctor(0, 2, 0); -lean_ctor_set(x_35, 0, x_1); -lean_ctor_set(x_35, 1, x_12); -return x_35; +lean_ctor_set(x_12, 0, x_24); +return x_12; } } else { -lean_object* x_36; lean_object* x_37; lean_object* x_38; lean_object* x_39; lean_object* x_40; lean_object* x_41; uint8_t x_42; -x_36 = lean_ctor_get(x_1, 0); -x_37 = lean_ctor_get(x_1, 1); -lean_inc(x_37); -lean_inc(x_36); -lean_dec(x_1); -x_38 = l_Int_gcd(x_2, x_37); -x_39 = lean_nat_to_int(x_38); -x_40 = lean_int_ediv(x_2, x_39); -x_41 = lean_int_ediv(x_37, x_39); -lean_dec(x_39); -lean_dec(x_37); -x_42 = lean_int_dec_eq(x_40, x_13); -if (x_42 == 0) +lean_object* x_41; lean_object* x_42; +x_41 = l_Lean_instInhabitedExpr; +x_42 = l_Lean_PersistentArray_get_x21___rarg(x_41, x_16, x_17); +lean_dec(x_17); +if (x_22 == 0) { -size_t x_43; size_t x_44; lean_object* x_45; -x_43 = lean_array_size(x_36); -x_44 = 0; -x_45 = l_Array_mapMUnsafe_map___at_Lean_Meta_Grind_Arith_CommRing_Null_PreNullCert_mul___spec__1(x_40, x_43, x_44, x_36, x_3, x_4, x_5, x_6, x_7, x_8, x_9, x_10, x_11, x_12); -lean_dec(x_40); -if (lean_obj_tag(x_45) == 0) +lean_object* x_43; +lean_free_object(x_12); +x_43 = l_Lean_Meta_Grind_Arith_CommRing_RingM_getRing(x_2, x_3, x_4, x_5, x_6, x_7, x_8, x_9, x_10, x_15); +if (lean_obj_tag(x_43) 
== 0) { -lean_object* x_46; lean_object* x_47; lean_object* x_48; lean_object* x_49; lean_object* x_50; -x_46 = lean_ctor_get(x_45, 0); +uint8_t x_44; +x_44 = !lean_is_exclusive(x_43); +if (x_44 == 0) +{ +lean_object* x_45; lean_object* x_46; lean_object* x_47; lean_object* x_48; +x_45 = lean_ctor_get(x_43, 0); +x_46 = lean_ctor_get(x_45, 10); lean_inc(x_46); -x_47 = lean_ctor_get(x_45, 1); -lean_inc(x_47); -if (lean_is_exclusive(x_45)) { - lean_ctor_release(x_45, 0); - lean_ctor_release(x_45, 1); - x_48 = x_45; +lean_dec(x_45); +x_47 = l_Lean_mkNatLit(x_20); +x_48 = l_Lean_mkAppB(x_46, x_42, x_47); +lean_ctor_set(x_43, 0, x_48); +return x_43; +} +else +{ +lean_object* x_49; lean_object* x_50; lean_object* x_51; lean_object* x_52; lean_object* x_53; lean_object* x_54; +x_49 = lean_ctor_get(x_43, 0); +x_50 = lean_ctor_get(x_43, 1); +lean_inc(x_50); +lean_inc(x_49); +lean_dec(x_43); +x_51 = lean_ctor_get(x_49, 10); +lean_inc(x_51); +lean_dec(x_49); +x_52 = l_Lean_mkNatLit(x_20); +x_53 = l_Lean_mkAppB(x_51, x_42, x_52); +x_54 = lean_alloc_ctor(0, 2, 0); +lean_ctor_set(x_54, 0, x_53); +lean_ctor_set(x_54, 1, x_50); +return x_54; +} +} +else +{ +uint8_t x_55; +lean_dec(x_42); +lean_dec(x_20); +x_55 = !lean_is_exclusive(x_43); +if (x_55 == 0) +{ +return x_43; +} +else +{ +lean_object* x_56; lean_object* x_57; lean_object* x_58; +x_56 = lean_ctor_get(x_43, 0); +x_57 = lean_ctor_get(x_43, 1); +lean_inc(x_57); +lean_inc(x_56); +lean_dec(x_43); +x_58 = lean_alloc_ctor(1, 2, 0); +lean_ctor_set(x_58, 0, x_56); +lean_ctor_set(x_58, 1, x_57); +return x_58; +} +} +} +else +{ +lean_dec(x_20); +lean_ctor_set(x_12, 0, x_42); +return x_12; +} +} +} +else +{ +lean_object* x_59; lean_object* x_60; lean_object* x_61; lean_object* x_62; lean_object* x_63; uint8_t x_64; lean_object* x_65; lean_object* x_66; uint8_t x_67; +x_59 = lean_ctor_get(x_12, 0); +x_60 = lean_ctor_get(x_12, 1); +lean_inc(x_60); +lean_inc(x_59); +lean_dec(x_12); +x_61 = lean_ctor_get(x_59, 13); +lean_inc(x_61); +lean_dec(x_59); +x_62 = lean_ctor_get(x_1, 0); +lean_inc(x_62); +x_63 = lean_ctor_get(x_61, 2); +lean_inc(x_63); +x_64 = lean_nat_dec_lt(x_62, x_63); +lean_dec(x_63); +x_65 = lean_ctor_get(x_1, 1); +lean_inc(x_65); +lean_dec(x_1); +x_66 = lean_unsigned_to_nat(1u); +x_67 = lean_nat_dec_eq(x_65, x_66); +if (x_64 == 0) +{ +lean_object* x_68; lean_object* x_69; +lean_dec(x_62); +lean_dec(x_61); +x_68 = l_Lean_instInhabitedExpr; +x_69 = l_outOfBounds___rarg(x_68); +if (x_67 == 0) +{ +lean_object* x_70; +x_70 = l_Lean_Meta_Grind_Arith_CommRing_RingM_getRing(x_2, x_3, x_4, x_5, x_6, x_7, x_8, x_9, x_10, x_60); +if (lean_obj_tag(x_70) == 0) +{ +lean_object* x_71; lean_object* x_72; lean_object* x_73; lean_object* x_74; lean_object* x_75; lean_object* x_76; lean_object* x_77; +x_71 = lean_ctor_get(x_70, 0); +lean_inc(x_71); +x_72 = lean_ctor_get(x_70, 1); +lean_inc(x_72); +if (lean_is_exclusive(x_70)) { + lean_ctor_release(x_70, 0); + lean_ctor_release(x_70, 1); + x_73 = x_70; } else { - lean_dec_ref(x_45); - x_48 = lean_box(0); + lean_dec_ref(x_70); + x_73 = lean_box(0); } -x_49 = lean_alloc_ctor(0, 2, 0); -lean_ctor_set(x_49, 0, x_46); -lean_ctor_set(x_49, 1, x_41); -if (lean_is_scalar(x_48)) { - x_50 = lean_alloc_ctor(0, 2, 0); +x_74 = lean_ctor_get(x_71, 10); +lean_inc(x_74); +lean_dec(x_71); +x_75 = l_Lean_mkNatLit(x_65); +x_76 = l_Lean_mkAppB(x_74, x_69, x_75); +if (lean_is_scalar(x_73)) { + x_77 = lean_alloc_ctor(0, 2, 0); } else { - x_50 = x_48; + x_77 = x_73; } -lean_ctor_set(x_50, 0, x_49); -lean_ctor_set(x_50, 1, x_47); -return 
x_50; +lean_ctor_set(x_77, 0, x_76); +lean_ctor_set(x_77, 1, x_72); +return x_77; } else { -lean_object* x_51; lean_object* x_52; lean_object* x_53; lean_object* x_54; -lean_dec(x_41); -x_51 = lean_ctor_get(x_45, 0); -lean_inc(x_51); -x_52 = lean_ctor_get(x_45, 1); -lean_inc(x_52); -if (lean_is_exclusive(x_45)) { - lean_ctor_release(x_45, 0); - lean_ctor_release(x_45, 1); - x_53 = x_45; +lean_object* x_78; lean_object* x_79; lean_object* x_80; lean_object* x_81; +lean_dec(x_69); +lean_dec(x_65); +x_78 = lean_ctor_get(x_70, 0); +lean_inc(x_78); +x_79 = lean_ctor_get(x_70, 1); +lean_inc(x_79); +if (lean_is_exclusive(x_70)) { + lean_ctor_release(x_70, 0); + lean_ctor_release(x_70, 1); + x_80 = x_70; } else { - lean_dec_ref(x_45); - x_53 = lean_box(0); + lean_dec_ref(x_70); + x_80 = lean_box(0); } -if (lean_is_scalar(x_53)) { - x_54 = lean_alloc_ctor(1, 2, 0); +if (lean_is_scalar(x_80)) { + x_81 = lean_alloc_ctor(1, 2, 0); } else { - x_54 = x_53; + x_81 = x_80; } -lean_ctor_set(x_54, 0, x_51); -lean_ctor_set(x_54, 1, x_52); -return x_54; +lean_ctor_set(x_81, 0, x_78); +lean_ctor_set(x_81, 1, x_79); +return x_81; } } else { -lean_object* x_55; lean_object* x_56; -lean_dec(x_40); -lean_dec(x_11); -lean_dec(x_10); -lean_dec(x_9); -lean_dec(x_8); -lean_dec(x_7); -lean_dec(x_6); -lean_dec(x_5); -lean_dec(x_4); -lean_dec(x_3); -x_55 = lean_alloc_ctor(0, 2, 0); -lean_ctor_set(x_55, 0, x_36); -lean_ctor_set(x_55, 1, x_41); -x_56 = lean_alloc_ctor(0, 2, 0); -lean_ctor_set(x_56, 0, x_55); -lean_ctor_set(x_56, 1, x_12); -return x_56; +lean_object* x_82; +lean_dec(x_65); +x_82 = lean_alloc_ctor(0, 2, 0); +lean_ctor_set(x_82, 0, x_69); +lean_ctor_set(x_82, 1, x_60); +return x_82; +} +} +else +{ +lean_object* x_83; lean_object* x_84; +x_83 = l_Lean_instInhabitedExpr; +x_84 = l_Lean_PersistentArray_get_x21___rarg(x_83, x_61, x_62); +lean_dec(x_62); +if (x_67 == 0) +{ +lean_object* x_85; +x_85 = l_Lean_Meta_Grind_Arith_CommRing_RingM_getRing(x_2, x_3, x_4, x_5, x_6, x_7, x_8, x_9, x_10, x_60); +if (lean_obj_tag(x_85) == 0) +{ +lean_object* x_86; lean_object* x_87; lean_object* x_88; lean_object* x_89; lean_object* x_90; lean_object* x_91; lean_object* x_92; +x_86 = lean_ctor_get(x_85, 0); +lean_inc(x_86); +x_87 = lean_ctor_get(x_85, 1); +lean_inc(x_87); +if (lean_is_exclusive(x_85)) { + lean_ctor_release(x_85, 0); + lean_ctor_release(x_85, 1); + x_88 = x_85; +} else { + lean_dec_ref(x_85); + x_88 = lean_box(0); } +x_89 = lean_ctor_get(x_86, 10); +lean_inc(x_89); +lean_dec(x_86); +x_90 = l_Lean_mkNatLit(x_65); +x_91 = l_Lean_mkAppB(x_89, x_84, x_90); +if (lean_is_scalar(x_88)) { + x_92 = lean_alloc_ctor(0, 2, 0); +} else { + x_92 = x_88; } +lean_ctor_set(x_92, 0, x_91); +lean_ctor_set(x_92, 1, x_87); +return x_92; } else { -lean_object* x_57; -lean_dec(x_11); -lean_dec(x_10); -lean_dec(x_9); -lean_dec(x_8); -lean_dec(x_7); -lean_dec(x_6); -lean_dec(x_5); -lean_dec(x_4); -lean_dec(x_3); -x_57 = lean_alloc_ctor(0, 2, 0); -lean_ctor_set(x_57, 0, x_1); -lean_ctor_set(x_57, 1, x_12); -return x_57; +lean_object* x_93; lean_object* x_94; lean_object* x_95; lean_object* x_96; +lean_dec(x_84); +lean_dec(x_65); +x_93 = lean_ctor_get(x_85, 0); +lean_inc(x_93); +x_94 = lean_ctor_get(x_85, 1); +lean_inc(x_94); +if (lean_is_exclusive(x_85)) { + lean_ctor_release(x_85, 0); + lean_ctor_release(x_85, 1); + x_95 = x_85; +} else { + lean_dec_ref(x_85); + x_95 = lean_box(0); +} +if (lean_is_scalar(x_95)) { + x_96 = lean_alloc_ctor(1, 2, 0); +} else { + x_96 = x_95; } +lean_ctor_set(x_96, 0, x_93); +lean_ctor_set(x_96, 1, x_94); 
+return x_96; } } -LEAN_EXPORT lean_object* l_Array_mapMUnsafe_map___at_Lean_Meta_Grind_Arith_CommRing_Null_PreNullCert_mul___spec__1___boxed(lean_object* x_1, lean_object* x_2, lean_object* x_3, lean_object* x_4, lean_object* x_5, lean_object* x_6, lean_object* x_7, lean_object* x_8, lean_object* x_9, lean_object* x_10, lean_object* x_11, lean_object* x_12, lean_object* x_13, lean_object* x_14) { -_start: +else { -size_t x_15; size_t x_16; lean_object* x_17; -x_15 = lean_unbox_usize(x_2); -lean_dec(x_2); -x_16 = lean_unbox_usize(x_3); -lean_dec(x_3); -x_17 = l_Array_mapMUnsafe_map___at_Lean_Meta_Grind_Arith_CommRing_Null_PreNullCert_mul___spec__1(x_1, x_15, x_16, x_4, x_5, x_6, x_7, x_8, x_9, x_10, x_11, x_12, x_13, x_14); +lean_object* x_97; +lean_dec(x_65); +x_97 = lean_alloc_ctor(0, 2, 0); +lean_ctor_set(x_97, 0, x_84); +lean_ctor_set(x_97, 1, x_60); +return x_97; +} +} +} +} +else +{ +uint8_t x_98; lean_dec(x_1); -return x_17; +x_98 = !lean_is_exclusive(x_12); +if (x_98 == 0) +{ +return x_12; } +else +{ +lean_object* x_99; lean_object* x_100; lean_object* x_101; +x_99 = lean_ctor_get(x_12, 0); +x_100 = lean_ctor_get(x_12, 1); +lean_inc(x_100); +lean_inc(x_99); +lean_dec(x_12); +x_101 = lean_alloc_ctor(1, 2, 0); +lean_ctor_set(x_101, 0, x_99); +lean_ctor_set(x_101, 1, x_100); +return x_101; } -LEAN_EXPORT lean_object* l_Lean_Meta_Grind_Arith_CommRing_Null_PreNullCert_mul___boxed(lean_object* x_1, lean_object* x_2, lean_object* x_3, lean_object* x_4, lean_object* x_5, lean_object* x_6, lean_object* x_7, lean_object* x_8, lean_object* x_9, lean_object* x_10, lean_object* x_11, lean_object* x_12) { +} +} +} +LEAN_EXPORT lean_object* l_Lean_Grind_CommRing_Mon_denoteExpr_go___at_Lean_Meta_Grind_Arith_CommRing_getPolyConst___spec__6(lean_object* x_1, lean_object* x_2, lean_object* x_3, lean_object* x_4, lean_object* x_5, lean_object* x_6, lean_object* x_7, lean_object* x_8, lean_object* x_9, lean_object* x_10, lean_object* x_11, lean_object* x_12) { _start: { +if (lean_obj_tag(x_1) == 0) +{ lean_object* x_13; -x_13 = l_Lean_Meta_Grind_Arith_CommRing_Null_PreNullCert_mul(x_1, x_2, x_3, x_4, x_5, x_6, x_7, x_8, x_9, x_10, x_11, x_12); -lean_dec(x_2); +x_13 = lean_alloc_ctor(0, 2, 0); +lean_ctor_set(x_13, 0, x_2); +lean_ctor_set(x_13, 1, x_12); return x_13; } -} -LEAN_EXPORT lean_object* l_Std_Range_forIn_x27_loop___at_Lean_Meta_Grind_Arith_CommRing_Null_PreNullCert_combine___spec__1(lean_object* x_1, lean_object* x_2, lean_object* x_3, lean_object* x_4, lean_object* x_5, lean_object* x_6, lean_object* x_7, lean_object* x_8, lean_object* x_9, lean_object* x_10, lean_object* x_11, lean_object* x_12, lean_object* x_13, lean_object* x_14, lean_object* x_15, lean_object* x_16, lean_object* x_17, lean_object* x_18, lean_object* x_19, lean_object* x_20, lean_object* x_21, lean_object* x_22, lean_object* x_23, lean_object* x_24, lean_object* x_25, lean_object* x_26) { -_start: +else { -lean_object* x_27; uint8_t x_28; -x_27 = lean_ctor_get(x_12, 1); -x_28 = lean_nat_dec_lt(x_14, x_27); -if (x_28 == 0) +lean_object* x_14; lean_object* x_15; lean_object* x_16; +x_14 = lean_ctor_get(x_1, 0); +lean_inc(x_14); +x_15 = lean_ctor_get(x_1, 1); +lean_inc(x_15); +lean_dec(x_1); +x_16 = l_Lean_Meta_Grind_Arith_CommRing_RingM_getRing(x_3, x_4, x_5, x_6, x_7, x_8, x_9, x_10, x_11, x_12); +if (lean_obj_tag(x_16) == 0) { -lean_object* x_29; -lean_dec(x_25); -lean_dec(x_24); -lean_dec(x_23); -lean_dec(x_22); -lean_dec(x_21); -lean_dec(x_20); +lean_object* x_17; lean_object* x_18; lean_object* x_19; +x_17 = 
lean_ctor_get(x_16, 0); +lean_inc(x_17); +x_18 = lean_ctor_get(x_16, 1); +lean_inc(x_18); +lean_dec(x_16); +x_19 = l_Lean_Grind_CommRing_Power_denoteExpr___at_Lean_Meta_Grind_Arith_CommRing_getPolyConst___spec__5(x_14, x_3, x_4, x_5, x_6, x_7, x_8, x_9, x_10, x_11, x_18); +if (lean_obj_tag(x_19) == 0) +{ +lean_object* x_20; lean_object* x_21; lean_object* x_22; lean_object* x_23; +x_20 = lean_ctor_get(x_19, 0); +lean_inc(x_20); +x_21 = lean_ctor_get(x_19, 1); +lean_inc(x_21); lean_dec(x_19); -lean_dec(x_18); +x_22 = lean_ctor_get(x_17, 7); +lean_inc(x_22); lean_dec(x_17); -lean_dec(x_14); -lean_dec(x_3); -lean_dec(x_1); -x_29 = lean_alloc_ctor(0, 2, 0); -lean_ctor_set(x_29, 0, x_13); -lean_ctor_set(x_29, 1, x_26); -return x_29; +x_23 = l_Lean_mkAppB(x_22, x_2, x_20); +x_1 = x_15; +x_2 = x_23; +x_12 = x_21; +goto _start; } else { -lean_object* x_30; lean_object* x_31; uint8_t x_37; -x_37 = lean_nat_dec_lt(x_14, x_9); -if (x_37 == 0) -{ -lean_object* x_38; lean_object* x_39; -x_38 = lean_array_fget(x_8, x_14); -lean_inc(x_25); -lean_inc(x_24); -lean_inc(x_23); -lean_inc(x_22); -lean_inc(x_21); -lean_inc(x_20); -lean_inc(x_19); -lean_inc(x_18); -lean_inc(x_17); -lean_inc(x_3); -x_39 = l_Lean_Grind_CommRing_Poly_mulMonM(x_38, x_6, x_3, x_17, x_18, x_19, x_20, x_21, x_22, x_23, x_24, x_25, x_26); -if (lean_obj_tag(x_39) == 0) +uint8_t x_25; +lean_dec(x_17); +lean_dec(x_15); +lean_dec(x_2); +x_25 = !lean_is_exclusive(x_19); +if (x_25 == 0) { -lean_object* x_40; lean_object* x_41; lean_object* x_42; lean_object* x_43; -x_40 = lean_ctor_get(x_39, 0); -lean_inc(x_40); -x_41 = lean_ctor_get(x_39, 1); -lean_inc(x_41); -lean_dec(x_39); -x_42 = lean_array_fset(x_13, x_14, x_40); -x_43 = lean_alloc_ctor(1, 1, 0); -lean_ctor_set(x_43, 0, x_42); -x_30 = x_43; -x_31 = x_41; -goto block_36; +return x_19; } else { -uint8_t x_44; -lean_dec(x_25); -lean_dec(x_24); -lean_dec(x_23); -lean_dec(x_22); -lean_dec(x_21); -lean_dec(x_20); +lean_object* x_26; lean_object* x_27; lean_object* x_28; +x_26 = lean_ctor_get(x_19, 0); +x_27 = lean_ctor_get(x_19, 1); +lean_inc(x_27); +lean_inc(x_26); lean_dec(x_19); -lean_dec(x_18); -lean_dec(x_17); +x_28 = lean_alloc_ctor(1, 2, 0); +lean_ctor_set(x_28, 0, x_26); +lean_ctor_set(x_28, 1, x_27); +return x_28; +} +} +} +else +{ +uint8_t x_29; +lean_dec(x_15); lean_dec(x_14); -lean_dec(x_13); -lean_dec(x_3); -lean_dec(x_1); -x_44 = !lean_is_exclusive(x_39); -if (x_44 == 0) +lean_dec(x_2); +x_29 = !lean_is_exclusive(x_16); +if (x_29 == 0) { -return x_39; +return x_16; } else { -lean_object* x_45; lean_object* x_46; lean_object* x_47; -x_45 = lean_ctor_get(x_39, 0); -x_46 = lean_ctor_get(x_39, 1); -lean_inc(x_46); -lean_inc(x_45); -lean_dec(x_39); -x_47 = lean_alloc_ctor(1, 2, 0); -lean_ctor_set(x_47, 0, x_45); -lean_ctor_set(x_47, 1, x_46); -return x_47; +lean_object* x_30; lean_object* x_31; lean_object* x_32; +x_30 = lean_ctor_get(x_16, 0); +x_31 = lean_ctor_get(x_16, 1); +lean_inc(x_31); +lean_inc(x_30); +lean_dec(x_16); +x_32 = lean_alloc_ctor(1, 2, 0); +lean_ctor_set(x_32, 0, x_30); +lean_ctor_set(x_32, 1, x_31); +return x_32; } } } -else +} +} +static lean_object* _init_l_Lean_Grind_CommRing_Mon_denoteExpr___at_Lean_Meta_Grind_Arith_CommRing_getPolyConst___spec__4___closed__1() { +_start: { -lean_object* x_48; lean_object* x_49; -x_48 = lean_array_fget(x_7, x_14); -lean_inc(x_25); -lean_inc(x_24); -lean_inc(x_23); -lean_inc(x_22); -lean_inc(x_21); -lean_inc(x_20); -lean_inc(x_19); -lean_inc(x_18); -lean_inc(x_17); -lean_inc(x_1); -x_49 = 
l_Lean_Grind_CommRing_Poly_mulMonM(x_48, x_5, x_1, x_17, x_18, x_19, x_20, x_21, x_22, x_23, x_24, x_25, x_26); -if (lean_obj_tag(x_49) == 0) +lean_object* x_1; lean_object* x_2; +x_1 = lean_unsigned_to_nat(1u); +x_2 = lean_nat_to_int(x_1); +return x_2; +} +} +LEAN_EXPORT lean_object* l_Lean_Grind_CommRing_Mon_denoteExpr___at_Lean_Meta_Grind_Arith_CommRing_getPolyConst___spec__4(lean_object* x_1, lean_object* x_2, lean_object* x_3, lean_object* x_4, lean_object* x_5, lean_object* x_6, lean_object* x_7, lean_object* x_8, lean_object* x_9, lean_object* x_10, lean_object* x_11) { +_start: { -lean_object* x_50; lean_object* x_51; uint8_t x_52; -x_50 = lean_ctor_get(x_49, 0); -lean_inc(x_50); -x_51 = lean_ctor_get(x_49, 1); -lean_inc(x_51); -lean_dec(x_49); -x_52 = lean_nat_dec_lt(x_14, x_10); -if (x_52 == 0) +if (lean_obj_tag(x_1) == 0) { -lean_object* x_53; lean_object* x_54; -x_53 = lean_array_fset(x_13, x_14, x_50); -x_54 = lean_alloc_ctor(1, 1, 0); -lean_ctor_set(x_54, 0, x_53); -x_30 = x_54; -x_31 = x_51; -goto block_36; +lean_object* x_12; lean_object* x_13; +x_12 = l_Lean_Grind_CommRing_Mon_denoteExpr___at_Lean_Meta_Grind_Arith_CommRing_getPolyConst___spec__4___closed__1; +x_13 = l___private_Lean_Meta_Tactic_Grind_Arith_CommRing_DenoteExpr_0__Lean_Meta_Grind_Arith_CommRing_denoteNum___at_Lean_Meta_Grind_Arith_CommRing_getPolyConst___spec__2(x_12, x_2, x_3, x_4, x_5, x_6, x_7, x_8, x_9, x_10, x_11); +return x_13; } else { -lean_object* x_55; lean_object* x_56; -x_55 = lean_array_fget(x_8, x_14); -lean_inc(x_25); -lean_inc(x_24); -lean_inc(x_23); -lean_inc(x_22); -lean_inc(x_21); -lean_inc(x_20); -lean_inc(x_19); -lean_inc(x_18); -lean_inc(x_17); -lean_inc(x_3); -x_56 = l_Lean_Grind_CommRing_Poly_mulMonM(x_55, x_6, x_3, x_17, x_18, x_19, x_20, x_21, x_22, x_23, x_24, x_25, x_51); -if (lean_obj_tag(x_56) == 0) +lean_object* x_14; lean_object* x_15; lean_object* x_16; +x_14 = lean_ctor_get(x_1, 0); +lean_inc(x_14); +x_15 = lean_ctor_get(x_1, 1); +lean_inc(x_15); +lean_dec(x_1); +x_16 = l_Lean_Grind_CommRing_Power_denoteExpr___at_Lean_Meta_Grind_Arith_CommRing_getPolyConst___spec__5(x_14, x_2, x_3, x_4, x_5, x_6, x_7, x_8, x_9, x_10, x_11); +if (lean_obj_tag(x_16) == 0) { -lean_object* x_57; lean_object* x_58; lean_object* x_59; -x_57 = lean_ctor_get(x_56, 0); -lean_inc(x_57); -x_58 = lean_ctor_get(x_56, 1); -lean_inc(x_58); -lean_dec(x_56); -lean_inc(x_25); -lean_inc(x_24); -lean_inc(x_23); -lean_inc(x_22); -lean_inc(x_21); -lean_inc(x_20); -lean_inc(x_19); -lean_inc(x_18); +lean_object* x_17; lean_object* x_18; lean_object* x_19; +x_17 = lean_ctor_get(x_16, 0); lean_inc(x_17); -x_59 = l_Lean_Grind_CommRing_Poly_combineM(x_50, x_57, x_17, x_18, x_19, x_20, x_21, x_22, x_23, x_24, x_25, x_58); -if (lean_obj_tag(x_59) == 0) -{ -lean_object* x_60; lean_object* x_61; lean_object* x_62; lean_object* x_63; -x_60 = lean_ctor_get(x_59, 0); -lean_inc(x_60); -x_61 = lean_ctor_get(x_59, 1); -lean_inc(x_61); -lean_dec(x_59); -x_62 = lean_array_fset(x_13, x_14, x_60); -x_63 = lean_alloc_ctor(1, 1, 0); -lean_ctor_set(x_63, 0, x_62); -x_30 = x_63; -x_31 = x_61; -goto block_36; +x_18 = lean_ctor_get(x_16, 1); +lean_inc(x_18); +lean_dec(x_16); +x_19 = l_Lean_Grind_CommRing_Mon_denoteExpr_go___at_Lean_Meta_Grind_Arith_CommRing_getPolyConst___spec__6(x_15, x_17, x_2, x_3, x_4, x_5, x_6, x_7, x_8, x_9, x_10, x_18); +return x_19; } else { -uint8_t x_64; -lean_dec(x_25); -lean_dec(x_24); -lean_dec(x_23); -lean_dec(x_22); -lean_dec(x_21); -lean_dec(x_20); -lean_dec(x_19); -lean_dec(x_18); -lean_dec(x_17); 
-lean_dec(x_14); -lean_dec(x_13); -lean_dec(x_3); -lean_dec(x_1); -x_64 = !lean_is_exclusive(x_59); -if (x_64 == 0) +uint8_t x_20; +lean_dec(x_15); +x_20 = !lean_is_exclusive(x_16); +if (x_20 == 0) { -return x_59; +return x_16; } else { -lean_object* x_65; lean_object* x_66; lean_object* x_67; -x_65 = lean_ctor_get(x_59, 0); -x_66 = lean_ctor_get(x_59, 1); -lean_inc(x_66); -lean_inc(x_65); -lean_dec(x_59); -x_67 = lean_alloc_ctor(1, 2, 0); -lean_ctor_set(x_67, 0, x_65); -lean_ctor_set(x_67, 1, x_66); -return x_67; +lean_object* x_21; lean_object* x_22; lean_object* x_23; +x_21 = lean_ctor_get(x_16, 0); +x_22 = lean_ctor_get(x_16, 1); +lean_inc(x_22); +lean_inc(x_21); +lean_dec(x_16); +x_23 = lean_alloc_ctor(1, 2, 0); +lean_ctor_set(x_23, 0, x_21); +lean_ctor_set(x_23, 1, x_22); +return x_23; } } } -else -{ -uint8_t x_68; -lean_dec(x_50); -lean_dec(x_25); -lean_dec(x_24); -lean_dec(x_23); -lean_dec(x_22); -lean_dec(x_21); -lean_dec(x_20); -lean_dec(x_19); +} +} +LEAN_EXPORT lean_object* l_Lean_Grind_CommRing_Poly_denoteExpr_denoteTerm___at_Lean_Meta_Grind_Arith_CommRing_getPolyConst___spec__3(lean_object* x_1, lean_object* x_2, lean_object* x_3, lean_object* x_4, lean_object* x_5, lean_object* x_6, lean_object* x_7, lean_object* x_8, lean_object* x_9, lean_object* x_10, lean_object* x_11, lean_object* x_12) { +_start: +{ +lean_object* x_13; uint8_t x_14; +x_13 = l_Lean_Grind_CommRing_Mon_denoteExpr___at_Lean_Meta_Grind_Arith_CommRing_getPolyConst___spec__4___closed__1; +x_14 = lean_int_dec_eq(x_1, x_13); +if (x_14 == 0) +{ +lean_object* x_15; +x_15 = l_Lean_Meta_Grind_Arith_CommRing_RingM_getRing(x_3, x_4, x_5, x_6, x_7, x_8, x_9, x_10, x_11, x_12); +if (lean_obj_tag(x_15) == 0) +{ +lean_object* x_16; lean_object* x_17; lean_object* x_18; +x_16 = lean_ctor_get(x_15, 0); +lean_inc(x_16); +x_17 = lean_ctor_get(x_15, 1); +lean_inc(x_17); +lean_dec(x_15); +x_18 = l___private_Lean_Meta_Tactic_Grind_Arith_CommRing_DenoteExpr_0__Lean_Meta_Grind_Arith_CommRing_denoteNum___at_Lean_Meta_Grind_Arith_CommRing_getPolyConst___spec__2(x_1, x_3, x_4, x_5, x_6, x_7, x_8, x_9, x_10, x_11, x_17); +if (lean_obj_tag(x_18) == 0) +{ +lean_object* x_19; lean_object* x_20; lean_object* x_21; +x_19 = lean_ctor_get(x_18, 0); +lean_inc(x_19); +x_20 = lean_ctor_get(x_18, 1); +lean_inc(x_20); lean_dec(x_18); -lean_dec(x_17); -lean_dec(x_14); -lean_dec(x_13); -lean_dec(x_3); -lean_dec(x_1); -x_68 = !lean_is_exclusive(x_56); -if (x_68 == 0) +x_21 = l_Lean_Grind_CommRing_Mon_denoteExpr___at_Lean_Meta_Grind_Arith_CommRing_getPolyConst___spec__4(x_2, x_3, x_4, x_5, x_6, x_7, x_8, x_9, x_10, x_11, x_20); +if (lean_obj_tag(x_21) == 0) { -return x_56; +uint8_t x_22; +x_22 = !lean_is_exclusive(x_21); +if (x_22 == 0) +{ +lean_object* x_23; lean_object* x_24; lean_object* x_25; +x_23 = lean_ctor_get(x_21, 0); +x_24 = lean_ctor_get(x_16, 7); +lean_inc(x_24); +lean_dec(x_16); +x_25 = l_Lean_mkAppB(x_24, x_19, x_23); +lean_ctor_set(x_21, 0, x_25); +return x_21; } else { -lean_object* x_69; lean_object* x_70; lean_object* x_71; -x_69 = lean_ctor_get(x_56, 0); -x_70 = lean_ctor_get(x_56, 1); -lean_inc(x_70); -lean_inc(x_69); -lean_dec(x_56); -x_71 = lean_alloc_ctor(1, 2, 0); -lean_ctor_set(x_71, 0, x_69); -lean_ctor_set(x_71, 1, x_70); -return x_71; -} -} +lean_object* x_26; lean_object* x_27; lean_object* x_28; lean_object* x_29; lean_object* x_30; +x_26 = lean_ctor_get(x_21, 0); +x_27 = lean_ctor_get(x_21, 1); +lean_inc(x_27); +lean_inc(x_26); +lean_dec(x_21); +x_28 = lean_ctor_get(x_16, 7); +lean_inc(x_28); +lean_dec(x_16); +x_29 = 
l_Lean_mkAppB(x_28, x_19, x_26); +x_30 = lean_alloc_ctor(0, 2, 0); +lean_ctor_set(x_30, 0, x_29); +lean_ctor_set(x_30, 1, x_27); +return x_30; } } else { -uint8_t x_72; -lean_dec(x_25); -lean_dec(x_24); -lean_dec(x_23); -lean_dec(x_22); -lean_dec(x_21); -lean_dec(x_20); +uint8_t x_31; lean_dec(x_19); -lean_dec(x_18); -lean_dec(x_17); -lean_dec(x_14); -lean_dec(x_13); -lean_dec(x_3); -lean_dec(x_1); -x_72 = !lean_is_exclusive(x_49); -if (x_72 == 0) +lean_dec(x_16); +x_31 = !lean_is_exclusive(x_21); +if (x_31 == 0) { -return x_49; +return x_21; } else { -lean_object* x_73; lean_object* x_74; lean_object* x_75; -x_73 = lean_ctor_get(x_49, 0); -x_74 = lean_ctor_get(x_49, 1); -lean_inc(x_74); -lean_inc(x_73); -lean_dec(x_49); -x_75 = lean_alloc_ctor(1, 2, 0); -lean_ctor_set(x_75, 0, x_73); -lean_ctor_set(x_75, 1, x_74); -return x_75; -} -} -} -block_36: -{ lean_object* x_32; lean_object* x_33; lean_object* x_34; -x_32 = lean_ctor_get(x_30, 0); +x_32 = lean_ctor_get(x_21, 0); +x_33 = lean_ctor_get(x_21, 1); +lean_inc(x_33); lean_inc(x_32); -lean_dec(x_30); -x_33 = lean_ctor_get(x_12, 2); -x_34 = lean_nat_add(x_14, x_33); -lean_dec(x_14); -x_13 = x_32; -x_14 = x_34; -x_15 = lean_box(0); -x_16 = lean_box(0); -x_26 = x_31; -goto _start; -} -} +lean_dec(x_21); +x_34 = lean_alloc_ctor(1, 2, 0); +lean_ctor_set(x_34, 0, x_32); +lean_ctor_set(x_34, 1, x_33); +return x_34; } } -LEAN_EXPORT lean_object* l_Lean_Meta_Grind_Arith_CommRing_Null_PreNullCert_combine(lean_object* x_1, lean_object* x_2, lean_object* x_3, lean_object* x_4, lean_object* x_5, lean_object* x_6, lean_object* x_7, lean_object* x_8, lean_object* x_9, lean_object* x_10, lean_object* x_11, lean_object* x_12, lean_object* x_13, lean_object* x_14, lean_object* x_15, lean_object* x_16) { -_start: -{ -lean_object* x_17; lean_object* x_18; lean_object* x_19; lean_object* x_20; lean_object* x_21; lean_object* x_22; lean_object* x_23; lean_object* x_24; lean_object* x_25; lean_object* x_26; lean_object* x_27; lean_object* x_28; lean_object* x_29; lean_object* x_30; lean_object* x_31; lean_object* x_32; uint8_t x_33; lean_object* x_34; -x_17 = lean_ctor_get(x_3, 1); -x_18 = lean_ctor_get(x_6, 1); -x_19 = lean_int_mul(x_1, x_18); -x_20 = lean_int_mul(x_4, x_17); -x_21 = lean_int_mul(x_17, x_18); -x_22 = l_Int_gcd(x_19, x_20); -x_23 = lean_nat_to_int(x_22); -x_24 = l_Int_gcd(x_23, x_21); -lean_dec(x_23); -x_25 = lean_nat_to_int(x_24); -x_26 = lean_int_ediv(x_19, x_25); -lean_dec(x_19); -x_27 = lean_int_ediv(x_20, x_25); -lean_dec(x_20); -x_28 = lean_int_ediv(x_21, x_25); -lean_dec(x_25); -lean_dec(x_21); -x_29 = lean_ctor_get(x_3, 0); -x_30 = lean_ctor_get(x_6, 0); -x_31 = lean_array_get_size(x_29); -x_32 = lean_array_get_size(x_30); -x_33 = lean_nat_dec_le(x_31, x_32); -if (x_33 == 0) -{ -lean_inc(x_31); -x_34 = x_31; -goto block_52; } else { -lean_inc(x_32); -x_34 = x_32; -goto block_52; -} -block_52: -{ -lean_object* x_35; lean_object* x_36; lean_object* x_37; lean_object* x_38; lean_object* x_39; lean_object* x_40; -x_35 = l_Lean_Meta_Grind_Arith_CommRing_Null_PreNullCert_unit___closed__1; -lean_inc(x_34); -x_36 = lean_mk_array(x_34, x_35); -x_37 = lean_unsigned_to_nat(0u); -x_38 = lean_unsigned_to_nat(1u); -lean_inc(x_34); -x_39 = lean_alloc_ctor(0, 3, 0); -lean_ctor_set(x_39, 0, x_37); -lean_ctor_set(x_39, 1, x_34); -lean_ctor_set(x_39, 2, x_38); -x_40 = l_Std_Range_forIn_x27_loop___at_Lean_Meta_Grind_Arith_CommRing_Null_PreNullCert_combine___spec__1(x_2, x_3, x_5, x_6, x_26, x_27, x_29, x_30, x_31, x_32, x_34, x_39, x_36, x_37, lean_box(0), 
lean_box(0), x_7, x_8, x_9, x_10, x_11, x_12, x_13, x_14, x_15, x_16); -lean_dec(x_39); -lean_dec(x_34); -lean_dec(x_32); -lean_dec(x_31); -lean_dec(x_27); -lean_dec(x_26); -if (lean_obj_tag(x_40) == 0) -{ -uint8_t x_41; -x_41 = !lean_is_exclusive(x_40); -if (x_41 == 0) +uint8_t x_35; +lean_dec(x_16); +lean_dec(x_2); +x_35 = !lean_is_exclusive(x_18); +if (x_35 == 0) { -lean_object* x_42; lean_object* x_43; -x_42 = lean_ctor_get(x_40, 0); -x_43 = lean_alloc_ctor(0, 2, 0); -lean_ctor_set(x_43, 0, x_42); -lean_ctor_set(x_43, 1, x_28); -lean_ctor_set(x_40, 0, x_43); -return x_40; +return x_18; } else { -lean_object* x_44; lean_object* x_45; lean_object* x_46; lean_object* x_47; -x_44 = lean_ctor_get(x_40, 0); -x_45 = lean_ctor_get(x_40, 1); -lean_inc(x_45); -lean_inc(x_44); -lean_dec(x_40); -x_46 = lean_alloc_ctor(0, 2, 0); -lean_ctor_set(x_46, 0, x_44); -lean_ctor_set(x_46, 1, x_28); -x_47 = lean_alloc_ctor(0, 2, 0); -lean_ctor_set(x_47, 0, x_46); -lean_ctor_set(x_47, 1, x_45); -return x_47; +lean_object* x_36; lean_object* x_37; lean_object* x_38; +x_36 = lean_ctor_get(x_18, 0); +x_37 = lean_ctor_get(x_18, 1); +lean_inc(x_37); +lean_inc(x_36); +lean_dec(x_18); +x_38 = lean_alloc_ctor(1, 2, 0); +lean_ctor_set(x_38, 0, x_36); +lean_ctor_set(x_38, 1, x_37); +return x_38; +} } } else { -uint8_t x_48; -lean_dec(x_28); -x_48 = !lean_is_exclusive(x_40); -if (x_48 == 0) +uint8_t x_39; +lean_dec(x_2); +x_39 = !lean_is_exclusive(x_15); +if (x_39 == 0) { -return x_40; +return x_15; } else { -lean_object* x_49; lean_object* x_50; lean_object* x_51; -x_49 = lean_ctor_get(x_40, 0); -x_50 = lean_ctor_get(x_40, 1); -lean_inc(x_50); -lean_inc(x_49); -lean_dec(x_40); -x_51 = lean_alloc_ctor(1, 2, 0); -lean_ctor_set(x_51, 0, x_49); -lean_ctor_set(x_51, 1, x_50); -return x_51; +lean_object* x_40; lean_object* x_41; lean_object* x_42; +x_40 = lean_ctor_get(x_15, 0); +x_41 = lean_ctor_get(x_15, 1); +lean_inc(x_41); +lean_inc(x_40); +lean_dec(x_15); +x_42 = lean_alloc_ctor(1, 2, 0); +lean_ctor_set(x_42, 0, x_40); +lean_ctor_set(x_42, 1, x_41); +return x_42; } } } +else +{ +lean_object* x_43; +x_43 = l_Lean_Grind_CommRing_Mon_denoteExpr___at_Lean_Meta_Grind_Arith_CommRing_getPolyConst___spec__4(x_2, x_3, x_4, x_5, x_6, x_7, x_8, x_9, x_10, x_11, x_12); +return x_43; } } -LEAN_EXPORT lean_object* l_Std_Range_forIn_x27_loop___at_Lean_Meta_Grind_Arith_CommRing_Null_PreNullCert_combine___spec__1___boxed(lean_object** _args) { -lean_object* x_1 = _args[0]; -lean_object* x_2 = _args[1]; -lean_object* x_3 = _args[2]; -lean_object* x_4 = _args[3]; -lean_object* x_5 = _args[4]; -lean_object* x_6 = _args[5]; -lean_object* x_7 = _args[6]; -lean_object* x_8 = _args[7]; -lean_object* x_9 = _args[8]; -lean_object* x_10 = _args[9]; -lean_object* x_11 = _args[10]; -lean_object* x_12 = _args[11]; -lean_object* x_13 = _args[12]; -lean_object* x_14 = _args[13]; -lean_object* x_15 = _args[14]; -lean_object* x_16 = _args[15]; -lean_object* x_17 = _args[16]; -lean_object* x_18 = _args[17]; -lean_object* x_19 = _args[18]; -lean_object* x_20 = _args[19]; -lean_object* x_21 = _args[20]; -lean_object* x_22 = _args[21]; -lean_object* x_23 = _args[22]; -lean_object* x_24 = _args[23]; -lean_object* x_25 = _args[24]; -lean_object* x_26 = _args[25]; -_start: -{ -lean_object* x_27; -x_27 = l_Std_Range_forIn_x27_loop___at_Lean_Meta_Grind_Arith_CommRing_Null_PreNullCert_combine___spec__1(x_1, x_2, x_3, x_4, x_5, x_6, x_7, x_8, x_9, x_10, x_11, x_12, x_13, x_14, x_15, x_16, x_17, x_18, x_19, x_20, x_21, x_22, x_23, x_24, x_25, x_26); 
-lean_dec(x_12); -lean_dec(x_11); -lean_dec(x_10); -lean_dec(x_9); -lean_dec(x_8); -lean_dec(x_7); -lean_dec(x_6); -lean_dec(x_5); -lean_dec(x_4); -lean_dec(x_2); -return x_27; } -} -LEAN_EXPORT lean_object* l_Lean_Meta_Grind_Arith_CommRing_Null_PreNullCert_combine___boxed(lean_object* x_1, lean_object* x_2, lean_object* x_3, lean_object* x_4, lean_object* x_5, lean_object* x_6, lean_object* x_7, lean_object* x_8, lean_object* x_9, lean_object* x_10, lean_object* x_11, lean_object* x_12, lean_object* x_13, lean_object* x_14, lean_object* x_15, lean_object* x_16) { +LEAN_EXPORT lean_object* l_Lean_Grind_CommRing_Poly_denoteExpr_go___at_Lean_Meta_Grind_Arith_CommRing_getPolyConst___spec__7(lean_object* x_1, lean_object* x_2, lean_object* x_3, lean_object* x_4, lean_object* x_5, lean_object* x_6, lean_object* x_7, lean_object* x_8, lean_object* x_9, lean_object* x_10, lean_object* x_11, lean_object* x_12) { _start: { -lean_object* x_17; -x_17 = l_Lean_Meta_Grind_Arith_CommRing_Null_PreNullCert_combine(x_1, x_2, x_3, x_4, x_5, x_6, x_7, x_8, x_9, x_10, x_11, x_12, x_13, x_14, x_15, x_16); -lean_dec(x_6); -lean_dec(x_4); -lean_dec(x_3); +if (lean_obj_tag(x_1) == 0) +{ +lean_object* x_13; lean_object* x_14; uint8_t x_15; +x_13 = lean_ctor_get(x_1, 0); +lean_inc(x_13); lean_dec(x_1); -return x_17; -} +x_14 = l___private_Lean_Meta_Tactic_Grind_Arith_CommRing_DenoteExpr_0__Lean_Meta_Grind_Arith_CommRing_denoteNum___at_Lean_Meta_Grind_Arith_CommRing_getPolyConst___spec__2___closed__8; +x_15 = lean_int_dec_eq(x_13, x_14); +if (x_15 == 0) +{ +lean_object* x_16; +x_16 = l_Lean_Meta_Grind_Arith_CommRing_RingM_getRing(x_3, x_4, x_5, x_6, x_7, x_8, x_9, x_10, x_11, x_12); +if (lean_obj_tag(x_16) == 0) +{ +lean_object* x_17; lean_object* x_18; lean_object* x_19; +x_17 = lean_ctor_get(x_16, 0); +lean_inc(x_17); +x_18 = lean_ctor_get(x_16, 1); +lean_inc(x_18); +lean_dec(x_16); +x_19 = l___private_Lean_Meta_Tactic_Grind_Arith_CommRing_DenoteExpr_0__Lean_Meta_Grind_Arith_CommRing_denoteNum___at_Lean_Meta_Grind_Arith_CommRing_getPolyConst___spec__2(x_13, x_3, x_4, x_5, x_6, x_7, x_8, x_9, x_10, x_11, x_18); +lean_dec(x_13); +if (lean_obj_tag(x_19) == 0) +{ +uint8_t x_20; +x_20 = !lean_is_exclusive(x_19); +if (x_20 == 0) +{ +lean_object* x_21; lean_object* x_22; lean_object* x_23; +x_21 = lean_ctor_get(x_19, 0); +x_22 = lean_ctor_get(x_17, 6); +lean_inc(x_22); +lean_dec(x_17); +x_23 = l_Lean_mkAppB(x_22, x_2, x_21); +lean_ctor_set(x_19, 0, x_23); +return x_19; } -LEAN_EXPORT uint64_t l___private_Lean_Meta_Tactic_Grind_Arith_CommRing_Proof_0__Lean_Meta_Grind_Arith_CommRing_Null_caching_unsafe__1___rarg(lean_object* x_1) { -_start: +else { -size_t x_2; uint64_t x_3; uint64_t x_4; uint64_t x_5; -x_2 = lean_ptr_addr(x_1); -x_3 = lean_usize_to_uint64(x_2); -x_4 = 2; -x_5 = lean_uint64_shift_right(x_3, x_4); -return x_5; +lean_object* x_24; lean_object* x_25; lean_object* x_26; lean_object* x_27; lean_object* x_28; +x_24 = lean_ctor_get(x_19, 0); +x_25 = lean_ctor_get(x_19, 1); +lean_inc(x_25); +lean_inc(x_24); +lean_dec(x_19); +x_26 = lean_ctor_get(x_17, 6); +lean_inc(x_26); +lean_dec(x_17); +x_27 = l_Lean_mkAppB(x_26, x_2, x_24); +x_28 = lean_alloc_ctor(0, 2, 0); +lean_ctor_set(x_28, 0, x_27); +lean_ctor_set(x_28, 1, x_25); +return x_28; } } -LEAN_EXPORT lean_object* l___private_Lean_Meta_Tactic_Grind_Arith_CommRing_Proof_0__Lean_Meta_Grind_Arith_CommRing_Null_caching_unsafe__1(lean_object* x_1) { -_start: +else { -lean_object* x_2; -x_2 = 
lean_alloc_closure((void*)(l___private_Lean_Meta_Tactic_Grind_Arith_CommRing_Proof_0__Lean_Meta_Grind_Arith_CommRing_Null_caching_unsafe__1___rarg___boxed), 1, 0); -return x_2; -} +uint8_t x_29; +lean_dec(x_17); +lean_dec(x_2); +x_29 = !lean_is_exclusive(x_19); +if (x_29 == 0) +{ +return x_19; } -LEAN_EXPORT lean_object* l___private_Lean_Meta_Tactic_Grind_Arith_CommRing_Proof_0__Lean_Meta_Grind_Arith_CommRing_Null_caching_unsafe__1___rarg___boxed(lean_object* x_1) { -_start: +else { -uint64_t x_2; lean_object* x_3; -x_2 = l___private_Lean_Meta_Tactic_Grind_Arith_CommRing_Proof_0__Lean_Meta_Grind_Arith_CommRing_Null_caching_unsafe__1___rarg(x_1); -lean_dec(x_1); -x_3 = lean_box_uint64(x_2); -return x_3; +lean_object* x_30; lean_object* x_31; lean_object* x_32; +x_30 = lean_ctor_get(x_19, 0); +x_31 = lean_ctor_get(x_19, 1); +lean_inc(x_31); +lean_inc(x_30); +lean_dec(x_19); +x_32 = lean_alloc_ctor(1, 2, 0); +lean_ctor_set(x_32, 0, x_30); +lean_ctor_set(x_32, 1, x_31); +return x_32; } } -LEAN_EXPORT lean_object* l_Std_DHashMap_Internal_AssocList_get_x3f___at___private_Lean_Meta_Tactic_Grind_Arith_CommRing_Proof_0__Lean_Meta_Grind_Arith_CommRing_Null_caching___spec__1(uint64_t x_1, lean_object* x_2) { -_start: -{ -if (lean_obj_tag(x_2) == 0) -{ -lean_object* x_3; -x_3 = lean_box(0); -return x_3; } else { -lean_object* x_4; lean_object* x_5; lean_object* x_6; uint64_t x_7; uint8_t x_8; -x_4 = lean_ctor_get(x_2, 0); -lean_inc(x_4); -x_5 = lean_ctor_get(x_2, 1); -lean_inc(x_5); -x_6 = lean_ctor_get(x_2, 2); -lean_inc(x_6); +uint8_t x_33; +lean_dec(x_13); lean_dec(x_2); -x_7 = lean_unbox_uint64(x_4); -lean_dec(x_4); -x_8 = lean_uint64_dec_eq(x_7, x_1); -if (x_8 == 0) +x_33 = !lean_is_exclusive(x_16); +if (x_33 == 0) { -lean_dec(x_5); -x_2 = x_6; -goto _start; +return x_16; } else { -lean_object* x_10; -lean_dec(x_6); -x_10 = lean_alloc_ctor(1, 1, 0); -lean_ctor_set(x_10, 0, x_5); -return x_10; +lean_object* x_34; lean_object* x_35; lean_object* x_36; +x_34 = lean_ctor_get(x_16, 0); +x_35 = lean_ctor_get(x_16, 1); +lean_inc(x_35); +lean_inc(x_34); +lean_dec(x_16); +x_36 = lean_alloc_ctor(1, 2, 0); +lean_ctor_set(x_36, 0, x_34); +lean_ctor_set(x_36, 1, x_35); +return x_36; +} } } +else +{ +lean_object* x_37; +lean_dec(x_13); +x_37 = lean_alloc_ctor(0, 2, 0); +lean_ctor_set(x_37, 0, x_2); +lean_ctor_set(x_37, 1, x_12); +return x_37; } } -LEAN_EXPORT uint8_t l_Std_DHashMap_Internal_AssocList_contains___at___private_Lean_Meta_Tactic_Grind_Arith_CommRing_Proof_0__Lean_Meta_Grind_Arith_CommRing_Null_caching___spec__2(uint64_t x_1, lean_object* x_2) { -_start: +else { -if (lean_obj_tag(x_2) == 0) +lean_object* x_38; lean_object* x_39; lean_object* x_40; lean_object* x_41; +x_38 = lean_ctor_get(x_1, 0); +lean_inc(x_38); +x_39 = lean_ctor_get(x_1, 1); +lean_inc(x_39); +x_40 = lean_ctor_get(x_1, 2); +lean_inc(x_40); +lean_dec(x_1); +x_41 = l_Lean_Meta_Grind_Arith_CommRing_RingM_getRing(x_3, x_4, x_5, x_6, x_7, x_8, x_9, x_10, x_11, x_12); +if (lean_obj_tag(x_41) == 0) { -uint8_t x_3; -x_3 = 0; -return x_3; +lean_object* x_42; lean_object* x_43; lean_object* x_44; +x_42 = lean_ctor_get(x_41, 0); +lean_inc(x_42); +x_43 = lean_ctor_get(x_41, 1); +lean_inc(x_43); +lean_dec(x_41); +x_44 = l_Lean_Grind_CommRing_Poly_denoteExpr_denoteTerm___at_Lean_Meta_Grind_Arith_CommRing_getPolyConst___spec__3(x_38, x_39, x_3, x_4, x_5, x_6, x_7, x_8, x_9, x_10, x_11, x_43); +lean_dec(x_38); +if (lean_obj_tag(x_44) == 0) +{ +lean_object* x_45; lean_object* x_46; lean_object* x_47; lean_object* x_48; +x_45 = lean_ctor_get(x_44, 
0); +lean_inc(x_45); +x_46 = lean_ctor_get(x_44, 1); +lean_inc(x_46); +lean_dec(x_44); +x_47 = lean_ctor_get(x_42, 6); +lean_inc(x_47); +lean_dec(x_42); +x_48 = l_Lean_mkAppB(x_47, x_2, x_45); +x_1 = x_40; +x_2 = x_48; +x_12 = x_46; +goto _start; } else { -lean_object* x_4; lean_object* x_5; uint64_t x_6; uint8_t x_7; -x_4 = lean_ctor_get(x_2, 0); -lean_inc(x_4); -x_5 = lean_ctor_get(x_2, 2); -lean_inc(x_5); +uint8_t x_50; +lean_dec(x_42); +lean_dec(x_40); lean_dec(x_2); -x_6 = lean_unbox_uint64(x_4); -lean_dec(x_4); -x_7 = lean_uint64_dec_eq(x_6, x_1); -if (x_7 == 0) +x_50 = !lean_is_exclusive(x_44); +if (x_50 == 0) { -x_2 = x_5; -goto _start; +return x_44; } else { -uint8_t x_9; -lean_dec(x_5); -x_9 = 1; -return x_9; +lean_object* x_51; lean_object* x_52; lean_object* x_53; +x_51 = lean_ctor_get(x_44, 0); +x_52 = lean_ctor_get(x_44, 1); +lean_inc(x_52); +lean_inc(x_51); +lean_dec(x_44); +x_53 = lean_alloc_ctor(1, 2, 0); +lean_ctor_set(x_53, 0, x_51); +lean_ctor_set(x_53, 1, x_52); +return x_53; +} +} +} +else +{ +uint8_t x_54; +lean_dec(x_40); +lean_dec(x_39); +lean_dec(x_38); +lean_dec(x_2); +x_54 = !lean_is_exclusive(x_41); +if (x_54 == 0) +{ +return x_41; +} +else +{ +lean_object* x_55; lean_object* x_56; lean_object* x_57; +x_55 = lean_ctor_get(x_41, 0); +x_56 = lean_ctor_get(x_41, 1); +lean_inc(x_56); +lean_inc(x_55); +lean_dec(x_41); +x_57 = lean_alloc_ctor(1, 2, 0); +lean_ctor_set(x_57, 0, x_55); +lean_ctor_set(x_57, 1, x_56); +return x_57; } } } } -LEAN_EXPORT lean_object* l_Std_DHashMap_Internal_AssocList_foldlM___at___private_Lean_Meta_Tactic_Grind_Arith_CommRing_Proof_0__Lean_Meta_Grind_Arith_CommRing_Null_caching___spec__5(lean_object* x_1, lean_object* x_2, lean_object* x_3) { +} +LEAN_EXPORT lean_object* l_Lean_Grind_CommRing_Poly_denoteExpr___at_Lean_Meta_Grind_Arith_CommRing_getPolyConst___spec__1(lean_object* x_1, lean_object* x_2, lean_object* x_3, lean_object* x_4, lean_object* x_5, lean_object* x_6, lean_object* x_7, lean_object* x_8, lean_object* x_9, lean_object* x_10, lean_object* x_11) { _start: { -if (lean_obj_tag(x_3) == 0) +if (lean_obj_tag(x_1) == 0) { +lean_object* x_12; lean_object* x_13; +x_12 = lean_ctor_get(x_1, 0); +lean_inc(x_12); lean_dec(x_1); -return x_2; +x_13 = l___private_Lean_Meta_Tactic_Grind_Arith_CommRing_DenoteExpr_0__Lean_Meta_Grind_Arith_CommRing_denoteNum___at_Lean_Meta_Grind_Arith_CommRing_getPolyConst___spec__2(x_12, x_2, x_3, x_4, x_5, x_6, x_7, x_8, x_9, x_10, x_11); +lean_dec(x_12); +return x_13; } else { -uint8_t x_4; -x_4 = !lean_is_exclusive(x_3); -if (x_4 == 0) -{ -lean_object* x_5; lean_object* x_6; lean_object* x_7; lean_object* x_8; uint64_t x_9; uint64_t x_10; uint64_t x_11; uint64_t x_12; uint64_t x_13; uint64_t x_14; uint64_t x_15; size_t x_16; size_t x_17; size_t x_18; size_t x_19; size_t x_20; lean_object* x_21; lean_object* x_22; -x_5 = lean_ctor_get(x_3, 0); -x_6 = lean_ctor_get(x_3, 2); -x_7 = lean_array_get_size(x_2); -lean_inc(x_1); -lean_inc(x_5); -x_8 = lean_apply_1(x_1, x_5); -x_9 = lean_unbox_uint64(x_8); -lean_dec(x_8); -x_10 = 32; -x_11 = lean_uint64_shift_right(x_9, x_10); -x_12 = lean_uint64_xor(x_9, x_11); -x_13 = 16; -x_14 = lean_uint64_shift_right(x_12, x_13); -x_15 = lean_uint64_xor(x_12, x_14); -x_16 = lean_uint64_to_usize(x_15); -x_17 = lean_usize_of_nat(x_7); -lean_dec(x_7); -x_18 = 1; -x_19 = lean_usize_sub(x_17, x_18); -x_20 = lean_usize_land(x_16, x_19); -x_21 = lean_array_uget(x_2, x_20); -lean_ctor_set(x_3, 2, x_21); -x_22 = lean_array_uset(x_2, x_20, x_3); -x_2 = x_22; -x_3 = x_6; -goto _start; -} 
-else -{ -lean_object* x_24; lean_object* x_25; lean_object* x_26; lean_object* x_27; lean_object* x_28; uint64_t x_29; uint64_t x_30; uint64_t x_31; uint64_t x_32; uint64_t x_33; uint64_t x_34; uint64_t x_35; size_t x_36; size_t x_37; size_t x_38; size_t x_39; size_t x_40; lean_object* x_41; lean_object* x_42; lean_object* x_43; -x_24 = lean_ctor_get(x_3, 0); -x_25 = lean_ctor_get(x_3, 1); -x_26 = lean_ctor_get(x_3, 2); -lean_inc(x_26); -lean_inc(x_25); -lean_inc(x_24); -lean_dec(x_3); -x_27 = lean_array_get_size(x_2); -lean_inc(x_1); -lean_inc(x_24); -x_28 = lean_apply_1(x_1, x_24); -x_29 = lean_unbox_uint64(x_28); -lean_dec(x_28); -x_30 = 32; -x_31 = lean_uint64_shift_right(x_29, x_30); -x_32 = lean_uint64_xor(x_29, x_31); -x_33 = 16; -x_34 = lean_uint64_shift_right(x_32, x_33); -x_35 = lean_uint64_xor(x_32, x_34); -x_36 = lean_uint64_to_usize(x_35); -x_37 = lean_usize_of_nat(x_27); -lean_dec(x_27); -x_38 = 1; -x_39 = lean_usize_sub(x_37, x_38); -x_40 = lean_usize_land(x_36, x_39); -x_41 = lean_array_uget(x_2, x_40); -x_42 = lean_alloc_ctor(1, 3, 0); -lean_ctor_set(x_42, 0, x_24); -lean_ctor_set(x_42, 1, x_25); -lean_ctor_set(x_42, 2, x_41); -x_43 = lean_array_uset(x_2, x_40, x_42); -x_2 = x_43; -x_3 = x_26; -goto _start; -} -} -} -} -LEAN_EXPORT lean_object* l_Std_DHashMap_Internal_AssocList_foldlM___at___private_Lean_Meta_Tactic_Grind_Arith_CommRing_Proof_0__Lean_Meta_Grind_Arith_CommRing_Null_caching___spec__5___at___private_Lean_Meta_Tactic_Grind_Arith_CommRing_Proof_0__Lean_Meta_Grind_Arith_CommRing_Null_caching___spec__6(lean_object* x_1, lean_object* x_2) { -_start: -{ -if (lean_obj_tag(x_2) == 0) +lean_object* x_14; lean_object* x_15; lean_object* x_16; lean_object* x_17; +x_14 = lean_ctor_get(x_1, 0); +lean_inc(x_14); +x_15 = lean_ctor_get(x_1, 1); +lean_inc(x_15); +x_16 = lean_ctor_get(x_1, 2); +lean_inc(x_16); +lean_dec(x_1); +x_17 = l_Lean_Grind_CommRing_Poly_denoteExpr_denoteTerm___at_Lean_Meta_Grind_Arith_CommRing_getPolyConst___spec__3(x_14, x_15, x_2, x_3, x_4, x_5, x_6, x_7, x_8, x_9, x_10, x_11); +lean_dec(x_14); +if (lean_obj_tag(x_17) == 0) { -return x_1; +lean_object* x_18; lean_object* x_19; lean_object* x_20; +x_18 = lean_ctor_get(x_17, 0); +lean_inc(x_18); +x_19 = lean_ctor_get(x_17, 1); +lean_inc(x_19); +lean_dec(x_17); +x_20 = l_Lean_Grind_CommRing_Poly_denoteExpr_go___at_Lean_Meta_Grind_Arith_CommRing_getPolyConst___spec__7(x_16, x_18, x_2, x_3, x_4, x_5, x_6, x_7, x_8, x_9, x_10, x_19); +return x_20; } else { -uint8_t x_3; -x_3 = !lean_is_exclusive(x_2); -if (x_3 == 0) +uint8_t x_21; +lean_dec(x_16); +x_21 = !lean_is_exclusive(x_17); +if (x_21 == 0) { -lean_object* x_4; lean_object* x_5; lean_object* x_6; uint64_t x_7; uint64_t x_8; uint64_t x_9; uint64_t x_10; uint64_t x_11; uint64_t x_12; uint64_t x_13; uint64_t x_14; size_t x_15; size_t x_16; size_t x_17; size_t x_18; size_t x_19; lean_object* x_20; lean_object* x_21; -x_4 = lean_ctor_get(x_2, 0); -x_5 = lean_ctor_get(x_2, 2); -x_6 = lean_array_get_size(x_1); -x_7 = 32; -x_8 = lean_unbox_uint64(x_4); -x_9 = lean_uint64_shift_right(x_8, x_7); -x_10 = lean_unbox_uint64(x_4); -x_11 = lean_uint64_xor(x_10, x_9); -x_12 = 16; -x_13 = lean_uint64_shift_right(x_11, x_12); -x_14 = lean_uint64_xor(x_11, x_13); -x_15 = lean_uint64_to_usize(x_14); -x_16 = lean_usize_of_nat(x_6); -lean_dec(x_6); -x_17 = 1; -x_18 = lean_usize_sub(x_16, x_17); -x_19 = lean_usize_land(x_15, x_18); -x_20 = lean_array_uget(x_1, x_19); -lean_ctor_set(x_2, 2, x_20); -x_21 = lean_array_uset(x_1, x_19, x_2); -x_1 = x_21; -x_2 = x_5; -goto 
_start; +return x_17; } else { -lean_object* x_23; lean_object* x_24; lean_object* x_25; lean_object* x_26; uint64_t x_27; uint64_t x_28; uint64_t x_29; uint64_t x_30; uint64_t x_31; uint64_t x_32; uint64_t x_33; uint64_t x_34; size_t x_35; size_t x_36; size_t x_37; size_t x_38; size_t x_39; lean_object* x_40; lean_object* x_41; lean_object* x_42; -x_23 = lean_ctor_get(x_2, 0); -x_24 = lean_ctor_get(x_2, 1); -x_25 = lean_ctor_get(x_2, 2); -lean_inc(x_25); -lean_inc(x_24); +lean_object* x_22; lean_object* x_23; lean_object* x_24; +x_22 = lean_ctor_get(x_17, 0); +x_23 = lean_ctor_get(x_17, 1); lean_inc(x_23); -lean_dec(x_2); -x_26 = lean_array_get_size(x_1); -x_27 = 32; -x_28 = lean_unbox_uint64(x_23); -x_29 = lean_uint64_shift_right(x_28, x_27); -x_30 = lean_unbox_uint64(x_23); -x_31 = lean_uint64_xor(x_30, x_29); -x_32 = 16; -x_33 = lean_uint64_shift_right(x_31, x_32); -x_34 = lean_uint64_xor(x_31, x_33); -x_35 = lean_uint64_to_usize(x_34); -x_36 = lean_usize_of_nat(x_26); -lean_dec(x_26); -x_37 = 1; -x_38 = lean_usize_sub(x_36, x_37); -x_39 = lean_usize_land(x_35, x_38); -x_40 = lean_array_uget(x_1, x_39); -x_41 = lean_alloc_ctor(1, 3, 0); -lean_ctor_set(x_41, 0, x_23); -lean_ctor_set(x_41, 1, x_24); -lean_ctor_set(x_41, 2, x_40); -x_42 = lean_array_uset(x_1, x_39, x_41); -x_1 = x_42; -x_2 = x_25; -goto _start; +lean_inc(x_22); +lean_dec(x_17); +x_24 = lean_alloc_ctor(1, 2, 0); +lean_ctor_set(x_24, 0, x_22); +lean_ctor_set(x_24, 1, x_23); +return x_24; } } } } -LEAN_EXPORT lean_object* l_Std_DHashMap_Internal_Raw_u2080_expand_go___at___private_Lean_Meta_Tactic_Grind_Arith_CommRing_Proof_0__Lean_Meta_Grind_Arith_CommRing_Null_caching___spec__4(lean_object* x_1, lean_object* x_2, lean_object* x_3) { +} +LEAN_EXPORT lean_object* l_Lean_throwError___at_Lean_Meta_Grind_Arith_CommRing_getPolyConst___spec__8(lean_object* x_1, lean_object* x_2, lean_object* x_3, lean_object* x_4, lean_object* x_5, lean_object* x_6, lean_object* x_7, lean_object* x_8, lean_object* x_9, lean_object* x_10, lean_object* x_11) { _start: { -lean_object* x_4; uint8_t x_5; -x_4 = lean_array_get_size(x_2); -x_5 = lean_nat_dec_lt(x_1, x_4); -lean_dec(x_4); -if (x_5 == 0) +lean_object* x_12; lean_object* x_13; uint8_t x_14; +x_12 = lean_ctor_get(x_9, 5); +x_13 = l_Lean_addMessageContextFull___at_Lean_Meta_instAddMessageContextMetaM___spec__1(x_1, x_7, x_8, x_9, x_10, x_11); +x_14 = !lean_is_exclusive(x_13); +if (x_14 == 0) { -lean_dec(x_2); -lean_dec(x_1); -return x_3; +lean_object* x_15; lean_object* x_16; +x_15 = lean_ctor_get(x_13, 0); +lean_inc(x_12); +x_16 = lean_alloc_ctor(0, 2, 0); +lean_ctor_set(x_16, 0, x_12); +lean_ctor_set(x_16, 1, x_15); +lean_ctor_set_tag(x_13, 1); +lean_ctor_set(x_13, 0, x_16); +return x_13; } else { -lean_object* x_6; lean_object* x_7; lean_object* x_8; lean_object* x_9; lean_object* x_10; lean_object* x_11; -x_6 = lean_array_fget(x_2, x_1); -x_7 = lean_box(0); -x_8 = lean_array_fset(x_2, x_1, x_7); -x_9 = l_Std_DHashMap_Internal_AssocList_foldlM___at___private_Lean_Meta_Tactic_Grind_Arith_CommRing_Proof_0__Lean_Meta_Grind_Arith_CommRing_Null_caching___spec__5___at___private_Lean_Meta_Tactic_Grind_Arith_CommRing_Proof_0__Lean_Meta_Grind_Arith_CommRing_Null_caching___spec__6(x_3, x_6); -x_10 = lean_unsigned_to_nat(1u); -x_11 = lean_nat_add(x_1, x_10); -lean_dec(x_1); -x_1 = x_11; -x_2 = x_8; -x_3 = x_9; -goto _start; +lean_object* x_17; lean_object* x_18; lean_object* x_19; lean_object* x_20; +x_17 = lean_ctor_get(x_13, 0); +x_18 = lean_ctor_get(x_13, 1); +lean_inc(x_18); +lean_inc(x_17); 
+lean_dec(x_13); +lean_inc(x_12); +x_19 = lean_alloc_ctor(0, 2, 0); +lean_ctor_set(x_19, 0, x_12); +lean_ctor_set(x_19, 1, x_17); +x_20 = lean_alloc_ctor(1, 2, 0); +lean_ctor_set(x_20, 0, x_19); +lean_ctor_set(x_20, 1, x_18); +return x_20; } } } -LEAN_EXPORT lean_object* l_Std_DHashMap_Internal_Raw_u2080_expand___at___private_Lean_Meta_Tactic_Grind_Arith_CommRing_Proof_0__Lean_Meta_Grind_Arith_CommRing_Null_caching___spec__3(lean_object* x_1) { +static lean_object* _init_l_Lean_Meta_Grind_Arith_CommRing_getPolyConst___closed__1() { _start: { -lean_object* x_2; lean_object* x_3; lean_object* x_4; lean_object* x_5; lean_object* x_6; lean_object* x_7; lean_object* x_8; -x_2 = lean_array_get_size(x_1); -x_3 = lean_unsigned_to_nat(2u); -x_4 = lean_nat_mul(x_2, x_3); -lean_dec(x_2); -x_5 = lean_box(0); -x_6 = lean_mk_array(x_4, x_5); -x_7 = lean_unsigned_to_nat(0u); -x_8 = l_Std_DHashMap_Internal_Raw_u2080_expand_go___at___private_Lean_Meta_Tactic_Grind_Arith_CommRing_Proof_0__Lean_Meta_Grind_Arith_CommRing_Null_caching___spec__4(x_7, x_1, x_6); -return x_8; +lean_object* x_1; +x_1 = lean_mk_string_unchecked("`grind` internal error, constant polynomial expected ", 53, 53); +return x_1; } } -LEAN_EXPORT lean_object* l_Std_DHashMap_Internal_AssocList_replace___at___private_Lean_Meta_Tactic_Grind_Arith_CommRing_Proof_0__Lean_Meta_Grind_Arith_CommRing_Null_caching___spec__7(uint64_t x_1, lean_object* x_2, lean_object* x_3) { +static lean_object* _init_l_Lean_Meta_Grind_Arith_CommRing_getPolyConst___closed__2() { _start: { -if (lean_obj_tag(x_3) == 0) +lean_object* x_1; lean_object* x_2; +x_1 = l_Lean_Meta_Grind_Arith_CommRing_getPolyConst___closed__1; +x_2 = l_Lean_stringToMessageData(x_1); +return x_2; +} +} +LEAN_EXPORT lean_object* l_Lean_Meta_Grind_Arith_CommRing_getPolyConst(lean_object* x_1, lean_object* x_2, lean_object* x_3, lean_object* x_4, lean_object* x_5, lean_object* x_6, lean_object* x_7, lean_object* x_8, lean_object* x_9, lean_object* x_10, lean_object* x_11) { +_start: { -lean_object* x_4; -lean_dec(x_2); -x_4 = lean_box(0); -return x_4; +if (lean_obj_tag(x_1) == 0) +{ +lean_object* x_12; lean_object* x_13; +x_12 = lean_ctor_get(x_1, 0); +lean_inc(x_12); +lean_dec(x_1); +x_13 = lean_alloc_ctor(0, 2, 0); +lean_ctor_set(x_13, 0, x_12); +lean_ctor_set(x_13, 1, x_11); +return x_13; } else { -uint8_t x_5; -x_5 = !lean_is_exclusive(x_3); -if (x_5 == 0) +lean_object* x_14; +x_14 = l_Lean_Grind_CommRing_Poly_denoteExpr___at_Lean_Meta_Grind_Arith_CommRing_getPolyConst___spec__1(x_1, x_2, x_3, x_4, x_5, x_6, x_7, x_8, x_9, x_10, x_11); +if (lean_obj_tag(x_14) == 0) { -lean_object* x_6; lean_object* x_7; lean_object* x_8; uint64_t x_9; uint8_t x_10; -x_6 = lean_ctor_get(x_3, 0); -x_7 = lean_ctor_get(x_3, 1); -x_8 = lean_ctor_get(x_3, 2); -x_9 = lean_unbox_uint64(x_6); -x_10 = lean_uint64_dec_eq(x_9, x_1); -if (x_10 == 0) -{ -lean_object* x_11; -x_11 = l_Std_DHashMap_Internal_AssocList_replace___at___private_Lean_Meta_Tactic_Grind_Arith_CommRing_Proof_0__Lean_Meta_Grind_Arith_CommRing_Null_caching___spec__7(x_1, x_2, x_8); -lean_ctor_set(x_3, 2, x_11); -return x_3; -} -else -{ -lean_object* x_12; -lean_dec(x_7); -lean_dec(x_6); -x_12 = lean_box_uint64(x_1); -lean_ctor_set(x_3, 1, x_2); -lean_ctor_set(x_3, 0, x_12); -return x_3; -} +lean_object* x_15; lean_object* x_16; lean_object* x_17; lean_object* x_18; lean_object* x_19; lean_object* x_20; lean_object* x_21; lean_object* x_22; +x_15 = lean_ctor_get(x_14, 0); +lean_inc(x_15); +x_16 = lean_ctor_get(x_14, 1); +lean_inc(x_16); +lean_dec(x_14); 
+x_17 = l_Lean_indentExpr(x_15); +x_18 = l_Lean_Meta_Grind_Arith_CommRing_getPolyConst___closed__2; +x_19 = lean_alloc_ctor(7, 2, 0); +lean_ctor_set(x_19, 0, x_18); +lean_ctor_set(x_19, 1, x_17); +x_20 = l_Lean_Meta_Grind_Arith_CommRing_throwNoNatZeroDivisors___rarg___closed__4; +x_21 = lean_alloc_ctor(7, 2, 0); +lean_ctor_set(x_21, 0, x_19); +lean_ctor_set(x_21, 1, x_20); +x_22 = l_Lean_throwError___at_Lean_Meta_Grind_Arith_CommRing_getPolyConst___spec__8(x_21, x_2, x_3, x_4, x_5, x_6, x_7, x_8, x_9, x_10, x_16); +return x_22; } else { -lean_object* x_13; lean_object* x_14; lean_object* x_15; uint64_t x_16; uint8_t x_17; -x_13 = lean_ctor_get(x_3, 0); -x_14 = lean_ctor_get(x_3, 1); -x_15 = lean_ctor_get(x_3, 2); -lean_inc(x_15); -lean_inc(x_14); -lean_inc(x_13); -lean_dec(x_3); -x_16 = lean_unbox_uint64(x_13); -x_17 = lean_uint64_dec_eq(x_16, x_1); -if (x_17 == 0) +uint8_t x_23; +x_23 = !lean_is_exclusive(x_14); +if (x_23 == 0) { -lean_object* x_18; lean_object* x_19; -x_18 = l_Std_DHashMap_Internal_AssocList_replace___at___private_Lean_Meta_Tactic_Grind_Arith_CommRing_Proof_0__Lean_Meta_Grind_Arith_CommRing_Null_caching___spec__7(x_1, x_2, x_15); -x_19 = lean_alloc_ctor(1, 3, 0); -lean_ctor_set(x_19, 0, x_13); -lean_ctor_set(x_19, 1, x_14); -lean_ctor_set(x_19, 2, x_18); -return x_19; +return x_14; } else { -lean_object* x_20; lean_object* x_21; +lean_object* x_24; lean_object* x_25; lean_object* x_26; +x_24 = lean_ctor_get(x_14, 0); +x_25 = lean_ctor_get(x_14, 1); +lean_inc(x_25); +lean_inc(x_24); lean_dec(x_14); -lean_dec(x_13); -x_20 = lean_box_uint64(x_1); -x_21 = lean_alloc_ctor(1, 3, 0); -lean_ctor_set(x_21, 0, x_20); -lean_ctor_set(x_21, 1, x_2); -lean_ctor_set(x_21, 2, x_15); -return x_21; +x_26 = lean_alloc_ctor(1, 2, 0); +lean_ctor_set(x_26, 0, x_24); +lean_ctor_set(x_26, 1, x_25); +return x_26; } } } } } -LEAN_EXPORT lean_object* l___private_Lean_Meta_Tactic_Grind_Arith_CommRing_Proof_0__Lean_Meta_Grind_Arith_CommRing_Null_caching___rarg(lean_object* x_1, lean_object* x_2, lean_object* x_3, lean_object* x_4, lean_object* x_5, lean_object* x_6, lean_object* x_7, lean_object* x_8, lean_object* x_9, lean_object* x_10, lean_object* x_11, lean_object* x_12, lean_object* x_13) { +LEAN_EXPORT lean_object* l___private_Lean_Meta_Tactic_Grind_Arith_CommRing_DenoteExpr_0__Lean_Meta_Grind_Arith_CommRing_denoteNum___at_Lean_Meta_Grind_Arith_CommRing_getPolyConst___spec__2___boxed(lean_object* x_1, lean_object* x_2, lean_object* x_3, lean_object* x_4, lean_object* x_5, lean_object* x_6, lean_object* x_7, lean_object* x_8, lean_object* x_9, lean_object* x_10, lean_object* x_11) { _start: { -size_t x_14; uint64_t x_15; uint64_t x_16; uint64_t x_17; lean_object* x_18; lean_object* x_19; lean_object* x_20; uint8_t x_21; -x_14 = lean_ptr_addr(x_1); -x_15 = lean_usize_to_uint64(x_14); -x_16 = 2; -x_17 = lean_uint64_shift_right(x_15, x_16); -x_18 = lean_st_ref_get(x_3, x_13); -x_19 = lean_ctor_get(x_18, 0); -lean_inc(x_19); -x_20 = lean_ctor_get(x_19, 0); -lean_inc(x_20); -lean_dec(x_19); -x_21 = !lean_is_exclusive(x_18); -if (x_21 == 0) -{ -lean_object* x_22; lean_object* x_23; lean_object* x_24; lean_object* x_25; uint64_t x_26; uint64_t x_27; uint64_t x_28; uint64_t x_29; uint64_t x_30; uint64_t x_31; size_t x_32; size_t x_33; size_t x_34; size_t x_35; size_t x_36; lean_object* x_37; lean_object* x_38; -x_22 = lean_ctor_get(x_18, 1); -x_23 = lean_ctor_get(x_18, 0); -lean_dec(x_23); -x_24 = lean_ctor_get(x_20, 1); -lean_inc(x_24); -lean_dec(x_20); -x_25 = lean_array_get_size(x_24); -x_26 = 32; 
-x_27 = lean_uint64_shift_right(x_17, x_26); -x_28 = lean_uint64_xor(x_17, x_27); -x_29 = 16; -x_30 = lean_uint64_shift_right(x_28, x_29); -x_31 = lean_uint64_xor(x_28, x_30); -x_32 = lean_uint64_to_usize(x_31); -x_33 = lean_usize_of_nat(x_25); -lean_dec(x_25); -x_34 = 1; -x_35 = lean_usize_sub(x_33, x_34); -x_36 = lean_usize_land(x_32, x_35); -x_37 = lean_array_uget(x_24, x_36); -lean_dec(x_24); -x_38 = l_Std_DHashMap_Internal_AssocList_get_x3f___at___private_Lean_Meta_Tactic_Grind_Arith_CommRing_Proof_0__Lean_Meta_Grind_Arith_CommRing_Null_caching___spec__1(x_17, x_37); -if (lean_obj_tag(x_38) == 0) -{ -lean_object* x_39; -lean_free_object(x_18); -lean_inc(x_3); -x_39 = lean_apply_11(x_2, x_3, x_4, x_5, x_6, x_7, x_8, x_9, x_10, x_11, x_12, x_22); -if (lean_obj_tag(x_39) == 0) -{ -lean_object* x_40; lean_object* x_41; lean_object* x_42; lean_object* x_43; lean_object* x_44; lean_object* x_45; uint8_t x_46; -x_40 = lean_ctor_get(x_39, 0); -lean_inc(x_40); -x_41 = lean_ctor_get(x_39, 1); -lean_inc(x_41); -lean_dec(x_39); -x_42 = lean_st_ref_take(x_3, x_41); -x_43 = lean_ctor_get(x_42, 0); -lean_inc(x_43); -x_44 = lean_ctor_get(x_43, 0); -lean_inc(x_44); -x_45 = lean_ctor_get(x_42, 1); -lean_inc(x_45); -lean_dec(x_42); -x_46 = !lean_is_exclusive(x_43); -if (x_46 == 0) -{ -lean_object* x_47; uint8_t x_48; -x_47 = lean_ctor_get(x_43, 0); -lean_dec(x_47); -x_48 = !lean_is_exclusive(x_44); -if (x_48 == 0) -{ -lean_object* x_49; lean_object* x_50; lean_object* x_51; size_t x_52; size_t x_53; size_t x_54; lean_object* x_55; uint8_t x_56; -x_49 = lean_ctor_get(x_44, 0); -x_50 = lean_ctor_get(x_44, 1); -x_51 = lean_array_get_size(x_50); -x_52 = lean_usize_of_nat(x_51); -lean_dec(x_51); -x_53 = lean_usize_sub(x_52, x_34); -x_54 = lean_usize_land(x_32, x_53); -x_55 = lean_array_uget(x_50, x_54); -lean_inc(x_55); -x_56 = l_Std_DHashMap_Internal_AssocList_contains___at___private_Lean_Meta_Tactic_Grind_Arith_CommRing_Proof_0__Lean_Meta_Grind_Arith_CommRing_Null_caching___spec__2(x_17, x_55); -if (x_56 == 0) -{ -lean_object* x_57; lean_object* x_58; lean_object* x_59; lean_object* x_60; lean_object* x_61; lean_object* x_62; lean_object* x_63; lean_object* x_64; lean_object* x_65; lean_object* x_66; uint8_t x_67; -x_57 = lean_unsigned_to_nat(1u); -x_58 = lean_nat_add(x_49, x_57); -lean_dec(x_49); -x_59 = lean_box_uint64(x_17); -lean_inc(x_40); -x_60 = lean_alloc_ctor(1, 3, 0); -lean_ctor_set(x_60, 0, x_59); -lean_ctor_set(x_60, 1, x_40); -lean_ctor_set(x_60, 2, x_55); -x_61 = lean_array_uset(x_50, x_54, x_60); -x_62 = lean_unsigned_to_nat(4u); -x_63 = lean_nat_mul(x_58, x_62); -x_64 = lean_unsigned_to_nat(3u); -x_65 = lean_nat_div(x_63, x_64); -lean_dec(x_63); -x_66 = lean_array_get_size(x_61); -x_67 = lean_nat_dec_le(x_65, x_66); -lean_dec(x_66); -lean_dec(x_65); -if (x_67 == 0) -{ -lean_object* x_68; lean_object* x_69; uint8_t x_70; -x_68 = l_Std_DHashMap_Internal_Raw_u2080_expand___at___private_Lean_Meta_Tactic_Grind_Arith_CommRing_Proof_0__Lean_Meta_Grind_Arith_CommRing_Null_caching___spec__3(x_61); -lean_ctor_set(x_44, 1, x_68); -lean_ctor_set(x_44, 0, x_58); -x_69 = lean_st_ref_set(x_3, x_43, x_45); +lean_object* x_12; +x_12 = l___private_Lean_Meta_Tactic_Grind_Arith_CommRing_DenoteExpr_0__Lean_Meta_Grind_Arith_CommRing_denoteNum___at_Lean_Meta_Grind_Arith_CommRing_getPolyConst___spec__2(x_1, x_2, x_3, x_4, x_5, x_6, x_7, x_8, x_9, x_10, x_11); +lean_dec(x_10); +lean_dec(x_9); +lean_dec(x_8); +lean_dec(x_7); +lean_dec(x_6); +lean_dec(x_5); +lean_dec(x_4); lean_dec(x_3); -x_70 = 
!lean_is_exclusive(x_69); -if (x_70 == 0) -{ -lean_object* x_71; -x_71 = lean_ctor_get(x_69, 0); -lean_dec(x_71); -lean_ctor_set(x_69, 0, x_40); -return x_69; +lean_dec(x_2); +lean_dec(x_1); +return x_12; } -else +} +LEAN_EXPORT lean_object* l_Lean_Grind_CommRing_Power_denoteExpr___at_Lean_Meta_Grind_Arith_CommRing_getPolyConst___spec__5___boxed(lean_object* x_1, lean_object* x_2, lean_object* x_3, lean_object* x_4, lean_object* x_5, lean_object* x_6, lean_object* x_7, lean_object* x_8, lean_object* x_9, lean_object* x_10, lean_object* x_11) { +_start: { -lean_object* x_72; lean_object* x_73; -x_72 = lean_ctor_get(x_69, 1); -lean_inc(x_72); -lean_dec(x_69); -x_73 = lean_alloc_ctor(0, 2, 0); -lean_ctor_set(x_73, 0, x_40); -lean_ctor_set(x_73, 1, x_72); -return x_73; +lean_object* x_12; +x_12 = l_Lean_Grind_CommRing_Power_denoteExpr___at_Lean_Meta_Grind_Arith_CommRing_getPolyConst___spec__5(x_1, x_2, x_3, x_4, x_5, x_6, x_7, x_8, x_9, x_10, x_11); +lean_dec(x_10); +lean_dec(x_9); +lean_dec(x_8); +lean_dec(x_7); +lean_dec(x_6); +lean_dec(x_5); +lean_dec(x_4); +lean_dec(x_3); +lean_dec(x_2); +return x_12; } } -else +LEAN_EXPORT lean_object* l_Lean_Grind_CommRing_Mon_denoteExpr_go___at_Lean_Meta_Grind_Arith_CommRing_getPolyConst___spec__6___boxed(lean_object* x_1, lean_object* x_2, lean_object* x_3, lean_object* x_4, lean_object* x_5, lean_object* x_6, lean_object* x_7, lean_object* x_8, lean_object* x_9, lean_object* x_10, lean_object* x_11, lean_object* x_12) { +_start: { -lean_object* x_74; uint8_t x_75; -lean_ctor_set(x_44, 1, x_61); -lean_ctor_set(x_44, 0, x_58); -x_74 = lean_st_ref_set(x_3, x_43, x_45); +lean_object* x_13; +x_13 = l_Lean_Grind_CommRing_Mon_denoteExpr_go___at_Lean_Meta_Grind_Arith_CommRing_getPolyConst___spec__6(x_1, x_2, x_3, x_4, x_5, x_6, x_7, x_8, x_9, x_10, x_11, x_12); +lean_dec(x_11); +lean_dec(x_10); +lean_dec(x_9); +lean_dec(x_8); +lean_dec(x_7); +lean_dec(x_6); +lean_dec(x_5); +lean_dec(x_4); lean_dec(x_3); -x_75 = !lean_is_exclusive(x_74); -if (x_75 == 0) -{ -lean_object* x_76; -x_76 = lean_ctor_get(x_74, 0); -lean_dec(x_76); -lean_ctor_set(x_74, 0, x_40); -return x_74; +return x_13; } -else +} +LEAN_EXPORT lean_object* l_Lean_Grind_CommRing_Mon_denoteExpr___at_Lean_Meta_Grind_Arith_CommRing_getPolyConst___spec__4___boxed(lean_object* x_1, lean_object* x_2, lean_object* x_3, lean_object* x_4, lean_object* x_5, lean_object* x_6, lean_object* x_7, lean_object* x_8, lean_object* x_9, lean_object* x_10, lean_object* x_11) { +_start: { -lean_object* x_77; lean_object* x_78; -x_77 = lean_ctor_get(x_74, 1); -lean_inc(x_77); -lean_dec(x_74); -x_78 = lean_alloc_ctor(0, 2, 0); -lean_ctor_set(x_78, 0, x_40); -lean_ctor_set(x_78, 1, x_77); -return x_78; -} +lean_object* x_12; +x_12 = l_Lean_Grind_CommRing_Mon_denoteExpr___at_Lean_Meta_Grind_Arith_CommRing_getPolyConst___spec__4(x_1, x_2, x_3, x_4, x_5, x_6, x_7, x_8, x_9, x_10, x_11); +lean_dec(x_10); +lean_dec(x_9); +lean_dec(x_8); +lean_dec(x_7); +lean_dec(x_6); +lean_dec(x_5); +lean_dec(x_4); +lean_dec(x_3); +lean_dec(x_2); +return x_12; } } -else +LEAN_EXPORT lean_object* l_Lean_Grind_CommRing_Poly_denoteExpr_denoteTerm___at_Lean_Meta_Grind_Arith_CommRing_getPolyConst___spec__3___boxed(lean_object* x_1, lean_object* x_2, lean_object* x_3, lean_object* x_4, lean_object* x_5, lean_object* x_6, lean_object* x_7, lean_object* x_8, lean_object* x_9, lean_object* x_10, lean_object* x_11, lean_object* x_12) { +_start: { -lean_object* x_79; lean_object* x_80; lean_object* x_81; lean_object* x_82; lean_object* x_83; uint8_t 
x_84; -x_79 = lean_box(0); -x_80 = lean_array_uset(x_50, x_54, x_79); -lean_inc(x_40); -x_81 = l_Std_DHashMap_Internal_AssocList_replace___at___private_Lean_Meta_Tactic_Grind_Arith_CommRing_Proof_0__Lean_Meta_Grind_Arith_CommRing_Null_caching___spec__7(x_17, x_40, x_55); -x_82 = lean_array_uset(x_80, x_54, x_81); -lean_ctor_set(x_44, 1, x_82); -x_83 = lean_st_ref_set(x_3, x_43, x_45); +lean_object* x_13; +x_13 = l_Lean_Grind_CommRing_Poly_denoteExpr_denoteTerm___at_Lean_Meta_Grind_Arith_CommRing_getPolyConst___spec__3(x_1, x_2, x_3, x_4, x_5, x_6, x_7, x_8, x_9, x_10, x_11, x_12); +lean_dec(x_11); +lean_dec(x_10); +lean_dec(x_9); +lean_dec(x_8); +lean_dec(x_7); +lean_dec(x_6); +lean_dec(x_5); +lean_dec(x_4); lean_dec(x_3); -x_84 = !lean_is_exclusive(x_83); -if (x_84 == 0) -{ -lean_object* x_85; -x_85 = lean_ctor_get(x_83, 0); -lean_dec(x_85); -lean_ctor_set(x_83, 0, x_40); -return x_83; +lean_dec(x_1); +return x_13; } -else -{ -lean_object* x_86; lean_object* x_87; -x_86 = lean_ctor_get(x_83, 1); -lean_inc(x_86); -lean_dec(x_83); -x_87 = lean_alloc_ctor(0, 2, 0); -lean_ctor_set(x_87, 0, x_40); -lean_ctor_set(x_87, 1, x_86); -return x_87; } +LEAN_EXPORT lean_object* l_Lean_Grind_CommRing_Poly_denoteExpr_go___at_Lean_Meta_Grind_Arith_CommRing_getPolyConst___spec__7___boxed(lean_object* x_1, lean_object* x_2, lean_object* x_3, lean_object* x_4, lean_object* x_5, lean_object* x_6, lean_object* x_7, lean_object* x_8, lean_object* x_9, lean_object* x_10, lean_object* x_11, lean_object* x_12) { +_start: +{ +lean_object* x_13; +x_13 = l_Lean_Grind_CommRing_Poly_denoteExpr_go___at_Lean_Meta_Grind_Arith_CommRing_getPolyConst___spec__7(x_1, x_2, x_3, x_4, x_5, x_6, x_7, x_8, x_9, x_10, x_11, x_12); +lean_dec(x_11); +lean_dec(x_10); +lean_dec(x_9); +lean_dec(x_8); +lean_dec(x_7); +lean_dec(x_6); +lean_dec(x_5); +lean_dec(x_4); +lean_dec(x_3); +return x_13; } } -else -{ -lean_object* x_88; lean_object* x_89; lean_object* x_90; size_t x_91; size_t x_92; size_t x_93; lean_object* x_94; uint8_t x_95; -x_88 = lean_ctor_get(x_44, 0); -x_89 = lean_ctor_get(x_44, 1); -lean_inc(x_89); -lean_inc(x_88); -lean_dec(x_44); -x_90 = lean_array_get_size(x_89); -x_91 = lean_usize_of_nat(x_90); -lean_dec(x_90); -x_92 = lean_usize_sub(x_91, x_34); -x_93 = lean_usize_land(x_32, x_92); -x_94 = lean_array_uget(x_89, x_93); -lean_inc(x_94); -x_95 = l_Std_DHashMap_Internal_AssocList_contains___at___private_Lean_Meta_Tactic_Grind_Arith_CommRing_Proof_0__Lean_Meta_Grind_Arith_CommRing_Null_caching___spec__2(x_17, x_94); -if (x_95 == 0) -{ -lean_object* x_96; lean_object* x_97; lean_object* x_98; lean_object* x_99; lean_object* x_100; lean_object* x_101; lean_object* x_102; lean_object* x_103; lean_object* x_104; lean_object* x_105; uint8_t x_106; -x_96 = lean_unsigned_to_nat(1u); -x_97 = lean_nat_add(x_88, x_96); -lean_dec(x_88); -x_98 = lean_box_uint64(x_17); -lean_inc(x_40); -x_99 = lean_alloc_ctor(1, 3, 0); -lean_ctor_set(x_99, 0, x_98); -lean_ctor_set(x_99, 1, x_40); -lean_ctor_set(x_99, 2, x_94); -x_100 = lean_array_uset(x_89, x_93, x_99); -x_101 = lean_unsigned_to_nat(4u); -x_102 = lean_nat_mul(x_97, x_101); -x_103 = lean_unsigned_to_nat(3u); -x_104 = lean_nat_div(x_102, x_103); -lean_dec(x_102); -x_105 = lean_array_get_size(x_100); -x_106 = lean_nat_dec_le(x_104, x_105); -lean_dec(x_105); -lean_dec(x_104); -if (x_106 == 0) +LEAN_EXPORT lean_object* l_Lean_Grind_CommRing_Poly_denoteExpr___at_Lean_Meta_Grind_Arith_CommRing_getPolyConst___spec__1___boxed(lean_object* x_1, lean_object* x_2, lean_object* x_3, lean_object* 
x_4, lean_object* x_5, lean_object* x_6, lean_object* x_7, lean_object* x_8, lean_object* x_9, lean_object* x_10, lean_object* x_11) { +_start: { -lean_object* x_107; lean_object* x_108; lean_object* x_109; lean_object* x_110; lean_object* x_111; lean_object* x_112; -x_107 = l_Std_DHashMap_Internal_Raw_u2080_expand___at___private_Lean_Meta_Tactic_Grind_Arith_CommRing_Proof_0__Lean_Meta_Grind_Arith_CommRing_Null_caching___spec__3(x_100); -x_108 = lean_alloc_ctor(0, 2, 0); -lean_ctor_set(x_108, 0, x_97); -lean_ctor_set(x_108, 1, x_107); -lean_ctor_set(x_43, 0, x_108); -x_109 = lean_st_ref_set(x_3, x_43, x_45); +lean_object* x_12; +x_12 = l_Lean_Grind_CommRing_Poly_denoteExpr___at_Lean_Meta_Grind_Arith_CommRing_getPolyConst___spec__1(x_1, x_2, x_3, x_4, x_5, x_6, x_7, x_8, x_9, x_10, x_11); +lean_dec(x_10); +lean_dec(x_9); +lean_dec(x_8); +lean_dec(x_7); +lean_dec(x_6); +lean_dec(x_5); +lean_dec(x_4); lean_dec(x_3); -x_110 = lean_ctor_get(x_109, 1); -lean_inc(x_110); -if (lean_is_exclusive(x_109)) { - lean_ctor_release(x_109, 0); - lean_ctor_release(x_109, 1); - x_111 = x_109; -} else { - lean_dec_ref(x_109); - x_111 = lean_box(0); +lean_dec(x_2); +return x_12; } -if (lean_is_scalar(x_111)) { - x_112 = lean_alloc_ctor(0, 2, 0); -} else { - x_112 = x_111; } -lean_ctor_set(x_112, 0, x_40); -lean_ctor_set(x_112, 1, x_110); -return x_112; +LEAN_EXPORT lean_object* l_Lean_throwError___at_Lean_Meta_Grind_Arith_CommRing_getPolyConst___spec__8___boxed(lean_object* x_1, lean_object* x_2, lean_object* x_3, lean_object* x_4, lean_object* x_5, lean_object* x_6, lean_object* x_7, lean_object* x_8, lean_object* x_9, lean_object* x_10, lean_object* x_11) { +_start: +{ +lean_object* x_12; +x_12 = l_Lean_throwError___at_Lean_Meta_Grind_Arith_CommRing_getPolyConst___spec__8(x_1, x_2, x_3, x_4, x_5, x_6, x_7, x_8, x_9, x_10, x_11); +lean_dec(x_10); +lean_dec(x_9); +lean_dec(x_8); +lean_dec(x_7); +lean_dec(x_6); +lean_dec(x_5); +lean_dec(x_4); +lean_dec(x_3); +lean_dec(x_2); +return x_12; } -else +} +LEAN_EXPORT lean_object* l_Lean_Meta_Grind_Arith_CommRing_getPolyConst___boxed(lean_object* x_1, lean_object* x_2, lean_object* x_3, lean_object* x_4, lean_object* x_5, lean_object* x_6, lean_object* x_7, lean_object* x_8, lean_object* x_9, lean_object* x_10, lean_object* x_11) { +_start: { -lean_object* x_113; lean_object* x_114; lean_object* x_115; lean_object* x_116; lean_object* x_117; -x_113 = lean_alloc_ctor(0, 2, 0); -lean_ctor_set(x_113, 0, x_97); -lean_ctor_set(x_113, 1, x_100); -lean_ctor_set(x_43, 0, x_113); -x_114 = lean_st_ref_set(x_3, x_43, x_45); +lean_object* x_12; +x_12 = l_Lean_Meta_Grind_Arith_CommRing_getPolyConst(x_1, x_2, x_3, x_4, x_5, x_6, x_7, x_8, x_9, x_10, x_11); +lean_dec(x_10); +lean_dec(x_9); +lean_dec(x_8); +lean_dec(x_7); +lean_dec(x_6); +lean_dec(x_5); +lean_dec(x_4); lean_dec(x_3); -x_115 = lean_ctor_get(x_114, 1); -lean_inc(x_115); -if (lean_is_exclusive(x_114)) { - lean_ctor_release(x_114, 0); - lean_ctor_release(x_114, 1); - x_116 = x_114; -} else { - lean_dec_ref(x_114); - x_116 = lean_box(0); +lean_dec(x_2); +return x_12; } -if (lean_is_scalar(x_116)) { - x_117 = lean_alloc_ctor(0, 2, 0); -} else { - x_117 = x_116; } -lean_ctor_set(x_117, 0, x_40); -lean_ctor_set(x_117, 1, x_115); -return x_117; +static lean_object* _init_l_Lean_Meta_Grind_Arith_CommRing_Null_instInhabitedPreNullCert___closed__1() { +_start: +{ +lean_object* x_1; lean_object* x_2; +x_1 = lean_unsigned_to_nat(0u); +x_2 = lean_mk_empty_array_with_capacity(x_1); +return x_2; } } -else +static lean_object* 
_init_l_Lean_Meta_Grind_Arith_CommRing_Null_instInhabitedPreNullCert___closed__2() { +_start: { -lean_object* x_118; lean_object* x_119; lean_object* x_120; lean_object* x_121; lean_object* x_122; lean_object* x_123; lean_object* x_124; lean_object* x_125; lean_object* x_126; -x_118 = lean_box(0); -x_119 = lean_array_uset(x_89, x_93, x_118); -lean_inc(x_40); -x_120 = l_Std_DHashMap_Internal_AssocList_replace___at___private_Lean_Meta_Tactic_Grind_Arith_CommRing_Proof_0__Lean_Meta_Grind_Arith_CommRing_Null_caching___spec__7(x_17, x_40, x_94); -x_121 = lean_array_uset(x_119, x_93, x_120); -x_122 = lean_alloc_ctor(0, 2, 0); -lean_ctor_set(x_122, 0, x_88); -lean_ctor_set(x_122, 1, x_121); -lean_ctor_set(x_43, 0, x_122); -x_123 = lean_st_ref_set(x_3, x_43, x_45); -lean_dec(x_3); -x_124 = lean_ctor_get(x_123, 1); -lean_inc(x_124); -if (lean_is_exclusive(x_123)) { - lean_ctor_release(x_123, 0); - lean_ctor_release(x_123, 1); - x_125 = x_123; -} else { - lean_dec_ref(x_123); - x_125 = lean_box(0); +lean_object* x_1; lean_object* x_2; lean_object* x_3; +x_1 = l_Lean_Meta_Grind_Arith_CommRing_Null_instInhabitedPreNullCert___closed__1; +x_2 = l___private_Lean_Meta_Tactic_Grind_Arith_CommRing_DenoteExpr_0__Lean_Meta_Grind_Arith_CommRing_denoteNum___at_Lean_Meta_Grind_Arith_CommRing_getPolyConst___spec__2___closed__8; +x_3 = lean_alloc_ctor(0, 2, 0); +lean_ctor_set(x_3, 0, x_1); +lean_ctor_set(x_3, 1, x_2); +return x_3; } -if (lean_is_scalar(x_125)) { - x_126 = lean_alloc_ctor(0, 2, 0); -} else { - x_126 = x_125; } -lean_ctor_set(x_126, 0, x_40); -lean_ctor_set(x_126, 1, x_124); -return x_126; +static lean_object* _init_l_Lean_Meta_Grind_Arith_CommRing_Null_instInhabitedPreNullCert() { +_start: +{ +lean_object* x_1; +x_1 = l_Lean_Meta_Grind_Arith_CommRing_Null_instInhabitedPreNullCert___closed__2; +return x_1; +} } +static lean_object* _init_l_Lean_Meta_Grind_Arith_CommRing_Null_PreNullCert_zero___closed__1() { +_start: +{ +lean_object* x_1; lean_object* x_2; +x_1 = lean_box(0); +x_2 = lean_array_mk(x_1); +return x_2; } } -else +static lean_object* _init_l_Lean_Meta_Grind_Arith_CommRing_Null_PreNullCert_zero___closed__2() { +_start: { -lean_object* x_127; lean_object* x_128; lean_object* x_129; lean_object* x_130; lean_object* x_131; size_t x_132; size_t x_133; size_t x_134; lean_object* x_135; uint8_t x_136; -x_127 = lean_ctor_get(x_43, 1); -lean_inc(x_127); -lean_dec(x_43); -x_128 = lean_ctor_get(x_44, 0); -lean_inc(x_128); -x_129 = lean_ctor_get(x_44, 1); -lean_inc(x_129); -if (lean_is_exclusive(x_44)) { - lean_ctor_release(x_44, 0); - lean_ctor_release(x_44, 1); - x_130 = x_44; -} else { - lean_dec_ref(x_44); - x_130 = lean_box(0); -} -x_131 = lean_array_get_size(x_129); -x_132 = lean_usize_of_nat(x_131); -lean_dec(x_131); -x_133 = lean_usize_sub(x_132, x_34); -x_134 = lean_usize_land(x_32, x_133); -x_135 = lean_array_uget(x_129, x_134); -lean_inc(x_135); -x_136 = l_Std_DHashMap_Internal_AssocList_contains___at___private_Lean_Meta_Tactic_Grind_Arith_CommRing_Proof_0__Lean_Meta_Grind_Arith_CommRing_Null_caching___spec__2(x_17, x_135); -if (x_136 == 0) -{ -lean_object* x_137; lean_object* x_138; lean_object* x_139; lean_object* x_140; lean_object* x_141; lean_object* x_142; lean_object* x_143; lean_object* x_144; lean_object* x_145; lean_object* x_146; uint8_t x_147; -x_137 = lean_unsigned_to_nat(1u); -x_138 = lean_nat_add(x_128, x_137); -lean_dec(x_128); -x_139 = lean_box_uint64(x_17); -lean_inc(x_40); -x_140 = lean_alloc_ctor(1, 3, 0); -lean_ctor_set(x_140, 0, x_139); -lean_ctor_set(x_140, 1, x_40); 
-lean_ctor_set(x_140, 2, x_135); -x_141 = lean_array_uset(x_129, x_134, x_140); -x_142 = lean_unsigned_to_nat(4u); -x_143 = lean_nat_mul(x_138, x_142); -x_144 = lean_unsigned_to_nat(3u); -x_145 = lean_nat_div(x_143, x_144); -lean_dec(x_143); -x_146 = lean_array_get_size(x_141); -x_147 = lean_nat_dec_le(x_145, x_146); -lean_dec(x_146); -lean_dec(x_145); -if (x_147 == 0) -{ -lean_object* x_148; lean_object* x_149; lean_object* x_150; lean_object* x_151; lean_object* x_152; lean_object* x_153; lean_object* x_154; -x_148 = l_Std_DHashMap_Internal_Raw_u2080_expand___at___private_Lean_Meta_Tactic_Grind_Arith_CommRing_Proof_0__Lean_Meta_Grind_Arith_CommRing_Null_caching___spec__3(x_141); -if (lean_is_scalar(x_130)) { - x_149 = lean_alloc_ctor(0, 2, 0); -} else { - x_149 = x_130; -} -lean_ctor_set(x_149, 0, x_138); -lean_ctor_set(x_149, 1, x_148); -x_150 = lean_alloc_ctor(0, 2, 0); -lean_ctor_set(x_150, 0, x_149); -lean_ctor_set(x_150, 1, x_127); -x_151 = lean_st_ref_set(x_3, x_150, x_45); -lean_dec(x_3); -x_152 = lean_ctor_get(x_151, 1); -lean_inc(x_152); -if (lean_is_exclusive(x_151)) { - lean_ctor_release(x_151, 0); - lean_ctor_release(x_151, 1); - x_153 = x_151; -} else { - lean_dec_ref(x_151); - x_153 = lean_box(0); -} -if (lean_is_scalar(x_153)) { - x_154 = lean_alloc_ctor(0, 2, 0); -} else { - x_154 = x_153; +lean_object* x_1; lean_object* x_2; lean_object* x_3; +x_1 = l_Lean_Meta_Grind_Arith_CommRing_Null_PreNullCert_zero___closed__1; +x_2 = l_Lean_Grind_CommRing_Mon_denoteExpr___at_Lean_Meta_Grind_Arith_CommRing_getPolyConst___spec__4___closed__1; +x_3 = lean_alloc_ctor(0, 2, 0); +lean_ctor_set(x_3, 0, x_1); +lean_ctor_set(x_3, 1, x_2); +return x_3; } -lean_ctor_set(x_154, 0, x_40); -lean_ctor_set(x_154, 1, x_152); -return x_154; } -else +static lean_object* _init_l_Lean_Meta_Grind_Arith_CommRing_Null_PreNullCert_zero() { +_start: { -lean_object* x_155; lean_object* x_156; lean_object* x_157; lean_object* x_158; lean_object* x_159; lean_object* x_160; -if (lean_is_scalar(x_130)) { - x_155 = lean_alloc_ctor(0, 2, 0); -} else { - x_155 = x_130; -} -lean_ctor_set(x_155, 0, x_138); -lean_ctor_set(x_155, 1, x_141); -x_156 = lean_alloc_ctor(0, 2, 0); -lean_ctor_set(x_156, 0, x_155); -lean_ctor_set(x_156, 1, x_127); -x_157 = lean_st_ref_set(x_3, x_156, x_45); -lean_dec(x_3); -x_158 = lean_ctor_get(x_157, 1); -lean_inc(x_158); -if (lean_is_exclusive(x_157)) { - lean_ctor_release(x_157, 0); - lean_ctor_release(x_157, 1); - x_159 = x_157; -} else { - lean_dec_ref(x_157); - x_159 = lean_box(0); +lean_object* x_1; +x_1 = l_Lean_Meta_Grind_Arith_CommRing_Null_PreNullCert_zero___closed__2; +return x_1; } -if (lean_is_scalar(x_159)) { - x_160 = lean_alloc_ctor(0, 2, 0); -} else { - x_160 = x_159; } -lean_ctor_set(x_160, 0, x_40); -lean_ctor_set(x_160, 1, x_158); -return x_160; +static lean_object* _init_l_Lean_Meta_Grind_Arith_CommRing_Null_PreNullCert_unit___closed__1() { +_start: +{ +lean_object* x_1; lean_object* x_2; +x_1 = l___private_Lean_Meta_Tactic_Grind_Arith_CommRing_DenoteExpr_0__Lean_Meta_Grind_Arith_CommRing_denoteNum___at_Lean_Meta_Grind_Arith_CommRing_getPolyConst___spec__2___closed__8; +x_2 = lean_alloc_ctor(0, 1, 0); +lean_ctor_set(x_2, 0, x_1); +return x_2; } } -else +static lean_object* _init_l_Lean_Meta_Grind_Arith_CommRing_Null_PreNullCert_unit___closed__2() { +_start: { -lean_object* x_161; lean_object* x_162; lean_object* x_163; lean_object* x_164; lean_object* x_165; lean_object* x_166; lean_object* x_167; lean_object* x_168; lean_object* x_169; lean_object* x_170; -x_161 = 
lean_box(0); -x_162 = lean_array_uset(x_129, x_134, x_161); -lean_inc(x_40); -x_163 = l_Std_DHashMap_Internal_AssocList_replace___at___private_Lean_Meta_Tactic_Grind_Arith_CommRing_Proof_0__Lean_Meta_Grind_Arith_CommRing_Null_caching___spec__7(x_17, x_40, x_135); -x_164 = lean_array_uset(x_162, x_134, x_163); -if (lean_is_scalar(x_130)) { - x_165 = lean_alloc_ctor(0, 2, 0); -} else { - x_165 = x_130; +lean_object* x_1; lean_object* x_2; +x_1 = l_Lean_Grind_CommRing_Mon_denoteExpr___at_Lean_Meta_Grind_Arith_CommRing_getPolyConst___spec__4___closed__1; +x_2 = lean_alloc_ctor(0, 1, 0); +lean_ctor_set(x_2, 0, x_1); +return x_2; } -lean_ctor_set(x_165, 0, x_128); -lean_ctor_set(x_165, 1, x_164); -x_166 = lean_alloc_ctor(0, 2, 0); -lean_ctor_set(x_166, 0, x_165); -lean_ctor_set(x_166, 1, x_127); -x_167 = lean_st_ref_set(x_3, x_166, x_45); -lean_dec(x_3); -x_168 = lean_ctor_get(x_167, 1); -lean_inc(x_168); -if (lean_is_exclusive(x_167)) { - lean_ctor_release(x_167, 0); - lean_ctor_release(x_167, 1); - x_169 = x_167; -} else { - lean_dec_ref(x_167); - x_169 = lean_box(0); } -if (lean_is_scalar(x_169)) { - x_170 = lean_alloc_ctor(0, 2, 0); -} else { - x_170 = x_169; +LEAN_EXPORT lean_object* l_Lean_Meta_Grind_Arith_CommRing_Null_PreNullCert_unit(lean_object* x_1, lean_object* x_2) { +_start: +{ +lean_object* x_3; lean_object* x_4; lean_object* x_5; lean_object* x_6; lean_object* x_7; lean_object* x_8; +x_3 = l_Lean_Meta_Grind_Arith_CommRing_Null_PreNullCert_unit___closed__1; +x_4 = lean_mk_array(x_2, x_3); +x_5 = l_Lean_Meta_Grind_Arith_CommRing_Null_PreNullCert_unit___closed__2; +x_6 = lean_array_set(x_4, x_1, x_5); +x_7 = l_Lean_Grind_CommRing_Mon_denoteExpr___at_Lean_Meta_Grind_Arith_CommRing_getPolyConst___spec__4___closed__1; +x_8 = lean_alloc_ctor(0, 2, 0); +lean_ctor_set(x_8, 0, x_6); +lean_ctor_set(x_8, 1, x_7); +return x_8; } -lean_ctor_set(x_170, 0, x_40); -lean_ctor_set(x_170, 1, x_168); -return x_170; } +LEAN_EXPORT lean_object* l_Lean_Meta_Grind_Arith_CommRing_Null_PreNullCert_unit___boxed(lean_object* x_1, lean_object* x_2) { +_start: +{ +lean_object* x_3; +x_3 = l_Lean_Meta_Grind_Arith_CommRing_Null_PreNullCert_unit(x_1, x_2); +lean_dec(x_1); +return x_3; } } -else +LEAN_EXPORT lean_object* l_Lean_Meta_Grind_Arith_CommRing_Null_PreNullCert_div(lean_object* x_1, lean_object* x_2, lean_object* x_3, lean_object* x_4, lean_object* x_5, lean_object* x_6, lean_object* x_7, lean_object* x_8, lean_object* x_9, lean_object* x_10, lean_object* x_11, lean_object* x_12) { +_start: { -uint8_t x_171; -lean_dec(x_3); -x_171 = !lean_is_exclusive(x_39); -if (x_171 == 0) +uint8_t x_13; +x_13 = !lean_is_exclusive(x_1); +if (x_13 == 0) { -return x_39; +lean_object* x_14; lean_object* x_15; lean_object* x_16; +x_14 = lean_ctor_get(x_1, 1); +x_15 = lean_int_mul(x_14, x_2); +lean_dec(x_14); +lean_ctor_set(x_1, 1, x_15); +x_16 = lean_alloc_ctor(0, 2, 0); +lean_ctor_set(x_16, 0, x_1); +lean_ctor_set(x_16, 1, x_12); +return x_16; } else { -lean_object* x_172; lean_object* x_173; lean_object* x_174; -x_172 = lean_ctor_get(x_39, 0); -x_173 = lean_ctor_get(x_39, 1); -lean_inc(x_173); -lean_inc(x_172); -lean_dec(x_39); -x_174 = lean_alloc_ctor(1, 2, 0); -lean_ctor_set(x_174, 0, x_172); -lean_ctor_set(x_174, 1, x_173); -return x_174; +lean_object* x_17; lean_object* x_18; lean_object* x_19; lean_object* x_20; lean_object* x_21; +x_17 = lean_ctor_get(x_1, 0); +x_18 = lean_ctor_get(x_1, 1); +lean_inc(x_18); +lean_inc(x_17); +lean_dec(x_1); +x_19 = lean_int_mul(x_18, x_2); +lean_dec(x_18); +x_20 = lean_alloc_ctor(0, 
2, 0); +lean_ctor_set(x_20, 0, x_17); +lean_ctor_set(x_20, 1, x_19); +x_21 = lean_alloc_ctor(0, 2, 0); +lean_ctor_set(x_21, 0, x_20); +lean_ctor_set(x_21, 1, x_12); +return x_21; } } } -else +LEAN_EXPORT lean_object* l_Lean_Meta_Grind_Arith_CommRing_Null_PreNullCert_div___boxed(lean_object* x_1, lean_object* x_2, lean_object* x_3, lean_object* x_4, lean_object* x_5, lean_object* x_6, lean_object* x_7, lean_object* x_8, lean_object* x_9, lean_object* x_10, lean_object* x_11, lean_object* x_12) { +_start: { -lean_object* x_175; -lean_dec(x_12); +lean_object* x_13; +x_13 = l_Lean_Meta_Grind_Arith_CommRing_Null_PreNullCert_div(x_1, x_2, x_3, x_4, x_5, x_6, x_7, x_8, x_9, x_10, x_11, x_12); lean_dec(x_11); lean_dec(x_10); lean_dec(x_9); @@ -2849,30 +2434,1750 @@ lean_dec(x_5); lean_dec(x_4); lean_dec(x_3); lean_dec(x_2); -x_175 = lean_ctor_get(x_38, 0); -lean_inc(x_175); -lean_dec(x_38); -lean_ctor_set(x_18, 0, x_175); -return x_18; +return x_13; } } -else +LEAN_EXPORT lean_object* l_Array_mapMUnsafe_map___at_Lean_Meta_Grind_Arith_CommRing_Null_PreNullCert_mul___spec__1(lean_object* x_1, size_t x_2, size_t x_3, lean_object* x_4, lean_object* x_5, lean_object* x_6, lean_object* x_7, lean_object* x_8, lean_object* x_9, lean_object* x_10, lean_object* x_11, lean_object* x_12, lean_object* x_13, lean_object* x_14) { +_start: { -lean_object* x_176; lean_object* x_177; lean_object* x_178; uint64_t x_179; uint64_t x_180; uint64_t x_181; uint64_t x_182; uint64_t x_183; uint64_t x_184; size_t x_185; size_t x_186; size_t x_187; size_t x_188; size_t x_189; lean_object* x_190; lean_object* x_191; -x_176 = lean_ctor_get(x_18, 1); -lean_inc(x_176); -lean_dec(x_18); -x_177 = lean_ctor_get(x_20, 1); -lean_inc(x_177); -lean_dec(x_20); -x_178 = lean_array_get_size(x_177); -x_179 = 32; -x_180 = lean_uint64_shift_right(x_17, x_179); -x_181 = lean_uint64_xor(x_17, x_180); -x_182 = 16; -x_183 = lean_uint64_shift_right(x_181, x_182); -x_184 = lean_uint64_xor(x_181, x_183); -x_185 = lean_uint64_to_usize(x_184); +uint8_t x_15; +x_15 = lean_usize_dec_lt(x_3, x_2); +if (x_15 == 0) +{ +lean_object* x_16; +lean_dec(x_13); +lean_dec(x_12); +lean_dec(x_11); +lean_dec(x_10); +lean_dec(x_9); +lean_dec(x_8); +lean_dec(x_7); +lean_dec(x_6); +lean_dec(x_5); +x_16 = lean_alloc_ctor(0, 2, 0); +lean_ctor_set(x_16, 0, x_4); +lean_ctor_set(x_16, 1, x_14); +return x_16; +} +else +{ +lean_object* x_17; lean_object* x_18; lean_object* x_19; lean_object* x_20; +x_17 = lean_array_uget(x_4, x_3); +x_18 = lean_unsigned_to_nat(0u); +x_19 = lean_array_uset(x_4, x_3, x_18); +lean_inc(x_13); +lean_inc(x_12); +lean_inc(x_11); +lean_inc(x_10); +lean_inc(x_9); +lean_inc(x_8); +lean_inc(x_7); +lean_inc(x_6); +lean_inc(x_5); +x_20 = l_Lean_Grind_CommRing_Poly_mulConstM(x_17, x_1, x_5, x_6, x_7, x_8, x_9, x_10, x_11, x_12, x_13, x_14); +if (lean_obj_tag(x_20) == 0) +{ +lean_object* x_21; lean_object* x_22; size_t x_23; size_t x_24; lean_object* x_25; +x_21 = lean_ctor_get(x_20, 0); +lean_inc(x_21); +x_22 = lean_ctor_get(x_20, 1); +lean_inc(x_22); +lean_dec(x_20); +x_23 = 1; +x_24 = lean_usize_add(x_3, x_23); +x_25 = lean_array_uset(x_19, x_3, x_21); +x_3 = x_24; +x_4 = x_25; +x_14 = x_22; +goto _start; +} +else +{ +uint8_t x_27; +lean_dec(x_19); +lean_dec(x_13); +lean_dec(x_12); +lean_dec(x_11); +lean_dec(x_10); +lean_dec(x_9); +lean_dec(x_8); +lean_dec(x_7); +lean_dec(x_6); +lean_dec(x_5); +x_27 = !lean_is_exclusive(x_20); +if (x_27 == 0) +{ +return x_20; +} +else +{ +lean_object* x_28; lean_object* x_29; lean_object* x_30; +x_28 = 
lean_ctor_get(x_20, 0); +x_29 = lean_ctor_get(x_20, 1); +lean_inc(x_29); +lean_inc(x_28); +lean_dec(x_20); +x_30 = lean_alloc_ctor(1, 2, 0); +lean_ctor_set(x_30, 0, x_28); +lean_ctor_set(x_30, 1, x_29); +return x_30; +} +} +} +} +} +LEAN_EXPORT lean_object* l_Lean_Meta_Grind_Arith_CommRing_Null_PreNullCert_mul(lean_object* x_1, lean_object* x_2, lean_object* x_3, lean_object* x_4, lean_object* x_5, lean_object* x_6, lean_object* x_7, lean_object* x_8, lean_object* x_9, lean_object* x_10, lean_object* x_11, lean_object* x_12) { +_start: +{ +lean_object* x_13; uint8_t x_14; +x_13 = l_Lean_Grind_CommRing_Mon_denoteExpr___at_Lean_Meta_Grind_Arith_CommRing_getPolyConst___spec__4___closed__1; +x_14 = lean_int_dec_eq(x_2, x_13); +if (x_14 == 0) +{ +uint8_t x_15; +x_15 = !lean_is_exclusive(x_1); +if (x_15 == 0) +{ +lean_object* x_16; lean_object* x_17; lean_object* x_18; lean_object* x_19; lean_object* x_20; lean_object* x_21; uint8_t x_22; +x_16 = lean_ctor_get(x_1, 0); +x_17 = lean_ctor_get(x_1, 1); +x_18 = l_Int_gcd(x_2, x_17); +x_19 = lean_nat_to_int(x_18); +x_20 = lean_int_ediv(x_2, x_19); +x_21 = lean_int_ediv(x_17, x_19); +lean_dec(x_19); +lean_dec(x_17); +x_22 = lean_int_dec_eq(x_20, x_13); +if (x_22 == 0) +{ +size_t x_23; size_t x_24; lean_object* x_25; +x_23 = lean_array_size(x_16); +x_24 = 0; +x_25 = l_Array_mapMUnsafe_map___at_Lean_Meta_Grind_Arith_CommRing_Null_PreNullCert_mul___spec__1(x_20, x_23, x_24, x_16, x_3, x_4, x_5, x_6, x_7, x_8, x_9, x_10, x_11, x_12); +lean_dec(x_20); +if (lean_obj_tag(x_25) == 0) +{ +uint8_t x_26; +x_26 = !lean_is_exclusive(x_25); +if (x_26 == 0) +{ +lean_object* x_27; +x_27 = lean_ctor_get(x_25, 0); +lean_ctor_set(x_1, 1, x_21); +lean_ctor_set(x_1, 0, x_27); +lean_ctor_set(x_25, 0, x_1); +return x_25; +} +else +{ +lean_object* x_28; lean_object* x_29; lean_object* x_30; +x_28 = lean_ctor_get(x_25, 0); +x_29 = lean_ctor_get(x_25, 1); +lean_inc(x_29); +lean_inc(x_28); +lean_dec(x_25); +lean_ctor_set(x_1, 1, x_21); +lean_ctor_set(x_1, 0, x_28); +x_30 = lean_alloc_ctor(0, 2, 0); +lean_ctor_set(x_30, 0, x_1); +lean_ctor_set(x_30, 1, x_29); +return x_30; +} +} +else +{ +uint8_t x_31; +lean_dec(x_21); +lean_free_object(x_1); +x_31 = !lean_is_exclusive(x_25); +if (x_31 == 0) +{ +return x_25; +} +else +{ +lean_object* x_32; lean_object* x_33; lean_object* x_34; +x_32 = lean_ctor_get(x_25, 0); +x_33 = lean_ctor_get(x_25, 1); +lean_inc(x_33); +lean_inc(x_32); +lean_dec(x_25); +x_34 = lean_alloc_ctor(1, 2, 0); +lean_ctor_set(x_34, 0, x_32); +lean_ctor_set(x_34, 1, x_33); +return x_34; +} +} +} +else +{ +lean_object* x_35; +lean_dec(x_20); +lean_dec(x_11); +lean_dec(x_10); +lean_dec(x_9); +lean_dec(x_8); +lean_dec(x_7); +lean_dec(x_6); +lean_dec(x_5); +lean_dec(x_4); +lean_dec(x_3); +lean_ctor_set(x_1, 1, x_21); +x_35 = lean_alloc_ctor(0, 2, 0); +lean_ctor_set(x_35, 0, x_1); +lean_ctor_set(x_35, 1, x_12); +return x_35; +} +} +else +{ +lean_object* x_36; lean_object* x_37; lean_object* x_38; lean_object* x_39; lean_object* x_40; lean_object* x_41; uint8_t x_42; +x_36 = lean_ctor_get(x_1, 0); +x_37 = lean_ctor_get(x_1, 1); +lean_inc(x_37); +lean_inc(x_36); +lean_dec(x_1); +x_38 = l_Int_gcd(x_2, x_37); +x_39 = lean_nat_to_int(x_38); +x_40 = lean_int_ediv(x_2, x_39); +x_41 = lean_int_ediv(x_37, x_39); +lean_dec(x_39); +lean_dec(x_37); +x_42 = lean_int_dec_eq(x_40, x_13); +if (x_42 == 0) +{ +size_t x_43; size_t x_44; lean_object* x_45; +x_43 = lean_array_size(x_36); +x_44 = 0; +x_45 = 
l_Array_mapMUnsafe_map___at_Lean_Meta_Grind_Arith_CommRing_Null_PreNullCert_mul___spec__1(x_40, x_43, x_44, x_36, x_3, x_4, x_5, x_6, x_7, x_8, x_9, x_10, x_11, x_12); +lean_dec(x_40); +if (lean_obj_tag(x_45) == 0) +{ +lean_object* x_46; lean_object* x_47; lean_object* x_48; lean_object* x_49; lean_object* x_50; +x_46 = lean_ctor_get(x_45, 0); +lean_inc(x_46); +x_47 = lean_ctor_get(x_45, 1); +lean_inc(x_47); +if (lean_is_exclusive(x_45)) { + lean_ctor_release(x_45, 0); + lean_ctor_release(x_45, 1); + x_48 = x_45; +} else { + lean_dec_ref(x_45); + x_48 = lean_box(0); +} +x_49 = lean_alloc_ctor(0, 2, 0); +lean_ctor_set(x_49, 0, x_46); +lean_ctor_set(x_49, 1, x_41); +if (lean_is_scalar(x_48)) { + x_50 = lean_alloc_ctor(0, 2, 0); +} else { + x_50 = x_48; +} +lean_ctor_set(x_50, 0, x_49); +lean_ctor_set(x_50, 1, x_47); +return x_50; +} +else +{ +lean_object* x_51; lean_object* x_52; lean_object* x_53; lean_object* x_54; +lean_dec(x_41); +x_51 = lean_ctor_get(x_45, 0); +lean_inc(x_51); +x_52 = lean_ctor_get(x_45, 1); +lean_inc(x_52); +if (lean_is_exclusive(x_45)) { + lean_ctor_release(x_45, 0); + lean_ctor_release(x_45, 1); + x_53 = x_45; +} else { + lean_dec_ref(x_45); + x_53 = lean_box(0); +} +if (lean_is_scalar(x_53)) { + x_54 = lean_alloc_ctor(1, 2, 0); +} else { + x_54 = x_53; +} +lean_ctor_set(x_54, 0, x_51); +lean_ctor_set(x_54, 1, x_52); +return x_54; +} +} +else +{ +lean_object* x_55; lean_object* x_56; +lean_dec(x_40); +lean_dec(x_11); +lean_dec(x_10); +lean_dec(x_9); +lean_dec(x_8); +lean_dec(x_7); +lean_dec(x_6); +lean_dec(x_5); +lean_dec(x_4); +lean_dec(x_3); +x_55 = lean_alloc_ctor(0, 2, 0); +lean_ctor_set(x_55, 0, x_36); +lean_ctor_set(x_55, 1, x_41); +x_56 = lean_alloc_ctor(0, 2, 0); +lean_ctor_set(x_56, 0, x_55); +lean_ctor_set(x_56, 1, x_12); +return x_56; +} +} +} +else +{ +lean_object* x_57; +lean_dec(x_11); +lean_dec(x_10); +lean_dec(x_9); +lean_dec(x_8); +lean_dec(x_7); +lean_dec(x_6); +lean_dec(x_5); +lean_dec(x_4); +lean_dec(x_3); +x_57 = lean_alloc_ctor(0, 2, 0); +lean_ctor_set(x_57, 0, x_1); +lean_ctor_set(x_57, 1, x_12); +return x_57; +} +} +} +LEAN_EXPORT lean_object* l_Array_mapMUnsafe_map___at_Lean_Meta_Grind_Arith_CommRing_Null_PreNullCert_mul___spec__1___boxed(lean_object* x_1, lean_object* x_2, lean_object* x_3, lean_object* x_4, lean_object* x_5, lean_object* x_6, lean_object* x_7, lean_object* x_8, lean_object* x_9, lean_object* x_10, lean_object* x_11, lean_object* x_12, lean_object* x_13, lean_object* x_14) { +_start: +{ +size_t x_15; size_t x_16; lean_object* x_17; +x_15 = lean_unbox_usize(x_2); +lean_dec(x_2); +x_16 = lean_unbox_usize(x_3); +lean_dec(x_3); +x_17 = l_Array_mapMUnsafe_map___at_Lean_Meta_Grind_Arith_CommRing_Null_PreNullCert_mul___spec__1(x_1, x_15, x_16, x_4, x_5, x_6, x_7, x_8, x_9, x_10, x_11, x_12, x_13, x_14); +lean_dec(x_1); +return x_17; +} +} +LEAN_EXPORT lean_object* l_Lean_Meta_Grind_Arith_CommRing_Null_PreNullCert_mul___boxed(lean_object* x_1, lean_object* x_2, lean_object* x_3, lean_object* x_4, lean_object* x_5, lean_object* x_6, lean_object* x_7, lean_object* x_8, lean_object* x_9, lean_object* x_10, lean_object* x_11, lean_object* x_12) { +_start: +{ +lean_object* x_13; +x_13 = l_Lean_Meta_Grind_Arith_CommRing_Null_PreNullCert_mul(x_1, x_2, x_3, x_4, x_5, x_6, x_7, x_8, x_9, x_10, x_11, x_12); +lean_dec(x_2); +return x_13; +} +} +LEAN_EXPORT lean_object* l_Std_Range_forIn_x27_loop___at_Lean_Meta_Grind_Arith_CommRing_Null_PreNullCert_combine___spec__1(lean_object* x_1, lean_object* x_2, lean_object* x_3, lean_object* x_4, 
lean_object* x_5, lean_object* x_6, lean_object* x_7, lean_object* x_8, lean_object* x_9, lean_object* x_10, lean_object* x_11, lean_object* x_12, lean_object* x_13, lean_object* x_14, lean_object* x_15, lean_object* x_16, lean_object* x_17, lean_object* x_18, lean_object* x_19, lean_object* x_20, lean_object* x_21, lean_object* x_22, lean_object* x_23, lean_object* x_24, lean_object* x_25, lean_object* x_26) { +_start: +{ +lean_object* x_27; uint8_t x_28; +x_27 = lean_ctor_get(x_12, 1); +x_28 = lean_nat_dec_lt(x_14, x_27); +if (x_28 == 0) +{ +lean_object* x_29; +lean_dec(x_25); +lean_dec(x_24); +lean_dec(x_23); +lean_dec(x_22); +lean_dec(x_21); +lean_dec(x_20); +lean_dec(x_19); +lean_dec(x_18); +lean_dec(x_17); +lean_dec(x_14); +lean_dec(x_3); +lean_dec(x_1); +x_29 = lean_alloc_ctor(0, 2, 0); +lean_ctor_set(x_29, 0, x_13); +lean_ctor_set(x_29, 1, x_26); +return x_29; +} +else +{ +lean_object* x_30; lean_object* x_31; uint8_t x_37; +x_37 = lean_nat_dec_lt(x_14, x_9); +if (x_37 == 0) +{ +lean_object* x_38; lean_object* x_39; +x_38 = lean_array_fget(x_8, x_14); +lean_inc(x_25); +lean_inc(x_24); +lean_inc(x_23); +lean_inc(x_22); +lean_inc(x_21); +lean_inc(x_20); +lean_inc(x_19); +lean_inc(x_18); +lean_inc(x_17); +lean_inc(x_3); +x_39 = l_Lean_Grind_CommRing_Poly_mulMonM(x_38, x_6, x_3, x_17, x_18, x_19, x_20, x_21, x_22, x_23, x_24, x_25, x_26); +if (lean_obj_tag(x_39) == 0) +{ +lean_object* x_40; lean_object* x_41; lean_object* x_42; lean_object* x_43; +x_40 = lean_ctor_get(x_39, 0); +lean_inc(x_40); +x_41 = lean_ctor_get(x_39, 1); +lean_inc(x_41); +lean_dec(x_39); +x_42 = lean_array_fset(x_13, x_14, x_40); +x_43 = lean_alloc_ctor(1, 1, 0); +lean_ctor_set(x_43, 0, x_42); +x_30 = x_43; +x_31 = x_41; +goto block_36; +} +else +{ +uint8_t x_44; +lean_dec(x_25); +lean_dec(x_24); +lean_dec(x_23); +lean_dec(x_22); +lean_dec(x_21); +lean_dec(x_20); +lean_dec(x_19); +lean_dec(x_18); +lean_dec(x_17); +lean_dec(x_14); +lean_dec(x_13); +lean_dec(x_3); +lean_dec(x_1); +x_44 = !lean_is_exclusive(x_39); +if (x_44 == 0) +{ +return x_39; +} +else +{ +lean_object* x_45; lean_object* x_46; lean_object* x_47; +x_45 = lean_ctor_get(x_39, 0); +x_46 = lean_ctor_get(x_39, 1); +lean_inc(x_46); +lean_inc(x_45); +lean_dec(x_39); +x_47 = lean_alloc_ctor(1, 2, 0); +lean_ctor_set(x_47, 0, x_45); +lean_ctor_set(x_47, 1, x_46); +return x_47; +} +} +} +else +{ +lean_object* x_48; lean_object* x_49; +x_48 = lean_array_fget(x_7, x_14); +lean_inc(x_25); +lean_inc(x_24); +lean_inc(x_23); +lean_inc(x_22); +lean_inc(x_21); +lean_inc(x_20); +lean_inc(x_19); +lean_inc(x_18); +lean_inc(x_17); +lean_inc(x_1); +x_49 = l_Lean_Grind_CommRing_Poly_mulMonM(x_48, x_5, x_1, x_17, x_18, x_19, x_20, x_21, x_22, x_23, x_24, x_25, x_26); +if (lean_obj_tag(x_49) == 0) +{ +lean_object* x_50; lean_object* x_51; uint8_t x_52; +x_50 = lean_ctor_get(x_49, 0); +lean_inc(x_50); +x_51 = lean_ctor_get(x_49, 1); +lean_inc(x_51); +lean_dec(x_49); +x_52 = lean_nat_dec_lt(x_14, x_10); +if (x_52 == 0) +{ +lean_object* x_53; lean_object* x_54; +x_53 = lean_array_fset(x_13, x_14, x_50); +x_54 = lean_alloc_ctor(1, 1, 0); +lean_ctor_set(x_54, 0, x_53); +x_30 = x_54; +x_31 = x_51; +goto block_36; +} +else +{ +lean_object* x_55; lean_object* x_56; +x_55 = lean_array_fget(x_8, x_14); +lean_inc(x_25); +lean_inc(x_24); +lean_inc(x_23); +lean_inc(x_22); +lean_inc(x_21); +lean_inc(x_20); +lean_inc(x_19); +lean_inc(x_18); +lean_inc(x_17); +lean_inc(x_3); +x_56 = l_Lean_Grind_CommRing_Poly_mulMonM(x_55, x_6, x_3, x_17, x_18, x_19, x_20, x_21, x_22, x_23, x_24, x_25, 
x_51); +if (lean_obj_tag(x_56) == 0) +{ +lean_object* x_57; lean_object* x_58; lean_object* x_59; +x_57 = lean_ctor_get(x_56, 0); +lean_inc(x_57); +x_58 = lean_ctor_get(x_56, 1); +lean_inc(x_58); +lean_dec(x_56); +lean_inc(x_25); +lean_inc(x_24); +lean_inc(x_23); +lean_inc(x_22); +lean_inc(x_21); +lean_inc(x_20); +lean_inc(x_19); +lean_inc(x_18); +lean_inc(x_17); +x_59 = l_Lean_Grind_CommRing_Poly_combineM(x_50, x_57, x_17, x_18, x_19, x_20, x_21, x_22, x_23, x_24, x_25, x_58); +if (lean_obj_tag(x_59) == 0) +{ +lean_object* x_60; lean_object* x_61; lean_object* x_62; lean_object* x_63; +x_60 = lean_ctor_get(x_59, 0); +lean_inc(x_60); +x_61 = lean_ctor_get(x_59, 1); +lean_inc(x_61); +lean_dec(x_59); +x_62 = lean_array_fset(x_13, x_14, x_60); +x_63 = lean_alloc_ctor(1, 1, 0); +lean_ctor_set(x_63, 0, x_62); +x_30 = x_63; +x_31 = x_61; +goto block_36; +} +else +{ +uint8_t x_64; +lean_dec(x_25); +lean_dec(x_24); +lean_dec(x_23); +lean_dec(x_22); +lean_dec(x_21); +lean_dec(x_20); +lean_dec(x_19); +lean_dec(x_18); +lean_dec(x_17); +lean_dec(x_14); +lean_dec(x_13); +lean_dec(x_3); +lean_dec(x_1); +x_64 = !lean_is_exclusive(x_59); +if (x_64 == 0) +{ +return x_59; +} +else +{ +lean_object* x_65; lean_object* x_66; lean_object* x_67; +x_65 = lean_ctor_get(x_59, 0); +x_66 = lean_ctor_get(x_59, 1); +lean_inc(x_66); +lean_inc(x_65); +lean_dec(x_59); +x_67 = lean_alloc_ctor(1, 2, 0); +lean_ctor_set(x_67, 0, x_65); +lean_ctor_set(x_67, 1, x_66); +return x_67; +} +} +} +else +{ +uint8_t x_68; +lean_dec(x_50); +lean_dec(x_25); +lean_dec(x_24); +lean_dec(x_23); +lean_dec(x_22); +lean_dec(x_21); +lean_dec(x_20); +lean_dec(x_19); +lean_dec(x_18); +lean_dec(x_17); +lean_dec(x_14); +lean_dec(x_13); +lean_dec(x_3); +lean_dec(x_1); +x_68 = !lean_is_exclusive(x_56); +if (x_68 == 0) +{ +return x_56; +} +else +{ +lean_object* x_69; lean_object* x_70; lean_object* x_71; +x_69 = lean_ctor_get(x_56, 0); +x_70 = lean_ctor_get(x_56, 1); +lean_inc(x_70); +lean_inc(x_69); +lean_dec(x_56); +x_71 = lean_alloc_ctor(1, 2, 0); +lean_ctor_set(x_71, 0, x_69); +lean_ctor_set(x_71, 1, x_70); +return x_71; +} +} +} +} +else +{ +uint8_t x_72; +lean_dec(x_25); +lean_dec(x_24); +lean_dec(x_23); +lean_dec(x_22); +lean_dec(x_21); +lean_dec(x_20); +lean_dec(x_19); +lean_dec(x_18); +lean_dec(x_17); +lean_dec(x_14); +lean_dec(x_13); +lean_dec(x_3); +lean_dec(x_1); +x_72 = !lean_is_exclusive(x_49); +if (x_72 == 0) +{ +return x_49; +} +else +{ +lean_object* x_73; lean_object* x_74; lean_object* x_75; +x_73 = lean_ctor_get(x_49, 0); +x_74 = lean_ctor_get(x_49, 1); +lean_inc(x_74); +lean_inc(x_73); +lean_dec(x_49); +x_75 = lean_alloc_ctor(1, 2, 0); +lean_ctor_set(x_75, 0, x_73); +lean_ctor_set(x_75, 1, x_74); +return x_75; +} +} +} +block_36: +{ +lean_object* x_32; lean_object* x_33; lean_object* x_34; +x_32 = lean_ctor_get(x_30, 0); +lean_inc(x_32); +lean_dec(x_30); +x_33 = lean_ctor_get(x_12, 2); +x_34 = lean_nat_add(x_14, x_33); +lean_dec(x_14); +x_13 = x_32; +x_14 = x_34; +x_15 = lean_box(0); +x_16 = lean_box(0); +x_26 = x_31; +goto _start; +} +} +} +} +LEAN_EXPORT lean_object* l_Lean_Meta_Grind_Arith_CommRing_Null_PreNullCert_combine(lean_object* x_1, lean_object* x_2, lean_object* x_3, lean_object* x_4, lean_object* x_5, lean_object* x_6, lean_object* x_7, lean_object* x_8, lean_object* x_9, lean_object* x_10, lean_object* x_11, lean_object* x_12, lean_object* x_13, lean_object* x_14, lean_object* x_15, lean_object* x_16) { +_start: +{ +lean_object* x_17; lean_object* x_18; lean_object* x_19; lean_object* x_20; lean_object* x_21; 
lean_object* x_22; lean_object* x_23; lean_object* x_24; lean_object* x_25; lean_object* x_26; lean_object* x_27; lean_object* x_28; lean_object* x_29; lean_object* x_30; lean_object* x_31; lean_object* x_32; uint8_t x_33; lean_object* x_34; +x_17 = lean_ctor_get(x_3, 1); +x_18 = lean_ctor_get(x_6, 1); +x_19 = lean_int_mul(x_1, x_18); +x_20 = lean_int_mul(x_4, x_17); +x_21 = lean_int_mul(x_17, x_18); +x_22 = l_Int_gcd(x_19, x_20); +x_23 = lean_nat_to_int(x_22); +x_24 = l_Int_gcd(x_23, x_21); +lean_dec(x_23); +x_25 = lean_nat_to_int(x_24); +x_26 = lean_int_ediv(x_19, x_25); +lean_dec(x_19); +x_27 = lean_int_ediv(x_20, x_25); +lean_dec(x_20); +x_28 = lean_int_ediv(x_21, x_25); +lean_dec(x_25); +lean_dec(x_21); +x_29 = lean_ctor_get(x_3, 0); +x_30 = lean_ctor_get(x_6, 0); +x_31 = lean_array_get_size(x_29); +x_32 = lean_array_get_size(x_30); +x_33 = lean_nat_dec_le(x_31, x_32); +if (x_33 == 0) +{ +lean_inc(x_31); +x_34 = x_31; +goto block_52; +} +else +{ +lean_inc(x_32); +x_34 = x_32; +goto block_52; +} +block_52: +{ +lean_object* x_35; lean_object* x_36; lean_object* x_37; lean_object* x_38; lean_object* x_39; lean_object* x_40; +x_35 = l_Lean_Meta_Grind_Arith_CommRing_Null_PreNullCert_unit___closed__1; +lean_inc(x_34); +x_36 = lean_mk_array(x_34, x_35); +x_37 = lean_unsigned_to_nat(0u); +x_38 = lean_unsigned_to_nat(1u); +lean_inc(x_34); +x_39 = lean_alloc_ctor(0, 3, 0); +lean_ctor_set(x_39, 0, x_37); +lean_ctor_set(x_39, 1, x_34); +lean_ctor_set(x_39, 2, x_38); +x_40 = l_Std_Range_forIn_x27_loop___at_Lean_Meta_Grind_Arith_CommRing_Null_PreNullCert_combine___spec__1(x_2, x_3, x_5, x_6, x_26, x_27, x_29, x_30, x_31, x_32, x_34, x_39, x_36, x_37, lean_box(0), lean_box(0), x_7, x_8, x_9, x_10, x_11, x_12, x_13, x_14, x_15, x_16); +lean_dec(x_39); +lean_dec(x_34); +lean_dec(x_32); +lean_dec(x_31); +lean_dec(x_27); +lean_dec(x_26); +if (lean_obj_tag(x_40) == 0) +{ +uint8_t x_41; +x_41 = !lean_is_exclusive(x_40); +if (x_41 == 0) +{ +lean_object* x_42; lean_object* x_43; +x_42 = lean_ctor_get(x_40, 0); +x_43 = lean_alloc_ctor(0, 2, 0); +lean_ctor_set(x_43, 0, x_42); +lean_ctor_set(x_43, 1, x_28); +lean_ctor_set(x_40, 0, x_43); +return x_40; +} +else +{ +lean_object* x_44; lean_object* x_45; lean_object* x_46; lean_object* x_47; +x_44 = lean_ctor_get(x_40, 0); +x_45 = lean_ctor_get(x_40, 1); +lean_inc(x_45); +lean_inc(x_44); +lean_dec(x_40); +x_46 = lean_alloc_ctor(0, 2, 0); +lean_ctor_set(x_46, 0, x_44); +lean_ctor_set(x_46, 1, x_28); +x_47 = lean_alloc_ctor(0, 2, 0); +lean_ctor_set(x_47, 0, x_46); +lean_ctor_set(x_47, 1, x_45); +return x_47; +} +} +else +{ +uint8_t x_48; +lean_dec(x_28); +x_48 = !lean_is_exclusive(x_40); +if (x_48 == 0) +{ +return x_40; +} +else +{ +lean_object* x_49; lean_object* x_50; lean_object* x_51; +x_49 = lean_ctor_get(x_40, 0); +x_50 = lean_ctor_get(x_40, 1); +lean_inc(x_50); +lean_inc(x_49); +lean_dec(x_40); +x_51 = lean_alloc_ctor(1, 2, 0); +lean_ctor_set(x_51, 0, x_49); +lean_ctor_set(x_51, 1, x_50); +return x_51; +} +} +} +} +} +LEAN_EXPORT lean_object* l_Std_Range_forIn_x27_loop___at_Lean_Meta_Grind_Arith_CommRing_Null_PreNullCert_combine___spec__1___boxed(lean_object** _args) { +lean_object* x_1 = _args[0]; +lean_object* x_2 = _args[1]; +lean_object* x_3 = _args[2]; +lean_object* x_4 = _args[3]; +lean_object* x_5 = _args[4]; +lean_object* x_6 = _args[5]; +lean_object* x_7 = _args[6]; +lean_object* x_8 = _args[7]; +lean_object* x_9 = _args[8]; +lean_object* x_10 = _args[9]; +lean_object* x_11 = _args[10]; +lean_object* x_12 = _args[11]; +lean_object* x_13 = _args[12]; 
+lean_object* x_14 = _args[13]; +lean_object* x_15 = _args[14]; +lean_object* x_16 = _args[15]; +lean_object* x_17 = _args[16]; +lean_object* x_18 = _args[17]; +lean_object* x_19 = _args[18]; +lean_object* x_20 = _args[19]; +lean_object* x_21 = _args[20]; +lean_object* x_22 = _args[21]; +lean_object* x_23 = _args[22]; +lean_object* x_24 = _args[23]; +lean_object* x_25 = _args[24]; +lean_object* x_26 = _args[25]; +_start: +{ +lean_object* x_27; +x_27 = l_Std_Range_forIn_x27_loop___at_Lean_Meta_Grind_Arith_CommRing_Null_PreNullCert_combine___spec__1(x_1, x_2, x_3, x_4, x_5, x_6, x_7, x_8, x_9, x_10, x_11, x_12, x_13, x_14, x_15, x_16, x_17, x_18, x_19, x_20, x_21, x_22, x_23, x_24, x_25, x_26); +lean_dec(x_12); +lean_dec(x_11); +lean_dec(x_10); +lean_dec(x_9); +lean_dec(x_8); +lean_dec(x_7); +lean_dec(x_6); +lean_dec(x_5); +lean_dec(x_4); +lean_dec(x_2); +return x_27; +} +} +LEAN_EXPORT lean_object* l_Lean_Meta_Grind_Arith_CommRing_Null_PreNullCert_combine___boxed(lean_object* x_1, lean_object* x_2, lean_object* x_3, lean_object* x_4, lean_object* x_5, lean_object* x_6, lean_object* x_7, lean_object* x_8, lean_object* x_9, lean_object* x_10, lean_object* x_11, lean_object* x_12, lean_object* x_13, lean_object* x_14, lean_object* x_15, lean_object* x_16) { +_start: +{ +lean_object* x_17; +x_17 = l_Lean_Meta_Grind_Arith_CommRing_Null_PreNullCert_combine(x_1, x_2, x_3, x_4, x_5, x_6, x_7, x_8, x_9, x_10, x_11, x_12, x_13, x_14, x_15, x_16); +lean_dec(x_6); +lean_dec(x_4); +lean_dec(x_3); +lean_dec(x_1); +return x_17; +} +} +LEAN_EXPORT uint64_t l___private_Lean_Meta_Tactic_Grind_Arith_CommRing_Proof_0__Lean_Meta_Grind_Arith_CommRing_Null_caching_unsafe__1___rarg(lean_object* x_1) { +_start: +{ +size_t x_2; uint64_t x_3; uint64_t x_4; uint64_t x_5; +x_2 = lean_ptr_addr(x_1); +x_3 = lean_usize_to_uint64(x_2); +x_4 = 2; +x_5 = lean_uint64_shift_right(x_3, x_4); +return x_5; +} +} +LEAN_EXPORT lean_object* l___private_Lean_Meta_Tactic_Grind_Arith_CommRing_Proof_0__Lean_Meta_Grind_Arith_CommRing_Null_caching_unsafe__1(lean_object* x_1) { +_start: +{ +lean_object* x_2; +x_2 = lean_alloc_closure((void*)(l___private_Lean_Meta_Tactic_Grind_Arith_CommRing_Proof_0__Lean_Meta_Grind_Arith_CommRing_Null_caching_unsafe__1___rarg___boxed), 1, 0); +return x_2; +} +} +LEAN_EXPORT lean_object* l___private_Lean_Meta_Tactic_Grind_Arith_CommRing_Proof_0__Lean_Meta_Grind_Arith_CommRing_Null_caching_unsafe__1___rarg___boxed(lean_object* x_1) { +_start: +{ +uint64_t x_2; lean_object* x_3; +x_2 = l___private_Lean_Meta_Tactic_Grind_Arith_CommRing_Proof_0__Lean_Meta_Grind_Arith_CommRing_Null_caching_unsafe__1___rarg(x_1); +lean_dec(x_1); +x_3 = lean_box_uint64(x_2); +return x_3; +} +} +LEAN_EXPORT lean_object* l_Std_DHashMap_Internal_AssocList_get_x3f___at___private_Lean_Meta_Tactic_Grind_Arith_CommRing_Proof_0__Lean_Meta_Grind_Arith_CommRing_Null_caching___spec__1(uint64_t x_1, lean_object* x_2) { +_start: +{ +if (lean_obj_tag(x_2) == 0) +{ +lean_object* x_3; +x_3 = lean_box(0); +return x_3; +} +else +{ +lean_object* x_4; lean_object* x_5; lean_object* x_6; uint64_t x_7; uint8_t x_8; +x_4 = lean_ctor_get(x_2, 0); +lean_inc(x_4); +x_5 = lean_ctor_get(x_2, 1); +lean_inc(x_5); +x_6 = lean_ctor_get(x_2, 2); +lean_inc(x_6); +lean_dec(x_2); +x_7 = lean_unbox_uint64(x_4); +lean_dec(x_4); +x_8 = lean_uint64_dec_eq(x_7, x_1); +if (x_8 == 0) +{ +lean_dec(x_5); +x_2 = x_6; +goto _start; +} +else +{ +lean_object* x_10; +lean_dec(x_6); +x_10 = lean_alloc_ctor(1, 1, 0); +lean_ctor_set(x_10, 0, x_5); +return x_10; +} +} +} +} 
+LEAN_EXPORT uint8_t l_Std_DHashMap_Internal_AssocList_contains___at___private_Lean_Meta_Tactic_Grind_Arith_CommRing_Proof_0__Lean_Meta_Grind_Arith_CommRing_Null_caching___spec__2(uint64_t x_1, lean_object* x_2) { +_start: +{ +if (lean_obj_tag(x_2) == 0) +{ +uint8_t x_3; +x_3 = 0; +return x_3; +} +else +{ +lean_object* x_4; lean_object* x_5; uint64_t x_6; uint8_t x_7; +x_4 = lean_ctor_get(x_2, 0); +lean_inc(x_4); +x_5 = lean_ctor_get(x_2, 2); +lean_inc(x_5); +lean_dec(x_2); +x_6 = lean_unbox_uint64(x_4); +lean_dec(x_4); +x_7 = lean_uint64_dec_eq(x_6, x_1); +if (x_7 == 0) +{ +x_2 = x_5; +goto _start; +} +else +{ +uint8_t x_9; +lean_dec(x_5); +x_9 = 1; +return x_9; +} +} +} +} +LEAN_EXPORT lean_object* l_Std_DHashMap_Internal_AssocList_foldlM___at___private_Lean_Meta_Tactic_Grind_Arith_CommRing_Proof_0__Lean_Meta_Grind_Arith_CommRing_Null_caching___spec__5(lean_object* x_1, lean_object* x_2, lean_object* x_3) { +_start: +{ +if (lean_obj_tag(x_3) == 0) +{ +lean_dec(x_1); +return x_2; +} +else +{ +uint8_t x_4; +x_4 = !lean_is_exclusive(x_3); +if (x_4 == 0) +{ +lean_object* x_5; lean_object* x_6; lean_object* x_7; lean_object* x_8; uint64_t x_9; uint64_t x_10; uint64_t x_11; uint64_t x_12; uint64_t x_13; uint64_t x_14; uint64_t x_15; size_t x_16; size_t x_17; size_t x_18; size_t x_19; size_t x_20; lean_object* x_21; lean_object* x_22; +x_5 = lean_ctor_get(x_3, 0); +x_6 = lean_ctor_get(x_3, 2); +x_7 = lean_array_get_size(x_2); +lean_inc(x_1); +lean_inc(x_5); +x_8 = lean_apply_1(x_1, x_5); +x_9 = lean_unbox_uint64(x_8); +lean_dec(x_8); +x_10 = 32; +x_11 = lean_uint64_shift_right(x_9, x_10); +x_12 = lean_uint64_xor(x_9, x_11); +x_13 = 16; +x_14 = lean_uint64_shift_right(x_12, x_13); +x_15 = lean_uint64_xor(x_12, x_14); +x_16 = lean_uint64_to_usize(x_15); +x_17 = lean_usize_of_nat(x_7); +lean_dec(x_7); +x_18 = 1; +x_19 = lean_usize_sub(x_17, x_18); +x_20 = lean_usize_land(x_16, x_19); +x_21 = lean_array_uget(x_2, x_20); +lean_ctor_set(x_3, 2, x_21); +x_22 = lean_array_uset(x_2, x_20, x_3); +x_2 = x_22; +x_3 = x_6; +goto _start; +} +else +{ +lean_object* x_24; lean_object* x_25; lean_object* x_26; lean_object* x_27; lean_object* x_28; uint64_t x_29; uint64_t x_30; uint64_t x_31; uint64_t x_32; uint64_t x_33; uint64_t x_34; uint64_t x_35; size_t x_36; size_t x_37; size_t x_38; size_t x_39; size_t x_40; lean_object* x_41; lean_object* x_42; lean_object* x_43; +x_24 = lean_ctor_get(x_3, 0); +x_25 = lean_ctor_get(x_3, 1); +x_26 = lean_ctor_get(x_3, 2); +lean_inc(x_26); +lean_inc(x_25); +lean_inc(x_24); +lean_dec(x_3); +x_27 = lean_array_get_size(x_2); +lean_inc(x_1); +lean_inc(x_24); +x_28 = lean_apply_1(x_1, x_24); +x_29 = lean_unbox_uint64(x_28); +lean_dec(x_28); +x_30 = 32; +x_31 = lean_uint64_shift_right(x_29, x_30); +x_32 = lean_uint64_xor(x_29, x_31); +x_33 = 16; +x_34 = lean_uint64_shift_right(x_32, x_33); +x_35 = lean_uint64_xor(x_32, x_34); +x_36 = lean_uint64_to_usize(x_35); +x_37 = lean_usize_of_nat(x_27); +lean_dec(x_27); +x_38 = 1; +x_39 = lean_usize_sub(x_37, x_38); +x_40 = lean_usize_land(x_36, x_39); +x_41 = lean_array_uget(x_2, x_40); +x_42 = lean_alloc_ctor(1, 3, 0); +lean_ctor_set(x_42, 0, x_24); +lean_ctor_set(x_42, 1, x_25); +lean_ctor_set(x_42, 2, x_41); +x_43 = lean_array_uset(x_2, x_40, x_42); +x_2 = x_43; +x_3 = x_26; +goto _start; +} +} +} +} +LEAN_EXPORT lean_object* 
l_Std_DHashMap_Internal_AssocList_foldlM___at___private_Lean_Meta_Tactic_Grind_Arith_CommRing_Proof_0__Lean_Meta_Grind_Arith_CommRing_Null_caching___spec__5___at___private_Lean_Meta_Tactic_Grind_Arith_CommRing_Proof_0__Lean_Meta_Grind_Arith_CommRing_Null_caching___spec__6(lean_object* x_1, lean_object* x_2) { +_start: +{ +if (lean_obj_tag(x_2) == 0) +{ +return x_1; +} +else +{ +uint8_t x_3; +x_3 = !lean_is_exclusive(x_2); +if (x_3 == 0) +{ +lean_object* x_4; lean_object* x_5; lean_object* x_6; uint64_t x_7; uint64_t x_8; uint64_t x_9; uint64_t x_10; uint64_t x_11; uint64_t x_12; uint64_t x_13; uint64_t x_14; size_t x_15; size_t x_16; size_t x_17; size_t x_18; size_t x_19; lean_object* x_20; lean_object* x_21; +x_4 = lean_ctor_get(x_2, 0); +x_5 = lean_ctor_get(x_2, 2); +x_6 = lean_array_get_size(x_1); +x_7 = 32; +x_8 = lean_unbox_uint64(x_4); +x_9 = lean_uint64_shift_right(x_8, x_7); +x_10 = lean_unbox_uint64(x_4); +x_11 = lean_uint64_xor(x_10, x_9); +x_12 = 16; +x_13 = lean_uint64_shift_right(x_11, x_12); +x_14 = lean_uint64_xor(x_11, x_13); +x_15 = lean_uint64_to_usize(x_14); +x_16 = lean_usize_of_nat(x_6); +lean_dec(x_6); +x_17 = 1; +x_18 = lean_usize_sub(x_16, x_17); +x_19 = lean_usize_land(x_15, x_18); +x_20 = lean_array_uget(x_1, x_19); +lean_ctor_set(x_2, 2, x_20); +x_21 = lean_array_uset(x_1, x_19, x_2); +x_1 = x_21; +x_2 = x_5; +goto _start; +} +else +{ +lean_object* x_23; lean_object* x_24; lean_object* x_25; lean_object* x_26; uint64_t x_27; uint64_t x_28; uint64_t x_29; uint64_t x_30; uint64_t x_31; uint64_t x_32; uint64_t x_33; uint64_t x_34; size_t x_35; size_t x_36; size_t x_37; size_t x_38; size_t x_39; lean_object* x_40; lean_object* x_41; lean_object* x_42; +x_23 = lean_ctor_get(x_2, 0); +x_24 = lean_ctor_get(x_2, 1); +x_25 = lean_ctor_get(x_2, 2); +lean_inc(x_25); +lean_inc(x_24); +lean_inc(x_23); +lean_dec(x_2); +x_26 = lean_array_get_size(x_1); +x_27 = 32; +x_28 = lean_unbox_uint64(x_23); +x_29 = lean_uint64_shift_right(x_28, x_27); +x_30 = lean_unbox_uint64(x_23); +x_31 = lean_uint64_xor(x_30, x_29); +x_32 = 16; +x_33 = lean_uint64_shift_right(x_31, x_32); +x_34 = lean_uint64_xor(x_31, x_33); +x_35 = lean_uint64_to_usize(x_34); +x_36 = lean_usize_of_nat(x_26); +lean_dec(x_26); +x_37 = 1; +x_38 = lean_usize_sub(x_36, x_37); +x_39 = lean_usize_land(x_35, x_38); +x_40 = lean_array_uget(x_1, x_39); +x_41 = lean_alloc_ctor(1, 3, 0); +lean_ctor_set(x_41, 0, x_23); +lean_ctor_set(x_41, 1, x_24); +lean_ctor_set(x_41, 2, x_40); +x_42 = lean_array_uset(x_1, x_39, x_41); +x_1 = x_42; +x_2 = x_25; +goto _start; +} +} +} +} +LEAN_EXPORT lean_object* l_Std_DHashMap_Internal_Raw_u2080_expand_go___at___private_Lean_Meta_Tactic_Grind_Arith_CommRing_Proof_0__Lean_Meta_Grind_Arith_CommRing_Null_caching___spec__4(lean_object* x_1, lean_object* x_2, lean_object* x_3) { +_start: +{ +lean_object* x_4; uint8_t x_5; +x_4 = lean_array_get_size(x_2); +x_5 = lean_nat_dec_lt(x_1, x_4); +lean_dec(x_4); +if (x_5 == 0) +{ +lean_dec(x_2); +lean_dec(x_1); +return x_3; +} +else +{ +lean_object* x_6; lean_object* x_7; lean_object* x_8; lean_object* x_9; lean_object* x_10; lean_object* x_11; +x_6 = lean_array_fget(x_2, x_1); +x_7 = lean_box(0); +x_8 = lean_array_fset(x_2, x_1, x_7); +x_9 = l_Std_DHashMap_Internal_AssocList_foldlM___at___private_Lean_Meta_Tactic_Grind_Arith_CommRing_Proof_0__Lean_Meta_Grind_Arith_CommRing_Null_caching___spec__5___at___private_Lean_Meta_Tactic_Grind_Arith_CommRing_Proof_0__Lean_Meta_Grind_Arith_CommRing_Null_caching___spec__6(x_3, x_6); +x_10 = lean_unsigned_to_nat(1u); 
+x_11 = lean_nat_add(x_1, x_10); +lean_dec(x_1); +x_1 = x_11; +x_2 = x_8; +x_3 = x_9; +goto _start; +} +} +} +LEAN_EXPORT lean_object* l_Std_DHashMap_Internal_Raw_u2080_expand___at___private_Lean_Meta_Tactic_Grind_Arith_CommRing_Proof_0__Lean_Meta_Grind_Arith_CommRing_Null_caching___spec__3(lean_object* x_1) { +_start: +{ +lean_object* x_2; lean_object* x_3; lean_object* x_4; lean_object* x_5; lean_object* x_6; lean_object* x_7; lean_object* x_8; +x_2 = lean_array_get_size(x_1); +x_3 = lean_unsigned_to_nat(2u); +x_4 = lean_nat_mul(x_2, x_3); +lean_dec(x_2); +x_5 = lean_box(0); +x_6 = lean_mk_array(x_4, x_5); +x_7 = lean_unsigned_to_nat(0u); +x_8 = l_Std_DHashMap_Internal_Raw_u2080_expand_go___at___private_Lean_Meta_Tactic_Grind_Arith_CommRing_Proof_0__Lean_Meta_Grind_Arith_CommRing_Null_caching___spec__4(x_7, x_1, x_6); +return x_8; +} +} +LEAN_EXPORT lean_object* l_Std_DHashMap_Internal_AssocList_replace___at___private_Lean_Meta_Tactic_Grind_Arith_CommRing_Proof_0__Lean_Meta_Grind_Arith_CommRing_Null_caching___spec__7(uint64_t x_1, lean_object* x_2, lean_object* x_3) { +_start: +{ +if (lean_obj_tag(x_3) == 0) +{ +lean_object* x_4; +lean_dec(x_2); +x_4 = lean_box(0); +return x_4; +} +else +{ +uint8_t x_5; +x_5 = !lean_is_exclusive(x_3); +if (x_5 == 0) +{ +lean_object* x_6; lean_object* x_7; lean_object* x_8; uint64_t x_9; uint8_t x_10; +x_6 = lean_ctor_get(x_3, 0); +x_7 = lean_ctor_get(x_3, 1); +x_8 = lean_ctor_get(x_3, 2); +x_9 = lean_unbox_uint64(x_6); +x_10 = lean_uint64_dec_eq(x_9, x_1); +if (x_10 == 0) +{ +lean_object* x_11; +x_11 = l_Std_DHashMap_Internal_AssocList_replace___at___private_Lean_Meta_Tactic_Grind_Arith_CommRing_Proof_0__Lean_Meta_Grind_Arith_CommRing_Null_caching___spec__7(x_1, x_2, x_8); +lean_ctor_set(x_3, 2, x_11); +return x_3; +} +else +{ +lean_object* x_12; +lean_dec(x_7); +lean_dec(x_6); +x_12 = lean_box_uint64(x_1); +lean_ctor_set(x_3, 1, x_2); +lean_ctor_set(x_3, 0, x_12); +return x_3; +} +} +else +{ +lean_object* x_13; lean_object* x_14; lean_object* x_15; uint64_t x_16; uint8_t x_17; +x_13 = lean_ctor_get(x_3, 0); +x_14 = lean_ctor_get(x_3, 1); +x_15 = lean_ctor_get(x_3, 2); +lean_inc(x_15); +lean_inc(x_14); +lean_inc(x_13); +lean_dec(x_3); +x_16 = lean_unbox_uint64(x_13); +x_17 = lean_uint64_dec_eq(x_16, x_1); +if (x_17 == 0) +{ +lean_object* x_18; lean_object* x_19; +x_18 = l_Std_DHashMap_Internal_AssocList_replace___at___private_Lean_Meta_Tactic_Grind_Arith_CommRing_Proof_0__Lean_Meta_Grind_Arith_CommRing_Null_caching___spec__7(x_1, x_2, x_15); +x_19 = lean_alloc_ctor(1, 3, 0); +lean_ctor_set(x_19, 0, x_13); +lean_ctor_set(x_19, 1, x_14); +lean_ctor_set(x_19, 2, x_18); +return x_19; +} +else +{ +lean_object* x_20; lean_object* x_21; +lean_dec(x_14); +lean_dec(x_13); +x_20 = lean_box_uint64(x_1); +x_21 = lean_alloc_ctor(1, 3, 0); +lean_ctor_set(x_21, 0, x_20); +lean_ctor_set(x_21, 1, x_2); +lean_ctor_set(x_21, 2, x_15); +return x_21; +} +} +} +} +} +LEAN_EXPORT lean_object* l___private_Lean_Meta_Tactic_Grind_Arith_CommRing_Proof_0__Lean_Meta_Grind_Arith_CommRing_Null_caching___rarg(lean_object* x_1, lean_object* x_2, lean_object* x_3, lean_object* x_4, lean_object* x_5, lean_object* x_6, lean_object* x_7, lean_object* x_8, lean_object* x_9, lean_object* x_10, lean_object* x_11, lean_object* x_12, lean_object* x_13) { +_start: +{ +size_t x_14; uint64_t x_15; uint64_t x_16; uint64_t x_17; lean_object* x_18; lean_object* x_19; lean_object* x_20; uint8_t x_21; +x_14 = lean_ptr_addr(x_1); +x_15 = lean_usize_to_uint64(x_14); +x_16 = 2; +x_17 = 
lean_uint64_shift_right(x_15, x_16); +x_18 = lean_st_ref_get(x_3, x_13); +x_19 = lean_ctor_get(x_18, 0); +lean_inc(x_19); +x_20 = lean_ctor_get(x_19, 0); +lean_inc(x_20); +lean_dec(x_19); +x_21 = !lean_is_exclusive(x_18); +if (x_21 == 0) +{ +lean_object* x_22; lean_object* x_23; lean_object* x_24; lean_object* x_25; uint64_t x_26; uint64_t x_27; uint64_t x_28; uint64_t x_29; uint64_t x_30; uint64_t x_31; size_t x_32; size_t x_33; size_t x_34; size_t x_35; size_t x_36; lean_object* x_37; lean_object* x_38; +x_22 = lean_ctor_get(x_18, 1); +x_23 = lean_ctor_get(x_18, 0); +lean_dec(x_23); +x_24 = lean_ctor_get(x_20, 1); +lean_inc(x_24); +lean_dec(x_20); +x_25 = lean_array_get_size(x_24); +x_26 = 32; +x_27 = lean_uint64_shift_right(x_17, x_26); +x_28 = lean_uint64_xor(x_17, x_27); +x_29 = 16; +x_30 = lean_uint64_shift_right(x_28, x_29); +x_31 = lean_uint64_xor(x_28, x_30); +x_32 = lean_uint64_to_usize(x_31); +x_33 = lean_usize_of_nat(x_25); +lean_dec(x_25); +x_34 = 1; +x_35 = lean_usize_sub(x_33, x_34); +x_36 = lean_usize_land(x_32, x_35); +x_37 = lean_array_uget(x_24, x_36); +lean_dec(x_24); +x_38 = l_Std_DHashMap_Internal_AssocList_get_x3f___at___private_Lean_Meta_Tactic_Grind_Arith_CommRing_Proof_0__Lean_Meta_Grind_Arith_CommRing_Null_caching___spec__1(x_17, x_37); +if (lean_obj_tag(x_38) == 0) +{ +lean_object* x_39; +lean_free_object(x_18); +lean_inc(x_3); +x_39 = lean_apply_11(x_2, x_3, x_4, x_5, x_6, x_7, x_8, x_9, x_10, x_11, x_12, x_22); +if (lean_obj_tag(x_39) == 0) +{ +lean_object* x_40; lean_object* x_41; lean_object* x_42; lean_object* x_43; lean_object* x_44; lean_object* x_45; uint8_t x_46; +x_40 = lean_ctor_get(x_39, 0); +lean_inc(x_40); +x_41 = lean_ctor_get(x_39, 1); +lean_inc(x_41); +lean_dec(x_39); +x_42 = lean_st_ref_take(x_3, x_41); +x_43 = lean_ctor_get(x_42, 0); +lean_inc(x_43); +x_44 = lean_ctor_get(x_43, 0); +lean_inc(x_44); +x_45 = lean_ctor_get(x_42, 1); +lean_inc(x_45); +lean_dec(x_42); +x_46 = !lean_is_exclusive(x_43); +if (x_46 == 0) +{ +lean_object* x_47; uint8_t x_48; +x_47 = lean_ctor_get(x_43, 0); +lean_dec(x_47); +x_48 = !lean_is_exclusive(x_44); +if (x_48 == 0) +{ +lean_object* x_49; lean_object* x_50; lean_object* x_51; size_t x_52; size_t x_53; size_t x_54; lean_object* x_55; uint8_t x_56; +x_49 = lean_ctor_get(x_44, 0); +x_50 = lean_ctor_get(x_44, 1); +x_51 = lean_array_get_size(x_50); +x_52 = lean_usize_of_nat(x_51); +lean_dec(x_51); +x_53 = lean_usize_sub(x_52, x_34); +x_54 = lean_usize_land(x_32, x_53); +x_55 = lean_array_uget(x_50, x_54); +lean_inc(x_55); +x_56 = l_Std_DHashMap_Internal_AssocList_contains___at___private_Lean_Meta_Tactic_Grind_Arith_CommRing_Proof_0__Lean_Meta_Grind_Arith_CommRing_Null_caching___spec__2(x_17, x_55); +if (x_56 == 0) +{ +lean_object* x_57; lean_object* x_58; lean_object* x_59; lean_object* x_60; lean_object* x_61; lean_object* x_62; lean_object* x_63; lean_object* x_64; lean_object* x_65; lean_object* x_66; uint8_t x_67; +x_57 = lean_unsigned_to_nat(1u); +x_58 = lean_nat_add(x_49, x_57); +lean_dec(x_49); +x_59 = lean_box_uint64(x_17); +lean_inc(x_40); +x_60 = lean_alloc_ctor(1, 3, 0); +lean_ctor_set(x_60, 0, x_59); +lean_ctor_set(x_60, 1, x_40); +lean_ctor_set(x_60, 2, x_55); +x_61 = lean_array_uset(x_50, x_54, x_60); +x_62 = lean_unsigned_to_nat(4u); +x_63 = lean_nat_mul(x_58, x_62); +x_64 = lean_unsigned_to_nat(3u); +x_65 = lean_nat_div(x_63, x_64); +lean_dec(x_63); +x_66 = lean_array_get_size(x_61); +x_67 = lean_nat_dec_le(x_65, x_66); +lean_dec(x_66); +lean_dec(x_65); +if (x_67 == 0) +{ +lean_object* x_68; 
lean_object* x_69; uint8_t x_70; +x_68 = l_Std_DHashMap_Internal_Raw_u2080_expand___at___private_Lean_Meta_Tactic_Grind_Arith_CommRing_Proof_0__Lean_Meta_Grind_Arith_CommRing_Null_caching___spec__3(x_61); +lean_ctor_set(x_44, 1, x_68); +lean_ctor_set(x_44, 0, x_58); +x_69 = lean_st_ref_set(x_3, x_43, x_45); +lean_dec(x_3); +x_70 = !lean_is_exclusive(x_69); +if (x_70 == 0) +{ +lean_object* x_71; +x_71 = lean_ctor_get(x_69, 0); +lean_dec(x_71); +lean_ctor_set(x_69, 0, x_40); +return x_69; +} +else +{ +lean_object* x_72; lean_object* x_73; +x_72 = lean_ctor_get(x_69, 1); +lean_inc(x_72); +lean_dec(x_69); +x_73 = lean_alloc_ctor(0, 2, 0); +lean_ctor_set(x_73, 0, x_40); +lean_ctor_set(x_73, 1, x_72); +return x_73; +} +} +else +{ +lean_object* x_74; uint8_t x_75; +lean_ctor_set(x_44, 1, x_61); +lean_ctor_set(x_44, 0, x_58); +x_74 = lean_st_ref_set(x_3, x_43, x_45); +lean_dec(x_3); +x_75 = !lean_is_exclusive(x_74); +if (x_75 == 0) +{ +lean_object* x_76; +x_76 = lean_ctor_get(x_74, 0); +lean_dec(x_76); +lean_ctor_set(x_74, 0, x_40); +return x_74; +} +else +{ +lean_object* x_77; lean_object* x_78; +x_77 = lean_ctor_get(x_74, 1); +lean_inc(x_77); +lean_dec(x_74); +x_78 = lean_alloc_ctor(0, 2, 0); +lean_ctor_set(x_78, 0, x_40); +lean_ctor_set(x_78, 1, x_77); +return x_78; +} +} +} +else +{ +lean_object* x_79; lean_object* x_80; lean_object* x_81; lean_object* x_82; lean_object* x_83; uint8_t x_84; +x_79 = lean_box(0); +x_80 = lean_array_uset(x_50, x_54, x_79); +lean_inc(x_40); +x_81 = l_Std_DHashMap_Internal_AssocList_replace___at___private_Lean_Meta_Tactic_Grind_Arith_CommRing_Proof_0__Lean_Meta_Grind_Arith_CommRing_Null_caching___spec__7(x_17, x_40, x_55); +x_82 = lean_array_uset(x_80, x_54, x_81); +lean_ctor_set(x_44, 1, x_82); +x_83 = lean_st_ref_set(x_3, x_43, x_45); +lean_dec(x_3); +x_84 = !lean_is_exclusive(x_83); +if (x_84 == 0) +{ +lean_object* x_85; +x_85 = lean_ctor_get(x_83, 0); +lean_dec(x_85); +lean_ctor_set(x_83, 0, x_40); +return x_83; +} +else +{ +lean_object* x_86; lean_object* x_87; +x_86 = lean_ctor_get(x_83, 1); +lean_inc(x_86); +lean_dec(x_83); +x_87 = lean_alloc_ctor(0, 2, 0); +lean_ctor_set(x_87, 0, x_40); +lean_ctor_set(x_87, 1, x_86); +return x_87; +} +} +} +else +{ +lean_object* x_88; lean_object* x_89; lean_object* x_90; size_t x_91; size_t x_92; size_t x_93; lean_object* x_94; uint8_t x_95; +x_88 = lean_ctor_get(x_44, 0); +x_89 = lean_ctor_get(x_44, 1); +lean_inc(x_89); +lean_inc(x_88); +lean_dec(x_44); +x_90 = lean_array_get_size(x_89); +x_91 = lean_usize_of_nat(x_90); +lean_dec(x_90); +x_92 = lean_usize_sub(x_91, x_34); +x_93 = lean_usize_land(x_32, x_92); +x_94 = lean_array_uget(x_89, x_93); +lean_inc(x_94); +x_95 = l_Std_DHashMap_Internal_AssocList_contains___at___private_Lean_Meta_Tactic_Grind_Arith_CommRing_Proof_0__Lean_Meta_Grind_Arith_CommRing_Null_caching___spec__2(x_17, x_94); +if (x_95 == 0) +{ +lean_object* x_96; lean_object* x_97; lean_object* x_98; lean_object* x_99; lean_object* x_100; lean_object* x_101; lean_object* x_102; lean_object* x_103; lean_object* x_104; lean_object* x_105; uint8_t x_106; +x_96 = lean_unsigned_to_nat(1u); +x_97 = lean_nat_add(x_88, x_96); +lean_dec(x_88); +x_98 = lean_box_uint64(x_17); +lean_inc(x_40); +x_99 = lean_alloc_ctor(1, 3, 0); +lean_ctor_set(x_99, 0, x_98); +lean_ctor_set(x_99, 1, x_40); +lean_ctor_set(x_99, 2, x_94); +x_100 = lean_array_uset(x_89, x_93, x_99); +x_101 = lean_unsigned_to_nat(4u); +x_102 = lean_nat_mul(x_97, x_101); +x_103 = lean_unsigned_to_nat(3u); +x_104 = lean_nat_div(x_102, x_103); +lean_dec(x_102); 
+x_105 = lean_array_get_size(x_100); +x_106 = lean_nat_dec_le(x_104, x_105); +lean_dec(x_105); +lean_dec(x_104); +if (x_106 == 0) +{ +lean_object* x_107; lean_object* x_108; lean_object* x_109; lean_object* x_110; lean_object* x_111; lean_object* x_112; +x_107 = l_Std_DHashMap_Internal_Raw_u2080_expand___at___private_Lean_Meta_Tactic_Grind_Arith_CommRing_Proof_0__Lean_Meta_Grind_Arith_CommRing_Null_caching___spec__3(x_100); +x_108 = lean_alloc_ctor(0, 2, 0); +lean_ctor_set(x_108, 0, x_97); +lean_ctor_set(x_108, 1, x_107); +lean_ctor_set(x_43, 0, x_108); +x_109 = lean_st_ref_set(x_3, x_43, x_45); +lean_dec(x_3); +x_110 = lean_ctor_get(x_109, 1); +lean_inc(x_110); +if (lean_is_exclusive(x_109)) { + lean_ctor_release(x_109, 0); + lean_ctor_release(x_109, 1); + x_111 = x_109; +} else { + lean_dec_ref(x_109); + x_111 = lean_box(0); +} +if (lean_is_scalar(x_111)) { + x_112 = lean_alloc_ctor(0, 2, 0); +} else { + x_112 = x_111; +} +lean_ctor_set(x_112, 0, x_40); +lean_ctor_set(x_112, 1, x_110); +return x_112; +} +else +{ +lean_object* x_113; lean_object* x_114; lean_object* x_115; lean_object* x_116; lean_object* x_117; +x_113 = lean_alloc_ctor(0, 2, 0); +lean_ctor_set(x_113, 0, x_97); +lean_ctor_set(x_113, 1, x_100); +lean_ctor_set(x_43, 0, x_113); +x_114 = lean_st_ref_set(x_3, x_43, x_45); +lean_dec(x_3); +x_115 = lean_ctor_get(x_114, 1); +lean_inc(x_115); +if (lean_is_exclusive(x_114)) { + lean_ctor_release(x_114, 0); + lean_ctor_release(x_114, 1); + x_116 = x_114; +} else { + lean_dec_ref(x_114); + x_116 = lean_box(0); +} +if (lean_is_scalar(x_116)) { + x_117 = lean_alloc_ctor(0, 2, 0); +} else { + x_117 = x_116; +} +lean_ctor_set(x_117, 0, x_40); +lean_ctor_set(x_117, 1, x_115); +return x_117; +} +} +else +{ +lean_object* x_118; lean_object* x_119; lean_object* x_120; lean_object* x_121; lean_object* x_122; lean_object* x_123; lean_object* x_124; lean_object* x_125; lean_object* x_126; +x_118 = lean_box(0); +x_119 = lean_array_uset(x_89, x_93, x_118); +lean_inc(x_40); +x_120 = l_Std_DHashMap_Internal_AssocList_replace___at___private_Lean_Meta_Tactic_Grind_Arith_CommRing_Proof_0__Lean_Meta_Grind_Arith_CommRing_Null_caching___spec__7(x_17, x_40, x_94); +x_121 = lean_array_uset(x_119, x_93, x_120); +x_122 = lean_alloc_ctor(0, 2, 0); +lean_ctor_set(x_122, 0, x_88); +lean_ctor_set(x_122, 1, x_121); +lean_ctor_set(x_43, 0, x_122); +x_123 = lean_st_ref_set(x_3, x_43, x_45); +lean_dec(x_3); +x_124 = lean_ctor_get(x_123, 1); +lean_inc(x_124); +if (lean_is_exclusive(x_123)) { + lean_ctor_release(x_123, 0); + lean_ctor_release(x_123, 1); + x_125 = x_123; +} else { + lean_dec_ref(x_123); + x_125 = lean_box(0); +} +if (lean_is_scalar(x_125)) { + x_126 = lean_alloc_ctor(0, 2, 0); +} else { + x_126 = x_125; +} +lean_ctor_set(x_126, 0, x_40); +lean_ctor_set(x_126, 1, x_124); +return x_126; +} +} +} +else +{ +lean_object* x_127; lean_object* x_128; lean_object* x_129; lean_object* x_130; lean_object* x_131; size_t x_132; size_t x_133; size_t x_134; lean_object* x_135; uint8_t x_136; +x_127 = lean_ctor_get(x_43, 1); +lean_inc(x_127); +lean_dec(x_43); +x_128 = lean_ctor_get(x_44, 0); +lean_inc(x_128); +x_129 = lean_ctor_get(x_44, 1); +lean_inc(x_129); +if (lean_is_exclusive(x_44)) { + lean_ctor_release(x_44, 0); + lean_ctor_release(x_44, 1); + x_130 = x_44; +} else { + lean_dec_ref(x_44); + x_130 = lean_box(0); +} +x_131 = lean_array_get_size(x_129); +x_132 = lean_usize_of_nat(x_131); +lean_dec(x_131); +x_133 = lean_usize_sub(x_132, x_34); +x_134 = lean_usize_land(x_32, x_133); +x_135 = lean_array_uget(x_129, 
x_134); +lean_inc(x_135); +x_136 = l_Std_DHashMap_Internal_AssocList_contains___at___private_Lean_Meta_Tactic_Grind_Arith_CommRing_Proof_0__Lean_Meta_Grind_Arith_CommRing_Null_caching___spec__2(x_17, x_135); +if (x_136 == 0) +{ +lean_object* x_137; lean_object* x_138; lean_object* x_139; lean_object* x_140; lean_object* x_141; lean_object* x_142; lean_object* x_143; lean_object* x_144; lean_object* x_145; lean_object* x_146; uint8_t x_147; +x_137 = lean_unsigned_to_nat(1u); +x_138 = lean_nat_add(x_128, x_137); +lean_dec(x_128); +x_139 = lean_box_uint64(x_17); +lean_inc(x_40); +x_140 = lean_alloc_ctor(1, 3, 0); +lean_ctor_set(x_140, 0, x_139); +lean_ctor_set(x_140, 1, x_40); +lean_ctor_set(x_140, 2, x_135); +x_141 = lean_array_uset(x_129, x_134, x_140); +x_142 = lean_unsigned_to_nat(4u); +x_143 = lean_nat_mul(x_138, x_142); +x_144 = lean_unsigned_to_nat(3u); +x_145 = lean_nat_div(x_143, x_144); +lean_dec(x_143); +x_146 = lean_array_get_size(x_141); +x_147 = lean_nat_dec_le(x_145, x_146); +lean_dec(x_146); +lean_dec(x_145); +if (x_147 == 0) +{ +lean_object* x_148; lean_object* x_149; lean_object* x_150; lean_object* x_151; lean_object* x_152; lean_object* x_153; lean_object* x_154; +x_148 = l_Std_DHashMap_Internal_Raw_u2080_expand___at___private_Lean_Meta_Tactic_Grind_Arith_CommRing_Proof_0__Lean_Meta_Grind_Arith_CommRing_Null_caching___spec__3(x_141); +if (lean_is_scalar(x_130)) { + x_149 = lean_alloc_ctor(0, 2, 0); +} else { + x_149 = x_130; +} +lean_ctor_set(x_149, 0, x_138); +lean_ctor_set(x_149, 1, x_148); +x_150 = lean_alloc_ctor(0, 2, 0); +lean_ctor_set(x_150, 0, x_149); +lean_ctor_set(x_150, 1, x_127); +x_151 = lean_st_ref_set(x_3, x_150, x_45); +lean_dec(x_3); +x_152 = lean_ctor_get(x_151, 1); +lean_inc(x_152); +if (lean_is_exclusive(x_151)) { + lean_ctor_release(x_151, 0); + lean_ctor_release(x_151, 1); + x_153 = x_151; +} else { + lean_dec_ref(x_151); + x_153 = lean_box(0); +} +if (lean_is_scalar(x_153)) { + x_154 = lean_alloc_ctor(0, 2, 0); +} else { + x_154 = x_153; +} +lean_ctor_set(x_154, 0, x_40); +lean_ctor_set(x_154, 1, x_152); +return x_154; +} +else +{ +lean_object* x_155; lean_object* x_156; lean_object* x_157; lean_object* x_158; lean_object* x_159; lean_object* x_160; +if (lean_is_scalar(x_130)) { + x_155 = lean_alloc_ctor(0, 2, 0); +} else { + x_155 = x_130; +} +lean_ctor_set(x_155, 0, x_138); +lean_ctor_set(x_155, 1, x_141); +x_156 = lean_alloc_ctor(0, 2, 0); +lean_ctor_set(x_156, 0, x_155); +lean_ctor_set(x_156, 1, x_127); +x_157 = lean_st_ref_set(x_3, x_156, x_45); +lean_dec(x_3); +x_158 = lean_ctor_get(x_157, 1); +lean_inc(x_158); +if (lean_is_exclusive(x_157)) { + lean_ctor_release(x_157, 0); + lean_ctor_release(x_157, 1); + x_159 = x_157; +} else { + lean_dec_ref(x_157); + x_159 = lean_box(0); +} +if (lean_is_scalar(x_159)) { + x_160 = lean_alloc_ctor(0, 2, 0); +} else { + x_160 = x_159; +} +lean_ctor_set(x_160, 0, x_40); +lean_ctor_set(x_160, 1, x_158); +return x_160; +} +} +else +{ +lean_object* x_161; lean_object* x_162; lean_object* x_163; lean_object* x_164; lean_object* x_165; lean_object* x_166; lean_object* x_167; lean_object* x_168; lean_object* x_169; lean_object* x_170; +x_161 = lean_box(0); +x_162 = lean_array_uset(x_129, x_134, x_161); +lean_inc(x_40); +x_163 = l_Std_DHashMap_Internal_AssocList_replace___at___private_Lean_Meta_Tactic_Grind_Arith_CommRing_Proof_0__Lean_Meta_Grind_Arith_CommRing_Null_caching___spec__7(x_17, x_40, x_135); +x_164 = lean_array_uset(x_162, x_134, x_163); +if (lean_is_scalar(x_130)) { + x_165 = lean_alloc_ctor(0, 2, 0); +} 
else { + x_165 = x_130; +} +lean_ctor_set(x_165, 0, x_128); +lean_ctor_set(x_165, 1, x_164); +x_166 = lean_alloc_ctor(0, 2, 0); +lean_ctor_set(x_166, 0, x_165); +lean_ctor_set(x_166, 1, x_127); +x_167 = lean_st_ref_set(x_3, x_166, x_45); +lean_dec(x_3); +x_168 = lean_ctor_get(x_167, 1); +lean_inc(x_168); +if (lean_is_exclusive(x_167)) { + lean_ctor_release(x_167, 0); + lean_ctor_release(x_167, 1); + x_169 = x_167; +} else { + lean_dec_ref(x_167); + x_169 = lean_box(0); +} +if (lean_is_scalar(x_169)) { + x_170 = lean_alloc_ctor(0, 2, 0); +} else { + x_170 = x_169; +} +lean_ctor_set(x_170, 0, x_40); +lean_ctor_set(x_170, 1, x_168); +return x_170; +} +} +} +else +{ +uint8_t x_171; +lean_dec(x_3); +x_171 = !lean_is_exclusive(x_39); +if (x_171 == 0) +{ +return x_39; +} +else +{ +lean_object* x_172; lean_object* x_173; lean_object* x_174; +x_172 = lean_ctor_get(x_39, 0); +x_173 = lean_ctor_get(x_39, 1); +lean_inc(x_173); +lean_inc(x_172); +lean_dec(x_39); +x_174 = lean_alloc_ctor(1, 2, 0); +lean_ctor_set(x_174, 0, x_172); +lean_ctor_set(x_174, 1, x_173); +return x_174; +} +} +} +else +{ +lean_object* x_175; +lean_dec(x_12); +lean_dec(x_11); +lean_dec(x_10); +lean_dec(x_9); +lean_dec(x_8); +lean_dec(x_7); +lean_dec(x_6); +lean_dec(x_5); +lean_dec(x_4); +lean_dec(x_3); +lean_dec(x_2); +x_175 = lean_ctor_get(x_38, 0); +lean_inc(x_175); +lean_dec(x_38); +lean_ctor_set(x_18, 0, x_175); +return x_18; +} +} +else +{ +lean_object* x_176; lean_object* x_177; lean_object* x_178; uint64_t x_179; uint64_t x_180; uint64_t x_181; uint64_t x_182; uint64_t x_183; uint64_t x_184; size_t x_185; size_t x_186; size_t x_187; size_t x_188; size_t x_189; lean_object* x_190; lean_object* x_191; +x_176 = lean_ctor_get(x_18, 1); +lean_inc(x_176); +lean_dec(x_18); +x_177 = lean_ctor_get(x_20, 1); +lean_inc(x_177); +lean_dec(x_20); +x_178 = lean_array_get_size(x_177); +x_179 = 32; +x_180 = lean_uint64_shift_right(x_17, x_179); +x_181 = lean_uint64_xor(x_17, x_180); +x_182 = 16; +x_183 = lean_uint64_shift_right(x_181, x_182); +x_184 = lean_uint64_xor(x_181, x_183); +x_185 = lean_uint64_to_usize(x_184); x_186 = lean_usize_of_nat(x_178); lean_dec(x_178); x_187 = 1; @@ -5735,7 +7040,7 @@ lean_dec(x_30); x_36 = lean_ctor_get(x_33, 1); lean_inc(x_36); lean_dec(x_33); -x_91 = l_Lean_Meta_Grind_Arith_CommRing_Null_instInhabitedPreNullCert___closed__2; +x_91 = l___private_Lean_Meta_Tactic_Grind_Arith_CommRing_DenoteExpr_0__Lean_Meta_Grind_Arith_CommRing_denoteNum___at_Lean_Meta_Grind_Arith_CommRing_getPolyConst___spec__2___closed__8; x_92 = lean_int_dec_lt(x_15, x_91); if (x_92 == 0) { @@ -5780,7 +7085,7 @@ x_42 = l_Lean_Meta_Grind_Arith_CommRing_PolyDerivation_toPreNullCert___closed__9 x_43 = lean_alloc_ctor(7, 2, 0); lean_ctor_set(x_43, 0, x_41); lean_ctor_set(x_43, 1, x_42); -x_79 = l_Lean_Meta_Grind_Arith_CommRing_Null_instInhabitedPreNullCert___closed__2; +x_79 = l___private_Lean_Meta_Tactic_Grind_Arith_CommRing_DenoteExpr_0__Lean_Meta_Grind_Arith_CommRing_denoteNum___at_Lean_Meta_Grind_Arith_CommRing_getPolyConst___spec__2___closed__8; x_80 = lean_int_dec_lt(x_35, x_79); if (x_80 == 0) { @@ -5820,7 +7125,7 @@ lean_ctor_set(x_47, 1, x_46); x_48 = lean_alloc_ctor(7, 2, 0); lean_ctor_set(x_48, 0, x_47); lean_ctor_set(x_48, 1, x_42); -x_49 = l_Lean_Meta_Grind_Arith_CommRing_Null_instInhabitedPreNullCert___closed__2; +x_49 = l___private_Lean_Meta_Tactic_Grind_Arith_CommRing_DenoteExpr_0__Lean_Meta_Grind_Arith_CommRing_denoteNum___at_Lean_Meta_Grind_Arith_CommRing_getPolyConst___spec__2___closed__8; x_50 = lean_int_dec_lt(x_36, 
x_49); if (x_50 == 0) { @@ -6076,7 +7381,7 @@ LEAN_EXPORT lean_object* l_Lean_Meta_Grind_Arith_CommRing_PolyDerivation_getMult _start: { lean_object* x_2; lean_object* x_3; -x_2 = l_Lean_Meta_Grind_Arith_CommRing_Null_PreNullCert_zero___closed__2; +x_2 = l_Lean_Grind_CommRing_Mon_denoteExpr___at_Lean_Meta_Grind_Arith_CommRing_getPolyConst___spec__4___closed__1; x_3 = l_Lean_Meta_Grind_Arith_CommRing_PolyDerivation_getMultiplier_go(x_1, x_2); return x_3; } @@ -6979,7 +8284,7 @@ lean_inc(x_6); lean_inc(x_5); lean_inc(x_4); lean_inc(x_3); -x_13 = l_Lean_Meta_Grind_Arith_CommRing_nonzeroChar_x3f(x_3, x_4, x_5, x_6, x_7, x_8, x_9, x_10, x_11, x_12); +x_13 = l_Lean_Meta_Grind_Arith_CommRing_nonzeroChar_x3f___at_Lean_Grind_CommRing_Expr_toPolyM___spec__1(x_3, x_4, x_5, x_6, x_7, x_8, x_9, x_10, x_11, x_12); if (lean_obj_tag(x_13) == 0) { lean_object* x_14; lean_object* x_15; lean_object* x_16; lean_object* x_17; lean_object* x_18; lean_object* x_19; @@ -7135,942 +8440,1481 @@ goto _start; } } } -LEAN_EXPORT lean_object* l_Lean_Meta_Grind_Arith_CommRing_Null_NullCertExt_toNullCert_go___boxed(lean_object* x_1, lean_object* x_2, lean_object* x_3, lean_object* x_4) { -_start: +LEAN_EXPORT lean_object* l_Lean_Meta_Grind_Arith_CommRing_Null_NullCertExt_toNullCert_go___boxed(lean_object* x_1, lean_object* x_2, lean_object* x_3, lean_object* x_4) { +_start: +{ +lean_object* x_5; +x_5 = l_Lean_Meta_Grind_Arith_CommRing_Null_NullCertExt_toNullCert_go(x_1, x_2, x_3, x_4); +lean_dec(x_1); +return x_5; +} +} +LEAN_EXPORT lean_object* l_Lean_Meta_Grind_Arith_CommRing_Null_NullCertExt_toNullCert(lean_object* x_1) { +_start: +{ +lean_object* x_2; lean_object* x_3; lean_object* x_4; lean_object* x_5; +x_2 = lean_ctor_get(x_1, 1); +x_3 = lean_array_get_size(x_2); +x_4 = lean_box(0); +x_5 = l_Lean_Meta_Grind_Arith_CommRing_Null_NullCertExt_toNullCert_go(x_1, x_3, x_4, lean_box(0)); +return x_5; +} +} +LEAN_EXPORT lean_object* l_Lean_Meta_Grind_Arith_CommRing_Null_NullCertExt_toNullCert___boxed(lean_object* x_1) { +_start: +{ +lean_object* x_2; +x_2 = l_Lean_Meta_Grind_Arith_CommRing_Null_NullCertExt_toNullCert(x_1); +lean_dec(x_1); +return x_2; +} +} +LEAN_EXPORT lean_object* l_Lean_Meta_Grind_Arith_CommRing_Null_NullCertExt_applyEqs_go(lean_object* x_1, lean_object* x_2, lean_object* x_3) { +_start: +{ +lean_object* x_4; lean_object* x_5; uint8_t x_6; +x_4 = lean_ctor_get(x_1, 1); +x_5 = lean_array_get_size(x_4); +x_6 = lean_nat_dec_lt(x_2, x_5); +lean_dec(x_5); +if (x_6 == 0) +{ +lean_dec(x_2); +return x_3; +} +else +{ +lean_object* x_7; lean_object* x_8; lean_object* x_9; lean_object* x_10; lean_object* x_11; lean_object* x_12; +x_7 = lean_array_fget(x_4, x_2); +x_8 = lean_ctor_get(x_7, 1); +lean_inc(x_8); +lean_dec(x_7); +x_9 = lean_unsigned_to_nat(1u); +x_10 = lean_nat_add(x_2, x_9); +lean_dec(x_2); +x_11 = lean_ctor_get(x_8, 0); +lean_inc(x_11); +lean_dec(x_8); +x_12 = l_Lean_Expr_app___override(x_3, x_11); +x_2 = x_10; +x_3 = x_12; +goto _start; +} +} +} +LEAN_EXPORT lean_object* l_Lean_Meta_Grind_Arith_CommRing_Null_NullCertExt_applyEqs_go___boxed(lean_object* x_1, lean_object* x_2, lean_object* x_3) { +_start: +{ +lean_object* x_4; +x_4 = l_Lean_Meta_Grind_Arith_CommRing_Null_NullCertExt_applyEqs_go(x_1, x_2, x_3); +lean_dec(x_1); +return x_4; +} +} +LEAN_EXPORT lean_object* l_Lean_Meta_Grind_Arith_CommRing_Null_NullCertExt_applyEqs(lean_object* x_1, lean_object* x_2) { +_start: +{ +lean_object* x_3; lean_object* x_4; +x_3 = lean_unsigned_to_nat(0u); +x_4 = 
l_Lean_Meta_Grind_Arith_CommRing_Null_NullCertExt_applyEqs_go(x_2, x_3, x_1); +return x_4; +} +} +LEAN_EXPORT lean_object* l_Lean_Meta_Grind_Arith_CommRing_Null_NullCertExt_applyEqs___boxed(lean_object* x_1, lean_object* x_2) { +_start: +{ +lean_object* x_3; +x_3 = l_Lean_Meta_Grind_Arith_CommRing_Null_NullCertExt_applyEqs(x_1, x_2); +lean_dec(x_2); +return x_3; +} +} +LEAN_EXPORT lean_object* l___private_Lean_Meta_Tactic_Grind_Arith_CommRing_Proof_0__Lean_Meta_Grind_Arith_CommRing_Null_getNoZeroDivInstIfNeeded_x3f(lean_object* x_1, lean_object* x_2, lean_object* x_3, lean_object* x_4, lean_object* x_5, lean_object* x_6, lean_object* x_7, lean_object* x_8, lean_object* x_9, lean_object* x_10, lean_object* x_11) { +_start: +{ +lean_object* x_12; uint8_t x_13; +x_12 = l_Lean_Grind_CommRing_Mon_denoteExpr___at_Lean_Meta_Grind_Arith_CommRing_getPolyConst___spec__4___closed__1; +x_13 = lean_int_dec_eq(x_1, x_12); +if (x_13 == 0) +{ +lean_object* x_14; +x_14 = l_Lean_Meta_Grind_Arith_CommRing_noZeroDivisorsInst_x3f(x_2, x_3, x_4, x_5, x_6, x_7, x_8, x_9, x_10, x_11); +if (lean_obj_tag(x_14) == 0) +{ +lean_object* x_15; +x_15 = lean_ctor_get(x_14, 0); +lean_inc(x_15); +if (lean_obj_tag(x_15) == 0) +{ +lean_object* x_16; lean_object* x_17; +x_16 = lean_ctor_get(x_14, 1); +lean_inc(x_16); +lean_dec(x_14); +x_17 = l_Lean_Meta_Grind_Arith_CommRing_throwNoNatZeroDivisors___rarg(x_2, x_3, x_4, x_5, x_6, x_7, x_8, x_9, x_10, x_16); +return x_17; +} +else +{ +uint8_t x_18; +x_18 = !lean_is_exclusive(x_14); +if (x_18 == 0) +{ +lean_object* x_19; uint8_t x_20; +x_19 = lean_ctor_get(x_14, 0); +lean_dec(x_19); +x_20 = !lean_is_exclusive(x_15); +if (x_20 == 0) +{ +return x_14; +} +else +{ +lean_object* x_21; lean_object* x_22; +x_21 = lean_ctor_get(x_15, 0); +lean_inc(x_21); +lean_dec(x_15); +x_22 = lean_alloc_ctor(1, 1, 0); +lean_ctor_set(x_22, 0, x_21); +lean_ctor_set(x_14, 0, x_22); +return x_14; +} +} +else +{ +lean_object* x_23; lean_object* x_24; lean_object* x_25; lean_object* x_26; lean_object* x_27; +x_23 = lean_ctor_get(x_14, 1); +lean_inc(x_23); +lean_dec(x_14); +x_24 = lean_ctor_get(x_15, 0); +lean_inc(x_24); +if (lean_is_exclusive(x_15)) { + lean_ctor_release(x_15, 0); + x_25 = x_15; +} else { + lean_dec_ref(x_15); + x_25 = lean_box(0); +} +if (lean_is_scalar(x_25)) { + x_26 = lean_alloc_ctor(1, 1, 0); +} else { + x_26 = x_25; +} +lean_ctor_set(x_26, 0, x_24); +x_27 = lean_alloc_ctor(0, 2, 0); +lean_ctor_set(x_27, 0, x_26); +lean_ctor_set(x_27, 1, x_23); +return x_27; +} +} +} +else { -lean_object* x_5; -x_5 = l_Lean_Meta_Grind_Arith_CommRing_Null_NullCertExt_toNullCert_go(x_1, x_2, x_3, x_4); -lean_dec(x_1); -return x_5; -} -} -LEAN_EXPORT lean_object* l_Lean_Meta_Grind_Arith_CommRing_Null_NullCertExt_toNullCert(lean_object* x_1) { -_start: +uint8_t x_28; +x_28 = !lean_is_exclusive(x_14); +if (x_28 == 0) { -lean_object* x_2; lean_object* x_3; lean_object* x_4; lean_object* x_5; -x_2 = lean_ctor_get(x_1, 1); -x_3 = lean_array_get_size(x_2); -x_4 = lean_box(0); -x_5 = l_Lean_Meta_Grind_Arith_CommRing_Null_NullCertExt_toNullCert_go(x_1, x_3, x_4, lean_box(0)); -return x_5; -} +return x_14; } -LEAN_EXPORT lean_object* l_Lean_Meta_Grind_Arith_CommRing_Null_NullCertExt_toNullCert___boxed(lean_object* x_1) { -_start: +else { -lean_object* x_2; -x_2 = l_Lean_Meta_Grind_Arith_CommRing_Null_NullCertExt_toNullCert(x_1); -lean_dec(x_1); -return x_2; +lean_object* x_29; lean_object* x_30; lean_object* x_31; +x_29 = lean_ctor_get(x_14, 0); +x_30 = lean_ctor_get(x_14, 1); +lean_inc(x_30); +lean_inc(x_29); 
+lean_dec(x_14); +x_31 = lean_alloc_ctor(1, 2, 0); +lean_ctor_set(x_31, 0, x_29); +lean_ctor_set(x_31, 1, x_30); +return x_31; } } -LEAN_EXPORT lean_object* l_Lean_Meta_Grind_Arith_CommRing_Null_NullCertExt_applyEqs_go(lean_object* x_1, lean_object* x_2, lean_object* x_3) { -_start: -{ -lean_object* x_4; lean_object* x_5; uint8_t x_6; -x_4 = lean_ctor_get(x_1, 1); -x_5 = lean_array_get_size(x_4); -x_6 = lean_nat_dec_lt(x_2, x_5); -lean_dec(x_5); -if (x_6 == 0) -{ -lean_dec(x_2); -return x_3; } else { -lean_object* x_7; lean_object* x_8; lean_object* x_9; lean_object* x_10; lean_object* x_11; lean_object* x_12; -x_7 = lean_array_fget(x_4, x_2); -x_8 = lean_ctor_get(x_7, 1); -lean_inc(x_8); -lean_dec(x_7); -x_9 = lean_unsigned_to_nat(1u); -x_10 = lean_nat_add(x_2, x_9); -lean_dec(x_2); -x_11 = lean_ctor_get(x_8, 0); -lean_inc(x_11); -lean_dec(x_8); -x_12 = l_Lean_Expr_app___override(x_3, x_11); -x_2 = x_10; -x_3 = x_12; -goto _start; +lean_object* x_32; lean_object* x_33; +x_32 = lean_box(0); +x_33 = lean_alloc_ctor(0, 2, 0); +lean_ctor_set(x_33, 0, x_32); +lean_ctor_set(x_33, 1, x_11); +return x_33; } } } -LEAN_EXPORT lean_object* l_Lean_Meta_Grind_Arith_CommRing_Null_NullCertExt_applyEqs_go___boxed(lean_object* x_1, lean_object* x_2, lean_object* x_3) { +LEAN_EXPORT lean_object* l___private_Lean_Meta_Tactic_Grind_Arith_CommRing_Proof_0__Lean_Meta_Grind_Arith_CommRing_Null_getNoZeroDivInstIfNeeded_x3f___boxed(lean_object* x_1, lean_object* x_2, lean_object* x_3, lean_object* x_4, lean_object* x_5, lean_object* x_6, lean_object* x_7, lean_object* x_8, lean_object* x_9, lean_object* x_10, lean_object* x_11) { _start: { -lean_object* x_4; -x_4 = l_Lean_Meta_Grind_Arith_CommRing_Null_NullCertExt_applyEqs_go(x_1, x_2, x_3); +lean_object* x_12; +x_12 = l___private_Lean_Meta_Tactic_Grind_Arith_CommRing_Proof_0__Lean_Meta_Grind_Arith_CommRing_Null_getNoZeroDivInstIfNeeded_x3f(x_1, x_2, x_3, x_4, x_5, x_6, x_7, x_8, x_9, x_10, x_11); +lean_dec(x_10); +lean_dec(x_9); +lean_dec(x_8); +lean_dec(x_7); +lean_dec(x_6); +lean_dec(x_5); +lean_dec(x_4); +lean_dec(x_3); +lean_dec(x_2); lean_dec(x_1); -return x_4; +return x_12; } } -LEAN_EXPORT lean_object* l_Lean_Meta_Grind_Arith_CommRing_Null_NullCertExt_applyEqs(lean_object* x_1, lean_object* x_2) { +LEAN_EXPORT lean_object* l_Lean_isTracingEnabledFor___at_Lean_Meta_Grind_Arith_CommRing_Null_setEqUnsat___spec__1(lean_object* x_1, lean_object* x_2, lean_object* x_3, lean_object* x_4, lean_object* x_5, lean_object* x_6, lean_object* x_7, lean_object* x_8, lean_object* x_9, lean_object* x_10, lean_object* x_11) { _start: { -lean_object* x_3; lean_object* x_4; -x_3 = lean_unsigned_to_nat(0u); -x_4 = l_Lean_Meta_Grind_Arith_CommRing_Null_NullCertExt_applyEqs_go(x_2, x_3, x_1); -return x_4; +lean_object* x_12; lean_object* x_13; uint8_t x_14; lean_object* x_15; lean_object* x_16; +x_12 = lean_ctor_get(x_9, 12); +x_13 = lean_ctor_get(x_9, 2); +x_14 = l_Lean_checkTraceOption(x_12, x_13, x_1); +x_15 = lean_box(x_14); +x_16 = lean_alloc_ctor(0, 2, 0); +lean_ctor_set(x_16, 0, x_15); +lean_ctor_set(x_16, 1, x_11); +return x_16; } } -LEAN_EXPORT lean_object* l_Lean_Meta_Grind_Arith_CommRing_Null_NullCertExt_applyEqs___boxed(lean_object* x_1, lean_object* x_2) { +LEAN_EXPORT lean_object* l_Lean_throwError___at_Lean_Meta_Grind_Arith_CommRing_Null_setEqUnsat___spec__2(lean_object* x_1, lean_object* x_2, lean_object* x_3, lean_object* x_4, lean_object* x_5, lean_object* x_6, lean_object* x_7, lean_object* x_8, lean_object* x_9, lean_object* x_10, lean_object* x_11) { 
_start: { -lean_object* x_3; -x_3 = l_Lean_Meta_Grind_Arith_CommRing_Null_NullCertExt_applyEqs(x_1, x_2); -lean_dec(x_2); -return x_3; +lean_object* x_12; lean_object* x_13; uint8_t x_14; +x_12 = lean_ctor_get(x_9, 5); +x_13 = l_Lean_addMessageContextFull___at_Lean_Meta_instAddMessageContextMetaM___spec__1(x_1, x_7, x_8, x_9, x_10, x_11); +x_14 = !lean_is_exclusive(x_13); +if (x_14 == 0) +{ +lean_object* x_15; lean_object* x_16; +x_15 = lean_ctor_get(x_13, 0); +lean_inc(x_12); +x_16 = lean_alloc_ctor(0, 2, 0); +lean_ctor_set(x_16, 0, x_12); +lean_ctor_set(x_16, 1, x_15); +lean_ctor_set_tag(x_13, 1); +lean_ctor_set(x_13, 0, x_16); +return x_13; +} +else +{ +lean_object* x_17; lean_object* x_18; lean_object* x_19; lean_object* x_20; +x_17 = lean_ctor_get(x_13, 0); +x_18 = lean_ctor_get(x_13, 1); +lean_inc(x_18); +lean_inc(x_17); +lean_dec(x_13); +lean_inc(x_12); +x_19 = lean_alloc_ctor(0, 2, 0); +lean_ctor_set(x_19, 0, x_12); +lean_ctor_set(x_19, 1, x_17); +x_20 = lean_alloc_ctor(1, 2, 0); +lean_ctor_set(x_20, 0, x_19); +lean_ctor_set(x_20, 1, x_18); +return x_20; } } -LEAN_EXPORT lean_object* l___private_Lean_Meta_Tactic_Grind_Arith_CommRing_Proof_0__Lean_Meta_Grind_Arith_CommRing_Null_getNoZeroDivInstIfNeeded_x3f(lean_object* x_1, lean_object* x_2, lean_object* x_3, lean_object* x_4, lean_object* x_5, lean_object* x_6, lean_object* x_7, lean_object* x_8, lean_object* x_9, lean_object* x_10, lean_object* x_11) { +} +LEAN_EXPORT lean_object* l_Lean_addTrace___at_Lean_Meta_Grind_Arith_CommRing_Null_setEqUnsat___spec__3(lean_object* x_1, lean_object* x_2, lean_object* x_3, lean_object* x_4, lean_object* x_5, lean_object* x_6, lean_object* x_7, lean_object* x_8, lean_object* x_9, lean_object* x_10, lean_object* x_11, lean_object* x_12) { _start: { -lean_object* x_12; uint8_t x_13; -x_12 = l_Lean_Meta_Grind_Arith_CommRing_Null_PreNullCert_zero___closed__2; -x_13 = lean_int_dec_eq(x_1, x_12); -if (x_13 == 0) -{ -lean_object* x_14; -x_14 = l_Lean_Meta_Grind_Arith_CommRing_noZeroDivisorsInst_x3f(x_2, x_3, x_4, x_5, x_6, x_7, x_8, x_9, x_10, x_11); -if (lean_obj_tag(x_14) == 0) -{ -lean_object* x_15; +lean_object* x_13; lean_object* x_14; lean_object* x_15; lean_object* x_16; lean_object* x_17; lean_object* x_18; lean_object* x_19; uint8_t x_20; +x_13 = lean_ctor_get(x_10, 5); +x_14 = l_Lean_addMessageContextFull___at_Lean_Meta_instAddMessageContextMetaM___spec__1(x_2, x_8, x_9, x_10, x_11, x_12); x_15 = lean_ctor_get(x_14, 0); lean_inc(x_15); -if (lean_obj_tag(x_15) == 0) -{ -lean_object* x_16; lean_object* x_17; x_16 = lean_ctor_get(x_14, 1); lean_inc(x_16); lean_dec(x_14); -x_17 = l_Lean_Meta_Grind_Arith_CommRing_throwNoNatZeroDivisors___rarg(x_2, x_3, x_4, x_5, x_6, x_7, x_8, x_9, x_10, x_16); -return x_17; +x_17 = lean_st_ref_take(x_11, x_16); +x_18 = lean_ctor_get(x_17, 0); +lean_inc(x_18); +x_19 = lean_ctor_get(x_18, 3); +lean_inc(x_19); +x_20 = !lean_is_exclusive(x_17); +if (x_20 == 0) +{ +lean_object* x_21; lean_object* x_22; uint8_t x_23; +x_21 = lean_ctor_get(x_17, 1); +x_22 = lean_ctor_get(x_17, 0); +lean_dec(x_22); +x_23 = !lean_is_exclusive(x_18); +if (x_23 == 0) +{ +lean_object* x_24; uint8_t x_25; +x_24 = lean_ctor_get(x_18, 3); +lean_dec(x_24); +x_25 = !lean_is_exclusive(x_19); +if (x_25 == 0) +{ +lean_object* x_26; double x_27; uint8_t x_28; lean_object* x_29; lean_object* x_30; lean_object* x_31; lean_object* x_32; lean_object* x_33; lean_object* x_34; uint8_t x_35; +x_26 = lean_ctor_get(x_19, 0); +x_27 = 
l_Lean_addTrace___at_Lean_Meta_Grind_Arith_CommRing_PolyDerivation_toPreNullCert___spec__2___closed__1; +x_28 = 0; +x_29 = l_Lean_Meta_Grind_Arith_CommRing_throwNoNatZeroDivisors___rarg___closed__3; +x_30 = lean_alloc_ctor(0, 2, 17); +lean_ctor_set(x_30, 0, x_1); +lean_ctor_set(x_30, 1, x_29); +lean_ctor_set_float(x_30, sizeof(void*)*2, x_27); +lean_ctor_set_float(x_30, sizeof(void*)*2 + 8, x_27); +lean_ctor_set_uint8(x_30, sizeof(void*)*2 + 16, x_28); +x_31 = l_Lean_Meta_Grind_Arith_CommRing_Null_PreNullCert_zero___closed__1; +x_32 = lean_alloc_ctor(9, 3, 0); +lean_ctor_set(x_32, 0, x_30); +lean_ctor_set(x_32, 1, x_15); +lean_ctor_set(x_32, 2, x_31); +lean_inc(x_13); +lean_ctor_set(x_17, 1, x_32); +lean_ctor_set(x_17, 0, x_13); +x_33 = l_Lean_PersistentArray_push___rarg(x_26, x_17); +lean_ctor_set(x_19, 0, x_33); +x_34 = lean_st_ref_set(x_11, x_18, x_21); +x_35 = !lean_is_exclusive(x_34); +if (x_35 == 0) +{ +lean_object* x_36; lean_object* x_37; +x_36 = lean_ctor_get(x_34, 0); +lean_dec(x_36); +x_37 = lean_box(0); +lean_ctor_set(x_34, 0, x_37); +return x_34; +} +else +{ +lean_object* x_38; lean_object* x_39; lean_object* x_40; +x_38 = lean_ctor_get(x_34, 1); +lean_inc(x_38); +lean_dec(x_34); +x_39 = lean_box(0); +x_40 = lean_alloc_ctor(0, 2, 0); +lean_ctor_set(x_40, 0, x_39); +lean_ctor_set(x_40, 1, x_38); +return x_40; +} +} +else +{ +uint64_t x_41; lean_object* x_42; double x_43; uint8_t x_44; lean_object* x_45; lean_object* x_46; lean_object* x_47; lean_object* x_48; lean_object* x_49; lean_object* x_50; lean_object* x_51; lean_object* x_52; lean_object* x_53; lean_object* x_54; lean_object* x_55; +x_41 = lean_ctor_get_uint64(x_19, sizeof(void*)*1); +x_42 = lean_ctor_get(x_19, 0); +lean_inc(x_42); +lean_dec(x_19); +x_43 = l_Lean_addTrace___at_Lean_Meta_Grind_Arith_CommRing_PolyDerivation_toPreNullCert___spec__2___closed__1; +x_44 = 0; +x_45 = l_Lean_Meta_Grind_Arith_CommRing_throwNoNatZeroDivisors___rarg___closed__3; +x_46 = lean_alloc_ctor(0, 2, 17); +lean_ctor_set(x_46, 0, x_1); +lean_ctor_set(x_46, 1, x_45); +lean_ctor_set_float(x_46, sizeof(void*)*2, x_43); +lean_ctor_set_float(x_46, sizeof(void*)*2 + 8, x_43); +lean_ctor_set_uint8(x_46, sizeof(void*)*2 + 16, x_44); +x_47 = l_Lean_Meta_Grind_Arith_CommRing_Null_PreNullCert_zero___closed__1; +x_48 = lean_alloc_ctor(9, 3, 0); +lean_ctor_set(x_48, 0, x_46); +lean_ctor_set(x_48, 1, x_15); +lean_ctor_set(x_48, 2, x_47); +lean_inc(x_13); +lean_ctor_set(x_17, 1, x_48); +lean_ctor_set(x_17, 0, x_13); +x_49 = l_Lean_PersistentArray_push___rarg(x_42, x_17); +x_50 = lean_alloc_ctor(0, 1, 8); +lean_ctor_set(x_50, 0, x_49); +lean_ctor_set_uint64(x_50, sizeof(void*)*1, x_41); +lean_ctor_set(x_18, 3, x_50); +x_51 = lean_st_ref_set(x_11, x_18, x_21); +x_52 = lean_ctor_get(x_51, 1); +lean_inc(x_52); +if (lean_is_exclusive(x_51)) { + lean_ctor_release(x_51, 0); + lean_ctor_release(x_51, 1); + x_53 = x_51; +} else { + lean_dec_ref(x_51); + x_53 = lean_box(0); } -else -{ -uint8_t x_18; -x_18 = !lean_is_exclusive(x_14); -if (x_18 == 0) -{ -lean_object* x_19; uint8_t x_20; -x_19 = lean_ctor_get(x_14, 0); -lean_dec(x_19); -x_20 = !lean_is_exclusive(x_15); -if (x_20 == 0) -{ -return x_14; +x_54 = lean_box(0); +if (lean_is_scalar(x_53)) { + x_55 = lean_alloc_ctor(0, 2, 0); +} else { + x_55 = x_53; } -else -{ -lean_object* x_21; lean_object* x_22; -x_21 = lean_ctor_get(x_15, 0); -lean_inc(x_21); -lean_dec(x_15); -x_22 = lean_alloc_ctor(1, 1, 0); -lean_ctor_set(x_22, 0, x_21); -lean_ctor_set(x_14, 0, x_22); -return x_14; +lean_ctor_set(x_55, 0, x_54); 
+lean_ctor_set(x_55, 1, x_52); +return x_55; } } else { -lean_object* x_23; lean_object* x_24; lean_object* x_25; lean_object* x_26; lean_object* x_27; -x_23 = lean_ctor_get(x_14, 1); -lean_inc(x_23); -lean_dec(x_14); -x_24 = lean_ctor_get(x_15, 0); -lean_inc(x_24); -if (lean_is_exclusive(x_15)) { - lean_ctor_release(x_15, 0); - x_25 = x_15; +lean_object* x_56; lean_object* x_57; lean_object* x_58; lean_object* x_59; lean_object* x_60; lean_object* x_61; lean_object* x_62; uint64_t x_63; lean_object* x_64; lean_object* x_65; double x_66; uint8_t x_67; lean_object* x_68; lean_object* x_69; lean_object* x_70; lean_object* x_71; lean_object* x_72; lean_object* x_73; lean_object* x_74; lean_object* x_75; lean_object* x_76; lean_object* x_77; lean_object* x_78; lean_object* x_79; +x_56 = lean_ctor_get(x_18, 0); +x_57 = lean_ctor_get(x_18, 1); +x_58 = lean_ctor_get(x_18, 2); +x_59 = lean_ctor_get(x_18, 4); +x_60 = lean_ctor_get(x_18, 5); +x_61 = lean_ctor_get(x_18, 6); +x_62 = lean_ctor_get(x_18, 7); +lean_inc(x_62); +lean_inc(x_61); +lean_inc(x_60); +lean_inc(x_59); +lean_inc(x_58); +lean_inc(x_57); +lean_inc(x_56); +lean_dec(x_18); +x_63 = lean_ctor_get_uint64(x_19, sizeof(void*)*1); +x_64 = lean_ctor_get(x_19, 0); +lean_inc(x_64); +if (lean_is_exclusive(x_19)) { + lean_ctor_release(x_19, 0); + x_65 = x_19; } else { - lean_dec_ref(x_15); - x_25 = lean_box(0); + lean_dec_ref(x_19); + x_65 = lean_box(0); } -if (lean_is_scalar(x_25)) { - x_26 = lean_alloc_ctor(1, 1, 0); +x_66 = l_Lean_addTrace___at_Lean_Meta_Grind_Arith_CommRing_PolyDerivation_toPreNullCert___spec__2___closed__1; +x_67 = 0; +x_68 = l_Lean_Meta_Grind_Arith_CommRing_throwNoNatZeroDivisors___rarg___closed__3; +x_69 = lean_alloc_ctor(0, 2, 17); +lean_ctor_set(x_69, 0, x_1); +lean_ctor_set(x_69, 1, x_68); +lean_ctor_set_float(x_69, sizeof(void*)*2, x_66); +lean_ctor_set_float(x_69, sizeof(void*)*2 + 8, x_66); +lean_ctor_set_uint8(x_69, sizeof(void*)*2 + 16, x_67); +x_70 = l_Lean_Meta_Grind_Arith_CommRing_Null_PreNullCert_zero___closed__1; +x_71 = lean_alloc_ctor(9, 3, 0); +lean_ctor_set(x_71, 0, x_69); +lean_ctor_set(x_71, 1, x_15); +lean_ctor_set(x_71, 2, x_70); +lean_inc(x_13); +lean_ctor_set(x_17, 1, x_71); +lean_ctor_set(x_17, 0, x_13); +x_72 = l_Lean_PersistentArray_push___rarg(x_64, x_17); +if (lean_is_scalar(x_65)) { + x_73 = lean_alloc_ctor(0, 1, 8); } else { - x_26 = x_25; + x_73 = x_65; } -lean_ctor_set(x_26, 0, x_24); -x_27 = lean_alloc_ctor(0, 2, 0); -lean_ctor_set(x_27, 0, x_26); -lean_ctor_set(x_27, 1, x_23); -return x_27; +lean_ctor_set(x_73, 0, x_72); +lean_ctor_set_uint64(x_73, sizeof(void*)*1, x_63); +x_74 = lean_alloc_ctor(0, 8, 0); +lean_ctor_set(x_74, 0, x_56); +lean_ctor_set(x_74, 1, x_57); +lean_ctor_set(x_74, 2, x_58); +lean_ctor_set(x_74, 3, x_73); +lean_ctor_set(x_74, 4, x_59); +lean_ctor_set(x_74, 5, x_60); +lean_ctor_set(x_74, 6, x_61); +lean_ctor_set(x_74, 7, x_62); +x_75 = lean_st_ref_set(x_11, x_74, x_21); +x_76 = lean_ctor_get(x_75, 1); +lean_inc(x_76); +if (lean_is_exclusive(x_75)) { + lean_ctor_release(x_75, 0); + lean_ctor_release(x_75, 1); + x_77 = x_75; +} else { + lean_dec_ref(x_75); + x_77 = lean_box(0); +} +x_78 = lean_box(0); +if (lean_is_scalar(x_77)) { + x_79 = lean_alloc_ctor(0, 2, 0); +} else { + x_79 = x_77; } +lean_ctor_set(x_79, 0, x_78); +lean_ctor_set(x_79, 1, x_76); +return x_79; } } else { -uint8_t x_28; -x_28 = !lean_is_exclusive(x_14); -if (x_28 == 0) -{ -return x_14; +lean_object* x_80; lean_object* x_81; lean_object* x_82; lean_object* x_83; lean_object* x_84; lean_object* x_85; 
lean_object* x_86; lean_object* x_87; lean_object* x_88; uint64_t x_89; lean_object* x_90; lean_object* x_91; double x_92; uint8_t x_93; lean_object* x_94; lean_object* x_95; lean_object* x_96; lean_object* x_97; lean_object* x_98; lean_object* x_99; lean_object* x_100; lean_object* x_101; lean_object* x_102; lean_object* x_103; lean_object* x_104; lean_object* x_105; lean_object* x_106; +x_80 = lean_ctor_get(x_17, 1); +lean_inc(x_80); +lean_dec(x_17); +x_81 = lean_ctor_get(x_18, 0); +lean_inc(x_81); +x_82 = lean_ctor_get(x_18, 1); +lean_inc(x_82); +x_83 = lean_ctor_get(x_18, 2); +lean_inc(x_83); +x_84 = lean_ctor_get(x_18, 4); +lean_inc(x_84); +x_85 = lean_ctor_get(x_18, 5); +lean_inc(x_85); +x_86 = lean_ctor_get(x_18, 6); +lean_inc(x_86); +x_87 = lean_ctor_get(x_18, 7); +lean_inc(x_87); +if (lean_is_exclusive(x_18)) { + lean_ctor_release(x_18, 0); + lean_ctor_release(x_18, 1); + lean_ctor_release(x_18, 2); + lean_ctor_release(x_18, 3); + lean_ctor_release(x_18, 4); + lean_ctor_release(x_18, 5); + lean_ctor_release(x_18, 6); + lean_ctor_release(x_18, 7); + x_88 = x_18; +} else { + lean_dec_ref(x_18); + x_88 = lean_box(0); } -else -{ -lean_object* x_29; lean_object* x_30; lean_object* x_31; -x_29 = lean_ctor_get(x_14, 0); -x_30 = lean_ctor_get(x_14, 1); -lean_inc(x_30); -lean_inc(x_29); -lean_dec(x_14); -x_31 = lean_alloc_ctor(1, 2, 0); -lean_ctor_set(x_31, 0, x_29); -lean_ctor_set(x_31, 1, x_30); -return x_31; +x_89 = lean_ctor_get_uint64(x_19, sizeof(void*)*1); +x_90 = lean_ctor_get(x_19, 0); +lean_inc(x_90); +if (lean_is_exclusive(x_19)) { + lean_ctor_release(x_19, 0); + x_91 = x_19; +} else { + lean_dec_ref(x_19); + x_91 = lean_box(0); } +x_92 = l_Lean_addTrace___at_Lean_Meta_Grind_Arith_CommRing_PolyDerivation_toPreNullCert___spec__2___closed__1; +x_93 = 0; +x_94 = l_Lean_Meta_Grind_Arith_CommRing_throwNoNatZeroDivisors___rarg___closed__3; +x_95 = lean_alloc_ctor(0, 2, 17); +lean_ctor_set(x_95, 0, x_1); +lean_ctor_set(x_95, 1, x_94); +lean_ctor_set_float(x_95, sizeof(void*)*2, x_92); +lean_ctor_set_float(x_95, sizeof(void*)*2 + 8, x_92); +lean_ctor_set_uint8(x_95, sizeof(void*)*2 + 16, x_93); +x_96 = l_Lean_Meta_Grind_Arith_CommRing_Null_PreNullCert_zero___closed__1; +x_97 = lean_alloc_ctor(9, 3, 0); +lean_ctor_set(x_97, 0, x_95); +lean_ctor_set(x_97, 1, x_15); +lean_ctor_set(x_97, 2, x_96); +lean_inc(x_13); +x_98 = lean_alloc_ctor(0, 2, 0); +lean_ctor_set(x_98, 0, x_13); +lean_ctor_set(x_98, 1, x_97); +x_99 = l_Lean_PersistentArray_push___rarg(x_90, x_98); +if (lean_is_scalar(x_91)) { + x_100 = lean_alloc_ctor(0, 1, 8); +} else { + x_100 = x_91; } +lean_ctor_set(x_100, 0, x_99); +lean_ctor_set_uint64(x_100, sizeof(void*)*1, x_89); +if (lean_is_scalar(x_88)) { + x_101 = lean_alloc_ctor(0, 8, 0); +} else { + x_101 = x_88; } -else -{ -lean_object* x_32; lean_object* x_33; -x_32 = lean_box(0); -x_33 = lean_alloc_ctor(0, 2, 0); -lean_ctor_set(x_33, 0, x_32); -lean_ctor_set(x_33, 1, x_11); -return x_33; +lean_ctor_set(x_101, 0, x_81); +lean_ctor_set(x_101, 1, x_82); +lean_ctor_set(x_101, 2, x_83); +lean_ctor_set(x_101, 3, x_100); +lean_ctor_set(x_101, 4, x_84); +lean_ctor_set(x_101, 5, x_85); +lean_ctor_set(x_101, 6, x_86); +lean_ctor_set(x_101, 7, x_87); +x_102 = lean_st_ref_set(x_11, x_101, x_80); +x_103 = lean_ctor_get(x_102, 1); +lean_inc(x_103); +if (lean_is_exclusive(x_102)) { + lean_ctor_release(x_102, 0); + lean_ctor_release(x_102, 1); + x_104 = x_102; +} else { + lean_dec_ref(x_102); + x_104 = lean_box(0); +} +x_105 = lean_box(0); +if (lean_is_scalar(x_104)) { + x_106 = 
lean_alloc_ctor(0, 2, 0); +} else { + x_106 = x_104; +} +lean_ctor_set(x_106, 0, x_105); +lean_ctor_set(x_106, 1, x_103); +return x_106; } } } -LEAN_EXPORT lean_object* l___private_Lean_Meta_Tactic_Grind_Arith_CommRing_Proof_0__Lean_Meta_Grind_Arith_CommRing_Null_getNoZeroDivInstIfNeeded_x3f___boxed(lean_object* x_1, lean_object* x_2, lean_object* x_3, lean_object* x_4, lean_object* x_5, lean_object* x_6, lean_object* x_7, lean_object* x_8, lean_object* x_9, lean_object* x_10, lean_object* x_11) { +static lean_object* _init_l___private_Lean_Meta_Tactic_Grind_Arith_CommRing_DenoteExpr_0__Lean_Meta_Grind_Arith_CommRing_mkEq___at_Lean_Meta_Grind_Arith_CommRing_Null_setEqUnsat___spec__5___closed__1() { _start: { -lean_object* x_12; -x_12 = l___private_Lean_Meta_Tactic_Grind_Arith_CommRing_Proof_0__Lean_Meta_Grind_Arith_CommRing_Null_getNoZeroDivInstIfNeeded_x3f(x_1, x_2, x_3, x_4, x_5, x_6, x_7, x_8, x_9, x_10, x_11); -lean_dec(x_10); -lean_dec(x_9); -lean_dec(x_8); -lean_dec(x_7); -lean_dec(x_6); -lean_dec(x_5); -lean_dec(x_4); -lean_dec(x_3); -lean_dec(x_2); -lean_dec(x_1); -return x_12; +lean_object* x_1; +x_1 = lean_mk_string_unchecked("Eq", 2, 2); +return x_1; } } -LEAN_EXPORT lean_object* l_Lean_isTracingEnabledFor___at_Lean_Meta_Grind_Arith_CommRing_Null_setEqUnsat___spec__1(lean_object* x_1, lean_object* x_2, lean_object* x_3, lean_object* x_4, lean_object* x_5, lean_object* x_6, lean_object* x_7, lean_object* x_8, lean_object* x_9, lean_object* x_10, lean_object* x_11) { +static lean_object* _init_l___private_Lean_Meta_Tactic_Grind_Arith_CommRing_DenoteExpr_0__Lean_Meta_Grind_Arith_CommRing_mkEq___at_Lean_Meta_Grind_Arith_CommRing_Null_setEqUnsat___spec__5___closed__2() { _start: { -lean_object* x_12; lean_object* x_13; uint8_t x_14; lean_object* x_15; lean_object* x_16; -x_12 = lean_ctor_get(x_9, 12); -x_13 = lean_ctor_get(x_9, 2); -x_14 = l_Lean_checkTraceOption(x_12, x_13, x_1); -x_15 = lean_box(x_14); -x_16 = lean_alloc_ctor(0, 2, 0); -lean_ctor_set(x_16, 0, x_15); -lean_ctor_set(x_16, 1, x_11); -return x_16; +lean_object* x_1; lean_object* x_2; lean_object* x_3; +x_1 = lean_box(0); +x_2 = l___private_Lean_Meta_Tactic_Grind_Arith_CommRing_DenoteExpr_0__Lean_Meta_Grind_Arith_CommRing_mkEq___at_Lean_Meta_Grind_Arith_CommRing_Null_setEqUnsat___spec__5___closed__1; +x_3 = l_Lean_Name_str___override(x_1, x_2); +return x_3; } } -LEAN_EXPORT lean_object* l_Lean_throwError___at_Lean_Meta_Grind_Arith_CommRing_Null_setEqUnsat___spec__2(lean_object* x_1, lean_object* x_2, lean_object* x_3, lean_object* x_4, lean_object* x_5, lean_object* x_6, lean_object* x_7, lean_object* x_8, lean_object* x_9, lean_object* x_10, lean_object* x_11) { +LEAN_EXPORT lean_object* l___private_Lean_Meta_Tactic_Grind_Arith_CommRing_DenoteExpr_0__Lean_Meta_Grind_Arith_CommRing_mkEq___at_Lean_Meta_Grind_Arith_CommRing_Null_setEqUnsat___spec__5(lean_object* x_1, lean_object* x_2, lean_object* x_3, lean_object* x_4, lean_object* x_5, lean_object* x_6, lean_object* x_7, lean_object* x_8, lean_object* x_9, lean_object* x_10, lean_object* x_11, lean_object* x_12) { _start: { -lean_object* x_12; lean_object* x_13; uint8_t x_14; -x_12 = lean_ctor_get(x_9, 5); -x_13 = l_Lean_addMessageContextFull___at_Lean_Meta_instAddMessageContextMetaM___spec__1(x_1, x_7, x_8, x_9, x_10, x_11); +lean_object* x_13; +x_13 = l_Lean_Meta_Grind_Arith_CommRing_RingM_getRing(x_3, x_4, x_5, x_6, x_7, x_8, x_9, x_10, x_11, x_12); +if (lean_obj_tag(x_13) == 0) +{ +uint8_t x_14; x_14 = !lean_is_exclusive(x_13); if (x_14 == 0) { -lean_object* x_15; 
lean_object* x_16; +lean_object* x_15; lean_object* x_16; lean_object* x_17; lean_object* x_18; lean_object* x_19; lean_object* x_20; lean_object* x_21; lean_object* x_22; lean_object* x_23; x_15 = lean_ctor_get(x_13, 0); -lean_inc(x_12); -x_16 = lean_alloc_ctor(0, 2, 0); -lean_ctor_set(x_16, 0, x_12); -lean_ctor_set(x_16, 1, x_15); -lean_ctor_set_tag(x_13, 1); -lean_ctor_set(x_13, 0, x_16); +x_16 = lean_ctor_get(x_15, 2); +lean_inc(x_16); +x_17 = l_Lean_Level_succ___override(x_16); +x_18 = lean_box(0); +x_19 = lean_alloc_ctor(1, 2, 0); +lean_ctor_set(x_19, 0, x_17); +lean_ctor_set(x_19, 1, x_18); +x_20 = l___private_Lean_Meta_Tactic_Grind_Arith_CommRing_DenoteExpr_0__Lean_Meta_Grind_Arith_CommRing_mkEq___at_Lean_Meta_Grind_Arith_CommRing_Null_setEqUnsat___spec__5___closed__2; +x_21 = l_Lean_Expr_const___override(x_20, x_19); +x_22 = lean_ctor_get(x_15, 1); +lean_inc(x_22); +lean_dec(x_15); +x_23 = l_Lean_mkApp3(x_21, x_22, x_1, x_2); +lean_ctor_set(x_13, 0, x_23); return x_13; } else { -lean_object* x_17; lean_object* x_18; lean_object* x_19; lean_object* x_20; -x_17 = lean_ctor_get(x_13, 0); -x_18 = lean_ctor_get(x_13, 1); -lean_inc(x_18); -lean_inc(x_17); +lean_object* x_24; lean_object* x_25; lean_object* x_26; lean_object* x_27; lean_object* x_28; lean_object* x_29; lean_object* x_30; lean_object* x_31; lean_object* x_32; lean_object* x_33; lean_object* x_34; +x_24 = lean_ctor_get(x_13, 0); +x_25 = lean_ctor_get(x_13, 1); +lean_inc(x_25); +lean_inc(x_24); lean_dec(x_13); -lean_inc(x_12); -x_19 = lean_alloc_ctor(0, 2, 0); -lean_ctor_set(x_19, 0, x_12); -lean_ctor_set(x_19, 1, x_17); -x_20 = lean_alloc_ctor(1, 2, 0); -lean_ctor_set(x_20, 0, x_19); -lean_ctor_set(x_20, 1, x_18); -return x_20; +x_26 = lean_ctor_get(x_24, 2); +lean_inc(x_26); +x_27 = l_Lean_Level_succ___override(x_26); +x_28 = lean_box(0); +x_29 = lean_alloc_ctor(1, 2, 0); +lean_ctor_set(x_29, 0, x_27); +lean_ctor_set(x_29, 1, x_28); +x_30 = l___private_Lean_Meta_Tactic_Grind_Arith_CommRing_DenoteExpr_0__Lean_Meta_Grind_Arith_CommRing_mkEq___at_Lean_Meta_Grind_Arith_CommRing_Null_setEqUnsat___spec__5___closed__2; +x_31 = l_Lean_Expr_const___override(x_30, x_29); +x_32 = lean_ctor_get(x_24, 1); +lean_inc(x_32); +lean_dec(x_24); +x_33 = l_Lean_mkApp3(x_31, x_32, x_1, x_2); +x_34 = lean_alloc_ctor(0, 2, 0); +lean_ctor_set(x_34, 0, x_33); +lean_ctor_set(x_34, 1, x_25); +return x_34; } } +else +{ +uint8_t x_35; +lean_dec(x_2); +lean_dec(x_1); +x_35 = !lean_is_exclusive(x_13); +if (x_35 == 0) +{ +return x_13; } -LEAN_EXPORT lean_object* l_Lean_addTrace___at_Lean_Meta_Grind_Arith_CommRing_Null_setEqUnsat___spec__3(lean_object* x_1, lean_object* x_2, lean_object* x_3, lean_object* x_4, lean_object* x_5, lean_object* x_6, lean_object* x_7, lean_object* x_8, lean_object* x_9, lean_object* x_10, lean_object* x_11, lean_object* x_12) { +else +{ +lean_object* x_36; lean_object* x_37; lean_object* x_38; +x_36 = lean_ctor_get(x_13, 0); +x_37 = lean_ctor_get(x_13, 1); +lean_inc(x_37); +lean_inc(x_36); +lean_dec(x_13); +x_38 = lean_alloc_ctor(1, 2, 0); +lean_ctor_set(x_38, 0, x_36); +lean_ctor_set(x_38, 1, x_37); +return x_38; +} +} +} +} +LEAN_EXPORT lean_object* l_Lean_Meta_Grind_Arith_CommRing_EqCnstr_denoteExpr___at_Lean_Meta_Grind_Arith_CommRing_Null_setEqUnsat___spec__4(lean_object* x_1, lean_object* x_2, lean_object* x_3, lean_object* x_4, lean_object* x_5, lean_object* x_6, lean_object* x_7, lean_object* x_8, lean_object* x_9, lean_object* x_10, lean_object* x_11) { _start: { -lean_object* x_13; lean_object* x_14; lean_object* 
x_15; lean_object* x_16; lean_object* x_17; lean_object* x_18; lean_object* x_19; uint8_t x_20; -x_13 = lean_ctor_get(x_10, 5); -x_14 = l_Lean_addMessageContextFull___at_Lean_Meta_instAddMessageContextMetaM___spec__1(x_2, x_8, x_9, x_10, x_11, x_12); -x_15 = lean_ctor_get(x_14, 0); +lean_object* x_12; lean_object* x_13; +x_12 = lean_ctor_get(x_1, 0); +lean_inc(x_12); +lean_dec(x_1); +x_13 = l_Lean_Grind_CommRing_Poly_denoteExpr___at_Lean_Meta_Grind_Arith_CommRing_getPolyConst___spec__1(x_12, x_2, x_3, x_4, x_5, x_6, x_7, x_8, x_9, x_10, x_11); +if (lean_obj_tag(x_13) == 0) +{ +lean_object* x_14; lean_object* x_15; lean_object* x_16; lean_object* x_17; +x_14 = lean_ctor_get(x_13, 0); +lean_inc(x_14); +x_15 = lean_ctor_get(x_13, 1); lean_inc(x_15); -x_16 = lean_ctor_get(x_14, 1); -lean_inc(x_16); -lean_dec(x_14); -x_17 = lean_st_ref_take(x_11, x_16); +lean_dec(x_13); +x_16 = l___private_Lean_Meta_Tactic_Grind_Arith_CommRing_DenoteExpr_0__Lean_Meta_Grind_Arith_CommRing_denoteNum___at_Lean_Meta_Grind_Arith_CommRing_getPolyConst___spec__2___closed__8; +x_17 = l___private_Lean_Meta_Tactic_Grind_Arith_CommRing_DenoteExpr_0__Lean_Meta_Grind_Arith_CommRing_denoteNum___at_Lean_Meta_Grind_Arith_CommRing_getPolyConst___spec__2(x_16, x_2, x_3, x_4, x_5, x_6, x_7, x_8, x_9, x_10, x_15); +if (lean_obj_tag(x_17) == 0) +{ +lean_object* x_18; lean_object* x_19; lean_object* x_20; x_18 = lean_ctor_get(x_17, 0); lean_inc(x_18); -x_19 = lean_ctor_get(x_18, 3); +x_19 = lean_ctor_get(x_17, 1); lean_inc(x_19); -x_20 = !lean_is_exclusive(x_17); -if (x_20 == 0) +lean_dec(x_17); +x_20 = l___private_Lean_Meta_Tactic_Grind_Arith_CommRing_DenoteExpr_0__Lean_Meta_Grind_Arith_CommRing_mkEq___at_Lean_Meta_Grind_Arith_CommRing_Null_setEqUnsat___spec__5(x_14, x_18, x_2, x_3, x_4, x_5, x_6, x_7, x_8, x_9, x_10, x_19); +return x_20; +} +else { -lean_object* x_21; lean_object* x_22; uint8_t x_23; -x_21 = lean_ctor_get(x_17, 1); +uint8_t x_21; +lean_dec(x_14); +x_21 = !lean_is_exclusive(x_17); +if (x_21 == 0) +{ +return x_17; +} +else +{ +lean_object* x_22; lean_object* x_23; lean_object* x_24; x_22 = lean_ctor_get(x_17, 0); -lean_dec(x_22); -x_23 = !lean_is_exclusive(x_18); -if (x_23 == 0) +x_23 = lean_ctor_get(x_17, 1); +lean_inc(x_23); +lean_inc(x_22); +lean_dec(x_17); +x_24 = lean_alloc_ctor(1, 2, 0); +lean_ctor_set(x_24, 0, x_22); +lean_ctor_set(x_24, 1, x_23); +return x_24; +} +} +} +else { -lean_object* x_24; uint8_t x_25; -x_24 = lean_ctor_get(x_18, 3); -lean_dec(x_24); -x_25 = !lean_is_exclusive(x_19); +uint8_t x_25; +x_25 = !lean_is_exclusive(x_13); if (x_25 == 0) { -lean_object* x_26; double x_27; uint8_t x_28; lean_object* x_29; lean_object* x_30; lean_object* x_31; lean_object* x_32; lean_object* x_33; lean_object* x_34; uint8_t x_35; -x_26 = lean_ctor_get(x_19, 0); -x_27 = l_Lean_addTrace___at_Lean_Meta_Grind_Arith_CommRing_PolyDerivation_toPreNullCert___spec__2___closed__1; -x_28 = 0; -x_29 = l_Lean_Meta_Grind_Arith_CommRing_throwNoNatZeroDivisors___rarg___closed__3; -x_30 = lean_alloc_ctor(0, 2, 17); -lean_ctor_set(x_30, 0, x_1); -lean_ctor_set(x_30, 1, x_29); -lean_ctor_set_float(x_30, sizeof(void*)*2, x_27); -lean_ctor_set_float(x_30, sizeof(void*)*2 + 8, x_27); -lean_ctor_set_uint8(x_30, sizeof(void*)*2 + 16, x_28); -x_31 = l_Lean_Meta_Grind_Arith_CommRing_Null_PreNullCert_zero___closed__1; -x_32 = lean_alloc_ctor(9, 3, 0); -lean_ctor_set(x_32, 0, x_30); -lean_ctor_set(x_32, 1, x_15); -lean_ctor_set(x_32, 2, x_31); -lean_inc(x_13); -lean_ctor_set(x_17, 1, x_32); -lean_ctor_set(x_17, 0, x_13); -x_33 = 
l_Lean_PersistentArray_push___rarg(x_26, x_17); -lean_ctor_set(x_19, 0, x_33); -x_34 = lean_st_ref_set(x_11, x_18, x_21); -x_35 = !lean_is_exclusive(x_34); -if (x_35 == 0) -{ -lean_object* x_36; lean_object* x_37; -x_36 = lean_ctor_get(x_34, 0); -lean_dec(x_36); -x_37 = lean_box(0); -lean_ctor_set(x_34, 0, x_37); -return x_34; +return x_13; } else { -lean_object* x_38; lean_object* x_39; lean_object* x_40; -x_38 = lean_ctor_get(x_34, 1); -lean_inc(x_38); -lean_dec(x_34); -x_39 = lean_box(0); -x_40 = lean_alloc_ctor(0, 2, 0); -lean_ctor_set(x_40, 0, x_39); -lean_ctor_set(x_40, 1, x_38); -return x_40; +lean_object* x_26; lean_object* x_27; lean_object* x_28; +x_26 = lean_ctor_get(x_13, 0); +x_27 = lean_ctor_get(x_13, 1); +lean_inc(x_27); +lean_inc(x_26); +lean_dec(x_13); +x_28 = lean_alloc_ctor(1, 2, 0); +lean_ctor_set(x_28, 0, x_26); +lean_ctor_set(x_28, 1, x_27); +return x_28; } } -else +} +} +static lean_object* _init_l_Lean_Meta_Grind_Arith_CommRing_Null_setEqUnsat___lambda__1___closed__1() { +_start: { -uint64_t x_41; lean_object* x_42; double x_43; uint8_t x_44; lean_object* x_45; lean_object* x_46; lean_object* x_47; lean_object* x_48; lean_object* x_49; lean_object* x_50; lean_object* x_51; lean_object* x_52; lean_object* x_53; lean_object* x_54; lean_object* x_55; -x_41 = lean_ctor_get_uint64(x_19, sizeof(void*)*1); -x_42 = lean_ctor_get(x_19, 0); -lean_inc(x_42); -lean_dec(x_19); -x_43 = l_Lean_addTrace___at_Lean_Meta_Grind_Arith_CommRing_PolyDerivation_toPreNullCert___spec__2___closed__1; -x_44 = 0; -x_45 = l_Lean_Meta_Grind_Arith_CommRing_throwNoNatZeroDivisors___rarg___closed__3; -x_46 = lean_alloc_ctor(0, 2, 17); -lean_ctor_set(x_46, 0, x_1); -lean_ctor_set(x_46, 1, x_45); -lean_ctor_set_float(x_46, sizeof(void*)*2, x_43); -lean_ctor_set_float(x_46, sizeof(void*)*2 + 8, x_43); -lean_ctor_set_uint8(x_46, sizeof(void*)*2 + 16, x_44); -x_47 = l_Lean_Meta_Grind_Arith_CommRing_Null_PreNullCert_zero___closed__1; -x_48 = lean_alloc_ctor(9, 3, 0); -lean_ctor_set(x_48, 0, x_46); -lean_ctor_set(x_48, 1, x_15); -lean_ctor_set(x_48, 2, x_47); -lean_inc(x_13); -lean_ctor_set(x_17, 1, x_48); -lean_ctor_set(x_17, 0, x_13); -x_49 = l_Lean_PersistentArray_push___rarg(x_42, x_17); -x_50 = lean_alloc_ctor(0, 1, 8); -lean_ctor_set(x_50, 0, x_49); -lean_ctor_set_uint64(x_50, sizeof(void*)*1, x_41); -lean_ctor_set(x_18, 3, x_50); -x_51 = lean_st_ref_set(x_11, x_18, x_21); -x_52 = lean_ctor_get(x_51, 1); -lean_inc(x_52); -if (lean_is_exclusive(x_51)) { - lean_ctor_release(x_51, 0); - lean_ctor_release(x_51, 1); - x_53 = x_51; -} else { - lean_dec_ref(x_51); - x_53 = lean_box(0); +lean_object* x_1; +x_1 = lean_mk_string_unchecked("`grind` internal error, `IsCharP` insrtance is needed, but it is not available for", 82, 82); +return x_1; +} +} +static lean_object* _init_l_Lean_Meta_Grind_Arith_CommRing_Null_setEqUnsat___lambda__1___closed__2() { +_start: +{ +lean_object* x_1; lean_object* x_2; +x_1 = l_Lean_Meta_Grind_Arith_CommRing_Null_setEqUnsat___lambda__1___closed__1; +x_2 = l_Lean_stringToMessageData(x_1); +return x_2; } -x_54 = lean_box(0); -if (lean_is_scalar(x_53)) { - x_55 = lean_alloc_ctor(0, 2, 0); -} else { - x_55 = x_53; } -lean_ctor_set(x_55, 0, x_54); -lean_ctor_set(x_55, 1, x_52); -return x_55; +static lean_object* _init_l_Lean_Meta_Grind_Arith_CommRing_Null_setEqUnsat___lambda__1___closed__3() { +_start: +{ +lean_object* x_1; +x_1 = lean_mk_string_unchecked("Neg", 3, 3); +return x_1; } } -else +static lean_object* 
_init_l_Lean_Meta_Grind_Arith_CommRing_Null_setEqUnsat___lambda__1___closed__4() { +_start: { -lean_object* x_56; lean_object* x_57; lean_object* x_58; lean_object* x_59; lean_object* x_60; lean_object* x_61; lean_object* x_62; uint64_t x_63; lean_object* x_64; lean_object* x_65; double x_66; uint8_t x_67; lean_object* x_68; lean_object* x_69; lean_object* x_70; lean_object* x_71; lean_object* x_72; lean_object* x_73; lean_object* x_74; lean_object* x_75; lean_object* x_76; lean_object* x_77; lean_object* x_78; lean_object* x_79; -x_56 = lean_ctor_get(x_18, 0); -x_57 = lean_ctor_get(x_18, 1); -x_58 = lean_ctor_get(x_18, 2); -x_59 = lean_ctor_get(x_18, 4); -x_60 = lean_ctor_get(x_18, 5); -x_61 = lean_ctor_get(x_18, 6); -x_62 = lean_ctor_get(x_18, 7); -lean_inc(x_62); -lean_inc(x_61); -lean_inc(x_60); -lean_inc(x_59); -lean_inc(x_58); -lean_inc(x_57); -lean_inc(x_56); -lean_dec(x_18); -x_63 = lean_ctor_get_uint64(x_19, sizeof(void*)*1); -x_64 = lean_ctor_get(x_19, 0); -lean_inc(x_64); -if (lean_is_exclusive(x_19)) { - lean_ctor_release(x_19, 0); - x_65 = x_19; -} else { - lean_dec_ref(x_19); - x_65 = lean_box(0); +lean_object* x_1; +x_1 = lean_mk_string_unchecked("neg", 3, 3); +return x_1; } -x_66 = l_Lean_addTrace___at_Lean_Meta_Grind_Arith_CommRing_PolyDerivation_toPreNullCert___spec__2___closed__1; -x_67 = 0; -x_68 = l_Lean_Meta_Grind_Arith_CommRing_throwNoNatZeroDivisors___rarg___closed__3; -x_69 = lean_alloc_ctor(0, 2, 17); -lean_ctor_set(x_69, 0, x_1); -lean_ctor_set(x_69, 1, x_68); -lean_ctor_set_float(x_69, sizeof(void*)*2, x_66); -lean_ctor_set_float(x_69, sizeof(void*)*2 + 8, x_66); -lean_ctor_set_uint8(x_69, sizeof(void*)*2 + 16, x_67); -x_70 = l_Lean_Meta_Grind_Arith_CommRing_Null_PreNullCert_zero___closed__1; -x_71 = lean_alloc_ctor(9, 3, 0); -lean_ctor_set(x_71, 0, x_69); -lean_ctor_set(x_71, 1, x_15); -lean_ctor_set(x_71, 2, x_70); -lean_inc(x_13); -lean_ctor_set(x_17, 1, x_71); -lean_ctor_set(x_17, 0, x_13); -x_72 = l_Lean_PersistentArray_push___rarg(x_64, x_17); -if (lean_is_scalar(x_65)) { - x_73 = lean_alloc_ctor(0, 1, 8); -} else { - x_73 = x_65; } -lean_ctor_set(x_73, 0, x_72); -lean_ctor_set_uint64(x_73, sizeof(void*)*1, x_63); -x_74 = lean_alloc_ctor(0, 8, 0); -lean_ctor_set(x_74, 0, x_56); -lean_ctor_set(x_74, 1, x_57); -lean_ctor_set(x_74, 2, x_58); -lean_ctor_set(x_74, 3, x_73); -lean_ctor_set(x_74, 4, x_59); -lean_ctor_set(x_74, 5, x_60); -lean_ctor_set(x_74, 6, x_61); -lean_ctor_set(x_74, 7, x_62); -x_75 = lean_st_ref_set(x_11, x_74, x_21); -x_76 = lean_ctor_get(x_75, 1); -lean_inc(x_76); -if (lean_is_exclusive(x_75)) { - lean_ctor_release(x_75, 0); - lean_ctor_release(x_75, 1); - x_77 = x_75; -} else { - lean_dec_ref(x_75); - x_77 = lean_box(0); +static lean_object* _init_l_Lean_Meta_Grind_Arith_CommRing_Null_setEqUnsat___lambda__1___closed__5() { +_start: +{ +lean_object* x_1; lean_object* x_2; lean_object* x_3; +x_1 = l_Lean_Meta_Grind_Arith_CommRing_Null_setEqUnsat___lambda__1___closed__3; +x_2 = l_Lean_Meta_Grind_Arith_CommRing_Null_setEqUnsat___lambda__1___closed__4; +x_3 = l_Lean_Name_mkStr2(x_1, x_2); +return x_3; } -x_78 = lean_box(0); -if (lean_is_scalar(x_77)) { - x_79 = lean_alloc_ctor(0, 2, 0); -} else { - x_79 = x_77; } -lean_ctor_set(x_79, 0, x_78); -lean_ctor_set(x_79, 1, x_76); -return x_79; +static lean_object* _init_l_Lean_Meta_Grind_Arith_CommRing_Null_setEqUnsat___lambda__1___closed__6() { +_start: +{ +lean_object* x_1; lean_object* x_2; +x_1 = lean_unsigned_to_nat(0u); +x_2 = l_Lean_Level_ofNat(x_1); +return x_2; } } -else +static 
lean_object* _init_l_Lean_Meta_Grind_Arith_CommRing_Null_setEqUnsat___lambda__1___closed__7() { +_start: { -lean_object* x_80; lean_object* x_81; lean_object* x_82; lean_object* x_83; lean_object* x_84; lean_object* x_85; lean_object* x_86; lean_object* x_87; lean_object* x_88; uint64_t x_89; lean_object* x_90; lean_object* x_91; double x_92; uint8_t x_93; lean_object* x_94; lean_object* x_95; lean_object* x_96; lean_object* x_97; lean_object* x_98; lean_object* x_99; lean_object* x_100; lean_object* x_101; lean_object* x_102; lean_object* x_103; lean_object* x_104; lean_object* x_105; lean_object* x_106; -x_80 = lean_ctor_get(x_17, 1); -lean_inc(x_80); -lean_dec(x_17); -x_81 = lean_ctor_get(x_18, 0); -lean_inc(x_81); -x_82 = lean_ctor_get(x_18, 1); -lean_inc(x_82); -x_83 = lean_ctor_get(x_18, 2); -lean_inc(x_83); -x_84 = lean_ctor_get(x_18, 4); -lean_inc(x_84); -x_85 = lean_ctor_get(x_18, 5); -lean_inc(x_85); -x_86 = lean_ctor_get(x_18, 6); -lean_inc(x_86); -x_87 = lean_ctor_get(x_18, 7); -lean_inc(x_87); -if (lean_is_exclusive(x_18)) { - lean_ctor_release(x_18, 0); - lean_ctor_release(x_18, 1); - lean_ctor_release(x_18, 2); - lean_ctor_release(x_18, 3); - lean_ctor_release(x_18, 4); - lean_ctor_release(x_18, 5); - lean_ctor_release(x_18, 6); - lean_ctor_release(x_18, 7); - x_88 = x_18; -} else { - lean_dec_ref(x_18); - x_88 = lean_box(0); +lean_object* x_1; lean_object* x_2; lean_object* x_3; +x_1 = lean_box(0); +x_2 = l_Lean_Meta_Grind_Arith_CommRing_Null_setEqUnsat___lambda__1___closed__6; +x_3 = lean_alloc_ctor(1, 2, 0); +lean_ctor_set(x_3, 0, x_2); +lean_ctor_set(x_3, 1, x_1); +return x_3; } -x_89 = lean_ctor_get_uint64(x_19, sizeof(void*)*1); -x_90 = lean_ctor_get(x_19, 0); -lean_inc(x_90); -if (lean_is_exclusive(x_19)) { - lean_ctor_release(x_19, 0); - x_91 = x_19; -} else { - lean_dec_ref(x_19); - x_91 = lean_box(0); } -x_92 = l_Lean_addTrace___at_Lean_Meta_Grind_Arith_CommRing_PolyDerivation_toPreNullCert___spec__2___closed__1; -x_93 = 0; -x_94 = l_Lean_Meta_Grind_Arith_CommRing_throwNoNatZeroDivisors___rarg___closed__3; -x_95 = lean_alloc_ctor(0, 2, 17); -lean_ctor_set(x_95, 0, x_1); -lean_ctor_set(x_95, 1, x_94); -lean_ctor_set_float(x_95, sizeof(void*)*2, x_92); -lean_ctor_set_float(x_95, sizeof(void*)*2 + 8, x_92); -lean_ctor_set_uint8(x_95, sizeof(void*)*2 + 16, x_93); -x_96 = l_Lean_Meta_Grind_Arith_CommRing_Null_PreNullCert_zero___closed__1; -x_97 = lean_alloc_ctor(9, 3, 0); -lean_ctor_set(x_97, 0, x_95); -lean_ctor_set(x_97, 1, x_15); -lean_ctor_set(x_97, 2, x_96); -lean_inc(x_13); -x_98 = lean_alloc_ctor(0, 2, 0); -lean_ctor_set(x_98, 0, x_13); -lean_ctor_set(x_98, 1, x_97); -x_99 = l_Lean_PersistentArray_push___rarg(x_90, x_98); -if (lean_is_scalar(x_91)) { - x_100 = lean_alloc_ctor(0, 1, 8); -} else { - x_100 = x_91; +static lean_object* _init_l_Lean_Meta_Grind_Arith_CommRing_Null_setEqUnsat___lambda__1___closed__8() { +_start: +{ +lean_object* x_1; lean_object* x_2; lean_object* x_3; +x_1 = l_Lean_Meta_Grind_Arith_CommRing_Null_setEqUnsat___lambda__1___closed__5; +x_2 = l_Lean_Meta_Grind_Arith_CommRing_Null_setEqUnsat___lambda__1___closed__7; +x_3 = l_Lean_Expr_const___override(x_1, x_2); +return x_3; } -lean_ctor_set(x_100, 0, x_99); -lean_ctor_set_uint64(x_100, sizeof(void*)*1, x_89); -if (lean_is_scalar(x_88)) { - x_101 = lean_alloc_ctor(0, 8, 0); -} else { - x_101 = x_88; } -lean_ctor_set(x_101, 0, x_81); -lean_ctor_set(x_101, 1, x_82); -lean_ctor_set(x_101, 2, x_83); -lean_ctor_set(x_101, 3, x_100); -lean_ctor_set(x_101, 4, x_84); -lean_ctor_set(x_101, 5, x_85); 
-lean_ctor_set(x_101, 6, x_86); -lean_ctor_set(x_101, 7, x_87); -x_102 = lean_st_ref_set(x_11, x_101, x_80); -x_103 = lean_ctor_get(x_102, 1); -lean_inc(x_103); -if (lean_is_exclusive(x_102)) { - lean_ctor_release(x_102, 0); - lean_ctor_release(x_102, 1); - x_104 = x_102; -} else { - lean_dec_ref(x_102); - x_104 = lean_box(0); +static lean_object* _init_l_Lean_Meta_Grind_Arith_CommRing_Null_setEqUnsat___lambda__1___closed__9() { +_start: +{ +lean_object* x_1; +x_1 = lean_mk_string_unchecked("Int", 3, 3); +return x_1; +} +} +static lean_object* _init_l_Lean_Meta_Grind_Arith_CommRing_Null_setEqUnsat___lambda__1___closed__10() { +_start: +{ +lean_object* x_1; lean_object* x_2; lean_object* x_3; +x_1 = lean_box(0); +x_2 = l_Lean_Meta_Grind_Arith_CommRing_Null_setEqUnsat___lambda__1___closed__9; +x_3 = l_Lean_Name_str___override(x_1, x_2); +return x_3; +} +} +static lean_object* _init_l_Lean_Meta_Grind_Arith_CommRing_Null_setEqUnsat___lambda__1___closed__11() { +_start: +{ +lean_object* x_1; lean_object* x_2; lean_object* x_3; +x_1 = lean_box(0); +x_2 = l_Lean_Meta_Grind_Arith_CommRing_Null_setEqUnsat___lambda__1___closed__10; +x_3 = l_Lean_Expr_const___override(x_2, x_1); +return x_3; +} +} +static lean_object* _init_l_Lean_Meta_Grind_Arith_CommRing_Null_setEqUnsat___lambda__1___closed__12() { +_start: +{ +lean_object* x_1; +x_1 = lean_mk_string_unchecked("instNegInt", 10, 10); +return x_1; } -x_105 = lean_box(0); -if (lean_is_scalar(x_104)) { - x_106 = lean_alloc_ctor(0, 2, 0); -} else { - x_106 = x_104; } -lean_ctor_set(x_106, 0, x_105); -lean_ctor_set(x_106, 1, x_103); -return x_106; +static lean_object* _init_l_Lean_Meta_Grind_Arith_CommRing_Null_setEqUnsat___lambda__1___closed__13() { +_start: +{ +lean_object* x_1; lean_object* x_2; lean_object* x_3; +x_1 = l_Lean_Meta_Grind_Arith_CommRing_Null_setEqUnsat___lambda__1___closed__9; +x_2 = l_Lean_Meta_Grind_Arith_CommRing_Null_setEqUnsat___lambda__1___closed__12; +x_3 = l_Lean_Name_mkStr2(x_1, x_2); +return x_3; } } +static lean_object* _init_l_Lean_Meta_Grind_Arith_CommRing_Null_setEqUnsat___lambda__1___closed__14() { +_start: +{ +lean_object* x_1; lean_object* x_2; lean_object* x_3; +x_1 = lean_box(0); +x_2 = l_Lean_Meta_Grind_Arith_CommRing_Null_setEqUnsat___lambda__1___closed__13; +x_3 = l_Lean_Expr_const___override(x_2, x_1); +return x_3; } -static lean_object* _init_l_Lean_Meta_Grind_Arith_CommRing_Null_setEqUnsat___lambda__1___closed__1() { +} +static lean_object* _init_l_Lean_Meta_Grind_Arith_CommRing_Null_setEqUnsat___lambda__1___closed__15() { _start: { lean_object* x_1; -x_1 = lean_mk_string_unchecked("`grind` internal error, `IsCharP` insrtance is needed, but it is not available for", 82, 82); +x_1 = lean_mk_string_unchecked("NullCert", 8, 8); return x_1; } } -static lean_object* _init_l_Lean_Meta_Grind_Arith_CommRing_Null_setEqUnsat___lambda__1___closed__2() { +static lean_object* _init_l_Lean_Meta_Grind_Arith_CommRing_Null_setEqUnsat___lambda__1___closed__16() { _start: { -lean_object* x_1; lean_object* x_2; -x_1 = l_Lean_Meta_Grind_Arith_CommRing_Null_setEqUnsat___lambda__1___closed__1; -x_2 = l_Lean_stringToMessageData(x_1); -return x_2; +lean_object* x_1; +x_1 = lean_mk_string_unchecked("eq_unsatC", 9, 9); +return x_1; } } -static lean_object* _init_l_Lean_Meta_Grind_Arith_CommRing_Null_setEqUnsat___lambda__1___closed__3() { +static lean_object* _init_l_Lean_Meta_Grind_Arith_CommRing_Null_setEqUnsat___lambda__1___closed__17() { _start: { -lean_object* x_1; -x_1 = lean_mk_string_unchecked("Neg", 3, 3); -return x_1; 
+lean_object* x_1; lean_object* x_2; lean_object* x_3; lean_object* x_4; lean_object* x_5; lean_object* x_6; +x_1 = l___private_Lean_Meta_Tactic_Grind_Arith_CommRing_DenoteExpr_0__Lean_Meta_Grind_Arith_CommRing_denoteNum___at_Lean_Meta_Grind_Arith_CommRing_getPolyConst___spec__2___closed__1; +x_2 = l___private_Lean_Meta_Tactic_Grind_Arith_CommRing_DenoteExpr_0__Lean_Meta_Grind_Arith_CommRing_denoteNum___at_Lean_Meta_Grind_Arith_CommRing_getPolyConst___spec__2___closed__2; +x_3 = l___private_Lean_Meta_Tactic_Grind_Arith_CommRing_DenoteExpr_0__Lean_Meta_Grind_Arith_CommRing_denoteNum___at_Lean_Meta_Grind_Arith_CommRing_getPolyConst___spec__2___closed__3; +x_4 = l_Lean_Meta_Grind_Arith_CommRing_Null_setEqUnsat___lambda__1___closed__15; +x_5 = l_Lean_Meta_Grind_Arith_CommRing_Null_setEqUnsat___lambda__1___closed__16; +x_6 = l_Lean_Name_mkStr5(x_1, x_2, x_3, x_4, x_5); +return x_6; } } -static lean_object* _init_l_Lean_Meta_Grind_Arith_CommRing_Null_setEqUnsat___lambda__1___closed__4() { +static lean_object* _init_l_Lean_Meta_Grind_Arith_CommRing_Null_setEqUnsat___lambda__1___closed__18() { _start: { lean_object* x_1; -x_1 = lean_mk_string_unchecked("neg", 3, 3); +x_1 = lean_mk_string_unchecked("eq_unsat", 8, 8); return x_1; } } -static lean_object* _init_l_Lean_Meta_Grind_Arith_CommRing_Null_setEqUnsat___lambda__1___closed__5() { +static lean_object* _init_l_Lean_Meta_Grind_Arith_CommRing_Null_setEqUnsat___lambda__1___closed__19() { +_start: +{ +lean_object* x_1; lean_object* x_2; lean_object* x_3; lean_object* x_4; lean_object* x_5; lean_object* x_6; +x_1 = l___private_Lean_Meta_Tactic_Grind_Arith_CommRing_DenoteExpr_0__Lean_Meta_Grind_Arith_CommRing_denoteNum___at_Lean_Meta_Grind_Arith_CommRing_getPolyConst___spec__2___closed__1; +x_2 = l___private_Lean_Meta_Tactic_Grind_Arith_CommRing_DenoteExpr_0__Lean_Meta_Grind_Arith_CommRing_denoteNum___at_Lean_Meta_Grind_Arith_CommRing_getPolyConst___spec__2___closed__2; +x_3 = l___private_Lean_Meta_Tactic_Grind_Arith_CommRing_DenoteExpr_0__Lean_Meta_Grind_Arith_CommRing_denoteNum___at_Lean_Meta_Grind_Arith_CommRing_getPolyConst___spec__2___closed__3; +x_4 = l_Lean_Meta_Grind_Arith_CommRing_Null_setEqUnsat___lambda__1___closed__15; +x_5 = l_Lean_Meta_Grind_Arith_CommRing_Null_setEqUnsat___lambda__1___closed__18; +x_6 = l_Lean_Name_mkStr5(x_1, x_2, x_3, x_4, x_5); +return x_6; +} +} +LEAN_EXPORT lean_object* l_Lean_Meta_Grind_Arith_CommRing_Null_setEqUnsat___lambda__1(lean_object* x_1, lean_object* x_2, lean_object* x_3, lean_object* x_4, lean_object* x_5, lean_object* x_6, lean_object* x_7, lean_object* x_8, lean_object* x_9, lean_object* x_10, lean_object* x_11, lean_object* x_12, lean_object* x_13) { _start: { -lean_object* x_1; lean_object* x_2; lean_object* x_3; -x_1 = l_Lean_Meta_Grind_Arith_CommRing_Null_setEqUnsat___lambda__1___closed__3; -x_2 = l_Lean_Meta_Grind_Arith_CommRing_Null_setEqUnsat___lambda__1___closed__4; -x_3 = l_Lean_Name_mkStr2(x_1, x_2); -return x_3; -} +lean_object* x_14; +x_14 = l_Lean_Meta_Grind_Arith_CommRing_RingM_getRing(x_4, x_5, x_6, x_7, x_8, x_9, x_10, x_11, x_12, x_13); +if (lean_obj_tag(x_14) == 0) +{ +lean_object* x_15; lean_object* x_16; +x_15 = lean_ctor_get(x_14, 0); +lean_inc(x_15); +x_16 = lean_ctor_get(x_15, 4); +lean_inc(x_16); +if (lean_obj_tag(x_16) == 0) +{ +lean_object* x_17; lean_object* x_18; +lean_dec(x_15); +x_17 = lean_ctor_get(x_14, 1); +lean_inc(x_17); +lean_dec(x_14); +x_18 = l_Lean_Meta_Grind_Arith_CommRing_RingM_getRing(x_4, x_5, x_6, x_7, x_8, x_9, x_10, x_11, x_12, x_17); +if 
(lean_obj_tag(x_18) == 0) +{ +lean_object* x_19; lean_object* x_20; lean_object* x_21; lean_object* x_22; lean_object* x_23; lean_object* x_24; lean_object* x_25; lean_object* x_26; lean_object* x_27; +x_19 = lean_ctor_get(x_18, 0); +lean_inc(x_19); +x_20 = lean_ctor_get(x_18, 1); +lean_inc(x_20); +lean_dec(x_18); +x_21 = lean_ctor_get(x_19, 1); +lean_inc(x_21); +lean_dec(x_19); +x_22 = l_Lean_indentExpr(x_21); +x_23 = l_Lean_Meta_Grind_Arith_CommRing_Null_setEqUnsat___lambda__1___closed__2; +x_24 = lean_alloc_ctor(7, 2, 0); +lean_ctor_set(x_24, 0, x_23); +lean_ctor_set(x_24, 1, x_22); +x_25 = l_Lean_Meta_Grind_Arith_CommRing_throwNoNatZeroDivisors___rarg___closed__4; +x_26 = lean_alloc_ctor(7, 2, 0); +lean_ctor_set(x_26, 0, x_24); +lean_ctor_set(x_26, 1, x_25); +x_27 = l_Lean_throwError___at_Lean_Meta_Grind_Arith_CommRing_Null_setEqUnsat___spec__2(x_26, x_4, x_5, x_6, x_7, x_8, x_9, x_10, x_11, x_12, x_20); +lean_dec(x_12); +lean_dec(x_11); +lean_dec(x_10); +lean_dec(x_9); +lean_dec(x_8); +lean_dec(x_7); +lean_dec(x_6); +lean_dec(x_5); +return x_27; +} +else +{ +uint8_t x_28; +lean_dec(x_12); +lean_dec(x_11); +lean_dec(x_10); +lean_dec(x_9); +lean_dec(x_8); +lean_dec(x_7); +lean_dec(x_6); +lean_dec(x_5); +x_28 = !lean_is_exclusive(x_18); +if (x_28 == 0) +{ +return x_18; +} +else +{ +lean_object* x_29; lean_object* x_30; lean_object* x_31; +x_29 = lean_ctor_get(x_18, 0); +x_30 = lean_ctor_get(x_18, 1); +lean_inc(x_30); +lean_inc(x_29); +lean_dec(x_18); +x_31 = lean_alloc_ctor(1, 2, 0); +lean_ctor_set(x_31, 0, x_29); +lean_ctor_set(x_31, 1, x_30); +return x_31; +} +} +} +else +{ +lean_object* x_32; lean_object* x_33; uint8_t x_34; +x_32 = lean_ctor_get(x_16, 0); +lean_inc(x_32); +lean_dec(x_16); +x_33 = lean_ctor_get(x_14, 1); +lean_inc(x_33); +lean_dec(x_14); +x_34 = !lean_is_exclusive(x_32); +if (x_34 == 0) +{ +lean_object* x_35; lean_object* x_36; lean_object* x_37; lean_object* x_70; uint8_t x_71; +x_35 = lean_ctor_get(x_32, 0); +x_36 = lean_ctor_get(x_32, 1); +x_70 = lean_unsigned_to_nat(0u); +x_71 = lean_nat_dec_eq(x_36, x_70); +if (x_71 == 0) +{ +lean_object* x_72; lean_object* x_73; lean_object* x_74; lean_object* x_75; lean_object* x_76; lean_object* x_77; lean_object* x_78; +x_72 = lean_ctor_get(x_15, 2); +lean_inc(x_72); +x_73 = lean_box(0); +lean_ctor_set_tag(x_32, 1); +lean_ctor_set(x_32, 1, x_73); +lean_ctor_set(x_32, 0, x_72); +x_74 = l_Lean_Meta_Grind_Arith_CommRing_Null_setEqUnsat___lambda__1___closed__17; +x_75 = l_Lean_Expr_const___override(x_74, x_32); +x_76 = lean_ctor_get(x_15, 1); +lean_inc(x_76); +x_77 = l_Lean_mkNatLit(x_36); +x_78 = l_Lean_mkAppB(x_75, x_76, x_77); +x_37 = x_78; +goto block_69; +} +else +{ +lean_object* x_79; lean_object* x_80; lean_object* x_81; lean_object* x_82; lean_object* x_83; lean_object* x_84; +lean_dec(x_36); +x_79 = lean_ctor_get(x_15, 2); +lean_inc(x_79); +x_80 = lean_box(0); +lean_ctor_set_tag(x_32, 1); +lean_ctor_set(x_32, 1, x_80); +lean_ctor_set(x_32, 0, x_79); +x_81 = l_Lean_Meta_Grind_Arith_CommRing_Null_setEqUnsat___lambda__1___closed__19; +x_82 = l_Lean_Expr_const___override(x_81, x_32); +x_83 = lean_ctor_get(x_15, 1); +lean_inc(x_83); +x_84 = l_Lean_Expr_app___override(x_82, x_83); +x_37 = x_84; +goto block_69; +} +block_69: +{ +lean_object* x_38; +lean_inc(x_12); +lean_inc(x_11); +lean_inc(x_10); +lean_inc(x_9); +x_38 = l_Lean_Meta_Grind_Arith_CommRing_toContextExpr(x_4, x_5, x_6, x_7, x_8, x_9, x_10, x_11, x_12, x_33); +if (lean_obj_tag(x_38) == 0) +{ +lean_object* x_39; lean_object* x_40; lean_object* x_41; lean_object* 
x_42; lean_object* x_43; lean_object* x_44; uint8_t x_45; +x_39 = lean_ctor_get(x_38, 0); +lean_inc(x_39); +x_40 = lean_ctor_get(x_38, 1); +lean_inc(x_40); +lean_dec(x_38); +x_41 = l_Lean_Meta_Grind_Arith_CommRing_Null_NullCertExt_toNullCert(x_1); +x_42 = l_Lean_Meta_Grind_Arith_CommRing_ofNullCert(x_41); +x_43 = lean_ctor_get(x_15, 3); +lean_inc(x_43); +lean_dec(x_15); +x_44 = l___private_Lean_Meta_Tactic_Grind_Arith_CommRing_DenoteExpr_0__Lean_Meta_Grind_Arith_CommRing_denoteNum___at_Lean_Meta_Grind_Arith_CommRing_getPolyConst___spec__2___closed__8; +x_45 = lean_int_dec_le(x_44, x_2); +if (x_45 == 0) +{ +lean_object* x_46; lean_object* x_47; lean_object* x_48; lean_object* x_49; lean_object* x_50; lean_object* x_51; lean_object* x_52; lean_object* x_53; lean_object* x_54; lean_object* x_55; lean_object* x_56; lean_object* x_57; +x_46 = lean_int_neg(x_2); +x_47 = l_Int_toNat(x_46); +lean_dec(x_46); +x_48 = l_Lean_instToExprInt_mkNat(x_47); +x_49 = l_Lean_Meta_Grind_Arith_CommRing_Null_setEqUnsat___lambda__1___closed__8; +x_50 = l_Lean_Meta_Grind_Arith_CommRing_Null_setEqUnsat___lambda__1___closed__11; +x_51 = l_Lean_Meta_Grind_Arith_CommRing_Null_setEqUnsat___lambda__1___closed__14; +x_52 = l_Lean_mkApp3(x_49, x_50, x_51, x_48); +x_53 = l_Lean_reflBoolTrue; +x_54 = l_Lean_mkApp6(x_37, x_43, x_35, x_39, x_42, x_52, x_53); +x_55 = lean_unsigned_to_nat(0u); +x_56 = l_Lean_Meta_Grind_Arith_CommRing_Null_NullCertExt_applyEqs_go(x_1, x_55, x_54); +x_57 = l_Lean_Meta_Grind_closeGoal(x_56, x_5, x_6, x_7, x_8, x_9, x_10, x_11, x_12, x_40); +return x_57; +} +else +{ +lean_object* x_58; lean_object* x_59; lean_object* x_60; lean_object* x_61; lean_object* x_62; lean_object* x_63; lean_object* x_64; +x_58 = l_Int_toNat(x_2); +x_59 = l_Lean_instToExprInt_mkNat(x_58); +x_60 = l_Lean_reflBoolTrue; +x_61 = l_Lean_mkApp6(x_37, x_43, x_35, x_39, x_42, x_59, x_60); +x_62 = lean_unsigned_to_nat(0u); +x_63 = l_Lean_Meta_Grind_Arith_CommRing_Null_NullCertExt_applyEqs_go(x_1, x_62, x_61); +x_64 = l_Lean_Meta_Grind_closeGoal(x_63, x_5, x_6, x_7, x_8, x_9, x_10, x_11, x_12, x_40); +return x_64; +} +} +else +{ +uint8_t x_65; +lean_dec(x_37); +lean_dec(x_35); +lean_dec(x_15); +lean_dec(x_12); +lean_dec(x_11); +lean_dec(x_10); +lean_dec(x_9); +lean_dec(x_8); +lean_dec(x_7); +lean_dec(x_6); +lean_dec(x_5); +x_65 = !lean_is_exclusive(x_38); +if (x_65 == 0) +{ +return x_38; } -static lean_object* _init_l_Lean_Meta_Grind_Arith_CommRing_Null_setEqUnsat___lambda__1___closed__6() { -_start: +else { -lean_object* x_1; lean_object* x_2; -x_1 = lean_unsigned_to_nat(0u); -x_2 = l_Lean_Level_ofNat(x_1); -return x_2; +lean_object* x_66; lean_object* x_67; lean_object* x_68; +x_66 = lean_ctor_get(x_38, 0); +x_67 = lean_ctor_get(x_38, 1); +lean_inc(x_67); +lean_inc(x_66); +lean_dec(x_38); +x_68 = lean_alloc_ctor(1, 2, 0); +lean_ctor_set(x_68, 0, x_66); +lean_ctor_set(x_68, 1, x_67); +return x_68; } } -static lean_object* _init_l_Lean_Meta_Grind_Arith_CommRing_Null_setEqUnsat___lambda__1___closed__7() { -_start: -{ -lean_object* x_1; lean_object* x_2; lean_object* x_3; -x_1 = lean_box(0); -x_2 = l_Lean_Meta_Grind_Arith_CommRing_Null_setEqUnsat___lambda__1___closed__6; -x_3 = lean_alloc_ctor(1, 2, 0); -lean_ctor_set(x_3, 0, x_2); -lean_ctor_set(x_3, 1, x_1); -return x_3; } } -static lean_object* _init_l_Lean_Meta_Grind_Arith_CommRing_Null_setEqUnsat___lambda__1___closed__8() { -_start: +else { -lean_object* x_1; lean_object* x_2; lean_object* x_3; -x_1 = l_Lean_Meta_Grind_Arith_CommRing_Null_setEqUnsat___lambda__1___closed__5; -x_2 
= l_Lean_Meta_Grind_Arith_CommRing_Null_setEqUnsat___lambda__1___closed__7; -x_3 = l_Lean_Expr_const___override(x_1, x_2); -return x_3; -} -} -static lean_object* _init_l_Lean_Meta_Grind_Arith_CommRing_Null_setEqUnsat___lambda__1___closed__9() { -_start: +lean_object* x_85; lean_object* x_86; lean_object* x_87; lean_object* x_120; uint8_t x_121; +x_85 = lean_ctor_get(x_32, 0); +x_86 = lean_ctor_get(x_32, 1); +lean_inc(x_86); +lean_inc(x_85); +lean_dec(x_32); +x_120 = lean_unsigned_to_nat(0u); +x_121 = lean_nat_dec_eq(x_86, x_120); +if (x_121 == 0) { -lean_object* x_1; -x_1 = lean_mk_string_unchecked("Int", 3, 3); -return x_1; -} +lean_object* x_122; lean_object* x_123; lean_object* x_124; lean_object* x_125; lean_object* x_126; lean_object* x_127; lean_object* x_128; lean_object* x_129; +x_122 = lean_ctor_get(x_15, 2); +lean_inc(x_122); +x_123 = lean_box(0); +x_124 = lean_alloc_ctor(1, 2, 0); +lean_ctor_set(x_124, 0, x_122); +lean_ctor_set(x_124, 1, x_123); +x_125 = l_Lean_Meta_Grind_Arith_CommRing_Null_setEqUnsat___lambda__1___closed__17; +x_126 = l_Lean_Expr_const___override(x_125, x_124); +x_127 = lean_ctor_get(x_15, 1); +lean_inc(x_127); +x_128 = l_Lean_mkNatLit(x_86); +x_129 = l_Lean_mkAppB(x_126, x_127, x_128); +x_87 = x_129; +goto block_119; } -static lean_object* _init_l_Lean_Meta_Grind_Arith_CommRing_Null_setEqUnsat___lambda__1___closed__10() { -_start: +else { -lean_object* x_1; lean_object* x_2; lean_object* x_3; -x_1 = lean_box(0); -x_2 = l_Lean_Meta_Grind_Arith_CommRing_Null_setEqUnsat___lambda__1___closed__9; -x_3 = l_Lean_Name_str___override(x_1, x_2); -return x_3; -} +lean_object* x_130; lean_object* x_131; lean_object* x_132; lean_object* x_133; lean_object* x_134; lean_object* x_135; lean_object* x_136; +lean_dec(x_86); +x_130 = lean_ctor_get(x_15, 2); +lean_inc(x_130); +x_131 = lean_box(0); +x_132 = lean_alloc_ctor(1, 2, 0); +lean_ctor_set(x_132, 0, x_130); +lean_ctor_set(x_132, 1, x_131); +x_133 = l_Lean_Meta_Grind_Arith_CommRing_Null_setEqUnsat___lambda__1___closed__19; +x_134 = l_Lean_Expr_const___override(x_133, x_132); +x_135 = lean_ctor_get(x_15, 1); +lean_inc(x_135); +x_136 = l_Lean_Expr_app___override(x_134, x_135); +x_87 = x_136; +goto block_119; } -static lean_object* _init_l_Lean_Meta_Grind_Arith_CommRing_Null_setEqUnsat___lambda__1___closed__11() { -_start: +block_119: { -lean_object* x_1; lean_object* x_2; lean_object* x_3; -x_1 = lean_box(0); -x_2 = l_Lean_Meta_Grind_Arith_CommRing_Null_setEqUnsat___lambda__1___closed__10; -x_3 = l_Lean_Expr_const___override(x_2, x_1); -return x_3; -} +lean_object* x_88; +lean_inc(x_12); +lean_inc(x_11); +lean_inc(x_10); +lean_inc(x_9); +x_88 = l_Lean_Meta_Grind_Arith_CommRing_toContextExpr(x_4, x_5, x_6, x_7, x_8, x_9, x_10, x_11, x_12, x_33); +if (lean_obj_tag(x_88) == 0) +{ +lean_object* x_89; lean_object* x_90; lean_object* x_91; lean_object* x_92; lean_object* x_93; lean_object* x_94; uint8_t x_95; +x_89 = lean_ctor_get(x_88, 0); +lean_inc(x_89); +x_90 = lean_ctor_get(x_88, 1); +lean_inc(x_90); +lean_dec(x_88); +x_91 = l_Lean_Meta_Grind_Arith_CommRing_Null_NullCertExt_toNullCert(x_1); +x_92 = l_Lean_Meta_Grind_Arith_CommRing_ofNullCert(x_91); +x_93 = lean_ctor_get(x_15, 3); +lean_inc(x_93); +lean_dec(x_15); +x_94 = l___private_Lean_Meta_Tactic_Grind_Arith_CommRing_DenoteExpr_0__Lean_Meta_Grind_Arith_CommRing_denoteNum___at_Lean_Meta_Grind_Arith_CommRing_getPolyConst___spec__2___closed__8; +x_95 = lean_int_dec_le(x_94, x_2); +if (x_95 == 0) +{ +lean_object* x_96; lean_object* x_97; lean_object* x_98; lean_object* x_99; 
lean_object* x_100; lean_object* x_101; lean_object* x_102; lean_object* x_103; lean_object* x_104; lean_object* x_105; lean_object* x_106; lean_object* x_107; +x_96 = lean_int_neg(x_2); +x_97 = l_Int_toNat(x_96); +lean_dec(x_96); +x_98 = l_Lean_instToExprInt_mkNat(x_97); +x_99 = l_Lean_Meta_Grind_Arith_CommRing_Null_setEqUnsat___lambda__1___closed__8; +x_100 = l_Lean_Meta_Grind_Arith_CommRing_Null_setEqUnsat___lambda__1___closed__11; +x_101 = l_Lean_Meta_Grind_Arith_CommRing_Null_setEqUnsat___lambda__1___closed__14; +x_102 = l_Lean_mkApp3(x_99, x_100, x_101, x_98); +x_103 = l_Lean_reflBoolTrue; +x_104 = l_Lean_mkApp6(x_87, x_93, x_85, x_89, x_92, x_102, x_103); +x_105 = lean_unsigned_to_nat(0u); +x_106 = l_Lean_Meta_Grind_Arith_CommRing_Null_NullCertExt_applyEqs_go(x_1, x_105, x_104); +x_107 = l_Lean_Meta_Grind_closeGoal(x_106, x_5, x_6, x_7, x_8, x_9, x_10, x_11, x_12, x_90); +return x_107; } -static lean_object* _init_l_Lean_Meta_Grind_Arith_CommRing_Null_setEqUnsat___lambda__1___closed__12() { -_start: +else { -lean_object* x_1; -x_1 = lean_mk_string_unchecked("instNegInt", 10, 10); -return x_1; +lean_object* x_108; lean_object* x_109; lean_object* x_110; lean_object* x_111; lean_object* x_112; lean_object* x_113; lean_object* x_114; +x_108 = l_Int_toNat(x_2); +x_109 = l_Lean_instToExprInt_mkNat(x_108); +x_110 = l_Lean_reflBoolTrue; +x_111 = l_Lean_mkApp6(x_87, x_93, x_85, x_89, x_92, x_109, x_110); +x_112 = lean_unsigned_to_nat(0u); +x_113 = l_Lean_Meta_Grind_Arith_CommRing_Null_NullCertExt_applyEqs_go(x_1, x_112, x_111); +x_114 = l_Lean_Meta_Grind_closeGoal(x_113, x_5, x_6, x_7, x_8, x_9, x_10, x_11, x_12, x_90); +return x_114; } } -static lean_object* _init_l_Lean_Meta_Grind_Arith_CommRing_Null_setEqUnsat___lambda__1___closed__13() { -_start: +else { -lean_object* x_1; lean_object* x_2; lean_object* x_3; -x_1 = l_Lean_Meta_Grind_Arith_CommRing_Null_setEqUnsat___lambda__1___closed__9; -x_2 = l_Lean_Meta_Grind_Arith_CommRing_Null_setEqUnsat___lambda__1___closed__12; -x_3 = l_Lean_Name_mkStr2(x_1, x_2); -return x_3; +lean_object* x_115; lean_object* x_116; lean_object* x_117; lean_object* x_118; +lean_dec(x_87); +lean_dec(x_85); +lean_dec(x_15); +lean_dec(x_12); +lean_dec(x_11); +lean_dec(x_10); +lean_dec(x_9); +lean_dec(x_8); +lean_dec(x_7); +lean_dec(x_6); +lean_dec(x_5); +x_115 = lean_ctor_get(x_88, 0); +lean_inc(x_115); +x_116 = lean_ctor_get(x_88, 1); +lean_inc(x_116); +if (lean_is_exclusive(x_88)) { + lean_ctor_release(x_88, 0); + lean_ctor_release(x_88, 1); + x_117 = x_88; +} else { + lean_dec_ref(x_88); + x_117 = lean_box(0); +} +if (lean_is_scalar(x_117)) { + x_118 = lean_alloc_ctor(1, 2, 0); +} else { + x_118 = x_117; } +lean_ctor_set(x_118, 0, x_115); +lean_ctor_set(x_118, 1, x_116); +return x_118; } -static lean_object* _init_l_Lean_Meta_Grind_Arith_CommRing_Null_setEqUnsat___lambda__1___closed__14() { -_start: -{ -lean_object* x_1; lean_object* x_2; lean_object* x_3; -x_1 = lean_box(0); -x_2 = l_Lean_Meta_Grind_Arith_CommRing_Null_setEqUnsat___lambda__1___closed__13; -x_3 = l_Lean_Expr_const___override(x_2, x_1); -return x_3; } } -static lean_object* _init_l_Lean_Meta_Grind_Arith_CommRing_Null_setEqUnsat___lambda__1___closed__15() { -_start: -{ -lean_object* x_1; -x_1 = lean_mk_string_unchecked("Lean", 4, 4); -return x_1; } } -static lean_object* _init_l_Lean_Meta_Grind_Arith_CommRing_Null_setEqUnsat___lambda__1___closed__16() { -_start: +else { -lean_object* x_1; -x_1 = lean_mk_string_unchecked("Grind", 5, 5); -return x_1; -} +uint8_t x_137; +lean_dec(x_12); 
+lean_dec(x_11); +lean_dec(x_10); +lean_dec(x_9); +lean_dec(x_8); +lean_dec(x_7); +lean_dec(x_6); +lean_dec(x_5); +x_137 = !lean_is_exclusive(x_14); +if (x_137 == 0) +{ +return x_14; } -static lean_object* _init_l_Lean_Meta_Grind_Arith_CommRing_Null_setEqUnsat___lambda__1___closed__17() { -_start: +else { -lean_object* x_1; -x_1 = lean_mk_string_unchecked("CommRing", 8, 8); -return x_1; +lean_object* x_138; lean_object* x_139; lean_object* x_140; +x_138 = lean_ctor_get(x_14, 0); +x_139 = lean_ctor_get(x_14, 1); +lean_inc(x_139); +lean_inc(x_138); +lean_dec(x_14); +x_140 = lean_alloc_ctor(1, 2, 0); +lean_ctor_set(x_140, 0, x_138); +lean_ctor_set(x_140, 1, x_139); +return x_140; } } -static lean_object* _init_l_Lean_Meta_Grind_Arith_CommRing_Null_setEqUnsat___lambda__1___closed__18() { -_start: -{ -lean_object* x_1; -x_1 = lean_mk_string_unchecked("NullCert", 8, 8); -return x_1; } } -static lean_object* _init_l_Lean_Meta_Grind_Arith_CommRing_Null_setEqUnsat___lambda__1___closed__19() { +static lean_object* _init_l_Lean_Meta_Grind_Arith_CommRing_Null_setEqUnsat___lambda__2___closed__1() { _start: { lean_object* x_1; -x_1 = lean_mk_string_unchecked("eq_unsatC", 9, 9); +x_1 = lean_mk_string_unchecked("*(", 2, 2); return x_1; } } -static lean_object* _init_l_Lean_Meta_Grind_Arith_CommRing_Null_setEqUnsat___lambda__1___closed__20() { +static lean_object* _init_l_Lean_Meta_Grind_Arith_CommRing_Null_setEqUnsat___lambda__2___closed__2() { _start: { -lean_object* x_1; lean_object* x_2; lean_object* x_3; lean_object* x_4; lean_object* x_5; lean_object* x_6; -x_1 = l_Lean_Meta_Grind_Arith_CommRing_Null_setEqUnsat___lambda__1___closed__15; -x_2 = l_Lean_Meta_Grind_Arith_CommRing_Null_setEqUnsat___lambda__1___closed__16; -x_3 = l_Lean_Meta_Grind_Arith_CommRing_Null_setEqUnsat___lambda__1___closed__17; -x_4 = l_Lean_Meta_Grind_Arith_CommRing_Null_setEqUnsat___lambda__1___closed__18; -x_5 = l_Lean_Meta_Grind_Arith_CommRing_Null_setEqUnsat___lambda__1___closed__19; -x_6 = l_Lean_Name_mkStr5(x_1, x_2, x_3, x_4, x_5); -return x_6; +lean_object* x_1; lean_object* x_2; +x_1 = l_Lean_Meta_Grind_Arith_CommRing_Null_setEqUnsat___lambda__2___closed__1; +x_2 = l_Lean_stringToMessageData(x_1); +return x_2; } } -static lean_object* _init_l_Lean_Meta_Grind_Arith_CommRing_Null_setEqUnsat___lambda__1___closed__21() { +static lean_object* _init_l_Lean_Meta_Grind_Arith_CommRing_Null_setEqUnsat___lambda__2___closed__3() { _start: { lean_object* x_1; -x_1 = lean_mk_string_unchecked("eq_unsat", 8, 8); +x_1 = lean_mk_string_unchecked("), ", 3, 3); return x_1; } } -static lean_object* _init_l_Lean_Meta_Grind_Arith_CommRing_Null_setEqUnsat___lambda__1___closed__22() { +static lean_object* _init_l_Lean_Meta_Grind_Arith_CommRing_Null_setEqUnsat___lambda__2___closed__4() { _start: { -lean_object* x_1; lean_object* x_2; lean_object* x_3; lean_object* x_4; lean_object* x_5; lean_object* x_6; -x_1 = l_Lean_Meta_Grind_Arith_CommRing_Null_setEqUnsat___lambda__1___closed__15; -x_2 = l_Lean_Meta_Grind_Arith_CommRing_Null_setEqUnsat___lambda__1___closed__16; -x_3 = l_Lean_Meta_Grind_Arith_CommRing_Null_setEqUnsat___lambda__1___closed__17; -x_4 = l_Lean_Meta_Grind_Arith_CommRing_Null_setEqUnsat___lambda__1___closed__18; -x_5 = l_Lean_Meta_Grind_Arith_CommRing_Null_setEqUnsat___lambda__1___closed__21; -x_6 = l_Lean_Name_mkStr5(x_1, x_2, x_3, x_4, x_5); -return x_6; +lean_object* x_1; lean_object* x_2; +x_1 = l_Lean_Meta_Grind_Arith_CommRing_Null_setEqUnsat___lambda__2___closed__3; +x_2 = l_Lean_stringToMessageData(x_1); +return x_2; } } 
-LEAN_EXPORT lean_object* l_Lean_Meta_Grind_Arith_CommRing_Null_setEqUnsat___lambda__1(lean_object* x_1, lean_object* x_2, lean_object* x_3, lean_object* x_4, lean_object* x_5, lean_object* x_6, lean_object* x_7, lean_object* x_8, lean_object* x_9, lean_object* x_10, lean_object* x_11, lean_object* x_12, lean_object* x_13) { +LEAN_EXPORT lean_object* l_Lean_Meta_Grind_Arith_CommRing_Null_setEqUnsat___lambda__2(lean_object* x_1, lean_object* x_2, lean_object* x_3, lean_object* x_4, lean_object* x_5, lean_object* x_6, lean_object* x_7, lean_object* x_8, lean_object* x_9, lean_object* x_10, lean_object* x_11, lean_object* x_12, lean_object* x_13) { _start: { -lean_object* x_14; -x_14 = l_Lean_Meta_Grind_Arith_CommRing_getRing(x_4, x_5, x_6, x_7, x_8, x_9, x_10, x_11, x_12, x_13); -if (lean_obj_tag(x_14) == 0) +lean_object* x_14; lean_object* x_15; +x_14 = lean_ctor_get(x_1, 0); +lean_inc(x_14); +lean_inc(x_14); +x_15 = l_Lean_Meta_Grind_Arith_CommRing_getPolyConst(x_14, x_4, x_5, x_6, x_7, x_8, x_9, x_10, x_11, x_12, x_13); +if (lean_obj_tag(x_15) == 0) { -lean_object* x_15; lean_object* x_16; -x_15 = lean_ctor_get(x_14, 0); -lean_inc(x_15); -x_16 = lean_ctor_get(x_15, 4); +lean_object* x_16; lean_object* x_17; lean_object* x_18; +x_16 = lean_ctor_get(x_15, 0); lean_inc(x_16); -if (lean_obj_tag(x_16) == 0) -{ -lean_object* x_17; lean_object* x_18; -lean_dec(x_15); -x_17 = lean_ctor_get(x_14, 1); +x_17 = lean_ctor_get(x_15, 1); lean_inc(x_17); -lean_dec(x_14); -x_18 = l_Lean_Meta_Grind_Arith_CommRing_getRing(x_4, x_5, x_6, x_7, x_8, x_9, x_10, x_11, x_12, x_17); +lean_dec(x_15); +lean_inc(x_12); +lean_inc(x_11); +lean_inc(x_10); +lean_inc(x_9); +lean_inc(x_8); +lean_inc(x_7); +lean_inc(x_6); +lean_inc(x_5); +lean_inc(x_4); +x_18 = l_Lean_Meta_Grind_Arith_CommRing_EqCnstr_mkNullCertExt(x_1, x_4, x_5, x_6, x_7, x_8, x_9, x_10, x_11, x_12, x_17); if (lean_obj_tag(x_18) == 0) { -lean_object* x_19; lean_object* x_20; lean_object* x_21; lean_object* x_22; lean_object* x_23; lean_object* x_24; lean_object* x_25; lean_object* x_26; lean_object* x_27; +lean_object* x_19; lean_object* x_20; lean_object* x_21; lean_object* x_22; uint8_t x_23; x_19 = lean_ctor_get(x_18, 0); lean_inc(x_19); x_20 = lean_ctor_get(x_18, 1); lean_inc(x_20); lean_dec(x_18); -x_21 = lean_ctor_get(x_19, 1); -lean_inc(x_21); +lean_inc(x_2); +x_21 = l_Lean_isTracingEnabledFor___at_Lean_Meta_Grind_Arith_CommRing_Null_setEqUnsat___spec__1(x_2, x_4, x_5, x_6, x_7, x_8, x_9, x_10, x_11, x_12, x_20); +x_22 = lean_ctor_get(x_21, 0); +lean_inc(x_22); +x_23 = lean_unbox(x_22); +lean_dec(x_22); +if (x_23 == 0) +{ +lean_object* x_24; lean_object* x_25; lean_object* x_26; +lean_dec(x_14); +lean_dec(x_2); +x_24 = lean_ctor_get(x_21, 1); +lean_inc(x_24); +lean_dec(x_21); +x_25 = lean_box(0); +x_26 = l_Lean_Meta_Grind_Arith_CommRing_Null_setEqUnsat___lambda__1(x_19, x_16, x_25, x_4, x_5, x_6, x_7, x_8, x_9, x_10, x_11, x_12, x_24); +lean_dec(x_4); +lean_dec(x_16); lean_dec(x_19); -x_22 = l_Lean_indentExpr(x_21); -x_23 = l_Lean_Meta_Grind_Arith_CommRing_Null_setEqUnsat___lambda__1___closed__2; -x_24 = lean_alloc_ctor(7, 2, 0); -lean_ctor_set(x_24, 0, x_23); -lean_ctor_set(x_24, 1, x_22); -x_25 = l_Lean_Meta_Grind_Arith_CommRing_throwNoNatZeroDivisors___rarg___closed__4; -x_26 = lean_alloc_ctor(7, 2, 0); -lean_ctor_set(x_26, 0, x_24); -lean_ctor_set(x_26, 1, x_25); -x_27 = l_Lean_throwError___at_Lean_Meta_Grind_Arith_CommRing_Null_setEqUnsat___spec__2(x_26, x_4, x_5, x_6, x_7, x_8, x_9, x_10, x_11, x_12, x_20); -lean_dec(x_12); -lean_dec(x_11); 
-lean_dec(x_10); -lean_dec(x_9); -lean_dec(x_8); -lean_dec(x_7); -lean_dec(x_6); -lean_dec(x_5); -return x_27; +return x_26; } else { -uint8_t x_28; -lean_dec(x_12); -lean_dec(x_11); -lean_dec(x_10); -lean_dec(x_9); -lean_dec(x_8); -lean_dec(x_7); -lean_dec(x_6); -lean_dec(x_5); -x_28 = !lean_is_exclusive(x_18); -if (x_28 == 0) +uint8_t x_27; +x_27 = !lean_is_exclusive(x_21); +if (x_27 == 0) { -return x_18; -} -else +lean_object* x_28; lean_object* x_29; lean_object* x_30; +x_28 = lean_ctor_get(x_21, 1); +x_29 = lean_ctor_get(x_21, 0); +lean_dec(x_29); +x_30 = l_Lean_Meta_Grind_updateLastTag(x_5, x_6, x_7, x_8, x_9, x_10, x_11, x_12, x_28); +if (lean_obj_tag(x_30) == 0) { -lean_object* x_29; lean_object* x_30; lean_object* x_31; -x_29 = lean_ctor_get(x_18, 0); -x_30 = lean_ctor_get(x_18, 1); -lean_inc(x_30); -lean_inc(x_29); -lean_dec(x_18); -x_31 = lean_alloc_ctor(1, 2, 0); -lean_ctor_set(x_31, 0, x_29); -lean_ctor_set(x_31, 1, x_30); -return x_31; -} -} -} -else +lean_object* x_31; lean_object* x_32; +x_31 = lean_ctor_get(x_30, 1); +lean_inc(x_31); +lean_dec(x_30); +x_32 = l_Lean_Grind_CommRing_Poly_denoteExpr___at_Lean_Meta_Grind_Arith_CommRing_getPolyConst___spec__1(x_14, x_4, x_5, x_6, x_7, x_8, x_9, x_10, x_11, x_12, x_31); +if (lean_obj_tag(x_32) == 0) { -lean_object* x_32; lean_object* x_33; uint8_t x_34; -x_32 = lean_ctor_get(x_16, 0); -lean_inc(x_32); -lean_dec(x_16); -x_33 = lean_ctor_get(x_14, 1); +lean_object* x_33; lean_object* x_34; lean_object* x_35; +x_33 = lean_ctor_get(x_32, 0); lean_inc(x_33); -lean_dec(x_14); -x_34 = !lean_is_exclusive(x_32); -if (x_34 == 0) -{ -lean_object* x_35; lean_object* x_36; lean_object* x_37; lean_object* x_70; uint8_t x_71; -x_35 = lean_ctor_get(x_32, 0); -x_36 = lean_ctor_get(x_32, 1); -x_70 = lean_unsigned_to_nat(0u); -x_71 = lean_nat_dec_eq(x_36, x_70); -if (x_71 == 0) -{ -lean_object* x_72; lean_object* x_73; lean_object* x_74; lean_object* x_75; lean_object* x_76; lean_object* x_77; lean_object* x_78; -x_72 = lean_ctor_get(x_15, 2); -lean_inc(x_72); -x_73 = lean_box(0); -lean_ctor_set_tag(x_32, 1); -lean_ctor_set(x_32, 1, x_73); -lean_ctor_set(x_32, 0, x_72); -x_74 = l_Lean_Meta_Grind_Arith_CommRing_Null_setEqUnsat___lambda__1___closed__20; -x_75 = l_Lean_Expr_const___override(x_74, x_32); -x_76 = lean_ctor_get(x_15, 1); -lean_inc(x_76); -x_77 = l_Lean_mkNatLit(x_36); -x_78 = l_Lean_mkAppB(x_75, x_76, x_77); -x_37 = x_78; -goto block_69; -} -else -{ -lean_object* x_79; lean_object* x_80; lean_object* x_81; lean_object* x_82; lean_object* x_83; lean_object* x_84; -lean_dec(x_36); -x_79 = lean_ctor_get(x_15, 2); -lean_inc(x_79); -x_80 = lean_box(0); -lean_ctor_set_tag(x_32, 1); -lean_ctor_set(x_32, 1, x_80); -lean_ctor_set(x_32, 0, x_79); -x_81 = l_Lean_Meta_Grind_Arith_CommRing_Null_setEqUnsat___lambda__1___closed__22; -x_82 = l_Lean_Expr_const___override(x_81, x_32); -x_83 = lean_ctor_get(x_15, 1); -lean_inc(x_83); -x_84 = l_Lean_Expr_app___override(x_82, x_83); -x_37 = x_84; -goto block_69; -} -block_69: -{ -lean_object* x_38; +x_34 = lean_ctor_get(x_32, 1); +lean_inc(x_34); +lean_dec(x_32); lean_inc(x_12); lean_inc(x_11); lean_inc(x_10); lean_inc(x_9); -x_38 = l_Lean_Meta_Grind_Arith_CommRing_toContextExpr(x_4, x_5, x_6, x_7, x_8, x_9, x_10, x_11, x_12, x_33); +lean_inc(x_8); +lean_inc(x_7); +lean_inc(x_6); +lean_inc(x_5); +lean_inc(x_4); +x_35 = l_Lean_Meta_Grind_Arith_CommRing_Null_NullCertExt_toPoly(x_19, x_4, x_5, x_6, x_7, x_8, x_9, x_10, x_11, x_12, x_34); +if (lean_obj_tag(x_35) == 0) +{ +lean_object* x_36; lean_object* x_37; 
lean_object* x_38; +x_36 = lean_ctor_get(x_35, 0); +lean_inc(x_36); +x_37 = lean_ctor_get(x_35, 1); +lean_inc(x_37); +lean_dec(x_35); +x_38 = l_Lean_Grind_CommRing_Poly_denoteExpr___at_Lean_Meta_Grind_Arith_CommRing_getPolyConst___spec__1(x_36, x_4, x_5, x_6, x_7, x_8, x_9, x_10, x_11, x_12, x_37); if (lean_obj_tag(x_38) == 0) { lean_object* x_39; lean_object* x_40; lean_object* x_41; lean_object* x_42; lean_object* x_43; lean_object* x_44; uint8_t x_45; @@ -8079,50 +9923,114 @@ lean_inc(x_39); x_40 = lean_ctor_get(x_38, 1); lean_inc(x_40); lean_dec(x_38); -x_41 = l_Lean_Meta_Grind_Arith_CommRing_Null_NullCertExt_toNullCert(x_1); -x_42 = l_Lean_Meta_Grind_Arith_CommRing_ofNullCert(x_41); -x_43 = lean_ctor_get(x_15, 3); -lean_inc(x_43); -lean_dec(x_15); -x_44 = l_Lean_Meta_Grind_Arith_CommRing_Null_instInhabitedPreNullCert___closed__2; -x_45 = lean_int_dec_le(x_44, x_2); +x_41 = lean_ctor_get(x_19, 0); +lean_inc(x_41); +x_42 = l_Lean_MessageData_ofExpr(x_33); +x_43 = l_Lean_MessageData_ofExpr(x_39); +x_44 = l___private_Lean_Meta_Tactic_Grind_Arith_CommRing_DenoteExpr_0__Lean_Meta_Grind_Arith_CommRing_denoteNum___at_Lean_Meta_Grind_Arith_CommRing_getPolyConst___spec__2___closed__8; +x_45 = lean_int_dec_lt(x_41, x_44); if (x_45 == 0) { -lean_object* x_46; lean_object* x_47; lean_object* x_48; lean_object* x_49; lean_object* x_50; lean_object* x_51; lean_object* x_52; lean_object* x_53; lean_object* x_54; lean_object* x_55; lean_object* x_56; lean_object* x_57; -x_46 = lean_int_neg(x_2); -x_47 = l_Int_toNat(x_46); -lean_dec(x_46); -x_48 = l_Lean_instToExprInt_mkNat(x_47); -x_49 = l_Lean_Meta_Grind_Arith_CommRing_Null_setEqUnsat___lambda__1___closed__8; -x_50 = l_Lean_Meta_Grind_Arith_CommRing_Null_setEqUnsat___lambda__1___closed__11; -x_51 = l_Lean_Meta_Grind_Arith_CommRing_Null_setEqUnsat___lambda__1___closed__14; -x_52 = l_Lean_mkApp3(x_49, x_50, x_51, x_48); -x_53 = l_Lean_reflBoolTrue; -x_54 = l_Lean_mkApp6(x_37, x_43, x_35, x_39, x_42, x_52, x_53); -x_55 = lean_unsigned_to_nat(0u); -x_56 = l_Lean_Meta_Grind_Arith_CommRing_Null_NullCertExt_applyEqs_go(x_1, x_55, x_54); -x_57 = l_Lean_Meta_Grind_closeGoal(x_56, x_5, x_6, x_7, x_8, x_9, x_10, x_11, x_12, x_40); -return x_57; +lean_object* x_46; lean_object* x_47; lean_object* x_48; lean_object* x_49; lean_object* x_50; lean_object* x_51; lean_object* x_52; lean_object* x_53; lean_object* x_54; lean_object* x_55; lean_object* x_56; lean_object* x_57; lean_object* x_58; lean_object* x_59; lean_object* x_60; lean_object* x_61; +x_46 = lean_nat_abs(x_41); +lean_dec(x_41); +x_47 = l___private_Init_Data_Repr_0__Nat_reprFast(x_46); +x_48 = lean_alloc_ctor(3, 1, 0); +lean_ctor_set(x_48, 0, x_47); +x_49 = l_Lean_MessageData_ofFormat(x_48); +x_50 = l_Lean_Meta_Grind_Arith_CommRing_throwNoNatZeroDivisors___rarg___closed__4; +lean_ctor_set_tag(x_21, 7); +lean_ctor_set(x_21, 1, x_49); +lean_ctor_set(x_21, 0, x_50); +x_51 = l_Lean_Meta_Grind_Arith_CommRing_Null_setEqUnsat___lambda__2___closed__2; +x_52 = lean_alloc_ctor(7, 2, 0); +lean_ctor_set(x_52, 0, x_21); +lean_ctor_set(x_52, 1, x_51); +x_53 = lean_alloc_ctor(7, 2, 0); +lean_ctor_set(x_53, 0, x_52); +lean_ctor_set(x_53, 1, x_42); +x_54 = l_Lean_Meta_Grind_Arith_CommRing_Null_setEqUnsat___lambda__2___closed__4; +x_55 = lean_alloc_ctor(7, 2, 0); +lean_ctor_set(x_55, 0, x_53); +lean_ctor_set(x_55, 1, x_54); +x_56 = lean_alloc_ctor(7, 2, 0); +lean_ctor_set(x_56, 0, x_55); +lean_ctor_set(x_56, 1, x_43); +x_57 = lean_alloc_ctor(7, 2, 0); +lean_ctor_set(x_57, 0, x_56); +lean_ctor_set(x_57, 1, x_50); +x_58 = 
l_Lean_addTrace___at_Lean_Meta_Grind_Arith_CommRing_Null_setEqUnsat___spec__3(x_2, x_57, x_4, x_5, x_6, x_7, x_8, x_9, x_10, x_11, x_12, x_40); +x_59 = lean_ctor_get(x_58, 0); +lean_inc(x_59); +x_60 = lean_ctor_get(x_58, 1); +lean_inc(x_60); +lean_dec(x_58); +x_61 = l_Lean_Meta_Grind_Arith_CommRing_Null_setEqUnsat___lambda__1(x_19, x_16, x_59, x_4, x_5, x_6, x_7, x_8, x_9, x_10, x_11, x_12, x_60); +lean_dec(x_4); +lean_dec(x_59); +lean_dec(x_16); +lean_dec(x_19); +return x_61; } else { -lean_object* x_58; lean_object* x_59; lean_object* x_60; lean_object* x_61; lean_object* x_62; lean_object* x_63; lean_object* x_64; -x_58 = l_Int_toNat(x_2); -x_59 = l_Lean_instToExprInt_mkNat(x_58); -x_60 = l_Lean_reflBoolTrue; -x_61 = l_Lean_mkApp6(x_37, x_43, x_35, x_39, x_42, x_59, x_60); -x_62 = lean_unsigned_to_nat(0u); -x_63 = l_Lean_Meta_Grind_Arith_CommRing_Null_NullCertExt_applyEqs_go(x_1, x_62, x_61); -x_64 = l_Lean_Meta_Grind_closeGoal(x_63, x_5, x_6, x_7, x_8, x_9, x_10, x_11, x_12, x_40); -return x_64; +lean_object* x_62; lean_object* x_63; lean_object* x_64; lean_object* x_65; lean_object* x_66; lean_object* x_67; lean_object* x_68; lean_object* x_69; lean_object* x_70; lean_object* x_71; lean_object* x_72; lean_object* x_73; lean_object* x_74; lean_object* x_75; lean_object* x_76; lean_object* x_77; lean_object* x_78; lean_object* x_79; lean_object* x_80; lean_object* x_81; lean_object* x_82; +x_62 = lean_nat_abs(x_41); +lean_dec(x_41); +x_63 = lean_unsigned_to_nat(1u); +x_64 = lean_nat_sub(x_62, x_63); +lean_dec(x_62); +x_65 = lean_nat_add(x_64, x_63); +lean_dec(x_64); +x_66 = l___private_Init_Data_Repr_0__Nat_reprFast(x_65); +x_67 = l_Lean_Meta_Grind_Arith_CommRing_PolyDerivation_toPreNullCert___closed__10; +x_68 = lean_string_append(x_67, x_66); +lean_dec(x_66); +x_69 = lean_alloc_ctor(3, 1, 0); +lean_ctor_set(x_69, 0, x_68); +x_70 = l_Lean_MessageData_ofFormat(x_69); +x_71 = l_Lean_Meta_Grind_Arith_CommRing_throwNoNatZeroDivisors___rarg___closed__4; +lean_ctor_set_tag(x_21, 7); +lean_ctor_set(x_21, 1, x_70); +lean_ctor_set(x_21, 0, x_71); +x_72 = l_Lean_Meta_Grind_Arith_CommRing_Null_setEqUnsat___lambda__2___closed__2; +x_73 = lean_alloc_ctor(7, 2, 0); +lean_ctor_set(x_73, 0, x_21); +lean_ctor_set(x_73, 1, x_72); +x_74 = lean_alloc_ctor(7, 2, 0); +lean_ctor_set(x_74, 0, x_73); +lean_ctor_set(x_74, 1, x_42); +x_75 = l_Lean_Meta_Grind_Arith_CommRing_Null_setEqUnsat___lambda__2___closed__4; +x_76 = lean_alloc_ctor(7, 2, 0); +lean_ctor_set(x_76, 0, x_74); +lean_ctor_set(x_76, 1, x_75); +x_77 = lean_alloc_ctor(7, 2, 0); +lean_ctor_set(x_77, 0, x_76); +lean_ctor_set(x_77, 1, x_43); +x_78 = lean_alloc_ctor(7, 2, 0); +lean_ctor_set(x_78, 0, x_77); +lean_ctor_set(x_78, 1, x_71); +x_79 = l_Lean_addTrace___at_Lean_Meta_Grind_Arith_CommRing_Null_setEqUnsat___spec__3(x_2, x_78, x_4, x_5, x_6, x_7, x_8, x_9, x_10, x_11, x_12, x_40); +x_80 = lean_ctor_get(x_79, 0); +lean_inc(x_80); +x_81 = lean_ctor_get(x_79, 1); +lean_inc(x_81); +lean_dec(x_79); +x_82 = l_Lean_Meta_Grind_Arith_CommRing_Null_setEqUnsat___lambda__1(x_19, x_16, x_80, x_4, x_5, x_6, x_7, x_8, x_9, x_10, x_11, x_12, x_81); +lean_dec(x_4); +lean_dec(x_80); +lean_dec(x_16); +lean_dec(x_19); +return x_82; } } else { -uint8_t x_65; -lean_dec(x_37); -lean_dec(x_35); -lean_dec(x_15); +uint8_t x_83; +lean_dec(x_33); +lean_free_object(x_21); +lean_dec(x_19); +lean_dec(x_16); lean_dec(x_12); lean_dec(x_11); lean_dec(x_10); @@ -8131,133 +10039,71 @@ lean_dec(x_8); lean_dec(x_7); lean_dec(x_6); lean_dec(x_5); -x_65 = !lean_is_exclusive(x_38); -if 
(x_65 == 0) +lean_dec(x_4); +lean_dec(x_2); +x_83 = !lean_is_exclusive(x_38); +if (x_83 == 0) { return x_38; } else { -lean_object* x_66; lean_object* x_67; lean_object* x_68; -x_66 = lean_ctor_get(x_38, 0); -x_67 = lean_ctor_get(x_38, 1); -lean_inc(x_67); -lean_inc(x_66); +lean_object* x_84; lean_object* x_85; lean_object* x_86; +x_84 = lean_ctor_get(x_38, 0); +x_85 = lean_ctor_get(x_38, 1); +lean_inc(x_85); +lean_inc(x_84); lean_dec(x_38); -x_68 = lean_alloc_ctor(1, 2, 0); -lean_ctor_set(x_68, 0, x_66); -lean_ctor_set(x_68, 1, x_67); -return x_68; -} +x_86 = lean_alloc_ctor(1, 2, 0); +lean_ctor_set(x_86, 0, x_84); +lean_ctor_set(x_86, 1, x_85); +return x_86; } } } else { -lean_object* x_85; lean_object* x_86; lean_object* x_87; lean_object* x_120; uint8_t x_121; -x_85 = lean_ctor_get(x_32, 0); -x_86 = lean_ctor_get(x_32, 1); -lean_inc(x_86); -lean_inc(x_85); -lean_dec(x_32); -x_120 = lean_unsigned_to_nat(0u); -x_121 = lean_nat_dec_eq(x_86, x_120); -if (x_121 == 0) +uint8_t x_87; +lean_dec(x_33); +lean_free_object(x_21); +lean_dec(x_19); +lean_dec(x_16); +lean_dec(x_12); +lean_dec(x_11); +lean_dec(x_10); +lean_dec(x_9); +lean_dec(x_8); +lean_dec(x_7); +lean_dec(x_6); +lean_dec(x_5); +lean_dec(x_4); +lean_dec(x_2); +x_87 = !lean_is_exclusive(x_35); +if (x_87 == 0) { -lean_object* x_122; lean_object* x_123; lean_object* x_124; lean_object* x_125; lean_object* x_126; lean_object* x_127; lean_object* x_128; lean_object* x_129; -x_122 = lean_ctor_get(x_15, 2); -lean_inc(x_122); -x_123 = lean_box(0); -x_124 = lean_alloc_ctor(1, 2, 0); -lean_ctor_set(x_124, 0, x_122); -lean_ctor_set(x_124, 1, x_123); -x_125 = l_Lean_Meta_Grind_Arith_CommRing_Null_setEqUnsat___lambda__1___closed__20; -x_126 = l_Lean_Expr_const___override(x_125, x_124); -x_127 = lean_ctor_get(x_15, 1); -lean_inc(x_127); -x_128 = l_Lean_mkNatLit(x_86); -x_129 = l_Lean_mkAppB(x_126, x_127, x_128); -x_87 = x_129; -goto block_119; +return x_35; } else { -lean_object* x_130; lean_object* x_131; lean_object* x_132; lean_object* x_133; lean_object* x_134; lean_object* x_135; lean_object* x_136; -lean_dec(x_86); -x_130 = lean_ctor_get(x_15, 2); -lean_inc(x_130); -x_131 = lean_box(0); -x_132 = lean_alloc_ctor(1, 2, 0); -lean_ctor_set(x_132, 0, x_130); -lean_ctor_set(x_132, 1, x_131); -x_133 = l_Lean_Meta_Grind_Arith_CommRing_Null_setEqUnsat___lambda__1___closed__22; -x_134 = l_Lean_Expr_const___override(x_133, x_132); -x_135 = lean_ctor_get(x_15, 1); -lean_inc(x_135); -x_136 = l_Lean_Expr_app___override(x_134, x_135); -x_87 = x_136; -goto block_119; -} -block_119: -{ -lean_object* x_88; -lean_inc(x_12); -lean_inc(x_11); -lean_inc(x_10); -lean_inc(x_9); -x_88 = l_Lean_Meta_Grind_Arith_CommRing_toContextExpr(x_4, x_5, x_6, x_7, x_8, x_9, x_10, x_11, x_12, x_33); -if (lean_obj_tag(x_88) == 0) -{ -lean_object* x_89; lean_object* x_90; lean_object* x_91; lean_object* x_92; lean_object* x_93; lean_object* x_94; uint8_t x_95; -x_89 = lean_ctor_get(x_88, 0); +lean_object* x_88; lean_object* x_89; lean_object* x_90; +x_88 = lean_ctor_get(x_35, 0); +x_89 = lean_ctor_get(x_35, 1); lean_inc(x_89); -x_90 = lean_ctor_get(x_88, 1); -lean_inc(x_90); -lean_dec(x_88); -x_91 = l_Lean_Meta_Grind_Arith_CommRing_Null_NullCertExt_toNullCert(x_1); -x_92 = l_Lean_Meta_Grind_Arith_CommRing_ofNullCert(x_91); -x_93 = lean_ctor_get(x_15, 3); -lean_inc(x_93); -lean_dec(x_15); -x_94 = l_Lean_Meta_Grind_Arith_CommRing_Null_instInhabitedPreNullCert___closed__2; -x_95 = lean_int_dec_le(x_94, x_2); -if (x_95 == 0) -{ -lean_object* x_96; lean_object* x_97; lean_object* x_98; 
lean_object* x_99; lean_object* x_100; lean_object* x_101; lean_object* x_102; lean_object* x_103; lean_object* x_104; lean_object* x_105; lean_object* x_106; lean_object* x_107; -x_96 = lean_int_neg(x_2); -x_97 = l_Int_toNat(x_96); -lean_dec(x_96); -x_98 = l_Lean_instToExprInt_mkNat(x_97); -x_99 = l_Lean_Meta_Grind_Arith_CommRing_Null_setEqUnsat___lambda__1___closed__8; -x_100 = l_Lean_Meta_Grind_Arith_CommRing_Null_setEqUnsat___lambda__1___closed__11; -x_101 = l_Lean_Meta_Grind_Arith_CommRing_Null_setEqUnsat___lambda__1___closed__14; -x_102 = l_Lean_mkApp3(x_99, x_100, x_101, x_98); -x_103 = l_Lean_reflBoolTrue; -x_104 = l_Lean_mkApp6(x_87, x_93, x_85, x_89, x_92, x_102, x_103); -x_105 = lean_unsigned_to_nat(0u); -x_106 = l_Lean_Meta_Grind_Arith_CommRing_Null_NullCertExt_applyEqs_go(x_1, x_105, x_104); -x_107 = l_Lean_Meta_Grind_closeGoal(x_106, x_5, x_6, x_7, x_8, x_9, x_10, x_11, x_12, x_90); -return x_107; +lean_inc(x_88); +lean_dec(x_35); +x_90 = lean_alloc_ctor(1, 2, 0); +lean_ctor_set(x_90, 0, x_88); +lean_ctor_set(x_90, 1, x_89); +return x_90; } -else -{ -lean_object* x_108; lean_object* x_109; lean_object* x_110; lean_object* x_111; lean_object* x_112; lean_object* x_113; lean_object* x_114; -x_108 = l_Int_toNat(x_2); -x_109 = l_Lean_instToExprInt_mkNat(x_108); -x_110 = l_Lean_reflBoolTrue; -x_111 = l_Lean_mkApp6(x_87, x_93, x_85, x_89, x_92, x_109, x_110); -x_112 = lean_unsigned_to_nat(0u); -x_113 = l_Lean_Meta_Grind_Arith_CommRing_Null_NullCertExt_applyEqs_go(x_1, x_112, x_111); -x_114 = l_Lean_Meta_Grind_closeGoal(x_113, x_5, x_6, x_7, x_8, x_9, x_10, x_11, x_12, x_90); -return x_114; } } else { -lean_object* x_115; lean_object* x_116; lean_object* x_117; lean_object* x_118; -lean_dec(x_87); -lean_dec(x_85); -lean_dec(x_15); +uint8_t x_91; +lean_free_object(x_21); +lean_dec(x_19); +lean_dec(x_16); lean_dec(x_12); lean_dec(x_11); lean_dec(x_10); @@ -8266,34 +10112,35 @@ lean_dec(x_8); lean_dec(x_7); lean_dec(x_6); lean_dec(x_5); -x_115 = lean_ctor_get(x_88, 0); -lean_inc(x_115); -x_116 = lean_ctor_get(x_88, 1); -lean_inc(x_116); -if (lean_is_exclusive(x_88)) { - lean_ctor_release(x_88, 0); - lean_ctor_release(x_88, 1); - x_117 = x_88; -} else { - lean_dec_ref(x_88); - x_117 = lean_box(0); -} -if (lean_is_scalar(x_117)) { - x_118 = lean_alloc_ctor(1, 2, 0); -} else { - x_118 = x_117; -} -lean_ctor_set(x_118, 0, x_115); -lean_ctor_set(x_118, 1, x_116); -return x_118; -} +lean_dec(x_4); +lean_dec(x_2); +x_91 = !lean_is_exclusive(x_32); +if (x_91 == 0) +{ +return x_32; } +else +{ +lean_object* x_92; lean_object* x_93; lean_object* x_94; +x_92 = lean_ctor_get(x_32, 0); +x_93 = lean_ctor_get(x_32, 1); +lean_inc(x_93); +lean_inc(x_92); +lean_dec(x_32); +x_94 = lean_alloc_ctor(1, 2, 0); +lean_ctor_set(x_94, 0, x_92); +lean_ctor_set(x_94, 1, x_93); +return x_94; } } } else { -uint8_t x_137; +uint8_t x_95; +lean_free_object(x_21); +lean_dec(x_19); +lean_dec(x_16); +lean_dec(x_14); lean_dec(x_12); lean_dec(x_11); lean_dec(x_10); @@ -8302,142 +10149,50 @@ lean_dec(x_8); lean_dec(x_7); lean_dec(x_6); lean_dec(x_5); -x_137 = !lean_is_exclusive(x_14); -if (x_137 == 0) +lean_dec(x_4); +lean_dec(x_2); +x_95 = !lean_is_exclusive(x_30); +if (x_95 == 0) { -return x_14; +return x_30; } else { -lean_object* x_138; lean_object* x_139; lean_object* x_140; -x_138 = lean_ctor_get(x_14, 0); -x_139 = lean_ctor_get(x_14, 1); -lean_inc(x_139); -lean_inc(x_138); -lean_dec(x_14); -x_140 = lean_alloc_ctor(1, 2, 0); -lean_ctor_set(x_140, 0, x_138); -lean_ctor_set(x_140, 1, x_139); -return x_140; -} -} -} -} 
-static lean_object* _init_l_Lean_Meta_Grind_Arith_CommRing_Null_setEqUnsat___lambda__2___closed__1() { -_start: -{ -lean_object* x_1; -x_1 = lean_mk_string_unchecked("*(", 2, 2); -return x_1; -} -} -static lean_object* _init_l_Lean_Meta_Grind_Arith_CommRing_Null_setEqUnsat___lambda__2___closed__2() { -_start: -{ -lean_object* x_1; lean_object* x_2; -x_1 = l_Lean_Meta_Grind_Arith_CommRing_Null_setEqUnsat___lambda__2___closed__1; -x_2 = l_Lean_stringToMessageData(x_1); -return x_2; -} -} -static lean_object* _init_l_Lean_Meta_Grind_Arith_CommRing_Null_setEqUnsat___lambda__2___closed__3() { -_start: -{ -lean_object* x_1; -x_1 = lean_mk_string_unchecked("), ", 3, 3); -return x_1; -} -} -static lean_object* _init_l_Lean_Meta_Grind_Arith_CommRing_Null_setEqUnsat___lambda__2___closed__4() { -_start: -{ -lean_object* x_1; lean_object* x_2; -x_1 = l_Lean_Meta_Grind_Arith_CommRing_Null_setEqUnsat___lambda__2___closed__3; -x_2 = l_Lean_stringToMessageData(x_1); -return x_2; +lean_object* x_96; lean_object* x_97; lean_object* x_98; +x_96 = lean_ctor_get(x_30, 0); +x_97 = lean_ctor_get(x_30, 1); +lean_inc(x_97); +lean_inc(x_96); +lean_dec(x_30); +x_98 = lean_alloc_ctor(1, 2, 0); +lean_ctor_set(x_98, 0, x_96); +lean_ctor_set(x_98, 1, x_97); +return x_98; } } -LEAN_EXPORT lean_object* l_Lean_Meta_Grind_Arith_CommRing_Null_setEqUnsat___lambda__2(lean_object* x_1, lean_object* x_2, lean_object* x_3, lean_object* x_4, lean_object* x_5, lean_object* x_6, lean_object* x_7, lean_object* x_8, lean_object* x_9, lean_object* x_10, lean_object* x_11, lean_object* x_12, lean_object* x_13) { -_start: -{ -lean_object* x_14; lean_object* x_15; -x_14 = lean_ctor_get(x_1, 0); -lean_inc(x_14); -lean_inc(x_14); -x_15 = l_Lean_Meta_Grind_Arith_CommRing_getPolyConst(x_14, x_4, x_5, x_6, x_7, x_8, x_9, x_10, x_11, x_12, x_13); -if (lean_obj_tag(x_15) == 0) -{ -lean_object* x_16; lean_object* x_17; lean_object* x_18; -x_16 = lean_ctor_get(x_15, 0); -lean_inc(x_16); -x_17 = lean_ctor_get(x_15, 1); -lean_inc(x_17); -lean_dec(x_15); -lean_inc(x_12); -lean_inc(x_11); -lean_inc(x_10); -lean_inc(x_9); -lean_inc(x_8); -lean_inc(x_7); -lean_inc(x_6); -lean_inc(x_5); -lean_inc(x_4); -x_18 = l_Lean_Meta_Grind_Arith_CommRing_EqCnstr_mkNullCertExt(x_1, x_4, x_5, x_6, x_7, x_8, x_9, x_10, x_11, x_12, x_17); -if (lean_obj_tag(x_18) == 0) -{ -lean_object* x_19; lean_object* x_20; lean_object* x_21; lean_object* x_22; uint8_t x_23; -x_19 = lean_ctor_get(x_18, 0); -lean_inc(x_19); -x_20 = lean_ctor_get(x_18, 1); -lean_inc(x_20); -lean_dec(x_18); -lean_inc(x_2); -x_21 = l_Lean_isTracingEnabledFor___at_Lean_Meta_Grind_Arith_CommRing_Null_setEqUnsat___spec__1(x_2, x_4, x_5, x_6, x_7, x_8, x_9, x_10, x_11, x_12, x_20); -x_22 = lean_ctor_get(x_21, 0); -lean_inc(x_22); -x_23 = lean_unbox(x_22); -lean_dec(x_22); -if (x_23 == 0) -{ -lean_object* x_24; lean_object* x_25; lean_object* x_26; -lean_dec(x_14); -lean_dec(x_2); -x_24 = lean_ctor_get(x_21, 1); -lean_inc(x_24); -lean_dec(x_21); -x_25 = lean_box(0); -x_26 = l_Lean_Meta_Grind_Arith_CommRing_Null_setEqUnsat___lambda__1(x_19, x_16, x_25, x_4, x_5, x_6, x_7, x_8, x_9, x_10, x_11, x_12, x_24); -lean_dec(x_4); -lean_dec(x_16); -lean_dec(x_19); -return x_26; } else { -uint8_t x_27; -x_27 = !lean_is_exclusive(x_21); -if (x_27 == 0) -{ -lean_object* x_28; lean_object* x_29; lean_object* x_30; -x_28 = lean_ctor_get(x_21, 1); -x_29 = lean_ctor_get(x_21, 0); -lean_dec(x_29); -x_30 = l_Lean_Meta_Grind_updateLastTag(x_5, x_6, x_7, x_8, x_9, x_10, x_11, x_12, x_28); -if (lean_obj_tag(x_30) == 0) 
+lean_object* x_99; lean_object* x_100; +x_99 = lean_ctor_get(x_21, 1); +lean_inc(x_99); +lean_dec(x_21); +x_100 = l_Lean_Meta_Grind_updateLastTag(x_5, x_6, x_7, x_8, x_9, x_10, x_11, x_12, x_99); +if (lean_obj_tag(x_100) == 0) { -lean_object* x_31; lean_object* x_32; -x_31 = lean_ctor_get(x_30, 1); -lean_inc(x_31); -lean_dec(x_30); -x_32 = l_Lean_Grind_CommRing_Poly_denoteExpr(x_14, x_4, x_5, x_6, x_7, x_8, x_9, x_10, x_11, x_12, x_31); -if (lean_obj_tag(x_32) == 0) +lean_object* x_101; lean_object* x_102; +x_101 = lean_ctor_get(x_100, 1); +lean_inc(x_101); +lean_dec(x_100); +x_102 = l_Lean_Grind_CommRing_Poly_denoteExpr___at_Lean_Meta_Grind_Arith_CommRing_getPolyConst___spec__1(x_14, x_4, x_5, x_6, x_7, x_8, x_9, x_10, x_11, x_12, x_101); +if (lean_obj_tag(x_102) == 0) { -lean_object* x_33; lean_object* x_34; lean_object* x_35; -x_33 = lean_ctor_get(x_32, 0); -lean_inc(x_33); -x_34 = lean_ctor_get(x_32, 1); -lean_inc(x_34); -lean_dec(x_32); +lean_object* x_103; lean_object* x_104; lean_object* x_105; +x_103 = lean_ctor_get(x_102, 0); +lean_inc(x_103); +x_104 = lean_ctor_get(x_102, 1); +lean_inc(x_104); +lean_dec(x_102); lean_inc(x_12); lean_inc(x_11); lean_inc(x_10); @@ -8447,130 +10202,129 @@ lean_inc(x_7); lean_inc(x_6); lean_inc(x_5); lean_inc(x_4); -x_35 = l_Lean_Meta_Grind_Arith_CommRing_Null_NullCertExt_toPoly(x_19, x_4, x_5, x_6, x_7, x_8, x_9, x_10, x_11, x_12, x_34); -if (lean_obj_tag(x_35) == 0) +x_105 = l_Lean_Meta_Grind_Arith_CommRing_Null_NullCertExt_toPoly(x_19, x_4, x_5, x_6, x_7, x_8, x_9, x_10, x_11, x_12, x_104); +if (lean_obj_tag(x_105) == 0) { -lean_object* x_36; lean_object* x_37; lean_object* x_38; -x_36 = lean_ctor_get(x_35, 0); -lean_inc(x_36); -x_37 = lean_ctor_get(x_35, 1); -lean_inc(x_37); -lean_dec(x_35); -x_38 = l_Lean_Grind_CommRing_Poly_denoteExpr(x_36, x_4, x_5, x_6, x_7, x_8, x_9, x_10, x_11, x_12, x_37); -if (lean_obj_tag(x_38) == 0) +lean_object* x_106; lean_object* x_107; lean_object* x_108; +x_106 = lean_ctor_get(x_105, 0); +lean_inc(x_106); +x_107 = lean_ctor_get(x_105, 1); +lean_inc(x_107); +lean_dec(x_105); +x_108 = l_Lean_Grind_CommRing_Poly_denoteExpr___at_Lean_Meta_Grind_Arith_CommRing_getPolyConst___spec__1(x_106, x_4, x_5, x_6, x_7, x_8, x_9, x_10, x_11, x_12, x_107); +if (lean_obj_tag(x_108) == 0) { -lean_object* x_39; lean_object* x_40; lean_object* x_41; lean_object* x_42; lean_object* x_43; lean_object* x_44; uint8_t x_45; -x_39 = lean_ctor_get(x_38, 0); -lean_inc(x_39); -x_40 = lean_ctor_get(x_38, 1); -lean_inc(x_40); -lean_dec(x_38); -x_41 = lean_ctor_get(x_19, 0); -lean_inc(x_41); -x_42 = l_Lean_MessageData_ofExpr(x_33); -x_43 = l_Lean_MessageData_ofExpr(x_39); -x_44 = l_Lean_Meta_Grind_Arith_CommRing_Null_instInhabitedPreNullCert___closed__2; -x_45 = lean_int_dec_lt(x_41, x_44); -if (x_45 == 0) +lean_object* x_109; lean_object* x_110; lean_object* x_111; lean_object* x_112; lean_object* x_113; lean_object* x_114; uint8_t x_115; +x_109 = lean_ctor_get(x_108, 0); +lean_inc(x_109); +x_110 = lean_ctor_get(x_108, 1); +lean_inc(x_110); +lean_dec(x_108); +x_111 = lean_ctor_get(x_19, 0); +lean_inc(x_111); +x_112 = l_Lean_MessageData_ofExpr(x_103); +x_113 = l_Lean_MessageData_ofExpr(x_109); +x_114 = l___private_Lean_Meta_Tactic_Grind_Arith_CommRing_DenoteExpr_0__Lean_Meta_Grind_Arith_CommRing_denoteNum___at_Lean_Meta_Grind_Arith_CommRing_getPolyConst___spec__2___closed__8; +x_115 = lean_int_dec_lt(x_111, x_114); +if (x_115 == 0) { -lean_object* x_46; lean_object* x_47; lean_object* x_48; lean_object* x_49; lean_object* x_50; lean_object* x_51; 
lean_object* x_52; lean_object* x_53; lean_object* x_54; lean_object* x_55; lean_object* x_56; lean_object* x_57; lean_object* x_58; lean_object* x_59; lean_object* x_60; lean_object* x_61; -x_46 = lean_nat_abs(x_41); -lean_dec(x_41); -x_47 = l___private_Init_Data_Repr_0__Nat_reprFast(x_46); -x_48 = lean_alloc_ctor(3, 1, 0); -lean_ctor_set(x_48, 0, x_47); -x_49 = l_Lean_MessageData_ofFormat(x_48); -x_50 = l_Lean_Meta_Grind_Arith_CommRing_throwNoNatZeroDivisors___rarg___closed__4; -lean_ctor_set_tag(x_21, 7); -lean_ctor_set(x_21, 1, x_49); -lean_ctor_set(x_21, 0, x_50); -x_51 = l_Lean_Meta_Grind_Arith_CommRing_Null_setEqUnsat___lambda__2___closed__2; -x_52 = lean_alloc_ctor(7, 2, 0); -lean_ctor_set(x_52, 0, x_21); -lean_ctor_set(x_52, 1, x_51); -x_53 = lean_alloc_ctor(7, 2, 0); -lean_ctor_set(x_53, 0, x_52); -lean_ctor_set(x_53, 1, x_42); -x_54 = l_Lean_Meta_Grind_Arith_CommRing_Null_setEqUnsat___lambda__2___closed__4; -x_55 = lean_alloc_ctor(7, 2, 0); -lean_ctor_set(x_55, 0, x_53); -lean_ctor_set(x_55, 1, x_54); -x_56 = lean_alloc_ctor(7, 2, 0); -lean_ctor_set(x_56, 0, x_55); -lean_ctor_set(x_56, 1, x_43); -x_57 = lean_alloc_ctor(7, 2, 0); -lean_ctor_set(x_57, 0, x_56); -lean_ctor_set(x_57, 1, x_50); -x_58 = l_Lean_addTrace___at_Lean_Meta_Grind_Arith_CommRing_Null_setEqUnsat___spec__3(x_2, x_57, x_4, x_5, x_6, x_7, x_8, x_9, x_10, x_11, x_12, x_40); -x_59 = lean_ctor_get(x_58, 0); -lean_inc(x_59); -x_60 = lean_ctor_get(x_58, 1); -lean_inc(x_60); -lean_dec(x_58); -x_61 = l_Lean_Meta_Grind_Arith_CommRing_Null_setEqUnsat___lambda__1(x_19, x_16, x_59, x_4, x_5, x_6, x_7, x_8, x_9, x_10, x_11, x_12, x_60); +lean_object* x_116; lean_object* x_117; lean_object* x_118; lean_object* x_119; lean_object* x_120; lean_object* x_121; lean_object* x_122; lean_object* x_123; lean_object* x_124; lean_object* x_125; lean_object* x_126; lean_object* x_127; lean_object* x_128; lean_object* x_129; lean_object* x_130; lean_object* x_131; lean_object* x_132; +x_116 = lean_nat_abs(x_111); +lean_dec(x_111); +x_117 = l___private_Init_Data_Repr_0__Nat_reprFast(x_116); +x_118 = lean_alloc_ctor(3, 1, 0); +lean_ctor_set(x_118, 0, x_117); +x_119 = l_Lean_MessageData_ofFormat(x_118); +x_120 = l_Lean_Meta_Grind_Arith_CommRing_throwNoNatZeroDivisors___rarg___closed__4; +x_121 = lean_alloc_ctor(7, 2, 0); +lean_ctor_set(x_121, 0, x_120); +lean_ctor_set(x_121, 1, x_119); +x_122 = l_Lean_Meta_Grind_Arith_CommRing_Null_setEqUnsat___lambda__2___closed__2; +x_123 = lean_alloc_ctor(7, 2, 0); +lean_ctor_set(x_123, 0, x_121); +lean_ctor_set(x_123, 1, x_122); +x_124 = lean_alloc_ctor(7, 2, 0); +lean_ctor_set(x_124, 0, x_123); +lean_ctor_set(x_124, 1, x_112); +x_125 = l_Lean_Meta_Grind_Arith_CommRing_Null_setEqUnsat___lambda__2___closed__4; +x_126 = lean_alloc_ctor(7, 2, 0); +lean_ctor_set(x_126, 0, x_124); +lean_ctor_set(x_126, 1, x_125); +x_127 = lean_alloc_ctor(7, 2, 0); +lean_ctor_set(x_127, 0, x_126); +lean_ctor_set(x_127, 1, x_113); +x_128 = lean_alloc_ctor(7, 2, 0); +lean_ctor_set(x_128, 0, x_127); +lean_ctor_set(x_128, 1, x_120); +x_129 = l_Lean_addTrace___at_Lean_Meta_Grind_Arith_CommRing_Null_setEqUnsat___spec__3(x_2, x_128, x_4, x_5, x_6, x_7, x_8, x_9, x_10, x_11, x_12, x_110); +x_130 = lean_ctor_get(x_129, 0); +lean_inc(x_130); +x_131 = lean_ctor_get(x_129, 1); +lean_inc(x_131); +lean_dec(x_129); +x_132 = l_Lean_Meta_Grind_Arith_CommRing_Null_setEqUnsat___lambda__1(x_19, x_16, x_130, x_4, x_5, x_6, x_7, x_8, x_9, x_10, x_11, x_12, x_131); lean_dec(x_4); -lean_dec(x_59); +lean_dec(x_130); lean_dec(x_16); lean_dec(x_19); 
-return x_61; +return x_132; } else { -lean_object* x_62; lean_object* x_63; lean_object* x_64; lean_object* x_65; lean_object* x_66; lean_object* x_67; lean_object* x_68; lean_object* x_69; lean_object* x_70; lean_object* x_71; lean_object* x_72; lean_object* x_73; lean_object* x_74; lean_object* x_75; lean_object* x_76; lean_object* x_77; lean_object* x_78; lean_object* x_79; lean_object* x_80; lean_object* x_81; lean_object* x_82; -x_62 = lean_nat_abs(x_41); -lean_dec(x_41); -x_63 = lean_unsigned_to_nat(1u); -x_64 = lean_nat_sub(x_62, x_63); -lean_dec(x_62); -x_65 = lean_nat_add(x_64, x_63); -lean_dec(x_64); -x_66 = l___private_Init_Data_Repr_0__Nat_reprFast(x_65); -x_67 = l_Lean_Meta_Grind_Arith_CommRing_PolyDerivation_toPreNullCert___closed__10; -x_68 = lean_string_append(x_67, x_66); -lean_dec(x_66); -x_69 = lean_alloc_ctor(3, 1, 0); -lean_ctor_set(x_69, 0, x_68); -x_70 = l_Lean_MessageData_ofFormat(x_69); -x_71 = l_Lean_Meta_Grind_Arith_CommRing_throwNoNatZeroDivisors___rarg___closed__4; -lean_ctor_set_tag(x_21, 7); -lean_ctor_set(x_21, 1, x_70); -lean_ctor_set(x_21, 0, x_71); -x_72 = l_Lean_Meta_Grind_Arith_CommRing_Null_setEqUnsat___lambda__2___closed__2; -x_73 = lean_alloc_ctor(7, 2, 0); -lean_ctor_set(x_73, 0, x_21); -lean_ctor_set(x_73, 1, x_72); -x_74 = lean_alloc_ctor(7, 2, 0); -lean_ctor_set(x_74, 0, x_73); -lean_ctor_set(x_74, 1, x_42); -x_75 = l_Lean_Meta_Grind_Arith_CommRing_Null_setEqUnsat___lambda__2___closed__4; -x_76 = lean_alloc_ctor(7, 2, 0); -lean_ctor_set(x_76, 0, x_74); -lean_ctor_set(x_76, 1, x_75); -x_77 = lean_alloc_ctor(7, 2, 0); -lean_ctor_set(x_77, 0, x_76); -lean_ctor_set(x_77, 1, x_43); -x_78 = lean_alloc_ctor(7, 2, 0); -lean_ctor_set(x_78, 0, x_77); -lean_ctor_set(x_78, 1, x_71); -x_79 = l_Lean_addTrace___at_Lean_Meta_Grind_Arith_CommRing_Null_setEqUnsat___spec__3(x_2, x_78, x_4, x_5, x_6, x_7, x_8, x_9, x_10, x_11, x_12, x_40); -x_80 = lean_ctor_get(x_79, 0); -lean_inc(x_80); -x_81 = lean_ctor_get(x_79, 1); -lean_inc(x_81); -lean_dec(x_79); -x_82 = l_Lean_Meta_Grind_Arith_CommRing_Null_setEqUnsat___lambda__1(x_19, x_16, x_80, x_4, x_5, x_6, x_7, x_8, x_9, x_10, x_11, x_12, x_81); +lean_object* x_133; lean_object* x_134; lean_object* x_135; lean_object* x_136; lean_object* x_137; lean_object* x_138; lean_object* x_139; lean_object* x_140; lean_object* x_141; lean_object* x_142; lean_object* x_143; lean_object* x_144; lean_object* x_145; lean_object* x_146; lean_object* x_147; lean_object* x_148; lean_object* x_149; lean_object* x_150; lean_object* x_151; lean_object* x_152; lean_object* x_153; lean_object* x_154; +x_133 = lean_nat_abs(x_111); +lean_dec(x_111); +x_134 = lean_unsigned_to_nat(1u); +x_135 = lean_nat_sub(x_133, x_134); +lean_dec(x_133); +x_136 = lean_nat_add(x_135, x_134); +lean_dec(x_135); +x_137 = l___private_Init_Data_Repr_0__Nat_reprFast(x_136); +x_138 = l_Lean_Meta_Grind_Arith_CommRing_PolyDerivation_toPreNullCert___closed__10; +x_139 = lean_string_append(x_138, x_137); +lean_dec(x_137); +x_140 = lean_alloc_ctor(3, 1, 0); +lean_ctor_set(x_140, 0, x_139); +x_141 = l_Lean_MessageData_ofFormat(x_140); +x_142 = l_Lean_Meta_Grind_Arith_CommRing_throwNoNatZeroDivisors___rarg___closed__4; +x_143 = lean_alloc_ctor(7, 2, 0); +lean_ctor_set(x_143, 0, x_142); +lean_ctor_set(x_143, 1, x_141); +x_144 = l_Lean_Meta_Grind_Arith_CommRing_Null_setEqUnsat___lambda__2___closed__2; +x_145 = lean_alloc_ctor(7, 2, 0); +lean_ctor_set(x_145, 0, x_143); +lean_ctor_set(x_145, 1, x_144); +x_146 = lean_alloc_ctor(7, 2, 0); +lean_ctor_set(x_146, 0, x_145); 
+lean_ctor_set(x_146, 1, x_112); +x_147 = l_Lean_Meta_Grind_Arith_CommRing_Null_setEqUnsat___lambda__2___closed__4; +x_148 = lean_alloc_ctor(7, 2, 0); +lean_ctor_set(x_148, 0, x_146); +lean_ctor_set(x_148, 1, x_147); +x_149 = lean_alloc_ctor(7, 2, 0); +lean_ctor_set(x_149, 0, x_148); +lean_ctor_set(x_149, 1, x_113); +x_150 = lean_alloc_ctor(7, 2, 0); +lean_ctor_set(x_150, 0, x_149); +lean_ctor_set(x_150, 1, x_142); +x_151 = l_Lean_addTrace___at_Lean_Meta_Grind_Arith_CommRing_Null_setEqUnsat___spec__3(x_2, x_150, x_4, x_5, x_6, x_7, x_8, x_9, x_10, x_11, x_12, x_110); +x_152 = lean_ctor_get(x_151, 0); +lean_inc(x_152); +x_153 = lean_ctor_get(x_151, 1); +lean_inc(x_153); +lean_dec(x_151); +x_154 = l_Lean_Meta_Grind_Arith_CommRing_Null_setEqUnsat___lambda__1(x_19, x_16, x_152, x_4, x_5, x_6, x_7, x_8, x_9, x_10, x_11, x_12, x_153); lean_dec(x_4); -lean_dec(x_80); +lean_dec(x_152); lean_dec(x_16); lean_dec(x_19); -return x_82; +return x_154; } } else { -uint8_t x_83; -lean_dec(x_33); -lean_free_object(x_21); +lean_object* x_155; lean_object* x_156; lean_object* x_157; lean_object* x_158; +lean_dec(x_103); lean_dec(x_19); lean_dec(x_16); lean_dec(x_12); @@ -8583,31 +10337,69 @@ lean_dec(x_6); lean_dec(x_5); lean_dec(x_4); lean_dec(x_2); -x_83 = !lean_is_exclusive(x_38); -if (x_83 == 0) -{ -return x_38; +x_155 = lean_ctor_get(x_108, 0); +lean_inc(x_155); +x_156 = lean_ctor_get(x_108, 1); +lean_inc(x_156); +if (lean_is_exclusive(x_108)) { + lean_ctor_release(x_108, 0); + lean_ctor_release(x_108, 1); + x_157 = x_108; +} else { + lean_dec_ref(x_108); + x_157 = lean_box(0); +} +if (lean_is_scalar(x_157)) { + x_158 = lean_alloc_ctor(1, 2, 0); +} else { + x_158 = x_157; +} +lean_ctor_set(x_158, 0, x_155); +lean_ctor_set(x_158, 1, x_156); +return x_158; +} } else { -lean_object* x_84; lean_object* x_85; lean_object* x_86; -x_84 = lean_ctor_get(x_38, 0); -x_85 = lean_ctor_get(x_38, 1); -lean_inc(x_85); -lean_inc(x_84); -lean_dec(x_38); -x_86 = lean_alloc_ctor(1, 2, 0); -lean_ctor_set(x_86, 0, x_84); -lean_ctor_set(x_86, 1, x_85); -return x_86; +lean_object* x_159; lean_object* x_160; lean_object* x_161; lean_object* x_162; +lean_dec(x_103); +lean_dec(x_19); +lean_dec(x_16); +lean_dec(x_12); +lean_dec(x_11); +lean_dec(x_10); +lean_dec(x_9); +lean_dec(x_8); +lean_dec(x_7); +lean_dec(x_6); +lean_dec(x_5); +lean_dec(x_4); +lean_dec(x_2); +x_159 = lean_ctor_get(x_105, 0); +lean_inc(x_159); +x_160 = lean_ctor_get(x_105, 1); +lean_inc(x_160); +if (lean_is_exclusive(x_105)) { + lean_ctor_release(x_105, 0); + lean_ctor_release(x_105, 1); + x_161 = x_105; +} else { + lean_dec_ref(x_105); + x_161 = lean_box(0); +} +if (lean_is_scalar(x_161)) { + x_162 = lean_alloc_ctor(1, 2, 0); +} else { + x_162 = x_161; } +lean_ctor_set(x_162, 0, x_159); +lean_ctor_set(x_162, 1, x_160); +return x_162; } } else { -uint8_t x_87; -lean_dec(x_33); -lean_free_object(x_21); +lean_object* x_163; lean_object* x_164; lean_object* x_165; lean_object* x_166; lean_dec(x_19); lean_dec(x_16); lean_dec(x_12); @@ -8620,32 +10412,73 @@ lean_dec(x_6); lean_dec(x_5); lean_dec(x_4); lean_dec(x_2); -x_87 = !lean_is_exclusive(x_35); -if (x_87 == 0) +x_163 = lean_ctor_get(x_102, 0); +lean_inc(x_163); +x_164 = lean_ctor_get(x_102, 1); +lean_inc(x_164); +if (lean_is_exclusive(x_102)) { + lean_ctor_release(x_102, 0); + lean_ctor_release(x_102, 1); + x_165 = x_102; +} else { + lean_dec_ref(x_102); + x_165 = lean_box(0); +} +if (lean_is_scalar(x_165)) { + x_166 = lean_alloc_ctor(1, 2, 0); +} else { + x_166 = x_165; +} +lean_ctor_set(x_166, 0, x_163); 
+lean_ctor_set(x_166, 1, x_164); +return x_166; +} +} +else { -return x_35; +lean_object* x_167; lean_object* x_168; lean_object* x_169; lean_object* x_170; +lean_dec(x_19); +lean_dec(x_16); +lean_dec(x_14); +lean_dec(x_12); +lean_dec(x_11); +lean_dec(x_10); +lean_dec(x_9); +lean_dec(x_8); +lean_dec(x_7); +lean_dec(x_6); +lean_dec(x_5); +lean_dec(x_4); +lean_dec(x_2); +x_167 = lean_ctor_get(x_100, 0); +lean_inc(x_167); +x_168 = lean_ctor_get(x_100, 1); +lean_inc(x_168); +if (lean_is_exclusive(x_100)) { + lean_ctor_release(x_100, 0); + lean_ctor_release(x_100, 1); + x_169 = x_100; +} else { + lean_dec_ref(x_100); + x_169 = lean_box(0); +} +if (lean_is_scalar(x_169)) { + x_170 = lean_alloc_ctor(1, 2, 0); +} else { + x_170 = x_169; +} +lean_ctor_set(x_170, 0, x_167); +lean_ctor_set(x_170, 1, x_168); +return x_170; } -else -{ -lean_object* x_88; lean_object* x_89; lean_object* x_90; -x_88 = lean_ctor_get(x_35, 0); -x_89 = lean_ctor_get(x_35, 1); -lean_inc(x_89); -lean_inc(x_88); -lean_dec(x_35); -x_90 = lean_alloc_ctor(1, 2, 0); -lean_ctor_set(x_90, 0, x_88); -lean_ctor_set(x_90, 1, x_89); -return x_90; } } } else { -uint8_t x_91; -lean_free_object(x_21); -lean_dec(x_19); +uint8_t x_171; lean_dec(x_16); +lean_dec(x_14); lean_dec(x_12); lean_dec(x_11); lean_dec(x_10); @@ -8656,32 +10489,29 @@ lean_dec(x_6); lean_dec(x_5); lean_dec(x_4); lean_dec(x_2); -x_91 = !lean_is_exclusive(x_32); -if (x_91 == 0) +x_171 = !lean_is_exclusive(x_18); +if (x_171 == 0) { -return x_32; +return x_18; } else { -lean_object* x_92; lean_object* x_93; lean_object* x_94; -x_92 = lean_ctor_get(x_32, 0); -x_93 = lean_ctor_get(x_32, 1); -lean_inc(x_93); -lean_inc(x_92); -lean_dec(x_32); -x_94 = lean_alloc_ctor(1, 2, 0); -lean_ctor_set(x_94, 0, x_92); -lean_ctor_set(x_94, 1, x_93); -return x_94; +lean_object* x_172; lean_object* x_173; lean_object* x_174; +x_172 = lean_ctor_get(x_18, 0); +x_173 = lean_ctor_get(x_18, 1); +lean_inc(x_173); +lean_inc(x_172); +lean_dec(x_18); +x_174 = lean_alloc_ctor(1, 2, 0); +lean_ctor_set(x_174, 0, x_172); +lean_ctor_set(x_174, 1, x_173); +return x_174; } } } else { -uint8_t x_95; -lean_free_object(x_21); -lean_dec(x_19); -lean_dec(x_16); +uint8_t x_175; lean_dec(x_14); lean_dec(x_12); lean_dec(x_11); @@ -8693,184 +10523,233 @@ lean_dec(x_6); lean_dec(x_5); lean_dec(x_4); lean_dec(x_2); -x_95 = !lean_is_exclusive(x_30); -if (x_95 == 0) +lean_dec(x_1); +x_175 = !lean_is_exclusive(x_15); +if (x_175 == 0) { -return x_30; +return x_15; } else { -lean_object* x_96; lean_object* x_97; lean_object* x_98; -x_96 = lean_ctor_get(x_30, 0); -x_97 = lean_ctor_get(x_30, 1); -lean_inc(x_97); -lean_inc(x_96); -lean_dec(x_30); -x_98 = lean_alloc_ctor(1, 2, 0); -lean_ctor_set(x_98, 0, x_96); -lean_ctor_set(x_98, 1, x_97); -return x_98; +lean_object* x_176; lean_object* x_177; lean_object* x_178; +x_176 = lean_ctor_get(x_15, 0); +x_177 = lean_ctor_get(x_15, 1); +lean_inc(x_177); +lean_inc(x_176); +lean_dec(x_15); +x_178 = lean_alloc_ctor(1, 2, 0); +lean_ctor_set(x_178, 0, x_176); +lean_ctor_set(x_178, 1, x_177); +return x_178; } } } -else +} +static lean_object* _init_l_Lean_Meta_Grind_Arith_CommRing_Null_setEqUnsat___closed__1() { +_start: { -lean_object* x_99; lean_object* x_100; -x_99 = lean_ctor_get(x_21, 1); -lean_inc(x_99); -lean_dec(x_21); -x_100 = l_Lean_Meta_Grind_updateLastTag(x_5, x_6, x_7, x_8, x_9, x_10, x_11, x_12, x_99); -if (lean_obj_tag(x_100) == 0) +lean_object* x_1; +x_1 = lean_mk_string_unchecked("assert", 6, 6); +return x_1; +} +} +static lean_object* 
_init_l_Lean_Meta_Grind_Arith_CommRing_Null_setEqUnsat___closed__2() { +_start: { -lean_object* x_101; lean_object* x_102; -x_101 = lean_ctor_get(x_100, 1); -lean_inc(x_101); -lean_dec(x_100); -x_102 = l_Lean_Grind_CommRing_Poly_denoteExpr(x_14, x_4, x_5, x_6, x_7, x_8, x_9, x_10, x_11, x_12, x_101); -if (lean_obj_tag(x_102) == 0) +lean_object* x_1; +x_1 = lean_mk_string_unchecked("unsat", 5, 5); +return x_1; +} +} +static lean_object* _init_l_Lean_Meta_Grind_Arith_CommRing_Null_setEqUnsat___closed__3() { +_start: { -lean_object* x_103; lean_object* x_104; lean_object* x_105; -x_103 = lean_ctor_get(x_102, 0); -lean_inc(x_103); -x_104 = lean_ctor_get(x_102, 1); -lean_inc(x_104); -lean_dec(x_102); -lean_inc(x_12); -lean_inc(x_11); -lean_inc(x_10); -lean_inc(x_9); -lean_inc(x_8); -lean_inc(x_7); -lean_inc(x_6); -lean_inc(x_5); -lean_inc(x_4); -x_105 = l_Lean_Meta_Grind_Arith_CommRing_Null_NullCertExt_toPoly(x_19, x_4, x_5, x_6, x_7, x_8, x_9, x_10, x_11, x_12, x_104); -if (lean_obj_tag(x_105) == 0) +lean_object* x_1; lean_object* x_2; lean_object* x_3; lean_object* x_4; lean_object* x_5; +x_1 = l_Lean_Meta_Grind_Arith_CommRing_PolyDerivation_toPreNullCert___closed__1; +x_2 = l_Lean_Meta_Grind_Arith_CommRing_PolyDerivation_toPreNullCert___closed__3; +x_3 = l_Lean_Meta_Grind_Arith_CommRing_Null_setEqUnsat___closed__1; +x_4 = l_Lean_Meta_Grind_Arith_CommRing_Null_setEqUnsat___closed__2; +x_5 = l_Lean_Name_mkStr4(x_1, x_2, x_3, x_4); +return x_5; +} +} +LEAN_EXPORT lean_object* l_Lean_Meta_Grind_Arith_CommRing_Null_setEqUnsat(lean_object* x_1, lean_object* x_2, lean_object* x_3, lean_object* x_4, lean_object* x_5, lean_object* x_6, lean_object* x_7, lean_object* x_8, lean_object* x_9, lean_object* x_10, lean_object* x_11) { +_start: { -lean_object* x_106; lean_object* x_107; lean_object* x_108; -x_106 = lean_ctor_get(x_105, 0); -lean_inc(x_106); -x_107 = lean_ctor_get(x_105, 1); -lean_inc(x_107); -lean_dec(x_105); -x_108 = l_Lean_Grind_CommRing_Poly_denoteExpr(x_106, x_4, x_5, x_6, x_7, x_8, x_9, x_10, x_11, x_12, x_107); -if (lean_obj_tag(x_108) == 0) +lean_object* x_12; lean_object* x_13; lean_object* x_14; uint8_t x_15; +x_12 = l_Lean_Meta_Grind_Arith_CommRing_Null_setEqUnsat___closed__3; +x_13 = l_Lean_isTracingEnabledFor___at_Lean_Meta_Grind_Arith_CommRing_Null_setEqUnsat___spec__1(x_12, x_2, x_3, x_4, x_5, x_6, x_7, x_8, x_9, x_10, x_11); +x_14 = lean_ctor_get(x_13, 0); +lean_inc(x_14); +x_15 = lean_unbox(x_14); +lean_dec(x_14); +if (x_15 == 0) { -lean_object* x_109; lean_object* x_110; lean_object* x_111; lean_object* x_112; lean_object* x_113; lean_object* x_114; uint8_t x_115; -x_109 = lean_ctor_get(x_108, 0); -lean_inc(x_109); -x_110 = lean_ctor_get(x_108, 1); -lean_inc(x_110); -lean_dec(x_108); -x_111 = lean_ctor_get(x_19, 0); -lean_inc(x_111); -x_112 = l_Lean_MessageData_ofExpr(x_103); -x_113 = l_Lean_MessageData_ofExpr(x_109); -x_114 = l_Lean_Meta_Grind_Arith_CommRing_Null_instInhabitedPreNullCert___closed__2; -x_115 = lean_int_dec_lt(x_111, x_114); -if (x_115 == 0) +lean_object* x_16; lean_object* x_17; lean_object* x_18; +x_16 = lean_ctor_get(x_13, 1); +lean_inc(x_16); +lean_dec(x_13); +x_17 = lean_box(0); +x_18 = l_Lean_Meta_Grind_Arith_CommRing_Null_setEqUnsat___lambda__2(x_1, x_12, x_17, x_2, x_3, x_4, x_5, x_6, x_7, x_8, x_9, x_10, x_16); +return x_18; +} +else { -lean_object* x_116; lean_object* x_117; lean_object* x_118; lean_object* x_119; lean_object* x_120; lean_object* x_121; lean_object* x_122; lean_object* x_123; lean_object* x_124; lean_object* x_125; lean_object* 
x_126; lean_object* x_127; lean_object* x_128; lean_object* x_129; lean_object* x_130; lean_object* x_131; lean_object* x_132; -x_116 = lean_nat_abs(x_111); -lean_dec(x_111); -x_117 = l___private_Init_Data_Repr_0__Nat_reprFast(x_116); -x_118 = lean_alloc_ctor(3, 1, 0); -lean_ctor_set(x_118, 0, x_117); -x_119 = l_Lean_MessageData_ofFormat(x_118); -x_120 = l_Lean_Meta_Grind_Arith_CommRing_throwNoNatZeroDivisors___rarg___closed__4; -x_121 = lean_alloc_ctor(7, 2, 0); -lean_ctor_set(x_121, 0, x_120); -lean_ctor_set(x_121, 1, x_119); -x_122 = l_Lean_Meta_Grind_Arith_CommRing_Null_setEqUnsat___lambda__2___closed__2; -x_123 = lean_alloc_ctor(7, 2, 0); -lean_ctor_set(x_123, 0, x_121); -lean_ctor_set(x_123, 1, x_122); -x_124 = lean_alloc_ctor(7, 2, 0); -lean_ctor_set(x_124, 0, x_123); -lean_ctor_set(x_124, 1, x_112); -x_125 = l_Lean_Meta_Grind_Arith_CommRing_Null_setEqUnsat___lambda__2___closed__4; -x_126 = lean_alloc_ctor(7, 2, 0); -lean_ctor_set(x_126, 0, x_124); -lean_ctor_set(x_126, 1, x_125); -x_127 = lean_alloc_ctor(7, 2, 0); -lean_ctor_set(x_127, 0, x_126); -lean_ctor_set(x_127, 1, x_113); -x_128 = lean_alloc_ctor(7, 2, 0); -lean_ctor_set(x_128, 0, x_127); -lean_ctor_set(x_128, 1, x_120); -x_129 = l_Lean_addTrace___at_Lean_Meta_Grind_Arith_CommRing_Null_setEqUnsat___spec__3(x_2, x_128, x_4, x_5, x_6, x_7, x_8, x_9, x_10, x_11, x_12, x_110); -x_130 = lean_ctor_get(x_129, 0); -lean_inc(x_130); -x_131 = lean_ctor_get(x_129, 1); -lean_inc(x_131); -lean_dec(x_129); -x_132 = l_Lean_Meta_Grind_Arith_CommRing_Null_setEqUnsat___lambda__1(x_19, x_16, x_130, x_4, x_5, x_6, x_7, x_8, x_9, x_10, x_11, x_12, x_131); +uint8_t x_19; +x_19 = !lean_is_exclusive(x_13); +if (x_19 == 0) +{ +lean_object* x_20; lean_object* x_21; lean_object* x_22; +x_20 = lean_ctor_get(x_13, 1); +x_21 = lean_ctor_get(x_13, 0); +lean_dec(x_21); +x_22 = l_Lean_Meta_Grind_updateLastTag(x_3, x_4, x_5, x_6, x_7, x_8, x_9, x_10, x_20); +if (lean_obj_tag(x_22) == 0) +{ +lean_object* x_23; lean_object* x_24; +x_23 = lean_ctor_get(x_22, 1); +lean_inc(x_23); +lean_dec(x_22); +lean_inc(x_1); +x_24 = l_Lean_Meta_Grind_Arith_CommRing_EqCnstr_denoteExpr___at_Lean_Meta_Grind_Arith_CommRing_Null_setEqUnsat___spec__4(x_1, x_2, x_3, x_4, x_5, x_6, x_7, x_8, x_9, x_10, x_23); +if (lean_obj_tag(x_24) == 0) +{ +lean_object* x_25; lean_object* x_26; lean_object* x_27; lean_object* x_28; lean_object* x_29; lean_object* x_30; lean_object* x_31; lean_object* x_32; lean_object* x_33; +x_25 = lean_ctor_get(x_24, 0); +lean_inc(x_25); +x_26 = lean_ctor_get(x_24, 1); +lean_inc(x_26); +lean_dec(x_24); +x_27 = l_Lean_MessageData_ofExpr(x_25); +x_28 = l_Lean_Meta_Grind_Arith_CommRing_throwNoNatZeroDivisors___rarg___closed__4; +lean_ctor_set_tag(x_13, 7); +lean_ctor_set(x_13, 1, x_27); +lean_ctor_set(x_13, 0, x_28); +x_29 = lean_alloc_ctor(7, 2, 0); +lean_ctor_set(x_29, 0, x_13); +lean_ctor_set(x_29, 1, x_28); +x_30 = l_Lean_addTrace___at_Lean_Meta_Grind_Arith_CommRing_Null_setEqUnsat___spec__3(x_12, x_29, x_2, x_3, x_4, x_5, x_6, x_7, x_8, x_9, x_10, x_26); +x_31 = lean_ctor_get(x_30, 0); +lean_inc(x_31); +x_32 = lean_ctor_get(x_30, 1); +lean_inc(x_32); +lean_dec(x_30); +x_33 = l_Lean_Meta_Grind_Arith_CommRing_Null_setEqUnsat___lambda__2(x_1, x_12, x_31, x_2, x_3, x_4, x_5, x_6, x_7, x_8, x_9, x_10, x_32); +lean_dec(x_31); +return x_33; +} +else +{ +uint8_t x_34; +lean_free_object(x_13); +lean_dec(x_10); +lean_dec(x_9); +lean_dec(x_8); +lean_dec(x_7); +lean_dec(x_6); +lean_dec(x_5); lean_dec(x_4); -lean_dec(x_130); -lean_dec(x_16); -lean_dec(x_19); -return x_132; 
+lean_dec(x_3); +lean_dec(x_2); +lean_dec(x_1); +x_34 = !lean_is_exclusive(x_24); +if (x_34 == 0) +{ +return x_24; } else { -lean_object* x_133; lean_object* x_134; lean_object* x_135; lean_object* x_136; lean_object* x_137; lean_object* x_138; lean_object* x_139; lean_object* x_140; lean_object* x_141; lean_object* x_142; lean_object* x_143; lean_object* x_144; lean_object* x_145; lean_object* x_146; lean_object* x_147; lean_object* x_148; lean_object* x_149; lean_object* x_150; lean_object* x_151; lean_object* x_152; lean_object* x_153; lean_object* x_154; -x_133 = lean_nat_abs(x_111); -lean_dec(x_111); -x_134 = lean_unsigned_to_nat(1u); -x_135 = lean_nat_sub(x_133, x_134); -lean_dec(x_133); -x_136 = lean_nat_add(x_135, x_134); -lean_dec(x_135); -x_137 = l___private_Init_Data_Repr_0__Nat_reprFast(x_136); -x_138 = l_Lean_Meta_Grind_Arith_CommRing_PolyDerivation_toPreNullCert___closed__10; -x_139 = lean_string_append(x_138, x_137); -lean_dec(x_137); -x_140 = lean_alloc_ctor(3, 1, 0); -lean_ctor_set(x_140, 0, x_139); -x_141 = l_Lean_MessageData_ofFormat(x_140); -x_142 = l_Lean_Meta_Grind_Arith_CommRing_throwNoNatZeroDivisors___rarg___closed__4; -x_143 = lean_alloc_ctor(7, 2, 0); -lean_ctor_set(x_143, 0, x_142); -lean_ctor_set(x_143, 1, x_141); -x_144 = l_Lean_Meta_Grind_Arith_CommRing_Null_setEqUnsat___lambda__2___closed__2; -x_145 = lean_alloc_ctor(7, 2, 0); -lean_ctor_set(x_145, 0, x_143); -lean_ctor_set(x_145, 1, x_144); -x_146 = lean_alloc_ctor(7, 2, 0); -lean_ctor_set(x_146, 0, x_145); -lean_ctor_set(x_146, 1, x_112); -x_147 = l_Lean_Meta_Grind_Arith_CommRing_Null_setEqUnsat___lambda__2___closed__4; -x_148 = lean_alloc_ctor(7, 2, 0); -lean_ctor_set(x_148, 0, x_146); -lean_ctor_set(x_148, 1, x_147); -x_149 = lean_alloc_ctor(7, 2, 0); -lean_ctor_set(x_149, 0, x_148); -lean_ctor_set(x_149, 1, x_113); -x_150 = lean_alloc_ctor(7, 2, 0); -lean_ctor_set(x_150, 0, x_149); -lean_ctor_set(x_150, 1, x_142); -x_151 = l_Lean_addTrace___at_Lean_Meta_Grind_Arith_CommRing_Null_setEqUnsat___spec__3(x_2, x_150, x_4, x_5, x_6, x_7, x_8, x_9, x_10, x_11, x_12, x_110); -x_152 = lean_ctor_get(x_151, 0); -lean_inc(x_152); -x_153 = lean_ctor_get(x_151, 1); -lean_inc(x_153); -lean_dec(x_151); -x_154 = l_Lean_Meta_Grind_Arith_CommRing_Null_setEqUnsat___lambda__1(x_19, x_16, x_152, x_4, x_5, x_6, x_7, x_8, x_9, x_10, x_11, x_12, x_153); +lean_object* x_35; lean_object* x_36; lean_object* x_37; +x_35 = lean_ctor_get(x_24, 0); +x_36 = lean_ctor_get(x_24, 1); +lean_inc(x_36); +lean_inc(x_35); +lean_dec(x_24); +x_37 = lean_alloc_ctor(1, 2, 0); +lean_ctor_set(x_37, 0, x_35); +lean_ctor_set(x_37, 1, x_36); +return x_37; +} +} +} +else +{ +uint8_t x_38; +lean_free_object(x_13); +lean_dec(x_10); +lean_dec(x_9); +lean_dec(x_8); +lean_dec(x_7); +lean_dec(x_6); +lean_dec(x_5); lean_dec(x_4); -lean_dec(x_152); -lean_dec(x_16); -lean_dec(x_19); -return x_154; +lean_dec(x_3); +lean_dec(x_2); +lean_dec(x_1); +x_38 = !lean_is_exclusive(x_22); +if (x_38 == 0) +{ +return x_22; +} +else +{ +lean_object* x_39; lean_object* x_40; lean_object* x_41; +x_39 = lean_ctor_get(x_22, 0); +x_40 = lean_ctor_get(x_22, 1); +lean_inc(x_40); +lean_inc(x_39); +lean_dec(x_22); +x_41 = lean_alloc_ctor(1, 2, 0); +lean_ctor_set(x_41, 0, x_39); +lean_ctor_set(x_41, 1, x_40); +return x_41; +} } } else { -lean_object* x_155; lean_object* x_156; lean_object* x_157; lean_object* x_158; -lean_dec(x_103); -lean_dec(x_19); -lean_dec(x_16); -lean_dec(x_12); -lean_dec(x_11); +lean_object* x_42; lean_object* x_43; +x_42 = lean_ctor_get(x_13, 1); +lean_inc(x_42); 
+lean_dec(x_13); +x_43 = l_Lean_Meta_Grind_updateLastTag(x_3, x_4, x_5, x_6, x_7, x_8, x_9, x_10, x_42); +if (lean_obj_tag(x_43) == 0) +{ +lean_object* x_44; lean_object* x_45; +x_44 = lean_ctor_get(x_43, 1); +lean_inc(x_44); +lean_dec(x_43); +lean_inc(x_1); +x_45 = l_Lean_Meta_Grind_Arith_CommRing_EqCnstr_denoteExpr___at_Lean_Meta_Grind_Arith_CommRing_Null_setEqUnsat___spec__4(x_1, x_2, x_3, x_4, x_5, x_6, x_7, x_8, x_9, x_10, x_44); +if (lean_obj_tag(x_45) == 0) +{ +lean_object* x_46; lean_object* x_47; lean_object* x_48; lean_object* x_49; lean_object* x_50; lean_object* x_51; lean_object* x_52; lean_object* x_53; lean_object* x_54; lean_object* x_55; +x_46 = lean_ctor_get(x_45, 0); +lean_inc(x_46); +x_47 = lean_ctor_get(x_45, 1); +lean_inc(x_47); +lean_dec(x_45); +x_48 = l_Lean_MessageData_ofExpr(x_46); +x_49 = l_Lean_Meta_Grind_Arith_CommRing_throwNoNatZeroDivisors___rarg___closed__4; +x_50 = lean_alloc_ctor(7, 2, 0); +lean_ctor_set(x_50, 0, x_49); +lean_ctor_set(x_50, 1, x_48); +x_51 = lean_alloc_ctor(7, 2, 0); +lean_ctor_set(x_51, 0, x_50); +lean_ctor_set(x_51, 1, x_49); +x_52 = l_Lean_addTrace___at_Lean_Meta_Grind_Arith_CommRing_Null_setEqUnsat___spec__3(x_12, x_51, x_2, x_3, x_4, x_5, x_6, x_7, x_8, x_9, x_10, x_47); +x_53 = lean_ctor_get(x_52, 0); +lean_inc(x_53); +x_54 = lean_ctor_get(x_52, 1); +lean_inc(x_54); +lean_dec(x_52); +x_55 = l_Lean_Meta_Grind_Arith_CommRing_Null_setEqUnsat___lambda__2(x_1, x_12, x_53, x_2, x_3, x_4, x_5, x_6, x_7, x_8, x_9, x_10, x_54); +lean_dec(x_53); +return x_55; +} +else +{ +lean_object* x_56; lean_object* x_57; lean_object* x_58; lean_object* x_59; lean_dec(x_10); lean_dec(x_9); lean_dec(x_8); @@ -8878,37 +10757,34 @@ lean_dec(x_7); lean_dec(x_6); lean_dec(x_5); lean_dec(x_4); +lean_dec(x_3); lean_dec(x_2); -x_155 = lean_ctor_get(x_108, 0); -lean_inc(x_155); -x_156 = lean_ctor_get(x_108, 1); -lean_inc(x_156); -if (lean_is_exclusive(x_108)) { - lean_ctor_release(x_108, 0); - lean_ctor_release(x_108, 1); - x_157 = x_108; +lean_dec(x_1); +x_56 = lean_ctor_get(x_45, 0); +lean_inc(x_56); +x_57 = lean_ctor_get(x_45, 1); +lean_inc(x_57); +if (lean_is_exclusive(x_45)) { + lean_ctor_release(x_45, 0); + lean_ctor_release(x_45, 1); + x_58 = x_45; } else { - lean_dec_ref(x_108); - x_157 = lean_box(0); + lean_dec_ref(x_45); + x_58 = lean_box(0); } -if (lean_is_scalar(x_157)) { - x_158 = lean_alloc_ctor(1, 2, 0); +if (lean_is_scalar(x_58)) { + x_59 = lean_alloc_ctor(1, 2, 0); } else { - x_158 = x_157; + x_59 = x_58; } -lean_ctor_set(x_158, 0, x_155); -lean_ctor_set(x_158, 1, x_156); -return x_158; +lean_ctor_set(x_59, 0, x_56); +lean_ctor_set(x_59, 1, x_57); +return x_59; } } else { -lean_object* x_159; lean_object* x_160; lean_object* x_161; lean_object* x_162; -lean_dec(x_103); -lean_dec(x_19); -lean_dec(x_16); -lean_dec(x_12); -lean_dec(x_11); +lean_object* x_60; lean_object* x_61; lean_object* x_62; lean_object* x_63; lean_dec(x_10); lean_dec(x_9); lean_dec(x_8); @@ -8916,36 +10792,56 @@ lean_dec(x_7); lean_dec(x_6); lean_dec(x_5); lean_dec(x_4); +lean_dec(x_3); lean_dec(x_2); -x_159 = lean_ctor_get(x_105, 0); -lean_inc(x_159); -x_160 = lean_ctor_get(x_105, 1); -lean_inc(x_160); -if (lean_is_exclusive(x_105)) { - lean_ctor_release(x_105, 0); - lean_ctor_release(x_105, 1); - x_161 = x_105; +lean_dec(x_1); +x_60 = lean_ctor_get(x_43, 0); +lean_inc(x_60); +x_61 = lean_ctor_get(x_43, 1); +lean_inc(x_61); +if (lean_is_exclusive(x_43)) { + lean_ctor_release(x_43, 0); + lean_ctor_release(x_43, 1); + x_62 = x_43; } else { - lean_dec_ref(x_105); - x_161 = 
lean_box(0); + lean_dec_ref(x_43); + x_62 = lean_box(0); } -if (lean_is_scalar(x_161)) { - x_162 = lean_alloc_ctor(1, 2, 0); +if (lean_is_scalar(x_62)) { + x_63 = lean_alloc_ctor(1, 2, 0); } else { - x_162 = x_161; + x_63 = x_62; } -lean_ctor_set(x_162, 0, x_159); -lean_ctor_set(x_162, 1, x_160); -return x_162; +lean_ctor_set(x_63, 0, x_60); +lean_ctor_set(x_63, 1, x_61); +return x_63; } } -else +} +} +} +LEAN_EXPORT lean_object* l_Lean_isTracingEnabledFor___at_Lean_Meta_Grind_Arith_CommRing_Null_setEqUnsat___spec__1___boxed(lean_object* x_1, lean_object* x_2, lean_object* x_3, lean_object* x_4, lean_object* x_5, lean_object* x_6, lean_object* x_7, lean_object* x_8, lean_object* x_9, lean_object* x_10, lean_object* x_11) { +_start: { -lean_object* x_163; lean_object* x_164; lean_object* x_165; lean_object* x_166; -lean_dec(x_19); -lean_dec(x_16); -lean_dec(x_12); -lean_dec(x_11); +lean_object* x_12; +x_12 = l_Lean_isTracingEnabledFor___at_Lean_Meta_Grind_Arith_CommRing_Null_setEqUnsat___spec__1(x_1, x_2, x_3, x_4, x_5, x_6, x_7, x_8, x_9, x_10, x_11); +lean_dec(x_10); +lean_dec(x_9); +lean_dec(x_8); +lean_dec(x_7); +lean_dec(x_6); +lean_dec(x_5); +lean_dec(x_4); +lean_dec(x_3); +lean_dec(x_2); +return x_12; +} +} +LEAN_EXPORT lean_object* l_Lean_throwError___at_Lean_Meta_Grind_Arith_CommRing_Null_setEqUnsat___spec__2___boxed(lean_object* x_1, lean_object* x_2, lean_object* x_3, lean_object* x_4, lean_object* x_5, lean_object* x_6, lean_object* x_7, lean_object* x_8, lean_object* x_9, lean_object* x_10, lean_object* x_11) { +_start: +{ +lean_object* x_12; +x_12 = l_Lean_throwError___at_Lean_Meta_Grind_Arith_CommRing_Null_setEqUnsat___spec__2(x_1, x_2, x_3, x_4, x_5, x_6, x_7, x_8, x_9, x_10, x_11); lean_dec(x_10); lean_dec(x_9); lean_dec(x_8); @@ -8953,36 +10849,16 @@ lean_dec(x_7); lean_dec(x_6); lean_dec(x_5); lean_dec(x_4); +lean_dec(x_3); lean_dec(x_2); -x_163 = lean_ctor_get(x_102, 0); -lean_inc(x_163); -x_164 = lean_ctor_get(x_102, 1); -lean_inc(x_164); -if (lean_is_exclusive(x_102)) { - lean_ctor_release(x_102, 0); - lean_ctor_release(x_102, 1); - x_165 = x_102; -} else { - lean_dec_ref(x_102); - x_165 = lean_box(0); -} -if (lean_is_scalar(x_165)) { - x_166 = lean_alloc_ctor(1, 2, 0); -} else { - x_166 = x_165; -} -lean_ctor_set(x_166, 0, x_163); -lean_ctor_set(x_166, 1, x_164); -return x_166; +return x_12; } } -else +LEAN_EXPORT lean_object* l_Lean_addTrace___at_Lean_Meta_Grind_Arith_CommRing_Null_setEqUnsat___spec__3___boxed(lean_object* x_1, lean_object* x_2, lean_object* x_3, lean_object* x_4, lean_object* x_5, lean_object* x_6, lean_object* x_7, lean_object* x_8, lean_object* x_9, lean_object* x_10, lean_object* x_11, lean_object* x_12) { +_start: { -lean_object* x_167; lean_object* x_168; lean_object* x_169; lean_object* x_170; -lean_dec(x_19); -lean_dec(x_16); -lean_dec(x_14); -lean_dec(x_12); +lean_object* x_13; +x_13 = l_Lean_addTrace___at_Lean_Meta_Grind_Arith_CommRing_Null_setEqUnsat___spec__3(x_1, x_2, x_3, x_4, x_5, x_6, x_7, x_8, x_9, x_10, x_11, x_12); lean_dec(x_11); lean_dec(x_10); lean_dec(x_9); @@ -8991,37 +10867,15 @@ lean_dec(x_7); lean_dec(x_6); lean_dec(x_5); lean_dec(x_4); -lean_dec(x_2); -x_167 = lean_ctor_get(x_100, 0); -lean_inc(x_167); -x_168 = lean_ctor_get(x_100, 1); -lean_inc(x_168); -if (lean_is_exclusive(x_100)) { - lean_ctor_release(x_100, 0); - lean_ctor_release(x_100, 1); - x_169 = x_100; -} else { - lean_dec_ref(x_100); - x_169 = lean_box(0); -} -if (lean_is_scalar(x_169)) { - x_170 = lean_alloc_ctor(1, 2, 0); -} else { - x_170 = x_169; -} 
-lean_ctor_set(x_170, 0, x_167); -lean_ctor_set(x_170, 1, x_168); -return x_170; -} -} +lean_dec(x_3); +return x_13; } } -else +LEAN_EXPORT lean_object* l___private_Lean_Meta_Tactic_Grind_Arith_CommRing_DenoteExpr_0__Lean_Meta_Grind_Arith_CommRing_mkEq___at_Lean_Meta_Grind_Arith_CommRing_Null_setEqUnsat___spec__5___boxed(lean_object* x_1, lean_object* x_2, lean_object* x_3, lean_object* x_4, lean_object* x_5, lean_object* x_6, lean_object* x_7, lean_object* x_8, lean_object* x_9, lean_object* x_10, lean_object* x_11, lean_object* x_12) { +_start: { -uint8_t x_171; -lean_dec(x_16); -lean_dec(x_14); -lean_dec(x_12); +lean_object* x_13; +x_13 = l___private_Lean_Meta_Tactic_Grind_Arith_CommRing_DenoteExpr_0__Lean_Meta_Grind_Arith_CommRing_mkEq___at_Lean_Meta_Grind_Arith_CommRing_Null_setEqUnsat___spec__5(x_1, x_2, x_3, x_4, x_5, x_6, x_7, x_8, x_9, x_10, x_11, x_12); lean_dec(x_11); lean_dec(x_10); lean_dec(x_9); @@ -9030,33 +10884,15 @@ lean_dec(x_7); lean_dec(x_6); lean_dec(x_5); lean_dec(x_4); -lean_dec(x_2); -x_171 = !lean_is_exclusive(x_18); -if (x_171 == 0) -{ -return x_18; -} -else -{ -lean_object* x_172; lean_object* x_173; lean_object* x_174; -x_172 = lean_ctor_get(x_18, 0); -x_173 = lean_ctor_get(x_18, 1); -lean_inc(x_173); -lean_inc(x_172); -lean_dec(x_18); -x_174 = lean_alloc_ctor(1, 2, 0); -lean_ctor_set(x_174, 0, x_172); -lean_ctor_set(x_174, 1, x_173); -return x_174; -} +lean_dec(x_3); +return x_13; } } -else +LEAN_EXPORT lean_object* l_Lean_Meta_Grind_Arith_CommRing_EqCnstr_denoteExpr___at_Lean_Meta_Grind_Arith_CommRing_Null_setEqUnsat___spec__4___boxed(lean_object* x_1, lean_object* x_2, lean_object* x_3, lean_object* x_4, lean_object* x_5, lean_object* x_6, lean_object* x_7, lean_object* x_8, lean_object* x_9, lean_object* x_10, lean_object* x_11) { +_start: { -uint8_t x_175; -lean_dec(x_14); -lean_dec(x_12); -lean_dec(x_11); +lean_object* x_12; +x_12 = l_Lean_Meta_Grind_Arith_CommRing_EqCnstr_denoteExpr___at_Lean_Meta_Grind_Arith_CommRing_Null_setEqUnsat___spec__4(x_1, x_2, x_3, x_4, x_5, x_6, x_7, x_8, x_9, x_10, x_11); lean_dec(x_10); lean_dec(x_9); lean_dec(x_8); @@ -9064,131 +10900,133 @@ lean_dec(x_7); lean_dec(x_6); lean_dec(x_5); lean_dec(x_4); +lean_dec(x_3); lean_dec(x_2); -lean_dec(x_1); -x_175 = !lean_is_exclusive(x_15); -if (x_175 == 0) -{ -return x_15; +return x_12; } -else +} +LEAN_EXPORT lean_object* l_Lean_Meta_Grind_Arith_CommRing_Null_setEqUnsat___lambda__1___boxed(lean_object* x_1, lean_object* x_2, lean_object* x_3, lean_object* x_4, lean_object* x_5, lean_object* x_6, lean_object* x_7, lean_object* x_8, lean_object* x_9, lean_object* x_10, lean_object* x_11, lean_object* x_12, lean_object* x_13) { +_start: { -lean_object* x_176; lean_object* x_177; lean_object* x_178; -x_176 = lean_ctor_get(x_15, 0); -x_177 = lean_ctor_get(x_15, 1); -lean_inc(x_177); -lean_inc(x_176); -lean_dec(x_15); -x_178 = lean_alloc_ctor(1, 2, 0); -lean_ctor_set(x_178, 0, x_176); -lean_ctor_set(x_178, 1, x_177); -return x_178; +lean_object* x_14; +x_14 = l_Lean_Meta_Grind_Arith_CommRing_Null_setEqUnsat___lambda__1(x_1, x_2, x_3, x_4, x_5, x_6, x_7, x_8, x_9, x_10, x_11, x_12, x_13); +lean_dec(x_4); +lean_dec(x_3); +lean_dec(x_2); +lean_dec(x_1); +return x_14; } } +LEAN_EXPORT lean_object* l_Lean_Meta_Grind_Arith_CommRing_Null_setEqUnsat___lambda__2___boxed(lean_object* x_1, lean_object* x_2, lean_object* x_3, lean_object* x_4, lean_object* x_5, lean_object* x_6, lean_object* x_7, lean_object* x_8, lean_object* x_9, lean_object* x_10, lean_object* x_11, lean_object* x_12, 
lean_object* x_13) { +_start: +{ +lean_object* x_14; +x_14 = l_Lean_Meta_Grind_Arith_CommRing_Null_setEqUnsat___lambda__2(x_1, x_2, x_3, x_4, x_5, x_6, x_7, x_8, x_9, x_10, x_11, x_12, x_13); +lean_dec(x_3); +return x_14; } } -static lean_object* _init_l_Lean_Meta_Grind_Arith_CommRing_Null_setEqUnsat___closed__1() { +LEAN_EXPORT lean_object* l_Lean_Meta_Grind_Arith_CommRing_nonzeroCharInst_x3f___at_Lean_Meta_Grind_Arith_CommRing_Null_setDiseqUnsat___spec__1___lambda__1(lean_object* x_1, lean_object* x_2, lean_object* x_3, lean_object* x_4, lean_object* x_5, lean_object* x_6, lean_object* x_7, lean_object* x_8, lean_object* x_9, lean_object* x_10, lean_object* x_11) { _start: { -lean_object* x_1; -x_1 = lean_mk_string_unchecked("assert", 6, 6); -return x_1; +lean_object* x_12; lean_object* x_13; +x_12 = lean_box(0); +x_13 = lean_alloc_ctor(0, 2, 0); +lean_ctor_set(x_13, 0, x_12); +lean_ctor_set(x_13, 1, x_11); +return x_13; } } -static lean_object* _init_l_Lean_Meta_Grind_Arith_CommRing_Null_setEqUnsat___closed__2() { +static lean_object* _init_l_Lean_Meta_Grind_Arith_CommRing_nonzeroCharInst_x3f___at_Lean_Meta_Grind_Arith_CommRing_Null_setDiseqUnsat___spec__1___closed__1() { _start: { lean_object* x_1; -x_1 = lean_mk_string_unchecked("unsat", 5, 5); +x_1 = lean_alloc_closure((void*)(l_Lean_Meta_Grind_Arith_CommRing_nonzeroCharInst_x3f___at_Lean_Meta_Grind_Arith_CommRing_Null_setDiseqUnsat___spec__1___lambda__1___boxed), 11, 0); return x_1; } } -static lean_object* _init_l_Lean_Meta_Grind_Arith_CommRing_Null_setEqUnsat___closed__3() { +LEAN_EXPORT lean_object* l_Lean_Meta_Grind_Arith_CommRing_nonzeroCharInst_x3f___at_Lean_Meta_Grind_Arith_CommRing_Null_setDiseqUnsat___spec__1(lean_object* x_1, lean_object* x_2, lean_object* x_3, lean_object* x_4, lean_object* x_5, lean_object* x_6, lean_object* x_7, lean_object* x_8, lean_object* x_9, lean_object* x_10) { _start: { -lean_object* x_1; lean_object* x_2; lean_object* x_3; lean_object* x_4; lean_object* x_5; -x_1 = l_Lean_Meta_Grind_Arith_CommRing_PolyDerivation_toPreNullCert___closed__1; -x_2 = l_Lean_Meta_Grind_Arith_CommRing_PolyDerivation_toPreNullCert___closed__3; -x_3 = l_Lean_Meta_Grind_Arith_CommRing_Null_setEqUnsat___closed__1; -x_4 = l_Lean_Meta_Grind_Arith_CommRing_Null_setEqUnsat___closed__2; -x_5 = l_Lean_Name_mkStr4(x_1, x_2, x_3, x_4); -return x_5; -} -} -LEAN_EXPORT lean_object* l_Lean_Meta_Grind_Arith_CommRing_Null_setEqUnsat(lean_object* x_1, lean_object* x_2, lean_object* x_3, lean_object* x_4, lean_object* x_5, lean_object* x_6, lean_object* x_7, lean_object* x_8, lean_object* x_9, lean_object* x_10, lean_object* x_11) { -_start: +lean_object* x_11; +x_11 = l_Lean_Meta_Grind_Arith_CommRing_RingM_getRing(x_1, x_2, x_3, x_4, x_5, x_6, x_7, x_8, x_9, x_10); +if (lean_obj_tag(x_11) == 0) { -lean_object* x_12; lean_object* x_13; lean_object* x_14; uint8_t x_15; -x_12 = l_Lean_Meta_Grind_Arith_CommRing_Null_setEqUnsat___closed__3; -x_13 = l_Lean_isTracingEnabledFor___at_Lean_Meta_Grind_Arith_CommRing_Null_setEqUnsat___spec__1(x_12, x_2, x_3, x_4, x_5, x_6, x_7, x_8, x_9, x_10, x_11); -x_14 = lean_ctor_get(x_13, 0); -lean_inc(x_14); -x_15 = lean_unbox(x_14); -lean_dec(x_14); -if (x_15 == 0) +uint8_t x_12; +x_12 = !lean_is_exclusive(x_11); +if (x_12 == 0) { -lean_object* x_16; lean_object* x_17; lean_object* x_18; -x_16 = lean_ctor_get(x_13, 1); +lean_object* x_13; lean_object* x_14; lean_object* x_15; lean_object* x_16; +x_13 = lean_ctor_get(x_11, 0); +x_14 = lean_ctor_get(x_11, 1); +x_15 = 
l_Lean_Meta_Grind_Arith_CommRing_nonzeroCharInst_x3f___at_Lean_Meta_Grind_Arith_CommRing_Null_setDiseqUnsat___spec__1___closed__1; +x_16 = lean_ctor_get(x_13, 4); lean_inc(x_16); lean_dec(x_13); +if (lean_obj_tag(x_16) == 0) +{ +lean_object* x_17; lean_object* x_18; +lean_free_object(x_11); x_17 = lean_box(0); -x_18 = l_Lean_Meta_Grind_Arith_CommRing_Null_setEqUnsat___lambda__2(x_1, x_12, x_17, x_2, x_3, x_4, x_5, x_6, x_7, x_8, x_9, x_10, x_16); +x_18 = lean_apply_11(x_15, x_17, x_1, x_2, x_3, x_4, x_5, x_6, x_7, x_8, x_9, x_14); return x_18; } else { uint8_t x_19; -x_19 = !lean_is_exclusive(x_13); +x_19 = !lean_is_exclusive(x_16); if (x_19 == 0) { -lean_object* x_20; lean_object* x_21; lean_object* x_22; -x_20 = lean_ctor_get(x_13, 1); -x_21 = lean_ctor_get(x_13, 0); -lean_dec(x_21); -x_22 = l_Lean_Meta_Grind_updateLastTag(x_3, x_4, x_5, x_6, x_7, x_8, x_9, x_10, x_20); -if (lean_obj_tag(x_22) == 0) +lean_object* x_20; uint8_t x_21; +x_20 = lean_ctor_get(x_16, 0); +x_21 = !lean_is_exclusive(x_20); +if (x_21 == 0) { -lean_object* x_23; lean_object* x_24; -x_23 = lean_ctor_get(x_22, 1); -lean_inc(x_23); -lean_dec(x_22); -lean_inc(x_10); -lean_inc(x_9); -lean_inc(x_8); -lean_inc(x_7); -lean_inc(x_1); -x_24 = l_Lean_Meta_Grind_Arith_CommRing_EqCnstr_denoteExpr(x_1, x_2, x_3, x_4, x_5, x_6, x_7, x_8, x_9, x_10, x_23); -if (lean_obj_tag(x_24) == 0) +lean_object* x_22; lean_object* x_23; lean_object* x_24; uint8_t x_25; +x_22 = lean_ctor_get(x_20, 0); +x_23 = lean_ctor_get(x_20, 1); +x_24 = lean_unsigned_to_nat(0u); +x_25 = lean_nat_dec_eq(x_23, x_24); +if (x_25 == 0) { -lean_object* x_25; lean_object* x_26; lean_object* x_27; lean_object* x_28; lean_object* x_29; lean_object* x_30; lean_object* x_31; lean_object* x_32; lean_object* x_33; -x_25 = lean_ctor_get(x_24, 0); -lean_inc(x_25); -x_26 = lean_ctor_get(x_24, 1); -lean_inc(x_26); -lean_dec(x_24); -x_27 = l_Lean_MessageData_ofExpr(x_25); -x_28 = l_Lean_Meta_Grind_Arith_CommRing_throwNoNatZeroDivisors___rarg___closed__4; -lean_ctor_set_tag(x_13, 7); -lean_ctor_set(x_13, 1, x_27); -lean_ctor_set(x_13, 0, x_28); -x_29 = lean_alloc_ctor(7, 2, 0); -lean_ctor_set(x_29, 0, x_13); -lean_ctor_set(x_29, 1, x_28); -x_30 = l_Lean_addTrace___at_Lean_Meta_Grind_Arith_CommRing_Null_setEqUnsat___spec__3(x_12, x_29, x_2, x_3, x_4, x_5, x_6, x_7, x_8, x_9, x_10, x_26); -x_31 = lean_ctor_get(x_30, 0); -lean_inc(x_31); -x_32 = lean_ctor_get(x_30, 1); -lean_inc(x_32); -lean_dec(x_30); -x_33 = l_Lean_Meta_Grind_Arith_CommRing_Null_setEqUnsat___lambda__2(x_1, x_12, x_31, x_2, x_3, x_4, x_5, x_6, x_7, x_8, x_9, x_10, x_32); -lean_dec(x_31); -return x_33; +lean_dec(x_9); +lean_dec(x_8); +lean_dec(x_7); +lean_dec(x_6); +lean_dec(x_5); +lean_dec(x_4); +lean_dec(x_3); +lean_dec(x_2); +lean_dec(x_1); +lean_ctor_set(x_11, 0, x_16); +return x_11; } else { -uint8_t x_34; -lean_free_object(x_13); -lean_dec(x_10); +lean_object* x_26; lean_object* x_27; +lean_free_object(x_20); +lean_dec(x_23); +lean_dec(x_22); +lean_free_object(x_16); +lean_free_object(x_11); +x_26 = lean_box(0); +x_27 = lean_apply_11(x_15, x_26, x_1, x_2, x_3, x_4, x_5, x_6, x_7, x_8, x_9, x_14); +return x_27; +} +} +else +{ +lean_object* x_28; lean_object* x_29; lean_object* x_30; uint8_t x_31; +x_28 = lean_ctor_get(x_20, 0); +x_29 = lean_ctor_get(x_20, 1); +lean_inc(x_29); +lean_inc(x_28); +lean_dec(x_20); +x_30 = lean_unsigned_to_nat(0u); +x_31 = lean_nat_dec_eq(x_29, x_30); +if (x_31 == 0) +{ +lean_object* x_32; lean_dec(x_9); lean_dec(x_8); lean_dec(x_7); @@ -9198,31 +11036,49 @@ lean_dec(x_4); 
lean_dec(x_3); lean_dec(x_2); lean_dec(x_1); -x_34 = !lean_is_exclusive(x_24); -if (x_34 == 0) +x_32 = lean_alloc_ctor(0, 2, 0); +lean_ctor_set(x_32, 0, x_28); +lean_ctor_set(x_32, 1, x_29); +lean_ctor_set(x_16, 0, x_32); +lean_ctor_set(x_11, 0, x_16); +return x_11; +} +else { -return x_24; +lean_object* x_33; lean_object* x_34; +lean_dec(x_29); +lean_dec(x_28); +lean_free_object(x_16); +lean_free_object(x_11); +x_33 = lean_box(0); +x_34 = lean_apply_11(x_15, x_33, x_1, x_2, x_3, x_4, x_5, x_6, x_7, x_8, x_9, x_14); +return x_34; +} +} } else { -lean_object* x_35; lean_object* x_36; lean_object* x_37; -x_35 = lean_ctor_get(x_24, 0); -x_36 = lean_ctor_get(x_24, 1); -lean_inc(x_36); +lean_object* x_35; lean_object* x_36; lean_object* x_37; lean_object* x_38; lean_object* x_39; uint8_t x_40; +x_35 = lean_ctor_get(x_16, 0); lean_inc(x_35); -lean_dec(x_24); -x_37 = lean_alloc_ctor(1, 2, 0); -lean_ctor_set(x_37, 0, x_35); -lean_ctor_set(x_37, 1, x_36); -return x_37; -} -} +lean_dec(x_16); +x_36 = lean_ctor_get(x_35, 0); +lean_inc(x_36); +x_37 = lean_ctor_get(x_35, 1); +lean_inc(x_37); +if (lean_is_exclusive(x_35)) { + lean_ctor_release(x_35, 0); + lean_ctor_release(x_35, 1); + x_38 = x_35; +} else { + lean_dec_ref(x_35); + x_38 = lean_box(0); } -else +x_39 = lean_unsigned_to_nat(0u); +x_40 = lean_nat_dec_eq(x_37, x_39); +if (x_40 == 0) { -uint8_t x_38; -lean_free_object(x_13); -lean_dec(x_10); +lean_object* x_41; lean_object* x_42; lean_dec(x_9); lean_dec(x_8); lean_dec(x_7); @@ -9232,75 +11088,80 @@ lean_dec(x_4); lean_dec(x_3); lean_dec(x_2); lean_dec(x_1); -x_38 = !lean_is_exclusive(x_22); -if (x_38 == 0) -{ -return x_22; +if (lean_is_scalar(x_38)) { + x_41 = lean_alloc_ctor(0, 2, 0); +} else { + x_41 = x_38; +} +lean_ctor_set(x_41, 0, x_36); +lean_ctor_set(x_41, 1, x_37); +x_42 = lean_alloc_ctor(1, 1, 0); +lean_ctor_set(x_42, 0, x_41); +lean_ctor_set(x_11, 0, x_42); +return x_11; } else { -lean_object* x_39; lean_object* x_40; lean_object* x_41; -x_39 = lean_ctor_get(x_22, 0); -x_40 = lean_ctor_get(x_22, 1); -lean_inc(x_40); -lean_inc(x_39); -lean_dec(x_22); -x_41 = lean_alloc_ctor(1, 2, 0); -lean_ctor_set(x_41, 0, x_39); -lean_ctor_set(x_41, 1, x_40); -return x_41; +lean_object* x_43; lean_object* x_44; +lean_dec(x_38); +lean_dec(x_37); +lean_dec(x_36); +lean_free_object(x_11); +x_43 = lean_box(0); +x_44 = lean_apply_11(x_15, x_43, x_1, x_2, x_3, x_4, x_5, x_6, x_7, x_8, x_9, x_14); +return x_44; +} } } } else { -lean_object* x_42; lean_object* x_43; -x_42 = lean_ctor_get(x_13, 1); -lean_inc(x_42); -lean_dec(x_13); -x_43 = l_Lean_Meta_Grind_updateLastTag(x_3, x_4, x_5, x_6, x_7, x_8, x_9, x_10, x_42); -if (lean_obj_tag(x_43) == 0) -{ -lean_object* x_44; lean_object* x_45; -x_44 = lean_ctor_get(x_43, 1); -lean_inc(x_44); -lean_dec(x_43); -lean_inc(x_10); -lean_inc(x_9); -lean_inc(x_8); -lean_inc(x_7); -lean_inc(x_1); -x_45 = l_Lean_Meta_Grind_Arith_CommRing_EqCnstr_denoteExpr(x_1, x_2, x_3, x_4, x_5, x_6, x_7, x_8, x_9, x_10, x_44); -if (lean_obj_tag(x_45) == 0) -{ -lean_object* x_46; lean_object* x_47; lean_object* x_48; lean_object* x_49; lean_object* x_50; lean_object* x_51; lean_object* x_52; lean_object* x_53; lean_object* x_54; lean_object* x_55; -x_46 = lean_ctor_get(x_45, 0); +lean_object* x_45; lean_object* x_46; lean_object* x_47; lean_object* x_48; +x_45 = lean_ctor_get(x_11, 0); +x_46 = lean_ctor_get(x_11, 1); lean_inc(x_46); -x_47 = lean_ctor_get(x_45, 1); -lean_inc(x_47); +lean_inc(x_45); +lean_dec(x_11); +x_47 = 
l_Lean_Meta_Grind_Arith_CommRing_nonzeroCharInst_x3f___at_Lean_Meta_Grind_Arith_CommRing_Null_setDiseqUnsat___spec__1___closed__1; +x_48 = lean_ctor_get(x_45, 4); +lean_inc(x_48); lean_dec(x_45); -x_48 = l_Lean_MessageData_ofExpr(x_46); -x_49 = l_Lean_Meta_Grind_Arith_CommRing_throwNoNatZeroDivisors___rarg___closed__4; -x_50 = lean_alloc_ctor(7, 2, 0); -lean_ctor_set(x_50, 0, x_49); -lean_ctor_set(x_50, 1, x_48); -x_51 = lean_alloc_ctor(7, 2, 0); -lean_ctor_set(x_51, 0, x_50); -lean_ctor_set(x_51, 1, x_49); -x_52 = l_Lean_addTrace___at_Lean_Meta_Grind_Arith_CommRing_Null_setEqUnsat___spec__3(x_12, x_51, x_2, x_3, x_4, x_5, x_6, x_7, x_8, x_9, x_10, x_47); -x_53 = lean_ctor_get(x_52, 0); +if (lean_obj_tag(x_48) == 0) +{ +lean_object* x_49; lean_object* x_50; +x_49 = lean_box(0); +x_50 = lean_apply_11(x_47, x_49, x_1, x_2, x_3, x_4, x_5, x_6, x_7, x_8, x_9, x_46); +return x_50; +} +else +{ +lean_object* x_51; lean_object* x_52; lean_object* x_53; lean_object* x_54; lean_object* x_55; lean_object* x_56; uint8_t x_57; +x_51 = lean_ctor_get(x_48, 0); +lean_inc(x_51); +if (lean_is_exclusive(x_48)) { + lean_ctor_release(x_48, 0); + x_52 = x_48; +} else { + lean_dec_ref(x_48); + x_52 = lean_box(0); +} +x_53 = lean_ctor_get(x_51, 0); lean_inc(x_53); -x_54 = lean_ctor_get(x_52, 1); +x_54 = lean_ctor_get(x_51, 1); lean_inc(x_54); -lean_dec(x_52); -x_55 = l_Lean_Meta_Grind_Arith_CommRing_Null_setEqUnsat___lambda__2(x_1, x_12, x_53, x_2, x_3, x_4, x_5, x_6, x_7, x_8, x_9, x_10, x_54); -lean_dec(x_53); -return x_55; +if (lean_is_exclusive(x_51)) { + lean_ctor_release(x_51, 0); + lean_ctor_release(x_51, 1); + x_55 = x_51; +} else { + lean_dec_ref(x_51); + x_55 = lean_box(0); } -else +x_56 = lean_unsigned_to_nat(0u); +x_57 = lean_nat_dec_eq(x_54, x_56); +if (x_57 == 0) { -lean_object* x_56; lean_object* x_57; lean_object* x_58; lean_object* x_59; -lean_dec(x_10); +lean_object* x_58; lean_object* x_59; lean_object* x_60; lean_dec(x_9); lean_dec(x_8); lean_dec(x_7); @@ -9310,32 +11171,41 @@ lean_dec(x_4); lean_dec(x_3); lean_dec(x_2); lean_dec(x_1); -x_56 = lean_ctor_get(x_45, 0); -lean_inc(x_56); -x_57 = lean_ctor_get(x_45, 1); -lean_inc(x_57); -if (lean_is_exclusive(x_45)) { - lean_ctor_release(x_45, 0); - lean_ctor_release(x_45, 1); - x_58 = x_45; +if (lean_is_scalar(x_55)) { + x_58 = lean_alloc_ctor(0, 2, 0); } else { - lean_dec_ref(x_45); - x_58 = lean_box(0); + x_58 = x_55; } -if (lean_is_scalar(x_58)) { - x_59 = lean_alloc_ctor(1, 2, 0); +lean_ctor_set(x_58, 0, x_53); +lean_ctor_set(x_58, 1, x_54); +if (lean_is_scalar(x_52)) { + x_59 = lean_alloc_ctor(1, 1, 0); } else { - x_59 = x_58; + x_59 = x_52; +} +lean_ctor_set(x_59, 0, x_58); +x_60 = lean_alloc_ctor(0, 2, 0); +lean_ctor_set(x_60, 0, x_59); +lean_ctor_set(x_60, 1, x_46); +return x_60; +} +else +{ +lean_object* x_61; lean_object* x_62; +lean_dec(x_55); +lean_dec(x_54); +lean_dec(x_53); +lean_dec(x_52); +x_61 = lean_box(0); +x_62 = lean_apply_11(x_47, x_61, x_1, x_2, x_3, x_4, x_5, x_6, x_7, x_8, x_9, x_46); +return x_62; +} } -lean_ctor_set(x_59, 0, x_56); -lean_ctor_set(x_59, 1, x_57); -return x_59; } } else { -lean_object* x_60; lean_object* x_61; lean_object* x_62; lean_object* x_63; -lean_dec(x_10); +uint8_t x_63; lean_dec(x_9); lean_dec(x_8); lean_dec(x_7); @@ -9345,101 +11215,157 @@ lean_dec(x_4); lean_dec(x_3); lean_dec(x_2); lean_dec(x_1); -x_60 = lean_ctor_get(x_43, 0); -lean_inc(x_60); -x_61 = lean_ctor_get(x_43, 1); -lean_inc(x_61); -if (lean_is_exclusive(x_43)) { - lean_ctor_release(x_43, 0); - lean_ctor_release(x_43, 1); - x_62 = 
x_43; -} else { - lean_dec_ref(x_43); - x_62 = lean_box(0); +x_63 = !lean_is_exclusive(x_11); +if (x_63 == 0) +{ +return x_11; } -if (lean_is_scalar(x_62)) { - x_63 = lean_alloc_ctor(1, 2, 0); -} else { - x_63 = x_62; +else +{ +lean_object* x_64; lean_object* x_65; lean_object* x_66; +x_64 = lean_ctor_get(x_11, 0); +x_65 = lean_ctor_get(x_11, 1); +lean_inc(x_65); +lean_inc(x_64); +lean_dec(x_11); +x_66 = lean_alloc_ctor(1, 2, 0); +lean_ctor_set(x_66, 0, x_64); +lean_ctor_set(x_66, 1, x_65); +return x_66; } -lean_ctor_set(x_63, 0, x_60); -lean_ctor_set(x_63, 1, x_61); -return x_63; } } } +LEAN_EXPORT lean_object* l_Lean_Meta_Grind_Arith_CommRing_PolyDerivation_denoteExpr___at_Lean_Meta_Grind_Arith_CommRing_Null_setDiseqUnsat___spec__3(lean_object* x_1, lean_object* x_2, lean_object* x_3, lean_object* x_4, lean_object* x_5, lean_object* x_6, lean_object* x_7, lean_object* x_8, lean_object* x_9, lean_object* x_10, lean_object* x_11) { +_start: +{ +lean_object* x_12; lean_object* x_13; +x_12 = l_Lean_Meta_Grind_Arith_CommRing_PolyDerivation_p(x_1); +x_13 = l_Lean_Grind_CommRing_Poly_denoteExpr___at_Lean_Meta_Grind_Arith_CommRing_getPolyConst___spec__1(x_12, x_2, x_3, x_4, x_5, x_6, x_7, x_8, x_9, x_10, x_11); +return x_13; } } -LEAN_EXPORT lean_object* l_Lean_isTracingEnabledFor___at_Lean_Meta_Grind_Arith_CommRing_Null_setEqUnsat___spec__1___boxed(lean_object* x_1, lean_object* x_2, lean_object* x_3, lean_object* x_4, lean_object* x_5, lean_object* x_6, lean_object* x_7, lean_object* x_8, lean_object* x_9, lean_object* x_10, lean_object* x_11) { +LEAN_EXPORT lean_object* l_Lean_Meta_Grind_Arith_CommRing_DiseqCnstr_denoteExpr___at_Lean_Meta_Grind_Arith_CommRing_Null_setDiseqUnsat___spec__2(lean_object* x_1, lean_object* x_2, lean_object* x_3, lean_object* x_4, lean_object* x_5, lean_object* x_6, lean_object* x_7, lean_object* x_8, lean_object* x_9, lean_object* x_10, lean_object* x_11) { _start: { -lean_object* x_12; -x_12 = l_Lean_isTracingEnabledFor___at_Lean_Meta_Grind_Arith_CommRing_Null_setEqUnsat___spec__1(x_1, x_2, x_3, x_4, x_5, x_6, x_7, x_8, x_9, x_10, x_11); -lean_dec(x_10); -lean_dec(x_9); -lean_dec(x_8); -lean_dec(x_7); -lean_dec(x_6); -lean_dec(x_5); -lean_dec(x_4); -lean_dec(x_3); -lean_dec(x_2); -return x_12; +lean_object* x_12; lean_object* x_13; +x_12 = lean_ctor_get(x_1, 4); +x_13 = l_Lean_Meta_Grind_Arith_CommRing_PolyDerivation_denoteExpr___at_Lean_Meta_Grind_Arith_CommRing_Null_setDiseqUnsat___spec__3(x_12, x_2, x_3, x_4, x_5, x_6, x_7, x_8, x_9, x_10, x_11); +if (lean_obj_tag(x_13) == 0) +{ +lean_object* x_14; lean_object* x_15; lean_object* x_16; lean_object* x_17; +x_14 = lean_ctor_get(x_13, 0); +lean_inc(x_14); +x_15 = lean_ctor_get(x_13, 1); +lean_inc(x_15); +lean_dec(x_13); +x_16 = l___private_Lean_Meta_Tactic_Grind_Arith_CommRing_DenoteExpr_0__Lean_Meta_Grind_Arith_CommRing_denoteNum___at_Lean_Meta_Grind_Arith_CommRing_getPolyConst___spec__2___closed__8; +x_17 = l___private_Lean_Meta_Tactic_Grind_Arith_CommRing_DenoteExpr_0__Lean_Meta_Grind_Arith_CommRing_denoteNum___at_Lean_Meta_Grind_Arith_CommRing_getPolyConst___spec__2(x_16, x_2, x_3, x_4, x_5, x_6, x_7, x_8, x_9, x_10, x_15); +if (lean_obj_tag(x_17) == 0) +{ +lean_object* x_18; lean_object* x_19; lean_object* x_20; +x_18 = lean_ctor_get(x_17, 0); +lean_inc(x_18); +x_19 = lean_ctor_get(x_17, 1); +lean_inc(x_19); +lean_dec(x_17); +x_20 = l___private_Lean_Meta_Tactic_Grind_Arith_CommRing_DenoteExpr_0__Lean_Meta_Grind_Arith_CommRing_mkEq___at_Lean_Meta_Grind_Arith_CommRing_Null_setEqUnsat___spec__5(x_14, x_18, x_2, 
x_3, x_4, x_5, x_6, x_7, x_8, x_9, x_10, x_19); +if (lean_obj_tag(x_20) == 0) +{ +uint8_t x_21; +x_21 = !lean_is_exclusive(x_20); +if (x_21 == 0) +{ +lean_object* x_22; lean_object* x_23; +x_22 = lean_ctor_get(x_20, 0); +x_23 = l_Lean_mkNot(x_22); +lean_ctor_set(x_20, 0, x_23); +return x_20; +} +else +{ +lean_object* x_24; lean_object* x_25; lean_object* x_26; lean_object* x_27; +x_24 = lean_ctor_get(x_20, 0); +x_25 = lean_ctor_get(x_20, 1); +lean_inc(x_25); +lean_inc(x_24); +lean_dec(x_20); +x_26 = l_Lean_mkNot(x_24); +x_27 = lean_alloc_ctor(0, 2, 0); +lean_ctor_set(x_27, 0, x_26); +lean_ctor_set(x_27, 1, x_25); +return x_27; } } -LEAN_EXPORT lean_object* l_Lean_throwError___at_Lean_Meta_Grind_Arith_CommRing_Null_setEqUnsat___spec__2___boxed(lean_object* x_1, lean_object* x_2, lean_object* x_3, lean_object* x_4, lean_object* x_5, lean_object* x_6, lean_object* x_7, lean_object* x_8, lean_object* x_9, lean_object* x_10, lean_object* x_11) { -_start: +else { -lean_object* x_12; -x_12 = l_Lean_throwError___at_Lean_Meta_Grind_Arith_CommRing_Null_setEqUnsat___spec__2(x_1, x_2, x_3, x_4, x_5, x_6, x_7, x_8, x_9, x_10, x_11); -lean_dec(x_10); -lean_dec(x_9); -lean_dec(x_8); -lean_dec(x_7); -lean_dec(x_6); -lean_dec(x_5); -lean_dec(x_4); -lean_dec(x_3); -lean_dec(x_2); -return x_12; +uint8_t x_28; +x_28 = !lean_is_exclusive(x_20); +if (x_28 == 0) +{ +return x_20; +} +else +{ +lean_object* x_29; lean_object* x_30; lean_object* x_31; +x_29 = lean_ctor_get(x_20, 0); +x_30 = lean_ctor_get(x_20, 1); +lean_inc(x_30); +lean_inc(x_29); +lean_dec(x_20); +x_31 = lean_alloc_ctor(1, 2, 0); +lean_ctor_set(x_31, 0, x_29); +lean_ctor_set(x_31, 1, x_30); +return x_31; +} +} +} +else +{ +uint8_t x_32; +lean_dec(x_14); +x_32 = !lean_is_exclusive(x_17); +if (x_32 == 0) +{ +return x_17; +} +else +{ +lean_object* x_33; lean_object* x_34; lean_object* x_35; +x_33 = lean_ctor_get(x_17, 0); +x_34 = lean_ctor_get(x_17, 1); +lean_inc(x_34); +lean_inc(x_33); +lean_dec(x_17); +x_35 = lean_alloc_ctor(1, 2, 0); +lean_ctor_set(x_35, 0, x_33); +lean_ctor_set(x_35, 1, x_34); +return x_35; } } -LEAN_EXPORT lean_object* l_Lean_addTrace___at_Lean_Meta_Grind_Arith_CommRing_Null_setEqUnsat___spec__3___boxed(lean_object* x_1, lean_object* x_2, lean_object* x_3, lean_object* x_4, lean_object* x_5, lean_object* x_6, lean_object* x_7, lean_object* x_8, lean_object* x_9, lean_object* x_10, lean_object* x_11, lean_object* x_12) { -_start: +} +else +{ +uint8_t x_36; +x_36 = !lean_is_exclusive(x_13); +if (x_36 == 0) { -lean_object* x_13; -x_13 = l_Lean_addTrace___at_Lean_Meta_Grind_Arith_CommRing_Null_setEqUnsat___spec__3(x_1, x_2, x_3, x_4, x_5, x_6, x_7, x_8, x_9, x_10, x_11, x_12); -lean_dec(x_11); -lean_dec(x_10); -lean_dec(x_9); -lean_dec(x_8); -lean_dec(x_7); -lean_dec(x_6); -lean_dec(x_5); -lean_dec(x_4); -lean_dec(x_3); return x_13; } -} -LEAN_EXPORT lean_object* l_Lean_Meta_Grind_Arith_CommRing_Null_setEqUnsat___lambda__1___boxed(lean_object* x_1, lean_object* x_2, lean_object* x_3, lean_object* x_4, lean_object* x_5, lean_object* x_6, lean_object* x_7, lean_object* x_8, lean_object* x_9, lean_object* x_10, lean_object* x_11, lean_object* x_12, lean_object* x_13) { -_start: +else { -lean_object* x_14; -x_14 = l_Lean_Meta_Grind_Arith_CommRing_Null_setEqUnsat___lambda__1(x_1, x_2, x_3, x_4, x_5, x_6, x_7, x_8, x_9, x_10, x_11, x_12, x_13); -lean_dec(x_4); -lean_dec(x_3); -lean_dec(x_2); -lean_dec(x_1); -return x_14; +lean_object* x_37; lean_object* x_38; lean_object* x_39; +x_37 = lean_ctor_get(x_13, 0); +x_38 = lean_ctor_get(x_13, 
1); +lean_inc(x_38); +lean_inc(x_37); +lean_dec(x_13); +x_39 = lean_alloc_ctor(1, 2, 0); +lean_ctor_set(x_39, 0, x_37); +lean_ctor_set(x_39, 1, x_38); +return x_39; } } -LEAN_EXPORT lean_object* l_Lean_Meta_Grind_Arith_CommRing_Null_setEqUnsat___lambda__2___boxed(lean_object* x_1, lean_object* x_2, lean_object* x_3, lean_object* x_4, lean_object* x_5, lean_object* x_6, lean_object* x_7, lean_object* x_8, lean_object* x_9, lean_object* x_10, lean_object* x_11, lean_object* x_12, lean_object* x_13) { -_start: -{ -lean_object* x_14; -x_14 = l_Lean_Meta_Grind_Arith_CommRing_Null_setEqUnsat___lambda__2(x_1, x_2, x_3, x_4, x_5, x_6, x_7, x_8, x_9, x_10, x_11, x_12, x_13); -lean_dec(x_3); -return x_14; } } static lean_object* _init_l_Lean_Meta_Grind_Arith_CommRing_Null_setDiseqUnsat___lambda__1___closed__1() { @@ -9454,10 +11380,10 @@ static lean_object* _init_l_Lean_Meta_Grind_Arith_CommRing_Null_setDiseqUnsat___ _start: { lean_object* x_1; lean_object* x_2; lean_object* x_3; lean_object* x_4; lean_object* x_5; lean_object* x_6; -x_1 = l_Lean_Meta_Grind_Arith_CommRing_Null_setEqUnsat___lambda__1___closed__15; -x_2 = l_Lean_Meta_Grind_Arith_CommRing_Null_setEqUnsat___lambda__1___closed__16; -x_3 = l_Lean_Meta_Grind_Arith_CommRing_Null_setEqUnsat___lambda__1___closed__17; -x_4 = l_Lean_Meta_Grind_Arith_CommRing_Null_setEqUnsat___lambda__1___closed__18; +x_1 = l___private_Lean_Meta_Tactic_Grind_Arith_CommRing_DenoteExpr_0__Lean_Meta_Grind_Arith_CommRing_denoteNum___at_Lean_Meta_Grind_Arith_CommRing_getPolyConst___spec__2___closed__1; +x_2 = l___private_Lean_Meta_Tactic_Grind_Arith_CommRing_DenoteExpr_0__Lean_Meta_Grind_Arith_CommRing_denoteNum___at_Lean_Meta_Grind_Arith_CommRing_getPolyConst___spec__2___closed__2; +x_3 = l___private_Lean_Meta_Tactic_Grind_Arith_CommRing_DenoteExpr_0__Lean_Meta_Grind_Arith_CommRing_denoteNum___at_Lean_Meta_Grind_Arith_CommRing_getPolyConst___spec__2___closed__3; +x_4 = l_Lean_Meta_Grind_Arith_CommRing_Null_setEqUnsat___lambda__1___closed__15; x_5 = l_Lean_Meta_Grind_Arith_CommRing_Null_setDiseqUnsat___lambda__1___closed__1; x_6 = l_Lean_Name_mkStr5(x_1, x_2, x_3, x_4, x_5); return x_6; @@ -9475,10 +11401,10 @@ static lean_object* _init_l_Lean_Meta_Grind_Arith_CommRing_Null_setDiseqUnsat___ _start: { lean_object* x_1; lean_object* x_2; lean_object* x_3; lean_object* x_4; lean_object* x_5; lean_object* x_6; -x_1 = l_Lean_Meta_Grind_Arith_CommRing_Null_setEqUnsat___lambda__1___closed__15; -x_2 = l_Lean_Meta_Grind_Arith_CommRing_Null_setEqUnsat___lambda__1___closed__16; -x_3 = l_Lean_Meta_Grind_Arith_CommRing_Null_setEqUnsat___lambda__1___closed__17; -x_4 = l_Lean_Meta_Grind_Arith_CommRing_Null_setEqUnsat___lambda__1___closed__18; +x_1 = l___private_Lean_Meta_Tactic_Grind_Arith_CommRing_DenoteExpr_0__Lean_Meta_Grind_Arith_CommRing_denoteNum___at_Lean_Meta_Grind_Arith_CommRing_getPolyConst___spec__2___closed__1; +x_2 = l___private_Lean_Meta_Tactic_Grind_Arith_CommRing_DenoteExpr_0__Lean_Meta_Grind_Arith_CommRing_denoteNum___at_Lean_Meta_Grind_Arith_CommRing_getPolyConst___spec__2___closed__2; +x_3 = l___private_Lean_Meta_Tactic_Grind_Arith_CommRing_DenoteExpr_0__Lean_Meta_Grind_Arith_CommRing_denoteNum___at_Lean_Meta_Grind_Arith_CommRing_getPolyConst___spec__2___closed__3; +x_4 = l_Lean_Meta_Grind_Arith_CommRing_Null_setEqUnsat___lambda__1___closed__15; x_5 = l_Lean_Meta_Grind_Arith_CommRing_Null_setDiseqUnsat___lambda__1___closed__3; x_6 = l_Lean_Name_mkStr5(x_1, x_2, x_3, x_4, x_5); return x_6; @@ -9518,10 +11444,10 @@ static lean_object* 
_init_l_Lean_Meta_Grind_Arith_CommRing_Null_setDiseqUnsat___ _start: { lean_object* x_1; lean_object* x_2; lean_object* x_3; lean_object* x_4; lean_object* x_5; lean_object* x_6; -x_1 = l_Lean_Meta_Grind_Arith_CommRing_Null_setEqUnsat___lambda__1___closed__15; -x_2 = l_Lean_Meta_Grind_Arith_CommRing_Null_setEqUnsat___lambda__1___closed__16; -x_3 = l_Lean_Meta_Grind_Arith_CommRing_Null_setEqUnsat___lambda__1___closed__17; -x_4 = l_Lean_Meta_Grind_Arith_CommRing_Null_setEqUnsat___lambda__1___closed__18; +x_1 = l___private_Lean_Meta_Tactic_Grind_Arith_CommRing_DenoteExpr_0__Lean_Meta_Grind_Arith_CommRing_denoteNum___at_Lean_Meta_Grind_Arith_CommRing_getPolyConst___spec__2___closed__1; +x_2 = l___private_Lean_Meta_Tactic_Grind_Arith_CommRing_DenoteExpr_0__Lean_Meta_Grind_Arith_CommRing_denoteNum___at_Lean_Meta_Grind_Arith_CommRing_getPolyConst___spec__2___closed__2; +x_3 = l___private_Lean_Meta_Tactic_Grind_Arith_CommRing_DenoteExpr_0__Lean_Meta_Grind_Arith_CommRing_denoteNum___at_Lean_Meta_Grind_Arith_CommRing_getPolyConst___spec__2___closed__3; +x_4 = l_Lean_Meta_Grind_Arith_CommRing_Null_setEqUnsat___lambda__1___closed__15; x_5 = l_Lean_Meta_Grind_Arith_CommRing_Null_setDiseqUnsat___lambda__1___closed__7; x_6 = l_Lean_Name_mkStr5(x_1, x_2, x_3, x_4, x_5); return x_6; @@ -9539,10 +11465,10 @@ static lean_object* _init_l_Lean_Meta_Grind_Arith_CommRing_Null_setDiseqUnsat___ _start: { lean_object* x_1; lean_object* x_2; lean_object* x_3; lean_object* x_4; lean_object* x_5; lean_object* x_6; -x_1 = l_Lean_Meta_Grind_Arith_CommRing_Null_setEqUnsat___lambda__1___closed__15; -x_2 = l_Lean_Meta_Grind_Arith_CommRing_Null_setEqUnsat___lambda__1___closed__16; -x_3 = l_Lean_Meta_Grind_Arith_CommRing_Null_setEqUnsat___lambda__1___closed__17; -x_4 = l_Lean_Meta_Grind_Arith_CommRing_Null_setEqUnsat___lambda__1___closed__18; +x_1 = l___private_Lean_Meta_Tactic_Grind_Arith_CommRing_DenoteExpr_0__Lean_Meta_Grind_Arith_CommRing_denoteNum___at_Lean_Meta_Grind_Arith_CommRing_getPolyConst___spec__2___closed__1; +x_2 = l___private_Lean_Meta_Tactic_Grind_Arith_CommRing_DenoteExpr_0__Lean_Meta_Grind_Arith_CommRing_denoteNum___at_Lean_Meta_Grind_Arith_CommRing_getPolyConst___spec__2___closed__2; +x_3 = l___private_Lean_Meta_Tactic_Grind_Arith_CommRing_DenoteExpr_0__Lean_Meta_Grind_Arith_CommRing_denoteNum___at_Lean_Meta_Grind_Arith_CommRing_getPolyConst___spec__2___closed__3; +x_4 = l_Lean_Meta_Grind_Arith_CommRing_Null_setEqUnsat___lambda__1___closed__15; x_5 = l_Lean_Meta_Grind_Arith_CommRing_Null_setDiseqUnsat___lambda__1___closed__9; x_6 = l_Lean_Name_mkStr5(x_1, x_2, x_3, x_4, x_5); return x_6; @@ -9554,7 +11480,7 @@ LEAN_EXPORT lean_object* l_Lean_Meta_Grind_Arith_CommRing_Null_setDiseqUnsat___l lean_object* x_14; lean_object* x_15; lean_object* x_16; x_14 = l_Lean_Meta_Grind_Arith_CommRing_Null_NullCertExt_toNullCert(x_1); x_15 = l_Lean_Meta_Grind_Arith_CommRing_ofNullCert(x_14); -x_16 = l_Lean_Meta_Grind_Arith_CommRing_getRing(x_4, x_5, x_6, x_7, x_8, x_9, x_10, x_11, x_12, x_13); +x_16 = l_Lean_Meta_Grind_Arith_CommRing_RingM_getRing(x_4, x_5, x_6, x_7, x_8, x_9, x_10, x_11, x_12, x_13); if (lean_obj_tag(x_16) == 0) { lean_object* x_17; lean_object* x_18; lean_object* x_19; @@ -9592,7 +11518,7 @@ lean_inc(x_7); lean_inc(x_6); lean_inc(x_5); lean_inc(x_4); -x_26 = l_Lean_Meta_Grind_Arith_CommRing_nonzeroCharInst_x3f(x_4, x_5, x_6, x_7, x_8, x_9, x_10, x_11, x_12, x_21); +x_26 = l_Lean_Meta_Grind_Arith_CommRing_nonzeroCharInst_x3f___at_Lean_Meta_Grind_Arith_CommRing_Null_setDiseqUnsat___spec__1(x_4, x_5, x_6, 
x_7, x_8, x_9, x_10, x_11, x_12, x_21); if (lean_obj_tag(x_26) == 0) { lean_object* x_27; lean_object* x_28; lean_object* x_29; lean_object* x_30; lean_object* x_31; lean_object* x_42; @@ -9710,7 +11636,7 @@ lean_inc(x_68); x_69 = lean_ctor_get(x_17, 3); lean_inc(x_69); lean_dec(x_17); -x_70 = l_Lean_Meta_Grind_Arith_CommRing_Null_instInhabitedPreNullCert___closed__2; +x_70 = l___private_Lean_Meta_Tactic_Grind_Arith_CommRing_DenoteExpr_0__Lean_Meta_Grind_Arith_CommRing_denoteNum___at_Lean_Meta_Grind_Arith_CommRing_getPolyConst___spec__2___closed__8; x_71 = lean_int_dec_le(x_70, x_25); if (x_71 == 0) { @@ -9975,7 +11901,7 @@ x_135 = l_Lean_mkNatLit(x_128); x_136 = lean_ctor_get(x_17, 3); lean_inc(x_136); lean_dec(x_17); -x_137 = l_Lean_Meta_Grind_Arith_CommRing_Null_instInhabitedPreNullCert___closed__2; +x_137 = l___private_Lean_Meta_Tactic_Grind_Arith_CommRing_DenoteExpr_0__Lean_Meta_Grind_Arith_CommRing_denoteNum___at_Lean_Meta_Grind_Arith_CommRing_getPolyConst___spec__2___closed__8; x_138 = lean_int_dec_le(x_137, x_25); if (x_138 == 0) { @@ -10129,7 +12055,7 @@ x_171 = l_Lean_mkNatLit(x_163); x_172 = lean_ctor_get(x_17, 3); lean_inc(x_172); lean_dec(x_17); -x_173 = l_Lean_Meta_Grind_Arith_CommRing_Null_instInhabitedPreNullCert___closed__2; +x_173 = l___private_Lean_Meta_Tactic_Grind_Arith_CommRing_DenoteExpr_0__Lean_Meta_Grind_Arith_CommRing_denoteNum___at_Lean_Meta_Grind_Arith_CommRing_getPolyConst___spec__2___closed__8; x_174 = lean_int_dec_le(x_173, x_25); if (x_174 == 0) { @@ -10507,7 +12433,7 @@ lean_dec(x_25); x_27 = lean_ctor_get(x_1, 4); lean_inc(x_27); x_28 = l_Lean_Meta_Grind_Arith_CommRing_PolyDerivation_p(x_27); -x_29 = l_Lean_Grind_CommRing_Poly_denoteExpr(x_28, x_4, x_5, x_6, x_7, x_8, x_9, x_10, x_11, x_12, x_26); +x_29 = l_Lean_Grind_CommRing_Poly_denoteExpr___at_Lean_Meta_Grind_Arith_CommRing_getPolyConst___spec__1(x_28, x_4, x_5, x_6, x_7, x_8, x_9, x_10, x_11, x_12, x_26); if (lean_obj_tag(x_29) == 0) { lean_object* x_30; lean_object* x_31; lean_object* x_32; @@ -10534,7 +12460,7 @@ lean_inc(x_33); x_34 = lean_ctor_get(x_32, 1); lean_inc(x_34); lean_dec(x_32); -x_35 = l_Lean_Grind_CommRing_Poly_denoteExpr(x_33, x_4, x_5, x_6, x_7, x_8, x_9, x_10, x_11, x_12, x_34); +x_35 = l_Lean_Grind_CommRing_Poly_denoteExpr___at_Lean_Meta_Grind_Arith_CommRing_getPolyConst___spec__1(x_33, x_4, x_5, x_6, x_7, x_8, x_9, x_10, x_11, x_12, x_34); if (lean_obj_tag(x_35) == 0) { lean_object* x_36; lean_object* x_37; lean_object* x_38; lean_object* x_39; lean_object* x_40; lean_object* x_41; lean_object* x_42; lean_object* x_91; uint8_t x_92; @@ -10549,7 +12475,7 @@ x_39 = lean_ctor_get(x_15, 0); lean_inc(x_39); x_40 = l_Lean_MessageData_ofExpr(x_30); x_41 = l_Lean_MessageData_ofExpr(x_36); -x_91 = l_Lean_Meta_Grind_Arith_CommRing_Null_instInhabitedPreNullCert___closed__2; +x_91 = l___private_Lean_Meta_Tactic_Grind_Arith_CommRing_DenoteExpr_0__Lean_Meta_Grind_Arith_CommRing_denoteNum___at_Lean_Meta_Grind_Arith_CommRing_getPolyConst___spec__2___closed__8; x_92 = lean_int_dec_lt(x_38, x_91); if (x_92 == 0) { @@ -10596,7 +12522,7 @@ x_47 = l_Lean_Meta_Grind_Arith_CommRing_PolyDerivation_toPreNullCert___closed__9 x_48 = lean_alloc_ctor(7, 2, 0); lean_ctor_set(x_48, 0, x_46); lean_ctor_set(x_48, 1, x_47); -x_49 = l_Lean_Meta_Grind_Arith_CommRing_Null_instInhabitedPreNullCert___closed__2; +x_49 = l___private_Lean_Meta_Tactic_Grind_Arith_CommRing_DenoteExpr_0__Lean_Meta_Grind_Arith_CommRing_denoteNum___at_Lean_Meta_Grind_Arith_CommRing_getPolyConst___spec__2___closed__8; x_50 = 
lean_int_dec_lt(x_39, x_49); if (x_50 == 0) { @@ -10912,11 +12838,7 @@ lean_object* x_23; lean_object* x_24; x_23 = lean_ctor_get(x_22, 1); lean_inc(x_23); lean_dec(x_22); -lean_inc(x_10); -lean_inc(x_9); -lean_inc(x_8); -lean_inc(x_7); -x_24 = l_Lean_Meta_Grind_Arith_CommRing_DiseqCnstr_denoteExpr(x_1, x_2, x_3, x_4, x_5, x_6, x_7, x_8, x_9, x_10, x_23); +x_24 = l_Lean_Meta_Grind_Arith_CommRing_DiseqCnstr_denoteExpr___at_Lean_Meta_Grind_Arith_CommRing_Null_setDiseqUnsat___spec__2(x_1, x_2, x_3, x_4, x_5, x_6, x_7, x_8, x_9, x_10, x_23); if (lean_obj_tag(x_24) == 0) { lean_object* x_25; lean_object* x_26; lean_object* x_27; lean_object* x_28; lean_object* x_29; lean_object* x_30; lean_object* x_31; lean_object* x_32; lean_object* x_33; @@ -11024,11 +12946,7 @@ lean_object* x_44; lean_object* x_45; x_44 = lean_ctor_get(x_43, 1); lean_inc(x_44); lean_dec(x_43); -lean_inc(x_10); -lean_inc(x_9); -lean_inc(x_8); -lean_inc(x_7); -x_45 = l_Lean_Meta_Grind_Arith_CommRing_DiseqCnstr_denoteExpr(x_1, x_2, x_3, x_4, x_5, x_6, x_7, x_8, x_9, x_10, x_44); +x_45 = l_Lean_Meta_Grind_Arith_CommRing_DiseqCnstr_denoteExpr___at_Lean_Meta_Grind_Arith_CommRing_Null_setDiseqUnsat___spec__2(x_1, x_2, x_3, x_4, x_5, x_6, x_7, x_8, x_9, x_10, x_44); if (lean_obj_tag(x_45) == 0) { lean_object* x_46; lean_object* x_47; lean_object* x_48; lean_object* x_49; lean_object* x_50; lean_object* x_51; lean_object* x_52; lean_object* x_53; lean_object* x_54; lean_object* x_55; @@ -11128,41 +13046,77 @@ return x_63; } } } -LEAN_EXPORT lean_object* l_Lean_Meta_Grind_Arith_CommRing_Null_setDiseqUnsat___lambda__1___boxed(lean_object* x_1, lean_object* x_2, lean_object* x_3, lean_object* x_4, lean_object* x_5, lean_object* x_6, lean_object* x_7, lean_object* x_8, lean_object* x_9, lean_object* x_10, lean_object* x_11, lean_object* x_12, lean_object* x_13) { +LEAN_EXPORT lean_object* l_Lean_Meta_Grind_Arith_CommRing_nonzeroCharInst_x3f___at_Lean_Meta_Grind_Arith_CommRing_Null_setDiseqUnsat___spec__1___lambda__1___boxed(lean_object* x_1, lean_object* x_2, lean_object* x_3, lean_object* x_4, lean_object* x_5, lean_object* x_6, lean_object* x_7, lean_object* x_8, lean_object* x_9, lean_object* x_10, lean_object* x_11) { _start: { -lean_object* x_14; -x_14 = l_Lean_Meta_Grind_Arith_CommRing_Null_setDiseqUnsat___lambda__1(x_1, x_2, x_3, x_4, x_5, x_6, x_7, x_8, x_9, x_10, x_11, x_12, x_13); +lean_object* x_12; +x_12 = l_Lean_Meta_Grind_Arith_CommRing_nonzeroCharInst_x3f___at_Lean_Meta_Grind_Arith_CommRing_Null_setDiseqUnsat___spec__1___lambda__1(x_1, x_2, x_3, x_4, x_5, x_6, x_7, x_8, x_9, x_10, x_11); +lean_dec(x_10); +lean_dec(x_9); +lean_dec(x_8); +lean_dec(x_7); +lean_dec(x_6); +lean_dec(x_5); +lean_dec(x_4); lean_dec(x_3); +lean_dec(x_2); lean_dec(x_1); -return x_14; +return x_12; } } -LEAN_EXPORT lean_object* l_Lean_Meta_Grind_Arith_CommRing_Null_setDiseqUnsat___lambda__2___boxed(lean_object* x_1, lean_object* x_2, lean_object* x_3, lean_object* x_4, lean_object* x_5, lean_object* x_6, lean_object* x_7, lean_object* x_8, lean_object* x_9, lean_object* x_10, lean_object* x_11, lean_object* x_12, lean_object* x_13) { +LEAN_EXPORT lean_object* l_Lean_Meta_Grind_Arith_CommRing_PolyDerivation_denoteExpr___at_Lean_Meta_Grind_Arith_CommRing_Null_setDiseqUnsat___spec__3___boxed(lean_object* x_1, lean_object* x_2, lean_object* x_3, lean_object* x_4, lean_object* x_5, lean_object* x_6, lean_object* x_7, lean_object* x_8, lean_object* x_9, lean_object* x_10, lean_object* x_11) { _start: { -lean_object* x_14; -x_14 = 
l_Lean_Meta_Grind_Arith_CommRing_Null_setDiseqUnsat___lambda__2(x_1, x_2, x_3, x_4, x_5, x_6, x_7, x_8, x_9, x_10, x_11, x_12, x_13); +lean_object* x_12; +x_12 = l_Lean_Meta_Grind_Arith_CommRing_PolyDerivation_denoteExpr___at_Lean_Meta_Grind_Arith_CommRing_Null_setDiseqUnsat___spec__3(x_1, x_2, x_3, x_4, x_5, x_6, x_7, x_8, x_9, x_10, x_11); +lean_dec(x_10); +lean_dec(x_9); +lean_dec(x_8); +lean_dec(x_7); +lean_dec(x_6); +lean_dec(x_5); +lean_dec(x_4); lean_dec(x_3); -return x_14; +lean_dec(x_2); +lean_dec(x_1); +return x_12; } } -static lean_object* _init_l_Lean_Meta_Grind_Arith_CommRing_Null_propagateEq___lambda__1___closed__1() { +LEAN_EXPORT lean_object* l_Lean_Meta_Grind_Arith_CommRing_DiseqCnstr_denoteExpr___at_Lean_Meta_Grind_Arith_CommRing_Null_setDiseqUnsat___spec__2___boxed(lean_object* x_1, lean_object* x_2, lean_object* x_3, lean_object* x_4, lean_object* x_5, lean_object* x_6, lean_object* x_7, lean_object* x_8, lean_object* x_9, lean_object* x_10, lean_object* x_11) { _start: { -lean_object* x_1; -x_1 = lean_mk_string_unchecked("Eq", 2, 2); -return x_1; +lean_object* x_12; +x_12 = l_Lean_Meta_Grind_Arith_CommRing_DiseqCnstr_denoteExpr___at_Lean_Meta_Grind_Arith_CommRing_Null_setDiseqUnsat___spec__2(x_1, x_2, x_3, x_4, x_5, x_6, x_7, x_8, x_9, x_10, x_11); +lean_dec(x_10); +lean_dec(x_9); +lean_dec(x_8); +lean_dec(x_7); +lean_dec(x_6); +lean_dec(x_5); +lean_dec(x_4); +lean_dec(x_3); +lean_dec(x_2); +lean_dec(x_1); +return x_12; } } -static lean_object* _init_l_Lean_Meta_Grind_Arith_CommRing_Null_propagateEq___lambda__1___closed__2() { +LEAN_EXPORT lean_object* l_Lean_Meta_Grind_Arith_CommRing_Null_setDiseqUnsat___lambda__1___boxed(lean_object* x_1, lean_object* x_2, lean_object* x_3, lean_object* x_4, lean_object* x_5, lean_object* x_6, lean_object* x_7, lean_object* x_8, lean_object* x_9, lean_object* x_10, lean_object* x_11, lean_object* x_12, lean_object* x_13) { _start: { -lean_object* x_1; lean_object* x_2; lean_object* x_3; -x_1 = lean_box(0); -x_2 = l_Lean_Meta_Grind_Arith_CommRing_Null_propagateEq___lambda__1___closed__1; -x_3 = l_Lean_Name_str___override(x_1, x_2); -return x_3; +lean_object* x_14; +x_14 = l_Lean_Meta_Grind_Arith_CommRing_Null_setDiseqUnsat___lambda__1(x_1, x_2, x_3, x_4, x_5, x_6, x_7, x_8, x_9, x_10, x_11, x_12, x_13); +lean_dec(x_3); +lean_dec(x_1); +return x_14; +} +} +LEAN_EXPORT lean_object* l_Lean_Meta_Grind_Arith_CommRing_Null_setDiseqUnsat___lambda__2___boxed(lean_object* x_1, lean_object* x_2, lean_object* x_3, lean_object* x_4, lean_object* x_5, lean_object* x_6, lean_object* x_7, lean_object* x_8, lean_object* x_9, lean_object* x_10, lean_object* x_11, lean_object* x_12, lean_object* x_13) { +_start: +{ +lean_object* x_14; +x_14 = l_Lean_Meta_Grind_Arith_CommRing_Null_setDiseqUnsat___lambda__2(x_1, x_2, x_3, x_4, x_5, x_6, x_7, x_8, x_9, x_10, x_11, x_12, x_13); +lean_dec(x_3); +return x_14; } } LEAN_EXPORT lean_object* l_Lean_Meta_Grind_Arith_CommRing_Null_propagateEq___lambda__1(lean_object* x_1, lean_object* x_2, lean_object* x_3, lean_object* x_4, lean_object* x_5, lean_object* x_6, lean_object* x_7, lean_object* x_8, lean_object* x_9, lean_object* x_10, lean_object* x_11, lean_object* x_12, lean_object* x_13, lean_object* x_14, lean_object* x_15, lean_object* x_16) { @@ -11176,7 +13130,7 @@ x_19 = lean_box(0); x_20 = lean_alloc_ctor(1, 2, 0); lean_ctor_set(x_20, 0, x_18); lean_ctor_set(x_20, 1, x_19); -x_21 = l_Lean_Meta_Grind_Arith_CommRing_Null_propagateEq___lambda__1___closed__2; +x_21 = 
l___private_Lean_Meta_Tactic_Grind_Arith_CommRing_DenoteExpr_0__Lean_Meta_Grind_Arith_CommRing_mkEq___at_Lean_Meta_Grind_Arith_CommRing_Null_setEqUnsat___spec__5___closed__2; x_22 = l_Lean_Expr_const___override(x_21, x_20); x_23 = lean_ctor_get(x_1, 1); lean_inc(x_23); @@ -11225,10 +13179,10 @@ static lean_object* _init_l_Lean_Meta_Grind_Arith_CommRing_Null_propagateEq___cl _start: { lean_object* x_1; lean_object* x_2; lean_object* x_3; lean_object* x_4; lean_object* x_5; lean_object* x_6; -x_1 = l_Lean_Meta_Grind_Arith_CommRing_Null_setEqUnsat___lambda__1___closed__15; -x_2 = l_Lean_Meta_Grind_Arith_CommRing_Null_setEqUnsat___lambda__1___closed__16; -x_3 = l_Lean_Meta_Grind_Arith_CommRing_Null_setEqUnsat___lambda__1___closed__17; -x_4 = l_Lean_Meta_Grind_Arith_CommRing_Null_setEqUnsat___lambda__1___closed__18; +x_1 = l___private_Lean_Meta_Tactic_Grind_Arith_CommRing_DenoteExpr_0__Lean_Meta_Grind_Arith_CommRing_denoteNum___at_Lean_Meta_Grind_Arith_CommRing_getPolyConst___spec__2___closed__1; +x_2 = l___private_Lean_Meta_Tactic_Grind_Arith_CommRing_DenoteExpr_0__Lean_Meta_Grind_Arith_CommRing_denoteNum___at_Lean_Meta_Grind_Arith_CommRing_getPolyConst___spec__2___closed__2; +x_3 = l___private_Lean_Meta_Tactic_Grind_Arith_CommRing_DenoteExpr_0__Lean_Meta_Grind_Arith_CommRing_denoteNum___at_Lean_Meta_Grind_Arith_CommRing_getPolyConst___spec__2___closed__3; +x_4 = l_Lean_Meta_Grind_Arith_CommRing_Null_setEqUnsat___lambda__1___closed__15; x_5 = l_Lean_Meta_Grind_Arith_CommRing_Null_propagateEq___closed__3; x_6 = l_Lean_Name_mkStr5(x_1, x_2, x_3, x_4, x_5); return x_6; @@ -11246,10 +13200,10 @@ static lean_object* _init_l_Lean_Meta_Grind_Arith_CommRing_Null_propagateEq___cl _start: { lean_object* x_1; lean_object* x_2; lean_object* x_3; lean_object* x_4; lean_object* x_5; lean_object* x_6; -x_1 = l_Lean_Meta_Grind_Arith_CommRing_Null_setEqUnsat___lambda__1___closed__15; -x_2 = l_Lean_Meta_Grind_Arith_CommRing_Null_setEqUnsat___lambda__1___closed__16; -x_3 = l_Lean_Meta_Grind_Arith_CommRing_Null_setEqUnsat___lambda__1___closed__17; -x_4 = l_Lean_Meta_Grind_Arith_CommRing_Null_setEqUnsat___lambda__1___closed__18; +x_1 = l___private_Lean_Meta_Tactic_Grind_Arith_CommRing_DenoteExpr_0__Lean_Meta_Grind_Arith_CommRing_denoteNum___at_Lean_Meta_Grind_Arith_CommRing_getPolyConst___spec__2___closed__1; +x_2 = l___private_Lean_Meta_Tactic_Grind_Arith_CommRing_DenoteExpr_0__Lean_Meta_Grind_Arith_CommRing_denoteNum___at_Lean_Meta_Grind_Arith_CommRing_getPolyConst___spec__2___closed__2; +x_3 = l___private_Lean_Meta_Tactic_Grind_Arith_CommRing_DenoteExpr_0__Lean_Meta_Grind_Arith_CommRing_denoteNum___at_Lean_Meta_Grind_Arith_CommRing_getPolyConst___spec__2___closed__3; +x_4 = l_Lean_Meta_Grind_Arith_CommRing_Null_setEqUnsat___lambda__1___closed__15; x_5 = l_Lean_Meta_Grind_Arith_CommRing_Null_propagateEq___closed__5; x_6 = l_Lean_Name_mkStr5(x_1, x_2, x_3, x_4, x_5); return x_6; @@ -11267,10 +13221,10 @@ static lean_object* _init_l_Lean_Meta_Grind_Arith_CommRing_Null_propagateEq___cl _start: { lean_object* x_1; lean_object* x_2; lean_object* x_3; lean_object* x_4; lean_object* x_5; lean_object* x_6; -x_1 = l_Lean_Meta_Grind_Arith_CommRing_Null_setEqUnsat___lambda__1___closed__15; -x_2 = l_Lean_Meta_Grind_Arith_CommRing_Null_setEqUnsat___lambda__1___closed__16; -x_3 = l_Lean_Meta_Grind_Arith_CommRing_Null_setEqUnsat___lambda__1___closed__17; -x_4 = l_Lean_Meta_Grind_Arith_CommRing_Null_setEqUnsat___lambda__1___closed__18; +x_1 = 
l___private_Lean_Meta_Tactic_Grind_Arith_CommRing_DenoteExpr_0__Lean_Meta_Grind_Arith_CommRing_denoteNum___at_Lean_Meta_Grind_Arith_CommRing_getPolyConst___spec__2___closed__1; +x_2 = l___private_Lean_Meta_Tactic_Grind_Arith_CommRing_DenoteExpr_0__Lean_Meta_Grind_Arith_CommRing_denoteNum___at_Lean_Meta_Grind_Arith_CommRing_getPolyConst___spec__2___closed__2; +x_3 = l___private_Lean_Meta_Tactic_Grind_Arith_CommRing_DenoteExpr_0__Lean_Meta_Grind_Arith_CommRing_denoteNum___at_Lean_Meta_Grind_Arith_CommRing_getPolyConst___spec__2___closed__3; +x_4 = l_Lean_Meta_Grind_Arith_CommRing_Null_setEqUnsat___lambda__1___closed__15; x_5 = l_Lean_Meta_Grind_Arith_CommRing_Null_propagateEq___closed__7; x_6 = l_Lean_Name_mkStr5(x_1, x_2, x_3, x_4, x_5); return x_6; @@ -11288,10 +13242,10 @@ static lean_object* _init_l_Lean_Meta_Grind_Arith_CommRing_Null_propagateEq___cl _start: { lean_object* x_1; lean_object* x_2; lean_object* x_3; lean_object* x_4; lean_object* x_5; lean_object* x_6; -x_1 = l_Lean_Meta_Grind_Arith_CommRing_Null_setEqUnsat___lambda__1___closed__15; -x_2 = l_Lean_Meta_Grind_Arith_CommRing_Null_setEqUnsat___lambda__1___closed__16; -x_3 = l_Lean_Meta_Grind_Arith_CommRing_Null_setEqUnsat___lambda__1___closed__17; -x_4 = l_Lean_Meta_Grind_Arith_CommRing_Null_setEqUnsat___lambda__1___closed__18; +x_1 = l___private_Lean_Meta_Tactic_Grind_Arith_CommRing_DenoteExpr_0__Lean_Meta_Grind_Arith_CommRing_denoteNum___at_Lean_Meta_Grind_Arith_CommRing_getPolyConst___spec__2___closed__1; +x_2 = l___private_Lean_Meta_Tactic_Grind_Arith_CommRing_DenoteExpr_0__Lean_Meta_Grind_Arith_CommRing_denoteNum___at_Lean_Meta_Grind_Arith_CommRing_getPolyConst___spec__2___closed__2; +x_3 = l___private_Lean_Meta_Tactic_Grind_Arith_CommRing_DenoteExpr_0__Lean_Meta_Grind_Arith_CommRing_denoteNum___at_Lean_Meta_Grind_Arith_CommRing_getPolyConst___spec__2___closed__3; +x_4 = l_Lean_Meta_Grind_Arith_CommRing_Null_setEqUnsat___lambda__1___closed__15; x_5 = l_Lean_Meta_Grind_Arith_CommRing_Null_propagateEq___closed__9; x_6 = l_Lean_Name_mkStr5(x_1, x_2, x_3, x_4, x_5); return x_6; @@ -11322,7 +13276,7 @@ lean_inc(x_18); lean_dec(x_16); x_19 = l_Lean_Meta_Grind_Arith_CommRing_Null_NullCertExt_toNullCert(x_17); x_20 = l_Lean_Meta_Grind_Arith_CommRing_ofNullCert(x_19); -x_21 = l_Lean_Meta_Grind_Arith_CommRing_getRing(x_6, x_7, x_8, x_9, x_10, x_11, x_12, x_13, x_14, x_18); +x_21 = l_Lean_Meta_Grind_Arith_CommRing_RingM_getRing(x_6, x_7, x_8, x_9, x_10, x_11, x_12, x_13, x_14, x_18); if (lean_obj_tag(x_21) == 0) { lean_object* x_22; lean_object* x_23; lean_object* x_24; @@ -11360,7 +13314,7 @@ lean_inc(x_9); lean_inc(x_8); lean_inc(x_7); lean_inc(x_6); -x_30 = l_Lean_Meta_Grind_Arith_CommRing_nonzeroCharInst_x3f(x_6, x_7, x_8, x_9, x_10, x_11, x_12, x_13, x_14, x_26); +x_30 = l_Lean_Meta_Grind_Arith_CommRing_nonzeroCharInst_x3f___at_Lean_Meta_Grind_Arith_CommRing_Null_setDiseqUnsat___spec__1(x_6, x_7, x_8, x_9, x_10, x_11, x_12, x_13, x_14, x_26); if (lean_obj_tag(x_30) == 0) { lean_object* x_31; lean_object* x_32; lean_object* x_33; @@ -11422,7 +13376,7 @@ x_106 = lean_ctor_get(x_22, 1); lean_inc(x_106); x_107 = lean_ctor_get(x_22, 3); lean_inc(x_107); -x_108 = l_Lean_Meta_Grind_Arith_CommRing_Null_instInhabitedPreNullCert___closed__2; +x_108 = l___private_Lean_Meta_Tactic_Grind_Arith_CommRing_DenoteExpr_0__Lean_Meta_Grind_Arith_CommRing_denoteNum___at_Lean_Meta_Grind_Arith_CommRing_getPolyConst___spec__2___closed__8; x_109 = lean_int_dec_le(x_108, x_29); if (x_109 == 0) { @@ -11536,7 +13490,7 @@ lean_inc(x_152); x_153 = 
l_Lean_mkNatLit(x_146); x_154 = lean_ctor_get(x_22, 3); lean_inc(x_154); -x_155 = l_Lean_Meta_Grind_Arith_CommRing_Null_instInhabitedPreNullCert___closed__2; +x_155 = l___private_Lean_Meta_Tactic_Grind_Arith_CommRing_DenoteExpr_0__Lean_Meta_Grind_Arith_CommRing_denoteNum___at_Lean_Meta_Grind_Arith_CommRing_getPolyConst___spec__2___closed__8; x_156 = lean_int_dec_le(x_155, x_29); if (x_156 == 0) { @@ -11589,7 +13543,7 @@ lean_inc(x_176); x_177 = l_Lean_mkNatLit(x_169); x_178 = lean_ctor_get(x_22, 3); lean_inc(x_178); -x_179 = l_Lean_Meta_Grind_Arith_CommRing_Null_instInhabitedPreNullCert___closed__2; +x_179 = l___private_Lean_Meta_Tactic_Grind_Arith_CommRing_DenoteExpr_0__Lean_Meta_Grind_Arith_CommRing_denoteNum___at_Lean_Meta_Grind_Arith_CommRing_getPolyConst___spec__2___closed__8; x_180 = lean_int_dec_le(x_179, x_29); if (x_180 == 0) { @@ -16125,100 +18079,389 @@ x_35 = l_Std_DHashMap_Internal_AssocList_get_x3f___at_Lean_Meta_Grind_Arith_Comm lean_dec(x_34); if (lean_obj_tag(x_35) == 0) { -lean_object* x_36; lean_object* x_37; -lean_free_object(x_14); -x_36 = lean_box(0); -x_37 = l_Lean_Meta_Grind_Arith_CommRing_Stepwise_mkMonDecl___lambda__1(x_1, x_36, x_2, x_3, x_4, x_5, x_6, x_7, x_8, x_9, x_10, x_11, x_12, x_18); -return x_37; +lean_object* x_36; lean_object* x_37; +lean_free_object(x_14); +x_36 = lean_box(0); +x_37 = l_Lean_Meta_Grind_Arith_CommRing_Stepwise_mkMonDecl___lambda__1(x_1, x_36, x_2, x_3, x_4, x_5, x_6, x_7, x_8, x_9, x_10, x_11, x_12, x_18); +return x_37; +} +else +{ +lean_object* x_38; +lean_dec(x_1); +x_38 = lean_ctor_get(x_35, 0); +lean_inc(x_38); +lean_dec(x_35); +lean_ctor_set(x_14, 0, x_38); +return x_14; +} +} +else +{ +lean_object* x_39; lean_object* x_40; lean_object* x_41; uint64_t x_42; uint64_t x_43; uint64_t x_44; uint64_t x_45; uint64_t x_46; uint64_t x_47; uint64_t x_48; size_t x_49; size_t x_50; size_t x_51; size_t x_52; size_t x_53; lean_object* x_54; lean_object* x_55; +x_39 = lean_ctor_get(x_14, 1); +lean_inc(x_39); +lean_dec(x_14); +x_40 = lean_ctor_get(x_16, 1); +lean_inc(x_40); +lean_dec(x_16); +x_41 = lean_array_get_size(x_40); +x_42 = l___private_Init_Grind_CommRing_Poly_0__Lean_Grind_CommRing_hashMon____x40_Init_Grind_CommRing_Poly___hyg_1349_(x_1); +x_43 = 32; +x_44 = lean_uint64_shift_right(x_42, x_43); +x_45 = lean_uint64_xor(x_42, x_44); +x_46 = 16; +x_47 = lean_uint64_shift_right(x_45, x_46); +x_48 = lean_uint64_xor(x_45, x_47); +x_49 = lean_uint64_to_usize(x_48); +x_50 = lean_usize_of_nat(x_41); +lean_dec(x_41); +x_51 = 1; +x_52 = lean_usize_sub(x_50, x_51); +x_53 = lean_usize_land(x_49, x_52); +x_54 = lean_array_uget(x_40, x_53); +lean_dec(x_40); +x_55 = l_Std_DHashMap_Internal_AssocList_get_x3f___at_Lean_Meta_Grind_Arith_CommRing_Stepwise_mkMonDecl___spec__6(x_1, x_54); +lean_dec(x_54); +if (lean_obj_tag(x_55) == 0) +{ +lean_object* x_56; lean_object* x_57; +x_56 = lean_box(0); +x_57 = l_Lean_Meta_Grind_Arith_CommRing_Stepwise_mkMonDecl___lambda__1(x_1, x_56, x_2, x_3, x_4, x_5, x_6, x_7, x_8, x_9, x_10, x_11, x_12, x_39); +return x_57; +} +else +{ +lean_object* x_58; lean_object* x_59; +lean_dec(x_1); +x_58 = lean_ctor_get(x_55, 0); +lean_inc(x_58); +lean_dec(x_55); +x_59 = lean_alloc_ctor(0, 2, 0); +lean_ctor_set(x_59, 0, x_58); +lean_ctor_set(x_59, 1, x_39); +return x_59; +} +} +} +} +LEAN_EXPORT lean_object* l_Std_DHashMap_Internal_AssocList_contains___at_Lean_Meta_Grind_Arith_CommRing_Stepwise_mkMonDecl___spec__1___boxed(lean_object* x_1, lean_object* x_2) { +_start: +{ +uint8_t x_3; lean_object* x_4; +x_3 = 
l_Std_DHashMap_Internal_AssocList_contains___at_Lean_Meta_Grind_Arith_CommRing_Stepwise_mkMonDecl___spec__1(x_1, x_2); +lean_dec(x_2); +lean_dec(x_1); +x_4 = lean_box(x_3); +return x_4; +} +} +LEAN_EXPORT lean_object* l_Std_DHashMap_Internal_AssocList_get_x3f___at_Lean_Meta_Grind_Arith_CommRing_Stepwise_mkMonDecl___spec__6___boxed(lean_object* x_1, lean_object* x_2) { +_start: +{ +lean_object* x_3; +x_3 = l_Std_DHashMap_Internal_AssocList_get_x3f___at_Lean_Meta_Grind_Arith_CommRing_Stepwise_mkMonDecl___spec__6(x_1, x_2); +lean_dec(x_2); +lean_dec(x_1); +return x_3; +} +} +LEAN_EXPORT lean_object* l_Lean_Meta_Grind_Arith_CommRing_Stepwise_mkMonDecl___lambda__1___boxed(lean_object* x_1, lean_object* x_2, lean_object* x_3, lean_object* x_4, lean_object* x_5, lean_object* x_6, lean_object* x_7, lean_object* x_8, lean_object* x_9, lean_object* x_10, lean_object* x_11, lean_object* x_12, lean_object* x_13, lean_object* x_14) { +_start: +{ +lean_object* x_15; +x_15 = l_Lean_Meta_Grind_Arith_CommRing_Stepwise_mkMonDecl___lambda__1(x_1, x_2, x_3, x_4, x_5, x_6, x_7, x_8, x_9, x_10, x_11, x_12, x_13, x_14); +lean_dec(x_13); +lean_dec(x_12); +lean_dec(x_11); +lean_dec(x_10); +lean_dec(x_9); +lean_dec(x_8); +lean_dec(x_7); +lean_dec(x_6); +lean_dec(x_5); +lean_dec(x_4); +lean_dec(x_3); +lean_dec(x_2); +return x_15; +} +} +LEAN_EXPORT lean_object* l_Lean_Meta_Grind_Arith_CommRing_Stepwise_mkMonDecl___boxed(lean_object* x_1, lean_object* x_2, lean_object* x_3, lean_object* x_4, lean_object* x_5, lean_object* x_6, lean_object* x_7, lean_object* x_8, lean_object* x_9, lean_object* x_10, lean_object* x_11, lean_object* x_12, lean_object* x_13) { +_start: +{ +lean_object* x_14; +x_14 = l_Lean_Meta_Grind_Arith_CommRing_Stepwise_mkMonDecl(x_1, x_2, x_3, x_4, x_5, x_6, x_7, x_8, x_9, x_10, x_11, x_12, x_13); +lean_dec(x_12); +lean_dec(x_11); +lean_dec(x_10); +lean_dec(x_9); +lean_dec(x_8); +lean_dec(x_7); +lean_dec(x_6); +lean_dec(x_5); +lean_dec(x_4); +lean_dec(x_3); +lean_dec(x_2); +return x_14; +} +} +LEAN_EXPORT lean_object* l___private_Lean_Meta_Tactic_Grind_Arith_CommRing_Proof_0__Lean_Meta_Grind_Arith_CommRing_Stepwise_mkStepBasicPrefix(lean_object* x_1, lean_object* x_2, lean_object* x_3, lean_object* x_4, lean_object* x_5, lean_object* x_6, lean_object* x_7, lean_object* x_8, lean_object* x_9, lean_object* x_10, lean_object* x_11, lean_object* x_12, lean_object* x_13) { +_start: +{ +lean_object* x_14; +x_14 = l_Lean_Meta_Grind_Arith_CommRing_RingM_getRing(x_4, x_5, x_6, x_7, x_8, x_9, x_10, x_11, x_12, x_13); +if (lean_obj_tag(x_14) == 0) +{ +uint8_t x_15; +x_15 = !lean_is_exclusive(x_14); +if (x_15 == 0) +{ +lean_object* x_16; lean_object* x_17; lean_object* x_18; lean_object* x_19; lean_object* x_20; lean_object* x_21; lean_object* x_22; lean_object* x_23; +x_16 = lean_ctor_get(x_14, 0); +x_17 = lean_ctor_get(x_16, 2); +lean_inc(x_17); +x_18 = lean_box(0); +x_19 = lean_alloc_ctor(1, 2, 0); +lean_ctor_set(x_19, 0, x_17); +lean_ctor_set(x_19, 1, x_18); +x_20 = l_Lean_Expr_const___override(x_1, x_19); +x_21 = lean_ctor_get(x_16, 1); +lean_inc(x_21); +x_22 = lean_ctor_get(x_16, 3); +lean_inc(x_22); +lean_dec(x_16); +x_23 = l_Lean_mkApp3(x_20, x_21, x_22, x_2); +lean_ctor_set(x_14, 0, x_23); +return x_14; +} +else +{ +lean_object* x_24; lean_object* x_25; lean_object* x_26; lean_object* x_27; lean_object* x_28; lean_object* x_29; lean_object* x_30; lean_object* x_31; lean_object* x_32; lean_object* x_33; +x_24 = lean_ctor_get(x_14, 0); +x_25 = lean_ctor_get(x_14, 1); +lean_inc(x_25); +lean_inc(x_24); 
+lean_dec(x_14); +x_26 = lean_ctor_get(x_24, 2); +lean_inc(x_26); +x_27 = lean_box(0); +x_28 = lean_alloc_ctor(1, 2, 0); +lean_ctor_set(x_28, 0, x_26); +lean_ctor_set(x_28, 1, x_27); +x_29 = l_Lean_Expr_const___override(x_1, x_28); +x_30 = lean_ctor_get(x_24, 1); +lean_inc(x_30); +x_31 = lean_ctor_get(x_24, 3); +lean_inc(x_31); +lean_dec(x_24); +x_32 = l_Lean_mkApp3(x_29, x_30, x_31, x_2); +x_33 = lean_alloc_ctor(0, 2, 0); +lean_ctor_set(x_33, 0, x_32); +lean_ctor_set(x_33, 1, x_25); +return x_33; +} } else { -lean_object* x_38; +uint8_t x_34; +lean_dec(x_2); lean_dec(x_1); -x_38 = lean_ctor_get(x_35, 0); -lean_inc(x_38); -lean_dec(x_35); -lean_ctor_set(x_14, 0, x_38); +x_34 = !lean_is_exclusive(x_14); +if (x_34 == 0) +{ return x_14; } -} else { -lean_object* x_39; lean_object* x_40; lean_object* x_41; uint64_t x_42; uint64_t x_43; uint64_t x_44; uint64_t x_45; uint64_t x_46; uint64_t x_47; uint64_t x_48; size_t x_49; size_t x_50; size_t x_51; size_t x_52; size_t x_53; lean_object* x_54; lean_object* x_55; -x_39 = lean_ctor_get(x_14, 1); -lean_inc(x_39); +lean_object* x_35; lean_object* x_36; lean_object* x_37; +x_35 = lean_ctor_get(x_14, 0); +x_36 = lean_ctor_get(x_14, 1); +lean_inc(x_36); +lean_inc(x_35); lean_dec(x_14); -x_40 = lean_ctor_get(x_16, 1); -lean_inc(x_40); -lean_dec(x_16); -x_41 = lean_array_get_size(x_40); -x_42 = l___private_Init_Grind_CommRing_Poly_0__Lean_Grind_CommRing_hashMon____x40_Init_Grind_CommRing_Poly___hyg_1349_(x_1); -x_43 = 32; -x_44 = lean_uint64_shift_right(x_42, x_43); -x_45 = lean_uint64_xor(x_42, x_44); -x_46 = 16; -x_47 = lean_uint64_shift_right(x_45, x_46); -x_48 = lean_uint64_xor(x_45, x_47); -x_49 = lean_uint64_to_usize(x_48); -x_50 = lean_usize_of_nat(x_41); -lean_dec(x_41); -x_51 = 1; -x_52 = lean_usize_sub(x_50, x_51); -x_53 = lean_usize_land(x_49, x_52); -x_54 = lean_array_uget(x_40, x_53); -lean_dec(x_40); -x_55 = l_Std_DHashMap_Internal_AssocList_get_x3f___at_Lean_Meta_Grind_Arith_CommRing_Stepwise_mkMonDecl___spec__6(x_1, x_54); -lean_dec(x_54); -if (lean_obj_tag(x_55) == 0) +x_37 = lean_alloc_ctor(1, 2, 0); +lean_ctor_set(x_37, 0, x_35); +lean_ctor_set(x_37, 1, x_36); +return x_37; +} +} +} +} +LEAN_EXPORT lean_object* l___private_Lean_Meta_Tactic_Grind_Arith_CommRing_Proof_0__Lean_Meta_Grind_Arith_CommRing_Stepwise_mkStepBasicPrefix___boxed(lean_object* x_1, lean_object* x_2, lean_object* x_3, lean_object* x_4, lean_object* x_5, lean_object* x_6, lean_object* x_7, lean_object* x_8, lean_object* x_9, lean_object* x_10, lean_object* x_11, lean_object* x_12, lean_object* x_13) { +_start: { -lean_object* x_56; lean_object* x_57; -x_56 = lean_box(0); -x_57 = l_Lean_Meta_Grind_Arith_CommRing_Stepwise_mkMonDecl___lambda__1(x_1, x_56, x_2, x_3, x_4, x_5, x_6, x_7, x_8, x_9, x_10, x_11, x_12, x_39); -return x_57; +lean_object* x_14; +x_14 = l___private_Lean_Meta_Tactic_Grind_Arith_CommRing_Proof_0__Lean_Meta_Grind_Arith_CommRing_Stepwise_mkStepBasicPrefix(x_1, x_2, x_3, x_4, x_5, x_6, x_7, x_8, x_9, x_10, x_11, x_12, x_13); +lean_dec(x_12); +lean_dec(x_11); +lean_dec(x_10); +lean_dec(x_9); +lean_dec(x_8); +lean_dec(x_7); +lean_dec(x_6); +lean_dec(x_5); +lean_dec(x_4); +lean_dec(x_3); +return x_14; } -else +} +LEAN_EXPORT lean_object* l_Lean_Meta_Grind_Arith_CommRing_nonzeroCharInst_x3f___at___private_Lean_Meta_Tactic_Grind_Arith_CommRing_Proof_0__Lean_Meta_Grind_Arith_CommRing_Stepwise_mkStepPrefix___spec__1___lambda__1(lean_object* x_1, lean_object* x_2, lean_object* x_3, lean_object* x_4, lean_object* x_5, lean_object* x_6, lean_object* x_7, 
lean_object* x_8, lean_object* x_9, lean_object* x_10, lean_object* x_11, lean_object* x_12, lean_object* x_13) { +_start: { -lean_object* x_58; lean_object* x_59; -lean_dec(x_1); -x_58 = lean_ctor_get(x_55, 0); -lean_inc(x_58); -lean_dec(x_55); -x_59 = lean_alloc_ctor(0, 2, 0); -lean_ctor_set(x_59, 0, x_58); -lean_ctor_set(x_59, 1, x_39); -return x_59; +lean_object* x_14; lean_object* x_15; +x_14 = lean_box(0); +x_15 = lean_alloc_ctor(0, 2, 0); +lean_ctor_set(x_15, 0, x_14); +lean_ctor_set(x_15, 1, x_13); +return x_15; } } +static lean_object* _init_l_Lean_Meta_Grind_Arith_CommRing_nonzeroCharInst_x3f___at___private_Lean_Meta_Tactic_Grind_Arith_CommRing_Proof_0__Lean_Meta_Grind_Arith_CommRing_Stepwise_mkStepPrefix___spec__1___closed__1() { +_start: +{ +lean_object* x_1; +x_1 = lean_alloc_closure((void*)(l_Lean_Meta_Grind_Arith_CommRing_nonzeroCharInst_x3f___at___private_Lean_Meta_Tactic_Grind_Arith_CommRing_Proof_0__Lean_Meta_Grind_Arith_CommRing_Stepwise_mkStepPrefix___spec__1___lambda__1___boxed), 13, 0); +return x_1; } } -LEAN_EXPORT lean_object* l_Std_DHashMap_Internal_AssocList_contains___at_Lean_Meta_Grind_Arith_CommRing_Stepwise_mkMonDecl___spec__1___boxed(lean_object* x_1, lean_object* x_2) { +LEAN_EXPORT lean_object* l_Lean_Meta_Grind_Arith_CommRing_nonzeroCharInst_x3f___at___private_Lean_Meta_Tactic_Grind_Arith_CommRing_Proof_0__Lean_Meta_Grind_Arith_CommRing_Stepwise_mkStepPrefix___spec__1(lean_object* x_1, lean_object* x_2, lean_object* x_3, lean_object* x_4, lean_object* x_5, lean_object* x_6, lean_object* x_7, lean_object* x_8, lean_object* x_9, lean_object* x_10, lean_object* x_11, lean_object* x_12) { _start: { -uint8_t x_3; lean_object* x_4; -x_3 = l_Std_DHashMap_Internal_AssocList_contains___at_Lean_Meta_Grind_Arith_CommRing_Stepwise_mkMonDecl___spec__1(x_1, x_2); +lean_object* x_13; +x_13 = l_Lean_Meta_Grind_Arith_CommRing_RingM_getRing(x_3, x_4, x_5, x_6, x_7, x_8, x_9, x_10, x_11, x_12); +if (lean_obj_tag(x_13) == 0) +{ +uint8_t x_14; +x_14 = !lean_is_exclusive(x_13); +if (x_14 == 0) +{ +lean_object* x_15; lean_object* x_16; lean_object* x_17; lean_object* x_18; +x_15 = lean_ctor_get(x_13, 0); +x_16 = lean_ctor_get(x_13, 1); +x_17 = l_Lean_Meta_Grind_Arith_CommRing_nonzeroCharInst_x3f___at___private_Lean_Meta_Tactic_Grind_Arith_CommRing_Proof_0__Lean_Meta_Grind_Arith_CommRing_Stepwise_mkStepPrefix___spec__1___closed__1; +x_18 = lean_ctor_get(x_15, 4); +lean_inc(x_18); +lean_dec(x_15); +if (lean_obj_tag(x_18) == 0) +{ +lean_object* x_19; lean_object* x_20; +lean_free_object(x_13); +x_19 = lean_box(0); +x_20 = lean_apply_13(x_17, x_19, x_1, x_2, x_3, x_4, x_5, x_6, x_7, x_8, x_9, x_10, x_11, x_16); +return x_20; +} +else +{ +uint8_t x_21; +x_21 = !lean_is_exclusive(x_18); +if (x_21 == 0) +{ +lean_object* x_22; uint8_t x_23; +x_22 = lean_ctor_get(x_18, 0); +x_23 = !lean_is_exclusive(x_22); +if (x_23 == 0) +{ +lean_object* x_24; lean_object* x_25; lean_object* x_26; uint8_t x_27; +x_24 = lean_ctor_get(x_22, 0); +x_25 = lean_ctor_get(x_22, 1); +x_26 = lean_unsigned_to_nat(0u); +x_27 = lean_nat_dec_eq(x_25, x_26); +if (x_27 == 0) +{ +lean_dec(x_11); +lean_dec(x_10); +lean_dec(x_9); +lean_dec(x_8); +lean_dec(x_7); +lean_dec(x_6); +lean_dec(x_5); +lean_dec(x_4); +lean_dec(x_3); lean_dec(x_2); lean_dec(x_1); -x_4 = lean_box(x_3); -return x_4; +lean_ctor_set(x_13, 0, x_18); +return x_13; } +else +{ +lean_object* x_28; lean_object* x_29; +lean_free_object(x_22); +lean_dec(x_25); +lean_dec(x_24); +lean_free_object(x_18); +lean_free_object(x_13); +x_28 = lean_box(0); +x_29 = 
lean_apply_13(x_17, x_28, x_1, x_2, x_3, x_4, x_5, x_6, x_7, x_8, x_9, x_10, x_11, x_16); +return x_29; } -LEAN_EXPORT lean_object* l_Std_DHashMap_Internal_AssocList_get_x3f___at_Lean_Meta_Grind_Arith_CommRing_Stepwise_mkMonDecl___spec__6___boxed(lean_object* x_1, lean_object* x_2) { -_start: +} +else { -lean_object* x_3; -x_3 = l_Std_DHashMap_Internal_AssocList_get_x3f___at_Lean_Meta_Grind_Arith_CommRing_Stepwise_mkMonDecl___spec__6(x_1, x_2); +lean_object* x_30; lean_object* x_31; lean_object* x_32; uint8_t x_33; +x_30 = lean_ctor_get(x_22, 0); +x_31 = lean_ctor_get(x_22, 1); +lean_inc(x_31); +lean_inc(x_30); +lean_dec(x_22); +x_32 = lean_unsigned_to_nat(0u); +x_33 = lean_nat_dec_eq(x_31, x_32); +if (x_33 == 0) +{ +lean_object* x_34; +lean_dec(x_11); +lean_dec(x_10); +lean_dec(x_9); +lean_dec(x_8); +lean_dec(x_7); +lean_dec(x_6); +lean_dec(x_5); +lean_dec(x_4); +lean_dec(x_3); lean_dec(x_2); lean_dec(x_1); -return x_3; +x_34 = lean_alloc_ctor(0, 2, 0); +lean_ctor_set(x_34, 0, x_30); +lean_ctor_set(x_34, 1, x_31); +lean_ctor_set(x_18, 0, x_34); +lean_ctor_set(x_13, 0, x_18); +return x_13; +} +else +{ +lean_object* x_35; lean_object* x_36; +lean_dec(x_31); +lean_dec(x_30); +lean_free_object(x_18); +lean_free_object(x_13); +x_35 = lean_box(0); +x_36 = lean_apply_13(x_17, x_35, x_1, x_2, x_3, x_4, x_5, x_6, x_7, x_8, x_9, x_10, x_11, x_16); +return x_36; } } -LEAN_EXPORT lean_object* l_Lean_Meta_Grind_Arith_CommRing_Stepwise_mkMonDecl___lambda__1___boxed(lean_object* x_1, lean_object* x_2, lean_object* x_3, lean_object* x_4, lean_object* x_5, lean_object* x_6, lean_object* x_7, lean_object* x_8, lean_object* x_9, lean_object* x_10, lean_object* x_11, lean_object* x_12, lean_object* x_13, lean_object* x_14) { -_start: +} +else { -lean_object* x_15; -x_15 = l_Lean_Meta_Grind_Arith_CommRing_Stepwise_mkMonDecl___lambda__1(x_1, x_2, x_3, x_4, x_5, x_6, x_7, x_8, x_9, x_10, x_11, x_12, x_13, x_14); -lean_dec(x_13); -lean_dec(x_12); +lean_object* x_37; lean_object* x_38; lean_object* x_39; lean_object* x_40; lean_object* x_41; uint8_t x_42; +x_37 = lean_ctor_get(x_18, 0); +lean_inc(x_37); +lean_dec(x_18); +x_38 = lean_ctor_get(x_37, 0); +lean_inc(x_38); +x_39 = lean_ctor_get(x_37, 1); +lean_inc(x_39); +if (lean_is_exclusive(x_37)) { + lean_ctor_release(x_37, 0); + lean_ctor_release(x_37, 1); + x_40 = x_37; +} else { + lean_dec_ref(x_37); + x_40 = lean_box(0); +} +x_41 = lean_unsigned_to_nat(0u); +x_42 = lean_nat_dec_eq(x_39, x_41); +if (x_42 == 0) +{ +lean_object* x_43; lean_object* x_44; lean_dec(x_11); lean_dec(x_10); lean_dec(x_9); @@ -16229,15 +18472,81 @@ lean_dec(x_5); lean_dec(x_4); lean_dec(x_3); lean_dec(x_2); -return x_15; +lean_dec(x_1); +if (lean_is_scalar(x_40)) { + x_43 = lean_alloc_ctor(0, 2, 0); +} else { + x_43 = x_40; +} +lean_ctor_set(x_43, 0, x_38); +lean_ctor_set(x_43, 1, x_39); +x_44 = lean_alloc_ctor(1, 1, 0); +lean_ctor_set(x_44, 0, x_43); +lean_ctor_set(x_13, 0, x_44); +return x_13; +} +else +{ +lean_object* x_45; lean_object* x_46; +lean_dec(x_40); +lean_dec(x_39); +lean_dec(x_38); +lean_free_object(x_13); +x_45 = lean_box(0); +x_46 = lean_apply_13(x_17, x_45, x_1, x_2, x_3, x_4, x_5, x_6, x_7, x_8, x_9, x_10, x_11, x_16); +return x_46; +} +} +} +} +else +{ +lean_object* x_47; lean_object* x_48; lean_object* x_49; lean_object* x_50; +x_47 = lean_ctor_get(x_13, 0); +x_48 = lean_ctor_get(x_13, 1); +lean_inc(x_48); +lean_inc(x_47); +lean_dec(x_13); +x_49 = 
l_Lean_Meta_Grind_Arith_CommRing_nonzeroCharInst_x3f___at___private_Lean_Meta_Tactic_Grind_Arith_CommRing_Proof_0__Lean_Meta_Grind_Arith_CommRing_Stepwise_mkStepPrefix___spec__1___closed__1; +x_50 = lean_ctor_get(x_47, 4); +lean_inc(x_50); +lean_dec(x_47); +if (lean_obj_tag(x_50) == 0) +{ +lean_object* x_51; lean_object* x_52; +x_51 = lean_box(0); +x_52 = lean_apply_13(x_49, x_51, x_1, x_2, x_3, x_4, x_5, x_6, x_7, x_8, x_9, x_10, x_11, x_48); +return x_52; } +else +{ +lean_object* x_53; lean_object* x_54; lean_object* x_55; lean_object* x_56; lean_object* x_57; lean_object* x_58; uint8_t x_59; +x_53 = lean_ctor_get(x_50, 0); +lean_inc(x_53); +if (lean_is_exclusive(x_50)) { + lean_ctor_release(x_50, 0); + x_54 = x_50; +} else { + lean_dec_ref(x_50); + x_54 = lean_box(0); } -LEAN_EXPORT lean_object* l_Lean_Meta_Grind_Arith_CommRing_Stepwise_mkMonDecl___boxed(lean_object* x_1, lean_object* x_2, lean_object* x_3, lean_object* x_4, lean_object* x_5, lean_object* x_6, lean_object* x_7, lean_object* x_8, lean_object* x_9, lean_object* x_10, lean_object* x_11, lean_object* x_12, lean_object* x_13) { -_start: +x_55 = lean_ctor_get(x_53, 0); +lean_inc(x_55); +x_56 = lean_ctor_get(x_53, 1); +lean_inc(x_56); +if (lean_is_exclusive(x_53)) { + lean_ctor_release(x_53, 0); + lean_ctor_release(x_53, 1); + x_57 = x_53; +} else { + lean_dec_ref(x_53); + x_57 = lean_box(0); +} +x_58 = lean_unsigned_to_nat(0u); +x_59 = lean_nat_dec_eq(x_56, x_58); +if (x_59 == 0) { -lean_object* x_14; -x_14 = l_Lean_Meta_Grind_Arith_CommRing_Stepwise_mkMonDecl(x_1, x_2, x_3, x_4, x_5, x_6, x_7, x_8, x_9, x_10, x_11, x_12, x_13); -lean_dec(x_12); +lean_object* x_60; lean_object* x_61; lean_object* x_62; lean_dec(x_11); lean_dec(x_10); lean_dec(x_9); @@ -16248,97 +18557,42 @@ lean_dec(x_5); lean_dec(x_4); lean_dec(x_3); lean_dec(x_2); -return x_14; -} -} -LEAN_EXPORT lean_object* l___private_Lean_Meta_Tactic_Grind_Arith_CommRing_Proof_0__Lean_Meta_Grind_Arith_CommRing_Stepwise_mkStepBasicPrefix(lean_object* x_1, lean_object* x_2, lean_object* x_3, lean_object* x_4, lean_object* x_5, lean_object* x_6, lean_object* x_7, lean_object* x_8, lean_object* x_9, lean_object* x_10, lean_object* x_11, lean_object* x_12, lean_object* x_13) { -_start: -{ -lean_object* x_14; -x_14 = l_Lean_Meta_Grind_Arith_CommRing_getRing(x_4, x_5, x_6, x_7, x_8, x_9, x_10, x_11, x_12, x_13); -if (lean_obj_tag(x_14) == 0) -{ -uint8_t x_15; -x_15 = !lean_is_exclusive(x_14); -if (x_15 == 0) -{ -lean_object* x_16; lean_object* x_17; lean_object* x_18; lean_object* x_19; lean_object* x_20; lean_object* x_21; lean_object* x_22; lean_object* x_23; -x_16 = lean_ctor_get(x_14, 0); -x_17 = lean_ctor_get(x_16, 2); -lean_inc(x_17); -x_18 = lean_box(0); -x_19 = lean_alloc_ctor(1, 2, 0); -lean_ctor_set(x_19, 0, x_17); -lean_ctor_set(x_19, 1, x_18); -x_20 = l_Lean_Expr_const___override(x_1, x_19); -x_21 = lean_ctor_get(x_16, 1); -lean_inc(x_21); -x_22 = lean_ctor_get(x_16, 3); -lean_inc(x_22); -lean_dec(x_16); -x_23 = l_Lean_mkApp3(x_20, x_21, x_22, x_2); -lean_ctor_set(x_14, 0, x_23); -return x_14; -} -else -{ -lean_object* x_24; lean_object* x_25; lean_object* x_26; lean_object* x_27; lean_object* x_28; lean_object* x_29; lean_object* x_30; lean_object* x_31; lean_object* x_32; lean_object* x_33; -x_24 = lean_ctor_get(x_14, 0); -x_25 = lean_ctor_get(x_14, 1); -lean_inc(x_25); -lean_inc(x_24); -lean_dec(x_14); -x_26 = lean_ctor_get(x_24, 2); -lean_inc(x_26); -x_27 = lean_box(0); -x_28 = lean_alloc_ctor(1, 2, 0); -lean_ctor_set(x_28, 0, x_26); -lean_ctor_set(x_28, 1, 
x_27); -x_29 = l_Lean_Expr_const___override(x_1, x_28); -x_30 = lean_ctor_get(x_24, 1); -lean_inc(x_30); -x_31 = lean_ctor_get(x_24, 3); -lean_inc(x_31); -lean_dec(x_24); -x_32 = l_Lean_mkApp3(x_29, x_30, x_31, x_2); -x_33 = lean_alloc_ctor(0, 2, 0); -lean_ctor_set(x_33, 0, x_32); -lean_ctor_set(x_33, 1, x_25); -return x_33; +lean_dec(x_1); +if (lean_is_scalar(x_57)) { + x_60 = lean_alloc_ctor(0, 2, 0); +} else { + x_60 = x_57; } +lean_ctor_set(x_60, 0, x_55); +lean_ctor_set(x_60, 1, x_56); +if (lean_is_scalar(x_54)) { + x_61 = lean_alloc_ctor(1, 1, 0); +} else { + x_61 = x_54; } -else -{ -uint8_t x_34; -lean_dec(x_2); -lean_dec(x_1); -x_34 = !lean_is_exclusive(x_14); -if (x_34 == 0) -{ -return x_14; +lean_ctor_set(x_61, 0, x_60); +x_62 = lean_alloc_ctor(0, 2, 0); +lean_ctor_set(x_62, 0, x_61); +lean_ctor_set(x_62, 1, x_48); +return x_62; } else { -lean_object* x_35; lean_object* x_36; lean_object* x_37; -x_35 = lean_ctor_get(x_14, 0); -x_36 = lean_ctor_get(x_14, 1); -lean_inc(x_36); -lean_inc(x_35); -lean_dec(x_14); -x_37 = lean_alloc_ctor(1, 2, 0); -lean_ctor_set(x_37, 0, x_35); -lean_ctor_set(x_37, 1, x_36); -return x_37; +lean_object* x_63; lean_object* x_64; +lean_dec(x_57); +lean_dec(x_56); +lean_dec(x_55); +lean_dec(x_54); +x_63 = lean_box(0); +x_64 = lean_apply_13(x_49, x_63, x_1, x_2, x_3, x_4, x_5, x_6, x_7, x_8, x_9, x_10, x_11, x_48); +return x_64; } } } } -LEAN_EXPORT lean_object* l___private_Lean_Meta_Tactic_Grind_Arith_CommRing_Proof_0__Lean_Meta_Grind_Arith_CommRing_Stepwise_mkStepBasicPrefix___boxed(lean_object* x_1, lean_object* x_2, lean_object* x_3, lean_object* x_4, lean_object* x_5, lean_object* x_6, lean_object* x_7, lean_object* x_8, lean_object* x_9, lean_object* x_10, lean_object* x_11, lean_object* x_12, lean_object* x_13) { -_start: +else { -lean_object* x_14; -x_14 = l___private_Lean_Meta_Tactic_Grind_Arith_CommRing_Proof_0__Lean_Meta_Grind_Arith_CommRing_Stepwise_mkStepBasicPrefix(x_1, x_2, x_3, x_4, x_5, x_6, x_7, x_8, x_9, x_10, x_11, x_12, x_13); -lean_dec(x_12); +uint8_t x_65; lean_dec(x_11); lean_dec(x_10); lean_dec(x_9); @@ -16348,7 +18602,27 @@ lean_dec(x_6); lean_dec(x_5); lean_dec(x_4); lean_dec(x_3); -return x_14; +lean_dec(x_2); +lean_dec(x_1); +x_65 = !lean_is_exclusive(x_13); +if (x_65 == 0) +{ +return x_13; +} +else +{ +lean_object* x_66; lean_object* x_67; lean_object* x_68; +x_66 = lean_ctor_get(x_13, 0); +x_67 = lean_ctor_get(x_13, 1); +lean_inc(x_67); +lean_inc(x_66); +lean_dec(x_13); +x_68 = lean_alloc_ctor(1, 2, 0); +lean_ctor_set(x_68, 0, x_66); +lean_ctor_set(x_68, 1, x_67); +return x_68; +} +} } } LEAN_EXPORT lean_object* l___private_Lean_Meta_Tactic_Grind_Arith_CommRing_Proof_0__Lean_Meta_Grind_Arith_CommRing_Stepwise_mkStepPrefix(lean_object* x_1, lean_object* x_2, lean_object* x_3, lean_object* x_4, lean_object* x_5, lean_object* x_6, lean_object* x_7, lean_object* x_8, lean_object* x_9, lean_object* x_10, lean_object* x_11, lean_object* x_12, lean_object* x_13, lean_object* x_14) { @@ -16364,7 +18638,9 @@ lean_inc(x_8); lean_inc(x_7); lean_inc(x_6); lean_inc(x_5); -x_15 = l_Lean_Meta_Grind_Arith_CommRing_nonzeroCharInst_x3f(x_5, x_6, x_7, x_8, x_9, x_10, x_11, x_12, x_13, x_14); +lean_inc(x_4); +lean_inc(x_3); +x_15 = l_Lean_Meta_Grind_Arith_CommRing_nonzeroCharInst_x3f___at___private_Lean_Meta_Tactic_Grind_Arith_CommRing_Proof_0__Lean_Meta_Grind_Arith_CommRing_Stepwise_mkStepPrefix___spec__1(x_3, x_4, x_5, x_6, x_7, x_8, x_9, x_10, x_11, x_12, x_13, x_14); if (lean_obj_tag(x_15) == 0) { lean_object* x_16; @@ -16387,11 +18663,13 @@ 
lean_dec(x_8); lean_dec(x_7); lean_dec(x_6); lean_dec(x_5); +lean_dec(x_4); return x_18; } else { lean_object* x_19; lean_object* x_20; uint8_t x_21; +lean_dec(x_4); lean_dec(x_1); x_19 = lean_ctor_get(x_16, 0); lean_inc(x_19); @@ -16405,7 +18683,7 @@ if (x_21 == 0) lean_object* x_22; lean_object* x_23; lean_object* x_24; x_22 = lean_ctor_get(x_19, 0); x_23 = lean_ctor_get(x_19, 1); -x_24 = l_Lean_Meta_Grind_Arith_CommRing_getRing(x_5, x_6, x_7, x_8, x_9, x_10, x_11, x_12, x_13, x_20); +x_24 = l_Lean_Meta_Grind_Arith_CommRing_RingM_getRing(x_5, x_6, x_7, x_8, x_9, x_10, x_11, x_12, x_13, x_20); lean_dec(x_13); lean_dec(x_12); lean_dec(x_11); @@ -16504,7 +18782,7 @@ x_49 = lean_ctor_get(x_19, 1); lean_inc(x_49); lean_inc(x_48); lean_dec(x_19); -x_50 = l_Lean_Meta_Grind_Arith_CommRing_getRing(x_5, x_6, x_7, x_8, x_9, x_10, x_11, x_12, x_13, x_20); +x_50 = l_Lean_Meta_Grind_Arith_CommRing_RingM_getRing(x_5, x_6, x_7, x_8, x_9, x_10, x_11, x_12, x_13, x_20); lean_dec(x_13); lean_dec(x_12); lean_dec(x_11); @@ -16595,6 +18873,7 @@ lean_dec(x_8); lean_dec(x_7); lean_dec(x_6); lean_dec(x_5); +lean_dec(x_4); lean_dec(x_3); lean_dec(x_2); lean_dec(x_1); @@ -16619,13 +18898,24 @@ return x_70; } } } -LEAN_EXPORT lean_object* l___private_Lean_Meta_Tactic_Grind_Arith_CommRing_Proof_0__Lean_Meta_Grind_Arith_CommRing_Stepwise_mkStepPrefix___boxed(lean_object* x_1, lean_object* x_2, lean_object* x_3, lean_object* x_4, lean_object* x_5, lean_object* x_6, lean_object* x_7, lean_object* x_8, lean_object* x_9, lean_object* x_10, lean_object* x_11, lean_object* x_12, lean_object* x_13, lean_object* x_14) { +LEAN_EXPORT lean_object* l_Lean_Meta_Grind_Arith_CommRing_nonzeroCharInst_x3f___at___private_Lean_Meta_Tactic_Grind_Arith_CommRing_Proof_0__Lean_Meta_Grind_Arith_CommRing_Stepwise_mkStepPrefix___spec__1___lambda__1___boxed(lean_object* x_1, lean_object* x_2, lean_object* x_3, lean_object* x_4, lean_object* x_5, lean_object* x_6, lean_object* x_7, lean_object* x_8, lean_object* x_9, lean_object* x_10, lean_object* x_11, lean_object* x_12, lean_object* x_13) { _start: { -lean_object* x_15; -x_15 = l___private_Lean_Meta_Tactic_Grind_Arith_CommRing_Proof_0__Lean_Meta_Grind_Arith_CommRing_Stepwise_mkStepPrefix(x_1, x_2, x_3, x_4, x_5, x_6, x_7, x_8, x_9, x_10, x_11, x_12, x_13, x_14); +lean_object* x_14; +x_14 = l_Lean_Meta_Grind_Arith_CommRing_nonzeroCharInst_x3f___at___private_Lean_Meta_Tactic_Grind_Arith_CommRing_Proof_0__Lean_Meta_Grind_Arith_CommRing_Stepwise_mkStepPrefix___spec__1___lambda__1(x_1, x_2, x_3, x_4, x_5, x_6, x_7, x_8, x_9, x_10, x_11, x_12, x_13); +lean_dec(x_12); +lean_dec(x_11); +lean_dec(x_10); +lean_dec(x_9); +lean_dec(x_8); +lean_dec(x_7); +lean_dec(x_6); +lean_dec(x_5); lean_dec(x_4); -return x_15; +lean_dec(x_3); +lean_dec(x_2); +lean_dec(x_1); +return x_14; } } static lean_object* _init_l_Lean_Meta_Grind_Arith_CommRing_EqCnstr_toExprProof___closed__1() { @@ -16648,9 +18938,9 @@ static lean_object* _init_l_Lean_Meta_Grind_Arith_CommRing_EqCnstr_toExprProof__ _start: { lean_object* x_1; lean_object* x_2; lean_object* x_3; lean_object* x_4; lean_object* x_5; lean_object* x_6; -x_1 = l_Lean_Meta_Grind_Arith_CommRing_Null_setEqUnsat___lambda__1___closed__15; -x_2 = l_Lean_Meta_Grind_Arith_CommRing_Null_setEqUnsat___lambda__1___closed__16; -x_3 = l_Lean_Meta_Grind_Arith_CommRing_Null_setEqUnsat___lambda__1___closed__17; +x_1 = 
l___private_Lean_Meta_Tactic_Grind_Arith_CommRing_DenoteExpr_0__Lean_Meta_Grind_Arith_CommRing_denoteNum___at_Lean_Meta_Grind_Arith_CommRing_getPolyConst___spec__2___closed__1; +x_2 = l___private_Lean_Meta_Tactic_Grind_Arith_CommRing_DenoteExpr_0__Lean_Meta_Grind_Arith_CommRing_denoteNum___at_Lean_Meta_Grind_Arith_CommRing_getPolyConst___spec__2___closed__2; +x_3 = l___private_Lean_Meta_Tactic_Grind_Arith_CommRing_DenoteExpr_0__Lean_Meta_Grind_Arith_CommRing_denoteNum___at_Lean_Meta_Grind_Arith_CommRing_getPolyConst___spec__2___closed__3; x_4 = l_Lean_Meta_Grind_Arith_CommRing_EqCnstr_toExprProof___closed__1; x_5 = l_Lean_Meta_Grind_Arith_CommRing_EqCnstr_toExprProof___closed__2; x_6 = l_Lean_Name_mkStr5(x_1, x_2, x_3, x_4, x_5); @@ -16669,9 +18959,9 @@ static lean_object* _init_l_Lean_Meta_Grind_Arith_CommRing_EqCnstr_toExprProof__ _start: { lean_object* x_1; lean_object* x_2; lean_object* x_3; lean_object* x_4; lean_object* x_5; lean_object* x_6; -x_1 = l_Lean_Meta_Grind_Arith_CommRing_Null_setEqUnsat___lambda__1___closed__15; -x_2 = l_Lean_Meta_Grind_Arith_CommRing_Null_setEqUnsat___lambda__1___closed__16; -x_3 = l_Lean_Meta_Grind_Arith_CommRing_Null_setEqUnsat___lambda__1___closed__17; +x_1 = l___private_Lean_Meta_Tactic_Grind_Arith_CommRing_DenoteExpr_0__Lean_Meta_Grind_Arith_CommRing_denoteNum___at_Lean_Meta_Grind_Arith_CommRing_getPolyConst___spec__2___closed__1; +x_2 = l___private_Lean_Meta_Tactic_Grind_Arith_CommRing_DenoteExpr_0__Lean_Meta_Grind_Arith_CommRing_denoteNum___at_Lean_Meta_Grind_Arith_CommRing_getPolyConst___spec__2___closed__2; +x_3 = l___private_Lean_Meta_Tactic_Grind_Arith_CommRing_DenoteExpr_0__Lean_Meta_Grind_Arith_CommRing_denoteNum___at_Lean_Meta_Grind_Arith_CommRing_getPolyConst___spec__2___closed__3; x_4 = l_Lean_Meta_Grind_Arith_CommRing_EqCnstr_toExprProof___closed__1; x_5 = l_Lean_Meta_Grind_Arith_CommRing_EqCnstr_toExprProof___closed__4; x_6 = l_Lean_Name_mkStr5(x_1, x_2, x_3, x_4, x_5); @@ -16690,9 +18980,9 @@ static lean_object* _init_l_Lean_Meta_Grind_Arith_CommRing_EqCnstr_toExprProof__ _start: { lean_object* x_1; lean_object* x_2; lean_object* x_3; lean_object* x_4; lean_object* x_5; lean_object* x_6; -x_1 = l_Lean_Meta_Grind_Arith_CommRing_Null_setEqUnsat___lambda__1___closed__15; -x_2 = l_Lean_Meta_Grind_Arith_CommRing_Null_setEqUnsat___lambda__1___closed__16; -x_3 = l_Lean_Meta_Grind_Arith_CommRing_Null_setEqUnsat___lambda__1___closed__17; +x_1 = l___private_Lean_Meta_Tactic_Grind_Arith_CommRing_DenoteExpr_0__Lean_Meta_Grind_Arith_CommRing_denoteNum___at_Lean_Meta_Grind_Arith_CommRing_getPolyConst___spec__2___closed__1; +x_2 = l___private_Lean_Meta_Tactic_Grind_Arith_CommRing_DenoteExpr_0__Lean_Meta_Grind_Arith_CommRing_denoteNum___at_Lean_Meta_Grind_Arith_CommRing_getPolyConst___spec__2___closed__2; +x_3 = l___private_Lean_Meta_Tactic_Grind_Arith_CommRing_DenoteExpr_0__Lean_Meta_Grind_Arith_CommRing_denoteNum___at_Lean_Meta_Grind_Arith_CommRing_getPolyConst___spec__2___closed__3; x_4 = l_Lean_Meta_Grind_Arith_CommRing_EqCnstr_toExprProof___closed__1; x_5 = l_Lean_Meta_Grind_Arith_CommRing_EqCnstr_toExprProof___closed__6; x_6 = l_Lean_Name_mkStr5(x_1, x_2, x_3, x_4, x_5); @@ -16711,9 +19001,9 @@ static lean_object* _init_l_Lean_Meta_Grind_Arith_CommRing_EqCnstr_toExprProof__ _start: { lean_object* x_1; lean_object* x_2; lean_object* x_3; lean_object* x_4; lean_object* x_5; lean_object* x_6; -x_1 = l_Lean_Meta_Grind_Arith_CommRing_Null_setEqUnsat___lambda__1___closed__15; -x_2 = 
l_Lean_Meta_Grind_Arith_CommRing_Null_setEqUnsat___lambda__1___closed__16; -x_3 = l_Lean_Meta_Grind_Arith_CommRing_Null_setEqUnsat___lambda__1___closed__17; +x_1 = l___private_Lean_Meta_Tactic_Grind_Arith_CommRing_DenoteExpr_0__Lean_Meta_Grind_Arith_CommRing_denoteNum___at_Lean_Meta_Grind_Arith_CommRing_getPolyConst___spec__2___closed__1; +x_2 = l___private_Lean_Meta_Tactic_Grind_Arith_CommRing_DenoteExpr_0__Lean_Meta_Grind_Arith_CommRing_denoteNum___at_Lean_Meta_Grind_Arith_CommRing_getPolyConst___spec__2___closed__2; +x_3 = l___private_Lean_Meta_Tactic_Grind_Arith_CommRing_DenoteExpr_0__Lean_Meta_Grind_Arith_CommRing_denoteNum___at_Lean_Meta_Grind_Arith_CommRing_getPolyConst___spec__2___closed__3; x_4 = l_Lean_Meta_Grind_Arith_CommRing_EqCnstr_toExprProof___closed__1; x_5 = l_Lean_Meta_Grind_Arith_CommRing_EqCnstr_toExprProof___closed__8; x_6 = l_Lean_Name_mkStr5(x_1, x_2, x_3, x_4, x_5); @@ -16732,9 +19022,9 @@ static lean_object* _init_l_Lean_Meta_Grind_Arith_CommRing_EqCnstr_toExprProof__ _start: { lean_object* x_1; lean_object* x_2; lean_object* x_3; lean_object* x_4; lean_object* x_5; lean_object* x_6; -x_1 = l_Lean_Meta_Grind_Arith_CommRing_Null_setEqUnsat___lambda__1___closed__15; -x_2 = l_Lean_Meta_Grind_Arith_CommRing_Null_setEqUnsat___lambda__1___closed__16; -x_3 = l_Lean_Meta_Grind_Arith_CommRing_Null_setEqUnsat___lambda__1___closed__17; +x_1 = l___private_Lean_Meta_Tactic_Grind_Arith_CommRing_DenoteExpr_0__Lean_Meta_Grind_Arith_CommRing_denoteNum___at_Lean_Meta_Grind_Arith_CommRing_getPolyConst___spec__2___closed__1; +x_2 = l___private_Lean_Meta_Tactic_Grind_Arith_CommRing_DenoteExpr_0__Lean_Meta_Grind_Arith_CommRing_denoteNum___at_Lean_Meta_Grind_Arith_CommRing_getPolyConst___spec__2___closed__2; +x_3 = l___private_Lean_Meta_Tactic_Grind_Arith_CommRing_DenoteExpr_0__Lean_Meta_Grind_Arith_CommRing_denoteNum___at_Lean_Meta_Grind_Arith_CommRing_getPolyConst___spec__2___closed__3; x_4 = l_Lean_Meta_Grind_Arith_CommRing_EqCnstr_toExprProof___closed__1; x_5 = l_Lean_Meta_Grind_Arith_CommRing_EqCnstr_toExprProof___closed__10; x_6 = l_Lean_Name_mkStr5(x_1, x_2, x_3, x_4, x_5); @@ -16753,9 +19043,9 @@ static lean_object* _init_l_Lean_Meta_Grind_Arith_CommRing_EqCnstr_toExprProof__ _start: { lean_object* x_1; lean_object* x_2; lean_object* x_3; lean_object* x_4; lean_object* x_5; lean_object* x_6; -x_1 = l_Lean_Meta_Grind_Arith_CommRing_Null_setEqUnsat___lambda__1___closed__15; -x_2 = l_Lean_Meta_Grind_Arith_CommRing_Null_setEqUnsat___lambda__1___closed__16; -x_3 = l_Lean_Meta_Grind_Arith_CommRing_Null_setEqUnsat___lambda__1___closed__17; +x_1 = l___private_Lean_Meta_Tactic_Grind_Arith_CommRing_DenoteExpr_0__Lean_Meta_Grind_Arith_CommRing_denoteNum___at_Lean_Meta_Grind_Arith_CommRing_getPolyConst___spec__2___closed__1; +x_2 = l___private_Lean_Meta_Tactic_Grind_Arith_CommRing_DenoteExpr_0__Lean_Meta_Grind_Arith_CommRing_denoteNum___at_Lean_Meta_Grind_Arith_CommRing_getPolyConst___spec__2___closed__2; +x_3 = l___private_Lean_Meta_Tactic_Grind_Arith_CommRing_DenoteExpr_0__Lean_Meta_Grind_Arith_CommRing_denoteNum___at_Lean_Meta_Grind_Arith_CommRing_getPolyConst___spec__2___closed__3; x_4 = l_Lean_Meta_Grind_Arith_CommRing_EqCnstr_toExprProof___closed__1; x_5 = l_Lean_Meta_Grind_Arith_CommRing_EqCnstr_toExprProof___closed__12; x_6 = l_Lean_Name_mkStr5(x_1, x_2, x_3, x_4, x_5); @@ -16774,9 +19064,9 @@ static lean_object* _init_l_Lean_Meta_Grind_Arith_CommRing_EqCnstr_toExprProof__ _start: { lean_object* x_1; lean_object* x_2; lean_object* x_3; lean_object* x_4; lean_object* x_5; 
lean_object* x_6; -x_1 = l_Lean_Meta_Grind_Arith_CommRing_Null_setEqUnsat___lambda__1___closed__15; -x_2 = l_Lean_Meta_Grind_Arith_CommRing_Null_setEqUnsat___lambda__1___closed__16; -x_3 = l_Lean_Meta_Grind_Arith_CommRing_Null_setEqUnsat___lambda__1___closed__17; +x_1 = l___private_Lean_Meta_Tactic_Grind_Arith_CommRing_DenoteExpr_0__Lean_Meta_Grind_Arith_CommRing_denoteNum___at_Lean_Meta_Grind_Arith_CommRing_getPolyConst___spec__2___closed__1; +x_2 = l___private_Lean_Meta_Tactic_Grind_Arith_CommRing_DenoteExpr_0__Lean_Meta_Grind_Arith_CommRing_denoteNum___at_Lean_Meta_Grind_Arith_CommRing_getPolyConst___spec__2___closed__2; +x_3 = l___private_Lean_Meta_Tactic_Grind_Arith_CommRing_DenoteExpr_0__Lean_Meta_Grind_Arith_CommRing_denoteNum___at_Lean_Meta_Grind_Arith_CommRing_getPolyConst___spec__2___closed__3; x_4 = l_Lean_Meta_Grind_Arith_CommRing_EqCnstr_toExprProof___closed__1; x_5 = l_Lean_Meta_Grind_Arith_CommRing_EqCnstr_toExprProof___closed__14; x_6 = l_Lean_Name_mkStr5(x_1, x_2, x_3, x_4, x_5); @@ -16795,9 +19085,9 @@ static lean_object* _init_l_Lean_Meta_Grind_Arith_CommRing_EqCnstr_toExprProof__ _start: { lean_object* x_1; lean_object* x_2; lean_object* x_3; lean_object* x_4; lean_object* x_5; lean_object* x_6; -x_1 = l_Lean_Meta_Grind_Arith_CommRing_Null_setEqUnsat___lambda__1___closed__15; -x_2 = l_Lean_Meta_Grind_Arith_CommRing_Null_setEqUnsat___lambda__1___closed__16; -x_3 = l_Lean_Meta_Grind_Arith_CommRing_Null_setEqUnsat___lambda__1___closed__17; +x_1 = l___private_Lean_Meta_Tactic_Grind_Arith_CommRing_DenoteExpr_0__Lean_Meta_Grind_Arith_CommRing_denoteNum___at_Lean_Meta_Grind_Arith_CommRing_getPolyConst___spec__2___closed__1; +x_2 = l___private_Lean_Meta_Tactic_Grind_Arith_CommRing_DenoteExpr_0__Lean_Meta_Grind_Arith_CommRing_denoteNum___at_Lean_Meta_Grind_Arith_CommRing_getPolyConst___spec__2___closed__2; +x_3 = l___private_Lean_Meta_Tactic_Grind_Arith_CommRing_DenoteExpr_0__Lean_Meta_Grind_Arith_CommRing_denoteNum___at_Lean_Meta_Grind_Arith_CommRing_getPolyConst___spec__2___closed__3; x_4 = l_Lean_Meta_Grind_Arith_CommRing_EqCnstr_toExprProof___closed__1; x_5 = l_Lean_Meta_Grind_Arith_CommRing_EqCnstr_toExprProof___closed__16; x_6 = l_Lean_Name_mkStr5(x_1, x_2, x_3, x_4, x_5); @@ -16816,9 +19106,9 @@ static lean_object* _init_l_Lean_Meta_Grind_Arith_CommRing_EqCnstr_toExprProof__ _start: { lean_object* x_1; lean_object* x_2; lean_object* x_3; lean_object* x_4; lean_object* x_5; lean_object* x_6; -x_1 = l_Lean_Meta_Grind_Arith_CommRing_Null_setEqUnsat___lambda__1___closed__15; -x_2 = l_Lean_Meta_Grind_Arith_CommRing_Null_setEqUnsat___lambda__1___closed__16; -x_3 = l_Lean_Meta_Grind_Arith_CommRing_Null_setEqUnsat___lambda__1___closed__17; +x_1 = l___private_Lean_Meta_Tactic_Grind_Arith_CommRing_DenoteExpr_0__Lean_Meta_Grind_Arith_CommRing_denoteNum___at_Lean_Meta_Grind_Arith_CommRing_getPolyConst___spec__2___closed__1; +x_2 = l___private_Lean_Meta_Tactic_Grind_Arith_CommRing_DenoteExpr_0__Lean_Meta_Grind_Arith_CommRing_denoteNum___at_Lean_Meta_Grind_Arith_CommRing_getPolyConst___spec__2___closed__2; +x_3 = l___private_Lean_Meta_Tactic_Grind_Arith_CommRing_DenoteExpr_0__Lean_Meta_Grind_Arith_CommRing_denoteNum___at_Lean_Meta_Grind_Arith_CommRing_getPolyConst___spec__2___closed__3; x_4 = l_Lean_Meta_Grind_Arith_CommRing_EqCnstr_toExprProof___closed__1; x_5 = l_Lean_Meta_Grind_Arith_CommRing_EqCnstr_toExprProof___closed__18; x_6 = l_Lean_Name_mkStr5(x_1, x_2, x_3, x_4, x_5); @@ -16837,9 +19127,9 @@ static lean_object* _init_l_Lean_Meta_Grind_Arith_CommRing_EqCnstr_toExprProof__ 
_start: { lean_object* x_1; lean_object* x_2; lean_object* x_3; lean_object* x_4; lean_object* x_5; lean_object* x_6; -x_1 = l_Lean_Meta_Grind_Arith_CommRing_Null_setEqUnsat___lambda__1___closed__15; -x_2 = l_Lean_Meta_Grind_Arith_CommRing_Null_setEqUnsat___lambda__1___closed__16; -x_3 = l_Lean_Meta_Grind_Arith_CommRing_Null_setEqUnsat___lambda__1___closed__17; +x_1 = l___private_Lean_Meta_Tactic_Grind_Arith_CommRing_DenoteExpr_0__Lean_Meta_Grind_Arith_CommRing_denoteNum___at_Lean_Meta_Grind_Arith_CommRing_getPolyConst___spec__2___closed__1; +x_2 = l___private_Lean_Meta_Tactic_Grind_Arith_CommRing_DenoteExpr_0__Lean_Meta_Grind_Arith_CommRing_denoteNum___at_Lean_Meta_Grind_Arith_CommRing_getPolyConst___spec__2___closed__2; +x_3 = l___private_Lean_Meta_Tactic_Grind_Arith_CommRing_DenoteExpr_0__Lean_Meta_Grind_Arith_CommRing_denoteNum___at_Lean_Meta_Grind_Arith_CommRing_getPolyConst___spec__2___closed__3; x_4 = l_Lean_Meta_Grind_Arith_CommRing_EqCnstr_toExprProof___closed__1; x_5 = l_Lean_Meta_Grind_Arith_CommRing_EqCnstr_toExprProof___closed__20; x_6 = l_Lean_Name_mkStr5(x_1, x_2, x_3, x_4, x_5); @@ -16915,6 +19205,7 @@ lean_inc(x_7); lean_inc(x_6); lean_inc(x_5); lean_inc(x_4); +lean_inc(x_3); lean_inc(x_2); x_180 = l___private_Lean_Meta_Tactic_Grind_Arith_CommRing_Proof_0__Lean_Meta_Grind_Arith_CommRing_Stepwise_mkStepPrefix(x_178, x_179, x_2, x_3, x_4, x_5, x_6, x_7, x_8, x_9, x_10, x_11, x_12, x_23); if (lean_obj_tag(x_180) == 0) @@ -16970,6 +19261,7 @@ lean_dec(x_191); lean_dec(x_187); lean_dec(x_184); lean_dec(x_181); +lean_dec(x_3); x_198 = !lean_is_exclusive(x_193); if (x_198 == 0) { @@ -17006,6 +19298,7 @@ lean_dec(x_7); lean_dec(x_6); lean_dec(x_5); lean_dec(x_4); +lean_dec(x_3); lean_dec(x_2); lean_dec(x_1); x_202 = !lean_is_exclusive(x_180); @@ -17055,6 +19348,7 @@ lean_inc(x_7); lean_inc(x_6); lean_inc(x_5); lean_inc(x_4); +lean_inc(x_3); lean_inc(x_2); x_214 = l___private_Lean_Meta_Tactic_Grind_Arith_CommRing_Proof_0__Lean_Meta_Grind_Arith_CommRing_Stepwise_mkStepPrefix(x_212, x_213, x_2, x_3, x_4, x_5, x_6, x_7, x_8, x_9, x_10, x_11, x_12, x_23); if (lean_obj_tag(x_214) == 0) @@ -17111,6 +19405,7 @@ lean_inc(x_7); lean_inc(x_6); lean_inc(x_5); lean_inc(x_4); +lean_inc(x_3); lean_inc(x_2); x_235 = l_Lean_Meta_Grind_Arith_CommRing_EqCnstr_toExprProof(x_208, x_2, x_3, x_4, x_5, x_6, x_7, x_8, x_9, x_10, x_11, x_12, x_234); if (lean_obj_tag(x_235) == 0) @@ -17121,6 +19416,7 @@ lean_inc(x_236); x_237 = lean_ctor_get(x_235, 1); lean_inc(x_237); lean_dec(x_235); +lean_inc(x_3); x_238 = l_Lean_Meta_Grind_Arith_CommRing_EqCnstr_toExprProof(x_211, x_2, x_3, x_4, x_5, x_6, x_7, x_8, x_9, x_10, x_11, x_12, x_237); if (lean_obj_tag(x_238) == 0) { @@ -17130,7 +19426,7 @@ lean_inc(x_239); x_240 = lean_ctor_get(x_238, 1); lean_inc(x_240); lean_dec(x_238); -x_241 = l_Lean_Meta_Grind_Arith_CommRing_Null_instInhabitedPreNullCert___closed__2; +x_241 = l___private_Lean_Meta_Tactic_Grind_Arith_CommRing_DenoteExpr_0__Lean_Meta_Grind_Arith_CommRing_denoteNum___at_Lean_Meta_Grind_Arith_CommRing_getPolyConst___spec__2___closed__8; x_242 = lean_int_dec_le(x_241, x_206); x_243 = lean_int_dec_le(x_241, x_209); if (x_242 == 0) @@ -17223,6 +19519,7 @@ lean_dec(x_218); lean_dec(x_215); lean_dec(x_209); lean_dec(x_206); +lean_dec(x_3); x_276 = !lean_is_exclusive(x_238); if (x_276 == 0) { @@ -17264,6 +19561,7 @@ lean_dec(x_7); lean_dec(x_6); lean_dec(x_5); lean_dec(x_4); +lean_dec(x_3); lean_dec(x_2); x_280 = !lean_is_exclusive(x_235); if (x_280 == 0) @@ -17303,6 +19601,7 @@ lean_dec(x_7); lean_dec(x_6); 
lean_dec(x_5); lean_dec(x_4); +lean_dec(x_3); lean_dec(x_2); lean_dec(x_1); x_284 = !lean_is_exclusive(x_214); @@ -17350,6 +19649,7 @@ lean_inc(x_7); lean_inc(x_6); lean_inc(x_5); lean_inc(x_4); +lean_inc(x_3); lean_inc(x_2); x_295 = l___private_Lean_Meta_Tactic_Grind_Arith_CommRing_Proof_0__Lean_Meta_Grind_Arith_CommRing_Stepwise_mkStepPrefix(x_293, x_294, x_2, x_3, x_4, x_5, x_6, x_7, x_8, x_9, x_10, x_11, x_12, x_23); if (lean_obj_tag(x_295) == 0) @@ -17400,6 +19700,7 @@ lean_inc(x_7); lean_inc(x_6); lean_inc(x_5); lean_inc(x_4); +lean_inc(x_3); lean_inc(x_2); x_313 = l_Lean_Meta_Grind_Arith_CommRing_EqCnstr_toExprProof(x_289, x_2, x_3, x_4, x_5, x_6, x_7, x_8, x_9, x_10, x_11, x_12, x_312); if (lean_obj_tag(x_313) == 0) @@ -17410,6 +19711,7 @@ lean_inc(x_314); x_315 = lean_ctor_get(x_313, 1); lean_inc(x_315); lean_dec(x_313); +lean_inc(x_3); x_316 = l_Lean_Meta_Grind_Arith_CommRing_EqCnstr_toExprProof(x_292, x_2, x_3, x_4, x_5, x_6, x_7, x_8, x_9, x_10, x_11, x_12, x_315); if (lean_obj_tag(x_316) == 0) { @@ -17419,7 +19721,7 @@ lean_inc(x_317); x_318 = lean_ctor_get(x_316, 1); lean_inc(x_318); lean_dec(x_316); -x_319 = l_Lean_Meta_Grind_Arith_CommRing_Null_instInhabitedPreNullCert___closed__2; +x_319 = l___private_Lean_Meta_Tactic_Grind_Arith_CommRing_DenoteExpr_0__Lean_Meta_Grind_Arith_CommRing_denoteNum___at_Lean_Meta_Grind_Arith_CommRing_getPolyConst___spec__2___closed__8; x_320 = lean_int_dec_le(x_319, x_288); x_321 = lean_int_dec_le(x_319, x_290); if (x_320 == 0) @@ -17511,6 +19813,7 @@ lean_dec(x_300); lean_dec(x_296); lean_dec(x_290); lean_dec(x_288); +lean_dec(x_3); x_354 = !lean_is_exclusive(x_316); if (x_354 == 0) { @@ -17551,6 +19854,7 @@ lean_dec(x_7); lean_dec(x_6); lean_dec(x_5); lean_dec(x_4); +lean_dec(x_3); lean_dec(x_2); x_358 = !lean_is_exclusive(x_313); if (x_358 == 0) @@ -17589,6 +19893,7 @@ lean_dec(x_7); lean_dec(x_6); lean_dec(x_5); lean_dec(x_4); +lean_dec(x_3); lean_dec(x_2); lean_dec(x_1); x_362 = !lean_is_exclusive(x_295); @@ -17630,6 +19935,7 @@ lean_inc(x_7); lean_inc(x_6); lean_inc(x_5); lean_inc(x_4); +lean_inc(x_3); lean_inc(x_2); x_370 = l___private_Lean_Meta_Tactic_Grind_Arith_CommRing_Proof_0__Lean_Meta_Grind_Arith_CommRing_Stepwise_mkStepPrefix(x_368, x_369, x_2, x_3, x_4, x_5, x_6, x_7, x_8, x_9, x_10, x_11, x_12, x_23); if (lean_obj_tag(x_370) == 0) @@ -17657,6 +19963,7 @@ lean_inc(x_379); x_380 = lean_ctor_get(x_378, 1); lean_inc(x_380); lean_dec(x_378); +lean_inc(x_3); x_381 = l_Lean_Meta_Grind_Arith_CommRing_EqCnstr_toExprProof(x_367, x_2, x_3, x_4, x_5, x_6, x_7, x_8, x_9, x_10, x_11, x_12, x_380); if (lean_obj_tag(x_381) == 0) { @@ -17666,7 +19973,7 @@ lean_inc(x_382); x_383 = lean_ctor_get(x_381, 1); lean_inc(x_383); lean_dec(x_381); -x_384 = l_Lean_Meta_Grind_Arith_CommRing_Null_instInhabitedPreNullCert___closed__2; +x_384 = l___private_Lean_Meta_Tactic_Grind_Arith_CommRing_DenoteExpr_0__Lean_Meta_Grind_Arith_CommRing_denoteNum___at_Lean_Meta_Grind_Arith_CommRing_getPolyConst___spec__2___closed__8; x_385 = lean_int_dec_le(x_384, x_366); if (x_385 == 0) { @@ -17706,6 +20013,7 @@ lean_dec(x_379); lean_dec(x_375); lean_dec(x_371); lean_dec(x_366); +lean_dec(x_3); x_399 = !lean_is_exclusive(x_381); if (x_399 == 0) { @@ -17740,6 +20048,7 @@ lean_dec(x_7); lean_dec(x_6); lean_dec(x_5); lean_dec(x_4); +lean_dec(x_3); lean_dec(x_2); lean_dec(x_1); x_403 = !lean_is_exclusive(x_370); @@ -17781,6 +20090,7 @@ lean_inc(x_7); lean_inc(x_6); lean_inc(x_5); lean_inc(x_4); +lean_inc(x_3); lean_inc(x_2); x_411 = 
l___private_Lean_Meta_Tactic_Grind_Arith_CommRing_Proof_0__Lean_Meta_Grind_Arith_CommRing_Stepwise_mkStepPrefix(x_409, x_410, x_2, x_3, x_4, x_5, x_6, x_7, x_8, x_9, x_10, x_11, x_12, x_23); if (lean_obj_tag(x_411) == 0) @@ -17803,6 +20113,7 @@ lean_object* x_416; lean_object* x_417; uint8_t x_418; lean_dec(x_412); lean_dec(x_408); lean_dec(x_407); +lean_dec(x_3); lean_dec(x_2); lean_dec(x_1); x_416 = lean_ctor_get(x_414, 1); @@ -17863,6 +20174,7 @@ lean_inc(x_430); x_431 = lean_ctor_get(x_429, 1); lean_inc(x_431); lean_dec(x_429); +lean_inc(x_3); x_432 = l_Lean_Meta_Grind_Arith_CommRing_EqCnstr_toExprProof(x_408, x_2, x_3, x_4, x_5, x_6, x_7, x_8, x_9, x_10, x_11, x_12, x_431); if (lean_obj_tag(x_432) == 0) { @@ -17872,7 +20184,7 @@ lean_inc(x_433); x_434 = lean_ctor_get(x_432, 1); lean_inc(x_434); lean_dec(x_432); -x_435 = l_Lean_Meta_Grind_Arith_CommRing_Null_instInhabitedPreNullCert___closed__2; +x_435 = l___private_Lean_Meta_Tactic_Grind_Arith_CommRing_DenoteExpr_0__Lean_Meta_Grind_Arith_CommRing_denoteNum___at_Lean_Meta_Grind_Arith_CommRing_getPolyConst___spec__2___closed__8; x_436 = lean_int_dec_le(x_435, x_407); if (x_436 == 0) { @@ -17913,6 +20225,7 @@ lean_dec(x_426); lean_dec(x_423); lean_dec(x_412); lean_dec(x_407); +lean_dec(x_3); x_450 = !lean_is_exclusive(x_432); if (x_450 == 0) { @@ -17949,6 +20262,7 @@ lean_dec(x_7); lean_dec(x_6); lean_dec(x_5); lean_dec(x_4); +lean_dec(x_3); lean_dec(x_2); lean_dec(x_1); x_454 = !lean_is_exclusive(x_414); @@ -17985,6 +20299,7 @@ lean_dec(x_7); lean_dec(x_6); lean_dec(x_5); lean_dec(x_4); +lean_dec(x_3); lean_dec(x_2); lean_dec(x_1); x_458 = !lean_is_exclusive(x_411); @@ -18022,6 +20337,7 @@ lean_dec(x_7); lean_dec(x_6); lean_dec(x_5); lean_dec(x_4); +lean_dec(x_3); lean_dec(x_2); lean_dec(x_1); x_462 = lean_ctor_get(x_173, 0); @@ -18090,6 +20406,7 @@ x_64 = l_Std_DHashMap_Internal_Raw_u2080_expand___at___private_Lean_Meta_Tactic_ lean_ctor_set(x_40, 1, x_64); lean_ctor_set(x_40, 0, x_54); x_65 = lean_st_ref_set(x_3, x_39, x_41); +lean_dec(x_3); x_66 = !lean_is_exclusive(x_65); if (x_66 == 0) { @@ -18117,6 +20434,7 @@ lean_object* x_70; uint8_t x_71; lean_ctor_set(x_40, 1, x_57); lean_ctor_set(x_40, 0, x_54); x_70 = lean_st_ref_set(x_3, x_39, x_41); +lean_dec(x_3); x_71 = !lean_is_exclusive(x_70); if (x_71 == 0) { @@ -18149,6 +20467,7 @@ x_77 = l_Std_DHashMap_Internal_AssocList_replace___at___private_Lean_Meta_Tactic x_78 = lean_array_uset(x_76, x_50, x_77); lean_ctor_set(x_40, 1, x_78); x_79 = lean_st_ref_set(x_3, x_39, x_41); +lean_dec(x_3); x_80 = !lean_is_exclusive(x_79); if (x_80 == 0) { @@ -18218,6 +20537,7 @@ lean_ctor_set(x_104, 0, x_93); lean_ctor_set(x_104, 1, x_103); lean_ctor_set(x_39, 0, x_104); x_105 = lean_st_ref_set(x_3, x_39, x_41); +lean_dec(x_3); x_106 = lean_ctor_get(x_105, 1); lean_inc(x_106); if (lean_is_exclusive(x_105)) { @@ -18245,6 +20565,7 @@ lean_ctor_set(x_109, 0, x_93); lean_ctor_set(x_109, 1, x_96); lean_ctor_set(x_39, 0, x_109); x_110 = lean_st_ref_set(x_3, x_39, x_41); +lean_dec(x_3); x_111 = lean_ctor_get(x_110, 1); lean_inc(x_111); if (lean_is_exclusive(x_110)) { @@ -18278,6 +20599,7 @@ lean_ctor_set(x_118, 0, x_84); lean_ctor_set(x_118, 1, x_117); lean_ctor_set(x_39, 0, x_118); x_119 = lean_st_ref_set(x_3, x_39, x_41); +lean_dec(x_3); x_120 = lean_ctor_get(x_119, 1); lean_inc(x_120); if (lean_is_exclusive(x_119)) { @@ -18368,6 +20690,7 @@ lean_ctor_set(x_148, 1, x_123); lean_ctor_set(x_148, 2, x_124); lean_ctor_set(x_148, 3, x_125); x_149 = lean_st_ref_set(x_3, x_148, x_41); +lean_dec(x_3); x_150 = 
lean_ctor_get(x_149, 1); lean_inc(x_150); if (lean_is_exclusive(x_149)) { @@ -18403,6 +20726,7 @@ lean_ctor_set(x_154, 1, x_123); lean_ctor_set(x_154, 2, x_124); lean_ctor_set(x_154, 3, x_125); x_155 = lean_st_ref_set(x_3, x_154, x_41); +lean_dec(x_3); x_156 = lean_ctor_get(x_155, 1); lean_inc(x_156); if (lean_is_exclusive(x_155)) { @@ -18444,6 +20768,7 @@ lean_ctor_set(x_164, 1, x_123); lean_ctor_set(x_164, 2, x_124); lean_ctor_set(x_164, 3, x_125); x_165 = lean_st_ref_set(x_3, x_164, x_41); +lean_dec(x_3); x_166 = lean_ctor_get(x_165, 1); lean_inc(x_166); if (lean_is_exclusive(x_165)) { @@ -18517,6 +20842,7 @@ lean_inc(x_7); lean_inc(x_6); lean_inc(x_5); lean_inc(x_4); +lean_inc(x_3); lean_inc(x_2); x_539 = l___private_Lean_Meta_Tactic_Grind_Arith_CommRing_Proof_0__Lean_Meta_Grind_Arith_CommRing_Stepwise_mkStepPrefix(x_537, x_538, x_2, x_3, x_4, x_5, x_6, x_7, x_8, x_9, x_10, x_11, x_12, x_463); if (lean_obj_tag(x_539) == 0) @@ -18572,6 +20898,7 @@ lean_dec(x_550); lean_dec(x_546); lean_dec(x_543); lean_dec(x_540); +lean_dec(x_3); x_557 = lean_ctor_get(x_552, 0); lean_inc(x_557); x_558 = lean_ctor_get(x_552, 1); @@ -18610,6 +20937,7 @@ lean_dec(x_7); lean_dec(x_6); lean_dec(x_5); lean_dec(x_4); +lean_dec(x_3); lean_dec(x_2); lean_dec(x_1); x_561 = lean_ctor_get(x_539, 0); @@ -18661,6 +20989,7 @@ lean_inc(x_7); lean_inc(x_6); lean_inc(x_5); lean_inc(x_4); +lean_inc(x_3); lean_inc(x_2); x_573 = l___private_Lean_Meta_Tactic_Grind_Arith_CommRing_Proof_0__Lean_Meta_Grind_Arith_CommRing_Stepwise_mkStepPrefix(x_571, x_572, x_2, x_3, x_4, x_5, x_6, x_7, x_8, x_9, x_10, x_11, x_12, x_463); if (lean_obj_tag(x_573) == 0) @@ -18717,6 +21046,7 @@ lean_inc(x_7); lean_inc(x_6); lean_inc(x_5); lean_inc(x_4); +lean_inc(x_3); lean_inc(x_2); x_594 = l_Lean_Meta_Grind_Arith_CommRing_EqCnstr_toExprProof(x_567, x_2, x_3, x_4, x_5, x_6, x_7, x_8, x_9, x_10, x_11, x_12, x_593); if (lean_obj_tag(x_594) == 0) @@ -18727,6 +21057,7 @@ lean_inc(x_595); x_596 = lean_ctor_get(x_594, 1); lean_inc(x_596); lean_dec(x_594); +lean_inc(x_3); x_597 = l_Lean_Meta_Grind_Arith_CommRing_EqCnstr_toExprProof(x_570, x_2, x_3, x_4, x_5, x_6, x_7, x_8, x_9, x_10, x_11, x_12, x_596); if (lean_obj_tag(x_597) == 0) { @@ -18736,7 +21067,7 @@ lean_inc(x_598); x_599 = lean_ctor_get(x_597, 1); lean_inc(x_599); lean_dec(x_597); -x_600 = l_Lean_Meta_Grind_Arith_CommRing_Null_instInhabitedPreNullCert___closed__2; +x_600 = l___private_Lean_Meta_Tactic_Grind_Arith_CommRing_DenoteExpr_0__Lean_Meta_Grind_Arith_CommRing_denoteNum___at_Lean_Meta_Grind_Arith_CommRing_getPolyConst___spec__2___closed__8; x_601 = lean_int_dec_le(x_600, x_565); x_602 = lean_int_dec_le(x_600, x_568); if (x_601 == 0) @@ -18829,6 +21160,7 @@ lean_dec(x_577); lean_dec(x_574); lean_dec(x_568); lean_dec(x_565); +lean_dec(x_3); x_635 = lean_ctor_get(x_597, 0); lean_inc(x_635); x_636 = lean_ctor_get(x_597, 1); @@ -18872,6 +21204,7 @@ lean_dec(x_7); lean_dec(x_6); lean_dec(x_5); lean_dec(x_4); +lean_dec(x_3); lean_dec(x_2); x_639 = lean_ctor_get(x_594, 0); lean_inc(x_639); @@ -18913,6 +21246,7 @@ lean_dec(x_7); lean_dec(x_6); lean_dec(x_5); lean_dec(x_4); +lean_dec(x_3); lean_dec(x_2); lean_dec(x_1); x_643 = lean_ctor_get(x_573, 0); @@ -18962,6 +21296,7 @@ lean_inc(x_7); lean_inc(x_6); lean_inc(x_5); lean_inc(x_4); +lean_inc(x_3); lean_inc(x_2); x_654 = l___private_Lean_Meta_Tactic_Grind_Arith_CommRing_Proof_0__Lean_Meta_Grind_Arith_CommRing_Stepwise_mkStepPrefix(x_652, x_653, x_2, x_3, x_4, x_5, x_6, x_7, x_8, x_9, x_10, x_11, x_12, x_463); if (lean_obj_tag(x_654) == 0) @@ 
-19012,6 +21347,7 @@ lean_inc(x_7); lean_inc(x_6); lean_inc(x_5); lean_inc(x_4); +lean_inc(x_3); lean_inc(x_2); x_672 = l_Lean_Meta_Grind_Arith_CommRing_EqCnstr_toExprProof(x_648, x_2, x_3, x_4, x_5, x_6, x_7, x_8, x_9, x_10, x_11, x_12, x_671); if (lean_obj_tag(x_672) == 0) @@ -19022,6 +21358,7 @@ lean_inc(x_673); x_674 = lean_ctor_get(x_672, 1); lean_inc(x_674); lean_dec(x_672); +lean_inc(x_3); x_675 = l_Lean_Meta_Grind_Arith_CommRing_EqCnstr_toExprProof(x_651, x_2, x_3, x_4, x_5, x_6, x_7, x_8, x_9, x_10, x_11, x_12, x_674); if (lean_obj_tag(x_675) == 0) { @@ -19031,7 +21368,7 @@ lean_inc(x_676); x_677 = lean_ctor_get(x_675, 1); lean_inc(x_677); lean_dec(x_675); -x_678 = l_Lean_Meta_Grind_Arith_CommRing_Null_instInhabitedPreNullCert___closed__2; +x_678 = l___private_Lean_Meta_Tactic_Grind_Arith_CommRing_DenoteExpr_0__Lean_Meta_Grind_Arith_CommRing_denoteNum___at_Lean_Meta_Grind_Arith_CommRing_getPolyConst___spec__2___closed__8; x_679 = lean_int_dec_le(x_678, x_647); x_680 = lean_int_dec_le(x_678, x_649); if (x_679 == 0) @@ -19123,6 +21460,7 @@ lean_dec(x_659); lean_dec(x_655); lean_dec(x_649); lean_dec(x_647); +lean_dec(x_3); x_713 = lean_ctor_get(x_675, 0); lean_inc(x_713); x_714 = lean_ctor_get(x_675, 1); @@ -19165,6 +21503,7 @@ lean_dec(x_7); lean_dec(x_6); lean_dec(x_5); lean_dec(x_4); +lean_dec(x_3); lean_dec(x_2); x_717 = lean_ctor_get(x_672, 0); lean_inc(x_717); @@ -19205,6 +21544,7 @@ lean_dec(x_7); lean_dec(x_6); lean_dec(x_5); lean_dec(x_4); +lean_dec(x_3); lean_dec(x_2); lean_dec(x_1); x_721 = lean_ctor_get(x_654, 0); @@ -19248,6 +21588,7 @@ lean_inc(x_7); lean_inc(x_6); lean_inc(x_5); lean_inc(x_4); +lean_inc(x_3); lean_inc(x_2); x_729 = l___private_Lean_Meta_Tactic_Grind_Arith_CommRing_Proof_0__Lean_Meta_Grind_Arith_CommRing_Stepwise_mkStepPrefix(x_727, x_728, x_2, x_3, x_4, x_5, x_6, x_7, x_8, x_9, x_10, x_11, x_12, x_463); if (lean_obj_tag(x_729) == 0) @@ -19275,6 +21616,7 @@ lean_inc(x_738); x_739 = lean_ctor_get(x_737, 1); lean_inc(x_739); lean_dec(x_737); +lean_inc(x_3); x_740 = l_Lean_Meta_Grind_Arith_CommRing_EqCnstr_toExprProof(x_726, x_2, x_3, x_4, x_5, x_6, x_7, x_8, x_9, x_10, x_11, x_12, x_739); if (lean_obj_tag(x_740) == 0) { @@ -19284,7 +21626,7 @@ lean_inc(x_741); x_742 = lean_ctor_get(x_740, 1); lean_inc(x_742); lean_dec(x_740); -x_743 = l_Lean_Meta_Grind_Arith_CommRing_Null_instInhabitedPreNullCert___closed__2; +x_743 = l___private_Lean_Meta_Tactic_Grind_Arith_CommRing_DenoteExpr_0__Lean_Meta_Grind_Arith_CommRing_denoteNum___at_Lean_Meta_Grind_Arith_CommRing_getPolyConst___spec__2___closed__8; x_744 = lean_int_dec_le(x_743, x_725); if (x_744 == 0) { @@ -19324,6 +21666,7 @@ lean_dec(x_738); lean_dec(x_734); lean_dec(x_730); lean_dec(x_725); +lean_dec(x_3); x_758 = lean_ctor_get(x_740, 0); lean_inc(x_758); x_759 = lean_ctor_get(x_740, 1); @@ -19360,6 +21703,7 @@ lean_dec(x_7); lean_dec(x_6); lean_dec(x_5); lean_dec(x_4); +lean_dec(x_3); lean_dec(x_2); lean_dec(x_1); x_762 = lean_ctor_get(x_729, 0); @@ -19403,6 +21747,7 @@ lean_inc(x_7); lean_inc(x_6); lean_inc(x_5); lean_inc(x_4); +lean_inc(x_3); lean_inc(x_2); x_770 = l___private_Lean_Meta_Tactic_Grind_Arith_CommRing_Proof_0__Lean_Meta_Grind_Arith_CommRing_Stepwise_mkStepPrefix(x_768, x_769, x_2, x_3, x_4, x_5, x_6, x_7, x_8, x_9, x_10, x_11, x_12, x_463); if (lean_obj_tag(x_770) == 0) @@ -19425,6 +21770,7 @@ lean_object* x_775; lean_object* x_776; lean_object* x_777; lean_object* x_778; lean_dec(x_771); lean_dec(x_767); lean_dec(x_766); +lean_dec(x_3); lean_dec(x_2); lean_dec(x_1); x_775 = 
lean_ctor_get(x_773, 1); @@ -19487,6 +21833,7 @@ lean_inc(x_789); x_790 = lean_ctor_get(x_788, 1); lean_inc(x_790); lean_dec(x_788); +lean_inc(x_3); x_791 = l_Lean_Meta_Grind_Arith_CommRing_EqCnstr_toExprProof(x_767, x_2, x_3, x_4, x_5, x_6, x_7, x_8, x_9, x_10, x_11, x_12, x_790); if (lean_obj_tag(x_791) == 0) { @@ -19496,7 +21843,7 @@ lean_inc(x_792); x_793 = lean_ctor_get(x_791, 1); lean_inc(x_793); lean_dec(x_791); -x_794 = l_Lean_Meta_Grind_Arith_CommRing_Null_instInhabitedPreNullCert___closed__2; +x_794 = l___private_Lean_Meta_Tactic_Grind_Arith_CommRing_DenoteExpr_0__Lean_Meta_Grind_Arith_CommRing_denoteNum___at_Lean_Meta_Grind_Arith_CommRing_getPolyConst___spec__2___closed__8; x_795 = lean_int_dec_le(x_794, x_766); if (x_795 == 0) { @@ -19537,6 +21884,7 @@ lean_dec(x_785); lean_dec(x_782); lean_dec(x_771); lean_dec(x_766); +lean_dec(x_3); x_809 = lean_ctor_get(x_791, 0); lean_inc(x_809); x_810 = lean_ctor_get(x_791, 1); @@ -19575,6 +21923,7 @@ lean_dec(x_7); lean_dec(x_6); lean_dec(x_5); lean_dec(x_4); +lean_dec(x_3); lean_dec(x_2); lean_dec(x_1); x_813 = lean_ctor_get(x_773, 0); @@ -19613,6 +21962,7 @@ lean_dec(x_7); lean_dec(x_6); lean_dec(x_5); lean_dec(x_4); +lean_dec(x_3); lean_dec(x_2); lean_dec(x_1); x_817 = lean_ctor_get(x_770, 0); @@ -19652,6 +22002,7 @@ lean_dec(x_7); lean_dec(x_6); lean_dec(x_5); lean_dec(x_4); +lean_dec(x_3); lean_dec(x_2); lean_dec(x_1); x_821 = lean_ctor_get(x_532, 0); @@ -19752,6 +22103,7 @@ lean_ctor_set(x_507, 1, x_481); lean_ctor_set(x_507, 2, x_482); lean_ctor_set(x_507, 3, x_483); x_508 = lean_st_ref_set(x_3, x_507, x_480); +lean_dec(x_3); x_509 = lean_ctor_get(x_508, 1); lean_inc(x_509); if (lean_is_exclusive(x_508)) { @@ -19791,6 +22143,7 @@ lean_ctor_set(x_513, 1, x_481); lean_ctor_set(x_513, 2, x_482); lean_ctor_set(x_513, 3, x_483); x_514 = lean_st_ref_set(x_3, x_513, x_480); +lean_dec(x_3); x_515 = lean_ctor_get(x_514, 1); lean_inc(x_515); if (lean_is_exclusive(x_514)) { @@ -19836,6 +22189,7 @@ lean_ctor_set(x_523, 1, x_481); lean_ctor_set(x_523, 2, x_482); lean_ctor_set(x_523, 3, x_483); x_524 = lean_st_ref_set(x_3, x_523, x_480); +lean_dec(x_3); x_525 = lean_ctor_get(x_524, 1); lean_inc(x_525); if (lean_is_exclusive(x_524)) { @@ -19859,15 +22213,6 @@ return x_527; } } } -LEAN_EXPORT lean_object* l_Lean_Meta_Grind_Arith_CommRing_EqCnstr_toExprProof___boxed(lean_object* x_1, lean_object* x_2, lean_object* x_3, lean_object* x_4, lean_object* x_5, lean_object* x_6, lean_object* x_7, lean_object* x_8, lean_object* x_9, lean_object* x_10, lean_object* x_11, lean_object* x_12, lean_object* x_13) { -_start: -{ -lean_object* x_14; -x_14 = l_Lean_Meta_Grind_Arith_CommRing_EqCnstr_toExprProof(x_1, x_2, x_3, x_4, x_5, x_6, x_7, x_8, x_9, x_10, x_11, x_12, x_13); -lean_dec(x_3); -return x_14; -} -} LEAN_EXPORT lean_object* l___private_Lean_Meta_Tactic_Grind_Arith_CommRing_Proof_0__Lean_Meta_Grind_Arith_CommRing_Stepwise_derivToExprProof___lambda__1(lean_object* x_1, lean_object* x_2, lean_object* x_3, lean_object* x_4, lean_object* x_5, lean_object* x_6, lean_object* x_7, lean_object* x_8, lean_object* x_9, lean_object* x_10, lean_object* x_11, lean_object* x_12, lean_object* x_13, lean_object* x_14, lean_object* x_15, lean_object* x_16, lean_object* x_17, lean_object* x_18, lean_object* x_19, lean_object* x_20, lean_object* x_21, lean_object* x_22, lean_object* x_23) { _start: { @@ -19909,7 +22254,7 @@ if (x_41 == 0) { lean_object* x_42; lean_object* x_43; uint8_t x_44; uint8_t x_45; lean_object* x_46; x_42 = lean_ctor_get(x_40, 0); -x_43 = 
l_Lean_Meta_Grind_Arith_CommRing_Null_instInhabitedPreNullCert___closed__2; +x_43 = l___private_Lean_Meta_Tactic_Grind_Arith_CommRing_DenoteExpr_0__Lean_Meta_Grind_Arith_CommRing_denoteNum___at_Lean_Meta_Grind_Arith_CommRing_getPolyConst___spec__2___closed__8; x_44 = lean_int_dec_le(x_43, x_6); x_45 = lean_int_dec_le(x_43, x_7); x_46 = lean_int_mul(x_8, x_6); @@ -20005,7 +22350,7 @@ x_80 = lean_ctor_get(x_40, 1); lean_inc(x_80); lean_inc(x_79); lean_dec(x_40); -x_81 = l_Lean_Meta_Grind_Arith_CommRing_Null_instInhabitedPreNullCert___closed__2; +x_81 = l___private_Lean_Meta_Tactic_Grind_Arith_CommRing_DenoteExpr_0__Lean_Meta_Grind_Arith_CommRing_denoteNum___at_Lean_Meta_Grind_Arith_CommRing_getPolyConst___spec__2___closed__8; x_82 = lean_int_dec_le(x_81, x_6); x_83 = lean_int_dec_le(x_81, x_7); x_84 = lean_int_mul(x_8, x_6); @@ -20123,7 +22468,7 @@ if (lean_is_exclusive(x_123)) { lean_dec_ref(x_123); x_126 = lean_box(0); } -x_127 = l_Lean_Meta_Grind_Arith_CommRing_Null_instInhabitedPreNullCert___closed__2; +x_127 = l___private_Lean_Meta_Tactic_Grind_Arith_CommRing_DenoteExpr_0__Lean_Meta_Grind_Arith_CommRing_denoteNum___at_Lean_Meta_Grind_Arith_CommRing_getPolyConst___spec__2___closed__8; x_128 = lean_int_dec_le(x_127, x_6); x_129 = lean_int_dec_le(x_127, x_7); x_130 = lean_int_mul(x_8, x_6); @@ -20277,7 +22622,7 @@ if (lean_is_exclusive(x_178)) { lean_dec_ref(x_178); x_181 = lean_box(0); } -x_182 = l_Lean_Meta_Grind_Arith_CommRing_Null_instInhabitedPreNullCert___closed__2; +x_182 = l___private_Lean_Meta_Tactic_Grind_Arith_CommRing_DenoteExpr_0__Lean_Meta_Grind_Arith_CommRing_denoteNum___at_Lean_Meta_Grind_Arith_CommRing_getPolyConst___spec__2___closed__8; x_183 = lean_int_dec_le(x_182, x_6); x_184 = lean_int_dec_le(x_182, x_7); x_185 = lean_int_mul(x_8, x_6); @@ -20427,9 +22772,9 @@ static lean_object* _init_l___private_Lean_Meta_Tactic_Grind_Arith_CommRing_Proo _start: { lean_object* x_1; lean_object* x_2; lean_object* x_3; lean_object* x_4; lean_object* x_5; lean_object* x_6; -x_1 = l_Lean_Meta_Grind_Arith_CommRing_Null_setEqUnsat___lambda__1___closed__15; -x_2 = l_Lean_Meta_Grind_Arith_CommRing_Null_setEqUnsat___lambda__1___closed__16; -x_3 = l_Lean_Meta_Grind_Arith_CommRing_Null_setEqUnsat___lambda__1___closed__17; +x_1 = l___private_Lean_Meta_Tactic_Grind_Arith_CommRing_DenoteExpr_0__Lean_Meta_Grind_Arith_CommRing_denoteNum___at_Lean_Meta_Grind_Arith_CommRing_getPolyConst___spec__2___closed__1; +x_2 = l___private_Lean_Meta_Tactic_Grind_Arith_CommRing_DenoteExpr_0__Lean_Meta_Grind_Arith_CommRing_denoteNum___at_Lean_Meta_Grind_Arith_CommRing_getPolyConst___spec__2___closed__2; +x_3 = l___private_Lean_Meta_Tactic_Grind_Arith_CommRing_DenoteExpr_0__Lean_Meta_Grind_Arith_CommRing_denoteNum___at_Lean_Meta_Grind_Arith_CommRing_getPolyConst___spec__2___closed__3; x_4 = l_Lean_Meta_Grind_Arith_CommRing_EqCnstr_toExprProof___closed__1; x_5 = l___private_Lean_Meta_Tactic_Grind_Arith_CommRing_Proof_0__Lean_Meta_Grind_Arith_CommRing_Stepwise_derivToExprProof___closed__1; x_6 = l_Lean_Name_mkStr5(x_1, x_2, x_3, x_4, x_5); @@ -20448,9 +22793,9 @@ static lean_object* _init_l___private_Lean_Meta_Tactic_Grind_Arith_CommRing_Proo _start: { lean_object* x_1; lean_object* x_2; lean_object* x_3; lean_object* x_4; lean_object* x_5; lean_object* x_6; -x_1 = l_Lean_Meta_Grind_Arith_CommRing_Null_setEqUnsat___lambda__1___closed__15; -x_2 = l_Lean_Meta_Grind_Arith_CommRing_Null_setEqUnsat___lambda__1___closed__16; -x_3 = l_Lean_Meta_Grind_Arith_CommRing_Null_setEqUnsat___lambda__1___closed__17; +x_1 = 
l___private_Lean_Meta_Tactic_Grind_Arith_CommRing_DenoteExpr_0__Lean_Meta_Grind_Arith_CommRing_denoteNum___at_Lean_Meta_Grind_Arith_CommRing_getPolyConst___spec__2___closed__1; +x_2 = l___private_Lean_Meta_Tactic_Grind_Arith_CommRing_DenoteExpr_0__Lean_Meta_Grind_Arith_CommRing_denoteNum___at_Lean_Meta_Grind_Arith_CommRing_getPolyConst___spec__2___closed__2; +x_3 = l___private_Lean_Meta_Tactic_Grind_Arith_CommRing_DenoteExpr_0__Lean_Meta_Grind_Arith_CommRing_denoteNum___at_Lean_Meta_Grind_Arith_CommRing_getPolyConst___spec__2___closed__3; x_4 = l_Lean_Meta_Grind_Arith_CommRing_EqCnstr_toExprProof___closed__1; x_5 = l___private_Lean_Meta_Tactic_Grind_Arith_CommRing_Proof_0__Lean_Meta_Grind_Arith_CommRing_Stepwise_derivToExprProof___closed__3; x_6 = l_Lean_Name_mkStr5(x_1, x_2, x_3, x_4, x_5); @@ -20469,9 +22814,9 @@ static lean_object* _init_l___private_Lean_Meta_Tactic_Grind_Arith_CommRing_Proo _start: { lean_object* x_1; lean_object* x_2; lean_object* x_3; lean_object* x_4; lean_object* x_5; lean_object* x_6; -x_1 = l_Lean_Meta_Grind_Arith_CommRing_Null_setEqUnsat___lambda__1___closed__15; -x_2 = l_Lean_Meta_Grind_Arith_CommRing_Null_setEqUnsat___lambda__1___closed__16; -x_3 = l_Lean_Meta_Grind_Arith_CommRing_Null_setEqUnsat___lambda__1___closed__17; +x_1 = l___private_Lean_Meta_Tactic_Grind_Arith_CommRing_DenoteExpr_0__Lean_Meta_Grind_Arith_CommRing_denoteNum___at_Lean_Meta_Grind_Arith_CommRing_getPolyConst___spec__2___closed__1; +x_2 = l___private_Lean_Meta_Tactic_Grind_Arith_CommRing_DenoteExpr_0__Lean_Meta_Grind_Arith_CommRing_denoteNum___at_Lean_Meta_Grind_Arith_CommRing_getPolyConst___spec__2___closed__2; +x_3 = l___private_Lean_Meta_Tactic_Grind_Arith_CommRing_DenoteExpr_0__Lean_Meta_Grind_Arith_CommRing_denoteNum___at_Lean_Meta_Grind_Arith_CommRing_getPolyConst___spec__2___closed__3; x_4 = l_Lean_Meta_Grind_Arith_CommRing_EqCnstr_toExprProof___closed__1; x_5 = l___private_Lean_Meta_Tactic_Grind_Arith_CommRing_Proof_0__Lean_Meta_Grind_Arith_CommRing_Stepwise_derivToExprProof___closed__5; x_6 = l_Lean_Name_mkStr5(x_1, x_2, x_3, x_4, x_5); @@ -20490,9 +22835,9 @@ static lean_object* _init_l___private_Lean_Meta_Tactic_Grind_Arith_CommRing_Proo _start: { lean_object* x_1; lean_object* x_2; lean_object* x_3; lean_object* x_4; lean_object* x_5; lean_object* x_6; -x_1 = l_Lean_Meta_Grind_Arith_CommRing_Null_setEqUnsat___lambda__1___closed__15; -x_2 = l_Lean_Meta_Grind_Arith_CommRing_Null_setEqUnsat___lambda__1___closed__16; -x_3 = l_Lean_Meta_Grind_Arith_CommRing_Null_setEqUnsat___lambda__1___closed__17; +x_1 = l___private_Lean_Meta_Tactic_Grind_Arith_CommRing_DenoteExpr_0__Lean_Meta_Grind_Arith_CommRing_denoteNum___at_Lean_Meta_Grind_Arith_CommRing_getPolyConst___spec__2___closed__1; +x_2 = l___private_Lean_Meta_Tactic_Grind_Arith_CommRing_DenoteExpr_0__Lean_Meta_Grind_Arith_CommRing_denoteNum___at_Lean_Meta_Grind_Arith_CommRing_getPolyConst___spec__2___closed__2; +x_3 = l___private_Lean_Meta_Tactic_Grind_Arith_CommRing_DenoteExpr_0__Lean_Meta_Grind_Arith_CommRing_denoteNum___at_Lean_Meta_Grind_Arith_CommRing_getPolyConst___spec__2___closed__3; x_4 = l_Lean_Meta_Grind_Arith_CommRing_EqCnstr_toExprProof___closed__1; x_5 = l___private_Lean_Meta_Tactic_Grind_Arith_CommRing_Proof_0__Lean_Meta_Grind_Arith_CommRing_Stepwise_derivToExprProof___closed__7; x_6 = l_Lean_Name_mkStr5(x_1, x_2, x_3, x_4, x_5); @@ -20511,9 +22856,9 @@ static lean_object* _init_l___private_Lean_Meta_Tactic_Grind_Arith_CommRing_Proo _start: { lean_object* x_1; lean_object* x_2; lean_object* x_3; lean_object* x_4; 
lean_object* x_5; lean_object* x_6; -x_1 = l_Lean_Meta_Grind_Arith_CommRing_Null_setEqUnsat___lambda__1___closed__15; -x_2 = l_Lean_Meta_Grind_Arith_CommRing_Null_setEqUnsat___lambda__1___closed__16; -x_3 = l_Lean_Meta_Grind_Arith_CommRing_Null_setEqUnsat___lambda__1___closed__17; +x_1 = l___private_Lean_Meta_Tactic_Grind_Arith_CommRing_DenoteExpr_0__Lean_Meta_Grind_Arith_CommRing_denoteNum___at_Lean_Meta_Grind_Arith_CommRing_getPolyConst___spec__2___closed__1; +x_2 = l___private_Lean_Meta_Tactic_Grind_Arith_CommRing_DenoteExpr_0__Lean_Meta_Grind_Arith_CommRing_denoteNum___at_Lean_Meta_Grind_Arith_CommRing_getPolyConst___spec__2___closed__2; +x_3 = l___private_Lean_Meta_Tactic_Grind_Arith_CommRing_DenoteExpr_0__Lean_Meta_Grind_Arith_CommRing_denoteNum___at_Lean_Meta_Grind_Arith_CommRing_getPolyConst___spec__2___closed__3; x_4 = l_Lean_Meta_Grind_Arith_CommRing_EqCnstr_toExprProof___closed__1; x_5 = l___private_Lean_Meta_Tactic_Grind_Arith_CommRing_Proof_0__Lean_Meta_Grind_Arith_CommRing_Stepwise_derivToExprProof___closed__9; x_6 = l_Lean_Name_mkStr5(x_1, x_2, x_3, x_4, x_5); @@ -20551,6 +22896,7 @@ lean_dec(x_7); lean_dec(x_6); lean_dec(x_5); lean_dec(x_4); +lean_dec(x_3); lean_dec(x_2); x_20 = !lean_is_exclusive(x_19); if (x_20 == 0) @@ -20561,7 +22907,7 @@ x_22 = l_Lean_Expr_app___override(x_17, x_21); x_23 = lean_alloc_ctor(0, 2, 0); lean_ctor_set(x_23, 0, x_14); lean_ctor_set(x_23, 1, x_22); -x_24 = l_Lean_Meta_Grind_Arith_CommRing_Null_PreNullCert_zero___closed__2; +x_24 = l_Lean_Grind_CommRing_Mon_denoteExpr___at_Lean_Meta_Grind_Arith_CommRing_getPolyConst___spec__4___closed__1; x_25 = lean_alloc_ctor(0, 2, 0); lean_ctor_set(x_25, 0, x_24); lean_ctor_set(x_25, 1, x_23); @@ -20580,7 +22926,7 @@ x_28 = l_Lean_Expr_app___override(x_17, x_26); x_29 = lean_alloc_ctor(0, 2, 0); lean_ctor_set(x_29, 0, x_14); lean_ctor_set(x_29, 1, x_28); -x_30 = l_Lean_Meta_Grind_Arith_CommRing_Null_PreNullCert_zero___closed__2; +x_30 = l_Lean_Grind_CommRing_Mon_denoteExpr___at_Lean_Meta_Grind_Arith_CommRing_getPolyConst___spec__4___closed__1; x_31 = lean_alloc_ctor(0, 2, 0); lean_ctor_set(x_31, 0, x_30); lean_ctor_set(x_31, 1, x_29); @@ -20603,6 +22949,7 @@ lean_dec(x_7); lean_dec(x_6); lean_dec(x_5); lean_dec(x_4); +lean_dec(x_3); lean_dec(x_2); x_33 = !lean_is_exclusive(x_16); if (x_33 == 0) @@ -20649,6 +22996,7 @@ lean_inc(x_7); lean_inc(x_6); lean_inc(x_5); lean_inc(x_4); +lean_inc(x_3); lean_inc(x_2); lean_inc(x_39); x_43 = l___private_Lean_Meta_Tactic_Grind_Arith_CommRing_Proof_0__Lean_Meta_Grind_Arith_CommRing_Stepwise_derivToExprProof(x_39, x_2, x_3, x_4, x_5, x_6, x_7, x_8, x_9, x_10, x_11, x_12, x_13); @@ -20679,6 +23027,7 @@ lean_inc(x_7); lean_inc(x_6); lean_inc(x_5); lean_inc(x_4); +lean_inc(x_3); lean_inc(x_2); lean_inc(x_42); x_50 = l_Lean_Meta_Grind_Arith_CommRing_EqCnstr_toExprProof(x_42, x_2, x_3, x_4, x_5, x_6, x_7, x_8, x_9, x_10, x_11, x_12, x_46); @@ -20690,7 +23039,7 @@ lean_inc(x_51); x_52 = lean_ctor_get(x_50, 1); lean_inc(x_52); lean_dec(x_50); -x_53 = l_Lean_Meta_Grind_Arith_CommRing_Null_PreNullCert_zero___closed__2; +x_53 = l_Lean_Grind_CommRing_Mon_denoteExpr___at_Lean_Meta_Grind_Arith_CommRing_getPolyConst___spec__4___closed__1; x_54 = lean_int_dec_eq(x_38, x_53); if (x_54 == 0) { @@ -20706,6 +23055,7 @@ lean_inc(x_7); lean_inc(x_6); lean_inc(x_5); lean_inc(x_4); +lean_inc(x_3); lean_inc(x_2); x_57 = l___private_Lean_Meta_Tactic_Grind_Arith_CommRing_Proof_0__Lean_Meta_Grind_Arith_CommRing_Stepwise_mkStepPrefix(x_55, x_56, x_2, x_3, x_4, x_5, x_6, x_7, x_8, x_9, x_10, x_11, 
x_12, x_52); if (lean_obj_tag(x_57) == 0) @@ -20716,7 +23066,7 @@ lean_inc(x_58); x_59 = lean_ctor_get(x_57, 1); lean_inc(x_59); lean_dec(x_57); -x_60 = l_Lean_Meta_Grind_Arith_CommRing_Null_instInhabitedPreNullCert___closed__2; +x_60 = l___private_Lean_Meta_Tactic_Grind_Arith_CommRing_DenoteExpr_0__Lean_Meta_Grind_Arith_CommRing_denoteNum___at_Lean_Meta_Grind_Arith_CommRing_getPolyConst___spec__2___closed__8; x_61 = lean_int_dec_le(x_60, x_38); if (x_61 == 0) { @@ -20740,6 +23090,7 @@ lean_dec(x_7); lean_dec(x_6); lean_dec(x_5); lean_dec(x_4); +lean_dec(x_3); lean_dec(x_2); lean_dec(x_38); lean_dec(x_40); @@ -20763,6 +23114,7 @@ lean_dec(x_7); lean_dec(x_6); lean_dec(x_5); lean_dec(x_4); +lean_dec(x_3); lean_dec(x_2); lean_dec(x_38); lean_dec(x_40); @@ -20793,6 +23145,7 @@ lean_dec(x_7); lean_dec(x_6); lean_dec(x_5); lean_dec(x_4); +lean_dec(x_3); lean_dec(x_2); x_75 = !lean_is_exclusive(x_57); if (x_75 == 0) @@ -20828,6 +23181,7 @@ lean_inc(x_7); lean_inc(x_6); lean_inc(x_5); lean_inc(x_4); +lean_inc(x_3); lean_inc(x_2); x_81 = l___private_Lean_Meta_Tactic_Grind_Arith_CommRing_Proof_0__Lean_Meta_Grind_Arith_CommRing_Stepwise_mkStepPrefix(x_79, x_80, x_2, x_3, x_4, x_5, x_6, x_7, x_8, x_9, x_10, x_11, x_12, x_52); if (lean_obj_tag(x_81) == 0) @@ -20848,6 +23202,7 @@ lean_dec(x_7); lean_dec(x_6); lean_dec(x_5); lean_dec(x_4); +lean_dec(x_3); lean_dec(x_2); lean_dec(x_38); lean_dec(x_40); @@ -20877,6 +23232,7 @@ lean_dec(x_7); lean_dec(x_6); lean_dec(x_5); lean_dec(x_4); +lean_dec(x_3); lean_dec(x_2); x_85 = !lean_is_exclusive(x_81); if (x_85 == 0) @@ -20920,6 +23276,7 @@ lean_dec(x_7); lean_dec(x_6); lean_dec(x_5); lean_dec(x_4); +lean_dec(x_3); lean_dec(x_2); x_89 = !lean_is_exclusive(x_50); if (x_89 == 0) @@ -20959,6 +23316,7 @@ lean_dec(x_7); lean_dec(x_6); lean_dec(x_5); lean_dec(x_4); +lean_dec(x_3); lean_dec(x_2); x_93 = !lean_is_exclusive(x_43); if (x_93 == 0) @@ -21028,15 +23386,6 @@ lean_dec(x_2); return x_24; } } -LEAN_EXPORT lean_object* l___private_Lean_Meta_Tactic_Grind_Arith_CommRing_Proof_0__Lean_Meta_Grind_Arith_CommRing_Stepwise_derivToExprProof___boxed(lean_object* x_1, lean_object* x_2, lean_object* x_3, lean_object* x_4, lean_object* x_5, lean_object* x_6, lean_object* x_7, lean_object* x_8, lean_object* x_9, lean_object* x_10, lean_object* x_11, lean_object* x_12, lean_object* x_13) { -_start: -{ -lean_object* x_14; -x_14 = l___private_Lean_Meta_Tactic_Grind_Arith_CommRing_Proof_0__Lean_Meta_Grind_Arith_CommRing_Stepwise_derivToExprProof(x_1, x_2, x_3, x_4, x_5, x_6, x_7, x_8, x_9, x_10, x_11, x_12, x_13); -lean_dec(x_3); -return x_14; -} -} static lean_object* _init_l_panic___at___private_Lean_Meta_Tactic_Grind_Arith_CommRing_Proof_0__Lean_Meta_Grind_Arith_CommRing_Stepwise_mkImpEqExprProof___spec__1___closed__1() { _start: { @@ -21238,9 +23587,9 @@ static lean_object* _init_l___private_Lean_Meta_Tactic_Grind_Arith_CommRing_Proo _start: { lean_object* x_1; lean_object* x_2; lean_object* x_3; lean_object* x_4; lean_object* x_5; lean_object* x_6; -x_1 = l_Lean_Meta_Grind_Arith_CommRing_Null_setEqUnsat___lambda__1___closed__15; -x_2 = l_Lean_Meta_Grind_Arith_CommRing_Null_setEqUnsat___lambda__1___closed__16; -x_3 = l_Lean_Meta_Grind_Arith_CommRing_Null_setEqUnsat___lambda__1___closed__17; +x_1 = l___private_Lean_Meta_Tactic_Grind_Arith_CommRing_DenoteExpr_0__Lean_Meta_Grind_Arith_CommRing_denoteNum___at_Lean_Meta_Grind_Arith_CommRing_getPolyConst___spec__2___closed__1; +x_2 = 
l___private_Lean_Meta_Tactic_Grind_Arith_CommRing_DenoteExpr_0__Lean_Meta_Grind_Arith_CommRing_denoteNum___at_Lean_Meta_Grind_Arith_CommRing_getPolyConst___spec__2___closed__2; +x_3 = l___private_Lean_Meta_Tactic_Grind_Arith_CommRing_DenoteExpr_0__Lean_Meta_Grind_Arith_CommRing_denoteNum___at_Lean_Meta_Grind_Arith_CommRing_getPolyConst___spec__2___closed__3; x_4 = l_Lean_Meta_Grind_Arith_CommRing_EqCnstr_toExprProof___closed__1; x_5 = l___private_Lean_Meta_Tactic_Grind_Arith_CommRing_Proof_0__Lean_Meta_Grind_Arith_CommRing_Stepwise_mkImpEqExprProof___closed__7; x_6 = l_Lean_Name_mkStr5(x_1, x_2, x_3, x_4, x_5); @@ -21259,9 +23608,9 @@ static lean_object* _init_l___private_Lean_Meta_Tactic_Grind_Arith_CommRing_Proo _start: { lean_object* x_1; lean_object* x_2; lean_object* x_3; lean_object* x_4; lean_object* x_5; lean_object* x_6; -x_1 = l_Lean_Meta_Grind_Arith_CommRing_Null_setEqUnsat___lambda__1___closed__15; -x_2 = l_Lean_Meta_Grind_Arith_CommRing_Null_setEqUnsat___lambda__1___closed__16; -x_3 = l_Lean_Meta_Grind_Arith_CommRing_Null_setEqUnsat___lambda__1___closed__17; +x_1 = l___private_Lean_Meta_Tactic_Grind_Arith_CommRing_DenoteExpr_0__Lean_Meta_Grind_Arith_CommRing_denoteNum___at_Lean_Meta_Grind_Arith_CommRing_getPolyConst___spec__2___closed__1; +x_2 = l___private_Lean_Meta_Tactic_Grind_Arith_CommRing_DenoteExpr_0__Lean_Meta_Grind_Arith_CommRing_denoteNum___at_Lean_Meta_Grind_Arith_CommRing_getPolyConst___spec__2___closed__2; +x_3 = l___private_Lean_Meta_Tactic_Grind_Arith_CommRing_DenoteExpr_0__Lean_Meta_Grind_Arith_CommRing_denoteNum___at_Lean_Meta_Grind_Arith_CommRing_getPolyConst___spec__2___closed__3; x_4 = l_Lean_Meta_Grind_Arith_CommRing_EqCnstr_toExprProof___closed__1; x_5 = l___private_Lean_Meta_Tactic_Grind_Arith_CommRing_Proof_0__Lean_Meta_Grind_Arith_CommRing_Stepwise_mkImpEqExprProof___closed__9; x_6 = l_Lean_Name_mkStr5(x_1, x_2, x_3, x_4, x_5); @@ -21280,9 +23629,9 @@ static lean_object* _init_l___private_Lean_Meta_Tactic_Grind_Arith_CommRing_Proo _start: { lean_object* x_1; lean_object* x_2; lean_object* x_3; lean_object* x_4; lean_object* x_5; lean_object* x_6; -x_1 = l_Lean_Meta_Grind_Arith_CommRing_Null_setEqUnsat___lambda__1___closed__15; -x_2 = l_Lean_Meta_Grind_Arith_CommRing_Null_setEqUnsat___lambda__1___closed__16; -x_3 = l_Lean_Meta_Grind_Arith_CommRing_Null_setEqUnsat___lambda__1___closed__17; +x_1 = l___private_Lean_Meta_Tactic_Grind_Arith_CommRing_DenoteExpr_0__Lean_Meta_Grind_Arith_CommRing_denoteNum___at_Lean_Meta_Grind_Arith_CommRing_getPolyConst___spec__2___closed__1; +x_2 = l___private_Lean_Meta_Tactic_Grind_Arith_CommRing_DenoteExpr_0__Lean_Meta_Grind_Arith_CommRing_denoteNum___at_Lean_Meta_Grind_Arith_CommRing_getPolyConst___spec__2___closed__2; +x_3 = l___private_Lean_Meta_Tactic_Grind_Arith_CommRing_DenoteExpr_0__Lean_Meta_Grind_Arith_CommRing_denoteNum___at_Lean_Meta_Grind_Arith_CommRing_getPolyConst___spec__2___closed__3; x_4 = l_Lean_Meta_Grind_Arith_CommRing_EqCnstr_toExprProof___closed__1; x_5 = l___private_Lean_Meta_Tactic_Grind_Arith_CommRing_Proof_0__Lean_Meta_Grind_Arith_CommRing_Stepwise_mkImpEqExprProof___closed__11; x_6 = l_Lean_Name_mkStr5(x_1, x_2, x_3, x_4, x_5); @@ -21301,9 +23650,9 @@ static lean_object* _init_l___private_Lean_Meta_Tactic_Grind_Arith_CommRing_Proo _start: { lean_object* x_1; lean_object* x_2; lean_object* x_3; lean_object* x_4; lean_object* x_5; lean_object* x_6; -x_1 = l_Lean_Meta_Grind_Arith_CommRing_Null_setEqUnsat___lambda__1___closed__15; -x_2 = 
l_Lean_Meta_Grind_Arith_CommRing_Null_setEqUnsat___lambda__1___closed__16; -x_3 = l_Lean_Meta_Grind_Arith_CommRing_Null_setEqUnsat___lambda__1___closed__17; +x_1 = l___private_Lean_Meta_Tactic_Grind_Arith_CommRing_DenoteExpr_0__Lean_Meta_Grind_Arith_CommRing_denoteNum___at_Lean_Meta_Grind_Arith_CommRing_getPolyConst___spec__2___closed__1; +x_2 = l___private_Lean_Meta_Tactic_Grind_Arith_CommRing_DenoteExpr_0__Lean_Meta_Grind_Arith_CommRing_denoteNum___at_Lean_Meta_Grind_Arith_CommRing_getPolyConst___spec__2___closed__2; +x_3 = l___private_Lean_Meta_Tactic_Grind_Arith_CommRing_DenoteExpr_0__Lean_Meta_Grind_Arith_CommRing_denoteNum___at_Lean_Meta_Grind_Arith_CommRing_getPolyConst___spec__2___closed__3; x_4 = l_Lean_Meta_Grind_Arith_CommRing_EqCnstr_toExprProof___closed__1; x_5 = l___private_Lean_Meta_Tactic_Grind_Arith_CommRing_Proof_0__Lean_Meta_Grind_Arith_CommRing_Stepwise_mkImpEqExprProof___closed__13; x_6 = l_Lean_Name_mkStr5(x_1, x_2, x_3, x_4, x_5); @@ -21320,7 +23669,7 @@ if (lean_obj_tag(x_16) == 0) lean_object* x_17; lean_object* x_18; uint8_t x_19; x_17 = lean_ctor_get(x_16, 0); lean_inc(x_17); -x_18 = l_Lean_Meta_Grind_Arith_CommRing_Null_instInhabitedPreNullCert___closed__2; +x_18 = l___private_Lean_Meta_Tactic_Grind_Arith_CommRing_DenoteExpr_0__Lean_Meta_Grind_Arith_CommRing_denoteNum___at_Lean_Meta_Grind_Arith_CommRing_getPolyConst___spec__2___closed__8; x_19 = lean_int_dec_eq(x_17, x_18); lean_dec(x_17); if (x_19 == 0) @@ -21346,6 +23695,7 @@ lean_inc(x_9); lean_inc(x_8); lean_inc(x_7); lean_inc(x_6); +lean_inc(x_5); lean_inc(x_4); x_22 = l___private_Lean_Meta_Tactic_Grind_Arith_CommRing_Proof_0__Lean_Meta_Grind_Arith_CommRing_Stepwise_derivToExprProof(x_3, x_4, x_5, x_6, x_7, x_8, x_9, x_10, x_11, x_12, x_13, x_14, x_15); if (lean_obj_tag(x_22) == 0) @@ -21366,7 +23716,7 @@ lean_inc(x_27); x_28 = lean_ctor_get(x_24, 1); lean_inc(x_28); lean_dec(x_24); -x_29 = l_Lean_Meta_Grind_Arith_CommRing_Null_PreNullCert_zero___closed__2; +x_29 = l_Lean_Grind_CommRing_Mon_denoteExpr___at_Lean_Meta_Grind_Arith_CommRing_getPolyConst___spec__4___closed__1; x_30 = lean_int_dec_eq(x_26, x_29); if (x_30 == 0) { @@ -21440,6 +23790,7 @@ lean_inc(x_9); lean_inc(x_8); lean_inc(x_7); lean_inc(x_6); +lean_inc(x_5); lean_inc(x_4); x_43 = l___private_Lean_Meta_Tactic_Grind_Arith_CommRing_Proof_0__Lean_Meta_Grind_Arith_CommRing_Stepwise_mkStepPrefix(x_41, x_42, x_4, x_5, x_6, x_7, x_8, x_9, x_10, x_11, x_12, x_13, x_14, x_39); if (lean_obj_tag(x_43) == 0) @@ -21597,6 +23948,7 @@ lean_inc(x_9); lean_inc(x_8); lean_inc(x_7); lean_inc(x_6); +lean_inc(x_5); lean_inc(x_4); x_70 = l___private_Lean_Meta_Tactic_Grind_Arith_CommRing_Proof_0__Lean_Meta_Grind_Arith_CommRing_Stepwise_mkStepPrefix(x_68, x_69, x_4, x_5, x_6, x_7, x_8, x_9, x_10, x_11, x_12, x_13, x_14, x_25); if (lean_obj_tag(x_70) == 0) @@ -22562,9 +24914,9 @@ static lean_object* _init_l___private_Lean_Meta_Tactic_Grind_Arith_CommRing_Proo _start: { lean_object* x_1; lean_object* x_2; lean_object* x_3; lean_object* x_4; lean_object* x_5; -x_1 = l_Lean_Meta_Grind_Arith_CommRing_Null_setEqUnsat___lambda__1___closed__15; -x_2 = l_Lean_Meta_Grind_Arith_CommRing_Null_setEqUnsat___lambda__1___closed__16; -x_3 = l_Lean_Meta_Grind_Arith_CommRing_Null_setEqUnsat___lambda__1___closed__17; +x_1 = l___private_Lean_Meta_Tactic_Grind_Arith_CommRing_DenoteExpr_0__Lean_Meta_Grind_Arith_CommRing_denoteNum___at_Lean_Meta_Grind_Arith_CommRing_getPolyConst___spec__2___closed__1; +x_2 = 
l___private_Lean_Meta_Tactic_Grind_Arith_CommRing_DenoteExpr_0__Lean_Meta_Grind_Arith_CommRing_denoteNum___at_Lean_Meta_Grind_Arith_CommRing_getPolyConst___spec__2___closed__2; +x_3 = l___private_Lean_Meta_Tactic_Grind_Arith_CommRing_DenoteExpr_0__Lean_Meta_Grind_Arith_CommRing_denoteNum___at_Lean_Meta_Grind_Arith_CommRing_getPolyConst___spec__2___closed__3; x_4 = l___private_Lean_Meta_Tactic_Grind_Arith_CommRing_Proof_0__Lean_Meta_Grind_Arith_CommRing_Stepwise_withProofContext_go___closed__3; x_5 = l_Lean_Name_mkStr4(x_1, x_2, x_3, x_4); return x_5; @@ -22618,9 +24970,9 @@ static lean_object* _init_l___private_Lean_Meta_Tactic_Grind_Arith_CommRing_Proo _start: { lean_object* x_1; lean_object* x_2; lean_object* x_3; lean_object* x_4; lean_object* x_5; -x_1 = l_Lean_Meta_Grind_Arith_CommRing_Null_setEqUnsat___lambda__1___closed__15; -x_2 = l_Lean_Meta_Grind_Arith_CommRing_Null_setEqUnsat___lambda__1___closed__16; -x_3 = l_Lean_Meta_Grind_Arith_CommRing_Null_setEqUnsat___lambda__1___closed__17; +x_1 = l___private_Lean_Meta_Tactic_Grind_Arith_CommRing_DenoteExpr_0__Lean_Meta_Grind_Arith_CommRing_denoteNum___at_Lean_Meta_Grind_Arith_CommRing_getPolyConst___spec__2___closed__1; +x_2 = l___private_Lean_Meta_Tactic_Grind_Arith_CommRing_DenoteExpr_0__Lean_Meta_Grind_Arith_CommRing_denoteNum___at_Lean_Meta_Grind_Arith_CommRing_getPolyConst___spec__2___closed__2; +x_3 = l___private_Lean_Meta_Tactic_Grind_Arith_CommRing_DenoteExpr_0__Lean_Meta_Grind_Arith_CommRing_denoteNum___at_Lean_Meta_Grind_Arith_CommRing_getPolyConst___spec__2___closed__3; x_4 = l___private_Lean_Meta_Tactic_Grind_Arith_CommRing_Proof_0__Lean_Meta_Grind_Arith_CommRing_Stepwise_withProofContext_go___closed__9; x_5 = l_Lean_Name_mkStr4(x_1, x_2, x_3, x_4); return x_5; @@ -22674,9 +25026,9 @@ static lean_object* _init_l___private_Lean_Meta_Tactic_Grind_Arith_CommRing_Proo _start: { lean_object* x_1; lean_object* x_2; lean_object* x_3; lean_object* x_4; lean_object* x_5; -x_1 = l_Lean_Meta_Grind_Arith_CommRing_Null_setEqUnsat___lambda__1___closed__15; -x_2 = l_Lean_Meta_Grind_Arith_CommRing_Null_setEqUnsat___lambda__1___closed__16; -x_3 = l_Lean_Meta_Grind_Arith_CommRing_Null_setEqUnsat___lambda__1___closed__17; +x_1 = l___private_Lean_Meta_Tactic_Grind_Arith_CommRing_DenoteExpr_0__Lean_Meta_Grind_Arith_CommRing_denoteNum___at_Lean_Meta_Grind_Arith_CommRing_getPolyConst___spec__2___closed__1; +x_2 = l___private_Lean_Meta_Tactic_Grind_Arith_CommRing_DenoteExpr_0__Lean_Meta_Grind_Arith_CommRing_denoteNum___at_Lean_Meta_Grind_Arith_CommRing_getPolyConst___spec__2___closed__2; +x_3 = l___private_Lean_Meta_Tactic_Grind_Arith_CommRing_DenoteExpr_0__Lean_Meta_Grind_Arith_CommRing_denoteNum___at_Lean_Meta_Grind_Arith_CommRing_getPolyConst___spec__2___closed__3; x_4 = l___private_Lean_Meta_Tactic_Grind_Arith_CommRing_Proof_0__Lean_Meta_Grind_Arith_CommRing_Stepwise_withProofContext_go___closed__15; x_5 = l_Lean_Name_mkStr4(x_1, x_2, x_3, x_4); return x_5; @@ -23531,7 +25883,7 @@ static lean_object* _init_l___private_Lean_Meta_Tactic_Grind_Arith_CommRing_Proo _start: { lean_object* x_1; lean_object* x_2; lean_object* x_3; -x_1 = l_Lean_Meta_Grind_Arith_CommRing_Null_setEqUnsat___lambda__1___closed__15; +x_1 = l___private_Lean_Meta_Tactic_Grind_Arith_CommRing_DenoteExpr_0__Lean_Meta_Grind_Arith_CommRing_denoteNum___at_Lean_Meta_Grind_Arith_CommRing_getPolyConst___spec__2___closed__1; x_2 = l___private_Lean_Meta_Tactic_Grind_Arith_CommRing_Proof_0__Lean_Meta_Grind_Arith_CommRing_Stepwise_withProofContext___closed__3; x_3 = 
l_Lean_Name_mkStr2(x_1, x_2); return x_3; @@ -23541,7 +25893,7 @@ LEAN_EXPORT lean_object* l___private_Lean_Meta_Tactic_Grind_Arith_CommRing_Proof _start: { lean_object* x_12; -x_12 = l_Lean_Meta_Grind_Arith_CommRing_getRing(x_2, x_3, x_4, x_5, x_6, x_7, x_8, x_9, x_10, x_11); +x_12 = l_Lean_Meta_Grind_Arith_CommRing_RingM_getRing(x_2, x_3, x_4, x_5, x_6, x_7, x_8, x_9, x_10, x_11); if (lean_obj_tag(x_12) == 0) { lean_object* x_13; lean_object* x_14; lean_object* x_15; @@ -23693,7 +26045,7 @@ if (x_26 == 0) { lean_object* x_27; lean_object* x_28; uint8_t x_29; x_27 = lean_ctor_get(x_25, 0); -x_28 = l_Lean_Meta_Grind_Arith_CommRing_Null_instInhabitedPreNullCert___closed__2; +x_28 = l___private_Lean_Meta_Tactic_Grind_Arith_CommRing_DenoteExpr_0__Lean_Meta_Grind_Arith_CommRing_denoteNum___at_Lean_Meta_Grind_Arith_CommRing_getPolyConst___spec__2___closed__8; x_29 = lean_int_dec_le(x_28, x_19); if (x_29 == 0) { @@ -23743,7 +26095,7 @@ x_48 = lean_ctor_get(x_25, 1); lean_inc(x_48); lean_inc(x_47); lean_dec(x_25); -x_49 = l_Lean_Meta_Grind_Arith_CommRing_Null_instInhabitedPreNullCert___closed__2; +x_49 = l___private_Lean_Meta_Tactic_Grind_Arith_CommRing_DenoteExpr_0__Lean_Meta_Grind_Arith_CommRing_denoteNum___at_Lean_Meta_Grind_Arith_CommRing_getPolyConst___spec__2___closed__8; x_50 = lean_int_dec_le(x_49, x_19); if (x_50 == 0) { @@ -23842,7 +26194,7 @@ if (lean_is_exclusive(x_76)) { lean_dec_ref(x_76); x_79 = lean_box(0); } -x_80 = l_Lean_Meta_Grind_Arith_CommRing_Null_instInhabitedPreNullCert___closed__2; +x_80 = l___private_Lean_Meta_Tactic_Grind_Arith_CommRing_DenoteExpr_0__Lean_Meta_Grind_Arith_CommRing_denoteNum___at_Lean_Meta_Grind_Arith_CommRing_getPolyConst___spec__2___closed__8; x_81 = lean_int_dec_le(x_80, x_19); if (x_81 == 0) { @@ -23938,6 +26290,7 @@ lean_dec(x_10); lean_dec(x_9); lean_dec(x_8); lean_dec(x_7); +lean_dec(x_6); lean_dec(x_5); lean_dec(x_3); lean_dec(x_2); @@ -23983,6 +26336,7 @@ lean_inc(x_9); lean_inc(x_8); lean_inc(x_7); lean_inc(x_6); +lean_inc(x_18); lean_inc(x_5); x_91 = l___private_Lean_Meta_Tactic_Grind_Arith_CommRing_Proof_0__Lean_Meta_Grind_Arith_CommRing_Stepwise_mkStepPrefix(x_2, x_3, x_5, x_18, x_6, x_7, x_8, x_9, x_10, x_11, x_12, x_13, x_14, x_19); if (lean_obj_tag(x_91) == 0) @@ -24023,6 +26377,7 @@ lean_inc(x_10); lean_inc(x_9); lean_inc(x_8); lean_inc(x_7); +lean_inc(x_18); lean_inc(x_5); lean_inc(x_1); x_102 = l_Lean_Meta_Grind_Arith_CommRing_Stepwise_setEqUnsat___lambda__1(x_4, x_1, x_92, x_101, x_5, x_18, x_6, x_7, x_8, x_9, x_10, x_11, x_12, x_13, x_14, x_96); @@ -24085,6 +26440,7 @@ lean_inc(x_10); lean_inc(x_9); lean_inc(x_8); lean_inc(x_7); +lean_inc(x_18); lean_inc(x_5); lean_inc(x_1); x_111 = l_Lean_Meta_Grind_Arith_CommRing_Stepwise_setEqUnsat___lambda__1(x_4, x_1, x_109, x_110, x_5, x_18, x_6, x_7, x_8, x_9, x_10, x_11, x_12, x_13, x_14, x_96); @@ -24437,9 +26793,9 @@ static lean_object* _init_l_Lean_Meta_Grind_Arith_CommRing_Stepwise_setEqUnsat__ _start: { lean_object* x_1; lean_object* x_2; lean_object* x_3; lean_object* x_4; lean_object* x_5; lean_object* x_6; -x_1 = l_Lean_Meta_Grind_Arith_CommRing_Null_setEqUnsat___lambda__1___closed__15; -x_2 = l_Lean_Meta_Grind_Arith_CommRing_Null_setEqUnsat___lambda__1___closed__16; -x_3 = l_Lean_Meta_Grind_Arith_CommRing_Null_setEqUnsat___lambda__1___closed__17; +x_1 = l___private_Lean_Meta_Tactic_Grind_Arith_CommRing_DenoteExpr_0__Lean_Meta_Grind_Arith_CommRing_denoteNum___at_Lean_Meta_Grind_Arith_CommRing_getPolyConst___spec__2___closed__1; +x_2 = 
l___private_Lean_Meta_Tactic_Grind_Arith_CommRing_DenoteExpr_0__Lean_Meta_Grind_Arith_CommRing_denoteNum___at_Lean_Meta_Grind_Arith_CommRing_getPolyConst___spec__2___closed__2; +x_3 = l___private_Lean_Meta_Tactic_Grind_Arith_CommRing_DenoteExpr_0__Lean_Meta_Grind_Arith_CommRing_denoteNum___at_Lean_Meta_Grind_Arith_CommRing_getPolyConst___spec__2___closed__3; x_4 = l_Lean_Meta_Grind_Arith_CommRing_EqCnstr_toExprProof___closed__1; x_5 = l_Lean_Meta_Grind_Arith_CommRing_Stepwise_setEqUnsat___closed__1; x_6 = l_Lean_Name_mkStr5(x_1, x_2, x_3, x_4, x_5); @@ -24458,9 +26814,9 @@ static lean_object* _init_l_Lean_Meta_Grind_Arith_CommRing_Stepwise_setEqUnsat__ _start: { lean_object* x_1; lean_object* x_2; lean_object* x_3; lean_object* x_4; lean_object* x_5; lean_object* x_6; -x_1 = l_Lean_Meta_Grind_Arith_CommRing_Null_setEqUnsat___lambda__1___closed__15; -x_2 = l_Lean_Meta_Grind_Arith_CommRing_Null_setEqUnsat___lambda__1___closed__16; -x_3 = l_Lean_Meta_Grind_Arith_CommRing_Null_setEqUnsat___lambda__1___closed__17; +x_1 = l___private_Lean_Meta_Tactic_Grind_Arith_CommRing_DenoteExpr_0__Lean_Meta_Grind_Arith_CommRing_denoteNum___at_Lean_Meta_Grind_Arith_CommRing_getPolyConst___spec__2___closed__1; +x_2 = l___private_Lean_Meta_Tactic_Grind_Arith_CommRing_DenoteExpr_0__Lean_Meta_Grind_Arith_CommRing_denoteNum___at_Lean_Meta_Grind_Arith_CommRing_getPolyConst___spec__2___closed__2; +x_3 = l___private_Lean_Meta_Tactic_Grind_Arith_CommRing_DenoteExpr_0__Lean_Meta_Grind_Arith_CommRing_denoteNum___at_Lean_Meta_Grind_Arith_CommRing_getPolyConst___spec__2___closed__3; x_4 = l_Lean_Meta_Grind_Arith_CommRing_EqCnstr_toExprProof___closed__1; x_5 = l_Lean_Meta_Grind_Arith_CommRing_Stepwise_setEqUnsat___closed__3; x_6 = l_Lean_Name_mkStr5(x_1, x_2, x_3, x_4, x_5); @@ -24471,7 +26827,7 @@ LEAN_EXPORT lean_object* l_Lean_Meta_Grind_Arith_CommRing_Stepwise_setEqUnsat(le _start: { lean_object* x_12; -x_12 = l_Lean_Meta_Grind_Arith_CommRing_getRing(x_2, x_3, x_4, x_5, x_6, x_7, x_8, x_9, x_10, x_11); +x_12 = l_Lean_Meta_Grind_Arith_CommRing_RingM_getRing(x_2, x_3, x_4, x_5, x_6, x_7, x_8, x_9, x_10, x_11); if (lean_obj_tag(x_12) == 0) { lean_object* x_13; lean_object* x_14; lean_object* x_15; @@ -24638,7 +26994,6 @@ LEAN_EXPORT lean_object* l_Lean_Meta_Grind_Arith_CommRing_Stepwise_setEqUnsat___ { lean_object* x_17; x_17 = l_Lean_Meta_Grind_Arith_CommRing_Stepwise_setEqUnsat___lambda__1(x_1, x_2, x_3, x_4, x_5, x_6, x_7, x_8, x_9, x_10, x_11, x_12, x_13, x_14, x_15, x_16); -lean_dec(x_6); lean_dec(x_4); return x_17; } @@ -24931,7 +27286,7 @@ x_13 = lean_ctor_get(x_1, 3); lean_inc(x_13); x_14 = lean_ctor_get(x_1, 4); lean_inc(x_14); -x_15 = l_Lean_Meta_Grind_Arith_CommRing_getRing(x_2, x_3, x_4, x_5, x_6, x_7, x_8, x_9, x_10, x_11); +x_15 = l_Lean_Meta_Grind_Arith_CommRing_RingM_getRing(x_2, x_3, x_4, x_5, x_6, x_7, x_8, x_9, x_10, x_11); if (lean_obj_tag(x_15) == 0) { lean_object* x_16; lean_object* x_17; lean_object* x_18; @@ -25157,7 +27512,7 @@ LEAN_EXPORT lean_object* l_Lean_Meta_Grind_Arith_CommRing_Stepwise_propagateEq(l _start: { lean_object* x_16; -x_16 = l_Lean_Meta_Grind_Arith_CommRing_getRing(x_6, x_7, x_8, x_9, x_10, x_11, x_12, x_13, x_14, x_15); +x_16 = l_Lean_Meta_Grind_Arith_CommRing_RingM_getRing(x_6, x_7, x_8, x_9, x_10, x_11, x_12, x_13, x_14, x_15); if (lean_obj_tag(x_16) == 0) { lean_object* x_17; lean_object* x_18; lean_object* x_19; @@ -25216,7 +27571,7 @@ lean_inc(x_33); x_34 = lean_ctor_get(x_32, 1); lean_inc(x_34); lean_dec(x_32); -x_35 = l_Lean_Meta_Grind_Arith_CommRing_getRing(x_6, x_7, 
x_8, x_9, x_10, x_11, x_12, x_13, x_14, x_34); +x_35 = l_Lean_Meta_Grind_Arith_CommRing_RingM_getRing(x_6, x_7, x_8, x_9, x_10, x_11, x_12, x_13, x_14, x_34); lean_dec(x_6); if (lean_obj_tag(x_35) == 0) { @@ -25232,7 +27587,7 @@ x_39 = l_Lean_Level_succ___override(x_38); x_40 = lean_alloc_ctor(1, 2, 0); lean_ctor_set(x_40, 0, x_39); lean_ctor_set(x_40, 1, x_23); -x_41 = l_Lean_Meta_Grind_Arith_CommRing_Null_propagateEq___lambda__1___closed__2; +x_41 = l___private_Lean_Meta_Tactic_Grind_Arith_CommRing_DenoteExpr_0__Lean_Meta_Grind_Arith_CommRing_mkEq___at_Lean_Meta_Grind_Arith_CommRing_Null_setEqUnsat___spec__5___closed__2; x_42 = l_Lean_Expr_const___override(x_41, x_40); x_43 = lean_ctor_get(x_36, 1); lean_inc(x_43); @@ -25514,6 +27869,24 @@ l_Lean_Meta_Grind_Arith_CommRing_throwNoNatZeroDivisors___rarg___closed__3 = _in lean_mark_persistent(l_Lean_Meta_Grind_Arith_CommRing_throwNoNatZeroDivisors___rarg___closed__3); l_Lean_Meta_Grind_Arith_CommRing_throwNoNatZeroDivisors___rarg___closed__4 = _init_l_Lean_Meta_Grind_Arith_CommRing_throwNoNatZeroDivisors___rarg___closed__4(); lean_mark_persistent(l_Lean_Meta_Grind_Arith_CommRing_throwNoNatZeroDivisors___rarg___closed__4); +l___private_Lean_Meta_Tactic_Grind_Arith_CommRing_DenoteExpr_0__Lean_Meta_Grind_Arith_CommRing_denoteNum___at_Lean_Meta_Grind_Arith_CommRing_getPolyConst___spec__2___closed__1 = _init_l___private_Lean_Meta_Tactic_Grind_Arith_CommRing_DenoteExpr_0__Lean_Meta_Grind_Arith_CommRing_denoteNum___at_Lean_Meta_Grind_Arith_CommRing_getPolyConst___spec__2___closed__1(); +lean_mark_persistent(l___private_Lean_Meta_Tactic_Grind_Arith_CommRing_DenoteExpr_0__Lean_Meta_Grind_Arith_CommRing_denoteNum___at_Lean_Meta_Grind_Arith_CommRing_getPolyConst___spec__2___closed__1); +l___private_Lean_Meta_Tactic_Grind_Arith_CommRing_DenoteExpr_0__Lean_Meta_Grind_Arith_CommRing_denoteNum___at_Lean_Meta_Grind_Arith_CommRing_getPolyConst___spec__2___closed__2 = _init_l___private_Lean_Meta_Tactic_Grind_Arith_CommRing_DenoteExpr_0__Lean_Meta_Grind_Arith_CommRing_denoteNum___at_Lean_Meta_Grind_Arith_CommRing_getPolyConst___spec__2___closed__2(); +lean_mark_persistent(l___private_Lean_Meta_Tactic_Grind_Arith_CommRing_DenoteExpr_0__Lean_Meta_Grind_Arith_CommRing_denoteNum___at_Lean_Meta_Grind_Arith_CommRing_getPolyConst___spec__2___closed__2); +l___private_Lean_Meta_Tactic_Grind_Arith_CommRing_DenoteExpr_0__Lean_Meta_Grind_Arith_CommRing_denoteNum___at_Lean_Meta_Grind_Arith_CommRing_getPolyConst___spec__2___closed__3 = _init_l___private_Lean_Meta_Tactic_Grind_Arith_CommRing_DenoteExpr_0__Lean_Meta_Grind_Arith_CommRing_denoteNum___at_Lean_Meta_Grind_Arith_CommRing_getPolyConst___spec__2___closed__3(); +lean_mark_persistent(l___private_Lean_Meta_Tactic_Grind_Arith_CommRing_DenoteExpr_0__Lean_Meta_Grind_Arith_CommRing_denoteNum___at_Lean_Meta_Grind_Arith_CommRing_getPolyConst___spec__2___closed__3); +l___private_Lean_Meta_Tactic_Grind_Arith_CommRing_DenoteExpr_0__Lean_Meta_Grind_Arith_CommRing_denoteNum___at_Lean_Meta_Grind_Arith_CommRing_getPolyConst___spec__2___closed__4 = _init_l___private_Lean_Meta_Tactic_Grind_Arith_CommRing_DenoteExpr_0__Lean_Meta_Grind_Arith_CommRing_denoteNum___at_Lean_Meta_Grind_Arith_CommRing_getPolyConst___spec__2___closed__4(); +lean_mark_persistent(l___private_Lean_Meta_Tactic_Grind_Arith_CommRing_DenoteExpr_0__Lean_Meta_Grind_Arith_CommRing_denoteNum___at_Lean_Meta_Grind_Arith_CommRing_getPolyConst___spec__2___closed__4); 
+l___private_Lean_Meta_Tactic_Grind_Arith_CommRing_DenoteExpr_0__Lean_Meta_Grind_Arith_CommRing_denoteNum___at_Lean_Meta_Grind_Arith_CommRing_getPolyConst___spec__2___closed__5 = _init_l___private_Lean_Meta_Tactic_Grind_Arith_CommRing_DenoteExpr_0__Lean_Meta_Grind_Arith_CommRing_denoteNum___at_Lean_Meta_Grind_Arith_CommRing_getPolyConst___spec__2___closed__5(); +lean_mark_persistent(l___private_Lean_Meta_Tactic_Grind_Arith_CommRing_DenoteExpr_0__Lean_Meta_Grind_Arith_CommRing_denoteNum___at_Lean_Meta_Grind_Arith_CommRing_getPolyConst___spec__2___closed__5); +l___private_Lean_Meta_Tactic_Grind_Arith_CommRing_DenoteExpr_0__Lean_Meta_Grind_Arith_CommRing_denoteNum___at_Lean_Meta_Grind_Arith_CommRing_getPolyConst___spec__2___closed__6 = _init_l___private_Lean_Meta_Tactic_Grind_Arith_CommRing_DenoteExpr_0__Lean_Meta_Grind_Arith_CommRing_denoteNum___at_Lean_Meta_Grind_Arith_CommRing_getPolyConst___spec__2___closed__6(); +lean_mark_persistent(l___private_Lean_Meta_Tactic_Grind_Arith_CommRing_DenoteExpr_0__Lean_Meta_Grind_Arith_CommRing_denoteNum___at_Lean_Meta_Grind_Arith_CommRing_getPolyConst___spec__2___closed__6); +l___private_Lean_Meta_Tactic_Grind_Arith_CommRing_DenoteExpr_0__Lean_Meta_Grind_Arith_CommRing_denoteNum___at_Lean_Meta_Grind_Arith_CommRing_getPolyConst___spec__2___closed__7 = _init_l___private_Lean_Meta_Tactic_Grind_Arith_CommRing_DenoteExpr_0__Lean_Meta_Grind_Arith_CommRing_denoteNum___at_Lean_Meta_Grind_Arith_CommRing_getPolyConst___spec__2___closed__7(); +lean_mark_persistent(l___private_Lean_Meta_Tactic_Grind_Arith_CommRing_DenoteExpr_0__Lean_Meta_Grind_Arith_CommRing_denoteNum___at_Lean_Meta_Grind_Arith_CommRing_getPolyConst___spec__2___closed__7); +l___private_Lean_Meta_Tactic_Grind_Arith_CommRing_DenoteExpr_0__Lean_Meta_Grind_Arith_CommRing_denoteNum___at_Lean_Meta_Grind_Arith_CommRing_getPolyConst___spec__2___closed__8 = _init_l___private_Lean_Meta_Tactic_Grind_Arith_CommRing_DenoteExpr_0__Lean_Meta_Grind_Arith_CommRing_denoteNum___at_Lean_Meta_Grind_Arith_CommRing_getPolyConst___spec__2___closed__8(); +lean_mark_persistent(l___private_Lean_Meta_Tactic_Grind_Arith_CommRing_DenoteExpr_0__Lean_Meta_Grind_Arith_CommRing_denoteNum___at_Lean_Meta_Grind_Arith_CommRing_getPolyConst___spec__2___closed__8); +l_Lean_Grind_CommRing_Mon_denoteExpr___at_Lean_Meta_Grind_Arith_CommRing_getPolyConst___spec__4___closed__1 = _init_l_Lean_Grind_CommRing_Mon_denoteExpr___at_Lean_Meta_Grind_Arith_CommRing_getPolyConst___spec__4___closed__1(); +lean_mark_persistent(l_Lean_Grind_CommRing_Mon_denoteExpr___at_Lean_Meta_Grind_Arith_CommRing_getPolyConst___spec__4___closed__1); l_Lean_Meta_Grind_Arith_CommRing_getPolyConst___closed__1 = _init_l_Lean_Meta_Grind_Arith_CommRing_getPolyConst___closed__1(); lean_mark_persistent(l_Lean_Meta_Grind_Arith_CommRing_getPolyConst___closed__1); l_Lean_Meta_Grind_Arith_CommRing_getPolyConst___closed__2 = _init_l_Lean_Meta_Grind_Arith_CommRing_getPolyConst___closed__2(); @@ -25522,16 +27895,12 @@ l_Lean_Meta_Grind_Arith_CommRing_Null_instInhabitedPreNullCert___closed__1 = _in lean_mark_persistent(l_Lean_Meta_Grind_Arith_CommRing_Null_instInhabitedPreNullCert___closed__1); l_Lean_Meta_Grind_Arith_CommRing_Null_instInhabitedPreNullCert___closed__2 = _init_l_Lean_Meta_Grind_Arith_CommRing_Null_instInhabitedPreNullCert___closed__2(); lean_mark_persistent(l_Lean_Meta_Grind_Arith_CommRing_Null_instInhabitedPreNullCert___closed__2); -l_Lean_Meta_Grind_Arith_CommRing_Null_instInhabitedPreNullCert___closed__3 = 
_init_l_Lean_Meta_Grind_Arith_CommRing_Null_instInhabitedPreNullCert___closed__3(); -lean_mark_persistent(l_Lean_Meta_Grind_Arith_CommRing_Null_instInhabitedPreNullCert___closed__3); l_Lean_Meta_Grind_Arith_CommRing_Null_instInhabitedPreNullCert = _init_l_Lean_Meta_Grind_Arith_CommRing_Null_instInhabitedPreNullCert(); lean_mark_persistent(l_Lean_Meta_Grind_Arith_CommRing_Null_instInhabitedPreNullCert); l_Lean_Meta_Grind_Arith_CommRing_Null_PreNullCert_zero___closed__1 = _init_l_Lean_Meta_Grind_Arith_CommRing_Null_PreNullCert_zero___closed__1(); lean_mark_persistent(l_Lean_Meta_Grind_Arith_CommRing_Null_PreNullCert_zero___closed__1); l_Lean_Meta_Grind_Arith_CommRing_Null_PreNullCert_zero___closed__2 = _init_l_Lean_Meta_Grind_Arith_CommRing_Null_PreNullCert_zero___closed__2(); lean_mark_persistent(l_Lean_Meta_Grind_Arith_CommRing_Null_PreNullCert_zero___closed__2); -l_Lean_Meta_Grind_Arith_CommRing_Null_PreNullCert_zero___closed__3 = _init_l_Lean_Meta_Grind_Arith_CommRing_Null_PreNullCert_zero___closed__3(); -lean_mark_persistent(l_Lean_Meta_Grind_Arith_CommRing_Null_PreNullCert_zero___closed__3); l_Lean_Meta_Grind_Arith_CommRing_Null_PreNullCert_zero = _init_l_Lean_Meta_Grind_Arith_CommRing_Null_PreNullCert_zero(); lean_mark_persistent(l_Lean_Meta_Grind_Arith_CommRing_Null_PreNullCert_zero); l_Lean_Meta_Grind_Arith_CommRing_Null_PreNullCert_unit___closed__1 = _init_l_Lean_Meta_Grind_Arith_CommRing_Null_PreNullCert_unit___closed__1(); @@ -25567,6 +27936,10 @@ l_Lean_Meta_Grind_Arith_CommRing_EqCnstr_mkNullCertExt___closed__3 = _init_l_Lea lean_mark_persistent(l_Lean_Meta_Grind_Arith_CommRing_EqCnstr_mkNullCertExt___closed__3); l_Lean_Meta_Grind_Arith_CommRing_EqCnstr_mkNullCertExt___closed__4 = _init_l_Lean_Meta_Grind_Arith_CommRing_EqCnstr_mkNullCertExt___closed__4(); lean_mark_persistent(l_Lean_Meta_Grind_Arith_CommRing_EqCnstr_mkNullCertExt___closed__4); +l___private_Lean_Meta_Tactic_Grind_Arith_CommRing_DenoteExpr_0__Lean_Meta_Grind_Arith_CommRing_mkEq___at_Lean_Meta_Grind_Arith_CommRing_Null_setEqUnsat___spec__5___closed__1 = _init_l___private_Lean_Meta_Tactic_Grind_Arith_CommRing_DenoteExpr_0__Lean_Meta_Grind_Arith_CommRing_mkEq___at_Lean_Meta_Grind_Arith_CommRing_Null_setEqUnsat___spec__5___closed__1(); +lean_mark_persistent(l___private_Lean_Meta_Tactic_Grind_Arith_CommRing_DenoteExpr_0__Lean_Meta_Grind_Arith_CommRing_mkEq___at_Lean_Meta_Grind_Arith_CommRing_Null_setEqUnsat___spec__5___closed__1); +l___private_Lean_Meta_Tactic_Grind_Arith_CommRing_DenoteExpr_0__Lean_Meta_Grind_Arith_CommRing_mkEq___at_Lean_Meta_Grind_Arith_CommRing_Null_setEqUnsat___spec__5___closed__2 = _init_l___private_Lean_Meta_Tactic_Grind_Arith_CommRing_DenoteExpr_0__Lean_Meta_Grind_Arith_CommRing_mkEq___at_Lean_Meta_Grind_Arith_CommRing_Null_setEqUnsat___spec__5___closed__2(); +lean_mark_persistent(l___private_Lean_Meta_Tactic_Grind_Arith_CommRing_DenoteExpr_0__Lean_Meta_Grind_Arith_CommRing_mkEq___at_Lean_Meta_Grind_Arith_CommRing_Null_setEqUnsat___spec__5___closed__2); l_Lean_Meta_Grind_Arith_CommRing_Null_setEqUnsat___lambda__1___closed__1 = _init_l_Lean_Meta_Grind_Arith_CommRing_Null_setEqUnsat___lambda__1___closed__1(); lean_mark_persistent(l_Lean_Meta_Grind_Arith_CommRing_Null_setEqUnsat___lambda__1___closed__1); l_Lean_Meta_Grind_Arith_CommRing_Null_setEqUnsat___lambda__1___closed__2 = _init_l_Lean_Meta_Grind_Arith_CommRing_Null_setEqUnsat___lambda__1___closed__2(); @@ -25605,12 +27978,6 @@ l_Lean_Meta_Grind_Arith_CommRing_Null_setEqUnsat___lambda__1___closed__18 = _ini 
lean_mark_persistent(l_Lean_Meta_Grind_Arith_CommRing_Null_setEqUnsat___lambda__1___closed__18); l_Lean_Meta_Grind_Arith_CommRing_Null_setEqUnsat___lambda__1___closed__19 = _init_l_Lean_Meta_Grind_Arith_CommRing_Null_setEqUnsat___lambda__1___closed__19(); lean_mark_persistent(l_Lean_Meta_Grind_Arith_CommRing_Null_setEqUnsat___lambda__1___closed__19); -l_Lean_Meta_Grind_Arith_CommRing_Null_setEqUnsat___lambda__1___closed__20 = _init_l_Lean_Meta_Grind_Arith_CommRing_Null_setEqUnsat___lambda__1___closed__20(); -lean_mark_persistent(l_Lean_Meta_Grind_Arith_CommRing_Null_setEqUnsat___lambda__1___closed__20); -l_Lean_Meta_Grind_Arith_CommRing_Null_setEqUnsat___lambda__1___closed__21 = _init_l_Lean_Meta_Grind_Arith_CommRing_Null_setEqUnsat___lambda__1___closed__21(); -lean_mark_persistent(l_Lean_Meta_Grind_Arith_CommRing_Null_setEqUnsat___lambda__1___closed__21); -l_Lean_Meta_Grind_Arith_CommRing_Null_setEqUnsat___lambda__1___closed__22 = _init_l_Lean_Meta_Grind_Arith_CommRing_Null_setEqUnsat___lambda__1___closed__22(); -lean_mark_persistent(l_Lean_Meta_Grind_Arith_CommRing_Null_setEqUnsat___lambda__1___closed__22); l_Lean_Meta_Grind_Arith_CommRing_Null_setEqUnsat___lambda__2___closed__1 = _init_l_Lean_Meta_Grind_Arith_CommRing_Null_setEqUnsat___lambda__2___closed__1(); lean_mark_persistent(l_Lean_Meta_Grind_Arith_CommRing_Null_setEqUnsat___lambda__2___closed__1); l_Lean_Meta_Grind_Arith_CommRing_Null_setEqUnsat___lambda__2___closed__2 = _init_l_Lean_Meta_Grind_Arith_CommRing_Null_setEqUnsat___lambda__2___closed__2(); @@ -25625,6 +27992,8 @@ l_Lean_Meta_Grind_Arith_CommRing_Null_setEqUnsat___closed__2 = _init_l_Lean_Meta lean_mark_persistent(l_Lean_Meta_Grind_Arith_CommRing_Null_setEqUnsat___closed__2); l_Lean_Meta_Grind_Arith_CommRing_Null_setEqUnsat___closed__3 = _init_l_Lean_Meta_Grind_Arith_CommRing_Null_setEqUnsat___closed__3(); lean_mark_persistent(l_Lean_Meta_Grind_Arith_CommRing_Null_setEqUnsat___closed__3); +l_Lean_Meta_Grind_Arith_CommRing_nonzeroCharInst_x3f___at_Lean_Meta_Grind_Arith_CommRing_Null_setDiseqUnsat___spec__1___closed__1 = _init_l_Lean_Meta_Grind_Arith_CommRing_nonzeroCharInst_x3f___at_Lean_Meta_Grind_Arith_CommRing_Null_setDiseqUnsat___spec__1___closed__1(); +lean_mark_persistent(l_Lean_Meta_Grind_Arith_CommRing_nonzeroCharInst_x3f___at_Lean_Meta_Grind_Arith_CommRing_Null_setDiseqUnsat___spec__1___closed__1); l_Lean_Meta_Grind_Arith_CommRing_Null_setDiseqUnsat___lambda__1___closed__1 = _init_l_Lean_Meta_Grind_Arith_CommRing_Null_setDiseqUnsat___lambda__1___closed__1(); lean_mark_persistent(l_Lean_Meta_Grind_Arith_CommRing_Null_setDiseqUnsat___lambda__1___closed__1); l_Lean_Meta_Grind_Arith_CommRing_Null_setDiseqUnsat___lambda__1___closed__2 = _init_l_Lean_Meta_Grind_Arith_CommRing_Null_setDiseqUnsat___lambda__1___closed__2(); @@ -25649,10 +28018,6 @@ l_Lean_Meta_Grind_Arith_CommRing_Null_setDiseqUnsat___lambda__2___closed__1 = _i lean_mark_persistent(l_Lean_Meta_Grind_Arith_CommRing_Null_setDiseqUnsat___lambda__2___closed__1); l_Lean_Meta_Grind_Arith_CommRing_Null_setDiseqUnsat___lambda__2___closed__2 = _init_l_Lean_Meta_Grind_Arith_CommRing_Null_setDiseqUnsat___lambda__2___closed__2(); lean_mark_persistent(l_Lean_Meta_Grind_Arith_CommRing_Null_setDiseqUnsat___lambda__2___closed__2); -l_Lean_Meta_Grind_Arith_CommRing_Null_propagateEq___lambda__1___closed__1 = _init_l_Lean_Meta_Grind_Arith_CommRing_Null_propagateEq___lambda__1___closed__1(); -lean_mark_persistent(l_Lean_Meta_Grind_Arith_CommRing_Null_propagateEq___lambda__1___closed__1); 
-l_Lean_Meta_Grind_Arith_CommRing_Null_propagateEq___lambda__1___closed__2 = _init_l_Lean_Meta_Grind_Arith_CommRing_Null_propagateEq___lambda__1___closed__2(); -lean_mark_persistent(l_Lean_Meta_Grind_Arith_CommRing_Null_propagateEq___lambda__1___closed__2); l_Lean_Meta_Grind_Arith_CommRing_Null_propagateEq___closed__1 = _init_l_Lean_Meta_Grind_Arith_CommRing_Null_propagateEq___closed__1(); lean_mark_persistent(l_Lean_Meta_Grind_Arith_CommRing_Null_propagateEq___closed__1); l_Lean_Meta_Grind_Arith_CommRing_Null_propagateEq___closed__2 = _init_l_Lean_Meta_Grind_Arith_CommRing_Null_propagateEq___closed__2(); @@ -25673,6 +28038,8 @@ l_Lean_Meta_Grind_Arith_CommRing_Null_propagateEq___closed__9 = _init_l_Lean_Met lean_mark_persistent(l_Lean_Meta_Grind_Arith_CommRing_Null_propagateEq___closed__9); l_Lean_Meta_Grind_Arith_CommRing_Null_propagateEq___closed__10 = _init_l_Lean_Meta_Grind_Arith_CommRing_Null_propagateEq___closed__10(); lean_mark_persistent(l_Lean_Meta_Grind_Arith_CommRing_Null_propagateEq___closed__10); +l_Lean_Meta_Grind_Arith_CommRing_nonzeroCharInst_x3f___at___private_Lean_Meta_Tactic_Grind_Arith_CommRing_Proof_0__Lean_Meta_Grind_Arith_CommRing_Stepwise_mkStepPrefix___spec__1___closed__1 = _init_l_Lean_Meta_Grind_Arith_CommRing_nonzeroCharInst_x3f___at___private_Lean_Meta_Tactic_Grind_Arith_CommRing_Proof_0__Lean_Meta_Grind_Arith_CommRing_Stepwise_mkStepPrefix___spec__1___closed__1(); +lean_mark_persistent(l_Lean_Meta_Grind_Arith_CommRing_nonzeroCharInst_x3f___at___private_Lean_Meta_Tactic_Grind_Arith_CommRing_Proof_0__Lean_Meta_Grind_Arith_CommRing_Stepwise_mkStepPrefix___spec__1___closed__1); l_Lean_Meta_Grind_Arith_CommRing_EqCnstr_toExprProof___closed__1 = _init_l_Lean_Meta_Grind_Arith_CommRing_EqCnstr_toExprProof___closed__1(); lean_mark_persistent(l_Lean_Meta_Grind_Arith_CommRing_EqCnstr_toExprProof___closed__1); l_Lean_Meta_Grind_Arith_CommRing_EqCnstr_toExprProof___closed__2 = _init_l_Lean_Meta_Grind_Arith_CommRing_EqCnstr_toExprProof___closed__2(); diff --git a/stage0/stdlib/Lean/Meta/Tactic/Grind/Arith/CommRing/Reify.c b/stage0/stdlib/Lean/Meta/Tactic/Grind/Arith/CommRing/Reify.c index e0ad4dbcf41a..2225e2b559ac 100644 --- a/stage0/stdlib/Lean/Meta/Tactic/Grind/Arith/CommRing/Reify.c +++ b/stage0/stdlib/Lean/Meta/Tactic/Grind/Arith/CommRing/Reify.c @@ -24,7 +24,6 @@ lean_object* l_Lean_Meta_getNatValue_x3f(lean_object*, lean_object*, lean_object LEAN_EXPORT lean_object* l_Lean_Meta_Grind_Arith_CommRing_isIntCastInst___boxed(lean_object*, lean_object*); static lean_object* l_Lean_Meta_Grind_Arith_CommRing_reify_x3f_go___closed__5; static lean_object* l_Lean_Meta_Grind_Arith_CommRing_reify_x3f_go___closed__7; -lean_object* l_Lean_Meta_Grind_Arith_CommRing_getRing(lean_object*, lean_object*, lean_object*, lean_object*, lean_object*, lean_object*, lean_object*, lean_object*, lean_object*, lean_object*); static lean_object* l_Lean_Meta_Grind_Arith_CommRing_reify_x3f___closed__2; static lean_object* l_Lean_Meta_Grind_Arith_CommRing_reify_x3f___closed__4; LEAN_EXPORT lean_object* l_Lean_Meta_Grind_Arith_CommRing_reify_x3f___lambda__10___boxed(lean_object*, lean_object*, lean_object*, lean_object*, lean_object*, lean_object*, lean_object*, lean_object*, lean_object*, lean_object*, lean_object*, lean_object*, lean_object*, lean_object*, lean_object*, lean_object*); @@ -113,6 +112,7 @@ static lean_object* l_Lean_Meta_Grind_Arith_CommRing_reify_x3f___closed__6; LEAN_EXPORT lean_object* l_Lean_Meta_Grind_Arith_CommRing_reify_x3f___lambda__9(lean_object*, lean_object*, lean_object*, 
lean_object*, lean_object*, lean_object*, lean_object*, lean_object*, lean_object*, lean_object*, lean_object*, lean_object*, lean_object*, lean_object*, lean_object*, lean_object*); LEAN_EXPORT lean_object* l_Lean_Meta_Grind_Arith_CommRing_reify_x3f___lambda__9___boxed(lean_object*, lean_object*, lean_object*, lean_object*, lean_object*, lean_object*, lean_object*, lean_object*, lean_object*, lean_object*, lean_object*, lean_object*, lean_object*, lean_object*, lean_object*, lean_object*); static lean_object* l___private_Lean_Meta_Tactic_Grind_Arith_CommRing_Reify_0__Lean_Meta_Grind_Arith_CommRing_reportAppIssue___closed__2; +lean_object* l_Lean_Meta_Grind_Arith_CommRing_RingM_getRing(lean_object*, lean_object*, lean_object*, lean_object*, lean_object*, lean_object*, lean_object*, lean_object*, lean_object*, lean_object*); static lean_object* l_Lean_Meta_Grind_Arith_CommRing_reify_x3f_go___closed__4; LEAN_EXPORT lean_object* l_Lean_Meta_Grind_Arith_CommRing_reify_x3f_go___lambda__1(lean_object*, lean_object*, lean_object*, lean_object*, lean_object*, lean_object*, lean_object*, lean_object*, lean_object*, lean_object*, lean_object*, lean_object*, lean_object*); LEAN_EXPORT uint8_t l_Lean_Meta_Grind_Arith_CommRing_isPowInst(lean_object*, lean_object*); @@ -516,7 +516,7 @@ LEAN_EXPORT lean_object* l_Lean_Meta_Grind_Arith_CommRing_reify_x3f_go___lambda_ _start: { lean_object* x_15; -x_15 = l_Lean_Meta_Grind_Arith_CommRing_getRing(x_5, x_6, x_7, x_8, x_9, x_10, x_11, x_12, x_13, x_14); +x_15 = l_Lean_Meta_Grind_Arith_CommRing_RingM_getRing(x_5, x_6, x_7, x_8, x_9, x_10, x_11, x_12, x_13, x_14); if (lean_obj_tag(x_15) == 0) { lean_object* x_16; lean_object* x_17; uint8_t x_18; @@ -705,7 +705,7 @@ LEAN_EXPORT lean_object* l_Lean_Meta_Grind_Arith_CommRing_reify_x3f_go___lambda_ _start: { lean_object* x_15; -x_15 = l_Lean_Meta_Grind_Arith_CommRing_getRing(x_5, x_6, x_7, x_8, x_9, x_10, x_11, x_12, x_13, x_14); +x_15 = l_Lean_Meta_Grind_Arith_CommRing_RingM_getRing(x_5, x_6, x_7, x_8, x_9, x_10, x_11, x_12, x_13, x_14); if (lean_obj_tag(x_15) == 0) { lean_object* x_16; lean_object* x_17; uint8_t x_18; @@ -889,7 +889,7 @@ LEAN_EXPORT lean_object* l_Lean_Meta_Grind_Arith_CommRing_reify_x3f_go___lambda_ _start: { lean_object* x_16; -x_16 = l_Lean_Meta_Grind_Arith_CommRing_getRing(x_6, x_7, x_8, x_9, x_10, x_11, x_12, x_13, x_14, x_15); +x_16 = l_Lean_Meta_Grind_Arith_CommRing_RingM_getRing(x_6, x_7, x_8, x_9, x_10, x_11, x_12, x_13, x_14, x_15); if (lean_obj_tag(x_16) == 0) { lean_object* x_17; lean_object* x_18; uint8_t x_19; @@ -1037,7 +1037,7 @@ lean_dec(x_17); x_22 = lean_ctor_get(x_18, 0); lean_inc(x_22); lean_dec(x_18); -x_23 = l_Lean_Meta_Grind_Arith_CommRing_getRing(x_7, x_8, x_9, x_10, x_11, x_12, x_13, x_14, x_15, x_21); +x_23 = l_Lean_Meta_Grind_Arith_CommRing_RingM_getRing(x_7, x_8, x_9, x_10, x_11, x_12, x_13, x_14, x_15, x_21); if (lean_obj_tag(x_23) == 0) { lean_object* x_24; lean_object* x_25; uint8_t x_26; @@ -1197,7 +1197,7 @@ LEAN_EXPORT lean_object* l_Lean_Meta_Grind_Arith_CommRing_reify_x3f_go___lambda_ _start: { lean_object* x_17; -x_17 = l_Lean_Meta_Grind_Arith_CommRing_getRing(x_7, x_8, x_9, x_10, x_11, x_12, x_13, x_14, x_15, x_16); +x_17 = l_Lean_Meta_Grind_Arith_CommRing_RingM_getRing(x_7, x_8, x_9, x_10, x_11, x_12, x_13, x_14, x_15, x_16); if (lean_obj_tag(x_17) == 0) { lean_object* x_18; lean_object* x_19; uint8_t x_20; @@ -1375,7 +1375,7 @@ LEAN_EXPORT lean_object* l_Lean_Meta_Grind_Arith_CommRing_reify_x3f_go___lambda_ _start: { lean_object* x_17; -x_17 = 
l_Lean_Meta_Grind_Arith_CommRing_getRing(x_7, x_8, x_9, x_10, x_11, x_12, x_13, x_14, x_15, x_16); +x_17 = l_Lean_Meta_Grind_Arith_CommRing_RingM_getRing(x_7, x_8, x_9, x_10, x_11, x_12, x_13, x_14, x_15, x_16); if (lean_obj_tag(x_17) == 0) { lean_object* x_18; lean_object* x_19; uint8_t x_20; @@ -1553,7 +1553,7 @@ LEAN_EXPORT lean_object* l_Lean_Meta_Grind_Arith_CommRing_reify_x3f_go___lambda_ _start: { lean_object* x_17; -x_17 = l_Lean_Meta_Grind_Arith_CommRing_getRing(x_7, x_8, x_9, x_10, x_11, x_12, x_13, x_14, x_15, x_16); +x_17 = l_Lean_Meta_Grind_Arith_CommRing_RingM_getRing(x_7, x_8, x_9, x_10, x_11, x_12, x_13, x_14, x_15, x_16); if (lean_obj_tag(x_17) == 0) { lean_object* x_18; lean_object* x_19; uint8_t x_20; @@ -2516,7 +2516,7 @@ LEAN_EXPORT lean_object* l_Lean_Meta_Grind_Arith_CommRing_reify_x3f___lambda__4( _start: { lean_object* x_13; -x_13 = l_Lean_Meta_Grind_Arith_CommRing_getRing(x_3, x_4, x_5, x_6, x_7, x_8, x_9, x_10, x_11, x_12); +x_13 = l_Lean_Meta_Grind_Arith_CommRing_RingM_getRing(x_3, x_4, x_5, x_6, x_7, x_8, x_9, x_10, x_11, x_12); if (lean_obj_tag(x_13) == 0) { lean_object* x_14; lean_object* x_15; uint8_t x_16; @@ -2717,7 +2717,7 @@ LEAN_EXPORT lean_object* l_Lean_Meta_Grind_Arith_CommRing_reify_x3f___lambda__5( _start: { lean_object* x_13; -x_13 = l_Lean_Meta_Grind_Arith_CommRing_getRing(x_3, x_4, x_5, x_6, x_7, x_8, x_9, x_10, x_11, x_12); +x_13 = l_Lean_Meta_Grind_Arith_CommRing_RingM_getRing(x_3, x_4, x_5, x_6, x_7, x_8, x_9, x_10, x_11, x_12); if (lean_obj_tag(x_13) == 0) { lean_object* x_14; lean_object* x_15; uint8_t x_16; @@ -2914,7 +2914,7 @@ LEAN_EXPORT lean_object* l_Lean_Meta_Grind_Arith_CommRing_reify_x3f___lambda__6( _start: { lean_object* x_16; -x_16 = l_Lean_Meta_Grind_Arith_CommRing_getRing(x_6, x_7, x_8, x_9, x_10, x_11, x_12, x_13, x_14, x_15); +x_16 = l_Lean_Meta_Grind_Arith_CommRing_RingM_getRing(x_6, x_7, x_8, x_9, x_10, x_11, x_12, x_13, x_14, x_15); if (lean_obj_tag(x_16) == 0) { lean_object* x_17; lean_object* x_18; uint8_t x_19; @@ -3127,7 +3127,7 @@ if (x_26 == 0) { lean_object* x_27; lean_object* x_28; x_27 = lean_ctor_get(x_18, 0); -x_28 = l_Lean_Meta_Grind_Arith_CommRing_getRing(x_7, x_8, x_9, x_10, x_11, x_12, x_13, x_14, x_15, x_25); +x_28 = l_Lean_Meta_Grind_Arith_CommRing_RingM_getRing(x_7, x_8, x_9, x_10, x_11, x_12, x_13, x_14, x_15, x_25); if (lean_obj_tag(x_28) == 0) { lean_object* x_29; lean_object* x_30; uint8_t x_31; @@ -3287,7 +3287,7 @@ lean_object* x_55; lean_object* x_56; x_55 = lean_ctor_get(x_18, 0); lean_inc(x_55); lean_dec(x_18); -x_56 = l_Lean_Meta_Grind_Arith_CommRing_getRing(x_7, x_8, x_9, x_10, x_11, x_12, x_13, x_14, x_15, x_25); +x_56 = l_Lean_Meta_Grind_Arith_CommRing_RingM_getRing(x_7, x_8, x_9, x_10, x_11, x_12, x_13, x_14, x_15, x_25); if (lean_obj_tag(x_56) == 0) { lean_object* x_57; lean_object* x_58; uint8_t x_59; @@ -3478,7 +3478,7 @@ LEAN_EXPORT lean_object* l_Lean_Meta_Grind_Arith_CommRing_reify_x3f___lambda__8( _start: { lean_object* x_17; -x_17 = l_Lean_Meta_Grind_Arith_CommRing_getRing(x_7, x_8, x_9, x_10, x_11, x_12, x_13, x_14, x_15, x_16); +x_17 = l_Lean_Meta_Grind_Arith_CommRing_RingM_getRing(x_7, x_8, x_9, x_10, x_11, x_12, x_13, x_14, x_15, x_16); if (lean_obj_tag(x_17) == 0) { lean_object* x_18; lean_object* x_19; uint8_t x_20; @@ -3691,7 +3691,7 @@ LEAN_EXPORT lean_object* l_Lean_Meta_Grind_Arith_CommRing_reify_x3f___lambda__9( _start: { lean_object* x_17; -x_17 = l_Lean_Meta_Grind_Arith_CommRing_getRing(x_7, x_8, x_9, x_10, x_11, x_12, x_13, x_14, x_15, x_16); +x_17 = 
l_Lean_Meta_Grind_Arith_CommRing_RingM_getRing(x_7, x_8, x_9, x_10, x_11, x_12, x_13, x_14, x_15, x_16); if (lean_obj_tag(x_17) == 0) { lean_object* x_18; lean_object* x_19; uint8_t x_20; @@ -3904,7 +3904,7 @@ LEAN_EXPORT lean_object* l_Lean_Meta_Grind_Arith_CommRing_reify_x3f___lambda__10 _start: { lean_object* x_17; -x_17 = l_Lean_Meta_Grind_Arith_CommRing_getRing(x_7, x_8, x_9, x_10, x_11, x_12, x_13, x_14, x_15, x_16); +x_17 = l_Lean_Meta_Grind_Arith_CommRing_RingM_getRing(x_7, x_8, x_9, x_10, x_11, x_12, x_13, x_14, x_15, x_16); if (lean_obj_tag(x_17) == 0) { lean_object* x_18; lean_object* x_19; uint8_t x_20; diff --git a/stage0/stdlib/Lean/Meta/Tactic/Grind/Arith/CommRing/Util.c b/stage0/stdlib/Lean/Meta/Tactic/Grind/Arith/CommRing/Util.c index 05c57dd400b5..a0d5711851b9 100644 --- a/stage0/stdlib/Lean/Meta/Tactic/Grind/Arith/CommRing/Util.c +++ b/stage0/stdlib/Lean/Meta/Tactic/Grind/Arith/CommRing/Util.c @@ -25,49 +25,60 @@ size_t lean_usize_shift_right(size_t, size_t); lean_object* l_Lean_Grind_CommRing_Poly_mulC(lean_object*, lean_object*, lean_object*); LEAN_EXPORT lean_object* l_Lean_Meta_Grind_Arith_CommRing_noZeroDivisors___boxed(lean_object*, lean_object*, lean_object*, lean_object*, lean_object*, lean_object*, lean_object*, lean_object*, lean_object*, lean_object*); LEAN_EXPORT lean_object* l_Lean_Meta_Grind_Arith_CommRing_getCharInst(lean_object*, lean_object*, lean_object*, lean_object*, lean_object*, lean_object*, lean_object*, lean_object*, lean_object*, lean_object*); +LEAN_EXPORT lean_object* l_Lean_Meta_Grind_Arith_CommRing_nonzeroChar_x3f___rarg___lambda__1(lean_object*, lean_object*); uint8_t lean_usize_dec_le(size_t, size_t); -LEAN_EXPORT lean_object* l_Lean_Meta_Grind_Arith_CommRing_getRing(lean_object*, lean_object*, lean_object*, lean_object*, lean_object*, lean_object*, lean_object*, lean_object*, lean_object*, lean_object*); lean_object* l_Lean_Grind_CommRing_Poly_mulMon_x27(lean_object*, lean_object*, lean_object*, lean_object*); LEAN_EXPORT lean_object* l_Lean_Meta_Grind_Arith_CommRing_getRingId(lean_object*, lean_object*, lean_object*, lean_object*, lean_object*, lean_object*, lean_object*, lean_object*, lean_object*, lean_object*); size_t lean_uint64_to_usize(uint64_t); LEAN_EXPORT lean_object* l_Lean_Meta_Grind_Arith_CommRing_getNext_x3f(lean_object*, lean_object*, lean_object*, lean_object*, lean_object*, lean_object*, lean_object*, lean_object*, lean_object*, lean_object*); +LEAN_EXPORT lean_object* l_Lean_Meta_Grind_Arith_CommRing_nonzeroChar_x3f___rarg(lean_object*, lean_object*); +LEAN_EXPORT lean_object* l_Lean_throwError___at_Lean_Meta_Grind_Arith_CommRing_RingM_getRing___spec__1(lean_object*, lean_object*, lean_object*, lean_object*, lean_object*, lean_object*, lean_object*, lean_object*, lean_object*, lean_object*, lean_object*); lean_object* l_Lean_Meta_Grind_reportIssue(lean_object*, lean_object*, lean_object*, lean_object*, lean_object*, lean_object*, lean_object*, lean_object*, lean_object*); lean_object* lean_array_push(lean_object*, lean_object*); size_t lean_usize_mul(size_t, size_t); LEAN_EXPORT lean_object* l_Lean_PersistentHashMap_findAtAux___at_Lean_Meta_Grind_Arith_CommRing_getTermRingId_x3f___spec__3___boxed(lean_object*, lean_object*, lean_object*, lean_object*, lean_object*); LEAN_EXPORT lean_object* l_Lean_PersistentHashMap_insertAtCollisionNodeAux___at_Lean_Meta_Grind_Arith_CommRing_setTermRingId___spec__4(lean_object*, lean_object*, lean_object*, lean_object*); LEAN_EXPORT lean_object* 
l_Lean_Grind_CommRing_Poly_mulMonM___boxed(lean_object*, lean_object*, lean_object*, lean_object*, lean_object*, lean_object*, lean_object*, lean_object*, lean_object*, lean_object*, lean_object*, lean_object*, lean_object*); -LEAN_EXPORT lean_object* l_Lean_Meta_Grind_Arith_CommRing_getRing___boxed(lean_object*, lean_object*, lean_object*, lean_object*, lean_object*, lean_object*, lean_object*, lean_object*, lean_object*, lean_object*); +static lean_object* l_Lean_Meta_Grind_Arith_CommRing_nonzeroChar_x3f___at_Lean_Grind_CommRing_Expr_toPolyM___spec__1___closed__1; lean_object* lean_array_fset(lean_object*, lean_object*, lean_object*); +LEAN_EXPORT lean_object* l_Lean_Meta_Grind_Arith_CommRing_nonzeroCharInst_x3f___rarg___lambda__1(lean_object*, lean_object*, lean_object*); lean_object* l_Lean_RBNode_balLeft___rarg(lean_object*, lean_object*, lean_object*, lean_object*); LEAN_EXPORT lean_object* l_Lean_Meta_Grind_Arith_CommRing_checkCoeffDvd___boxed(lean_object*, lean_object*, lean_object*, lean_object*, lean_object*, lean_object*, lean_object*, lean_object*, lean_object*, lean_object*); LEAN_EXPORT lean_object* l_Lean_Meta_Grind_Arith_CommRing_getTermRingId_x3f___boxed(lean_object*, lean_object*, lean_object*, lean_object*, lean_object*, lean_object*, lean_object*, lean_object*, lean_object*, lean_object*); +LEAN_EXPORT lean_object* l_Lean_Meta_Grind_Arith_CommRing_nonzeroChar_x3f___at_Lean_Grind_CommRing_Expr_toPolyM___spec__1___lambda__1___boxed(lean_object*, lean_object*, lean_object*, lean_object*, lean_object*, lean_object*, lean_object*, lean_object*, lean_object*, lean_object*, lean_object*); lean_object* l_Lean_stringToMessageData(lean_object*); lean_object* l_Lean_RBNode_balRight___rarg(lean_object*, lean_object*, lean_object*, lean_object*); LEAN_EXPORT lean_object* l_Lean_Meta_Grind_Arith_CommRing_checkMaxSteps(lean_object*, lean_object*, lean_object*, lean_object*, lean_object*, lean_object*, lean_object*, lean_object*, lean_object*); LEAN_EXPORT lean_object* l_Lean_Grind_CommRing_Poly_combineM(lean_object*, lean_object*, lean_object*, lean_object*, lean_object*, lean_object*, lean_object*, lean_object*, lean_object*, lean_object*, lean_object*, lean_object*); LEAN_EXPORT lean_object* l_Lean_PersistentHashMap_insertAux___at_Lean_Meta_Grind_Arith_CommRing_setTermRingId___spec__2___boxed(lean_object*, lean_object*, lean_object*, lean_object*, lean_object*); static lean_object* l_Lean_Meta_Grind_Arith_CommRing_setTermRingId___closed__5; +static lean_object* l_Lean_Meta_Grind_Arith_CommRing_RingM_getRing___closed__1; LEAN_EXPORT lean_object* l_Lean_Meta_Grind_Arith_CommRing_isQueueEmpty___boxed(lean_object*, lean_object*, lean_object*, lean_object*, lean_object*, lean_object*, lean_object*, lean_object*, lean_object*, lean_object*); LEAN_EXPORT lean_object* l_Lean_Meta_Grind_Arith_CommRing_setTermRingId___lambda__1(lean_object*, lean_object*, lean_object*, lean_object*, lean_object*, lean_object*, lean_object*, lean_object*, lean_object*, lean_object*, lean_object*, lean_object*, lean_object*); +LEAN_EXPORT lean_object* l_Lean_Meta_Grind_Arith_CommRing_nonzeroChar_x3f___rarg___lambda__1___boxed(lean_object*, lean_object*); lean_object* l_Lean_Grind_CommRing_Poly_combine_x27(lean_object*, lean_object*, lean_object*); static size_t l_Lean_PersistentHashMap_findAux___at_Lean_Meta_Grind_Arith_CommRing_getTermRingId_x3f___spec__2___closed__1; LEAN_EXPORT lean_object* l_Lean_Meta_Grind_Arith_CommRing_setTermRingId(lean_object*, lean_object*, lean_object*, lean_object*, lean_object*, 
lean_object*, lean_object*, lean_object*, lean_object*, lean_object*, lean_object*); lean_object* l_Lean_RBNode_appendTrees___rarg(lean_object*, lean_object*); +LEAN_EXPORT lean_object* l_Lean_Meta_Grind_Arith_CommRing_instMonadGetRingOfMonadLift(lean_object*, lean_object*); lean_object* lean_st_ref_take(lean_object*, lean_object*); lean_object* l_Lean_Grind_CommRing_Poly_spol(lean_object*, lean_object*, lean_object*); -LEAN_EXPORT lean_object* l_Lean_Meta_Grind_Arith_CommRing_nonzeroChar_x3f___lambda__1(lean_object*, lean_object*, lean_object*, lean_object*, lean_object*, lean_object*, lean_object*, lean_object*, lean_object*, lean_object*, lean_object*); +static lean_object* l_Lean_Meta_Grind_Arith_CommRing_instMonadGetRingRingM___closed__1; +LEAN_EXPORT lean_object* l_Lean_Meta_Grind_Arith_CommRing_nonzeroCharInst_x3f___rarg(lean_object*, lean_object*); LEAN_EXPORT lean_object* l_Lean_Grind_CommRing_Poly_mulM(lean_object*, lean_object*, lean_object*, lean_object*, lean_object*, lean_object*, lean_object*, lean_object*, lean_object*, lean_object*, lean_object*, lean_object*); LEAN_EXPORT lean_object* l_Lean_Meta_Grind_Arith_CommRing_noZeroDivisorsInst_x3f___boxed(lean_object*, lean_object*, lean_object*, lean_object*, lean_object*, lean_object*, lean_object*, lean_object*, lean_object*, lean_object*); +LEAN_EXPORT lean_object* l_Lean_Meta_Grind_Arith_CommRing_RingM_getRing___boxed(lean_object*, lean_object*, lean_object*, lean_object*, lean_object*, lean_object*, lean_object*, lean_object*, lean_object*, lean_object*); lean_object* l_Lean_RBNode_setBlack___rarg(lean_object*); LEAN_EXPORT lean_object* l_Lean_Meta_Grind_Arith_CommRing_checkMaxSteps___boxed(lean_object*, lean_object*, lean_object*, lean_object*, lean_object*, lean_object*, lean_object*, lean_object*, lean_object*); +LEAN_EXPORT lean_object* l_Lean_Meta_Grind_Arith_CommRing_nonzeroChar_x3f___at_Lean_Grind_CommRing_Expr_toPolyM___spec__1(lean_object*, lean_object*, lean_object*, lean_object*, lean_object*, lean_object*, lean_object*, lean_object*, lean_object*, lean_object*); LEAN_EXPORT lean_object* l_Lean_RBNode_del___at_Lean_Meta_Grind_Arith_CommRing_getNext_x3f___spec__2___boxed(lean_object*, lean_object*); LEAN_EXPORT lean_object* l_Lean_Meta_Grind_Arith_CommRing_getTermRingId_x3f(lean_object*, lean_object*, lean_object*, lean_object*, lean_object*, lean_object*, lean_object*, lean_object*, lean_object*, lean_object*); +LEAN_EXPORT lean_object* l_Lean_Meta_Grind_Arith_CommRing_instMonadGetRingRingM; LEAN_EXPORT lean_object* l_Lean_Meta_Grind_Arith_CommRing_modify_x27(lean_object*, lean_object*, lean_object*, lean_object*, lean_object*, lean_object*, lean_object*, lean_object*, lean_object*, lean_object*); LEAN_EXPORT lean_object* l_Lean_RBNode_erase___at_Lean_Meta_Grind_Arith_CommRing_getNext_x3f___spec__1(lean_object*, lean_object*); LEAN_EXPORT lean_object* l_Lean_Meta_Grind_Arith_CommRing_setTermRingId___lambda__1___boxed(lean_object*, lean_object*, lean_object*, lean_object*, lean_object*, lean_object*, lean_object*, lean_object*, lean_object*, lean_object*, lean_object*, lean_object*, lean_object*); LEAN_EXPORT lean_object* l_Lean_PersistentHashMap_findAux___at_Lean_Meta_Grind_Arith_CommRing_getTermRingId_x3f___spec__2___boxed(lean_object*, lean_object*, lean_object*); -LEAN_EXPORT lean_object* l_Lean_Meta_Grind_Arith_CommRing_nonzeroChar_x3f(lean_object*, lean_object*, lean_object*, lean_object*, lean_object*, lean_object*, lean_object*, lean_object*, lean_object*, lean_object*); +LEAN_EXPORT lean_object* 
l_Lean_Meta_Grind_Arith_CommRing_nonzeroChar_x3f(lean_object*); lean_object* lean_st_ref_get(lean_object*, lean_object*); LEAN_EXPORT lean_object* l_Lean_Meta_Grind_Arith_CommRing_hasChar(lean_object*, lean_object*, lean_object*, lean_object*, lean_object*, lean_object*, lean_object*, lean_object*, lean_object*, lean_object*); lean_object* l_Lean_Grind_CommRing_Poly_mulConst_x27(lean_object*, lean_object*, lean_object*); @@ -77,25 +88,21 @@ LEAN_EXPORT lean_object* l_Lean_Meta_Grind_Arith_CommRing_hasChar___boxed(lean_o LEAN_EXPORT lean_object* l_Lean_Meta_Grind_Arith_CommRing_getNext_x3f___boxed(lean_object*, lean_object*, lean_object*, lean_object*, lean_object*, lean_object*, lean_object*, lean_object*, lean_object*, lean_object*); lean_object* l_Lean_Grind_CommRing_Poly_mul(lean_object*, lean_object*); LEAN_EXPORT lean_object* l_Lean_Meta_Grind_Arith_CommRing_isQueueEmpty(lean_object*, lean_object*, lean_object*, lean_object*, lean_object*, lean_object*, lean_object*, lean_object*, lean_object*, lean_object*); -static lean_object* l_Lean_Meta_Grind_Arith_CommRing_getRing___closed__1; -static lean_object* l_Lean_Meta_Grind_Arith_CommRing_nonzeroChar_x3f___closed__1; LEAN_EXPORT lean_object* l_Lean_PersistentHashMap_insertAux_traverse___at_Lean_Meta_Grind_Arith_CommRing_setTermRingId___spec__3(size_t, lean_object*, lean_object*, lean_object*, lean_object*, lean_object*); -LEAN_EXPORT lean_object* l_Lean_throwError___at_Lean_Meta_Grind_Arith_CommRing_getRing___spec__1(lean_object*, lean_object*, lean_object*, lean_object*, lean_object*, lean_object*, lean_object*, lean_object*, lean_object*, lean_object*, lean_object*); LEAN_EXPORT lean_object* l_Lean_Meta_Grind_Arith_CommRing_incSteps(lean_object*, lean_object*, lean_object*, lean_object*, lean_object*, lean_object*, lean_object*, lean_object*, lean_object*); static lean_object* l_Lean_Meta_Grind_Arith_CommRing_setTermRingId___closed__1; uint8_t l_Lean_Meta_Grind_isSameExpr_unsafe__1(lean_object*, lean_object*); -LEAN_EXPORT lean_object* l_Lean_throwError___at_Lean_Meta_Grind_Arith_CommRing_getRing___spec__1___boxed(lean_object*, lean_object*, lean_object*, lean_object*, lean_object*, lean_object*, lean_object*, lean_object*, lean_object*, lean_object*, lean_object*); static lean_object* l_Lean_Meta_Grind_Arith_CommRing_setTermRingId___closed__2; LEAN_EXPORT lean_object* l_Lean_Grind_CommRing_Poly_mulMonM(lean_object*, lean_object*, lean_object*, lean_object*, lean_object*, lean_object*, lean_object*, lean_object*, lean_object*, lean_object*, lean_object*, lean_object*, lean_object*); LEAN_EXPORT lean_object* l_Lean_Grind_CommRing_Expr_toPolyM(lean_object*, lean_object*, lean_object*, lean_object*, lean_object*, lean_object*, lean_object*, lean_object*, lean_object*, lean_object*, lean_object*); LEAN_EXPORT lean_object* l_Lean_RBNode_del___at_Lean_Meta_Grind_Arith_CommRing_getNext_x3f___spec__2(lean_object*, lean_object*); lean_object* lean_usize_to_nat(size_t); +static lean_object* l_Lean_Meta_Grind_Arith_CommRing_RingM_getRing___closed__2; LEAN_EXPORT lean_object* l_Lean_Meta_Grind_Arith_CommRing_setTermRingId___lambda__2(lean_object*, lean_object*, lean_object*, lean_object*, lean_object*, lean_object*, lean_object*, lean_object*, lean_object*, lean_object*, lean_object*); LEAN_EXPORT lean_object* l_Lean_PersistentHashMap_findAtAux___at_Lean_Meta_Grind_Arith_CommRing_getTermRingId_x3f___spec__3(lean_object*, lean_object*, lean_object*, lean_object*, lean_object*); LEAN_EXPORT lean_object* l_Lean_Grind_CommRing_Poly_spolM(lean_object*, 
lean_object*, lean_object*, lean_object*, lean_object*, lean_object*, lean_object*, lean_object*, lean_object*, lean_object*, lean_object*, lean_object*); LEAN_EXPORT lean_object* l_Lean_RBNode_erase___at_Lean_Meta_Grind_Arith_CommRing_getNext_x3f___spec__1___boxed(lean_object*, lean_object*); static lean_object* l_Lean_Meta_Grind_Arith_CommRing_getCharInst___closed__2; -LEAN_EXPORT lean_object* l_Lean_Meta_Grind_Arith_CommRing_nonzeroChar_x3f___lambda__1___boxed(lean_object*, lean_object*, lean_object*, lean_object*, lean_object*, lean_object*, lean_object*, lean_object*, lean_object*, lean_object*, lean_object*); LEAN_EXPORT lean_object* l_Lean_Grind_CommRing_Poly_mulConstM(lean_object*, lean_object*, lean_object*, lean_object*, lean_object*, lean_object*, lean_object*, lean_object*, lean_object*, lean_object*, lean_object*, lean_object*); LEAN_EXPORT lean_object* l_Lean_Meta_Grind_Arith_CommRing_withCheckCoeffDvd___rarg(lean_object*, lean_object*, lean_object*, lean_object*, lean_object*, lean_object*, lean_object*, lean_object*, lean_object*, lean_object*, lean_object*); LEAN_EXPORT lean_object* l_Lean_Meta_Grind_Arith_CommRing_modify_x27___boxed(lean_object*, lean_object*, lean_object*, lean_object*, lean_object*, lean_object*, lean_object*, lean_object*, lean_object*, lean_object*); @@ -103,16 +110,18 @@ LEAN_EXPORT lean_object* l_Lean_Meta_Grind_Arith_CommRing_modifyRing___boxed(lea lean_object* lean_array_fget(lean_object*, lean_object*); static lean_object* l_Lean_Meta_Grind_Arith_CommRing_setTermRingId___closed__3; lean_object* l_Lean_Grind_CommRing_Expr_toPolyC_go(lean_object*, lean_object*); +LEAN_EXPORT lean_object* l_Lean_Meta_Grind_Arith_CommRing_instMonadGetRingOfMonadLift___rarg(lean_object*, lean_object*); LEAN_EXPORT lean_object* l_Lean_Meta_Grind_Arith_CommRing_noZeroDivisorsInst_x3f(lean_object*, lean_object*, lean_object*, lean_object*, lean_object*, lean_object*, lean_object*, lean_object*, lean_object*, lean_object*); LEAN_EXPORT lean_object* l_Lean_throwError___at_Lean_Meta_Grind_Arith_CommRing_getCharInst___spec__1(lean_object*, lean_object*, lean_object*, lean_object*, lean_object*, lean_object*, lean_object*, lean_object*, lean_object*, lean_object*, lean_object*); uint8_t lean_nat_dec_eq(lean_object*, lean_object*); uint8_t lean_nat_dec_lt(lean_object*, lean_object*); +LEAN_EXPORT lean_object* l_Lean_throwError___at_Lean_Meta_Grind_Arith_CommRing_RingM_getRing___spec__1___boxed(lean_object*, lean_object*, lean_object*, lean_object*, lean_object*, lean_object*, lean_object*, lean_object*, lean_object*, lean_object*, lean_object*); LEAN_EXPORT lean_object* l_Lean_PersistentHashMap_insertAux_traverse___at_Lean_Meta_Grind_Arith_CommRing_setTermRingId___spec__3___boxed(lean_object*, lean_object*, lean_object*, lean_object*, lean_object*, lean_object*); static size_t l_Lean_PersistentHashMap_findAux___at_Lean_Meta_Grind_Arith_CommRing_getTermRingId_x3f___spec__2___closed__2; lean_object* l_Lean_indentExpr(lean_object*); lean_object* l_Lean_PersistentHashMap_mkEmptyEntries(lean_object*, lean_object*); uint8_t l_Lean_Meta_Grind_Arith_CommRing_EqCnstr_compare(lean_object*, lean_object*); -LEAN_EXPORT lean_object* l_Lean_Meta_Grind_Arith_CommRing_nonzeroCharInst_x3f(lean_object*, lean_object*, lean_object*, lean_object*, lean_object*, lean_object*, lean_object*, lean_object*, lean_object*, lean_object*); +LEAN_EXPORT lean_object* l_Lean_Meta_Grind_Arith_CommRing_nonzeroCharInst_x3f(lean_object*); LEAN_EXPORT lean_object* 
l_Lean_Meta_Grind_Arith_CommRing_withCheckCoeffDvd(lean_object*); lean_object* l_Lean_addMessageContextFull___at_Lean_Meta_instAddMessageContextMetaM___spec__1(lean_object*, lean_object*, lean_object*, lean_object*, lean_object*, lean_object*); uint8_t l_Lean_RBNode_isBlack___rarg(lean_object*); @@ -120,12 +129,13 @@ lean_object* l_Lean_PersistentHashMap_mkCollisionNode___rarg(lean_object*, lean_ static lean_object* l_Lean_PersistentHashMap_insertAux___at_Lean_Meta_Grind_Arith_CommRing_setTermRingId___spec__2___closed__1; size_t lean_usize_sub(size_t, size_t); LEAN_EXPORT lean_object* l_Lean_Meta_Grind_Arith_CommRing_get_x27(lean_object*, lean_object*, lean_object*, lean_object*, lean_object*, lean_object*, lean_object*, lean_object*, lean_object*); +LEAN_EXPORT lean_object* l_Lean_Meta_Grind_Arith_CommRing_nonzeroChar_x3f___at_Lean_Grind_CommRing_Expr_toPolyM___spec__1___lambda__1(lean_object*, lean_object*, lean_object*, lean_object*, lean_object*, lean_object*, lean_object*, lean_object*, lean_object*, lean_object*, lean_object*); LEAN_EXPORT lean_object* l_Lean_PersistentHashMap_find_x3f___at_Lean_Meta_Grind_Arith_CommRing_getTermRingId_x3f___spec__1___boxed(lean_object*, lean_object*); uint64_t l_Lean_Meta_Grind_instHashableENodeKey_unsafe__1(lean_object*); size_t lean_usize_add(size_t, size_t); LEAN_EXPORT lean_object* l_Lean_PersistentHashMap_insert___at_Lean_Meta_Grind_Arith_CommRing_setTermRingId___spec__1(lean_object*, lean_object*, lean_object*); LEAN_EXPORT lean_object* l_Lean_Meta_Grind_Arith_CommRing_getCharInst___boxed(lean_object*, lean_object*, lean_object*, lean_object*, lean_object*, lean_object*, lean_object*, lean_object*, lean_object*, lean_object*); -static lean_object* l_Lean_Meta_Grind_Arith_CommRing_getRing___closed__2; +LEAN_EXPORT lean_object* l_Lean_Meta_Grind_Arith_CommRing_RingM_getRing(lean_object*, lean_object*, lean_object*, lean_object*, lean_object*, lean_object*, lean_object*, lean_object*, lean_object*, lean_object*); lean_object* lean_st_ref_set(lean_object*, lean_object*, lean_object*); size_t lean_usize_shift_left(size_t, size_t); LEAN_EXPORT lean_object* l_Lean_Meta_Grind_Arith_CommRing_RingM_run___rarg(lean_object*, lean_object*, lean_object*, lean_object*, lean_object*, lean_object*, lean_object*, lean_object*, lean_object*, lean_object*, lean_object*); @@ -133,6 +143,7 @@ LEAN_EXPORT lean_object* l_Lean_Meta_Grind_Arith_CommRing_checkCoeffDvd(lean_obj LEAN_EXPORT lean_object* l_Lean_Meta_Grind_Arith_CommRing_incSteps___boxed(lean_object*, lean_object*, lean_object*, lean_object*, lean_object*, lean_object*, lean_object*, lean_object*, lean_object*); LEAN_EXPORT lean_object* l_Lean_PersistentHashMap_findAux___at_Lean_Meta_Grind_Arith_CommRing_getTermRingId_x3f___spec__2(lean_object*, size_t, lean_object*); lean_object* lean_array_get_size(lean_object*); +LEAN_EXPORT lean_object* l_Lean_Meta_Grind_Arith_CommRing_nonzeroChar_x3f___rarg___lambda__2(lean_object*, lean_object*, lean_object*); lean_object* lean_array_get(lean_object*, lean_object*, lean_object*); uint8_t lean_nat_dec_le(lean_object*, lean_object*); LEAN_EXPORT lean_object* l_Lean_Meta_Grind_Arith_CommRing_modifyRing(lean_object*, lean_object*, lean_object*, lean_object*, lean_object*, lean_object*, lean_object*, lean_object*, lean_object*, lean_object*, lean_object*); @@ -782,6 +793,22 @@ lean_dec(x_1); return x_10; } } +LEAN_EXPORT lean_object* l_Lean_Meta_Grind_Arith_CommRing_instMonadGetRingOfMonadLift___rarg(lean_object* x_1, lean_object* x_2) { +_start: +{ +lean_object* x_3; +x_3 
= lean_apply_2(x_1, lean_box(0), x_2); +return x_3; +} +} +LEAN_EXPORT lean_object* l_Lean_Meta_Grind_Arith_CommRing_instMonadGetRingOfMonadLift(lean_object* x_1, lean_object* x_2) { +_start: +{ +lean_object* x_3; +x_3 = lean_alloc_closure((void*)(l_Lean_Meta_Grind_Arith_CommRing_instMonadGetRingOfMonadLift___rarg), 2, 0); +return x_3; +} +} LEAN_EXPORT lean_object* l_Lean_Meta_Grind_Arith_CommRing_RingM_run___rarg(lean_object* x_1, lean_object* x_2, lean_object* x_3, lean_object* x_4, lean_object* x_5, lean_object* x_6, lean_object* x_7, lean_object* x_8, lean_object* x_9, lean_object* x_10, lean_object* x_11) { _start: { @@ -831,7 +858,7 @@ lean_dec(x_1); return x_11; } } -LEAN_EXPORT lean_object* l_Lean_throwError___at_Lean_Meta_Grind_Arith_CommRing_getRing___spec__1(lean_object* x_1, lean_object* x_2, lean_object* x_3, lean_object* x_4, lean_object* x_5, lean_object* x_6, lean_object* x_7, lean_object* x_8, lean_object* x_9, lean_object* x_10, lean_object* x_11) { +LEAN_EXPORT lean_object* l_Lean_throwError___at_Lean_Meta_Grind_Arith_CommRing_RingM_getRing___spec__1(lean_object* x_1, lean_object* x_2, lean_object* x_3, lean_object* x_4, lean_object* x_5, lean_object* x_6, lean_object* x_7, lean_object* x_8, lean_object* x_9, lean_object* x_10, lean_object* x_11) { _start: { lean_object* x_12; lean_object* x_13; uint8_t x_14; @@ -869,7 +896,7 @@ return x_20; } } } -static lean_object* _init_l_Lean_Meta_Grind_Arith_CommRing_getRing___closed__1() { +static lean_object* _init_l_Lean_Meta_Grind_Arith_CommRing_RingM_getRing___closed__1() { _start: { lean_object* x_1; @@ -877,16 +904,16 @@ x_1 = lean_mk_string_unchecked("`grind` internal error, invalid ringId", 38, 38) return x_1; } } -static lean_object* _init_l_Lean_Meta_Grind_Arith_CommRing_getRing___closed__2() { +static lean_object* _init_l_Lean_Meta_Grind_Arith_CommRing_RingM_getRing___closed__2() { _start: { lean_object* x_1; lean_object* x_2; -x_1 = l_Lean_Meta_Grind_Arith_CommRing_getRing___closed__1; +x_1 = l_Lean_Meta_Grind_Arith_CommRing_RingM_getRing___closed__1; x_2 = l_Lean_stringToMessageData(x_1); return x_2; } } -LEAN_EXPORT lean_object* l_Lean_Meta_Grind_Arith_CommRing_getRing(lean_object* x_1, lean_object* x_2, lean_object* x_3, lean_object* x_4, lean_object* x_5, lean_object* x_6, lean_object* x_7, lean_object* x_8, lean_object* x_9, lean_object* x_10) { +LEAN_EXPORT lean_object* l_Lean_Meta_Grind_Arith_CommRing_RingM_getRing(lean_object* x_1, lean_object* x_2, lean_object* x_3, lean_object* x_4, lean_object* x_5, lean_object* x_6, lean_object* x_7, lean_object* x_8, lean_object* x_9, lean_object* x_10) { _start: { lean_object* x_11; uint8_t x_12; @@ -909,8 +936,8 @@ if (x_18 == 0) lean_object* x_19; lean_object* x_20; lean_dec(x_16); lean_free_object(x_11); -x_19 = l_Lean_Meta_Grind_Arith_CommRing_getRing___closed__2; -x_20 = l_Lean_throwError___at_Lean_Meta_Grind_Arith_CommRing_getRing___spec__1(x_19, x_1, x_2, x_3, x_4, x_5, x_6, x_7, x_8, x_9, x_14); +x_19 = l_Lean_Meta_Grind_Arith_CommRing_RingM_getRing___closed__2; +x_20 = l_Lean_throwError___at_Lean_Meta_Grind_Arith_CommRing_RingM_getRing___spec__1(x_19, x_1, x_2, x_3, x_4, x_5, x_6, x_7, x_8, x_9, x_14); return x_20; } else @@ -941,8 +968,8 @@ if (x_27 == 0) { lean_object* x_28; lean_object* x_29; lean_dec(x_25); -x_28 = l_Lean_Meta_Grind_Arith_CommRing_getRing___closed__2; -x_29 = l_Lean_throwError___at_Lean_Meta_Grind_Arith_CommRing_getRing___spec__1(x_28, x_1, x_2, x_3, x_4, x_5, x_6, x_7, x_8, x_9, x_23); +x_28 = 
l_Lean_Meta_Grind_Arith_CommRing_RingM_getRing___closed__2; +x_29 = l_Lean_throwError___at_Lean_Meta_Grind_Arith_CommRing_RingM_getRing___spec__1(x_28, x_1, x_2, x_3, x_4, x_5, x_6, x_7, x_8, x_9, x_23); return x_29; } else @@ -958,11 +985,11 @@ return x_31; } } } -LEAN_EXPORT lean_object* l_Lean_throwError___at_Lean_Meta_Grind_Arith_CommRing_getRing___spec__1___boxed(lean_object* x_1, lean_object* x_2, lean_object* x_3, lean_object* x_4, lean_object* x_5, lean_object* x_6, lean_object* x_7, lean_object* x_8, lean_object* x_9, lean_object* x_10, lean_object* x_11) { +LEAN_EXPORT lean_object* l_Lean_throwError___at_Lean_Meta_Grind_Arith_CommRing_RingM_getRing___spec__1___boxed(lean_object* x_1, lean_object* x_2, lean_object* x_3, lean_object* x_4, lean_object* x_5, lean_object* x_6, lean_object* x_7, lean_object* x_8, lean_object* x_9, lean_object* x_10, lean_object* x_11) { _start: { lean_object* x_12; -x_12 = l_Lean_throwError___at_Lean_Meta_Grind_Arith_CommRing_getRing___spec__1(x_1, x_2, x_3, x_4, x_5, x_6, x_7, x_8, x_9, x_10, x_11); +x_12 = l_Lean_throwError___at_Lean_Meta_Grind_Arith_CommRing_RingM_getRing___spec__1(x_1, x_2, x_3, x_4, x_5, x_6, x_7, x_8, x_9, x_10, x_11); lean_dec(x_10); lean_dec(x_9); lean_dec(x_8); @@ -975,11 +1002,11 @@ lean_dec(x_2); return x_12; } } -LEAN_EXPORT lean_object* l_Lean_Meta_Grind_Arith_CommRing_getRing___boxed(lean_object* x_1, lean_object* x_2, lean_object* x_3, lean_object* x_4, lean_object* x_5, lean_object* x_6, lean_object* x_7, lean_object* x_8, lean_object* x_9, lean_object* x_10) { +LEAN_EXPORT lean_object* l_Lean_Meta_Grind_Arith_CommRing_RingM_getRing___boxed(lean_object* x_1, lean_object* x_2, lean_object* x_3, lean_object* x_4, lean_object* x_5, lean_object* x_6, lean_object* x_7, lean_object* x_8, lean_object* x_9, lean_object* x_10) { _start: { lean_object* x_11; -x_11 = l_Lean_Meta_Grind_Arith_CommRing_getRing(x_1, x_2, x_3, x_4, x_5, x_6, x_7, x_8, x_9, x_10); +x_11 = l_Lean_Meta_Grind_Arith_CommRing_RingM_getRing(x_1, x_2, x_3, x_4, x_5, x_6, x_7, x_8, x_9, x_10); lean_dec(x_9); lean_dec(x_8); lean_dec(x_7); @@ -992,6 +1019,22 @@ lean_dec(x_1); return x_11; } } +static lean_object* _init_l_Lean_Meta_Grind_Arith_CommRing_instMonadGetRingRingM___closed__1() { +_start: +{ +lean_object* x_1; +x_1 = lean_alloc_closure((void*)(l_Lean_Meta_Grind_Arith_CommRing_RingM_getRing___boxed), 10, 0); +return x_1; +} +} +static lean_object* _init_l_Lean_Meta_Grind_Arith_CommRing_instMonadGetRingRingM() { +_start: +{ +lean_object* x_1; +x_1 = l_Lean_Meta_Grind_Arith_CommRing_instMonadGetRingRingM___closed__1; +return x_1; +} +} LEAN_EXPORT lean_object* l_Lean_Meta_Grind_Arith_CommRing_modifyRing(lean_object* x_1, lean_object* x_2, lean_object* x_3, lean_object* x_4, lean_object* x_5, lean_object* x_6, lean_object* x_7, lean_object* x_8, lean_object* x_9, lean_object* x_10, lean_object* x_11) { _start: { @@ -2975,548 +3018,378 @@ lean_dec(x_1); return x_12; } } -LEAN_EXPORT lean_object* l_Lean_Meta_Grind_Arith_CommRing_nonzeroChar_x3f___lambda__1(lean_object* x_1, lean_object* x_2, lean_object* x_3, lean_object* x_4, lean_object* x_5, lean_object* x_6, lean_object* x_7, lean_object* x_8, lean_object* x_9, lean_object* x_10, lean_object* x_11) { +LEAN_EXPORT lean_object* l_Lean_Meta_Grind_Arith_CommRing_nonzeroChar_x3f___rarg___lambda__1(lean_object* x_1, lean_object* x_2) { _start: { -lean_object* x_12; lean_object* x_13; -x_12 = lean_box(0); -x_13 = lean_alloc_ctor(0, 2, 0); -lean_ctor_set(x_13, 0, x_12); -lean_ctor_set(x_13, 1, x_11); -return x_13; 
+lean_object* x_3; lean_object* x_4; lean_object* x_5; lean_object* x_6; +x_3 = lean_ctor_get(x_1, 0); +lean_inc(x_3); +lean_dec(x_1); +x_4 = lean_ctor_get(x_3, 1); +lean_inc(x_4); +lean_dec(x_3); +x_5 = lean_box(0); +x_6 = lean_apply_2(x_4, lean_box(0), x_5); +return x_6; } } -static lean_object* _init_l_Lean_Meta_Grind_Arith_CommRing_nonzeroChar_x3f___closed__1() { +LEAN_EXPORT lean_object* l_Lean_Meta_Grind_Arith_CommRing_nonzeroChar_x3f___rarg___lambda__2(lean_object* x_1, lean_object* x_2, lean_object* x_3) { _start: { -lean_object* x_1; -x_1 = lean_alloc_closure((void*)(l_Lean_Meta_Grind_Arith_CommRing_nonzeroChar_x3f___lambda__1___boxed), 11, 0); -return x_1; -} +lean_object* x_4; lean_object* x_5; +lean_inc(x_1); +x_4 = lean_alloc_closure((void*)(l_Lean_Meta_Grind_Arith_CommRing_nonzeroChar_x3f___rarg___lambda__1___boxed), 2, 1); +lean_closure_set(x_4, 0, x_1); +x_5 = lean_ctor_get(x_3, 4); +lean_inc(x_5); +lean_dec(x_3); +if (lean_obj_tag(x_5) == 0) +{ +lean_object* x_6; lean_object* x_7; lean_object* x_8; lean_object* x_9; lean_object* x_10; +x_6 = lean_ctor_get(x_1, 0); +lean_inc(x_6); +lean_dec(x_1); +x_7 = lean_ctor_get(x_6, 1); +lean_inc(x_7); +lean_dec(x_6); +x_8 = lean_box(0); +x_9 = lean_apply_2(x_7, lean_box(0), x_8); +x_10 = lean_apply_4(x_2, lean_box(0), lean_box(0), x_9, x_4); +return x_10; } -LEAN_EXPORT lean_object* l_Lean_Meta_Grind_Arith_CommRing_nonzeroChar_x3f(lean_object* x_1, lean_object* x_2, lean_object* x_3, lean_object* x_4, lean_object* x_5, lean_object* x_6, lean_object* x_7, lean_object* x_8, lean_object* x_9, lean_object* x_10) { -_start: +else { -lean_object* x_11; -x_11 = l_Lean_Meta_Grind_Arith_CommRing_getRing(x_1, x_2, x_3, x_4, x_5, x_6, x_7, x_8, x_9, x_10); -if (lean_obj_tag(x_11) == 0) +uint8_t x_11; +x_11 = !lean_is_exclusive(x_5); +if (x_11 == 0) { -uint8_t x_12; -x_12 = !lean_is_exclusive(x_11); -if (x_12 == 0) +lean_object* x_12; lean_object* x_13; lean_object* x_14; uint8_t x_15; +x_12 = lean_ctor_get(x_5, 0); +x_13 = lean_ctor_get(x_12, 1); +lean_inc(x_13); +lean_dec(x_12); +x_14 = lean_unsigned_to_nat(0u); +x_15 = lean_nat_dec_eq(x_13, x_14); +if (x_15 == 0) { -lean_object* x_13; lean_object* x_14; lean_object* x_15; lean_object* x_16; -x_13 = lean_ctor_get(x_11, 0); -x_14 = lean_ctor_get(x_11, 1); -x_15 = l_Lean_Meta_Grind_Arith_CommRing_nonzeroChar_x3f___closed__1; -x_16 = lean_ctor_get(x_13, 4); +lean_object* x_16; lean_object* x_17; lean_object* x_18; +lean_dec(x_4); +lean_dec(x_2); +x_16 = lean_ctor_get(x_1, 0); lean_inc(x_16); +lean_dec(x_1); +x_17 = lean_ctor_get(x_16, 1); +lean_inc(x_17); +lean_dec(x_16); +lean_ctor_set(x_5, 0, x_13); +x_18 = lean_apply_2(x_17, lean_box(0), x_5); +return x_18; +} +else +{ +lean_object* x_19; lean_object* x_20; lean_object* x_21; lean_object* x_22; lean_object* x_23; lean_dec(x_13); -if (lean_obj_tag(x_16) == 0) +lean_free_object(x_5); +x_19 = lean_ctor_get(x_1, 0); +lean_inc(x_19); +lean_dec(x_1); +x_20 = lean_ctor_get(x_19, 1); +lean_inc(x_20); +lean_dec(x_19); +x_21 = lean_box(0); +x_22 = lean_apply_2(x_20, lean_box(0), x_21); +x_23 = lean_apply_4(x_2, lean_box(0), lean_box(0), x_22, x_4); +return x_23; +} +} +else { -lean_object* x_17; lean_object* x_18; -lean_free_object(x_11); -x_17 = lean_box(0); -x_18 = lean_apply_11(x_15, x_17, x_1, x_2, x_3, x_4, x_5, x_6, x_7, x_8, x_9, x_14); -return x_18; +lean_object* x_24; lean_object* x_25; lean_object* x_26; uint8_t x_27; +x_24 = lean_ctor_get(x_5, 0); +lean_inc(x_24); +lean_dec(x_5); +x_25 = lean_ctor_get(x_24, 1); +lean_inc(x_25); 
+lean_dec(x_24); +x_26 = lean_unsigned_to_nat(0u); +x_27 = lean_nat_dec_eq(x_25, x_26); +if (x_27 == 0) +{ +lean_object* x_28; lean_object* x_29; lean_object* x_30; lean_object* x_31; +lean_dec(x_4); +lean_dec(x_2); +x_28 = lean_ctor_get(x_1, 0); +lean_inc(x_28); +lean_dec(x_1); +x_29 = lean_ctor_get(x_28, 1); +lean_inc(x_29); +lean_dec(x_28); +x_30 = lean_alloc_ctor(1, 1, 0); +lean_ctor_set(x_30, 0, x_25); +x_31 = lean_apply_2(x_29, lean_box(0), x_30); +return x_31; } else { -uint8_t x_19; -x_19 = !lean_is_exclusive(x_16); -if (x_19 == 0) +lean_object* x_32; lean_object* x_33; lean_object* x_34; lean_object* x_35; lean_object* x_36; +lean_dec(x_25); +x_32 = lean_ctor_get(x_1, 0); +lean_inc(x_32); +lean_dec(x_1); +x_33 = lean_ctor_get(x_32, 1); +lean_inc(x_33); +lean_dec(x_32); +x_34 = lean_box(0); +x_35 = lean_apply_2(x_33, lean_box(0), x_34); +x_36 = lean_apply_4(x_2, lean_box(0), lean_box(0), x_35, x_4); +return x_36; +} +} +} +} +} +LEAN_EXPORT lean_object* l_Lean_Meta_Grind_Arith_CommRing_nonzeroChar_x3f___rarg(lean_object* x_1, lean_object* x_2) { +_start: { -lean_object* x_20; lean_object* x_21; lean_object* x_22; uint8_t x_23; -x_20 = lean_ctor_get(x_16, 0); -x_21 = lean_ctor_get(x_20, 1); -lean_inc(x_21); -lean_dec(x_20); -x_22 = lean_unsigned_to_nat(0u); -x_23 = lean_nat_dec_eq(x_21, x_22); -if (x_23 == 0) +lean_object* x_3; lean_object* x_4; lean_object* x_5; +x_3 = lean_ctor_get(x_1, 1); +lean_inc(x_3); +lean_inc(x_3); +x_4 = lean_alloc_closure((void*)(l_Lean_Meta_Grind_Arith_CommRing_nonzeroChar_x3f___rarg___lambda__2), 3, 2); +lean_closure_set(x_4, 0, x_1); +lean_closure_set(x_4, 1, x_3); +x_5 = lean_apply_4(x_3, lean_box(0), lean_box(0), x_2, x_4); +return x_5; +} +} +LEAN_EXPORT lean_object* l_Lean_Meta_Grind_Arith_CommRing_nonzeroChar_x3f(lean_object* x_1) { +_start: { -lean_dec(x_9); -lean_dec(x_8); -lean_dec(x_7); +lean_object* x_2; +x_2 = lean_alloc_closure((void*)(l_Lean_Meta_Grind_Arith_CommRing_nonzeroChar_x3f___rarg), 2, 0); +return x_2; +} +} +LEAN_EXPORT lean_object* l_Lean_Meta_Grind_Arith_CommRing_nonzeroChar_x3f___rarg___lambda__1___boxed(lean_object* x_1, lean_object* x_2) { +_start: +{ +lean_object* x_3; +x_3 = l_Lean_Meta_Grind_Arith_CommRing_nonzeroChar_x3f___rarg___lambda__1(x_1, x_2); +lean_dec(x_2); +return x_3; +} +} +LEAN_EXPORT lean_object* l_Lean_Meta_Grind_Arith_CommRing_nonzeroCharInst_x3f___rarg___lambda__1(lean_object* x_1, lean_object* x_2, lean_object* x_3) { +_start: +{ +lean_object* x_4; lean_object* x_5; +lean_inc(x_1); +x_4 = lean_alloc_closure((void*)(l_Lean_Meta_Grind_Arith_CommRing_nonzeroChar_x3f___rarg___lambda__1___boxed), 2, 1); +lean_closure_set(x_4, 0, x_1); +x_5 = lean_ctor_get(x_3, 4); +lean_inc(x_5); +lean_dec(x_3); +if (lean_obj_tag(x_5) == 0) +{ +lean_object* x_6; lean_object* x_7; lean_object* x_8; lean_object* x_9; lean_object* x_10; +x_6 = lean_ctor_get(x_1, 0); +lean_inc(x_6); +lean_dec(x_1); +x_7 = lean_ctor_get(x_6, 1); +lean_inc(x_7); lean_dec(x_6); -lean_dec(x_5); +x_8 = lean_box(0); +x_9 = lean_apply_2(x_7, lean_box(0), x_8); +x_10 = lean_apply_4(x_2, lean_box(0), lean_box(0), x_9, x_4); +return x_10; +} +else +{ +uint8_t x_11; +x_11 = !lean_is_exclusive(x_5); +if (x_11 == 0) +{ +lean_object* x_12; uint8_t x_13; +x_12 = lean_ctor_get(x_5, 0); +x_13 = !lean_is_exclusive(x_12); +if (x_13 == 0) +{ +lean_object* x_14; lean_object* x_15; lean_object* x_16; uint8_t x_17; +x_14 = lean_ctor_get(x_12, 0); +x_15 = lean_ctor_get(x_12, 1); +x_16 = lean_unsigned_to_nat(0u); +x_17 = lean_nat_dec_eq(x_15, x_16); +if (x_17 == 0) +{ 
+lean_object* x_18; lean_object* x_19; lean_object* x_20; lean_dec(x_4); -lean_dec(x_3); lean_dec(x_2); +x_18 = lean_ctor_get(x_1, 0); +lean_inc(x_18); lean_dec(x_1); -lean_ctor_set(x_16, 0, x_21); -lean_ctor_set(x_11, 0, x_16); -return x_11; +x_19 = lean_ctor_get(x_18, 1); +lean_inc(x_19); +lean_dec(x_18); +x_20 = lean_apply_2(x_19, lean_box(0), x_5); +return x_20; } else { -lean_object* x_24; lean_object* x_25; +lean_object* x_21; lean_object* x_22; lean_object* x_23; lean_object* x_24; lean_object* x_25; +lean_free_object(x_12); +lean_dec(x_15); +lean_dec(x_14); +lean_free_object(x_5); +x_21 = lean_ctor_get(x_1, 0); +lean_inc(x_21); +lean_dec(x_1); +x_22 = lean_ctor_get(x_21, 1); +lean_inc(x_22); lean_dec(x_21); -lean_free_object(x_16); -lean_free_object(x_11); -x_24 = lean_box(0); -x_25 = lean_apply_11(x_15, x_24, x_1, x_2, x_3, x_4, x_5, x_6, x_7, x_8, x_9, x_14); +x_23 = lean_box(0); +x_24 = lean_apply_2(x_22, lean_box(0), x_23); +x_25 = lean_apply_4(x_2, lean_box(0), lean_box(0), x_24, x_4); return x_25; } } else { lean_object* x_26; lean_object* x_27; lean_object* x_28; uint8_t x_29; -x_26 = lean_ctor_get(x_16, 0); -lean_inc(x_26); -lean_dec(x_16); -x_27 = lean_ctor_get(x_26, 1); +x_26 = lean_ctor_get(x_12, 0); +x_27 = lean_ctor_get(x_12, 1); lean_inc(x_27); -lean_dec(x_26); +lean_inc(x_26); +lean_dec(x_12); x_28 = lean_unsigned_to_nat(0u); x_29 = lean_nat_dec_eq(x_27, x_28); if (x_29 == 0) { -lean_object* x_30; -lean_dec(x_9); -lean_dec(x_8); -lean_dec(x_7); -lean_dec(x_6); -lean_dec(x_5); +lean_object* x_30; lean_object* x_31; lean_object* x_32; lean_object* x_33; lean_dec(x_4); -lean_dec(x_3); lean_dec(x_2); +x_30 = lean_ctor_get(x_1, 0); +lean_inc(x_30); lean_dec(x_1); -x_30 = lean_alloc_ctor(1, 1, 0); -lean_ctor_set(x_30, 0, x_27); -lean_ctor_set(x_11, 0, x_30); -return x_11; +x_31 = lean_ctor_get(x_30, 1); +lean_inc(x_31); +lean_dec(x_30); +x_32 = lean_alloc_ctor(0, 2, 0); +lean_ctor_set(x_32, 0, x_26); +lean_ctor_set(x_32, 1, x_27); +lean_ctor_set(x_5, 0, x_32); +x_33 = lean_apply_2(x_31, lean_box(0), x_5); +return x_33; } else { -lean_object* x_31; lean_object* x_32; +lean_object* x_34; lean_object* x_35; lean_object* x_36; lean_object* x_37; lean_object* x_38; lean_dec(x_27); -lean_free_object(x_11); -x_31 = lean_box(0); -x_32 = lean_apply_11(x_15, x_31, x_1, x_2, x_3, x_4, x_5, x_6, x_7, x_8, x_9, x_14); -return x_32; -} -} -} -} -else -{ -lean_object* x_33; lean_object* x_34; lean_object* x_35; lean_object* x_36; -x_33 = lean_ctor_get(x_11, 0); -x_34 = lean_ctor_get(x_11, 1); +lean_dec(x_26); +lean_free_object(x_5); +x_34 = lean_ctor_get(x_1, 0); lean_inc(x_34); -lean_inc(x_33); -lean_dec(x_11); -x_35 = l_Lean_Meta_Grind_Arith_CommRing_nonzeroChar_x3f___closed__1; -x_36 = lean_ctor_get(x_33, 4); -lean_inc(x_36); -lean_dec(x_33); -if (lean_obj_tag(x_36) == 0) -{ -lean_object* x_37; lean_object* x_38; -x_37 = lean_box(0); -x_38 = lean_apply_11(x_35, x_37, x_1, x_2, x_3, x_4, x_5, x_6, x_7, x_8, x_9, x_34); +lean_dec(x_1); +x_35 = lean_ctor_get(x_34, 1); +lean_inc(x_35); +lean_dec(x_34); +x_36 = lean_box(0); +x_37 = lean_apply_2(x_35, lean_box(0), x_36); +x_38 = lean_apply_4(x_2, lean_box(0), lean_box(0), x_37, x_4); return x_38; } +} +} else { -lean_object* x_39; lean_object* x_40; lean_object* x_41; lean_object* x_42; uint8_t x_43; -x_39 = lean_ctor_get(x_36, 0); +lean_object* x_39; lean_object* x_40; lean_object* x_41; lean_object* x_42; lean_object* x_43; uint8_t x_44; +x_39 = lean_ctor_get(x_5, 0); lean_inc(x_39); -if (lean_is_exclusive(x_36)) { - 
lean_ctor_release(x_36, 0); - x_40 = x_36; -} else { - lean_dec_ref(x_36); - x_40 = lean_box(0); -} +lean_dec(x_5); +x_40 = lean_ctor_get(x_39, 0); +lean_inc(x_40); x_41 = lean_ctor_get(x_39, 1); lean_inc(x_41); -lean_dec(x_39); -x_42 = lean_unsigned_to_nat(0u); -x_43 = lean_nat_dec_eq(x_41, x_42); -if (x_43 == 0) +if (lean_is_exclusive(x_39)) { + lean_ctor_release(x_39, 0); + lean_ctor_release(x_39, 1); + x_42 = x_39; +} else { + lean_dec_ref(x_39); + x_42 = lean_box(0); +} +x_43 = lean_unsigned_to_nat(0u); +x_44 = lean_nat_dec_eq(x_41, x_43); +if (x_44 == 0) { -lean_object* x_44; lean_object* x_45; -lean_dec(x_9); -lean_dec(x_8); -lean_dec(x_7); -lean_dec(x_6); -lean_dec(x_5); +lean_object* x_45; lean_object* x_46; lean_object* x_47; lean_object* x_48; lean_object* x_49; lean_dec(x_4); -lean_dec(x_3); lean_dec(x_2); +x_45 = lean_ctor_get(x_1, 0); +lean_inc(x_45); lean_dec(x_1); -if (lean_is_scalar(x_40)) { - x_44 = lean_alloc_ctor(1, 1, 0); +x_46 = lean_ctor_get(x_45, 1); +lean_inc(x_46); +lean_dec(x_45); +if (lean_is_scalar(x_42)) { + x_47 = lean_alloc_ctor(0, 2, 0); } else { - x_44 = x_40; + x_47 = x_42; } -lean_ctor_set(x_44, 0, x_41); -x_45 = lean_alloc_ctor(0, 2, 0); -lean_ctor_set(x_45, 0, x_44); -lean_ctor_set(x_45, 1, x_34); -return x_45; +lean_ctor_set(x_47, 0, x_40); +lean_ctor_set(x_47, 1, x_41); +x_48 = lean_alloc_ctor(1, 1, 0); +lean_ctor_set(x_48, 0, x_47); +x_49 = lean_apply_2(x_46, lean_box(0), x_48); +return x_49; } else { -lean_object* x_46; lean_object* x_47; +lean_object* x_50; lean_object* x_51; lean_object* x_52; lean_object* x_53; lean_object* x_54; +lean_dec(x_42); lean_dec(x_41); lean_dec(x_40); -x_46 = lean_box(0); -x_47 = lean_apply_11(x_35, x_46, x_1, x_2, x_3, x_4, x_5, x_6, x_7, x_8, x_9, x_34); -return x_47; -} -} -} -} -else -{ -uint8_t x_48; -lean_dec(x_9); -lean_dec(x_8); -lean_dec(x_7); -lean_dec(x_6); -lean_dec(x_5); -lean_dec(x_4); -lean_dec(x_3); -lean_dec(x_2); -lean_dec(x_1); -x_48 = !lean_is_exclusive(x_11); -if (x_48 == 0) -{ -return x_11; -} -else -{ -lean_object* x_49; lean_object* x_50; lean_object* x_51; -x_49 = lean_ctor_get(x_11, 0); -x_50 = lean_ctor_get(x_11, 1); +x_50 = lean_ctor_get(x_1, 0); lean_inc(x_50); -lean_inc(x_49); -lean_dec(x_11); -x_51 = lean_alloc_ctor(1, 2, 0); -lean_ctor_set(x_51, 0, x_49); -lean_ctor_set(x_51, 1, x_50); -return x_51; -} -} -} -} -LEAN_EXPORT lean_object* l_Lean_Meta_Grind_Arith_CommRing_nonzeroChar_x3f___lambda__1___boxed(lean_object* x_1, lean_object* x_2, lean_object* x_3, lean_object* x_4, lean_object* x_5, lean_object* x_6, lean_object* x_7, lean_object* x_8, lean_object* x_9, lean_object* x_10, lean_object* x_11) { -_start: -{ -lean_object* x_12; -x_12 = l_Lean_Meta_Grind_Arith_CommRing_nonzeroChar_x3f___lambda__1(x_1, x_2, x_3, x_4, x_5, x_6, x_7, x_8, x_9, x_10, x_11); -lean_dec(x_10); -lean_dec(x_9); -lean_dec(x_8); -lean_dec(x_7); -lean_dec(x_6); -lean_dec(x_5); -lean_dec(x_4); -lean_dec(x_3); -lean_dec(x_2); -lean_dec(x_1); -return x_12; -} -} -LEAN_EXPORT lean_object* l_Lean_Meta_Grind_Arith_CommRing_nonzeroCharInst_x3f(lean_object* x_1, lean_object* x_2, lean_object* x_3, lean_object* x_4, lean_object* x_5, lean_object* x_6, lean_object* x_7, lean_object* x_8, lean_object* x_9, lean_object* x_10) { -_start: -{ -lean_object* x_11; -x_11 = l_Lean_Meta_Grind_Arith_CommRing_getRing(x_1, x_2, x_3, x_4, x_5, x_6, x_7, x_8, x_9, x_10); -if (lean_obj_tag(x_11) == 0) -{ -uint8_t x_12; -x_12 = !lean_is_exclusive(x_11); -if (x_12 == 0) -{ -lean_object* x_13; lean_object* x_14; lean_object* x_15; 
lean_object* x_16; -x_13 = lean_ctor_get(x_11, 0); -x_14 = lean_ctor_get(x_11, 1); -x_15 = l_Lean_Meta_Grind_Arith_CommRing_nonzeroChar_x3f___closed__1; -x_16 = lean_ctor_get(x_13, 4); -lean_inc(x_16); -lean_dec(x_13); -if (lean_obj_tag(x_16) == 0) -{ -lean_object* x_17; lean_object* x_18; -lean_free_object(x_11); -x_17 = lean_box(0); -x_18 = lean_apply_11(x_15, x_17, x_1, x_2, x_3, x_4, x_5, x_6, x_7, x_8, x_9, x_14); -return x_18; -} -else -{ -uint8_t x_19; -x_19 = !lean_is_exclusive(x_16); -if (x_19 == 0) -{ -lean_object* x_20; uint8_t x_21; -x_20 = lean_ctor_get(x_16, 0); -x_21 = !lean_is_exclusive(x_20); -if (x_21 == 0) -{ -lean_object* x_22; lean_object* x_23; lean_object* x_24; uint8_t x_25; -x_22 = lean_ctor_get(x_20, 0); -x_23 = lean_ctor_get(x_20, 1); -x_24 = lean_unsigned_to_nat(0u); -x_25 = lean_nat_dec_eq(x_23, x_24); -if (x_25 == 0) -{ -lean_dec(x_9); -lean_dec(x_8); -lean_dec(x_7); -lean_dec(x_6); -lean_dec(x_5); -lean_dec(x_4); -lean_dec(x_3); -lean_dec(x_2); lean_dec(x_1); -lean_ctor_set(x_11, 0, x_16); -return x_11; -} -else -{ -lean_object* x_26; lean_object* x_27; -lean_free_object(x_20); -lean_dec(x_23); -lean_dec(x_22); -lean_free_object(x_16); -lean_free_object(x_11); -x_26 = lean_box(0); -x_27 = lean_apply_11(x_15, x_26, x_1, x_2, x_3, x_4, x_5, x_6, x_7, x_8, x_9, x_14); -return x_27; -} -} -else -{ -lean_object* x_28; lean_object* x_29; lean_object* x_30; uint8_t x_31; -x_28 = lean_ctor_get(x_20, 0); -x_29 = lean_ctor_get(x_20, 1); -lean_inc(x_29); -lean_inc(x_28); -lean_dec(x_20); -x_30 = lean_unsigned_to_nat(0u); -x_31 = lean_nat_dec_eq(x_29, x_30); -if (x_31 == 0) -{ -lean_object* x_32; -lean_dec(x_9); -lean_dec(x_8); -lean_dec(x_7); -lean_dec(x_6); -lean_dec(x_5); -lean_dec(x_4); -lean_dec(x_3); -lean_dec(x_2); -lean_dec(x_1); -x_32 = lean_alloc_ctor(0, 2, 0); -lean_ctor_set(x_32, 0, x_28); -lean_ctor_set(x_32, 1, x_29); -lean_ctor_set(x_16, 0, x_32); -lean_ctor_set(x_11, 0, x_16); -return x_11; -} -else -{ -lean_object* x_33; lean_object* x_34; -lean_dec(x_29); -lean_dec(x_28); -lean_free_object(x_16); -lean_free_object(x_11); -x_33 = lean_box(0); -x_34 = lean_apply_11(x_15, x_33, x_1, x_2, x_3, x_4, x_5, x_6, x_7, x_8, x_9, x_14); -return x_34; -} -} -} -else -{ -lean_object* x_35; lean_object* x_36; lean_object* x_37; lean_object* x_38; lean_object* x_39; uint8_t x_40; -x_35 = lean_ctor_get(x_16, 0); -lean_inc(x_35); -lean_dec(x_16); -x_36 = lean_ctor_get(x_35, 0); -lean_inc(x_36); -x_37 = lean_ctor_get(x_35, 1); -lean_inc(x_37); -if (lean_is_exclusive(x_35)) { - lean_ctor_release(x_35, 0); - lean_ctor_release(x_35, 1); - x_38 = x_35; -} else { - lean_dec_ref(x_35); - x_38 = lean_box(0); -} -x_39 = lean_unsigned_to_nat(0u); -x_40 = lean_nat_dec_eq(x_37, x_39); -if (x_40 == 0) -{ -lean_object* x_41; lean_object* x_42; -lean_dec(x_9); -lean_dec(x_8); -lean_dec(x_7); -lean_dec(x_6); -lean_dec(x_5); -lean_dec(x_4); -lean_dec(x_3); -lean_dec(x_2); -lean_dec(x_1); -if (lean_is_scalar(x_38)) { - x_41 = lean_alloc_ctor(0, 2, 0); -} else { - x_41 = x_38; -} -lean_ctor_set(x_41, 0, x_36); -lean_ctor_set(x_41, 1, x_37); -x_42 = lean_alloc_ctor(1, 1, 0); -lean_ctor_set(x_42, 0, x_41); -lean_ctor_set(x_11, 0, x_42); -return x_11; -} -else -{ -lean_object* x_43; lean_object* x_44; -lean_dec(x_38); -lean_dec(x_37); -lean_dec(x_36); -lean_free_object(x_11); -x_43 = lean_box(0); -x_44 = lean_apply_11(x_15, x_43, x_1, x_2, x_3, x_4, x_5, x_6, x_7, x_8, x_9, x_14); -return x_44; -} -} -} -} -else -{ -lean_object* x_45; lean_object* x_46; lean_object* x_47; lean_object* x_48; 
-x_45 = lean_ctor_get(x_11, 0); -x_46 = lean_ctor_get(x_11, 1); -lean_inc(x_46); -lean_inc(x_45); -lean_dec(x_11); -x_47 = l_Lean_Meta_Grind_Arith_CommRing_nonzeroChar_x3f___closed__1; -x_48 = lean_ctor_get(x_45, 4); -lean_inc(x_48); -lean_dec(x_45); -if (lean_obj_tag(x_48) == 0) -{ -lean_object* x_49; lean_object* x_50; -x_49 = lean_box(0); -x_50 = lean_apply_11(x_47, x_49, x_1, x_2, x_3, x_4, x_5, x_6, x_7, x_8, x_9, x_46); -return x_50; -} -else -{ -lean_object* x_51; lean_object* x_52; lean_object* x_53; lean_object* x_54; lean_object* x_55; lean_object* x_56; uint8_t x_57; -x_51 = lean_ctor_get(x_48, 0); +x_51 = lean_ctor_get(x_50, 1); lean_inc(x_51); -if (lean_is_exclusive(x_48)) { - lean_ctor_release(x_48, 0); - x_52 = x_48; -} else { - lean_dec_ref(x_48); - x_52 = lean_box(0); +lean_dec(x_50); +x_52 = lean_box(0); +x_53 = lean_apply_2(x_51, lean_box(0), x_52); +x_54 = lean_apply_4(x_2, lean_box(0), lean_box(0), x_53, x_4); +return x_54; } -x_53 = lean_ctor_get(x_51, 0); -lean_inc(x_53); -x_54 = lean_ctor_get(x_51, 1); -lean_inc(x_54); -if (lean_is_exclusive(x_51)) { - lean_ctor_release(x_51, 0); - lean_ctor_release(x_51, 1); - x_55 = x_51; -} else { - lean_dec_ref(x_51); - x_55 = lean_box(0); } -x_56 = lean_unsigned_to_nat(0u); -x_57 = lean_nat_dec_eq(x_54, x_56); -if (x_57 == 0) -{ -lean_object* x_58; lean_object* x_59; lean_object* x_60; -lean_dec(x_9); -lean_dec(x_8); -lean_dec(x_7); -lean_dec(x_6); -lean_dec(x_5); -lean_dec(x_4); -lean_dec(x_3); -lean_dec(x_2); -lean_dec(x_1); -if (lean_is_scalar(x_55)) { - x_58 = lean_alloc_ctor(0, 2, 0); -} else { - x_58 = x_55; } -lean_ctor_set(x_58, 0, x_53); -lean_ctor_set(x_58, 1, x_54); -if (lean_is_scalar(x_52)) { - x_59 = lean_alloc_ctor(1, 1, 0); -} else { - x_59 = x_52; } -lean_ctor_set(x_59, 0, x_58); -x_60 = lean_alloc_ctor(0, 2, 0); -lean_ctor_set(x_60, 0, x_59); -lean_ctor_set(x_60, 1, x_46); -return x_60; } -else +LEAN_EXPORT lean_object* l_Lean_Meta_Grind_Arith_CommRing_nonzeroCharInst_x3f___rarg(lean_object* x_1, lean_object* x_2) { +_start: { -lean_object* x_61; lean_object* x_62; -lean_dec(x_55); -lean_dec(x_54); -lean_dec(x_53); -lean_dec(x_52); -x_61 = lean_box(0); -x_62 = lean_apply_11(x_47, x_61, x_1, x_2, x_3, x_4, x_5, x_6, x_7, x_8, x_9, x_46); -return x_62; -} -} +lean_object* x_3; lean_object* x_4; lean_object* x_5; +x_3 = lean_ctor_get(x_1, 1); +lean_inc(x_3); +lean_inc(x_3); +x_4 = lean_alloc_closure((void*)(l_Lean_Meta_Grind_Arith_CommRing_nonzeroCharInst_x3f___rarg___lambda__1), 3, 2); +lean_closure_set(x_4, 0, x_1); +lean_closure_set(x_4, 1, x_3); +x_5 = lean_apply_4(x_3, lean_box(0), lean_box(0), x_2, x_4); +return x_5; } } -else -{ -uint8_t x_63; -lean_dec(x_9); -lean_dec(x_8); -lean_dec(x_7); -lean_dec(x_6); -lean_dec(x_5); -lean_dec(x_4); -lean_dec(x_3); -lean_dec(x_2); -lean_dec(x_1); -x_63 = !lean_is_exclusive(x_11); -if (x_63 == 0) -{ -return x_11; -} -else +LEAN_EXPORT lean_object* l_Lean_Meta_Grind_Arith_CommRing_nonzeroCharInst_x3f(lean_object* x_1) { +_start: { -lean_object* x_64; lean_object* x_65; lean_object* x_66; -x_64 = lean_ctor_get(x_11, 0); -x_65 = lean_ctor_get(x_11, 1); -lean_inc(x_65); -lean_inc(x_64); -lean_dec(x_11); -x_66 = lean_alloc_ctor(1, 2, 0); -lean_ctor_set(x_66, 0, x_64); -lean_ctor_set(x_66, 1, x_65); -return x_66; -} -} +lean_object* x_2; +x_2 = lean_alloc_closure((void*)(l_Lean_Meta_Grind_Arith_CommRing_nonzeroCharInst_x3f___rarg), 2, 0); +return x_2; } } LEAN_EXPORT lean_object* l_Lean_Meta_Grind_Arith_CommRing_noZeroDivisorsInst_x3f(lean_object* x_1, lean_object* x_2, 
lean_object* x_3, lean_object* x_4, lean_object* x_5, lean_object* x_6, lean_object* x_7, lean_object* x_8, lean_object* x_9, lean_object* x_10) { _start: { lean_object* x_11; -x_11 = l_Lean_Meta_Grind_Arith_CommRing_getRing(x_1, x_2, x_3, x_4, x_5, x_6, x_7, x_8, x_9, x_10); +x_11 = l_Lean_Meta_Grind_Arith_CommRing_RingM_getRing(x_1, x_2, x_3, x_4, x_5, x_6, x_7, x_8, x_9, x_10); if (lean_obj_tag(x_11) == 0) { uint8_t x_12; @@ -3593,7 +3466,7 @@ LEAN_EXPORT lean_object* l_Lean_Meta_Grind_Arith_CommRing_noZeroDivisors(lean_ob _start: { lean_object* x_11; -x_11 = l_Lean_Meta_Grind_Arith_CommRing_getRing(x_1, x_2, x_3, x_4, x_5, x_6, x_7, x_8, x_9, x_10); +x_11 = l_Lean_Meta_Grind_Arith_CommRing_RingM_getRing(x_1, x_2, x_3, x_4, x_5, x_6, x_7, x_8, x_9, x_10); if (lean_obj_tag(x_11) == 0) { lean_object* x_12; lean_object* x_13; @@ -3705,7 +3578,7 @@ LEAN_EXPORT lean_object* l_Lean_Meta_Grind_Arith_CommRing_hasChar(lean_object* x _start: { lean_object* x_11; -x_11 = l_Lean_Meta_Grind_Arith_CommRing_getRing(x_1, x_2, x_3, x_4, x_5, x_6, x_7, x_8, x_9, x_10); +x_11 = l_Lean_Meta_Grind_Arith_CommRing_RingM_getRing(x_1, x_2, x_3, x_4, x_5, x_6, x_7, x_8, x_9, x_10); if (lean_obj_tag(x_11) == 0) { lean_object* x_12; lean_object* x_13; @@ -3872,7 +3745,7 @@ LEAN_EXPORT lean_object* l_Lean_Meta_Grind_Arith_CommRing_getCharInst(lean_objec _start: { lean_object* x_11; -x_11 = l_Lean_Meta_Grind_Arith_CommRing_getRing(x_1, x_2, x_3, x_4, x_5, x_6, x_7, x_8, x_9, x_10); +x_11 = l_Lean_Meta_Grind_Arith_CommRing_RingM_getRing(x_1, x_2, x_3, x_4, x_5, x_6, x_7, x_8, x_9, x_10); if (lean_obj_tag(x_11) == 0) { lean_object* x_12; lean_object* x_13; @@ -3980,11 +3853,239 @@ lean_dec(x_1); return x_11; } } +LEAN_EXPORT lean_object* l_Lean_Meta_Grind_Arith_CommRing_nonzeroChar_x3f___at_Lean_Grind_CommRing_Expr_toPolyM___spec__1___lambda__1(lean_object* x_1, lean_object* x_2, lean_object* x_3, lean_object* x_4, lean_object* x_5, lean_object* x_6, lean_object* x_7, lean_object* x_8, lean_object* x_9, lean_object* x_10, lean_object* x_11) { +_start: +{ +lean_object* x_12; lean_object* x_13; +x_12 = lean_box(0); +x_13 = lean_alloc_ctor(0, 2, 0); +lean_ctor_set(x_13, 0, x_12); +lean_ctor_set(x_13, 1, x_11); +return x_13; +} +} +static lean_object* _init_l_Lean_Meta_Grind_Arith_CommRing_nonzeroChar_x3f___at_Lean_Grind_CommRing_Expr_toPolyM___spec__1___closed__1() { +_start: +{ +lean_object* x_1; +x_1 = lean_alloc_closure((void*)(l_Lean_Meta_Grind_Arith_CommRing_nonzeroChar_x3f___at_Lean_Grind_CommRing_Expr_toPolyM___spec__1___lambda__1___boxed), 11, 0); +return x_1; +} +} +LEAN_EXPORT lean_object* l_Lean_Meta_Grind_Arith_CommRing_nonzeroChar_x3f___at_Lean_Grind_CommRing_Expr_toPolyM___spec__1(lean_object* x_1, lean_object* x_2, lean_object* x_3, lean_object* x_4, lean_object* x_5, lean_object* x_6, lean_object* x_7, lean_object* x_8, lean_object* x_9, lean_object* x_10) { +_start: +{ +lean_object* x_11; +x_11 = l_Lean_Meta_Grind_Arith_CommRing_RingM_getRing(x_1, x_2, x_3, x_4, x_5, x_6, x_7, x_8, x_9, x_10); +if (lean_obj_tag(x_11) == 0) +{ +uint8_t x_12; +x_12 = !lean_is_exclusive(x_11); +if (x_12 == 0) +{ +lean_object* x_13; lean_object* x_14; lean_object* x_15; lean_object* x_16; +x_13 = lean_ctor_get(x_11, 0); +x_14 = lean_ctor_get(x_11, 1); +x_15 = l_Lean_Meta_Grind_Arith_CommRing_nonzeroChar_x3f___at_Lean_Grind_CommRing_Expr_toPolyM___spec__1___closed__1; +x_16 = lean_ctor_get(x_13, 4); +lean_inc(x_16); +lean_dec(x_13); +if (lean_obj_tag(x_16) == 0) +{ +lean_object* x_17; lean_object* x_18; 
+lean_free_object(x_11); +x_17 = lean_box(0); +x_18 = lean_apply_11(x_15, x_17, x_1, x_2, x_3, x_4, x_5, x_6, x_7, x_8, x_9, x_14); +return x_18; +} +else +{ +uint8_t x_19; +x_19 = !lean_is_exclusive(x_16); +if (x_19 == 0) +{ +lean_object* x_20; lean_object* x_21; lean_object* x_22; uint8_t x_23; +x_20 = lean_ctor_get(x_16, 0); +x_21 = lean_ctor_get(x_20, 1); +lean_inc(x_21); +lean_dec(x_20); +x_22 = lean_unsigned_to_nat(0u); +x_23 = lean_nat_dec_eq(x_21, x_22); +if (x_23 == 0) +{ +lean_dec(x_9); +lean_dec(x_8); +lean_dec(x_7); +lean_dec(x_6); +lean_dec(x_5); +lean_dec(x_4); +lean_dec(x_3); +lean_dec(x_2); +lean_dec(x_1); +lean_ctor_set(x_16, 0, x_21); +lean_ctor_set(x_11, 0, x_16); +return x_11; +} +else +{ +lean_object* x_24; lean_object* x_25; +lean_dec(x_21); +lean_free_object(x_16); +lean_free_object(x_11); +x_24 = lean_box(0); +x_25 = lean_apply_11(x_15, x_24, x_1, x_2, x_3, x_4, x_5, x_6, x_7, x_8, x_9, x_14); +return x_25; +} +} +else +{ +lean_object* x_26; lean_object* x_27; lean_object* x_28; uint8_t x_29; +x_26 = lean_ctor_get(x_16, 0); +lean_inc(x_26); +lean_dec(x_16); +x_27 = lean_ctor_get(x_26, 1); +lean_inc(x_27); +lean_dec(x_26); +x_28 = lean_unsigned_to_nat(0u); +x_29 = lean_nat_dec_eq(x_27, x_28); +if (x_29 == 0) +{ +lean_object* x_30; +lean_dec(x_9); +lean_dec(x_8); +lean_dec(x_7); +lean_dec(x_6); +lean_dec(x_5); +lean_dec(x_4); +lean_dec(x_3); +lean_dec(x_2); +lean_dec(x_1); +x_30 = lean_alloc_ctor(1, 1, 0); +lean_ctor_set(x_30, 0, x_27); +lean_ctor_set(x_11, 0, x_30); +return x_11; +} +else +{ +lean_object* x_31; lean_object* x_32; +lean_dec(x_27); +lean_free_object(x_11); +x_31 = lean_box(0); +x_32 = lean_apply_11(x_15, x_31, x_1, x_2, x_3, x_4, x_5, x_6, x_7, x_8, x_9, x_14); +return x_32; +} +} +} +} +else +{ +lean_object* x_33; lean_object* x_34; lean_object* x_35; lean_object* x_36; +x_33 = lean_ctor_get(x_11, 0); +x_34 = lean_ctor_get(x_11, 1); +lean_inc(x_34); +lean_inc(x_33); +lean_dec(x_11); +x_35 = l_Lean_Meta_Grind_Arith_CommRing_nonzeroChar_x3f___at_Lean_Grind_CommRing_Expr_toPolyM___spec__1___closed__1; +x_36 = lean_ctor_get(x_33, 4); +lean_inc(x_36); +lean_dec(x_33); +if (lean_obj_tag(x_36) == 0) +{ +lean_object* x_37; lean_object* x_38; +x_37 = lean_box(0); +x_38 = lean_apply_11(x_35, x_37, x_1, x_2, x_3, x_4, x_5, x_6, x_7, x_8, x_9, x_34); +return x_38; +} +else +{ +lean_object* x_39; lean_object* x_40; lean_object* x_41; lean_object* x_42; uint8_t x_43; +x_39 = lean_ctor_get(x_36, 0); +lean_inc(x_39); +if (lean_is_exclusive(x_36)) { + lean_ctor_release(x_36, 0); + x_40 = x_36; +} else { + lean_dec_ref(x_36); + x_40 = lean_box(0); +} +x_41 = lean_ctor_get(x_39, 1); +lean_inc(x_41); +lean_dec(x_39); +x_42 = lean_unsigned_to_nat(0u); +x_43 = lean_nat_dec_eq(x_41, x_42); +if (x_43 == 0) +{ +lean_object* x_44; lean_object* x_45; +lean_dec(x_9); +lean_dec(x_8); +lean_dec(x_7); +lean_dec(x_6); +lean_dec(x_5); +lean_dec(x_4); +lean_dec(x_3); +lean_dec(x_2); +lean_dec(x_1); +if (lean_is_scalar(x_40)) { + x_44 = lean_alloc_ctor(1, 1, 0); +} else { + x_44 = x_40; +} +lean_ctor_set(x_44, 0, x_41); +x_45 = lean_alloc_ctor(0, 2, 0); +lean_ctor_set(x_45, 0, x_44); +lean_ctor_set(x_45, 1, x_34); +return x_45; +} +else +{ +lean_object* x_46; lean_object* x_47; +lean_dec(x_41); +lean_dec(x_40); +x_46 = lean_box(0); +x_47 = lean_apply_11(x_35, x_46, x_1, x_2, x_3, x_4, x_5, x_6, x_7, x_8, x_9, x_34); +return x_47; +} +} +} +} +else +{ +uint8_t x_48; +lean_dec(x_9); +lean_dec(x_8); +lean_dec(x_7); +lean_dec(x_6); +lean_dec(x_5); +lean_dec(x_4); +lean_dec(x_3); 
+lean_dec(x_2); +lean_dec(x_1); +x_48 = !lean_is_exclusive(x_11); +if (x_48 == 0) +{ +return x_11; +} +else +{ +lean_object* x_49; lean_object* x_50; lean_object* x_51; +x_49 = lean_ctor_get(x_11, 0); +x_50 = lean_ctor_get(x_11, 1); +lean_inc(x_50); +lean_inc(x_49); +lean_dec(x_11); +x_51 = lean_alloc_ctor(1, 2, 0); +lean_ctor_set(x_51, 0, x_49); +lean_ctor_set(x_51, 1, x_50); +return x_51; +} +} +} +} LEAN_EXPORT lean_object* l_Lean_Grind_CommRing_Expr_toPolyM(lean_object* x_1, lean_object* x_2, lean_object* x_3, lean_object* x_4, lean_object* x_5, lean_object* x_6, lean_object* x_7, lean_object* x_8, lean_object* x_9, lean_object* x_10, lean_object* x_11) { _start: { lean_object* x_12; -x_12 = l_Lean_Meta_Grind_Arith_CommRing_nonzeroChar_x3f(x_2, x_3, x_4, x_5, x_6, x_7, x_8, x_9, x_10, x_11); +x_12 = l_Lean_Meta_Grind_Arith_CommRing_nonzeroChar_x3f___at_Lean_Grind_CommRing_Expr_toPolyM___spec__1(x_2, x_3, x_4, x_5, x_6, x_7, x_8, x_9, x_10, x_11); if (lean_obj_tag(x_12) == 0) { lean_object* x_13; @@ -4074,11 +4175,29 @@ return x_31; } } } +LEAN_EXPORT lean_object* l_Lean_Meta_Grind_Arith_CommRing_nonzeroChar_x3f___at_Lean_Grind_CommRing_Expr_toPolyM___spec__1___lambda__1___boxed(lean_object* x_1, lean_object* x_2, lean_object* x_3, lean_object* x_4, lean_object* x_5, lean_object* x_6, lean_object* x_7, lean_object* x_8, lean_object* x_9, lean_object* x_10, lean_object* x_11) { +_start: +{ +lean_object* x_12; +x_12 = l_Lean_Meta_Grind_Arith_CommRing_nonzeroChar_x3f___at_Lean_Grind_CommRing_Expr_toPolyM___spec__1___lambda__1(x_1, x_2, x_3, x_4, x_5, x_6, x_7, x_8, x_9, x_10, x_11); +lean_dec(x_10); +lean_dec(x_9); +lean_dec(x_8); +lean_dec(x_7); +lean_dec(x_6); +lean_dec(x_5); +lean_dec(x_4); +lean_dec(x_3); +lean_dec(x_2); +lean_dec(x_1); +return x_12; +} +} LEAN_EXPORT lean_object* l_Lean_Grind_CommRing_Poly_mulConstM(lean_object* x_1, lean_object* x_2, lean_object* x_3, lean_object* x_4, lean_object* x_5, lean_object* x_6, lean_object* x_7, lean_object* x_8, lean_object* x_9, lean_object* x_10, lean_object* x_11, lean_object* x_12) { _start: { lean_object* x_13; -x_13 = l_Lean_Meta_Grind_Arith_CommRing_nonzeroChar_x3f(x_3, x_4, x_5, x_6, x_7, x_8, x_9, x_10, x_11, x_12); +x_13 = l_Lean_Meta_Grind_Arith_CommRing_nonzeroChar_x3f___at_Lean_Grind_CommRing_Expr_toPolyM___spec__1(x_3, x_4, x_5, x_6, x_7, x_8, x_9, x_10, x_11, x_12); if (lean_obj_tag(x_13) == 0) { uint8_t x_14; @@ -4144,7 +4263,7 @@ LEAN_EXPORT lean_object* l_Lean_Grind_CommRing_Poly_mulMonM(lean_object* x_1, le _start: { lean_object* x_14; -x_14 = l_Lean_Meta_Grind_Arith_CommRing_nonzeroChar_x3f(x_4, x_5, x_6, x_7, x_8, x_9, x_10, x_11, x_12, x_13); +x_14 = l_Lean_Meta_Grind_Arith_CommRing_nonzeroChar_x3f___at_Lean_Grind_CommRing_Expr_toPolyM___spec__1(x_4, x_5, x_6, x_7, x_8, x_9, x_10, x_11, x_12, x_13); if (lean_obj_tag(x_14) == 0) { uint8_t x_15; @@ -4211,7 +4330,7 @@ LEAN_EXPORT lean_object* l_Lean_Grind_CommRing_Poly_mulM(lean_object* x_1, lean_ _start: { lean_object* x_13; -x_13 = l_Lean_Meta_Grind_Arith_CommRing_nonzeroChar_x3f(x_3, x_4, x_5, x_6, x_7, x_8, x_9, x_10, x_11, x_12); +x_13 = l_Lean_Meta_Grind_Arith_CommRing_nonzeroChar_x3f___at_Lean_Grind_CommRing_Expr_toPolyM___spec__1(x_3, x_4, x_5, x_6, x_7, x_8, x_9, x_10, x_11, x_12); if (lean_obj_tag(x_13) == 0) { lean_object* x_14; @@ -4306,7 +4425,7 @@ LEAN_EXPORT lean_object* l_Lean_Grind_CommRing_Poly_combineM(lean_object* x_1, l _start: { lean_object* x_13; -x_13 = l_Lean_Meta_Grind_Arith_CommRing_nonzeroChar_x3f(x_3, x_4, x_5, x_6, x_7, x_8, x_9, x_10, x_11, 
x_12); +x_13 = l_Lean_Meta_Grind_Arith_CommRing_nonzeroChar_x3f___at_Lean_Grind_CommRing_Expr_toPolyM___spec__1(x_3, x_4, x_5, x_6, x_7, x_8, x_9, x_10, x_11, x_12); if (lean_obj_tag(x_13) == 0) { uint8_t x_14; @@ -4364,7 +4483,7 @@ LEAN_EXPORT lean_object* l_Lean_Grind_CommRing_Poly_spolM(lean_object* x_1, lean _start: { lean_object* x_13; -x_13 = l_Lean_Meta_Grind_Arith_CommRing_nonzeroChar_x3f(x_3, x_4, x_5, x_6, x_7, x_8, x_9, x_10, x_11, x_12); +x_13 = l_Lean_Meta_Grind_Arith_CommRing_nonzeroChar_x3f___at_Lean_Grind_CommRing_Expr_toPolyM___spec__1(x_3, x_4, x_5, x_6, x_7, x_8, x_9, x_10, x_11, x_12); if (lean_obj_tag(x_13) == 0) { lean_object* x_14; @@ -4487,7 +4606,7 @@ LEAN_EXPORT lean_object* l_Lean_Meta_Grind_Arith_CommRing_isQueueEmpty(lean_obje _start: { lean_object* x_11; -x_11 = l_Lean_Meta_Grind_Arith_CommRing_getRing(x_1, x_2, x_3, x_4, x_5, x_6, x_7, x_8, x_9, x_10); +x_11 = l_Lean_Meta_Grind_Arith_CommRing_RingM_getRing(x_1, x_2, x_3, x_4, x_5, x_6, x_7, x_8, x_9, x_10); if (lean_obj_tag(x_11) == 0) { lean_object* x_12; lean_object* x_13; @@ -4762,7 +4881,7 @@ LEAN_EXPORT lean_object* l_Lean_Meta_Grind_Arith_CommRing_getNext_x3f(lean_objec _start: { lean_object* x_11; -x_11 = l_Lean_Meta_Grind_Arith_CommRing_getRing(x_1, x_2, x_3, x_4, x_5, x_6, x_7, x_8, x_9, x_10); +x_11 = l_Lean_Meta_Grind_Arith_CommRing_RingM_getRing(x_1, x_2, x_3, x_4, x_5, x_6, x_7, x_8, x_9, x_10); if (lean_obj_tag(x_11) == 0) { uint8_t x_12; @@ -6508,10 +6627,14 @@ lean_dec_ref(res); res = initialize_Lean_Meta_Tactic_Grind_Arith_CommRing_Poly(builtin, lean_io_mk_world()); if (lean_io_result_is_error(res)) return res; lean_dec_ref(res); -l_Lean_Meta_Grind_Arith_CommRing_getRing___closed__1 = _init_l_Lean_Meta_Grind_Arith_CommRing_getRing___closed__1(); -lean_mark_persistent(l_Lean_Meta_Grind_Arith_CommRing_getRing___closed__1); -l_Lean_Meta_Grind_Arith_CommRing_getRing___closed__2 = _init_l_Lean_Meta_Grind_Arith_CommRing_getRing___closed__2(); -lean_mark_persistent(l_Lean_Meta_Grind_Arith_CommRing_getRing___closed__2); +l_Lean_Meta_Grind_Arith_CommRing_RingM_getRing___closed__1 = _init_l_Lean_Meta_Grind_Arith_CommRing_RingM_getRing___closed__1(); +lean_mark_persistent(l_Lean_Meta_Grind_Arith_CommRing_RingM_getRing___closed__1); +l_Lean_Meta_Grind_Arith_CommRing_RingM_getRing___closed__2 = _init_l_Lean_Meta_Grind_Arith_CommRing_RingM_getRing___closed__2(); +lean_mark_persistent(l_Lean_Meta_Grind_Arith_CommRing_RingM_getRing___closed__2); +l_Lean_Meta_Grind_Arith_CommRing_instMonadGetRingRingM___closed__1 = _init_l_Lean_Meta_Grind_Arith_CommRing_instMonadGetRingRingM___closed__1(); +lean_mark_persistent(l_Lean_Meta_Grind_Arith_CommRing_instMonadGetRingRingM___closed__1); +l_Lean_Meta_Grind_Arith_CommRing_instMonadGetRingRingM = _init_l_Lean_Meta_Grind_Arith_CommRing_instMonadGetRingRingM(); +lean_mark_persistent(l_Lean_Meta_Grind_Arith_CommRing_instMonadGetRingRingM); l_Lean_PersistentHashMap_findAux___at_Lean_Meta_Grind_Arith_CommRing_getTermRingId_x3f___spec__2___closed__1 = _init_l_Lean_PersistentHashMap_findAux___at_Lean_Meta_Grind_Arith_CommRing_getTermRingId_x3f___spec__2___closed__1(); l_Lean_PersistentHashMap_findAux___at_Lean_Meta_Grind_Arith_CommRing_getTermRingId_x3f___spec__2___closed__2 = _init_l_Lean_PersistentHashMap_findAux___at_Lean_Meta_Grind_Arith_CommRing_getTermRingId_x3f___spec__2___closed__2(); l_Lean_PersistentHashMap_insertAux___at_Lean_Meta_Grind_Arith_CommRing_setTermRingId___spec__2___closed__1 = 
_init_l_Lean_PersistentHashMap_insertAux___at_Lean_Meta_Grind_Arith_CommRing_setTermRingId___spec__2___closed__1(); @@ -6526,12 +6649,12 @@ l_Lean_Meta_Grind_Arith_CommRing_setTermRingId___closed__4 = _init_l_Lean_Meta_G lean_mark_persistent(l_Lean_Meta_Grind_Arith_CommRing_setTermRingId___closed__4); l_Lean_Meta_Grind_Arith_CommRing_setTermRingId___closed__5 = _init_l_Lean_Meta_Grind_Arith_CommRing_setTermRingId___closed__5(); lean_mark_persistent(l_Lean_Meta_Grind_Arith_CommRing_setTermRingId___closed__5); -l_Lean_Meta_Grind_Arith_CommRing_nonzeroChar_x3f___closed__1 = _init_l_Lean_Meta_Grind_Arith_CommRing_nonzeroChar_x3f___closed__1(); -lean_mark_persistent(l_Lean_Meta_Grind_Arith_CommRing_nonzeroChar_x3f___closed__1); l_Lean_Meta_Grind_Arith_CommRing_getCharInst___closed__1 = _init_l_Lean_Meta_Grind_Arith_CommRing_getCharInst___closed__1(); lean_mark_persistent(l_Lean_Meta_Grind_Arith_CommRing_getCharInst___closed__1); l_Lean_Meta_Grind_Arith_CommRing_getCharInst___closed__2 = _init_l_Lean_Meta_Grind_Arith_CommRing_getCharInst___closed__2(); lean_mark_persistent(l_Lean_Meta_Grind_Arith_CommRing_getCharInst___closed__2); +l_Lean_Meta_Grind_Arith_CommRing_nonzeroChar_x3f___at_Lean_Grind_CommRing_Expr_toPolyM___spec__1___closed__1 = _init_l_Lean_Meta_Grind_Arith_CommRing_nonzeroChar_x3f___at_Lean_Grind_CommRing_Expr_toPolyM___spec__1___closed__1(); +lean_mark_persistent(l_Lean_Meta_Grind_Arith_CommRing_nonzeroChar_x3f___at_Lean_Grind_CommRing_Expr_toPolyM___spec__1___closed__1); return lean_io_result_mk_ok(lean_box(0)); } #ifdef __cplusplus diff --git a/stage0/stdlib/Lean/Meta/Tactic/Grind/Arith/CommRing/Var.c b/stage0/stdlib/Lean/Meta/Tactic/Grind/Arith/CommRing/Var.c index 5591ed4c55c2..ad600fa200bb 100644 --- a/stage0/stdlib/Lean/Meta/Tactic/Grind/Arith/CommRing/Var.c +++ b/stage0/stdlib/Lean/Meta/Tactic/Grind/Arith/CommRing/Var.c @@ -17,7 +17,6 @@ LEAN_EXPORT lean_object* l_Lean_PersistentHashMap_insertAux___at_Lean_Meta_Grind lean_object* l_Lean_Meta_Grind_markAsCommRingTerm(lean_object*, lean_object*, lean_object*, lean_object*, lean_object*, lean_object*, lean_object*, lean_object*, lean_object*, lean_object*); size_t lean_usize_shift_right(size_t, size_t); uint8_t lean_usize_dec_le(size_t, size_t); -lean_object* l_Lean_Meta_Grind_Arith_CommRing_getRing(lean_object*, lean_object*, lean_object*, lean_object*, lean_object*, lean_object*, lean_object*, lean_object*, lean_object*, lean_object*); LEAN_EXPORT lean_object* l_Lean_PersistentHashMap_insertAux_traverse___at_Lean_Meta_Grind_Arith_CommRing_mkVar___spec__3(size_t, lean_object*, lean_object*, lean_object*, lean_object*, lean_object*); size_t lean_uint64_to_usize(uint64_t); LEAN_EXPORT lean_object* l_Lean_PersistentHashMap_insertAux_traverse___at_Lean_Meta_Grind_Arith_CommRing_mkVar___spec__3___boxed(lean_object*, lean_object*, lean_object*, lean_object*, lean_object*, lean_object*); @@ -48,6 +47,7 @@ static size_t l_Lean_PersistentHashMap_insertAux___at_Lean_Meta_Grind_Arith_Comm size_t lean_usize_sub(size_t, size_t); uint64_t l_Lean_Meta_Grind_instHashableENodeKey_unsafe__1(lean_object*); size_t lean_usize_add(size_t, size_t); +lean_object* l_Lean_Meta_Grind_Arith_CommRing_RingM_getRing(lean_object*, lean_object*, lean_object*, lean_object*, lean_object*, lean_object*, lean_object*, lean_object*, lean_object*, lean_object*); lean_object* lean_st_ref_set(lean_object*, lean_object*, lean_object*); size_t lean_usize_shift_left(size_t, size_t); LEAN_EXPORT lean_object* 
l_Lean_PersistentHashMap_find_x3f___at_Lean_Meta_Grind_Arith_CommRing_mkVar___spec__5(lean_object*, lean_object*); @@ -1682,7 +1682,7 @@ LEAN_EXPORT lean_object* l_Lean_Meta_Grind_Arith_CommRing_mkVar(lean_object* x_1 _start: { lean_object* x_12; -x_12 = l_Lean_Meta_Grind_Arith_CommRing_getRing(x_2, x_3, x_4, x_5, x_6, x_7, x_8, x_9, x_10, x_11); +x_12 = l_Lean_Meta_Grind_Arith_CommRing_RingM_getRing(x_2, x_3, x_4, x_5, x_6, x_7, x_8, x_9, x_10, x_11); if (lean_obj_tag(x_12) == 0) { uint8_t x_13; diff --git a/stage0/stdlib/Lean/Meta/Tactic/Grind/PP.c b/stage0/stdlib/Lean/Meta/Tactic/Grind/PP.c index 3b8f9b5c8e46..94b327187020 100644 --- a/stage0/stdlib/Lean/Meta/Tactic/Grind/PP.c +++ b/stage0/stdlib/Lean/Meta/Tactic/Grind/PP.c @@ -1,6 +1,6 @@ // Lean compiler output // Module: Lean.Meta.Tactic.Grind.PP -// Imports: Init.Grind.Util Init.Grind.PP Lean.Meta.Tactic.Grind.Types Lean.Meta.Tactic.Grind.Arith.Model +// Imports: Init.Grind.Util Init.Grind.PP Lean.Meta.Tactic.Grind.Types Lean.Meta.Tactic.Grind.Arith.Model Lean.Meta.Tactic.Grind.Arith.CommRing.PP #include <lean/lean.h> #if defined(__clang__) #pragma clang diagnostic ignored "-Wunused-parameter" @@ -55,6 +55,7 @@ LEAN_EXPORT lean_object* l_Array_foldlMUnsafe_fold___at___private_Lean_Meta_Tact static lean_object* l___private_Lean_Meta_Tactic_Grind_PP_0__Lean_Meta_Grind_ppThresholds___lambda__4___closed__1; uint8_t l_Lean_Expr_isApp(lean_object*); static lean_object* l_List_forIn_x27_loop___at___private_Lean_Meta_Tactic_Grind_PP_0__Lean_Meta_Grind_ppCasesTrace___spec__1___closed__1; +static lean_object* l___private_Lean_Meta_Tactic_Grind_PP_0__Lean_Meta_Grind_ppEqcs___closed__3; static lean_object* l_Lean_Meta_Grind_Goal_ppENodeRef___closed__7; LEAN_EXPORT lean_object* l___private_Lean_Meta_Tactic_Grind_PP_0__Lean_Meta_Grind_ppThresholds(lean_object*, lean_object*, lean_object*, lean_object*, lean_object*, lean_object*, lean_object*, lean_object*); LEAN_EXPORT lean_object* l___private_Lean_Meta_Tactic_Grind_PP_0__Lean_Meta_Grind_ppOffset___lambda__3(lean_object*, lean_object*, lean_object*, lean_object*, lean_object*, lean_object*, lean_object*, lean_object*); @@ -71,6 +72,7 @@ LEAN_EXPORT lean_object* l_Lean_PersistentHashMap_isEmpty___at___private_Lean_Me static lean_object* l_List_forIn_x27_loop___at___private_Lean_Meta_Tactic_Grind_PP_0__Lean_Meta_Grind_ppEqcs___spec__3___closed__13; static lean_object* l_List_forIn_x27_loop___at___private_Lean_Meta_Tactic_Grind_PP_0__Lean_Meta_Grind_ppEqcs___spec__3___closed__9; LEAN_EXPORT lean_object* l___private_Lean_Meta_Tactic_Grind_PP_0__Lean_Meta_Grind_ppCutsat___lambda__2(lean_object*, lean_object*, lean_object*, lean_object*, lean_object*, lean_object*, lean_object*, lean_object*, lean_object*); +static double l_Lean_Meta_Grind_ppExprArray___closed__1; lean_object* lean_mk_array(lean_object*, lean_object*); LEAN_EXPORT lean_object* l_Lean_PersistentArray_forIn___at_Lean_Meta_Grind_Goal_ppState___spec__1___boxed(lean_object*, lean_object*, lean_object*, lean_object*, lean_object*, lean_object*, lean_object*, lean_object*); LEAN_EXPORT lean_object* l___private_Lean_Meta_Tactic_Grind_PP_0__Lean_Meta_Grind_ppEqcs___lambda__2(lean_object*, lean_object*, lean_object*, lean_object*, lean_object*, lean_object*, lean_object*, lean_object*, lean_object*, lean_object*); @@ -80,6 +82,7 @@ LEAN_EXPORT lean_object* l___private_Lean_Data_PersistentArray_0__Lean_Persisten static lean_object* l___private_Lean_Meta_Tactic_Grind_PP_0__Lean_Meta_Grind_ppThresholds___closed__2; static lean_object* 
l___private_Lean_Meta_Tactic_Grind_PP_0__Lean_Meta_Grind_ppActiveTheoremPatterns___closed__3; lean_object* l_List_mapTR_loop___at_Lean_Meta_Grind_mkEMatchTheoremCore___spec__2(lean_object*, lean_object*); +lean_object* l_Lean_Meta_Grind_Arith_CommRing_pp_x3f(lean_object*, lean_object*, lean_object*, lean_object*, lean_object*, lean_object*); static lean_object* l___private_Lean_Meta_Tactic_Grind_PP_0__Lean_Meta_Grind_ppThresholds___lambda__1___closed__4; static lean_object* l_List_forIn_x27_loop___at_Lean_Meta_Grind_Goal_ppState___spec__7___closed__4; LEAN_EXPORT lean_object* l_List_forIn_x27_loop___at___private_Lean_Meta_Tactic_Grind_PP_0__Lean_Meta_Grind_ppCasesTrace___spec__1(lean_object*, lean_object*, lean_object*, lean_object*, lean_object*, lean_object*, lean_object*, lean_object*, lean_object*, lean_object*, lean_object*, lean_object*, lean_object*, lean_object*); @@ -143,7 +146,6 @@ static lean_object* l___private_Lean_Meta_Tactic_Grind_PP_0__Lean_Meta_Grind_Goa static lean_object* l___private_Lean_Meta_Tactic_Grind_PP_0__Lean_Meta_Grind_ppCasesTrace___closed__1; static lean_object* l___private_Lean_Meta_Tactic_Grind_PP_0__Lean_Meta_Grind_ppThresholds___lambda__1___closed__3; LEAN_EXPORT lean_object* l___private_Lean_Meta_Tactic_Grind_PP_0__Lean_Meta_Grind_ppThresholds___lambda__2___boxed(lean_object*, lean_object*, lean_object*, lean_object*, lean_object*, lean_object*, lean_object*, lean_object*, lean_object*, lean_object*, lean_object*, lean_object*); -static double l_Array_mapMUnsafe_map___at___private_Lean_Meta_Tactic_Grind_PP_0__Lean_Meta_Grind_ppExprArray___spec__1___closed__1; LEAN_EXPORT lean_object* l_Array_foldlMUnsafe_fold___at___private_Lean_Meta_Tactic_Grind_PP_0__Lean_Meta_Grind_ppThresholds___spec__8___boxed(lean_object*, lean_object*, lean_object*, lean_object*, lean_object*); static lean_object* l_Lean_Meta_Grind_Goal_ppENodeRef___closed__1; LEAN_EXPORT lean_object* l_Lean_PersistentArray_foldlM___at___private_Lean_Meta_Tactic_Grind_PP_0__Lean_Meta_Grind_ppThresholds___spec__1(lean_object*, lean_object*, lean_object*, lean_object*); @@ -166,16 +168,17 @@ static lean_object* l___private_Lean_Meta_Tactic_Grind_PP_0__Lean_Meta_Grind_ppC lean_object* lean_st_ref_get(lean_object*, lean_object*); static lean_object* l_List_forIn_x27_loop___at___private_Lean_Meta_Tactic_Grind_PP_0__Lean_Meta_Grind_ppEqcs___spec__3___closed__12; uint8_t l_List_isEmpty___rarg(lean_object*); +LEAN_EXPORT lean_object* l_Array_mapMUnsafe_map___at_Lean_Meta_Grind_ppExprArray___spec__1(lean_object*, size_t, size_t, lean_object*); LEAN_EXPORT lean_object* l_Array_forIn_x27Unsafe_loop___at_Lean_Meta_Grind_Goal_ppState___spec__3___boxed(lean_object*, lean_object*, lean_object*, lean_object*, lean_object*, lean_object*, lean_object*, lean_object*, lean_object*, lean_object*, lean_object*, lean_object*, lean_object*, lean_object*); static lean_object* l_Lean_Meta_Grind_Goal_ppState___closed__1; LEAN_EXPORT lean_object* l___private_Lean_Meta_Tactic_Grind_PP_0__Lean_Meta_Grind_ppCutsat___lambda__1(lean_object*, lean_object*, lean_object*, lean_object*, lean_object*, lean_object*, lean_object*, lean_object*, lean_object*); +LEAN_EXPORT lean_object* l___private_Lean_Meta_Tactic_Grind_PP_0__Lean_Meta_Grind_ppCommRing___boxed(lean_object*, lean_object*, lean_object*, lean_object*, lean_object*, lean_object*, lean_object*); extern lean_object* l_Lean_levelZero; static lean_object* l___private_Lean_Meta_Tactic_Grind_PP_0__Lean_Meta_Grind_ppOffset___lambda__1___closed__6; LEAN_EXPORT lean_object* 
l_Lean_Meta_Grind_goalToMessageData_go___boxed(lean_object*, lean_object*, lean_object*, lean_object*, lean_object*, lean_object*, lean_object*, lean_object*, lean_object*); static lean_object* l___private_Lean_Meta_Tactic_Grind_PP_0__Lean_Meta_Grind_ppThresholds___lambda__2___closed__2; LEAN_EXPORT lean_object* l___private_Lean_Meta_Tactic_Grind_PP_0__Lean_Meta_Grind_pushMsg(lean_object*, lean_object*, lean_object*, lean_object*, lean_object*, lean_object*, lean_object*, lean_object*); static lean_object* l_Lean_Meta_Grind_goalToMessageData___lambda__1___closed__2; -static lean_object* l_Array_mapMUnsafe_map___at___private_Lean_Meta_Tactic_Grind_PP_0__Lean_Meta_Grind_ppExprArray___spec__1___closed__2; LEAN_EXPORT lean_object* l_Array_forIn_x27Unsafe_loop___at___private_Lean_Meta_Tactic_Grind_PP_0__Lean_Meta_Grind_ppOffset___spec__1(lean_object*, lean_object*, lean_object*, lean_object*, size_t, size_t, lean_object*, lean_object*, lean_object*, lean_object*, lean_object*, lean_object*, lean_object*, lean_object*); static lean_object* l_Array_forIn_x27Unsafe_loop___at___private_Lean_Meta_Tactic_Grind_PP_0__Lean_Meta_Grind_ppOffset___spec__1___closed__3; static lean_object* l___private_Lean_Meta_Tactic_Grind_PP_0__Lean_Meta_Grind_ppThresholds___lambda__3___closed__1; @@ -224,6 +227,7 @@ LEAN_EXPORT lean_object* l___private_Lean_Meta_Tactic_Grind_PP_0__Lean_Meta_Grin lean_object* lean_nat_abs(lean_object*); static lean_object* l___private_Lean_Meta_Tactic_Grind_PP_0__Lean_Meta_Grind_ppCutsat___lambda__1___closed__5; static lean_object* l___private_Lean_Meta_Tactic_Grind_PP_0__Lean_Meta_Grind_Goal_ppENodeDecl___lambda__4___closed__2; +lean_object* l_Lean_toTraceElem___at_Lean_Meta_Grind_Arith_CommRing_ppBasis_x3f___spec__10(lean_object*, lean_object*); static lean_object* l___private_Lean_Meta_Tactic_Grind_PP_0__Lean_Meta_Grind_ppActiveTheoremPatterns___closed__6; lean_object* l_Lean_Expr_isTrue___boxed(lean_object*); static lean_object* l_List_forIn_x27_loop___at___private_Lean_Meta_Tactic_Grind_PP_0__Lean_Meta_Grind_ppCasesTrace___spec__1___closed__7; @@ -233,9 +237,11 @@ LEAN_EXPORT lean_object* l_List_forIn_x27_loop___at___private_Lean_Meta_Tactic_G static lean_object* l___private_Lean_Meta_Tactic_Grind_PP_0__Lean_Meta_Grind_Goal_ppENodeDecl___lambda__3___closed__3; uint8_t lean_nat_dec_eq(lean_object*, lean_object*); lean_object* l_Lean_mkApp3(lean_object*, lean_object*, lean_object*, lean_object*); +LEAN_EXPORT lean_object* l___private_Lean_Meta_Tactic_Grind_PP_0__Lean_Meta_Grind_ppCommRing(lean_object*, lean_object*, lean_object*, lean_object*, lean_object*, lean_object*, lean_object*); static lean_object* l_Lean_Meta_Grind_goalToMessageData___lambda__1___closed__4; lean_object* l_Lean_Meta_Grind_Goal_getEqcs(lean_object*); static lean_object* l___private_Lean_Meta_Tactic_Grind_PP_0__Lean_Meta_Grind_ppThresholds___lambda__5___closed__2; +LEAN_EXPORT lean_object* l_Array_mapMUnsafe_map___at_Lean_Meta_Grind_ppExprArray___spec__1___boxed(lean_object*, lean_object*, lean_object*, lean_object*); uint8_t lean_nat_dec_lt(lean_object*, lean_object*); static lean_object* l_List_forIn_x27_loop___at___private_Lean_Meta_Tactic_Grind_PP_0__Lean_Meta_Grind_ppCasesTrace___spec__1___closed__2; LEAN_EXPORT lean_object* l___private_Lean_Meta_Tactic_Grind_PP_0__Lean_Meta_Grind_ppThresholds___lambda__1(lean_object*, lean_object*, lean_object*, lean_object*, lean_object*, lean_object*, lean_object*, lean_object*, lean_object*); @@ -274,11 +280,9 @@ LEAN_EXPORT lean_object* 
l___private_Lean_Meta_Tactic_Grind_PP_0__Lean_Meta_Grin lean_object* l_Lean_Meta_Grind_Goal_getENode_x3f(lean_object*, lean_object*); size_t lean_usize_sub(size_t, size_t); lean_object* lean_array_mk(lean_object*); -LEAN_EXPORT lean_object* l___private_Lean_Meta_Tactic_Grind_PP_0__Lean_Meta_Grind_ppExprArray(lean_object*, lean_object*, lean_object*, lean_object*); uint8_t l_Lean_Expr_isTrue(lean_object*); LEAN_EXPORT lean_object* l_Array_forIn_x27Unsafe_loop___at___private_Lean_Meta_Tactic_Grind_PP_0__Lean_Meta_Grind_ppCutsat___spec__1(lean_object*, lean_object*, lean_object*, lean_object*, size_t, size_t, lean_object*, lean_object*, lean_object*, lean_object*, lean_object*, lean_object*, lean_object*, lean_object*); static lean_object* l_Array_forIn_x27Unsafe_loop___at___private_Lean_Meta_Tactic_Grind_PP_0__Lean_Meta_Grind_ppOffset___spec__1___closed__1; -LEAN_EXPORT lean_object* l_Array_mapMUnsafe_map___at___private_Lean_Meta_Tactic_Grind_PP_0__Lean_Meta_Grind_ppExprArray___spec__1___boxed(lean_object*, lean_object*, lean_object*, lean_object*); LEAN_EXPORT lean_object* l_Array_forIn_x27Unsafe_loop___at_Lean_Meta_Grind_Goal_ppState___spec__3(lean_object*, lean_object*, lean_object*, lean_object*, lean_object*, lean_object*, size_t, size_t, lean_object*, lean_object*, lean_object*, lean_object*, lean_object*, lean_object*); LEAN_EXPORT lean_object* l_Lean_Meta_Grind_ppENodeRef___boxed(lean_object*, lean_object*, lean_object*, lean_object*, lean_object*, lean_object*, lean_object*, lean_object*, lean_object*, lean_object*); size_t lean_usize_add(size_t, size_t); @@ -289,6 +293,7 @@ static lean_object* l_Lean_Meta_Grind_Goal_ppState___closed__2; uint8_t l_Lean_PersistentArray_isEmpty___rarg(lean_object*); static lean_object* l___private_Lean_Meta_Tactic_Grind_PP_0__Lean_Meta_Grind_Goal_ppENodeDecl___lambda__4___closed__1; LEAN_EXPORT lean_object* l___private_Lean_Meta_Tactic_Grind_PP_0__Lean_Meta_Grind_ppActiveTheoremPatterns___boxed(lean_object*, lean_object*, lean_object*, lean_object*, lean_object*, lean_object*, lean_object*); +LEAN_EXPORT lean_object* l_Lean_Meta_Grind_ppExprArray(lean_object*, lean_object*, lean_object*, lean_object*); size_t lean_usize_shift_left(size_t, size_t); LEAN_EXPORT lean_object* l_Array_mapMUnsafe_map___at___private_Lean_Meta_Tactic_Grind_PP_0__Lean_Meta_Grind_ppActiveTheoremPatterns___spec__1(size_t, size_t, lean_object*, lean_object*, lean_object*, lean_object*, lean_object*, lean_object*, lean_object*, lean_object*); static lean_object* l_Lean_Meta_Grind_Goal_ppENodeRef___closed__5; @@ -329,7 +334,6 @@ LEAN_EXPORT lean_object* l_Lean_PersistentArray_forInAux___at_Lean_Meta_Grind_Go static lean_object* l_Lean_Meta_Grind_goalToMessageData___lambda__1___closed__7; LEAN_EXPORT lean_object* l___private_Lean_Meta_Tactic_Grind_PP_0__Lean_Meta_Grind_ppEqcs___lambda__2___boxed(lean_object*, lean_object*, lean_object*, lean_object*, lean_object*, lean_object*, lean_object*, lean_object*, lean_object*, lean_object*); static lean_object* l_Array_forIn_x27Unsafe_loop___at___private_Lean_Meta_Tactic_Grind_PP_0__Lean_Meta_Grind_Goal_ppENodeDeclValue___spec__1___closed__3; -LEAN_EXPORT lean_object* l_Array_mapMUnsafe_map___at___private_Lean_Meta_Tactic_Grind_PP_0__Lean_Meta_Grind_ppExprArray___spec__1(lean_object*, size_t, size_t, lean_object*); LEAN_EXPORT lean_object* l_Lean_Meta_Grind_ppGoals(lean_object*, lean_object*, lean_object*, lean_object*, lean_object*, lean_object*); static lean_object* 
l___private_Lean_Meta_Tactic_Grind_PP_0__Lean_Meta_Grind_ppThresholds___lambda__1___closed__1; LEAN_EXPORT lean_object* l_Lean_PersistentArray_forIn___at_Lean_Meta_Grind_Goal_ppState___spec__1(lean_object*, lean_object*, lean_object*, lean_object*, lean_object*, lean_object*, lean_object*, lean_object*); @@ -3408,26 +3412,35 @@ lean_dec(x_1); return x_12; } } -static double _init_l_Array_mapMUnsafe_map___at___private_Lean_Meta_Tactic_Grind_PP_0__Lean_Meta_Grind_ppExprArray___spec__1___closed__1() { +LEAN_EXPORT lean_object* l___private_Lean_Meta_Tactic_Grind_PP_0__Lean_Meta_Grind_pushMsg(lean_object* x_1, lean_object* x_2, lean_object* x_3, lean_object* x_4, lean_object* x_5, lean_object* x_6, lean_object* x_7, lean_object* x_8) { _start: { -lean_object* x_1; uint8_t x_2; double x_3; -x_1 = lean_unsigned_to_nat(0u); -x_2 = 0; -x_3 = l_Float_ofScientific(x_1, x_2, x_1); -return x_3; +lean_object* x_9; lean_object* x_10; lean_object* x_11; lean_object* x_12; +x_9 = lean_array_push(x_3, x_1); +x_10 = lean_box(0); +x_11 = lean_alloc_ctor(0, 2, 0); +lean_ctor_set(x_11, 0, x_10); +lean_ctor_set(x_11, 1, x_9); +x_12 = lean_alloc_ctor(0, 2, 0); +lean_ctor_set(x_12, 0, x_11); +lean_ctor_set(x_12, 1, x_8); +return x_12; } } -static lean_object* _init_l_Array_mapMUnsafe_map___at___private_Lean_Meta_Tactic_Grind_PP_0__Lean_Meta_Grind_ppExprArray___spec__1___closed__2() { +LEAN_EXPORT lean_object* l___private_Lean_Meta_Tactic_Grind_PP_0__Lean_Meta_Grind_pushMsg___boxed(lean_object* x_1, lean_object* x_2, lean_object* x_3, lean_object* x_4, lean_object* x_5, lean_object* x_6, lean_object* x_7, lean_object* x_8) { _start: { -lean_object* x_1; lean_object* x_2; -x_1 = lean_box(0); -x_2 = lean_array_mk(x_1); -return x_2; +lean_object* x_9; +x_9 = l___private_Lean_Meta_Tactic_Grind_PP_0__Lean_Meta_Grind_pushMsg(x_1, x_2, x_3, x_4, x_5, x_6, x_7, x_8); +lean_dec(x_7); +lean_dec(x_6); +lean_dec(x_5); +lean_dec(x_4); +lean_dec(x_2); +return x_9; } } -LEAN_EXPORT lean_object* l_Array_mapMUnsafe_map___at___private_Lean_Meta_Tactic_Grind_PP_0__Lean_Meta_Grind_ppExprArray___spec__1(lean_object* x_1, size_t x_2, size_t x_3, lean_object* x_4) { +LEAN_EXPORT lean_object* l_Array_mapMUnsafe_map___at_Lean_Meta_Grind_ppExprArray___spec__1(lean_object* x_1, size_t x_2, size_t x_3, lean_object* x_4) { _start: { uint8_t x_5; @@ -3439,50 +3452,39 @@ return x_4; } else { -lean_object* x_6; lean_object* x_7; lean_object* x_8; double x_9; uint8_t x_10; lean_object* x_11; lean_object* x_12; lean_object* x_13; lean_object* x_14; lean_object* x_15; lean_object* x_16; lean_object* x_17; lean_object* x_18; size_t x_19; size_t x_20; lean_object* x_21; +lean_object* x_6; lean_object* x_7; lean_object* x_8; lean_object* x_9; size_t x_10; size_t x_11; lean_object* x_12; x_6 = lean_array_uget(x_4, x_3); x_7 = lean_unsigned_to_nat(0u); x_8 = lean_array_uset(x_4, x_3, x_7); -x_9 = l_Array_mapMUnsafe_map___at___private_Lean_Meta_Tactic_Grind_PP_0__Lean_Meta_Grind_ppExprArray___spec__1___closed__1; -x_10 = 1; -x_11 = l_Lean_Meta_Grind_Goal_ppENodeRef___closed__8; lean_inc(x_1); -x_12 = lean_alloc_ctor(0, 2, 17); -lean_ctor_set(x_12, 0, x_1); -lean_ctor_set(x_12, 1, x_11); -lean_ctor_set_float(x_12, sizeof(void*)*2, x_9); -lean_ctor_set_float(x_12, sizeof(void*)*2 + 8, x_9); -lean_ctor_set_uint8(x_12, sizeof(void*)*2 + 16, x_10); -x_13 = l_Lean_MessageData_ofExpr(x_6); -x_14 = l_Lean_Meta_Grind_Goal_ppENodeRef___closed__9; -x_15 = lean_alloc_ctor(7, 2, 0); -lean_ctor_set(x_15, 0, x_14); -lean_ctor_set(x_15, 1, x_13); -x_16 = 
lean_alloc_ctor(7, 2, 0); -lean_ctor_set(x_16, 0, x_15); -lean_ctor_set(x_16, 1, x_14); -x_17 = l_Array_mapMUnsafe_map___at___private_Lean_Meta_Tactic_Grind_PP_0__Lean_Meta_Grind_ppExprArray___spec__1___closed__2; -x_18 = lean_alloc_ctor(9, 3, 0); -lean_ctor_set(x_18, 0, x_12); -lean_ctor_set(x_18, 1, x_16); -lean_ctor_set(x_18, 2, x_17); -x_19 = 1; -x_20 = lean_usize_add(x_3, x_19); -x_21 = lean_array_uset(x_8, x_3, x_18); -x_3 = x_20; -x_4 = x_21; +x_9 = l_Lean_toTraceElem___at_Lean_Meta_Grind_Arith_CommRing_ppBasis_x3f___spec__10(x_6, x_1); +x_10 = 1; +x_11 = lean_usize_add(x_3, x_10); +x_12 = lean_array_uset(x_8, x_3, x_9); +x_3 = x_11; +x_4 = x_12; goto _start; } } } -LEAN_EXPORT lean_object* l___private_Lean_Meta_Tactic_Grind_PP_0__Lean_Meta_Grind_ppExprArray(lean_object* x_1, lean_object* x_2, lean_object* x_3, lean_object* x_4) { +static double _init_l_Lean_Meta_Grind_ppExprArray___closed__1() { +_start: +{ +lean_object* x_1; uint8_t x_2; double x_3; +x_1 = lean_unsigned_to_nat(0u); +x_2 = 0; +x_3 = l_Float_ofScientific(x_1, x_2, x_1); +return x_3; +} +} +LEAN_EXPORT lean_object* l_Lean_Meta_Grind_ppExprArray(lean_object* x_1, lean_object* x_2, lean_object* x_3, lean_object* x_4) { _start: { size_t x_5; size_t x_6; lean_object* x_7; double x_8; uint8_t x_9; lean_object* x_10; lean_object* x_11; lean_object* x_12; lean_object* x_13; lean_object* x_14; x_5 = lean_array_size(x_3); x_6 = 0; -x_7 = l_Array_mapMUnsafe_map___at___private_Lean_Meta_Tactic_Grind_PP_0__Lean_Meta_Grind_ppExprArray___spec__1(x_4, x_5, x_6, x_3); -x_8 = l_Array_mapMUnsafe_map___at___private_Lean_Meta_Tactic_Grind_PP_0__Lean_Meta_Grind_ppExprArray___spec__1___closed__1; +x_7 = l_Array_mapMUnsafe_map___at_Lean_Meta_Grind_ppExprArray___spec__1(x_4, x_5, x_6, x_3); +x_8 = l_Lean_Meta_Grind_ppExprArray___closed__1; x_9 = 1; x_10 = l_Lean_Meta_Grind_Goal_ppENodeRef___closed__8; x_11 = lean_alloc_ctor(0, 2, 17); @@ -3501,7 +3503,7 @@ lean_ctor_set(x_14, 2, x_7); return x_14; } } -LEAN_EXPORT lean_object* l_Array_mapMUnsafe_map___at___private_Lean_Meta_Tactic_Grind_PP_0__Lean_Meta_Grind_ppExprArray___spec__1___boxed(lean_object* x_1, lean_object* x_2, lean_object* x_3, lean_object* x_4) { +LEAN_EXPORT lean_object* l_Array_mapMUnsafe_map___at_Lean_Meta_Grind_ppExprArray___spec__1___boxed(lean_object* x_1, lean_object* x_2, lean_object* x_3, lean_object* x_4) { _start: { size_t x_5; size_t x_6; lean_object* x_7; @@ -3509,38 +3511,10 @@ x_5 = lean_unbox_usize(x_2); lean_dec(x_2); x_6 = lean_unbox_usize(x_3); lean_dec(x_3); -x_7 = l_Array_mapMUnsafe_map___at___private_Lean_Meta_Tactic_Grind_PP_0__Lean_Meta_Grind_ppExprArray___spec__1(x_1, x_5, x_6, x_4); +x_7 = l_Array_mapMUnsafe_map___at_Lean_Meta_Grind_ppExprArray___spec__1(x_1, x_5, x_6, x_4); return x_7; } } -LEAN_EXPORT lean_object* l___private_Lean_Meta_Tactic_Grind_PP_0__Lean_Meta_Grind_pushMsg(lean_object* x_1, lean_object* x_2, lean_object* x_3, lean_object* x_4, lean_object* x_5, lean_object* x_6, lean_object* x_7, lean_object* x_8) { -_start: -{ -lean_object* x_9; lean_object* x_10; lean_object* x_11; lean_object* x_12; -x_9 = lean_array_push(x_3, x_1); -x_10 = lean_box(0); -x_11 = lean_alloc_ctor(0, 2, 0); -lean_ctor_set(x_11, 0, x_10); -lean_ctor_set(x_11, 1, x_9); -x_12 = lean_alloc_ctor(0, 2, 0); -lean_ctor_set(x_12, 0, x_11); -lean_ctor_set(x_12, 1, x_8); -return x_12; -} -} -LEAN_EXPORT lean_object* l___private_Lean_Meta_Tactic_Grind_PP_0__Lean_Meta_Grind_pushMsg___boxed(lean_object* x_1, lean_object* x_2, lean_object* x_3, lean_object* x_4, lean_object* 
x_5, lean_object* x_6, lean_object* x_7, lean_object* x_8) { -_start: -{ -lean_object* x_9; -x_9 = l___private_Lean_Meta_Tactic_Grind_PP_0__Lean_Meta_Grind_pushMsg(x_1, x_2, x_3, x_4, x_5, x_6, x_7, x_8); -lean_dec(x_7); -lean_dec(x_6); -lean_dec(x_5); -lean_dec(x_4); -lean_dec(x_2); -return x_9; -} -} LEAN_EXPORT lean_object* l_List_filterTR_loop___at___private_Lean_Meta_Tactic_Grind_PP_0__Lean_Meta_Grind_ppEqcs___spec__1(lean_object* x_1, lean_object* x_2) { _start: { @@ -3718,7 +3692,7 @@ static lean_object* _init_l_List_forIn_x27_loop___at___private_Lean_Meta_Tactic_ { lean_object* x_1; double x_2; uint8_t x_3; lean_object* x_4; lean_object* x_5; x_1 = l_List_forIn_x27_loop___at___private_Lean_Meta_Tactic_Grind_PP_0__Lean_Meta_Grind_ppEqcs___spec__3___closed__4; -x_2 = l_Array_mapMUnsafe_map___at___private_Lean_Meta_Tactic_Grind_PP_0__Lean_Meta_Grind_ppExprArray___spec__1___closed__1; +x_2 = l_Lean_Meta_Grind_ppExprArray___closed__1; x_3 = 1; x_4 = l_Lean_Meta_Grind_Goal_ppENodeRef___closed__8; x_5 = lean_alloc_ctor(0, 2, 17); @@ -4219,7 +4193,7 @@ x_113 = lean_array_mk(x_111); x_114 = l_List_forIn_x27_loop___at___private_Lean_Meta_Tactic_Grind_PP_0__Lean_Meta_Grind_ppEqcs___spec__3___closed__4; x_115 = l_List_forIn_x27_loop___at___private_Lean_Meta_Tactic_Grind_PP_0__Lean_Meta_Grind_ppEqcs___spec__3___closed__12; x_116 = l_List_forIn_x27_loop___at___private_Lean_Meta_Tactic_Grind_PP_0__Lean_Meta_Grind_ppEqcs___spec__3___closed__11; -x_117 = l___private_Lean_Meta_Tactic_Grind_PP_0__Lean_Meta_Grind_ppExprArray(x_114, x_115, x_113, x_116); +x_117 = l_Lean_Meta_Grind_ppExprArray(x_114, x_115, x_113, x_116); lean_ctor_set(x_37, 0, x_117); lean_ctor_set(x_7, 0, x_37); x_118 = lean_alloc_ctor(1, 1, 0); @@ -4261,7 +4235,7 @@ x_123 = lean_array_mk(x_121); x_124 = l_List_forIn_x27_loop___at___private_Lean_Meta_Tactic_Grind_PP_0__Lean_Meta_Grind_ppEqcs___spec__3___closed__4; x_125 = l_List_forIn_x27_loop___at___private_Lean_Meta_Tactic_Grind_PP_0__Lean_Meta_Grind_ppEqcs___spec__3___closed__12; x_126 = l_List_forIn_x27_loop___at___private_Lean_Meta_Tactic_Grind_PP_0__Lean_Meta_Grind_ppEqcs___spec__3___closed__11; -x_127 = l___private_Lean_Meta_Tactic_Grind_PP_0__Lean_Meta_Grind_ppExprArray(x_124, x_125, x_123, x_126); +x_127 = l_Lean_Meta_Grind_ppExprArray(x_124, x_125, x_123, x_126); x_128 = lean_alloc_ctor(1, 1, 0); lean_ctor_set(x_128, 0, x_127); lean_ctor_set(x_7, 0, x_128); @@ -4310,7 +4284,7 @@ x_136 = lean_array_mk(x_134); x_137 = l_List_forIn_x27_loop___at___private_Lean_Meta_Tactic_Grind_PP_0__Lean_Meta_Grind_ppEqcs___spec__3___closed__4; x_138 = l_List_forIn_x27_loop___at___private_Lean_Meta_Tactic_Grind_PP_0__Lean_Meta_Grind_ppEqcs___spec__3___closed__13; x_139 = l_List_forIn_x27_loop___at___private_Lean_Meta_Tactic_Grind_PP_0__Lean_Meta_Grind_ppEqcs___spec__3___closed__11; -x_140 = l___private_Lean_Meta_Tactic_Grind_PP_0__Lean_Meta_Grind_ppExprArray(x_137, x_138, x_136, x_139); +x_140 = l_Lean_Meta_Grind_ppExprArray(x_137, x_138, x_136, x_139); lean_ctor_set(x_35, 0, x_140); lean_ctor_set(x_29, 1, x_35); x_141 = lean_alloc_ctor(1, 1, 0); @@ -4352,7 +4326,7 @@ x_146 = lean_array_mk(x_144); x_147 = l_List_forIn_x27_loop___at___private_Lean_Meta_Tactic_Grind_PP_0__Lean_Meta_Grind_ppEqcs___spec__3___closed__4; x_148 = l_List_forIn_x27_loop___at___private_Lean_Meta_Tactic_Grind_PP_0__Lean_Meta_Grind_ppEqcs___spec__3___closed__13; x_149 = l_List_forIn_x27_loop___at___private_Lean_Meta_Tactic_Grind_PP_0__Lean_Meta_Grind_ppEqcs___spec__3___closed__11; -x_150 = 
l___private_Lean_Meta_Tactic_Grind_PP_0__Lean_Meta_Grind_ppExprArray(x_147, x_148, x_146, x_149); +x_150 = l_Lean_Meta_Grind_ppExprArray(x_147, x_148, x_146, x_149); x_151 = lean_alloc_ctor(1, 1, 0); lean_ctor_set(x_151, 0, x_150); lean_ctor_set(x_29, 1, x_151); @@ -4626,7 +4600,7 @@ x_200 = lean_array_mk(x_198); x_201 = l_List_forIn_x27_loop___at___private_Lean_Meta_Tactic_Grind_PP_0__Lean_Meta_Grind_ppEqcs___spec__3___closed__4; x_202 = l_List_forIn_x27_loop___at___private_Lean_Meta_Tactic_Grind_PP_0__Lean_Meta_Grind_ppEqcs___spec__3___closed__12; x_203 = l_List_forIn_x27_loop___at___private_Lean_Meta_Tactic_Grind_PP_0__Lean_Meta_Grind_ppEqcs___spec__3___closed__11; -x_204 = l___private_Lean_Meta_Tactic_Grind_PP_0__Lean_Meta_Grind_ppExprArray(x_201, x_202, x_200, x_203); +x_204 = l_Lean_Meta_Grind_ppExprArray(x_201, x_202, x_200, x_203); if (lean_is_scalar(x_196)) { x_205 = lean_alloc_ctor(1, 1, 0); } else { @@ -4688,7 +4662,7 @@ x_214 = lean_array_mk(x_212); x_215 = l_List_forIn_x27_loop___at___private_Lean_Meta_Tactic_Grind_PP_0__Lean_Meta_Grind_ppEqcs___spec__3___closed__4; x_216 = l_List_forIn_x27_loop___at___private_Lean_Meta_Tactic_Grind_PP_0__Lean_Meta_Grind_ppEqcs___spec__3___closed__13; x_217 = l_List_forIn_x27_loop___at___private_Lean_Meta_Tactic_Grind_PP_0__Lean_Meta_Grind_ppEqcs___spec__3___closed__11; -x_218 = l___private_Lean_Meta_Tactic_Grind_PP_0__Lean_Meta_Grind_ppExprArray(x_215, x_216, x_214, x_217); +x_218 = l_Lean_Meta_Grind_ppExprArray(x_215, x_216, x_214, x_217); if (lean_is_scalar(x_210)) { x_219 = lean_alloc_ctor(1, 1, 0); } else { @@ -5009,7 +4983,7 @@ x_276 = lean_array_mk(x_274); x_277 = l_List_forIn_x27_loop___at___private_Lean_Meta_Tactic_Grind_PP_0__Lean_Meta_Grind_ppEqcs___spec__3___closed__4; x_278 = l_List_forIn_x27_loop___at___private_Lean_Meta_Tactic_Grind_PP_0__Lean_Meta_Grind_ppEqcs___spec__3___closed__12; x_279 = l_List_forIn_x27_loop___at___private_Lean_Meta_Tactic_Grind_PP_0__Lean_Meta_Grind_ppEqcs___spec__3___closed__11; -x_280 = l___private_Lean_Meta_Tactic_Grind_PP_0__Lean_Meta_Grind_ppExprArray(x_277, x_278, x_276, x_279); +x_280 = l_Lean_Meta_Grind_ppExprArray(x_277, x_278, x_276, x_279); if (lean_is_scalar(x_272)) { x_281 = lean_alloc_ctor(1, 1, 0); } else { @@ -5082,7 +5056,7 @@ x_292 = lean_array_mk(x_290); x_293 = l_List_forIn_x27_loop___at___private_Lean_Meta_Tactic_Grind_PP_0__Lean_Meta_Grind_ppEqcs___spec__3___closed__4; x_294 = l_List_forIn_x27_loop___at___private_Lean_Meta_Tactic_Grind_PP_0__Lean_Meta_Grind_ppEqcs___spec__3___closed__13; x_295 = l_List_forIn_x27_loop___at___private_Lean_Meta_Tactic_Grind_PP_0__Lean_Meta_Grind_ppEqcs___spec__3___closed__11; -x_296 = l___private_Lean_Meta_Tactic_Grind_PP_0__Lean_Meta_Grind_ppExprArray(x_293, x_294, x_292, x_295); +x_296 = l_Lean_Meta_Grind_ppExprArray(x_293, x_294, x_292, x_295); if (lean_is_scalar(x_288)) { x_297 = lean_alloc_ctor(1, 1, 0); } else { @@ -5459,7 +5433,7 @@ x_368 = lean_array_mk(x_366); x_369 = l_List_forIn_x27_loop___at___private_Lean_Meta_Tactic_Grind_PP_0__Lean_Meta_Grind_ppEqcs___spec__3___closed__4; x_370 = l_List_forIn_x27_loop___at___private_Lean_Meta_Tactic_Grind_PP_0__Lean_Meta_Grind_ppEqcs___spec__3___closed__12; x_371 = l_List_forIn_x27_loop___at___private_Lean_Meta_Tactic_Grind_PP_0__Lean_Meta_Grind_ppEqcs___spec__3___closed__11; -x_372 = l___private_Lean_Meta_Tactic_Grind_PP_0__Lean_Meta_Grind_ppExprArray(x_369, x_370, x_368, x_371); +x_372 = l_Lean_Meta_Grind_ppExprArray(x_369, x_370, x_368, x_371); if (lean_is_scalar(x_364)) { x_373 = lean_alloc_ctor(1, 1, 
0); } else { @@ -5540,7 +5514,7 @@ x_386 = lean_array_mk(x_384); x_387 = l_List_forIn_x27_loop___at___private_Lean_Meta_Tactic_Grind_PP_0__Lean_Meta_Grind_ppEqcs___spec__3___closed__4; x_388 = l_List_forIn_x27_loop___at___private_Lean_Meta_Tactic_Grind_PP_0__Lean_Meta_Grind_ppEqcs___spec__3___closed__13; x_389 = l_List_forIn_x27_loop___at___private_Lean_Meta_Tactic_Grind_PP_0__Lean_Meta_Grind_ppEqcs___spec__3___closed__11; -x_390 = l___private_Lean_Meta_Tactic_Grind_PP_0__Lean_Meta_Grind_ppExprArray(x_387, x_388, x_386, x_389); +x_390 = l_Lean_Meta_Grind_ppExprArray(x_387, x_388, x_386, x_389); if (lean_is_scalar(x_382)) { x_391 = lean_alloc_ctor(1, 1, 0); } else { @@ -5716,21 +5690,30 @@ return x_19; static lean_object* _init_l___private_Lean_Meta_Tactic_Grind_PP_0__Lean_Meta_Grind_ppEqcs___closed__1() { _start: { +lean_object* x_1; lean_object* x_2; +x_1 = lean_box(0); +x_2 = lean_array_mk(x_1); +return x_2; +} +} +static lean_object* _init_l___private_Lean_Meta_Tactic_Grind_PP_0__Lean_Meta_Grind_ppEqcs___closed__2() { +_start: +{ lean_object* x_1; lean_object* x_2; lean_object* x_3; x_1 = lean_box(0); -x_2 = l_Array_mapMUnsafe_map___at___private_Lean_Meta_Tactic_Grind_PP_0__Lean_Meta_Grind_ppExprArray___spec__1___closed__2; +x_2 = l___private_Lean_Meta_Tactic_Grind_PP_0__Lean_Meta_Grind_ppEqcs___closed__1; x_3 = lean_alloc_ctor(0, 2, 0); lean_ctor_set(x_3, 0, x_2); lean_ctor_set(x_3, 1, x_1); return x_3; } } -static lean_object* _init_l___private_Lean_Meta_Tactic_Grind_PP_0__Lean_Meta_Grind_ppEqcs___closed__2() { +static lean_object* _init_l___private_Lean_Meta_Tactic_Grind_PP_0__Lean_Meta_Grind_ppEqcs___closed__3() { _start: { lean_object* x_1; lean_object* x_2; lean_object* x_3; x_1 = lean_box(0); -x_2 = l___private_Lean_Meta_Tactic_Grind_PP_0__Lean_Meta_Grind_ppEqcs___closed__1; +x_2 = l___private_Lean_Meta_Tactic_Grind_PP_0__Lean_Meta_Grind_ppEqcs___closed__2; x_3 = lean_alloc_ctor(0, 2, 0); lean_ctor_set(x_3, 0, x_1); lean_ctor_set(x_3, 1, x_2); @@ -5745,8 +5728,8 @@ x_8 = lean_box(0); x_9 = lean_box(0); lean_inc(x_1); x_10 = l_Lean_Meta_Grind_Goal_getEqcs(x_1); -x_11 = l_Array_mapMUnsafe_map___at___private_Lean_Meta_Tactic_Grind_PP_0__Lean_Meta_Grind_ppExprArray___spec__1___closed__2; -x_12 = l___private_Lean_Meta_Tactic_Grind_PP_0__Lean_Meta_Grind_ppEqcs___closed__2; +x_11 = l___private_Lean_Meta_Tactic_Grind_PP_0__Lean_Meta_Grind_ppEqcs___closed__1; +x_12 = l___private_Lean_Meta_Tactic_Grind_PP_0__Lean_Meta_Grind_ppEqcs___closed__3; lean_inc(x_6); lean_inc(x_5); lean_inc(x_4); @@ -5937,7 +5920,7 @@ static lean_object* _init_l___private_Lean_Meta_Tactic_Grind_PP_0__Lean_Meta_Gri { lean_object* x_1; double x_2; uint8_t x_3; lean_object* x_4; lean_object* x_5; x_1 = l___private_Lean_Meta_Tactic_Grind_PP_0__Lean_Meta_Grind_ppEMatchTheorem___closed__4; -x_2 = l_Array_mapMUnsafe_map___at___private_Lean_Meta_Tactic_Grind_PP_0__Lean_Meta_Grind_ppExprArray___spec__1___closed__1; +x_2 = l_Lean_Meta_Grind_ppExprArray___closed__1; x_3 = 1; x_4 = l_Lean_Meta_Grind_Goal_ppENodeRef___closed__8; x_5 = lean_alloc_ctor(0, 2, 17); @@ -5986,7 +5969,7 @@ x_21 = lean_alloc_ctor(7, 2, 0); lean_ctor_set(x_21, 0, x_20); lean_ctor_set(x_21, 1, x_11); x_22 = l___private_Lean_Meta_Tactic_Grind_PP_0__Lean_Meta_Grind_ppEMatchTheorem___closed__5; -x_23 = l_Array_mapMUnsafe_map___at___private_Lean_Meta_Tactic_Grind_PP_0__Lean_Meta_Grind_ppExprArray___spec__1___closed__2; +x_23 = l___private_Lean_Meta_Tactic_Grind_PP_0__Lean_Meta_Grind_ppEqcs___closed__1; x_24 = lean_alloc_ctor(9, 3, 0); lean_ctor_set(x_24, 0, 
x_22); lean_ctor_set(x_24, 1, x_21); @@ -6024,7 +6007,7 @@ x_37 = lean_alloc_ctor(7, 2, 0); lean_ctor_set(x_37, 0, x_36); lean_ctor_set(x_37, 1, x_27); x_38 = l___private_Lean_Meta_Tactic_Grind_PP_0__Lean_Meta_Grind_ppEMatchTheorem___closed__5; -x_39 = l_Array_mapMUnsafe_map___at___private_Lean_Meta_Tactic_Grind_PP_0__Lean_Meta_Grind_ppExprArray___spec__1___closed__2; +x_39 = l___private_Lean_Meta_Tactic_Grind_PP_0__Lean_Meta_Grind_ppEqcs___closed__1; x_40 = lean_alloc_ctor(9, 3, 0); lean_ctor_set(x_40, 0, x_38); lean_ctor_set(x_40, 1, x_37); @@ -6161,7 +6144,7 @@ static lean_object* _init_l___private_Lean_Meta_Tactic_Grind_PP_0__Lean_Meta_Gri { lean_object* x_1; double x_2; uint8_t x_3; lean_object* x_4; lean_object* x_5; x_1 = l___private_Lean_Meta_Tactic_Grind_PP_0__Lean_Meta_Grind_ppActiveTheoremPatterns___closed__2; -x_2 = l_Array_mapMUnsafe_map___at___private_Lean_Meta_Tactic_Grind_PP_0__Lean_Meta_Grind_ppExprArray___spec__1___closed__1; +x_2 = l_Lean_Meta_Grind_ppExprArray___closed__1; x_3 = 1; x_4 = l_Lean_Meta_Grind_Goal_ppENodeRef___closed__8; x_5 = lean_alloc_ctor(0, 2, 17); @@ -6462,7 +6445,7 @@ static lean_object* _init_l_Array_forIn_x27Unsafe_loop___at___private_Lean_Meta_ { lean_object* x_1; double x_2; uint8_t x_3; lean_object* x_4; lean_object* x_5; x_1 = l_Array_forIn_x27Unsafe_loop___at___private_Lean_Meta_Tactic_Grind_PP_0__Lean_Meta_Grind_ppOffset___spec__1___closed__2; -x_2 = l_Array_mapMUnsafe_map___at___private_Lean_Meta_Tactic_Grind_PP_0__Lean_Meta_Grind_ppExprArray___spec__1___closed__1; +x_2 = l_Lean_Meta_Grind_ppExprArray___closed__1; x_3 = 1; x_4 = l_Lean_Meta_Grind_Goal_ppENodeRef___closed__8; x_5 = lean_alloc_ctor(0, 2, 17); @@ -6599,7 +6582,7 @@ static lean_object* _init_l___private_Lean_Meta_Tactic_Grind_PP_0__Lean_Meta_Gri { lean_object* x_1; double x_2; uint8_t x_3; lean_object* x_4; lean_object* x_5; x_1 = l___private_Lean_Meta_Tactic_Grind_PP_0__Lean_Meta_Grind_ppOffset___lambda__1___closed__2; -x_2 = l_Array_mapMUnsafe_map___at___private_Lean_Meta_Tactic_Grind_PP_0__Lean_Meta_Grind_ppExprArray___spec__1___closed__1; +x_2 = l_Lean_Meta_Grind_ppExprArray___closed__1; x_3 = 1; x_4 = l_Lean_Meta_Grind_Goal_ppENodeRef___closed__8; x_5 = lean_alloc_ctor(0, 2, 17); @@ -6645,7 +6628,7 @@ lean_object* x_10; size_t x_11; size_t x_12; lean_object* x_13; lean_object* x_1 x_10 = lean_box(0); x_11 = lean_array_size(x_1); x_12 = 0; -x_13 = l_Array_mapMUnsafe_map___at___private_Lean_Meta_Tactic_Grind_PP_0__Lean_Meta_Grind_ppExprArray___spec__1___closed__2; +x_13 = l___private_Lean_Meta_Tactic_Grind_PP_0__Lean_Meta_Grind_ppEqcs___closed__1; x_14 = l_Array_forIn_x27Unsafe_loop___at___private_Lean_Meta_Tactic_Grind_PP_0__Lean_Meta_Grind_ppOffset___spec__1(x_1, x_13, x_10, x_1, x_11, x_12, x_13, x_3, x_4, x_5, x_6, x_7, x_8, x_9); x_15 = lean_ctor_get(x_14, 0); lean_inc(x_15); @@ -7268,7 +7251,7 @@ static lean_object* _init_l___private_Lean_Meta_Tactic_Grind_PP_0__Lean_Meta_Gri { lean_object* x_1; double x_2; uint8_t x_3; lean_object* x_4; lean_object* x_5; x_1 = l___private_Lean_Meta_Tactic_Grind_PP_0__Lean_Meta_Grind_ppCutsat___lambda__1___closed__2; -x_2 = l_Array_mapMUnsafe_map___at___private_Lean_Meta_Tactic_Grind_PP_0__Lean_Meta_Grind_ppExprArray___spec__1___closed__1; +x_2 = l_Lean_Meta_Grind_ppExprArray___closed__1; x_3 = 1; x_4 = l_Lean_Meta_Grind_Goal_ppENodeRef___closed__8; x_5 = lean_alloc_ctor(0, 2, 17); @@ -7314,7 +7297,7 @@ lean_object* x_10; size_t x_11; size_t x_12; lean_object* x_13; lean_object* x_1 x_10 = lean_box(0); x_11 = lean_array_size(x_1); x_12 
= 0; -x_13 = l_Array_mapMUnsafe_map___at___private_Lean_Meta_Tactic_Grind_PP_0__Lean_Meta_Grind_ppExprArray___spec__1___closed__2; +x_13 = l___private_Lean_Meta_Tactic_Grind_PP_0__Lean_Meta_Grind_ppEqcs___closed__1; x_14 = l_Array_forIn_x27Unsafe_loop___at___private_Lean_Meta_Tactic_Grind_PP_0__Lean_Meta_Grind_ppCutsat___spec__1(x_1, x_13, x_10, x_1, x_11, x_12, x_13, x_3, x_4, x_5, x_6, x_7, x_8, x_9); x_15 = lean_ctor_get(x_14, 0); lean_inc(x_15); @@ -7550,6 +7533,72 @@ lean_dec(x_2); return x_10; } } +LEAN_EXPORT lean_object* l___private_Lean_Meta_Tactic_Grind_PP_0__Lean_Meta_Grind_ppCommRing(lean_object* x_1, lean_object* x_2, lean_object* x_3, lean_object* x_4, lean_object* x_5, lean_object* x_6, lean_object* x_7) { +_start: +{ +lean_object* x_8; lean_object* x_9; +x_8 = l_Lean_Meta_Grind_Arith_CommRing_pp_x3f(x_1, x_3, x_4, x_5, x_6, x_7); +x_9 = lean_ctor_get(x_8, 0); +lean_inc(x_9); +if (lean_obj_tag(x_9) == 0) +{ +uint8_t x_10; +x_10 = !lean_is_exclusive(x_8); +if (x_10 == 0) +{ +lean_object* x_11; lean_object* x_12; lean_object* x_13; +x_11 = lean_ctor_get(x_8, 0); +lean_dec(x_11); +x_12 = lean_box(0); +x_13 = lean_alloc_ctor(0, 2, 0); +lean_ctor_set(x_13, 0, x_12); +lean_ctor_set(x_13, 1, x_2); +lean_ctor_set(x_8, 0, x_13); +return x_8; +} +else +{ +lean_object* x_14; lean_object* x_15; lean_object* x_16; lean_object* x_17; +x_14 = lean_ctor_get(x_8, 1); +lean_inc(x_14); +lean_dec(x_8); +x_15 = lean_box(0); +x_16 = lean_alloc_ctor(0, 2, 0); +lean_ctor_set(x_16, 0, x_15); +lean_ctor_set(x_16, 1, x_2); +x_17 = lean_alloc_ctor(0, 2, 0); +lean_ctor_set(x_17, 0, x_16); +lean_ctor_set(x_17, 1, x_14); +return x_17; +} +} +else +{ +lean_object* x_18; lean_object* x_19; lean_object* x_20; +x_18 = lean_ctor_get(x_8, 1); +lean_inc(x_18); +lean_dec(x_8); +x_19 = lean_ctor_get(x_9, 0); +lean_inc(x_19); +lean_dec(x_9); +x_20 = l___private_Lean_Meta_Tactic_Grind_PP_0__Lean_Meta_Grind_pushMsg(x_19, x_1, x_2, x_3, x_4, x_5, x_6, x_18); +return x_20; +} +} +} +LEAN_EXPORT lean_object* l___private_Lean_Meta_Tactic_Grind_PP_0__Lean_Meta_Grind_ppCommRing___boxed(lean_object* x_1, lean_object* x_2, lean_object* x_3, lean_object* x_4, lean_object* x_5, lean_object* x_6, lean_object* x_7) { +_start: +{ +lean_object* x_8; +x_8 = l___private_Lean_Meta_Tactic_Grind_PP_0__Lean_Meta_Grind_ppCommRing(x_1, x_2, x_3, x_4, x_5, x_6, x_7); +lean_dec(x_6); +lean_dec(x_5); +lean_dec(x_4); +lean_dec(x_3); +lean_dec(x_1); +return x_8; +} +} LEAN_EXPORT lean_object* l_Array_foldlMUnsafe_fold___at___private_Lean_Meta_Tactic_Grind_PP_0__Lean_Meta_Grind_ppThresholds___spec__4(lean_object* x_1, lean_object* x_2, size_t x_3, size_t x_4, lean_object* x_5) { _start: { @@ -8327,7 +8376,7 @@ static lean_object* _init_l___private_Lean_Meta_Tactic_Grind_PP_0__Lean_Meta_Gri { lean_object* x_1; double x_2; uint8_t x_3; lean_object* x_4; lean_object* x_5; x_1 = l___private_Lean_Meta_Tactic_Grind_PP_0__Lean_Meta_Grind_ppThresholds___lambda__1___closed__2; -x_2 = l_Array_mapMUnsafe_map___at___private_Lean_Meta_Tactic_Grind_PP_0__Lean_Meta_Grind_ppExprArray___spec__1___closed__1; +x_2 = l_Lean_Meta_Grind_ppExprArray___closed__1; x_3 = 1; x_4 = l_Lean_Meta_Grind_Goal_ppENodeRef___closed__8; x_5 = lean_alloc_ctor(0, 2, 17); @@ -8429,7 +8478,7 @@ static lean_object* _init_l___private_Lean_Meta_Tactic_Grind_PP_0__Lean_Meta_Gri { lean_object* x_1; double x_2; uint8_t x_3; lean_object* x_4; lean_object* x_5; x_1 = l___private_Lean_Meta_Tactic_Grind_PP_0__Lean_Meta_Grind_ppThresholds___lambda__2___closed__3; -x_2 = 
l_Array_mapMUnsafe_map___at___private_Lean_Meta_Tactic_Grind_PP_0__Lean_Meta_Grind_ppExprArray___spec__1___closed__1; +x_2 = l_Lean_Meta_Grind_ppExprArray___closed__1; x_3 = 1; x_4 = l_Lean_Meta_Grind_Goal_ppENodeRef___closed__8; x_5 = lean_alloc_ctor(0, 2, 17); @@ -8747,7 +8796,7 @@ if (x_15 == 0) { lean_object* x_16; lean_object* x_17; lean_object* x_18; lean_dec(x_12); -x_16 = l_Array_mapMUnsafe_map___at___private_Lean_Meta_Tactic_Grind_PP_0__Lean_Meta_Grind_ppExprArray___spec__1___closed__2; +x_16 = l___private_Lean_Meta_Tactic_Grind_PP_0__Lean_Meta_Grind_ppEqcs___closed__1; x_17 = lean_box(0); lean_inc(x_2); x_18 = l___private_Lean_Meta_Tactic_Grind_PP_0__Lean_Meta_Grind_ppThresholds___lambda__5(x_1, x_2, x_16, x_11, x_16, x_17, x_2, x_3, x_4, x_5, x_6, x_7, x_8); @@ -8771,7 +8820,7 @@ x_25 = lean_alloc_ctor(7, 2, 0); lean_ctor_set(x_25, 0, x_23); lean_ctor_set(x_25, 1, x_24); x_26 = l___private_Lean_Meta_Tactic_Grind_PP_0__Lean_Meta_Grind_ppThresholds___lambda__2___closed__4; -x_27 = l_Array_mapMUnsafe_map___at___private_Lean_Meta_Tactic_Grind_PP_0__Lean_Meta_Grind_ppExprArray___spec__1___closed__2; +x_27 = l___private_Lean_Meta_Tactic_Grind_PP_0__Lean_Meta_Grind_ppEqcs___closed__1; x_28 = lean_alloc_ctor(9, 3, 0); lean_ctor_set(x_28, 0, x_26); lean_ctor_set(x_28, 1, x_25); @@ -9024,7 +9073,7 @@ static lean_object* _init_l_List_forIn_x27_loop___at___private_Lean_Meta_Tactic_ { lean_object* x_1; double x_2; uint8_t x_3; lean_object* x_4; lean_object* x_5; x_1 = l_List_forIn_x27_loop___at___private_Lean_Meta_Tactic_Grind_PP_0__Lean_Meta_Grind_ppCasesTrace___spec__1___closed__2; -x_2 = l_Array_mapMUnsafe_map___at___private_Lean_Meta_Tactic_Grind_PP_0__Lean_Meta_Grind_ppExprArray___spec__1___closed__1; +x_2 = l_Lean_Meta_Grind_ppExprArray___closed__1; x_3 = 1; x_4 = l_Lean_Meta_Grind_Goal_ppENodeRef___closed__8; x_5 = lean_alloc_ctor(0, 2, 17); @@ -9333,7 +9382,7 @@ if (x_10 == 0) lean_object* x_11; lean_object* x_12; lean_object* x_13; lean_object* x_14; lean_object* x_15; lean_object* x_16; lean_object* x_17; lean_object* x_18; lean_object* x_19; lean_object* x_20; lean_object* x_21; lean_object* x_22; x_11 = lean_box(0); x_12 = l_List_reverse___rarg(x_9); -x_13 = l_Array_mapMUnsafe_map___at___private_Lean_Meta_Tactic_Grind_PP_0__Lean_Meta_Grind_ppExprArray___spec__1___closed__2; +x_13 = l___private_Lean_Meta_Tactic_Grind_PP_0__Lean_Meta_Grind_ppEqcs___closed__1; lean_inc(x_12); x_14 = l_List_forIn_x27_loop___at___private_Lean_Meta_Tactic_Grind_PP_0__Lean_Meta_Grind_ppCasesTrace___spec__1(x_13, x_11, x_12, x_12, x_12, x_13, lean_box(0), x_1, x_2, x_3, x_4, x_5, x_6, x_7); lean_dec(x_12); @@ -9436,7 +9485,7 @@ x_11 = l_Lean_PersistentArray_toArray___rarg(x_10); x_12 = l_Lean_Meta_Grind_goalToMessageData_go___closed__2; x_13 = l_Lean_Meta_Grind_goalToMessageData_go___closed__3; x_14 = l_List_forIn_x27_loop___at___private_Lean_Meta_Tactic_Grind_PP_0__Lean_Meta_Grind_ppEqcs___spec__3___closed__11; -x_15 = l___private_Lean_Meta_Tactic_Grind_PP_0__Lean_Meta_Grind_ppExprArray(x_12, x_13, x_11, x_14); +x_15 = l_Lean_Meta_Grind_ppExprArray(x_12, x_13, x_11, x_14); x_16 = l___private_Lean_Meta_Tactic_Grind_PP_0__Lean_Meta_Grind_pushMsg(x_15, x_3, x_4, x_5, x_6, x_7, x_8, x_9); x_17 = lean_ctor_get(x_16, 0); lean_inc(x_17); @@ -9510,7 +9559,7 @@ lean_inc(x_3); x_36 = l___private_Lean_Meta_Tactic_Grind_PP_0__Lean_Meta_Grind_ppCutsat(x_3, x_35, x_5, x_6, x_7, x_8, x_34); if (lean_obj_tag(x_36) == 0) { -lean_object* x_37; lean_object* x_38; lean_object* x_39; lean_object* x_40; +lean_object* x_37; 
lean_object* x_38; lean_object* x_39; lean_object* x_40; lean_object* x_41; lean_object* x_42; lean_object* x_43; lean_object* x_44; x_37 = lean_ctor_get(x_36, 0); lean_inc(x_37); x_38 = lean_ctor_get(x_36, 1); @@ -9519,38 +9568,18 @@ lean_dec(x_36); x_39 = lean_ctor_get(x_37, 1); lean_inc(x_39); lean_dec(x_37); -x_40 = l___private_Lean_Meta_Tactic_Grind_PP_0__Lean_Meta_Grind_ppThresholds(x_2, x_3, x_39, x_5, x_6, x_7, x_8, x_38); -return x_40; -} -else -{ -uint8_t x_41; -lean_dec(x_8); -lean_dec(x_7); -lean_dec(x_6); -lean_dec(x_5); -lean_dec(x_3); -lean_dec(x_2); -x_41 = !lean_is_exclusive(x_36); -if (x_41 == 0) -{ -return x_36; -} -else -{ -lean_object* x_42; lean_object* x_43; lean_object* x_44; -x_42 = lean_ctor_get(x_36, 0); -x_43 = lean_ctor_get(x_36, 1); -lean_inc(x_43); +x_40 = l___private_Lean_Meta_Tactic_Grind_PP_0__Lean_Meta_Grind_ppCommRing(x_3, x_39, x_5, x_6, x_7, x_8, x_38); +x_41 = lean_ctor_get(x_40, 0); +lean_inc(x_41); +x_42 = lean_ctor_get(x_40, 1); lean_inc(x_42); -lean_dec(x_36); -x_44 = lean_alloc_ctor(1, 2, 0); -lean_ctor_set(x_44, 0, x_42); -lean_ctor_set(x_44, 1, x_43); +lean_dec(x_40); +x_43 = lean_ctor_get(x_41, 1); +lean_inc(x_43); +lean_dec(x_41); +x_44 = l___private_Lean_Meta_Tactic_Grind_PP_0__Lean_Meta_Grind_ppThresholds(x_2, x_3, x_43, x_5, x_6, x_7, x_8, x_42); return x_44; } -} -} else { uint8_t x_45; @@ -9560,19 +9589,19 @@ lean_dec(x_6); lean_dec(x_5); lean_dec(x_3); lean_dec(x_2); -x_45 = !lean_is_exclusive(x_32); +x_45 = !lean_is_exclusive(x_36); if (x_45 == 0) { -return x_32; +return x_36; } else { lean_object* x_46; lean_object* x_47; lean_object* x_48; -x_46 = lean_ctor_get(x_32, 0); -x_47 = lean_ctor_get(x_32, 1); +x_46 = lean_ctor_get(x_36, 0); +x_47 = lean_ctor_get(x_36, 1); lean_inc(x_47); lean_inc(x_46); -lean_dec(x_32); +lean_dec(x_36); x_48 = lean_alloc_ctor(1, 2, 0); lean_ctor_set(x_48, 0, x_46); lean_ctor_set(x_48, 1, x_47); @@ -9589,19 +9618,19 @@ lean_dec(x_6); lean_dec(x_5); lean_dec(x_3); lean_dec(x_2); -x_49 = !lean_is_exclusive(x_28); +x_49 = !lean_is_exclusive(x_32); if (x_49 == 0) { -return x_28; +return x_32; } else { lean_object* x_50; lean_object* x_51; lean_object* x_52; -x_50 = lean_ctor_get(x_28, 0); -x_51 = lean_ctor_get(x_28, 1); +x_50 = lean_ctor_get(x_32, 0); +x_51 = lean_ctor_get(x_32, 1); lean_inc(x_51); lean_inc(x_50); -lean_dec(x_28); +lean_dec(x_32); x_52 = lean_alloc_ctor(1, 2, 0); lean_ctor_set(x_52, 0, x_50); lean_ctor_set(x_52, 1, x_51); @@ -9618,19 +9647,19 @@ lean_dec(x_6); lean_dec(x_5); lean_dec(x_3); lean_dec(x_2); -x_53 = !lean_is_exclusive(x_20); +x_53 = !lean_is_exclusive(x_28); if (x_53 == 0) { -return x_20; +return x_28; } else { lean_object* x_54; lean_object* x_55; lean_object* x_56; -x_54 = lean_ctor_get(x_20, 0); -x_55 = lean_ctor_get(x_20, 1); +x_54 = lean_ctor_get(x_28, 0); +x_55 = lean_ctor_get(x_28, 1); lean_inc(x_55); lean_inc(x_54); -lean_dec(x_20); +lean_dec(x_28); x_56 = lean_alloc_ctor(1, 2, 0); lean_ctor_set(x_56, 0, x_54); lean_ctor_set(x_56, 1, x_55); @@ -9638,6 +9667,35 @@ return x_56; } } } +else +{ +uint8_t x_57; +lean_dec(x_8); +lean_dec(x_7); +lean_dec(x_6); +lean_dec(x_5); +lean_dec(x_3); +lean_dec(x_2); +x_57 = !lean_is_exclusive(x_20); +if (x_57 == 0) +{ +return x_20; +} +else +{ +lean_object* x_58; lean_object* x_59; lean_object* x_60; +x_58 = lean_ctor_get(x_20, 0); +x_59 = lean_ctor_get(x_20, 1); +lean_inc(x_59); +lean_inc(x_58); +lean_dec(x_20); +x_60 = lean_alloc_ctor(1, 2, 0); +lean_ctor_set(x_60, 0, x_58); +lean_ctor_set(x_60, 1, x_59); +return x_60; +} +} +} } 
LEAN_EXPORT lean_object* l_Lean_Meta_Grind_goalToMessageData_go___boxed(lean_object* x_1, lean_object* x_2, lean_object* x_3, lean_object* x_4, lean_object* x_5, lean_object* x_6, lean_object* x_7, lean_object* x_8, lean_object* x_9) { _start: @@ -9671,7 +9729,7 @@ static lean_object* _init_l_Lean_Meta_Grind_goalToMessageData___lambda__1___clos { lean_object* x_1; double x_2; uint8_t x_3; lean_object* x_4; lean_object* x_5; x_1 = l_Lean_Meta_Grind_goalToMessageData___lambda__1___closed__2; -x_2 = l_Array_mapMUnsafe_map___at___private_Lean_Meta_Tactic_Grind_PP_0__Lean_Meta_Grind_ppExprArray___spec__1___closed__1; +x_2 = l_Lean_Meta_Grind_ppExprArray___closed__1; x_3 = 0; x_4 = l_Lean_Meta_Grind_Goal_ppENodeRef___closed__8; x_5 = lean_alloc_ctor(0, 2, 17); @@ -9864,7 +9922,7 @@ else lean_object* x_13; lean_object* x_14; lean_object* x_15; lean_object* x_16; x_13 = lean_ctor_get(x_1, 0); lean_inc(x_13); -x_14 = l_Array_mapMUnsafe_map___at___private_Lean_Meta_Tactic_Grind_PP_0__Lean_Meta_Grind_ppExprArray___spec__1___closed__2; +x_14 = l___private_Lean_Meta_Tactic_Grind_PP_0__Lean_Meta_Grind_ppEqcs___closed__1; lean_inc(x_13); x_15 = lean_alloc_closure((void*)(l_Lean_Meta_Grind_goalToMessageData___lambda__1), 9, 4); lean_closure_set(x_15, 0, x_1); @@ -9880,6 +9938,7 @@ lean_object* initialize_Init_Grind_Util(uint8_t builtin, lean_object*); lean_object* initialize_Init_Grind_PP(uint8_t builtin, lean_object*); lean_object* initialize_Lean_Meta_Tactic_Grind_Types(uint8_t builtin, lean_object*); lean_object* initialize_Lean_Meta_Tactic_Grind_Arith_Model(uint8_t builtin, lean_object*); +lean_object* initialize_Lean_Meta_Tactic_Grind_Arith_CommRing_PP(uint8_t builtin, lean_object*); static bool _G_initialized = false; LEAN_EXPORT lean_object* initialize_Lean_Meta_Tactic_Grind_PP(uint8_t builtin, lean_object* w) { lean_object * res; @@ -9897,6 +9956,9 @@ lean_dec_ref(res); res = initialize_Lean_Meta_Tactic_Grind_Arith_Model(builtin, lean_io_mk_world()); if (lean_io_result_is_error(res)) return res; lean_dec_ref(res); +res = initialize_Lean_Meta_Tactic_Grind_Arith_CommRing_PP(builtin, lean_io_mk_world()); +if (lean_io_result_is_error(res)) return res; +lean_dec_ref(res); l_Lean_Meta_Grind_Goal_ppENodeRef___closed__1 = _init_l_Lean_Meta_Grind_Goal_ppENodeRef___closed__1(); lean_mark_persistent(l_Lean_Meta_Grind_Goal_ppENodeRef___closed__1); l_Lean_Meta_Grind_Goal_ppENodeRef___closed__2 = _init_l_Lean_Meta_Grind_Goal_ppENodeRef___closed__2(); @@ -9981,9 +10043,7 @@ l_Lean_Meta_Grind_Goal_ppState___closed__2 = _init_l_Lean_Meta_Grind_Goal_ppStat lean_mark_persistent(l_Lean_Meta_Grind_Goal_ppState___closed__2); l_List_forIn_x27_loop___at_Lean_Meta_Grind_ppGoals___spec__1___closed__1 = _init_l_List_forIn_x27_loop___at_Lean_Meta_Grind_ppGoals___spec__1___closed__1(); lean_mark_persistent(l_List_forIn_x27_loop___at_Lean_Meta_Grind_ppGoals___spec__1___closed__1); -l_Array_mapMUnsafe_map___at___private_Lean_Meta_Tactic_Grind_PP_0__Lean_Meta_Grind_ppExprArray___spec__1___closed__1 = _init_l_Array_mapMUnsafe_map___at___private_Lean_Meta_Tactic_Grind_PP_0__Lean_Meta_Grind_ppExprArray___spec__1___closed__1(); -l_Array_mapMUnsafe_map___at___private_Lean_Meta_Tactic_Grind_PP_0__Lean_Meta_Grind_ppExprArray___spec__1___closed__2 = _init_l_Array_mapMUnsafe_map___at___private_Lean_Meta_Tactic_Grind_PP_0__Lean_Meta_Grind_ppExprArray___spec__1___closed__2(); -lean_mark_persistent(l_Array_mapMUnsafe_map___at___private_Lean_Meta_Tactic_Grind_PP_0__Lean_Meta_Grind_ppExprArray___spec__1___closed__2); 
+l_Lean_Meta_Grind_ppExprArray___closed__1 = _init_l_Lean_Meta_Grind_ppExprArray___closed__1(); l_List_forIn_x27_loop___at___private_Lean_Meta_Tactic_Grind_PP_0__Lean_Meta_Grind_ppEqcs___spec__3___closed__1 = _init_l_List_forIn_x27_loop___at___private_Lean_Meta_Tactic_Grind_PP_0__Lean_Meta_Grind_ppEqcs___spec__3___closed__1(); lean_mark_persistent(l_List_forIn_x27_loop___at___private_Lean_Meta_Tactic_Grind_PP_0__Lean_Meta_Grind_ppEqcs___spec__3___closed__1); l_List_forIn_x27_loop___at___private_Lean_Meta_Tactic_Grind_PP_0__Lean_Meta_Grind_ppEqcs___spec__3___closed__2 = _init_l_List_forIn_x27_loop___at___private_Lean_Meta_Tactic_Grind_PP_0__Lean_Meta_Grind_ppEqcs___spec__3___closed__2(); @@ -10020,6 +10080,8 @@ l___private_Lean_Meta_Tactic_Grind_PP_0__Lean_Meta_Grind_ppEqcs___closed__1 = _i lean_mark_persistent(l___private_Lean_Meta_Tactic_Grind_PP_0__Lean_Meta_Grind_ppEqcs___closed__1); l___private_Lean_Meta_Tactic_Grind_PP_0__Lean_Meta_Grind_ppEqcs___closed__2 = _init_l___private_Lean_Meta_Tactic_Grind_PP_0__Lean_Meta_Grind_ppEqcs___closed__2(); lean_mark_persistent(l___private_Lean_Meta_Tactic_Grind_PP_0__Lean_Meta_Grind_ppEqcs___closed__2); +l___private_Lean_Meta_Tactic_Grind_PP_0__Lean_Meta_Grind_ppEqcs___closed__3 = _init_l___private_Lean_Meta_Tactic_Grind_PP_0__Lean_Meta_Grind_ppEqcs___closed__3(); +lean_mark_persistent(l___private_Lean_Meta_Tactic_Grind_PP_0__Lean_Meta_Grind_ppEqcs___closed__3); l___private_Lean_Meta_Tactic_Grind_PP_0__Lean_Meta_Grind_ppEMatchTheorem___closed__1 = _init_l___private_Lean_Meta_Tactic_Grind_PP_0__Lean_Meta_Grind_ppEMatchTheorem___closed__1(); lean_mark_persistent(l___private_Lean_Meta_Tactic_Grind_PP_0__Lean_Meta_Grind_ppEMatchTheorem___closed__1); l___private_Lean_Meta_Tactic_Grind_PP_0__Lean_Meta_Grind_ppEMatchTheorem___closed__2 = _init_l___private_Lean_Meta_Tactic_Grind_PP_0__Lean_Meta_Grind_ppEMatchTheorem___closed__2(); diff --git a/stage0/stdlib/Lean/Server/CodeActions/Basic.c b/stage0/stdlib/Lean/Server/CodeActions/Basic.c index 5225fb9d9955..fcf08ac2e0a1 100644 --- a/stage0/stdlib/Lean/Server/CodeActions/Basic.c +++ b/stage0/stdlib/Lean/Server/CodeActions/Basic.c @@ -197,6 +197,7 @@ static lean_object* l_Lean_Server_initFn____x40_Lean_Server_CodeActions_Basic___ LEAN_EXPORT lean_object* l_Lean_Server_registerLspRequestHandler___at_Lean_Server_initFn____x40_Lean_Server_CodeActions_Basic___hyg_1538____spec__1___lambda__4___boxed(lean_object*, lean_object*, lean_object*, lean_object*); lean_object* l_Array_append___rarg(lean_object*, lean_object*); static lean_object* l___private_Lean_Server_CodeActions_Basic_0__Lean_Server_fromJsonCodeActionResolveData____x40_Lean_Server_CodeActions_Basic___hyg_125____closed__18; +lean_object* l_List_flatMapTR_go___at___private_Lean_Data_Lsp_Window_0__toJsonShowMessageParams____x40_Lean_Data_Lsp_Window___hyg_245____spec__1(lean_object*, lean_object*); LEAN_EXPORT lean_object* l_Lean_Server_initFn____x40_Lean_Server_CodeActions_Basic___hyg_517____lambda__4___boxed(lean_object*, lean_object*, lean_object*, lean_object*); LEAN_EXPORT lean_object* l_Lean_Server_initFn____x40_Lean_Server_CodeActions_Basic___hyg_517____lambda__4(lean_object*, lean_object*, lean_object*, lean_object*); LEAN_EXPORT lean_object* l_Array_mapFinIdxM_map___at_Lean_Server_handleCodeAction___spec__9___lambda__1(lean_object*, lean_object*, lean_object*, lean_object*, lean_object*, lean_object*, lean_object*); @@ -257,7 +258,6 @@ lean_object* l_Lean_registerBuiltinAttribute(lean_object*, lean_object*); static lean_object* 
l___private_Lean_Server_CodeActions_Basic_0__Lean_Server_fromJsonCodeActionResolveData____x40_Lean_Server_CodeActions_Basic___hyg_125____closed__12; static lean_object* l_Lean_Server_initFn____x40_Lean_Server_CodeActions_Basic___hyg_517____closed__26; lean_object* l___private_Lean_Data_Lsp_CodeActions_0__Lean_Lsp_fromJsonCodeActionParams____x40_Lean_Data_Lsp_CodeActions___hyg_390_(lean_object*); -lean_object* l_List_flatMapTR_go___at___private_Lean_Util_Paths_0__Lean_toJsonLeanPaths____x40_Lean_Util_Paths___hyg_55____spec__2(lean_object*, lean_object*); uint8_t l___private_Lean_Attributes_0__Lean_beqAttributeKind____x40_Lean_Attributes___hyg_162_(uint8_t, uint8_t); lean_object* l___private_Lean_ToExpr_0__Lean_Name_toExprAux(lean_object*); LEAN_EXPORT lean_object* l_Lean_RBNode_revFold___at_Lean_Server_handleCodeAction___spec__7(lean_object*, lean_object*); @@ -426,7 +426,7 @@ x_24 = lean_alloc_ctor(1, 2, 0); lean_ctor_set(x_24, 0, x_7); lean_ctor_set(x_24, 1, x_23); x_25 = l___private_Lean_Server_CodeActions_Basic_0__Lean_Server_toJsonCodeActionResolveData____x40_Lean_Server_CodeActions_Basic___hyg_59____closed__5; -x_26 = l_List_flatMapTR_go___at___private_Lean_Util_Paths_0__Lean_toJsonLeanPaths____x40_Lean_Util_Paths___hyg_55____spec__2(x_24, x_25); +x_26 = l_List_flatMapTR_go___at___private_Lean_Data_Lsp_Window_0__toJsonShowMessageParams____x40_Lean_Data_Lsp_Window___hyg_245____spec__1(x_24, x_25); x_27 = l_Lean_Json_mkObj(x_26); return x_27; } diff --git a/stage0/stdlib/Lean/Server/FileWorker.c b/stage0/stdlib/Lean/Server/FileWorker.c index c72f6df80497..469cc4143763 100644 --- a/stage0/stdlib/Lean/Server/FileWorker.c +++ b/stage0/stdlib/Lean/Server/FileWorker.c @@ -169,7 +169,6 @@ uint8_t l___private_Lean_Data_JsonRpc_0__Lean_JsonRpc_ordRequestID____x40_Lean_D static lean_object* l_Lean_RBNode_foldM___at_Lean_Server_FileWorker_mainLoop___spec__1___closed__2; LEAN_EXPORT lean_object* l_Lean_Server_FileWorker_setupImports___lambda__1(lean_object*, lean_object*, lean_object*, lean_object*); LEAN_EXPORT lean_object* l_Lean_Loop_forIn_loop___at_Lean_Server_FileWorker_runRefreshTasks___spec__4___lambda__6___boxed(lean_object*, lean_object*, lean_object*, lean_object*, lean_object*); -lean_object* l_Lean_Elab_headerToImports(lean_object*); static lean_object* l_IO_FS_Stream_readRequestAs___at_Lean_Server_FileWorker_initAndRunWorker___spec__2___closed__17; static lean_object* l_Lean_Server_FileWorker_initAndRunWorker___closed__5; static lean_object* l_IO_FS_Stream_readRequestAs___at_Lean_Server_FileWorker_initAndRunWorker___spec__2___closed__19; @@ -474,6 +473,7 @@ static lean_object* l_Lean_Server_FileWorker_setupImports___lambda__5___closed__ static lean_object* l___private_Lean_Server_FileWorker_0__Lean_Server_FileWorker_reportSnapshots___closed__1; static lean_object* l_Lean_Server_FileWorker_handlePostRequestSpecialCases___lambda__3___closed__1; LEAN_EXPORT lean_object* l_Lean_Server_FileWorker_sendServerRequest___at_Lean_Server_FileWorker_sendUntypedServerRequest___spec__1(lean_object*, lean_object*, lean_object*, lean_object*, lean_object*); +lean_object* l_Lean_Elab_HeaderSyntax_imports(lean_object*); LEAN_EXPORT lean_object* l_Lean_Server_FileWorker_emitRequestResponse(lean_object*, lean_object*, lean_object*, lean_object*, lean_object*, lean_object*, lean_object*); static lean_object* l_IO_FS_Stream_readRequestAs___at_Lean_Server_FileWorker_initAndRunWorker___spec__2___closed__27; static lean_object* l_Lean_Server_FileWorker_handleRequest___closed__1; @@ -544,7 +544,7 @@ extern 
lean_object* l_Task_Priority_dedicated; LEAN_EXPORT lean_object* l_Lean_Server_FileWorker_WorkerContext_initPendingServerRequest___at_Lean_Server_FileWorker_sendUntypedServerRequest___spec__2(lean_object*); static lean_object* l_IO_FS_Stream_readRequestAs___at_Lean_Server_FileWorker_initAndRunWorker___spec__2___closed__26; lean_object* l_Lean_Name_mkStr2(lean_object*, lean_object*); -LEAN_EXPORT lean_object* l_Lean_Server_FileWorker_setupImports___lambda__3___boxed(lean_object*, lean_object*, lean_object*, lean_object*, lean_object*, lean_object*); +LEAN_EXPORT lean_object* l_Lean_Server_FileWorker_setupImports___lambda__3___boxed(lean_object*, lean_object*, lean_object*, lean_object*, lean_object*, lean_object*, lean_object*); lean_object* l_Lean_Language_Lean_instToSnapshotTreeCommandParsedSnapshot_go(lean_object*); static lean_object* l_IO_FS_Stream_readRequestAs___at_Lean_Server_FileWorker_initAndRunWorker___spec__2___closed__31; lean_object* l_Lean_Environment_allImportedModuleNames(lean_object*); @@ -690,7 +690,7 @@ lean_object* lean_string_append(lean_object*, lean_object*); LEAN_EXPORT lean_object* l___private_Lean_Server_FileWorker_0__Lean_Server_FileWorker_reportSnapshots_handleTasks___lambda__2(lean_object*, lean_object*, lean_object*, lean_object*, lean_object*, lean_object*, lean_object*, lean_object*, lean_object*); LEAN_EXPORT lean_object* l_Lean_Loop_forIn_loop___at_Lean_Server_FileWorker_runRefreshTasks___spec__4(lean_object*, lean_object*, lean_object*, lean_object*, lean_object*, lean_object*, lean_object*); lean_object* lean_array_get_size(lean_object*); -LEAN_EXPORT lean_object* l_Lean_Server_FileWorker_setupImports___lambda__3(lean_object*, lean_object*, lean_object*, lean_object*, lean_object*, lean_object*); +LEAN_EXPORT lean_object* l_Lean_Server_FileWorker_setupImports___lambda__3(lean_object*, lean_object*, lean_object*, lean_object*, lean_object*, lean_object*, lean_object*); LEAN_EXPORT lean_object* l_Array_anyMUnsafe_any___at___private_Lean_Server_FileWorker_0__Lean_Server_FileWorker_reportSnapshots_handleTasks___spec__1(lean_object*, size_t, size_t, lean_object*, lean_object*); extern lean_object* l_Lean_Elab_inServer; LEAN_EXPORT lean_object* l_Lean_RBNode_erase___at_Lean_Server_FileWorker_mainLoop___spec__2(uint64_t, lean_object*); @@ -21339,36 +21339,41 @@ x_1 = l_Lean_Elab_inServer; return x_1; } } -LEAN_EXPORT lean_object* l_Lean_Server_FileWorker_setupImports___lambda__3(lean_object* x_1, lean_object* x_2, lean_object* x_3, lean_object* x_4, lean_object* x_5, lean_object* x_6) { +LEAN_EXPORT lean_object* l_Lean_Server_FileWorker_setupImports___lambda__3(lean_object* x_1, lean_object* x_2, lean_object* x_3, lean_object* x_4, lean_object* x_5, lean_object* x_6, lean_object* x_7) { _start: { -lean_object* x_7; lean_object* x_8; lean_object* x_9; lean_object* x_10; uint8_t x_11; lean_object* x_12; lean_object* x_13; lean_object* x_14; lean_object* x_15; uint32_t x_16; lean_object* x_17; lean_object* x_18; lean_object* x_19; lean_object* x_20; -x_7 = lean_ctor_get(x_1, 1); -lean_inc(x_7); -x_8 = l_Lean_Server_FileWorker_setupImports___lambda__3___closed__1; -x_9 = l_Lean_KVMap_mergeBy(x_8, x_2, x_7); -x_10 = l_Lean_Server_FileWorker_setupImports___lambda__3___closed__2; -x_11 = 1; -x_12 = l_Lean_Option_setIfNotSet___at_Lean_Language_Lean_process_processHeader___spec__2(x_9, x_10, x_11); -x_13 = l_Lean_Server_FileWorker_setupImports___lambda__3___closed__3; -x_14 = l_Lean_Option_set___at_Lean_Environment_realizeConst___spec__3(x_12, x_13, x_11); -x_15 = 
lean_ctor_get(x_3, 1); -x_16 = 0; -x_17 = lean_ctor_get(x_1, 2); -lean_inc(x_17); +lean_object* x_8; lean_object* x_9; lean_object* x_10; lean_object* x_11; uint8_t x_12; lean_object* x_13; lean_object* x_14; lean_object* x_15; lean_object* x_16; uint32_t x_17; lean_object* x_18; lean_object* x_19; uint8_t x_20; lean_object* x_21; lean_object* x_22; lean_object* x_23; +x_8 = lean_ctor_get(x_1, 1); +lean_inc(x_8); +x_9 = l_Lean_Server_FileWorker_setupImports___lambda__3___closed__1; +x_10 = l_Lean_KVMap_mergeBy(x_9, x_2, x_8); +x_11 = l_Lean_Server_FileWorker_setupImports___lambda__3___closed__2; +x_12 = 1; +x_13 = l_Lean_Option_setIfNotSet___at_Lean_Language_Lean_process_processHeader___spec__2(x_10, x_11, x_12); +x_14 = l_Lean_Server_FileWorker_setupImports___lambda__3___closed__3; +x_15 = l_Lean_Option_set___at_Lean_Environment_realizeConst___spec__3(x_13, x_14, x_12); +x_16 = lean_ctor_get(x_3, 1); +x_17 = 0; +x_18 = lean_box(0); +x_19 = lean_ctor_get(x_1, 2); +lean_inc(x_19); lean_dec(x_1); -lean_inc(x_15); -x_18 = lean_alloc_ctor(0, 3, 4); -lean_ctor_set(x_18, 0, x_15); -lean_ctor_set(x_18, 1, x_14); -lean_ctor_set(x_18, 2, x_17); -lean_ctor_set_uint32(x_18, sizeof(void*)*3, x_16); -x_19 = lean_alloc_ctor(1, 1, 0); -lean_ctor_set(x_19, 0, x_18); -x_20 = lean_alloc_ctor(0, 2, 0); -lean_ctor_set(x_20, 0, x_19); -lean_ctor_set(x_20, 1, x_6); -return x_20; +x_20 = 0; +lean_inc(x_16); +x_21 = lean_alloc_ctor(0, 5, 5); +lean_ctor_set(x_21, 0, x_16); +lean_ctor_set(x_21, 1, x_4); +lean_ctor_set(x_21, 2, x_15); +lean_ctor_set(x_21, 3, x_18); +lean_ctor_set(x_21, 4, x_19); +lean_ctor_set_uint8(x_21, sizeof(void*)*5 + 4, x_20); +lean_ctor_set_uint32(x_21, sizeof(void*)*5, x_17); +x_22 = lean_alloc_ctor(1, 1, 0); +lean_ctor_set(x_22, 0, x_21); +x_23 = lean_alloc_ctor(0, 2, 0); +lean_ctor_set(x_23, 0, x_22); +lean_ctor_set(x_23, 1, x_7); +return x_23; } } LEAN_EXPORT uint8_t l_Lean_Server_FileWorker_setupImports___lambda__4(lean_object* x_1) { @@ -21478,12 +21483,13 @@ LEAN_EXPORT lean_object* l_Lean_Server_FileWorker_setupImports___lambda__5(lean_ _start: { lean_object* x_8; lean_object* x_9; lean_object* x_10; -x_8 = l_Lean_Elab_headerToImports(x_1); +x_8 = l_Lean_Elab_HeaderSyntax_imports(x_1); lean_inc(x_3); lean_inc(x_2); x_9 = lean_alloc_closure((void*)(l_Lean_Server_FileWorker_setupImports___lambda__1), 4, 2); lean_closure_set(x_9, 0, x_2); lean_closure_set(x_9, 1, x_3); +lean_inc(x_8); lean_inc(x_2); x_10 = l_Lean_Server_FileWorker_setupFile(x_2, x_8, x_9, x_7); if (lean_obj_tag(x_10) == 0) @@ -21545,6 +21551,7 @@ case 2: { lean_object* x_15; lean_object* x_16; uint8_t x_17; lean_dec(x_11); +lean_dec(x_8); lean_dec(x_4); lean_dec(x_2); x_15 = l_Lean_Server_FileWorker_setupImports___lambda__5___closed__1; @@ -21605,6 +21612,7 @@ case 3: { uint8_t x_36; lean_dec(x_11); +lean_dec(x_8); lean_dec(x_4); lean_dec(x_2); x_36 = !lean_is_exclusive(x_14); @@ -21714,7 +21722,7 @@ return x_68; lean_object* x_69; lean_object* x_70; lean_dec(x_14); x_69 = lean_box(0); -x_70 = l_Lean_Server_FileWorker_setupImports___lambda__3(x_11, x_4, x_2, x_69, x_6, x_13); +x_70 = l_Lean_Server_FileWorker_setupImports___lambda__3(x_11, x_4, x_2, x_8, x_69, x_6, x_13); lean_dec(x_6); lean_dec(x_2); return x_70; @@ -21725,6 +21733,7 @@ return x_70; else { uint8_t x_87; +lean_dec(x_8); lean_dec(x_6); lean_dec(x_4); lean_dec(x_3); @@ -21928,15 +21937,15 @@ lean_dec(x_1); return x_4; } } -LEAN_EXPORT lean_object* l_Lean_Server_FileWorker_setupImports___lambda__3___boxed(lean_object* x_1, lean_object* x_2, lean_object* x_3, 
lean_object* x_4, lean_object* x_5, lean_object* x_6) { +LEAN_EXPORT lean_object* l_Lean_Server_FileWorker_setupImports___lambda__3___boxed(lean_object* x_1, lean_object* x_2, lean_object* x_3, lean_object* x_4, lean_object* x_5, lean_object* x_6, lean_object* x_7) { _start: { -lean_object* x_7; -x_7 = l_Lean_Server_FileWorker_setupImports___lambda__3(x_1, x_2, x_3, x_4, x_5, x_6); +lean_object* x_8; +x_8 = l_Lean_Server_FileWorker_setupImports___lambda__3(x_1, x_2, x_3, x_4, x_5, x_6, x_7); +lean_dec(x_6); lean_dec(x_5); -lean_dec(x_4); lean_dec(x_3); -return x_7; +return x_8; } } LEAN_EXPORT lean_object* l_Lean_Server_FileWorker_setupImports___lambda__4___boxed(lean_object* x_1) { diff --git a/stage0/stdlib/Lean/Server/FileWorker/SemanticHighlighting.c b/stage0/stdlib/Lean/Server/FileWorker/SemanticHighlighting.c index fec388ed66d3..b28c4d374261 100644 --- a/stage0/stdlib/Lean/Server/FileWorker/SemanticHighlighting.c +++ b/stage0/stdlib/Lean/Server/FileWorker/SemanticHighlighting.c @@ -242,6 +242,7 @@ LEAN_EXPORT lean_object* l_Lean_Server_FileWorker_collectInfoBasedSemanticTokens lean_object* l_Array_append___rarg(lean_object*, lean_object*); LEAN_EXPORT lean_object* l_Lean_Server_FileWorker_handleSemanticTokensRange___lambda__1___boxed(lean_object*, lean_object*); LEAN_EXPORT lean_object* l_List_forIn_x27_loop___at_Lean_Server_FileWorker_computeSemanticTokens___spec__1___boxed(lean_object*, lean_object*, lean_object*, lean_object*, lean_object*, lean_object*, lean_object*, lean_object*, lean_object*); +lean_object* l_List_flatMapTR_go___at___private_Lean_Data_Lsp_Window_0__toJsonShowMessageParams____x40_Lean_Data_Lsp_Window___hyg_245____spec__1(lean_object*, lean_object*); LEAN_EXPORT lean_object* l_Array_qsort_sort___at_Lean_Server_FileWorker_computeDeltaLspSemanticTokens___spec__2___boxed(lean_object*, lean_object*, lean_object*, lean_object*, lean_object*, lean_object*); static lean_object* l_Lean_Server_FileWorker_collectSyntaxBasedSemanticTokens___lambda__2___closed__3; uint8_t l_Lean_LocalDecl_isImplementationDetail(lean_object*); @@ -318,7 +319,6 @@ lean_object* l_List_foldl___at_Array_appendList___spec__1___rarg(lean_object*, l LEAN_EXPORT lean_object* l___private_Lean_Server_Requests_0__Lean_Server_overrideStatefulLspRequestHandler___at_Lean_Server_FileWorker_initFn____x40_Lean_Server_FileWorker_SemanticHighlighting___hyg_2982____spec__6___lambda__3___boxed(lean_object*, lean_object*, lean_object*, lean_object*, lean_object*, lean_object*, lean_object*, lean_object*); LEAN_EXPORT lean_object* l_Lean_Server_registerPartialStatefulLspRequestHandler___at_Lean_Server_FileWorker_initFn____x40_Lean_Server_FileWorker_SemanticHighlighting___hyg_2982____spec__4(lean_object*, lean_object*, lean_object*, lean_object*, lean_object*, lean_object*, lean_object*, lean_object*, lean_object*); LEAN_EXPORT lean_object* l_Lean_Server_FileWorker_computeAbsoluteLspSemanticTokens___boxed(lean_object*, lean_object*, lean_object*, lean_object*); -lean_object* l_List_flatMapTR_go___at___private_Lean_Util_Paths_0__Lean_toJsonLeanPaths____x40_Lean_Util_Paths___hyg_55____spec__2(lean_object*, lean_object*); static lean_object* l_Lean_Server_FileWorker_keywordSemanticTokenMap___closed__2; lean_object* l_List_reverse___rarg(lean_object*); LEAN_EXPORT lean_object* l_Lean_RBNode_find___at_Lean_Server_FileWorker_collectSyntaxBasedSemanticTokens___spec__1(lean_object*, lean_object*); @@ -3685,7 +3685,7 @@ x_20 = lean_alloc_ctor(1, 2, 0); lean_ctor_set(x_20, 0, x_7); lean_ctor_set(x_20, 1, x_19); x_21 = 
l___private_Lean_Server_FileWorker_SemanticHighlighting_0__Lean_Server_FileWorker_toJsonAbsoluteLspSemanticToken____x40_Lean_Server_FileWorker_SemanticHighlighting___hyg_381____closed__1; -x_22 = l_List_flatMapTR_go___at___private_Lean_Util_Paths_0__Lean_toJsonLeanPaths____x40_Lean_Util_Paths___hyg_55____spec__2(x_20, x_21); +x_22 = l_List_flatMapTR_go___at___private_Lean_Data_Lsp_Window_0__toJsonShowMessageParams____x40_Lean_Data_Lsp_Window___hyg_245____spec__1(x_20, x_21); x_23 = l_Lean_Json_mkObj(x_22); return x_23; } diff --git a/stage0/stdlib/Lean/Server/FileWorker/WidgetRequests.c b/stage0/stdlib/Lean/Server/FileWorker/WidgetRequests.c index bb5a070ab3db..7c3463127992 100644 --- a/stage0/stdlib/Lean/Server/FileWorker/WidgetRequests.c +++ b/stage0/stdlib/Lean/Server/FileWorker/WidgetRequests.c @@ -218,6 +218,7 @@ static lean_object* l_Lean_Widget_initFn____x40_Lean_Server_FileWorker_WidgetReq LEAN_EXPORT lean_object* l_Lean_Server_registerBuiltinRpcProcedure___at_Lean_Widget_initFn____x40_Lean_Server_FileWorker_WidgetRequests___hyg_1595____spec__1(lean_object*, lean_object*, lean_object*); lean_object* l___private_Lean_Server_GoTo_0__Lean_Server_fromJsonGoToKind____x40_Lean_Server_GoTo___hyg_67_(lean_object*); lean_object* l_Lean_Elab_Info_docString_x3f(lean_object*, lean_object*, lean_object*, lean_object*, lean_object*, lean_object*); +lean_object* l_List_flatMapTR_go___at___private_Lean_Data_Lsp_Window_0__toJsonShowMessageParams____x40_Lean_Data_Lsp_Window___hyg_245____spec__1(lean_object*, lean_object*); LEAN_EXPORT lean_object* l_Lean_Widget_makePopup___lambda__3(lean_object*, lean_object*, lean_object*, lean_object*, lean_object*, lean_object*); LEAN_EXPORT lean_object* l_Array_mapMUnsafe_map___at_Lean_Widget_lazyTraceChildrenToInteractive___spec__1(lean_object*, size_t, size_t, lean_object*, lean_object*, lean_object*); static lean_object* l_Lean_Widget_initFn____x40_Lean_Server_FileWorker_WidgetRequests___hyg_1575____closed__1; @@ -277,7 +278,6 @@ LEAN_EXPORT lean_object* l_Lean_Widget_makePopup___lambda__1(lean_object*, lean_ LEAN_EXPORT lean_object* l_Lean_Server_registerBuiltinRpcProcedure___at_Lean_Widget_initFn____x40_Lean_Server_FileWorker_WidgetRequests___hyg_2131____spec__2___lambda__1___boxed(lean_object*, lean_object*, lean_object*, lean_object*); LEAN_EXPORT lean_object* l_Lean_Json_opt___at___private_Lean_Server_FileWorker_WidgetRequests_0__Lean_Widget_toJsonGetInteractiveDiagnosticsParams____x40_Lean_Server_FileWorker_WidgetRequests___hyg_1725____spec__1(lean_object*, lean_object*); LEAN_EXPORT lean_object* l_Lean_Widget_initFn____x40_Lean_Server_FileWorker_WidgetRequests___hyg_1575_(lean_object*); -lean_object* l_List_flatMapTR_go___at___private_Lean_Util_Paths_0__Lean_toJsonLeanPaths____x40_Lean_Util_Paths___hyg_55____spec__2(lean_object*, lean_object*); lean_object* l_Lean_Widget_msgToInteractive(lean_object*, uint8_t, lean_object*, lean_object*); static lean_object* l___private_Lean_Server_FileWorker_WidgetRequests_0__Lean_Widget_fromJsonRpcEncodablePacket____x40_Lean_Server_FileWorker_WidgetRequests___hyg_58____closed__16; lean_object* lean_array_mk(lean_object*); @@ -792,7 +792,7 @@ x_12 = lean_alloc_ctor(1, 2, 0); lean_ctor_set(x_12, 0, x_7); lean_ctor_set(x_12, 1, x_11); x_13 = l___private_Lean_Server_FileWorker_WidgetRequests_0__Lean_Widget_toJsonRpcEncodablePacket____x40_Lean_Server_FileWorker_WidgetRequests___hyg_204____closed__1; -x_14 = 
l_List_flatMapTR_go___at___private_Lean_Util_Paths_0__Lean_toJsonLeanPaths____x40_Lean_Util_Paths___hyg_55____spec__2(x_12, x_13); +x_14 = l_List_flatMapTR_go___at___private_Lean_Data_Lsp_Window_0__toJsonShowMessageParams____x40_Lean_Data_Lsp_Window___hyg_245____spec__1(x_12, x_13); x_15 = l_Lean_Json_mkObj(x_14); return x_15; } @@ -826,7 +826,7 @@ x_26 = lean_alloc_ctor(1, 2, 0); lean_ctor_set(x_26, 0, x_21); lean_ctor_set(x_26, 1, x_25); x_27 = l___private_Lean_Server_FileWorker_WidgetRequests_0__Lean_Widget_toJsonRpcEncodablePacket____x40_Lean_Server_FileWorker_WidgetRequests___hyg_204____closed__1; -x_28 = l_List_flatMapTR_go___at___private_Lean_Util_Paths_0__Lean_toJsonLeanPaths____x40_Lean_Util_Paths___hyg_55____spec__2(x_26, x_27); +x_28 = l_List_flatMapTR_go___at___private_Lean_Data_Lsp_Window_0__toJsonShowMessageParams____x40_Lean_Data_Lsp_Window___hyg_245____spec__1(x_26, x_27); x_29 = l_Lean_Json_mkObj(x_28); return x_29; } @@ -1733,7 +1733,7 @@ x_17 = lean_alloc_ctor(1, 2, 0); lean_ctor_set(x_17, 0, x_8); lean_ctor_set(x_17, 1, x_16); x_18 = l___private_Lean_Server_FileWorker_WidgetRequests_0__Lean_Widget_toJsonRpcEncodablePacket____x40_Lean_Server_FileWorker_WidgetRequests___hyg_204____closed__1; -x_19 = l_List_flatMapTR_go___at___private_Lean_Util_Paths_0__Lean_toJsonLeanPaths____x40_Lean_Util_Paths___hyg_55____spec__2(x_17, x_18); +x_19 = l_List_flatMapTR_go___at___private_Lean_Data_Lsp_Window_0__toJsonShowMessageParams____x40_Lean_Data_Lsp_Window___hyg_245____spec__1(x_17, x_18); x_20 = l_Lean_Json_mkObj(x_19); return x_20; } @@ -4602,7 +4602,7 @@ x_5 = lean_alloc_ctor(1, 2, 0); lean_ctor_set(x_5, 0, x_3); lean_ctor_set(x_5, 1, x_4); x_6 = l___private_Lean_Server_FileWorker_WidgetRequests_0__Lean_Widget_toJsonRpcEncodablePacket____x40_Lean_Server_FileWorker_WidgetRequests___hyg_204____closed__1; -x_7 = l_List_flatMapTR_go___at___private_Lean_Util_Paths_0__Lean_toJsonLeanPaths____x40_Lean_Util_Paths___hyg_55____spec__2(x_5, x_6); +x_7 = l_List_flatMapTR_go___at___private_Lean_Data_Lsp_Window_0__toJsonShowMessageParams____x40_Lean_Data_Lsp_Window___hyg_245____spec__1(x_5, x_6); x_8 = l_Lean_Json_mkObj(x_7); return x_8; } @@ -4837,7 +4837,7 @@ x_12 = lean_alloc_ctor(1, 2, 0); lean_ctor_set(x_12, 0, x_7); lean_ctor_set(x_12, 1, x_11); x_13 = l___private_Lean_Server_FileWorker_WidgetRequests_0__Lean_Widget_toJsonRpcEncodablePacket____x40_Lean_Server_FileWorker_WidgetRequests___hyg_204____closed__1; -x_14 = l_List_flatMapTR_go___at___private_Lean_Util_Paths_0__Lean_toJsonLeanPaths____x40_Lean_Util_Paths___hyg_55____spec__2(x_12, x_13); +x_14 = l_List_flatMapTR_go___at___private_Lean_Data_Lsp_Window_0__toJsonShowMessageParams____x40_Lean_Data_Lsp_Window___hyg_245____spec__1(x_12, x_13); x_15 = l_Lean_Json_mkObj(x_14); return x_15; } @@ -4871,7 +4871,7 @@ x_26 = lean_alloc_ctor(1, 2, 0); lean_ctor_set(x_26, 0, x_21); lean_ctor_set(x_26, 1, x_25); x_27 = l___private_Lean_Server_FileWorker_WidgetRequests_0__Lean_Widget_toJsonRpcEncodablePacket____x40_Lean_Server_FileWorker_WidgetRequests___hyg_204____closed__1; -x_28 = l_List_flatMapTR_go___at___private_Lean_Util_Paths_0__Lean_toJsonLeanPaths____x40_Lean_Util_Paths___hyg_55____spec__2(x_26, x_27); +x_28 = l_List_flatMapTR_go___at___private_Lean_Data_Lsp_Window_0__toJsonShowMessageParams____x40_Lean_Data_Lsp_Window___hyg_245____spec__1(x_26, x_27); x_29 = l_Lean_Json_mkObj(x_28); return x_29; } diff --git a/stage0/stdlib/Lean/Setup.c b/stage0/stdlib/Lean/Setup.c new file mode 100644 index 000000000000..1c8f4d5215e4 --- /dev/null +++ 
b/stage0/stdlib/Lean/Setup.c @@ -0,0 +1,6663 @@ +// Lean compiler output +// Module: Lean.Setup +// Imports: Lean.Data.Json Lean.Util.LeanOptions +#include <lean/lean.h> +#if defined(__clang__) +#pragma clang diagnostic ignored "-Wunused-parameter" +#pragma clang diagnostic ignored "-Wunused-label" +#elif defined(__GNUC__) && !defined(__CLANG__) +#pragma GCC diagnostic ignored "-Wunused-parameter" +#pragma GCC diagnostic ignored "-Wunused-label" +#pragma GCC diagnostic ignored "-Wunused-but-set-variable" +#endif +#ifdef __cplusplus +extern "C" { +#endif +lean_object* l_Lean_JsonNumber_fromNat(lean_object*); +LEAN_EXPORT lean_object* l_Option_repr___at___private_Lean_Setup_0__Lean_reprModuleArtifacts____x40_Lean_Setup___hyg_417____spec__1(lean_object*, lean_object*); +static lean_object* l_List_repr___at___private_Lean_Setup_0__Lean_reprModuleSetup____x40_Lean_Setup___hyg_913____spec__4___closed__5; +static lean_object* l___private_Lean_Setup_0__Lean_reprImport____x40_Lean_Setup___hyg_34____closed__28; +static lean_object* l___private_Lean_Setup_0__Lean_fromJsonModuleSetup____x40_Lean_Setup___hyg_1209____closed__25; +static lean_object* l___private_Lean_Setup_0__Lean_fromJsonModuleArtifacts____x40_Lean_Setup___hyg_593____closed__17; +lean_object* l_Lean_Name_reprPrec(lean_object*, lean_object*); +static lean_object* l___private_Lean_Setup_0__Lean_fromJsonModuleSetup____x40_Lean_Setup___hyg_1209____closed__18; +static lean_object* l_Lean_instReprModuleSetup___closed__1; +LEAN_EXPORT lean_object* l___private_Lean_Setup_0__Lean_reprImport____x40_Lean_Setup___hyg_34_(lean_object*, lean_object*); +static lean_object* l_Prod_repr___at___private_Lean_Setup_0__Lean_reprModuleSetup____x40_Lean_Setup___hyg_913____spec__6___closed__5; +static lean_object* l___private_Lean_Setup_0__Lean_reprImport____x40_Lean_Setup___hyg_34____closed__18; +static lean_object* l___private_Lean_Setup_0__Lean_fromJsonImport____x40_Lean_Setup___hyg_190____closed__12; +static lean_object* l_Option_repr___at___private_Lean_Setup_0__Lean_reprModuleArtifacts____x40_Lean_Setup___hyg_417____spec__1___closed__1; +static lean_object* l___private_Lean_Setup_0__Lean_reprModuleArtifacts____x40_Lean_Setup___hyg_417____closed__13; +static lean_object* l___private_Lean_Setup_0__Lean_reprModuleSetup____x40_Lean_Setup___hyg_913____closed__21; +lean_object* l_Lean_Json_getObjValAs_x3f___at___private_Lean_Util_Paths_0__Lean_fromJsonLeanPaths____x40_Lean_Util_Paths___hyg_135____spec__3(lean_object*, lean_object*); +static lean_object* l_Lean_instInhabitedModuleSetup___closed__2; +LEAN_EXPORT lean_object* l_Std_Format_joinSep___at___private_Lean_Setup_0__Lean_reprModuleSetup____x40_Lean_Setup___hyg_913____spec__12(lean_object*, lean_object*); +static lean_object* l_Lean_instInhabitedModuleArtifacts___closed__1; +static lean_object* l___private_Lean_Setup_0__Lean_fromJsonModuleSetup____x40_Lean_Setup___hyg_1209____closed__4; +static lean_object* l___private_Lean_Setup_0__Lean_reprImport____x40_Lean_Setup___hyg_34____closed__24; +LEAN_EXPORT lean_object* l_repr___at___private_Lean_Setup_0__Lean_reprModuleSetup____x40_Lean_Setup___hyg_913____spec__5(lean_object*); +static lean_object* l___private_Lean_Setup_0__Lean_reprImport____x40_Lean_Setup___hyg_34____closed__21; +lean_object* l___private_Lean_Util_LeanOptions_0__Lean_reprLeanOptions____x40_Lean_Util_LeanOptions___hyg_541_(lean_object*, lean_object*); +lean_object* l_Lean_Json_mkObj(lean_object*); +static lean_object* 
l___private_Lean_Setup_0__Lean_fromJsonModuleArtifacts____x40_Lean_Setup___hyg_593____closed__14; +static lean_object* l___private_Lean_Setup_0__Lean_toJsonModuleArtifacts____x40_Lean_Setup___hyg_549____closed__3; +LEAN_EXPORT lean_object* l_Lean_RBNode_fold___at___private_Lean_Setup_0__Lean_toJsonModuleSetup____x40_Lean_Setup___hyg_1087____spec__2___at___private_Lean_Setup_0__Lean_toJsonModuleSetup____x40_Lean_Setup___hyg_1087____spec__3(lean_object*, lean_object*); +static lean_object* l___private_Lean_Setup_0__Lean_reprImport____x40_Lean_Setup___hyg_34____closed__13; +static lean_object* l___private_Lean_Setup_0__Lean_reprModuleSetup____x40_Lean_Setup___hyg_913____closed__3; +static lean_object* l___private_Lean_Setup_0__Lean_reprModuleSetup____x40_Lean_Setup___hyg_913____closed__27; +LEAN_EXPORT lean_object* l_Lean_Json_getObjValAs_x3f___at___private_Lean_Setup_0__Lean_fromJsonImport____x40_Lean_Setup___hyg_190____spec__2___boxed(lean_object*, lean_object*); +static lean_object* l___private_Lean_Setup_0__Lean_reprModuleSetup____x40_Lean_Setup___hyg_913____closed__9; +LEAN_EXPORT lean_object* l_Lean_Json_getObjValAs_x3f___at___private_Lean_Setup_0__Lean_fromJsonImport____x40_Lean_Setup___hyg_190____spec__1___boxed(lean_object*, lean_object*); +static lean_object* l___private_Lean_Setup_0__Lean_fromJsonModuleSetup____x40_Lean_Setup___hyg_1209____closed__16; +static lean_object* l___private_Lean_Setup_0__Lean_fromJsonModuleSetup____x40_Lean_Setup___hyg_1209____closed__23; +static lean_object* l___private_Lean_Setup_0__Lean_reprImport____x40_Lean_Setup___hyg_34____closed__17; +lean_object* l_String_quote(lean_object*); +lean_object* l_Array_mapMUnsafe_map___at___private_Lean_Util_Paths_0__Lean_toJsonLeanPaths____x40_Lean_Util_Paths___hyg_55____spec__1(size_t, size_t, lean_object*); +lean_object* l_Lean_Name_toString(lean_object*, uint8_t, lean_object*); +static lean_object* l___private_Lean_Setup_0__Lean_fromJsonImport____x40_Lean_Setup___hyg_190____closed__7; +static lean_object* l___private_Lean_Setup_0__Lean_reprImport____x40_Lean_Setup___hyg_34____closed__16; +LEAN_EXPORT lean_object* l___private_Lean_Setup_0__Lean_reprImport____x40_Lean_Setup___hyg_34____boxed(lean_object*, lean_object*); +static lean_object* l_Prod_repr___at___private_Lean_Setup_0__Lean_reprModuleSetup____x40_Lean_Setup___hyg_913____spec__6___closed__4; +static lean_object* l___private_Lean_Setup_0__Lean_reprImport____x40_Lean_Setup___hyg_34____closed__23; +LEAN_EXPORT lean_object* l_Lean_instFromJsonModuleSetup; +uint8_t l_Lean_Name_isAnonymous(lean_object*); +LEAN_EXPORT lean_object* l_Array_mapMUnsafe_map___at___private_Lean_Setup_0__Lean_toJsonModuleSetup____x40_Lean_Setup___hyg_1087____spec__1___boxed(lean_object*, lean_object*, lean_object*); +LEAN_EXPORT lean_object* l_List_repr___at___private_Lean_Setup_0__Lean_reprModuleSetup____x40_Lean_Setup___hyg_913____spec__4___boxed(lean_object*, lean_object*); +static lean_object* l___private_Lean_Setup_0__Lean_fromJsonModuleArtifacts____x40_Lean_Setup___hyg_593____closed__11; +static lean_object* l___private_Lean_Setup_0__Lean_reprModuleSetup____x40_Lean_Setup___hyg_913____closed__15; +static lean_object* l___private_Lean_Setup_0__Lean_reprModuleSetup____x40_Lean_Setup___hyg_913____closed__6; +static lean_object* l_Lean_instFromJsonModuleArtifacts___closed__1; +static lean_object* l___private_Lean_Setup_0__Lean_reprImport____x40_Lean_Setup___hyg_34____closed__6; +static lean_object* 
l___private_Lean_Setup_0__Lean_fromJsonModuleArtifacts____x40_Lean_Setup___hyg_593____closed__10; +LEAN_EXPORT lean_object* l_Prod_repr___at___private_Lean_Setup_0__Lean_reprModuleSetup____x40_Lean_Setup___hyg_913____spec__6(lean_object*, lean_object*); +static lean_object* l___private_Lean_Setup_0__Lean_fromJsonModuleSetup____x40_Lean_Setup___hyg_1209____closed__22; +static lean_object* l___private_Lean_Setup_0__Lean_fromJsonModuleSetup____x40_Lean_Setup___hyg_1209____closed__1; +LEAN_EXPORT uint8_t l___private_Lean_Setup_0__Lean_toJsonImport____x40_Lean_Setup___hyg_124____lambda__1(lean_object*); +static lean_object* l_List_repr___at___private_Lean_Setup_0__Lean_reprModuleSetup____x40_Lean_Setup___hyg_913____spec__4___closed__7; +static lean_object* l_Lean_Json_getObjValAs_x3f___at___private_Lean_Setup_0__Lean_fromJsonImport____x40_Lean_Setup___hyg_190____spec__1___closed__4; +LEAN_EXPORT lean_object* l_Lean_Json_getObjValAs_x3f___at___private_Lean_Setup_0__Lean_fromJsonModuleSetup____x40_Lean_Setup___hyg_1209____spec__5___boxed(lean_object*, lean_object*); +static lean_object* l___private_Lean_Setup_0__Lean_reprImport____x40_Lean_Setup___hyg_34____closed__10; +static lean_object* l___private_Lean_Setup_0__Lean_reprModuleSetup____x40_Lean_Setup___hyg_913____closed__1; +static lean_object* l___private_Lean_Setup_0__Lean_fromJsonModuleArtifacts____x40_Lean_Setup___hyg_593____closed__5; +static lean_object* l_List_repr___at___private_Lean_Setup_0__Lean_reprModuleSetup____x40_Lean_Setup___hyg_913____spec__4___closed__6; +static lean_object* l___private_Lean_Setup_0__Lean_reprModuleSetup____x40_Lean_Setup___hyg_913____closed__20; +static lean_object* l___private_Lean_Setup_0__Lean_reprModuleSetup____x40_Lean_Setup___hyg_913____closed__10; +static lean_object* l_List_repr___at___private_Lean_Setup_0__Lean_reprModuleSetup____x40_Lean_Setup___hyg_913____spec__4___closed__2; +LEAN_EXPORT lean_object* l___private_Lean_Setup_0__Lean_reprModuleArtifacts____x40_Lean_Setup___hyg_417____boxed(lean_object*, lean_object*); +LEAN_EXPORT lean_object* l_Lean_Json_getObjValAs_x3f___at___private_Lean_Setup_0__Lean_fromJsonImport____x40_Lean_Setup___hyg_190____spec__1___lambda__1___boxed(lean_object*, lean_object*); +LEAN_EXPORT lean_object* l_Array_mapMUnsafe_map___at___private_Lean_Setup_0__Lean_toJsonModuleSetup____x40_Lean_Setup___hyg_1087____spec__1(size_t, size_t, lean_object*); +LEAN_EXPORT lean_object* l___private_Lean_Setup_0__Lean_toJsonModuleArtifacts____x40_Lean_Setup___hyg_549_(lean_object*); +LEAN_EXPORT lean_object* l_Array_mapMUnsafe_map___at___private_Lean_Setup_0__Lean_fromJsonModuleSetup____x40_Lean_Setup___hyg_1209____spec__2(size_t, size_t, lean_object*); +uint8_t lean_string_dec_eq(lean_object*, lean_object*); +static lean_object* l___private_Lean_Setup_0__Lean_fromJsonModuleArtifacts____x40_Lean_Setup___hyg_593____closed__21; +static lean_object* l_Prod_repr___at___private_Lean_Setup_0__Lean_reprModuleSetup____x40_Lean_Setup___hyg_913____spec__6___closed__1; +static lean_object* l___private_Lean_Setup_0__Lean_fromJsonModuleSetup____x40_Lean_Setup___hyg_1209____closed__19; +static lean_object* l___private_Lean_Setup_0__Lean_reprModuleSetup____x40_Lean_Setup___hyg_913____closed__11; +static lean_object* l_Lean_instInhabitedImport___closed__1; +lean_object* l_Lean_Json_getBool_x3f(lean_object*); +LEAN_EXPORT lean_object* l_Lean_instFromJsonModuleArtifacts; +LEAN_EXPORT lean_object* 
l_List_foldl___at___private_Lean_Setup_0__Lean_reprModuleSetup____x40_Lean_Setup___hyg_913____spec__11(lean_object*, lean_object*, lean_object*); +LEAN_EXPORT lean_object* l_Lean_Json_opt___at___private_Lean_Setup_0__Lean_toJsonModuleArtifacts____x40_Lean_Setup___hyg_549____spec__1(lean_object*, lean_object*); +lean_object* l_Lean_RBNode_foldM___at_Lean_instFromJsonLeanOptions___spec__1(lean_object*, lean_object*); +static lean_object* l___private_Lean_Setup_0__Lean_fromJsonImport____x40_Lean_Setup___hyg_190____closed__16; +static lean_object* l___private_Lean_Setup_0__Lean_reprModuleArtifacts____x40_Lean_Setup___hyg_417____closed__4; +static lean_object* l_Option_repr___at___private_Lean_Setup_0__Lean_reprModuleArtifacts____x40_Lean_Setup___hyg_417____spec__1___closed__2; +lean_object* l_Lean_Json_getStr_x3f(lean_object*); +static lean_object* l___private_Lean_Setup_0__Lean_fromJsonImport____x40_Lean_Setup___hyg_190____closed__8; +LEAN_EXPORT lean_object* l_Lean_instReprModuleSetup; +LEAN_EXPORT lean_object* l_Lean_Json_getObjValAs_x3f___at___private_Lean_Setup_0__Lean_fromJsonModuleSetup____x40_Lean_Setup___hyg_1209____spec__1___boxed(lean_object*, lean_object*); +LEAN_EXPORT lean_object* l_Array_mapMUnsafe_map___at___private_Lean_Setup_0__Lean_fromJsonModuleSetup____x40_Lean_Setup___hyg_1209____spec__2___boxed(lean_object*, lean_object*, lean_object*); +static lean_object* l___private_Lean_Setup_0__Lean_fromJsonModuleSetup____x40_Lean_Setup___hyg_1209____closed__12; +static lean_object* l___private_Lean_Setup_0__Lean_reprModuleArtifacts____x40_Lean_Setup___hyg_417____closed__14; +static lean_object* l_Option_repr___at___private_Lean_Setup_0__Lean_reprModuleArtifacts____x40_Lean_Setup___hyg_417____spec__1___closed__5; +LEAN_EXPORT lean_object* l_Prod_repr___at___private_Lean_Setup_0__Lean_reprModuleSetup____x40_Lean_Setup___hyg_913____spec__6___boxed(lean_object*, lean_object*); +LEAN_EXPORT lean_object* l_Lean_RBNode_foldM___at___private_Lean_Setup_0__Lean_fromJsonModuleSetup____x40_Lean_Setup___hyg_1209____spec__4(lean_object*, lean_object*); +LEAN_EXPORT lean_object* l_List_repr___at___private_Lean_Setup_0__Lean_reprModuleSetup____x40_Lean_Setup___hyg_913____spec__4(lean_object*, lean_object*); +static lean_object* l___private_Lean_Setup_0__Lean_toJsonModuleArtifacts____x40_Lean_Setup___hyg_549____closed__5; +static lean_object* l_Lean_instFromJsonImport___closed__1; +static lean_object* l___private_Lean_Setup_0__Lean_reprImport____x40_Lean_Setup___hyg_34____closed__9; +static lean_object* l___private_Lean_Setup_0__Lean_fromJsonModuleArtifacts____x40_Lean_Setup___hyg_593____closed__18; +LEAN_EXPORT lean_object* l_Lean_Json_getObjValAs_x3f___at___private_Lean_Setup_0__Lean_fromJsonModuleSetup____x40_Lean_Setup___hyg_1209____spec__3___boxed(lean_object*, lean_object*); +LEAN_EXPORT lean_object* l_Lean_instCoeNameImport(lean_object*); +static lean_object* l___private_Lean_Setup_0__Lean_fromJsonModuleSetup____x40_Lean_Setup___hyg_1209____closed__6; +static lean_object* l___private_Lean_Setup_0__Lean_reprModuleSetup____x40_Lean_Setup___hyg_913____closed__24; +static lean_object* l___private_Lean_Setup_0__Lean_fromJsonModuleSetup____x40_Lean_Setup___hyg_1209____closed__5; +static lean_object* l___private_Lean_Setup_0__Lean_fromJsonModuleArtifacts____x40_Lean_Setup___hyg_593____closed__23; +static lean_object* l___private_Lean_Setup_0__Lean_fromJsonModuleSetup____x40_Lean_Setup___hyg_1209____closed__15; +LEAN_EXPORT lean_object* l_Lean_instToStringImport(lean_object*); +static lean_object* 
l___private_Lean_Setup_0__Lean_reprImport____x40_Lean_Setup___hyg_34____closed__19; +static lean_object* l___private_Lean_Setup_0__Lean_fromJsonModuleSetup____x40_Lean_Setup___hyg_1209____closed__11; +static lean_object* l___private_Lean_Setup_0__Lean_reprModuleSetup____x40_Lean_Setup___hyg_913____closed__7; +static lean_object* l___private_Lean_Setup_0__Lean_fromJsonModuleArtifacts____x40_Lean_Setup___hyg_593____closed__7; +static lean_object* l___private_Lean_Setup_0__Lean_reprImport____x40_Lean_Setup___hyg_34____closed__27; +LEAN_EXPORT lean_object* l_Lean_RBNode_fold___at___private_Lean_Setup_0__Lean_toJsonModuleSetup____x40_Lean_Setup___hyg_1087____spec__4(lean_object*, lean_object*, lean_object*); +static lean_object* l_Lean_instInhabitedModuleSetup___closed__1; +lean_object* lean_nat_to_int(lean_object*); +static lean_object* l___private_Lean_Setup_0__Lean_fromJsonModuleSetup____x40_Lean_Setup___hyg_1209____closed__21; +static lean_object* l___private_Lean_Setup_0__Lean_fromJsonModuleSetup____x40_Lean_Setup___hyg_1209____closed__3; +static lean_object* l___private_Lean_Setup_0__Lean_toJsonModuleArtifacts____x40_Lean_Setup___hyg_549____closed__1; +LEAN_EXPORT lean_object* l_Lean_Json_getObjValAs_x3f___at___private_Lean_Setup_0__Lean_fromJsonModuleSetup____x40_Lean_Setup___hyg_1209____spec__5(lean_object*, lean_object*); +static lean_object* l___private_Lean_Setup_0__Lean_fromJsonModuleArtifacts____x40_Lean_Setup___hyg_593____closed__16; +static lean_object* l___private_Lean_Setup_0__Lean_reprImport____x40_Lean_Setup___hyg_34____closed__12; +LEAN_EXPORT lean_object* l_Lean_Json_getObjValAs_x3f___at___private_Lean_Setup_0__Lean_fromJsonModuleArtifacts____x40_Lean_Setup___hyg_593____spec__1(lean_object*, lean_object*); +static lean_object* l___private_Lean_Setup_0__Lean_reprImport____x40_Lean_Setup___hyg_34____closed__29; +lean_object* l_Lean_Json_getObjValD(lean_object*, lean_object*); +LEAN_EXPORT lean_object* l_Lean_ModuleSetup_load___boxed(lean_object*, lean_object*); +static lean_object* l___private_Lean_Setup_0__Lean_reprImport____x40_Lean_Setup___hyg_34____closed__11; +static lean_object* l___private_Lean_Setup_0__Lean_reprModuleSetup____x40_Lean_Setup___hyg_913____closed__29; +static lean_object* l___private_Lean_Setup_0__Lean_fromJsonModuleSetup____x40_Lean_Setup___hyg_1209____closed__27; +LEAN_EXPORT lean_object* l_repr___at___private_Lean_Setup_0__Lean_reprModuleSetup____x40_Lean_Setup___hyg_913____spec__9(lean_object*); +LEAN_EXPORT lean_object* l_Lean_Json_getObjValAs_x3f___at___private_Lean_Setup_0__Lean_fromJsonModuleSetup____x40_Lean_Setup___hyg_1209____spec__3(lean_object*, lean_object*); +LEAN_EXPORT lean_object* l_Lean_instInhabitedModuleSetup; +static lean_object* l_Lean_instToJsonModuleArtifacts___closed__1; +static lean_object* l___private_Lean_Setup_0__Lean_fromJsonModuleSetup____x40_Lean_Setup___hyg_1209____closed__32; +static lean_object* l___private_Lean_Setup_0__Lean_fromJsonModuleSetup____x40_Lean_Setup___hyg_1209____closed__10; +static lean_object* l_Lean_Json_getObjValAs_x3f___at___private_Lean_Setup_0__Lean_fromJsonImport____x40_Lean_Setup___hyg_190____spec__1___closed__1; +static lean_object* l___private_Lean_Setup_0__Lean_fromJsonModuleSetup____x40_Lean_Setup___hyg_1209____closed__7; +static lean_object* l___private_Lean_Setup_0__Lean_reprModuleSetup____x40_Lean_Setup___hyg_913____closed__4; +LEAN_EXPORT lean_object* l_Lean_RBMap_toList___at___private_Lean_Setup_0__Lean_reprModuleSetup____x40_Lean_Setup___hyg_913____spec__2(lean_object*); +static 
lean_object* l_Lean_Json_getObjValAs_x3f___at___private_Lean_Setup_0__Lean_fromJsonModuleArtifacts____x40_Lean_Setup___hyg_593____spec__1___closed__1; +static lean_object* l___private_Lean_Setup_0__Lean_reprModuleArtifacts____x40_Lean_Setup___hyg_417____closed__3; +static lean_object* l_Prod_repr___at___private_Lean_Setup_0__Lean_reprModuleSetup____x40_Lean_Setup___hyg_913____spec__6___closed__2; +LEAN_EXPORT lean_object* l_Std_Format_joinSep___at___private_Lean_Setup_0__Lean_reprModuleSetup____x40_Lean_Setup___hyg_913____spec__10(lean_object*, lean_object*); +lean_object* lean_array_to_list(lean_object*); +static lean_object* l___private_Lean_Setup_0__Lean_reprModuleArtifacts____x40_Lean_Setup___hyg_417____closed__1; +static lean_object* l___private_Lean_Setup_0__Lean_fromJsonImport____x40_Lean_Setup___hyg_190____closed__6; +static lean_object* l_Lean_instFromJsonModuleSetup___closed__1; +static lean_object* l___private_Lean_Setup_0__Lean_reprImport____x40_Lean_Setup___hyg_34____closed__3; +LEAN_EXPORT lean_object* l___private_Lean_Setup_0__Lean_toJsonImport____x40_Lean_Setup___hyg_124_(lean_object*); +static lean_object* l___private_Lean_Setup_0__Lean_fromJsonImport____x40_Lean_Setup___hyg_190____closed__5; +static lean_object* l___private_Lean_Setup_0__Lean_fromJsonImport____x40_Lean_Setup___hyg_190____closed__1; +static lean_object* l_Option_repr___at___private_Lean_Setup_0__Lean_reprModuleArtifacts____x40_Lean_Setup___hyg_417____spec__1___closed__3; +static lean_object* l___private_Lean_Setup_0__Lean_reprModuleArtifacts____x40_Lean_Setup___hyg_417____closed__10; +static lean_object* l___private_Lean_Setup_0__Lean_fromJsonModuleArtifacts____x40_Lean_Setup___hyg_593____closed__9; +static lean_object* l___private_Lean_Setup_0__Lean_reprModuleArtifacts____x40_Lean_Setup___hyg_417____closed__12; +static lean_object* l_Lean_ModuleSetup_load___closed__3; +static lean_object* l___private_Lean_Setup_0__Lean_fromJsonModuleSetup____x40_Lean_Setup___hyg_1209____closed__9; +lean_object* l_Lean_RBNode_insert___at_Lean_NameMap_insert___spec__1___rarg(lean_object*, lean_object*, lean_object*); +static lean_object* l_List_repr___at___private_Lean_Setup_0__Lean_reprModuleSetup____x40_Lean_Setup___hyg_913____spec__4___closed__4; +static lean_object* l___private_Lean_Setup_0__Lean_fromJsonModuleArtifacts____x40_Lean_Setup___hyg_593____closed__22; +static lean_object* l___private_Lean_Setup_0__Lean_fromJsonModuleSetup____x40_Lean_Setup___hyg_1209____closed__26; +LEAN_EXPORT lean_object* l_Lean_RBNode_revFold___at___private_Lean_Setup_0__Lean_reprModuleSetup____x40_Lean_Setup___hyg_913____spec__3(lean_object*, lean_object*); +static lean_object* l___private_Lean_Setup_0__Lean_reprImport____x40_Lean_Setup___hyg_34____closed__5; +lean_object* l_Lean_Name_str___override(lean_object*, lean_object*); +LEAN_EXPORT lean_object* l_Lean_Json_getObjValAs_x3f___at___private_Lean_Setup_0__Lean_fromJsonImport____x40_Lean_Setup___hyg_190____spec__2(lean_object*, lean_object*); +static lean_object* l___private_Lean_Setup_0__Lean_fromJsonImport____x40_Lean_Setup___hyg_190____closed__2; +LEAN_EXPORT lean_object* l___private_Lean_Setup_0__Lean_fromJsonImport____x40_Lean_Setup___hyg_190_(lean_object*); +static lean_object* l_Lean_ModuleSetup_load___closed__2; +static lean_object* l___private_Lean_Setup_0__Lean_reprModuleSetup____x40_Lean_Setup___hyg_913____closed__12; +static lean_object* l___private_Lean_Setup_0__Lean_fromJsonModuleSetup____x40_Lean_Setup___hyg_1209____closed__28; +LEAN_EXPORT lean_object* 
l_Lean_instInhabitedModuleArtifacts; +LEAN_EXPORT lean_object* l___private_Lean_Setup_0__Lean_reprModuleSetup____x40_Lean_Setup___hyg_913_(lean_object*, lean_object*); +static lean_object* l_Lean_instToJsonModuleSetup___closed__1; +static lean_object* l___private_Lean_Setup_0__Lean_fromJsonImport____x40_Lean_Setup___hyg_190____closed__9; +static lean_object* l_Lean_Json_getObjValAs_x3f___at___private_Lean_Setup_0__Lean_fromJsonModuleSetup____x40_Lean_Setup___hyg_1209____spec__3___closed__1; +static lean_object* l_List_repr___at___private_Lean_Setup_0__Lean_reprModuleSetup____x40_Lean_Setup___hyg_913____spec__4___closed__8; +LEAN_EXPORT lean_object* l_Lean_ModuleSetup_load(lean_object*, lean_object*); +static lean_object* l___private_Lean_Setup_0__Lean_fromJsonImport____x40_Lean_Setup___hyg_190____closed__17; +LEAN_EXPORT lean_object* l_Lean_instInhabitedImport; +static lean_object* l___private_Lean_Setup_0__Lean_fromJsonModuleArtifacts____x40_Lean_Setup___hyg_593____closed__15; +static lean_object* l___private_Lean_Setup_0__Lean_toJsonModuleArtifacts____x40_Lean_Setup___hyg_549____closed__2; +static lean_object* l___private_Lean_Setup_0__Lean_fromJsonModuleArtifacts____x40_Lean_Setup___hyg_593____closed__1; +static lean_object* l___private_Lean_Setup_0__Lean_reprImport____x40_Lean_Setup___hyg_34____closed__14; +static lean_object* l___private_Lean_Setup_0__Lean_reprModuleSetup____x40_Lean_Setup___hyg_913____closed__5; +static lean_object* l___private_Lean_Setup_0__Lean_fromJsonModuleSetup____x40_Lean_Setup___hyg_1209____closed__17; +static lean_object* l_Prod_repr___at___private_Lean_Setup_0__Lean_reprModuleSetup____x40_Lean_Setup___hyg_913____spec__6___closed__3; +static lean_object* l___private_Lean_Setup_0__Lean_fromJsonModuleSetup____x40_Lean_Setup___hyg_1209____closed__14; +static lean_object* l___private_Lean_Setup_0__Lean_reprModuleArtifacts____x40_Lean_Setup___hyg_417____closed__5; +static lean_object* l_Lean_Json_getObjValAs_x3f___at___private_Lean_Setup_0__Lean_fromJsonImport____x40_Lean_Setup___hyg_190____spec__1___closed__3; +static lean_object* l___private_Lean_Setup_0__Lean_fromJsonModuleArtifacts____x40_Lean_Setup___hyg_593____closed__13; +static lean_object* l___private_Lean_Setup_0__Lean_reprModuleArtifacts____x40_Lean_Setup___hyg_417____closed__6; +lean_object* l_IO_FS_readFile(lean_object*, lean_object*); +LEAN_EXPORT lean_object* l___private_Lean_Setup_0__Lean_fromJsonModuleSetup____x40_Lean_Setup___hyg_1209_(lean_object*); +static lean_object* l___private_Lean_Setup_0__Lean_fromJsonModuleArtifacts____x40_Lean_Setup___hyg_593____closed__19; +LEAN_EXPORT lean_object* l_Lean_RBMap_toList___at___private_Lean_Setup_0__Lean_reprModuleSetup____x40_Lean_Setup___hyg_913____spec__2___boxed(lean_object*); +LEAN_EXPORT lean_object* l_Lean_RBNode_fold___at___private_Lean_Setup_0__Lean_toJsonModuleSetup____x40_Lean_Setup___hyg_1087____spec__2(lean_object*, lean_object*, lean_object*); +static lean_object* l___private_Lean_Setup_0__Lean_fromJsonImport____x40_Lean_Setup___hyg_190____closed__14; +static lean_object* l___private_Lean_Setup_0__Lean_fromJsonImport____x40_Lean_Setup___hyg_190____closed__15; +static lean_object* l___private_Lean_Setup_0__Lean_fromJsonModuleArtifacts____x40_Lean_Setup___hyg_593____closed__8; +LEAN_EXPORT lean_object* l_Lean_RBNode_fold___at___private_Lean_Setup_0__Lean_toJsonModuleSetup____x40_Lean_Setup___hyg_1087____spec__4___at___private_Lean_Setup_0__Lean_toJsonModuleSetup____x40_Lean_Setup___hyg_1087____spec__5(lean_object*, lean_object*); +static 
lean_object* l___private_Lean_Setup_0__Lean_reprImport____x40_Lean_Setup___hyg_34____closed__8; +static lean_object* l___private_Lean_Setup_0__Lean_reprModuleSetup____x40_Lean_Setup___hyg_913____closed__22; +LEAN_EXPORT lean_object* l_List_foldl___at___private_Lean_Setup_0__Lean_reprModuleSetup____x40_Lean_Setup___hyg_913____spec__8(lean_object*, lean_object*, lean_object*); +LEAN_EXPORT lean_object* l_Std_Format_joinSep___at___private_Lean_Setup_0__Lean_reprModuleSetup____x40_Lean_Setup___hyg_913____spec__7(lean_object*, lean_object*); +static lean_object* l___private_Lean_Setup_0__Lean_reprImport____x40_Lean_Setup___hyg_34____closed__22; +static lean_object* l___private_Lean_Setup_0__Lean_fromJsonModuleSetup____x40_Lean_Setup___hyg_1209____closed__24; +static lean_object* l___private_Lean_Setup_0__Lean_reprModuleSetup____x40_Lean_Setup___hyg_913____closed__16; +LEAN_EXPORT lean_object* l_Option_repr___at___private_Lean_Setup_0__Lean_reprModuleArtifacts____x40_Lean_Setup___hyg_417____spec__1___boxed(lean_object*, lean_object*); +static lean_object* l_Prod_repr___at___private_Lean_Setup_0__Lean_reprModuleSetup____x40_Lean_Setup___hyg_913____spec__6___closed__6; +lean_object* lean_string_length(lean_object*); +LEAN_EXPORT lean_object* l___private_Lean_Setup_0__Lean_toJsonImport____x40_Lean_Setup___hyg_124____lambda__1___boxed(lean_object*); +static lean_object* l_Lean_instReprImport___closed__1; +lean_object* l_Std_Internal_Parsec_String_Parser_run___rarg(lean_object*, lean_object*); +uint8_t lean_nat_dec_eq(lean_object*, lean_object*); +LEAN_EXPORT lean_object* l_Lean_instReprImport; +static lean_object* l_Lean_instToJsonImport___closed__1; +static lean_object* l___private_Lean_Setup_0__Lean_reprImport____x40_Lean_Setup___hyg_34____closed__20; +static lean_object* l___private_Lean_Setup_0__Lean_fromJsonModuleArtifacts____x40_Lean_Setup___hyg_593____closed__6; +static lean_object* l___private_Lean_Setup_0__Lean_reprModuleSetup____x40_Lean_Setup___hyg_913____closed__13; +static lean_object* l___private_Lean_Setup_0__Lean_reprModuleSetup____x40_Lean_Setup___hyg_913____closed__25; +lean_object* l_Lean_Name_mkStr2(lean_object*, lean_object*); +static lean_object* l___private_Lean_Setup_0__Lean_reprModuleSetup____x40_Lean_Setup___hyg_913____closed__19; +LEAN_EXPORT lean_object* l_repr___at___private_Lean_Setup_0__Lean_reprModuleSetup____x40_Lean_Setup___hyg_913____spec__9___boxed(lean_object*); +static lean_object* l___private_Lean_Setup_0__Lean_reprImport____x40_Lean_Setup___hyg_34____closed__7; +static lean_object* l___private_Lean_Setup_0__Lean_reprModuleArtifacts____x40_Lean_Setup___hyg_417____closed__11; +static lean_object* l___private_Lean_Setup_0__Lean_fromJsonModuleSetup____x40_Lean_Setup___hyg_1209____closed__20; +lean_object* l_Std_Format_joinSep___at_Prod_repr___spec__1(lean_object*, lean_object*); +static lean_object* l___private_Lean_Setup_0__Lean_fromJsonImport____x40_Lean_Setup___hyg_190____closed__13; +lean_object* l_String_toName(lean_object*); +lean_object* l_Repr_addAppParen(lean_object*, lean_object*); +static lean_object* l___private_Lean_Setup_0__Lean_fromJsonModuleArtifacts____x40_Lean_Setup___hyg_593____closed__12; +static lean_object* l___private_Lean_Setup_0__Lean_fromJsonModuleSetup____x40_Lean_Setup___hyg_1209____closed__31; +static lean_object* l_Option_repr___at___private_Lean_Setup_0__Lean_reprModuleArtifacts____x40_Lean_Setup___hyg_417____spec__1___closed__4; +static lean_object* 
l_Lean_Json_getObjValAs_x3f___at___private_Lean_Setup_0__Lean_fromJsonModuleSetup____x40_Lean_Setup___hyg_1209____spec__1___closed__1; +LEAN_EXPORT lean_object* l_Lean_instToJsonModuleArtifacts; +static lean_object* l___private_Lean_Setup_0__Lean_fromJsonImport____x40_Lean_Setup___hyg_190____closed__3; +LEAN_EXPORT lean_object* l_Lean_Json_getObjValAs_x3f___at___private_Lean_Setup_0__Lean_fromJsonImport____x40_Lean_Setup___hyg_190____spec__1(lean_object*, lean_object*); +lean_object* l_Lean_Json_Parser_any(lean_object*); +LEAN_EXPORT lean_object* l_Lean_instReprModuleArtifacts; +static lean_object* l___private_Lean_Setup_0__Lean_reprImport____x40_Lean_Setup___hyg_34____closed__1; +static lean_object* l___private_Lean_Setup_0__Lean_reprModuleSetup____x40_Lean_Setup___hyg_913____closed__2; +LEAN_EXPORT lean_object* l_Lean_Json_getObjValAs_x3f___at___private_Lean_Setup_0__Lean_fromJsonModuleSetup____x40_Lean_Setup___hyg_1209____spec__1(lean_object*, lean_object*); +lean_object* l_List_flatMapTR_go___at___private_Lean_Util_Paths_0__Lean_toJsonLeanPaths____x40_Lean_Util_Paths___hyg_55____spec__2(lean_object*, lean_object*); +lean_object* l_List_reverse___rarg(lean_object*); +static lean_object* l___private_Lean_Setup_0__Lean_fromJsonModuleArtifacts____x40_Lean_Setup___hyg_593____closed__2; +static lean_object* l_Lean_ModuleSetup_load___closed__1; +static lean_object* l___private_Lean_Setup_0__Lean_fromJsonModuleSetup____x40_Lean_Setup___hyg_1209____closed__13; +LEAN_EXPORT lean_object* l_Lean_Json_getObjValAs_x3f___at___private_Lean_Setup_0__Lean_fromJsonModuleArtifacts____x40_Lean_Setup___hyg_593____spec__1___boxed(lean_object*, lean_object*); +lean_object* lean_array_mk(lean_object*); +static lean_object* l___private_Lean_Setup_0__Lean_reprModuleArtifacts____x40_Lean_Setup___hyg_417____closed__8; +LEAN_EXPORT lean_object* l___private_Lean_Setup_0__Lean_toJsonModuleSetup____x40_Lean_Setup___hyg_1087_(lean_object*); +static lean_object* l___private_Lean_Setup_0__Lean_reprModuleSetup____x40_Lean_Setup___hyg_913____closed__8; +static lean_object* l___private_Lean_Setup_0__Lean_reprModuleSetup____x40_Lean_Setup___hyg_913____closed__17; +static lean_object* l_List_repr___at___private_Lean_Setup_0__Lean_reprModuleSetup____x40_Lean_Setup___hyg_913____spec__4___closed__1; +static lean_object* l___private_Lean_Setup_0__Lean_fromJsonModuleArtifacts____x40_Lean_Setup___hyg_593____closed__4; +size_t lean_usize_add(size_t, size_t); +static lean_object* l___private_Lean_Setup_0__Lean_fromJsonModuleSetup____x40_Lean_Setup___hyg_1209____closed__8; +lean_object* lean_array_uget(lean_object*, size_t); +size_t lean_array_size(lean_object*); +LEAN_EXPORT lean_object* l___private_Lean_Setup_0__Lean_reprModuleArtifacts____x40_Lean_Setup___hyg_417_(lean_object*, lean_object*); +static lean_object* l_Lean_Json_getObjValAs_x3f___at___private_Lean_Setup_0__Lean_fromJsonImport____x40_Lean_Setup___hyg_190____spec__1___closed__2; +static lean_object* l___private_Lean_Setup_0__Lean_fromJsonModuleSetup____x40_Lean_Setup___hyg_1209____closed__2; +static lean_object* l___private_Lean_Setup_0__Lean_toJsonImport____x40_Lean_Setup___hyg_124____closed__1; +static lean_object* l___private_Lean_Setup_0__Lean_toJsonImport____x40_Lean_Setup___hyg_124____closed__2; +LEAN_EXPORT lean_object* l___private_Lean_Setup_0__Lean_fromJsonModuleArtifacts____x40_Lean_Setup___hyg_593_(lean_object*); +LEAN_EXPORT lean_object* l___private_Lean_Setup_0__Lean_reprModuleSetup____x40_Lean_Setup___hyg_913____boxed(lean_object*, lean_object*); +static 
lean_object* l___private_Lean_Setup_0__Lean_fromJsonModuleArtifacts____x40_Lean_Setup___hyg_593____closed__3; +static lean_object* l___private_Lean_Setup_0__Lean_reprImport____x40_Lean_Setup___hyg_34____closed__26; +static lean_object* l___private_Lean_Setup_0__Lean_fromJsonModuleSetup____x40_Lean_Setup___hyg_1209____closed__30; +static lean_object* l___private_Lean_Setup_0__Lean_reprModuleSetup____x40_Lean_Setup___hyg_913____closed__28; +LEAN_EXPORT lean_object* l_List_foldl___at___private_Lean_Setup_0__Lean_reprModuleSetup____x40_Lean_Setup___hyg_913____spec__13(lean_object*, lean_object*, lean_object*); +LEAN_EXPORT lean_object* l_Lean_instToJsonModuleSetup; +lean_object* lean_string_append(lean_object*, lean_object*); +static lean_object* l___private_Lean_Setup_0__Lean_fromJsonModuleSetup____x40_Lean_Setup___hyg_1209____closed__29; +static lean_object* l___private_Lean_Setup_0__Lean_reprImport____x40_Lean_Setup___hyg_34____closed__25; +lean_object* lean_array_get_size(lean_object*); +static lean_object* l___private_Lean_Setup_0__Lean_toJsonModuleArtifacts____x40_Lean_Setup___hyg_549____closed__4; +static lean_object* l___private_Lean_Setup_0__Lean_reprModuleSetup____x40_Lean_Setup___hyg_913____closed__26; +static lean_object* l_Prod_repr___at___private_Lean_Setup_0__Lean_reprModuleSetup____x40_Lean_Setup___hyg_913____spec__6___closed__7; +static lean_object* l___private_Lean_Setup_0__Lean_reprModuleArtifacts____x40_Lean_Setup___hyg_417____closed__7; +static lean_object* l_List_repr___at___private_Lean_Setup_0__Lean_reprModuleSetup____x40_Lean_Setup___hyg_913____spec__4___closed__3; +static lean_object* l___private_Lean_Setup_0__Lean_fromJsonImport____x40_Lean_Setup___hyg_190____closed__4; +static lean_object* l___private_Lean_Setup_0__Lean_reprModuleSetup____x40_Lean_Setup___hyg_913____closed__14; +static lean_object* l_Option_repr___at___private_Lean_Setup_0__Lean_reprModuleArtifacts____x40_Lean_Setup___hyg_417____spec__1___closed__6; +static lean_object* l___private_Lean_Setup_0__Lean_reprModuleArtifacts____x40_Lean_Setup___hyg_417____closed__2; +uint8_t lean_usize_dec_lt(size_t, size_t); +static lean_object* l___private_Lean_Setup_0__Lean_fromJsonImport____x40_Lean_Setup___hyg_190____closed__10; +static lean_object* l_Lean_instReprModuleArtifacts___closed__1; +static lean_object* l___private_Lean_Setup_0__Lean_fromJsonImport____x40_Lean_Setup___hyg_190____closed__19; +static lean_object* l___private_Lean_Setup_0__Lean_reprImport____x40_Lean_Setup___hyg_34____closed__4; +static lean_object* l___private_Lean_Setup_0__Lean_reprModuleArtifacts____x40_Lean_Setup___hyg_417____closed__9; +static lean_object* l___private_Lean_Setup_0__Lean_fromJsonModuleArtifacts____x40_Lean_Setup___hyg_593____closed__24; +lean_object* l_Lean_RBNode_insert___at_Lean_Json_mkObj___spec__1(lean_object*, lean_object*, lean_object*); +lean_object* l_Lean_Json_pretty(lean_object*, lean_object*); +static lean_object* l___private_Lean_Setup_0__Lean_reprModuleArtifacts____x40_Lean_Setup___hyg_417____closed__15; +LEAN_EXPORT lean_object* l_Lean_instFromJsonImport; +LEAN_EXPORT lean_object* l_Lean_instToJsonImport; +static lean_object* l___private_Lean_Setup_0__Lean_fromJsonImport____x40_Lean_Setup___hyg_190____closed__18; +static lean_object* l___private_Lean_Setup_0__Lean_fromJsonModuleArtifacts____x40_Lean_Setup___hyg_593____closed__20; +lean_object* lean_array_uset(lean_object*, size_t, lean_object*); +LEAN_EXPORT lean_object* 
l_repr___at___private_Lean_Setup_0__Lean_reprModuleSetup____x40_Lean_Setup___hyg_913____spec__1(lean_object*); +static lean_object* l___private_Lean_Setup_0__Lean_reprModuleSetup____x40_Lean_Setup___hyg_913____closed__23; +lean_object* lean_mk_empty_array_with_capacity(lean_object*); +LEAN_EXPORT lean_object* l_Lean_Json_getObjValAs_x3f___at___private_Lean_Setup_0__Lean_fromJsonImport____x40_Lean_Setup___hyg_190____spec__1___lambda__1(lean_object*, lean_object*); +static lean_object* l___private_Lean_Setup_0__Lean_reprModuleSetup____x40_Lean_Setup___hyg_913____closed__18; +static lean_object* l___private_Lean_Setup_0__Lean_reprImport____x40_Lean_Setup___hyg_34____closed__15; +static lean_object* l___private_Lean_Setup_0__Lean_reprImport____x40_Lean_Setup___hyg_34____closed__2; +static lean_object* l___private_Lean_Setup_0__Lean_fromJsonImport____x40_Lean_Setup___hyg_190____closed__11; +LEAN_EXPORT lean_object* l_Lean_RBNode_revFold___at___private_Lean_Setup_0__Lean_reprModuleSetup____x40_Lean_Setup___hyg_913____spec__3___boxed(lean_object*, lean_object*); +static lean_object* _init_l___private_Lean_Setup_0__Lean_reprImport____x40_Lean_Setup___hyg_34____closed__1() { +_start: +{ +lean_object* x_1; +x_1 = lean_mk_string_unchecked("module", 6, 6); +return x_1; +} +} +static lean_object* _init_l___private_Lean_Setup_0__Lean_reprImport____x40_Lean_Setup___hyg_34____closed__2() { +_start: +{ +lean_object* x_1; lean_object* x_2; +x_1 = l___private_Lean_Setup_0__Lean_reprImport____x40_Lean_Setup___hyg_34____closed__1; +x_2 = lean_alloc_ctor(3, 1, 0); +lean_ctor_set(x_2, 0, x_1); +return x_2; +} +} +static lean_object* _init_l___private_Lean_Setup_0__Lean_reprImport____x40_Lean_Setup___hyg_34____closed__3() { +_start: +{ +lean_object* x_1; lean_object* x_2; lean_object* x_3; +x_1 = lean_box(0); +x_2 = l___private_Lean_Setup_0__Lean_reprImport____x40_Lean_Setup___hyg_34____closed__2; +x_3 = lean_alloc_ctor(5, 2, 0); +lean_ctor_set(x_3, 0, x_1); +lean_ctor_set(x_3, 1, x_2); +return x_3; +} +} +static lean_object* _init_l___private_Lean_Setup_0__Lean_reprImport____x40_Lean_Setup___hyg_34____closed__4() { +_start: +{ +lean_object* x_1; +x_1 = lean_mk_string_unchecked(" := ", 4, 4); +return x_1; +} +} +static lean_object* _init_l___private_Lean_Setup_0__Lean_reprImport____x40_Lean_Setup___hyg_34____closed__5() { +_start: +{ +lean_object* x_1; lean_object* x_2; +x_1 = l___private_Lean_Setup_0__Lean_reprImport____x40_Lean_Setup___hyg_34____closed__4; +x_2 = lean_alloc_ctor(3, 1, 0); +lean_ctor_set(x_2, 0, x_1); +return x_2; +} +} +static lean_object* _init_l___private_Lean_Setup_0__Lean_reprImport____x40_Lean_Setup___hyg_34____closed__6() { +_start: +{ +lean_object* x_1; lean_object* x_2; lean_object* x_3; +x_1 = l___private_Lean_Setup_0__Lean_reprImport____x40_Lean_Setup___hyg_34____closed__3; +x_2 = l___private_Lean_Setup_0__Lean_reprImport____x40_Lean_Setup___hyg_34____closed__5; +x_3 = lean_alloc_ctor(5, 2, 0); +lean_ctor_set(x_3, 0, x_1); +lean_ctor_set(x_3, 1, x_2); +return x_3; +} +} +static lean_object* _init_l___private_Lean_Setup_0__Lean_reprImport____x40_Lean_Setup___hyg_34____closed__7() { +_start: +{ +lean_object* x_1; lean_object* x_2; +x_1 = lean_unsigned_to_nat(10u); +x_2 = lean_nat_to_int(x_1); +return x_2; +} +} +static lean_object* _init_l___private_Lean_Setup_0__Lean_reprImport____x40_Lean_Setup___hyg_34____closed__8() { +_start: +{ +lean_object* x_1; +x_1 = lean_mk_string_unchecked(",", 1, 1); +return x_1; +} +} +static lean_object* 
_init_l___private_Lean_Setup_0__Lean_reprImport____x40_Lean_Setup___hyg_34____closed__9() { +_start: +{ +lean_object* x_1; lean_object* x_2; +x_1 = l___private_Lean_Setup_0__Lean_reprImport____x40_Lean_Setup___hyg_34____closed__8; +x_2 = lean_alloc_ctor(3, 1, 0); +lean_ctor_set(x_2, 0, x_1); +return x_2; +} +} +static lean_object* _init_l___private_Lean_Setup_0__Lean_reprImport____x40_Lean_Setup___hyg_34____closed__10() { +_start: +{ +lean_object* x_1; +x_1 = lean_mk_string_unchecked("importAll", 9, 9); +return x_1; +} +} +static lean_object* _init_l___private_Lean_Setup_0__Lean_reprImport____x40_Lean_Setup___hyg_34____closed__11() { +_start: +{ +lean_object* x_1; lean_object* x_2; +x_1 = l___private_Lean_Setup_0__Lean_reprImport____x40_Lean_Setup___hyg_34____closed__10; +x_2 = lean_alloc_ctor(3, 1, 0); +lean_ctor_set(x_2, 0, x_1); +return x_2; +} +} +static lean_object* _init_l___private_Lean_Setup_0__Lean_reprImport____x40_Lean_Setup___hyg_34____closed__12() { +_start: +{ +lean_object* x_1; lean_object* x_2; +x_1 = lean_unsigned_to_nat(13u); +x_2 = lean_nat_to_int(x_1); +return x_2; +} +} +static lean_object* _init_l___private_Lean_Setup_0__Lean_reprImport____x40_Lean_Setup___hyg_34____closed__13() { +_start: +{ +lean_object* x_1; +x_1 = lean_mk_string_unchecked("isExported", 10, 10); +return x_1; +} +} +static lean_object* _init_l___private_Lean_Setup_0__Lean_reprImport____x40_Lean_Setup___hyg_34____closed__14() { +_start: +{ +lean_object* x_1; lean_object* x_2; +x_1 = l___private_Lean_Setup_0__Lean_reprImport____x40_Lean_Setup___hyg_34____closed__13; +x_2 = lean_alloc_ctor(3, 1, 0); +lean_ctor_set(x_2, 0, x_1); +return x_2; +} +} +static lean_object* _init_l___private_Lean_Setup_0__Lean_reprImport____x40_Lean_Setup___hyg_34____closed__15() { +_start: +{ +lean_object* x_1; lean_object* x_2; +x_1 = lean_unsigned_to_nat(14u); +x_2 = lean_nat_to_int(x_1); +return x_2; +} +} +static lean_object* _init_l___private_Lean_Setup_0__Lean_reprImport____x40_Lean_Setup___hyg_34____closed__16() { +_start: +{ +lean_object* x_1; +x_1 = lean_mk_string_unchecked("{ ", 2, 2); +return x_1; +} +} +static lean_object* _init_l___private_Lean_Setup_0__Lean_reprImport____x40_Lean_Setup___hyg_34____closed__17() { +_start: +{ +lean_object* x_1; lean_object* x_2; +x_1 = l___private_Lean_Setup_0__Lean_reprImport____x40_Lean_Setup___hyg_34____closed__16; +x_2 = lean_string_length(x_1); +return x_2; +} +} +static lean_object* _init_l___private_Lean_Setup_0__Lean_reprImport____x40_Lean_Setup___hyg_34____closed__18() { +_start: +{ +lean_object* x_1; lean_object* x_2; +x_1 = l___private_Lean_Setup_0__Lean_reprImport____x40_Lean_Setup___hyg_34____closed__17; +x_2 = lean_nat_to_int(x_1); +return x_2; +} +} +static lean_object* _init_l___private_Lean_Setup_0__Lean_reprImport____x40_Lean_Setup___hyg_34____closed__19() { +_start: +{ +lean_object* x_1; lean_object* x_2; +x_1 = l___private_Lean_Setup_0__Lean_reprImport____x40_Lean_Setup___hyg_34____closed__16; +x_2 = lean_alloc_ctor(3, 1, 0); +lean_ctor_set(x_2, 0, x_1); +return x_2; +} +} +static lean_object* _init_l___private_Lean_Setup_0__Lean_reprImport____x40_Lean_Setup___hyg_34____closed__20() { +_start: +{ +lean_object* x_1; +x_1 = lean_mk_string_unchecked(" }", 2, 2); +return x_1; +} +} +static lean_object* _init_l___private_Lean_Setup_0__Lean_reprImport____x40_Lean_Setup___hyg_34____closed__21() { +_start: +{ +lean_object* x_1; lean_object* x_2; +x_1 = l___private_Lean_Setup_0__Lean_reprImport____x40_Lean_Setup___hyg_34____closed__20; +x_2 = lean_alloc_ctor(3, 1, 0); 
+lean_ctor_set(x_2, 0, x_1); +return x_2; +} +} +static lean_object* _init_l___private_Lean_Setup_0__Lean_reprImport____x40_Lean_Setup___hyg_34____closed__22() { +_start: +{ +lean_object* x_1; +x_1 = lean_mk_string_unchecked("false", 5, 5); +return x_1; +} +} +static lean_object* _init_l___private_Lean_Setup_0__Lean_reprImport____x40_Lean_Setup___hyg_34____closed__23() { +_start: +{ +lean_object* x_1; lean_object* x_2; +x_1 = l___private_Lean_Setup_0__Lean_reprImport____x40_Lean_Setup___hyg_34____closed__22; +x_2 = lean_alloc_ctor(3, 1, 0); +lean_ctor_set(x_2, 0, x_1); +return x_2; +} +} +static lean_object* _init_l___private_Lean_Setup_0__Lean_reprImport____x40_Lean_Setup___hyg_34____closed__24() { +_start: +{ +lean_object* x_1; lean_object* x_2; lean_object* x_3; +x_1 = l___private_Lean_Setup_0__Lean_reprImport____x40_Lean_Setup___hyg_34____closed__15; +x_2 = l___private_Lean_Setup_0__Lean_reprImport____x40_Lean_Setup___hyg_34____closed__23; +x_3 = lean_alloc_ctor(4, 2, 0); +lean_ctor_set(x_3, 0, x_1); +lean_ctor_set(x_3, 1, x_2); +return x_3; +} +} +static lean_object* _init_l___private_Lean_Setup_0__Lean_reprImport____x40_Lean_Setup___hyg_34____closed__25() { +_start: +{ +lean_object* x_1; uint8_t x_2; lean_object* x_3; +x_1 = l___private_Lean_Setup_0__Lean_reprImport____x40_Lean_Setup___hyg_34____closed__24; +x_2 = 0; +x_3 = lean_alloc_ctor(6, 1, 1); +lean_ctor_set(x_3, 0, x_1); +lean_ctor_set_uint8(x_3, sizeof(void*)*1, x_2); +return x_3; +} +} +static lean_object* _init_l___private_Lean_Setup_0__Lean_reprImport____x40_Lean_Setup___hyg_34____closed__26() { +_start: +{ +lean_object* x_1; +x_1 = lean_mk_string_unchecked("true", 4, 4); +return x_1; +} +} +static lean_object* _init_l___private_Lean_Setup_0__Lean_reprImport____x40_Lean_Setup___hyg_34____closed__27() { +_start: +{ +lean_object* x_1; lean_object* x_2; +x_1 = l___private_Lean_Setup_0__Lean_reprImport____x40_Lean_Setup___hyg_34____closed__26; +x_2 = lean_alloc_ctor(3, 1, 0); +lean_ctor_set(x_2, 0, x_1); +return x_2; +} +} +static lean_object* _init_l___private_Lean_Setup_0__Lean_reprImport____x40_Lean_Setup___hyg_34____closed__28() { +_start: +{ +lean_object* x_1; lean_object* x_2; lean_object* x_3; +x_1 = l___private_Lean_Setup_0__Lean_reprImport____x40_Lean_Setup___hyg_34____closed__15; +x_2 = l___private_Lean_Setup_0__Lean_reprImport____x40_Lean_Setup___hyg_34____closed__27; +x_3 = lean_alloc_ctor(4, 2, 0); +lean_ctor_set(x_3, 0, x_1); +lean_ctor_set(x_3, 1, x_2); +return x_3; +} +} +static lean_object* _init_l___private_Lean_Setup_0__Lean_reprImport____x40_Lean_Setup___hyg_34____closed__29() { +_start: +{ +lean_object* x_1; uint8_t x_2; lean_object* x_3; +x_1 = l___private_Lean_Setup_0__Lean_reprImport____x40_Lean_Setup___hyg_34____closed__28; +x_2 = 0; +x_3 = lean_alloc_ctor(6, 1, 1); +lean_ctor_set(x_3, 0, x_1); +lean_ctor_set_uint8(x_3, sizeof(void*)*1, x_2); +return x_3; +} +} +LEAN_EXPORT lean_object* l___private_Lean_Setup_0__Lean_reprImport____x40_Lean_Setup___hyg_34_(lean_object* x_1, lean_object* x_2) { +_start: +{ +lean_object* x_3; lean_object* x_4; lean_object* x_5; lean_object* x_6; lean_object* x_7; uint8_t x_8; lean_object* x_9; lean_object* x_10; lean_object* x_11; lean_object* x_12; lean_object* x_13; lean_object* x_14; lean_object* x_15; lean_object* x_16; lean_object* x_17; lean_object* x_18; lean_object* x_19; uint8_t x_20; uint8_t x_21; lean_object* x_22; +x_3 = lean_ctor_get(x_1, 0); +lean_inc(x_3); +x_4 = lean_unsigned_to_nat(0u); +x_5 = l_Lean_Name_reprPrec(x_3, x_4); +x_6 = 
l___private_Lean_Setup_0__Lean_reprImport____x40_Lean_Setup___hyg_34____closed__7; +x_7 = lean_alloc_ctor(4, 2, 0); +lean_ctor_set(x_7, 0, x_6); +lean_ctor_set(x_7, 1, x_5); +x_8 = 0; +x_9 = lean_alloc_ctor(6, 1, 1); +lean_ctor_set(x_9, 0, x_7); +lean_ctor_set_uint8(x_9, sizeof(void*)*1, x_8); +x_10 = l___private_Lean_Setup_0__Lean_reprImport____x40_Lean_Setup___hyg_34____closed__6; +x_11 = lean_alloc_ctor(5, 2, 0); +lean_ctor_set(x_11, 0, x_10); +lean_ctor_set(x_11, 1, x_9); +x_12 = l___private_Lean_Setup_0__Lean_reprImport____x40_Lean_Setup___hyg_34____closed__9; +x_13 = lean_alloc_ctor(5, 2, 0); +lean_ctor_set(x_13, 0, x_11); +lean_ctor_set(x_13, 1, x_12); +x_14 = lean_box(1); +x_15 = lean_alloc_ctor(5, 2, 0); +lean_ctor_set(x_15, 0, x_13); +lean_ctor_set(x_15, 1, x_14); +x_16 = l___private_Lean_Setup_0__Lean_reprImport____x40_Lean_Setup___hyg_34____closed__11; +x_17 = lean_alloc_ctor(5, 2, 0); +lean_ctor_set(x_17, 0, x_15); +lean_ctor_set(x_17, 1, x_16); +x_18 = l___private_Lean_Setup_0__Lean_reprImport____x40_Lean_Setup___hyg_34____closed__5; +x_19 = lean_alloc_ctor(5, 2, 0); +lean_ctor_set(x_19, 0, x_17); +lean_ctor_set(x_19, 1, x_18); +x_20 = lean_ctor_get_uint8(x_1, sizeof(void*)*1); +x_21 = lean_ctor_get_uint8(x_1, sizeof(void*)*1 + 1); +lean_dec(x_1); +if (x_20 == 0) +{ +lean_object* x_51; +x_51 = l___private_Lean_Setup_0__Lean_reprImport____x40_Lean_Setup___hyg_34____closed__23; +x_22 = x_51; +goto block_50; +} +else +{ +lean_object* x_52; +x_52 = l___private_Lean_Setup_0__Lean_reprImport____x40_Lean_Setup___hyg_34____closed__27; +x_22 = x_52; +goto block_50; +} +block_50: +{ +lean_object* x_23; lean_object* x_24; lean_object* x_25; lean_object* x_26; lean_object* x_27; lean_object* x_28; lean_object* x_29; lean_object* x_30; lean_object* x_31; +x_23 = l___private_Lean_Setup_0__Lean_reprImport____x40_Lean_Setup___hyg_34____closed__12; +x_24 = lean_alloc_ctor(4, 2, 0); +lean_ctor_set(x_24, 0, x_23); +lean_ctor_set(x_24, 1, x_22); +x_25 = lean_alloc_ctor(6, 1, 1); +lean_ctor_set(x_25, 0, x_24); +lean_ctor_set_uint8(x_25, sizeof(void*)*1, x_8); +x_26 = lean_alloc_ctor(5, 2, 0); +lean_ctor_set(x_26, 0, x_19); +lean_ctor_set(x_26, 1, x_25); +x_27 = lean_alloc_ctor(5, 2, 0); +lean_ctor_set(x_27, 0, x_26); +lean_ctor_set(x_27, 1, x_12); +x_28 = lean_alloc_ctor(5, 2, 0); +lean_ctor_set(x_28, 0, x_27); +lean_ctor_set(x_28, 1, x_14); +x_29 = l___private_Lean_Setup_0__Lean_reprImport____x40_Lean_Setup___hyg_34____closed__14; +x_30 = lean_alloc_ctor(5, 2, 0); +lean_ctor_set(x_30, 0, x_28); +lean_ctor_set(x_30, 1, x_29); +x_31 = lean_alloc_ctor(5, 2, 0); +lean_ctor_set(x_31, 0, x_30); +lean_ctor_set(x_31, 1, x_18); +if (x_21 == 0) +{ +lean_object* x_32; lean_object* x_33; lean_object* x_34; lean_object* x_35; lean_object* x_36; lean_object* x_37; lean_object* x_38; lean_object* x_39; lean_object* x_40; +x_32 = l___private_Lean_Setup_0__Lean_reprImport____x40_Lean_Setup___hyg_34____closed__25; +x_33 = lean_alloc_ctor(5, 2, 0); +lean_ctor_set(x_33, 0, x_31); +lean_ctor_set(x_33, 1, x_32); +x_34 = l___private_Lean_Setup_0__Lean_reprImport____x40_Lean_Setup___hyg_34____closed__19; +x_35 = lean_alloc_ctor(5, 2, 0); +lean_ctor_set(x_35, 0, x_34); +lean_ctor_set(x_35, 1, x_33); +x_36 = l___private_Lean_Setup_0__Lean_reprImport____x40_Lean_Setup___hyg_34____closed__21; +x_37 = lean_alloc_ctor(5, 2, 0); +lean_ctor_set(x_37, 0, x_35); +lean_ctor_set(x_37, 1, x_36); +x_38 = l___private_Lean_Setup_0__Lean_reprImport____x40_Lean_Setup___hyg_34____closed__18; +x_39 = lean_alloc_ctor(4, 2, 0); 
+lean_ctor_set(x_39, 0, x_38); +lean_ctor_set(x_39, 1, x_37); +x_40 = lean_alloc_ctor(6, 1, 1); +lean_ctor_set(x_40, 0, x_39); +lean_ctor_set_uint8(x_40, sizeof(void*)*1, x_8); +return x_40; +} +else +{ +lean_object* x_41; lean_object* x_42; lean_object* x_43; lean_object* x_44; lean_object* x_45; lean_object* x_46; lean_object* x_47; lean_object* x_48; lean_object* x_49; +x_41 = l___private_Lean_Setup_0__Lean_reprImport____x40_Lean_Setup___hyg_34____closed__29; +x_42 = lean_alloc_ctor(5, 2, 0); +lean_ctor_set(x_42, 0, x_31); +lean_ctor_set(x_42, 1, x_41); +x_43 = l___private_Lean_Setup_0__Lean_reprImport____x40_Lean_Setup___hyg_34____closed__19; +x_44 = lean_alloc_ctor(5, 2, 0); +lean_ctor_set(x_44, 0, x_43); +lean_ctor_set(x_44, 1, x_42); +x_45 = l___private_Lean_Setup_0__Lean_reprImport____x40_Lean_Setup___hyg_34____closed__21; +x_46 = lean_alloc_ctor(5, 2, 0); +lean_ctor_set(x_46, 0, x_44); +lean_ctor_set(x_46, 1, x_45); +x_47 = l___private_Lean_Setup_0__Lean_reprImport____x40_Lean_Setup___hyg_34____closed__18; +x_48 = lean_alloc_ctor(4, 2, 0); +lean_ctor_set(x_48, 0, x_47); +lean_ctor_set(x_48, 1, x_46); +x_49 = lean_alloc_ctor(6, 1, 1); +lean_ctor_set(x_49, 0, x_48); +lean_ctor_set_uint8(x_49, sizeof(void*)*1, x_8); +return x_49; +} +} +} +} +LEAN_EXPORT lean_object* l___private_Lean_Setup_0__Lean_reprImport____x40_Lean_Setup___hyg_34____boxed(lean_object* x_1, lean_object* x_2) { +_start: +{ +lean_object* x_3; +x_3 = l___private_Lean_Setup_0__Lean_reprImport____x40_Lean_Setup___hyg_34_(x_1, x_2); +lean_dec(x_2); +return x_3; +} +} +static lean_object* _init_l_Lean_instReprImport___closed__1() { +_start: +{ +lean_object* x_1; +x_1 = lean_alloc_closure((void*)(l___private_Lean_Setup_0__Lean_reprImport____x40_Lean_Setup___hyg_34____boxed), 2, 0); +return x_1; +} +} +static lean_object* _init_l_Lean_instReprImport() { +_start: +{ +lean_object* x_1; +x_1 = l_Lean_instReprImport___closed__1; +return x_1; +} +} +static lean_object* _init_l_Lean_instInhabitedImport___closed__1() { +_start: +{ +lean_object* x_1; uint8_t x_2; lean_object* x_3; +x_1 = lean_box(0); +x_2 = 0; +x_3 = lean_alloc_ctor(0, 1, 2); +lean_ctor_set(x_3, 0, x_1); +lean_ctor_set_uint8(x_3, sizeof(void*)*1, x_2); +lean_ctor_set_uint8(x_3, sizeof(void*)*1 + 1, x_2); +return x_3; +} +} +static lean_object* _init_l_Lean_instInhabitedImport() { +_start: +{ +lean_object* x_1; +x_1 = l_Lean_instInhabitedImport___closed__1; +return x_1; +} +} +LEAN_EXPORT uint8_t l___private_Lean_Setup_0__Lean_toJsonImport____x40_Lean_Setup___hyg_124____lambda__1(lean_object* x_1) { +_start: +{ +uint8_t x_2; +x_2 = 0; +return x_2; +} +} +static lean_object* _init_l___private_Lean_Setup_0__Lean_toJsonImport____x40_Lean_Setup___hyg_124____closed__1() { +_start: +{ +lean_object* x_1; +x_1 = lean_alloc_closure((void*)(l___private_Lean_Setup_0__Lean_toJsonImport____x40_Lean_Setup___hyg_124____lambda__1___boxed), 1, 0); +return x_1; +} +} +static lean_object* _init_l___private_Lean_Setup_0__Lean_toJsonImport____x40_Lean_Setup___hyg_124____closed__2() { +_start: +{ +lean_object* x_1; lean_object* x_2; +x_1 = lean_box(0); +x_2 = lean_array_mk(x_1); +return x_2; +} +} +LEAN_EXPORT lean_object* l___private_Lean_Setup_0__Lean_toJsonImport____x40_Lean_Setup___hyg_124_(lean_object* x_1) { +_start: +{ +lean_object* x_2; uint8_t x_3; lean_object* x_4; lean_object* x_5; lean_object* x_6; lean_object* x_7; lean_object* x_8; lean_object* x_9; lean_object* x_10; uint8_t x_11; lean_object* x_12; lean_object* x_13; lean_object* x_14; lean_object* x_15; uint8_t x_16; 
lean_object* x_17; lean_object* x_18; lean_object* x_19; lean_object* x_20; lean_object* x_21; lean_object* x_22; lean_object* x_23; lean_object* x_24; lean_object* x_25; lean_object* x_26; +x_2 = lean_ctor_get(x_1, 0); +lean_inc(x_2); +x_3 = 1; +x_4 = l___private_Lean_Setup_0__Lean_toJsonImport____x40_Lean_Setup___hyg_124____closed__1; +x_5 = l_Lean_Name_toString(x_2, x_3, x_4); +x_6 = lean_alloc_ctor(3, 1, 0); +lean_ctor_set(x_6, 0, x_5); +x_7 = l___private_Lean_Setup_0__Lean_reprImport____x40_Lean_Setup___hyg_34____closed__1; +x_8 = lean_alloc_ctor(0, 2, 0); +lean_ctor_set(x_8, 0, x_7); +lean_ctor_set(x_8, 1, x_6); +x_9 = lean_box(0); +x_10 = lean_alloc_ctor(1, 2, 0); +lean_ctor_set(x_10, 0, x_8); +lean_ctor_set(x_10, 1, x_9); +x_11 = lean_ctor_get_uint8(x_1, sizeof(void*)*1); +x_12 = lean_alloc_ctor(1, 0, 1); +lean_ctor_set_uint8(x_12, 0, x_11); +x_13 = l___private_Lean_Setup_0__Lean_reprImport____x40_Lean_Setup___hyg_34____closed__10; +x_14 = lean_alloc_ctor(0, 2, 0); +lean_ctor_set(x_14, 0, x_13); +lean_ctor_set(x_14, 1, x_12); +x_15 = lean_alloc_ctor(1, 2, 0); +lean_ctor_set(x_15, 0, x_14); +lean_ctor_set(x_15, 1, x_9); +x_16 = lean_ctor_get_uint8(x_1, sizeof(void*)*1 + 1); +lean_dec(x_1); +x_17 = lean_alloc_ctor(1, 0, 1); +lean_ctor_set_uint8(x_17, 0, x_16); +x_18 = l___private_Lean_Setup_0__Lean_reprImport____x40_Lean_Setup___hyg_34____closed__13; +x_19 = lean_alloc_ctor(0, 2, 0); +lean_ctor_set(x_19, 0, x_18); +lean_ctor_set(x_19, 1, x_17); +x_20 = lean_alloc_ctor(1, 2, 0); +lean_ctor_set(x_20, 0, x_19); +lean_ctor_set(x_20, 1, x_9); +x_21 = lean_alloc_ctor(1, 2, 0); +lean_ctor_set(x_21, 0, x_20); +lean_ctor_set(x_21, 1, x_9); +x_22 = lean_alloc_ctor(1, 2, 0); +lean_ctor_set(x_22, 0, x_15); +lean_ctor_set(x_22, 1, x_21); +x_23 = lean_alloc_ctor(1, 2, 0); +lean_ctor_set(x_23, 0, x_10); +lean_ctor_set(x_23, 1, x_22); +x_24 = l___private_Lean_Setup_0__Lean_toJsonImport____x40_Lean_Setup___hyg_124____closed__2; +x_25 = l_List_flatMapTR_go___at___private_Lean_Util_Paths_0__Lean_toJsonLeanPaths____x40_Lean_Util_Paths___hyg_55____spec__2(x_23, x_24); +x_26 = l_Lean_Json_mkObj(x_25); +return x_26; +} +} +LEAN_EXPORT lean_object* l___private_Lean_Setup_0__Lean_toJsonImport____x40_Lean_Setup___hyg_124____lambda__1___boxed(lean_object* x_1) { +_start: +{ +uint8_t x_2; lean_object* x_3; +x_2 = l___private_Lean_Setup_0__Lean_toJsonImport____x40_Lean_Setup___hyg_124____lambda__1(x_1); +lean_dec(x_1); +x_3 = lean_box(x_2); +return x_3; +} +} +static lean_object* _init_l_Lean_instToJsonImport___closed__1() { +_start: +{ +lean_object* x_1; +x_1 = lean_alloc_closure((void*)(l___private_Lean_Setup_0__Lean_toJsonImport____x40_Lean_Setup___hyg_124_), 1, 0); +return x_1; +} +} +static lean_object* _init_l_Lean_instToJsonImport() { +_start: +{ +lean_object* x_1; +x_1 = l_Lean_instToJsonImport___closed__1; +return x_1; +} +} +LEAN_EXPORT lean_object* l_Lean_Json_getObjValAs_x3f___at___private_Lean_Setup_0__Lean_fromJsonImport____x40_Lean_Setup___hyg_190____spec__1___lambda__1(lean_object* x_1, lean_object* x_2) { +_start: +{ +lean_object* x_3; +x_3 = lean_alloc_ctor(1, 1, 0); +lean_ctor_set(x_3, 0, x_1); +return x_3; +} +} +static lean_object* _init_l_Lean_Json_getObjValAs_x3f___at___private_Lean_Setup_0__Lean_fromJsonImport____x40_Lean_Setup___hyg_190____spec__1___closed__1() { +_start: +{ +lean_object* x_1; +x_1 = lean_mk_string_unchecked("[anonymous]", 11, 11); +return x_1; +} +} +static lean_object* 
_init_l_Lean_Json_getObjValAs_x3f___at___private_Lean_Setup_0__Lean_fromJsonImport____x40_Lean_Setup___hyg_190____spec__1___closed__2() { +_start: +{ +lean_object* x_1; +x_1 = lean_mk_string_unchecked("expected a `Name`, got '", 24, 24); +return x_1; +} +} +static lean_object* _init_l_Lean_Json_getObjValAs_x3f___at___private_Lean_Setup_0__Lean_fromJsonImport____x40_Lean_Setup___hyg_190____spec__1___closed__3() { +_start: +{ +lean_object* x_1; +x_1 = lean_mk_string_unchecked("'", 1, 1); +return x_1; +} +} +static lean_object* _init_l_Lean_Json_getObjValAs_x3f___at___private_Lean_Setup_0__Lean_fromJsonImport____x40_Lean_Setup___hyg_190____spec__1___closed__4() { +_start: +{ +lean_object* x_1; lean_object* x_2; +x_1 = lean_box(0); +x_2 = lean_alloc_ctor(1, 1, 0); +lean_ctor_set(x_2, 0, x_1); +return x_2; +} +} +LEAN_EXPORT lean_object* l_Lean_Json_getObjValAs_x3f___at___private_Lean_Setup_0__Lean_fromJsonImport____x40_Lean_Setup___hyg_190____spec__1(lean_object* x_1, lean_object* x_2) { +_start: +{ +lean_object* x_3; lean_object* x_4; +x_3 = l_Lean_Json_getObjValD(x_1, x_2); +lean_inc(x_3); +x_4 = l_Lean_Json_getStr_x3f(x_3); +if (lean_obj_tag(x_4) == 0) +{ +uint8_t x_5; +lean_dec(x_3); +x_5 = !lean_is_exclusive(x_4); +if (x_5 == 0) +{ +return x_4; +} +else +{ +lean_object* x_6; lean_object* x_7; +x_6 = lean_ctor_get(x_4, 0); +lean_inc(x_6); +lean_dec(x_4); +x_7 = lean_alloc_ctor(0, 1, 0); +lean_ctor_set(x_7, 0, x_6); +return x_7; +} +} +else +{ +uint8_t x_8; +x_8 = !lean_is_exclusive(x_4); +if (x_8 == 0) +{ +lean_object* x_9; lean_object* x_10; uint8_t x_11; +x_9 = lean_ctor_get(x_4, 0); +x_10 = l_Lean_Json_getObjValAs_x3f___at___private_Lean_Setup_0__Lean_fromJsonImport____x40_Lean_Setup___hyg_190____spec__1___closed__1; +x_11 = lean_string_dec_eq(x_9, x_10); +if (x_11 == 0) +{ +lean_object* x_12; uint8_t x_13; +x_12 = l_String_toName(x_9); +x_13 = l_Lean_Name_isAnonymous(x_12); +if (x_13 == 0) +{ +lean_dec(x_3); +lean_ctor_set(x_4, 0, x_12); +return x_4; +} +else +{ +lean_object* x_14; lean_object* x_15; lean_object* x_16; lean_object* x_17; lean_object* x_18; lean_object* x_19; +lean_dec(x_12); +x_14 = lean_unsigned_to_nat(80u); +x_15 = l_Lean_Json_pretty(x_3, x_14); +x_16 = l_Lean_Json_getObjValAs_x3f___at___private_Lean_Setup_0__Lean_fromJsonImport____x40_Lean_Setup___hyg_190____spec__1___closed__2; +x_17 = lean_string_append(x_16, x_15); +lean_dec(x_15); +x_18 = l_Lean_Json_getObjValAs_x3f___at___private_Lean_Setup_0__Lean_fromJsonImport____x40_Lean_Setup___hyg_190____spec__1___closed__3; +x_19 = lean_string_append(x_17, x_18); +lean_ctor_set_tag(x_4, 0); +lean_ctor_set(x_4, 0, x_19); +return x_4; +} +} +else +{ +lean_object* x_20; +lean_free_object(x_4); +lean_dec(x_9); +lean_dec(x_3); +x_20 = l_Lean_Json_getObjValAs_x3f___at___private_Lean_Setup_0__Lean_fromJsonImport____x40_Lean_Setup___hyg_190____spec__1___closed__4; +return x_20; +} +} +else +{ +lean_object* x_21; lean_object* x_22; uint8_t x_23; +x_21 = lean_ctor_get(x_4, 0); +lean_inc(x_21); +lean_dec(x_4); +x_22 = l_Lean_Json_getObjValAs_x3f___at___private_Lean_Setup_0__Lean_fromJsonImport____x40_Lean_Setup___hyg_190____spec__1___closed__1; +x_23 = lean_string_dec_eq(x_21, x_22); +if (x_23 == 0) +{ +lean_object* x_24; uint8_t x_25; +x_24 = l_String_toName(x_21); +x_25 = l_Lean_Name_isAnonymous(x_24); +if (x_25 == 0) +{ +lean_object* x_26; +lean_dec(x_3); +x_26 = lean_alloc_ctor(1, 1, 0); +lean_ctor_set(x_26, 0, x_24); +return x_26; +} +else +{ +lean_object* x_27; lean_object* x_28; lean_object* x_29; lean_object* x_30; 
lean_object* x_31; lean_object* x_32; lean_object* x_33; +lean_dec(x_24); +x_27 = lean_unsigned_to_nat(80u); +x_28 = l_Lean_Json_pretty(x_3, x_27); +x_29 = l_Lean_Json_getObjValAs_x3f___at___private_Lean_Setup_0__Lean_fromJsonImport____x40_Lean_Setup___hyg_190____spec__1___closed__2; +x_30 = lean_string_append(x_29, x_28); +lean_dec(x_28); +x_31 = l_Lean_Json_getObjValAs_x3f___at___private_Lean_Setup_0__Lean_fromJsonImport____x40_Lean_Setup___hyg_190____spec__1___closed__3; +x_32 = lean_string_append(x_30, x_31); +x_33 = lean_alloc_ctor(0, 1, 0); +lean_ctor_set(x_33, 0, x_32); +return x_33; +} +} +else +{ +lean_object* x_34; +lean_dec(x_21); +lean_dec(x_3); +x_34 = l_Lean_Json_getObjValAs_x3f___at___private_Lean_Setup_0__Lean_fromJsonImport____x40_Lean_Setup___hyg_190____spec__1___closed__4; +return x_34; +} +} +} +} +} +LEAN_EXPORT lean_object* l_Lean_Json_getObjValAs_x3f___at___private_Lean_Setup_0__Lean_fromJsonImport____x40_Lean_Setup___hyg_190____spec__2(lean_object* x_1, lean_object* x_2) { +_start: +{ +lean_object* x_3; lean_object* x_4; +x_3 = l_Lean_Json_getObjValD(x_1, x_2); +x_4 = l_Lean_Json_getBool_x3f(x_3); +lean_dec(x_3); +return x_4; +} +} +static lean_object* _init_l___private_Lean_Setup_0__Lean_fromJsonImport____x40_Lean_Setup___hyg_190____closed__1() { +_start: +{ +lean_object* x_1; +x_1 = lean_mk_string_unchecked("Lean", 4, 4); +return x_1; +} +} +static lean_object* _init_l___private_Lean_Setup_0__Lean_fromJsonImport____x40_Lean_Setup___hyg_190____closed__2() { +_start: +{ +lean_object* x_1; +x_1 = lean_mk_string_unchecked("Import", 6, 6); +return x_1; +} +} +static lean_object* _init_l___private_Lean_Setup_0__Lean_fromJsonImport____x40_Lean_Setup___hyg_190____closed__3() { +_start: +{ +lean_object* x_1; lean_object* x_2; lean_object* x_3; +x_1 = l___private_Lean_Setup_0__Lean_fromJsonImport____x40_Lean_Setup___hyg_190____closed__1; +x_2 = l___private_Lean_Setup_0__Lean_fromJsonImport____x40_Lean_Setup___hyg_190____closed__2; +x_3 = l_Lean_Name_mkStr2(x_1, x_2); +return x_3; +} +} +static lean_object* _init_l___private_Lean_Setup_0__Lean_fromJsonImport____x40_Lean_Setup___hyg_190____closed__4() { +_start: +{ +lean_object* x_1; uint8_t x_2; lean_object* x_3; lean_object* x_4; +x_1 = l___private_Lean_Setup_0__Lean_fromJsonImport____x40_Lean_Setup___hyg_190____closed__3; +x_2 = 1; +x_3 = l___private_Lean_Setup_0__Lean_toJsonImport____x40_Lean_Setup___hyg_124____closed__1; +x_4 = l_Lean_Name_toString(x_1, x_2, x_3); +return x_4; +} +} +static lean_object* _init_l___private_Lean_Setup_0__Lean_fromJsonImport____x40_Lean_Setup___hyg_190____closed__5() { +_start: +{ +lean_object* x_1; +x_1 = lean_mk_string_unchecked(".", 1, 1); +return x_1; +} +} +static lean_object* _init_l___private_Lean_Setup_0__Lean_fromJsonImport____x40_Lean_Setup___hyg_190____closed__6() { +_start: +{ +lean_object* x_1; lean_object* x_2; lean_object* x_3; +x_1 = l___private_Lean_Setup_0__Lean_fromJsonImport____x40_Lean_Setup___hyg_190____closed__4; +x_2 = l___private_Lean_Setup_0__Lean_fromJsonImport____x40_Lean_Setup___hyg_190____closed__5; +x_3 = lean_string_append(x_1, x_2); +return x_3; +} +} +static lean_object* _init_l___private_Lean_Setup_0__Lean_fromJsonImport____x40_Lean_Setup___hyg_190____closed__7() { +_start: +{ +lean_object* x_1; lean_object* x_2; lean_object* x_3; +x_1 = lean_box(0); +x_2 = l___private_Lean_Setup_0__Lean_reprImport____x40_Lean_Setup___hyg_34____closed__1; +x_3 = l_Lean_Name_str___override(x_1, x_2); +return x_3; +} +} +static lean_object* 
_init_l___private_Lean_Setup_0__Lean_fromJsonImport____x40_Lean_Setup___hyg_190____closed__8() { +_start: +{ +lean_object* x_1; uint8_t x_2; lean_object* x_3; lean_object* x_4; +x_1 = l___private_Lean_Setup_0__Lean_fromJsonImport____x40_Lean_Setup___hyg_190____closed__7; +x_2 = 1; +x_3 = l___private_Lean_Setup_0__Lean_toJsonImport____x40_Lean_Setup___hyg_124____closed__1; +x_4 = l_Lean_Name_toString(x_1, x_2, x_3); +return x_4; +} +} +static lean_object* _init_l___private_Lean_Setup_0__Lean_fromJsonImport____x40_Lean_Setup___hyg_190____closed__9() { +_start: +{ +lean_object* x_1; lean_object* x_2; lean_object* x_3; +x_1 = l___private_Lean_Setup_0__Lean_fromJsonImport____x40_Lean_Setup___hyg_190____closed__6; +x_2 = l___private_Lean_Setup_0__Lean_fromJsonImport____x40_Lean_Setup___hyg_190____closed__8; +x_3 = lean_string_append(x_1, x_2); +return x_3; +} +} +static lean_object* _init_l___private_Lean_Setup_0__Lean_fromJsonImport____x40_Lean_Setup___hyg_190____closed__10() { +_start: +{ +lean_object* x_1; +x_1 = lean_mk_string_unchecked(": ", 2, 2); +return x_1; +} +} +static lean_object* _init_l___private_Lean_Setup_0__Lean_fromJsonImport____x40_Lean_Setup___hyg_190____closed__11() { +_start: +{ +lean_object* x_1; lean_object* x_2; lean_object* x_3; +x_1 = l___private_Lean_Setup_0__Lean_fromJsonImport____x40_Lean_Setup___hyg_190____closed__9; +x_2 = l___private_Lean_Setup_0__Lean_fromJsonImport____x40_Lean_Setup___hyg_190____closed__10; +x_3 = lean_string_append(x_1, x_2); +return x_3; +} +} +static lean_object* _init_l___private_Lean_Setup_0__Lean_fromJsonImport____x40_Lean_Setup___hyg_190____closed__12() { +_start: +{ +lean_object* x_1; lean_object* x_2; lean_object* x_3; +x_1 = lean_box(0); +x_2 = l___private_Lean_Setup_0__Lean_reprImport____x40_Lean_Setup___hyg_34____closed__10; +x_3 = l_Lean_Name_str___override(x_1, x_2); +return x_3; +} +} +static lean_object* _init_l___private_Lean_Setup_0__Lean_fromJsonImport____x40_Lean_Setup___hyg_190____closed__13() { +_start: +{ +lean_object* x_1; uint8_t x_2; lean_object* x_3; lean_object* x_4; +x_1 = l___private_Lean_Setup_0__Lean_fromJsonImport____x40_Lean_Setup___hyg_190____closed__12; +x_2 = 1; +x_3 = l___private_Lean_Setup_0__Lean_toJsonImport____x40_Lean_Setup___hyg_124____closed__1; +x_4 = l_Lean_Name_toString(x_1, x_2, x_3); +return x_4; +} +} +static lean_object* _init_l___private_Lean_Setup_0__Lean_fromJsonImport____x40_Lean_Setup___hyg_190____closed__14() { +_start: +{ +lean_object* x_1; lean_object* x_2; lean_object* x_3; +x_1 = l___private_Lean_Setup_0__Lean_fromJsonImport____x40_Lean_Setup___hyg_190____closed__6; +x_2 = l___private_Lean_Setup_0__Lean_fromJsonImport____x40_Lean_Setup___hyg_190____closed__13; +x_3 = lean_string_append(x_1, x_2); +return x_3; +} +} +static lean_object* _init_l___private_Lean_Setup_0__Lean_fromJsonImport____x40_Lean_Setup___hyg_190____closed__15() { +_start: +{ +lean_object* x_1; lean_object* x_2; lean_object* x_3; +x_1 = l___private_Lean_Setup_0__Lean_fromJsonImport____x40_Lean_Setup___hyg_190____closed__14; +x_2 = l___private_Lean_Setup_0__Lean_fromJsonImport____x40_Lean_Setup___hyg_190____closed__10; +x_3 = lean_string_append(x_1, x_2); +return x_3; +} +} +static lean_object* _init_l___private_Lean_Setup_0__Lean_fromJsonImport____x40_Lean_Setup___hyg_190____closed__16() { +_start: +{ +lean_object* x_1; lean_object* x_2; lean_object* x_3; +x_1 = lean_box(0); +x_2 = l___private_Lean_Setup_0__Lean_reprImport____x40_Lean_Setup___hyg_34____closed__13; +x_3 = l_Lean_Name_str___override(x_1, x_2); +return 
x_3; +} +} +static lean_object* _init_l___private_Lean_Setup_0__Lean_fromJsonImport____x40_Lean_Setup___hyg_190____closed__17() { +_start: +{ +lean_object* x_1; uint8_t x_2; lean_object* x_3; lean_object* x_4; +x_1 = l___private_Lean_Setup_0__Lean_fromJsonImport____x40_Lean_Setup___hyg_190____closed__16; +x_2 = 1; +x_3 = l___private_Lean_Setup_0__Lean_toJsonImport____x40_Lean_Setup___hyg_124____closed__1; +x_4 = l_Lean_Name_toString(x_1, x_2, x_3); +return x_4; +} +} +static lean_object* _init_l___private_Lean_Setup_0__Lean_fromJsonImport____x40_Lean_Setup___hyg_190____closed__18() { +_start: +{ +lean_object* x_1; lean_object* x_2; lean_object* x_3; +x_1 = l___private_Lean_Setup_0__Lean_fromJsonImport____x40_Lean_Setup___hyg_190____closed__6; +x_2 = l___private_Lean_Setup_0__Lean_fromJsonImport____x40_Lean_Setup___hyg_190____closed__17; +x_3 = lean_string_append(x_1, x_2); +return x_3; +} +} +static lean_object* _init_l___private_Lean_Setup_0__Lean_fromJsonImport____x40_Lean_Setup___hyg_190____closed__19() { +_start: +{ +lean_object* x_1; lean_object* x_2; lean_object* x_3; +x_1 = l___private_Lean_Setup_0__Lean_fromJsonImport____x40_Lean_Setup___hyg_190____closed__18; +x_2 = l___private_Lean_Setup_0__Lean_fromJsonImport____x40_Lean_Setup___hyg_190____closed__10; +x_3 = lean_string_append(x_1, x_2); +return x_3; +} +} +LEAN_EXPORT lean_object* l___private_Lean_Setup_0__Lean_fromJsonImport____x40_Lean_Setup___hyg_190_(lean_object* x_1) { +_start: +{ +lean_object* x_2; lean_object* x_3; +x_2 = l___private_Lean_Setup_0__Lean_reprImport____x40_Lean_Setup___hyg_34____closed__1; +lean_inc(x_1); +x_3 = l_Lean_Json_getObjValAs_x3f___at___private_Lean_Setup_0__Lean_fromJsonImport____x40_Lean_Setup___hyg_190____spec__1(x_1, x_2); +if (lean_obj_tag(x_3) == 0) +{ +uint8_t x_4; +lean_dec(x_1); +x_4 = !lean_is_exclusive(x_3); +if (x_4 == 0) +{ +lean_object* x_5; lean_object* x_6; lean_object* x_7; +x_5 = lean_ctor_get(x_3, 0); +x_6 = l___private_Lean_Setup_0__Lean_fromJsonImport____x40_Lean_Setup___hyg_190____closed__11; +x_7 = lean_string_append(x_6, x_5); +lean_dec(x_5); +lean_ctor_set(x_3, 0, x_7); +return x_3; +} +else +{ +lean_object* x_8; lean_object* x_9; lean_object* x_10; lean_object* x_11; +x_8 = lean_ctor_get(x_3, 0); +lean_inc(x_8); +lean_dec(x_3); +x_9 = l___private_Lean_Setup_0__Lean_fromJsonImport____x40_Lean_Setup___hyg_190____closed__11; +x_10 = lean_string_append(x_9, x_8); +lean_dec(x_8); +x_11 = lean_alloc_ctor(0, 1, 0); +lean_ctor_set(x_11, 0, x_10); +return x_11; +} +} +else +{ +lean_object* x_12; lean_object* x_13; lean_object* x_14; +x_12 = lean_ctor_get(x_3, 0); +lean_inc(x_12); +lean_dec(x_3); +x_13 = l___private_Lean_Setup_0__Lean_reprImport____x40_Lean_Setup___hyg_34____closed__10; +lean_inc(x_1); +x_14 = l_Lean_Json_getObjValAs_x3f___at___private_Lean_Setup_0__Lean_fromJsonImport____x40_Lean_Setup___hyg_190____spec__2(x_1, x_13); +if (lean_obj_tag(x_14) == 0) +{ +uint8_t x_15; +lean_dec(x_12); +lean_dec(x_1); +x_15 = !lean_is_exclusive(x_14); +if (x_15 == 0) +{ +lean_object* x_16; lean_object* x_17; lean_object* x_18; +x_16 = lean_ctor_get(x_14, 0); +x_17 = l___private_Lean_Setup_0__Lean_fromJsonImport____x40_Lean_Setup___hyg_190____closed__15; +x_18 = lean_string_append(x_17, x_16); +lean_dec(x_16); +lean_ctor_set(x_14, 0, x_18); +return x_14; +} +else +{ +lean_object* x_19; lean_object* x_20; lean_object* x_21; lean_object* x_22; +x_19 = lean_ctor_get(x_14, 0); +lean_inc(x_19); +lean_dec(x_14); +x_20 = 
l___private_Lean_Setup_0__Lean_fromJsonImport____x40_Lean_Setup___hyg_190____closed__15; +x_21 = lean_string_append(x_20, x_19); +lean_dec(x_19); +x_22 = lean_alloc_ctor(0, 1, 0); +lean_ctor_set(x_22, 0, x_21); +return x_22; +} +} +else +{ +lean_object* x_23; lean_object* x_24; lean_object* x_25; +x_23 = lean_ctor_get(x_14, 0); +lean_inc(x_23); +lean_dec(x_14); +x_24 = l___private_Lean_Setup_0__Lean_reprImport____x40_Lean_Setup___hyg_34____closed__13; +x_25 = l_Lean_Json_getObjValAs_x3f___at___private_Lean_Setup_0__Lean_fromJsonImport____x40_Lean_Setup___hyg_190____spec__2(x_1, x_24); +if (lean_obj_tag(x_25) == 0) +{ +uint8_t x_26; +lean_dec(x_23); +lean_dec(x_12); +x_26 = !lean_is_exclusive(x_25); +if (x_26 == 0) +{ +lean_object* x_27; lean_object* x_28; lean_object* x_29; +x_27 = lean_ctor_get(x_25, 0); +x_28 = l___private_Lean_Setup_0__Lean_fromJsonImport____x40_Lean_Setup___hyg_190____closed__19; +x_29 = lean_string_append(x_28, x_27); +lean_dec(x_27); +lean_ctor_set(x_25, 0, x_29); +return x_25; +} +else +{ +lean_object* x_30; lean_object* x_31; lean_object* x_32; lean_object* x_33; +x_30 = lean_ctor_get(x_25, 0); +lean_inc(x_30); +lean_dec(x_25); +x_31 = l___private_Lean_Setup_0__Lean_fromJsonImport____x40_Lean_Setup___hyg_190____closed__19; +x_32 = lean_string_append(x_31, x_30); +lean_dec(x_30); +x_33 = lean_alloc_ctor(0, 1, 0); +lean_ctor_set(x_33, 0, x_32); +return x_33; +} +} +else +{ +uint8_t x_34; +x_34 = !lean_is_exclusive(x_25); +if (x_34 == 0) +{ +lean_object* x_35; lean_object* x_36; uint8_t x_37; uint8_t x_38; +x_35 = lean_ctor_get(x_25, 0); +x_36 = lean_alloc_ctor(0, 1, 2); +lean_ctor_set(x_36, 0, x_12); +x_37 = lean_unbox(x_23); +lean_dec(x_23); +lean_ctor_set_uint8(x_36, sizeof(void*)*1, x_37); +x_38 = lean_unbox(x_35); +lean_dec(x_35); +lean_ctor_set_uint8(x_36, sizeof(void*)*1 + 1, x_38); +lean_ctor_set(x_25, 0, x_36); +return x_25; +} +else +{ +lean_object* x_39; lean_object* x_40; uint8_t x_41; uint8_t x_42; lean_object* x_43; +x_39 = lean_ctor_get(x_25, 0); +lean_inc(x_39); +lean_dec(x_25); +x_40 = lean_alloc_ctor(0, 1, 2); +lean_ctor_set(x_40, 0, x_12); +x_41 = lean_unbox(x_23); +lean_dec(x_23); +lean_ctor_set_uint8(x_40, sizeof(void*)*1, x_41); +x_42 = lean_unbox(x_39); +lean_dec(x_39); +lean_ctor_set_uint8(x_40, sizeof(void*)*1 + 1, x_42); +x_43 = lean_alloc_ctor(1, 1, 0); +lean_ctor_set(x_43, 0, x_40); +return x_43; +} +} +} +} +} +} +LEAN_EXPORT lean_object* l_Lean_Json_getObjValAs_x3f___at___private_Lean_Setup_0__Lean_fromJsonImport____x40_Lean_Setup___hyg_190____spec__1___lambda__1___boxed(lean_object* x_1, lean_object* x_2) { +_start: +{ +lean_object* x_3; +x_3 = l_Lean_Json_getObjValAs_x3f___at___private_Lean_Setup_0__Lean_fromJsonImport____x40_Lean_Setup___hyg_190____spec__1___lambda__1(x_1, x_2); +lean_dec(x_2); +return x_3; +} +} +LEAN_EXPORT lean_object* l_Lean_Json_getObjValAs_x3f___at___private_Lean_Setup_0__Lean_fromJsonImport____x40_Lean_Setup___hyg_190____spec__1___boxed(lean_object* x_1, lean_object* x_2) { +_start: +{ +lean_object* x_3; +x_3 = l_Lean_Json_getObjValAs_x3f___at___private_Lean_Setup_0__Lean_fromJsonImport____x40_Lean_Setup___hyg_190____spec__1(x_1, x_2); +lean_dec(x_2); +return x_3; +} +} +LEAN_EXPORT lean_object* l_Lean_Json_getObjValAs_x3f___at___private_Lean_Setup_0__Lean_fromJsonImport____x40_Lean_Setup___hyg_190____spec__2___boxed(lean_object* x_1, lean_object* x_2) { +_start: +{ +lean_object* x_3; +x_3 = l_Lean_Json_getObjValAs_x3f___at___private_Lean_Setup_0__Lean_fromJsonImport____x40_Lean_Setup___hyg_190____spec__2(x_1, 
x_2); +lean_dec(x_2); +return x_3; +} +} +static lean_object* _init_l_Lean_instFromJsonImport___closed__1() { +_start: +{ +lean_object* x_1; +x_1 = lean_alloc_closure((void*)(l___private_Lean_Setup_0__Lean_fromJsonImport____x40_Lean_Setup___hyg_190_), 1, 0); +return x_1; +} +} +static lean_object* _init_l_Lean_instFromJsonImport() { +_start: +{ +lean_object* x_1; +x_1 = l_Lean_instFromJsonImport___closed__1; +return x_1; +} +} +LEAN_EXPORT lean_object* l_Lean_instCoeNameImport(lean_object* x_1) { +_start: +{ +uint8_t x_2; uint8_t x_3; lean_object* x_4; +x_2 = 0; +x_3 = 1; +x_4 = lean_alloc_ctor(0, 1, 2); +lean_ctor_set(x_4, 0, x_1); +lean_ctor_set_uint8(x_4, sizeof(void*)*1, x_2); +lean_ctor_set_uint8(x_4, sizeof(void*)*1 + 1, x_3); +return x_4; +} +} +LEAN_EXPORT lean_object* l_Lean_instToStringImport(lean_object* x_1) { +_start: +{ +lean_object* x_2; uint8_t x_3; lean_object* x_4; lean_object* x_5; +x_2 = lean_ctor_get(x_1, 0); +lean_inc(x_2); +lean_dec(x_1); +x_3 = 1; +x_4 = l___private_Lean_Setup_0__Lean_toJsonImport____x40_Lean_Setup___hyg_124____closed__1; +x_5 = l_Lean_Name_toString(x_2, x_3, x_4); +return x_5; +} +} +static lean_object* _init_l_Option_repr___at___private_Lean_Setup_0__Lean_reprModuleArtifacts____x40_Lean_Setup___hyg_417____spec__1___closed__1() { +_start: +{ +lean_object* x_1; +x_1 = lean_mk_string_unchecked("none", 4, 4); +return x_1; +} +} +static lean_object* _init_l_Option_repr___at___private_Lean_Setup_0__Lean_reprModuleArtifacts____x40_Lean_Setup___hyg_417____spec__1___closed__2() { +_start: +{ +lean_object* x_1; lean_object* x_2; +x_1 = l_Option_repr___at___private_Lean_Setup_0__Lean_reprModuleArtifacts____x40_Lean_Setup___hyg_417____spec__1___closed__1; +x_2 = lean_alloc_ctor(3, 1, 0); +lean_ctor_set(x_2, 0, x_1); +return x_2; +} +} +static lean_object* _init_l_Option_repr___at___private_Lean_Setup_0__Lean_reprModuleArtifacts____x40_Lean_Setup___hyg_417____spec__1___closed__3() { +_start: +{ +lean_object* x_1; +x_1 = lean_mk_string_unchecked("some ", 5, 5); +return x_1; +} +} +static lean_object* _init_l_Option_repr___at___private_Lean_Setup_0__Lean_reprModuleArtifacts____x40_Lean_Setup___hyg_417____spec__1___closed__4() { +_start: +{ +lean_object* x_1; lean_object* x_2; +x_1 = l_Option_repr___at___private_Lean_Setup_0__Lean_reprModuleArtifacts____x40_Lean_Setup___hyg_417____spec__1___closed__3; +x_2 = lean_alloc_ctor(3, 1, 0); +lean_ctor_set(x_2, 0, x_1); +return x_2; +} +} +static lean_object* _init_l_Option_repr___at___private_Lean_Setup_0__Lean_reprModuleArtifacts____x40_Lean_Setup___hyg_417____spec__1___closed__5() { +_start: +{ +lean_object* x_1; +x_1 = lean_mk_string_unchecked("FilePath.mk ", 12, 12); +return x_1; +} +} +static lean_object* _init_l_Option_repr___at___private_Lean_Setup_0__Lean_reprModuleArtifacts____x40_Lean_Setup___hyg_417____spec__1___closed__6() { +_start: +{ +lean_object* x_1; lean_object* x_2; +x_1 = l_Option_repr___at___private_Lean_Setup_0__Lean_reprModuleArtifacts____x40_Lean_Setup___hyg_417____spec__1___closed__5; +x_2 = lean_alloc_ctor(3, 1, 0); +lean_ctor_set(x_2, 0, x_1); +return x_2; +} +} +LEAN_EXPORT lean_object* l_Option_repr___at___private_Lean_Setup_0__Lean_reprModuleArtifacts____x40_Lean_Setup___hyg_417____spec__1(lean_object* x_1, lean_object* x_2) { +_start: +{ +if (lean_obj_tag(x_1) == 0) +{ +lean_object* x_3; +x_3 = l_Option_repr___at___private_Lean_Setup_0__Lean_reprModuleArtifacts____x40_Lean_Setup___hyg_417____spec__1___closed__2; +return x_3; +} +else +{ +uint8_t x_4; +x_4 = !lean_is_exclusive(x_1); +if 
(x_4 == 0) +{ +lean_object* x_5; lean_object* x_6; lean_object* x_7; lean_object* x_8; lean_object* x_9; lean_object* x_10; lean_object* x_11; lean_object* x_12; lean_object* x_13; +x_5 = lean_ctor_get(x_1, 0); +x_6 = l_String_quote(x_5); +lean_dec(x_5); +lean_ctor_set_tag(x_1, 3); +lean_ctor_set(x_1, 0, x_6); +x_7 = l_Option_repr___at___private_Lean_Setup_0__Lean_reprModuleArtifacts____x40_Lean_Setup___hyg_417____spec__1___closed__6; +x_8 = lean_alloc_ctor(5, 2, 0); +lean_ctor_set(x_8, 0, x_7); +lean_ctor_set(x_8, 1, x_1); +x_9 = lean_unsigned_to_nat(1024u); +x_10 = l_Repr_addAppParen(x_8, x_9); +x_11 = l_Option_repr___at___private_Lean_Setup_0__Lean_reprModuleArtifacts____x40_Lean_Setup___hyg_417____spec__1___closed__4; +x_12 = lean_alloc_ctor(5, 2, 0); +lean_ctor_set(x_12, 0, x_11); +lean_ctor_set(x_12, 1, x_10); +x_13 = l_Repr_addAppParen(x_12, x_2); +return x_13; +} +else +{ +lean_object* x_14; lean_object* x_15; lean_object* x_16; lean_object* x_17; lean_object* x_18; lean_object* x_19; lean_object* x_20; lean_object* x_21; lean_object* x_22; lean_object* x_23; +x_14 = lean_ctor_get(x_1, 0); +lean_inc(x_14); +lean_dec(x_1); +x_15 = l_String_quote(x_14); +lean_dec(x_14); +x_16 = lean_alloc_ctor(3, 1, 0); +lean_ctor_set(x_16, 0, x_15); +x_17 = l_Option_repr___at___private_Lean_Setup_0__Lean_reprModuleArtifacts____x40_Lean_Setup___hyg_417____spec__1___closed__6; +x_18 = lean_alloc_ctor(5, 2, 0); +lean_ctor_set(x_18, 0, x_17); +lean_ctor_set(x_18, 1, x_16); +x_19 = lean_unsigned_to_nat(1024u); +x_20 = l_Repr_addAppParen(x_18, x_19); +x_21 = l_Option_repr___at___private_Lean_Setup_0__Lean_reprModuleArtifacts____x40_Lean_Setup___hyg_417____spec__1___closed__4; +x_22 = lean_alloc_ctor(5, 2, 0); +lean_ctor_set(x_22, 0, x_21); +lean_ctor_set(x_22, 1, x_20); +x_23 = l_Repr_addAppParen(x_22, x_2); +return x_23; +} +} +} +} +static lean_object* _init_l___private_Lean_Setup_0__Lean_reprModuleArtifacts____x40_Lean_Setup___hyg_417____closed__1() { +_start: +{ +lean_object* x_1; +x_1 = lean_mk_string_unchecked("lean\?", 5, 5); +return x_1; +} +} +static lean_object* _init_l___private_Lean_Setup_0__Lean_reprModuleArtifacts____x40_Lean_Setup___hyg_417____closed__2() { +_start: +{ +lean_object* x_1; lean_object* x_2; +x_1 = l___private_Lean_Setup_0__Lean_reprModuleArtifacts____x40_Lean_Setup___hyg_417____closed__1; +x_2 = lean_alloc_ctor(3, 1, 0); +lean_ctor_set(x_2, 0, x_1); +return x_2; +} +} +static lean_object* _init_l___private_Lean_Setup_0__Lean_reprModuleArtifacts____x40_Lean_Setup___hyg_417____closed__3() { +_start: +{ +lean_object* x_1; lean_object* x_2; lean_object* x_3; +x_1 = lean_box(0); +x_2 = l___private_Lean_Setup_0__Lean_reprModuleArtifacts____x40_Lean_Setup___hyg_417____closed__2; +x_3 = lean_alloc_ctor(5, 2, 0); +lean_ctor_set(x_3, 0, x_1); +lean_ctor_set(x_3, 1, x_2); +return x_3; +} +} +static lean_object* _init_l___private_Lean_Setup_0__Lean_reprModuleArtifacts____x40_Lean_Setup___hyg_417____closed__4() { +_start: +{ +lean_object* x_1; lean_object* x_2; lean_object* x_3; +x_1 = l___private_Lean_Setup_0__Lean_reprModuleArtifacts____x40_Lean_Setup___hyg_417____closed__3; +x_2 = l___private_Lean_Setup_0__Lean_reprImport____x40_Lean_Setup___hyg_34____closed__5; +x_3 = lean_alloc_ctor(5, 2, 0); +lean_ctor_set(x_3, 0, x_1); +lean_ctor_set(x_3, 1, x_2); +return x_3; +} +} +static lean_object* _init_l___private_Lean_Setup_0__Lean_reprModuleArtifacts____x40_Lean_Setup___hyg_417____closed__5() { +_start: +{ +lean_object* x_1; lean_object* x_2; +x_1 = lean_unsigned_to_nat(9u); +x_2 = 
lean_nat_to_int(x_1); +return x_2; +} +} +static lean_object* _init_l___private_Lean_Setup_0__Lean_reprModuleArtifacts____x40_Lean_Setup___hyg_417____closed__6() { +_start: +{ +lean_object* x_1; +x_1 = lean_mk_string_unchecked("olean\?", 6, 6); +return x_1; +} +} +static lean_object* _init_l___private_Lean_Setup_0__Lean_reprModuleArtifacts____x40_Lean_Setup___hyg_417____closed__7() { +_start: +{ +lean_object* x_1; lean_object* x_2; +x_1 = l___private_Lean_Setup_0__Lean_reprModuleArtifacts____x40_Lean_Setup___hyg_417____closed__6; +x_2 = lean_alloc_ctor(3, 1, 0); +lean_ctor_set(x_2, 0, x_1); +return x_2; +} +} +static lean_object* _init_l___private_Lean_Setup_0__Lean_reprModuleArtifacts____x40_Lean_Setup___hyg_417____closed__8() { +_start: +{ +lean_object* x_1; +x_1 = lean_mk_string_unchecked("oleanServer\?", 12, 12); +return x_1; +} +} +static lean_object* _init_l___private_Lean_Setup_0__Lean_reprModuleArtifacts____x40_Lean_Setup___hyg_417____closed__9() { +_start: +{ +lean_object* x_1; lean_object* x_2; +x_1 = l___private_Lean_Setup_0__Lean_reprModuleArtifacts____x40_Lean_Setup___hyg_417____closed__8; +x_2 = lean_alloc_ctor(3, 1, 0); +lean_ctor_set(x_2, 0, x_1); +return x_2; +} +} +static lean_object* _init_l___private_Lean_Setup_0__Lean_reprModuleArtifacts____x40_Lean_Setup___hyg_417____closed__10() { +_start: +{ +lean_object* x_1; lean_object* x_2; +x_1 = lean_unsigned_to_nat(16u); +x_2 = lean_nat_to_int(x_1); +return x_2; +} +} +static lean_object* _init_l___private_Lean_Setup_0__Lean_reprModuleArtifacts____x40_Lean_Setup___hyg_417____closed__11() { +_start: +{ +lean_object* x_1; +x_1 = lean_mk_string_unchecked("oleanPrivate\?", 13, 13); +return x_1; +} +} +static lean_object* _init_l___private_Lean_Setup_0__Lean_reprModuleArtifacts____x40_Lean_Setup___hyg_417____closed__12() { +_start: +{ +lean_object* x_1; lean_object* x_2; +x_1 = l___private_Lean_Setup_0__Lean_reprModuleArtifacts____x40_Lean_Setup___hyg_417____closed__11; +x_2 = lean_alloc_ctor(3, 1, 0); +lean_ctor_set(x_2, 0, x_1); +return x_2; +} +} +static lean_object* _init_l___private_Lean_Setup_0__Lean_reprModuleArtifacts____x40_Lean_Setup___hyg_417____closed__13() { +_start: +{ +lean_object* x_1; lean_object* x_2; +x_1 = lean_unsigned_to_nat(17u); +x_2 = lean_nat_to_int(x_1); +return x_2; +} +} +static lean_object* _init_l___private_Lean_Setup_0__Lean_reprModuleArtifacts____x40_Lean_Setup___hyg_417____closed__14() { +_start: +{ +lean_object* x_1; +x_1 = lean_mk_string_unchecked("ilean\?", 6, 6); +return x_1; +} +} +static lean_object* _init_l___private_Lean_Setup_0__Lean_reprModuleArtifacts____x40_Lean_Setup___hyg_417____closed__15() { +_start: +{ +lean_object* x_1; lean_object* x_2; +x_1 = l___private_Lean_Setup_0__Lean_reprModuleArtifacts____x40_Lean_Setup___hyg_417____closed__14; +x_2 = lean_alloc_ctor(3, 1, 0); +lean_ctor_set(x_2, 0, x_1); +return x_2; +} +} +LEAN_EXPORT lean_object* l___private_Lean_Setup_0__Lean_reprModuleArtifacts____x40_Lean_Setup___hyg_417_(lean_object* x_1, lean_object* x_2) { +_start: +{ +lean_object* x_3; lean_object* x_4; lean_object* x_5; lean_object* x_6; lean_object* x_7; uint8_t x_8; lean_object* x_9; lean_object* x_10; lean_object* x_11; lean_object* x_12; lean_object* x_13; lean_object* x_14; lean_object* x_15; lean_object* x_16; lean_object* x_17; lean_object* x_18; lean_object* x_19; lean_object* x_20; lean_object* x_21; lean_object* x_22; lean_object* x_23; lean_object* x_24; lean_object* x_25; lean_object* x_26; lean_object* x_27; lean_object* x_28; lean_object* x_29; lean_object* x_30; 
lean_object* x_31; lean_object* x_32; lean_object* x_33; lean_object* x_34; lean_object* x_35; lean_object* x_36; lean_object* x_37; lean_object* x_38; lean_object* x_39; lean_object* x_40; lean_object* x_41; lean_object* x_42; lean_object* x_43; lean_object* x_44; lean_object* x_45; lean_object* x_46; lean_object* x_47; lean_object* x_48; lean_object* x_49; lean_object* x_50; lean_object* x_51; lean_object* x_52; lean_object* x_53; lean_object* x_54; lean_object* x_55; lean_object* x_56; lean_object* x_57; lean_object* x_58; lean_object* x_59; lean_object* x_60; lean_object* x_61; lean_object* x_62; lean_object* x_63; lean_object* x_64; +x_3 = lean_ctor_get(x_1, 0); +lean_inc(x_3); +x_4 = lean_unsigned_to_nat(0u); +x_5 = l_Option_repr___at___private_Lean_Setup_0__Lean_reprModuleArtifacts____x40_Lean_Setup___hyg_417____spec__1(x_3, x_4); +x_6 = l___private_Lean_Setup_0__Lean_reprModuleArtifacts____x40_Lean_Setup___hyg_417____closed__5; +x_7 = lean_alloc_ctor(4, 2, 0); +lean_ctor_set(x_7, 0, x_6); +lean_ctor_set(x_7, 1, x_5); +x_8 = 0; +x_9 = lean_alloc_ctor(6, 1, 1); +lean_ctor_set(x_9, 0, x_7); +lean_ctor_set_uint8(x_9, sizeof(void*)*1, x_8); +x_10 = l___private_Lean_Setup_0__Lean_reprModuleArtifacts____x40_Lean_Setup___hyg_417____closed__4; +x_11 = lean_alloc_ctor(5, 2, 0); +lean_ctor_set(x_11, 0, x_10); +lean_ctor_set(x_11, 1, x_9); +x_12 = l___private_Lean_Setup_0__Lean_reprImport____x40_Lean_Setup___hyg_34____closed__9; +x_13 = lean_alloc_ctor(5, 2, 0); +lean_ctor_set(x_13, 0, x_11); +lean_ctor_set(x_13, 1, x_12); +x_14 = lean_box(1); +x_15 = lean_alloc_ctor(5, 2, 0); +lean_ctor_set(x_15, 0, x_13); +lean_ctor_set(x_15, 1, x_14); +x_16 = l___private_Lean_Setup_0__Lean_reprModuleArtifacts____x40_Lean_Setup___hyg_417____closed__7; +x_17 = lean_alloc_ctor(5, 2, 0); +lean_ctor_set(x_17, 0, x_15); +lean_ctor_set(x_17, 1, x_16); +x_18 = l___private_Lean_Setup_0__Lean_reprImport____x40_Lean_Setup___hyg_34____closed__5; +x_19 = lean_alloc_ctor(5, 2, 0); +lean_ctor_set(x_19, 0, x_17); +lean_ctor_set(x_19, 1, x_18); +x_20 = lean_ctor_get(x_1, 1); +lean_inc(x_20); +x_21 = l_Option_repr___at___private_Lean_Setup_0__Lean_reprModuleArtifacts____x40_Lean_Setup___hyg_417____spec__1(x_20, x_4); +x_22 = l___private_Lean_Setup_0__Lean_reprImport____x40_Lean_Setup___hyg_34____closed__7; +x_23 = lean_alloc_ctor(4, 2, 0); +lean_ctor_set(x_23, 0, x_22); +lean_ctor_set(x_23, 1, x_21); +x_24 = lean_alloc_ctor(6, 1, 1); +lean_ctor_set(x_24, 0, x_23); +lean_ctor_set_uint8(x_24, sizeof(void*)*1, x_8); +x_25 = lean_alloc_ctor(5, 2, 0); +lean_ctor_set(x_25, 0, x_19); +lean_ctor_set(x_25, 1, x_24); +x_26 = lean_alloc_ctor(5, 2, 0); +lean_ctor_set(x_26, 0, x_25); +lean_ctor_set(x_26, 1, x_12); +x_27 = lean_alloc_ctor(5, 2, 0); +lean_ctor_set(x_27, 0, x_26); +lean_ctor_set(x_27, 1, x_14); +x_28 = l___private_Lean_Setup_0__Lean_reprModuleArtifacts____x40_Lean_Setup___hyg_417____closed__9; +x_29 = lean_alloc_ctor(5, 2, 0); +lean_ctor_set(x_29, 0, x_27); +lean_ctor_set(x_29, 1, x_28); +x_30 = lean_alloc_ctor(5, 2, 0); +lean_ctor_set(x_30, 0, x_29); +lean_ctor_set(x_30, 1, x_18); +x_31 = lean_ctor_get(x_1, 2); +lean_inc(x_31); +x_32 = l_Option_repr___at___private_Lean_Setup_0__Lean_reprModuleArtifacts____x40_Lean_Setup___hyg_417____spec__1(x_31, x_4); +x_33 = l___private_Lean_Setup_0__Lean_reprModuleArtifacts____x40_Lean_Setup___hyg_417____closed__10; +x_34 = lean_alloc_ctor(4, 2, 0); +lean_ctor_set(x_34, 0, x_33); +lean_ctor_set(x_34, 1, x_32); +x_35 = lean_alloc_ctor(6, 1, 1); +lean_ctor_set(x_35, 0, x_34); 
+lean_ctor_set_uint8(x_35, sizeof(void*)*1, x_8); +x_36 = lean_alloc_ctor(5, 2, 0); +lean_ctor_set(x_36, 0, x_30); +lean_ctor_set(x_36, 1, x_35); +x_37 = lean_alloc_ctor(5, 2, 0); +lean_ctor_set(x_37, 0, x_36); +lean_ctor_set(x_37, 1, x_12); +x_38 = lean_alloc_ctor(5, 2, 0); +lean_ctor_set(x_38, 0, x_37); +lean_ctor_set(x_38, 1, x_14); +x_39 = l___private_Lean_Setup_0__Lean_reprModuleArtifacts____x40_Lean_Setup___hyg_417____closed__12; +x_40 = lean_alloc_ctor(5, 2, 0); +lean_ctor_set(x_40, 0, x_38); +lean_ctor_set(x_40, 1, x_39); +x_41 = lean_alloc_ctor(5, 2, 0); +lean_ctor_set(x_41, 0, x_40); +lean_ctor_set(x_41, 1, x_18); +x_42 = lean_ctor_get(x_1, 3); +lean_inc(x_42); +x_43 = l_Option_repr___at___private_Lean_Setup_0__Lean_reprModuleArtifacts____x40_Lean_Setup___hyg_417____spec__1(x_42, x_4); +x_44 = l___private_Lean_Setup_0__Lean_reprModuleArtifacts____x40_Lean_Setup___hyg_417____closed__13; +x_45 = lean_alloc_ctor(4, 2, 0); +lean_ctor_set(x_45, 0, x_44); +lean_ctor_set(x_45, 1, x_43); +x_46 = lean_alloc_ctor(6, 1, 1); +lean_ctor_set(x_46, 0, x_45); +lean_ctor_set_uint8(x_46, sizeof(void*)*1, x_8); +x_47 = lean_alloc_ctor(5, 2, 0); +lean_ctor_set(x_47, 0, x_41); +lean_ctor_set(x_47, 1, x_46); +x_48 = lean_alloc_ctor(5, 2, 0); +lean_ctor_set(x_48, 0, x_47); +lean_ctor_set(x_48, 1, x_12); +x_49 = lean_alloc_ctor(5, 2, 0); +lean_ctor_set(x_49, 0, x_48); +lean_ctor_set(x_49, 1, x_14); +x_50 = l___private_Lean_Setup_0__Lean_reprModuleArtifacts____x40_Lean_Setup___hyg_417____closed__15; +x_51 = lean_alloc_ctor(5, 2, 0); +lean_ctor_set(x_51, 0, x_49); +lean_ctor_set(x_51, 1, x_50); +x_52 = lean_alloc_ctor(5, 2, 0); +lean_ctor_set(x_52, 0, x_51); +lean_ctor_set(x_52, 1, x_18); +x_53 = lean_ctor_get(x_1, 4); +lean_inc(x_53); +lean_dec(x_1); +x_54 = l_Option_repr___at___private_Lean_Setup_0__Lean_reprModuleArtifacts____x40_Lean_Setup___hyg_417____spec__1(x_53, x_4); +x_55 = lean_alloc_ctor(4, 2, 0); +lean_ctor_set(x_55, 0, x_22); +lean_ctor_set(x_55, 1, x_54); +x_56 = lean_alloc_ctor(6, 1, 1); +lean_ctor_set(x_56, 0, x_55); +lean_ctor_set_uint8(x_56, sizeof(void*)*1, x_8); +x_57 = lean_alloc_ctor(5, 2, 0); +lean_ctor_set(x_57, 0, x_52); +lean_ctor_set(x_57, 1, x_56); +x_58 = l___private_Lean_Setup_0__Lean_reprImport____x40_Lean_Setup___hyg_34____closed__19; +x_59 = lean_alloc_ctor(5, 2, 0); +lean_ctor_set(x_59, 0, x_58); +lean_ctor_set(x_59, 1, x_57); +x_60 = l___private_Lean_Setup_0__Lean_reprImport____x40_Lean_Setup___hyg_34____closed__21; +x_61 = lean_alloc_ctor(5, 2, 0); +lean_ctor_set(x_61, 0, x_59); +lean_ctor_set(x_61, 1, x_60); +x_62 = l___private_Lean_Setup_0__Lean_reprImport____x40_Lean_Setup___hyg_34____closed__18; +x_63 = lean_alloc_ctor(4, 2, 0); +lean_ctor_set(x_63, 0, x_62); +lean_ctor_set(x_63, 1, x_61); +x_64 = lean_alloc_ctor(6, 1, 1); +lean_ctor_set(x_64, 0, x_63); +lean_ctor_set_uint8(x_64, sizeof(void*)*1, x_8); +return x_64; +} +} +LEAN_EXPORT lean_object* l_Option_repr___at___private_Lean_Setup_0__Lean_reprModuleArtifacts____x40_Lean_Setup___hyg_417____spec__1___boxed(lean_object* x_1, lean_object* x_2) { +_start: +{ +lean_object* x_3; +x_3 = l_Option_repr___at___private_Lean_Setup_0__Lean_reprModuleArtifacts____x40_Lean_Setup___hyg_417____spec__1(x_1, x_2); +lean_dec(x_2); +return x_3; +} +} +LEAN_EXPORT lean_object* l___private_Lean_Setup_0__Lean_reprModuleArtifacts____x40_Lean_Setup___hyg_417____boxed(lean_object* x_1, lean_object* x_2) { +_start: +{ +lean_object* x_3; +x_3 = l___private_Lean_Setup_0__Lean_reprModuleArtifacts____x40_Lean_Setup___hyg_417_(x_1, x_2); 
+lean_dec(x_2); +return x_3; +} +} +static lean_object* _init_l_Lean_instReprModuleArtifacts___closed__1() { +_start: +{ +lean_object* x_1; +x_1 = lean_alloc_closure((void*)(l___private_Lean_Setup_0__Lean_reprModuleArtifacts____x40_Lean_Setup___hyg_417____boxed), 2, 0); +return x_1; +} +} +static lean_object* _init_l_Lean_instReprModuleArtifacts() { +_start: +{ +lean_object* x_1; +x_1 = l_Lean_instReprModuleArtifacts___closed__1; +return x_1; +} +} +static lean_object* _init_l_Lean_instInhabitedModuleArtifacts___closed__1() { +_start: +{ +lean_object* x_1; lean_object* x_2; +x_1 = lean_box(0); +x_2 = lean_alloc_ctor(0, 5, 0); +lean_ctor_set(x_2, 0, x_1); +lean_ctor_set(x_2, 1, x_1); +lean_ctor_set(x_2, 2, x_1); +lean_ctor_set(x_2, 3, x_1); +lean_ctor_set(x_2, 4, x_1); +return x_2; +} +} +static lean_object* _init_l_Lean_instInhabitedModuleArtifacts() { +_start: +{ +lean_object* x_1; +x_1 = l_Lean_instInhabitedModuleArtifacts___closed__1; +return x_1; +} +} +LEAN_EXPORT lean_object* l_Lean_Json_opt___at___private_Lean_Setup_0__Lean_toJsonModuleArtifacts____x40_Lean_Setup___hyg_549____spec__1(lean_object* x_1, lean_object* x_2) { +_start: +{ +if (lean_obj_tag(x_2) == 0) +{ +lean_object* x_3; +lean_dec(x_1); +x_3 = lean_box(0); +return x_3; +} +else +{ +uint8_t x_4; +x_4 = !lean_is_exclusive(x_2); +if (x_4 == 0) +{ +lean_object* x_5; lean_object* x_6; lean_object* x_7; +lean_ctor_set_tag(x_2, 3); +x_5 = lean_alloc_ctor(0, 2, 0); +lean_ctor_set(x_5, 0, x_1); +lean_ctor_set(x_5, 1, x_2); +x_6 = lean_box(0); +x_7 = lean_alloc_ctor(1, 2, 0); +lean_ctor_set(x_7, 0, x_5); +lean_ctor_set(x_7, 1, x_6); +return x_7; +} +else +{ +lean_object* x_8; lean_object* x_9; lean_object* x_10; lean_object* x_11; lean_object* x_12; +x_8 = lean_ctor_get(x_2, 0); +lean_inc(x_8); +lean_dec(x_2); +x_9 = lean_alloc_ctor(3, 1, 0); +lean_ctor_set(x_9, 0, x_8); +x_10 = lean_alloc_ctor(0, 2, 0); +lean_ctor_set(x_10, 0, x_1); +lean_ctor_set(x_10, 1, x_9); +x_11 = lean_box(0); +x_12 = lean_alloc_ctor(1, 2, 0); +lean_ctor_set(x_12, 0, x_10); +lean_ctor_set(x_12, 1, x_11); +return x_12; +} +} +} +} +static lean_object* _init_l___private_Lean_Setup_0__Lean_toJsonModuleArtifacts____x40_Lean_Setup___hyg_549____closed__1() { +_start: +{ +lean_object* x_1; +x_1 = lean_mk_string_unchecked("lean", 4, 4); +return x_1; +} +} +static lean_object* _init_l___private_Lean_Setup_0__Lean_toJsonModuleArtifacts____x40_Lean_Setup___hyg_549____closed__2() { +_start: +{ +lean_object* x_1; +x_1 = lean_mk_string_unchecked("olean", 5, 5); +return x_1; +} +} +static lean_object* _init_l___private_Lean_Setup_0__Lean_toJsonModuleArtifacts____x40_Lean_Setup___hyg_549____closed__3() { +_start: +{ +lean_object* x_1; +x_1 = lean_mk_string_unchecked("oleanServer", 11, 11); +return x_1; +} +} +static lean_object* _init_l___private_Lean_Setup_0__Lean_toJsonModuleArtifacts____x40_Lean_Setup___hyg_549____closed__4() { +_start: +{ +lean_object* x_1; +x_1 = lean_mk_string_unchecked("oleanPrivate", 12, 12); +return x_1; +} +} +static lean_object* _init_l___private_Lean_Setup_0__Lean_toJsonModuleArtifacts____x40_Lean_Setup___hyg_549____closed__5() { +_start: +{ +lean_object* x_1; +x_1 = lean_mk_string_unchecked("ilean", 5, 5); +return x_1; +} +} +LEAN_EXPORT lean_object* l___private_Lean_Setup_0__Lean_toJsonModuleArtifacts____x40_Lean_Setup___hyg_549_(lean_object* x_1) { +_start: +{ +lean_object* x_2; lean_object* x_3; lean_object* x_4; lean_object* x_5; lean_object* x_6; lean_object* x_7; lean_object* x_8; lean_object* x_9; lean_object* x_10; lean_object* x_11; 
lean_object* x_12; lean_object* x_13; lean_object* x_14; lean_object* x_15; lean_object* x_16; lean_object* x_17; lean_object* x_18; lean_object* x_19; lean_object* x_20; lean_object* x_21; lean_object* x_22; lean_object* x_23; lean_object* x_24; lean_object* x_25; +x_2 = lean_ctor_get(x_1, 0); +lean_inc(x_2); +x_3 = l___private_Lean_Setup_0__Lean_toJsonModuleArtifacts____x40_Lean_Setup___hyg_549____closed__1; +x_4 = l_Lean_Json_opt___at___private_Lean_Setup_0__Lean_toJsonModuleArtifacts____x40_Lean_Setup___hyg_549____spec__1(x_3, x_2); +x_5 = lean_ctor_get(x_1, 1); +lean_inc(x_5); +x_6 = l___private_Lean_Setup_0__Lean_toJsonModuleArtifacts____x40_Lean_Setup___hyg_549____closed__2; +x_7 = l_Lean_Json_opt___at___private_Lean_Setup_0__Lean_toJsonModuleArtifacts____x40_Lean_Setup___hyg_549____spec__1(x_6, x_5); +x_8 = lean_ctor_get(x_1, 2); +lean_inc(x_8); +x_9 = l___private_Lean_Setup_0__Lean_toJsonModuleArtifacts____x40_Lean_Setup___hyg_549____closed__3; +x_10 = l_Lean_Json_opt___at___private_Lean_Setup_0__Lean_toJsonModuleArtifacts____x40_Lean_Setup___hyg_549____spec__1(x_9, x_8); +x_11 = lean_ctor_get(x_1, 3); +lean_inc(x_11); +x_12 = l___private_Lean_Setup_0__Lean_toJsonModuleArtifacts____x40_Lean_Setup___hyg_549____closed__4; +x_13 = l_Lean_Json_opt___at___private_Lean_Setup_0__Lean_toJsonModuleArtifacts____x40_Lean_Setup___hyg_549____spec__1(x_12, x_11); +x_14 = lean_ctor_get(x_1, 4); +lean_inc(x_14); +lean_dec(x_1); +x_15 = l___private_Lean_Setup_0__Lean_toJsonModuleArtifacts____x40_Lean_Setup___hyg_549____closed__5; +x_16 = l_Lean_Json_opt___at___private_Lean_Setup_0__Lean_toJsonModuleArtifacts____x40_Lean_Setup___hyg_549____spec__1(x_15, x_14); +x_17 = lean_box(0); +x_18 = lean_alloc_ctor(1, 2, 0); +lean_ctor_set(x_18, 0, x_16); +lean_ctor_set(x_18, 1, x_17); +x_19 = lean_alloc_ctor(1, 2, 0); +lean_ctor_set(x_19, 0, x_13); +lean_ctor_set(x_19, 1, x_18); +x_20 = lean_alloc_ctor(1, 2, 0); +lean_ctor_set(x_20, 0, x_10); +lean_ctor_set(x_20, 1, x_19); +x_21 = lean_alloc_ctor(1, 2, 0); +lean_ctor_set(x_21, 0, x_7); +lean_ctor_set(x_21, 1, x_20); +x_22 = lean_alloc_ctor(1, 2, 0); +lean_ctor_set(x_22, 0, x_4); +lean_ctor_set(x_22, 1, x_21); +x_23 = l___private_Lean_Setup_0__Lean_toJsonImport____x40_Lean_Setup___hyg_124____closed__2; +x_24 = l_List_flatMapTR_go___at___private_Lean_Util_Paths_0__Lean_toJsonLeanPaths____x40_Lean_Util_Paths___hyg_55____spec__2(x_22, x_23); +x_25 = l_Lean_Json_mkObj(x_24); +return x_25; +} +} +static lean_object* _init_l_Lean_instToJsonModuleArtifacts___closed__1() { +_start: +{ +lean_object* x_1; +x_1 = lean_alloc_closure((void*)(l___private_Lean_Setup_0__Lean_toJsonModuleArtifacts____x40_Lean_Setup___hyg_549_), 1, 0); +return x_1; +} +} +static lean_object* _init_l_Lean_instToJsonModuleArtifacts() { +_start: +{ +lean_object* x_1; +x_1 = l_Lean_instToJsonModuleArtifacts___closed__1; +return x_1; +} +} +static lean_object* _init_l_Lean_Json_getObjValAs_x3f___at___private_Lean_Setup_0__Lean_fromJsonModuleArtifacts____x40_Lean_Setup___hyg_593____spec__1___closed__1() { +_start: +{ +lean_object* x_1; lean_object* x_2; +x_1 = lean_box(0); +x_2 = lean_alloc_ctor(1, 1, 0); +lean_ctor_set(x_2, 0, x_1); +return x_2; +} +} +LEAN_EXPORT lean_object* l_Lean_Json_getObjValAs_x3f___at___private_Lean_Setup_0__Lean_fromJsonModuleArtifacts____x40_Lean_Setup___hyg_593____spec__1(lean_object* x_1, lean_object* x_2) { +_start: +{ +lean_object* x_3; +x_3 = l_Lean_Json_getObjValD(x_1, x_2); +switch (lean_obj_tag(x_3)) { +case 0: +{ +lean_object* x_4; +x_4 = 
l_Lean_Json_getObjValAs_x3f___at___private_Lean_Setup_0__Lean_fromJsonModuleArtifacts____x40_Lean_Setup___hyg_593____spec__1___closed__1; +return x_4; +} +case 1: +{ +lean_object* x_5; +x_5 = l_Lean_Json_getStr_x3f(x_3); +if (lean_obj_tag(x_5) == 0) +{ +uint8_t x_6; +x_6 = !lean_is_exclusive(x_5); +if (x_6 == 0) +{ +return x_5; +} +else +{ +lean_object* x_7; lean_object* x_8; +x_7 = lean_ctor_get(x_5, 0); +lean_inc(x_7); +lean_dec(x_5); +x_8 = lean_alloc_ctor(0, 1, 0); +lean_ctor_set(x_8, 0, x_7); +return x_8; +} +} +else +{ +uint8_t x_9; +x_9 = !lean_is_exclusive(x_5); +if (x_9 == 0) +{ +lean_object* x_10; lean_object* x_11; +x_10 = lean_ctor_get(x_5, 0); +x_11 = lean_alloc_ctor(1, 1, 0); +lean_ctor_set(x_11, 0, x_10); +lean_ctor_set(x_5, 0, x_11); +return x_5; +} +else +{ +lean_object* x_12; lean_object* x_13; lean_object* x_14; +x_12 = lean_ctor_get(x_5, 0); +lean_inc(x_12); +lean_dec(x_5); +x_13 = lean_alloc_ctor(1, 1, 0); +lean_ctor_set(x_13, 0, x_12); +x_14 = lean_alloc_ctor(1, 1, 0); +lean_ctor_set(x_14, 0, x_13); +return x_14; +} +} +} +default: +{ +lean_object* x_15; uint8_t x_16; +lean_inc(x_3); +x_15 = l_Lean_Json_getStr_x3f(x_3); +x_16 = !lean_is_exclusive(x_3); +if (x_16 == 0) +{ +lean_object* x_17; +x_17 = lean_ctor_get(x_3, 0); +lean_dec(x_17); +if (lean_obj_tag(x_15) == 0) +{ +uint8_t x_18; +lean_free_object(x_3); +x_18 = !lean_is_exclusive(x_15); +if (x_18 == 0) +{ +return x_15; +} +else +{ +lean_object* x_19; lean_object* x_20; +x_19 = lean_ctor_get(x_15, 0); +lean_inc(x_19); +lean_dec(x_15); +x_20 = lean_alloc_ctor(0, 1, 0); +lean_ctor_set(x_20, 0, x_19); +return x_20; +} +} +else +{ +uint8_t x_21; +x_21 = !lean_is_exclusive(x_15); +if (x_21 == 0) +{ +lean_object* x_22; +x_22 = lean_ctor_get(x_15, 0); +lean_ctor_set_tag(x_3, 1); +lean_ctor_set(x_3, 0, x_22); +lean_ctor_set(x_15, 0, x_3); +return x_15; +} +else +{ +lean_object* x_23; lean_object* x_24; +x_23 = lean_ctor_get(x_15, 0); +lean_inc(x_23); +lean_dec(x_15); +lean_ctor_set_tag(x_3, 1); +lean_ctor_set(x_3, 0, x_23); +x_24 = lean_alloc_ctor(1, 1, 0); +lean_ctor_set(x_24, 0, x_3); +return x_24; +} +} +} +else +{ +lean_dec(x_3); +if (lean_obj_tag(x_15) == 0) +{ +lean_object* x_25; lean_object* x_26; lean_object* x_27; +x_25 = lean_ctor_get(x_15, 0); +lean_inc(x_25); +if (lean_is_exclusive(x_15)) { + lean_ctor_release(x_15, 0); + x_26 = x_15; +} else { + lean_dec_ref(x_15); + x_26 = lean_box(0); +} +if (lean_is_scalar(x_26)) { + x_27 = lean_alloc_ctor(0, 1, 0); +} else { + x_27 = x_26; +} +lean_ctor_set(x_27, 0, x_25); +return x_27; +} +else +{ +lean_object* x_28; lean_object* x_29; lean_object* x_30; lean_object* x_31; +x_28 = lean_ctor_get(x_15, 0); +lean_inc(x_28); +if (lean_is_exclusive(x_15)) { + lean_ctor_release(x_15, 0); + x_29 = x_15; +} else { + lean_dec_ref(x_15); + x_29 = lean_box(0); +} +x_30 = lean_alloc_ctor(1, 1, 0); +lean_ctor_set(x_30, 0, x_28); +if (lean_is_scalar(x_29)) { + x_31 = lean_alloc_ctor(1, 1, 0); +} else { + x_31 = x_29; +} +lean_ctor_set(x_31, 0, x_30); +return x_31; +} +} +} +} +} +} +static lean_object* _init_l___private_Lean_Setup_0__Lean_fromJsonModuleArtifacts____x40_Lean_Setup___hyg_593____closed__1() { +_start: +{ +lean_object* x_1; +x_1 = lean_mk_string_unchecked("ModuleArtifacts", 15, 15); +return x_1; +} +} +static lean_object* _init_l___private_Lean_Setup_0__Lean_fromJsonModuleArtifacts____x40_Lean_Setup___hyg_593____closed__2() { +_start: +{ +lean_object* x_1; lean_object* x_2; lean_object* x_3; +x_1 = 
l___private_Lean_Setup_0__Lean_fromJsonImport____x40_Lean_Setup___hyg_190____closed__1; +x_2 = l___private_Lean_Setup_0__Lean_fromJsonModuleArtifacts____x40_Lean_Setup___hyg_593____closed__1; +x_3 = l_Lean_Name_mkStr2(x_1, x_2); +return x_3; +} +} +static lean_object* _init_l___private_Lean_Setup_0__Lean_fromJsonModuleArtifacts____x40_Lean_Setup___hyg_593____closed__3() { +_start: +{ +lean_object* x_1; uint8_t x_2; lean_object* x_3; lean_object* x_4; +x_1 = l___private_Lean_Setup_0__Lean_fromJsonModuleArtifacts____x40_Lean_Setup___hyg_593____closed__2; +x_2 = 1; +x_3 = l___private_Lean_Setup_0__Lean_toJsonImport____x40_Lean_Setup___hyg_124____closed__1; +x_4 = l_Lean_Name_toString(x_1, x_2, x_3); +return x_4; +} +} +static lean_object* _init_l___private_Lean_Setup_0__Lean_fromJsonModuleArtifacts____x40_Lean_Setup___hyg_593____closed__4() { +_start: +{ +lean_object* x_1; lean_object* x_2; lean_object* x_3; +x_1 = l___private_Lean_Setup_0__Lean_fromJsonModuleArtifacts____x40_Lean_Setup___hyg_593____closed__3; +x_2 = l___private_Lean_Setup_0__Lean_fromJsonImport____x40_Lean_Setup___hyg_190____closed__5; +x_3 = lean_string_append(x_1, x_2); +return x_3; +} +} +static lean_object* _init_l___private_Lean_Setup_0__Lean_fromJsonModuleArtifacts____x40_Lean_Setup___hyg_593____closed__5() { +_start: +{ +lean_object* x_1; lean_object* x_2; lean_object* x_3; +x_1 = lean_box(0); +x_2 = l___private_Lean_Setup_0__Lean_reprModuleArtifacts____x40_Lean_Setup___hyg_417____closed__1; +x_3 = l_Lean_Name_str___override(x_1, x_2); +return x_3; +} +} +static lean_object* _init_l___private_Lean_Setup_0__Lean_fromJsonModuleArtifacts____x40_Lean_Setup___hyg_593____closed__6() { +_start: +{ +lean_object* x_1; uint8_t x_2; lean_object* x_3; lean_object* x_4; +x_1 = l___private_Lean_Setup_0__Lean_fromJsonModuleArtifacts____x40_Lean_Setup___hyg_593____closed__5; +x_2 = 1; +x_3 = l___private_Lean_Setup_0__Lean_toJsonImport____x40_Lean_Setup___hyg_124____closed__1; +x_4 = l_Lean_Name_toString(x_1, x_2, x_3); +return x_4; +} +} +static lean_object* _init_l___private_Lean_Setup_0__Lean_fromJsonModuleArtifacts____x40_Lean_Setup___hyg_593____closed__7() { +_start: +{ +lean_object* x_1; lean_object* x_2; lean_object* x_3; +x_1 = l___private_Lean_Setup_0__Lean_fromJsonModuleArtifacts____x40_Lean_Setup___hyg_593____closed__4; +x_2 = l___private_Lean_Setup_0__Lean_fromJsonModuleArtifacts____x40_Lean_Setup___hyg_593____closed__6; +x_3 = lean_string_append(x_1, x_2); +return x_3; +} +} +static lean_object* _init_l___private_Lean_Setup_0__Lean_fromJsonModuleArtifacts____x40_Lean_Setup___hyg_593____closed__8() { +_start: +{ +lean_object* x_1; lean_object* x_2; lean_object* x_3; +x_1 = l___private_Lean_Setup_0__Lean_fromJsonModuleArtifacts____x40_Lean_Setup___hyg_593____closed__7; +x_2 = l___private_Lean_Setup_0__Lean_fromJsonImport____x40_Lean_Setup___hyg_190____closed__10; +x_3 = lean_string_append(x_1, x_2); +return x_3; +} +} +static lean_object* _init_l___private_Lean_Setup_0__Lean_fromJsonModuleArtifacts____x40_Lean_Setup___hyg_593____closed__9() { +_start: +{ +lean_object* x_1; lean_object* x_2; lean_object* x_3; +x_1 = lean_box(0); +x_2 = l___private_Lean_Setup_0__Lean_reprModuleArtifacts____x40_Lean_Setup___hyg_417____closed__6; +x_3 = l_Lean_Name_str___override(x_1, x_2); +return x_3; +} +} +static lean_object* _init_l___private_Lean_Setup_0__Lean_fromJsonModuleArtifacts____x40_Lean_Setup___hyg_593____closed__10() { +_start: +{ +lean_object* x_1; uint8_t x_2; lean_object* x_3; lean_object* x_4; +x_1 = 
l___private_Lean_Setup_0__Lean_fromJsonModuleArtifacts____x40_Lean_Setup___hyg_593____closed__9; +x_2 = 1; +x_3 = l___private_Lean_Setup_0__Lean_toJsonImport____x40_Lean_Setup___hyg_124____closed__1; +x_4 = l_Lean_Name_toString(x_1, x_2, x_3); +return x_4; +} +} +static lean_object* _init_l___private_Lean_Setup_0__Lean_fromJsonModuleArtifacts____x40_Lean_Setup___hyg_593____closed__11() { +_start: +{ +lean_object* x_1; lean_object* x_2; lean_object* x_3; +x_1 = l___private_Lean_Setup_0__Lean_fromJsonModuleArtifacts____x40_Lean_Setup___hyg_593____closed__4; +x_2 = l___private_Lean_Setup_0__Lean_fromJsonModuleArtifacts____x40_Lean_Setup___hyg_593____closed__10; +x_3 = lean_string_append(x_1, x_2); +return x_3; +} +} +static lean_object* _init_l___private_Lean_Setup_0__Lean_fromJsonModuleArtifacts____x40_Lean_Setup___hyg_593____closed__12() { +_start: +{ +lean_object* x_1; lean_object* x_2; lean_object* x_3; +x_1 = l___private_Lean_Setup_0__Lean_fromJsonModuleArtifacts____x40_Lean_Setup___hyg_593____closed__11; +x_2 = l___private_Lean_Setup_0__Lean_fromJsonImport____x40_Lean_Setup___hyg_190____closed__10; +x_3 = lean_string_append(x_1, x_2); +return x_3; +} +} +static lean_object* _init_l___private_Lean_Setup_0__Lean_fromJsonModuleArtifacts____x40_Lean_Setup___hyg_593____closed__13() { +_start: +{ +lean_object* x_1; lean_object* x_2; lean_object* x_3; +x_1 = lean_box(0); +x_2 = l___private_Lean_Setup_0__Lean_reprModuleArtifacts____x40_Lean_Setup___hyg_417____closed__8; +x_3 = l_Lean_Name_str___override(x_1, x_2); +return x_3; +} +} +static lean_object* _init_l___private_Lean_Setup_0__Lean_fromJsonModuleArtifacts____x40_Lean_Setup___hyg_593____closed__14() { +_start: +{ +lean_object* x_1; uint8_t x_2; lean_object* x_3; lean_object* x_4; +x_1 = l___private_Lean_Setup_0__Lean_fromJsonModuleArtifacts____x40_Lean_Setup___hyg_593____closed__13; +x_2 = 1; +x_3 = l___private_Lean_Setup_0__Lean_toJsonImport____x40_Lean_Setup___hyg_124____closed__1; +x_4 = l_Lean_Name_toString(x_1, x_2, x_3); +return x_4; +} +} +static lean_object* _init_l___private_Lean_Setup_0__Lean_fromJsonModuleArtifacts____x40_Lean_Setup___hyg_593____closed__15() { +_start: +{ +lean_object* x_1; lean_object* x_2; lean_object* x_3; +x_1 = l___private_Lean_Setup_0__Lean_fromJsonModuleArtifacts____x40_Lean_Setup___hyg_593____closed__4; +x_2 = l___private_Lean_Setup_0__Lean_fromJsonModuleArtifacts____x40_Lean_Setup___hyg_593____closed__14; +x_3 = lean_string_append(x_1, x_2); +return x_3; +} +} +static lean_object* _init_l___private_Lean_Setup_0__Lean_fromJsonModuleArtifacts____x40_Lean_Setup___hyg_593____closed__16() { +_start: +{ +lean_object* x_1; lean_object* x_2; lean_object* x_3; +x_1 = l___private_Lean_Setup_0__Lean_fromJsonModuleArtifacts____x40_Lean_Setup___hyg_593____closed__15; +x_2 = l___private_Lean_Setup_0__Lean_fromJsonImport____x40_Lean_Setup___hyg_190____closed__10; +x_3 = lean_string_append(x_1, x_2); +return x_3; +} +} +static lean_object* _init_l___private_Lean_Setup_0__Lean_fromJsonModuleArtifacts____x40_Lean_Setup___hyg_593____closed__17() { +_start: +{ +lean_object* x_1; lean_object* x_2; lean_object* x_3; +x_1 = lean_box(0); +x_2 = l___private_Lean_Setup_0__Lean_reprModuleArtifacts____x40_Lean_Setup___hyg_417____closed__11; +x_3 = l_Lean_Name_str___override(x_1, x_2); +return x_3; +} +} +static lean_object* _init_l___private_Lean_Setup_0__Lean_fromJsonModuleArtifacts____x40_Lean_Setup___hyg_593____closed__18() { +_start: +{ +lean_object* x_1; uint8_t x_2; lean_object* x_3; lean_object* x_4; +x_1 = 
l___private_Lean_Setup_0__Lean_fromJsonModuleArtifacts____x40_Lean_Setup___hyg_593____closed__17; +x_2 = 1; +x_3 = l___private_Lean_Setup_0__Lean_toJsonImport____x40_Lean_Setup___hyg_124____closed__1; +x_4 = l_Lean_Name_toString(x_1, x_2, x_3); +return x_4; +} +} +static lean_object* _init_l___private_Lean_Setup_0__Lean_fromJsonModuleArtifacts____x40_Lean_Setup___hyg_593____closed__19() { +_start: +{ +lean_object* x_1; lean_object* x_2; lean_object* x_3; +x_1 = l___private_Lean_Setup_0__Lean_fromJsonModuleArtifacts____x40_Lean_Setup___hyg_593____closed__4; +x_2 = l___private_Lean_Setup_0__Lean_fromJsonModuleArtifacts____x40_Lean_Setup___hyg_593____closed__18; +x_3 = lean_string_append(x_1, x_2); +return x_3; +} +} +static lean_object* _init_l___private_Lean_Setup_0__Lean_fromJsonModuleArtifacts____x40_Lean_Setup___hyg_593____closed__20() { +_start: +{ +lean_object* x_1; lean_object* x_2; lean_object* x_3; +x_1 = l___private_Lean_Setup_0__Lean_fromJsonModuleArtifacts____x40_Lean_Setup___hyg_593____closed__19; +x_2 = l___private_Lean_Setup_0__Lean_fromJsonImport____x40_Lean_Setup___hyg_190____closed__10; +x_3 = lean_string_append(x_1, x_2); +return x_3; +} +} +static lean_object* _init_l___private_Lean_Setup_0__Lean_fromJsonModuleArtifacts____x40_Lean_Setup___hyg_593____closed__21() { +_start: +{ +lean_object* x_1; lean_object* x_2; lean_object* x_3; +x_1 = lean_box(0); +x_2 = l___private_Lean_Setup_0__Lean_reprModuleArtifacts____x40_Lean_Setup___hyg_417____closed__14; +x_3 = l_Lean_Name_str___override(x_1, x_2); +return x_3; +} +} +static lean_object* _init_l___private_Lean_Setup_0__Lean_fromJsonModuleArtifacts____x40_Lean_Setup___hyg_593____closed__22() { +_start: +{ +lean_object* x_1; uint8_t x_2; lean_object* x_3; lean_object* x_4; +x_1 = l___private_Lean_Setup_0__Lean_fromJsonModuleArtifacts____x40_Lean_Setup___hyg_593____closed__21; +x_2 = 1; +x_3 = l___private_Lean_Setup_0__Lean_toJsonImport____x40_Lean_Setup___hyg_124____closed__1; +x_4 = l_Lean_Name_toString(x_1, x_2, x_3); +return x_4; +} +} +static lean_object* _init_l___private_Lean_Setup_0__Lean_fromJsonModuleArtifacts____x40_Lean_Setup___hyg_593____closed__23() { +_start: +{ +lean_object* x_1; lean_object* x_2; lean_object* x_3; +x_1 = l___private_Lean_Setup_0__Lean_fromJsonModuleArtifacts____x40_Lean_Setup___hyg_593____closed__4; +x_2 = l___private_Lean_Setup_0__Lean_fromJsonModuleArtifacts____x40_Lean_Setup___hyg_593____closed__22; +x_3 = lean_string_append(x_1, x_2); +return x_3; +} +} +static lean_object* _init_l___private_Lean_Setup_0__Lean_fromJsonModuleArtifacts____x40_Lean_Setup___hyg_593____closed__24() { +_start: +{ +lean_object* x_1; lean_object* x_2; lean_object* x_3; +x_1 = l___private_Lean_Setup_0__Lean_fromJsonModuleArtifacts____x40_Lean_Setup___hyg_593____closed__23; +x_2 = l___private_Lean_Setup_0__Lean_fromJsonImport____x40_Lean_Setup___hyg_190____closed__10; +x_3 = lean_string_append(x_1, x_2); +return x_3; +} +} +LEAN_EXPORT lean_object* l___private_Lean_Setup_0__Lean_fromJsonModuleArtifacts____x40_Lean_Setup___hyg_593_(lean_object* x_1) { +_start: +{ +lean_object* x_2; lean_object* x_3; +x_2 = l___private_Lean_Setup_0__Lean_toJsonModuleArtifacts____x40_Lean_Setup___hyg_549____closed__1; +lean_inc(x_1); +x_3 = l_Lean_Json_getObjValAs_x3f___at___private_Lean_Setup_0__Lean_fromJsonModuleArtifacts____x40_Lean_Setup___hyg_593____spec__1(x_1, x_2); +if (lean_obj_tag(x_3) == 0) +{ +uint8_t x_4; +lean_dec(x_1); +x_4 = !lean_is_exclusive(x_3); +if (x_4 == 0) +{ +lean_object* x_5; lean_object* x_6; lean_object* x_7; 
+x_5 = lean_ctor_get(x_3, 0); +x_6 = l___private_Lean_Setup_0__Lean_fromJsonModuleArtifacts____x40_Lean_Setup___hyg_593____closed__8; +x_7 = lean_string_append(x_6, x_5); +lean_dec(x_5); +lean_ctor_set(x_3, 0, x_7); +return x_3; +} +else +{ +lean_object* x_8; lean_object* x_9; lean_object* x_10; lean_object* x_11; +x_8 = lean_ctor_get(x_3, 0); +lean_inc(x_8); +lean_dec(x_3); +x_9 = l___private_Lean_Setup_0__Lean_fromJsonModuleArtifacts____x40_Lean_Setup___hyg_593____closed__8; +x_10 = lean_string_append(x_9, x_8); +lean_dec(x_8); +x_11 = lean_alloc_ctor(0, 1, 0); +lean_ctor_set(x_11, 0, x_10); +return x_11; +} +} +else +{ +lean_object* x_12; lean_object* x_13; lean_object* x_14; +x_12 = lean_ctor_get(x_3, 0); +lean_inc(x_12); +lean_dec(x_3); +x_13 = l___private_Lean_Setup_0__Lean_toJsonModuleArtifacts____x40_Lean_Setup___hyg_549____closed__2; +lean_inc(x_1); +x_14 = l_Lean_Json_getObjValAs_x3f___at___private_Lean_Setup_0__Lean_fromJsonModuleArtifacts____x40_Lean_Setup___hyg_593____spec__1(x_1, x_13); +if (lean_obj_tag(x_14) == 0) +{ +uint8_t x_15; +lean_dec(x_12); +lean_dec(x_1); +x_15 = !lean_is_exclusive(x_14); +if (x_15 == 0) +{ +lean_object* x_16; lean_object* x_17; lean_object* x_18; +x_16 = lean_ctor_get(x_14, 0); +x_17 = l___private_Lean_Setup_0__Lean_fromJsonModuleArtifacts____x40_Lean_Setup___hyg_593____closed__12; +x_18 = lean_string_append(x_17, x_16); +lean_dec(x_16); +lean_ctor_set(x_14, 0, x_18); +return x_14; +} +else +{ +lean_object* x_19; lean_object* x_20; lean_object* x_21; lean_object* x_22; +x_19 = lean_ctor_get(x_14, 0); +lean_inc(x_19); +lean_dec(x_14); +x_20 = l___private_Lean_Setup_0__Lean_fromJsonModuleArtifacts____x40_Lean_Setup___hyg_593____closed__12; +x_21 = lean_string_append(x_20, x_19); +lean_dec(x_19); +x_22 = lean_alloc_ctor(0, 1, 0); +lean_ctor_set(x_22, 0, x_21); +return x_22; +} +} +else +{ +lean_object* x_23; lean_object* x_24; lean_object* x_25; +x_23 = lean_ctor_get(x_14, 0); +lean_inc(x_23); +lean_dec(x_14); +x_24 = l___private_Lean_Setup_0__Lean_toJsonModuleArtifacts____x40_Lean_Setup___hyg_549____closed__3; +lean_inc(x_1); +x_25 = l_Lean_Json_getObjValAs_x3f___at___private_Lean_Setup_0__Lean_fromJsonModuleArtifacts____x40_Lean_Setup___hyg_593____spec__1(x_1, x_24); +if (lean_obj_tag(x_25) == 0) +{ +uint8_t x_26; +lean_dec(x_23); +lean_dec(x_12); +lean_dec(x_1); +x_26 = !lean_is_exclusive(x_25); +if (x_26 == 0) +{ +lean_object* x_27; lean_object* x_28; lean_object* x_29; +x_27 = lean_ctor_get(x_25, 0); +x_28 = l___private_Lean_Setup_0__Lean_fromJsonModuleArtifacts____x40_Lean_Setup___hyg_593____closed__16; +x_29 = lean_string_append(x_28, x_27); +lean_dec(x_27); +lean_ctor_set(x_25, 0, x_29); +return x_25; +} +else +{ +lean_object* x_30; lean_object* x_31; lean_object* x_32; lean_object* x_33; +x_30 = lean_ctor_get(x_25, 0); +lean_inc(x_30); +lean_dec(x_25); +x_31 = l___private_Lean_Setup_0__Lean_fromJsonModuleArtifacts____x40_Lean_Setup___hyg_593____closed__16; +x_32 = lean_string_append(x_31, x_30); +lean_dec(x_30); +x_33 = lean_alloc_ctor(0, 1, 0); +lean_ctor_set(x_33, 0, x_32); +return x_33; +} +} +else +{ +lean_object* x_34; lean_object* x_35; lean_object* x_36; +x_34 = lean_ctor_get(x_25, 0); +lean_inc(x_34); +lean_dec(x_25); +x_35 = l___private_Lean_Setup_0__Lean_toJsonModuleArtifacts____x40_Lean_Setup___hyg_549____closed__4; +lean_inc(x_1); +x_36 = l_Lean_Json_getObjValAs_x3f___at___private_Lean_Setup_0__Lean_fromJsonModuleArtifacts____x40_Lean_Setup___hyg_593____spec__1(x_1, x_35); +if (lean_obj_tag(x_36) == 0) +{ +uint8_t x_37; 
+lean_dec(x_34); +lean_dec(x_23); +lean_dec(x_12); +lean_dec(x_1); +x_37 = !lean_is_exclusive(x_36); +if (x_37 == 0) +{ +lean_object* x_38; lean_object* x_39; lean_object* x_40; +x_38 = lean_ctor_get(x_36, 0); +x_39 = l___private_Lean_Setup_0__Lean_fromJsonModuleArtifacts____x40_Lean_Setup___hyg_593____closed__20; +x_40 = lean_string_append(x_39, x_38); +lean_dec(x_38); +lean_ctor_set(x_36, 0, x_40); +return x_36; +} +else +{ +lean_object* x_41; lean_object* x_42; lean_object* x_43; lean_object* x_44; +x_41 = lean_ctor_get(x_36, 0); +lean_inc(x_41); +lean_dec(x_36); +x_42 = l___private_Lean_Setup_0__Lean_fromJsonModuleArtifacts____x40_Lean_Setup___hyg_593____closed__20; +x_43 = lean_string_append(x_42, x_41); +lean_dec(x_41); +x_44 = lean_alloc_ctor(0, 1, 0); +lean_ctor_set(x_44, 0, x_43); +return x_44; +} +} +else +{ +lean_object* x_45; lean_object* x_46; lean_object* x_47; +x_45 = lean_ctor_get(x_36, 0); +lean_inc(x_45); +lean_dec(x_36); +x_46 = l___private_Lean_Setup_0__Lean_toJsonModuleArtifacts____x40_Lean_Setup___hyg_549____closed__5; +x_47 = l_Lean_Json_getObjValAs_x3f___at___private_Lean_Setup_0__Lean_fromJsonModuleArtifacts____x40_Lean_Setup___hyg_593____spec__1(x_1, x_46); +if (lean_obj_tag(x_47) == 0) +{ +uint8_t x_48; +lean_dec(x_45); +lean_dec(x_34); +lean_dec(x_23); +lean_dec(x_12); +x_48 = !lean_is_exclusive(x_47); +if (x_48 == 0) +{ +lean_object* x_49; lean_object* x_50; lean_object* x_51; +x_49 = lean_ctor_get(x_47, 0); +x_50 = l___private_Lean_Setup_0__Lean_fromJsonModuleArtifacts____x40_Lean_Setup___hyg_593____closed__24; +x_51 = lean_string_append(x_50, x_49); +lean_dec(x_49); +lean_ctor_set(x_47, 0, x_51); +return x_47; +} +else +{ +lean_object* x_52; lean_object* x_53; lean_object* x_54; lean_object* x_55; +x_52 = lean_ctor_get(x_47, 0); +lean_inc(x_52); +lean_dec(x_47); +x_53 = l___private_Lean_Setup_0__Lean_fromJsonModuleArtifacts____x40_Lean_Setup___hyg_593____closed__24; +x_54 = lean_string_append(x_53, x_52); +lean_dec(x_52); +x_55 = lean_alloc_ctor(0, 1, 0); +lean_ctor_set(x_55, 0, x_54); +return x_55; +} +} +else +{ +uint8_t x_56; +x_56 = !lean_is_exclusive(x_47); +if (x_56 == 0) +{ +lean_object* x_57; lean_object* x_58; +x_57 = lean_ctor_get(x_47, 0); +x_58 = lean_alloc_ctor(0, 5, 0); +lean_ctor_set(x_58, 0, x_12); +lean_ctor_set(x_58, 1, x_23); +lean_ctor_set(x_58, 2, x_34); +lean_ctor_set(x_58, 3, x_45); +lean_ctor_set(x_58, 4, x_57); +lean_ctor_set(x_47, 0, x_58); +return x_47; +} +else +{ +lean_object* x_59; lean_object* x_60; lean_object* x_61; +x_59 = lean_ctor_get(x_47, 0); +lean_inc(x_59); +lean_dec(x_47); +x_60 = lean_alloc_ctor(0, 5, 0); +lean_ctor_set(x_60, 0, x_12); +lean_ctor_set(x_60, 1, x_23); +lean_ctor_set(x_60, 2, x_34); +lean_ctor_set(x_60, 3, x_45); +lean_ctor_set(x_60, 4, x_59); +x_61 = lean_alloc_ctor(1, 1, 0); +lean_ctor_set(x_61, 0, x_60); +return x_61; +} +} +} +} +} +} +} +} +LEAN_EXPORT lean_object* l_Lean_Json_getObjValAs_x3f___at___private_Lean_Setup_0__Lean_fromJsonModuleArtifacts____x40_Lean_Setup___hyg_593____spec__1___boxed(lean_object* x_1, lean_object* x_2) { +_start: +{ +lean_object* x_3; +x_3 = l_Lean_Json_getObjValAs_x3f___at___private_Lean_Setup_0__Lean_fromJsonModuleArtifacts____x40_Lean_Setup___hyg_593____spec__1(x_1, x_2); +lean_dec(x_2); +return x_3; +} +} +static lean_object* _init_l_Lean_instFromJsonModuleArtifacts___closed__1() { +_start: +{ +lean_object* x_1; +x_1 = lean_alloc_closure((void*)(l___private_Lean_Setup_0__Lean_fromJsonModuleArtifacts____x40_Lean_Setup___hyg_593_), 1, 0); +return x_1; +} +} +static 
lean_object* _init_l_Lean_instFromJsonModuleArtifacts() { +_start: +{ +lean_object* x_1; +x_1 = l_Lean_instFromJsonModuleArtifacts___closed__1; +return x_1; +} +} +LEAN_EXPORT lean_object* l_repr___at___private_Lean_Setup_0__Lean_reprModuleSetup____x40_Lean_Setup___hyg_913____spec__1(lean_object* x_1) { +_start: +{ +lean_object* x_2; lean_object* x_3; +x_2 = lean_unsigned_to_nat(0u); +x_3 = l___private_Lean_Setup_0__Lean_reprImport____x40_Lean_Setup___hyg_34_(x_1, x_2); +return x_3; +} +} +LEAN_EXPORT lean_object* l_Lean_RBNode_revFold___at___private_Lean_Setup_0__Lean_reprModuleSetup____x40_Lean_Setup___hyg_913____spec__3(lean_object* x_1, lean_object* x_2) { +_start: +{ +if (lean_obj_tag(x_2) == 0) +{ +return x_1; +} +else +{ +lean_object* x_3; lean_object* x_4; lean_object* x_5; lean_object* x_6; lean_object* x_7; lean_object* x_8; lean_object* x_9; +x_3 = lean_ctor_get(x_2, 0); +x_4 = lean_ctor_get(x_2, 1); +x_5 = lean_ctor_get(x_2, 2); +x_6 = lean_ctor_get(x_2, 3); +x_7 = l_Lean_RBNode_revFold___at___private_Lean_Setup_0__Lean_reprModuleSetup____x40_Lean_Setup___hyg_913____spec__3(x_1, x_6); +lean_inc(x_5); +lean_inc(x_4); +x_8 = lean_alloc_ctor(0, 2, 0); +lean_ctor_set(x_8, 0, x_4); +lean_ctor_set(x_8, 1, x_5); +x_9 = lean_alloc_ctor(1, 2, 0); +lean_ctor_set(x_9, 0, x_8); +lean_ctor_set(x_9, 1, x_7); +x_1 = x_9; +x_2 = x_3; +goto _start; +} +} +} +LEAN_EXPORT lean_object* l_Lean_RBMap_toList___at___private_Lean_Setup_0__Lean_reprModuleSetup____x40_Lean_Setup___hyg_913____spec__2(lean_object* x_1) { +_start: +{ +lean_object* x_2; lean_object* x_3; +x_2 = lean_box(0); +x_3 = l_Lean_RBNode_revFold___at___private_Lean_Setup_0__Lean_reprModuleSetup____x40_Lean_Setup___hyg_913____spec__3(x_2, x_1); +return x_3; +} +} +static lean_object* _init_l_Prod_repr___at___private_Lean_Setup_0__Lean_reprModuleSetup____x40_Lean_Setup___hyg_913____spec__6___closed__1() { +_start: +{ +lean_object* x_1; lean_object* x_2; lean_object* x_3; +x_1 = l___private_Lean_Setup_0__Lean_reprImport____x40_Lean_Setup___hyg_34____closed__9; +x_2 = lean_box(1); +x_3 = lean_alloc_ctor(5, 2, 0); +lean_ctor_set(x_3, 0, x_1); +lean_ctor_set(x_3, 1, x_2); +return x_3; +} +} +static lean_object* _init_l_Prod_repr___at___private_Lean_Setup_0__Lean_reprModuleSetup____x40_Lean_Setup___hyg_913____spec__6___closed__2() { +_start: +{ +lean_object* x_1; +x_1 = lean_mk_string_unchecked("(", 1, 1); +return x_1; +} +} +static lean_object* _init_l_Prod_repr___at___private_Lean_Setup_0__Lean_reprModuleSetup____x40_Lean_Setup___hyg_913____spec__6___closed__3() { +_start: +{ +lean_object* x_1; lean_object* x_2; +x_1 = l_Prod_repr___at___private_Lean_Setup_0__Lean_reprModuleSetup____x40_Lean_Setup___hyg_913____spec__6___closed__2; +x_2 = lean_string_length(x_1); +return x_2; +} +} +static lean_object* _init_l_Prod_repr___at___private_Lean_Setup_0__Lean_reprModuleSetup____x40_Lean_Setup___hyg_913____spec__6___closed__4() { +_start: +{ +lean_object* x_1; lean_object* x_2; +x_1 = l_Prod_repr___at___private_Lean_Setup_0__Lean_reprModuleSetup____x40_Lean_Setup___hyg_913____spec__6___closed__3; +x_2 = lean_nat_to_int(x_1); +return x_2; +} +} +static lean_object* _init_l_Prod_repr___at___private_Lean_Setup_0__Lean_reprModuleSetup____x40_Lean_Setup___hyg_913____spec__6___closed__5() { +_start: +{ +lean_object* x_1; lean_object* x_2; +x_1 = l_Prod_repr___at___private_Lean_Setup_0__Lean_reprModuleSetup____x40_Lean_Setup___hyg_913____spec__6___closed__2; +x_2 = lean_alloc_ctor(3, 1, 0); +lean_ctor_set(x_2, 0, x_1); +return x_2; +} +} +static 
lean_object* _init_l_Prod_repr___at___private_Lean_Setup_0__Lean_reprModuleSetup____x40_Lean_Setup___hyg_913____spec__6___closed__6() { +_start: +{ +lean_object* x_1; +x_1 = lean_mk_string_unchecked(")", 1, 1); +return x_1; +} +} +static lean_object* _init_l_Prod_repr___at___private_Lean_Setup_0__Lean_reprModuleSetup____x40_Lean_Setup___hyg_913____spec__6___closed__7() { +_start: +{ +lean_object* x_1; lean_object* x_2; +x_1 = l_Prod_repr___at___private_Lean_Setup_0__Lean_reprModuleSetup____x40_Lean_Setup___hyg_913____spec__6___closed__6; +x_2 = lean_alloc_ctor(3, 1, 0); +lean_ctor_set(x_2, 0, x_1); +return x_2; +} +} +LEAN_EXPORT lean_object* l_Prod_repr___at___private_Lean_Setup_0__Lean_reprModuleSetup____x40_Lean_Setup___hyg_913____spec__6(lean_object* x_1, lean_object* x_2) { +_start: +{ +uint8_t x_3; +x_3 = !lean_is_exclusive(x_1); +if (x_3 == 0) +{ +lean_object* x_4; lean_object* x_5; lean_object* x_6; lean_object* x_7; lean_object* x_8; lean_object* x_9; lean_object* x_10; lean_object* x_11; lean_object* x_12; lean_object* x_13; lean_object* x_14; lean_object* x_15; lean_object* x_16; lean_object* x_17; lean_object* x_18; lean_object* x_19; uint8_t x_20; lean_object* x_21; +x_4 = lean_ctor_get(x_1, 0); +x_5 = lean_ctor_get(x_1, 1); +x_6 = lean_unsigned_to_nat(0u); +x_7 = l_Lean_Name_reprPrec(x_4, x_6); +x_8 = lean_box(0); +lean_ctor_set_tag(x_1, 1); +lean_ctor_set(x_1, 1, x_8); +lean_ctor_set(x_1, 0, x_7); +x_9 = l___private_Lean_Setup_0__Lean_reprModuleArtifacts____x40_Lean_Setup___hyg_417_(x_5, x_6); +x_10 = lean_alloc_ctor(1, 2, 0); +lean_ctor_set(x_10, 0, x_9); +lean_ctor_set(x_10, 1, x_1); +x_11 = l_List_reverse___rarg(x_10); +x_12 = l_Prod_repr___at___private_Lean_Setup_0__Lean_reprModuleSetup____x40_Lean_Setup___hyg_913____spec__6___closed__1; +x_13 = l_Std_Format_joinSep___at_Prod_repr___spec__1(x_11, x_12); +x_14 = l_Prod_repr___at___private_Lean_Setup_0__Lean_reprModuleSetup____x40_Lean_Setup___hyg_913____spec__6___closed__5; +x_15 = lean_alloc_ctor(5, 2, 0); +lean_ctor_set(x_15, 0, x_14); +lean_ctor_set(x_15, 1, x_13); +x_16 = l_Prod_repr___at___private_Lean_Setup_0__Lean_reprModuleSetup____x40_Lean_Setup___hyg_913____spec__6___closed__7; +x_17 = lean_alloc_ctor(5, 2, 0); +lean_ctor_set(x_17, 0, x_15); +lean_ctor_set(x_17, 1, x_16); +x_18 = l_Prod_repr___at___private_Lean_Setup_0__Lean_reprModuleSetup____x40_Lean_Setup___hyg_913____spec__6___closed__4; +x_19 = lean_alloc_ctor(4, 2, 0); +lean_ctor_set(x_19, 0, x_18); +lean_ctor_set(x_19, 1, x_17); +x_20 = 0; +x_21 = lean_alloc_ctor(6, 1, 1); +lean_ctor_set(x_21, 0, x_19); +lean_ctor_set_uint8(x_21, sizeof(void*)*1, x_20); +return x_21; +} +else +{ +lean_object* x_22; lean_object* x_23; lean_object* x_24; lean_object* x_25; lean_object* x_26; lean_object* x_27; lean_object* x_28; lean_object* x_29; lean_object* x_30; lean_object* x_31; lean_object* x_32; lean_object* x_33; lean_object* x_34; lean_object* x_35; lean_object* x_36; lean_object* x_37; lean_object* x_38; uint8_t x_39; lean_object* x_40; +x_22 = lean_ctor_get(x_1, 0); +x_23 = lean_ctor_get(x_1, 1); +lean_inc(x_23); +lean_inc(x_22); +lean_dec(x_1); +x_24 = lean_unsigned_to_nat(0u); +x_25 = l_Lean_Name_reprPrec(x_22, x_24); +x_26 = lean_box(0); +x_27 = lean_alloc_ctor(1, 2, 0); +lean_ctor_set(x_27, 0, x_25); +lean_ctor_set(x_27, 1, x_26); +x_28 = l___private_Lean_Setup_0__Lean_reprModuleArtifacts____x40_Lean_Setup___hyg_417_(x_23, x_24); +x_29 = lean_alloc_ctor(1, 2, 0); +lean_ctor_set(x_29, 0, x_28); +lean_ctor_set(x_29, 1, x_27); +x_30 = 
l_List_reverse___rarg(x_29); +x_31 = l_Prod_repr___at___private_Lean_Setup_0__Lean_reprModuleSetup____x40_Lean_Setup___hyg_913____spec__6___closed__1; +x_32 = l_Std_Format_joinSep___at_Prod_repr___spec__1(x_30, x_31); +x_33 = l_Prod_repr___at___private_Lean_Setup_0__Lean_reprModuleSetup____x40_Lean_Setup___hyg_913____spec__6___closed__5; +x_34 = lean_alloc_ctor(5, 2, 0); +lean_ctor_set(x_34, 0, x_33); +lean_ctor_set(x_34, 1, x_32); +x_35 = l_Prod_repr___at___private_Lean_Setup_0__Lean_reprModuleSetup____x40_Lean_Setup___hyg_913____spec__6___closed__7; +x_36 = lean_alloc_ctor(5, 2, 0); +lean_ctor_set(x_36, 0, x_34); +lean_ctor_set(x_36, 1, x_35); +x_37 = l_Prod_repr___at___private_Lean_Setup_0__Lean_reprModuleSetup____x40_Lean_Setup___hyg_913____spec__6___closed__4; +x_38 = lean_alloc_ctor(4, 2, 0); +lean_ctor_set(x_38, 0, x_37); +lean_ctor_set(x_38, 1, x_36); +x_39 = 0; +x_40 = lean_alloc_ctor(6, 1, 1); +lean_ctor_set(x_40, 0, x_38); +lean_ctor_set_uint8(x_40, sizeof(void*)*1, x_39); +return x_40; +} +} +} +LEAN_EXPORT lean_object* l_repr___at___private_Lean_Setup_0__Lean_reprModuleSetup____x40_Lean_Setup___hyg_913____spec__5(lean_object* x_1) { +_start: +{ +lean_object* x_2; lean_object* x_3; +x_2 = lean_unsigned_to_nat(0u); +x_3 = l_Prod_repr___at___private_Lean_Setup_0__Lean_reprModuleSetup____x40_Lean_Setup___hyg_913____spec__6(x_1, x_2); +return x_3; +} +} +LEAN_EXPORT lean_object* l_List_foldl___at___private_Lean_Setup_0__Lean_reprModuleSetup____x40_Lean_Setup___hyg_913____spec__8(lean_object* x_1, lean_object* x_2, lean_object* x_3) { +_start: +{ +if (lean_obj_tag(x_3) == 0) +{ +lean_dec(x_1); +return x_2; +} +else +{ +uint8_t x_4; +x_4 = !lean_is_exclusive(x_3); +if (x_4 == 0) +{ +lean_object* x_5; lean_object* x_6; lean_object* x_7; lean_object* x_8; lean_object* x_9; +x_5 = lean_ctor_get(x_3, 0); +x_6 = lean_ctor_get(x_3, 1); +lean_inc(x_1); +lean_ctor_set_tag(x_3, 5); +lean_ctor_set(x_3, 1, x_1); +lean_ctor_set(x_3, 0, x_2); +x_7 = lean_unsigned_to_nat(0u); +x_8 = l_Prod_repr___at___private_Lean_Setup_0__Lean_reprModuleSetup____x40_Lean_Setup___hyg_913____spec__6(x_5, x_7); +x_9 = lean_alloc_ctor(5, 2, 0); +lean_ctor_set(x_9, 0, x_3); +lean_ctor_set(x_9, 1, x_8); +x_2 = x_9; +x_3 = x_6; +goto _start; +} +else +{ +lean_object* x_11; lean_object* x_12; lean_object* x_13; lean_object* x_14; lean_object* x_15; lean_object* x_16; +x_11 = lean_ctor_get(x_3, 0); +x_12 = lean_ctor_get(x_3, 1); +lean_inc(x_12); +lean_inc(x_11); +lean_dec(x_3); +lean_inc(x_1); +x_13 = lean_alloc_ctor(5, 2, 0); +lean_ctor_set(x_13, 0, x_2); +lean_ctor_set(x_13, 1, x_1); +x_14 = lean_unsigned_to_nat(0u); +x_15 = l_Prod_repr___at___private_Lean_Setup_0__Lean_reprModuleSetup____x40_Lean_Setup___hyg_913____spec__6(x_11, x_14); +x_16 = lean_alloc_ctor(5, 2, 0); +lean_ctor_set(x_16, 0, x_13); +lean_ctor_set(x_16, 1, x_15); +x_2 = x_16; +x_3 = x_12; +goto _start; +} +} +} +} +LEAN_EXPORT lean_object* l_Std_Format_joinSep___at___private_Lean_Setup_0__Lean_reprModuleSetup____x40_Lean_Setup___hyg_913____spec__7(lean_object* x_1, lean_object* x_2) { +_start: +{ +if (lean_obj_tag(x_1) == 0) +{ +lean_object* x_3; +lean_dec(x_2); +x_3 = lean_box(0); +return x_3; +} +else +{ +lean_object* x_4; +x_4 = lean_ctor_get(x_1, 1); +lean_inc(x_4); +if (lean_obj_tag(x_4) == 0) +{ +lean_object* x_5; lean_object* x_6; lean_object* x_7; +lean_dec(x_2); +x_5 = lean_ctor_get(x_1, 0); +lean_inc(x_5); +lean_dec(x_1); +x_6 = lean_unsigned_to_nat(0u); +x_7 = 
l_Prod_repr___at___private_Lean_Setup_0__Lean_reprModuleSetup____x40_Lean_Setup___hyg_913____spec__6(x_5, x_6); +return x_7; +} +else +{ +lean_object* x_8; lean_object* x_9; lean_object* x_10; lean_object* x_11; +x_8 = lean_ctor_get(x_1, 0); +lean_inc(x_8); +lean_dec(x_1); +x_9 = lean_unsigned_to_nat(0u); +x_10 = l_Prod_repr___at___private_Lean_Setup_0__Lean_reprModuleSetup____x40_Lean_Setup___hyg_913____spec__6(x_8, x_9); +x_11 = l_List_foldl___at___private_Lean_Setup_0__Lean_reprModuleSetup____x40_Lean_Setup___hyg_913____spec__8(x_2, x_10, x_4); +return x_11; +} +} +} +} +static lean_object* _init_l_List_repr___at___private_Lean_Setup_0__Lean_reprModuleSetup____x40_Lean_Setup___hyg_913____spec__4___closed__1() { +_start: +{ +lean_object* x_1; +x_1 = lean_mk_string_unchecked("[]", 2, 2); +return x_1; +} +} +static lean_object* _init_l_List_repr___at___private_Lean_Setup_0__Lean_reprModuleSetup____x40_Lean_Setup___hyg_913____spec__4___closed__2() { +_start: +{ +lean_object* x_1; lean_object* x_2; +x_1 = l_List_repr___at___private_Lean_Setup_0__Lean_reprModuleSetup____x40_Lean_Setup___hyg_913____spec__4___closed__1; +x_2 = lean_alloc_ctor(3, 1, 0); +lean_ctor_set(x_2, 0, x_1); +return x_2; +} +} +static lean_object* _init_l_List_repr___at___private_Lean_Setup_0__Lean_reprModuleSetup____x40_Lean_Setup___hyg_913____spec__4___closed__3() { +_start: +{ +lean_object* x_1; +x_1 = lean_mk_string_unchecked("[", 1, 1); +return x_1; +} +} +static lean_object* _init_l_List_repr___at___private_Lean_Setup_0__Lean_reprModuleSetup____x40_Lean_Setup___hyg_913____spec__4___closed__4() { +_start: +{ +lean_object* x_1; lean_object* x_2; +x_1 = l_List_repr___at___private_Lean_Setup_0__Lean_reprModuleSetup____x40_Lean_Setup___hyg_913____spec__4___closed__3; +x_2 = lean_string_length(x_1); +return x_2; +} +} +static lean_object* _init_l_List_repr___at___private_Lean_Setup_0__Lean_reprModuleSetup____x40_Lean_Setup___hyg_913____spec__4___closed__5() { +_start: +{ +lean_object* x_1; lean_object* x_2; +x_1 = l_List_repr___at___private_Lean_Setup_0__Lean_reprModuleSetup____x40_Lean_Setup___hyg_913____spec__4___closed__4; +x_2 = lean_nat_to_int(x_1); +return x_2; +} +} +static lean_object* _init_l_List_repr___at___private_Lean_Setup_0__Lean_reprModuleSetup____x40_Lean_Setup___hyg_913____spec__4___closed__6() { +_start: +{ +lean_object* x_1; lean_object* x_2; +x_1 = l_List_repr___at___private_Lean_Setup_0__Lean_reprModuleSetup____x40_Lean_Setup___hyg_913____spec__4___closed__3; +x_2 = lean_alloc_ctor(3, 1, 0); +lean_ctor_set(x_2, 0, x_1); +return x_2; +} +} +static lean_object* _init_l_List_repr___at___private_Lean_Setup_0__Lean_reprModuleSetup____x40_Lean_Setup___hyg_913____spec__4___closed__7() { +_start: +{ +lean_object* x_1; +x_1 = lean_mk_string_unchecked("]", 1, 1); +return x_1; +} +} +static lean_object* _init_l_List_repr___at___private_Lean_Setup_0__Lean_reprModuleSetup____x40_Lean_Setup___hyg_913____spec__4___closed__8() { +_start: +{ +lean_object* x_1; lean_object* x_2; +x_1 = l_List_repr___at___private_Lean_Setup_0__Lean_reprModuleSetup____x40_Lean_Setup___hyg_913____spec__4___closed__7; +x_2 = lean_alloc_ctor(3, 1, 0); +lean_ctor_set(x_2, 0, x_1); +return x_2; +} +} +LEAN_EXPORT lean_object* l_List_repr___at___private_Lean_Setup_0__Lean_reprModuleSetup____x40_Lean_Setup___hyg_913____spec__4(lean_object* x_1, lean_object* x_2) { +_start: +{ +if (lean_obj_tag(x_1) == 0) +{ +lean_object* x_3; +x_3 = 
l_List_repr___at___private_Lean_Setup_0__Lean_reprModuleSetup____x40_Lean_Setup___hyg_913____spec__4___closed__2; +return x_3; +} +else +{ +lean_object* x_4; lean_object* x_5; uint8_t x_6; +x_4 = l_Prod_repr___at___private_Lean_Setup_0__Lean_reprModuleSetup____x40_Lean_Setup___hyg_913____spec__6___closed__1; +lean_inc(x_1); +x_5 = l_Std_Format_joinSep___at___private_Lean_Setup_0__Lean_reprModuleSetup____x40_Lean_Setup___hyg_913____spec__7(x_1, x_4); +x_6 = !lean_is_exclusive(x_1); +if (x_6 == 0) +{ +lean_object* x_7; lean_object* x_8; lean_object* x_9; lean_object* x_10; lean_object* x_11; lean_object* x_12; lean_object* x_13; uint8_t x_14; lean_object* x_15; +x_7 = lean_ctor_get(x_1, 1); +lean_dec(x_7); +x_8 = lean_ctor_get(x_1, 0); +lean_dec(x_8); +x_9 = l_List_repr___at___private_Lean_Setup_0__Lean_reprModuleSetup____x40_Lean_Setup___hyg_913____spec__4___closed__6; +lean_ctor_set_tag(x_1, 5); +lean_ctor_set(x_1, 1, x_5); +lean_ctor_set(x_1, 0, x_9); +x_10 = l_List_repr___at___private_Lean_Setup_0__Lean_reprModuleSetup____x40_Lean_Setup___hyg_913____spec__4___closed__8; +x_11 = lean_alloc_ctor(5, 2, 0); +lean_ctor_set(x_11, 0, x_1); +lean_ctor_set(x_11, 1, x_10); +x_12 = l_List_repr___at___private_Lean_Setup_0__Lean_reprModuleSetup____x40_Lean_Setup___hyg_913____spec__4___closed__5; +x_13 = lean_alloc_ctor(4, 2, 0); +lean_ctor_set(x_13, 0, x_12); +lean_ctor_set(x_13, 1, x_11); +x_14 = 0; +x_15 = lean_alloc_ctor(6, 1, 1); +lean_ctor_set(x_15, 0, x_13); +lean_ctor_set_uint8(x_15, sizeof(void*)*1, x_14); +return x_15; +} +else +{ +lean_object* x_16; lean_object* x_17; lean_object* x_18; lean_object* x_19; lean_object* x_20; lean_object* x_21; uint8_t x_22; lean_object* x_23; +lean_dec(x_1); +x_16 = l_List_repr___at___private_Lean_Setup_0__Lean_reprModuleSetup____x40_Lean_Setup___hyg_913____spec__4___closed__6; +x_17 = lean_alloc_ctor(5, 2, 0); +lean_ctor_set(x_17, 0, x_16); +lean_ctor_set(x_17, 1, x_5); +x_18 = l_List_repr___at___private_Lean_Setup_0__Lean_reprModuleSetup____x40_Lean_Setup___hyg_913____spec__4___closed__8; +x_19 = lean_alloc_ctor(5, 2, 0); +lean_ctor_set(x_19, 0, x_17); +lean_ctor_set(x_19, 1, x_18); +x_20 = l_List_repr___at___private_Lean_Setup_0__Lean_reprModuleSetup____x40_Lean_Setup___hyg_913____spec__4___closed__5; +x_21 = lean_alloc_ctor(4, 2, 0); +lean_ctor_set(x_21, 0, x_20); +lean_ctor_set(x_21, 1, x_19); +x_22 = 0; +x_23 = lean_alloc_ctor(6, 1, 1); +lean_ctor_set(x_23, 0, x_21); +lean_ctor_set_uint8(x_23, sizeof(void*)*1, x_22); +return x_23; +} +} +} +} +LEAN_EXPORT lean_object* l_repr___at___private_Lean_Setup_0__Lean_reprModuleSetup____x40_Lean_Setup___hyg_913____spec__9(lean_object* x_1) { +_start: +{ +lean_object* x_2; lean_object* x_3; lean_object* x_4; lean_object* x_5; lean_object* x_6; lean_object* x_7; +x_2 = l_String_quote(x_1); +x_3 = lean_alloc_ctor(3, 1, 0); +lean_ctor_set(x_3, 0, x_2); +x_4 = l_Option_repr___at___private_Lean_Setup_0__Lean_reprModuleArtifacts____x40_Lean_Setup___hyg_417____spec__1___closed__6; +x_5 = lean_alloc_ctor(5, 2, 0); +lean_ctor_set(x_5, 0, x_4); +lean_ctor_set(x_5, 1, x_3); +x_6 = lean_unsigned_to_nat(0u); +x_7 = l_Repr_addAppParen(x_5, x_6); +return x_7; +} +} +LEAN_EXPORT lean_object* l_List_foldl___at___private_Lean_Setup_0__Lean_reprModuleSetup____x40_Lean_Setup___hyg_913____spec__11(lean_object* x_1, lean_object* x_2, lean_object* x_3) { +_start: +{ +if (lean_obj_tag(x_3) == 0) +{ +lean_dec(x_1); +return x_2; +} +else +{ +uint8_t x_4; +x_4 = !lean_is_exclusive(x_3); +if (x_4 == 0) +{ +lean_object* x_5; lean_object* 
x_6; lean_object* x_7; lean_object* x_8; +x_5 = lean_ctor_get(x_3, 0); +x_6 = lean_ctor_get(x_3, 1); +lean_inc(x_1); +lean_ctor_set_tag(x_3, 5); +lean_ctor_set(x_3, 1, x_1); +lean_ctor_set(x_3, 0, x_2); +x_7 = l_repr___at___private_Lean_Setup_0__Lean_reprModuleSetup____x40_Lean_Setup___hyg_913____spec__9(x_5); +lean_dec(x_5); +x_8 = lean_alloc_ctor(5, 2, 0); +lean_ctor_set(x_8, 0, x_3); +lean_ctor_set(x_8, 1, x_7); +x_2 = x_8; +x_3 = x_6; +goto _start; +} +else +{ +lean_object* x_10; lean_object* x_11; lean_object* x_12; lean_object* x_13; lean_object* x_14; +x_10 = lean_ctor_get(x_3, 0); +x_11 = lean_ctor_get(x_3, 1); +lean_inc(x_11); +lean_inc(x_10); +lean_dec(x_3); +lean_inc(x_1); +x_12 = lean_alloc_ctor(5, 2, 0); +lean_ctor_set(x_12, 0, x_2); +lean_ctor_set(x_12, 1, x_1); +x_13 = l_repr___at___private_Lean_Setup_0__Lean_reprModuleSetup____x40_Lean_Setup___hyg_913____spec__9(x_10); +lean_dec(x_10); +x_14 = lean_alloc_ctor(5, 2, 0); +lean_ctor_set(x_14, 0, x_12); +lean_ctor_set(x_14, 1, x_13); +x_2 = x_14; +x_3 = x_11; +goto _start; +} +} +} +} +LEAN_EXPORT lean_object* l_Std_Format_joinSep___at___private_Lean_Setup_0__Lean_reprModuleSetup____x40_Lean_Setup___hyg_913____spec__10(lean_object* x_1, lean_object* x_2) { +_start: +{ +if (lean_obj_tag(x_1) == 0) +{ +lean_object* x_3; +lean_dec(x_2); +x_3 = lean_box(0); +return x_3; +} +else +{ +lean_object* x_4; +x_4 = lean_ctor_get(x_1, 1); +lean_inc(x_4); +if (lean_obj_tag(x_4) == 0) +{ +lean_object* x_5; lean_object* x_6; +lean_dec(x_2); +x_5 = lean_ctor_get(x_1, 0); +lean_inc(x_5); +lean_dec(x_1); +x_6 = l_repr___at___private_Lean_Setup_0__Lean_reprModuleSetup____x40_Lean_Setup___hyg_913____spec__9(x_5); +lean_dec(x_5); +return x_6; +} +else +{ +lean_object* x_7; lean_object* x_8; lean_object* x_9; +x_7 = lean_ctor_get(x_1, 0); +lean_inc(x_7); +lean_dec(x_1); +x_8 = l_repr___at___private_Lean_Setup_0__Lean_reprModuleSetup____x40_Lean_Setup___hyg_913____spec__9(x_7); +lean_dec(x_7); +x_9 = l_List_foldl___at___private_Lean_Setup_0__Lean_reprModuleSetup____x40_Lean_Setup___hyg_913____spec__11(x_2, x_8, x_4); +return x_9; +} +} +} +} +LEAN_EXPORT lean_object* l_List_foldl___at___private_Lean_Setup_0__Lean_reprModuleSetup____x40_Lean_Setup___hyg_913____spec__13(lean_object* x_1, lean_object* x_2, lean_object* x_3) { +_start: +{ +if (lean_obj_tag(x_3) == 0) +{ +lean_dec(x_1); +return x_2; +} +else +{ +uint8_t x_4; +x_4 = !lean_is_exclusive(x_3); +if (x_4 == 0) +{ +lean_object* x_5; lean_object* x_6; lean_object* x_7; lean_object* x_8; lean_object* x_9; +x_5 = lean_ctor_get(x_3, 0); +x_6 = lean_ctor_get(x_3, 1); +lean_inc(x_1); +lean_ctor_set_tag(x_3, 5); +lean_ctor_set(x_3, 1, x_1); +lean_ctor_set(x_3, 0, x_2); +x_7 = lean_unsigned_to_nat(0u); +x_8 = l___private_Lean_Setup_0__Lean_reprImport____x40_Lean_Setup___hyg_34_(x_5, x_7); +x_9 = lean_alloc_ctor(5, 2, 0); +lean_ctor_set(x_9, 0, x_3); +lean_ctor_set(x_9, 1, x_8); +x_2 = x_9; +x_3 = x_6; +goto _start; +} +else +{ +lean_object* x_11; lean_object* x_12; lean_object* x_13; lean_object* x_14; lean_object* x_15; lean_object* x_16; +x_11 = lean_ctor_get(x_3, 0); +x_12 = lean_ctor_get(x_3, 1); +lean_inc(x_12); +lean_inc(x_11); +lean_dec(x_3); +lean_inc(x_1); +x_13 = lean_alloc_ctor(5, 2, 0); +lean_ctor_set(x_13, 0, x_2); +lean_ctor_set(x_13, 1, x_1); +x_14 = lean_unsigned_to_nat(0u); +x_15 = l___private_Lean_Setup_0__Lean_reprImport____x40_Lean_Setup___hyg_34_(x_11, x_14); +x_16 = lean_alloc_ctor(5, 2, 0); +lean_ctor_set(x_16, 0, x_13); +lean_ctor_set(x_16, 1, x_15); +x_2 = x_16; +x_3 = x_12; 
+goto _start; +} +} +} +} +LEAN_EXPORT lean_object* l_Std_Format_joinSep___at___private_Lean_Setup_0__Lean_reprModuleSetup____x40_Lean_Setup___hyg_913____spec__12(lean_object* x_1, lean_object* x_2) { +_start: +{ +if (lean_obj_tag(x_1) == 0) +{ +lean_object* x_3; +lean_dec(x_2); +x_3 = lean_box(0); +return x_3; +} +else +{ +lean_object* x_4; +x_4 = lean_ctor_get(x_1, 1); +lean_inc(x_4); +if (lean_obj_tag(x_4) == 0) +{ +lean_object* x_5; lean_object* x_6; lean_object* x_7; +lean_dec(x_2); +x_5 = lean_ctor_get(x_1, 0); +lean_inc(x_5); +lean_dec(x_1); +x_6 = lean_unsigned_to_nat(0u); +x_7 = l___private_Lean_Setup_0__Lean_reprImport____x40_Lean_Setup___hyg_34_(x_5, x_6); +return x_7; +} +else +{ +lean_object* x_8; lean_object* x_9; lean_object* x_10; lean_object* x_11; +x_8 = lean_ctor_get(x_1, 0); +lean_inc(x_8); +lean_dec(x_1); +x_9 = lean_unsigned_to_nat(0u); +x_10 = l___private_Lean_Setup_0__Lean_reprImport____x40_Lean_Setup___hyg_34_(x_8, x_9); +x_11 = l_List_foldl___at___private_Lean_Setup_0__Lean_reprModuleSetup____x40_Lean_Setup___hyg_913____spec__13(x_2, x_10, x_4); +return x_11; +} +} +} +} +static lean_object* _init_l___private_Lean_Setup_0__Lean_reprModuleSetup____x40_Lean_Setup___hyg_913____closed__1() { +_start: +{ +lean_object* x_1; +x_1 = lean_mk_string_unchecked("name", 4, 4); +return x_1; +} +} +static lean_object* _init_l___private_Lean_Setup_0__Lean_reprModuleSetup____x40_Lean_Setup___hyg_913____closed__2() { +_start: +{ +lean_object* x_1; lean_object* x_2; +x_1 = l___private_Lean_Setup_0__Lean_reprModuleSetup____x40_Lean_Setup___hyg_913____closed__1; +x_2 = lean_alloc_ctor(3, 1, 0); +lean_ctor_set(x_2, 0, x_1); +return x_2; +} +} +static lean_object* _init_l___private_Lean_Setup_0__Lean_reprModuleSetup____x40_Lean_Setup___hyg_913____closed__3() { +_start: +{ +lean_object* x_1; lean_object* x_2; lean_object* x_3; +x_1 = lean_box(0); +x_2 = l___private_Lean_Setup_0__Lean_reprModuleSetup____x40_Lean_Setup___hyg_913____closed__2; +x_3 = lean_alloc_ctor(5, 2, 0); +lean_ctor_set(x_3, 0, x_1); +lean_ctor_set(x_3, 1, x_2); +return x_3; +} +} +static lean_object* _init_l___private_Lean_Setup_0__Lean_reprModuleSetup____x40_Lean_Setup___hyg_913____closed__4() { +_start: +{ +lean_object* x_1; lean_object* x_2; lean_object* x_3; +x_1 = l___private_Lean_Setup_0__Lean_reprModuleSetup____x40_Lean_Setup___hyg_913____closed__3; +x_2 = l___private_Lean_Setup_0__Lean_reprImport____x40_Lean_Setup___hyg_34____closed__5; +x_3 = lean_alloc_ctor(5, 2, 0); +lean_ctor_set(x_3, 0, x_1); +lean_ctor_set(x_3, 1, x_2); +return x_3; +} +} +static lean_object* _init_l___private_Lean_Setup_0__Lean_reprModuleSetup____x40_Lean_Setup___hyg_913____closed__5() { +_start: +{ +lean_object* x_1; lean_object* x_2; +x_1 = lean_unsigned_to_nat(8u); +x_2 = lean_nat_to_int(x_1); +return x_2; +} +} +static lean_object* _init_l___private_Lean_Setup_0__Lean_reprModuleSetup____x40_Lean_Setup___hyg_913____closed__6() { +_start: +{ +lean_object* x_1; +x_1 = lean_mk_string_unchecked("isModule", 8, 8); +return x_1; +} +} +static lean_object* _init_l___private_Lean_Setup_0__Lean_reprModuleSetup____x40_Lean_Setup___hyg_913____closed__7() { +_start: +{ +lean_object* x_1; lean_object* x_2; +x_1 = l___private_Lean_Setup_0__Lean_reprModuleSetup____x40_Lean_Setup___hyg_913____closed__6; +x_2 = lean_alloc_ctor(3, 1, 0); +lean_ctor_set(x_2, 0, x_1); +return x_2; +} +} +static lean_object* _init_l___private_Lean_Setup_0__Lean_reprModuleSetup____x40_Lean_Setup___hyg_913____closed__8() { +_start: +{ +lean_object* x_1; lean_object* x_2; 
+x_1 = lean_unsigned_to_nat(12u); +x_2 = lean_nat_to_int(x_1); +return x_2; +} +} +static lean_object* _init_l___private_Lean_Setup_0__Lean_reprModuleSetup____x40_Lean_Setup___hyg_913____closed__9() { +_start: +{ +lean_object* x_1; +x_1 = lean_mk_string_unchecked("imports", 7, 7); +return x_1; +} +} +static lean_object* _init_l___private_Lean_Setup_0__Lean_reprModuleSetup____x40_Lean_Setup___hyg_913____closed__10() { +_start: +{ +lean_object* x_1; lean_object* x_2; +x_1 = l___private_Lean_Setup_0__Lean_reprModuleSetup____x40_Lean_Setup___hyg_913____closed__9; +x_2 = lean_alloc_ctor(3, 1, 0); +lean_ctor_set(x_2, 0, x_1); +return x_2; +} +} +static lean_object* _init_l___private_Lean_Setup_0__Lean_reprModuleSetup____x40_Lean_Setup___hyg_913____closed__11() { +_start: +{ +lean_object* x_1; lean_object* x_2; +x_1 = lean_unsigned_to_nat(11u); +x_2 = lean_nat_to_int(x_1); +return x_2; +} +} +static lean_object* _init_l___private_Lean_Setup_0__Lean_reprModuleSetup____x40_Lean_Setup___hyg_913____closed__12() { +_start: +{ +lean_object* x_1; +x_1 = lean_mk_string_unchecked("modules", 7, 7); +return x_1; +} +} +static lean_object* _init_l___private_Lean_Setup_0__Lean_reprModuleSetup____x40_Lean_Setup___hyg_913____closed__13() { +_start: +{ +lean_object* x_1; lean_object* x_2; +x_1 = l___private_Lean_Setup_0__Lean_reprModuleSetup____x40_Lean_Setup___hyg_913____closed__12; +x_2 = lean_alloc_ctor(3, 1, 0); +lean_ctor_set(x_2, 0, x_1); +return x_2; +} +} +static lean_object* _init_l___private_Lean_Setup_0__Lean_reprModuleSetup____x40_Lean_Setup___hyg_913____closed__14() { +_start: +{ +lean_object* x_1; +x_1 = lean_mk_string_unchecked("Lean.rbmapOf ", 13, 13); +return x_1; +} +} +static lean_object* _init_l___private_Lean_Setup_0__Lean_reprModuleSetup____x40_Lean_Setup___hyg_913____closed__15() { +_start: +{ +lean_object* x_1; lean_object* x_2; +x_1 = l___private_Lean_Setup_0__Lean_reprModuleSetup____x40_Lean_Setup___hyg_913____closed__14; +x_2 = lean_alloc_ctor(3, 1, 0); +lean_ctor_set(x_2, 0, x_1); +return x_2; +} +} +static lean_object* _init_l___private_Lean_Setup_0__Lean_reprModuleSetup____x40_Lean_Setup___hyg_913____closed__16() { +_start: +{ +lean_object* x_1; +x_1 = lean_mk_string_unchecked("dynlibs", 7, 7); +return x_1; +} +} +static lean_object* _init_l___private_Lean_Setup_0__Lean_reprModuleSetup____x40_Lean_Setup___hyg_913____closed__17() { +_start: +{ +lean_object* x_1; lean_object* x_2; +x_1 = l___private_Lean_Setup_0__Lean_reprModuleSetup____x40_Lean_Setup___hyg_913____closed__16; +x_2 = lean_alloc_ctor(3, 1, 0); +lean_ctor_set(x_2, 0, x_1); +return x_2; +} +} +static lean_object* _init_l___private_Lean_Setup_0__Lean_reprModuleSetup____x40_Lean_Setup___hyg_913____closed__18() { +_start: +{ +lean_object* x_1; +x_1 = lean_mk_string_unchecked("plugins", 7, 7); +return x_1; +} +} +static lean_object* _init_l___private_Lean_Setup_0__Lean_reprModuleSetup____x40_Lean_Setup___hyg_913____closed__19() { +_start: +{ +lean_object* x_1; lean_object* x_2; +x_1 = l___private_Lean_Setup_0__Lean_reprModuleSetup____x40_Lean_Setup___hyg_913____closed__18; +x_2 = lean_alloc_ctor(3, 1, 0); +lean_ctor_set(x_2, 0, x_1); +return x_2; +} +} +static lean_object* _init_l___private_Lean_Setup_0__Lean_reprModuleSetup____x40_Lean_Setup___hyg_913____closed__20() { +_start: +{ +lean_object* x_1; +x_1 = lean_mk_string_unchecked("options", 7, 7); +return x_1; +} +} +static lean_object* _init_l___private_Lean_Setup_0__Lean_reprModuleSetup____x40_Lean_Setup___hyg_913____closed__21() { +_start: +{ +lean_object* x_1; 
lean_object* x_2; +x_1 = l___private_Lean_Setup_0__Lean_reprModuleSetup____x40_Lean_Setup___hyg_913____closed__20; +x_2 = lean_alloc_ctor(3, 1, 0); +lean_ctor_set(x_2, 0, x_1); +return x_2; +} +} +static lean_object* _init_l___private_Lean_Setup_0__Lean_reprModuleSetup____x40_Lean_Setup___hyg_913____closed__22() { +_start: +{ +lean_object* x_1; +x_1 = lean_mk_string_unchecked("#[", 2, 2); +return x_1; +} +} +static lean_object* _init_l___private_Lean_Setup_0__Lean_reprModuleSetup____x40_Lean_Setup___hyg_913____closed__23() { +_start: +{ +lean_object* x_1; lean_object* x_2; +x_1 = l___private_Lean_Setup_0__Lean_reprModuleSetup____x40_Lean_Setup___hyg_913____closed__22; +x_2 = lean_string_length(x_1); +return x_2; +} +} +static lean_object* _init_l___private_Lean_Setup_0__Lean_reprModuleSetup____x40_Lean_Setup___hyg_913____closed__24() { +_start: +{ +lean_object* x_1; lean_object* x_2; +x_1 = l___private_Lean_Setup_0__Lean_reprModuleSetup____x40_Lean_Setup___hyg_913____closed__23; +x_2 = lean_nat_to_int(x_1); +return x_2; +} +} +static lean_object* _init_l___private_Lean_Setup_0__Lean_reprModuleSetup____x40_Lean_Setup___hyg_913____closed__25() { +_start: +{ +lean_object* x_1; lean_object* x_2; +x_1 = l___private_Lean_Setup_0__Lean_reprModuleSetup____x40_Lean_Setup___hyg_913____closed__22; +x_2 = lean_alloc_ctor(3, 1, 0); +lean_ctor_set(x_2, 0, x_1); +return x_2; +} +} +static lean_object* _init_l___private_Lean_Setup_0__Lean_reprModuleSetup____x40_Lean_Setup___hyg_913____closed__26() { +_start: +{ +lean_object* x_1; +x_1 = lean_mk_string_unchecked("#[]", 3, 3); +return x_1; +} +} +static lean_object* _init_l___private_Lean_Setup_0__Lean_reprModuleSetup____x40_Lean_Setup___hyg_913____closed__27() { +_start: +{ +lean_object* x_1; lean_object* x_2; +x_1 = l___private_Lean_Setup_0__Lean_reprModuleSetup____x40_Lean_Setup___hyg_913____closed__26; +x_2 = lean_alloc_ctor(3, 1, 0); +lean_ctor_set(x_2, 0, x_1); +return x_2; +} +} +static lean_object* _init_l___private_Lean_Setup_0__Lean_reprModuleSetup____x40_Lean_Setup___hyg_913____closed__28() { +_start: +{ +lean_object* x_1; lean_object* x_2; lean_object* x_3; +x_1 = l___private_Lean_Setup_0__Lean_reprModuleSetup____x40_Lean_Setup___hyg_913____closed__11; +x_2 = l___private_Lean_Setup_0__Lean_reprModuleSetup____x40_Lean_Setup___hyg_913____closed__27; +x_3 = lean_alloc_ctor(4, 2, 0); +lean_ctor_set(x_3, 0, x_1); +lean_ctor_set(x_3, 1, x_2); +return x_3; +} +} +static lean_object* _init_l___private_Lean_Setup_0__Lean_reprModuleSetup____x40_Lean_Setup___hyg_913____closed__29() { +_start: +{ +lean_object* x_1; uint8_t x_2; lean_object* x_3; +x_1 = l___private_Lean_Setup_0__Lean_reprModuleSetup____x40_Lean_Setup___hyg_913____closed__28; +x_2 = 0; +x_3 = lean_alloc_ctor(6, 1, 1); +lean_ctor_set(x_3, 0, x_1); +lean_ctor_set_uint8(x_3, sizeof(void*)*1, x_2); +return x_3; +} +} +LEAN_EXPORT lean_object* l___private_Lean_Setup_0__Lean_reprModuleSetup____x40_Lean_Setup___hyg_913_(lean_object* x_1, lean_object* x_2) { +_start: +{ +lean_object* x_3; lean_object* x_4; lean_object* x_5; lean_object* x_6; lean_object* x_7; uint8_t x_8; lean_object* x_9; lean_object* x_10; lean_object* x_11; lean_object* x_12; lean_object* x_13; lean_object* x_14; lean_object* x_15; lean_object* x_16; lean_object* x_17; lean_object* x_18; lean_object* x_19; uint8_t x_20; lean_object* x_21; lean_object* x_22; uint8_t x_23; lean_object* x_24; lean_object* x_25; lean_object* x_26; lean_object* x_27; lean_object* x_28; lean_object* x_29; lean_object* x_30; lean_object* x_31; 
lean_object* x_32; lean_object* x_33; lean_object* x_34; uint8_t x_35; lean_object* x_36; lean_object* x_37; uint8_t x_38; lean_object* x_39; lean_object* x_40; lean_object* x_41; lean_object* x_42; lean_object* x_43; +x_3 = lean_ctor_get(x_1, 0); +lean_inc(x_3); +x_4 = lean_unsigned_to_nat(0u); +x_5 = l_Lean_Name_reprPrec(x_3, x_4); +x_6 = l___private_Lean_Setup_0__Lean_reprModuleSetup____x40_Lean_Setup___hyg_913____closed__5; +x_7 = lean_alloc_ctor(4, 2, 0); +lean_ctor_set(x_7, 0, x_6); +lean_ctor_set(x_7, 1, x_5); +x_8 = 0; +x_9 = lean_alloc_ctor(6, 1, 1); +lean_ctor_set(x_9, 0, x_7); +lean_ctor_set_uint8(x_9, sizeof(void*)*1, x_8); +x_10 = l___private_Lean_Setup_0__Lean_reprModuleSetup____x40_Lean_Setup___hyg_913____closed__4; +x_11 = lean_alloc_ctor(5, 2, 0); +lean_ctor_set(x_11, 0, x_10); +lean_ctor_set(x_11, 1, x_9); +x_12 = l___private_Lean_Setup_0__Lean_reprImport____x40_Lean_Setup___hyg_34____closed__9; +x_13 = lean_alloc_ctor(5, 2, 0); +lean_ctor_set(x_13, 0, x_11); +lean_ctor_set(x_13, 1, x_12); +x_14 = lean_box(1); +x_15 = lean_alloc_ctor(5, 2, 0); +lean_ctor_set(x_15, 0, x_13); +lean_ctor_set(x_15, 1, x_14); +x_16 = l___private_Lean_Setup_0__Lean_reprModuleSetup____x40_Lean_Setup___hyg_913____closed__7; +x_17 = lean_alloc_ctor(5, 2, 0); +lean_ctor_set(x_17, 0, x_15); +lean_ctor_set(x_17, 1, x_16); +x_18 = l___private_Lean_Setup_0__Lean_reprImport____x40_Lean_Setup___hyg_34____closed__5; +x_19 = lean_alloc_ctor(5, 2, 0); +lean_ctor_set(x_19, 0, x_17); +lean_ctor_set(x_19, 1, x_18); +x_20 = lean_ctor_get_uint8(x_1, sizeof(void*)*6); +x_21 = lean_ctor_get(x_1, 1); +lean_inc(x_21); +x_22 = lean_array_get_size(x_21); +x_23 = lean_nat_dec_eq(x_22, x_4); +lean_dec(x_22); +x_24 = lean_ctor_get(x_1, 2); +lean_inc(x_24); +x_25 = l_Lean_RBMap_toList___at___private_Lean_Setup_0__Lean_reprModuleSetup____x40_Lean_Setup___hyg_913____spec__2(x_24); +lean_dec(x_24); +x_26 = l_List_repr___at___private_Lean_Setup_0__Lean_reprModuleSetup____x40_Lean_Setup___hyg_913____spec__4(x_25, x_4); +x_27 = l___private_Lean_Setup_0__Lean_reprModuleSetup____x40_Lean_Setup___hyg_913____closed__15; +x_28 = lean_alloc_ctor(5, 2, 0); +lean_ctor_set(x_28, 0, x_27); +lean_ctor_set(x_28, 1, x_26); +x_29 = l_Repr_addAppParen(x_28, x_4); +x_30 = l___private_Lean_Setup_0__Lean_reprModuleSetup____x40_Lean_Setup___hyg_913____closed__11; +x_31 = lean_alloc_ctor(4, 2, 0); +lean_ctor_set(x_31, 0, x_30); +lean_ctor_set(x_31, 1, x_29); +x_32 = lean_alloc_ctor(6, 1, 1); +lean_ctor_set(x_32, 0, x_31); +lean_ctor_set_uint8(x_32, sizeof(void*)*1, x_8); +x_33 = lean_ctor_get(x_1, 3); +lean_inc(x_33); +x_34 = lean_array_get_size(x_33); +x_35 = lean_nat_dec_eq(x_34, x_4); +lean_dec(x_34); +x_36 = lean_ctor_get(x_1, 4); +lean_inc(x_36); +x_37 = lean_array_get_size(x_36); +x_38 = lean_nat_dec_eq(x_37, x_4); +lean_dec(x_37); +x_39 = lean_ctor_get(x_1, 5); +lean_inc(x_39); +lean_dec(x_1); +x_40 = l___private_Lean_Util_LeanOptions_0__Lean_reprLeanOptions____x40_Lean_Util_LeanOptions___hyg_541_(x_39, x_4); +lean_dec(x_39); +x_41 = lean_alloc_ctor(4, 2, 0); +lean_ctor_set(x_41, 0, x_30); +lean_ctor_set(x_41, 1, x_40); +x_42 = lean_alloc_ctor(6, 1, 1); +lean_ctor_set(x_42, 0, x_41); +lean_ctor_set_uint8(x_42, sizeof(void*)*1, x_8); +if (x_20 == 0) +{ +lean_object* x_146; +x_146 = l___private_Lean_Setup_0__Lean_reprImport____x40_Lean_Setup___hyg_34____closed__23; +x_43 = x_146; +goto block_145; +} +else +{ +lean_object* x_147; +x_147 = l___private_Lean_Setup_0__Lean_reprImport____x40_Lean_Setup___hyg_34____closed__27; +x_43 = x_147; +goto 
block_145; +} +block_145: +{ +lean_object* x_44; lean_object* x_45; lean_object* x_46; lean_object* x_47; lean_object* x_48; lean_object* x_49; lean_object* x_50; lean_object* x_51; lean_object* x_52; lean_object* x_53; +x_44 = l___private_Lean_Setup_0__Lean_reprModuleSetup____x40_Lean_Setup___hyg_913____closed__8; +x_45 = lean_alloc_ctor(4, 2, 0); +lean_ctor_set(x_45, 0, x_44); +lean_ctor_set(x_45, 1, x_43); +x_46 = lean_alloc_ctor(6, 1, 1); +lean_ctor_set(x_46, 0, x_45); +lean_ctor_set_uint8(x_46, sizeof(void*)*1, x_8); +x_47 = lean_alloc_ctor(5, 2, 0); +lean_ctor_set(x_47, 0, x_19); +lean_ctor_set(x_47, 1, x_46); +x_48 = lean_alloc_ctor(5, 2, 0); +lean_ctor_set(x_48, 0, x_47); +lean_ctor_set(x_48, 1, x_12); +x_49 = lean_alloc_ctor(5, 2, 0); +lean_ctor_set(x_49, 0, x_48); +lean_ctor_set(x_49, 1, x_14); +x_50 = l___private_Lean_Setup_0__Lean_reprModuleSetup____x40_Lean_Setup___hyg_913____closed__10; +x_51 = lean_alloc_ctor(5, 2, 0); +lean_ctor_set(x_51, 0, x_49); +lean_ctor_set(x_51, 1, x_50); +x_52 = lean_alloc_ctor(5, 2, 0); +lean_ctor_set(x_52, 0, x_51); +lean_ctor_set(x_52, 1, x_18); +if (x_23 == 0) +{ +lean_object* x_133; lean_object* x_134; lean_object* x_135; lean_object* x_136; lean_object* x_137; lean_object* x_138; lean_object* x_139; lean_object* x_140; lean_object* x_141; uint8_t x_142; lean_object* x_143; +x_133 = lean_array_to_list(x_21); +x_134 = l_Prod_repr___at___private_Lean_Setup_0__Lean_reprModuleSetup____x40_Lean_Setup___hyg_913____spec__6___closed__1; +x_135 = l_Std_Format_joinSep___at___private_Lean_Setup_0__Lean_reprModuleSetup____x40_Lean_Setup___hyg_913____spec__12(x_133, x_134); +x_136 = l___private_Lean_Setup_0__Lean_reprModuleSetup____x40_Lean_Setup___hyg_913____closed__25; +x_137 = lean_alloc_ctor(5, 2, 0); +lean_ctor_set(x_137, 0, x_136); +lean_ctor_set(x_137, 1, x_135); +x_138 = l_List_repr___at___private_Lean_Setup_0__Lean_reprModuleSetup____x40_Lean_Setup___hyg_913____spec__4___closed__8; +x_139 = lean_alloc_ctor(5, 2, 0); +lean_ctor_set(x_139, 0, x_137); +lean_ctor_set(x_139, 1, x_138); +x_140 = l___private_Lean_Setup_0__Lean_reprModuleSetup____x40_Lean_Setup___hyg_913____closed__24; +x_141 = lean_alloc_ctor(4, 2, 0); +lean_ctor_set(x_141, 0, x_140); +lean_ctor_set(x_141, 1, x_139); +x_142 = 1; +x_143 = lean_alloc_ctor(6, 1, 1); +lean_ctor_set(x_143, 0, x_141); +lean_ctor_set_uint8(x_143, sizeof(void*)*1, x_142); +x_53 = x_143; +goto block_132; +} +else +{ +lean_object* x_144; +lean_dec(x_21); +x_144 = l___private_Lean_Setup_0__Lean_reprModuleSetup____x40_Lean_Setup___hyg_913____closed__27; +x_53 = x_144; +goto block_132; +} +block_132: +{ +lean_object* x_54; lean_object* x_55; lean_object* x_56; lean_object* x_57; lean_object* x_58; lean_object* x_59; lean_object* x_60; lean_object* x_61; lean_object* x_62; lean_object* x_63; lean_object* x_64; lean_object* x_65; lean_object* x_66; lean_object* x_67; lean_object* x_68; +x_54 = lean_alloc_ctor(4, 2, 0); +lean_ctor_set(x_54, 0, x_30); +lean_ctor_set(x_54, 1, x_53); +x_55 = lean_alloc_ctor(6, 1, 1); +lean_ctor_set(x_55, 0, x_54); +lean_ctor_set_uint8(x_55, sizeof(void*)*1, x_8); +x_56 = lean_alloc_ctor(5, 2, 0); +lean_ctor_set(x_56, 0, x_52); +lean_ctor_set(x_56, 1, x_55); +x_57 = lean_alloc_ctor(5, 2, 0); +lean_ctor_set(x_57, 0, x_56); +lean_ctor_set(x_57, 1, x_12); +x_58 = lean_alloc_ctor(5, 2, 0); +lean_ctor_set(x_58, 0, x_57); +lean_ctor_set(x_58, 1, x_14); +x_59 = l___private_Lean_Setup_0__Lean_reprModuleSetup____x40_Lean_Setup___hyg_913____closed__13; +x_60 = lean_alloc_ctor(5, 2, 0); 
+lean_ctor_set(x_60, 0, x_58); +lean_ctor_set(x_60, 1, x_59); +x_61 = lean_alloc_ctor(5, 2, 0); +lean_ctor_set(x_61, 0, x_60); +lean_ctor_set(x_61, 1, x_18); +x_62 = lean_alloc_ctor(5, 2, 0); +lean_ctor_set(x_62, 0, x_61); +lean_ctor_set(x_62, 1, x_32); +x_63 = lean_alloc_ctor(5, 2, 0); +lean_ctor_set(x_63, 0, x_62); +lean_ctor_set(x_63, 1, x_12); +x_64 = lean_alloc_ctor(5, 2, 0); +lean_ctor_set(x_64, 0, x_63); +lean_ctor_set(x_64, 1, x_14); +x_65 = l___private_Lean_Setup_0__Lean_reprModuleSetup____x40_Lean_Setup___hyg_913____closed__17; +x_66 = lean_alloc_ctor(5, 2, 0); +lean_ctor_set(x_66, 0, x_64); +lean_ctor_set(x_66, 1, x_65); +x_67 = lean_alloc_ctor(5, 2, 0); +lean_ctor_set(x_67, 0, x_66); +lean_ctor_set(x_67, 1, x_18); +if (x_35 == 0) +{ +lean_object* x_120; lean_object* x_121; lean_object* x_122; lean_object* x_123; lean_object* x_124; lean_object* x_125; lean_object* x_126; lean_object* x_127; lean_object* x_128; uint8_t x_129; lean_object* x_130; +x_120 = lean_array_to_list(x_33); +x_121 = l_Prod_repr___at___private_Lean_Setup_0__Lean_reprModuleSetup____x40_Lean_Setup___hyg_913____spec__6___closed__1; +x_122 = l_Std_Format_joinSep___at___private_Lean_Setup_0__Lean_reprModuleSetup____x40_Lean_Setup___hyg_913____spec__10(x_120, x_121); +x_123 = l___private_Lean_Setup_0__Lean_reprModuleSetup____x40_Lean_Setup___hyg_913____closed__25; +x_124 = lean_alloc_ctor(5, 2, 0); +lean_ctor_set(x_124, 0, x_123); +lean_ctor_set(x_124, 1, x_122); +x_125 = l_List_repr___at___private_Lean_Setup_0__Lean_reprModuleSetup____x40_Lean_Setup___hyg_913____spec__4___closed__8; +x_126 = lean_alloc_ctor(5, 2, 0); +lean_ctor_set(x_126, 0, x_124); +lean_ctor_set(x_126, 1, x_125); +x_127 = l___private_Lean_Setup_0__Lean_reprModuleSetup____x40_Lean_Setup___hyg_913____closed__24; +x_128 = lean_alloc_ctor(4, 2, 0); +lean_ctor_set(x_128, 0, x_127); +lean_ctor_set(x_128, 1, x_126); +x_129 = 1; +x_130 = lean_alloc_ctor(6, 1, 1); +lean_ctor_set(x_130, 0, x_128); +lean_ctor_set_uint8(x_130, sizeof(void*)*1, x_129); +x_68 = x_130; +goto block_119; +} +else +{ +lean_object* x_131; +lean_dec(x_33); +x_131 = l___private_Lean_Setup_0__Lean_reprModuleSetup____x40_Lean_Setup___hyg_913____closed__27; +x_68 = x_131; +goto block_119; +} +block_119: +{ +lean_object* x_69; lean_object* x_70; lean_object* x_71; lean_object* x_72; lean_object* x_73; lean_object* x_74; lean_object* x_75; lean_object* x_76; +x_69 = lean_alloc_ctor(4, 2, 0); +lean_ctor_set(x_69, 0, x_30); +lean_ctor_set(x_69, 1, x_68); +x_70 = lean_alloc_ctor(6, 1, 1); +lean_ctor_set(x_70, 0, x_69); +lean_ctor_set_uint8(x_70, sizeof(void*)*1, x_8); +x_71 = lean_alloc_ctor(5, 2, 0); +lean_ctor_set(x_71, 0, x_67); +lean_ctor_set(x_71, 1, x_70); +x_72 = lean_alloc_ctor(5, 2, 0); +lean_ctor_set(x_72, 0, x_71); +lean_ctor_set(x_72, 1, x_12); +x_73 = lean_alloc_ctor(5, 2, 0); +lean_ctor_set(x_73, 0, x_72); +lean_ctor_set(x_73, 1, x_14); +x_74 = l___private_Lean_Setup_0__Lean_reprModuleSetup____x40_Lean_Setup___hyg_913____closed__19; +x_75 = lean_alloc_ctor(5, 2, 0); +lean_ctor_set(x_75, 0, x_73); +lean_ctor_set(x_75, 1, x_74); +x_76 = lean_alloc_ctor(5, 2, 0); +lean_ctor_set(x_76, 0, x_75); +lean_ctor_set(x_76, 1, x_18); +if (x_38 == 0) +{ +lean_object* x_77; lean_object* x_78; lean_object* x_79; lean_object* x_80; lean_object* x_81; lean_object* x_82; lean_object* x_83; lean_object* x_84; lean_object* x_85; uint8_t x_86; lean_object* x_87; lean_object* x_88; lean_object* x_89; lean_object* x_90; lean_object* x_91; lean_object* x_92; lean_object* x_93; lean_object* x_94; 
lean_object* x_95; lean_object* x_96; lean_object* x_97; lean_object* x_98; lean_object* x_99; lean_object* x_100; lean_object* x_101; lean_object* x_102; lean_object* x_103; +x_77 = lean_array_to_list(x_36); +x_78 = l_Prod_repr___at___private_Lean_Setup_0__Lean_reprModuleSetup____x40_Lean_Setup___hyg_913____spec__6___closed__1; +x_79 = l_Std_Format_joinSep___at___private_Lean_Setup_0__Lean_reprModuleSetup____x40_Lean_Setup___hyg_913____spec__10(x_77, x_78); +x_80 = l___private_Lean_Setup_0__Lean_reprModuleSetup____x40_Lean_Setup___hyg_913____closed__25; +x_81 = lean_alloc_ctor(5, 2, 0); +lean_ctor_set(x_81, 0, x_80); +lean_ctor_set(x_81, 1, x_79); +x_82 = l_List_repr___at___private_Lean_Setup_0__Lean_reprModuleSetup____x40_Lean_Setup___hyg_913____spec__4___closed__8; +x_83 = lean_alloc_ctor(5, 2, 0); +lean_ctor_set(x_83, 0, x_81); +lean_ctor_set(x_83, 1, x_82); +x_84 = l___private_Lean_Setup_0__Lean_reprModuleSetup____x40_Lean_Setup___hyg_913____closed__24; +x_85 = lean_alloc_ctor(4, 2, 0); +lean_ctor_set(x_85, 0, x_84); +lean_ctor_set(x_85, 1, x_83); +x_86 = 1; +x_87 = lean_alloc_ctor(6, 1, 1); +lean_ctor_set(x_87, 0, x_85); +lean_ctor_set_uint8(x_87, sizeof(void*)*1, x_86); +x_88 = lean_alloc_ctor(4, 2, 0); +lean_ctor_set(x_88, 0, x_30); +lean_ctor_set(x_88, 1, x_87); +x_89 = lean_alloc_ctor(6, 1, 1); +lean_ctor_set(x_89, 0, x_88); +lean_ctor_set_uint8(x_89, sizeof(void*)*1, x_8); +x_90 = lean_alloc_ctor(5, 2, 0); +lean_ctor_set(x_90, 0, x_76); +lean_ctor_set(x_90, 1, x_89); +x_91 = lean_alloc_ctor(5, 2, 0); +lean_ctor_set(x_91, 0, x_90); +lean_ctor_set(x_91, 1, x_12); +x_92 = lean_alloc_ctor(5, 2, 0); +lean_ctor_set(x_92, 0, x_91); +lean_ctor_set(x_92, 1, x_14); +x_93 = l___private_Lean_Setup_0__Lean_reprModuleSetup____x40_Lean_Setup___hyg_913____closed__21; +x_94 = lean_alloc_ctor(5, 2, 0); +lean_ctor_set(x_94, 0, x_92); +lean_ctor_set(x_94, 1, x_93); +x_95 = lean_alloc_ctor(5, 2, 0); +lean_ctor_set(x_95, 0, x_94); +lean_ctor_set(x_95, 1, x_18); +x_96 = lean_alloc_ctor(5, 2, 0); +lean_ctor_set(x_96, 0, x_95); +lean_ctor_set(x_96, 1, x_42); +x_97 = l___private_Lean_Setup_0__Lean_reprImport____x40_Lean_Setup___hyg_34____closed__19; +x_98 = lean_alloc_ctor(5, 2, 0); +lean_ctor_set(x_98, 0, x_97); +lean_ctor_set(x_98, 1, x_96); +x_99 = l___private_Lean_Setup_0__Lean_reprImport____x40_Lean_Setup___hyg_34____closed__21; +x_100 = lean_alloc_ctor(5, 2, 0); +lean_ctor_set(x_100, 0, x_98); +lean_ctor_set(x_100, 1, x_99); +x_101 = l___private_Lean_Setup_0__Lean_reprImport____x40_Lean_Setup___hyg_34____closed__18; +x_102 = lean_alloc_ctor(4, 2, 0); +lean_ctor_set(x_102, 0, x_101); +lean_ctor_set(x_102, 1, x_100); +x_103 = lean_alloc_ctor(6, 1, 1); +lean_ctor_set(x_103, 0, x_102); +lean_ctor_set_uint8(x_103, sizeof(void*)*1, x_8); +return x_103; +} +else +{ +lean_object* x_104; lean_object* x_105; lean_object* x_106; lean_object* x_107; lean_object* x_108; lean_object* x_109; lean_object* x_110; lean_object* x_111; lean_object* x_112; lean_object* x_113; lean_object* x_114; lean_object* x_115; lean_object* x_116; lean_object* x_117; lean_object* x_118; +lean_dec(x_36); +x_104 = l___private_Lean_Setup_0__Lean_reprModuleSetup____x40_Lean_Setup___hyg_913____closed__29; +x_105 = lean_alloc_ctor(5, 2, 0); +lean_ctor_set(x_105, 0, x_76); +lean_ctor_set(x_105, 1, x_104); +x_106 = lean_alloc_ctor(5, 2, 0); +lean_ctor_set(x_106, 0, x_105); +lean_ctor_set(x_106, 1, x_12); +x_107 = lean_alloc_ctor(5, 2, 0); +lean_ctor_set(x_107, 0, x_106); +lean_ctor_set(x_107, 1, x_14); +x_108 = 
l___private_Lean_Setup_0__Lean_reprModuleSetup____x40_Lean_Setup___hyg_913____closed__21; +x_109 = lean_alloc_ctor(5, 2, 0); +lean_ctor_set(x_109, 0, x_107); +lean_ctor_set(x_109, 1, x_108); +x_110 = lean_alloc_ctor(5, 2, 0); +lean_ctor_set(x_110, 0, x_109); +lean_ctor_set(x_110, 1, x_18); +x_111 = lean_alloc_ctor(5, 2, 0); +lean_ctor_set(x_111, 0, x_110); +lean_ctor_set(x_111, 1, x_42); +x_112 = l___private_Lean_Setup_0__Lean_reprImport____x40_Lean_Setup___hyg_34____closed__19; +x_113 = lean_alloc_ctor(5, 2, 0); +lean_ctor_set(x_113, 0, x_112); +lean_ctor_set(x_113, 1, x_111); +x_114 = l___private_Lean_Setup_0__Lean_reprImport____x40_Lean_Setup___hyg_34____closed__21; +x_115 = lean_alloc_ctor(5, 2, 0); +lean_ctor_set(x_115, 0, x_113); +lean_ctor_set(x_115, 1, x_114); +x_116 = l___private_Lean_Setup_0__Lean_reprImport____x40_Lean_Setup___hyg_34____closed__18; +x_117 = lean_alloc_ctor(4, 2, 0); +lean_ctor_set(x_117, 0, x_116); +lean_ctor_set(x_117, 1, x_115); +x_118 = lean_alloc_ctor(6, 1, 1); +lean_ctor_set(x_118, 0, x_117); +lean_ctor_set_uint8(x_118, sizeof(void*)*1, x_8); +return x_118; +} +} +} +} +} +} +LEAN_EXPORT lean_object* l_Lean_RBNode_revFold___at___private_Lean_Setup_0__Lean_reprModuleSetup____x40_Lean_Setup___hyg_913____spec__3___boxed(lean_object* x_1, lean_object* x_2) { +_start: +{ +lean_object* x_3; +x_3 = l_Lean_RBNode_revFold___at___private_Lean_Setup_0__Lean_reprModuleSetup____x40_Lean_Setup___hyg_913____spec__3(x_1, x_2); +lean_dec(x_2); +return x_3; +} +} +LEAN_EXPORT lean_object* l_Lean_RBMap_toList___at___private_Lean_Setup_0__Lean_reprModuleSetup____x40_Lean_Setup___hyg_913____spec__2___boxed(lean_object* x_1) { +_start: +{ +lean_object* x_2; +x_2 = l_Lean_RBMap_toList___at___private_Lean_Setup_0__Lean_reprModuleSetup____x40_Lean_Setup___hyg_913____spec__2(x_1); +lean_dec(x_1); +return x_2; +} +} +LEAN_EXPORT lean_object* l_Prod_repr___at___private_Lean_Setup_0__Lean_reprModuleSetup____x40_Lean_Setup___hyg_913____spec__6___boxed(lean_object* x_1, lean_object* x_2) { +_start: +{ +lean_object* x_3; +x_3 = l_Prod_repr___at___private_Lean_Setup_0__Lean_reprModuleSetup____x40_Lean_Setup___hyg_913____spec__6(x_1, x_2); +lean_dec(x_2); +return x_3; +} +} +LEAN_EXPORT lean_object* l_List_repr___at___private_Lean_Setup_0__Lean_reprModuleSetup____x40_Lean_Setup___hyg_913____spec__4___boxed(lean_object* x_1, lean_object* x_2) { +_start: +{ +lean_object* x_3; +x_3 = l_List_repr___at___private_Lean_Setup_0__Lean_reprModuleSetup____x40_Lean_Setup___hyg_913____spec__4(x_1, x_2); +lean_dec(x_2); +return x_3; +} +} +LEAN_EXPORT lean_object* l_repr___at___private_Lean_Setup_0__Lean_reprModuleSetup____x40_Lean_Setup___hyg_913____spec__9___boxed(lean_object* x_1) { +_start: +{ +lean_object* x_2; +x_2 = l_repr___at___private_Lean_Setup_0__Lean_reprModuleSetup____x40_Lean_Setup___hyg_913____spec__9(x_1); +lean_dec(x_1); +return x_2; +} +} +LEAN_EXPORT lean_object* l___private_Lean_Setup_0__Lean_reprModuleSetup____x40_Lean_Setup___hyg_913____boxed(lean_object* x_1, lean_object* x_2) { +_start: +{ +lean_object* x_3; +x_3 = l___private_Lean_Setup_0__Lean_reprModuleSetup____x40_Lean_Setup___hyg_913_(x_1, x_2); +lean_dec(x_2); +return x_3; +} +} +static lean_object* _init_l_Lean_instReprModuleSetup___closed__1() { +_start: +{ +lean_object* x_1; +x_1 = lean_alloc_closure((void*)(l___private_Lean_Setup_0__Lean_reprModuleSetup____x40_Lean_Setup___hyg_913____boxed), 2, 0); +return x_1; +} +} +static lean_object* _init_l_Lean_instReprModuleSetup() { +_start: +{ +lean_object* x_1; +x_1 = 
l_Lean_instReprModuleSetup___closed__1; +return x_1; +} +} +static lean_object* _init_l_Lean_instInhabitedModuleSetup___closed__1() { +_start: +{ +lean_object* x_1; lean_object* x_2; +x_1 = lean_unsigned_to_nat(0u); +x_2 = lean_mk_empty_array_with_capacity(x_1); +return x_2; +} +} +static lean_object* _init_l_Lean_instInhabitedModuleSetup___closed__2() { +_start: +{ +lean_object* x_1; lean_object* x_2; uint8_t x_3; lean_object* x_4; lean_object* x_5; +x_1 = lean_box(0); +x_2 = lean_box(0); +x_3 = 0; +x_4 = l_Lean_instInhabitedModuleSetup___closed__1; +x_5 = lean_alloc_ctor(0, 6, 1); +lean_ctor_set(x_5, 0, x_2); +lean_ctor_set(x_5, 1, x_4); +lean_ctor_set(x_5, 2, x_1); +lean_ctor_set(x_5, 3, x_4); +lean_ctor_set(x_5, 4, x_4); +lean_ctor_set(x_5, 5, x_1); +lean_ctor_set_uint8(x_5, sizeof(void*)*6, x_3); +return x_5; +} +} +static lean_object* _init_l_Lean_instInhabitedModuleSetup() { +_start: +{ +lean_object* x_1; +x_1 = l_Lean_instInhabitedModuleSetup___closed__2; +return x_1; +} +} +LEAN_EXPORT lean_object* l_Array_mapMUnsafe_map___at___private_Lean_Setup_0__Lean_toJsonModuleSetup____x40_Lean_Setup___hyg_1087____spec__1(size_t x_1, size_t x_2, lean_object* x_3) { +_start: +{ +uint8_t x_4; +x_4 = lean_usize_dec_lt(x_2, x_1); +if (x_4 == 0) +{ +return x_3; +} +else +{ +lean_object* x_5; lean_object* x_6; lean_object* x_7; lean_object* x_8; size_t x_9; size_t x_10; lean_object* x_11; +x_5 = lean_array_uget(x_3, x_2); +x_6 = lean_unsigned_to_nat(0u); +x_7 = lean_array_uset(x_3, x_2, x_6); +x_8 = l___private_Lean_Setup_0__Lean_toJsonImport____x40_Lean_Setup___hyg_124_(x_5); +x_9 = 1; +x_10 = lean_usize_add(x_2, x_9); +x_11 = lean_array_uset(x_7, x_2, x_8); +x_2 = x_10; +x_3 = x_11; +goto _start; +} +} +} +LEAN_EXPORT lean_object* l_Lean_RBNode_fold___at___private_Lean_Setup_0__Lean_toJsonModuleSetup____x40_Lean_Setup___hyg_1087____spec__2(lean_object* x_1, lean_object* x_2, lean_object* x_3) { +_start: +{ +if (lean_obj_tag(x_3) == 0) +{ +lean_dec(x_1); +return x_2; +} +else +{ +lean_object* x_4; lean_object* x_5; lean_object* x_6; lean_object* x_7; lean_object* x_8; uint8_t x_9; lean_object* x_10; lean_object* x_11; lean_object* x_12; +x_4 = lean_ctor_get(x_3, 0); +lean_inc(x_4); +x_5 = lean_ctor_get(x_3, 1); +lean_inc(x_5); +x_6 = lean_ctor_get(x_3, 2); +lean_inc(x_6); +x_7 = lean_ctor_get(x_3, 3); +lean_inc(x_7); +lean_dec(x_3); +lean_inc(x_1); +x_8 = l_Lean_RBNode_fold___at___private_Lean_Setup_0__Lean_toJsonModuleSetup____x40_Lean_Setup___hyg_1087____spec__2(x_1, x_2, x_4); +x_9 = 1; +lean_inc(x_1); +x_10 = l_Lean_Name_toString(x_5, x_9, x_1); +x_11 = l___private_Lean_Setup_0__Lean_toJsonModuleArtifacts____x40_Lean_Setup___hyg_549_(x_6); +x_12 = l_Lean_RBNode_insert___at_Lean_Json_mkObj___spec__1(x_8, x_10, x_11); +x_2 = x_12; +x_3 = x_7; +goto _start; +} +} +} +LEAN_EXPORT lean_object* l_Lean_RBNode_fold___at___private_Lean_Setup_0__Lean_toJsonModuleSetup____x40_Lean_Setup___hyg_1087____spec__2___at___private_Lean_Setup_0__Lean_toJsonModuleSetup____x40_Lean_Setup___hyg_1087____spec__3(lean_object* x_1, lean_object* x_2) { +_start: +{ +if (lean_obj_tag(x_2) == 0) +{ +return x_1; +} +else +{ +lean_object* x_3; lean_object* x_4; lean_object* x_5; lean_object* x_6; lean_object* x_7; uint8_t x_8; lean_object* x_9; lean_object* x_10; lean_object* x_11; lean_object* x_12; +x_3 = lean_ctor_get(x_2, 0); +lean_inc(x_3); +x_4 = lean_ctor_get(x_2, 1); +lean_inc(x_4); +x_5 = lean_ctor_get(x_2, 2); +lean_inc(x_5); +x_6 = lean_ctor_get(x_2, 3); +lean_inc(x_6); +lean_dec(x_2); +x_7 = 
l_Lean_RBNode_fold___at___private_Lean_Setup_0__Lean_toJsonModuleSetup____x40_Lean_Setup___hyg_1087____spec__2___at___private_Lean_Setup_0__Lean_toJsonModuleSetup____x40_Lean_Setup___hyg_1087____spec__3(x_1, x_3); +x_8 = 1; +x_9 = l___private_Lean_Setup_0__Lean_toJsonImport____x40_Lean_Setup___hyg_124____closed__1; +x_10 = l_Lean_Name_toString(x_4, x_8, x_9); +x_11 = l___private_Lean_Setup_0__Lean_toJsonModuleArtifacts____x40_Lean_Setup___hyg_549_(x_5); +x_12 = l_Lean_RBNode_insert___at_Lean_Json_mkObj___spec__1(x_7, x_10, x_11); +x_1 = x_12; +x_2 = x_6; +goto _start; +} +} +} +LEAN_EXPORT lean_object* l_Lean_RBNode_fold___at___private_Lean_Setup_0__Lean_toJsonModuleSetup____x40_Lean_Setup___hyg_1087____spec__4(lean_object* x_1, lean_object* x_2, lean_object* x_3) { +_start: +{ +if (lean_obj_tag(x_3) == 0) +{ +lean_dec(x_1); +return x_2; +} +else +{ +lean_object* x_4; lean_object* x_5; lean_object* x_6; lean_object* x_7; lean_object* x_8; uint8_t x_9; lean_object* x_10; +x_4 = lean_ctor_get(x_3, 0); +lean_inc(x_4); +x_5 = lean_ctor_get(x_3, 1); +lean_inc(x_5); +x_6 = lean_ctor_get(x_3, 2); +lean_inc(x_6); +x_7 = lean_ctor_get(x_3, 3); +lean_inc(x_7); +lean_dec(x_3); +lean_inc(x_1); +x_8 = l_Lean_RBNode_fold___at___private_Lean_Setup_0__Lean_toJsonModuleSetup____x40_Lean_Setup___hyg_1087____spec__4(x_1, x_2, x_4); +x_9 = 1; +lean_inc(x_1); +x_10 = l_Lean_Name_toString(x_5, x_9, x_1); +switch (lean_obj_tag(x_6)) { +case 0: +{ +uint8_t x_11; +x_11 = !lean_is_exclusive(x_6); +if (x_11 == 0) +{ +lean_object* x_12; +lean_ctor_set_tag(x_6, 3); +x_12 = l_Lean_RBNode_insert___at_Lean_Json_mkObj___spec__1(x_8, x_10, x_6); +x_2 = x_12; +x_3 = x_7; +goto _start; +} +else +{ +lean_object* x_14; lean_object* x_15; lean_object* x_16; +x_14 = lean_ctor_get(x_6, 0); +lean_inc(x_14); +lean_dec(x_6); +x_15 = lean_alloc_ctor(3, 1, 0); +lean_ctor_set(x_15, 0, x_14); +x_16 = l_Lean_RBNode_insert___at_Lean_Json_mkObj___spec__1(x_8, x_10, x_15); +x_2 = x_16; +x_3 = x_7; +goto _start; +} +} +case 1: +{ +uint8_t x_18; +x_18 = !lean_is_exclusive(x_6); +if (x_18 == 0) +{ +lean_object* x_19; +x_19 = l_Lean_RBNode_insert___at_Lean_Json_mkObj___spec__1(x_8, x_10, x_6); +x_2 = x_19; +x_3 = x_7; +goto _start; +} +else +{ +uint8_t x_21; lean_object* x_22; lean_object* x_23; +x_21 = lean_ctor_get_uint8(x_6, 0); +lean_dec(x_6); +x_22 = lean_alloc_ctor(1, 0, 1); +lean_ctor_set_uint8(x_22, 0, x_21); +x_23 = l_Lean_RBNode_insert___at_Lean_Json_mkObj___spec__1(x_8, x_10, x_22); +x_2 = x_23; +x_3 = x_7; +goto _start; +} +} +default: +{ +uint8_t x_25; +x_25 = !lean_is_exclusive(x_6); +if (x_25 == 0) +{ +lean_object* x_26; lean_object* x_27; lean_object* x_28; +x_26 = lean_ctor_get(x_6, 0); +x_27 = l_Lean_JsonNumber_fromNat(x_26); +lean_ctor_set(x_6, 0, x_27); +x_28 = l_Lean_RBNode_insert___at_Lean_Json_mkObj___spec__1(x_8, x_10, x_6); +x_2 = x_28; +x_3 = x_7; +goto _start; +} +else +{ +lean_object* x_30; lean_object* x_31; lean_object* x_32; lean_object* x_33; +x_30 = lean_ctor_get(x_6, 0); +lean_inc(x_30); +lean_dec(x_6); +x_31 = l_Lean_JsonNumber_fromNat(x_30); +x_32 = lean_alloc_ctor(2, 1, 0); +lean_ctor_set(x_32, 0, x_31); +x_33 = l_Lean_RBNode_insert___at_Lean_Json_mkObj___spec__1(x_8, x_10, x_32); +x_2 = x_33; +x_3 = x_7; +goto _start; +} +} +} +} +} +} +LEAN_EXPORT lean_object* l_Lean_RBNode_fold___at___private_Lean_Setup_0__Lean_toJsonModuleSetup____x40_Lean_Setup___hyg_1087____spec__4___at___private_Lean_Setup_0__Lean_toJsonModuleSetup____x40_Lean_Setup___hyg_1087____spec__5(lean_object* x_1, lean_object* x_2) { +_start: 
+{ +if (lean_obj_tag(x_2) == 0) +{ +return x_1; +} +else +{ +lean_object* x_3; lean_object* x_4; lean_object* x_5; lean_object* x_6; lean_object* x_7; uint8_t x_8; lean_object* x_9; lean_object* x_10; +x_3 = lean_ctor_get(x_2, 0); +lean_inc(x_3); +x_4 = lean_ctor_get(x_2, 1); +lean_inc(x_4); +x_5 = lean_ctor_get(x_2, 2); +lean_inc(x_5); +x_6 = lean_ctor_get(x_2, 3); +lean_inc(x_6); +lean_dec(x_2); +x_7 = l_Lean_RBNode_fold___at___private_Lean_Setup_0__Lean_toJsonModuleSetup____x40_Lean_Setup___hyg_1087____spec__4___at___private_Lean_Setup_0__Lean_toJsonModuleSetup____x40_Lean_Setup___hyg_1087____spec__5(x_1, x_3); +x_8 = 1; +x_9 = l___private_Lean_Setup_0__Lean_toJsonImport____x40_Lean_Setup___hyg_124____closed__1; +x_10 = l_Lean_Name_toString(x_4, x_8, x_9); +switch (lean_obj_tag(x_5)) { +case 0: +{ +uint8_t x_11; +x_11 = !lean_is_exclusive(x_5); +if (x_11 == 0) +{ +lean_object* x_12; +lean_ctor_set_tag(x_5, 3); +x_12 = l_Lean_RBNode_insert___at_Lean_Json_mkObj___spec__1(x_7, x_10, x_5); +x_1 = x_12; +x_2 = x_6; +goto _start; +} +else +{ +lean_object* x_14; lean_object* x_15; lean_object* x_16; +x_14 = lean_ctor_get(x_5, 0); +lean_inc(x_14); +lean_dec(x_5); +x_15 = lean_alloc_ctor(3, 1, 0); +lean_ctor_set(x_15, 0, x_14); +x_16 = l_Lean_RBNode_insert___at_Lean_Json_mkObj___spec__1(x_7, x_10, x_15); +x_1 = x_16; +x_2 = x_6; +goto _start; +} +} +case 1: +{ +uint8_t x_18; +x_18 = !lean_is_exclusive(x_5); +if (x_18 == 0) +{ +lean_object* x_19; +x_19 = l_Lean_RBNode_insert___at_Lean_Json_mkObj___spec__1(x_7, x_10, x_5); +x_1 = x_19; +x_2 = x_6; +goto _start; +} +else +{ +uint8_t x_21; lean_object* x_22; lean_object* x_23; +x_21 = lean_ctor_get_uint8(x_5, 0); +lean_dec(x_5); +x_22 = lean_alloc_ctor(1, 0, 1); +lean_ctor_set_uint8(x_22, 0, x_21); +x_23 = l_Lean_RBNode_insert___at_Lean_Json_mkObj___spec__1(x_7, x_10, x_22); +x_1 = x_23; +x_2 = x_6; +goto _start; +} +} +default: +{ +uint8_t x_25; +x_25 = !lean_is_exclusive(x_5); +if (x_25 == 0) +{ +lean_object* x_26; lean_object* x_27; lean_object* x_28; +x_26 = lean_ctor_get(x_5, 0); +x_27 = l_Lean_JsonNumber_fromNat(x_26); +lean_ctor_set(x_5, 0, x_27); +x_28 = l_Lean_RBNode_insert___at_Lean_Json_mkObj___spec__1(x_7, x_10, x_5); +x_1 = x_28; +x_2 = x_6; +goto _start; +} +else +{ +lean_object* x_30; lean_object* x_31; lean_object* x_32; lean_object* x_33; +x_30 = lean_ctor_get(x_5, 0); +lean_inc(x_30); +lean_dec(x_5); +x_31 = l_Lean_JsonNumber_fromNat(x_30); +x_32 = lean_alloc_ctor(2, 1, 0); +lean_ctor_set(x_32, 0, x_31); +x_33 = l_Lean_RBNode_insert___at_Lean_Json_mkObj___spec__1(x_7, x_10, x_32); +x_1 = x_33; +x_2 = x_6; +goto _start; +} +} +} +} +} +} +LEAN_EXPORT lean_object* l___private_Lean_Setup_0__Lean_toJsonModuleSetup____x40_Lean_Setup___hyg_1087_(lean_object* x_1) { +_start: +{ +lean_object* x_2; uint8_t x_3; lean_object* x_4; lean_object* x_5; lean_object* x_6; lean_object* x_7; lean_object* x_8; lean_object* x_9; lean_object* x_10; uint8_t x_11; lean_object* x_12; lean_object* x_13; lean_object* x_14; lean_object* x_15; lean_object* x_16; size_t x_17; size_t x_18; lean_object* x_19; lean_object* x_20; lean_object* x_21; lean_object* x_22; lean_object* x_23; lean_object* x_24; lean_object* x_25; lean_object* x_26; size_t x_27; lean_object* x_28; lean_object* x_29; lean_object* x_30; lean_object* x_31; lean_object* x_32; lean_object* x_33; size_t x_34; lean_object* x_35; lean_object* x_36; lean_object* x_37; lean_object* x_38; lean_object* x_39; lean_object* x_40; lean_object* x_41; lean_object* x_42; lean_object* x_43; lean_object* x_44; 
lean_object* x_45; lean_object* x_46; lean_object* x_47; lean_object* x_48; lean_object* x_49; lean_object* x_50; lean_object* x_51; lean_object* x_52; lean_object* x_53; lean_object* x_54; lean_object* x_55; lean_object* x_56; lean_object* x_57; lean_object* x_58; lean_object* x_59; lean_object* x_60; +x_2 = lean_ctor_get(x_1, 0); +lean_inc(x_2); +x_3 = 1; +x_4 = l___private_Lean_Setup_0__Lean_toJsonImport____x40_Lean_Setup___hyg_124____closed__1; +x_5 = l_Lean_Name_toString(x_2, x_3, x_4); +x_6 = lean_alloc_ctor(3, 1, 0); +lean_ctor_set(x_6, 0, x_5); +x_7 = l___private_Lean_Setup_0__Lean_reprModuleSetup____x40_Lean_Setup___hyg_913____closed__1; +x_8 = lean_alloc_ctor(0, 2, 0); +lean_ctor_set(x_8, 0, x_7); +lean_ctor_set(x_8, 1, x_6); +x_9 = lean_box(0); +x_10 = lean_alloc_ctor(1, 2, 0); +lean_ctor_set(x_10, 0, x_8); +lean_ctor_set(x_10, 1, x_9); +x_11 = lean_ctor_get_uint8(x_1, sizeof(void*)*6); +x_12 = lean_alloc_ctor(1, 0, 1); +lean_ctor_set_uint8(x_12, 0, x_11); +x_13 = l___private_Lean_Setup_0__Lean_reprModuleSetup____x40_Lean_Setup___hyg_913____closed__6; +x_14 = lean_alloc_ctor(0, 2, 0); +lean_ctor_set(x_14, 0, x_13); +lean_ctor_set(x_14, 1, x_12); +x_15 = lean_alloc_ctor(1, 2, 0); +lean_ctor_set(x_15, 0, x_14); +lean_ctor_set(x_15, 1, x_9); +x_16 = lean_ctor_get(x_1, 1); +lean_inc(x_16); +x_17 = lean_array_size(x_16); +x_18 = 0; +x_19 = l_Array_mapMUnsafe_map___at___private_Lean_Setup_0__Lean_toJsonModuleSetup____x40_Lean_Setup___hyg_1087____spec__1(x_17, x_18, x_16); +x_20 = lean_alloc_ctor(4, 1, 0); +lean_ctor_set(x_20, 0, x_19); +x_21 = l___private_Lean_Setup_0__Lean_reprModuleSetup____x40_Lean_Setup___hyg_913____closed__9; +x_22 = lean_alloc_ctor(0, 2, 0); +lean_ctor_set(x_22, 0, x_21); +lean_ctor_set(x_22, 1, x_20); +x_23 = lean_alloc_ctor(1, 2, 0); +lean_ctor_set(x_23, 0, x_22); +lean_ctor_set(x_23, 1, x_9); +x_24 = lean_ctor_get(x_1, 2); +lean_inc(x_24); +x_25 = lean_box(0); +x_26 = lean_ctor_get(x_1, 3); +lean_inc(x_26); +x_27 = lean_array_size(x_26); +x_28 = l_Array_mapMUnsafe_map___at___private_Lean_Util_Paths_0__Lean_toJsonLeanPaths____x40_Lean_Util_Paths___hyg_55____spec__1(x_27, x_18, x_26); +x_29 = lean_alloc_ctor(4, 1, 0); +lean_ctor_set(x_29, 0, x_28); +x_30 = l___private_Lean_Setup_0__Lean_reprModuleSetup____x40_Lean_Setup___hyg_913____closed__16; +x_31 = lean_alloc_ctor(0, 2, 0); +lean_ctor_set(x_31, 0, x_30); +lean_ctor_set(x_31, 1, x_29); +x_32 = lean_alloc_ctor(1, 2, 0); +lean_ctor_set(x_32, 0, x_31); +lean_ctor_set(x_32, 1, x_9); +x_33 = lean_ctor_get(x_1, 4); +lean_inc(x_33); +x_34 = lean_array_size(x_33); +x_35 = l_Array_mapMUnsafe_map___at___private_Lean_Util_Paths_0__Lean_toJsonLeanPaths____x40_Lean_Util_Paths___hyg_55____spec__1(x_34, x_18, x_33); +x_36 = lean_alloc_ctor(4, 1, 0); +lean_ctor_set(x_36, 0, x_35); +x_37 = l___private_Lean_Setup_0__Lean_reprModuleSetup____x40_Lean_Setup___hyg_913____closed__18; +x_38 = lean_alloc_ctor(0, 2, 0); +lean_ctor_set(x_38, 0, x_37); +lean_ctor_set(x_38, 1, x_36); +x_39 = lean_alloc_ctor(1, 2, 0); +lean_ctor_set(x_39, 0, x_38); +lean_ctor_set(x_39, 1, x_9); +x_40 = lean_ctor_get(x_1, 5); +lean_inc(x_40); +lean_dec(x_1); +x_41 = l_Lean_RBNode_fold___at___private_Lean_Setup_0__Lean_toJsonModuleSetup____x40_Lean_Setup___hyg_1087____spec__2___at___private_Lean_Setup_0__Lean_toJsonModuleSetup____x40_Lean_Setup___hyg_1087____spec__3(x_25, x_24); +x_42 = lean_alloc_ctor(5, 1, 0); +lean_ctor_set(x_42, 0, x_41); +x_43 = l___private_Lean_Setup_0__Lean_reprModuleSetup____x40_Lean_Setup___hyg_913____closed__12; +x_44 = 
lean_alloc_ctor(0, 2, 0); +lean_ctor_set(x_44, 0, x_43); +lean_ctor_set(x_44, 1, x_42); +x_45 = lean_alloc_ctor(1, 2, 0); +lean_ctor_set(x_45, 0, x_44); +lean_ctor_set(x_45, 1, x_9); +x_46 = l_Lean_RBNode_fold___at___private_Lean_Setup_0__Lean_toJsonModuleSetup____x40_Lean_Setup___hyg_1087____spec__4___at___private_Lean_Setup_0__Lean_toJsonModuleSetup____x40_Lean_Setup___hyg_1087____spec__5(x_25, x_40); +x_47 = lean_alloc_ctor(5, 1, 0); +lean_ctor_set(x_47, 0, x_46); +x_48 = l___private_Lean_Setup_0__Lean_reprModuleSetup____x40_Lean_Setup___hyg_913____closed__20; +x_49 = lean_alloc_ctor(0, 2, 0); +lean_ctor_set(x_49, 0, x_48); +lean_ctor_set(x_49, 1, x_47); +x_50 = lean_alloc_ctor(1, 2, 0); +lean_ctor_set(x_50, 0, x_49); +lean_ctor_set(x_50, 1, x_9); +x_51 = lean_alloc_ctor(1, 2, 0); +lean_ctor_set(x_51, 0, x_50); +lean_ctor_set(x_51, 1, x_9); +x_52 = lean_alloc_ctor(1, 2, 0); +lean_ctor_set(x_52, 0, x_39); +lean_ctor_set(x_52, 1, x_51); +x_53 = lean_alloc_ctor(1, 2, 0); +lean_ctor_set(x_53, 0, x_32); +lean_ctor_set(x_53, 1, x_52); +x_54 = lean_alloc_ctor(1, 2, 0); +lean_ctor_set(x_54, 0, x_45); +lean_ctor_set(x_54, 1, x_53); +x_55 = lean_alloc_ctor(1, 2, 0); +lean_ctor_set(x_55, 0, x_23); +lean_ctor_set(x_55, 1, x_54); +x_56 = lean_alloc_ctor(1, 2, 0); +lean_ctor_set(x_56, 0, x_15); +lean_ctor_set(x_56, 1, x_55); +x_57 = lean_alloc_ctor(1, 2, 0); +lean_ctor_set(x_57, 0, x_10); +lean_ctor_set(x_57, 1, x_56); +x_58 = l___private_Lean_Setup_0__Lean_toJsonImport____x40_Lean_Setup___hyg_124____closed__2; +x_59 = l_List_flatMapTR_go___at___private_Lean_Util_Paths_0__Lean_toJsonLeanPaths____x40_Lean_Util_Paths___hyg_55____spec__2(x_57, x_58); +x_60 = l_Lean_Json_mkObj(x_59); +return x_60; +} +} +LEAN_EXPORT lean_object* l_Array_mapMUnsafe_map___at___private_Lean_Setup_0__Lean_toJsonModuleSetup____x40_Lean_Setup___hyg_1087____spec__1___boxed(lean_object* x_1, lean_object* x_2, lean_object* x_3) { +_start: +{ +size_t x_4; size_t x_5; lean_object* x_6; +x_4 = lean_unbox_usize(x_1); +lean_dec(x_1); +x_5 = lean_unbox_usize(x_2); +lean_dec(x_2); +x_6 = l_Array_mapMUnsafe_map___at___private_Lean_Setup_0__Lean_toJsonModuleSetup____x40_Lean_Setup___hyg_1087____spec__1(x_4, x_5, x_3); +return x_6; +} +} +static lean_object* _init_l_Lean_instToJsonModuleSetup___closed__1() { +_start: +{ +lean_object* x_1; +x_1 = lean_alloc_closure((void*)(l___private_Lean_Setup_0__Lean_toJsonModuleSetup____x40_Lean_Setup___hyg_1087_), 1, 0); +return x_1; +} +} +static lean_object* _init_l_Lean_instToJsonModuleSetup() { +_start: +{ +lean_object* x_1; +x_1 = l_Lean_instToJsonModuleSetup___closed__1; +return x_1; +} +} +LEAN_EXPORT lean_object* l_Array_mapMUnsafe_map___at___private_Lean_Setup_0__Lean_fromJsonModuleSetup____x40_Lean_Setup___hyg_1209____spec__2(size_t x_1, size_t x_2, lean_object* x_3) { +_start: +{ +uint8_t x_4; +x_4 = lean_usize_dec_lt(x_2, x_1); +if (x_4 == 0) +{ +lean_object* x_5; +x_5 = lean_alloc_ctor(1, 1, 0); +lean_ctor_set(x_5, 0, x_3); +return x_5; +} +else +{ +lean_object* x_6; lean_object* x_7; lean_object* x_8; lean_object* x_9; +x_6 = lean_array_uget(x_3, x_2); +x_7 = lean_unsigned_to_nat(0u); +x_8 = lean_array_uset(x_3, x_2, x_7); +x_9 = l___private_Lean_Setup_0__Lean_fromJsonImport____x40_Lean_Setup___hyg_190_(x_6); +if (lean_obj_tag(x_9) == 0) +{ +uint8_t x_10; +lean_dec(x_8); +x_10 = !lean_is_exclusive(x_9); +if (x_10 == 0) +{ +return x_9; +} +else +{ +lean_object* x_11; lean_object* x_12; +x_11 = lean_ctor_get(x_9, 0); +lean_inc(x_11); +lean_dec(x_9); +x_12 = lean_alloc_ctor(0, 1, 0); 
+lean_ctor_set(x_12, 0, x_11); +return x_12; +} +} +else +{ +lean_object* x_13; size_t x_14; size_t x_15; lean_object* x_16; +x_13 = lean_ctor_get(x_9, 0); +lean_inc(x_13); +lean_dec(x_9); +x_14 = 1; +x_15 = lean_usize_add(x_2, x_14); +x_16 = lean_array_uset(x_8, x_2, x_13); +x_2 = x_15; +x_3 = x_16; +goto _start; +} +} +} +} +static lean_object* _init_l_Lean_Json_getObjValAs_x3f___at___private_Lean_Setup_0__Lean_fromJsonModuleSetup____x40_Lean_Setup___hyg_1209____spec__1___closed__1() { +_start: +{ +lean_object* x_1; +x_1 = lean_mk_string_unchecked("expected JSON array, got '", 26, 26); +return x_1; +} +} +LEAN_EXPORT lean_object* l_Lean_Json_getObjValAs_x3f___at___private_Lean_Setup_0__Lean_fromJsonModuleSetup____x40_Lean_Setup___hyg_1209____spec__1(lean_object* x_1, lean_object* x_2) { +_start: +{ +lean_object* x_3; +x_3 = l_Lean_Json_getObjValD(x_1, x_2); +switch (lean_obj_tag(x_3)) { +case 0: +{ +lean_object* x_4; lean_object* x_5; lean_object* x_6; lean_object* x_7; lean_object* x_8; lean_object* x_9; lean_object* x_10; +x_4 = lean_unsigned_to_nat(80u); +x_5 = l_Lean_Json_pretty(x_3, x_4); +x_6 = l_Lean_Json_getObjValAs_x3f___at___private_Lean_Setup_0__Lean_fromJsonModuleSetup____x40_Lean_Setup___hyg_1209____spec__1___closed__1; +x_7 = lean_string_append(x_6, x_5); +lean_dec(x_5); +x_8 = l_Lean_Json_getObjValAs_x3f___at___private_Lean_Setup_0__Lean_fromJsonImport____x40_Lean_Setup___hyg_190____spec__1___closed__3; +x_9 = lean_string_append(x_7, x_8); +x_10 = lean_alloc_ctor(0, 1, 0); +lean_ctor_set(x_10, 0, x_9); +return x_10; +} +case 1: +{ +lean_object* x_11; lean_object* x_12; lean_object* x_13; lean_object* x_14; lean_object* x_15; lean_object* x_16; lean_object* x_17; +x_11 = lean_unsigned_to_nat(80u); +x_12 = l_Lean_Json_pretty(x_3, x_11); +x_13 = l_Lean_Json_getObjValAs_x3f___at___private_Lean_Setup_0__Lean_fromJsonModuleSetup____x40_Lean_Setup___hyg_1209____spec__1___closed__1; +x_14 = lean_string_append(x_13, x_12); +lean_dec(x_12); +x_15 = l_Lean_Json_getObjValAs_x3f___at___private_Lean_Setup_0__Lean_fromJsonImport____x40_Lean_Setup___hyg_190____spec__1___closed__3; +x_16 = lean_string_append(x_14, x_15); +x_17 = lean_alloc_ctor(0, 1, 0); +lean_ctor_set(x_17, 0, x_16); +return x_17; +} +case 4: +{ +lean_object* x_18; size_t x_19; size_t x_20; lean_object* x_21; +x_18 = lean_ctor_get(x_3, 0); +lean_inc(x_18); +lean_dec(x_3); +x_19 = lean_array_size(x_18); +x_20 = 0; +x_21 = l_Array_mapMUnsafe_map___at___private_Lean_Setup_0__Lean_fromJsonModuleSetup____x40_Lean_Setup___hyg_1209____spec__2(x_19, x_20, x_18); +return x_21; +} +default: +{ +lean_object* x_22; lean_object* x_23; uint8_t x_24; +x_22 = lean_unsigned_to_nat(80u); +lean_inc(x_3); +x_23 = l_Lean_Json_pretty(x_3, x_22); +x_24 = !lean_is_exclusive(x_3); +if (x_24 == 0) +{ +lean_object* x_25; lean_object* x_26; lean_object* x_27; lean_object* x_28; lean_object* x_29; +x_25 = lean_ctor_get(x_3, 0); +lean_dec(x_25); +x_26 = l_Lean_Json_getObjValAs_x3f___at___private_Lean_Setup_0__Lean_fromJsonModuleSetup____x40_Lean_Setup___hyg_1209____spec__1___closed__1; +x_27 = lean_string_append(x_26, x_23); +lean_dec(x_23); +x_28 = l_Lean_Json_getObjValAs_x3f___at___private_Lean_Setup_0__Lean_fromJsonImport____x40_Lean_Setup___hyg_190____spec__1___closed__3; +x_29 = lean_string_append(x_27, x_28); +lean_ctor_set_tag(x_3, 0); +lean_ctor_set(x_3, 0, x_29); +return x_3; +} +else +{ +lean_object* x_30; lean_object* x_31; lean_object* x_32; lean_object* x_33; lean_object* x_34; +lean_dec(x_3); +x_30 = 
l_Lean_Json_getObjValAs_x3f___at___private_Lean_Setup_0__Lean_fromJsonModuleSetup____x40_Lean_Setup___hyg_1209____spec__1___closed__1; +x_31 = lean_string_append(x_30, x_23); +lean_dec(x_23); +x_32 = l_Lean_Json_getObjValAs_x3f___at___private_Lean_Setup_0__Lean_fromJsonImport____x40_Lean_Setup___hyg_190____spec__1___closed__3; +x_33 = lean_string_append(x_31, x_32); +x_34 = lean_alloc_ctor(0, 1, 0); +lean_ctor_set(x_34, 0, x_33); +return x_34; +} +} +} +} +} +LEAN_EXPORT lean_object* l_Lean_RBNode_foldM___at___private_Lean_Setup_0__Lean_fromJsonModuleSetup____x40_Lean_Setup___hyg_1209____spec__4(lean_object* x_1, lean_object* x_2) { +_start: +{ +if (lean_obj_tag(x_2) == 0) +{ +lean_object* x_3; +x_3 = lean_alloc_ctor(1, 1, 0); +lean_ctor_set(x_3, 0, x_1); +return x_3; +} +else +{ +lean_object* x_4; lean_object* x_5; lean_object* x_6; lean_object* x_7; lean_object* x_8; +x_4 = lean_ctor_get(x_2, 0); +lean_inc(x_4); +x_5 = lean_ctor_get(x_2, 1); +lean_inc(x_5); +x_6 = lean_ctor_get(x_2, 2); +lean_inc(x_6); +x_7 = lean_ctor_get(x_2, 3); +lean_inc(x_7); +lean_dec(x_2); +x_8 = l_Lean_RBNode_foldM___at___private_Lean_Setup_0__Lean_fromJsonModuleSetup____x40_Lean_Setup___hyg_1209____spec__4(x_1, x_4); +if (lean_obj_tag(x_8) == 0) +{ +uint8_t x_9; +lean_dec(x_7); +lean_dec(x_6); +lean_dec(x_5); +x_9 = !lean_is_exclusive(x_8); +if (x_9 == 0) +{ +return x_8; +} +else +{ +lean_object* x_10; lean_object* x_11; +x_10 = lean_ctor_get(x_8, 0); +lean_inc(x_10); +lean_dec(x_8); +x_11 = lean_alloc_ctor(0, 1, 0); +lean_ctor_set(x_11, 0, x_10); +return x_11; +} +} +else +{ +uint8_t x_12; +x_12 = !lean_is_exclusive(x_8); +if (x_12 == 0) +{ +lean_object* x_13; lean_object* x_14; uint8_t x_15; +x_13 = lean_ctor_get(x_8, 0); +x_14 = l_Lean_Json_getObjValAs_x3f___at___private_Lean_Setup_0__Lean_fromJsonImport____x40_Lean_Setup___hyg_190____spec__1___closed__1; +x_15 = lean_string_dec_eq(x_5, x_14); +if (x_15 == 0) +{ +lean_object* x_16; uint8_t x_17; +lean_inc(x_5); +x_16 = l_String_toName(x_5); +x_17 = l_Lean_Name_isAnonymous(x_16); +if (x_17 == 0) +{ +lean_object* x_18; +lean_free_object(x_8); +lean_dec(x_5); +x_18 = l___private_Lean_Setup_0__Lean_fromJsonModuleArtifacts____x40_Lean_Setup___hyg_593_(x_6); +if (lean_obj_tag(x_18) == 0) +{ +uint8_t x_19; +lean_dec(x_16); +lean_dec(x_13); +lean_dec(x_7); +x_19 = !lean_is_exclusive(x_18); +if (x_19 == 0) +{ +return x_18; +} +else +{ +lean_object* x_20; lean_object* x_21; +x_20 = lean_ctor_get(x_18, 0); +lean_inc(x_20); +lean_dec(x_18); +x_21 = lean_alloc_ctor(0, 1, 0); +lean_ctor_set(x_21, 0, x_20); +return x_21; +} +} +else +{ +lean_object* x_22; lean_object* x_23; +x_22 = lean_ctor_get(x_18, 0); +lean_inc(x_22); +lean_dec(x_18); +x_23 = l_Lean_RBNode_insert___at_Lean_NameMap_insert___spec__1___rarg(x_13, x_16, x_22); +x_1 = x_23; +x_2 = x_7; +goto _start; +} +} +else +{ +lean_object* x_25; lean_object* x_26; lean_object* x_27; lean_object* x_28; +lean_dec(x_16); +lean_dec(x_13); +lean_dec(x_7); +lean_dec(x_6); +x_25 = l_Lean_Json_getObjValAs_x3f___at___private_Lean_Setup_0__Lean_fromJsonImport____x40_Lean_Setup___hyg_190____spec__1___closed__2; +x_26 = lean_string_append(x_25, x_5); +lean_dec(x_5); +x_27 = l_Lean_Json_getObjValAs_x3f___at___private_Lean_Setup_0__Lean_fromJsonImport____x40_Lean_Setup___hyg_190____spec__1___closed__3; +x_28 = lean_string_append(x_26, x_27); +lean_ctor_set_tag(x_8, 0); +lean_ctor_set(x_8, 0, x_28); +return x_8; +} +} +else +{ +lean_object* x_29; +lean_free_object(x_8); +lean_dec(x_5); +x_29 = 
l___private_Lean_Setup_0__Lean_fromJsonModuleArtifacts____x40_Lean_Setup___hyg_593_(x_6); +if (lean_obj_tag(x_29) == 0) +{ +uint8_t x_30; +lean_dec(x_13); +lean_dec(x_7); +x_30 = !lean_is_exclusive(x_29); +if (x_30 == 0) +{ +return x_29; +} +else +{ +lean_object* x_31; lean_object* x_32; +x_31 = lean_ctor_get(x_29, 0); +lean_inc(x_31); +lean_dec(x_29); +x_32 = lean_alloc_ctor(0, 1, 0); +lean_ctor_set(x_32, 0, x_31); +return x_32; +} +} +else +{ +lean_object* x_33; lean_object* x_34; lean_object* x_35; +x_33 = lean_ctor_get(x_29, 0); +lean_inc(x_33); +lean_dec(x_29); +x_34 = lean_box(0); +x_35 = l_Lean_RBNode_insert___at_Lean_NameMap_insert___spec__1___rarg(x_13, x_34, x_33); +x_1 = x_35; +x_2 = x_7; +goto _start; +} +} +} +else +{ +lean_object* x_37; lean_object* x_38; uint8_t x_39; +x_37 = lean_ctor_get(x_8, 0); +lean_inc(x_37); +lean_dec(x_8); +x_38 = l_Lean_Json_getObjValAs_x3f___at___private_Lean_Setup_0__Lean_fromJsonImport____x40_Lean_Setup___hyg_190____spec__1___closed__1; +x_39 = lean_string_dec_eq(x_5, x_38); +if (x_39 == 0) +{ +lean_object* x_40; uint8_t x_41; +lean_inc(x_5); +x_40 = l_String_toName(x_5); +x_41 = l_Lean_Name_isAnonymous(x_40); +if (x_41 == 0) +{ +lean_object* x_42; +lean_dec(x_5); +x_42 = l___private_Lean_Setup_0__Lean_fromJsonModuleArtifacts____x40_Lean_Setup___hyg_593_(x_6); +if (lean_obj_tag(x_42) == 0) +{ +lean_object* x_43; lean_object* x_44; lean_object* x_45; +lean_dec(x_40); +lean_dec(x_37); +lean_dec(x_7); +x_43 = lean_ctor_get(x_42, 0); +lean_inc(x_43); +if (lean_is_exclusive(x_42)) { + lean_ctor_release(x_42, 0); + x_44 = x_42; +} else { + lean_dec_ref(x_42); + x_44 = lean_box(0); +} +if (lean_is_scalar(x_44)) { + x_45 = lean_alloc_ctor(0, 1, 0); +} else { + x_45 = x_44; +} +lean_ctor_set(x_45, 0, x_43); +return x_45; +} +else +{ +lean_object* x_46; lean_object* x_47; +x_46 = lean_ctor_get(x_42, 0); +lean_inc(x_46); +lean_dec(x_42); +x_47 = l_Lean_RBNode_insert___at_Lean_NameMap_insert___spec__1___rarg(x_37, x_40, x_46); +x_1 = x_47; +x_2 = x_7; +goto _start; +} +} +else +{ +lean_object* x_49; lean_object* x_50; lean_object* x_51; lean_object* x_52; lean_object* x_53; +lean_dec(x_40); +lean_dec(x_37); +lean_dec(x_7); +lean_dec(x_6); +x_49 = l_Lean_Json_getObjValAs_x3f___at___private_Lean_Setup_0__Lean_fromJsonImport____x40_Lean_Setup___hyg_190____spec__1___closed__2; +x_50 = lean_string_append(x_49, x_5); +lean_dec(x_5); +x_51 = l_Lean_Json_getObjValAs_x3f___at___private_Lean_Setup_0__Lean_fromJsonImport____x40_Lean_Setup___hyg_190____spec__1___closed__3; +x_52 = lean_string_append(x_50, x_51); +x_53 = lean_alloc_ctor(0, 1, 0); +lean_ctor_set(x_53, 0, x_52); +return x_53; +} +} +else +{ +lean_object* x_54; +lean_dec(x_5); +x_54 = l___private_Lean_Setup_0__Lean_fromJsonModuleArtifacts____x40_Lean_Setup___hyg_593_(x_6); +if (lean_obj_tag(x_54) == 0) +{ +lean_object* x_55; lean_object* x_56; lean_object* x_57; +lean_dec(x_37); +lean_dec(x_7); +x_55 = lean_ctor_get(x_54, 0); +lean_inc(x_55); +if (lean_is_exclusive(x_54)) { + lean_ctor_release(x_54, 0); + x_56 = x_54; +} else { + lean_dec_ref(x_54); + x_56 = lean_box(0); +} +if (lean_is_scalar(x_56)) { + x_57 = lean_alloc_ctor(0, 1, 0); +} else { + x_57 = x_56; +} +lean_ctor_set(x_57, 0, x_55); +return x_57; +} +else +{ +lean_object* x_58; lean_object* x_59; lean_object* x_60; +x_58 = lean_ctor_get(x_54, 0); +lean_inc(x_58); +lean_dec(x_54); +x_59 = lean_box(0); +x_60 = l_Lean_RBNode_insert___at_Lean_NameMap_insert___spec__1___rarg(x_37, x_59, x_58); +x_1 = x_60; +x_2 = x_7; +goto _start; +} +} +} +} +} +} 
+} +static lean_object* _init_l_Lean_Json_getObjValAs_x3f___at___private_Lean_Setup_0__Lean_fromJsonModuleSetup____x40_Lean_Setup___hyg_1209____spec__3___closed__1() { +_start: +{ +lean_object* x_1; +x_1 = lean_mk_string_unchecked("expected a `NameMap`, got '", 27, 27); +return x_1; +} +} +LEAN_EXPORT lean_object* l_Lean_Json_getObjValAs_x3f___at___private_Lean_Setup_0__Lean_fromJsonModuleSetup____x40_Lean_Setup___hyg_1209____spec__3(lean_object* x_1, lean_object* x_2) { +_start: +{ +lean_object* x_3; +x_3 = l_Lean_Json_getObjValD(x_1, x_2); +switch (lean_obj_tag(x_3)) { +case 0: +{ +lean_object* x_4; lean_object* x_5; lean_object* x_6; lean_object* x_7; lean_object* x_8; lean_object* x_9; lean_object* x_10; +x_4 = lean_unsigned_to_nat(80u); +x_5 = l_Lean_Json_pretty(x_3, x_4); +x_6 = l_Lean_Json_getObjValAs_x3f___at___private_Lean_Setup_0__Lean_fromJsonModuleSetup____x40_Lean_Setup___hyg_1209____spec__3___closed__1; +x_7 = lean_string_append(x_6, x_5); +lean_dec(x_5); +x_8 = l_Lean_Json_getObjValAs_x3f___at___private_Lean_Setup_0__Lean_fromJsonImport____x40_Lean_Setup___hyg_190____spec__1___closed__3; +x_9 = lean_string_append(x_7, x_8); +x_10 = lean_alloc_ctor(0, 1, 0); +lean_ctor_set(x_10, 0, x_9); +return x_10; +} +case 1: +{ +lean_object* x_11; lean_object* x_12; lean_object* x_13; lean_object* x_14; lean_object* x_15; lean_object* x_16; lean_object* x_17; +x_11 = lean_unsigned_to_nat(80u); +x_12 = l_Lean_Json_pretty(x_3, x_11); +x_13 = l_Lean_Json_getObjValAs_x3f___at___private_Lean_Setup_0__Lean_fromJsonModuleSetup____x40_Lean_Setup___hyg_1209____spec__3___closed__1; +x_14 = lean_string_append(x_13, x_12); +lean_dec(x_12); +x_15 = l_Lean_Json_getObjValAs_x3f___at___private_Lean_Setup_0__Lean_fromJsonImport____x40_Lean_Setup___hyg_190____spec__1___closed__3; +x_16 = lean_string_append(x_14, x_15); +x_17 = lean_alloc_ctor(0, 1, 0); +lean_ctor_set(x_17, 0, x_16); +return x_17; +} +case 5: +{ +lean_object* x_18; lean_object* x_19; lean_object* x_20; +x_18 = lean_ctor_get(x_3, 0); +lean_inc(x_18); +lean_dec(x_3); +x_19 = lean_box(0); +x_20 = l_Lean_RBNode_foldM___at___private_Lean_Setup_0__Lean_fromJsonModuleSetup____x40_Lean_Setup___hyg_1209____spec__4(x_19, x_18); +return x_20; +} +default: +{ +lean_object* x_21; lean_object* x_22; uint8_t x_23; +x_21 = lean_unsigned_to_nat(80u); +lean_inc(x_3); +x_22 = l_Lean_Json_pretty(x_3, x_21); +x_23 = !lean_is_exclusive(x_3); +if (x_23 == 0) +{ +lean_object* x_24; lean_object* x_25; lean_object* x_26; lean_object* x_27; lean_object* x_28; +x_24 = lean_ctor_get(x_3, 0); +lean_dec(x_24); +x_25 = l_Lean_Json_getObjValAs_x3f___at___private_Lean_Setup_0__Lean_fromJsonModuleSetup____x40_Lean_Setup___hyg_1209____spec__3___closed__1; +x_26 = lean_string_append(x_25, x_22); +lean_dec(x_22); +x_27 = l_Lean_Json_getObjValAs_x3f___at___private_Lean_Setup_0__Lean_fromJsonImport____x40_Lean_Setup___hyg_190____spec__1___closed__3; +x_28 = lean_string_append(x_26, x_27); +lean_ctor_set_tag(x_3, 0); +lean_ctor_set(x_3, 0, x_28); +return x_3; +} +else +{ +lean_object* x_29; lean_object* x_30; lean_object* x_31; lean_object* x_32; lean_object* x_33; +lean_dec(x_3); +x_29 = l_Lean_Json_getObjValAs_x3f___at___private_Lean_Setup_0__Lean_fromJsonModuleSetup____x40_Lean_Setup___hyg_1209____spec__3___closed__1; +x_30 = lean_string_append(x_29, x_22); +lean_dec(x_22); +x_31 = l_Lean_Json_getObjValAs_x3f___at___private_Lean_Setup_0__Lean_fromJsonImport____x40_Lean_Setup___hyg_190____spec__1___closed__3; +x_32 = lean_string_append(x_30, x_31); +x_33 = lean_alloc_ctor(0, 1, 
0); +lean_ctor_set(x_33, 0, x_32); +return x_33; +} +} +} +} +} +LEAN_EXPORT lean_object* l_Lean_Json_getObjValAs_x3f___at___private_Lean_Setup_0__Lean_fromJsonModuleSetup____x40_Lean_Setup___hyg_1209____spec__5(lean_object* x_1, lean_object* x_2) { +_start: +{ +lean_object* x_3; +x_3 = l_Lean_Json_getObjValD(x_1, x_2); +switch (lean_obj_tag(x_3)) { +case 0: +{ +lean_object* x_4; lean_object* x_5; lean_object* x_6; lean_object* x_7; lean_object* x_8; lean_object* x_9; lean_object* x_10; +x_4 = lean_unsigned_to_nat(80u); +x_5 = l_Lean_Json_pretty(x_3, x_4); +x_6 = l_Lean_Json_getObjValAs_x3f___at___private_Lean_Setup_0__Lean_fromJsonModuleSetup____x40_Lean_Setup___hyg_1209____spec__3___closed__1; +x_7 = lean_string_append(x_6, x_5); +lean_dec(x_5); +x_8 = l_Lean_Json_getObjValAs_x3f___at___private_Lean_Setup_0__Lean_fromJsonImport____x40_Lean_Setup___hyg_190____spec__1___closed__3; +x_9 = lean_string_append(x_7, x_8); +x_10 = lean_alloc_ctor(0, 1, 0); +lean_ctor_set(x_10, 0, x_9); +return x_10; +} +case 1: +{ +lean_object* x_11; lean_object* x_12; lean_object* x_13; lean_object* x_14; lean_object* x_15; lean_object* x_16; lean_object* x_17; +x_11 = lean_unsigned_to_nat(80u); +x_12 = l_Lean_Json_pretty(x_3, x_11); +x_13 = l_Lean_Json_getObjValAs_x3f___at___private_Lean_Setup_0__Lean_fromJsonModuleSetup____x40_Lean_Setup___hyg_1209____spec__3___closed__1; +x_14 = lean_string_append(x_13, x_12); +lean_dec(x_12); +x_15 = l_Lean_Json_getObjValAs_x3f___at___private_Lean_Setup_0__Lean_fromJsonImport____x40_Lean_Setup___hyg_190____spec__1___closed__3; +x_16 = lean_string_append(x_14, x_15); +x_17 = lean_alloc_ctor(0, 1, 0); +lean_ctor_set(x_17, 0, x_16); +return x_17; +} +case 5: +{ +lean_object* x_18; lean_object* x_19; lean_object* x_20; +x_18 = lean_ctor_get(x_3, 0); +lean_inc(x_18); +lean_dec(x_3); +x_19 = lean_box(0); +x_20 = l_Lean_RBNode_foldM___at_Lean_instFromJsonLeanOptions___spec__1(x_19, x_18); +if (lean_obj_tag(x_20) == 0) +{ +uint8_t x_21; +x_21 = !lean_is_exclusive(x_20); +if (x_21 == 0) +{ +return x_20; +} +else +{ +lean_object* x_22; lean_object* x_23; +x_22 = lean_ctor_get(x_20, 0); +lean_inc(x_22); +lean_dec(x_20); +x_23 = lean_alloc_ctor(0, 1, 0); +lean_ctor_set(x_23, 0, x_22); +return x_23; +} +} +else +{ +uint8_t x_24; +x_24 = !lean_is_exclusive(x_20); +if (x_24 == 0) +{ +return x_20; +} +else +{ +lean_object* x_25; lean_object* x_26; +x_25 = lean_ctor_get(x_20, 0); +lean_inc(x_25); +lean_dec(x_20); +x_26 = lean_alloc_ctor(1, 1, 0); +lean_ctor_set(x_26, 0, x_25); +return x_26; +} +} +} +default: +{ +lean_object* x_27; lean_object* x_28; uint8_t x_29; +x_27 = lean_unsigned_to_nat(80u); +lean_inc(x_3); +x_28 = l_Lean_Json_pretty(x_3, x_27); +x_29 = !lean_is_exclusive(x_3); +if (x_29 == 0) +{ +lean_object* x_30; lean_object* x_31; lean_object* x_32; lean_object* x_33; lean_object* x_34; +x_30 = lean_ctor_get(x_3, 0); +lean_dec(x_30); +x_31 = l_Lean_Json_getObjValAs_x3f___at___private_Lean_Setup_0__Lean_fromJsonModuleSetup____x40_Lean_Setup___hyg_1209____spec__3___closed__1; +x_32 = lean_string_append(x_31, x_28); +lean_dec(x_28); +x_33 = l_Lean_Json_getObjValAs_x3f___at___private_Lean_Setup_0__Lean_fromJsonImport____x40_Lean_Setup___hyg_190____spec__1___closed__3; +x_34 = lean_string_append(x_32, x_33); +lean_ctor_set_tag(x_3, 0); +lean_ctor_set(x_3, 0, x_34); +return x_3; +} +else +{ +lean_object* x_35; lean_object* x_36; lean_object* x_37; lean_object* x_38; lean_object* x_39; +lean_dec(x_3); +x_35 = 
l_Lean_Json_getObjValAs_x3f___at___private_Lean_Setup_0__Lean_fromJsonModuleSetup____x40_Lean_Setup___hyg_1209____spec__3___closed__1; +x_36 = lean_string_append(x_35, x_28); +lean_dec(x_28); +x_37 = l_Lean_Json_getObjValAs_x3f___at___private_Lean_Setup_0__Lean_fromJsonImport____x40_Lean_Setup___hyg_190____spec__1___closed__3; +x_38 = lean_string_append(x_36, x_37); +x_39 = lean_alloc_ctor(0, 1, 0); +lean_ctor_set(x_39, 0, x_38); +return x_39; +} +} +} +} +} +static lean_object* _init_l___private_Lean_Setup_0__Lean_fromJsonModuleSetup____x40_Lean_Setup___hyg_1209____closed__1() { +_start: +{ +lean_object* x_1; +x_1 = lean_mk_string_unchecked("ModuleSetup", 11, 11); +return x_1; +} +} +static lean_object* _init_l___private_Lean_Setup_0__Lean_fromJsonModuleSetup____x40_Lean_Setup___hyg_1209____closed__2() { +_start: +{ +lean_object* x_1; lean_object* x_2; lean_object* x_3; +x_1 = l___private_Lean_Setup_0__Lean_fromJsonImport____x40_Lean_Setup___hyg_190____closed__1; +x_2 = l___private_Lean_Setup_0__Lean_fromJsonModuleSetup____x40_Lean_Setup___hyg_1209____closed__1; +x_3 = l_Lean_Name_mkStr2(x_1, x_2); +return x_3; +} +} +static lean_object* _init_l___private_Lean_Setup_0__Lean_fromJsonModuleSetup____x40_Lean_Setup___hyg_1209____closed__3() { +_start: +{ +lean_object* x_1; uint8_t x_2; lean_object* x_3; lean_object* x_4; +x_1 = l___private_Lean_Setup_0__Lean_fromJsonModuleSetup____x40_Lean_Setup___hyg_1209____closed__2; +x_2 = 1; +x_3 = l___private_Lean_Setup_0__Lean_toJsonImport____x40_Lean_Setup___hyg_124____closed__1; +x_4 = l_Lean_Name_toString(x_1, x_2, x_3); +return x_4; +} +} +static lean_object* _init_l___private_Lean_Setup_0__Lean_fromJsonModuleSetup____x40_Lean_Setup___hyg_1209____closed__4() { +_start: +{ +lean_object* x_1; lean_object* x_2; lean_object* x_3; +x_1 = l___private_Lean_Setup_0__Lean_fromJsonModuleSetup____x40_Lean_Setup___hyg_1209____closed__3; +x_2 = l___private_Lean_Setup_0__Lean_fromJsonImport____x40_Lean_Setup___hyg_190____closed__5; +x_3 = lean_string_append(x_1, x_2); +return x_3; +} +} +static lean_object* _init_l___private_Lean_Setup_0__Lean_fromJsonModuleSetup____x40_Lean_Setup___hyg_1209____closed__5() { +_start: +{ +lean_object* x_1; lean_object* x_2; lean_object* x_3; +x_1 = lean_box(0); +x_2 = l___private_Lean_Setup_0__Lean_reprModuleSetup____x40_Lean_Setup___hyg_913____closed__1; +x_3 = l_Lean_Name_str___override(x_1, x_2); +return x_3; +} +} +static lean_object* _init_l___private_Lean_Setup_0__Lean_fromJsonModuleSetup____x40_Lean_Setup___hyg_1209____closed__6() { +_start: +{ +lean_object* x_1; uint8_t x_2; lean_object* x_3; lean_object* x_4; +x_1 = l___private_Lean_Setup_0__Lean_fromJsonModuleSetup____x40_Lean_Setup___hyg_1209____closed__5; +x_2 = 1; +x_3 = l___private_Lean_Setup_0__Lean_toJsonImport____x40_Lean_Setup___hyg_124____closed__1; +x_4 = l_Lean_Name_toString(x_1, x_2, x_3); +return x_4; +} +} +static lean_object* _init_l___private_Lean_Setup_0__Lean_fromJsonModuleSetup____x40_Lean_Setup___hyg_1209____closed__7() { +_start: +{ +lean_object* x_1; lean_object* x_2; lean_object* x_3; +x_1 = l___private_Lean_Setup_0__Lean_fromJsonModuleSetup____x40_Lean_Setup___hyg_1209____closed__4; +x_2 = l___private_Lean_Setup_0__Lean_fromJsonModuleSetup____x40_Lean_Setup___hyg_1209____closed__6; +x_3 = lean_string_append(x_1, x_2); +return x_3; +} +} +static lean_object* _init_l___private_Lean_Setup_0__Lean_fromJsonModuleSetup____x40_Lean_Setup___hyg_1209____closed__8() { +_start: +{ +lean_object* x_1; lean_object* x_2; lean_object* x_3; +x_1 = 
l___private_Lean_Setup_0__Lean_fromJsonModuleSetup____x40_Lean_Setup___hyg_1209____closed__7; +x_2 = l___private_Lean_Setup_0__Lean_fromJsonImport____x40_Lean_Setup___hyg_190____closed__10; +x_3 = lean_string_append(x_1, x_2); +return x_3; +} +} +static lean_object* _init_l___private_Lean_Setup_0__Lean_fromJsonModuleSetup____x40_Lean_Setup___hyg_1209____closed__9() { +_start: +{ +lean_object* x_1; lean_object* x_2; lean_object* x_3; +x_1 = lean_box(0); +x_2 = l___private_Lean_Setup_0__Lean_reprModuleSetup____x40_Lean_Setup___hyg_913____closed__6; +x_3 = l_Lean_Name_str___override(x_1, x_2); +return x_3; +} +} +static lean_object* _init_l___private_Lean_Setup_0__Lean_fromJsonModuleSetup____x40_Lean_Setup___hyg_1209____closed__10() { +_start: +{ +lean_object* x_1; uint8_t x_2; lean_object* x_3; lean_object* x_4; +x_1 = l___private_Lean_Setup_0__Lean_fromJsonModuleSetup____x40_Lean_Setup___hyg_1209____closed__9; +x_2 = 1; +x_3 = l___private_Lean_Setup_0__Lean_toJsonImport____x40_Lean_Setup___hyg_124____closed__1; +x_4 = l_Lean_Name_toString(x_1, x_2, x_3); +return x_4; +} +} +static lean_object* _init_l___private_Lean_Setup_0__Lean_fromJsonModuleSetup____x40_Lean_Setup___hyg_1209____closed__11() { +_start: +{ +lean_object* x_1; lean_object* x_2; lean_object* x_3; +x_1 = l___private_Lean_Setup_0__Lean_fromJsonModuleSetup____x40_Lean_Setup___hyg_1209____closed__4; +x_2 = l___private_Lean_Setup_0__Lean_fromJsonModuleSetup____x40_Lean_Setup___hyg_1209____closed__10; +x_3 = lean_string_append(x_1, x_2); +return x_3; +} +} +static lean_object* _init_l___private_Lean_Setup_0__Lean_fromJsonModuleSetup____x40_Lean_Setup___hyg_1209____closed__12() { +_start: +{ +lean_object* x_1; lean_object* x_2; lean_object* x_3; +x_1 = l___private_Lean_Setup_0__Lean_fromJsonModuleSetup____x40_Lean_Setup___hyg_1209____closed__11; +x_2 = l___private_Lean_Setup_0__Lean_fromJsonImport____x40_Lean_Setup___hyg_190____closed__10; +x_3 = lean_string_append(x_1, x_2); +return x_3; +} +} +static lean_object* _init_l___private_Lean_Setup_0__Lean_fromJsonModuleSetup____x40_Lean_Setup___hyg_1209____closed__13() { +_start: +{ +lean_object* x_1; lean_object* x_2; lean_object* x_3; +x_1 = lean_box(0); +x_2 = l___private_Lean_Setup_0__Lean_reprModuleSetup____x40_Lean_Setup___hyg_913____closed__9; +x_3 = l_Lean_Name_str___override(x_1, x_2); +return x_3; +} +} +static lean_object* _init_l___private_Lean_Setup_0__Lean_fromJsonModuleSetup____x40_Lean_Setup___hyg_1209____closed__14() { +_start: +{ +lean_object* x_1; uint8_t x_2; lean_object* x_3; lean_object* x_4; +x_1 = l___private_Lean_Setup_0__Lean_fromJsonModuleSetup____x40_Lean_Setup___hyg_1209____closed__13; +x_2 = 1; +x_3 = l___private_Lean_Setup_0__Lean_toJsonImport____x40_Lean_Setup___hyg_124____closed__1; +x_4 = l_Lean_Name_toString(x_1, x_2, x_3); +return x_4; +} +} +static lean_object* _init_l___private_Lean_Setup_0__Lean_fromJsonModuleSetup____x40_Lean_Setup___hyg_1209____closed__15() { +_start: +{ +lean_object* x_1; lean_object* x_2; lean_object* x_3; +x_1 = l___private_Lean_Setup_0__Lean_fromJsonModuleSetup____x40_Lean_Setup___hyg_1209____closed__4; +x_2 = l___private_Lean_Setup_0__Lean_fromJsonModuleSetup____x40_Lean_Setup___hyg_1209____closed__14; +x_3 = lean_string_append(x_1, x_2); +return x_3; +} +} +static lean_object* _init_l___private_Lean_Setup_0__Lean_fromJsonModuleSetup____x40_Lean_Setup___hyg_1209____closed__16() { +_start: +{ +lean_object* x_1; lean_object* x_2; lean_object* x_3; +x_1 = 
l___private_Lean_Setup_0__Lean_fromJsonModuleSetup____x40_Lean_Setup___hyg_1209____closed__15; +x_2 = l___private_Lean_Setup_0__Lean_fromJsonImport____x40_Lean_Setup___hyg_190____closed__10; +x_3 = lean_string_append(x_1, x_2); +return x_3; +} +} +static lean_object* _init_l___private_Lean_Setup_0__Lean_fromJsonModuleSetup____x40_Lean_Setup___hyg_1209____closed__17() { +_start: +{ +lean_object* x_1; lean_object* x_2; lean_object* x_3; +x_1 = lean_box(0); +x_2 = l___private_Lean_Setup_0__Lean_reprModuleSetup____x40_Lean_Setup___hyg_913____closed__12; +x_3 = l_Lean_Name_str___override(x_1, x_2); +return x_3; +} +} +static lean_object* _init_l___private_Lean_Setup_0__Lean_fromJsonModuleSetup____x40_Lean_Setup___hyg_1209____closed__18() { +_start: +{ +lean_object* x_1; uint8_t x_2; lean_object* x_3; lean_object* x_4; +x_1 = l___private_Lean_Setup_0__Lean_fromJsonModuleSetup____x40_Lean_Setup___hyg_1209____closed__17; +x_2 = 1; +x_3 = l___private_Lean_Setup_0__Lean_toJsonImport____x40_Lean_Setup___hyg_124____closed__1; +x_4 = l_Lean_Name_toString(x_1, x_2, x_3); +return x_4; +} +} +static lean_object* _init_l___private_Lean_Setup_0__Lean_fromJsonModuleSetup____x40_Lean_Setup___hyg_1209____closed__19() { +_start: +{ +lean_object* x_1; lean_object* x_2; lean_object* x_3; +x_1 = l___private_Lean_Setup_0__Lean_fromJsonModuleSetup____x40_Lean_Setup___hyg_1209____closed__4; +x_2 = l___private_Lean_Setup_0__Lean_fromJsonModuleSetup____x40_Lean_Setup___hyg_1209____closed__18; +x_3 = lean_string_append(x_1, x_2); +return x_3; +} +} +static lean_object* _init_l___private_Lean_Setup_0__Lean_fromJsonModuleSetup____x40_Lean_Setup___hyg_1209____closed__20() { +_start: +{ +lean_object* x_1; lean_object* x_2; lean_object* x_3; +x_1 = l___private_Lean_Setup_0__Lean_fromJsonModuleSetup____x40_Lean_Setup___hyg_1209____closed__19; +x_2 = l___private_Lean_Setup_0__Lean_fromJsonImport____x40_Lean_Setup___hyg_190____closed__10; +x_3 = lean_string_append(x_1, x_2); +return x_3; +} +} +static lean_object* _init_l___private_Lean_Setup_0__Lean_fromJsonModuleSetup____x40_Lean_Setup___hyg_1209____closed__21() { +_start: +{ +lean_object* x_1; lean_object* x_2; lean_object* x_3; +x_1 = lean_box(0); +x_2 = l___private_Lean_Setup_0__Lean_reprModuleSetup____x40_Lean_Setup___hyg_913____closed__16; +x_3 = l_Lean_Name_str___override(x_1, x_2); +return x_3; +} +} +static lean_object* _init_l___private_Lean_Setup_0__Lean_fromJsonModuleSetup____x40_Lean_Setup___hyg_1209____closed__22() { +_start: +{ +lean_object* x_1; uint8_t x_2; lean_object* x_3; lean_object* x_4; +x_1 = l___private_Lean_Setup_0__Lean_fromJsonModuleSetup____x40_Lean_Setup___hyg_1209____closed__21; +x_2 = 1; +x_3 = l___private_Lean_Setup_0__Lean_toJsonImport____x40_Lean_Setup___hyg_124____closed__1; +x_4 = l_Lean_Name_toString(x_1, x_2, x_3); +return x_4; +} +} +static lean_object* _init_l___private_Lean_Setup_0__Lean_fromJsonModuleSetup____x40_Lean_Setup___hyg_1209____closed__23() { +_start: +{ +lean_object* x_1; lean_object* x_2; lean_object* x_3; +x_1 = l___private_Lean_Setup_0__Lean_fromJsonModuleSetup____x40_Lean_Setup___hyg_1209____closed__4; +x_2 = l___private_Lean_Setup_0__Lean_fromJsonModuleSetup____x40_Lean_Setup___hyg_1209____closed__22; +x_3 = lean_string_append(x_1, x_2); +return x_3; +} +} +static lean_object* _init_l___private_Lean_Setup_0__Lean_fromJsonModuleSetup____x40_Lean_Setup___hyg_1209____closed__24() { +_start: +{ +lean_object* x_1; lean_object* x_2; lean_object* x_3; +x_1 = 
l___private_Lean_Setup_0__Lean_fromJsonModuleSetup____x40_Lean_Setup___hyg_1209____closed__23; +x_2 = l___private_Lean_Setup_0__Lean_fromJsonImport____x40_Lean_Setup___hyg_190____closed__10; +x_3 = lean_string_append(x_1, x_2); +return x_3; +} +} +static lean_object* _init_l___private_Lean_Setup_0__Lean_fromJsonModuleSetup____x40_Lean_Setup___hyg_1209____closed__25() { +_start: +{ +lean_object* x_1; lean_object* x_2; lean_object* x_3; +x_1 = lean_box(0); +x_2 = l___private_Lean_Setup_0__Lean_reprModuleSetup____x40_Lean_Setup___hyg_913____closed__18; +x_3 = l_Lean_Name_str___override(x_1, x_2); +return x_3; +} +} +static lean_object* _init_l___private_Lean_Setup_0__Lean_fromJsonModuleSetup____x40_Lean_Setup___hyg_1209____closed__26() { +_start: +{ +lean_object* x_1; uint8_t x_2; lean_object* x_3; lean_object* x_4; +x_1 = l___private_Lean_Setup_0__Lean_fromJsonModuleSetup____x40_Lean_Setup___hyg_1209____closed__25; +x_2 = 1; +x_3 = l___private_Lean_Setup_0__Lean_toJsonImport____x40_Lean_Setup___hyg_124____closed__1; +x_4 = l_Lean_Name_toString(x_1, x_2, x_3); +return x_4; +} +} +static lean_object* _init_l___private_Lean_Setup_0__Lean_fromJsonModuleSetup____x40_Lean_Setup___hyg_1209____closed__27() { +_start: +{ +lean_object* x_1; lean_object* x_2; lean_object* x_3; +x_1 = l___private_Lean_Setup_0__Lean_fromJsonModuleSetup____x40_Lean_Setup___hyg_1209____closed__4; +x_2 = l___private_Lean_Setup_0__Lean_fromJsonModuleSetup____x40_Lean_Setup___hyg_1209____closed__26; +x_3 = lean_string_append(x_1, x_2); +return x_3; +} +} +static lean_object* _init_l___private_Lean_Setup_0__Lean_fromJsonModuleSetup____x40_Lean_Setup___hyg_1209____closed__28() { +_start: +{ +lean_object* x_1; lean_object* x_2; lean_object* x_3; +x_1 = l___private_Lean_Setup_0__Lean_fromJsonModuleSetup____x40_Lean_Setup___hyg_1209____closed__27; +x_2 = l___private_Lean_Setup_0__Lean_fromJsonImport____x40_Lean_Setup___hyg_190____closed__10; +x_3 = lean_string_append(x_1, x_2); +return x_3; +} +} +static lean_object* _init_l___private_Lean_Setup_0__Lean_fromJsonModuleSetup____x40_Lean_Setup___hyg_1209____closed__29() { +_start: +{ +lean_object* x_1; lean_object* x_2; lean_object* x_3; +x_1 = lean_box(0); +x_2 = l___private_Lean_Setup_0__Lean_reprModuleSetup____x40_Lean_Setup___hyg_913____closed__20; +x_3 = l_Lean_Name_str___override(x_1, x_2); +return x_3; +} +} +static lean_object* _init_l___private_Lean_Setup_0__Lean_fromJsonModuleSetup____x40_Lean_Setup___hyg_1209____closed__30() { +_start: +{ +lean_object* x_1; uint8_t x_2; lean_object* x_3; lean_object* x_4; +x_1 = l___private_Lean_Setup_0__Lean_fromJsonModuleSetup____x40_Lean_Setup___hyg_1209____closed__29; +x_2 = 1; +x_3 = l___private_Lean_Setup_0__Lean_toJsonImport____x40_Lean_Setup___hyg_124____closed__1; +x_4 = l_Lean_Name_toString(x_1, x_2, x_3); +return x_4; +} +} +static lean_object* _init_l___private_Lean_Setup_0__Lean_fromJsonModuleSetup____x40_Lean_Setup___hyg_1209____closed__31() { +_start: +{ +lean_object* x_1; lean_object* x_2; lean_object* x_3; +x_1 = l___private_Lean_Setup_0__Lean_fromJsonModuleSetup____x40_Lean_Setup___hyg_1209____closed__4; +x_2 = l___private_Lean_Setup_0__Lean_fromJsonModuleSetup____x40_Lean_Setup___hyg_1209____closed__30; +x_3 = lean_string_append(x_1, x_2); +return x_3; +} +} +static lean_object* _init_l___private_Lean_Setup_0__Lean_fromJsonModuleSetup____x40_Lean_Setup___hyg_1209____closed__32() { +_start: +{ +lean_object* x_1; lean_object* x_2; lean_object* x_3; +x_1 = 
l___private_Lean_Setup_0__Lean_fromJsonModuleSetup____x40_Lean_Setup___hyg_1209____closed__31; +x_2 = l___private_Lean_Setup_0__Lean_fromJsonImport____x40_Lean_Setup___hyg_190____closed__10; +x_3 = lean_string_append(x_1, x_2); +return x_3; +} +} +LEAN_EXPORT lean_object* l___private_Lean_Setup_0__Lean_fromJsonModuleSetup____x40_Lean_Setup___hyg_1209_(lean_object* x_1) { +_start: +{ +lean_object* x_2; lean_object* x_3; +x_2 = l___private_Lean_Setup_0__Lean_reprModuleSetup____x40_Lean_Setup___hyg_913____closed__1; +lean_inc(x_1); +x_3 = l_Lean_Json_getObjValAs_x3f___at___private_Lean_Setup_0__Lean_fromJsonImport____x40_Lean_Setup___hyg_190____spec__1(x_1, x_2); +if (lean_obj_tag(x_3) == 0) +{ +uint8_t x_4; +lean_dec(x_1); +x_4 = !lean_is_exclusive(x_3); +if (x_4 == 0) +{ +lean_object* x_5; lean_object* x_6; lean_object* x_7; +x_5 = lean_ctor_get(x_3, 0); +x_6 = l___private_Lean_Setup_0__Lean_fromJsonModuleSetup____x40_Lean_Setup___hyg_1209____closed__8; +x_7 = lean_string_append(x_6, x_5); +lean_dec(x_5); +lean_ctor_set(x_3, 0, x_7); +return x_3; +} +else +{ +lean_object* x_8; lean_object* x_9; lean_object* x_10; lean_object* x_11; +x_8 = lean_ctor_get(x_3, 0); +lean_inc(x_8); +lean_dec(x_3); +x_9 = l___private_Lean_Setup_0__Lean_fromJsonModuleSetup____x40_Lean_Setup___hyg_1209____closed__8; +x_10 = lean_string_append(x_9, x_8); +lean_dec(x_8); +x_11 = lean_alloc_ctor(0, 1, 0); +lean_ctor_set(x_11, 0, x_10); +return x_11; +} +} +else +{ +lean_object* x_12; lean_object* x_13; lean_object* x_14; +x_12 = lean_ctor_get(x_3, 0); +lean_inc(x_12); +lean_dec(x_3); +x_13 = l___private_Lean_Setup_0__Lean_reprModuleSetup____x40_Lean_Setup___hyg_913____closed__6; +lean_inc(x_1); +x_14 = l_Lean_Json_getObjValAs_x3f___at___private_Lean_Setup_0__Lean_fromJsonImport____x40_Lean_Setup___hyg_190____spec__2(x_1, x_13); +if (lean_obj_tag(x_14) == 0) +{ +uint8_t x_15; +lean_dec(x_12); +lean_dec(x_1); +x_15 = !lean_is_exclusive(x_14); +if (x_15 == 0) +{ +lean_object* x_16; lean_object* x_17; lean_object* x_18; +x_16 = lean_ctor_get(x_14, 0); +x_17 = l___private_Lean_Setup_0__Lean_fromJsonModuleSetup____x40_Lean_Setup___hyg_1209____closed__12; +x_18 = lean_string_append(x_17, x_16); +lean_dec(x_16); +lean_ctor_set(x_14, 0, x_18); +return x_14; +} +else +{ +lean_object* x_19; lean_object* x_20; lean_object* x_21; lean_object* x_22; +x_19 = lean_ctor_get(x_14, 0); +lean_inc(x_19); +lean_dec(x_14); +x_20 = l___private_Lean_Setup_0__Lean_fromJsonModuleSetup____x40_Lean_Setup___hyg_1209____closed__12; +x_21 = lean_string_append(x_20, x_19); +lean_dec(x_19); +x_22 = lean_alloc_ctor(0, 1, 0); +lean_ctor_set(x_22, 0, x_21); +return x_22; +} +} +else +{ +lean_object* x_23; lean_object* x_24; lean_object* x_25; +x_23 = lean_ctor_get(x_14, 0); +lean_inc(x_23); +lean_dec(x_14); +x_24 = l___private_Lean_Setup_0__Lean_reprModuleSetup____x40_Lean_Setup___hyg_913____closed__9; +lean_inc(x_1); +x_25 = l_Lean_Json_getObjValAs_x3f___at___private_Lean_Setup_0__Lean_fromJsonModuleSetup____x40_Lean_Setup___hyg_1209____spec__1(x_1, x_24); +if (lean_obj_tag(x_25) == 0) +{ +uint8_t x_26; +lean_dec(x_23); +lean_dec(x_12); +lean_dec(x_1); +x_26 = !lean_is_exclusive(x_25); +if (x_26 == 0) +{ +lean_object* x_27; lean_object* x_28; lean_object* x_29; +x_27 = lean_ctor_get(x_25, 0); +x_28 = l___private_Lean_Setup_0__Lean_fromJsonModuleSetup____x40_Lean_Setup___hyg_1209____closed__16; +x_29 = lean_string_append(x_28, x_27); +lean_dec(x_27); +lean_ctor_set(x_25, 0, x_29); +return x_25; +} +else +{ +lean_object* x_30; lean_object* x_31; 
lean_object* x_32; lean_object* x_33; +x_30 = lean_ctor_get(x_25, 0); +lean_inc(x_30); +lean_dec(x_25); +x_31 = l___private_Lean_Setup_0__Lean_fromJsonModuleSetup____x40_Lean_Setup___hyg_1209____closed__16; +x_32 = lean_string_append(x_31, x_30); +lean_dec(x_30); +x_33 = lean_alloc_ctor(0, 1, 0); +lean_ctor_set(x_33, 0, x_32); +return x_33; +} +} +else +{ +lean_object* x_34; lean_object* x_35; lean_object* x_36; +x_34 = lean_ctor_get(x_25, 0); +lean_inc(x_34); +lean_dec(x_25); +x_35 = l___private_Lean_Setup_0__Lean_reprModuleSetup____x40_Lean_Setup___hyg_913____closed__12; +lean_inc(x_1); +x_36 = l_Lean_Json_getObjValAs_x3f___at___private_Lean_Setup_0__Lean_fromJsonModuleSetup____x40_Lean_Setup___hyg_1209____spec__3(x_1, x_35); +if (lean_obj_tag(x_36) == 0) +{ +uint8_t x_37; +lean_dec(x_34); +lean_dec(x_23); +lean_dec(x_12); +lean_dec(x_1); +x_37 = !lean_is_exclusive(x_36); +if (x_37 == 0) +{ +lean_object* x_38; lean_object* x_39; lean_object* x_40; +x_38 = lean_ctor_get(x_36, 0); +x_39 = l___private_Lean_Setup_0__Lean_fromJsonModuleSetup____x40_Lean_Setup___hyg_1209____closed__20; +x_40 = lean_string_append(x_39, x_38); +lean_dec(x_38); +lean_ctor_set(x_36, 0, x_40); +return x_36; +} +else +{ +lean_object* x_41; lean_object* x_42; lean_object* x_43; lean_object* x_44; +x_41 = lean_ctor_get(x_36, 0); +lean_inc(x_41); +lean_dec(x_36); +x_42 = l___private_Lean_Setup_0__Lean_fromJsonModuleSetup____x40_Lean_Setup___hyg_1209____closed__20; +x_43 = lean_string_append(x_42, x_41); +lean_dec(x_41); +x_44 = lean_alloc_ctor(0, 1, 0); +lean_ctor_set(x_44, 0, x_43); +return x_44; +} +} +else +{ +lean_object* x_45; lean_object* x_46; lean_object* x_47; +x_45 = lean_ctor_get(x_36, 0); +lean_inc(x_45); +lean_dec(x_36); +x_46 = l___private_Lean_Setup_0__Lean_reprModuleSetup____x40_Lean_Setup___hyg_913____closed__16; +lean_inc(x_1); +x_47 = l_Lean_Json_getObjValAs_x3f___at___private_Lean_Util_Paths_0__Lean_fromJsonLeanPaths____x40_Lean_Util_Paths___hyg_135____spec__3(x_1, x_46); +if (lean_obj_tag(x_47) == 0) +{ +uint8_t x_48; +lean_dec(x_45); +lean_dec(x_34); +lean_dec(x_23); +lean_dec(x_12); +lean_dec(x_1); +x_48 = !lean_is_exclusive(x_47); +if (x_48 == 0) +{ +lean_object* x_49; lean_object* x_50; lean_object* x_51; +x_49 = lean_ctor_get(x_47, 0); +x_50 = l___private_Lean_Setup_0__Lean_fromJsonModuleSetup____x40_Lean_Setup___hyg_1209____closed__24; +x_51 = lean_string_append(x_50, x_49); +lean_dec(x_49); +lean_ctor_set(x_47, 0, x_51); +return x_47; +} +else +{ +lean_object* x_52; lean_object* x_53; lean_object* x_54; lean_object* x_55; +x_52 = lean_ctor_get(x_47, 0); +lean_inc(x_52); +lean_dec(x_47); +x_53 = l___private_Lean_Setup_0__Lean_fromJsonModuleSetup____x40_Lean_Setup___hyg_1209____closed__24; +x_54 = lean_string_append(x_53, x_52); +lean_dec(x_52); +x_55 = lean_alloc_ctor(0, 1, 0); +lean_ctor_set(x_55, 0, x_54); +return x_55; +} +} +else +{ +lean_object* x_56; lean_object* x_57; lean_object* x_58; +x_56 = lean_ctor_get(x_47, 0); +lean_inc(x_56); +lean_dec(x_47); +x_57 = l___private_Lean_Setup_0__Lean_reprModuleSetup____x40_Lean_Setup___hyg_913____closed__18; +lean_inc(x_1); +x_58 = l_Lean_Json_getObjValAs_x3f___at___private_Lean_Util_Paths_0__Lean_fromJsonLeanPaths____x40_Lean_Util_Paths___hyg_135____spec__3(x_1, x_57); +if (lean_obj_tag(x_58) == 0) +{ +uint8_t x_59; +lean_dec(x_56); +lean_dec(x_45); +lean_dec(x_34); +lean_dec(x_23); +lean_dec(x_12); +lean_dec(x_1); +x_59 = !lean_is_exclusive(x_58); +if (x_59 == 0) +{ +lean_object* x_60; lean_object* x_61; lean_object* x_62; +x_60 = 
lean_ctor_get(x_58, 0); +x_61 = l___private_Lean_Setup_0__Lean_fromJsonModuleSetup____x40_Lean_Setup___hyg_1209____closed__28; +x_62 = lean_string_append(x_61, x_60); +lean_dec(x_60); +lean_ctor_set(x_58, 0, x_62); +return x_58; +} +else +{ +lean_object* x_63; lean_object* x_64; lean_object* x_65; lean_object* x_66; +x_63 = lean_ctor_get(x_58, 0); +lean_inc(x_63); +lean_dec(x_58); +x_64 = l___private_Lean_Setup_0__Lean_fromJsonModuleSetup____x40_Lean_Setup___hyg_1209____closed__28; +x_65 = lean_string_append(x_64, x_63); +lean_dec(x_63); +x_66 = lean_alloc_ctor(0, 1, 0); +lean_ctor_set(x_66, 0, x_65); +return x_66; +} +} +else +{ +lean_object* x_67; lean_object* x_68; lean_object* x_69; +x_67 = lean_ctor_get(x_58, 0); +lean_inc(x_67); +lean_dec(x_58); +x_68 = l___private_Lean_Setup_0__Lean_reprModuleSetup____x40_Lean_Setup___hyg_913____closed__20; +x_69 = l_Lean_Json_getObjValAs_x3f___at___private_Lean_Setup_0__Lean_fromJsonModuleSetup____x40_Lean_Setup___hyg_1209____spec__5(x_1, x_68); +if (lean_obj_tag(x_69) == 0) +{ +uint8_t x_70; +lean_dec(x_67); +lean_dec(x_56); +lean_dec(x_45); +lean_dec(x_34); +lean_dec(x_23); +lean_dec(x_12); +x_70 = !lean_is_exclusive(x_69); +if (x_70 == 0) +{ +lean_object* x_71; lean_object* x_72; lean_object* x_73; +x_71 = lean_ctor_get(x_69, 0); +x_72 = l___private_Lean_Setup_0__Lean_fromJsonModuleSetup____x40_Lean_Setup___hyg_1209____closed__32; +x_73 = lean_string_append(x_72, x_71); +lean_dec(x_71); +lean_ctor_set(x_69, 0, x_73); +return x_69; +} +else +{ +lean_object* x_74; lean_object* x_75; lean_object* x_76; lean_object* x_77; +x_74 = lean_ctor_get(x_69, 0); +lean_inc(x_74); +lean_dec(x_69); +x_75 = l___private_Lean_Setup_0__Lean_fromJsonModuleSetup____x40_Lean_Setup___hyg_1209____closed__32; +x_76 = lean_string_append(x_75, x_74); +lean_dec(x_74); +x_77 = lean_alloc_ctor(0, 1, 0); +lean_ctor_set(x_77, 0, x_76); +return x_77; +} +} +else +{ +uint8_t x_78; +x_78 = !lean_is_exclusive(x_69); +if (x_78 == 0) +{ +lean_object* x_79; lean_object* x_80; uint8_t x_81; +x_79 = lean_ctor_get(x_69, 0); +x_80 = lean_alloc_ctor(0, 6, 1); +lean_ctor_set(x_80, 0, x_12); +lean_ctor_set(x_80, 1, x_34); +lean_ctor_set(x_80, 2, x_45); +lean_ctor_set(x_80, 3, x_56); +lean_ctor_set(x_80, 4, x_67); +lean_ctor_set(x_80, 5, x_79); +x_81 = lean_unbox(x_23); +lean_dec(x_23); +lean_ctor_set_uint8(x_80, sizeof(void*)*6, x_81); +lean_ctor_set(x_69, 0, x_80); +return x_69; +} +else +{ +lean_object* x_82; lean_object* x_83; uint8_t x_84; lean_object* x_85; +x_82 = lean_ctor_get(x_69, 0); +lean_inc(x_82); +lean_dec(x_69); +x_83 = lean_alloc_ctor(0, 6, 1); +lean_ctor_set(x_83, 0, x_12); +lean_ctor_set(x_83, 1, x_34); +lean_ctor_set(x_83, 2, x_45); +lean_ctor_set(x_83, 3, x_56); +lean_ctor_set(x_83, 4, x_67); +lean_ctor_set(x_83, 5, x_82); +x_84 = lean_unbox(x_23); +lean_dec(x_23); +lean_ctor_set_uint8(x_83, sizeof(void*)*6, x_84); +x_85 = lean_alloc_ctor(1, 1, 0); +lean_ctor_set(x_85, 0, x_83); +return x_85; +} +} +} +} +} +} +} +} +} +} +LEAN_EXPORT lean_object* l_Array_mapMUnsafe_map___at___private_Lean_Setup_0__Lean_fromJsonModuleSetup____x40_Lean_Setup___hyg_1209____spec__2___boxed(lean_object* x_1, lean_object* x_2, lean_object* x_3) { +_start: +{ +size_t x_4; size_t x_5; lean_object* x_6; +x_4 = lean_unbox_usize(x_1); +lean_dec(x_1); +x_5 = lean_unbox_usize(x_2); +lean_dec(x_2); +x_6 = l_Array_mapMUnsafe_map___at___private_Lean_Setup_0__Lean_fromJsonModuleSetup____x40_Lean_Setup___hyg_1209____spec__2(x_4, x_5, x_3); +return x_6; +} +} +LEAN_EXPORT lean_object* 
l_Lean_Json_getObjValAs_x3f___at___private_Lean_Setup_0__Lean_fromJsonModuleSetup____x40_Lean_Setup___hyg_1209____spec__1___boxed(lean_object* x_1, lean_object* x_2) { +_start: +{ +lean_object* x_3; +x_3 = l_Lean_Json_getObjValAs_x3f___at___private_Lean_Setup_0__Lean_fromJsonModuleSetup____x40_Lean_Setup___hyg_1209____spec__1(x_1, x_2); +lean_dec(x_2); +return x_3; +} +} +LEAN_EXPORT lean_object* l_Lean_Json_getObjValAs_x3f___at___private_Lean_Setup_0__Lean_fromJsonModuleSetup____x40_Lean_Setup___hyg_1209____spec__3___boxed(lean_object* x_1, lean_object* x_2) { +_start: +{ +lean_object* x_3; +x_3 = l_Lean_Json_getObjValAs_x3f___at___private_Lean_Setup_0__Lean_fromJsonModuleSetup____x40_Lean_Setup___hyg_1209____spec__3(x_1, x_2); +lean_dec(x_2); +return x_3; +} +} +LEAN_EXPORT lean_object* l_Lean_Json_getObjValAs_x3f___at___private_Lean_Setup_0__Lean_fromJsonModuleSetup____x40_Lean_Setup___hyg_1209____spec__5___boxed(lean_object* x_1, lean_object* x_2) { +_start: +{ +lean_object* x_3; +x_3 = l_Lean_Json_getObjValAs_x3f___at___private_Lean_Setup_0__Lean_fromJsonModuleSetup____x40_Lean_Setup___hyg_1209____spec__5(x_1, x_2); +lean_dec(x_2); +return x_3; +} +} +static lean_object* _init_l_Lean_instFromJsonModuleSetup___closed__1() { +_start: +{ +lean_object* x_1; +x_1 = lean_alloc_closure((void*)(l___private_Lean_Setup_0__Lean_fromJsonModuleSetup____x40_Lean_Setup___hyg_1209_), 1, 0); +return x_1; +} +} +static lean_object* _init_l_Lean_instFromJsonModuleSetup() { +_start: +{ +lean_object* x_1; +x_1 = l_Lean_instFromJsonModuleSetup___closed__1; +return x_1; +} +} +static lean_object* _init_l_Lean_ModuleSetup_load___closed__1() { +_start: +{ +lean_object* x_1; +x_1 = lean_alloc_closure((void*)(l_Lean_Json_Parser_any), 1, 0); +return x_1; +} +} +static lean_object* _init_l_Lean_ModuleSetup_load___closed__2() { +_start: +{ +lean_object* x_1; +x_1 = lean_mk_string_unchecked("failed to load header from ", 27, 27); +return x_1; +} +} +static lean_object* _init_l_Lean_ModuleSetup_load___closed__3() { +_start: +{ +lean_object* x_1; +x_1 = lean_mk_string_unchecked("", 0, 0); +return x_1; +} +} +LEAN_EXPORT lean_object* l_Lean_ModuleSetup_load(lean_object* x_1, lean_object* x_2) { +_start: +{ +lean_object* x_3; +x_3 = l_IO_FS_readFile(x_1, x_2); +if (lean_obj_tag(x_3) == 0) +{ +uint8_t x_4; +x_4 = !lean_is_exclusive(x_3); +if (x_4 == 0) +{ +lean_object* x_5; lean_object* x_6; lean_object* x_7; +x_5 = lean_ctor_get(x_3, 0); +x_6 = l_Lean_ModuleSetup_load___closed__1; +x_7 = l_Std_Internal_Parsec_String_Parser_run___rarg(x_6, x_5); +if (lean_obj_tag(x_7) == 0) +{ +uint8_t x_8; +x_8 = !lean_is_exclusive(x_7); +if (x_8 == 0) +{ +lean_object* x_9; lean_object* x_10; lean_object* x_11; lean_object* x_12; lean_object* x_13; lean_object* x_14; lean_object* x_15; lean_object* x_16; +x_9 = lean_ctor_get(x_7, 0); +x_10 = l_Lean_ModuleSetup_load___closed__2; +x_11 = lean_string_append(x_10, x_1); +x_12 = l___private_Lean_Setup_0__Lean_fromJsonImport____x40_Lean_Setup___hyg_190____closed__10; +x_13 = lean_string_append(x_11, x_12); +x_14 = lean_string_append(x_13, x_9); +lean_dec(x_9); +x_15 = l_Lean_ModuleSetup_load___closed__3; +x_16 = lean_string_append(x_14, x_15); +lean_ctor_set_tag(x_7, 18); +lean_ctor_set(x_7, 0, x_16); +lean_ctor_set_tag(x_3, 1); +lean_ctor_set(x_3, 0, x_7); +return x_3; +} +else +{ +lean_object* x_17; lean_object* x_18; lean_object* x_19; lean_object* x_20; lean_object* x_21; lean_object* x_22; lean_object* x_23; lean_object* x_24; lean_object* x_25; +x_17 = lean_ctor_get(x_7, 0); 
+lean_inc(x_17); +lean_dec(x_7); +x_18 = l_Lean_ModuleSetup_load___closed__2; +x_19 = lean_string_append(x_18, x_1); +x_20 = l___private_Lean_Setup_0__Lean_fromJsonImport____x40_Lean_Setup___hyg_190____closed__10; +x_21 = lean_string_append(x_19, x_20); +x_22 = lean_string_append(x_21, x_17); +lean_dec(x_17); +x_23 = l_Lean_ModuleSetup_load___closed__3; +x_24 = lean_string_append(x_22, x_23); +x_25 = lean_alloc_ctor(18, 1, 0); +lean_ctor_set(x_25, 0, x_24); +lean_ctor_set_tag(x_3, 1); +lean_ctor_set(x_3, 0, x_25); +return x_3; +} +} +else +{ +lean_object* x_26; lean_object* x_27; +x_26 = lean_ctor_get(x_7, 0); +lean_inc(x_26); +lean_dec(x_7); +x_27 = l___private_Lean_Setup_0__Lean_fromJsonModuleSetup____x40_Lean_Setup___hyg_1209_(x_26); +if (lean_obj_tag(x_27) == 0) +{ +uint8_t x_28; +x_28 = !lean_is_exclusive(x_27); +if (x_28 == 0) +{ +lean_object* x_29; lean_object* x_30; lean_object* x_31; lean_object* x_32; lean_object* x_33; lean_object* x_34; lean_object* x_35; lean_object* x_36; +x_29 = lean_ctor_get(x_27, 0); +x_30 = l_Lean_ModuleSetup_load___closed__2; +x_31 = lean_string_append(x_30, x_1); +x_32 = l___private_Lean_Setup_0__Lean_fromJsonImport____x40_Lean_Setup___hyg_190____closed__10; +x_33 = lean_string_append(x_31, x_32); +x_34 = lean_string_append(x_33, x_29); +lean_dec(x_29); +x_35 = l_Lean_ModuleSetup_load___closed__3; +x_36 = lean_string_append(x_34, x_35); +lean_ctor_set_tag(x_27, 18); +lean_ctor_set(x_27, 0, x_36); +lean_ctor_set_tag(x_3, 1); +lean_ctor_set(x_3, 0, x_27); +return x_3; +} +else +{ +lean_object* x_37; lean_object* x_38; lean_object* x_39; lean_object* x_40; lean_object* x_41; lean_object* x_42; lean_object* x_43; lean_object* x_44; lean_object* x_45; +x_37 = lean_ctor_get(x_27, 0); +lean_inc(x_37); +lean_dec(x_27); +x_38 = l_Lean_ModuleSetup_load___closed__2; +x_39 = lean_string_append(x_38, x_1); +x_40 = l___private_Lean_Setup_0__Lean_fromJsonImport____x40_Lean_Setup___hyg_190____closed__10; +x_41 = lean_string_append(x_39, x_40); +x_42 = lean_string_append(x_41, x_37); +lean_dec(x_37); +x_43 = l_Lean_ModuleSetup_load___closed__3; +x_44 = lean_string_append(x_42, x_43); +x_45 = lean_alloc_ctor(18, 1, 0); +lean_ctor_set(x_45, 0, x_44); +lean_ctor_set_tag(x_3, 1); +lean_ctor_set(x_3, 0, x_45); +return x_3; +} +} +else +{ +lean_object* x_46; +x_46 = lean_ctor_get(x_27, 0); +lean_inc(x_46); +lean_dec(x_27); +lean_ctor_set(x_3, 0, x_46); +return x_3; +} +} +} +else +{ +lean_object* x_47; lean_object* x_48; lean_object* x_49; lean_object* x_50; +x_47 = lean_ctor_get(x_3, 0); +x_48 = lean_ctor_get(x_3, 1); +lean_inc(x_48); +lean_inc(x_47); +lean_dec(x_3); +x_49 = l_Lean_ModuleSetup_load___closed__1; +x_50 = l_Std_Internal_Parsec_String_Parser_run___rarg(x_49, x_47); +if (lean_obj_tag(x_50) == 0) +{ +lean_object* x_51; lean_object* x_52; lean_object* x_53; lean_object* x_54; lean_object* x_55; lean_object* x_56; lean_object* x_57; lean_object* x_58; lean_object* x_59; lean_object* x_60; lean_object* x_61; +x_51 = lean_ctor_get(x_50, 0); +lean_inc(x_51); +if (lean_is_exclusive(x_50)) { + lean_ctor_release(x_50, 0); + x_52 = x_50; +} else { + lean_dec_ref(x_50); + x_52 = lean_box(0); +} +x_53 = l_Lean_ModuleSetup_load___closed__2; +x_54 = lean_string_append(x_53, x_1); +x_55 = l___private_Lean_Setup_0__Lean_fromJsonImport____x40_Lean_Setup___hyg_190____closed__10; +x_56 = lean_string_append(x_54, x_55); +x_57 = lean_string_append(x_56, x_51); +lean_dec(x_51); +x_58 = l_Lean_ModuleSetup_load___closed__3; +x_59 = lean_string_append(x_57, x_58); +if 
(lean_is_scalar(x_52)) { + x_60 = lean_alloc_ctor(18, 1, 0); +} else { + x_60 = x_52; + lean_ctor_set_tag(x_60, 18); +} +lean_ctor_set(x_60, 0, x_59); +x_61 = lean_alloc_ctor(1, 2, 0); +lean_ctor_set(x_61, 0, x_60); +lean_ctor_set(x_61, 1, x_48); +return x_61; +} +else +{ +lean_object* x_62; lean_object* x_63; +x_62 = lean_ctor_get(x_50, 0); +lean_inc(x_62); +lean_dec(x_50); +x_63 = l___private_Lean_Setup_0__Lean_fromJsonModuleSetup____x40_Lean_Setup___hyg_1209_(x_62); +if (lean_obj_tag(x_63) == 0) +{ +lean_object* x_64; lean_object* x_65; lean_object* x_66; lean_object* x_67; lean_object* x_68; lean_object* x_69; lean_object* x_70; lean_object* x_71; lean_object* x_72; lean_object* x_73; lean_object* x_74; +x_64 = lean_ctor_get(x_63, 0); +lean_inc(x_64); +if (lean_is_exclusive(x_63)) { + lean_ctor_release(x_63, 0); + x_65 = x_63; +} else { + lean_dec_ref(x_63); + x_65 = lean_box(0); +} +x_66 = l_Lean_ModuleSetup_load___closed__2; +x_67 = lean_string_append(x_66, x_1); +x_68 = l___private_Lean_Setup_0__Lean_fromJsonImport____x40_Lean_Setup___hyg_190____closed__10; +x_69 = lean_string_append(x_67, x_68); +x_70 = lean_string_append(x_69, x_64); +lean_dec(x_64); +x_71 = l_Lean_ModuleSetup_load___closed__3; +x_72 = lean_string_append(x_70, x_71); +if (lean_is_scalar(x_65)) { + x_73 = lean_alloc_ctor(18, 1, 0); +} else { + x_73 = x_65; + lean_ctor_set_tag(x_73, 18); +} +lean_ctor_set(x_73, 0, x_72); +x_74 = lean_alloc_ctor(1, 2, 0); +lean_ctor_set(x_74, 0, x_73); +lean_ctor_set(x_74, 1, x_48); +return x_74; +} +else +{ +lean_object* x_75; lean_object* x_76; +x_75 = lean_ctor_get(x_63, 0); +lean_inc(x_75); +lean_dec(x_63); +x_76 = lean_alloc_ctor(0, 2, 0); +lean_ctor_set(x_76, 0, x_75); +lean_ctor_set(x_76, 1, x_48); +return x_76; +} +} +} +} +else +{ +uint8_t x_77; +x_77 = !lean_is_exclusive(x_3); +if (x_77 == 0) +{ +return x_3; +} +else +{ +lean_object* x_78; lean_object* x_79; lean_object* x_80; +x_78 = lean_ctor_get(x_3, 0); +x_79 = lean_ctor_get(x_3, 1); +lean_inc(x_79); +lean_inc(x_78); +lean_dec(x_3); +x_80 = lean_alloc_ctor(1, 2, 0); +lean_ctor_set(x_80, 0, x_78); +lean_ctor_set(x_80, 1, x_79); +return x_80; +} +} +} +} +LEAN_EXPORT lean_object* l_Lean_ModuleSetup_load___boxed(lean_object* x_1, lean_object* x_2) { +_start: +{ +lean_object* x_3; +x_3 = l_Lean_ModuleSetup_load(x_1, x_2); +lean_dec(x_1); +return x_3; +} +} +lean_object* initialize_Lean_Data_Json(uint8_t builtin, lean_object*); +lean_object* initialize_Lean_Util_LeanOptions(uint8_t builtin, lean_object*); +static bool _G_initialized = false; +LEAN_EXPORT lean_object* initialize_Lean_Setup(uint8_t builtin, lean_object* w) { +lean_object * res; +if (_G_initialized) return lean_io_result_mk_ok(lean_box(0)); +_G_initialized = true; +res = initialize_Lean_Data_Json(builtin, lean_io_mk_world()); +if (lean_io_result_is_error(res)) return res; +lean_dec_ref(res); +res = initialize_Lean_Util_LeanOptions(builtin, lean_io_mk_world()); +if (lean_io_result_is_error(res)) return res; +lean_dec_ref(res); +l___private_Lean_Setup_0__Lean_reprImport____x40_Lean_Setup___hyg_34____closed__1 = _init_l___private_Lean_Setup_0__Lean_reprImport____x40_Lean_Setup___hyg_34____closed__1(); +lean_mark_persistent(l___private_Lean_Setup_0__Lean_reprImport____x40_Lean_Setup___hyg_34____closed__1); +l___private_Lean_Setup_0__Lean_reprImport____x40_Lean_Setup___hyg_34____closed__2 = _init_l___private_Lean_Setup_0__Lean_reprImport____x40_Lean_Setup___hyg_34____closed__2(); 
+lean_mark_persistent(l___private_Lean_Setup_0__Lean_reprImport____x40_Lean_Setup___hyg_34____closed__2); +l___private_Lean_Setup_0__Lean_reprImport____x40_Lean_Setup___hyg_34____closed__3 = _init_l___private_Lean_Setup_0__Lean_reprImport____x40_Lean_Setup___hyg_34____closed__3(); +lean_mark_persistent(l___private_Lean_Setup_0__Lean_reprImport____x40_Lean_Setup___hyg_34____closed__3); +l___private_Lean_Setup_0__Lean_reprImport____x40_Lean_Setup___hyg_34____closed__4 = _init_l___private_Lean_Setup_0__Lean_reprImport____x40_Lean_Setup___hyg_34____closed__4(); +lean_mark_persistent(l___private_Lean_Setup_0__Lean_reprImport____x40_Lean_Setup___hyg_34____closed__4); +l___private_Lean_Setup_0__Lean_reprImport____x40_Lean_Setup___hyg_34____closed__5 = _init_l___private_Lean_Setup_0__Lean_reprImport____x40_Lean_Setup___hyg_34____closed__5(); +lean_mark_persistent(l___private_Lean_Setup_0__Lean_reprImport____x40_Lean_Setup___hyg_34____closed__5); +l___private_Lean_Setup_0__Lean_reprImport____x40_Lean_Setup___hyg_34____closed__6 = _init_l___private_Lean_Setup_0__Lean_reprImport____x40_Lean_Setup___hyg_34____closed__6(); +lean_mark_persistent(l___private_Lean_Setup_0__Lean_reprImport____x40_Lean_Setup___hyg_34____closed__6); +l___private_Lean_Setup_0__Lean_reprImport____x40_Lean_Setup___hyg_34____closed__7 = _init_l___private_Lean_Setup_0__Lean_reprImport____x40_Lean_Setup___hyg_34____closed__7(); +lean_mark_persistent(l___private_Lean_Setup_0__Lean_reprImport____x40_Lean_Setup___hyg_34____closed__7); +l___private_Lean_Setup_0__Lean_reprImport____x40_Lean_Setup___hyg_34____closed__8 = _init_l___private_Lean_Setup_0__Lean_reprImport____x40_Lean_Setup___hyg_34____closed__8(); +lean_mark_persistent(l___private_Lean_Setup_0__Lean_reprImport____x40_Lean_Setup___hyg_34____closed__8); +l___private_Lean_Setup_0__Lean_reprImport____x40_Lean_Setup___hyg_34____closed__9 = _init_l___private_Lean_Setup_0__Lean_reprImport____x40_Lean_Setup___hyg_34____closed__9(); +lean_mark_persistent(l___private_Lean_Setup_0__Lean_reprImport____x40_Lean_Setup___hyg_34____closed__9); +l___private_Lean_Setup_0__Lean_reprImport____x40_Lean_Setup___hyg_34____closed__10 = _init_l___private_Lean_Setup_0__Lean_reprImport____x40_Lean_Setup___hyg_34____closed__10(); +lean_mark_persistent(l___private_Lean_Setup_0__Lean_reprImport____x40_Lean_Setup___hyg_34____closed__10); +l___private_Lean_Setup_0__Lean_reprImport____x40_Lean_Setup___hyg_34____closed__11 = _init_l___private_Lean_Setup_0__Lean_reprImport____x40_Lean_Setup___hyg_34____closed__11(); +lean_mark_persistent(l___private_Lean_Setup_0__Lean_reprImport____x40_Lean_Setup___hyg_34____closed__11); +l___private_Lean_Setup_0__Lean_reprImport____x40_Lean_Setup___hyg_34____closed__12 = _init_l___private_Lean_Setup_0__Lean_reprImport____x40_Lean_Setup___hyg_34____closed__12(); +lean_mark_persistent(l___private_Lean_Setup_0__Lean_reprImport____x40_Lean_Setup___hyg_34____closed__12); +l___private_Lean_Setup_0__Lean_reprImport____x40_Lean_Setup___hyg_34____closed__13 = _init_l___private_Lean_Setup_0__Lean_reprImport____x40_Lean_Setup___hyg_34____closed__13(); +lean_mark_persistent(l___private_Lean_Setup_0__Lean_reprImport____x40_Lean_Setup___hyg_34____closed__13); +l___private_Lean_Setup_0__Lean_reprImport____x40_Lean_Setup___hyg_34____closed__14 = _init_l___private_Lean_Setup_0__Lean_reprImport____x40_Lean_Setup___hyg_34____closed__14(); +lean_mark_persistent(l___private_Lean_Setup_0__Lean_reprImport____x40_Lean_Setup___hyg_34____closed__14); 
+l___private_Lean_Setup_0__Lean_reprImport____x40_Lean_Setup___hyg_34____closed__15 = _init_l___private_Lean_Setup_0__Lean_reprImport____x40_Lean_Setup___hyg_34____closed__15(); +lean_mark_persistent(l___private_Lean_Setup_0__Lean_reprImport____x40_Lean_Setup___hyg_34____closed__15); +l___private_Lean_Setup_0__Lean_reprImport____x40_Lean_Setup___hyg_34____closed__16 = _init_l___private_Lean_Setup_0__Lean_reprImport____x40_Lean_Setup___hyg_34____closed__16(); +lean_mark_persistent(l___private_Lean_Setup_0__Lean_reprImport____x40_Lean_Setup___hyg_34____closed__16); +l___private_Lean_Setup_0__Lean_reprImport____x40_Lean_Setup___hyg_34____closed__17 = _init_l___private_Lean_Setup_0__Lean_reprImport____x40_Lean_Setup___hyg_34____closed__17(); +lean_mark_persistent(l___private_Lean_Setup_0__Lean_reprImport____x40_Lean_Setup___hyg_34____closed__17); +l___private_Lean_Setup_0__Lean_reprImport____x40_Lean_Setup___hyg_34____closed__18 = _init_l___private_Lean_Setup_0__Lean_reprImport____x40_Lean_Setup___hyg_34____closed__18(); +lean_mark_persistent(l___private_Lean_Setup_0__Lean_reprImport____x40_Lean_Setup___hyg_34____closed__18); +l___private_Lean_Setup_0__Lean_reprImport____x40_Lean_Setup___hyg_34____closed__19 = _init_l___private_Lean_Setup_0__Lean_reprImport____x40_Lean_Setup___hyg_34____closed__19(); +lean_mark_persistent(l___private_Lean_Setup_0__Lean_reprImport____x40_Lean_Setup___hyg_34____closed__19); +l___private_Lean_Setup_0__Lean_reprImport____x40_Lean_Setup___hyg_34____closed__20 = _init_l___private_Lean_Setup_0__Lean_reprImport____x40_Lean_Setup___hyg_34____closed__20(); +lean_mark_persistent(l___private_Lean_Setup_0__Lean_reprImport____x40_Lean_Setup___hyg_34____closed__20); +l___private_Lean_Setup_0__Lean_reprImport____x40_Lean_Setup___hyg_34____closed__21 = _init_l___private_Lean_Setup_0__Lean_reprImport____x40_Lean_Setup___hyg_34____closed__21(); +lean_mark_persistent(l___private_Lean_Setup_0__Lean_reprImport____x40_Lean_Setup___hyg_34____closed__21); +l___private_Lean_Setup_0__Lean_reprImport____x40_Lean_Setup___hyg_34____closed__22 = _init_l___private_Lean_Setup_0__Lean_reprImport____x40_Lean_Setup___hyg_34____closed__22(); +lean_mark_persistent(l___private_Lean_Setup_0__Lean_reprImport____x40_Lean_Setup___hyg_34____closed__22); +l___private_Lean_Setup_0__Lean_reprImport____x40_Lean_Setup___hyg_34____closed__23 = _init_l___private_Lean_Setup_0__Lean_reprImport____x40_Lean_Setup___hyg_34____closed__23(); +lean_mark_persistent(l___private_Lean_Setup_0__Lean_reprImport____x40_Lean_Setup___hyg_34____closed__23); +l___private_Lean_Setup_0__Lean_reprImport____x40_Lean_Setup___hyg_34____closed__24 = _init_l___private_Lean_Setup_0__Lean_reprImport____x40_Lean_Setup___hyg_34____closed__24(); +lean_mark_persistent(l___private_Lean_Setup_0__Lean_reprImport____x40_Lean_Setup___hyg_34____closed__24); +l___private_Lean_Setup_0__Lean_reprImport____x40_Lean_Setup___hyg_34____closed__25 = _init_l___private_Lean_Setup_0__Lean_reprImport____x40_Lean_Setup___hyg_34____closed__25(); +lean_mark_persistent(l___private_Lean_Setup_0__Lean_reprImport____x40_Lean_Setup___hyg_34____closed__25); +l___private_Lean_Setup_0__Lean_reprImport____x40_Lean_Setup___hyg_34____closed__26 = _init_l___private_Lean_Setup_0__Lean_reprImport____x40_Lean_Setup___hyg_34____closed__26(); +lean_mark_persistent(l___private_Lean_Setup_0__Lean_reprImport____x40_Lean_Setup___hyg_34____closed__26); +l___private_Lean_Setup_0__Lean_reprImport____x40_Lean_Setup___hyg_34____closed__27 = 
_init_l___private_Lean_Setup_0__Lean_reprImport____x40_Lean_Setup___hyg_34____closed__27(); +lean_mark_persistent(l___private_Lean_Setup_0__Lean_reprImport____x40_Lean_Setup___hyg_34____closed__27); +l___private_Lean_Setup_0__Lean_reprImport____x40_Lean_Setup___hyg_34____closed__28 = _init_l___private_Lean_Setup_0__Lean_reprImport____x40_Lean_Setup___hyg_34____closed__28(); +lean_mark_persistent(l___private_Lean_Setup_0__Lean_reprImport____x40_Lean_Setup___hyg_34____closed__28); +l___private_Lean_Setup_0__Lean_reprImport____x40_Lean_Setup___hyg_34____closed__29 = _init_l___private_Lean_Setup_0__Lean_reprImport____x40_Lean_Setup___hyg_34____closed__29(); +lean_mark_persistent(l___private_Lean_Setup_0__Lean_reprImport____x40_Lean_Setup___hyg_34____closed__29); +l_Lean_instReprImport___closed__1 = _init_l_Lean_instReprImport___closed__1(); +lean_mark_persistent(l_Lean_instReprImport___closed__1); +l_Lean_instReprImport = _init_l_Lean_instReprImport(); +lean_mark_persistent(l_Lean_instReprImport); +l_Lean_instInhabitedImport___closed__1 = _init_l_Lean_instInhabitedImport___closed__1(); +lean_mark_persistent(l_Lean_instInhabitedImport___closed__1); +l_Lean_instInhabitedImport = _init_l_Lean_instInhabitedImport(); +lean_mark_persistent(l_Lean_instInhabitedImport); +l___private_Lean_Setup_0__Lean_toJsonImport____x40_Lean_Setup___hyg_124____closed__1 = _init_l___private_Lean_Setup_0__Lean_toJsonImport____x40_Lean_Setup___hyg_124____closed__1(); +lean_mark_persistent(l___private_Lean_Setup_0__Lean_toJsonImport____x40_Lean_Setup___hyg_124____closed__1); +l___private_Lean_Setup_0__Lean_toJsonImport____x40_Lean_Setup___hyg_124____closed__2 = _init_l___private_Lean_Setup_0__Lean_toJsonImport____x40_Lean_Setup___hyg_124____closed__2(); +lean_mark_persistent(l___private_Lean_Setup_0__Lean_toJsonImport____x40_Lean_Setup___hyg_124____closed__2); +l_Lean_instToJsonImport___closed__1 = _init_l_Lean_instToJsonImport___closed__1(); +lean_mark_persistent(l_Lean_instToJsonImport___closed__1); +l_Lean_instToJsonImport = _init_l_Lean_instToJsonImport(); +lean_mark_persistent(l_Lean_instToJsonImport); +l_Lean_Json_getObjValAs_x3f___at___private_Lean_Setup_0__Lean_fromJsonImport____x40_Lean_Setup___hyg_190____spec__1___closed__1 = _init_l_Lean_Json_getObjValAs_x3f___at___private_Lean_Setup_0__Lean_fromJsonImport____x40_Lean_Setup___hyg_190____spec__1___closed__1(); +lean_mark_persistent(l_Lean_Json_getObjValAs_x3f___at___private_Lean_Setup_0__Lean_fromJsonImport____x40_Lean_Setup___hyg_190____spec__1___closed__1); +l_Lean_Json_getObjValAs_x3f___at___private_Lean_Setup_0__Lean_fromJsonImport____x40_Lean_Setup___hyg_190____spec__1___closed__2 = _init_l_Lean_Json_getObjValAs_x3f___at___private_Lean_Setup_0__Lean_fromJsonImport____x40_Lean_Setup___hyg_190____spec__1___closed__2(); +lean_mark_persistent(l_Lean_Json_getObjValAs_x3f___at___private_Lean_Setup_0__Lean_fromJsonImport____x40_Lean_Setup___hyg_190____spec__1___closed__2); +l_Lean_Json_getObjValAs_x3f___at___private_Lean_Setup_0__Lean_fromJsonImport____x40_Lean_Setup___hyg_190____spec__1___closed__3 = _init_l_Lean_Json_getObjValAs_x3f___at___private_Lean_Setup_0__Lean_fromJsonImport____x40_Lean_Setup___hyg_190____spec__1___closed__3(); +lean_mark_persistent(l_Lean_Json_getObjValAs_x3f___at___private_Lean_Setup_0__Lean_fromJsonImport____x40_Lean_Setup___hyg_190____spec__1___closed__3); +l_Lean_Json_getObjValAs_x3f___at___private_Lean_Setup_0__Lean_fromJsonImport____x40_Lean_Setup___hyg_190____spec__1___closed__4 = 
_init_l_Lean_Json_getObjValAs_x3f___at___private_Lean_Setup_0__Lean_fromJsonImport____x40_Lean_Setup___hyg_190____spec__1___closed__4(); +lean_mark_persistent(l_Lean_Json_getObjValAs_x3f___at___private_Lean_Setup_0__Lean_fromJsonImport____x40_Lean_Setup___hyg_190____spec__1___closed__4); +l___private_Lean_Setup_0__Lean_fromJsonImport____x40_Lean_Setup___hyg_190____closed__1 = _init_l___private_Lean_Setup_0__Lean_fromJsonImport____x40_Lean_Setup___hyg_190____closed__1(); +lean_mark_persistent(l___private_Lean_Setup_0__Lean_fromJsonImport____x40_Lean_Setup___hyg_190____closed__1); +l___private_Lean_Setup_0__Lean_fromJsonImport____x40_Lean_Setup___hyg_190____closed__2 = _init_l___private_Lean_Setup_0__Lean_fromJsonImport____x40_Lean_Setup___hyg_190____closed__2(); +lean_mark_persistent(l___private_Lean_Setup_0__Lean_fromJsonImport____x40_Lean_Setup___hyg_190____closed__2); +l___private_Lean_Setup_0__Lean_fromJsonImport____x40_Lean_Setup___hyg_190____closed__3 = _init_l___private_Lean_Setup_0__Lean_fromJsonImport____x40_Lean_Setup___hyg_190____closed__3(); +lean_mark_persistent(l___private_Lean_Setup_0__Lean_fromJsonImport____x40_Lean_Setup___hyg_190____closed__3); +l___private_Lean_Setup_0__Lean_fromJsonImport____x40_Lean_Setup___hyg_190____closed__4 = _init_l___private_Lean_Setup_0__Lean_fromJsonImport____x40_Lean_Setup___hyg_190____closed__4(); +lean_mark_persistent(l___private_Lean_Setup_0__Lean_fromJsonImport____x40_Lean_Setup___hyg_190____closed__4); +l___private_Lean_Setup_0__Lean_fromJsonImport____x40_Lean_Setup___hyg_190____closed__5 = _init_l___private_Lean_Setup_0__Lean_fromJsonImport____x40_Lean_Setup___hyg_190____closed__5(); +lean_mark_persistent(l___private_Lean_Setup_0__Lean_fromJsonImport____x40_Lean_Setup___hyg_190____closed__5); +l___private_Lean_Setup_0__Lean_fromJsonImport____x40_Lean_Setup___hyg_190____closed__6 = _init_l___private_Lean_Setup_0__Lean_fromJsonImport____x40_Lean_Setup___hyg_190____closed__6(); +lean_mark_persistent(l___private_Lean_Setup_0__Lean_fromJsonImport____x40_Lean_Setup___hyg_190____closed__6); +l___private_Lean_Setup_0__Lean_fromJsonImport____x40_Lean_Setup___hyg_190____closed__7 = _init_l___private_Lean_Setup_0__Lean_fromJsonImport____x40_Lean_Setup___hyg_190____closed__7(); +lean_mark_persistent(l___private_Lean_Setup_0__Lean_fromJsonImport____x40_Lean_Setup___hyg_190____closed__7); +l___private_Lean_Setup_0__Lean_fromJsonImport____x40_Lean_Setup___hyg_190____closed__8 = _init_l___private_Lean_Setup_0__Lean_fromJsonImport____x40_Lean_Setup___hyg_190____closed__8(); +lean_mark_persistent(l___private_Lean_Setup_0__Lean_fromJsonImport____x40_Lean_Setup___hyg_190____closed__8); +l___private_Lean_Setup_0__Lean_fromJsonImport____x40_Lean_Setup___hyg_190____closed__9 = _init_l___private_Lean_Setup_0__Lean_fromJsonImport____x40_Lean_Setup___hyg_190____closed__9(); +lean_mark_persistent(l___private_Lean_Setup_0__Lean_fromJsonImport____x40_Lean_Setup___hyg_190____closed__9); +l___private_Lean_Setup_0__Lean_fromJsonImport____x40_Lean_Setup___hyg_190____closed__10 = _init_l___private_Lean_Setup_0__Lean_fromJsonImport____x40_Lean_Setup___hyg_190____closed__10(); +lean_mark_persistent(l___private_Lean_Setup_0__Lean_fromJsonImport____x40_Lean_Setup___hyg_190____closed__10); +l___private_Lean_Setup_0__Lean_fromJsonImport____x40_Lean_Setup___hyg_190____closed__11 = _init_l___private_Lean_Setup_0__Lean_fromJsonImport____x40_Lean_Setup___hyg_190____closed__11(); 
+lean_mark_persistent(l___private_Lean_Setup_0__Lean_fromJsonImport____x40_Lean_Setup___hyg_190____closed__11); +l___private_Lean_Setup_0__Lean_fromJsonImport____x40_Lean_Setup___hyg_190____closed__12 = _init_l___private_Lean_Setup_0__Lean_fromJsonImport____x40_Lean_Setup___hyg_190____closed__12(); +lean_mark_persistent(l___private_Lean_Setup_0__Lean_fromJsonImport____x40_Lean_Setup___hyg_190____closed__12); +l___private_Lean_Setup_0__Lean_fromJsonImport____x40_Lean_Setup___hyg_190____closed__13 = _init_l___private_Lean_Setup_0__Lean_fromJsonImport____x40_Lean_Setup___hyg_190____closed__13(); +lean_mark_persistent(l___private_Lean_Setup_0__Lean_fromJsonImport____x40_Lean_Setup___hyg_190____closed__13); +l___private_Lean_Setup_0__Lean_fromJsonImport____x40_Lean_Setup___hyg_190____closed__14 = _init_l___private_Lean_Setup_0__Lean_fromJsonImport____x40_Lean_Setup___hyg_190____closed__14(); +lean_mark_persistent(l___private_Lean_Setup_0__Lean_fromJsonImport____x40_Lean_Setup___hyg_190____closed__14); +l___private_Lean_Setup_0__Lean_fromJsonImport____x40_Lean_Setup___hyg_190____closed__15 = _init_l___private_Lean_Setup_0__Lean_fromJsonImport____x40_Lean_Setup___hyg_190____closed__15(); +lean_mark_persistent(l___private_Lean_Setup_0__Lean_fromJsonImport____x40_Lean_Setup___hyg_190____closed__15); +l___private_Lean_Setup_0__Lean_fromJsonImport____x40_Lean_Setup___hyg_190____closed__16 = _init_l___private_Lean_Setup_0__Lean_fromJsonImport____x40_Lean_Setup___hyg_190____closed__16(); +lean_mark_persistent(l___private_Lean_Setup_0__Lean_fromJsonImport____x40_Lean_Setup___hyg_190____closed__16); +l___private_Lean_Setup_0__Lean_fromJsonImport____x40_Lean_Setup___hyg_190____closed__17 = _init_l___private_Lean_Setup_0__Lean_fromJsonImport____x40_Lean_Setup___hyg_190____closed__17(); +lean_mark_persistent(l___private_Lean_Setup_0__Lean_fromJsonImport____x40_Lean_Setup___hyg_190____closed__17); +l___private_Lean_Setup_0__Lean_fromJsonImport____x40_Lean_Setup___hyg_190____closed__18 = _init_l___private_Lean_Setup_0__Lean_fromJsonImport____x40_Lean_Setup___hyg_190____closed__18(); +lean_mark_persistent(l___private_Lean_Setup_0__Lean_fromJsonImport____x40_Lean_Setup___hyg_190____closed__18); +l___private_Lean_Setup_0__Lean_fromJsonImport____x40_Lean_Setup___hyg_190____closed__19 = _init_l___private_Lean_Setup_0__Lean_fromJsonImport____x40_Lean_Setup___hyg_190____closed__19(); +lean_mark_persistent(l___private_Lean_Setup_0__Lean_fromJsonImport____x40_Lean_Setup___hyg_190____closed__19); +l_Lean_instFromJsonImport___closed__1 = _init_l_Lean_instFromJsonImport___closed__1(); +lean_mark_persistent(l_Lean_instFromJsonImport___closed__1); +l_Lean_instFromJsonImport = _init_l_Lean_instFromJsonImport(); +lean_mark_persistent(l_Lean_instFromJsonImport); +l_Option_repr___at___private_Lean_Setup_0__Lean_reprModuleArtifacts____x40_Lean_Setup___hyg_417____spec__1___closed__1 = _init_l_Option_repr___at___private_Lean_Setup_0__Lean_reprModuleArtifacts____x40_Lean_Setup___hyg_417____spec__1___closed__1(); +lean_mark_persistent(l_Option_repr___at___private_Lean_Setup_0__Lean_reprModuleArtifacts____x40_Lean_Setup___hyg_417____spec__1___closed__1); +l_Option_repr___at___private_Lean_Setup_0__Lean_reprModuleArtifacts____x40_Lean_Setup___hyg_417____spec__1___closed__2 = _init_l_Option_repr___at___private_Lean_Setup_0__Lean_reprModuleArtifacts____x40_Lean_Setup___hyg_417____spec__1___closed__2(); 
+lean_mark_persistent(l_Option_repr___at___private_Lean_Setup_0__Lean_reprModuleArtifacts____x40_Lean_Setup___hyg_417____spec__1___closed__2); +l_Option_repr___at___private_Lean_Setup_0__Lean_reprModuleArtifacts____x40_Lean_Setup___hyg_417____spec__1___closed__3 = _init_l_Option_repr___at___private_Lean_Setup_0__Lean_reprModuleArtifacts____x40_Lean_Setup___hyg_417____spec__1___closed__3(); +lean_mark_persistent(l_Option_repr___at___private_Lean_Setup_0__Lean_reprModuleArtifacts____x40_Lean_Setup___hyg_417____spec__1___closed__3); +l_Option_repr___at___private_Lean_Setup_0__Lean_reprModuleArtifacts____x40_Lean_Setup___hyg_417____spec__1___closed__4 = _init_l_Option_repr___at___private_Lean_Setup_0__Lean_reprModuleArtifacts____x40_Lean_Setup___hyg_417____spec__1___closed__4(); +lean_mark_persistent(l_Option_repr___at___private_Lean_Setup_0__Lean_reprModuleArtifacts____x40_Lean_Setup___hyg_417____spec__1___closed__4); +l_Option_repr___at___private_Lean_Setup_0__Lean_reprModuleArtifacts____x40_Lean_Setup___hyg_417____spec__1___closed__5 = _init_l_Option_repr___at___private_Lean_Setup_0__Lean_reprModuleArtifacts____x40_Lean_Setup___hyg_417____spec__1___closed__5(); +lean_mark_persistent(l_Option_repr___at___private_Lean_Setup_0__Lean_reprModuleArtifacts____x40_Lean_Setup___hyg_417____spec__1___closed__5); +l_Option_repr___at___private_Lean_Setup_0__Lean_reprModuleArtifacts____x40_Lean_Setup___hyg_417____spec__1___closed__6 = _init_l_Option_repr___at___private_Lean_Setup_0__Lean_reprModuleArtifacts____x40_Lean_Setup___hyg_417____spec__1___closed__6(); +lean_mark_persistent(l_Option_repr___at___private_Lean_Setup_0__Lean_reprModuleArtifacts____x40_Lean_Setup___hyg_417____spec__1___closed__6); +l___private_Lean_Setup_0__Lean_reprModuleArtifacts____x40_Lean_Setup___hyg_417____closed__1 = _init_l___private_Lean_Setup_0__Lean_reprModuleArtifacts____x40_Lean_Setup___hyg_417____closed__1(); +lean_mark_persistent(l___private_Lean_Setup_0__Lean_reprModuleArtifacts____x40_Lean_Setup___hyg_417____closed__1); +l___private_Lean_Setup_0__Lean_reprModuleArtifacts____x40_Lean_Setup___hyg_417____closed__2 = _init_l___private_Lean_Setup_0__Lean_reprModuleArtifacts____x40_Lean_Setup___hyg_417____closed__2(); +lean_mark_persistent(l___private_Lean_Setup_0__Lean_reprModuleArtifacts____x40_Lean_Setup___hyg_417____closed__2); +l___private_Lean_Setup_0__Lean_reprModuleArtifacts____x40_Lean_Setup___hyg_417____closed__3 = _init_l___private_Lean_Setup_0__Lean_reprModuleArtifacts____x40_Lean_Setup___hyg_417____closed__3(); +lean_mark_persistent(l___private_Lean_Setup_0__Lean_reprModuleArtifacts____x40_Lean_Setup___hyg_417____closed__3); +l___private_Lean_Setup_0__Lean_reprModuleArtifacts____x40_Lean_Setup___hyg_417____closed__4 = _init_l___private_Lean_Setup_0__Lean_reprModuleArtifacts____x40_Lean_Setup___hyg_417____closed__4(); +lean_mark_persistent(l___private_Lean_Setup_0__Lean_reprModuleArtifacts____x40_Lean_Setup___hyg_417____closed__4); +l___private_Lean_Setup_0__Lean_reprModuleArtifacts____x40_Lean_Setup___hyg_417____closed__5 = _init_l___private_Lean_Setup_0__Lean_reprModuleArtifacts____x40_Lean_Setup___hyg_417____closed__5(); +lean_mark_persistent(l___private_Lean_Setup_0__Lean_reprModuleArtifacts____x40_Lean_Setup___hyg_417____closed__5); +l___private_Lean_Setup_0__Lean_reprModuleArtifacts____x40_Lean_Setup___hyg_417____closed__6 = _init_l___private_Lean_Setup_0__Lean_reprModuleArtifacts____x40_Lean_Setup___hyg_417____closed__6(); 
+lean_mark_persistent(l___private_Lean_Setup_0__Lean_reprModuleArtifacts____x40_Lean_Setup___hyg_417____closed__6); +l___private_Lean_Setup_0__Lean_reprModuleArtifacts____x40_Lean_Setup___hyg_417____closed__7 = _init_l___private_Lean_Setup_0__Lean_reprModuleArtifacts____x40_Lean_Setup___hyg_417____closed__7(); +lean_mark_persistent(l___private_Lean_Setup_0__Lean_reprModuleArtifacts____x40_Lean_Setup___hyg_417____closed__7); +l___private_Lean_Setup_0__Lean_reprModuleArtifacts____x40_Lean_Setup___hyg_417____closed__8 = _init_l___private_Lean_Setup_0__Lean_reprModuleArtifacts____x40_Lean_Setup___hyg_417____closed__8(); +lean_mark_persistent(l___private_Lean_Setup_0__Lean_reprModuleArtifacts____x40_Lean_Setup___hyg_417____closed__8); +l___private_Lean_Setup_0__Lean_reprModuleArtifacts____x40_Lean_Setup___hyg_417____closed__9 = _init_l___private_Lean_Setup_0__Lean_reprModuleArtifacts____x40_Lean_Setup___hyg_417____closed__9(); +lean_mark_persistent(l___private_Lean_Setup_0__Lean_reprModuleArtifacts____x40_Lean_Setup___hyg_417____closed__9); +l___private_Lean_Setup_0__Lean_reprModuleArtifacts____x40_Lean_Setup___hyg_417____closed__10 = _init_l___private_Lean_Setup_0__Lean_reprModuleArtifacts____x40_Lean_Setup___hyg_417____closed__10(); +lean_mark_persistent(l___private_Lean_Setup_0__Lean_reprModuleArtifacts____x40_Lean_Setup___hyg_417____closed__10); +l___private_Lean_Setup_0__Lean_reprModuleArtifacts____x40_Lean_Setup___hyg_417____closed__11 = _init_l___private_Lean_Setup_0__Lean_reprModuleArtifacts____x40_Lean_Setup___hyg_417____closed__11(); +lean_mark_persistent(l___private_Lean_Setup_0__Lean_reprModuleArtifacts____x40_Lean_Setup___hyg_417____closed__11); +l___private_Lean_Setup_0__Lean_reprModuleArtifacts____x40_Lean_Setup___hyg_417____closed__12 = _init_l___private_Lean_Setup_0__Lean_reprModuleArtifacts____x40_Lean_Setup___hyg_417____closed__12(); +lean_mark_persistent(l___private_Lean_Setup_0__Lean_reprModuleArtifacts____x40_Lean_Setup___hyg_417____closed__12); +l___private_Lean_Setup_0__Lean_reprModuleArtifacts____x40_Lean_Setup___hyg_417____closed__13 = _init_l___private_Lean_Setup_0__Lean_reprModuleArtifacts____x40_Lean_Setup___hyg_417____closed__13(); +lean_mark_persistent(l___private_Lean_Setup_0__Lean_reprModuleArtifacts____x40_Lean_Setup___hyg_417____closed__13); +l___private_Lean_Setup_0__Lean_reprModuleArtifacts____x40_Lean_Setup___hyg_417____closed__14 = _init_l___private_Lean_Setup_0__Lean_reprModuleArtifacts____x40_Lean_Setup___hyg_417____closed__14(); +lean_mark_persistent(l___private_Lean_Setup_0__Lean_reprModuleArtifacts____x40_Lean_Setup___hyg_417____closed__14); +l___private_Lean_Setup_0__Lean_reprModuleArtifacts____x40_Lean_Setup___hyg_417____closed__15 = _init_l___private_Lean_Setup_0__Lean_reprModuleArtifacts____x40_Lean_Setup___hyg_417____closed__15(); +lean_mark_persistent(l___private_Lean_Setup_0__Lean_reprModuleArtifacts____x40_Lean_Setup___hyg_417____closed__15); +l_Lean_instReprModuleArtifacts___closed__1 = _init_l_Lean_instReprModuleArtifacts___closed__1(); +lean_mark_persistent(l_Lean_instReprModuleArtifacts___closed__1); +l_Lean_instReprModuleArtifacts = _init_l_Lean_instReprModuleArtifacts(); +lean_mark_persistent(l_Lean_instReprModuleArtifacts); +l_Lean_instInhabitedModuleArtifacts___closed__1 = _init_l_Lean_instInhabitedModuleArtifacts___closed__1(); +lean_mark_persistent(l_Lean_instInhabitedModuleArtifacts___closed__1); +l_Lean_instInhabitedModuleArtifacts = _init_l_Lean_instInhabitedModuleArtifacts(); 
+lean_mark_persistent(l_Lean_instInhabitedModuleArtifacts); +l___private_Lean_Setup_0__Lean_toJsonModuleArtifacts____x40_Lean_Setup___hyg_549____closed__1 = _init_l___private_Lean_Setup_0__Lean_toJsonModuleArtifacts____x40_Lean_Setup___hyg_549____closed__1(); +lean_mark_persistent(l___private_Lean_Setup_0__Lean_toJsonModuleArtifacts____x40_Lean_Setup___hyg_549____closed__1); +l___private_Lean_Setup_0__Lean_toJsonModuleArtifacts____x40_Lean_Setup___hyg_549____closed__2 = _init_l___private_Lean_Setup_0__Lean_toJsonModuleArtifacts____x40_Lean_Setup___hyg_549____closed__2(); +lean_mark_persistent(l___private_Lean_Setup_0__Lean_toJsonModuleArtifacts____x40_Lean_Setup___hyg_549____closed__2); +l___private_Lean_Setup_0__Lean_toJsonModuleArtifacts____x40_Lean_Setup___hyg_549____closed__3 = _init_l___private_Lean_Setup_0__Lean_toJsonModuleArtifacts____x40_Lean_Setup___hyg_549____closed__3(); +lean_mark_persistent(l___private_Lean_Setup_0__Lean_toJsonModuleArtifacts____x40_Lean_Setup___hyg_549____closed__3); +l___private_Lean_Setup_0__Lean_toJsonModuleArtifacts____x40_Lean_Setup___hyg_549____closed__4 = _init_l___private_Lean_Setup_0__Lean_toJsonModuleArtifacts____x40_Lean_Setup___hyg_549____closed__4(); +lean_mark_persistent(l___private_Lean_Setup_0__Lean_toJsonModuleArtifacts____x40_Lean_Setup___hyg_549____closed__4); +l___private_Lean_Setup_0__Lean_toJsonModuleArtifacts____x40_Lean_Setup___hyg_549____closed__5 = _init_l___private_Lean_Setup_0__Lean_toJsonModuleArtifacts____x40_Lean_Setup___hyg_549____closed__5(); +lean_mark_persistent(l___private_Lean_Setup_0__Lean_toJsonModuleArtifacts____x40_Lean_Setup___hyg_549____closed__5); +l_Lean_instToJsonModuleArtifacts___closed__1 = _init_l_Lean_instToJsonModuleArtifacts___closed__1(); +lean_mark_persistent(l_Lean_instToJsonModuleArtifacts___closed__1); +l_Lean_instToJsonModuleArtifacts = _init_l_Lean_instToJsonModuleArtifacts(); +lean_mark_persistent(l_Lean_instToJsonModuleArtifacts); +l_Lean_Json_getObjValAs_x3f___at___private_Lean_Setup_0__Lean_fromJsonModuleArtifacts____x40_Lean_Setup___hyg_593____spec__1___closed__1 = _init_l_Lean_Json_getObjValAs_x3f___at___private_Lean_Setup_0__Lean_fromJsonModuleArtifacts____x40_Lean_Setup___hyg_593____spec__1___closed__1(); +lean_mark_persistent(l_Lean_Json_getObjValAs_x3f___at___private_Lean_Setup_0__Lean_fromJsonModuleArtifacts____x40_Lean_Setup___hyg_593____spec__1___closed__1); +l___private_Lean_Setup_0__Lean_fromJsonModuleArtifacts____x40_Lean_Setup___hyg_593____closed__1 = _init_l___private_Lean_Setup_0__Lean_fromJsonModuleArtifacts____x40_Lean_Setup___hyg_593____closed__1(); +lean_mark_persistent(l___private_Lean_Setup_0__Lean_fromJsonModuleArtifacts____x40_Lean_Setup___hyg_593____closed__1); +l___private_Lean_Setup_0__Lean_fromJsonModuleArtifacts____x40_Lean_Setup___hyg_593____closed__2 = _init_l___private_Lean_Setup_0__Lean_fromJsonModuleArtifacts____x40_Lean_Setup___hyg_593____closed__2(); +lean_mark_persistent(l___private_Lean_Setup_0__Lean_fromJsonModuleArtifacts____x40_Lean_Setup___hyg_593____closed__2); +l___private_Lean_Setup_0__Lean_fromJsonModuleArtifacts____x40_Lean_Setup___hyg_593____closed__3 = _init_l___private_Lean_Setup_0__Lean_fromJsonModuleArtifacts____x40_Lean_Setup___hyg_593____closed__3(); +lean_mark_persistent(l___private_Lean_Setup_0__Lean_fromJsonModuleArtifacts____x40_Lean_Setup___hyg_593____closed__3); +l___private_Lean_Setup_0__Lean_fromJsonModuleArtifacts____x40_Lean_Setup___hyg_593____closed__4 = 
_init_l___private_Lean_Setup_0__Lean_fromJsonModuleArtifacts____x40_Lean_Setup___hyg_593____closed__4(); +lean_mark_persistent(l___private_Lean_Setup_0__Lean_fromJsonModuleArtifacts____x40_Lean_Setup___hyg_593____closed__4); +l___private_Lean_Setup_0__Lean_fromJsonModuleArtifacts____x40_Lean_Setup___hyg_593____closed__5 = _init_l___private_Lean_Setup_0__Lean_fromJsonModuleArtifacts____x40_Lean_Setup___hyg_593____closed__5(); +lean_mark_persistent(l___private_Lean_Setup_0__Lean_fromJsonModuleArtifacts____x40_Lean_Setup___hyg_593____closed__5); +l___private_Lean_Setup_0__Lean_fromJsonModuleArtifacts____x40_Lean_Setup___hyg_593____closed__6 = _init_l___private_Lean_Setup_0__Lean_fromJsonModuleArtifacts____x40_Lean_Setup___hyg_593____closed__6(); +lean_mark_persistent(l___private_Lean_Setup_0__Lean_fromJsonModuleArtifacts____x40_Lean_Setup___hyg_593____closed__6); +l___private_Lean_Setup_0__Lean_fromJsonModuleArtifacts____x40_Lean_Setup___hyg_593____closed__7 = _init_l___private_Lean_Setup_0__Lean_fromJsonModuleArtifacts____x40_Lean_Setup___hyg_593____closed__7(); +lean_mark_persistent(l___private_Lean_Setup_0__Lean_fromJsonModuleArtifacts____x40_Lean_Setup___hyg_593____closed__7); +l___private_Lean_Setup_0__Lean_fromJsonModuleArtifacts____x40_Lean_Setup___hyg_593____closed__8 = _init_l___private_Lean_Setup_0__Lean_fromJsonModuleArtifacts____x40_Lean_Setup___hyg_593____closed__8(); +lean_mark_persistent(l___private_Lean_Setup_0__Lean_fromJsonModuleArtifacts____x40_Lean_Setup___hyg_593____closed__8); +l___private_Lean_Setup_0__Lean_fromJsonModuleArtifacts____x40_Lean_Setup___hyg_593____closed__9 = _init_l___private_Lean_Setup_0__Lean_fromJsonModuleArtifacts____x40_Lean_Setup___hyg_593____closed__9(); +lean_mark_persistent(l___private_Lean_Setup_0__Lean_fromJsonModuleArtifacts____x40_Lean_Setup___hyg_593____closed__9); +l___private_Lean_Setup_0__Lean_fromJsonModuleArtifacts____x40_Lean_Setup___hyg_593____closed__10 = _init_l___private_Lean_Setup_0__Lean_fromJsonModuleArtifacts____x40_Lean_Setup___hyg_593____closed__10(); +lean_mark_persistent(l___private_Lean_Setup_0__Lean_fromJsonModuleArtifacts____x40_Lean_Setup___hyg_593____closed__10); +l___private_Lean_Setup_0__Lean_fromJsonModuleArtifacts____x40_Lean_Setup___hyg_593____closed__11 = _init_l___private_Lean_Setup_0__Lean_fromJsonModuleArtifacts____x40_Lean_Setup___hyg_593____closed__11(); +lean_mark_persistent(l___private_Lean_Setup_0__Lean_fromJsonModuleArtifacts____x40_Lean_Setup___hyg_593____closed__11); +l___private_Lean_Setup_0__Lean_fromJsonModuleArtifacts____x40_Lean_Setup___hyg_593____closed__12 = _init_l___private_Lean_Setup_0__Lean_fromJsonModuleArtifacts____x40_Lean_Setup___hyg_593____closed__12(); +lean_mark_persistent(l___private_Lean_Setup_0__Lean_fromJsonModuleArtifacts____x40_Lean_Setup___hyg_593____closed__12); +l___private_Lean_Setup_0__Lean_fromJsonModuleArtifacts____x40_Lean_Setup___hyg_593____closed__13 = _init_l___private_Lean_Setup_0__Lean_fromJsonModuleArtifacts____x40_Lean_Setup___hyg_593____closed__13(); +lean_mark_persistent(l___private_Lean_Setup_0__Lean_fromJsonModuleArtifacts____x40_Lean_Setup___hyg_593____closed__13); +l___private_Lean_Setup_0__Lean_fromJsonModuleArtifacts____x40_Lean_Setup___hyg_593____closed__14 = _init_l___private_Lean_Setup_0__Lean_fromJsonModuleArtifacts____x40_Lean_Setup___hyg_593____closed__14(); +lean_mark_persistent(l___private_Lean_Setup_0__Lean_fromJsonModuleArtifacts____x40_Lean_Setup___hyg_593____closed__14); 
+l___private_Lean_Setup_0__Lean_fromJsonModuleArtifacts____x40_Lean_Setup___hyg_593____closed__15 = _init_l___private_Lean_Setup_0__Lean_fromJsonModuleArtifacts____x40_Lean_Setup___hyg_593____closed__15(); +lean_mark_persistent(l___private_Lean_Setup_0__Lean_fromJsonModuleArtifacts____x40_Lean_Setup___hyg_593____closed__15); +l___private_Lean_Setup_0__Lean_fromJsonModuleArtifacts____x40_Lean_Setup___hyg_593____closed__16 = _init_l___private_Lean_Setup_0__Lean_fromJsonModuleArtifacts____x40_Lean_Setup___hyg_593____closed__16(); +lean_mark_persistent(l___private_Lean_Setup_0__Lean_fromJsonModuleArtifacts____x40_Lean_Setup___hyg_593____closed__16); +l___private_Lean_Setup_0__Lean_fromJsonModuleArtifacts____x40_Lean_Setup___hyg_593____closed__17 = _init_l___private_Lean_Setup_0__Lean_fromJsonModuleArtifacts____x40_Lean_Setup___hyg_593____closed__17(); +lean_mark_persistent(l___private_Lean_Setup_0__Lean_fromJsonModuleArtifacts____x40_Lean_Setup___hyg_593____closed__17); +l___private_Lean_Setup_0__Lean_fromJsonModuleArtifacts____x40_Lean_Setup___hyg_593____closed__18 = _init_l___private_Lean_Setup_0__Lean_fromJsonModuleArtifacts____x40_Lean_Setup___hyg_593____closed__18(); +lean_mark_persistent(l___private_Lean_Setup_0__Lean_fromJsonModuleArtifacts____x40_Lean_Setup___hyg_593____closed__18); +l___private_Lean_Setup_0__Lean_fromJsonModuleArtifacts____x40_Lean_Setup___hyg_593____closed__19 = _init_l___private_Lean_Setup_0__Lean_fromJsonModuleArtifacts____x40_Lean_Setup___hyg_593____closed__19(); +lean_mark_persistent(l___private_Lean_Setup_0__Lean_fromJsonModuleArtifacts____x40_Lean_Setup___hyg_593____closed__19); +l___private_Lean_Setup_0__Lean_fromJsonModuleArtifacts____x40_Lean_Setup___hyg_593____closed__20 = _init_l___private_Lean_Setup_0__Lean_fromJsonModuleArtifacts____x40_Lean_Setup___hyg_593____closed__20(); +lean_mark_persistent(l___private_Lean_Setup_0__Lean_fromJsonModuleArtifacts____x40_Lean_Setup___hyg_593____closed__20); +l___private_Lean_Setup_0__Lean_fromJsonModuleArtifacts____x40_Lean_Setup___hyg_593____closed__21 = _init_l___private_Lean_Setup_0__Lean_fromJsonModuleArtifacts____x40_Lean_Setup___hyg_593____closed__21(); +lean_mark_persistent(l___private_Lean_Setup_0__Lean_fromJsonModuleArtifacts____x40_Lean_Setup___hyg_593____closed__21); +l___private_Lean_Setup_0__Lean_fromJsonModuleArtifacts____x40_Lean_Setup___hyg_593____closed__22 = _init_l___private_Lean_Setup_0__Lean_fromJsonModuleArtifacts____x40_Lean_Setup___hyg_593____closed__22(); +lean_mark_persistent(l___private_Lean_Setup_0__Lean_fromJsonModuleArtifacts____x40_Lean_Setup___hyg_593____closed__22); +l___private_Lean_Setup_0__Lean_fromJsonModuleArtifacts____x40_Lean_Setup___hyg_593____closed__23 = _init_l___private_Lean_Setup_0__Lean_fromJsonModuleArtifacts____x40_Lean_Setup___hyg_593____closed__23(); +lean_mark_persistent(l___private_Lean_Setup_0__Lean_fromJsonModuleArtifacts____x40_Lean_Setup___hyg_593____closed__23); +l___private_Lean_Setup_0__Lean_fromJsonModuleArtifacts____x40_Lean_Setup___hyg_593____closed__24 = _init_l___private_Lean_Setup_0__Lean_fromJsonModuleArtifacts____x40_Lean_Setup___hyg_593____closed__24(); +lean_mark_persistent(l___private_Lean_Setup_0__Lean_fromJsonModuleArtifacts____x40_Lean_Setup___hyg_593____closed__24); +l_Lean_instFromJsonModuleArtifacts___closed__1 = _init_l_Lean_instFromJsonModuleArtifacts___closed__1(); +lean_mark_persistent(l_Lean_instFromJsonModuleArtifacts___closed__1); +l_Lean_instFromJsonModuleArtifacts = _init_l_Lean_instFromJsonModuleArtifacts(); 
+lean_mark_persistent(l_Lean_instFromJsonModuleArtifacts); +l_Prod_repr___at___private_Lean_Setup_0__Lean_reprModuleSetup____x40_Lean_Setup___hyg_913____spec__6___closed__1 = _init_l_Prod_repr___at___private_Lean_Setup_0__Lean_reprModuleSetup____x40_Lean_Setup___hyg_913____spec__6___closed__1(); +lean_mark_persistent(l_Prod_repr___at___private_Lean_Setup_0__Lean_reprModuleSetup____x40_Lean_Setup___hyg_913____spec__6___closed__1); +l_Prod_repr___at___private_Lean_Setup_0__Lean_reprModuleSetup____x40_Lean_Setup___hyg_913____spec__6___closed__2 = _init_l_Prod_repr___at___private_Lean_Setup_0__Lean_reprModuleSetup____x40_Lean_Setup___hyg_913____spec__6___closed__2(); +lean_mark_persistent(l_Prod_repr___at___private_Lean_Setup_0__Lean_reprModuleSetup____x40_Lean_Setup___hyg_913____spec__6___closed__2); +l_Prod_repr___at___private_Lean_Setup_0__Lean_reprModuleSetup____x40_Lean_Setup___hyg_913____spec__6___closed__3 = _init_l_Prod_repr___at___private_Lean_Setup_0__Lean_reprModuleSetup____x40_Lean_Setup___hyg_913____spec__6___closed__3(); +lean_mark_persistent(l_Prod_repr___at___private_Lean_Setup_0__Lean_reprModuleSetup____x40_Lean_Setup___hyg_913____spec__6___closed__3); +l_Prod_repr___at___private_Lean_Setup_0__Lean_reprModuleSetup____x40_Lean_Setup___hyg_913____spec__6___closed__4 = _init_l_Prod_repr___at___private_Lean_Setup_0__Lean_reprModuleSetup____x40_Lean_Setup___hyg_913____spec__6___closed__4(); +lean_mark_persistent(l_Prod_repr___at___private_Lean_Setup_0__Lean_reprModuleSetup____x40_Lean_Setup___hyg_913____spec__6___closed__4); +l_Prod_repr___at___private_Lean_Setup_0__Lean_reprModuleSetup____x40_Lean_Setup___hyg_913____spec__6___closed__5 = _init_l_Prod_repr___at___private_Lean_Setup_0__Lean_reprModuleSetup____x40_Lean_Setup___hyg_913____spec__6___closed__5(); +lean_mark_persistent(l_Prod_repr___at___private_Lean_Setup_0__Lean_reprModuleSetup____x40_Lean_Setup___hyg_913____spec__6___closed__5); +l_Prod_repr___at___private_Lean_Setup_0__Lean_reprModuleSetup____x40_Lean_Setup___hyg_913____spec__6___closed__6 = _init_l_Prod_repr___at___private_Lean_Setup_0__Lean_reprModuleSetup____x40_Lean_Setup___hyg_913____spec__6___closed__6(); +lean_mark_persistent(l_Prod_repr___at___private_Lean_Setup_0__Lean_reprModuleSetup____x40_Lean_Setup___hyg_913____spec__6___closed__6); +l_Prod_repr___at___private_Lean_Setup_0__Lean_reprModuleSetup____x40_Lean_Setup___hyg_913____spec__6___closed__7 = _init_l_Prod_repr___at___private_Lean_Setup_0__Lean_reprModuleSetup____x40_Lean_Setup___hyg_913____spec__6___closed__7(); +lean_mark_persistent(l_Prod_repr___at___private_Lean_Setup_0__Lean_reprModuleSetup____x40_Lean_Setup___hyg_913____spec__6___closed__7); +l_List_repr___at___private_Lean_Setup_0__Lean_reprModuleSetup____x40_Lean_Setup___hyg_913____spec__4___closed__1 = _init_l_List_repr___at___private_Lean_Setup_0__Lean_reprModuleSetup____x40_Lean_Setup___hyg_913____spec__4___closed__1(); +lean_mark_persistent(l_List_repr___at___private_Lean_Setup_0__Lean_reprModuleSetup____x40_Lean_Setup___hyg_913____spec__4___closed__1); +l_List_repr___at___private_Lean_Setup_0__Lean_reprModuleSetup____x40_Lean_Setup___hyg_913____spec__4___closed__2 = _init_l_List_repr___at___private_Lean_Setup_0__Lean_reprModuleSetup____x40_Lean_Setup___hyg_913____spec__4___closed__2(); +lean_mark_persistent(l_List_repr___at___private_Lean_Setup_0__Lean_reprModuleSetup____x40_Lean_Setup___hyg_913____spec__4___closed__2); +l_List_repr___at___private_Lean_Setup_0__Lean_reprModuleSetup____x40_Lean_Setup___hyg_913____spec__4___closed__3 = 
_init_l_List_repr___at___private_Lean_Setup_0__Lean_reprModuleSetup____x40_Lean_Setup___hyg_913____spec__4___closed__3(); +lean_mark_persistent(l_List_repr___at___private_Lean_Setup_0__Lean_reprModuleSetup____x40_Lean_Setup___hyg_913____spec__4___closed__3); +l_List_repr___at___private_Lean_Setup_0__Lean_reprModuleSetup____x40_Lean_Setup___hyg_913____spec__4___closed__4 = _init_l_List_repr___at___private_Lean_Setup_0__Lean_reprModuleSetup____x40_Lean_Setup___hyg_913____spec__4___closed__4(); +lean_mark_persistent(l_List_repr___at___private_Lean_Setup_0__Lean_reprModuleSetup____x40_Lean_Setup___hyg_913____spec__4___closed__4); +l_List_repr___at___private_Lean_Setup_0__Lean_reprModuleSetup____x40_Lean_Setup___hyg_913____spec__4___closed__5 = _init_l_List_repr___at___private_Lean_Setup_0__Lean_reprModuleSetup____x40_Lean_Setup___hyg_913____spec__4___closed__5(); +lean_mark_persistent(l_List_repr___at___private_Lean_Setup_0__Lean_reprModuleSetup____x40_Lean_Setup___hyg_913____spec__4___closed__5); +l_List_repr___at___private_Lean_Setup_0__Lean_reprModuleSetup____x40_Lean_Setup___hyg_913____spec__4___closed__6 = _init_l_List_repr___at___private_Lean_Setup_0__Lean_reprModuleSetup____x40_Lean_Setup___hyg_913____spec__4___closed__6(); +lean_mark_persistent(l_List_repr___at___private_Lean_Setup_0__Lean_reprModuleSetup____x40_Lean_Setup___hyg_913____spec__4___closed__6); +l_List_repr___at___private_Lean_Setup_0__Lean_reprModuleSetup____x40_Lean_Setup___hyg_913____spec__4___closed__7 = _init_l_List_repr___at___private_Lean_Setup_0__Lean_reprModuleSetup____x40_Lean_Setup___hyg_913____spec__4___closed__7(); +lean_mark_persistent(l_List_repr___at___private_Lean_Setup_0__Lean_reprModuleSetup____x40_Lean_Setup___hyg_913____spec__4___closed__7); +l_List_repr___at___private_Lean_Setup_0__Lean_reprModuleSetup____x40_Lean_Setup___hyg_913____spec__4___closed__8 = _init_l_List_repr___at___private_Lean_Setup_0__Lean_reprModuleSetup____x40_Lean_Setup___hyg_913____spec__4___closed__8(); +lean_mark_persistent(l_List_repr___at___private_Lean_Setup_0__Lean_reprModuleSetup____x40_Lean_Setup___hyg_913____spec__4___closed__8); +l___private_Lean_Setup_0__Lean_reprModuleSetup____x40_Lean_Setup___hyg_913____closed__1 = _init_l___private_Lean_Setup_0__Lean_reprModuleSetup____x40_Lean_Setup___hyg_913____closed__1(); +lean_mark_persistent(l___private_Lean_Setup_0__Lean_reprModuleSetup____x40_Lean_Setup___hyg_913____closed__1); +l___private_Lean_Setup_0__Lean_reprModuleSetup____x40_Lean_Setup___hyg_913____closed__2 = _init_l___private_Lean_Setup_0__Lean_reprModuleSetup____x40_Lean_Setup___hyg_913____closed__2(); +lean_mark_persistent(l___private_Lean_Setup_0__Lean_reprModuleSetup____x40_Lean_Setup___hyg_913____closed__2); +l___private_Lean_Setup_0__Lean_reprModuleSetup____x40_Lean_Setup___hyg_913____closed__3 = _init_l___private_Lean_Setup_0__Lean_reprModuleSetup____x40_Lean_Setup___hyg_913____closed__3(); +lean_mark_persistent(l___private_Lean_Setup_0__Lean_reprModuleSetup____x40_Lean_Setup___hyg_913____closed__3); +l___private_Lean_Setup_0__Lean_reprModuleSetup____x40_Lean_Setup___hyg_913____closed__4 = _init_l___private_Lean_Setup_0__Lean_reprModuleSetup____x40_Lean_Setup___hyg_913____closed__4(); +lean_mark_persistent(l___private_Lean_Setup_0__Lean_reprModuleSetup____x40_Lean_Setup___hyg_913____closed__4); +l___private_Lean_Setup_0__Lean_reprModuleSetup____x40_Lean_Setup___hyg_913____closed__5 = _init_l___private_Lean_Setup_0__Lean_reprModuleSetup____x40_Lean_Setup___hyg_913____closed__5(); 
+lean_mark_persistent(l___private_Lean_Setup_0__Lean_reprModuleSetup____x40_Lean_Setup___hyg_913____closed__5); +l___private_Lean_Setup_0__Lean_reprModuleSetup____x40_Lean_Setup___hyg_913____closed__6 = _init_l___private_Lean_Setup_0__Lean_reprModuleSetup____x40_Lean_Setup___hyg_913____closed__6(); +lean_mark_persistent(l___private_Lean_Setup_0__Lean_reprModuleSetup____x40_Lean_Setup___hyg_913____closed__6); +l___private_Lean_Setup_0__Lean_reprModuleSetup____x40_Lean_Setup___hyg_913____closed__7 = _init_l___private_Lean_Setup_0__Lean_reprModuleSetup____x40_Lean_Setup___hyg_913____closed__7(); +lean_mark_persistent(l___private_Lean_Setup_0__Lean_reprModuleSetup____x40_Lean_Setup___hyg_913____closed__7); +l___private_Lean_Setup_0__Lean_reprModuleSetup____x40_Lean_Setup___hyg_913____closed__8 = _init_l___private_Lean_Setup_0__Lean_reprModuleSetup____x40_Lean_Setup___hyg_913____closed__8(); +lean_mark_persistent(l___private_Lean_Setup_0__Lean_reprModuleSetup____x40_Lean_Setup___hyg_913____closed__8); +l___private_Lean_Setup_0__Lean_reprModuleSetup____x40_Lean_Setup___hyg_913____closed__9 = _init_l___private_Lean_Setup_0__Lean_reprModuleSetup____x40_Lean_Setup___hyg_913____closed__9(); +lean_mark_persistent(l___private_Lean_Setup_0__Lean_reprModuleSetup____x40_Lean_Setup___hyg_913____closed__9); +l___private_Lean_Setup_0__Lean_reprModuleSetup____x40_Lean_Setup___hyg_913____closed__10 = _init_l___private_Lean_Setup_0__Lean_reprModuleSetup____x40_Lean_Setup___hyg_913____closed__10(); +lean_mark_persistent(l___private_Lean_Setup_0__Lean_reprModuleSetup____x40_Lean_Setup___hyg_913____closed__10); +l___private_Lean_Setup_0__Lean_reprModuleSetup____x40_Lean_Setup___hyg_913____closed__11 = _init_l___private_Lean_Setup_0__Lean_reprModuleSetup____x40_Lean_Setup___hyg_913____closed__11(); +lean_mark_persistent(l___private_Lean_Setup_0__Lean_reprModuleSetup____x40_Lean_Setup___hyg_913____closed__11); +l___private_Lean_Setup_0__Lean_reprModuleSetup____x40_Lean_Setup___hyg_913____closed__12 = _init_l___private_Lean_Setup_0__Lean_reprModuleSetup____x40_Lean_Setup___hyg_913____closed__12(); +lean_mark_persistent(l___private_Lean_Setup_0__Lean_reprModuleSetup____x40_Lean_Setup___hyg_913____closed__12); +l___private_Lean_Setup_0__Lean_reprModuleSetup____x40_Lean_Setup___hyg_913____closed__13 = _init_l___private_Lean_Setup_0__Lean_reprModuleSetup____x40_Lean_Setup___hyg_913____closed__13(); +lean_mark_persistent(l___private_Lean_Setup_0__Lean_reprModuleSetup____x40_Lean_Setup___hyg_913____closed__13); +l___private_Lean_Setup_0__Lean_reprModuleSetup____x40_Lean_Setup___hyg_913____closed__14 = _init_l___private_Lean_Setup_0__Lean_reprModuleSetup____x40_Lean_Setup___hyg_913____closed__14(); +lean_mark_persistent(l___private_Lean_Setup_0__Lean_reprModuleSetup____x40_Lean_Setup___hyg_913____closed__14); +l___private_Lean_Setup_0__Lean_reprModuleSetup____x40_Lean_Setup___hyg_913____closed__15 = _init_l___private_Lean_Setup_0__Lean_reprModuleSetup____x40_Lean_Setup___hyg_913____closed__15(); +lean_mark_persistent(l___private_Lean_Setup_0__Lean_reprModuleSetup____x40_Lean_Setup___hyg_913____closed__15); +l___private_Lean_Setup_0__Lean_reprModuleSetup____x40_Lean_Setup___hyg_913____closed__16 = _init_l___private_Lean_Setup_0__Lean_reprModuleSetup____x40_Lean_Setup___hyg_913____closed__16(); +lean_mark_persistent(l___private_Lean_Setup_0__Lean_reprModuleSetup____x40_Lean_Setup___hyg_913____closed__16); +l___private_Lean_Setup_0__Lean_reprModuleSetup____x40_Lean_Setup___hyg_913____closed__17 = 
_init_l___private_Lean_Setup_0__Lean_reprModuleSetup____x40_Lean_Setup___hyg_913____closed__17(); +lean_mark_persistent(l___private_Lean_Setup_0__Lean_reprModuleSetup____x40_Lean_Setup___hyg_913____closed__17); +l___private_Lean_Setup_0__Lean_reprModuleSetup____x40_Lean_Setup___hyg_913____closed__18 = _init_l___private_Lean_Setup_0__Lean_reprModuleSetup____x40_Lean_Setup___hyg_913____closed__18(); +lean_mark_persistent(l___private_Lean_Setup_0__Lean_reprModuleSetup____x40_Lean_Setup___hyg_913____closed__18); +l___private_Lean_Setup_0__Lean_reprModuleSetup____x40_Lean_Setup___hyg_913____closed__19 = _init_l___private_Lean_Setup_0__Lean_reprModuleSetup____x40_Lean_Setup___hyg_913____closed__19(); +lean_mark_persistent(l___private_Lean_Setup_0__Lean_reprModuleSetup____x40_Lean_Setup___hyg_913____closed__19); +l___private_Lean_Setup_0__Lean_reprModuleSetup____x40_Lean_Setup___hyg_913____closed__20 = _init_l___private_Lean_Setup_0__Lean_reprModuleSetup____x40_Lean_Setup___hyg_913____closed__20(); +lean_mark_persistent(l___private_Lean_Setup_0__Lean_reprModuleSetup____x40_Lean_Setup___hyg_913____closed__20); +l___private_Lean_Setup_0__Lean_reprModuleSetup____x40_Lean_Setup___hyg_913____closed__21 = _init_l___private_Lean_Setup_0__Lean_reprModuleSetup____x40_Lean_Setup___hyg_913____closed__21(); +lean_mark_persistent(l___private_Lean_Setup_0__Lean_reprModuleSetup____x40_Lean_Setup___hyg_913____closed__21); +l___private_Lean_Setup_0__Lean_reprModuleSetup____x40_Lean_Setup___hyg_913____closed__22 = _init_l___private_Lean_Setup_0__Lean_reprModuleSetup____x40_Lean_Setup___hyg_913____closed__22(); +lean_mark_persistent(l___private_Lean_Setup_0__Lean_reprModuleSetup____x40_Lean_Setup___hyg_913____closed__22); +l___private_Lean_Setup_0__Lean_reprModuleSetup____x40_Lean_Setup___hyg_913____closed__23 = _init_l___private_Lean_Setup_0__Lean_reprModuleSetup____x40_Lean_Setup___hyg_913____closed__23(); +lean_mark_persistent(l___private_Lean_Setup_0__Lean_reprModuleSetup____x40_Lean_Setup___hyg_913____closed__23); +l___private_Lean_Setup_0__Lean_reprModuleSetup____x40_Lean_Setup___hyg_913____closed__24 = _init_l___private_Lean_Setup_0__Lean_reprModuleSetup____x40_Lean_Setup___hyg_913____closed__24(); +lean_mark_persistent(l___private_Lean_Setup_0__Lean_reprModuleSetup____x40_Lean_Setup___hyg_913____closed__24); +l___private_Lean_Setup_0__Lean_reprModuleSetup____x40_Lean_Setup___hyg_913____closed__25 = _init_l___private_Lean_Setup_0__Lean_reprModuleSetup____x40_Lean_Setup___hyg_913____closed__25(); +lean_mark_persistent(l___private_Lean_Setup_0__Lean_reprModuleSetup____x40_Lean_Setup___hyg_913____closed__25); +l___private_Lean_Setup_0__Lean_reprModuleSetup____x40_Lean_Setup___hyg_913____closed__26 = _init_l___private_Lean_Setup_0__Lean_reprModuleSetup____x40_Lean_Setup___hyg_913____closed__26(); +lean_mark_persistent(l___private_Lean_Setup_0__Lean_reprModuleSetup____x40_Lean_Setup___hyg_913____closed__26); +l___private_Lean_Setup_0__Lean_reprModuleSetup____x40_Lean_Setup___hyg_913____closed__27 = _init_l___private_Lean_Setup_0__Lean_reprModuleSetup____x40_Lean_Setup___hyg_913____closed__27(); +lean_mark_persistent(l___private_Lean_Setup_0__Lean_reprModuleSetup____x40_Lean_Setup___hyg_913____closed__27); +l___private_Lean_Setup_0__Lean_reprModuleSetup____x40_Lean_Setup___hyg_913____closed__28 = _init_l___private_Lean_Setup_0__Lean_reprModuleSetup____x40_Lean_Setup___hyg_913____closed__28(); +lean_mark_persistent(l___private_Lean_Setup_0__Lean_reprModuleSetup____x40_Lean_Setup___hyg_913____closed__28); 
+l___private_Lean_Setup_0__Lean_reprModuleSetup____x40_Lean_Setup___hyg_913____closed__29 = _init_l___private_Lean_Setup_0__Lean_reprModuleSetup____x40_Lean_Setup___hyg_913____closed__29(); +lean_mark_persistent(l___private_Lean_Setup_0__Lean_reprModuleSetup____x40_Lean_Setup___hyg_913____closed__29); +l_Lean_instReprModuleSetup___closed__1 = _init_l_Lean_instReprModuleSetup___closed__1(); +lean_mark_persistent(l_Lean_instReprModuleSetup___closed__1); +l_Lean_instReprModuleSetup = _init_l_Lean_instReprModuleSetup(); +lean_mark_persistent(l_Lean_instReprModuleSetup); +l_Lean_instInhabitedModuleSetup___closed__1 = _init_l_Lean_instInhabitedModuleSetup___closed__1(); +lean_mark_persistent(l_Lean_instInhabitedModuleSetup___closed__1); +l_Lean_instInhabitedModuleSetup___closed__2 = _init_l_Lean_instInhabitedModuleSetup___closed__2(); +lean_mark_persistent(l_Lean_instInhabitedModuleSetup___closed__2); +l_Lean_instInhabitedModuleSetup = _init_l_Lean_instInhabitedModuleSetup(); +lean_mark_persistent(l_Lean_instInhabitedModuleSetup); +l_Lean_instToJsonModuleSetup___closed__1 = _init_l_Lean_instToJsonModuleSetup___closed__1(); +lean_mark_persistent(l_Lean_instToJsonModuleSetup___closed__1); +l_Lean_instToJsonModuleSetup = _init_l_Lean_instToJsonModuleSetup(); +lean_mark_persistent(l_Lean_instToJsonModuleSetup); +l_Lean_Json_getObjValAs_x3f___at___private_Lean_Setup_0__Lean_fromJsonModuleSetup____x40_Lean_Setup___hyg_1209____spec__1___closed__1 = _init_l_Lean_Json_getObjValAs_x3f___at___private_Lean_Setup_0__Lean_fromJsonModuleSetup____x40_Lean_Setup___hyg_1209____spec__1___closed__1(); +lean_mark_persistent(l_Lean_Json_getObjValAs_x3f___at___private_Lean_Setup_0__Lean_fromJsonModuleSetup____x40_Lean_Setup___hyg_1209____spec__1___closed__1); +l_Lean_Json_getObjValAs_x3f___at___private_Lean_Setup_0__Lean_fromJsonModuleSetup____x40_Lean_Setup___hyg_1209____spec__3___closed__1 = _init_l_Lean_Json_getObjValAs_x3f___at___private_Lean_Setup_0__Lean_fromJsonModuleSetup____x40_Lean_Setup___hyg_1209____spec__3___closed__1(); +lean_mark_persistent(l_Lean_Json_getObjValAs_x3f___at___private_Lean_Setup_0__Lean_fromJsonModuleSetup____x40_Lean_Setup___hyg_1209____spec__3___closed__1); +l___private_Lean_Setup_0__Lean_fromJsonModuleSetup____x40_Lean_Setup___hyg_1209____closed__1 = _init_l___private_Lean_Setup_0__Lean_fromJsonModuleSetup____x40_Lean_Setup___hyg_1209____closed__1(); +lean_mark_persistent(l___private_Lean_Setup_0__Lean_fromJsonModuleSetup____x40_Lean_Setup___hyg_1209____closed__1); +l___private_Lean_Setup_0__Lean_fromJsonModuleSetup____x40_Lean_Setup___hyg_1209____closed__2 = _init_l___private_Lean_Setup_0__Lean_fromJsonModuleSetup____x40_Lean_Setup___hyg_1209____closed__2(); +lean_mark_persistent(l___private_Lean_Setup_0__Lean_fromJsonModuleSetup____x40_Lean_Setup___hyg_1209____closed__2); +l___private_Lean_Setup_0__Lean_fromJsonModuleSetup____x40_Lean_Setup___hyg_1209____closed__3 = _init_l___private_Lean_Setup_0__Lean_fromJsonModuleSetup____x40_Lean_Setup___hyg_1209____closed__3(); +lean_mark_persistent(l___private_Lean_Setup_0__Lean_fromJsonModuleSetup____x40_Lean_Setup___hyg_1209____closed__3); +l___private_Lean_Setup_0__Lean_fromJsonModuleSetup____x40_Lean_Setup___hyg_1209____closed__4 = _init_l___private_Lean_Setup_0__Lean_fromJsonModuleSetup____x40_Lean_Setup___hyg_1209____closed__4(); +lean_mark_persistent(l___private_Lean_Setup_0__Lean_fromJsonModuleSetup____x40_Lean_Setup___hyg_1209____closed__4); +l___private_Lean_Setup_0__Lean_fromJsonModuleSetup____x40_Lean_Setup___hyg_1209____closed__5 = 
_init_l___private_Lean_Setup_0__Lean_fromJsonModuleSetup____x40_Lean_Setup___hyg_1209____closed__5(); +lean_mark_persistent(l___private_Lean_Setup_0__Lean_fromJsonModuleSetup____x40_Lean_Setup___hyg_1209____closed__5); +l___private_Lean_Setup_0__Lean_fromJsonModuleSetup____x40_Lean_Setup___hyg_1209____closed__6 = _init_l___private_Lean_Setup_0__Lean_fromJsonModuleSetup____x40_Lean_Setup___hyg_1209____closed__6(); +lean_mark_persistent(l___private_Lean_Setup_0__Lean_fromJsonModuleSetup____x40_Lean_Setup___hyg_1209____closed__6); +l___private_Lean_Setup_0__Lean_fromJsonModuleSetup____x40_Lean_Setup___hyg_1209____closed__7 = _init_l___private_Lean_Setup_0__Lean_fromJsonModuleSetup____x40_Lean_Setup___hyg_1209____closed__7(); +lean_mark_persistent(l___private_Lean_Setup_0__Lean_fromJsonModuleSetup____x40_Lean_Setup___hyg_1209____closed__7); +l___private_Lean_Setup_0__Lean_fromJsonModuleSetup____x40_Lean_Setup___hyg_1209____closed__8 = _init_l___private_Lean_Setup_0__Lean_fromJsonModuleSetup____x40_Lean_Setup___hyg_1209____closed__8(); +lean_mark_persistent(l___private_Lean_Setup_0__Lean_fromJsonModuleSetup____x40_Lean_Setup___hyg_1209____closed__8); +l___private_Lean_Setup_0__Lean_fromJsonModuleSetup____x40_Lean_Setup___hyg_1209____closed__9 = _init_l___private_Lean_Setup_0__Lean_fromJsonModuleSetup____x40_Lean_Setup___hyg_1209____closed__9(); +lean_mark_persistent(l___private_Lean_Setup_0__Lean_fromJsonModuleSetup____x40_Lean_Setup___hyg_1209____closed__9); +l___private_Lean_Setup_0__Lean_fromJsonModuleSetup____x40_Lean_Setup___hyg_1209____closed__10 = _init_l___private_Lean_Setup_0__Lean_fromJsonModuleSetup____x40_Lean_Setup___hyg_1209____closed__10(); +lean_mark_persistent(l___private_Lean_Setup_0__Lean_fromJsonModuleSetup____x40_Lean_Setup___hyg_1209____closed__10); +l___private_Lean_Setup_0__Lean_fromJsonModuleSetup____x40_Lean_Setup___hyg_1209____closed__11 = _init_l___private_Lean_Setup_0__Lean_fromJsonModuleSetup____x40_Lean_Setup___hyg_1209____closed__11(); +lean_mark_persistent(l___private_Lean_Setup_0__Lean_fromJsonModuleSetup____x40_Lean_Setup___hyg_1209____closed__11); +l___private_Lean_Setup_0__Lean_fromJsonModuleSetup____x40_Lean_Setup___hyg_1209____closed__12 = _init_l___private_Lean_Setup_0__Lean_fromJsonModuleSetup____x40_Lean_Setup___hyg_1209____closed__12(); +lean_mark_persistent(l___private_Lean_Setup_0__Lean_fromJsonModuleSetup____x40_Lean_Setup___hyg_1209____closed__12); +l___private_Lean_Setup_0__Lean_fromJsonModuleSetup____x40_Lean_Setup___hyg_1209____closed__13 = _init_l___private_Lean_Setup_0__Lean_fromJsonModuleSetup____x40_Lean_Setup___hyg_1209____closed__13(); +lean_mark_persistent(l___private_Lean_Setup_0__Lean_fromJsonModuleSetup____x40_Lean_Setup___hyg_1209____closed__13); +l___private_Lean_Setup_0__Lean_fromJsonModuleSetup____x40_Lean_Setup___hyg_1209____closed__14 = _init_l___private_Lean_Setup_0__Lean_fromJsonModuleSetup____x40_Lean_Setup___hyg_1209____closed__14(); +lean_mark_persistent(l___private_Lean_Setup_0__Lean_fromJsonModuleSetup____x40_Lean_Setup___hyg_1209____closed__14); +l___private_Lean_Setup_0__Lean_fromJsonModuleSetup____x40_Lean_Setup___hyg_1209____closed__15 = _init_l___private_Lean_Setup_0__Lean_fromJsonModuleSetup____x40_Lean_Setup___hyg_1209____closed__15(); +lean_mark_persistent(l___private_Lean_Setup_0__Lean_fromJsonModuleSetup____x40_Lean_Setup___hyg_1209____closed__15); +l___private_Lean_Setup_0__Lean_fromJsonModuleSetup____x40_Lean_Setup___hyg_1209____closed__16 = 
_init_l___private_Lean_Setup_0__Lean_fromJsonModuleSetup____x40_Lean_Setup___hyg_1209____closed__16(); +lean_mark_persistent(l___private_Lean_Setup_0__Lean_fromJsonModuleSetup____x40_Lean_Setup___hyg_1209____closed__16); +l___private_Lean_Setup_0__Lean_fromJsonModuleSetup____x40_Lean_Setup___hyg_1209____closed__17 = _init_l___private_Lean_Setup_0__Lean_fromJsonModuleSetup____x40_Lean_Setup___hyg_1209____closed__17(); +lean_mark_persistent(l___private_Lean_Setup_0__Lean_fromJsonModuleSetup____x40_Lean_Setup___hyg_1209____closed__17); +l___private_Lean_Setup_0__Lean_fromJsonModuleSetup____x40_Lean_Setup___hyg_1209____closed__18 = _init_l___private_Lean_Setup_0__Lean_fromJsonModuleSetup____x40_Lean_Setup___hyg_1209____closed__18(); +lean_mark_persistent(l___private_Lean_Setup_0__Lean_fromJsonModuleSetup____x40_Lean_Setup___hyg_1209____closed__18); +l___private_Lean_Setup_0__Lean_fromJsonModuleSetup____x40_Lean_Setup___hyg_1209____closed__19 = _init_l___private_Lean_Setup_0__Lean_fromJsonModuleSetup____x40_Lean_Setup___hyg_1209____closed__19(); +lean_mark_persistent(l___private_Lean_Setup_0__Lean_fromJsonModuleSetup____x40_Lean_Setup___hyg_1209____closed__19); +l___private_Lean_Setup_0__Lean_fromJsonModuleSetup____x40_Lean_Setup___hyg_1209____closed__20 = _init_l___private_Lean_Setup_0__Lean_fromJsonModuleSetup____x40_Lean_Setup___hyg_1209____closed__20(); +lean_mark_persistent(l___private_Lean_Setup_0__Lean_fromJsonModuleSetup____x40_Lean_Setup___hyg_1209____closed__20); +l___private_Lean_Setup_0__Lean_fromJsonModuleSetup____x40_Lean_Setup___hyg_1209____closed__21 = _init_l___private_Lean_Setup_0__Lean_fromJsonModuleSetup____x40_Lean_Setup___hyg_1209____closed__21(); +lean_mark_persistent(l___private_Lean_Setup_0__Lean_fromJsonModuleSetup____x40_Lean_Setup___hyg_1209____closed__21); +l___private_Lean_Setup_0__Lean_fromJsonModuleSetup____x40_Lean_Setup___hyg_1209____closed__22 = _init_l___private_Lean_Setup_0__Lean_fromJsonModuleSetup____x40_Lean_Setup___hyg_1209____closed__22(); +lean_mark_persistent(l___private_Lean_Setup_0__Lean_fromJsonModuleSetup____x40_Lean_Setup___hyg_1209____closed__22); +l___private_Lean_Setup_0__Lean_fromJsonModuleSetup____x40_Lean_Setup___hyg_1209____closed__23 = _init_l___private_Lean_Setup_0__Lean_fromJsonModuleSetup____x40_Lean_Setup___hyg_1209____closed__23(); +lean_mark_persistent(l___private_Lean_Setup_0__Lean_fromJsonModuleSetup____x40_Lean_Setup___hyg_1209____closed__23); +l___private_Lean_Setup_0__Lean_fromJsonModuleSetup____x40_Lean_Setup___hyg_1209____closed__24 = _init_l___private_Lean_Setup_0__Lean_fromJsonModuleSetup____x40_Lean_Setup___hyg_1209____closed__24(); +lean_mark_persistent(l___private_Lean_Setup_0__Lean_fromJsonModuleSetup____x40_Lean_Setup___hyg_1209____closed__24); +l___private_Lean_Setup_0__Lean_fromJsonModuleSetup____x40_Lean_Setup___hyg_1209____closed__25 = _init_l___private_Lean_Setup_0__Lean_fromJsonModuleSetup____x40_Lean_Setup___hyg_1209____closed__25(); +lean_mark_persistent(l___private_Lean_Setup_0__Lean_fromJsonModuleSetup____x40_Lean_Setup___hyg_1209____closed__25); +l___private_Lean_Setup_0__Lean_fromJsonModuleSetup____x40_Lean_Setup___hyg_1209____closed__26 = _init_l___private_Lean_Setup_0__Lean_fromJsonModuleSetup____x40_Lean_Setup___hyg_1209____closed__26(); +lean_mark_persistent(l___private_Lean_Setup_0__Lean_fromJsonModuleSetup____x40_Lean_Setup___hyg_1209____closed__26); +l___private_Lean_Setup_0__Lean_fromJsonModuleSetup____x40_Lean_Setup___hyg_1209____closed__27 = 
_init_l___private_Lean_Setup_0__Lean_fromJsonModuleSetup____x40_Lean_Setup___hyg_1209____closed__27(); +lean_mark_persistent(l___private_Lean_Setup_0__Lean_fromJsonModuleSetup____x40_Lean_Setup___hyg_1209____closed__27); +l___private_Lean_Setup_0__Lean_fromJsonModuleSetup____x40_Lean_Setup___hyg_1209____closed__28 = _init_l___private_Lean_Setup_0__Lean_fromJsonModuleSetup____x40_Lean_Setup___hyg_1209____closed__28(); +lean_mark_persistent(l___private_Lean_Setup_0__Lean_fromJsonModuleSetup____x40_Lean_Setup___hyg_1209____closed__28); +l___private_Lean_Setup_0__Lean_fromJsonModuleSetup____x40_Lean_Setup___hyg_1209____closed__29 = _init_l___private_Lean_Setup_0__Lean_fromJsonModuleSetup____x40_Lean_Setup___hyg_1209____closed__29(); +lean_mark_persistent(l___private_Lean_Setup_0__Lean_fromJsonModuleSetup____x40_Lean_Setup___hyg_1209____closed__29); +l___private_Lean_Setup_0__Lean_fromJsonModuleSetup____x40_Lean_Setup___hyg_1209____closed__30 = _init_l___private_Lean_Setup_0__Lean_fromJsonModuleSetup____x40_Lean_Setup___hyg_1209____closed__30(); +lean_mark_persistent(l___private_Lean_Setup_0__Lean_fromJsonModuleSetup____x40_Lean_Setup___hyg_1209____closed__30); +l___private_Lean_Setup_0__Lean_fromJsonModuleSetup____x40_Lean_Setup___hyg_1209____closed__31 = _init_l___private_Lean_Setup_0__Lean_fromJsonModuleSetup____x40_Lean_Setup___hyg_1209____closed__31(); +lean_mark_persistent(l___private_Lean_Setup_0__Lean_fromJsonModuleSetup____x40_Lean_Setup___hyg_1209____closed__31); +l___private_Lean_Setup_0__Lean_fromJsonModuleSetup____x40_Lean_Setup___hyg_1209____closed__32 = _init_l___private_Lean_Setup_0__Lean_fromJsonModuleSetup____x40_Lean_Setup___hyg_1209____closed__32(); +lean_mark_persistent(l___private_Lean_Setup_0__Lean_fromJsonModuleSetup____x40_Lean_Setup___hyg_1209____closed__32); +l_Lean_instFromJsonModuleSetup___closed__1 = _init_l_Lean_instFromJsonModuleSetup___closed__1(); +lean_mark_persistent(l_Lean_instFromJsonModuleSetup___closed__1); +l_Lean_instFromJsonModuleSetup = _init_l_Lean_instFromJsonModuleSetup(); +lean_mark_persistent(l_Lean_instFromJsonModuleSetup); +l_Lean_ModuleSetup_load___closed__1 = _init_l_Lean_ModuleSetup_load___closed__1(); +lean_mark_persistent(l_Lean_ModuleSetup_load___closed__1); +l_Lean_ModuleSetup_load___closed__2 = _init_l_Lean_ModuleSetup_load___closed__2(); +lean_mark_persistent(l_Lean_ModuleSetup_load___closed__2); +l_Lean_ModuleSetup_load___closed__3 = _init_l_Lean_ModuleSetup_load___closed__3(); +lean_mark_persistent(l_Lean_ModuleSetup_load___closed__3); +return lean_io_result_mk_ok(lean_box(0)); +} +#ifdef __cplusplus +} +#endif diff --git a/stage0/stdlib/Lean/Util/FileSetupInfo.c b/stage0/stdlib/Lean/Util/FileSetupInfo.c index b4ce657bc30d..72f220b7d45b 100644 --- a/stage0/stdlib/Lean/Util/FileSetupInfo.c +++ b/stage0/stdlib/Lean/Util/FileSetupInfo.c @@ -56,6 +56,7 @@ lean_object* l_List_flatMapTR_go___at___private_Lean_Util_Paths_0__Lean_toJsonLe static lean_object* l_Lean_Json_getObjValAs_x3f___at___private_Lean_Util_FileSetupInfo_0__Lean_fromJsonFileSetupInfo____x40_Lean_Util_FileSetupInfo___hyg_26____spec__2___closed__2; lean_object* lean_array_mk(lean_object*); lean_object* lean_string_append(lean_object*, lean_object*); +lean_object* l_Lean_Json_pretty(lean_object*, lean_object*); static lean_object* l___private_Lean_Util_FileSetupInfo_0__Lean_fromJsonFileSetupInfo____x40_Lean_Util_FileSetupInfo___hyg_26____closed__11; LEAN_EXPORT uint8_t 
l___private_Lean_Util_FileSetupInfo_0__Lean_fromJsonFileSetupInfo____x40_Lean_Util_FileSetupInfo___hyg_26____lambda__1(lean_object*); LEAN_EXPORT lean_object* l_Lean_Json_getObjValAs_x3f___at___private_Lean_Util_FileSetupInfo_0__Lean_fromJsonFileSetupInfo____x40_Lean_Util_FileSetupInfo___hyg_26____spec__1(lean_object* x_1, lean_object* x_2) { @@ -71,18 +72,16 @@ static lean_object* _init_l_Lean_Json_getObjValAs_x3f___at___private_Lean_Util_F _start: { lean_object* x_1; -x_1 = lean_mk_string_unchecked("invalid LeanOptions type", 24, 24); +x_1 = lean_mk_string_unchecked("expected a `NameMap`, got '", 27, 27); return x_1; } } static lean_object* _init_l_Lean_Json_getObjValAs_x3f___at___private_Lean_Util_FileSetupInfo_0__Lean_fromJsonFileSetupInfo____x40_Lean_Util_FileSetupInfo___hyg_26____spec__2___closed__2() { _start: { -lean_object* x_1; lean_object* x_2; -x_1 = l_Lean_Json_getObjValAs_x3f___at___private_Lean_Util_FileSetupInfo_0__Lean_fromJsonFileSetupInfo____x40_Lean_Util_FileSetupInfo___hyg_26____spec__2___closed__1; -x_2 = lean_alloc_ctor(0, 1, 0); -lean_ctor_set(x_2, 0, x_1); -return x_2; +lean_object* x_1; +x_1 = lean_mk_string_unchecked("'", 1, 1); +return x_1; } } LEAN_EXPORT lean_object* l_Lean_Json_getObjValAs_x3f___at___private_Lean_Util_FileSetupInfo_0__Lean_fromJsonFileSetupInfo____x40_Lean_Util_FileSetupInfo___hyg_26____spec__2(lean_object* x_1, lean_object* x_2) { @@ -90,59 +89,117 @@ LEAN_EXPORT lean_object* l_Lean_Json_getObjValAs_x3f___at___private_Lean_Util_Fi { lean_object* x_3; x_3 = l_Lean_Json_getObjValD(x_1, x_2); -if (lean_obj_tag(x_3) == 5) +switch (lean_obj_tag(x_3)) { +case 0: { -lean_object* x_4; lean_object* x_5; lean_object* x_6; -x_4 = lean_ctor_get(x_3, 0); -lean_inc(x_4); +lean_object* x_4; lean_object* x_5; lean_object* x_6; lean_object* x_7; lean_object* x_8; lean_object* x_9; lean_object* x_10; +x_4 = lean_unsigned_to_nat(80u); +x_5 = l_Lean_Json_pretty(x_3, x_4); +x_6 = l_Lean_Json_getObjValAs_x3f___at___private_Lean_Util_FileSetupInfo_0__Lean_fromJsonFileSetupInfo____x40_Lean_Util_FileSetupInfo___hyg_26____spec__2___closed__1; +x_7 = lean_string_append(x_6, x_5); +lean_dec(x_5); +x_8 = l_Lean_Json_getObjValAs_x3f___at___private_Lean_Util_FileSetupInfo_0__Lean_fromJsonFileSetupInfo____x40_Lean_Util_FileSetupInfo___hyg_26____spec__2___closed__2; +x_9 = lean_string_append(x_7, x_8); +x_10 = lean_alloc_ctor(0, 1, 0); +lean_ctor_set(x_10, 0, x_9); +return x_10; +} +case 1: +{ +lean_object* x_11; lean_object* x_12; lean_object* x_13; lean_object* x_14; lean_object* x_15; lean_object* x_16; lean_object* x_17; +x_11 = lean_unsigned_to_nat(80u); +x_12 = l_Lean_Json_pretty(x_3, x_11); +x_13 = l_Lean_Json_getObjValAs_x3f___at___private_Lean_Util_FileSetupInfo_0__Lean_fromJsonFileSetupInfo____x40_Lean_Util_FileSetupInfo___hyg_26____spec__2___closed__1; +x_14 = lean_string_append(x_13, x_12); +lean_dec(x_12); +x_15 = l_Lean_Json_getObjValAs_x3f___at___private_Lean_Util_FileSetupInfo_0__Lean_fromJsonFileSetupInfo____x40_Lean_Util_FileSetupInfo___hyg_26____spec__2___closed__2; +x_16 = lean_string_append(x_14, x_15); +x_17 = lean_alloc_ctor(0, 1, 0); +lean_ctor_set(x_17, 0, x_16); +return x_17; +} +case 5: +{ +lean_object* x_18; lean_object* x_19; lean_object* x_20; +x_18 = lean_ctor_get(x_3, 0); +lean_inc(x_18); lean_dec(x_3); -x_5 = lean_box(0); -x_6 = l_Lean_RBNode_foldM___at_Lean_instFromJsonLeanOptions___spec__1(x_5, x_4); -if (lean_obj_tag(x_6) == 0) +x_19 = lean_box(0); +x_20 = l_Lean_RBNode_foldM___at_Lean_instFromJsonLeanOptions___spec__1(x_19, x_18); +if 
(lean_obj_tag(x_20) == 0) { -uint8_t x_7; -x_7 = !lean_is_exclusive(x_6); -if (x_7 == 0) +uint8_t x_21; +x_21 = !lean_is_exclusive(x_20); +if (x_21 == 0) { -return x_6; +return x_20; } else { -lean_object* x_8; lean_object* x_9; -x_8 = lean_ctor_get(x_6, 0); -lean_inc(x_8); -lean_dec(x_6); -x_9 = lean_alloc_ctor(0, 1, 0); -lean_ctor_set(x_9, 0, x_8); -return x_9; +lean_object* x_22; lean_object* x_23; +x_22 = lean_ctor_get(x_20, 0); +lean_inc(x_22); +lean_dec(x_20); +x_23 = lean_alloc_ctor(0, 1, 0); +lean_ctor_set(x_23, 0, x_22); +return x_23; } } else { -uint8_t x_10; -x_10 = !lean_is_exclusive(x_6); -if (x_10 == 0) +uint8_t x_24; +x_24 = !lean_is_exclusive(x_20); +if (x_24 == 0) { -return x_6; +return x_20; } else { -lean_object* x_11; lean_object* x_12; -x_11 = lean_ctor_get(x_6, 0); -lean_inc(x_11); -lean_dec(x_6); -x_12 = lean_alloc_ctor(1, 1, 0); -lean_ctor_set(x_12, 0, x_11); -return x_12; -} -} +lean_object* x_25; lean_object* x_26; +x_25 = lean_ctor_get(x_20, 0); +lean_inc(x_25); +lean_dec(x_20); +x_26 = lean_alloc_ctor(1, 1, 0); +lean_ctor_set(x_26, 0, x_25); +return x_26; +} +} +} +default: +{ +lean_object* x_27; lean_object* x_28; uint8_t x_29; +x_27 = lean_unsigned_to_nat(80u); +lean_inc(x_3); +x_28 = l_Lean_Json_pretty(x_3, x_27); +x_29 = !lean_is_exclusive(x_3); +if (x_29 == 0) +{ +lean_object* x_30; lean_object* x_31; lean_object* x_32; lean_object* x_33; lean_object* x_34; +x_30 = lean_ctor_get(x_3, 0); +lean_dec(x_30); +x_31 = l_Lean_Json_getObjValAs_x3f___at___private_Lean_Util_FileSetupInfo_0__Lean_fromJsonFileSetupInfo____x40_Lean_Util_FileSetupInfo___hyg_26____spec__2___closed__1; +x_32 = lean_string_append(x_31, x_28); +lean_dec(x_28); +x_33 = l_Lean_Json_getObjValAs_x3f___at___private_Lean_Util_FileSetupInfo_0__Lean_fromJsonFileSetupInfo____x40_Lean_Util_FileSetupInfo___hyg_26____spec__2___closed__2; +x_34 = lean_string_append(x_32, x_33); +lean_ctor_set_tag(x_3, 0); +lean_ctor_set(x_3, 0, x_34); +return x_3; } else { -lean_object* x_13; +lean_object* x_35; lean_object* x_36; lean_object* x_37; lean_object* x_38; lean_object* x_39; lean_dec(x_3); -x_13 = l_Lean_Json_getObjValAs_x3f___at___private_Lean_Util_FileSetupInfo_0__Lean_fromJsonFileSetupInfo____x40_Lean_Util_FileSetupInfo___hyg_26____spec__2___closed__2; -return x_13; +x_35 = l_Lean_Json_getObjValAs_x3f___at___private_Lean_Util_FileSetupInfo_0__Lean_fromJsonFileSetupInfo____x40_Lean_Util_FileSetupInfo___hyg_26____spec__2___closed__1; +x_36 = lean_string_append(x_35, x_28); +lean_dec(x_28); +x_37 = l_Lean_Json_getObjValAs_x3f___at___private_Lean_Util_FileSetupInfo_0__Lean_fromJsonFileSetupInfo____x40_Lean_Util_FileSetupInfo___hyg_26____spec__2___closed__2; +x_38 = lean_string_append(x_36, x_37); +x_39 = lean_alloc_ctor(0, 1, 0); +lean_ctor_set(x_39, 0, x_38); +return x_39; +} +} } } } diff --git a/stage0/stdlib/Lean/Util/LeanOptions.c b/stage0/stdlib/Lean/Util/LeanOptions.c index 1b5b5088a32d..d170188ac078 100644 --- a/stage0/stdlib/Lean/Util/LeanOptions.c +++ b/stage0/stdlib/Lean/Util/LeanOptions.c @@ -15,135 +15,142 @@ extern "C" { #endif lean_object* l_Lean_JsonNumber_fromNat(lean_object*); lean_object* l_Lean_Name_reprPrec(lean_object*, lean_object*); -static lean_object* l___private_Lean_Util_LeanOptions_0__Lean_reprLeanOptions____x40_Lean_Util_LeanOptions___hyg_543____closed__10; -LEAN_EXPORT lean_object* l_Std_Format_joinSep___at___private_Lean_Util_LeanOptions_0__Lean_reprLeanOptions____x40_Lean_Util_LeanOptions___hyg_543____spec__6(lean_object*, lean_object*); -static lean_object* 
l___private_Lean_Util_LeanOptions_0__Lean_reprLeanOptions____x40_Lean_Util_LeanOptions___hyg_543____closed__13; static lean_object* l___private_Lean_Util_LeanOptions_0__Lean_reprLeanOptionValue____x40_Lean_Util_LeanOptions___hyg_43____closed__7; +LEAN_EXPORT lean_object* l___private_Lean_Util_LeanOptions_0__Lean_reprLeanOptions____x40_Lean_Util_LeanOptions___hyg_541_(lean_object*, lean_object*); static lean_object* l_Lean_instFromJsonLeanOptionValue___closed__1; LEAN_EXPORT lean_object* l___private_Lean_Util_LeanOptions_0__Lean_reprLeanOptionValue____x40_Lean_Util_LeanOptions___hyg_43____boxed(lean_object*, lean_object*); static lean_object* l___private_Lean_Util_LeanOptions_0__Lean_reprLeanOptionValue____x40_Lean_Util_LeanOptions___hyg_43____closed__24; -LEAN_EXPORT lean_object* l_List_repr___at___private_Lean_Util_LeanOptions_0__Lean_reprLeanOptions____x40_Lean_Util_LeanOptions___hyg_543____spec__3___boxed(lean_object*, lean_object*); -LEAN_EXPORT lean_object* l_repr___at___private_Lean_Util_LeanOptions_0__Lean_reprLeanOptions____x40_Lean_Util_LeanOptions___hyg_543____spec__4(lean_object*); -LEAN_EXPORT lean_object* l_List_foldl___at___private_Lean_Util_LeanOptions_0__Lean_reprLeanOptions____x40_Lean_Util_LeanOptions___hyg_543____spec__7(lean_object*, lean_object*, lean_object*); +LEAN_EXPORT lean_object* l_Std_Format_joinSep___at___private_Lean_Util_LeanOptions_0__Lean_reprLeanOptions____x40_Lean_Util_LeanOptions___hyg_541____spec__6(lean_object*, lean_object*); LEAN_EXPORT lean_object* l_Lean_RBNode_ins___at_Lean_LeanOptions_fromOptions_x3f___spec__2(lean_object*, lean_object*, lean_object*); -static lean_object* l_List_repr___at___private_Lean_Util_LeanOptions_0__Lean_reprLeanOptions____x40_Lean_Util_LeanOptions___hyg_543____spec__3___closed__1; LEAN_EXPORT lean_object* l_Lean_RBNode_forIn_visit___at_Lean_LeanOptions_toOptions___spec__1(lean_object*, lean_object*); -LEAN_EXPORT lean_object* l___private_Lean_Util_LeanOptions_0__Lean_reprLeanOptions____x40_Lean_Util_LeanOptions___hyg_543_(lean_object*, lean_object*); static lean_object* l_Lean_instInhabitedLeanOptionValue___closed__2; LEAN_EXPORT lean_object* l_Lean_instReprLeanOptionValue; -static lean_object* l_Prod_repr___at___private_Lean_Util_LeanOptions_0__Lean_reprLeanOptions____x40_Lean_Util_LeanOptions___hyg_543____spec__5___closed__9; lean_object* l_String_quote(lean_object*); lean_object* l_Lean_Name_toString(lean_object*, uint8_t, lean_object*); LEAN_EXPORT lean_object* l_Lean_LeanOptionValue_asCliFlagValue(lean_object*); uint8_t l_Lean_RBNode_isRed___rarg(lean_object*); -LEAN_EXPORT lean_object* l_Lean_RBMap_toList___at___private_Lean_Util_LeanOptions_0__Lean_reprLeanOptions____x40_Lean_Util_LeanOptions___hyg_543____spec__1(lean_object*); static lean_object* l___private_Lean_Util_LeanOptions_0__Lean_reprLeanOptionValue____x40_Lean_Util_LeanOptions___hyg_43____closed__25; +LEAN_EXPORT lean_object* l_Lean_RBNode_revFold___at___private_Lean_Util_LeanOptions_0__Lean_reprLeanOptions____x40_Lean_Util_LeanOptions___hyg_541____spec__2(lean_object*, lean_object*); LEAN_EXPORT lean_object* l_Lean_LeanOptions_fromOptions_x3f(lean_object*); +uint8_t l_Lean_Name_isAnonymous(lean_object*); static lean_object* l___private_Lean_Util_LeanOptions_0__Lean_reprLeanOptionValue____x40_Lean_Util_LeanOptions___hyg_43____closed__1; -static lean_object* l_Lean_instFromJsonLeanOptions___closed__2; static lean_object* l___private_Lean_Util_LeanOptions_0__Lean_reprLeanOptionValue____x40_Lean_Util_LeanOptions___hyg_43____closed__17; -static lean_object* 
l_List_repr___at___private_Lean_Util_LeanOptions_0__Lean_reprLeanOptions____x40_Lean_Util_LeanOptions___hyg_543____spec__3___closed__2; static lean_object* l___private_Lean_Util_LeanOptions_0__Lean_reprLeanOptionValue____x40_Lean_Util_LeanOptions___hyg_43____closed__22; -LEAN_EXPORT lean_object* l_Lean_RBMap_toList___at___private_Lean_Util_LeanOptions_0__Lean_reprLeanOptions____x40_Lean_Util_LeanOptions___hyg_543____spec__1___boxed(lean_object*); static lean_object* l_Lean_instValueLeanOptionValue___closed__1; LEAN_EXPORT lean_object* l_List_forIn_x27_loop___at_Lean_LeanOptions_fromOptions_x3f___spec__3___boxed(lean_object*, lean_object*, lean_object*, lean_object*, lean_object*, lean_object*); -LEAN_EXPORT lean_object* l_List_repr___at___private_Lean_Util_LeanOptions_0__Lean_reprLeanOptions____x40_Lean_Util_LeanOptions___hyg_543____spec__3(lean_object*, lean_object*); +static lean_object* l___private_Lean_Util_LeanOptions_0__Lean_reprLeanOptions____x40_Lean_Util_LeanOptions___hyg_541____closed__15; +static lean_object* l___private_Lean_Util_LeanOptions_0__Lean_reprLeanOptions____x40_Lean_Util_LeanOptions___hyg_541____closed__8; +static lean_object* l_Lean_RBNode_foldM___at_Lean_instFromJsonLeanOptions___spec__1___closed__3; +LEAN_EXPORT lean_object* l_Prod_repr___at___private_Lean_Util_LeanOptions_0__Lean_reprLeanOptions____x40_Lean_Util_LeanOptions___hyg_541____spec__5(lean_object*, lean_object*); static lean_object* l___private_Lean_Util_LeanOptions_0__Lean_reprLeanOptionValue____x40_Lean_Util_LeanOptions___hyg_43____closed__9; static lean_object* l_Lean_instValueLeanOptionValue___closed__3; LEAN_EXPORT lean_object* l_Lean_instCoeBoolLeanOptionValue(uint8_t); +static lean_object* l_List_repr___at___private_Lean_Util_LeanOptions_0__Lean_reprLeanOptions____x40_Lean_Util_LeanOptions___hyg_541____spec__3___closed__7; +static lean_object* l___private_Lean_Util_LeanOptions_0__Lean_reprLeanOptions____x40_Lean_Util_LeanOptions___hyg_541____closed__11; static lean_object* l___private_Lean_Util_LeanOptions_0__Lean_reprLeanOptionValue____x40_Lean_Util_LeanOptions___hyg_43____closed__4; -LEAN_EXPORT lean_object* l_Prod_repr___at___private_Lean_Util_LeanOptions_0__Lean_reprLeanOptions____x40_Lean_Util_LeanOptions___hyg_543____spec__5___boxed(lean_object*, lean_object*); +static lean_object* l_Prod_repr___at___private_Lean_Util_LeanOptions_0__Lean_reprLeanOptions____x40_Lean_Util_LeanOptions___hyg_541____spec__5___closed__3; +static lean_object* l_List_repr___at___private_Lean_Util_LeanOptions_0__Lean_reprLeanOptions____x40_Lean_Util_LeanOptions___hyg_541____spec__3___closed__2; lean_object* l_Lean_KVMap_insertCore(lean_object*, lean_object*, lean_object*); -static lean_object* l___private_Lean_Util_LeanOptions_0__Lean_reprLeanOptions____x40_Lean_Util_LeanOptions___hyg_543____closed__2; +uint8_t lean_string_dec_eq(lean_object*, lean_object*); +static lean_object* l___private_Lean_Util_LeanOptions_0__Lean_reprLeanOptions____x40_Lean_Util_LeanOptions___hyg_541____closed__12; LEAN_EXPORT lean_object* l_Lean_RBNode_foldM___at_Lean_instFromJsonLeanOptions___spec__1(lean_object*, lean_object*); -static lean_object* l___private_Lean_Util_LeanOptions_0__Lean_reprLeanOptions____x40_Lean_Util_LeanOptions___hyg_543____closed__11; -static lean_object* l_List_repr___at___private_Lean_Util_LeanOptions_0__Lean_reprLeanOptions____x40_Lean_Util_LeanOptions___hyg_543____spec__3___closed__7; +static lean_object* 
l___private_Lean_Util_LeanOptions_0__Lean_reprLeanOptions____x40_Lean_Util_LeanOptions___hyg_541____closed__3; +static lean_object* l___private_Lean_Util_LeanOptions_0__Lean_reprLeanOptions____x40_Lean_Util_LeanOptions___hyg_541____closed__13; +LEAN_EXPORT lean_object* l_Lean_RBMap_toList___at___private_Lean_Util_LeanOptions_0__Lean_reprLeanOptions____x40_Lean_Util_LeanOptions___hyg_541____spec__1___boxed(lean_object*); LEAN_EXPORT lean_object* l___private_Lean_Util_LeanOptions_0__Lean_reprLeanOptionValue____x40_Lean_Util_LeanOptions___hyg_43_(lean_object*, lean_object*); -static lean_object* l_Prod_repr___at___private_Lean_Util_LeanOptions_0__Lean_reprLeanOptions____x40_Lean_Util_LeanOptions___hyg_543____spec__5___closed__6; -static lean_object* l_List_repr___at___private_Lean_Util_LeanOptions_0__Lean_reprLeanOptions____x40_Lean_Util_LeanOptions___hyg_543____spec__3___closed__4; -uint8_t l_Lean_Name_cmp(lean_object*, lean_object*); -static lean_object* l_Prod_repr___at___private_Lean_Util_LeanOptions_0__Lean_reprLeanOptions____x40_Lean_Util_LeanOptions___hyg_543____spec__5___closed__7; +static lean_object* l_Prod_repr___at___private_Lean_Util_LeanOptions_0__Lean_reprLeanOptions____x40_Lean_Util_LeanOptions___hyg_541____spec__5___closed__7; +static lean_object* l_Prod_repr___at___private_Lean_Util_LeanOptions_0__Lean_reprLeanOptions____x40_Lean_Util_LeanOptions___hyg_541____spec__5___closed__1; +static lean_object* l___private_Lean_Util_LeanOptions_0__Lean_reprLeanOptions____x40_Lean_Util_LeanOptions___hyg_541____closed__5; +static lean_object* l___private_Lean_Util_LeanOptions_0__Lean_reprLeanOptions____x40_Lean_Util_LeanOptions___hyg_541____closed__4; LEAN_EXPORT lean_object* l_Lean_instToJsonLeanOptionValue(lean_object*); +LEAN_EXPORT lean_object* l_Prod_repr___at___private_Lean_Util_LeanOptions_0__Lean_reprLeanOptions____x40_Lean_Util_LeanOptions___hyg_541____spec__5___boxed(lean_object*, lean_object*); lean_object* l_Lean_RBNode_setBlack___rarg(lean_object*); lean_object* lean_nat_to_int(lean_object*); LEAN_EXPORT uint8_t l_Lean_RBNode_fold___at_Lean_instToJsonLeanOptions___spec__1___lambda__1(lean_object*); -static lean_object* l___private_Lean_Util_LeanOptions_0__Lean_reprLeanOptions____x40_Lean_Util_LeanOptions___hyg_543____closed__5; -static lean_object* l___private_Lean_Util_LeanOptions_0__Lean_reprLeanOptions____x40_Lean_Util_LeanOptions___hyg_543____closed__3; static lean_object* l___private_Lean_Util_LeanOptions_0__Lean_reprLeanOptionValue____x40_Lean_Util_LeanOptions___hyg_43____closed__20; +static lean_object* l_Prod_repr___at___private_Lean_Util_LeanOptions_0__Lean_reprLeanOptions____x40_Lean_Util_LeanOptions___hyg_541____spec__5___closed__9; static lean_object* l___private_Lean_Util_LeanOptions_0__Lean_reprLeanOptionValue____x40_Lean_Util_LeanOptions___hyg_43____closed__12; +LEAN_EXPORT lean_object* l_Lean_RBMap_toList___at___private_Lean_Util_LeanOptions_0__Lean_reprLeanOptions____x40_Lean_Util_LeanOptions___hyg_541____spec__1(lean_object*); LEAN_EXPORT lean_object* l_Lean_LeanOptions_toOptions(lean_object*); -LEAN_EXPORT lean_object* l_Prod_repr___at___private_Lean_Util_LeanOptions_0__Lean_reprLeanOptions____x40_Lean_Util_LeanOptions___hyg_543____spec__5(lean_object*, lean_object*); +static lean_object* l_Lean_RBNode_foldM___at_Lean_instFromJsonLeanOptions___spec__1___closed__2; +LEAN_EXPORT lean_object* l_List_foldl___at___private_Lean_Util_LeanOptions_0__Lean_reprLeanOptions____x40_Lean_Util_LeanOptions___hyg_541____spec__7(lean_object*, lean_object*, lean_object*); 
LEAN_EXPORT lean_object* l_Lean_LeanOptionValue_toDataValue(lean_object*); +static lean_object* l_List_repr___at___private_Lean_Util_LeanOptions_0__Lean_reprLeanOptions____x40_Lean_Util_LeanOptions___hyg_541____spec__3___closed__6; +static lean_object* l___private_Lean_Util_LeanOptions_0__Lean_reprLeanOptions____x40_Lean_Util_LeanOptions___hyg_541____closed__9; +static lean_object* l___private_Lean_Util_LeanOptions_0__Lean_reprLeanOptions____x40_Lean_Util_LeanOptions___hyg_541____closed__6; +lean_object* l_Lean_RBNode_insert___at_Lean_NameMap_insert___spec__1___rarg(lean_object*, lean_object*, lean_object*); +static lean_object* l_Lean_RBNode_foldM___at_Lean_instFromJsonLeanOptions___spec__1___closed__1; static lean_object* l___private_Lean_Util_LeanOptions_0__Lean_reprLeanOptionValue____x40_Lean_Util_LeanOptions___hyg_43____closed__13; static lean_object* l___private_Lean_Util_LeanOptions_0__Lean_reprLeanOptionValue____x40_Lean_Util_LeanOptions___hyg_43____closed__14; static lean_object* l_Lean_instValueLeanOptionValue___closed__2; static lean_object* l___private_Lean_Util_LeanOptions_0__Lean_reprLeanOptionValue____x40_Lean_Util_LeanOptions___hyg_43____closed__16; -static lean_object* l___private_Lean_Util_LeanOptions_0__Lean_reprLeanOptions____x40_Lean_Util_LeanOptions___hyg_543____closed__7; +LEAN_EXPORT lean_object* l_List_repr___at___private_Lean_Util_LeanOptions_0__Lean_reprLeanOptions____x40_Lean_Util_LeanOptions___hyg_541____spec__3(lean_object*, lean_object*); +static lean_object* l_Prod_repr___at___private_Lean_Util_LeanOptions_0__Lean_reprLeanOptions____x40_Lean_Util_LeanOptions___hyg_541____spec__5___closed__4; static lean_object* l_Lean_instFromJsonLeanOptionValue___closed__2; +static lean_object* l_Prod_repr___at___private_Lean_Util_LeanOptions_0__Lean_reprLeanOptions____x40_Lean_Util_LeanOptions___hyg_541____spec__5___closed__5; LEAN_EXPORT lean_object* l_Lean_instCoeBoolLeanOptionValue___boxed(lean_object*); -static lean_object* l_Prod_repr___at___private_Lean_Util_LeanOptions_0__Lean_reprLeanOptions____x40_Lean_Util_LeanOptions___hyg_543____spec__5___closed__2; -static lean_object* l_Prod_repr___at___private_Lean_Util_LeanOptions_0__Lean_reprLeanOptions____x40_Lean_Util_LeanOptions___hyg_543____spec__5___closed__1; +static lean_object* l_Prod_repr___at___private_Lean_Util_LeanOptions_0__Lean_reprLeanOptions____x40_Lean_Util_LeanOptions___hyg_541____spec__5___closed__8; +LEAN_EXPORT lean_object* l_Lean_RBNode_revFold___at___private_Lean_Util_LeanOptions_0__Lean_reprLeanOptions____x40_Lean_Util_LeanOptions___hyg_541____spec__2___boxed(lean_object*, lean_object*); static lean_object* l_Lean_instInhabitedLeanOptionValue___closed__1; static lean_object* l___private_Lean_Util_LeanOptions_0__Lean_reprLeanOptionValue____x40_Lean_Util_LeanOptions___hyg_43____closed__6; LEAN_EXPORT lean_object* l_Lean_instCoeStringLeanOptionValue(lean_object*); LEAN_EXPORT lean_object* l_Lean_LeanOptionValue_ofDataValue_x3f(lean_object*); static lean_object* l_Lean_LeanOptionValue_asCliFlagValue___closed__1; -static lean_object* l___private_Lean_Util_LeanOptions_0__Lean_reprLeanOptions____x40_Lean_Util_LeanOptions___hyg_543____closed__6; +static lean_object* l_List_repr___at___private_Lean_Util_LeanOptions_0__Lean_reprLeanOptions____x40_Lean_Util_LeanOptions___hyg_541____spec__3___closed__5; static lean_object* l___private_Lean_Util_LeanOptions_0__Lean_reprLeanOptionValue____x40_Lean_Util_LeanOptions___hyg_43____closed__3; +LEAN_EXPORT lean_object* l_Lean_instEmptyCollectionLeanOptions; static 
lean_object* l_Lean_instFromJsonLeanOptionValue___closed__3; LEAN_EXPORT lean_object* l_Lean_instInhabitedLeanOptionValue; -LEAN_EXPORT lean_object* l_Lean_RBNode_revFold___at___private_Lean_Util_LeanOptions_0__Lean_reprLeanOptions____x40_Lean_Util_LeanOptions___hyg_543____spec__2___boxed(lean_object*, lean_object*); lean_object* lean_nat_abs(lean_object*); LEAN_EXPORT lean_object* l_Lean_RBNode_fold___at_Lean_instToJsonLeanOptions___spec__1___lambda__1___boxed(lean_object*); -static lean_object* l_Prod_repr___at___private_Lean_Util_LeanOptions_0__Lean_reprLeanOptions____x40_Lean_Util_LeanOptions___hyg_543____spec__5___closed__4; -static lean_object* l_List_repr___at___private_Lean_Util_LeanOptions_0__Lean_reprLeanOptions____x40_Lean_Util_LeanOptions___hyg_543____spec__3___closed__3; +static lean_object* l_Prod_repr___at___private_Lean_Util_LeanOptions_0__Lean_reprLeanOptions____x40_Lean_Util_LeanOptions___hyg_541____spec__5___closed__6; static lean_object* l___private_Lean_Util_LeanOptions_0__Lean_reprLeanOptionValue____x40_Lean_Util_LeanOptions___hyg_43____closed__11; static lean_object* l_Lean_instFromJsonLeanOptions___closed__1; lean_object* lean_string_length(lean_object*); uint8_t lean_nat_dec_eq(lean_object*, lean_object*); -static lean_object* l___private_Lean_Util_LeanOptions_0__Lean_reprLeanOptions____x40_Lean_Util_LeanOptions___hyg_543____closed__4; -static lean_object* l___private_Lean_Util_LeanOptions_0__Lean_reprLeanOptions____x40_Lean_Util_LeanOptions___hyg_543____closed__12; +static lean_object* l_Prod_repr___at___private_Lean_Util_LeanOptions_0__Lean_reprLeanOptions____x40_Lean_Util_LeanOptions___hyg_541____spec__5___closed__2; LEAN_EXPORT lean_object* l_Lean_RBNode_fold___at_Lean_instToJsonLeanOptions___spec__1(lean_object*, lean_object*); LEAN_EXPORT lean_object* l_Lean_instReprLeanOptions; LEAN_EXPORT lean_object* l_Lean_instInhabitedLeanOptions; -static lean_object* l___private_Lean_Util_LeanOptions_0__Lean_reprLeanOptions____x40_Lean_Util_LeanOptions___hyg_543____closed__9; -static lean_object* l___private_Lean_Util_LeanOptions_0__Lean_reprLeanOptions____x40_Lean_Util_LeanOptions___hyg_543____closed__14; static lean_object* l___private_Lean_Util_LeanOptions_0__Lean_reprLeanOptionValue____x40_Lean_Util_LeanOptions___hyg_43____closed__23; lean_object* l_Std_Format_joinSep___at_Prod_repr___spec__1(lean_object*, lean_object*); -static lean_object* l_Prod_repr___at___private_Lean_Util_LeanOptions_0__Lean_reprLeanOptions____x40_Lean_Util_LeanOptions___hyg_543____spec__5___closed__8; lean_object* l_String_toName(lean_object*); lean_object* l_Repr_addAppParen(lean_object*, lean_object*); static lean_object* l___private_Lean_Util_LeanOptions_0__Lean_reprLeanOptionValue____x40_Lean_Util_LeanOptions___hyg_43____closed__21; +LEAN_EXPORT lean_object* l_repr___at___private_Lean_Util_LeanOptions_0__Lean_reprLeanOptions____x40_Lean_Util_LeanOptions___hyg_541____spec__4(lean_object*); uint8_t lean_int_dec_lt(lean_object*, lean_object*); -static lean_object* l_Prod_repr___at___private_Lean_Util_LeanOptions_0__Lean_reprLeanOptions____x40_Lean_Util_LeanOptions___hyg_543____spec__5___closed__5; +static lean_object* l_List_repr___at___private_Lean_Util_LeanOptions_0__Lean_reprLeanOptions____x40_Lean_Util_LeanOptions___hyg_541____spec__3___closed__8; +static lean_object* l___private_Lean_Util_LeanOptions_0__Lean_reprLeanOptions____x40_Lean_Util_LeanOptions___hyg_541____closed__14; static lean_object* 
l___private_Lean_Util_LeanOptions_0__Lean_reprLeanOptionValue____x40_Lean_Util_LeanOptions___hyg_43____closed__18; LEAN_EXPORT lean_object* l_Lean_instCoeNatLeanOptionValue(lean_object*); -static lean_object* l___private_Lean_Util_LeanOptions_0__Lean_reprLeanOptions____x40_Lean_Util_LeanOptions___hyg_543____closed__8; lean_object* l_List_reverse___rarg(lean_object*); -LEAN_EXPORT lean_object* l_Lean_RBNode_revFold___at___private_Lean_Util_LeanOptions_0__Lean_reprLeanOptions____x40_Lean_Util_LeanOptions___hyg_543____spec__2(lean_object*, lean_object*); +static lean_object* l___private_Lean_Util_LeanOptions_0__Lean_reprLeanOptions____x40_Lean_Util_LeanOptions___hyg_541____closed__10; static lean_object* l_Lean_RBNode_fold___at_Lean_instToJsonLeanOptions___spec__1___closed__1; +uint8_t l_Lean_Name_quickCmp(lean_object*, lean_object*); static lean_object* l___private_Lean_Util_LeanOptions_0__Lean_reprLeanOptionValue____x40_Lean_Util_LeanOptions___hyg_43____closed__8; -static lean_object* l___private_Lean_Util_LeanOptions_0__Lean_reprLeanOptions____x40_Lean_Util_LeanOptions___hyg_543____closed__15; LEAN_EXPORT lean_object* l_Lean_instFromJsonLeanOptionValue(lean_object*); LEAN_EXPORT lean_object* l_Lean_instValueLeanOptionValue; LEAN_EXPORT lean_object* l_Lean_instFromJsonLeanOptions(lean_object*); -static lean_object* l_List_repr___at___private_Lean_Util_LeanOptions_0__Lean_reprLeanOptions____x40_Lean_Util_LeanOptions___hyg_543____spec__3___closed__6; +static lean_object* l_List_repr___at___private_Lean_Util_LeanOptions_0__Lean_reprLeanOptions____x40_Lean_Util_LeanOptions___hyg_541____spec__3___closed__4; +LEAN_EXPORT lean_object* l___private_Lean_Util_LeanOptions_0__Lean_reprLeanOptions____x40_Lean_Util_LeanOptions___hyg_541____boxed(lean_object*, lean_object*); extern lean_object* l_Lean_KVMap_empty; lean_object* lean_string_append(lean_object*, lean_object*); -static lean_object* l_List_repr___at___private_Lean_Util_LeanOptions_0__Lean_reprLeanOptions____x40_Lean_Util_LeanOptions___hyg_543____spec__3___closed__5; +static lean_object* l_List_repr___at___private_Lean_Util_LeanOptions_0__Lean_reprLeanOptions____x40_Lean_Util_LeanOptions___hyg_541____spec__3___closed__3; static lean_object* l_Lean_instReprLeanOptions___closed__1; +static lean_object* l_List_repr___at___private_Lean_Util_LeanOptions_0__Lean_reprLeanOptions____x40_Lean_Util_LeanOptions___hyg_541____spec__3___closed__1; static lean_object* l_Lean_instReprLeanOptionValue___closed__1; -static lean_object* l_List_repr___at___private_Lean_Util_LeanOptions_0__Lean_reprLeanOptions____x40_Lean_Util_LeanOptions___hyg_543____spec__3___closed__8; +static lean_object* l___private_Lean_Util_LeanOptions_0__Lean_reprLeanOptions____x40_Lean_Util_LeanOptions___hyg_541____closed__2; uint8_t lean_nat_dec_le(lean_object*, lean_object*); -static lean_object* l_Prod_repr___at___private_Lean_Util_LeanOptions_0__Lean_reprLeanOptions____x40_Lean_Util_LeanOptions___hyg_543____spec__5___closed__3; +LEAN_EXPORT lean_object* l_List_repr___at___private_Lean_Util_LeanOptions_0__Lean_reprLeanOptions____x40_Lean_Util_LeanOptions___hyg_541____spec__3___boxed(lean_object*, lean_object*); static lean_object* l___private_Lean_Util_LeanOptions_0__Lean_reprLeanOptionValue____x40_Lean_Util_LeanOptions___hyg_43____closed__15; lean_object* l_Lean_RBNode_insert___at_Lean_Json_mkObj___spec__1(lean_object*, lean_object*, lean_object*); -static lean_object* l___private_Lean_Util_LeanOptions_0__Lean_reprLeanOptions____x40_Lean_Util_LeanOptions___hyg_543____closed__1; 
-LEAN_EXPORT lean_object* l___private_Lean_Util_LeanOptions_0__Lean_reprLeanOptions____x40_Lean_Util_LeanOptions___hyg_543____boxed(lean_object*, lean_object*); +lean_object* l_Lean_Json_pretty(lean_object*, lean_object*); LEAN_EXPORT lean_object* l_Lean_instToJsonLeanOptions(lean_object*); lean_object* l___private_Init_Data_Repr_0__Nat_reprFast(lean_object*); +static lean_object* l___private_Lean_Util_LeanOptions_0__Lean_reprLeanOptions____x40_Lean_Util_LeanOptions___hyg_541____closed__1; static lean_object* l___private_Lean_Util_LeanOptions_0__Lean_reprLeanOptionValue____x40_Lean_Util_LeanOptions___hyg_43____closed__10; static lean_object* l___private_Lean_Util_LeanOptions_0__Lean_reprLeanOptionValue____x40_Lean_Util_LeanOptions___hyg_43____closed__5; +static lean_object* l___private_Lean_Util_LeanOptions_0__Lean_reprLeanOptions____x40_Lean_Util_LeanOptions___hyg_541____closed__7; LEAN_EXPORT lean_object* l_List_forIn_x27_loop___at_Lean_LeanOptions_fromOptions_x3f___spec__3(lean_object*, lean_object*, lean_object*, lean_object*, lean_object*, lean_object*); static lean_object* l___private_Lean_Util_LeanOptions_0__Lean_reprLeanOptionValue____x40_Lean_Util_LeanOptions___hyg_43____closed__19; LEAN_EXPORT lean_object* l_Lean_RBNode_insert___at_Lean_LeanOptions_fromOptions_x3f___spec__1(lean_object*, lean_object*, lean_object*); @@ -1237,7 +1244,7 @@ x_1 = lean_box(0); return x_1; } } -LEAN_EXPORT lean_object* l_Lean_RBNode_revFold___at___private_Lean_Util_LeanOptions_0__Lean_reprLeanOptions____x40_Lean_Util_LeanOptions___hyg_543____spec__2(lean_object* x_1, lean_object* x_2) { +LEAN_EXPORT lean_object* l_Lean_RBNode_revFold___at___private_Lean_Util_LeanOptions_0__Lean_reprLeanOptions____x40_Lean_Util_LeanOptions___hyg_541____spec__2(lean_object* x_1, lean_object* x_2) { _start: { if (lean_obj_tag(x_2) == 0) @@ -1251,7 +1258,7 @@ x_3 = lean_ctor_get(x_2, 0); x_4 = lean_ctor_get(x_2, 1); x_5 = lean_ctor_get(x_2, 2); x_6 = lean_ctor_get(x_2, 3); -x_7 = l_Lean_RBNode_revFold___at___private_Lean_Util_LeanOptions_0__Lean_reprLeanOptions____x40_Lean_Util_LeanOptions___hyg_543____spec__2(x_1, x_6); +x_7 = l_Lean_RBNode_revFold___at___private_Lean_Util_LeanOptions_0__Lean_reprLeanOptions____x40_Lean_Util_LeanOptions___hyg_541____spec__2(x_1, x_6); lean_inc(x_5); lean_inc(x_4); x_8 = lean_alloc_ctor(0, 2, 0); @@ -1266,16 +1273,16 @@ goto _start; } } } -LEAN_EXPORT lean_object* l_Lean_RBMap_toList___at___private_Lean_Util_LeanOptions_0__Lean_reprLeanOptions____x40_Lean_Util_LeanOptions___hyg_543____spec__1(lean_object* x_1) { +LEAN_EXPORT lean_object* l_Lean_RBMap_toList___at___private_Lean_Util_LeanOptions_0__Lean_reprLeanOptions____x40_Lean_Util_LeanOptions___hyg_541____spec__1(lean_object* x_1) { _start: { lean_object* x_2; lean_object* x_3; x_2 = lean_box(0); -x_3 = l_Lean_RBNode_revFold___at___private_Lean_Util_LeanOptions_0__Lean_reprLeanOptions____x40_Lean_Util_LeanOptions___hyg_543____spec__2(x_2, x_1); +x_3 = l_Lean_RBNode_revFold___at___private_Lean_Util_LeanOptions_0__Lean_reprLeanOptions____x40_Lean_Util_LeanOptions___hyg_541____spec__2(x_2, x_1); return x_3; } } -static lean_object* _init_l_Prod_repr___at___private_Lean_Util_LeanOptions_0__Lean_reprLeanOptions____x40_Lean_Util_LeanOptions___hyg_543____spec__5___closed__1() { +static lean_object* _init_l_Prod_repr___at___private_Lean_Util_LeanOptions_0__Lean_reprLeanOptions____x40_Lean_Util_LeanOptions___hyg_541____spec__5___closed__1() { _start: { lean_object* x_1; @@ -1283,21 +1290,21 @@ x_1 = lean_mk_string_unchecked(",", 1, 1); return 
x_1; } } -static lean_object* _init_l_Prod_repr___at___private_Lean_Util_LeanOptions_0__Lean_reprLeanOptions____x40_Lean_Util_LeanOptions___hyg_543____spec__5___closed__2() { +static lean_object* _init_l_Prod_repr___at___private_Lean_Util_LeanOptions_0__Lean_reprLeanOptions____x40_Lean_Util_LeanOptions___hyg_541____spec__5___closed__2() { _start: { lean_object* x_1; lean_object* x_2; -x_1 = l_Prod_repr___at___private_Lean_Util_LeanOptions_0__Lean_reprLeanOptions____x40_Lean_Util_LeanOptions___hyg_543____spec__5___closed__1; +x_1 = l_Prod_repr___at___private_Lean_Util_LeanOptions_0__Lean_reprLeanOptions____x40_Lean_Util_LeanOptions___hyg_541____spec__5___closed__1; x_2 = lean_alloc_ctor(3, 1, 0); lean_ctor_set(x_2, 0, x_1); return x_2; } } -static lean_object* _init_l_Prod_repr___at___private_Lean_Util_LeanOptions_0__Lean_reprLeanOptions____x40_Lean_Util_LeanOptions___hyg_543____spec__5___closed__3() { +static lean_object* _init_l_Prod_repr___at___private_Lean_Util_LeanOptions_0__Lean_reprLeanOptions____x40_Lean_Util_LeanOptions___hyg_541____spec__5___closed__3() { _start: { lean_object* x_1; lean_object* x_2; lean_object* x_3; -x_1 = l_Prod_repr___at___private_Lean_Util_LeanOptions_0__Lean_reprLeanOptions____x40_Lean_Util_LeanOptions___hyg_543____spec__5___closed__2; +x_1 = l_Prod_repr___at___private_Lean_Util_LeanOptions_0__Lean_reprLeanOptions____x40_Lean_Util_LeanOptions___hyg_541____spec__5___closed__2; x_2 = lean_box(1); x_3 = lean_alloc_ctor(5, 2, 0); lean_ctor_set(x_3, 0, x_1); @@ -1305,7 +1312,7 @@ lean_ctor_set(x_3, 1, x_2); return x_3; } } -static lean_object* _init_l_Prod_repr___at___private_Lean_Util_LeanOptions_0__Lean_reprLeanOptions____x40_Lean_Util_LeanOptions___hyg_543____spec__5___closed__4() { +static lean_object* _init_l_Prod_repr___at___private_Lean_Util_LeanOptions_0__Lean_reprLeanOptions____x40_Lean_Util_LeanOptions___hyg_541____spec__5___closed__4() { _start: { lean_object* x_1; @@ -1313,35 +1320,35 @@ x_1 = lean_mk_string_unchecked("(", 1, 1); return x_1; } } -static lean_object* _init_l_Prod_repr___at___private_Lean_Util_LeanOptions_0__Lean_reprLeanOptions____x40_Lean_Util_LeanOptions___hyg_543____spec__5___closed__5() { +static lean_object* _init_l_Prod_repr___at___private_Lean_Util_LeanOptions_0__Lean_reprLeanOptions____x40_Lean_Util_LeanOptions___hyg_541____spec__5___closed__5() { _start: { lean_object* x_1; lean_object* x_2; -x_1 = l_Prod_repr___at___private_Lean_Util_LeanOptions_0__Lean_reprLeanOptions____x40_Lean_Util_LeanOptions___hyg_543____spec__5___closed__4; +x_1 = l_Prod_repr___at___private_Lean_Util_LeanOptions_0__Lean_reprLeanOptions____x40_Lean_Util_LeanOptions___hyg_541____spec__5___closed__4; x_2 = lean_string_length(x_1); return x_2; } } -static lean_object* _init_l_Prod_repr___at___private_Lean_Util_LeanOptions_0__Lean_reprLeanOptions____x40_Lean_Util_LeanOptions___hyg_543____spec__5___closed__6() { +static lean_object* _init_l_Prod_repr___at___private_Lean_Util_LeanOptions_0__Lean_reprLeanOptions____x40_Lean_Util_LeanOptions___hyg_541____spec__5___closed__6() { _start: { lean_object* x_1; lean_object* x_2; -x_1 = l_Prod_repr___at___private_Lean_Util_LeanOptions_0__Lean_reprLeanOptions____x40_Lean_Util_LeanOptions___hyg_543____spec__5___closed__5; +x_1 = l_Prod_repr___at___private_Lean_Util_LeanOptions_0__Lean_reprLeanOptions____x40_Lean_Util_LeanOptions___hyg_541____spec__5___closed__5; x_2 = lean_nat_to_int(x_1); return x_2; } } -static lean_object* 
_init_l_Prod_repr___at___private_Lean_Util_LeanOptions_0__Lean_reprLeanOptions____x40_Lean_Util_LeanOptions___hyg_543____spec__5___closed__7() { +static lean_object* _init_l_Prod_repr___at___private_Lean_Util_LeanOptions_0__Lean_reprLeanOptions____x40_Lean_Util_LeanOptions___hyg_541____spec__5___closed__7() { _start: { lean_object* x_1; lean_object* x_2; -x_1 = l_Prod_repr___at___private_Lean_Util_LeanOptions_0__Lean_reprLeanOptions____x40_Lean_Util_LeanOptions___hyg_543____spec__5___closed__4; +x_1 = l_Prod_repr___at___private_Lean_Util_LeanOptions_0__Lean_reprLeanOptions____x40_Lean_Util_LeanOptions___hyg_541____spec__5___closed__4; x_2 = lean_alloc_ctor(3, 1, 0); lean_ctor_set(x_2, 0, x_1); return x_2; } } -static lean_object* _init_l_Prod_repr___at___private_Lean_Util_LeanOptions_0__Lean_reprLeanOptions____x40_Lean_Util_LeanOptions___hyg_543____spec__5___closed__8() { +static lean_object* _init_l_Prod_repr___at___private_Lean_Util_LeanOptions_0__Lean_reprLeanOptions____x40_Lean_Util_LeanOptions___hyg_541____spec__5___closed__8() { _start: { lean_object* x_1; @@ -1349,17 +1356,17 @@ x_1 = lean_mk_string_unchecked(")", 1, 1); return x_1; } } -static lean_object* _init_l_Prod_repr___at___private_Lean_Util_LeanOptions_0__Lean_reprLeanOptions____x40_Lean_Util_LeanOptions___hyg_543____spec__5___closed__9() { +static lean_object* _init_l_Prod_repr___at___private_Lean_Util_LeanOptions_0__Lean_reprLeanOptions____x40_Lean_Util_LeanOptions___hyg_541____spec__5___closed__9() { _start: { lean_object* x_1; lean_object* x_2; -x_1 = l_Prod_repr___at___private_Lean_Util_LeanOptions_0__Lean_reprLeanOptions____x40_Lean_Util_LeanOptions___hyg_543____spec__5___closed__8; +x_1 = l_Prod_repr___at___private_Lean_Util_LeanOptions_0__Lean_reprLeanOptions____x40_Lean_Util_LeanOptions___hyg_541____spec__5___closed__8; x_2 = lean_alloc_ctor(3, 1, 0); lean_ctor_set(x_2, 0, x_1); return x_2; } } -LEAN_EXPORT lean_object* l_Prod_repr___at___private_Lean_Util_LeanOptions_0__Lean_reprLeanOptions____x40_Lean_Util_LeanOptions___hyg_543____spec__5(lean_object* x_1, lean_object* x_2) { +LEAN_EXPORT lean_object* l_Prod_repr___at___private_Lean_Util_LeanOptions_0__Lean_reprLeanOptions____x40_Lean_Util_LeanOptions___hyg_541____spec__5(lean_object* x_1, lean_object* x_2) { _start: { uint8_t x_3; @@ -1380,17 +1387,17 @@ x_10 = lean_alloc_ctor(1, 2, 0); lean_ctor_set(x_10, 0, x_9); lean_ctor_set(x_10, 1, x_1); x_11 = l_List_reverse___rarg(x_10); -x_12 = l_Prod_repr___at___private_Lean_Util_LeanOptions_0__Lean_reprLeanOptions____x40_Lean_Util_LeanOptions___hyg_543____spec__5___closed__3; +x_12 = l_Prod_repr___at___private_Lean_Util_LeanOptions_0__Lean_reprLeanOptions____x40_Lean_Util_LeanOptions___hyg_541____spec__5___closed__3; x_13 = l_Std_Format_joinSep___at_Prod_repr___spec__1(x_11, x_12); -x_14 = l_Prod_repr___at___private_Lean_Util_LeanOptions_0__Lean_reprLeanOptions____x40_Lean_Util_LeanOptions___hyg_543____spec__5___closed__7; +x_14 = l_Prod_repr___at___private_Lean_Util_LeanOptions_0__Lean_reprLeanOptions____x40_Lean_Util_LeanOptions___hyg_541____spec__5___closed__7; x_15 = lean_alloc_ctor(5, 2, 0); lean_ctor_set(x_15, 0, x_14); lean_ctor_set(x_15, 1, x_13); -x_16 = l_Prod_repr___at___private_Lean_Util_LeanOptions_0__Lean_reprLeanOptions____x40_Lean_Util_LeanOptions___hyg_543____spec__5___closed__9; +x_16 = l_Prod_repr___at___private_Lean_Util_LeanOptions_0__Lean_reprLeanOptions____x40_Lean_Util_LeanOptions___hyg_541____spec__5___closed__9; x_17 = lean_alloc_ctor(5, 2, 0); lean_ctor_set(x_17, 0, x_15); 
lean_ctor_set(x_17, 1, x_16); -x_18 = l_Prod_repr___at___private_Lean_Util_LeanOptions_0__Lean_reprLeanOptions____x40_Lean_Util_LeanOptions___hyg_543____spec__5___closed__6; +x_18 = l_Prod_repr___at___private_Lean_Util_LeanOptions_0__Lean_reprLeanOptions____x40_Lean_Util_LeanOptions___hyg_541____spec__5___closed__6; x_19 = lean_alloc_ctor(4, 2, 0); lean_ctor_set(x_19, 0, x_18); lean_ctor_set(x_19, 1, x_17); @@ -1419,17 +1426,17 @@ x_29 = lean_alloc_ctor(1, 2, 0); lean_ctor_set(x_29, 0, x_28); lean_ctor_set(x_29, 1, x_27); x_30 = l_List_reverse___rarg(x_29); -x_31 = l_Prod_repr___at___private_Lean_Util_LeanOptions_0__Lean_reprLeanOptions____x40_Lean_Util_LeanOptions___hyg_543____spec__5___closed__3; +x_31 = l_Prod_repr___at___private_Lean_Util_LeanOptions_0__Lean_reprLeanOptions____x40_Lean_Util_LeanOptions___hyg_541____spec__5___closed__3; x_32 = l_Std_Format_joinSep___at_Prod_repr___spec__1(x_30, x_31); -x_33 = l_Prod_repr___at___private_Lean_Util_LeanOptions_0__Lean_reprLeanOptions____x40_Lean_Util_LeanOptions___hyg_543____spec__5___closed__7; +x_33 = l_Prod_repr___at___private_Lean_Util_LeanOptions_0__Lean_reprLeanOptions____x40_Lean_Util_LeanOptions___hyg_541____spec__5___closed__7; x_34 = lean_alloc_ctor(5, 2, 0); lean_ctor_set(x_34, 0, x_33); lean_ctor_set(x_34, 1, x_32); -x_35 = l_Prod_repr___at___private_Lean_Util_LeanOptions_0__Lean_reprLeanOptions____x40_Lean_Util_LeanOptions___hyg_543____spec__5___closed__9; +x_35 = l_Prod_repr___at___private_Lean_Util_LeanOptions_0__Lean_reprLeanOptions____x40_Lean_Util_LeanOptions___hyg_541____spec__5___closed__9; x_36 = lean_alloc_ctor(5, 2, 0); lean_ctor_set(x_36, 0, x_34); lean_ctor_set(x_36, 1, x_35); -x_37 = l_Prod_repr___at___private_Lean_Util_LeanOptions_0__Lean_reprLeanOptions____x40_Lean_Util_LeanOptions___hyg_543____spec__5___closed__6; +x_37 = l_Prod_repr___at___private_Lean_Util_LeanOptions_0__Lean_reprLeanOptions____x40_Lean_Util_LeanOptions___hyg_541____spec__5___closed__6; x_38 = lean_alloc_ctor(4, 2, 0); lean_ctor_set(x_38, 0, x_37); lean_ctor_set(x_38, 1, x_36); @@ -1441,16 +1448,16 @@ return x_40; } } } -LEAN_EXPORT lean_object* l_repr___at___private_Lean_Util_LeanOptions_0__Lean_reprLeanOptions____x40_Lean_Util_LeanOptions___hyg_543____spec__4(lean_object* x_1) { +LEAN_EXPORT lean_object* l_repr___at___private_Lean_Util_LeanOptions_0__Lean_reprLeanOptions____x40_Lean_Util_LeanOptions___hyg_541____spec__4(lean_object* x_1) { _start: { lean_object* x_2; lean_object* x_3; x_2 = lean_unsigned_to_nat(0u); -x_3 = l_Prod_repr___at___private_Lean_Util_LeanOptions_0__Lean_reprLeanOptions____x40_Lean_Util_LeanOptions___hyg_543____spec__5(x_1, x_2); +x_3 = l_Prod_repr___at___private_Lean_Util_LeanOptions_0__Lean_reprLeanOptions____x40_Lean_Util_LeanOptions___hyg_541____spec__5(x_1, x_2); return x_3; } } -LEAN_EXPORT lean_object* l_List_foldl___at___private_Lean_Util_LeanOptions_0__Lean_reprLeanOptions____x40_Lean_Util_LeanOptions___hyg_543____spec__7(lean_object* x_1, lean_object* x_2, lean_object* x_3) { +LEAN_EXPORT lean_object* l_List_foldl___at___private_Lean_Util_LeanOptions_0__Lean_reprLeanOptions____x40_Lean_Util_LeanOptions___hyg_541____spec__7(lean_object* x_1, lean_object* x_2, lean_object* x_3) { _start: { if (lean_obj_tag(x_3) == 0) @@ -1472,7 +1479,7 @@ lean_ctor_set_tag(x_3, 5); lean_ctor_set(x_3, 1, x_1); lean_ctor_set(x_3, 0, x_2); x_7 = lean_unsigned_to_nat(0u); -x_8 = l_Prod_repr___at___private_Lean_Util_LeanOptions_0__Lean_reprLeanOptions____x40_Lean_Util_LeanOptions___hyg_543____spec__5(x_5, x_7); +x_8 = 
l_Prod_repr___at___private_Lean_Util_LeanOptions_0__Lean_reprLeanOptions____x40_Lean_Util_LeanOptions___hyg_541____spec__5(x_5, x_7); x_9 = lean_alloc_ctor(5, 2, 0); lean_ctor_set(x_9, 0, x_3); lean_ctor_set(x_9, 1, x_8); @@ -1493,7 +1500,7 @@ x_13 = lean_alloc_ctor(5, 2, 0); lean_ctor_set(x_13, 0, x_2); lean_ctor_set(x_13, 1, x_1); x_14 = lean_unsigned_to_nat(0u); -x_15 = l_Prod_repr___at___private_Lean_Util_LeanOptions_0__Lean_reprLeanOptions____x40_Lean_Util_LeanOptions___hyg_543____spec__5(x_11, x_14); +x_15 = l_Prod_repr___at___private_Lean_Util_LeanOptions_0__Lean_reprLeanOptions____x40_Lean_Util_LeanOptions___hyg_541____spec__5(x_11, x_14); x_16 = lean_alloc_ctor(5, 2, 0); lean_ctor_set(x_16, 0, x_13); lean_ctor_set(x_16, 1, x_15); @@ -1504,7 +1511,7 @@ goto _start; } } } -LEAN_EXPORT lean_object* l_Std_Format_joinSep___at___private_Lean_Util_LeanOptions_0__Lean_reprLeanOptions____x40_Lean_Util_LeanOptions___hyg_543____spec__6(lean_object* x_1, lean_object* x_2) { +LEAN_EXPORT lean_object* l_Std_Format_joinSep___at___private_Lean_Util_LeanOptions_0__Lean_reprLeanOptions____x40_Lean_Util_LeanOptions___hyg_541____spec__6(lean_object* x_1, lean_object* x_2) { _start: { if (lean_obj_tag(x_1) == 0) @@ -1527,7 +1534,7 @@ x_5 = lean_ctor_get(x_1, 0); lean_inc(x_5); lean_dec(x_1); x_6 = lean_unsigned_to_nat(0u); -x_7 = l_Prod_repr___at___private_Lean_Util_LeanOptions_0__Lean_reprLeanOptions____x40_Lean_Util_LeanOptions___hyg_543____spec__5(x_5, x_6); +x_7 = l_Prod_repr___at___private_Lean_Util_LeanOptions_0__Lean_reprLeanOptions____x40_Lean_Util_LeanOptions___hyg_541____spec__5(x_5, x_6); return x_7; } else @@ -1537,14 +1544,14 @@ x_8 = lean_ctor_get(x_1, 0); lean_inc(x_8); lean_dec(x_1); x_9 = lean_unsigned_to_nat(0u); -x_10 = l_Prod_repr___at___private_Lean_Util_LeanOptions_0__Lean_reprLeanOptions____x40_Lean_Util_LeanOptions___hyg_543____spec__5(x_8, x_9); -x_11 = l_List_foldl___at___private_Lean_Util_LeanOptions_0__Lean_reprLeanOptions____x40_Lean_Util_LeanOptions___hyg_543____spec__7(x_2, x_10, x_4); +x_10 = l_Prod_repr___at___private_Lean_Util_LeanOptions_0__Lean_reprLeanOptions____x40_Lean_Util_LeanOptions___hyg_541____spec__5(x_8, x_9); +x_11 = l_List_foldl___at___private_Lean_Util_LeanOptions_0__Lean_reprLeanOptions____x40_Lean_Util_LeanOptions___hyg_541____spec__7(x_2, x_10, x_4); return x_11; } } } } -static lean_object* _init_l_List_repr___at___private_Lean_Util_LeanOptions_0__Lean_reprLeanOptions____x40_Lean_Util_LeanOptions___hyg_543____spec__3___closed__1() { +static lean_object* _init_l_List_repr___at___private_Lean_Util_LeanOptions_0__Lean_reprLeanOptions____x40_Lean_Util_LeanOptions___hyg_541____spec__3___closed__1() { _start: { lean_object* x_1; @@ -1552,17 +1559,17 @@ x_1 = lean_mk_string_unchecked("[]", 2, 2); return x_1; } } -static lean_object* _init_l_List_repr___at___private_Lean_Util_LeanOptions_0__Lean_reprLeanOptions____x40_Lean_Util_LeanOptions___hyg_543____spec__3___closed__2() { +static lean_object* _init_l_List_repr___at___private_Lean_Util_LeanOptions_0__Lean_reprLeanOptions____x40_Lean_Util_LeanOptions___hyg_541____spec__3___closed__2() { _start: { lean_object* x_1; lean_object* x_2; -x_1 = l_List_repr___at___private_Lean_Util_LeanOptions_0__Lean_reprLeanOptions____x40_Lean_Util_LeanOptions___hyg_543____spec__3___closed__1; +x_1 = l_List_repr___at___private_Lean_Util_LeanOptions_0__Lean_reprLeanOptions____x40_Lean_Util_LeanOptions___hyg_541____spec__3___closed__1; x_2 = lean_alloc_ctor(3, 1, 0); lean_ctor_set(x_2, 0, x_1); return x_2; } } -static 
lean_object* _init_l_List_repr___at___private_Lean_Util_LeanOptions_0__Lean_reprLeanOptions____x40_Lean_Util_LeanOptions___hyg_543____spec__3___closed__3() { +static lean_object* _init_l_List_repr___at___private_Lean_Util_LeanOptions_0__Lean_reprLeanOptions____x40_Lean_Util_LeanOptions___hyg_541____spec__3___closed__3() { _start: { lean_object* x_1; @@ -1570,35 +1577,35 @@ x_1 = lean_mk_string_unchecked("[", 1, 1); return x_1; } } -static lean_object* _init_l_List_repr___at___private_Lean_Util_LeanOptions_0__Lean_reprLeanOptions____x40_Lean_Util_LeanOptions___hyg_543____spec__3___closed__4() { +static lean_object* _init_l_List_repr___at___private_Lean_Util_LeanOptions_0__Lean_reprLeanOptions____x40_Lean_Util_LeanOptions___hyg_541____spec__3___closed__4() { _start: { lean_object* x_1; lean_object* x_2; -x_1 = l_List_repr___at___private_Lean_Util_LeanOptions_0__Lean_reprLeanOptions____x40_Lean_Util_LeanOptions___hyg_543____spec__3___closed__3; +x_1 = l_List_repr___at___private_Lean_Util_LeanOptions_0__Lean_reprLeanOptions____x40_Lean_Util_LeanOptions___hyg_541____spec__3___closed__3; x_2 = lean_string_length(x_1); return x_2; } } -static lean_object* _init_l_List_repr___at___private_Lean_Util_LeanOptions_0__Lean_reprLeanOptions____x40_Lean_Util_LeanOptions___hyg_543____spec__3___closed__5() { +static lean_object* _init_l_List_repr___at___private_Lean_Util_LeanOptions_0__Lean_reprLeanOptions____x40_Lean_Util_LeanOptions___hyg_541____spec__3___closed__5() { _start: { lean_object* x_1; lean_object* x_2; -x_1 = l_List_repr___at___private_Lean_Util_LeanOptions_0__Lean_reprLeanOptions____x40_Lean_Util_LeanOptions___hyg_543____spec__3___closed__4; +x_1 = l_List_repr___at___private_Lean_Util_LeanOptions_0__Lean_reprLeanOptions____x40_Lean_Util_LeanOptions___hyg_541____spec__3___closed__4; x_2 = lean_nat_to_int(x_1); return x_2; } } -static lean_object* _init_l_List_repr___at___private_Lean_Util_LeanOptions_0__Lean_reprLeanOptions____x40_Lean_Util_LeanOptions___hyg_543____spec__3___closed__6() { +static lean_object* _init_l_List_repr___at___private_Lean_Util_LeanOptions_0__Lean_reprLeanOptions____x40_Lean_Util_LeanOptions___hyg_541____spec__3___closed__6() { _start: { lean_object* x_1; lean_object* x_2; -x_1 = l_List_repr___at___private_Lean_Util_LeanOptions_0__Lean_reprLeanOptions____x40_Lean_Util_LeanOptions___hyg_543____spec__3___closed__3; +x_1 = l_List_repr___at___private_Lean_Util_LeanOptions_0__Lean_reprLeanOptions____x40_Lean_Util_LeanOptions___hyg_541____spec__3___closed__3; x_2 = lean_alloc_ctor(3, 1, 0); lean_ctor_set(x_2, 0, x_1); return x_2; } } -static lean_object* _init_l_List_repr___at___private_Lean_Util_LeanOptions_0__Lean_reprLeanOptions____x40_Lean_Util_LeanOptions___hyg_543____spec__3___closed__7() { +static lean_object* _init_l_List_repr___at___private_Lean_Util_LeanOptions_0__Lean_reprLeanOptions____x40_Lean_Util_LeanOptions___hyg_541____spec__3___closed__7() { _start: { lean_object* x_1; @@ -1606,31 +1613,31 @@ x_1 = lean_mk_string_unchecked("]", 1, 1); return x_1; } } -static lean_object* _init_l_List_repr___at___private_Lean_Util_LeanOptions_0__Lean_reprLeanOptions____x40_Lean_Util_LeanOptions___hyg_543____spec__3___closed__8() { +static lean_object* _init_l_List_repr___at___private_Lean_Util_LeanOptions_0__Lean_reprLeanOptions____x40_Lean_Util_LeanOptions___hyg_541____spec__3___closed__8() { _start: { lean_object* x_1; lean_object* x_2; -x_1 = 
l_List_repr___at___private_Lean_Util_LeanOptions_0__Lean_reprLeanOptions____x40_Lean_Util_LeanOptions___hyg_543____spec__3___closed__7; +x_1 = l_List_repr___at___private_Lean_Util_LeanOptions_0__Lean_reprLeanOptions____x40_Lean_Util_LeanOptions___hyg_541____spec__3___closed__7; x_2 = lean_alloc_ctor(3, 1, 0); lean_ctor_set(x_2, 0, x_1); return x_2; } } -LEAN_EXPORT lean_object* l_List_repr___at___private_Lean_Util_LeanOptions_0__Lean_reprLeanOptions____x40_Lean_Util_LeanOptions___hyg_543____spec__3(lean_object* x_1, lean_object* x_2) { +LEAN_EXPORT lean_object* l_List_repr___at___private_Lean_Util_LeanOptions_0__Lean_reprLeanOptions____x40_Lean_Util_LeanOptions___hyg_541____spec__3(lean_object* x_1, lean_object* x_2) { _start: { if (lean_obj_tag(x_1) == 0) { lean_object* x_3; -x_3 = l_List_repr___at___private_Lean_Util_LeanOptions_0__Lean_reprLeanOptions____x40_Lean_Util_LeanOptions___hyg_543____spec__3___closed__2; +x_3 = l_List_repr___at___private_Lean_Util_LeanOptions_0__Lean_reprLeanOptions____x40_Lean_Util_LeanOptions___hyg_541____spec__3___closed__2; return x_3; } else { lean_object* x_4; lean_object* x_5; uint8_t x_6; -x_4 = l_Prod_repr___at___private_Lean_Util_LeanOptions_0__Lean_reprLeanOptions____x40_Lean_Util_LeanOptions___hyg_543____spec__5___closed__3; +x_4 = l_Prod_repr___at___private_Lean_Util_LeanOptions_0__Lean_reprLeanOptions____x40_Lean_Util_LeanOptions___hyg_541____spec__5___closed__3; lean_inc(x_1); -x_5 = l_Std_Format_joinSep___at___private_Lean_Util_LeanOptions_0__Lean_reprLeanOptions____x40_Lean_Util_LeanOptions___hyg_543____spec__6(x_1, x_4); +x_5 = l_Std_Format_joinSep___at___private_Lean_Util_LeanOptions_0__Lean_reprLeanOptions____x40_Lean_Util_LeanOptions___hyg_541____spec__6(x_1, x_4); x_6 = !lean_is_exclusive(x_1); if (x_6 == 0) { @@ -1639,15 +1646,15 @@ x_7 = lean_ctor_get(x_1, 1); lean_dec(x_7); x_8 = lean_ctor_get(x_1, 0); lean_dec(x_8); -x_9 = l_List_repr___at___private_Lean_Util_LeanOptions_0__Lean_reprLeanOptions____x40_Lean_Util_LeanOptions___hyg_543____spec__3___closed__6; +x_9 = l_List_repr___at___private_Lean_Util_LeanOptions_0__Lean_reprLeanOptions____x40_Lean_Util_LeanOptions___hyg_541____spec__3___closed__6; lean_ctor_set_tag(x_1, 5); lean_ctor_set(x_1, 1, x_5); lean_ctor_set(x_1, 0, x_9); -x_10 = l_List_repr___at___private_Lean_Util_LeanOptions_0__Lean_reprLeanOptions____x40_Lean_Util_LeanOptions___hyg_543____spec__3___closed__8; +x_10 = l_List_repr___at___private_Lean_Util_LeanOptions_0__Lean_reprLeanOptions____x40_Lean_Util_LeanOptions___hyg_541____spec__3___closed__8; x_11 = lean_alloc_ctor(5, 2, 0); lean_ctor_set(x_11, 0, x_1); lean_ctor_set(x_11, 1, x_10); -x_12 = l_List_repr___at___private_Lean_Util_LeanOptions_0__Lean_reprLeanOptions____x40_Lean_Util_LeanOptions___hyg_543____spec__3___closed__5; +x_12 = l_List_repr___at___private_Lean_Util_LeanOptions_0__Lean_reprLeanOptions____x40_Lean_Util_LeanOptions___hyg_541____spec__3___closed__5; x_13 = lean_alloc_ctor(4, 2, 0); lean_ctor_set(x_13, 0, x_12); lean_ctor_set(x_13, 1, x_11); @@ -1661,15 +1668,15 @@ else { lean_object* x_16; lean_object* x_17; lean_object* x_18; lean_object* x_19; lean_object* x_20; lean_object* x_21; uint8_t x_22; lean_object* x_23; lean_dec(x_1); -x_16 = l_List_repr___at___private_Lean_Util_LeanOptions_0__Lean_reprLeanOptions____x40_Lean_Util_LeanOptions___hyg_543____spec__3___closed__6; +x_16 = l_List_repr___at___private_Lean_Util_LeanOptions_0__Lean_reprLeanOptions____x40_Lean_Util_LeanOptions___hyg_541____spec__3___closed__6; x_17 = lean_alloc_ctor(5, 2, 0); 
lean_ctor_set(x_17, 0, x_16); lean_ctor_set(x_17, 1, x_5); -x_18 = l_List_repr___at___private_Lean_Util_LeanOptions_0__Lean_reprLeanOptions____x40_Lean_Util_LeanOptions___hyg_543____spec__3___closed__8; +x_18 = l_List_repr___at___private_Lean_Util_LeanOptions_0__Lean_reprLeanOptions____x40_Lean_Util_LeanOptions___hyg_541____spec__3___closed__8; x_19 = lean_alloc_ctor(5, 2, 0); lean_ctor_set(x_19, 0, x_17); lean_ctor_set(x_19, 1, x_18); -x_20 = l_List_repr___at___private_Lean_Util_LeanOptions_0__Lean_reprLeanOptions____x40_Lean_Util_LeanOptions___hyg_543____spec__3___closed__5; +x_20 = l_List_repr___at___private_Lean_Util_LeanOptions_0__Lean_reprLeanOptions____x40_Lean_Util_LeanOptions___hyg_541____spec__3___closed__5; x_21 = lean_alloc_ctor(4, 2, 0); lean_ctor_set(x_21, 0, x_20); lean_ctor_set(x_21, 1, x_19); @@ -1682,7 +1689,7 @@ return x_23; } } } -static lean_object* _init_l___private_Lean_Util_LeanOptions_0__Lean_reprLeanOptions____x40_Lean_Util_LeanOptions___hyg_543____closed__1() { +static lean_object* _init_l___private_Lean_Util_LeanOptions_0__Lean_reprLeanOptions____x40_Lean_Util_LeanOptions___hyg_541____closed__1() { _start: { lean_object* x_1; @@ -1690,29 +1697,29 @@ x_1 = lean_mk_string_unchecked("values", 6, 6); return x_1; } } -static lean_object* _init_l___private_Lean_Util_LeanOptions_0__Lean_reprLeanOptions____x40_Lean_Util_LeanOptions___hyg_543____closed__2() { +static lean_object* _init_l___private_Lean_Util_LeanOptions_0__Lean_reprLeanOptions____x40_Lean_Util_LeanOptions___hyg_541____closed__2() { _start: { lean_object* x_1; lean_object* x_2; -x_1 = l___private_Lean_Util_LeanOptions_0__Lean_reprLeanOptions____x40_Lean_Util_LeanOptions___hyg_543____closed__1; +x_1 = l___private_Lean_Util_LeanOptions_0__Lean_reprLeanOptions____x40_Lean_Util_LeanOptions___hyg_541____closed__1; x_2 = lean_alloc_ctor(3, 1, 0); lean_ctor_set(x_2, 0, x_1); return x_2; } } -static lean_object* _init_l___private_Lean_Util_LeanOptions_0__Lean_reprLeanOptions____x40_Lean_Util_LeanOptions___hyg_543____closed__3() { +static lean_object* _init_l___private_Lean_Util_LeanOptions_0__Lean_reprLeanOptions____x40_Lean_Util_LeanOptions___hyg_541____closed__3() { _start: { lean_object* x_1; lean_object* x_2; lean_object* x_3; x_1 = lean_box(0); -x_2 = l___private_Lean_Util_LeanOptions_0__Lean_reprLeanOptions____x40_Lean_Util_LeanOptions___hyg_543____closed__2; +x_2 = l___private_Lean_Util_LeanOptions_0__Lean_reprLeanOptions____x40_Lean_Util_LeanOptions___hyg_541____closed__2; x_3 = lean_alloc_ctor(5, 2, 0); lean_ctor_set(x_3, 0, x_1); lean_ctor_set(x_3, 1, x_2); return x_3; } } -static lean_object* _init_l___private_Lean_Util_LeanOptions_0__Lean_reprLeanOptions____x40_Lean_Util_LeanOptions___hyg_543____closed__4() { +static lean_object* _init_l___private_Lean_Util_LeanOptions_0__Lean_reprLeanOptions____x40_Lean_Util_LeanOptions___hyg_541____closed__4() { _start: { lean_object* x_1; @@ -1720,29 +1727,29 @@ x_1 = lean_mk_string_unchecked(" := ", 4, 4); return x_1; } } -static lean_object* _init_l___private_Lean_Util_LeanOptions_0__Lean_reprLeanOptions____x40_Lean_Util_LeanOptions___hyg_543____closed__5() { +static lean_object* _init_l___private_Lean_Util_LeanOptions_0__Lean_reprLeanOptions____x40_Lean_Util_LeanOptions___hyg_541____closed__5() { _start: { lean_object* x_1; lean_object* x_2; -x_1 = l___private_Lean_Util_LeanOptions_0__Lean_reprLeanOptions____x40_Lean_Util_LeanOptions___hyg_543____closed__4; +x_1 = 
l___private_Lean_Util_LeanOptions_0__Lean_reprLeanOptions____x40_Lean_Util_LeanOptions___hyg_541____closed__4; x_2 = lean_alloc_ctor(3, 1, 0); lean_ctor_set(x_2, 0, x_1); return x_2; } } -static lean_object* _init_l___private_Lean_Util_LeanOptions_0__Lean_reprLeanOptions____x40_Lean_Util_LeanOptions___hyg_543____closed__6() { +static lean_object* _init_l___private_Lean_Util_LeanOptions_0__Lean_reprLeanOptions____x40_Lean_Util_LeanOptions___hyg_541____closed__6() { _start: { lean_object* x_1; lean_object* x_2; lean_object* x_3; -x_1 = l___private_Lean_Util_LeanOptions_0__Lean_reprLeanOptions____x40_Lean_Util_LeanOptions___hyg_543____closed__3; -x_2 = l___private_Lean_Util_LeanOptions_0__Lean_reprLeanOptions____x40_Lean_Util_LeanOptions___hyg_543____closed__5; +x_1 = l___private_Lean_Util_LeanOptions_0__Lean_reprLeanOptions____x40_Lean_Util_LeanOptions___hyg_541____closed__3; +x_2 = l___private_Lean_Util_LeanOptions_0__Lean_reprLeanOptions____x40_Lean_Util_LeanOptions___hyg_541____closed__5; x_3 = lean_alloc_ctor(5, 2, 0); lean_ctor_set(x_3, 0, x_1); lean_ctor_set(x_3, 1, x_2); return x_3; } } -static lean_object* _init_l___private_Lean_Util_LeanOptions_0__Lean_reprLeanOptions____x40_Lean_Util_LeanOptions___hyg_543____closed__7() { +static lean_object* _init_l___private_Lean_Util_LeanOptions_0__Lean_reprLeanOptions____x40_Lean_Util_LeanOptions___hyg_541____closed__7() { _start: { lean_object* x_1; lean_object* x_2; @@ -1751,7 +1758,7 @@ x_2 = lean_nat_to_int(x_1); return x_2; } } -static lean_object* _init_l___private_Lean_Util_LeanOptions_0__Lean_reprLeanOptions____x40_Lean_Util_LeanOptions___hyg_543____closed__8() { +static lean_object* _init_l___private_Lean_Util_LeanOptions_0__Lean_reprLeanOptions____x40_Lean_Util_LeanOptions___hyg_541____closed__8() { _start: { lean_object* x_1; @@ -1759,17 +1766,17 @@ x_1 = lean_mk_string_unchecked("Lean.rbmapOf ", 13, 13); return x_1; } } -static lean_object* _init_l___private_Lean_Util_LeanOptions_0__Lean_reprLeanOptions____x40_Lean_Util_LeanOptions___hyg_543____closed__9() { +static lean_object* _init_l___private_Lean_Util_LeanOptions_0__Lean_reprLeanOptions____x40_Lean_Util_LeanOptions___hyg_541____closed__9() { _start: { lean_object* x_1; lean_object* x_2; -x_1 = l___private_Lean_Util_LeanOptions_0__Lean_reprLeanOptions____x40_Lean_Util_LeanOptions___hyg_543____closed__8; +x_1 = l___private_Lean_Util_LeanOptions_0__Lean_reprLeanOptions____x40_Lean_Util_LeanOptions___hyg_541____closed__8; x_2 = lean_alloc_ctor(3, 1, 0); lean_ctor_set(x_2, 0, x_1); return x_2; } } -static lean_object* _init_l___private_Lean_Util_LeanOptions_0__Lean_reprLeanOptions____x40_Lean_Util_LeanOptions___hyg_543____closed__10() { +static lean_object* _init_l___private_Lean_Util_LeanOptions_0__Lean_reprLeanOptions____x40_Lean_Util_LeanOptions___hyg_541____closed__10() { _start: { lean_object* x_1; @@ -1777,35 +1784,35 @@ x_1 = lean_mk_string_unchecked("{ ", 2, 2); return x_1; } } -static lean_object* _init_l___private_Lean_Util_LeanOptions_0__Lean_reprLeanOptions____x40_Lean_Util_LeanOptions___hyg_543____closed__11() { +static lean_object* _init_l___private_Lean_Util_LeanOptions_0__Lean_reprLeanOptions____x40_Lean_Util_LeanOptions___hyg_541____closed__11() { _start: { lean_object* x_1; lean_object* x_2; -x_1 = l___private_Lean_Util_LeanOptions_0__Lean_reprLeanOptions____x40_Lean_Util_LeanOptions___hyg_543____closed__10; +x_1 = l___private_Lean_Util_LeanOptions_0__Lean_reprLeanOptions____x40_Lean_Util_LeanOptions___hyg_541____closed__10; x_2 = lean_string_length(x_1); return 
x_2; } } -static lean_object* _init_l___private_Lean_Util_LeanOptions_0__Lean_reprLeanOptions____x40_Lean_Util_LeanOptions___hyg_543____closed__12() { +static lean_object* _init_l___private_Lean_Util_LeanOptions_0__Lean_reprLeanOptions____x40_Lean_Util_LeanOptions___hyg_541____closed__12() { _start: { lean_object* x_1; lean_object* x_2; -x_1 = l___private_Lean_Util_LeanOptions_0__Lean_reprLeanOptions____x40_Lean_Util_LeanOptions___hyg_543____closed__11; +x_1 = l___private_Lean_Util_LeanOptions_0__Lean_reprLeanOptions____x40_Lean_Util_LeanOptions___hyg_541____closed__11; x_2 = lean_nat_to_int(x_1); return x_2; } } -static lean_object* _init_l___private_Lean_Util_LeanOptions_0__Lean_reprLeanOptions____x40_Lean_Util_LeanOptions___hyg_543____closed__13() { +static lean_object* _init_l___private_Lean_Util_LeanOptions_0__Lean_reprLeanOptions____x40_Lean_Util_LeanOptions___hyg_541____closed__13() { _start: { lean_object* x_1; lean_object* x_2; -x_1 = l___private_Lean_Util_LeanOptions_0__Lean_reprLeanOptions____x40_Lean_Util_LeanOptions___hyg_543____closed__10; +x_1 = l___private_Lean_Util_LeanOptions_0__Lean_reprLeanOptions____x40_Lean_Util_LeanOptions___hyg_541____closed__10; x_2 = lean_alloc_ctor(3, 1, 0); lean_ctor_set(x_2, 0, x_1); return x_2; } } -static lean_object* _init_l___private_Lean_Util_LeanOptions_0__Lean_reprLeanOptions____x40_Lean_Util_LeanOptions___hyg_543____closed__14() { +static lean_object* _init_l___private_Lean_Util_LeanOptions_0__Lean_reprLeanOptions____x40_Lean_Util_LeanOptions___hyg_541____closed__14() { _start: { lean_object* x_1; @@ -1813,29 +1820,29 @@ x_1 = lean_mk_string_unchecked(" }", 2, 2); return x_1; } } -static lean_object* _init_l___private_Lean_Util_LeanOptions_0__Lean_reprLeanOptions____x40_Lean_Util_LeanOptions___hyg_543____closed__15() { +static lean_object* _init_l___private_Lean_Util_LeanOptions_0__Lean_reprLeanOptions____x40_Lean_Util_LeanOptions___hyg_541____closed__15() { _start: { lean_object* x_1; lean_object* x_2; -x_1 = l___private_Lean_Util_LeanOptions_0__Lean_reprLeanOptions____x40_Lean_Util_LeanOptions___hyg_543____closed__14; +x_1 = l___private_Lean_Util_LeanOptions_0__Lean_reprLeanOptions____x40_Lean_Util_LeanOptions___hyg_541____closed__14; x_2 = lean_alloc_ctor(3, 1, 0); lean_ctor_set(x_2, 0, x_1); return x_2; } } -LEAN_EXPORT lean_object* l___private_Lean_Util_LeanOptions_0__Lean_reprLeanOptions____x40_Lean_Util_LeanOptions___hyg_543_(lean_object* x_1, lean_object* x_2) { +LEAN_EXPORT lean_object* l___private_Lean_Util_LeanOptions_0__Lean_reprLeanOptions____x40_Lean_Util_LeanOptions___hyg_541_(lean_object* x_1, lean_object* x_2) { _start: { lean_object* x_3; lean_object* x_4; lean_object* x_5; lean_object* x_6; lean_object* x_7; lean_object* x_8; lean_object* x_9; lean_object* x_10; uint8_t x_11; lean_object* x_12; lean_object* x_13; lean_object* x_14; lean_object* x_15; lean_object* x_16; lean_object* x_17; lean_object* x_18; lean_object* x_19; lean_object* x_20; lean_object* x_21; -x_3 = l_Lean_RBMap_toList___at___private_Lean_Util_LeanOptions_0__Lean_reprLeanOptions____x40_Lean_Util_LeanOptions___hyg_543____spec__1(x_1); +x_3 = l_Lean_RBMap_toList___at___private_Lean_Util_LeanOptions_0__Lean_reprLeanOptions____x40_Lean_Util_LeanOptions___hyg_541____spec__1(x_1); x_4 = lean_unsigned_to_nat(0u); -x_5 = l_List_repr___at___private_Lean_Util_LeanOptions_0__Lean_reprLeanOptions____x40_Lean_Util_LeanOptions___hyg_543____spec__3(x_3, x_4); -x_6 = 
l___private_Lean_Util_LeanOptions_0__Lean_reprLeanOptions____x40_Lean_Util_LeanOptions___hyg_543____closed__9; +x_5 = l_List_repr___at___private_Lean_Util_LeanOptions_0__Lean_reprLeanOptions____x40_Lean_Util_LeanOptions___hyg_541____spec__3(x_3, x_4); +x_6 = l___private_Lean_Util_LeanOptions_0__Lean_reprLeanOptions____x40_Lean_Util_LeanOptions___hyg_541____closed__9; x_7 = lean_alloc_ctor(5, 2, 0); lean_ctor_set(x_7, 0, x_6); lean_ctor_set(x_7, 1, x_5); x_8 = l_Repr_addAppParen(x_7, x_4); -x_9 = l___private_Lean_Util_LeanOptions_0__Lean_reprLeanOptions____x40_Lean_Util_LeanOptions___hyg_543____closed__7; +x_9 = l___private_Lean_Util_LeanOptions_0__Lean_reprLeanOptions____x40_Lean_Util_LeanOptions___hyg_541____closed__7; x_10 = lean_alloc_ctor(4, 2, 0); lean_ctor_set(x_10, 0, x_9); lean_ctor_set(x_10, 1, x_8); @@ -1843,19 +1850,19 @@ x_11 = 0; x_12 = lean_alloc_ctor(6, 1, 1); lean_ctor_set(x_12, 0, x_10); lean_ctor_set_uint8(x_12, sizeof(void*)*1, x_11); -x_13 = l___private_Lean_Util_LeanOptions_0__Lean_reprLeanOptions____x40_Lean_Util_LeanOptions___hyg_543____closed__6; +x_13 = l___private_Lean_Util_LeanOptions_0__Lean_reprLeanOptions____x40_Lean_Util_LeanOptions___hyg_541____closed__6; x_14 = lean_alloc_ctor(5, 2, 0); lean_ctor_set(x_14, 0, x_13); lean_ctor_set(x_14, 1, x_12); -x_15 = l___private_Lean_Util_LeanOptions_0__Lean_reprLeanOptions____x40_Lean_Util_LeanOptions___hyg_543____closed__13; +x_15 = l___private_Lean_Util_LeanOptions_0__Lean_reprLeanOptions____x40_Lean_Util_LeanOptions___hyg_541____closed__13; x_16 = lean_alloc_ctor(5, 2, 0); lean_ctor_set(x_16, 0, x_15); lean_ctor_set(x_16, 1, x_14); -x_17 = l___private_Lean_Util_LeanOptions_0__Lean_reprLeanOptions____x40_Lean_Util_LeanOptions___hyg_543____closed__15; +x_17 = l___private_Lean_Util_LeanOptions_0__Lean_reprLeanOptions____x40_Lean_Util_LeanOptions___hyg_541____closed__15; x_18 = lean_alloc_ctor(5, 2, 0); lean_ctor_set(x_18, 0, x_16); lean_ctor_set(x_18, 1, x_17); -x_19 = l___private_Lean_Util_LeanOptions_0__Lean_reprLeanOptions____x40_Lean_Util_LeanOptions___hyg_543____closed__12; +x_19 = l___private_Lean_Util_LeanOptions_0__Lean_reprLeanOptions____x40_Lean_Util_LeanOptions___hyg_541____closed__12; x_20 = lean_alloc_ctor(4, 2, 0); lean_ctor_set(x_20, 0, x_19); lean_ctor_set(x_20, 1, x_18); @@ -1865,47 +1872,47 @@ lean_ctor_set_uint8(x_21, sizeof(void*)*1, x_11); return x_21; } } -LEAN_EXPORT lean_object* l_Lean_RBNode_revFold___at___private_Lean_Util_LeanOptions_0__Lean_reprLeanOptions____x40_Lean_Util_LeanOptions___hyg_543____spec__2___boxed(lean_object* x_1, lean_object* x_2) { +LEAN_EXPORT lean_object* l_Lean_RBNode_revFold___at___private_Lean_Util_LeanOptions_0__Lean_reprLeanOptions____x40_Lean_Util_LeanOptions___hyg_541____spec__2___boxed(lean_object* x_1, lean_object* x_2) { _start: { lean_object* x_3; -x_3 = l_Lean_RBNode_revFold___at___private_Lean_Util_LeanOptions_0__Lean_reprLeanOptions____x40_Lean_Util_LeanOptions___hyg_543____spec__2(x_1, x_2); +x_3 = l_Lean_RBNode_revFold___at___private_Lean_Util_LeanOptions_0__Lean_reprLeanOptions____x40_Lean_Util_LeanOptions___hyg_541____spec__2(x_1, x_2); lean_dec(x_2); return x_3; } } -LEAN_EXPORT lean_object* l_Lean_RBMap_toList___at___private_Lean_Util_LeanOptions_0__Lean_reprLeanOptions____x40_Lean_Util_LeanOptions___hyg_543____spec__1___boxed(lean_object* x_1) { +LEAN_EXPORT lean_object* l_Lean_RBMap_toList___at___private_Lean_Util_LeanOptions_0__Lean_reprLeanOptions____x40_Lean_Util_LeanOptions___hyg_541____spec__1___boxed(lean_object* x_1) { _start: { lean_object* 
x_2; -x_2 = l_Lean_RBMap_toList___at___private_Lean_Util_LeanOptions_0__Lean_reprLeanOptions____x40_Lean_Util_LeanOptions___hyg_543____spec__1(x_1); +x_2 = l_Lean_RBMap_toList___at___private_Lean_Util_LeanOptions_0__Lean_reprLeanOptions____x40_Lean_Util_LeanOptions___hyg_541____spec__1(x_1); lean_dec(x_1); return x_2; } } -LEAN_EXPORT lean_object* l_Prod_repr___at___private_Lean_Util_LeanOptions_0__Lean_reprLeanOptions____x40_Lean_Util_LeanOptions___hyg_543____spec__5___boxed(lean_object* x_1, lean_object* x_2) { +LEAN_EXPORT lean_object* l_Prod_repr___at___private_Lean_Util_LeanOptions_0__Lean_reprLeanOptions____x40_Lean_Util_LeanOptions___hyg_541____spec__5___boxed(lean_object* x_1, lean_object* x_2) { _start: { lean_object* x_3; -x_3 = l_Prod_repr___at___private_Lean_Util_LeanOptions_0__Lean_reprLeanOptions____x40_Lean_Util_LeanOptions___hyg_543____spec__5(x_1, x_2); +x_3 = l_Prod_repr___at___private_Lean_Util_LeanOptions_0__Lean_reprLeanOptions____x40_Lean_Util_LeanOptions___hyg_541____spec__5(x_1, x_2); lean_dec(x_2); return x_3; } } -LEAN_EXPORT lean_object* l_List_repr___at___private_Lean_Util_LeanOptions_0__Lean_reprLeanOptions____x40_Lean_Util_LeanOptions___hyg_543____spec__3___boxed(lean_object* x_1, lean_object* x_2) { +LEAN_EXPORT lean_object* l_List_repr___at___private_Lean_Util_LeanOptions_0__Lean_reprLeanOptions____x40_Lean_Util_LeanOptions___hyg_541____spec__3___boxed(lean_object* x_1, lean_object* x_2) { _start: { lean_object* x_3; -x_3 = l_List_repr___at___private_Lean_Util_LeanOptions_0__Lean_reprLeanOptions____x40_Lean_Util_LeanOptions___hyg_543____spec__3(x_1, x_2); +x_3 = l_List_repr___at___private_Lean_Util_LeanOptions_0__Lean_reprLeanOptions____x40_Lean_Util_LeanOptions___hyg_541____spec__3(x_1, x_2); lean_dec(x_2); return x_3; } } -LEAN_EXPORT lean_object* l___private_Lean_Util_LeanOptions_0__Lean_reprLeanOptions____x40_Lean_Util_LeanOptions___hyg_543____boxed(lean_object* x_1, lean_object* x_2) { +LEAN_EXPORT lean_object* l___private_Lean_Util_LeanOptions_0__Lean_reprLeanOptions____x40_Lean_Util_LeanOptions___hyg_541____boxed(lean_object* x_1, lean_object* x_2) { _start: { lean_object* x_3; -x_3 = l___private_Lean_Util_LeanOptions_0__Lean_reprLeanOptions____x40_Lean_Util_LeanOptions___hyg_543_(x_1, x_2); +x_3 = l___private_Lean_Util_LeanOptions_0__Lean_reprLeanOptions____x40_Lean_Util_LeanOptions___hyg_541_(x_1, x_2); lean_dec(x_2); lean_dec(x_1); return x_3; @@ -1915,7 +1922,7 @@ static lean_object* _init_l_Lean_instReprLeanOptions___closed__1() { _start: { lean_object* x_1; -x_1 = lean_alloc_closure((void*)(l___private_Lean_Util_LeanOptions_0__Lean_reprLeanOptions____x40_Lean_Util_LeanOptions___hyg_543____boxed), 2, 0); +x_1 = lean_alloc_closure((void*)(l___private_Lean_Util_LeanOptions_0__Lean_reprLeanOptions____x40_Lean_Util_LeanOptions___hyg_541____boxed), 2, 0); return x_1; } } @@ -1927,6 +1934,14 @@ x_1 = l_Lean_instReprLeanOptions___closed__1; return x_1; } } +static lean_object* _init_l_Lean_instEmptyCollectionLeanOptions() { +_start: +{ +lean_object* x_1; +x_1 = lean_box(0); +return x_1; +} +} LEAN_EXPORT lean_object* l_Lean_RBNode_forIn_visit___at_Lean_LeanOptions_toOptions___spec__1(lean_object* x_1, lean_object* x_2) { _start: { @@ -2004,7 +2019,7 @@ x_9 = lean_ctor_get(x_1, 0); x_10 = lean_ctor_get(x_1, 1); x_11 = lean_ctor_get(x_1, 2); x_12 = lean_ctor_get(x_1, 3); -x_13 = l_Lean_Name_cmp(x_2, x_10); +x_13 = l_Lean_Name_quickCmp(x_2, x_10); switch (x_13) { case 0: { @@ -2049,7 +2064,7 @@ lean_inc(x_21); lean_inc(x_20); lean_inc(x_19); lean_dec(x_1); 
-x_23 = l_Lean_Name_cmp(x_2, x_20); +x_23 = l_Lean_Name_quickCmp(x_2, x_20); switch (x_23) { case 0: { @@ -2105,7 +2120,7 @@ x_33 = lean_ctor_get(x_1, 0); x_34 = lean_ctor_get(x_1, 1); x_35 = lean_ctor_get(x_1, 2); x_36 = lean_ctor_get(x_1, 3); -x_37 = l_Lean_Name_cmp(x_2, x_34); +x_37 = l_Lean_Name_quickCmp(x_2, x_34); switch (x_37) { case 0: { @@ -3499,7 +3514,7 @@ lean_inc(x_344); lean_inc(x_343); lean_inc(x_342); lean_dec(x_1); -x_346 = l_Lean_Name_cmp(x_2, x_343); +x_346 = l_Lean_Name_quickCmp(x_2, x_343); switch (x_346) { case 0: { @@ -4466,6 +4481,30 @@ lean_dec(x_1); return x_7; } } +static lean_object* _init_l_Lean_RBNode_foldM___at_Lean_instFromJsonLeanOptions___spec__1___closed__1() { +_start: +{ +lean_object* x_1; +x_1 = lean_mk_string_unchecked("[anonymous]", 11, 11); +return x_1; +} +} +static lean_object* _init_l_Lean_RBNode_foldM___at_Lean_instFromJsonLeanOptions___spec__1___closed__2() { +_start: +{ +lean_object* x_1; +x_1 = lean_mk_string_unchecked("expected a `Name`, got '", 24, 24); +return x_1; +} +} +static lean_object* _init_l_Lean_RBNode_foldM___at_Lean_instFromJsonLeanOptions___spec__1___closed__3() { +_start: +{ +lean_object* x_1; +x_1 = lean_mk_string_unchecked("'", 1, 1); +return x_1; +} +} LEAN_EXPORT lean_object* l_Lean_RBNode_foldM___at_Lean_instFromJsonLeanOptions___spec__1(lean_object* x_1, lean_object* x_2) { _start: { @@ -4513,10 +4552,24 @@ return x_11; } else { -lean_object* x_12; lean_object* x_13; -x_12 = lean_ctor_get(x_8, 0); -lean_inc(x_12); -lean_dec(x_8); +uint8_t x_12; +x_12 = !lean_is_exclusive(x_8); +if (x_12 == 0) +{ +lean_object* x_13; lean_object* x_14; uint8_t x_15; +x_13 = lean_ctor_get(x_8, 0); +x_14 = l_Lean_RBNode_foldM___at_Lean_instFromJsonLeanOptions___spec__1___closed__1; +x_15 = lean_string_dec_eq(x_5, x_14); +if (x_15 == 0) +{ +lean_object* x_16; uint8_t x_17; +lean_inc(x_5); +x_16 = l_String_toName(x_5); +x_17 = l_Lean_Name_isAnonymous(x_16); +if (x_17 == 0) +{ +lean_free_object(x_8); +lean_dec(x_5); switch (lean_obj_tag(x_6)) { case 1: { @@ -4524,169 +4577,662 @@ uint8_t x_18; x_18 = !lean_is_exclusive(x_6); if (x_18 == 0) { -x_13 = x_6; -goto block_17; +lean_object* x_19; +x_19 = l_Lean_RBNode_insert___at_Lean_NameMap_insert___spec__1___rarg(x_13, x_16, x_6); +x_1 = x_19; +x_2 = x_7; +goto _start; } else { -uint8_t x_19; lean_object* x_20; -x_19 = lean_ctor_get_uint8(x_6, 0); +uint8_t x_21; lean_object* x_22; lean_object* x_23; +x_21 = lean_ctor_get_uint8(x_6, 0); lean_dec(x_6); -x_20 = lean_alloc_ctor(1, 0, 1); -lean_ctor_set_uint8(x_20, 0, x_19); -x_13 = x_20; -goto block_17; +x_22 = lean_alloc_ctor(1, 0, 1); +lean_ctor_set_uint8(x_22, 0, x_21); +x_23 = l_Lean_RBNode_insert___at_Lean_NameMap_insert___spec__1___rarg(x_13, x_16, x_22); +x_1 = x_23; +x_2 = x_7; +goto _start; } } case 2: { -uint8_t x_21; -x_21 = !lean_is_exclusive(x_6); -if (x_21 == 0) +uint8_t x_25; +x_25 = !lean_is_exclusive(x_6); +if (x_25 == 0) { -lean_object* x_22; lean_object* x_23; lean_object* x_24; lean_object* x_25; uint8_t x_26; -x_22 = lean_ctor_get(x_6, 0); -x_23 = lean_ctor_get(x_22, 0); -lean_inc(x_23); -x_24 = lean_ctor_get(x_22, 1); -lean_inc(x_24); -lean_dec(x_22); -x_25 = l_Lean_instFromJsonLeanOptionValue___closed__3; -x_26 = lean_int_dec_lt(x_23, x_25); -if (x_26 == 0) +lean_object* x_26; lean_object* x_27; lean_object* x_28; lean_object* x_29; uint8_t x_30; +x_26 = lean_ctor_get(x_6, 0); +x_27 = lean_ctor_get(x_26, 0); +lean_inc(x_27); +x_28 = lean_ctor_get(x_26, 1); +lean_inc(x_28); +lean_dec(x_26); +x_29 = 
l_Lean_instFromJsonLeanOptionValue___closed__3; +x_30 = lean_int_dec_lt(x_27, x_29); +if (x_30 == 0) +{ +lean_object* x_31; lean_object* x_32; uint8_t x_33; +x_31 = lean_nat_abs(x_27); +lean_dec(x_27); +x_32 = lean_unsigned_to_nat(0u); +x_33 = lean_nat_dec_eq(x_28, x_32); +lean_dec(x_28); +if (x_33 == 0) { -lean_object* x_27; lean_object* x_28; uint8_t x_29; -x_27 = lean_nat_abs(x_23); -lean_dec(x_23); -x_28 = lean_unsigned_to_nat(0u); -x_29 = lean_nat_dec_eq(x_24, x_28); -lean_dec(x_24); -if (x_29 == 0) +lean_object* x_34; +lean_dec(x_31); +lean_free_object(x_6); +lean_dec(x_16); +lean_dec(x_13); +lean_dec(x_7); +x_34 = l_Lean_instFromJsonLeanOptionValue___closed__2; +return x_34; +} +else { -lean_object* x_30; +lean_object* x_35; +lean_ctor_set(x_6, 0, x_31); +x_35 = l_Lean_RBNode_insert___at_Lean_NameMap_insert___spec__1___rarg(x_13, x_16, x_6); +x_1 = x_35; +x_2 = x_7; +goto _start; +} +} +else +{ +lean_object* x_37; +lean_dec(x_28); lean_dec(x_27); lean_free_object(x_6); -lean_dec(x_12); +lean_dec(x_16); +lean_dec(x_13); +lean_dec(x_7); +x_37 = l_Lean_instFromJsonLeanOptionValue___closed__2; +return x_37; +} +} +else +{ +lean_object* x_38; lean_object* x_39; lean_object* x_40; lean_object* x_41; uint8_t x_42; +x_38 = lean_ctor_get(x_6, 0); +lean_inc(x_38); +lean_dec(x_6); +x_39 = lean_ctor_get(x_38, 0); +lean_inc(x_39); +x_40 = lean_ctor_get(x_38, 1); +lean_inc(x_40); +lean_dec(x_38); +x_41 = l_Lean_instFromJsonLeanOptionValue___closed__3; +x_42 = lean_int_dec_lt(x_39, x_41); +if (x_42 == 0) +{ +lean_object* x_43; lean_object* x_44; uint8_t x_45; +x_43 = lean_nat_abs(x_39); +lean_dec(x_39); +x_44 = lean_unsigned_to_nat(0u); +x_45 = lean_nat_dec_eq(x_40, x_44); +lean_dec(x_40); +if (x_45 == 0) +{ +lean_object* x_46; +lean_dec(x_43); +lean_dec(x_16); +lean_dec(x_13); +lean_dec(x_7); +x_46 = l_Lean_instFromJsonLeanOptionValue___closed__2; +return x_46; +} +else +{ +lean_object* x_47; lean_object* x_48; +x_47 = lean_alloc_ctor(2, 1, 0); +lean_ctor_set(x_47, 0, x_43); +x_48 = l_Lean_RBNode_insert___at_Lean_NameMap_insert___spec__1___rarg(x_13, x_16, x_47); +x_1 = x_48; +x_2 = x_7; +goto _start; +} +} +else +{ +lean_object* x_50; +lean_dec(x_40); +lean_dec(x_39); +lean_dec(x_16); +lean_dec(x_13); +lean_dec(x_7); +x_50 = l_Lean_instFromJsonLeanOptionValue___closed__2; +return x_50; +} +} +} +case 3: +{ +uint8_t x_51; +x_51 = !lean_is_exclusive(x_6); +if (x_51 == 0) +{ +lean_object* x_52; +lean_ctor_set_tag(x_6, 0); +x_52 = l_Lean_RBNode_insert___at_Lean_NameMap_insert___spec__1___rarg(x_13, x_16, x_6); +x_1 = x_52; +x_2 = x_7; +goto _start; +} +else +{ +lean_object* x_54; lean_object* x_55; lean_object* x_56; +x_54 = lean_ctor_get(x_6, 0); +lean_inc(x_54); +lean_dec(x_6); +x_55 = lean_alloc_ctor(0, 1, 0); +lean_ctor_set(x_55, 0, x_54); +x_56 = l_Lean_RBNode_insert___at_Lean_NameMap_insert___spec__1___rarg(x_13, x_16, x_55); +x_1 = x_56; +x_2 = x_7; +goto _start; +} +} +default: +{ +lean_object* x_58; +lean_dec(x_16); +lean_dec(x_13); +lean_dec(x_7); +lean_dec(x_6); +x_58 = l_Lean_instFromJsonLeanOptionValue___closed__2; +return x_58; +} +} +} +else +{ +lean_object* x_59; lean_object* x_60; lean_object* x_61; lean_object* x_62; +lean_dec(x_16); +lean_dec(x_13); lean_dec(x_7); +lean_dec(x_6); +x_59 = l_Lean_RBNode_foldM___at_Lean_instFromJsonLeanOptions___spec__1___closed__2; +x_60 = lean_string_append(x_59, x_5); lean_dec(x_5); -x_30 = l_Lean_instFromJsonLeanOptionValue___closed__2; -return x_30; +x_61 = l_Lean_RBNode_foldM___at_Lean_instFromJsonLeanOptions___spec__1___closed__3; +x_62 = 
lean_string_append(x_60, x_61); +lean_ctor_set_tag(x_8, 0); +lean_ctor_set(x_8, 0, x_62); +return x_8; +} +} +else +{ +lean_free_object(x_8); +lean_dec(x_5); +switch (lean_obj_tag(x_6)) { +case 1: +{ +uint8_t x_63; +x_63 = !lean_is_exclusive(x_6); +if (x_63 == 0) +{ +lean_object* x_64; lean_object* x_65; +x_64 = lean_box(0); +x_65 = l_Lean_RBNode_insert___at_Lean_NameMap_insert___spec__1___rarg(x_13, x_64, x_6); +x_1 = x_65; +x_2 = x_7; +goto _start; } else { -lean_ctor_set(x_6, 0, x_27); -x_13 = x_6; -goto block_17; +uint8_t x_67; lean_object* x_68; lean_object* x_69; lean_object* x_70; +x_67 = lean_ctor_get_uint8(x_6, 0); +lean_dec(x_6); +x_68 = lean_alloc_ctor(1, 0, 1); +lean_ctor_set_uint8(x_68, 0, x_67); +x_69 = lean_box(0); +x_70 = l_Lean_RBNode_insert___at_Lean_NameMap_insert___spec__1___rarg(x_13, x_69, x_68); +x_1 = x_70; +x_2 = x_7; +goto _start; } } +case 2: +{ +uint8_t x_72; +x_72 = !lean_is_exclusive(x_6); +if (x_72 == 0) +{ +lean_object* x_73; lean_object* x_74; lean_object* x_75; lean_object* x_76; uint8_t x_77; +x_73 = lean_ctor_get(x_6, 0); +x_74 = lean_ctor_get(x_73, 0); +lean_inc(x_74); +x_75 = lean_ctor_get(x_73, 1); +lean_inc(x_75); +lean_dec(x_73); +x_76 = l_Lean_instFromJsonLeanOptionValue___closed__3; +x_77 = lean_int_dec_lt(x_74, x_76); +if (x_77 == 0) +{ +lean_object* x_78; lean_object* x_79; uint8_t x_80; +x_78 = lean_nat_abs(x_74); +lean_dec(x_74); +x_79 = lean_unsigned_to_nat(0u); +x_80 = lean_nat_dec_eq(x_75, x_79); +lean_dec(x_75); +if (x_80 == 0) +{ +lean_object* x_81; +lean_dec(x_78); +lean_free_object(x_6); +lean_dec(x_13); +lean_dec(x_7); +x_81 = l_Lean_instFromJsonLeanOptionValue___closed__2; +return x_81; +} else { -lean_object* x_31; -lean_dec(x_24); -lean_dec(x_23); +lean_object* x_82; lean_object* x_83; +lean_ctor_set(x_6, 0, x_78); +x_82 = lean_box(0); +x_83 = l_Lean_RBNode_insert___at_Lean_NameMap_insert___spec__1___rarg(x_13, x_82, x_6); +x_1 = x_83; +x_2 = x_7; +goto _start; +} +} +else +{ +lean_object* x_85; +lean_dec(x_75); +lean_dec(x_74); lean_free_object(x_6); -lean_dec(x_12); +lean_dec(x_13); lean_dec(x_7); -lean_dec(x_5); -x_31 = l_Lean_instFromJsonLeanOptionValue___closed__2; -return x_31; +x_85 = l_Lean_instFromJsonLeanOptionValue___closed__2; +return x_85; } } else { -lean_object* x_32; lean_object* x_33; lean_object* x_34; lean_object* x_35; uint8_t x_36; -x_32 = lean_ctor_get(x_6, 0); -lean_inc(x_32); +lean_object* x_86; lean_object* x_87; lean_object* x_88; lean_object* x_89; uint8_t x_90; +x_86 = lean_ctor_get(x_6, 0); +lean_inc(x_86); lean_dec(x_6); -x_33 = lean_ctor_get(x_32, 0); -lean_inc(x_33); -x_34 = lean_ctor_get(x_32, 1); -lean_inc(x_34); -lean_dec(x_32); -x_35 = l_Lean_instFromJsonLeanOptionValue___closed__3; -x_36 = lean_int_dec_lt(x_33, x_35); -if (x_36 == 0) -{ -lean_object* x_37; lean_object* x_38; uint8_t x_39; -x_37 = lean_nat_abs(x_33); -lean_dec(x_33); -x_38 = lean_unsigned_to_nat(0u); -x_39 = lean_nat_dec_eq(x_34, x_38); -lean_dec(x_34); -if (x_39 == 0) +x_87 = lean_ctor_get(x_86, 0); +lean_inc(x_87); +x_88 = lean_ctor_get(x_86, 1); +lean_inc(x_88); +lean_dec(x_86); +x_89 = l_Lean_instFromJsonLeanOptionValue___closed__3; +x_90 = lean_int_dec_lt(x_87, x_89); +if (x_90 == 0) { -lean_object* x_40; -lean_dec(x_37); -lean_dec(x_12); +lean_object* x_91; lean_object* x_92; uint8_t x_93; +x_91 = lean_nat_abs(x_87); +lean_dec(x_87); +x_92 = lean_unsigned_to_nat(0u); +x_93 = lean_nat_dec_eq(x_88, x_92); +lean_dec(x_88); +if (x_93 == 0) +{ +lean_object* x_94; +lean_dec(x_91); +lean_dec(x_13); lean_dec(x_7); -lean_dec(x_5); 
-x_40 = l_Lean_instFromJsonLeanOptionValue___closed__2; -return x_40; +x_94 = l_Lean_instFromJsonLeanOptionValue___closed__2; +return x_94; } else { -lean_object* x_41; -x_41 = lean_alloc_ctor(2, 1, 0); -lean_ctor_set(x_41, 0, x_37); -x_13 = x_41; -goto block_17; +lean_object* x_95; lean_object* x_96; lean_object* x_97; +x_95 = lean_alloc_ctor(2, 1, 0); +lean_ctor_set(x_95, 0, x_91); +x_96 = lean_box(0); +x_97 = l_Lean_RBNode_insert___at_Lean_NameMap_insert___spec__1___rarg(x_13, x_96, x_95); +x_1 = x_97; +x_2 = x_7; +goto _start; } } else { -lean_object* x_42; -lean_dec(x_34); -lean_dec(x_33); -lean_dec(x_12); +lean_object* x_99; +lean_dec(x_88); +lean_dec(x_87); +lean_dec(x_13); lean_dec(x_7); -lean_dec(x_5); -x_42 = l_Lean_instFromJsonLeanOptionValue___closed__2; -return x_42; +x_99 = l_Lean_instFromJsonLeanOptionValue___closed__2; +return x_99; } } } case 3: { -uint8_t x_43; -x_43 = !lean_is_exclusive(x_6); -if (x_43 == 0) +uint8_t x_100; +x_100 = !lean_is_exclusive(x_6); +if (x_100 == 0) { +lean_object* x_101; lean_object* x_102; lean_ctor_set_tag(x_6, 0); -x_13 = x_6; -goto block_17; +x_101 = lean_box(0); +x_102 = l_Lean_RBNode_insert___at_Lean_NameMap_insert___spec__1___rarg(x_13, x_101, x_6); +x_1 = x_102; +x_2 = x_7; +goto _start; } else { -lean_object* x_44; lean_object* x_45; -x_44 = lean_ctor_get(x_6, 0); -lean_inc(x_44); +lean_object* x_104; lean_object* x_105; lean_object* x_106; lean_object* x_107; +x_104 = lean_ctor_get(x_6, 0); +lean_inc(x_104); lean_dec(x_6); -x_45 = lean_alloc_ctor(0, 1, 0); -lean_ctor_set(x_45, 0, x_44); -x_13 = x_45; -goto block_17; +x_105 = lean_alloc_ctor(0, 1, 0); +lean_ctor_set(x_105, 0, x_104); +x_106 = lean_box(0); +x_107 = l_Lean_RBNode_insert___at_Lean_NameMap_insert___spec__1___rarg(x_13, x_106, x_105); +x_1 = x_107; +x_2 = x_7; +goto _start; } } default: { -lean_object* x_46; -lean_dec(x_12); +lean_object* x_109; +lean_dec(x_13); lean_dec(x_7); lean_dec(x_6); +x_109 = l_Lean_instFromJsonLeanOptionValue___closed__2; +return x_109; +} +} +} +} +else +{ +lean_object* x_110; lean_object* x_111; uint8_t x_112; +x_110 = lean_ctor_get(x_8, 0); +lean_inc(x_110); +lean_dec(x_8); +x_111 = l_Lean_RBNode_foldM___at_Lean_instFromJsonLeanOptions___spec__1___closed__1; +x_112 = lean_string_dec_eq(x_5, x_111); +if (x_112 == 0) +{ +lean_object* x_113; uint8_t x_114; +lean_inc(x_5); +x_113 = l_String_toName(x_5); +x_114 = l_Lean_Name_isAnonymous(x_113); +if (x_114 == 0) +{ lean_dec(x_5); -x_46 = l_Lean_instFromJsonLeanOptionValue___closed__2; -return x_46; +switch (lean_obj_tag(x_6)) { +case 1: +{ +uint8_t x_115; lean_object* x_116; lean_object* x_117; lean_object* x_118; +x_115 = lean_ctor_get_uint8(x_6, 0); +if (lean_is_exclusive(x_6)) { + x_116 = x_6; +} else { + lean_dec_ref(x_6); + x_116 = lean_box(0); +} +if (lean_is_scalar(x_116)) { + x_117 = lean_alloc_ctor(1, 0, 1); +} else { + x_117 = x_116; +} +lean_ctor_set_uint8(x_117, 0, x_115); +x_118 = l_Lean_RBNode_insert___at_Lean_NameMap_insert___spec__1___rarg(x_110, x_113, x_117); +x_1 = x_118; +x_2 = x_7; +goto _start; } +case 2: +{ +lean_object* x_120; lean_object* x_121; lean_object* x_122; lean_object* x_123; lean_object* x_124; uint8_t x_125; +x_120 = lean_ctor_get(x_6, 0); +lean_inc(x_120); +if (lean_is_exclusive(x_6)) { + lean_ctor_release(x_6, 0); + x_121 = x_6; +} else { + lean_dec_ref(x_6); + x_121 = lean_box(0); } -block_17: +x_122 = lean_ctor_get(x_120, 0); +lean_inc(x_122); +x_123 = lean_ctor_get(x_120, 1); +lean_inc(x_123); +lean_dec(x_120); +x_124 = 
l_Lean_instFromJsonLeanOptionValue___closed__3; +x_125 = lean_int_dec_lt(x_122, x_124); +if (x_125 == 0) { -lean_object* x_14; lean_object* x_15; -x_14 = l_String_toName(x_5); -x_15 = l_Lean_RBNode_insert___at_Lean_LeanOptions_fromOptions_x3f___spec__1(x_12, x_14, x_13); -x_1 = x_15; +lean_object* x_126; lean_object* x_127; uint8_t x_128; +x_126 = lean_nat_abs(x_122); +lean_dec(x_122); +x_127 = lean_unsigned_to_nat(0u); +x_128 = lean_nat_dec_eq(x_123, x_127); +lean_dec(x_123); +if (x_128 == 0) +{ +lean_object* x_129; +lean_dec(x_126); +lean_dec(x_121); +lean_dec(x_113); +lean_dec(x_110); +lean_dec(x_7); +x_129 = l_Lean_instFromJsonLeanOptionValue___closed__2; +return x_129; +} +else +{ +lean_object* x_130; lean_object* x_131; +if (lean_is_scalar(x_121)) { + x_130 = lean_alloc_ctor(2, 1, 0); +} else { + x_130 = x_121; +} +lean_ctor_set(x_130, 0, x_126); +x_131 = l_Lean_RBNode_insert___at_Lean_NameMap_insert___spec__1___rarg(x_110, x_113, x_130); +x_1 = x_131; +x_2 = x_7; +goto _start; +} +} +else +{ +lean_object* x_133; +lean_dec(x_123); +lean_dec(x_122); +lean_dec(x_121); +lean_dec(x_113); +lean_dec(x_110); +lean_dec(x_7); +x_133 = l_Lean_instFromJsonLeanOptionValue___closed__2; +return x_133; +} +} +case 3: +{ +lean_object* x_134; lean_object* x_135; lean_object* x_136; lean_object* x_137; +x_134 = lean_ctor_get(x_6, 0); +lean_inc(x_134); +if (lean_is_exclusive(x_6)) { + lean_ctor_release(x_6, 0); + x_135 = x_6; +} else { + lean_dec_ref(x_6); + x_135 = lean_box(0); +} +if (lean_is_scalar(x_135)) { + x_136 = lean_alloc_ctor(0, 1, 0); +} else { + x_136 = x_135; + lean_ctor_set_tag(x_136, 0); +} +lean_ctor_set(x_136, 0, x_134); +x_137 = l_Lean_RBNode_insert___at_Lean_NameMap_insert___spec__1___rarg(x_110, x_113, x_136); +x_1 = x_137; +x_2 = x_7; +goto _start; +} +default: +{ +lean_object* x_139; +lean_dec(x_113); +lean_dec(x_110); +lean_dec(x_7); +lean_dec(x_6); +x_139 = l_Lean_instFromJsonLeanOptionValue___closed__2; +return x_139; +} +} +} +else +{ +lean_object* x_140; lean_object* x_141; lean_object* x_142; lean_object* x_143; lean_object* x_144; +lean_dec(x_113); +lean_dec(x_110); +lean_dec(x_7); +lean_dec(x_6); +x_140 = l_Lean_RBNode_foldM___at_Lean_instFromJsonLeanOptions___spec__1___closed__2; +x_141 = lean_string_append(x_140, x_5); +lean_dec(x_5); +x_142 = l_Lean_RBNode_foldM___at_Lean_instFromJsonLeanOptions___spec__1___closed__3; +x_143 = lean_string_append(x_141, x_142); +x_144 = lean_alloc_ctor(0, 1, 0); +lean_ctor_set(x_144, 0, x_143); +return x_144; +} +} +else +{ +lean_dec(x_5); +switch (lean_obj_tag(x_6)) { +case 1: +{ +uint8_t x_145; lean_object* x_146; lean_object* x_147; lean_object* x_148; lean_object* x_149; +x_145 = lean_ctor_get_uint8(x_6, 0); +if (lean_is_exclusive(x_6)) { + x_146 = x_6; +} else { + lean_dec_ref(x_6); + x_146 = lean_box(0); +} +if (lean_is_scalar(x_146)) { + x_147 = lean_alloc_ctor(1, 0, 1); +} else { + x_147 = x_146; +} +lean_ctor_set_uint8(x_147, 0, x_145); +x_148 = lean_box(0); +x_149 = l_Lean_RBNode_insert___at_Lean_NameMap_insert___spec__1___rarg(x_110, x_148, x_147); +x_1 = x_149; +x_2 = x_7; +goto _start; +} +case 2: +{ +lean_object* x_151; lean_object* x_152; lean_object* x_153; lean_object* x_154; lean_object* x_155; uint8_t x_156; +x_151 = lean_ctor_get(x_6, 0); +lean_inc(x_151); +if (lean_is_exclusive(x_6)) { + lean_ctor_release(x_6, 0); + x_152 = x_6; +} else { + lean_dec_ref(x_6); + x_152 = lean_box(0); +} +x_153 = lean_ctor_get(x_151, 0); +lean_inc(x_153); +x_154 = lean_ctor_get(x_151, 1); +lean_inc(x_154); +lean_dec(x_151); +x_155 = 
l_Lean_instFromJsonLeanOptionValue___closed__3; +x_156 = lean_int_dec_lt(x_153, x_155); +if (x_156 == 0) +{ +lean_object* x_157; lean_object* x_158; uint8_t x_159; +x_157 = lean_nat_abs(x_153); +lean_dec(x_153); +x_158 = lean_unsigned_to_nat(0u); +x_159 = lean_nat_dec_eq(x_154, x_158); +lean_dec(x_154); +if (x_159 == 0) +{ +lean_object* x_160; +lean_dec(x_157); +lean_dec(x_152); +lean_dec(x_110); +lean_dec(x_7); +x_160 = l_Lean_instFromJsonLeanOptionValue___closed__2; +return x_160; +} +else +{ +lean_object* x_161; lean_object* x_162; lean_object* x_163; +if (lean_is_scalar(x_152)) { + x_161 = lean_alloc_ctor(2, 1, 0); +} else { + x_161 = x_152; +} +lean_ctor_set(x_161, 0, x_157); +x_162 = lean_box(0); +x_163 = l_Lean_RBNode_insert___at_Lean_NameMap_insert___spec__1___rarg(x_110, x_162, x_161); +x_1 = x_163; +x_2 = x_7; +goto _start; +} +} +else +{ +lean_object* x_165; +lean_dec(x_154); +lean_dec(x_153); +lean_dec(x_152); +lean_dec(x_110); +lean_dec(x_7); +x_165 = l_Lean_instFromJsonLeanOptionValue___closed__2; +return x_165; +} +} +case 3: +{ +lean_object* x_166; lean_object* x_167; lean_object* x_168; lean_object* x_169; lean_object* x_170; +x_166 = lean_ctor_get(x_6, 0); +lean_inc(x_166); +if (lean_is_exclusive(x_6)) { + lean_ctor_release(x_6, 0); + x_167 = x_6; +} else { + lean_dec_ref(x_6); + x_167 = lean_box(0); +} +if (lean_is_scalar(x_167)) { + x_168 = lean_alloc_ctor(0, 1, 0); +} else { + x_168 = x_167; + lean_ctor_set_tag(x_168, 0); +} +lean_ctor_set(x_168, 0, x_166); +x_169 = lean_box(0); +x_170 = l_Lean_RBNode_insert___at_Lean_NameMap_insert___spec__1___rarg(x_110, x_169, x_168); +x_1 = x_170; x_2 = x_7; goto _start; } +default: +{ +lean_object* x_172; +lean_dec(x_110); +lean_dec(x_7); +lean_dec(x_6); +x_172 = l_Lean_instFromJsonLeanOptionValue___closed__2; +return x_172; +} +} +} +} } } } @@ -4695,76 +5241,124 @@ static lean_object* _init_l_Lean_instFromJsonLeanOptions___closed__1() { _start: { lean_object* x_1; -x_1 = lean_mk_string_unchecked("invalid LeanOptions type", 24, 24); +x_1 = lean_mk_string_unchecked("expected a `NameMap`, got '", 27, 27); return x_1; } } -static lean_object* _init_l_Lean_instFromJsonLeanOptions___closed__2() { +LEAN_EXPORT lean_object* l_Lean_instFromJsonLeanOptions(lean_object* x_1) { _start: { -lean_object* x_1; lean_object* x_2; -x_1 = l_Lean_instFromJsonLeanOptions___closed__1; -x_2 = lean_alloc_ctor(0, 1, 0); -lean_ctor_set(x_2, 0, x_1); -return x_2; -} +switch (lean_obj_tag(x_1)) { +case 0: +{ +lean_object* x_2; lean_object* x_3; lean_object* x_4; lean_object* x_5; lean_object* x_6; lean_object* x_7; lean_object* x_8; +x_2 = lean_unsigned_to_nat(80u); +x_3 = l_Lean_Json_pretty(x_1, x_2); +x_4 = l_Lean_instFromJsonLeanOptions___closed__1; +x_5 = lean_string_append(x_4, x_3); +lean_dec(x_3); +x_6 = l_Lean_RBNode_foldM___at_Lean_instFromJsonLeanOptions___spec__1___closed__3; +x_7 = lean_string_append(x_5, x_6); +x_8 = lean_alloc_ctor(0, 1, 0); +lean_ctor_set(x_8, 0, x_7); +return x_8; } -LEAN_EXPORT lean_object* l_Lean_instFromJsonLeanOptions(lean_object* x_1) { -_start: +case 1: { -if (lean_obj_tag(x_1) == 5) +lean_object* x_9; lean_object* x_10; lean_object* x_11; lean_object* x_12; lean_object* x_13; lean_object* x_14; lean_object* x_15; +x_9 = lean_unsigned_to_nat(80u); +x_10 = l_Lean_Json_pretty(x_1, x_9); +x_11 = l_Lean_instFromJsonLeanOptions___closed__1; +x_12 = lean_string_append(x_11, x_10); +lean_dec(x_10); +x_13 = l_Lean_RBNode_foldM___at_Lean_instFromJsonLeanOptions___spec__1___closed__3; +x_14 = lean_string_append(x_12, x_13); 
+x_15 = lean_alloc_ctor(0, 1, 0); +lean_ctor_set(x_15, 0, x_14); +return x_15; +} +case 5: { -lean_object* x_2; lean_object* x_3; lean_object* x_4; -x_2 = lean_ctor_get(x_1, 0); -lean_inc(x_2); +lean_object* x_16; lean_object* x_17; lean_object* x_18; +x_16 = lean_ctor_get(x_1, 0); +lean_inc(x_16); lean_dec(x_1); -x_3 = lean_box(0); -x_4 = l_Lean_RBNode_foldM___at_Lean_instFromJsonLeanOptions___spec__1(x_3, x_2); -if (lean_obj_tag(x_4) == 0) +x_17 = lean_box(0); +x_18 = l_Lean_RBNode_foldM___at_Lean_instFromJsonLeanOptions___spec__1(x_17, x_16); +if (lean_obj_tag(x_18) == 0) { -uint8_t x_5; -x_5 = !lean_is_exclusive(x_4); -if (x_5 == 0) +uint8_t x_19; +x_19 = !lean_is_exclusive(x_18); +if (x_19 == 0) { -return x_4; +return x_18; } else { -lean_object* x_6; lean_object* x_7; -x_6 = lean_ctor_get(x_4, 0); -lean_inc(x_6); -lean_dec(x_4); -x_7 = lean_alloc_ctor(0, 1, 0); -lean_ctor_set(x_7, 0, x_6); -return x_7; +lean_object* x_20; lean_object* x_21; +x_20 = lean_ctor_get(x_18, 0); +lean_inc(x_20); +lean_dec(x_18); +x_21 = lean_alloc_ctor(0, 1, 0); +lean_ctor_set(x_21, 0, x_20); +return x_21; } } else { -uint8_t x_8; -x_8 = !lean_is_exclusive(x_4); -if (x_8 == 0) +uint8_t x_22; +x_22 = !lean_is_exclusive(x_18); +if (x_22 == 0) { -return x_4; +return x_18; } else { -lean_object* x_9; lean_object* x_10; -x_9 = lean_ctor_get(x_4, 0); -lean_inc(x_9); -lean_dec(x_4); -x_10 = lean_alloc_ctor(1, 1, 0); -lean_ctor_set(x_10, 0, x_9); -return x_10; +lean_object* x_23; lean_object* x_24; +x_23 = lean_ctor_get(x_18, 0); +lean_inc(x_23); +lean_dec(x_18); +x_24 = lean_alloc_ctor(1, 1, 0); +lean_ctor_set(x_24, 0, x_23); +return x_24; } } } +default: +{ +lean_object* x_25; lean_object* x_26; uint8_t x_27; +x_25 = lean_unsigned_to_nat(80u); +lean_inc(x_1); +x_26 = l_Lean_Json_pretty(x_1, x_25); +x_27 = !lean_is_exclusive(x_1); +if (x_27 == 0) +{ +lean_object* x_28; lean_object* x_29; lean_object* x_30; lean_object* x_31; lean_object* x_32; +x_28 = lean_ctor_get(x_1, 0); +lean_dec(x_28); +x_29 = l_Lean_instFromJsonLeanOptions___closed__1; +x_30 = lean_string_append(x_29, x_26); +lean_dec(x_26); +x_31 = l_Lean_RBNode_foldM___at_Lean_instFromJsonLeanOptions___spec__1___closed__3; +x_32 = lean_string_append(x_30, x_31); +lean_ctor_set_tag(x_1, 0); +lean_ctor_set(x_1, 0, x_32); +return x_1; +} else { -lean_object* x_11; +lean_object* x_33; lean_object* x_34; lean_object* x_35; lean_object* x_36; lean_object* x_37; lean_dec(x_1); -x_11 = l_Lean_instFromJsonLeanOptions___closed__2; -return x_11; +x_33 = l_Lean_instFromJsonLeanOptions___closed__1; +x_34 = lean_string_append(x_33, x_26); +lean_dec(x_26); +x_35 = l_Lean_RBNode_foldM___at_Lean_instFromJsonLeanOptions___spec__1___closed__3; +x_36 = lean_string_append(x_34, x_35); +x_37 = lean_alloc_ctor(0, 1, 0); +lean_ctor_set(x_37, 0, x_36); +return x_37; +} +} } } } @@ -5002,78 +5596,84 @@ l_Lean_LeanOptionValue_asCliFlagValue___closed__1 = _init_l_Lean_LeanOptionValue lean_mark_persistent(l_Lean_LeanOptionValue_asCliFlagValue___closed__1); l_Lean_instInhabitedLeanOptions = _init_l_Lean_instInhabitedLeanOptions(); lean_mark_persistent(l_Lean_instInhabitedLeanOptions); -l_Prod_repr___at___private_Lean_Util_LeanOptions_0__Lean_reprLeanOptions____x40_Lean_Util_LeanOptions___hyg_543____spec__5___closed__1 = _init_l_Prod_repr___at___private_Lean_Util_LeanOptions_0__Lean_reprLeanOptions____x40_Lean_Util_LeanOptions___hyg_543____spec__5___closed__1(); 
-lean_mark_persistent(l_Prod_repr___at___private_Lean_Util_LeanOptions_0__Lean_reprLeanOptions____x40_Lean_Util_LeanOptions___hyg_543____spec__5___closed__1); -l_Prod_repr___at___private_Lean_Util_LeanOptions_0__Lean_reprLeanOptions____x40_Lean_Util_LeanOptions___hyg_543____spec__5___closed__2 = _init_l_Prod_repr___at___private_Lean_Util_LeanOptions_0__Lean_reprLeanOptions____x40_Lean_Util_LeanOptions___hyg_543____spec__5___closed__2(); -lean_mark_persistent(l_Prod_repr___at___private_Lean_Util_LeanOptions_0__Lean_reprLeanOptions____x40_Lean_Util_LeanOptions___hyg_543____spec__5___closed__2); -l_Prod_repr___at___private_Lean_Util_LeanOptions_0__Lean_reprLeanOptions____x40_Lean_Util_LeanOptions___hyg_543____spec__5___closed__3 = _init_l_Prod_repr___at___private_Lean_Util_LeanOptions_0__Lean_reprLeanOptions____x40_Lean_Util_LeanOptions___hyg_543____spec__5___closed__3(); -lean_mark_persistent(l_Prod_repr___at___private_Lean_Util_LeanOptions_0__Lean_reprLeanOptions____x40_Lean_Util_LeanOptions___hyg_543____spec__5___closed__3); -l_Prod_repr___at___private_Lean_Util_LeanOptions_0__Lean_reprLeanOptions____x40_Lean_Util_LeanOptions___hyg_543____spec__5___closed__4 = _init_l_Prod_repr___at___private_Lean_Util_LeanOptions_0__Lean_reprLeanOptions____x40_Lean_Util_LeanOptions___hyg_543____spec__5___closed__4(); -lean_mark_persistent(l_Prod_repr___at___private_Lean_Util_LeanOptions_0__Lean_reprLeanOptions____x40_Lean_Util_LeanOptions___hyg_543____spec__5___closed__4); -l_Prod_repr___at___private_Lean_Util_LeanOptions_0__Lean_reprLeanOptions____x40_Lean_Util_LeanOptions___hyg_543____spec__5___closed__5 = _init_l_Prod_repr___at___private_Lean_Util_LeanOptions_0__Lean_reprLeanOptions____x40_Lean_Util_LeanOptions___hyg_543____spec__5___closed__5(); -lean_mark_persistent(l_Prod_repr___at___private_Lean_Util_LeanOptions_0__Lean_reprLeanOptions____x40_Lean_Util_LeanOptions___hyg_543____spec__5___closed__5); -l_Prod_repr___at___private_Lean_Util_LeanOptions_0__Lean_reprLeanOptions____x40_Lean_Util_LeanOptions___hyg_543____spec__5___closed__6 = _init_l_Prod_repr___at___private_Lean_Util_LeanOptions_0__Lean_reprLeanOptions____x40_Lean_Util_LeanOptions___hyg_543____spec__5___closed__6(); -lean_mark_persistent(l_Prod_repr___at___private_Lean_Util_LeanOptions_0__Lean_reprLeanOptions____x40_Lean_Util_LeanOptions___hyg_543____spec__5___closed__6); -l_Prod_repr___at___private_Lean_Util_LeanOptions_0__Lean_reprLeanOptions____x40_Lean_Util_LeanOptions___hyg_543____spec__5___closed__7 = _init_l_Prod_repr___at___private_Lean_Util_LeanOptions_0__Lean_reprLeanOptions____x40_Lean_Util_LeanOptions___hyg_543____spec__5___closed__7(); -lean_mark_persistent(l_Prod_repr___at___private_Lean_Util_LeanOptions_0__Lean_reprLeanOptions____x40_Lean_Util_LeanOptions___hyg_543____spec__5___closed__7); -l_Prod_repr___at___private_Lean_Util_LeanOptions_0__Lean_reprLeanOptions____x40_Lean_Util_LeanOptions___hyg_543____spec__5___closed__8 = _init_l_Prod_repr___at___private_Lean_Util_LeanOptions_0__Lean_reprLeanOptions____x40_Lean_Util_LeanOptions___hyg_543____spec__5___closed__8(); -lean_mark_persistent(l_Prod_repr___at___private_Lean_Util_LeanOptions_0__Lean_reprLeanOptions____x40_Lean_Util_LeanOptions___hyg_543____spec__5___closed__8); -l_Prod_repr___at___private_Lean_Util_LeanOptions_0__Lean_reprLeanOptions____x40_Lean_Util_LeanOptions___hyg_543____spec__5___closed__9 = _init_l_Prod_repr___at___private_Lean_Util_LeanOptions_0__Lean_reprLeanOptions____x40_Lean_Util_LeanOptions___hyg_543____spec__5___closed__9(); 
-lean_mark_persistent(l_Prod_repr___at___private_Lean_Util_LeanOptions_0__Lean_reprLeanOptions____x40_Lean_Util_LeanOptions___hyg_543____spec__5___closed__9); -l_List_repr___at___private_Lean_Util_LeanOptions_0__Lean_reprLeanOptions____x40_Lean_Util_LeanOptions___hyg_543____spec__3___closed__1 = _init_l_List_repr___at___private_Lean_Util_LeanOptions_0__Lean_reprLeanOptions____x40_Lean_Util_LeanOptions___hyg_543____spec__3___closed__1(); -lean_mark_persistent(l_List_repr___at___private_Lean_Util_LeanOptions_0__Lean_reprLeanOptions____x40_Lean_Util_LeanOptions___hyg_543____spec__3___closed__1); -l_List_repr___at___private_Lean_Util_LeanOptions_0__Lean_reprLeanOptions____x40_Lean_Util_LeanOptions___hyg_543____spec__3___closed__2 = _init_l_List_repr___at___private_Lean_Util_LeanOptions_0__Lean_reprLeanOptions____x40_Lean_Util_LeanOptions___hyg_543____spec__3___closed__2(); -lean_mark_persistent(l_List_repr___at___private_Lean_Util_LeanOptions_0__Lean_reprLeanOptions____x40_Lean_Util_LeanOptions___hyg_543____spec__3___closed__2); -l_List_repr___at___private_Lean_Util_LeanOptions_0__Lean_reprLeanOptions____x40_Lean_Util_LeanOptions___hyg_543____spec__3___closed__3 = _init_l_List_repr___at___private_Lean_Util_LeanOptions_0__Lean_reprLeanOptions____x40_Lean_Util_LeanOptions___hyg_543____spec__3___closed__3(); -lean_mark_persistent(l_List_repr___at___private_Lean_Util_LeanOptions_0__Lean_reprLeanOptions____x40_Lean_Util_LeanOptions___hyg_543____spec__3___closed__3); -l_List_repr___at___private_Lean_Util_LeanOptions_0__Lean_reprLeanOptions____x40_Lean_Util_LeanOptions___hyg_543____spec__3___closed__4 = _init_l_List_repr___at___private_Lean_Util_LeanOptions_0__Lean_reprLeanOptions____x40_Lean_Util_LeanOptions___hyg_543____spec__3___closed__4(); -lean_mark_persistent(l_List_repr___at___private_Lean_Util_LeanOptions_0__Lean_reprLeanOptions____x40_Lean_Util_LeanOptions___hyg_543____spec__3___closed__4); -l_List_repr___at___private_Lean_Util_LeanOptions_0__Lean_reprLeanOptions____x40_Lean_Util_LeanOptions___hyg_543____spec__3___closed__5 = _init_l_List_repr___at___private_Lean_Util_LeanOptions_0__Lean_reprLeanOptions____x40_Lean_Util_LeanOptions___hyg_543____spec__3___closed__5(); -lean_mark_persistent(l_List_repr___at___private_Lean_Util_LeanOptions_0__Lean_reprLeanOptions____x40_Lean_Util_LeanOptions___hyg_543____spec__3___closed__5); -l_List_repr___at___private_Lean_Util_LeanOptions_0__Lean_reprLeanOptions____x40_Lean_Util_LeanOptions___hyg_543____spec__3___closed__6 = _init_l_List_repr___at___private_Lean_Util_LeanOptions_0__Lean_reprLeanOptions____x40_Lean_Util_LeanOptions___hyg_543____spec__3___closed__6(); -lean_mark_persistent(l_List_repr___at___private_Lean_Util_LeanOptions_0__Lean_reprLeanOptions____x40_Lean_Util_LeanOptions___hyg_543____spec__3___closed__6); -l_List_repr___at___private_Lean_Util_LeanOptions_0__Lean_reprLeanOptions____x40_Lean_Util_LeanOptions___hyg_543____spec__3___closed__7 = _init_l_List_repr___at___private_Lean_Util_LeanOptions_0__Lean_reprLeanOptions____x40_Lean_Util_LeanOptions___hyg_543____spec__3___closed__7(); -lean_mark_persistent(l_List_repr___at___private_Lean_Util_LeanOptions_0__Lean_reprLeanOptions____x40_Lean_Util_LeanOptions___hyg_543____spec__3___closed__7); -l_List_repr___at___private_Lean_Util_LeanOptions_0__Lean_reprLeanOptions____x40_Lean_Util_LeanOptions___hyg_543____spec__3___closed__8 = _init_l_List_repr___at___private_Lean_Util_LeanOptions_0__Lean_reprLeanOptions____x40_Lean_Util_LeanOptions___hyg_543____spec__3___closed__8(); 
-lean_mark_persistent(l_List_repr___at___private_Lean_Util_LeanOptions_0__Lean_reprLeanOptions____x40_Lean_Util_LeanOptions___hyg_543____spec__3___closed__8); -l___private_Lean_Util_LeanOptions_0__Lean_reprLeanOptions____x40_Lean_Util_LeanOptions___hyg_543____closed__1 = _init_l___private_Lean_Util_LeanOptions_0__Lean_reprLeanOptions____x40_Lean_Util_LeanOptions___hyg_543____closed__1(); -lean_mark_persistent(l___private_Lean_Util_LeanOptions_0__Lean_reprLeanOptions____x40_Lean_Util_LeanOptions___hyg_543____closed__1); -l___private_Lean_Util_LeanOptions_0__Lean_reprLeanOptions____x40_Lean_Util_LeanOptions___hyg_543____closed__2 = _init_l___private_Lean_Util_LeanOptions_0__Lean_reprLeanOptions____x40_Lean_Util_LeanOptions___hyg_543____closed__2(); -lean_mark_persistent(l___private_Lean_Util_LeanOptions_0__Lean_reprLeanOptions____x40_Lean_Util_LeanOptions___hyg_543____closed__2); -l___private_Lean_Util_LeanOptions_0__Lean_reprLeanOptions____x40_Lean_Util_LeanOptions___hyg_543____closed__3 = _init_l___private_Lean_Util_LeanOptions_0__Lean_reprLeanOptions____x40_Lean_Util_LeanOptions___hyg_543____closed__3(); -lean_mark_persistent(l___private_Lean_Util_LeanOptions_0__Lean_reprLeanOptions____x40_Lean_Util_LeanOptions___hyg_543____closed__3); -l___private_Lean_Util_LeanOptions_0__Lean_reprLeanOptions____x40_Lean_Util_LeanOptions___hyg_543____closed__4 = _init_l___private_Lean_Util_LeanOptions_0__Lean_reprLeanOptions____x40_Lean_Util_LeanOptions___hyg_543____closed__4(); -lean_mark_persistent(l___private_Lean_Util_LeanOptions_0__Lean_reprLeanOptions____x40_Lean_Util_LeanOptions___hyg_543____closed__4); -l___private_Lean_Util_LeanOptions_0__Lean_reprLeanOptions____x40_Lean_Util_LeanOptions___hyg_543____closed__5 = _init_l___private_Lean_Util_LeanOptions_0__Lean_reprLeanOptions____x40_Lean_Util_LeanOptions___hyg_543____closed__5(); -lean_mark_persistent(l___private_Lean_Util_LeanOptions_0__Lean_reprLeanOptions____x40_Lean_Util_LeanOptions___hyg_543____closed__5); -l___private_Lean_Util_LeanOptions_0__Lean_reprLeanOptions____x40_Lean_Util_LeanOptions___hyg_543____closed__6 = _init_l___private_Lean_Util_LeanOptions_0__Lean_reprLeanOptions____x40_Lean_Util_LeanOptions___hyg_543____closed__6(); -lean_mark_persistent(l___private_Lean_Util_LeanOptions_0__Lean_reprLeanOptions____x40_Lean_Util_LeanOptions___hyg_543____closed__6); -l___private_Lean_Util_LeanOptions_0__Lean_reprLeanOptions____x40_Lean_Util_LeanOptions___hyg_543____closed__7 = _init_l___private_Lean_Util_LeanOptions_0__Lean_reprLeanOptions____x40_Lean_Util_LeanOptions___hyg_543____closed__7(); -lean_mark_persistent(l___private_Lean_Util_LeanOptions_0__Lean_reprLeanOptions____x40_Lean_Util_LeanOptions___hyg_543____closed__7); -l___private_Lean_Util_LeanOptions_0__Lean_reprLeanOptions____x40_Lean_Util_LeanOptions___hyg_543____closed__8 = _init_l___private_Lean_Util_LeanOptions_0__Lean_reprLeanOptions____x40_Lean_Util_LeanOptions___hyg_543____closed__8(); -lean_mark_persistent(l___private_Lean_Util_LeanOptions_0__Lean_reprLeanOptions____x40_Lean_Util_LeanOptions___hyg_543____closed__8); -l___private_Lean_Util_LeanOptions_0__Lean_reprLeanOptions____x40_Lean_Util_LeanOptions___hyg_543____closed__9 = _init_l___private_Lean_Util_LeanOptions_0__Lean_reprLeanOptions____x40_Lean_Util_LeanOptions___hyg_543____closed__9(); -lean_mark_persistent(l___private_Lean_Util_LeanOptions_0__Lean_reprLeanOptions____x40_Lean_Util_LeanOptions___hyg_543____closed__9); 
-l___private_Lean_Util_LeanOptions_0__Lean_reprLeanOptions____x40_Lean_Util_LeanOptions___hyg_543____closed__10 = _init_l___private_Lean_Util_LeanOptions_0__Lean_reprLeanOptions____x40_Lean_Util_LeanOptions___hyg_543____closed__10(); -lean_mark_persistent(l___private_Lean_Util_LeanOptions_0__Lean_reprLeanOptions____x40_Lean_Util_LeanOptions___hyg_543____closed__10); -l___private_Lean_Util_LeanOptions_0__Lean_reprLeanOptions____x40_Lean_Util_LeanOptions___hyg_543____closed__11 = _init_l___private_Lean_Util_LeanOptions_0__Lean_reprLeanOptions____x40_Lean_Util_LeanOptions___hyg_543____closed__11(); -lean_mark_persistent(l___private_Lean_Util_LeanOptions_0__Lean_reprLeanOptions____x40_Lean_Util_LeanOptions___hyg_543____closed__11); -l___private_Lean_Util_LeanOptions_0__Lean_reprLeanOptions____x40_Lean_Util_LeanOptions___hyg_543____closed__12 = _init_l___private_Lean_Util_LeanOptions_0__Lean_reprLeanOptions____x40_Lean_Util_LeanOptions___hyg_543____closed__12(); -lean_mark_persistent(l___private_Lean_Util_LeanOptions_0__Lean_reprLeanOptions____x40_Lean_Util_LeanOptions___hyg_543____closed__12); -l___private_Lean_Util_LeanOptions_0__Lean_reprLeanOptions____x40_Lean_Util_LeanOptions___hyg_543____closed__13 = _init_l___private_Lean_Util_LeanOptions_0__Lean_reprLeanOptions____x40_Lean_Util_LeanOptions___hyg_543____closed__13(); -lean_mark_persistent(l___private_Lean_Util_LeanOptions_0__Lean_reprLeanOptions____x40_Lean_Util_LeanOptions___hyg_543____closed__13); -l___private_Lean_Util_LeanOptions_0__Lean_reprLeanOptions____x40_Lean_Util_LeanOptions___hyg_543____closed__14 = _init_l___private_Lean_Util_LeanOptions_0__Lean_reprLeanOptions____x40_Lean_Util_LeanOptions___hyg_543____closed__14(); -lean_mark_persistent(l___private_Lean_Util_LeanOptions_0__Lean_reprLeanOptions____x40_Lean_Util_LeanOptions___hyg_543____closed__14); -l___private_Lean_Util_LeanOptions_0__Lean_reprLeanOptions____x40_Lean_Util_LeanOptions___hyg_543____closed__15 = _init_l___private_Lean_Util_LeanOptions_0__Lean_reprLeanOptions____x40_Lean_Util_LeanOptions___hyg_543____closed__15(); -lean_mark_persistent(l___private_Lean_Util_LeanOptions_0__Lean_reprLeanOptions____x40_Lean_Util_LeanOptions___hyg_543____closed__15); +l_Prod_repr___at___private_Lean_Util_LeanOptions_0__Lean_reprLeanOptions____x40_Lean_Util_LeanOptions___hyg_541____spec__5___closed__1 = _init_l_Prod_repr___at___private_Lean_Util_LeanOptions_0__Lean_reprLeanOptions____x40_Lean_Util_LeanOptions___hyg_541____spec__5___closed__1(); +lean_mark_persistent(l_Prod_repr___at___private_Lean_Util_LeanOptions_0__Lean_reprLeanOptions____x40_Lean_Util_LeanOptions___hyg_541____spec__5___closed__1); +l_Prod_repr___at___private_Lean_Util_LeanOptions_0__Lean_reprLeanOptions____x40_Lean_Util_LeanOptions___hyg_541____spec__5___closed__2 = _init_l_Prod_repr___at___private_Lean_Util_LeanOptions_0__Lean_reprLeanOptions____x40_Lean_Util_LeanOptions___hyg_541____spec__5___closed__2(); +lean_mark_persistent(l_Prod_repr___at___private_Lean_Util_LeanOptions_0__Lean_reprLeanOptions____x40_Lean_Util_LeanOptions___hyg_541____spec__5___closed__2); +l_Prod_repr___at___private_Lean_Util_LeanOptions_0__Lean_reprLeanOptions____x40_Lean_Util_LeanOptions___hyg_541____spec__5___closed__3 = _init_l_Prod_repr___at___private_Lean_Util_LeanOptions_0__Lean_reprLeanOptions____x40_Lean_Util_LeanOptions___hyg_541____spec__5___closed__3(); +lean_mark_persistent(l_Prod_repr___at___private_Lean_Util_LeanOptions_0__Lean_reprLeanOptions____x40_Lean_Util_LeanOptions___hyg_541____spec__5___closed__3); 
+l_Prod_repr___at___private_Lean_Util_LeanOptions_0__Lean_reprLeanOptions____x40_Lean_Util_LeanOptions___hyg_541____spec__5___closed__4 = _init_l_Prod_repr___at___private_Lean_Util_LeanOptions_0__Lean_reprLeanOptions____x40_Lean_Util_LeanOptions___hyg_541____spec__5___closed__4(); +lean_mark_persistent(l_Prod_repr___at___private_Lean_Util_LeanOptions_0__Lean_reprLeanOptions____x40_Lean_Util_LeanOptions___hyg_541____spec__5___closed__4); +l_Prod_repr___at___private_Lean_Util_LeanOptions_0__Lean_reprLeanOptions____x40_Lean_Util_LeanOptions___hyg_541____spec__5___closed__5 = _init_l_Prod_repr___at___private_Lean_Util_LeanOptions_0__Lean_reprLeanOptions____x40_Lean_Util_LeanOptions___hyg_541____spec__5___closed__5(); +lean_mark_persistent(l_Prod_repr___at___private_Lean_Util_LeanOptions_0__Lean_reprLeanOptions____x40_Lean_Util_LeanOptions___hyg_541____spec__5___closed__5); +l_Prod_repr___at___private_Lean_Util_LeanOptions_0__Lean_reprLeanOptions____x40_Lean_Util_LeanOptions___hyg_541____spec__5___closed__6 = _init_l_Prod_repr___at___private_Lean_Util_LeanOptions_0__Lean_reprLeanOptions____x40_Lean_Util_LeanOptions___hyg_541____spec__5___closed__6(); +lean_mark_persistent(l_Prod_repr___at___private_Lean_Util_LeanOptions_0__Lean_reprLeanOptions____x40_Lean_Util_LeanOptions___hyg_541____spec__5___closed__6); +l_Prod_repr___at___private_Lean_Util_LeanOptions_0__Lean_reprLeanOptions____x40_Lean_Util_LeanOptions___hyg_541____spec__5___closed__7 = _init_l_Prod_repr___at___private_Lean_Util_LeanOptions_0__Lean_reprLeanOptions____x40_Lean_Util_LeanOptions___hyg_541____spec__5___closed__7(); +lean_mark_persistent(l_Prod_repr___at___private_Lean_Util_LeanOptions_0__Lean_reprLeanOptions____x40_Lean_Util_LeanOptions___hyg_541____spec__5___closed__7); +l_Prod_repr___at___private_Lean_Util_LeanOptions_0__Lean_reprLeanOptions____x40_Lean_Util_LeanOptions___hyg_541____spec__5___closed__8 = _init_l_Prod_repr___at___private_Lean_Util_LeanOptions_0__Lean_reprLeanOptions____x40_Lean_Util_LeanOptions___hyg_541____spec__5___closed__8(); +lean_mark_persistent(l_Prod_repr___at___private_Lean_Util_LeanOptions_0__Lean_reprLeanOptions____x40_Lean_Util_LeanOptions___hyg_541____spec__5___closed__8); +l_Prod_repr___at___private_Lean_Util_LeanOptions_0__Lean_reprLeanOptions____x40_Lean_Util_LeanOptions___hyg_541____spec__5___closed__9 = _init_l_Prod_repr___at___private_Lean_Util_LeanOptions_0__Lean_reprLeanOptions____x40_Lean_Util_LeanOptions___hyg_541____spec__5___closed__9(); +lean_mark_persistent(l_Prod_repr___at___private_Lean_Util_LeanOptions_0__Lean_reprLeanOptions____x40_Lean_Util_LeanOptions___hyg_541____spec__5___closed__9); +l_List_repr___at___private_Lean_Util_LeanOptions_0__Lean_reprLeanOptions____x40_Lean_Util_LeanOptions___hyg_541____spec__3___closed__1 = _init_l_List_repr___at___private_Lean_Util_LeanOptions_0__Lean_reprLeanOptions____x40_Lean_Util_LeanOptions___hyg_541____spec__3___closed__1(); +lean_mark_persistent(l_List_repr___at___private_Lean_Util_LeanOptions_0__Lean_reprLeanOptions____x40_Lean_Util_LeanOptions___hyg_541____spec__3___closed__1); +l_List_repr___at___private_Lean_Util_LeanOptions_0__Lean_reprLeanOptions____x40_Lean_Util_LeanOptions___hyg_541____spec__3___closed__2 = _init_l_List_repr___at___private_Lean_Util_LeanOptions_0__Lean_reprLeanOptions____x40_Lean_Util_LeanOptions___hyg_541____spec__3___closed__2(); +lean_mark_persistent(l_List_repr___at___private_Lean_Util_LeanOptions_0__Lean_reprLeanOptions____x40_Lean_Util_LeanOptions___hyg_541____spec__3___closed__2); 
+l_List_repr___at___private_Lean_Util_LeanOptions_0__Lean_reprLeanOptions____x40_Lean_Util_LeanOptions___hyg_541____spec__3___closed__3 = _init_l_List_repr___at___private_Lean_Util_LeanOptions_0__Lean_reprLeanOptions____x40_Lean_Util_LeanOptions___hyg_541____spec__3___closed__3(); +lean_mark_persistent(l_List_repr___at___private_Lean_Util_LeanOptions_0__Lean_reprLeanOptions____x40_Lean_Util_LeanOptions___hyg_541____spec__3___closed__3); +l_List_repr___at___private_Lean_Util_LeanOptions_0__Lean_reprLeanOptions____x40_Lean_Util_LeanOptions___hyg_541____spec__3___closed__4 = _init_l_List_repr___at___private_Lean_Util_LeanOptions_0__Lean_reprLeanOptions____x40_Lean_Util_LeanOptions___hyg_541____spec__3___closed__4(); +lean_mark_persistent(l_List_repr___at___private_Lean_Util_LeanOptions_0__Lean_reprLeanOptions____x40_Lean_Util_LeanOptions___hyg_541____spec__3___closed__4); +l_List_repr___at___private_Lean_Util_LeanOptions_0__Lean_reprLeanOptions____x40_Lean_Util_LeanOptions___hyg_541____spec__3___closed__5 = _init_l_List_repr___at___private_Lean_Util_LeanOptions_0__Lean_reprLeanOptions____x40_Lean_Util_LeanOptions___hyg_541____spec__3___closed__5(); +lean_mark_persistent(l_List_repr___at___private_Lean_Util_LeanOptions_0__Lean_reprLeanOptions____x40_Lean_Util_LeanOptions___hyg_541____spec__3___closed__5); +l_List_repr___at___private_Lean_Util_LeanOptions_0__Lean_reprLeanOptions____x40_Lean_Util_LeanOptions___hyg_541____spec__3___closed__6 = _init_l_List_repr___at___private_Lean_Util_LeanOptions_0__Lean_reprLeanOptions____x40_Lean_Util_LeanOptions___hyg_541____spec__3___closed__6(); +lean_mark_persistent(l_List_repr___at___private_Lean_Util_LeanOptions_0__Lean_reprLeanOptions____x40_Lean_Util_LeanOptions___hyg_541____spec__3___closed__6); +l_List_repr___at___private_Lean_Util_LeanOptions_0__Lean_reprLeanOptions____x40_Lean_Util_LeanOptions___hyg_541____spec__3___closed__7 = _init_l_List_repr___at___private_Lean_Util_LeanOptions_0__Lean_reprLeanOptions____x40_Lean_Util_LeanOptions___hyg_541____spec__3___closed__7(); +lean_mark_persistent(l_List_repr___at___private_Lean_Util_LeanOptions_0__Lean_reprLeanOptions____x40_Lean_Util_LeanOptions___hyg_541____spec__3___closed__7); +l_List_repr___at___private_Lean_Util_LeanOptions_0__Lean_reprLeanOptions____x40_Lean_Util_LeanOptions___hyg_541____spec__3___closed__8 = _init_l_List_repr___at___private_Lean_Util_LeanOptions_0__Lean_reprLeanOptions____x40_Lean_Util_LeanOptions___hyg_541____spec__3___closed__8(); +lean_mark_persistent(l_List_repr___at___private_Lean_Util_LeanOptions_0__Lean_reprLeanOptions____x40_Lean_Util_LeanOptions___hyg_541____spec__3___closed__8); +l___private_Lean_Util_LeanOptions_0__Lean_reprLeanOptions____x40_Lean_Util_LeanOptions___hyg_541____closed__1 = _init_l___private_Lean_Util_LeanOptions_0__Lean_reprLeanOptions____x40_Lean_Util_LeanOptions___hyg_541____closed__1(); +lean_mark_persistent(l___private_Lean_Util_LeanOptions_0__Lean_reprLeanOptions____x40_Lean_Util_LeanOptions___hyg_541____closed__1); +l___private_Lean_Util_LeanOptions_0__Lean_reprLeanOptions____x40_Lean_Util_LeanOptions___hyg_541____closed__2 = _init_l___private_Lean_Util_LeanOptions_0__Lean_reprLeanOptions____x40_Lean_Util_LeanOptions___hyg_541____closed__2(); +lean_mark_persistent(l___private_Lean_Util_LeanOptions_0__Lean_reprLeanOptions____x40_Lean_Util_LeanOptions___hyg_541____closed__2); +l___private_Lean_Util_LeanOptions_0__Lean_reprLeanOptions____x40_Lean_Util_LeanOptions___hyg_541____closed__3 = 
_init_l___private_Lean_Util_LeanOptions_0__Lean_reprLeanOptions____x40_Lean_Util_LeanOptions___hyg_541____closed__3(); +lean_mark_persistent(l___private_Lean_Util_LeanOptions_0__Lean_reprLeanOptions____x40_Lean_Util_LeanOptions___hyg_541____closed__3); +l___private_Lean_Util_LeanOptions_0__Lean_reprLeanOptions____x40_Lean_Util_LeanOptions___hyg_541____closed__4 = _init_l___private_Lean_Util_LeanOptions_0__Lean_reprLeanOptions____x40_Lean_Util_LeanOptions___hyg_541____closed__4(); +lean_mark_persistent(l___private_Lean_Util_LeanOptions_0__Lean_reprLeanOptions____x40_Lean_Util_LeanOptions___hyg_541____closed__4); +l___private_Lean_Util_LeanOptions_0__Lean_reprLeanOptions____x40_Lean_Util_LeanOptions___hyg_541____closed__5 = _init_l___private_Lean_Util_LeanOptions_0__Lean_reprLeanOptions____x40_Lean_Util_LeanOptions___hyg_541____closed__5(); +lean_mark_persistent(l___private_Lean_Util_LeanOptions_0__Lean_reprLeanOptions____x40_Lean_Util_LeanOptions___hyg_541____closed__5); +l___private_Lean_Util_LeanOptions_0__Lean_reprLeanOptions____x40_Lean_Util_LeanOptions___hyg_541____closed__6 = _init_l___private_Lean_Util_LeanOptions_0__Lean_reprLeanOptions____x40_Lean_Util_LeanOptions___hyg_541____closed__6(); +lean_mark_persistent(l___private_Lean_Util_LeanOptions_0__Lean_reprLeanOptions____x40_Lean_Util_LeanOptions___hyg_541____closed__6); +l___private_Lean_Util_LeanOptions_0__Lean_reprLeanOptions____x40_Lean_Util_LeanOptions___hyg_541____closed__7 = _init_l___private_Lean_Util_LeanOptions_0__Lean_reprLeanOptions____x40_Lean_Util_LeanOptions___hyg_541____closed__7(); +lean_mark_persistent(l___private_Lean_Util_LeanOptions_0__Lean_reprLeanOptions____x40_Lean_Util_LeanOptions___hyg_541____closed__7); +l___private_Lean_Util_LeanOptions_0__Lean_reprLeanOptions____x40_Lean_Util_LeanOptions___hyg_541____closed__8 = _init_l___private_Lean_Util_LeanOptions_0__Lean_reprLeanOptions____x40_Lean_Util_LeanOptions___hyg_541____closed__8(); +lean_mark_persistent(l___private_Lean_Util_LeanOptions_0__Lean_reprLeanOptions____x40_Lean_Util_LeanOptions___hyg_541____closed__8); +l___private_Lean_Util_LeanOptions_0__Lean_reprLeanOptions____x40_Lean_Util_LeanOptions___hyg_541____closed__9 = _init_l___private_Lean_Util_LeanOptions_0__Lean_reprLeanOptions____x40_Lean_Util_LeanOptions___hyg_541____closed__9(); +lean_mark_persistent(l___private_Lean_Util_LeanOptions_0__Lean_reprLeanOptions____x40_Lean_Util_LeanOptions___hyg_541____closed__9); +l___private_Lean_Util_LeanOptions_0__Lean_reprLeanOptions____x40_Lean_Util_LeanOptions___hyg_541____closed__10 = _init_l___private_Lean_Util_LeanOptions_0__Lean_reprLeanOptions____x40_Lean_Util_LeanOptions___hyg_541____closed__10(); +lean_mark_persistent(l___private_Lean_Util_LeanOptions_0__Lean_reprLeanOptions____x40_Lean_Util_LeanOptions___hyg_541____closed__10); +l___private_Lean_Util_LeanOptions_0__Lean_reprLeanOptions____x40_Lean_Util_LeanOptions___hyg_541____closed__11 = _init_l___private_Lean_Util_LeanOptions_0__Lean_reprLeanOptions____x40_Lean_Util_LeanOptions___hyg_541____closed__11(); +lean_mark_persistent(l___private_Lean_Util_LeanOptions_0__Lean_reprLeanOptions____x40_Lean_Util_LeanOptions___hyg_541____closed__11); +l___private_Lean_Util_LeanOptions_0__Lean_reprLeanOptions____x40_Lean_Util_LeanOptions___hyg_541____closed__12 = _init_l___private_Lean_Util_LeanOptions_0__Lean_reprLeanOptions____x40_Lean_Util_LeanOptions___hyg_541____closed__12(); 
+lean_mark_persistent(l___private_Lean_Util_LeanOptions_0__Lean_reprLeanOptions____x40_Lean_Util_LeanOptions___hyg_541____closed__12); +l___private_Lean_Util_LeanOptions_0__Lean_reprLeanOptions____x40_Lean_Util_LeanOptions___hyg_541____closed__13 = _init_l___private_Lean_Util_LeanOptions_0__Lean_reprLeanOptions____x40_Lean_Util_LeanOptions___hyg_541____closed__13(); +lean_mark_persistent(l___private_Lean_Util_LeanOptions_0__Lean_reprLeanOptions____x40_Lean_Util_LeanOptions___hyg_541____closed__13); +l___private_Lean_Util_LeanOptions_0__Lean_reprLeanOptions____x40_Lean_Util_LeanOptions___hyg_541____closed__14 = _init_l___private_Lean_Util_LeanOptions_0__Lean_reprLeanOptions____x40_Lean_Util_LeanOptions___hyg_541____closed__14(); +lean_mark_persistent(l___private_Lean_Util_LeanOptions_0__Lean_reprLeanOptions____x40_Lean_Util_LeanOptions___hyg_541____closed__14); +l___private_Lean_Util_LeanOptions_0__Lean_reprLeanOptions____x40_Lean_Util_LeanOptions___hyg_541____closed__15 = _init_l___private_Lean_Util_LeanOptions_0__Lean_reprLeanOptions____x40_Lean_Util_LeanOptions___hyg_541____closed__15(); +lean_mark_persistent(l___private_Lean_Util_LeanOptions_0__Lean_reprLeanOptions____x40_Lean_Util_LeanOptions___hyg_541____closed__15); l_Lean_instReprLeanOptions___closed__1 = _init_l_Lean_instReprLeanOptions___closed__1(); lean_mark_persistent(l_Lean_instReprLeanOptions___closed__1); l_Lean_instReprLeanOptions = _init_l_Lean_instReprLeanOptions(); lean_mark_persistent(l_Lean_instReprLeanOptions); +l_Lean_instEmptyCollectionLeanOptions = _init_l_Lean_instEmptyCollectionLeanOptions(); +lean_mark_persistent(l_Lean_instEmptyCollectionLeanOptions); +l_Lean_RBNode_foldM___at_Lean_instFromJsonLeanOptions___spec__1___closed__1 = _init_l_Lean_RBNode_foldM___at_Lean_instFromJsonLeanOptions___spec__1___closed__1(); +lean_mark_persistent(l_Lean_RBNode_foldM___at_Lean_instFromJsonLeanOptions___spec__1___closed__1); +l_Lean_RBNode_foldM___at_Lean_instFromJsonLeanOptions___spec__1___closed__2 = _init_l_Lean_RBNode_foldM___at_Lean_instFromJsonLeanOptions___spec__1___closed__2(); +lean_mark_persistent(l_Lean_RBNode_foldM___at_Lean_instFromJsonLeanOptions___spec__1___closed__2); +l_Lean_RBNode_foldM___at_Lean_instFromJsonLeanOptions___spec__1___closed__3 = _init_l_Lean_RBNode_foldM___at_Lean_instFromJsonLeanOptions___spec__1___closed__3(); +lean_mark_persistent(l_Lean_RBNode_foldM___at_Lean_instFromJsonLeanOptions___spec__1___closed__3); l_Lean_instFromJsonLeanOptions___closed__1 = _init_l_Lean_instFromJsonLeanOptions___closed__1(); lean_mark_persistent(l_Lean_instFromJsonLeanOptions___closed__1); -l_Lean_instFromJsonLeanOptions___closed__2 = _init_l_Lean_instFromJsonLeanOptions___closed__2(); -lean_mark_persistent(l_Lean_instFromJsonLeanOptions___closed__2); l_Lean_RBNode_fold___at_Lean_instToJsonLeanOptions___spec__1___closed__1 = _init_l_Lean_RBNode_fold___at_Lean_instToJsonLeanOptions___spec__1___closed__1(); lean_mark_persistent(l_Lean_RBNode_fold___at_Lean_instToJsonLeanOptions___spec__1___closed__1); return lean_io_result_mk_ok(lean_box(0)); diff --git a/stage0/stdlib/Lean/Util/Profiler.c b/stage0/stdlib/Lean/Util/Profiler.c index c0d06f587389..1d7093b8b209 100644 --- a/stage0/stdlib/Lean/Util/Profiler.c +++ b/stage0/stdlib/Lean/Util/Profiler.c @@ -243,7 +243,6 @@ static lean_object* l___private_Lean_Util_Profiler_0__Lean_Firefox_fromJsonProfi uint8_t l_Lean_Name_isPrefixOf(lean_object*, lean_object*); static lean_object* 
l___private_Lean_Util_Profiler_0__Lean_Firefox_fromJsonFrameTable____x40_Lean_Util_Profiler___hyg_2850____closed__13; static lean_object* l_Lean_Firefox_instToJsonThread___closed__1; -lean_object* l_Lean_Json_getObjValAs_x3f___at___private_Lean_Message_0__Lean_fromJsonBaseMessage____x40_Lean_Message___hyg_3128____spec__4(lean_object*, lean_object*); double lean_float_negate(double); LEAN_EXPORT lean_object* l_Array_forIn_x27Unsafe_loop___at___private_Lean_Util_Profiler_0__Lean_Firefox_addTrace_go___spec__34(uint8_t, lean_object*, lean_object*, lean_object*, lean_object*, lean_object*, size_t, size_t, lean_object*, lean_object*, lean_object*); LEAN_EXPORT lean_object* l_Std_DHashMap_Internal_AssocList_get_x3f___at___private_Lean_Util_Profiler_0__Lean_Firefox_addTrace_go___spec__36___boxed(lean_object*, lean_object*); @@ -428,6 +427,7 @@ static lean_object* l___private_Lean_Util_Profiler_0__Lean_Firefox_fromJsonThrea static lean_object* l___private_Lean_Util_Profiler_0__Lean_Firefox_fromJsonProfile____x40_Lean_Util_Profiler___hyg_4702____closed__12; static lean_object* l___private_Lean_Util_Profiler_0__Lean_Firefox_fromJsonFrameTable____x40_Lean_Util_Profiler___hyg_2850____closed__38; static lean_object* l___private_Lean_Util_Profiler_0__Lean_Firefox_fromJsonSampleUnits____x40_Lean_Util_Profiler___hyg_472____closed__18; +lean_object* l_Lean_Json_getObjValAs_x3f___at___private_Lean_Setup_0__Lean_fromJsonImport____x40_Lean_Setup___hyg_190____spec__2(lean_object*, lean_object*); static lean_object* l___private_Lean_Util_Profiler_0__Lean_Firefox_fromJsonThread____x40_Lean_Util_Profiler___hyg_4084____closed__5; LEAN_EXPORT lean_object* l_Std_DHashMap_Internal_AssocList_foldlM___at___private_Lean_Util_Profiler_0__Lean_Firefox_addTrace_go___spec__11(lean_object*, lean_object*, lean_object*); static lean_object* l___private_Lean_Util_Profiler_0__Lean_Firefox_fromJsonStackTable____x40_Lean_Util_Profiler___hyg_1303____closed__11; @@ -654,6 +654,7 @@ static lean_object* l___private_Lean_Util_Profiler_0__Lean_Firefox_fromJsonResou lean_object* l_Lean_Json_getNat_x3f(lean_object*); static lean_object* l___private_Lean_Util_Profiler_0__Lean_Firefox_fromJsonStackTable____x40_Lean_Util_Profiler___hyg_1303____closed__26; static lean_object* l___private_Lean_Util_Profiler_0__Lean_Firefox_fromJsonSampleUnits____x40_Lean_Util_Profiler___hyg_472____closed__1; +lean_object* l_Lean_Json_getObjValAs_x3f___at___private_Lean_Setup_0__Lean_fromJsonImport____x40_Lean_Setup___hyg_190____spec__1(lean_object*, lean_object*); static lean_object* l_Lean_Firefox_categories___closed__1; static lean_object* l_Lean_Firefox_categories___closed__28; static lean_object* l___private_Lean_Util_Profiler_0__Lean_Firefox_fromJsonFrameTable____x40_Lean_Util_Profiler___hyg_2850____closed__45; @@ -753,7 +754,6 @@ static lean_object* l___private_Lean_Util_Profiler_0__Lean_Firefox_fromJsonProfi lean_object* lean_int_neg(lean_object*); static lean_object* l___private_Lean_Util_Profiler_0__Lean_Firefox_fromJsonSamplesTable____x40_Lean_Util_Profiler___hyg_1677____closed__23; static lean_object* l_Array_mapMUnsafe_map___at_Lean_Firefox_Profile_export___spec__15___closed__1; -lean_object* l_Lean_Json_getObjValAs_x3f___at___private_Lean_Message_0__Lean_fromJsonSerialMessage____x40_Lean_Message___hyg_3661____spec__1(lean_object*, lean_object*); lean_object* lean_array_get(lean_object*, lean_object*, lean_object*); static lean_object* 
l___private_Lean_Util_Profiler_0__Lean_Firefox_fromJsonStackTable____x40_Lean_Util_Profiler___hyg_1303____closed__18; uint8_t lean_nat_dec_le(lean_object*, lean_object*); @@ -1836,7 +1836,7 @@ LEAN_EXPORT lean_object* l___private_Lean_Util_Profiler_0__Lean_Firefox_fromJson lean_object* x_2; lean_object* x_3; x_2 = l___private_Lean_Util_Profiler_0__Lean_Firefox_fromJsonCategory____x40_Lean_Util_Profiler___hyg_230____closed__1; lean_inc(x_1); -x_3 = l_Lean_Json_getObjValAs_x3f___at___private_Lean_Message_0__Lean_fromJsonSerialMessage____x40_Lean_Message___hyg_3661____spec__1(x_1, x_2); +x_3 = l_Lean_Json_getObjValAs_x3f___at___private_Lean_Setup_0__Lean_fromJsonImport____x40_Lean_Setup___hyg_190____spec__1(x_1, x_2); if (lean_obj_tag(x_3) == 0) { uint8_t x_4; @@ -10736,7 +10736,7 @@ lean_inc(x_23); lean_dec(x_14); x_24 = l___private_Lean_Util_Profiler_0__Lean_Firefox_fromJsonThread____x40_Lean_Util_Profiler___hyg_4084____closed__9; lean_inc(x_1); -x_25 = l_Lean_Json_getObjValAs_x3f___at___private_Lean_Message_0__Lean_fromJsonBaseMessage____x40_Lean_Message___hyg_3128____spec__4(x_1, x_24); +x_25 = l_Lean_Json_getObjValAs_x3f___at___private_Lean_Setup_0__Lean_fromJsonImport____x40_Lean_Setup___hyg_190____spec__2(x_1, x_24); if (lean_obj_tag(x_25) == 0) { uint8_t x_26; diff --git a/stage0/stdlib/Lean/Widget/UserWidget.c b/stage0/stdlib/Lean/Widget/UserWidget.c index 1ed86cf3031e..491e489f26c0 100644 --- a/stage0/stdlib/Lean/Widget/UserWidget.c +++ b/stage0/stdlib/Lean/Widget/UserWidget.c @@ -458,6 +458,7 @@ lean_object* l_String_Range_toLspRange(lean_object*, lean_object*); lean_object* l_Lean_MessageData_ofConstName(lean_object*, uint8_t); lean_object* l_Array_append___rarg(lean_object*, lean_object*); uint8_t l_Lean_Environment_contains(lean_object*, lean_object*, uint8_t); +lean_object* l_List_flatMapTR_go___at___private_Lean_Data_Lsp_Window_0__toJsonShowMessageParams____x40_Lean_Data_Lsp_Window___hyg_245____spec__1(lean_object*, lean_object*); LEAN_EXPORT lean_object* l_Lean_Widget_initFn____x40_Lean_Widget_UserWidget___hyg_240____lambda__6___boxed(lean_object*, lean_object*, lean_object*, lean_object*, lean_object*, lean_object*, lean_object*, lean_object*, lean_object*); LEAN_EXPORT lean_object* l_Lean_Widget_initFn____x40_Lean_Widget_UserWidget___hyg_240____lambda__3(lean_object*, lean_object*, lean_object*, lean_object*, lean_object*, lean_object*, lean_object*, lean_object*, lean_object*); static lean_object* l___private_Lean_Widget_UserWidget_0__Lean_Widget_fromJsonUserWidgetDefinition____x40_Lean_Widget_UserWidget___hyg_3720____closed__6; @@ -622,7 +623,6 @@ LEAN_EXPORT lean_object* l_Lean_Widget_showWidgetSpec; LEAN_EXPORT lean_object* l_Lean_Widget_widgetInfosAt_x3f___lambda__1(lean_object*, lean_object*, lean_object*, lean_object*, lean_object*); lean_object* l_Lean_registerBuiltinAttribute(lean_object*, lean_object*); static lean_object* l_Lean_Widget_elabWidgetInstanceSpecAux___closed__51; -lean_object* l_List_flatMapTR_go___at___private_Lean_Util_Paths_0__Lean_toJsonLeanPaths____x40_Lean_Util_Paths___hyg_55____spec__2(lean_object*, lean_object*); lean_object* l_Lean_bignumToJson(lean_object*); LEAN_EXPORT lean_object* l_Lean_Widget_erasePanelWidget___rarg___boxed(lean_object*, lean_object*); static lean_object* l_Lean_Widget_elabWidgetInstanceSpecAux___closed__50; @@ -8236,7 +8236,7 @@ x_15 = lean_alloc_ctor(1, 2, 0); lean_ctor_set(x_15, 0, x_8); lean_ctor_set(x_15, 1, x_14); x_16 = l_Lean_Widget_initFn____x40_Lean_Widget_UserWidget___hyg_240____lambda__4___closed__9; -x_17 = 
l_List_flatMapTR_go___at___private_Lean_Util_Paths_0__Lean_toJsonLeanPaths____x40_Lean_Util_Paths___hyg_55____spec__2(x_15, x_16); +x_17 = l_List_flatMapTR_go___at___private_Lean_Data_Lsp_Window_0__toJsonShowMessageParams____x40_Lean_Data_Lsp_Window___hyg_245____spec__1(x_15, x_16); x_18 = l_Lean_Json_mkObj(x_17); return x_18; } @@ -8571,7 +8571,7 @@ x_7 = lean_alloc_ctor(1, 2, 0); lean_ctor_set(x_7, 0, x_6); lean_ctor_set(x_7, 1, x_5); x_8 = l_Lean_Widget_initFn____x40_Lean_Widget_UserWidget___hyg_240____lambda__4___closed__9; -x_9 = l_List_flatMapTR_go___at___private_Lean_Util_Paths_0__Lean_toJsonLeanPaths____x40_Lean_Util_Paths___hyg_55____spec__2(x_7, x_8); +x_9 = l_List_flatMapTR_go___at___private_Lean_Data_Lsp_Window_0__toJsonShowMessageParams____x40_Lean_Data_Lsp_Window___hyg_245____spec__1(x_7, x_8); x_10 = l_Lean_Json_mkObj(x_9); return x_10; } @@ -18954,7 +18954,7 @@ x_14 = lean_alloc_ctor(1, 2, 0); lean_ctor_set(x_14, 0, x_8); lean_ctor_set(x_14, 1, x_13); x_15 = l_Lean_Widget_initFn____x40_Lean_Widget_UserWidget___hyg_240____lambda__4___closed__9; -x_16 = l_List_flatMapTR_go___at___private_Lean_Util_Paths_0__Lean_toJsonLeanPaths____x40_Lean_Util_Paths___hyg_55____spec__2(x_14, x_15); +x_16 = l_List_flatMapTR_go___at___private_Lean_Data_Lsp_Window_0__toJsonShowMessageParams____x40_Lean_Data_Lsp_Window___hyg_245____spec__1(x_14, x_15); x_17 = l_Lean_Json_mkObj(x_16); return x_17; } @@ -18992,7 +18992,7 @@ x_30 = lean_alloc_ctor(1, 2, 0); lean_ctor_set(x_30, 0, x_24); lean_ctor_set(x_30, 1, x_29); x_31 = l_Lean_Widget_initFn____x40_Lean_Widget_UserWidget___hyg_240____lambda__4___closed__9; -x_32 = l_List_flatMapTR_go___at___private_Lean_Util_Paths_0__Lean_toJsonLeanPaths____x40_Lean_Util_Paths___hyg_55____spec__2(x_30, x_31); +x_32 = l_List_flatMapTR_go___at___private_Lean_Data_Lsp_Window_0__toJsonShowMessageParams____x40_Lean_Data_Lsp_Window___hyg_245____spec__1(x_30, x_31); x_33 = l_Lean_Json_mkObj(x_32); return x_33; } @@ -20475,7 +20475,7 @@ x_25 = lean_alloc_ctor(1, 2, 0); lean_ctor_set(x_25, 0, x_10); lean_ctor_set(x_25, 1, x_24); x_26 = l_Lean_Widget_initFn____x40_Lean_Widget_UserWidget___hyg_240____lambda__4___closed__9; -x_27 = l_List_flatMapTR_go___at___private_Lean_Util_Paths_0__Lean_toJsonLeanPaths____x40_Lean_Util_Paths___hyg_55____spec__2(x_25, x_26); +x_27 = l_List_flatMapTR_go___at___private_Lean_Data_Lsp_Window_0__toJsonShowMessageParams____x40_Lean_Data_Lsp_Window___hyg_245____spec__1(x_25, x_26); x_28 = l_Lean_Json_mkObj(x_27); return x_28; } @@ -22146,7 +22146,7 @@ x_6 = lean_alloc_ctor(1, 2, 0); lean_ctor_set(x_6, 0, x_5); lean_ctor_set(x_6, 1, x_4); x_7 = l_Lean_Widget_initFn____x40_Lean_Widget_UserWidget___hyg_240____lambda__4___closed__9; -x_8 = l_List_flatMapTR_go___at___private_Lean_Util_Paths_0__Lean_toJsonLeanPaths____x40_Lean_Util_Paths___hyg_55____spec__2(x_6, x_7); +x_8 = l_List_flatMapTR_go___at___private_Lean_Data_Lsp_Window_0__toJsonShowMessageParams____x40_Lean_Data_Lsp_Window___hyg_245____spec__1(x_6, x_7); x_9 = l_Lean_Json_mkObj(x_8); return x_9; } diff --git a/tests/compiler/nat_shiftr.lean b/tests/compiler/nat_shiftr.lean new file mode 100644 index 000000000000..dd6d22d6acb9 --- /dev/null +++ b/tests/compiler/nat_shiftr.lean @@ -0,0 +1,19 @@ +def test (a : Nat) : IO Unit := + for b in #[0, 1, 14, 15, 16, 17, 31, 32, 33, 63, 64, 65] do + IO.println f!"{a >>> b}" + +def main : IO Unit := do + test 0 + test 1 + test 0xff + test 0x100 + test 0x101 + test 0xffff + test 0x1000_0 + test 0x1000_1 + test 0xffff_ffff + test 0x1_0000_0000 + test 
0x1_0000_0001 + test 0xffff_ffff_ffff_ffff + test 0x1_0000_0000_0000_0000 + test 0x1_0000_0000_0000_0001 diff --git a/tests/compiler/nat_shiftr.lean.expected.out b/tests/compiler/nat_shiftr.lean.expected.out new file mode 100644 index 000000000000..01fed06ccc7c --- /dev/null +++ b/tests/compiler/nat_shiftr.lean.expected.out @@ -0,0 +1,168 @@ +0 +0 +0 +0 +0 +0 +0 +0 +0 +0 +0 +0 +1 +0 +0 +0 +0 +0 +0 +0 +0 +0 +0 +0 +255 +127 +0 +0 +0 +0 +0 +0 +0 +0 +0 +0 +256 +128 +0 +0 +0 +0 +0 +0 +0 +0 +0 +0 +257 +128 +0 +0 +0 +0 +0 +0 +0 +0 +0 +0 +65535 +32767 +3 +1 +0 +0 +0 +0 +0 +0 +0 +0 +65536 +32768 +4 +2 +1 +0 +0 +0 +0 +0 +0 +0 +65537 +32768 +4 +2 +1 +0 +0 +0 +0 +0 +0 +0 +4294967295 +2147483647 +262143 +131071 +65535 +32767 +1 +0 +0 +0 +0 +0 +4294967296 +2147483648 +262144 +131072 +65536 +32768 +2 +1 +0 +0 +0 +0 +4294967297 +2147483648 +262144 +131072 +65536 +32768 +2 +1 +0 +0 +0 +0 +18446744073709551615 +9223372036854775807 +1125899906842623 +562949953421311 +281474976710655 +140737488355327 +8589934591 +4294967295 +2147483647 +1 +0 +0 +18446744073709551616 +9223372036854775808 +1125899906842624 +562949953421312 +281474976710656 +140737488355328 +8589934592 +4294967296 +2147483648 +2 +1 +0 +18446744073709551617 +9223372036854775808 +1125899906842624 +562949953421312 +281474976710656 +140737488355328 +8589934592 +4294967296 +2147483648 +2 +1 +0 diff --git a/tests/lean/243.lean.expected.out b/tests/lean/243.lean.expected.out index 39077e6323f7..862f33fdffaf 100644 --- a/tests/lean/243.lean.expected.out +++ b/tests/lean/243.lean.expected.out @@ -1,14 +1,14 @@ -243.lean:2:10-2:14: error: application type mismatch +243.lean:2:10-2:14: error: Application type mismatch: In the appplication ⟨Bool, true⟩ -argument +the final argument true has type _root_.Bool : Type but is expected to have type Bool : Type -243.lean:13:7-13:8: error: application type mismatch +243.lean:13:7-13:8: error: Application type mismatch: In the appplication ⟨A, a⟩ -argument +the final argument a has type Foo.A : Type diff --git a/tests/lean/283.lean.expected.out b/tests/lean/283.lean.expected.out index 92b3751dbe17..25d05edd77c0 100644 --- a/tests/lean/283.lean.expected.out +++ b/tests/lean/283.lean.expected.out @@ -1,6 +1,6 @@ -283.lean:1:24-1:25: error: application type mismatch +283.lean:1:24-1:25: error: Application type mismatch: In the appplication f f -argument +the final argument f has type ?m : Sort ?u diff --git a/tests/lean/331.lean.expected.out b/tests/lean/331.lean.expected.out index 70a9adb0e50a..7a420631d568 100644 --- a/tests/lean/331.lean.expected.out +++ b/tests/lean/331.lean.expected.out @@ -1,4 +1,6 @@ 331.lean:6:13-6:14: error: failed to infer binder type -when the resulting type of a declaration is explicitly provided, all holes (e.g., `_`) in the header are resolved before the declaration body is processed + +Note: When the resulting type of a declaration is explicitly provided, all holes (e.g., `_`) in the header are resolved before the declaration body is processed 331.lean:7:13-7:14: error: failed to infer binder type -when the resulting type of a declaration is explicitly provided, all holes (e.g., `_`) in the header are resolved before the declaration body is processed + +Note: When the resulting type of a declaration is explicitly provided, all holes (e.g., `_`) in the header are resolved before the declaration body is processed diff --git a/tests/lean/389.lean.expected.out b/tests/lean/389.lean.expected.out index 95d34b152a0a..b6c978bf439b 100644 --- a/tests/lean/389.lean.expected.out +++ 
b/tests/lean/389.lean.expected.out @@ -1,6 +1,6 @@ -389.lean:7:14-7:17: error: application type mismatch +389.lean:7:14-7:17: error: Application type mismatch: In the appplication getFoo bar -argument +the final argument bar has type Bar Nat : Type diff --git a/tests/lean/423.lean.expected.out b/tests/lean/423.lean.expected.out index 2b54a56ed9ea..2959da647265 100644 --- a/tests/lean/423.lean.expected.out +++ b/tests/lean/423.lean.expected.out @@ -1,30 +1,30 @@ -423.lean:3:35-3:40: error: application type mismatch +423.lean:3:35-3:40: error: Application type mismatch: In the appplication HAdd.hAdd a -argument +the final argument a has type T : Sort u but is expected to have type Nat : Type -423.lean:5:37-5:38: error: application type mismatch +423.lean:5:37-5:38: error: Application type mismatch: In the appplication Add T -argument +the final argument T has type Sort u : Type u but is expected to have type Type ?u : Type (?u + 1) -423.lean:5:47-5:48: error: application type mismatch +423.lean:5:47-5:48: error: Application type mismatch: In the appplication OfNat T -argument +the final argument T has type Sort u : Type u but is expected to have type Type ?u : Type (?u + 1) -423.lean:5:55-5:60: error: application type mismatch +423.lean:5:55-5:60: error: Application type mismatch: In the appplication HAdd.hAdd a -argument +the final argument a has type T : Sort u diff --git a/tests/lean/autoPPExplicit.lean.expected.out b/tests/lean/autoPPExplicit.lean.expected.out index 680b9c24f7b1..409c95dc254a 100644 --- a/tests/lean/autoPPExplicit.lean.expected.out +++ b/tests/lean/autoPPExplicit.lean.expected.out @@ -1,6 +1,6 @@ -autoPPExplicit.lean:2:26-2:31: error: application type mismatch +autoPPExplicit.lean:2:26-2:31: error: Application type mismatch: In the appplication @Eq.trans α a (b = c) -argument +the final argument b = c has type Prop : Type diff --git a/tests/lean/doErrorMsg.lean.expected.out b/tests/lean/doErrorMsg.lean.expected.out index 5b5d961590ad..604fe5d77d08 100644 --- a/tests/lean/doErrorMsg.lean.expected.out +++ b/tests/lean/doErrorMsg.lean.expected.out @@ -28,9 +28,9 @@ has type ExceptT String (StateT Nat Id) Nat : Type but is expected to have type ExceptT String (StateT Nat Id) String : Type -doErrorMsg.lean:28:13-28:18: error: application type mismatch +doErrorMsg.lean:28:13-28:18: error: Application type mismatch: In the appplication Prod.mk false -argument +the final argument false has type Bool : Type diff --git a/tests/lean/doIssue.lean.expected.out b/tests/lean/doIssue.lean.expected.out index 6f54835edb3d..39ee6b52f9b7 100644 --- a/tests/lean/doIssue.lean.expected.out +++ b/tests/lean/doIssue.lean.expected.out @@ -10,9 +10,9 @@ has type Array Nat : Type but is expected to have type IO PUnit : Type -doIssue.lean:18:7-18:20: error: application type mismatch +doIssue.lean:18:7-18:20: error: Application type mismatch: In the appplication pure (xs.set! 0 1) -argument +the final argument xs.set! 
0 1 has type Array Nat : Type diff --git a/tests/lean/elseifDoErrorPos.lean.expected.out b/tests/lean/elseifDoErrorPos.lean.expected.out index 6d5b5841c2eb..c2efa0663d5e 100644 --- a/tests/lean/elseifDoErrorPos.lean.expected.out +++ b/tests/lean/elseifDoErrorPos.lean.expected.out @@ -1,14 +1,14 @@ -elseifDoErrorPos.lean:4:10-4:11: error: application type mismatch +elseifDoErrorPos.lean:4:10-4:11: error: Application type mismatch: In the appplication @ite ?m x -argument +the final argument x has type Nat : Type but is expected to have type Prop : Type -elseifDoErrorPos.lean:7:11-7:14: error: application type mismatch +elseifDoErrorPos.lean:7:11-7:14: error: Application type mismatch: In the appplication pure "a" -argument +the final argument "a" has type String : Type diff --git a/tests/lean/evalSorry.lean.expected.out b/tests/lean/evalSorry.lean.expected.out index a564755b9ad1..e15c9b086a09 100644 --- a/tests/lean/evalSorry.lean.expected.out +++ b/tests/lean/evalSorry.lean.expected.out @@ -1,7 +1,7 @@ 1 -evalSorry.lean:5:33-5:34: error: application type mismatch +evalSorry.lean:5:33-5:34: error: Application type mismatch: In the appplication f x -argument +the final argument x has type String : Type diff --git a/tests/lean/grind/experiments/option.lean b/tests/lean/grind/experiments/option.lean deleted file mode 100644 index 76abcd5ef2fa..000000000000 --- a/tests/lean/grind/experiments/option.lean +++ /dev/null @@ -1,36 +0,0 @@ -/-! -This file contains WIP notes about potential further `grind` attributes for `Option`. - --/ - -attribute [grind] Option.some_get Option.get_some -attribute [grind] Option.map_map -- `[grind _=_]`? -attribute [grind] Option.get_map -- ?? -attribute [grind] Option.map_id_fun Option.map_id_fun' -attribute [grind] Option.all_guard Option.any_guard -attribute [grind] Option.bind_map Option.map_bind -attribute [grind] Option.join_map_eq_map_join -attribute [grind] Option.join_join -- `[grind _=_]`? -attribute [grind] Option.map_orElse - --- Look again at `Option.guard` lemmas, consider `bind_gaurd`. --- Fix statement of `isSome_guard`, add `isNone_guard` - -attribute [grind] Option.or_assoc -- unless `grind` gains native associativity support in the meantime! - --- attribute [grind] Option.none_beq_none -- warning: this generates just `none` as the pattern! --- attribute [grind] Option.none_beq_some --- attribute [grind] Option.some_beq_none -- warning: this generates just `some _` as the pattern! --- attribute [grind] Option.some_beq_some - -attribute [grind] Option.isSome_filter -attribute [grind] Option.get_filter Option.get_pfilter - -attribute [grind] Option.map_pbind Option.pbind_map -attribute [grind] Option.map_pmap Option.pmap_map Option.elim_pmap - --- Lemmas about inequalities? 
- --- The `min_none_none` family of lemmas result in grind issues: --- failed to synthesize instance when instantiating Option.min_none_none --- Min α diff --git a/tests/lean/holeErrors.lean.expected.out b/tests/lean/holeErrors.lean.expected.out index b4c033ac1c36..a6a5bfb5e51e 100644 --- a/tests/lean/holeErrors.lean.expected.out +++ b/tests/lean/holeErrors.lean.expected.out @@ -2,21 +2,22 @@ holeErrors.lean:3:14-3:20: error: don't know how to synthesize implicit argument @id ?m context: ⊢ Sort u -holeErrors.lean:3:11-3:20: error: failed to infer definition type -holeErrors.lean:5:9-5:10: error: failed to infer definition type -when the resulting type of a declaration is explicitly provided, all holes (e.g., `_`) in the header are resolved before the declaration body is processed +holeErrors.lean:3:11-3:20: error: failed to infer type of `f1.{u}` +holeErrors.lean:5:9-5:10: error: failed to infer type of `f2` + +Note: When the resulting type of a declaration is explicitly provided, all holes (e.g., `_`) in the header are resolved before the declaration body is processed holeErrors.lean:8:9-8:15: error: don't know how to synthesize implicit argument 'α' @id ?m context: ⊢ Sort u holeErrors.lean:8:4-8:5: error: failed to infer 'let' declaration type -holeErrors.lean:7:11-9:1: error: failed to infer definition type -holeErrors.lean:11:11-11:15: error: failed to infer definition type +holeErrors.lean:7:11-9:1: error: failed to infer type of `f3.{u}` +holeErrors.lean:11:11-11:15: error: failed to infer type of `f4` holeErrors.lean:11:8-11:9: error: failed to infer binder type -holeErrors.lean:13:15-13:19: error: failed to infer definition type +holeErrors.lean:13:15-13:19: error: failed to infer type of `f5` holeErrors.lean:13:12-13:13: error: failed to infer binder type holeErrors.lean:16:4-16:5: error: failed to infer binder type -holeErrors.lean:15:7-16:10: error: failed to infer definition type +holeErrors.lean:15:7-16:10: error: failed to infer type of `f6` holeErrors.lean:19:13-19:19: error: don't know how to synthesize implicit argument 'α' @id ?m context: diff --git a/tests/lean/holes.lean.expected.out b/tests/lean/holes.lean.expected.out index 75077904be35..384211157af8 100644 --- a/tests/lean/holes.lean.expected.out +++ b/tests/lean/holes.lean.expected.out @@ -29,5 +29,5 @@ a : Nat f : {α : Type} → {β : ?m a} → α → α := fun {α} {β} a => a ⊢ ?m a holes.lean:18:9-18:10: error: failed to infer binder type -holes.lean:21:25-22:4: error: failed to infer definition type +holes.lean:21:25-22:4: error: failed to infer type of `f7` holes.lean:25:8-25:11: error: failed to infer 'let rec' declaration type diff --git a/tests/lean/interactive/completionPrivateTypes.lean b/tests/lean/interactive/completionPrivateTypes.lean index 087005c9a54f..c64b41151b1d 100644 --- a/tests/lean/interactive/completionPrivateTypes.lean +++ b/tests/lean/interactive/completionPrivateTypes.lean @@ -1,4 +1,4 @@ -private structure Foo where +structure Foo where x : Nat def foobar (f : Foo) := f. 
diff --git a/tests/lean/issue3232.lean b/tests/lean/issue3232.lean index 5f2201b7b0d2..5f1f7e11e353 100644 --- a/tests/lean/issue3232.lean +++ b/tests/lean/issue3232.lean @@ -8,4 +8,4 @@ example : (1 : Nat) = 1 := by apply (rfl : (1 : Int) = 1) example : PUnit.{0} = PUnit.{0} := by - apply Eq.refl PUnit.{1} -- TODO: addPPExplicitToExposeDiff is not handling this yet + apply Eq.refl PUnit.{1} diff --git a/tests/lean/issue3232.lean.expected.out b/tests/lean/issue3232.lean.expected.out index e140ce824643..f2a26a00f630 100644 --- a/tests/lean/issue3232.lean.expected.out +++ b/tests/lean/issue3232.lean.expected.out @@ -1,16 +1,16 @@ -issue3232.lean:5:2-5:9: error: tactic 'apply' failed, failed to unify +issue3232.lean:5:2-5:9: error: tactic 'apply' failed, could not unify the type of `h` @foo 42 -with +with the goal @foo 23 h : foo ⊢ foo -issue3232.lean:8:2-8:29: error: tactic 'apply' failed, failed to unify +issue3232.lean:8:2-8:29: error: tactic 'apply' failed, could not unify the type of `rfl` (1 : Int) = 1 -with +with the goal (1 : Nat) = 1 ⊢ 1 = 1 -issue3232.lean:11:2-11:25: error: tactic 'apply' failed, failed to unify - PUnit = PUnit -with - PUnit = PUnit +issue3232.lean:11:2-11:25: error: tactic 'apply' failed, could not unify the type of `Eq.refl PUnit` + Eq.{2} PUnit PUnit +with the goal + Eq.{1} PUnit PUnit ⊢ PUnit = PUnit diff --git a/tests/lean/librarySearch.lean b/tests/lean/librarySearch.lean index 44a7de15e803..b0a20fafa432 100644 --- a/tests/lean/librarySearch.lean +++ b/tests/lean/librarySearch.lean @@ -275,3 +275,7 @@ error: apply? didn't find any relevant lemmas -/ #guard_msgs in example {α : Sort u} (x y : α) : Eq x y := by apply? + +-- Verify that there is a `sorry` warning when `apply?` closes the goal. +#guard_msgs (drop info) in +example : False := by apply? diff --git a/tests/lean/librarySearch.lean.expected.out b/tests/lean/librarySearch.lean.expected.out index e69de29bb2d1..1dc39a98a4df 100644 --- a/tests/lean/librarySearch.lean.expected.out +++ b/tests/lean/librarySearch.lean.expected.out @@ -0,0 +1 @@ +librarySearch.lean:281:0-281:7: warning: declaration uses 'sorry' diff --git a/tests/lean/macroSwizzle.lean.expected.out b/tests/lean/macroSwizzle.lean.expected.out index 22ee932872ef..60056f259815 100644 --- a/tests/lean/macroSwizzle.lean.expected.out +++ b/tests/lean/macroSwizzle.lean.expected.out @@ -2,9 +2,9 @@ macroSwizzle.lean:4:7-4:23: error: failed to synthesize HAdd Bool String ?m Additional diagnostic information may be available using the `set_option diagnostics true` command. 
-macroSwizzle.lean:6:7-6:10: error: application type mismatch +macroSwizzle.lean:6:7-6:10: error: Application type mismatch: In the application Nat.succ "x" -argument +the final argument "x" has type String : Type diff --git a/tests/lean/modBug.lean.expected.out b/tests/lean/modBug.lean.expected.out index 9bee5bd5bb31..b5fbac51cf54 100644 --- a/tests/lean/modBug.lean.expected.out +++ b/tests/lean/modBug.lean.expected.out @@ -1,6 +1,6 @@ -modBug.lean:1:48-1:64: error: application type mismatch +modBug.lean:1:48-1:64: error: Application type mismatch: In the application Nat.zero_ne_one (Nat.mod_zero 1) -argument +the final argument Nat.mod_zero 1 has type 1 % 0 = 1 : Prop diff --git a/tests/lean/motiveNotTypeCorect.lean.expected.out b/tests/lean/motiveNotTypeCorect.lean.expected.out index 7f4ce0d6a5c1..28b4d0d1df5c 100644 --- a/tests/lean/motiveNotTypeCorect.lean.expected.out +++ b/tests/lean/motiveNotTypeCorect.lean.expected.out @@ -1,8 +1,8 @@ motiveNotTypeCorect.lean:7:6-7:7: error: tactic 'rewrite' failed, motive is not type correct: fun _a => P _a d -Error: application type mismatch +Error: Application type mismatch: In the application P _a d -argument +the final argument d has type D (f t) : Type diff --git a/tests/lean/multiConstantError.lean.expected.out b/tests/lean/multiConstantError.lean.expected.out index 4fe617b40f64..ad5bc59b298f 100644 --- a/tests/lean/multiConstantError.lean.expected.out +++ b/tests/lean/multiConstantError.lean.expected.out @@ -1,6 +1,9 @@ multiConstantError.lean:1:11-1:12: error: failed to infer binder type -recall that you cannot declare multiple constants in a single declaration. The identifier(s) `b`, `c` are being interpreted as parameters `(b : _)`, `(c : _)` + +Note: Recall that you cannot declare multiple constants in a single declaration. The identifier(s) `b`, `c` are being interpreted as parameters `(b : _)`, `(c : _)`. multiConstantError.lean:1:9-1:10: error: failed to infer binder type -recall that you cannot declare multiple constants in a single declaration. The identifier(s) `b`, `c` are being interpreted as parameters `(b : _)`, `(c : _)` + +Note: Recall that you cannot declare multiple constants in a single declaration. The identifier(s) `b`, `c` are being interpreted as parameters `(b : _)`, `(c : _)`. multiConstantError.lean:3:9-3:10: error: failed to infer binder type -recall that you cannot declare multiple constants in a single declaration. The identifier(s) `α`, `β` are being interpreted as parameters `(α : _)`, `(β : _)` + +Note: Recall that you cannot declare multiple constants in a single declaration. The identifier(s) `α`, `β` are being interpreted as parameters `(α : _)`, `(β : _)`. 
diff --git a/tests/lean/nameArgErrorIssue.lean.expected.out b/tests/lean/nameArgErrorIssue.lean.expected.out index dc686d546ac6..52875c2c2f24 100644 --- a/tests/lean/nameArgErrorIssue.lean.expected.out +++ b/tests/lean/nameArgErrorIssue.lean.expected.out @@ -1,16 +1,16 @@ bla 5 2 : Nat -nameArgErrorIssue.lean:5:20-5:24: error: application type mismatch +nameArgErrorIssue.lean:5:20-5:24: error: Application type mismatch: In the application bla "hi" -argument +the final argument "hi" has type String : Type but is expected to have type Nat : Type bla sorry 5 : Nat -nameArgErrorIssue.lean:6:20-6:24: error: application type mismatch +nameArgErrorIssue.lean:6:20-6:24: error: Application type mismatch: In the application bla "hi" -argument +the final argument "hi" has type String : Type diff --git a/tests/lean/namedHoles.lean.expected.out b/tests/lean/namedHoles.lean.expected.out index 63babc018d9b..cb511e2ae257 100644 --- a/tests/lean/namedHoles.lean.expected.out +++ b/tests/lean/namedHoles.lean.expected.out @@ -1,6 +1,6 @@ -namedHoles.lean:9:12-9:14: error: application type mismatch +namedHoles.lean:9:12-9:14: error: Application type mismatch: In the application f ?x ?x -argument +the final argument ?x has type Nat : Type diff --git a/tests/lean/phashmap_inst_coherence.lean.expected.out b/tests/lean/phashmap_inst_coherence.lean.expected.out index 30fd0369a0be..c68e37d06dbb 100644 --- a/tests/lean/phashmap_inst_coherence.lean.expected.out +++ b/tests/lean/phashmap_inst_coherence.lean.expected.out @@ -1,6 +1,6 @@ -phashmap_inst_coherence.lean:12:53-12:54: error: application type mismatch +phashmap_inst_coherence.lean:12:53-12:54: error: Application type mismatch: In the application m.find? -argument +the final argument m has type @PersistentHashMap Nat Nat instBEqOfDecidableEq instHashableNat : Type diff --git a/tests/lean/run/1234.lean b/tests/lean/run/1234.lean index a2a4581f6888..59c63f092b06 100644 --- a/tests/lean/run/1234.lean +++ b/tests/lean/run/1234.lean @@ -10,7 +10,7 @@ set_option Elab.async false -- for stable message ordering in #guard_msgs /-- warning: declaration uses 'sorry' --- -info: [Meta.Tactic.simp.rewrite] h₁:1000: +trace: [Meta.Tactic.simp.rewrite] h₁:1000: k ≤ v - 1 ==> True @@ -47,7 +47,7 @@ example (h₁: k ≤ v - 1) (h₂: 0 < v): /-- warning: declaration uses 'sorry' --- -info: [Meta.Tactic.simp.rewrite] h₁:1000: +trace: [Meta.Tactic.simp.rewrite] h₁:1000: k ≤ v - 1 ==> True @@ -82,7 +82,7 @@ example (h₁: k ≤ v - 1) (h₂: 0 < v): /-- warning: declaration uses 'sorry' --- -info: [Meta.Tactic.simp.rewrite] h₁:1000: +trace: [Meta.Tactic.simp.rewrite] h₁:1000: k ≤ v - 1 ==> True diff --git a/tests/lean/run/1380.lean b/tests/lean/run/1380.lean index 10d075fdeefb..d3896189d4db 100644 --- a/tests/lean/run/1380.lean +++ b/tests/lean/run/1380.lean @@ -3,7 +3,7 @@ variable (n v₁ v₂) (hv₁: v₁ < n + 1) (hv₂: v₂ < n + 1) theorem foo (_: ¬ Fin.mk v₂ hv₂ = Fin.mk v₁ hv₁ ): True := trivial /-- -info: [Meta.Tactic.simp.unify] eq_self:1000, failed to unify +trace: [Meta.Tactic.simp.unify] eq_self:1000, failed to unify ?a = ?a with ⟨v₂, hv₂⟩ = ⟨v₁, hv₁⟩ diff --git a/tests/lean/run/1815.lean b/tests/lean/run/1815.lean index b68c651d1852..3b33f5ed8fb5 100644 --- a/tests/lean/run/1815.lean +++ b/tests/lean/run/1815.lean @@ -7,7 +7,7 @@ theorem mul_comm (a b : α) : a * b = b * a := sorry set_option trace.Meta.Tactic.simp true /-- -info: [Meta.Tactic.simp.rewrite] mul_comm:1000:perm, perm rejected Left a ==> default * a +trace: [Meta.Tactic.simp.rewrite] mul_comm:1000:perm, perm rejected Left a 
==> default * a [Meta.Tactic.simp.rewrite] mul_comm:1000:perm: Right a ==> diff --git a/tests/lean/run/1834.lean b/tests/lean/run/1834.lean index 4d5e6c2213f6..ec5344e9a260 100644 --- a/tests/lean/run/1834.lean +++ b/tests/lean/run/1834.lean @@ -94,7 +94,7 @@ end Rewrote forall, remains a forall, since domain is `Nat`. -/ /-- -info: P : Nat → Prop +trace: P : Nat → Prop q : Prop h : ∀ (n : Nat), P n = q hq : q @@ -111,7 +111,7 @@ example (P : Nat → Prop) (q : Prop) (h : ∀ n, P n = q) (hq : q) : When `pp.foralls` is false, uses non-dependent `→`. -/ /-- -info: P : Nat → Prop +trace: P : Nat → Prop q : Prop h : (n : Nat) → P n = q hq : q @@ -129,7 +129,7 @@ example (P : Nat → Prop) (q : Prop) (h : ∀ n, P n = q) (hq : q) : Rewrote forall, turns into an implication, since domain is a proposition. -/ /-- -info: p : Prop +trace: p : Prop P : p → Prop q : Prop h : ∀ (n : p), P n = q diff --git a/tests/lean/run/1870.lean b/tests/lean/run/1870.lean index d271e8b9589c..60ec2d2e128e 100644 --- a/tests/lean/run/1870.lean +++ b/tests/lean/run/1870.lean @@ -14,10 +14,13 @@ theorem ex1 : (@OfNat.ofNat Nat 0 Zero.toOfNat0) = @OfNat.ofNat Nat 1 One.toOfNa rfl /-- -error: tactic 'apply' failed, failed to unify +error: tactic 'apply' failed, could not unify the conclusion of `@congrArg` ?_ ?_ = ?_ ?_ -with +with the goal OfNat.ofNat 0 = OfNat.ofNat 1 + +Note: The full type of `@congrArg` is + ∀ {α : Sort _} {β : Sort _} {a₁ a₂ : α} (f : α → β), a₁ = a₂ → f a₁ = f a₂ ⊢ OfNat.ofNat 0 = OfNat.ofNat 1 -/ #guard_msgs in @@ -27,10 +30,13 @@ example : (@OfNat.ofNat Nat 0 Zero.toOfNat0) = @OfNat.ofNat Nat 1 One.toOfNat1 : apply rfl /-- -error: tactic 'apply' failed, failed to unify +error: tactic 'apply' failed, could not unify the conclusion of `@congrArg` ?_ ?_ = ?_ ?_ -with +with the goal OfNat.ofNat 0 = OfNat.ofNat 1 + +Note: The full type of `@congrArg` is + ∀ {α : Sort _} {β : Sort _} {a₁ a₂ : α} (f : α → β), a₁ = a₂ → f a₁ = f a₂ ⊢ OfNat.ofNat 0 = OfNat.ofNat 1 -/ #guard_msgs in diff --git a/tests/lean/run/2042.lean b/tests/lean/run/2042.lean index 7e850b3dff3f..849e1eaf3ead 100644 --- a/tests/lean/run/2042.lean +++ b/tests/lean/run/2042.lean @@ -2,7 +2,7 @@ 2 * a /-- -info: case h +trace: case h x : Nat ⊢ 2 * x = x + x -/ @@ -19,7 +19,7 @@ by | a => 2 * a /-- -info: case h +trace: case h x : Nat ⊢ 2 * x = x + x -/ diff --git a/tests/lean/run/2159.lean b/tests/lean/run/2159.lean index 4c51b3ada4de..40e9d83f94ec 100644 --- a/tests/lean/run/2159.lean +++ b/tests/lean/run/2159.lean @@ -1,7 +1,7 @@ /-- -info: ⊢ 1.2 < 2 +trace: ⊢ 1.2 < 2 --- -info: ⊢ 1.2 < 2 +trace: ⊢ 1.2 < 2 --- warning: declaration uses 'sorry' -/ diff --git a/tests/lean/run/2161.lean b/tests/lean/run/2161.lean index eca879e6131e..25659ff1dd69 100644 --- a/tests/lean/run/2161.lean +++ b/tests/lean/run/2161.lean @@ -19,7 +19,7 @@ since its 'Decidable' instance instDecidableEqFoo (((mul 4 1).mul 1).mul 1) 4 did not reduce to 'isTrue' or 'isFalse'. -After unfolding the instances 'decEqFoo✝', 'instDecidableEqFoo', 'instDecidableEqNat' and +After unfolding the instances 'decEqFoo✝', 'instDecidableEqFoo', 'instDecidableEqNat', and 'Nat.decEq', reduction got stuck at the 'Decidable' instance match h : (((mul 4 1).mul 1).mul 1).num.beq 4 with | true => isTrue ⋯ @@ -40,7 +40,7 @@ since its 'Decidable' instance instDecidableEqFoo (((add 4 1).add 1).add 1) 4 did not reduce to 'isTrue' or 'isFalse'. 
-After unfolding the instances 'decEqFoo✝', 'instDecidableEqFoo', 'instDecidableEqNat' and +After unfolding the instances 'decEqFoo✝', 'instDecidableEqFoo', 'instDecidableEqNat', and 'Nat.decEq', reduction got stuck at the 'Decidable' instance match h : (((add 4 1).add 1).add 1).num.beq 4 with | true => isTrue ⋯ diff --git a/tests/lean/run/2226.lean b/tests/lean/run/2226.lean index 810f37c18eef..71c5f311dbdd 100644 --- a/tests/lean/run/2226.lean +++ b/tests/lean/run/2226.lean @@ -6,7 +6,7 @@ A : Nat #guard_msgs in variable (A : Nat) (B : by skip) -/-- error: failed to infer definition type -/ +/-- error: failed to infer type of `foo` -/ #guard_msgs in def foo := A = B diff --git a/tests/lean/run/2389.lean b/tests/lean/run/2389.lean index e9ac815d0a25..da9be3d02cf1 100644 --- a/tests/lean/run/2389.lean +++ b/tests/lean/run/2389.lean @@ -8,17 +8,17 @@ inductive Forall (P : α → Prop) : List α → Prop | nil : Forall P [] | cons : {x : α} → P x → Forall P l → Forall P (x::l) -inductive Tree : Type := +inductive Tree : Type where | leaf : Nat → Tree | node : List Tree → Tree set_option trace.Meta.IndPredBelow true in -/-- info: [Meta.IndPredBelow] Nested or not recursive -/ +/-- trace: [Meta.IndPredBelow] Nested or not recursive -/ #guard_msgs in /-- Despite not having `.below` and `.brecOn`, the type is still usable thanks to well-founded recursion. -/ -inductive OnlyZeros : Tree → Prop := +inductive OnlyZeros : Tree → Prop where | leaf : OnlyZeros (.leaf 0) | node (l : List Tree): Forall OnlyZeros l → OnlyZeros (.node l) diff --git a/tests/lean/run/2916.lean b/tests/lean/run/2916.lean index 45716a7faa70..04c42093f575 100644 --- a/tests/lean/run/2916.lean +++ b/tests/lean/run/2916.lean @@ -1,7 +1,7 @@ set_option pp.coercions false -- Show `OfNat.ofNat` when present for clarity /-- -info: x : Nat +trace: x : Nat ⊢ OfNat.ofNat 2 = x --- warning: declaration uses 'sorry' @@ -13,7 +13,7 @@ example : nat_lit 2 = x := by sorry /-- -info: x : Nat +trace: x : Nat ⊢ OfNat.ofNat 2 = x --- warning: declaration uses 'sorry' @@ -25,7 +25,7 @@ example : nat_lit 2 = x := by sorry /-- -info: α : Nat → Type +trace: α : Nat → Type f : (n : Nat) → α n x : α (OfNat.ofNat 2) ⊢ f (OfNat.ofNat 2) = x @@ -39,12 +39,12 @@ example (α : Nat → Type) (f : (n : Nat) → α n) (x : α 2) : f (nat_lit 2) sorry /-- -info: x : Nat +trace: x : Nat f : Nat → Nat h : f (OfNat.ofNat 2) = x ⊢ f (OfNat.ofNat 2) = x --- -info: x : Nat +trace: x : Nat f : Nat → Nat h : f (OfNat.ofNat 2) = x ⊢ f 2 = x @@ -57,12 +57,12 @@ example (f : Nat → Nat) (h : f 2 = x) : f 2 = x := by assumption /-- -info: α : Nat → Type +trace: α : Nat → Type f : (n : Nat) → α n x : α (OfNat.ofNat 2) ⊢ f (OfNat.ofNat 2) = x --- -info: α : Nat → Type +trace: α : Nat → Type f : (n : Nat) → α n x : α (OfNat.ofNat 2) ⊢ f 2 = x diff --git a/tests/lean/run/2942.lean b/tests/lean/run/2942.lean index 8643f0e704c8..47af6173833e 100644 --- a/tests/lean/run/2942.lean +++ b/tests/lean/run/2942.lean @@ -6,7 +6,7 @@ The function `g` is "over-applied". Previously, conv-mode `congr` failed. -/ /-- -info: case a +trace: case a a b : Nat g : {α : Type} → α → α f : Nat → Nat @@ -40,19 +40,19 @@ While we are here, test `arg` too via `enter`. 
-/ /-- -info: a b : Nat +trace: a b : Nat g : {α : Type} → α → α f : Nat → Nat h : a = b | a --- -info: a b : Nat +trace: a b : Nat g : {α : Type} → α → α f : Nat → Nat h : a = b | f --- -info: a b : Nat +trace: a b : Nat g : {α : Type} → α → α f : Nat → Nat h : a = b diff --git a/tests/lean/run/3257.lean b/tests/lean/run/3257.lean index 9ade81163d34..329c452aeeb2 100644 --- a/tests/lean/run/3257.lean +++ b/tests/lean/run/3257.lean @@ -14,7 +14,7 @@ example : U := by simp [foo, T.mk] /-- -info: [Meta.Tactic.simp.discharge] bar discharge ✅️ +trace: [Meta.Tactic.simp.discharge] bar discharge ✅️ autoParam T _auto✝ [Meta.Tactic.simp.rewrite] T.mk:1000: T diff --git a/tests/lean/run/3467.lean b/tests/lean/run/3467.lean index d3df62417278..d240bd4575c8 100644 --- a/tests/lean/run/3467.lean +++ b/tests/lean/run/3467.lean @@ -13,13 +13,13 @@ Basic diamond set_option structure.strictResolutionOrder true set_option trace.Elab.structure.resolutionOrder true -/-- info: [Elab.structure.resolutionOrder] computed resolution order: [A] -/ +/-- trace: [Elab.structure.resolutionOrder] computed resolution order: [A] -/ #guard_msgs in structure A -/-- info: [Elab.structure.resolutionOrder] computed resolution order: [B, A] -/ +/-- trace: [Elab.structure.resolutionOrder] computed resolution order: [B, A] -/ #guard_msgs in structure B extends A -/-- info: [Elab.structure.resolutionOrder] computed resolution order: [C, A] -/ +/-- trace: [Elab.structure.resolutionOrder] computed resolution order: [C, A] -/ #guard_msgs in structure C extends A -/-- info: [Elab.structure.resolutionOrder] computed resolution order: [D, B, C, A] -/ +/-- trace: [Elab.structure.resolutionOrder] computed resolution order: [D, B, C, A] -/ #guard_msgs in structure D extends B, C def A.x (a : A) : Bool := default @@ -55,7 +55,7 @@ Example resolution order failure warning: failed to compute strict resolution order: - parent 'B' must come after parent 'D' --- -info: [Elab.structure.resolutionOrder] computed resolution order: [D', B, D, C, A] +trace: [Elab.structure.resolutionOrder] computed resolution order: [D', B, D, C, A] -/ #guard_msgs in structure D' extends B, D @@ -67,17 +67,17 @@ Example from issue 3467. 
namespace Issue3467 -/-- info: [Elab.structure.resolutionOrder] computed resolution order: [Issue3467.X] -/ +/-- trace: [Elab.structure.resolutionOrder] computed resolution order: [Issue3467.X] -/ #guard_msgs in structure X where base : Nat -/-- info: [Elab.structure.resolutionOrder] computed resolution order: [Issue3467.A, Issue3467.X] -/ +/-- trace: [Elab.structure.resolutionOrder] computed resolution order: [Issue3467.A, Issue3467.X] -/ #guard_msgs in structure A extends X where countA : Nat -/-- info: [Elab.structure.resolutionOrder] computed resolution order: [Issue3467.B, Issue3467.X] -/ +/-- trace: [Elab.structure.resolutionOrder] computed resolution order: [Issue3467.B, Issue3467.X] -/ #guard_msgs in structure B extends X where countB : Nat @@ -95,7 +95,7 @@ def getTwiceCountB (b : B) := b.countB * 2 end B /-- -info: [Elab.structure.resolutionOrder] computed resolution order: [Issue3467.C, Issue3467.A, Issue3467.B, Issue3467.X] +trace: [Elab.structure.resolutionOrder] computed resolution order: [Issue3467.C, Issue3467.A, Issue3467.B, Issue3467.X] -/ #guard_msgs in structure C extends A, B @@ -112,20 +112,20 @@ end Issue3467 namespace Issue1881 -/-- info: [Elab.structure.resolutionOrder] computed resolution order: [Issue1881.Foo1] -/ +/-- trace: [Elab.structure.resolutionOrder] computed resolution order: [Issue1881.Foo1] -/ #guard_msgs in structure Foo1 where a : Nat b : Nat -/-- info: [Elab.structure.resolutionOrder] computed resolution order: [Issue1881.Foo2] -/ +/-- trace: [Elab.structure.resolutionOrder] computed resolution order: [Issue1881.Foo2] -/ #guard_msgs in structure Foo2 where a : Nat c : Nat /-- -info: [Elab.structure.resolutionOrder] computed resolution order: [Issue1881.Foo3, Issue1881.Foo1, Issue1881.Foo2] +trace: [Elab.structure.resolutionOrder] computed resolution order: [Issue1881.Foo3, Issue1881.Foo1, Issue1881.Foo2] -/ #guard_msgs in structure Foo3 extends Foo1, Foo2 where diff --git a/tests/lean/run/387.lean b/tests/lean/run/387.lean index 8934e4771a4f..2095597c7cda 100644 --- a/tests/lean/run/387.lean +++ b/tests/lean/run/387.lean @@ -12,8 +12,8 @@ example : p 0 0 := by simp [foo 1] -- will not simplify simp [foo 0] -/-- info: ⊢ p 0 0 -/ -#guard_msgs in +/-- trace: ⊢ p 0 0 -/ +#guard_msgs (trace) in example : p 0 0 ∧ p 1 1 := by simp [foo 1] trace_state diff --git a/tests/lean/run/4171.lean b/tests/lean/run/4171.lean index 37c98842ad17..a73c49603d2b 100644 --- a/tests/lean/run/4171.lean +++ b/tests/lean/run/4171.lean @@ -13,7 +13,7 @@ universe v u variable (α : Sort u) -structure Opposite := +structure Opposite where op :: unop : α @@ -714,7 +714,7 @@ example (M : Comon_ (Mon_ C)) : Mon_ (Comon_ C) where /-- -info: [simp] Diagnostics +trace: [simp] Diagnostics [simp] theorems with bad keys [simp] foo, key: @Quiver.Hom.unop _ _ _ _ (@Opposite.op (@Quiver.Hom _ _ _.1 _.1) _) use `set_option diagnostics.threshold <num>` to control threshold for reporting counters @@ -735,7 +735,7 @@ example (M : Comon_ (Mon_ C)) : Mon_ (Comon_ C) where attribute [simp] foo /-- -info: [simp] Diagnostics +trace: [simp] Diagnostics [simp] theorems with bad keys [simp] foo, key: @Quiver.Hom.unop _ _ _ _ (@Opposite.op (@Quiver.Hom _ _ _.1 _.1) _) use `set_option diagnostics.threshold <num>` to control threshold for reporting counters diff --git a/tests/lean/run/4339.lean b/tests/lean/run/4339.lean index bd829197f503..e0952caa2454 100644 --- a/tests/lean/run/4339.lean +++ b/tests/lean/run/4339.lean @@ -7,7 +7,7 @@ structure HH (A B : Nat) where set_option pp.explicit true /-- -info: S 
T f : Nat +trace: S T f : Nat ⊢ @Eq (HH S T) (@HH.mk S T f trivial) (@id (HH S T) (@HH.mk S T f trivial)) -/ #guard_msgs in @@ -18,7 +18,7 @@ example {S T : Nat} (f : Nat) : rfl /-- -info: S T f : Nat +trace: S T f : Nat ⊢ @Eq (HH S T) (@HH.mk S T f trivial) (@id (HH S T) (@HH.mk S T f trivial)) -/ #guard_msgs in @@ -29,7 +29,7 @@ example {S T : Nat} (f : Nat) : rfl /-- -info: S T f : Nat +trace: S T f : Nat ⊢ @Eq (HH S T) (@HH.mk S T f trivial) (@id (HH S T) (@HH.mk S T f trivial)) -/ #guard_msgs in diff --git a/tests/lean/run/4381.lean b/tests/lean/run/4381.lean index 967e7360ab69..0a6ed99d3375 100644 --- a/tests/lean/run/4381.lean +++ b/tests/lean/run/4381.lean @@ -1,5 +1,5 @@ /-- -info: case h +trace: case h d g : Nat H1 : d = g ⊢ ?w = g diff --git a/tests/lean/run/439.lean b/tests/lean/run/439.lean index d68e4959193f..aa03f8a58d1e 100644 --- a/tests/lean/run/439.lean +++ b/tests/lean/run/439.lean @@ -42,9 +42,9 @@ variable (fn' : Fn ((p : P) -> B.fn p -> B.fn p) ({p : P} -> B.fn p -> B.fn p)) #check fn' Bp /-- -error: application type mismatch +error: Application type mismatch: In the application fn'.imp p -argument +the final argument p has type P : Sort u diff --git a/tests/lean/run/4390.lean b/tests/lean/run/4390.lean index 5e10f4c31ca3..81153c3867b7 100644 --- a/tests/lean/run/4390.lean +++ b/tests/lean/run/4390.lean @@ -24,7 +24,7 @@ p : p = state --- -info: [split.failure] `split` tactic failed to generalize discriminant(s) at +trace: [split.failure] `split` tactic failed to generalize discriminant(s) at match h : step state with | none => [state] | some newState => state :: countdown newState diff --git a/tests/lean/run/4405.lean b/tests/lean/run/4405.lean index 215056b08988..b458bf6186bd 100644 --- a/tests/lean/run/4405.lean +++ b/tests/lean/run/4405.lean @@ -3,9 +3,9 @@ import Lean.Elab.Command set_option pp.mvars false /-- -error: application type mismatch +error: Application type mismatch: In the application ⟨Nat.lt_irrefl (?_ n), Fin.is_lt ?_⟩ -argument +the final argument Fin.is_lt ?_ has type ↑?_ < ?_ : Prop diff --git a/tests/lean/run/4670.lean b/tests/lean/run/4670.lean index bedb293f55e1..9325eb98a5d4 100644 --- a/tests/lean/run/4670.lean +++ b/tests/lean/run/4670.lean @@ -11,9 +11,9 @@ structure Foo : Type where Was printing `true.out`, but it should have been `Foo.out true`. -/ /-- -error: application type mismatch +error: Application type mismatch: In the application Foo.out true -argument +the final argument true has type Bool : Type @@ -29,9 +29,9 @@ Verifying that generalized field notation does not have this bug. 
-/ def Foo.out' (f : Foo) : Nat := f.out /-- -error: application type mismatch +error: Application type mismatch: In the application Foo.out' true -argument +the final argument true has type Bool : Type diff --git a/tests/lean/run/4888.lean b/tests/lean/run/4888.lean index 50f14d513b11..ced596e855ab 100644 --- a/tests/lean/run/4888.lean +++ b/tests/lean/run/4888.lean @@ -5,9 +5,9 @@ https://github.com/leanprover/lean4/issues/4888 -/ /-- -error: application type mismatch +error: Application type mismatch: In the application Nat.succ True -argument +the final argument True has type Prop : Type diff --git a/tests/lean/run/5064.lean b/tests/lean/run/5064.lean index 6df9de86225b..f6f47151df4a 100644 --- a/tests/lean/run/5064.lean +++ b/tests/lean/run/5064.lean @@ -13,7 +13,7 @@ def thingy : List (Nat ⊕ Nat) → List Bool | _ => [] termination_by l => l.length -/-- info: ⊢ [] = [] -/ +/-- trace: ⊢ [] = [] -/ #guard_msgs in theorem thingy_empty : thingy [] = [] := by unfold thingy diff --git a/tests/lean/run/5475.lean b/tests/lean/run/5475.lean index eef12db24f3b..f25557564e01 100644 --- a/tests/lean/run/5475.lean +++ b/tests/lean/run/5475.lean @@ -16,7 +16,7 @@ error: don't know how to synthesize implicit argument 'α' context: ⊢ Type --- -error: failed to infer definition type +error: failed to infer type of example -/ #guard_msgs in example := diff --git a/tests/lean/run/790.lean b/tests/lean/run/790.lean index 85db1d626447..e0d88bbe3111 100644 --- a/tests/lean/run/790.lean +++ b/tests/lean/run/790.lean @@ -16,7 +16,7 @@ instance : Vec' Nat := ⟨⟩ set_option trace.Meta.Tactic.simp true /-- -info: [Meta.Tactic.simp.rewrite] differential_of_linear:1000: +trace: [Meta.Tactic.simp.rewrite] differential_of_linear:1000: differential f x dx ==> f dx diff --git a/tests/lean/run/8049.lean b/tests/lean/run/8049.lean index d9c448f61fc7..1cae85f64e9d 100644 --- a/tests/lean/run/8049.lean +++ b/tests/lean/run/8049.lean @@ -25,13 +25,11 @@ elab "test" : tactic => do levelParams := [] } -/-- -info: [Elab.debug] traced --/ +/-- trace: [Elab.debug] traced -/ #guard_msgs in theorem f1 : True := by test; trivial -/-- info: [Elab.debug] traced -/ +/-- trace: [Elab.debug] traced -/ #guard_msgs in def f2 : True := by test; trivial diff --git a/tests/lean/run/CompilerCSE.lean b/tests/lean/run/CompilerCSE.lean index a7647c089f72..dff84f3c81fe 100644 --- a/tests/lean/run/CompilerCSE.lean +++ b/tests/lean/run/CompilerCSE.lean @@ -15,7 +15,7 @@ def cseSizeTest : PassInstaller := set_option trace.Compiler.test true in /-- -info: [Compiler.test] Starting wrapper test cseSizeLeq for cse occurrence 0 +trace: [Compiler.test] Starting wrapper test cseSizeLeq for cse occurrence 0 [Compiler.test] Wrapper test cseSizeLeq for cse occurrence 0 successful [Compiler.test] Starting post condition test cseFix for cse occurrence 0 [Compiler.test] Post condition test cseFix for cse occurrence 0 successful diff --git a/tests/lean/run/CompilerFindJoinPoints.lean b/tests/lean/run/CompilerFindJoinPoints.lean index 75cd6cc0794b..2b60804f60b1 100644 --- a/tests/lean/run/CompilerFindJoinPoints.lean +++ b/tests/lean/run/CompilerFindJoinPoints.lean @@ -15,7 +15,7 @@ def cseSizeTest : PassInstaller := set_option trace.Compiler.test true in /-- -info: [Compiler.test] Starting wrapper test findJoinPointsSizeLeq for findJoinPoints occurrence 0 +trace: [Compiler.test] Starting wrapper test findJoinPointsSizeLeq for findJoinPoints occurrence 0 [Compiler.test] Wrapper test findJoinPointsSizeLeq for findJoinPoints occurrence 0 successful [Compiler.test] 
Starting post condition test findJoinPointsFix for findJoinPoints occurrence 0 [Compiler.test] Post condition test findJoinPointsFix for findJoinPoints occurrence 0 successful diff --git a/tests/lean/run/CompilerFloatLetIn.lean b/tests/lean/run/CompilerFloatLetIn.lean index 1b609b5c44b2..86f6f2022047 100644 --- a/tests/lean/run/CompilerFloatLetIn.lean +++ b/tests/lean/run/CompilerFloatLetIn.lean @@ -16,7 +16,7 @@ def floatLetInSizeTest : PassInstaller := set_option trace.Compiler.test true in /-- -info: [Compiler.test] Starting wrapper test floatLetInSizeEq for floatLetIn occurrence 0 +trace: [Compiler.test] Starting wrapper test floatLetInSizeEq for floatLetIn occurrence 0 [Compiler.test] Wrapper test floatLetInSizeEq for floatLetIn occurrence 0 successful [Compiler.test] Starting wrapper test floatLetInSizeEq for floatLetIn occurrence 1 [Compiler.test] Wrapper test floatLetInSizeEq for floatLetIn occurrence 1 successful diff --git a/tests/lean/run/CompilerPullInstances.lean b/tests/lean/run/CompilerPullInstances.lean index f9d2de0e0e51..0582ce399142 100644 --- a/tests/lean/run/CompilerPullInstances.lean +++ b/tests/lean/run/CompilerPullInstances.lean @@ -15,7 +15,7 @@ def pullInstancesSizeTest : PassInstaller := set_option trace.Compiler.test true in /-- -info: [Compiler.test] Starting wrapper test pullInstancesSizeEq for pullInstances occurrence 0 +trace: [Compiler.test] Starting wrapper test pullInstancesSizeEq for pullInstances occurrence 0 [Compiler.test] Wrapper test pullInstancesSizeEq for pullInstances occurrence 0 successful [Compiler.test] Starting post condition test pullInstancesFix for pullInstances occurrence 0 [Compiler.test] Post condition test pullInstancesFix for pullInstances occurrence 0 successful diff --git a/tests/lean/run/CompilerSimp.lean b/tests/lean/run/CompilerSimp.lean index 1f6099f00034..bea661e18433 100644 --- a/tests/lean/run/CompilerSimp.lean +++ b/tests/lean/run/CompilerSimp.lean @@ -15,7 +15,7 @@ def simpReaderTest : PassInstaller := set_option trace.Compiler.test true in /-- -info: [Compiler.test] Starting post condition test simpInlinesBinds for simp occurrence 0 +trace: [Compiler.test] Starting post condition test simpInlinesBinds for simp occurrence 0 [Compiler.test] Post condition test simpInlinesBinds for simp occurrence 0 successful [Compiler.test] Starting post condition test simpFix for simp occurrence 0 [Compiler.test] Post condition test simpFix for simp occurrence 0 successful diff --git a/tests/lean/run/DVec.lean b/tests/lean/run/DVec.lean index b80d24d376f5..e986c920d3cc 100644 --- a/tests/lean/run/DVec.lean +++ b/tests/lean/run/DVec.lean @@ -40,9 +40,9 @@ example (v : Vec Nat 1) : Nat := -- Does not work: Aliases find that `v` could be the `TypeVec` argument since `TypeVec` is an abbrev for `Vec`. 
/-- -error: application type mismatch +error: Application type mismatch: In the application @DVec.hd ?_ v -argument +the final argument v has type Vec Nat 1 : Type diff --git a/tests/lean/run/ack.lean b/tests/lean/run/ack.lean index 021094014cce..fdbfde718b5f 100644 --- a/tests/lean/run/ack.lean +++ b/tests/lean/run/ack.lean @@ -5,21 +5,21 @@ def ack : Nat → Nat → Nat termination_by a b => (a, b) /-- -info: [diag] Diagnostics +trace: [diag] Diagnostics [kernel] unfolded declarations (max: 147, num: 3): [kernel] OfNat.ofNat ↦ 147 [kernel] Add.add ↦ 61 [kernel] HAdd.hAdd ↦ 61 use `set_option diagnostics.threshold <num>` to control threshold for reporting counters --- -info: [simp] Diagnostics +trace: [simp] Diagnostics [simp] used theorems (max: 59, num: 1): [simp] ack.eq_3 ↦ 59 [simp] tried theorems (max: 59, num: 1): [simp] ack.eq_3 ↦ 59, succeeded: 59 use `set_option diagnostics.threshold <num>` to control threshold for reporting counters --- -info: [diag] Diagnostics +trace: [diag] Diagnostics [kernel] unfolded declarations (max: 147, num: 3): [kernel] OfNat.ofNat ↦ 147 [kernel] Add.add ↦ 61 diff --git a/tests/lean/run/allGoals.lean b/tests/lean/run/allGoals.lean index 5e0e82337d15..e22071c18435 100644 --- a/tests/lean/run/allGoals.lean +++ b/tests/lean/run/allGoals.lean @@ -12,7 +12,7 @@ open Lean Elab Tactic Tactics may assign other goals. There are three goals, but the tactic is run twice. -/ /-- -info: case a +trace: case a ⊢ 1 ≤ ?m case a @@ -21,7 +21,7 @@ case a case m ⊢ Nat --- -info: running tac +trace: running tac running tac -/ #guard_msgs in @@ -94,7 +94,7 @@ case refine_1 b : Bool ⊢ Unit --- -info: case refine_2.false +trace: case refine_2.false v : Unit := ?_ false ⊢ True @@ -149,7 +149,7 @@ error: Case tag 'true' not found. The only available case tag is 'refine_1'. 
--- -info: case refine_2.false +trace: case refine_2.false v : Unit := () this : () = v ⊢ True @@ -160,7 +160,7 @@ case refine_1 b : Bool ⊢ Unit --- -info: in true +trace: in true -/ #guard_msgs in example (b : Bool) : True := by @@ -200,7 +200,7 @@ This is the responsibility of `first`, but `all_goals` coordinates by being sure -/ /-- -info: rfl +trace: rfl rfl -/ #guard_msgs in @@ -311,7 +311,7 @@ theorem idEq (a : α) : id a = a := rfl /-- -info: case sunday +trace: case sunday ⊢ sunday.previous.next = id sunday case monday @@ -332,7 +332,7 @@ case friday case saturday ⊢ saturday.previous.next = id saturday --- -info: case sunday +trace: case sunday ⊢ sunday.previous.next = sunday case monday @@ -362,7 +362,7 @@ theorem Weekday.test (d : Weekday) : next (previous d) = id d := by all_goals rfl /-- -info: case sunday +trace: case sunday ⊢ sunday.previous.next = sunday case monday diff --git a/tests/lean/run/apply_error.lean b/tests/lean/run/apply_error.lean new file mode 100644 index 000000000000..055ac5e7e701 --- /dev/null +++ b/tests/lean/run/apply_error.lean @@ -0,0 +1,53 @@ +/-- +error: tactic 'apply' failed, could not unify the conclusion of `h` + True +with the goal + False + +Note: The full type of `h` is + 1 = 1 → True +h : 1 = 1 → True +⊢ False +-/ +#guard_msgs in +example (h : 1 = 1 → True) : False := by + apply h + +/-- +error: tactic 'apply' failed, could not unify the type of `h` + 1 = 1 → True +with the goal + 2 = 2 → False +h : 1 = 1 → True +⊢ 2 = 2 → False +-/ +#guard_msgs in +example (h : 1 = 1 → True) : 2 = 2 → False := by + apply h + +/-- +error: tactic 'apply' failed, could not unify the conclusion of `h` + 1 = 1 → True +with the goal + 2 = 2 → False + +Note: The full type of `h` is + 3 = 3 → 1 = 1 → True +h : 3 = 3 → 1 = 1 → True +⊢ 2 = 2 → False +-/ +#guard_msgs in +example (h : 3 = 3 → 1 = 1 → True) : 2 = 2 → False := by + apply h + +/-- +error: tactic 'apply' failed, could not unify the type of `h` + True +with the goal + False +h : True +⊢ False +-/ +#guard_msgs in +example (h : True) : False := by + apply h diff --git a/tests/lean/run/apply_tac.lean b/tests/lean/run/apply_tac.lean index 0ad662e6d8fb..0c2ab0bf9ebb 100644 --- a/tests/lean/run/apply_tac.lean +++ b/tests/lean/run/apply_tac.lean @@ -5,7 +5,7 @@ open Lean.Meta open Lean.Elab.Tactic /-- -info: a b c : Nat +trace: a b c : Nat h₁ : a = b h₂ : b = c ⊢ a = b @@ -17,7 +17,7 @@ example (a b c : Nat) (h₁ : a = b) (h₂ : b = c) : a = c := by exact h₁ /-- -info: case h +trace: case h a : Nat ⊢ ?w = a @@ -38,7 +38,7 @@ elab "eapply " e:term : tactic => evalApplyLikeTactic (MVarId.apply (cfg := {newGoals := ApplyNewGoals.nonDependentOnly})) e /-- -info: case h +trace: case h a : Nat ⊢ ?w = a -/ @@ -49,7 +49,7 @@ example (a : Nat) : ∃ x, x = a := by rfl /-- -info: case w +trace: case w a : Nat ⊢ Nat @@ -57,7 +57,7 @@ case h a : Nat ⊢ ?w = a --- -info: case h +trace: case h a : Nat ⊢ a = a -/ diff --git a/tests/lean/run/autoboundIssues.lean b/tests/lean/run/autoboundIssues.lean index afcd15a3cc44..ae5a1e96ca4e 100644 --- a/tests/lean/run/autoboundIssues.lean +++ b/tests/lean/run/autoboundIssues.lean @@ -39,7 +39,7 @@ The auto-bound implicit creates a new variable `A✝`, which comes from the argu (This has been the case well before the creation of this test.) -/ /-- -info: A✝ : Sort u_1 +trace: A✝ : Sort u_1 a : A✝ _x : constUnit a ⊢ True @@ -66,7 +66,7 @@ The duplication was because `runTermElabM` wasn't resetting the local context. The poor variable name was due to using `mkForallFVars` instead of `mkForallFVars'`. 
-/ /-- -info: A✝ : Sort _ +trace: A✝ : Sort _ a : A✝ x : constUnit a ⊢ True @@ -82,7 +82,7 @@ Checking that `#check` also has the improvement. /-- info: 1 : Nat --- -info: A✝ : Sort _ +trace: A✝ : Sort _ a : A✝ x : constUnit a ⊢ ?_ diff --git a/tests/lean/run/binderNameHint.lean b/tests/lean/run/binderNameHint.lean index 084bb4846903..cd4cbbe71715 100644 --- a/tests/lean/run/binderNameHint.lean +++ b/tests/lean/run/binderNameHint.lean @@ -84,7 +84,7 @@ error: unsolved goals names : List String ⊢ (!names.any fun x => !"Waldo".isPrefixOf x) = true --- -info: names : List String +trace: names : List String ⊢ (!names.any fun x => binderNameHint x (fun name => "Waldo".isPrefixOf name) !"Waldo".isPrefixOf x) = true -/ #guard_msgs in diff --git a/tests/lean/run/binderNameHintSimp.lean b/tests/lean/run/binderNameHintSimp.lean index 137af94fc351..5aaa13178e43 100644 --- a/tests/lean/run/binderNameHintSimp.lean +++ b/tests/lean/run/binderNameHintSimp.lean @@ -12,7 +12,7 @@ def z : Nat := 0 set_option trace.Meta.Tactic.simp.rewrite true /-- -info: [Meta.Tactic.simp.rewrite] ↓ binderNameHint.eq_1:1000: +trace: [Meta.Tactic.simp.rewrite] ↓ binderNameHint.eq_1:1000: binderNameHint x y z ==> z @@ -24,7 +24,7 @@ example : binderNameHint x y z = 0 := by simp [x, y, z] /-- -info: [Meta.Tactic.simp.rewrite] ↓ binderNameHint.eq_1:1000: +trace: [Meta.Tactic.simp.rewrite] ↓ binderNameHint.eq_1:1000: binderNameHint x y z ==> z diff --git a/tests/lean/run/canonM_exists_fun.lean b/tests/lean/run/canonM_exists_fun.lean index 3d9f1e0ec501..e1e45b027945 100644 --- a/tests/lean/run/canonM_exists_fun.lean +++ b/tests/lean/run/canonM_exists_fun.lean @@ -12,7 +12,7 @@ elab "foo" t:term : tactic => do #check (∃ f : Nat → Nat, ∀ x, f x = 0) -- works fine /-- -info: [debug] canonicalizing ∃ f, ∀ (x : Nat), f x = 0 +trace: [debug] canonicalizing ∃ f, ∀ (x : Nat), f x = 0 [debug] canonicalized it to ∃ f, ∀ (x : Nat), f x = 0 -/ #guard_msgs in diff --git a/tests/lean/run/cdotAtSimpArg.lean b/tests/lean/run/cdotAtSimpArg.lean index da00e882246e..ca157f90bec4 100644 --- a/tests/lean/run/cdotAtSimpArg.lean +++ b/tests/lean/run/cdotAtSimpArg.lean @@ -6,7 +6,7 @@ example : ¬ true = false := by /-! 
Test `binop%` -/ /-- -info: y x : Nat +trace: y x : Nat h : y = 0 ⊢ Add.add x y = x -/ #guard_msgs in @@ -19,7 +19,7 @@ example (h : y = 0) : x + y = x := by done /-- -info: y x : Nat +trace: y x : Nat h : y = 0 ⊢ Add.add x y = x -/ #guard_msgs in diff --git a/tests/lean/run/change.lean b/tests/lean/run/change.lean index 4f5d9eb00e21..375679db1d87 100644 --- a/tests/lean/run/change.lean +++ b/tests/lean/run/change.lean @@ -85,7 +85,7 @@ is not definitionally equal to target `change` can create new metavariables and assign them -/ /-- -info: x y z : Nat +trace: x y z : Nat w : Nat := x + y ⊢ x + y = z -/ @@ -114,7 +114,7 @@ example : let x := 22; let y : Nat := x; let z : Fin (y + 1) := 0; z.1 < y + 1 : `change` reorders hypotheses if necessary -/ /-- -info: x y z w : Nat +trace: x y z w : Nat a : Nat := x + y h : a = z + w ⊢ True @@ -199,7 +199,7 @@ example (m n : Nat) : m + 2 = n := by conv `change` to create a metavariable -/ /-- -info: a b c d : Nat +trace: a b c d : Nat e : Nat := a + b ⊢ a + b + c = d -/ diff --git a/tests/lean/run/check.lean b/tests/lean/run/check.lean index 90eb3b56ace8..7a1da0fa4306 100644 --- a/tests/lean/run/check.lean +++ b/tests/lean/run/check.lean @@ -33,9 +33,9 @@ def expr_1eq1 : Expr := mkApp3 (.const ``Eq [0]) (.const ``Nat []) (mkNatLit 1) elab "elab_1eq1" : term => return expr_1eq1 /-- -error: application type mismatch +error: Application type mismatch: In the application @Eq Nat -argument +the final argument Nat has type Type : Type 1 @@ -45,9 +45,9 @@ but is expected to have type #guard_msgs in #check elab_1eq1 /-- -error: application type mismatch +error: Application type mismatch: In the application @Eq Nat -argument +the final argument Nat has type Type : Type 1 diff --git a/tests/lean/run/closure1.lean b/tests/lean/run/closure1.lean index 743f0d8db4e6..8396423e37cd 100644 --- a/tests/lean/run/closure1.lean +++ b/tests/lean/run/closure1.lean @@ -41,7 +41,7 @@ printDef `foo1 set_option pp.mvars false in /-- -info: [Meta.debug] foo1 α f b ?_ ?_ +trace: [Meta.debug] foo1 α f b ?_ ?_ [Meta.debug] fun α f b _x_1 _x_2 => _x_2 (f b) -/ #guard_msgs in @@ -63,7 +63,7 @@ printDef `foo2 set_option pp.mvars false in /-- -info: [Meta.debug] foo2 α v1 v2 p ?_ +trace: [Meta.debug] foo2 α v1 v2 p ?_ [Meta.debug] fun α v1 v2 p _x_1 => v1 = v2 ∧ (_x_1 v2 ∨ p) -/ #guard_msgs in diff --git a/tests/lean/run/coeAttrs.lean b/tests/lean/run/coeAttrs.lean index f2495cc81d4d..b3970f4558ae 100644 --- a/tests/lean/run/coeAttrs.lean +++ b/tests/lean/run/coeAttrs.lean @@ -4,7 +4,7 @@ -- With the option off (default) /-- -info: n : Nat +trace: n : Nat h : n = 0 ⊢ ↑n = 0 -/ @@ -15,7 +15,7 @@ example (n : Nat) (h : n = 0) : (↑n : Int) = 0 := by -- With the option on /-- -info: n : Nat +trace: n : Nat h : n = 0 ⊢ (↑n : Int) = 0 -/ diff --git a/tests/lean/run/conv1.lean b/tests/lean/run/conv1.lean index 86b1aa962668..81014aec1f0f 100644 --- a/tests/lean/run/conv1.lean +++ b/tests/lean/run/conv1.lean @@ -3,7 +3,7 @@ set_option pp.analyze false def p (x y : Nat) := x = y /-- -info: x y : Nat +trace: x y : Nat ⊢ x + y = y.add x -/ #guard_msgs in @@ -18,7 +18,7 @@ example (x y : Nat) : p (x + y) (y + x + 0) := by rfl /-- -info: x y : Nat +trace: x y : Nat ⊢ x + y = y.add x -/ #guard_msgs in @@ -32,7 +32,7 @@ example (x y : Nat) : p (x + y) (y + x + 0) := by rfl /-- -info: x y : Nat +trace: x y : Nat ⊢ x.add y = y.add x -/ #guard_msgs in @@ -48,7 +48,7 @@ example (x y : Nat) : p (x + y) (y + x + 0) := by apply Nat.add_comm x y /-- -info: x y : Nat +trace: x y : Nat | x + y -/ #guard_msgs in @@ -109,14 +109,14 @@ 
example (h₁ : f x = x + 1) (h₂ : x > 0) : f x = f x := by exact h₁ /-- -info: x y : Nat +trace: x y : Nat f : Nat → Nat → Nat g : Nat → Nat h₁ : ∀ (z : Nat), f z z = z h₂ : ∀ (x y : Nat), f (g x) (g y) = y ⊢ f (g (0 + y)) (f (g x) (g x)) = x --- -info: x y : Nat +trace: x y : Nat f : Nat → Nat → Nat g : Nat → Nat h₁ : ∀ (z : Nat), f z z = z @@ -133,7 +133,7 @@ example (x y : Nat) (f : Nat → Nat → Nat) (g : Nat → Nat) (h₁ : ∀ z, f set_option linter.unusedVariables false /-- -info: x y : Nat +trace: x y : Nat f : Nat → Nat → Nat g : Nat → Nat h₁ : ∀ (z : Nat), f z z = z @@ -141,7 +141,7 @@ h₂ : ∀ (x y : Nat), f (g x) (g y) = y h₃ : f (g x) (g x) = 0 ⊢ g x = 0 --- -info: x y : Nat +trace: x y : Nat f : Nat → Nat → Nat g : Nat → Nat h₁ : ∀ (z : Nat), f z z = z diff --git a/tests/lean/run/conv_arg.lean b/tests/lean/run/conv_arg.lean index 6e6cf576bbbc..bb38eeca291c 100644 --- a/tests/lean/run/conv_arg.lean +++ b/tests/lean/run/conv_arg.lean @@ -30,10 +30,10 @@ example (p q₁ q₂ : Prop) (h : q₁ ↔ q₂) : (p → q₁) ↔ (p → q₂) -- Dependent implications /-- -info: i✝ : Nat +trace: i✝ : Nat | i✝ < 10 --- -info: a✝¹ : Nat +trace: a✝¹ : Nat a✝ : a✝¹ < 10 | ↑⟨a✝¹, ⋯⟩ = a✝¹ -/ diff --git a/tests/lean/run/core.lean b/tests/lean/run/core.lean index bf8fa849b3d7..6816706ee4a8 100644 --- a/tests/lean/run/core.lean +++ b/tests/lean/run/core.lean @@ -29,7 +29,7 @@ testing... --- info: 10 --- -info: [Elab] trace message +trace: [Elab] trace message -/ #guard_msgs in #eval f diff --git a/tests/lean/run/decideTactic.lean b/tests/lean/run/decideTactic.lean index e171fa142dc7..210cba140bf8 100644 --- a/tests/lean/run/decideTactic.lean +++ b/tests/lean/run/decideTactic.lean @@ -92,8 +92,8 @@ since its 'Decidable' instance instDecidableNot did not reduce to 'isTrue' or 'isFalse'. 
-After unfolding the instances 'baz', 'instDecidableNice' and 'instDecidableNot', reduction got stuck -at the 'Decidable' instance +After unfolding the instances 'baz', 'instDecidableNice', and 'instDecidableNot', reduction got +stuck at the 'Decidable' instance ⋯ ▸ inferInstance Hint: Reduction got stuck on '▸' (Eq.rec), which suggests that one of the 'Decidable' instances is diff --git a/tests/lean/run/declareConfigElabBug.lean b/tests/lean/run/declareConfigElabBug.lean index 818f399dc0eb..b6d426d96bf4 100644 --- a/tests/lean/run/declareConfigElabBug.lean +++ b/tests/lean/run/declareConfigElabBug.lean @@ -1,6 +1,6 @@ set_option trace.Elab true /-- -info: α✝ : Sort u_1 +trace: α✝ : Sort u_1 a b : α✝ h : a = b ⊢ (fun x => x) a = b diff --git a/tests/lean/run/diagRec.lean b/tests/lean/run/diagRec.lean index f93bec0da04f..ab58f07d879a 100644 --- a/tests/lean/run/diagRec.lean +++ b/tests/lean/run/diagRec.lean @@ -8,7 +8,7 @@ termination_by n /-- info: 89 --- -info: [diag] Diagnostics +trace: [diag] Diagnostics [reduction] unfolded declarations (max: 407, num: 3): [reduction] Nat.rec ↦ 407 [reduction] Or.rec ↦ 144 diff --git a/tests/lean/run/diagnostics.lean b/tests/lean/run/diagnostics.lean index ccd158ee0c09..4f88e00a3b70 100644 --- a/tests/lean/run/diagnostics.lean +++ b/tests/lean/run/diagnostics.lean @@ -9,7 +9,7 @@ theorem f_eq : f (x + 1) = q (f x) := rfl set_option trace.Meta.debug true /-- -info: [diag] Diagnostics +trace: [diag] Diagnostics [reduction] unfolded declarations (max: 15, num: 6): [reduction] Nat.rec ↦ 15 [reduction] Add.add ↦ 10 @@ -30,7 +30,7 @@ example : f (x + 5) = q (q (q (q (q (f x))))) := rfl /-- -info: [diag] Diagnostics +trace: [diag] Diagnostics [reduction] unfolded declarations (max: 15, num: 6): [reduction] Nat.rec ↦ 15 [reduction] Add.add ↦ 10 diff --git a/tests/lean/run/dsimp1.lean b/tests/lean/run/dsimp1.lean index dbb54e3dad9f..1629e65b092c 100644 --- a/tests/lean/run/dsimp1.lean +++ b/tests/lean/run/dsimp1.lean @@ -7,7 +7,7 @@ axiom P : Bool → Prop axiom P_false : P false /-- -info: x : Nat +trace: x : Nat ⊢ P (1 + x).isZero -/ #guard_msgs in diff --git a/tests/lean/run/dsimp_bv_simproc.lean b/tests/lean/run/dsimp_bv_simproc.lean index 90560ac72bee..2108982343dd 100644 --- a/tests/lean/run/dsimp_bv_simproc.lean +++ b/tests/lean/run/dsimp_bv_simproc.lean @@ -10,7 +10,7 @@ theorem write_simplify_test_0 (a x y : BitVec 64) simp only [setWidth_eq, BitVec.cast_eq] /-- -info: write : (n : Nat) → BitVec 64 → BitVec (n * 8) → Type → Type +trace: write : (n : Nat) → BitVec 64 → BitVec (n * 8) → Type → Type s aux : Type a x y : BitVec 64 h : 128 = 128 diff --git a/tests/lean/run/duplicatedArgumentApplicationTypeMismatch.lean b/tests/lean/run/duplicatedArgumentApplicationTypeMismatch.lean new file mode 100644 index 000000000000..2a7be17276f5 --- /dev/null +++ b/tests/lean/run/duplicatedArgumentApplicationTypeMismatch.lean @@ -0,0 +1,13 @@ +def foo (x : Nat) (y : Bool) (z : Nat) (w : Nat) := () +/-- +error: Application type mismatch: In the application + foo 1 true true +the final argument + true +has type + Bool : Type +but is expected to have type + Nat : Type +-/ +#guard_msgs in +#eval foo 1 true true 1 diff --git a/tests/lean/run/elab_cmd.lean b/tests/lean/run/elab_cmd.lean index aa4030e69e32..d9f408c1353d 100644 --- a/tests/lean/run/elab_cmd.lean +++ b/tests/lean/run/elab_cmd.lean @@ -26,7 +26,7 @@ elab "try" t:tactic : tactic => do set_option linter.unusedVariables false /-- -info: case h₁ +trace: case h₁ x y z : Nat h1 : y = z h2 : x = x h3 : x = y ⊢ Nat @@ -38,7 +38,7 @@ h2 
: x = x h3 : x = y ⊢ Nat --- -info: case h₂ +trace: case h₂ x y z : Nat h1 : y = z h2 : x = x diff --git a/tests/lean/run/emptyLcnf.lean b/tests/lean/run/emptyLcnf.lean index 80b7375022e7..2c5acd8dc13a 100644 --- a/tests/lean/run/emptyLcnf.lean +++ b/tests/lean/run/emptyLcnf.lean @@ -7,7 +7,7 @@ def f (x : MyEmpty) : Nat := set_option trace.Compiler.result true /-- -info: [Compiler.result] size: 0 +trace: [Compiler.result] size: 0 def f x : Nat := ⊥ -/ diff --git a/tests/lean/run/eqnsAtSimp3.lean b/tests/lean/run/eqnsAtSimp3.lean index 6ea1b606f179..49075cb456f1 100644 --- a/tests/lean/run/eqnsAtSimp3.lean +++ b/tests/lean/run/eqnsAtSimp3.lean @@ -6,7 +6,7 @@ def f (x y : Nat) : Nat := | x+1, y => 2 * f x y /-- -info: x y : Nat +trace: x y : Nat h : y ≠ 5 ⊢ ∃ z, 2 * f x y = 2 * z -/ @@ -25,7 +25,7 @@ theorem ex1 (x : Nat) (y : Nat) (h : y ≠ 5) : ∃ z, f (x+1) y = 2 * z := by | x+1, y => 2 * g x y /-- -info: x y : Nat +trace: x y : Nat h : y ≠ 5 ⊢ ∃ z, 2 * g x y = 2 * z -/ @@ -37,7 +37,7 @@ theorem ex2 (x : Nat) (y : Nat) (h : y ≠ 5) : ∃ z, g (x+1) y = 2 * z := by rfl /-- -info: x y : Nat +trace: x y : Nat h : y = 5 → False ⊢ ∃ z, 2 * f x y = 2 * z -/ @@ -59,7 +59,7 @@ theorem ex3 (x : Nat) (y : Nat) (h : y = 5 → False) : ∃ z, f (x+1) y = 2 * z #check f2.eq_4 /-- -info: x y z : Nat +trace: x y z : Nat h : y = 5 → z = 6 → False ⊢ ∃ w, 2 * f2 x y z = 2 * w -/ diff --git a/tests/lean/run/erased.lean b/tests/lean/run/erased.lean index 642da1d03488..3262dc052686 100644 --- a/tests/lean/run/erased.lean +++ b/tests/lean/run/erased.lean @@ -20,7 +20,7 @@ set_option pp.funBinderTypes true set_option pp.letVarTypes true set_option trace.Compiler.result true /-- -info: [Compiler.result] size: 1 +trace: [Compiler.result] size: 1 def Erased.mk (α : lcErased) (a : lcAny) : PSigma lcErased lcAny := let _x.1 : PSigma lcErased lcAny := PSigma.mk lcErased ◾ ◾ ◾; return _x.1 diff --git a/tests/lean/run/exposeDiff.lean b/tests/lean/run/exposeDiff.lean new file mode 100644 index 000000000000..0444168aba5d --- /dev/null +++ b/tests/lean/run/exposeDiff.lean @@ -0,0 +1,48 @@ +/-- +error: tactic 'apply' failed, could not unify the type of `x` + PUnit.{1} +with the goal + PUnit.{0} +x : PUnit +⊢ PUnit +-/ +#guard_msgs in +example (x : PUnit.{1}) : PUnit.{0} := by + apply x + +/-- +error: type mismatch + x +has type + PUnit.{1} : Type +but is expected to have type + PUnit.{0} : Prop +-/ +#guard_msgs in +example (x : PUnit.{1}) : PUnit.{0} := + x + +/-- +error: tactic 'rfl' failed, the left-hand side + ∀ (x : PUnit.{1}), True +is not definitionally equal to the right-hand side + ∀ (x : PUnit.{2}), True +⊢ (∀ (x : PUnit), True) ↔ ∀ (x : PUnit), True +-/ +#guard_msgs in +example : (∀ _ : PUnit.{1}, True) ↔ ∀ _ : PUnit.{2}, True := by + rfl + +inductive Test where + | mk (x : Prop) + +/-- +error: tactic 'rfl' failed, the left-hand side + (Test.mk (∀ (x : PUnit.{1}), True)).1 +is not definitionally equal to the right-hand side + (Test.mk (∀ (x : PUnit.{2}), True)).1 +⊢ (Test.mk (∀ (x : PUnit), True)).1 = (Test.mk (∀ (x : PUnit), True)).1 +-/ +#guard_msgs in +example : (Test.mk (∀ _ : PUnit.{1}, True)).1 = (Test.mk (∀ _ : PUnit.{2}, True)).1 := by + rfl diff --git a/tests/lean/run/exposeNames.lean b/tests/lean/run/exposeNames.lean index 186ae3835a68..c53da13fa269 100644 --- a/tests/lean/run/exposeNames.lean +++ b/tests/lean/run/exposeNames.lean @@ -1,5 +1,5 @@ /-- -info: α : Type u_1 +trace: α : Type u_1 inst : DecidableEq α inst_1 : Add α a_1 b_1 : α @@ -9,14 +9,14 @@ h_1 : b = a_2 a : α ⊢ a = b -/ -#guard_msgs (info) in 
+#guard_msgs (trace) in example [DecidableEq α] [Add α] (a b : α) (_ : a = b) (a : α) (b : α) (_ : b = a) (a : α) : a = b := by expose_names trace_state sorry /-- -info: α : Sort u_1 +trace: α : Sort u_1 a b : α h_1 : a = b h_2 : True @@ -24,14 +24,14 @@ h_3 : True ∨ False h : b = a ⊢ b = a -/ -#guard_msgs (info) in +#guard_msgs (trace) in example (a b : α) (h : a = b) (_ : True) (_ : True ∨ False) (h : b = a) : b = a := by expose_names trace_state rw [h] /-- -info: α : Sort u_1 +trace: α : Sort u_1 a b : α h : a = b h_3 : True @@ -41,7 +41,7 @@ h_5 : True h_2 : b = a ⊢ b = a -/ -#guard_msgs (info) in +#guard_msgs (trace) in example (a b : α) (h : a = b) (_ : True) (_ : False) (h_1 : True ∨ False) (_ : True) (h_2 : b = a) : b = a := by expose_names trace_state diff --git a/tests/lean/run/extract_lets.lean b/tests/lean/run/extract_lets.lean index 54df22882c24..1a815edf9e5f 100644 --- a/tests/lean/run/extract_lets.lean +++ b/tests/lean/run/extract_lets.lean @@ -10,7 +10,7 @@ axiom test_sorry {α : Sort _} : α Extract a top-level let, no names given. -/ /-- -info: x✝ : Nat := 2 +trace: x✝ : Nat := 2 ⊢ x✝ = 2 -/ #guard_msgs in @@ -23,7 +23,7 @@ example : let x := 2; x = 2 := by Extract a top-level let, name given. -/ /-- -info: z : Nat := 2 +trace: z : Nat := 2 ⊢ z = 2 -/ #guard_msgs in @@ -36,7 +36,7 @@ example : let x := 2; x = 2 := by Extract a top-level let, placeholder name given. -/ /-- -info: x✝ : Nat := 2 +trace: x✝ : Nat := 2 ⊢ x✝ = 2 -/ #guard_msgs in @@ -49,7 +49,7 @@ example : let x := 2; x = 2 := by Extract an embedded let, name given. -/ /-- -info: z : Nat := 2 +trace: z : Nat := 2 ⊢ z = 2 -/ #guard_msgs in @@ -62,7 +62,7 @@ example : (let x := 2; x) = 2 := by Extract multiple embedded lets, no names given. -/ /-- -info: x✝ : Nat := 2 +trace: x✝ : Nat := 2 y✝ : Nat := 1 + 1 ⊢ x✝ = y✝ -/ @@ -76,7 +76,7 @@ example : (let x := 2; x) = (let y := 1 + 1; y) := by Names extracted lets in order, but keeps extracting even after list is exhausted. -/ /-- -info: z : Nat := 2 +trace: z : Nat := 2 y✝ : Nat := 1 + 1 ⊢ z = y✝ -/ @@ -93,7 +93,7 @@ Too many names, linter warning. warning: unused name note: this linter can be disabled with `set_option linter.tactic.unusedName false` --- -info: z : Nat := 2 +trace: z : Nat := 2 z' : Nat := 1 + 1 ⊢ z = z' -/ @@ -107,7 +107,7 @@ example : (let x := 2; x) = (let y := 1 + 1; y) := by Length of name list controls number of lets in `+onlyGivenNames` mode. -/ /-- -info: z : Nat := 2 +trace: z : Nat := 2 ⊢ z = let y := 1 + 1; y @@ -118,7 +118,7 @@ example : (let x := 2; x) = (let y := 1 + 1; y) := by trace_state rfl /-- -info: z : Nat := 2 +trace: z : Nat := 2 w : Nat := 1 + 1 ⊢ z = w -/ @@ -132,7 +132,7 @@ example : (let x := 2; x) = (let y := 1 + 1; y) := by Merging. -/ /-- -info: x✝ : Nat := 2 +trace: x✝ : Nat := 2 ⊢ x✝ = x✝ -/ #guard_msgs in @@ -145,7 +145,7 @@ example : (let x := 2; x) = (let y := 2; y) := by Merging, even if we run out of names. -/ /-- -info: z : Nat := 2 +trace: z : Nat := 2 ⊢ z = z -/ #guard_msgs in @@ -158,7 +158,7 @@ example : (let x := 2; x) = (let y := 2; y) := by Merging reuses pre-existing declarations -/ /-- -info: z : Nat := 2 +trace: z : Nat := 2 ⊢ z = z -/ #guard_msgs in @@ -172,7 +172,7 @@ example : (let x := 2; x) = (let y := 2; y) := by Merging doesn't reuse pre-existing declarations when `-useContext`. 
-/ /-- -info: z : Nat := 2 +trace: z : Nat := 2 x✝ : Nat := 2 ⊢ x✝ = x✝ -/ @@ -187,7 +187,7 @@ example : (let x := 2; x) = (let y := 2; y) := by Works with `have` (`let_fun`) -/ /-- -info: a✝ : Nat := 2 +trace: a✝ : Nat := 2 x✝ : Nat := a✝ y✝ : Nat := a✝ + 0 ⊢ x✝ = y✝ @@ -202,7 +202,7 @@ example : have a := 2; (have x := a; x) = (have y := a + 0; y) := by Extracts at both the type and the value of a local definition. -/ /-- -info: α✝ : Type := Nat +trace: α✝ : Type := Nat y✝ : Nat := 2 x : α✝ := 2 ⊢ x = x @@ -215,7 +215,7 @@ example : let x : (let α := Nat; α) := (let y := 2; 2); x = x := by rfl -- Essentially same state: /-- -info: α✝ : Type := Nat +trace: α✝ : Type := Nat y✝ : Nat := 2 x✝ : α✝ := 2 ⊢ x✝ = x✝ @@ -230,7 +230,7 @@ example : let x : (let α := Nat; α) := (let y := 2; 2); x = x := by Basic `descend := false` test. -/ /-- -info: x✝ : Nat := 2 +trace: x✝ : Nat := 2 ⊢ x✝ = 2 -/ #guard_msgs in @@ -243,7 +243,7 @@ example : let x := 2; x = 2 := by Make sure `descend := false` is not obstructed by metadata. -/ /-- -info: this : True +trace: this : True x✝ : Nat := 2 ⊢ x✝ = 2 -/ @@ -271,7 +271,7 @@ example : (let x := 2; x) = 2 := by In `-descend` mode, merges using pre-existing declarations. -/ /-- -info: w : Nat := 2 +trace: w : Nat := 2 y✝ : Nat := 3 ⊢ w = 2 + y✝ - y✝ -/ @@ -286,7 +286,7 @@ example : let x := 2; let y := 3; let z := 3; x = 2 + y - z := by `-descend` works with `have` (`let_fun`) -/ /-- -info: a✝ : Nat := 2 +trace: a✝ : Nat := 2 ⊢ (let_fun x := a✝; x) = let_fun y := a✝ + 0; @@ -302,7 +302,7 @@ example : have a := 2; (have x := a; x) = (have y := a + 0; y) := by Extracting at a hypothesis -/ /-- -info: x✝ : Nat := 1 +trace: x✝ : Nat := 1 h : x✝ = x✝ ⊢ True -/ @@ -317,7 +317,7 @@ example (h : let x := 1; x = x) : True := by Extracting at a hypothesis, with names -/ /-- -info: y : Nat := 1 +trace: y : Nat := 1 h : y = y ⊢ True -/ @@ -332,7 +332,7 @@ example (h : let x := 1; x = x) : True := by Extracting at a hypothesis, reorders hypotheses -/ /-- -info: h' : Nat +trace: h' : Nat y : Nat := 1 h : y = y ⊢ True @@ -348,7 +348,7 @@ example (h : let x := 1; x = x) (h' : Nat) : True := by Extracting at a hypothesis, not all top level. -/ /-- -info: x✝ : Nat := 1 +trace: x✝ : Nat := 1 y✝ : Nat := 2 h : x✝ + 1 = y✝ ⊢ True @@ -363,7 +363,7 @@ example (h : let x := 1; x + 1 = let y := 2; y) : True := by Extracting at a hypothesis, not all top level, in `-descend` mode. -/ /-- -info: x✝ : Nat := 1 +trace: x✝ : Nat := 1 h : x✝ + 1 = let y := 2; @@ -380,7 +380,7 @@ example (h : let x := 1; x + 1 = let y := 2; y) : True := by At multiple locations with `merge := true`. -/ /-- -info: _z✝ : Nat := 3 +trace: _z✝ : Nat := 3 x✝ : Nat := 1 h : x✝ + 2 = _z✝ ⊢ ∀ (x : Nat), True @@ -396,7 +396,7 @@ example (h : let x := 1; let y := 3; x + 2 = y) : let _z := 3; ∀ (_ : Nat), Tr At multiple locations with `merge := false`. -/ /-- -info: _z✝ : Nat := 3 +trace: _z✝ : Nat := 3 x✝ : Nat := 1 y✝ : Nat := 3 h : x✝ + 2 = y✝ @@ -413,7 +413,7 @@ example (h : let x := 1; let y := 3; x + 2 = y) : let _z := 3; ∀ (_ : Nat), Tr Merging can chain. This tests how extracted let declarations are recalled and can handle dependence. -/ /-- -info: x✝ : Nat := 2 +trace: x✝ : Nat := 2 y✝ : Nat := x✝ ⊢ y✝ = y✝ -/ @@ -427,7 +427,7 @@ example : (let x := 2; let y := x; y) = (let x' := 2; let y' := x'; y') := by Same merging example, but with `-merge`. 
-/ /-- -info: x✝ : Nat := 2 +trace: x✝ : Nat := 2 y✝ : Nat := x✝ x'✝ : Nat := 2 y'✝ : Nat := x'✝ @@ -445,21 +445,21 @@ Reported at https://leanprover.zulipchat.com/#narrow/stream/287929-mathlib4/topi Unused lets are handled properly. -/ /-- -info: ok✝ : Prop := True +trace: ok✝ : Prop := True h : let _not_ok := False; ok✝ ⊢ let _also_ok := 3; True --- -info: ok✝ : Prop := True +trace: ok✝ : Prop := True h : let _not_ok := False; ok✝ _also_ok✝ : Nat := 3 ⊢ True --- -info: ok✝ : Prop := True +trace: ok✝ : Prop := True _also_ok✝ : Nat := 3 _not_ok✝ : Prop := False h : ok✝ @@ -479,12 +479,12 @@ example (h : let ok := True; let _not_ok := False; ok) : let _also_ok := 3; True Testing `+usedOnly` -/ /-- -info: ok✝ : Prop := True +trace: ok✝ : Prop := True h : ok✝ ⊢ let _also_ok := 3; True --- -info: ok✝ : Prop := True +trace: ok✝ : Prop := True h : ok✝ ⊢ True -/ @@ -500,12 +500,12 @@ example (h : let ok := True; let _not_ok := False; ok) : let _also_ok := 3; True Testing `+usedOnly` with `-descend` -/ /-- -info: ok✝ : Prop := True +trace: ok✝ : Prop := True h : ok✝ ⊢ let _also_ok := 3; True --- -info: ok✝ : Prop := True +trace: ok✝ : Prop := True h : ok✝ ⊢ True -/ @@ -521,7 +521,7 @@ example (h : let ok := True; let _not_ok := False; ok) : let _also_ok := 3; True `+proofs` -/ /-- -info: this✝ : (some true).isSome = true := of_eq_true (eq_self true) +trace: this✝ : (some true).isSome = true := of_eq_true (eq_self true) ⊢ (some true).get this✝ = true -/ #guard_msgs in @@ -535,7 +535,7 @@ example : Option.get (some true) (have := (by simp); this) = true := by `+implicits` -/ /-- -info: α✝ : Type := Nat +trace: α✝ : Type := Nat ⊢ id 2 = 2 -/ #guard_msgs in @@ -570,7 +570,7 @@ example : ∀ n : Nat, let x := n; x = x := by Can extract from underneath another `let`. -/ /-- -info: y✝ : Nat := 2 +trace: y✝ : Nat := 2 ⊢ ∀ (n : Nat), let x := n; x + y✝ = x + y✝ @@ -638,7 +638,7 @@ See also the `lift_lets.lean` test file. Lifts, does not make use of name generator. -/ /-- -info: ⊢ ∀ (n : Nat), +trace: ⊢ ∀ (n : Nat), let x := n; n = x -/ @@ -654,7 +654,7 @@ example : ∀ n : Nat, n = (let x := n; x) := by Same example, but testing `letFun`. -/ /-- -info: ⊢ ∀ (n : Nat), +trace: ⊢ ∀ (n : Nat), let_fun x := n; n = x -/ @@ -671,7 +671,7 @@ Merging of merely-lifted lets. Four cases to this test, depending on whether a ` and whether the second is a `have` or `let`. -/ /-- -info: ⊢ ∀ (n : Nat), +trace: ⊢ ∀ (n : Nat), let_fun x := n; x = x -/ @@ -683,7 +683,7 @@ example : ∀ n : Nat, (have x := n; x) = (have x' := n; x') := by intros rfl /-- -info: ⊢ ∀ (n : Nat), +trace: ⊢ ∀ (n : Nat), let x := n; x = x -/ @@ -695,7 +695,7 @@ example : ∀ n : Nat, (let x := n; x) = (have x' := n; x') := by intros rfl /-- -info: ⊢ ∀ (n : Nat), +trace: ⊢ ∀ (n : Nat), let x := n; x = x -/ @@ -707,7 +707,7 @@ example : ∀ n : Nat, (have x := n; x) = (let x' := n; x') := by intros rfl /-- -info: ⊢ ∀ (n : Nat), +trace: ⊢ ∀ (n : Nat), let x := n; x = x -/ @@ -723,7 +723,7 @@ example : ∀ n : Nat, (let x := n; x) = (let x' := n; x') := by Without merging -/ /-- -info: ⊢ ∀ (n : Nat), +trace: ⊢ ∀ (n : Nat), let_fun x := n; let_fun x' := n; x = x' @@ -748,7 +748,7 @@ example : ∀ n : Nat, let x := n; let y := x; y = n := by Extracting `let`s in proofs in `+proof` mode. -/ /-- -info: m : Nat +trace: m : Nat h : ∃ n, n + 1 = m x : Fin m y : Fin (h.choose + 1) @@ -771,10 +771,10 @@ example (m : Nat) (h : ∃ n, n + 1 = m) (x : Fin m) (y : Fin _) : Limitation: we can use `extract_lets` within `conv`, but the let bindings do not persist. 
-/ /-- -info: y : Type := Nat +trace: y : Type := Nat | y = Int --- -info: ⊢ Nat = Int +trace: ⊢ Nat = Int -/ #guard_msgs in example : let x := Nat; x = Int := by diff --git a/tests/lean/run/fixedParams.lean b/tests/lean/run/fixedParams.lean index 04317f95a26b..d857e616aeb5 100644 --- a/tests/lean/run/fixedParams.lean +++ b/tests/lean/run/fixedParams.lean @@ -7,7 +7,7 @@ namespace Ex1 /-- error: well-founded recursion cannot be used, 'Ex1.foo' does not take any (non-fixed) arguments --- -info: [Elab.definition.fixedParams] getFixedParams: +trace: [Elab.definition.fixedParams] getFixedParams: • ⏎ • -/ @@ -25,7 +25,7 @@ namespace Ex2 /-- error: well-founded recursion cannot be used, 'Ex2.foo' does not take any (non-fixed) arguments --- -info: [Elab.definition.fixedParams] getFixedParams: +trace: [Elab.definition.fixedParams] getFixedParams: • [#1 #1] • [#1 #1] -/ @@ -41,7 +41,7 @@ namespace Ex3 /-- error: well-founded recursion cannot be used, 'Ex3.foo' does not take any (non-fixed) arguments --- -info: [Elab.definition.fixedParams] getFixedParams: +trace: [Elab.definition.fixedParams] getFixedParams: • [#1 #2] [#2 #1] • [#2 #1] [#1 #2] -/ @@ -56,7 +56,7 @@ end Ex3 namespace Ex4 /-- -info: [Elab.definition.fixedParams] getFixedParams: notFixed 0 3: +trace: [Elab.definition.fixedParams] getFixedParams: notFixed 0 3: In foo c n b m m not matched [Elab.definition.fixedParams] getFixedParams: • [#1 #3] ❌ [#3 #1] ❌ • [#3 #1] ❌ [#1 #3] ❌ @@ -73,7 +73,7 @@ end Ex4 namespace Append1 /-- -info: [Elab.definition.fixedParams] getFixedParams: notFixed 0 1: +trace: [Elab.definition.fixedParams] getFixedParams: notFixed 0 1: In app as bs x✝¹ =/= as [Elab.definition.fixedParams] getFixedParams: notFixed 0 2: @@ -81,18 +81,18 @@ info: [Elab.definition.fixedParams] getFixedParams: notFixed 0 1: x✝ =/= bs [Elab.definition.fixedParams] getFixedParams: • [#1] ❌ ❌ -/ -#guard_msgs(info) in +#guard_msgs(trace) in def app : List α → List α → List α | [], bs => bs | a::as, bs => a :: app as bs /-- -info: [Elab.definition.fixedParams] getFixedParams: notFixed 0 1: +trace: [Elab.definition.fixedParams] getFixedParams: notFixed 0 1: In app' as bs as✝ =/= as [Elab.definition.fixedParams] getFixedParams: • [#1] ❌ [#3] -/ -#guard_msgs(info) in +#guard_msgs(trace) in def app' (as : List α) (bs : List α) : List α := match as with | [] => bs diff --git a/tests/lean/run/frontend_meeting_2022_09_13.lean b/tests/lean/run/frontend_meeting_2022_09_13.lean index 784ab6ae0327..02e540edc32f 100644 --- a/tests/lean/run/frontend_meeting_2022_09_13.lean +++ b/tests/lean/run/frontend_meeting_2022_09_13.lean @@ -86,11 +86,11 @@ elab "seq" s:tacticSeq : tactic => do evalTactic tac /-- -info: x y : Nat +trace: x y : Nat h : x = y ⊢ 0 + x = y --- -info: x y : Nat +trace: x y : Nat h : x = y ⊢ 0 + y = y -/ @@ -100,11 +100,11 @@ example (h : x = y) : 0 + x = y := by done /-- -info: x y : Nat +trace: x y : Nat h : x = y ⊢ 0 + x = y --- -info: x y : Nat +trace: x y : Nat h : x = y ⊢ 0 + y = y -/ @@ -115,11 +115,11 @@ example (h : x = y) : 0 + x = y := by done /-- -info: x y : Nat +trace: x y : Nat h : x = y ⊢ 0 + x = y --- -info: x y : Nat +trace: x y : Nat h : x = y ⊢ 0 + y = y -/ diff --git a/tests/lean/run/funind_unfolding.lean b/tests/lean/run/funind_unfolding.lean index 45aeaba6c524..0636d1b20917 100644 --- a/tests/lean/run/funind_unfolding.lean +++ b/tests/lean/run/funind_unfolding.lean @@ -10,9 +10,10 @@ termination_by x => x info: fib.fun_cases_unfolding (motive : Nat → Nat → Prop) (case1 : motive 0 0) (case2 : motive 1 1) (case3 : ∀ (n 
: Nat), motive n.succ.succ (fib n + fib (n + 1))) (x✝ : Nat) : motive x✝ (fib x✝) -/ -#guard_msgs in +#guard_msgs(pass trace, all) in #check fib.fun_cases_unfolding +-- set_option trace.Meta.FunInd true in def ackermann : Nat → Nat → Nat | 0, m => m + 1 | n+1, 0 => ackermann n 1 @@ -25,7 +26,7 @@ info: ackermann.fun_cases_unfolding (motive : Nat → Nat → Nat → Prop) (cas (case3 : ∀ (n m : Nat), motive n.succ m.succ (ackermann n (ackermann (n + 1) m))) (x✝ x✝¹ : Nat) : motive x✝ x✝¹ (ackermann x✝ x✝¹) -/ -#guard_msgs in +#guard_msgs(pass trace, all) in #check ackermann.fun_cases_unfolding def fib' : Nat → Nat @@ -39,7 +40,7 @@ info: fib'.fun_cases_unfolding (motive : Nat → Nat → Prop) (case1 : motive 0 (case3 : ∀ (n : Nat), (n = 0 → False) → (n = 1 → False) → motive n (fib' (n - 1) + fib' (n - 2))) (x✝ : Nat) : motive x✝ (fib' x✝) -/ -#guard_msgs in +#guard_msgs(pass trace, all) in #check fib'.fun_cases_unfolding def fib'' (n : Nat) : Nat := @@ -66,7 +67,7 @@ info: fib''.fun_cases_unfolding (motive : Nat → Nat → Prop) (case1 : ∀ (n ¬foo < 100 → motive n 0) (n : Nat) : motive n (fib'' n) -/ -#guard_msgs in +#guard_msgs(pass trace, all) in #check fib''.fun_cases_unfolding -- set_option trace.Meta.FunInd true in @@ -80,7 +81,7 @@ info: filter.fun_cases (motive : (Nat → Bool) → List Nat → Prop) (case1 : (case3 : ∀ (p : Nat → Bool) (x : Nat) (xs : List Nat), ¬p x = true → motive p (x :: xs)) (p : Nat → Bool) (x✝ : List Nat) : motive p x✝ -/ -#guard_msgs in +#guard_msgs(pass trace, all) in #check filter.fun_cases /-- @@ -90,7 +91,7 @@ info: filter.fun_cases_unfolding (motive : (Nat → Bool) → List Nat → List (case3 : ∀ (p : Nat → Bool) (x : Nat) (xs : List Nat), ¬p x = true → motive p (x :: xs) (filter p xs)) (p : Nat → Bool) (x✝ : List Nat) : motive p x✝ (filter p x✝) -/ -#guard_msgs in +#guard_msgs(pass trace, all) in #check filter.fun_cases_unfolding /-- @@ -98,7 +99,7 @@ info: filter.induct (p : Nat → Bool) (motive : List Nat → Prop) (case1 : mot (case2 : ∀ (x : Nat) (xs : List Nat), p x = true → motive xs → motive (x :: xs)) (case3 : ∀ (x : Nat) (xs : List Nat), ¬p x = true → motive xs → motive (x :: xs)) (a✝ : List Nat) : motive a✝ -/ -#guard_msgs in +#guard_msgs(pass trace, all) in #check filter.induct /-- @@ -107,7 +108,7 @@ info: filter.induct_unfolding (p : Nat → Bool) (motive : List Nat → List Nat (case3 : ∀ (x : Nat) (xs : List Nat), ¬p x = true → motive xs (filter p xs) → motive (x :: xs) (filter p xs)) (a✝ : List Nat) : motive a✝ (filter p a✝) -/ -#guard_msgs in +#guard_msgs(pass trace, all) in #check filter.induct_unfolding theorem filter_const_false_is_nil : @@ -141,7 +142,7 @@ info: map.fun_cases (motive : (Nat → Bool) → List Nat → Prop) (case1 : ∀ (case2 : ∀ (f : Nat → Bool) (x : Nat) (xs : List Nat), motive f (x :: xs)) (f : Nat → Bool) (x✝ : List Nat) : motive f x✝ -/ -#guard_msgs in +#guard_msgs(pass trace, all) in #check map.fun_cases /-- @@ -150,14 +151,14 @@ info: map.fun_cases_unfolding (motive : (Nat → Bool) → List Nat → List Boo (case2 : ∀ (f : Nat → Bool) (x : Nat) (xs : List Nat), motive f (x :: xs) (f x :: map f xs)) (f : Nat → Bool) (x✝ : List Nat) : motive f x✝ (map f x✝) -/ -#guard_msgs in +#guard_msgs(pass trace, all) in #check map.fun_cases_unfolding /-- info: map.induct (motive : List Nat → Prop) (case1 : motive []) (case2 : ∀ (x : Nat) (xs : List Nat), motive xs → motive (x :: xs)) (a✝ : List Nat) : motive a✝ -/ -#guard_msgs in +#guard_msgs(pass trace, all) in #check map.induct /-- @@ -165,11 +166,12 @@ info: map.induct_unfolding (f : Nat → Bool) (motive : List 
Nat → List Bool (case2 : ∀ (x : Nat) (xs : List Nat), motive xs (map f xs) → motive (x :: xs) (f x :: map f xs)) (a✝ : List Nat) : motive a✝ (map f a✝) -/ -#guard_msgs in +#guard_msgs(pass trace, all) in #check map.induct_unfolding namespace BinaryWF +-- set_option trace.Meta.FunInd true in def map2 (f : Nat → Nat → Bool) : List Nat → List Nat → List Bool | x::xs, y::ys => f x y::map2 f xs ys | _, _ => [] @@ -187,7 +189,7 @@ info: BinaryWF.map2.induct_unfolding (f : Nat → Nat → Bool) (motive : List N motive x x_1 []) (a✝ a✝¹ : List Nat) : motive a✝ a✝¹ (map2 f a✝ a✝¹) -/ -#guard_msgs in +#guard_msgs(pass trace, all) in #check map2.induct_unfolding end BinaryWF @@ -209,7 +211,7 @@ info: BinaryStructural.map2.induct_unfolding (f : Nat → Nat → Bool) (motive (∀ (x_1 : Nat) (xs : List Nat) (y : Nat) (ys : List Nat), t = x_1 :: xs → x = y :: ys → False) → motive t x []) (a✝ a✝¹ : List Nat) : motive a✝ a✝¹ (map2 f a✝ a✝¹) -/ -#guard_msgs in +#guard_msgs(pass trace, all) in #check map2.induct_unfolding end BinaryStructural @@ -245,7 +247,7 @@ info: MutualWF.map2a.mutual_induct_unfolding (f : Nat → Nat → Bool) (motive1 motive2 x x_1 []) : (∀ (a a_1 : List Nat), motive1 a a_1 (map2a f a a_1)) ∧ ∀ (a a_1 : List Nat), motive2 a a_1 (map2b f a a_1) -/ -#guard_msgs in +#guard_msgs(pass trace, all) in #check map2a.mutual_induct_unfolding /-- @@ -266,7 +268,7 @@ info: MutualWF.map2a.induct_unfolding (f : Nat → Nat → Bool) (motive1 motive motive2 x x_1 []) (a✝ a✝¹ : List Nat) : motive1 a✝ a✝¹ (map2a f a✝ a✝¹) -/ -#guard_msgs in +#guard_msgs(pass trace, all) in #check map2a.induct_unfolding end MutualWF @@ -297,7 +299,7 @@ info: MutualStructural.map2a.induct (motive_1 motive_2 : List Nat → List Nat (∀ (x_1 : Nat) (xs : List Nat) (y : Nat) (ys : List Nat), t = x_1 :: xs → x = y :: ys → False) → motive_2 t x) (a✝ a✝¹ : List Nat) : motive_1 a✝ a✝¹ -/ -#guard_msgs in +#guard_msgs(pass trace, all) in #check map2a.induct @@ -320,7 +322,7 @@ info: MutualStructural.map2a.mutual_induct_unfolding (f : Nat → Nat → Bool) motive_2 t x (map2b f t x)) : (∀ (a a_1 : List Nat), motive_1 a a_1 (map2a f a a_1)) ∧ ∀ (a a_1 : List Nat), motive_2 a a_1 (map2b f a a_1) -/ -#guard_msgs in +#guard_msgs(pass trace, all) in #check map2a.mutual_induct_unfolding @@ -343,7 +345,7 @@ info: MutualStructural.map2a.induct_unfolding (f : Nat → Nat → Bool) motive_2 t x (map2b f t x)) (a✝ a✝¹ : List Nat) : motive_1 a✝ a✝¹ (map2a f a✝ a✝¹) -/ -#guard_msgs in +#guard_msgs(pass trace, all) in #check map2a.induct_unfolding end MutualStructural @@ -391,7 +393,7 @@ info: siftDown.induct_unfolding (e : Nat) (motive : (a : Array Int) → Nat → (case3 : ∀ (a : Array Int) (root : Nat) (h : e ≤ a.size), ¬leftChild root < e → motive a root h a) (a : Array Int) (root : Nat) (h : e ≤ a.size) : motive a root h (siftDown a root e h) -/ -#guard_msgs in +#guard_msgs(pass trace, all) in #check siftDown.induct_unfolding /-- @@ -413,7 +415,7 @@ info: siftDown.induct (e : Nat) (motive : (a : Array Int) → Nat → e ≤ a.si (case3 : ∀ (a : Array Int) (root : Nat) (h : e ≤ a.size), ¬leftChild root < e → motive a root h) (a : Array Int) (root : Nat) (h : e ≤ a.size) : motive a root h -/ -#guard_msgs in +#guard_msgs(pass trace, all) in #check siftDown.induct -- Now something with have @@ -429,14 +431,14 @@ info: withHave.induct_unfolding (motive : Nat → Bool → Prop) (case1 : 0 < 42 (case2 : ∀ (x : Nat), 0 < x → ¬x = 42 → motive (x - 1) (withHave (x - 1)) → motive x (withHave (x - 1))) (n : Nat) : motive n (withHave n) -/ -#guard_msgs in +#guard_msgs(pass trace, all) in 
#check withHave.induct_unfolding /-- info: withHave.fun_cases_unfolding (motive : Nat → Bool → Prop) (case1 : 0 < 42 → motive 42 true) (case2 : ∀ (n : Nat), 0 < n → ¬n = 42 → motive n (withHave (n - 1))) (n : Nat) : motive n (withHave n) -/ -#guard_msgs in +#guard_msgs(pass trace, all) in #check withHave.fun_cases_unfolding -- Structural Mutual recursion @@ -466,7 +468,7 @@ info: Tree.size.induct_unfolding.{u_1} {α : Type u_1} (motive_1 : Tree α → N motive_1 t t.size → motive_2 ts (Tree.size_aux ts) → motive_2 (t :: ts) (t.size + Tree.size_aux ts)) (a✝ : Tree α) : motive_1 a✝ a✝.size -/ -#guard_msgs in +#guard_msgs(pass trace, all) in #check Tree.size.induct_unfolding /-- @@ -483,5 +485,55 @@ info: Tree.size_aux.induct_unfolding.{u_1} {α : Type u_1} (motive_1 : Tree α motive_1 t t.size → motive_2 ts (Tree.size_aux ts) → motive_2 (t :: ts) (t.size + Tree.size_aux ts)) (a✝ : List (Tree α)) : motive_2 a✝ (Tree.size_aux a✝) -/ -#guard_msgs in +#guard_msgs(pass trace, all) in #check Tree.size_aux.induct_unfolding + + +-- When the discriminants are duplicated, it is very easy for `FunInd` to be confused +-- about how to instantiate the equality theorem. Maybe not relevant in practice for now? +-- Maybe even impossible to solve. + +-- set_option trace.Meta.FunInd true in +set_option linter.unusedVariables false in +def duplicatedDiscriminant (n : Nat) : Bool := + match h1 : n, h2 : n with + | 0, 0 => true + | a+1, 0 => false -- by simp_all + | 0, b+1 => false -- by simp_all + | a, b => true + +/-- +info: duplicatedDiscriminant.fun_cases_unfolding (motive : Nat → Bool → Prop) (case1 : 0 = 0 → motive 0 true) + (case2 : + ∀ (a : Nat), + a.succ = 0 → + motive 0 + (match h1 : 0, h2 : 0 with + | 0, 0 => true + | a.succ, 0 => false + | 0, b.succ => false + | a, b => true)) + (case3 : + ∀ (b : Nat), + 0 = b.succ → + motive b.succ + (match h1 : b.succ, h2 : b.succ with + | 0, 0 => true + | a.succ, 0 => false + | 0, b_1.succ => false + | a, b_1 => true)) + (case4 : + ∀ (b : Nat), + (b = 0 → b = 0 → False) → + (∀ (a : Nat), b = a.succ → b = 0 → False) → + (∀ (b_1 : Nat), b = 0 → b = b_1.succ → False) → + motive b + (match h1 : b, h2 : b with + | 0, 0 => true + | a.succ, 0 => false + | 0, b_1.succ => false + | a, b_1 => true)) + (n : Nat) : motive n (duplicatedDiscriminant n) +-/ +#guard_msgs(pass trace, all) in +#check duplicatedDiscriminant.fun_cases_unfolding diff --git a/tests/lean/run/generalizeMany.lean b/tests/lean/run/generalizeMany.lean index d83050742953..a704ca4a0d7a 100644 --- a/tests/lean/run/generalizeMany.lean +++ b/tests/lean/run/generalizeMany.lean @@ -1,7 +1,7 @@ set_option pp.analyze false /-- -info: p : (n : Nat) → Fin n → Prop +trace: p : (n : Nat) → Fin n → Prop n : Nat v : Fin n n' : Nat diff --git a/tests/lean/run/generalizeTelescope.lean b/tests/lean/run/generalizeTelescope.lean index 22ca23bba877..f8aa7fc1d79d 100644 --- a/tests/lean/run/generalizeTelescope.lean +++ b/tests/lean/run/generalizeTelescope.lean @@ -22,7 +22,7 @@ let t ← mkLambdaFVars ys ys.back! trace[Meta.debug] t pure () -/-- info: [Meta.debug] fun x x => x -/ +/-- trace: [Meta.debug] fun x x => x -/ #guard_msgs in #eval tst1 @@ -39,7 +39,7 @@ let t ← mkLambdaFVars ys ys.back! 
trace[Meta.debug] t pure () -/-- info: [Meta.debug] fun (x : Nat) (x_1 : Vec Nat x) (x : @Eq.{1} (Vec Nat x) x_1 x_1) => x -/ +/-- trace: [Meta.debug] fun (x : Nat) (x_1 : Vec Nat x) (x : @Eq.{1} (Vec Nat x) x_1 x_1) => x -/ #guard_msgs in #eval tst2 @@ -61,6 +61,6 @@ failIfSuccess do pure () trace[Meta.debug] "failed as expected" -/-- info: [Meta.debug] failed as expected -/ +/-- trace: [Meta.debug] failed as expected -/ #guard_msgs in #eval tst3 diff --git a/tests/lean/run/genindices.lean b/tests/lean/run/genindices.lean index cbde572865fc..99f11bc74e77 100644 --- a/tests/lean/run/genindices.lean +++ b/tests/lean/run/genindices.lean @@ -25,7 +25,7 @@ pure () set_option trace.Elab true /-- -info: [Elab] ⊢ ∀ (α : Type u) (xs : List (List α)) (h : Pred (List α) xs), xs ≠ [] → xs = xs +trace: [Elab] ⊢ ∀ (α : Type u) (xs : List (List α)) (h : Pred (List α) xs), xs ≠ [] → xs = xs [Elab] α✝ : Type u xs✝ : List (List α✝) h✝ : Pred (List α✝) xs✝ diff --git a/tests/lean/run/grind_array.lean b/tests/lean/run/grind_array.lean new file mode 100644 index 000000000000..a14293b85c2a --- /dev/null +++ b/tests/lean/run/grind_array.lean @@ -0,0 +1,4 @@ +set_option grind.warning false + +example {l : List α} {i : USize} {a : α} {h : i.toNat < l.toArray.size} : + l.toArray.uset i a h = (l.set i.toNat a).toArray := by grind diff --git a/tests/lean/run/grind_attrs.lean b/tests/lean/run/grind_attrs.lean index 5eb965cc8b67..7c97e139fd0f 100644 --- a/tests/lean/run/grind_attrs.lean +++ b/tests/lean/run/grind_attrs.lean @@ -27,20 +27,20 @@ opaque Expr.eval : Expr → State → Nat axiom Expr.constProp : Expr → State → Expr -/-- info: [grind.ematch.pattern] eval_constProp_of_sub: [State.le #3 #2, constProp #1 #3] -/ -#guard_msgs (info) in +/-- trace: [grind.ematch.pattern] eval_constProp_of_sub: [State.le #3 #2, constProp #1 #3] -/ +#guard_msgs (trace) in set_option trace.grind.ematch.pattern true in @[grind =>] theorem Expr.eval_constProp_of_sub (e : Expr) (h : State.le σ' σ) : (e.constProp σ').eval σ = e.eval σ := sorry -/-- info: [grind.ematch.pattern] eval_constProp_of_eq_of_sub: [State.le #3 #2, constProp #1 #3] -/ -#guard_msgs (info) in +/-- trace: [grind.ematch.pattern] eval_constProp_of_eq_of_sub: [State.le #3 #2, constProp #1 #3] -/ +#guard_msgs (trace) in set_option trace.grind.ematch.pattern true in @[grind =>] theorem Expr.eval_constProp_of_eq_of_sub {e : Expr} (h₂ : State.le σ' σ) : (e.constProp σ').eval σ = e.eval σ := sorry -/-- info: [grind.ematch.pattern] update_le_update: [le #4 #3, update #4 #2 #1] -/ -#guard_msgs (info) in +/-- trace: [grind.ematch.pattern] update_le_update: [le #4 #3, update #4 #2 #1] -/ +#guard_msgs (trace) in set_option trace.grind.ematch.pattern true in @[grind =>] theorem State.update_le_update (h : State.le σ' σ) : State.le (σ'.update x v) (σ.update x v) := sorry diff --git a/tests/lean/run/grind_beta.lean b/tests/lean/run/grind_beta.lean index 6e24e05d22a8..248a9e3d4af3 100644 --- a/tests/lean/run/grind_beta.lean +++ b/tests/lean/run/grind_beta.lean @@ -30,10 +30,10 @@ example (f : Nat → Nat → Nat) : f 2 3 ≠ 5 → f = (fun x y : Nat => x + y) opaque bla : Nat → Nat → Nat → Nat /-- -info: [grind.beta] f 2 3 = bla 2 3 2, using fun x y => bla x y x +trace: [grind.beta] f 2 3 = bla 2 3 2, using fun x y => bla x y x [grind.beta] f 2 3 = 2 + 3, using fun x y => x + y -/ -#guard_msgs (info) in +#guard_msgs (trace) in set_option trace.grind.beta true in example (g h f : Nat → Nat → Nat) : f 2 3 ≠ 5 → diff --git a/tests/lean/run/grind_canon_insts.lean 
b/tests/lean/run/grind_canon_insts.lean index 432122d4ff22..602c1e791ea0 100644 --- a/tests/lean/run/grind_canon_insts.lean +++ b/tests/lean/run/grind_canon_insts.lean @@ -52,8 +52,8 @@ def fallback : Fallback := do set_option trace.Meta.debug true -/-- info: [Meta.debug] [a * (b * c), b * c, d * (b * c)] -/ -#guard_msgs (info) in +/-- trace: [Meta.debug] [a * (b * c), b * c, d * (b * c)] -/ +#guard_msgs (trace) in example (a b c d : Nat) : b * (a * c) = d * (b * c) → False := by rw [left_comm] -- Introduces a new (non-canonical) instance for `Mul Nat` grind on_failure fallback -- State should have only 3 `*`-applications @@ -62,14 +62,14 @@ example (a b c d : Nat) : b * (a * c) = d * (b * c) → False := by set_option pp.notation false in set_option pp.explicit true in /-- -info: [Meta.debug] [@HMul.hMul Int Int Int (@instHMul Int Int.instMul) (@NatCast.natCast Int instNatCastInt b) +trace: [Meta.debug] [@HMul.hMul Int Int Int (@instHMul Int Int.instMul) (@NatCast.natCast Int instNatCastInt b) (@NatCast.natCast Int instNatCastInt a), @HMul.hMul Int Int Int (@instHMul Int Int.instMul) (@NatCast.natCast Int instNatCastInt b) (@NatCast.natCast Int instNatCastInt d), @HMul.hMul Nat Nat Nat (@instHMul Nat instMulNat) b a, @HMul.hMul Nat Nat Nat (@instHMul Nat instMulNat) b d] -/ -#guard_msgs (info) in +#guard_msgs (trace) in example (a b c d : Nat) : b * a = d * b → False := by rw [CommMonoid.mul_comm d b] -- Introduces a new (non-canonical) instance for `Mul Nat` -- See target here diff --git a/tests/lean/run/grind_canon_types.lean b/tests/lean/run/grind_canon_types.lean index 05fa1ee3508c..cf47bd5746b8 100644 --- a/tests/lean/run/grind_canon_types.lean +++ b/tests/lean/run/grind_canon_types.lean @@ -12,9 +12,9 @@ def fallback : Fallback := do set_option trace.Meta.debug true set_option pp.explicit true /-- -info: [Meta.debug] [@f Nat a, @f Nat b] +trace: [Meta.debug] [@f Nat a, @f Nat b] -/ -#guard_msgs (info) in +#guard_msgs (trace) in example (a b c d : Nat) : @f Nat a = b → @f (g Nat) a = c → @f (g Nat) b = d → a = b → False := by -- State should have only two `f`-applications: `@f Nat a`, `@f Nat b` -- Note that `@f (g Nat) b` has been canonicalized to `@f Nat b`. diff --git a/tests/lean/run/grind_cases_tac.lean b/tests/lean/run/grind_cases_tac.lean index c63200ab146b..26c075fdf0f7 100644 --- a/tests/lean/run/grind_cases_tac.lean +++ b/tests/lean/run/grind_cases_tac.lean @@ -15,28 +15,28 @@ def f (v : Vec α n) : Bool := | .cons .. 
=> false /-- -info: n : Nat +trace: n : Nat v : Vec Nat n h : f v ≠ false ⊢ n + 1 = 0 → HEq (Vec.cons 10 v) Vec.nil → False --- -info: n : Nat +trace: n : Nat v : Vec Nat n h : f v ≠ false ⊢ ∀ {n_1 : Nat} (a : Nat) (a_1 : Vec Nat n_1), n + 1 = n_1 + 1 → HEq (Vec.cons 10 v) (Vec.cons a a_1) → False -/ -#guard_msgs (info) in +#guard_msgs (trace) in example (v : Vec Nat n) (h : f v ≠ false) : False := by cases' (Vec.cons 10 v) next => trace_state; sorry next => trace_state; sorry /-- -info: ⊢ False → False +trace: ⊢ False → False --- -info: ⊢ True → False +trace: ⊢ True → False -/ -#guard_msgs (info) in +#guard_msgs (trace) in example : False := by cases' (Or.inr (a := False) True.intro) next => trace_state; sorry diff --git a/tests/lean/run/grind_cat.lean b/tests/lean/run/grind_cat.lean index 446941614d3b..4ca15fa49555 100644 --- a/tests/lean/run/grind_cat.lean +++ b/tests/lean/run/grind_cat.lean @@ -1,3 +1,5 @@ +set_option grind.warning false + universe v v₁ v₂ v₃ u u₁ u₂ u₃ namespace CategoryTheory diff --git a/tests/lean/run/grind_congr.lean b/tests/lean/run/grind_congr.lean index 8c13ab8b7dec..72e30aa9fd81 100644 --- a/tests/lean/run/grind_congr.lean +++ b/tests/lean/run/grind_congr.lean @@ -1,4 +1,5 @@ import Lean +set_option grind.warning false def f (a : Nat) := a + a + a def g (a : Nat) := a + a @@ -16,29 +17,36 @@ set_option grind.debug true set_option grind.debug.proofs true /-- -info: [Meta.debug] [d, f b, c, f a] +trace: [Meta.debug] [d, f b, c, f a] -/ -#guard_msgs (info) in +#guard_msgs (trace) in example (a b c d : Nat) : a = b → f a = c → f b = d → False := by grind on_failure fallback /-- -info: [Meta.debug] [d, f b, c, f a] +trace: [Meta.debug] [d, f b, c, f a] -/ -#guard_msgs (info) in +#guard_msgs (trace) in example (a b c d : Nat) : f a = c → f b = d → a = b → False := by grind on_failure fallback /-- -info: [Meta.debug] [d, f (g b), c, f (g a)] +trace: [Meta.debug] [d, f (g b), c, f (g a)] -/ -#guard_msgs (info) in +#guard_msgs (trace) in example (a b c d e : Nat) : f (g a) = c → f (g b) = d → a = e → b = e → False := by grind on_failure fallback /-- -info: [Meta.debug] [d, f (g b), c, f v] +trace: [Meta.debug] [d, f (g b), c, f v] -/ -#guard_msgs (info) in +#guard_msgs (trace) in example (a b c d e v : Nat) : f v = c → f (g b) = d → a = e → b = e → v = g a → False := by grind on_failure fallback + +-- arrow congruence test +example : α = α' → α'' = α' → β' = β → (α → β) = (α'' → β') := by + grind + +example (a b c : Nat) (h₁ : a = c) (h₂ : b = c) : (a = b → Nat) = (b = a → Nat) := by + grind diff --git a/tests/lean/run/grind_cutsat_auto.lean b/tests/lean/run/grind_cutsat_auto.lean index e86ff399acca..e445e794f085 100644 --- a/tests/lean/run/grind_cutsat_auto.lean +++ b/tests/lean/run/grind_cutsat_auto.lean @@ -30,13 +30,13 @@ abbrev problem (x y z w v : Int) : Prop := (y ≥ -10) /-- -info: [grind.cutsat.model] x := 121 +trace: [grind.cutsat.model] x := 121 [grind.cutsat.model] y := -10 [grind.cutsat.model] z := -34 [grind.cutsat.model] w := 0 [grind.cutsat.model] v := 1 -/ -#guard_msgs (info) in +#guard_msgs (trace) in set_option trace.grind.cutsat.model true in example (x y z w v : Int) : problem x y z w v → False := by fail_if_success grind diff --git a/tests/lean/run/grind_cutsat_cooper.lean b/tests/lean/run/grind_cutsat_cooper.lean index 3405f163a3c1..38dcd6dc4d87 100644 --- a/tests/lean/run/grind_cutsat_cooper.lean +++ b/tests/lean/run/grind_cutsat_cooper.lean @@ -19,17 +19,17 @@ abbrev problem₁ [∀ n, OfNat α n] [Neg α] [Mul α] [Sub α] [Add α] [LE α 7*x - 9*y ≤ 4 
/-- -info: [grind.cutsat.model] x := 241/154 +trace: [grind.cutsat.model] x := 241/154 [grind.cutsat.model] y := 1 -/ -#guard_msgs (info) in +#guard_msgs (trace) in set_option trace.grind.cutsat.model true in example (x y : Int) : problem₁ x y → False := by fail_if_success grind +qlia -- Rational counterexamples allowed sorry /-- info: true -/ -#guard_msgs (info) in +#guard_msgs in open Std.Internal in #eval problem₁ (241/154 : Rat) (1 : Rat) diff --git a/tests/lean/run/grind_cutsat_diseq_1.lean b/tests/lean/run/grind_cutsat_diseq_1.lean index 0539ed88b2dd..56da6c5620c9 100644 --- a/tests/lean/run/grind_cutsat_diseq_1.lean +++ b/tests/lean/run/grind_cutsat_diseq_1.lean @@ -5,80 +5,80 @@ open Int.Linear set_option trace.grind.cutsat.assert true /-- -info: [grind.cutsat.assert] a + b + 1 ≤ 0 +trace: [grind.cutsat.assert] a + b + 1 ≤ 0 [grind.cutsat.assert] a + -1*b ≠ 0 -/ -#guard_msgs (info) in +#guard_msgs (trace) in example (a b : Int) : a + b < 0 → a ≠ b → False := by (fail_if_success grind); sorry -#guard_msgs (info) in -- `a` and `b` are not relevant to cutsat in the following example +#guard_msgs (trace) in -- `a` and `b` are not relevant to cutsat in the following example example (a b : Int) : a ≠ b → False := by (fail_if_success grind); sorry /-- -info: [grind.cutsat.assert] a + -1*b ≠ 0 +trace: [grind.cutsat.assert] a + -1*b ≠ 0 [grind.cutsat.assert] a + b + 1 ≤ 0 -/ -#guard_msgs (info) in +#guard_msgs (trace) in example (a b : Int) : a ≠ b → a + b < 0 → False := by (fail_if_success grind); sorry /-- -info: [grind.cutsat.assert] a + -1*b ≠ 0 +trace: [grind.cutsat.assert] a + -1*b ≠ 0 [grind.cutsat.assert] a + b + 1 ≤ 0 -/ -#guard_msgs (info) in +#guard_msgs (trace) in example (a b c : Int) : a ≠ c → c = b → a + b < 0 → False := by (fail_if_success grind); sorry /-- -info: [grind.cutsat.assert] a + -1*b ≠ 0 +trace: [grind.cutsat.assert] a + -1*b ≠ 0 [grind.cutsat.assert] a + b + 1 ≤ 0 -/ -#guard_msgs (info) in +#guard_msgs (trace) in example (a b c d : Int) : d ≠ c → c = b → a = d → a + b < 0 → False := by (fail_if_success grind); sorry /-- -info: [grind.cutsat.assert] a + b + 1 ≤ 0 +trace: [grind.cutsat.assert] a + b + 1 ≤ 0 [grind.cutsat.assert] a + -1*b ≠ 0 -/ -#guard_msgs (info) in +#guard_msgs (trace) in example (a b c d : Int) : d ≠ c → a = d → a + b < 0 → c = b → False := by (fail_if_success grind); sorry /-- -info: [grind.cutsat.assert] a + b + 1 ≤ 0 +trace: [grind.cutsat.assert] a + b + 1 ≤ 0 [grind.cutsat.assert] a + -1*b ≠ 0 [grind.cutsat.assert] e + -1*b = 0 [grind.cutsat.assert] -1*e + 1 ≤ 0 -/ -#guard_msgs (info) in +#guard_msgs (trace) in example (a b c d e : Int) : d ≠ c → a = d → a + b < 0 → c = b → c = e → e > 0 → False := by (fail_if_success grind); sorry /-- -info: [grind.cutsat.assert] -1*e + 1 ≤ 0 +trace: [grind.cutsat.assert] -1*e + 1 ≤ 0 [grind.cutsat.assert] b + -1*e = 0 [grind.cutsat.assert] a + -1*e ≠ 0 [grind.cutsat.assert] a + b + 1 ≤ 0 -/ -#guard_msgs (info) in +#guard_msgs (trace) in example (a b c d e : Int) : d ≠ c → a = d → c = b → c = e → e > 0 → a + b < 0 → False := by (fail_if_success grind); sorry /-- -info: [grind.cutsat.assert] -1*e + 1 ≤ 0 +trace: [grind.cutsat.assert] -1*e + 1 ≤ 0 [grind.cutsat.assert] b + -1*e = 0 [grind.cutsat.assert] a + b + 1 ≤ 0 [grind.cutsat.assert] a + -1*e ≠ 0 -/ -#guard_msgs (info) in +#guard_msgs (trace) in example (a b c d e : Int) : a = d → c = b → c = e → e > 0 → a + b < 0 → d ≠ c → False := by (fail_if_success grind); sorry -#guard_msgs (info) in -- no propagation to cutsat +#guard_msgs (trace) in -- no 
propagation to cutsat example (a b c d e : Int) : a = d → c = b → c = e → a = 1 → d ≠ c → False := by (fail_if_success grind); sorry diff --git a/tests/lean/run/grind_cutsat_diseq_2.lean b/tests/lean/run/grind_cutsat_diseq_2.lean index e5a24cc04aa5..747c8bb1843e 100644 --- a/tests/lean/run/grind_cutsat_diseq_2.lean +++ b/tests/lean/run/grind_cutsat_diseq_2.lean @@ -12,14 +12,14 @@ theorem ex₃ (a b c : Int) : a + b + c = 0 → a = c → b = 4 → c = -2 := by grind /-- -info: [grind.cutsat.assert] -1*「a + -2 * b + -2 * c」 + a + -2*b + -2*c = 0 +trace: [grind.cutsat.assert] -1*「a + -2 * b + -2 * c」 + a + -2*b + -2*c = 0 [grind.cutsat.assert] 「a + -2 * b + -2 * c」 = 0 [grind.cutsat.assert] -1*「a + -2 * b + -2 * d」 + a + -2*b + -2*d = 0 [grind.cutsat.assert] 「a + -2 * b + -2 * d」 ≠ 0 [grind.cutsat.assert] -1*d + c = 0 [grind.cutsat.assert] 0 ≠ 0 -/ -#guard_msgs (info) in +#guard_msgs (trace) in set_option trace.grind.cutsat.assert true in theorem ex₄ (a b c d : Int) : a = 2*b + 2*c → a - 2*b - 2*d ≠ 0 → c ≠ d := by grind diff --git a/tests/lean/run/grind_cutsat_div_1.lean b/tests/lean/run/grind_cutsat_div_1.lean index b8eb2a7f26b1..00eafeae4bee 100644 --- a/tests/lean/run/grind_cutsat_div_1.lean +++ b/tests/lean/run/grind_cutsat_div_1.lean @@ -21,22 +21,22 @@ theorem ex₄ (f : Int → Int) (a b : Int) (_ : 2 ∣ f (f a) + 1) (h₁ : 3 #print ex₄ /-- -info: [grind.debug.cutsat.search.assign] a := 1 +trace: [grind.debug.cutsat.search.assign] a := 1 [grind.debug.cutsat.search.assign] b := 0 -/ -#guard_msgs (info) in -- finds the model without any backtracking +#guard_msgs (trace) in -- finds the model without any backtracking set_option trace.grind.debug.cutsat.search.assign true in example (a b : Int) (_ : 2 ∣ a + 3) (_ : 3 ∣ a + b - 4) : False := by fail_if_success grind sorry /-- -info: [grind.cutsat.assert] 2 ∣ a + 3 +trace: [grind.cutsat.assert] 2 ∣ a + 3 [grind.cutsat.assert] 3 ∣ a + 3*b + -4 [grind.debug.cutsat.search.assign] a := 1 [grind.debug.cutsat.search.assign] b := 0 -/ -#guard_msgs (info) in +#guard_msgs (trace) in set_option trace.grind.cutsat.assert true in set_option trace.grind.debug.cutsat.search.assign true in example (a b : Int) (_ : 2 ∣ a + 3) (_ : 3 ∣ a + 3*b - 4) : False := by @@ -44,30 +44,30 @@ example (a b : Int) (_ : 2 ∣ a + 3) (_ : 3 ∣ a + 3*b - 4) : False := by sorry /-- -info: [grind.debug.cutsat.search.assign] a := 1 +trace: [grind.debug.cutsat.search.assign] a := 1 [grind.debug.cutsat.search.assign] b := 15 -/ -#guard_msgs (info) in +#guard_msgs (trace) in set_option trace.grind.debug.cutsat.search.assign true in example (a b : Int) (_ : 2 ∣ a + 3) (_ : 3 ∣ a + b - 4) (_ : b < 18): False := by fail_if_success grind sorry /-- -info: [grind.debug.cutsat.search.assign] a := 1 +trace: [grind.debug.cutsat.search.assign] a := 1 [grind.debug.cutsat.search.assign] b := 12 -/ -#guard_msgs (info) in +#guard_msgs (trace) in set_option trace.grind.debug.cutsat.search.assign true in example (a b : Int) (_ : 2 ∣ a + 3) (_ : 3 ∣ a + b - 4) (_ : b ≥ 11): False := by fail_if_success grind sorry /-- -info: [grind.debug.cutsat.search.assign] f 0 := 11 +trace: [grind.debug.cutsat.search.assign] f 0 := 11 [grind.debug.cutsat.search.assign] f 1 := 2 -/ -#guard_msgs (info) in +#guard_msgs (trace) in set_option trace.grind.debug.cutsat.search.assign true in example (f : Int → Int) (_ : 2 ∣ f 0 + 3) (_ : 3 ∣ f 0 + f 1 - 4) (_ : f 0 ≥ 11): False := by fail_if_success grind diff --git a/tests/lean/run/grind_cutsat_div_mod.lean b/tests/lean/run/grind_cutsat_div_mod.lean index 393773e84196..e2477a8b61d3 
100644 --- a/tests/lean/run/grind_cutsat_div_mod.lean +++ b/tests/lean/run/grind_cutsat_div_mod.lean @@ -13,10 +13,10 @@ example (x y : Int) : x % 2 + y = 3 → x = 5 → y = 2 := by grind /-- -info: [grind.cutsat.model] x := 5 +trace: [grind.cutsat.model] x := 5 [grind.cutsat.model] y := 2 -/ -#guard_msgs (info) in +#guard_msgs (trace) in set_option trace.grind.cutsat.model true in example (x y : Int) : x % 2 + y = 3 → x ≤ 5 → x > 4 → y = 1 := by fail_if_success grind diff --git a/tests/lean/run/grind_cutsat_eq_1.lean b/tests/lean/run/grind_cutsat_eq_1.lean index 5dd337768bad..ff71a9630c5e 100644 --- a/tests/lean/run/grind_cutsat_eq_1.lean +++ b/tests/lean/run/grind_cutsat_eq_1.lean @@ -3,10 +3,10 @@ set_option grind.debug true open Int.Linear /-- -info: [grind.cutsat.assert] -1*「b + f a + 1」 + b + f a + 1 = 0 +trace: [grind.cutsat.assert] -1*「b + f a + 1」 + b + f a + 1 = 0 [grind.cutsat.assert] 「b + f a + 1」 = 0 -/ -#guard_msgs (info) in +#guard_msgs (trace) in set_option trace.grind.cutsat.assert true in example (a b : Int) (f : Int → Int) (h₁ : f a + b + 3 = 2) : False := by fail_if_success grind diff --git a/tests/lean/run/grind_cutsat_le_1.lean b/tests/lean/run/grind_cutsat_le_1.lean index e6993c84a87c..63f9c790fb75 100644 --- a/tests/lean/run/grind_cutsat_le_1.lean +++ b/tests/lean/run/grind_cutsat_le_1.lean @@ -2,10 +2,10 @@ set_option grind.warning false set_option grind.debug true /-- -info: [grind.debug.cutsat.search.assign] b := -1 +trace: [grind.debug.cutsat.search.assign] b := -1 [grind.debug.cutsat.search.assign] a := 3 -/ -#guard_msgs (info) in +#guard_msgs (trace) in set_option trace.grind.debug.cutsat.search.assign true in example (a b : Int) (h₁ : a ≤ 3) (h₂ : a > 2) (h₃ : a + b < 3) : False := by fail_if_success grind diff --git a/tests/lean/run/grind_cutsat_le_2.lean b/tests/lean/run/grind_cutsat_le_2.lean index 87b0e0e26d92..7da35a078944 100644 --- a/tests/lean/run/grind_cutsat_le_2.lean +++ b/tests/lean/run/grind_cutsat_le_2.lean @@ -11,12 +11,12 @@ example (a b c d e : Int) : set_option trace.grind.cutsat.model true /-- -info: [grind.cutsat.model] a := 7 +trace: [grind.cutsat.model] a := 7 [grind.cutsat.model] b := 0 [grind.cutsat.model] c := 3 [grind.cutsat.model] d := 2 -/ -#guard_msgs (info) in +#guard_msgs (trace) in example (a b c d e : Int) : a + b ≥ 0 → a = 2*c + 1 → @@ -25,11 +25,11 @@ example (a b c d e : Int) : (fail_if_success grind); sorry /-- -info: [grind.cutsat.model] a := 17 +trace: [grind.cutsat.model] a := 17 [grind.cutsat.model] b := -9 [grind.cutsat.model] c := -9 -/ -#guard_msgs (info) in +#guard_msgs (trace) in example (a b c : Int) : 2*a + 3*b = 7 → 4*a + 7*c = 5 → diff --git a/tests/lean/run/grind_cutsat_nat_eq.lean b/tests/lean/run/grind_cutsat_nat_eq.lean index d5c4f8dc5140..89beaf0d60cd 100644 --- a/tests/lean/run/grind_cutsat_nat_eq.lean +++ b/tests/lean/run/grind_cutsat_nat_eq.lean @@ -70,21 +70,21 @@ example (a b : Int) : (a - b).toNat = 0 ↔ a ≤ b := by grind /-- -info: [grind.cutsat.model] x := 3 +trace: [grind.cutsat.model] x := 3 [grind.cutsat.model] y := 1 [grind.cutsat.model] z := 4 -/ -#guard_msgs (info) in +#guard_msgs (trace) in set_option trace.grind.cutsat.model true in example (x y z : Nat) : x ≥ 3 → x ≠ z → x > y → z ≤ 6 → x + y = z → False := by fail_if_success grind sorry /-- -info: [grind.cutsat.model] x := 13 +trace: [grind.cutsat.model] x := 13 [grind.cutsat.model] y := 9 -/ -#guard_msgs (info) in +#guard_msgs (trace) in set_option trace.grind.cutsat.model true in example (x y : Nat) : x > 8 → y > 8 → x ≠ y → (x - y) % 4 = 
1 := by fail_if_success grind @@ -115,10 +115,10 @@ example (x y : Nat) : x ^ 0 + y = 0 → False := by grind /-- -info: [grind.cutsat.model] x := 4 +trace: [grind.cutsat.model] x := 4 [grind.cutsat.model] y := 1 -/ -#guard_msgs (info) in +#guard_msgs (trace) in set_option trace.grind.cutsat.model true in example (x y : Nat) : x = y + 3 → y > 0 → False := by fail_if_success grind diff --git a/tests/lean/run/grind_cutsat_nat_le.lean b/tests/lean/run/grind_cutsat_nat_le.lean index 5c9f8d717be6..56a28c4b74d0 100644 --- a/tests/lean/run/grind_cutsat_nat_le.lean +++ b/tests/lean/run/grind_cutsat_nat_le.lean @@ -29,7 +29,7 @@ example (a b : Int) : a + b = Int.ofNat 2 → a - 2 = -b := by grind /-- -info: [grind.cutsat.assert] -1*「↑a * ↑b」 ≤ 0 +trace: [grind.cutsat.assert] -1*「↑a * ↑b」 ≤ 0 [grind.cutsat.assert] -1*↑c ≤ 0 [grind.cutsat.assert] -1*↑c + 「↑a * ↑b」 + 1 ≤ 0 [grind.cutsat.assert] ↑c = 0 @@ -37,7 +37,7 @@ info: [grind.cutsat.assert] -1*「↑a * ↑b」 ≤ 0 [grind.cutsat.assert] 「↑a * ↑b」 + 1 ≤ 0 [grind.cutsat.assert] 1 ≤ 0 -/ -#guard_msgs (info) in +#guard_msgs (trace) in set_option trace.grind.cutsat.assert true in example (a b c : Nat) : c > a * b → c >= 1 := by grind diff --git a/tests/lean/run/grind_cutsat_tests.lean b/tests/lean/run/grind_cutsat_tests.lean index 73af9b014ba7..058154e5a086 100644 --- a/tests/lean/run/grind_cutsat_tests.lean +++ b/tests/lean/run/grind_cutsat_tests.lean @@ -15,13 +15,13 @@ abbrev test1 (a b c d e : Int) := a ≤ 100 /-- -info: [grind.cutsat.model] a := 101 +trace: [grind.cutsat.model] a := 101 [grind.cutsat.model] b := 0 [grind.cutsat.model] c := 5335 [grind.cutsat.model] d := 0 [grind.cutsat.model] e := 0 -/ -#guard_msgs (info) in +#guard_msgs (trace) in set_option trace.grind.cutsat.model true in example (a b c d e : Int) : test1 a b c d e := by (fail_if_success grind); sorry diff --git a/tests/lean/run/grind_cutsat_upper_bug.lean b/tests/lean/run/grind_cutsat_upper_bug.lean index 7816edb35538..98c9c46c5a12 100644 --- a/tests/lean/run/grind_cutsat_upper_bug.lean +++ b/tests/lean/run/grind_cutsat_upper_bug.lean @@ -1,5 +1,5 @@ -/-- info: [grind.cutsat.model] a := 2 -/ -#guard_msgs (info) in +/-- trace: [grind.cutsat.model] a := 2 -/ +#guard_msgs (trace) in set_option trace.grind.cutsat.model true in example (a b : Int) : a ≤ 5 → a ≠ 4 → 2 ∣ a → False := by (fail_if_success grind); sorry diff --git a/tests/lean/run/grind_diseq_api.lean b/tests/lean/run/grind_diseq_api.lean index 7429dae31981..a7bc7a52f70f 100644 --- a/tests/lean/run/grind_diseq_api.lean +++ b/tests/lean/run/grind_diseq_api.lean @@ -23,58 +23,58 @@ def fallback : Fallback := do set_option trace.Meta.debug true /-- -info: [Meta.debug] Lean.Grind.ne_of_ne_of_eq_right h_2 (Lean.Grind.ne_of_ne_of_eq_left h (Ne.symm h_1)) : a ≠ b +trace: [Meta.debug] Lean.Grind.ne_of_ne_of_eq_right h_2 (Lean.Grind.ne_of_ne_of_eq_left h (Ne.symm h_1)) : a ≠ b -/ -#guard_msgs (info) in +#guard_msgs (trace) in example (x y : Nat) : a = x → y ≠ x → b = y → False := by grind on_failure fallback /-- -info: [Meta.debug] Lean.Grind.ne_of_ne_of_eq_right h_2 (Lean.Grind.ne_of_ne_of_eq_left h h_1) : a ≠ b +trace: [Meta.debug] Lean.Grind.ne_of_ne_of_eq_right h_2 (Lean.Grind.ne_of_ne_of_eq_left h h_1) : a ≠ b -/ -#guard_msgs (info) in +#guard_msgs (trace) in example (x y : Nat) : a = x → x ≠ y → b = y → False := by grind on_failure fallback /-- -info: [Meta.debug] Lean.Grind.ne_of_ne_of_eq_right h_3 (Lean.Grind.ne_of_ne_of_eq_left (Eq.trans h (Eq.symm h_1)) h_2) : a ≠ b +trace: [Meta.debug] Lean.Grind.ne_of_ne_of_eq_right h_3 
(Lean.Grind.ne_of_ne_of_eq_left (Eq.trans h (Eq.symm h_1)) h_2) : a ≠ b -/ -#guard_msgs (info) in +#guard_msgs (trace) in example (x y z : Nat) : a = x → z = x → z ≠ y → b = y → False := by grind on_failure fallback -/-- info: [Meta.debug] Lean.Grind.ne_of_ne_of_eq_left h (Ne.symm h_1) : a ≠ b -/ -#guard_msgs (info) in +/-- trace: [Meta.debug] Lean.Grind.ne_of_ne_of_eq_left h (Ne.symm h_1) : a ≠ b -/ +#guard_msgs (trace) in example (x : Nat) : a = x → b ≠ x → False := by grind on_failure fallback -/-- info: [Meta.debug] Lean.Grind.ne_of_ne_of_eq_left h h_1 : a ≠ b -/ -#guard_msgs (info) in +/-- trace: [Meta.debug] Lean.Grind.ne_of_ne_of_eq_left h h_1 : a ≠ b -/ +#guard_msgs (trace) in example (x : Nat) : a = x → x ≠ b → False := by grind on_failure fallback -/-- info: [Meta.debug] Lean.Grind.ne_of_ne_of_eq_right h h_1 : a ≠ b -/ -#guard_msgs (info) in +/-- trace: [Meta.debug] Lean.Grind.ne_of_ne_of_eq_right h h_1 : a ≠ b -/ +#guard_msgs (trace) in example (x : Nat) : b = x → a ≠ x → False := by grind on_failure fallback -/-- info: [Meta.debug] Lean.Grind.ne_of_ne_of_eq_right h (Ne.symm h_1) : a ≠ b -/ -#guard_msgs (info) in +/-- trace: [Meta.debug] Lean.Grind.ne_of_ne_of_eq_right h (Ne.symm h_1) : a ≠ b -/ +#guard_msgs (trace) in example (x : Nat) : b = x → x ≠ a → False := by grind on_failure fallback -/-- info: [Meta.debug] Lean.Grind.ne_of_ne_of_eq_left h (Ne.symm h_1) : a ≠ b -/ -#guard_msgs (info) in +/-- trace: [Meta.debug] Lean.Grind.ne_of_ne_of_eq_left h (Ne.symm h_1) : a ≠ b -/ +#guard_msgs (trace) in example (x : Nat) : a = x → b ≠ x → False := by grind on_failure fallback -/-- info: [Meta.debug] h : ¬a = b -/ -#guard_msgs (info) in +/-- trace: [Meta.debug] h : ¬a = b -/ +#guard_msgs (trace) in example : a ≠ b → False := by grind on_failure fallback -/-- info: [Meta.debug] Ne.symm h : a ≠ b -/ -#guard_msgs (info) in +/-- trace: [Meta.debug] Ne.symm h : a ≠ b -/ +#guard_msgs (trace) in example : b ≠ a → False := by grind on_failure fallback diff --git a/tests/lean/run/grind_ematch1.lean b/tests/lean/run/grind_ematch1.lean index 907015c794f7..330ae1c13d35 100644 --- a/tests/lean/run/grind_ematch1.lean +++ b/tests/lean/run/grind_ematch1.lean @@ -27,10 +27,10 @@ set_option trace.grind.ematch.instance true attribute [grind =] Array.getElem_set_ne /-- -info: [grind.ematch.instance] Array.size_set: (as.set i v ⋯).size = as.size +trace: [grind.ematch.instance] Array.size_set: (as.set i v ⋯).size = as.size [grind.ematch.instance] Array.getElem_set_ne: ∀ (pj : j < as.size), i ≠ j → (as.set i v ⋯)[j] = as[j] -/ -#guard_msgs (info) in +#guard_msgs (trace) in example (as bs cs : Array α) (v : α) (i : Nat) (h₁ : i < as.size) @@ -49,15 +49,15 @@ theorem Rtrans (a b c : Nat) : R a b → R b c → R a c := sorry grind_pattern Rtrans => R a b, R b c /-- -info: [grind.ematch.instance] Rtrans: R a b → R b c → R a c +trace: [grind.ematch.instance] Rtrans: R a b → R b c → R a c -/ -#guard_msgs (info) in +#guard_msgs (trace) in example : R a b → R b c → R a c := by grind /-- -info: [grind.ematch.instance] Rtrans: R a d → R d e → R a e +trace: [grind.ematch.instance] Rtrans: R a d → R d e → R a e [grind.ematch.instance] Rtrans: R c d → R d e → R c e [grind.ematch.instance] Rtrans: R b c → R c d → R b d [grind.ematch.instance] Rtrans: R a b → R b c → R a c @@ -67,7 +67,7 @@ info: [grind.ematch.instance] Rtrans: R a d → R d e → R a e [grind.ematch.instance] Rtrans: R a b → R b d → R a d [grind.ematch.instance] Rtrans: R b c → R c e → R b e -/ -#guard_msgs (info) in +#guard_msgs (trace) in example : R a b → R 
b c → R c d → R d e → R a d := by grind @@ -85,7 +85,7 @@ error: `@[grind →] theorem using_grind_fwd.StransBad` failed to find patterns set_option trace.grind.debug.ematch.pattern true in /-- -info: [grind.debug.ematch.pattern] place: S a b ∨ R a b +trace: [grind.debug.ematch.pattern] place: S a b ∨ R a b [grind.debug.ematch.pattern] collect: S a b ∨ R a b [grind.debug.ematch.pattern] arg: S a b, support: false [grind.debug.ematch.pattern] collect: S a b @@ -104,13 +104,13 @@ info: [grind.debug.ematch.pattern] place: S a b ∨ R a b [grind.debug.ematch.pattern] found full coverage [grind.ematch.pattern] Strans: [S #4 #3, S #3 #2] -/ -#guard_msgs (info) in +#guard_msgs (trace) in @[grind→] theorem Strans (a b c : Nat) : S a b ∨ R a b → S b c → S a c := sorry /-- -info: [grind.ematch.instance] Strans: S a b ∨ R a b → S b c → S a c +trace: [grind.ematch.instance] Strans: S a b ∨ R a b → S b c → S a c -/ -#guard_msgs (info) in +#guard_msgs (trace) in example : S a b → S b c → S a c := by grind @@ -122,14 +122,14 @@ opaque P : Nat → Prop opaque Q : Nat → Prop opaque f : Nat → Nat → Nat -/-- info: [grind.ematch.pattern] pqf: [f #2 #1] -/ -#guard_msgs (info) in +/-- trace: [grind.ematch.pattern] pqf: [f #2 #1] -/ +#guard_msgs (trace) in @[grind←] theorem pqf : Q x → P (f x y) := sorry /-- -info: [grind.ematch.instance] pqf: Q a → P (f a b) +trace: [grind.ematch.instance] pqf: Q a → P (f a b) -/ -#guard_msgs (info) in +#guard_msgs (trace) in example : Q 0 → Q 1 → Q 2 → Q 3 → ¬ P (f a b) → a = 1 → False := by grind @@ -148,18 +148,18 @@ error: `@[grind →] theorem using_grind_fwd2.pqfBad` failed to find patterns in @[grind→] theorem pqfBad : Q x → P (f x y) := sorry /-- -info: [grind.ematch.pattern] pqf: [Q #1] +trace: [grind.ematch.pattern] pqf: [Q #1] -/ -#guard_msgs (info) in +#guard_msgs (trace) in @[grind→] theorem pqf : Q x → P (f x x) := sorry /-- -info: [grind.ematch.instance] pqf: Q 3 → P (f 3 3) +trace: [grind.ematch.instance] pqf: Q 3 → P (f 3 3) [grind.ematch.instance] pqf: Q 2 → P (f 2 2) [grind.ematch.instance] pqf: Q 1 → P (f 1 1) [grind.ematch.instance] pqf: Q 0 → P (f 0 0) -/ -#guard_msgs (info) in +#guard_msgs (trace) in example : Q 0 → Q 1 → Q 2 → Q 3 → ¬ P (f a a) → a = 1 → False := by grind @@ -184,9 +184,9 @@ error: `@[grind ←] theorem using_grind_mixed.pqBad2` failed to find patterns i /-- -info: [grind.ematch.pattern] pqBad: [Q #3 #1, P #3 #2] +trace: [grind.ematch.pattern] pqBad: [Q #3 #1, P #3 #2] -/ -#guard_msgs (info) in +#guard_msgs (trace) in @[grind] theorem pqBad : P x y → Q x z := sorry example : P a b → Q a c := by @@ -201,9 +201,9 @@ opaque f : Nat → Nat opaque g : Nat → Nat → Nat /-- -info: [grind.ematch.pattern] fq: [g #0 (f #0)] +trace: [grind.ematch.pattern] fq: [g #0 (f #0)] -/ -#guard_msgs (info) in +#guard_msgs (trace) in @[grind =_] theorem fq : f x = g x (f x) := sorry @@ -215,10 +215,10 @@ opaque f : Nat → Nat opaque g : Nat → Nat → Nat /-- -info: [grind.ematch.pattern] fq: [f #0] +trace: [grind.ematch.pattern] fq: [f #0] [grind.ematch.pattern] fq: [g #0 (g #0 #0)] -/ -#guard_msgs (info) in +#guard_msgs (trace) in @[grind _=_] theorem fq : f x = g x (g x x) := sorry diff --git a/tests/lean/run/grind_ematch2.lean b/tests/lean/run/grind_ematch2.lean index 140e0d62ce68..097ce62a4333 100644 --- a/tests/lean/run/grind_ematch2.lean +++ b/tests/lean/run/grind_ematch2.lean @@ -43,14 +43,14 @@ example (as bs cs : Array α) (v₁ v₂ : α) grind /-- -info: [grind.ematch.instance] Array.size_set: (cs.set i₃ v₃ ⋯).size = cs.size +trace: [grind.ematch.instance] 
Array.size_set: (cs.set i₃ v₃ ⋯).size = cs.size [grind.ematch.instance] Array.size_set: (bs.set i₂ v₂ ⋯).size = bs.size [grind.ematch.instance] Array.size_set: (as.set i₁ v₁ ⋯).size = as.size [grind.ematch.instance] Array.getElem_set_ne: ∀ (pj : j < cs.size), i₃ ≠ j → (cs.set i₃ v₃ ⋯)[j] = cs[j] [grind.ematch.instance] Array.getElem_set_ne: ∀ (pj : j < bs.size), i₂ ≠ j → (bs.set i₂ v₂ ⋯)[j] = bs[j] [grind.ematch.instance] Array.getElem_set_ne: ∀ (pj : j < as.size), i₁ ≠ j → (as.set i₁ v₁ ⋯)[j] = as[j] -/ -#guard_msgs (info) in +#guard_msgs (trace) in example (as bs cs ds : Array α) (v₁ v₂ v₃ : α) (i₁ i₂ i₃ j : Nat) (h₁ : i₁ < as.size) @@ -69,8 +69,8 @@ opaque f (a b : α) : α := a @[grind =] theorem fx : f x (f x x) = x := sorry /-- -info: [grind.ematch.instance] fx: f a (f a a) = a +trace: [grind.ematch.instance] fx: f a (f a a) = a -/ -#guard_msgs (info) in +#guard_msgs (trace) in example : a = b₁ → c = f b₁ b₂ → f a c ≠ a → a = b₂ → False := by grind diff --git a/tests/lean/run/grind_ematch_patterns.lean b/tests/lean/run/grind_ematch_patterns.lean index bbbaef764ff1..0cb8b319bfba 100644 --- a/tests/lean/run/grind_ematch_patterns.lean +++ b/tests/lean/run/grind_ematch_patterns.lean @@ -3,10 +3,10 @@ def replicate : (n : Nat) → (a : α) → List α | n+1, a => a :: replicate n a /-- -info: [grind.ematch.pattern] replicate.eq_1: [@replicate #1 `[0] #0] +trace: [grind.ematch.pattern] replicate.eq_1: [@replicate #1 `[0] #0] [grind.ematch.pattern] replicate.eq_2: [@replicate #2 (#0 + 1) #1] -/ -#guard_msgs (info) in +#guard_msgs (trace) in set_option trace.grind.ematch.pattern true in attribute [grind] replicate diff --git a/tests/lean/run/grind_eq.lean b/tests/lean/run/grind_eq.lean index 28ba3f95edc6..c3163306c4b6 100644 --- a/tests/lean/run/grind_eq.lean +++ b/tests/lean/run/grind_eq.lean @@ -14,12 +14,12 @@ set_option trace.grind.ematch.instance true set_option trace.grind.assert true /-- -info: [grind.assert] f (y + 1) = a +trace: [grind.assert] f (y + 1) = a [grind.assert] ¬a = g (f y) [grind.ematch.instance] f.eq_2: f y.succ = g (f y) [grind.assert] f (y + 1) = g (f y) -/ -#guard_msgs (info) in +#guard_msgs (trace) in example : f (y + 1) = a → a = g (f y):= by grind @@ -29,7 +29,7 @@ example : f (y + 1) = a → a = g (f y):= by | x::xs => x :: app xs ys /-- -info: [grind.assert] app [1, 2] ys = xs +trace: [grind.assert] app [1, 2] ys = xs [grind.assert] ¬xs = 1 :: 2 :: ys [grind.ematch.instance] app.eq_2: app [1, 2] ys = 1 :: app [2] ys [grind.assert] app [1, 2] ys = 1 :: app [2] ys @@ -38,7 +38,7 @@ info: [grind.assert] app [1, 2] ys = xs [grind.ematch.instance] app.eq_1: app [] ys = ys [grind.assert] app [] ys = ys -/ -#guard_msgs (info) in +#guard_msgs (trace) in example : app [1, 2] ys = xs → xs = 1::2::ys := by grind @@ -48,12 +48,12 @@ opaque q : Nat → Prop @[grind =] theorem pq : p x x ↔ q x := by sorry /-- -info: [grind.assert] p a a +trace: [grind.assert] p a a [grind.assert] ¬q a [grind.ematch.instance] pq: p a a ↔ q a [grind.assert] p a a = q a -/ -#guard_msgs (info) in +#guard_msgs (trace) in example : p a a → q a := by grind @@ -65,7 +65,7 @@ theorem appV_assoc (a : Vector α n) (b : Vector α m) (c : Vector α n') : HEq (appV a (appV b c)) (appV (appV a b) c) := sorry /-- -info: [grind.assert] x1 = appV a_2 b +trace: [grind.assert] x1 = appV a_2 b [grind.assert] x2 = appV x1 c [grind.assert] x3 = appV b c [grind.assert] x4 = appV a_2 x3 @@ -73,6 +73,6 @@ info: [grind.assert] x1 = appV a_2 b [grind.ematch.instance] appV_assoc: HEq (appV a_2 (appV b c)) (appV (appV a_2 b) c) 
[grind.assert] HEq (appV a_2 (appV b c)) (appV (appV a_2 b) c) -/ -#guard_msgs (info) in +#guard_msgs (trace) in example : x1 = appV a b → x2 = appV x1 c → x3 = appV b c → x4 = appV a x3 → HEq x2 x4 := by grind diff --git a/tests/lean/run/grind_eq_bwd.lean b/tests/lean/run/grind_eq_bwd.lean index 46b3f3db103e..fc88bc13db49 100644 --- a/tests/lean/run/grind_eq_bwd.lean +++ b/tests/lean/run/grind_eq_bwd.lean @@ -1,3 +1,5 @@ +set_option grind.warning false + theorem dummy (x : Nat) : x = x := rfl @@ -17,7 +19,7 @@ def one : α := sorry theorem inv_eq {a b : α} (w : mul a b = one) : inv a = b := sorry /-- -info: [grind.ematch.pattern] inv_eq: [@Lean.Grind.eqBwdPattern `[α] (inv #2) #1] +trace: [grind.ematch.pattern] inv_eq: [@Lean.Grind.eqBwdPattern `[α] (inv #2) #1] -/ #guard_msgs in set_option trace.grind.ematch.pattern true in @@ -52,10 +54,10 @@ example (s : S) : a ≠ s.f false → a = inv (s.f true) → False := by grind /-- -info: [grind.ematch.instance] inv_eq: mul (s.f true) (s.f false) = one → inv (s.f true) = s.f false +trace: [grind.ematch.instance] inv_eq: mul (s.f true) (s.f false) = one → inv (s.f true) = s.f false [grind.ematch.instance] S.h: mul (s.f true) (s.f false) = one -/ -#guard_msgs (info) in +#guard_msgs (trace) in set_option trace.grind.ematch.instance true in example (s : S) : inv (s.f true) = s.f false := by grind diff --git a/tests/lean/run/grind_eq_pattern.lean b/tests/lean/run/grind_eq_pattern.lean index 9921bd99d9a3..f4da2c60a03d 100644 --- a/tests/lean/run/grind_eq_pattern.lean +++ b/tests/lean/run/grind_eq_pattern.lean @@ -1,32 +1,32 @@ reset_grind_attrs% /-- -info: [grind.ematch.pattern] List.append_ne_nil_of_left_ne_nil: [@HAppend.hAppend (List #3) (List _) (List _) _ #2 #0] +trace: [grind.ematch.pattern] List.append_ne_nil_of_left_ne_nil: [@HAppend.hAppend (List #3) (List _) (List _) _ #2 #0] -/ -#guard_msgs (info) in +#guard_msgs (trace) in set_option trace.grind.ematch.pattern true in attribute [grind] List.append_ne_nil_of_left_ne_nil /-- -info: [grind.ematch.pattern] List.append_ne_nil_of_right_ne_nil: [@HAppend.hAppend (List #3) (List _) (List _) _ #1 #2] +trace: [grind.ematch.pattern] List.append_ne_nil_of_right_ne_nil: [@HAppend.hAppend (List #3) (List _) (List _) _ #1 #2] -/ -#guard_msgs (info) in +#guard_msgs (trace) in set_option trace.grind.ematch.pattern true in attribute [grind] List.append_ne_nil_of_right_ne_nil -/-- info: [grind.ematch.pattern] List.getLast?_eq_some_iff: [@List.getLast? #2 #1, @some _ #0] -/ -#guard_msgs (info) in +/-- trace: [grind.ematch.pattern] List.getLast?_eq_some_iff: [@List.getLast? #2 #1, @some _ #0] -/ +#guard_msgs (trace) in set_option trace.grind.ematch.pattern true in attribute [grind =] List.getLast?_eq_some_iff /-- -info: [grind.assert] xs.getLast? = b? +trace: [grind.assert] xs.getLast? = b? [grind.assert] b? = some 10 [grind.assert] xs = [] [grind.assert] (xs.getLast? = some 10) = ∃ ys, xs = ys ++ [10] [grind.assert] xs = w ++ [10] [grind.assert] ¬w ++ [10] = [] -/ -#guard_msgs (info) in +#guard_msgs (trace) in set_option trace.grind.assert true in example (xs : List Nat) : xs.getLast? = b? → b? 
= some 10 → xs ≠ [] := by grind diff --git a/tests/lean/run/grind_erase_attr.lean b/tests/lean/run/grind_erase_attr.lean index 94e2b914ac21..13d03834e4a2 100644 --- a/tests/lean/run/grind_erase_attr.lean +++ b/tests/lean/run/grind_erase_attr.lean @@ -13,10 +13,10 @@ attribute [-grind] fthm' set_option trace.grind.assert true /-- -info: [grind.assert] ¬f (f (f a)) = f a +trace: [grind.assert] ¬f (f (f a)) = f a [grind.assert] f (f a) = f a -/ -#guard_msgs (info) in +#guard_msgs (trace) in example : f (f (f a)) = f a := by grind @@ -27,9 +27,9 @@ error: unsolved goals a : Nat ⊢ f (f (f a)) = f a --- -info: [grind.assert] ¬f (f (f a)) = f a +trace: [grind.assert] ¬f (f (f a)) = f a -/ -#guard_msgs (info, error) in +#guard_msgs (trace, error) in example : f (f (f a)) = f a := by fail_if_success grind @@ -61,11 +61,11 @@ error: unsolved goals a b : Nat ⊢ g a = b → a = 0 → b = 1 --- -info: [grind.assert] g a = b +trace: [grind.assert] g a = b [grind.assert] a = 0 [grind.assert] ¬b = 1 -/ -#guard_msgs (info, error) in +#guard_msgs (trace, error) in example : g a = b → a = 0 → b = 1 := by fail_if_success grind diff --git a/tests/lean/run/grind_implies.lean b/tests/lean/run/grind_implies.lean index 30007828b4dc..ca350f73f334 100644 --- a/tests/lean/run/grind_implies.lean +++ b/tests/lean/run/grind_implies.lean @@ -2,7 +2,7 @@ set_option trace.grind.eqc true set_option trace.grind.internalize true /-- -info: [grind.internalize] p → q +trace: [grind.internalize] p → q [grind.internalize] p [grind.internalize] q [grind.eqc] (p → q) = True @@ -10,12 +10,12 @@ info: [grind.internalize] p → q [grind.eqc] (p → q) = q [grind.eqc] q = False -/ -#guard_msgs (info) in +#guard_msgs (trace) in example (p q : Prop) : (p → q) → p → q := by grind /-- -info: [grind.internalize] p → q +trace: [grind.internalize] p → q [grind.internalize] p [grind.internalize] q [grind.eqc] (p → q) = True @@ -23,12 +23,12 @@ info: [grind.internalize] p → q [grind.eqc] p = False [grind.eqc] p = True -/ -#guard_msgs (info) in +#guard_msgs (trace) in example (p q : Prop) : (p → q) → ¬q → ¬p := by grind /-- -info: [grind.internalize] (p → q) = r +trace: [grind.internalize] (p → q) = r [grind.internalize] Prop [grind.internalize] p → q [grind.internalize] p @@ -40,13 +40,13 @@ info: [grind.internalize] (p → q) = r [grind.eqc] (p → q) = True [grind.eqc] r = False -/ -#guard_msgs (info) in +#guard_msgs (trace) in example (p q : Prop) : (p → q) = r → ¬p → r := by grind /-- -info: [grind.internalize] (p → q) = r +trace: [grind.internalize] (p → q) = r [grind.internalize] Prop [grind.internalize] p → q [grind.internalize] p @@ -58,12 +58,12 @@ info: [grind.internalize] (p → q) = r [grind.eqc] (p → q) = True [grind.eqc] r = False -/ -#guard_msgs (info) in +#guard_msgs (trace) in example (p q : Prop) : (p → q) = r → q → r := by grind /-- -info: [grind.internalize] (p → q) = r +trace: [grind.internalize] (p → q) = r [grind.internalize] Prop [grind.internalize] p → q [grind.internalize] p @@ -76,12 +76,12 @@ info: [grind.internalize] (p → q) = r [grind.eqc] p = False [grind.eqc] p = True -/ -#guard_msgs (info) in +#guard_msgs (trace) in example (p q : Prop) : (p → q) = r → ¬q → r → ¬p := by grind /-- -info: [grind.internalize] (p → q) = r +trace: [grind.internalize] (p → q) = r [grind.internalize] Prop [grind.internalize] p → q [grind.internalize] p @@ -94,6 +94,6 @@ info: [grind.internalize] (p → q) = r [grind.eqc] p = True [grind.eqc] p = False -/ -#guard_msgs (info) in +#guard_msgs (trace) in example (p q : Prop) : (p → q) = r → ¬q → ¬r → p := 
by grind diff --git a/tests/lean/run/grind_lazy_ite.lean b/tests/lean/run/grind_lazy_ite.lean index 7aafb0df9212..d25eea3da877 100644 --- a/tests/lean/run/grind_lazy_ite.lean +++ b/tests/lean/run/grind_lazy_ite.lean @@ -5,16 +5,16 @@ def f (n : Nat) (m : Nat) := n /-- -info: [grind.ematch.instance] f.eq_def: f 5 m = if 5 < m then f (5 + 1) m + 5 else 5 +trace: [grind.ematch.instance] f.eq_def: f 5 m = if 5 < m then f (5 + 1) m + 5 else 5 -/ -#guard_msgs (info) in +#guard_msgs (trace) in set_option trace.grind.ematch.instance true in example : f 5 m > 0 := by fail_if_success grind (splits := 0) [f.eq_def] sorry -/-- info: [grind.ematch.instance] f.eq_def: f 5 m = if 5 < m then f (5 + 1) m + 5 else 5 -/ -#guard_msgs (info) in +/-- trace: [grind.ematch.instance] f.eq_def: f 5 m = if 5 < m then f (5 + 1) m + 5 else 5 -/ +#guard_msgs (trace) in set_option trace.grind.ematch.instance true in example : f 5 m > 0 := by grind (splits := 1) [f.eq_def] diff --git a/tests/lean/run/grind_match1.lean b/tests/lean/run/grind_match1.lean index b507381a8554..6a72871ade8c 100644 --- a/tests/lean/run/grind_match1.lean +++ b/tests/lean/run/grind_match1.lean @@ -11,7 +11,7 @@ set_option trace.grind.split.candidate true set_option trace.grind.split.resolved true /-- -info: [grind.assert] (match as, bs with +trace: [grind.assert] (match as, bs with | [], x => bs | head :: head_1 :: tail, [] => [] | x :: xs, ys => x :: g xs ys) = @@ -34,7 +34,7 @@ info: [grind.assert] (match as, bs with | head :: head_1 :: tail, [] => [] | x :: xs, ys => x :: g xs ys -/ -#guard_msgs (info) in +#guard_msgs (trace) in example (f : Nat → List Nat) : g as bs = d → bs = [] → a₁ :: f 0 = as → f 0 = a₂ :: f 1 → d = [] := by unfold g grind diff --git a/tests/lean/run/grind_match2.lean b/tests/lean/run/grind_match2.lean index d54abd8276c6..108c4fa0c232 100644 --- a/tests/lean/run/grind_match2.lean +++ b/tests/lean/run/grind_match2.lean @@ -17,11 +17,11 @@ def h (as : List Nat) := | _::_::_ => 3 /-- -info: [grind] closed `grind.1` +trace: [grind] closed `grind.1` [grind] closed `grind.2` [grind] closed `grind.3` -/ -#guard_msgs (info) in +#guard_msgs (trace) in set_option trace.grind true in example : h as ≠ 0 := by grind [h.eq_def] diff --git a/tests/lean/run/grind_mbtc_1.lean b/tests/lean/run/grind_mbtc_1.lean index 7a466b7a006c..ce5b32f9405f 100644 --- a/tests/lean/run/grind_mbtc_1.lean +++ b/tests/lean/run/grind_mbtc_1.lean @@ -9,11 +9,11 @@ example (f : Int → Int) (x : Int) -- and we have an invalid counterexample where `x := 1`, -- but `f x` and `f 1` have different assignments. /-- -info: [grind.cutsat.model] x := 1 +trace: [grind.cutsat.model] x := 1 [grind.cutsat.model] f x := 2 [grind.cutsat.model] f 1 := 5 -/ -#guard_msgs (info) in +#guard_msgs (trace) in set_option trace.grind.cutsat.model true in example (f : Int → Int) (x : Int) : 0 ≤ x → x ≠ 0 → x ≤ 1 → f x = 2 → f 1 = 2 := by @@ -21,11 +21,11 @@ example (f : Int → Int) (x : Int) sorry /-- -info: [grind.cutsat.model] x := 2 +trace: [grind.cutsat.model] x := 2 [grind.cutsat.model] f x := 2 [grind.cutsat.model] f 1 := 5 -/ -#guard_msgs (info) in +#guard_msgs (trace) in set_option trace.grind.cutsat.model true in example (f : Int → Int) (x : Int) : 0 ≤ x → x ≠ 0 → x ≤ 3 → f x = 2 → f 1 = 2 := by @@ -46,21 +46,21 @@ example (f : Nat → Nat → Nat) (x y : Nat) -- `b` must not be `2`. Otherwise, `f (b+1)` and `f 3` must be equal. 
-/-- info: [grind.cutsat.model] b := 3 -/ -#guard_msgs (info) in +/-- trace: [grind.cutsat.model] b := 3 -/ +#guard_msgs (trace) in set_option trace.grind.cutsat.model true in example (f : Int → α) (a b : Int) : b > 1 → f (b + 1) = x → f 3 = y → x = y := by (fail_if_success grind); sorry -- `b` must not be `2`. Otherwise, `f (b+1)` and `f 3` must be equal. /-- -info: [grind.cutsat.model] x := 7 +trace: [grind.cutsat.model] x := 7 [grind.cutsat.model] y := 8 [grind.cutsat.model] b := 3 [grind.cutsat.model] f 3 := 8 [grind.cutsat.model] f (b + 1) := 7 -/ -#guard_msgs (info) in +#guard_msgs (trace) in set_option trace.grind.cutsat.model true in example (f : Int → Int) (a b : Int) : b > 1 → f (b + 1) = x → f 3 = y → x = y := by (fail_if_success grind); sorry diff --git a/tests/lean/run/grind_mvar.lean b/tests/lean/run/grind_mvar.lean index d8fd669659df..e6972526a8fc 100644 --- a/tests/lean/run/grind_mvar.lean +++ b/tests/lean/run/grind_mvar.lean @@ -5,7 +5,7 @@ set_option grind.warning false attribute [grind →] Array.eq_empty_of_append_eq_empty eq_nil_of_length_eq_zero attribute [grind] Vector.getElem?_append getElem?_dropLast -#guard_msgs (info) in -- should not report any issues +#guard_msgs (trace) in -- should not report any issues set_option trace.grind.issues true theorem dropLast_concat : dropLast (l₁ ++ [b]) = l₁ := by fail_if_success grind (gen := 6) diff --git a/tests/lean/run/grind_nested_proofs.lean b/tests/lean/run/grind_nested_proofs.lean index a42f9781b2d0..9cd43963990c 100644 --- a/tests/lean/run/grind_nested_proofs.lean +++ b/tests/lean/run/grind_nested_proofs.lean @@ -22,17 +22,17 @@ detect equalities between array access terms. -/ /-- -info: [Meta.debug] [‹i < a.toList.length›, ‹j < a.toList.length›, ‹j < b.toList.length›] +trace: [Meta.debug] [‹i < a.toList.length›, ‹j < a.toList.length›, ‹j < b.toList.length›] [Meta.debug] [a[i], b[j], a[j]] -/ -#guard_msgs (info) in +#guard_msgs (trace) in example (i j : Nat) (a b : Array Nat) (h1 : j < a.size) (h : j < b.size) (h2 : i ≤ j) : a[i] < a[j] + b[j] → i = j → a = b → False := by grind -mbtc on_failure fallback /-- -info: [Meta.debug] [‹i < a.toList.length›, ‹j < a.toList.length›, ‹j < b.toList.length›] +trace: [Meta.debug] [‹i < a.toList.length›, ‹j < a.toList.length›, ‹j < b.toList.length›] [Meta.debug] [a[i], a[j]] -/ -#guard_msgs (info) in +#guard_msgs (trace) in example (i j : Nat) (a b : Array Nat) (h1 : j < a.size) (h : j < b.size) (h2 : i ≤ j) : a[i] < a[j] + b[j] → i = j → False := by grind -mbtc on_failure fallback diff --git a/tests/lean/run/grind_norm_levels.lean b/tests/lean/run/grind_norm_levels.lean index 73f86e08fbf7..f692c87b68be 100644 --- a/tests/lean/run/grind_norm_levels.lean +++ b/tests/lean/run/grind_norm_levels.lean @@ -11,8 +11,8 @@ def fallback : Fallback := do -- `grind` final state must contain only two `g`-applications set_option trace.Meta.debug true in /-- -info: [Meta.debug] [g (a, b), g (g (a, b))] +trace: [Meta.debug] [g (a, b), g (g (a, b))] -/ -#guard_msgs (info) in +#guard_msgs (trace) in example {β : Type v} {α : Type u} (a c : α) (b d : β) : g.{max u v + 1} (a, b) = (c, d) → g (g.{max (u+1) (v+1)} (a, b)) = (c, d) → False := by grind on_failure fallback diff --git a/tests/lean/run/grind_offset.lean b/tests/lean/run/grind_offset.lean index 1de16dc99609..d8226fa89520 100644 --- a/tests/lean/run/grind_offset.lean +++ b/tests/lean/run/grind_offset.lean @@ -9,52 +9,52 @@ set_option trace.grind.ematch.pattern true set_option trace.grind.ematch.instance true set_option trace.grind.assert true 
-/-- info: [grind.ematch.pattern] f.eq_2: [f (#0 + 1)] -/ +/-- trace: [grind.ematch.pattern] f.eq_2: [f (#0 + 1)] -/ #guard_msgs in grind_pattern f.eq_2 => f (x + 1) /-- -info: [grind.assert] f (y + 1) = a +trace: [grind.assert] f (y + 1) = a [grind.assert] ¬a = g (f y) [grind.ematch.instance] f.eq_2: f y.succ = g (f y) [grind.assert] f (y + 1) = g (f y) -/ -#guard_msgs (info) in +#guard_msgs (trace) in example : f (y + 1) = a → a = g (f y) := by grind /-- -info: [grind.assert] f 1 = a +trace: [grind.assert] f 1 = a [grind.assert] ¬a = g (f 0) [grind.ematch.instance] f.eq_2: f (Nat.succ 0) = g (f 0) [grind.assert] f 1 = g (f 0) -/ -#guard_msgs (info) in +#guard_msgs (trace) in example : f 1 = a → a = g (f 0) := by grind /-- -info: [grind.assert] f 10 = a +trace: [grind.assert] f 10 = a [grind.assert] ¬a = g (f 9) [grind.ematch.instance] f.eq_2: f (Nat.succ 8) = g (f 8) [grind.ematch.instance] f.eq_2: f (Nat.succ 9) = g (f 9) [grind.assert] f 9 = g (f 8) [grind.assert] f 10 = g (f 9) -/ -#guard_msgs (info) in +#guard_msgs (trace) in example : f 10 = a → a = g (f 9) := by grind /-- -info: [grind.assert] f (c + 2) = a +trace: [grind.assert] f (c + 2) = a [grind.assert] ¬a = g (g (f c)) [grind.ematch.instance] f.eq_2: f (c + 1).succ = g (f (c + 1)) [grind.assert] f (c + 2) = g (f (c + 1)) [grind.ematch.instance] f.eq_2: f c.succ = g (f c) [grind.assert] f (c + 1) = g (f c) -/ -#guard_msgs (info) in +#guard_msgs (trace) in example : f (c + 2) = a → a = g (g (f c)) := by grind @@ -64,86 +64,86 @@ example : f (c + 2) = a → a = g (g (f c)) := by | 1 => 10 | a+2 => g (foo a) -/-- info: [grind.ematch.pattern] foo.eq_3: [foo (#0 + 2)] -/ +/-- trace: [grind.ematch.pattern] foo.eq_3: [foo (#0 + 2)] -/ #guard_msgs in grind_pattern foo.eq_3 => foo (a_2 + 2) -- The instance is correctly found in the following example. -- TODO: to complete the proof, we need linear arithmetic support to prove that `b + 2 = c + 1`. 
/-- -info: [grind.assert] foo (c + 1) = a +trace: [grind.assert] foo (c + 1) = a [grind.assert] c = b + 1 [grind.assert] ¬a = g (foo b) [grind.ematch.instance] foo.eq_3: foo b.succ.succ = g (foo b) [grind.assert] foo (b + 2) = g (foo b) -/ -#guard_msgs (info) in +#guard_msgs (trace) in example : foo (c + 1) = a → c = b + 1 → a = g (foo b) := by grind set_option trace.grind.assert false /-- -info: [grind.ematch.instance] f.eq_2: f (x + 99).succ = g (f (x + 99)) +trace: [grind.ematch.instance] f.eq_2: f (x + 99).succ = g (f (x + 99)) [grind.ematch.instance] f.eq_2: f (x + 98).succ = g (f (x + 98)) -/ -#guard_msgs (info) in +#guard_msgs (trace) in example : f (x + 100) = a → a = b := by fail_if_success grind (ematch := 2) sorry /-- -info: [grind.ematch.instance] f.eq_2: f (x + 99).succ = g (f (x + 99)) +trace: [grind.ematch.instance] f.eq_2: f (x + 99).succ = g (f (x + 99)) [grind.ematch.instance] f.eq_2: f (x + 98).succ = g (f (x + 98)) [grind.ematch.instance] f.eq_2: f (x + 97).succ = g (f (x + 97)) [grind.ematch.instance] f.eq_2: f (x + 96).succ = g (f (x + 96)) [grind.ematch.instance] f.eq_2: f (x + 95).succ = g (f (x + 95)) -/ -#guard_msgs (info) in +#guard_msgs (trace) in example : f (x + 100) = a → a = b := by fail_if_success grind (ematch := 5) sorry /-- -info: [grind.ematch.instance] f.eq_2: f (x + 99).succ = g (f (x + 99)) +trace: [grind.ematch.instance] f.eq_2: f (x + 99).succ = g (f (x + 99)) [grind.ematch.instance] f.eq_2: f (x + 98).succ = g (f (x + 98)) [grind.ematch.instance] f.eq_2: f (x + 97).succ = g (f (x + 97)) [grind.ematch.instance] f.eq_2: f (x + 96).succ = g (f (x + 96)) -/ -#guard_msgs (info) in +#guard_msgs (trace) in example : f (x + 100) = a → a = b := by fail_if_success grind (ematch := 100) (instances := 4) sorry /-- -info: [grind.ematch.instance] f.eq_2: f (y + 9).succ = g (f (y + 9)) +trace: [grind.ematch.instance] f.eq_2: f (y + 9).succ = g (f (y + 9)) [grind.ematch.instance] f.eq_2: f (x + 99).succ = g (f (x + 99)) [grind.ematch.instance] f.eq_2: f (x + 98).succ = g (f (x + 98)) [grind.ematch.instance] f.eq_2: f (y + 8).succ = g (f (y + 8)) [grind.ematch.instance] f.eq_2: f (y + 7).succ = g (f (y + 7)) -/ -#guard_msgs (info) in +#guard_msgs (trace) in example : f (x + 100) = a → f (y + 10) = c → a = b := by fail_if_success grind (ematch := 100) (instances := 5) sorry /-- -info: [grind.ematch.instance] f.eq_2: f (x + 99).succ = g (f (x + 99)) +trace: [grind.ematch.instance] f.eq_2: f (x + 99).succ = g (f (x + 99)) [grind.ematch.instance] f.eq_2: f (x + 98).succ = g (f (x + 98)) -/ -#guard_msgs (info) in +#guard_msgs (trace) in example : f (x + 100) = a → a = b := by fail_if_success grind (gen := 2) sorry /-- -info: [grind.ematch.instance] f.eq_2: f (x + 99).succ = g (f (x + 99)) +trace: [grind.ematch.instance] f.eq_2: f (x + 99).succ = g (f (x + 99)) [grind.ematch.instance] f.eq_2: f (x + 98).succ = g (f (x + 98)) [grind.ematch.instance] f.eq_2: f (x + 97).succ = g (f (x + 97)) [grind.ematch.instance] f.eq_2: f (x + 96).succ = g (f (x + 96)) [grind.ematch.instance] f.eq_2: f (x + 95).succ = g (f (x + 95)) -/ -#guard_msgs (info) in +#guard_msgs (trace) in example : f (x + 100) = a → a = b := by fail_if_success grind (gen := 5) sorry diff --git a/tests/lean/run/grind_offset_cnstr.lean b/tests/lean/run/grind_offset_cnstr.lean index f9cd7d3e321e..c90d554bf57f 100644 --- a/tests/lean/run/grind_offset_cnstr.lean +++ b/tests/lean/run/grind_offset_cnstr.lean @@ -1,7 +1,8 @@ +set_option grind.warning false set_option grind.debug true /-- -info: 
[grind.offset.internalize.term] a1 ↦ #0 +trace: [grind.offset.internalize.term] a1 ↦ #0 [grind.offset.internalize.term] a2 ↦ #1 [grind.offset.internalize] a1 + 1 ≤ a2 ↦ #0 + 1 ≤ #1 [grind.offset.internalize.term] a3 ↦ #2 @@ -9,7 +10,7 @@ info: [grind.offset.internalize.term] a1 ↦ #0 [grind.offset.internalize.term] a4 ↦ #3 [grind.offset.internalize] a3 ≤ a4 ↦ #2 ≤ #3 -/ -#guard_msgs (info) in +#guard_msgs (trace) in set_option trace.grind.offset.internalize true in example (a1 a2 a3) : a1 + 1 ≤ a2 → a2 ≤ a3 + 2 → a3 ≤ a4 → False := by @@ -17,14 +18,14 @@ example (a1 a2 a3) : sorry /-- -info: [grind.offset.internalize.term] a1 ↦ #0 +trace: [grind.offset.internalize.term] a1 ↦ #0 [grind.offset.internalize.term] a2 ↦ #1 [grind.offset.dist] #0 + 1 ≤ #1 [grind.offset.internalize.term] a3 ↦ #2 [grind.offset.dist] #1 ≤ #2 [grind.offset.dist] #0 + 1 ≤ #2 -/ -#guard_msgs (info) in +#guard_msgs (trace) in set_option trace.grind.offset.internalize.term true in set_option trace.grind.offset.dist true in example (a1 a2 a3 : Nat) : @@ -34,14 +35,14 @@ example (a1 a2 a3 : Nat) : /-- -info: [grind.offset.internalize.term] a1 ↦ #0 +trace: [grind.offset.internalize.term] a1 ↦ #0 [grind.offset.internalize.term] a2 ↦ #1 [grind.offset.dist] #0 + 1 ≤ #1 [grind.offset.internalize.term] a3 ↦ #2 [grind.offset.dist] #1 + 2 ≤ #2 [grind.offset.dist] #0 + 3 ≤ #2 -/ -#guard_msgs (info) in +#guard_msgs (trace) in set_option trace.grind.offset.internalize.term true in set_option trace.grind.offset.dist true in example (a1 a2 a3 : Nat) : @@ -50,14 +51,14 @@ example (a1 a2 a3 : Nat) : sorry /-- -info: [grind.offset.internalize.term] a1 ↦ #0 +trace: [grind.offset.internalize.term] a1 ↦ #0 [grind.offset.internalize.term] a2 ↦ #1 [grind.offset.dist] #0 + 1 ≤ #1 [grind.offset.internalize.term] a3 ↦ #2 [grind.offset.dist] #1 ≤ #2 + 2 [grind.offset.dist] #0 ≤ #2 + 1 -/ -#guard_msgs (info) in +#guard_msgs (trace) in set_option trace.grind.offset.internalize.term true in set_option trace.grind.offset.dist true in example (a1 a2 a3 : Nat) : @@ -66,14 +67,14 @@ example (a1 a2 a3 : Nat) : sorry /-- -info: [grind.offset.internalize.term] a1 ↦ #0 +trace: [grind.offset.internalize.term] a1 ↦ #0 [grind.offset.internalize.term] a2 ↦ #1 [grind.offset.dist] #0 ≤ #1 [grind.offset.internalize.term] a3 ↦ #2 [grind.offset.dist] #1 ≤ #2 [grind.offset.dist] #0 ≤ #2 -/ -#guard_msgs (info) in +#guard_msgs (trace) in set_option trace.grind.offset.internalize.term true in set_option trace.grind.offset.dist true in example (a1 a2 a3 : Nat) : @@ -82,14 +83,14 @@ example (a1 a2 a3 : Nat) : sorry /-- -info: [grind.offset.internalize.term] a1 ↦ #0 +trace: [grind.offset.internalize.term] a1 ↦ #0 [grind.offset.internalize.term] a2 ↦ #1 [grind.offset.dist] #0 ≤ #1 [grind.offset.internalize.term] a3 ↦ #2 [grind.offset.dist] #1 + 2 ≤ #2 [grind.offset.dist] #0 + 2 ≤ #2 -/ -#guard_msgs (info) in +#guard_msgs (trace) in set_option trace.grind.offset.internalize.term true in set_option trace.grind.offset.dist true in example (a1 a2 a3 : Nat) : @@ -98,14 +99,14 @@ example (a1 a2 a3 : Nat) : sorry /-- -info: [grind.offset.internalize.term] a1 ↦ #0 +trace: [grind.offset.internalize.term] a1 ↦ #0 [grind.offset.internalize.term] a2 ↦ #1 [grind.offset.dist] #0 ≤ #1 [grind.offset.internalize.term] a3 ↦ #2 [grind.offset.dist] #1 ≤ #2 + 5 [grind.offset.dist] #0 ≤ #2 + 5 -/ -#guard_msgs (info) in +#guard_msgs (trace) in set_option trace.grind.offset.internalize.term true in set_option trace.grind.offset.dist true in example (a1 a2 a3 : Nat) : @@ -114,14 +115,14 @@ example 
(a1 a2 a3 : Nat) : sorry /-- -info: [grind.offset.internalize.term] a1 ↦ #0 +trace: [grind.offset.internalize.term] a1 ↦ #0 [grind.offset.internalize.term] a2 ↦ #1 [grind.offset.dist] #0 ≤ #1 + 5 [grind.offset.internalize.term] a3 ↦ #2 [grind.offset.dist] #1 ≤ #2 [grind.offset.dist] #0 ≤ #2 + 5 -/ -#guard_msgs (info) in +#guard_msgs (trace) in set_option trace.grind.offset.internalize.term true in set_option trace.grind.offset.dist true in example (a1 a2 a3 : Nat) : @@ -130,14 +131,14 @@ example (a1 a2 a3 : Nat) : sorry /-- -info: [grind.offset.internalize.term] a1 ↦ #0 +trace: [grind.offset.internalize.term] a1 ↦ #0 [grind.offset.internalize.term] a2 ↦ #1 [grind.offset.dist] #0 ≤ #1 + 5 [grind.offset.internalize.term] a3 ↦ #2 [grind.offset.dist] #1 + 2 ≤ #2 [grind.offset.dist] #0 ≤ #2 + 3 -/ -#guard_msgs (info) in +#guard_msgs (trace) in set_option trace.grind.offset.internalize.term true in set_option trace.grind.offset.dist true in example (a1 a2 a3 : Nat) : @@ -146,14 +147,14 @@ example (a1 a2 a3 : Nat) : sorry /-- -info: [grind.offset.internalize.term] a1 ↦ #0 +trace: [grind.offset.internalize.term] a1 ↦ #0 [grind.offset.internalize.term] a2 ↦ #1 [grind.offset.dist] #0 ≤ #1 + 5 [grind.offset.internalize.term] a3 ↦ #2 [grind.offset.dist] #1 ≤ #2 + 2 [grind.offset.dist] #0 ≤ #2 + 7 -/ -#guard_msgs (info) in +#guard_msgs (trace) in set_option trace.grind.offset.internalize.term true in set_option trace.grind.offset.dist true in example (a1 a2 a3 : Nat) : @@ -169,14 +170,14 @@ example (a1 a2 a3 : Nat) : sorry /-- -info: [grind.offset.internalize.term] a1 ↦ #0 +trace: [grind.offset.internalize.term] a1 ↦ #0 [grind.offset.internalize.term] a2 ↦ #1 [grind.offset.dist] #0 ≤ #1 + 2 [grind.offset.internalize.term] a3 ↦ #2 [grind.offset.dist] #1 + 3 ≤ #2 [grind.offset.dist] #0 + 1 ≤ #2 -/ -#guard_msgs (info) in +#guard_msgs (trace) in set_option trace.grind.offset.internalize.term true in set_option trace.grind.offset.dist true in example (a1 a2 a3 : Nat) : a1 ≤ a2 + 2 → a2 + 3 ≤ a3 → False := by @@ -184,14 +185,14 @@ example (a1 a2 a3 : Nat) : a1 ≤ a2 + 2 → a2 + 3 ≤ a3 → False := by sorry /-- -info: [grind.offset.internalize.term] a2 ↦ #0 +trace: [grind.offset.internalize.term] a2 ↦ #0 [grind.offset.internalize.term] a1 ↦ #1 [grind.offset.dist] #1 + 3 ≤ #0 [grind.offset.internalize.term] a3 ↦ #2 [grind.offset.dist] #0 + 3 ≤ #2 [grind.offset.dist] #1 + 6 ≤ #2 -/ -#guard_msgs (info) in +#guard_msgs (trace) in set_option trace.grind.offset.internalize.term true in set_option trace.grind.offset.dist true in example (p : Prop) (a1 a2 a3 : Nat) : (p ↔ a2 ≤ a1 + 2) → ¬p → a2 + 3 ≤ a3 → False := by @@ -199,14 +200,14 @@ example (p : Prop) (a1 a2 a3 : Nat) : (p ↔ a2 ≤ a1 + 2) → ¬p → a2 + 3 sorry /-- -info: [grind.offset.internalize.term] a2 ↦ #0 +trace: [grind.offset.internalize.term] a2 ↦ #0 [grind.offset.internalize.term] a1 ↦ #1 [grind.offset.dist] #1 ≤ #0 + 1 [grind.offset.internalize.term] a3 ↦ #2 [grind.offset.dist] #0 + 3 ≤ #2 [grind.offset.dist] #1 + 2 ≤ #2 -/ -#guard_msgs (info) in +#guard_msgs (trace) in set_option trace.grind.offset.internalize.term true in set_option trace.grind.offset.dist true in example (p : Prop) (a1 a2 a3 : Nat) : (p ↔ a2 + 2 ≤ a1) → ¬p → a2 + 3 ≤ a3 → False := by @@ -214,14 +215,14 @@ example (p : Prop) (a1 a2 a3 : Nat) : (p ↔ a2 + 2 ≤ a1) → ¬p → a2 + 3 sorry /-- -info: [grind.offset.internalize.term] a2 ↦ #0 +trace: [grind.offset.internalize.term] a2 ↦ #0 [grind.offset.internalize.term] a1 ↦ #1 [grind.offset.dist] #1 + 1 ≤ #0 [grind.offset.internalize.term] a3 ↦ #2 
[grind.offset.dist] #0 + 3 ≤ #2 [grind.offset.dist] #1 + 4 ≤ #2 -/ -#guard_msgs (info) in +#guard_msgs (trace) in set_option trace.grind.offset.internalize.term true in set_option trace.grind.offset.dist true in example (p : Prop) (a1 a2 a3 : Nat) : (p ↔ a2 ≤ a1) → ¬p → a2 + 3 ≤ a3 → False := by @@ -229,14 +230,14 @@ example (p : Prop) (a1 a2 a3 : Nat) : (p ↔ a2 ≤ a1) → ¬p → a2 + 3 ≤ a sorry /-- -info: [grind.offset.internalize.term] a2 ↦ #0 +trace: [grind.offset.internalize.term] a2 ↦ #0 [grind.offset.internalize.term] a1 ↦ #1 [grind.offset.dist] #1 ≤ #0 [grind.offset.internalize.term] a3 ↦ #2 [grind.offset.dist] #0 + 3 ≤ #2 [grind.offset.dist] #1 + 3 ≤ #2 -/ -#guard_msgs (info) in +#guard_msgs (trace) in set_option trace.grind.offset.internalize.term true in set_option trace.grind.offset.dist true in example (p : Prop) (a1 a2 a3 : Nat) : (p ↔ a2 + 1 ≤ a1) → ¬p → a2 + 3 ≤ a3 → False := by @@ -275,45 +276,45 @@ fun {a4} p a1 a2 a3 => (Nat.lo_lo a2 a3 a4 3 3 h_2 (Nat.of_ro_eq_false a4 a3 2 (Eq.trans (Eq.symm h_3) (eq_false h_1))))))) True.intro) -/ -#guard_msgs (info) in +#guard_msgs in open Lean Grind in #print ex1._proof_1 /-! Propagate `cnstr = False` tests -/ -- The following example is solved by `grind` using constraint propagation and 0 case-splits. -#guard_msgs (info) in +#guard_msgs (trace) in set_option trace.grind.split true in example (p q r s : Prop) (a b : Nat) : a ≤ b → b + 2 ≤ c → (a + 1 ≤ c ↔ p) → (a + 2 ≤ c ↔ s) → (a ≤ c ↔ q) → (a ≤ c + 4 ↔ r) → p ∧ q ∧ r ∧ s := by grind (splits := 0) -- The following example is solved by `grind` using constraint propagation and 0 case-splits. -#guard_msgs (info) in +#guard_msgs (trace) in set_option trace.grind.split true in example (p q : Prop) (a b : Nat) : a ≤ b → b ≤ c → (a ≤ c ↔ p) → (a ≤ c + 1 ↔ q) → p ∧ q := by grind (splits := 0) -- The following example is solved by `grind` using constraint propagation and 0 case-splits. -#guard_msgs (info) in +#guard_msgs (trace) in set_option trace.grind.split true in example (p q : Prop) (a b : Nat) : a ≤ b → b ≤ c + 1 → (a ≤ c + 1 ↔ p) → (a ≤ c + 2 ↔ q) → p ∧ q := by grind (splits := 0) -- The following example is solved by `grind` using constraint propagation and 0 case-splits. -#guard_msgs (info) in +#guard_msgs (trace) in set_option trace.grind.split true in example (p r s : Prop) (a b : Nat) : a ≤ b → b + 2 ≤ c → (c ≤ a ↔ p) → (c ≤ a + 1 ↔ s) → (c + 1 ≤ a ↔ r) → ¬p ∧ ¬r ∧ ¬s := by grind (splits := 0) -- The following example is solved by `grind` using constraint propagation and 0 case-splits. -#guard_msgs (info) in +#guard_msgs (trace) in set_option trace.grind.split true in example (p r : Prop) (a b : Nat) : a ≤ b → b ≤ c → (c + 1 ≤ a ↔ p) → (c + 2 ≤ a + 1 ↔ r) → ¬p ∧ ¬r := by grind (splits := 0) -- The following example is solved by `grind` using constraint propagation and 0 case-splits. -#guard_msgs (info) in +#guard_msgs (trace) in set_option trace.grind.split true in example (p r : Prop) (a b : Nat) : a ≤ b → b ≤ c + 3 → (c + 5 ≤ a ↔ p) → (c + 4 ≤ a ↔ r) → ¬p ∧ ¬r := by grind (splits := 0) @@ -321,37 +322,37 @@ example (p r : Prop) (a b : Nat) : a ≤ b → b ≤ c + 3 → (c + 5 ≤ a ↔ /-! Propagate `cnstr = False` tests, but with different internalization order -/ -- The following example is solved by `grind` using constraint propagation and 0 case-splits. 
-#guard_msgs (info) in +#guard_msgs (trace) in set_option trace.grind.split true in example (p q r s : Prop) (a b : Nat) : (a + 1 ≤ c ↔ p) → (a + 2 ≤ c ↔ s) → (a ≤ c ↔ q) → (a ≤ c + 4 ↔ r) → a ≤ b → b + 2 ≤ c → p ∧ q ∧ r ∧ s := by grind (splits := 0) -- The following example is solved by `grind` using constraint propagation and 0 case-splits. -#guard_msgs (info) in +#guard_msgs (trace) in set_option trace.grind.split true in example (p q : Prop) (a b : Nat) : (a ≤ c ↔ p) → (a ≤ c + 1 ↔ q) → a ≤ b → b ≤ c → p ∧ q := by grind (splits := 0) -- The following example is solved by `grind` using constraint propagation and 0 case-splits. -#guard_msgs (info) in +#guard_msgs (trace) in set_option trace.grind.split true in example (p q : Prop) (a b : Nat) : (a ≤ c + 1 ↔ p) → (a ≤ c + 2 ↔ q) → a ≤ b → b ≤ c + 1 → p ∧ q := by grind (splits := 0) -- The following example is solved by `grind` using constraint propagation and 0 case-splits. -#guard_msgs (info) in +#guard_msgs (trace) in set_option trace.grind.split true in example (p r s : Prop) (a b : Nat) : (c ≤ a ↔ p) → (c ≤ a + 1 ↔ s) → (c + 1 ≤ a ↔ r) → a ≤ b → b + 2 ≤ c → ¬p ∧ ¬r ∧ ¬s := by grind (splits := 0) -- The following example is solved by `grind` using constraint propagation and 0 case-splits. -#guard_msgs (info) in +#guard_msgs (trace) in set_option trace.grind.split true in example (p r : Prop) (a b : Nat) : (c + 1 ≤ a ↔ p) → (c + 2 ≤ a + 1 ↔ r) → a ≤ b → b ≤ c → ¬p ∧ ¬r := by grind (splits := 0) -- The following example is solved by `grind` using constraint propagation and 0 case-splits. -#guard_msgs (info) in +#guard_msgs (trace) in set_option trace.grind.split true in example (p r : Prop) (a b : Nat) : (c + 5 ≤ a ↔ p) → (c + 4 ≤ a ↔ r) → a ≤ b → b ≤ c + 3 → ¬p ∧ ¬r := by grind (splits := 0) @@ -398,7 +399,7 @@ example (a : Nat) : a < 2 → a = 5 → False := by example (a : Nat) : a < 2 → a = b → b = c → c = 5 → False := by grind -#guard_msgs (info) in -- none of the numerals should be internalized by the offset module +#guard_msgs (trace) in -- none of the numerals should be internalized by the offset module set_option trace.grind.offset.internalize true in example (a b c d e : Nat) : a = 1 → b = 2 → c = 3 → d = 4 → e = 5 → a ≠ e := by grind diff --git a/tests/lean/run/grind_offset_model.lean b/tests/lean/run/grind_offset_model.lean index 923500c41947..f9bd77a500e3 100644 --- a/tests/lean/run/grind_offset_model.lean +++ b/tests/lean/run/grind_offset_model.lean @@ -5,54 +5,54 @@ set_option grind.debug.proofs true set_option trace.grind.offset.model true /-- -info: [grind.offset.model] i := 1 +trace: [grind.offset.model] i := 1 [grind.offset.model] j := 0 [grind.offset.model] 「0」 := 0 [grind.offset.model] 「i + 1」 := 2 -/ -#guard_msgs (info) in +#guard_msgs (trace) in example (i j : Nat) (h : i + 1 > j + 1) : g (i+1) j = i + 1 := by fail_if_success grind sorry /-- -info: [grind.offset.model] i := 101 +trace: [grind.offset.model] i := 101 [grind.offset.model] 「0」 := 0 -/ -#guard_msgs (info) in +#guard_msgs (trace) in example (i : Nat) : i ≤ 100 := by fail_if_success grind sorry /-- -info: [grind.offset.model] i := 99 +trace: [grind.offset.model] i := 99 [grind.offset.model] 「0」 := 0 -/ -#guard_msgs (info) in +#guard_msgs (trace) in example (i : Nat) : 100 ≤ i := by fail_if_success grind sorry /-- -info: [grind.offset.model] n := 0 +trace: [grind.offset.model] n := 0 [grind.offset.model] j := 0 [grind.offset.model] i := 99 [grind.offset.model] 「0」 := 0 [grind.offset.model] 「n + 1」 := 1 -/ -#guard_msgs (info) in +#guard_msgs (trace) in example 
(i : Nat) : g (n + 1) m = a → 100 + j ≤ i := by fail_if_success grind sorry /-- -info: [grind.offset.model] n := 0 +trace: [grind.offset.model] n := 0 [grind.offset.model] j := 101 [grind.offset.model] i := 0 [grind.offset.model] 「0」 := 0 [grind.offset.model] 「n + 1」 := 1 -/ -#guard_msgs (info) in +#guard_msgs (trace) in example (i : Nat) : g (n + 1) m = a → j ≤ i + 100 := by fail_if_success grind sorry diff --git a/tests/lean/run/grind_option.lean b/tests/lean/run/grind_option.lean new file mode 100644 index 000000000000..179295be9f44 --- /dev/null +++ b/tests/lean/run/grind_option.lean @@ -0,0 +1,25 @@ +-- This file uses `#guard_msgs` to check which lemmas `grind` is using. +-- This may prove fragile, so remember it is okay to update the expected output if appropriate! +-- Hopefully these will act as regression tests against `grind` activating irrelevant lemmas. + +set_option grind.warning false + +variable [BEq α] {o₁ o₂ o₃ o₄ o₅ : Option α} + +/-- +info: Try this: grind only [Option.or_some, Option.some_or, Option.or_assoc, Option.some_beq_none] +-/ +#guard_msgs in +example : ((o₁.or (o₂.or (some x))).or (o₄.or o₅) == none) = false := by grind? + +/-- info: Try this: grind only [= Nat.min_def, Option.max_some_none, Option.min_some_some] -/ +#guard_msgs in +example : max (some 7) none = min (some 13) (some 7) := by grind? + +/-- info: Try this: grind only [Option.guard_def] -/ +#guard_msgs in +example : Option.guard (· ≤ 7) 3 = some 3 := by grind? + +/-- info: Try this: grind only [Option.mem_bind_iff] -/ +#guard_msgs in +example {x : β} {o : Option α} {f : α → Option β} (h : a ∈ o) (h' : x ∈ f a) : x ∈ o.bind f := by grind? diff --git a/tests/lean/run/grind_pattern1.lean b/tests/lean/run/grind_pattern1.lean index 3f51edf302fc..3f3ba3efbe5e 100644 --- a/tests/lean/run/grind_pattern1.lean +++ b/tests/lean/run/grind_pattern1.lean @@ -1,20 +1,20 @@ set_option trace.grind.ematch.pattern true /-- -info: [grind.ematch.pattern] Array.getElem_push_lt: [@getElem (Array #4) `[Nat] _ _ _ (@Array.push _ #3 #2) #1 _] +trace: [grind.ematch.pattern] Array.getElem_push_lt: [@getElem (Array #4) `[Nat] _ _ _ (@Array.push _ #3 #2) #1 _] -/ #guard_msgs in grind_pattern Array.getElem_push_lt => (xs.push x)[i] /-- -info: [grind.ematch.pattern] List.getElem_attach: [@getElem (List (@Subtype #3 _)) `[Nat] (@Subtype _ _) _ _ (@List.attach _ #2) #1 _] +trace: [grind.ematch.pattern] List.getElem_attach: [@getElem (List (@Subtype #3 _)) `[Nat] (@Subtype _ _) _ _ (@List.attach _ #2) #1 _] -/ #guard_msgs in grind_pattern List.getElem_attach => xs.attach[i] /-- -info: [grind.ematch.pattern] List.mem_concat_self: [@Membership.mem #2 (List _) _ (@HAppend.hAppend (List _) (List _) (List _) _ #1 (@List.cons _ #0 (@List.nil _))) #0] +trace: [grind.ematch.pattern] List.mem_concat_self: [@Membership.mem #2 (List _) _ (@HAppend.hAppend (List _) (List _) (List _) _ #1 (@List.cons _ #0 (@List.nil _))) #0] -/ #guard_msgs in grind_pattern List.mem_concat_self => a ∈ xs ++ [a] @@ -32,7 +32,7 @@ the following theorem parameters cannot be instantiated: i : Nat h : i < xs.size --- -info: [grind.ematch.pattern] Array.getElem_push_lt: [@Array.push #4 #3 #2] +trace: [grind.ematch.pattern] Array.getElem_push_lt: [@Array.push #4 #3 #2] -/ #guard_msgs in grind_pattern Array.getElem_push_lt => (xs.push x) @@ -54,13 +54,13 @@ instance [Boo α β] : Boo (List α) (Array β) where theorem fEq [Foo α β] [Boo α β] (a : List α) : (f a).1 = a := rfl -/-- info: [grind.ematch.pattern] fEq: [@f (List #4) (Array #3) _ _ #0] -/ +/-- trace: 
[grind.ematch.pattern] fEq: [@f (List #4) (Array #3) _ _ #0] -/ #guard_msgs in grind_pattern fEq => f a theorem fEq2 [Foo α β] [Boo α β] (a : List α) (_h : a.length > 5) : (f a).1 = a := rfl -/-- info: [grind.ematch.pattern] fEq2: [@f (List #5) (Array #4) _ _ #1] -/ +/-- trace: [grind.ematch.pattern] fEq2: [@f (List #5) (Array #4) _ _ #1] -/ #guard_msgs in grind_pattern fEq2 => f a @@ -76,7 +76,7 @@ the following theorem parameters cannot be instantiated: β : Type inst✝ : Boo α β --- -info: [grind.ematch.pattern] gEq: [@g (List #3) _ _ #0] +trace: [grind.ematch.pattern] gEq: [@g (List #3) _ _ #0] -/ #guard_msgs in grind_pattern gEq => g a @@ -92,7 +92,7 @@ error: invalid pattern(s) for `hThm1` the following theorem parameters cannot be instantiated: c : Nat --- -info: [grind.ematch.pattern] hThm1: [plus #2 #3] +trace: [grind.ematch.pattern] hThm1: [plus #2 #3] -/ #guard_msgs in grind_pattern hThm1 => plus a b @@ -104,12 +104,12 @@ the following theorem parameters cannot be instantiated: b : Nat h : b > 10 --- -info: [grind.ematch.pattern] hThm1: [plus #2 #1] +trace: [grind.ematch.pattern] hThm1: [plus #2 #1] -/ #guard_msgs in grind_pattern hThm1 => plus a c -/-- info: [grind.ematch.pattern] hThm1: [plus #2 #1, plus #2 #3] -/ +/-- trace: [grind.ematch.pattern] hThm1: [plus #2 #1, plus #2 #3] -/ #guard_msgs in grind_pattern hThm1 => plus a c, plus a b diff --git a/tests/lean/run/grind_pattern2.lean b/tests/lean/run/grind_pattern2.lean index 8ff7817c7a45..72eb4dbd60a6 100644 --- a/tests/lean/run/grind_pattern2.lean +++ b/tests/lean/run/grind_pattern2.lean @@ -16,14 +16,14 @@ grind_pattern contains_insert => contains (insertElem s a) a set_option trace.grind.ematch true set_option trace.grind.ematch.pattern true -/-- info: [grind.ematch] activated `contains_insert`, [@contains #3 (@insertElem _ #2 #1 #0) #0] -/ -#guard_msgs (info) in +/-- trace: [grind.ematch] activated `contains_insert`, [@contains #3 (@insertElem _ #2 #1 #0) #0] -/ +#guard_msgs (trace) in example [DecidableEq α] (s₁ s₂ : Set α) (a₁ a₂ : α) : s₂ = insertElem s₁ a₁ → a₁ = a₂ → contains s₂ a₂ := by grind -/-- info: [grind.ematch] activated `contains_insert`, [@contains #3 (@insertElem _ #2 #1 #0) #0] -/ -#guard_msgs (info) in +/-- trace: [grind.ematch] activated `contains_insert`, [@contains #3 (@insertElem _ #2 #1 #0) #0] -/ +#guard_msgs (trace) in example [DecidableEq α] (s₁ s₂ : Set α) (a₁ a₂ : α) : ¬ contains s₂ a₂ → s₂ = insertElem s₁ a₁ → a₁ = a₂ → False := by grind @@ -34,13 +34,13 @@ def foo (x : List Nat) (y : List Nat) := x ++ y ++ x theorem fooThm : foo x [a, b] = x ++ [a, b] ++ x := rfl -/-- info: [grind.ematch.pattern] fooThm: [foo #0 `[[a, b]]] -/ +/-- trace: [grind.ematch.pattern] fooThm: [foo #0 `[[a, b]]] -/ #guard_msgs in grind_pattern fooThm => foo x [a, b] /-- -info: [grind.internalize] foo x y +trace: [grind.internalize] foo x y [grind.internalize] [a, b] [grind.internalize] Nat [grind.internalize] a @@ -52,7 +52,7 @@ info: [grind.internalize] foo x y [grind.internalize] y [grind.internalize] z -/ -#guard_msgs (info) in +#guard_msgs (trace) in set_option trace.grind.internalize true in example : foo x y = z → False := by fail_if_success grind @@ -62,7 +62,7 @@ theorem arrEx [Add α] (as : Array α) (h₁ : i < as.size) (h₂ : i = j) : as[ /-- -info: [grind.ematch.pattern] arrEx: [@HAdd.hAdd #6 _ _ _ (@getElem (Array _) `[Nat] _ _ _ #2 #5 _) (@getElem (Array _) `[Nat] _ _ _ #2 #4 _)] +trace: [grind.ematch.pattern] arrEx: [@HAdd.hAdd #6 _ _ _ (@getElem (Array _) `[Nat] _ _ _ #2 #5 _) (@getElem (Array _) `[Nat] _ _ 
_ #2 #4 _)] -/ #guard_msgs in grind_pattern arrEx => as[i]+as[j]'(h₂▸h₁) diff --git a/tests/lean/run/grind_pattern_proj.lean b/tests/lean/run/grind_pattern_proj.lean index 96fae6ceed4f..d98fab52db49 100644 --- a/tests/lean/run/grind_pattern_proj.lean +++ b/tests/lean/run/grind_pattern_proj.lean @@ -35,13 +35,13 @@ structure Functor (C : Type u₁) [Category.{v₁} C] (D : Type u₂) [Category. set_option trace.grind.ematch.pattern true /-- -info: [grind.ematch.pattern] Functor.map_id: [@Prefunctor.map #5 _ #3 _ (@Functor.toPrefunctor _ #4 _ #2 #1) #0 #0 (@CategoryStruct.id _ _ #0)] +trace: [grind.ematch.pattern] Functor.map_id: [@Prefunctor.map #5 _ #3 _ (@Functor.toPrefunctor _ #4 _ #2 #1) #0 #0 (@CategoryStruct.id _ _ #0)] -/ #guard_msgs in grind_pattern Functor.map_id => self.map (𝟙 X) /-- -info: [grind.ematch.pattern] Functor.map_comp: [@Prefunctor.map #9 _ #7 _ (@Functor.toPrefunctor _ #8 _ #6 #5) #4 #2 (@CategoryStruct.comp _ _ #4 #3 #2 #1 #0)] +trace: [grind.ematch.pattern] Functor.map_comp: [@Prefunctor.map #9 _ #7 _ (@Functor.toPrefunctor _ #8 _ #6 #5) #4 #2 (@CategoryStruct.comp _ _ #4 #3 #2 #1 #0)] -/ #guard_msgs in grind_pattern Functor.map_comp => self.map (f ≫ g) diff --git a/tests/lean/run/grind_pre.lean b/tests/lean/run/grind_pre.lean index 418d7b55ca7e..58ab526ca0c3 100644 --- a/tests/lean/run/grind_pre.lean +++ b/tests/lean/run/grind_pre.lean @@ -79,14 +79,14 @@ end def g (i : Nat) (j : Nat) (_ : i > j := by omega) := i + j /-- -info: [grind.offset.model] i := 1 +trace: [grind.offset.model] i := 1 [grind.offset.model] j := 0 [grind.offset.model] 「0」 := 0 [grind.offset.model] 「i + j」 := 0 [grind.offset.model] 「i + 1」 := 2 [grind.offset.model] 「i + j + 1」 := 1 -/ -#guard_msgs (info) in +#guard_msgs (trace) in set_option trace.grind.offset.model true in example (i j : Nat) (h : i + 1 > j + 1) : g (i+1) j = f ((fun x => x) i) + f j + 1 := by fail_if_success grind @@ -173,13 +173,13 @@ example (a : α) (p q r : Prop) : (h₁ : HEq p a) → (h₂ : HEq q a) → (h grind /-- -info: [grind.issues] found congruence between +trace: [grind.issues] found congruence between g b and f a but functions have different types -/ -#guard_msgs (info) in +#guard_msgs (trace) in set_option trace.grind.issues true in set_option trace.grind.debug.proof false in example (f : Nat → Bool) (g : Int → Bool) (a : Nat) (b : Int) : HEq f g → HEq a b → f a = g b := by diff --git a/tests/lean/run/grind_propagate_connectives.lean b/tests/lean/run/grind_propagate_connectives.lean index c7f7178b9504..87ceeaa5fdb1 100644 --- a/tests/lean/run/grind_propagate_connectives.lean +++ b/tests/lean/run/grind_propagate_connectives.lean @@ -18,78 +18,78 @@ set_option grind.debug true set_option grind.debug.proofs true /-- -info: [Meta.debug] true: [q, w] +trace: [Meta.debug] true: [q, w] [Meta.debug] false: [p, r] -/ -#guard_msgs (info) in +#guard_msgs (trace) in example : (p ∨ (q ∧ ¬r ∧ w)) → ¬p → False := by grind on_failure fallback /-- -info: [Meta.debug] true: [r] +trace: [Meta.debug] true: [r] [Meta.debug] false: [p, q] -/ -#guard_msgs (info) in +#guard_msgs (trace) in example : (p ∨ q ∨ r) → (p ∨ ¬q) → ¬p → False := by grind on_failure fallback /-- -info: [Meta.debug] true: [r] +trace: [Meta.debug] true: [r] [Meta.debug] false: [p₁, q] -/ -#guard_msgs (info) in +#guard_msgs (trace) in example : ((p₁ ∧ p₂) ∨ q ∨ r) → (p₁ ∨ ¬q) → p₁ = False → False := by grind on_failure fallback /-- -info: [Meta.debug] true: [r] +trace: [Meta.debug] true: [r] [Meta.debug] false: [p₂, q] -/ -#guard_msgs (info) in +#guard_msgs (trace) in example : 
((p₁ ∧ p₂) ∨ q ∨ r) → ((p₂ ∧ p₁) ∨ ¬q) → p₂ = False → False := by grind on_failure fallback /-- -info: [Meta.debug] true: [q, r] +trace: [Meta.debug] true: [q, r] [Meta.debug] false: [p] -/ -#guard_msgs (info) in +#guard_msgs (trace) in example (p q r : Prop) : p ∨ (q ↔ r) → p = False → q → False := by grind on_failure fallback /-- -info: [Meta.debug] true: [r] +trace: [Meta.debug] true: [r] [Meta.debug] false: [p, s] -/ -#guard_msgs (info) in +#guard_msgs (trace) in example (p q r : Prop) : p ∨ ¬(s ∨ (p ↔ r)) → p = False → False := by grind on_failure fallback /-- -info: [Meta.debug] true: [p] +trace: [Meta.debug] true: [p] [Meta.debug] false: [] [Meta.debug] [a, b] -/ -#guard_msgs (info) in +#guard_msgs (trace) in example (p : Prop) (a : Vector Nat 5) (b : Vector Nat 6) : (p → HEq a b) → p → False := by grind on_failure fallback /-- -info: [Meta.debug] true: [p, q] +trace: [Meta.debug] true: [p, q] [Meta.debug] false: [r] -/ -#guard_msgs (info) in +#guard_msgs (trace) in example (p q r : Prop) : p ∨ (q ↔ r) → q → ¬r → False := by grind on_failure fallback /-- -info: [Meta.debug] hello world +trace: [Meta.debug] hello world [Meta.debug] true: [p, q] [Meta.debug] false: [r] -/ -#guard_msgs (info) in +#guard_msgs (trace) in example (p q r : Prop) : p ∨ (q ↔ r) → ¬r → q → False := by grind on_failure do trace[Meta.debug] "hello world" diff --git a/tests/lean/run/grind_qpartition.lean b/tests/lean/run/grind_qpartition.lean index 7d4ce218fa19..c30541e3abbd 100644 --- a/tests/lean/run/grind_qpartition.lean +++ b/tests/lean/run/grind_qpartition.lean @@ -15,6 +15,9 @@ theorem qpartition_loop_spec₁ {n} (lt : α → α → Bool) (lo hi : Nat) ∀ k, (h₁ : lo ≤ k) → (h₂ : k < mid) → lt as'[k] as'[mid] := by sorry +grind_pattern qpartition_loop_spec₁ => + qpartition.loop lt lo hi hhi pivot as i j ilo jh w, as'[k], as'[mid] + example {n} (lt : α → α → Bool) (lo hi : Nat) (hlo : lo < n := by omega) (hhi : hi < n := by omega) (w : lo ≤ hi := by omega) (as : Vector α n) (mid as') @@ -22,4 +25,4 @@ example {n} (lt : α → α → Bool) (lo hi : Nat) (hmid : mid < n) (w_as : as' = (qpartition as lt lo hi hlo hhi).2) : ∀ i, (h₁ : lo ≤ i) → (h₂ : i < mid) → lt as'[i] as'[mid] := by - grind [qpartition, qpartition_loop_spec₁] + grind [qpartition] diff --git a/tests/lean/run/grind_ring_1.lean b/tests/lean/run/grind_ring_1.lean index a213f6ad6a3d..39cd45e4d2e7 100644 --- a/tests/lean/run/grind_ring_1.lean +++ b/tests/lean/run/grind_ring_1.lean @@ -16,11 +16,11 @@ example (x : UInt8) : (x + 16)*(x - 16) = x^2 := by grind +ring /-- -info: [grind.ring] new ring: Int +trace: [grind.ring] new ring: Int [grind.ring] characteristic: 0 [grind.ring] NoNatZeroDivisors available: true -/ -#guard_msgs (info) in +#guard_msgs (trace) in set_option trace.grind.ring true in example (x : Int) : (x + 1)^2 - 1 = x^2 + 2*x := by grind +ring @@ -29,11 +29,11 @@ example (x : BitVec 8) : (x + 16)*(x - 16) = x^2 := by grind +ring /-- -info: [grind.ring] new ring: BitVec 8 +trace: [grind.ring] new ring: BitVec 8 [grind.ring] characteristic: 256 [grind.ring] NoNatZeroDivisors available: false -/ -#guard_msgs (info) in +#guard_msgs (trace) in set_option trace.grind.ring true in example (x : BitVec 8) : (x + 1)^2 - 1 = x^2 + 2*x := by grind +ring @@ -60,8 +60,8 @@ example [CommRing α] [IsCharP α 0] (x : α) : (x + 1)*(x - 1) = x^2 → False example [CommRing α] [IsCharP α 8] (x : α) : (x + 1)*(x - 1) = x^2 → False := by grind +ring -/-- info: [grind.ring.assert.queue] -7 * x ^ 2 + 16 * y ^ 2 + x = 0 -/ -#guard_msgs (info) in +/-- trace: 
[grind.ring.assert.queue] -7 * x ^ 2 + 16 * y ^ 2 + x = 0 -/ +#guard_msgs (trace) in set_option trace.grind.ring.assert.queue true in example (x y : Int) : x + 16*y^2 - 7*x^2 = 0 → False := by fail_if_success grind +ring diff --git a/tests/lean/run/grind_ring_2.lean b/tests/lean/run/grind_ring_2.lean index 75ae1fcb28f7..617fefb67b14 100644 --- a/tests/lean/run/grind_ring_2.lean +++ b/tests/lean/run/grind_ring_2.lean @@ -73,11 +73,11 @@ example [CommRing α] (a b c : α) grind +ring /-- -info: [grind.ring.assert.basis] a + b + c + -3 = 0 +trace: [grind.ring.assert.basis] a + b + c + -3 = 0 [grind.ring.assert.basis] 2 * b ^ 2 + 2 * (b * c) + 2 * c ^ 2 + -6 * b + -6 * c + 4 = 0 [grind.ring.assert.basis] 6 * c ^ 3 + -18 * c ^ 2 + 12 * c + 4 = 0 -/ -#guard_msgs (info) in +#guard_msgs (trace) in example [CommRing α] (a b c : α) : a + b + c = 3 → a^2 + b^2 + c^2 = 5 → @@ -87,11 +87,11 @@ example [CommRing α] (a b c : α) grind +ring /-- -info: [grind.ring.assert.basis] a + b + c + -3 = 0 +trace: [grind.ring.assert.basis] a + b + c + -3 = 0 [grind.ring.assert.basis] b ^ 2 + b * c + c ^ 2 + -3 * b + -3 * c + 2 = 0 [grind.ring.assert.basis] 3 * c ^ 3 + -9 * c ^ 2 + 6 * c + 2 = 0 -/ -#guard_msgs (info) in +#guard_msgs (trace) in example [CommRing α] [NoNatZeroDivisors α] (a b c : α) : a + b + c = 3 → a^2 + b^2 + c^2 = 5 → @@ -112,8 +112,8 @@ example (a b c : BitVec 8) (f : BitVec 8 → Nat) : c = 255 → - a + b - 1 = c example (a b c : BitVec 8) (f : BitVec 8 → Nat) : c = 255 → - a + b - 1 = c → f (2*a) = f (b + a) := by grind +ring -/-- info: [grind.ring.impEq] skip: b = a, k: 2, noZeroDivisors: false -/ -#guard_msgs (info) in +/-- trace: [grind.ring.impEq] skip: b = a, k: 2, noZeroDivisors: false -/ +#guard_msgs (trace) in example (a b c : BitVec 8) (f : BitVec 8 → Nat) : 2*a = 1 → 2*b = 1 → f (a) = f (b) := by set_option trace.grind.ring.impEq true in fail_if_success grind +ring diff --git a/tests/lean/run/grind_split.lean b/tests/lean/run/grind_split.lean index a159dd95ebd1..56accbc82283 100644 --- a/tests/lean/run/grind_split.lean +++ b/tests/lean/run/grind_split.lean @@ -6,7 +6,7 @@ example (p q : Prop) : p ∨ q → p ∨ ¬q → ¬p ∨ q → ¬p ∨ ¬q → F opaque R : Nat → Prop /-- -info: [grind] working on goal `grind` +trace: [grind] working on goal `grind` [grind.eqc] (if p then a else b) = c [grind.eqc] R a = True [grind.eqc] R b = True @@ -23,7 +23,7 @@ info: [grind] working on goal `grind` [grind.eqc] R b = R c [grind] closed `grind.2` -/ -#guard_msgs (info) in +#guard_msgs (trace) in set_option trace.grind true in example (p : Prop) [Decidable p] (a b c : Nat) : (if p then a else b) = c → R a → R b → R c := by grind diff --git a/tests/lean/run/grind_t1.lean b/tests/lean/run/grind_t1.lean index 98332bb6dd58..689b73963fb0 100644 --- a/tests/lean/run/grind_t1.lean +++ b/tests/lean/run/grind_t1.lean @@ -79,9 +79,9 @@ example (a b c : Nat) (f : Nat → Nat) : p.1 ≠ f a → p = { a := f b, c, b : grind /-- -info: [grind.debug.proj] { a := b, b := v₁, c := v₂ }.a +trace: [grind.debug.proj] { a := b, b := v₁, c := v₂ }.a -/ -#guard_msgs (info) in +#guard_msgs (trace) in set_option trace.grind.debug.proj true in example (a b d e : Nat) (x y z : Boo Nat) (f : Nat → Boo Nat) : (f d).1 ≠ a → f d = ⟨b, v₁, v₂⟩ → x.1 = e → y.1 = e → z.1 = e → f d = x → f d = y → f d = z → b = a → False := by grind @@ -115,30 +115,30 @@ example (foo : Nat → Nat) end dite_propagator_test /-- -info: [grind.eqc] x = 2 * a +trace: [grind.eqc] x = 2 * a [grind.eqc] y = x [grind.eqc] (y = 2 * a) = False -/ -#guard_msgs (info) in +#guard_msgs 
(trace) in set_option trace.grind.eqc true in example (a : Nat) : let x := a + a; y = x → y = a + a := by grind -zetaDelta /-- -info: [grind.eqc] x = 2 * a +trace: [grind.eqc] x = 2 * a [grind.eqc] y = x [grind.eqc] (y = 2 * a) = False -/ -#guard_msgs (info) in +#guard_msgs (trace) in set_option trace.grind.eqc true in example (a : Nat) : let_fun x := a + a; y = x → y = a + a := by grind -zetaDelta /-- -info: [grind.eqc] y = 2 * a +trace: [grind.eqc] y = 2 * a [grind.eqc] (y = 2 * a) = False -/ -#guard_msgs (info) in +#guard_msgs (trace) in set_option trace.grind.eqc true in example (a : Nat) : let_fun x := a + a; y = x → y = a + a := by grind @@ -184,7 +184,7 @@ example (α : Type) (β : Type) (a₁ a₂ : α) (b₁ b₂ : β) grind /-- -info: [grind.assert] ∀ (a : α), a ∈ b → p a +trace: [grind.assert] ∀ (a : α), a ∈ b → p a [grind.ematch.pattern] h₁: [@Membership.mem `[α] `[List α] `[List.instMembership] `[b] #1] [grind.ematch.pattern] h₁: [p #1] [grind.assert] w ∈ b @@ -193,7 +193,7 @@ info: [grind.assert] ∀ (a : α), a ∈ b → p a [grind.ematch.instance] List.length_pos_of_mem: w ∈ b → 0 < b.length [grind.assert] w ∈ b → p w -/ -#guard_msgs (info) in +#guard_msgs (trace) in set_option trace.grind.ematch.pattern true in set_option trace.grind.ematch.instance true in set_option trace.grind.assert true in @@ -201,7 +201,7 @@ example (b : List α) (p : α → Prop) (h₁ : ∀ a ∈ b, p a) (h₂ : ∃ a grind /-- -info: [grind.assert] ∀ (x : α), Q x → P x +trace: [grind.assert] ∀ (x : α), Q x → P x [grind.ematch.pattern] h₁: [Q #1] [grind.ematch.pattern] h₁: [P #1] [grind.assert] ∀ (x : α), R x → False = P x @@ -214,7 +214,7 @@ info: [grind.assert] ∀ (x : α), Q x → P x [grind.assert] Q a → P a [grind.assert] R a → False = P a -/ -#guard_msgs (info) in +#guard_msgs (trace) in set_option trace.grind.ematch.pattern true in set_option trace.grind.ematch.instance true in set_option trace.grind.assert true in @@ -379,7 +379,7 @@ example (b : Bool) : (if b then 10 else 20) = a → b = true → False := by grind -- Should not generate a trace message about canonicalization issues -#guard_msgs (info) in +#guard_msgs (trace) in set_option trace.grind.issues true in example : (if n + 2 < m then a else b) = (if n + 1 < m then c else d) := by fail_if_success grind (splits := 0) diff --git a/tests/lean/run/grind_trace.lean b/tests/lean/run/grind_trace.lean index c3caadda37b1..1c4e67ccfa39 100644 --- a/tests/lean/run/grind_trace.lean +++ b/tests/lean/run/grind_trace.lean @@ -1,5 +1,7 @@ reset_grind_attrs% +set_option grind.warning false + attribute [grind =] List.length_cons attribute [grind →] List.getElem?_eq_getElem attribute [grind =] List.length_replicate diff --git a/tests/lean/run/grind_usr.lean b/tests/lean/run/grind_usr.lean index e1fcf65c8791..13d3b736ce14 100644 --- a/tests/lean/run/grind_usr.lean +++ b/tests/lean/run/grind_usr.lean @@ -7,27 +7,27 @@ error: the modifier `usr` is only relevant in parameters for `grind only` @[grind usr] theorem fthm : f (f x) = f x := sorry -/-- info: [grind.ematch.pattern] fthm: [f #0] -/ -#guard_msgs (info) in +/-- trace: [grind.ematch.pattern] fthm: [f #0] -/ +#guard_msgs (trace) in set_option trace.grind.ematch.pattern true in example : f (f (f x)) = f x := by grind only [fthm] /-- -info: [grind.ematch.instance] fthm: f (f x) = f x +trace: [grind.ematch.instance] fthm: f (f x) = f x [grind.ematch.instance] fthm: f (f (f x)) = f (f x) [grind.ematch.instance] fthm: f (f (f (f x))) = f (f (f x)) -/ -#guard_msgs (info) in +#guard_msgs (trace) in set_option trace.grind.ematch.instance true in 
example : f (f (f x)) = f x := by grind only [fthm] /-- -info: [grind.ematch.instance] fthm: f (f x) = f x +trace: [grind.ematch.instance] fthm: f (f x) = f x [grind.ematch.instance] fthm: f (f (f x)) = f (f x) -/ -#guard_msgs (info) in +#guard_msgs (trace) in -- should not instantiate anything using pattern `f (f #0)` set_option trace.grind.ematch.instance true in example : f x = x := by @@ -53,7 +53,7 @@ grind_pattern fthm => f (f x) example : f (f (f x)) = f x := by grind only [usr fthm] -#guard_msgs (info) in +#guard_msgs (trace) in -- should not instantiate anything using pattern `f (f #0)` set_option trace.grind.ematch.instance true in example : f x = x := by @@ -61,10 +61,10 @@ example : f x = x := by sorry /-- -info: [grind.ematch.instance] fthm: f (f x) = f x +trace: [grind.ematch.instance] fthm: f (f x) = f x [grind.ematch.instance] fthm: f (f (f x)) = f (f x) -/ -#guard_msgs (info) in +#guard_msgs (trace) in set_option trace.grind.ematch.instance true in example : f x = x := by fail_if_success grind only [fthm] diff --git a/tests/lean/run/grind_vector.lean b/tests/lean/run/grind_vector.lean new file mode 100644 index 000000000000..86c9935a1291 --- /dev/null +++ b/tests/lean/run/grind_vector.lean @@ -0,0 +1,5 @@ +set_option grind.warning false + +example [BEq α] (xs ys : Vector α n) : (xs.toList == ys.toList) = (xs == ys) := by grind + +example [LT α] {xs ys : Vector α n} : xs.toList < ys.toList ↔ xs < ys := by grind diff --git a/tests/lean/run/guard_msgs.lean b/tests/lean/run/guard_msgs.lean index 156e6414cff4..98626c629044 100644 --- a/tests/lean/run/guard_msgs.lean +++ b/tests/lean/run/guard_msgs.lean @@ -309,3 +309,36 @@ info: Tree.branch -/ #guard_msgs in #eval Tree.build 8 3 + + +section Trace + +/-! check that guard_msgs by defaults passes trace messages -/ + +set_option trace.debug true + +/-- trace: [debug] a trace -/ +#guard_msgs(all) in +#guard_msgs(info) in +run_meta trace[debug] "a trace" + +#guard_msgs(all) in +/-- trace: [debug] a trace -/ +#guard_msgs(trace) in +run_meta trace[debug] "a trace" + +#guard_msgs(all) in +#guard_msgs(drop trace) in +run_meta trace[debug] "a trace" + +#guard_msgs(all) in +/-- trace: [debug] a trace -/ +#guard_msgs in +run_meta trace[debug] "a trace" + +#guard_msgs(all) in +/-- trace: [debug] a trace -/ +#guard_msgs in +run_meta trace[debug] "a trace" + +end Trace diff --git a/tests/lean/run/haveTactic.lean b/tests/lean/run/haveTactic.lean index 54edb02fb6b9..821b35d66c51 100644 --- a/tests/lean/run/haveTactic.lean +++ b/tests/lean/run/haveTactic.lean @@ -13,7 +13,7 @@ has type but is expected to have type True : Prop --- -info: h : True +trace: h : True ⊢ True -/ #guard_msgs in diff --git a/tests/lean/run/hintSuggestionMessage.lean b/tests/lean/run/hintSuggestionMessage.lean new file mode 100644 index 000000000000..e185a0980ff8 --- /dev/null +++ b/tests/lean/run/hintSuggestionMessage.lean @@ -0,0 +1,33 @@ +import Lean.Meta + +/-! +# Tests for hint suggestion messages + +Tests rendering of hint suggestions in message data. Note that we can only assess the Unicode +versions here. +-/ + +open Lean Meta Hint + +elab foo:"foo" bar:"bar" "baz" : term => do + let sug : Suggestions := { + ref := foo + suggestions := #[ + { suggestion := "hi", preInfo? := "generic: " }, + { suggestion := "cheers", postInfo? := " (at `bar`)", span? := bar } + ], + codeActionPrefix? 
:= "Add greeting: " + } + let msg := m!"Your program is insufficiently friendly" ++ + (← MessageData.hint m!"Consider adding a greeting to your program to make it friendlier" sug) + throwErrorAt foo msg + +/-- +error: Your program is insufficiently friendly + +Hint: Consider adding a greeting to your program to make it friendlier + • generic: f̵o̵o̵h̲i̲ + • b̵a̵c̲h̲e̲e̲rs̲ (at `bar`) +-/ +#guard_msgs in +#eval foo bar baz diff --git a/tests/lean/run/induction1.lean b/tests/lean/run/induction1.lean index 46b07aad70d1..d898cc618f88 100644 --- a/tests/lean/run/induction1.lean +++ b/tests/lean/run/induction1.lean @@ -84,7 +84,7 @@ inductive Vec (α : Type) : Nat → Type | cons : (a : α) → {n : Nat} → (as : Vec α n) → Vec α (n+1) /-- -info: case cons.cons.fst +trace: case cons.cons.fst α β : Type n : Nat a✝¹ : α @@ -129,7 +129,7 @@ theorem ex1 (n m o : Nat) : n = m + 0 → m = o → m = o := by Test of named generalization, of an expression that does not appear in the goal. -/ /-- -info: case succ +trace: case succ α : Type ys zs : List α n : Nat @@ -157,7 +157,7 @@ example {α : Type} (xs ys zs : List α) : (xs ++ ys) ++ zs = xs ++ (ys ++ zs) : Test of named generalization, of an expression that appears in the goal. -/ /-- -info: case cons +trace: case cons α : Type zs : List α w : α @@ -195,7 +195,7 @@ Test of hole for named generalization. Yields a fresh hygienic name. -/ /-- -info: case zero +trace: case zero n : Nat h✝ : n + 1 = 0 ⊢ 0 = 1 + n @@ -217,7 +217,7 @@ example (n : Nat) : n + 1 = 1 + n := by Having no `=>` clause is short for `=> ?_`. -/ /-- -info: case mk +trace: case mk p1 p2 : Nat ⊢ (p1, p2).fst = (p1, p2).fst -/ @@ -238,7 +238,7 @@ induction n with | zero => ?_ | succ n ih => ?_ ``` -/ /-- -info: case zero +trace: case zero ⊢ 0 + 1 = 1 + 0 case succ diff --git a/tests/lean/run/inductionCheckAltNames.lean b/tests/lean/run/inductionCheckAltNames.lean index 9129f1aacecd..ff1721e88b8b 100644 --- a/tests/lean/run/inductionCheckAltNames.lean +++ b/tests/lean/run/inductionCheckAltNames.lean @@ -6,7 +6,7 @@ axiom elimEx (motive : Nat → Nat → Sort u) (x y : Nat) (lower : (delta a : Nat) → motive (a + delta.succ) a) : motive y x -/-- error: invalid alternative name 'lower2', expected 'diag', 'upper' or 'lower' -/ +/-- error: invalid alternative name 'lower2', expected 'diag', 'upper', or 'lower' -/ #guard_msgs in theorem invalidAlt (p: Nat) : p ≤ q ∨ p > q := by cases p, q using elimEx with diff --git a/tests/lean/run/infoFromFailure.lean b/tests/lean/run/infoFromFailure.lean index 8ef4ae6e9044..d425b4e8d333 100644 --- a/tests/lean/run/infoFromFailure.lean +++ b/tests/lean/run/infoFromFailure.lean @@ -14,7 +14,7 @@ set_option trace.Meta.synthInstance true /-- info: B.foo "hello" : String × String --- -info: [Meta.synthInstance] ❌️ Add String +trace: [Meta.synthInstance] ❌️ Add String [Meta.synthInstance] new goal Add String [Meta.synthInstance.instances] #[@Lean.Grind.CommRing.toAdd] [Meta.synthInstance] ✅️ apply @Lean.Grind.CommRing.toAdd to Add String @@ -29,7 +29,7 @@ info: [Meta.synthInstance] ❌️ Add String #check foo "hello" /-- -info: [Meta.synthInstance] ❌️ Add Bool +trace: [Meta.synthInstance] ❌️ Add Bool [Meta.synthInstance] new goal Add Bool [Meta.synthInstance.instances] #[@Lean.Grind.CommRing.toAdd] [Meta.synthInstance] ✅️ apply @Lean.Grind.CommRing.toAdd to Add Bool diff --git a/tests/lean/run/inlineExpr.lean b/tests/lean/run/inlineExpr.lean new file mode 100644 index 000000000000..de3f6374d5dc --- /dev/null +++ b/tests/lean/run/inlineExpr.lean @@ -0,0 +1,38 @@ +import Lean.Meta + 
+/-! +# Tests for the `inlineExpr` function + +`inlineExpr` should print the given expression inline, unless it exceeds a given length, in which +case it is moved to an indented block. +-/ + +open Lean Meta + +opaque shortFun : Nat → Nat +opaque shortConst : Nat + +def runTest (e : Expr) : MetaM Unit := do + let msg := inlineExpr e (maxInlineLength := 30) + logInfo m!"Before{msg}After" + +def testShort : MetaM Unit := do + runTest <| .app (.const ``shortFun []) (.const ``shortConst []) + +/-- info: Before shortFun shortConst After -/ +#guard_msgs in +#eval testShort + +opaque functionWithLongName : Nat → Nat +opaque constantWithLongName : Nat + +def testLong : MetaM Unit := do + runTest <| .app (.const ``functionWithLongName []) (.const ``constantWithLongName []) + +/-- +info: Before + functionWithLongName constantWithLongName +After +-/ +#guard_msgs in +#eval testLong diff --git a/tests/lean/run/issue7318.lean b/tests/lean/run/issue7318.lean index c93a087ef641..dabd95d33bdd 100644 --- a/tests/lean/run/issue7318.lean +++ b/tests/lean/run/issue7318.lean @@ -55,7 +55,7 @@ theorem bar_decide_4 (t : Three) : Q := by -- Check if messages from dischargers still appear /-- -info: case simp.discharger +trace: case simp.discharger ⊢ 1 + 1 = 2 -/ #guard_msgs in diff --git a/tests/lean/run/issue7550.lean b/tests/lean/run/issue7550.lean index cf9ea43892c8..39ec2d44abe6 100644 --- a/tests/lean/run/issue7550.lean +++ b/tests/lean/run/issue7550.lean @@ -30,11 +30,6 @@ termination_by structural fuel /-- error: tactic 'fail' failed case case1 -x y fuel x✝ : Nat -hfuel✝ : x✝ < 0 -⊢ Bug.divCore x✝ y 0 hfuel✝ = 42 - -case case2 x y fuel x✝ fuel✝ : Nat hfuel✝ : x✝ < fuel✝.succ h✝ : 0 < y ∧ y ≤ x✝ @@ -42,7 +37,7 @@ this✝ : x✝ - y < x✝ ih1✝ : Bug.divCore (x✝ - y) y fuel✝ ⋯ = 42 ⊢ Bug.divCore x✝ y fuel✝.succ hfuel✝ = 42 -case case3 +case case2 x y fuel x✝ fuel✝ : Nat hfuel✝ : x✝ < fuel✝.succ h✝ : ¬(0 < y ∧ y ≤ x✝) @@ -56,11 +51,6 @@ protected theorem divCore_eq_div : Bug.divCore x y fuel h = 42 := by /-- error: tactic 'fail' failed case case1 -x y fuel x✝ : Nat -hfuel✝ : x✝ < 0 -⊢ Bug.divCore x✝ y 0 hfuel✝ = 42 - -case case2 x y fuel x✝ fuel✝ : Nat hfuel✝ : x✝ < fuel✝.succ h✝ : 0 < y ∧ y ≤ x✝ @@ -68,7 +58,7 @@ this✝ : x✝ - y < x✝ ih1✝ : Bug.divCore (x✝ - y) y fuel✝ ⋯ = 42 ⊢ Bug.divCore x✝ y fuel✝.succ hfuel✝ = 42 -case case3 +case case2 x y fuel x✝ fuel✝ : Nat hfuel✝ : x✝ < fuel✝.succ h✝ : ¬(0 < y ∧ y ≤ x✝) diff --git a/tests/lean/run/issue8103.lean b/tests/lean/run/issue8103.lean new file mode 100644 index 000000000000..c1b51eede9a8 --- /dev/null +++ b/tests/lean/run/issue8103.lean @@ -0,0 +1,112 @@ +-- set_option trace.Meta.FunInd true in +def foo (n m : Nat) (h : n < m) : Nat := + match m with + | 0 => by contradiction -- This case should not show up in the principles below + | m+1 => match n with + | 0 => 0 + | n+1 => foo n m (Nat.succ_lt_succ_iff.mp h) + +/-- +info: foo.induct (motive : (n m : Nat) → n < m → Prop) (case1 : ∀ (m : Nat) (h : 0 < m + 1), 0 < m.succ → motive 0 m.succ h) + (case2 : ∀ (m n : Nat) (h : n + 1 < m + 1), n.succ < m.succ → motive n m ⋯ → motive n.succ m.succ h) (n m : Nat) + (h : n < m) : motive n m h +-/ +#guard_msgs(pass trace, all) in +#check foo.induct + +/-- +info: foo.induct_unfolding (motive : (n m : Nat) → n < m → Nat → Prop) + (case1 : ∀ (m : Nat) (h : 0 < m + 1), 0 < m.succ → motive 0 m.succ h 0) + (case2 : + ∀ (m n : Nat) (h : n + 1 < m + 1), n.succ < m.succ → motive n m ⋯ (foo n m ⋯) → motive n.succ m.succ h (foo n m ⋯)) + (n m : Nat) (h : n < m) : motive n m h (foo n m h) 
+-/ +#guard_msgs(pass trace, all) in +#check foo.induct_unfolding + + +/-- +info: foo.fun_cases (motive : (n m : Nat) → n < m → Prop) + (case1 : ∀ (m : Nat) (h : 0 < m + 1), 0 < m + 1 → 0 < m.succ → motive 0 m.succ h) + (case2 : ∀ (m n : Nat) (h : n + 1 < m + 1), n.succ < m + 1 → n.succ < m.succ → motive n.succ m.succ h) (n m : Nat) + (h : n < m) : motive n m h +-/ +#guard_msgs(pass trace, all) in +#check foo.fun_cases + +def bar (n m : Nat) (h : n = m) : Nat := + match m with + | 0 => 0 + | m+1 => match n with + | 0 => by contradiction + | n+1 => bar n m (Nat.succ.inj h) + +/-- +info: bar.induct (motive : (n m : Nat) → n = m → Prop) (case1 : ∀ (h : 0 = 0), motive 0 0 h) + (case2 : ∀ (m n : Nat) (h : n + 1 = m + 1), m.succ = n.succ → motive n m ⋯ → motive n.succ m.succ h) (n m : Nat) + (h : n = m) : motive n m h +-/ +#guard_msgs(pass trace, all) in +#check bar.induct + +/-- +info: bar.induct_unfolding (motive : (n m : Nat) → n = m → Nat → Prop) (case1 : ∀ (h : 0 = 0), motive 0 0 h 0) + (case2 : + ∀ (m n : Nat) (h : n + 1 = m + 1), m.succ = n.succ → motive n m ⋯ (bar n m ⋯) → motive n.succ m.succ h (bar n m ⋯)) + (n m : Nat) (h : n = m) : motive n m h (bar n m h) +-/ +#guard_msgs(pass trace, all) in +#check bar.induct_unfolding + +/-- +info: bar.fun_cases (motive : (n m : Nat) → n = m → Prop) (case1 : ∀ (h : 0 = 0), motive 0 0 h) + (case2 : ∀ (m n : Nat) (h : n + 1 = m + 1), m.succ = m + 1 → m.succ = n.succ → motive n.succ m.succ h) (n m : Nat) + (h : n = m) : motive n m h +-/ +#guard_msgs(pass trace, all) in +#check bar.fun_cases + +def baz (n : Nat) (h : n ≠ 0) : Nat := + match n with + | 0 => by contradiction + | k + 1 => if h : k = 0 then 0 else baz k h + + +/-- +info: baz.induct (motive : (n : Nat) → n ≠ 0 → Prop) (case1 : ∀ (h : 0 + 1 ≠ 0), motive (Nat.succ 0) h) + (case2 : ∀ (k : Nat) (h : k + 1 ≠ 0) (h_1 : ¬k = 0), motive k h_1 → motive k.succ h) (n : Nat) (h : n ≠ 0) : + motive n h +-/ +#guard_msgs(pass trace, all) in +#check baz.induct + +/-- +info: baz.induct_unfolding (motive : (n : Nat) → n ≠ 0 → Nat → Prop) (case1 : ∀ (h : 0 + 1 ≠ 0), motive (Nat.succ 0) h 0) + (case2 : ∀ (k : Nat) (h : k + 1 ≠ 0) (h_1 : ¬k = 0), motive k h_1 (baz k h_1) → motive k.succ h (baz k h_1)) (n : Nat) + (h : n ≠ 0) : motive n h (baz n h) +-/ +#guard_msgs(pass trace, all) in +#check baz.induct_unfolding + +/-- +info: baz.fun_cases (motive : (n : Nat) → n ≠ 0 → Prop) (case1 : ∀ (h : 0 + 1 ≠ 0), Nat.succ 0 ≠ 0 → motive (Nat.succ 0) h) + (case2 : ∀ (k : Nat) (h : k + 1 ≠ 0), ¬k = 0 → k.succ ≠ 0 → motive k.succ h) (n : Nat) (h : n ≠ 0) : motive n h +-/ +#guard_msgs(pass trace, all) in +#check baz.fun_cases + + +def mean (n m : Nat) (h : n = m) : Nat := + match m with + | 0 => 0 + | m+1 => match n with + | 0 => (by contradiction : Bool → Nat) true -- overapplied `noConfusion` + | n+1 => Nat.noConfusion h fun h' => mean n m h' -- non-contradictory `noConfusion` + +/-- +info: mean.fun_cases (motive : (n m : Nat) → n = m → Prop) (case1 : ∀ (h : 0 = 0), motive 0 0 h) + (case2 : ∀ (m n : Nat) (h : n + 1 = m + 1), m.succ = m + 1 → m.succ = n.succ → motive n.succ m.succ h) (n m : Nat) + (h : n = m) : motive n m h +-/ +#guard_msgs(pass trace, all) in +#check mean.fun_cases diff --git a/tests/lean/run/issue8195.lean b/tests/lean/run/issue8195.lean new file mode 100644 index 000000000000..50162aff9231 --- /dev/null +++ b/tests/lean/run/issue8195.lean @@ -0,0 +1,106 @@ +import Lean + +-- set_option trace.Meta.FunInd true + +axiom testSorry : α + +def test (l : List Nat) : Nat := + match l with + | [] => 0 + | x :: l 
=> + match x == 3 with + | false => test l + | true => test l + +/-- +info: test.induct_unfolding (motive : List Nat → Nat → Prop) (case1 : motive [] 0) + (case2 : ∀ (x : Nat) (l : List Nat), (x == 3) = false → motive l (test l) → motive (x :: l) (test l)) + (case3 : ∀ (x : Nat) (l : List Nat), (x == 3) = true → motive l (test l) → motive (x :: l) (test l)) (l : List Nat) : + motive l (test l) +-/ +#guard_msgs in +#check test.induct_unfolding + +opaque someFunction (x : Nat) (h : (x == 3) = false) : Nat +opaque someOtherFunction (x : Nat) (h : (x == 3) = true) : Nat + +def deptest (l : List Nat) : Nat := + match l with + | [] => 0 + | x :: l => + match h : x == 3 with + | false => deptest l + someFunction x h + | true => deptest l + someOtherFunction x h + +/-- +info: deptest.induct_unfolding (motive : List Nat → Nat → Prop) (case1 : motive [] 0) + (case2 : + ∀ (x : Nat) (l : List Nat) (h : (x == 3) = false), + motive l (deptest l) → motive (x :: l) (deptest l + someFunction x h)) + (case3 : + ∀ (x : Nat) (l : List Nat) (h : (x == 3) = true), + motive l (deptest l) → motive (x :: l) (deptest l + someOtherFunction x h)) + (l : List Nat) : motive l (deptest l) +-/ +#guard_msgs in +#check deptest.induct_unfolding + +-- This one doesn't work, the result type varies in the branches +-- But we fail gracefully +def depTestOddType (l : List Nat) : + match l with + | [] => Unit + | x :: _ => + if x == 3 then + Unit + else + Nat + := + match l with + | [] => () + | x :: _ => + (match h : x == 3 with + | false => someFunction x h + | true => () : if x == 3 then Unit else Nat) + +/-- +info: depTestOddType.fun_cases_unfolding + (motive : + (l : List Nat) → + (match l with + | [] => Unit + | x :: tail => if (x == 3) = true then Unit else Nat) → + Prop) + (case1 : motive [] ()) + (case2 : + ∀ (x : Nat) (l : List Nat), + (x == 3) = false → + motive (x :: l) + (match h : x == 3 with + | false => someFunction x h + | true => ())) + (case3 : + ∀ (x : Nat) (l : List Nat), + (x == 3) = true → + motive (x :: l) + (match h : x == 3 with + | false => someFunction x h + | true => ())) + (l : List Nat) : motive l (depTestOddType l) +-/ +#guard_msgs in +#check depTestOddType.fun_cases_unfolding + +-- set_option trace.Meta.FunInd true in +set_option linter.unusedVariables false in +def testMe (n : Nat) : Bool := + match _ : n - 2 with + | 0 => true + | m => false + +/-- +info: testMe.fun_cases_unfolding (motive : Nat → Bool → Prop) (case1 : ∀ (n : Nat), n - 2 = 0 → motive n true) + (case2 : ∀ (n : Nat), (n - 2 = 0 → False) → motive n false) (n : Nat) : motive n (testMe n) +-/ +#guard_msgs in +#check testMe.fun_cases_unfolding diff --git a/tests/lean/run/issue8213.lean b/tests/lean/run/issue8213.lean new file mode 100644 index 000000000000..1425f9440e7a --- /dev/null +++ b/tests/lean/run/issue8213.lean @@ -0,0 +1,32 @@ +-- set_option trace.Meta.FunInd true + +def myTest {α} + (mmotive : (x : List α) → Sort v) + (x : List α) + (h_1 : (a : α) → (dc : List α) → x = a :: dc → mmotive (a :: dc)) + (h_2 : (x' : List α) → x = x' → mmotive x') : mmotive x := + match (generalizing := false) h : x with + | a :: dc => h_1 a dc h + | x' => h_2 x' h + + +/-- +error: Failed to realize constant myTest.fun_cases: + Cannot derive functional cases principle (please report this issue) + ⏎ + failed to transform matcher, type error when constructing new pre-splitter motive: + @myTest.match_1 _fvar.28 (fun x => @_fvar.27 _fvar.28 _fvar.29 x _fvar.31 _fvar.32) _fvar.30 + ⏎ + Application type mismatch: In the appplication + motive mmotive x✝ 
h_1 + the final argument + h_1 + has type + (a : α) → (dc : List α) → x = a :: dc → mmotive (a :: dc) : Sort (imax (u_1 + 1) (u_1 + 1) v) + but is expected to have type + (a : α) → (dc : List α) → x✝ = a :: dc → mmotive (a :: dc) : Sort (imax (u_1 + 1) (u_1 + 1) v) +--- +error: unknown identifier 'myTest.fun_cases' +-/ +#guard_msgs in +def foo := @myTest.fun_cases diff --git a/tests/lean/run/issue8274.lean b/tests/lean/run/issue8274.lean new file mode 100644 index 000000000000..663cf3f308ed --- /dev/null +++ b/tests/lean/run/issue8274.lean @@ -0,0 +1,20 @@ +set_option linter.unusedVariables false + +noncomputable def myTest (x : List Bool) : Bool := + match hx : x with + | x'@hx':(x::xs) => false + | x'@([]) => true + +-- #check myTest.match_1 +/-- +info: private def myTest.match_1.splitter.{u_1} : (motive : List Bool → Sort u_1) → + (x : List Bool) → + ((x_1 : Bool) → (xs : List Bool) → x = x_1 :: xs → motive (x_1 :: xs)) → (x = [] → motive []) → motive x := +fun motive x h_1 h_2 => + List.casesOn (motive := fun x_1 => x = x_1 → motive x_1) x h_2 (fun head tail => h_1 head tail) ⋯ +-/ +#guard_msgs in +#print myTest.match_1.splitter + +#guard_msgs in +example : myTest [] := by unfold myTest; split; contradiction; rfl diff --git a/tests/lean/run/lazyListRotateUnfoldProof.lean b/tests/lean/run/lazyListRotateUnfoldProof.lean index 2a7e4d0c9344..d44201181140 100644 --- a/tests/lean/run/lazyListRotateUnfoldProof.lean +++ b/tests/lean/run/lazyListRotateUnfoldProof.lean @@ -42,7 +42,7 @@ def LazyList.ind {α : Type u} {motive : LazyList α → Sort v} -- Remark: Lean used well-founded recursion behind the scenes to define LazyList.ind /-- -info: case cons +trace: case cons τ : Type u_1 nil : LazyList τ R : List τ @@ -52,7 +52,7 @@ ih : ∀ (h : t.length + 1 = R.length), (rotate t R nil h).length = t.length + R ⊢ ∀ (h_1 : (LazyList.cons h t).length + 1 = R.length), (rotate (LazyList.cons h t) R nil h_1).length = (LazyList.cons h t).length + R.length --- -info: case delayed +trace: case delayed τ : Type u_1 nil : LazyList τ R : List τ diff --git a/tests/lean/run/letDeclSimp.lean b/tests/lean/run/letDeclSimp.lean index d22646132387..2f545102d310 100644 --- a/tests/lean/run/letDeclSimp.lean +++ b/tests/lean/run/letDeclSimp.lean @@ -6,12 +6,12 @@ example (a : Nat) : let n := 0; n + a = a := by simp (config := { zeta := false }) [n] /-- -info: a b : Nat +trace: a b : Nat h : a = b n : Nat := 0 ⊢ n + a = b --- -info: a b : Nat +trace: a b : Nat h : a = b n : Nat := 0 ⊢ a = b diff --git a/tests/lean/run/lift_lets.lean b/tests/lean/run/lift_lets.lean index e8142a9829f3..00849d10b1c0 100644 --- a/tests/lean/run/lift_lets.lean +++ b/tests/lean/run/lift_lets.lean @@ -9,7 +9,7 @@ axiom test_sorry {α : Sort _} : α Basic test of let in expression. -/ /-- -info: ⊢ let x := 1; +trace: ⊢ let x := 1; x = 1 -/ #guard_msgs in @@ -23,7 +23,7 @@ example : (let x := 1; x) = 1 := by Merging -/ /-- -info: ⊢ let x := 1; +trace: ⊢ let x := 1; x = x -/ #guard_msgs in @@ -37,7 +37,7 @@ example : (let x := 1; x) = (let y := 1; y) := by Merging off. -/ /-- -info: ⊢ let x := 1; +trace: ⊢ let x := 1; let y := 1; x = y -/ @@ -52,7 +52,7 @@ example : (let x := 1; x) = (let y := 1; y) := by Not mergable, since they must match syntactically. -/ /-- -info: ⊢ let x := 2; +trace: ⊢ let x := 2; let y := 1 + 1; x = y -/ @@ -66,7 +66,7 @@ example : (let x := 2; x) = (let y := 1 + 1; y) := by Merging with local context. 
-/ /-- -info: y : Nat := 1 +trace: y : Nat := 1 ⊢ y = 1 -/ #guard_msgs in @@ -80,7 +80,7 @@ example : (let x := 1; x) = 1 := by Merging with local context, for top-level. -/ /-- -info: y : Nat := 1 +trace: y : Nat := 1 ⊢ y = 1 -/ #guard_msgs in @@ -94,7 +94,7 @@ example : let x := 1; x = 1 := by Recursive lifting -/ /-- -info: ⊢ let y := 1; +trace: ⊢ let y := 1; let x := y + 1; x + 1 = 3 -/ @@ -109,7 +109,7 @@ example : (let x := (let y := 1; y + 1); x + 1) = 3 := by Lifting under a binder, dependency. -/ /-- -info: ⊢ ∀ (n : Nat), +trace: ⊢ ∀ (n : Nat), let x := n; n = x -/ @@ -124,7 +124,7 @@ example : ∀ n : Nat, n = (let x := n; x) := by Lifting under a binder, no dependency. -/ /-- -info: ⊢ let x := 0; +trace: ⊢ let x := 0; ∀ (n : Nat), n = n + x -/ #guard_msgs in @@ -138,7 +138,7 @@ example : ∀ n : Nat, n = (let x := 0; n + x) := by Lifting `letFun` under a binder, dependency. -/ /-- -info: ⊢ ∀ (n : Nat), +trace: ⊢ ∀ (n : Nat), let_fun x := n; n = x -/ @@ -153,7 +153,7 @@ example : ∀ n : Nat, n = (have x := n; x) := by Lifting `letFun` under a binder, no dependency. -/ /-- -info: ⊢ let_fun x := 0; +trace: ⊢ let_fun x := 0; ∀ (n : Nat), n = n + x -/ #guard_msgs in @@ -167,7 +167,7 @@ example : ∀ n : Nat, n = (have x := 0; n + x) := by Recursive lifting, one of the internal lets can leave the binder. -/ /-- -info: ⊢ let y := 1; +trace: ⊢ let y := 1; (fun x => let a := x; a + y) @@ -185,7 +185,7 @@ example : (fun x => let a := x; let y := 1; a + y) 2 = 2 + 1 := by Lifting out of binder type. -/ /-- -info: ⊢ let ty := Nat; +trace: ⊢ let ty := Nat; (fun x => Nat) 2 -/ #guard_msgs in @@ -211,7 +211,7 @@ Four cases to this test, depending on whether a `have` or `let` is seen first, and whether the second is a `have` or `let`. -/ /-- -info: ⊢ ∀ (n : Nat), +trace: ⊢ ∀ (n : Nat), let_fun x := n; x = x -/ @@ -222,7 +222,7 @@ example : ∀ n : Nat, (have x := n; x) = (have x' := n; x') := by intros rfl /-- -info: ⊢ ∀ (n : Nat), +trace: ⊢ ∀ (n : Nat), let x := n; x = x -/ @@ -233,7 +233,7 @@ example : ∀ n : Nat, (let x := n; x) = (have x' := n; x') := by intros rfl /-- -info: ⊢ ∀ (n : Nat), +trace: ⊢ ∀ (n : Nat), let x := n; x = x -/ @@ -244,7 +244,7 @@ example : ∀ n : Nat, (have x := n; x) = (let x' := n; x') := by intros rfl /-- -info: ⊢ ∀ (n : Nat), +trace: ⊢ ∀ (n : Nat), let x := n; x = x -/ @@ -267,7 +267,7 @@ example : ∀ n : Nat, let x := n; let y := x; y = n := by Lifting from underneath an unliftable let is OK. -/ /-- -info: ⊢ let y := 0; +trace: ⊢ let y := 0; ∀ (n : Nat), let x := n; x + y = n @@ -294,7 +294,7 @@ example : (id : (let ty := Nat; ty) → Nat) = @id Nat := by Enable lifting from implicit arguments using `+implicit`. -/ /-- -info: ⊢ let ty := Nat; +trace: ⊢ let ty := Nat; id = id -/ #guard_msgs in @@ -307,7 +307,7 @@ example : (id : (let ty := Nat; ty) → Nat) = @id Nat := by Lifting at a local hypothesis. -/ /-- -info: y : Nat +trace: y : Nat h : let x := 1; x = y @@ -323,7 +323,7 @@ example (h : (let x := 1; x) = y) : True := by Lifting in both the type and value for local declarations. -/ /-- -info: v : let ty := Nat; +trace: v : let ty := Nat; id ty := let x := 2; id x @@ -340,7 +340,7 @@ example : True := by Merges using local context, even if the local declaration comes after. -/ /-- -info: y : Type := Nat +trace: y : Type := Nat h : y ⊢ True -/ @@ -355,7 +355,7 @@ example (h : let x := Nat; x) : True := by A test to make sure `lift_lets` works after other tactics. 
-/ /-- -info: y : Nat +trace: y : Nat ⊢ let x := 1; x = y → True -/ @@ -372,7 +372,7 @@ example (h : (let x := 1; x) = y) : True := by Lifting `let`s in proofs in `+proof` mode. -/ /-- -info: m : Nat +trace: m : Nat h : ∃ n, n + 1 = m x : Fin m y : Fin (h.choose + 1) @@ -397,10 +397,10 @@ Unlike `extract_lets`, the `lift_lets` conv tactic's modifications persist, since the local context remains the same. -/ /-- -info: | let x := Nat; +trace: | let x := Nat; x = Int --- -info: ⊢ let x := Nat; +trace: ⊢ let x := Nat; x = Int -/ #guard_msgs in @@ -415,10 +415,10 @@ example : (let x := Nat; x) = Int := by Merging with local context. -/ /-- -info: y : Type := Nat +trace: y : Type := Nat | y --- -info: y : Type := Nat +trace: y : Type := Nat ⊢ y = Int -/ #guard_msgs in diff --git a/tests/lean/run/matchCongrEqns.lean b/tests/lean/run/matchCongrEqns.lean new file mode 100644 index 000000000000..260ee08dac3b --- /dev/null +++ b/tests/lean/run/matchCongrEqns.lean @@ -0,0 +1,192 @@ +/-! +Tricky cases and regressions tests for generalized match equations. +-/ +-- set_option trace.Meta.Match.matchEqs true + +def myTest {α} + (mmotive : (x : List α) → Sort v) + (x : List α) + (h_1 : (a : α) → (dc : List α) → x = a :: dc → mmotive (a :: dc)) + (h_2 : (x' : List α) → x = x' → mmotive x') : mmotive x := + match (generalizing := false) h : x with + | (a :: dc) => h_1 a dc h + | x' => h_2 x' h + +-- #check myTest.match_1 +/-- +info: myTest.match_1.splitter.{u_1, u_2} {α : Type u_1} (motive : List α → Sort u_2) (x✝ : List α) + (h_1 : (a : α) → (dc : List α) → x✝ = a :: dc → motive (a :: dc)) + (h_2 : (x' : List α) → (∀ (a : α) (dc : List α), x' = a :: dc → False) → x✝ = x' → motive x') : motive x✝ +-/ +#guard_msgs(pass trace, all) in +#check myTest.match_1.splitter +/-- +info: myTest.match_1.congr_eq_1.{u_1, u_2} {α : Type u_1} (motive : List α → Sort u_2) (x✝ : List α) + (h_1 : (a : α) → (dc : List α) → x✝ = a :: dc → motive (a :: dc)) (h_2 : (x' : List α) → x✝ = x' → motive x') (a : α) + (dc : List α) (heq_1 : x✝ = a :: dc) : + HEq + (match h : x✝ with + | a :: dc => h_1 a dc h + | x' => h_2 x' h) + (h_1 a dc heq_1) +-/ +#guard_msgs(pass trace, all) in +#check myTest.match_1.congr_eq_1 + +/-- +info: myTest.match_1.congr_eq_2.{u_1, u_2} {α : Type u_1} (motive : List α → Sort u_2) (x✝ : List α) + (h_1 : (a : α) → (dc : List α) → x✝ = a :: dc → motive (a :: dc)) (h_2 : (x' : List α) → x✝ = x' → motive x') + (x' : List α) (heq_1 : x✝ = x') : + (∀ (a : α) (dc : List α), x' = a :: dc → False) → + HEq + (match h : x✝ with + | a :: dc => h_1 a dc h + | x' => h_2 x' h) + (h_2 x' heq_1) +-/ +#guard_msgs(pass trace, all) in +#check myTest.match_1.congr_eq_2 + + +def take (n : Nat) (xs : List α) : List α := match n, xs with + | 0, _ => [] + | _+1, [] => [] + | n+1, a::as => a :: take n as + +/-- +info: take.match_1.{u_1, u_2} {α : Type u_1} (motive : Nat → List α → Sort u_2) (n✝ : Nat) (xs✝ : List α) + (h_1 : (x : List α) → motive 0 x) (h_2 : (n : Nat) → motive n.succ []) + (h_3 : (n : Nat) → (a : α) → (as : List α) → motive n.succ (a :: as)) : motive n✝ xs✝ +-/ +#guard_msgs(pass trace, all) in +#check take.match_1 + +/-- +info: take.match_1.congr_eq_1.{u_1, u_2} {α : Type u_1} (motive : Nat → List α → Sort u_2) (n✝ : Nat) (xs✝ : List α) + (h_1 : (x : List α) → motive 0 x) (h_2 : (n : Nat) → motive n.succ []) + (h_3 : (n : Nat) → (a : α) → (as : List α) → motive n.succ (a :: as)) (x✝ : List α) (heq_1 : n✝ = 0) + (heq_2 : xs✝ = x✝) : + HEq + (match n✝, xs✝ with + | 0, x => h_1 x + | n.succ, [] => h_2 n + | n.succ, a :: 
as => h_3 n a as) + (h_1 x✝) +-/ +#guard_msgs(pass trace, all) in #check take.match_1.congr_eq_1 + +def matchOptionUnit (o? : Option Unit) : Bool := Id.run do + if let some _ := o? then + true + else + false + +/-- +info: matchOptionUnit.match_1.congr_eq_1.{u_1} (motive : Option Unit → Sort u_1) (o?✝ : Option Unit) + (h_1 : (val : Unit) → motive (some val)) (h_2 : (x : Option Unit) → motive x) (val✝ : Unit) + (heq_1 : o?✝ = some val✝) : + HEq + (match o?✝ with + | some val => h_1 () + | x => h_2 x) + (h_1 val✝) +-/ +#guard_msgs(pass trace, all) in +#check matchOptionUnit.match_1.congr_eq_1 + +set_option linter.unusedVariables false in +partial def utf16PosToCodepointPosFromAux (s : String) : Nat → String.Pos → Nat → Bool + | 0, _, cp => true + | utf16pos, utf8pos, cp => false + +/-- +info: utf16PosToCodepointPosFromAux.match_1.congr_eq_1.{u_1} (motive : Nat → String.Pos → Nat → Sort u_1) (x✝ : Nat) + (x✝¹ : String.Pos) (x✝² : Nat) (h_1 : (x : String.Pos) → (cp : Nat) → motive 0 x cp) + (h_2 : (utf16pos : Nat) → (utf8pos : String.Pos) → (cp : Nat) → motive utf16pos utf8pos cp) (x✝³ : String.Pos) + (cp : Nat) (heq_1 : x✝ = 0) (heq_2 : x✝¹ = x✝³) (heq_3 : x✝² = cp) : + HEq + (match x✝, x✝¹, x✝² with + | 0, x, cp => h_1 x cp + | utf16pos, utf8pos, cp => h_2 utf16pos utf8pos cp) + (h_1 x✝³ cp) +-/ +#guard_msgs(pass trace, all) in +#check utf16PosToCodepointPosFromAux.match_1.congr_eq_1 + +opaque some_expr : Option Nat +def wrongEq (m? : Option Nat) (h : some_expr = m?) + (w : 0 < m?.getD 0) : Bool := by + match m?, w with + | some m?, _ => exact true + +/-- +info: wrongEq.match_1.congr_eq_1.{u_1} (motive : (m? : Option Nat) → 0 < m?.getD 0 → some_expr = m? → Sort u_1) + (m?✝ : Option Nat) (w✝ : 0 < m?✝.getD 0) (h : some_expr = m?✝) + (h_1 : (m? : Nat) → (x : 0 < (some m?).getD 0) → (h : some_expr = some m?) → motive (some m?) x h) (m? : Nat) + (x✝ : 0 < (some m?).getD 0) (h✝ : some_expr = some m?) (heq_1 : m?✝ = some m?) (heq_2 : HEq w✝ x✝) + (heq_3 : HEq h h✝) : + HEq + (match m?✝, w✝, h with + | some m?, x, h => h_1 m? x h) + (h_1 m? 
x✝ h✝) +-/ +#guard_msgs(pass trace, all) in #check wrongEq.match_1.congr_eq_1 + +set_option linter.unusedVariables false in +noncomputable def myNamedPatternTest (x : List Bool) : Bool := + match hx : x with + | x'@hx':(x::xs) => false + | x' => true + +/-- +info: myNamedPatternTest.match_1.congr_eq_1.{u_1} (motive : List Bool → Sort u_1) (x✝ : List Bool) + (h_1 : (x' : List Bool) → (x : Bool) → (xs : List Bool) → x' = x :: xs → x✝ = x :: xs → motive (x :: xs)) + (h_2 : (x' : List Bool) → x✝ = x' → motive x') (x : Bool) (xs : List Bool) (heq_1 : x✝ = x :: xs) : + HEq + (match hx : x✝ with + | x'@hx':(x :: xs) => h_1 x' x xs hx' hx + | x' => h_2 x' hx) + (h_1 (x :: xs) x xs ⋯ heq_1) +-/ +#guard_msgs(pass trace, all) in +#check myNamedPatternTest.match_1.congr_eq_1 + +set_option linter.unusedVariables false in +def testMe (n : Nat) : Bool := + match _ : n - 2 with + | 0 => true + | m => false + +/-- +info: testMe.match_1.congr_eq_2.{u_1} (motive : Nat → Sort u_1) (x✝ : Nat) (h_1 : x✝ = 0 → motive 0) + (h_2 : (m : Nat) → x✝ = m → motive m) (m : Nat) (heq_1 : x✝ = m) : + (m = 0 → False) → + HEq + (match h : x✝ with + | 0 => h_1 h + | m => h_2 m h) + (h_2 m heq_1) +-/ +#guard_msgs(pass trace, all) in +#check testMe.match_1.congr_eq_2 + +-- JFR: Code to check if all matchers with equations also have congruence equations +/- +open Lean Meta in +run_meta do + -- if false do -- comment this line to run the test on all matchers in the environment + let s := Lean.Meta.Match.Extension.extension.getState (← getEnv) (asyncMode := .local) + for (k,_) in s.map do --, _ in [:600] do + unless (`Lean).isPrefixOf (privateToUserName k) do + let mut ok := false + try + let _ ← Match.getEquationsFor k + ok := true + catch _ => + pure () + if ok then + try + let _ ← Lean.Meta.Match.Match.gendMatchCongrEqns k + catch e => + logError m!"failed to generate equations for {k} in {.ofConstName k.getPrefix}\n{indentD e.toMessageData}" +-/ diff --git a/tests/lean/run/meta2.lean b/tests/lean/run/meta2.lean index c2096ce305bb..a08fca4b0181 100644 --- a/tests/lean/run/meta2.lean +++ b/tests/lean/run/meta2.lean @@ -38,7 +38,7 @@ do print "----- tst1 -----"; checkM $ isExprDefEq mvar (mkNatLit 10); pure () -/-- info: [Meta.debug] ----- tst1 ----- -/ +/-- trace: [Meta.debug] ----- tst1 ----- -/ #guard_msgs in #eval tst1 @@ -49,7 +49,7 @@ do print "----- tst2 -----"; checkM $ isExprDefEq mvar (mkNatLit 10); pure () -/-- info: [Meta.debug] ----- tst2 ----- -/ +/-- trace: [Meta.debug] ----- tst2 ----- -/ #guard_msgs in #eval tst2 @@ -66,7 +66,7 @@ do print "----- tst3 -----"; pure () /-- -info: [Meta.debug] ----- tst3 ----- +trace: [Meta.debug] ----- tst3 ----- [Meta.debug] fun x => x.add (Nat.add 10 x) -/ #guard_msgs in @@ -87,7 +87,7 @@ do print "----- tst4 -----"; pure () /-- -info: [Meta.debug] ----- tst4 ----- +trace: [Meta.debug] ----- tst4 ----- [Meta.debug] fun x => x.add (Nat.add 10 x) -/ #guard_msgs in @@ -125,7 +125,7 @@ do print "----- tst5 -----"; print y /-- -info: [Meta.debug] ----- tst5 ----- +trace: [Meta.debug] ----- tst5 ----- [Meta.debug] (1, 2).fst [Meta.debug] 1 [Meta.debug] 1 @@ -161,7 +161,7 @@ do print "----- tst6 -----"; pure () /-- -info: [Meta.debug] ----- tst6 ----- +trace: [Meta.debug] ----- tst6 ----- [Meta.debug] x + 2 [Meta.debug] 6 -/ @@ -186,7 +186,7 @@ do print "----- tst7 -----"; /-- error: check failed --- -info: [Meta.debug] ----- tst7 ----- +trace: [Meta.debug] ----- tst7 ----- -/ #guard_msgs in #eval tst7 @@ -199,7 +199,7 @@ do print "----- tst9 -----"; pure () /-- -info: [Meta.debug] 
----- tst9 ----- +trace: [Meta.debug] ----- tst9 ----- [Meta.debug] true [Meta.debug] false -/ @@ -218,7 +218,7 @@ do print "----- tst10 -----"; pure () /-- -info: [Meta.debug] ----- tst10 ----- +trace: [Meta.debug] ----- tst10 ----- [Meta.debug] fun x => x.add (Nat.add 2 3) [Meta.debug] fun x => x.succ.succ.succ.succ.succ -/ @@ -243,7 +243,7 @@ do print "----- tst11 -----"; pure () /-- -info: [Meta.debug] ----- tst11 ----- +trace: [Meta.debug] ----- tst11 ----- [Meta.debug] ∀ (x : Nat), x = 0 -/ #guard_msgs in @@ -263,7 +263,7 @@ do print "----- tst12 -----"; pure () /-- -info: [Meta.debug] ----- tst12 ----- +trace: [Meta.debug] ----- tst12 ----- [Meta.debug] fun x => Eq.refl x [Meta.debug] ∀ (x : Nat), x = x [Meta.debug] true @@ -303,7 +303,7 @@ do print "----- tst14 -----"; pure () /-- -info: [Meta.debug] ----- tst14 ----- +trace: [Meta.debug] ----- tst14 ----- [Meta.debug] StateM Nat [Meta.debug] #[@StateT.instMonad] -/ @@ -318,7 +318,7 @@ do print "----- tst15 -----"; pure () /-- -info: [Meta.debug] ----- tst15 ----- +trace: [Meta.debug] ----- tst15 ----- [Meta.debug] instAddNat -/ #guard_msgs in @@ -335,7 +335,7 @@ do print "----- tst16 -----"; pure () /-- -info: [Meta.debug] ----- tst16 ----- +trace: [Meta.debug] ----- tst16 ----- [Meta.debug] ToString (Nat × Nat) [Meta.debug] instToStringProd -/ @@ -353,7 +353,7 @@ do print "----- tst17 -----"; pure () /-- -info: [Meta.debug] ----- tst17 ----- +trace: [Meta.debug] ----- tst17 ----- [Meta.debug] ToString (Bool × Nat × Nat) [Meta.debug] instToStringProd -/ @@ -368,7 +368,7 @@ do print "----- tst18 -----"; pure () /-- -info: [Meta.debug] ----- tst18 ----- +trace: [Meta.debug] ----- tst18 ----- [Meta.debug] instDecidableEqNat -/ #guard_msgs in @@ -385,7 +385,7 @@ do print "----- tst19 -----"; pure () /-- -info: [Meta.debug] ----- tst19 ----- +trace: [Meta.debug] ----- tst19 ----- [Meta.debug] StateM Nat [Meta.debug] Monad (StateM Nat) [Meta.debug] StateT.instMonad @@ -404,7 +404,7 @@ do print "----- tst20 -----"; pure () /-- -info: [Meta.debug] ----- tst20 ----- +trace: [Meta.debug] ----- tst20 ----- [Meta.debug] StateM Nat [Meta.debug] MonadState Nat (StateM Nat) [Meta.debug] instMonadStateOfMonadStateOf Nat (StateM Nat) @@ -437,7 +437,7 @@ do print "----- tst21 -----"; pure () /-- -info: [Meta.debug] ----- tst21 ----- +trace: [Meta.debug] ----- tst21 ----- [Meta.debug] congrArg (fun x => x.succ.succ) (Eq.symm (Eq.trans h₁ h₂)) [Meta.debug] z.succ.succ = x.succ.succ [Meta.debug] x.succ = x.succ @@ -458,7 +458,7 @@ do print "----- tst22 -----"; pure () /-- -info: [Meta.debug] ----- tst22 ----- +trace: [Meta.debug] ----- tst22 ----- [Meta.debug] Add.add x y [Meta.debug] Add.add y x [Meta.debug] toString x @@ -476,7 +476,7 @@ do print "----- tst23 -----"; print v.headBeta /-- -info: [Meta.debug] ----- tst23 ----- +trace: [Meta.debug] ----- tst23 ----- [Meta.debug] (fun x y => x + y) 0 1 [Meta.debug] 0 + 1 -/ @@ -493,7 +493,7 @@ checkM $ do { let b ← m1.mvarId!.isAssigned; pure (!b) }; checkM $ m3.mvarId!.isAssigned; pure () -/-- info: [Meta.debug] ----- tst26 ----- -/ +/-- trace: [Meta.debug] ----- tst26 ----- -/ #guard_msgs in #eval tst26 @@ -533,7 +533,7 @@ withLocalDeclD `z nat $ fun z => do pure () /-- -info: [Meta.debug] ----- tst28 ----- +trace: [Meta.debug] ----- tst28 ----- [Meta.debug] ∀ (z : Nat), Add.add z y = Add.add (Add.add x (Add.add x y)) (Add.add x (Add.add x y)) [Meta.debug] ∀ (z : Nat), Add.add z y = Add.add #0 #0 [Meta.debug] ∀ (z : Nat), Add.add z y = Add.add (Add.add x #0) (Add.add x #0) @@ -568,7 +568,7 @@ print 
(norm m); pure () /-- -info: [Meta.debug] ----- tst29 ----- +trace: [Meta.debug] ----- tst29 ----- [Meta.debug] u+1 [Meta.debug] u+1 [Meta.debug] max (max 1 (u+2)) 2 @@ -596,7 +596,7 @@ withLocalDeclD `x nat $ fun x => do pure () /-- -info: [Meta.debug] ----- tst30 ----- +trace: [Meta.debug] ----- tst30 ----- [Meta.debug] Nat.succ (?_ x) [Meta.debug] Nat.succ ?_ [Meta.debug] fun x => ?_ @@ -630,7 +630,7 @@ check r; pure () /-- -info: [Meta.debug] ----- tst32 ----- +trace: [Meta.debug] ----- tst32 ----- [Meta.debug] a.add a = a [Meta.debug] h2 ▸ h1 [Meta.debug] a.add b = a @@ -658,7 +658,7 @@ check r; pure () /-- -info: [Meta.debug] ----- tst33 ----- +trace: [Meta.debug] ----- tst33 ----- [Meta.debug] h2 ▸ h1 [Meta.debug] a.add b = a -/ @@ -675,7 +675,7 @@ withLocalDeclD `α type $ fun α => do pure () /-- -info: [Meta.debug] ----- tst34 ----- +trace: [Meta.debug] ----- tst34 ----- [Meta.debug] fun α => ?_ α → ?_ α -/ #guard_msgs in @@ -699,7 +699,7 @@ withLocalDeclD `α type $ fun α => do pure () /-- -info: [Meta.debug] ----- tst35 ----- +trace: [Meta.debug] ----- tst35 ----- [Meta.debug] fun α => ?_ α → ?_ α [Meta.debug] fun α => α → α -/ @@ -718,7 +718,7 @@ withLocalDeclD `α type $ fun α => do checkM $ approxDefEq $ isDefEq m1 (mkConst `Id [levelZero]); pure () -/-- info: [Meta.debug] ----- tst36 ----- -/ +/-- trace: [Meta.debug] ----- tst36 ----- -/ #guard_msgs in #eval tst36 @@ -735,7 +735,7 @@ withLocalDeclD `v nat $ fun v => do pure () /-- -info: [Meta.debug] ----- tst37 ----- +trace: [Meta.debug] ----- tst37 ----- [Meta.debug] ?_ v (?_ v) [Meta.debug] StateM Nat Nat -/ @@ -869,7 +869,7 @@ check t; | none => throwError "array lit expected") /-- -info: [Meta.debug] ----- tst42 ----- +trace: [Meta.debug] ----- tst42 ----- [Meta.debug] [1, 2] [Meta.debug] #[1, 2] -/ diff --git a/tests/lean/run/meta3.lean b/tests/lean/run/meta3.lean index 79d3b5b03180..58d452c441b2 100644 --- a/tests/lean/run/meta3.lean +++ b/tests/lean/run/meta3.lean @@ -53,7 +53,7 @@ do let d : DiscrTree Nat := {}; set_option trace.Meta.debug true in set_option pp.mvars false in /-- -info: [Meta.debug] (Add.add => (node +trace: [Meta.debug] (Add.add => (node (Nat => (node (* => (node (* => (node (10 => (node #[1])) (20 => (node #[4])))) (0 => (node (10 => (node #[2])))))))))) (* => (node #[5])) diff --git a/tests/lean/run/meta4.lean b/tests/lean/run/meta4.lean index 182517ccc0f8..73afff443efc 100644 --- a/tests/lean/run/meta4.lean +++ b/tests/lean/run/meta4.lean @@ -40,7 +40,7 @@ forallBoundedTelescope cinfo.type (some 10) $ fun xs body => do { print xs; chec pure () /-- -info: [Meta.debug] (α β : Type) → α → β → DecidableEq β +trace: [Meta.debug] (α β : Type) → α → β → DecidableEq β [Meta.debug] (β : Type) → ?α → β → DecidableEq β [Meta.debug] (b : ?β) → Decidable (?a = b) [Meta.debug] Decidable (?a = ?b) diff --git a/tests/lean/run/meta5.lean b/tests/lean/run/meta5.lean index 2e4b11edd602..28afc84145dc 100644 --- a/tests/lean/run/meta5.lean +++ b/tests/lean/run/meta5.lean @@ -27,7 +27,7 @@ set_option pp.mvars false set_option trace.Meta.debug true /-- -info: [Meta.debug] ?_ +trace: [Meta.debug] ?_ [Meta.debug] fun y => let x := 0; ?_ diff --git a/tests/lean/run/meta7.lean b/tests/lean/run/meta7.lean index 6e968a6a7d56..21a980ea66df 100644 --- a/tests/lean/run/meta7.lean +++ b/tests/lean/run/meta7.lean @@ -57,7 +57,7 @@ forallBoundedTelescope t (some 1) fun xs b => do pure () /-- -info: [Meta.debug] ----- tst2 ----- +trace: [Meta.debug] ----- tst2 ----- [Meta.debug] Nat → IO Nat [Meta.debug] IO Nat -/ @@ -79,7 +79,7 @@ 
forallBoundedTelescope t (some 0) fun xs b => do pure () /-- -info: [Meta.debug] ----- tst2 ----- +trace: [Meta.debug] ----- tst2 ----- [Meta.debug] IO Nat [Meta.debug] IO Nat -/ @@ -109,7 +109,7 @@ pure () set_option pp.mvars false in /-- -info: [Meta.debug] ----- tst4 ----- +trace: [Meta.debug] ----- tst4 ----- [Meta.debug] x y : Nat ⊢ Nat [Meta.debug] ?_ (Add.add 10 y) y @@ -139,7 +139,7 @@ check m; pure () /-- -info: [Meta.debug] ----- tst5 ----- +trace: [Meta.debug] ----- tst5 ----- [Meta.debug] p q : Prop h₁ : q h₂ : p = q @@ -172,7 +172,7 @@ pure () set_option pp.mvars false in /-- -info: [Meta.debug] ----- tst6 ----- +trace: [Meta.debug] ----- tst6 ----- [Meta.debug] x y : Nat ⊢ Nat [Meta.debug] ?_ (Add.add 10 y) @@ -200,7 +200,7 @@ checkM (pure $ val == expected); pure () /-- -info: [Meta.debug] ----- tst7 ----- +trace: [Meta.debug] ----- tst7 ----- [Meta.debug] Add.add x y [Meta.debug] Add.add 0 1 [Meta.debug] Add.add 0 1 @@ -221,7 +221,7 @@ def tst8 : MetaM Unit := do pure () /-- -info: [Meta.debug] ----- tst8 ----- +trace: [Meta.debug] ----- tst8 ----- [Meta.debug] match [1, 2, 3] with | [] => true | head :: tail => false @@ -237,7 +237,7 @@ def tst9 : MetaM Unit := do pure () /-- -info: [Meta.debug] ----- tst9 ----- +trace: [Meta.debug] ----- tst9 ----- [Meta.debug] [(instOfNatNat, 100)] -/ #guard_msgs in @@ -269,7 +269,7 @@ def tst11 : MetaM Unit := do checkM (isDefEq x y) pure () -/-- info: [Meta.debug] ----- tst11 ----- -/ +/-- trace: [Meta.debug] ----- tst11 ----- -/ #guard_msgs in #eval tst11 @@ -287,7 +287,7 @@ def tst12 : MetaM Unit := do pure () /-- -info: [Meta.debug] ----- tst12 ----- +trace: [Meta.debug] ----- tst12 ----- [Meta.debug] Add.add 10 y [Meta.debug] Add.add (Int.ofNat 10) (Int.ofNat y) [Meta.debug] Add.add 10 y diff --git a/tests/lean/run/multiTargetCasesInductionIssue.lean b/tests/lean/run/multiTargetCasesInductionIssue.lean index 211ccc709c34..9b4dd6ba5427 100644 --- a/tests/lean/run/multiTargetCasesInductionIssue.lean +++ b/tests/lean/run/multiTargetCasesInductionIssue.lean @@ -24,7 +24,7 @@ def Vec.casesOn | ⟨as, h⟩ => go n as h /-- -info: α : Type u_1 +trace: α : Type u_1 n✝ : Nat a✝ : α as✝ : Vec α n✝ @@ -43,7 +43,7 @@ example (n : Nat) (a : α) (as : Vec α n) : Vec.cons a (Vec.cons a as) = Vec.co constructor /-- -info: α : Type u_1 +trace: α : Type u_1 n✝ : Nat a✝ : α as✝ : Vec α n✝ @@ -62,7 +62,7 @@ example (n : Nat) (a : α) (as : Vec α n) : Vec.cons a (Vec.cons a as) = Vec.co constructor /-- -info: α : Type u_1 +trace: α : Type u_1 n : Nat a : α as : Vec α n diff --git a/tests/lean/run/mutual_termination_by_errors.lean b/tests/lean/run/mutual_termination_by_errors.lean index 56e3a9df8a19..139027bd8c43 100644 --- a/tests/lean/run/mutual_termination_by_errors.lean +++ b/tests/lean/run/mutual_termination_by_errors.lean @@ -2,7 +2,7 @@ namespace Test /-- error: incomplete set of termination hints: -This function is mutually recursive with Test.f, Test.h and Test.i, which do not have a termination hint. +This function is mutually recursive with Test.f, Test.h, and Test.i, which do not have a termination hint. The present clause is ignored. 
-/ #guard_msgs in diff --git a/tests/lean/run/opaqueNewCodeGen.lean b/tests/lean/run/opaqueNewCodeGen.lean index fa4b15d73fc6..fd46a297bb0e 100644 --- a/tests/lean/run/opaqueNewCodeGen.lean +++ b/tests/lean/run/opaqueNewCodeGen.lean @@ -3,7 +3,7 @@ import Lean set_option compiler.enableNew true /-- -info: [Compiler.result] size: 1 +trace: [Compiler.result] size: 1 def f x : Nat := let _x.1 := Nat.add x x; return _x.1 @@ -14,7 +14,7 @@ opaque f : Nat → Nat := fun x => Nat.add x x /-- -info: [Compiler.result] size: 0 +trace: [Compiler.result] size: 0 def g a._@.opaqueNewCodeGen._hyg.1 a._@.opaqueNewCodeGen._hyg.2 : Nat := extern -/ diff --git a/tests/lean/run/ppMVars.lean b/tests/lean/run/ppMVars.lean index 968c2be219f5..d57eecedda45 100644 --- a/tests/lean/run/ppMVars.lean +++ b/tests/lean/run/ppMVars.lean @@ -13,8 +13,8 @@ Default values /-- info: ?a : Nat -/ #guard_msgs in #check (?a : Nat) -/-- info: ⊢ Sort ?u.1 -/ -#guard_msgs (info, drop all) in +/-- trace: ⊢ Sort ?u.1 -/ +#guard_msgs (trace, drop all) in example : (by_elab do return .sort (.mvar (.mk (.num `_uniq 1)))) := by trace_state sorry @@ -31,14 +31,14 @@ set_option pp.mvars false /-- info: ?_ : Nat -/ #guard_msgs in #check (_ : Nat) -/-- info: ⊢ Sort _ -/ -#guard_msgs (info, drop all) in +/-- trace: ⊢ Sort _ -/ +#guard_msgs (trace, drop all) in example : (by_elab do return .sort (.mvar (.mk (.num `_uniq 1)))) := by trace_state sorry -/-- info: ⊢ Type _ -/ -#guard_msgs (info, drop all) in +/-- trace: ⊢ Type _ -/ +#guard_msgs (trace, drop all) in example : Type _ := by trace_state sorry @@ -63,14 +63,14 @@ set_option pp.mvars.levels false Lean.MonadMCtx.modifyMCtx fun mctx => mctx.addExprMVarDecl mvarId .anonymous lctx {} type .natural 0 return .mvar mvarId -/-- info: ⊢ Sort _ -/ -#guard_msgs (info, drop all) in +/-- trace: ⊢ Sort _ -/ +#guard_msgs (trace, drop all) in example : (by_elab do return .sort (.mvar (.mk (.num `_uniq 1)))) := by trace_state sorry -/-- info: ⊢ Type _ -/ -#guard_msgs (info, drop all) in +/-- trace: ⊢ Type _ -/ +#guard_msgs (trace, drop all) in example : Type _ := by trace_state sorry @@ -95,14 +95,14 @@ set_option pp.mvars.anonymous false Lean.MonadMCtx.modifyMCtx fun mctx => mctx.addExprMVarDecl mvarId .anonymous lctx {} type .natural 0 return .mvar mvarId -/-- info: ⊢ Sort _ -/ -#guard_msgs (info, drop all) in +/-- trace: ⊢ Sort _ -/ +#guard_msgs (trace, drop all) in example : (by_elab do return .sort (.mvar (.mk (.num `_uniq 1)))) := by trace_state sorry -/-- info: ⊢ Type _ -/ -#guard_msgs (info, drop all) in +/-- trace: ⊢ Type _ -/ +#guard_msgs (trace, drop all) in example : Type _ := by trace_state sorry diff --git a/tests/lean/run/renameSelf.lean b/tests/lean/run/renameSelf.lean new file mode 100644 index 000000000000..47df6110af8e --- /dev/null +++ b/tests/lean/run/renameSelf.lean @@ -0,0 +1,11 @@ +variable {P Q : Prop} + +/-- +error: failed to find a hypothesis with type + P ↔ Q +-/ +#guard_msgs in +example : P ↔ Q := by + rename P ↔ Q => goal + obtain ⟨hpq, hqp⟩ := goal + constructor <;> trivial diff --git a/tests/lean/run/rflTacticErrors.lean b/tests/lean/run/rflTacticErrors.lean index f8c4d195376f..c9cfb84725ff 100644 --- a/tests/lean/run/rflTacticErrors.lean +++ b/tests/lean/run/rflTacticErrors.lean @@ -197,10 +197,13 @@ is not definitionally equal to the right-hand side #guard_msgs in example : true'' = true := by with_reducible apply_rfl -- Error /-- -error: tactic 'apply' failed, failed to unify +error: tactic 'apply' failed, could not unify the conclusion of 'HEq.refl' @HEq ?α ?a ?α ?a 
-with +with the goal @HEq Bool true'' Bool true + +Note: The full type of 'HEq.refl' is + ∀ {α : Sort ?u.601} (a : α), HEq a a ⊢ HEq true'' true -/ #guard_msgs in @@ -262,10 +265,13 @@ is not definitionally equal to the right-hand side #guard_msgs in example : false = true := by apply_rfl -- Error /-- -error: tactic 'apply' failed, failed to unify +error: tactic 'apply' failed, could not unify the conclusion of 'HEq.refl' HEq ?a ?a -with +with the goal HEq false true + +Note: The full type of 'HEq.refl' is + ∀ {α : Sort ?u.653} (a : α), HEq a a ⊢ HEq false true -/ #guard_msgs in @@ -326,10 +332,13 @@ is not definitionally equal to the right-hand side #guard_msgs in example : false = true := by with_reducible apply_rfl -- Error /-- -error: tactic 'apply' failed, failed to unify +error: tactic 'apply' failed, could not unify the conclusion of 'HEq.refl' HEq ?a ?a -with +with the goal HEq false true + +Note: The full type of 'HEq.refl' is + ∀ {α : Sort ?u.705} (a : α), HEq a a ⊢ HEq false true -/ #guard_msgs in @@ -383,19 +392,25 @@ example : R false true := by with_reducible apply_rfl -- Error -- Inheterogeneous unequal /-- -error: tactic 'apply' failed, failed to unify +error: tactic 'apply' failed, could not unify the conclusion of 'HEq.refl' HEq ?a ?a -with +with the goal HEq true 1 + +Note: The full type of 'HEq.refl' is + ∀ {α : Sort ?u.774} (a : α), HEq a a ⊢ HEq true 1 -/ #guard_msgs in example : HEq true 1 := by apply_rfl -- Error /-- -error: tactic 'apply' failed, failed to unify +error: tactic 'apply' failed, could not unify the conclusion of 'HEq.refl' HEq ?a ?a -with +with the goal HEq true 1 + +Note: The full type of 'HEq.refl' is + ∀ {α : Sort ?u.815} (a : α), HEq a a ⊢ HEq true 1 -/ #guard_msgs in diff --git a/tests/lean/run/safeExp.lean b/tests/lean/run/safeExp.lean index ec9986fb78fa..8b112a25575a 100644 --- a/tests/lean/run/safeExp.lean +++ b/tests/lean/run/safeExp.lean @@ -19,7 +19,7 @@ example : 2^257 = 2*2^256 := /-- warning: exponent 2008 exceeds the threshold 256, exponentiation operation was not evaluated, use `set_option exponentiation.threshold <num>` to set a new threshold --- -info: k : Nat +trace: k : Nat h : k = 2008 ^ 2 + 2 ^ 2008 ⊢ ((4032064 + 2 ^ 2008) ^ 2 + 2 ^ (4032064 + 2 ^ 2008)) % 10 = 6 --- @@ -34,7 +34,7 @@ example (k : Nat) (h : k = 2008^2 + 2^2008) : (k^2 + 2^k)%10 = 6 := by sorry /-- -info: k : Nat +trace: k : Nat h : k = 2008 ^ 2 + 2 ^ 2008 ⊢ ((2008 ^ 2 + 2 ^ 2008) ^ 2 + 2 ^ (2008 ^ 2 + 2 ^ 2008)) % 10 = 6 --- diff --git a/tests/lean/run/scopedunifhint.lean b/tests/lean/run/scopedunifhint.lean index 09125d2521fb..b11eb721db35 100644 --- a/tests/lean/run/scopedunifhint.lean +++ b/tests/lean/run/scopedunifhint.lean @@ -28,9 +28,9 @@ set_option pp.mvars false def x : Nat := 10 /-- -error: application type mismatch +error: Application type mismatch: In the appplication mul ?_ x -argument +the final argument x has type Nat : Type @@ -41,9 +41,9 @@ but is expected to have type #check mul x x -- Error: unification hint is not active /-- -error: application type mismatch +error: Application type mismatch: In the appplication mul ?_ (x, x) -argument +the final argument (x, x) has type Nat × Nat : Type @@ -56,9 +56,9 @@ but is expected to have type local infix:65 (priority := high) "*" => mul /-- -error: application type mismatch +error: Application type mismatch: In the appplication ?_*x -argument +the final argument x has type Nat : Type @@ -74,9 +74,9 @@ open Algebra -- activate unification hints #check x*x -- works /-- -error: application type mismatch 
+error: Application type mismatch: In the appplication ?_*(x, x) -argument +the final argument (x, x) has type Nat × Nat : Type @@ -102,9 +102,9 @@ local unif_hint (s : Magma) (m : Magma) (n : Magma) (β : Type u) (δ : Type v) end Sec1 /-- -error: application type mismatch +error: Application type mismatch: In the appplication ?_*(x, x) -argument +the final argument (x, x) has type Nat × Nat : Type diff --git a/tests/lean/run/simpArithCacheIssue.lean b/tests/lean/run/simpArithCacheIssue.lean index 21f426497af1..2e7037129cdc 100644 --- a/tests/lean/run/simpArithCacheIssue.lean +++ b/tests/lean/run/simpArithCacheIssue.lean @@ -1,5 +1,5 @@ /-- -info: x y : Nat +trace: x y : Nat h : y = 0 ⊢ id (2 * x + y) = id (2 * x) -/ diff --git a/tests/lean/run/simpDiag.lean b/tests/lean/run/simpDiag.lean index 20feb4999d20..23260790fc74 100644 --- a/tests/lean/run/simpDiag.lean +++ b/tests/lean/run/simpDiag.lean @@ -8,7 +8,7 @@ theorem f_eq : f (x + 1) = q (f x) := rfl axiom q_eq (x : Nat) : q x = x /-- -info: [simp] Diagnostics +trace: [simp] Diagnostics [simp] used theorems (max: 50, num: 2): [simp] f_eq ↦ 50 [simp] q_eq ↦ 50 @@ -32,7 +32,7 @@ def ack : Nat → Nat → Nat | x+1, y+1 => ack x (ack (x+1) y) /-- -info: [simp] Diagnostics +trace: [simp] Diagnostics [simp] used theorems (max: 1201, num: 3): [simp] ack.eq_3 ↦ 1201 [simp] Nat.reduceAdd (builtin simproc) ↦ 771 @@ -98,14 +98,14 @@ opaque q1 : Nat → Nat → Prop @[simp] axiom q1_ax (x : Nat) : q1 x 10 /-- -info: [simp] Diagnostics +trace: [simp] Diagnostics [simp] used theorems (max: 1, num: 1): [simp] q1_ax ↦ 1 [simp] tried theorems (max: 1, num: 1): [simp] q1_ax ↦ 1, succeeded: 1 use `set_option diagnostics.threshold <num>` to control threshold for reporting counters --- -info: [diag] Diagnostics +trace: [diag] Diagnostics [reduction] unfolded declarations (max: 246, num: 2): [reduction] Nat.rec ↦ 246 [reduction] OfNat.ofNat ↦ 24 diff --git a/tests/lean/run/simpStar.lean b/tests/lean/run/simpStar.lean index 669dd3988eef..6e4fb528801e 100644 --- a/tests/lean/run/simpStar.lean +++ b/tests/lean/run/simpStar.lean @@ -13,7 +13,7 @@ theorem ex2 (x : Nat) (h₁ : f x x = g x) (h₂ : g x = x) : f x (f x x) = x := axiom g_ax (x : Nat) : g x = 0 /-- -info: x y : Nat +trace: x y : Nat h₁ : f x x = g x h₂ : g x < 5 ⊢ g x + g x = 0 diff --git a/tests/lean/run/sorry.lean b/tests/lean/run/sorry.lean index cb4f0222f0bd..8d66421ab809 100644 --- a/tests/lean/run/sorry.lean +++ b/tests/lean/run/sorry.lean @@ -84,7 +84,7 @@ error: unknown identifier 'a' --- error: unknown identifier 'b' --- -info: ⊢ sorry = sorry +trace: ⊢ sorry = sorry -/ #guard_msgs in set_option autoImplicit false in @@ -98,7 +98,7 @@ error: unknown identifier 'a' --- error: unknown identifier 'b' --- -info: ⊢ sorry `«sorry:106:10» = sorry `«sorry:106:14» +trace: ⊢ sorry `«sorry:106:10» = sorry `«sorry:106:14» -/ #guard_msgs in set_option autoImplicit false in @@ -111,7 +111,7 @@ This requires `Lean.Widget.ppExprTagged` to have a pretty printing mode that doe https://github.com/leanprover/lean4/issues/6715 -/ /-- -info: n : Nat := sorry +trace: n : Nat := sorry ⊢ True --- warning: declaration uses 'sorry' diff --git a/tests/lean/run/structInst.lean b/tests/lean/run/structInst.lean index 2895fdb1a692..b78ed393694e 100644 --- a/tests/lean/run/structInst.lean +++ b/tests/lean/run/structInst.lean @@ -198,7 +198,7 @@ structure Bar extends Foo where /- Rather than `(fun x => x) 0 = 0` or `{ toFun := fun x => x }.toFun 0 = 0` -/ -/-- info: ⊢ 0 = 0 -/ +/-- trace: ⊢ 0 = 0 -/ #guard_msgs in def bar : Bar where 
toFun x := x @@ -309,9 +309,9 @@ structure A where m : Fin n /-- -info: a +trace: a --- -info: b +trace: b -/ #guard_msgs in example : A where diff --git a/tests/lean/run/structuralMutual.lean b/tests/lean/run/structuralMutual.lean index be2f6f530ee7..74406a81be3d 100644 --- a/tests/lean/run/structuralMutual.lean +++ b/tests/lean/run/structuralMutual.lean @@ -73,7 +73,7 @@ theorem B_size_eq3 : B.empty.size = 0 := rfl -- Smart unfolding works /-- -info: a : A +trace: a : A h : (B.other a).size = 1 ⊢ a.size = 0 -/ @@ -249,7 +249,7 @@ theorem eq_true_of_not_eq_false {b : Bool} : (! b) = false → b = true := by si theorem eq_false_of_not_eq_true {b : Bool} : (! b) = true → b = false := by simp /-- -info: n : Nat +trace: n : Nat h : isOdd (n + 1) = false ⊢ isEven n = true -/ diff --git a/tests/lean/run/tactic.lean b/tests/lean/run/tactic.lean index 6ac073f7d09a..56f3d4fdc826 100644 --- a/tests/lean/run/tactic.lean +++ b/tests/lean/run/tactic.lean @@ -20,6 +20,6 @@ print result set_option trace.Meta.Tactic true -/-- info: [Meta.Tactic] fun {p q} a a_1 => a -/ +/-- trace: [Meta.Tactic] fun {p q} a a_1 => a -/ #guard_msgs in #eval tst1 diff --git a/tests/lean/run/tactic_config.lean b/tests/lean/run/tactic_config.lean index 5dee17ae491d..b2ce2ed7e33c 100644 --- a/tests/lean/run/tactic_config.lean +++ b/tests/lean/run/tactic_config.lean @@ -151,7 +151,7 @@ error: structure 'C' does not have a field named 'x' --- info: config is { b := { toA := { x := true } } } --- -info: ⊢ True +trace: ⊢ True -/ #guard_msgs in example : True := by @@ -160,7 +160,7 @@ example : True := by trivial -- Check that when recovery mode is false, no error is reported. -/-- info: ⊢ True -/ +/-- trace: ⊢ True -/ #guard_msgs in example : True := by fail_if_success ctac -x diff --git a/tests/lean/run/tojson_fromjson_perf_issue.lean b/tests/lean/run/tojson_fromjson_perf_issue.lean new file mode 100644 index 000000000000..d7ececeea610 --- /dev/null +++ b/tests/lean/run/tojson_fromjson_perf_issue.lean @@ -0,0 +1,107 @@ +import Lean.Data.Json +open Lean + +structure Foo where + a1 : Option Nat + a2 : Option Nat + a3 : Option Nat + a4 : Option Nat + a5 : Option Nat + a6 : Option Nat + a7 : Option Nat + a8 : Option Nat + a9 : Option Nat + a10 : Option Nat + a11 : Option Nat + a12 : Option Nat + a13 : Option Nat + a14 : Option Nat + a15 : Option Nat + a16 : Option Nat + a17 : Option Nat + a18 : Option Nat + a19 : Option Nat + a20 : Option Nat + a21 : Option Nat + a22 : Option Nat + a23 : Option Nat + a24 : Option Nat + a25 : Option Nat + a26 : Option Nat + a27 : Option Nat + a28 : Option Nat + a29 : Option Nat + a30 : Option Nat + a31 : Option Nat + a32 : Option Nat + a33 : Option Nat + a34 : Option Nat + a35 : Option Nat + a36 : Option Nat + a37 : Option Nat + a38 : Option Nat + a39 : Option Nat + deriving ToJson, FromJson, Repr, BEq + +structure Boo where + a1 : String × Option Nat + a2 : String × Option Nat + a3 : String × Option Nat + a4 : String × Option Nat + a5 : String × Option Nat + a6 : String × Option Nat + a7 : String × Option Nat + a8 : String × Option Nat + a9 : String × Option Nat + a10 : Array Nat + a11 : Array Nat + a12 : Array Nat + a13 : Array Nat + a14 : Array Nat + a15 : Array Nat + a16 : Array Nat + a17 : Array Nat + a18 : Array Nat + a19 : Array Nat + a20 : Array Nat + a21 : Array Nat + a22 : Array Nat + a23 : Array Nat + a24 : Array Nat + a25 : Array Nat + a26 : Array Nat + a27 : Array Nat + a28 : Array Nat + a29 : List Nat + a30 : List Nat + a31 : List Nat + a32 : List Nat + a33 : List Nat + a34 : List Nat + 
a35 : List Nat + a36 : List Nat + a37 : List Nat + a38 : List Nat + a39 : List Nat + aa10 : Float × USize × UInt64 + aa11 : Float × USize × UInt64 + aa12 : Float × USize × UInt64 + aa13 : Float × USize × UInt64 + aa14 : Float × USize × UInt64 + aa15 : Float × USize × UInt64 + aa16 : Float × USize × UInt64 + aa17 : Float × USize × UInt64 + aa18 : Float × USize × UInt64 + aa19 : Float × USize × UInt64 + aa20 : Float × USize × UInt64 + aa21 : Float × USize × UInt64 + aa22 : Float × USize × UInt64 + aa23 : Float × USize × UInt64 + aa24 : Float × USize × UInt64 + aa25 : Float × USize × UInt64 + aa26 : Float × USize × UInt64 + aa27 : Float × USize × UInt64 + aa28 : Float × USize × UInt64 + aa29 : Float × USize × UInt64 + aa30 : Float × USize × UInt64 + deriving ToJson, FromJson, Repr, BEq diff --git a/tests/lean/run/trace.lean b/tests/lean/run/trace.lean index 139c7444dbdd..421eebd47acf 100644 --- a/tests/lean/run/trace.lean +++ b/tests/lean/run/trace.lean @@ -2,7 +2,7 @@ import Lean.CoreM open Lean -structure MyState := +structure MyState where (trace_state : TraceState := {}) (s : Nat := 0) @@ -60,7 +60,7 @@ info: [module] message [bughunt] at test2 ERROR --- -info: [module] message +trace: [module] message [module] hello world [bughunt] at test2 @@ -78,7 +78,7 @@ info: [module] message world [bughunt] at end of tst3 --- -info: [module] message +trace: [module] message [module] hello world [bughunt] at test2 diff --git a/tests/lean/run/traceFormat.lean b/tests/lean/run/traceFormat.lean index a809bf7662fa..35abf80ccff1 100644 --- a/tests/lean/run/traceFormat.lean +++ b/tests/lean/run/traceFormat.lean @@ -18,7 +18,7 @@ def withNode (cls : Name) (msg : MessageData) (k : CoreM Unit) (collapsed := tru oldTraces.push { ref, msg } /-- -info: [test] top-level leaf +trace: [test] top-level leaf [test] top-level leaf [test] node with single leaf [test] leaf diff --git a/tests/lean/run/type_as_hole.lean b/tests/lean/run/type_as_hole.lean new file mode 100644 index 000000000000..f6bac3990031 --- /dev/null +++ b/tests/lean/run/type_as_hole.lean @@ -0,0 +1,38 @@ +/-- +error: failed to infer type of `foo` + +Note: All holes (e.g., `_`) in the header of a theorem are resolved before the proof is processed; information from the proof cannot be used to infer what these values should be +--- +error: type of theorem 'foo' is not a proposition + ?m.2 +-/ +#guard_msgs (error) in +theorem foo : _ := + sorry + +/-- +error: failed to infer type of example + +Note: When the resulting type of a declaration is explicitly provided, all holes (e.g., `_`) in the header are resolved before the declaration body is processed +-/ +#guard_msgs (error) in +example : _ := + sorry + +/-- +error: failed to infer type of `boo` + +Note: When the resulting type of a declaration is explicitly provided, all holes (e.g., `_`) in the header are resolved before the declaration body is processed +-/ +#guard_msgs (error) in +def boo : _ := + sorry + +/-- +error: failed to infer type of instance + +Note: When the resulting type of a declaration is explicitly provided, all holes (e.g., `_`) in the header are resolved before the declaration body is processed +-/ +#guard_msgs (error) in +instance boo : _ := + sorry diff --git a/tests/lean/run/variable.lean b/tests/lean/run/variable.lean index 300d8af97e5a..01ce46127787 100644 --- a/tests/lean/run/variable.lean +++ b/tests/lean/run/variable.lean @@ -136,9 +136,9 @@ theorem t13 (a : α) : toString a = toString a := rfl set_option pp.mvars false in /-- -error: application type mismatch +error: 
Application type mismatch: In the appplication ToString True -argument +the final argument True has type Prop : Type diff --git a/tests/lean/run/wf_preprocess.lean b/tests/lean/run/wf_preprocess.lean index c7b54bea1e24..f9c34a993990 100644 --- a/tests/lean/run/wf_preprocess.lean +++ b/tests/lean/run/wf_preprocess.lean @@ -8,12 +8,12 @@ structure Tree (α : Type u) where def Tree.isLeaf (t : Tree α) := t.cs.isEmpty /-- -info: α : Type u_1 +trace: α : Type u_1 t t' : Tree α h✝ : t' ∈ t.cs ⊢ sizeOf t' < sizeOf t -/ -#guard_msgs in +#guard_msgs(trace) in def Tree.map (f : α → β) (t : Tree α) : Tree β := ⟨f t.val, t.cs.map (fun t' => t'.map f)⟩ termination_by t @@ -35,12 +35,12 @@ info: Tree.map.induct.{u_1} {α : Type u_1} (motive : Tree α → Prop) #check Tree.map.induct /-- -info: α : Type u_1 +trace: α : Type u_1 t x✝ : Tree α h✝ : x✝ ∈ t.cs ⊢ sizeOf x✝ < sizeOf t -/ -#guard_msgs in +#guard_msgs(trace) in def Tree.pruneRevAndMap (f : α → β) (t : Tree α) : Tree β := ⟨f t.val, (t.cs.filter (fun t' => not t'.isLeaf)).reverse.map (·.pruneRevAndMap f)⟩ termination_by t @@ -64,14 +64,14 @@ info: Tree.pruneRevAndMap.induct.{u_1} {α : Type u_1} (motive : Tree α → Pro #check Tree.pruneRevAndMap.induct /-- -info: α : Type u_1 +trace: α : Type u_1 v : α cs : List (Tree α) x✝ : Tree α h✝ : x✝ ∈ cs ⊢ sizeOf x✝ < sizeOf { val := v, cs := cs } -/ -#guard_msgs in +#guard_msgs(trace) in def Tree.pruneRevAndMap' (f : α → β) : Tree α → Tree β | ⟨v,cs⟩ => ⟨f v, (cs.filter (fun t' => not t'.isLeaf)).reverse.map (·.pruneRevAndMap' f)⟩ termination_by t => t @@ -125,7 +125,7 @@ structure MTree (α : Type u) where /-- warning: declaration uses 'sorry' --- -info: α : Type u_1 +trace: α : Type u_1 t : MTree α x✝¹ : List (MTree α) h✝¹ : x✝¹ ∈ t.cs @@ -156,7 +156,7 @@ info: MTree.map.induct.{u_1} {α : Type u_1} (motive : MTree α → Prop) #check MTree.map.induct /-- -info: α : Type u_1 +trace: α : Type u_1 t : MTree α css : List (MTree α) h✝¹ : css ∈ t.cs @@ -164,7 +164,7 @@ c : MTree α h✝ : c ∈ css ⊢ sizeOf c < sizeOf t -/ -#guard_msgs in +#guard_msgs(trace) in def MTree.size (t : MTree α) : Nat := Id.run do let mut s := 1 for css in t.cs do @@ -223,7 +223,7 @@ inductive Expression where /-- warning: declaration uses 'sorry' --- -info: L : List (String × Expression) +trace: L : List (String × Expression) x : String × Expression h✝ : x ∈ L ⊢ sizeOf x.snd < sizeOf (Expression.object L) @@ -264,7 +264,7 @@ inductive Expression where /-- warning: declaration uses 'sorry' --- -info: L : List (String × Expression) +trace: L : List (String × Expression) x : String × Expression h✝ : x ∈ L ⊢ sizeOf x.snd < sizeOf (Expression.object L) @@ -316,7 +316,7 @@ section Binary -- Main point of this test is to check whether `Tree.map2._unary` leaks the preprocessing /-- -info: α : Type u_1 +trace: α : Type u_1 β : Type u_2 t1 : Tree α t2 y : Tree β diff --git a/tests/lean/run/wf_preprocess_leak.lean b/tests/lean/run/wf_preprocess_leak.lean index dd6d90823312..e6e58547538a 100644 --- a/tests/lean/run/wf_preprocess_leak.lean +++ b/tests/lean/run/wf_preprocess_leak.lean @@ -9,7 +9,7 @@ def Tree.isLeaf (t : Tree α) := t.cs.isEmpty -- the proof state: /-- -info: α : Type +trace: α : Type n : Nat cs : List (Tree α) x✝ : @@ -19,7 +19,7 @@ x✝ : ⊢ Prod.Lex (fun a₁ a₂ => a₁ < a₂) (fun a₁ a₂ => sizeOf a₁ < sizeOf a₂) (n, { cs := List.map (fun x => x✝ ⟨n + 1, x.val⟩ ⋯) cs.attach }) (n.succ, { cs := cs }) -/ -#guard_msgs in +#guard_msgs(trace) in def Tree.revrev : (n : Nat) → (t : Tree α) → Tree α | 0, t => t | n + 1, Tree.mk cs => revrev n (Tree.mk 
(cs.map (·.revrev (n + 1)))) diff --git a/tests/lean/run/zetaUnused.lean b/tests/lean/run/zetaUnused.lean index f096fbe76b94..cdf0dca1e65b 100644 --- a/tests/lean/run/zetaUnused.lean +++ b/tests/lean/run/zetaUnused.lean @@ -1,6 +1,6 @@ /-- -info: b : Bool +trace: b : Bool ⊢ if b = true then let_fun unused := (); True @@ -13,7 +13,7 @@ example (b : Bool) : if b then have unused := (); True else False := by trace_state; sorry /-- -info: b : Bool +trace: b : Bool ⊢ b = true --- warning: declaration uses 'sorry' @@ -23,7 +23,7 @@ example (b : Bool) : if b then have unused := (); True else False := by simp; trace_state; sorry /-- -info: b : Bool +trace: b : Bool ⊢ b = true ∧ let_fun unused := (); True @@ -40,7 +40,7 @@ example (b : Bool) : if b then have unused := (); True else False := by simp (config := Lean.Meta.Simp.neutralConfig) only; trace_state; sorry /-- -info: b : Bool +trace: b : Bool ⊢ if b = true then True else False --- warning: declaration uses 'sorry' @@ -51,7 +51,7 @@ example (b : Bool) : if b then have unused := (); True else False := by /-- -info: b : Bool +trace: b : Bool ⊢ if b = true then True else False --- warning: declaration uses 'sorry' @@ -65,7 +65,7 @@ example (b : Bool) : if b then have unused := (); True else False := by -- Now they are preserved: /-- -info: case isTrue +trace: case isTrue b : Bool h✝ : b = true ⊢ let_fun unused := (); diff --git a/tests/lean/simpArgTypeMismatch.lean.expected.out b/tests/lean/simpArgTypeMismatch.lean.expected.out index 1e471a675cda..8c4605b4106c 100644 --- a/tests/lean/simpArgTypeMismatch.lean.expected.out +++ b/tests/lean/simpArgTypeMismatch.lean.expected.out @@ -1,6 +1,6 @@ -simpArgTypeMismatch.lean:3:29-3:33: error: application type mismatch +simpArgTypeMismatch.lean:3:29-3:33: error: Application type mismatch: In the appplication decide_eq_false Unit -argument +the final argument Unit has type Type : Type 1 diff --git a/tests/lean/sorryAtError.lean.expected.out b/tests/lean/sorryAtError.lean.expected.out index bae3916f4fff..de6e075a73d6 100644 --- a/tests/lean/sorryAtError.lean.expected.out +++ b/tests/lean/sorryAtError.lean.expected.out @@ -1,6 +1,6 @@ -sorryAtError.lean:13:46-13:47: error: application type mismatch +sorryAtError.lean:13:46-13:47: error: Application type mismatch: In the appplication ty.ty Γ -argument +the final argument Γ has type x.ty.ctx : Type diff --git a/tests/lean/struct1.lean b/tests/lean/struct1.lean index 95fd8c9c2d53..fc602bd608ca 100644 --- a/tests/lean/struct1.lean +++ b/tests/lean/struct1.lean @@ -41,16 +41,16 @@ structure S extends A Nat where structure S'' where (x : Nat := true) -- error type mismatch -private structure S where +structure S where private mk :: (x : Nat) -private structure S where +structure S where protected mk :: (x : Nat) -private structure S where +structure S where protected (x : Nat) -private structure S where +structure S where mk2 :: (x : Nat) #check S diff --git a/tests/lean/theoremType.lean.expected.out b/tests/lean/theoremType.lean.expected.out index 6cef68f8cde3..824cac97b064 100644 --- a/tests/lean/theoremType.lean.expected.out +++ b/tests/lean/theoremType.lean.expected.out @@ -1,8 +1,10 @@ theoremType.lean:1:22-1:23: error: don't know how to synthesize placeholder context: ⊢ Nat -when the resulting type of a declaration is explicitly provided, all holes (e.g., `_`) in the header are resolved before the declaration body is processed + +Note: All holes (e.g., `_`) in the header of a theorem are resolved before the proof is processed; information from the proof 
cannot be used to infer what these values should be theoremType.lean:4:18-4:19: error: don't know how to synthesize placeholder context: ⊢ Nat -when the resulting type of a declaration is explicitly provided, all holes (e.g., `_`) in the header are resolved before the declaration body is processed + +Note: When the resulting type of a declaration is explicitly provided, all holes (e.g., `_`) in the header are resolved before the declaration body is processed diff --git a/tests/lean/withSetOptionIn.lean b/tests/lean/withSetOptionIn.lean index 8f34667133a5..bc6e3a657ef8 100644 --- a/tests/lean/withSetOptionIn.lean +++ b/tests/lean/withSetOptionIn.lean @@ -26,7 +26,7 @@ Ensure that `#trace_debug_foo` works as expected. #trace_debug_foo /-- info: [debug] foo -/ -#guard_msgs in +#guard_msgs(trace) in set_option trace.debug true in #trace_debug_foo /-! ## Test @@ -35,5 +35,5 @@ Should trace `[debug] foo`, and not log the error "unexpected command 'in'". -/ /-- info: [debug] foo -/ -#guard_msgs in +#guard_msgs(trace) in #test set_option trace.debug true in #trace_debug_foo diff --git a/tests/lean/withSetOptionIn.lean.expected.out b/tests/lean/withSetOptionIn.lean.expected.out index e69de29bb2d1..36ff910b23fe 100644 --- a/tests/lean/withSetOptionIn.lean.expected.out +++ b/tests/lean/withSetOptionIn.lean.expected.out @@ -0,0 +1,8 @@ +[debug] foo +withSetOptionIn.lean:29:0-29:11: error: ❌️ Docstring on `#guard_msgs` does not match generated message: + +trace: [debug] foo +[debug] foo +withSetOptionIn.lean:38:0-38:11: error: ❌️ Docstring on `#guard_msgs` does not match generated message: + +trace: [debug] foo diff --git a/tests/pkg/setup/Dep.lean b/tests/pkg/setup/Dep.lean new file mode 100644 index 000000000000..5d95528adde8 --- /dev/null +++ b/tests/pkg/setup/Dep.lean @@ -0,0 +1 @@ +def hello := "hello" diff --git a/tests/pkg/setup/Test.lean b/tests/pkg/setup/Test.lean new file mode 100644 index 000000000000..fac75d77bd82 --- /dev/null +++ b/tests/pkg/setup/Test.lean @@ -0,0 +1 @@ +#eval hello diff --git a/tests/pkg/setup/clean.sh b/tests/pkg/setup/clean.sh new file mode 100755 index 000000000000..2c4fabf32bf6 --- /dev/null +++ b/tests/pkg/setup/clean.sh @@ -0,0 +1 @@ +rm -f Dep.olean diff --git a/tests/pkg/setup/setup.json b/tests/pkg/setup/setup.json new file mode 100644 index 000000000000..ab86b7d2a4fe --- /dev/null +++ b/tests/pkg/setup/setup.json @@ -0,0 +1,19 @@ +{ + "name": "Dep", + "isModule": false, + "imports": [ + { + "module": "Dep", + "importAll": false, + "isExported": true + } + ], + "modules": { + "Dep": { + "olean": "Dep.olean" + } + }, + "dynlibs": [], + "plugins": [], + "options": {} +} diff --git a/tests/pkg/setup/test.sh b/tests/pkg/setup/test.sh new file mode 100755 index 000000000000..ff76796b2fc3 --- /dev/null +++ b/tests/pkg/setup/test.sh @@ -0,0 +1,6 @@ +#!/usr/bin/env bash +set -euo pipefail + +# Test that Lean will use the specified olean from `setup.json` +lean Dep.lean -o Dep.olean +lean Test.lean --setup setup.json